1 /******************************************************************************
4 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
6 * Copyright (c) 2005 Keir Fraser
8 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
9 * privileged instructions:
11 * Copyright (C) 2006 Qumranet
12 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
17 * This work is licensed under the terms of the GNU GPL, version 2. See
18 * the COPYING file in the top-level directory.
20 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
23 #include <linux/kvm_host.h>
24 #include "kvm_cache_regs.h"
25 #include <asm/kvm_emulate.h>
26 #include <linux/stringify.h>
27 #include <asm/debugreg.h>
37 #define OpImplicit 1ull /* No generic decode */
38 #define OpReg 2ull /* Register */
39 #define OpMem 3ull /* Memory */
40 #define OpAcc 4ull /* Accumulator: AL/AX/EAX/RAX */
41 #define OpDI 5ull /* ES:DI/EDI/RDI */
42 #define OpMem64 6ull /* Memory, 64-bit */
43 #define OpImmUByte 7ull /* Zero-extended 8-bit immediate */
44 #define OpDX 8ull /* DX register */
45 #define OpCL 9ull /* CL register (for shifts) */
46 #define OpImmByte 10ull /* 8-bit sign extended immediate */
47 #define OpOne 11ull /* Implied 1 */
48 #define OpImm 12ull /* Sign extended up to 32-bit immediate */
49 #define OpMem16 13ull /* Memory operand (16-bit). */
50 #define OpMem32 14ull /* Memory operand (32-bit). */
51 #define OpImmU 15ull /* Immediate operand, zero extended */
52 #define OpSI 16ull /* SI/ESI/RSI */
53 #define OpImmFAddr 17ull /* Immediate far address */
54 #define OpMemFAddr 18ull /* Far address in memory */
55 #define OpImmU16 19ull /* Immediate operand, 16 bits, zero extended */
56 #define OpES 20ull /* ES */
57 #define OpCS 21ull /* CS */
58 #define OpSS 22ull /* SS */
59 #define OpDS 23ull /* DS */
60 #define OpFS 24ull /* FS */
61 #define OpGS 25ull /* GS */
62 #define OpMem8 26ull /* 8-bit zero extended memory operand */
63 #define OpImm64 27ull /* Sign extended 16/32/64-bit immediate */
64 #define OpXLat 28ull /* memory at BX/EBX/RBX + zero-extended AL */
65 #define OpAccLo 29ull /* Low part of extended acc (AX/AX/EAX/RAX) */
66 #define OpAccHi 30ull /* High part of extended acc (-/DX/EDX/RDX) */
68 #define OpBits 5 /* Width of operand field */
69 #define OpMask ((1ull << OpBits) - 1)
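/*
 * Each operand descriptor above is a 5-bit code (OpBits wide); the decode
 * flags pack one such code per operand slot at a fixed shift, so e.g. the
 * second source operand is recovered as (ctxt->d >> Src2Shift) & OpMask.
 */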
72 * Opcode effective-address decode tables.
73 * Note that we only emulate instructions that have at least one memory
74 * operand (excluding implicit stack references). We assume that stack
75 * references and instruction fetches will never occur in special memory
76 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
77 * not be handled.
80 /* Operand sizes: 8-bit operands or specified/overridden size. */
81 #define ByteOp (1<<0) /* 8-bit operands. */
82 /* Destination operand type. */
84 #define ImplicitOps (OpImplicit << DstShift)
85 #define DstReg (OpReg << DstShift)
86 #define DstMem (OpMem << DstShift)
87 #define DstAcc (OpAcc << DstShift)
88 #define DstDI (OpDI << DstShift)
89 #define DstMem64 (OpMem64 << DstShift)
90 #define DstMem16 (OpMem16 << DstShift)
91 #define DstImmUByte (OpImmUByte << DstShift)
92 #define DstDX (OpDX << DstShift)
93 #define DstAccLo (OpAccLo << DstShift)
94 #define DstMask (OpMask << DstShift)
95 /* Source operand type. */
97 #define SrcNone (OpNone << SrcShift)
98 #define SrcReg (OpReg << SrcShift)
99 #define SrcMem (OpMem << SrcShift)
100 #define SrcMem16 (OpMem16 << SrcShift)
101 #define SrcMem32 (OpMem32 << SrcShift)
102 #define SrcImm (OpImm << SrcShift)
103 #define SrcImmByte (OpImmByte << SrcShift)
104 #define SrcOne (OpOne << SrcShift)
105 #define SrcImmUByte (OpImmUByte << SrcShift)
106 #define SrcImmU (OpImmU << SrcShift)
107 #define SrcSI (OpSI << SrcShift)
108 #define SrcXLat (OpXLat << SrcShift)
109 #define SrcImmFAddr (OpImmFAddr << SrcShift)
110 #define SrcMemFAddr (OpMemFAddr << SrcShift)
111 #define SrcAcc (OpAcc << SrcShift)
112 #define SrcImmU16 (OpImmU16 << SrcShift)
113 #define SrcImm64 (OpImm64 << SrcShift)
114 #define SrcDX (OpDX << SrcShift)
115 #define SrcMem8 (OpMem8 << SrcShift)
116 #define SrcAccHi (OpAccHi << SrcShift)
117 #define SrcMask (OpMask << SrcShift)
118 #define BitOp (1<<11)
119 #define MemAbs (1<<12) /* Memory operand is absolute displacement */
120 #define String (1<<13) /* String instruction (rep capable) */
121 #define Stack (1<<14) /* Stack instruction (push/pop) */
122 #define GroupMask (7<<15) /* Opcode uses one of the group mechanisms */
123 #define Group (1<<15) /* Bits 3:5 of modrm byte extend opcode */
124 #define GroupDual (2<<15) /* Alternate decoding of mod == 3 */
125 #define Prefix (3<<15) /* Instruction varies with 66/f2/f3 prefix */
126 #define RMExt (4<<15) /* Opcode extension in ModRM r/m if mod == 3 */
127 #define Escape (5<<15) /* Escape to coprocessor instruction */
128 #define InstrDual (6<<15) /* Alternate instruction decoding of mod == 3 */
129 #define ModeDual (7<<15) /* Different instruction for 32/64 bit */
130 #define Sse (1<<18) /* SSE Vector instruction */
131 /* Generic ModRM decode. */
132 #define ModRM (1<<19)
133 /* Destination is only written; never read. */
136 #define Prot (1<<21) /* instruction generates #UD if not in prot-mode */
137 #define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */
138 #define NoAccess (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
139 #define Op3264 (1<<24) /* Operand is 64b in long mode, 32b otherwise */
140 #define Undefined (1<<25) /* No Such Instruction */
141 #define Lock (1<<26) /* lock prefix is allowed for the instruction */
142 #define Priv (1<<27) /* instruction generates #GP if current CPL != 0 */
144 #define PageTable (1 << 29) /* instruction used to write page table */
145 #define NotImpl (1 << 30) /* instruction is not implemented */
146 /* Source 2 operand type */
147 #define Src2Shift (31)
148 #define Src2None (OpNone << Src2Shift)
149 #define Src2Mem (OpMem << Src2Shift)
150 #define Src2CL (OpCL << Src2Shift)
151 #define Src2ImmByte (OpImmByte << Src2Shift)
152 #define Src2One (OpOne << Src2Shift)
153 #define Src2Imm (OpImm << Src2Shift)
154 #define Src2ES (OpES << Src2Shift)
155 #define Src2CS (OpCS << Src2Shift)
156 #define Src2SS (OpSS << Src2Shift)
157 #define Src2DS (OpDS << Src2Shift)
158 #define Src2FS (OpFS << Src2Shift)
159 #define Src2GS (OpGS << Src2Shift)
160 #define Src2Mask (OpMask << Src2Shift)
161 #define Mmx ((u64)1 << 40) /* MMX Vector instruction */
162 #define AlignMask ((u64)7 << 41)
163 #define Aligned ((u64)1 << 41) /* Explicitly aligned (e.g. MOVDQA) */
164 #define Unaligned ((u64)2 << 41) /* Explicitly unaligned (e.g. MOVDQU) */
165 #define Avx ((u64)3 << 41) /* Advanced Vector Extensions */
166 #define Aligned16 ((u64)4 << 41) /* Aligned to 16 byte boundary (e.g. FXSAVE) */
167 #define Fastop ((u64)1 << 44) /* Use opcode::u.fastop */
168 #define NoWrite ((u64)1 << 45) /* No writeback */
169 #define SrcWrite ((u64)1 << 46) /* Write back src operand */
170 #define NoMod ((u64)1 << 47) /* Mod field is ignored */
171 #define Intercept ((u64)1 << 48) /* Has valid intercept field */
172 #define CheckPerm ((u64)1 << 49) /* Has valid check_perm field */
173 #define PrivUD ((u64)1 << 51) /* #UD instead of #GP on CPL > 0 */
174 #define NearBranch ((u64)1 << 52) /* Near branches */
175 #define No16 ((u64)1 << 53) /* No 16 bit operand */
176 #define IncSP ((u64)1 << 54) /* SP is incremented before ModRM calc */
177 #define TwoMemOp ((u64)1 << 55) /* Instruction has two memory operands */
179 #define DstXacc (DstAccLo | SrcAccHi | SrcWrite)
181 #define X2(x...) x, x
182 #define X3(x...) X2(x), x
183 #define X4(x...) X2(x), X2(x)
184 #define X5(x...) X4(x), x
185 #define X6(x...) X4(x), X2(x)
186 #define X7(x...) X4(x), X3(x)
187 #define X8(x...) X4(x), X4(x)
188 #define X16(x...) X8(x), X8(x)
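/*
 * The X* macros simply repeat their argument list, so a run of identical
 * opcode-table entries can be written once: X4(x) expands to x, x, x, x,
 * and X16(x) to sixteen copies.
 */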
190 #define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
191 #define FASTOP_SIZE 8
194 * fastop functions have a special calling convention:
195 *
196 * dst: rax (in/out)
197 * src: rdx (in/out)
198 * src2: rcx (in)
199 * flags: rflags (in/out)
200 * ex: rsi (in:fastop pointer, out:zero if exception)
202 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
203 * different operand sizes can be reached by calculation, rather than a jump
204 * table (which would be bigger than the code).
206 * fastop functions are declared as taking a never-defined fastop parameter,
207 * so they can't be called from C directly.
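/*
 * Rough sketch of the size-based dispatch (see fastop() below), assuming
 * the byte/word/long/quad variants are emitted in that order as the
 * FASTOP* macros do:
 *
 *	fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
 *
 * i.e. a 4-byte operand lands on the third FASTOP_SIZE-aligned stub.
 */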
216 int (*execute)(struct x86_emulate_ctxt *ctxt);
217 const struct opcode *group;
218 const struct group_dual *gdual;
219 const struct gprefix *gprefix;
220 const struct escape *esc;
221 const struct instr_dual *idual;
222 const struct mode_dual *mdual;
223 void (*fastop)(struct fastop *fake);
225 int (*check_perm)(struct x86_emulate_ctxt *ctxt);
229 struct opcode mod012[8];
230 struct opcode mod3[8];
234 struct opcode pfx_no;
235 struct opcode pfx_66;
236 struct opcode pfx_f2;
237 struct opcode pfx_f3;
242 struct opcode high[64];
246 struct opcode mod012;
251 struct opcode mode32;
252 struct opcode mode64;
255 #define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
257 enum x86_transfer_type {
258 X86_TRANSFER_NONE,
259 X86_TRANSFER_CALL_JMP,
260 X86_TRANSFER_RET,
261 X86_TRANSFER_TASK_SWITCH,
262 };
264 static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
266 if (!(ctxt->regs_valid & (1 << nr))) {
267 ctxt->regs_valid |= 1 << nr;
268 ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
270 return ctxt->_regs[nr];
273 static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
275 ctxt->regs_valid |= 1 << nr;
276 ctxt->regs_dirty |= 1 << nr;
277 return &ctxt->_regs[nr];
280 static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
283 return reg_write(ctxt, nr);
286 static void writeback_registers(struct x86_emulate_ctxt *ctxt)
290 for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
291 ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
294 static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
296 ctxt->regs_dirty = 0;
297 ctxt->regs_valid = 0;
301 * These EFLAGS bits are restored from saved value during emulation, and
302 * any changes are written back to the saved value after emulation.
304 #define EFLAGS_MASK (X86_EFLAGS_OF|X86_EFLAGS_SF|X86_EFLAGS_ZF|X86_EFLAGS_AF|\
305 X86_EFLAGS_PF|X86_EFLAGS_CF)
313 static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));
315 #define FOP_FUNC(name) \
316 ".align " __stringify(FASTOP_SIZE) " \n\t" \
317 ".type " name ", @function \n\t" \
320 #define FOP_RET "ret \n\t"
322 #define FOP_START(op) \
323 extern void em_##op(struct fastop *fake); \
324 asm(".pushsection .text, \"ax\" \n\t" \
325 ".global em_" #op " \n\t" \
332 FOP_FUNC(__stringify(__UNIQUE_ID(nop))) \
335 #define FOP1E(op, dst) \
336 FOP_FUNC(#op "_" #dst) \
337 "10: " #op " %" #dst " \n\t" FOP_RET
339 #define FOP1EEX(op, dst) \
340 FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)
342 #define FASTOP1(op) \
347 ON64(FOP1E(op##q, rax)) \
350 /* 1-operand, using src2 (for MUL/DIV r/m) */
351 #define FASTOP1SRC2(op, name) \
356 ON64(FOP1E(op, rcx)) \
359 /* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
360 #define FASTOP1SRC2EX(op, name) \
365 ON64(FOP1EEX(op, rcx)) \
368 #define FOP2E(op, dst, src) \
369 FOP_FUNC(#op "_" #dst "_" #src) \
370 #op " %" #src ", %" #dst " \n\t" FOP_RET
372 #define FASTOP2(op) \
374 FOP2E(op##b, al, dl) \
375 FOP2E(op##w, ax, dx) \
376 FOP2E(op##l, eax, edx) \
377 ON64(FOP2E(op##q, rax, rdx)) \
380 /* 2 operand, word only */
381 #define FASTOP2W(op) \
384 FOP2E(op##w, ax, dx) \
385 FOP2E(op##l, eax, edx) \
386 ON64(FOP2E(op##q, rax, rdx)) \
389 /* 2 operand, src is CL */
390 #define FASTOP2CL(op) \
392 FOP2E(op##b, al, cl) \
393 FOP2E(op##w, ax, cl) \
394 FOP2E(op##l, eax, cl) \
395 ON64(FOP2E(op##q, rax, cl)) \
398 /* 2 operand, src and dest are reversed */
399 #define FASTOP2R(op, name) \
401 FOP2E(op##b, dl, al) \
402 FOP2E(op##w, dx, ax) \
403 FOP2E(op##l, edx, eax) \
404 ON64(FOP2E(op##q, rdx, rax)) \
407 #define FOP3E(op, dst, src, src2) \
408 FOP_FUNC(#op "_" #dst "_" #src "_" #src2) \
409 #op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET
411 /* 3-operand, word-only, src2=cl */
412 #define FASTOP3WCL(op) \
415 FOP3E(op##w, ax, dx, cl) \
416 FOP3E(op##l, eax, edx, cl) \
417 ON64(FOP3E(op##q, rax, rdx, cl)) \
420 /* Special case for SETcc - 1 instruction per cc */
421 #define FOP_SETCC(op) \
423 ".type " #op ", @function \n\t" \
428 asm(".pushsection .fixup, \"ax\"\n"
429 ".global kvm_fastop_exception \n"
430 "kvm_fastop_exception: xor %esi, %esi; ret\n"
452 FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
456 * XXX: inoutclob user must know where the argument is being expanded.
457 * Relying on CC_HAVE_ASM_GOTO would allow us to remove _fault.
459 #define asm_safe(insn, inoutclob...) \
463 asm volatile("1:" insn "\n" \
465 ".pushsection .fixup, \"ax\"\n" \
466 "3: movl $1, %[_fault]\n" \
469 _ASM_EXTABLE(1b, 3b) \
470 : [_fault] "+qm"(_fault) inoutclob ); \
472 _fault ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; \
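/*
 * Illustrative (hypothetical) use, assuming a faulting FPU instruction:
 *
 *	rc = asm_safe("fwait");
 *
 * The fixup at label 3 turns a faulted execution into
 * X86EMUL_UNHANDLEABLE instead of an unhandled host exception.
 */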
475 static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
476 enum x86_intercept intercept,
477 enum x86_intercept_stage stage)
479 struct x86_instruction_info info = {
480 .intercept = intercept,
481 .rep_prefix = ctxt->rep_prefix,
482 .modrm_mod = ctxt->modrm_mod,
483 .modrm_reg = ctxt->modrm_reg,
484 .modrm_rm = ctxt->modrm_rm,
485 .src_val = ctxt->src.val64,
486 .dst_val = ctxt->dst.val64,
487 .src_bytes = ctxt->src.bytes,
488 .dst_bytes = ctxt->dst.bytes,
489 .ad_bytes = ctxt->ad_bytes,
490 .next_rip = ctxt->eip,
493 return ctxt->ops->intercept(ctxt, &info, stage);
496 static void assign_masked(ulong *dest, ulong src, ulong mask)
498 *dest = (*dest & ~mask) | (src & mask);
501 static void assign_register(unsigned long *reg, u64 val, int bytes)
503 /* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
506 *(u8 *)reg = (u8)val;
509 *(u16 *)reg = (u16)val;
513 break; /* 64b: zero-extend */
520 static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
522 return (1UL << (ctxt->ad_bytes << 3)) - 1;
525 static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
528 struct desc_struct ss;
530 if (ctxt->mode == X86EMUL_MODE_PROT64)
532 ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
533 return ~0U >> ((ss.d ^ 1) * 16); /* d=0: 0xffff; d=1: 0xffffffff */
536 static int stack_size(struct x86_emulate_ctxt *ctxt)
538 return (__fls(stack_mask(ctxt)) + 1) >> 3;
541 /* Access/update address held in a register, based on addressing mode. */
542 static inline unsigned long
543 address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
545 if (ctxt->ad_bytes == sizeof(unsigned long))
548 return reg & ad_mask(ctxt);
551 static inline unsigned long
552 register_address(struct x86_emulate_ctxt *ctxt, int reg)
554 return address_mask(ctxt, reg_read(ctxt, reg));
557 static void masked_increment(ulong *reg, ulong mask, int inc)
559 assign_masked(reg, *reg + inc, mask);
563 register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
565 ulong *preg = reg_rmw(ctxt, reg);
567 assign_register(preg, *preg + inc, ctxt->ad_bytes);
570 static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
572 masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
575 static u32 desc_limit_scaled(struct desc_struct *desc)
577 u32 limit = get_desc_limit(desc);
579 return desc->g ? (limit << 12) | 0xfff : limit;
582 static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
584 if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
587 return ctxt->ops->get_cached_segment_base(ctxt, seg);
590 static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
591 u32 error, bool valid)
594 ctxt->exception.vector = vec;
595 ctxt->exception.error_code = error;
596 ctxt->exception.error_code_valid = valid;
597 return X86EMUL_PROPAGATE_FAULT;
600 static int emulate_db(struct x86_emulate_ctxt *ctxt)
602 return emulate_exception(ctxt, DB_VECTOR, 0, false);
605 static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
607 return emulate_exception(ctxt, GP_VECTOR, err, true);
610 static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
612 return emulate_exception(ctxt, SS_VECTOR, err, true);
615 static int emulate_ud(struct x86_emulate_ctxt *ctxt)
617 return emulate_exception(ctxt, UD_VECTOR, 0, false);
620 static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
622 return emulate_exception(ctxt, TS_VECTOR, err, true);
625 static int emulate_de(struct x86_emulate_ctxt *ctxt)
627 return emulate_exception(ctxt, DE_VECTOR, 0, false);
630 static int emulate_nm(struct x86_emulate_ctxt *ctxt)
632 return emulate_exception(ctxt, NM_VECTOR, 0, false);
635 static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
638 struct desc_struct desc;
640 ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
644 static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
649 struct desc_struct desc;
651 ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
652 ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
656 * x86 defines three classes of vector instructions: explicitly
657 * aligned, explicitly unaligned, and the rest, which change behaviour
658 * depending on whether they're AVX encoded or not.
660 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
661 * subject to the same check. FXSAVE and FXRSTOR are checked here too as their
662 * 512 bytes of data must be aligned to a 16 byte boundary.
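/*
 * insn_alignment() below maps those classes to a required alignment:
 * accesses smaller than 16 bytes and the Unaligned/Avx forms need none,
 * Aligned16 forms need 16 bytes, and Aligned forms need their operand
 * size; __linearize() then raises #GP(0) on a misaligned address.
 */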
664 static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size)
666 u64 alignment = ctxt->d & AlignMask;
668 if (likely(size < 16))
683 static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
684 struct segmented_address addr,
685 unsigned *max_size, unsigned size,
686 bool write, bool fetch,
687 enum x86emul_mode mode, ulong *linear)
689 struct desc_struct desc;
696 la = seg_base(ctxt, addr.seg) + addr.ea;
699 case X86EMUL_MODE_PROT64:
701 va_bits = ctxt_virt_addr_bits(ctxt);
702 if (get_canonical(la, va_bits) != la)
705 *max_size = min_t(u64, ~0u, (1ull << va_bits) - la);
706 if (size > *max_size)
710 *linear = la = (u32)la;
711 usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
715 /* code segment in protected mode or read-only data segment */
716 if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
717 || !(desc.type & 2)) && write)
719 /* unreadable code segment */
720 if (!fetch && (desc.type & 8) && !(desc.type & 2))
722 lim = desc_limit_scaled(&desc);
723 if (!(desc.type & 8) && (desc.type & 4)) {
724 /* expand-down segment */
727 lim = desc.d ? 0xffffffff : 0xffff;
731 if (lim == 0xffffffff)
734 *max_size = (u64)lim + 1 - addr.ea;
735 if (size > *max_size)
740 if (la & (insn_alignment(ctxt, size) - 1))
741 return emulate_gp(ctxt, 0);
742 return X86EMUL_CONTINUE;
744 if (addr.seg == VCPU_SREG_SS)
745 return emulate_ss(ctxt, 0);
747 return emulate_gp(ctxt, 0);
750 static int linearize(struct x86_emulate_ctxt *ctxt,
751 struct segmented_address addr,
752 unsigned size, bool write,
756 return __linearize(ctxt, addr, &max_size, size, write, false,
760 static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
761 enum x86emul_mode mode)
766 struct segmented_address addr = { .seg = VCPU_SREG_CS,
769 if (ctxt->op_bytes != sizeof(unsigned long))
770 addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
771 rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
772 if (rc == X86EMUL_CONTINUE)
773 ctxt->_eip = addr.ea;
777 static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
779 return assign_eip(ctxt, dst, ctxt->mode);
782 static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
783 const struct desc_struct *cs_desc)
785 enum x86emul_mode mode = ctxt->mode;
789 if (ctxt->mode >= X86EMUL_MODE_PROT16) {
793 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
795 mode = X86EMUL_MODE_PROT64;
797 mode = X86EMUL_MODE_PROT32; /* temporary value */
800 if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
801 mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
802 rc = assign_eip(ctxt, dst, mode);
803 if (rc == X86EMUL_CONTINUE)
808 static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
810 return assign_eip_near(ctxt, ctxt->_eip + rel);
813 static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
814 struct segmented_address addr,
821 rc = linearize(ctxt, addr, size, false, &linear);
822 if (rc != X86EMUL_CONTINUE)
824 return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
827 static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
828 struct segmented_address addr,
835 rc = linearize(ctxt, addr, size, true, &linear);
836 if (rc != X86EMUL_CONTINUE)
838 return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception);
842 * Prefetch the remaining bytes of the instruction, without crossing a page
843 * boundary, if they are not in the fetch_cache yet.
845 static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
848 unsigned size, max_size;
849 unsigned long linear;
850 int cur_size = ctxt->fetch.end - ctxt->fetch.data;
851 struct segmented_address addr = { .seg = VCPU_SREG_CS,
852 .ea = ctxt->eip + cur_size };
855 * We do not know exactly how many bytes will be needed, and
856 * __linearize is expensive, so fetch as much as possible. We
857 * just have to avoid going beyond the 15 byte limit, the end
858 * of the segment, or the end of the page.
860 * __linearize is called with size 0 so that it does not do any
861 * boundary check itself. Instead, we use max_size to check
862 * against op_size.
864 rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
866 if (unlikely(rc != X86EMUL_CONTINUE))
869 size = min_t(unsigned, 15UL ^ cur_size, max_size);
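/* cur_size never exceeds 15 here, so 15UL ^ cur_size == 15 - cur_size. */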
870 size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));
873 * One instruction can only straddle two pages,
874 * and one page has already been loaded at the beginning of
875 * x86_decode_insn. So, if there still are not enough bytes,
876 * we must have hit the 15-byte length limit.
878 if (unlikely(size < op_size))
879 return emulate_gp(ctxt, 0);
881 rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
882 size, &ctxt->exception);
883 if (unlikely(rc != X86EMUL_CONTINUE))
885 ctxt->fetch.end += size;
886 return X86EMUL_CONTINUE;
889 static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
892 unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;
894 if (unlikely(done_size < size))
895 return __do_insn_fetch_bytes(ctxt, size - done_size);
897 return X86EMUL_CONTINUE;
900 /* Fetch next part of the instruction being emulated. */
901 #define insn_fetch(_type, _ctxt) \
904 rc = do_insn_fetch_bytes(_ctxt, sizeof(_type)); \
905 if (rc != X86EMUL_CONTINUE) \
907 ctxt->_eip += sizeof(_type); \
908 memcpy(&_x, ctxt->fetch.ptr, sizeof(_type)); \
909 ctxt->fetch.ptr += sizeof(_type); \
913 #define insn_fetch_arr(_arr, _size, _ctxt) \
915 rc = do_insn_fetch_bytes(_ctxt, _size); \
916 if (rc != X86EMUL_CONTINUE) \
918 ctxt->_eip += (_size); \
919 memcpy(_arr, ctxt->fetch.ptr, _size); \
920 ctxt->fetch.ptr += (_size); \
924 * Given the 'reg' portion of a ModRM byte, and a register block, return a
925 * pointer into the block that addresses the relevant register.
926 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
928 static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
932 int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;
934 if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
935 p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
937 p = reg_rmw(ctxt, modrm_reg);
941 static int read_descriptor(struct x86_emulate_ctxt *ctxt,
942 struct segmented_address addr,
943 u16 *size, unsigned long *address, int op_bytes)
950 rc = segmented_read_std(ctxt, addr, size, 2);
951 if (rc != X86EMUL_CONTINUE)
954 rc = segmented_read_std(ctxt, addr, address, op_bytes);
968 FASTOP1SRC2(mul, mul_ex);
969 FASTOP1SRC2(imul, imul_ex);
970 FASTOP1SRC2EX(div, div_ex);
971 FASTOP1SRC2EX(idiv, idiv_ex);
1000 FASTOP2R(cmp, cmp_r);
1002 static int em_bsf_c(struct x86_emulate_ctxt *ctxt)
1004 /* If src is zero, do not write back, but update flags */
1005 if (ctxt->src.val == 0)
1006 ctxt->dst.type = OP_NONE;
1007 return fastop(ctxt, em_bsf);
1010 static int em_bsr_c(struct x86_emulate_ctxt *ctxt)
1012 /* If src is zero, do not write back, but update flags */
1013 if (ctxt->src.val == 0)
1014 ctxt->dst.type = OP_NONE;
1015 return fastop(ctxt, em_bsr);
1018 static __always_inline u8 test_cc(unsigned int condition, unsigned long flags)
1021 void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);
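/*
 * Each SETcc stub emitted by FOP_SETCC is expected to occupy exactly
 * 4 bytes, so the handler for condition code N sits at em_setcc + 4 * N.
 */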
1023 flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
1024 asm("push %[flags]; popf; call *%[fastop]"
1025 : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags));
1029 static void fetch_register_operand(struct operand *op)
1031 switch (op->bytes) {
1033 op->val = *(u8 *)op->addr.reg;
1036 op->val = *(u16 *)op->addr.reg;
1039 op->val = *(u32 *)op->addr.reg;
1042 op->val = *(u64 *)op->addr.reg;
1047 static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
1050 case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
1051 case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
1052 case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
1053 case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
1054 case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
1055 case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
1056 case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
1057 case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
1058 #ifdef CONFIG_X86_64
1059 case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
1060 case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
1061 case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
1062 case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
1063 case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
1064 case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
1065 case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
1066 case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
1072 static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
1076 case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
1077 case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
1078 case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
1079 case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
1080 case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
1081 case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
1082 case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
1083 case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
1084 #ifdef CONFIG_X86_64
1085 case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
1086 case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
1087 case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
1088 case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
1089 case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
1090 case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
1091 case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
1092 case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
1098 static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
1101 case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
1102 case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
1103 case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
1104 case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
1105 case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
1106 case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
1107 case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
1108 case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
1113 static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
1116 case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
1117 case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
1118 case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
1119 case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
1120 case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
1121 case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
1122 case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
1123 case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
1128 static int em_fninit(struct x86_emulate_ctxt *ctxt)
1130 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1131 return emulate_nm(ctxt);
1133 asm volatile("fninit");
1134 return X86EMUL_CONTINUE;
1137 static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
1141 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1142 return emulate_nm(ctxt);
1144 asm volatile("fnstcw %0": "+m"(fcw));
1146 ctxt->dst.val = fcw;
1148 return X86EMUL_CONTINUE;
1151 static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
1155 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1156 return emulate_nm(ctxt);
1158 asm volatile("fnstsw %0": "+m"(fsw));
1160 ctxt->dst.val = fsw;
1162 return X86EMUL_CONTINUE;
1165 static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
1168 unsigned reg = ctxt->modrm_reg;
1170 if (!(ctxt->d & ModRM))
1171 reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);
1173 if (ctxt->d & Sse) {
1177 read_sse_reg(ctxt, &op->vec_val, reg);
1180 if (ctxt->d & Mmx) {
1189 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
1190 op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);
1192 fetch_register_operand(op);
1193 op->orig_val = op->val;
1196 static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
1198 if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
1199 ctxt->modrm_seg = VCPU_SREG_SS;
1202 static int decode_modrm(struct x86_emulate_ctxt *ctxt,
1206 int index_reg, base_reg, scale;
1207 int rc = X86EMUL_CONTINUE;
1210 ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
1211 index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
1212 base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */
1214 ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
1215 ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
1216 ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
1217 ctxt->modrm_seg = VCPU_SREG_DS;
1219 if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
1221 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
1222 op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
1224 if (ctxt->d & Sse) {
1227 op->addr.xmm = ctxt->modrm_rm;
1228 read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
1231 if (ctxt->d & Mmx) {
1234 op->addr.mm = ctxt->modrm_rm & 7;
1237 fetch_register_operand(op);
1243 if (ctxt->ad_bytes == 2) {
1244 unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
1245 unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
1246 unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
1247 unsigned di = reg_read(ctxt, VCPU_REGS_RDI);
1249 /* 16-bit ModR/M decode. */
1250 switch (ctxt->modrm_mod) {
1252 if (ctxt->modrm_rm == 6)
1253 modrm_ea += insn_fetch(u16, ctxt);
1256 modrm_ea += insn_fetch(s8, ctxt);
1259 modrm_ea += insn_fetch(u16, ctxt);
1262 switch (ctxt->modrm_rm) {
1264 modrm_ea += bx + si;
1267 modrm_ea += bx + di;
1270 modrm_ea += bp + si;
1273 modrm_ea += bp + di;
1282 if (ctxt->modrm_mod != 0)
1289 if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
1290 (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
1291 ctxt->modrm_seg = VCPU_SREG_SS;
1292 modrm_ea = (u16)modrm_ea;
1294 /* 32/64-bit ModR/M decode. */
1295 if ((ctxt->modrm_rm & 7) == 4) {
1296 sib = insn_fetch(u8, ctxt);
1297 index_reg |= (sib >> 3) & 7;
1298 base_reg |= sib & 7;
1301 if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
1302 modrm_ea += insn_fetch(s32, ctxt);
1304 modrm_ea += reg_read(ctxt, base_reg);
1305 adjust_modrm_seg(ctxt, base_reg);
1306 /* Increment ESP on POP [ESP] */
1307 if ((ctxt->d & IncSP) &&
1308 base_reg == VCPU_REGS_RSP)
1309 modrm_ea += ctxt->op_bytes;
1312 modrm_ea += reg_read(ctxt, index_reg) << scale;
1313 } else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
1314 modrm_ea += insn_fetch(s32, ctxt);
1315 if (ctxt->mode == X86EMUL_MODE_PROT64)
1316 ctxt->rip_relative = 1;
1318 base_reg = ctxt->modrm_rm;
1319 modrm_ea += reg_read(ctxt, base_reg);
1320 adjust_modrm_seg(ctxt, base_reg);
1322 switch (ctxt->modrm_mod) {
1324 modrm_ea += insn_fetch(s8, ctxt);
1327 modrm_ea += insn_fetch(s32, ctxt);
1331 op->addr.mem.ea = modrm_ea;
1332 if (ctxt->ad_bytes != 8)
1333 ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;
1339 static int decode_abs(struct x86_emulate_ctxt *ctxt,
1342 int rc = X86EMUL_CONTINUE;
1345 switch (ctxt->ad_bytes) {
1347 op->addr.mem.ea = insn_fetch(u16, ctxt);
1350 op->addr.mem.ea = insn_fetch(u32, ctxt);
1353 op->addr.mem.ea = insn_fetch(u64, ctxt);
1360 static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
1364 if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
1365 mask = ~((long)ctxt->dst.bytes * 8 - 1);
1367 if (ctxt->src.bytes == 2)
1368 sv = (s16)ctxt->src.val & (s16)mask;
1369 else if (ctxt->src.bytes == 4)
1370 sv = (s32)ctxt->src.val & (s32)mask;
1372 sv = (s64)ctxt->src.val & (s64)mask;
1374 ctxt->dst.addr.mem.ea = address_mask(ctxt,
1375 ctxt->dst.addr.mem.ea + (sv >> 3));
1378 /* only subword offset */
1379 ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
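/*
 * Worked example: BT with a 4-byte register source of 100 against memory:
 * mask = ~31, sv = 96, so the effective address moves forward 96/8 = 12
 * bytes and the remaining in-operand bit offset becomes 100 & 31 = 4.
 */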
1382 static int read_emulated(struct x86_emulate_ctxt *ctxt,
1383 unsigned long addr, void *dest, unsigned size)
1386 struct read_cache *mc = &ctxt->mem_read;
1388 if (mc->pos < mc->end)
1391 WARN_ON((mc->end + size) >= sizeof(mc->data));
1393 rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
1395 if (rc != X86EMUL_CONTINUE)
1401 memcpy(dest, mc->data + mc->pos, size);
1403 return X86EMUL_CONTINUE;
1406 static int segmented_read(struct x86_emulate_ctxt *ctxt,
1407 struct segmented_address addr,
1414 rc = linearize(ctxt, addr, size, false, &linear);
1415 if (rc != X86EMUL_CONTINUE)
1417 return read_emulated(ctxt, linear, data, size);
1420 static int segmented_write(struct x86_emulate_ctxt *ctxt,
1421 struct segmented_address addr,
1428 rc = linearize(ctxt, addr, size, true, &linear);
1429 if (rc != X86EMUL_CONTINUE)
1431 return ctxt->ops->write_emulated(ctxt, linear, data, size,
1435 static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
1436 struct segmented_address addr,
1437 const void *orig_data, const void *data,
1443 rc = linearize(ctxt, addr, size, true, &linear);
1444 if (rc != X86EMUL_CONTINUE)
1446 return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
1447 size, &ctxt->exception);
1450 static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
1451 unsigned int size, unsigned short port,
1454 struct read_cache *rc = &ctxt->io_read;
1456 if (rc->pos == rc->end) { /* refill pio read ahead */
1457 unsigned int in_page, n;
1458 unsigned int count = ctxt->rep_prefix ?
1459 address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
1460 in_page = (ctxt->eflags & X86_EFLAGS_DF) ?
1461 offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
1462 PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
1463 n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
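/*
 * Read ahead as much as one REP burst can consume: bounded by the room
 * left in the current page (direction-dependent), by the io_read cache
 * size, and by the repeat count in RCX.
 */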
1466 rc->pos = rc->end = 0;
1467 if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
1472 if (ctxt->rep_prefix && (ctxt->d & String) &&
1473 !(ctxt->eflags & X86_EFLAGS_DF)) {
1474 ctxt->dst.data = rc->data + rc->pos;
1475 ctxt->dst.type = OP_MEM_STR;
1476 ctxt->dst.count = (rc->end - rc->pos) / size;
1479 memcpy(dest, rc->data + rc->pos, size);
1485 static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
1486 u16 index, struct desc_struct *desc)
1491 ctxt->ops->get_idt(ctxt, &dt);
1493 if (dt.size < index * 8 + 7)
1494 return emulate_gp(ctxt, index << 3 | 0x2);
1496 addr = dt.address + index * 8;
1497 return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
1501 static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
1502 u16 selector, struct desc_ptr *dt)
1504 const struct x86_emulate_ops *ops = ctxt->ops;
1507 if (selector & 1 << 2) {
1508 struct desc_struct desc;
1511 memset(dt, 0, sizeof(*dt));
1512 if (!ops->get_segment(ctxt, &sel, &desc, &base3,
1516 dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
1517 dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
1519 ops->get_gdt(ctxt, dt);
1522 static int get_descriptor_ptr(struct x86_emulate_ctxt *ctxt,
1523 u16 selector, ulong *desc_addr_p)
1526 u16 index = selector >> 3;
1529 get_descriptor_table_ptr(ctxt, selector, &dt);
1531 if (dt.size < index * 8 + 7)
1532 return emulate_gp(ctxt, selector & 0xfffc);
1534 addr = dt.address + index * 8;
1536 #ifdef CONFIG_X86_64
1537 if (addr >> 32 != 0) {
1540 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
1541 if (!(efer & EFER_LMA))
1546 *desc_addr_p = addr;
1547 return X86EMUL_CONTINUE;
1550 /* allowed just for 8-byte segment descriptors */
1551 static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1552 u16 selector, struct desc_struct *desc,
1557 rc = get_descriptor_ptr(ctxt, selector, desc_addr_p);
1558 if (rc != X86EMUL_CONTINUE)
1561 return ctxt->ops->read_std(ctxt, *desc_addr_p, desc, sizeof(*desc),
1565 /* allowed just for 8-byte segment descriptors */
1566 static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1567 u16 selector, struct desc_struct *desc)
1572 rc = get_descriptor_ptr(ctxt, selector, &addr);
1573 if (rc != X86EMUL_CONTINUE)
1576 return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
1580 static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1581 u16 selector, int seg, u8 cpl,
1582 enum x86_transfer_type transfer,
1583 struct desc_struct *desc)
1585 struct desc_struct seg_desc, old_desc;
1587 unsigned err_vec = GP_VECTOR;
1589 bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
1595 memset(&seg_desc, 0, sizeof seg_desc);
1597 if (ctxt->mode == X86EMUL_MODE_REAL) {
1598 /* set real mode segment descriptor (keep limit etc. for
1599 * unreal mode) */
1600 ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
1601 set_desc_base(&seg_desc, selector << 4);
1603 } else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
1604 /* VM86 needs a clean new segment descriptor */
1605 set_desc_base(&seg_desc, selector << 4);
1606 set_desc_limit(&seg_desc, 0xffff);
1616 /* TR should be in GDT only */
1617 if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
1620 /* NULL selector is not valid for TR, CS and (except for long mode) SS */
1621 if (null_selector) {
1622 if (seg == VCPU_SREG_CS || seg == VCPU_SREG_TR)
1625 if (seg == VCPU_SREG_SS) {
1626 if (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)
1630 * ctxt->ops->set_segment expects the CPL to be in
1631 * SS.DPL, so fake an expand-up 32-bit data segment.
1641 /* Skip all following checks */
1645 ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
1646 if (ret != X86EMUL_CONTINUE)
1649 err_code = selector & 0xfffc;
1650 err_vec = (transfer == X86_TRANSFER_TASK_SWITCH) ? TS_VECTOR :
1653 /* can't load system descriptor into segment selector */
1654 if (seg <= VCPU_SREG_GS && !seg_desc.s) {
1655 if (transfer == X86_TRANSFER_CALL_JMP)
1656 return X86EMUL_UNHANDLEABLE;
1661 err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
1670 * segment is not a writable data segment, or the segment
1671 * selector's RPL != CPL, or the descriptor's DPL != CPL
1672 */
1673 if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
1677 if (!(seg_desc.type & 8))
1680 if (seg_desc.type & 4) {
1686 if (rpl > cpl || dpl != cpl)
1689 /* in long-mode d/b must be clear if l is set */
1690 if (seg_desc.d && seg_desc.l) {
1693 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
1694 if (efer & EFER_LMA)
1698 /* CS(RPL) <- CPL */
1699 selector = (selector & 0xfffc) | cpl;
1702 if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
1704 old_desc = seg_desc;
1705 seg_desc.type |= 2; /* busy */
1706 ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
1707 sizeof(seg_desc), &ctxt->exception);
1708 if (ret != X86EMUL_CONTINUE)
1711 case VCPU_SREG_LDTR:
1712 if (seg_desc.s || seg_desc.type != 2)
1715 default: /* DS, ES, FS, or GS */
1717 * segment is not a data or readable code segment or
1718 * ((segment is a data or nonconforming code segment)
1719 * and (both RPL and CPL > DPL))
1721 if ((seg_desc.type & 0xa) == 0x8 ||
1722 (((seg_desc.type & 0xc) != 0xc) &&
1723 (rpl > dpl && cpl > dpl)))
1729 /* mark segment as accessed */
1730 if (!(seg_desc.type & 1)) {
1732 ret = write_segment_descriptor(ctxt, selector,
1734 if (ret != X86EMUL_CONTINUE)
1737 } else if (ctxt->mode == X86EMUL_MODE_PROT64) {
1738 ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
1739 sizeof(base3), &ctxt->exception);
1740 if (ret != X86EMUL_CONTINUE)
1742 if (emul_is_noncanonical_address(get_desc_base(&seg_desc) |
1743 ((u64)base3 << 32), ctxt))
1744 return emulate_gp(ctxt, 0);
1747 ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
1750 return X86EMUL_CONTINUE;
1752 return emulate_exception(ctxt, err_vec, err_code, true);
1755 static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1756 u16 selector, int seg)
1758 u8 cpl = ctxt->ops->cpl(ctxt);
1761 * None of MOV, POP and LSS can load a NULL selector in CPL=3, but
1762 * they can load it at CPL<3 (Intel's manual says only LSS can,
1763 * but it's wrong).
1764 *
1765 * However, the Intel manual says that putting IST=1/DPL=3 in
1766 * an interrupt gate will result in SS=3 (the AMD manual instead
1767 * says it doesn't), so allow SS=3 in __load_segment_descriptor
1768 * and only forbid it here.
1770 if (seg == VCPU_SREG_SS && selector == 3 &&
1771 ctxt->mode == X86EMUL_MODE_PROT64)
1772 return emulate_exception(ctxt, GP_VECTOR, 0, true);
1774 return __load_segment_descriptor(ctxt, selector, seg, cpl,
1775 X86_TRANSFER_NONE, NULL);
1778 static void write_register_operand(struct operand *op)
1780 return assign_register(op->addr.reg, op->val, op->bytes);
1783 static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
1787 write_register_operand(op);
1790 if (ctxt->lock_prefix)
1791 return segmented_cmpxchg(ctxt,
1797 return segmented_write(ctxt,
1803 return segmented_write(ctxt,
1806 op->bytes * op->count);
1809 write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
1812 write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
1820 return X86EMUL_CONTINUE;
1823 static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
1825 struct segmented_address addr;
1827 rsp_increment(ctxt, -bytes);
1828 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1829 addr.seg = VCPU_SREG_SS;
1831 return segmented_write(ctxt, addr, data, bytes);
1834 static int em_push(struct x86_emulate_ctxt *ctxt)
1836 /* Disable writeback. */
1837 ctxt->dst.type = OP_NONE;
1838 return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
1841 static int emulate_pop(struct x86_emulate_ctxt *ctxt,
1842 void *dest, int len)
1845 struct segmented_address addr;
1847 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1848 addr.seg = VCPU_SREG_SS;
1849 rc = segmented_read(ctxt, addr, dest, len);
1850 if (rc != X86EMUL_CONTINUE)
1853 rsp_increment(ctxt, len);
1857 static int em_pop(struct x86_emulate_ctxt *ctxt)
1859 return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1862 static int emulate_popf(struct x86_emulate_ctxt *ctxt,
1863 void *dest, int len)
1866 unsigned long val, change_mask;
1867 int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
1868 int cpl = ctxt->ops->cpl(ctxt);
1870 rc = emulate_pop(ctxt, &val, len);
1871 if (rc != X86EMUL_CONTINUE)
1874 change_mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
1875 X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF |
1876 X86_EFLAGS_TF | X86_EFLAGS_DF | X86_EFLAGS_NT |
1877 X86_EFLAGS_AC | X86_EFLAGS_ID;
1879 switch(ctxt->mode) {
1880 case X86EMUL_MODE_PROT64:
1881 case X86EMUL_MODE_PROT32:
1882 case X86EMUL_MODE_PROT16:
1883 if (cpl == 0)
1884 change_mask |= X86_EFLAGS_IOPL;
1885 if (cpl <= iopl)
1886 change_mask |= X86_EFLAGS_IF;
1887 break;
1888 case X86EMUL_MODE_VM86:
1889 if (iopl < 3)
1890 return emulate_gp(ctxt, 0);
1891 change_mask |= X86_EFLAGS_IF;
1892 break;
1893 default: /* real mode */
1894 change_mask |= (X86_EFLAGS_IOPL | X86_EFLAGS_IF);
1898 *(unsigned long *)dest =
1899 (ctxt->eflags & ~change_mask) | (val & change_mask);
1904 static int em_popf(struct x86_emulate_ctxt *ctxt)
1906 ctxt->dst.type = OP_REG;
1907 ctxt->dst.addr.reg = &ctxt->eflags;
1908 ctxt->dst.bytes = ctxt->op_bytes;
1909 return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1912 static int em_enter(struct x86_emulate_ctxt *ctxt)
1915 unsigned frame_size = ctxt->src.val;
1916 unsigned nesting_level = ctxt->src2.val & 31;
1919 if (nesting_level)
1920 return X86EMUL_UNHANDLEABLE;
1922 rbp = reg_read(ctxt, VCPU_REGS_RBP);
1923 rc = push(ctxt, &rbp, stack_size(ctxt));
1924 if (rc != X86EMUL_CONTINUE)
1926 assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
1928 assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
1929 reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
1931 return X86EMUL_CONTINUE;
1934 static int em_leave(struct x86_emulate_ctxt *ctxt)
1936 assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
1938 return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
1941 static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
1943 int seg = ctxt->src2.val;
1945 ctxt->src.val = get_segment_selector(ctxt, seg);
1946 if (ctxt->op_bytes == 4) {
1947 rsp_increment(ctxt, -2);
1951 return em_push(ctxt);
1954 static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
1956 int seg = ctxt->src2.val;
1957 unsigned long selector;
1960 rc = emulate_pop(ctxt, &selector, 2);
1961 if (rc != X86EMUL_CONTINUE)
1964 if (ctxt->modrm_reg == VCPU_SREG_SS)
1965 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
1966 if (ctxt->op_bytes > 2)
1967 rsp_increment(ctxt, ctxt->op_bytes - 2);
1969 rc = load_segment_descriptor(ctxt, (u16)selector, seg);
1973 static int em_pusha(struct x86_emulate_ctxt *ctxt)
1975 unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
1976 int rc = X86EMUL_CONTINUE;
1977 int reg = VCPU_REGS_RAX;
1979 while (reg <= VCPU_REGS_RDI) {
1980 (reg == VCPU_REGS_RSP) ?
1981 (ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg));
1984 if (rc != X86EMUL_CONTINUE)
1993 static int em_pushf(struct x86_emulate_ctxt *ctxt)
1995 ctxt->src.val = (unsigned long)ctxt->eflags & ~X86_EFLAGS_VM;
1996 return em_push(ctxt);
1999 static int em_popa(struct x86_emulate_ctxt *ctxt)
2001 int rc = X86EMUL_CONTINUE;
2002 int reg = VCPU_REGS_RDI;
2005 while (reg >= VCPU_REGS_RAX) {
2006 if (reg == VCPU_REGS_RSP) {
2007 rsp_increment(ctxt, ctxt->op_bytes);
2011 rc = emulate_pop(ctxt, &val, ctxt->op_bytes);
2012 if (rc != X86EMUL_CONTINUE)
2014 assign_register(reg_rmw(ctxt, reg), val, ctxt->op_bytes);
2020 static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
2022 const struct x86_emulate_ops *ops = ctxt->ops;
2029 /* TODO: Add limit checks */
2030 ctxt->src.val = ctxt->eflags;
2032 if (rc != X86EMUL_CONTINUE)
2035 ctxt->eflags &= ~(X86_EFLAGS_IF | X86_EFLAGS_TF | X86_EFLAGS_AC);
2037 ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
2039 if (rc != X86EMUL_CONTINUE)
2042 ctxt->src.val = ctxt->_eip;
2044 if (rc != X86EMUL_CONTINUE)
2047 ops->get_idt(ctxt, &dt);
2049 eip_addr = dt.address + (irq << 2);
2050 cs_addr = dt.address + (irq << 2) + 2;
2052 rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
2053 if (rc != X86EMUL_CONTINUE)
2056 rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
2057 if (rc != X86EMUL_CONTINUE)
2060 rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
2061 if (rc != X86EMUL_CONTINUE)
2069 int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
2073 invalidate_registers(ctxt);
2074 rc = __emulate_int_real(ctxt, irq);
2075 if (rc == X86EMUL_CONTINUE)
2076 writeback_registers(ctxt);
2080 static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
2082 switch(ctxt->mode) {
2083 case X86EMUL_MODE_REAL:
2084 return __emulate_int_real(ctxt, irq);
2085 case X86EMUL_MODE_VM86:
2086 case X86EMUL_MODE_PROT16:
2087 case X86EMUL_MODE_PROT32:
2088 case X86EMUL_MODE_PROT64:
2090 /* Protected mode interrupts are not implemented yet */
2091 return X86EMUL_UNHANDLEABLE;
2095 static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
2097 int rc = X86EMUL_CONTINUE;
2098 unsigned long temp_eip = 0;
2099 unsigned long temp_eflags = 0;
2100 unsigned long cs = 0;
2101 unsigned long mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
2102 X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_TF |
2103 X86_EFLAGS_IF | X86_EFLAGS_DF | X86_EFLAGS_OF |
2104 X86_EFLAGS_IOPL | X86_EFLAGS_NT | X86_EFLAGS_RF |
2105 X86_EFLAGS_AC | X86_EFLAGS_ID |
2107 unsigned long vm86_mask = X86_EFLAGS_VM | X86_EFLAGS_VIF |
2110 /* TODO: Add stack limit check */
2112 rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
2114 if (rc != X86EMUL_CONTINUE)
2117 if (temp_eip & ~0xffff)
2118 return emulate_gp(ctxt, 0);
2120 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2122 if (rc != X86EMUL_CONTINUE)
2125 rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
2127 if (rc != X86EMUL_CONTINUE)
2130 rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
2132 if (rc != X86EMUL_CONTINUE)
2135 ctxt->_eip = temp_eip;
2137 if (ctxt->op_bytes == 4)
2138 ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
2139 else if (ctxt->op_bytes == 2) {
2140 ctxt->eflags &= ~0xffff;
2141 ctxt->eflags |= temp_eflags;
2144 ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
2145 ctxt->eflags |= X86_EFLAGS_FIXED;
2146 ctxt->ops->set_nmi_mask(ctxt, false);
2151 static int em_iret(struct x86_emulate_ctxt *ctxt)
2153 switch(ctxt->mode) {
2154 case X86EMUL_MODE_REAL:
2155 return emulate_iret_real(ctxt);
2156 case X86EMUL_MODE_VM86:
2157 case X86EMUL_MODE_PROT16:
2158 case X86EMUL_MODE_PROT32:
2159 case X86EMUL_MODE_PROT64:
2161 /* iret from protected mode is not implemented yet */
2162 return X86EMUL_UNHANDLEABLE;
2166 static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
2170 struct desc_struct new_desc;
2171 u8 cpl = ctxt->ops->cpl(ctxt);
2173 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2175 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
2176 X86_TRANSFER_CALL_JMP,
2178 if (rc != X86EMUL_CONTINUE)
2181 rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
2182 /* Error handling is not implemented. */
2183 if (rc != X86EMUL_CONTINUE)
2184 return X86EMUL_UNHANDLEABLE;
2189 static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
2191 return assign_eip_near(ctxt, ctxt->src.val);
2194 static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
2199 old_eip = ctxt->_eip;
2200 rc = assign_eip_near(ctxt, ctxt->src.val);
2201 if (rc != X86EMUL_CONTINUE)
2203 ctxt->src.val = old_eip;
2208 static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
2210 u64 old = ctxt->dst.orig_val64;
2212 if (ctxt->dst.bytes == 16)
2213 return X86EMUL_UNHANDLEABLE;
2215 if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
2216 ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
2217 *reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
2218 *reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
2219 ctxt->eflags &= ~X86_EFLAGS_ZF;
2221 ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
2222 (u32) reg_read(ctxt, VCPU_REGS_RBX);
2224 ctxt->eflags |= X86_EFLAGS_ZF;
2226 return X86EMUL_CONTINUE;
2229 static int em_ret(struct x86_emulate_ctxt *ctxt)
2234 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2235 if (rc != X86EMUL_CONTINUE)
2238 return assign_eip_near(ctxt, eip);
2241 static int em_ret_far(struct x86_emulate_ctxt *ctxt)
2244 unsigned long eip, cs;
2245 int cpl = ctxt->ops->cpl(ctxt);
2246 struct desc_struct new_desc;
2248 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2249 if (rc != X86EMUL_CONTINUE)
2251 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2252 if (rc != X86EMUL_CONTINUE)
2254 /* Outer-privilege level return is not implemented */
2255 if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
2256 return X86EMUL_UNHANDLEABLE;
2257 rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl,
2260 if (rc != X86EMUL_CONTINUE)
2262 rc = assign_eip_far(ctxt, eip, &new_desc);
2263 /* Error handling is not implemented. */
2264 if (rc != X86EMUL_CONTINUE)
2265 return X86EMUL_UNHANDLEABLE;
2270 static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
2274 rc = em_ret_far(ctxt);
2275 if (rc != X86EMUL_CONTINUE)
2277 rsp_increment(ctxt, ctxt->src.val);
2278 return X86EMUL_CONTINUE;
2281 static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
2283 /* Save real source value, then compare EAX against destination. */
2284 ctxt->dst.orig_val = ctxt->dst.val;
2285 ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
2286 ctxt->src.orig_val = ctxt->src.val;
2287 ctxt->src.val = ctxt->dst.orig_val;
2288 fastop(ctxt, em_cmp);
2290 if (ctxt->eflags & X86_EFLAGS_ZF) {
2291 /* Success: write back to memory; no update of EAX */
2292 ctxt->src.type = OP_NONE;
2293 ctxt->dst.val = ctxt->src.orig_val;
2295 /* Failure: write the value we saw to EAX. */
2296 ctxt->src.type = OP_REG;
2297 ctxt->src.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
2298 ctxt->src.val = ctxt->dst.orig_val;
2299 /* Create write-cycle to dest by writing the same value */
2300 ctxt->dst.val = ctxt->dst.orig_val;
2302 return X86EMUL_CONTINUE;
2305 static int em_lseg(struct x86_emulate_ctxt *ctxt)
2307 int seg = ctxt->src2.val;
2311 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2313 rc = load_segment_descriptor(ctxt, sel, seg);
2314 if (rc != X86EMUL_CONTINUE)
2317 ctxt->dst.val = ctxt->src.val;
2321 static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt)
2323 u32 eax, ebx, ecx, edx;
2327 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
2328 return edx & bit(X86_FEATURE_LM);
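/*
 * GET_SMSTATE() below is a statement expression: it reads a
 * sizeof(type)-sized field of the SMM state-save area at smbase + offset
 * through ->read_phys() and makes the enclosing function return
 * X86EMUL_UNHANDLEABLE if the read fails.
 */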
2331 #define GET_SMSTATE(type, smbase, offset) \
2334 int r = ctxt->ops->read_phys(ctxt, smbase + offset, &__val, \
2336 if (r != X86EMUL_CONTINUE) \
2337 return X86EMUL_UNHANDLEABLE; \
2341 static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags)
2343 desc->g = (flags >> 23) & 1;
2344 desc->d = (flags >> 22) & 1;
2345 desc->l = (flags >> 21) & 1;
2346 desc->avl = (flags >> 20) & 1;
2347 desc->p = (flags >> 15) & 1;
2348 desc->dpl = (flags >> 13) & 3;
2349 desc->s = (flags >> 12) & 1;
2350 desc->type = (flags >> 8) & 15;
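/*
 * The bit positions above mirror the high dword of a segment descriptor
 * (type at bits 8-11, then S, DPL, P, ... AVL, L, D/B, G at bit 23),
 * which is the layout the SMM state-save area uses for segment attributes.
 */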
2353 static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
2355 struct desc_struct desc;
2359 selector = GET_SMSTATE(u32, smbase, 0x7fa8 + n * 4);
2362 offset = 0x7f84 + n * 12;
2364 offset = 0x7f2c + (n - 3) * 12;
2366 set_desc_base(&desc, GET_SMSTATE(u32, smbase, offset + 8));
2367 set_desc_limit(&desc, GET_SMSTATE(u32, smbase, offset + 4));
2368 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, offset));
2369 ctxt->ops->set_segment(ctxt, selector, &desc, 0, n);
2370 return X86EMUL_CONTINUE;
2373 static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
2375 struct desc_struct desc;
2380 offset = 0x7e00 + n * 16;
2382 selector = GET_SMSTATE(u16, smbase, offset);
2383 rsm_set_desc_flags(&desc, GET_SMSTATE(u16, smbase, offset + 2) << 8);
2384 set_desc_limit(&desc, GET_SMSTATE(u32, smbase, offset + 4));
2385 set_desc_base(&desc, GET_SMSTATE(u32, smbase, offset + 8));
2386 base3 = GET_SMSTATE(u32, smbase, offset + 12);
2388 ctxt->ops->set_segment(ctxt, selector, &desc, base3, n);
2389 return X86EMUL_CONTINUE;
2392 static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
2393 u64 cr0, u64 cr3, u64 cr4)
2398 /* In order to later set CR4.PCIDE, CR3[11:0] must be zero. */
2400 if (cr4 & X86_CR4_PCIDE) {
2405 bad = ctxt->ops->set_cr(ctxt, 3, cr3);
2407 return X86EMUL_UNHANDLEABLE;
2410 * First enable PAE, long mode needs it before CR0.PG = 1 is set.
2411 * Then enable protected mode. However, PCID cannot be enabled
2412 * if EFER.LMA=0, so set it separately.
2414 bad = ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
2416 return X86EMUL_UNHANDLEABLE;
2418 bad = ctxt->ops->set_cr(ctxt, 0, cr0);
2420 return X86EMUL_UNHANDLEABLE;
2422 if (cr4 & X86_CR4_PCIDE) {
2423 bad = ctxt->ops->set_cr(ctxt, 4, cr4);
2425 return X86EMUL_UNHANDLEABLE;
2427 bad = ctxt->ops->set_cr(ctxt, 3, cr3 | pcid);
2429 return X86EMUL_UNHANDLEABLE;
2434 return X86EMUL_CONTINUE;
2437 static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)
2439 struct desc_struct desc;
2442 u32 val, cr0, cr3, cr4;
2445 cr0 = GET_SMSTATE(u32, smbase, 0x7ffc);
2446 cr3 = GET_SMSTATE(u32, smbase, 0x7ff8);
2447 ctxt->eflags = GET_SMSTATE(u32, smbase, 0x7ff4) | X86_EFLAGS_FIXED;
2448 ctxt->_eip = GET_SMSTATE(u32, smbase, 0x7ff0);
2450 for (i = 0; i < 8; i++)
2451 *reg_write(ctxt, i) = GET_SMSTATE(u32, smbase, 0x7fd0 + i * 4);
2453 val = GET_SMSTATE(u32, smbase, 0x7fcc);
2454 ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
2455 val = GET_SMSTATE(u32, smbase, 0x7fc8);
2456 ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
2458 selector = GET_SMSTATE(u32, smbase, 0x7fc4);
2459 set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7f64));
2460 set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7f60));
2461 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7f5c));
2462 ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_TR);
2464 selector = GET_SMSTATE(u32, smbase, 0x7fc0);
2465 set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7f80));
2466 set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7f7c));
2467 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7f78));
2468 ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_LDTR);
2470 dt.address = GET_SMSTATE(u32, smbase, 0x7f74);
2471 dt.size = GET_SMSTATE(u32, smbase, 0x7f70);
2472 ctxt->ops->set_gdt(ctxt, &dt);
2474 dt.address = GET_SMSTATE(u32, smbase, 0x7f58);
2475 dt.size = GET_SMSTATE(u32, smbase, 0x7f54);
2476 ctxt->ops->set_idt(ctxt, &dt);
2478 for (i = 0; i < 6; i++) {
2479 int r = rsm_load_seg_32(ctxt, smbase, i);
2480 if (r != X86EMUL_CONTINUE)
2484 cr4 = GET_SMSTATE(u32, smbase, 0x7f14);
2486 ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7ef8));
2488 return rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
2491 static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
2493 struct desc_struct desc;
2495 u64 val, cr0, cr3, cr4;
2500 for (i = 0; i < 16; i++)
2501 *reg_write(ctxt, i) = GET_SMSTATE(u64, smbase, 0x7ff8 - i * 8);
2503 ctxt->_eip = GET_SMSTATE(u64, smbase, 0x7f78);
2504 ctxt->eflags = GET_SMSTATE(u32, smbase, 0x7f70) | X86_EFLAGS_FIXED;
2506 val = GET_SMSTATE(u32, smbase, 0x7f68);
2507 ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
2508 val = GET_SMSTATE(u32, smbase, 0x7f60);
2509 ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
2511 cr0 = GET_SMSTATE(u64, smbase, 0x7f58);
2512 cr3 = GET_SMSTATE(u64, smbase, 0x7f50);
2513 cr4 = GET_SMSTATE(u64, smbase, 0x7f48);
2514 ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7f00));
2515 val = GET_SMSTATE(u64, smbase, 0x7ed0);
2516 ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA);
2518 selector = GET_SMSTATE(u32, smbase, 0x7e90);
2519 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7e92) << 8);
2520 set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7e94));
2521 set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7e98));
2522 base3 = GET_SMSTATE(u32, smbase, 0x7e9c);
2523 ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_TR);
2525 dt.size = GET_SMSTATE(u32, smbase, 0x7e84);
2526 dt.address = GET_SMSTATE(u64, smbase, 0x7e88);
2527 ctxt->ops->set_idt(ctxt, &dt);
2529 selector = GET_SMSTATE(u32, smbase, 0x7e70);
2530 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7e72) << 8);
2531 set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7e74));
2532 set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7e78));
2533 base3 = GET_SMSTATE(u32, smbase, 0x7e7c);
2534 ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_LDTR);
2536 dt.size = GET_SMSTATE(u32, smbase, 0x7e64);
2537 dt.address = GET_SMSTATE(u64, smbase, 0x7e68);
2538 ctxt->ops->set_gdt(ctxt, &dt);
2540 r = rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
2541 if (r != X86EMUL_CONTINUE)
2544 for (i = 0; i < 6; i++) {
2545 r = rsm_load_seg_64(ctxt, smbase, i);
2546 if (r != X86EMUL_CONTINUE)
2550 return X86EMUL_CONTINUE;
2553 static int em_rsm(struct x86_emulate_ctxt *ctxt)
2555 unsigned long cr0, cr4, efer;
2559 if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_MASK) == 0)
2560 return emulate_ud(ctxt);
2563 * Get back to real mode, to prepare a safe state in which to load
2564 * CR0/CR3/CR4/EFER. It's all a bit more complicated if the vCPU
2565 * supports long mode.
2567 cr4 = ctxt->ops->get_cr(ctxt, 4);
2568 if (emulator_has_longmode(ctxt)) {
2569 struct desc_struct cs_desc;
2571 /* Zero CR4.PCIDE before CR0.PG. */
2572 if (cr4 & X86_CR4_PCIDE) {
2573 ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
2574 cr4 &= ~X86_CR4_PCIDE;
2577 /* A 32-bit code segment is required to clear EFER.LMA. */
2578 memset(&cs_desc, 0, sizeof(cs_desc));
2580 cs_desc.s = cs_desc.g = cs_desc.p = 1;
2581 ctxt->ops->set_segment(ctxt, 0, &cs_desc, 0, VCPU_SREG_CS);
2584 /* For the 64-bit case, this will clear EFER.LMA. */
2585 cr0 = ctxt->ops->get_cr(ctxt, 0);
2586 if (cr0 & X86_CR0_PE)
2587 ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE));
2589 /* Now clear CR4.PAE (which must be done before clearing EFER.LME). */
2590 if (cr4 & X86_CR4_PAE)
2591 ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);
2593 /* And finally go back to 32-bit mode. */
2595 ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
2597 smbase = ctxt->ops->get_smbase(ctxt);
2600 * Give pre_leave_smm() a chance to make ISA-specific changes to the
2601 * vCPU state (e.g. enter guest mode) before loading state from the SMM state-save area.
2604 if (ctxt->ops->pre_leave_smm(ctxt, smbase))
2605 return X86EMUL_UNHANDLEABLE;
2607 if (emulator_has_longmode(ctxt))
2608 ret = rsm_load_state_64(ctxt, smbase + 0x8000);
2610 ret = rsm_load_state_32(ctxt, smbase + 0x8000);
2612 if (ret != X86EMUL_CONTINUE) {
2613 /* FIXME: should triple fault */
2614 return X86EMUL_UNHANDLEABLE;
2617 if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
2618 ctxt->ops->set_nmi_mask(ctxt, false);
2620 ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) &
2621 ~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK));
2622 return X86EMUL_CONTINUE;
2626 setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
2627 struct desc_struct *cs, struct desc_struct *ss)
2629 cs->l = 0; /* will be adjusted later */
2630 set_desc_base(cs, 0); /* flat segment */
2631 cs->g = 1; /* 4kb granularity */
2632 set_desc_limit(cs, 0xfffff); /* 4GB limit */
2633 cs->type = 0x0b; /* Read, Execute, Accessed */
2635 cs->dpl = 0; /* will be adjusted later */
2640 set_desc_base(ss, 0); /* flat segment */
2641 set_desc_limit(ss, 0xfffff); /* 4GB limit */
2642 ss->g = 1; /* 4kb granularity */
2644 ss->type = 0x03; /* Read/Write, Accessed */
2645 ss->d = 1; /* 32bit stack segment */
2652 static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
2654 u32 eax, ebx, ecx, edx;
2657 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
2658 return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
2659 && ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
2660 && edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
2663 static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
2665 const struct x86_emulate_ops *ops = ctxt->ops;
2666 u32 eax, ebx, ecx, edx;
2669 * syscall should always be enabled in long mode, so only fall back to
2670 * the vendor-specific (CPUID) checks when other modes are active...
2672 if (ctxt->mode == X86EMUL_MODE_PROT64)
2677 ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
2679 * Intel ("GenuineIntel")
2680 * Remark: Intel CPUs only support "syscall" in 64-bit
2681 * long mode.  A 64-bit guest running a 32-bit
2682 * compat application will therefore #UD.  While this
2683 * behaviour could be fixed up (by emulating) to match the AMD
2684 * response, AMD CPUs can't be made to behave like Intel.
2686 if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
2687 ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
2688 edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
2691 /* AMD ("AuthenticAMD") */
2692 if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
2693 ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
2694 edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
2697 /* AMD ("AMDisbetter!") */
2698 if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
2699 ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
2700 edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
2703 /* default: (not Intel, not AMD), apply Intel's stricter rules... */
2707 static int em_syscall(struct x86_emulate_ctxt *ctxt)
2709 const struct x86_emulate_ops *ops = ctxt->ops;
2710 struct desc_struct cs, ss;
2715 /* syscall is not available in real mode */
2716 if (ctxt->mode == X86EMUL_MODE_REAL ||
2717 ctxt->mode == X86EMUL_MODE_VM86)
2718 return emulate_ud(ctxt);
2720 if (!(em_syscall_is_enabled(ctxt)))
2721 return emulate_ud(ctxt);
2723 ops->get_msr(ctxt, MSR_EFER, &efer);
2724 setup_syscalls_segments(ctxt, &cs, &ss);
2726 if (!(efer & EFER_SCE))
2727 return emulate_ud(ctxt);
2729 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2731 cs_sel = (u16)(msr_data & 0xfffc);
2732 ss_sel = (u16)(msr_data + 8);
2734 if (efer & EFER_LMA) {
2738 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2739 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2741 *reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
2742 if (efer & EFER_LMA) {
2743 #ifdef CONFIG_X86_64
2744 *reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;
2747 ctxt->mode == X86EMUL_MODE_PROT64 ?
2748 MSR_LSTAR : MSR_CSTAR, &msr_data);
2749 ctxt->_eip = msr_data;
2751 ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
2752 ctxt->eflags &= ~msr_data;
2753 ctxt->eflags |= X86_EFLAGS_FIXED;
2757 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2758 ctxt->_eip = (u32)msr_data;
2760 ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2763 ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
2764 return X86EMUL_CONTINUE;
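/*
 * Background note (architectural, not derived from this file alone): SYSCALL
 * takes its CS selector from STAR[47:32] and loads SS with that value + 8;
 * in long mode the target RIP comes from LSTAR (64-bit mode) or CSTAR
 * (compat mode) and MSR_SYSCALL_MASK names the RFLAGS bits to clear, while
 * in legacy mode the target EIP is STAR[31:0].  That is the MSR layout the
 * code above depends on.
 */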
2767 static int em_sysenter(struct x86_emulate_ctxt *ctxt)
2769 const struct x86_emulate_ops *ops = ctxt->ops;
2770 struct desc_struct cs, ss;
2775 ops->get_msr(ctxt, MSR_EFER, &efer);
2776 /* inject #GP if in real mode */
2777 if (ctxt->mode == X86EMUL_MODE_REAL)
2778 return emulate_gp(ctxt, 0);
2781 * Not recognized on AMD in compat mode (but is recognized in legacy mode).
2784 if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
2785 && !vendor_intel(ctxt))
2786 return emulate_ud(ctxt);
2788 /* sysenter/sysexit have not been tested in 64bit mode. */
2789 if (ctxt->mode == X86EMUL_MODE_PROT64)
2790 return X86EMUL_UNHANDLEABLE;
2792 setup_syscalls_segments(ctxt, &cs, &ss);
2794 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2795 if ((msr_data & 0xfffc) == 0x0)
2796 return emulate_gp(ctxt, 0);
2798 ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2799 cs_sel = (u16)msr_data & ~SEGMENT_RPL_MASK;
2800 ss_sel = cs_sel + 8;
2801 if (efer & EFER_LMA) {
2806 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2807 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2809 ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
2810 ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;
2812 ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
2813 *reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
2816 return X86EMUL_CONTINUE;
2819 static int em_sysexit(struct x86_emulate_ctxt *ctxt)
2821 const struct x86_emulate_ops *ops = ctxt->ops;
2822 struct desc_struct cs, ss;
2823 u64 msr_data, rcx, rdx;
2825 u16 cs_sel = 0, ss_sel = 0;
2827 /* inject #GP if in real mode or Virtual 8086 mode */
2828 if (ctxt->mode == X86EMUL_MODE_REAL ||
2829 ctxt->mode == X86EMUL_MODE_VM86)
2830 return emulate_gp(ctxt, 0);
2832 setup_syscalls_segments(ctxt, &cs, &ss);
2834 if ((ctxt->rex_prefix & 0x8) != 0x0)
2835 usermode = X86EMUL_MODE_PROT64;
2837 usermode = X86EMUL_MODE_PROT32;
2839 rcx = reg_read(ctxt, VCPU_REGS_RCX);
2840 rdx = reg_read(ctxt, VCPU_REGS_RDX);
2844 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2846 case X86EMUL_MODE_PROT32:
2847 cs_sel = (u16)(msr_data + 16);
2848 if ((msr_data & 0xfffc) == 0x0)
2849 return emulate_gp(ctxt, 0);
2850 ss_sel = (u16)(msr_data + 24);
2854 case X86EMUL_MODE_PROT64:
2855 cs_sel = (u16)(msr_data + 32);
2856 if (msr_data == 0x0)
2857 return emulate_gp(ctxt, 0);
2858 ss_sel = cs_sel + 8;
2861 if (emul_is_noncanonical_address(rcx, ctxt) ||
2862 emul_is_noncanonical_address(rdx, ctxt))
2863 return emulate_gp(ctxt, 0);
2866 cs_sel |= SEGMENT_RPL_MASK;
2867 ss_sel |= SEGMENT_RPL_MASK;
2869 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2870 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2873 *reg_write(ctxt, VCPU_REGS_RSP) = rcx;
2875 return X86EMUL_CONTINUE;
2878 static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
2881 if (ctxt->mode == X86EMUL_MODE_REAL)
2883 if (ctxt->mode == X86EMUL_MODE_VM86)
2885 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
2886 return ctxt->ops->cpl(ctxt) > iopl;
2889 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2892 const struct x86_emulate_ops *ops = ctxt->ops;
2893 struct desc_struct tr_seg;
2896 u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
2897 unsigned mask = (1 << len) - 1;
2900 ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
2903 if (desc_limit_scaled(&tr_seg) < 103)
2905 base = get_desc_base(&tr_seg);
2906 #ifdef CONFIG_X86_64
2907 base |= ((u64)base3) << 32;
2909 r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
2910 if (r != X86EMUL_CONTINUE)
2912 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2914 r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
2915 if (r != X86EMUL_CONTINUE)
2917 if ((perm >> bit_idx) & mask)
2922 static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
2928 if (emulator_bad_iopl(ctxt))
2929 if (!emulator_io_port_access_allowed(ctxt, port, len))
2932 ctxt->perm_ok = true;
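/*
 * Minimal sketch of the bitmap test performed above, for illustration only.
 * The real code reads the TSS I/O bitmap from guest memory via ->read_std();
 * this hypothetical helper assumes the relevant bytes have already been
 * copied into a local buffer and simply shows the bit arithmetic: 'len'
 * consecutive bits starting at bit 'port', any set bit denying access.
 */
static inline bool io_bitmap_allows(const u8 *bitmap, u16 port, unsigned int len)
{
	u16 perm;
	unsigned int bit_idx = port & 0x7;
	unsigned int mask = (1 << len) - 1;

	/* Two bytes always cover the (at most 4) bits being tested. */
	memcpy(&perm, bitmap + port / 8, 2);

	return !((perm >> bit_idx) & mask);
}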
2937 static void string_registers_quirk(struct x86_emulate_ctxt *ctxt)
2940 * Intel CPUs mask the counter and pointers in a rather strange
2941 * manner when ECX is zero, due to REP-string optimizations.
2943 #ifdef CONFIG_X86_64
2944 if (ctxt->ad_bytes != 4 || !vendor_intel(ctxt))
2947 *reg_write(ctxt, VCPU_REGS_RCX) = 0;
2950 case 0xa4: /* movsb */
2951 case 0xa5: /* movsd/w */
2952 *reg_rmw(ctxt, VCPU_REGS_RSI) &= (u32)-1;
2954 case 0xaa: /* stosb */
2955 case 0xab: /* stosd/w */
2956 *reg_rmw(ctxt, VCPU_REGS_RDI) &= (u32)-1;
2961 static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
2962 struct tss_segment_16 *tss)
2964 tss->ip = ctxt->_eip;
2965 tss->flag = ctxt->eflags;
2966 tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
2967 tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
2968 tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
2969 tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
2970 tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
2971 tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
2972 tss->si = reg_read(ctxt, VCPU_REGS_RSI);
2973 tss->di = reg_read(ctxt, VCPU_REGS_RDI);
2975 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2976 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2977 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2978 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2979 tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
2982 static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
2983 struct tss_segment_16 *tss)
2988 ctxt->_eip = tss->ip;
2989 ctxt->eflags = tss->flag | 2;
2990 *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
2991 *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
2992 *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
2993 *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
2994 *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
2995 *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
2996 *reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
2997 *reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
3000 * SDM says that segment selectors are loaded before segment descriptors.
3003 set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
3004 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
3005 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
3006 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
3007 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
3012 * Now load segment descriptors.  If a fault happens at this stage,
3013 * it is handled in the context of the new task.
3015 ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
3016 X86_TRANSFER_TASK_SWITCH, NULL);
3017 if (ret != X86EMUL_CONTINUE)
3019 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
3020 X86_TRANSFER_TASK_SWITCH, NULL);
3021 if (ret != X86EMUL_CONTINUE)
3023 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
3024 X86_TRANSFER_TASK_SWITCH, NULL);
3025 if (ret != X86EMUL_CONTINUE)
3027 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
3028 X86_TRANSFER_TASK_SWITCH, NULL);
3029 if (ret != X86EMUL_CONTINUE)
3031 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
3032 X86_TRANSFER_TASK_SWITCH, NULL);
3033 if (ret != X86EMUL_CONTINUE)
3036 return X86EMUL_CONTINUE;
3039 static int task_switch_16(struct x86_emulate_ctxt *ctxt,
3040 u16 tss_selector, u16 old_tss_sel,
3041 ulong old_tss_base, struct desc_struct *new_desc)
3043 const struct x86_emulate_ops *ops = ctxt->ops;
3044 struct tss_segment_16 tss_seg;
3046 u32 new_tss_base = get_desc_base(new_desc);
3048 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
3050 if (ret != X86EMUL_CONTINUE)
3053 save_state_to_tss16(ctxt, &tss_seg);
3055 ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
3057 if (ret != X86EMUL_CONTINUE)
3060 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
3062 if (ret != X86EMUL_CONTINUE)
3065 if (old_tss_sel != 0xffff) {
3066 tss_seg.prev_task_link = old_tss_sel;
3068 ret = ops->write_std(ctxt, new_tss_base,
3069 &tss_seg.prev_task_link,
3070 sizeof tss_seg.prev_task_link,
3072 if (ret != X86EMUL_CONTINUE)
3076 return load_state_from_tss16(ctxt, &tss_seg);
3079 static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
3080 struct tss_segment_32 *tss)
3082 /* CR3 and the LDT selector are intentionally not saved */
3083 tss->eip = ctxt->_eip;
3084 tss->eflags = ctxt->eflags;
3085 tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
3086 tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
3087 tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
3088 tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
3089 tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
3090 tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
3091 tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
3092 tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
3094 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
3095 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
3096 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
3097 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
3098 tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
3099 tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
3102 static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
3103 struct tss_segment_32 *tss)
3108 if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
3109 return emulate_gp(ctxt, 0);
3110 ctxt->_eip = tss->eip;
3111 ctxt->eflags = tss->eflags | 2;
3113 /* General purpose registers */
3114 *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
3115 *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
3116 *reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
3117 *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
3118 *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
3119 *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
3120 *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
3121 *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
3124 * SDM says that segment selectors are loaded before segment
3125 * descriptors.  This is important because CPL checks during the descriptor loads use the just-loaded selectors.
3128 set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
3129 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
3130 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
3131 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
3132 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
3133 set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
3134 set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
3137 * If we're switching between Protected Mode and VM86, we need to make
3138 * sure to update the mode before loading the segment descriptors so
3139 * that the selectors are interpreted correctly.
3141 if (ctxt->eflags & X86_EFLAGS_VM) {
3142 ctxt->mode = X86EMUL_MODE_VM86;
3145 ctxt->mode = X86EMUL_MODE_PROT32;
3150 * Now load segment descriptors.  If a fault happens at this stage,
3151 * it is handled in the context of the new task.
3153 ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
3154 cpl, X86_TRANSFER_TASK_SWITCH, NULL);
3155 if (ret != X86EMUL_CONTINUE)
3157 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
3158 X86_TRANSFER_TASK_SWITCH, NULL);
3159 if (ret != X86EMUL_CONTINUE)
3161 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
3162 X86_TRANSFER_TASK_SWITCH, NULL);
3163 if (ret != X86EMUL_CONTINUE)
3165 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
3166 X86_TRANSFER_TASK_SWITCH, NULL);
3167 if (ret != X86EMUL_CONTINUE)
3169 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
3170 X86_TRANSFER_TASK_SWITCH, NULL);
3171 if (ret != X86EMUL_CONTINUE)
3173 ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
3174 X86_TRANSFER_TASK_SWITCH, NULL);
3175 if (ret != X86EMUL_CONTINUE)
3177 ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
3178 X86_TRANSFER_TASK_SWITCH, NULL);
3183 static int task_switch_32(struct x86_emulate_ctxt *ctxt,
3184 u16 tss_selector, u16 old_tss_sel,
3185 ulong old_tss_base, struct desc_struct *new_desc)
3187 const struct x86_emulate_ops *ops = ctxt->ops;
3188 struct tss_segment_32 tss_seg;
3190 u32 new_tss_base = get_desc_base(new_desc);
3191 u32 eip_offset = offsetof(struct tss_segment_32, eip);
3192 u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
3194 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
3196 if (ret != X86EMUL_CONTINUE)
3199 save_state_to_tss32(ctxt, &tss_seg);
3201 /* Only GP registers and segment selectors are saved */
3202 ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
3203 ldt_sel_offset - eip_offset, &ctxt->exception);
3204 if (ret != X86EMUL_CONTINUE)
3207 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
3209 if (ret != X86EMUL_CONTINUE)
3212 if (old_tss_sel != 0xffff) {
3213 tss_seg.prev_task_link = old_tss_sel;
3215 ret = ops->write_std(ctxt, new_tss_base,
3216 &tss_seg.prev_task_link,
3217 sizeof tss_seg.prev_task_link,
3219 if (ret != X86EMUL_CONTINUE)
3223 return load_state_from_tss32(ctxt, &tss_seg);
3226 static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
3227 u16 tss_selector, int idt_index, int reason,
3228 bool has_error_code, u32 error_code)
3230 const struct x86_emulate_ops *ops = ctxt->ops;
3231 struct desc_struct curr_tss_desc, next_tss_desc;
3233 u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
3234 ulong old_tss_base =
3235 ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
3237 ulong desc_addr, dr7;
3239 /* FIXME: old_tss_base == ~0 ? */
3241 ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
3242 if (ret != X86EMUL_CONTINUE)
3244 ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
3245 if (ret != X86EMUL_CONTINUE)
3248 /* FIXME: check that next_tss_desc is tss */
3251 * Check privileges. The three cases are task switch caused by...
3253 * 1. jmp/call/int to task gate: Check against DPL of the task gate
3254 * 2. Exception/IRQ/iret: No check is performed
3255 * 3. jmp/call to TSS/task-gate: No check is performed since the
3256 * hardware checks it before exiting.
3258 if (reason == TASK_SWITCH_GATE) {
3259 if (idt_index != -1) {
3260 /* Software interrupts */
3261 struct desc_struct task_gate_desc;
3264 ret = read_interrupt_descriptor(ctxt, idt_index,
3266 if (ret != X86EMUL_CONTINUE)
3269 dpl = task_gate_desc.dpl;
3270 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
3271 return emulate_gp(ctxt, (idt_index << 3) | 0x2);
3275 desc_limit = desc_limit_scaled(&next_tss_desc);
3276 if (!next_tss_desc.p ||
3277 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
3278 desc_limit < 0x2b)) {
3279 return emulate_ts(ctxt, tss_selector & 0xfffc);
3282 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
3283 curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
3284 write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
3287 if (reason == TASK_SWITCH_IRET)
3288 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
3290 /* Set the back link to the previous task only if the NT bit is set in
3291 eflags; note that old_tss_sel is not used after this point */
3292 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
3293 old_tss_sel = 0xffff;
3295 if (next_tss_desc.type & 8)
3296 ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
3297 old_tss_base, &next_tss_desc);
3299 ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
3300 old_tss_base, &next_tss_desc);
3301 if (ret != X86EMUL_CONTINUE)
3304 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
3305 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
3307 if (reason != TASK_SWITCH_IRET) {
3308 next_tss_desc.type |= (1 << 1); /* set busy flag */
3309 write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
3312 ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
3313 ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
3315 if (has_error_code) {
3316 ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
3317 ctxt->lock_prefix = 0;
3318 ctxt->src.val = (unsigned long) error_code;
3319 ret = em_push(ctxt);
3322 ops->get_dr(ctxt, 7, &dr7);
3323 ops->set_dr(ctxt, 7, dr7 & ~(DR_LOCAL_ENABLE_MASK | DR_LOCAL_SLOWDOWN));
3328 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
3329 u16 tss_selector, int idt_index, int reason,
3330 bool has_error_code, u32 error_code)
3334 invalidate_registers(ctxt);
3335 ctxt->_eip = ctxt->eip;
3336 ctxt->dst.type = OP_NONE;
3338 rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
3339 has_error_code, error_code);
3341 if (rc == X86EMUL_CONTINUE) {
3342 ctxt->eip = ctxt->_eip;
3343 writeback_registers(ctxt);
3346 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
3349 static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
3352 int df = (ctxt->eflags & X86_EFLAGS_DF) ? -op->count : op->count;
3354 register_address_increment(ctxt, reg, df * op->bytes);
3355 op->addr.mem.ea = register_address(ctxt, reg);
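/*
 * Example of the adjustment above: a repeated 32-bit string instruction with
 * op->count == 4 advances the index register by +16 bytes when EFLAGS.DF is
 * clear and by -16 bytes when DF is set.
 */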
3358 static int em_das(struct x86_emulate_ctxt *ctxt)
3361 bool af, cf, old_cf;
3363 cf = ctxt->eflags & X86_EFLAGS_CF;
3369 af = ctxt->eflags & X86_EFLAGS_AF;
3370 if ((al & 0x0f) > 9 || af) {
3372 cf = old_cf | (al >= 250);
3377 if (old_al > 0x99 || old_cf) {
3383 /* Set PF, ZF, SF */
3384 ctxt->src.type = OP_IMM;
3386 ctxt->src.bytes = 1;
3387 fastop(ctxt, em_or);
3388 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
3390 ctxt->eflags |= X86_EFLAGS_CF;
3392 ctxt->eflags |= X86_EFLAGS_AF;
3393 return X86EMUL_CONTINUE;
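/*
 * Worked example of the fixup above: after SUB AL, 0x08 with AL == 0x23 the
 * CPU leaves AL == 0x1b with AF set; DAS then subtracts 6 (low nibble > 9 or
 * AF set), giving AL == 0x15, the packed-BCD result of 23 - 08.  No 0x60
 * adjustment follows because the original AL was <= 0x99 and CF was clear.
 */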
3396 static int em_aam(struct x86_emulate_ctxt *ctxt)
3400 if (ctxt->src.val == 0)
3401 return emulate_de(ctxt);
3403 al = ctxt->dst.val & 0xff;
3404 ah = al / ctxt->src.val;
3405 al %= ctxt->src.val;
3407 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
3409 /* Set PF, ZF, SF */
3410 ctxt->src.type = OP_IMM;
3412 ctxt->src.bytes = 1;
3413 fastop(ctxt, em_or);
3415 return X86EMUL_CONTINUE;
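/*
 * Example: AAM with the default base of 10 and AL == 0x2f (47) stores
 * AH = 47 / 10 = 4 and AL = 47 % 10 = 7, so AX becomes 0x0407.
 */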
3418 static int em_aad(struct x86_emulate_ctxt *ctxt)
3420 u8 al = ctxt->dst.val & 0xff;
3421 u8 ah = (ctxt->dst.val >> 8) & 0xff;
3423 al = (al + (ah * ctxt->src.val)) & 0xff;
3425 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
3427 /* Set PF, ZF, SF */
3428 ctxt->src.type = OP_IMM;
3430 ctxt->src.bytes = 1;
3431 fastop(ctxt, em_or);
3433 return X86EMUL_CONTINUE;
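/*
 * Example: AAD with the default base of 10 and AH == 4, AL == 7 computes
 * AL = 4 * 10 + 7 = 47 (0x2f) and clears AH, so AX becomes 0x002f.
 */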
3436 static int em_call(struct x86_emulate_ctxt *ctxt)
3439 long rel = ctxt->src.val;
3441 ctxt->src.val = (unsigned long)ctxt->_eip;
3442 rc = jmp_rel(ctxt, rel);
3443 if (rc != X86EMUL_CONTINUE)
3445 return em_push(ctxt);
3448 static int em_call_far(struct x86_emulate_ctxt *ctxt)
3453 struct desc_struct old_desc, new_desc;
3454 const struct x86_emulate_ops *ops = ctxt->ops;
3455 int cpl = ctxt->ops->cpl(ctxt);
3456 enum x86emul_mode prev_mode = ctxt->mode;
3458 old_eip = ctxt->_eip;
3459 ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
3461 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
3462 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
3463 X86_TRANSFER_CALL_JMP, &new_desc);
3464 if (rc != X86EMUL_CONTINUE)
3467 rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
3468 if (rc != X86EMUL_CONTINUE)
3471 ctxt->src.val = old_cs;
3473 if (rc != X86EMUL_CONTINUE)
3476 ctxt->src.val = old_eip;
3478 /* If we failed, we tainted the memory, but at the very least we should restore CS and the previous mode */
3480 if (rc != X86EMUL_CONTINUE) {
3481 pr_warn_once("faulting far call emulation tainted memory\n");
3486 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
3487 ctxt->mode = prev_mode;
3492 static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
3497 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
3498 if (rc != X86EMUL_CONTINUE)
3500 rc = assign_eip_near(ctxt, eip);
3501 if (rc != X86EMUL_CONTINUE)
3503 rsp_increment(ctxt, ctxt->src.val);
3504 return X86EMUL_CONTINUE;
3507 static int em_xchg(struct x86_emulate_ctxt *ctxt)
3509 /* Write back the register source. */
3510 ctxt->src.val = ctxt->dst.val;
3511 write_register_operand(&ctxt->src);
3513 /* Write back the memory destination with implicit LOCK prefix. */
3514 ctxt->dst.val = ctxt->src.orig_val;
3515 ctxt->lock_prefix = 1;
3516 return X86EMUL_CONTINUE;
3519 static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
3521 ctxt->dst.val = ctxt->src2.val;
3522 return fastop(ctxt, em_imul);
3525 static int em_cwd(struct x86_emulate_ctxt *ctxt)
3527 ctxt->dst.type = OP_REG;
3528 ctxt->dst.bytes = ctxt->src.bytes;
3529 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
3530 ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
3532 return X86EMUL_CONTINUE;
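/*
 * The expression above replicates the source operand's sign bit across the
 * destination: for a 16-bit CWD with AX == 0x8000 the shift yields 1 and DX
 * becomes ~(1 - 1) == 0xffff, while AX == 0x1234 yields 0 and DX becomes
 * ~(0 - 1) == 0.
 */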
3535 static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
3539 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
3540 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
3541 *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
3542 return X86EMUL_CONTINUE;
3545 static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
3549 if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
3550 return emulate_gp(ctxt, 0);
3551 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
3552 *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
3553 return X86EMUL_CONTINUE;
3556 static int em_mov(struct x86_emulate_ctxt *ctxt)
3558 memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
3559 return X86EMUL_CONTINUE;
3562 #define FFL(x) bit(X86_FEATURE_##x)
3564 static int em_movbe(struct x86_emulate_ctxt *ctxt)
3566 u32 ebx, ecx, edx, eax = 1;
3570 * Check MOVBE is set in the guest-visible CPUID leaf.
3572 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
3573 if (!(ecx & FFL(MOVBE)))
3574 return emulate_ud(ctxt);
3576 switch (ctxt->op_bytes) {
3579 * From the MOVBE definition: "...When the operand size is 16 bits,
3580 * the upper word of the destination register remains unchanged ..."
3583 * Casting either ->valptr or ->val to u16 breaks strict aliasing
3584 * rules, so we have to do the operation almost by hand.
3586 tmp = (u16)ctxt->src.val;
3587 ctxt->dst.val &= ~0xffffUL;
3588 ctxt->dst.val |= (unsigned long)swab16(tmp);
3591 ctxt->dst.val = swab32((u32)ctxt->src.val);
3594 ctxt->dst.val = swab64(ctxt->src.val);
3599 return X86EMUL_CONTINUE;
3602 static int em_cr_write(struct x86_emulate_ctxt *ctxt)
3604 if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
3605 return emulate_gp(ctxt, 0);
3607 /* Disable writeback. */
3608 ctxt->dst.type = OP_NONE;
3609 return X86EMUL_CONTINUE;
3612 static int em_dr_write(struct x86_emulate_ctxt *ctxt)
3616 if (ctxt->mode == X86EMUL_MODE_PROT64)
3617 val = ctxt->src.val & ~0ULL;
3619 val = ctxt->src.val & ~0U;
3621 /* #UD condition is already handled. */
3622 if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
3623 return emulate_gp(ctxt, 0);
3625 /* Disable writeback. */
3626 ctxt->dst.type = OP_NONE;
3627 return X86EMUL_CONTINUE;
3630 static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
3634 msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
3635 | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
3636 if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
3637 return emulate_gp(ctxt, 0);
3639 return X86EMUL_CONTINUE;
3642 static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
3646 if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
3647 return emulate_gp(ctxt, 0);
3649 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
3650 *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
3651 return X86EMUL_CONTINUE;
3654 static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
3656 if (ctxt->modrm_reg > VCPU_SREG_GS)
3657 return emulate_ud(ctxt);
3659 ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
3660 if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
3661 ctxt->dst.bytes = 2;
3662 return X86EMUL_CONTINUE;
3665 static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
3667 u16 sel = ctxt->src.val;
3669 if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
3670 return emulate_ud(ctxt);
3672 if (ctxt->modrm_reg == VCPU_SREG_SS)
3673 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3675 /* Disable writeback. */
3676 ctxt->dst.type = OP_NONE;
3677 return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
3680 static int em_lldt(struct x86_emulate_ctxt *ctxt)
3682 u16 sel = ctxt->src.val;
3684 /* Disable writeback. */
3685 ctxt->dst.type = OP_NONE;
3686 return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
3689 static int em_ltr(struct x86_emulate_ctxt *ctxt)
3691 u16 sel = ctxt->src.val;
3693 /* Disable writeback. */
3694 ctxt->dst.type = OP_NONE;
3695 return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
3698 static int em_invlpg(struct x86_emulate_ctxt *ctxt)
3703 rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
3704 if (rc == X86EMUL_CONTINUE)
3705 ctxt->ops->invlpg(ctxt, linear);
3706 /* Disable writeback. */
3707 ctxt->dst.type = OP_NONE;
3708 return X86EMUL_CONTINUE;
3711 static int em_clts(struct x86_emulate_ctxt *ctxt)
3715 cr0 = ctxt->ops->get_cr(ctxt, 0);
3717 ctxt->ops->set_cr(ctxt, 0, cr0);
3718 return X86EMUL_CONTINUE;
3721 static int em_hypercall(struct x86_emulate_ctxt *ctxt)
3723 int rc = ctxt->ops->fix_hypercall(ctxt);
3725 if (rc != X86EMUL_CONTINUE)
3728 /* Let the processor re-execute the fixed hypercall */
3729 ctxt->_eip = ctxt->eip;
3730 /* Disable writeback. */
3731 ctxt->dst.type = OP_NONE;
3732 return X86EMUL_CONTINUE;
3735 static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
3736 void (*get)(struct x86_emulate_ctxt *ctxt,
3737 struct desc_ptr *ptr))
3739 struct desc_ptr desc_ptr;
3741 if (ctxt->mode == X86EMUL_MODE_PROT64)
3743 get(ctxt, &desc_ptr);
3744 if (ctxt->op_bytes == 2) {
3746 desc_ptr.address &= 0x00ffffff;
3748 /* Disable writeback. */
3749 ctxt->dst.type = OP_NONE;
3750 return segmented_write_std(ctxt, ctxt->dst.addr.mem,
3751 &desc_ptr, 2 + ctxt->op_bytes);
3754 static int em_sgdt(struct x86_emulate_ctxt *ctxt)
3756 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
3759 static int em_sidt(struct x86_emulate_ctxt *ctxt)
3761 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
3764 static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
3766 struct desc_ptr desc_ptr;
3769 if (ctxt->mode == X86EMUL_MODE_PROT64)
3771 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3772 &desc_ptr.size, &desc_ptr.address,
3774 if (rc != X86EMUL_CONTINUE)
3776 if (ctxt->mode == X86EMUL_MODE_PROT64 &&
3777 emul_is_noncanonical_address(desc_ptr.address, ctxt))
3778 return emulate_gp(ctxt, 0);
3780 ctxt->ops->set_gdt(ctxt, &desc_ptr);
3782 ctxt->ops->set_idt(ctxt, &desc_ptr);
3783 /* Disable writeback. */
3784 ctxt->dst.type = OP_NONE;
3785 return X86EMUL_CONTINUE;
3788 static int em_lgdt(struct x86_emulate_ctxt *ctxt)
3790 return em_lgdt_lidt(ctxt, true);
3793 static int em_lidt(struct x86_emulate_ctxt *ctxt)
3795 return em_lgdt_lidt(ctxt, false);
3798 static int em_smsw(struct x86_emulate_ctxt *ctxt)
3800 if (ctxt->dst.type == OP_MEM)
3801 ctxt->dst.bytes = 2;
3802 ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
3803 return X86EMUL_CONTINUE;
3806 static int em_lmsw(struct x86_emulate_ctxt *ctxt)
3808 ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
3809 | (ctxt->src.val & 0x0f));
3810 ctxt->dst.type = OP_NONE;
3811 return X86EMUL_CONTINUE;
3814 static int em_loop(struct x86_emulate_ctxt *ctxt)
3816 int rc = X86EMUL_CONTINUE;
3818 register_address_increment(ctxt, VCPU_REGS_RCX, -1);
3819 if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
3820 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
3821 rc = jmp_rel(ctxt, ctxt->src.val);
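/*
 * Note on the condition above: the LOOP family is 0xe0 (loopne), 0xe1
 * (loope) and 0xe2 (loop).  XORing the opcode with 0x5 maps 0xe0 to
 * condition code 5 ("not zero") and 0xe1 to condition code 4 ("zero"), so
 * test_cc() applies the matching ZF test, while plain LOOP (0xe2) skips the
 * flag check and only depends on the masked RCX being non-zero.
 */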
3826 static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3828 int rc = X86EMUL_CONTINUE;
3830 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
3831 rc = jmp_rel(ctxt, ctxt->src.val);
3836 static int em_in(struct x86_emulate_ctxt *ctxt)
3838 if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3840 return X86EMUL_IO_NEEDED;
3842 return X86EMUL_CONTINUE;
3845 static int em_out(struct x86_emulate_ctxt *ctxt)
3847 ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3849 /* Disable writeback. */
3850 ctxt->dst.type = OP_NONE;
3851 return X86EMUL_CONTINUE;
3854 static int em_cli(struct x86_emulate_ctxt *ctxt)
3856 if (emulator_bad_iopl(ctxt))
3857 return emulate_gp(ctxt, 0);
3859 ctxt->eflags &= ~X86_EFLAGS_IF;
3860 return X86EMUL_CONTINUE;
3863 static int em_sti(struct x86_emulate_ctxt *ctxt)
3865 if (emulator_bad_iopl(ctxt))
3866 return emulate_gp(ctxt, 0);
3868 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3869 ctxt->eflags |= X86_EFLAGS_IF;
3870 return X86EMUL_CONTINUE;
3873 static int em_cpuid(struct x86_emulate_ctxt *ctxt)
3875 u32 eax, ebx, ecx, edx;
3878 ctxt->ops->get_msr(ctxt, MSR_MISC_FEATURES_ENABLES, &msr);
3879 if (msr & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT &&
3880 ctxt->ops->cpl(ctxt)) {
3881 return emulate_gp(ctxt, 0);
3884 eax = reg_read(ctxt, VCPU_REGS_RAX);
3885 ecx = reg_read(ctxt, VCPU_REGS_RCX);
3886 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, true);
3887 *reg_write(ctxt, VCPU_REGS_RAX) = eax;
3888 *reg_write(ctxt, VCPU_REGS_RBX) = ebx;
3889 *reg_write(ctxt, VCPU_REGS_RCX) = ecx;
3890 *reg_write(ctxt, VCPU_REGS_RDX) = edx;
3891 return X86EMUL_CONTINUE;
3894 static int em_sahf(struct x86_emulate_ctxt *ctxt)
3898 flags = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
3900 flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
3902 ctxt->eflags &= ~0xffUL;
3903 ctxt->eflags |= flags | X86_EFLAGS_FIXED;
3904 return X86EMUL_CONTINUE;
3907 static int em_lahf(struct x86_emulate_ctxt *ctxt)
3909 *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
3910 *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
3911 return X86EMUL_CONTINUE;
3914 static int em_bswap(struct x86_emulate_ctxt *ctxt)
3916 switch (ctxt->op_bytes) {
3917 #ifdef CONFIG_X86_64
3919 asm("bswap %0" : "+r"(ctxt->dst.val));
3923 asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
3926 return X86EMUL_CONTINUE;
3929 static int em_clflush(struct x86_emulate_ctxt *ctxt)
3931 /* emulating clflush regardless of cpuid */
3932 return X86EMUL_CONTINUE;
3935 static int em_movsxd(struct x86_emulate_ctxt *ctxt)
3937 ctxt->dst.val = (s32) ctxt->src.val;
3938 return X86EMUL_CONTINUE;
3941 static int check_fxsr(struct x86_emulate_ctxt *ctxt)
3943 u32 eax = 1, ebx, ecx = 0, edx;
3945 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
3946 if (!(edx & FFL(FXSR)))
3947 return emulate_ud(ctxt);
3949 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
3950 return emulate_nm(ctxt);
3953 * Don't emulate a case that should never be hit, instead of working
3954 * around a lack of fxsave64/fxrstor64 on old compilers.
3956 if (ctxt->mode >= X86EMUL_MODE_PROT64)
3957 return X86EMUL_UNHANDLEABLE;
3959 return X86EMUL_CONTINUE;
3963 * Hardware doesn't save and restore XMM 0-7 without CR4.OSFXSR, but does save
3964 * and restore MXCSR.
3966 static size_t __fxstate_size(int nregs)
3968 return offsetof(struct fxregs_state, xmm_space[0]) + nregs * 16;
3971 static inline size_t fxstate_size(struct x86_emulate_ctxt *ctxt)
3974 if (ctxt->mode == X86EMUL_MODE_PROT64)
3975 return __fxstate_size(16);
3977 cr4_osfxsr = ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR;
3978 return __fxstate_size(cr4_osfxsr ? 8 : 0);
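/*
 * For orientation (assuming the standard fxregs_state layout, where
 * xmm_space starts 160 bytes in): __fxstate_size() evaluates to 160 with no
 * XMM registers, 288 with XMM0-7 and 416 with XMM0-15, i.e. the legacy
 * 512-byte FXSAVE area minus its reserved tail.
 */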
3982 * FXSAVE and FXRSTOR have 4 different formats depending on execution mode,
 *  1) 16 bit mode
 *  2) 32 bit mode
3985 *     - like (1), but FIP and FDP (foo) are only 16 bit.  At least Intel CPUs
3986 *       preserve whole 32 bit values, though, so (1) and (2) are the same wrt.
 *       FIP and FDP.
3988 * 3) 64-bit mode with REX.W prefix
3989 * - like (2), but XMM 8-15 are being saved and restored
3990 * 4) 64-bit mode without REX.W prefix
3991 * - like (3), but FIP and FDP are 64 bit
3993 * Emulation uses (3) for (1) and (2) and preserves XMM 8-15 to reach the
3994 * desired result. (4) is not emulated.
3996 * Note: Guest and host CPUID.(EAX=07H,ECX=0H):EBX[bit 13] (deprecate FPU CS
3997 * and FPU DS) should match.
3999 static int em_fxsave(struct x86_emulate_ctxt *ctxt)
4001 struct fxregs_state fx_state;
4004 rc = check_fxsr(ctxt);
4005 if (rc != X86EMUL_CONTINUE)
4008 rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
4010 if (rc != X86EMUL_CONTINUE)
4013 return segmented_write_std(ctxt, ctxt->memop.addr.mem, &fx_state,
4014 fxstate_size(ctxt));
4018 * FXRSTOR might restore XMM registers not provided by the guest. Fill
4019 * in the host registers (via FXSAVE) instead, so they won't be modified.
4020 * (preemption has to stay disabled until FXRSTOR).
4022 * Use noinline to keep the stack for other functions called by callers small.
4024 static noinline int fxregs_fixup(struct fxregs_state *fx_state,
4025 const size_t used_size)
4027 struct fxregs_state fx_tmp;
4030 rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_tmp));
4031 memcpy((void *)fx_state + used_size, (void *)&fx_tmp + used_size,
4032 __fxstate_size(16) - used_size);
4037 static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
4039 struct fxregs_state fx_state;
4043 rc = check_fxsr(ctxt);
4044 if (rc != X86EMUL_CONTINUE)
4047 size = fxstate_size(ctxt);
4048 rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
4049 if (rc != X86EMUL_CONTINUE)
4052 if (size < __fxstate_size(16)) {
4053 rc = fxregs_fixup(&fx_state, size);
4054 if (rc != X86EMUL_CONTINUE)
4058 if (fx_state.mxcsr >> 16) {
4059 rc = emulate_gp(ctxt, 0);
4063 if (rc == X86EMUL_CONTINUE)
4064 rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));
4070 static bool valid_cr(int nr)
4082 static int check_cr_read(struct x86_emulate_ctxt *ctxt)
4084 if (!valid_cr(ctxt->modrm_reg))
4085 return emulate_ud(ctxt);
4087 return X86EMUL_CONTINUE;
4090 static int check_cr_write(struct x86_emulate_ctxt *ctxt)
4092 u64 new_val = ctxt->src.val64;
4093 int cr = ctxt->modrm_reg;
4096 static u64 cr_reserved_bits[] = {
4097 0xffffffff00000000ULL,
4098 0, 0, 0, /* CR3 checked later */
4105 return emulate_ud(ctxt);
4107 if (new_val & cr_reserved_bits[cr])
4108 return emulate_gp(ctxt, 0);
4113 if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
4114 ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
4115 return emulate_gp(ctxt, 0);
4117 cr4 = ctxt->ops->get_cr(ctxt, 4);
4118 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4120 if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
4121 !(cr4 & X86_CR4_PAE))
4122 return emulate_gp(ctxt, 0);
4129 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4130 if (efer & EFER_LMA) {
4132 u32 eax, ebx, ecx, edx;
4136 if (ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx,
4138 maxphyaddr = eax & 0xff;
4141 rsvd = rsvd_bits(maxphyaddr, 62);
4145 return emulate_gp(ctxt, 0);
4150 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4152 if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
4153 return emulate_gp(ctxt, 0);
4159 return X86EMUL_CONTINUE;
4162 static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
4166 ctxt->ops->get_dr(ctxt, 7, &dr7);
4168 /* Check if DR7.Global_Enable is set */
4169 return dr7 & (1 << 13);
4172 static int check_dr_read(struct x86_emulate_ctxt *ctxt)
4174 int dr = ctxt->modrm_reg;
4178 return emulate_ud(ctxt);
4180 cr4 = ctxt->ops->get_cr(ctxt, 4);
4181 if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
4182 return emulate_ud(ctxt);
4184 if (check_dr7_gd(ctxt)) {
4187 ctxt->ops->get_dr(ctxt, 6, &dr6);
4189 dr6 |= DR6_BD | DR6_RTM;
4190 ctxt->ops->set_dr(ctxt, 6, dr6);
4191 return emulate_db(ctxt);
4194 return X86EMUL_CONTINUE;
4197 static int check_dr_write(struct x86_emulate_ctxt *ctxt)
4199 u64 new_val = ctxt->src.val64;
4200 int dr = ctxt->modrm_reg;
4202 if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
4203 return emulate_gp(ctxt, 0);
4205 return check_dr_read(ctxt);
4208 static int check_svme(struct x86_emulate_ctxt *ctxt)
4212 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4214 if (!(efer & EFER_SVME))
4215 return emulate_ud(ctxt);
4217 return X86EMUL_CONTINUE;
4220 static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
4222 u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
4224 /* Valid physical address? */
4225 if (rax & 0xffff000000000000ULL)
4226 return emulate_gp(ctxt, 0);
4228 return check_svme(ctxt);
4231 static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
4233 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
4235 if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
4236 return emulate_ud(ctxt);
4238 return X86EMUL_CONTINUE;
4241 static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
4243 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
4244 u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
4246 if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
4247 ctxt->ops->check_pmc(ctxt, rcx))
4248 return emulate_gp(ctxt, 0);
4250 return X86EMUL_CONTINUE;
4253 static int check_perm_in(struct x86_emulate_ctxt *ctxt)
4255 ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
4256 if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
4257 return emulate_gp(ctxt, 0);
4259 return X86EMUL_CONTINUE;
4262 static int check_perm_out(struct x86_emulate_ctxt *ctxt)
4264 ctxt->src.bytes = min(ctxt->src.bytes, 4u);
4265 if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
4266 return emulate_gp(ctxt, 0);
4268 return X86EMUL_CONTINUE;
4271 #define D(_y) { .flags = (_y) }
4272 #define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
4273 #define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
4274 .intercept = x86_intercept_##_i, .check_perm = (_p) }
4275 #define N D(NotImpl)
4276 #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
4277 #define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
4278 #define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
4279 #define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
4280 #define MD(_f, _m) { .flags = ((_f) | ModeDual), .u.mdual = (_m) }
4281 #define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
4282 #define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
4283 #define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
4284 #define II(_f, _e, _i) \
4285 { .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
4286 #define IIP(_f, _e, _i, _p) \
4287 { .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
4288 .intercept = x86_intercept_##_i, .check_perm = (_p) }
4289 #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
4291 #define D2bv(_f) D((_f) | ByteOp), D(_f)
4292 #define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
4293 #define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
4294 #define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e)
4295 #define I2bvIP(_f, _e, _i, _p) \
4296 IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
4298 #define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
4299 F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
4300 F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
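/*
 * For illustration (not an additional table): F6ALU(Lock, em_add) below
 * expands, via F2bv(), to the six entries covering the classic ALU opcode
 * pattern 0x00-0x05 for ADD:
 *
 *   F(ByteOp | Lock | DstMem | SrcReg | ModRM, em_add)  00: add r/m8, r8
 *   F(Lock | DstMem | SrcReg | ModRM, em_add)           01: add r/m, r
 *   F(ByteOp | DstReg | SrcMem | ModRM, em_add)         02: add r8, r/m8
 *   F(DstReg | SrcMem | ModRM, em_add)                  03: add r, r/m
 *   F(ByteOp | DstAcc | SrcImm, em_add)                 04: add al, imm8
 *   F(DstAcc | SrcImm, em_add)                          05: add ax/eax/rax, imm
 *
 * Note that Lock is kept only for the memory-destination forms.
 */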
4302 static const struct opcode group7_rm0[] = {
4304 I(SrcNone | Priv | EmulateOnUD, em_hypercall),
4308 static const struct opcode group7_rm1[] = {
4309 DI(SrcNone | Priv, monitor),
4310 DI(SrcNone | Priv, mwait),
4314 static const struct opcode group7_rm3[] = {
4315 DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
4316 II(SrcNone | Prot | EmulateOnUD, em_hypercall, vmmcall),
4317 DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
4318 DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
4319 DIP(SrcNone | Prot | Priv, stgi, check_svme),
4320 DIP(SrcNone | Prot | Priv, clgi, check_svme),
4321 DIP(SrcNone | Prot | Priv, skinit, check_svme),
4322 DIP(SrcNone | Prot | Priv, invlpga, check_svme),
4325 static const struct opcode group7_rm7[] = {
4327 DIP(SrcNone, rdtscp, check_rdtsc),
4331 static const struct opcode group1[] = {
4333 F(Lock | PageTable, em_or),
4336 F(Lock | PageTable, em_and),
4342 static const struct opcode group1A[] = {
4343 I(DstMem | SrcNone | Mov | Stack | IncSP | TwoMemOp, em_pop), N, N, N, N, N, N, N,
4346 static const struct opcode group2[] = {
4347 F(DstMem | ModRM, em_rol),
4348 F(DstMem | ModRM, em_ror),
4349 F(DstMem | ModRM, em_rcl),
4350 F(DstMem | ModRM, em_rcr),
4351 F(DstMem | ModRM, em_shl),
4352 F(DstMem | ModRM, em_shr),
4353 F(DstMem | ModRM, em_shl),
4354 F(DstMem | ModRM, em_sar),
4357 static const struct opcode group3[] = {
4358 F(DstMem | SrcImm | NoWrite, em_test),
4359 F(DstMem | SrcImm | NoWrite, em_test),
4360 F(DstMem | SrcNone | Lock, em_not),
4361 F(DstMem | SrcNone | Lock, em_neg),
4362 F(DstXacc | Src2Mem, em_mul_ex),
4363 F(DstXacc | Src2Mem, em_imul_ex),
4364 F(DstXacc | Src2Mem, em_div_ex),
4365 F(DstXacc | Src2Mem, em_idiv_ex),
4368 static const struct opcode group4[] = {
4369 F(ByteOp | DstMem | SrcNone | Lock, em_inc),
4370 F(ByteOp | DstMem | SrcNone | Lock, em_dec),
4374 static const struct opcode group5[] = {
4375 F(DstMem | SrcNone | Lock, em_inc),
4376 F(DstMem | SrcNone | Lock, em_dec),
4377 I(SrcMem | NearBranch, em_call_near_abs),
4378 I(SrcMemFAddr | ImplicitOps, em_call_far),
4379 I(SrcMem | NearBranch, em_jmp_abs),
4380 I(SrcMemFAddr | ImplicitOps, em_jmp_far),
4381 I(SrcMem | Stack | TwoMemOp, em_push), D(Undefined),
4384 static const struct opcode group6[] = {
4385 DI(Prot | DstMem, sldt),
4386 DI(Prot | DstMem, str),
4387 II(Prot | Priv | SrcMem16, em_lldt, lldt),
4388 II(Prot | Priv | SrcMem16, em_ltr, ltr),
4392 static const struct group_dual group7 = { {
4393 II(Mov | DstMem, em_sgdt, sgdt),
4394 II(Mov | DstMem, em_sidt, sidt),
4395 II(SrcMem | Priv, em_lgdt, lgdt),
4396 II(SrcMem | Priv, em_lidt, lidt),
4397 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
4398 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
4399 II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
4403 N, EXT(0, group7_rm3),
4404 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
4405 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
4409 static const struct opcode group8[] = {
4411 F(DstMem | SrcImmByte | NoWrite, em_bt),
4412 F(DstMem | SrcImmByte | Lock | PageTable, em_bts),
4413 F(DstMem | SrcImmByte | Lock, em_btr),
4414 F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
4417 static const struct group_dual group9 = { {
4418 N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
4420 N, N, N, N, N, N, N, N,
4423 static const struct opcode group11[] = {
4424 I(DstMem | SrcImm | Mov | PageTable, em_mov),
4428 static const struct gprefix pfx_0f_ae_7 = {
4429 I(SrcMem | ByteOp, em_clflush), N, N, N,
4432 static const struct group_dual group15 = { {
4433 I(ModRM | Aligned16, em_fxsave),
4434 I(ModRM | Aligned16, em_fxrstor),
4435 N, N, N, N, N, GP(0, &pfx_0f_ae_7),
4437 N, N, N, N, N, N, N, N,
4440 static const struct gprefix pfx_0f_6f_0f_7f = {
4441 I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
4444 static const struct instr_dual instr_dual_0f_2b = {
4448 static const struct gprefix pfx_0f_2b = {
4449 ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N,
4452 static const struct gprefix pfx_0f_28_0f_29 = {
4453 I(Aligned, em_mov), I(Aligned, em_mov), N, N,
4456 static const struct gprefix pfx_0f_e7 = {
4457 N, I(Sse, em_mov), N, N,
4460 static const struct escape escape_d9 = { {
4461 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstcw),
4464 N, N, N, N, N, N, N, N,
4466 N, N, N, N, N, N, N, N,
4468 N, N, N, N, N, N, N, N,
4470 N, N, N, N, N, N, N, N,
4472 N, N, N, N, N, N, N, N,
4474 N, N, N, N, N, N, N, N,
4476 N, N, N, N, N, N, N, N,
4478 N, N, N, N, N, N, N, N,
4481 static const struct escape escape_db = { {
4482 N, N, N, N, N, N, N, N,
4485 N, N, N, N, N, N, N, N,
4487 N, N, N, N, N, N, N, N,
4489 N, N, N, N, N, N, N, N,
4491 N, N, N, N, N, N, N, N,
4493 N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
4495 N, N, N, N, N, N, N, N,
4497 N, N, N, N, N, N, N, N,
4499 N, N, N, N, N, N, N, N,
4502 static const struct escape escape_dd = { {
4503 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstsw),
4506 N, N, N, N, N, N, N, N,
4508 N, N, N, N, N, N, N, N,
4510 N, N, N, N, N, N, N, N,
4512 N, N, N, N, N, N, N, N,
4514 N, N, N, N, N, N, N, N,
4516 N, N, N, N, N, N, N, N,
4518 N, N, N, N, N, N, N, N,
4520 N, N, N, N, N, N, N, N,
4523 static const struct instr_dual instr_dual_0f_c3 = {
4524 I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N
4527 static const struct mode_dual mode_dual_63 = {
4528 N, I(DstReg | SrcMem32 | ModRM | Mov, em_movsxd)
4531 static const struct opcode opcode_table[256] = {
4533 F6ALU(Lock, em_add),
4534 I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
4535 I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
4537 F6ALU(Lock | PageTable, em_or),
4538 I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
4541 F6ALU(Lock, em_adc),
4542 I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
4543 I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
4545 F6ALU(Lock, em_sbb),
4546 I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
4547 I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
4549 F6ALU(Lock | PageTable, em_and), N, N,
4551 F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
4553 F6ALU(Lock, em_xor), N, N,
4555 F6ALU(NoWrite, em_cmp), N, N,
4557 X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
4559 X8(I(SrcReg | Stack, em_push)),
4561 X8(I(DstReg | Stack, em_pop)),
4563 I(ImplicitOps | Stack | No64, em_pusha),
4564 I(ImplicitOps | Stack | No64, em_popa),
4565 N, MD(ModRM, &mode_dual_63),
4568 I(SrcImm | Mov | Stack, em_push),
4569 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
4570 I(SrcImmByte | Mov | Stack, em_push),
4571 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
4572 I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
4573 I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
4575 X16(D(SrcImmByte | NearBranch)),
4577 G(ByteOp | DstMem | SrcImm, group1),
4578 G(DstMem | SrcImm, group1),
4579 G(ByteOp | DstMem | SrcImm | No64, group1),
4580 G(DstMem | SrcImmByte, group1),
4581 F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
4582 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
4584 I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
4585 I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
4586 I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
4587 D(ModRM | SrcMem | NoAccess | DstReg),
4588 I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
4591 DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
4593 D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
4594 I(SrcImmFAddr | No64, em_call_far), N,
4595 II(ImplicitOps | Stack, em_pushf, pushf),
4596 II(ImplicitOps | Stack, em_popf, popf),
4597 I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
4599 I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
4600 I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
4601 I2bv(SrcSI | DstDI | Mov | String | TwoMemOp, em_mov),
4602 F2bv(SrcSI | DstDI | String | NoWrite | TwoMemOp, em_cmp_r),
4604 F2bv(DstAcc | SrcImm | NoWrite, em_test),
4605 I2bv(SrcAcc | DstDI | Mov | String, em_mov),
4606 I2bv(SrcSI | DstAcc | Mov | String, em_mov),
4607 F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
4609 X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
4611 X8(I(DstReg | SrcImm64 | Mov, em_mov)),
4613 G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
4614 I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
4615 I(ImplicitOps | NearBranch, em_ret),
4616 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
4617 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
4618 G(ByteOp, group11), G(0, group11),
4620 I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
4621 I(ImplicitOps | SrcImmU16, em_ret_far_imm),
4622 I(ImplicitOps, em_ret_far),
4623 D(ImplicitOps), DI(SrcImmByte, intn),
4624 D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
4626 G(Src2One | ByteOp, group2), G(Src2One, group2),
4627 G(Src2CL | ByteOp, group2), G(Src2CL, group2),
4628 I(DstAcc | SrcImmUByte | No64, em_aam),
4629 I(DstAcc | SrcImmUByte | No64, em_aad),
4630 F(DstAcc | ByteOp | No64, em_salc),
4631 I(DstAcc | SrcXLat | ByteOp, em_mov),
4633 N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
4635 X3(I(SrcImmByte | NearBranch, em_loop)),
4636 I(SrcImmByte | NearBranch, em_jcxz),
4637 I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
4638 I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
4640 I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
4641 I(SrcImmFAddr | No64, em_jmp_far),
4642 D(SrcImmByte | ImplicitOps | NearBranch),
4643 I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
4644 I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
4646 N, DI(ImplicitOps, icebp), N, N,
4647 DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
4648 G(ByteOp, group3), G(0, group3),
4650 D(ImplicitOps), D(ImplicitOps),
4651 I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
4652 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
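/*
 * Two-byte opcode decode table, indexed by the byte that follows the
 * 0x0f escape (see the opcode fetch in x86_decode_insn() below).
 */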
4655 static const struct opcode twobyte_table[256] = {
4657 G(0, group6), GD(0, &group7), N, N,
4658 N, I(ImplicitOps | EmulateOnUD, em_syscall),
4659 II(ImplicitOps | Priv, em_clts, clts), N,
4660 DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
4661 N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
4663 N, N, N, N, N, N, N, N,
4664 D(ImplicitOps | ModRM | SrcMem | NoAccess),
4665 N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
4667 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
4668 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
4669 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
4671 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
4674 GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
4675 GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
4676 N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
4679 II(ImplicitOps | Priv, em_wrmsr, wrmsr),
4680 IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
4681 II(ImplicitOps | Priv, em_rdmsr, rdmsr),
4682 IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
4683 I(ImplicitOps | EmulateOnUD, em_sysenter),
4684 I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
4686 N, N, N, N, N, N, N, N,
4688 X16(D(DstReg | SrcMem | ModRM)),
4690 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4695 N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
4700 N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
4702 X16(D(SrcImm | NearBranch)),
4704 X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)),
4706 I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
4707 II(ImplicitOps, em_cpuid, cpuid),
4708 F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
4709 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
4710 F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
4712 I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
4713 II(EmulateOnUD | ImplicitOps, em_rsm, rsm),
4714 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
4715 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
4716 F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
4717 GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
4719 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable | SrcWrite, em_cmpxchg),
4720 I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
4721 F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
4722 I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
4723 I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
4724 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4728 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
4729 I(DstReg | SrcMem | ModRM, em_bsf_c),
4730 I(DstReg | SrcMem | ModRM, em_bsr_c),
4731 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4733 F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
4734 N, ID(0, &instr_dual_0f_c3),
4735 N, N, N, GD(0, &group9),
4737 X8(I(DstReg, em_bswap)),
4739 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4741 N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
4742 N, N, N, N, N, N, N, N,
4744 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
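/*
 * Decode entries for the 0f 38 f0/f1 (MOVBE) encodings referenced from
 * the three-byte opcode map below; only the MOVBE forms are emulated,
 * the remaining prefix selections are left undefined (N).
 */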
4747 static const struct instr_dual instr_dual_0f_38_f0 = {
4748 I(DstReg | SrcMem | Mov, em_movbe), N
4751 static const struct instr_dual instr_dual_0f_38_f1 = {
4752 I(DstMem | SrcReg | Mov, em_movbe), N
4755 static const struct gprefix three_byte_0f_38_f0 = {
4756 ID(0, &instr_dual_0f_38_f0), N, N, N
4759 static const struct gprefix three_byte_0f_38_f1 = {
4760 ID(0, &instr_dual_0f_38_f1), N, N, N
4764 * Insns below are selected by the prefix, which is indexed by the third opcode byte.
4767 static const struct opcode opcode_map_0f_38[256] = {
4769 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4771 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4773 GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
4774 GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
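/*
 * Number of immediate bytes implied by the current decode flags and
 * operand size.
 */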
4795 static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
4799 size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
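/*
 * Fetch an immediate of @size bytes from the instruction stream at
 * _eip.  insn_fetch() of a signed type sign-extends the value; when
 * @sign_extension is false the result is masked back down to @size
 * bytes afterwards.
 */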
4805 static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
4806 unsigned size, bool sign_extension)
4808 int rc = X86EMUL_CONTINUE;
4812 op->addr.mem.ea = ctxt->_eip;
4813 /* NB. Immediates are sign-extended as necessary. */
4814 switch (op->bytes) {
4816 op->val = insn_fetch(s8, ctxt);
4819 op->val = insn_fetch(s16, ctxt);
4822 op->val = insn_fetch(s32, ctxt);
4825 op->val = insn_fetch(s64, ctxt);
4828 if (!sign_extension) {
4829 switch (op->bytes) {
4837 op->val &= 0xffffffff;
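/*
 * Materialize a single operand from a 5-bit Op* selector; the selectors
 * are extracted from ctxt->d with the Dst/Src/Src2 shifts in
 * x86_decode_insn() below.
 */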
4845 static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
4848 int rc = X86EMUL_CONTINUE;
4852 decode_register_operand(ctxt, op);
4855 rc = decode_imm(ctxt, op, 1, false);
4858 ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4862 if (ctxt->d & BitOp)
4863 fetch_bit_operand(ctxt);
4864 op->orig_val = op->val;
4867 ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
4871 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4872 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4873 fetch_register_operand(op);
4874 op->orig_val = op->val;
4878 op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
4879 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4880 fetch_register_operand(op);
4881 op->orig_val = op->val;
4884 if (ctxt->d & ByteOp) {
4889 op->bytes = ctxt->op_bytes;
4890 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4891 fetch_register_operand(op);
4892 op->orig_val = op->val;
4896 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4898 register_address(ctxt, VCPU_REGS_RDI);
4899 op->addr.mem.seg = VCPU_SREG_ES;
4906 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4907 fetch_register_operand(op);
4912 op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
4915 rc = decode_imm(ctxt, op, 1, true);
4923 rc = decode_imm(ctxt, op, imm_size(ctxt), true);
4926 rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
4929 ctxt->memop.bytes = 1;
4930 if (ctxt->memop.type == OP_REG) {
4931 ctxt->memop.addr.reg = decode_register(ctxt,
4932 ctxt->modrm_rm, true);
4933 fetch_register_operand(&ctxt->memop);
4937 ctxt->memop.bytes = 2;
4940 ctxt->memop.bytes = 4;
4943 rc = decode_imm(ctxt, op, 2, false);
4946 rc = decode_imm(ctxt, op, imm_size(ctxt), false);
4950 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4952 register_address(ctxt, VCPU_REGS_RSI);
4953 op->addr.mem.seg = ctxt->seg_override;
4959 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4962 reg_read(ctxt, VCPU_REGS_RBX) +
4963 (reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
4964 op->addr.mem.seg = ctxt->seg_override;
4969 op->addr.mem.ea = ctxt->_eip;
4970 op->bytes = ctxt->op_bytes + 2;
4971 insn_fetch_arr(op->valptr, op->bytes, ctxt);
4974 ctxt->memop.bytes = ctxt->op_bytes + 2;
4978 op->val = VCPU_SREG_ES;
4982 op->val = VCPU_SREG_CS;
4986 op->val = VCPU_SREG_SS;
4990 op->val = VCPU_SREG_DS;
4994 op->val = VCPU_SREG_FS;
4998 op->val = VCPU_SREG_GS;
5001 /* Special instructions do their own operand decoding. */
5003 op->type = OP_NONE; /* Disable writeback. */
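/*
 * Entry point for instruction decode.  A minimal, purely illustrative
 * sketch of how a caller might drive the emulator -- the real call
 * sites live outside this file and do considerably more work:
 *
 *	r = x86_decode_insn(ctxt, insn, insn_len);
 *	if (r == EMULATION_OK)
 *		r = x86_emulate_insn(ctxt);
 *
 * EMULATION_RESTART means an unfinished string instruction should be
 * run again; EMULATION_FAILED means the caller must handle the
 * instruction some other way.
 */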
5011 int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
5013 int rc = X86EMUL_CONTINUE;
5014 int mode = ctxt->mode;
5015 int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
5016 bool op_prefix = false;
5017 bool has_seg_override = false;
5018 struct opcode opcode;
5020 struct desc_struct desc;
5022 ctxt->memop.type = OP_NONE;
5023 ctxt->memopp = NULL;
5024 ctxt->_eip = ctxt->eip;
5025 ctxt->fetch.ptr = ctxt->fetch.data;
5026 ctxt->fetch.end = ctxt->fetch.data + insn_len;
5027 ctxt->opcode_len = 1;
5029 memcpy(ctxt->fetch.data, insn, insn_len);
5031 rc = __do_insn_fetch_bytes(ctxt, 1);
5032 if (rc != X86EMUL_CONTINUE)
5037 case X86EMUL_MODE_REAL:
5038 case X86EMUL_MODE_VM86:
5039 def_op_bytes = def_ad_bytes = 2;
5040 ctxt->ops->get_segment(ctxt, &dummy, &desc, NULL, VCPU_SREG_CS);
5042 def_op_bytes = def_ad_bytes = 4;
5044 case X86EMUL_MODE_PROT16:
5045 def_op_bytes = def_ad_bytes = 2;
5047 case X86EMUL_MODE_PROT32:
5048 def_op_bytes = def_ad_bytes = 4;
5050 #ifdef CONFIG_X86_64
5051 case X86EMUL_MODE_PROT64:
5057 return EMULATION_FAILED;
5060 ctxt->op_bytes = def_op_bytes;
5061 ctxt->ad_bytes = def_ad_bytes;
5063 /* Legacy prefixes. */
5065 switch (ctxt->b = insn_fetch(u8, ctxt)) {
5066 case 0x66: /* operand-size override */
5068 /* switch between 2/4 bytes */
5069 ctxt->op_bytes = def_op_bytes ^ 6;
5071 case 0x67: /* address-size override */
5072 if (mode == X86EMUL_MODE_PROT64)
5073 /* switch between 4/8 bytes */
5074 ctxt->ad_bytes = def_ad_bytes ^ 12;
5076 /* switch between 2/4 bytes */
5077 ctxt->ad_bytes = def_ad_bytes ^ 6;
5079 case 0x26: /* ES override */
5080 case 0x2e: /* CS override */
5081 case 0x36: /* SS override */
5082 case 0x3e: /* DS override */
5083 has_seg_override = true;
5084 ctxt->seg_override = (ctxt->b >> 3) & 3;
5086 case 0x64: /* FS override */
5087 case 0x65: /* GS override */
5088 has_seg_override = true;
5089 ctxt->seg_override = ctxt->b & 7;
5091 case 0x40 ... 0x4f: /* REX */
5092 if (mode != X86EMUL_MODE_PROT64)
5094 ctxt->rex_prefix = ctxt->b;
5096 case 0xf0: /* LOCK */
5097 ctxt->lock_prefix = 1;
5099 case 0xf2: /* REPNE/REPNZ */
5100 case 0xf3: /* REP/REPE/REPZ */
5101 ctxt->rep_prefix = ctxt->b;
5107 /* Any legacy prefix after a REX prefix nullifies its effect. */
5109 ctxt->rex_prefix = 0;
5115 if (ctxt->rex_prefix & 8)
5116 ctxt->op_bytes = 8; /* REX.W */
5118 /* Opcode byte(s). */
5119 opcode = opcode_table[ctxt->b];
5120 /* Two-byte opcode? */
5121 if (ctxt->b == 0x0f) {
5122 ctxt->opcode_len = 2;
5123 ctxt->b = insn_fetch(u8, ctxt);
5124 opcode = twobyte_table[ctxt->b];
5126 /* 0F_38 opcode map */
5127 if (ctxt->b == 0x38) {
5128 ctxt->opcode_len = 3;
5129 ctxt->b = insn_fetch(u8, ctxt);
5130 opcode = opcode_map_0f_38[ctxt->b];
5133 ctxt->d = opcode.flags;
5135 if (ctxt->d & ModRM)
5136 ctxt->modrm = insn_fetch(u8, ctxt);
5138 /* VEX-prefixed instructions are not implemented */
5139 if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
5140 (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
5144 while (ctxt->d & GroupMask) {
5145 switch (ctxt->d & GroupMask) {
5147 goffset = (ctxt->modrm >> 3) & 7;
5148 opcode = opcode.u.group[goffset];
5151 goffset = (ctxt->modrm >> 3) & 7;
5152 if ((ctxt->modrm >> 6) == 3)
5153 opcode = opcode.u.gdual->mod3[goffset];
5155 opcode = opcode.u.gdual->mod012[goffset];
5158 goffset = ctxt->modrm & 7;
5159 opcode = opcode.u.group[goffset];
5162 if (ctxt->rep_prefix && op_prefix)
5163 return EMULATION_FAILED;
5164 simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
5165 switch (simd_prefix) {
5166 case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
5167 case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
5168 case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
5169 case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
5173 if (ctxt->modrm > 0xbf)
5174 opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
5176 opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
5179 if ((ctxt->modrm >> 6) == 3)
5180 opcode = opcode.u.idual->mod3;
5182 opcode = opcode.u.idual->mod012;
5185 if (ctxt->mode == X86EMUL_MODE_PROT64)
5186 opcode = opcode.u.mdual->mode64;
5188 opcode = opcode.u.mdual->mode32;
5191 return EMULATION_FAILED;
5194 ctxt->d &= ~(u64)GroupMask;
5195 ctxt->d |= opcode.flags;
5200 return EMULATION_FAILED;
5202 ctxt->execute = opcode.u.execute;
5204 if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
5205 return EMULATION_FAILED;
5207 if (unlikely(ctxt->d &
5208 (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
5211 * These are copied unconditionally here, and checked unconditionally
5212 * in x86_emulate_insn.
5214 ctxt->check_perm = opcode.check_perm;
5215 ctxt->intercept = opcode.intercept;
5217 if (ctxt->d & NotImpl)
5218 return EMULATION_FAILED;
5220 if (mode == X86EMUL_MODE_PROT64) {
5221 if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
5223 else if (ctxt->d & NearBranch)
5227 if (ctxt->d & Op3264) {
5228 if (mode == X86EMUL_MODE_PROT64)
5234 if ((ctxt->d & No16) && ctxt->op_bytes == 2)
5238 ctxt->op_bytes = 16;
5239 else if (ctxt->d & Mmx)
5243 /* ModRM and SIB bytes. */
5244 if (ctxt->d & ModRM) {
5245 rc = decode_modrm(ctxt, &ctxt->memop);
5246 if (!has_seg_override) {
5247 has_seg_override = true;
5248 ctxt->seg_override = ctxt->modrm_seg;
5250 } else if (ctxt->d & MemAbs)
5251 rc = decode_abs(ctxt, &ctxt->memop);
5252 if (rc != X86EMUL_CONTINUE)
5255 if (!has_seg_override)
5256 ctxt->seg_override = VCPU_SREG_DS;
5258 ctxt->memop.addr.mem.seg = ctxt->seg_override;
5261 * Decode and fetch the source operand: register, memory, or immediate.
5264 rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
5265 if (rc != X86EMUL_CONTINUE)
5269 * Decode and fetch the second source operand: register, memory, or immediate.
5272 rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
5273 if (rc != X86EMUL_CONTINUE)
5276 /* Decode and fetch the destination operand: register or memory. */
5277 rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
5279 if (ctxt->rip_relative && likely(ctxt->memopp))
5280 ctxt->memopp->addr.mem.ea = address_mask(ctxt,
5281 ctxt->memopp->addr.mem.ea + ctxt->_eip);
5284 return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
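/* Report whether the decoded instruction is flagged as a potential page-table write. */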
5287 bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
5289 return ctxt->d & PageTable;
5292 static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
5294 /* The second termination condition applies only to REPE
5295  * and REPNE.  If the repeat string operation prefix is
5296  * REPE/REPZ or REPNE/REPNZ, check the corresponding
5297  * termination condition:
5298  * - if REPE/REPZ and ZF = 0 then done
5299  * - if REPNE/REPNZ and ZF = 1 then done
5301 if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
5302 (ctxt->b == 0xae) || (ctxt->b == 0xaf))
5303 && (((ctxt->rep_prefix == REPE_PREFIX) &&
5304 ((ctxt->eflags & X86_EFLAGS_ZF) == 0))
5305 || ((ctxt->rep_prefix == REPNE_PREFIX) &&
5306 ((ctxt->eflags & X86_EFLAGS_ZF) == X86_EFLAGS_ZF))))
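/*
 * Force any pending x87 exception to be delivered via fwait so it can
 * be reflected to the guest as #MF before MMX state is touched.
 */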
5312 static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
5316 rc = asm_safe("fwait");
5318 if (unlikely(rc != X86EMUL_CONTINUE))
5319 return emulate_exception(ctxt, MF_VECTOR, 0, false);
5321 return X86EMUL_CONTINUE;
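/* MMX-flagged register operands live in the MM registers; read the value out now. */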
5324 static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
5327 if (op->type == OP_MM)
5328 read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
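/*
 * Dispatch to a fastop stub.  The stubs for an operation's 1/2/4/8-byte
 * variants are laid out FASTOP_SIZE bytes apart, so the destination
 * operand size selects the stub to call.  Guest flags are installed
 * around the call and read back afterwards; a stub signals #DE by
 * returning a NULL fop pointer.
 */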
5331 static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
5333 ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
5335 if (!(ctxt->d & ByteOp))
5336 fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
5338 asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
5339 : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
5340 [fastop]"+S"(fop), ASM_CALL_CONSTRAINT
5341 : "c"(ctxt->src2.val));
5343 ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
5344 if (!fop) /* exception is returned in fop variable */
5345 return emulate_de(ctxt);
5346 return X86EMUL_CONTINUE;
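/*
 * Reset the per-instruction decode state: zero the fields from
 * rip_relative up to (but not including) modrm and clear the I/O and
 * memory read caches.
 */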
5349 void init_decode_cache(struct x86_emulate_ctxt *ctxt)
5351 memset(&ctxt->rip_relative, 0,
5352 (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);
5354 ctxt->io_read.pos = 0;
5355 ctxt->io_read.end = 0;
5356 ctxt->mem_read.end = 0;
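/*
 * Execute the instruction previously decoded by x86_decode_insn():
 * perform the privilege, mode and intercept checks, fetch the memory
 * operands, run the ->execute/fastop handler (or the inline opcode
 * switch below), and write the results back.
 */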
5359 int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
5361 const struct x86_emulate_ops *ops = ctxt->ops;
5362 int rc = X86EMUL_CONTINUE;
5363 int saved_dst_type = ctxt->dst.type;
5364 unsigned emul_flags;
5366 ctxt->mem_read.pos = 0;
5368 /* LOCK prefix is allowed only with some instructions */
5369 if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
5370 rc = emulate_ud(ctxt);
5374 if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
5375 rc = emulate_ud(ctxt);
5379 emul_flags = ctxt->ops->get_hflags(ctxt);
5380 if (unlikely(ctxt->d &
5381 (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
5382 if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
5383 (ctxt->d & Undefined)) {
5384 rc = emulate_ud(ctxt);
5388 if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
5389 || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
5390 rc = emulate_ud(ctxt);
5394 if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
5395 rc = emulate_nm(ctxt);
5399 if (ctxt->d & Mmx) {
5400 rc = flush_pending_x87_faults(ctxt);
5401 if (rc != X86EMUL_CONTINUE)
5404 * Now that we know the fpu is exception safe, we can fetch the operands from it.
5407 fetch_possible_mmx_operand(ctxt, &ctxt->src);
5408 fetch_possible_mmx_operand(ctxt, &ctxt->src2);
5409 if (!(ctxt->d & Mov))
5410 fetch_possible_mmx_operand(ctxt, &ctxt->dst);
5413 if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) {
5414 rc = emulator_check_intercept(ctxt, ctxt->intercept,
5415 X86_ICPT_PRE_EXCEPT);
5416 if (rc != X86EMUL_CONTINUE)
5420 /* Instruction can only be executed in protected mode */
5421 if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
5422 rc = emulate_ud(ctxt);
5426 /* Privileged instructions can be executed only at CPL 0 */
5427 if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
5428 if (ctxt->d & PrivUD)
5429 rc = emulate_ud(ctxt);
5431 rc = emulate_gp(ctxt, 0);
5435 /* Do instruction-specific permission checks */
5436 if (ctxt->d & CheckPerm) {
5437 rc = ctxt->check_perm(ctxt);
5438 if (rc != X86EMUL_CONTINUE)
5442 if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
5443 rc = emulator_check_intercept(ctxt, ctxt->intercept,
5444 X86_ICPT_POST_EXCEPT);
5445 if (rc != X86EMUL_CONTINUE)
5449 if (ctxt->rep_prefix && (ctxt->d & String)) {
5450 /* All REP prefixes have the same first termination condition */
5451 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
5452 string_registers_quirk(ctxt);
5453 ctxt->eip = ctxt->_eip;
5454 ctxt->eflags &= ~X86_EFLAGS_RF;
5460 if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
5461 rc = segmented_read(ctxt, ctxt->src.addr.mem,
5462 ctxt->src.valptr, ctxt->src.bytes);
5463 if (rc != X86EMUL_CONTINUE)
5465 ctxt->src.orig_val64 = ctxt->src.val64;
5468 if (ctxt->src2.type == OP_MEM) {
5469 rc = segmented_read(ctxt, ctxt->src2.addr.mem,
5470 &ctxt->src2.val, ctxt->src2.bytes);
5471 if (rc != X86EMUL_CONTINUE)
5475 if ((ctxt->d & DstMask) == ImplicitOps)
5479 if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
5480 /* optimisation - avoid slow emulated read if Mov */
5481 rc = segmented_read(ctxt, ctxt->dst.addr.mem,
5482 &ctxt->dst.val, ctxt->dst.bytes);
5483 if (rc != X86EMUL_CONTINUE) {
5484 if (!(ctxt->d & NoWrite) &&
5485 rc == X86EMUL_PROPAGATE_FAULT &&
5486 ctxt->exception.vector == PF_VECTOR)
5487 ctxt->exception.error_code |= PFERR_WRITE_MASK;
5491 /* Copy full 64-bit value for CMPXCHG8B. */
5492 ctxt->dst.orig_val64 = ctxt->dst.val64;
5496 if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
5497 rc = emulator_check_intercept(ctxt, ctxt->intercept,
5498 X86_ICPT_POST_MEMACCESS);
5499 if (rc != X86EMUL_CONTINUE)
5503 if (ctxt->rep_prefix && (ctxt->d & String))
5504 ctxt->eflags |= X86_EFLAGS_RF;
5506 ctxt->eflags &= ~X86_EFLAGS_RF;
5508 if (ctxt->execute) {
5509 if (ctxt->d & Fastop) {
5510 void (*fop)(struct fastop *) = (void *)ctxt->execute;
5511 rc = fastop(ctxt, fop);
5512 if (rc != X86EMUL_CONTINUE)
5516 rc = ctxt->execute(ctxt);
5517 if (rc != X86EMUL_CONTINUE)
5522 if (ctxt->opcode_len == 2)
5524 else if (ctxt->opcode_len == 3)
5525 goto threebyte_insn;
5528 case 0x70 ... 0x7f: /* jcc (short) */
5529 if (test_cc(ctxt->b, ctxt->eflags))
5530 rc = jmp_rel(ctxt, ctxt->src.val);
5532 case 0x8d: /* lea r16/r32, m */
5533 ctxt->dst.val = ctxt->src.addr.mem.ea;
5535 case 0x90 ... 0x97: /* nop / xchg reg, rax */
5536 if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
5537 ctxt->dst.type = OP_NONE;
5541 case 0x98: /* cbw/cwde/cdqe */
5542 switch (ctxt->op_bytes) {
5543 case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
5544 case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
5545 case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
5548 case 0xcc: /* int3 */
5549 rc = emulate_int(ctxt, 3);
5551 case 0xcd: /* int n */
5552 rc = emulate_int(ctxt, ctxt->src.val);
5554 case 0xce: /* into */
5555 if (ctxt->eflags & X86_EFLAGS_OF)
5556 rc = emulate_int(ctxt, 4);
5558 case 0xe9: /* jmp rel */
5559 case 0xeb: /* jmp rel short */
5560 rc = jmp_rel(ctxt, ctxt->src.val);
5561 ctxt->dst.type = OP_NONE; /* Disable writeback. */
5563 case 0xf4: /* hlt */
5564 ctxt->ops->halt(ctxt);
5566 case 0xf5: /* cmc */
5567 /* complement carry flag from eflags reg */
5568 ctxt->eflags ^= X86_EFLAGS_CF;
5570 case 0xf8: /* clc */
5571 ctxt->eflags &= ~X86_EFLAGS_CF;
5573 case 0xf9: /* stc */
5574 ctxt->eflags |= X86_EFLAGS_CF;
5576 case 0xfc: /* cld */
5577 ctxt->eflags &= ~X86_EFLAGS_DF;
5579 case 0xfd: /* std */
5580 ctxt->eflags |= X86_EFLAGS_DF;
5583 goto cannot_emulate;
5586 if (rc != X86EMUL_CONTINUE)
5590 if (ctxt->d & SrcWrite) {
5591 BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
5592 rc = writeback(ctxt, &ctxt->src);
5593 if (rc != X86EMUL_CONTINUE)
5596 if (!(ctxt->d & NoWrite)) {
5597 rc = writeback(ctxt, &ctxt->dst);
5598 if (rc != X86EMUL_CONTINUE)
5603 * Restore dst type in case the decoding is reused
5604 * (happens for string instructions).
5606 ctxt->dst.type = saved_dst_type;
5608 if ((ctxt->d & SrcMask) == SrcSI)
5609 string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);
5611 if ((ctxt->d & DstMask) == DstDI)
5612 string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);
5614 if (ctxt->rep_prefix && (ctxt->d & String)) {
5616 struct read_cache *r = &ctxt->io_read;
5617 if ((ctxt->d & SrcMask) == SrcSI)
5618 count = ctxt->src.count;
5620 count = ctxt->dst.count;
5621 register_address_increment(ctxt, VCPU_REGS_RCX, -count);
5623 if (!string_insn_completed(ctxt)) {
5625 * Re-enter the guest when the pio read-ahead buffer is empty
5626 * or, if it is not used, after every 1024 iterations.
5628 if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
5629 (r->end == 0 || r->end != r->pos)) {
5631 * Reset the read cache.  This usually happens before
5632 * decode, but since the instruction is restarted
5633 * we have to do it here.
5635 ctxt->mem_read.end = 0;
5636 writeback_registers(ctxt);
5637 return EMULATION_RESTART;
5639 goto done; /* skip rip writeback */
5641 ctxt->eflags &= ~X86_EFLAGS_RF;
5644 ctxt->eip = ctxt->_eip;
5647 if (rc == X86EMUL_PROPAGATE_FAULT) {
5648 WARN_ON(ctxt->exception.vector > 0x1f);
5649 ctxt->have_exception = true;
5651 if (rc == X86EMUL_INTERCEPTED)
5652 return EMULATION_INTERCEPTED;
5654 if (rc == X86EMUL_CONTINUE)
5655 writeback_registers(ctxt);
5657 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
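/* Two-byte (0x0f) opcodes still handled by the inline switch rather than ->execute. */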
5661 case 0x09: /* wbinvd */
5662 (ctxt->ops->wbinvd)(ctxt);
5664 case 0x08: /* invd */
5665 case 0x0d: /* GrpP (prefetch) */
5666 case 0x18: /* Grp16 (prefetch/nop) */
5667 case 0x1f: /* nop */
5669 case 0x20: /* mov cr, reg */
5670 ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
5672 case 0x21: /* mov from dr to reg */
5673 ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
5675 case 0x40 ... 0x4f: /* cmov */
5676 if (test_cc(ctxt->b, ctxt->eflags))
5677 ctxt->dst.val = ctxt->src.val;
5678 else if (ctxt->op_bytes != 4)
5679 ctxt->dst.type = OP_NONE; /* no writeback */
5681 case 0x80 ... 0x8f: /* jnz rel, etc. */
5682 if (test_cc(ctxt->b, ctxt->eflags))
5683 rc = jmp_rel(ctxt, ctxt->src.val);
5685 case 0x90 ... 0x9f: /* setcc r/m8 */
5686 ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
5688 case 0xb6 ... 0xb7: /* movzx */
5689 ctxt->dst.bytes = ctxt->op_bytes;
5690 ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
5691 : (u16) ctxt->src.val;
5693 case 0xbe ... 0xbf: /* movsx */
5694 ctxt->dst.bytes = ctxt->op_bytes;
5695 ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
5696 (s16) ctxt->src.val;
5699 goto cannot_emulate;
5704 if (rc != X86EMUL_CONTINUE)
5710 return EMULATION_FAILED;
5713 void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
5715 invalidate_registers(ctxt);
5718 void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
5720 writeback_registers(ctxt);
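/*
 * A cached guest physical address is only reusable when the instruction
 * touches a single memory location; rep-string and TwoMemOp forms are
 * therefore singled out below.
 */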
5723 bool emulator_can_use_gpa(struct x86_emulate_ctxt *ctxt)
5725 if (ctxt->rep_prefix && (ctxt->d & String))
5728 if (ctxt->d & TwoMemOp)