/*
 * i386 virtual CPU header
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#ifndef CPU_I386_H
#define CPU_I386_H
#ifdef TARGET_X86_64
#define TARGET_LONG_BITS 64
#else
#define TARGET_LONG_BITS 32
#endif
/* target supports implicit self-modifying code */
#define TARGET_HAS_SMC
/* support for self-modifying code even if the modified instruction is
   close to the modifying instruction */
#define TARGET_HAS_PRECISE_SMC

#define TARGET_HAS_ICE 1
#include "cpu-defs.h"

#include "softfloat.h"

#if defined(__i386__) && !defined(CONFIG_SOFTMMU)
#define USE_CODE_COPY
#endif
/* segment descriptor fields */
#define DESC_G_MASK     (1 << 23)
#define DESC_B_SHIFT    22
#define DESC_B_MASK     (1 << DESC_B_SHIFT)
#define DESC_L_SHIFT    21 /* x86_64 only : 64 bit code segment */
#define DESC_L_MASK     (1 << DESC_L_SHIFT)
#define DESC_AVL_MASK   (1 << 20)
#define DESC_P_MASK     (1 << 15)
#define DESC_DPL_SHIFT  13
#define DESC_S_MASK     (1 << 12)
#define DESC_TYPE_SHIFT 8
#define DESC_A_MASK     (1 << 8)

#define DESC_CS_MASK    (1 << 11) /* 1=code segment 0=data segment */
#define DESC_C_MASK     (1 << 10) /* code: conforming */
#define DESC_R_MASK     (1 << 9)  /* code: readable */

#define DESC_E_MASK     (1 << 10) /* data: expansion direction */
#define DESC_W_MASK     (1 << 9)  /* data: writable */

#define DESC_TSS_BUSY_MASK (1 << 9)
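
/* Illustrative sketch (not part of the original header): decoding a few
   fields from the second descriptor dword using the DESC_* constants
   above.  There is no DESC_DPL_MASK here, so the 2-bit DPL field is
   masked by hand; the helper names are made up for this example. */
static inline int desc_get_dpl_example(uint32_t flags)
{
    return (flags >> DESC_DPL_SHIFT) & 3; /* descriptor privilege level */
}

static inline int desc_is_code_segment_example(uint32_t flags)
{
    /* only meaningful for code/data descriptors, i.e. when DESC_S_MASK is set */
    return (flags & (DESC_S_MASK | DESC_CS_MASK)) == (DESC_S_MASK | DESC_CS_MASK);
}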
#define IOPL_SHIFT 12

/* eflags masks */
#define TF_MASK   0x00000100
#define IF_MASK   0x00000200
#define DF_MASK   0x00000400
#define IOPL_MASK 0x00003000
#define NT_MASK   0x00004000
#define RF_MASK   0x00010000
#define VM_MASK   0x00020000
#define AC_MASK   0x00040000
#define VIF_MASK  0x00080000
#define VIP_MASK  0x00100000
#define ID_MASK   0x00200000
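
/* Illustrative sketch (not part of the original header): the masks above
   are plain EFLAGS bit positions, e.g. to read back the 2-bit IOPL field
   or to test whether hardware interrupts are enabled.  The helper names
   are made up for this example. */
static inline int eflags_get_iopl_example(uint32_t eflags)
{
    return (eflags & IOPL_MASK) >> IOPL_SHIFT;
}

static inline int eflags_irqs_enabled_example(uint32_t eflags)
{
    return (eflags & IF_MASK) != 0;
}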
/* hidden flags - used internally by qemu to represent additional cpu
   states. Only the CPL and INHIBIT_IRQ are not redundant. We avoid
   using the IOPL_MASK, TF_MASK and VM_MASK bit positions to ease ORing
   with eflags. */
#define HF_CPL_SHIFT         0
/* true if soft mmu is being used */
#define HF_SOFTMMU_SHIFT     2
/* true if hardware interrupts must be disabled for next instruction */
#define HF_INHIBIT_IRQ_SHIFT 3
/* 16 or 32 bit code/stack segments */
#define HF_CS32_SHIFT        4
#define HF_SS32_SHIFT        5
/* zero base for DS, ES and SS : can be '0' only in 32 bit CS segment */
#define HF_ADDSEG_SHIFT      6
/* copy of CR0.PE (protected mode) */
#define HF_PE_SHIFT          7
#define HF_TF_SHIFT          8  /* must be same as eflags */
#define HF_MP_SHIFT          9  /* the order must be MP, EM, TS */
#define HF_EM_SHIFT         10
#define HF_TS_SHIFT         11
#define HF_IOPL_SHIFT       12  /* must be same as eflags */
#define HF_LMA_SHIFT        14  /* only used on x86_64: long mode active */
#define HF_CS64_SHIFT       15  /* only used on x86_64: 64 bit code segment */
#define HF_OSFXSR_SHIFT     16  /* CR4.OSFXSR */
#define HF_VM_SHIFT         17  /* must be same as eflags */
#define HF_CPL_MASK         (3 << HF_CPL_SHIFT)
#define HF_SOFTMMU_MASK     (1 << HF_SOFTMMU_SHIFT)
#define HF_INHIBIT_IRQ_MASK (1 << HF_INHIBIT_IRQ_SHIFT)
#define HF_CS32_MASK        (1 << HF_CS32_SHIFT)
#define HF_SS32_MASK        (1 << HF_SS32_SHIFT)
#define HF_ADDSEG_MASK      (1 << HF_ADDSEG_SHIFT)
#define HF_PE_MASK          (1 << HF_PE_SHIFT)
#define HF_TF_MASK          (1 << HF_TF_SHIFT)
#define HF_MP_MASK          (1 << HF_MP_SHIFT)
#define HF_EM_MASK          (1 << HF_EM_SHIFT)
#define HF_TS_MASK          (1 << HF_TS_SHIFT)
#define HF_LMA_MASK         (1 << HF_LMA_SHIFT)
#define HF_CS64_MASK        (1 << HF_CS64_SHIFT)
#define HF_OSFXSR_MASK      (1 << HF_OSFXSR_SHIFT)
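
/* Illustrative sketch (not part of the original header): hflags is a plain
   bit field, so the current privilege level and the code segment size can
   be read back directly.  The helper names are made up for this example. */
static inline int hflags_get_cpl_example(uint32_t hflags)
{
    return (hflags & HF_CPL_MASK) >> HF_CPL_SHIFT;
}

static inline int hflags_code_is_32bit_example(uint32_t hflags)
{
    return (hflags & HF_CS32_MASK) != 0;
}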
#define CR0_PE_MASK  (1 << 0)
#define CR0_MP_MASK  (1 << 1)
#define CR0_EM_MASK  (1 << 2)
#define CR0_TS_MASK  (1 << 3)
#define CR0_ET_MASK  (1 << 4)
#define CR0_NE_MASK  (1 << 5)
#define CR0_WP_MASK  (1 << 16)
#define CR0_AM_MASK  (1 << 18)
#define CR0_PG_MASK  (1 << 31)

#define CR4_VME_MASK        (1 << 0)
#define CR4_PVI_MASK        (1 << 1)
#define CR4_TSD_MASK        (1 << 2)
#define CR4_DE_MASK         (1 << 3)
#define CR4_PSE_MASK        (1 << 4)
#define CR4_PAE_MASK        (1 << 5)
#define CR4_PGE_MASK        (1 << 7)
#define CR4_PCE_MASK        (1 << 8)
#define CR4_OSFXSR_MASK     (1 << 9)
#define CR4_OSXMMEXCPT_MASK (1 << 10)
#define PG_PRESENT_BIT  0
#define PG_RW_BIT       1
#define PG_USER_BIT     2
#define PG_PWT_BIT      3
#define PG_PCD_BIT      4
#define PG_ACCESSED_BIT 5
#define PG_DIRTY_BIT    6
#define PG_PSE_BIT      7
#define PG_GLOBAL_BIT   8

#define PG_PRESENT_MASK  (1 << PG_PRESENT_BIT)
#define PG_RW_MASK       (1 << PG_RW_BIT)
#define PG_USER_MASK     (1 << PG_USER_BIT)
#define PG_PWT_MASK      (1 << PG_PWT_BIT)
#define PG_PCD_MASK      (1 << PG_PCD_BIT)
#define PG_ACCESSED_MASK (1 << PG_ACCESSED_BIT)
#define PG_DIRTY_MASK    (1 << PG_DIRTY_BIT)
#define PG_PSE_MASK      (1 << PG_PSE_BIT)
#define PG_GLOBAL_MASK   (1 << PG_GLOBAL_BIT)

#define PG_ERROR_W_BIT     1

#define PG_ERROR_P_MASK    0x01
#define PG_ERROR_W_MASK    (1 << PG_ERROR_W_BIT)
#define PG_ERROR_U_MASK    0x04
#define PG_ERROR_RSVD_MASK 0x08
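
/* Illustrative sketch (not part of the original header): the PG_ERROR_*
   masks follow the hardware page fault error code layout, so a fault
   raised by a user-mode write to a present, read-only page would carry
   P|W|U.  The helper name is made up for this example. */
static inline int pg_error_code_example(int is_write, int is_user, int page_present)
{
    int error_code = 0;

    if (page_present)
        error_code |= PG_ERROR_P_MASK;
    if (is_write)
        error_code |= PG_ERROR_W_MASK;
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    return error_code;
}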
#define MSR_IA32_APICBASE        0x1b
#define MSR_IA32_APICBASE_BSP    (1 << 8)
#define MSR_IA32_APICBASE_ENABLE (1 << 11)
#define MSR_IA32_APICBASE_BASE   (0xfffff << 12)

#define MSR_IA32_SYSENTER_CS  0x174
#define MSR_IA32_SYSENTER_ESP 0x175
#define MSR_IA32_SYSENTER_EIP 0x176

#define MSR_EFER 0xc0000080

#define MSR_EFER_SCE   (1 << 0)
#define MSR_EFER_LME   (1 << 8)
#define MSR_EFER_LMA   (1 << 10)
#define MSR_EFER_NXE   (1 << 11)
#define MSR_EFER_FFXSR (1 << 14)

#define MSR_STAR         0xc0000081
#define MSR_LSTAR        0xc0000082
#define MSR_CSTAR        0xc0000083
#define MSR_FMASK        0xc0000084
#define MSR_FSBASE       0xc0000100
#define MSR_GSBASE       0xc0000101
#define MSR_KERNELGSBASE 0xc0000102
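
/* Illustrative sketch (not part of the original header): testing bits of
   an EFER value read from MSR_EFER, e.g. whether long mode is currently
   active (EFER.LMA) or syscall/sysret are enabled (EFER.SCE).  The helper
   names are made up for this example. */
static inline int efer_long_mode_active_example(uint64_t efer)
{
    return (efer & MSR_EFER_LMA) != 0;
}

static inline int efer_syscall_enabled_example(uint64_t efer)
{
    return (efer & MSR_EFER_SCE) != 0;
}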
/* cpuid_features bits */
#define CPUID_FP87 (1 << 0)
#define CPUID_VME  (1 << 1)
#define CPUID_DE   (1 << 2)
#define CPUID_PSE  (1 << 3)
#define CPUID_TSC  (1 << 4)
#define CPUID_MSR  (1 << 5)
#define CPUID_PAE  (1 << 6)
#define CPUID_MCE  (1 << 7)
#define CPUID_CX8  (1 << 8)
#define CPUID_APIC (1 << 9)
#define CPUID_SEP  (1 << 11) /* sysenter/sysexit */
#define CPUID_MTRR (1 << 12)
#define CPUID_PGE  (1 << 13)
#define CPUID_MCA  (1 << 14)
#define CPUID_CMOV (1 << 15)
#define CPUID_MMX  (1 << 23)
#define CPUID_FXSR (1 << 24)
#define CPUID_SSE  (1 << 25)
#define CPUID_SSE2 (1 << 26)

/* cpuid_ext_features bits (CPUID.1:ECX) */
#define CPUID_EXT_SS3     (1 << 0) /* SSE3 */
#define CPUID_EXT_MONITOR (1 << 3)
#define CPUID_EXT_CX16    (1 << 13)

/* cpuid_ext2_features bits (CPUID.80000001:EDX) */
#define CPUID_EXT2_SYSCALL (1 << 11)
#define CPUID_EXT2_NX      (1 << 20)
#define CPUID_EXT2_FFXSR   (1 << 25)
#define CPUID_EXT2_LM      (1 << 29)
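
/* Illustrative sketch (not part of the original header): the CPUID_*
   constants are bit masks for the feature words returned by the CPUID
   instruction and mirrored in cpuid_features/cpuid_ext_features below,
   so feature tests are simple mask checks.  The helper name is made up
   for this example. */
static inline int cpu_has_sse2_example(uint32_t features)
{
    return (features & CPUID_SSE2) != 0;
}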
#define EXCP00_DIVZ   0
#define EXCP01_SSTP   1
#define EXCP02_NMI    2
#define EXCP03_INT3   3
#define EXCP04_INTO   4
#define EXCP05_BOUND  5
#define EXCP06_ILLOP  6
#define EXCP07_PREX   7
#define EXCP08_DBLE   8
#define EXCP09_XERR   9
#define EXCP0A_TSS   10
#define EXCP0B_NOSEG 11
#define EXCP0C_STACK 12
#define EXCP0D_GPF   13
#define EXCP0E_PAGE  14
#define EXCP10_COPR  16
#define EXCP11_ALGN  17
#define EXCP12_MCHK  18
    CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */
    CC_OP_EFLAGS,  /* all cc are explicitly computed, CC_SRC = flags */

    CC_OP_MULB, /* modify all flags, C, O = (CC_SRC != 0) */

    CC_OP_ADDB, /* modify all flags, CC_DST = res, CC_SRC = src1 */

    CC_OP_ADCB, /* modify all flags, CC_DST = res, CC_SRC = src1 */

    CC_OP_SUBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */

    CC_OP_SBBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */

    CC_OP_LOGICB, /* modify all flags, CC_DST = res */

    CC_OP_INCB, /* modify all flags except C, CC_DST = res, CC_SRC = C */

    CC_OP_DECB, /* modify all flags except C, CC_DST = res, CC_SRC = C */

    CC_OP_SHLB, /* modify all flags, CC_DST = res, CC_SRC.msb = C */

    CC_OP_SARB, /* modify all flags, CC_DST = res, CC_SRC.lsb = C */
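
/* Illustrative sketch (not part of the original header): with the lazy
   condition code scheme described above, eflags bits are recomputed on
   demand from the saved operands.  For the byte-sized logic ops
   (CC_OP_LOGICB) the comments state CC_DST = res, so ZF can be rederived
   as shown; the helper name and the cc_dst parameter are made up for
   this example. */
static inline int cc_logicb_compute_zf_example(target_ulong cc_dst)
{
    return (uint8_t)cc_dst == 0; /* CF is always 0 for logic ops */
}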
#define USE_X86LDOUBLE

#ifdef USE_X86LDOUBLE
typedef floatx80 CPU86_LDouble;
#else
typedef float64 CPU86_LDouble;
#endif
typedef struct SegmentCache {
    uint32_t selector;
    target_ulong base;
    uint32_t limit;
    uint32_t flags;
} SegmentCache;
#ifdef WORDS_BIGENDIAN
#define XMM_B(n) _b[15 - (n)]
#define XMM_W(n) _w[7 - (n)]
#define XMM_L(n) _l[3 - (n)]
#define XMM_S(n) _s[3 - (n)]
#define XMM_Q(n) _q[1 - (n)]
#define XMM_D(n) _d[1 - (n)]

#define MMX_B(n) _b[7 - (n)]
#define MMX_W(n) _w[3 - (n)]
#define MMX_L(n) _l[1 - (n)]
#else
#define XMM_B(n) _b[n]
#define XMM_W(n) _w[n]
#define XMM_L(n) _l[n]
#define XMM_S(n) _s[n]
#define XMM_Q(n) _q[n]
#define XMM_D(n) _d[n]

#define MMX_B(n) _b[n]
#define MMX_W(n) _w[n]
#define MMX_L(n) _l[n]
#endif
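
/* Illustrative sketch (not part of the original header): the XMM_*() and
   MMX_*() accessors hide host byte order, so index 0 always names the
   architecturally lowest lane on both little and big endian hosts.
   XMMReg is the SSE register union used for xmm_regs below, declared
   earlier in this header (not shown in this excerpt); the helper name is
   made up for this example. */
static inline uint32_t xmm_low_dword_example(const XMMReg *reg)
{
    return reg->XMM_L(0); /* 32-bit lane 0, independent of host endianness */
}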
#ifdef TARGET_X86_64
#define CPU_NB_REGS 16
#else
#define CPU_NB_REGS 8
#endif
typedef struct CPUX86State {
#if TARGET_LONG_BITS > HOST_LONG_BITS
    /* temporaries if we cannot store them in host registers */
    target_ulong t0, t1, t2;
#endif

    /* standard registers */
    target_ulong regs[CPU_NB_REGS];
    target_ulong eip;
    target_ulong eflags; /* eflags register. During CPU emulation, CC
                            flags and DF are set to zero because they are
                            stored elsewhere */

    /* emulator internal eflags handling */
    target_ulong cc_src;
    target_ulong cc_dst;
    uint32_t cc_op;
    int32_t df; /* D flag : 1 if D = 0, -1 if D = 1 */
    uint32_t hflags; /* hidden flags, see HF_xxx constants */

    /* segments */
    SegmentCache segs[6]; /* selector values */
    SegmentCache ldt;
    SegmentCache tr;
    SegmentCache gdt; /* only base and limit are used */
    SegmentCache idt; /* only base and limit are used */

    target_ulong cr[5]; /* NOTE: cr1 is unused */

    /* FPU state */
    unsigned int fpstt; /* top of stack index */
    unsigned int fpus;
    unsigned int fpuc;
    uint8_t fptags[8]; /* 0 = valid, 1 = empty */
    union {
#ifdef USE_X86LDOUBLE
        CPU86_LDouble d __attribute__((aligned(16)));
#else
        CPU86_LDouble d;
#endif
        MMXReg mmx;
    } fpregs[8];
    /* emulator internal variables */
    float_status fp_status;
    float_status sse_status;
    XMMReg xmm_regs[CPU_NB_REGS];
    /* sysenter registers */
    uint32_t sysenter_cs;
    uint32_t sysenter_esp;
    uint32_t sysenter_eip;
    uint64_t efer;
    uint64_t star;
#ifdef TARGET_X86_64
    target_ulong lstar;
    target_ulong cstar;
    target_ulong fmask;
    target_ulong kernelgsbase;
#endif

    /* temporary data for USE_CODE_COPY mode */
    int native_fp_regs; /* if true, the FPU state is in the native CPU regs */
    /* exception/interrupt handling */
    int exception_index;
    int error_code;
    int exception_is_int;
    target_ulong exception_next_eip;
    struct TranslationBlock *current_tb; /* currently executing TB */
    target_ulong dr[8]; /* debug registers */
    int interrupt_request;
    int user_mode_only; /* user mode only simulation */
    /* soft mmu support */
    /* in order to avoid passing too many arguments to the memory
       write helpers, we store some rarely used information in the CPU
       context */
    unsigned long mem_write_pc; /* host pc at which the memory was
                                   written */
    target_ulong mem_write_vaddr; /* target virtual addr at which the
                                     memory was written */
    /* 0 = kernel, 1 = user */
    CPUTLBEntry tlb_read[2][CPU_TLB_SIZE];
    CPUTLBEntry tlb_write[2][CPU_TLB_SIZE];
    /* from this point: preserved by CPU reset */
    /* ice debug support */
    target_ulong breakpoints[MAX_BREAKPOINTS];
    int nb_breakpoints;
    int singlestep_enabled;

    /* processor features (e.g. for CPUID insn) */
    uint32_t cpuid_level;
    uint32_t cpuid_vendor1;
    uint32_t cpuid_vendor2;
    uint32_t cpuid_vendor3;
    uint32_t cpuid_version;
    uint32_t cpuid_features;
    uint32_t cpuid_ext_features;
    uint32_t cpuid_xlevel;
    uint32_t cpuid_model[12];
    uint32_t cpuid_ext2_features;
    /* in order to simplify APIC support, we leave this pointer to the
       user */
    struct APICState *apic_state;
} CPUX86State;
CPUX86State *cpu_x86_init(void);
int cpu_x86_exec(CPUX86State *s);
void cpu_x86_close(CPUX86State *s);
int cpu_get_pic_interrupt(CPUX86State *s);
/* MSDOS compatibility mode FPU exception support */
void cpu_set_ferr(CPUX86State *s);
/* this function must always be used to load data in the segment
   cache: it synchronizes the hflags with the segment cache values */
static inline void cpu_x86_load_seg_cache(CPUX86State *env,
                                          int seg_reg, unsigned int selector,
                                          uint32_t base, unsigned int limit,
                                          unsigned int flags)
{
    SegmentCache *sc;
    unsigned int new_hflags;

    sc = &env->segs[seg_reg];
    sc->selector = selector;
    sc->base = base;
    sc->limit = limit;
    sc->flags = flags;

    /* update the hidden flags */
    if (seg_reg == R_CS) {
        if ((env->hflags & HF_LMA_MASK) && (flags & DESC_L_MASK)) {
            /* long mode */
            env->hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
            env->hflags &= ~(HF_ADDSEG_MASK);
        } else {
            /* legacy / compatibility case */
            new_hflags = (env->segs[R_CS].flags & DESC_B_MASK)
                >> (DESC_B_SHIFT - HF_CS32_SHIFT);
            env->hflags = (env->hflags & ~(HF_CS32_MASK | HF_CS64_MASK)) |
                new_hflags;
        }
    }
    new_hflags = (env->segs[R_SS].flags & DESC_B_MASK)
        >> (DESC_B_SHIFT - HF_SS32_SHIFT);
    if (env->hflags & HF_CS64_MASK) {
        /* zero base assumed for DS, ES and SS in long mode */
    } else if (!(env->cr[0] & CR0_PE_MASK) ||
               (env->eflags & VM_MASK) ||
               !(env->hflags & HF_CS32_MASK)) {
        /* XXX: try to avoid this test. The problem comes from the
           fact that in real mode or vm86 mode we only modify the
           'base' and 'selector' fields of the segment cache to go
           faster. A solution may be to force addseg to one in
           translate-i386.c. */
        new_hflags |= HF_ADDSEG_MASK;
    } else {
        new_hflags |= ((env->segs[R_DS].base |
                        env->segs[R_ES].base |
                        env->segs[R_SS].base) != 0) <<
            HF_ADDSEG_SHIFT;
    }
    env->hflags = (env->hflags &
                   ~(HF_SS32_MASK | HF_ADDSEG_MASK)) | new_hflags;
}
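
/* Illustrative sketch (not part of the original header): using the
   mandatory helper above to install a flat 4 GiB, 32-bit writable data
   segment in DS.  The selector value 0x10 is arbitrary, the descriptor
   flags are composed from the DESC_* masks defined earlier, and the
   wrapper name is made up for this example. */
static inline void load_flat_ds_example(CPUX86State *env)
{
    cpu_x86_load_seg_cache(env, R_DS, 0x10, 0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK | DESC_W_MASK | DESC_A_MASK);
}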
/* wrapper, just in case memory mappings must be changed */
static inline void cpu_x86_set_cpl(CPUX86State *s, int cpl)
{
#if HF_CPL_SHIFT == 0
    s->hflags = (s->hflags & ~HF_CPL_MASK) | cpl;
#else
#error HF_CPL_MASK is hardcoded
#endif
}
/* used for debug or cpu save/restore */
void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f);
CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper);
/* the following helpers are only usable in user mode simulation as
   they can trigger unexpected exceptions */
void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector);
void cpu_x86_fsave(CPUX86State *s, uint8_t *ptr, int data32);
void cpu_x86_frstor(CPUX86State *s, uint8_t *ptr, int data32);
/* you can call this signal handler from your SIGBUS and SIGSEGV
   signal handlers to inform the virtual CPU of exceptions. non-zero
   is returned if the signal was handled by the virtual CPU. */
int cpu_x86_signal_handler(int host_signum, struct siginfo *info,
                           void *puc);
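
/* Illustrative sketch (not part of the original header): a host SIGSEGV/
   SIGBUS handler, installed with sigaction() and SA_SIGINFO, that forwards
   the fault to the virtual CPU as documented above.  The handler name is
   made up for this example. */
static inline void host_fault_handler_example(int host_signum,
                                              struct siginfo *info, void *puc)
{
    if (cpu_x86_signal_handler(host_signum, info, puc))
        return; /* the fault belonged to the guest and is now a pending guest exception */
    /* otherwise it is a real fault in the host process itself */
}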
void cpu_x86_set_a20(CPUX86State *env, int a20_state);

uint64_t cpu_get_tsc(CPUX86State *env);

void cpu_set_apic_base(CPUX86State *env, uint64_t val);
uint64_t cpu_get_apic_base(CPUX86State *env);
void cpu_set_apic_tpr(CPUX86State *env, uint8_t val);
#ifndef NO_CPU_IO_DEFS
uint8_t cpu_get_apic_tpr(CPUX86State *env);
#endif
/* will be suppressed */
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);

#define X86_DUMP_FPU  0x0001 /* dump FPU state too */
#define X86_DUMP_CCOP 0x0002 /* dump qemu flag cache */

#define TARGET_PAGE_BITS 12

#include "cpu-all.h"

#endif /* CPU_I386_H */