/*
 * i386 virtual CPU header
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
/* EFLAGS register bit masks. The names follow the standard x86 flag
   mnemonics: TF = trap flag, IF = interrupt enable, DF = direction,
   IOPL = 2-bit I/O privilege level, NT = nested task, RF = resume,
   VM = virtual-8086 mode, AC = alignment check, VIF/VIP = virtual
   interrupt flag/pending, ID = CPUID availability.
   NOTE(review): this listing appears to have lines elided (the
   low-order arithmetic-flag masks are not visible here). */
59 #define TF_MASK 0x00000100
60 #define IF_MASK 0x00000200
61 #define DF_MASK 0x00000400
62 #define IOPL_MASK 0x00003000
63 #define NT_MASK 0x00004000
64 #define RF_MASK 0x00010000
65 #define VM_MASK 0x00020000
66 #define AC_MASK 0x00040000
67 #define VIF_MASK 0x00080000
68 #define VIP_MASK 0x00100000
69 #define ID_MASK 0x00200000
/* CPU exception numbers. The hex digits embedded in each name are the
   x86 exception vector (e.g. EXCP0E_PAGE = vector 14, page fault).
   EXCP_INTERRUPT is a pseudo-exception (outside the 0..255 vector
   range) used to signal an asynchronous interruption of execution.
   NOTE(review): some vectors are not listed in this fragment. */
76 #define EXCP05_BOUND 5
77 #define EXCP06_ILLOP 6
82 #define EXCP0B_NOSEG 11
83 #define EXCP0C_STACK 12
85 #define EXCP0E_PAGE 14
86 #define EXCP10_COPR 16
87 #define EXCP11_ALGN 17
88 #define EXCP12_MCHK 18
90 #define EXCP_INTERRUPT 256 /* async interruption */
/* Condition-code computation modes for lazy flag evaluation: rather
   than computing EFLAGS after every instruction, the emulator records
   which operation produced the flags (one of these modes) together
   with its operands in CC_SRC/CC_DST, and derives the flags only when
   they are actually read (see the per-member comments below).
   NOTE(review): the enclosing 'enum' declaration and the wider size
   variants of each op (word/long forms implied by the ...B suffixes)
   are not visible in this fragment — confirm against the full file. */
93 CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */
94 CC_OP_EFLAGS, /* all cc are explicitely computed, CC_SRC = flags */
95 CC_OP_MUL, /* modify all flags, C, O = (CC_SRC != 0) */
97 CC_OP_ADDB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
101 CC_OP_ADCB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
105 CC_OP_SUBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
109 CC_OP_SBBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
113 CC_OP_LOGICB, /* modify all flags, CC_DST = res */
117 CC_OP_INCB, /* modify all flags except, CC_DST = res, CC_SRC = C */
121 CC_OP_DECB, /* modify all flags except, CC_DST = res, CC_SRC = C */
125 CC_OP_SHLB, /* modify all flags, CC_DST = res, CC_SRC.lsb = C */
129 CC_OP_SARB, /* modify all flags, CC_DST = res, CC_SRC.lsb = C */
/* Host floating-point type used to emulate the x87 FPU registers:
   native 'long double' when USE_X86LDOUBLE is defined, otherwise a
   plain 'double' (which trades away precision relative to the 80-bit
   x87 format).
   NOTE(review): the #else/#endif of this conditional are not visible
   in this fragment. */
137 #define USE_X86LDOUBLE
140 #ifdef USE_X86LDOUBLE
141 typedef long double CPU86_LDouble;
143 typedef double CPU86_LDouble;
/* Cached per-segment state, presumably decoded from a GDT/LDT
   descriptor — members are not visible in this fragment. */
146 typedef struct SegmentCache {
/* State of a descriptor-table register (used for the GDT, LDT and
   IDT below); base/limit members are not visible in this fragment. */
152 typedef struct SegmentDescriptorTable {
155 /* this is the returned base when reading the register, just to
156 avoid that the emulated program modifies it */
157 unsigned long emu_base;
158 } SegmentDescriptorTable;
/* Complete architectural + emulator-internal state of one virtual
   x86 CPU. NOTE(review): many members (general registers, eip,
   cc_src/cc_dst, exception fields) are elided in this fragment. */
160 typedef struct CPUX86State {
161 /* standard registers */
164 uint32_t eflags; /* eflags register. During CPU emulation, CC
165 flags and DF are set to zero because they are
168 /* emulator internal eflags handling */
172 int32_t df; /* D flag : 1 if D = 0, -1 if D = 1 */
/* FPU state: x87 register stack with top-of-stack index and tag bits. */
175 unsigned int fpstt; /* top of stack index */
178 uint8_t fptags[8]; /* 0 = valid, 1 = empty */
179 CPU86_LDouble fpregs[8];
181 /* emulator internal variables */
/* Segmentation state: raw selectors, their cached descriptor data,
   and the three descriptor-table registers. */
191 uint32_t segs[6]; /* selector values */
192 SegmentCache seg_cache[6]; /* info taken from LDT/GDT */
193 SegmentDescriptorTable gdt;
194 SegmentDescriptorTable ldt;
195 SegmentDescriptorTable idt;
197 /* exception/interrupt handling */
/* Set asynchronously (e.g. by cpu_x86_interrupt) to request that the
   execution loop service an interrupt — presumably polled between
   translated blocks; confirm against cpu_x86_exec. */
202 int interrupt_request;
208 /* all CPU memory access use these macros */
/* Byte accessors (endian-independent): ldub loads an unsigned byte,
   ldsb a sign-extended byte, stb stores the low 8 bits of v.
   NOTE(review): the function braces (and stb's body) are elided in
   this fragment. */
209 static inline int ldub(void *ptr)
211 return *(uint8_t *)ptr;
214 static inline int ldsb(void *ptr)
216 return *(int8_t *)ptr;
219 static inline void stb(void *ptr, int v)
/* Big-endian host path: guest memory holds little-endian x86 data, so
   16/32-bit accesses must byte-swap. Two strategies are visible:
   PowerPC byte-reversed load/store inline asm (lhbrx/lwbrx/sthbrx/
   stwbrx), and a conservative fallback that assembles the value byte
   by byte (also unaligned-safe).
   NOTE(review): the inner #if selecting asm vs. fallback, the
   function braces, and several bodies (ldq/ldfl/ldfq/stfl/stfq) are
   elided in this fragment. */
224 #ifdef WORDS_BIGENDIAN
226 /* conservative code for little endian unaligned accesses */
227 static inline int lduw(void *ptr)
231 __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
235 return p[0] | (p[1] << 8);
239 static inline int ldsw(void *ptr)
243 __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
247 return (int16_t)(p[0] | (p[1] << 8));
251 static inline int ldl(void *ptr)
255 __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (ptr));
259 return p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24);
/* 64-bit load built from two 32-bit halves (low word first). */
263 static inline uint64_t ldq(void *ptr)
269 return v1 | ((uint64_t)v2 << 32);
272 static inline void stw(void *ptr, int v)
275 __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*(uint16_t *)ptr) : "r" (v), "r" (ptr));
283 static inline void stl(void *ptr, int v)
286 __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*(uint32_t *)ptr) : "r" (v), "r" (ptr));
296 static inline void stq(void *ptr, uint64_t v)
/* Float accessors — presumably byte-swapped via a union on this path;
   bodies are not visible here, so confirm against the full file. */
305 static inline float ldfl(void *ptr)
315 static inline double ldfq(void *ptr)
325 static inline void stfl(void *ptr, float v)
335 static inline void stfq(void *ptr, double v)
/* Little-endian host path: guest and host byte order match, so each
   access is a direct pointer cast and dereference.
   NOTE(review): function braces (and the stfl/stfq bodies) are elided
   in this fragment. Also note these casts assume the host tolerates
   unaligned/type-punned access; modern compilers may flag this under
   strict aliasing — acceptable here by the file's own convention. */
347 static inline int lduw(void *ptr)
349 return *(uint16_t *)ptr;
352 static inline int ldsw(void *ptr)
354 return *(int16_t *)ptr;
357 static inline int ldl(void *ptr)
359 return *(uint32_t *)ptr;
362 static inline uint64_t ldq(void *ptr)
364 return *(uint64_t *)ptr;
367 static inline void stw(void *ptr, int v)
369 *(uint16_t *)ptr = v;
372 static inline void stl(void *ptr, int v)
374 *(uint32_t *)ptr = v;
377 static inline void stq(void *ptr, uint64_t v)
379 *(uint64_t *)ptr = v;
384 static inline float ldfl(void *ptr)
386 return *(float *)ptr;
389 static inline double ldfq(void *ptr)
391 return *(double *)ptr;
394 static inline void stfl(void *ptr, float v)
399 static inline void stfq(void *ptr, double v)
/* Port I/O callbacks: byte/word/long OUT and IN on I/O port 'addr'.
   NOTE(review): implemented elsewhere — likely by the embedder. */
406 void cpu_x86_outb(CPUX86State *env, int addr, int val);
407 void cpu_x86_outw(CPUX86State *env, int addr, int val);
408 void cpu_x86_outl(CPUX86State *env, int addr, int val);
409 int cpu_x86_inb(CPUX86State *env, int addr);
410 int cpu_x86_inw(CPUX86State *env, int addr);
411 int cpu_x86_inl(CPUX86State *env, int addr);
/* Virtual CPU lifecycle: allocate/init, run (returns an EXCP_* style
   status — confirm), request an interrupt, and tear down. */
414 CPUX86State *cpu_x86_init(void);
415 int cpu_x86_exec(CPUX86State *s);
416 void cpu_x86_interrupt(CPUX86State *s);
417 void cpu_x86_close(CPUX86State *s);
419 /* needed to load some predefinied segment registers */
420 void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector);
422 /* you can call this signal handler from your SIGBUS and SIGSEGV
423 signal handlers to inform the virtual CPU of exceptions. non zero
424 is returned if the signal was handled by the virtual CPU. */
/* NOTE(review): the continuation line with the remaining parameters
   of this prototype is elided in this fragment. */
426 int cpu_x86_signal_handler(int host_signum, struct siginfo *info,
/* Flag bits for cpu_x86_dump_state(): select optional extra output. */
430 #define X86_DUMP_FPU 0x0001 /* dump FPU state too */
431 #define X86_DUMP_CCOP 0x0002 /* dump qemu flag cache */
432 void cpu_x86_dump_state(CPUX86State *env, FILE *f, int flags);
434 /* internal functions */
/* Bit layout of the 'flags' word passed to cpu_x86_gen_code(): each
   GEN_FLAG_*_SHIFT names the bit position of a translation-context
   field (32-bit code segment, address-size add-seg, 32-bit stack,
   VM86 mode, FP stack state, CPL, IOPL, trap flag). Field widths are
   implied by the gaps between adjacent shifts (e.g. CPL at bit 7
   before IOPL at bit 9 suggests a 2-bit CPL) — confirm against the
   code generator. */
436 #define GEN_FLAG_CODE32_SHIFT 0
437 #define GEN_FLAG_ADDSEG_SHIFT 1
438 #define GEN_FLAG_SS32_SHIFT 2
439 #define GEN_FLAG_VM_SHIFT 3
440 #define GEN_FLAG_ST_SHIFT 4
441 #define GEN_FLAG_CPL_SHIFT 7
442 #define GEN_FLAG_IOPL_SHIFT 9
443 #define GEN_FLAG_TF_SHIFT 11
/* Translate guest code starting at pc_start into host code placed in
   gen_code_buf (at most max_code_size bytes); the generated size is
   returned through gen_code_size_ptr. */
445 int cpu_x86_gen_code(uint8_t *gen_code_buf, int max_code_size,
446 int *gen_code_size_ptr,
447 uint8_t *pc_start, uint8_t *cs_base, int flags);
448 void cpu_x86_tblocks_init(void);
450 #endif /* CPU_I386_H */