/*
 * internal execution defines for qemu
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
/* allow the translation results to be seen - the slowdown should be
   negligible, so we leave it enabled */
#define DEBUG_DISAS

#ifndef glue
#define xglue(x, y) x ## y
#define glue(x, y) xglue(x, y)
#define stringify(s) tostring(s)
#define tostring(s) #s
#endif
#if __GNUC__ < 3
#define __builtin_expect(x, n) (x)
#endif

#ifdef __i386__
#define REGPARM(n) __attribute((regparm(n)))
#else
#define REGPARM(n)
#endif
/* is_jmp field values */
#define DISAS_NEXT    0 /* next instruction can be analyzed */
#define DISAS_JUMP    1 /* only pc was modified dynamically */
#define DISAS_UPDATE  2 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP 3 /* only pc was modified statically */
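
/* NOTE: illustrative sketch, not part of the original interface: a target
   decoder typically records in its (target-specific) DisasContext how the
   current block ends, e.g.: */
#if 0
    if (insn_is_computed_branch)        /* hypothetical decoder state */
        dc->is_jmp = DISAS_JUMP;        /* pc changed, rest of state intact */
    else if (insn_is_direct_branch)
        dc->is_jmp = DISAS_TB_JUMP;     /* static target: can be chained */
    else
        dc->is_jmp = DISAS_NEXT;        /* keep translating */
#endif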
struct TranslationBlock;

/* XXX: make safe guess about sizes */
#define MAX_OP_PER_INSTR 32
#define OPC_BUF_SIZE 512
#define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR)

#define OPPARAM_BUF_SIZE (OPC_BUF_SIZE * 3)

extern uint16_t gen_opc_buf[OPC_BUF_SIZE];
extern uint32_t gen_opparam_buf[OPPARAM_BUF_SIZE];
extern long gen_labels[OPC_BUF_SIZE];
extern int nb_gen_labels;
extern target_ulong gen_opc_pc[OPC_BUF_SIZE];
extern target_ulong gen_opc_npc[OPC_BUF_SIZE];
extern uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
extern uint8_t gen_opc_instr_start[OPC_BUF_SIZE];
extern target_ulong gen_opc_jump_pc[2];
extern uint32_t gen_opc_hflags[OPC_BUF_SIZE];

typedef void (GenOpFunc)(void);
typedef void (GenOpFunc1)(long);
typedef void (GenOpFunc2)(long, long);
typedef void (GenOpFunc3)(long, long, long);
#if defined(TARGET_I386)

void optimize_flags_init(void);

#endif
int gen_intermediate_code(CPUState *env, struct TranslationBlock *tb);
int gen_intermediate_code_pc(CPUState *env, struct TranslationBlock *tb);
void dump_ops(const uint16_t *opc_buf, const uint32_t *opparam_buf);
int cpu_gen_code(CPUState *env, struct TranslationBlock *tb,
                 int max_code_size, int *gen_code_size_ptr);
int cpu_restore_state(struct TranslationBlock *tb,
                      CPUState *env, unsigned long searched_pc,
                      void *puc);
int cpu_gen_code_copy(CPUState *env, struct TranslationBlock *tb,
                      int max_code_size, int *gen_code_size_ptr);
int cpu_restore_state_copy(struct TranslationBlock *tb,
                           CPUState *env, unsigned long searched_pc,
                           void *puc);
void cpu_resume_from_signal(CPUState *env1, void *puc);
void cpu_exec_init(CPUState *env);
int page_unprotect(target_ulong address, unsigned long pc, void *puc);
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access);
void tb_invalidate_page_range(target_ulong start, target_ulong end);
void tlb_flush_page(CPUState *env, target_ulong addr);
void tlb_flush(CPUState *env, int flush_global);
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int is_user, int is_softmmu);
static inline int tlb_set_page(CPUState *env, target_ulong vaddr,
                               target_phys_addr_t paddr, int prot,
                               int is_user, int is_softmmu)
{
    /* pages that are readable are also made executable */
    if (prot & PAGE_READ)
        prot |= PAGE_EXEC;
    return tlb_set_page_exec(env, vaddr, paddr, prot, is_user, is_softmmu);
}
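
/* NOTE: illustrative usage sketch (hypothetical local names): a target's
   MMU fault handler installs the mapping once the translation has been
   resolved. */
#if 0
    ret = tlb_set_page(env, vaddr & TARGET_PAGE_MASK, paddr & TARGET_PAGE_MASK,
                       prot, is_user, is_softmmu);
#endif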
#define CODE_GEN_MAX_SIZE        65536
#define CODE_GEN_ALIGN           16 /* must be >= the size of an icache line */

#define CODE_GEN_PHYS_HASH_BITS     15
#define CODE_GEN_PHYS_HASH_SIZE     (1 << CODE_GEN_PHYS_HASH_BITS)
/* maximum total translated code allocated */

/* NOTE: the translated code area cannot be too big because on some
   archs the range of "fast" function calls is limited. Here is a
   summary of the ranges:

   i386  : signed 32 bits
   arm   : signed 26 bits
   ppc   : signed 24 bits
   sparc : signed 32 bits
   alpha : signed 23 bits
*/
#if defined(__alpha__)
#define CODE_GEN_BUFFER_SIZE     (2 * 1024 * 1024)
#elif defined(__ia64)
#define CODE_GEN_BUFFER_SIZE     (4 * 1024 * 1024) /* range of addl */
#elif defined(__powerpc__)
#define CODE_GEN_BUFFER_SIZE     (6 * 1024 * 1024)
#else
#define CODE_GEN_BUFFER_SIZE     (16 * 1024 * 1024)
#endif

//#define CODE_GEN_BUFFER_SIZE     (128 * 1024)
/* estimated block size for TB allocation */
/* XXX: use a per code average code fragment size and modulate it
   according to the host CPU */
#if defined(CONFIG_SOFTMMU)
#define CODE_GEN_AVG_BLOCK_SIZE 128
#else
#define CODE_GEN_AVG_BLOCK_SIZE 64
#endif

#define CODE_GEN_MAX_BLOCKS    (CODE_GEN_BUFFER_SIZE / CODE_GEN_AVG_BLOCK_SIZE)
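/* e.g. with the default 16 MB buffer and a 128 byte average block:
   CODE_GEN_MAX_BLOCKS = (16 * 1024 * 1024) / 128 = 131072 TBs */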
#if defined(__powerpc__)
#define USE_DIRECT_JUMP
#endif
#if defined(__i386__) && !defined(_WIN32)
#define USE_DIRECT_JUMP
#endif
typedef struct TranslationBlock {
    target_ulong pc;      /* simulated PC corresponding to this block (EIP + CS base) */
    target_ulong cs_base; /* CS base for this block */
    unsigned int flags;   /* flags defining in which context the code was generated */
    uint16_t size;        /* size of target code for this block (1 <=
                             size <= TARGET_PAGE_SIZE) */
    uint16_t cflags;      /* compile flags */
#define CF_CODE_COPY   0x0001 /* block was generated in code copy mode */
#define CF_TB_FP_USED  0x0002 /* fp ops are used in the TB */
#define CF_FP_USED     0x0004 /* fp ops are used in the TB or in a chained TB */
#define CF_SINGLE_INSN 0x0008 /* compile only a single instruction */

    uint8_t *tc_ptr;      /* pointer to the translated code */
    /* next matching tb for physical address. */
    struct TranslationBlock *phys_hash_next;
    /* first and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[] */
    struct TranslationBlock *page_next[2];
    target_ulong page_addr[2];

    /* the following data are used to directly call another TB from
       the code of this one. */
    uint16_t tb_next_offset[2]; /* offset of original jump target */
#ifdef USE_DIRECT_JUMP
    uint16_t tb_jmp_offset[4]; /* offset of jump instruction */
#else
    uint32_t tb_next[2]; /* address of jump generated code */
#endif
    /* list of TBs jumping to this one. This is a circular list using
       the two least significant bits of the pointers to tell what is
       the next pointer: 0 = jmp_next[0], 1 = jmp_next[1], 2 =
       jmp_first */
    struct TranslationBlock *jmp_next[2];
    struct TranslationBlock *jmp_first;
} TranslationBlock;
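
/* NOTE: illustrative sketch, not part of the original interface: walking
   the circular list of TBs that jump into a given TB.  The low two bits
   of each link encode which field holds the next pointer (0/1 =
   jmp_next[n] of the pointed-to TB, 2 = back at jmp_first, i.e. the
   list head). */
#if 0
static void tb_visit_incoming_jumps(TranslationBlock *tb)
{
    TranslationBlock *ptb = tb->jmp_first;
    for (;;) {
        int n = (long)ptb & 3;
        TranslationBlock *tb1 = (TranslationBlock *)((long)ptb & ~3);
        if (n == 2)
            break;              /* reached the head marker again */
        /* tb1 jumps to tb through its jump slot n */
        ptb = tb1->jmp_next[n];
    }
}
#endif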
static inline unsigned int tb_jmp_cache_hash_page(target_ulong pc)
{
    target_ulong tmp;
    tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
    return (tmp >> TB_JMP_PAGE_BITS) & TB_JMP_PAGE_MASK;
}
static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
{
    target_ulong tmp;
    tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
    return (((tmp >> TB_JMP_PAGE_BITS) & TB_JMP_PAGE_MASK) |
            (tmp & TB_JMP_ADDR_MASK));
}
static inline unsigned int tb_phys_hash_func(unsigned long pc)
{
    return pc & (CODE_GEN_PHYS_HASH_SIZE - 1);
}
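
/* NOTE: illustrative sketch (an assumption about usage, mirroring how the
   translator looks a block up by physical address): hash the physical PC,
   then follow the phys_hash_next chain; real lookups also compare pc,
   cs_base and flags. */
#if 0
static TranslationBlock *tb_find_phys(target_ulong phys_pc)
{
    TranslationBlock *tb = tb_phys_hash[tb_phys_hash_func(phys_pc)];
    while (tb && tb->page_addr[0] != (phys_pc & TARGET_PAGE_MASK))
        tb = tb->phys_hash_next;
    return tb;
}
#endif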
TranslationBlock *tb_alloc(target_ulong pc);
void tb_flush(CPUState *env);
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2);

extern TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];

extern uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
extern uint8_t *code_gen_ptr;
#if defined(USE_DIRECT_JUMP)

#if defined(__powerpc__)
static inline void tb_set_jmp_target1(unsigned long jmp_addr, unsigned long addr)
{
    uint32_t val, *ptr;

    /* patch the branch destination */
    ptr = (uint32_t *)jmp_addr;
    val = *ptr;
    val = (val & ~0x03fffffc) | ((addr - jmp_addr) & 0x03fffffc);
    *ptr = val;
    /* flush icache */
    asm volatile ("dcbst 0,%0" : : "r"(ptr) : "memory");
    asm volatile ("sync" : : : "memory");
    asm volatile ("icbi 0,%0" : : "r"(ptr) : "memory");
    asm volatile ("sync" : : : "memory");
    asm volatile ("isync" : : : "memory");
}
#elif defined(__i386__)
static inline void tb_set_jmp_target1(unsigned long jmp_addr, unsigned long addr)
{
    /* patch the branch destination */
    *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
    /* no need to flush icache explicitly */
}
#endif
static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, unsigned long addr)
{
    unsigned long offset;

    offset = tb->tb_jmp_offset[n];
    tb_set_jmp_target1((unsigned long)(tb->tc_ptr + offset), addr);
    offset = tb->tb_jmp_offset[n + 2];
    if (offset != 0xffff)
        tb_set_jmp_target1((unsigned long)(tb->tc_ptr + offset), addr);
}
#else

/* set the jump target */
static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, unsigned long addr)
{
    tb->tb_next[n] = addr;
}

#endif
static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    /* NOTE: this test is only needed for thread safety */
    if (!tb->jmp_next[n]) {
        /* patch the native jump address */
        tb_set_jmp_target(tb, n, (unsigned long)tb_next->tc_ptr);

        /* add in TB jmp circular list */
        tb->jmp_next[n] = tb_next->jmp_first;
        tb_next->jmp_first = (TranslationBlock *)((long)(tb) | (n));
    }
}
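
/* NOTE: illustrative usage sketch (hypothetical local names): after the
   next TB has been looked up, the current TB is chained to it so the
   generated code branches there directly; pointing the jump back at the
   TB's own epilogue through tb_next_offset[] undoes the chaining. */
#if 0
    tb_add_jump(tb, n, tb_next);    /* chain jump slot n (0 or 1) */
    /* ... later, to unchain slot n: */
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
#endif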
TranslationBlock *tb_find_pc(unsigned long pc_ptr);

#ifndef offsetof
#define offsetof(type, field) ((size_t) &((type *)0)->field)
#endif
#if defined(_WIN32)
#define ASM_DATA_SECTION ".section \".data\"\n"
#define ASM_PREVIOUS_SECTION ".section .text\n"
#elif defined(__APPLE__)
#define ASM_DATA_SECTION ".data\n"
#define ASM_PREVIOUS_SECTION ".text\n"
#define ASM_NAME(x) "_" #x
#else
#define ASM_DATA_SECTION ".section \".data\"\n"
#define ASM_PREVIOUS_SECTION ".previous\n"
#define ASM_NAME(x) #x
#endif

#define ASM_OP_LABEL_NAME(n, opname) \
    ASM_NAME(__op_label) #n "." ASM_NAME(opname)
#if defined(__powerpc__)

/* we patch the jump instruction directly */
#define GOTO_TB(opname, tbparam, n)\
do {\
    asm volatile (ASM_DATA_SECTION\
                  ASM_OP_LABEL_NAME(n, opname) ":\n"\
                  ".long 1f\n"\
                  ASM_PREVIOUS_SECTION \
                  "b " ASM_NAME(__op_jmp) #n "\n"\
                  "1:\n");\
} while (0)

#elif defined(__i386__) && defined(USE_DIRECT_JUMP)

/* we patch the jump instruction directly */
#define GOTO_TB(opname, tbparam, n)\
do {\
    asm volatile (".section .data\n"\
                  ASM_OP_LABEL_NAME(n, opname) ":\n"\
                  ".long 1f\n"\
                  ASM_PREVIOUS_SECTION \
                  "jmp " ASM_NAME(__op_jmp) #n "\n"\
                  "1:\n");\
} while (0)

#else

/* jump to next block operations (more portable code, does not need
   cache flushing, but slower because of indirect jump) */
#define GOTO_TB(opname, tbparam, n)\
do {\
    static void __attribute__((unused)) *dummy ## n = &&dummy_label ## n;\
    static void __attribute__((unused)) *__op_label ## n \
        __asm__(ASM_OP_LABEL_NAME(n, opname)) = &&label ## n;\
    goto *(void *)(((TranslationBlock *)tbparam)->tb_next[n]);\
label ## n: ;\
dummy_label ## n: ;\
} while (0)

#endif
extern CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
extern CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
extern void *io_mem_opaque[IO_MEM_NB_ENTRIES];
#if defined(__powerpc__)
static inline int testandset (int *p)
{
    int ret;
    __asm__ __volatile__ (
                          "0:    lwarx %0,0,%1\n"
                          "      xor. %0,%3,%0\n"
                          "      bne 1f\n"
                          "      stwcx. %2,0,%1\n"
                          "      bne- 0b\n"
                          "1:    "
                          : "=&r" (ret)
                          : "r" (p), "r" (1), "r" (0)
                          : "cr0", "memory");
    return ret;
}
#elif defined(__i386__)
static inline int testandset (int *p)
{
    long int readval = 0;

    __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
                          : "+m" (*p), "+a" (readval)
                          : "r" (1)
                          : "cc");
    return readval;
}
#elif defined(__x86_64__)
static inline int testandset (int *p)
{
    long int readval = 0;

    __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
                          : "+m" (*p), "+a" (readval)
                          : "r" (1)
                          : "cc");
    return readval;
}
#elif defined(__s390__)
static inline int testandset (int *p)
{
    int ret;

    __asm__ __volatile__ ("0: cs %0,%1,0(%2)\n"
                          "   jl 0b"
                          : "=&d" (ret)
                          : "r" (1), "a" (p), "0" (*p)
                          : "cc", "memory");
    return ret;
}
#elif defined(__alpha__)
static inline int testandset (int *p)
{
    int ret;
    unsigned long one;

    __asm__ __volatile__ ("0: mov 1,%2\n"
                          "   ldl_l %0,%1\n"
                          "   stl_c %2,%1\n"
                          "   beq %2,1f\n"
                          ".subsection 2\n"
                          "1: br 0b\n"
                          ".previous"
                          : "=r" (ret), "=m" (*p), "=r" (one)
                          : "m" (*p));
    return ret;
}
#elif defined(__sparc__)
static inline int testandset (int *p)
{
    int ret;

    __asm__ __volatile__("ldstub [%1], %0"
                         : "=r" (ret)
                         : "r" (p)
                         : "memory");

    return (ret ? 1 : 0);
}
#elif defined(__arm__)
static inline int testandset (int *spinlock)
{
    register unsigned int ret;
    __asm__ __volatile__("swp %0, %1, [%2]"
                         : "=r"(ret)
                         : "0"(1), "r"(spinlock));

    return ret;
}
#elif defined(__mc68000)
static inline int testandset (int *p)
{
    char ret;
    __asm__ __volatile__("tas %1; sne %0"
                         : "=r" (ret)
                         : "m" (p)
                         : "cc", "memory");
    return ret;
}
#elif defined(__ia64)

#include <ia64intrin.h>

static inline int testandset (int *p)
{
    return __sync_lock_test_and_set (p, 1);
}
#elif defined(__mips__)
static inline int testandset (int *p)
{
    int ret;

    __asm__ __volatile__ (
        "   .set push       \n"
        "   .set noat       \n"
        "   .set mips2      \n"
        "1: li   $1, 1      \n"
        "   ll   %0, %1     \n"
        "   sc   $1, %1     \n"
        "   beqz $1, 1b     \n"
        "   .set pop        "
        : "=r" (ret), "+R" (*p)
        :
        : "memory");

    return ret;
}
#else
#error unimplemented CPU support
#endif
typedef int spinlock_t;

#define SPIN_LOCK_UNLOCKED 0
#if defined(CONFIG_USER_ONLY)
static inline void spin_lock(spinlock_t *lock)
{
    while (testandset(lock));
}

static inline void spin_unlock(spinlock_t *lock)
{
    *lock = 0;
}

static inline int spin_trylock(spinlock_t *lock)
{
    return !testandset(lock);
}
#else
/* the single-threaded system emulator needs no real locking */
static inline void spin_lock(spinlock_t *lock)
{
}

static inline void spin_unlock(spinlock_t *lock)
{
}

static inline int spin_trylock(spinlock_t *lock)
{
    return 1;
}
#endif
extern spinlock_t tb_lock;

extern int tb_invalidated_flag;
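
/* NOTE: illustrative usage sketch: in user mode, TB bookkeeping is
   protected by tb_lock since signal handlers can run translation code
   on the same structures. */
#if 0
    spin_lock(&tb_lock);
    /* ... allocate, link or invalidate TranslationBlocks ... */
    spin_unlock(&tb_lock);
#endif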
#if !defined(CONFIG_USER_ONLY)

void tlb_fill(target_ulong addr, int is_write, int is_user,
              void *retaddr);

#define ACCESS_TYPE 3
#define MEMSUFFIX _code
#define env cpu_single_env

#define DATA_SIZE 1
#include "softmmu_header.h"

#define DATA_SIZE 2
#include "softmmu_header.h"

#define DATA_SIZE 4
#include "softmmu_header.h"

#define DATA_SIZE 8
#include "softmmu_header.h"

#undef ACCESS_TYPE
#undef MEMSUFFIX
#undef env

#endif
#if defined(CONFIG_USER_ONLY)
static inline target_ulong get_phys_addr_code(CPUState *env, target_ulong addr)
{
    return addr;
}
#else
/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
   is the offset relative to phys_ram_base */
static inline target_ulong get_phys_addr_code(CPUState *env, target_ulong addr)
{
    int is_user, index, pd;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
#if defined(TARGET_I386)
    is_user = ((env->hflags & HF_CPL_MASK) == 3);
#elif defined (TARGET_PPC)
    is_user = msr_pr;
#elif defined (TARGET_MIPS)
    is_user = ((env->hflags & MIPS_HFLAG_MODE) == MIPS_HFLAG_UM);
#elif defined (TARGET_SPARC)
    is_user = (env->psrs == 0);
#elif defined (TARGET_ARM)
    is_user = ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR);
#elif defined (TARGET_SH4)
    is_user = ((env->sr & SR_MD) == 0);
#elif defined (TARGET_ALPHA)
    is_user = ((env->ps >> 3) & 3);
#else
#error unimplemented CPU
#endif
    if (__builtin_expect(env->tlb_table[is_user][index].addr_code !=
                         (addr & TARGET_PAGE_MASK), 0)) {
        /* TLB entry is for a different page: refill it */
        ldub_code(addr);
    }
    pd = env->tlb_table[is_user][index].addr_code & ~TARGET_PAGE_MASK;
    if (pd > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        cpu_abort(env, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
    }
    return addr + env->tlb_table[is_user][index].addend - (unsigned long)phys_ram_base;
}
#endif
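
/* NOTE: illustrative usage sketch: the returned offset is what the TB
   lookup machinery hashes, so that different virtual mappings of the
   same physical page share their translated code. */
#if 0
    target_ulong phys_pc = get_phys_addr_code(env, pc);
    TranslationBlock *tb = tb_phys_hash[tb_phys_hash_func(phys_pc)];
#endif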
#ifdef USE_KQEMU
#define KQEMU_MODIFY_PAGE_MASK (0xff & ~(VGA_DIRTY_FLAG | CODE_DIRTY_FLAG))

int kqemu_init(CPUState *env);
int kqemu_cpu_exec(CPUState *env);
void kqemu_flush_page(CPUState *env, target_ulong addr);
void kqemu_flush(CPUState *env, int global);
void kqemu_set_notdirty(CPUState *env, ram_addr_t ram_addr);
void kqemu_modify_page(CPUState *env, ram_addr_t ram_addr);
void kqemu_cpu_interrupt(CPUState *env);
void kqemu_record_dump(void);

static inline int kqemu_is_ok(CPUState *env)
{
    return(env->kqemu_enabled &&
           (env->cr[0] & CR0_PE_MASK) &&
           !(env->hflags & HF_INHIBIT_IRQ_MASK) &&
           (env->eflags & IF_MASK) &&
           !(env->eflags & VM_MASK) &&
           (env->kqemu_enabled == 2 ||
            ((env->hflags & HF_CPL_MASK) == 3 &&
             (env->eflags & IOPL_MASK) != IOPL_MASK)));
}
#endif
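
/* NOTE: illustrative usage sketch: an execution loop hands control to the
   kqemu accelerator only when the guest is in a state kqemu can handle,
   and falls back to dynamic translation otherwise. */
#if 0
    if (kqemu_is_ok(env)) {
        int ret = kqemu_cpu_exec(env);
        /* ret tells the caller whether to resume normal translation */
    }
#endif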