/*
 * internal execution defines for qemu
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef EXEC_ALL_H
#define EXEC_ALL_H

#include "qemu-common.h"
#include "exec/tb-context.h"

/* allow to see translation results - the slowdown should be negligible, so we leave it */
#define DEBUG_DISAS

/* Page tracking code uses ram addresses in system mode, and virtual
   addresses in userspace mode.  Define tb_page_addr_t to be an appropriate
   type. */
#if defined(CONFIG_USER_ONLY)
typedef abi_ulong tb_page_addr_t;
#else
typedef ram_addr_t tb_page_addr_t;
#endif

#include "qemu/log.h"

void gen_intermediate_code(CPUState *cpu, struct TranslationBlock *tb);
void restore_state_to_opc(CPUArchState *env, struct TranslationBlock *tb,
                          target_ulong *data);

void cpu_gen_init(void);
bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc);

void QEMU_NORETURN cpu_loop_exit_noexc(CPUState *cpu);
void QEMU_NORETURN cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base,
                              uint32_t flags,
                              int cflags);

void QEMU_NORETURN cpu_loop_exit(CPUState *cpu);
void QEMU_NORETURN cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc);
void QEMU_NORETURN cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc);

#if !defined(CONFIG_USER_ONLY)
void cpu_reloading_memory_map(void);
/**
 * cpu_address_space_init:
 * @cpu: CPU to add this address space to
 * @as: address space to add
 * @asidx: integer index of this address space
 *
 * Add the specified address space to the CPU's cpu_ases list.
 * The address space added with @asidx 0 is the one used for the
 * convenience pointer cpu->as.
 * The target-specific code which registers ASes is responsible
 * for defining what semantics address space 0, 1, 2, etc have.
 *
 * Before the first call to this function, the caller must set
 * cpu->num_ases to the total number of address spaces it needs
 * to support.
 *
 * Note that with KVM only one address space is supported.
 */
void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx);
#endif
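/*
 * Editorial example (hedged sketch, not part of the original header): a
 * hypothetical target's init code registering two address spaces might use
 * the API above roughly as follows, where as_mem and as_secure are assumed
 * AddressSpace pointers owned by that target:
 *
 *     cpu->num_ases = 2;                          // must be set before the first call
 *     cpu_address_space_init(cpu, as_mem, 0);     // index 0 backs the cpu->as pointer
 *     cpu_address_space_init(cpu, as_secure, 1);  // target defines what index 1 means
 */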

#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG)
/* cputlb.c */
/**
 * tlb_flush_page:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of the specified CPU, for all
 * MMU indexes.
 */
void tlb_flush_page(CPUState *cpu, target_ulong addr);
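/*
 * Editorial example (hedged, not from the original source): target code that
 * has just changed or removed a guest page mapping would typically drop any
 * cached translation for that virtual address, e.g.:
 *
 *     CPUState *cs = CPU(cpu);    // "cpu" is an assumed target CPU object
 *     tlb_flush_page(cs, vaddr);  // invalidate this page in all MMU indexes
 */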
/**
 * tlb_flush_page_all_cpus:
 * @src: source CPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of all CPUs, for all
 * MMU indexes.
 */
void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr);
/**
 * tlb_flush_page_all_cpus_synced:
 * @src: source CPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of all CPUs, for all MMU indexes,
 * like tlb_flush_page_all_cpus except that the source vCPU's work
 * is scheduled as safe work, meaning all flushes will be complete once
 * the source vCPU's safe work is complete.  This will depend on when
 * the guest's translation ends the TB.
 */
void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr);
/**
 * tlb_flush:
 * @cpu: CPU whose TLB should be flushed
 *
 * Flush the entire TLB for the specified CPU. Most CPU architectures
 * allow the implementation to drop entries from the TLB at any time
 * so this is generally safe. If more selective flushing is required
 * use one of the other functions for efficiency.
 */
void tlb_flush(CPUState *cpu);
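/*
 * Editorial example (hedged): a full flush is the usual fallback when a
 * target cannot tell which mappings a guest operation affected, e.g. a
 * hypothetical write to an MMU-wide control register:
 *
 *     env->mmu_ctrl = val;   // "mmu_ctrl" is an assumed target-specific field
 *     tlb_flush(cs);         // cs: the CPUState of the vCPU being emulated
 */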
/**
 * tlb_flush_all_cpus:
 * @src_cpu: source CPU of the flush
 */
void tlb_flush_all_cpus(CPUState *src_cpu);
/**
 * tlb_flush_all_cpus_synced:
 * @src_cpu: source CPU of the flush
 *
 * Like tlb_flush_all_cpus except that the source vCPU's work is
 * scheduled as safe work, meaning all flushes will be complete once
 * the source vCPU's safe work is complete.  This will depend on when
 * the guest's translation ends the TB.
 */
void tlb_flush_all_cpus_synced(CPUState *src_cpu);
/**
 * tlb_flush_page_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr,
                              uint16_t idxmap);
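/*
 * Editorial note (hedged example): @idxmap carries one bit per MMU index.
 * Assuming hypothetical target indexes MMU_KERNEL_IDX and MMU_USER_IDX,
 * flushing a page from just those two indexes would look roughly like:
 *
 *     uint16_t idxmap = (1 << MMU_KERNEL_IDX) | (1 << MMU_USER_IDX);
 *     tlb_flush_page_by_mmuidx(cs, vaddr, idxmap);
 */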
/**
 * tlb_flush_page_by_mmuidx_all_cpus:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
                                       uint16_t idxmap);
/**
 * tlb_flush_page_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified MMU
 * indexes, like tlb_flush_page_by_mmuidx_all_cpus except that the source
 * vCPU's work is scheduled as safe work, meaning all flushes will be
 * complete once the source vCPU's safe work is complete.  This will
 * depend on when the guest's translation ends the TB.
 */
void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, target_ulong addr,
                                              uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx_all_cpus:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from all TLBs of all CPUs, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from all TLBs of all CPUs, for the specified
 * MMU indexes, like tlb_flush_by_mmuidx_all_cpus except that the source
 * vCPU's work is scheduled as safe work, meaning all flushes will be
 * complete once the source vCPU's safe work is complete.  This will
 * depend on when the guest's translation ends the TB.
 */
void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);
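/*
 * Editorial example (hedged): the *_all_cpus_synced variants suit guest
 * "broadcast" TLB maintenance, where the operation must not be treated as
 * complete until every vCPU has flushed.  A hypothetical helper might do:
 *
 *     uint16_t idxmap = (1 << MMU_KERNEL_IDX) | (1 << MMU_USER_IDX);  // assumed indexes
 *     tlb_flush_by_mmuidx_all_cpus_synced(cs, idxmap);  // completes with this vCPU's safe work
 */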
/**
 * tlb_set_page_with_attrs:
 * @cpu: CPU to add this TLB entry for
 * @vaddr: virtual address of page to add entry for
 * @paddr: physical address of the page
 * @attrs: memory transaction attributes
 * @prot: access permissions (PAGE_READ/PAGE_WRITE/PAGE_EXEC bits)
 * @mmu_idx: MMU index to insert TLB entry for
 * @size: size of the page in bytes
 *
 * Add an entry to this CPU's TLB (a mapping from virtual address
 * @vaddr to physical address @paddr) with the specified memory
 * transaction attributes. This is generally called by the target CPU
 * specific code after it has been called through the tlb_fill()
 * entry point and performed a successful page table walk to find
 * the physical address and attributes for the virtual address
 * which provoked the TLB miss.
 *
 * At most one entry for a given virtual address is permitted. Only a
 * single TARGET_PAGE_SIZE region is mapped; the supplied @size is only
 * used by tlb_flush_page.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs,
                             int prot, int mmu_idx, target_ulong size);
/* tlb_set_page:
 *
 * This function is equivalent to calling tlb_set_page_with_attrs()
 * with an @attrs argument of MEMTXATTRS_UNSPECIFIED.  It's provided
 * as a convenience for CPUs which don't use memory transaction attributes.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size);
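/*
 * Editorial example (hedged sketch): a target's tlb_fill() implementation,
 * after a successful page table walk, would install the mapping roughly
 * like this ("walk" and its fields are assumptions, not a real structure):
 *
 *     tlb_set_page_with_attrs(cs, vaddr & TARGET_PAGE_MASK,
 *                             walk.paddr & TARGET_PAGE_MASK, walk.attrs,
 *                             walk.prot, mmu_idx, TARGET_PAGE_SIZE);
 *
 * or call tlb_set_page() with the same arguments minus @attrs when the
 * target does not use memory transaction attributes.
 */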
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr);
void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx,
                 uintptr_t retaddr);
#else
static inline void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
}
static inline void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
{
}
static inline void tlb_flush_page_all_cpus_synced(CPUState *src,
                                                  target_ulong addr)
{
}
static inline void tlb_flush(CPUState *cpu)
{
}
static inline void tlb_flush_all_cpus(CPUState *src_cpu)
{
}
static inline void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
}
static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
                                            target_ulong addr, uint16_t idxmap)
{
}

static inline void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu,
                                                     target_ulong addr,
                                                     uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                            target_ulong addr,
                                                            uint16_t idxmap)
{
}
static inline void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap)
{
}
static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                       uint16_t idxmap)
{
}
static inline void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
{
}
#endif

#define CODE_GEN_ALIGN           16 /* must be >= the size of an icache line */

/* Estimated block size for TB allocation.  */
/* ??? The following is based on a 2015 survey of x86_64 host output.
   Better would seem to be some sort of dynamically sized TB array,
   adapting to the block sizes actually being produced. */
#if defined(CONFIG_SOFTMMU)
#define CODE_GEN_AVG_BLOCK_SIZE 400
#else
#define CODE_GEN_AVG_BLOCK_SIZE 150
#endif

struct TranslationBlock {
    target_ulong pc;      /* simulated PC corresponding to this block (EIP + CS base) */
    target_ulong cs_base; /* CS base for this block */
    uint32_t flags;       /* flags defining in which context the code was generated */
    uint16_t size;        /* size of target code for this block (1 <=
                             size <= TARGET_PAGE_SIZE) */
    uint16_t icount;
    uint32_t cflags;      /* compile flags */
#define CF_COUNT_MASK    0x7fff
#define CF_LAST_IO       0x8000  /* Last insn may be an IO access.  */
#define CF_NOCACHE       0x10000 /* To be freed after execution */
#define CF_USE_ICOUNT    0x20000
#define CF_IGNORE_ICOUNT 0x40000 /* Do not generate icount code */

    /* Per-vCPU dynamic tracing state used to generate this TB */
    uint32_t trace_vcpu_dstate;

    uint16_t invalid;

    void *tc_ptr;         /* pointer to the translated code */
    uint8_t *tc_search;   /* pointer to search data */
    /* original tb when cflags has CF_NOCACHE */
    struct TranslationBlock *orig_tb;
    /* first and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[] */
    struct TranslationBlock *page_next[2];
    tb_page_addr_t page_addr[2];

    /* The following data are used to directly call another TB from
     * the code of this one. This can be done either by emitting direct or
     * indirect native jump instructions. These jumps are reset so that the TB
     * just continues its execution. The TB can be linked to another one by
     * setting one of the jump targets (or patching the jump instruction). Only
     * two such jumps are supported.
     */
    uint16_t jmp_reset_offset[2]; /* offset of original jump target */
#define TB_JMP_RESET_OFFSET_INVALID 0xffff /* indicates no jump generated */
    uintptr_t jmp_target_arg[2];  /* target address or offset */

    /* Each TB has an associated circular list of TBs jumping to this one.
     * jmp_list_first points to the first TB jumping to this one.
     * jmp_list_next is used to point to the next TB in a list.
     * Since each TB can have two jumps, it can participate in two lists.
     * jmp_list_first and jmp_list_next are 4-byte aligned pointers to a
     * TranslationBlock structure, but the two least significant bits of
     * them are used to encode which data field of the pointed TB should
     * be used to traverse the list further from that TB:
     * 0 => jmp_list_next[0], 1 => jmp_list_next[1], 2 => jmp_list_first.
     * In other words, 0/1 tells which jump is used in the pointed TB,
     * and 2 means that this is a pointer back to the target TB of this list.
     */
    uintptr_t jmp_list_next[2];
    uintptr_t jmp_list_first;
};
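/*
 * Editorial example (hedged, illustrating the encoding described above):
 * walking the jmp_list chain means separating the pointer from the 2-bit
 * tag packed into its low bits, roughly:
 *
 *     uintptr_t entry = tb->jmp_list_first;
 *     int n = entry & 3;   // 0/1: which jump slot in the pointed TB, 2: back at the target TB
 *     TranslationBlock *next = (TranslationBlock *)(entry & ~(uintptr_t)3);
 *     // if n is 0 or 1, the walk continues via next->jmp_list_next[n]
 */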

void tb_free(TranslationBlock *tb);
void tb_flush(CPUState *cpu);
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
                                   target_ulong cs_base, uint32_t flags);
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);

/* GETPC is the true target of the return instruction that we'll execute.  */
#if defined(CONFIG_TCG_INTERPRETER)
extern uintptr_t tci_tb_ptr;
# define GETPC() tci_tb_ptr
#else
# define GETPC() \
    ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
#endif

/* The true return address will often point to a host insn that is part of
   the next translated guest insn.  Adjust the address backward to point to
   the middle of the call insn.  Subtracting one would do the job except for
   several compressed mode architectures (arm, mips) which set the low bit
   to indicate the compressed mode; subtracting two works around that.  It
   is also the case that there are no host isas that contain a call insn
   smaller than 4 bytes, so we don't worry about special-casing this.  */
#define GETPC_ADJ   2
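/*
 * Editorial example (hedged, not part of the original header): GETPC() is
 * meant to be evaluated in the outermost helper called from generated code,
 * so the host return address can be passed down as a "retaddr" argument.
 * A hypothetical helper might look like:
 *
 *     void helper_check_something(CPUArchState *env, target_ulong addr)
 *     {
 *         uintptr_t ra = GETPC();             // capture before making other calls
 *         if (!address_is_valid(env, addr)) { // assumed target-specific check
 *             cpu_loop_exit_restore(cs, ra);  // cs: the CPUState for env
 *         }
 *     }
 */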

void tb_lock(void);
void tb_unlock(void);
void tb_lock_reset(void);

#if !defined(CONFIG_USER_ONLY)

struct MemoryRegion *iotlb_to_region(CPUState *cpu,
                                     hwaddr index, MemTxAttrs attrs);

void tlb_fill(CPUState *cpu, target_ulong addr, MMUAccessType access_type,
              int mmu_idx, uintptr_t retaddr);

#endif

#if defined(CONFIG_USER_ONLY)
void mmap_lock(void);
void mmap_unlock(void);
bool have_mmap_lock(void);

static inline tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    return addr;
}
#else
static inline void mmap_lock(void) {}
static inline void mmap_unlock(void) {}

/* cputlb.c */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr);

void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length);
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr);

/* exec.c */
void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr);

MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen);
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address);
bool memory_region_is_unassigned(MemoryRegion *mr);

#endif

/* vl.c */
extern int singlestep;

#endif