/*
 * internal execution defines for qemu
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef EXEC_ALL_H
#define EXEC_ALL_H

#include "qemu-common.h"
#include "exec/tb-context.h"
#include "sysemu/cpus.h"

/* Allow seeing translation results - the slowdown should be negligible, so we leave it enabled. */
#define DEBUG_DISAS

/* Page tracking code uses ram addresses in system mode, and virtual
   addresses in userspace mode. Define tb_page_addr_t to be an appropriate
   type. */
#if defined(CONFIG_USER_ONLY)
typedef abi_ulong tb_page_addr_t;
#define TB_PAGE_ADDR_FMT TARGET_ABI_FMT_lx
#else
typedef ram_addr_t tb_page_addr_t;
#define TB_PAGE_ADDR_FMT RAM_ADDR_FMT
#endif

#include "qemu/log.h"

void gen_intermediate_code(CPUState *cpu, struct TranslationBlock *tb);
void restore_state_to_opc(CPUArchState *env, struct TranslationBlock *tb,
                          target_ulong *data);

void cpu_gen_init(void);
bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc);

void QEMU_NORETURN cpu_loop_exit_noexc(CPUState *cpu);
void QEMU_NORETURN cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base,
                              uint32_t flags,
                              int cflags);

void QEMU_NORETURN cpu_loop_exit(CPUState *cpu);
void QEMU_NORETURN cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc);
void QEMU_NORETURN cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc);

#if !defined(CONFIG_USER_ONLY)
void cpu_reloading_memory_map(void);
/**
 * cpu_address_space_init:
 * @cpu: CPU to add this address space to
 * @as: address space to add
 * @asidx: integer index of this address space
 *
 * Add the specified address space to the CPU's cpu_ases list.
 * The address space added with @asidx 0 is the one used for the
 * convenience pointer cpu->as.
 * The target-specific code which registers ASes is responsible
 * for defining what semantics address space 0, 1, 2, etc have.
 *
 * Before the first call to this function, the caller must set
 * cpu->num_ases to the total number of address spaces it needs
 * to support.
 *
 * Note that with KVM only one address space is supported.
 */
void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx);
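/*
 * Illustrative sketch only (not part of this header's API): a target's
 * CPU realize code might register two address spaces roughly like this;
 * the "as_mem" and "as_secure" names are hypothetical.
 *
 *     cpu->num_ases = 2;                          set before the first call
 *     cpu_address_space_init(cpu, as_mem, 0);     index 0 backs cpu->as
 *     cpu_address_space_init(cpu, as_secure, 1);  meaning defined by the target
 */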
#endif

#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG)
/* cputlb.c */
/**
 * tlb_flush_page:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of the specified CPU, for all
 * MMU indexes.
 */
void tlb_flush_page(CPUState *cpu, target_ulong addr);
/**
 * tlb_flush_page_all_cpus:
 * @src: source CPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of all CPUs, for all
 * MMU indexes.
 */
void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr);
/**
 * tlb_flush_page_all_cpus_synced:
 * @src: source CPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of all CPUs, for all MMU indexes, like
 * tlb_flush_page_all_cpus except that the source vCPU's work is
 * scheduled as safe work, so all flushes will be complete once the
 * source vCPU's safe work has run. When that happens depends on when
 * the guest's currently executing TB ends.
 */
void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr);
/**
 * tlb_flush:
 * @cpu: CPU whose TLB should be flushed
 *
 * Flush the entire TLB for the specified CPU. Most CPU architectures
 * allow the implementation to drop entries from the TLB at any time
 * so this is generally safe. If more selective flushing is required
 * use one of the other functions for efficiency.
 */
void tlb_flush(CPUState *cpu);
/**
 * tlb_flush_all_cpus:
 * @src_cpu: source CPU of the flush
 *
 * Flush the entire TLB of all CPUs.
 */
void tlb_flush_all_cpus(CPUState *src_cpu);
/**
 * tlb_flush_all_cpus_synced:
 * @src_cpu: source CPU of the flush
 *
 * Like tlb_flush_all_cpus except that the source vCPU's work is
 * scheduled as safe work, so all flushes will be complete once the
 * source vCPU's safe work has run. When that happens depends on when
 * the guest's currently executing TB ends.
 */
void tlb_flush_all_cpus_synced(CPUState *src_cpu);
/**
 * tlb_flush_page_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr,
                              uint16_t idxmap);
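/*
 * Illustrative sketch only: @idxmap is a bitmap, so flushing one page
 * from (hypothetical) MMU indexes 0 and 2 would look like:
 *
 *     tlb_flush_page_by_mmuidx(cpu, addr, (1 << 0) | (1 << 2));
 */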
/**
 * tlb_flush_page_by_mmuidx_all_cpus:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
                                       uint16_t idxmap);
/**
 * tlb_flush_page_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified MMU
 * indexes, like tlb_flush_page_by_mmuidx_all_cpus except that the
 * source vCPU's work is scheduled as safe work, so all flushes will be
 * complete once the source vCPU's safe work has run. When that happens
 * depends on when the guest's currently executing TB ends.
 */
void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, target_ulong addr,
                                              uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx_all_cpus:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from all TLBs of all CPUs, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from all TLBs of all CPUs, for the specified MMU
 * indexes, like tlb_flush_by_mmuidx_all_cpus except that the source
 * vCPU's work is scheduled as safe work, so all flushes will be
 * complete once the source vCPU's safe work has run. When that happens
 * depends on when the guest's currently executing TB ends.
 */
void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_set_page_with_attrs:
 * @cpu: CPU to add this TLB entry for
 * @vaddr: virtual address of page to add entry for
 * @paddr: physical address of the page
 * @attrs: memory transaction attributes
 * @prot: access permissions (PAGE_READ/PAGE_WRITE/PAGE_EXEC bits)
 * @mmu_idx: MMU index to insert TLB entry for
 * @size: size of the page in bytes
 *
 * Add an entry to this CPU's TLB (a mapping from virtual address
 * @vaddr to physical address @paddr) with the specified memory
 * transaction attributes. This is generally called by the target CPU
 * specific code after it has been called through the tlb_fill()
 * entry point and performed a successful page table walk to find
 * the physical address and attributes for the virtual address
 * which provoked the TLB miss.
 *
 * At most one entry for a given virtual address is permitted. Only a
 * single TARGET_PAGE_SIZE region is mapped; the supplied @size is only
 * used by tlb_flush_page.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs,
                             int prot, int mmu_idx, target_ulong size);
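/*
 * Illustrative sketch only (hypothetical target code, not a real
 * implementation): a target's tlb_fill() path typically walks the guest
 * page tables and, on success, installs the mapping before resuming:
 *
 *     hwaddr paddr;
 *     int prot;
 *     ... page table walk producing paddr, prot and attrs ...
 *     tlb_set_page_with_attrs(cs, vaddr & TARGET_PAGE_MASK, paddr, attrs,
 *                             prot, mmu_idx, TARGET_PAGE_SIZE);
 */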
/* tlb_set_page:
 *
 * This function is equivalent to calling tlb_set_page_with_attrs()
 * with an @attrs argument of MEMTXATTRS_UNSPECIFIED. It's provided
 * as a convenience for CPUs which don't use memory transaction attributes.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size);
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr);
void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx,
                 uintptr_t retaddr);
#else
static inline void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
}
static inline void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
{
}
static inline void tlb_flush_page_all_cpus_synced(CPUState *src,
                                                  target_ulong addr)
{
}
static inline void tlb_flush(CPUState *cpu)
{
}
static inline void tlb_flush_all_cpus(CPUState *src_cpu)
{
}
static inline void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
}
static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
                                            target_ulong addr, uint16_t idxmap)
{
}

static inline void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu,
                                                     target_ulong addr,
                                                     uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                            target_ulong addr,
                                                            uint16_t idxmap)
{
}
static inline void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap)
{
}
static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                       uint16_t idxmap)
{
}
static inline void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
{
}
#endif

#define CODE_GEN_ALIGN 16 /* must be >= the size of an icache line */

/* Estimated block size for TB allocation. */
/* ??? The following is based on a 2015 survey of x86_64 host output.
   Better would seem to be some sort of dynamically sized TB array,
   adapting to the block sizes actually being produced. */
#if defined(CONFIG_SOFTMMU)
#define CODE_GEN_AVG_BLOCK_SIZE 400
#else
#define CODE_GEN_AVG_BLOCK_SIZE 150
#endif

/*
 * Translation Cache-related fields of a TB.
 * This struct exists just for convenience; we keep track of TB's in a binary
 * search tree, and the only fields needed to compare TB's in the tree are
 * @ptr and @size.
 * Note: the address of search data can be obtained by adding @size to @ptr.
 */
struct tb_tc {
    void *ptr;    /* pointer to the translated code */
    size_t size;
};
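/*
 * Illustrative sketch only, spelling out the note above: given a
 * "struct tb_tc *tc", the search data that follows the translated code
 * can be located as
 *
 *     const uint8_t *search_data = (const uint8_t *)tc->ptr + tc->size;
 */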

struct TranslationBlock {
    target_ulong pc;   /* simulated PC corresponding to this block (EIP + CS base) */
    target_ulong cs_base; /* CS base for this block */
    uint32_t flags; /* flags defining in which context the code was generated */
    uint16_t size;      /* size of target code for this block (1 <=
                           size <= TARGET_PAGE_SIZE) */
    uint16_t icount;
    uint32_t cflags;    /* compile flags */
#define CF_COUNT_MASK  0x00007fff
#define CF_LAST_IO     0x00008000 /* Last insn may be an IO access. */
#define CF_NOCACHE     0x00010000 /* To be freed after execution */
#define CF_USE_ICOUNT  0x00020000
#define CF_INVALID     0x00040000 /* TB is stale. Setters need tb_lock */
#define CF_PARALLEL    0x00080000 /* Generate code for a parallel context */
/* cflags' mask for hashing/comparison */
#define CF_HASH_MASK   \
    (CF_COUNT_MASK | CF_LAST_IO | CF_USE_ICOUNT | CF_PARALLEL)

    /* Per-vCPU dynamic tracing state used to generate this TB */
    uint32_t trace_vcpu_dstate;

    struct tb_tc tc;

    /* original tb when cflags has CF_NOCACHE */
    struct TranslationBlock *orig_tb;
    /* first and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[] */
    struct TranslationBlock *page_next[2];
    tb_page_addr_t page_addr[2];

    /* The following data are used to directly call another TB from
     * the code of this one. This can be done either by emitting direct or
     * indirect native jump instructions. These jumps are reset so that the TB
     * just continues its execution. The TB can be linked to another one by
     * setting one of the jump targets (or patching the jump instruction). Only
     * two such jumps are supported.
     */
    uint16_t jmp_reset_offset[2]; /* offset of original jump target */
#define TB_JMP_RESET_OFFSET_INVALID 0xffff /* indicates no jump generated */
    uintptr_t jmp_target_arg[2];  /* target address or offset */

    /* Each TB has an associated circular list of TBs jumping to this one.
     * jmp_list_first points to the first TB jumping to this one.
     * jmp_list_next is used to point to the next TB in a list.
     * Since each TB can have two jumps, it can participate in two lists.
     * jmp_list_first and jmp_list_next are 4-byte aligned pointers to a
     * TranslationBlock structure, but the two least significant bits of
     * them are used to encode which data field of the pointed TB should
     * be used to traverse the list further from that TB:
     * 0 => jmp_list_next[0], 1 => jmp_list_next[1], 2 => jmp_list_first.
     * In other words, 0/1 tells which jump is used in the pointed TB,
     * and 2 means that this is a pointer back to the target TB of this list.
     */
    uintptr_t jmp_list_next[2];
    uintptr_t jmp_list_first;
};
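/*
 * Illustrative sketch only of the jmp_list encoding described above
 * (local variable names are hypothetical):
 *
 *     uintptr_t entry = tb->jmp_list_first;
 *     TranslationBlock *next = (TranslationBlock *)(entry & ~(uintptr_t)3);
 *     unsigned tag = entry & 3;
 *
 * tag 0/1 means the traversal continues via next->jmp_list_next[tag];
 * tag 2 means the entry points back to the target TB of this list.
 */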

extern bool parallel_cpus;

/* Hide the atomic_read to make code a little easier on the eyes */
static inline uint32_t tb_cflags(const TranslationBlock *tb)
{
    return atomic_read(&tb->cflags);
}

/* current cflags for hashing/comparison */
static inline uint32_t curr_cflags(void)
{
    return (parallel_cpus ? CF_PARALLEL : 0)
         | (use_icount ? CF_USE_ICOUNT : 0);
}

void tb_remove(TranslationBlock *tb);
void tb_flush(CPUState *cpu);
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
                                   target_ulong cs_base, uint32_t flags,
                                   uint32_t cf_mask);
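/*
 * Illustrative sketch only: a lookup for the current execution context
 * would pass the current hash inputs plus a cf_mask derived from
 * curr_cflags(), e.g.
 *
 *     tb = tb_htable_lookup(cpu, pc, cs_base, flags,
 *                           curr_cflags() & CF_HASH_MASK);
 *
 * (pc/cs_base/flags come from the target's CPU state; whether the caller
 * applies CF_HASH_MASK here is an assumption of this sketch.)
 */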
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);

/* GETPC is the true target of the return instruction that we'll execute. */
#if defined(CONFIG_TCG_INTERPRETER)
extern uintptr_t tci_tb_ptr;
# define GETPC() tci_tb_ptr
#else
# define GETPC() \
    ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
#endif

/* The true return address will often point to a host insn that is part of
   the next translated guest insn. Adjust the address backward to point to
   the middle of the call insn. Subtracting one would do the job except for
   several compressed mode architectures (arm, mips) which set the low bit
   to indicate the compressed mode; subtracting two works around that. It
   is also the case that there are no host ISAs that contain a call insn
   smaller than 4 bytes, so we don't worry about special-casing this. */
#define GETPC_ADJ   2
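/*
 * Illustrative sketch only (the helper name is hypothetical): GETPC()
 * is normally captured in the outermost helper called from generated
 * code and passed down as a return address, e.g.
 *
 *     void helper_foo(CPUArchState *env, target_ulong addr)
 *     {
 *         uintptr_t ra = GETPC();
 *         ...
 *         cpu_loop_exit_restore(cs, ra);     cs: the corresponding CPUState
 *     }
 */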

void tb_lock(void);
void tb_unlock(void);
void tb_lock_reset(void);

#if !defined(CONFIG_USER_ONLY)

struct MemoryRegion *iotlb_to_region(CPUState *cpu,
                                     hwaddr index, MemTxAttrs attrs);

void tlb_fill(CPUState *cpu, target_ulong addr, MMUAccessType access_type,
              int mmu_idx, uintptr_t retaddr);

#endif

#if defined(CONFIG_USER_ONLY)
void mmap_lock(void);
void mmap_unlock(void);
bool have_mmap_lock(void);

static inline tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    return addr;
}
#else
static inline void mmap_lock(void) {}
static inline void mmap_unlock(void) {}

/* cputlb.c */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr);

void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length);
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr);

/* exec.c */
void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr);

MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen);
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address);
bool memory_region_is_unassigned(MemoryRegion *mr);

#endif

/* vl.c */
extern int singlestep;

#endif