/*
 * internal execution defines for qemu
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef EXEC_ALL_H
#define EXEC_ALL_H

#include "qemu-common.h"
#include "exec/tb-context.h"

/* Allow inspection of translation results - the slowdown should be
   negligible, so we leave it enabled. */
#define DEBUG_DISAS

/* Page tracking code uses ram addresses in system mode, and virtual
   addresses in userspace mode.  Define tb_page_addr_t to be an
   appropriate type. */
#if defined(CONFIG_USER_ONLY)
typedef abi_ulong tb_page_addr_t;
#else
typedef ram_addr_t tb_page_addr_t;
#endif

/* is_jmp field values */
#define DISAS_NEXT    0 /* next instruction can be analyzed */
#define DISAS_JUMP    1 /* only pc was modified dynamically */
#define DISAS_UPDATE  2 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP 3 /* only pc was modified statically */

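/*
 * Illustrative sketch only (the DisasContext layout and helper names
 * below are assumptions; each target keeps its own): a translator loop
 * typically keeps one of the DISAS_* values in its context and stops
 * emitting ops as soon as the value leaves DISAS_NEXT.
 *
 *   typedef struct DisasContext { int is_jmp; ... } DisasContext;
 *
 *   while (dc->is_jmp == DISAS_NEXT && !end_of_tb_reached(dc)) {
 *       dc->is_jmp = translate_one_insn(dc);   // hypothetical helpers
 *   }
 */
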
#include "qemu/log.h"

void gen_intermediate_code(CPUArchState *env, struct TranslationBlock *tb);
void restore_state_to_opc(CPUArchState *env, struct TranslationBlock *tb,
                          target_ulong *data);

void cpu_gen_init(void);
bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc);

void QEMU_NORETURN cpu_loop_exit_noexc(CPUState *cpu);
void QEMU_NORETURN cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base,
                              uint32_t flags,
                              int cflags);

void QEMU_NORETURN cpu_loop_exit(CPUState *cpu);
void QEMU_NORETURN cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc);
void QEMU_NORETURN cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc);

#if !defined(CONFIG_USER_ONLY)
void cpu_reloading_memory_map(void);
/**
 * cpu_address_space_init:
 * @cpu: CPU to add this address space to
 * @as: address space to add
 * @asidx: integer index of this address space
 *
 * Add the specified address space to the CPU's cpu_ases list.
 * The address space added with @asidx 0 is the one used for the
 * convenience pointer cpu->as.
 * The target-specific code which registers ASes is responsible
 * for defining what semantics address space 0, 1, 2, etc have.
 *
 * Before the first call to this function, the caller must set
 * cpu->num_ases to the total number of address spaces it needs
 * to support.
 *
 * Note that with KVM only one address space is supported.
 */
void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx);
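/*
 * Usage sketch (hedged; my_cpu_register_ases, as_normal and as_secure
 * are invented names, not part of any real target):
 *
 *   static void my_cpu_register_ases(CPUState *cs, AddressSpace *as_normal,
 *                                    AddressSpace *as_secure)
 *   {
 *       cs->num_ases = 2;                          // must be set first
 *       cpu_address_space_init(cs, as_normal, 0);  // asidx 0 backs cpu->as
 *       cpu_address_space_init(cs, as_secure, 1);  // extra, target-defined
 *   }
 */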
/* cputlb.c */
/**
 * tlb_flush_page:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of the specified CPU, for all
 * MMU indexes.
 */
void tlb_flush_page(CPUState *cpu, target_ulong addr);
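/*
 * Minimal sketch (my_target_invalidate_page is an invented helper):
 * after the guest invalidates a single mapping, target MMU code can
 * drop just that page rather than the whole TLB.
 *
 *   static void my_target_invalidate_page(CPUState *cs, target_ulong va)
 *   {
 *       tlb_flush_page(cs, va);   // removes the page for all MMU indexes
 *   }
 */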
/**
 * tlb_flush_page_all_cpus:
 * @src: source CPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of all CPUs, for all
 * MMU indexes.
 */
void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr);
/**
 * tlb_flush_page_all_cpus_synced:
 * @src: source CPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of all CPUs, for all MMU
 * indexes, like tlb_flush_page_all_cpus, except the source vCPU's
 * work is scheduled as safe work, meaning all flushes will be
 * complete once the source vCPU's safe work is complete.  This will
 * depend on when the guest's translation ends the TB.
 */
void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr);
/**
 * tlb_flush:
 * @cpu: CPU whose TLB should be flushed
 *
 * Flush the entire TLB for the specified CPU.  Most CPU architectures
 * allow the implementation to drop entries from the TLB at any time
 * so this is generally safe.  If more selective flushing is required
 * use one of the other functions for efficiency.
 */
void tlb_flush(CPUState *cpu);
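/*
 * Minimal sketch (my_target_set_pgtable_base is an invented helper):
 * a full flush is the usual response to events that may invalidate
 * arbitrary mappings, such as the guest switching page-table bases.
 *
 *   static void my_target_set_pgtable_base(CPUState *cs, uint64_t base)
 *   {
 *       // ... update target-specific MMU state here ...
 *       tlb_flush(cs);   // conservatively drop every cached translation
 *   }
 */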
/**
 * tlb_flush_all_cpus:
 * @src_cpu: source CPU of the flush
 *
 * Flush the entire TLB of all CPUs, for all MMU indexes.
 */
void tlb_flush_all_cpus(CPUState *src_cpu);
/**
 * tlb_flush_all_cpus_synced:
 * @src_cpu: source CPU of the flush
 *
 * Like tlb_flush_all_cpus, except the source vCPU's work is
 * scheduled as safe work, meaning all flushes will be complete once
 * the source vCPU's safe work is complete.  This will depend on when
 * the guest's translation ends the TB.
 */
void tlb_flush_all_cpus_synced(CPUState *src_cpu);
/**
 * tlb_flush_page_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr,
                              uint16_t idxmap);
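/*
 * Sketch: @idxmap is a bitmap keyed by MMU index, so a target that
 * (hypothetically) uses index 0 for user and index 1 for kernel
 * accesses could flush a page from just those two indexes with:
 *
 *   uint16_t idxmap = (1 << 0) | (1 << 1);
 *   tlb_flush_page_by_mmuidx(cs, vaddr, idxmap);
 */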
/**
 * tlb_flush_page_by_mmuidx_all_cpus:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
                                       uint16_t idxmap);
/**
 * tlb_flush_page_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified MMU
 * indexes, like tlb_flush_page_by_mmuidx_all_cpus, except the source
 * vCPU's work is scheduled as safe work, meaning all flushes will be
 * complete once the source vCPU's safe work is complete.  This will
 * depend on when the guest's translation ends the TB.
 */
void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, target_ulong addr,
                                              uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx_all_cpus:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from all TLBs of all CPUs, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from all TLBs of all CPUs, for the specified
 * MMU indexes, like tlb_flush_by_mmuidx_all_cpus, except the source
 * vCPU's work is scheduled as safe work, meaning all flushes will be
 * complete once the source vCPU's safe work is complete.  This will
 * depend on when the guest's translation ends the TB.
 */
void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_set_page_with_attrs:
 * @cpu: CPU to add this TLB entry for
 * @vaddr: virtual address of page to add entry for
 * @paddr: physical address of the page
 * @attrs: memory transaction attributes
 * @prot: access permissions (PAGE_READ/PAGE_WRITE/PAGE_EXEC bits)
 * @mmu_idx: MMU index to insert TLB entry for
 * @size: size of the page in bytes
 *
 * Add an entry to this CPU's TLB (a mapping from virtual address
 * @vaddr to physical address @paddr) with the specified memory
 * transaction attributes.  This is generally called by the target CPU
 * specific code after it has been called through the tlb_fill()
 * entry point and performed a successful page table walk to find
 * the physical address and attributes for the virtual address
 * which provoked the TLB miss.
 *
 * At most one entry for a given virtual address is permitted.  Only a
 * single TARGET_PAGE_SIZE region is mapped; the supplied @size is only
 * used by tlb_flush_page.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs,
                             int prot, int mmu_idx, target_ulong size);
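/*
 * Hedged sketch of the flow described above.  walk_page_table() and
 * its result fields are assumptions made for illustration; real
 * targets implement tlb_fill() with their own page-table walkers.
 *
 *   void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
 *                 int mmu_idx, uintptr_t retaddr)
 *   {
 *       MyWalkResult res;
 *       if (!walk_page_table(cs, addr, access_type, mmu_idx, &res)) {
 *           // target code records the fault, then unwinds (does not return)
 *           cpu_loop_exit_restore(cs, retaddr);
 *       }
 *       tlb_set_page_with_attrs(cs, addr & TARGET_PAGE_MASK,
 *                               res.paddr & TARGET_PAGE_MASK, res.attrs,
 *                               res.prot, mmu_idx, TARGET_PAGE_SIZE);
 *   }
 */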
/* tlb_set_page:
 *
 * This function is equivalent to calling tlb_set_page_with_attrs()
 * with an @attrs argument of MEMTXATTRS_UNSPECIFIED.  It's provided
 * as a convenience for CPUs which don't use memory transaction attributes.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size);
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr);
void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx,
                 uintptr_t retaddr);
#else
static inline void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
}
static inline void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
{
}
static inline void tlb_flush_page_all_cpus_synced(CPUState *src,
                                                  target_ulong addr)
{
}
static inline void tlb_flush(CPUState *cpu)
{
}
static inline void tlb_flush_all_cpus(CPUState *src_cpu)
{
}
static inline void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
}
static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
                                            target_ulong addr, uint16_t idxmap)
{
}

static inline void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu,
                                                     target_ulong addr,
                                                     uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                            target_ulong addr,
                                                            uint16_t idxmap)
{
}
static inline void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap)
{
}
static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                       uint16_t idxmap)
{
}
#endif

#define CODE_GEN_ALIGN 16 /* must be >= the size of an icache line */

/* Estimated block size for TB allocation.  */
/* ??? The following is based on a 2015 survey of x86_64 host output.
   Better would seem to be some sort of dynamically sized TB array,
   adapting to the block sizes actually being produced. */
#if defined(CONFIG_SOFTMMU)
#define CODE_GEN_AVG_BLOCK_SIZE 400
#else
#define CODE_GEN_AVG_BLOCK_SIZE 150
#endif

#if defined(__arm__) || defined(_ARCH_PPC) \
    || defined(__x86_64__) || defined(__i386__) \
    || defined(__sparc__) || defined(__aarch64__) \
    || defined(__s390x__) || defined(__mips__) \
    || defined(CONFIG_TCG_INTERPRETER)
/* NOTE: Direct jump patching must be atomic to be thread-safe. */
#define USE_DIRECT_JUMP
#endif

struct TranslationBlock {
    target_ulong pc;      /* simulated PC corresponding to this block (EIP + CS base) */
    target_ulong cs_base; /* CS base for this block */
    uint32_t flags;       /* flags defining in which context the code was generated */
    uint16_t size;        /* size of target code for this block (1 <=
                             size <= TARGET_PAGE_SIZE) */
    uint16_t icount;
    uint32_t cflags;      /* compile flags */
#define CF_COUNT_MASK    0x7fff
#define CF_LAST_IO       0x8000  /* Last insn may be an IO access.  */
#define CF_NOCACHE       0x10000 /* To be freed after execution */
#define CF_USE_ICOUNT    0x20000
#define CF_IGNORE_ICOUNT 0x40000 /* Do not generate icount code */

    uint16_t invalid;

    void *tc_ptr;        /* pointer to the translated code */
    uint8_t *tc_search;  /* pointer to search data */
    /* original tb when cflags has CF_NOCACHE */
    struct TranslationBlock *orig_tb;
    /* first and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[] */
    struct TranslationBlock *page_next[2];
    tb_page_addr_t page_addr[2];

    /* The following data are used to directly call another TB from
     * the code of this one. This can be done either by emitting direct or
     * indirect native jump instructions. These jumps are reset so that the TB
     * just continues its execution. The TB can be linked to another one by
     * setting one of the jump targets (or patching the jump instruction). Only
     * two such jumps are supported.
     */
    uint16_t jmp_reset_offset[2]; /* offset of original jump target */
#define TB_JMP_RESET_OFFSET_INVALID 0xffff /* indicates no jump generated */
#ifdef USE_DIRECT_JUMP
    uint16_t jmp_insn_offset[2]; /* offset of native jump instruction */
#else
    uintptr_t jmp_target_addr[2]; /* target address for indirect jump */
#endif
    /* Each TB has an associated circular list of TBs jumping to this one.
     * jmp_list_first points to the first TB jumping to this one.
     * jmp_list_next is used to point to the next TB in a list.
     * Since each TB can have two jumps, it can participate in two lists.
     * jmp_list_first and jmp_list_next are 4-byte aligned pointers to a
     * TranslationBlock structure, but the two least significant bits of
     * them are used to encode which data field of the pointed TB should
     * be used to traverse the list further from that TB:
     * 0 => jmp_list_next[0], 1 => jmp_list_next[1], 2 => jmp_list_first.
     * In other words, 0/1 tells which jump is used in the pointed TB,
     * and 2 means that this is a pointer back to the target TB of this list.
     */
    uintptr_t jmp_list_next[2];
    uintptr_t jmp_list_first;
};
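
/*
 * Hedged sketch of walking the tagged-pointer encoding described above
 * (illustration only; the real traversal lives in the TB linking code):
 *
 *   uintptr_t ptr = dest->jmp_list_first;
 *   while ((ptr & 3) != 2) {                               // tag 2 ends the list
 *       TranslationBlock *tb = (TranslationBlock *)(ptr & ~(uintptr_t)3);
 *       unsigned n = ptr & 3;                              // which jump slot of tb
 *       // ... tb reaches dest through its jump slot n ...
 *       ptr = tb->jmp_list_next[n];
 *   }
 */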

void tb_free(TranslationBlock *tb);
void tb_flush(CPUState *cpu);
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);

#if defined(USE_DIRECT_JUMP)

#if defined(CONFIG_TCG_INTERPRETER)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    /* patch the branch destination */
    atomic_set((int32_t *)jmp_addr, addr - (jmp_addr + 4));
    /* no need to flush icache explicitly */
}
#elif defined(_ARCH_PPC)
void ppc_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr);
#define tb_set_jmp_target1 ppc_tb_set_jmp_target
#elif defined(__i386__) || defined(__x86_64__)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    /* patch the branch destination */
    atomic_set((int32_t *)jmp_addr, addr - (jmp_addr + 4));
    /* no need to flush icache explicitly */
}
#elif defined(__s390x__)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    /* patch the branch destination */
    intptr_t disp = addr - (jmp_addr - 2);
    atomic_set((int32_t *)jmp_addr, disp / 2);
    /* no need to flush icache explicitly */
}
#elif defined(__aarch64__)
void aarch64_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr);
#define tb_set_jmp_target1 aarch64_tb_set_jmp_target
#elif defined(__arm__)
void arm_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr);
#define tb_set_jmp_target1 arm_tb_set_jmp_target
#elif defined(__sparc__) || defined(__mips__)
void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr);
#else
#error tb_set_jmp_target1 is missing
#endif

static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, uintptr_t addr)
{
    uint16_t offset = tb->jmp_insn_offset[n];
    tb_set_jmp_target1((uintptr_t)(tb->tc_ptr + offset), addr);
}

#else

/* set the jump target */
static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, uintptr_t addr)
{
    tb->jmp_target_addr[n] = addr;
}

#endif

/* Called with tb_lock held.  */
static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    assert(n < ARRAY_SIZE(tb->jmp_list_next));
    if (tb->jmp_list_next[n]) {
        /* Another thread has already done this while we were
         * outside of the lock; nothing to do in this case */
        return;
    }
    qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc,
                           "Linking TBs %p [" TARGET_FMT_lx
                           "] index %d -> %p [" TARGET_FMT_lx "]\n",
                           tb->tc_ptr, tb->pc, n,
                           tb_next->tc_ptr, tb_next->pc);

    /* patch the native jump address */
    tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc_ptr);

    /* add in TB jmp circular list */
    tb->jmp_list_next[n] = tb_next->jmp_list_first;
    tb_next->jmp_list_first = (uintptr_t)tb | n;
}

/* GETPC is the true target of the return instruction that we'll execute.  */
#if defined(CONFIG_TCG_INTERPRETER)
extern uintptr_t tci_tb_ptr;
# define GETPC() tci_tb_ptr
#else
# define GETPC() \
    ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
#endif

/* The true return address will often point to a host insn that is part of
   the next translated guest insn.  Adjust the address backward to point to
   the middle of the call insn.  Subtracting one would do the job except for
   several compressed mode architectures (arm, mips) which set the low bit
   to indicate the compressed mode; subtracting two works around that.  It
   is also the case that there are no host ISAs that contain a call insn
   smaller than 4 bytes, so we don't worry about special-casing this. */
#define GETPC_ADJ 2
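
/*
 * Hedged sketch (my_helper_load is an invented name): a TCG helper that
 * can fault captures GETPC() in its outermost frame so the slow path can
 * unwind to the guest state of the calling TB.
 *
 *   uint64_t my_helper_load(CPUArchState *env, target_ulong addr)
 *   {
 *       uintptr_t ra = GETPC();
 *       // ... on a fault, after recording it (cs is the CPUState for env):
 *       //     cpu_loop_exit_restore(cs, ra);
 *       return 0;
 *   }
 */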

#if !defined(CONFIG_USER_ONLY)

struct MemoryRegion *iotlb_to_region(CPUState *cpu,
                                     hwaddr index, MemTxAttrs attrs);

void tlb_fill(CPUState *cpu, target_ulong addr, MMUAccessType access_type,
              int mmu_idx, uintptr_t retaddr);

#endif

#if defined(CONFIG_USER_ONLY)
void mmap_lock(void);
void mmap_unlock(void);
bool have_mmap_lock(void);

static inline tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    return addr;
}
#else
static inline void mmap_lock(void) {}
static inline void mmap_unlock(void) {}

/* cputlb.c */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr);

void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length);
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr);

/* exec.c */
void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr);

MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen);
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address);
bool memory_region_is_unassigned(MemoryRegion *mr);

#endif

/* vl.c */
extern int singlestep;

#endif