/*
 * internal execution defines for qemu
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef EXEC_ALL_H
#define EXEC_ALL_H

#include "qemu-common.h"
#include "exec/tb-context.h"

/* Allow inspection of translation results - the slowdown should be
   negligible, so we leave it enabled. */
#define DEBUG_DISAS

/* Page tracking code uses ram addresses in system mode, and virtual
   addresses in userspace mode. Define tb_page_addr_t to be an appropriate
   type. */
#if defined(CONFIG_USER_ONLY)
typedef abi_ulong tb_page_addr_t;
#define TB_PAGE_ADDR_FMT TARGET_ABI_FMT_lx
#else
typedef ram_addr_t tb_page_addr_t;
#define TB_PAGE_ADDR_FMT RAM_ADDR_FMT
#endif

#include "qemu/log.h"

void gen_intermediate_code(CPUState *cpu, struct TranslationBlock *tb);
void restore_state_to_opc(CPUArchState *env, struct TranslationBlock *tb,
                          target_ulong *data);

void cpu_gen_init(void);
bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc);

void QEMU_NORETURN cpu_loop_exit_noexc(CPUState *cpu);
void QEMU_NORETURN cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base,
                              uint32_t flags,
                              int cflags);

void QEMU_NORETURN cpu_loop_exit(CPUState *cpu);
void QEMU_NORETURN cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc);
void QEMU_NORETURN cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc);

#if !defined(CONFIG_USER_ONLY)
void cpu_reloading_memory_map(void);
/**
 * cpu_address_space_init:
 * @cpu: CPU to add this address space to
 * @as: address space to add
 * @asidx: integer index of this address space
 *
 * Add the specified address space to the CPU's cpu_ases list.
 * The address space added with @asidx 0 is the one used for the
 * convenience pointer cpu->as.
 * The target-specific code which registers ASes is responsible
 * for defining what semantics address space 0, 1, 2, etc have.
 *
 * Before the first call to this function, the caller must set
 * cpu->num_ases to the total number of address spaces it needs
 * to support.
 *
 * Note that with KVM only one address space is supported.
 */
void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx);
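/*
 * Illustrative sketch (not part of this header): a target CPU needing two
 * address spaces might register them at realize time roughly as below; the
 * "as_secure" and "as_nonsecure" names are hypothetical.
 *
 *     cpu->num_ases = 2;
 *     cpu_address_space_init(cpu, as_secure, 0);    // asidx 0 becomes cpu->as
 *     cpu_address_space_init(cpu, as_nonsecure, 1);
 */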
#endif

#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG)
/* cputlb.c */
/**
 * tlb_flush_page:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of the specified CPU, for all
 * MMU indexes.
 */
void tlb_flush_page(CPUState *cpu, target_ulong addr);
/**
 * tlb_flush_page_all_cpus:
 * @src: source CPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of all CPUs, for all
 * MMU indexes.
 */
void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr);
/**
 * tlb_flush_page_all_cpus_synced:
 * @src: source CPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of all CPUs, for all MMU indexes, like
 * tlb_flush_page_all_cpus, except that the source vCPU's flush is
 * scheduled as safe work, meaning all flushes are complete once the
 * source vCPU's safe work has run. When that happens depends on where
 * the guest's translation ends the TB.
 */
void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr);
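/*
 * Illustrative sketch (assumption, not defined here): a helper emulating a
 * broadcast page-invalidate instruction could use the synced variant so
 * every vCPU has flushed before the guest runs past the end of its TB;
 * helper_tlbi_page() and the ENV_GET_CPU usage are hypothetical.
 *
 *     void helper_tlbi_page(CPUArchState *env, target_ulong addr)
 *     {
 *         tlb_flush_page_all_cpus_synced(ENV_GET_CPU(env), addr);
 *     }
 */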
/**
 * tlb_flush:
 * @cpu: CPU whose TLB should be flushed
 *
 * Flush the entire TLB for the specified CPU. Most CPU architectures
 * allow the implementation to drop entries from the TLB at any time
 * so this is generally safe. If more selective flushing is required
 * use one of the other functions for efficiency.
 */
void tlb_flush(CPUState *cpu);
/**
 * tlb_flush_all_cpus:
 * @src_cpu: source CPU of the flush
 *
 * Flush the entire TLB of all CPUs.
 */
void tlb_flush_all_cpus(CPUState *src_cpu);
/**
 * tlb_flush_all_cpus_synced:
 * @src_cpu: source CPU of the flush
 *
 * Like tlb_flush_all_cpus, except that the source vCPU's flush is
 * scheduled as safe work, meaning all flushes are complete once the
 * source vCPU's safe work has run. When that happens depends on where
 * the guest's translation ends the TB.
 */
void tlb_flush_all_cpus_synced(CPUState *src_cpu);
/**
 * tlb_flush_page_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr,
                              uint16_t idxmap);
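/*
 * Illustrative sketch (assumption): @idxmap carries one bit per MMU index,
 * so flushing one page for two hypothetical indexes MMU_KERNEL_IDX and
 * MMU_USER_IDX would look like:
 *
 *     tlb_flush_page_by_mmuidx(cpu, addr,
 *                              (1 << MMU_KERNEL_IDX) | (1 << MMU_USER_IDX));
 */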
/**
 * tlb_flush_page_by_mmuidx_all_cpus:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
                                       uint16_t idxmap);
/**
 * tlb_flush_page_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified MMU
 * indexes, like tlb_flush_page_by_mmuidx_all_cpus, except that the
 * source vCPU's flush is scheduled as safe work, meaning all flushes
 * are complete once the source vCPU's safe work has run. When that
 * happens depends on where the guest's translation ends the TB.
 */
void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, target_ulong addr,
                                              uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx_all_cpus:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from the TLBs of all CPUs, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from the TLBs of all CPUs, for the specified
 * MMU indexes, like tlb_flush_by_mmuidx_all_cpus, except that the
 * source vCPU's flush is scheduled as safe work, meaning all flushes
 * are complete once the source vCPU's safe work has run. When that
 * happens depends on where the guest's translation ends the TB.
 */
void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_set_page_with_attrs:
 * @cpu: CPU to add this TLB entry for
 * @vaddr: virtual address of page to add entry for
 * @paddr: physical address of the page
 * @attrs: memory transaction attributes
 * @prot: access permissions (PAGE_READ/PAGE_WRITE/PAGE_EXEC bits)
 * @mmu_idx: MMU index to insert TLB entry for
 * @size: size of the page in bytes
 *
 * Add an entry to this CPU's TLB (a mapping from virtual address
 * @vaddr to physical address @paddr) with the specified memory
 * transaction attributes. This is generally called by the target CPU
 * specific code after it has been called through the tlb_fill()
 * entry point and performed a successful page table walk to find
 * the physical address and attributes for the virtual address
 * which provoked the TLB miss.
 *
 * At most one entry for a given virtual address is permitted. Only a
 * single TARGET_PAGE_SIZE region is mapped; the supplied @size is only
 * used by tlb_flush_page.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs,
                             int prot, int mmu_idx, target_ulong size);
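/*
 * Illustrative sketch (assumption, not this header's contract): a target's
 * tlb_fill() typically walks the guest page tables and, on success, installs
 * the translation; walk_page_table() and its result fields are hypothetical.
 *
 *     void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType type,
 *                   int mmu_idx, uintptr_t retaddr)
 *     {
 *         PageWalkResult res;                     // hypothetical type
 *         if (!walk_page_table(cs, addr, type, mmu_idx, &res)) {
 *             // set up the guest exception, then unwind to the guest
 *             cpu_loop_exit_restore(cs, retaddr);
 *         }
 *         tlb_set_page_with_attrs(cs, addr & TARGET_PAGE_MASK, res.paddr,
 *                                 res.attrs, res.prot, mmu_idx,
 *                                 TARGET_PAGE_SIZE);
 *     }
 */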
/* tlb_set_page:
 *
 * This function is equivalent to calling tlb_set_page_with_attrs()
 * with an @attrs argument of MEMTXATTRS_UNSPECIFIED. It's provided
 * as a convenience for CPUs which don't use memory transaction attributes.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size);
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr);
void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx,
                 uintptr_t retaddr);
#else
static inline void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
}
static inline void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
{
}
static inline void tlb_flush_page_all_cpus_synced(CPUState *src,
                                                  target_ulong addr)
{
}
static inline void tlb_flush(CPUState *cpu)
{
}
static inline void tlb_flush_all_cpus(CPUState *src_cpu)
{
}
static inline void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
}
static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
                                            target_ulong addr, uint16_t idxmap)
{
}

static inline void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu,
                                                     target_ulong addr,
                                                     uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                            target_ulong addr,
                                                            uint16_t idxmap)
{
}
static inline void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap)
{
}
static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                       uint16_t idxmap)
{
}
static inline void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
{
}
#endif

#define CODE_GEN_ALIGN 16 /* must be >= the size of an icache line */

/* Estimated block size for TB allocation. */
/* ??? The following is based on a 2015 survey of x86_64 host output.
   Better would seem to be some sort of dynamically sized TB array,
   adapting to the block sizes actually being produced. */
#if defined(CONFIG_SOFTMMU)
#define CODE_GEN_AVG_BLOCK_SIZE 400
#else
#define CODE_GEN_AVG_BLOCK_SIZE 150
#endif

/*
 * Translation Cache-related fields of a TB.
 */
struct tb_tc {
    void *ptr;       /* pointer to the translated code */
    uint8_t *search; /* pointer to search data */
};

struct TranslationBlock {
    target_ulong pc;      /* simulated PC corresponding to this block (EIP + CS base) */
    target_ulong cs_base; /* CS base for this block */
    uint32_t flags;  /* flags defining in which context the code was generated */
    uint16_t size;   /* size of target code for this block (1 <=
                        size <= TARGET_PAGE_SIZE) */
    uint16_t icount;
    uint32_t cflags; /* compile flags */
#define CF_COUNT_MASK    0x7fff
#define CF_LAST_IO       0x8000 /* Last insn may be an IO access. */
#define CF_NOCACHE       0x10000 /* To be freed after execution */
#define CF_USE_ICOUNT    0x20000
#define CF_IGNORE_ICOUNT 0x40000 /* Do not generate icount code */
#define CF_INVALID       0x80000 /* TB is stale. Setters must acquire tb_lock */
#define CF_PARALLEL      0x100000 /* Generate code for a parallel context */
/* cflags' mask for hashing/comparison */
#define CF_HASH_MASK     (CF_COUNT_MASK | CF_PARALLEL)

    /* Per-vCPU dynamic tracing state used to generate this TB */
    uint32_t trace_vcpu_dstate;

    struct tb_tc tc;

    /* original tb when cflags has CF_NOCACHE */
    struct TranslationBlock *orig_tb;
    /* first and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[] */
    struct TranslationBlock *page_next[2];
    tb_page_addr_t page_addr[2];

    /* The following data are used to directly call another TB from
     * the code of this one. This can be done either by emitting direct or
     * indirect native jump instructions. These jumps are reset so that the TB
     * just continues its execution. The TB can be linked to another one by
     * setting one of the jump targets (or patching the jump instruction). Only
     * two such jumps are supported.
     */
    uint16_t jmp_reset_offset[2]; /* offset of original jump target */
#define TB_JMP_RESET_OFFSET_INVALID 0xffff /* indicates no jump generated */
    uintptr_t jmp_target_arg[2];  /* target address or offset */

    /* Each TB has an associated circular list of TBs jumping to this one.
     * jmp_list_first points to the first TB jumping to this one.
     * jmp_list_next is used to point to the next TB in a list.
     * Since each TB can have two jumps, it can participate in two lists.
     * jmp_list_first and jmp_list_next are 4-byte aligned pointers to a
     * TranslationBlock structure, but the two least significant bits of
     * them are used to encode which data field of the pointed TB should
     * be used to traverse the list further from that TB:
     * 0 => jmp_list_next[0], 1 => jmp_list_next[1], 2 => jmp_list_first.
     * In other words, 0/1 tells which jump is used in the pointed TB,
     * and 2 means that this is a pointer back to the target TB of this list.
     */
    uintptr_t jmp_list_next[2];
    uintptr_t jmp_list_first;
};
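/*
 * Decoding sketch for the tagged pointers described above (the masking of
 * the two low bits follows directly from the comment in the struct):
 *
 *     uintptr_t entry = tb->jmp_list_first;
 *     TranslationBlock *next = (TranslationBlock *)(entry & ~(uintptr_t)3);
 *     int n = entry & 3;   // 0/1: which jmp_list_next[] to follow;
 *                          // 2: back at the list's target TB
 */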

extern bool parallel_cpus;

/* Hide the atomic_read to make code a little easier on the eyes */
static inline uint32_t tb_cflags(const TranslationBlock *tb)
{
    return atomic_read(&tb->cflags);
}

/* current cflags for hashing/comparison */
static inline uint32_t curr_cflags(void)
{
    return parallel_cpus ? CF_PARALLEL : 0;
}

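/*
 * Illustrative sketch (assumption based on CF_HASH_MASK above): lookup code
 * compares cflags through the hash mask, so a caller searching for a TB that
 * matches the current parallel/serial mode would pass curr_cflags():
 *
 *     TranslationBlock *tb = tb_htable_lookup(cpu, pc, cs_base, flags,
 *                                             curr_cflags());
 */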
void tb_free(TranslationBlock *tb);
void tb_flush(CPUState *cpu);
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
                                   target_ulong cs_base, uint32_t flags,
                                   uint32_t cf_mask);
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);

/* GETPC is the true target of the return instruction that we'll execute. */
#if defined(CONFIG_TCG_INTERPRETER)
extern uintptr_t tci_tb_ptr;
# define GETPC() tci_tb_ptr
#else
# define GETPC() \
    ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
#endif

/* The true return address will often point to a host insn that is part of
   the next translated guest insn. Adjust the address backward to point to
   the middle of the call insn. Subtracting one would do the job except for
   several compressed mode architectures (arm, mips) which set the low bit
   to indicate the compressed mode; subtracting two works around that. It
   is also the case that there are no host ISAs that contain a call insn
   smaller than 4 bytes, so we don't worry about special-casing this. */
#define GETPC_ADJ 2
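/*
 * Illustrative sketch (assumption): a helper called from generated code can
 * capture its return address with GETPC() and pass it to the unwinding
 * machinery, which applies GETPC_ADJ itself; helper_check_something(), the
 * fault test, and the ENV_GET_CPU usage are hypothetical.
 *
 *     void helper_check_something(CPUArchState *env)
 *     {
 *         if (fault_condition(env)) {
 *             cpu_loop_exit_restore(ENV_GET_CPU(env), GETPC());
 *         }
 *     }
 */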

void tb_lock(void);
void tb_unlock(void);
void tb_lock_reset(void);

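/*
 * Illustrative sketch (assumption): callers serialize changes to translation
 * state by bracketing them with the lock, e.g.:
 *
 *     tb_lock();
 *     tb_phys_invalidate(tb, -1);
 *     tb_free(tb);
 *     tb_unlock();
 */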
#if !defined(CONFIG_USER_ONLY)

struct MemoryRegion *iotlb_to_region(CPUState *cpu,
                                     hwaddr index, MemTxAttrs attrs);

void tlb_fill(CPUState *cpu, target_ulong addr, MMUAccessType access_type,
              int mmu_idx, uintptr_t retaddr);

#endif

#if defined(CONFIG_USER_ONLY)
void mmap_lock(void);
void mmap_unlock(void);
bool have_mmap_lock(void);

static inline tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    return addr;
}
#else
static inline void mmap_lock(void) {}
static inline void mmap_unlock(void) {}

/* cputlb.c */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr);

void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length);
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr);

/* exec.c */
void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr);

MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen);
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address);
bool memory_region_is_unassigned(MemoryRegion *mr);

#endif

/* vl.c */
extern int singlestep;

#endif