]> Git Repo - qemu.git/blame - include/qom/cpu.h
target/arm: Enable API, APK bits in SCR, HCR
[qemu.git] / include / qom / cpu.h
CommitLineData
dd83b06a
AF
1/*
2 * QEMU CPU model
3 *
4 * Copyright (c) 2012 SUSE LINUX Products GmbH
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version 2
9 * of the License, or (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see
18 * <http://www.gnu.org/licenses/gpl-2.0.html>
19 */
20#ifndef QEMU_CPU_H
21#define QEMU_CPU_H
22
961f8395 23#include "hw/qdev-core.h"
37b9de46 24#include "disas/bfd.h"
c658b94f 25#include "exec/hwaddr.h"
66b9b43c 26#include "exec/memattrs.h"
9af23989 27#include "qapi/qapi-types-run-state.h"
48151859 28#include "qemu/bitmap.h"
c0b05ec5 29#include "qemu/fprintf-fn.h"
068a5ea0 30#include "qemu/rcu_queue.h"
bdc44640 31#include "qemu/queue.h"
1de7afc9 32#include "qemu/thread.h"
dd83b06a 33
b5ba1cc6
QN
34typedef int (*WriteCoreDumpFunction)(const void *buf, size_t size,
35 void *opaque);
c72bf468 36
577f42c0
AF
37/**
38 * vaddr:
39 * Type wide enough to contain any #target_ulong virtual address.
40 */
41typedef uint64_t vaddr;
42#define VADDR_PRId PRId64
43#define VADDR_PRIu PRIu64
44#define VADDR_PRIo PRIo64
45#define VADDR_PRIx PRIx64
46#define VADDR_PRIX PRIX64
47#define VADDR_MAX UINT64_MAX
48
dd83b06a
AF
49/**
50 * SECTION:cpu
51 * @section_id: QEMU-cpu
52 * @title: CPU Class
53 * @short_description: Base class for all CPUs
54 */
55
56#define TYPE_CPU "cpu"
57
0d6d1ab4
AF
58/* Since this macro is used a lot in hot code paths and in conjunction with
59 * FooCPU *foo_env_get_cpu(), we deviate from usual QOM practice by using
60 * an unchecked cast.
61 */
62#define CPU(obj) ((CPUState *)(obj))
63
dd83b06a
AF
64#define CPU_CLASS(class) OBJECT_CLASS_CHECK(CPUClass, (class), TYPE_CPU)
65#define CPU_GET_CLASS(obj) OBJECT_GET_CLASS(CPUClass, (obj), TYPE_CPU)
66
b35399bb
SS
/* Kind of memory access, as seen by the MMU fault / unaligned-access hooks. */
typedef enum MMUAccessType {
    MMU_DATA_LOAD = 0,
    MMU_DATA_STORE = 1,
    MMU_INST_FETCH = 2
} MMUAccessType;
72
568496c0 73typedef struct CPUWatchpoint CPUWatchpoint;
dd83b06a 74
c658b94f
AF
75typedef void (*CPUUnassignedAccess)(CPUState *cpu, hwaddr addr,
76 bool is_write, bool is_exec, int opaque,
77 unsigned size);
78
bdf7ae5b
AF
79struct TranslationBlock;
80
dd83b06a
AF
81/**
82 * CPUClass:
2b8c2754
AF
83 * @class_by_name: Callback to map -cpu command line model name to an
84 * instantiatable CPU type.
94a444b2 85 * @parse_features: Callback to parse command line arguments.
f5df5baf 86 * @reset: Callback to reset the #CPUState to its initial state.
91b1df8c 87 * @reset_dump_flags: #CPUDumpFlags to use for reset logging.
8c2e1b00 88 * @has_work: Callback for checking if there is work to do.
97a8ea5a 89 * @do_interrupt: Callback for interrupt handling.
c658b94f 90 * @do_unassigned_access: Callback for unassigned access handling.
0dff0939 91 * (this is deprecated: new targets should use do_transaction_failed instead)
93e22326
PB
92 * @do_unaligned_access: Callback for unaligned access handling, if
93 * the target defines #ALIGNED_ONLY.
0dff0939
PM
94 * @do_transaction_failed: Callback for handling failed memory transactions
95 * (ie bus faults or external aborts; not MMU faults)
c08295d4
PM
96 * @virtio_is_big_endian: Callback to return %true if a CPU which supports
97 * runtime configurable endianness is currently big-endian. Non-configurable
98 * CPUs can use the default implementation of this method. This method should
99 * not be used by any callers other than the pre-1.0 virtio devices.
f3659eee 100 * @memory_rw_debug: Callback for GDB memory access.
878096ee
AF
101 * @dump_state: Callback for dumping state.
102 * @dump_statistics: Callback for dumping statistics.
997395d3 103 * @get_arch_id: Callback for getting architecture-dependent CPU ID.
444d5590 104 * @get_paging_enabled: Callback for inquiring whether paging is enabled.
a23bbfda 105 * @get_memory_mapping: Callback for obtaining the memory mappings.
f45748f1 106 * @set_pc: Callback for setting the Program Counter register.
bdf7ae5b
AF
107 * @synchronize_from_tb: Callback for synchronizing state from a TCG
108 * #TranslationBlock.
7510454e 109 * @handle_mmu_fault: Callback for handling an MMU fault.
00b941e5 110 * @get_phys_page_debug: Callback for obtaining a physical address.
1dc6fb1f
PM
111 * @get_phys_page_attrs_debug: Callback for obtaining a physical address and the
112 * associated memory transaction attributes to use for the access.
113 * CPUs which use memory transaction attributes should implement this
114 * instead of get_phys_page_debug.
d7f25a9e
PM
115 * @asidx_from_attrs: Callback to return the CPU AddressSpace to use for
116 * a memory access with the specified memory transaction attributes.
5b50e790
AF
117 * @gdb_read_register: Callback for letting GDB read a register.
118 * @gdb_write_register: Callback for letting GDB write a register.
568496c0
SF
119 * @debug_check_watchpoint: Callback: return true if the architectural
120 * watchpoint whose address has matched should really fire.
86025ee4 121 * @debug_excp_handler: Callback for handling debug exceptions.
c08295d4
PM
122 * @write_elf64_note: Callback for writing a CPU-specific ELF note to a
123 * 64-bit VM coredump.
 * @write_elf64_qemunote: Callback for writing a CPU- and QEMU-specific ELF
 *     note to a 64-bit VM coredump.
126 * @write_elf32_note: Callback for writing a CPU-specific ELF note to a
127 * 32-bit VM coredump.
128 * @write_elf32_qemunote: Callback for writing a CPU- and QEMU-specific ELF
129 * note to a 32-bit VM coredump.
b170fce3 130 * @vmsd: State description for migration.
a0e372f0 131 * @gdb_num_core_regs: Number of core registers accessible to GDB.
5b24c641 132 * @gdb_core_xml_file: File name for core registers GDB XML description.
2472b6c0
PM
133 * @gdb_stop_before_watchpoint: Indicates whether GDB expects the CPU to stop
134 * before the insn which triggers a watchpoint rather than after it.
b3820e6c
DH
135 * @gdb_arch_name: Optional callback that returns the architecture name known
136 * to GDB. The caller must free the returned string with g_free.
200bf5b7
AB
137 * @gdb_get_dynamic_xml: Callback to return dynamically generated XML for the
138 * gdb stub. Returns a pointer to the XML contents for the specified XML file
139 * or NULL if the CPU doesn't have a dynamically generated content for it.
cffe7b32
RH
140 * @cpu_exec_enter: Callback for cpu_exec preparation.
141 * @cpu_exec_exit: Callback for cpu_exec cleanup.
9585db68 142 * @cpu_exec_interrupt: Callback for processing interrupts in cpu_exec.
37b9de46 143 * @disas_set_info: Setup architecture specific components of disassembly info
40612000
JB
144 * @adjust_watchpoint_address: Perform a target-specific adjustment to an
145 * address before attempting to match it against watchpoints.
dd83b06a
AF
146 *
147 * Represents a CPU family or model.
148 */
typedef struct CPUClass {
    /*< private >*/
    DeviceClass parent_class;
    /*< public >*/

    /* Model lookup and command-line feature parsing. */
    ObjectClass *(*class_by_name)(const char *cpu_model);
    void (*parse_features)(const char *typename, char *str, Error **errp);

    void (*reset)(CPUState *cpu);
    int reset_dump_flags;
    bool (*has_work)(CPUState *cpu);
    void (*do_interrupt)(CPUState *cpu);
    /* Legacy hook; new targets should implement do_transaction_failed. */
    CPUUnassignedAccess do_unassigned_access;
    void (*do_unaligned_access)(CPUState *cpu, vaddr addr,
                                MMUAccessType access_type,
                                int mmu_idx, uintptr_t retaddr);
    void (*do_transaction_failed)(CPUState *cpu, hwaddr physaddr, vaddr addr,
                                  unsigned size, MMUAccessType access_type,
                                  int mmu_idx, MemTxAttrs attrs,
                                  MemTxResult response, uintptr_t retaddr);
    bool (*virtio_is_big_endian)(CPUState *cpu);
    int (*memory_rw_debug)(CPUState *cpu, vaddr addr,
                           uint8_t *buf, int len, bool is_write);
    /* State/statistics dumping for monitor and debugging use. */
    void (*dump_state)(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
                       int flags);
    GuestPanicInformation* (*get_crash_info)(CPUState *cpu);
    void (*dump_statistics)(CPUState *cpu, FILE *f,
                            fprintf_function cpu_fprintf, int flags);
    int64_t (*get_arch_id)(CPUState *cpu);
    bool (*get_paging_enabled)(const CPUState *cpu);
    void (*get_memory_mapping)(CPUState *cpu, MemoryMappingList *list,
                               Error **errp);
    void (*set_pc)(CPUState *cpu, vaddr value);
    void (*synchronize_from_tb)(CPUState *cpu, struct TranslationBlock *tb);
    int (*handle_mmu_fault)(CPUState *cpu, vaddr address, int size, int rw,
                            int mmu_index);
    hwaddr (*get_phys_page_debug)(CPUState *cpu, vaddr addr);
    hwaddr (*get_phys_page_attrs_debug)(CPUState *cpu, vaddr addr,
                                        MemTxAttrs *attrs);
    int (*asidx_from_attrs)(CPUState *cpu, MemTxAttrs attrs);
    /* GDB stub register access and watchpoint/exception hooks. */
    int (*gdb_read_register)(CPUState *cpu, uint8_t *buf, int reg);
    int (*gdb_write_register)(CPUState *cpu, uint8_t *buf, int reg);
    bool (*debug_check_watchpoint)(CPUState *cpu, CPUWatchpoint *wp);
    void (*debug_excp_handler)(CPUState *cpu);

    /* ELF note writers used when producing VM coredumps. */
    int (*write_elf64_note)(WriteCoreDumpFunction f, CPUState *cpu,
                            int cpuid, void *opaque);
    int (*write_elf64_qemunote)(WriteCoreDumpFunction f, CPUState *cpu,
                                void *opaque);
    int (*write_elf32_note)(WriteCoreDumpFunction f, CPUState *cpu,
                            int cpuid, void *opaque);
    int (*write_elf32_qemunote)(WriteCoreDumpFunction f, CPUState *cpu,
                                void *opaque);

    const struct VMStateDescription *vmsd;
    const char *gdb_core_xml_file;
    gchar * (*gdb_arch_name)(CPUState *cpu);
    const char * (*gdb_get_dynamic_xml)(CPUState *cpu, const char *xmlname);
    void (*cpu_exec_enter)(CPUState *cpu);
    void (*cpu_exec_exit)(CPUState *cpu);
    bool (*cpu_exec_interrupt)(CPUState *cpu, int interrupt_request);

    void (*disas_set_info)(CPUState *cpu, disassemble_info *info);
    vaddr (*adjust_watchpoint_address)(CPUState *cpu, vaddr addr, int len);
    void (*tcg_initialize)(void);

    /* Keep non-pointer data at the end to minimize holes. */
    int gdb_num_core_regs;
    bool gdb_stop_before_watchpoint;
} CPUClass;
219
28ecfd7a
AF
/*
 * 16-bit halves of CPUState::icount_decr.u32.  The member order is flipped
 * under HOST_WORDS_BIGENDIAN so that "low"/"high" always name the
 * least-/most-significant half of the 32-bit word on the host.
 */
#ifdef HOST_WORDS_BIGENDIAN
typedef struct icount_decr_u16 {
    uint16_t high;
    uint16_t low;
} icount_decr_u16;
#else
typedef struct icount_decr_u16 {
    uint16_t low;
    uint16_t high;
} icount_decr_u16;
#endif
231
f0c3c505
AF
/* One guest breakpoint; linked on the CPUState::breakpoints tail queue. */
typedef struct CPUBreakpoint {
    vaddr pc;          /* guest virtual address the breakpoint is set at */
    int flags; /* BP_* */
    QTAILQ_ENTRY(CPUBreakpoint) entry;
} CPUBreakpoint;
237
568496c0 238struct CPUWatchpoint {
ff4700b0 239 vaddr vaddr;
05068c0d 240 vaddr len;
08225676 241 vaddr hitaddr;
66b9b43c 242 MemTxAttrs hitattrs;
ff4700b0
AF
243 int flags; /* BP_* */
244 QTAILQ_ENTRY(CPUWatchpoint) entry;
568496c0 245};
ff4700b0 246
a60f24b5 247struct KVMState;
f7575c96 248struct kvm_run;
a60f24b5 249
b0cb0a66
VP
250struct hax_vcpu_state;
251
8cd70437
AF
252#define TB_JMP_CACHE_BITS 12
253#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)
254
4b4629d9 255/* work queue */
14e6fe12
PB
256
257/* The union type allows passing of 64 bit target pointers on 32 bit
258 * hosts in a single parameter
259 */
/* Payload for run_on_cpu_func; exactly one member is meaningful per call. */
typedef union {
    int host_int;
    unsigned long host_ulong;
    void *host_ptr;
    vaddr target_ptr;
} run_on_cpu_data;
266
267#define RUN_ON_CPU_HOST_PTR(p) ((run_on_cpu_data){.host_ptr = (p)})
268#define RUN_ON_CPU_HOST_INT(i) ((run_on_cpu_data){.host_int = (i)})
269#define RUN_ON_CPU_HOST_ULONG(ul) ((run_on_cpu_data){.host_ulong = (ul)})
270#define RUN_ON_CPU_TARGET_PTR(v) ((run_on_cpu_data){.target_ptr = (v)})
271#define RUN_ON_CPU_NULL RUN_ON_CPU_HOST_PTR(NULL)
272
273typedef void (*run_on_cpu_func)(CPUState *cpu, run_on_cpu_data data);
274
d148d90e 275struct qemu_work_item;
4b4629d9 276
0b8497f0 277#define CPU_UNSET_NUMA_NODE_ID -1
d01c05c9 278#define CPU_TRACE_DSTATE_MAX_EVENTS 32
0b8497f0 279
dd83b06a
AF
280/**
281 * CPUState:
55e5c285 282 * @cpu_index: CPU index (informative).
7ea7b9ad
PM
283 * @cluster_index: Identifies which cluster this CPU is in.
284 * For boards which don't define clusters or for "loose" CPUs not assigned
285 * to a cluster this will be UNASSIGNED_CLUSTER_INDEX; otherwise it will
286 * be the same as the cluster-id property of the CPU object's TYPE_CPU_CLUSTER
287 * QOM parent.
ce3960eb
AF
288 * @nr_cores: Number of cores within this CPU package.
289 * @nr_threads: Number of threads within this CPU.
c265e976
PB
290 * @running: #true if CPU is currently running (lockless).
291 * @has_waiter: #true if a CPU is currently waiting for the cpu_exec_end;
ab129972 292 * valid under cpu_list_lock.
61a46217 293 * @created: Indicates whether the CPU thread has been successfully created.
259186a7
AF
294 * @interrupt_request: Indicates a pending interrupt request.
295 * @halted: Nonzero if the CPU is in suspended state.
4fdeee7c 296 * @stop: Indicates a pending stop request.
f324e766 297 * @stopped: Indicates the CPU has been artificially stopped.
4c055ab5 298 * @unplug: Indicates a pending CPU unplug request.
bac05aa9 299 * @crash_occurred: Indicates the OS reported a crash (panic) for this CPU
ed2803da 300 * @singlestep_enabled: Flags for single-stepping.
efee7340 301 * @icount_extra: Instructions until next timer event.
1aab16c2
PB
302 * @icount_decr: Low 16 bits: number of cycles left, only used in icount mode.
303 * High 16 bits: Set to -1 to force TCG to stop executing linked TBs for this
304 * CPU and return to its top level loop (even in non-icount mode).
28ecfd7a
AF
305 * This allows a single read-compare-cbranch-write sequence to test
306 * for both decrementer underflow and exceptions.
414b15c9
PB
307 * @can_do_io: Nonzero if memory-mapped IO is safe. Deterministic execution
308 * requires that IO only be performed on the last instruction of a TB
309 * so that interrupts take effect immediately.
32857f4d
PM
310 * @cpu_ases: Pointer to array of CPUAddressSpaces (which define the
311 * AddressSpaces this CPU has)
12ebc9a7 312 * @num_ases: number of CPUAddressSpaces in @cpu_ases
32857f4d
PM
313 * @as: Pointer to the first AddressSpace, for the convenience of targets which
314 * only have a single AddressSpace
c05efcb1 315 * @env_ptr: Pointer to subclass-specific CPUArchState field.
eac8b355 316 * @gdb_regs: Additional GDB registers.
a0e372f0 317 * @gdb_num_regs: Number of total registers accessible to GDB.
35143f01 318 * @gdb_num_g_regs: Number of registers in GDB 'g' packets.
182735ef 319 * @next_cpu: Next CPU sharing TB cache.
0429a971 320 * @opaque: User data.
93afeade
AF
321 * @mem_io_pc: Host Program Counter at which the memory was accessed.
322 * @mem_io_vaddr: Target virtual address at which the memory was accessed.
8737c51c 323 * @kvm_fd: vCPU file descriptor for KVM.
376692b9
PB
324 * @work_mutex: Lock to prevent multiple access to queued_work_*.
325 * @queued_work_first: First asynchronous work pending.
d4381116
LV
326 * @trace_dstate_delayed: Delayed changes to trace_dstate (includes all changes
327 * to @trace_dstate).
48151859 328 * @trace_dstate: Dynamic tracing state of events for this vCPU (bitmask).
ed860129
PM
329 * @ignore_memory_transaction_failures: Cached copy of the MachineState
330 * flag of the same name: allows the board to suppress calling of the
331 * CPU do_transaction_failed hook function.
dd83b06a
AF
332 *
333 * State of one CPU core or thread.
334 */
struct CPUState {
    /*< private >*/
    DeviceState parent_obj;
    /*< public >*/

    int nr_cores;
    int nr_threads;

    /* Host thread that runs this vCPU and its synchronization state. */
    struct QemuThread *thread;
#ifdef _WIN32
    HANDLE hThread;
#endif
    int thread_id;
    bool running, has_waiter;
    struct QemuCond *halt_cond;
    bool thread_kicked;
    bool created;
    bool stop;
    bool stopped;
    bool unplug;
    bool crash_occurred;
    bool exit_request;
    uint32_t cflags_next_tb;
    /* updates protected by BQL */
    uint32_t interrupt_request;
    int singlestep_enabled;
    int64_t icount_budget;
    int64_t icount_extra;
    sigjmp_buf jmp_env;

    /* Asynchronous work queue; see do_run_on_cpu() and friends. */
    QemuMutex work_mutex;
    struct qemu_work_item *queued_work_first, *queued_work_last;

    CPUAddressSpace *cpu_ases;
    int num_ases;
    AddressSpace *as;
    MemoryRegion *memory;

    void *env_ptr; /* CPUArchState */

    /* Accessed in parallel; all accesses must be atomic */
    struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE];

    struct GDBRegisterState *gdb_regs;
    int gdb_num_regs;
    int gdb_num_g_regs;
    QTAILQ_ENTRY(CPUState) node;

    /* ice debug support */
    QTAILQ_HEAD(, CPUBreakpoint) breakpoints;

    QTAILQ_HEAD(, CPUWatchpoint) watchpoints;
    CPUWatchpoint *watchpoint_hit;

    void *opaque;

    /* In order to avoid passing too many arguments to the MMIO helpers,
     * we store some rarely used information in the CPU context.
     */
    uintptr_t mem_io_pc;
    vaddr mem_io_vaddr;
    /*
     * This is only needed for the legacy cpu_unassigned_access() hook;
     * when all targets using it have been converted to use
     * cpu_transaction_failed() instead it can be removed.
     */
    MMUAccessType mem_io_access_type;

    /* KVM accelerator per-vCPU state. */
    int kvm_fd;
    struct KVMState *kvm_state;
    struct kvm_run *kvm_run;

    /* Used for events with 'vcpu' and *without* the 'disabled' properties */
    DECLARE_BITMAP(trace_dstate_delayed, CPU_TRACE_DSTATE_MAX_EVENTS);
    DECLARE_BITMAP(trace_dstate, CPU_TRACE_DSTATE_MAX_EVENTS);

    /* TODO Move common fields from CPUArchState here. */
    int cpu_index;
    int cluster_index;
    uint32_t halted;
    uint32_t can_do_io;
    int32_t exception_index;

    /* shared by kvm, hax and hvf */
    bool vcpu_dirty;

    /* Used to keep track of an outstanding cpu throttle thread for migration
     * autoconverge
     */
    bool throttle_thread_scheduled;

    bool ignore_memory_transaction_failures;

    /* Note that this is accessed at the start of every TB via a negative
       offset from AREG0.  Leave this field at the end so as to make the
       (absolute value) offset as small as possible.  This reduces code
       size, especially for hosts without large memory offsets.  */
    union {
        uint32_t u32;
        icount_decr_u16 u16;
    } icount_decr;

    struct hax_vcpu_state *hax_vcpu;

    int hvf_fd;

    /* track IOMMUs whose translations we've cached in the TCG TLB */
    GArray *iommu_notifiers;
};
444
f481ee2d
PB
445typedef QTAILQ_HEAD(CPUTailQ, CPUState) CPUTailQ;
446extern CPUTailQ cpus;
447
068a5ea0
EC
448#define first_cpu QTAILQ_FIRST_RCU(&cpus)
449#define CPU_NEXT(cpu) QTAILQ_NEXT_RCU(cpu, node)
450#define CPU_FOREACH(cpu) QTAILQ_FOREACH_RCU(cpu, &cpus, node)
bdc44640 451#define CPU_FOREACH_SAFE(cpu, next_cpu) \
068a5ea0 452 QTAILQ_FOREACH_SAFE_RCU(cpu, &cpus, node, next_cpu)
182735ef 453
f240eb6f 454extern __thread CPUState *current_cpu;
4917cf44 455
f3ced3c5
EC
456static inline void cpu_tb_jmp_cache_clear(CPUState *cpu)
457{
458 unsigned int i;
459
460 for (i = 0; i < TB_JMP_CACHE_SIZE; i++) {
461 atomic_set(&cpu->tb_jmp_cache[i], NULL);
462 }
463}
464
8d4e9146
FK
465/**
466 * qemu_tcg_mttcg_enabled:
467 * Check whether we are running MultiThread TCG or not.
468 *
469 * Returns: %true if we are in MTTCG mode %false otherwise.
470 */
471extern bool mttcg_enabled;
472#define qemu_tcg_mttcg_enabled() (mttcg_enabled)
473
444d5590
AF
474/**
475 * cpu_paging_enabled:
476 * @cpu: The CPU whose state is to be inspected.
477 *
478 * Returns: %true if paging is enabled, %false otherwise.
479 */
480bool cpu_paging_enabled(const CPUState *cpu);
481
a23bbfda
AF
482/**
483 * cpu_get_memory_mapping:
484 * @cpu: The CPU whose memory mappings are to be obtained.
485 * @list: Where to write the memory mappings to.
486 * @errp: Pointer for reporting an #Error.
487 */
488void cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list,
489 Error **errp);
490
c72bf468
JF
491/**
492 * cpu_write_elf64_note:
493 * @f: pointer to a function that writes memory to a file
494 * @cpu: The CPU whose memory is to be dumped
495 * @cpuid: ID number of the CPU
496 * @opaque: pointer to the CPUState struct
497 */
498int cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu,
499 int cpuid, void *opaque);
500
501/**
502 * cpu_write_elf64_qemunote:
503 * @f: pointer to a function that writes memory to a file
504 * @cpu: The CPU whose memory is to be dumped
505 * @cpuid: ID number of the CPU
506 * @opaque: pointer to the CPUState struct
507 */
508int cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
509 void *opaque);
510
511/**
512 * cpu_write_elf32_note:
513 * @f: pointer to a function that writes memory to a file
514 * @cpu: The CPU whose memory is to be dumped
515 * @cpuid: ID number of the CPU
516 * @opaque: pointer to the CPUState struct
517 */
518int cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu,
519 int cpuid, void *opaque);
520
521/**
522 * cpu_write_elf32_qemunote:
523 * @f: pointer to a function that writes memory to a file
524 * @cpu: The CPU whose memory is to be dumped
525 * @cpuid: ID number of the CPU
526 * @opaque: pointer to the CPUState struct
527 */
528int cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
529 void *opaque);
dd83b06a 530
c86f106b
AN
531/**
532 * cpu_get_crash_info:
533 * @cpu: The CPU to get crash information for
534 *
535 * Gets the previously saved crash information.
536 * Caller is responsible for freeing the data.
537 */
538GuestPanicInformation *cpu_get_crash_info(CPUState *cpu);
539
878096ee
AF
540/**
541 * CPUDumpFlags:
542 * @CPU_DUMP_CODE:
543 * @CPU_DUMP_FPU: dump FPU register state, not just integer
544 * @CPU_DUMP_CCOP: dump info about TCG QEMU's condition code optimization state
545 */
546enum CPUDumpFlags {
547 CPU_DUMP_CODE = 0x00010000,
548 CPU_DUMP_FPU = 0x00020000,
549 CPU_DUMP_CCOP = 0x00040000,
550};
551
552/**
553 * cpu_dump_state:
554 * @cpu: The CPU whose state is to be dumped.
555 * @f: File to dump to.
556 * @cpu_fprintf: Function to dump with.
557 * @flags: Flags what to dump.
558 *
559 * Dumps CPU state.
560 */
561void cpu_dump_state(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
562 int flags);
563
564/**
565 * cpu_dump_statistics:
566 * @cpu: The CPU whose state is to be dumped.
567 * @f: File to dump to.
568 * @cpu_fprintf: Function to dump with.
569 * @flags: Flags what to dump.
570 *
571 * Dumps CPU statistics.
572 */
573void cpu_dump_statistics(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
574 int flags);
575
00b941e5 576#ifndef CONFIG_USER_ONLY
1dc6fb1f
PM
577/**
578 * cpu_get_phys_page_attrs_debug:
579 * @cpu: The CPU to obtain the physical page address for.
580 * @addr: The virtual address.
581 * @attrs: Updated on return with the memory transaction attributes to use
582 * for this access.
583 *
584 * Obtains the physical page corresponding to a virtual one, together
585 * with the corresponding memory transaction attributes to use for the access.
586 * Use it only for debugging because no protection checks are done.
587 *
588 * Returns: Corresponding physical page address or -1 if no page found.
589 */
590static inline hwaddr cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
591 MemTxAttrs *attrs)
592{
593 CPUClass *cc = CPU_GET_CLASS(cpu);
594
595 if (cc->get_phys_page_attrs_debug) {
596 return cc->get_phys_page_attrs_debug(cpu, addr, attrs);
597 }
598 /* Fallback for CPUs which don't implement the _attrs_ hook */
599 *attrs = MEMTXATTRS_UNSPECIFIED;
600 return cc->get_phys_page_debug(cpu, addr);
601}
602
00b941e5
AF
603/**
604 * cpu_get_phys_page_debug:
605 * @cpu: The CPU to obtain the physical page address for.
606 * @addr: The virtual address.
607 *
608 * Obtains the physical page corresponding to a virtual one.
609 * Use it only for debugging because no protection checks are done.
610 *
611 * Returns: Corresponding physical page address or -1 if no page found.
612 */
613static inline hwaddr cpu_get_phys_page_debug(CPUState *cpu, vaddr addr)
614{
1dc6fb1f 615 MemTxAttrs attrs = {};
00b941e5 616
1dc6fb1f 617 return cpu_get_phys_page_attrs_debug(cpu, addr, &attrs);
00b941e5 618}
d7f25a9e
PM
619
620/** cpu_asidx_from_attrs:
621 * @cpu: CPU
622 * @attrs: memory transaction attributes
623 *
624 * Returns the address space index specifying the CPU AddressSpace
625 * to use for a memory access with the given transaction attributes.
626 */
627static inline int cpu_asidx_from_attrs(CPUState *cpu, MemTxAttrs attrs)
628{
629 CPUClass *cc = CPU_GET_CLASS(cpu);
9c8c334b 630 int ret = 0;
d7f25a9e
PM
631
632 if (cc->asidx_from_attrs) {
9c8c334b
RH
633 ret = cc->asidx_from_attrs(cpu, attrs);
634 assert(ret < cpu->num_ases && ret >= 0);
d7f25a9e 635 }
9c8c334b 636 return ret;
d7f25a9e 637}
00b941e5
AF
638#endif
639
267f685b
PB
640/**
641 * cpu_list_add:
642 * @cpu: The CPU to be added to the list of CPUs.
643 */
644void cpu_list_add(CPUState *cpu);
645
646/**
647 * cpu_list_remove:
648 * @cpu: The CPU to be removed from the list of CPUs.
649 */
650void cpu_list_remove(CPUState *cpu);
651
dd83b06a
AF
652/**
653 * cpu_reset:
654 * @cpu: The CPU whose state is to be reset.
655 */
656void cpu_reset(CPUState *cpu);
657
2b8c2754
AF
658/**
659 * cpu_class_by_name:
660 * @typename: The CPU base type.
661 * @cpu_model: The model string without any parameters.
662 *
663 * Looks up a CPU #ObjectClass matching name @cpu_model.
664 *
 * Returns: A #CPUClass or %NULL if no matching class is found.
666 */
667ObjectClass *cpu_class_by_name(const char *typename, const char *cpu_model);
668
3c72234c
IM
669/**
670 * cpu_create:
671 * @typename: The CPU type.
672 *
673 * Instantiates a CPU and realizes the CPU.
674 *
675 * Returns: A #CPUState or %NULL if an error occurred.
676 */
677CPUState *cpu_create(const char *typename);
678
679/**
2278b939 680 * parse_cpu_model:
3c72234c
IM
681 * @cpu_model: The model string including optional parameters.
682 *
683 * processes optional parameters and registers them as global properties
684 *
4482e05c
IM
685 * Returns: type of CPU to create or prints error and terminates process
686 * if an error occurred.
3c72234c 687 */
2278b939 688const char *parse_cpu_model(const char *cpu_model);
9262685b 689
3993c6bd 690/**
8c2e1b00 691 * cpu_has_work:
3993c6bd
AF
692 * @cpu: The vCPU to check.
693 *
694 * Checks whether the CPU has work to do.
695 *
696 * Returns: %true if the CPU has work, %false otherwise.
697 */
8c2e1b00
AF
698static inline bool cpu_has_work(CPUState *cpu)
699{
700 CPUClass *cc = CPU_GET_CLASS(cpu);
701
702 g_assert(cc->has_work);
703 return cc->has_work(cpu);
704}
3993c6bd 705
60e82579
AF
706/**
707 * qemu_cpu_is_self:
708 * @cpu: The vCPU to check against.
709 *
710 * Checks whether the caller is executing on the vCPU thread.
711 *
712 * Returns: %true if called from @cpu's thread, %false otherwise.
713 */
714bool qemu_cpu_is_self(CPUState *cpu);
715
c08d7424
AF
716/**
717 * qemu_cpu_kick:
718 * @cpu: The vCPU to kick.
719 *
720 * Kicks @cpu's thread.
721 */
722void qemu_cpu_kick(CPUState *cpu);
723
2fa45344
AF
724/**
725 * cpu_is_stopped:
726 * @cpu: The CPU to check.
727 *
728 * Checks whether the CPU is stopped.
729 *
730 * Returns: %true if run state is not running or if artificially stopped;
731 * %false otherwise.
732 */
733bool cpu_is_stopped(CPUState *cpu);
734
d148d90e
SF
735/**
736 * do_run_on_cpu:
737 * @cpu: The vCPU to run on.
738 * @func: The function to be executed.
739 * @data: Data to pass to the function.
740 * @mutex: Mutex to release while waiting for @func to run.
741 *
742 * Used internally in the implementation of run_on_cpu.
743 */
14e6fe12 744void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data,
d148d90e
SF
745 QemuMutex *mutex);
746
f100f0b3
AF
747/**
748 * run_on_cpu:
749 * @cpu: The vCPU to run on.
750 * @func: The function to be executed.
751 * @data: Data to pass to the function.
752 *
753 * Schedules the function @func for execution on the vCPU @cpu.
754 */
14e6fe12 755void run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data);
f100f0b3 756
3c02270d
CV
757/**
758 * async_run_on_cpu:
759 * @cpu: The vCPU to run on.
760 * @func: The function to be executed.
761 * @data: Data to pass to the function.
762 *
763 * Schedules the function @func for execution on the vCPU @cpu asynchronously.
764 */
14e6fe12 765void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data);
3c02270d 766
53f5ed95
PB
767/**
768 * async_safe_run_on_cpu:
769 * @cpu: The vCPU to run on.
770 * @func: The function to be executed.
771 * @data: Data to pass to the function.
772 *
773 * Schedules the function @func for execution on the vCPU @cpu asynchronously,
774 * while all other vCPUs are sleeping.
775 *
776 * Unlike run_on_cpu and async_run_on_cpu, the function is run outside the
777 * BQL.
778 */
14e6fe12 779void async_safe_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data);
53f5ed95 780
38d8f5c8
AF
781/**
782 * qemu_get_cpu:
783 * @index: The CPUState@cpu_index value of the CPU to obtain.
784 *
785 * Gets a CPU matching @index.
786 *
787 * Returns: The CPU or %NULL if there is no matching CPU.
788 */
789CPUState *qemu_get_cpu(int index);
790
69e5ff06
IM
791/**
792 * cpu_exists:
793 * @id: Guest-exposed CPU ID to lookup.
794 *
795 * Search for CPU with specified ID.
796 *
797 * Returns: %true - CPU is found, %false - CPU isn't found.
798 */
799bool cpu_exists(int64_t id);
800
5ce46cb3
EH
801/**
802 * cpu_by_arch_id:
803 * @id: Guest-exposed CPU ID of the CPU to obtain.
804 *
805 * Get a CPU with matching @id.
806 *
807 * Returns: The CPU or %NULL if there is no matching CPU.
808 */
809CPUState *cpu_by_arch_id(int64_t id);
810
2adcc85d
JH
811/**
812 * cpu_throttle_set:
813 * @new_throttle_pct: Percent of sleep time. Valid range is 1 to 99.
814 *
815 * Throttles all vcpus by forcing them to sleep for the given percentage of
816 * time. A throttle_percentage of 25 corresponds to a 75% duty cycle roughly.
817 * (example: 10ms sleep for every 30ms awake).
818 *
819 * cpu_throttle_set can be called as needed to adjust new_throttle_pct.
820 * Once the throttling starts, it will remain in effect until cpu_throttle_stop
821 * is called.
822 */
823void cpu_throttle_set(int new_throttle_pct);
824
825/**
826 * cpu_throttle_stop:
827 *
828 * Stops the vcpu throttling started by cpu_throttle_set.
829 */
830void cpu_throttle_stop(void);
831
832/**
833 * cpu_throttle_active:
834 *
835 * Returns: %true if the vcpus are currently being throttled, %false otherwise.
836 */
837bool cpu_throttle_active(void);
838
839/**
840 * cpu_throttle_get_percentage:
841 *
842 * Returns the vcpu throttle percentage. See cpu_throttle_set for details.
843 *
844 * Returns: The throttle percentage in range 1 to 99.
845 */
846int cpu_throttle_get_percentage(void);
847
c3affe56
AF
848#ifndef CONFIG_USER_ONLY
849
850typedef void (*CPUInterruptHandler)(CPUState *, int);
851
852extern CPUInterruptHandler cpu_interrupt_handler;
853
854/**
855 * cpu_interrupt:
856 * @cpu: The CPU to set an interrupt on.
7e63bc38 857 * @mask: The interrupts to set.
c3affe56
AF
858 *
859 * Invokes the interrupt handler.
860 */
861static inline void cpu_interrupt(CPUState *cpu, int mask)
862{
863 cpu_interrupt_handler(cpu, mask);
864}
865
866#else /* USER_ONLY */
867
868void cpu_interrupt(CPUState *cpu, int mask);
869
870#endif /* USER_ONLY */
871
47507383
TH
872#ifdef NEED_CPU_H
873
93e22326 874#ifdef CONFIG_SOFTMMU
c658b94f
AF
875static inline void cpu_unassigned_access(CPUState *cpu, hwaddr addr,
876 bool is_write, bool is_exec,
877 int opaque, unsigned size)
878{
879 CPUClass *cc = CPU_GET_CLASS(cpu);
880
881 if (cc->do_unassigned_access) {
882 cc->do_unassigned_access(cpu, addr, is_write, is_exec, opaque, size);
883 }
884}
885
93e22326 886static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
b35399bb
SS
887 MMUAccessType access_type,
888 int mmu_idx, uintptr_t retaddr)
93e22326
PB
889{
890 CPUClass *cc = CPU_GET_CLASS(cpu);
891
b35399bb 892 cc->do_unaligned_access(cpu, addr, access_type, mmu_idx, retaddr);
93e22326 893}
0dff0939
PM
894
895static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr,
896 vaddr addr, unsigned size,
897 MMUAccessType access_type,
898 int mmu_idx, MemTxAttrs attrs,
899 MemTxResult response,
900 uintptr_t retaddr)
901{
902 CPUClass *cc = CPU_GET_CLASS(cpu);
903
ed860129 904 if (!cpu->ignore_memory_transaction_failures && cc->do_transaction_failed) {
0dff0939
PM
905 cc->do_transaction_failed(cpu, physaddr, addr, size, access_type,
906 mmu_idx, attrs, response, retaddr);
907 }
908}
c658b94f
AF
909#endif
910
47507383
TH
911#endif /* NEED_CPU_H */
912
2991b890
PC
913/**
914 * cpu_set_pc:
915 * @cpu: The CPU to set the program counter for.
916 * @addr: Program counter value.
917 *
918 * Sets the program counter for a CPU.
919 */
920static inline void cpu_set_pc(CPUState *cpu, vaddr addr)
921{
922 CPUClass *cc = CPU_GET_CLASS(cpu);
923
924 cc->set_pc(cpu, addr);
925}
926
d8ed887b
AF
927/**
928 * cpu_reset_interrupt:
929 * @cpu: The CPU to clear the interrupt on.
930 * @mask: The interrupt mask to clear.
931 *
932 * Resets interrupts on the vCPU @cpu.
933 */
934void cpu_reset_interrupt(CPUState *cpu, int mask);
935
60a3e17a
AF
936/**
937 * cpu_exit:
938 * @cpu: The CPU to exit.
939 *
940 * Requests the CPU @cpu to exit execution.
941 */
942void cpu_exit(CPUState *cpu);
943
2993683b
IM
944/**
945 * cpu_resume:
946 * @cpu: The CPU to resume.
947 *
948 * Resumes CPU, i.e. puts CPU into runnable state.
949 */
950void cpu_resume(CPUState *cpu);
dd83b06a 951
4c055ab5
GZ
952/**
953 * cpu_remove:
954 * @cpu: The CPU to remove.
955 *
956 * Requests the CPU to be removed.
957 */
958void cpu_remove(CPUState *cpu);
959
2c579042
BR
960 /**
961 * cpu_remove_sync:
962 * @cpu: The CPU to remove.
963 *
964 * Requests the CPU to be removed and waits till it is removed.
965 */
966void cpu_remove_sync(CPUState *cpu);
967
d148d90e
SF
968/**
969 * process_queued_cpu_work() - process all items on CPU work queue
970 * @cpu: The CPU which work queue to process.
971 */
972void process_queued_cpu_work(CPUState *cpu);
973
ab129972
PB
974/**
975 * cpu_exec_start:
976 * @cpu: The CPU for the current thread.
977 *
978 * Record that a CPU has started execution and can be interrupted with
979 * cpu_exit.
980 */
981void cpu_exec_start(CPUState *cpu);
982
983/**
984 * cpu_exec_end:
985 * @cpu: The CPU for the current thread.
986 *
987 * Record that a CPU has stopped execution and exclusive sections
988 * can be executed without interrupting it.
989 */
990void cpu_exec_end(CPUState *cpu);
991
992/**
993 * start_exclusive:
994 *
995 * Wait for a concurrent exclusive section to end, and then start
996 * a section of work that is run while other CPUs are not running
997 * between cpu_exec_start and cpu_exec_end. CPUs that are running
998 * cpu_exec are exited immediately. CPUs that call cpu_exec_start
999 * during the exclusive section go to sleep until this CPU calls
1000 * end_exclusive.
ab129972
PB
1001 */
1002void start_exclusive(void);
1003
1004/**
1005 * end_exclusive:
1006 *
1007 * Concludes an exclusive execution section started by start_exclusive.
ab129972
PB
1008 */
1009void end_exclusive(void);
1010
c643bed9
AF
1011/**
1012 * qemu_init_vcpu:
1013 * @cpu: The vCPU to initialize.
1014 *
1015 * Initializes a vCPU.
1016 */
1017void qemu_init_vcpu(CPUState *cpu);
1018
3825b28f
AF
1019#define SSTEP_ENABLE 0x1 /* Enable simulated HW single stepping */
1020#define SSTEP_NOIRQ 0x2 /* Do not use IRQ while single stepping */
#define SSTEP_NOTIMER 0x4  /* Do not use Timers while single stepping */
1022
/**
 * cpu_single_step:
 * @cpu: CPU to set the flags for.
 * @enabled: Flags to enable.
 *
 * Enables or disables single-stepping for @cpu.
 */
1030void cpu_single_step(CPUState *cpu, int enabled);
1031
b3310ab3
AF
1032/* Breakpoint/watchpoint flags */
1033#define BP_MEM_READ 0x01
1034#define BP_MEM_WRITE 0x02
1035#define BP_MEM_ACCESS (BP_MEM_READ | BP_MEM_WRITE)
1036#define BP_STOP_BEFORE_ACCESS 0x04
08225676 1037/* 0x08 currently unused */
b3310ab3
AF
1038#define BP_GDB 0x10
1039#define BP_CPU 0x20
b933066a 1040#define BP_ANY (BP_GDB | BP_CPU)
08225676
PM
1041#define BP_WATCHPOINT_HIT_READ 0x40
1042#define BP_WATCHPOINT_HIT_WRITE 0x80
1043#define BP_WATCHPOINT_HIT (BP_WATCHPOINT_HIT_READ | BP_WATCHPOINT_HIT_WRITE)
b3310ab3
AF
1044
1045int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
1046 CPUBreakpoint **breakpoint);
1047int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags);
1048void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint);
1049void cpu_breakpoint_remove_all(CPUState *cpu, int mask);
1050
b933066a
RH
1051/* Return true if PC matches an installed breakpoint. */
1052static inline bool cpu_breakpoint_test(CPUState *cpu, vaddr pc, int mask)
1053{
1054 CPUBreakpoint *bp;
1055
1056 if (unlikely(!QTAILQ_EMPTY(&cpu->breakpoints))) {
1057 QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
1058 if (bp->pc == pc && (bp->flags & mask)) {
1059 return true;
1060 }
1061 }
1062 }
1063 return false;
1064}
1065
75a34036
AF
1066int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
1067 int flags, CPUWatchpoint **watchpoint);
1068int cpu_watchpoint_remove(CPUState *cpu, vaddr addr,
1069 vaddr len, int flags);
1070void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint);
1071void cpu_watchpoint_remove_all(CPUState *cpu, int mask);
1072
63c91552
PB
1073/**
1074 * cpu_get_address_space:
1075 * @cpu: CPU to get address space from
1076 * @asidx: index identifying which address space to get
1077 *
1078 * Return the requested address space of this CPU. @asidx
1079 * specifies which address space to read.
1080 */
1081AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx);
1082
a47dddd7
AF
1083void QEMU_NORETURN cpu_abort(CPUState *cpu, const char *fmt, ...)
1084 GCC_FMT_ATTR(2, 3);
c7e002c5 1085extern Property cpu_common_props[];
39e329e3 1086void cpu_exec_initfn(CPUState *cpu);
ce5b1bbf 1087void cpu_exec_realizefn(CPUState *cpu, Error **errp);
7bbc124e 1088void cpu_exec_unrealizefn(CPUState *cpu);
a47dddd7 1089
c95ac103
TH
1090/**
1091 * target_words_bigendian:
1092 * Returns true if the (default) endianness of the target is big endian,
1093 * false otherwise. Note that in target-specific code, you can use
1094 * TARGET_WORDS_BIGENDIAN directly instead. On the other hand, common
1095 * code should normally never need to know about the endianness of the
1096 * target, so please do *not* use this function unless you know very well
1097 * what you are doing!
1098 */
1099bool target_words_bigendian(void);
1100
47507383
TH
1101#ifdef NEED_CPU_H
1102
1a1562f5
AF
1103#ifdef CONFIG_SOFTMMU
1104extern const struct VMStateDescription vmstate_cpu_common;
1105#else
1106#define vmstate_cpu_common vmstate_dummy
1107#endif
1108
1109#define VMSTATE_CPU() { \
1110 .name = "parent_obj", \
1111 .size = sizeof(CPUState), \
1112 .vmsd = &vmstate_cpu_common, \
1113 .flags = VMS_STRUCT, \
1114 .offset = 0, \
1115}
1116
47507383
TH
1117#endif /* NEED_CPU_H */
1118
a07f953e 1119#define UNASSIGNED_CPU_INDEX -1
7ea7b9ad 1120#define UNASSIGNED_CLUSTER_INDEX -1
a07f953e 1121
dd83b06a 1122#endif
This page took 0.551604 seconds and 4 git commands to generate.