/*
 * QEMU CPU model
 *
 * Copyright (c) 2012 SUSE LINUX Products GmbH
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 */
#ifndef QEMU_CPU_H
#define QEMU_CPU_H

#include <signal.h>
#include <setjmp.h>
#include "hw/qdev-core.h"
#include "exec/hwaddr.h"
#include "qemu/queue.h"
#include "qemu/thread.h"
#include "qemu/tls.h"
#include "qemu/typedefs.h"

typedef int (*WriteCoreDumpFunction)(const void *buf, size_t size,
                                     void *opaque);

/**
 * vaddr:
 * Type wide enough to contain any #target_ulong virtual address.
 */
typedef uint64_t vaddr;
#define VADDR_PRId PRId64
#define VADDR_PRIu PRIu64
#define VADDR_PRIo PRIo64
#define VADDR_PRIx PRIx64
#define VADDR_PRIX PRIX64
#define VADDR_MAX UINT64_MAX

/**
 * SECTION:cpu
 * @section_id: QEMU-cpu
 * @title: CPU Class
 * @short_description: Base class for all CPUs
 */

#define TYPE_CPU "cpu"

/* Since this macro is used a lot in hot code paths and in conjunction with
 * FooCPU *foo_env_get_cpu(), we deviate from usual QOM practice by using
 * an unchecked cast.
 */
#define CPU(obj) ((CPUState *)(obj))

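/*
 * Illustrative usage sketch (not part of the original header): converting a
 * target-specific CPU object back to the generic CPUState in a hot path.
 * X86CPU is used here only as a familiar example of a FooCPU type; any target
 * CPU structure whose QOM parent is CPUState works the same way.
 *
 *   static void example_kick(X86CPU *x86_cpu)
 *   {
 *       CPUState *cs = CPU(x86_cpu);
 *
 *       qemu_cpu_kick(cs);
 *   }
 */
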
#define CPU_CLASS(class) OBJECT_CLASS_CHECK(CPUClass, (class), TYPE_CPU)
#define CPU_GET_CLASS(obj) OBJECT_GET_CLASS(CPUClass, (obj), TYPE_CPU)

typedef struct CPUState CPUState;

typedef void (*CPUUnassignedAccess)(CPUState *cpu, hwaddr addr,
                                    bool is_write, bool is_exec, int opaque,
                                    unsigned size);

struct TranslationBlock;

/**
 * CPUClass:
 * @class_by_name: Callback to map -cpu command line model name to an
 * instantiatable CPU type.
 * @parse_features: Callback to parse command line arguments.
 * @reset: Callback to reset the #CPUState to its initial state.
 * @reset_dump_flags: #CPUDumpFlags to use for reset logging.
 * @has_work: Callback for checking if there is work to do.
 * @do_interrupt: Callback for interrupt handling.
 * @do_unassigned_access: Callback for unassigned access handling.
 * @do_unaligned_access: Callback for unaligned access handling, if
 * the target defines #ALIGNED_ONLY.
 * @memory_rw_debug: Callback for GDB memory access.
 * @dump_state: Callback for dumping state.
 * @dump_statistics: Callback for dumping statistics.
 * @get_arch_id: Callback for getting architecture-dependent CPU ID.
 * @get_paging_enabled: Callback for inquiring whether paging is enabled.
 * @get_memory_mapping: Callback for obtaining the memory mappings.
 * @set_pc: Callback for setting the Program Counter register.
 * @synchronize_from_tb: Callback for synchronizing state from a TCG
 * #TranslationBlock.
 * @handle_mmu_fault: Callback for handling an MMU fault.
 * @get_phys_page_debug: Callback for obtaining a physical address.
 * @gdb_read_register: Callback for letting GDB read a register.
 * @gdb_write_register: Callback for letting GDB write a register.
 * @debug_excp_handler: Callback for handling debug exceptions.
 * @vmsd: State description for migration.
 * @gdb_num_core_regs: Number of core registers accessible to GDB.
 * @gdb_core_xml_file: File name for core registers GDB XML description.
 * @cpu_exec_enter: Callback for cpu_exec preparation.
 * @cpu_exec_exit: Callback for cpu_exec cleanup.
 * @cpu_exec_interrupt: Callback for processing interrupts in cpu_exec.
 *
 * Represents a CPU family or model.
 */
typedef struct CPUClass {
    /*< private >*/
    DeviceClass parent_class;
    /*< public >*/

    ObjectClass *(*class_by_name)(const char *cpu_model);
    void (*parse_features)(CPUState *cpu, char *str, Error **errp);

    void (*reset)(CPUState *cpu);
    int reset_dump_flags;
    bool (*has_work)(CPUState *cpu);
    void (*do_interrupt)(CPUState *cpu);
    CPUUnassignedAccess do_unassigned_access;
    void (*do_unaligned_access)(CPUState *cpu, vaddr addr,
                                int is_write, int is_user, uintptr_t retaddr);
    bool (*virtio_is_big_endian)(CPUState *cpu);
    int (*memory_rw_debug)(CPUState *cpu, vaddr addr,
                           uint8_t *buf, int len, bool is_write);
    void (*dump_state)(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
                       int flags);
    void (*dump_statistics)(CPUState *cpu, FILE *f,
                            fprintf_function cpu_fprintf, int flags);
    int64_t (*get_arch_id)(CPUState *cpu);
    bool (*get_paging_enabled)(const CPUState *cpu);
    void (*get_memory_mapping)(CPUState *cpu, MemoryMappingList *list,
                               Error **errp);
    void (*set_pc)(CPUState *cpu, vaddr value);
    void (*synchronize_from_tb)(CPUState *cpu, struct TranslationBlock *tb);
    int (*handle_mmu_fault)(CPUState *cpu, vaddr address, int rw,
                            int mmu_index);
    hwaddr (*get_phys_page_debug)(CPUState *cpu, vaddr addr);
    int (*gdb_read_register)(CPUState *cpu, uint8_t *buf, int reg);
    int (*gdb_write_register)(CPUState *cpu, uint8_t *buf, int reg);
    void (*debug_excp_handler)(CPUState *cpu);

    int (*write_elf64_note)(WriteCoreDumpFunction f, CPUState *cpu,
                            int cpuid, void *opaque);
    int (*write_elf64_qemunote)(WriteCoreDumpFunction f, CPUState *cpu,
                                void *opaque);
    int (*write_elf32_note)(WriteCoreDumpFunction f, CPUState *cpu,
                            int cpuid, void *opaque);
    int (*write_elf32_qemunote)(WriteCoreDumpFunction f, CPUState *cpu,
                                void *opaque);

    const struct VMStateDescription *vmsd;
    int gdb_num_core_regs;
    const char *gdb_core_xml_file;

    void (*cpu_exec_enter)(CPUState *cpu);
    void (*cpu_exec_exit)(CPUState *cpu);
    bool (*cpu_exec_interrupt)(CPUState *cpu, int interrupt_request);
} CPUClass;

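/*
 * Illustrative sketch (an assumption, not part of the original header): a
 * target's class_init typically fills in these hooks.  The "mycpu_*" names
 * below are hypothetical placeholders for the target's own implementations.
 *
 *   static void mycpu_class_init(ObjectClass *oc, void *data)
 *   {
 *       CPUClass *cc = CPU_CLASS(oc);
 *
 *       cc->class_by_name = mycpu_class_by_name;
 *       cc->has_work = mycpu_has_work;
 *       cc->do_interrupt = mycpu_do_interrupt;
 *       cc->set_pc = mycpu_set_pc;
 *       cc->gdb_num_core_regs = 16;
 *   }
 */
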
#ifdef HOST_WORDS_BIGENDIAN
typedef struct icount_decr_u16 {
    uint16_t high;
    uint16_t low;
} icount_decr_u16;
#else
typedef struct icount_decr_u16 {
    uint16_t low;
    uint16_t high;
} icount_decr_u16;
#endif

typedef struct CPUBreakpoint {
    vaddr pc;
    int flags; /* BP_* */
    QTAILQ_ENTRY(CPUBreakpoint) entry;
} CPUBreakpoint;

typedef struct CPUWatchpoint {
    vaddr vaddr;
    vaddr len;
    vaddr hitaddr;
    int flags; /* BP_* */
    QTAILQ_ENTRY(CPUWatchpoint) entry;
} CPUWatchpoint;

struct KVMState;
struct kvm_run;

#define TB_JMP_CACHE_BITS 12
#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)

/**
 * CPUState:
 * @cpu_index: CPU index (informative).
 * @nr_cores: Number of cores within this CPU package.
 * @nr_threads: Number of threads within this CPU.
 * @numa_node: NUMA node this CPU belongs to.
 * @host_tid: Host thread ID.
 * @running: #true if CPU is currently running (usermode).
 * @created: Indicates whether the CPU thread has been successfully created.
 * @interrupt_request: Indicates a pending interrupt request.
 * @halted: Nonzero if the CPU is in a suspended state.
 * @stop: Indicates a pending stop request.
 * @stopped: Indicates the CPU has been artificially stopped.
 * @tcg_exit_req: Set to force TCG to stop executing linked TBs for this
 * CPU and return to its top level loop.
 * @singlestep_enabled: Flags for single-stepping.
 * @icount_extra: Instructions until next timer event.
 * @icount_decr: Number of cycles left, with interrupt flag in high bit.
 * This allows a single read-compare-cbranch-write sequence to test
 * for both decrementer underflow and exceptions.
 * @can_do_io: Nonzero if memory-mapped IO is safe.
 * @env_ptr: Pointer to subclass-specific CPUArchState field.
 * @current_tb: Currently executing TB.
 * @gdb_regs: Additional GDB registers.
 * @gdb_num_regs: Number of total registers accessible to GDB.
 * @gdb_num_g_regs: Number of registers in GDB 'g' packets.
 * @node: QTAILQ entry linking all CPUs sharing the TB cache.
 * @opaque: User data.
 * @mem_io_pc: Host Program Counter at which the memory was accessed.
 * @mem_io_vaddr: Target virtual address at which the memory was accessed.
 * @kvm_fd: vCPU file descriptor for KVM.
 *
 * State of one CPU core or thread.
 */
struct CPUState {
    /*< private >*/
    DeviceState parent_obj;
    /*< public >*/

    int nr_cores;
    int nr_threads;
    int numa_node;

    struct QemuThread *thread;
#ifdef _WIN32
    HANDLE hThread;
#endif
    int thread_id;
    uint32_t host_tid;
    bool running;
    struct QemuCond *halt_cond;
    struct qemu_work_item *queued_work_first, *queued_work_last;
    bool thread_kicked;
    bool created;
    bool stop;
    bool stopped;
    volatile sig_atomic_t exit_request;
    uint32_t interrupt_request;
    int singlestep_enabled;
    int64_t icount_extra;
    sigjmp_buf jmp_env;

    AddressSpace *as;
    MemoryListener *tcg_as_listener;

    void *env_ptr; /* CPUArchState */
    struct TranslationBlock *current_tb;
    struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE];
    struct GDBRegisterState *gdb_regs;
    int gdb_num_regs;
    int gdb_num_g_regs;
    QTAILQ_ENTRY(CPUState) node;

    /* ice debug support */
    QTAILQ_HEAD(breakpoints_head, CPUBreakpoint) breakpoints;

    QTAILQ_HEAD(watchpoints_head, CPUWatchpoint) watchpoints;
    CPUWatchpoint *watchpoint_hit;

    void *opaque;

    /* In order to avoid passing too many arguments to the MMIO helpers,
     * we store some rarely used information in the CPU context.
     */
    uintptr_t mem_io_pc;
    vaddr mem_io_vaddr;

    int kvm_fd;
    bool kvm_vcpu_dirty;
    struct KVMState *kvm_state;
    struct kvm_run *kvm_run;

    /* TODO Move common fields from CPUArchState here. */
    int cpu_index; /* used by alpha TCG */
    uint32_t halted; /* used by alpha, cris, ppc TCG */
    union {
        uint32_t u32;
        icount_decr_u16 u16;
    } icount_decr;
    uint32_t can_do_io;
    int32_t exception_index; /* used by m68k TCG */

    /* Note that this is accessed at the start of every TB via a negative
       offset from AREG0.  Leave this field at the end so as to make the
       (absolute value) offset as small as possible.  This reduces code
       size, especially for hosts without large memory offsets. */
    volatile sig_atomic_t tcg_exit_req;
};

QTAILQ_HEAD(CPUTailQ, CPUState);
extern struct CPUTailQ cpus;
#define CPU_NEXT(cpu) QTAILQ_NEXT(cpu, node)
#define CPU_FOREACH(cpu) QTAILQ_FOREACH(cpu, &cpus, node)
#define CPU_FOREACH_SAFE(cpu, next_cpu) \
    QTAILQ_FOREACH_SAFE(cpu, &cpus, node, next_cpu)
#define first_cpu QTAILQ_FIRST(&cpus)

DECLARE_TLS(CPUState *, current_cpu);
#define current_cpu tls_var(current_cpu)

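/*
 * Illustrative sketch (not part of the original header): walking every vCPU
 * with CPU_FOREACH.  The function name is hypothetical.
 *
 *   static void example_kick_all_vcpus(void)
 *   {
 *       CPUState *cpu;
 *
 *       CPU_FOREACH(cpu) {
 *           qemu_cpu_kick(cpu);
 *       }
 *   }
 */
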
/**
 * cpu_paging_enabled:
 * @cpu: The CPU whose state is to be inspected.
 *
 * Returns: %true if paging is enabled, %false otherwise.
 */
bool cpu_paging_enabled(const CPUState *cpu);

/**
 * cpu_get_memory_mapping:
 * @cpu: The CPU whose memory mappings are to be obtained.
 * @list: Where to write the memory mappings to.
 * @errp: Pointer for reporting an #Error.
 */
void cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list,
                            Error **errp);

/**
 * cpu_write_elf64_note:
 * @f: pointer to a function that writes memory to a file
 * @cpu: The CPU whose memory is to be dumped
 * @cpuid: ID number of the CPU
 * @opaque: pointer to the CPUState struct
 */
int cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu,
                         int cpuid, void *opaque);

/**
 * cpu_write_elf64_qemunote:
 * @f: pointer to a function that writes memory to a file
 * @cpu: The CPU whose memory is to be dumped
 * @opaque: pointer to the CPUState struct
 */
int cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
                             void *opaque);

/**
 * cpu_write_elf32_note:
 * @f: pointer to a function that writes memory to a file
 * @cpu: The CPU whose memory is to be dumped
 * @cpuid: ID number of the CPU
 * @opaque: pointer to the CPUState struct
 */
int cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu,
                         int cpuid, void *opaque);

/**
 * cpu_write_elf32_qemunote:
 * @f: pointer to a function that writes memory to a file
 * @cpu: The CPU whose memory is to be dumped
 * @opaque: pointer to the CPUState struct
 */
int cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
                             void *opaque);

/**
 * CPUDumpFlags:
 * @CPU_DUMP_CODE:
 * @CPU_DUMP_FPU: dump FPU register state, not just integer
 * @CPU_DUMP_CCOP: dump info about TCG QEMU's condition code optimization state
 */
enum CPUDumpFlags {
    CPU_DUMP_CODE = 0x00010000,
    CPU_DUMP_FPU = 0x00020000,
    CPU_DUMP_CCOP = 0x00040000,
};

/**
 * cpu_dump_state:
 * @cpu: The CPU whose state is to be dumped.
 * @f: File to dump to.
 * @cpu_fprintf: Function to dump with.
 * @flags: Flags specifying what to dump.
 *
 * Dumps CPU state.
 */
void cpu_dump_state(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
                    int flags);

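/*
 * Illustrative sketch (not part of the original header): dumping the current
 * CPU's state to stderr, including FPU registers.  Passing fprintf as the
 * fprintf_function follows the usual pattern elsewhere in QEMU.
 *
 *   static void example_dump_current_cpu(void)
 *   {
 *       cpu_dump_state(current_cpu, stderr, fprintf, CPU_DUMP_FPU);
 *   }
 */
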
/**
 * cpu_dump_statistics:
 * @cpu: The CPU whose state is to be dumped.
 * @f: File to dump to.
 * @cpu_fprintf: Function to dump with.
 * @flags: Flags specifying what to dump.
 *
 * Dumps CPU statistics.
 */
void cpu_dump_statistics(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
                         int flags);

#ifndef CONFIG_USER_ONLY
/**
 * cpu_get_phys_page_debug:
 * @cpu: The CPU to obtain the physical page address for.
 * @addr: The virtual address.
 *
 * Obtains the physical page corresponding to a virtual one.
 * Use it only for debugging because no protection checks are done.
 *
 * Returns: Corresponding physical page address or -1 if no page found.
 */
static inline hwaddr cpu_get_phys_page_debug(CPUState *cpu, vaddr addr)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    return cc->get_phys_page_debug(cpu, addr);
}
#endif

/**
 * cpu_reset:
 * @cpu: The CPU whose state is to be reset.
 */
void cpu_reset(CPUState *cpu);

/**
 * cpu_class_by_name:
 * @typename: The CPU base type.
 * @cpu_model: The model string without any parameters.
 *
 * Looks up a CPU #ObjectClass matching name @cpu_model.
 *
 * Returns: A #CPUClass or %NULL if no matching class is found.
 */
ObjectClass *cpu_class_by_name(const char *typename, const char *cpu_model);

/**
 * cpu_generic_init:
 * @typename: The CPU base type.
 * @cpu_model: The model string including optional parameters.
 *
 * Instantiates a CPU, processes optional parameters and realizes the CPU.
 *
 * Returns: A #CPUState or %NULL if an error occurred.
 */
CPUState *cpu_generic_init(const char *typename, const char *cpu_model);

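/*
 * Illustrative sketch (not part of the original header): how a board or
 * target might create a CPU from a -cpu style model string.  TYPE_MYCPU and
 * the default model string are hypothetical.
 *
 *   static CPUState *example_create_cpu(const char *cpu_model)
 *   {
 *       if (cpu_model == NULL) {
 *           cpu_model = "any";
 *       }
 *       return cpu_generic_init(TYPE_MYCPU, cpu_model);
 *   }
 */
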
/**
 * cpu_has_work:
 * @cpu: The vCPU to check.
 *
 * Checks whether the CPU has work to do.
 *
 * Returns: %true if the CPU has work, %false otherwise.
 */
static inline bool cpu_has_work(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    g_assert(cc->has_work);
    return cc->has_work(cpu);
}

/**
 * qemu_cpu_is_self:
 * @cpu: The vCPU to check against.
 *
 * Checks whether the caller is executing on the vCPU thread.
 *
 * Returns: %true if called from @cpu's thread, %false otherwise.
 */
bool qemu_cpu_is_self(CPUState *cpu);

/**
 * qemu_cpu_kick:
 * @cpu: The vCPU to kick.
 *
 * Kicks @cpu's thread.
 */
void qemu_cpu_kick(CPUState *cpu);

/**
 * cpu_is_stopped:
 * @cpu: The CPU to check.
 *
 * Checks whether the CPU is stopped.
 *
 * Returns: %true if run state is not running or if artificially stopped;
 * %false otherwise.
 */
bool cpu_is_stopped(CPUState *cpu);

/**
 * run_on_cpu:
 * @cpu: The vCPU to run on.
 * @func: The function to be executed.
 * @data: Data to pass to the function.
 *
 * Schedules the function @func for execution on the vCPU @cpu.
 */
void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data);

/**
 * async_run_on_cpu:
 * @cpu: The vCPU to run on.
 * @func: The function to be executed.
 * @data: Data to pass to the function.
 *
 * Schedules the function @func for execution on the vCPU @cpu asynchronously.
 */
void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data);

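/*
 * Illustrative sketch (not part of the original header): executing a helper
 * on a specific vCPU's thread.  The helper and wrapper names are hypothetical;
 * run_on_cpu() waits for @func to complete, async_run_on_cpu() does not.
 *
 *   static void example_do_reset(void *data)
 *   {
 *       cpu_reset(data);
 *   }
 *
 *   static void example_reset_on_vcpu(CPUState *cpu)
 *   {
 *       run_on_cpu(cpu, example_do_reset, cpu);
 *   }
 */
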
/**
 * qemu_get_cpu:
 * @index: The CPUState@cpu_index value of the CPU to obtain.
 *
 * Gets a CPU matching @index.
 *
 * Returns: The CPU or %NULL if there is no matching CPU.
 */
CPUState *qemu_get_cpu(int index);

/**
 * cpu_exists:
 * @id: Guest-exposed CPU ID to lookup.
 *
 * Searches for a CPU with the specified ID.
 *
 * Returns: %true if the CPU is found, %false otherwise.
 */
bool cpu_exists(int64_t id);

#ifndef CONFIG_USER_ONLY

typedef void (*CPUInterruptHandler)(CPUState *, int);

extern CPUInterruptHandler cpu_interrupt_handler;

/**
 * cpu_interrupt:
 * @cpu: The CPU to set an interrupt on.
 * @mask: The interrupts to set.
 *
 * Invokes the interrupt handler.
 */
static inline void cpu_interrupt(CPUState *cpu, int mask)
{
    cpu_interrupt_handler(cpu, mask);
}

#else /* USER_ONLY */

void cpu_interrupt(CPUState *cpu, int mask);

#endif /* USER_ONLY */

#ifdef CONFIG_SOFTMMU
static inline void cpu_unassigned_access(CPUState *cpu, hwaddr addr,
                                         bool is_write, bool is_exec,
                                         int opaque, unsigned size)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (cc->do_unassigned_access) {
        cc->do_unassigned_access(cpu, addr, is_write, is_exec, opaque, size);
    }
}

static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
                                        int is_write, int is_user,
                                        uintptr_t retaddr)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    return cc->do_unaligned_access(cpu, addr, is_write, is_user, retaddr);
}
#endif

/**
 * cpu_reset_interrupt:
 * @cpu: The CPU to clear the interrupt on.
 * @mask: The interrupt mask to clear.
 *
 * Resets interrupts on the vCPU @cpu.
 */
void cpu_reset_interrupt(CPUState *cpu, int mask);

/**
 * cpu_exit:
 * @cpu: The CPU to exit.
 *
 * Requests the CPU @cpu to exit execution.
 */
void cpu_exit(CPUState *cpu);

/**
 * cpu_resume:
 * @cpu: The CPU to resume.
 *
 * Resumes CPU, i.e. puts CPU into runnable state.
 */
void cpu_resume(CPUState *cpu);

/**
 * qemu_init_vcpu:
 * @cpu: The vCPU to initialize.
 *
 * Initializes a vCPU.
 */
void qemu_init_vcpu(CPUState *cpu);

#define SSTEP_ENABLE  0x1  /* Enable simulated HW single stepping */
#define SSTEP_NOIRQ   0x2  /* Do not use IRQ while single stepping */
#define SSTEP_NOTIMER 0x4  /* Do not run timers while single stepping */

/**
 * cpu_single_step:
 * @cpu: CPU to set the flags for.
 * @enabled: Flags to enable.
 *
 * Enables or disables single-stepping for @cpu.
 */
void cpu_single_step(CPUState *cpu, int enabled);

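/*
 * Illustrative sketch (not part of the original header): enabling simulated
 * single stepping without interrupts or timers, e.g. from a debug stub.
 *
 *   static void example_enable_single_step(CPUState *cpu)
 *   {
 *       cpu_single_step(cpu, SSTEP_ENABLE | SSTEP_NOIRQ | SSTEP_NOTIMER);
 *   }
 */
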
/* Breakpoint/watchpoint flags */
#define BP_MEM_READ           0x01
#define BP_MEM_WRITE          0x02
#define BP_MEM_ACCESS         (BP_MEM_READ | BP_MEM_WRITE)
#define BP_STOP_BEFORE_ACCESS 0x04
/* 0x08 currently unused */
#define BP_GDB                0x10
#define BP_CPU                0x20
#define BP_WATCHPOINT_HIT_READ 0x40
#define BP_WATCHPOINT_HIT_WRITE 0x80
#define BP_WATCHPOINT_HIT (BP_WATCHPOINT_HIT_READ | BP_WATCHPOINT_HIT_WRITE)

int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint);
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags);
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint);
void cpu_breakpoint_remove_all(CPUState *cpu, int mask);

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint);
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr,
                          vaddr len, int flags);
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint);
void cpu_watchpoint_remove_all(CPUState *cpu, int mask);

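/*
 * Illustrative sketch (not part of the original header): planting a GDB
 * breakpoint and a write watchpoint from a debug front end.  The addresses
 * and length are made up for the example.
 *
 *   static int example_plant_debug_points(CPUState *cpu)
 *   {
 *       int ret;
 *
 *       ret = cpu_breakpoint_insert(cpu, 0x1000, BP_GDB, NULL);
 *       if (ret < 0) {
 *           return ret;
 *       }
 *       return cpu_watchpoint_insert(cpu, 0x2000, 4,
 *                                    BP_GDB | BP_MEM_WRITE, NULL);
 *   }
 */
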
void QEMU_NORETURN cpu_abort(CPUState *cpu, const char *fmt, ...)
    GCC_FMT_ATTR(2, 3);

#ifdef CONFIG_SOFTMMU
extern const struct VMStateDescription vmstate_cpu_common;
#else
#define vmstate_cpu_common vmstate_dummy
#endif

#define VMSTATE_CPU() {                                                     \
    .name = "parent_obj",                                                   \
    .size = sizeof(CPUState),                                               \
    .vmsd = &vmstate_cpu_common,                                            \
    .flags = VMS_STRUCT,                                                    \
    .offset = 0,                                                            \
}

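/*
 * Illustrative sketch (an assumption, not part of the original header): how a
 * target's VMStateDescription typically embeds the common CPU state.  The
 * MyCPU type and env.some_reg field are hypothetical.
 *
 *   static const VMStateDescription vmstate_mycpu = {
 *       .name = "cpu",
 *       .version_id = 1,
 *       .minimum_version_id = 1,
 *       .fields = (VMStateField[]) {
 *           VMSTATE_CPU(),
 *           VMSTATE_UINT32(env.some_reg, MyCPU),
 *           VMSTATE_END_OF_LIST()
 *       }
 *   };
 */
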
#endif