#include "disas/bfd.h"
#include "exec/hwaddr.h"
#include "exec/memattrs.h"
+#include "qapi/qapi-types-run-state.h"
#include "qemu/bitmap.h"
#include "qemu/queue.h"
#include "qemu/thread.h"
* @has_work: Callback for checking if there is work to do.
* @do_interrupt: Callback for interrupt handling.
* @do_unassigned_access: Callback for unassigned access handling.
+ * (this is deprecated: new targets should use do_transaction_failed instead)
* @do_unaligned_access: Callback for unaligned access handling, if
* the target defines #ALIGNED_ONLY.
+ * @do_transaction_failed: Callback for handling failed memory transactions
+ * (i.e. bus faults or external aborts; not MMU faults). An implementation
+ * sketch follows the CPUClass struct below.
* @virtio_is_big_endian: Callback to return %true if a CPU which supports
* runtime configurable endianness is currently big-endian. Non-configurable
* CPUs can use the default implementation of this method. This method should
* before the insn which triggers a watchpoint rather than after it.
* @gdb_arch_name: Optional callback that returns the architecture name known
* to GDB. The caller must free the returned string with g_free.
+ * @gdb_get_dynamic_xml: Callback to return dynamically generated XML for the
+ * gdb stub. Returns a pointer to the XML contents for the specified XML file
+ * or NULL if the CPU doesn't have dynamically generated content for it.
* @cpu_exec_enter: Callback for cpu_exec preparation.
* @cpu_exec_exit: Callback for cpu_exec cleanup.
* @cpu_exec_interrupt: Callback for processing interrupts in cpu_exec.
void (*do_unaligned_access)(CPUState *cpu, vaddr addr,
MMUAccessType access_type,
int mmu_idx, uintptr_t retaddr);
+ void (*do_transaction_failed)(CPUState *cpu, hwaddr physaddr, vaddr addr,
+ unsigned size, MMUAccessType access_type,
+ int mmu_idx, MemTxAttrs attrs,
+ MemTxResult response, uintptr_t retaddr);
bool (*virtio_is_big_endian)(CPUState *cpu);
int (*memory_rw_debug)(CPUState *cpu, vaddr addr,
uint8_t *buf, int len, bool is_write);
Error **errp);
void (*set_pc)(CPUState *cpu, vaddr value);
void (*synchronize_from_tb)(CPUState *cpu, struct TranslationBlock *tb);
- int (*handle_mmu_fault)(CPUState *cpu, vaddr address, int rw,
+ int (*handle_mmu_fault)(CPUState *cpu, vaddr address, int size, int rw,
int mmu_index);
hwaddr (*get_phys_page_debug)(CPUState *cpu, vaddr addr);
hwaddr (*get_phys_page_attrs_debug)(CPUState *cpu, vaddr addr,
void *opaque);
const struct VMStateDescription *vmsd;
- int gdb_num_core_regs;
const char *gdb_core_xml_file;
gchar * (*gdb_arch_name)(CPUState *cpu);
- bool gdb_stop_before_watchpoint;
-
+ const char * (*gdb_get_dynamic_xml)(CPUState *cpu, const char *xmlname);
void (*cpu_exec_enter)(CPUState *cpu);
void (*cpu_exec_exit)(CPUState *cpu);
bool (*cpu_exec_interrupt)(CPUState *cpu, int interrupt_request);
void (*disas_set_info)(CPUState *cpu, disassemble_info *info);
vaddr (*adjust_watchpoint_address)(CPUState *cpu, vaddr addr, int len);
+ void (*tcg_initialize)(void);
+
+ /* Keep non-pointer data at the end to minimize holes. */
+ int gdb_num_core_regs;
+ bool gdb_stop_before_watchpoint;
} CPUClass;
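
/*
 * Illustrative sketch (not part of this header or of the patch above): how a
 * target might implement and register the new do_transaction_failed and
 * gdb_get_dynamic_xml hooks.  The "XYZ" CPU type, its env->fault_address
 * field, EXCP_BUS_ERROR and the dyn_xml buffer are hypothetical names; the
 * hook signatures match the CPUClass declarations above, and
 * cpu_loop_exit_restore() comes from "exec/exec-all.h".
 */
static void xyz_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                          vaddr addr, unsigned size,
                                          MMUAccessType access_type,
                                          int mmu_idx, MemTxAttrs attrs,
                                          MemTxResult response,
                                          uintptr_t retaddr)
{
    XYZCPU *cpu = XYZ_CPU(cs);
    CPUXYZState *env = &cpu->env;

    /* Record the faulting address and report a bus error to the guest. */
    env->fault_address = addr;
    cs->exception_index = EXCP_BUS_ERROR;
    cpu_loop_exit_restore(cs, retaddr);
}

static const char *xyz_cpu_gdb_get_dynamic_xml(CPUState *cs,
                                               const char *xmlname)
{
    XYZCPU *cpu = XYZ_CPU(cs);

    if (strcmp(xmlname, "xyz-dynamic.xml") == 0) {
        /* Built once at CPU init time from the implemented register set. */
        return cpu->dyn_xml;
    }
    return NULL;
}

static void xyz_cpu_class_init(ObjectClass *oc, void *data)
{
    CPUClass *cc = CPU_CLASS(oc);

    cc->do_transaction_failed = xyz_cpu_do_transaction_failed;
    cc->gdb_get_dynamic_xml = xyz_cpu_gdb_get_dynamic_xml;
}
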
#ifdef HOST_WORDS_BIGENDIAN
* @trace_dstate_delayed: Delayed changes to trace_dstate (includes all changes
* to @trace_dstate).
* @trace_dstate: Dynamic tracing state of events for this vCPU (bitmask).
+ * @ignore_memory_transaction_failures: Cached copy of the MachineState
+ * flag of the same name: allows the board to suppress calls to the
+ * CPU do_transaction_failed hook function (see the board-level sketch
+ * after the struct below).
*
* State of one CPU core or thread.
*/
bool unplug;
bool crash_occurred;
bool exit_request;
+ uint32_t cflags_next_tb;
/* updates protected by BQL */
uint32_t interrupt_request;
int singlestep_enabled;
DECLARE_BITMAP(trace_dstate, CPU_TRACE_DSTATE_MAX_EVENTS);
/* TODO Move common fields from CPUArchState here. */
- int cpu_index; /* used by alpha TCG */
- uint32_t halted; /* used by alpha, cris, ppc TCG */
+ int cpu_index;
+ uint32_t halted;
uint32_t can_do_io;
- int32_t exception_index; /* used by m68k TCG */
+ int32_t exception_index;
/* shared by kvm, hax and hvf */
bool vcpu_dirty;
*/
bool throttle_thread_scheduled;
+ bool ignore_memory_transaction_failures;
+
/* Note that this is accessed at the start of every TB via a negative
offset from AREG0. Leave this field at the end so as to make the
(absolute value) offset as small as possible. This reduces code
* unnecessary flushes.
*/
uint16_t pending_tlb_flush;
+
+ int hvf_fd;
};
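
/*
 * Illustrative sketch (not part of this header): the
 * ignore_memory_transaction_failures field above is filled in from the
 * machine-level flag of the same name when the CPU is realized, so a legacy
 * board that does not model bus faults can opt out of the
 * do_transaction_failed hook as shown here.  "myboard" is a hypothetical
 * machine; the MachineClass flag is assumed to be the one declared in
 * "hw/boards.h".
 */
static void myboard_machine_class_init(ObjectClass *oc, void *data)
{
    MachineClass *mc = MACHINE_CLASS(oc);

    /* Keep the old behaviour: failed transactions are silently ignored
     * instead of being reported to the CPU. */
    mc->ignore_memory_transaction_failures = true;
}
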
QTAILQ_HEAD(CPUTailQ, CPUState);
ObjectClass *cpu_class_by_name(const char *typename, const char *cpu_model);
/**
- * cpu_generic_init:
- * @typename: The CPU base type.
- * @cpu_model: The model string including optional parameters.
+ * cpu_create:
+ * @typename: The CPU type.
*
- * Instantiates a CPU, processes optional parameters and realizes the CPU.
+ * Instantiates and realizes a CPU.
*
* Returns: A #CPUState or %NULL if an error occurred.
*/
-CPUState *cpu_generic_init(const char *typename, const char *cpu_model);
+CPUState *cpu_create(const char *typename);
+
+/**
+ * parse_cpu_model:
+ * @cpu_model: The model string including optional parameters.
+ *
+ * Processes optional parameters and registers them as global properties.
+ *
+ * Returns: the type name of the CPU to create. On error it prints a
+ * message and terminates the process.
+ */
+const char *parse_cpu_model(const char *cpu_model);
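
/*
 * Illustrative usage (not part of this header): boards typically turn the
 * user-supplied -cpu string into a concrete QOM type name with
 * parse_cpu_model() and then instantiate it with cpu_create().  The wrapper
 * function and the "model,prop=val" input format are only an example of how
 * the two calls combine.
 */
static CPUState *myboard_create_cpu(const char *cpu_model)
{
    /* Exits with an error message if cpu_model cannot be parsed;
     * any optional properties become global properties. */
    const char *cpu_type = parse_cpu_model(cpu_model);

    return cpu_create(cpu_type);
}
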
/**
* cpu_has_work:
*/
bool cpu_exists(int64_t id);
+/**
+ * cpu_by_arch_id:
+ * @id: Guest-exposed CPU ID of the CPU to obtain.
+ *
+ * Get a CPU with matching @id.
+ *
+ * Returns: The CPU or %NULL if there is no matching CPU.
+ */
+CPUState *cpu_by_arch_id(int64_t id);
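
/*
 * Illustrative usage (not part of this header): looking up a (possibly
 * hot-plugged) CPU by its guest-visible ID, for example while handling a
 * board-specific doorbell register write.  The surrounding function and the
 * choice of CPU_INTERRUPT_HARD are hypothetical.
 */
static void myboard_kick_cpu(int64_t arch_id)
{
    CPUState *cs = cpu_by_arch_id(arch_id);

    if (cs == NULL) {
        /* No CPU with this ID exists (yet). */
        return;
    }
    cpu_interrupt(cs, CPU_INTERRUPT_HARD);
}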
+
/**
* cpu_throttle_set:
* @new_throttle_pct: Percent of sleep time. Valid range is 1 to 99.
cc->do_unaligned_access(cpu, addr, access_type, mmu_idx, retaddr);
}
+
+static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr,
+ vaddr addr, unsigned size,
+ MMUAccessType access_type,
+ int mmu_idx, MemTxAttrs attrs,
+ MemTxResult response,
+ uintptr_t retaddr)
+{
+ CPUClass *cc = CPU_GET_CLASS(cpu);
+
+ if (!cpu->ignore_memory_transaction_failures && cc->do_transaction_failed) {
+ cc->do_transaction_failed(cpu, physaddr, addr, size, access_type,
+ mmu_idx, attrs, response, retaddr);
+ }
+}
#endif
#endif /* NEED_CPU_H */