void cpu_x86_close(CPUX86State *s);
void x86_cpu_list (FILE *f, fprintf_function cpu_fprintf, const char *optarg);
void x86_cpudef_setup(void);
-int cpu_x86_support_mca_broadcast(CPUState *env);
+int cpu_x86_support_mca_broadcast(CPUX86State *env);
int cpu_get_pic_interrupt(CPUX86State *s);
/* MSDOS compatibility mode FPU exception support */
#define MMU_MODE0_SUFFIX _kernel
#define MMU_MODE1_SUFFIX _user
#define MMU_USER_IDX 1
-static inline int cpu_mmu_index (CPUState *env)
+static inline int cpu_mmu_index (CPUX86State *env)
{
return (env->hflags & HF_CPL_MASK) == 3 ? 1 : 0;
}
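/* Illustrative sketch (not part of this patch): the index returned above
 * selects the per-mode softmmu TLB, matching the MMU_MODE{0,1}_SUFFIX
 * definitions: 0 = kernel, 1 = user (CPL == 3). A typical consumer:
 *
 *     int idx = cpu_mmu_index(env);
 *     tlb_set_page(env, vaddr, paddr, prot, idx, TARGET_PAGE_SIZE);
 */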
void optimize_flags_init(void);
#if defined(CONFIG_USER_ONLY)
-static inline void cpu_clone_regs(CPUState *env, target_ulong newsp)
+static inline void cpu_clone_regs(CPUX86State *env, target_ulong newsp)
{
if (newsp)
env->regs[R_ESP] = newsp;
#include "hw/apic.h"
#endif
-static inline bool cpu_has_work(CPUState *env)
+static inline bool cpu_has_work(CPUX86State *env)
{
return ((env->interrupt_request & CPU_INTERRUPT_HARD) &&
(env->eflags & IF_MASK)) ||
#include "exec-all.h"
-static inline void cpu_pc_from_tb(CPUState *env, TranslationBlock *tb)
+static inline void cpu_pc_from_tb(CPUX86State *env, TranslationBlock *tb)
{
env->eip = tb->pc - tb->cs_base;
}
-static inline void cpu_get_tb_cpu_state(CPUState *env, target_ulong *pc,
+static inline void cpu_get_tb_cpu_state(CPUX86State *env, target_ulong *pc,
target_ulong *cs_base, int *flags)
{
*cs_base = env->segs[R_CS].base;
(env->eflags & (IOPL_MASK | TF_MASK | RF_MASK | VM_MASK));
}
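/* Illustrative note: the (pc, cs_base, flags) triple filled in here keys the
 * translation-block lookup, schematically:
 *
 *     cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
 *     tb = tb_find(pc, cs_base, flags);   // hypothetical helper name
 *
 * so any hflags/eflags bit folded into 'flags' forces a fresh translation
 * when it changes.
 */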
-void do_cpu_init(CPUState *env);
-void do_cpu_sipi(CPUState *env);
+void do_cpu_init(CPUX86State *env);
+void do_cpu_sipi(CPUX86State *env);
#define MCE_INJECT_BROADCAST 1
#define MCE_INJECT_UNCOND_AO 2
-void cpu_x86_inject_mce(Monitor *mon, CPUState *cenv, int bank,
+void cpu_x86_inject_mce(Monitor *mon, CPUX86State *cenv, int bank,
uint64_t status, uint64_t mcg_status, uint64_t addr,
uint64_t misc, int flags);
/* op_helper.c */
-void do_interrupt(CPUState *env);
-void do_interrupt_x86_hardirq(CPUState *env, int intno, int is_hw);
-void QEMU_NORETURN raise_exception_env(int exception_index, CPUState *nenv);
-void QEMU_NORETURN raise_exception_err_env(CPUState *nenv, int exception_index,
+void do_interrupt(CPUX86State *env);
+void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw);
+void QEMU_NORETURN raise_exception_env(int exception_index, CPUX86State *nenv);
+void QEMU_NORETURN raise_exception_err_env(CPUX86State *nenv, int exception_index,
int error_code);
-void do_smm_enter(CPUState *env1);
+void do_smm_enter(CPUX86State *env1);
-void svm_check_intercept(CPUState *env1, uint32_t type);
+void svm_check_intercept(CPUX86State *env1, uint32_t type);
-uint32_t cpu_cc_compute_all(CPUState *env1, int op);
+uint32_t cpu_cc_compute_all(CPUX86State *env1, int op);
-void cpu_report_tpr_access(CPUState *env, TPRAccess access);
+void cpu_report_tpr_access(CPUX86State *env, TPRAccess access);
#endif /* CPU_I386_H */
//#define DEBUG_MMU
/* NOTE: must be called outside the CPU execute loop */
-void cpu_state_reset(CPUState *env)
+void cpu_state_reset(CPUX86State *env)
{
int i;
g_free(env);
}
-static void cpu_x86_version(CPUState *env, int *family, int *model)
+static void cpu_x86_version(CPUX86State *env, int *family, int *model)
{
int cpuver = env->cpuid_version;
}
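/* The body is elided above; the conventional CPUID version decoding (assumed
 * here) is:
 *
 *     *family = (cpuver >> 8) & 0x0f;
 *     *model  = ((cpuver >> 12) & 0xf0) + ((cpuver >> 4) & 0x0f);
 *
 * e.g. cpuid_version 0x000206a7 decodes to family 6, model 0x2a.
 */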
/* Broadcast MCA signal for processor version 06H_EH and above */
-int cpu_x86_support_mca_broadcast(CPUState *env)
+int cpu_x86_support_mca_broadcast(CPUX86State *env)
{
int family = 0;
int model = 0;
};
static void
-cpu_x86_dump_seg_cache(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
+cpu_x86_dump_seg_cache(CPUX86State *env, FILE *f, fprintf_function cpu_fprintf,
const char *name, struct SegmentCache *sc)
{
#ifdef TARGET_X86_64
#define DUMP_CODE_BYTES_TOTAL 50
#define DUMP_CODE_BYTES_BACKWARD 20
-void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
+void cpu_dump_state(CPUX86State *env, FILE *f, fprintf_function cpu_fprintf,
int flags)
{
int eflags, i, nb;
return 1;
}
-target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
+target_phys_addr_t cpu_get_phys_page_debug(CPUX86State *env, target_ulong addr)
{
target_ulong pde_addr, pte_addr;
uint64_t pte;
return paddr;
}
-void hw_breakpoint_insert(CPUState *env, int index)
+void hw_breakpoint_insert(CPUX86State *env, int index)
{
int type, err = 0;
env->cpu_breakpoint[index] = NULL;
}
-void hw_breakpoint_remove(CPUState *env, int index)
+void hw_breakpoint_remove(CPUX86State *env, int index)
{
if (!env->cpu_breakpoint[index])
return;
}
}
-int check_hw_breakpoints(CPUState *env, int force_dr6_update)
+int check_hw_breakpoints(CPUX86State *env, int force_dr6_update)
{
target_ulong dr6;
int reg, type;
static CPUDebugExcpHandler *prev_debug_excp_handler;
-static void breakpoint_handler(CPUState *env)
+static void breakpoint_handler(CPUX86State *env)
{
CPUBreakpoint *bp;
typedef struct MCEInjectionParams {
Monitor *mon;
- CPUState *env;
+ CPUX86State *env;
int bank;
uint64_t status;
uint64_t mcg_status;
static void do_inject_x86_mce(void *data)
{
MCEInjectionParams *params = data;
- CPUState *cenv = params->env;
+ CPUX86State *cenv = params->env;
uint64_t *banks = cenv->mce_banks + 4 * params->bank;
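/* Layout implied by the "4 *" stride: each bank is four u64 slots, in order
 * MCi_CTL, MCi_STATUS, MCi_ADDR, MCi_MISC. */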
cpu_synchronize_state(cenv);
}
}
-void cpu_x86_inject_mce(Monitor *mon, CPUState *cenv, int bank,
+void cpu_x86_inject_mce(Monitor *mon, CPUX86State *cenv, int bank,
uint64_t status, uint64_t mcg_status, uint64_t addr,
uint64_t misc, int flags)
{
.flags = flags,
};
unsigned bank_num = cenv->mcg_cap & 0xff;
- CPUState *env;
+ CPUX86State *env;
if (!cenv->mcg_cap) {
monitor_printf(mon, "MCE injection not supported\n");
}
}
-void cpu_report_tpr_access(CPUState *env, TPRAccess access)
+void cpu_report_tpr_access(CPUX86State *env, TPRAccess access)
{
TranslationBlock *tb;
}
#if !defined(CONFIG_USER_ONLY)
-void do_cpu_init(CPUState *env)
+void do_cpu_init(CPUX86State *env)
{
int sipi = env->interrupt_request & CPU_INTERRUPT_SIPI;
uint64_t pat = env->pat;
env->halted = !cpu_is_bsp(env);
}
-void do_cpu_sipi(CPUState *env)
+void do_cpu_sipi(CPUX86State *env)
{
apic_sipi(env->apic_state);
}
#else
-void do_cpu_init(CPUState *env)
+void do_cpu_init(CPUX86State *env)
{
}
-void do_cpu_sipi(CPUState *env)
+void do_cpu_sipi(CPUX86State *env)
{
}
#endif
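/* The empty bodies above are the CONFIG_USER_ONLY stubs: INIT and SIPI only
 * make sense with an emulated local APIC, which user-mode builds lack. */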
return -ENOSYS;
}
-static void kvm_mce_inject(CPUState *env, target_phys_addr_t paddr, int code)
+static void kvm_mce_inject(CPUX86State *env, target_phys_addr_t paddr, int code)
{
uint64_t status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN |
MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S;
exit(1);
}
-int kvm_arch_on_sigbus_vcpu(CPUState *env, int code, void *addr)
+int kvm_arch_on_sigbus_vcpu(CPUX86State *env, int code, void *addr)
{
ram_addr_t ram_addr;
target_phys_addr_t paddr;
return 0;
}
-static int kvm_inject_mce_oldstyle(CPUState *env)
+static int kvm_inject_mce_oldstyle(CPUX86State *env)
{
if (!kvm_has_vcpu_events() && env->exception_injected == EXCP12_MCHK) {
unsigned int bank, bank_num = env->mcg_cap & 0xff;
static void cpu_update_state(void *opaque, int running, RunState state)
{
- CPUState *env = opaque;
+ CPUX86State *env = opaque;
if (running) {
env->tsc_valid = false;
}
}
-int kvm_arch_init_vcpu(CPUState *env)
+int kvm_arch_init_vcpu(CPUX86State *env)
{
struct {
struct kvm_cpuid2 cpuid;
return 0;
}
-void kvm_arch_reset_vcpu(CPUState *env)
+void kvm_arch_reset_vcpu(CPUX86State *env)
{
env->exception_injected = -1;
env->interrupt_injected = -1;
}
}
-static int kvm_getput_regs(CPUState *env, int set)
+static int kvm_getput_regs(CPUX86State *env, int set)
{
struct kvm_regs regs;
int ret = 0;
return ret;
}
-static int kvm_put_fpu(CPUState *env)
+static int kvm_put_fpu(CPUX86State *env)
{
struct kvm_fpu fpu;
int i;
#define XSAVE_XSTATE_BV 128
#define XSAVE_YMMH_SPACE 144
-static int kvm_put_xsave(CPUState *env)
+static int kvm_put_xsave(CPUX86State *env)
{
struct kvm_xsave* xsave = env->kvm_xsave_buf;
uint16_t cwd, swd, twd;
return r;
}
-static int kvm_put_xcrs(CPUState *env)
+static int kvm_put_xcrs(CPUX86State *env)
{
struct kvm_xcrs xcrs;
return kvm_vcpu_ioctl(env, KVM_SET_XCRS, &xcrs);
}
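/* All the kvm_put_... and kvm_get_... helpers in this file follow the same
 * shape: marshal CPUX86State fields into the kernel ABI struct, then issue a
 * single vcpu ioctl. A hedged reconstruction of the elided body above:
 *
 *     struct kvm_xcrs xcrs;
 *     xcrs.flags = 0;
 *     xcrs.nr_xcrs = 1;
 *     xcrs.xcrs[0].xcr = 0;             // XCR0
 *     xcrs.xcrs[0].value = env->xcr0;
 *     return kvm_vcpu_ioctl(env, KVM_SET_XCRS, &xcrs);
 */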
-static int kvm_put_sregs(CPUState *env)
+static int kvm_put_sregs(CPUX86State *env)
{
struct kvm_sregs sregs;
entry->data = value;
}
-static int kvm_put_msrs(CPUState *env, int level)
+static int kvm_put_msrs(CPUX86State *env, int level)
{
struct {
struct kvm_msrs info;
}
-static int kvm_get_fpu(CPUState *env)
+static int kvm_get_fpu(CPUX86State *env)
{
struct kvm_fpu fpu;
int i, ret;
return 0;
}
-static int kvm_get_xsave(CPUState *env)
+static int kvm_get_xsave(CPUX86State *env)
{
struct kvm_xsave* xsave = env->kvm_xsave_buf;
int ret, i;
return 0;
}
-static int kvm_get_xcrs(CPUState *env)
+static int kvm_get_xcrs(CPUX86State *env)
{
int i, ret;
struct kvm_xcrs xcrs;
return 0;
}
-static int kvm_get_sregs(CPUState *env)
+static int kvm_get_sregs(CPUX86State *env)
{
struct kvm_sregs sregs;
uint32_t hflags;
return 0;
}
-static int kvm_get_msrs(CPUState *env)
+static int kvm_get_msrs(CPUX86State *env)
{
struct {
struct kvm_msrs info;
return 0;
}
-static int kvm_put_mp_state(CPUState *env)
+static int kvm_put_mp_state(CPUX86State *env)
{
struct kvm_mp_state mp_state = { .mp_state = env->mp_state };
return kvm_vcpu_ioctl(env, KVM_SET_MP_STATE, &mp_state);
}
-static int kvm_get_mp_state(CPUState *env)
+static int kvm_get_mp_state(CPUX86State *env)
{
struct kvm_mp_state mp_state;
int ret;
return 0;
}
-static int kvm_get_apic(CPUState *env)
+static int kvm_get_apic(CPUX86State *env)
{
DeviceState *apic = env->apic_state;
struct kvm_lapic_state kapic;
return 0;
}
-static int kvm_put_apic(CPUState *env)
+static int kvm_put_apic(CPUX86State *env)
{
DeviceState *apic = env->apic_state;
struct kvm_lapic_state kapic;
return 0;
}
-static int kvm_put_vcpu_events(CPUState *env, int level)
+static int kvm_put_vcpu_events(CPUX86State *env, int level)
{
struct kvm_vcpu_events events;
return kvm_vcpu_ioctl(env, KVM_SET_VCPU_EVENTS, &events);
}
-static int kvm_get_vcpu_events(CPUState *env)
+static int kvm_get_vcpu_events(CPUX86State *env)
{
struct kvm_vcpu_events events;
int ret;
return 0;
}
-static int kvm_guest_debug_workarounds(CPUState *env)
+static int kvm_guest_debug_workarounds(CPUX86State *env)
{
int ret = 0;
unsigned long reinject_trap = 0;
return ret;
}
-static int kvm_put_debugregs(CPUState *env)
+static int kvm_put_debugregs(CPUX86State *env)
{
struct kvm_debugregs dbgregs;
int i;
return kvm_vcpu_ioctl(env, KVM_SET_DEBUGREGS, &dbgregs);
}
-static int kvm_get_debugregs(CPUState *env)
+static int kvm_get_debugregs(CPUX86State *env)
{
struct kvm_debugregs dbgregs;
int i, ret;
return 0;
}
-int kvm_arch_put_registers(CPUState *env, int level)
+int kvm_arch_put_registers(CPUX86State *env, int level)
{
int ret;
return 0;
}
-int kvm_arch_get_registers(CPUState *env)
+int kvm_arch_get_registers(CPUX86State *env)
{
int ret;
return 0;
}
-void kvm_arch_pre_run(CPUState *env, struct kvm_run *run)
+void kvm_arch_pre_run(CPUX86State *env, struct kvm_run *run)
{
int ret;
}
}
-void kvm_arch_post_run(CPUState *env, struct kvm_run *run)
+void kvm_arch_post_run(CPUX86State *env, struct kvm_run *run)
{
if (run->if_flag) {
env->eflags |= IF_MASK;
cpu_set_apic_base(env->apic_state, run->apic_base);
}
-int kvm_arch_process_async_events(CPUState *env)
+int kvm_arch_process_async_events(CPUX86State *env)
{
if (env->interrupt_request & CPU_INTERRUPT_MCE) {
/* We must not raise CPU_INTERRUPT_MCE if it's not supported. */
return env->halted;
}
-static int kvm_handle_halt(CPUState *env)
+static int kvm_handle_halt(CPUX86State *env)
{
if (!((env->interrupt_request & CPU_INTERRUPT_HARD) &&
(env->eflags & IF_MASK)) &&
return 0;
}
-static int kvm_handle_tpr_access(CPUState *env)
+static int kvm_handle_tpr_access(CPUX86State *env)
{
struct kvm_run *run = env->kvm_run;
return 1;
}
-int kvm_arch_insert_sw_breakpoint(CPUState *env, struct kvm_sw_breakpoint *bp)
+int kvm_arch_insert_sw_breakpoint(CPUX86State *env, struct kvm_sw_breakpoint *bp)
{
static const uint8_t int3 = 0xcc;
return 0;
}
-int kvm_arch_remove_sw_breakpoint(CPUState *env, struct kvm_sw_breakpoint *bp)
+int kvm_arch_remove_sw_breakpoint(CPUX86State *env, struct kvm_sw_breakpoint *bp)
{
uint8_t int3;
return ret;
}
-void kvm_arch_update_guest_debug(CPUState *env, struct kvm_guest_debug *dbg)
+void kvm_arch_update_guest_debug(CPUX86State *env, struct kvm_guest_debug *dbg)
{
const uint8_t type_code[] = {
[GDB_BREAKPOINT_HW] = 0x0,
#define VMX_INVALID_GUEST_STATE 0x80000021
-int kvm_arch_handle_exit(CPUState *env, struct kvm_run *run)
+int kvm_arch_handle_exit(CPUX86State *env, struct kvm_run *run)
{
uint64_t code;
int ret;
return ret;
}
-bool kvm_arch_stop_on_emulation_error(CPUState *env)
+bool kvm_arch_stop_on_emulation_error(CPUX86State *env)
{
kvm_cpu_synchronize_state(env);
return !(env->cr[0] & CR0_PE_MASK) ||
static bool fpregs_is_0(void *opaque, int version_id)
{
- CPUState *env = opaque;
+ CPUX86State *env = opaque;
return (env->fpregs_format_vmstate == 0);
}
static bool fpregs_is_1_mmx(void *opaque, int version_id)
{
- CPUState *env = opaque;
+ CPUX86State *env = opaque;
int guess_mmx;
guess_mmx = ((env->fptag_vmstate == 0xff) &&
static bool fpregs_is_1_no_mmx(void *opaque, int version_id)
{
- CPUState *env = opaque;
+ CPUX86State *env = opaque;
int guess_mmx;
guess_mmx = ((env->fptag_vmstate == 0xff) &&
static void cpu_pre_save(void *opaque)
{
- CPUState *env = opaque;
+ CPUX86State *env = opaque;
int i;
/* FPU */
static int cpu_post_load(void *opaque, int version_id)
{
- CPUState *env = opaque;
+ CPUX86State *env = opaque;
int i;
/* XXX: restore FPU round state */
static bool async_pf_msr_needed(void *opaque)
{
- CPUState *cpu = opaque;
+ CPUX86State *cpu = opaque;
return cpu->async_pf_en_msr != 0;
}
.minimum_version_id = 1,
.minimum_version_id_old = 1,
.fields = (VMStateField []) {
- VMSTATE_UINT64(async_pf_en_msr, CPUState),
+ VMSTATE_UINT64(async_pf_en_msr, CPUX86State),
VMSTATE_END_OF_LIST()
}
};
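/* Pattern note: blocks like the one above are VMState subsections. The diff
 * elides the enclosing VMStateDescription; a sketch of its assumed shape:
 *
 *     static const VMStateDescription vmstate_async_pf_msr = {
 *         .name = "cpu/async_pf_msr",          // name assumed
 *         .version_id = 1,
 *         .minimum_version_id = 1,
 *         ...
 *     };
 *
 * A subsection is written to the migration stream only when its companion
 * predicate (async_pf_msr_needed above) returns true, which keeps streams
 * compatible with older versions while the state is at its default.
 */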
static bool fpop_ip_dp_needed(void *opaque)
{
- CPUState *env = opaque;
+ CPUX86State *env = opaque;
return env->fpop != 0 || env->fpip != 0 || env->fpdp != 0;
}
.minimum_version_id = 1,
.minimum_version_id_old = 1,
.fields = (VMStateField []) {
- VMSTATE_UINT16(fpop, CPUState),
- VMSTATE_UINT64(fpip, CPUState),
- VMSTATE_UINT64(fpdp, CPUState),
+ VMSTATE_UINT16(fpop, CPUX86State),
+ VMSTATE_UINT64(fpip, CPUX86State),
+ VMSTATE_UINT64(fpdp, CPUX86State),
VMSTATE_END_OF_LIST()
}
};
static bool tscdeadline_needed(void *opaque)
{
- CPUState *env = opaque;
+ CPUX86State *env = opaque;
return env->tsc_deadline != 0;
}
.minimum_version_id = 1,
.minimum_version_id_old = 1,
.fields = (VMStateField []) {
- VMSTATE_UINT64(tsc_deadline, CPUState),
+ VMSTATE_UINT64(tsc_deadline, CPUX86State),
VMSTATE_END_OF_LIST()
}
};
static bool misc_enable_needed(void *opaque)
{
- CPUState *env = opaque;
+ CPUX86State *env = opaque;
return env->msr_ia32_misc_enable != MSR_IA32_MISC_ENABLE_DEFAULT;
}
.minimum_version_id = 1,
.minimum_version_id_old = 1,
.fields = (VMStateField []) {
- VMSTATE_UINT64(msr_ia32_misc_enable, CPUState),
+ VMSTATE_UINT64(msr_ia32_misc_enable, CPUX86State),
VMSTATE_END_OF_LIST()
}
};
.pre_save = cpu_pre_save,
.post_load = cpu_post_load,
.fields = (VMStateField []) {
- VMSTATE_UINTTL_ARRAY(regs, CPUState, CPU_NB_REGS),
- VMSTATE_UINTTL(eip, CPUState),
- VMSTATE_UINTTL(eflags, CPUState),
- VMSTATE_UINT32(hflags, CPUState),
+ VMSTATE_UINTTL_ARRAY(regs, CPUX86State, CPU_NB_REGS),
+ VMSTATE_UINTTL(eip, CPUX86State),
+ VMSTATE_UINTTL(eflags, CPUX86State),
+ VMSTATE_UINT32(hflags, CPUX86State),
/* FPU */
- VMSTATE_UINT16(fpuc, CPUState),
- VMSTATE_UINT16(fpus_vmstate, CPUState),
- VMSTATE_UINT16(fptag_vmstate, CPUState),
- VMSTATE_UINT16(fpregs_format_vmstate, CPUState),
- VMSTATE_FP_REGS(fpregs, CPUState, 8),
-
- VMSTATE_SEGMENT_ARRAY(segs, CPUState, 6),
- VMSTATE_SEGMENT(ldt, CPUState),
- VMSTATE_SEGMENT(tr, CPUState),
- VMSTATE_SEGMENT(gdt, CPUState),
- VMSTATE_SEGMENT(idt, CPUState),
-
- VMSTATE_UINT32(sysenter_cs, CPUState),
+ VMSTATE_UINT16(fpuc, CPUX86State),
+ VMSTATE_UINT16(fpus_vmstate, CPUX86State),
+ VMSTATE_UINT16(fptag_vmstate, CPUX86State),
+ VMSTATE_UINT16(fpregs_format_vmstate, CPUX86State),
+ VMSTATE_FP_REGS(fpregs, CPUX86State, 8),
+
+ VMSTATE_SEGMENT_ARRAY(segs, CPUX86State, 6),
+ VMSTATE_SEGMENT(ldt, CPUX86State),
+ VMSTATE_SEGMENT(tr, CPUX86State),
+ VMSTATE_SEGMENT(gdt, CPUX86State),
+ VMSTATE_SEGMENT(idt, CPUX86State),
+
+ VMSTATE_UINT32(sysenter_cs, CPUX86State),
#ifdef TARGET_X86_64
/* Hack: In v7 size changed from 32 to 64 bits on x86_64 */
- VMSTATE_HACK_UINT32(sysenter_esp, CPUState, less_than_7),
- VMSTATE_HACK_UINT32(sysenter_eip, CPUState, less_than_7),
- VMSTATE_UINTTL_V(sysenter_esp, CPUState, 7),
- VMSTATE_UINTTL_V(sysenter_eip, CPUState, 7),
+ VMSTATE_HACK_UINT32(sysenter_esp, CPUX86State, less_than_7),
+ VMSTATE_HACK_UINT32(sysenter_eip, CPUX86State, less_than_7),
+ VMSTATE_UINTTL_V(sysenter_esp, CPUX86State, 7),
+ VMSTATE_UINTTL_V(sysenter_eip, CPUX86State, 7),
#else
- VMSTATE_UINTTL(sysenter_esp, CPUState),
- VMSTATE_UINTTL(sysenter_eip, CPUState),
+ VMSTATE_UINTTL(sysenter_esp, CPUX86State),
+ VMSTATE_UINTTL(sysenter_eip, CPUX86State),
#endif
- VMSTATE_UINTTL(cr[0], CPUState),
- VMSTATE_UINTTL(cr[2], CPUState),
- VMSTATE_UINTTL(cr[3], CPUState),
- VMSTATE_UINTTL(cr[4], CPUState),
- VMSTATE_UINTTL_ARRAY(dr, CPUState, 8),
+ VMSTATE_UINTTL(cr[0], CPUX86State),
+ VMSTATE_UINTTL(cr[2], CPUX86State),
+ VMSTATE_UINTTL(cr[3], CPUX86State),
+ VMSTATE_UINTTL(cr[4], CPUX86State),
+ VMSTATE_UINTTL_ARRAY(dr, CPUX86State, 8),
/* MMU */
- VMSTATE_INT32(a20_mask, CPUState),
+ VMSTATE_INT32(a20_mask, CPUX86State),
/* XMM */
- VMSTATE_UINT32(mxcsr, CPUState),
- VMSTATE_XMM_REGS(xmm_regs, CPUState, CPU_NB_REGS),
+ VMSTATE_UINT32(mxcsr, CPUX86State),
+ VMSTATE_XMM_REGS(xmm_regs, CPUX86State, CPU_NB_REGS),
#ifdef TARGET_X86_64
- VMSTATE_UINT64(efer, CPUState),
- VMSTATE_UINT64(star, CPUState),
- VMSTATE_UINT64(lstar, CPUState),
- VMSTATE_UINT64(cstar, CPUState),
- VMSTATE_UINT64(fmask, CPUState),
- VMSTATE_UINT64(kernelgsbase, CPUState),
+ VMSTATE_UINT64(efer, CPUX86State),
+ VMSTATE_UINT64(star, CPUX86State),
+ VMSTATE_UINT64(lstar, CPUX86State),
+ VMSTATE_UINT64(cstar, CPUX86State),
+ VMSTATE_UINT64(fmask, CPUX86State),
+ VMSTATE_UINT64(kernelgsbase, CPUX86State),
#endif
- VMSTATE_UINT32_V(smbase, CPUState, 4),
-
- VMSTATE_UINT64_V(pat, CPUState, 5),
- VMSTATE_UINT32_V(hflags2, CPUState, 5),
-
- VMSTATE_UINT32_TEST(halted, CPUState, version_is_5),
- VMSTATE_UINT64_V(vm_hsave, CPUState, 5),
- VMSTATE_UINT64_V(vm_vmcb, CPUState, 5),
- VMSTATE_UINT64_V(tsc_offset, CPUState, 5),
- VMSTATE_UINT64_V(intercept, CPUState, 5),
- VMSTATE_UINT16_V(intercept_cr_read, CPUState, 5),
- VMSTATE_UINT16_V(intercept_cr_write, CPUState, 5),
- VMSTATE_UINT16_V(intercept_dr_read, CPUState, 5),
- VMSTATE_UINT16_V(intercept_dr_write, CPUState, 5),
- VMSTATE_UINT32_V(intercept_exceptions, CPUState, 5),
- VMSTATE_UINT8_V(v_tpr, CPUState, 5),
+ VMSTATE_UINT32_V(smbase, CPUX86State, 4),
+
+ VMSTATE_UINT64_V(pat, CPUX86State, 5),
+ VMSTATE_UINT32_V(hflags2, CPUX86State, 5),
+
+ VMSTATE_UINT32_TEST(halted, CPUX86State, version_is_5),
+ VMSTATE_UINT64_V(vm_hsave, CPUX86State, 5),
+ VMSTATE_UINT64_V(vm_vmcb, CPUX86State, 5),
+ VMSTATE_UINT64_V(tsc_offset, CPUX86State, 5),
+ VMSTATE_UINT64_V(intercept, CPUX86State, 5),
+ VMSTATE_UINT16_V(intercept_cr_read, CPUX86State, 5),
+ VMSTATE_UINT16_V(intercept_cr_write, CPUX86State, 5),
+ VMSTATE_UINT16_V(intercept_dr_read, CPUX86State, 5),
+ VMSTATE_UINT16_V(intercept_dr_write, CPUX86State, 5),
+ VMSTATE_UINT32_V(intercept_exceptions, CPUX86State, 5),
+ VMSTATE_UINT8_V(v_tpr, CPUX86State, 5),
/* MTRRs */
- VMSTATE_UINT64_ARRAY_V(mtrr_fixed, CPUState, 11, 8),
- VMSTATE_UINT64_V(mtrr_deftype, CPUState, 8),
- VMSTATE_MTRR_VARS(mtrr_var, CPUState, 8, 8),
+ VMSTATE_UINT64_ARRAY_V(mtrr_fixed, CPUX86State, 11, 8),
+ VMSTATE_UINT64_V(mtrr_deftype, CPUX86State, 8),
+ VMSTATE_MTRR_VARS(mtrr_var, CPUX86State, 8, 8),
/* KVM-related states */
- VMSTATE_INT32_V(interrupt_injected, CPUState, 9),
- VMSTATE_UINT32_V(mp_state, CPUState, 9),
- VMSTATE_UINT64_V(tsc, CPUState, 9),
- VMSTATE_INT32_V(exception_injected, CPUState, 11),
- VMSTATE_UINT8_V(soft_interrupt, CPUState, 11),
- VMSTATE_UINT8_V(nmi_injected, CPUState, 11),
- VMSTATE_UINT8_V(nmi_pending, CPUState, 11),
- VMSTATE_UINT8_V(has_error_code, CPUState, 11),
- VMSTATE_UINT32_V(sipi_vector, CPUState, 11),
+ VMSTATE_INT32_V(interrupt_injected, CPUX86State, 9),
+ VMSTATE_UINT32_V(mp_state, CPUX86State, 9),
+ VMSTATE_UINT64_V(tsc, CPUX86State, 9),
+ VMSTATE_INT32_V(exception_injected, CPUX86State, 11),
+ VMSTATE_UINT8_V(soft_interrupt, CPUX86State, 11),
+ VMSTATE_UINT8_V(nmi_injected, CPUX86State, 11),
+ VMSTATE_UINT8_V(nmi_pending, CPUX86State, 11),
+ VMSTATE_UINT8_V(has_error_code, CPUX86State, 11),
+ VMSTATE_UINT32_V(sipi_vector, CPUX86State, 11),
/* MCE */
- VMSTATE_UINT64_V(mcg_cap, CPUState, 10),
- VMSTATE_UINT64_V(mcg_status, CPUState, 10),
- VMSTATE_UINT64_V(mcg_ctl, CPUState, 10),
- VMSTATE_UINT64_ARRAY_V(mce_banks, CPUState, MCE_BANKS_DEF *4, 10),
+ VMSTATE_UINT64_V(mcg_cap, CPUX86State, 10),
+ VMSTATE_UINT64_V(mcg_status, CPUX86State, 10),
+ VMSTATE_UINT64_V(mcg_ctl, CPUX86State, 10),
+ VMSTATE_UINT64_ARRAY_V(mce_banks, CPUX86State, MCE_BANKS_DEF *4, 10),
/* rdtscp */
- VMSTATE_UINT64_V(tsc_aux, CPUState, 11),
+ VMSTATE_UINT64_V(tsc_aux, CPUX86State, 11),
/* KVM pvclock msr */
- VMSTATE_UINT64_V(system_time_msr, CPUState, 11),
- VMSTATE_UINT64_V(wall_clock_msr, CPUState, 11),
+ VMSTATE_UINT64_V(system_time_msr, CPUX86State, 11),
+ VMSTATE_UINT64_V(wall_clock_msr, CPUX86State, 11),
/* XSAVE related fields */
- VMSTATE_UINT64_V(xcr0, CPUState, 12),
- VMSTATE_UINT64_V(xstate_bv, CPUState, 12),
- VMSTATE_YMMH_REGS_VARS(ymmh_regs, CPUState, CPU_NB_REGS, 12),
+ VMSTATE_UINT64_V(xcr0, CPUX86State, 12),
+ VMSTATE_UINT64_V(xstate_bv, CPUX86State, 12),
+ VMSTATE_YMMH_REGS_VARS(ymmh_regs, CPUX86State, CPU_NB_REGS, 12),
VMSTATE_END_OF_LIST()
/* The above list is not sorted wrt version numbers, watch out! */
},
/* load efer and update the corresponding hflags. XXX: do consistency
checks with cpuid bits ? */
-static inline void cpu_load_efer(CPUState *env, uint64_t val)
+static inline void cpu_load_efer(CPUX86State *env, uint64_t val)
{
env->efer = val;
env->hflags &= ~(HF_LMA_MASK | HF_SVME_MASK);
#endif
}
-void do_interrupt(CPUState *env1)
+void do_interrupt(CPUX86State *env1)
{
- CPUState *saved_env;
+ CPUX86State *saved_env;
saved_env = env;
env = env1;
env = saved_env;
}
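/* Note on the saved_env dance above: TCG helpers of this era operate on a
 * global register variable 'env' (pinned to AREG0), so entry points that
 * take an explicit CPUX86State pointer must swap it in and restore it:
 *
 *     saved_env = env;
 *     env = env1;
 *     ...                // callees implicitly use the global 'env'
 *     env = saved_env;
 */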
-void do_interrupt_x86_hardirq(CPUState *env1, int intno, int is_hw)
+void do_interrupt_x86_hardirq(CPUX86State *env1, int intno, int is_hw)
{
- CPUState *saved_env;
+ CPUX86State *saved_env;
saved_env = env;
env = env1;
raise_interrupt(exception_index, 0, error_code, 0);
}
-void raise_exception_err_env(CPUState *nenv, int exception_index,
+void raise_exception_err_env(CPUX86State *nenv, int exception_index,
int error_code)
{
env = nenv;
raise_interrupt(exception_index, 0, 0, 0);
}
-void raise_exception_env(int exception_index, CPUState *nenv)
+void raise_exception_env(int exception_index, CPUX86State *nenv)
{
env = nenv;
raise_exception(exception_index);
#if defined(CONFIG_USER_ONLY)
-void do_smm_enter(CPUState *env1)
+void do_smm_enter(CPUX86State *env1)
{
}
#define SMM_REVISION_ID 0x00020000
#endif
-void do_smm_enter(CPUState *env1)
+void do_smm_enter(CPUX86State *env1)
{
target_ulong sm_state;
SegmentCache *dt;
int i, offset;
- CPUState *saved_env;
+ CPUX86State *saved_env;
saved_env = env;
env = env1;
/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
-void tlb_fill(CPUState *env1, target_ulong addr, int is_write, int mmu_idx,
+void tlb_fill(CPUX86State *env1, target_ulong addr, int is_write, int mmu_idx,
void *retaddr)
{
TranslationBlock *tb;
{
}
-void svm_check_intercept(CPUState *env1, uint32_t type)
+void svm_check_intercept(CPUX86State *env1, uint32_t type)
{
}
}
static inline void svm_load_seg_cache(target_phys_addr_t addr,
- CPUState *env, int seg_reg)
+ CPUX86State *env, int seg_reg)
{
SegmentCache sc1, *sc = &sc1;
svm_load_seg(addr, sc);
}
}
-void svm_check_intercept(CPUState *env1, uint32_t type)
+void svm_check_intercept(CPUX86State *env1, uint32_t type)
{
- CPUState *saved_env;
+ CPUX86State *saved_env;
saved_env = env;
env = env1;
}
}
-uint32_t cpu_cc_compute_all(CPUState *env1, int op)
+uint32_t cpu_cc_compute_all(CPUX86State *env1, int op)
{
- CPUState *saved_env;
+ CPUX86State *saved_env;
uint32_t ret;
saved_env = env;
static inline void gen_op_jmp_T0(void)
{
- tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUState, eip));
+ tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, eip));
}
static inline void gen_op_add_reg_im(int size, int reg, int32_t val)
static inline void gen_op_movl_A0_seg(int reg)
{
- tcg_gen_ld32u_tl(cpu_A0, cpu_env, offsetof(CPUState, segs[reg].base) + REG_L_OFFSET);
+ tcg_gen_ld32u_tl(cpu_A0, cpu_env, offsetof(CPUX86State, segs[reg].base) + REG_L_OFFSET);
}
static inline void gen_op_addl_A0_seg(int reg)
{
- tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, segs[reg].base));
+ tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base));
tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
#ifdef TARGET_X86_64
tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
#ifdef TARGET_X86_64
static inline void gen_op_movq_A0_seg(int reg)
{
- tcg_gen_ld_tl(cpu_A0, cpu_env, offsetof(CPUState, segs[reg].base));
+ tcg_gen_ld_tl(cpu_A0, cpu_env, offsetof(CPUX86State, segs[reg].base));
}
static inline void gen_op_addq_A0_seg(int reg)
{
- tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, segs[reg].base));
+ tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base));
tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
}
static inline void gen_jmp_im(target_ulong pc)
{
tcg_gen_movi_tl(cpu_tmp0, pc);
- tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUState, eip));
+ tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, eip));
}
static inline void gen_string_movl_A0_ESI(DisasContext *s)
static inline void gen_op_movl_T0_Dshift(int ot)
{
- tcg_gen_ld32s_tl(cpu_T[0], cpu_env, offsetof(CPUState, df));
+ tcg_gen_ld32s_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, df));
tcg_gen_shli_tl(cpu_T[0], cpu_T[0], ot);
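/* df is stored as +1/-1, so shifting it by the operand-size code gives the
 * string-op stride: e.g. df = -1 (after STD) with ot = 2 yields -4. */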
};
break;
case 0xfc: /* cld */
tcg_gen_movi_i32(cpu_tmp2_i32, 1);
- tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUState, df));
+ tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
break;
case 0xfd: /* std */
tcg_gen_movi_i32(cpu_tmp2_i32, -1);
- tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUState, df));
+ tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
break;
/************************/
{
cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
cpu_cc_op = tcg_global_mem_new_i32(TCG_AREG0,
- offsetof(CPUState, cc_op), "cc_op");
- cpu_cc_src = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, cc_src),
+ offsetof(CPUX86State, cc_op), "cc_op");
+ cpu_cc_src = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_src),
"cc_src");
- cpu_cc_dst = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, cc_dst),
+ cpu_cc_dst = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_dst),
"cc_dst");
- cpu_cc_tmp = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, cc_tmp),
+ cpu_cc_tmp = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_tmp),
"cc_tmp");
#ifdef TARGET_X86_64
cpu_regs[R_EAX] = tcg_global_mem_new_i64(TCG_AREG0,
- offsetof(CPUState, regs[R_EAX]), "rax");
+ offsetof(CPUX86State, regs[R_EAX]), "rax");
cpu_regs[R_ECX] = tcg_global_mem_new_i64(TCG_AREG0,
- offsetof(CPUState, regs[R_ECX]), "rcx");
+ offsetof(CPUX86State, regs[R_ECX]), "rcx");
cpu_regs[R_EDX] = tcg_global_mem_new_i64(TCG_AREG0,
- offsetof(CPUState, regs[R_EDX]), "rdx");
+ offsetof(CPUX86State, regs[R_EDX]), "rdx");
cpu_regs[R_EBX] = tcg_global_mem_new_i64(TCG_AREG0,
- offsetof(CPUState, regs[R_EBX]), "rbx");
+ offsetof(CPUX86State, regs[R_EBX]), "rbx");
cpu_regs[R_ESP] = tcg_global_mem_new_i64(TCG_AREG0,
- offsetof(CPUState, regs[R_ESP]), "rsp");
+ offsetof(CPUX86State, regs[R_ESP]), "rsp");
cpu_regs[R_EBP] = tcg_global_mem_new_i64(TCG_AREG0,
- offsetof(CPUState, regs[R_EBP]), "rbp");
+ offsetof(CPUX86State, regs[R_EBP]), "rbp");
cpu_regs[R_ESI] = tcg_global_mem_new_i64(TCG_AREG0,
- offsetof(CPUState, regs[R_ESI]), "rsi");
+ offsetof(CPUX86State, regs[R_ESI]), "rsi");
cpu_regs[R_EDI] = tcg_global_mem_new_i64(TCG_AREG0,
- offsetof(CPUState, regs[R_EDI]), "rdi");
+ offsetof(CPUX86State, regs[R_EDI]), "rdi");
cpu_regs[8] = tcg_global_mem_new_i64(TCG_AREG0,
- offsetof(CPUState, regs[8]), "r8");
+ offsetof(CPUX86State, regs[8]), "r8");
cpu_regs[9] = tcg_global_mem_new_i64(TCG_AREG0,
- offsetof(CPUState, regs[9]), "r9");
+ offsetof(CPUX86State, regs[9]), "r9");
cpu_regs[10] = tcg_global_mem_new_i64(TCG_AREG0,
- offsetof(CPUState, regs[10]), "r10");
+ offsetof(CPUX86State, regs[10]), "r10");
cpu_regs[11] = tcg_global_mem_new_i64(TCG_AREG0,
- offsetof(CPUState, regs[11]), "r11");
+ offsetof(CPUX86State, regs[11]), "r11");
cpu_regs[12] = tcg_global_mem_new_i64(TCG_AREG0,
- offsetof(CPUState, regs[12]), "r12");
+ offsetof(CPUX86State, regs[12]), "r12");
cpu_regs[13] = tcg_global_mem_new_i64(TCG_AREG0,
- offsetof(CPUState, regs[13]), "r13");
+ offsetof(CPUX86State, regs[13]), "r13");
cpu_regs[14] = tcg_global_mem_new_i64(TCG_AREG0,
- offsetof(CPUState, regs[14]), "r14");
+ offsetof(CPUX86State, regs[14]), "r14");
cpu_regs[15] = tcg_global_mem_new_i64(TCG_AREG0,
- offsetof(CPUState, regs[15]), "r15");
+ offsetof(CPUX86State, regs[15]), "r15");
#else
cpu_regs[R_EAX] = tcg_global_mem_new_i32(TCG_AREG0,
- offsetof(CPUState, regs[R_EAX]), "eax");
+ offsetof(CPUX86State, regs[R_EAX]), "eax");
cpu_regs[R_ECX] = tcg_global_mem_new_i32(TCG_AREG0,
- offsetof(CPUState, regs[R_ECX]), "ecx");
+ offsetof(CPUX86State, regs[R_ECX]), "ecx");
cpu_regs[R_EDX] = tcg_global_mem_new_i32(TCG_AREG0,
- offsetof(CPUState, regs[R_EDX]), "edx");
+ offsetof(CPUX86State, regs[R_EDX]), "edx");
cpu_regs[R_EBX] = tcg_global_mem_new_i32(TCG_AREG0,
- offsetof(CPUState, regs[R_EBX]), "ebx");
+ offsetof(CPUX86State, regs[R_EBX]), "ebx");
cpu_regs[R_ESP] = tcg_global_mem_new_i32(TCG_AREG0,
- offsetof(CPUState, regs[R_ESP]), "esp");
+ offsetof(CPUX86State, regs[R_ESP]), "esp");
cpu_regs[R_EBP] = tcg_global_mem_new_i32(TCG_AREG0,
- offsetof(CPUState, regs[R_EBP]), "ebp");
+ offsetof(CPUX86State, regs[R_EBP]), "ebp");
cpu_regs[R_ESI] = tcg_global_mem_new_i32(TCG_AREG0,
- offsetof(CPUState, regs[R_ESI]), "esi");
+ offsetof(CPUX86State, regs[R_ESI]), "esi");
cpu_regs[R_EDI] = tcg_global_mem_new_i32(TCG_AREG0,
- offsetof(CPUState, regs[R_EDI]), "edi");
+ offsetof(CPUX86State, regs[R_EDI]), "edi");
#endif
/* register helpers */
/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
basic block 'tb'. If search_pc is TRUE, also generate PC
information for each intermediate instruction. */
-static inline void gen_intermediate_code_internal(CPUState *env,
+static inline void gen_intermediate_code_internal(CPUX86State *env,
TranslationBlock *tb,
int search_pc)
{
}
}
-void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
+void gen_intermediate_code(CPUX86State *env, TranslationBlock *tb)
{
gen_intermediate_code_internal(env, tb, 0);
}
-void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
+void gen_intermediate_code_pc(CPUX86State *env, TranslationBlock *tb)
{
gen_intermediate_code_internal(env, tb, 1);
}
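/* The two wrappers differ only in search_pc: with search_pc = 1 the
 * translator additionally records, per generated op, the guest PC data that
 * restore_state_to_opc() below uses to resynchronize eip (and cc_op) after a
 * fault in the middle of a translation block. */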
-void restore_state_to_opc(CPUState *env, TranslationBlock *tb, int pc_pos)
+void restore_state_to_opc(CPUX86State *env, TranslationBlock *tb, int pc_pos)
{
int cc_op;
#ifdef DEBUG_DISAS