#define CPUID_MCA (1 << 14)
#define CPUID_CMOV (1 << 15)
#define CPUID_PAT (1 << 16)
+#define CPUID_PSE36 (1 << 17)
#define CPUID_CLFLUSH (1 << 19)
/* ... */
#define CPUID_MMX (1 << 23)
/* this function must always be used to load data in the segment
   cache: it synchronizes the hflags with the segment cache values */
static inline void cpu_x86_load_seg_cache(CPUX86State *env,
int seg_reg, unsigned int selector,
- uint32_t base, unsigned int limit,
+ target_ulong base,
+ unsigned int limit,
unsigned int flags)
{
SegmentCache *sc;
#endif
/* init SMM cpu state */
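+    /* entering SMM leaves long mode: EFER is cleared and the LMA hflag
+       is dropped before the real-mode-like SMM environment is set up */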
+#ifdef TARGET_X86_64
+ env->efer = 0;
+ env->hflags &= ~HF_LMA_MASK;
+#endif
load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
env->eip = 0x00008000;
cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                       0xffffffff, 0);
/* ... */
cpu_x86_update_cr0(env,
                   env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
cpu_x86_update_cr4(env, 0);
env->dr[7] = 0x00000400;
-#ifdef TARGET_X86_64
- env->efer = 0;
-#endif
CC_OP = CC_OP_EFLAGS;
}
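/* RSM: the saved CPU state is reloaded from the SMRAM save area;
   sm_state points at SMBASE + 0x8000, with the saved fields living at
   offsets 0x7e00-0x7fff on 64-bit capable CPUs */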
sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
+ env->efer = ldq_phys(sm_state + 0x7ed0);
+ if (env->efer & MSR_EFER_LMA)
+ env->hflags |= HF_LMA_MASK;
+ else
+ env->hflags &= ~HF_LMA_MASK;
+
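/* the six segment registers are restored from 16-byte save slots:
   selector at +0, flags at +2, limit at +4, 64-bit base at +8 */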
for(i = 0; i < 6; i++) {
offset = 0x7e00 + i * 16;
cpu_x86_load_seg_cache(env, i,
                       lduw_phys(sm_state + offset),
                       ldq_phys(sm_state + offset + 8),
                       ldl_phys(sm_state + offset + 4),
                       (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
}
/* ... */
env->tr.limit = ldl_phys(sm_state + 0x7e94);
env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
- env->efer = ldq_phys(sm_state + 0x7ed0);
-
EAX = ldq_phys(sm_state + 0x7ff8);
ECX = ldq_phys(sm_state + 0x7ff0);
EDX = ldq_phys(sm_state + 0x7fe8);
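/* the remaining general-purpose registers, EIP and the saved EFLAGS
   follow the same pattern: 64-bit loads from the top of the save area */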