/*
 * i386 virtual CPU header
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#ifndef CPU_I386_H
#define CPU_I386_H

#include "config.h"
#include "qemu-common.h"

#ifdef TARGET_X86_64
#define TARGET_LONG_BITS 64
#else
#define TARGET_LONG_BITS 32
#endif

/* target supports implicit self modifying code */
#define TARGET_HAS_SMC
/* support for self modifying code even if the modified instruction is
   close to the modifying instruction */
#define TARGET_HAS_PRECISE_SMC

#define TARGET_HAS_ICE 1

#ifdef TARGET_X86_64
#define ELF_MACHINE     EM_X86_64
#else
#define ELF_MACHINE     EM_386
#endif

#define CPUArchState struct CPUX86State

#include "exec/cpu-defs.h"

#include "fpu/softfloat.h"

#define R_EAX 0
#define R_ECX 1
#define R_EDX 2
#define R_EBX 3
#define R_ESP 4
#define R_EBP 5
#define R_ESI 6
#define R_EDI 7

#define R_AL 0
#define R_CL 1
#define R_DL 2
#define R_BL 3
#define R_AH 4
#define R_CH 5
#define R_DH 6
#define R_BH 7

#define R_ES 0
#define R_CS 1
#define R_SS 2
#define R_DS 3
#define R_FS 4
#define R_GS 5

/* segment descriptor fields */
#define DESC_G_MASK     (1 << 23)
#define DESC_B_SHIFT    22
#define DESC_B_MASK     (1 << DESC_B_SHIFT)
#define DESC_L_SHIFT    21 /* x86_64 only : 64 bit code segment */
#define DESC_L_MASK     (1 << DESC_L_SHIFT)
#define DESC_AVL_MASK   (1 << 20)
#define DESC_P_MASK     (1 << 15)
#define DESC_DPL_SHIFT  13
#define DESC_DPL_MASK   (3 << DESC_DPL_SHIFT)
#define DESC_S_MASK     (1 << 12)
#define DESC_TYPE_SHIFT 8
#define DESC_TYPE_MASK  (15 << DESC_TYPE_SHIFT)
#define DESC_A_MASK     (1 << 8)

#define DESC_CS_MASK    (1 << 11) /* 1=code segment 0=data segment */
#define DESC_C_MASK     (1 << 10) /* code: conforming */
#define DESC_R_MASK     (1 << 9)  /* code: readable */

#define DESC_E_MASK     (1 << 10) /* data: expansion direction */
#define DESC_W_MASK     (1 << 9)  /* data: writable */

#define DESC_TSS_BUSY_MASK (1 << 9)

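/* Illustrative sketch, not part of the original header: the DESC_* masks
 * above are applied to the raw 'flags' word that QEMU caches per segment
 * (the high descriptor word).  The helper names are hypothetical. */
static inline int desc_get_dpl_example(uint32_t flags)
{
    return (flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT;
}

static inline int desc_is_code_segment_example(uint32_t flags)
{
    /* the code/data bit is only meaningful for non-system (S=1) descriptors */
    return (flags & DESC_S_MASK) && (flags & DESC_CS_MASK);
}
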
/* eflags masks */
#define CC_C    0x0001
#define CC_P    0x0004
#define CC_A    0x0010
#define CC_Z    0x0040
#define CC_S    0x0080
#define CC_O    0x0800

#define TF_SHIFT   8
#define IOPL_SHIFT 12
#define VM_SHIFT   17

#define TF_MASK                 0x00000100
#define IF_MASK                 0x00000200
#define DF_MASK                 0x00000400
#define IOPL_MASK               0x00003000
#define NT_MASK                 0x00004000
#define RF_MASK                 0x00010000
#define VM_MASK                 0x00020000
#define AC_MASK                 0x00040000
#define VIF_MASK                0x00080000
#define VIP_MASK                0x00100000
#define ID_MASK                 0x00200000

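/* Illustrative sketch, not part of the original header: IOPL is the only
 * multi-bit field among the eflags masks above, so it is extracted with its
 * shift.  The helper name is hypothetical. */
static inline int eflags_get_iopl_example(uint32_t eflags)
{
    return (eflags & IOPL_MASK) >> IOPL_SHIFT;
}
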
/* hidden flags - used internally by qemu to represent additional cpu
   states. Only the CPL, INHIBIT_IRQ, SMM and SVMI are not
   redundant. We avoid using the IOPL_MASK, TF_MASK, VM_MASK and AC_MASK
   bit positions to ease ORing with eflags. */
/* current cpl */
#define HF_CPL_SHIFT         0
/* true if soft mmu is being used */
#define HF_SOFTMMU_SHIFT     2
/* true if hardware interrupts must be disabled for next instruction */
#define HF_INHIBIT_IRQ_SHIFT 3
/* 16 or 32 segments */
#define HF_CS32_SHIFT        4
#define HF_SS32_SHIFT        5
/* zero base for DS, ES and SS : can be '0' only in 32 bit CS segment */
#define HF_ADDSEG_SHIFT      6
/* copy of CR0.PE (protected mode) */
#define HF_PE_SHIFT          7
#define HF_TF_SHIFT          8 /* must be same as eflags */
#define HF_MP_SHIFT          9 /* the order must be MP, EM, TS */
#define HF_EM_SHIFT         10
#define HF_TS_SHIFT         11
#define HF_IOPL_SHIFT       12 /* must be same as eflags */
#define HF_LMA_SHIFT        14 /* only used on x86_64: long mode active */
#define HF_CS64_SHIFT       15 /* only used on x86_64: 64 bit code segment */
#define HF_RF_SHIFT         16 /* must be same as eflags */
#define HF_VM_SHIFT         17 /* must be same as eflags */
#define HF_AC_SHIFT         18 /* must be same as eflags */
#define HF_SMM_SHIFT        19 /* CPU in SMM mode */
#define HF_SVME_SHIFT       20 /* SVME enabled (copy of EFER.SVME) */
#define HF_SVMI_SHIFT       21 /* SVM intercepts are active */
#define HF_OSFXSR_SHIFT     22 /* CR4.OSFXSR */
#define HF_SMAP_SHIFT       23 /* CR4.SMAP */

#define HF_CPL_MASK          (3 << HF_CPL_SHIFT)
#define HF_SOFTMMU_MASK      (1 << HF_SOFTMMU_SHIFT)
#define HF_INHIBIT_IRQ_MASK  (1 << HF_INHIBIT_IRQ_SHIFT)
#define HF_CS32_MASK         (1 << HF_CS32_SHIFT)
#define HF_SS32_MASK         (1 << HF_SS32_SHIFT)
#define HF_ADDSEG_MASK       (1 << HF_ADDSEG_SHIFT)
#define HF_PE_MASK           (1 << HF_PE_SHIFT)
#define HF_TF_MASK           (1 << HF_TF_SHIFT)
#define HF_MP_MASK           (1 << HF_MP_SHIFT)
#define HF_EM_MASK           (1 << HF_EM_SHIFT)
#define HF_TS_MASK           (1 << HF_TS_SHIFT)
#define HF_IOPL_MASK         (3 << HF_IOPL_SHIFT)
#define HF_LMA_MASK          (1 << HF_LMA_SHIFT)
#define HF_CS64_MASK         (1 << HF_CS64_SHIFT)
#define HF_RF_MASK           (1 << HF_RF_SHIFT)
#define HF_VM_MASK           (1 << HF_VM_SHIFT)
#define HF_AC_MASK           (1 << HF_AC_SHIFT)
#define HF_SMM_MASK          (1 << HF_SMM_SHIFT)
#define HF_SVME_MASK         (1 << HF_SVME_SHIFT)
#define HF_SVMI_MASK         (1 << HF_SVMI_SHIFT)
#define HF_OSFXSR_MASK       (1 << HF_OSFXSR_SHIFT)
#define HF_SMAP_MASK         (1 << HF_SMAP_SHIFT)

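/* Illustrative sketch, not part of the original header: because the HF_*
 * layout avoids the IOPL/TF/VM/AC eflags positions, a TB flags word can be
 * built by simply ORing hflags with the relevant eflags bits, which is the
 * combination cpu_get_tb_cpu_state() performs further down.  The helper name
 * is hypothetical. */
static inline uint32_t tb_flags_example(uint32_t hflags, uint32_t eflags)
{
    return hflags |
        (eflags & (IOPL_MASK | TF_MASK | RF_MASK | VM_MASK | AC_MASK));
}
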
/* hflags2 */

#define HF2_GIF_SHIFT        0 /* if set CPU takes interrupts */
#define HF2_HIF_SHIFT        1 /* value of IF_MASK when entering SVM */
#define HF2_NMI_SHIFT        2 /* CPU serving NMI */
#define HF2_VINTR_SHIFT      3 /* value of V_INTR_MASKING bit */

#define HF2_GIF_MASK         (1 << HF2_GIF_SHIFT)
#define HF2_HIF_MASK         (1 << HF2_HIF_SHIFT)
#define HF2_NMI_MASK         (1 << HF2_NMI_SHIFT)
#define HF2_VINTR_MASK       (1 << HF2_VINTR_SHIFT)

#define CR0_PE_SHIFT 0
#define CR0_MP_SHIFT 1

#define CR0_PE_MASK  (1 << 0)
#define CR0_MP_MASK  (1 << 1)
#define CR0_EM_MASK  (1 << 2)
#define CR0_TS_MASK  (1 << 3)
#define CR0_ET_MASK  (1 << 4)
#define CR0_NE_MASK  (1 << 5)
#define CR0_WP_MASK  (1 << 16)
#define CR0_AM_MASK  (1 << 18)
#define CR0_PG_MASK  (1 << 31)

#define CR4_VME_MASK  (1 << 0)
#define CR4_PVI_MASK  (1 << 1)
#define CR4_TSD_MASK  (1 << 2)
#define CR4_DE_MASK   (1 << 3)
#define CR4_PSE_MASK  (1 << 4)
#define CR4_PAE_MASK  (1 << 5)
#define CR4_MCE_MASK  (1 << 6)
#define CR4_PGE_MASK  (1 << 7)
#define CR4_PCE_MASK  (1 << 8)
#define CR4_OSFXSR_SHIFT 9
#define CR4_OSFXSR_MASK (1 << CR4_OSFXSR_SHIFT)
#define CR4_OSXMMEXCPT_MASK  (1 << 10)
#define CR4_VMXE_MASK   (1 << 13)
#define CR4_SMXE_MASK   (1 << 14)
#define CR4_FSGSBASE_MASK (1 << 16)
#define CR4_PCIDE_MASK  (1 << 17)
#define CR4_OSXSAVE_MASK (1 << 18)
#define CR4_SMEP_MASK   (1 << 20)
#define CR4_SMAP_MASK   (1 << 21)

#define DR6_BD          (1 << 13)
#define DR6_BS          (1 << 14)
#define DR6_BT          (1 << 15)
#define DR6_FIXED_1     0xffff0ff0

#define DR7_GD          (1 << 13)
#define DR7_TYPE_SHIFT  16
#define DR7_LEN_SHIFT   18
#define DR7_FIXED_1     0x00000400

#define PG_PRESENT_BIT  0
#define PG_RW_BIT       1
#define PG_USER_BIT     2
#define PG_PWT_BIT      3
#define PG_PCD_BIT      4
#define PG_ACCESSED_BIT 5
#define PG_DIRTY_BIT    6
#define PG_PSE_BIT      7
#define PG_GLOBAL_BIT   8
#define PG_NX_BIT       63

#define PG_PRESENT_MASK  (1 << PG_PRESENT_BIT)
#define PG_RW_MASK       (1 << PG_RW_BIT)
#define PG_USER_MASK     (1 << PG_USER_BIT)
#define PG_PWT_MASK      (1 << PG_PWT_BIT)
#define PG_PCD_MASK      (1 << PG_PCD_BIT)
#define PG_ACCESSED_MASK (1 << PG_ACCESSED_BIT)
#define PG_DIRTY_MASK    (1 << PG_DIRTY_BIT)
#define PG_PSE_MASK      (1 << PG_PSE_BIT)
#define PG_GLOBAL_MASK   (1 << PG_GLOBAL_BIT)
#define PG_HI_USER_MASK  0x7ff0000000000000LL
#define PG_NX_MASK       (1LL << PG_NX_BIT)

#define PG_ERROR_W_BIT     1

#define PG_ERROR_P_MASK    0x01
#define PG_ERROR_W_MASK    (1 << PG_ERROR_W_BIT)
#define PG_ERROR_U_MASK    0x04
#define PG_ERROR_RSVD_MASK 0x08
#define PG_ERROR_I_D_MASK  0x10

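/* Illustrative sketch, not part of the original header: the PG_ERROR_* masks
 * describe the error code pushed for a page fault (#PF).  A minimal builder
 * assuming the usual x86 encoding; the helper name is hypothetical. */
static inline int pg_error_code_example(int is_user, int is_write,
                                        int was_present)
{
    int error_code = 0;

    if (was_present) {
        error_code |= PG_ERROR_P_MASK;   /* fault on a present page */
    }
    if (is_write) {
        error_code |= PG_ERROR_W_MASK;   /* caused by a write access */
    }
    if (is_user) {
        error_code |= PG_ERROR_U_MASK;   /* access from user mode (CPL 3) */
    }
    return error_code;
}
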
#define MCG_CTL_P       (1ULL<<8)   /* MCG_CAP register available */
#define MCG_SER_P       (1ULL<<24)  /* MCA recovery/new status bits */

#define MCE_CAP_DEF     (MCG_CTL_P|MCG_SER_P)
#define MCE_BANKS_DEF   10

#define MCG_STATUS_RIPV (1ULL<<0)   /* restart ip valid */
#define MCG_STATUS_EIPV (1ULL<<1)   /* ip points to correct instruction */
#define MCG_STATUS_MCIP (1ULL<<2)   /* machine check in progress */

#define MCI_STATUS_VAL   (1ULL<<63)  /* valid error */
#define MCI_STATUS_OVER  (1ULL<<62)  /* previous errors lost */
#define MCI_STATUS_UC    (1ULL<<61)  /* uncorrected error */
#define MCI_STATUS_EN    (1ULL<<60)  /* error enabled */
#define MCI_STATUS_MISCV (1ULL<<59)  /* misc error reg. valid */
#define MCI_STATUS_ADDRV (1ULL<<58)  /* addr reg. valid */
#define MCI_STATUS_PCC   (1ULL<<57)  /* processor context corrupt */
#define MCI_STATUS_S     (1ULL<<56)  /* Signaled machine check */
#define MCI_STATUS_AR    (1ULL<<55)  /* Action required */

/* MISC register defines */
#define MCM_ADDR_SEGOFF  0      /* segment offset */
#define MCM_ADDR_LINEAR  1      /* linear address */
#define MCM_ADDR_PHYS    2      /* physical address */
#define MCM_ADDR_MEM     3      /* memory address */
#define MCM_ADDR_GENERIC 7      /* generic */

#define MSR_IA32_TSC                    0x10
#define MSR_IA32_APICBASE               0x1b
#define MSR_IA32_APICBASE_BSP           (1<<8)
#define MSR_IA32_APICBASE_ENABLE        (1<<11)
#define MSR_IA32_APICBASE_BASE          (0xfffff<<12)
#define MSR_TSC_ADJUST                  0x0000003b
#define MSR_IA32_TSCDEADLINE            0x6e0

#define MSR_MTRRcap                     0xfe
#define MSR_MTRRcap_VCNT                8
#define MSR_MTRRcap_FIXRANGE_SUPPORT    (1 << 8)
#define MSR_MTRRcap_WC_SUPPORTED        (1 << 10)

#define MSR_IA32_SYSENTER_CS            0x174
#define MSR_IA32_SYSENTER_ESP           0x175
#define MSR_IA32_SYSENTER_EIP           0x176

#define MSR_MCG_CAP                     0x179
#define MSR_MCG_STATUS                  0x17a
#define MSR_MCG_CTL                     0x17b

#define MSR_IA32_PERF_STATUS            0x198

#define MSR_IA32_MISC_ENABLE            0x1a0
/* Indicates good rep/movs microcode on some processors: */
#define MSR_IA32_MISC_ENABLE_DEFAULT    1

#define MSR_MTRRphysBase(reg)           (0x200 + 2 * (reg))
#define MSR_MTRRphysMask(reg)           (0x200 + 2 * (reg) + 1)

#define MSR_MTRRfix64K_00000            0x250
#define MSR_MTRRfix16K_80000            0x258
#define MSR_MTRRfix16K_A0000            0x259
#define MSR_MTRRfix4K_C0000             0x268
#define MSR_MTRRfix4K_C8000             0x269
#define MSR_MTRRfix4K_D0000             0x26a
#define MSR_MTRRfix4K_D8000             0x26b
#define MSR_MTRRfix4K_E0000             0x26c
#define MSR_MTRRfix4K_E8000             0x26d
#define MSR_MTRRfix4K_F0000             0x26e
#define MSR_MTRRfix4K_F8000             0x26f

#define MSR_PAT                         0x277

#define MSR_MTRRdefType                 0x2ff

#define MSR_MC0_CTL                     0x400
#define MSR_MC0_STATUS                  0x401
#define MSR_MC0_ADDR                    0x402
#define MSR_MC0_MISC                    0x403

#define MSR_EFER                        0xc0000080

#define MSR_EFER_SCE   (1 << 0)
#define MSR_EFER_LME   (1 << 8)
#define MSR_EFER_LMA   (1 << 10)
#define MSR_EFER_NXE   (1 << 11)
#define MSR_EFER_SVME  (1 << 12)
#define MSR_EFER_FFXSR (1 << 14)

#define MSR_STAR                        0xc0000081
#define MSR_LSTAR                       0xc0000082
#define MSR_CSTAR                       0xc0000083
#define MSR_FMASK                       0xc0000084
#define MSR_FSBASE                      0xc0000100
#define MSR_GSBASE                      0xc0000101
#define MSR_KERNELGSBASE                0xc0000102
#define MSR_TSC_AUX                     0xc0000103

#define MSR_VM_HSAVE_PA                 0xc0010117

/* CPUID feature words */
typedef enum FeatureWord {
    FEAT_1_EDX,         /* CPUID[1].EDX */
    FEAT_1_ECX,         /* CPUID[1].ECX */
    FEAT_7_0_EBX,       /* CPUID[EAX=7,ECX=0].EBX */
    FEAT_8000_0001_EDX, /* CPUID[8000_0001].EDX */
    FEAT_8000_0001_ECX, /* CPUID[8000_0001].ECX */
    FEAT_C000_0001_EDX, /* CPUID[C000_0001].EDX */
    FEAT_KVM,           /* CPUID[4000_0001].EAX (KVM_CPUID_FEATURES) */
    FEAT_SVM,           /* CPUID[8000_000A].EDX */
    FEATURE_WORDS,
} FeatureWord;

typedef uint32_t FeatureWordArray[FEATURE_WORDS];

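/* Illustrative sketch, not part of the original header: a FeatureWordArray is
 * just one uint32_t per CPUID leaf/register listed in FeatureWord, so testing
 * a feature bit is a plain array access.  The helper name is hypothetical. */
static inline bool feature_word_test_example(const FeatureWordArray words,
                                             FeatureWord w, uint32_t mask)
{
    return (words[w] & mask) != 0;
}
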
/* cpuid_features bits */
#define CPUID_FP87 (1 << 0)
#define CPUID_VME  (1 << 1)
#define CPUID_DE   (1 << 2)
#define CPUID_PSE  (1 << 3)
#define CPUID_TSC  (1 << 4)
#define CPUID_MSR  (1 << 5)
#define CPUID_PAE  (1 << 6)
#define CPUID_MCE  (1 << 7)
#define CPUID_CX8  (1 << 8)
#define CPUID_APIC (1 << 9)
#define CPUID_SEP  (1 << 11) /* sysenter/sysexit */
#define CPUID_MTRR (1 << 12)
#define CPUID_PGE  (1 << 13)
#define CPUID_MCA  (1 << 14)
#define CPUID_CMOV (1 << 15)
#define CPUID_PAT  (1 << 16)
#define CPUID_PSE36   (1 << 17)
#define CPUID_PN   (1 << 18)
#define CPUID_CLFLUSH (1 << 19)
#define CPUID_DTS  (1 << 21)
#define CPUID_ACPI (1 << 22)
#define CPUID_MMX  (1 << 23)
#define CPUID_FXSR (1 << 24)
#define CPUID_SSE  (1 << 25)
#define CPUID_SSE2 (1 << 26)
#define CPUID_SS   (1 << 27)
#define CPUID_HT   (1 << 28)
#define CPUID_TM   (1 << 29)
#define CPUID_IA64 (1 << 30)
#define CPUID_PBE  (1 << 31)

#define CPUID_EXT_SSE3      (1 << 0)
#define CPUID_EXT_PCLMULQDQ (1 << 1)
#define CPUID_EXT_DTES64    (1 << 2)
#define CPUID_EXT_MONITOR   (1 << 3)
#define CPUID_EXT_DSCPL     (1 << 4)
#define CPUID_EXT_VMX       (1 << 5)
#define CPUID_EXT_SMX       (1 << 6)
#define CPUID_EXT_EST       (1 << 7)
#define CPUID_EXT_TM2       (1 << 8)
#define CPUID_EXT_SSSE3     (1 << 9)
#define CPUID_EXT_CID       (1 << 10)
#define CPUID_EXT_FMA       (1 << 12)
#define CPUID_EXT_CX16      (1 << 13)
#define CPUID_EXT_XTPR      (1 << 14)
#define CPUID_EXT_PDCM      (1 << 15)
#define CPUID_EXT_PCID      (1 << 17)
#define CPUID_EXT_DCA       (1 << 18)
#define CPUID_EXT_SSE41     (1 << 19)
#define CPUID_EXT_SSE42     (1 << 20)
#define CPUID_EXT_X2APIC    (1 << 21)
#define CPUID_EXT_MOVBE     (1 << 22)
#define CPUID_EXT_POPCNT    (1 << 23)
#define CPUID_EXT_TSC_DEADLINE_TIMER (1 << 24)
#define CPUID_EXT_AES       (1 << 25)
#define CPUID_EXT_XSAVE     (1 << 26)
#define CPUID_EXT_OSXSAVE   (1 << 27)
#define CPUID_EXT_AVX       (1 << 28)
#define CPUID_EXT_F16C      (1 << 29)
#define CPUID_EXT_RDRAND    (1 << 30)
#define CPUID_EXT_HYPERVISOR (1 << 31)

#define CPUID_EXT2_FPU      (1 << 0)
#define CPUID_EXT2_VME      (1 << 1)
#define CPUID_EXT2_DE       (1 << 2)
#define CPUID_EXT2_PSE      (1 << 3)
#define CPUID_EXT2_TSC      (1 << 4)
#define CPUID_EXT2_MSR      (1 << 5)
#define CPUID_EXT2_PAE      (1 << 6)
#define CPUID_EXT2_MCE      (1 << 7)
#define CPUID_EXT2_CX8      (1 << 8)
#define CPUID_EXT2_APIC     (1 << 9)
#define CPUID_EXT2_SYSCALL  (1 << 11)
#define CPUID_EXT2_MTRR     (1 << 12)
#define CPUID_EXT2_PGE      (1 << 13)
#define CPUID_EXT2_MCA      (1 << 14)
#define CPUID_EXT2_CMOV     (1 << 15)
#define CPUID_EXT2_PAT      (1 << 16)
#define CPUID_EXT2_PSE36    (1 << 17)
#define CPUID_EXT2_MP       (1 << 19)
#define CPUID_EXT2_NX       (1 << 20)
#define CPUID_EXT2_MMXEXT   (1 << 22)
#define CPUID_EXT2_MMX      (1 << 23)
#define CPUID_EXT2_FXSR     (1 << 24)
#define CPUID_EXT2_FFXSR    (1 << 25)
#define CPUID_EXT2_PDPE1GB  (1 << 26)
#define CPUID_EXT2_RDTSCP   (1 << 27)
#define CPUID_EXT2_LM       (1 << 29)
#define CPUID_EXT2_3DNOWEXT (1 << 30)
#define CPUID_EXT2_3DNOW    (1 << 31)

/* CPUID[8000_0001].EDX bits that are aliases of CPUID[1].EDX bits on AMD CPUs */
#define CPUID_EXT2_AMD_ALIASES (CPUID_EXT2_FPU | CPUID_EXT2_VME | \
                                CPUID_EXT2_DE | CPUID_EXT2_PSE | \
                                CPUID_EXT2_TSC | CPUID_EXT2_MSR | \
                                CPUID_EXT2_PAE | CPUID_EXT2_MCE | \
                                CPUID_EXT2_CX8 | CPUID_EXT2_APIC | \
                                CPUID_EXT2_MTRR | CPUID_EXT2_PGE | \
                                CPUID_EXT2_MCA | CPUID_EXT2_CMOV | \
                                CPUID_EXT2_PAT | CPUID_EXT2_PSE36 | \
                                CPUID_EXT2_MMX | CPUID_EXT2_FXSR)

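/* Illustrative sketch, not part of the original header: on AMD the aliased
 * CPUID[8000_0001].EDX bits sit at the same positions as their CPUID[1].EDX
 * counterparts, so they can be copied over with a single mask.  The helper
 * name is hypothetical. */
static inline uint32_t cpuid_ext2_with_amd_aliases_example(uint32_t cpuid_1_edx,
                                                           uint32_t cpuid_ext2)
{
    return (cpuid_ext2 & ~CPUID_EXT2_AMD_ALIASES) |
           (cpuid_1_edx & CPUID_EXT2_AMD_ALIASES);
}
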
#define CPUID_EXT3_LAHF_LM (1 << 0)
#define CPUID_EXT3_CMP_LEG (1 << 1)
#define CPUID_EXT3_SVM     (1 << 2)
#define CPUID_EXT3_EXTAPIC (1 << 3)
#define CPUID_EXT3_CR8LEG  (1 << 4)
#define CPUID_EXT3_ABM     (1 << 5)
#define CPUID_EXT3_SSE4A   (1 << 6)
#define CPUID_EXT3_MISALIGNSSE (1 << 7)
#define CPUID_EXT3_3DNOWPREFETCH (1 << 8)
#define CPUID_EXT3_OSVW    (1 << 9)
#define CPUID_EXT3_IBS     (1 << 10)
#define CPUID_EXT3_XOP     (1 << 11)
#define CPUID_EXT3_SKINIT  (1 << 12)
#define CPUID_EXT3_WDT     (1 << 13)
#define CPUID_EXT3_LWP     (1 << 15)
#define CPUID_EXT3_FMA4    (1 << 16)
#define CPUID_EXT3_TCE     (1 << 17)
#define CPUID_EXT3_NODEID  (1 << 19)
#define CPUID_EXT3_TBM     (1 << 21)
#define CPUID_EXT3_TOPOEXT (1 << 22)
#define CPUID_EXT3_PERFCORE (1 << 23)
#define CPUID_EXT3_PERFNB  (1 << 24)

#define CPUID_SVM_NPT          (1 << 0)
#define CPUID_SVM_LBRV         (1 << 1)
#define CPUID_SVM_SVMLOCK      (1 << 2)
#define CPUID_SVM_NRIPSAVE     (1 << 3)
#define CPUID_SVM_TSCSCALE     (1 << 4)
#define CPUID_SVM_VMCBCLEAN    (1 << 5)
#define CPUID_SVM_FLUSHASID    (1 << 6)
#define CPUID_SVM_DECODEASSIST (1 << 7)
#define CPUID_SVM_PAUSEFILTER  (1 << 10)
#define CPUID_SVM_PFTHRESHOLD  (1 << 12)

#define CPUID_7_0_EBX_FSGSBASE (1 << 0)
#define CPUID_7_0_EBX_BMI1     (1 << 3)
#define CPUID_7_0_EBX_HLE      (1 << 4)
#define CPUID_7_0_EBX_AVX2     (1 << 5)
#define CPUID_7_0_EBX_SMEP     (1 << 7)
#define CPUID_7_0_EBX_BMI2     (1 << 8)
#define CPUID_7_0_EBX_ERMS     (1 << 9)
#define CPUID_7_0_EBX_INVPCID  (1 << 10)
#define CPUID_7_0_EBX_RTM      (1 << 11)
#define CPUID_7_0_EBX_RDSEED   (1 << 18)
#define CPUID_7_0_EBX_ADX      (1 << 19)
#define CPUID_7_0_EBX_SMAP     (1 << 20)

#define CPUID_VENDOR_SZ      12

#define CPUID_VENDOR_INTEL_1 0x756e6547 /* "Genu" */
#define CPUID_VENDOR_INTEL_2 0x49656e69 /* "ineI" */
#define CPUID_VENDOR_INTEL_3 0x6c65746e /* "ntel" */

#define CPUID_VENDOR_AMD_1   0x68747541 /* "Auth" */
#define CPUID_VENDOR_AMD_2   0x69746e65 /* "enti" */
#define CPUID_VENDOR_AMD_3   0x444d4163 /* "cAMD" */

#define CPUID_VENDOR_VIA_1   0x746e6543 /* "Cent" */
#define CPUID_VENDOR_VIA_2   0x48727561 /* "aurH" */
#define CPUID_VENDOR_VIA_3   0x736c7561 /* "auls" */

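/* Illustrative sketch, not part of the original header: the vendor constants
 * are the little-endian EBX/EDX/ECX register images returned by CPUID leaf 0,
 * so unpacking them byte by byte yields the 12-character vendor string.  The
 * helper name is hypothetical. */
static inline void cpuid_vendor_to_string_example(uint32_t ebx, uint32_t edx,
                                                  uint32_t ecx,
                                                  char dst[CPUID_VENDOR_SZ + 1])
{
    int i;

    for (i = 0; i < 4; i++) {
        dst[i]     = (ebx >> (8 * i)) & 0xff;
        dst[i + 4] = (edx >> (8 * i)) & 0xff;
        dst[i + 8] = (ecx >> (8 * i)) & 0xff;
    }
    dst[CPUID_VENDOR_SZ] = '\0';
}
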
#define CPUID_MWAIT_IBE     (1 << 1) /* Interrupts can exit capability */
#define CPUID_MWAIT_EMX     (1 << 0) /* enumeration supported */

#define EXCP00_DIVZ     0
#define EXCP01_DB       1
#define EXCP02_NMI      2
#define EXCP03_INT3     3
#define EXCP04_INTO     4
#define EXCP05_BOUND    5
#define EXCP06_ILLOP    6
#define EXCP07_PREX     7
#define EXCP08_DBLE     8
#define EXCP09_XERR     9
#define EXCP0A_TSS      10
#define EXCP0B_NOSEG    11
#define EXCP0C_STACK    12
#define EXCP0D_GPF      13
#define EXCP0E_PAGE     14
#define EXCP10_COPR     16
#define EXCP11_ALGN     17
#define EXCP12_MCHK     18

#define EXCP_SYSCALL    0x100 /* only happens in user only emulation
                                 for syscall instruction */

/* i386-specific interrupt pending bits.  */
#define CPU_INTERRUPT_POLL      CPU_INTERRUPT_TGT_EXT_1
#define CPU_INTERRUPT_SMI       CPU_INTERRUPT_TGT_EXT_2
#define CPU_INTERRUPT_NMI       CPU_INTERRUPT_TGT_EXT_3
#define CPU_INTERRUPT_MCE       CPU_INTERRUPT_TGT_EXT_4
#define CPU_INTERRUPT_VIRQ      CPU_INTERRUPT_TGT_INT_0
#define CPU_INTERRUPT_INIT      CPU_INTERRUPT_TGT_INT_1
#define CPU_INTERRUPT_SIPI      CPU_INTERRUPT_TGT_INT_2
#define CPU_INTERRUPT_TPR       CPU_INTERRUPT_TGT_INT_3


enum {
    CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */
    CC_OP_EFLAGS,  /* all cc are explicitly computed, CC_SRC = flags */

    CC_OP_MULB, /* modify all flags, C, O = (CC_SRC != 0) */
    CC_OP_MULW,
    CC_OP_MULL,
    CC_OP_MULQ,

    CC_OP_ADDB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_ADDW,
    CC_OP_ADDL,
    CC_OP_ADDQ,

    CC_OP_ADCB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_ADCW,
    CC_OP_ADCL,
    CC_OP_ADCQ,

    CC_OP_SUBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_SUBW,
    CC_OP_SUBL,
    CC_OP_SUBQ,

    CC_OP_SBBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_SBBW,
    CC_OP_SBBL,
    CC_OP_SBBQ,

    CC_OP_LOGICB, /* modify all flags, CC_DST = res */
    CC_OP_LOGICW,
    CC_OP_LOGICL,
    CC_OP_LOGICQ,

    CC_OP_INCB, /* modify all flags except, CC_DST = res, CC_SRC = C */
    CC_OP_INCW,
    CC_OP_INCL,
    CC_OP_INCQ,

    CC_OP_DECB, /* modify all flags except, CC_DST = res, CC_SRC = C */
    CC_OP_DECW,
    CC_OP_DECL,
    CC_OP_DECQ,

    CC_OP_SHLB, /* modify all flags, CC_DST = res, CC_SRC.msb = C */
    CC_OP_SHLW,
    CC_OP_SHLL,
    CC_OP_SHLQ,

    CC_OP_SARB, /* modify all flags, CC_DST = res, CC_SRC.lsb = C */
    CC_OP_SARW,
    CC_OP_SARL,
    CC_OP_SARQ,

    CC_OP_NB,
};

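/* Illustrative sketch, not part of the original header: the CC_OP_* codes
 * implement lazy flag evaluation.  Translated code only records the result,
 * the first source operand and the operation kind; the eflags bits are
 * reconstructed on demand (see cpu_cc_compute_all() near the end of this
 * file).  The helper name is hypothetical. */
static inline void cc_record_addb_example(target_ulong *cc_src,
                                          target_ulong *cc_dst,
                                          uint32_t *cc_op,
                                          uint8_t src1, uint8_t result)
{
    *cc_src = src1;        /* first source operand */
    *cc_dst = result;      /* result of the 8-bit add */
    *cc_op = CC_OP_ADDB;   /* how to recompute the flags later */
}
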
typedef struct SegmentCache {
    uint32_t selector;
    target_ulong base;
    uint32_t limit;
    uint32_t flags;
} SegmentCache;

typedef union {
    uint8_t _b[16];
    uint16_t _w[8];
    uint32_t _l[4];
    uint64_t _q[2];
    float32 _s[4];
    float64 _d[2];
} XMMReg;

typedef union {
    uint8_t _b[8];
    uint16_t _w[4];
    uint32_t _l[2];
    float32 _s[2];
    uint64_t q;
} MMXReg;

#ifdef HOST_WORDS_BIGENDIAN
#define XMM_B(n) _b[15 - (n)]
#define XMM_W(n) _w[7 - (n)]
#define XMM_L(n) _l[3 - (n)]
#define XMM_S(n) _s[3 - (n)]
#define XMM_Q(n) _q[1 - (n)]
#define XMM_D(n) _d[1 - (n)]

#define MMX_B(n) _b[7 - (n)]
#define MMX_W(n) _w[3 - (n)]
#define MMX_L(n) _l[1 - (n)]
#define MMX_S(n) _s[1 - (n)]
#else
#define XMM_B(n) _b[n]
#define XMM_W(n) _w[n]
#define XMM_L(n) _l[n]
#define XMM_S(n) _s[n]
#define XMM_Q(n) _q[n]
#define XMM_D(n) _d[n]

#define MMX_B(n) _b[n]
#define MMX_W(n) _w[n]
#define MMX_L(n) _l[n]
#define MMX_S(n) _s[n]
#endif
#define MMX_Q(n) q

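/* Illustrative sketch, not part of the original header: the XMM_*()/MMX_*()
 * accessors hide host endianness, so element 0 always refers to the
 * architecturally lowest element of the register.  The helper name is
 * hypothetical. */
static inline uint32_t xmm_get_dword_example(const XMMReg *reg, int n)
{
    return reg->XMM_L(n);
}
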
typedef union {
    floatx80 d __attribute__((aligned(16)));
    MMXReg mmx;
} FPReg;

typedef struct {
    uint64_t base;
    uint64_t mask;
} MTRRVar;

#define CPU_NB_REGS64 16
#define CPU_NB_REGS32 8

#ifdef TARGET_X86_64
#define CPU_NB_REGS CPU_NB_REGS64
#else
#define CPU_NB_REGS CPU_NB_REGS32
#endif

#define NB_MMU_MODES 3

typedef enum TPRAccess {
    TPR_ACCESS_READ,
    TPR_ACCESS_WRITE,
} TPRAccess;

typedef struct CPUX86State {
    /* standard registers */
    target_ulong regs[CPU_NB_REGS];
    target_ulong eip;
    target_ulong eflags; /* eflags register. During CPU emulation, CC
                            flags and DF are set to zero because they are
                            stored elsewhere */

    /* emulator internal eflags handling */
    target_ulong cc_src;
    target_ulong cc_dst;
    uint32_t cc_op;
    int32_t df; /* D flag : 1 if D = 0, -1 if D = 1 */
    uint32_t hflags; /* TB flags, see HF_xxx constants. These flags
                        are known at translation time. */
    uint32_t hflags2; /* various other flags, see HF2_xxx constants. */

    /* segments */
    SegmentCache segs[6]; /* selector values */
    SegmentCache ldt;
    SegmentCache tr;
    SegmentCache gdt; /* only base and limit are used */
    SegmentCache idt; /* only base and limit are used */

    target_ulong cr[5]; /* NOTE: cr1 is unused */
    int32_t a20_mask;

    /* FPU state */
    unsigned int fpstt; /* top of stack index */
    uint16_t fpus;
    uint16_t fpuc;
    uint8_t fptags[8];   /* 0 = valid, 1 = empty */
    FPReg fpregs[8];
    /* KVM-only so far */
    uint16_t fpop;
    uint64_t fpip;
    uint64_t fpdp;

    /* emulator internal variables */
    float_status fp_status;
    floatx80 ft0;

    float_status mmx_status; /* for 3DNow! float ops */
    float_status sse_status;
    uint32_t mxcsr;
    XMMReg xmm_regs[CPU_NB_REGS];
    XMMReg xmm_t0;
    MMXReg mmx_t0;
    target_ulong cc_tmp; /* temporary for rcr/rcl */

    /* sysenter registers */
    uint32_t sysenter_cs;
    target_ulong sysenter_esp;
    target_ulong sysenter_eip;
    uint64_t efer;
    uint64_t star;

    uint64_t vm_hsave;
    uint64_t vm_vmcb;
    uint64_t tsc_offset;
    uint64_t intercept;
    uint16_t intercept_cr_read;
    uint16_t intercept_cr_write;
    uint16_t intercept_dr_read;
    uint16_t intercept_dr_write;
    uint32_t intercept_exceptions;
    uint8_t v_tpr;

#ifdef TARGET_X86_64
    target_ulong lstar;
    target_ulong cstar;
    target_ulong fmask;
    target_ulong kernelgsbase;
#endif
    uint64_t system_time_msr;
    uint64_t wall_clock_msr;
    uint64_t async_pf_en_msr;
    uint64_t pv_eoi_en_msr;

    uint64_t tsc;
    uint64_t tsc_adjust;
    uint64_t tsc_deadline;

    uint64_t mcg_status;
    uint64_t msr_ia32_misc_enable;

    /* exception/interrupt handling */
    int error_code;
    int exception_is_int;
    target_ulong exception_next_eip;
    target_ulong dr[8]; /* debug registers */
    union {
        CPUBreakpoint *cpu_breakpoint[4];
        CPUWatchpoint *cpu_watchpoint[4];
    }; /* break/watchpoints for dr[0..3] */
    uint32_t smbase;
    int old_exception;  /* exception in flight */

    /* KVM states, automatically cleared on reset */
    uint8_t nmi_injected;
    uint8_t nmi_pending;

    CPU_COMMON

    uint64_t pat;

    /* processor features (e.g. for CPUID insn) */
    uint32_t cpuid_level;
    uint32_t cpuid_vendor1;
    uint32_t cpuid_vendor2;
    uint32_t cpuid_vendor3;
    uint32_t cpuid_version;
    uint32_t cpuid_features;
    uint32_t cpuid_ext_features;
    uint32_t cpuid_xlevel;
    uint32_t cpuid_model[12];
    uint32_t cpuid_ext2_features;
    uint32_t cpuid_ext3_features;
    uint32_t cpuid_apic_id;
    int cpuid_vendor_override;
    /* Store the results of Centaur's CPUID instructions */
    uint32_t cpuid_xlevel2;
    uint32_t cpuid_ext4_features;
    /* Flags from CPUID[EAX=7,ECX=0].EBX */
    uint32_t cpuid_7_0_ebx_features;

    /* MTRRs */
    uint64_t mtrr_fixed[11];
    uint64_t mtrr_deftype;
    MTRRVar mtrr_var[8];

    /* For KVM */
    uint32_t mp_state;
    int32_t exception_injected;
    int32_t interrupt_injected;
    uint8_t soft_interrupt;
    uint8_t has_error_code;
    uint32_t sipi_vector;
    uint32_t cpuid_kvm_features;
    uint32_t cpuid_svm_features;
    bool tsc_valid;
    int tsc_khz;
    void *kvm_xsave_buf;

    /* in order to simplify APIC support, we leave this pointer to the
       user */
    struct DeviceState *apic_state;

    uint64_t mcg_cap;
    uint64_t mcg_ctl;
    uint64_t mce_banks[MCE_BANKS_DEF*4];

    uint64_t tsc_aux;

    /* vmstate */
    uint16_t fpus_vmstate;
    uint16_t fptag_vmstate;
    uint16_t fpregs_format_vmstate;

    uint64_t xstate_bv;
    XMMReg ymmh_regs[CPU_NB_REGS];

    uint64_t xcr0;

    TPRAccess tpr_access_type;
} CPUX86State;

#include "cpu-qom.h"

X86CPU *cpu_x86_init(const char *cpu_model);
int cpu_x86_exec(CPUX86State *s);
void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf);
void x86_cpudef_setup(void);
int cpu_x86_support_mca_broadcast(CPUX86State *env);

int cpu_get_pic_interrupt(CPUX86State *s);
/* MSDOS compatibility mode FPU exception support */
void cpu_set_ferr(CPUX86State *s);

/* this function must always be used to load data in the segment
   cache: it synchronizes the hflags with the segment cache values */
static inline void cpu_x86_load_seg_cache(CPUX86State *env,
                                          int seg_reg, unsigned int selector,
                                          target_ulong base,
                                          unsigned int limit,
                                          unsigned int flags)
{
    SegmentCache *sc;
    unsigned int new_hflags;

    sc = &env->segs[seg_reg];
    sc->selector = selector;
    sc->base = base;
    sc->limit = limit;
    sc->flags = flags;

    /* update the hidden flags */
    {
        if (seg_reg == R_CS) {
#ifdef TARGET_X86_64
            if ((env->hflags & HF_LMA_MASK) && (flags & DESC_L_MASK)) {
                /* long mode */
                env->hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
                env->hflags &= ~(HF_ADDSEG_MASK);
            } else
#endif
            {
                /* legacy / compatibility case */
                new_hflags = (env->segs[R_CS].flags & DESC_B_MASK)
                    >> (DESC_B_SHIFT - HF_CS32_SHIFT);
                env->hflags = (env->hflags & ~(HF_CS32_MASK | HF_CS64_MASK)) |
                    new_hflags;
            }
        }
        new_hflags = (env->segs[R_SS].flags & DESC_B_MASK)
            >> (DESC_B_SHIFT - HF_SS32_SHIFT);
        if (env->hflags & HF_CS64_MASK) {
            /* zero base assumed for DS, ES and SS in long mode */
        } else if (!(env->cr[0] & CR0_PE_MASK) ||
                   (env->eflags & VM_MASK) ||
                   !(env->hflags & HF_CS32_MASK)) {
            /* XXX: try to avoid this test. The problem comes from the
               fact that in real mode or vm86 mode we only modify the
               'base' and 'selector' fields of the segment cache to go
               faster. A solution may be to force addseg to one in
               translate-i386.c. */
            new_hflags |= HF_ADDSEG_MASK;
        } else {
            new_hflags |= ((env->segs[R_DS].base |
                            env->segs[R_ES].base |
                            env->segs[R_SS].base) != 0) <<
                HF_ADDSEG_SHIFT;
        }
        env->hflags = (env->hflags &
                       ~(HF_SS32_MASK | HF_ADDSEG_MASK)) | new_hflags;
    }
}

static inline void cpu_x86_load_seg_cache_sipi(X86CPU *cpu,
                                               int sipi_vector)
{
    CPUX86State *env = &cpu->env;

    env->eip = 0;
    cpu_x86_load_seg_cache(env, R_CS, sipi_vector << 8,
                           sipi_vector << 12,
                           env->segs[R_CS].limit,
                           env->segs[R_CS].flags);
    env->halted = 0;
}

int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
                            target_ulong *base, unsigned int *limit,
                            unsigned int *flags);

/* wrapper, just in case memory mappings must be changed */
static inline void cpu_x86_set_cpl(CPUX86State *s, int cpl)
{
#if HF_CPL_MASK == 3
    s->hflags = (s->hflags & ~HF_CPL_MASK) | cpl;
#else
#error HF_CPL_MASK is hardcoded
#endif
}

/* op_helper.c */
/* used for debug or cpu save/restore */
void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, floatx80 f);
floatx80 cpu_set_fp80(uint64_t mant, uint16_t upper);

/* cpu-exec.c */
/* the following helpers are only usable in user mode simulation as
   they can trigger unexpected exceptions */
void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector);
void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32);
void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32);

/* you can call this signal handler from your SIGBUS and SIGSEGV
   signal handlers to inform the virtual CPU of exceptions. non zero
   is returned if the signal was handled by the virtual CPU. */
int cpu_x86_signal_handler(int host_signum, void *pinfo,
                           void *puc);

/* cpuid.c */
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx);
int cpu_x86_register(X86CPU *cpu, const char *cpu_model);
void cpu_clear_apic_feature(CPUX86State *env);
void host_cpuid(uint32_t function, uint32_t count,
                uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx);

/* helper.c */
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int mmu_idx);
#define cpu_handle_mmu_fault cpu_x86_handle_mmu_fault
void cpu_x86_set_a20(CPUX86State *env, int a20_state);

static inline int hw_breakpoint_enabled(unsigned long dr7, int index)
{
    return (dr7 >> (index * 2)) & 3;
}

static inline int hw_breakpoint_type(unsigned long dr7, int index)
{
    return (dr7 >> (DR7_TYPE_SHIFT + (index * 4))) & 3;
}

static inline int hw_breakpoint_len(unsigned long dr7, int index)
{
    int len = ((dr7 >> (DR7_LEN_SHIFT + (index * 4))) & 3);
    return (len == 2) ? 8 : len + 1;
}

void hw_breakpoint_insert(CPUX86State *env, int index);
void hw_breakpoint_remove(CPUX86State *env, int index);
int check_hw_breakpoints(CPUX86State *env, int force_dr6_update);
void breakpoint_handler(CPUX86State *env);

/* will be suppressed */
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);

/* hw/pc.c */
void cpu_smm_update(CPUX86State *env);
uint64_t cpu_get_tsc(CPUX86State *env);

#define TARGET_PAGE_BITS 12

#ifdef TARGET_X86_64
#define TARGET_PHYS_ADDR_SPACE_BITS 52
/* ??? This is really 48 bits, sign-extended, but the only thing
   accessible to userland with bit 48 set is the VSYSCALL, and that
   is handled via other mechanisms.  */
#define TARGET_VIRT_ADDR_SPACE_BITS 47
#else
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#define TARGET_VIRT_ADDR_SPACE_BITS 32
#endif

static inline CPUX86State *cpu_init(const char *cpu_model)
{
    X86CPU *cpu = cpu_x86_init(cpu_model);
    if (cpu == NULL) {
        return NULL;
    }
    return &cpu->env;
}

#define cpu_exec cpu_x86_exec
#define cpu_gen_code cpu_x86_gen_code
#define cpu_signal_handler cpu_x86_signal_handler
#define cpu_list x86_cpu_list
#define cpudef_setup x86_cpudef_setup

#define CPU_SAVE_VERSION 12

/* MMU modes definitions */
#define MMU_MODE0_SUFFIX _kernel
#define MMU_MODE1_SUFFIX _user
#define MMU_MODE2_SUFFIX _ksmap /* Kernel with SMAP override */
#define MMU_KERNEL_IDX  0
#define MMU_USER_IDX    1
#define MMU_KSMAP_IDX   2
static inline int cpu_mmu_index(CPUX86State *env)
{
    return (env->hflags & HF_CPL_MASK) == 3 ? MMU_USER_IDX :
        ((env->hflags & HF_SMAP_MASK) && (env->eflags & AC_MASK))
        ? MMU_KSMAP_IDX : MMU_KERNEL_IDX;
}

#undef EAX
#define EAX (env->regs[R_EAX])
#undef ECX
#define ECX (env->regs[R_ECX])
#undef EDX
#define EDX (env->regs[R_EDX])
#undef EBX
#define EBX (env->regs[R_EBX])
#undef ESP
#define ESP (env->regs[R_ESP])
#undef EBP
#define EBP (env->regs[R_EBP])
#undef ESI
#define ESI (env->regs[R_ESI])
#undef EDI
#define EDI (env->regs[R_EDI])
#undef EIP
#define EIP (env->eip)
#define DF  (env->df)

#define CC_SRC (env->cc_src)
#define CC_DST (env->cc_dst)
#define CC_OP  (env->cc_op)

/* n must be a constant to be efficient */
static inline target_long lshift(target_long x, int n)
{
    if (n >= 0) {
        return x << n;
    } else {
        return x >> (-n);
    }
}

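/* Illustrative sketch, not part of the original header: lshift() is useful
 * when a bit has to be moved between two positions whose difference may be
 * negative, as happens when repositioning flag bits.  The helper name is
 * hypothetical. */
static inline target_long move_bit_example(target_long x, int from, int to)
{
    return lshift(x & ((target_long)1 << from), to - from);
}
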
/* float macros */
#define FT0    (env->ft0)
#define ST0    (env->fpregs[env->fpstt].d)
#define ST(n)  (env->fpregs[(env->fpstt + (n)) & 7].d)
#define ST1    ST(1)

/* translate.c */
void optimize_flags_init(void);

#if defined(CONFIG_USER_ONLY)
static inline void cpu_clone_regs(CPUX86State *env, target_ulong newsp)
{
    if (newsp) {
        env->regs[R_ESP] = newsp;
    }
    env->regs[R_EAX] = 0;
}
#endif

#include "exec/cpu-all.h"
#include "svm.h"

#if !defined(CONFIG_USER_ONLY)
#include "hw/apic.h"
#endif

static inline bool cpu_has_work(CPUState *cpu)
{
    CPUX86State *env = &X86_CPU(cpu)->env;

    return ((env->interrupt_request & (CPU_INTERRUPT_HARD |
                                       CPU_INTERRUPT_POLL)) &&
            (env->eflags & IF_MASK)) ||
           (env->interrupt_request & (CPU_INTERRUPT_NMI |
                                      CPU_INTERRUPT_INIT |
                                      CPU_INTERRUPT_SIPI |
                                      CPU_INTERRUPT_MCE));
}

#include "exec/exec-all.h"

static inline void cpu_pc_from_tb(CPUX86State *env, TranslationBlock *tb)
{
    env->eip = tb->pc - tb->cs_base;
}

static inline void cpu_get_tb_cpu_state(CPUX86State *env, target_ulong *pc,
                                        target_ulong *cs_base, int *flags)
{
    *cs_base = env->segs[R_CS].base;
    *pc = *cs_base + env->eip;
    *flags = env->hflags |
        (env->eflags & (IOPL_MASK | TF_MASK | RF_MASK | VM_MASK | AC_MASK));
}

void do_cpu_init(X86CPU *cpu);
void do_cpu_sipi(X86CPU *cpu);

#define MCE_INJECT_BROADCAST    1
#define MCE_INJECT_UNCOND_AO    2

void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
                        uint64_t status, uint64_t mcg_status, uint64_t addr,
                        uint64_t misc, int flags);

/* excp_helper.c */
void QEMU_NORETURN raise_exception(CPUX86State *env, int exception_index);
void QEMU_NORETURN raise_exception_err(CPUX86State *env, int exception_index,
                                       int error_code);
void QEMU_NORETURN raise_interrupt(CPUX86State *nenv, int intno, int is_int,
                                   int error_code, int next_eip_addend);

/* cc_helper.c */
extern const uint8_t parity_table[256];
uint32_t cpu_cc_compute_all(CPUX86State *env1, int op);

static inline uint32_t cpu_compute_eflags(CPUX86State *env)
{
    return env->eflags | cpu_cc_compute_all(env, CC_OP) | (DF & DF_MASK);
}

/* NOTE: CC_OP must be modified manually to CC_OP_EFLAGS */
static inline void cpu_load_eflags(CPUX86State *env, int eflags,
                                   int update_mask)
{
    CC_SRC = eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((eflags >> 10) & 1));
    env->eflags = (env->eflags & ~update_mask) |
        (eflags & update_mask) | 0x2;
}

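/* Illustrative usage sketch, not part of the original header: restore a guest
 * EFLAGS image, letting cpu_load_eflags() route the arithmetic flags into
 * CC_SRC/DF while only the listed control bits are written back to
 * env->eflags; per the note above, CC_OP must be switched to CC_OP_EFLAGS by
 * the caller.  The helper name and the update mask are examples, not a
 * canonical choice. */
static inline void load_eflags_example(CPUX86State *env, uint32_t new_eflags)
{
    cpu_load_eflags(env, new_eflags,
                    TF_MASK | IF_MASK | AC_MASK | ID_MASK | NT_MASK | RF_MASK);
    CC_OP = CC_OP_EFLAGS;
}
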
/* load efer and update the corresponding hflags. XXX: do consistency
   checks with cpuid bits? */
static inline void cpu_load_efer(CPUX86State *env, uint64_t val)
{
    env->efer = val;
    env->hflags &= ~(HF_LMA_MASK | HF_SVME_MASK);
    if (env->efer & MSR_EFER_LMA) {
        env->hflags |= HF_LMA_MASK;
    }
    if (env->efer & MSR_EFER_SVME) {
        env->hflags |= HF_SVME_MASK;
    }
}

/* svm_helper.c */
void cpu_svm_check_intercept_param(CPUX86State *env1, uint32_t type,
                                   uint64_t param);
void cpu_vmexit(CPUX86State *nenv, uint32_t exit_code, uint64_t exit_info_1);

/* op_helper.c */
void do_interrupt(CPUX86State *env);
void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw);

void do_smm_enter(CPUX86State *env1);

void cpu_report_tpr_access(CPUX86State *env, TPRAccess access);

void enable_kvm_pv_eoi(void);

/* Return name of 32-bit register, from a R_* constant */
const char *get_register_name_32(unsigned int reg);

#endif /* CPU_I386_H */