/*
 * i386 virtual CPU header
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#ifndef CPU_I386_H
#define CPU_I386_H

#include "config.h"
#include "qemu-common.h"

#ifdef TARGET_X86_64
#define TARGET_LONG_BITS 64
#else
#define TARGET_LONG_BITS 32
#endif

/* target supports implicit self modifying code */
#define TARGET_HAS_SMC
/* support for self modifying code even if the modified instruction is
   close to the modifying instruction */
#define TARGET_HAS_PRECISE_SMC

#define TARGET_HAS_ICE 1

#ifdef TARGET_X86_64
#define ELF_MACHINE EM_X86_64
#else
#define ELF_MACHINE EM_386
#endif

#define CPUArchState struct CPUX86State

#include "exec/cpu-defs.h"

#include "fpu/softfloat.h"

#define R_EAX 0
#define R_ECX 1
#define R_EDX 2
#define R_EBX 3
#define R_ESP 4
#define R_EBP 5
#define R_ESI 6
#define R_EDI 7

#define R_AL 0
#define R_CL 1
#define R_DL 2
#define R_BL 3
#define R_AH 4
#define R_CH 5
#define R_DH 6
#define R_BH 7

#define R_ES 0
#define R_CS 1
#define R_SS 2
#define R_DS 3
#define R_FS 4
#define R_GS 5

/* segment descriptor fields */
#define DESC_G_MASK (1 << 23)
#define DESC_B_SHIFT 22
#define DESC_B_MASK (1 << DESC_B_SHIFT)
#define DESC_L_SHIFT 21 /* x86_64 only : 64 bit code segment */
#define DESC_L_MASK (1 << DESC_L_SHIFT)
#define DESC_AVL_MASK (1 << 20)
#define DESC_P_MASK (1 << 15)
#define DESC_DPL_SHIFT 13
#define DESC_DPL_MASK (3 << DESC_DPL_SHIFT)
#define DESC_S_MASK (1 << 12)
#define DESC_TYPE_SHIFT 8
#define DESC_TYPE_MASK (15 << DESC_TYPE_SHIFT)
#define DESC_A_MASK (1 << 8)

#define DESC_CS_MASK (1 << 11) /* 1=code segment 0=data segment */
#define DESC_C_MASK (1 << 10) /* code: conforming */
#define DESC_R_MASK (1 << 9)  /* code: readable */

#define DESC_E_MASK (1 << 10) /* data: expansion direction */
#define DESC_W_MASK (1 << 9)  /* data: writable */

#define DESC_TSS_BUSY_MASK (1 << 9)
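
/* A minimal sketch (hypothetical helpers, not part of the original
   header): pulling the DPL and type fields out of cached descriptor
   flags with the masks and shifts above. */
static inline unsigned int desc_flags_dpl(uint32_t flags)
{
    return (flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT;
}

static inline unsigned int desc_flags_type(uint32_t flags)
{
    return (flags & DESC_TYPE_MASK) >> DESC_TYPE_SHIFT;
}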
99 | ||
100 | /* eflags masks */ | |
101 | #define CC_C 0x0001 | |
102 | #define CC_P 0x0004 | |
103 | #define CC_A 0x0010 | |
104 | #define CC_Z 0x0040 | |
105 | #define CC_S 0x0080 | |
106 | #define CC_O 0x0800 | |
107 | ||
108 | #define TF_SHIFT 8 | |
109 | #define IOPL_SHIFT 12 | |
110 | #define VM_SHIFT 17 | |
111 | ||
112 | #define TF_MASK 0x00000100 | |
113 | #define IF_MASK 0x00000200 | |
114 | #define DF_MASK 0x00000400 | |
115 | #define IOPL_MASK 0x00003000 | |
116 | #define NT_MASK 0x00004000 | |
117 | #define RF_MASK 0x00010000 | |
118 | #define VM_MASK 0x00020000 | |
119 | #define AC_MASK 0x00040000 | |
120 | #define VIF_MASK 0x00080000 | |
121 | #define VIP_MASK 0x00100000 | |
122 | #define ID_MASK 0x00200000 | |
123 | ||
124 | /* hidden flags - used internally by qemu to represent additional cpu | |
125 | states. Only the CPL, INHIBIT_IRQ, SMM and SVMI are not | |
126 | redundant. We avoid using the IOPL_MASK, TF_MASK, VM_MASK and AC_MASK | |
127 | bit positions to ease oring with eflags. */ | |
128 | /* current cpl */ | |
129 | #define HF_CPL_SHIFT 0 | |
130 | /* true if soft mmu is being used */ | |
131 | #define HF_SOFTMMU_SHIFT 2 | |
132 | /* true if hardware interrupts must be disabled for next instruction */ | |
133 | #define HF_INHIBIT_IRQ_SHIFT 3 | |
134 | /* 16 or 32 segments */ | |
135 | #define HF_CS32_SHIFT 4 | |
136 | #define HF_SS32_SHIFT 5 | |
137 | /* zero base for DS, ES and SS : can be '0' only in 32 bit CS segment */ | |
138 | #define HF_ADDSEG_SHIFT 6 | |
139 | /* copy of CR0.PE (protected mode) */ | |
140 | #define HF_PE_SHIFT 7 | |
141 | #define HF_TF_SHIFT 8 /* must be same as eflags */ | |
142 | #define HF_MP_SHIFT 9 /* the order must be MP, EM, TS */ | |
143 | #define HF_EM_SHIFT 10 | |
144 | #define HF_TS_SHIFT 11 | |
145 | #define HF_IOPL_SHIFT 12 /* must be same as eflags */ | |
146 | #define HF_LMA_SHIFT 14 /* only used on x86_64: long mode active */ | |
147 | #define HF_CS64_SHIFT 15 /* only used on x86_64: 64 bit code segment */ | |
148 | #define HF_RF_SHIFT 16 /* must be same as eflags */ | |
149 | #define HF_VM_SHIFT 17 /* must be same as eflags */ | |
150 | #define HF_AC_SHIFT 18 /* must be same as eflags */ | |
151 | #define HF_SMM_SHIFT 19 /* CPU in SMM mode */ | |
152 | #define HF_SVME_SHIFT 20 /* SVME enabled (copy of EFER.SVME) */ | |
153 | #define HF_SVMI_SHIFT 21 /* SVM intercepts are active */ | |
154 | #define HF_OSFXSR_SHIFT 22 /* CR4.OSFXSR */ | |
155 | #define HF_SMAP_SHIFT 23 /* CR4.SMAP */ | |
156 | ||
157 | #define HF_CPL_MASK (3 << HF_CPL_SHIFT) | |
158 | #define HF_SOFTMMU_MASK (1 << HF_SOFTMMU_SHIFT) | |
159 | #define HF_INHIBIT_IRQ_MASK (1 << HF_INHIBIT_IRQ_SHIFT) | |
160 | #define HF_CS32_MASK (1 << HF_CS32_SHIFT) | |
161 | #define HF_SS32_MASK (1 << HF_SS32_SHIFT) | |
162 | #define HF_ADDSEG_MASK (1 << HF_ADDSEG_SHIFT) | |
163 | #define HF_PE_MASK (1 << HF_PE_SHIFT) | |
164 | #define HF_TF_MASK (1 << HF_TF_SHIFT) | |
165 | #define HF_MP_MASK (1 << HF_MP_SHIFT) | |
166 | #define HF_EM_MASK (1 << HF_EM_SHIFT) | |
167 | #define HF_TS_MASK (1 << HF_TS_SHIFT) | |
168 | #define HF_IOPL_MASK (3 << HF_IOPL_SHIFT) | |
169 | #define HF_LMA_MASK (1 << HF_LMA_SHIFT) | |
170 | #define HF_CS64_MASK (1 << HF_CS64_SHIFT) | |
171 | #define HF_RF_MASK (1 << HF_RF_SHIFT) | |
172 | #define HF_VM_MASK (1 << HF_VM_SHIFT) | |
173 | #define HF_AC_MASK (1 << HF_AC_SHIFT) | |
174 | #define HF_SMM_MASK (1 << HF_SMM_SHIFT) | |
175 | #define HF_SVME_MASK (1 << HF_SVME_SHIFT) | |
176 | #define HF_SVMI_MASK (1 << HF_SVMI_SHIFT) | |
177 | #define HF_OSFXSR_MASK (1 << HF_OSFXSR_SHIFT) | |
178 | #define HF_SMAP_MASK (1 << HF_SMAP_SHIFT) | |
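
/* Because HF_TF, HF_IOPL, HF_RF, HF_VM and HF_AC reuse the eflags bit
   positions, hflags and the interesting eflags bits can be combined
   with a plain OR, e.g. when building translation-block flags (see
   cpu_get_tb_cpu_state() near the end of this file):
       flags = env->hflags |
           (env->eflags & (IOPL_MASK | TF_MASK | RF_MASK | VM_MASK | AC_MASK));
*/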
179 | ||
180 | /* hflags2 */ | |
181 | ||
182 | #define HF2_GIF_SHIFT 0 /* if set CPU takes interrupts */ | |
183 | #define HF2_HIF_SHIFT 1 /* value of IF_MASK when entering SVM */ | |
184 | #define HF2_NMI_SHIFT 2 /* CPU serving NMI */ | |
185 | #define HF2_VINTR_SHIFT 3 /* value of V_INTR_MASKING bit */ | |
186 | ||
187 | #define HF2_GIF_MASK (1 << HF2_GIF_SHIFT) | |
188 | #define HF2_HIF_MASK (1 << HF2_HIF_SHIFT) | |
189 | #define HF2_NMI_MASK (1 << HF2_NMI_SHIFT) | |
190 | #define HF2_VINTR_MASK (1 << HF2_VINTR_SHIFT) | |
191 | ||
192 | #define CR0_PE_SHIFT 0 | |
193 | #define CR0_MP_SHIFT 1 | |
194 | ||
195 | #define CR0_PE_MASK (1 << 0) | |
196 | #define CR0_MP_MASK (1 << 1) | |
197 | #define CR0_EM_MASK (1 << 2) | |
198 | #define CR0_TS_MASK (1 << 3) | |
199 | #define CR0_ET_MASK (1 << 4) | |
200 | #define CR0_NE_MASK (1 << 5) | |
201 | #define CR0_WP_MASK (1 << 16) | |
202 | #define CR0_AM_MASK (1 << 18) | |
203 | #define CR0_PG_MASK (1 << 31) | |
204 | ||
205 | #define CR4_VME_MASK (1 << 0) | |
206 | #define CR4_PVI_MASK (1 << 1) | |
207 | #define CR4_TSD_MASK (1 << 2) | |
208 | #define CR4_DE_MASK (1 << 3) | |
209 | #define CR4_PSE_MASK (1 << 4) | |
210 | #define CR4_PAE_MASK (1 << 5) | |
211 | #define CR4_MCE_MASK (1 << 6) | |
212 | #define CR4_PGE_MASK (1 << 7) | |
213 | #define CR4_PCE_MASK (1 << 8) | |
214 | #define CR4_OSFXSR_SHIFT 9 | |
215 | #define CR4_OSFXSR_MASK (1 << CR4_OSFXSR_SHIFT) | |
216 | #define CR4_OSXMMEXCPT_MASK (1 << 10) | |
217 | #define CR4_VMXE_MASK (1 << 13) | |
218 | #define CR4_SMXE_MASK (1 << 14) | |
219 | #define CR4_FSGSBASE_MASK (1 << 16) | |
220 | #define CR4_PCIDE_MASK (1 << 17) | |
221 | #define CR4_OSXSAVE_MASK (1 << 18) | |
222 | #define CR4_SMEP_MASK (1 << 20) | |
223 | #define CR4_SMAP_MASK (1 << 21) | |
224 | ||
225 | #define DR6_BD (1 << 13) | |
226 | #define DR6_BS (1 << 14) | |
227 | #define DR6_BT (1 << 15) | |
228 | #define DR6_FIXED_1 0xffff0ff0 | |
229 | ||
230 | #define DR7_GD (1 << 13) | |
231 | #define DR7_TYPE_SHIFT 16 | |
232 | #define DR7_LEN_SHIFT 18 | |
233 | #define DR7_FIXED_1 0x00000400 | |
234 | ||
235 | #define PG_PRESENT_BIT 0 | |
236 | #define PG_RW_BIT 1 | |
237 | #define PG_USER_BIT 2 | |
238 | #define PG_PWT_BIT 3 | |
239 | #define PG_PCD_BIT 4 | |
240 | #define PG_ACCESSED_BIT 5 | |
241 | #define PG_DIRTY_BIT 6 | |
242 | #define PG_PSE_BIT 7 | |
243 | #define PG_GLOBAL_BIT 8 | |
244 | #define PG_NX_BIT 63 | |
245 | ||
246 | #define PG_PRESENT_MASK (1 << PG_PRESENT_BIT) | |
247 | #define PG_RW_MASK (1 << PG_RW_BIT) | |
248 | #define PG_USER_MASK (1 << PG_USER_BIT) | |
249 | #define PG_PWT_MASK (1 << PG_PWT_BIT) | |
250 | #define PG_PCD_MASK (1 << PG_PCD_BIT) | |
251 | #define PG_ACCESSED_MASK (1 << PG_ACCESSED_BIT) | |
252 | #define PG_DIRTY_MASK (1 << PG_DIRTY_BIT) | |
253 | #define PG_PSE_MASK (1 << PG_PSE_BIT) | |
254 | #define PG_GLOBAL_MASK (1 << PG_GLOBAL_BIT) | |
255 | #define PG_HI_USER_MASK 0x7ff0000000000000LL | |
256 | #define PG_NX_MASK (1LL << PG_NX_BIT) | |
257 | ||
258 | #define PG_ERROR_W_BIT 1 | |
259 | ||
260 | #define PG_ERROR_P_MASK 0x01 | |
261 | #define PG_ERROR_W_MASK (1 << PG_ERROR_W_BIT) | |
262 | #define PG_ERROR_U_MASK 0x04 | |
263 | #define PG_ERROR_RSVD_MASK 0x08 | |
264 | #define PG_ERROR_I_D_MASK 0x10 | |
265 | ||
266 | #define MCG_CTL_P (1ULL<<8) /* MCG_CAP register available */ | |
267 | #define MCG_SER_P (1ULL<<24) /* MCA recovery/new status bits */ | |
268 | ||
269 | #define MCE_CAP_DEF (MCG_CTL_P|MCG_SER_P) | |
270 | #define MCE_BANKS_DEF 10 | |
271 | ||
272 | #define MCG_STATUS_RIPV (1ULL<<0) /* restart ip valid */ | |
273 | #define MCG_STATUS_EIPV (1ULL<<1) /* ip points to correct instruction */ | |
274 | #define MCG_STATUS_MCIP (1ULL<<2) /* machine check in progress */ | |
275 | ||
276 | #define MCI_STATUS_VAL (1ULL<<63) /* valid error */ | |
277 | #define MCI_STATUS_OVER (1ULL<<62) /* previous errors lost */ | |
278 | #define MCI_STATUS_UC (1ULL<<61) /* uncorrected error */ | |
279 | #define MCI_STATUS_EN (1ULL<<60) /* error enabled */ | |
280 | #define MCI_STATUS_MISCV (1ULL<<59) /* misc error reg. valid */ | |
281 | #define MCI_STATUS_ADDRV (1ULL<<58) /* addr reg. valid */ | |
282 | #define MCI_STATUS_PCC (1ULL<<57) /* processor context corrupt */ | |
283 | #define MCI_STATUS_S (1ULL<<56) /* Signaled machine check */ | |
284 | #define MCI_STATUS_AR (1ULL<<55) /* Action required */ | |
285 | ||
286 | /* MISC register defines */ | |
287 | #define MCM_ADDR_SEGOFF 0 /* segment offset */ | |
288 | #define MCM_ADDR_LINEAR 1 /* linear address */ | |
289 | #define MCM_ADDR_PHYS 2 /* physical address */ | |
290 | #define MCM_ADDR_MEM 3 /* memory address */ | |
291 | #define MCM_ADDR_GENERIC 7 /* generic */ | |
292 | ||
293 | #define MSR_IA32_TSC 0x10 | |
294 | #define MSR_IA32_APICBASE 0x1b | |
295 | #define MSR_IA32_APICBASE_BSP (1<<8) | |
296 | #define MSR_IA32_APICBASE_ENABLE (1<<11) | |
297 | #define MSR_IA32_APICBASE_BASE (0xfffff<<12) | |
298 | #define MSR_TSC_ADJUST 0x0000003b | |
299 | #define MSR_IA32_TSCDEADLINE 0x6e0 | |
300 | ||
301 | #define MSR_MTRRcap 0xfe | |
302 | #define MSR_MTRRcap_VCNT 8 | |
303 | #define MSR_MTRRcap_FIXRANGE_SUPPORT (1 << 8) | |
304 | #define MSR_MTRRcap_WC_SUPPORTED (1 << 10) | |
305 | ||
306 | #define MSR_IA32_SYSENTER_CS 0x174 | |
307 | #define MSR_IA32_SYSENTER_ESP 0x175 | |
308 | #define MSR_IA32_SYSENTER_EIP 0x176 | |
309 | ||
310 | #define MSR_MCG_CAP 0x179 | |
311 | #define MSR_MCG_STATUS 0x17a | |
312 | #define MSR_MCG_CTL 0x17b | |
313 | ||
314 | #define MSR_IA32_PERF_STATUS 0x198 | |
315 | ||
316 | #define MSR_IA32_MISC_ENABLE 0x1a0 | |
317 | /* Indicates good rep/movs microcode on some processors: */ | |
318 | #define MSR_IA32_MISC_ENABLE_DEFAULT 1 | |
319 | ||
320 | #define MSR_MTRRphysBase(reg) (0x200 + 2 * (reg)) | |
321 | #define MSR_MTRRphysMask(reg) (0x200 + 2 * (reg) + 1) | |
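
/* Worked example: the variable-range MTRR base/mask MSRs form an
   interleaved pair per register, so MSR_MTRRphysBase(1) evaluates to
   0x202 and MSR_MTRRphysMask(1) to 0x203. */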
322 | ||
323 | #define MSR_MTRRfix64K_00000 0x250 | |
324 | #define MSR_MTRRfix16K_80000 0x258 | |
325 | #define MSR_MTRRfix16K_A0000 0x259 | |
326 | #define MSR_MTRRfix4K_C0000 0x268 | |
327 | #define MSR_MTRRfix4K_C8000 0x269 | |
328 | #define MSR_MTRRfix4K_D0000 0x26a | |
329 | #define MSR_MTRRfix4K_D8000 0x26b | |
330 | #define MSR_MTRRfix4K_E0000 0x26c | |
331 | #define MSR_MTRRfix4K_E8000 0x26d | |
332 | #define MSR_MTRRfix4K_F0000 0x26e | |
333 | #define MSR_MTRRfix4K_F8000 0x26f | |
334 | ||
335 | #define MSR_PAT 0x277 | |
336 | ||
337 | #define MSR_MTRRdefType 0x2ff | |
338 | ||
339 | #define MSR_MC0_CTL 0x400 | |
340 | #define MSR_MC0_STATUS 0x401 | |
341 | #define MSR_MC0_ADDR 0x402 | |
342 | #define MSR_MC0_MISC 0x403 | |
343 | ||
344 | #define MSR_EFER 0xc0000080 | |
345 | ||
346 | #define MSR_EFER_SCE (1 << 0) | |
347 | #define MSR_EFER_LME (1 << 8) | |
348 | #define MSR_EFER_LMA (1 << 10) | |
349 | #define MSR_EFER_NXE (1 << 11) | |
350 | #define MSR_EFER_SVME (1 << 12) | |
351 | #define MSR_EFER_FFXSR (1 << 14) | |
352 | ||
353 | #define MSR_STAR 0xc0000081 | |
354 | #define MSR_LSTAR 0xc0000082 | |
355 | #define MSR_CSTAR 0xc0000083 | |
356 | #define MSR_FMASK 0xc0000084 | |
357 | #define MSR_FSBASE 0xc0000100 | |
358 | #define MSR_GSBASE 0xc0000101 | |
359 | #define MSR_KERNELGSBASE 0xc0000102 | |
360 | #define MSR_TSC_AUX 0xc0000103 | |
361 | ||
362 | #define MSR_VM_HSAVE_PA 0xc0010117 | |
363 | ||
364 | /* CPUID feature words */ | |
365 | typedef enum FeatureWord { | |
366 | FEAT_1_EDX, /* CPUID[1].EDX */ | |
367 | FEAT_1_ECX, /* CPUID[1].ECX */ | |
368 | FEAT_7_0_EBX, /* CPUID[EAX=7,ECX=0].EBX */ | |
369 | FEAT_8000_0001_EDX, /* CPUID[8000_0001].EDX */ | |
370 | FEAT_8000_0001_ECX, /* CPUID[8000_0001].ECX */ | |
371 | FEAT_C000_0001_EDX, /* CPUID[C000_0001].EDX */ | |
372 | FEAT_KVM, /* CPUID[4000_0001].EAX (KVM_CPUID_FEATURES) */ | |
373 | FEAT_SVM, /* CPUID[8000_000A].EDX */ | |
374 | FEATURE_WORDS, | |
375 | } FeatureWord; | |
376 | ||
377 | typedef uint32_t FeatureWordArray[FEATURE_WORDS]; | |
378 | ||
379 | /* cpuid_features bits */ | |
380 | #define CPUID_FP87 (1 << 0) | |
381 | #define CPUID_VME (1 << 1) | |
382 | #define CPUID_DE (1 << 2) | |
383 | #define CPUID_PSE (1 << 3) | |
384 | #define CPUID_TSC (1 << 4) | |
385 | #define CPUID_MSR (1 << 5) | |
386 | #define CPUID_PAE (1 << 6) | |
387 | #define CPUID_MCE (1 << 7) | |
388 | #define CPUID_CX8 (1 << 8) | |
389 | #define CPUID_APIC (1 << 9) | |
390 | #define CPUID_SEP (1 << 11) /* sysenter/sysexit */ | |
391 | #define CPUID_MTRR (1 << 12) | |
392 | #define CPUID_PGE (1 << 13) | |
393 | #define CPUID_MCA (1 << 14) | |
394 | #define CPUID_CMOV (1 << 15) | |
395 | #define CPUID_PAT (1 << 16) | |
396 | #define CPUID_PSE36 (1 << 17) | |
397 | #define CPUID_PN (1 << 18) | |
398 | #define CPUID_CLFLUSH (1 << 19) | |
399 | #define CPUID_DTS (1 << 21) | |
400 | #define CPUID_ACPI (1 << 22) | |
401 | #define CPUID_MMX (1 << 23) | |
402 | #define CPUID_FXSR (1 << 24) | |
403 | #define CPUID_SSE (1 << 25) | |
404 | #define CPUID_SSE2 (1 << 26) | |
405 | #define CPUID_SS (1 << 27) | |
406 | #define CPUID_HT (1 << 28) | |
407 | #define CPUID_TM (1 << 29) | |
408 | #define CPUID_IA64 (1 << 30) | |
409 | #define CPUID_PBE (1 << 31) | |
410 | ||
411 | #define CPUID_EXT_SSE3 (1 << 0) | |
412 | #define CPUID_EXT_PCLMULQDQ (1 << 1) | |
413 | #define CPUID_EXT_DTES64 (1 << 2) | |
414 | #define CPUID_EXT_MONITOR (1 << 3) | |
415 | #define CPUID_EXT_DSCPL (1 << 4) | |
416 | #define CPUID_EXT_VMX (1 << 5) | |
417 | #define CPUID_EXT_SMX (1 << 6) | |
418 | #define CPUID_EXT_EST (1 << 7) | |
419 | #define CPUID_EXT_TM2 (1 << 8) | |
420 | #define CPUID_EXT_SSSE3 (1 << 9) | |
421 | #define CPUID_EXT_CID (1 << 10) | |
422 | #define CPUID_EXT_FMA (1 << 12) | |
423 | #define CPUID_EXT_CX16 (1 << 13) | |
424 | #define CPUID_EXT_XTPR (1 << 14) | |
425 | #define CPUID_EXT_PDCM (1 << 15) | |
426 | #define CPUID_EXT_PCID (1 << 17) | |
427 | #define CPUID_EXT_DCA (1 << 18) | |
428 | #define CPUID_EXT_SSE41 (1 << 19) | |
429 | #define CPUID_EXT_SSE42 (1 << 20) | |
430 | #define CPUID_EXT_X2APIC (1 << 21) | |
431 | #define CPUID_EXT_MOVBE (1 << 22) | |
432 | #define CPUID_EXT_POPCNT (1 << 23) | |
433 | #define CPUID_EXT_TSC_DEADLINE_TIMER (1 << 24) | |
434 | #define CPUID_EXT_AES (1 << 25) | |
435 | #define CPUID_EXT_XSAVE (1 << 26) | |
436 | #define CPUID_EXT_OSXSAVE (1 << 27) | |
437 | #define CPUID_EXT_AVX (1 << 28) | |
438 | #define CPUID_EXT_F16C (1 << 29) | |
439 | #define CPUID_EXT_RDRAND (1 << 30) | |
440 | #define CPUID_EXT_HYPERVISOR (1 << 31) | |
441 | ||
442 | #define CPUID_EXT2_FPU (1 << 0) | |
443 | #define CPUID_EXT2_VME (1 << 1) | |
444 | #define CPUID_EXT2_DE (1 << 2) | |
445 | #define CPUID_EXT2_PSE (1 << 3) | |
446 | #define CPUID_EXT2_TSC (1 << 4) | |
447 | #define CPUID_EXT2_MSR (1 << 5) | |
448 | #define CPUID_EXT2_PAE (1 << 6) | |
449 | #define CPUID_EXT2_MCE (1 << 7) | |
450 | #define CPUID_EXT2_CX8 (1 << 8) | |
451 | #define CPUID_EXT2_APIC (1 << 9) | |
452 | #define CPUID_EXT2_SYSCALL (1 << 11) | |
453 | #define CPUID_EXT2_MTRR (1 << 12) | |
454 | #define CPUID_EXT2_PGE (1 << 13) | |
455 | #define CPUID_EXT2_MCA (1 << 14) | |
456 | #define CPUID_EXT2_CMOV (1 << 15) | |
457 | #define CPUID_EXT2_PAT (1 << 16) | |
458 | #define CPUID_EXT2_PSE36 (1 << 17) | |
459 | #define CPUID_EXT2_MP (1 << 19) | |
460 | #define CPUID_EXT2_NX (1 << 20) | |
461 | #define CPUID_EXT2_MMXEXT (1 << 22) | |
462 | #define CPUID_EXT2_MMX (1 << 23) | |
463 | #define CPUID_EXT2_FXSR (1 << 24) | |
464 | #define CPUID_EXT2_FFXSR (1 << 25) | |
465 | #define CPUID_EXT2_PDPE1GB (1 << 26) | |
466 | #define CPUID_EXT2_RDTSCP (1 << 27) | |
467 | #define CPUID_EXT2_LM (1 << 29) | |
468 | #define CPUID_EXT2_3DNOWEXT (1 << 30) | |
469 | #define CPUID_EXT2_3DNOW (1 << 31) | |
470 | ||
471 | /* CPUID[8000_0001].EDX bits that are aliase of CPUID[1].EDX bits on AMD CPUs */ | |
472 | #define CPUID_EXT2_AMD_ALIASES (CPUID_EXT2_FPU | CPUID_EXT2_VME | \ | |
473 | CPUID_EXT2_DE | CPUID_EXT2_PSE | \ | |
474 | CPUID_EXT2_TSC | CPUID_EXT2_MSR | \ | |
475 | CPUID_EXT2_PAE | CPUID_EXT2_MCE | \ | |
476 | CPUID_EXT2_CX8 | CPUID_EXT2_APIC | \ | |
477 | CPUID_EXT2_MTRR | CPUID_EXT2_PGE | \ | |
478 | CPUID_EXT2_MCA | CPUID_EXT2_CMOV | \ | |
479 | CPUID_EXT2_PAT | CPUID_EXT2_PSE36 | \ | |
480 | CPUID_EXT2_MMX | CPUID_EXT2_FXSR) | |
481 | ||
482 | #define CPUID_EXT3_LAHF_LM (1 << 0) | |
483 | #define CPUID_EXT3_CMP_LEG (1 << 1) | |
484 | #define CPUID_EXT3_SVM (1 << 2) | |
485 | #define CPUID_EXT3_EXTAPIC (1 << 3) | |
486 | #define CPUID_EXT3_CR8LEG (1 << 4) | |
487 | #define CPUID_EXT3_ABM (1 << 5) | |
488 | #define CPUID_EXT3_SSE4A (1 << 6) | |
489 | #define CPUID_EXT3_MISALIGNSSE (1 << 7) | |
490 | #define CPUID_EXT3_3DNOWPREFETCH (1 << 8) | |
491 | #define CPUID_EXT3_OSVW (1 << 9) | |
492 | #define CPUID_EXT3_IBS (1 << 10) | |
493 | #define CPUID_EXT3_XOP (1 << 11) | |
494 | #define CPUID_EXT3_SKINIT (1 << 12) | |
495 | #define CPUID_EXT3_WDT (1 << 13) | |
496 | #define CPUID_EXT3_LWP (1 << 15) | |
497 | #define CPUID_EXT3_FMA4 (1 << 16) | |
498 | #define CPUID_EXT3_TCE (1 << 17) | |
499 | #define CPUID_EXT3_NODEID (1 << 19) | |
500 | #define CPUID_EXT3_TBM (1 << 21) | |
501 | #define CPUID_EXT3_TOPOEXT (1 << 22) | |
502 | #define CPUID_EXT3_PERFCORE (1 << 23) | |
503 | #define CPUID_EXT3_PERFNB (1 << 24) | |
504 | ||
505 | #define CPUID_SVM_NPT (1 << 0) | |
506 | #define CPUID_SVM_LBRV (1 << 1) | |
507 | #define CPUID_SVM_SVMLOCK (1 << 2) | |
508 | #define CPUID_SVM_NRIPSAVE (1 << 3) | |
509 | #define CPUID_SVM_TSCSCALE (1 << 4) | |
510 | #define CPUID_SVM_VMCBCLEAN (1 << 5) | |
511 | #define CPUID_SVM_FLUSHASID (1 << 6) | |
512 | #define CPUID_SVM_DECODEASSIST (1 << 7) | |
513 | #define CPUID_SVM_PAUSEFILTER (1 << 10) | |
514 | #define CPUID_SVM_PFTHRESHOLD (1 << 12) | |
515 | ||
516 | #define CPUID_7_0_EBX_FSGSBASE (1 << 0) | |
517 | #define CPUID_7_0_EBX_BMI1 (1 << 3) | |
518 | #define CPUID_7_0_EBX_HLE (1 << 4) | |
519 | #define CPUID_7_0_EBX_AVX2 (1 << 5) | |
520 | #define CPUID_7_0_EBX_SMEP (1 << 7) | |
521 | #define CPUID_7_0_EBX_BMI2 (1 << 8) | |
522 | #define CPUID_7_0_EBX_ERMS (1 << 9) | |
523 | #define CPUID_7_0_EBX_INVPCID (1 << 10) | |
524 | #define CPUID_7_0_EBX_RTM (1 << 11) | |
525 | #define CPUID_7_0_EBX_RDSEED (1 << 18) | |
526 | #define CPUID_7_0_EBX_ADX (1 << 19) | |
527 | #define CPUID_7_0_EBX_SMAP (1 << 20) | |
528 | ||
529 | #define CPUID_VENDOR_SZ 12 | |
530 | ||
531 | #define CPUID_VENDOR_INTEL_1 0x756e6547 /* "Genu" */ | |
532 | #define CPUID_VENDOR_INTEL_2 0x49656e69 /* "ineI" */ | |
533 | #define CPUID_VENDOR_INTEL_3 0x6c65746e /* "ntel" */ | |
534 | ||
535 | #define CPUID_VENDOR_AMD_1 0x68747541 /* "Auth" */ | |
536 | #define CPUID_VENDOR_AMD_2 0x69746e65 /* "enti" */ | |
537 | #define CPUID_VENDOR_AMD_3 0x444d4163 /* "cAMD" */ | |
538 | ||
539 | #define CPUID_VENDOR_VIA_1 0x746e6543 /* "Cent" */ | |
540 | #define CPUID_VENDOR_VIA_2 0x48727561 /* "aurH" */ | |
541 | #define CPUID_VENDOR_VIA_3 0x736c7561 /* "auls" */ | |
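
/* Illustrative sketch (hypothetical helper, not part of the original
   header): the three vendor words are the byte-packed pieces of the
   12-character vendor string as returned in EBX, EDX and ECX, so
   "GenuineIntel" can be rebuilt from the *_1/*_2/*_3 values above. */
static inline void x86_vendor_words_to_str(char *dst, uint32_t w1,
                                           uint32_t w2, uint32_t w3)
{
    uint32_t words[3] = { w1, w2, w3 };
    int i;

    /* dst must have room for CPUID_VENDOR_SZ + 1 bytes */
    for (i = 0; i < CPUID_VENDOR_SZ; i++) {
        dst[i] = (words[i / 4] >> (8 * (i % 4))) & 0xff;
    }
    dst[CPUID_VENDOR_SZ] = '\0';
}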
542 | ||
543 | #define CPUID_MWAIT_IBE (1 << 1) /* Interrupts can exit capability */ | |
544 | #define CPUID_MWAIT_EMX (1 << 0) /* enumeration supported */ | |
545 | ||
546 | #define EXCP00_DIVZ 0 | |
547 | #define EXCP01_DB 1 | |
548 | #define EXCP02_NMI 2 | |
549 | #define EXCP03_INT3 3 | |
550 | #define EXCP04_INTO 4 | |
551 | #define EXCP05_BOUND 5 | |
552 | #define EXCP06_ILLOP 6 | |
553 | #define EXCP07_PREX 7 | |
554 | #define EXCP08_DBLE 8 | |
555 | #define EXCP09_XERR 9 | |
556 | #define EXCP0A_TSS 10 | |
557 | #define EXCP0B_NOSEG 11 | |
558 | #define EXCP0C_STACK 12 | |
559 | #define EXCP0D_GPF 13 | |
560 | #define EXCP0E_PAGE 14 | |
561 | #define EXCP10_COPR 16 | |
562 | #define EXCP11_ALGN 17 | |
563 | #define EXCP12_MCHK 18 | |
564 | ||
565 | #define EXCP_SYSCALL 0x100 /* only happens in user only emulation | |
566 | for syscall instruction */ | |
567 | ||
568 | /* i386-specific interrupt pending bits. */ | |
569 | #define CPU_INTERRUPT_POLL CPU_INTERRUPT_TGT_EXT_1 | |
570 | #define CPU_INTERRUPT_SMI CPU_INTERRUPT_TGT_EXT_2 | |
571 | #define CPU_INTERRUPT_NMI CPU_INTERRUPT_TGT_EXT_3 | |
572 | #define CPU_INTERRUPT_MCE CPU_INTERRUPT_TGT_EXT_4 | |
573 | #define CPU_INTERRUPT_VIRQ CPU_INTERRUPT_TGT_INT_0 | |
574 | #define CPU_INTERRUPT_INIT CPU_INTERRUPT_TGT_INT_1 | |
575 | #define CPU_INTERRUPT_SIPI CPU_INTERRUPT_TGT_INT_2 | |
576 | #define CPU_INTERRUPT_TPR CPU_INTERRUPT_TGT_INT_3 | |
577 | ||
578 | ||
579 | enum { | |
580 | CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */ | |
581 | CC_OP_EFLAGS, /* all cc are explicitly computed, CC_SRC = flags */ | |
582 | ||
583 | CC_OP_MULB, /* modify all flags, C, O = (CC_SRC != 0) */ | |
584 | CC_OP_MULW, | |
585 | CC_OP_MULL, | |
586 | CC_OP_MULQ, | |
587 | ||
588 | CC_OP_ADDB, /* modify all flags, CC_DST = res, CC_SRC = src1 */ | |
589 | CC_OP_ADDW, | |
590 | CC_OP_ADDL, | |
591 | CC_OP_ADDQ, | |
592 | ||
593 | CC_OP_ADCB, /* modify all flags, CC_DST = res, CC_SRC = src1 */ | |
594 | CC_OP_ADCW, | |
595 | CC_OP_ADCL, | |
596 | CC_OP_ADCQ, | |
597 | ||
598 | CC_OP_SUBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */ | |
599 | CC_OP_SUBW, | |
600 | CC_OP_SUBL, | |
601 | CC_OP_SUBQ, | |
602 | ||
603 | CC_OP_SBBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */ | |
604 | CC_OP_SBBW, | |
605 | CC_OP_SBBL, | |
606 | CC_OP_SBBQ, | |
607 | ||
608 | CC_OP_LOGICB, /* modify all flags, CC_DST = res */ | |
609 | CC_OP_LOGICW, | |
610 | CC_OP_LOGICL, | |
611 | CC_OP_LOGICQ, | |
612 | ||
613 | CC_OP_INCB, /* modify all flags except, CC_DST = res, CC_SRC = C */ | |
614 | CC_OP_INCW, | |
615 | CC_OP_INCL, | |
616 | CC_OP_INCQ, | |
617 | ||
618 | CC_OP_DECB, /* modify all flags except, CC_DST = res, CC_SRC = C */ | |
619 | CC_OP_DECW, | |
620 | CC_OP_DECL, | |
621 | CC_OP_DECQ, | |
622 | ||
623 | CC_OP_SHLB, /* modify all flags, CC_DST = res, CC_SRC.msb = C */ | |
624 | CC_OP_SHLW, | |
625 | CC_OP_SHLL, | |
626 | CC_OP_SHLQ, | |
627 | ||
628 | CC_OP_SARB, /* modify all flags, CC_DST = res, CC_SRC.lsb = C */ | |
629 | CC_OP_SARW, | |
630 | CC_OP_SARL, | |
631 | CC_OP_SARQ, | |
632 | ||
633 | CC_OP_NB, | |
634 | }; | |
635 | ||
636 | typedef struct SegmentCache { | |
637 | uint32_t selector; | |
638 | target_ulong base; | |
639 | uint32_t limit; | |
640 | uint32_t flags; | |
641 | } SegmentCache; | |
642 | ||
643 | typedef union { | |
644 | uint8_t _b[16]; | |
645 | uint16_t _w[8]; | |
646 | uint32_t _l[4]; | |
647 | uint64_t _q[2]; | |
648 | float32 _s[4]; | |
649 | float64 _d[2]; | |
650 | } XMMReg; | |
651 | ||
652 | typedef union { | |
653 | uint8_t _b[8]; | |
654 | uint16_t _w[4]; | |
655 | uint32_t _l[2]; | |
656 | float32 _s[2]; | |
657 | uint64_t q; | |
658 | } MMXReg; | |
659 | ||
660 | #ifdef HOST_WORDS_BIGENDIAN | |
661 | #define XMM_B(n) _b[15 - (n)] | |
662 | #define XMM_W(n) _w[7 - (n)] | |
663 | #define XMM_L(n) _l[3 - (n)] | |
664 | #define XMM_S(n) _s[3 - (n)] | |
665 | #define XMM_Q(n) _q[1 - (n)] | |
666 | #define XMM_D(n) _d[1 - (n)] | |
667 | ||
668 | #define MMX_B(n) _b[7 - (n)] | |
669 | #define MMX_W(n) _w[3 - (n)] | |
670 | #define MMX_L(n) _l[1 - (n)] | |
671 | #define MMX_S(n) _s[1 - (n)] | |
672 | #else | |
673 | #define XMM_B(n) _b[n] | |
674 | #define XMM_W(n) _w[n] | |
675 | #define XMM_L(n) _l[n] | |
676 | #define XMM_S(n) _s[n] | |
677 | #define XMM_Q(n) _q[n] | |
678 | #define XMM_D(n) _d[n] | |
679 | ||
680 | #define MMX_B(n) _b[n] | |
681 | #define MMX_W(n) _w[n] | |
682 | #define MMX_L(n) _l[n] | |
683 | #define MMX_S(n) _s[n] | |
684 | #endif | |
685 | #define MMX_Q(n) q | |
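
/* Example: env->xmm_regs[0].XMM_L(2) always names the third 32-bit lane
   of XMM0 in guest order; the HOST_WORDS_BIGENDIAN variants reverse the
   index so guest-visible lane numbering is host-independent. */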
686 | ||
687 | typedef union { | |
688 | floatx80 d __attribute__((aligned(16))); | |
689 | MMXReg mmx; | |
690 | } FPReg; | |
691 | ||
692 | typedef struct { | |
693 | uint64_t base; | |
694 | uint64_t mask; | |
695 | } MTRRVar; | |
696 | ||
697 | #define CPU_NB_REGS64 16 | |
698 | #define CPU_NB_REGS32 8 | |
699 | ||
700 | #ifdef TARGET_X86_64 | |
701 | #define CPU_NB_REGS CPU_NB_REGS64 | |
702 | #else | |
703 | #define CPU_NB_REGS CPU_NB_REGS32 | |
704 | #endif | |
705 | ||
706 | #define NB_MMU_MODES 3 | |
707 | ||
708 | typedef enum TPRAccess { | |
709 | TPR_ACCESS_READ, | |
710 | TPR_ACCESS_WRITE, | |
711 | } TPRAccess; | |
712 | ||
713 | typedef struct CPUX86State { | |
714 | /* standard registers */ | |
715 | target_ulong regs[CPU_NB_REGS]; | |
716 | target_ulong eip; | |
717 | target_ulong eflags; /* eflags register. During CPU emulation, CC | |
718 | flags and DF are set to zero because they are | |
719 | stored elsewhere */ | |
720 | ||
721 | /* emulator internal eflags handling */ | |
722 | target_ulong cc_src; | |
723 | target_ulong cc_dst; | |
724 | uint32_t cc_op; | |
725 | int32_t df; /* D flag : 1 if D = 0, -1 if D = 1 */ | |
726 | uint32_t hflags; /* TB flags, see HF_xxx constants. These flags | |
727 | are known at translation time. */ | |
728 | uint32_t hflags2; /* various other flags, see HF2_xxx constants. */ | |
729 | ||
730 | /* segments */ | |
731 | SegmentCache segs[6]; /* selector values */ | |
732 | SegmentCache ldt; | |
733 | SegmentCache tr; | |
734 | SegmentCache gdt; /* only base and limit are used */ | |
735 | SegmentCache idt; /* only base and limit are used */ | |
736 | ||
737 | target_ulong cr[5]; /* NOTE: cr1 is unused */ | |
738 | int32_t a20_mask; | |
739 | ||
740 | /* FPU state */ | |
741 | unsigned int fpstt; /* top of stack index */ | |
742 | uint16_t fpus; | |
743 | uint16_t fpuc; | |
744 | uint8_t fptags[8]; /* 0 = valid, 1 = empty */ | |
745 | FPReg fpregs[8]; | |
746 | /* KVM-only so far */ | |
747 | uint16_t fpop; | |
748 | uint64_t fpip; | |
749 | uint64_t fpdp; | |
750 | ||
751 | /* emulator internal variables */ | |
752 | float_status fp_status; | |
753 | floatx80 ft0; | |
754 | ||
755 | float_status mmx_status; /* for 3DNow! float ops */ | |
756 | float_status sse_status; | |
757 | uint32_t mxcsr; | |
758 | XMMReg xmm_regs[CPU_NB_REGS]; | |
759 | XMMReg xmm_t0; | |
760 | MMXReg mmx_t0; | |
761 | target_ulong cc_tmp; /* temporary for rcr/rcl */ | |
762 | ||
763 | /* sysenter registers */ | |
764 | uint32_t sysenter_cs; | |
765 | target_ulong sysenter_esp; | |
766 | target_ulong sysenter_eip; | |
767 | uint64_t efer; | |
768 | uint64_t star; | |
769 | ||
770 | uint64_t vm_hsave; | |
771 | uint64_t vm_vmcb; | |
772 | uint64_t tsc_offset; | |
773 | uint64_t intercept; | |
774 | uint16_t intercept_cr_read; | |
775 | uint16_t intercept_cr_write; | |
776 | uint16_t intercept_dr_read; | |
777 | uint16_t intercept_dr_write; | |
778 | uint32_t intercept_exceptions; | |
779 | uint8_t v_tpr; | |
780 | ||
781 | #ifdef TARGET_X86_64 | |
782 | target_ulong lstar; | |
783 | target_ulong cstar; | |
784 | target_ulong fmask; | |
785 | target_ulong kernelgsbase; | |
786 | #endif | |
787 | uint64_t system_time_msr; | |
788 | uint64_t wall_clock_msr; | |
789 | uint64_t async_pf_en_msr; | |
790 | uint64_t pv_eoi_en_msr; | |
791 | ||
792 | uint64_t tsc; | |
793 | uint64_t tsc_adjust; | |
794 | uint64_t tsc_deadline; | |
795 | ||
796 | uint64_t mcg_status; | |
797 | uint64_t msr_ia32_misc_enable; | |
798 | ||
799 | /* exception/interrupt handling */ | |
800 | int error_code; | |
801 | int exception_is_int; | |
802 | target_ulong exception_next_eip; | |
803 | target_ulong dr[8]; /* debug registers */ | |
804 | union { | |
805 | CPUBreakpoint *cpu_breakpoint[4]; | |
806 | CPUWatchpoint *cpu_watchpoint[4]; | |
807 | }; /* break/watchpoints for dr[0..3] */ | |
808 | uint32_t smbase; | |
809 | int old_exception; /* exception in flight */ | |
810 | ||
811 | /* KVM states, automatically cleared on reset */ | |
812 | uint8_t nmi_injected; | |
813 | uint8_t nmi_pending; | |
814 | ||
815 | CPU_COMMON | |
816 | ||
817 | uint64_t pat; | |
818 | ||
819 | /* processor features (e.g. for CPUID insn) */ | |
820 | uint32_t cpuid_level; | |
821 | uint32_t cpuid_vendor1; | |
822 | uint32_t cpuid_vendor2; | |
823 | uint32_t cpuid_vendor3; | |
824 | uint32_t cpuid_version; | |
825 | uint32_t cpuid_features; | |
826 | uint32_t cpuid_ext_features; | |
827 | uint32_t cpuid_xlevel; | |
828 | uint32_t cpuid_model[12]; | |
829 | uint32_t cpuid_ext2_features; | |
830 | uint32_t cpuid_ext3_features; | |
831 | uint32_t cpuid_apic_id; | |
832 | int cpuid_vendor_override; | |
833 | /* Store the results of Centaur's CPUID instructions */ | |
834 | uint32_t cpuid_xlevel2; | |
835 | uint32_t cpuid_ext4_features; | |
836 | /* Flags from CPUID[EAX=7,ECX=0].EBX */ | |
837 | uint32_t cpuid_7_0_ebx_features; | |
838 | ||
839 | /* MTRRs */ | |
840 | uint64_t mtrr_fixed[11]; | |
841 | uint64_t mtrr_deftype; | |
842 | MTRRVar mtrr_var[8]; | |
843 | ||
844 | /* For KVM */ | |
845 | uint32_t mp_state; | |
846 | int32_t exception_injected; | |
847 | int32_t interrupt_injected; | |
848 | uint8_t soft_interrupt; | |
849 | uint8_t has_error_code; | |
850 | uint32_t sipi_vector; | |
851 | uint32_t cpuid_kvm_features; | |
852 | uint32_t cpuid_svm_features; | |
853 | bool tsc_valid; | |
854 | int tsc_khz; | |
855 | void *kvm_xsave_buf; | |
856 | ||
857 | /* in order to simplify APIC support, we leave this pointer to the | |
858 | user */ | |
859 | struct DeviceState *apic_state; | |
860 | ||
861 | uint64_t mcg_cap; | |
862 | uint64_t mcg_ctl; | |
863 | uint64_t mce_banks[MCE_BANKS_DEF*4]; | |
864 | ||
865 | uint64_t tsc_aux; | |
866 | ||
867 | /* vmstate */ | |
868 | uint16_t fpus_vmstate; | |
869 | uint16_t fptag_vmstate; | |
870 | uint16_t fpregs_format_vmstate; | |
871 | ||
872 | uint64_t xstate_bv; | |
873 | XMMReg ymmh_regs[CPU_NB_REGS]; | |
874 | ||
875 | uint64_t xcr0; | |
876 | ||
877 | TPRAccess tpr_access_type; | |
878 | } CPUX86State; | |
879 | ||
880 | #include "cpu-qom.h" | |
881 | ||
882 | X86CPU *cpu_x86_init(const char *cpu_model); | |
883 | int cpu_x86_exec(CPUX86State *s); | |
884 | void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf); | |
885 | void x86_cpudef_setup(void); | |
886 | int cpu_x86_support_mca_broadcast(CPUX86State *env); | |
887 | ||
888 | int cpu_get_pic_interrupt(CPUX86State *s); | |
889 | /* MSDOS compatibility mode FPU exception support */ | |
890 | void cpu_set_ferr(CPUX86State *s); | |
891 | ||
892 | /* this function must always be used to load data in the segment | |
893 | cache: it synchronizes the hflags with the segment cache values */ | |
894 | static inline void cpu_x86_load_seg_cache(CPUX86State *env, | |
895 | int seg_reg, unsigned int selector, | |
896 | target_ulong base, | |
897 | unsigned int limit, | |
898 | unsigned int flags) | |
899 | { | |
900 | SegmentCache *sc; | |
901 | unsigned int new_hflags; | |
902 | ||
903 | sc = &env->segs[seg_reg]; | |
904 | sc->selector = selector; | |
905 | sc->base = base; | |
906 | sc->limit = limit; | |
907 | sc->flags = flags; | |
908 | ||
909 | /* update the hidden flags */ | |
910 | { | |
911 | if (seg_reg == R_CS) { | |
912 | #ifdef TARGET_X86_64 | |
913 | if ((env->hflags & HF_LMA_MASK) && (flags & DESC_L_MASK)) { | |
914 | /* long mode */ | |
915 | env->hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK; | |
916 | env->hflags &= ~(HF_ADDSEG_MASK); | |
917 | } else | |
918 | #endif | |
919 | { | |
920 | /* legacy / compatibility case */ | |
921 | new_hflags = (env->segs[R_CS].flags & DESC_B_MASK) | |
922 | >> (DESC_B_SHIFT - HF_CS32_SHIFT); | |
923 | env->hflags = (env->hflags & ~(HF_CS32_MASK | HF_CS64_MASK)) | | |
924 | new_hflags; | |
925 | } | |
926 | } | |
927 | new_hflags = (env->segs[R_SS].flags & DESC_B_MASK) | |
928 | >> (DESC_B_SHIFT - HF_SS32_SHIFT); | |
929 | if (env->hflags & HF_CS64_MASK) { | |
930 | /* zero base assumed for DS, ES and SS in long mode */ | |
931 | } else if (!(env->cr[0] & CR0_PE_MASK) || | |
932 | (env->eflags & VM_MASK) || | |
933 | !(env->hflags & HF_CS32_MASK)) { | |
            /* XXX: try to avoid this test. The problem comes from the
               fact that in real mode or vm86 mode we only modify the
               'base' and 'selector' fields of the segment cache to go
               faster. A solution may be to force addseg to one in
               translate-i386.c. */
            new_hflags |= HF_ADDSEG_MASK;
        } else {
            new_hflags |= ((env->segs[R_DS].base |
                            env->segs[R_ES].base |
                            env->segs[R_SS].base) != 0) <<
                HF_ADDSEG_SHIFT;
        }
        env->hflags = (env->hflags &
                       ~(HF_SS32_MASK | HF_ADDSEG_MASK)) | new_hflags;
    }
}

static inline void cpu_x86_load_seg_cache_sipi(X86CPU *cpu,
                                               int sipi_vector)
{
    CPUX86State *env = &cpu->env;

    env->eip = 0;
    cpu_x86_load_seg_cache(env, R_CS, sipi_vector << 8,
                           sipi_vector << 12,
                           env->segs[R_CS].limit,
                           env->segs[R_CS].flags);
    env->halted = 0;
}

int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
                            target_ulong *base, unsigned int *limit,
                            unsigned int *flags);

/* wrapper, just in case memory mappings must be changed */
static inline void cpu_x86_set_cpl(CPUX86State *s, int cpl)
{
#if HF_CPL_MASK == 3
    s->hflags = (s->hflags & ~HF_CPL_MASK) | cpl;
#else
#error HF_CPL_MASK is hardcoded
#endif
}

/* op_helper.c */
/* used for debug or cpu save/restore */
void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, floatx80 f);
floatx80 cpu_set_fp80(uint64_t mant, uint16_t upper);

/* cpu-exec.c */
/* the following helpers are only usable in user mode simulation as
   they can trigger unexpected exceptions */
void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector);
void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32);
void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32);
989 | ||
990 | /* you can call this signal handler from your SIGBUS and SIGSEGV | |
991 | signal handlers to inform the virtual CPU of exceptions. non zero | |
992 | is returned if the signal was handled by the virtual CPU. */ | |
993 | int cpu_x86_signal_handler(int host_signum, void *pinfo, | |
994 | void *puc); | |
995 | ||
996 | /* cpuid.c */ | |
997 | void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, | |
998 | uint32_t *eax, uint32_t *ebx, | |
999 | uint32_t *ecx, uint32_t *edx); | |
1000 | int cpu_x86_register(X86CPU *cpu, const char *cpu_model); | |
1001 | void cpu_clear_apic_feature(CPUX86State *env); | |
1002 | void host_cpuid(uint32_t function, uint32_t count, | |
1003 | uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx); | |
1004 | ||
1005 | /* helper.c */ | |
1006 | int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr, | |
1007 | int is_write, int mmu_idx); | |
1008 | #define cpu_handle_mmu_fault cpu_x86_handle_mmu_fault | |
1009 | void cpu_x86_set_a20(CPUX86State *env, int a20_state); | |
1010 | ||
static inline int hw_breakpoint_enabled(unsigned long dr7, int index)
{
    return (dr7 >> (index * 2)) & 3;
}

static inline int hw_breakpoint_type(unsigned long dr7, int index)
{
    return (dr7 >> (DR7_TYPE_SHIFT + (index * 4))) & 3;
}

static inline int hw_breakpoint_len(unsigned long dr7, int index)
{
    int len = ((dr7 >> (DR7_LEN_SHIFT + (index * 4))) & 3);
    return (len == 2) ? 8 : len + 1;
}

void hw_breakpoint_insert(CPUX86State *env, int index);
void hw_breakpoint_remove(CPUX86State *env, int index);
int check_hw_breakpoints(CPUX86State *env, int force_dr6_update);
void breakpoint_handler(CPUX86State *env);

/* will be suppressed */
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);

/* hw/pc.c */
void cpu_smm_update(CPUX86State *env);
uint64_t cpu_get_tsc(CPUX86State *env);

#define TARGET_PAGE_BITS 12

#ifdef TARGET_X86_64
#define TARGET_PHYS_ADDR_SPACE_BITS 52
/* ??? This is really 48 bits, sign-extended, but the only thing
   accessible to userland with bit 48 set is the VSYSCALL, and that
   is handled via other mechanisms. */
#define TARGET_VIRT_ADDR_SPACE_BITS 47
#else
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#define TARGET_VIRT_ADDR_SPACE_BITS 32
#endif

static inline CPUX86State *cpu_init(const char *cpu_model)
{
    X86CPU *cpu = cpu_x86_init(cpu_model);
    if (cpu == NULL) {
        return NULL;
    }
    return &cpu->env;
}

#define cpu_exec cpu_x86_exec
#define cpu_gen_code cpu_x86_gen_code
#define cpu_signal_handler cpu_x86_signal_handler
#define cpu_list x86_cpu_list
#define cpudef_setup x86_cpudef_setup

#define CPU_SAVE_VERSION 12

/* MMU modes definitions */
#define MMU_MODE0_SUFFIX _kernel
#define MMU_MODE1_SUFFIX _user
#define MMU_MODE2_SUFFIX _ksmap /* Kernel with SMAP override */
#define MMU_KERNEL_IDX 0
#define MMU_USER_IDX 1
#define MMU_KSMAP_IDX 2
static inline int cpu_mmu_index(CPUX86State *env)
{
    return (env->hflags & HF_CPL_MASK) == 3 ? MMU_USER_IDX :
        ((env->hflags & HF_SMAP_MASK) && (env->eflags & AC_MASK))
        ? MMU_KSMAP_IDX : MMU_KERNEL_IDX;
}
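
/* In other words: CPL 3 always uses MMU_USER_IDX, while CPL 0-2 uses
   MMU_KSMAP_IDX only when CR4.SMAP is enabled (HF_SMAP_MASK) and
   EFLAGS.AC is set, keeping SMAP-override translations apart from
   ordinary kernel ones. */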
1084 | ||
1085 | #undef EAX | |
1086 | #define EAX (env->regs[R_EAX]) | |
1087 | #undef ECX | |
1088 | #define ECX (env->regs[R_ECX]) | |
1089 | #undef EDX | |
1090 | #define EDX (env->regs[R_EDX]) | |
1091 | #undef EBX | |
1092 | #define EBX (env->regs[R_EBX]) | |
1093 | #undef ESP | |
1094 | #define ESP (env->regs[R_ESP]) | |
1095 | #undef EBP | |
1096 | #define EBP (env->regs[R_EBP]) | |
1097 | #undef ESI | |
1098 | #define ESI (env->regs[R_ESI]) | |
1099 | #undef EDI | |
1100 | #define EDI (env->regs[R_EDI]) | |
1101 | #undef EIP | |
1102 | #define EIP (env->eip) | |
1103 | #define DF (env->df) | |
1104 | ||
1105 | #define CC_SRC (env->cc_src) | |
1106 | #define CC_DST (env->cc_dst) | |
1107 | #define CC_OP (env->cc_op) | |
1108 | ||
1109 | /* n must be a constant to be efficient */ | |
1110 | static inline target_long lshift(target_long x, int n) | |
1111 | { | |
1112 | if (n >= 0) { | |
1113 | return x << n; | |
1114 | } else { | |
1115 | return x >> (-n); | |
1116 | } | |
1117 | } | |
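
/* For example, lshift(x, 3) computes x << 3 while lshift(x, -3)
   computes the arithmetic right shift x >> 3; with a constant n the
   compiler folds the sign test away. */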
1118 | ||
1119 | /* float macros */ | |
1120 | #define FT0 (env->ft0) | |
1121 | #define ST0 (env->fpregs[env->fpstt].d) | |
1122 | #define ST(n) (env->fpregs[(env->fpstt + (n)) & 7].d) | |
1123 | #define ST1 ST(1) | |
1124 | ||
1125 | /* translate.c */ | |
1126 | void optimize_flags_init(void); | |
1127 | ||
1128 | #if defined(CONFIG_USER_ONLY) | |
1129 | static inline void cpu_clone_regs(CPUX86State *env, target_ulong newsp) | |
1130 | { | |
1131 | if (newsp) | |
1132 | env->regs[R_ESP] = newsp; | |
1133 | env->regs[R_EAX] = 0; | |
1134 | } | |
1135 | #endif | |
1136 | ||
1137 | #include "exec/cpu-all.h" | |
1138 | #include "svm.h" | |
1139 | ||
1140 | #if !defined(CONFIG_USER_ONLY) | |
1141 | #include "hw/apic.h" | |
1142 | #endif | |
1143 | ||
1144 | static inline bool cpu_has_work(CPUState *cpu) | |
1145 | { | |
1146 | CPUX86State *env = &X86_CPU(cpu)->env; | |
1147 | ||
1148 | return ((env->interrupt_request & (CPU_INTERRUPT_HARD | | |
1149 | CPU_INTERRUPT_POLL)) && | |
1150 | (env->eflags & IF_MASK)) || | |
1151 | (env->interrupt_request & (CPU_INTERRUPT_NMI | | |
1152 | CPU_INTERRUPT_INIT | | |
1153 | CPU_INTERRUPT_SIPI | | |
1154 | CPU_INTERRUPT_MCE)); | |
1155 | } | |
1156 | ||
1157 | #include "exec/exec-all.h" | |
1158 | ||
1159 | static inline void cpu_pc_from_tb(CPUX86State *env, TranslationBlock *tb) | |
1160 | { | |
1161 | env->eip = tb->pc - tb->cs_base; | |
1162 | } | |
1163 | ||
1164 | static inline void cpu_get_tb_cpu_state(CPUX86State *env, target_ulong *pc, | |
1165 | target_ulong *cs_base, int *flags) | |
1166 | { | |
1167 | *cs_base = env->segs[R_CS].base; | |
1168 | *pc = *cs_base + env->eip; | |
1169 | *flags = env->hflags | | |
1170 | (env->eflags & (IOPL_MASK | TF_MASK | RF_MASK | VM_MASK | AC_MASK)); | |
1171 | } | |
1172 | ||
1173 | void do_cpu_init(X86CPU *cpu); | |
1174 | void do_cpu_sipi(X86CPU *cpu); | |
1175 | ||
1176 | #define MCE_INJECT_BROADCAST 1 | |
1177 | #define MCE_INJECT_UNCOND_AO 2 | |
1178 | ||
1179 | void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank, | |
1180 | uint64_t status, uint64_t mcg_status, uint64_t addr, | |
1181 | uint64_t misc, int flags); | |
1182 | ||
1183 | /* excp_helper.c */ | |
1184 | void QEMU_NORETURN raise_exception(CPUX86State *env, int exception_index); | |
1185 | void QEMU_NORETURN raise_exception_err(CPUX86State *env, int exception_index, | |
1186 | int error_code); | |
1187 | void QEMU_NORETURN raise_interrupt(CPUX86State *nenv, int intno, int is_int, | |
1188 | int error_code, int next_eip_addend); | |
1189 | ||
1190 | /* cc_helper.c */ | |
1191 | extern const uint8_t parity_table[256]; | |
1192 | uint32_t cpu_cc_compute_all(CPUX86State *env1, int op); | |
1193 | ||
1194 | static inline uint32_t cpu_compute_eflags(CPUX86State *env) | |
1195 | { | |
1196 | return env->eflags | cpu_cc_compute_all(env, CC_OP) | (DF & DF_MASK); | |
1197 | } | |
1198 | ||
1199 | /* NOTE: CC_OP must be modified manually to CC_OP_EFLAGS */ | |
1200 | static inline void cpu_load_eflags(CPUX86State *env, int eflags, | |
1201 | int update_mask) | |
1202 | { | |
1203 | CC_SRC = eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C); | |
1204 | DF = 1 - (2 * ((eflags >> 10) & 1)); | |
1205 | env->eflags = (env->eflags & ~update_mask) | | |
1206 | (eflags & update_mask) | 0x2; | |
1207 | } | |
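
/* Example: eflags bit 10 is DF, so the expression above yields df = +1
   when DF = 0 and df = -1 when DF = 1, matching the convention noted on
   the 'df' field of CPUX86State. */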
1208 | ||
1209 | /* load efer and update the corresponding hflags. XXX: do consistency | |
1210 | checks with cpuid bits? */ | |
1211 | static inline void cpu_load_efer(CPUX86State *env, uint64_t val) | |
1212 | { | |
1213 | env->efer = val; | |
1214 | env->hflags &= ~(HF_LMA_MASK | HF_SVME_MASK); | |
1215 | if (env->efer & MSR_EFER_LMA) { | |
1216 | env->hflags |= HF_LMA_MASK; | |
1217 | } | |
1218 | if (env->efer & MSR_EFER_SVME) { | |
1219 | env->hflags |= HF_SVME_MASK; | |
1220 | } | |
1221 | } | |
1222 | ||
1223 | /* svm_helper.c */ | |
1224 | void cpu_svm_check_intercept_param(CPUX86State *env1, uint32_t type, | |
1225 | uint64_t param); | |
1226 | void cpu_vmexit(CPUX86State *nenv, uint32_t exit_code, uint64_t exit_info_1); | |
1227 | ||
1228 | /* op_helper.c */ | |
1229 | void do_interrupt(CPUX86State *env); | |
1230 | void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw); | |
1231 | ||
1232 | void do_smm_enter(CPUX86State *env1); | |
1233 | ||
1234 | void cpu_report_tpr_access(CPUX86State *env, TPRAccess access); | |
1235 | ||
1236 | void enable_kvm_pv_eoi(void); | |
1237 | ||
1238 | /* Return name of 32-bit register, from a R_* constant */ | |
1239 | const char *get_register_name_32(unsigned int reg); | |
1240 | ||
1241 | #endif /* CPU_I386_H */ |