]> Git Repo - qemu.git/blame - target-i386/helper.c
target-i386: Fix segment cache dump
[qemu.git] / target-i386 / helper.c
CommitLineData
2c0262af 1/*
eaa728ee 2 * i386 helpers (without register variable usage)
5fafdf24 3 *
2c0262af
FB
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
2c0262af 18 */
2c0262af 19
eaa728ee 20#include "cpu.h"
9c17d615 21#include "sysemu/kvm.h"
2fa11da0 22#ifndef CONFIG_USER_ONLY
9c17d615 23#include "sysemu/sysemu.h"
83c9089e 24#include "monitor/monitor.h"
2fa11da0 25#endif
f3f2d9be 26
eaa728ee 27//#define DEBUG_MMU
b5ec5ce0 28
317ac620 29static void cpu_x86_version(CPUX86State *env, int *family, int *model)
2bd3e04c
JD
30{
31 int cpuver = env->cpuid_version;
32
33 if (family == NULL || model == NULL) {
34 return;
35 }
36
37 *family = (cpuver >> 8) & 0x0f;
38 *model = ((cpuver >> 12) & 0xf0) + ((cpuver >> 4) & 0x0f);
39}
40
41/* Broadcast MCA signal for processor version 06H_EH and above */
317ac620 42int cpu_x86_support_mca_broadcast(CPUX86State *env)
2bd3e04c
JD
43{
44 int family = 0;
45 int model = 0;
46
47 cpu_x86_version(env, &family, &model);
48 if ((family == 6 && model >= 14) || family > 6) {
49 return 1;
50 }
51
52 return 0;
53}
54
eaa728ee
FB
55/***********************************************************/
56/* x86 debug */
3b46e624 57
/* Human-readable names for the lazy condition-code operations, indexed by
 * the CC_OP_* enum.  NOTE(review): entry order must stay in lockstep with
 * the CC_OP_* definitions in cpu.h — each 4-entry group covers the B/W/L/Q
 * operand widths of one operation. */
static const char *cc_op_str[CC_OP_NB] = {
    "DYNAMIC",  /* must use dynamic code to compute flags */
    "EFLAGS",   /* flags are stored directly in CC_SRC */

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",

    "BMILGB",
    "BMILGW",
    "BMILGL",
    "BMILGQ",

    "ADCX",
    "ADOX",
    "ADCOX",

    "CLR",
};
7e84c249 123
/* Print one cached segment descriptor (selector, base, limit, flags) plus a
 * decoded view of its attribute bits to the monitor/log stream.  Used by
 * x86_cpu_dump_state() for each segment register, LDT and TR. */
static void
cpu_x86_dump_seg_cache(CPUX86State *env, FILE *f, fprintf_function cpu_fprintf,
                       const char *name, struct SegmentCache *sc)
{
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        /* 64-bit code segment active: print the full 64-bit base. */
        cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
                    sc->selector, sc->base, sc->limit, sc->flags & 0x00ffff00);
    } else
#endif
    {
        cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
                    (uint32_t)sc->base, sc->limit, sc->flags & 0x00ffff00);
    }

    /* Attribute decoding only makes sense in protected mode with a
     * present descriptor. */
    if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
        goto done;

    cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
    if (sc->flags & DESC_S_MASK) {
        /* Code/data descriptor: decode type-specific access bits. */
        if (sc->flags & DESC_CS_MASK) {
            cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
                           ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
                        (sc->flags & DESC_R_MASK) ? 'R' : '-');
        } else {
            /* Data segment: B bit (or long mode) selects 32-bit semantics;
             * pad "DS " to align with the 4-char "DS16". */
            cpu_fprintf(f,
                        (sc->flags & DESC_B_MASK || env->hflags & HF_LMA_MASK)
                        ? "DS  " : "DS16");
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
                        (sc->flags & DESC_W_MASK) ? 'W' : '-');
        }
        cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
    } else {
        /* System descriptor: name the type, which differs between legacy
         * and long mode. */
        static const char *sys_type_name[2][16] = {
            { /* 32 bit mode */
                "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
                "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
                "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
                "CallGate32", "Reserved", "IntGate32", "TrapGate32"
            },
            { /* 64 bit mode */
                "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
                "Reserved", "Reserved", "Reserved", "Reserved",
                "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
                "Reserved", "IntGate64", "TrapGate64"
            }
        };
        cpu_fprintf(f, "%s",
                    sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
                                 [(sc->flags & DESC_TYPE_MASK)
                                  >> DESC_TYPE_SHIFT]);
    }
done:
    cpu_fprintf(f, "\n");
}
180
/* Number of instruction bytes dumped by CPU_DUMP_CODE, and how many of
 * those precede the current EIP. */
#define DUMP_CODE_BYTES_TOTAL    50
#define DUMP_CODE_BYTES_BACKWARD 20

/* Dump the full architectural CPU state (GPRs, flags, segments, control and
 * debug registers, and optionally CC op, FPU/SSE state and code bytes) in a
 * human-readable form.  `flags` is a mask of CPU_DUMP_* selectors. */
void x86_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                        int flags)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    /* Pull current register state from the accelerator (KVM) if needed. */
    cpu_synchronize_state(cs);

    /* Fold the lazily-evaluated condition codes into a real EFLAGS value. */
    eflags = cpu_compute_eflags(env);
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        /* 64-bit mode: print all 16 GPRs at full width. */
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    cs->halted);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    cs->halted);
    }

    /* Segment registers, plus the LDT and task register caches. */
    for(i = 0; i < 6; i++) {
        cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
                               &env->segs[i]);
    }
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        /* Long mode: descriptor-table bases, CR2/CR3 and DRs are 64-bit. */
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
                    env->dr[6], env->dr[7]);
    } else
#endif
    {
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++) {
            cpu_fprintf(f, "DR%d=" TARGET_FMT_lx " ", i, env->dr[i]);
        }
        cpu_fprintf(f, "\nDR6=" TARGET_FMT_lx " DR7=" TARGET_FMT_lx "\n",
                    env->dr[6], env->dr[7]);
    }
    if (flags & CPU_DUMP_CCOP) {
        /* Show the lazy condition-code state; fall back to the raw enum
         * value if it is out of range. */
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    cpu_fprintf(f, "EFER=%016" PRIx64 "\n", env->efer);
    if (flags & CPU_DUMP_FPU) {
        int fptag;
        /* FTW is stored inverted per register in fptags[]; rebuild the
         * architectural tag byte. */
        fptag = 0;
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for(i=0;i<8;i++) {
            CPU_LDoubleU u;
            u.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, u.l.lower, u.l.upper);
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        /* 16 XMM registers are architecturally visible only in 64-bit mode. */
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for(i=0;i<nb;i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
    if (flags & CPU_DUMP_CODE) {
        /* Dump code bytes around CS:EIP, marking the byte at EIP with <>.
         * Never reach back past the start of the segment. */
        target_ulong base = env->segs[R_CS].base + env->eip;
        target_ulong offs = MIN(env->eip, DUMP_CODE_BYTES_BACKWARD);
        uint8_t code;
        char codestr[3];

        cpu_fprintf(f, "Code=");
        for (i = 0; i < DUMP_CODE_BYTES_TOTAL; i++) {
            if (cpu_memory_rw_debug(cs, base - offs + i, &code, 1, 0) == 0) {
                snprintf(codestr, sizeof(codestr), "%02x", code);
            } else {
                /* unmapped/unreadable byte */
                snprintf(codestr, sizeof(codestr), "??");
            }
            cpu_fprintf(f, "%s%s%s%s", i > 0 ? " " : "",
                        i == offs ? "<" : "", codestr, i == offs ? ">" : "");
        }
        cpu_fprintf(f, "\n");
    }
}
7e84c249 379
eaa728ee
FB
380/***********************************************************/
381/* x86 mmu */
382/* XXX: add PGE support */
383
/* Update the A20 gate state.  A change invalidates every cached translation,
 * so all TBs are unlinked and the whole TLB is flushed. */
void x86_cpu_set_a20(X86CPU *cpu, int a20_state)
{
    CPUX86State *env = &cpu->env;

    /* normalize to 0/1 before comparing with the stored bit */
    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(CPU(cpu), CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(env, 1);
        /* all address bits pass except bit 20, which is gated by a20_state */
        env->a20_mask = ~(1 << 20) | (a20_state << 20);
    }
}
403
/* Install a new CR0 value, handling the side effects: TLB flush on
 * paging/protection changes, long-mode entry/exit, and the cached
 * PE/ADDSEG/MP/EM/TS bits in env->hflags. */
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    /* Any change to PG/WP/PE alters translation rules: flush everything. */
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    /* ET is hardwired to 1 on modern CPUs */
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags: CR0.MP/EM/TS map onto contiguous hflags bits, so a
       single shift copies all three at once */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}
444
eaa728ee
FB
445/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
446 the PDPT */
447void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
7e84c249 448{
eaa728ee
FB
449 env->cr[3] = new_cr3;
450 if (env->cr[0] & CR0_PG_MASK) {
451#if defined(DEBUG_MMU)
452 printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
453#endif
454 tlb_flush(env, 0);
455 }
7e84c249
FB
456}
457
/* Install a new CR4 value: flush the TLB when paging-related bits change,
 * mask off bits whose CPUID features are absent, and mirror OSFXSR/SMAP
 * into env->hflags for fast access by the translator. */
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif
    /* These bits affect address translation: flush on any toggle. */
    if ((new_cr4 ^ env->cr[4]) &
        (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK |
         CR4_SMEP_MASK | CR4_SMAP_MASK)) {
        tlb_flush(env, 1);
    }
    /* SSE handling: OSFXSR is writable only if the CPU reports SSE. */
    if (!(env->features[FEAT_1_EDX] & CPUID_SSE)) {
        new_cr4 &= ~CR4_OSFXSR_MASK;
    }
    env->hflags &= ~HF_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK) {
        env->hflags |= HF_OSFXSR_MASK;
    }

    /* SMAP is likewise gated on its CPUID feature bit. */
    if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMAP)) {
        new_cr4 &= ~CR4_SMAP_MASK;
    }
    env->hflags &= ~HF_SMAP_MASK;
    if (new_cr4 & CR4_SMAP_MASK) {
        env->hflags |= HF_SMAP_MASK;
    }

    env->cr[4] = new_cr4;
}
487
#if defined(CONFIG_USER_ONLY)

/* User-mode emulation: there is no MMU to walk — every fault becomes a
 * user-level page fault (#PF).  Always returns 1 (raise the exception). */
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int mmu_idx)
{
    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}
501
8d7b0fbb 502#else
891b38e4 503
/* XXX: This value should match the one returned by CPUID
 * and in exec.c */
# if defined(TARGET_X86_64)
# define PHYS_ADDR_MASK 0xfffffff000LL
# else
# define PHYS_ADDR_MASK 0xffffff000LL
# endif

/* Walk the guest page tables for `addr` and install the translation in the
 * softmmu TLB.  is_write1 encodes the access type (0 = read, 1 = write,
 * 2 = instruction fetch); mmu_idx selects user/kernel/kernel-SMAP mode.
 *
 * return value:
 *   -1 = cannot handle fault
 *    0 = nothing more to do
 *    1 = generate PF fault
 */
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int mmu_idx)
{
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, is_write, is_user;
    hwaddr paddr;
    uint32_t page_offset;
    target_ulong vaddr, virt_addr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        /* Paging disabled: identity map with full permissions. */
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        /* PAE (and long-mode) walk: 64-bit entries.  `ptep` accumulates the
         * combined access rights, with the NX bit kept inverted so that a
         * plain AND merges the per-level NX restrictions. */
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                /* non-canonical address => #GP, not #PF */
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                /* NX set while EFER.NXE clear is a reserved-bit violation */
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* legacy PAE PDPTEs carry no rights bits: start permissive */
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            ptep ^= PG_NX_MASK;  /* un-invert: walk terminates here */
            if ((ptep & PG_NX_MASK) && is_write1 == 2) {
                goto do_fault_protect;
            }
            /* Access-rights check per translation regime. */
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                /* SMAP: supervisor data access to user page faults */
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                /* SMEP: supervisor fetch from user page faults */
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* cannot happen */
                break;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* cannot happen */
                break;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        /* Legacy 32-bit (non-PAE) two-level walk. */
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(pde & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(pde & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (pde & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (pde & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* cannot happen */
                break;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page directory entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* cannot happen */
                break;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    tlb_set_page(env, vaddr, paddr, prot, mmu_idx, page_size);
    return 0;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (((env->efer & MSR_EFER_NXE) &&
          (env->cr[4] & CR4_PAE_MASK)) ||
         (env->cr[4] & CR4_SMEP_MASK)))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}
888
/* Translate a virtual address to a physical one for the debugger/monitor.
 * Performs a read-only page walk: no access checks, no A/D bit updates.
 * Returns -1 if the address is not mapped. */
hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    hwaddr paddr;
    uint32_t page_offset;
    int page_size;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        /* paging disabled: physical == virtual (modulo A20) */
        pte = addr & env->a20_mask;
        page_size = 4096;
    } else if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            /* strip NX and reserved high bits before using as an address */
            pdpe_addr = ((pml4e & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
                         (((addr >> 30) & 0x1ff) << 3)) & env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
                    (((addr >> 21) & 0x1ff) << 3)) & env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
                        (((addr >> 12) & 0x1ff) << 3)) & env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(pte_addr);
        }
        pte &= ~(PG_NX_MASK | PG_HI_USER_MASK);
        if (!(pte & PG_PRESENT_MASK))
            return -1;
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK))
            return -1;
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            pte = pde & ~0x003ff000; /* align to 4MB */
            page_size = 4096 * 1024;
        } else {
            /* page directory entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK))
                return -1;
            page_size = 4096;
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
01df040b 983
/* Register DR[index] with the generic breakpoint/watchpoint machinery,
 * according to its type/enable bits in DR7.  On failure the cached
 * breakpoint reference is cleared. */
void hw_breakpoint_insert(CPUX86State *env, int index)
{
    int type = 0, err = 0;

    switch (hw_breakpoint_type(env->dr[7], index)) {
    case DR7_TYPE_BP_INST:
        if (hw_breakpoint_enabled(env->dr[7], index)) {
            err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU,
                                        &env->cpu_breakpoint[index]);
        }
        break;
    case DR7_TYPE_DATA_WR:
        type = BP_CPU | BP_MEM_WRITE;
        break;
    case DR7_TYPE_IO_RW:
        /* No support for I/O watchpoints yet */
        break;
    case DR7_TYPE_DATA_RW:
        type = BP_CPU | BP_MEM_ACCESS;
        break;
    }

    /* data breakpoints become watchpoints; type stays 0 for the other cases */
    if (type != 0) {
        err = cpu_watchpoint_insert(env, env->dr[index],
                                    hw_breakpoint_len(env->dr[7], index),
                                    type, &env->cpu_watchpoint[index]);
    }

    if (err) {
        env->cpu_breakpoint[index] = NULL;
    }
}
1016
317ac620 1017void hw_breakpoint_remove(CPUX86State *env, int index)
01df040b
AL
1018{
1019 if (!env->cpu_breakpoint[index])
1020 return;
1021 switch (hw_breakpoint_type(env->dr[7], index)) {
428065ce 1022 case DR7_TYPE_BP_INST:
5902564a 1023 if (hw_breakpoint_enabled(env->dr[7], index)) {
01df040b 1024 cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]);
5902564a 1025 }
01df040b 1026 break;
428065ce
LG
1027 case DR7_TYPE_DATA_WR:
1028 case DR7_TYPE_DATA_RW:
01df040b
AL
1029 cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]);
1030 break;
428065ce 1031 case DR7_TYPE_IO_RW:
01df040b
AL
1032 /* No support for I/O watchpoints yet */
1033 break;
1034 }
1035}
1036
/* Evaluate all four hardware debug registers against the current state and
 * compute the pending DR6 bits.  DR6 is written back when an *enabled*
 * breakpoint hit (or unconditionally if force_dr6_update).  Returns true if
 * an enabled breakpoint/watchpoint matched, i.e. #DB should be raised. */
bool check_hw_breakpoints(CPUX86State *env, bool force_dr6_update)
{
    target_ulong dr6;
    int reg;
    bool hit_enabled = false;

    /* preserve the non-B0..B3 bits of DR6 */
    dr6 = env->dr[6] & ~0xf;
    for (reg = 0; reg < DR7_MAX_BP; reg++) {
        bool bp_match = false;
        bool wp_match = false;

        switch (hw_breakpoint_type(env->dr[7], reg)) {
        case DR7_TYPE_BP_INST:
            if (env->dr[reg] == env->eip) {
                bp_match = true;
            }
            break;
        case DR7_TYPE_DATA_WR:
        case DR7_TYPE_DATA_RW:
            /* the generic watchpoint layer marks hits with BP_WATCHPOINT_HIT */
            if (env->cpu_watchpoint[reg] &&
                env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT) {
                wp_match = true;
            }
            break;
        case DR7_TYPE_IO_RW:
            break;
        }
        if (bp_match || wp_match) {
            /* B0..B3 are set even for disabled breakpoints that match */
            dr6 |= 1 << reg;
            if (hw_breakpoint_enabled(env->dr[7], reg)) {
                hit_enabled = true;
            }
        }
    }

    if (hit_enabled || force_dr6_update) {
        env->dr[6] = dr6;
    }

    return hit_enabled;
}
1078
d65e9815 1079void breakpoint_handler(CPUX86State *env)
01df040b
AL
1080{
1081 CPUBreakpoint *bp;
1082
1083 if (env->watchpoint_hit) {
1084 if (env->watchpoint_hit->flags & BP_CPU) {
1085 env->watchpoint_hit = NULL;
e175bce5 1086 if (check_hw_breakpoints(env, false)) {
77b2bc2c 1087 raise_exception(env, EXCP01_DB);
e175bce5 1088 } else {
01df040b 1089 cpu_resume_from_signal(env, NULL);
e175bce5 1090 }
01df040b
AL
1091 }
1092 } else {
72cf2d4f 1093 QTAILQ_FOREACH(bp, &env->breakpoints, entry)
01df040b
AL
1094 if (bp->pc == env->eip) {
1095 if (bp->flags & BP_CPU) {
e175bce5 1096 check_hw_breakpoints(env, true);
77b2bc2c 1097 raise_exception(env, EXCP01_DB);
01df040b
AL
1098 }
1099 break;
1100 }
1101 }
01df040b 1102}
79c4f6b0 1103
/*
 * Parameters for a Machine Check Exception injection request, passed
 * through run_on_cpu() to do_inject_x86_mce() so the injection runs on
 * the target vCPU's thread.
 */
typedef struct MCEInjectionParams {
    Monitor *mon;        /* monitor used for error/status messages */
    X86CPU *cpu;         /* CPU to inject the MCE into */
    int bank;            /* MCA bank index (each bank spans 4 MSRs) */
    uint64_t status;     /* value for the bank's MCi_STATUS */
    uint64_t mcg_status; /* value for MCG_STATUS */
    uint64_t addr;       /* value for the bank's MCi_ADDR */
    uint64_t misc;       /* value for the bank's MCi_MISC */
    int flags;           /* MCE_INJECT_* flags (broadcast, uncond AO) */
} MCEInjectionParams;
1114
/*
 * Perform an MCE injection on the vCPU named in @data (a
 * MCEInjectionParams *).  Runs on the target vCPU thread via
 * run_on_cpu().  Depending on the requested status bits it either
 * raises an MCE interrupt, latches a corrected error in the bank MSRs,
 * sets the overflow bit, or triggers a system reset (triple fault).
 */
static void do_inject_x86_mce(void *data)
{
    MCEInjectionParams *params = data;
    CPUX86State *cenv = &params->cpu->env;
    CPUState *cpu = CPU(params->cpu);
    /* Each MCA bank occupies 4 consecutive entries: CTL/STATUS/ADDR/MISC. */
    uint64_t *banks = cenv->mce_banks + 4 * params->bank;

    /* Make sure the in-memory CPU state is current (KVM sync). */
    cpu_synchronize_state(cpu);

    /*
     * If there is an MCE exception being processed, ignore this SRAO MCE
     * unless unconditional injection was requested.
     */
    if (!(params->flags & MCE_INJECT_UNCOND_AO)
        && !(params->status & MCI_STATUS_AR)
        && (cenv->mcg_status & MCG_STATUS_MCIP)) {
        return;
    }

    if (params->status & MCI_STATUS_UC) {
        /*
         * if MSR_MCG_CTL is not all 1s, the uncorrected error
         * reporting is disabled
         */
        if ((cenv->mcg_cap & MCG_CTL_P) && cenv->mcg_ctl != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled\n",
                           cpu->cpu_index);
            return;
        }

        /*
         * if MSR_MCi_CTL is not all 1s, the uncorrected error
         * reporting is disabled for the bank
         */
        if (banks[0] != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled for"
                           " bank %d\n",
                           cpu->cpu_index, params->bank);
            return;
        }

        /* A second MCE while one is in progress, or MCE disabled in CR4,
         * is a triple-fault condition: request a machine reset. */
        if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
            !(cenv->cr[4] & CR4_MCE_MASK)) {
            monitor_printf(params->mon,
                           "CPU %d: Previous MCE still in progress, raising"
                           " triple fault\n",
                           cpu->cpu_index);
            qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
            qemu_system_reset_request();
            return;
        }
        /* Bank already holds a valid error: record the overflow. */
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        cenv->mcg_status = params->mcg_status;
        banks[1] = params->status;
        cpu_interrupt(cpu, CPU_INTERRUPT_MCE);
    } else if (!(banks[1] & MCI_STATUS_VAL)
               || !(banks[1] & MCI_STATUS_UC)) {
        /* Corrected error: latch it in the bank without interrupting,
         * overwriting any previous corrected error (with OVER set). */
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        banks[1] = params->status;
    } else {
        /* Bank holds a valid uncorrected error: only flag the overflow. */
        banks[1] |= MCI_STATUS_OVER;
    }
}
b3cd24e0 1188
/*
 * Monitor-driven MCE injection entry point: validate the request,
 * inject the MCE on @cpu, and — if MCE_INJECT_BROADCAST is set —
 * broadcast a fixed UC signal to every other vCPU as real MCA
 * broadcast-capable hardware would.
 */
void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
                        uint64_t status, uint64_t mcg_status, uint64_t addr,
                        uint64_t misc, int flags)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *cenv = &cpu->env;
    MCEInjectionParams params = {
        .mon = mon,
        .cpu = cpu,
        .bank = bank,
        .status = status,
        .mcg_status = mcg_status,
        .addr = addr,
        .misc = misc,
        .flags = flags,
    };
    /* Low byte of MCG_CAP is the number of implemented MCA banks. */
    unsigned bank_num = cenv->mcg_cap & 0xff;

    if (!cenv->mcg_cap) {
        monitor_printf(mon, "MCE injection not supported\n");
        return;
    }
    if (bank >= bank_num) {
        monitor_printf(mon, "Invalid MCE bank number\n");
        return;
    }
    /* A status without VAL set would be silently ignored by the guest. */
    if (!(status & MCI_STATUS_VAL)) {
        monitor_printf(mon, "Invalid MCE status code\n");
        return;
    }
    if ((flags & MCE_INJECT_BROADCAST)
        && !cpu_x86_support_mca_broadcast(cenv)) {
        monitor_printf(mon, "Guest CPU does not support MCA broadcast\n");
        return;
    }

    /* run_on_cpu() blocks until the injection ran on the target vCPU,
     * so passing stack-allocated params is safe. */
    run_on_cpu(cs, do_inject_x86_mce, &params);
    if (flags & MCE_INJECT_BROADCAST) {
        CPUState *other_cs;

        /* Fixed payload delivered to all other vCPUs: bank 1, valid
         * uncorrected error, MCIP+RIPV set, no address/misc info. */
        params.bank = 1;
        params.status = MCI_STATUS_VAL | MCI_STATUS_UC;
        params.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV;
        params.addr = 0;
        params.misc = 0;
        CPU_FOREACH(other_cs) {
            if (other_cs == cs) {
                continue;
            }
            params.cpu = X86_CPU(other_cs);
            run_on_cpu(other_cs, do_inject_x86_mce, &params);
        }
    }
}
d362e757 1243
/*
 * Report a guest access to the APIC TPR (task-priority register).
 * Under KVM the report is deferred: the access type is recorded and a
 * TPR interrupt is queued for later handling.  Under TCG the guest
 * state is first rolled back to the faulting instruction so that
 * env->eip is accurate, then the APIC is notified directly.
 */
void cpu_report_tpr_access(CPUX86State *env, TPRAccess access)
{
    if (kvm_enabled()) {
        env->tpr_access_type = access;

        cpu_interrupt(CPU(x86_env_get_cpu(env)), CPU_INTERRUPT_TPR);
    } else {
        /* Restore precise CPU state from the memory-I/O return address. */
        cpu_restore_state(env, env->mem_io_pc);

        apic_handle_tpr_access_report(env->apic_state, env->eip, access);
    }
}
74ce674f 1256#endif /* !CONFIG_USER_ONLY */
6fd805e1 1257
84273177
JK
1258int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
1259 target_ulong *base, unsigned int *limit,
1260 unsigned int *flags)
1261{
f17ec444
AF
1262 X86CPU *cpu = x86_env_get_cpu(env);
1263 CPUState *cs = CPU(cpu);
84273177
JK
1264 SegmentCache *dt;
1265 target_ulong ptr;
1266 uint32_t e1, e2;
1267 int index;
1268
1269 if (selector & 0x4)
1270 dt = &env->ldt;
1271 else
1272 dt = &env->gdt;
1273 index = selector & ~7;
1274 ptr = dt->base + index;
1275 if ((index + 7) > dt->limit
f17ec444
AF
1276 || cpu_memory_rw_debug(cs, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
1277 || cpu_memory_rw_debug(cs, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
84273177
JK
1278 return 0;
1279
1280 *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
1281 *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
1282 if (e2 & DESC_G_MASK)
1283 *limit = (*limit << 12) | 0xfff;
1284 *flags = e2;
1285
1286 return 1;
1287}
1288
b09ea7d5 1289#if !defined(CONFIG_USER_ONLY)
/*
 * Handle an INIT signal: reset the CPU while preserving the pieces of
 * state that survive the reset here.
 */
void do_cpu_init(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    /* Remember whether a SIPI is already pending so it survives cpu_reset(). */
    int sipi = cs->interrupt_request & CPU_INTERRUPT_SIPI;
    /* PAT is carried across the reset -- presumably it is architecturally
     * preserved on INIT; confirm against the SDM. */
    uint64_t pat = env->pat;

    cpu_reset(cs);
    cs->interrupt_request = sipi;
    env->pat = pat;
    apic_init_reset(env->apic_state);
}
1302
/* Handle a SIPI (startup IPI): delegate to the APIC model. */
void do_cpu_sipi(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;

    apic_sipi(env->apic_state);
}
1309#else
/* User-mode emulation: there is no APIC, so INIT/SIPI are no-ops. */
void do_cpu_init(X86CPU *cpu)
{
}
void do_cpu_sipi(X86CPU *cpu)
{
}
1316#endif
This page took 1.029523 seconds and 4 git commands to generate.