/*
 * QEMU monitor
 *
 * Copyright (c) 2003-2004 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "monitor/monitor.h"
#include "monitor/hmp-target.h"
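
/*
 * Print one page table entry: virtual address, physical address (pte & mask)
 * and the flag characters X (no-execute), G (global), P (PSE/large page),
 * D (dirty), A (accessed), C (cache-disable), T (write-through), U (user)
 * and W (writable).
 */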
static void print_pte(Monitor *mon, hwaddr addr,
                      hwaddr pte,
                      hwaddr mask)
{
#ifdef TARGET_X86_64
    /* Sign-extend canonical addresses in 64-bit mode */
    if (addr & (1ULL << 47)) {
        addr |= -1LL << 48;
    }
#endif

    monitor_printf(mon, TARGET_FMT_plx ": " TARGET_FMT_plx
                   " %c%c%c%c%c%c%c%c%c\n",
                   addr,
                   pte & mask,
                   pte & PG_NX_MASK ? 'X' : '-',
                   pte & PG_GLOBAL_MASK ? 'G' : '-',
                   pte & PG_PSE_MASK ? 'P' : '-',
                   pte & PG_DIRTY_MASK ? 'D' : '-',
                   pte & PG_ACCESSED_MASK ? 'A' : '-',
                   pte & PG_PCD_MASK ? 'C' : '-',
                   pte & PG_PWT_MASK ? 'T' : '-',
                   pte & PG_USER_MASK ? 'U' : '-',
                   pte & PG_RW_MASK ? 'W' : '-');
}
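
/*
 * "info tlb" helper for 32-bit non-PAE paging: walk the 1024-entry page
 * directory at CR3 and print every present mapping, either as a 4 MiB page
 * (PSE) or via the second-level page table.
 */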
static void tlb_info_32(Monitor *mon, CPUArchState *env)
{
    unsigned int l1, l2;
    uint32_t pgd, pde, pte;

    pgd = env->cr[3] & ~0xfff;
    for(l1 = 0; l1 < 1024; l1++) {
        cpu_physical_memory_read(pgd + l1 * 4, &pde, 4);
        pde = le32_to_cpu(pde);
        if (pde & PG_PRESENT_MASK) {
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                /* 4M pages */
                print_pte(mon, (l1 << 22), pde, ~((1 << 21) - 1));
            } else {
                for(l2 = 0; l2 < 1024; l2++) {
                    cpu_physical_memory_read((pde & ~0xfff) + l2 * 4, &pte, 4);
                    pte = le32_to_cpu(pte);
                    if (pte & PG_PRESENT_MASK) {
                        print_pte(mon, (l1 << 22) + (l2 << 12),
                                  pte & ~PG_PSE_MASK,
                                  ~0xfff);
                    }
                }
            }
        }
    }
}
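
/*
 * "info tlb" helper for 32-bit PAE paging: walk the 4-entry page directory
 * pointer table, then the 512-entry page directories and page tables,
 * printing 2 MiB and 4 KiB mappings.
 */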
static void tlb_info_pae32(Monitor *mon, CPUArchState *env)
{
    unsigned int l1, l2, l3;
    uint64_t pdpe, pde, pte;
    uint64_t pdp_addr, pd_addr, pt_addr;

    pdp_addr = env->cr[3] & ~0x1f;
    for (l1 = 0; l1 < 4; l1++) {
        cpu_physical_memory_read(pdp_addr + l1 * 8, &pdpe, 8);
        pdpe = le64_to_cpu(pdpe);
        if (pdpe & PG_PRESENT_MASK) {
            pd_addr = pdpe & 0x3fffffffff000ULL;
            for (l2 = 0; l2 < 512; l2++) {
                cpu_physical_memory_read(pd_addr + l2 * 8, &pde, 8);
                pde = le64_to_cpu(pde);
                if (pde & PG_PRESENT_MASK) {
                    if (pde & PG_PSE_MASK) {
                        /* 2M pages with PAE, CR4.PSE is ignored */
                        print_pte(mon, (l1 << 30) + (l2 << 21), pde,
                                  ~((hwaddr)(1 << 20) - 1));
                    } else {
                        pt_addr = pde & 0x3fffffffff000ULL;
                        for (l3 = 0; l3 < 512; l3++) {
                            cpu_physical_memory_read(pt_addr + l3 * 8, &pte, 8);
                            pte = le64_to_cpu(pte);
                            if (pte & PG_PRESENT_MASK) {
                                print_pte(mon, (l1 << 30) + (l2 << 21)
                                          + (l3 << 12),
                                          pte & ~PG_PSE_MASK,
                                          ~(hwaddr)0xfff);
                            }
                        }
                    }
                }
            }
        }
    }
}
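
/*
 * "info tlb" helper for 64-bit (long mode) paging: walk the four-level
 * PML4/PDP/PD/PT hierarchy and print 1 GiB, 2 MiB and 4 KiB mappings.
 */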
#ifdef TARGET_X86_64
static void tlb_info_64(Monitor *mon, CPUArchState *env)
{
    uint64_t l1, l2, l3, l4;
    uint64_t pml4e, pdpe, pde, pte;
    uint64_t pml4_addr, pdp_addr, pd_addr, pt_addr;

    pml4_addr = env->cr[3] & 0x3fffffffff000ULL;
    for (l1 = 0; l1 < 512; l1++) {
        cpu_physical_memory_read(pml4_addr + l1 * 8, &pml4e, 8);
        pml4e = le64_to_cpu(pml4e);
        if (pml4e & PG_PRESENT_MASK) {
            pdp_addr = pml4e & 0x3fffffffff000ULL;
            for (l2 = 0; l2 < 512; l2++) {
                cpu_physical_memory_read(pdp_addr + l2 * 8, &pdpe, 8);
                pdpe = le64_to_cpu(pdpe);
                if (pdpe & PG_PRESENT_MASK) {
                    if (pdpe & PG_PSE_MASK) {
                        /* 1G pages, CR4.PSE is ignored */
                        print_pte(mon, (l1 << 39) + (l2 << 30), pdpe,
                                  0x3ffffc0000000ULL);
                    } else {
                        pd_addr = pdpe & 0x3fffffffff000ULL;
                        for (l3 = 0; l3 < 512; l3++) {
                            cpu_physical_memory_read(pd_addr + l3 * 8, &pde, 8);
                            pde = le64_to_cpu(pde);
                            if (pde & PG_PRESENT_MASK) {
                                if (pde & PG_PSE_MASK) {
                                    /* 2M pages, CR4.PSE is ignored */
                                    print_pte(mon, (l1 << 39) + (l2 << 30) +
                                              (l3 << 21), pde,
                                              0x3ffffffe00000ULL);
                                } else {
                                    pt_addr = pde & 0x3fffffffff000ULL;
                                    for (l4 = 0; l4 < 512; l4++) {
                                        cpu_physical_memory_read(pt_addr
                                                                 + l4 * 8,
                                                                 &pte, 8);
                                        pte = le64_to_cpu(pte);
                                        if (pte & PG_PRESENT_MASK) {
                                            print_pte(mon, (l1 << 39) +
                                                      (l2 << 30) +
                                                      (l3 << 21) + (l4 << 12),
                                                      pte & ~PG_PSE_MASK,
                                                      0x3fffffffff000ULL);
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
    }
}
#endif /* TARGET_X86_64 */
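
/* Handler for the HMP "info tlb" command: dump the current page mappings. */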
void hmp_info_tlb(Monitor *mon, const QDict *qdict)
{
    CPUArchState *env;

    env = mon_get_cpu_env();

    if (!(env->cr[0] & CR0_PG_MASK)) {
        monitor_printf(mon, "PG disabled\n");
        return;
    }
    if (env->cr[4] & CR4_PAE_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            tlb_info_64(mon, env);
        } else
#endif
        {
            tlb_info_pae32(mon, env);
        }
    } else {
        tlb_info_32(mon, env);
    }
}
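
/*
 * Print a contiguous range of guest-virtual addresses that share the same
 * protection bits; *pstart and *plast_prot track the range being accumulated.
 */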
static void mem_print(Monitor *mon, hwaddr *pstart,
                      int *plast_prot,
                      hwaddr end, int prot)
{
    int prot1 = *plast_prot;

    if (prot != prot1) {
        if (*pstart != -1) {
            monitor_printf(mon, TARGET_FMT_plx "-" TARGET_FMT_plx " "
                           TARGET_FMT_plx " %c%c%c\n",
                           *pstart, end, end - *pstart,
                           prot1 & PG_USER_MASK ? 'u' : '-',
                           'r',
                           prot1 & PG_RW_MASK ? 'w' : '-');
        }
        *pstart = (prot != 0) ? end : -1;
        *plast_prot = prot;
    }
}
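
/*
 * "info mem" helper for 32-bit non-PAE paging: walk the page tables and
 * coalesce consecutive pages with identical user/write protections.
 */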
static void mem_info_32(Monitor *mon, CPUArchState *env)
{
    unsigned int l1, l2;
    int prot, last_prot;
    uint32_t pgd, pde, pte;
    hwaddr start, end;

    pgd = env->cr[3] & ~0xfff;
    last_prot = 0;
    start = -1;
    for(l1 = 0; l1 < 1024; l1++) {
        cpu_physical_memory_read(pgd + l1 * 4, &pde, 4);
        pde = le32_to_cpu(pde);
        end = l1 << 22;
        if (pde & PG_PRESENT_MASK) {
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                prot = pde & (PG_USER_MASK | PG_RW_MASK | PG_PRESENT_MASK);
                mem_print(mon, &start, &last_prot, end, prot);
            } else {
                for(l2 = 0; l2 < 1024; l2++) {
                    cpu_physical_memory_read((pde & ~0xfff) + l2 * 4, &pte, 4);
                    pte = le32_to_cpu(pte);
                    end = (l1 << 22) + (l2 << 12);
                    if (pte & PG_PRESENT_MASK) {
                        prot = pte & pde &
                            (PG_USER_MASK | PG_RW_MASK | PG_PRESENT_MASK);
                    } else {
                        prot = 0;
                    }
                    mem_print(mon, &start, &last_prot, end, prot);
                }
            }
        } else {
            prot = 0;
            mem_print(mon, &start, &last_prot, end, prot);
        }
    }
    /* Flush last range */
    mem_print(mon, &start, &last_prot, (hwaddr)1 << 32, 0);
}
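
/* "info mem" helper for 32-bit PAE paging. */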
static void mem_info_pae32(Monitor *mon, CPUArchState *env)
{
    unsigned int l1, l2, l3;
    int prot, last_prot;
    uint64_t pdpe, pde, pte;
    uint64_t pdp_addr, pd_addr, pt_addr;
    hwaddr start, end;

    pdp_addr = env->cr[3] & ~0x1f;
    last_prot = 0;
    start = -1;
    for (l1 = 0; l1 < 4; l1++) {
        cpu_physical_memory_read(pdp_addr + l1 * 8, &pdpe, 8);
        pdpe = le64_to_cpu(pdpe);
        end = l1 << 30;
        if (pdpe & PG_PRESENT_MASK) {
            pd_addr = pdpe & 0x3fffffffff000ULL;
            for (l2 = 0; l2 < 512; l2++) {
                cpu_physical_memory_read(pd_addr + l2 * 8, &pde, 8);
                pde = le64_to_cpu(pde);
                end = (l1 << 30) + (l2 << 21);
                if (pde & PG_PRESENT_MASK) {
                    if (pde & PG_PSE_MASK) {
                        prot = pde & (PG_USER_MASK | PG_RW_MASK |
                                      PG_PRESENT_MASK);
                        mem_print(mon, &start, &last_prot, end, prot);
                    } else {
                        pt_addr = pde & 0x3fffffffff000ULL;
                        for (l3 = 0; l3 < 512; l3++) {
                            cpu_physical_memory_read(pt_addr + l3 * 8, &pte, 8);
                            pte = le64_to_cpu(pte);
                            end = (l1 << 30) + (l2 << 21) + (l3 << 12);
                            if (pte & PG_PRESENT_MASK) {
                                prot = pte & pde & (PG_USER_MASK | PG_RW_MASK |
                                                    PG_PRESENT_MASK);
                            } else {
                                prot = 0;
                            }
                            mem_print(mon, &start, &last_prot, end, prot);
                        }
                    }
                } else {
                    prot = 0;
                    mem_print(mon, &start, &last_prot, end, prot);
                }
            }
        } else {
            prot = 0;
            mem_print(mon, &start, &last_prot, end, prot);
        }
    }
    /* Flush last range */
    mem_print(mon, &start, &last_prot, (hwaddr)1 << 32, 0);
}
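
/* "info mem" helper for 64-bit (long mode) paging. */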
#ifdef TARGET_X86_64
static void mem_info_64(Monitor *mon, CPUArchState *env)
{
    int prot, last_prot;
    uint64_t l1, l2, l3, l4;
    uint64_t pml4e, pdpe, pde, pte;
    uint64_t pml4_addr, pdp_addr, pd_addr, pt_addr, start, end;

    pml4_addr = env->cr[3] & 0x3fffffffff000ULL;
    last_prot = 0;
    start = -1;
    for (l1 = 0; l1 < 512; l1++) {
        cpu_physical_memory_read(pml4_addr + l1 * 8, &pml4e, 8);
        pml4e = le64_to_cpu(pml4e);
        end = l1 << 39;
        if (pml4e & PG_PRESENT_MASK) {
            pdp_addr = pml4e & 0x3fffffffff000ULL;
            for (l2 = 0; l2 < 512; l2++) {
                cpu_physical_memory_read(pdp_addr + l2 * 8, &pdpe, 8);
                pdpe = le64_to_cpu(pdpe);
                end = (l1 << 39) + (l2 << 30);
                if (pdpe & PG_PRESENT_MASK) {
                    if (pdpe & PG_PSE_MASK) {
                        prot = pdpe & (PG_USER_MASK | PG_RW_MASK |
                                       PG_PRESENT_MASK);
                        prot &= pml4e;
                        mem_print(mon, &start, &last_prot, end, prot);
                    } else {
                        pd_addr = pdpe & 0x3fffffffff000ULL;
                        for (l3 = 0; l3 < 512; l3++) {
                            cpu_physical_memory_read(pd_addr + l3 * 8, &pde, 8);
                            pde = le64_to_cpu(pde);
                            end = (l1 << 39) + (l2 << 30) + (l3 << 21);
                            if (pde & PG_PRESENT_MASK) {
                                if (pde & PG_PSE_MASK) {
                                    prot = pde & (PG_USER_MASK | PG_RW_MASK |
                                                  PG_PRESENT_MASK);
                                    prot &= pml4e & pdpe;
                                    mem_print(mon, &start, &last_prot, end,
                                              prot);
                                } else {
                                    pt_addr = pde & 0x3fffffffff000ULL;
                                    for (l4 = 0; l4 < 512; l4++) {
                                        cpu_physical_memory_read(pt_addr
                                                                 + l4 * 8,
                                                                 &pte, 8);
                                        pte = le64_to_cpu(pte);
                                        end = (l1 << 39) + (l2 << 30) +
                                            (l3 << 21) + (l4 << 12);
                                        if (pte & PG_PRESENT_MASK) {
                                            prot = pte & (PG_USER_MASK |
                                                          PG_RW_MASK |
                                                          PG_PRESENT_MASK);
                                            prot &= pml4e & pdpe & pde;
                                        } else {
                                            prot = 0;
                                        }
                                        mem_print(mon, &start, &last_prot,
                                                  end, prot);
                                    }
                                }
                            } else {
                                prot = 0;
                                mem_print(mon, &start, &last_prot, end, prot);
                            }
                        }
                    }
                } else {
                    prot = 0;
                    mem_print(mon, &start, &last_prot, end, prot);
                }
            }
        } else {
            prot = 0;
            mem_print(mon, &start, &last_prot, end, prot);
        }
    }
    /* Flush last range */
    mem_print(mon, &start, &last_prot, (hwaddr)1 << 48, 0);
}
#endif /* TARGET_X86_64 */
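
/* Handler for the HMP "info mem" command: show the virtual memory map. */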
void hmp_info_mem(Monitor *mon, const QDict *qdict)
{
    CPUArchState *env;

    env = mon_get_cpu_env();

    if (!(env->cr[0] & CR0_PG_MASK)) {
        monitor_printf(mon, "PG disabled\n");
        return;
    }
    if (env->cr[4] & CR4_PAE_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            mem_info_64(mon, env);
        } else
#endif
        {
            mem_info_pae32(mon, env);
        }
    } else {
        mem_info_32(mon, env);
    }
}
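
/*
 * Handler for the HMP "mce" command: inject a machine check exception into
 * the given CPU and MCE bank with the supplied status registers.
 */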
void hmp_mce(Monitor *mon, const QDict *qdict)
{
    X86CPU *cpu;
    CPUState *cs;
    int cpu_index = qdict_get_int(qdict, "cpu_index");
    int bank = qdict_get_int(qdict, "bank");
    uint64_t status = qdict_get_int(qdict, "status");
    uint64_t mcg_status = qdict_get_int(qdict, "mcg_status");
    uint64_t addr = qdict_get_int(qdict, "addr");
    uint64_t misc = qdict_get_int(qdict, "misc");
    int flags = MCE_INJECT_UNCOND_AO;

    if (qdict_get_try_bool(qdict, "broadcast", false)) {
        flags |= MCE_INJECT_BROADCAST;
    }
    cs = qemu_get_cpu(cpu_index);
    if (cs != NULL) {
        cpu = X86_CPU(cs);
        cpu_x86_inject_mce(mon, cpu, bank, status, mcg_status, addr, misc,
                           flags);
    }
}
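
/* Return the linear program counter (CS base + EIP) for the "pc" pseudo-register. */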
static target_long monitor_get_pc(const struct MonitorDef *md, int val)
{
    CPUArchState *env = mon_get_cpu_env();
    return env->eip + env->segs[R_CS].base;
}
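
/*
 * Register table for the monitor expression evaluator: these names can be
 * referenced as $-prefixed variables (e.g. $eax, $pc) in HMP commands.
 */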
const MonitorDef monitor_defs[] = {
#define SEG(name, seg) \
    { name, offsetof(CPUX86State, segs[seg].selector), NULL, MD_I32 },\
    { name ".base", offsetof(CPUX86State, segs[seg].base) },\
    { name ".limit", offsetof(CPUX86State, segs[seg].limit), NULL, MD_I32 },

    { "eax", offsetof(CPUX86State, regs[0]) },
    { "ecx", offsetof(CPUX86State, regs[1]) },
    { "edx", offsetof(CPUX86State, regs[2]) },
    { "ebx", offsetof(CPUX86State, regs[3]) },
    { "esp|sp", offsetof(CPUX86State, regs[4]) },
    { "ebp|fp", offsetof(CPUX86State, regs[5]) },
    { "esi", offsetof(CPUX86State, regs[6]) },
    { "edi", offsetof(CPUX86State, regs[7]) },
#ifdef TARGET_X86_64
    { "r8", offsetof(CPUX86State, regs[8]) },
    { "r9", offsetof(CPUX86State, regs[9]) },
    { "r10", offsetof(CPUX86State, regs[10]) },
    { "r11", offsetof(CPUX86State, regs[11]) },
    { "r12", offsetof(CPUX86State, regs[12]) },
    { "r13", offsetof(CPUX86State, regs[13]) },
    { "r14", offsetof(CPUX86State, regs[14]) },
    { "r15", offsetof(CPUX86State, regs[15]) },
#endif
    { "eflags", offsetof(CPUX86State, eflags) },
    { "eip", offsetof(CPUX86State, eip) },
    SEG("cs", R_CS)
    SEG("ds", R_DS)
    SEG("es", R_ES)
    SEG("ss", R_SS)
    SEG("fs", R_FS)
    SEG("gs", R_GS)
    { "pc", 0, monitor_get_pc, },
    { NULL },
};

const MonitorDef *target_monitor_defs(void)
{
    return monitor_defs;
}