/*
 * QEMU monitor
 *
 * Copyright (c) 2003-2004 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "monitor/monitor.h"
#include "monitor/hmp-target.h"
#include "monitor/hmp.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qmp/qerror.h"
#include "sysemu/kvm.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-misc-target.h"
#include "qapi/qapi-commands-misc.h"
#include "hw/i386/pc.h"
#include "hw/i386/sgx.h"

/* Perform linear address sign extension */
static hwaddr addr_canonical(CPUArchState *env, hwaddr addr)
{
#ifdef TARGET_X86_64
    if (env->cr[4] & CR4_LA57_MASK) {
        if (addr & (1ULL << 56)) {
            addr |= (hwaddr)-(1LL << 57);
        }
    } else {
        if (addr & (1ULL << 47)) {
            addr |= (hwaddr)-(1LL << 48);
        }
    }
#endif
    return addr;
}
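
/*
 * Print one page table entry: the sign-extended virtual address, the
 * physical address selected by @mask, and the decoded attribute bits
 * (NX, Global, PSE, Dirty, Accessed, Cache-disable, Write-through,
 * User, Writable).
 */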
static void print_pte(Monitor *mon, CPUArchState *env, hwaddr addr,
                      hwaddr pte, hwaddr mask)
{
    addr = addr_canonical(env, addr);

    monitor_printf(mon, TARGET_FMT_plx ": " TARGET_FMT_plx
                   " %c%c%c%c%c%c%c%c%c\n",
                   addr,
                   pte & mask,
                   pte & PG_NX_MASK ? 'X' : '-',
                   pte & PG_GLOBAL_MASK ? 'G' : '-',
                   pte & PG_PSE_MASK ? 'P' : '-',
                   pte & PG_DIRTY_MASK ? 'D' : '-',
                   pte & PG_ACCESSED_MASK ? 'A' : '-',
                   pte & PG_PCD_MASK ? 'C' : '-',
                   pte & PG_PWT_MASK ? 'T' : '-',
                   pte & PG_USER_MASK ? 'U' : '-',
                   pte & PG_RW_MASK ? 'W' : '-');
}
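
/*
 * Dump the mappings of a legacy two-level 32-bit page table, including
 * 4 MB PSE pages when CR4.PSE is enabled.
 */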
static void tlb_info_32(Monitor *mon, CPUArchState *env)
{
    unsigned int l1, l2;
    uint32_t pgd, pde, pte;

    pgd = env->cr[3] & ~0xfff;
    for (l1 = 0; l1 < 1024; l1++) {
        cpu_physical_memory_read(pgd + l1 * 4, &pde, 4);
        pde = le32_to_cpu(pde);
        if (pde & PG_PRESENT_MASK) {
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                /* 4 MB pages */
                print_pte(mon, env, (l1 << 22), pde, ~((1 << 21) - 1));
            } else {
                for (l2 = 0; l2 < 1024; l2++) {
                    cpu_physical_memory_read((pde & ~0xfff) + l2 * 4, &pte, 4);
                    pte = le32_to_cpu(pte);
                    if (pte & PG_PRESENT_MASK) {
                        print_pte(mon, env, (l1 << 22) + (l2 << 12),
                                  pte & ~PG_PSE_MASK, ~0xfff);
                    }
                }
            }
        }
    }
}
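
/*
 * Dump the mappings of a three-level PAE page table (4 KB pages plus
 * 2 MB PSE pages; CR4.PSE is ignored when PAE is enabled).
 */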
static void tlb_info_pae32(Monitor *mon, CPUArchState *env)
{
    unsigned int l1, l2, l3;
    uint64_t pdpe, pde, pte;
    uint64_t pdp_addr, pd_addr, pt_addr;

    pdp_addr = env->cr[3] & ~0x1f;
    for (l1 = 0; l1 < 4; l1++) {
        cpu_physical_memory_read(pdp_addr + l1 * 8, &pdpe, 8);
        pdpe = le64_to_cpu(pdpe);
        if (pdpe & PG_PRESENT_MASK) {
            pd_addr = pdpe & 0x3fffffffff000ULL;
            for (l2 = 0; l2 < 512; l2++) {
                cpu_physical_memory_read(pd_addr + l2 * 8, &pde, 8);
                pde = le64_to_cpu(pde);
                if (pde & PG_PRESENT_MASK) {
                    if (pde & PG_PSE_MASK) {
                        /* 2M pages with PAE, CR4.PSE is ignored */
                        print_pte(mon, env, (l1 << 30) + (l2 << 21), pde,
                                  ~((hwaddr)(1 << 20) - 1));
                    } else {
                        pt_addr = pde & 0x3fffffffff000ULL;
                        for (l3 = 0; l3 < 512; l3++) {
                            cpu_physical_memory_read(pt_addr + l3 * 8, &pte, 8);
                            pte = le64_to_cpu(pte);
                            if (pte & PG_PRESENT_MASK) {
                                print_pte(mon, env, (l1 << 30) + (l2 << 21)
                                          + (l3 << 12), pte & ~PG_PSE_MASK,
                                          ~0xfff);
                            }
                        }
                    }
                }
            }
        }
    }
}

#ifdef TARGET_X86_64
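
/*
 * Dump the mappings reachable from a 4-level (48-bit) page table rooted
 * at @pml4_addr.  @l0 is the PML5 index of this table (0 when 5-level
 * paging is disabled) and only contributes to the printed virtual
 * addresses.
 */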
static void tlb_info_la48(Monitor *mon, CPUArchState *env,
                          uint64_t l0, uint64_t pml4_addr)
{
    uint64_t l1, l2, l3, l4;
    uint64_t pml4e, pdpe, pde, pte;
    uint64_t pdp_addr, pd_addr, pt_addr;

    for (l1 = 0; l1 < 512; l1++) {
        cpu_physical_memory_read(pml4_addr + l1 * 8, &pml4e, 8);
        pml4e = le64_to_cpu(pml4e);
        if (!(pml4e & PG_PRESENT_MASK)) {
            continue;
        }

        pdp_addr = pml4e & 0x3fffffffff000ULL;
        for (l2 = 0; l2 < 512; l2++) {
            cpu_physical_memory_read(pdp_addr + l2 * 8, &pdpe, 8);
            pdpe = le64_to_cpu(pdpe);
            if (!(pdpe & PG_PRESENT_MASK)) {
                continue;
            }

            if (pdpe & PG_PSE_MASK) {
                /* 1G pages, CR4.PSE is ignored */
                print_pte(mon, env, (l0 << 48) + (l1 << 39) + (l2 << 30),
                          pdpe, 0x3ffffc0000000ULL);
                continue;
            }

            pd_addr = pdpe & 0x3fffffffff000ULL;
            for (l3 = 0; l3 < 512; l3++) {
                cpu_physical_memory_read(pd_addr + l3 * 8, &pde, 8);
                pde = le64_to_cpu(pde);
                if (!(pde & PG_PRESENT_MASK)) {
                    continue;
                }

                if (pde & PG_PSE_MASK) {
                    /* 2M pages, CR4.PSE is ignored */
                    print_pte(mon, env, (l0 << 48) + (l1 << 39) + (l2 << 30) +
                              (l3 << 21), pde, 0x3ffffffe00000ULL);
                    continue;
                }

                pt_addr = pde & 0x3fffffffff000ULL;
                for (l4 = 0; l4 < 512; l4++) {
                    cpu_physical_memory_read(pt_addr + l4 * 8, &pte, 8);
                    pte = le64_to_cpu(pte);
                    if (pte & PG_PRESENT_MASK) {
                        print_pte(mon, env, (l0 << 48) + (l1 << 39) +
                                  (l2 << 30) + (l3 << 21) + (l4 << 12),
                                  pte & ~PG_PSE_MASK, 0x3fffffffff000ULL);
                    }
                }
            }
        }
    }
}
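
/*
 * Dump the mappings of a 5-level (LA57) page table by walking the PML5
 * table and handing each present entry to the 4-level walker above.
 */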
static void tlb_info_la57(Monitor *mon, CPUArchState *env)
{
    uint64_t l0, pml5e, pml5_addr;

    pml5_addr = env->cr[3] & 0x3fffffffff000ULL;
    for (l0 = 0; l0 < 512; l0++) {
        cpu_physical_memory_read(pml5_addr + l0 * 8, &pml5e, 8);
        pml5e = le64_to_cpu(pml5e);
        if (pml5e & PG_PRESENT_MASK) {
            tlb_info_la48(mon, env, l0, pml5e & 0x3fffffffff000ULL);
        }
    }
}
#endif /* TARGET_X86_64 */
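
/*
 * HMP "info tlb": print the guest page table mappings in the format
 * matching the current paging mode.
 */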
void hmp_info_tlb(Monitor *mon, const QDict *qdict)
{
    CPUArchState *env = mon_get_cpu_env(mon);

    if (!env) {
        monitor_printf(mon, "No CPU available\n");
        return;
    }
    if (!(env->cr[0] & CR0_PG_MASK)) {
        monitor_printf(mon, "PG disabled\n");
        return;
    }
    if (env->cr[4] & CR4_PAE_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            if (env->cr[4] & CR4_LA57_MASK) {
                tlb_info_la57(mon, env);
            } else {
                tlb_info_la48(mon, env, 0, env->cr[3] & 0x3fffffffff000ULL);
            }
        } else
#endif
        {
            tlb_info_pae32(mon, env);
        }
    } else {
        tlb_info_32(mon, env);
    }
}
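
/*
 * Coalescing helper for "info mem": *pstart and *plast_prot track the
 * start and protection of the range being accumulated; a range is
 * printed whenever the protection changes, and prot == 0 flushes the
 * pending range.
 */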
static void mem_print(Monitor *mon, CPUArchState *env,
                      hwaddr *pstart, int *plast_prot,
                      hwaddr end, int prot)
{
    int prot1 = *plast_prot;

    if (prot != prot1) {
        if (*pstart != -1) {
            monitor_printf(mon, TARGET_FMT_plx "-" TARGET_FMT_plx " "
                           TARGET_FMT_plx " %c%c%c\n",
                           addr_canonical(env, *pstart),
                           addr_canonical(env, end),
                           addr_canonical(env, end - *pstart),
                           prot1 & PG_USER_MASK ? 'u' : '-',
                           'r',
                           prot1 & PG_RW_MASK ? 'w' : '-');
        }
        *pstart = (prot != 0) ? end : -1;
        *plast_prot = prot;
    }
}
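
/*
 * "info mem" walker for legacy two-level 32-bit paging: merge present
 * mappings into ranges of identical protection and print them.
 */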
static void mem_info_32(Monitor *mon, CPUArchState *env)
{
    unsigned int l1, l2;
    int prot, last_prot;
    uint32_t pgd, pde, pte;
    hwaddr start, end;

    pgd = env->cr[3] & ~0xfff;
    last_prot = 0;
    start = -1;
    for (l1 = 0; l1 < 1024; l1++) {
        cpu_physical_memory_read(pgd + l1 * 4, &pde, 4);
        pde = le32_to_cpu(pde);
        end = l1 << 22;
        if (pde & PG_PRESENT_MASK) {
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                prot = pde & (PG_USER_MASK | PG_RW_MASK | PG_PRESENT_MASK);
                mem_print(mon, env, &start, &last_prot, end, prot);
            } else {
                for (l2 = 0; l2 < 1024; l2++) {
                    cpu_physical_memory_read((pde & ~0xfff) + l2 * 4, &pte, 4);
                    pte = le32_to_cpu(pte);
                    end = (l1 << 22) + (l2 << 12);
                    if (pte & PG_PRESENT_MASK) {
                        prot = pte & pde &
                            (PG_USER_MASK | PG_RW_MASK | PG_PRESENT_MASK);
                    } else {
                        prot = 0;
                    }
                    mem_print(mon, env, &start, &last_prot, end, prot);
                }
            }
        } else {
            prot = 0;
            mem_print(mon, env, &start, &last_prot, end, prot);
        }
    }
    /* Flush last range */
    mem_print(mon, env, &start, &last_prot, (hwaddr)1 << 32, 0);
}
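
/*
 * "info mem" walker for three-level PAE paging (4 KB and 2 MB pages).
 */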
static void mem_info_pae32(Monitor *mon, CPUArchState *env)
{
    unsigned int l1, l2, l3;
    int prot, last_prot;
    uint64_t pdpe, pde, pte;
    uint64_t pdp_addr, pd_addr, pt_addr;
    hwaddr start, end;

    pdp_addr = env->cr[3] & ~0x1f;
    last_prot = 0;
    start = -1;
    for (l1 = 0; l1 < 4; l1++) {
        cpu_physical_memory_read(pdp_addr + l1 * 8, &pdpe, 8);
        pdpe = le64_to_cpu(pdpe);
        end = l1 << 30;
        if (pdpe & PG_PRESENT_MASK) {
            pd_addr = pdpe & 0x3fffffffff000ULL;
            for (l2 = 0; l2 < 512; l2++) {
                cpu_physical_memory_read(pd_addr + l2 * 8, &pde, 8);
                pde = le64_to_cpu(pde);
                end = (l1 << 30) + (l2 << 21);
                if (pde & PG_PRESENT_MASK) {
                    if (pde & PG_PSE_MASK) {
                        prot = pde & (PG_USER_MASK | PG_RW_MASK |
                                      PG_PRESENT_MASK);
                        mem_print(mon, env, &start, &last_prot, end, prot);
                    } else {
                        pt_addr = pde & 0x3fffffffff000ULL;
                        for (l3 = 0; l3 < 512; l3++) {
                            cpu_physical_memory_read(pt_addr + l3 * 8, &pte, 8);
                            pte = le64_to_cpu(pte);
                            end = (l1 << 30) + (l2 << 21) + (l3 << 12);
                            if (pte & PG_PRESENT_MASK) {
                                prot = pte & pde & (PG_USER_MASK | PG_RW_MASK |
                                                    PG_PRESENT_MASK);
                            } else {
                                prot = 0;
                            }
                            mem_print(mon, env, &start, &last_prot, end, prot);
                        }
                    }
                } else {
                    prot = 0;
                    mem_print(mon, env, &start, &last_prot, end, prot);
                }
            }
        } else {
            prot = 0;
            mem_print(mon, env, &start, &last_prot, end, prot);
        }
    }
    /* Flush last range */
    mem_print(mon, env, &start, &last_prot, (hwaddr)1 << 32, 0);
}

#ifdef TARGET_X86_64
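
/*
 * "info mem" walker for 4-level (48-bit) long mode paging.  The
 * effective protection of a mapping is the AND of the U/W bits at every
 * level of the walk.
 */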
static void mem_info_la48(Monitor *mon, CPUArchState *env)
{
    int prot, last_prot;
    uint64_t l1, l2, l3, l4;
    uint64_t pml4e, pdpe, pde, pte;
    uint64_t pml4_addr, pdp_addr, pd_addr, pt_addr, start, end;

    pml4_addr = env->cr[3] & 0x3fffffffff000ULL;
    last_prot = 0;
    start = -1;
    for (l1 = 0; l1 < 512; l1++) {
        cpu_physical_memory_read(pml4_addr + l1 * 8, &pml4e, 8);
        pml4e = le64_to_cpu(pml4e);
        end = l1 << 39;
        if (pml4e & PG_PRESENT_MASK) {
            pdp_addr = pml4e & 0x3fffffffff000ULL;
            for (l2 = 0; l2 < 512; l2++) {
                cpu_physical_memory_read(pdp_addr + l2 * 8, &pdpe, 8);
                pdpe = le64_to_cpu(pdpe);
                end = (l1 << 39) + (l2 << 30);
                if (pdpe & PG_PRESENT_MASK) {
                    if (pdpe & PG_PSE_MASK) {
                        prot = pdpe & (PG_USER_MASK | PG_RW_MASK |
                                       PG_PRESENT_MASK);
                        prot &= pml4e;
                        mem_print(mon, env, &start, &last_prot, end, prot);
                    } else {
                        pd_addr = pdpe & 0x3fffffffff000ULL;
                        for (l3 = 0; l3 < 512; l3++) {
                            cpu_physical_memory_read(pd_addr + l3 * 8, &pde, 8);
                            pde = le64_to_cpu(pde);
                            end = (l1 << 39) + (l2 << 30) + (l3 << 21);
                            if (pde & PG_PRESENT_MASK) {
                                if (pde & PG_PSE_MASK) {
                                    prot = pde & (PG_USER_MASK | PG_RW_MASK |
                                                  PG_PRESENT_MASK);
                                    prot &= pml4e & pdpe;
                                    mem_print(mon, env, &start,
                                              &last_prot, end, prot);
                                } else {
                                    pt_addr = pde & 0x3fffffffff000ULL;
                                    for (l4 = 0; l4 < 512; l4++) {
                                        cpu_physical_memory_read(pt_addr
                                                                 + l4 * 8,
                                                                 &pte, 8);
                                        pte = le64_to_cpu(pte);
                                        end = (l1 << 39) + (l2 << 30) +
                                            (l3 << 21) + (l4 << 12);
                                        if (pte & PG_PRESENT_MASK) {
                                            prot = pte & (PG_USER_MASK | PG_RW_MASK |
                                                          PG_PRESENT_MASK);
                                            prot &= pml4e & pdpe & pde;
                                        } else {
                                            prot = 0;
                                        }
                                        mem_print(mon, env, &start,
                                                  &last_prot, end, prot);
                                    }
                                }
                            } else {
                                prot = 0;
                                mem_print(mon, env, &start,
                                          &last_prot, end, prot);
                            }
                        }
                    }
                } else {
                    prot = 0;
                    mem_print(mon, env, &start, &last_prot, end, prot);
                }
            }
        } else {
            prot = 0;
            mem_print(mon, env, &start, &last_prot, end, prot);
        }
    }
    /* Flush last range */
    mem_print(mon, env, &start, &last_prot, (hwaddr)1 << 48, 0);
}
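
/*
 * "info mem" walker for 5-level (LA57) paging; same logic as the
 * 4-level walker with one extra table level.
 */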
static void mem_info_la57(Monitor *mon, CPUArchState *env)
{
    int prot, last_prot;
    uint64_t l0, l1, l2, l3, l4;
    uint64_t pml5e, pml4e, pdpe, pde, pte;
    uint64_t pml5_addr, pml4_addr, pdp_addr, pd_addr, pt_addr, start, end;

    pml5_addr = env->cr[3] & 0x3fffffffff000ULL;
    last_prot = 0;
    start = -1;
    for (l0 = 0; l0 < 512; l0++) {
        cpu_physical_memory_read(pml5_addr + l0 * 8, &pml5e, 8);
        pml5e = le64_to_cpu(pml5e);
        end = l0 << 48;
        if (!(pml5e & PG_PRESENT_MASK)) {
            prot = 0;
            mem_print(mon, env, &start, &last_prot, end, prot);
            continue;
        }

        pml4_addr = pml5e & 0x3fffffffff000ULL;
        for (l1 = 0; l1 < 512; l1++) {
            cpu_physical_memory_read(pml4_addr + l1 * 8, &pml4e, 8);
            pml4e = le64_to_cpu(pml4e);
            end = (l0 << 48) + (l1 << 39);
            if (!(pml4e & PG_PRESENT_MASK)) {
                prot = 0;
                mem_print(mon, env, &start, &last_prot, end, prot);
                continue;
            }

            pdp_addr = pml4e & 0x3fffffffff000ULL;
            for (l2 = 0; l2 < 512; l2++) {
                cpu_physical_memory_read(pdp_addr + l2 * 8, &pdpe, 8);
                pdpe = le64_to_cpu(pdpe);
                end = (l0 << 48) + (l1 << 39) + (l2 << 30);
                if (!(pdpe & PG_PRESENT_MASK)) {
                    prot = 0;
                    mem_print(mon, env, &start, &last_prot, end, prot);
                    continue;
                }

                if (pdpe & PG_PSE_MASK) {
                    prot = pdpe & (PG_USER_MASK | PG_RW_MASK |
                                   PG_PRESENT_MASK);
                    prot &= pml5e & pml4e;
                    mem_print(mon, env, &start, &last_prot, end, prot);
                    continue;
                }

                pd_addr = pdpe & 0x3fffffffff000ULL;
                for (l3 = 0; l3 < 512; l3++) {
                    cpu_physical_memory_read(pd_addr + l3 * 8, &pde, 8);
                    pde = le64_to_cpu(pde);
                    end = (l0 << 48) + (l1 << 39) + (l2 << 30) + (l3 << 21);
                    if (!(pde & PG_PRESENT_MASK)) {
                        prot = 0;
                        mem_print(mon, env, &start, &last_prot, end, prot);
                        continue;
                    }

                    if (pde & PG_PSE_MASK) {
                        prot = pde & (PG_USER_MASK | PG_RW_MASK |
                                      PG_PRESENT_MASK);
                        prot &= pml5e & pml4e & pdpe;
                        mem_print(mon, env, &start, &last_prot, end, prot);
                        continue;
                    }

                    pt_addr = pde & 0x3fffffffff000ULL;
                    for (l4 = 0; l4 < 512; l4++) {
                        cpu_physical_memory_read(pt_addr + l4 * 8, &pte, 8);
                        pte = le64_to_cpu(pte);
                        end = (l0 << 48) + (l1 << 39) + (l2 << 30) +
                            (l3 << 21) + (l4 << 12);
                        if (pte & PG_PRESENT_MASK) {
                            prot = pte & (PG_USER_MASK | PG_RW_MASK |
                                          PG_PRESENT_MASK);
                            prot &= pml5e & pml4e & pdpe & pde;
                        } else {
                            prot = 0;
                        }
                        mem_print(mon, env, &start, &last_prot, end, prot);
                    }
                }
            }
        }
    }
    /* Flush last range */
    mem_print(mon, env, &start, &last_prot, (hwaddr)1 << 57, 0);
}
#endif /* TARGET_X86_64 */
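
/*
 * HMP "info mem": print the guest virtual address ranges that are
 * mapped, together with their coalesced protection bits.
 */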
void hmp_info_mem(Monitor *mon, const QDict *qdict)
{
    CPUArchState *env = mon_get_cpu_env(mon);

    if (!env) {
        monitor_printf(mon, "No CPU available\n");
        return;
    }
    if (!(env->cr[0] & CR0_PG_MASK)) {
        monitor_printf(mon, "PG disabled\n");
        return;
    }
    if (env->cr[4] & CR4_PAE_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            if (env->cr[4] & CR4_LA57_MASK) {
                mem_info_la57(mon, env);
            } else {
                mem_info_la48(mon, env);
            }
        } else
#endif
        {
            mem_info_pae32(mon, env);
        }
    } else {
        mem_info_32(mon, env);
    }
}
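
/*
 * HMP "mce": inject a machine check exception into the selected bank of
 * the given CPU, optionally broadcasting it to all CPUs.
 */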
void hmp_mce(Monitor *mon, const QDict *qdict)
{
    X86CPU *cpu;
    CPUState *cs;
    int cpu_index = qdict_get_int(qdict, "cpu_index");
    int bank = qdict_get_int(qdict, "bank");
    uint64_t status = qdict_get_int(qdict, "status");
    uint64_t mcg_status = qdict_get_int(qdict, "mcg_status");
    uint64_t addr = qdict_get_int(qdict, "addr");
    uint64_t misc = qdict_get_int(qdict, "misc");
    int flags = MCE_INJECT_UNCOND_AO;

    if (qdict_get_try_bool(qdict, "broadcast", false)) {
        flags |= MCE_INJECT_BROADCAST;
    }
    cs = qemu_get_cpu(cpu_index);
    if (cs != NULL) {
        cpu = X86_CPU(cs);
        cpu_x86_inject_mce(mon, cpu, bank, status, mcg_status, addr, misc,
                           flags);
    }
}

static target_long monitor_get_pc(Monitor *mon, const struct MonitorDef *md,
                                  int val)
{
    CPUArchState *env = mon_get_cpu_env(mon);
    return env->eip + env->segs[R_CS].base;
}
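
/* Register definitions for HMP register expressions such as $pc or $eax */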
const MonitorDef monitor_defs[] = {
#define SEG(name, seg) \
    { name, offsetof(CPUX86State, segs[seg].selector), NULL, MD_I32 },\
    { name ".base", offsetof(CPUX86State, segs[seg].base) },\
    { name ".limit", offsetof(CPUX86State, segs[seg].limit), NULL, MD_I32 },

    { "eax", offsetof(CPUX86State, regs[0]) },
    { "ecx", offsetof(CPUX86State, regs[1]) },
    { "edx", offsetof(CPUX86State, regs[2]) },
    { "ebx", offsetof(CPUX86State, regs[3]) },
    { "esp|sp", offsetof(CPUX86State, regs[4]) },
    { "ebp|fp", offsetof(CPUX86State, regs[5]) },
    { "esi", offsetof(CPUX86State, regs[6]) },
    { "edi", offsetof(CPUX86State, regs[7]) },
#ifdef TARGET_X86_64
    { "r8", offsetof(CPUX86State, regs[8]) },
    { "r9", offsetof(CPUX86State, regs[9]) },
    { "r10", offsetof(CPUX86State, regs[10]) },
    { "r11", offsetof(CPUX86State, regs[11]) },
    { "r12", offsetof(CPUX86State, regs[12]) },
    { "r13", offsetof(CPUX86State, regs[13]) },
    { "r14", offsetof(CPUX86State, regs[14]) },
    { "r15", offsetof(CPUX86State, regs[15]) },
#endif
    { "eflags", offsetof(CPUX86State, eflags) },
    { "eip", offsetof(CPUX86State, eip) },
    SEG("cs", R_CS)
    SEG("ds", R_DS)
    SEG("es", R_ES)
    SEG("ss", R_SS)
    SEG("fs", R_FS)
    SEG("gs", R_GS)
    { "pc", 0, monitor_get_pc, },
    { NULL },
};

const MonitorDef *target_monitor_defs(void)
{
    return monitor_defs;
}
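
/*
 * HMP "info lapic": dump the local APIC state of the CPU selected by
 * "apic-id" (or of the current monitor CPU).
 */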
void hmp_info_local_apic(Monitor *mon, const QDict *qdict)
{
    CPUState *cs;

    if (qdict_haskey(qdict, "apic-id")) {
        int id = qdict_get_try_int(qdict, "apic-id", 0);
        cs = cpu_by_arch_id(id);
    } else {
        cs = mon_get_cpu(mon);
    }

    if (!cs) {
        monitor_printf(mon, "No CPU available\n");
        return;
    }
    x86_cpu_dump_local_apic_state(cs, CPU_DUMP_FPU);
}

void hmp_info_io_apic(Monitor *mon, const QDict *qdict)
{
    monitor_printf(mon, "This command is obsolete and will be "
                   "removed soon. Please use 'info pic' instead.\n");
}

SevInfo *qmp_query_sev(Error **errp)
{
    SevInfo *info = sev_get_info();

    if (!info) {
        error_setg(errp, "SEV feature is not available");
        return NULL;
    }

    return info;
}
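
/* HMP "info sev": human-readable summary of the SEV guest state */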
void hmp_info_sev(Monitor *mon, const QDict *qdict)
{
    SevInfo *info = sev_get_info();

    if (info && info->enabled) {
        monitor_printf(mon, "handle: %d\n", info->handle);
        monitor_printf(mon, "state: %s\n", SevState_str(info->state));
        monitor_printf(mon, "build: %d\n", info->build_id);
        monitor_printf(mon, "api version: %d.%d\n",
                       info->api_major, info->api_minor);
        monitor_printf(mon, "debug: %s\n",
                       info->policy & SEV_POLICY_NODBG ? "off" : "on");
        monitor_printf(mon, "key-sharing: %s\n",
                       info->policy & SEV_POLICY_NOKS ? "off" : "on");
    } else {
        monitor_printf(mon, "SEV is not enabled\n");
    }

    qapi_free_SevInfo(info);
}

SevLaunchMeasureInfo *qmp_query_sev_launch_measure(Error **errp)
{
    char *data;
    SevLaunchMeasureInfo *info;

    data = sev_get_launch_measurement();
    if (!data) {
        error_setg(errp, "SEV launch measurement is not available");
        return NULL;
    }

    info = g_malloc0(sizeof(*info));
    info->data = data;

    return info;
}

SevCapability *qmp_query_sev_capabilities(Error **errp)
{
    return sev_get_capabilities(errp);
}

SGXInfo *qmp_query_sgx(Error **errp)
{
    return sgx_get_info(errp);
}
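
/* HMP "info sgx": human-readable summary of the guest SGX configuration */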
void hmp_info_sgx(Monitor *mon, const QDict *qdict)
{
    Error *err = NULL;
    g_autoptr(SGXInfo) info = qmp_query_sgx(&err);

    if (err) {
        error_report_err(err);
        return;
    }
    monitor_printf(mon, "SGX support: %s\n",
                   info->sgx ? "enabled" : "disabled");
    monitor_printf(mon, "SGX1 support: %s\n",
                   info->sgx1 ? "enabled" : "disabled");
    monitor_printf(mon, "SGX2 support: %s\n",
                   info->sgx2 ? "enabled" : "disabled");
    monitor_printf(mon, "FLC support: %s\n",
                   info->flc ? "enabled" : "disabled");
    monitor_printf(mon, "size: %" PRIu64 "\n",
                   info->section_size);
}

SGXInfo *qmp_query_sgx_capabilities(Error **errp)
{
    return sgx_get_capabilities(errp);
}