/*
 * Copyright Fujitsu, Corp. 2011, 2012
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu-common.h"
#include "elf.h"
#include "cpu.h"
#include "monitor.h"
#include "kvm.h"
#include "dump.h"
#include "sysemu.h"
#include "memory_mapping.h"
#include "error.h"
#include "qmp-commands.h"
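
/*
 * The cpu_convert_to_target* helpers below byte-swap a value into the byte
 * order recorded for the dump target (ELFDATA2LSB or ELFDATA2MSB), so the
 * vmcore is always written in the guest's endianness.
 */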
static uint16_t cpu_convert_to_target16(uint16_t val, int endian)
{
    if (endian == ELFDATA2LSB) {
        val = cpu_to_le16(val);
    } else {
        val = cpu_to_be16(val);
    }

    return val;
}

static uint32_t cpu_convert_to_target32(uint32_t val, int endian)
{
    if (endian == ELFDATA2LSB) {
        val = cpu_to_le32(val);
    } else {
        val = cpu_to_be32(val);
    }

    return val;
}

static uint64_t cpu_convert_to_target64(uint64_t val, int endian)
{
    if (endian == ELFDATA2LSB) {
        val = cpu_to_le64(val);
    } else {
        val = cpu_to_be64(val);
    }

    return val;
}
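
/* State for a single dump-guest-memory operation. */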
typedef struct DumpState {
    ArchDumpInfo dump_info;
    MemoryMappingList list;
    uint16_t phdr_num;
    uint32_t sh_info;
    bool have_section;
    ssize_t note_size;
    target_phys_addr_t memory_offset;
    int fd;

    RAMBlock *block;
    ram_addr_t start;
    bool has_filter;
    int64_t begin;
    int64_t length;
    Error **errp;
} DumpState;
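
/* Release the resources held by a dump operation. */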
static int dump_cleanup(DumpState *s)
{
    memory_mapping_list_free(&s->list);

    return 0;
}

static void dump_error(DumpState *s, const char *reason)
{
    dump_cleanup(s);
}

static int fd_write_vmcore(void *buf, size_t size, void *opaque)
{
    DumpState *s = opaque;
    size_t written_size;

    written_size = qemu_write_full(s->fd, buf, size);
    if (written_size != size) {
        return -1;
    }

    return 0;
}
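
/*
 * Write the ELF64 file header.  When the number of program headers does not
 * fit into e_phnum, e_phnum is set to PN_XNUM and a single section header is
 * emitted whose sh_info field carries the real count (see write_elf_section()
 * and dump_init()).
 */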
static int write_elf64_header(DumpState *s)
{
    Elf64_Ehdr elf_header;
    int ret;
    int endian = s->dump_info.d_endian;

    memset(&elf_header, 0, sizeof(Elf64_Ehdr));
    memcpy(&elf_header, ELFMAG, SELFMAG);
    elf_header.e_ident[EI_CLASS] = ELFCLASS64;
    elf_header.e_ident[EI_DATA] = s->dump_info.d_endian;
    elf_header.e_ident[EI_VERSION] = EV_CURRENT;
    elf_header.e_type = cpu_convert_to_target16(ET_CORE, endian);
    elf_header.e_machine = cpu_convert_to_target16(s->dump_info.d_machine,
                                                   endian);
    elf_header.e_version = cpu_convert_to_target32(EV_CURRENT, endian);
    elf_header.e_ehsize = cpu_convert_to_target16(sizeof(elf_header), endian);
    elf_header.e_phoff = cpu_convert_to_target64(sizeof(Elf64_Ehdr), endian);
    elf_header.e_phentsize = cpu_convert_to_target16(sizeof(Elf64_Phdr),
                                                     endian);
    elf_header.e_phnum = cpu_convert_to_target16(s->phdr_num, endian);
    if (s->have_section) {
        uint64_t shoff = sizeof(Elf64_Ehdr) + sizeof(Elf64_Phdr) * s->sh_info;

        elf_header.e_shoff = cpu_convert_to_target64(shoff, endian);
        elf_header.e_shentsize = cpu_convert_to_target16(sizeof(Elf64_Shdr),
                                                         endian);
        elf_header.e_shnum = cpu_convert_to_target16(1, endian);
    }

    ret = fd_write_vmcore(&elf_header, sizeof(elf_header), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write elf header.\n");
        return -1;
    }

    return 0;
}
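
/* 32-bit counterpart of write_elf64_header(). */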
static int write_elf32_header(DumpState *s)
{
    Elf32_Ehdr elf_header;
    int ret;
    int endian = s->dump_info.d_endian;

    memset(&elf_header, 0, sizeof(Elf32_Ehdr));
    memcpy(&elf_header, ELFMAG, SELFMAG);
    elf_header.e_ident[EI_CLASS] = ELFCLASS32;
    elf_header.e_ident[EI_DATA] = endian;
    elf_header.e_ident[EI_VERSION] = EV_CURRENT;
    elf_header.e_type = cpu_convert_to_target16(ET_CORE, endian);
    elf_header.e_machine = cpu_convert_to_target16(s->dump_info.d_machine,
                                                   endian);
    elf_header.e_version = cpu_convert_to_target32(EV_CURRENT, endian);
    elf_header.e_ehsize = cpu_convert_to_target16(sizeof(elf_header), endian);
    elf_header.e_phoff = cpu_convert_to_target32(sizeof(Elf32_Ehdr), endian);
    elf_header.e_phentsize = cpu_convert_to_target16(sizeof(Elf32_Phdr),
                                                     endian);
    elf_header.e_phnum = cpu_convert_to_target16(s->phdr_num, endian);
    if (s->have_section) {
        uint32_t shoff = sizeof(Elf32_Ehdr) + sizeof(Elf32_Phdr) * s->sh_info;

        elf_header.e_shoff = cpu_convert_to_target32(shoff, endian);
        elf_header.e_shentsize = cpu_convert_to_target16(sizeof(Elf32_Shdr),
                                                         endian);
        elf_header.e_shnum = cpu_convert_to_target16(1, endian);
    }

    ret = fd_write_vmcore(&elf_header, sizeof(elf_header), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write elf header.\n");
        return -1;
    }

    return 0;
}
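
/* Write one PT_LOAD program header describing a guest memory mapping. */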
static int write_elf64_load(DumpState *s, MemoryMapping *memory_mapping,
                            int phdr_index, target_phys_addr_t offset)
{
    Elf64_Phdr phdr;
    int ret;
    int endian = s->dump_info.d_endian;

    memset(&phdr, 0, sizeof(Elf64_Phdr));
    phdr.p_type = cpu_convert_to_target32(PT_LOAD, endian);
    phdr.p_offset = cpu_convert_to_target64(offset, endian);
    phdr.p_paddr = cpu_convert_to_target64(memory_mapping->phys_addr, endian);

    /* When the memory is not stored into vmcore, offset will be -1 */
    if (offset == -1) {
        phdr.p_filesz = 0;
    } else {
        phdr.p_filesz = cpu_convert_to_target64(memory_mapping->length, endian);
    }
    phdr.p_memsz = cpu_convert_to_target64(memory_mapping->length, endian);
    phdr.p_vaddr = cpu_convert_to_target64(memory_mapping->virt_addr, endian);

    ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write program header table.\n");
        return -1;
    }

    return 0;
}

static int write_elf32_load(DumpState *s, MemoryMapping *memory_mapping,
                            int phdr_index, target_phys_addr_t offset)
{
    Elf32_Phdr phdr;
    int ret;
    int endian = s->dump_info.d_endian;

    memset(&phdr, 0, sizeof(Elf32_Phdr));
    phdr.p_type = cpu_convert_to_target32(PT_LOAD, endian);
    phdr.p_offset = cpu_convert_to_target32(offset, endian);
    phdr.p_paddr = cpu_convert_to_target32(memory_mapping->phys_addr, endian);

    /* When the memory is not stored into vmcore, offset will be -1 */
    if (offset == -1) {
        phdr.p_filesz = 0;
    } else {
        phdr.p_filesz = cpu_convert_to_target32(memory_mapping->length, endian);
    }
    phdr.p_memsz = cpu_convert_to_target32(memory_mapping->length, endian);
    phdr.p_vaddr = cpu_convert_to_target32(memory_mapping->virt_addr, endian);

    ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write program header table.\n");
        return -1;
    }

    return 0;
}
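
/*
 * Write the PT_NOTE program header.  The note data itself is written later by
 * write_elf64_notes()/write_elf32_notes() and is placed just before the
 * memory, at memory_offset - note_size.
 */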
static int write_elf64_note(DumpState *s)
{
    Elf64_Phdr phdr;
    int endian = s->dump_info.d_endian;
    target_phys_addr_t begin = s->memory_offset - s->note_size;
    int ret;

    memset(&phdr, 0, sizeof(Elf64_Phdr));
    phdr.p_type = cpu_convert_to_target32(PT_NOTE, endian);
    phdr.p_offset = cpu_convert_to_target64(begin, endian);
    phdr.p_filesz = cpu_convert_to_target64(s->note_size, endian);
    phdr.p_memsz = cpu_convert_to_target64(s->note_size, endian);

    ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write program header table.\n");
        return -1;
    }

    return 0;
}
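
/* Write the per-CPU ELF notes and the QEMU-specific notes for every vCPU. */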
static int write_elf64_notes(DumpState *s)
{
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        ret = cpu_write_elf64_note(fd_write_vmcore, env, id, s);
        if (ret < 0) {
            dump_error(s, "dump: failed to write elf notes.\n");
            return -1;
        }
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        ret = cpu_write_elf64_qemunote(fd_write_vmcore, env, s);
        if (ret < 0) {
            dump_error(s, "dump: failed to write CPU status.\n");
            return -1;
        }
    }

    return 0;
}

static int write_elf32_note(DumpState *s)
{
    target_phys_addr_t begin = s->memory_offset - s->note_size;
    Elf32_Phdr phdr;
    int endian = s->dump_info.d_endian;
    int ret;

    memset(&phdr, 0, sizeof(Elf32_Phdr));
    phdr.p_type = cpu_convert_to_target32(PT_NOTE, endian);
    phdr.p_offset = cpu_convert_to_target32(begin, endian);
    phdr.p_filesz = cpu_convert_to_target32(s->note_size, endian);
    phdr.p_memsz = cpu_convert_to_target32(s->note_size, endian);

    ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write program header table.\n");
        return -1;
    }

    return 0;
}

static int write_elf32_notes(DumpState *s)
{
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        ret = cpu_write_elf32_note(fd_write_vmcore, env, id, s);
        if (ret < 0) {
            dump_error(s, "dump: failed to write elf notes.\n");
            return -1;
        }
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        ret = cpu_write_elf32_qemunote(fd_write_vmcore, env, s);
        if (ret < 0) {
            dump_error(s, "dump: failed to write CPU status.\n");
            return -1;
        }
    }

    return 0;
}
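
/*
 * Write the single section header that is emitted when phdr_num is PN_XNUM;
 * its sh_info field holds the real number of program headers.
 */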
static int write_elf_section(DumpState *s, int type)
{
    Elf32_Shdr shdr32;
    Elf64_Shdr shdr64;
    int shdr_size;
    void *shdr;
    int ret;
    int endian = s->dump_info.d_endian;

    if (type == 0) {
        shdr_size = sizeof(Elf32_Shdr);
        memset(&shdr32, 0, shdr_size);
        shdr32.sh_info = cpu_convert_to_target32(s->sh_info, endian);
        shdr = &shdr32;
    } else {
        shdr_size = sizeof(Elf64_Shdr);
        memset(&shdr64, 0, shdr_size);
        shdr64.sh_info = cpu_convert_to_target32(s->sh_info, endian);
        shdr = &shdr64;
    }

    ret = fd_write_vmcore(shdr, shdr_size, s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write section header table.\n");
        return -1;
    }

    return 0;
}

static int write_data(DumpState *s, void *buf, int length)
{
    int ret;

    ret = fd_write_vmcore(buf, length, s);
    if (ret < 0) {
        dump_error(s, "dump: failed to save memory.\n");
        return -1;
    }

    return 0;
}

/* write the memory to vmcore. 1 page per I/O. */
static int write_memory(DumpState *s, RAMBlock *block, ram_addr_t start,
                        int64_t size)
{
    int i, ret;

    for (i = 0; i < size / TARGET_PAGE_SIZE; i++) {
        ret = write_data(s, block->host + start + i * TARGET_PAGE_SIZE,
                         TARGET_PAGE_SIZE);
        if (ret < 0) {
            return ret;
        }
    }

    if ((size % TARGET_PAGE_SIZE) != 0) {
        ret = write_data(s, block->host + start + i * TARGET_PAGE_SIZE,
                         size % TARGET_PAGE_SIZE);
        if (ret < 0) {
            return ret;
        }
    }

    return 0;
}

/* get the memory's offset in the vmcore */
static target_phys_addr_t get_offset(target_phys_addr_t phys_addr,
                                     DumpState *s)
{
    RAMBlock *block;
    target_phys_addr_t offset = s->memory_offset;
    int64_t size_in_block, start;

    if (s->has_filter) {
        if (phys_addr < s->begin || phys_addr >= s->begin + s->length) {
            return -1;
        }
    }

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (s->has_filter) {
            if (block->offset >= s->begin + s->length ||
                block->offset + block->length <= s->begin) {
                /* This block is out of the range */
                continue;
            }

            if (s->begin <= block->offset) {
                start = block->offset;
            } else {
                start = s->begin;
            }

            size_in_block = block->length - (start - block->offset);
            if (s->begin + s->length < block->offset + block->length) {
                size_in_block -= block->offset + block->length -
                                 (s->begin + s->length);
            }
        } else {
            start = block->offset;
            size_in_block = block->length;
        }

        if (phys_addr >= start && phys_addr < start + size_in_block) {
            return phys_addr - start + offset;
        }

        offset += size_in_block;
    }

    return -1;
}
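
/* Write a PT_LOAD program header for every mapping in s->list. */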
static int write_elf_loads(DumpState *s)
{
    target_phys_addr_t offset;
    MemoryMapping *memory_mapping;
    uint32_t phdr_index = 1;
    uint32_t max_index;
    int ret;

    if (s->have_section) {
        max_index = s->sh_info;
    } else {
        max_index = s->phdr_num;
    }

    QTAILQ_FOREACH(memory_mapping, &s->list.head, next) {
        offset = get_offset(memory_mapping->phys_addr, s);
        if (s->dump_info.d_class == ELFCLASS64) {
            ret = write_elf64_load(s, memory_mapping, phdr_index++, offset);
        } else {
            ret = write_elf32_load(s, memory_mapping, phdr_index++, offset);
        }

        if (ret < 0) {
            return -1;
        }

        if (phdr_index >= max_index) {
            break;
        }
    }

    return 0;
}

/* write elf header, PT_NOTE and elf note to vmcore. */
static int dump_begin(DumpState *s)
{
    int ret;

    /*
     * the vmcore's format is:
     *   elf header, PT_NOTE program header, PT_LOAD program headers, an
     *   optional section header, the elf notes, and then the guest memory.
     *
     * we only know where the memory is saved after we write elf note into
     * vmcore.
     */

    /* write elf header to vmcore */
    if (s->dump_info.d_class == ELFCLASS64) {
        ret = write_elf64_header(s);
    } else {
        ret = write_elf32_header(s);
    }
    if (ret < 0) {
        return -1;
    }

    if (s->dump_info.d_class == ELFCLASS64) {
        /* write PT_NOTE to vmcore */
        if (write_elf64_note(s) < 0) {
            return -1;
        }

        /* write all PT_LOAD to vmcore */
        if (write_elf_loads(s) < 0) {
            return -1;
        }

        /* write section to vmcore */
        if (s->have_section) {
            if (write_elf_section(s, 1) < 0) {
                return -1;
            }
        }

        /* write notes to vmcore */
        if (write_elf64_notes(s) < 0) {
            return -1;
        }
    } else {
        /* write PT_NOTE to vmcore */
        if (write_elf32_note(s) < 0) {
            return -1;
        }

        /* write all PT_LOAD to vmcore */
        if (write_elf_loads(s) < 0) {
            return -1;
        }

        /* write section to vmcore */
        if (s->have_section) {
            if (write_elf_section(s, 0) < 0) {
                return -1;
            }
        }

        /* write notes to vmcore */
        if (write_elf32_notes(s) < 0) {
            return -1;
        }
    }

    return 0;
}

/* write PT_LOAD to vmcore */
static int dump_completed(DumpState *s)
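
/*
 * Advance to the next RAM block to dump, skipping blocks that fall outside
 * the requested filter range and adjusting s->start accordingly.
 */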
static int get_next_block(DumpState *s, RAMBlock *block)
{
    block = QLIST_NEXT(block, next);

    if (block->offset >= s->begin + s->length ||
        block->offset + block->length <= s->begin) {
        /* This block is out of the range */

    if (s->begin > block->offset) {
        s->start = s->begin - block->offset;

/* write all memory to vmcore */
static int dump_iterate(DumpState *s)
{
    size = block->length;

    if (s->begin + s->length < block->offset + block->length) {
        size -= block->offset + block->length - (s->begin + s->length);
    }

    ret = write_memory(s, block, s->start, size);

    ret = get_next_block(s, block);
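
/* Write the complete vmcore to the file descriptor held in s->fd. */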
static int create_vmcore(DumpState *s)
{
    int ret;

    ret = dump_begin(s);
    if (ret < 0) {
        return -1;
    }

    ret = dump_iterate(s);
    if (ret < 0) {
        return -1;
    }

    return 0;
}
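
/*
 * Find the first RAM block to dump.  With a filter, also compute the offset
 * of the filtered range within that block.
 */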
static ram_addr_t get_start_block(DumpState *s)
{
    RAMBlock *block;

    if (!s->has_filter) {
        s->block = QLIST_FIRST(&ram_list.blocks);
        return 0;
    }

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset >= s->begin + s->length ||
            block->offset + block->length <= s->begin) {
            /* This block is out of the range */
            continue;
        }

        s->block = block;
        if (s->begin > block->offset) {
            s->start = s->begin - block->offset;
        } else {
            s->start = 0;
        }

        return s->start;
    }

    return -1;
}
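
/*
 * Prepare the DumpState: stop the VM, synchronize the vCPU state, query the
 * architecture-specific dump info, build the guest memory mapping list, and
 * compute the program/section header counts and the vmcore memory offset.
 */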
static int dump_init(DumpState *s, int fd, bool paging, bool has_filter,
                     int64_t begin, int64_t length, Error **errp)
{
    CPUArchState *env;
    int nr_cpus = 0;
    int ret;

    if (runstate_is_running()) {
        vm_stop(RUN_STATE_SAVE_VM);
    }

    s->errp = errp;
    s->fd = fd;
    s->has_filter = has_filter;
    s->begin = begin;
    s->length = length;
    s->start = get_start_block(s);
    if (s->start == -1) {
        error_set(errp, QERR_INVALID_PARAMETER, "begin");
        return -1;
    }

    /*
     * get dump info: endian, class and architecture.
     * If the target architecture is not supported, cpu_get_dump_info() will
     * return -1.
     *
     * if we use kvm, we should synchronize the register before we get dump
     * info.
     */
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        cpu_synchronize_state(env);
        nr_cpus++;
    }

    ret = cpu_get_dump_info(&s->dump_info);
    if (ret < 0) {
        error_set(errp, QERR_UNSUPPORTED);
        return -1;
    }

    s->note_size = cpu_get_note_size(s->dump_info.d_class,
                                     s->dump_info.d_machine, nr_cpus);
    if (s->note_size < 0) {
        error_set(errp, QERR_UNSUPPORTED);
        return -1;
    }

    /* get memory mapping */
    memory_mapping_list_init(&s->list);
    if (paging) {
        qemu_get_guest_memory_mapping(&s->list);
    } else {
        qemu_get_guest_simple_memory_mapping(&s->list);
    }

    if (s->has_filter) {
        memory_mapping_filter(&s->list, s->begin, s->length);
    }

    /* the type of ehdr->e_phnum is uint16_t, so we should avoid overflow */
    s->phdr_num = 1; /* PT_NOTE */
    if (s->list.num < UINT16_MAX - 2) {
        s->phdr_num += s->list.num;
        s->have_section = false;
    } else {
        s->have_section = true;
        s->phdr_num = PN_XNUM;
        s->sh_info = 1; /* PT_NOTE */

        /* the type of shdr->sh_info is uint32_t, so we should avoid overflow */
        if (s->list.num <= UINT32_MAX - 1) {
            s->sh_info += s->list.num;
        } else {
            s->sh_info = UINT32_MAX;
        }
    }

    if (s->dump_info.d_class == ELFCLASS64) {
        if (s->have_section) {
            s->memory_offset = sizeof(Elf64_Ehdr) +
                               sizeof(Elf64_Phdr) * s->sh_info +
                               sizeof(Elf64_Shdr) + s->note_size;
        } else {
            s->memory_offset = sizeof(Elf64_Ehdr) +
                               sizeof(Elf64_Phdr) * s->phdr_num + s->note_size;
        }
    } else {
        if (s->have_section) {
            s->memory_offset = sizeof(Elf32_Ehdr) +
                               sizeof(Elf32_Phdr) * s->sh_info +
                               sizeof(Elf32_Shdr) + s->note_size;
        } else {
            s->memory_offset = sizeof(Elf32_Ehdr) +
                               sizeof(Elf32_Phdr) * s->phdr_num + s->note_size;
        }
    }

    return 0;
}
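
/*
 * QMP handler for the dump-guest-memory command: validates the begin/length
 * pair, opens an "fd:" or "file:" target, then writes the vmcore.
 *
 * A typical invocation, for example with the file: protocol, looks like:
 *   { "execute": "dump-guest-memory",
 *     "arguments": { "paging": false, "protocol": "file:/tmp/vmcore" } }
 */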
void qmp_dump_guest_memory(bool paging, const char *file, bool has_begin,
                           int64_t begin, bool has_length, int64_t length,
                           Error **errp)
{
    const char *p;
    int fd = -1;
    DumpState *s;
    int ret;

    if (has_begin && !has_length) {
        error_set(errp, QERR_MISSING_PARAMETER, "length");
        return;
    }
    if (!has_begin && has_length) {
        error_set(errp, QERR_MISSING_PARAMETER, "begin");
        return;
    }

    if (strstart(file, "fd:", &p)) {
        fd = monitor_get_fd(cur_mon, p, errp);
        if (fd == -1) {
            return;
        }
    }

    if (strstart(file, "file:", &p)) {
        fd = qemu_open(p, O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, S_IRUSR);
        if (fd < 0) {
            error_set(errp, QERR_OPEN_FILE_FAILED, p);
            return;
        }
    }

    if (fd == -1) {
        error_set(errp, QERR_INVALID_PARAMETER, "protocol");
        return;
    }

    s = g_malloc(sizeof(DumpState));

    ret = dump_init(s, fd, paging, has_begin, begin, length, errp);
    if (ret < 0) {
        g_free(s);
        return;
    }

    if (create_vmcore(s) < 0 && !error_is_set(s->errp)) {
        error_set(errp, QERR_IO_ERROR);
    }

    g_free(s);
}