4 * Copyright Fujitsu, Corp. 2011, 2012
9 * This work is licensed under the terms of the GNU GPL, version 2 or later.
10 * See the COPYING file in the top-level directory.
14 #include "qemu-common.h"
17 #include "exec/cpu-all.h"
18 #include "exec/hwaddr.h"
19 #include "monitor/monitor.h"
20 #include "sysemu/kvm.h"
21 #include "sysemu/dump.h"
22 #include "sysemu/sysemu.h"
23 #include "sysemu/memory_mapping.h"
24 #include "sysemu/cpus.h"
25 #include "qapi/error.h"
26 #include "qmp-commands.h"
28 static uint16_t cpu_convert_to_target16(uint16_t val, int endian)
30 if (endian == ELFDATA2LSB) {
31 val = cpu_to_le16(val);
33 val = cpu_to_be16(val);
39 static uint32_t cpu_convert_to_target32(uint32_t val, int endian)
41 if (endian == ELFDATA2LSB) {
42 val = cpu_to_le32(val);
44 val = cpu_to_be32(val);
50 static uint64_t cpu_convert_to_target64(uint64_t val, int endian)
52 if (endian == ELFDATA2LSB) {
53 val = cpu_to_le64(val);
55 val = cpu_to_be64(val);
/*
 * Per-dump context shared by the ELF-core and kdump-compressed writers.
 * NOTE(review): this chunk is truncated -- fields referenced elsewhere in
 * the file (fd, begin, length, start, has_filter, phdr_num, have_section,
 * sh_info, memory_offset, note_size, errp, ...) are not visible here;
 * confirm against the full file.
 */
61 typedef struct DumpState {
/* snapshot of the guest's physical memory blocks */
62 GuestPhysBlockList guest_phys_blocks;
/* target class/endianness/machine, filled by cpu_get_dump_info() */
63 ArchDumpInfo dump_info;
/* virtual-to-physical mappings used to build the PT_LOAD headers */
64 MemoryMappingList list;
/* iteration cursor used by get_start_block()/get_next_block() */
73 GuestPhysBlock *next_block;
80 uint8_t *note_buf; /* buffer for notes */
81 size_t note_buf_offset; /* the writing place in note_buf */
82 uint32_t nr_cpus; /* number of guest's cpu */
83 size_t page_size; /* guest's page size */
84 uint32_t page_shift; /* guest's page shift */
85 uint64_t max_mapnr; /* the biggest guest's phys-mem's number */
86 size_t len_dump_bitmap; /* the size of the place used to store
87 dump_bitmap in vmcore */
88 off_t offset_dump_bitmap; /* offset of dump_bitmap part in vmcore */
89 off_t offset_page; /* offset of page part in vmcore */
90 size_t num_dumpable; /* number of page that can be dumped */
91 uint32_t flag_compress; /* indicate the compression format */
/*
 * Release resources held by @s: the guest-physical block snapshot and the
 * memory-mapping list.  NOTE(review): chunk truncated -- the return value
 * and any further cleanup are not visible here.
 */
94 static int dump_cleanup(DumpState *s)
98 guest_phys_blocks_free(&s->guest_phys_blocks);
99 memory_mapping_list_free(&s->list);
/* Report a dump failure described by @reason; body not visible in this
 * truncated chunk. */
110 static void dump_error(DumpState *s, const char *reason)
/*
 * WriteCoreDumpFunction callback: write @size bytes from @buf to the dump
 * file descriptor carried in @opaque (a DumpState *).
 */
115 static int fd_write_vmcore(const void *buf, size_t size, void *opaque)
117 DumpState *s = opaque;
/* qemu_write_full() handles short writes, so any mismatch is a real error */
120 written_size = qemu_write_full(s->fd, buf, size);
121 if (written_size != size) {
/*
 * Write the Elf64_Ehdr of the vmcore, byte-swapping every multi-byte field
 * to the target endianness.  When s->have_section is set (e_phnum would
 * overflow, see dump_init()), the real program header count lives in the
 * single section header's sh_info, advertised here via e_shoff/e_shnum.
 */
128 static int write_elf64_header(DumpState *s)
130 Elf64_Ehdr elf_header;
132 int endian = s->dump_info.d_endian;
134 memset(&elf_header, 0, sizeof(Elf64_Ehdr));
135 memcpy(&elf_header, ELFMAG, SELFMAG);
136 elf_header.e_ident[EI_CLASS] = ELFCLASS64;
137 elf_header.e_ident[EI_DATA] = s->dump_info.d_endian;
138 elf_header.e_ident[EI_VERSION] = EV_CURRENT;
139 elf_header.e_type = cpu_convert_to_target16(ET_CORE, endian);
140 elf_header.e_machine = cpu_convert_to_target16(s->dump_info.d_machine,
142 elf_header.e_version = cpu_convert_to_target32(EV_CURRENT, endian);
143 elf_header.e_ehsize = cpu_convert_to_target16(sizeof(elf_header), endian);
/* program headers start immediately after the ELF header */
144 elf_header.e_phoff = cpu_convert_to_target64(sizeof(Elf64_Ehdr), endian);
145 elf_header.e_phentsize = cpu_convert_to_target16(sizeof(Elf64_Phdr),
147 elf_header.e_phnum = cpu_convert_to_target16(s->phdr_num, endian);
148 if (s->have_section) {
/* the section header is placed right after the program header table */
149 uint64_t shoff = sizeof(Elf64_Ehdr) + sizeof(Elf64_Phdr) * s->sh_info;
151 elf_header.e_shoff = cpu_convert_to_target64(shoff, endian);
152 elf_header.e_shentsize = cpu_convert_to_target16(sizeof(Elf64_Shdr),
154 elf_header.e_shnum = cpu_convert_to_target16(1, endian);
157 ret = fd_write_vmcore(&elf_header, sizeof(elf_header), s);
159 dump_error(s, "dump: failed to write elf header.\n");
/*
 * 32-bit counterpart of write_elf64_header(): emit the Elf32_Ehdr with all
 * multi-byte fields converted to the target endianness; the optional single
 * section header carries the real phdr count in sh_info.
 */
166 static int write_elf32_header(DumpState *s)
168 Elf32_Ehdr elf_header;
170 int endian = s->dump_info.d_endian;
172 memset(&elf_header, 0, sizeof(Elf32_Ehdr));
173 memcpy(&elf_header, ELFMAG, SELFMAG);
174 elf_header.e_ident[EI_CLASS] = ELFCLASS32;
175 elf_header.e_ident[EI_DATA] = endian;
176 elf_header.e_ident[EI_VERSION] = EV_CURRENT;
177 elf_header.e_type = cpu_convert_to_target16(ET_CORE, endian);
178 elf_header.e_machine = cpu_convert_to_target16(s->dump_info.d_machine,
180 elf_header.e_version = cpu_convert_to_target32(EV_CURRENT, endian);
181 elf_header.e_ehsize = cpu_convert_to_target16(sizeof(elf_header), endian);
/* program headers start immediately after the ELF header */
182 elf_header.e_phoff = cpu_convert_to_target32(sizeof(Elf32_Ehdr), endian);
183 elf_header.e_phentsize = cpu_convert_to_target16(sizeof(Elf32_Phdr),
185 elf_header.e_phnum = cpu_convert_to_target16(s->phdr_num, endian);
186 if (s->have_section) {
187 uint32_t shoff = sizeof(Elf32_Ehdr) + sizeof(Elf32_Phdr) * s->sh_info;
189 elf_header.e_shoff = cpu_convert_to_target32(shoff, endian);
190 elf_header.e_shentsize = cpu_convert_to_target16(sizeof(Elf32_Shdr),
192 elf_header.e_shnum = cpu_convert_to_target16(1, endian);
195 ret = fd_write_vmcore(&elf_header, sizeof(elf_header), s);
197 dump_error(s, "dump: failed to write elf header.\n");
/*
 * Write one Elf64_Phdr of type PT_LOAD describing @memory_mapping.
 * @offset is the file offset of the mapped data; @filesz is how many bytes
 * are actually stored in the file (may be smaller than the in-memory
 * length, in which case the loader zero-fills the remainder).
 */
204 static int write_elf64_load(DumpState *s, MemoryMapping *memory_mapping,
205 int phdr_index, hwaddr offset,
210 int endian = s->dump_info.d_endian;
212 memset(&phdr, 0, sizeof(Elf64_Phdr));
213 phdr.p_type = cpu_convert_to_target32(PT_LOAD, endian);
214 phdr.p_offset = cpu_convert_to_target64(offset, endian);
215 phdr.p_paddr = cpu_convert_to_target64(memory_mapping->phys_addr, endian);
216 phdr.p_filesz = cpu_convert_to_target64(filesz, endian);
217 phdr.p_memsz = cpu_convert_to_target64(memory_mapping->length, endian);
218 phdr.p_vaddr = cpu_convert_to_target64(memory_mapping->virt_addr, endian);
/* file contents must never exceed the in-memory size of the segment */
220 assert(memory_mapping->length >= filesz);
222 ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s);
224 dump_error(s, "dump: failed to write program header table.\n");
/*
 * 32-bit counterpart of write_elf64_load(): write one PT_LOAD Elf32_Phdr
 * for @memory_mapping at file offset @offset with @filesz stored bytes.
 */
231 static int write_elf32_load(DumpState *s, MemoryMapping *memory_mapping,
232 int phdr_index, hwaddr offset,
237 int endian = s->dump_info.d_endian;
239 memset(&phdr, 0, sizeof(Elf32_Phdr));
240 phdr.p_type = cpu_convert_to_target32(PT_LOAD, endian);
241 phdr.p_offset = cpu_convert_to_target32(offset, endian);
242 phdr.p_paddr = cpu_convert_to_target32(memory_mapping->phys_addr, endian);
243 phdr.p_filesz = cpu_convert_to_target32(filesz, endian);
244 phdr.p_memsz = cpu_convert_to_target32(memory_mapping->length, endian);
245 phdr.p_vaddr = cpu_convert_to_target32(memory_mapping->virt_addr, endian);
/* file contents must never exceed the in-memory size of the segment */
247 assert(memory_mapping->length >= filesz);
249 ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s);
251 dump_error(s, "dump: failed to write program header table.\n");
/*
 * Write the PT_NOTE program header.  The note data is laid out just before
 * the memory contents in the vmcore, hence its file offset is
 * memory_offset - note_size.
 */
258 static int write_elf64_note(DumpState *s)
261 int endian = s->dump_info.d_endian;
262 hwaddr begin = s->memory_offset - s->note_size;
265 memset(&phdr, 0, sizeof(Elf64_Phdr));
266 phdr.p_type = cpu_convert_to_target32(PT_NOTE, endian);
267 phdr.p_offset = cpu_convert_to_target64(begin, endian);
269 phdr.p_filesz = cpu_convert_to_target64(s->note_size, endian);
270 phdr.p_memsz = cpu_convert_to_target64(s->note_size, endian);
273 ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s);
275 dump_error(s, "dump: failed to write program header table.\n");
282 static inline int cpu_index(CPUState *cpu)
284 return cpu->cpu_index + 1;
/*
 * Emit the per-CPU ELF64 notes and QEMU-specific CPU state notes through
 * @f, which either writes straight to the vmcore (fd_write_vmcore) or
 * buffers into s->note_buf (buf_write_note).
 */
287 static int write_elf64_notes(WriteCoreDumpFunction f, DumpState *s)
295 ret = cpu_write_elf64_note(f, cpu, id, s);
297 dump_error(s, "dump: failed to write elf notes.\n");
303 ret = cpu_write_elf64_qemunote(f, cpu, s);
305 dump_error(s, "dump: failed to write CPU status.\n");
/*
 * 32-bit counterpart of write_elf64_note(): write the PT_NOTE Elf32_Phdr;
 * the note data sits at memory_offset - note_size in the file.
 */
313 static int write_elf32_note(DumpState *s)
315 hwaddr begin = s->memory_offset - s->note_size;
317 int endian = s->dump_info.d_endian;
320 memset(&phdr, 0, sizeof(Elf32_Phdr));
321 phdr.p_type = cpu_convert_to_target32(PT_NOTE, endian);
322 phdr.p_offset = cpu_convert_to_target32(begin, endian);
324 phdr.p_filesz = cpu_convert_to_target32(s->note_size, endian);
325 phdr.p_memsz = cpu_convert_to_target32(s->note_size, endian);
328 ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s);
330 dump_error(s, "dump: failed to write program header table.\n");
/*
 * Emit the per-CPU ELF32 notes and QEMU-specific CPU state notes through
 * @f (direct vmcore write or note_buf buffering).
 */
337 static int write_elf32_notes(WriteCoreDumpFunction f, DumpState *s)
345 ret = cpu_write_elf32_note(f, cpu, id, s);
347 dump_error(s, "dump: failed to write elf notes.\n");
353 ret = cpu_write_elf32_qemunote(f, cpu, s);
355 dump_error(s, "dump: failed to write CPU status.\n");
/*
 * Write the single section header whose sh_info carries the real program
 * header count when it does not fit in e_phnum.  @type selects the ELF
 * class; the call sites pass 0 on the ELFCLASS32 path and 1 on the
 * ELFCLASS64 path (the selecting condition itself is not visible in this
 * truncated chunk -- confirm against the full file).
 */
363 static int write_elf_section(DumpState *s, int type)
367 int endian = s->dump_info.d_endian;
373 shdr_size = sizeof(Elf32_Shdr);
374 memset(&shdr32, 0, shdr_size);
375 shdr32.sh_info = cpu_convert_to_target32(s->sh_info, endian);
378 shdr_size = sizeof(Elf64_Shdr);
379 memset(&shdr64, 0, shdr_size);
380 shdr64.sh_info = cpu_convert_to_target32(s->sh_info, endian);
384 ret = fd_write_vmcore(&shdr, shdr_size, s);
386 dump_error(s, "dump: failed to write section header table.\n");
/* Write @length bytes of guest memory from @buf into the vmcore. */
393 static int write_data(DumpState *s, void *buf, int length)
397 ret = fd_write_vmcore(buf, length, s);
399 dump_error(s, "dump: failed to save memory.\n");
406 /* write the memory to vmcore. 1 page per I/O. */
407 static int write_memory(DumpState *s, GuestPhysBlock *block, ram_addr_t start,
/* whole pages first... */
413 for (i = 0; i < size / TARGET_PAGE_SIZE; i++) {
414 ret = write_data(s, block->host_addr + start + i * TARGET_PAGE_SIZE,
/* ...then the trailing partial page, if size is not page-aligned */
421 if ((size % TARGET_PAGE_SIZE) != 0) {
422 ret = write_data(s, block->host_addr + start + i * TARGET_PAGE_SIZE,
423 size % TARGET_PAGE_SIZE);
432 /* get the memory's offset and size in the vmcore */
/*
 * Map guest-physical address @phys_addr (spanning @mapping_length bytes)
 * to its location in the vmcore file: *p_offset receives the file offset,
 * *p_filesz the number of bytes of the mapping actually stored (clamped to
 * the containing GuestPhysBlock).  Honors the dump filter range
 * [s->begin, s->begin + s->length).
 */
433 static void get_offset_range(hwaddr phys_addr,
434 ram_addr_t mapping_length,
439 GuestPhysBlock *block;
440 hwaddr offset = s->memory_offset;
441 int64_t size_in_block, start;
443 /* When the memory is not stored into vmcore, offset will be -1 */
448 if (phys_addr < s->begin || phys_addr >= s->begin + s->length) {
/* walk the blocks in file order, accumulating their stored sizes */
453 QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) {
455 if (block->target_start >= s->begin + s->length ||
456 block->target_end <= s->begin) {
457 /* This block is out of the range */
/* clip the block against the filter window */
461 if (s->begin <= block->target_start) {
462 start = block->target_start;
467 size_in_block = block->target_end - start;
468 if (s->begin + s->length < block->target_end) {
469 size_in_block -= block->target_end - (s->begin + s->length);
472 start = block->target_start;
473 size_in_block = block->target_end - block->target_start;
476 if (phys_addr >= start && phys_addr < start + size_in_block) {
477 *p_offset = phys_addr - start + offset;
479 /* The offset range mapped from the vmcore file must not spill over
480 * the GuestPhysBlock, clamp it. The rest of the mapping will be
481 * zero-filled in memory at load time; see
482 * <http://refspecs.linuxbase.org/elf/gabi4+/ch5.pheader.html>.
484 *p_filesz = phys_addr + mapping_length <= start + size_in_block ?
486 size_in_block - (phys_addr - start);
490 offset += size_in_block;
/*
 * Write one PT_LOAD program header per memory mapping.  phdr_index starts
 * at 1 because index 0 is the PT_NOTE header; iteration stops when the
 * header table (phdr_num, or sh_info when sections are in use) is full.
 */
494 static int write_elf_loads(DumpState *s)
496 hwaddr offset, filesz;
497 MemoryMapping *memory_mapping;
498 uint32_t phdr_index = 1;
502 if (s->have_section) {
503 max_index = s->sh_info;
505 max_index = s->phdr_num;
508 QTAILQ_FOREACH(memory_mapping, &s->list.head, next) {
509 get_offset_range(memory_mapping->phys_addr,
510 memory_mapping->length,
511 s, &offset, &filesz);
512 if (s->dump_info.d_class == ELFCLASS64) {
513 ret = write_elf64_load(s, memory_mapping, phdr_index++, offset,
516 ret = write_elf32_load(s, memory_mapping, phdr_index++, offset,
524 if (phdr_index >= max_index) {
532 /* write elf header, PT_NOTE and elf note to vmcore. */
/*
 * Emit the complete ELF front matter for the vmcore in file order:
 * ELF header, PT_NOTE, all PT_LOAD headers, the optional section header,
 * and finally the note data itself -- once for each ELF class.
 */
533 static int dump_begin(DumpState *s)
538 * the vmcore's format is:
557 * we only know where the memory is saved after we write elf note into
561 /* write elf header to vmcore */
562 if (s->dump_info.d_class == ELFCLASS64) {
563 ret = write_elf64_header(s);
565 ret = write_elf32_header(s);
571 if (s->dump_info.d_class == ELFCLASS64) {
572 /* write PT_NOTE to vmcore */
573 if (write_elf64_note(s) < 0) {
577 /* write all PT_LOAD to vmcore */
578 if (write_elf_loads(s) < 0) {
582 /* write section to vmcore */
583 if (s->have_section) {
584 if (write_elf_section(s, 1) < 0) {
589 /* write notes to vmcore */
590 if (write_elf64_notes(fd_write_vmcore, s) < 0) {
595 /* write PT_NOTE to vmcore */
596 if (write_elf32_note(s) < 0) {
600 /* write all PT_LOAD to vmcore */
601 if (write_elf_loads(s) < 0) {
605 /* write section to vmcore */
606 if (s->have_section) {
607 if (write_elf_section(s, 0) < 0) {
612 /* write notes to vmcore */
613 if (write_elf32_notes(fd_write_vmcore, s) < 0) {
621 /* write PT_LOAD to vmcore */
/* NOTE(review): the comment above looks misplaced for dump_completed,
 * whose body is not visible in this truncated chunk -- presumably it
 * finalizes/cleans up the dump; confirm against the full file. */
622 static int dump_completed(DumpState *s)
/*
 * Advance the iteration to the block after @block, skipping blocks outside
 * the filter range [s->begin, s->begin + s->length); records the new block
 * in s->next_block and sets s->start to the in-block offset where dumping
 * should resume.
 */
628 static int get_next_block(DumpState *s, GuestPhysBlock *block)
631 block = QTAILQ_NEXT(block, next);
638 s->next_block = block;
640 if (block->target_start >= s->begin + s->length ||
641 block->target_end <= s->begin) {
642 /* This block is out of the range */
/* filter starts inside this block: skip the leading part */
646 if (s->begin > block->target_start) {
647 s->start = s->begin - block->target_start;
655 /* write all memory to vmcore */
/* Loop over the blocks selected by get_start_block()/get_next_block(),
 * clamping each block's size to the filter window before writing. */
656 static int dump_iterate(DumpState *s)
658 GuestPhysBlock *block;
663 block = s->next_block;
665 size = block->target_end - block->target_start;
/* trim the part of the block past the end of the filter range */
668 if (s->begin + s->length < block->target_end) {
669 size -= block->target_end - (s->begin + s->length);
672 ret = write_memory(s, block, s->start, size);
677 ret = get_next_block(s, block);
/*
 * Produce the whole ELF vmcore: headers/notes (dump_begin) followed by the
 * guest memory contents (dump_iterate).  NOTE(review): the dump_begin call
 * and error handling are on lines not visible in this truncated chunk.
 */
685 static int create_vmcore(DumpState *s)
694 ret = dump_iterate(s);
/*
 * Write the makedumpfile flat-format start header: a MakedumpfileHeader
 * (signature, big-endian type/version) zero-padded out to
 * MAX_SIZE_MDF_HEADER bytes.  The strncpy bound of strlen(signature) is
 * deliberate -- the surrounding struct was zeroed, so the field stays
 * NUL-terminated as long as it is larger than the signature.
 */
702 static int write_start_flat_header(int fd)
705 MakedumpfileHeader mh;
708 memset(&mh, 0, sizeof(mh));
709 strncpy(mh.signature, MAKEDUMPFILE_SIGNATURE,
710 strlen(MAKEDUMPFILE_SIGNATURE));
712 mh.type = cpu_to_be64(TYPE_FLAT_HEADER);
713 mh.version = cpu_to_be64(VERSION_FLAT_HEADER);
715 buf = g_malloc0(MAX_SIZE_MDF_HEADER);
716 memcpy(buf, &mh, sizeof(mh));
719 written_size = qemu_write_full(fd, buf, MAX_SIZE_MDF_HEADER);
720 if (written_size != MAX_SIZE_MDF_HEADER) {
/*
 * Write the flat-format terminator record: a data header whose offset and
 * buf_size both hold the END_FLAG_FLAT_HEADER sentinel.
 */
728 static int write_end_flat_header(int fd)
730 MakedumpfileDataHeader mdh;
732 mdh.offset = END_FLAG_FLAT_HEADER;
733 mdh.buf_size = END_FLAG_FLAT_HEADER;
736 written_size = qemu_write_full(fd, &mdh, sizeof(mdh));
737 if (written_size != sizeof(mdh)) {
/*
 * Write one flat-format data record to @fd: a big-endian
 * MakedumpfileDataHeader (destination @offset and @size) followed by the
 * @size-byte payload from @buf.
 */
744 static int write_buffer(int fd, off_t offset, const void *buf, size_t size)
747 MakedumpfileDataHeader mdh;
749 mdh.offset = cpu_to_be64(offset);
750 mdh.buf_size = cpu_to_be64(size);
752 written_size = qemu_write_full(fd, &mdh, sizeof(mdh));
753 if (written_size != sizeof(mdh)) {
757 written_size = qemu_write_full(fd, buf, size);
758 if (written_size != size) {
/*
 * WriteCoreDumpFunction callback for the kdump path: append @buf to
 * s->note_buf instead of writing to the file.  Fails when the notes would
 * exceed the s->note_size budget allocated by create_header32/64().
 */
765 static int buf_write_note(const void *buf, size_t size, void *opaque)
767 DumpState *s = opaque;
769 /* note_buf is not enough */
770 if (s->note_buf_offset + size > s->note_size) {
774 memcpy(s->note_buf + s->note_buf_offset, buf, size);
776 s->note_buf_offset += size;
781 /* write common header, sub header and elf note to vmcore */
/*
 * Emit the 32-bit kdump-compressed dump front matter: DiskDumpHeader32,
 * KdumpSubHeader32, and the buffered ELF32 notes; also computes the file
 * offsets of the dump bitmap and the page data for later stages.
 * NOTE(review): allocation cleanup (g_free of dh/kh/note_buf) and the
 * return paths are on lines not visible in this truncated chunk.
 */
782 static int create_header32(DumpState *s)
785 DiskDumpHeader32 *dh = NULL;
786 KdumpSubHeader32 *kh = NULL;
788 int endian = s->dump_info.d_endian;
790 uint32_t sub_hdr_size;
791 uint32_t bitmap_blocks;
793 uint64_t offset_note;
795 /* write common header, the version of kdump-compressed format is 6th */
796 size = sizeof(DiskDumpHeader32);
797 dh = g_malloc0(size);
799 strncpy(dh->signature, KDUMP_SIGNATURE, strlen(KDUMP_SIGNATURE));
800 dh->header_version = cpu_convert_to_target32(6, endian);
801 block_size = s->page_size;
802 dh->block_size = cpu_convert_to_target32(block_size, endian);
/* sub-header size is expressed in blocks, rounded up */
803 sub_hdr_size = sizeof(struct KdumpSubHeader32) + s->note_size;
804 sub_hdr_size = DIV_ROUND_UP(sub_hdr_size, block_size);
805 dh->sub_hdr_size = cpu_convert_to_target32(sub_hdr_size, endian);
806 /* dh->max_mapnr may be truncated, full 64bit is in kh.max_mapnr_64 */
807 dh->max_mapnr = cpu_convert_to_target32(MIN(s->max_mapnr, UINT_MAX),
809 dh->nr_cpus = cpu_convert_to_target32(s->nr_cpus, endian);
/* two bitmaps (1st and 2nd) are stored, hence the factor of 2 */
810 bitmap_blocks = DIV_ROUND_UP(s->len_dump_bitmap, block_size) * 2;
811 dh->bitmap_blocks = cpu_convert_to_target32(bitmap_blocks, endian);
812 memcpy(&(dh->utsname.machine), "i686", 4);
/* translate the selected compression into the header status bits */
814 if (s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) {
815 status |= DUMP_DH_COMPRESSED_ZLIB;
818 if (s->flag_compress & DUMP_DH_COMPRESSED_LZO) {
819 status |= DUMP_DH_COMPRESSED_LZO;
823 if (s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) {
824 status |= DUMP_DH_COMPRESSED_SNAPPY;
827 dh->status = cpu_convert_to_target32(status, endian);
829 if (write_buffer(s->fd, 0, dh, size) < 0) {
830 dump_error(s, "dump: failed to write disk dump header.\n");
835 /* write sub header */
836 size = sizeof(KdumpSubHeader32);
837 kh = g_malloc0(size);
839 /* 64bit max_mapnr_64 */
840 kh->max_mapnr_64 = cpu_convert_to_target64(s->max_mapnr, endian);
841 kh->phys_base = cpu_convert_to_target32(PHYS_BASE, endian);
842 kh->dump_level = cpu_convert_to_target32(DUMP_LEVEL, endian);
/* notes follow the sub-header structure inside the sub-header blocks */
844 offset_note = DISKDUMP_HEADER_BLOCKS * block_size + size;
845 kh->offset_note = cpu_convert_to_target64(offset_note, endian);
846 kh->note_size = cpu_convert_to_target32(s->note_size, endian);
848 if (write_buffer(s->fd, DISKDUMP_HEADER_BLOCKS *
849 block_size, kh, size) < 0) {
850 dump_error(s, "dump: failed to write kdump sub header.\n");
856 s->note_buf = g_malloc0(s->note_size);
857 s->note_buf_offset = 0;
859 /* use s->note_buf to store notes temporarily */
860 if (write_elf32_notes(buf_write_note, s) < 0) {
865 if (write_buffer(s->fd, offset_note, s->note_buf,
867 dump_error(s, "dump: failed to write notes");
872 /* get offset of dump_bitmap */
873 s->offset_dump_bitmap = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size) *
876 /* get offset of page */
877 s->offset_page = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size + bitmap_blocks) *
888 /* write common header, sub header and elf note to vmcore */
/*
 * 64-bit counterpart of create_header32(): DiskDumpHeader64 +
 * KdumpSubHeader64 + buffered ELF64 notes; differs in the "x86_64"
 * utsname.machine string and 64-bit phys_base/note_size fields.
 * NOTE(review): allocation cleanup and return paths are on lines not
 * visible in this truncated chunk.
 */
889 static int create_header64(DumpState *s)
892 DiskDumpHeader64 *dh = NULL;
893 KdumpSubHeader64 *kh = NULL;
895 int endian = s->dump_info.d_endian;
897 uint32_t sub_hdr_size;
898 uint32_t bitmap_blocks;
900 uint64_t offset_note;
902 /* write common header, the version of kdump-compressed format is 6th */
903 size = sizeof(DiskDumpHeader64);
904 dh = g_malloc0(size);
906 strncpy(dh->signature, KDUMP_SIGNATURE, strlen(KDUMP_SIGNATURE));
907 dh->header_version = cpu_convert_to_target32(6, endian);
908 block_size = s->page_size;
909 dh->block_size = cpu_convert_to_target32(block_size, endian);
910 sub_hdr_size = sizeof(struct KdumpSubHeader64) + s->note_size;
911 sub_hdr_size = DIV_ROUND_UP(sub_hdr_size, block_size);
912 dh->sub_hdr_size = cpu_convert_to_target32(sub_hdr_size, endian);
913 /* dh->max_mapnr may be truncated, full 64bit is in kh.max_mapnr_64 */
914 dh->max_mapnr = cpu_convert_to_target32(MIN(s->max_mapnr, UINT_MAX),
916 dh->nr_cpus = cpu_convert_to_target32(s->nr_cpus, endian);
/* two bitmaps (1st and 2nd) are stored, hence the factor of 2 */
917 bitmap_blocks = DIV_ROUND_UP(s->len_dump_bitmap, block_size) * 2;
918 dh->bitmap_blocks = cpu_convert_to_target32(bitmap_blocks, endian);
919 memcpy(&(dh->utsname.machine), "x86_64", 6);
921 if (s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) {
922 status |= DUMP_DH_COMPRESSED_ZLIB;
925 if (s->flag_compress & DUMP_DH_COMPRESSED_LZO) {
926 status |= DUMP_DH_COMPRESSED_LZO;
930 if (s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) {
931 status |= DUMP_DH_COMPRESSED_SNAPPY;
934 dh->status = cpu_convert_to_target32(status, endian);
936 if (write_buffer(s->fd, 0, dh, size) < 0) {
937 dump_error(s, "dump: failed to write disk dump header.\n");
942 /* write sub header */
943 size = sizeof(KdumpSubHeader64);
944 kh = g_malloc0(size);
946 /* 64bit max_mapnr_64 */
947 kh->max_mapnr_64 = cpu_convert_to_target64(s->max_mapnr, endian);
948 kh->phys_base = cpu_convert_to_target64(PHYS_BASE, endian);
949 kh->dump_level = cpu_convert_to_target32(DUMP_LEVEL, endian);
/* notes follow the sub-header structure inside the sub-header blocks */
951 offset_note = DISKDUMP_HEADER_BLOCKS * block_size + size;
952 kh->offset_note = cpu_convert_to_target64(offset_note, endian);
953 kh->note_size = cpu_convert_to_target64(s->note_size, endian);
955 if (write_buffer(s->fd, DISKDUMP_HEADER_BLOCKS *
956 block_size, kh, size) < 0) {
957 dump_error(s, "dump: failed to write kdump sub header.\n");
963 s->note_buf = g_malloc0(s->note_size);
964 s->note_buf_offset = 0;
966 /* use s->note_buf to store notes temporarily */
967 if (write_elf64_notes(buf_write_note, s) < 0) {
972 if (write_buffer(s->fd, offset_note, s->note_buf,
974 dump_error(s, "dump: failed to write notes");
979 /* get offset of dump_bitmap */
980 s->offset_dump_bitmap = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size) *
983 /* get offset of page */
984 s->offset_page = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size + bitmap_blocks) *
995 static int write_dump_header(DumpState *s)
997 if (s->dump_info.d_machine == EM_386) {
998 return create_header32(s);
1000 return create_header64(s);
1005 * Set bits in the dump_bitmap sequentially.  Bits before last_pfn must not
1006 * be rewritten, so to set the very first bit pass last_pfn and pfn as 0.
1007 * set_dump_bitmap always leaves the most recently set bit un-synced;
1008 * setting bit (last_pfn + sizeof(buf) * 8) to 0 flushes the buffered
1009 * content into the vmcore, i.e. synchronizes the un-synced bits.
1011 static int set_dump_bitmap(uint64_t last_pfn, uint64_t pfn, bool value,
1012 uint8_t *buf, DumpState *s)
1014 off_t old_offset, new_offset;
1015 off_t offset_bitmap1, offset_bitmap2;
1018 /* should not set the previous place */
1019 assert(last_pfn <= pfn);
1022 * if the bit needed to be set is not cached in buf, flush the data in buf
1023 * to vmcore firstly.
1024 * making new_offset be bigger than old_offset can also sync remained data
1027 old_offset = BUFSIZE_BITMAP * (last_pfn / PFN_BUFBITMAP);
1028 new_offset = BUFSIZE_BITMAP * (pfn / PFN_BUFBITMAP);
1030 while (old_offset < new_offset) {
1031 /* calculate the offset and write dump_bitmap */
1032 offset_bitmap1 = s->offset_dump_bitmap + old_offset;
1033 if (write_buffer(s->fd, offset_bitmap1, buf,
1034 BUFSIZE_BITMAP) < 0) {
1038 /* dump level 1 is chosen, so 1st and 2nd bitmap are same */
1039 offset_bitmap2 = s->offset_dump_bitmap + s->len_dump_bitmap +
1041 if (write_buffer(s->fd, offset_bitmap2, buf,
1042 BUFSIZE_BITMAP) < 0) {
/* buffer flushed: reset it for the next PFN_BUFBITMAP window */
1046 memset(buf, 0, BUFSIZE_BITMAP);
1047 old_offset += BUFSIZE_BITMAP;
1050 /* get the exact place of the bit in the buf, and set it */
1051 byte = (pfn % PFN_BUFBITMAP) / CHAR_BIT;
1052 bit = (pfn % PFN_BUFBITMAP) % CHAR_BIT;
1054 buf[byte] |= 1u << bit;
1056 buf[byte] &= ~(1u << bit);
1063 * Examine every page and return the page frame number and the address of
1064 * the page.  bufptr can be NULL.  note: the blocks here are supposed to
1065 * reflect guest-phys blocks, so block->target_start and block->target_end
1066 * should be integral multiples of the target page size.
1068 static bool get_next_page(GuestPhysBlock **blockptr, uint64_t *pfnptr,
1069 uint8_t **bufptr, DumpState *s)
1071 GuestPhysBlock *block = *blockptr;
1075 /* block == NULL means the start of the iteration */
1077 block = QTAILQ_FIRST(&s->guest_phys_blocks.head);
1079 assert(block->target_start % s->page_size == 0);
1080 assert(block->target_end % s->page_size == 0);
1081 *pfnptr = paddr_to_pfn(block->target_start, s->page_shift);
1083 *bufptr = block->host_addr;
/* advance one page within the current block */
1088 *pfnptr = *pfnptr + 1;
1089 addr = pfn_to_paddr(*pfnptr, s->page_shift);
1091 if ((addr >= block->target_start) &&
1092 (addr + s->page_size <= block->target_end)) {
1093 buf = block->host_addr + (addr - block->target_start);
1095 /* the next page is in the next block */
1096 block = QTAILQ_NEXT(block, next);
1101 assert(block->target_start % s->page_size == 0);
1102 assert(block->target_end % s->page_size == 0);
1103 *pfnptr = paddr_to_pfn(block->target_start, s->page_shift);
1104 buf = block->host_addr;
/*
 * Walk every guest page, marking each present page in the dump bitmap via
 * set_dump_bitmap(), then force a final flush so the trailing un-synced
 * bits reach the vmcore.  Also records the dumpable page count in
 * s->num_dumpable.  NOTE(review): initialization of last_pfn/num_dumpable
 * and the per-page bookkeeping lines are not visible in this truncated
 * chunk.
 */
1114 static int write_dump_bitmap(DumpState *s)
1117 uint64_t last_pfn, pfn;
1118 void *dump_bitmap_buf;
1119 size_t num_dumpable;
1120 GuestPhysBlock *block_iter = NULL;
1122 /* dump_bitmap_buf is used to store dump_bitmap temporarily */
1123 dump_bitmap_buf = g_malloc0(BUFSIZE_BITMAP);
1129 * exam memory page by page, and set the bit in dump_bitmap corresponded
1130 * to the existing page.
1132 while (get_next_page(&block_iter, &pfn, NULL, s)) {
1133 ret = set_dump_bitmap(last_pfn, pfn, true, dump_bitmap_buf, s);
1135 dump_error(s, "dump: failed to set dump_bitmap.\n");
1145 * set_dump_bitmap will always leave the recently set bit un-sync. Here we
1146 * set last_pfn + PFN_BUFBITMAP to 0 and those set but un-sync bit will be
1147 * synchronized into vmcore.
1149 if (num_dumpable > 0) {
1150 ret = set_dump_bitmap(last_pfn, last_pfn + PFN_BUFBITMAP, false,
1151 dump_bitmap_buf, s);
1153 dump_error(s, "dump: failed to sync dump_bitmap.\n");
1159 /* number of dumpable pages that will be dumped later */
1160 s->num_dumpable = num_dumpable;
1163 g_free(dump_bitmap_buf);
1168 static void prepare_data_cache(DataCache *data_cache, DumpState *s,
1171 data_cache->fd = s->fd;
1172 data_cache->data_size = 0;
1173 data_cache->buf_size = BUFSIZE_DATA_CACHE;
1174 data_cache->buf = g_malloc0(BUFSIZE_DATA_CACHE);
1175 data_cache->offset = offset;
/*
 * Buffered write: append @size bytes from @buf into dc->buf, flushing the
 * cache to dc->fd (via write_buffer) first when it cannot hold the new
 * data, or unconditionally when @flag_sync is set and data is pending.
 */
1178 static int write_cache(DataCache *dc, const void *buf, size_t size,
1182 * dc->buf_size should not be less than size, otherwise dc will never be
1185 assert(size <= dc->buf_size);
1188 * if flag_sync is set, synchronize data in dc->buf into vmcore.
1189 * otherwise check if the space is enough for caching data in buf, if not,
1190 * write the data in dc->buf to dc->fd and reset dc->buf
1192 if ((!flag_sync && dc->data_size + size > dc->buf_size) ||
1193 (flag_sync && dc->data_size > 0)) {
1194 if (write_buffer(dc->fd, dc->offset, dc->buf, dc->data_size) < 0) {
/* advance the file position past what was just flushed */
1198 dc->offset += dc->data_size;
1203 memcpy(dc->buf + dc->data_size, buf, size);
1204 dc->data_size += size;
1210 static void free_data_cache(DataCache *data_cache)
1212 g_free(data_cache->buf);
/*
 * Select the first block to dump and record it in s->next_block, setting
 * s->start to the in-block offset.  Without a filter this is simply the
 * first guest block; with one, the first block intersecting
 * [s->begin, s->begin + s->length).  Callers treat a return of -1 as
 * "no block in range" (see dump_init); the return statements themselves
 * are on lines not visible in this truncated chunk.
 */
1215 static ram_addr_t get_start_block(DumpState *s)
1217 GuestPhysBlock *block;
1219 if (!s->has_filter) {
1220 s->next_block = QTAILQ_FIRST(&s->guest_phys_blocks.head);
1224 QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) {
1225 if (block->target_start >= s->begin + s->length ||
1226 block->target_end <= s->begin) {
1227 /* This block is out of the range */
1231 s->next_block = block;
/* filter starts inside this block: skip the leading part */
1232 if (s->begin > block->target_start) {
1233 s->start = s->begin - block->target_start;
1243 static void get_max_mapnr(DumpState *s)
1245 GuestPhysBlock *last_block;
1247 last_block = QTAILQ_LAST(&s->guest_phys_blocks.head, GuestPhysBlockHead);
1248 s->max_mapnr = paddr_to_pfn(last_block->target_end, s->page_shift);
/*
 * Prepare @s for a dump: stop the VM, sync CPU state from KVM, snapshot
 * the guest-physical blocks, query arch dump info and note size, build
 * the memory-mapping list (page-table walk when @paging, flat otherwise),
 * apply the optional [begin, begin+length) filter, then precompute the
 * program-header/section counts and the ELF layout's memory offset.
 */
1251 static int dump_init(DumpState *s, int fd, bool paging, bool has_filter,
1252 int64_t begin, int64_t length, Error **errp)
1259 if (runstate_is_running()) {
1260 vm_stop(RUN_STATE_SAVE_VM);
1266 /* If we use KVM, we should synchronize the registers before we get dump
1267 * info or physmap info.
1269 cpu_synchronize_all_states();
1277 s->has_filter = has_filter;
1281 guest_phys_blocks_init(&s->guest_phys_blocks);
1282 guest_phys_blocks_append(&s->guest_phys_blocks);
1284 s->start = get_start_block(s);
1285 if (s->start == -1) {
1286 error_set(errp, QERR_INVALID_PARAMETER, "begin");
1290 /* get dump info: endian, class and architecture.
1291 * If the target architecture is not supported, cpu_get_dump_info() will
1294 ret = cpu_get_dump_info(&s->dump_info, &s->guest_phys_blocks);
1296 error_set(errp, QERR_UNSUPPORTED);
1300 s->note_size = cpu_get_note_size(s->dump_info.d_class,
1301 s->dump_info.d_machine, nr_cpus);
1302 if (s->note_size < 0) {
1303 error_set(errp, QERR_UNSUPPORTED);
1307 /* get memory mapping */
1308 memory_mapping_list_init(&s->list);
1310 qemu_get_guest_memory_mapping(&s->list, &s->guest_phys_blocks, &err);
1312 error_propagate(errp, err);
1316 qemu_get_guest_simple_memory_mapping(&s->list, &s->guest_phys_blocks);
1319 s->nr_cpus = nr_cpus;
1320 s->page_size = TARGET_PAGE_SIZE;
/* NOTE(review): ffs() takes int -- fine while page_size fits in int, but
 * a ctz-style helper would be cleaner; confirm against newer QEMU. */
1321 s->page_shift = ffs(s->page_size) - 1;
/* bitmap length: one bit per page, rounded up to whole pages */
1326 tmp = DIV_ROUND_UP(DIV_ROUND_UP(s->max_mapnr, CHAR_BIT), s->page_size);
1327 s->len_dump_bitmap = tmp * s->page_size;
1329 if (s->has_filter) {
1330 memory_mapping_filter(&s->list, s->begin, s->length);
1334 * calculate phdr_num
1336 * the type of ehdr->e_phnum is uint16_t, so we should avoid overflow
1338 s->phdr_num = 1; /* PT_NOTE */
1339 if (s->list.num < UINT16_MAX - 2) {
1340 s->phdr_num += s->list.num;
1341 s->have_section = false;
/* too many mappings: use PN_XNUM and put the real count in sh_info */
1343 s->have_section = true;
1344 s->phdr_num = PN_XNUM;
1345 s->sh_info = 1; /* PT_NOTE */
1347 /* the type of shdr->sh_info is uint32_t, so we should avoid overflow */
1348 if (s->list.num <= UINT32_MAX - 1) {
1349 s->sh_info += s->list.num;
1351 s->sh_info = UINT32_MAX;
/* memory contents follow ehdr + phdrs (+ optional shdr) + notes */
1355 if (s->dump_info.d_class == ELFCLASS64) {
1356 if (s->have_section) {
1357 s->memory_offset = sizeof(Elf64_Ehdr) +
1358 sizeof(Elf64_Phdr) * s->sh_info +
1359 sizeof(Elf64_Shdr) + s->note_size;
1361 s->memory_offset = sizeof(Elf64_Ehdr) +
1362 sizeof(Elf64_Phdr) * s->phdr_num + s->note_size;
1365 if (s->have_section) {
1366 s->memory_offset = sizeof(Elf32_Ehdr) +
1367 sizeof(Elf32_Phdr) * s->sh_info +
1368 sizeof(Elf32_Shdr) + s->note_size;
1370 s->memory_offset = sizeof(Elf32_Ehdr) +
1371 sizeof(Elf32_Phdr) * s->phdr_num + s->note_size;
1378 guest_phys_blocks_free(&s->guest_phys_blocks);
/*
 * QMP dump-guest-memory command entry point: validates that begin/length
 * are given together, opens the destination ("fd:NAME" via the monitor or
 * "file:PATH" created 0600), then runs dump_init() + create_vmcore().
 * NOTE(review): this chunk ends mid-function; the final cleanup path is
 * not visible here.
 */
1387 void qmp_dump_guest_memory(bool paging, const char *file, bool has_begin,
1388 int64_t begin, bool has_length, int64_t length,
1396 if (has_begin && !has_length) {
1397 error_set(errp, QERR_MISSING_PARAMETER, "length");
1400 if (!has_begin && has_length) {
1401 error_set(errp, QERR_MISSING_PARAMETER, "begin");
1406 if (strstart(file, "fd:", &p)) {
1407 fd = monitor_get_fd(cur_mon, p, errp);
1414 if (strstart(file, "file:", &p)) {
1415 fd = qemu_open(p, O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, S_IRUSR);
1417 error_setg_file_open(errp, errno, p);
1423 error_set(errp, QERR_INVALID_PARAMETER, "protocol");
1427 s = g_malloc0(sizeof(DumpState));
1429 ret = dump_init(s, fd, paging, has_begin, begin, length, errp);
1435 if (create_vmcore(s) < 0 && !error_is_set(s->errp)) {
1436 error_set(errp, QERR_IO_ERROR);