[qemu.git] / dump.c
/*
 * QEMU dump
 *
 * Copyright Fujitsu, Corp. 2011, 2012
 *
 * Authors:
 *     Wen Congyang <[email protected]>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu-common.h"
#include "elf.h"
#include "cpu.h"
#include "exec/cpu-all.h"
#include "exec/hwaddr.h"
#include "monitor/monitor.h"
#include "sysemu/kvm.h"
#include "sysemu/dump.h"
#include "sysemu/sysemu.h"
#include "sysemu/memory_mapping.h"
#include "qapi/error.h"
#include "qmp-commands.h"
#include "exec/gdbstub.h"

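/*
 * The vmcore is written in the target's byte order (s->dump_info.d_endian),
 * so every multi-byte ELF field is converted from host order with the
 * helpers below before it is written out.
 */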
static uint16_t cpu_convert_to_target16(uint16_t val, int endian)
{
    if (endian == ELFDATA2LSB) {
        val = cpu_to_le16(val);
    } else {
        val = cpu_to_be16(val);
    }

    return val;
}

static uint32_t cpu_convert_to_target32(uint32_t val, int endian)
{
    if (endian == ELFDATA2LSB) {
        val = cpu_to_le32(val);
    } else {
        val = cpu_to_be32(val);
    }

    return val;
}

static uint64_t cpu_convert_to_target64(uint64_t val, int endian)
{
    if (endian == ELFDATA2LSB) {
        val = cpu_to_le64(val);
    } else {
        val = cpu_to_be64(val);
    }

    return val;
}

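/*
 * State of a single dump request: the target's dump info, the guest memory
 * mappings to emit as PT_LOAD entries, the layout of the vmcore (program
 * header count, optional section header, note size, memory offset), the
 * output fd, and the current position (block/start) plus the optional
 * begin/length filter used while iterating over guest RAM.
 */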
typedef struct DumpState {
    ArchDumpInfo dump_info;
    MemoryMappingList list;
    uint16_t phdr_num;
    uint32_t sh_info;
    bool have_section;
    bool resume;
    size_t note_size;
    hwaddr memory_offset;
    int fd;

    RAMBlock *block;
    ram_addr_t start;
    bool has_filter;
    int64_t begin;
    int64_t length;
    Error **errp;
} DumpState;

static int dump_cleanup(DumpState *s)
{
    int ret = 0;

    memory_mapping_list_free(&s->list);
    if (s->fd != -1) {
        close(s->fd);
    }
    if (s->resume) {
        vm_start();
    }

    return ret;
}

static void dump_error(DumpState *s, const char *reason)
{
    dump_cleanup(s);
}

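/*
 * Write callback handed to the per-arch note writers and used for all vmcore
 * output.  qemu_write_full() already retries short writes and EINTR, so a
 * partial count here indicates a real I/O error.
 */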
static int fd_write_vmcore(void *buf, size_t size, void *opaque)
{
    DumpState *s = opaque;
    size_t written_size;

    written_size = qemu_write_full(s->fd, buf, size);
    if (written_size != size) {
        return -1;
    }

    return 0;
}

static int write_elf64_header(DumpState *s)
{
    Elf64_Ehdr elf_header;
    int ret;
    int endian = s->dump_info.d_endian;

    memset(&elf_header, 0, sizeof(Elf64_Ehdr));
    memcpy(&elf_header, ELFMAG, SELFMAG);
    elf_header.e_ident[EI_CLASS] = ELFCLASS64;
    elf_header.e_ident[EI_DATA] = s->dump_info.d_endian;
    elf_header.e_ident[EI_VERSION] = EV_CURRENT;
    elf_header.e_type = cpu_convert_to_target16(ET_CORE, endian);
    elf_header.e_machine = cpu_convert_to_target16(s->dump_info.d_machine,
                                                   endian);
    elf_header.e_version = cpu_convert_to_target32(EV_CURRENT, endian);
    elf_header.e_ehsize = cpu_convert_to_target16(sizeof(elf_header), endian);
    elf_header.e_phoff = cpu_convert_to_target64(sizeof(Elf64_Ehdr), endian);
    elf_header.e_phentsize = cpu_convert_to_target16(sizeof(Elf64_Phdr),
                                                     endian);
    elf_header.e_phnum = cpu_convert_to_target16(s->phdr_num, endian);
    if (s->have_section) {
        uint64_t shoff = sizeof(Elf64_Ehdr) + sizeof(Elf64_Phdr) * s->sh_info;

        elf_header.e_shoff = cpu_convert_to_target64(shoff, endian);
        elf_header.e_shentsize = cpu_convert_to_target16(sizeof(Elf64_Shdr),
                                                         endian);
        elf_header.e_shnum = cpu_convert_to_target16(1, endian);
    }

    ret = fd_write_vmcore(&elf_header, sizeof(elf_header), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write elf header.\n");
        return -1;
    }

    return 0;
}

static int write_elf32_header(DumpState *s)
{
    Elf32_Ehdr elf_header;
    int ret;
    int endian = s->dump_info.d_endian;

    memset(&elf_header, 0, sizeof(Elf32_Ehdr));
    memcpy(&elf_header, ELFMAG, SELFMAG);
    elf_header.e_ident[EI_CLASS] = ELFCLASS32;
    elf_header.e_ident[EI_DATA] = endian;
    elf_header.e_ident[EI_VERSION] = EV_CURRENT;
    elf_header.e_type = cpu_convert_to_target16(ET_CORE, endian);
    elf_header.e_machine = cpu_convert_to_target16(s->dump_info.d_machine,
                                                   endian);
    elf_header.e_version = cpu_convert_to_target32(EV_CURRENT, endian);
    elf_header.e_ehsize = cpu_convert_to_target16(sizeof(elf_header), endian);
    elf_header.e_phoff = cpu_convert_to_target32(sizeof(Elf32_Ehdr), endian);
    elf_header.e_phentsize = cpu_convert_to_target16(sizeof(Elf32_Phdr),
                                                     endian);
    elf_header.e_phnum = cpu_convert_to_target16(s->phdr_num, endian);
    if (s->have_section) {
        uint32_t shoff = sizeof(Elf32_Ehdr) + sizeof(Elf32_Phdr) * s->sh_info;

        elf_header.e_shoff = cpu_convert_to_target32(shoff, endian);
        elf_header.e_shentsize = cpu_convert_to_target16(sizeof(Elf32_Shdr),
                                                         endian);
        elf_header.e_shnum = cpu_convert_to_target16(1, endian);
    }

    ret = fd_write_vmcore(&elf_header, sizeof(elf_header), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write elf header.\n");
        return -1;
    }

    return 0;
}

static int write_elf64_load(DumpState *s, MemoryMapping *memory_mapping,
                            int phdr_index, hwaddr offset)
{
    Elf64_Phdr phdr;
    int ret;
    int endian = s->dump_info.d_endian;

    memset(&phdr, 0, sizeof(Elf64_Phdr));
    phdr.p_type = cpu_convert_to_target32(PT_LOAD, endian);
    phdr.p_offset = cpu_convert_to_target64(offset, endian);
    phdr.p_paddr = cpu_convert_to_target64(memory_mapping->phys_addr, endian);
    if (offset == -1) {
        /* When the memory is not stored into vmcore, offset will be -1 */
        phdr.p_filesz = 0;
    } else {
        phdr.p_filesz = cpu_convert_to_target64(memory_mapping->length, endian);
    }
    phdr.p_memsz = cpu_convert_to_target64(memory_mapping->length, endian);
    phdr.p_vaddr = cpu_convert_to_target64(memory_mapping->virt_addr, endian);

    ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write program header table.\n");
        return -1;
    }

    return 0;
}

static int write_elf32_load(DumpState *s, MemoryMapping *memory_mapping,
                            int phdr_index, hwaddr offset)
{
    Elf32_Phdr phdr;
    int ret;
    int endian = s->dump_info.d_endian;

    memset(&phdr, 0, sizeof(Elf32_Phdr));
    phdr.p_type = cpu_convert_to_target32(PT_LOAD, endian);
    phdr.p_offset = cpu_convert_to_target32(offset, endian);
    phdr.p_paddr = cpu_convert_to_target32(memory_mapping->phys_addr, endian);
    if (offset == -1) {
        /* When the memory is not stored into vmcore, offset will be -1 */
        phdr.p_filesz = 0;
    } else {
        phdr.p_filesz = cpu_convert_to_target32(memory_mapping->length, endian);
    }
    phdr.p_memsz = cpu_convert_to_target32(memory_mapping->length, endian);
    phdr.p_vaddr = cpu_convert_to_target32(memory_mapping->virt_addr, endian);

    ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write program header table.\n");
        return -1;
    }

    return 0;
}

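/*
 * The PT_NOTE program header describes the ELF notes that are written just
 * before guest memory, so its file offset is memory_offset - note_size
 * (see the layout comment in dump_begin()).
 */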
static int write_elf64_note(DumpState *s)
{
    Elf64_Phdr phdr;
    int endian = s->dump_info.d_endian;
    hwaddr begin = s->memory_offset - s->note_size;
    int ret;

    memset(&phdr, 0, sizeof(Elf64_Phdr));
    phdr.p_type = cpu_convert_to_target32(PT_NOTE, endian);
    phdr.p_offset = cpu_convert_to_target64(begin, endian);
    phdr.p_paddr = 0;
    phdr.p_filesz = cpu_convert_to_target64(s->note_size, endian);
    phdr.p_memsz = cpu_convert_to_target64(s->note_size, endian);
    phdr.p_vaddr = 0;

    ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write program header table.\n");
        return -1;
    }

    return 0;
}

static int write_elf64_notes(DumpState *s)
{
    CPUArchState *env;
    CPUState *cpu;
    int ret;
    int id;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        cpu = ENV_GET_CPU(env);
        id = cpu_index(cpu);
        ret = cpu_write_elf64_note(fd_write_vmcore, env, id, s);
        if (ret < 0) {
            dump_error(s, "dump: failed to write elf notes.\n");
            return -1;
        }
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        ret = cpu_write_elf64_qemunote(fd_write_vmcore, env, s);
        if (ret < 0) {
            dump_error(s, "dump: failed to write CPU status.\n");
            return -1;
        }
    }

    return 0;
}

static int write_elf32_note(DumpState *s)
{
    hwaddr begin = s->memory_offset - s->note_size;
    Elf32_Phdr phdr;
    int endian = s->dump_info.d_endian;
    int ret;

    memset(&phdr, 0, sizeof(Elf32_Phdr));
    phdr.p_type = cpu_convert_to_target32(PT_NOTE, endian);
    phdr.p_offset = cpu_convert_to_target32(begin, endian);
    phdr.p_paddr = 0;
    phdr.p_filesz = cpu_convert_to_target32(s->note_size, endian);
    phdr.p_memsz = cpu_convert_to_target32(s->note_size, endian);
    phdr.p_vaddr = 0;

    ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write program header table.\n");
        return -1;
    }

    return 0;
}

static int write_elf32_notes(DumpState *s)
{
    CPUArchState *env;
    CPUState *cpu;
    int ret;
    int id;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        cpu = ENV_GET_CPU(env);
        id = cpu_index(cpu);
        ret = cpu_write_elf32_note(fd_write_vmcore, env, id, s);
        if (ret < 0) {
            dump_error(s, "dump: failed to write elf notes.\n");
            return -1;
        }
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        ret = cpu_write_elf32_qemunote(fd_write_vmcore, env, s);
        if (ret < 0) {
            dump_error(s, "dump: failed to write CPU status.\n");
            return -1;
        }
    }

    return 0;
}

static int write_elf_section(DumpState *s, int type)
{
    Elf32_Shdr shdr32;
    Elf64_Shdr shdr64;
    int endian = s->dump_info.d_endian;
    int shdr_size;
    void *shdr;
    int ret;

    if (type == 0) {
        shdr_size = sizeof(Elf32_Shdr);
        memset(&shdr32, 0, shdr_size);
        shdr32.sh_info = cpu_convert_to_target32(s->sh_info, endian);
        shdr = &shdr32;
    } else {
        shdr_size = sizeof(Elf64_Shdr);
        memset(&shdr64, 0, shdr_size);
        shdr64.sh_info = cpu_convert_to_target32(s->sh_info, endian);
        shdr = &shdr64;
    }

    ret = fd_write_vmcore(shdr, shdr_size, s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write section header table.\n");
        return -1;
    }

    return 0;
}

static int write_data(DumpState *s, void *buf, int length)
{
    int ret;

    ret = fd_write_vmcore(buf, length, s);
    if (ret < 0) {
        dump_error(s, "dump: failed to save memory.\n");
        return -1;
    }

    return 0;
}

/* write the memory to vmcore. 1 page per I/O. */
static int write_memory(DumpState *s, RAMBlock *block, ram_addr_t start,
                        int64_t size)
{
    int64_t i;
    int ret;

    for (i = 0; i < size / TARGET_PAGE_SIZE; i++) {
        ret = write_data(s, block->host + start + i * TARGET_PAGE_SIZE,
                         TARGET_PAGE_SIZE);
        if (ret < 0) {
            return ret;
        }
    }

    if ((size % TARGET_PAGE_SIZE) != 0) {
        ret = write_data(s, block->host + start + i * TARGET_PAGE_SIZE,
                         size % TARGET_PAGE_SIZE);
        if (ret < 0) {
            return ret;
        }
    }

    return 0;
}

/* get the memory's offset in the vmcore */
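/*
 * Guest RAM is written to the vmcore in RAMBlock order starting at
 * s->memory_offset, so the file offset of a physical address is
 * memory_offset plus the sizes of all preceding (filtered) blocks plus the
 * offset within its own block.  For example, assuming two 4 KiB blocks and
 * memory_offset == 0x2000, an address 0x100 bytes into the second block
 * maps to file offset 0x2000 + 0x1000 + 0x100 = 0x3100.
 */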
static hwaddr get_offset(hwaddr phys_addr, DumpState *s)
{
    RAMBlock *block;
    hwaddr offset = s->memory_offset;
    int64_t size_in_block, start;

    if (s->has_filter) {
        if (phys_addr < s->begin || phys_addr >= s->begin + s->length) {
            return -1;
        }
    }

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (s->has_filter) {
            if (block->offset >= s->begin + s->length ||
                block->offset + block->length <= s->begin) {
                /* This block is out of the range */
                continue;
            }

            if (s->begin <= block->offset) {
                start = block->offset;
            } else {
                start = s->begin;
            }

            size_in_block = block->length - (start - block->offset);
            if (s->begin + s->length < block->offset + block->length) {
                size_in_block -= block->offset + block->length -
                                 (s->begin + s->length);
            }
        } else {
            start = block->offset;
            size_in_block = block->length;
        }

        if (phys_addr >= start && phys_addr < start + size_in_block) {
            return phys_addr - start + offset;
        }

        offset += size_in_block;
    }

    return -1;
}

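/*
 * Write one PT_LOAD program header per memory mapping.  phdr_index starts
 * at 1 because program header 0 is the PT_NOTE entry written by
 * write_elf64_note()/write_elf32_note().
 */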
static int write_elf_loads(DumpState *s)
{
    hwaddr offset;
    MemoryMapping *memory_mapping;
    uint32_t phdr_index = 1;
    int ret;
    uint32_t max_index;

    if (s->have_section) {
        max_index = s->sh_info;
    } else {
        max_index = s->phdr_num;
    }

    QTAILQ_FOREACH(memory_mapping, &s->list.head, next) {
        offset = get_offset(memory_mapping->phys_addr, s);
        if (s->dump_info.d_class == ELFCLASS64) {
            ret = write_elf64_load(s, memory_mapping, phdr_index++, offset);
        } else {
            ret = write_elf32_load(s, memory_mapping, phdr_index++, offset);
        }

        if (ret < 0) {
            return -1;
        }

        if (phdr_index >= max_index) {
            break;
        }
    }

    return 0;
}

/* write the elf header, program headers, section header and elf notes to vmcore. */
static int dump_begin(DumpState *s)
{
    int ret;

    /*
     * the vmcore's format is:
     *   --------------
     *   |  elf header |
     *   --------------
     *   |  PT_NOTE    |
     *   --------------
     *   |  PT_LOAD    |
     *   --------------
     *   |  ......     |
     *   --------------
     *   |  PT_LOAD    |
     *   --------------
     *   |  sec_hdr    |
     *   --------------
     *   |  elf note   |
     *   --------------
     *   |  memory     |
     *   --------------
     *
     * we only know where the memory is saved after we write the elf notes
     * into the vmcore.
     */

    /* write elf header to vmcore */
    if (s->dump_info.d_class == ELFCLASS64) {
        ret = write_elf64_header(s);
    } else {
        ret = write_elf32_header(s);
    }
    if (ret < 0) {
        return -1;
    }

    if (s->dump_info.d_class == ELFCLASS64) {
        /* write PT_NOTE to vmcore */
        if (write_elf64_note(s) < 0) {
            return -1;
        }

        /* write all PT_LOAD to vmcore */
        if (write_elf_loads(s) < 0) {
            return -1;
        }

        /* write section to vmcore */
        if (s->have_section) {
            if (write_elf_section(s, 1) < 0) {
                return -1;
            }
        }

        /* write notes to vmcore */
        if (write_elf64_notes(s) < 0) {
            return -1;
        }

    } else {
        /* write PT_NOTE to vmcore */
        if (write_elf32_note(s) < 0) {
            return -1;
        }

        /* write all PT_LOAD to vmcore */
        if (write_elf_loads(s) < 0) {
            return -1;
        }

        /* write section to vmcore */
        if (s->have_section) {
            if (write_elf_section(s, 0) < 0) {
                return -1;
            }
        }

        /* write notes to vmcore */
        if (write_elf32_notes(s) < 0) {
            return -1;
        }
    }

    return 0;
}

/* the dump is complete: release resources and resume the VM if needed */
static int dump_completed(DumpState *s)
{
    dump_cleanup(s);
    return 0;
}

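/*
 * Advance s->block/s->start to the next RAM block that intersects the
 * optional begin/length filter.  Returns 1 when there are no more blocks
 * to dump, 0 otherwise.
 */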
static int get_next_block(DumpState *s, RAMBlock *block)
{
    while (1) {
        block = QTAILQ_NEXT(block, next);
        if (!block) {
            /* no more block */
            return 1;
        }

        s->start = 0;
        s->block = block;
        if (s->has_filter) {
            if (block->offset >= s->begin + s->length ||
                block->offset + block->length <= s->begin) {
                /* This block is out of the range */
                continue;
            }

            if (s->begin > block->offset) {
                s->start = s->begin - block->offset;
            }
        }

        return 0;
    }
}

/* write all memory to vmcore */
static int dump_iterate(DumpState *s)
{
    RAMBlock *block;
    int64_t size;
    int ret;

    while (1) {
        block = s->block;

        size = block->length;
        if (s->has_filter) {
            size -= s->start;
            if (s->begin + s->length < block->offset + block->length) {
                size -= block->offset + block->length - (s->begin + s->length);
            }
        }
        ret = write_memory(s, block, s->start, size);
        if (ret == -1) {
            return ret;
        }

        ret = get_next_block(s, block);
        if (ret == 1) {
            dump_completed(s);
            return 0;
        }
    }
}

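/*
 * Top-level flow: dump_begin() writes the ELF header, program/section
 * headers and notes, then dump_iterate() streams guest RAM block by block
 * and finishes with dump_completed().
 */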
static int create_vmcore(DumpState *s)
{
    int ret;

    ret = dump_begin(s);
    if (ret < 0) {
        return -1;
    }

    ret = dump_iterate(s);
    if (ret < 0) {
        return -1;
    }

    return 0;
}

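/*
 * Pick the first RAM block (and the starting offset within it) that
 * intersects the dump filter; without a filter this is simply the first
 * block.  Returns the starting offset, or -1 if the filter matches no
 * guest RAM at all.
 */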
static ram_addr_t get_start_block(DumpState *s)
{
    RAMBlock *block;

    if (!s->has_filter) {
        s->block = QTAILQ_FIRST(&ram_list.blocks);
        return 0;
    }

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset >= s->begin + s->length ||
            block->offset + block->length <= s->begin) {
            /* This block is out of the range */
            continue;
        }

        s->block = block;
        if (s->begin > block->offset) {
            s->start = s->begin - block->offset;
        } else {
            s->start = 0;
        }
        return s->start;
    }

    return -1;
}

static int dump_init(DumpState *s, int fd, bool paging, bool has_filter,
                     int64_t begin, int64_t length, Error **errp)
{
    CPUArchState *env;
    int nr_cpus;
    int ret;

    if (runstate_is_running()) {
        vm_stop(RUN_STATE_SAVE_VM);
        s->resume = true;
    } else {
        s->resume = false;
    }

    s->errp = errp;
    s->fd = fd;
    s->has_filter = has_filter;
    s->begin = begin;
    s->length = length;
    s->start = get_start_block(s);
    if (s->start == -1) {
        error_set(errp, QERR_INVALID_PARAMETER, "begin");
        goto cleanup;
    }

    /*
     * get dump info: endian, class and architecture.
     * If the target architecture is not supported, cpu_get_dump_info() will
     * return -1.
     *
     * If we use KVM, we should synchronize the registers before we get the
     * dump info.
     */
    nr_cpus = 0;
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        cpu_synchronize_state(env);
        nr_cpus++;
    }

    ret = cpu_get_dump_info(&s->dump_info);
    if (ret < 0) {
        error_set(errp, QERR_UNSUPPORTED);
        goto cleanup;
    }

    ret = cpu_get_note_size(s->dump_info.d_class,
                            s->dump_info.d_machine, nr_cpus);
    if (ret < 0) {
        error_set(errp, QERR_UNSUPPORTED);
        goto cleanup;
    }
    s->note_size = ret;

    /* get memory mapping */
    memory_mapping_list_init(&s->list);
    if (paging) {
        qemu_get_guest_memory_mapping(&s->list);
    } else {
        qemu_get_guest_simple_memory_mapping(&s->list);
    }

    if (s->has_filter) {
        memory_mapping_filter(&s->list, s->begin, s->length);
    }

    /*
     * calculate phdr_num
     *
     * the type of ehdr->e_phnum is uint16_t, so we should avoid overflow
     */
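    /*
     * When there are more mappings than fit in e_phnum, the ELF convention
     * is to set e_phnum to PN_XNUM (0xffff) and store the real program
     * header count in the sh_info field of section header 0, which is why a
     * one-entry section header table is emitted in that case (see
     * write_elf_section()).
     */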
    s->phdr_num = 1; /* PT_NOTE */
    if (s->list.num < UINT16_MAX - 2) {
        s->phdr_num += s->list.num;
        s->have_section = false;
    } else {
        s->have_section = true;
        s->phdr_num = PN_XNUM;
        s->sh_info = 1; /* PT_NOTE */

        /* the type of shdr->sh_info is uint32_t, so we should avoid overflow */
        if (s->list.num <= UINT32_MAX - 1) {
            s->sh_info += s->list.num;
        } else {
            s->sh_info = UINT32_MAX;
        }
    }

    if (s->dump_info.d_class == ELFCLASS64) {
        if (s->have_section) {
            s->memory_offset = sizeof(Elf64_Ehdr) +
                               sizeof(Elf64_Phdr) * s->sh_info +
                               sizeof(Elf64_Shdr) + s->note_size;
        } else {
            s->memory_offset = sizeof(Elf64_Ehdr) +
                               sizeof(Elf64_Phdr) * s->phdr_num + s->note_size;
        }
    } else {
        if (s->have_section) {
            s->memory_offset = sizeof(Elf32_Ehdr) +
                               sizeof(Elf32_Phdr) * s->sh_info +
                               sizeof(Elf32_Shdr) + s->note_size;
        } else {
            s->memory_offset = sizeof(Elf32_Ehdr) +
                               sizeof(Elf32_Phdr) * s->phdr_num + s->note_size;
        }
    }

    return 0;

cleanup:
    if (s->resume) {
        vm_start();
    }

    return -1;
}

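/*
 * QMP handler for the dump-guest-memory command.  The 'protocol' argument
 * (the 'file' parameter here) selects the output: "fd:name" reuses a file
 * descriptor previously added to the monitor, "file:path" opens a file.
 * A typical invocation (example invented for illustration) looks like:
 *
 *   { "execute": "dump-guest-memory",
 *     "arguments": { "paging": false, "protocol": "file:/tmp/vmcore" } }
 *
 * begin/length, when both are given, restrict the dump to a physical
 * address range.
 */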
void qmp_dump_guest_memory(bool paging, const char *file, bool has_begin,
                           int64_t begin, bool has_length, int64_t length,
                           Error **errp)
{
    const char *p;
    int fd = -1;
    DumpState *s;
    int ret;

    if (has_begin && !has_length) {
        error_set(errp, QERR_MISSING_PARAMETER, "length");
        return;
    }
    if (!has_begin && has_length) {
        error_set(errp, QERR_MISSING_PARAMETER, "begin");
        return;
    }

#if !defined(WIN32)
    if (strstart(file, "fd:", &p)) {
        fd = monitor_get_fd(cur_mon, p, errp);
        if (fd == -1) {
            return;
        }
    }
#endif

    if (strstart(file, "file:", &p)) {
        fd = qemu_open(p, O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, S_IRUSR);
        if (fd < 0) {
            error_set(errp, QERR_OPEN_FILE_FAILED, p);
            return;
        }
    }

    if (fd == -1) {
        error_set(errp, QERR_INVALID_PARAMETER, "protocol");
        return;
    }

    s = g_malloc(sizeof(DumpState));

    ret = dump_init(s, fd, paging, has_begin, begin, length, errp);
    if (ret < 0) {
        g_free(s);
        return;
    }

    if (create_vmcore(s) < 0 && !error_is_set(s->errp)) {
        error_set(errp, QERR_IO_ERROR);
    }

    g_free(s);
}