/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include <stdint.h>
#include <stdarg.h>
#include <stdlib.h>
#ifndef _WIN32
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include "config.h"
#include "monitor/monitor.h"
#include "sysemu/sysemu.h"
#include "qemu/bitops.h"
#include "qemu/bitmap.h"
#include "sysemu/arch_init.h"
#include "audio/audio.h"
#include "hw/i386/pc.h"
#include "hw/pci/pci.h"
#include "hw/audio/audio.h"
#include "sysemu/kvm.h"
#include "migration/migration.h"
#include "hw/i386/smbios.h"
#include "exec/address-spaces.h"
#include "hw/audio/pcspk.h"
#include "migration/page_cache.h"
#include "qemu/config-file.h"
#include "qmp-commands.h"
#include "trace.h"
#include "exec/cpu-all.h"
#include "exec/ram_addr.h"
#include "hw/acpi/acpi.h"
#include "qemu/host-utils.h"

#ifdef DEBUG_ARCH_INIT
#define DPRINTF(fmt, ...) \
    do { fprintf(stdout, "arch_init: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#ifdef TARGET_SPARC
int graphic_width = 1024;
int graphic_height = 768;
int graphic_depth = 8;
#else
int graphic_width = 800;
int graphic_height = 600;
int graphic_depth = 32;
#endif


#if defined(TARGET_ALPHA)
#define QEMU_ARCH QEMU_ARCH_ALPHA
#elif defined(TARGET_ARM)
#define QEMU_ARCH QEMU_ARCH_ARM
#elif defined(TARGET_CRIS)
#define QEMU_ARCH QEMU_ARCH_CRIS
#elif defined(TARGET_I386)
#define QEMU_ARCH QEMU_ARCH_I386
#elif defined(TARGET_M68K)
#define QEMU_ARCH QEMU_ARCH_M68K
#elif defined(TARGET_LM32)
#define QEMU_ARCH QEMU_ARCH_LM32
#elif defined(TARGET_MICROBLAZE)
#define QEMU_ARCH QEMU_ARCH_MICROBLAZE
#elif defined(TARGET_MIPS)
#define QEMU_ARCH QEMU_ARCH_MIPS
#elif defined(TARGET_MOXIE)
#define QEMU_ARCH QEMU_ARCH_MOXIE
#elif defined(TARGET_OPENRISC)
#define QEMU_ARCH QEMU_ARCH_OPENRISC
#elif defined(TARGET_PPC)
#define QEMU_ARCH QEMU_ARCH_PPC
#elif defined(TARGET_S390X)
#define QEMU_ARCH QEMU_ARCH_S390X
#elif defined(TARGET_SH4)
#define QEMU_ARCH QEMU_ARCH_SH4
#elif defined(TARGET_SPARC)
#define QEMU_ARCH QEMU_ARCH_SPARC
#elif defined(TARGET_XTENSA)
#define QEMU_ARCH QEMU_ARCH_XTENSA
#elif defined(TARGET_UNICORE32)
#define QEMU_ARCH QEMU_ARCH_UNICORE32
#endif

const uint32_t arch_type = QEMU_ARCH;
static bool mig_throttle_on;
static int dirty_rate_high_cnt;
static void check_guest_throttling(void);

/***********************************************************/
/* ram save/restore */

#define RAM_SAVE_FLAG_FULL     0x01 /* Obsolete, not used anymore */
#define RAM_SAVE_FLAG_COMPRESS 0x02
#define RAM_SAVE_FLAG_MEM_SIZE 0x04
#define RAM_SAVE_FLAG_PAGE     0x08
#define RAM_SAVE_FLAG_EOS      0x10
#define RAM_SAVE_FLAG_CONTINUE 0x20
#define RAM_SAVE_FLAG_XBZRLE   0x40
/* 0x80 is reserved in migration.h; start with 0x100 next */

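/*
 * Rough sketch of a page record on the wire, as produced by save_block_hdr()
 * and the senders below (see ram_load() for the reader side):
 *
 *   be64  offset | RAM_SAVE_FLAG_* (| RAM_SAVE_FLAG_CONTINUE)
 *   u8    strlen(idstr)  \  only when CONTINUE is not set, i.e. the page
 *   [...] idstr          /  comes from a different block than the last one
 *   then, depending on the flag:
 *     COMPRESS: u8 fill byte (the whole page repeats this byte)
 *     PAGE:     TARGET_PAGE_SIZE raw bytes
 *     XBZRLE:   u8 ENCODING_FLAG_XBZRLE, be16 encoded length, encoded data
 */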

static struct defconfig_file {
    const char *filename;
    /* Indicates it is a user config file (disabled by -no-user-config) */
    bool userconfig;
} default_config_files[] = {
    { CONFIG_QEMU_CONFDIR "/qemu.conf",                   true },
    { CONFIG_QEMU_CONFDIR "/target-" TARGET_NAME ".conf", true },
    { NULL }, /* end of list */
};


int qemu_read_default_config_files(bool userconfig)
{
    int ret;
    struct defconfig_file *f;

    for (f = default_config_files; f->filename; f++) {
        if (!userconfig && f->userconfig) {
            continue;
        }
        ret = qemu_read_config_file(f->filename);
        if (ret < 0 && ret != -ENOENT) {
            return ret;
        }
    }

    return 0;
}

static inline bool is_zero_range(uint8_t *p, uint64_t size)
{
    return buffer_find_nonzero_offset(p, size) == size;
}

/* This struct contains the XBZRLE cache and a static page
   used by the compression */
static struct {
    /* buffer used for XBZRLE encoding */
    uint8_t *encoded_buf;
    /* buffer for storing page content */
    uint8_t *current_buf;
    /* Cache for XBZRLE */
    PageCache *cache;
} XBZRLE = {
    .encoded_buf = NULL,
    .current_buf = NULL,
    .cache = NULL,
};
/* buffer used for XBZRLE decoding */
static uint8_t *xbzrle_decoded_buf;

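/*
 * Resize the XBZRLE page cache (new_size is given in bytes).  Returns the
 * resulting cache size in bytes, or -1 if new_size is smaller than one
 * target page.  When no cache has been created yet, new_size rounded down
 * to a power of two is returned.
 */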
int64_t xbzrle_cache_resize(int64_t new_size)
{
    if (new_size < TARGET_PAGE_SIZE) {
        return -1;
    }

    if (XBZRLE.cache != NULL) {
        return cache_resize(XBZRLE.cache, new_size / TARGET_PAGE_SIZE) *
            TARGET_PAGE_SIZE;
    }
    return pow2floor(new_size);
}

189
190 /* accounting for migration statistics */
191 typedef struct AccountingInfo {
192     uint64_t dup_pages;
193     uint64_t skipped_pages;
194     uint64_t norm_pages;
195     uint64_t iterations;
196     uint64_t xbzrle_bytes;
197     uint64_t xbzrle_pages;
198     uint64_t xbzrle_cache_miss;
199     uint64_t xbzrle_overflows;
200 } AccountingInfo;
201
202 static AccountingInfo acct_info;
203
204 static void acct_clear(void)
205 {
206     memset(&acct_info, 0, sizeof(acct_info));
207 }
208
209 uint64_t dup_mig_bytes_transferred(void)
210 {
211     return acct_info.dup_pages * TARGET_PAGE_SIZE;
212 }
213
214 uint64_t dup_mig_pages_transferred(void)
215 {
216     return acct_info.dup_pages;
217 }
218
219 uint64_t skipped_mig_bytes_transferred(void)
220 {
221     return acct_info.skipped_pages * TARGET_PAGE_SIZE;
222 }
223
224 uint64_t skipped_mig_pages_transferred(void)
225 {
226     return acct_info.skipped_pages;
227 }
228
229 uint64_t norm_mig_bytes_transferred(void)
230 {
231     return acct_info.norm_pages * TARGET_PAGE_SIZE;
232 }
233
234 uint64_t norm_mig_pages_transferred(void)
235 {
236     return acct_info.norm_pages;
237 }
238
239 uint64_t xbzrle_mig_bytes_transferred(void)
240 {
241     return acct_info.xbzrle_bytes;
242 }
243
244 uint64_t xbzrle_mig_pages_transferred(void)
245 {
246     return acct_info.xbzrle_pages;
247 }
248
249 uint64_t xbzrle_mig_pages_cache_miss(void)
250 {
251     return acct_info.xbzrle_cache_miss;
252 }
253
254 uint64_t xbzrle_mig_pages_overflow(void)
255 {
256     return acct_info.xbzrle_overflows;
257 }
258
static size_t save_block_hdr(QEMUFile *f, RAMBlock *block, ram_addr_t offset,
                             int cont, int flag)
{
    size_t size;

    qemu_put_be64(f, offset | cont | flag);
    size = 8;

    if (!cont) {
        qemu_put_byte(f, strlen(block->idstr));
        qemu_put_buffer(f, (uint8_t *)block->idstr,
                        strlen(block->idstr));
        size += 1 + strlen(block->idstr);
    }
    return size;
}

#define ENCODING_FLAG_XBZRLE 0x1

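/*
 * Try to send the page at current_addr as an XBZRLE delta against the copy
 * held in the XBZRLE page cache.
 *
 * Returns the number of bytes written to f, 0 if the page is unchanged with
 * respect to the cached copy, or -1 if it could not be sent this way
 * (cache miss or encoding overflow) and must go out as a full page.
 */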
static int save_xbzrle_page(QEMUFile *f, uint8_t *current_data,
                            ram_addr_t current_addr, RAMBlock *block,
                            ram_addr_t offset, int cont, bool last_stage)
{
    int encoded_len = 0, bytes_sent = -1;
    uint8_t *prev_cached_page;

    if (!cache_is_cached(XBZRLE.cache, current_addr)) {
        if (!last_stage) {
            cache_insert(XBZRLE.cache, current_addr, current_data);
        }
        acct_info.xbzrle_cache_miss++;
        return -1;
    }

    prev_cached_page = get_cached_data(XBZRLE.cache, current_addr);

    /* save the current page content into the static buffer */
    memcpy(XBZRLE.current_buf, current_data, TARGET_PAGE_SIZE);

    /* XBZRLE encoding (if there is no overflow) */
    encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf,
                                       TARGET_PAGE_SIZE, XBZRLE.encoded_buf,
                                       TARGET_PAGE_SIZE);
    if (encoded_len == 0) {
        DPRINTF("Skipping unmodified page\n");
        return 0;
    } else if (encoded_len == -1) {
        DPRINTF("Overflow\n");
        acct_info.xbzrle_overflows++;
        /* update data in the cache */
        memcpy(prev_cached_page, current_data, TARGET_PAGE_SIZE);
        return -1;
    }

    /* we need to update the data in the cache, so that the next XBZRLE
       encoding is done against the data we have just sent */
    if (!last_stage) {
        memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
    }

    /* Send the XBZRLE-compressed page */
    bytes_sent = save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_XBZRLE);
    qemu_put_byte(f, ENCODING_FLAG_XBZRLE);
    qemu_put_be16(f, encoded_len);
    qemu_put_buffer(f, XBZRLE.encoded_buf, encoded_len);
    bytes_sent += encoded_len + 1 + 2;
    acct_info.xbzrle_pages++;
    acct_info.xbzrle_bytes += bytes_sent;

    return bytes_sent;
}


/* This is the last block that we have visited searching for dirty pages */
static RAMBlock *last_seen_block;
/* This is the last block from where we have sent data */
static RAMBlock *last_sent_block;
static ram_addr_t last_offset;
static unsigned long *migration_bitmap;
static uint64_t migration_dirty_pages;
static uint32_t last_version;
static bool ram_bulk_stage;

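/*
 * Find the next dirty page for 'mr' starting from 'start', clear its bit in
 * the migration bitmap and return its offset within the region.  During the
 * bulk stage the bitmap is not searched (every page is still dirty); the
 * scan simply advances to the following page.
 */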
static inline
ram_addr_t migration_bitmap_find_and_reset_dirty(MemoryRegion *mr,
                                                 ram_addr_t start)
{
    unsigned long base = mr->ram_addr >> TARGET_PAGE_BITS;
    unsigned long nr = base + (start >> TARGET_PAGE_BITS);
    uint64_t mr_size = TARGET_PAGE_ALIGN(memory_region_size(mr));
    unsigned long size = base + (mr_size >> TARGET_PAGE_BITS);

    unsigned long next;

    if (ram_bulk_stage && nr > base) {
        next = nr + 1;
    } else {
        next = find_next_bit(migration_bitmap, size, nr);
    }

    if (next < size) {
        clear_bit(next, migration_bitmap);
        migration_dirty_pages--;
    }
    return (next - base) << TARGET_PAGE_BITS;
}

static inline bool migration_bitmap_set_dirty(ram_addr_t addr)
{
    bool ret;
    int nr = addr >> TARGET_PAGE_BITS;

    ret = test_and_set_bit(nr, migration_bitmap);

    if (!ret) {
        migration_dirty_pages++;
    }
    return ret;
}

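/*
 * Pull dirty bits for [start, start + length) from the memory API's
 * DIRTY_MEMORY_MIGRATION bitmap into the migration bitmap.  When the range
 * starts on a bitmap word boundary the bits are merged a word at a time;
 * otherwise we fall back to testing and resetting page by page.
 */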
static void migration_bitmap_sync_range(ram_addr_t start, ram_addr_t length)
{
    ram_addr_t addr;
    unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);

    /* Is the start address aligned to the start of a bitmap word? */
    if (((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) {
        int k;
        int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
        unsigned long *src = ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION];

        for (k = page; k < page + nr; k++) {
            if (src[k]) {
                unsigned long new_dirty;
                new_dirty = ~migration_bitmap[k];
                migration_bitmap[k] |= src[k];
                new_dirty &= src[k];
                migration_dirty_pages += ctpopl(new_dirty);
                src[k] = 0;
            }
        }
    } else {
        for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
            if (cpu_physical_memory_get_dirty(start + addr,
                                              TARGET_PAGE_SIZE,
                                              DIRTY_MEMORY_MIGRATION)) {
                cpu_physical_memory_reset_dirty(start + addr,
                                                TARGET_PAGE_SIZE,
                                                DIRTY_MEMORY_MIGRATION);
                migration_bitmap_set_dirty(start + addr);
            }
        }
    }
}


/* Needs iothread lock! */

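/*
 * Synchronize the migration bitmap with the dirty log of all RAM blocks and
 * update the dirty page/byte rates on the current MigrationState.  Once per
 * second this also drives the auto-converge heuristic that decides whether
 * the guest should be throttled.
 */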
static void migration_bitmap_sync(void)
{
    RAMBlock *block;
    uint64_t num_dirty_pages_init = migration_dirty_pages;
    MigrationState *s = migrate_get_current();
    static int64_t start_time;
    static int64_t bytes_xfer_prev;
    static int64_t num_dirty_pages_period;
    int64_t end_time;
    int64_t bytes_xfer_now;

    if (!bytes_xfer_prev) {
        bytes_xfer_prev = ram_bytes_transferred();
    }

    if (!start_time) {
        start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    }

    trace_migration_bitmap_sync_start();
    address_space_sync_dirty_bitmap(&address_space_memory);

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        migration_bitmap_sync_range(block->mr->ram_addr, block->length);
    }
    trace_migration_bitmap_sync_end(migration_dirty_pages
                                    - num_dirty_pages_init);
    num_dirty_pages_period += migration_dirty_pages - num_dirty_pages_init;
    end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);

    /* more than 1 second = 1000 milliseconds */
    if (end_time > start_time + 1000) {
        if (migrate_auto_converge()) {
            /* The following detection logic can be refined later.  For now:
               check whether the dirtied bytes are 50% more than the
               approximate amount of bytes that just got transferred since
               the last time we were in this routine.  If that happens >N
               times (for now N == 4) we turn on the throttle-down logic. */
            bytes_xfer_now = ram_bytes_transferred();
            if (s->dirty_pages_rate &&
                (num_dirty_pages_period * TARGET_PAGE_SIZE >
                 (bytes_xfer_now - bytes_xfer_prev) / 2) &&
                (dirty_rate_high_cnt++ > 4)) {
                trace_migration_throttle();
                mig_throttle_on = true;
                dirty_rate_high_cnt = 0;
            }
            bytes_xfer_prev = bytes_xfer_now;
        } else {
            mig_throttle_on = false;
        }
        s->dirty_pages_rate = num_dirty_pages_period * 1000
            / (end_time - start_time);
        s->dirty_bytes_rate = s->dirty_pages_rate * TARGET_PAGE_SIZE;
        start_time = end_time;
        num_dirty_pages_period = 0;
    }
}

/*
 * ram_save_block: Writes a page of memory to the stream f
 *
 * Returns:  The number of bytes written.
 *           0 means no dirty pages
 */

static int ram_save_block(QEMUFile *f, bool last_stage)
{
    RAMBlock *block = last_seen_block;
    ram_addr_t offset = last_offset;
    bool complete_round = false;
    int bytes_sent = 0;
    MemoryRegion *mr;
    ram_addr_t current_addr;

    if (!block) {
        block = QTAILQ_FIRST(&ram_list.blocks);
    }

    while (true) {
        mr = block->mr;
        offset = migration_bitmap_find_and_reset_dirty(mr, offset);
        if (complete_round && block == last_seen_block &&
            offset >= last_offset) {
            break;
        }
        if (offset >= block->length) {
            offset = 0;
            block = QTAILQ_NEXT(block, next);
            if (!block) {
                block = QTAILQ_FIRST(&ram_list.blocks);
                complete_round = true;
                ram_bulk_stage = false;
            }
        } else {
            int ret;
            uint8_t *p;
            int cont = (block == last_sent_block) ?
                RAM_SAVE_FLAG_CONTINUE : 0;

            p = memory_region_get_ram_ptr(mr) + offset;

            /* When in doubt, send the page as a normal page */
            bytes_sent = -1;
            ret = ram_control_save_page(f, block->offset,
                               offset, TARGET_PAGE_SIZE, &bytes_sent);

            if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
                if (ret != RAM_SAVE_CONTROL_DELAYED) {
                    if (bytes_sent > 0) {
                        acct_info.norm_pages++;
                    } else if (bytes_sent == 0) {
                        acct_info.dup_pages++;
                    }
                }
            } else if (is_zero_range(p, TARGET_PAGE_SIZE)) {
                acct_info.dup_pages++;
                bytes_sent = save_block_hdr(f, block, offset, cont,
                                            RAM_SAVE_FLAG_COMPRESS);
                qemu_put_byte(f, 0);
                bytes_sent++;
            } else if (!ram_bulk_stage && migrate_use_xbzrle()) {
                current_addr = block->offset + offset;
                bytes_sent = save_xbzrle_page(f, p, current_addr, block,
                                              offset, cont, last_stage);
                if (!last_stage) {
                    p = get_cached_data(XBZRLE.cache, current_addr);
                }
            }

            /* XBZRLE overflow or normal page */
            if (bytes_sent == -1) {
                bytes_sent = save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_PAGE);
                qemu_put_buffer_async(f, p, TARGET_PAGE_SIZE);
                bytes_sent += TARGET_PAGE_SIZE;
                acct_info.norm_pages++;
            }

            /* if the page is unmodified, continue to the next one */
            if (bytes_sent > 0) {
                last_sent_block = block;
                break;
            }
        }
    }
    last_seen_block = block;
    last_offset = offset;

    return bytes_sent;
}

static uint64_t bytes_transferred;

void acct_update_position(QEMUFile *f, size_t size, bool zero)
{
    uint64_t pages = size / TARGET_PAGE_SIZE;
    if (zero) {
        acct_info.dup_pages += pages;
    } else {
        acct_info.norm_pages += pages;
        bytes_transferred += size;
        qemu_update_position(f, size);
    }
}

static ram_addr_t ram_save_remaining(void)
{
    return migration_dirty_pages;
}

uint64_t ram_bytes_remaining(void)
{
    return ram_save_remaining() * TARGET_PAGE_SIZE;
}

uint64_t ram_bytes_transferred(void)
{
    return bytes_transferred;
}

uint64_t ram_bytes_total(void)
{
    RAMBlock *block;
    uint64_t total = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        total += block->length;
    }

    return total;
}

void free_xbzrle_decoded_buf(void)
{
    g_free(xbzrle_decoded_buf);
    xbzrle_decoded_buf = NULL;
}

static void migration_end(void)
{
    if (migration_bitmap) {
        memory_global_dirty_log_stop();
        g_free(migration_bitmap);
        migration_bitmap = NULL;
    }

    if (XBZRLE.cache) {
        cache_fini(XBZRLE.cache);
        g_free(XBZRLE.cache);
        g_free(XBZRLE.encoded_buf);
        g_free(XBZRLE.current_buf);
        XBZRLE.cache = NULL;
        XBZRLE.encoded_buf = NULL;
        XBZRLE.current_buf = NULL;
    }
}

static void ram_migration_cancel(void *opaque)
{
    migration_end();
}

static void reset_ram_globals(void)
{
    last_seen_block = NULL;
    last_sent_block = NULL;
    last_offset = 0;
    last_version = ram_list.version;
    ram_bulk_stage = true;
}

#define MAX_WAIT 50 /* ms, half buffered_file limit */

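/*
 * Called once when migration starts: allocate the migration bitmap with
 * every page initially dirty, set up the XBZRLE cache if it is enabled,
 * start dirty logging and write the RAM block list (name and length of each
 * block) to the stream.
 */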
static int ram_save_setup(QEMUFile *f, void *opaque)
{
    RAMBlock *block;
    int64_t ram_pages = last_ram_offset() >> TARGET_PAGE_BITS;

    migration_bitmap = bitmap_new(ram_pages);
    bitmap_set(migration_bitmap, 0, ram_pages);
    migration_dirty_pages = ram_pages;
    mig_throttle_on = false;
    dirty_rate_high_cnt = 0;

    if (migrate_use_xbzrle()) {
        XBZRLE.cache = cache_init(migrate_xbzrle_cache_size() /
                                  TARGET_PAGE_SIZE,
                                  TARGET_PAGE_SIZE);
        if (!XBZRLE.cache) {
            DPRINTF("Error creating cache\n");
            return -1;
        }
        XBZRLE.encoded_buf = g_malloc0(TARGET_PAGE_SIZE);
        XBZRLE.current_buf = g_malloc(TARGET_PAGE_SIZE);
        acct_clear();
    }

    qemu_mutex_lock_iothread();
    qemu_mutex_lock_ramlist();
    bytes_transferred = 0;
    reset_ram_globals();

    memory_global_dirty_log_start();
    migration_bitmap_sync();
    qemu_mutex_unlock_iothread();

    qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE);

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        qemu_put_byte(f, strlen(block->idstr));
        qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
        qemu_put_be64(f, block->length);
    }

    qemu_mutex_unlock_ramlist();

    ram_control_before_iterate(f, RAM_CONTROL_SETUP);
    ram_control_after_iterate(f, RAM_CONTROL_SETUP);

    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);

    return 0;
}

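/*
 * One iteration of the live phase: keep sending dirty pages until the rate
 * limit is hit, there is nothing left to send, or we have spent more than
 * MAX_WAIT ms in this call.  Returns the number of page bytes written (the
 * trailing EOS marker is not counted), or a negative error code.
 */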
static int ram_save_iterate(QEMUFile *f, void *opaque)
{
    int ret;
    int i;
    int64_t t0;
    int total_sent = 0;

    qemu_mutex_lock_ramlist();

    if (ram_list.version != last_version) {
        reset_ram_globals();
    }

    ram_control_before_iterate(f, RAM_CONTROL_ROUND);

    t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    i = 0;
    while ((ret = qemu_file_rate_limit(f)) == 0) {
        int bytes_sent;

        bytes_sent = ram_save_block(f, false);
        /* no more blocks to send */
        if (bytes_sent == 0) {
            break;
        }
        total_sent += bytes_sent;
        acct_info.iterations++;
        check_guest_throttling();
        /* we want to check in the first loop iteration, just in case it
           was the first time and we had to sync the dirty bitmap.
           qemu_get_clock_ns() is a bit expensive, so we only check once
           every few iterations
        */
        if ((i & 63) == 0) {
            uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) / 1000000;
            if (t1 > MAX_WAIT) {
                DPRINTF("big wait: %" PRIu64 " milliseconds, %d iterations\n",
                        t1, i);
                break;
            }
        }
        i++;
    }

    qemu_mutex_unlock_ramlist();

    /*
     * Must occur before EOS (or any QEMUFile operation)
     * because of RDMA protocol.
     */
    ram_control_after_iterate(f, RAM_CONTROL_ROUND);

    bytes_transferred += total_sent;

    /*
     * Do not count these 8 bytes into total_sent, so that we can
     * return 0 if no page had been dirtied.
     */
    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
    bytes_transferred += 8;

    ret = qemu_file_get_error(f);
    if (ret < 0) {
        return ret;
    }

    return total_sent;
}

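/*
 * Final pass at the end of migration: synchronize the bitmap one last time
 * and flush every remaining dirty page, ignoring any bandwidth limit.
 */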
static int ram_save_complete(QEMUFile *f, void *opaque)
{
    qemu_mutex_lock_ramlist();
    migration_bitmap_sync();

    ram_control_before_iterate(f, RAM_CONTROL_FINISH);

    /* try transferring iterative blocks of memory */

    /* flush all remaining blocks regardless of rate limiting */
    while (true) {
        int bytes_sent;

        bytes_sent = ram_save_block(f, true);
        /* no more blocks to send */
        if (bytes_sent == 0) {
            break;
        }
        bytes_transferred += bytes_sent;
    }

    ram_control_after_iterate(f, RAM_CONTROL_FINISH);
    migration_end();

    qemu_mutex_unlock_ramlist();
    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);

    return 0;
}

static uint64_t ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size)
{
    uint64_t remaining_size;

    remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;

    if (remaining_size < max_size) {
        qemu_mutex_lock_iothread();
        migration_bitmap_sync();
        qemu_mutex_unlock_iothread();
        remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;
    }
    return remaining_size;
}

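/*
 * Read one XBZRLE-encoded page from the stream and decode it into the host
 * page at 'host'.  The record consists of a one-byte encoding flag (must be
 * ENCODING_FLAG_XBZRLE), a big-endian 16-bit encoded length and the encoded
 * data itself.  Returns 0 on success, -1 on a malformed record or a decode
 * failure.
 */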
static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
{
    int ret, rc = 0;
    unsigned int xh_len;
    int xh_flags;

    if (!xbzrle_decoded_buf) {
        xbzrle_decoded_buf = g_malloc(TARGET_PAGE_SIZE);
    }

    /* extract RLE header */
    xh_flags = qemu_get_byte(f);
    xh_len = qemu_get_be16(f);

    if (xh_flags != ENCODING_FLAG_XBZRLE) {
        fprintf(stderr, "Failed to load XBZRLE page - wrong compression!\n");
        return -1;
    }

    if (xh_len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "Failed to load XBZRLE page - len overflow!\n");
        return -1;
    }
    /* load data and decode */
    qemu_get_buffer(f, xbzrle_decoded_buf, xh_len);

    /* decode RLE */
    ret = xbzrle_decode_buffer(xbzrle_decoded_buf, xh_len, host,
                               TARGET_PAGE_SIZE);
    if (ret == -1) {
        fprintf(stderr, "Failed to load XBZRLE page - decode error!\n");
        rc = -1;
    } else if (ret > TARGET_PAGE_SIZE) {
        fprintf(stderr, "Failed to load XBZRLE page - size %d exceeds %d!\n",
                ret, TARGET_PAGE_SIZE);
        abort();
    }

    return rc;
}

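/*
 * Translate a (block id, offset) pair read from the stream into a host
 * pointer.  When RAM_SAVE_FLAG_CONTINUE is set, the page belongs to the same
 * block as the previous one, so the cached block is reused and no id string
 * is present in the stream.  Returns NULL if the block cannot be found.
 */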
static inline void *host_from_stream_offset(QEMUFile *f,
                                            ram_addr_t offset,
                                            int flags)
{
    static RAMBlock *block = NULL;
    char id[256];
    uint8_t len;

    if (flags & RAM_SAVE_FLAG_CONTINUE) {
        if (!block) {
            fprintf(stderr, "Ack, bad migration stream!\n");
            return NULL;
        }

        return memory_region_get_ram_ptr(block->mr) + offset;
    }

    len = qemu_get_byte(f);
    qemu_get_buffer(f, (uint8_t *)id, len);
    id[len] = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (!strncmp(id, block->idstr, sizeof(id))) {
            return memory_region_get_ram_ptr(block->mr) + offset;
        }
    }

    fprintf(stderr, "Can't find block %s!\n", id);
    return NULL;
}

/*
 * If a page (or a whole RDMA chunk) has been
 * determined to be zero, then zap it.
 */
void ram_handle_compressed(void *host, uint8_t ch, uint64_t size)
{
    if (ch != 0 || !is_zero_range(host, size)) {
        memset(host, ch, size);
    }
}

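/*
 * Incoming side of RAM migration: read records from the stream until the EOS
 * flag is seen.  Only stream version 4 is accepted; the MEM_SIZE record is
 * used to cross-check the RAM block list against the local configuration
 * before any page data is applied.
 */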
static int ram_load(QEMUFile *f, void *opaque, int version_id)
{
    ram_addr_t addr;
    int flags, ret = 0;
    int error;
    static uint64_t seq_iter;

    seq_iter++;

    if (version_id != 4) {
        return -EINVAL;
    }

    do {
        addr = qemu_get_be64(f);

        flags = addr & ~TARGET_PAGE_MASK;
        addr &= TARGET_PAGE_MASK;

        if (flags & RAM_SAVE_FLAG_MEM_SIZE) {
            if (version_id == 4) {
                /* Synchronize RAM block list */
                char id[256];
                ram_addr_t length;
                ram_addr_t total_ram_bytes = addr;

                while (total_ram_bytes) {
                    RAMBlock *block;
                    uint8_t len;

                    len = qemu_get_byte(f);
                    qemu_get_buffer(f, (uint8_t *)id, len);
                    id[len] = 0;
                    length = qemu_get_be64(f);

                    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
                        if (!strncmp(id, block->idstr, sizeof(id))) {
                            if (block->length != length) {
                                fprintf(stderr,
                                        "Length mismatch: %s: " RAM_ADDR_FMT
                                        " in != " RAM_ADDR_FMT "\n", id, length,
                                        block->length);
                                ret = -EINVAL;
                                goto done;
                            }
                            break;
                        }
                    }

                    if (!block) {
                        fprintf(stderr, "Unknown ramblock \"%s\", cannot "
                                "accept migration\n", id);
                        ret = -EINVAL;
                        goto done;
                    }

                    total_ram_bytes -= length;
                }
            }
        }

        if (flags & RAM_SAVE_FLAG_COMPRESS) {
            void *host;
            uint8_t ch;

            host = host_from_stream_offset(f, addr, flags);
            if (!host) {
                return -EINVAL;
            }

            ch = qemu_get_byte(f);
            ram_handle_compressed(host, ch, TARGET_PAGE_SIZE);
        } else if (flags & RAM_SAVE_FLAG_PAGE) {
            void *host;

            host = host_from_stream_offset(f, addr, flags);
            if (!host) {
                return -EINVAL;
            }

            qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
        } else if (flags & RAM_SAVE_FLAG_XBZRLE) {
            void *host = host_from_stream_offset(f, addr, flags);
            if (!host) {
                return -EINVAL;
            }

            if (load_xbzrle(f, addr, host) < 0) {
                ret = -EINVAL;
                goto done;
            }
        } else if (flags & RAM_SAVE_FLAG_HOOK) {
            ram_control_load_hook(f, flags);
        }
        error = qemu_file_get_error(f);
        if (error) {
            ret = error;
            goto done;
        }
    } while (!(flags & RAM_SAVE_FLAG_EOS));

done:
    DPRINTF("Completed load of VM with exit code %d seq iteration "
            "%" PRIu64 "\n", ret, seq_iter);
    return ret;
}

SaveVMHandlers savevm_ram_handlers = {
    .save_live_setup = ram_save_setup,
    .save_live_iterate = ram_save_iterate,
    .save_live_complete = ram_save_complete,
    .save_live_pending = ram_save_pending,
    .load_state = ram_load,
    .cancel = ram_migration_cancel,
};

struct soundhw {
    const char *name;
    const char *descr;
    int enabled;
    int isa;
    union {
        int (*init_isa) (ISABus *bus);
        int (*init_pci) (PCIBus *bus);
    } init;
};

static struct soundhw soundhw[9];
static int soundhw_count;

void isa_register_soundhw(const char *name, const char *descr,
                          int (*init_isa)(ISABus *bus))
{
    assert(soundhw_count < ARRAY_SIZE(soundhw) - 1);
    soundhw[soundhw_count].name = name;
    soundhw[soundhw_count].descr = descr;
    soundhw[soundhw_count].isa = 1;
    soundhw[soundhw_count].init.init_isa = init_isa;
    soundhw_count++;
}

void pci_register_soundhw(const char *name, const char *descr,
                          int (*init_pci)(PCIBus *bus))
{
    assert(soundhw_count < ARRAY_SIZE(soundhw) - 1);
    soundhw[soundhw_count].name = name;
    soundhw[soundhw_count].descr = descr;
    soundhw[soundhw_count].isa = 0;
    soundhw[soundhw_count].init.init_pci = init_pci;
    soundhw_count++;
}

void select_soundhw(const char *optarg)
{
    struct soundhw *c;

    if (is_help_option(optarg)) {
    show_valid_cards:

        if (soundhw_count) {
            printf("Valid sound card names (comma separated):\n");
            for (c = soundhw; c->name; ++c) {
                printf("%-11s %s\n", c->name, c->descr);
            }
            printf("\n-soundhw all will enable all of the above\n");
        } else {
            printf("Machine has no user-selectable audio hardware "
                   "(it may or may not have always-present audio hardware).\n");
        }
        exit(!is_help_option(optarg));
    } else {
        size_t l;
        const char *p;
        char *e;
        int bad_card = 0;

        if (!strcmp(optarg, "all")) {
            for (c = soundhw; c->name; ++c) {
                c->enabled = 1;
            }
            return;
        }

        p = optarg;
        while (*p) {
            e = strchr(p, ',');
            l = !e ? strlen(p) : (size_t) (e - p);

            for (c = soundhw; c->name; ++c) {
                if (!strncmp(c->name, p, l) && !c->name[l]) {
                    c->enabled = 1;
                    break;
                }
            }

            if (!c->name) {
                if (l > 80) {
                    fprintf(stderr,
                            "Unknown sound card name (too big to show)\n");
                } else {
                    fprintf(stderr, "Unknown sound card name `%.*s'\n",
                            (int) l, p);
                }
                bad_card = 1;
            }
            p += l + (e != NULL);
        }

        if (bad_card) {
            goto show_valid_cards;
        }
    }
}

void audio_init(void)
{
    struct soundhw *c;
    ISABus *isa_bus = (ISABus *) object_resolve_path_type("", TYPE_ISA_BUS, NULL);
    PCIBus *pci_bus = (PCIBus *) object_resolve_path_type("", TYPE_PCI_BUS, NULL);

    for (c = soundhw; c->name; ++c) {
        if (c->enabled) {
            if (c->isa) {
                if (!isa_bus) {
                    fprintf(stderr, "ISA bus not available for %s\n", c->name);
                    exit(1);
                }
                c->init.init_isa(isa_bus);
            } else {
                if (!pci_bus) {
                    fprintf(stderr, "PCI bus not available for %s\n", c->name);
                    exit(1);
                }
                c->init.init_pci(pci_bus);
            }
        }
    }
}

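/*
 * qemu_uuid_parse() below expects the canonical 36-character textual UUID
 * form, such as the argument of the -uuid option.  A minimal usage sketch
 * (illustrative UUID value; the output buffer must hold the 16 bytes that
 * the UUID_FMT conversions fill in):
 *
 *     uint8_t uuid[16];
 *     if (qemu_uuid_parse("550e8400-e29b-41d4-a716-446655440000", uuid) < 0) {
 *         error_report("invalid UUID string");
 *     }
 */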
int qemu_uuid_parse(const char *str, uint8_t *uuid)
{
    int ret;

    if (strlen(str) != 36) {
        return -1;
    }

    ret = sscanf(str, UUID_FMT, &uuid[0], &uuid[1], &uuid[2], &uuid[3],
                 &uuid[4], &uuid[5], &uuid[6], &uuid[7], &uuid[8], &uuid[9],
                 &uuid[10], &uuid[11], &uuid[12], &uuid[13], &uuid[14],
                 &uuid[15]);

    if (ret != 16) {
        return -1;
    }
    return 0;
}

void do_acpitable_option(const QemuOpts *opts)
{
#ifdef TARGET_I386
    Error *err = NULL;

    acpi_table_add(opts, &err);
    if (err) {
        error_report("Wrong acpi table provided: %s",
                     error_get_pretty(err));
        error_free(err);
        exit(1);
    }
#endif
}

void do_smbios_option(QemuOpts *opts)
{
#ifdef TARGET_I386
    smbios_entry_add(opts);
#endif
}

void cpudef_init(void)
{
#if defined(cpudef_setup)
    cpudef_setup(); /* parse cpu definitions in target config file */
#endif
}

int tcg_available(void)
{
    return 1;
}

int kvm_available(void)
{
#ifdef CONFIG_KVM
    return 1;
#else
    return 0;
#endif
}

int xen_available(void)
{
#ifdef CONFIG_XEN
    return 1;
#else
    return 0;
#endif
}


TargetInfo *qmp_query_target(Error **errp)
{
    TargetInfo *info = g_malloc0(sizeof(*info));

    info->arch = g_strdup(TARGET_NAME);

    return info;
}

/* Stub function that gets run on the vcpu when it is brought out of the
   VM to run inside qemu via async_run_on_cpu() */
static void mig_sleep_cpu(void *opq)
{
    qemu_mutex_unlock_iothread();
    g_usleep(30*1000);
    qemu_mutex_lock_iothread();
}

/* To reduce the dirty rate explicitly disallow the VCPUs from spending
   much time in the VM.  The migration thread will try to catch up.
   The workload will experience a performance drop.
*/
static void mig_throttle_guest_down(void)
{
    CPUState *cpu;

    qemu_mutex_lock_iothread();
    CPU_FOREACH(cpu) {
        async_run_on_cpu(cpu, mig_sleep_cpu, NULL);
    }
    qemu_mutex_unlock_iothread();
}

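/*
 * Called from the send loop after every page: while auto-converge has
 * switched throttling on, put the vCPUs to sleep for a while, but at most
 * once every 40 ms.
 */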
static void check_guest_throttling(void)
{
    static int64_t t0;
    int64_t        t1;

    if (!mig_throttle_on) {
        return;
    }

    if (!t0) {
        t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
        return;
    }

    t1 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

    /* If it has been more than 40 ms since the last time the guest
     * was throttled then do it again.
     */
    if (40 < (t1 - t0) / 1000000) {
        mig_throttle_guest_down();
        t0 = t1;
    }
}