[qemu.git] / arch_init.c (blob at commit: Add check for cache size smaller than page size)
1 /*
2  * QEMU System Emulator
3  *
4  * Copyright (c) 2003-2008 Fabrice Bellard
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to deal
8  * in the Software without restriction, including without limitation the rights
9  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10  * copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22  * THE SOFTWARE.
23  */
24 #include <stdint.h>
25 #include <stdarg.h>
26 #include <stdlib.h>
27 #ifndef _WIN32
28 #include <sys/types.h>
29 #include <sys/mman.h>
30 #endif
31 #include "config.h"
32 #include "monitor/monitor.h"
33 #include "sysemu/sysemu.h"
34 #include "qemu/bitops.h"
35 #include "qemu/bitmap.h"
36 #include "sysemu/arch_init.h"
37 #include "audio/audio.h"
38 #include "hw/i386/pc.h"
39 #include "hw/pci/pci.h"
40 #include "hw/audio/audio.h"
41 #include "sysemu/kvm.h"
42 #include "migration/migration.h"
43 #include "hw/i386/smbios.h"
44 #include "exec/address-spaces.h"
45 #include "hw/audio/pcspk.h"
46 #include "migration/page_cache.h"
47 #include "qemu/config-file.h"
48 #include "qmp-commands.h"
49 #include "trace.h"
50 #include "exec/cpu-all.h"
51 #include "exec/ram_addr.h"
52 #include "hw/acpi/acpi.h"
53 #include "qemu/host-utils.h"
54
55 #ifdef DEBUG_ARCH_INIT
56 #define DPRINTF(fmt, ...) \
57     do { fprintf(stdout, "arch_init: " fmt, ## __VA_ARGS__); } while (0)
58 #else
59 #define DPRINTF(fmt, ...) \
60     do { } while (0)
61 #endif
62
63 #ifdef TARGET_SPARC
64 int graphic_width = 1024;
65 int graphic_height = 768;
66 int graphic_depth = 8;
67 #else
68 int graphic_width = 800;
69 int graphic_height = 600;
70 int graphic_depth = 32;
71 #endif
72
73
74 #if defined(TARGET_ALPHA)
75 #define QEMU_ARCH QEMU_ARCH_ALPHA
76 #elif defined(TARGET_ARM)
77 #define QEMU_ARCH QEMU_ARCH_ARM
78 #elif defined(TARGET_CRIS)
79 #define QEMU_ARCH QEMU_ARCH_CRIS
80 #elif defined(TARGET_I386)
81 #define QEMU_ARCH QEMU_ARCH_I386
82 #elif defined(TARGET_M68K)
83 #define QEMU_ARCH QEMU_ARCH_M68K
84 #elif defined(TARGET_LM32)
85 #define QEMU_ARCH QEMU_ARCH_LM32
86 #elif defined(TARGET_MICROBLAZE)
87 #define QEMU_ARCH QEMU_ARCH_MICROBLAZE
88 #elif defined(TARGET_MIPS)
89 #define QEMU_ARCH QEMU_ARCH_MIPS
90 #elif defined(TARGET_MOXIE)
91 #define QEMU_ARCH QEMU_ARCH_MOXIE
92 #elif defined(TARGET_OPENRISC)
93 #define QEMU_ARCH QEMU_ARCH_OPENRISC
94 #elif defined(TARGET_PPC)
95 #define QEMU_ARCH QEMU_ARCH_PPC
96 #elif defined(TARGET_S390X)
97 #define QEMU_ARCH QEMU_ARCH_S390X
98 #elif defined(TARGET_SH4)
99 #define QEMU_ARCH QEMU_ARCH_SH4
100 #elif defined(TARGET_SPARC)
101 #define QEMU_ARCH QEMU_ARCH_SPARC
102 #elif defined(TARGET_XTENSA)
103 #define QEMU_ARCH QEMU_ARCH_XTENSA
104 #elif defined(TARGET_UNICORE32)
105 #define QEMU_ARCH QEMU_ARCH_UNICORE32
106 #endif
107
108 const uint32_t arch_type = QEMU_ARCH;
109 static bool mig_throttle_on;
110 static int dirty_rate_high_cnt;
111 static void check_guest_throttling(void);
112
113 /***********************************************************/
114 /* ram save/restore */
115
116 #define RAM_SAVE_FLAG_FULL     0x01 /* Obsolete, not used anymore */
117 #define RAM_SAVE_FLAG_COMPRESS 0x02
118 #define RAM_SAVE_FLAG_MEM_SIZE 0x04
119 #define RAM_SAVE_FLAG_PAGE     0x08
120 #define RAM_SAVE_FLAG_EOS      0x10
121 #define RAM_SAVE_FLAG_CONTINUE 0x20
122 #define RAM_SAVE_FLAG_XBZRLE   0x40
123 /* 0x80 is reserved in migration.h; start with 0x100 for the next flag */
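/*
 * All of these flags travel in the low-order bits of the 64-bit page offset
 * written by save_block_hdr(): offsets are TARGET_PAGE_SIZE aligned, so the
 * bits below TARGET_PAGE_BITS are free.  The receive side splits them out
 * again in ram_load() with "flags = addr & ~TARGET_PAGE_MASK".
 */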
124
125
126 static struct defconfig_file {
127     const char *filename;
128     /* Indicates it is a user config file (disabled by -no-user-config) */
129     bool userconfig;
130 } default_config_files[] = {
131     { CONFIG_QEMU_CONFDIR "/qemu.conf",                   true },
132     { CONFIG_QEMU_CONFDIR "/target-" TARGET_NAME ".conf", true },
133     { NULL }, /* end of list */
134 };
135
136
137 int qemu_read_default_config_files(bool userconfig)
138 {
139     int ret;
140     struct defconfig_file *f;
141
142     for (f = default_config_files; f->filename; f++) {
143         if (!userconfig && f->userconfig) {
144             continue;
145         }
146         ret = qemu_read_config_file(f->filename);
147         if (ret < 0 && ret != -ENOENT) {
148             return ret;
149         }
150     }
151
152     return 0;
153 }
154
155 static inline bool is_zero_range(uint8_t *p, uint64_t size)
156 {
157     return buffer_find_nonzero_offset(p, size) == size;
158 }
159
160 /* This struct holds the XBZRLE cache and the static buffers
161    used by the compression */
162 static struct {
163     /* buffer used for XBZRLE encoding */
164     uint8_t *encoded_buf;
165     /* buffer for storing page content */
166     uint8_t *current_buf;
167     /* buffer used for XBZRLE decoding */
168     uint8_t *decoded_buf;
169     /* Cache for XBZRLE */
170     PageCache *cache;
171 } XBZRLE = {
172     .encoded_buf = NULL,
173     .current_buf = NULL,
174     .decoded_buf = NULL,
175     .cache = NULL,
176 };
177
178
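/*
 * Resize the XBZRLE page cache.  Requests smaller than one target page are
 * rejected with -1 (the check below).  If the cache already exists it is
 * resized and the resulting size in bytes is returned; if it does not exist
 * yet, the request is only rounded down to a power of two and returned, and
 * the cache itself is created later in ram_save_setup().
 */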
179 int64_t xbzrle_cache_resize(int64_t new_size)
180 {
181     if (new_size < TARGET_PAGE_SIZE) {
182         return -1;
183     }
184
185     if (XBZRLE.cache != NULL) {
186         return cache_resize(XBZRLE.cache, new_size / TARGET_PAGE_SIZE) *
187             TARGET_PAGE_SIZE;
188     }
189     return pow2floor(new_size);
190 }
191
192 /* accounting for migration statistics */
193 typedef struct AccountingInfo {
194     uint64_t dup_pages;
195     uint64_t skipped_pages;
196     uint64_t norm_pages;
197     uint64_t iterations;
198     uint64_t xbzrle_bytes;
199     uint64_t xbzrle_pages;
200     uint64_t xbzrle_cache_miss;
201     uint64_t xbzrle_overflows;
202 } AccountingInfo;
203
204 static AccountingInfo acct_info;
205
206 static void acct_clear(void)
207 {
208     memset(&acct_info, 0, sizeof(acct_info));
209 }
210
211 uint64_t dup_mig_bytes_transferred(void)
212 {
213     return acct_info.dup_pages * TARGET_PAGE_SIZE;
214 }
215
216 uint64_t dup_mig_pages_transferred(void)
217 {
218     return acct_info.dup_pages;
219 }
220
221 uint64_t skipped_mig_bytes_transferred(void)
222 {
223     return acct_info.skipped_pages * TARGET_PAGE_SIZE;
224 }
225
226 uint64_t skipped_mig_pages_transferred(void)
227 {
228     return acct_info.skipped_pages;
229 }
230
231 uint64_t norm_mig_bytes_transferred(void)
232 {
233     return acct_info.norm_pages * TARGET_PAGE_SIZE;
234 }
235
236 uint64_t norm_mig_pages_transferred(void)
237 {
238     return acct_info.norm_pages;
239 }
240
241 uint64_t xbzrle_mig_bytes_transferred(void)
242 {
243     return acct_info.xbzrle_bytes;
244 }
245
246 uint64_t xbzrle_mig_pages_transferred(void)
247 {
248     return acct_info.xbzrle_pages;
249 }
250
251 uint64_t xbzrle_mig_pages_cache_miss(void)
252 {
253     return acct_info.xbzrle_cache_miss;
254 }
255
256 uint64_t xbzrle_mig_pages_overflow(void)
257 {
258     return acct_info.xbzrle_overflows;
259 }
260
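/*
 * Write the per-page header to the stream: one big-endian 64-bit word made of
 * the page offset OR'ed with the record type flag and, when the page belongs
 * to the same block as the previous one, RAM_SAVE_FLAG_CONTINUE.  Unless
 * CONTINUE is set, the header is followed by a one-byte length and the
 * block's idstr so the destination can look the block up by name.
 * Returns the number of header bytes written.
 */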
261 static size_t save_block_hdr(QEMUFile *f, RAMBlock *block, ram_addr_t offset,
262                              int cont, int flag)
263 {
264     size_t size;
265
266     qemu_put_be64(f, offset | cont | flag);
267     size = 8;
268
269     if (!cont) {
270         qemu_put_byte(f, strlen(block->idstr));
271         qemu_put_buffer(f, (uint8_t *)block->idstr,
272                         strlen(block->idstr));
273         size += 1 + strlen(block->idstr);
274     }
275     return size;
276 }
277
278 #define ENCODING_FLAG_XBZRLE 0x1
279
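/*
 * Try to send the page at @current_addr as an XBZRLE delta against the copy
 * held in the page cache.  On the wire this is a block header carrying
 * RAM_SAVE_FLAG_XBZRLE, the ENCODING_FLAG_XBZRLE byte, a big-endian 16-bit
 * encoded length, and the encoded data.
 *
 * Returns: >0  number of bytes written
 *           0  page unchanged with respect to the cached copy, nothing sent
 *          -1  cache miss or encoding overflow; the caller falls back to
 *              sending the page as a normal page
 */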
280 static int save_xbzrle_page(QEMUFile *f, uint8_t *current_data,
281                             ram_addr_t current_addr, RAMBlock *block,
282                             ram_addr_t offset, int cont, bool last_stage)
283 {
284     int encoded_len = 0, bytes_sent = -1;
285     uint8_t *prev_cached_page;
286
287     if (!cache_is_cached(XBZRLE.cache, current_addr)) {
288         if (!last_stage) {
289             cache_insert(XBZRLE.cache, current_addr, current_data);
290         }
291         acct_info.xbzrle_cache_miss++;
292         return -1;
293     }
294
295     prev_cached_page = get_cached_data(XBZRLE.cache, current_addr);
296
297     /* copy the page being sent into our scratch buffer */
298     memcpy(XBZRLE.current_buf, current_data, TARGET_PAGE_SIZE);
299
300     /* XBZRLE encoding (if there is no overflow) */
301     encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf,
302                                        TARGET_PAGE_SIZE, XBZRLE.encoded_buf,
303                                        TARGET_PAGE_SIZE);
304     if (encoded_len == 0) {
305         DPRINTF("Skipping unmodified page\n");
306         return 0;
307     } else if (encoded_len == -1) {
308         DPRINTF("Overflow\n");
309         acct_info.xbzrle_overflows++;
310         /* update data in the cache */
311         memcpy(prev_cached_page, current_data, TARGET_PAGE_SIZE);
312         return -1;
313     }
314
315     /* update the cached copy so future deltas are computed against what we just sent */
316     if (!last_stage) {
317         memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
318     }
319
320     /* Send XBZRLE based compressed page */
321     bytes_sent = save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_XBZRLE);
322     qemu_put_byte(f, ENCODING_FLAG_XBZRLE);
323     qemu_put_be16(f, encoded_len);
324     qemu_put_buffer(f, XBZRLE.encoded_buf, encoded_len);
325     bytes_sent += encoded_len + 1 + 2;
326     acct_info.xbzrle_pages++;
327     acct_info.xbzrle_bytes += bytes_sent;
328
329     return bytes_sent;
330 }
331
332
333 /* This is the last block that we have visited searching for dirty pages
334  */
335 static RAMBlock *last_seen_block;
336 /* This is the last block from where we have sent data */
337 static RAMBlock *last_sent_block;
338 static ram_addr_t last_offset;
339 static unsigned long *migration_bitmap;
340 static uint64_t migration_dirty_pages;
341 static uint32_t last_version;
342 static bool ram_bulk_stage;
343
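/*
 * Find the next dirty page of @mr starting from @start, clear its bit in the
 * migration bitmap and return its offset within the block.  During the bulk
 * stage every page is still dirty and pages are visited in order, so instead
 * of searching the bitmap we simply step to the page after @start; the real
 * search is only needed for the first page of a block.  A returned offset at
 * or past the end of the block means no dirty page was found.
 */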
344 static inline
345 ram_addr_t migration_bitmap_find_and_reset_dirty(MemoryRegion *mr,
346                                                  ram_addr_t start)
347 {
348     unsigned long base = mr->ram_addr >> TARGET_PAGE_BITS;
349     unsigned long nr = base + (start >> TARGET_PAGE_BITS);
350     uint64_t mr_size = TARGET_PAGE_ALIGN(memory_region_size(mr));
351     unsigned long size = base + (mr_size >> TARGET_PAGE_BITS);
352
353     unsigned long next;
354
355     if (ram_bulk_stage && nr > base) {
356         next = nr + 1;
357     } else {
358         next = find_next_bit(migration_bitmap, size, nr);
359     }
360
361     if (next < size) {
362         clear_bit(next, migration_bitmap);
363         migration_dirty_pages--;
364     }
365     return (next - base) << TARGET_PAGE_BITS;
366 }
367
368 static inline bool migration_bitmap_set_dirty(ram_addr_t addr)
369 {
370     bool ret;
371     int nr = addr >> TARGET_PAGE_BITS;
372
373     ret = test_and_set_bit(nr, migration_bitmap);
374
375     if (!ret) {
376         migration_dirty_pages++;
377     }
378     return ret;
379 }
380
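/*
 * Fold the dirty log for [start, start + length) into the migration bitmap.
 * When the range begins on a bitmap word boundary, whole words are OR'ed in
 * at a time and the newly dirtied pages are counted with ctpopl(); otherwise
 * we fall back to testing and resetting one page at a time.
 */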
381 static void migration_bitmap_sync_range(ram_addr_t start, ram_addr_t length)
382 {
383     ram_addr_t addr;
384     unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);
385
386     /* is the start address aligned to a bitmap word? */
387     if (((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) {
388         int k;
389         int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
390         unsigned long *src = ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION];
391
392         for (k = page; k < page + nr; k++) {
393             if (src[k]) {
394                 unsigned long new_dirty;
395                 new_dirty = ~migration_bitmap[k];
396                 migration_bitmap[k] |= src[k];
397                 new_dirty &= src[k];
398                 migration_dirty_pages += ctpopl(new_dirty);
399                 src[k] = 0;
400             }
401         }
402     } else {
403         for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
404             if (cpu_physical_memory_get_dirty(start + addr,
405                                               TARGET_PAGE_SIZE,
406                                               DIRTY_MEMORY_MIGRATION)) {
407                 cpu_physical_memory_reset_dirty(start + addr,
408                                                 TARGET_PAGE_SIZE,
409                                                 DIRTY_MEMORY_MIGRATION);
410                 migration_bitmap_set_dirty(start + addr);
411             }
412         }
413     }
414 }
415
416
417 /* Needs iothread lock! */
418
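/*
 * Pull the dirty log from the memory core into the migration bitmap and
 * update the dirty page rate.  Once per second this also feeds the
 * auto-converge heuristic that decides whether to start throttling the guest.
 */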
419 static void migration_bitmap_sync(void)
420 {
421     RAMBlock *block;
422     uint64_t num_dirty_pages_init = migration_dirty_pages;
423     MigrationState *s = migrate_get_current();
424     static int64_t start_time;
425     static int64_t bytes_xfer_prev;
426     static int64_t num_dirty_pages_period;
427     int64_t end_time;
428     int64_t bytes_xfer_now;
429
430     if (!bytes_xfer_prev) {
431         bytes_xfer_prev = ram_bytes_transferred();
432     }
433
434     if (!start_time) {
435         start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
436     }
437
438     trace_migration_bitmap_sync_start();
439     address_space_sync_dirty_bitmap(&address_space_memory);
440
441     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
442         migration_bitmap_sync_range(block->mr->ram_addr, block->length);
443     }
444     trace_migration_bitmap_sync_end(migration_dirty_pages
445                                     - num_dirty_pages_init);
446     num_dirty_pages_period += migration_dirty_pages - num_dirty_pages_init;
447     end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
448
449     /* more than 1 second (1000 milliseconds) has passed */
450     if (end_time > start_time + 1000) {
451         if (migrate_auto_converge()) {
452             /* The following detection logic can be refined later. For now:
453                check whether the bytes dirtied in this period exceed 50% of
454                the approximate number of bytes transferred since the last
455                time we were in this routine. If that happens >N times (for
456                now N==4) we turn on the throttle-down logic */
457             bytes_xfer_now = ram_bytes_transferred();
458             if (s->dirty_pages_rate &&
459                (num_dirty_pages_period * TARGET_PAGE_SIZE >
460                    (bytes_xfer_now - bytes_xfer_prev)/2) &&
461                (dirty_rate_high_cnt++ > 4)) {
462                     trace_migration_throttle();
463                     mig_throttle_on = true;
464                     dirty_rate_high_cnt = 0;
465              }
466              bytes_xfer_prev = bytes_xfer_now;
467         } else {
468              mig_throttle_on = false;
469         }
470         s->dirty_pages_rate = num_dirty_pages_period * 1000
471             / (end_time - start_time);
472         s->dirty_bytes_rate = s->dirty_pages_rate * TARGET_PAGE_SIZE;
473         start_time = end_time;
474         num_dirty_pages_period = 0;
475     }
476 }
477
478 /*
479  * ram_save_block: Write at most one page of guest memory to the stream f
480  *
481  * Returns:  The number of bytes written.
482  *           0 means no dirty pages were found
483  */
484
485 static int ram_save_block(QEMUFile *f, bool last_stage)
486 {
487     RAMBlock *block = last_seen_block;
488     ram_addr_t offset = last_offset;
489     bool complete_round = false;
490     int bytes_sent = 0;
491     MemoryRegion *mr;
492     ram_addr_t current_addr;
493
494     if (!block)
495         block = QTAILQ_FIRST(&ram_list.blocks);
496
497     while (true) {
498         mr = block->mr;
499         offset = migration_bitmap_find_and_reset_dirty(mr, offset);
500         if (complete_round && block == last_seen_block &&
501             offset >= last_offset) {
502             break;
503         }
504         if (offset >= block->length) {
505             offset = 0;
506             block = QTAILQ_NEXT(block, next);
507             if (!block) {
508                 block = QTAILQ_FIRST(&ram_list.blocks);
509                 complete_round = true;
510                 ram_bulk_stage = false;
511             }
512         } else {
513             int ret;
514             uint8_t *p;
515             int cont = (block == last_sent_block) ?
516                 RAM_SAVE_FLAG_CONTINUE : 0;
517
518             p = memory_region_get_ram_ptr(mr) + offset;
519
520             /* When in doubt, send the page as a normal page */
521             bytes_sent = -1;
522             ret = ram_control_save_page(f, block->offset,
523                                offset, TARGET_PAGE_SIZE, &bytes_sent);
524
525             if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
526                 if (ret != RAM_SAVE_CONTROL_DELAYED) {
527                     if (bytes_sent > 0) {
528                         acct_info.norm_pages++;
529                     } else if (bytes_sent == 0) {
530                         acct_info.dup_pages++;
531                     }
532                 }
533             } else if (is_zero_range(p, TARGET_PAGE_SIZE)) {
534                 acct_info.dup_pages++;
535                 bytes_sent = save_block_hdr(f, block, offset, cont,
536                                             RAM_SAVE_FLAG_COMPRESS);
537                 qemu_put_byte(f, 0);
538                 bytes_sent++;
539             } else if (!ram_bulk_stage && migrate_use_xbzrle()) {
540                 current_addr = block->offset + offset;
541                 bytes_sent = save_xbzrle_page(f, p, current_addr, block,
542                                               offset, cont, last_stage);
543                 if (!last_stage) {
544                     p = get_cached_data(XBZRLE.cache, current_addr);
545                 }
546             }
547
548             /* XBZRLE overflow or normal page */
549             if (bytes_sent == -1) {
550                 bytes_sent = save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_PAGE);
551                 qemu_put_buffer_async(f, p, TARGET_PAGE_SIZE);
552                 bytes_sent += TARGET_PAGE_SIZE;
553                 acct_info.norm_pages++;
554             }
555
556             /* only stop once something was sent; a skipped page keeps the loop going */
557             if (bytes_sent > 0) {
558                 last_sent_block = block;
559                 break;
560             }
561         }
562     }
563     last_seen_block = block;
564     last_offset = offset;
565
566     return bytes_sent;
567 }
568
569 static uint64_t bytes_transferred;
570
571 void acct_update_position(QEMUFile *f, size_t size, bool zero)
572 {
573     uint64_t pages = size / TARGET_PAGE_SIZE;
574     if (zero) {
575         acct_info.dup_pages += pages;
576     } else {
577         acct_info.norm_pages += pages;
578         bytes_transferred += size;
579         qemu_update_position(f, size);
580     }
581 }
582
583 static ram_addr_t ram_save_remaining(void)
584 {
585     return migration_dirty_pages;
586 }
587
588 uint64_t ram_bytes_remaining(void)
589 {
590     return ram_save_remaining() * TARGET_PAGE_SIZE;
591 }
592
593 uint64_t ram_bytes_transferred(void)
594 {
595     return bytes_transferred;
596 }
597
598 uint64_t ram_bytes_total(void)
599 {
600     RAMBlock *block;
601     uint64_t total = 0;
602
603     QTAILQ_FOREACH(block, &ram_list.blocks, next)
604         total += block->length;
605
606     return total;
607 }
608
609 static void migration_end(void)
610 {
611     if (migration_bitmap) {
612         memory_global_dirty_log_stop();
613         g_free(migration_bitmap);
614         migration_bitmap = NULL;
615     }
616
617     if (XBZRLE.cache) {
618         cache_fini(XBZRLE.cache);
619         g_free(XBZRLE.cache);
620         g_free(XBZRLE.encoded_buf);
621         g_free(XBZRLE.current_buf);
622         g_free(XBZRLE.decoded_buf);
623         XBZRLE.cache = NULL;
624         XBZRLE.encoded_buf = NULL;
625         XBZRLE.current_buf = NULL;
626         XBZRLE.decoded_buf = NULL;
627     }
628 }
629
630 static void ram_migration_cancel(void *opaque)
631 {
632     migration_end();
633 }
634
635 static void reset_ram_globals(void)
636 {
637     last_seen_block = NULL;
638     last_sent_block = NULL;
639     last_offset = 0;
640     last_version = ram_list.version;
641     ram_bulk_stage = true;
642 }
643
644 #define MAX_WAIT 50 /* ms, half buffered_file limit */
645
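/*
 * Set up RAM migration: create the migration bitmap with every page marked
 * dirty, optionally create the XBZRLE cache, start dirty logging, and write
 * the setup section: the total RAM size tagged with RAM_SAVE_FLAG_MEM_SIZE,
 * each block's idstr and length, and a final EOS marker.
 */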
646 static int ram_save_setup(QEMUFile *f, void *opaque)
647 {
648     RAMBlock *block;
649     int64_t ram_pages = last_ram_offset() >> TARGET_PAGE_BITS;
650
651     migration_bitmap = bitmap_new(ram_pages);
652     bitmap_set(migration_bitmap, 0, ram_pages);
653     migration_dirty_pages = ram_pages;
654     mig_throttle_on = false;
655     dirty_rate_high_cnt = 0;
656
657     if (migrate_use_xbzrle()) {
658         XBZRLE.cache = cache_init(migrate_xbzrle_cache_size() /
659                                   TARGET_PAGE_SIZE,
660                                   TARGET_PAGE_SIZE);
661         if (!XBZRLE.cache) {
662             DPRINTF("Error creating cache\n");
663             return -1;
664         }
665         XBZRLE.encoded_buf = g_malloc0(TARGET_PAGE_SIZE);
666         XBZRLE.current_buf = g_malloc(TARGET_PAGE_SIZE);
667         acct_clear();
668     }
669
670     qemu_mutex_lock_iothread();
671     qemu_mutex_lock_ramlist();
672     bytes_transferred = 0;
673     reset_ram_globals();
674
675     memory_global_dirty_log_start();
676     migration_bitmap_sync();
677     qemu_mutex_unlock_iothread();
678
679     qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE);
680
681     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
682         qemu_put_byte(f, strlen(block->idstr));
683         qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
684         qemu_put_be64(f, block->length);
685     }
686
687     qemu_mutex_unlock_ramlist();
688
689     ram_control_before_iterate(f, RAM_CONTROL_SETUP);
690     ram_control_after_iterate(f, RAM_CONTROL_SETUP);
691
692     qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
693
694     return 0;
695 }
696
697 static int ram_save_iterate(QEMUFile *f, void *opaque)
698 {
699     int ret;
700     int i;
701     int64_t t0;
702     int total_sent = 0;
703
704     qemu_mutex_lock_ramlist();
705
706     if (ram_list.version != last_version) {
707         reset_ram_globals();
708     }
709
710     ram_control_before_iterate(f, RAM_CONTROL_ROUND);
711
712     t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
713     i = 0;
714     while ((ret = qemu_file_rate_limit(f)) == 0) {
715         int bytes_sent;
716
717         bytes_sent = ram_save_block(f, false);
718         /* no more pages to send */
719         if (bytes_sent == 0) {
720             break;
721         }
722         total_sent += bytes_sent;
723         acct_info.iterations++;
724         check_guest_throttling();
725         /* we want to check in the 1st loop, just in case it was the 1st time
726            and we had to sync the dirty bitmap.
727            qemu_clock_get_ns() is a bit expensive, so we only check every few
728            iterations
729         */
730         if ((i & 63) == 0) {
731             uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) / 1000000;
732             if (t1 > MAX_WAIT) {
733                 DPRINTF("big wait: %" PRIu64 " milliseconds, %d iterations\n",
734                         t1, i);
735                 break;
736             }
737         }
738         i++;
739     }
740
741     qemu_mutex_unlock_ramlist();
742
743     /*
744      * Must occur before EOS (or any QEMUFile operation)
745      * because of RDMA protocol.
746      */
747     ram_control_after_iterate(f, RAM_CONTROL_ROUND);
748
749     bytes_transferred += total_sent;
750
751     /*
752      * Do not count these 8 bytes into total_sent, so that we can
753      * return 0 if no page had been dirtied.
754      */
755     qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
756     bytes_transferred += 8;
757
758     ret = qemu_file_get_error(f);
759     if (ret < 0) {
760         return ret;
761     }
762
763     return total_sent;
764 }
765
766 static int ram_save_complete(QEMUFile *f, void *opaque)
767 {
768     qemu_mutex_lock_ramlist();
769     migration_bitmap_sync();
770
771     ram_control_before_iterate(f, RAM_CONTROL_FINISH);
772
773     /* try transferring iterative blocks of memory */
774
775     /* flush all remaining blocks regardless of rate limiting */
776     while (true) {
777         int bytes_sent;
778
779         bytes_sent = ram_save_block(f, true);
780         /* no more pages to send */
781         if (bytes_sent == 0) {
782             break;
783         }
784         bytes_transferred += bytes_sent;
785     }
786
787     ram_control_after_iterate(f, RAM_CONTROL_FINISH);
788     migration_end();
789
790     qemu_mutex_unlock_ramlist();
791     qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
792
793     return 0;
794 }
795
796 static uint64_t ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size)
797 {
798     uint64_t remaining_size;
799
800     remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;
801
802     if (remaining_size < max_size) {
803         qemu_mutex_lock_iothread();
804         migration_bitmap_sync();
805         qemu_mutex_unlock_iothread();
806         remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;
807     }
808     return remaining_size;
809 }
810
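/*
 * Receive-side counterpart of save_xbzrle_page(): read the one-byte encoding
 * flag and the 16-bit encoded length, then decode the delta on top of the
 * current contents of @host.  Returns 0 on success, -1 on a malformed page.
 */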
811 static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
812 {
813     int ret, rc = 0;
814     unsigned int xh_len;
815     int xh_flags;
816
817     if (!XBZRLE.decoded_buf) {
818         XBZRLE.decoded_buf = g_malloc(TARGET_PAGE_SIZE);
819     }
820
821     /* extract RLE header */
822     xh_flags = qemu_get_byte(f);
823     xh_len = qemu_get_be16(f);
824
825     if (xh_flags != ENCODING_FLAG_XBZRLE) {
826         fprintf(stderr, "Failed to load XBZRLE page - wrong compression!\n");
827         return -1;
828     }
829
830     if (xh_len > TARGET_PAGE_SIZE) {
831         fprintf(stderr, "Failed to load XBZRLE page - len overflow!\n");
832         return -1;
833     }
834     /* load data and decode */
835     qemu_get_buffer(f, XBZRLE.decoded_buf, xh_len);
836
837     /* decode RLE */
838     ret = xbzrle_decode_buffer(XBZRLE.decoded_buf, xh_len, host,
839                                TARGET_PAGE_SIZE);
840     if (ret == -1) {
841         fprintf(stderr, "Failed to load XBZRLE page - decode error!\n");
842         rc = -1;
843     } else if (ret > TARGET_PAGE_SIZE) {
844         fprintf(stderr, "Failed to load XBZRLE page - size %d exceeds %d!\n",
845                 ret, TARGET_PAGE_SIZE);
846         abort();
847     }
848
849     return rc;
850 }
851
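/*
 * Translate a (block, offset) pair from the migration stream into a host
 * pointer.  With RAM_SAVE_FLAG_CONTINUE the page belongs to the same block
 * as the previous one, remembered in a static variable; otherwise the block's
 * idstr is read from the stream and looked up by name.  Returns NULL if the
 * stream is inconsistent or names an unknown block.
 */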
852 static inline void *host_from_stream_offset(QEMUFile *f,
853                                             ram_addr_t offset,
854                                             int flags)
855 {
856     static RAMBlock *block = NULL;
857     char id[256];
858     uint8_t len;
859
860     if (flags & RAM_SAVE_FLAG_CONTINUE) {
861         if (!block) {
862             fprintf(stderr, "Ack, bad migration stream!\n");
863             return NULL;
864         }
865
866         return memory_region_get_ram_ptr(block->mr) + offset;
867     }
868
869     len = qemu_get_byte(f);
870     qemu_get_buffer(f, (uint8_t *)id, len);
871     id[len] = 0;
872
873     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
874         if (!strncmp(id, block->idstr, sizeof(id)))
875             return memory_region_get_ram_ptr(block->mr) + offset;
876     }
877
878     fprintf(stderr, "Can't find block %s!\n", id);
879     return NULL;
880 }
881
882 /*
883  * If a page (or a whole RDMA chunk) has been
884  * determined to be zero, then zap it.
885  */
886 void ram_handle_compressed(void *host, uint8_t ch, uint64_t size)
887 {
888     if (ch != 0 || !is_zero_range(host, size)) {
889         memset(host, ch, size);
890     }
891 }
892
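/*
 * Incoming side of RAM migration.  The stream is a sequence of big-endian
 * 64-bit words, each a page address with flags in its low bits, processed
 * until RAM_SAVE_FLAG_EOS: MEM_SIZE records carry the RAM block list to
 * cross-check against ours, COMPRESS records carry a single fill byte for a
 * whole page, PAGE records carry raw page data, and XBZRLE records are
 * decoded by load_xbzrle().
 */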
893 static int ram_load(QEMUFile *f, void *opaque, int version_id)
894 {
895     ram_addr_t addr;
896     int flags, ret = 0;
897     int error;
898     static uint64_t seq_iter;
899
900     seq_iter++;
901
902     if (version_id != 4) {
903         return -EINVAL;
904     }
905
906     do {
907         addr = qemu_get_be64(f);
908
909         flags = addr & ~TARGET_PAGE_MASK;
910         addr &= TARGET_PAGE_MASK;
911
912         if (flags & RAM_SAVE_FLAG_MEM_SIZE) {
913             if (version_id == 4) {
914                 /* Synchronize RAM block list */
915                 char id[256];
916                 ram_addr_t length;
917                 ram_addr_t total_ram_bytes = addr;
918
919                 while (total_ram_bytes) {
920                     RAMBlock *block;
921                     uint8_t len;
922
923                     len = qemu_get_byte(f);
924                     qemu_get_buffer(f, (uint8_t *)id, len);
925                     id[len] = 0;
926                     length = qemu_get_be64(f);
927
928                     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
929                         if (!strncmp(id, block->idstr, sizeof(id))) {
930                             if (block->length != length) {
931                                 fprintf(stderr,
932                                         "Length mismatch: %s: " RAM_ADDR_FMT
933                                         " in != " RAM_ADDR_FMT "\n", id, length,
934                                         block->length);
935                                 ret =  -EINVAL;
936                                 goto done;
937                             }
938                             break;
939                         }
940                     }
941
942                     if (!block) {
943                         fprintf(stderr, "Unknown ramblock \"%s\", cannot "
944                                 "accept migration\n", id);
945                         ret = -EINVAL;
946                         goto done;
947                     }
948
949                     total_ram_bytes -= length;
950                 }
951             }
952         }
953
954         if (flags & RAM_SAVE_FLAG_COMPRESS) {
955             void *host;
956             uint8_t ch;
957
958             host = host_from_stream_offset(f, addr, flags);
959             if (!host) {
960                 return -EINVAL;
961             }
962
963             ch = qemu_get_byte(f);
964             ram_handle_compressed(host, ch, TARGET_PAGE_SIZE);
965         } else if (flags & RAM_SAVE_FLAG_PAGE) {
966             void *host;
967
968             host = host_from_stream_offset(f, addr, flags);
969             if (!host) {
970                 return -EINVAL;
971             }
972
973             qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
974         } else if (flags & RAM_SAVE_FLAG_XBZRLE) {
975             void *host = host_from_stream_offset(f, addr, flags);
976             if (!host) {
977                 return -EINVAL;
978             }
979
980             if (load_xbzrle(f, addr, host) < 0) {
981                 ret = -EINVAL;
982                 goto done;
983             }
984         } else if (flags & RAM_SAVE_FLAG_HOOK) {
985             ram_control_load_hook(f, flags);
986         }
987         error = qemu_file_get_error(f);
988         if (error) {
989             ret = error;
990             goto done;
991         }
992     } while (!(flags & RAM_SAVE_FLAG_EOS));
993
994 done:
995     DPRINTF("Completed load of VM with exit code %d seq iteration "
996             "%" PRIu64 "\n", ret, seq_iter);
997     return ret;
998 }
999
1000 SaveVMHandlers savevm_ram_handlers = {
1001     .save_live_setup = ram_save_setup,
1002     .save_live_iterate = ram_save_iterate,
1003     .save_live_complete = ram_save_complete,
1004     .save_live_pending = ram_save_pending,
1005     .load_state = ram_load,
1006     .cancel = ram_migration_cancel,
1007 };
1008
1009 struct soundhw {
1010     const char *name;
1011     const char *descr;
1012     int enabled;
1013     int isa;
1014     union {
1015         int (*init_isa) (ISABus *bus);
1016         int (*init_pci) (PCIBus *bus);
1017     } init;
1018 };
1019
1020 static struct soundhw soundhw[9];
1021 static int soundhw_count;
1022
1023 void isa_register_soundhw(const char *name, const char *descr,
1024                           int (*init_isa)(ISABus *bus))
1025 {
1026     assert(soundhw_count < ARRAY_SIZE(soundhw) - 1);
1027     soundhw[soundhw_count].name = name;
1028     soundhw[soundhw_count].descr = descr;
1029     soundhw[soundhw_count].isa = 1;
1030     soundhw[soundhw_count].init.init_isa = init_isa;
1031     soundhw_count++;
1032 }
1033
1034 void pci_register_soundhw(const char *name, const char *descr,
1035                           int (*init_pci)(PCIBus *bus))
1036 {
1037     assert(soundhw_count < ARRAY_SIZE(soundhw) - 1);
1038     soundhw[soundhw_count].name = name;
1039     soundhw[soundhw_count].descr = descr;
1040     soundhw[soundhw_count].isa = 0;
1041     soundhw[soundhw_count].init.init_pci = init_pci;
1042     soundhw_count++;
1043 }
1044
1045 void select_soundhw(const char *optarg)
1046 {
1047     struct soundhw *c;
1048
1049     if (is_help_option(optarg)) {
1050     show_valid_cards:
1051
1052         if (soundhw_count) {
1053              printf("Valid sound card names (comma separated):\n");
1054              for (c = soundhw; c->name; ++c) {
1055                  printf ("%-11s %s\n", c->name, c->descr);
1056              }
1057              printf("\n-soundhw all will enable all of the above\n");
1058         } else {
1059              printf("Machine has no user-selectable audio hardware "
1060                     "(it may or may not have always-present audio hardware).\n");
1061         }
1062         exit(!is_help_option(optarg));
1063     }
1064     else {
1065         size_t l;
1066         const char *p;
1067         char *e;
1068         int bad_card = 0;
1069
1070         if (!strcmp(optarg, "all")) {
1071             for (c = soundhw; c->name; ++c) {
1072                 c->enabled = 1;
1073             }
1074             return;
1075         }
1076
1077         p = optarg;
1078         while (*p) {
1079             e = strchr(p, ',');
1080             l = !e ? strlen(p) : (size_t) (e - p);
1081
1082             for (c = soundhw; c->name; ++c) {
1083                 if (!strncmp(c->name, p, l) && !c->name[l]) {
1084                     c->enabled = 1;
1085                     break;
1086                 }
1087             }
1088
1089             if (!c->name) {
1090                 if (l > 80) {
1091                     fprintf(stderr,
1092                             "Unknown sound card name (too big to show)\n");
1093                 }
1094                 else {
1095                     fprintf(stderr, "Unknown sound card name `%.*s'\n",
1096                             (int) l, p);
1097                 }
1098                 bad_card = 1;
1099             }
1100             p += l + (e != NULL);
1101         }
1102
1103         if (bad_card) {
1104             goto show_valid_cards;
1105         }
1106     }
1107 }
1108
1109 void audio_init(void)
1110 {
1111     struct soundhw *c;
1112     ISABus *isa_bus = (ISABus *) object_resolve_path_type("", TYPE_ISA_BUS, NULL);
1113     PCIBus *pci_bus = (PCIBus *) object_resolve_path_type("", TYPE_PCI_BUS, NULL);
1114
1115     for (c = soundhw; c->name; ++c) {
1116         if (c->enabled) {
1117             if (c->isa) {
1118                 if (!isa_bus) {
1119                     fprintf(stderr, "ISA bus not available for %s\n", c->name);
1120                     exit(1);
1121                 }
1122                 c->init.init_isa(isa_bus);
1123             } else {
1124                 if (!pci_bus) {
1125                     fprintf(stderr, "PCI bus not available for %s\n", c->name);
1126                     exit(1);
1127                 }
1128                 c->init.init_pci(pci_bus);
1129             }
1130         }
1131     }
1132 }
1133
1134 int qemu_uuid_parse(const char *str, uint8_t *uuid)
1135 {
1136     int ret;
1137
1138     if (strlen(str) != 36) {
1139         return -1;
1140     }
1141
1142     ret = sscanf(str, UUID_FMT, &uuid[0], &uuid[1], &uuid[2], &uuid[3],
1143                  &uuid[4], &uuid[5], &uuid[6], &uuid[7], &uuid[8], &uuid[9],
1144                  &uuid[10], &uuid[11], &uuid[12], &uuid[13], &uuid[14],
1145                  &uuid[15]);
1146
1147     if (ret != 16) {
1148         return -1;
1149     }
1150     return 0;
1151 }
1152
1153 void do_acpitable_option(const QemuOpts *opts)
1154 {
1155 #ifdef TARGET_I386
1156     Error *err = NULL;
1157
1158     acpi_table_add(opts, &err);
1159     if (err) {
1160         error_report("Wrong acpi table provided: %s",
1161                      error_get_pretty(err));
1162         error_free(err);
1163         exit(1);
1164     }
1165 #endif
1166 }
1167
1168 void do_smbios_option(QemuOpts *opts)
1169 {
1170 #ifdef TARGET_I386
1171     smbios_entry_add(opts);
1172 #endif
1173 }
1174
1175 void cpudef_init(void)
1176 {
1177 #if defined(cpudef_setup)
1178     cpudef_setup(); /* parse cpu definitions in target config file */
1179 #endif
1180 }
1181
1182 int tcg_available(void)
1183 {
1184     return 1;
1185 }
1186
1187 int kvm_available(void)
1188 {
1189 #ifdef CONFIG_KVM
1190     return 1;
1191 #else
1192     return 0;
1193 #endif
1194 }
1195
1196 int xen_available(void)
1197 {
1198 #ifdef CONFIG_XEN
1199     return 1;
1200 #else
1201     return 0;
1202 #endif
1203 }
1204
1205
1206 TargetInfo *qmp_query_target(Error **errp)
1207 {
1208     TargetInfo *info = g_malloc0(sizeof(*info));
1209
1210     info->arch = g_strdup(TARGET_NAME);
1211
1212     return info;
1213 }
1214
1215 /* Stub function that gets run on the vcpu when it is brought out of the
1216    VM to run inside qemu via async_run_on_cpu() */
1217 static void mig_sleep_cpu(void *opq)
1218 {
1219     qemu_mutex_unlock_iothread();
1220     g_usleep(30*1000);
1221     qemu_mutex_lock_iothread();
1222 }
1223
1224 /* To reduce the dirty rate, explicitly disallow the VCPUs from spending
1225    much time in the VM. The migration thread will try to catch up.
1226    The workload will experience a performance drop.
1227 */
1228 static void mig_throttle_guest_down(void)
1229 {
1230     CPUState *cpu;
1231
1232     qemu_mutex_lock_iothread();
1233     CPU_FOREACH(cpu) {
1234         async_run_on_cpu(cpu, mig_sleep_cpu, NULL);
1235     }
1236     qemu_mutex_unlock_iothread();
1237 }
1238
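/*
 * Called from the migration loop: while auto-converge has switched
 * mig_throttle_on, put every vCPU to sleep for 30 ms (mig_sleep_cpu above)
 * at most once every 40 ms so the guest dirties memory more slowly.
 */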
1239 static void check_guest_throttling(void)
1240 {
1241     static int64_t t0;
1242     int64_t        t1;
1243
1244     if (!mig_throttle_on) {
1245         return;
1246     }
1247
1248     if (!t0)  {
1249         t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
1250         return;
1251     }
1252
1253     t1 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
1254
1255     /* If it has been more than 40 ms since the last time the guest
1256      * was throttled then do it again.
1257      */
1258     if (40 < (t1-t0)/1000000) {
1259         mig_throttle_guest_down();
1260         t0 = t1;
1261     }
1262 }