#ifndef QEMU_MIGRATION_H
#define QEMU_MIGRATION_H
-#include "qemu-common.h"
+#include "exec/cpu-common.h"
+#include "hw/qdev-core.h"
#include "qapi/qapi-types-migration.h"
#include "qemu/thread.h"
-#include "exec/cpu-common.h"
#include "qemu/coroutine_int.h"
-#include "hw/qdev.h"
#include "io/channel.h"
+#include "net/announce.h"
struct PostcopyBlocktimeContext;
#define MIGRATION_RESUME_ACK_VALUE (1)
+/*
+ * 1<<6=64 pages -> 256K chunk when page size is 4K. This gives us
+ * the benefit that all the chunks are aligned to 64 pages, so the
+ * bitmaps are always aligned to LONG.
+ */
+#define CLEAR_BITMAP_SHIFT_MIN 6
+/*
+ * 1<<18=256K pages -> 1G chunk when page size is 4K. This is the
+ * default value to use if none is specified.
+ */
+#define CLEAR_BITMAP_SHIFT_DEFAULT 18
+/*
+ * 1<<31=2G pages -> 8T chunk when page size is 4K. This should be
+ * big enough and make sure we won't overflow easily.
+ */
+#define CLEAR_BITMAP_SHIFT_MAX 31
+
/* State for the incoming migration */
struct MigrationIncomingState {
QEMUFile *from_src_file;
*/
QemuEvent main_thread_load_event;
+ /* For network announces */
+ AnnounceTimer announce_timer;
+
size_t largest_page_size;
bool have_fault_thread;
QemuThread fault_thread;
bool postcopy_recover_triggered;
QemuSemaphore postcopy_pause_sem_dst;
QemuSemaphore postcopy_pause_sem_fault;
+
+ /* List of listening socket addresses */
+ SocketAddressList *socket_address_list;
};
MigrationIncomingState *migration_incoming_get_current(void);
DeviceState parent_obj;
/*< public >*/
- size_t bytes_xfer;
- size_t xfer_limit;
QemuThread thread;
QEMUBH *cleanup_bh;
QEMUFile *to_dst_file;
*/
QemuSemaphore rate_limit_sem;
- /* bytes already send at the beggining of current interation */
+ /* pages already sent at the beginning of current iteration */
+ uint64_t iteration_initial_pages;
+
+ /* pages transferred per second */
+ double pages_per_second;
+
+ /* bytes already sent at the beginning of current iteration */
 uint64_t iteration_initial_bytes;
/* time at the start of current iteration */
int64_t iteration_start_time;
*/
bool store_global_state;
- /* Whether the VM is only allowing for migratable devices */
- bool only_migratable;
-
/* Whether we send QEMU_VM_CONFIGURATION during migration */
bool send_configuration;
/* Whether we send section footer during migration */
* do not trigger spurious decompression errors.
*/
bool decompress_error_check;
+
+ /*
+ * This decides the size of guest memory chunk that will be used
+ * to track dirty bitmap clearing. The size of memory chunk will
+ * be GUEST_PAGE_SIZE << N. Say, N=0 means we will clear dirty
+ * bitmap for each page to send (1<<0=1); N=10 means we will clear
+ * dirty bitmap only once for 1<<10=1K contiguous guest pages
+ * (which is in a 4M chunk).
+ */
+ uint8_t clear_bitmap_shift;
};
void migrate_set_state(int *state, int old_state, int new_state);
void migration_fd_process_incoming(QEMUFile *f);
-void migration_ioc_process_incoming(QIOChannel *ioc);
+void migration_ioc_process_incoming(QIOChannel *ioc, Error **errp);
void migration_incoming_process(void);
bool migration_has_all_channels(void);
void migrate_fd_connect(MigrationState *s, Error *error_in);
+bool migration_is_setup_or_active(int state);
+
void migrate_init(MigrationState *s);
bool migration_is_blocked(Error **errp);
/* True if outgoing migration has entered postcopy phase */
bool migrate_postcopy_ram(void);
bool migrate_zero_blocks(void);
bool migrate_dirty_bitmaps(void);
+bool migrate_ignore_shared(void);
bool migrate_auto_converge(void);
bool migrate_use_multifd(void);
bool migrate_pause_before_switchover(void);
int migrate_multifd_channels(void);
-int migrate_multifd_page_count(void);
int migrate_use_xbzrle(void);
int64_t migrate_xbzrle_cache_size(void);
int migrate_max_cpu_throttle(void);
bool migrate_use_return_path(void);
+uint64_t ram_get_total_transferred_pages(void);
+
bool migrate_use_compression(void);
int migrate_compress_level(void);
int migrate_compress_threads(void);
void dirty_bitmap_mig_before_vm_start(void);
void init_dirty_bitmap_incoming_migration(void);
+void migrate_add_address(SocketAddress *address);
+
+int foreach_not_ignored_block(RAMBlockIterFunc func, void *opaque);
#define qemu_ram_foreach_block \
- #warning "Use qemu_ram_foreach_block_migratable in migration code"
+ #warning "Use foreach_not_ignored_block in migration code"
void migration_make_urgent_request(void);
void migration_consume_urgent_request(void);