#include "hw/qdev.h"
#include "io/channel.h"
+struct PostcopyBlocktimeContext;
+
+#define MIGRATION_RESUME_ACK_VALUE (1)
+
/* State for the incoming migration */
struct MigrationIncomingState {
QEMUFile *from_src_file;
int userfault_event_fd;
QEMUFile *to_src_file;
QemuMutex rp_mutex; /* We send replies from multiple threads */
+ /* RAMBlock of last request sent to source */
+ RAMBlock *last_rb;
void *postcopy_tmp_page;
void *postcopy_tmp_zero_page;
+ /* PostCopyFDs for external userfaultfds & handlers of shared memory */
+ GArray *postcopy_remote_fds;
QEMUBH *bh;
/* The coroutine we should enter (back) after failover */
Coroutine *migration_incoming_co;
QemuSemaphore colo_incoming_sem;
+
+ /*
+ * PostcopyBlocktimeContext keeps the information needed during
+ * postcopy live migration to calculate vCPU block time
+ */
+ struct PostcopyBlocktimeContext *blocktime_ctx;
+
+ /* notify PAUSED postcopy incoming migrations to try to continue */
+ bool postcopy_recover_triggered;
+ QemuSemaphore postcopy_pause_sem_dst;
+ QemuSemaphore postcopy_pause_sem_fault;
};
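/*
 * Illustrative sketch (not part of this patch): rp_mutex serializes
 * writes to to_src_file, since return-path replies can be sent from
 * multiple threads.  The helper name and message framing below are
 * assumptions; the real senders live in migration.c.
 */
static int example_send_rp_message(MigrationIncomingState *mis,
                                   uint16_t type, uint16_t len, void *data)
{
    int ret = 0;

    qemu_mutex_lock(&mis->rp_mutex);
    if (!mis->to_src_file) {
        ret = -EIO;                       /* return path already gone */
    } else {
        qemu_put_be16(mis->to_src_file, type);
        qemu_put_be16(mis->to_src_file, len);
        qemu_put_buffer(mis->to_src_file, data, len);
        qemu_fflush(mis->to_src_file);    /* push the reply out now */
    }
    qemu_mutex_unlock(&mis->rp_mutex);
    return ret;
}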
MigrationIncomingState *migration_incoming_get_current(void);
void migration_incoming_state_destroy(void);
+/*
+ * Functions to work with blocktime context
+ */
+void fill_destination_postcopy_migration_info(MigrationInfo *info);
#define TYPE_MIGRATION "migration"
QemuThread thread;
QEMUBH *cleanup_bh;
QEMUFile *to_dst_file;
+ /*
+ * Protects the to_dst_file pointer. We need to make sure we won't
+ * yield or hang during the critical section, since this lock will
+ * be used in OOB command handlers.
+ */
+ QemuMutex qemu_file_lock;
+
+ /*
+ * Used to allow urgent requests to override rate limiting.
+ */
+ QemuSemaphore rate_limit_sem;
/* bytes already sent at the beginning of the current iteration */
uint64_t iteration_initial_bytes;
QEMUFile *from_dst_file;
QemuThread rp_thread;
bool error;
+ QemuSemaphore rp_sem;
} rp_state;
double mbps;
bool send_configuration;
/* Whether we send a section footer during migration */
bool send_section_footer;
+
+ /* Needed by postcopy-pause state */
+ QemuSemaphore postcopy_pause_sem;
+ QemuSemaphore postcopy_pause_rp_sem;
+ /*
+ * Whether we abort the migration if decompression errors are
+ * detected at the destination. It is left at false for qemu
+ * older than 3.0, since only newer qemu sends streams that
+ * do not trigger spurious decompression errors.
+ */
+ bool decompress_error_check;
};
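/*
 * Illustrative sketch (not part of this patch): qemu_file_lock guards
 * only the to_dst_file pointer itself; blocking work on the file has to
 * happen outside the critical section because the lock may be taken
 * from an OOB command handler.  example_shutdown_to_dst_file() is a
 * hypothetical caller following that rule.
 */
static void example_shutdown_to_dst_file(MigrationState *s)
{
    QEMUFile *f;

    /* Grab and clear the pointer under the lock; no I/O while holding it */
    qemu_mutex_lock(&s->qemu_file_lock);
    f = s->to_dst_file;
    s->to_dst_file = NULL;
    qemu_mutex_unlock(&s->qemu_file_lock);

    if (f) {
        qemu_file_shutdown(f);  /* potentially blocking, done unlocked */
        qemu_fclose(f);
    }
}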
void migrate_set_state(int *state, int old_state, int new_state);
void migration_fd_process_incoming(QEMUFile *f);
void migration_ioc_process_incoming(QIOChannel *ioc);
+void migration_incoming_process(void);
bool migration_has_all_channels(void);
bool migrate_use_block(void);
bool migrate_use_block_incremental(void);
+int migrate_max_cpu_throttle(void);
bool migrate_use_return_path(void);
bool migrate_use_compression(void);
int migrate_compress_level(void);
int migrate_compress_threads(void);
+int migrate_compress_wait_thread(void);
int migrate_decompress_threads(void);
bool migrate_use_events(void);
+bool migrate_postcopy_blocktime(void);
/* Sending on the return path - generic and then for each message type */
void migrate_send_rp_shut(MigrationIncomingState *mis,
uint32_t value);
int migrate_send_rp_req_pages(MigrationIncomingState *mis, const char* rbname,
ram_addr_t start, size_t len);
+void migrate_send_rp_recv_bitmap(MigrationIncomingState *mis,
+ char *block_name);
+void migrate_send_rp_resume_ack(MigrationIncomingState *mis, uint32_t value);
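/*
 * Illustrative sketch (not part of this patch): once a paused postcopy
 * migration has been resumed on the destination, it acknowledges the
 * resume to the source over the return path.  The handler name below is
 * hypothetical; MIGRATION_RESUME_ACK_VALUE is the value defined above.
 */
static void example_ack_postcopy_resume(MigrationIncomingState *mis)
{
    /* Tell the source we are ready to keep servicing page requests */
    migrate_send_rp_resume_ack(mis, MIGRATION_RESUME_ACK_VALUE);
}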
void dirty_bitmap_mig_before_vm_start(void);
void init_dirty_bitmap_incoming_migration(void);
+#define qemu_ram_foreach_block \
+ #warning "Use qemu_ram_foreach_block_migratable in migration code"
+
+void migration_make_urgent_request(void);
+void migration_consume_urgent_request(void);
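/*
 * Illustrative sketch (not part of this patch): rate_limit_sem lets an
 * urgent page request wake the migration thread out of its rate-limit
 * wait.  One plausible way the two helpers above could sit on top of it
 * (the real implementation is in migration.c and takes no argument):
 */
static inline void example_make_urgent_request(MigrationState *s)
{
    qemu_sem_post(&s->rate_limit_sem);  /* wake a rate-limited sender */
}

static inline void example_consume_urgent_request(MigrationState *s)
{
    qemu_sem_wait(&s->rate_limit_sem);  /* consume one urgent wakeup */
}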
+
#endif