 * Postcopy migration for RAM
 * Copyright 2013-2015 Red Hat, Inc. and/or its affiliates
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.

 * Postcopy is a migration technique where the execution flips from the
 * source to the destination before all the data has been copied.
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "migration/migration.h"
#include "migration/postcopy-ram.h"
#include "sysemu/sysemu.h"
#include "sysemu/balloon.h"
#include "qemu/error-report.h"
/* Arbitrary limit on size of each discard command,
 * keeps them around ~200 bytes
 */
#define MAX_DISCARDS_PER_COMMAND 12
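/*
 * Where the "~200 bytes" figure comes from (illustrative arithmetic, not
 * taken from the original source): each queued range is a (start, length)
 * pair of 64-bit values, so 12 entries take 12 * 2 * 8 = 192 bytes of
 * payload per discard command, plus the RAMBlock name and message header.
 */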
struct PostcopyDiscardState {
    const char *ramblock_name;
    uint64_t offset; /* Bitmap entry for the 1st bit of this RAMBlock */
     * Start and length of a discard range (bytes)
    uint64_t start_list[MAX_DISCARDS_PER_COMMAND];
    uint64_t length_list[MAX_DISCARDS_PER_COMMAND];
    unsigned int nsentwords;
    unsigned int nsentcmds;
/* Postcopy needs to detect accesses to pages that haven't yet been copied
 * across, and to map new pages in efficiently; the techniques for doing this
 * are target OS specific.
 */
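/*
 * On Linux the mechanism used below is userfaultfd(2). A minimal sketch of
 * the flow, assuming a kernel with userfaultfd support (illustrative only;
 * the real code below adds error handling, feature checks and a dedicated
 * fault thread):
 *
 *   int ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
 *
 *   struct uffdio_api api = { .api = UFFD_API, .features = 0 };
 *   ioctl(ufd, UFFDIO_API, &api);              // version/feature handshake
 *
 *   struct uffdio_register reg = {
 *       .range = { .start = (uintptr_t)addr, .len = len },
 *       .mode  = UFFDIO_REGISTER_MODE_MISSING, // notify on missing pages
 *   };
 *   ioctl(ufd, UFFDIO_REGISTER, &reg);         // watch this memory range
 *
 *   struct uffd_msg msg;
 *   read(ufd, &msg, sizeof(msg));              // one event per page fault
 *   // msg.arg.pagefault.address is the faulting host virtual address;
 *   // resolve it with UFFDIO_COPY (or UFFDIO_ZEROPAGE), which also wakes
 *   // the faulting thread.
 */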
#if defined(__linux__)

#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <asm/types.h> /* for __u64 */

#if defined(__linux__) && defined(__NR_userfaultfd) && defined(CONFIG_EVENTFD)
#include <sys/eventfd.h>
#include <linux/userfaultfd.h>
static bool ufd_version_check(int ufd)
    struct uffdio_api api_struct;

    api_struct.api = UFFD_API;
    api_struct.features = 0;
    if (ioctl(ufd, UFFDIO_API, &api_struct)) {
        error_report("postcopy_ram_supported_by_host: UFFDIO_API failed: %s",

    ioctl_mask = (__u64)1 << _UFFDIO_REGISTER |
                 (__u64)1 << _UFFDIO_UNREGISTER;
    if ((api_struct.ioctls & ioctl_mask) != ioctl_mask) {
        error_report("Missing userfault features: %" PRIx64,
                     (uint64_t)(~api_struct.ioctls & ioctl_mask));
 * Check for things that postcopy won't support; returns 0 if the block
static int check_range(const char *block_name, void *host_addr,
                       ram_addr_t offset, ram_addr_t length, void *opaque)
    RAMBlock *rb = qemu_ram_block_by_name(block_name);

    if (qemu_ram_pagesize(rb) > getpagesize()) {
        error_report("Postcopy doesn't support large page sizes yet (%s)",
 * Note: This has the side effect of munlock'ing all of RAM; that's
 * normally fine since if the postcopy succeeds it gets turned back on at the
bool postcopy_ram_supported_by_host(void)
    long pagesize = getpagesize();
    bool ret = false; /* Error unless we change it */
    void *testarea = NULL;
    struct uffdio_register reg_struct;
    struct uffdio_range range_struct;
    uint64_t feature_mask;

    if ((1ul << qemu_target_page_bits()) > pagesize) {
        error_report("Target page size bigger than host page size");

    /* Check for anything about the RAMBlocks we don't support */
    if (qemu_ram_foreach_block(check_range, NULL)) {
        /* check_range will have printed its own error */

    ufd = syscall(__NR_userfaultfd, O_CLOEXEC);
        error_report("%s: userfaultfd not available: %s", __func__,

    /* Version and features check */
    if (!ufd_version_check(ufd)) {

     * userfault and mlock don't go together; we'll put it back later if
        error_report("%s: munlockall: %s", __func__, strerror(errno));

     * We need to check that the ops we need are supported on anon memory
     * To do that we need to register a chunk and see the flags that
    testarea = mmap(NULL, pagesize, PROT_READ | PROT_WRITE, MAP_PRIVATE |
                    MAP_ANONYMOUS, -1, 0);
    if (testarea == MAP_FAILED) {
        error_report("%s: Failed to map test area: %s", __func__,

    g_assert(((size_t)testarea & (pagesize - 1)) == 0);

    reg_struct.range.start = (uintptr_t)testarea;
    reg_struct.range.len = pagesize;
    reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;

    if (ioctl(ufd, UFFDIO_REGISTER, &reg_struct)) {
        error_report("%s userfault register: %s", __func__, strerror(errno));

    range_struct.start = (uintptr_t)testarea;
    range_struct.len = pagesize;
    if (ioctl(ufd, UFFDIO_UNREGISTER, &range_struct)) {
        error_report("%s userfault unregister: %s", __func__, strerror(errno));

    feature_mask = (__u64)1 << _UFFDIO_WAKE |
                   (__u64)1 << _UFFDIO_COPY |
                   (__u64)1 << _UFFDIO_ZEROPAGE;
    if ((reg_struct.ioctls & feature_mask) != feature_mask) {
        error_report("Missing userfault map features: %" PRIx64,
                     (uint64_t)(~reg_struct.ioctls & feature_mask));

    munmap(testarea, pagesize);
 * Setup an area of RAM so that it *can* be used for postcopy later; this
 * must be done right at the start prior to pre-copy.
 * opaque should be the MIS.
static int init_range(const char *block_name, void *host_addr,
                      ram_addr_t offset, ram_addr_t length, void *opaque)
    MigrationIncomingState *mis = opaque;

    trace_postcopy_init_range(block_name, host_addr, offset, length);

     * We need the whole of RAM to be truly empty for postcopy, so things
     * like ROMs and any data tables built during init must be zero'd
     * - we're going to get the copy from the source anyway.
     * (Precopy will just overwrite this data, so doesn't need the discard)
    if (ram_discard_range(mis, block_name, 0, length)) {
 * At the end of migration, undo the effects of init_range
 * opaque should be the MIS.
static int cleanup_range(const char *block_name, void *host_addr,
                         ram_addr_t offset, ram_addr_t length, void *opaque)
    MigrationIncomingState *mis = opaque;
    struct uffdio_range range_struct;

    trace_postcopy_cleanup_range(block_name, host_addr, offset, length);

     * We turned off hugepage for the precopy stage with postcopy enabled;
     * we can turn it back on now.
    qemu_madvise(host_addr, length, QEMU_MADV_HUGEPAGE);

     * We can also turn off userfault now since we should have all the
     * pages. It can be useful to leave it on to debug postcopy
     * if you're not sure it's always getting every page.
    range_struct.start = (uintptr_t)host_addr;
    range_struct.len = length;

    if (ioctl(mis->userfault_fd, UFFDIO_UNREGISTER, &range_struct)) {
        error_report("%s: userfault unregister %s", __func__, strerror(errno));
 * Initialise postcopy-ram, setting the RAM to a state where we can go into
 * postcopy later; must be called prior to any precopy.
 * called from arch_init's similarly named ram_postcopy_incoming_init
int postcopy_ram_incoming_init(MigrationIncomingState *mis, size_t ram_pages)
    if (qemu_ram_foreach_block(init_range, mis)) {
 * At the end of a migration where postcopy_ram_incoming_init was called.
int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)
    trace_postcopy_ram_incoming_cleanup_entry();

    if (mis->have_fault_thread) {
        if (qemu_ram_foreach_block(cleanup_range, mis)) {

         * Tell the fault_thread to exit; it's an eventfd that should
         * currently be at 0, and we're going to increment it to 1.
        if (write(mis->userfault_quit_fd, &tmp64, 8) == 8) {
            trace_postcopy_ram_incoming_cleanup_join();
            qemu_thread_join(&mis->fault_thread);
            /* Not much we can do here, but may as well report it */
            error_report("%s: incrementing userfault_quit_fd: %s", __func__,

        trace_postcopy_ram_incoming_cleanup_closeuf();
        close(mis->userfault_fd);
        close(mis->userfault_quit_fd);
        mis->have_fault_thread = false;

    qemu_balloon_inhibit(false);

    if (os_mlock() < 0) {
        error_report("mlock: %s", strerror(errno));
         * It doesn't feel right to fail at this point; we have a valid

    postcopy_state_set(POSTCOPY_INCOMING_END);
    migrate_send_rp_shut(mis, qemu_file_get_error(mis->from_src_file) != 0);

    if (mis->postcopy_tmp_page) {
        munmap(mis->postcopy_tmp_page, mis->largest_page_size);
        mis->postcopy_tmp_page = NULL;
    if (mis->postcopy_tmp_zero_page) {
        munmap(mis->postcopy_tmp_zero_page, mis->largest_page_size);
        mis->postcopy_tmp_zero_page = NULL;
    trace_postcopy_ram_incoming_cleanup_exit();
 * Disable huge pages on an area
static int nhp_range(const char *block_name, void *host_addr,
                     ram_addr_t offset, ram_addr_t length, void *opaque)
    trace_postcopy_nhp_range(block_name, host_addr, offset, length);

     * Before we do discards, we need to ensure those discards really
     * do delete areas of the page, even if THP thinks a hugepage would
     * be a good idea, so force hugepages off.
    qemu_madvise(host_addr, length, QEMU_MADV_NOHUGEPAGE);

 * Userfault requires us to mark RAM as NOHUGEPAGE prior to discard;
 * however, leaving it until after precopy means that most of the precopy
int postcopy_ram_prepare_discard(MigrationIncomingState *mis)
    if (qemu_ram_foreach_block(nhp_range, mis)) {

    postcopy_state_set(POSTCOPY_INCOMING_DISCARD);
 * Mark the given area of RAM as requiring notification of accesses to
 * as-yet-unwritten pages.
 * Used as a callback on qemu_ram_foreach_block.
 * host_addr: Base of area to mark
 * offset: Offset in the whole ram arena
 * length: Length of the section
 * opaque: MigrationIncomingState pointer
 * Returns 0 on success
static int ram_block_enable_notify(const char *block_name, void *host_addr,
                                   ram_addr_t offset, ram_addr_t length,
    MigrationIncomingState *mis = opaque;
    struct uffdio_register reg_struct;

    reg_struct.range.start = (uintptr_t)host_addr;
    reg_struct.range.len = length;
    reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;

    /* Now tell our userfault_fd that it's responsible for this area */
    if (ioctl(mis->userfault_fd, UFFDIO_REGISTER, &reg_struct)) {
        error_report("%s userfault register: %s", __func__, strerror(errno));
 * Handle faults detected by the USERFAULT markings
static void *postcopy_ram_fault_thread(void *opaque)
    MigrationIncomingState *mis = opaque;
    RAMBlock *last_rb = NULL; /* last RAMBlock we sent part of */

    trace_postcopy_ram_fault_thread_entry();
    qemu_sem_post(&mis->fault_thread_sem);

        ram_addr_t rb_offset;
        struct pollfd pfd[2];

         * We're mainly waiting for the kernel to give us a faulting HVA;
         * however, we can be told to quit via userfault_quit_fd which is
        pfd[0].fd = mis->userfault_fd;
        pfd[0].events = POLLIN;
        pfd[1].fd = mis->userfault_quit_fd;
        pfd[1].events = POLLIN; /* Waiting for eventfd to go positive */

        if (poll(pfd, 2, -1 /* Wait forever */) == -1) {
            error_report("%s: userfault poll: %s", __func__, strerror(errno));

        if (pfd[1].revents) {
            trace_postcopy_ram_fault_thread_quit();

        ret = read(mis->userfault_fd, &msg, sizeof(msg));
        if (ret != sizeof(msg)) {
            if (errno == EAGAIN) {
                 * if a wake up happens on the other thread just after
                 * the poll, there is nothing to read.
            error_report("%s: Failed to read full userfault message: %s",
                         __func__, strerror(errno));
            error_report("%s: Read %d bytes from userfaultfd, expected %zd",
                         __func__, ret, sizeof(msg));
            break; /* Lost alignment, don't know what we'd read next */

        if (msg.event != UFFD_EVENT_PAGEFAULT) {
            error_report("%s: Read unexpected event %u from userfaultfd",
                         __func__, msg.event);
            continue; /* It's not a page fault, shouldn't happen */

        rb = qemu_ram_block_from_host(
                 (void *)(uintptr_t)msg.arg.pagefault.address,
            error_report("postcopy_ram_fault_thread: Fault outside guest: %"
                         PRIx64, (uint64_t)msg.arg.pagefault.address);

        rb_offset &= ~(qemu_ram_pagesize(rb) - 1);
        trace_postcopy_ram_fault_thread_request(msg.arg.pagefault.address,
                                                qemu_ram_get_idstr(rb),

             * Send the request to the source - we want to request one
             * of our host page sizes (which is >= TPS)
            migrate_send_rp_req_pages(mis, qemu_ram_get_idstr(rb),
                                      rb_offset, qemu_ram_pagesize(rb));
            /* Save some space */
            migrate_send_rp_req_pages(mis, NULL,
                                      rb_offset, qemu_ram_pagesize(rb));

    trace_postcopy_ram_fault_thread_exit();
int postcopy_ram_enable_notify(MigrationIncomingState *mis)
    /* Open the fd for the kernel to give us userfaults */
    mis->userfault_fd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
    if (mis->userfault_fd == -1) {
        error_report("%s: Failed to open userfault fd: %s", __func__,

     * Although the host check already tested the API, we need to
     * do the check again as an ABI handshake on the new fd.
    if (!ufd_version_check(mis->userfault_fd)) {

    /* Now an eventfd we use to tell the fault-thread to quit */
    mis->userfault_quit_fd = eventfd(0, EFD_CLOEXEC);
    if (mis->userfault_quit_fd == -1) {
        error_report("%s: Opening userfault_quit_fd: %s", __func__,
        close(mis->userfault_fd);

    qemu_sem_init(&mis->fault_thread_sem, 0);
    qemu_thread_create(&mis->fault_thread, "postcopy/fault",
                       postcopy_ram_fault_thread, mis, QEMU_THREAD_JOINABLE);
    qemu_sem_wait(&mis->fault_thread_sem);
    qemu_sem_destroy(&mis->fault_thread_sem);
    mis->have_fault_thread = true;

    /* Mark so that we get notified of accesses to unwritten areas */
    if (qemu_ram_foreach_block(ram_block_enable_notify, mis)) {

     * Ballooning can mark pages as absent while we're postcopying;
     * that would cause false userfaults.
    qemu_balloon_inhibit(true);

    trace_postcopy_ram_enable_notify();
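/*
 * How the incoming-side entry points above are intended to be called, in
 * order, over the life of one postcopy migration (an illustrative summary
 * drawn from the comments in this file, not a verbatim call sequence from
 * the migration core):
 *
 *   postcopy_ram_supported_by_host();     // capability probe, done up front
 *   postcopy_ram_incoming_init(mis, ...); // before any precopy data arrives
 *   postcopy_ram_prepare_discard(mis);    // when discard commands start
 *   postcopy_ram_enable_notify(mis);      // switch to userfault-driven mode
 *   ...postcopy_place_page()/postcopy_place_page_zero() per incoming page...
 *   postcopy_ram_incoming_cleanup(mis);   // at the end of the migration
 */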
 * Place a host page (from) at (host) atomically
 * returns 0 on success
int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from,
    struct uffdio_copy copy_struct;

    copy_struct.dst = (uint64_t)(uintptr_t)host;
    copy_struct.src = (uint64_t)(uintptr_t)from;
    copy_struct.len = pagesize;
    copy_struct.mode = 0;

    /* copy also acks to the kernel, waking the stalled thread up.
     * TODO: We can inhibit that ack and only do it if it was requested,
     * which would be slightly cheaper, but we'd have to be careful
     * of the order of updating our page state.
     */
    if (ioctl(mis->userfault_fd, UFFDIO_COPY, &copy_struct)) {
        error_report("%s: %s copy host: %p from: %p (size: %zd)",
                     __func__, strerror(e), host, from, pagesize);

    trace_postcopy_place_page(host);
 * Place a zero page at (host) atomically
 * returns 0 on success
int postcopy_place_page_zero(MigrationIncomingState *mis, void *host,
    trace_postcopy_place_page_zero(host);

    if (pagesize == getpagesize()) {
        struct uffdio_zeropage zero_struct;
        zero_struct.range.start = (uint64_t)(uintptr_t)host;
        zero_struct.range.len = getpagesize();
        zero_struct.mode = 0;

        if (ioctl(mis->userfault_fd, UFFDIO_ZEROPAGE, &zero_struct)) {
            error_report("%s: %s zero host: %p",
                         __func__, strerror(e), host);

        /* The kernel can't use UFFDIO_ZEROPAGE for hugepages */
        if (!mis->postcopy_tmp_zero_page) {
            mis->postcopy_tmp_zero_page = mmap(NULL, mis->largest_page_size,
                                               PROT_READ | PROT_WRITE,
                                               MAP_PRIVATE | MAP_ANONYMOUS,
            if (mis->postcopy_tmp_zero_page == MAP_FAILED) {
                mis->postcopy_tmp_zero_page = NULL;
                error_report("%s: %s mapping large zero page",
                             __func__, strerror(e));

            memset(mis->postcopy_tmp_zero_page, '\0', mis->largest_page_size);

        return postcopy_place_page(mis, host, mis->postcopy_tmp_zero_page,
 * Returns a target page of memory that can be mapped at a later point in time
 * using postcopy_place_page.
 * The same address is used repeatedly; postcopy_place_page just takes the
 * Returns: Pointer to allocated page
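/*
 * Typical use (an illustrative sketch, not an exact call site from this
 * file; host_addr and the receive step are placeholders):
 *
 *   void *tmp = postcopy_get_tmp_page(mis);
 *   ...read the incoming page contents into tmp...
 *   postcopy_place_page(mis, host_addr, tmp, pagesize);
 */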
void *postcopy_get_tmp_page(MigrationIncomingState *mis)
    if (!mis->postcopy_tmp_page) {
        mis->postcopy_tmp_page = mmap(NULL, mis->largest_page_size,
                                      PROT_READ | PROT_WRITE, MAP_PRIVATE |
                                      MAP_ANONYMOUS, -1, 0);
        if (mis->postcopy_tmp_page == MAP_FAILED) {
            mis->postcopy_tmp_page = NULL;
            error_report("%s: %s", __func__, strerror(errno));

    return mis->postcopy_tmp_page;
/* No target OS support, stubs just fail */
bool postcopy_ram_supported_by_host(void)
    error_report("%s: No OS support", __func__);

int postcopy_ram_incoming_init(MigrationIncomingState *mis, size_t ram_pages)
    error_report("postcopy_ram_incoming_init: No OS support");

int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)

int postcopy_ram_prepare_discard(MigrationIncomingState *mis)

int postcopy_ram_enable_notify(MigrationIncomingState *mis)

int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from,

int postcopy_place_page_zero(MigrationIncomingState *mis, void *host,

void *postcopy_get_tmp_page(MigrationIncomingState *mis)
/* ------------------------------------------------------------------------- */

 * postcopy_discard_send_init: Called at the start of each RAMBlock before
 * asking to discard individual ranges.
 * @ms: The current migration state.
 * @offset: the bitmap offset of the named RAMBlock in the migration
 * @name: RAMBlock that discards will operate on.
 * returns: a new PDS.
PostcopyDiscardState *postcopy_discard_send_init(MigrationState *ms,
                                                 unsigned long offset,
    PostcopyDiscardState *res = g_malloc0(sizeof(PostcopyDiscardState));

    res->ramblock_name = name;
    res->offset = offset;
 * postcopy_discard_send_range: Called by the bitmap code for each chunk to
 * discard. May send a discard message, may just leave it queued to
 * @ms: Current migration state.
 * @pds: Structure initialised by postcopy_discard_send_init().
 * @start,@length: a range of pages in the migration bitmap in the
 *   RAM block passed to postcopy_discard_send_init() (length=1 is one page)
void postcopy_discard_send_range(MigrationState *ms, PostcopyDiscardState *pds,
                                 unsigned long start, unsigned long length)
    size_t tp_bits = qemu_target_page_bits();
    /* Convert to byte offsets within the RAM block */
    pds->start_list[pds->cur_entry] = (start - pds->offset) << tp_bits;
    pds->length_list[pds->cur_entry] = length << tp_bits;
    trace_postcopy_discard_send_range(pds->ramblock_name, start, length);

    if (pds->cur_entry == MAX_DISCARDS_PER_COMMAND) {
        /* Full set, ship it! */
        qemu_savevm_send_postcopy_ram_discard(ms->to_dst_file,
 * postcopy_discard_send_finish: Called at the end of each RAMBlock by the
 * bitmap code. Sends any outstanding discard messages, frees the PDS
 * @ms: Current migration state.
 * @pds: Structure initialised by postcopy_discard_send_init().
void postcopy_discard_send_finish(MigrationState *ms, PostcopyDiscardState *pds)
    /* Anything unsent? */
    if (pds->cur_entry) {
        qemu_savevm_send_postcopy_ram_discard(ms->to_dst_file,

    trace_postcopy_discard_send_finish(pds->ramblock_name, pds->nsentwords,
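/*
 * A sketch of how the three discard-send calls above fit together, as the
 * source-side bitmap code would drive them (illustrative only; "ms",
 * "block_bitmap_offset", "pc.ram" and the page-run variables are
 * placeholders, not values taken from this file):
 *
 *   PostcopyDiscardState *pds =
 *       postcopy_discard_send_init(ms, block_bitmap_offset, "pc.ram");
 *
 *   // One call per run of pages that must be discarded on the destination;
 *   // ranges are queued and shipped automatically once
 *   // MAX_DISCARDS_PER_COMMAND entries have accumulated.
 *   postcopy_discard_send_range(ms, pds, first_page, run_length);
 *
 *   // Ship anything still queued and free the state.
 *   postcopy_discard_send_finish(ms, pds);
 */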