// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/kernel/power/snapshot.c
 *
 * This file provides system snapshot/restore functionality for swsusp.
 *
 * Copyright (C) 1998-2005 Pavel Machek <pavel@ucw.cz>
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@rjwysocki.net>
 */

#define pr_fmt(fmt) "PM: hibernation: " fmt

#include <linux/version.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/suspend.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/nmi.h>
#include <linux/syscalls.h>
#include <linux/console.h>
#include <linux/highmem.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/compiler.h>
#include <linux/ktime.h>
#include <linux/set_memory.h>

#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#include "power.h"

#if defined(CONFIG_STRICT_KERNEL_RWX) && defined(CONFIG_ARCH_HAS_SET_MEMORY)
static bool hibernate_restore_protection;
static bool hibernate_restore_protection_active;

void enable_restore_image_protection(void)
{
	hibernate_restore_protection = true;
}

static inline void hibernate_restore_protection_begin(void)
{
	hibernate_restore_protection_active = hibernate_restore_protection;
}

static inline void hibernate_restore_protection_end(void)
{
	hibernate_restore_protection_active = false;
}

static inline int __must_check hibernate_restore_protect_page(void *page_address)
{
	if (hibernate_restore_protection_active)
		return set_memory_ro((unsigned long)page_address, 1);
	return 0;
}

static inline int hibernate_restore_unprotect_page(void *page_address)
{
	if (hibernate_restore_protection_active)
		return set_memory_rw((unsigned long)page_address, 1);
	return 0;
}
#else
static inline void hibernate_restore_protection_begin(void) {}
static inline void hibernate_restore_protection_end(void) {}
static inline int __must_check hibernate_restore_protect_page(void *page_address) { return 0; }
static inline int hibernate_restore_unprotect_page(void *page_address) { return 0; }
#endif /* CONFIG_STRICT_KERNEL_RWX && CONFIG_ARCH_HAS_SET_MEMORY */

/*
 * The calls to set_direct_map_*() should not fail because remapping a page
 * here means that we only update protection bits in an existing PTE.
 * It is still worth having a warning here in case something changes and this
 * is no longer the case.
 */
static inline void hibernate_map_page(struct page *page)
{
	if (IS_ENABLED(CONFIG_ARCH_HAS_SET_DIRECT_MAP)) {
		int ret = set_direct_map_default_noflush(page);

		if (ret)
			pr_warn_once("Failed to remap page\n");
	} else {
		debug_pagealloc_map_pages(page, 1);
	}
}

static inline void hibernate_unmap_page(struct page *page)
{
	if (IS_ENABLED(CONFIG_ARCH_HAS_SET_DIRECT_MAP)) {
		unsigned long addr = (unsigned long)page_address(page);
		int ret = set_direct_map_invalid_noflush(page);

		if (ret)
			pr_warn_once("Failed to remap page\n");

		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
	} else {
		debug_pagealloc_unmap_pages(page, 1);
	}
}

static int swsusp_page_is_free(struct page *);
static void swsusp_set_page_forbidden(struct page *);
static void swsusp_unset_page_forbidden(struct page *);

/*
 * Number of bytes to reserve for memory allocations made by device drivers
 * from their ->freeze() and ->freeze_noirq() callbacks so that they don't
 * cause image creation to fail (tunable via /sys/power/reserved_size).
 */
unsigned long reserved_size;

void __init hibernate_reserved_size_init(void)
{
	reserved_size = SPARE_PAGES * PAGE_SIZE;
}

/*
 * Preferred image size in bytes (tunable via /sys/power/image_size).
 * When it is set to N, swsusp will do its best to ensure the image
 * size will not exceed N bytes, but if that is impossible, it will
 * try to create the smallest image possible.
 */
unsigned long image_size;

void __init hibernate_image_size_init(void)
{
	image_size = ((totalram_pages() * 2) / 5) * PAGE_SIZE;
}

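/*
 * Worked example (illustrative numbers, not from this file): on a machine
 * with 16 GiB of RAM and 4 KiB pages, totalram_pages() is about 4194304,
 * so the default image_size is (4194304 * 2 / 5) * 4096 bytes, roughly
 * 6.4 GiB, i.e. two fifths of total RAM.
 */
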
/*
 * List of PBEs needed for restoring the pages that were allocated before
 * the suspend and included in the suspend image, but have also been
 * allocated by the "resume" kernel, so their contents cannot be written
 * directly to their "original" page frames.
 */
struct pbe *restore_pblist;

/* struct linked_page is used to build chains of pages */

#define LINKED_PAGE_DATA_SIZE	(PAGE_SIZE - sizeof(void *))

struct linked_page {
	struct linked_page *next;
	char data[LINKED_PAGE_DATA_SIZE];
} __packed;

/*
 * List of "safe" pages (ie. pages that were not used by the image kernel
 * before hibernation) that may be used as temporary storage for image kernel
 * memory contents.
 */
static struct linked_page *safe_pages_list;

/* Pointer to an auxiliary buffer (1 page) */
static void *buffer;

#define PG_ANY		0
#define PG_SAFE		1
#define PG_UNSAFE_CLEAR	1
#define PG_UNSAFE_KEEP	0

static unsigned int allocated_unsafe_pages;

/**
 * get_image_page - Allocate a page for a hibernation image.
 * @gfp_mask: GFP mask for the allocation.
 * @safe_needed: Get pages that were not used before hibernation (restore only)
 *
 * During image restoration, for storing the PBE list and the image data, we can
 * only use memory pages that do not conflict with the pages used before
 * hibernation. The "unsafe" pages have PageNosaveFree set and we count them
 * using allocated_unsafe_pages.
 *
 * Each allocated image page is marked as PageNosave and PageNosaveFree so that
 * swsusp_free() can release it.
 */
static void *get_image_page(gfp_t gfp_mask, int safe_needed)
{
	void *res;

	res = (void *)get_zeroed_page(gfp_mask);
	if (safe_needed)
		while (res && swsusp_page_is_free(virt_to_page(res))) {
			/* The page is unsafe, mark it for swsusp_free() */
			swsusp_set_page_forbidden(virt_to_page(res));
			allocated_unsafe_pages++;
			res = (void *)get_zeroed_page(gfp_mask);
		}
	if (res) {
		swsusp_set_page_forbidden(virt_to_page(res));
		swsusp_set_page_free(virt_to_page(res));
	}
	return res;
}

static void *__get_safe_page(gfp_t gfp_mask)
{
	if (safe_pages_list) {
		void *ret = safe_pages_list;

		safe_pages_list = safe_pages_list->next;
		memset(ret, 0, PAGE_SIZE);
		return ret;
	}
	return get_image_page(gfp_mask, PG_SAFE);
}

unsigned long get_safe_page(gfp_t gfp_mask)
{
	return (unsigned long)__get_safe_page(gfp_mask);
}

static struct page *alloc_image_page(gfp_t gfp_mask)
{
	struct page *page;

	page = alloc_page(gfp_mask);
	if (page) {
		swsusp_set_page_forbidden(page);
		swsusp_set_page_free(page);
	}
	return page;
}

static void recycle_safe_page(void *page_address)
{
	struct linked_page *lp = page_address;

	lp->next = safe_pages_list;
	safe_pages_list = lp;
}

/**
 * free_image_page - Free a page allocated for hibernation image.
 * @addr: Address of the page to free.
 * @clear_nosave_free: If set, clear the PageNosaveFree bit for the page.
 *
 * The page to free should have been allocated by get_image_page() (page flags
 * set by it are affected).
 */
static inline void free_image_page(void *addr, int clear_nosave_free)
{
	struct page *page;

	BUG_ON(!virt_addr_valid(addr));

	page = virt_to_page(addr);

	swsusp_unset_page_forbidden(page);
	if (clear_nosave_free)
		swsusp_unset_page_free(page);

	__free_page(page);
}

static inline void free_list_of_pages(struct linked_page *list,
				      int clear_page_nosave)
{
	while (list) {
		struct linked_page *lp = list->next;

		free_image_page(list, clear_page_nosave);
		list = lp;
	}
}

/*
 * struct chain_allocator is used for allocating small objects out of
 * a linked list of pages called 'the chain'.
 *
 * The chain grows each time when there is no room for a new object in
 * the current page. The allocated objects cannot be freed individually.
 * It is only possible to free them all at once, by freeing the entire
 * chain.
 *
 * NOTE: The chain allocator may be inefficient if the allocated objects
 * are not much smaller than PAGE_SIZE.
 */
struct chain_allocator {
	struct linked_page *chain;	/* the chain */
	unsigned int used_space;	/* total size of objects allocated out
					   of the current page */
	gfp_t gfp_mask;		/* mask for allocating pages */
	int safe_needed;	/* if set, only "safe" pages are allocated */
};

static void chain_init(struct chain_allocator *ca, gfp_t gfp_mask,
		       int safe_needed)
{
	ca->chain = NULL;
	ca->used_space = LINKED_PAGE_DATA_SIZE;
	ca->gfp_mask = gfp_mask;
	ca->safe_needed = safe_needed;
}

static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
{
	void *ret;

	if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
		struct linked_page *lp;

		lp = ca->safe_needed ? __get_safe_page(ca->gfp_mask) :
					get_image_page(ca->gfp_mask, PG_ANY);
		if (!lp)
			return NULL;

		lp->next = ca->chain;
		ca->chain = lp;
		ca->used_space = 0;
	}
	ret = ca->chain->data + ca->used_space;
	ca->used_space += size;
	return ret;
}

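/*
 * Typical use (a sketch, not an actual call site): the memory bitmap code
 * below carves its small objects out of a chain roughly like this:
 *
 *	struct chain_allocator ca;
 *
 *	chain_init(&ca, GFP_KERNEL, PG_SAFE);
 *	node = chain_alloc(&ca, sizeof(struct rtree_node));
 *	...
 *	free_list_of_pages(ca.chain, PG_UNSAFE_CLEAR);
 *
 * All objects allocated from the chain go away together when the chain's
 * pages are freed; there is no per-object free.
 */
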
/*
 * Data types related to memory bitmaps.
 *
 * Memory bitmap is a structure consisting of many linked lists of
 * objects. The main list's elements are of type struct zone_bitmap
 * and each of them corresponds to one zone. For each zone bitmap
 * object there is a list of objects of type struct bm_block that
 * represent each block of the bitmap in which information is stored.
 *
 * struct memory_bitmap contains a pointer to the main list of zone
 * bitmap objects, a struct bm_position used for browsing the bitmap,
 * and a pointer to the list of pages used for allocating all of the
 * zone bitmap objects and bitmap block objects.
 *
 * NOTE: It has to be possible to lay out the bitmap in memory
 * using only allocations of order 0. Additionally, the bitmap is
 * designed to work with an arbitrary number of zones (this is over the
 * top for now, but let's avoid making unnecessary assumptions ;-).
 *
 * struct zone_bitmap contains a pointer to a list of bitmap block
 * objects and a pointer to the bitmap block object that has been
 * most recently used for setting bits. Additionally, it contains the
 * PFNs that correspond to the start and end of the represented zone.
 *
 * struct bm_block contains a pointer to the memory page in which
 * information is stored (in the form of a block of bitmap).
 * It also contains the PFNs that correspond to the start and end of
 * the represented memory area.
 *
 * The memory bitmap is organized as a radix tree to guarantee fast random
 * access to the bits. There is one radix tree for each zone (as returned
 * from create_mem_extents).
 *
 * One radix tree is represented by one struct mem_zone_bm_rtree. There are
 * two linked lists for the nodes of the tree, one for the inner nodes and
 * one for the leaf nodes. The linked leaf nodes are used for fast linear
 * access of the memory bitmap.
 *
 * The struct rtree_node represents one node of the radix tree.
 */

#define BM_END_OF_MAP	(~0UL)

#define BM_BITS_PER_BLOCK	(PAGE_SIZE * BITS_PER_BYTE)
#define BM_BLOCK_SHIFT		(PAGE_SHIFT + 3)
#define BM_BLOCK_MASK		((1UL << BM_BLOCK_SHIFT) - 1)

/*
 * struct rtree_node is a wrapper struct to link the nodes
 * of the rtree together for easy linear iteration over
 * bits and easy freeing.
 */
struct rtree_node {
	struct list_head list;
	unsigned long *data;
};

/*
 * struct mem_zone_bm_rtree represents a bitmap used for one
 * populated memory zone.
 */
struct mem_zone_bm_rtree {
	struct list_head list;		/* Link Zones together */
	struct list_head nodes;		/* Radix Tree inner nodes */
	struct list_head leaves;	/* Radix Tree leaves */
	unsigned long start_pfn;	/* Zone start page frame */
	unsigned long end_pfn;		/* Zone end page frame + 1 */
	struct rtree_node *rtree;	/* Radix Tree Root */
	int levels;			/* Number of Radix Tree Levels */
	unsigned int blocks;		/* Number of Bitmap Blocks */
};

/* struct bm_position is used for browsing memory bitmaps */

struct bm_position {
	struct mem_zone_bm_rtree *zone;
	struct rtree_node *node;
	unsigned long node_pfn;
	unsigned long cur_pfn;
	int node_bit;
};

struct memory_bitmap {
	struct list_head zones;
	struct linked_page *p_list;	/* list of pages used to store zone
					   bitmap objects and bitmap block
					   objects */
	struct bm_position cur;	/* most recently used bit position */
};

/* Functions that operate on memory bitmaps */

#define BM_ENTRIES_PER_LEVEL	(PAGE_SIZE / sizeof(unsigned long))
#if BITS_PER_LONG == 32
#define BM_RTREE_LEVEL_SHIFT	(PAGE_SHIFT - 2)
#else
#define BM_RTREE_LEVEL_SHIFT	(PAGE_SHIFT - 3)
#endif
#define BM_RTREE_LEVEL_MASK	((1UL << BM_RTREE_LEVEL_SHIFT) - 1)

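/*
 * Geometry example (assuming 4 KiB pages on a 64-bit machine): each leaf
 * page holds BM_BITS_PER_BLOCK = 4096 * 8 = 32768 bits, so one leaf covers
 * 2^15 PFNs (BM_BLOCK_SHIFT = 15), and each inner node holds
 * BM_ENTRIES_PER_LEVEL = 4096 / 8 = 512 pointers (BM_RTREE_LEVEL_SHIFT = 9).
 * A single inner level is therefore enough for 512 * 32768 PFNs, i.e.
 * 64 GiB of physical address space per zone.
 */
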
/**
 * alloc_rtree_node - Allocate a new node and add it to the radix tree.
 * @gfp_mask: GFP mask for the allocation.
 * @safe_needed: Get pages not used before hibernation (restore only)
 * @ca: Pointer to a linked list of pages ("a chain") to allocate from
 * @list: List to add the new node to.
 *
 * This function is used to allocate inner nodes as well as the
 * leaf nodes of the radix tree. It also adds the node to the
 * corresponding linked list passed in by the *list parameter.
 */
static struct rtree_node *alloc_rtree_node(gfp_t gfp_mask, int safe_needed,
					   struct chain_allocator *ca,
					   struct list_head *list)
{
	struct rtree_node *node;

	node = chain_alloc(ca, sizeof(struct rtree_node));
	if (!node)
		return NULL;

	node->data = get_image_page(gfp_mask, safe_needed);
	if (!node->data)
		return NULL;

	list_add_tail(&node->list, list);

	return node;
}

/**
 * add_rtree_block - Add a new leaf node to the radix tree.
 *
 * The leaf nodes need to be allocated in order to keep the leaves
 * linked list in order. This is guaranteed by the zone->blocks
 * counter.
 */
static int add_rtree_block(struct mem_zone_bm_rtree *zone, gfp_t gfp_mask,
			   int safe_needed, struct chain_allocator *ca)
{
	struct rtree_node *node, *block, **dst;
	unsigned int levels_needed, block_nr;
	int i;

	block_nr = zone->blocks;
	levels_needed = 0;

	/* How many levels do we need for this block nr? */
	while (block_nr) {
		levels_needed += 1;
		block_nr >>= BM_RTREE_LEVEL_SHIFT;
	}

	/* Make sure the rtree has enough levels */
	for (i = zone->levels; i < levels_needed; i++) {
		node = alloc_rtree_node(gfp_mask, safe_needed, ca,
					&zone->nodes);
		if (!node)
			return -ENOMEM;

		node->data[0] = (unsigned long)zone->rtree;
		zone->rtree = node;
		zone->levels += 1;
	}

	/* Allocate new block */
	block = alloc_rtree_node(gfp_mask, safe_needed, ca, &zone->leaves);
	if (!block)
		return -ENOMEM;

	/* Now walk the rtree to insert the block */
	node = zone->rtree;
	dst = &zone->rtree;
	block_nr = zone->blocks;
	for (i = zone->levels; i > 0; i--) {
		int index;

		if (!node) {
			node = alloc_rtree_node(gfp_mask, safe_needed, ca,
						&zone->nodes);
			if (!node)
				return -ENOMEM;
			*dst = node;
		}

		index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
		index &= BM_RTREE_LEVEL_MASK;
		dst = (struct rtree_node **)&((*dst)->data[index]);
		node = *dst;
	}

	zone->blocks += 1;
	*dst = block;

	return 0;
}

static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
			       int clear_nosave_free);

/**
 * create_zone_bm_rtree - Create a radix tree for one zone.
 *
 * Allocates the mem_zone_bm_rtree structure and initializes it.
 * This function also allocates and builds the radix tree for the
 * zone.
 */
static struct mem_zone_bm_rtree *create_zone_bm_rtree(gfp_t gfp_mask,
						      int safe_needed,
						      struct chain_allocator *ca,
						      unsigned long start,
						      unsigned long end)
{
	struct mem_zone_bm_rtree *zone;
	unsigned int i, nr_blocks;
	unsigned long pages;

	pages = end - start;
	zone = chain_alloc(ca, sizeof(struct mem_zone_bm_rtree));
	if (!zone)
		return NULL;

	INIT_LIST_HEAD(&zone->nodes);
	INIT_LIST_HEAD(&zone->leaves);
	zone->start_pfn = start;
	zone->end_pfn = end;
	nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);

	for (i = 0; i < nr_blocks; i++) {
		if (add_rtree_block(zone, gfp_mask, safe_needed, ca)) {
			free_zone_bm_rtree(zone, PG_UNSAFE_CLEAR);
			return NULL;
		}
	}

	return zone;
}

/**
 * free_zone_bm_rtree - Free the memory of the radix tree.
 *
 * Free all node pages of the radix tree. The mem_zone_bm_rtree
 * structure itself is not freed here nor are the rtree_node
 * structs.
 */
static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
			       int clear_nosave_free)
{
	struct rtree_node *node;

	list_for_each_entry(node, &zone->nodes, list)
		free_image_page(node->data, clear_nosave_free);

	list_for_each_entry(node, &zone->leaves, list)
		free_image_page(node->data, clear_nosave_free);
}

static void memory_bm_position_reset(struct memory_bitmap *bm)
{
	bm->cur.zone = list_entry(bm->zones.next, struct mem_zone_bm_rtree,
				  list);
	bm->cur.node = list_entry(bm->cur.zone->leaves.next,
				  struct rtree_node, list);
	bm->cur.node_pfn = 0;
	bm->cur.cur_pfn = BM_END_OF_MAP;
	bm->cur.node_bit = 0;
}

static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);

struct mem_extent {
	struct list_head hook;
	unsigned long start;
	unsigned long end;
};

/**
 * free_mem_extents - Free a list of memory extents.
 * @list: List of extents to free.
 */
static void free_mem_extents(struct list_head *list)
{
	struct mem_extent *ext, *aux;

	list_for_each_entry_safe(ext, aux, list, hook) {
		list_del(&ext->hook);
		kfree(ext);
	}
}

/**
 * create_mem_extents - Create a list of memory extents.
 * @list: List to put the extents into.
 * @gfp_mask: Mask to use for memory allocations.
 *
 * The extents represent contiguous ranges of PFNs.
 */
static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
{
	struct zone *zone;

	INIT_LIST_HEAD(list);

	for_each_populated_zone(zone) {
		unsigned long zone_start, zone_end;
		struct mem_extent *ext, *cur, *aux;

		zone_start = zone->zone_start_pfn;
		zone_end = zone_end_pfn(zone);

		list_for_each_entry(ext, list, hook)
			if (zone_start <= ext->end)
				break;

		if (&ext->hook == list || zone_end < ext->start) {
			/* New extent is necessary */
			struct mem_extent *new_ext;

			new_ext = kzalloc(sizeof(struct mem_extent), gfp_mask);
			if (!new_ext) {
				free_mem_extents(list);
				return -ENOMEM;
			}
			new_ext->start = zone_start;
			new_ext->end = zone_end;
			list_add_tail(&new_ext->hook, &ext->hook);
			continue;
		}

		/* Merge this zone's range of PFNs with the existing one */
		if (zone_start < ext->start)
			ext->start = zone_start;
		if (zone_end > ext->end)
			ext->end = zone_end;

		/* More merging may be possible */
		cur = ext;
		list_for_each_entry_safe_continue(cur, aux, list, hook) {
			if (zone_end < cur->start)
				break;
			if (zone_end < cur->end)
				ext->end = cur->end;
			list_del(&cur->hook);
			kfree(cur);
		}
	}

	return 0;
}

/**
 * memory_bm_create - Allocate memory for a memory bitmap.
 */
static int memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask,
			    int safe_needed)
{
	struct chain_allocator ca;
	struct list_head mem_extents;
	struct mem_extent *ext;
	int error;

	chain_init(&ca, gfp_mask, safe_needed);
	INIT_LIST_HEAD(&bm->zones);

	error = create_mem_extents(&mem_extents, gfp_mask);
	if (error)
		return error;

	list_for_each_entry(ext, &mem_extents, hook) {
		struct mem_zone_bm_rtree *zone;

		zone = create_zone_bm_rtree(gfp_mask, safe_needed, &ca,
					    ext->start, ext->end);
		if (!zone) {
			error = -ENOMEM;
			goto Error;
		}
		list_add_tail(&zone->list, &bm->zones);
	}

	bm->p_list = ca.chain;
	memory_bm_position_reset(bm);
 Exit:
	free_mem_extents(&mem_extents);
	return error;

 Error:
	bm->p_list = ca.chain;
	memory_bm_free(bm, PG_UNSAFE_CLEAR);
	goto Exit;
}

/**
 * memory_bm_free - Free memory occupied by the memory bitmap.
 * @bm: Memory bitmap.
 */
static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
{
	struct mem_zone_bm_rtree *zone;

	list_for_each_entry(zone, &bm->zones, list)
		free_zone_bm_rtree(zone, clear_nosave_free);

	free_list_of_pages(bm->p_list, clear_nosave_free);

	INIT_LIST_HEAD(&bm->zones);
}

/**
 * memory_bm_find_bit - Find the bit for a given PFN in a memory bitmap.
 *
 * Find the bit in memory bitmap @bm that corresponds to the given PFN.
 * The cur.zone, cur.node and cur.node_pfn members of @bm are updated.
 *
 * Walk the radix tree to find the page containing the bit that represents @pfn
 * and return the position of the bit in @addr and @bit_nr.
 */
static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
			      void **addr, unsigned int *bit_nr)
{
	struct mem_zone_bm_rtree *curr, *zone;
	struct rtree_node *node;
	int i, block_nr;

	zone = bm->cur.zone;

	if (pfn >= zone->start_pfn && pfn < zone->end_pfn)
		goto zone_found;

	zone = NULL;

	/* Find the right zone */
	list_for_each_entry(curr, &bm->zones, list) {
		if (pfn >= curr->start_pfn && pfn < curr->end_pfn) {
			zone = curr;
			break;
		}
	}

	if (!zone)
		return -EFAULT;

zone_found:
	/*
	 * We have found the zone. Now walk the radix tree to find the leaf node
	 * for our PFN.
	 */

	/*
	 * If the zone we wish to scan is the current zone and the
	 * pfn falls into the current node then we do not need to walk
	 * the tree.
	 */
	node = bm->cur.node;
	if (zone == bm->cur.zone &&
	    ((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
		goto node_found;

	node = zone->rtree;
	block_nr = (pfn - zone->start_pfn) >> BM_BLOCK_SHIFT;

	for (i = zone->levels; i > 0; i--) {
		int index;

		index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
		index &= BM_RTREE_LEVEL_MASK;
		BUG_ON(node->data[index] == 0);
		node = (struct rtree_node *)node->data[index];
	}

node_found:
	/* Update last position */
	bm->cur.zone = zone;
	bm->cur.node = node;
	bm->cur.node_pfn = (pfn - zone->start_pfn) & ~BM_BLOCK_MASK;
	bm->cur.cur_pfn = pfn;

	/* Set return values */
	*addr = node->data;
	*bit_nr = (pfn - zone->start_pfn) & BM_BLOCK_MASK;

	return 0;
}

static void memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	set_bit(bit, addr);
}

static int mem_bm_set_bit_check(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	if (!error)
		set_bit(bit, addr);

	return error;
}

static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	clear_bit(bit, addr);
}

static void memory_bm_clear_current(struct memory_bitmap *bm)
{
	int bit;

	bit = max(bm->cur.node_bit - 1, 0);
	clear_bit(bit, bm->cur.node->data);
}

static unsigned long memory_bm_get_current(struct memory_bitmap *bm)
{
	return bm->cur.cur_pfn;
}

static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	return test_bit(bit, addr);
}

static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;

	return !memory_bm_find_bit(bm, pfn, &addr, &bit);
}

/*
 * rtree_next_node - Jump to the next leaf node.
 *
 * Set the position to the beginning of the next node in the
 * memory bitmap. This is either the next node in the current
 * zone's radix tree or the first node in the radix tree of the
 * next zone.
 *
 * Return true if there is a next node, false otherwise.
 */
static bool rtree_next_node(struct memory_bitmap *bm)
{
	if (!list_is_last(&bm->cur.node->list, &bm->cur.zone->leaves)) {
		bm->cur.node = list_entry(bm->cur.node->list.next,
					  struct rtree_node, list);
		bm->cur.node_pfn += BM_BITS_PER_BLOCK;
		bm->cur.node_bit = 0;
		touch_softlockup_watchdog();
		return true;
	}

	/* No more nodes, goto next zone */
	if (!list_is_last(&bm->cur.zone->list, &bm->zones)) {
		bm->cur.zone = list_entry(bm->cur.zone->list.next,
					  struct mem_zone_bm_rtree, list);
		bm->cur.node = list_entry(bm->cur.zone->leaves.next,
					  struct rtree_node, list);
		bm->cur.node_pfn = 0;
		bm->cur.node_bit = 0;
		return true;
	}

	/* No more zones */
	return false;
}

/**
 * memory_bm_next_pfn - Find the next set bit in a memory bitmap.
 * @bm: Memory bitmap.
 *
 * Starting from the last returned position this function searches for the next
 * set bit in @bm and returns the PFN represented by it. If no more bits are
 * set, BM_END_OF_MAP is returned.
 *
 * It is required to run memory_bm_position_reset() before the first call to
 * this function for the given memory bitmap.
 */
static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
{
	unsigned long bits, pfn, pages;
	int bit;

	do {
		pages = bm->cur.zone->end_pfn - bm->cur.zone->start_pfn;
		bits = min(pages - bm->cur.node_pfn, BM_BITS_PER_BLOCK);
		bit = find_next_bit(bm->cur.node->data, bits,
				    bm->cur.node_bit);
		if (bit < bits) {
			pfn = bm->cur.zone->start_pfn + bm->cur.node_pfn + bit;
			bm->cur.node_bit = bit + 1;
			bm->cur.cur_pfn = pfn;
			return pfn;
		}
	} while (rtree_next_node(bm));

	bm->cur.cur_pfn = BM_END_OF_MAP;
	return BM_END_OF_MAP;
}

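/*
 * Iteration idiom (a sketch; do_something() is a placeholder): to walk
 * every set bit in a bitmap, reset the position first and then pull PFNs
 * until BM_END_OF_MAP comes back:
 *
 *	memory_bm_position_reset(bm);
 *	for (pfn = memory_bm_next_pfn(bm); pfn != BM_END_OF_MAP;
 *	     pfn = memory_bm_next_pfn(bm))
 *		do_something(pfn);
 *
 * This is the pattern used by swsusp_free() and mark_unsafe_pages() below.
 */
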
/*
 * This structure represents a range of page frames the contents of which
 * should not be saved during hibernation.
 */
struct nosave_region {
	struct list_head list;
	unsigned long start_pfn;
	unsigned long end_pfn;
};

static LIST_HEAD(nosave_regions);

static void recycle_zone_bm_rtree(struct mem_zone_bm_rtree *zone)
{
	struct rtree_node *node;

	list_for_each_entry(node, &zone->nodes, list)
		recycle_safe_page(node->data);

	list_for_each_entry(node, &zone->leaves, list)
		recycle_safe_page(node->data);
}

static void memory_bm_recycle(struct memory_bitmap *bm)
{
	struct mem_zone_bm_rtree *zone;
	struct linked_page *p_list;

	list_for_each_entry(zone, &bm->zones, list)
		recycle_zone_bm_rtree(zone);

	p_list = bm->p_list;
	while (p_list) {
		struct linked_page *lp = p_list;

		p_list = lp->next;
		recycle_safe_page(lp);
	}
}

/**
 * register_nosave_region - Register a region of unsaveable memory.
 *
 * Register a range of page frames the contents of which should not be saved
 * during hibernation (to be used in the early initialization code).
 */
void __init register_nosave_region(unsigned long start_pfn, unsigned long end_pfn)
{
	struct nosave_region *region;

	if (start_pfn >= end_pfn)
		return;

	if (!list_empty(&nosave_regions)) {
		/* Try to extend the previous region (they should be sorted) */
		region = list_entry(nosave_regions.prev,
				    struct nosave_region, list);
		if (region->end_pfn == start_pfn) {
			region->end_pfn = end_pfn;
			goto Report;
		}
	}
	/* This allocation cannot fail */
	region = memblock_alloc(sizeof(struct nosave_region),
				SMP_CACHE_BYTES);
	if (!region)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      sizeof(struct nosave_region));
	region->start_pfn = start_pfn;
	region->end_pfn = end_pfn;
	list_add_tail(&region->list, &nosave_regions);
 Report:
	pr_info("Registered nosave memory: [mem %#010llx-%#010llx]\n",
		(unsigned long long) start_pfn << PAGE_SHIFT,
		((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
}

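/*
 * For example (an assumption about callers, not something defined here),
 * the x86 setup code walks the firmware memory map and invokes
 * register_nosave_region() for each hole or reserved range, so that areas
 * such as ACPI NVS are never written into the image; see
 * e820__register_nosave_regions() in arch/x86/kernel/e820.c.
 */
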
/*
 * Set bits in this map correspond to the page frames the contents of which
 * should not be saved during the suspend.
 */
static struct memory_bitmap *forbidden_pages_map;

/* Set bits in this map correspond to free page frames. */
static struct memory_bitmap *free_pages_map;

/*
 * Each page frame allocated for creating the image is marked by setting the
 * corresponding bits in forbidden_pages_map and free_pages_map simultaneously.
 */

void swsusp_set_page_free(struct page *page)
{
	if (free_pages_map)
		memory_bm_set_bit(free_pages_map, page_to_pfn(page));
}

static int swsusp_page_is_free(struct page *page)
{
	return free_pages_map ?
		memory_bm_test_bit(free_pages_map, page_to_pfn(page)) : 0;
}

void swsusp_unset_page_free(struct page *page)
{
	if (free_pages_map)
		memory_bm_clear_bit(free_pages_map, page_to_pfn(page));
}

static void swsusp_set_page_forbidden(struct page *page)
{
	if (forbidden_pages_map)
		memory_bm_set_bit(forbidden_pages_map, page_to_pfn(page));
}

int swsusp_page_is_forbidden(struct page *page)
{
	return forbidden_pages_map ?
		memory_bm_test_bit(forbidden_pages_map, page_to_pfn(page)) : 0;
}

static void swsusp_unset_page_forbidden(struct page *page)
{
	if (forbidden_pages_map)
		memory_bm_clear_bit(forbidden_pages_map, page_to_pfn(page));
}

/**
 * mark_nosave_pages - Mark pages that should not be saved.
 * @bm: Memory bitmap.
 *
 * Set the bits in @bm that correspond to the page frames the contents of which
 * should not be saved.
 */
static void mark_nosave_pages(struct memory_bitmap *bm)
{
	struct nosave_region *region;

	if (list_empty(&nosave_regions))
		return;

	list_for_each_entry(region, &nosave_regions, list) {
		unsigned long pfn;

		pr_debug("Marking nosave pages: [mem %#010llx-%#010llx]\n",
			 (unsigned long long) region->start_pfn << PAGE_SHIFT,
			 ((unsigned long long) region->end_pfn << PAGE_SHIFT)
				- 1);

		for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++)
			if (pfn_valid(pfn)) {
				/*
				 * It is safe to ignore the result of
				 * mem_bm_set_bit_check() here, since we won't
				 * touch the PFNs for which the error is
				 * returned anyway.
				 */
				mem_bm_set_bit_check(bm, pfn);
			}
	}
}

/**
 * create_basic_memory_bitmaps - Create bitmaps to hold basic page information.
 *
 * Create bitmaps needed for marking page frames that should not be saved and
 * free page frames. The forbidden_pages_map and free_pages_map pointers are
 * only modified if everything goes well, because we don't want the bits to be
 * touched before both bitmaps are set up.
 */
int create_basic_memory_bitmaps(void)
{
	struct memory_bitmap *bm1, *bm2;
	int error;

	if (forbidden_pages_map && free_pages_map)
		return 0;

	BUG_ON(forbidden_pages_map || free_pages_map);

	bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
	if (!bm1)
		return -ENOMEM;

	error = memory_bm_create(bm1, GFP_KERNEL, PG_ANY);
	if (error)
		goto Free_first_object;

	bm2 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
	if (!bm2)
		goto Free_first_bitmap;

	error = memory_bm_create(bm2, GFP_KERNEL, PG_ANY);
	if (error)
		goto Free_second_object;

	forbidden_pages_map = bm1;
	free_pages_map = bm2;
	mark_nosave_pages(forbidden_pages_map);

	pr_debug("Basic memory bitmaps created\n");

	return 0;

 Free_second_object:
	kfree(bm2);
 Free_first_bitmap:
	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
 Free_first_object:
	kfree(bm1);
	return -ENOMEM;
}

/**
 * free_basic_memory_bitmaps - Free memory bitmaps holding basic information.
 *
 * Free memory bitmaps allocated by create_basic_memory_bitmaps(). The
 * auxiliary pointers are necessary so that the bitmaps themselves are not
 * referred to while they are being freed.
 */
void free_basic_memory_bitmaps(void)
{
	struct memory_bitmap *bm1, *bm2;

	if (WARN_ON(!(forbidden_pages_map && free_pages_map)))
		return;

	bm1 = forbidden_pages_map;
	bm2 = free_pages_map;
	forbidden_pages_map = NULL;
	free_pages_map = NULL;
	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
	kfree(bm1);
	memory_bm_free(bm2, PG_UNSAFE_CLEAR);
	kfree(bm2);

	pr_debug("Basic memory bitmaps freed\n");
}

static void clear_or_poison_free_page(struct page *page)
{
	if (page_poisoning_enabled_static())
		__kernel_poison_pages(page, 1);
	else if (want_init_on_free())
		clear_highpage(page);
}

void clear_or_poison_free_pages(void)
{
	struct memory_bitmap *bm = free_pages_map;
	unsigned long pfn;

	if (WARN_ON(!(free_pages_map)))
		return;

	if (page_poisoning_enabled() || want_init_on_free()) {
		memory_bm_position_reset(bm);
		pfn = memory_bm_next_pfn(bm);
		while (pfn != BM_END_OF_MAP) {
			if (pfn_valid(pfn))
				clear_or_poison_free_page(pfn_to_page(pfn));

			pfn = memory_bm_next_pfn(bm);
		}
		memory_bm_position_reset(bm);
		pr_info("free pages cleared after restore\n");
	}
}

/**
 * snapshot_additional_pages - Estimate the number of extra pages needed.
 * @zone: Memory zone to carry out the computation for.
 *
 * Estimate the number of additional pages needed for setting up the
 * hibernation image data structures for @zone (usually, the returned value is
 * greater than the exact number).
 */
unsigned int snapshot_additional_pages(struct zone *zone)
{
	unsigned int rtree, nodes;

	rtree = nodes = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
	rtree += DIV_ROUND_UP(rtree * sizeof(struct rtree_node),
			      LINKED_PAGE_DATA_SIZE);
	while (nodes > 1) {
		nodes = DIV_ROUND_UP(nodes, BM_ENTRIES_PER_LEVEL);
		rtree += nodes;
	}

	return 2 * rtree;
}

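/*
 * Worked example (illustrative, assuming a 4 GiB zone with 4 KiB pages on
 * a 64-bit kernel): spanned_pages = 2^20, so the bitmap needs
 * 2^20 / 32768 = 32 leaf pages, plus DIV_ROUND_UP(32 * sizeof(struct
 * rtree_node), LINKED_PAGE_DATA_SIZE) = 1 page of node wrappers, plus one
 * inner node to point at the 32 leaves: 34 pages, returned as 2 * 34 = 68.
 * The factor of two is a safety margin, as more than one such bitmap is
 * created during hibernation.
 */
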
/*
 * Touch the watchdog for every WD_PAGE_COUNT pages.
 */
#define WD_PAGE_COUNT	(128*1024)

static void mark_free_pages(struct zone *zone)
{
	unsigned long pfn, max_zone_pfn, page_count = WD_PAGE_COUNT;
	unsigned long flags;
	unsigned int order, t;
	struct page *page;

	if (zone_is_empty(zone))
		return;

	spin_lock_irqsave(&zone->lock, flags);

	max_zone_pfn = zone_end_pfn(zone);
	for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
		if (pfn_valid(pfn)) {
			page = pfn_to_page(pfn);

			if (!--page_count) {
				touch_nmi_watchdog();
				page_count = WD_PAGE_COUNT;
			}

			if (page_zone(page) != zone)
				continue;

			if (!swsusp_page_is_forbidden(page))
				swsusp_unset_page_free(page);
		}

	for_each_migratetype_order(order, t) {
		list_for_each_entry(page,
				&zone->free_area[order].free_list[t], buddy_list) {
			unsigned long i;

			pfn = page_to_pfn(page);
			for (i = 0; i < (1UL << order); i++) {
				if (!--page_count) {
					touch_nmi_watchdog();
					page_count = WD_PAGE_COUNT;
				}
				swsusp_set_page_free(pfn_to_page(pfn + i));
			}
		}
	}

	spin_unlock_irqrestore(&zone->lock, flags);
}

#ifdef CONFIG_HIGHMEM
/**
 * count_free_highmem_pages - Compute the total number of free highmem pages.
 *
 * The returned number is system-wide.
 */
static unsigned int count_free_highmem_pages(void)
{
	struct zone *zone;
	unsigned int cnt = 0;

	for_each_populated_zone(zone)
		if (is_highmem(zone))
			cnt += zone_page_state(zone, NR_FREE_PAGES);

	return cnt;
}

/**
 * saveable_highmem_page - Check if a highmem page is saveable.
 *
 * Determine whether a highmem page should be included in a hibernation image.
 *
 * We should save the page if it isn't Nosave or NosaveFree, or Reserved,
 * and it isn't part of a free chunk of pages.
 */
static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_online_page(pfn);
	if (!page || page_zone(page) != zone)
		return NULL;

	BUG_ON(!PageHighMem(page));

	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
		return NULL;

	if (PageReserved(page) || PageOffline(page))
		return NULL;

	if (page_is_guard(page))
		return NULL;

	return page;
}

/**
 * count_highmem_pages - Compute the total number of saveable highmem pages.
 */
static unsigned int count_highmem_pages(void)
{
	struct zone *zone;
	unsigned int n = 0;

	for_each_populated_zone(zone) {
		unsigned long pfn, max_zone_pfn;

		if (!is_highmem(zone))
			continue;

		mark_free_pages(zone);
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (saveable_highmem_page(zone, pfn))
				n++;
	}
	return n;
}
#else
static inline void *saveable_highmem_page(struct zone *z, unsigned long p)
{
	return NULL;
}

static inline unsigned int count_highmem_pages(void) { return 0; }
#endif /* CONFIG_HIGHMEM */

/**
 * saveable_page - Check if the given page is saveable.
 *
 * Determine whether a non-highmem page should be included in a hibernation
 * image.
 *
 * We should save the page if it isn't Nosave, and is not in the range
 * of pages statically defined as 'unsaveable', and it isn't part of
 * a free chunk of pages.
 */
static struct page *saveable_page(struct zone *zone, unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_online_page(pfn);
	if (!page || page_zone(page) != zone)
		return NULL;

	BUG_ON(PageHighMem(page));

	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
		return NULL;

	if (PageOffline(page))
		return NULL;

	if (PageReserved(page)
	    && (!kernel_page_present(page) || pfn_is_nosave(pfn)))
		return NULL;

	if (page_is_guard(page))
		return NULL;

	return page;
}

/**
 * count_data_pages - Compute the total number of saveable non-highmem pages.
 */
static unsigned int count_data_pages(void)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;
	unsigned int n = 0;

	for_each_populated_zone(zone) {
		if (is_highmem(zone))
			continue;

		mark_free_pages(zone);
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (saveable_page(zone, pfn))
				n++;
	}

	return n;
}

/*
 * This is needed, because copy_page and memcpy are not usable for copying
 * task structs. Returns true if the page was filled with only zeros,
 * otherwise false.
 */
static inline bool do_copy_page(long *dst, long *src)
{
	long z = 0;
	int n;

	for (n = PAGE_SIZE / sizeof(long); n; n--) {
		z |= *src;
		*dst++ = *src++;
	}
	return !z;
}

/**
 * safe_copy_page - Copy a page in a safe way.
 *
 * Check if the page we are going to copy is marked as present in the kernel
 * page tables. This always is the case if CONFIG_DEBUG_PAGEALLOC or
 * CONFIG_ARCH_HAS_SET_DIRECT_MAP is not set. In that case kernel_page_present()
 * always returns 'true'. Returns true if the page was entirely composed of
 * zeros, otherwise it will return false.
 */
static bool safe_copy_page(void *dst, struct page *s_page)
{
	bool zeros_only;

	if (kernel_page_present(s_page)) {
		zeros_only = do_copy_page(dst, page_address(s_page));
	} else {
		hibernate_map_page(s_page);
		zeros_only = do_copy_page(dst, page_address(s_page));
		hibernate_unmap_page(s_page);
	}
	return zeros_only;
}

#ifdef CONFIG_HIGHMEM
static inline struct page *page_is_saveable(struct zone *zone, unsigned long pfn)
{
	return is_highmem(zone) ?
		saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn);
}

static bool copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
	struct page *s_page, *d_page;
	void *src, *dst;
	bool zeros_only;

	s_page = pfn_to_page(src_pfn);
	d_page = pfn_to_page(dst_pfn);
	if (PageHighMem(s_page)) {
		src = kmap_local_page(s_page);
		dst = kmap_local_page(d_page);
		zeros_only = do_copy_page(dst, src);
		kunmap_local(dst);
		kunmap_local(src);
	} else {
		if (PageHighMem(d_page)) {
			/*
			 * The page pointed to by src may contain some kernel
			 * data modified by kmap_local_page().
			 */
			zeros_only = safe_copy_page(buffer, s_page);
			dst = kmap_local_page(d_page);
			copy_page(dst, buffer);
			kunmap_local(dst);
		} else {
			zeros_only = safe_copy_page(page_address(d_page), s_page);
		}
	}
	return zeros_only;
}
#else
#define page_is_saveable(zone, pfn)	saveable_page(zone, pfn)

static inline int copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
	return safe_copy_page(page_address(pfn_to_page(dst_pfn)),
			      pfn_to_page(src_pfn));
}
#endif /* CONFIG_HIGHMEM */

/*
 * copy_data_pages - Copy all saveable pages into page frames pulled from
 * @copy_bm. If a page was entirely filled with zeros, it is marked in
 * @zero_bm instead of being copied.
 *
 * Returns the number of pages copied.
 */
static unsigned long copy_data_pages(struct memory_bitmap *copy_bm,
				     struct memory_bitmap *orig_bm,
				     struct memory_bitmap *zero_bm)
{
	unsigned long copied_pages = 0;
	struct zone *zone;
	unsigned long pfn, copy_pfn;

	for_each_populated_zone(zone) {
		unsigned long max_zone_pfn;

		mark_free_pages(zone);
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (page_is_saveable(zone, pfn))
				memory_bm_set_bit(orig_bm, pfn);
	}
	memory_bm_position_reset(orig_bm);
	memory_bm_position_reset(copy_bm);
	copy_pfn = memory_bm_next_pfn(copy_bm);
	for (;;) {
		pfn = memory_bm_next_pfn(orig_bm);
		if (unlikely(pfn == BM_END_OF_MAP))
			break;
		if (copy_data_page(copy_pfn, pfn)) {
			memory_bm_set_bit(zero_bm, pfn);
			/* Use this copy_pfn for a page that is not full of zeros */
			continue;
		}
		copied_pages++;
		copy_pfn = memory_bm_next_pfn(copy_bm);
	}
	return copied_pages;
}

/* Total number of image pages */
static unsigned int nr_copy_pages;
/* Number of pages needed for saving the original pfns of the image pages */
static unsigned int nr_meta_pages;
/* Number of zero pages */
static unsigned int nr_zero_pages;

/*
 * Numbers of normal and highmem page frames allocated for hibernation image
 * before suspending devices.
 */
static unsigned int alloc_normal, alloc_highmem;

/*
 * Memory bitmap used for marking saveable pages (during hibernation) or
 * hibernation image pages (during restore)
 */
static struct memory_bitmap orig_bm;

/*
 * Memory bitmap used during hibernation for marking allocated page frames that
 * will contain copies of saveable pages. During restore it is initially used
 * for marking hibernation image pages, but then the set bits from it are
 * duplicated in @orig_bm and it is released. On highmem systems it is next
 * used for marking "safe" highmem pages, but it has to be reinitialized for
 * this purpose.
 */
static struct memory_bitmap copy_bm;

/* Memory bitmap which tracks which saveable pages were zero filled. */
static struct memory_bitmap zero_bm;

/**
 * swsusp_free - Free pages allocated for hibernation image.
 *
 * Image pages are allocated before snapshot creation, so they need to be
 * released after resume.
 */
void swsusp_free(void)
{
	unsigned long fb_pfn, fr_pfn;

	if (!forbidden_pages_map || !free_pages_map)
		goto out;

	memory_bm_position_reset(forbidden_pages_map);
	memory_bm_position_reset(free_pages_map);

loop:
	fr_pfn = memory_bm_next_pfn(free_pages_map);
	fb_pfn = memory_bm_next_pfn(forbidden_pages_map);

	/*
	 * Find the next bit set in both bitmaps. This is guaranteed to
	 * terminate when fb_pfn == fr_pfn == BM_END_OF_MAP.
	 */
	do {
		if (fb_pfn < fr_pfn)
			fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
		if (fr_pfn < fb_pfn)
			fr_pfn = memory_bm_next_pfn(free_pages_map);
	} while (fb_pfn != fr_pfn);

	if (fr_pfn != BM_END_OF_MAP && pfn_valid(fr_pfn)) {
		struct page *page = pfn_to_page(fr_pfn);

		memory_bm_clear_current(forbidden_pages_map);
		memory_bm_clear_current(free_pages_map);
		hibernate_restore_unprotect_page(page_address(page));
		__free_page(page);
		goto loop;
	}

out:
	nr_copy_pages = 0;
	nr_meta_pages = 0;
	nr_zero_pages = 0;
	restore_pblist = NULL;
	buffer = NULL;
	alloc_normal = 0;
	alloc_highmem = 0;
	hibernate_restore_protection_end();
}

/* Helper functions used for the shrinking of memory. */

#define GFP_IMAGE	(GFP_KERNEL | __GFP_NOWARN)

/**
 * preallocate_image_pages - Allocate a number of pages for hibernation image.
 * @nr_pages: Number of page frames to allocate.
 * @mask: GFP flags to use for the allocation.
 *
 * Return value: Number of page frames actually allocated
 */
static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask)
{
	unsigned long nr_alloc = 0;

	while (nr_pages > 0) {
		struct page *page;

		page = alloc_image_page(mask);
		if (!page)
			break;
		memory_bm_set_bit(&copy_bm, page_to_pfn(page));
		if (PageHighMem(page))
			alloc_highmem++;
		else
			alloc_normal++;
		nr_pages--;
		nr_alloc++;
	}

	return nr_alloc;
}

static unsigned long preallocate_image_memory(unsigned long nr_pages,
					      unsigned long avail_normal)
{
	unsigned long alloc;

	if (avail_normal <= alloc_normal)
		return 0;

	alloc = avail_normal - alloc_normal;
	if (nr_pages < alloc)
		alloc = nr_pages;

	return preallocate_image_pages(alloc, GFP_IMAGE);
}

#ifdef CONFIG_HIGHMEM
static unsigned long preallocate_image_highmem(unsigned long nr_pages)
{
	return preallocate_image_pages(nr_pages, GFP_IMAGE | __GFP_HIGHMEM);
}

/**
 * __fraction - Compute (an approximation of) x * (multiplier / base).
 */
static unsigned long __fraction(u64 x, u64 multiplier, u64 base)
{
	return div64_u64(x * multiplier, base);
}

static unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
						  unsigned long highmem,
						  unsigned long total)
{
	unsigned long alloc = __fraction(nr_pages, highmem, total);

	return preallocate_image_pages(alloc, GFP_IMAGE | __GFP_HIGHMEM);
}
#else /* CONFIG_HIGHMEM */
static inline unsigned long preallocate_image_highmem(unsigned long nr_pages)
{
	return 0;
}

static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
							 unsigned long highmem,
							 unsigned long total)
{
	return 0;
}
#endif /* CONFIG_HIGHMEM */

/**
 * free_unnecessary_pages - Release preallocated pages not needed for the image.
 */
static unsigned long free_unnecessary_pages(void)
{
	unsigned long save, to_free_normal, to_free_highmem, free;

	save = count_data_pages();
	if (alloc_normal >= save) {
		to_free_normal = alloc_normal - save;
		save = 0;
	} else {
		to_free_normal = 0;
		save -= alloc_normal;
	}
	save += count_highmem_pages();
	if (alloc_highmem >= save) {
		to_free_highmem = alloc_highmem - save;
	} else {
		to_free_highmem = 0;
		save -= alloc_highmem;
		if (to_free_normal > save)
			to_free_normal -= save;
		else
			to_free_normal = 0;
	}
	free = to_free_normal + to_free_highmem;

	memory_bm_position_reset(&copy_bm);

	while (to_free_normal > 0 || to_free_highmem > 0) {
		unsigned long pfn = memory_bm_next_pfn(&copy_bm);
		struct page *page = pfn_to_page(pfn);

		if (PageHighMem(page)) {
			if (!to_free_highmem)
				continue;
			to_free_highmem--;
			alloc_highmem--;
		} else {
			if (!to_free_normal)
				continue;
			to_free_normal--;
			alloc_normal--;
		}
		memory_bm_clear_bit(&copy_bm, pfn);
		swsusp_unset_page_forbidden(page);
		swsusp_unset_page_free(page);
		__free_page(page);
	}

	return free;
}

/**
 * minimum_image_size - Estimate the minimum acceptable size of an image.
 * @saveable: Number of saveable pages in the system.
 *
 * We want to avoid attempting to free too much memory too hard, so estimate the
 * minimum acceptable size of a hibernation image to use as the lower limit for
 * preallocating memory.
 *
 * We assume that the minimum image size should be proportional to
 *
 * [number of saveable pages] - [number of pages that can be freed in theory]
 *
 * where the second term is the sum of (1) reclaimable slab pages, (2) active
 * and (3) inactive anonymous pages, (4) active and (5) inactive file pages.
 */
static unsigned long minimum_image_size(unsigned long saveable)
{
	unsigned long size;

	size = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B)
		+ global_node_page_state(NR_ACTIVE_ANON)
		+ global_node_page_state(NR_INACTIVE_ANON)
		+ global_node_page_state(NR_ACTIVE_FILE)
		+ global_node_page_state(NR_INACTIVE_FILE);

	return saveable <= size ? 0 : saveable - size;
}

/**
 * hibernate_preallocate_memory - Preallocate memory for hibernation image.
 *
 * To create a hibernation image it is necessary to make a copy of every page
 * frame in use. We also need a number of page frames to be free during
 * hibernation for allocations made while saving the image and for device
 * drivers, in case they need to allocate memory from their hibernation
 * callbacks (these two numbers are given by PAGES_FOR_IO, which is a rough
 * estimate, and reserved_size divided by PAGE_SIZE, which is tunable through
 * /sys/power/reserved_size, respectively). To make this happen, we compute the
 * total number of available page frames and allocate at least
 *
 * ([page frames total] - PAGES_FOR_IO - [metadata pages]) / 2
 *  - 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE)
 *
 * of them, which corresponds to the maximum size of a hibernation image.
 *
 * If image_size is set below the number following from the above formula,
 * the preallocation of memory is continued until the total number of saveable
 * pages in the system is below the requested image size or the minimum
 * acceptable image size returned by minimum_image_size(), whichever is greater.
 */
int hibernate_preallocate_memory(void)
{
	struct zone *zone;
	unsigned long saveable, size, max_size, count, highmem, pages = 0;
	unsigned long alloc, save_highmem, pages_highmem, avail_normal;
	ktime_t start, stop;
	int error;

	pr_info("Preallocating image memory\n");
	start = ktime_get();

	error = memory_bm_create(&orig_bm, GFP_IMAGE, PG_ANY);
	if (error) {
		pr_err("Cannot allocate original bitmap\n");
		goto err_out;
	}

	error = memory_bm_create(&copy_bm, GFP_IMAGE, PG_ANY);
	if (error) {
		pr_err("Cannot allocate copy bitmap\n");
		goto err_out;
	}

	error = memory_bm_create(&zero_bm, GFP_IMAGE, PG_ANY);
	if (error) {
		pr_err("Cannot allocate zero bitmap\n");
		goto err_out;
	}

	alloc_normal = 0;
	alloc_highmem = 0;
	nr_zero_pages = 0;

	/* Count the number of saveable data pages. */
	save_highmem = count_highmem_pages();
	saveable = count_data_pages();

	/*
	 * Compute the total number of page frames we can use (count) and the
	 * number of pages needed for image metadata (size).
	 */
	count = saveable;
	saveable += save_highmem;
	highmem = save_highmem;
	size = 0;
	for_each_populated_zone(zone) {
		size += snapshot_additional_pages(zone);
		if (is_highmem(zone))
			highmem += zone_page_state(zone, NR_FREE_PAGES);
		else
			count += zone_page_state(zone, NR_FREE_PAGES);
	}
	avail_normal = count;
	count += highmem;
	count -= totalreserve_pages;

	/* Compute the maximum number of saveable pages to leave in memory. */
	max_size = (count - (size + PAGES_FOR_IO)) / 2
			- 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE);
	/* Compute the desired number of image pages specified by image_size. */
	size = DIV_ROUND_UP(image_size, PAGE_SIZE);
	if (size > max_size)
		size = max_size;
	/*
	 * If the desired number of image pages is at least as large as the
	 * current number of saveable pages in memory, allocate page frames for
	 * the image and we're done.
	 */
	if (size >= saveable) {
		pages = preallocate_image_highmem(save_highmem);
		pages += preallocate_image_memory(saveable - pages, avail_normal);
		goto out;
	}

	/* Estimate the minimum size of the image. */
	pages = minimum_image_size(saveable);
	/*
	 * To avoid excessive pressure on the normal zone, leave room in it to
	 * accommodate an image of the minimum size (unless it's already too
	 * small, in which case don't preallocate pages from it at all).
	 */
	if (avail_normal > pages)
		avail_normal -= pages;
	else
		avail_normal = 0;
	if (size < pages)
		size = min_t(unsigned long, pages, max_size);

	/*
	 * Let the memory management subsystem know that we're going to need a
	 * large number of page frames to allocate and make it free some memory.
	 * NOTE: If this is not done, performance will be hurt badly in some
	 * test cases.
	 */
	shrink_all_memory(saveable - size);

	/*
	 * The number of saveable pages in memory was too high, so apply some
	 * pressure to decrease it. First, make room for the largest possible
	 * image and fail if that doesn't work. Next, try to decrease the size
	 * of the image as much as indicated by 'size' using allocations from
	 * highmem and non-highmem zones separately.
	 */
	pages_highmem = preallocate_image_highmem(highmem / 2);
	alloc = count - max_size;
	if (alloc > pages_highmem)
		alloc -= pages_highmem;
	else
		alloc = 0;
	pages = preallocate_image_memory(alloc, avail_normal);
	if (pages < alloc) {
		/* We have exhausted non-highmem pages, try highmem. */
		alloc -= pages;
		pages += pages_highmem;
		pages_highmem = preallocate_image_highmem(alloc);
		if (pages_highmem < alloc) {
			pr_err("Image allocation is %lu pages short\n",
			       alloc - pages_highmem);
			goto err_out;
		}
		pages += pages_highmem;
		/*
		 * size is the desired number of saveable pages to leave in
		 * memory, so try to preallocate (all memory - size) pages.
		 */
		alloc = (count - pages) - size;
		pages += preallocate_image_highmem(alloc);
	} else {
		/*
		 * There are approximately max_size saveable pages at this point
		 * and we want to reduce this number down to size.
		 */
		alloc = max_size - size;
		size = preallocate_highmem_fraction(alloc, highmem, count);
		pages_highmem += size;
		alloc -= size;
		size = preallocate_image_memory(alloc, avail_normal);
		pages_highmem += preallocate_image_highmem(alloc - size);
		pages += pages_highmem + size;
	}

	/*
	 * We only need as many page frames for the image as there are saveable
	 * pages in memory, but we have allocated more. Release the excessive
	 * ones now.
	 */
	pages -= free_unnecessary_pages();

 out:
	stop = ktime_get();
	pr_info("Allocated %lu pages for snapshot\n", pages);
	swsusp_show_speed(start, stop, pages, "Allocated");

	return 0;

 err_out:
	swsusp_free();
	return -ENOMEM;
}

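/*
 * Rough numbers for the max_size formula above (illustrative only): with
 * count = 1000000 usable page frames, size = 1000 metadata pages and
 * reserved_size covering 512 pages, max_size is
 * (1000000 - (1000 + PAGES_FOR_IO)) / 2 - 2 * 512, so a bit under half of
 * the usable frames may stay in the image; image_size then selects the
 * preferred target below that cap.
 */
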
#ifdef CONFIG_HIGHMEM
/**
 * count_pages_for_highmem - Count non-highmem pages needed for copying highmem.
 *
 * Compute the number of non-highmem pages that will be necessary for creating
 * copies of highmem pages.
 */
static unsigned int count_pages_for_highmem(unsigned int nr_highmem)
{
	unsigned int free_highmem = count_free_highmem_pages() + alloc_highmem;

	if (free_highmem >= nr_highmem)
		nr_highmem = 0;
	else
		nr_highmem -= free_highmem;

	return nr_highmem;
}
#else
static unsigned int count_pages_for_highmem(unsigned int nr_highmem) { return 0; }
#endif /* CONFIG_HIGHMEM */

/**
 * enough_free_mem - Check if there is enough free memory for the image.
 */
static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
{
	struct zone *zone;
	unsigned int free = alloc_normal;

	for_each_populated_zone(zone)
		if (!is_highmem(zone))
			free += zone_page_state(zone, NR_FREE_PAGES);

	nr_pages += count_pages_for_highmem(nr_highmem);
	pr_debug("Normal pages needed: %u + %u, available pages: %u\n",
		 nr_pages, PAGES_FOR_IO, free);

	return free > nr_pages + PAGES_FOR_IO;
}

#ifdef CONFIG_HIGHMEM
/**
 * get_highmem_buffer - Allocate a buffer for highmem pages.
 *
 * If there are some highmem pages in the hibernation image, we may need a
 * buffer to copy them and/or load their data.
 */
static inline int get_highmem_buffer(int safe_needed)
{
	buffer = get_image_page(GFP_ATOMIC, safe_needed);
	return buffer ? 0 : -ENOMEM;
}

/**
 * alloc_highmem_pages - Allocate some highmem pages for the image.
 *
 * Try to allocate as many pages as needed, but if the number of free highmem
 * pages is less than that, allocate them all.
 */
static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
					       unsigned int nr_highmem)
{
	unsigned int to_alloc = count_free_highmem_pages();

	if (to_alloc > nr_highmem)
		to_alloc = nr_highmem;

	nr_highmem -= to_alloc;
	while (to_alloc-- > 0) {
		struct page *page;

		page = alloc_image_page(__GFP_HIGHMEM|__GFP_KSWAPD_RECLAIM);
		memory_bm_set_bit(bm, page_to_pfn(page));
	}
	return nr_highmem;
}
#else
static inline int get_highmem_buffer(int safe_needed) { return 0; }

static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
					       unsigned int n) { return 0; }
#endif /* CONFIG_HIGHMEM */

/**
 * swsusp_alloc - Allocate memory for hibernation image.
 *
 * We first try to allocate as many highmem pages as there are
 * saveable highmem pages in the system. If that fails, we allocate
 * non-highmem pages for the copies of the remaining highmem ones.
 *
 * In this approach it is likely that the copies of highmem pages will
 * also be located in the high memory, because of the way in which
 * copy_data_pages() works.
 */
static int swsusp_alloc(struct memory_bitmap *copy_bm,
			unsigned int nr_pages, unsigned int nr_highmem)
{
	if (nr_highmem > 0) {
		if (get_highmem_buffer(PG_ANY))
			goto err_out;
		if (nr_highmem > alloc_highmem) {
			nr_highmem -= alloc_highmem;
			nr_pages += alloc_highmem_pages(copy_bm, nr_highmem);
		}
	}
	if (nr_pages > alloc_normal) {
		nr_pages -= alloc_normal;
		while (nr_pages-- > 0) {
			struct page *page;

			page = alloc_image_page(GFP_ATOMIC);
			if (!page)
				goto err_out;
			memory_bm_set_bit(copy_bm, page_to_pfn(page));
		}
	}

	return 0;

 err_out:
	swsusp_free();
	return -ENOMEM;
}

asmlinkage __visible int swsusp_save(void)
{
	unsigned int nr_pages, nr_highmem;

	pr_info("Creating image:\n");

	drain_local_pages(NULL);
	nr_pages = count_data_pages();
	nr_highmem = count_highmem_pages();
	pr_info("Need to copy %u pages\n", nr_pages + nr_highmem);

	if (!enough_free_mem(nr_pages, nr_highmem)) {
		pr_err("Not enough free memory\n");
		return -ENOMEM;
	}

	if (swsusp_alloc(&copy_bm, nr_pages, nr_highmem)) {
		pr_err("Memory allocation failed\n");
		return -ENOMEM;
	}

	/*
	 * While allocating the suspend pagedir, new cold pages may appear.
	 * Kill them.
	 */
	drain_local_pages(NULL);
	nr_copy_pages = copy_data_pages(&copy_bm, &orig_bm, &zero_bm);

	/*
	 * End of critical section. From now on, we can write to memory,
	 * but we should not touch disk. This in particular means we must
	 * _not_ touch swap space! Except we must write out our image of
	 * course.
	 */
	nr_pages += nr_highmem;
	/* We don't actually copy the zero pages */
	nr_zero_pages = nr_pages - nr_copy_pages;
	nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);

	pr_info("Image created (%d pages copied, %d zero pages)\n", nr_copy_pages, nr_zero_pages);

	return 0;
}

#ifndef CONFIG_ARCH_HIBERNATION_HEADER
static int init_header_complete(struct swsusp_info *info)
{
	memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname));
	info->version_code = LINUX_VERSION_CODE;
	return 0;
}

static const char *check_image_kernel(struct swsusp_info *info)
{
	if (info->version_code != LINUX_VERSION_CODE)
		return "kernel version";
	if (strcmp(info->uts.sysname, init_utsname()->sysname))
		return "system type";
	if (strcmp(info->uts.release, init_utsname()->release))
		return "kernel release";
	if (strcmp(info->uts.version, init_utsname()->version))
		return "version";
	if (strcmp(info->uts.machine, init_utsname()->machine))
		return "machine";
	return NULL;
}
#endif /* CONFIG_ARCH_HIBERNATION_HEADER */

unsigned long snapshot_get_image_size(void)
{
	return nr_copy_pages + nr_meta_pages + 1;
}

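/*
 * Image layout implied by the "+ 1": one swsusp_info header page, then
 * nr_meta_pages of packed PFNs, then nr_copy_pages of data. For example
 * (illustrative, assuming a 64-bit kernel, 4 KiB pages and no zero pages),
 * an image of 100000 data pages needs DIV_ROUND_UP(100000 * 8, 4096) = 196
 * metadata pages, so snapshot_get_image_size() returns 100197.
 */
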
static int init_header(struct swsusp_info *info)
{
	memset(info, 0, sizeof(struct swsusp_info));
	info->num_physpages = get_num_physpages();
	info->image_pages = nr_copy_pages;
	info->pages = snapshot_get_image_size();
	info->size = info->pages;
	info->size <<= PAGE_SHIFT;
	return init_header_complete(info);
}

#define ENCODED_PFN_ZERO_FLAG	((unsigned long)1 << (BITS_PER_LONG - 1))
#define ENCODED_PFN_MASK	(~ENCODED_PFN_ZERO_FLAG)

/**
 * pack_pfns - Prepare PFNs for saving.
 * @bm: Memory bitmap.
 * @buf: Memory buffer to store the PFNs in.
 * @zero_bm: Memory bitmap containing PFNs of zero pages.
 *
 * PFNs corresponding to set bits in @bm are stored in the area of memory
 * pointed to by @buf (1 page at a time). Pages which were filled with only
 * zeros will have the highest bit set in the packed format to distinguish
 * them from PFNs which will be contained in the image file.
 */
static inline void pack_pfns(unsigned long *buf, struct memory_bitmap *bm,
			     struct memory_bitmap *zero_bm)
{
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
		buf[j] = memory_bm_next_pfn(bm);
		if (unlikely(buf[j] == BM_END_OF_MAP))
			break;
		if (memory_bm_test_bit(zero_bm, buf[j]))
			buf[j] |= ENCODED_PFN_ZERO_FLAG;
	}
}

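/*
 * Encoding example (64-bit): if PFN 0x1234 belongs to a page that was all
 * zeros, pack_pfns() stores 0x8000000000001234UL for it; unpack_orig_pfns()
 * below masks with ENCODED_PFN_MASK to recover 0x1234 and sets the
 * corresponding bit in the zero bitmap instead of expecting page data in
 * the image.
 */
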
/**
 * snapshot_read_next - Get the address to read the next image page from.
 * @handle: Snapshot handle to be used for the reading.
 *
 * On the first call, @handle should point to a zeroed snapshot_handle
 * structure. The structure gets populated then and a pointer to it should be
 * passed to this function every next time.
 *
 * On success, the function returns a positive number. Then, the caller
 * is allowed to read up to the returned number of bytes from the memory
 * location computed by the data_of() macro.
 *
 * The function returns 0 to indicate the end of the data stream condition,
 * and negative numbers are returned on errors. If that happens, the structure
 * pointed to by @handle is not updated and should not be used any more.
 */
int snapshot_read_next(struct snapshot_handle *handle)
{
	if (handle->cur > nr_meta_pages + nr_copy_pages)
		return 0;

	if (!buffer) {
		/* This makes the buffer be freed by swsusp_free() */
		buffer = get_image_page(GFP_ATOMIC, PG_ANY);
		if (!buffer)
			return -ENOMEM;
	}
	if (!handle->cur) {
		int error;

		error = init_header((struct swsusp_info *)buffer);
		if (error)
			return error;
		handle->buffer = buffer;
		memory_bm_position_reset(&orig_bm);
		memory_bm_position_reset(&copy_bm);
	} else if (handle->cur <= nr_meta_pages) {
		clear_page(buffer);
		pack_pfns(buffer, &orig_bm, &zero_bm);
	} else {
		struct page *page;

		page = pfn_to_page(memory_bm_next_pfn(&copy_bm));
		if (PageHighMem(page)) {
			/*
			 * Highmem pages are copied to the buffer,
			 * because we can't return with a kmapped
			 * highmem page (we may not be called again).
			 */
			void *kaddr;

			kaddr = kmap_atomic(page);
			copy_page(buffer, kaddr);
			kunmap_atomic(kaddr);
			handle->buffer = buffer;
		} else {
			handle->buffer = page_address(page);
		}
	}
	handle->cur++;
	return PAGE_SIZE;
}


static void duplicate_memory_bitmap(struct memory_bitmap *dst,
				    struct memory_bitmap *src)
{
	unsigned long pfn;

	memory_bm_position_reset(src);
	pfn = memory_bm_next_pfn(src);
	while (pfn != BM_END_OF_MAP) {
		memory_bm_set_bit(dst, pfn);
		pfn = memory_bm_next_pfn(src);
	}
}

/**
 * mark_unsafe_pages - Mark pages that were used before hibernation.
 *
 * Mark the pages that cannot be used for storing the image during restoration,
 * because they conflict with the pages that had been used before hibernation.
 */
static void mark_unsafe_pages(struct memory_bitmap *bm)
{
	unsigned long pfn;

	/* Clear the "free"/"unsafe" bit for all PFNs */
	memory_bm_position_reset(free_pages_map);
	pfn = memory_bm_next_pfn(free_pages_map);
	while (pfn != BM_END_OF_MAP) {
		memory_bm_clear_current(free_pages_map);
		pfn = memory_bm_next_pfn(free_pages_map);
	}

	/* Mark pages that correspond to the "original" PFNs as "unsafe" */
	duplicate_memory_bitmap(free_pages_map, bm);

	allocated_unsafe_pages = 0;
}

static int check_header(struct swsusp_info *info)
{
	const char *reason;

	reason = check_image_kernel(info);
	if (!reason && info->num_physpages != get_num_physpages())
		reason = "memory size";
	if (reason) {
		pr_err("Image mismatch: %s\n", reason);
		return -EPERM;
	}
	return 0;
}

/**
 * load_header - Check the image header and copy the data from it.
 */
static int load_header(struct swsusp_info *info)
{
	int error;

	restore_pblist = NULL;
	error = check_header(info);
	if (!error) {
		nr_copy_pages = info->image_pages;
		nr_meta_pages = info->pages - info->image_pages - 1;
	}
	return error;
}

/**
 * unpack_orig_pfns - Set bits corresponding to given PFNs in a memory bitmap.
 * @bm: Memory bitmap.
 * @buf: Area of memory containing the PFNs.
 * @zero_bm: Memory bitmap with the zero PFNs marked.
 *
 * For each element of the array pointed to by @buf (1 page at a time), set the
 * corresponding bit in @bm. If the page was originally populated with only
 * zeros then a corresponding bit will also be set in @zero_bm.
 */
static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm,
			    struct memory_bitmap *zero_bm)
{
	unsigned long decoded_pfn;
	bool zero;
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
		if (unlikely(buf[j] == BM_END_OF_MAP))
			break;

		zero = !!(buf[j] & ENCODED_PFN_ZERO_FLAG);
		decoded_pfn = buf[j] & ENCODED_PFN_MASK;
		if (pfn_valid(decoded_pfn) && memory_bm_pfn_present(bm, decoded_pfn)) {
			memory_bm_set_bit(bm, decoded_pfn);
			if (zero) {
				memory_bm_set_bit(zero_bm, decoded_pfn);
				nr_zero_pages++;
			}
		} else {
			if (!pfn_valid(decoded_pfn))
				pr_err(FW_BUG "Memory map mismatch at 0x%llx after hibernation\n",
				       (unsigned long long)PFN_PHYS(decoded_pfn));
			return -EFAULT;
		}
	}

	return 0;
}
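
/*
 * unpack_orig_pfns() is the inverse of pack_pfns(): continuing the example
 * above, reading 0x8000000000001234 sets bit 0x1234 in both @bm and @zero_bm,
 * which tells the restore path to clear that page rather than to expect its
 * contents in the data stream.
 */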

#ifdef CONFIG_HIGHMEM
/*
 * struct highmem_pbe is used for creating the list of highmem pages that
 * should be restored atomically during the resume from disk, because the page
 * frames they have occupied before the suspend are in use.
 */
struct highmem_pbe {
	struct page *copy_page;	/* data is here now */
	struct page *orig_page;	/* data was here before the suspend */
	struct highmem_pbe *next;
};

/*
 * List of highmem PBEs needed for restoring the highmem pages that were
 * allocated before the suspend and included in the suspend image, but have
 * also been allocated by the "resume" kernel, so their contents cannot be
 * written directly to their "original" page frames.
 */
static struct highmem_pbe *highmem_pblist;

/**
 * count_highmem_image_pages - Compute the number of highmem pages in the image.
 * @bm: Memory bitmap.
 *
 * The bits in @bm that correspond to image pages are assumed to be set.
 */
static unsigned int count_highmem_image_pages(struct memory_bitmap *bm)
{
	unsigned long pfn;
	unsigned int cnt = 0;

	memory_bm_position_reset(bm);
	pfn = memory_bm_next_pfn(bm);
	while (pfn != BM_END_OF_MAP) {
		if (PageHighMem(pfn_to_page(pfn)))
			cnt++;

		pfn = memory_bm_next_pfn(bm);
	}
	return cnt;
}

static unsigned int safe_highmem_pages;

static struct memory_bitmap *safe_highmem_bm;

/**
 * prepare_highmem_image - Allocate memory for loading highmem data from image.
 * @bm: Pointer to an uninitialized memory bitmap structure.
 * @nr_highmem_p: Pointer to the number of highmem image pages.
 *
 * Try to allocate as many highmem pages as there are highmem image pages
 * (@nr_highmem_p points to the variable containing the number of highmem image
 * pages). The pages that are "safe" (i.e. will not be overwritten when the
 * hibernation image is restored entirely) have the corresponding bits set in
 * @bm (it must be uninitialized).
 *
 * NOTE: This function should not be called if there are no highmem image pages.
 */
static int prepare_highmem_image(struct memory_bitmap *bm,
				 unsigned int *nr_highmem_p)
{
	unsigned int to_alloc;

	if (memory_bm_create(bm, GFP_ATOMIC, PG_SAFE))
		return -ENOMEM;

	if (get_highmem_buffer(PG_SAFE))
		return -ENOMEM;

	to_alloc = count_free_highmem_pages();
	if (to_alloc > *nr_highmem_p)
		to_alloc = *nr_highmem_p;
	else
		*nr_highmem_p = to_alloc;

	safe_highmem_pages = 0;
	while (to_alloc-- > 0) {
		struct page *page;

		page = alloc_page(__GFP_HIGHMEM);
		if (!swsusp_page_is_free(page)) {
			/* The page is "safe", set its bit in the bitmap */
			memory_bm_set_bit(bm, page_to_pfn(page));
			safe_highmem_pages++;
		}
		/* Mark the page as allocated */
		swsusp_set_page_forbidden(page);
		swsusp_set_page_free(page);
	}
	memory_bm_position_reset(bm);
	safe_highmem_bm = bm;
	return 0;
}

static struct page *last_highmem_page;

/**
 * get_highmem_page_buffer - Prepare a buffer to store a highmem image page.
 *
 * For a given highmem image page get a buffer that suspend_write_next() should
 * return to its caller to write to.
 *
 * If the page is to be saved to its "original" page frame or a copy of
 * the page is to be made in the highmem, @buffer is returned. Otherwise,
 * the copy of the page is to be made in normal memory, so the address of
 * the copy is returned.
 *
 * If @buffer is returned, the caller of suspend_write_next() will write
 * the page's contents to @buffer, so they will have to be copied to the
 * right location on the next call to suspend_write_next() and it is done
 * with the help of copy_last_highmem_page(). For this purpose, if
 * @buffer is returned, @last_highmem_page is set to the page to which
 * the data will have to be copied from @buffer.
 */
static void *get_highmem_page_buffer(struct page *page,
				     struct chain_allocator *ca)
{
	struct highmem_pbe *pbe;
	void *kaddr;

	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page)) {
		/*
		 * We have allocated the "original" page frame and we can
		 * use it directly to store the loaded page.
		 */
		last_highmem_page = page;
		return buffer;
	}
	/*
	 * The "original" page frame has not been allocated and we have to
	 * use a "safe" page frame to store the loaded page.
	 */
	pbe = chain_alloc(ca, sizeof(struct highmem_pbe));
	if (!pbe) {
		swsusp_free();
		return ERR_PTR(-ENOMEM);
	}
	pbe->orig_page = page;
	if (safe_highmem_pages > 0) {
		struct page *tmp;

		/* Copy of the page will be stored in high memory */
		kaddr = buffer;
		tmp = pfn_to_page(memory_bm_next_pfn(safe_highmem_bm));
		safe_highmem_pages--;
		last_highmem_page = tmp;
		pbe->copy_page = tmp;
	} else {
		/* Copy of the page will be stored in normal memory */
		kaddr = __get_safe_page(ca->gfp_mask);
		if (!kaddr)
			return ERR_PTR(-ENOMEM);
		pbe->copy_page = virt_to_page(kaddr);
	}
	pbe->next = highmem_pblist;
	highmem_pblist = pbe;
	return kaddr;
}

/**
 * copy_last_highmem_page - Copy the most recent highmem image page.
 *
 * Copy the contents of a highmem image from @buffer, where the caller of
 * snapshot_write_next() has stored them, to the right location represented by
 * @last_highmem_page.
 */
static void copy_last_highmem_page(void)
{
	if (last_highmem_page) {
		void *dst;

		dst = kmap_atomic(last_highmem_page);
		copy_page(dst, buffer);
		kunmap_atomic(dst);
		last_highmem_page = NULL;
	}
}

static inline int last_highmem_page_copied(void)
{
	return !last_highmem_page;
}

static inline void free_highmem_data(void)
{
	if (safe_highmem_bm)
		memory_bm_free(safe_highmem_bm, PG_UNSAFE_CLEAR);

	if (buffer)
		free_image_page(buffer, PG_UNSAFE_CLEAR);
}
#else
static unsigned int count_highmem_image_pages(struct memory_bitmap *bm) { return 0; }

static inline int prepare_highmem_image(struct memory_bitmap *bm,
					unsigned int *nr_highmem_p) { return 0; }

static inline void *get_highmem_page_buffer(struct page *page,
					    struct chain_allocator *ca)
{
	return ERR_PTR(-EINVAL);
}

static inline void copy_last_highmem_page(void) {}
static inline int last_highmem_page_copied(void) { return 1; }
static inline void free_highmem_data(void) {}
#endif /* CONFIG_HIGHMEM */

#define PBES_PER_LINKED_PAGE (LINKED_PAGE_DATA_SIZE / sizeof(struct pbe))

/**
 * prepare_image - Make room for loading hibernation image.
 * @new_bm: Uninitialized memory bitmap structure.
 * @bm: Memory bitmap with unsafe pages marked.
 * @zero_bm: Memory bitmap containing the zero pages.
 *
 * Use @bm to mark the pages that will be overwritten in the process of
 * restoring the system memory state from the suspend image ("unsafe" pages)
 * and allocate memory for the image.
 *
 * The idea is to allocate a new memory bitmap first and then allocate
 * as many pages as needed for image data, but without specifying what those
 * pages will be used for just yet. Instead, we mark them all as allocated and
 * create a list of "safe" pages to be used later. On systems with high
 * memory a list of "safe" highmem pages is created too.
 *
 * Because it was not known which pages were unsafe when @zero_bm was created,
 * make a copy of it and recreate it within safe pages.
 */
static int prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm,
			 struct memory_bitmap *zero_bm)
{
	unsigned int nr_pages, nr_highmem;
	struct memory_bitmap tmp;
	struct linked_page *lp;
	int error;

	/* If there is no highmem, the buffer will not be necessary */
	free_image_page(buffer, PG_UNSAFE_CLEAR);
	buffer = NULL;

	nr_highmem = count_highmem_image_pages(bm);
	mark_unsafe_pages(bm);

	error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE);
	if (error)
		goto Free;

	duplicate_memory_bitmap(new_bm, bm);
	memory_bm_free(bm, PG_UNSAFE_KEEP);

	/* Make a copy of zero_bm so it can be created in safe pages */
	error = memory_bm_create(&tmp, GFP_ATOMIC, PG_SAFE);
	if (error)
		goto Free;

	duplicate_memory_bitmap(&tmp, zero_bm);
	memory_bm_free(zero_bm, PG_UNSAFE_KEEP);

	/* Recreate zero_bm in safe pages */
	error = memory_bm_create(zero_bm, GFP_ATOMIC, PG_SAFE);
	if (error)
		goto Free;

	duplicate_memory_bitmap(zero_bm, &tmp);
	memory_bm_free(&tmp, PG_UNSAFE_CLEAR);
	/* At this point zero_bm is in safe pages and it can be used for restoring. */

	if (nr_highmem > 0) {
		error = prepare_highmem_image(bm, &nr_highmem);
		if (error)
			goto Free;
	}
	/*
	 * Reserve some safe pages for potential later use.
	 *
	 * NOTE: This way we make sure there will be enough safe pages for the
	 * chain_alloc() in get_buffer(). It is a bit wasteful, but
	 * nr_copy_pages cannot be greater than 50% of the memory anyway.
	 *
	 * nr_copy_pages cannot be less than allocated_unsafe_pages too.
	 */
	nr_pages = (nr_zero_pages + nr_copy_pages) - nr_highmem - allocated_unsafe_pages;
	nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
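	/*
	 * Worked example, assuming 4 KB pages and a 24-byte struct pbe (three
	 * pointers on 64-bit): PBES_PER_LINKED_PAGE = (4096 - 8) / 24 = 170.
	 * Restoring 1000 copied plus zero pages, 200 of them in highmem and
	 * with 100 unsafe pages already allocated, leaves nr_pages = 700, so
	 * DIV_ROUND_UP(700, 170) = 5 linked pages are reserved here.
	 */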
	while (nr_pages > 0) {
		lp = get_image_page(GFP_ATOMIC, PG_SAFE);
		if (!lp) {
			error = -ENOMEM;
			goto Free;
		}
		lp->next = safe_pages_list;
		safe_pages_list = lp;
		nr_pages--;
	}
	/* Preallocate memory for the image */
	nr_pages = (nr_zero_pages + nr_copy_pages) - nr_highmem - allocated_unsafe_pages;
	while (nr_pages > 0) {
		lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
		if (!lp) {
			error = -ENOMEM;
			goto Free;
		}
		if (!swsusp_page_is_free(virt_to_page(lp))) {
			/* The page is "safe", add it to the list */
			lp->next = safe_pages_list;
			safe_pages_list = lp;
		}
		/* Mark the page as allocated */
		swsusp_set_page_forbidden(virt_to_page(lp));
		swsusp_set_page_free(virt_to_page(lp));
		nr_pages--;
	}
	return 0;

 Free:
	swsusp_free();
	return error;
}

/**
 * get_buffer - Get the address to store the next image data page.
 *
 * Get the address that snapshot_write_next() should return to its caller to
 * write to.
 */
static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
{
	struct pbe *pbe;
	struct page *page;
	unsigned long pfn = memory_bm_next_pfn(bm);

	if (pfn == BM_END_OF_MAP)
		return ERR_PTR(-EFAULT);

	page = pfn_to_page(pfn);
	if (PageHighMem(page))
		return get_highmem_page_buffer(page, ca);

	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page))
		/*
		 * We have allocated the "original" page frame and we can
		 * use it directly to store the loaded page.
		 */
		return page_address(page);

	/*
	 * The "original" page frame has not been allocated and we have to
	 * use a "safe" page frame to store the loaded page.
	 */
	pbe = chain_alloc(ca, sizeof(struct pbe));
	if (!pbe) {
		swsusp_free();
		return ERR_PTR(-ENOMEM);
	}
	pbe->orig_address = page_address(page);
	pbe->address = __get_safe_page(ca->gfp_mask);
	if (!pbe->address)
		return ERR_PTR(-ENOMEM);
	pbe->next = restore_pblist;
	restore_pblist = pbe;
	return pbe->address;
}

/**
 * snapshot_write_next - Get the address to store the next image page.
 * @handle: Snapshot handle structure to guide the writing.
 *
 * On the first call, @handle should point to a zeroed snapshot_handle
 * structure. The structure is then populated, and a pointer to it should be
 * passed to this function on every subsequent call.
 *
 * On success, the function returns a positive number. Then, the caller
 * is allowed to write up to the returned number of bytes to the memory
 * location computed by the data_of() macro.
 *
 * The function returns 0 to indicate the "end of file" condition. Negative
 * numbers are returned on errors, in which case the structure pointed to by
 * @handle is not updated and should not be used any more.
 */
int snapshot_write_next(struct snapshot_handle *handle)
{
	static struct chain_allocator ca;
	int error;

next:
	/* Check if we have already loaded the entire image */
	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages + nr_zero_pages)
		return 0;

	if (!handle->cur) {
		if (!buffer)
			/* This makes the buffer be freed by swsusp_free() */
			buffer = get_image_page(GFP_ATOMIC, PG_ANY);

		if (!buffer)
			return -ENOMEM;

		handle->buffer = buffer;
	} else if (handle->cur == 1) {
		error = load_header(buffer);
		if (error)
			return error;

		safe_pages_list = NULL;

		error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY);
		if (error)
			return error;

		error = memory_bm_create(&zero_bm, GFP_ATOMIC, PG_ANY);
		if (error)
			return error;

		nr_zero_pages = 0;

		hibernate_restore_protection_begin();
	} else if (handle->cur <= nr_meta_pages + 1) {
		error = unpack_orig_pfns(buffer, &copy_bm, &zero_bm);
		if (error)
			return error;

		if (handle->cur == nr_meta_pages + 1) {
			error = prepare_image(&orig_bm, &copy_bm, &zero_bm);
			if (error)
				return error;

			chain_init(&ca, GFP_ATOMIC, PG_SAFE);
			memory_bm_position_reset(&orig_bm);
			memory_bm_position_reset(&zero_bm);
			restore_pblist = NULL;
			handle->buffer = get_buffer(&orig_bm, &ca);
			if (IS_ERR(handle->buffer))
				return PTR_ERR(handle->buffer);
		}
	} else {
		copy_last_highmem_page();
		error = hibernate_restore_protect_page(handle->buffer);
		if (error)
			return error;
		handle->buffer = get_buffer(&orig_bm, &ca);
		if (IS_ERR(handle->buffer))
			return PTR_ERR(handle->buffer);
	}
	handle->sync_read = (handle->buffer == buffer);
	handle->cur++;

	/* Zero pages were not included in the image, memset it and move on. */
	if (handle->cur > nr_meta_pages + 1 &&
	    memory_bm_test_bit(&zero_bm, memory_bm_get_current(&orig_bm))) {
		memset(handle->buffer, 0, PAGE_SIZE);
		goto next;
	}

	return PAGE_SIZE;
}
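
/*
 * A minimal sketch of the writer loop implied by the contract above, along
 * the lines of load_image() in kernel/power/swap.c; read_page_from_swap() is
 * a purely illustrative stand-in for whatever source supplies the data:
 *
 *	int ret;
 *
 *	while ((ret = snapshot_write_next(&handle)) > 0) {
 *		ret = read_page_from_swap(data_of(handle));
 *		if (ret)
 *			break;
 *	}
 *	if (!ret && !snapshot_image_loaded(&handle))
 *		ret = -ENODATA;
 */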

/**
 * snapshot_write_finalize - Complete the loading of a hibernation image.
 *
 * Must be called after the last call to snapshot_write_next() in case the last
 * page in the image happens to be a highmem page and its contents should be
 * stored in highmem. Additionally, it recycles bitmap memory that's not
 * necessary any more.
 */
int snapshot_write_finalize(struct snapshot_handle *handle)
{
	int error;

	copy_last_highmem_page();
	error = hibernate_restore_protect_page(handle->buffer);
	/* Do that only if we have loaded the image entirely */
	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages + nr_zero_pages) {
		memory_bm_recycle(&orig_bm);
		free_highmem_data();
	}
	return error;
}
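
/**
 * snapshot_image_loaded - Check whether a complete image has been loaded.
 * @handle: Snapshot handle used for loading the image.
 *
 * Return a nonzero value only if there was an image to load at all
 * (nr_copy_pages is not zero), the last highmem page has been copied out of
 * @buffer and enough pages have been passed in to cover the metadata, data
 * and zero pages.
 */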
int snapshot_image_loaded(struct snapshot_handle *handle)
{
	return !(!nr_copy_pages || !last_highmem_page_copied() ||
		 handle->cur <= nr_meta_pages + nr_copy_pages + nr_zero_pages);
}

#ifdef CONFIG_HIGHMEM
/* Assumes that @buf is ready and points to a "safe" page */
static inline void swap_two_pages_data(struct page *p1, struct page *p2,
				       void *buf)
{
	void *kaddr1, *kaddr2;

	kaddr1 = kmap_atomic(p1);
	kaddr2 = kmap_atomic(p2);
	copy_page(buf, kaddr1);
	copy_page(kaddr1, kaddr2);
	copy_page(kaddr2, buf);
	kunmap_atomic(kaddr2);
	kunmap_atomic(kaddr1);
}

/**
 * restore_highmem - Put highmem image pages into their original locations.
 *
 * For each highmem page that was in use before hibernation and is included in
 * the image, and also has been allocated by the "restore" kernel, swap its
 * current contents with the previous (i.e. "before hibernation") ones.
 *
 * If the restore eventually fails, we can call this function once again and
 * restore the highmem state as seen by the restore kernel.
 */
int restore_highmem(void)
{
	struct highmem_pbe *pbe = highmem_pblist;
	void *buf;

	if (!pbe)
		return 0;

	buf = get_image_page(GFP_ATOMIC, PG_SAFE);
	if (!buf)
		return -ENOMEM;

	while (pbe) {
		swap_two_pages_data(pbe->copy_page, pbe->orig_page, buf);
		pbe = pbe->next;
	}
	free_image_page(buf, PG_UNSAFE_CLEAR);
	return 0;
}
#endif /* CONFIG_HIGHMEM */