// SPDX-License-Identifier: GPL-2.0
/*
 * Optimize vmemmap pages associated with HugeTLB
 *
 * Copyright (c) 2020, Bytedance. All rights reserved.
 *
 * See Documentation/vm/vmemmap_dedup.rst
 */
#define pr_fmt(fmt)	"HugeTLB: " fmt

#include "hugetlb_vmemmap.h"

/*
 * There are a lot of struct page structures associated with each HugeTLB page.
 * For tail pages, the value of compound_head is the same, so we can reuse the
 * page frame that holds the head struct page. We remap the virtual addresses
 * of all the pages holding tail struct pages to that first page frame and then
 * free the now-redundant page frames. Therefore, we only need to reserve one
 * page as the vmemmap area.
 */
#define RESERVE_VMEMMAP_NR		1U
#define RESERVE_VMEMMAP_SIZE		(RESERVE_VMEMMAP_NR << PAGE_SHIFT)
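
/*
 * Worked example (illustrative, assuming 4 KiB base pages and a 64-byte
 * struct page, as on a typical x86-64 configuration): a 2 MiB HugeTLB page
 * has 512 struct pages, i.e. 512 * 64 bytes = 8 vmemmap pages. One page is
 * reserved and the remaining 7 are remapped to it and freed, saving 28 KiB
 * per 2 MiB HugeTLB page.
 */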

DEFINE_STATIC_KEY_MAYBE(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON,
			hugetlb_optimize_vmemmap_key);
EXPORT_SYMBOL(hugetlb_optimize_vmemmap_key);
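
/*
 * Note: the static key defaults to on only when
 * CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON=y; otherwise it defaults
 * to off. Either way it can be flipped at boot by the
 * "hugetlb_free_vmemmap=" early parameter handled below.
 */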

static int __init hugetlb_vmemmap_early_param(char *buf)
{
	/* We cannot optimize if a "struct page" crosses page boundaries. */
	if (!is_power_of_2(sizeof(struct page))) {
		pr_warn("cannot free vmemmap pages because \"struct page\" crosses page boundaries\n");
		return 0;
	}

	if (!buf)
		return -EINVAL;

	if (!strcmp(buf, "on"))
		static_branch_enable(&hugetlb_optimize_vmemmap_key);
	else if (!strcmp(buf, "off"))
		static_branch_disable(&hugetlb_optimize_vmemmap_key);
	else
		return -EINVAL;

	return 0;
}
early_param("hugetlb_free_vmemmap", hugetlb_vmemmap_early_param);
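
/*
 * Example (boot command line): pass "hugetlb_free_vmemmap=on" to enable the
 * optimization at boot, or "hugetlb_free_vmemmap=off" to disable it when the
 * kernel was built with CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON=y.
 */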

/*
 * Previously discarded vmemmap pages will be allocated and remapped after
 * this function returns zero.
 */
int hugetlb_vmemmap_alloc(struct hstate *h, struct page *head)
{
	int ret;
	unsigned long vmemmap_addr = (unsigned long)head;
	unsigned long vmemmap_end, vmemmap_reuse, vmemmap_pages;

	if (!HPageVmemmapOptimized(head))
		return 0;

	vmemmap_addr	+= RESERVE_VMEMMAP_SIZE;
	vmemmap_pages	= hugetlb_optimize_vmemmap_pages(h);
	vmemmap_end	= vmemmap_addr + (vmemmap_pages << PAGE_SHIFT);
	vmemmap_reuse	= vmemmap_addr - PAGE_SIZE;
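
	/*
	 * Address layout (illustrative): @vmemmap_reuse is the vmemmap page
	 * that still holds the head struct page, while [@vmemmap_addr,
	 * @vmemmap_end) spans the vmemmap pages whose backing frames were
	 * discarded by hugetlb_vmemmap_free().
	 */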

	/*
	 * The pages which the vmemmap virtual address range [@vmemmap_addr,
	 * @vmemmap_end) are mapped to are freed to the buddy allocator, and
	 * the range is mapped to the page which @vmemmap_reuse is mapped to.
	 * When a HugeTLB page is freed to the buddy allocator, previously
	 * discarded vmemmap pages must be allocated and remapped.
	 */
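	/*
	 * GFP note: __GFP_NORETRY keeps this allocation from triggering heavy
	 * reclaim or the OOM killer, and __GFP_THISNODE asks for the restored
	 * vmemmap pages on the same node as the HugeTLB page they describe.
	 */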
	ret = vmemmap_remap_alloc(vmemmap_addr, vmemmap_end, vmemmap_reuse,
				  GFP_KERNEL | __GFP_NORETRY | __GFP_THISNODE);
	if (!ret)
		ClearHPageVmemmapOptimized(head);

	return ret;
}

void hugetlb_vmemmap_free(struct hstate *h, struct page *head)
{
	unsigned long vmemmap_addr = (unsigned long)head;
	unsigned long vmemmap_end, vmemmap_reuse, vmemmap_pages;

	vmemmap_pages = hugetlb_optimize_vmemmap_pages(h);
	if (!vmemmap_pages)
		return;

	vmemmap_addr	+= RESERVE_VMEMMAP_SIZE;
	vmemmap_end	= vmemmap_addr + (vmemmap_pages << PAGE_SHIFT);
	vmemmap_reuse	= vmemmap_addr - PAGE_SIZE;

	/*
	 * Remap the vmemmap virtual address range [@vmemmap_addr, @vmemmap_end)
	 * to the page which @vmemmap_reuse is mapped to, then free the pages
	 * which the range [@vmemmap_addr, @vmemmap_end) is mapped to.
	 */
	if (!vmemmap_remap_free(vmemmap_addr, vmemmap_end, vmemmap_reuse))
		SetHPageVmemmapOptimized(head);
}
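
/*
 * Note on pairing: hugetlb_vmemmap_free() runs when a HugeTLB page is
 * created and shrinks its vmemmap; hugetlb_vmemmap_alloc() runs when the
 * page is dissolved back to the buddy allocator and restores the vmemmap.
 * A page whose vmemmap is currently shrunk is marked HPageVmemmapOptimized.
 */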

void __init hugetlb_vmemmap_init(struct hstate *h)
{
	unsigned int nr_pages = pages_per_huge_page(h);
	unsigned int vmemmap_pages;

	/*
	 * There are only (RESERVE_VMEMMAP_SIZE / sizeof(struct page)) struct
	 * page structs that can be used when CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
	 * is enabled, so add a BUILD_BUG_ON to catch invalid usage of the tail
	 * struct page.
	 */
	BUILD_BUG_ON(__NR_USED_SUBPAGE >=
		     RESERVE_VMEMMAP_SIZE / sizeof(struct page));

	if (!hugetlb_optimize_vmemmap_enabled())
		return;

	vmemmap_pages = (nr_pages * sizeof(struct page)) >> PAGE_SHIFT;
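	/*
	 * E.g. (illustrative, assuming 4 KiB base pages and a 64-byte struct
	 * page): a 2 MiB hstate has nr_pages = 512, so vmemmap_pages =
	 * (512 * 64) >> PAGE_SHIFT = 8; a 1 GiB hstate yields 4096.
	 */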
	/*
	 * The head vmemmap page is not freed to the buddy allocator; the
	 * other tail pages are remapped to it, so they can be freed.
	 *
	 * Could RESERVE_VMEMMAP_NR be greater than @vmemmap_pages? It is true
	 * on some architectures (e.g. aarch64). See Documentation/arm64/
	 * hugetlbpage.rst for more details.
	 */
	if (likely(vmemmap_pages > RESERVE_VMEMMAP_NR))
		h->optimize_vmemmap_pages = vmemmap_pages - RESERVE_VMEMMAP_NR;

	pr_info("can optimize %u vmemmap pages for %s\n",
		h->optimize_vmemmap_pages, h->name);
}