// SPDX-License-Identifier: GPL-2.0
/*
 * HugeTLB Vmemmap Optimization (HVO)
 *
 * Copyright (c) 2020, ByteDance. All rights reserved.
 *
 * Author: Muchun Song <[email protected]>
 *
 * See Documentation/mm/vmemmap_dedup.rst
 */
#define pr_fmt(fmt)	"HugeTLB: " fmt

#include <linux/pgtable.h>
#include <linux/moduleparam.h>
#include <linux/bootmem_info.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include "hugetlb_vmemmap.h"

/**
 * struct vmemmap_remap_walk - walk vmemmap page table
 *
 * @remap_pte:		called for each lowest-level entry (PTE).
 * @nr_walked:		the number of walked PTEs.
 * @reuse_page:		the page which is reused for the tail vmemmap pages.
 * @reuse_addr:		the virtual address of the @reuse_page page.
 * @vmemmap_pages:	the list head of the vmemmap pages that can be freed
 *			or is mapped from.
 */
struct vmemmap_remap_walk {
	void			(*remap_pte)(pte_t *pte, unsigned long addr,
					     struct vmemmap_remap_walk *walk);
	unsigned long		nr_walked;
	struct page		*reuse_page;
	unsigned long		reuse_addr;
	struct list_head	*vmemmap_pages;
};

static int split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start)
{
	pmd_t __pmd;
	int i;
	unsigned long addr = start;
	struct page *head;
	pte_t *pgtable;

	spin_lock(&init_mm.page_table_lock);
	head = pmd_leaf(*pmd) ? pmd_page(*pmd) : NULL;
	spin_unlock(&init_mm.page_table_lock);

	if (!head)
		return 0;

	pgtable = pte_alloc_one_kernel(&init_mm);
	if (!pgtable)
		return -ENOMEM;

	pmd_populate_kernel(&init_mm, &__pmd, pgtable);

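	/*
	 * Fill the new PTE page so that each PTE maps the same physical page
	 * the PMD leaf covered at that offset; only the mapping granularity
	 * changes, not the contents.
	 */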
	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
		pte_t entry, *pte;
		pgprot_t pgprot = PAGE_KERNEL;

		entry = mk_pte(head + i, pgprot);
		pte = pte_offset_kernel(&__pmd, addr);
		set_pte_at(&init_mm, addr, pte, entry);
	}

	spin_lock(&init_mm.page_table_lock);
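	/*
	 * Recheck under the lock: another caller may have already split this
	 * PMD, in which case the freshly allocated page table is not needed.
	 */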
	if (likely(pmd_leaf(*pmd))) {
		/*
		 * Higher order allocations from the buddy allocator must be
		 * able to be treated as independent small pages (as they can
		 * be freed individually).
		 */
		if (!PageReserved(head))
			split_page(head, get_order(PMD_SIZE));

		/* Make pte visible before pmd. See comment in pmd_install(). */
		smp_wmb();
		pmd_populate_kernel(&init_mm, pmd, pgtable);
		flush_tlb_kernel_range(start, start + PMD_SIZE);
	} else {
		pte_free_kernel(&init_mm, pgtable);
	}
	spin_unlock(&init_mm.page_table_lock);

	return 0;
}

static void vmemmap_pte_range(pmd_t *pmd, unsigned long addr,
			      unsigned long end,
			      struct vmemmap_remap_walk *walk)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);

	/*
	 * The reuse_page is found 'first' in table walk before we start
	 * remapping (which is calling @walk->remap_pte).
	 */
	if (!walk->reuse_page) {
		walk->reuse_page = pte_page(ptep_get(pte));
		/*
		 * Because the reuse address is part of the range that we are
		 * walking, skip the reuse address range.
		 */
		addr += PAGE_SIZE;
		pte++;
		walk->nr_walked++;
	}

	for (; addr != end; addr += PAGE_SIZE, pte++) {
		walk->remap_pte(pte, addr, walk);
		walk->nr_walked++;
	}
}

static int vmemmap_pmd_range(pud_t *pud, unsigned long addr,
			     unsigned long end,
			     struct vmemmap_remap_walk *walk)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		int ret;

		ret = split_vmemmap_huge_pmd(pmd, addr & PMD_MASK);
		if (ret)
			return ret;

		next = pmd_addr_end(addr, end);
		vmemmap_pte_range(pmd, addr, next, walk);
	} while (pmd++, addr = next, addr != end);

	return 0;
}

static int vmemmap_pud_range(p4d_t *p4d, unsigned long addr,
			     unsigned long end,
			     struct vmemmap_remap_walk *walk)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(p4d, addr);
	do {
		int ret;

		next = pud_addr_end(addr, end);
		ret = vmemmap_pmd_range(pud, addr, next, walk);
		if (ret)
			return ret;
	} while (pud++, addr = next, addr != end);

	return 0;
}

static int vmemmap_p4d_range(pgd_t *pgd, unsigned long addr,
			     unsigned long end,
			     struct vmemmap_remap_walk *walk)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_offset(pgd, addr);
	do {
		int ret;

		next = p4d_addr_end(addr, end);
		ret = vmemmap_pud_range(p4d, addr, next, walk);
		if (ret)
			return ret;
	} while (p4d++, addr = next, addr != end);

	return 0;
}

static int vmemmap_remap_range(unsigned long start, unsigned long end,
			       struct vmemmap_remap_walk *walk)
{
	unsigned long addr = start;
	unsigned long next;
	pgd_t *pgd;

	VM_BUG_ON(!PAGE_ALIGNED(start));
	VM_BUG_ON(!PAGE_ALIGNED(end));

	pgd = pgd_offset_k(addr);
	do {
		int ret;

		next = pgd_addr_end(addr, end);
		ret = vmemmap_p4d_range(pgd, addr, next, walk);
		if (ret)
			return ret;
	} while (pgd++, addr = next, addr != end);

	flush_tlb_kernel_range(start, end);

	return 0;
}

/*
 * Free a vmemmap page. A vmemmap page can be allocated from the memblock
 * allocator or the buddy allocator. If the PG_reserved flag is set, it means
 * that it was allocated from the memblock allocator; just free it via
 * free_bootmem_page(). Otherwise, use __free_page().
 */
static inline void free_vmemmap_page(struct page *page)
{
	if (PageReserved(page))
		free_bootmem_page(page);
	else
		__free_page(page);
}

/* Free a list of the vmemmap pages */
static void free_vmemmap_page_list(struct list_head *list)
{
	struct page *page, *next;

	list_for_each_entry_safe(page, next, list, lru)
		free_vmemmap_page(page);
}

static void vmemmap_remap_pte(pte_t *pte, unsigned long addr,
			      struct vmemmap_remap_walk *walk)
{
	/*
	 * Remap the tail pages as read-only to catch illegal write operation
	 * to the tail pages.
	 */
	pgprot_t pgprot = PAGE_KERNEL_RO;
	struct page *page = pte_page(ptep_get(pte));
	pte_t entry;

	/* Remapping the head page requires r/w */
	if (unlikely(addr == walk->reuse_addr)) {
		pgprot = PAGE_KERNEL;
		list_del(&walk->reuse_page->lru);

		/*
		 * Makes sure that preceding stores to the page contents from
		 * vmemmap_remap_free() become visible before the set_pte_at()
		 * write.
		 */
		smp_wmb();
	}

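	/*
	 * Point this PTE at the shared reuse page and queue the page it used
	 * to map on @walk->vmemmap_pages so it can be freed afterwards.
	 */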
	entry = mk_pte(walk->reuse_page, pgprot);
	list_add_tail(&page->lru, walk->vmemmap_pages);
	set_pte_at(&init_mm, addr, pte, entry);
}

/*
 * How many struct page structs need to be reset. When we reuse the head
 * struct page, the special metadata (e.g. page->flags or page->mapping)
 * cannot be copied to the tail struct page structs. The invalid value will
 * be checked in free_tail_page_prepare(). In order to avoid the message of
 * "corrupted mapping in tail page", we need to reset at least 3 (one head
 * struct page struct and two tail struct page structs) struct page structs.
 */
#define NR_RESET_STRUCT_PAGE	3

static inline void reset_struct_pages(struct page *start)
{
	struct page *from = start + NR_RESET_STRUCT_PAGE;

	BUILD_BUG_ON(NR_RESET_STRUCT_PAGE * 2 > PAGE_SIZE / sizeof(struct page));
	memcpy(start, from, sizeof(*from) * NR_RESET_STRUCT_PAGE);
}

static void vmemmap_restore_pte(pte_t *pte, unsigned long addr,
				struct vmemmap_remap_walk *walk)
{
	pgprot_t pgprot = PAGE_KERNEL;
	struct page *page;
	void *to;

	BUG_ON(pte_page(ptep_get(pte)) != walk->reuse_page);

	page = list_first_entry(walk->vmemmap_pages, struct page, lru);
	list_del(&page->lru);
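	/*
	 * Prime the new page with a copy of the reuse page, then reset the
	 * leading struct pages that still carry the head page's metadata
	 * (see NR_RESET_STRUCT_PAGE above).
	 */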
	to = page_to_virt(page);
	copy_page(to, (void *)walk->reuse_addr);
	reset_struct_pages(to);

	/*
	 * Makes sure that preceding stores to the page contents become visible
	 * before the set_pte_at() write.
	 */
	smp_wmb();
	set_pte_at(&init_mm, addr, pte, mk_pte(page, pgprot));
}

/**
 * vmemmap_remap_free - remap the vmemmap virtual address range [@start, @end)
 *			to the page which @reuse is mapped to, then free the
 *			vmemmap pages which the range was mapped to.
 * @start:	start address of the vmemmap virtual address range that we want
 *		to remap.
 * @end:	end address of the vmemmap virtual address range that we want to
 *		remap.
 * @reuse:	reuse address.
 *
 * Return: %0 on success, negative error code otherwise.
 */
static int vmemmap_remap_free(unsigned long start, unsigned long end,
			      unsigned long reuse)
{
	int ret;
	LIST_HEAD(vmemmap_pages);
	struct vmemmap_remap_walk walk = {
		.remap_pte	= vmemmap_remap_pte,
		.reuse_addr	= reuse,
		.vmemmap_pages	= &vmemmap_pages,
	};
	int nid = page_to_nid((struct page *)start);
	gfp_t gfp_mask = GFP_KERNEL | __GFP_THISNODE | __GFP_NORETRY |
			 __GFP_NOWARN;

	/*
	 * Allocate a new head vmemmap page to avoid breaking a contiguous
	 * block of struct page memory when freeing it back to the page
	 * allocator in free_vmemmap_page_list(). This will allow the likely
	 * contiguous struct page backing memory to be kept contiguous,
	 * allowing for more allocations of hugepages. Fall back to the
	 * currently mapped head page should the allocation fail.
	 */
	walk.reuse_page = alloc_pages_node(nid, gfp_mask, 0);
	if (walk.reuse_page) {
		copy_page(page_to_virt(walk.reuse_page),
			  (void *)walk.reuse_addr);
		list_add(&walk.reuse_page->lru, &vmemmap_pages);
	}

	/*
	 * In order to make the remapping routine most efficient for huge
	 * pages, the vmemmap page table walking routine has the following
	 * rules (see more details in vmemmap_pte_range()):
	 *
	 * - The range [@start, @end) and the range [@reuse, @reuse + PAGE_SIZE)
	 *   should be continuous.
	 * - The @reuse address is part of the range [@reuse, @end) that we are
	 *   walking which is passed to vmemmap_remap_range().
	 * - The @reuse address is the first in the complete range.
	 *
	 * So we need to make sure that @start and @reuse meet the above rules.
	 */
	BUG_ON(start - reuse != PAGE_SIZE);

	mmap_read_lock(&init_mm);
	ret = vmemmap_remap_range(reuse, end, &walk);
	if (ret && walk.nr_walked) {
		end = reuse + walk.nr_walked * PAGE_SIZE;
		/*
		 * vmemmap_pages contains pages from the previous
		 * vmemmap_remap_range call which failed. These
		 * are pages which were removed from the vmemmap.
		 * They will be restored in the following call.
		 */
		walk = (struct vmemmap_remap_walk) {
			.remap_pte	= vmemmap_restore_pte,
			.reuse_addr	= reuse,
			.vmemmap_pages	= &vmemmap_pages,
		};

		vmemmap_remap_range(reuse, end, &walk);
	}
	mmap_read_unlock(&init_mm);

	free_vmemmap_page_list(&vmemmap_pages);

	return ret;
}

static int alloc_vmemmap_page_list(unsigned long start, unsigned long end,
				   struct list_head *list)
{
	gfp_t gfp_mask = GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_THISNODE;
	unsigned long nr_pages = (end - start) >> PAGE_SHIFT;
	int nid = page_to_nid((struct page *)start);
	struct page *page, *next;

	while (nr_pages--) {
		page = alloc_pages_node(nid, gfp_mask, 0);
		if (!page)
			goto out;
		list_add_tail(&page->lru, list);
	}

	return 0;
out:
	list_for_each_entry_safe(page, next, list, lru)
		__free_page(page);
	return -ENOMEM;
}

/**
 * vmemmap_remap_alloc - remap the vmemmap virtual address range [@start, @end)
 *			 to newly allocated pages, one page per vmemmap page in
 *			 the range.
 * @start:	start address of the vmemmap virtual address range that we want
 *		to remap.
 * @end:	end address of the vmemmap virtual address range that we want to
 *		remap.
 * @reuse:	reuse address.
 *
 * Return: %0 on success, negative error code otherwise.
 */
static int vmemmap_remap_alloc(unsigned long start, unsigned long end,
			       unsigned long reuse)
{
	LIST_HEAD(vmemmap_pages);
	struct vmemmap_remap_walk walk = {
		.remap_pte	= vmemmap_restore_pte,
		.reuse_addr	= reuse,
		.vmemmap_pages	= &vmemmap_pages,
	};

	/* See the comment in vmemmap_remap_free(). */
	BUG_ON(start - reuse != PAGE_SIZE);

	if (alloc_vmemmap_page_list(start, end, &vmemmap_pages))
		return -ENOMEM;

	mmap_read_lock(&init_mm);
	vmemmap_remap_range(reuse, end, &walk);
	mmap_read_unlock(&init_mm);

	return 0;
}

DEFINE_STATIC_KEY_FALSE(hugetlb_optimize_vmemmap_key);
EXPORT_SYMBOL(hugetlb_optimize_vmemmap_key);

static bool vmemmap_optimize_enabled = IS_ENABLED(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON);
core_param(hugetlb_free_vmemmap, vmemmap_optimize_enabled, bool, 0);

/**
 * hugetlb_vmemmap_restore - restore previously optimized (by
 *			     hugetlb_vmemmap_optimize()) vmemmap pages which
 *			     will be reallocated and remapped.
 * @h:		struct hstate.
 * @head:	the head page whose vmemmap pages will be restored.
 *
 * Return: %0 if @head's vmemmap pages have been reallocated and remapped,
 * negative error code otherwise.
 */
int hugetlb_vmemmap_restore(const struct hstate *h, struct page *head)
{
	int ret;
	unsigned long vmemmap_start = (unsigned long)head, vmemmap_end;
	unsigned long vmemmap_reuse;

	if (!HPageVmemmapOptimized(head))
		return 0;

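	/*
	 * The first vmemmap page (HUGETLB_VMEMMAP_RESERVE_SIZE) stayed mapped
	 * when the vmemmap was optimized; only [@vmemmap_start, @vmemmap_end)
	 * below needs freshly allocated pages.
	 */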
	vmemmap_end	= vmemmap_start + hugetlb_vmemmap_size(h);
	vmemmap_reuse	= vmemmap_start;
	vmemmap_start	+= HUGETLB_VMEMMAP_RESERVE_SIZE;

	/*
	 * The pages which the vmemmap virtual address range [@vmemmap_start,
	 * @vmemmap_end) are mapped to are freed to the buddy allocator, and
	 * the range is mapped to the page which @vmemmap_reuse is mapped to.
	 * When a HugeTLB page is freed to the buddy allocator, previously
	 * discarded vmemmap pages must be allocated and remapped.
	 */
	ret = vmemmap_remap_alloc(vmemmap_start, vmemmap_end, vmemmap_reuse);
	if (!ret) {
		ClearHPageVmemmapOptimized(head);
		static_branch_dec(&hugetlb_optimize_vmemmap_key);
	}

	return ret;
}

/* Return true if a HugeTLB page's vmemmap should and can be optimized. */
static bool vmemmap_should_optimize(const struct hstate *h, const struct page *head)
{
	if (!READ_ONCE(vmemmap_optimize_enabled))
		return false;

	if (!hugetlb_vmemmap_optimizable(h))
		return false;

	if (IS_ENABLED(CONFIG_MEMORY_HOTPLUG)) {
		pmd_t *pmdp, pmd;
		struct page *vmemmap_page;
		unsigned long vaddr = (unsigned long)head;

		/*
		 * Only the vmemmap page's vmemmap page can be self-hosted.
		 * Walk the page tables to find the backing page of the
		 * vmemmap page.
		 */
		pmdp = pmd_off_k(vaddr);
		/*
		 * The READ_ONCE() is used to stabilize *pmdp in a register or
		 * on the stack so that it will stop changing under the code.
		 * The only concurrent operation where it can be changed is
		 * split_vmemmap_huge_pmd() (*pmdp will be stable after this
		 * operation).
		 */
		pmd = READ_ONCE(*pmdp);
		if (pmd_leaf(pmd))
			vmemmap_page = pmd_page(pmd) + pte_index(vaddr);
		else
			vmemmap_page = pte_page(*pte_offset_kernel(pmdp, vaddr));
		/*
		 * Due to HugeTLB alignment requirements and the vmemmap pages
		 * being at the start of the hotplugged memory region in the
		 * memory_hotplug.memmap_on_memory case, checking whether any
		 * vmemmap page's vmemmap page is marked as VmemmapSelfHosted
		 * is sufficient.
		 *
		 * [                  hotplugged memory                  ]
		 * [        section        ][...][        section        ]
		 * [ vmemmap ][              usable memory               ]
		 *   ^   |     |                                        |
		 *   +---+     |                                        |
		 *     ^       |                                        |
		 *     +-------+                                        |
		 *          ^                                           |
		 *          +-------------------------------------------+
		 */
		if (PageVmemmapSelfHosted(vmemmap_page))
			return false;
	}

	return true;
}

/**
 * hugetlb_vmemmap_optimize - optimize @head page's vmemmap pages.
 * @h:		struct hstate.
 * @head:	the head page whose vmemmap pages will be optimized.
 *
 * This function only tries to optimize @head's vmemmap pages and does not
 * guarantee that the optimization will succeed after it returns. The caller
 * can use HPageVmemmapOptimized(@head) to detect if @head's vmemmap pages
 * have been optimized.
 */
void hugetlb_vmemmap_optimize(const struct hstate *h, struct page *head)
{
	unsigned long vmemmap_start = (unsigned long)head, vmemmap_end;
	unsigned long vmemmap_reuse;

	if (!vmemmap_should_optimize(h, head))
		return;

	static_branch_inc(&hugetlb_optimize_vmemmap_key);

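	/*
	 * Keep the first vmemmap page as the shared @vmemmap_reuse page and
	 * treat the rest of the range, [@vmemmap_start, @vmemmap_end), as
	 * candidates for freeing.
	 */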
	vmemmap_end	= vmemmap_start + hugetlb_vmemmap_size(h);
	vmemmap_reuse	= vmemmap_start;
	vmemmap_start	+= HUGETLB_VMEMMAP_RESERVE_SIZE;

	/*
	 * Remap the vmemmap virtual address range [@vmemmap_start, @vmemmap_end)
	 * to the page which @vmemmap_reuse is mapped to, then free the pages
	 * which the range [@vmemmap_start, @vmemmap_end) is mapped to.
	 */
	if (vmemmap_remap_free(vmemmap_start, vmemmap_end, vmemmap_reuse))
		static_branch_dec(&hugetlb_optimize_vmemmap_key);
	else
		SetHPageVmemmapOptimized(head);
}

static struct ctl_table hugetlb_vmemmap_sysctls[] = {
	{
		.procname	= "hugetlb_optimize_vmemmap",
		.data		= &vmemmap_optimize_enabled,
		.maxlen		= sizeof(vmemmap_optimize_enabled),
		.mode		= 0644,
		.proc_handler	= proc_dobool,
	},
	{ }
};

static int __init hugetlb_vmemmap_init(void)
{
	const struct hstate *h;

	/* HUGETLB_VMEMMAP_RESERVE_SIZE should cover all used struct pages */
	BUILD_BUG_ON(__NR_USED_SUBPAGE * sizeof(struct page) > HUGETLB_VMEMMAP_RESERVE_SIZE);

	for_each_hstate(h) {
		if (hugetlb_vmemmap_optimizable(h)) {
			register_sysctl_init("vm", hugetlb_vmemmap_sysctls);
			break;
		}
	}
	return 0;
}
late_initcall(hugetlb_vmemmap_init);