]> Git Repo - linux.git/blame - mm/khugepaged.c
mm: khugepaged: don't carry huge page to the next loop for !CONFIG_NUMA
[linux.git] / mm / khugepaged.c
CommitLineData
b2441318 1// SPDX-License-Identifier: GPL-2.0
b46e756f
KS
2#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
3
4#include <linux/mm.h>
5#include <linux/sched.h>
6e84f315 6#include <linux/sched/mm.h>
f7ccbae4 7#include <linux/sched/coredump.h>
b46e756f
KS
8#include <linux/mmu_notifier.h>
9#include <linux/rmap.h>
10#include <linux/swap.h>
11#include <linux/mm_inline.h>
12#include <linux/kthread.h>
13#include <linux/khugepaged.h>
14#include <linux/freezer.h>
15#include <linux/mman.h>
16#include <linux/hashtable.h>
17#include <linux/userfaultfd_k.h>
18#include <linux/page_idle.h>
80110bbf 19#include <linux/page_table_check.h>
b46e756f 20#include <linux/swapops.h>
f3f0e1d2 21#include <linux/shmem_fs.h>
b46e756f
KS
22
23#include <asm/tlb.h>
24#include <asm/pgalloc.h>
25#include "internal.h"
26
27enum scan_result {
28 SCAN_FAIL,
29 SCAN_SUCCEED,
30 SCAN_PMD_NULL,
31 SCAN_EXCEED_NONE_PTE,
71a2c112
KS
32 SCAN_EXCEED_SWAP_PTE,
33 SCAN_EXCEED_SHARED_PTE,
b46e756f 34 SCAN_PTE_NON_PRESENT,
e1e267c7 35 SCAN_PTE_UFFD_WP,
b46e756f 36 SCAN_PAGE_RO,
0db501f7 37 SCAN_LACK_REFERENCED_PAGE,
b46e756f
KS
38 SCAN_PAGE_NULL,
39 SCAN_SCAN_ABORT,
40 SCAN_PAGE_COUNT,
41 SCAN_PAGE_LRU,
42 SCAN_PAGE_LOCK,
43 SCAN_PAGE_ANON,
44 SCAN_PAGE_COMPOUND,
45 SCAN_ANY_PROCESS,
46 SCAN_VMA_NULL,
47 SCAN_VMA_CHECK,
48 SCAN_ADDRESS_RANGE,
b46e756f
KS
49 SCAN_DEL_PAGE_LRU,
50 SCAN_ALLOC_HUGE_PAGE_FAIL,
51 SCAN_CGROUP_CHARGE_FAIL,
f3f0e1d2 52 SCAN_TRUNCATED,
99cb0dbd 53 SCAN_PAGE_HAS_PRIVATE,
b46e756f
KS
54};
55
56#define CREATE_TRACE_POINTS
57#include <trace/events/huge_memory.h>
58
4aab2be0
VB
59static struct task_struct *khugepaged_thread __read_mostly;
60static DEFINE_MUTEX(khugepaged_mutex);
61
b46e756f
KS
62/* default scan 8*512 pte (or vmas) every 30 second */
63static unsigned int khugepaged_pages_to_scan __read_mostly;
64static unsigned int khugepaged_pages_collapsed;
65static unsigned int khugepaged_full_scans;
66static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
67/* during fragmentation poll the hugepage allocator once every minute */
68static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
69static unsigned long khugepaged_sleep_expire;
70static DEFINE_SPINLOCK(khugepaged_mm_lock);
71static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
72/*
73 * default collapse hugepages if there is at least one pte mapped like
74 * it would have happened if the vma was large enough during page
75 * fault.
76 */
77static unsigned int khugepaged_max_ptes_none __read_mostly;
78static unsigned int khugepaged_max_ptes_swap __read_mostly;
71a2c112 79static unsigned int khugepaged_max_ptes_shared __read_mostly;
b46e756f
KS
80
81#define MM_SLOTS_HASH_BITS 10
82static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
83
84static struct kmem_cache *mm_slot_cache __read_mostly;
85
27e1f827
SL
86#define MAX_PTE_MAPPED_THP 8
87
b46e756f
KS
88/**
89 * struct mm_slot - hash lookup from mm to mm_slot
90 * @hash: hash collision list
91 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
92 * @mm: the mm that this information is valid for
336e6b53
AS
93 * @nr_pte_mapped_thp: number of pte mapped THP
94 * @pte_mapped_thp: address array corresponding pte mapped THP
b46e756f
KS
95 */
96struct mm_slot {
97 struct hlist_node hash;
98 struct list_head mm_node;
99 struct mm_struct *mm;
27e1f827
SL
100
101 /* pte-mapped THP in this mm */
102 int nr_pte_mapped_thp;
103 unsigned long pte_mapped_thp[MAX_PTE_MAPPED_THP];
b46e756f
KS
104};
105
106/**
107 * struct khugepaged_scan - cursor for scanning
108 * @mm_head: the head of the mm list to scan
109 * @mm_slot: the current mm_slot we are scanning
110 * @address: the next address inside that to be scanned
111 *
112 * There is only the one khugepaged_scan instance of this cursor structure.
113 */
114struct khugepaged_scan {
115 struct list_head mm_head;
116 struct mm_slot *mm_slot;
117 unsigned long address;
118};
119
120static struct khugepaged_scan khugepaged_scan = {
121 .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
122};
123
e1465d12 124#ifdef CONFIG_SYSFS
b46e756f
KS
125static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
126 struct kobj_attribute *attr,
127 char *buf)
128{
ae7a927d 129 return sysfs_emit(buf, "%u\n", khugepaged_scan_sleep_millisecs);
b46e756f
KS
130}
131
132static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
133 struct kobj_attribute *attr,
134 const char *buf, size_t count)
135{
dfefd226 136 unsigned int msecs;
b46e756f
KS
137 int err;
138
dfefd226
AD
139 err = kstrtouint(buf, 10, &msecs);
140 if (err)
b46e756f
KS
141 return -EINVAL;
142
143 khugepaged_scan_sleep_millisecs = msecs;
144 khugepaged_sleep_expire = 0;
145 wake_up_interruptible(&khugepaged_wait);
146
147 return count;
148}
149static struct kobj_attribute scan_sleep_millisecs_attr =
6dcdc94d 150 __ATTR_RW(scan_sleep_millisecs);
b46e756f
KS
151
152static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
153 struct kobj_attribute *attr,
154 char *buf)
155{
ae7a927d 156 return sysfs_emit(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
b46e756f
KS
157}
158
159static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
160 struct kobj_attribute *attr,
161 const char *buf, size_t count)
162{
dfefd226 163 unsigned int msecs;
b46e756f
KS
164 int err;
165
dfefd226
AD
166 err = kstrtouint(buf, 10, &msecs);
167 if (err)
b46e756f
KS
168 return -EINVAL;
169
170 khugepaged_alloc_sleep_millisecs = msecs;
171 khugepaged_sleep_expire = 0;
172 wake_up_interruptible(&khugepaged_wait);
173
174 return count;
175}
176static struct kobj_attribute alloc_sleep_millisecs_attr =
6dcdc94d 177 __ATTR_RW(alloc_sleep_millisecs);
b46e756f
KS
178
179static ssize_t pages_to_scan_show(struct kobject *kobj,
180 struct kobj_attribute *attr,
181 char *buf)
182{
ae7a927d 183 return sysfs_emit(buf, "%u\n", khugepaged_pages_to_scan);
b46e756f
KS
184}
185static ssize_t pages_to_scan_store(struct kobject *kobj,
186 struct kobj_attribute *attr,
187 const char *buf, size_t count)
188{
dfefd226 189 unsigned int pages;
b46e756f 190 int err;
b46e756f 191
dfefd226
AD
192 err = kstrtouint(buf, 10, &pages);
193 if (err || !pages)
b46e756f
KS
194 return -EINVAL;
195
196 khugepaged_pages_to_scan = pages;
197
198 return count;
199}
200static struct kobj_attribute pages_to_scan_attr =
6dcdc94d 201 __ATTR_RW(pages_to_scan);
b46e756f
KS
202
203static ssize_t pages_collapsed_show(struct kobject *kobj,
204 struct kobj_attribute *attr,
205 char *buf)
206{
ae7a927d 207 return sysfs_emit(buf, "%u\n", khugepaged_pages_collapsed);
b46e756f
KS
208}
209static struct kobj_attribute pages_collapsed_attr =
210 __ATTR_RO(pages_collapsed);
211
212static ssize_t full_scans_show(struct kobject *kobj,
213 struct kobj_attribute *attr,
214 char *buf)
215{
ae7a927d 216 return sysfs_emit(buf, "%u\n", khugepaged_full_scans);
b46e756f
KS
217}
218static struct kobj_attribute full_scans_attr =
219 __ATTR_RO(full_scans);
220
6dcdc94d
ML
221static ssize_t defrag_show(struct kobject *kobj,
222 struct kobj_attribute *attr, char *buf)
b46e756f
KS
223{
224 return single_hugepage_flag_show(kobj, attr, buf,
ae7a927d 225 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
b46e756f 226}
6dcdc94d
ML
227static ssize_t defrag_store(struct kobject *kobj,
228 struct kobj_attribute *attr,
229 const char *buf, size_t count)
b46e756f
KS
230{
231 return single_hugepage_flag_store(kobj, attr, buf, count,
232 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
233}
234static struct kobj_attribute khugepaged_defrag_attr =
6dcdc94d 235 __ATTR_RW(defrag);
b46e756f
KS
236
237/*
238 * max_ptes_none controls if khugepaged should collapse hugepages over
239 * any unmapped ptes in turn potentially increasing the memory
240 * footprint of the vmas. When max_ptes_none is 0 khugepaged will not
241 * reduce the available free memory in the system as it
242 * runs. Increasing max_ptes_none will instead potentially reduce the
243 * free memory in the system during the khugepaged scan.
244 */
6dcdc94d
ML
245static ssize_t max_ptes_none_show(struct kobject *kobj,
246 struct kobj_attribute *attr,
247 char *buf)
b46e756f 248{
ae7a927d 249 return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_none);
b46e756f 250}
6dcdc94d
ML
251static ssize_t max_ptes_none_store(struct kobject *kobj,
252 struct kobj_attribute *attr,
253 const char *buf, size_t count)
b46e756f
KS
254{
255 int err;
256 unsigned long max_ptes_none;
257
258 err = kstrtoul(buf, 10, &max_ptes_none);
36ee2c78 259 if (err || max_ptes_none > HPAGE_PMD_NR - 1)
b46e756f
KS
260 return -EINVAL;
261
262 khugepaged_max_ptes_none = max_ptes_none;
263
264 return count;
265}
266static struct kobj_attribute khugepaged_max_ptes_none_attr =
6dcdc94d 267 __ATTR_RW(max_ptes_none);
b46e756f 268
6dcdc94d
ML
269static ssize_t max_ptes_swap_show(struct kobject *kobj,
270 struct kobj_attribute *attr,
271 char *buf)
b46e756f 272{
ae7a927d 273 return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_swap);
b46e756f
KS
274}
275
6dcdc94d
ML
276static ssize_t max_ptes_swap_store(struct kobject *kobj,
277 struct kobj_attribute *attr,
278 const char *buf, size_t count)
b46e756f
KS
279{
280 int err;
281 unsigned long max_ptes_swap;
282
283 err = kstrtoul(buf, 10, &max_ptes_swap);
36ee2c78 284 if (err || max_ptes_swap > HPAGE_PMD_NR - 1)
b46e756f
KS
285 return -EINVAL;
286
287 khugepaged_max_ptes_swap = max_ptes_swap;
288
289 return count;
290}
291
292static struct kobj_attribute khugepaged_max_ptes_swap_attr =
6dcdc94d 293 __ATTR_RW(max_ptes_swap);
b46e756f 294
6dcdc94d
ML
295static ssize_t max_ptes_shared_show(struct kobject *kobj,
296 struct kobj_attribute *attr,
297 char *buf)
71a2c112 298{
ae7a927d 299 return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_shared);
71a2c112
KS
300}
301
6dcdc94d
ML
302static ssize_t max_ptes_shared_store(struct kobject *kobj,
303 struct kobj_attribute *attr,
304 const char *buf, size_t count)
71a2c112
KS
305{
306 int err;
307 unsigned long max_ptes_shared;
308
309 err = kstrtoul(buf, 10, &max_ptes_shared);
36ee2c78 310 if (err || max_ptes_shared > HPAGE_PMD_NR - 1)
71a2c112
KS
311 return -EINVAL;
312
313 khugepaged_max_ptes_shared = max_ptes_shared;
314
315 return count;
316}
317
318static struct kobj_attribute khugepaged_max_ptes_shared_attr =
6dcdc94d 319 __ATTR_RW(max_ptes_shared);
71a2c112 320
b46e756f
KS
321static struct attribute *khugepaged_attr[] = {
322 &khugepaged_defrag_attr.attr,
323 &khugepaged_max_ptes_none_attr.attr,
71a2c112
KS
324 &khugepaged_max_ptes_swap_attr.attr,
325 &khugepaged_max_ptes_shared_attr.attr,
b46e756f
KS
326 &pages_to_scan_attr.attr,
327 &pages_collapsed_attr.attr,
328 &full_scans_attr.attr,
329 &scan_sleep_millisecs_attr.attr,
330 &alloc_sleep_millisecs_attr.attr,
b46e756f
KS
331 NULL,
332};
333
334struct attribute_group khugepaged_attr_group = {
335 .attrs = khugepaged_attr,
336 .name = "khugepaged",
337};
e1465d12 338#endif /* CONFIG_SYSFS */
b46e756f 339
b46e756f
KS
340int hugepage_madvise(struct vm_area_struct *vma,
341 unsigned long *vm_flags, int advice)
342{
343 switch (advice) {
344 case MADV_HUGEPAGE:
345#ifdef CONFIG_S390
346 /*
347 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
348 * can't handle this properly after s390_enable_sie, so we simply
349 * ignore the madvise to prevent qemu from causing a SIGSEGV.
350 */
351 if (mm_has_pgste(vma->vm_mm))
352 return 0;
353#endif
354 *vm_flags &= ~VM_NOHUGEPAGE;
355 *vm_flags |= VM_HUGEPAGE;
356 /*
357 * If the vma become good for khugepaged to scan,
358 * register it here without waiting a page fault that
359 * may not happen any time soon.
360 */
c791576c 361 khugepaged_enter_vma(vma, *vm_flags);
b46e756f
KS
362 break;
363 case MADV_NOHUGEPAGE:
364 *vm_flags &= ~VM_HUGEPAGE;
365 *vm_flags |= VM_NOHUGEPAGE;
366 /*
367 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
368 * this vma even if we leave the mm registered in khugepaged if
369 * it got registered before VM_NOHUGEPAGE was set.
370 */
371 break;
372 }
373
374 return 0;
375}
376
377int __init khugepaged_init(void)
378{
379 mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
380 sizeof(struct mm_slot),
381 __alignof__(struct mm_slot), 0, NULL);
382 if (!mm_slot_cache)
383 return -ENOMEM;
384
385 khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
386 khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
387 khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
71a2c112 388 khugepaged_max_ptes_shared = HPAGE_PMD_NR / 2;
b46e756f
KS
389
390 return 0;
391}
392
393void __init khugepaged_destroy(void)
394{
395 kmem_cache_destroy(mm_slot_cache);
396}
397
398static inline struct mm_slot *alloc_mm_slot(void)
399{
400 if (!mm_slot_cache) /* initialization failed */
401 return NULL;
402 return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
403}
404
405static inline void free_mm_slot(struct mm_slot *mm_slot)
406{
407 kmem_cache_free(mm_slot_cache, mm_slot);
408}
409
410static struct mm_slot *get_mm_slot(struct mm_struct *mm)
411{
412 struct mm_slot *mm_slot;
413
414 hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
415 if (mm == mm_slot->mm)
416 return mm_slot;
417
418 return NULL;
419}
420
421static void insert_to_mm_slots_hash(struct mm_struct *mm,
422 struct mm_slot *mm_slot)
423{
424 mm_slot->mm = mm;
425 hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
426}
427
428static inline int khugepaged_test_exit(struct mm_struct *mm)
429{
4d45e75a 430 return atomic_read(&mm->mm_users) == 0;
b46e756f
KS
431}
432
d2081b2b 433void __khugepaged_enter(struct mm_struct *mm)
b46e756f
KS
434{
435 struct mm_slot *mm_slot;
436 int wakeup;
437
438 mm_slot = alloc_mm_slot();
439 if (!mm_slot)
d2081b2b 440 return;
b46e756f
KS
441
442 /* __khugepaged_exit() must not run from under us */
28ff0a3c 443 VM_BUG_ON_MM(khugepaged_test_exit(mm), mm);
b46e756f
KS
444 if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
445 free_mm_slot(mm_slot);
d2081b2b 446 return;
b46e756f
KS
447 }
448
449 spin_lock(&khugepaged_mm_lock);
450 insert_to_mm_slots_hash(mm, mm_slot);
451 /*
452 * Insert just behind the scanning cursor, to let the area settle
453 * down a little.
454 */
455 wakeup = list_empty(&khugepaged_scan.mm_head);
456 list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
457 spin_unlock(&khugepaged_mm_lock);
458
f1f10076 459 mmgrab(mm);
b46e756f
KS
460 if (wakeup)
461 wake_up_interruptible(&khugepaged_wait);
b46e756f
KS
462}
463
c791576c
YS
464void khugepaged_enter_vma(struct vm_area_struct *vma,
465 unsigned long vm_flags)
b46e756f 466{
2647d11b 467 if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
1064026b 468 hugepage_flags_enabled()) {
7da4e2cb 469 if (hugepage_vma_check(vma, vm_flags, false, false))
2647d11b
YS
470 __khugepaged_enter(vma->vm_mm);
471 }
b46e756f
KS
472}
473
474void __khugepaged_exit(struct mm_struct *mm)
475{
476 struct mm_slot *mm_slot;
477 int free = 0;
478
479 spin_lock(&khugepaged_mm_lock);
480 mm_slot = get_mm_slot(mm);
481 if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
482 hash_del(&mm_slot->hash);
483 list_del(&mm_slot->mm_node);
484 free = 1;
485 }
486 spin_unlock(&khugepaged_mm_lock);
487
488 if (free) {
489 clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
490 free_mm_slot(mm_slot);
491 mmdrop(mm);
492 } else if (mm_slot) {
493 /*
494 * This is required to serialize against
495 * khugepaged_test_exit() (which is guaranteed to run
496 * under mmap sem read mode). Stop here (after we
497 * return all pagetables will be destroyed) until
498 * khugepaged has finished working on the pagetables
c1e8d7c6 499 * under the mmap_lock.
b46e756f 500 */
d8ed45c5
ML
501 mmap_write_lock(mm);
502 mmap_write_unlock(mm);
b46e756f
KS
503 }
504}
505
506static void release_pte_page(struct page *page)
507{
5503fbf2
KS
508 mod_node_page_state(page_pgdat(page),
509 NR_ISOLATED_ANON + page_is_file_lru(page),
510 -compound_nr(page));
b46e756f
KS
511 unlock_page(page);
512 putback_lru_page(page);
513}
514
5503fbf2
KS
515static void release_pte_pages(pte_t *pte, pte_t *_pte,
516 struct list_head *compound_pagelist)
b46e756f 517{
5503fbf2
KS
518 struct page *page, *tmp;
519
b46e756f
KS
520 while (--_pte >= pte) {
521 pte_t pteval = *_pte;
5503fbf2
KS
522
523 page = pte_page(pteval);
524 if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval)) &&
525 !PageCompound(page))
526 release_pte_page(page);
527 }
528
529 list_for_each_entry_safe(page, tmp, compound_pagelist, lru) {
530 list_del(&page->lru);
531 release_pte_page(page);
b46e756f
KS
532 }
533}
534
9445689f
KS
535static bool is_refcount_suitable(struct page *page)
536{
537 int expected_refcount;
538
539 expected_refcount = total_mapcount(page);
540 if (PageSwapCache(page))
541 expected_refcount += compound_nr(page);
542
543 return page_count(page) == expected_refcount;
544}
545
b46e756f
KS
546static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
547 unsigned long address,
5503fbf2
KS
548 pte_t *pte,
549 struct list_head *compound_pagelist)
b46e756f
KS
550{
551 struct page *page = NULL;
552 pte_t *_pte;
71a2c112 553 int none_or_zero = 0, shared = 0, result = 0, referenced = 0;
0db501f7 554 bool writable = false;
b46e756f 555
36ee2c78 556 for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
b46e756f
KS
557 _pte++, address += PAGE_SIZE) {
558 pte_t pteval = *_pte;
559 if (pte_none(pteval) || (pte_present(pteval) &&
560 is_zero_pfn(pte_pfn(pteval)))) {
561 if (!userfaultfd_armed(vma) &&
562 ++none_or_zero <= khugepaged_max_ptes_none) {
563 continue;
564 } else {
565 result = SCAN_EXCEED_NONE_PTE;
e9ea874a 566 count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
b46e756f
KS
567 goto out;
568 }
569 }
570 if (!pte_present(pteval)) {
571 result = SCAN_PTE_NON_PRESENT;
572 goto out;
573 }
574 page = vm_normal_page(vma, address, pteval);
3218f871 575 if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
b46e756f
KS
576 result = SCAN_PAGE_NULL;
577 goto out;
578 }
579
5503fbf2
KS
580 VM_BUG_ON_PAGE(!PageAnon(page), page);
581
71a2c112
KS
582 if (page_mapcount(page) > 1 &&
583 ++shared > khugepaged_max_ptes_shared) {
584 result = SCAN_EXCEED_SHARED_PTE;
e9ea874a 585 count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
71a2c112
KS
586 goto out;
587 }
588
fece2029 589 if (PageCompound(page)) {
5503fbf2
KS
590 struct page *p;
591 page = compound_head(page);
fece2029 592
5503fbf2
KS
593 /*
594 * Check if we have dealt with the compound page
595 * already
596 */
597 list_for_each_entry(p, compound_pagelist, lru) {
598 if (page == p)
599 goto next;
600 }
601 }
b46e756f
KS
602
603 /*
604 * We can do it before isolate_lru_page because the
605 * page can't be freed from under us. NOTE: PG_lock
606 * is needed to serialize against split_huge_page
607 * when invoked from the VM.
608 */
609 if (!trylock_page(page)) {
610 result = SCAN_PAGE_LOCK;
611 goto out;
612 }
613
614 /*
9445689f
KS
615 * Check if the page has any GUP (or other external) pins.
616 *
617 * The page table that maps the page has been already unlinked
618 * from the page table tree and this process cannot get
f0953a1b 619 * an additional pin on the page.
9445689f
KS
620 *
621 * New pins can come later if the page is shared across fork,
622 * but not from this process. The other process cannot write to
623 * the page, only trigger CoW.
b46e756f 624 */
9445689f 625 if (!is_refcount_suitable(page)) {
b46e756f
KS
626 unlock_page(page);
627 result = SCAN_PAGE_COUNT;
628 goto out;
629 }
b46e756f
KS
630
631 /*
632 * Isolate the page to avoid collapsing an hugepage
633 * currently in use by the VM.
634 */
635 if (isolate_lru_page(page)) {
636 unlock_page(page);
637 result = SCAN_DEL_PAGE_LRU;
638 goto out;
639 }
5503fbf2
KS
640 mod_node_page_state(page_pgdat(page),
641 NR_ISOLATED_ANON + page_is_file_lru(page),
642 compound_nr(page));
b46e756f
KS
643 VM_BUG_ON_PAGE(!PageLocked(page), page);
644 VM_BUG_ON_PAGE(PageLRU(page), page);
645
5503fbf2
KS
646 if (PageCompound(page))
647 list_add_tail(&page->lru, compound_pagelist);
648next:
0db501f7 649 /* There should be enough young pte to collapse the page */
b46e756f
KS
650 if (pte_young(pteval) ||
651 page_is_young(page) || PageReferenced(page) ||
652 mmu_notifier_test_young(vma->vm_mm, address))
0db501f7 653 referenced++;
5503fbf2
KS
654
655 if (pte_write(pteval))
656 writable = true;
b46e756f 657 }
74e579bf
ML
658
659 if (unlikely(!writable)) {
b46e756f 660 result = SCAN_PAGE_RO;
74e579bf
ML
661 } else if (unlikely(!referenced)) {
662 result = SCAN_LACK_REFERENCED_PAGE;
663 } else {
664 result = SCAN_SUCCEED;
665 trace_mm_collapse_huge_page_isolate(page, none_or_zero,
666 referenced, writable, result);
667 return 1;
b46e756f 668 }
b46e756f 669out:
5503fbf2 670 release_pte_pages(pte, _pte, compound_pagelist);
b46e756f
KS
671 trace_mm_collapse_huge_page_isolate(page, none_or_zero,
672 referenced, writable, result);
673 return 0;
674}
675
676static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
677 struct vm_area_struct *vma,
678 unsigned long address,
5503fbf2
KS
679 spinlock_t *ptl,
680 struct list_head *compound_pagelist)
b46e756f 681{
5503fbf2 682 struct page *src_page, *tmp;
b46e756f 683 pte_t *_pte;
338a16ba
DR
684 for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
685 _pte++, page++, address += PAGE_SIZE) {
b46e756f 686 pte_t pteval = *_pte;
b46e756f
KS
687
688 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
689 clear_user_highpage(page, address);
690 add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
691 if (is_zero_pfn(pte_pfn(pteval))) {
692 /*
693 * ptl mostly unnecessary.
694 */
695 spin_lock(ptl);
08d5b29e 696 ptep_clear(vma->vm_mm, address, _pte);
b46e756f
KS
697 spin_unlock(ptl);
698 }
699 } else {
700 src_page = pte_page(pteval);
701 copy_user_highpage(page, src_page, address, vma);
5503fbf2
KS
702 if (!PageCompound(src_page))
703 release_pte_page(src_page);
b46e756f
KS
704 /*
705 * ptl mostly unnecessary, but preempt has to
706 * be disabled to update the per-cpu stats
707 * inside page_remove_rmap().
708 */
709 spin_lock(ptl);
08d5b29e 710 ptep_clear(vma->vm_mm, address, _pte);
cea86fe2 711 page_remove_rmap(src_page, vma, false);
b46e756f
KS
712 spin_unlock(ptl);
713 free_page_and_swap_cache(src_page);
714 }
b46e756f 715 }
5503fbf2
KS
716
717 list_for_each_entry_safe(src_page, tmp, compound_pagelist, lru) {
718 list_del(&src_page->lru);
1baec203
ML
719 mod_node_page_state(page_pgdat(src_page),
720 NR_ISOLATED_ANON + page_is_file_lru(src_page),
721 -compound_nr(src_page));
722 unlock_page(src_page);
723 free_swap_cache(src_page);
724 putback_lru_page(src_page);
5503fbf2 725 }
b46e756f
KS
726}
727
728static void khugepaged_alloc_sleep(void)
729{
730 DEFINE_WAIT(wait);
731
732 add_wait_queue(&khugepaged_wait, &wait);
733 freezable_schedule_timeout_interruptible(
734 msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
735 remove_wait_queue(&khugepaged_wait, &wait);
736}
737
738static int khugepaged_node_load[MAX_NUMNODES];
739
740static bool khugepaged_scan_abort(int nid)
741{
742 int i;
743
744 /*
a5f5f91d 745 * If node_reclaim_mode is disabled, then no extra effort is made to
b46e756f
KS
746 * allocate memory locally.
747 */
202e35db 748 if (!node_reclaim_enabled())
b46e756f
KS
749 return false;
750
751 /* If there is a count for this node already, it must be acceptable */
752 if (khugepaged_node_load[nid])
753 return false;
754
755 for (i = 0; i < MAX_NUMNODES; i++) {
756 if (!khugepaged_node_load[i])
757 continue;
a55c7454 758 if (node_distance(nid, i) > node_reclaim_distance)
b46e756f
KS
759 return true;
760 }
761 return false;
762}
763
1064026b
YS
764#define khugepaged_defrag() \
765 (transparent_hugepage_flags & \
766 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG))
767
b46e756f
KS
768/* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
769static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
770{
25160354 771 return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
b46e756f
KS
772}
773
774#ifdef CONFIG_NUMA
775static int khugepaged_find_target_node(void)
776{
777 static int last_khugepaged_target_node = NUMA_NO_NODE;
778 int nid, target_node = 0, max_value = 0;
779
780 /* find first node with max normal pages hit */
781 for (nid = 0; nid < MAX_NUMNODES; nid++)
782 if (khugepaged_node_load[nid] > max_value) {
783 max_value = khugepaged_node_load[nid];
784 target_node = nid;
785 }
786
787 /* do some balance if several nodes have the same hit record */
788 if (target_node <= last_khugepaged_target_node)
789 for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
790 nid++)
791 if (max_value == khugepaged_node_load[nid]) {
792 target_node = nid;
793 break;
794 }
795
796 last_khugepaged_target_node = target_node;
797 return target_node;
798}
c6a7f445
YS
799#else
800static int khugepaged_find_target_node(void)
b46e756f 801{
c6a7f445 802 return 0;
b46e756f 803}
c6a7f445 804#endif
b46e756f
KS
805
806static struct page *
988ddb71 807khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
b46e756f 808{
b46e756f
KS
809 *hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
810 if (unlikely(!*hpage)) {
811 count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
812 *hpage = ERR_PTR(-ENOMEM);
813 return NULL;
814 }
815
816 prep_transhuge_page(*hpage);
817 count_vm_event(THP_COLLAPSE_ALLOC);
818 return *hpage;
819}
b46e756f 820
b46e756f 821/*
c1e8d7c6
ML
822 * If mmap_lock temporarily dropped, revalidate vma
823 * before taking mmap_lock.
b46e756f
KS
824 * Return 0 if succeeds, otherwise return none-zero
825 * value (scan code).
826 */
827
c131f751
KS
828static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
829 struct vm_area_struct **vmap)
b46e756f
KS
830{
831 struct vm_area_struct *vma;
b46e756f
KS
832
833 if (unlikely(khugepaged_test_exit(mm)))
834 return SCAN_ANY_PROCESS;
835
c131f751 836 *vmap = vma = find_vma(mm, address);
b46e756f
KS
837 if (!vma)
838 return SCAN_VMA_NULL;
839
4fa6893f 840 if (!transhuge_vma_suitable(vma, address))
b46e756f 841 return SCAN_ADDRESS_RANGE;
7da4e2cb 842 if (!hugepage_vma_check(vma, vma->vm_flags, false, false))
b46e756f 843 return SCAN_VMA_CHECK;
f707fa49
YS
844 /*
845 * Anon VMA expected, the address may be unmapped then
846 * remapped to file after khugepaged reaquired the mmap_lock.
847 *
848 * hugepage_vma_check may return true for qualified file
849 * vmas.
850 */
25fa414a 851 if (!vma->anon_vma || !vma_is_anonymous(vma))
594cced1 852 return SCAN_VMA_CHECK;
b46e756f
KS
853 return 0;
854}
855
856/*
857 * Bring missing pages in from swap, to complete THP collapse.
858 * Only done if khugepaged_scan_pmd believes it is worthwhile.
859 *
4d928e20
ML
860 * Called and returns without pte mapped or spinlocks held.
861 * Note that if false is returned, mmap_lock will be released.
b46e756f
KS
862 */
863
864static bool __collapse_huge_page_swapin(struct mm_struct *mm,
865 struct vm_area_struct *vma,
2b635dd3 866 unsigned long haddr, pmd_t *pmd,
0db501f7 867 int referenced)
b46e756f 868{
2b740303
SJ
869 int swapped_in = 0;
870 vm_fault_t ret = 0;
2b635dd3
WD
871 unsigned long address, end = haddr + (HPAGE_PMD_NR * PAGE_SIZE);
872
873 for (address = haddr; address < end; address += PAGE_SIZE) {
874 struct vm_fault vmf = {
875 .vma = vma,
876 .address = address,
877 .pgoff = linear_page_index(vma, haddr),
878 .flags = FAULT_FLAG_ALLOW_RETRY,
879 .pmd = pmd,
880 };
881
882 vmf.pte = pte_offset_map(pmd, address);
2994302b 883 vmf.orig_pte = *vmf.pte;
2b635dd3
WD
884 if (!is_swap_pte(vmf.orig_pte)) {
885 pte_unmap(vmf.pte);
b46e756f 886 continue;
2b635dd3 887 }
2994302b 888 ret = do_swap_page(&vmf);
0db501f7 889
4d928e20
ML
890 /*
891 * do_swap_page returns VM_FAULT_RETRY with released mmap_lock.
892 * Note we treat VM_FAULT_RETRY as VM_FAULT_ERROR here because
893 * we do not retry here and swap entry will remain in pagetable
894 * resulting in later failure.
895 */
b46e756f 896 if (ret & VM_FAULT_RETRY) {
4d928e20
ML
897 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
898 return false;
b46e756f
KS
899 }
900 if (ret & VM_FAULT_ERROR) {
4d928e20 901 mmap_read_unlock(mm);
0db501f7 902 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
b46e756f
KS
903 return false;
904 }
4d928e20 905 swapped_in++;
b46e756f 906 }
ae2c5d80
KS
907
908 /* Drain LRU add pagevec to remove extra pin on the swapped in pages */
909 if (swapped_in)
910 lru_add_drain();
911
0db501f7 912 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
b46e756f
KS
913 return true;
914}
915
916static void collapse_huge_page(struct mm_struct *mm,
917 unsigned long address,
918 struct page **hpage,
ffe945e6 919 int node, int referenced, int unmapped)
b46e756f 920{
5503fbf2 921 LIST_HEAD(compound_pagelist);
b46e756f
KS
922 pmd_t *pmd, _pmd;
923 pte_t *pte;
924 pgtable_t pgtable;
925 struct page *new_page;
926 spinlock_t *pmd_ptl, *pte_ptl;
927 int isolated = 0, result = 0;
c131f751 928 struct vm_area_struct *vma;
ac46d4f3 929 struct mmu_notifier_range range;
b46e756f
KS
930 gfp_t gfp;
931
932 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
933
934 /* Only allocate from the target node */
41b6167e 935 gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
b46e756f 936
988ddb71 937 /*
c1e8d7c6 938 * Before allocating the hugepage, release the mmap_lock read lock.
988ddb71 939 * The allocation can take potentially a long time if it involves
c1e8d7c6 940 * sync compaction, and we do not need to hold the mmap_lock during
988ddb71
KS
941 * that. We will recheck the vma after taking it again in write mode.
942 */
d8ed45c5 943 mmap_read_unlock(mm);
988ddb71 944 new_page = khugepaged_alloc_page(hpage, gfp, node);
b46e756f
KS
945 if (!new_page) {
946 result = SCAN_ALLOC_HUGE_PAGE_FAIL;
947 goto out_nolock;
948 }
949
8f425e4e 950 if (unlikely(mem_cgroup_charge(page_folio(new_page), mm, gfp))) {
b46e756f
KS
951 result = SCAN_CGROUP_CHARGE_FAIL;
952 goto out_nolock;
953 }
9d82c694 954 count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);
b46e756f 955
d8ed45c5 956 mmap_read_lock(mm);
c131f751 957 result = hugepage_vma_revalidate(mm, address, &vma);
b46e756f 958 if (result) {
d8ed45c5 959 mmap_read_unlock(mm);
b46e756f
KS
960 goto out_nolock;
961 }
962
963 pmd = mm_find_pmd(mm, address);
964 if (!pmd) {
965 result = SCAN_PMD_NULL;
d8ed45c5 966 mmap_read_unlock(mm);
b46e756f
KS
967 goto out_nolock;
968 }
969
970 /*
4d928e20
ML
971 * __collapse_huge_page_swapin will return with mmap_lock released
972 * when it fails. So we jump out_nolock directly in that case.
b46e756f
KS
973 * Continuing to collapse causes inconsistency.
974 */
ffe945e6
KS
975 if (unmapped && !__collapse_huge_page_swapin(mm, vma, address,
976 pmd, referenced)) {
b46e756f
KS
977 goto out_nolock;
978 }
979
d8ed45c5 980 mmap_read_unlock(mm);
b46e756f
KS
981 /*
982 * Prevent all access to pagetables with the exception of
983 * gup_fast later handled by the ptep_clear_flush and the VM
984 * handled by the anon_vma lock + PG_lock.
985 */
d8ed45c5 986 mmap_write_lock(mm);
c131f751 987 result = hugepage_vma_revalidate(mm, address, &vma);
b46e756f 988 if (result)
18d24a7c 989 goto out_up_write;
b46e756f
KS
990 /* check if the pmd is still valid */
991 if (mm_find_pmd(mm, address) != pmd)
18d24a7c 992 goto out_up_write;
b46e756f
KS
993
994 anon_vma_lock_write(vma->anon_vma);
995
7269f999 996 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
6f4f13e8 997 address, address + HPAGE_PMD_SIZE);
ac46d4f3 998 mmu_notifier_invalidate_range_start(&range);
ec649c9d
VS
999
1000 pte = pte_offset_map(pmd, address);
1001 pte_ptl = pte_lockptr(mm, pmd);
1002
b46e756f
KS
1003 pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
1004 /*
1005 * After this gup_fast can't run anymore. This also removes
1006 * any huge TLB entry from the CPU so we won't allow
1007 * huge and small TLB entries for the same virtual address
1008 * to avoid the risk of CPU bugs in that area.
1009 */
1010 _pmd = pmdp_collapse_flush(vma, address, pmd);
1011 spin_unlock(pmd_ptl);
ac46d4f3 1012 mmu_notifier_invalidate_range_end(&range);
b46e756f
KS
1013
1014 spin_lock(pte_ptl);
5503fbf2
KS
1015 isolated = __collapse_huge_page_isolate(vma, address, pte,
1016 &compound_pagelist);
b46e756f
KS
1017 spin_unlock(pte_ptl);
1018
1019 if (unlikely(!isolated)) {
1020 pte_unmap(pte);
1021 spin_lock(pmd_ptl);
1022 BUG_ON(!pmd_none(*pmd));
1023 /*
1024 * We can only use set_pmd_at when establishing
1025 * hugepmds and never for establishing regular pmds that
1026 * points to regular pagetables. Use pmd_populate for that
1027 */
1028 pmd_populate(mm, pmd, pmd_pgtable(_pmd));
1029 spin_unlock(pmd_ptl);
1030 anon_vma_unlock_write(vma->anon_vma);
1031 result = SCAN_FAIL;
18d24a7c 1032 goto out_up_write;
b46e756f
KS
1033 }
1034
1035 /*
1036 * All pages are isolated and locked so anon_vma rmap
1037 * can't run anymore.
1038 */
1039 anon_vma_unlock_write(vma->anon_vma);
1040
5503fbf2
KS
1041 __collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl,
1042 &compound_pagelist);
b46e756f 1043 pte_unmap(pte);
588d01f9
ML
1044 /*
1045 * spin_lock() below is not the equivalent of smp_wmb(), but
1046 * the smp_wmb() inside __SetPageUptodate() can be reused to
1047 * avoid the copy_huge_page writes to become visible after
1048 * the set_pmd_at() write.
1049 */
b46e756f
KS
1050 __SetPageUptodate(new_page);
1051 pgtable = pmd_pgtable(_pmd);
1052
1053 _pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
f55e1014 1054 _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
b46e756f 1055
b46e756f
KS
1056 spin_lock(pmd_ptl);
1057 BUG_ON(!pmd_none(*pmd));
40f2bbf7 1058 page_add_new_anon_rmap(new_page, vma, address);
b518154e 1059 lru_cache_add_inactive_or_unevictable(new_page, vma);
b46e756f
KS
1060 pgtable_trans_huge_deposit(mm, pmd, pgtable);
1061 set_pmd_at(mm, address, pmd, _pmd);
1062 update_mmu_cache_pmd(vma, address, pmd);
1063 spin_unlock(pmd_ptl);
1064
1065 *hpage = NULL;
1066
1067 khugepaged_pages_collapsed++;
1068 result = SCAN_SUCCEED;
1069out_up_write:
d8ed45c5 1070 mmap_write_unlock(mm);
b46e756f 1071out_nolock:
c6a7f445 1072 if (!IS_ERR_OR_NULL(*hpage)) {
bbc6b703 1073 mem_cgroup_uncharge(page_folio(*hpage));
c6a7f445
YS
1074 put_page(*hpage);
1075 }
b46e756f
KS
1076 trace_mm_collapse_huge_page(mm, isolated, result);
1077 return;
b46e756f
KS
1078}
1079
1080static int khugepaged_scan_pmd(struct mm_struct *mm,
1081 struct vm_area_struct *vma,
1082 unsigned long address,
1083 struct page **hpage)
1084{
1085 pmd_t *pmd;
1086 pte_t *pte, *_pte;
71a2c112
KS
1087 int ret = 0, result = 0, referenced = 0;
1088 int none_or_zero = 0, shared = 0;
b46e756f
KS
1089 struct page *page = NULL;
1090 unsigned long _address;
1091 spinlock_t *ptl;
1092 int node = NUMA_NO_NODE, unmapped = 0;
0db501f7 1093 bool writable = false;
b46e756f
KS
1094
1095 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1096
1097 pmd = mm_find_pmd(mm, address);
1098 if (!pmd) {
1099 result = SCAN_PMD_NULL;
1100 goto out;
1101 }
1102
1103 memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
1104 pte = pte_offset_map_lock(mm, pmd, address, &ptl);
36ee2c78 1105 for (_address = address, _pte = pte; _pte < pte + HPAGE_PMD_NR;
b46e756f
KS
1106 _pte++, _address += PAGE_SIZE) {
1107 pte_t pteval = *_pte;
1108 if (is_swap_pte(pteval)) {
1109 if (++unmapped <= khugepaged_max_ptes_swap) {
e1e267c7
PX
1110 /*
1111 * Always be strict with uffd-wp
1112 * enabled swap entries. Please see
1113 * comment below for pte_uffd_wp().
1114 */
1115 if (pte_swp_uffd_wp(pteval)) {
1116 result = SCAN_PTE_UFFD_WP;
1117 goto out_unmap;
1118 }
b46e756f
KS
1119 continue;
1120 } else {
1121 result = SCAN_EXCEED_SWAP_PTE;
e9ea874a 1122 count_vm_event(THP_SCAN_EXCEED_SWAP_PTE);
b46e756f
KS
1123 goto out_unmap;
1124 }
1125 }
1126 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
1127 if (!userfaultfd_armed(vma) &&
1128 ++none_or_zero <= khugepaged_max_ptes_none) {
1129 continue;
1130 } else {
1131 result = SCAN_EXCEED_NONE_PTE;
e9ea874a 1132 count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
b46e756f
KS
1133 goto out_unmap;
1134 }
1135 }
e1e267c7
PX
1136 if (pte_uffd_wp(pteval)) {
1137 /*
1138 * Don't collapse the page if any of the small
1139 * PTEs are armed with uffd write protection.
1140 * Here we can also mark the new huge pmd as
1141 * write protected if any of the small ones is
8958b249 1142 * marked but that could bring unknown
e1e267c7
PX
1143 * userfault messages that falls outside of
1144 * the registered range. So, just be simple.
1145 */
1146 result = SCAN_PTE_UFFD_WP;
1147 goto out_unmap;
1148 }
b46e756f
KS
1149 if (pte_write(pteval))
1150 writable = true;
1151
1152 page = vm_normal_page(vma, _address, pteval);
3218f871 1153 if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
b46e756f
KS
1154 result = SCAN_PAGE_NULL;
1155 goto out_unmap;
1156 }
1157
71a2c112
KS
1158 if (page_mapcount(page) > 1 &&
1159 ++shared > khugepaged_max_ptes_shared) {
1160 result = SCAN_EXCEED_SHARED_PTE;
e9ea874a 1161 count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
71a2c112
KS
1162 goto out_unmap;
1163 }
1164
5503fbf2 1165 page = compound_head(page);
b46e756f
KS
1166
1167 /*
1168 * Record which node the original page is from and save this
1169 * information to khugepaged_node_load[].
0b8f0d87 1170 * Khugepaged will allocate hugepage from the node has the max
b46e756f
KS
1171 * hit record.
1172 */
1173 node = page_to_nid(page);
1174 if (khugepaged_scan_abort(node)) {
1175 result = SCAN_SCAN_ABORT;
1176 goto out_unmap;
1177 }
1178 khugepaged_node_load[node]++;
1179 if (!PageLRU(page)) {
1180 result = SCAN_PAGE_LRU;
1181 goto out_unmap;
1182 }
1183 if (PageLocked(page)) {
1184 result = SCAN_PAGE_LOCK;
1185 goto out_unmap;
1186 }
1187 if (!PageAnon(page)) {
1188 result = SCAN_PAGE_ANON;
1189 goto out_unmap;
1190 }
1191
1192 /*
9445689f
KS
1193 * Check if the page has any GUP (or other external) pins.
1194 *
36ee2c78 1195 * Here the check is racy it may see total_mapcount > refcount
9445689f
KS
1196 * in some cases.
1197 * For example, one process with one forked child process.
1198 * The parent has the PMD split due to MADV_DONTNEED, then
1199 * the child is trying unmap the whole PMD, but khugepaged
1200 * may be scanning the parent between the child has
1201 * PageDoubleMap flag cleared and dec the mapcount. So
1202 * khugepaged may see total_mapcount > refcount.
1203 *
1204 * But such case is ephemeral we could always retry collapse
1205 * later. However it may report false positive if the page
1206 * has excessive GUP pins (i.e. 512). Anyway the same check
1207 * will be done again later the risk seems low.
b46e756f 1208 */
9445689f 1209 if (!is_refcount_suitable(page)) {
b46e756f
KS
1210 result = SCAN_PAGE_COUNT;
1211 goto out_unmap;
1212 }
1213 if (pte_young(pteval) ||
1214 page_is_young(page) || PageReferenced(page) ||
1215 mmu_notifier_test_young(vma->vm_mm, address))
0db501f7 1216 referenced++;
b46e756f 1217 }
ffe945e6 1218 if (!writable) {
b46e756f 1219 result = SCAN_PAGE_RO;
ffe945e6
KS
1220 } else if (!referenced || (unmapped && referenced < HPAGE_PMD_NR/2)) {
1221 result = SCAN_LACK_REFERENCED_PAGE;
1222 } else {
1223 result = SCAN_SUCCEED;
1224 ret = 1;
b46e756f
KS
1225 }
1226out_unmap:
1227 pte_unmap_unlock(pte, ptl);
1228 if (ret) {
1229 node = khugepaged_find_target_node();
c1e8d7c6 1230 /* collapse_huge_page will return with the mmap_lock released */
ffe945e6
KS
1231 collapse_huge_page(mm, address, hpage, node,
1232 referenced, unmapped);
b46e756f
KS
1233 }
1234out:
1235 trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
1236 none_or_zero, result, unmapped);
1237 return ret;
1238}
1239
1240static void collect_mm_slot(struct mm_slot *mm_slot)
1241{
1242 struct mm_struct *mm = mm_slot->mm;
1243
35f3aa39 1244 lockdep_assert_held(&khugepaged_mm_lock);
b46e756f
KS
1245
1246 if (khugepaged_test_exit(mm)) {
1247 /* free mm_slot */
1248 hash_del(&mm_slot->hash);
1249 list_del(&mm_slot->mm_node);
1250
1251 /*
1252 * Not strictly needed because the mm exited already.
1253 *
1254 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
1255 */
1256
1257 /* khugepaged_mm_lock actually not necessary for the below */
1258 free_mm_slot(mm_slot);
1259 mmdrop(mm);
1260 }
1261}
1262
396bcc52 1263#ifdef CONFIG_SHMEM
27e1f827
SL
1264/*
1265 * Notify khugepaged that given addr of the mm is pte-mapped THP. Then
1266 * khugepaged should try to collapse the page table.
1267 */
081c3256
ML
1268static void khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
1269 unsigned long addr)
27e1f827
SL
1270{
1271 struct mm_slot *mm_slot;
1272
1273 VM_BUG_ON(addr & ~HPAGE_PMD_MASK);
1274
1275 spin_lock(&khugepaged_mm_lock);
1276 mm_slot = get_mm_slot(mm);
1277 if (likely(mm_slot && mm_slot->nr_pte_mapped_thp < MAX_PTE_MAPPED_THP))
1278 mm_slot->pte_mapped_thp[mm_slot->nr_pte_mapped_thp++] = addr;
1279 spin_unlock(&khugepaged_mm_lock);
27e1f827
SL
1280}
1281
e59a47b8
PT
1282static void collapse_and_free_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
1283 unsigned long addr, pmd_t *pmdp)
1284{
1285 spinlock_t *ptl;
1286 pmd_t pmd;
1287
80110bbf 1288 mmap_assert_write_locked(mm);
e59a47b8
PT
1289 ptl = pmd_lock(vma->vm_mm, pmdp);
1290 pmd = pmdp_collapse_flush(vma, addr, pmdp);
1291 spin_unlock(ptl);
1292 mm_dec_nr_ptes(mm);
80110bbf 1293 page_table_check_pte_clear_range(mm, addr, pmd);
e59a47b8
PT
1294 pte_free(mm, pmd_pgtable(pmd));
1295}
1296
27e1f827 1297/**
336e6b53
AS
1298 * collapse_pte_mapped_thp - Try to collapse a pte-mapped THP for mm at
1299 * address haddr.
1300 *
1301 * @mm: process address space where collapse happens
1302 * @addr: THP collapse address
27e1f827
SL
1303 *
1304 * This function checks whether all the PTEs in the PMD are pointing to the
1305 * right THP. If so, retract the page table so the THP can refault in with
1306 * as pmd-mapped.
1307 */
1308void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
1309{
1310 unsigned long haddr = addr & HPAGE_PMD_MASK;
1311 struct vm_area_struct *vma = find_vma(mm, haddr);
119a5fc1 1312 struct page *hpage;
27e1f827 1313 pte_t *start_pte, *pte;
e59a47b8 1314 pmd_t *pmd;
27e1f827
SL
1315 spinlock_t *ptl;
1316 int count = 0;
1317 int i;
1318
1319 if (!vma || !vma->vm_file ||
fef792a4 1320 !range_in_vma(vma, haddr, haddr + HPAGE_PMD_SIZE))
27e1f827
SL
1321 return;
1322
1323 /*
1324 * This vm_flags may not have VM_HUGEPAGE if the page was not
1325 * collapsed by this mm. But we can still collapse if the page is
1326 * the valid THP. Add extra VM_HUGEPAGE so hugepage_vma_check()
1327 * will not fail the vma for missing VM_HUGEPAGE
1328 */
7da4e2cb 1329 if (!hugepage_vma_check(vma, vma->vm_flags | VM_HUGEPAGE, false, false))
27e1f827
SL
1330 return;
1331
deb4c93a
PX
1332 /* Keep pmd pgtable for uffd-wp; see comment in retract_page_tables() */
1333 if (userfaultfd_wp(vma))
1334 return;
1335
119a5fc1
HD
1336 hpage = find_lock_page(vma->vm_file->f_mapping,
1337 linear_page_index(vma, haddr));
1338 if (!hpage)
1339 return;
1340
1341 if (!PageHead(hpage))
1342 goto drop_hpage;
1343
27e1f827
SL
1344 pmd = mm_find_pmd(mm, haddr);
1345 if (!pmd)
119a5fc1 1346 goto drop_hpage;
27e1f827
SL
1347
1348 start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
1349
1350 /* step 1: check all mapped PTEs are to the right huge page */
1351 for (i = 0, addr = haddr, pte = start_pte;
1352 i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
1353 struct page *page;
1354
1355 /* empty pte, skip */
1356 if (pte_none(*pte))
1357 continue;
1358
1359 /* page swapped out, abort */
1360 if (!pte_present(*pte))
1361 goto abort;
1362
1363 page = vm_normal_page(vma, addr, *pte);
3218f871
AS
1364 if (WARN_ON_ONCE(page && is_zone_device_page(page)))
1365 page = NULL;
27e1f827 1366 /*
119a5fc1
HD
1367 * Note that uprobe, debugger, or MAP_PRIVATE may change the
1368 * page table, but the new page will not be a subpage of hpage.
27e1f827 1369 */
119a5fc1 1370 if (hpage + i != page)
27e1f827
SL
1371 goto abort;
1372 count++;
1373 }
1374
1375 /* step 2: adjust rmap */
1376 for (i = 0, addr = haddr, pte = start_pte;
1377 i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
1378 struct page *page;
1379
1380 if (pte_none(*pte))
1381 continue;
1382 page = vm_normal_page(vma, addr, *pte);
3218f871
AS
1383 if (WARN_ON_ONCE(page && is_zone_device_page(page)))
1384 goto abort;
cea86fe2 1385 page_remove_rmap(page, vma, false);
27e1f827
SL
1386 }
1387
1388 pte_unmap_unlock(start_pte, ptl);
1389
1390 /* step 3: set proper refcount and mm_counters. */
119a5fc1 1391 if (count) {
27e1f827
SL
1392 page_ref_sub(hpage, count);
1393 add_mm_counter(vma->vm_mm, mm_counter_file(hpage), -count);
1394 }
1395
1396 /* step 4: collapse pmd */
e59a47b8 1397 collapse_and_free_pmd(mm, vma, haddr, pmd);
119a5fc1
HD
1398drop_hpage:
1399 unlock_page(hpage);
1400 put_page(hpage);
27e1f827
SL
1401 return;
1402
1403abort:
1404 pte_unmap_unlock(start_pte, ptl);
119a5fc1 1405 goto drop_hpage;
27e1f827
SL
1406}
1407
0edf61e5 1408static void khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
27e1f827
SL
1409{
1410 struct mm_struct *mm = mm_slot->mm;
1411 int i;
1412
1413 if (likely(mm_slot->nr_pte_mapped_thp == 0))
0edf61e5 1414 return;
27e1f827 1415
d8ed45c5 1416 if (!mmap_write_trylock(mm))
0edf61e5 1417 return;
27e1f827
SL
1418
1419 if (unlikely(khugepaged_test_exit(mm)))
1420 goto out;
1421
1422 for (i = 0; i < mm_slot->nr_pte_mapped_thp; i++)
1423 collapse_pte_mapped_thp(mm, mm_slot->pte_mapped_thp[i]);
1424
1425out:
1426 mm_slot->nr_pte_mapped_thp = 0;
d8ed45c5 1427 mmap_write_unlock(mm);
27e1f827
SL
1428}
1429
f3f0e1d2
KS
1430static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
1431{
1432 struct vm_area_struct *vma;
18e77600 1433 struct mm_struct *mm;
f3f0e1d2 1434 unsigned long addr;
e59a47b8 1435 pmd_t *pmd;
f3f0e1d2
KS
1436
1437 i_mmap_lock_write(mapping);
1438 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
27e1f827
SL
1439 /*
1440 * Check vma->anon_vma to exclude MAP_PRIVATE mappings that
1441 * got written to. These VMAs are likely not worth investing
3e4e28c5 1442 * mmap_write_lock(mm) as PMD-mapping is likely to be split
27e1f827
SL
1443 * later.
1444 *
36ee2c78 1445 * Note that vma->anon_vma check is racy: it can be set up after
c1e8d7c6 1446 * the check but before we took mmap_lock by the fault path.
27e1f827
SL
1447 * But page lock would prevent establishing any new ptes of the
1448 * page, so we are safe.
1449 *
1450 * An alternative would be drop the check, but check that page
1451 * table is clear before calling pmdp_collapse_flush() under
1452 * ptl. It has higher chance to recover THP for the VMA, but
1453 * has higher cost too.
1454 */
f3f0e1d2
KS
1455 if (vma->anon_vma)
1456 continue;
1457 addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
1458 if (addr & ~HPAGE_PMD_MASK)
1459 continue;
1460 if (vma->vm_end < addr + HPAGE_PMD_SIZE)
1461 continue;
18e77600
HD
1462 mm = vma->vm_mm;
1463 pmd = mm_find_pmd(mm, addr);
f3f0e1d2
KS
1464 if (!pmd)
1465 continue;
1466 /*
c1e8d7c6 1467 * We need exclusive mmap_lock to retract page table.
27e1f827
SL
1468 *
1469 * We use trylock due to lock inversion: we need to acquire
c1e8d7c6 1470 * mmap_lock while holding page lock. Fault path does it in
27e1f827 1471 * reverse order. Trylock is a way to avoid deadlock.
f3f0e1d2 1472 */
18e77600 1473 if (mmap_write_trylock(mm)) {
deb4c93a
PX
1474 /*
1475 * When a vma is registered with uffd-wp, we can't
1476 * recycle the pmd pgtable because there can be pte
1477 * markers installed. Skip it only, so the rest mm/vma
1478 * can still have the same file mapped hugely, however
1479 * it'll always mapped in small page size for uffd-wp
1480 * registered ranges.
1481 */
1482 if (!khugepaged_test_exit(mm) && !userfaultfd_wp(vma))
e59a47b8 1483 collapse_and_free_pmd(mm, vma, addr, pmd);
18e77600 1484 mmap_write_unlock(mm);
27e1f827
SL
1485 } else {
1486 /* Try again later */
18e77600 1487 khugepaged_add_pte_mapped_thp(mm, addr);
f3f0e1d2
KS
1488 }
1489 }
1490 i_mmap_unlock_write(mapping);
1491}
1492
1493/**
99cb0dbd 1494 * collapse_file - collapse filemap/tmpfs/shmem pages into huge one.
f3f0e1d2 1495 *
336e6b53
AS
1496 * @mm: process address space where collapse happens
1497 * @file: file that collapse on
1498 * @start: collapse start address
1499 * @hpage: new allocated huge page for collapse
1500 * @node: appointed node the new huge page allocate from
1501 *
f3f0e1d2 1502 * Basic scheme is simple, details are more complex:
87c460a0 1503 * - allocate and lock a new huge page;
77da9389 1504 * - scan page cache replacing old pages with the new one
99cb0dbd 1505 * + swap/gup in pages if necessary;
f3f0e1d2 1506 * + fill in gaps;
77da9389
MW
1507 * + keep old pages around in case rollback is required;
1508 * - if replacing succeeds:
f3f0e1d2
KS
1509 * + copy data over;
1510 * + free old pages;
87c460a0 1511 * + unlock huge page;
f3f0e1d2
KS
1512 * - if replacing failed;
1513 * + put all pages back and unfreeze them;
77da9389 1514 * + restore gaps in the page cache;
87c460a0 1515 * + unlock and free huge page;
f3f0e1d2 1516 */
579c571e
SL
1517static void collapse_file(struct mm_struct *mm,
1518 struct file *file, pgoff_t start,
f3f0e1d2
KS
1519 struct page **hpage, int node)
1520{
579c571e 1521 struct address_space *mapping = file->f_mapping;
f3f0e1d2 1522 gfp_t gfp;
77da9389 1523 struct page *new_page;
f3f0e1d2
KS
1524 pgoff_t index, end = start + HPAGE_PMD_NR;
1525 LIST_HEAD(pagelist);
77da9389 1526 XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
f3f0e1d2 1527 int nr_none = 0, result = SCAN_SUCCEED;
99cb0dbd 1528 bool is_shmem = shmem_file(file);
bf9ecead 1529 int nr;
f3f0e1d2 1530
99cb0dbd 1531 VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
f3f0e1d2
KS
1532 VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
1533
1534 /* Only allocate from the target node */
41b6167e 1535 gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
f3f0e1d2
KS
1536
1537 new_page = khugepaged_alloc_page(hpage, gfp, node);
1538 if (!new_page) {
1539 result = SCAN_ALLOC_HUGE_PAGE_FAIL;
1540 goto out;
1541 }
1542
8f425e4e 1543 if (unlikely(mem_cgroup_charge(page_folio(new_page), mm, gfp))) {
f3f0e1d2
KS
1544 result = SCAN_CGROUP_CHARGE_FAIL;
1545 goto out;
1546 }
9d82c694 1547 count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);
f3f0e1d2 1548
6b24ca4a
MWO
1549 /*
1550 * Ensure we have slots for all the pages in the range. This is
1551 * almost certainly a no-op because most of the pages must be present
1552 */
95feeabb
HD
1553 do {
1554 xas_lock_irq(&xas);
1555 xas_create_range(&xas);
1556 if (!xas_error(&xas))
1557 break;
1558 xas_unlock_irq(&xas);
1559 if (!xas_nomem(&xas, GFP_KERNEL)) {
95feeabb
HD
1560 result = SCAN_FAIL;
1561 goto out;
1562 }
1563 } while (1);
1564
042a3082 1565 __SetPageLocked(new_page);
99cb0dbd
SL
1566 if (is_shmem)
1567 __SetPageSwapBacked(new_page);
f3f0e1d2
KS
1568 new_page->index = start;
1569 new_page->mapping = mapping;
f3f0e1d2 1570
f3f0e1d2 1571 /*
87c460a0
HD
1572 * At this point the new_page is locked and not up-to-date.
1573 * It's safe to insert it into the page cache, because nobody would
1574 * be able to map it or use it in another way until we unlock it.
f3f0e1d2
KS
1575 */
1576
77da9389
MW
1577 xas_set(&xas, start);
1578 for (index = start; index < end; index++) {
1579 struct page *page = xas_next(&xas);
1580
1581 VM_BUG_ON(index != xas.xa_index);
99cb0dbd
SL
1582 if (is_shmem) {
1583 if (!page) {
1584 /*
1585 * Stop if extent has been truncated or
1586 * hole-punched, and is now completely
1587 * empty.
1588 */
1589 if (index == start) {
1590 if (!xas_next_entry(&xas, end - 1)) {
1591 result = SCAN_TRUNCATED;
1592 goto xa_locked;
1593 }
1594 xas_set(&xas, index);
1595 }
1596 if (!shmem_charge(mapping->host, 1)) {
1597 result = SCAN_FAIL;
042a3082 1598 goto xa_locked;
701270fa 1599 }
99cb0dbd
SL
1600 xas_store(&xas, new_page);
1601 nr_none++;
1602 continue;
701270fa 1603 }
99cb0dbd
SL
1604
1605 if (xa_is_value(page) || !PageUptodate(page)) {
1606 xas_unlock_irq(&xas);
1607 /* swap in or instantiate fallocated page */
1608 if (shmem_getpage(mapping->host, index, &page,
acdd9f8e 1609 SGP_NOALLOC)) {
99cb0dbd
SL
1610 result = SCAN_FAIL;
1611 goto xa_unlocked;
1612 }
1613 } else if (trylock_page(page)) {
1614 get_page(page);
1615 xas_unlock_irq(&xas);
1616 } else {
1617 result = SCAN_PAGE_LOCK;
042a3082 1618 goto xa_locked;
77da9389 1619 }
99cb0dbd
SL
1620 } else { /* !is_shmem */
1621 if (!page || xa_is_value(page)) {
1622 xas_unlock_irq(&xas);
1623 page_cache_sync_readahead(mapping, &file->f_ra,
1624 file, index,
e5a59d30 1625 end - index);
99cb0dbd
SL
1626 /* drain pagevecs to help isolate_lru_page() */
1627 lru_add_drain();
1628 page = find_lock_page(mapping, index);
1629 if (unlikely(page == NULL)) {
1630 result = SCAN_FAIL;
1631 goto xa_unlocked;
1632 }
75f36069
SL
1633 } else if (PageDirty(page)) {
1634 /*
1635 * khugepaged only works on read-only fd,
1636 * so this page is dirty because it hasn't
1637 * been flushed since first write. There
1638 * won't be new dirty pages.
1639 *
1640 * Trigger async flush here and hope the
1641 * writeback is done when khugepaged
1642 * revisits this page.
1643 *
1644 * This is a one-off situation. We are not
1645 * forcing writeback in loop.
1646 */
1647 xas_unlock_irq(&xas);
1648 filemap_flush(mapping);
1649 result = SCAN_FAIL;
1650 goto xa_unlocked;
74c42e1b
RW
1651 } else if (PageWriteback(page)) {
1652 xas_unlock_irq(&xas);
1653 result = SCAN_FAIL;
1654 goto xa_unlocked;
99cb0dbd
SL
1655 } else if (trylock_page(page)) {
1656 get_page(page);
1657 xas_unlock_irq(&xas);
1658 } else {
1659 result = SCAN_PAGE_LOCK;
1660 goto xa_locked;
f3f0e1d2 1661 }
f3f0e1d2
KS
1662 }
1663
1664 /*
b93b0163 1665 * The page must be locked, so we can drop the i_pages lock
f3f0e1d2
KS
1666 * without racing with truncate.
1667 */
1668 VM_BUG_ON_PAGE(!PageLocked(page), page);
4655e5e5
SL
1669
1670 /* make sure the page is up to date */
1671 if (unlikely(!PageUptodate(page))) {
1672 result = SCAN_FAIL;
1673 goto out_unlock;
1674 }
06a5e126
HD
1675
1676 /*
1677 * If file was truncated then extended, or hole-punched, before
1678 * we locked the first page, then a THP might be there already.
1679 */
1680 if (PageTransCompound(page)) {
1681 result = SCAN_PAGE_COMPOUND;
1682 goto out_unlock;
1683 }
f3f0e1d2
KS
1684
1685 if (page_mapping(page) != mapping) {
1686 result = SCAN_TRUNCATED;
1687 goto out_unlock;
1688 }
f3f0e1d2 1689
74c42e1b
RW
1690 if (!is_shmem && (PageDirty(page) ||
1691 PageWriteback(page))) {
4655e5e5
SL
1692 /*
1693 * khugepaged only works on read-only fd, so this
1694 * page is dirty because it hasn't been flushed
1695 * since first write.
1696 */
1697 result = SCAN_FAIL;
1698 goto out_unlock;
1699 }
1700
f3f0e1d2
KS
1701 if (isolate_lru_page(page)) {
1702 result = SCAN_DEL_PAGE_LRU;
042a3082 1703 goto out_unlock;
f3f0e1d2
KS
1704 }
1705
99cb0dbd
SL
1706 if (page_has_private(page) &&
1707 !try_to_release_page(page, GFP_KERNEL)) {
1708 result = SCAN_PAGE_HAS_PRIVATE;
2f33a706 1709 putback_lru_page(page);
99cb0dbd
SL
1710 goto out_unlock;
1711 }
1712
f3f0e1d2 1713 if (page_mapped(page))
869f7ee6
MWO
1714 try_to_unmap(page_folio(page),
1715 TTU_IGNORE_MLOCK | TTU_BATCH_FLUSH);
f3f0e1d2 1716
77da9389
MW
1717 xas_lock_irq(&xas);
1718 xas_set(&xas, index);
f3f0e1d2 1719
77da9389 1720 VM_BUG_ON_PAGE(page != xas_load(&xas), page);
f3f0e1d2
KS
1721
1722 /*
1723 * The page is expected to have page_count() == 3:
1724 * - we hold a pin on it;
77da9389 1725 * - one reference from page cache;
f3f0e1d2
KS
1726 * - one from isolate_lru_page;
1727 */
1728 if (!page_ref_freeze(page, 3)) {
1729 result = SCAN_PAGE_COUNT;
042a3082
HD
1730 xas_unlock_irq(&xas);
1731 putback_lru_page(page);
1732 goto out_unlock;
f3f0e1d2
KS
1733 }
1734
1735 /*
1736 * Add the page to the list to be able to undo the collapse if
1737 * something go wrong.
1738 */
1739 list_add_tail(&page->lru, &pagelist);
1740
1741 /* Finally, replace with the new page. */
4101196b 1742 xas_store(&xas, new_page);
f3f0e1d2 1743 continue;
f3f0e1d2
KS
1744out_unlock:
1745 unlock_page(page);
1746 put_page(page);
042a3082 1747 goto xa_unlocked;
f3f0e1d2 1748 }
bf9ecead 1749 nr = thp_nr_pages(new_page);
f3f0e1d2 1750
99cb0dbd 1751 if (is_shmem)
57b2847d 1752 __mod_lruvec_page_state(new_page, NR_SHMEM_THPS, nr);
09d91cda 1753 else {
bf9ecead 1754 __mod_lruvec_page_state(new_page, NR_FILE_THPS, nr);
09d91cda 1755 filemap_nr_thps_inc(mapping);
eb6ecbed
CF
1756 /*
1757 * Paired with smp_mb() in do_dentry_open() to ensure
1758 * i_writecount is up to date and the update to nr_thps is
1759 * visible. Ensures the page cache will be truncated if the
1760 * file is opened writable.
1761 */
1762 smp_mb();
1763 if (inode_is_open_for_write(mapping->host)) {
1764 result = SCAN_FAIL;
1765 __mod_lruvec_page_state(new_page, NR_FILE_THPS, -nr);
1766 filemap_nr_thps_dec(mapping);
1767 goto xa_locked;
1768 }
09d91cda 1769 }
99cb0dbd 1770
042a3082 1771 if (nr_none) {
9d82c694 1772 __mod_lruvec_page_state(new_page, NR_FILE_PAGES, nr_none);
2f55f070
ML
1773 /* nr_none is always 0 for non-shmem. */
1774 __mod_lruvec_page_state(new_page, NR_SHMEM, nr_none);
042a3082
HD
1775 }
1776
6b24ca4a
MWO
1777 /* Join all the small entries into a single multi-index entry */
1778 xas_set_order(&xas, start, HPAGE_PMD_ORDER);
1779 xas_store(&xas, new_page);
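	/*
	 * Editorial note: after xas_set_order(..., HPAGE_PMD_ORDER) the store
	 * above installs a single multi-index entry covering all HPAGE_PMD_NR
	 * slots, so a page cache lookup at any offset in the range now returns
	 * new_page instead of one of the old small pages.
	 */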
042a3082
HD
1780xa_locked:
1781 xas_unlock_irq(&xas);
77da9389 1782xa_unlocked:
042a3082 1783
6d9df8a5
HD
1784 /*
1785 * If collapse is successful, flush must be done now before copying.
1786 * If collapse is unsuccessful, does flush actually need to be done?
1787 * Do it anyway, to clear the state.
1788 */
1789 try_to_unmap_flush();
1790
f3f0e1d2 1791 if (result == SCAN_SUCCEED) {
77da9389 1792 struct page *page, *tmp;
f3f0e1d2
KS
1793
1794 /*
77da9389
MW
 1795		 * Replacing old pages with the new one has succeeded; now we
1796 * need to copy the content and free the old pages.
f3f0e1d2 1797 */
2af8ff29 1798 index = start;
f3f0e1d2 1799 list_for_each_entry_safe(page, tmp, &pagelist, lru) {
2af8ff29
HD
1800 while (index < page->index) {
1801 clear_highpage(new_page + (index % HPAGE_PMD_NR));
1802 index++;
1803 }
f3f0e1d2
KS
1804 copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
1805 page);
1806 list_del(&page->lru);
f3f0e1d2 1807 page->mapping = NULL;
042a3082 1808 page_ref_unfreeze(page, 1);
f3f0e1d2
KS
1809 ClearPageActive(page);
1810 ClearPageUnevictable(page);
042a3082 1811 unlock_page(page);
f3f0e1d2 1812 put_page(page);
2af8ff29
HD
1813 index++;
1814 }
1815 while (index < end) {
1816 clear_highpage(new_page + (index % HPAGE_PMD_NR));
1817 index++;
f3f0e1d2
KS
1818 }
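		/*
		 * Editorial note: indices that had no backing page (the
		 * nr_none holes filled in earlier) are zero-filled by
		 * clear_highpage() above, so every offset of the huge page
		 * has defined contents, matching what a read of the hole
		 * would have returned.
		 */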
1819
f3f0e1d2 1820 SetPageUptodate(new_page);
87c460a0 1821 page_ref_add(new_page, HPAGE_PMD_NR - 1);
6058eaec 1822 if (is_shmem)
99cb0dbd 1823 set_page_dirty(new_page);
6058eaec 1824 lru_cache_add(new_page);
f3f0e1d2 1825
042a3082
HD
1826 /*
1827 * Remove pte page tables, so we can re-fault the page as huge.
1828 */
1829 retract_page_tables(mapping, start);
f3f0e1d2 1830 *hpage = NULL;
87aa7529
YS
1831
1832 khugepaged_pages_collapsed++;
f3f0e1d2 1833 } else {
77da9389 1834 struct page *page;
aaa52e34 1835
77da9389 1836 /* Something went wrong: roll back page cache changes */
77da9389 1837 xas_lock_irq(&xas);
2f55f070
ML
1838 if (nr_none) {
1839 mapping->nrpages -= nr_none;
99cb0dbd 1840 shmem_uncharge(mapping->host, nr_none);
2f55f070 1841 }
aaa52e34 1842
77da9389
MW
1843 xas_set(&xas, start);
1844 xas_for_each(&xas, page, end - 1) {
f3f0e1d2
KS
1845 page = list_first_entry_or_null(&pagelist,
1846 struct page, lru);
77da9389 1847 if (!page || xas.xa_index < page->index) {
f3f0e1d2
KS
1848 if (!nr_none)
1849 break;
f3f0e1d2 1850 nr_none--;
59749e6c 1851 /* Put holes back where they were */
77da9389 1852 xas_store(&xas, NULL);
f3f0e1d2
KS
1853 continue;
1854 }
1855
77da9389 1856 VM_BUG_ON_PAGE(page->index != xas.xa_index, page);
f3f0e1d2
KS
1857
1858 /* Unfreeze the page. */
1859 list_del(&page->lru);
1860 page_ref_unfreeze(page, 2);
77da9389
MW
1861 xas_store(&xas, page);
1862 xas_pause(&xas);
1863 xas_unlock_irq(&xas);
f3f0e1d2 1864 unlock_page(page);
042a3082 1865 putback_lru_page(page);
77da9389 1866 xas_lock_irq(&xas);
f3f0e1d2
KS
1867 }
1868 VM_BUG_ON(nr_none);
77da9389 1869 xas_unlock_irq(&xas);
f3f0e1d2 1870
f3f0e1d2
KS
1871 new_page->mapping = NULL;
1872 }
042a3082
HD
1873
1874 unlock_page(new_page);
f3f0e1d2
KS
1875out:
1876 VM_BUG_ON(!list_empty(&pagelist));
c6a7f445 1877 if (!IS_ERR_OR_NULL(*hpage)) {
bbc6b703 1878 mem_cgroup_uncharge(page_folio(*hpage));
c6a7f445
YS
1879 put_page(*hpage);
1880 }
f3f0e1d2
KS
1881 /* TODO: tracepoints */
1882}
1883
579c571e
SL
1884static void khugepaged_scan_file(struct mm_struct *mm,
1885 struct file *file, pgoff_t start, struct page **hpage)
f3f0e1d2
KS
1886{
1887 struct page *page = NULL;
579c571e 1888 struct address_space *mapping = file->f_mapping;
85b392db 1889 XA_STATE(xas, &mapping->i_pages, start);
f3f0e1d2
KS
1890 int present, swap;
1891 int node = NUMA_NO_NODE;
1892 int result = SCAN_SUCCEED;
1893
1894 present = 0;
1895 swap = 0;
1896 memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
1897 rcu_read_lock();
85b392db
MW
1898 xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) {
1899 if (xas_retry(&xas, page))
f3f0e1d2 1900 continue;
f3f0e1d2 1901
85b392db 1902 if (xa_is_value(page)) {
f3f0e1d2
KS
1903 if (++swap > khugepaged_max_ptes_swap) {
1904 result = SCAN_EXCEED_SWAP_PTE;
e9ea874a 1905 count_vm_event(THP_SCAN_EXCEED_SWAP_PTE);
f3f0e1d2
KS
1906 break;
1907 }
1908 continue;
1909 }
1910
6b24ca4a
MWO
1911 /*
1912 * XXX: khugepaged should compact smaller compound pages
1913 * into a PMD sized page
1914 */
f3f0e1d2
KS
1915 if (PageTransCompound(page)) {
1916 result = SCAN_PAGE_COMPOUND;
1917 break;
1918 }
1919
1920 node = page_to_nid(page);
1921 if (khugepaged_scan_abort(node)) {
1922 result = SCAN_SCAN_ABORT;
1923 break;
1924 }
1925 khugepaged_node_load[node]++;
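		/*
		 * Editorial note: khugepaged_node_load[] tallies how many of
		 * the pages in this range live on each NUMA node.
		 * khugepaged_scan_abort() above bails out when the pages are
		 * spread across nodes that are too far apart, and on success
		 * khugepaged_find_target_node() later picks the node holding
		 * the most pages as the allocation target for the huge page.
		 */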
1926
1927 if (!PageLRU(page)) {
1928 result = SCAN_PAGE_LRU;
1929 break;
1930 }
1931
99cb0dbd
SL
1932 if (page_count(page) !=
1933 1 + page_mapcount(page) + page_has_private(page)) {
f3f0e1d2
KS
1934 result = SCAN_PAGE_COUNT;
1935 break;
1936 }
1937
1938 /*
1939 * We probably should check if the page is referenced here, but
1940 * nobody would transfer pte_young() to PageReferenced() for us.
1941 * And rmap walk here is just too costly...
1942 */
1943
1944 present++;
1945
1946 if (need_resched()) {
85b392db 1947 xas_pause(&xas);
f3f0e1d2 1948 cond_resched_rcu();
f3f0e1d2
KS
1949 }
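		/*
		 * Editorial note: xas_pause() parks the xarray walk at a safe
		 * point so cond_resched_rcu() can drop and re-acquire the RCU
		 * read lock; the next xas_for_each() iteration then restarts
		 * the lookup from the following index instead of reusing a
		 * possibly stale node pointer.
		 */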
1950 }
1951 rcu_read_unlock();
1952
1953 if (result == SCAN_SUCCEED) {
1954 if (present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
1955 result = SCAN_EXCEED_NONE_PTE;
e9ea874a 1956 count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
f3f0e1d2
KS
1957 } else {
1958 node = khugepaged_find_target_node();
579c571e 1959 collapse_file(mm, file, start, hpage, node);
f3f0e1d2
KS
1960 }
1961 }
1962
1963 /* TODO: tracepoints */
1964}
1965#else
579c571e
SL
1966static void khugepaged_scan_file(struct mm_struct *mm,
1967 struct file *file, pgoff_t start, struct page **hpage)
f3f0e1d2
KS
1968{
1969 BUILD_BUG();
1970}
27e1f827 1971
0edf61e5 1972static void khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
27e1f827 1973{
27e1f827 1974}
f3f0e1d2
KS
1975#endif
1976
b46e756f
KS
1977static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
1978 struct page **hpage)
1979 __releases(&khugepaged_mm_lock)
1980 __acquires(&khugepaged_mm_lock)
1981{
1982 struct mm_slot *mm_slot;
1983 struct mm_struct *mm;
1984 struct vm_area_struct *vma;
1985 int progress = 0;
1986
1987 VM_BUG_ON(!pages);
35f3aa39 1988 lockdep_assert_held(&khugepaged_mm_lock);
b46e756f
KS
1989
1990 if (khugepaged_scan.mm_slot)
1991 mm_slot = khugepaged_scan.mm_slot;
1992 else {
1993 mm_slot = list_entry(khugepaged_scan.mm_head.next,
1994 struct mm_slot, mm_node);
1995 khugepaged_scan.address = 0;
1996 khugepaged_scan.mm_slot = mm_slot;
1997 }
1998 spin_unlock(&khugepaged_mm_lock);
27e1f827 1999 khugepaged_collapse_pte_mapped_thps(mm_slot);
b46e756f
KS
2000
2001 mm = mm_slot->mm;
3b454ad3
YS
2002 /*
2003 * Don't wait for semaphore (to avoid long wait times). Just move to
2004 * the next mm on the list.
2005 */
2006 vma = NULL;
d8ed45c5 2007 if (unlikely(!mmap_read_trylock(mm)))
c1e8d7c6 2008 goto breakouterloop_mmap_lock;
3b454ad3 2009 if (likely(!khugepaged_test_exit(mm)))
b46e756f
KS
2010 vma = find_vma(mm, khugepaged_scan.address);
2011
2012 progress++;
2013 for (; vma; vma = vma->vm_next) {
2014 unsigned long hstart, hend;
2015
2016 cond_resched();
2017 if (unlikely(khugepaged_test_exit(mm))) {
2018 progress++;
2019 break;
2020 }
7da4e2cb 2021 if (!hugepage_vma_check(vma, vma->vm_flags, false, false)) {
b46e756f
KS
2022skip:
2023 progress++;
2024 continue;
2025 }
4fa6893f
YS
2026 hstart = round_up(vma->vm_start, HPAGE_PMD_SIZE);
2027 hend = round_down(vma->vm_end, HPAGE_PMD_SIZE);
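		/*
		 * Worked example (editorial, assumes 2MiB HPAGE_PMD_SIZE): for
		 * a VMA spanning 0x1ff000-0x600000, hstart rounds up to
		 * 0x200000 and hend rounds down to 0x600000, so only the two
		 * fully aligned PMD-sized steps at 0x200000 and 0x400000 are
		 * scanned; the unaligned head of the VMA is skipped.
		 */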
b46e756f
KS
2028 if (khugepaged_scan.address > hend)
2029 goto skip;
2030 if (khugepaged_scan.address < hstart)
2031 khugepaged_scan.address = hstart;
2032 VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
2033
2034 while (khugepaged_scan.address < hend) {
2035 int ret;
2036 cond_resched();
2037 if (unlikely(khugepaged_test_exit(mm)))
2038 goto breakouterloop;
2039
2040 VM_BUG_ON(khugepaged_scan.address < hstart ||
2041 khugepaged_scan.address + HPAGE_PMD_SIZE >
2042 hend);
99cb0dbd 2043 if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
396bcc52 2044 struct file *file = get_file(vma->vm_file);
f3f0e1d2
KS
2045 pgoff_t pgoff = linear_page_index(vma,
2046 khugepaged_scan.address);
99cb0dbd 2047
d8ed45c5 2048 mmap_read_unlock(mm);
f3f0e1d2 2049 ret = 1;
579c571e 2050 khugepaged_scan_file(mm, file, pgoff, hpage);
f3f0e1d2
KS
2051 fput(file);
2052 } else {
2053 ret = khugepaged_scan_pmd(mm, vma,
2054 khugepaged_scan.address,
2055 hpage);
2056 }
b46e756f
KS
2057 /* move to next address */
2058 khugepaged_scan.address += HPAGE_PMD_SIZE;
2059 progress += HPAGE_PMD_NR;
2060 if (ret)
c1e8d7c6
ML
2061 /* we released mmap_lock so break loop */
2062 goto breakouterloop_mmap_lock;
b46e756f
KS
2063 if (progress >= pages)
2064 goto breakouterloop;
2065 }
2066 }
2067breakouterloop:
d8ed45c5 2068 mmap_read_unlock(mm); /* exit_mmap will destroy ptes after this */
c1e8d7c6 2069breakouterloop_mmap_lock:
b46e756f
KS
2070
2071 spin_lock(&khugepaged_mm_lock);
2072 VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
2073 /*
2074 * Release the current mm_slot if this mm is about to die, or
2075 * if we scanned all vmas of this mm.
2076 */
2077 if (khugepaged_test_exit(mm) || !vma) {
2078 /*
2079 * Make sure that if mm_users is reaching zero while
2080 * khugepaged runs here, khugepaged_exit will find
2081 * mm_slot not pointing to the exiting mm.
2082 */
2083 if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
2084 khugepaged_scan.mm_slot = list_entry(
2085 mm_slot->mm_node.next,
2086 struct mm_slot, mm_node);
2087 khugepaged_scan.address = 0;
2088 } else {
2089 khugepaged_scan.mm_slot = NULL;
2090 khugepaged_full_scans++;
2091 }
2092
2093 collect_mm_slot(mm_slot);
2094 }
2095
2096 return progress;
2097}
2098
2099static int khugepaged_has_work(void)
2100{
2101 return !list_empty(&khugepaged_scan.mm_head) &&
1064026b 2102 hugepage_flags_enabled();
b46e756f
KS
2103}
2104
2105static int khugepaged_wait_event(void)
2106{
2107 return !list_empty(&khugepaged_scan.mm_head) ||
2108 kthread_should_stop();
2109}
2110
2111static void khugepaged_do_scan(void)
2112{
2113 struct page *hpage = NULL;
2114 unsigned int progress = 0, pass_through_head = 0;
89dc6a96 2115 unsigned int pages = READ_ONCE(khugepaged_pages_to_scan);
b46e756f
KS
2116 bool wait = true;
2117
a980df33
KS
2118 lru_add_drain_all();
2119
c6a7f445 2120 while (true) {
b46e756f
KS
2121 cond_resched();
2122
2123 if (unlikely(kthread_should_stop() || try_to_freeze()))
2124 break;
2125
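		/*
		 * Editorial note: khugepaged_scan.mm_slot == NULL here means
		 * the scan cursor has wrapped back to the head of the mm list,
		 * i.e. one full pass is complete.  Once the head has been
		 * passed twice within a single khugepaged_do_scan() call, the
		 * else branch forces progress = pages so the loop terminates.
		 */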
2126 spin_lock(&khugepaged_mm_lock);
2127 if (!khugepaged_scan.mm_slot)
2128 pass_through_head++;
2129 if (khugepaged_has_work() &&
2130 pass_through_head < 2)
2131 progress += khugepaged_scan_mm_slot(pages - progress,
2132 &hpage);
2133 else
2134 progress = pages;
2135 spin_unlock(&khugepaged_mm_lock);
b46e756f 2136
c6a7f445
YS
2137 if (progress >= pages)
2138 break;
2139
2140 if (IS_ERR(hpage)) {
2141 /*
 2142			 * If allocation fails the first time, sleep for a
 2143			 * while; if it fails again, cancel the scan.
2144 */
2145 if (!wait)
2146 break;
2147 wait = false;
2148 hpage = NULL;
2149 khugepaged_alloc_sleep();
2150 }
2151 }
b46e756f
KS
2152}
2153
2154static bool khugepaged_should_wakeup(void)
2155{
2156 return kthread_should_stop() ||
2157 time_after_eq(jiffies, khugepaged_sleep_expire);
2158}
2159
2160static void khugepaged_wait_work(void)
2161{
2162 if (khugepaged_has_work()) {
2163 const unsigned long scan_sleep_jiffies =
2164 msecs_to_jiffies(khugepaged_scan_sleep_millisecs);
2165
2166 if (!scan_sleep_jiffies)
2167 return;
2168
2169 khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
2170 wait_event_freezable_timeout(khugepaged_wait,
2171 khugepaged_should_wakeup(),
2172 scan_sleep_jiffies);
2173 return;
2174 }
2175
1064026b 2176 if (hugepage_flags_enabled())
b46e756f
KS
2177 wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
2178}
2179
2180static int khugepaged(void *none)
2181{
2182 struct mm_slot *mm_slot;
2183
2184 set_freezable();
2185 set_user_nice(current, MAX_NICE);
2186
2187 while (!kthread_should_stop()) {
2188 khugepaged_do_scan();
2189 khugepaged_wait_work();
2190 }
2191
2192 spin_lock(&khugepaged_mm_lock);
2193 mm_slot = khugepaged_scan.mm_slot;
2194 khugepaged_scan.mm_slot = NULL;
2195 if (mm_slot)
2196 collect_mm_slot(mm_slot);
2197 spin_unlock(&khugepaged_mm_lock);
2198 return 0;
2199}
2200
2201static void set_recommended_min_free_kbytes(void)
2202{
2203 struct zone *zone;
2204 int nr_zones = 0;
2205 unsigned long recommended_min;
2206
1064026b 2207 if (!hugepage_flags_enabled()) {
bd3400ea
LF
2208 calculate_min_free_kbytes();
2209 goto update_wmarks;
2210 }
2211
b7d349c7
JK
2212 for_each_populated_zone(zone) {
2213 /*
2214 * We don't need to worry about fragmentation of
2215 * ZONE_MOVABLE since it only has movable pages.
2216 */
2217 if (zone_idx(zone) > gfp_zone(GFP_USER))
2218 continue;
2219
b46e756f 2220 nr_zones++;
b7d349c7 2221 }
b46e756f
KS
2222
2223 /* Ensure 2 pageblocks are free to assist fragmentation avoidance */
2224 recommended_min = pageblock_nr_pages * nr_zones * 2;
2225
2226 /*
2227 * Make sure that on average at least two pageblocks are almost free
2228 * of another type, one for a migratetype to fall back to and a
 2230	 * second to avoid subsequent fallbacks of other types. There are 3
2230 * MIGRATE_TYPES we care about.
2231 */
2232 recommended_min += pageblock_nr_pages * nr_zones *
2233 MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
2234
 2235	/* don't ever allow reserving more than 5% of the lowmem */
2236 recommended_min = min(recommended_min,
2237 (unsigned long) nr_free_buffer_pages() / 20);
2238 recommended_min <<= (PAGE_SHIFT-10);
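	/*
	 * Worked example (editorial, assumes 4KiB pages, 2MiB pageblocks,
	 * MIGRATE_PCPTYPES == 3 and two populated non-movable zones):
	 * 512 * 2 * 2 + 512 * 2 * 3 * 3 = 11264 pages, capped at 5% of
	 * lowmem, then shifted into KiB: 11264 << 2 = 45056 kB (~44 MiB)
	 * as the suggested min_free_kbytes floor.
	 */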
2239
2240 if (recommended_min > min_free_kbytes) {
2241 if (user_min_free_kbytes >= 0)
2242 pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
2243 min_free_kbytes, recommended_min);
2244
2245 min_free_kbytes = recommended_min;
2246 }
bd3400ea
LF
2247
2248update_wmarks:
b46e756f
KS
2249 setup_per_zone_wmarks();
2250}
2251
2252int start_stop_khugepaged(void)
2253{
b46e756f
KS
2254 int err = 0;
2255
2256 mutex_lock(&khugepaged_mutex);
1064026b 2257 if (hugepage_flags_enabled()) {
b46e756f
KS
2258 if (!khugepaged_thread)
2259 khugepaged_thread = kthread_run(khugepaged, NULL,
2260 "khugepaged");
2261 if (IS_ERR(khugepaged_thread)) {
2262 pr_err("khugepaged: kthread_run(khugepaged) failed\n");
2263 err = PTR_ERR(khugepaged_thread);
2264 khugepaged_thread = NULL;
2265 goto fail;
2266 }
2267
2268 if (!list_empty(&khugepaged_scan.mm_head))
2269 wake_up_interruptible(&khugepaged_wait);
b46e756f
KS
2270 } else if (khugepaged_thread) {
2271 kthread_stop(khugepaged_thread);
2272 khugepaged_thread = NULL;
2273 }
bd3400ea 2274 set_recommended_min_free_kbytes();
b46e756f
KS
2275fail:
2276 mutex_unlock(&khugepaged_mutex);
2277 return err;
2278}
4aab2be0
VB
2279
2280void khugepaged_min_free_kbytes_update(void)
2281{
2282 mutex_lock(&khugepaged_mutex);
1064026b 2283 if (hugepage_flags_enabled() && khugepaged_thread)
4aab2be0
VB
2284 set_recommended_min_free_kbytes();
2285 mutex_unlock(&khugepaged_mutex);
2286}