// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/mm_inline.h>
#include <linux/kthread.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/mman.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/page_table_check.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"
#include "mm_slot.h"

enum scan_result {
	SCAN_FAIL,
	SCAN_SUCCEED,
	SCAN_PMD_NULL,
	SCAN_PMD_NONE,
	SCAN_PMD_MAPPED,
	SCAN_EXCEED_NONE_PTE,
	SCAN_EXCEED_SWAP_PTE,
	SCAN_EXCEED_SHARED_PTE,
	SCAN_PTE_NON_PRESENT,
	SCAN_PTE_UFFD_WP,
	SCAN_PTE_MAPPED_HUGEPAGE,
	SCAN_PAGE_RO,
	SCAN_LACK_REFERENCED_PAGE,
	SCAN_PAGE_NULL,
	SCAN_SCAN_ABORT,
	SCAN_PAGE_COUNT,
	SCAN_PAGE_LRU,
	SCAN_PAGE_LOCK,
	SCAN_PAGE_ANON,
	SCAN_PAGE_COMPOUND,
	SCAN_ANY_PROCESS,
	SCAN_VMA_NULL,
	SCAN_VMA_CHECK,
	SCAN_ADDRESS_RANGE,
	SCAN_DEL_PAGE_LRU,
	SCAN_ALLOC_HUGE_PAGE_FAIL,
	SCAN_CGROUP_CHARGE_FAIL,
	SCAN_TRUNCATED,
	SCAN_PAGE_HAS_PRIVATE,
	SCAN_STORE_FAILED,
	SCAN_COPY_MC,
};

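/*
 * These status codes are the common return values of the scan/collapse
 * helpers below and are also what the huge_memory tracepoints (see
 * CREATE_TRACE_POINTS and <trace/events/huge_memory.h> immediately
 * below) report, so the same names show up in tracing output.
 */
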
#define CREATE_TRACE_POINTS
#include <trace/events/huge_memory.h>

static struct task_struct *khugepaged_thread __read_mostly;
static DEFINE_MUTEX(khugepaged_mutex);

/* default scan 8*512 pte (or vmas) every 30 second */
static unsigned int khugepaged_pages_to_scan __read_mostly;
static unsigned int khugepaged_pages_collapsed;
static unsigned int khugepaged_full_scans;
static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
/* during fragmentation poll the hugepage allocator once every minute */
static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
static unsigned long khugepaged_sleep_expire;
static DEFINE_SPINLOCK(khugepaged_mm_lock);
static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
/*
 * default collapse hugepages if there is at least one pte mapped like
 * it would have happened if the vma was large enough during page
 * fault.
 *
 * Note that these are only respected if collapse was initiated by khugepaged.
 */
static unsigned int khugepaged_max_ptes_none __read_mostly;
static unsigned int khugepaged_max_ptes_swap __read_mostly;
static unsigned int khugepaged_max_ptes_shared __read_mostly;

#define MM_SLOTS_HASH_BITS 10
static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);

static struct kmem_cache *mm_slot_cache __read_mostly;

#define MAX_PTE_MAPPED_THP 8

struct collapse_control {
	bool is_khugepaged;

	/* Num pages scanned per node */
	u32 node_load[MAX_NUMNODES];

	/* nodemask for allocation fallback */
	nodemask_t alloc_nmask;
};

/**
 * struct khugepaged_mm_slot - khugepaged information per mm that is being scanned
 * @slot: hash lookup from mm to mm_slot
 * @nr_pte_mapped_thp: number of pte mapped THP
 * @pte_mapped_thp: address array corresponding pte mapped THP
 */
struct khugepaged_mm_slot {
	struct mm_slot slot;

	/* pte-mapped THP in this mm */
	int nr_pte_mapped_thp;
	unsigned long pte_mapped_thp[MAX_PTE_MAPPED_THP];
};

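/*
 * The pte_mapped_thp[] addresses above are filled in by
 * khugepaged_add_pte_mapped_thp() when a scan finds a pte-mapped THP
 * (SCAN_PTE_MAPPED_HUGEPAGE), and drained, under the exclusive mmap_lock,
 * by khugepaged_collapse_pte_mapped_thps(), which calls
 * collapse_pte_mapped_thp() for each recorded address.
 */
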
/**
 * struct khugepaged_scan - cursor for scanning
 * @mm_head: the head of the mm list to scan
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that to be scanned
 *
 * There is only the one khugepaged_scan instance of this cursor structure.
 */
struct khugepaged_scan {
	struct list_head mm_head;
	struct khugepaged_mm_slot *mm_slot;
	unsigned long address;
};

static struct khugepaged_scan khugepaged_scan = {
	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
};

#ifdef CONFIG_SYSFS
static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
					 struct kobj_attribute *attr,
					 char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_scan_sleep_millisecs);
}

static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  const char *buf, size_t count)
{
	unsigned int msecs;
	int err;

	err = kstrtouint(buf, 10, &msecs);
	if (err)
		return -EINVAL;

	khugepaged_scan_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute scan_sleep_millisecs_attr =
	__ATTR_RW(scan_sleep_millisecs);

static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
}

static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
					   struct kobj_attribute *attr,
					   const char *buf, size_t count)
{
	unsigned int msecs;
	int err;

	err = kstrtouint(buf, 10, &msecs);
	if (err)
		return -EINVAL;

	khugepaged_alloc_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute alloc_sleep_millisecs_attr =
	__ATTR_RW(alloc_sleep_millisecs);

static ssize_t pages_to_scan_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_pages_to_scan);
}
static ssize_t pages_to_scan_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	unsigned int pages;
	int err;

	err = kstrtouint(buf, 10, &pages);
	if (err || !pages)
		return -EINVAL;

	khugepaged_pages_to_scan = pages;

	return count;
}
static struct kobj_attribute pages_to_scan_attr =
	__ATTR_RW(pages_to_scan);

static ssize_t pages_collapsed_show(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_pages_collapsed);
}
static struct kobj_attribute pages_collapsed_attr =
	__ATTR_RO(pages_collapsed);

static ssize_t full_scans_show(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_full_scans);
}
static struct kobj_attribute full_scans_attr =
	__ATTR_RO(full_scans);

static ssize_t defrag_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
					 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static ssize_t defrag_store(struct kobject *kobj,
			    struct kobj_attribute *attr,
			    const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
					  TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static struct kobj_attribute khugepaged_defrag_attr =
	__ATTR_RW(defrag);

/*
 * max_ptes_none controls if khugepaged should collapse hugepages over
 * any unmapped ptes in turn potentially increasing the memory
 * footprint of the vmas. When max_ptes_none is 0 khugepaged will not
 * reduce the available free memory in the system as it
 * runs. Increasing max_ptes_none will instead potentially reduce the
 * free memory in the system during the khugepaged scan.
 */
static ssize_t max_ptes_none_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_none);
}
static ssize_t max_ptes_none_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_none;

	err = kstrtoul(buf, 10, &max_ptes_none);
	if (err || max_ptes_none > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_none = max_ptes_none;

	return count;
}
static struct kobj_attribute khugepaged_max_ptes_none_attr =
	__ATTR_RW(max_ptes_none);

static ssize_t max_ptes_swap_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_swap);
}

static ssize_t max_ptes_swap_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_swap;

	err = kstrtoul(buf, 10, &max_ptes_swap);
	if (err || max_ptes_swap > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_swap = max_ptes_swap;

	return count;
}

static struct kobj_attribute khugepaged_max_ptes_swap_attr =
	__ATTR_RW(max_ptes_swap);

static ssize_t max_ptes_shared_show(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_shared);
}

static ssize_t max_ptes_shared_store(struct kobject *kobj,
				     struct kobj_attribute *attr,
				     const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_shared;

	err = kstrtoul(buf, 10, &max_ptes_shared);
	if (err || max_ptes_shared > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_shared = max_ptes_shared;

	return count;
}

static struct kobj_attribute khugepaged_max_ptes_shared_attr =
	__ATTR_RW(max_ptes_shared);

static struct attribute *khugepaged_attr[] = {
	&khugepaged_defrag_attr.attr,
	&khugepaged_max_ptes_none_attr.attr,
	&khugepaged_max_ptes_swap_attr.attr,
	&khugepaged_max_ptes_shared_attr.attr,
	&pages_to_scan_attr.attr,
	&pages_collapsed_attr.attr,
	&full_scans_attr.attr,
	&scan_sleep_millisecs_attr.attr,
	&alloc_sleep_millisecs_attr.attr,
	NULL,
};

struct attribute_group khugepaged_attr_group = {
	.attrs = khugepaged_attr,
	.name = "khugepaged",
};
#endif /* CONFIG_SYSFS */

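/*
 * These attributes are exposed under
 * /sys/kernel/mm/transparent_hugepage/khugepaged/.  Rough usage sketch
 * (the values below are only illustrative):
 *
 *	echo 4096 > /sys/kernel/mm/transparent_hugepage/khugepaged/pages_to_scan
 *	cat /sys/kernel/mm/transparent_hugepage/khugepaged/pages_collapsed
 */
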
int hugepage_madvise(struct vm_area_struct *vma,
		     unsigned long *vm_flags, int advice)
{
	switch (advice) {
	case MADV_HUGEPAGE:
#ifdef CONFIG_S390
		/*
		 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
		 * can't handle this properly after s390_enable_sie, so we simply
		 * ignore the madvise to prevent qemu from causing a SIGSEGV.
		 */
		if (mm_has_pgste(vma->vm_mm))
			return 0;
#endif
		*vm_flags &= ~VM_NOHUGEPAGE;
		*vm_flags |= VM_HUGEPAGE;
		/*
		 * If the vma becomes good for khugepaged to scan,
		 * register it here without waiting for a page fault that
		 * may not happen any time soon.
		 */
		khugepaged_enter_vma(vma, *vm_flags);
		break;
	case MADV_NOHUGEPAGE:
		*vm_flags &= ~VM_HUGEPAGE;
		*vm_flags |= VM_NOHUGEPAGE;
		/*
		 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
		 * this vma even if we leave the mm registered in khugepaged if
		 * it got registered before VM_NOHUGEPAGE was set.
		 */
		break;
	}

	return 0;
}

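/*
 * Userspace reaches the handler above through madvise(2); a minimal sketch
 * (error handling omitted, `buf` and `len` are only illustrative):
 *
 *	void *buf = aligned_alloc(2 * 1024 * 1024, len);
 *	madvise(buf, len, MADV_HUGEPAGE);   // sets VM_HUGEPAGE on the VMA
 *	madvise(buf, len, MADV_NOHUGEPAGE); // opts the range back out
 */
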
int __init khugepaged_init(void)
{
	mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
					  sizeof(struct khugepaged_mm_slot),
					  __alignof__(struct khugepaged_mm_slot),
					  0, NULL);
	if (!mm_slot_cache)
		return -ENOMEM;

	khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
	khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
	khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
	khugepaged_max_ptes_shared = HPAGE_PMD_NR / 2;

	return 0;
}

void __init khugepaged_destroy(void)
{
	kmem_cache_destroy(mm_slot_cache);
}

static inline int hpage_collapse_test_exit(struct mm_struct *mm)
{
	return atomic_read(&mm->mm_users) == 0;
}

void __khugepaged_enter(struct mm_struct *mm)
{
	struct khugepaged_mm_slot *mm_slot;
	struct mm_slot *slot;
	int wakeup;

	mm_slot = mm_slot_alloc(mm_slot_cache);
	if (!mm_slot)
		return;

	slot = &mm_slot->slot;

	/* __khugepaged_exit() must not run from under us */
	VM_BUG_ON_MM(hpage_collapse_test_exit(mm), mm);
	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
		mm_slot_free(mm_slot_cache, mm_slot);
		return;
	}

	spin_lock(&khugepaged_mm_lock);
	mm_slot_insert(mm_slots_hash, mm, slot);
	/*
	 * Insert just behind the scanning cursor, to let the area settle
	 * down a little.
	 */
	wakeup = list_empty(&khugepaged_scan.mm_head);
	list_add_tail(&slot->mm_node, &khugepaged_scan.mm_head);
	spin_unlock(&khugepaged_mm_lock);

	mmgrab(mm);
	if (wakeup)
		wake_up_interruptible(&khugepaged_wait);
}

void khugepaged_enter_vma(struct vm_area_struct *vma,
			  unsigned long vm_flags)
{
	if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
	    hugepage_flags_enabled()) {
		if (hugepage_vma_check(vma, vm_flags, false, false, true))
			__khugepaged_enter(vma->vm_mm);
	}
}

void __khugepaged_exit(struct mm_struct *mm)
{
	struct khugepaged_mm_slot *mm_slot;
	struct mm_slot *slot;
	int free = 0;

	spin_lock(&khugepaged_mm_lock);
	slot = mm_slot_lookup(mm_slots_hash, mm);
	mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
	if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
		hash_del(&slot->hash);
		list_del(&slot->mm_node);
		free = 1;
	}
	spin_unlock(&khugepaged_mm_lock);

	if (free) {
		clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		mm_slot_free(mm_slot_cache, mm_slot);
		mmdrop(mm);
	} else if (mm_slot) {
		/*
		 * This is required to serialize against
		 * hpage_collapse_test_exit() (which is guaranteed to run
		 * under mmap sem read mode). Stop here (after we return all
		 * pagetables will be destroyed) until khugepaged has finished
		 * working on the pagetables under the mmap_lock.
		 */
		mmap_write_lock(mm);
		mmap_write_unlock(mm);
	}
}

static void release_pte_folio(struct folio *folio)
{
	node_stat_mod_folio(folio,
			NR_ISOLATED_ANON + folio_is_file_lru(folio),
			-folio_nr_pages(folio));
	folio_unlock(folio);
	folio_putback_lru(folio);
}

static void release_pte_page(struct page *page)
{
	release_pte_folio(page_folio(page));
}

static void release_pte_pages(pte_t *pte, pte_t *_pte,
		struct list_head *compound_pagelist)
{
	struct folio *folio, *tmp;

	while (--_pte >= pte) {
		pte_t pteval = *_pte;
		unsigned long pfn;

		if (pte_none(pteval))
			continue;
		pfn = pte_pfn(pteval);
		if (is_zero_pfn(pfn))
			continue;
		folio = pfn_folio(pfn);
		if (folio_test_large(folio))
			continue;
		release_pte_folio(folio);
	}

	list_for_each_entry_safe(folio, tmp, compound_pagelist, lru) {
		list_del(&folio->lru);
		release_pte_folio(folio);
	}
}

static bool is_refcount_suitable(struct page *page)
{
	int expected_refcount;

	expected_refcount = total_mapcount(page);
	if (PageSwapCache(page))
		expected_refcount += compound_nr(page);

	return page_count(page) == expected_refcount;
}

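/*
 * Example for is_refcount_suitable() (illustrative numbers): a base page
 * mapped by three processes and also sitting in the swap cache is expected
 * to have total_mapcount == 3 plus one swap cache reference, so
 * page_count == 4.  Any count above that implies a GUP or other external
 * pin, and the page is not suitable for collapse.
 */
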
static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
					unsigned long address,
					pte_t *pte,
					struct collapse_control *cc,
					struct list_head *compound_pagelist)
{
	struct page *page = NULL;
	pte_t *_pte;
	int none_or_zero = 0, shared = 0, result = SCAN_FAIL, referenced = 0;
	bool writable = false;

	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
	     _pte++, address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		if (pte_none(pteval) || (pte_present(pteval) &&
				is_zero_pfn(pte_pfn(pteval)))) {
			++none_or_zero;
			if (!userfaultfd_armed(vma) &&
			    (!cc->is_khugepaged ||
			     none_or_zero <= khugepaged_max_ptes_none)) {
				continue;
			} else {
				result = SCAN_EXCEED_NONE_PTE;
				count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
				goto out;
			}
		}
		if (!pte_present(pteval)) {
			result = SCAN_PTE_NON_PRESENT;
			goto out;
		}
		if (pte_uffd_wp(pteval)) {
			result = SCAN_PTE_UFFD_WP;
			goto out;
		}
		page = vm_normal_page(vma, address, pteval);
		if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
			result = SCAN_PAGE_NULL;
			goto out;
		}

		VM_BUG_ON_PAGE(!PageAnon(page), page);

		if (page_mapcount(page) > 1) {
			++shared;
			if (cc->is_khugepaged &&
			    shared > khugepaged_max_ptes_shared) {
				result = SCAN_EXCEED_SHARED_PTE;
				count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
				goto out;
			}
		}

		if (PageCompound(page)) {
			struct page *p;
			page = compound_head(page);

			/*
			 * Check if we have dealt with the compound page
			 * already
			 */
			list_for_each_entry(p, compound_pagelist, lru) {
				if (page == p)
					goto next;
			}
		}

		/*
		 * We can do it before isolate_lru_page because the
		 * page can't be freed from under us. NOTE: PG_lock
		 * is needed to serialize against split_huge_page
		 * when invoked from the VM.
		 */
		if (!trylock_page(page)) {
			result = SCAN_PAGE_LOCK;
			goto out;
		}

		/*
		 * Check if the page has any GUP (or other external) pins.
		 *
		 * The page table that maps the page has been already unlinked
		 * from the page table tree and this process cannot get
		 * an additional pin on the page.
		 *
		 * New pins can come later if the page is shared across fork,
		 * but not from this process. The other process cannot write to
		 * the page, only trigger CoW.
		 */
		if (!is_refcount_suitable(page)) {
			unlock_page(page);
			result = SCAN_PAGE_COUNT;
			goto out;
		}

		/*
		 * Isolate the page to avoid collapsing an hugepage
		 * currently in use by the VM.
		 */
		if (!isolate_lru_page(page)) {
			unlock_page(page);
			result = SCAN_DEL_PAGE_LRU;
			goto out;
		}
		mod_node_page_state(page_pgdat(page),
				NR_ISOLATED_ANON + page_is_file_lru(page),
				compound_nr(page));
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(PageLRU(page), page);

		if (PageCompound(page))
			list_add_tail(&page->lru, compound_pagelist);
next:
		/*
		 * If collapse was initiated by khugepaged, check that there is
		 * enough young pte to justify collapsing the page
		 */
		if (cc->is_khugepaged &&
		    (pte_young(pteval) || page_is_young(page) ||
		     PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm,
								     address)))
			referenced++;

		if (pte_write(pteval))
			writable = true;
	}

	if (unlikely(!writable)) {
		result = SCAN_PAGE_RO;
	} else if (unlikely(cc->is_khugepaged && !referenced)) {
		result = SCAN_LACK_REFERENCED_PAGE;
	} else {
		result = SCAN_SUCCEED;
		trace_mm_collapse_huge_page_isolate(page, none_or_zero,
						    referenced, writable, result);
		return result;
	}
out:
	release_pte_pages(pte, _pte, compound_pagelist);
	trace_mm_collapse_huge_page_isolate(page, none_or_zero,
					    referenced, writable, result);
	return result;
}

static void __collapse_huge_page_copy_succeeded(pte_t *pte,
						struct vm_area_struct *vma,
						unsigned long address,
						spinlock_t *ptl,
						struct list_head *compound_pagelist)
{
	struct page *src_page;
	struct page *tmp;
	pte_t *_pte;
	pte_t pteval;

	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
	     _pte++, address += PAGE_SIZE) {
		pteval = *_pte;
		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
			if (is_zero_pfn(pte_pfn(pteval))) {
				/*
				 * ptl mostly unnecessary.
				 */
				spin_lock(ptl);
				ptep_clear(vma->vm_mm, address, _pte);
				spin_unlock(ptl);
			}
		} else {
			src_page = pte_page(pteval);
			if (!PageCompound(src_page))
				release_pte_page(src_page);
			/*
			 * ptl mostly unnecessary, but preempt has to
			 * be disabled to update the per-cpu stats
			 * inside page_remove_rmap().
			 */
			spin_lock(ptl);
			ptep_clear(vma->vm_mm, address, _pte);
			page_remove_rmap(src_page, vma, false);
			spin_unlock(ptl);
			free_page_and_swap_cache(src_page);
		}
	}

	list_for_each_entry_safe(src_page, tmp, compound_pagelist, lru) {
		list_del(&src_page->lru);
		mod_node_page_state(page_pgdat(src_page),
				    NR_ISOLATED_ANON + page_is_file_lru(src_page),
				    -compound_nr(src_page));
		unlock_page(src_page);
		free_swap_cache(src_page);
		putback_lru_page(src_page);
	}
}

static void __collapse_huge_page_copy_failed(pte_t *pte,
					     pmd_t *pmd,
					     pmd_t orig_pmd,
					     struct vm_area_struct *vma,
					     struct list_head *compound_pagelist)
{
	spinlock_t *pmd_ptl;

	/*
	 * Re-establish the PMD to point to the original page table
	 * entry. Restoring PMD needs to be done prior to releasing
	 * pages. Since pages are still isolated and locked here,
	 * acquiring anon_vma_lock_write is unnecessary.
	 */
	pmd_ptl = pmd_lock(vma->vm_mm, pmd);
	pmd_populate(vma->vm_mm, pmd, pmd_pgtable(orig_pmd));
	spin_unlock(pmd_ptl);
	/*
	 * Release both raw and compound pages isolated
	 * in __collapse_huge_page_isolate.
	 */
	release_pte_pages(pte, pte + HPAGE_PMD_NR, compound_pagelist);
}

/*
 * __collapse_huge_page_copy - attempts to copy memory contents from raw
 * pages to a hugepage. Cleans up the raw pages if copying succeeds;
 * otherwise restores the original page table and releases isolated raw pages.
 * Returns SCAN_SUCCEED if copying succeeds, otherwise returns SCAN_COPY_MC.
 *
 * @pte: starting of the PTEs to copy from
 * @page: the new hugepage to copy contents to
 * @pmd: pointer to the new hugepage's PMD
 * @orig_pmd: the original raw pages' PMD
 * @vma: the original raw pages' virtual memory area
 * @address: starting address to copy
 * @ptl: lock on raw pages' PTEs
 * @compound_pagelist: list that stores compound pages
 */
static int __collapse_huge_page_copy(pte_t *pte,
				     struct page *page,
				     pmd_t *pmd,
				     pmd_t orig_pmd,
				     struct vm_area_struct *vma,
				     unsigned long address,
				     spinlock_t *ptl,
				     struct list_head *compound_pagelist)
{
	struct page *src_page;
	pte_t *_pte;
	pte_t pteval;
	unsigned long _address;
	int result = SCAN_SUCCEED;

	/*
	 * Copying pages' contents is subject to memory poison at any iteration.
	 */
	for (_pte = pte, _address = address; _pte < pte + HPAGE_PMD_NR;
	     _pte++, page++, _address += PAGE_SIZE) {
		pteval = *_pte;
		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			clear_user_highpage(page, _address);
			continue;
		}
		src_page = pte_page(pteval);
		if (copy_mc_user_highpage(page, src_page, _address, vma) > 0) {
			result = SCAN_COPY_MC;
			break;
		}
	}

	if (likely(result == SCAN_SUCCEED))
		__collapse_huge_page_copy_succeeded(pte, vma, address, ptl,
						    compound_pagelist);
	else
		__collapse_huge_page_copy_failed(pte, pmd, orig_pmd, vma,
						 compound_pagelist);

	return result;
}

static void khugepaged_alloc_sleep(void)
{
	DEFINE_WAIT(wait);

	add_wait_queue(&khugepaged_wait, &wait);
	__set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
	schedule_timeout(msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
	remove_wait_queue(&khugepaged_wait, &wait);
}

struct collapse_control khugepaged_collapse_control = {
	.is_khugepaged = true,
};

static bool hpage_collapse_scan_abort(int nid, struct collapse_control *cc)
{
	int i;

	/*
	 * If node_reclaim_mode is disabled, then no extra effort is made to
	 * allocate memory locally.
	 */
	if (!node_reclaim_enabled())
		return false;

	/* If there is a count for this node already, it must be acceptable */
	if (cc->node_load[nid])
		return false;

	for (i = 0; i < MAX_NUMNODES; i++) {
		if (!cc->node_load[i])
			continue;
		if (node_distance(nid, i) > node_reclaim_distance)
			return true;
	}
	return false;
}

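/*
 * Illustrative example for hpage_collapse_scan_abort(): with
 * node_reclaim_mode enabled, if all pages seen so far came from node 0 and
 * the current page sits on node 2, the scan aborts (SCAN_SCAN_ABORT) when
 * node_distance(2, 0) exceeds node_reclaim_distance, rather than collapsing
 * into a hugepage that would mix memory from distant nodes.
 */
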
#define khugepaged_defrag()					\
	(transparent_hugepage_flags &				\
	 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG))

/* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
{
	return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
}

#ifdef CONFIG_NUMA
static int hpage_collapse_find_target_node(struct collapse_control *cc)
{
	int nid, target_node = 0, max_value = 0;

	/* find first node with max normal pages hit */
	for (nid = 0; nid < MAX_NUMNODES; nid++)
		if (cc->node_load[nid] > max_value) {
			max_value = cc->node_load[nid];
			target_node = nid;
		}

	for_each_online_node(nid) {
		if (max_value == cc->node_load[nid])
			node_set(nid, cc->alloc_nmask);
	}

	return target_node;
}
#else
static int hpage_collapse_find_target_node(struct collapse_control *cc)
{
	return 0;
}
#endif

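/*
 * Illustrative example for hpage_collapse_find_target_node(): if the scan
 * recorded node_load = {0: 200, 1: 312, 2: 312}, node 1 is returned as the
 * preferred allocation node (first node with the maximum hit count) and
 * both nodes 1 and 2 are set in cc->alloc_nmask as acceptable fallbacks.
 */
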
static bool hpage_collapse_alloc_page(struct page **hpage, gfp_t gfp, int node,
				      nodemask_t *nmask)
{
	*hpage = __alloc_pages(gfp, HPAGE_PMD_ORDER, node, nmask);
	if (unlikely(!*hpage)) {
		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
		return false;
	}

	prep_transhuge_page(*hpage);
	count_vm_event(THP_COLLAPSE_ALLOC);
	return true;
}

/*
 * If mmap_lock temporarily dropped, revalidate vma
 * before taking mmap_lock.
 * Returns enum scan_result value.
 */

static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
				   bool expect_anon,
				   struct vm_area_struct **vmap,
				   struct collapse_control *cc)
{
	struct vm_area_struct *vma;

	if (unlikely(hpage_collapse_test_exit(mm)))
		return SCAN_ANY_PROCESS;

	*vmap = vma = find_vma(mm, address);
	if (!vma)
		return SCAN_VMA_NULL;

	if (!transhuge_vma_suitable(vma, address))
		return SCAN_ADDRESS_RANGE;
	if (!hugepage_vma_check(vma, vma->vm_flags, false, false,
				cc->is_khugepaged))
		return SCAN_VMA_CHECK;
	/*
	 * Anon VMA expected, the address may be unmapped then
	 * remapped to file after khugepaged reacquired the mmap_lock.
	 *
	 * hugepage_vma_check may return true for qualified file
	 * vmas.
	 */
	if (expect_anon && (!(*vmap)->anon_vma || !vma_is_anonymous(*vmap)))
		return SCAN_PAGE_ANON;
	return SCAN_SUCCEED;
}

/*
 * See pmd_trans_unstable() for how the result may change out from
 * underneath us, even if we hold mmap_lock in read.
 */
static int find_pmd_or_thp_or_none(struct mm_struct *mm,
				   unsigned long address,
				   pmd_t **pmd)
{
	pmd_t pmde;

	*pmd = mm_find_pmd(mm, address);
	if (!*pmd)
		return SCAN_PMD_NULL;

	pmde = pmdp_get_lockless(*pmd);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	/* See comments in pmd_none_or_trans_huge_or_clear_bad() */
	barrier();
#endif
	if (pmd_none(pmde))
		return SCAN_PMD_NONE;
	if (!pmd_present(pmde))
		return SCAN_PMD_NULL;
	if (pmd_trans_huge(pmde))
		return SCAN_PMD_MAPPED;
	if (pmd_devmap(pmde))
		return SCAN_PMD_NULL;
	if (pmd_bad(pmde))
		return SCAN_PMD_NULL;
	return SCAN_SUCCEED;
}

static int check_pmd_still_valid(struct mm_struct *mm,
				 unsigned long address,
				 pmd_t *pmd)
{
	pmd_t *new_pmd;
	int result = find_pmd_or_thp_or_none(mm, address, &new_pmd);

	if (result != SCAN_SUCCEED)
		return result;
	if (new_pmd != pmd)
		return SCAN_FAIL;
	return SCAN_SUCCEED;
}

/*
 * Bring missing pages in from swap, to complete THP collapse.
 * Only done if hpage_collapse_scan_pmd believes it is worthwhile.
 *
 * Called and returns without pte mapped or spinlocks held.
 * Note that if false is returned, mmap_lock will be released.
 */

static int __collapse_huge_page_swapin(struct mm_struct *mm,
				       struct vm_area_struct *vma,
				       unsigned long haddr, pmd_t *pmd,
				       int referenced)
{
	int swapped_in = 0;
	vm_fault_t ret = 0;
	unsigned long address, end = haddr + (HPAGE_PMD_NR * PAGE_SIZE);

	for (address = haddr; address < end; address += PAGE_SIZE) {
		struct vm_fault vmf = {
			.vma = vma,
			.address = address,
			.pgoff = linear_page_index(vma, haddr),
			.flags = FAULT_FLAG_ALLOW_RETRY,
			.pmd = pmd,
		};

		vmf.pte = pte_offset_map(pmd, address);
		vmf.orig_pte = *vmf.pte;
		if (!is_swap_pte(vmf.orig_pte)) {
			pte_unmap(vmf.pte);
			continue;
		}
		ret = do_swap_page(&vmf);

		/*
		 * do_swap_page returns VM_FAULT_RETRY with released mmap_lock.
		 * Note we treat VM_FAULT_RETRY as VM_FAULT_ERROR here because
		 * we do not retry here and swap entry will remain in pagetable
		 * resulting in later failure.
		 */
		if (ret & VM_FAULT_RETRY) {
			trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
			/* Likely, but not guaranteed, that page lock failed */
			return SCAN_PAGE_LOCK;
		}
		if (ret & VM_FAULT_ERROR) {
			mmap_read_unlock(mm);
			trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
			return SCAN_FAIL;
		}
		swapped_in++;
	}

	/* Drain LRU add pagevec to remove extra pin on the swapped in pages */
	if (swapped_in)
		lru_add_drain();

	trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
	return SCAN_SUCCEED;
}

static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm,
			      struct collapse_control *cc)
{
	gfp_t gfp = (cc->is_khugepaged ? alloc_hugepage_khugepaged_gfpmask() :
		     GFP_TRANSHUGE);
	int node = hpage_collapse_find_target_node(cc);
	struct folio *folio;

	if (!hpage_collapse_alloc_page(hpage, gfp, node, &cc->alloc_nmask))
		return SCAN_ALLOC_HUGE_PAGE_FAIL;

	folio = page_folio(*hpage);
	if (unlikely(mem_cgroup_charge(folio, mm, gfp))) {
		folio_put(folio);
		*hpage = NULL;
		return SCAN_CGROUP_CHARGE_FAIL;
	}
	count_memcg_page_event(*hpage, THP_COLLAPSE_ALLOC);

	return SCAN_SUCCEED;
}

static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
			      int referenced, int unmapped,
			      struct collapse_control *cc)
{
	LIST_HEAD(compound_pagelist);
	pmd_t *pmd, _pmd;
	pte_t *pte;
	pgtable_t pgtable;
	struct page *hpage;
	spinlock_t *pmd_ptl, *pte_ptl;
	int result = SCAN_FAIL;
	struct vm_area_struct *vma;
	struct mmu_notifier_range range;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	/*
	 * Before allocating the hugepage, release the mmap_lock read lock.
	 * The allocation can take potentially a long time if it involves
	 * sync compaction, and we do not need to hold the mmap_lock during
	 * that. We will recheck the vma after taking it again in write mode.
	 */
	mmap_read_unlock(mm);

	result = alloc_charge_hpage(&hpage, mm, cc);
	if (result != SCAN_SUCCEED)
		goto out_nolock;

	mmap_read_lock(mm);
	result = hugepage_vma_revalidate(mm, address, true, &vma, cc);
	if (result != SCAN_SUCCEED) {
		mmap_read_unlock(mm);
		goto out_nolock;
	}

	result = find_pmd_or_thp_or_none(mm, address, &pmd);
	if (result != SCAN_SUCCEED) {
		mmap_read_unlock(mm);
		goto out_nolock;
	}

	if (unmapped) {
		/*
		 * __collapse_huge_page_swapin will return with mmap_lock
		 * released when it fails. So we jump out_nolock directly in
		 * that case. Continuing to collapse causes inconsistency.
		 */
		result = __collapse_huge_page_swapin(mm, vma, address, pmd,
						     referenced);
		if (result != SCAN_SUCCEED)
			goto out_nolock;
	}

	mmap_read_unlock(mm);
	/*
	 * Prevent all access to pagetables with the exception of
	 * gup_fast later handled by the ptep_clear_flush and the VM
	 * handled by the anon_vma lock + PG_lock.
	 */
	mmap_write_lock(mm);
	result = hugepage_vma_revalidate(mm, address, true, &vma, cc);
	if (result != SCAN_SUCCEED)
		goto out_up_write;
	/* check if the pmd is still valid */
	result = check_pmd_still_valid(mm, address, pmd);
	if (result != SCAN_SUCCEED)
		goto out_up_write;

	vma_start_write(vma);
	anon_vma_lock_write(vma->anon_vma);

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, address,
				address + HPAGE_PMD_SIZE);
	mmu_notifier_invalidate_range_start(&range);

	pte = pte_offset_map(pmd, address);
	pte_ptl = pte_lockptr(mm, pmd);

	pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
	/*
	 * This removes any huge TLB entry from the CPU so we won't allow
	 * huge and small TLB entries for the same virtual address to
	 * avoid the risk of CPU bugs in that area.
	 *
	 * Parallel fast GUP is fine since fast GUP will back off when
	 * it detects PMD is changed.
	 */
	_pmd = pmdp_collapse_flush(vma, address, pmd);
	spin_unlock(pmd_ptl);
	mmu_notifier_invalidate_range_end(&range);
	tlb_remove_table_sync_one();

	spin_lock(pte_ptl);
	result = __collapse_huge_page_isolate(vma, address, pte, cc,
					      &compound_pagelist);
	spin_unlock(pte_ptl);

	if (unlikely(result != SCAN_SUCCEED)) {
		pte_unmap(pte);
		spin_lock(pmd_ptl);
		BUG_ON(!pmd_none(*pmd));
		/*
		 * We can only use set_pmd_at when establishing
		 * hugepmds and never for establishing regular pmds that
		 * points to regular pagetables. Use pmd_populate for that
		 */
		pmd_populate(mm, pmd, pmd_pgtable(_pmd));
		spin_unlock(pmd_ptl);
		anon_vma_unlock_write(vma->anon_vma);
		goto out_up_write;
	}

	/*
	 * All pages are isolated and locked so anon_vma rmap
	 * can't run anymore.
	 */
	anon_vma_unlock_write(vma->anon_vma);

	result = __collapse_huge_page_copy(pte, hpage, pmd, _pmd,
					   vma, address, pte_ptl,
					   &compound_pagelist);
	pte_unmap(pte);
	if (unlikely(result != SCAN_SUCCEED))
		goto out_up_write;

	/*
	 * spin_lock() below is not the equivalent of smp_wmb(), but
	 * the smp_wmb() inside __SetPageUptodate() can be reused to
	 * avoid the copy_huge_page writes to become visible after
	 * the set_pmd_at() write.
	 */
	__SetPageUptodate(hpage);
	pgtable = pmd_pgtable(_pmd);

	_pmd = mk_huge_pmd(hpage, vma->vm_page_prot);
	_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);

	spin_lock(pmd_ptl);
	BUG_ON(!pmd_none(*pmd));
	page_add_new_anon_rmap(hpage, vma, address);
	lru_cache_add_inactive_or_unevictable(hpage, vma);
	pgtable_trans_huge_deposit(mm, pmd, pgtable);
	set_pmd_at(mm, address, pmd, _pmd);
	update_mmu_cache_pmd(vma, address, pmd);
	spin_unlock(pmd_ptl);

	hpage = NULL;

	result = SCAN_SUCCEED;
out_up_write:
	mmap_write_unlock(mm);
out_nolock:
	if (hpage)
		put_page(hpage);
	trace_mm_collapse_huge_page(mm, result == SCAN_SUCCEED, result);
	return result;
}

static int hpage_collapse_scan_pmd(struct mm_struct *mm,
				   struct vm_area_struct *vma,
				   unsigned long address, bool *mmap_locked,
				   struct collapse_control *cc)
{
	pmd_t *pmd;
	pte_t *pte, *_pte;
	int result = SCAN_FAIL, referenced = 0;
	int none_or_zero = 0, shared = 0;
	struct page *page = NULL;
	unsigned long _address;
	spinlock_t *ptl;
	int node = NUMA_NO_NODE, unmapped = 0;
	bool writable = false;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	result = find_pmd_or_thp_or_none(mm, address, &pmd);
	if (result != SCAN_SUCCEED)
		goto out;

	memset(cc->node_load, 0, sizeof(cc->node_load));
	nodes_clear(cc->alloc_nmask);
	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
	for (_address = address, _pte = pte; _pte < pte + HPAGE_PMD_NR;
	     _pte++, _address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		if (is_swap_pte(pteval)) {
			++unmapped;
			if (!cc->is_khugepaged ||
			    unmapped <= khugepaged_max_ptes_swap) {
				/*
				 * Always be strict with uffd-wp
				 * enabled swap entries. Please see
				 * comment below for pte_uffd_wp().
				 */
				if (pte_swp_uffd_wp_any(pteval)) {
					result = SCAN_PTE_UFFD_WP;
					goto out_unmap;
				}
				continue;
			} else {
				result = SCAN_EXCEED_SWAP_PTE;
				count_vm_event(THP_SCAN_EXCEED_SWAP_PTE);
				goto out_unmap;
			}
		}
		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			++none_or_zero;
			if (!userfaultfd_armed(vma) &&
			    (!cc->is_khugepaged ||
			     none_or_zero <= khugepaged_max_ptes_none)) {
				continue;
			} else {
				result = SCAN_EXCEED_NONE_PTE;
				count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
				goto out_unmap;
			}
		}
		if (pte_uffd_wp(pteval)) {
			/*
			 * Don't collapse the page if any of the small
			 * PTEs are armed with uffd write protection.
			 * Here we can also mark the new huge pmd as
			 * write protected if any of the small ones is
			 * marked but that could bring unknown
			 * userfault messages that falls outside of
			 * the registered range. So, just be simple.
			 */
			result = SCAN_PTE_UFFD_WP;
			goto out_unmap;
		}
		if (pte_write(pteval))
			writable = true;

		page = vm_normal_page(vma, _address, pteval);
		if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
			result = SCAN_PAGE_NULL;
			goto out_unmap;
		}

		if (page_mapcount(page) > 1) {
			++shared;
			if (cc->is_khugepaged &&
			    shared > khugepaged_max_ptes_shared) {
				result = SCAN_EXCEED_SHARED_PTE;
				count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
				goto out_unmap;
			}
		}

		page = compound_head(page);

		/*
		 * Record which node the original page is from and save this
		 * information to cc->node_load[].
		 * Khugepaged will allocate a hugepage from the node that has
		 * the max hit record.
		 */
		node = page_to_nid(page);
		if (hpage_collapse_scan_abort(node, cc)) {
			result = SCAN_SCAN_ABORT;
			goto out_unmap;
		}
		cc->node_load[node]++;
		if (!PageLRU(page)) {
			result = SCAN_PAGE_LRU;
			goto out_unmap;
		}
		if (PageLocked(page)) {
			result = SCAN_PAGE_LOCK;
			goto out_unmap;
		}
		if (!PageAnon(page)) {
			result = SCAN_PAGE_ANON;
			goto out_unmap;
		}

		/*
		 * Check if the page has any GUP (or other external) pins.
		 *
		 * Here the check may be racy:
		 * it may see total_mapcount > refcount in some cases?
		 * But such a case is ephemeral and we could always retry
		 * collapse later. However it may report false positive if the
		 * page has excessive GUP pins (i.e. 512). Anyway the same
		 * check will be done again later, so the risk seems low.
		 */
		if (!is_refcount_suitable(page)) {
			result = SCAN_PAGE_COUNT;
			goto out_unmap;
		}

		/*
		 * If collapse was initiated by khugepaged, check that there is
		 * enough young pte to justify collapsing the page
		 */
		if (cc->is_khugepaged &&
		    (pte_young(pteval) || page_is_young(page) ||
		     PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm,
								     address)))
			referenced++;
	}
	if (!writable) {
		result = SCAN_PAGE_RO;
	} else if (cc->is_khugepaged &&
		   (!referenced ||
		    (unmapped && referenced < HPAGE_PMD_NR / 2))) {
		result = SCAN_LACK_REFERENCED_PAGE;
	} else {
		result = SCAN_SUCCEED;
	}
out_unmap:
	pte_unmap_unlock(pte, ptl);
	if (result == SCAN_SUCCEED) {
		result = collapse_huge_page(mm, address, referenced,
					    unmapped, cc);
		/* collapse_huge_page will return with the mmap_lock released */
		*mmap_locked = false;
	}
out:
	trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
				     none_or_zero, result, unmapped);
	return result;
}

static void collect_mm_slot(struct khugepaged_mm_slot *mm_slot)
{
	struct mm_slot *slot = &mm_slot->slot;
	struct mm_struct *mm = slot->mm;

	lockdep_assert_held(&khugepaged_mm_lock);

	if (hpage_collapse_test_exit(mm)) {
		/* free mm_slot */
		hash_del(&slot->hash);
		list_del(&slot->mm_node);

		/*
		 * Not strictly needed because the mm exited already.
		 *
		 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		 */

		/* khugepaged_mm_lock actually not necessary for the below */
		mm_slot_free(mm_slot_cache, mm_slot);
		mmdrop(mm);
	}
}

#ifdef CONFIG_SHMEM
/*
 * Notify khugepaged that given addr of the mm is pte-mapped THP. Then
 * khugepaged should try to collapse the page table.
 *
 * Note that following race exists:
 * (1) khugepaged calls khugepaged_collapse_pte_mapped_thps() for mm_struct A,
 *     emptying the A's ->pte_mapped_thp[] array.
 * (2) MADV_COLLAPSE collapses some file extent with target mm_struct B, and
 *     retract_page_tables() finds a VMA in mm_struct A mapping the same extent
 *     (at virtual address X) and adds an entry (for X) into mm_struct A's
 *     ->pte-mapped_thp[] array.
 * (3) khugepaged calls khugepaged_collapse_scan_file() for mm_struct A at X,
 *     sees a pte-mapped THP (SCAN_PTE_MAPPED_HUGEPAGE) and adds an entry
 *     (for X) into mm_struct A's ->pte-mapped_thp[] array.
 * Thus, it's possible the same address is added multiple times for the same
 * mm_struct.  Should this happen, we'll simply attempt
 * collapse_pte_mapped_thp() multiple times for the same address, under the same
 * exclusive mmap_lock, and assuming the first call is successful, subsequent
 * attempts will return quickly (without grabbing any additional locks) when
 * a huge pmd is found in find_pmd_or_thp_or_none().  Since this is a cheap
 * check, and since this is a rare occurrence, the cost of preventing this
 * "multiple-add" is thought to be more expensive than just handling it, should
 * it occur.
 */
static bool khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
					  unsigned long addr)
{
	struct khugepaged_mm_slot *mm_slot;
	struct mm_slot *slot;
	bool ret = false;

	VM_BUG_ON(addr & ~HPAGE_PMD_MASK);

	spin_lock(&khugepaged_mm_lock);
	slot = mm_slot_lookup(mm_slots_hash, mm);
	mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
	if (likely(mm_slot && mm_slot->nr_pte_mapped_thp < MAX_PTE_MAPPED_THP)) {
		mm_slot->pte_mapped_thp[mm_slot->nr_pte_mapped_thp++] = addr;
		ret = true;
	}
	spin_unlock(&khugepaged_mm_lock);
	return ret;
}

/* hpage must be locked, and mmap_lock must be held in write */
static int set_huge_pmd(struct vm_area_struct *vma, unsigned long addr,
			pmd_t *pmdp, struct page *hpage)
{
	struct vm_fault vmf = {
		.vma = vma,
		.address = addr,
		.flags = 0,
		.pmd = pmdp,
	};

	VM_BUG_ON(!PageTransHuge(hpage));
	mmap_assert_write_locked(vma->vm_mm);

	if (do_set_pmd(&vmf, hpage))
		return SCAN_FAIL;

	get_page(hpage);
	return SCAN_SUCCEED;
}

/*
 * A note about locking:
 * Trying to take the page table spinlocks would be useless here because those
 * are only used to synchronize:
 *
 *  - modifying terminal entries (ones that point to a data page, not to another
 *    page table)
 *  - installing *new* non-terminal entries
 *
 * Instead, we need roughly the same kind of protection as free_pgtables() or
 * mm_take_all_locks() (but only for a single VMA):
 * The mmap lock together with this VMA's rmap locks covers all paths towards
 * the page table entries we're messing with here, except for hardware page
 * table walks and lockless_pages_from_mm().
 */
static void collapse_and_free_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
				  unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd;
	struct mmu_notifier_range range;

	mmap_assert_write_locked(mm);
	if (vma->vm_file)
		lockdep_assert_held_write(&vma->vm_file->f_mapping->i_mmap_rwsem);
	/*
	 * All anon_vmas attached to the VMA have the same root and are
	 * therefore locked by the same lock.
	 */
	if (vma->anon_vma)
		lockdep_assert_held_write(&vma->anon_vma->root->rwsem);

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, addr,
				addr + HPAGE_PMD_SIZE);
	mmu_notifier_invalidate_range_start(&range);
	pmd = pmdp_collapse_flush(vma, addr, pmdp);
	tlb_remove_table_sync_one();
	mmu_notifier_invalidate_range_end(&range);
	mm_dec_nr_ptes(mm);
	page_table_check_pte_clear_range(mm, addr, pmd);
	pte_free(mm, pmd_pgtable(pmd));
}

27e1f827 1534/**
336e6b53
AS
1535 * collapse_pte_mapped_thp - Try to collapse a pte-mapped THP for mm at
1536 * address haddr.
1537 *
1538 * @mm: process address space where collapse happens
1539 * @addr: THP collapse address
34488399 1540 * @install_pmd: If a huge PMD should be installed
27e1f827
SL
1541 *
1542 * This function checks whether all the PTEs in the PMD are pointing to the
1543 * right THP. If so, retract the page table so the THP can refault in with
34488399 1544 * as pmd-mapped. Possibly install a huge PMD mapping the THP.
27e1f827 1545 */
34488399
ZK
1546int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
1547 bool install_pmd)
27e1f827
SL
1548{
1549 unsigned long haddr = addr & HPAGE_PMD_MASK;
94d815b2 1550 struct vm_area_struct *vma = vma_lookup(mm, haddr);
119a5fc1 1551 struct page *hpage;
27e1f827 1552 pte_t *start_pte, *pte;
e59a47b8 1553 pmd_t *pmd;
27e1f827 1554 spinlock_t *ptl;
58ac9a89 1555 int count = 0, result = SCAN_FAIL;
27e1f827
SL
1556 int i;
1557
58ac9a89
ZK
1558 mmap_assert_write_locked(mm);
1559
34488399 1560 /* Fast check before locking page if already PMD-mapped */
58ac9a89 1561 result = find_pmd_or_thp_or_none(mm, haddr, &pmd);
34488399
ZK
1562 if (result == SCAN_PMD_MAPPED)
1563 return result;
58ac9a89 1564
27e1f827 1565 if (!vma || !vma->vm_file ||
fef792a4 1566 !range_in_vma(vma, haddr, haddr + HPAGE_PMD_SIZE))
34488399 1567 return SCAN_VMA_CHECK;
27e1f827
SL
1568
1569 /*
a7f4e6e4
ZK
1570 * If we are here, we've succeeded in replacing all the native pages
1571 * in the page cache with a single hugepage. If a mm were to fault-in
1572 * this memory (mapped by a suitably aligned VMA), we'd get the hugepage
1573 * and map it by a PMD, regardless of sysfs THP settings. As such, let's
1574 * analogously elide sysfs THP settings here.
27e1f827 1575 */
a7f4e6e4 1576 if (!hugepage_vma_check(vma, vma->vm_flags, false, false, false))
34488399 1577 return SCAN_VMA_CHECK;
27e1f827 1578
deb4c93a
PX
1579 /* Keep pmd pgtable for uffd-wp; see comment in retract_page_tables() */
1580 if (userfaultfd_wp(vma))
34488399 1581 return SCAN_PTE_UFFD_WP;
deb4c93a 1582
119a5fc1
HD
1583 hpage = find_lock_page(vma->vm_file->f_mapping,
1584 linear_page_index(vma, haddr));
1585 if (!hpage)
34488399 1586 return SCAN_PAGE_NULL;
119a5fc1 1587
34488399
ZK
1588 if (!PageHead(hpage)) {
1589 result = SCAN_FAIL;
119a5fc1 1590 goto drop_hpage;
34488399 1591 }
119a5fc1 1592
34488399
ZK
1593 if (compound_order(hpage) != HPAGE_PMD_ORDER) {
1594 result = SCAN_PAGE_COMPOUND;
119a5fc1 1595 goto drop_hpage;
34488399 1596 }
119a5fc1 1597
34488399
ZK
1598 switch (result) {
1599 case SCAN_SUCCEED:
1600 break;
1601 case SCAN_PMD_NONE:
1602 /*
1603 * In MADV_COLLAPSE path, possible race with khugepaged where
1604 * all pte entries have been removed and pmd cleared. If so,
1605 * skip all the pte checks and just update the pmd mapping.
1606 */
1607 goto maybe_install_pmd;
1608 default:
119a5fc1 1609 goto drop_hpage;
34488399 1610 }
27e1f827 1611
55fd6fcc
SB
1612 /* Lock the vma before taking i_mmap and page table locks */
1613 vma_start_write(vma);
1614
8d3c106e
JH
1615 /*
1616 * We need to lock the mapping so that from here on, only GUP-fast and
1617 * hardware page walks can access the parts of the page tables that
1618 * we're operating on.
1619 * See collapse_and_free_pmd().
1620 */
1621 i_mmap_lock_write(vma->vm_file->f_mapping);
1622
1623 /*
1624 * This spinlock should be unnecessary: Nobody else should be accessing
1625 * the page tables under spinlock protection here, only
1626 * lockless_pages_from_mm() and the hardware page walker can access page
1627 * tables while all the high-level locks are held in write mode.
1628 */
27e1f827 1629 start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
34488399 1630 result = SCAN_FAIL;
27e1f827
SL
1631
1632 /* step 1: check all mapped PTEs are to the right huge page */
1633 for (i = 0, addr = haddr, pte = start_pte;
1634 i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
1635 struct page *page;
1636
1637 /* empty pte, skip */
1638 if (pte_none(*pte))
1639 continue;
1640
1641 /* page swapped out, abort */
34488399
ZK
1642 if (!pte_present(*pte)) {
1643 result = SCAN_PTE_NON_PRESENT;
27e1f827 1644 goto abort;
34488399 1645 }
27e1f827
SL
1646
1647 page = vm_normal_page(vma, addr, *pte);
3218f871
AS
1648 if (WARN_ON_ONCE(page && is_zone_device_page(page)))
1649 page = NULL;
27e1f827 1650 /*
119a5fc1
HD
1651 * Note that uprobe, debugger, or MAP_PRIVATE may change the
1652 * page table, but the new page will not be a subpage of hpage.
27e1f827 1653 */
119a5fc1 1654 if (hpage + i != page)
27e1f827
SL
1655 goto abort;
1656 count++;
1657 }
1658
1659 /* step 2: adjust rmap */
1660 for (i = 0, addr = haddr, pte = start_pte;
1661 i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
1662 struct page *page;
1663
1664 if (pte_none(*pte))
1665 continue;
1666 page = vm_normal_page(vma, addr, *pte);
3218f871
AS
1667 if (WARN_ON_ONCE(page && is_zone_device_page(page)))
1668 goto abort;
cea86fe2 1669 page_remove_rmap(page, vma, false);
27e1f827
SL
1670 }
1671
1672 pte_unmap_unlock(start_pte, ptl);
1673
1674 /* step 3: set proper refcount and mm_counters. */
119a5fc1 1675 if (count) {
27e1f827
SL
1676 page_ref_sub(hpage, count);
1677 add_mm_counter(vma->vm_mm, mm_counter_file(hpage), -count);
1678 }
1679
34488399 1680 /* step 4: remove pte entries */
ab0c3f12
HD
1681 /* we make no change to anon, but protect concurrent anon page lookup */
1682 if (vma->anon_vma)
1683 anon_vma_lock_write(vma->anon_vma);
1684
e59a47b8 1685 collapse_and_free_pmd(mm, vma, haddr, pmd);
34488399 1686
ab0c3f12
HD
1687 if (vma->anon_vma)
1688 anon_vma_unlock_write(vma->anon_vma);
8d3c106e
JH
1689 i_mmap_unlock_write(vma->vm_file->f_mapping);
1690
34488399
ZK
1691maybe_install_pmd:
1692 /* step 5: install pmd entry */
1693 result = install_pmd
1694 ? set_huge_pmd(vma, haddr, pmd, hpage)
1695 : SCAN_SUCCEED;
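	/*
	 * Callers pass install_pmd == false from khugepaged's deferred
	 * pte_mapped_thp path (leaving the cleared pmd to be repopulated at
	 * fault time), and true from MADV_COLLAPSE so the huge pmd is
	 * installed immediately.
	 */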
1696
119a5fc1
HD
1697drop_hpage:
1698 unlock_page(hpage);
1699 put_page(hpage);
34488399 1700 return result;
27e1f827
SL
1701
1702abort:
1703 pte_unmap_unlock(start_pte, ptl);
8d3c106e 1704 i_mmap_unlock_write(vma->vm_file->f_mapping);
119a5fc1 1705 goto drop_hpage;
27e1f827
SL
1706}
1707
b26e2701 1708static void khugepaged_collapse_pte_mapped_thps(struct khugepaged_mm_slot *mm_slot)
27e1f827 1709{
b26e2701
QZ
1710 struct mm_slot *slot = &mm_slot->slot;
1711 struct mm_struct *mm = slot->mm;
27e1f827
SL
1712 int i;
1713
1714 if (likely(mm_slot->nr_pte_mapped_thp == 0))
0edf61e5 1715 return;
27e1f827 1716
d8ed45c5 1717 if (!mmap_write_trylock(mm))
0edf61e5 1718 return;
27e1f827 1719
7d2c4385 1720 if (unlikely(hpage_collapse_test_exit(mm)))
27e1f827
SL
1721 goto out;
1722
1723 for (i = 0; i < mm_slot->nr_pte_mapped_thp; i++)
34488399 1724 collapse_pte_mapped_thp(mm, mm_slot->pte_mapped_thp[i], false);
27e1f827
SL
1725
1726out:
1727 mm_slot->nr_pte_mapped_thp = 0;
d8ed45c5 1728 mmap_write_unlock(mm);
27e1f827
SL
1729}
1730
34488399
ZK
1731static int retract_page_tables(struct address_space *mapping, pgoff_t pgoff,
1732 struct mm_struct *target_mm,
1733 unsigned long target_addr, struct page *hpage,
1734 struct collapse_control *cc)
f3f0e1d2
KS
1735{
1736 struct vm_area_struct *vma;
34488399 1737 int target_result = SCAN_FAIL;
f3f0e1d2
KS
1738
1739 i_mmap_lock_write(mapping);
1740 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
34488399
ZK
1741 int result = SCAN_FAIL;
1742 struct mm_struct *mm = NULL;
1743 unsigned long addr = 0;
1744 pmd_t *pmd;
1745 bool is_target = false;
1746
27e1f827
SL
1747 /*
1748 * Check vma->anon_vma to exclude MAP_PRIVATE mappings that
1749		 * got written to. These VMAs are likely not worth the
3e4e28c5	1750		 * mmap_write_lock(mm), as the PMD-mapping is likely to be split
27e1f827
SL
1751 * later.
1752 *
36ee2c78	1753		 * Note that the vma->anon_vma check is racy: it can be set up by
c1e8d7c6	1754		 * the fault path after the check but before we take mmap_lock.
27e1f827
SL
1755 * But page lock would prevent establishing any new ptes of the
1756 * page, so we are safe.
1757 *
1758		 * An alternative would be to drop the check, but check that the page
1759 * table is clear before calling pmdp_collapse_flush() under
1760 * ptl. It has higher chance to recover THP for the VMA, but
8d3c106e
JH
1761 * has higher cost too. It would also probably require locking
1762 * the anon_vma.
27e1f827 1763 */
023f47a8 1764 if (READ_ONCE(vma->anon_vma)) {
34488399
ZK
1765 result = SCAN_PAGE_ANON;
1766 goto next;
1767 }
f3f0e1d2 1768 addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
34488399
ZK
1769 if (addr & ~HPAGE_PMD_MASK ||
1770 vma->vm_end < addr + HPAGE_PMD_SIZE) {
1771 result = SCAN_VMA_CHECK;
1772 goto next;
1773 }
18e77600 1774 mm = vma->vm_mm;
34488399
ZK
1775 is_target = mm == target_mm && addr == target_addr;
1776 result = find_pmd_or_thp_or_none(mm, addr, &pmd);
1777 if (result != SCAN_SUCCEED)
1778 goto next;
f3f0e1d2 1779 /*
c1e8d7c6 1780 * We need exclusive mmap_lock to retract page table.
27e1f827
SL
1781 *
1782 * We use trylock due to lock inversion: we need to acquire
c1e8d7c6 1783 * mmap_lock while holding page lock. Fault path does it in
27e1f827 1784 * reverse order. Trylock is a way to avoid deadlock.
34488399
ZK
1785 *
1786 * Also, it's not MADV_COLLAPSE's job to collapse other
1787 * mappings - let khugepaged take care of them later.
f3f0e1d2 1788 */
34488399
ZK
1789 result = SCAN_PTE_MAPPED_HUGEPAGE;
1790 if ((cc->is_khugepaged || is_target) &&
1791 mmap_write_trylock(mm)) {
55fd6fcc
SB
1792 /* trylock for the same lock inversion as above */
1793 if (!vma_try_start_write(vma))
1794 goto unlock_next;
1795
023f47a8
JH
1796 /*
1797 * Re-check whether we have an ->anon_vma, because
1798 * collapse_and_free_pmd() requires that either no
1799 * ->anon_vma exists or the anon_vma is locked.
1800 * We already checked ->anon_vma above, but that check
1801 * is racy because ->anon_vma can be populated under the
1802 * mmap lock in read mode.
1803 */
1804 if (vma->anon_vma) {
1805 result = SCAN_PAGE_ANON;
1806 goto unlock_next;
1807 }
deb4c93a
PX
1808 /*
1809 * When a vma is registered with uffd-wp, we can't
1810 * recycle the pmd pgtable because there can be pte
1811			 * markers installed. Skip just this vma, so the rest of the mm/vma
1812			 * can still have the same file mapped hugely; however, it will
1813			 * always be mapped with small pages for uffd-wp
1814 * registered ranges.
1815 */
34488399
ZK
1816 if (hpage_collapse_test_exit(mm)) {
1817 result = SCAN_ANY_PROCESS;
1818 goto unlock_next;
1819 }
1820 if (userfaultfd_wp(vma)) {
1821 result = SCAN_PTE_UFFD_WP;
1822 goto unlock_next;
1823 }
1824 collapse_and_free_pmd(mm, vma, addr, pmd);
1825 if (!cc->is_khugepaged && is_target)
1826 result = set_huge_pmd(vma, addr, pmd, hpage);
1827 else
1828 result = SCAN_SUCCEED;
1829
1830unlock_next:
18e77600 1831 mmap_write_unlock(mm);
34488399
ZK
1832 goto next;
1833 }
1834 /*
1835 * Calling context will handle target mm/addr. Otherwise, let
1836 * khugepaged try again later.
1837 */
1838 if (!is_target) {
18e77600 1839 khugepaged_add_pte_mapped_thp(mm, addr);
34488399 1840 continue;
f3f0e1d2 1841 }
34488399
ZK
1842next:
1843 if (is_target)
1844 target_result = result;
f3f0e1d2
KS
1845 }
1846 i_mmap_unlock_write(mapping);
34488399 1847 return target_result;
f3f0e1d2
KS
1848}
1849
1850/**
99cb0dbd 1851 * collapse_file - collapse filemap/tmpfs/shmem pages into huge one.
f3f0e1d2 1852 *
336e6b53 1853 * @mm: process address space where collapse happens
34488399 1854 * @addr: virtual collapse start address
336e6b53
AS
1855 * @file: file that collapse on
1856 * @start: collapse start address
9710a78a 1857 * @cc: collapse context and scratchpad
336e6b53 1858 *
f3f0e1d2 1859 * Basic scheme is simple, details are more complex:
87c460a0 1860 * - allocate and lock a new huge page;
77da9389 1861 * - scan page cache replacing old pages with the new one
99cb0dbd 1862 * + swap/gup in pages if necessary;
f3f0e1d2 1863 * + fill in gaps;
77da9389
MW
1864 * + keep old pages around in case rollback is required;
1865 * - if replacing succeeds:
f3f0e1d2
KS
1866 * + copy data over;
1867 * + free old pages;
87c460a0 1868 * + unlock huge page;
f3f0e1d2
KS
1869 * - if replacing failed:
1870 * + put all pages back and unfreeze them;
77da9389 1871 * + restore gaps in the page cache;
87c460a0 1872 * + unlock and free huge page;
f3f0e1d2 1873 */
34488399
ZK
1874static int collapse_file(struct mm_struct *mm, unsigned long addr,
1875 struct file *file, pgoff_t start,
1876 struct collapse_control *cc)
f3f0e1d2 1877{
579c571e 1878 struct address_space *mapping = file->f_mapping;
50ad2f24 1879 struct page *hpage;
12904d95
JY
1880 struct page *page;
1881 struct page *tmp;
1882 struct folio *folio;
4c9473e8 1883 pgoff_t index = 0, end = start + HPAGE_PMD_NR;
f3f0e1d2 1884 LIST_HEAD(pagelist);
77da9389 1885 XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
f3f0e1d2 1886 int nr_none = 0, result = SCAN_SUCCEED;
99cb0dbd 1887 bool is_shmem = shmem_file(file);
4c9473e8 1888 int nr = 0;
f3f0e1d2 1889
99cb0dbd 1890 VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
f3f0e1d2
KS
1891 VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
1892
50ad2f24 1893 result = alloc_charge_hpage(&hpage, mm, cc);
9710a78a 1894 if (result != SCAN_SUCCEED)
f3f0e1d2 1895 goto out;
f3f0e1d2 1896
6b24ca4a
MWO
1897 /*
1898 * Ensure we have slots for all the pages in the range. This is
1899 * almost certainly a no-op because most of the pages must be present
1900 */
95feeabb
HD
1901 do {
1902 xas_lock_irq(&xas);
1903 xas_create_range(&xas);
1904 if (!xas_error(&xas))
1905 break;
1906 xas_unlock_irq(&xas);
1907 if (!xas_nomem(&xas, GFP_KERNEL)) {
95feeabb
HD
1908 result = SCAN_FAIL;
1909 goto out;
1910 }
1911 } while (1);
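	/*
	 * xas_create_range() above may need to allocate XArray nodes and can
	 * fail with -ENOMEM under the lock; xas_nomem() then preallocates
	 * with GFP_KERNEL outside the lock and we retry.  Only when that
	 * allocation fails too do we give up with SCAN_FAIL.
	 */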
1912
50ad2f24 1913 __SetPageLocked(hpage);
99cb0dbd 1914 if (is_shmem)
50ad2f24
ZK
1915 __SetPageSwapBacked(hpage);
1916 hpage->index = start;
1917 hpage->mapping = mapping;
f3f0e1d2 1918
f3f0e1d2 1919 /*
50ad2f24 1920 * At this point the hpage is locked and not up-to-date.
87c460a0
HD
1921 * It's safe to insert it into the page cache, because nobody would
1922 * be able to map it or use it in another way until we unlock it.
f3f0e1d2
KS
1923 */
1924
77da9389
MW
1925 xas_set(&xas, start);
1926 for (index = start; index < end; index++) {
12904d95 1927 page = xas_next(&xas);
77da9389
MW
1928
1929 VM_BUG_ON(index != xas.xa_index);
99cb0dbd
SL
1930 if (is_shmem) {
1931 if (!page) {
1932 /*
1933 * Stop if extent has been truncated or
1934 * hole-punched, and is now completely
1935 * empty.
1936 */
1937 if (index == start) {
1938 if (!xas_next_entry(&xas, end - 1)) {
1939 result = SCAN_TRUNCATED;
1940 goto xa_locked;
1941 }
1942 xas_set(&xas, index);
1943 }
1944 if (!shmem_charge(mapping->host, 1)) {
1945 result = SCAN_FAIL;
042a3082 1946 goto xa_locked;
701270fa 1947 }
50ad2f24 1948 xas_store(&xas, hpage);
2ce0bdfe
IO
1949 if (xas_error(&xas)) {
1950 /* revert shmem_charge performed
1951 * in the previous condition
1952 */
1953 mapping->nrpages--;
1954 shmem_uncharge(mapping->host, 1);
1955 result = SCAN_STORE_FAILED;
1956 goto xa_locked;
1957 }
99cb0dbd
SL
1958 nr_none++;
1959 continue;
701270fa 1960 }
99cb0dbd
SL
1961
1962 if (xa_is_value(page) || !PageUptodate(page)) {
1963 xas_unlock_irq(&xas);
1964 /* swap in or instantiate fallocated page */
7459c149
MWO
1965 if (shmem_get_folio(mapping->host, index,
1966 &folio, SGP_NOALLOC)) {
99cb0dbd
SL
1967 result = SCAN_FAIL;
1968 goto xa_unlocked;
1969 }
7459c149 1970 page = folio_file_page(folio, index);
99cb0dbd
SL
1971 } else if (trylock_page(page)) {
1972 get_page(page);
1973 xas_unlock_irq(&xas);
1974 } else {
1975 result = SCAN_PAGE_LOCK;
042a3082 1976 goto xa_locked;
77da9389 1977 }
99cb0dbd
SL
1978 } else { /* !is_shmem */
1979 if (!page || xa_is_value(page)) {
1980 xas_unlock_irq(&xas);
1981 page_cache_sync_readahead(mapping, &file->f_ra,
1982 file, index,
e5a59d30 1983 end - index);
99cb0dbd
SL
1984 /* drain pagevecs to help isolate_lru_page() */
1985 lru_add_drain();
1986 page = find_lock_page(mapping, index);
1987 if (unlikely(page == NULL)) {
1988 result = SCAN_FAIL;
1989 goto xa_unlocked;
1990 }
75f36069
SL
1991 } else if (PageDirty(page)) {
1992 /*
1993 * khugepaged only works on read-only fd,
1994 * so this page is dirty because it hasn't
1995 * been flushed since first write. There
1996 * won't be new dirty pages.
1997 *
1998 * Trigger async flush here and hope the
1999 * writeback is done when khugepaged
2000 * revisits this page.
2001 *
2002 * This is a one-off situation. We are not
2003 * forcing writeback in loop.
2004 */
2005 xas_unlock_irq(&xas);
2006 filemap_flush(mapping);
2007 result = SCAN_FAIL;
2008 goto xa_unlocked;
74c42e1b
RW
2009 } else if (PageWriteback(page)) {
2010 xas_unlock_irq(&xas);
2011 result = SCAN_FAIL;
2012 goto xa_unlocked;
99cb0dbd
SL
2013 } else if (trylock_page(page)) {
2014 get_page(page);
2015 xas_unlock_irq(&xas);
2016 } else {
2017 result = SCAN_PAGE_LOCK;
2018 goto xa_locked;
f3f0e1d2 2019 }
f3f0e1d2
KS
2020 }
2021
2022 /*
b93b0163 2023 * The page must be locked, so we can drop the i_pages lock
f3f0e1d2
KS
2024 * without racing with truncate.
2025 */
2026 VM_BUG_ON_PAGE(!PageLocked(page), page);
4655e5e5
SL
2027
2028 /* make sure the page is up to date */
2029 if (unlikely(!PageUptodate(page))) {
2030 result = SCAN_FAIL;
2031 goto out_unlock;
2032 }
06a5e126
HD
2033
2034 /*
2035 * If file was truncated then extended, or hole-punched, before
2036 * we locked the first page, then a THP might be there already.
58ac9a89 2037 * This will be discovered on the first iteration.
06a5e126
HD
2038 */
2039 if (PageTransCompound(page)) {
58ac9a89
ZK
2040 struct page *head = compound_head(page);
2041
2042 result = compound_order(head) == HPAGE_PMD_ORDER &&
2043 head->index == start
2044 /* Maybe PMD-mapped */
2045 ? SCAN_PTE_MAPPED_HUGEPAGE
2046 : SCAN_PAGE_COMPOUND;
06a5e126
HD
2047 goto out_unlock;
2048 }
f3f0e1d2 2049
64ab3195
VMO
2050 folio = page_folio(page);
2051
2052 if (folio_mapping(folio) != mapping) {
f3f0e1d2
KS
2053 result = SCAN_TRUNCATED;
2054 goto out_unlock;
2055 }
f3f0e1d2 2056
64ab3195
VMO
2057 if (!is_shmem && (folio_test_dirty(folio) ||
2058 folio_test_writeback(folio))) {
4655e5e5
SL
2059 /*
2060 * khugepaged only works on read-only fd, so this
2061 * page is dirty because it hasn't been flushed
2062 * since first write.
2063 */
2064 result = SCAN_FAIL;
2065 goto out_unlock;
2066 }
2067
be2d5756 2068 if (!folio_isolate_lru(folio)) {
f3f0e1d2 2069 result = SCAN_DEL_PAGE_LRU;
042a3082 2070 goto out_unlock;
f3f0e1d2
KS
2071 }
2072
64ab3195
VMO
2073 if (folio_has_private(folio) &&
2074 !filemap_release_folio(folio, GFP_KERNEL)) {
99cb0dbd 2075 result = SCAN_PAGE_HAS_PRIVATE;
64ab3195 2076 folio_putback_lru(folio);
99cb0dbd
SL
2077 goto out_unlock;
2078 }
2079
64ab3195
VMO
2080 if (folio_mapped(folio))
2081 try_to_unmap(folio,
869f7ee6 2082 TTU_IGNORE_MLOCK | TTU_BATCH_FLUSH);
f3f0e1d2 2083
77da9389
MW
2084 xas_lock_irq(&xas);
2085 xas_set(&xas, index);
f3f0e1d2 2086
77da9389 2087 VM_BUG_ON_PAGE(page != xas_load(&xas), page);
f3f0e1d2
KS
2088
2089 /*
2090 * The page is expected to have page_count() == 3:
2091 * - we hold a pin on it;
77da9389 2092 * - one reference from page cache;
f3f0e1d2
KS
2093 * - one from isolate_lru_page;
2094 */
2095 if (!page_ref_freeze(page, 3)) {
2096 result = SCAN_PAGE_COUNT;
042a3082
HD
2097 xas_unlock_irq(&xas);
2098 putback_lru_page(page);
2099 goto out_unlock;
f3f0e1d2
KS
2100 }
2101
2102 /*
2103 * Add the page to the list to be able to undo the collapse if
2104		 * something goes wrong.
2105 */
2106 list_add_tail(&page->lru, &pagelist);
2107
2108 /* Finally, replace with the new page. */
50ad2f24 2109 xas_store(&xas, hpage);
2ce0bdfe
IO
2110 /* We can't get an ENOMEM here (because the allocation happened before)
2111 * but let's check for errors (XArray implementation can be
2112 * changed in the future)
2113 */
2114 WARN_ON_ONCE(xas_error(&xas));
f3f0e1d2 2115 continue;
f3f0e1d2
KS
2116out_unlock:
2117 unlock_page(page);
2118 put_page(page);
042a3082 2119 goto xa_unlocked;
f3f0e1d2
KS
2120 }
2121
12904d95 2122 if (!is_shmem) {
09d91cda 2123 filemap_nr_thps_inc(mapping);
eb6ecbed
CF
2124 /*
2125 * Paired with smp_mb() in do_dentry_open() to ensure
2126 * i_writecount is up to date and the update to nr_thps is
2127 * visible. Ensures the page cache will be truncated if the
2128 * file is opened writable.
2129 */
2130 smp_mb();
2131 if (inode_is_open_for_write(mapping->host)) {
2132 result = SCAN_FAIL;
eb6ecbed 2133 filemap_nr_thps_dec(mapping);
eb6ecbed 2134 }
09d91cda 2135 }
99cb0dbd 2136
2ce0bdfe
IO
2137 /* Here we can't get an ENOMEM (because entries were
2138	 * previously allocated), but let's check for errors
2139 * (XArray implementation can be changed in the future)
2140 */
2141 WARN_ON_ONCE(xas_error(&xas));
042a3082
HD
2142xa_locked:
2143 xas_unlock_irq(&xas);
77da9389 2144xa_unlocked:
042a3082 2145
6d9df8a5
HD
2146 /*
2147 * If collapse is successful, flush must be done now before copying.
2148 * If collapse is unsuccessful, does flush actually need to be done?
2149 * Do it anyway, to clear the state.
2150 */
2151 try_to_unmap_flush();
2152
f3f0e1d2 2153 if (result == SCAN_SUCCEED) {
f3f0e1d2 2154 /*
77da9389	2155		 * Replacing the old pages with the new one has succeeded; now we
12904d95 2156 * attempt to copy the contents.
f3f0e1d2 2157 */
2af8ff29 2158 index = start;
12904d95 2159 list_for_each_entry(page, &pagelist, lru) {
2af8ff29 2160 while (index < page->index) {
50ad2f24 2161 clear_highpage(hpage + (index % HPAGE_PMD_NR));
2af8ff29
HD
2162 index++;
2163 }
12904d95
JY
2164 if (copy_mc_highpage(hpage + (page->index % HPAGE_PMD_NR),
2165 page) > 0) {
2166 result = SCAN_COPY_MC;
2167 break;
2168 }
2169 index++;
2170 }
2171 while (result == SCAN_SUCCEED && index < end) {
2172 clear_highpage(hpage + (index % HPAGE_PMD_NR));
2173 index++;
2174 }
2175 }
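	/*
	 * copy_mc_highpage() is the machine-check aware copy: a non-zero
	 * return means a source subpage could not be read (e.g. hardware
	 * poison), so copying stops with SCAN_COPY_MC and the rollback path
	 * below puts the old pages back instead of consuming the bad data.
	 */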
2176
2177 nr = thp_nr_pages(hpage);
2178 if (result == SCAN_SUCCEED) {
2179 /*
2180		 * Copying the old pages to the huge one has succeeded; now we
2181 * need to free the old pages.
2182 */
2183 list_for_each_entry_safe(page, tmp, &pagelist, lru) {
f3f0e1d2 2184 list_del(&page->lru);
f3f0e1d2 2185 page->mapping = NULL;
042a3082 2186 page_ref_unfreeze(page, 1);
f3f0e1d2
KS
2187 ClearPageActive(page);
2188 ClearPageUnevictable(page);
042a3082 2189 unlock_page(page);
f3f0e1d2 2190 put_page(page);
2af8ff29 2191 }
12904d95
JY
2192
2193 xas_lock_irq(&xas);
2194 if (is_shmem)
2195 __mod_lruvec_page_state(hpage, NR_SHMEM_THPS, nr);
2196 else
2197 __mod_lruvec_page_state(hpage, NR_FILE_THPS, nr);
2198
2199 if (nr_none) {
2200 __mod_lruvec_page_state(hpage, NR_FILE_PAGES, nr_none);
2201 /* nr_none is always 0 for non-shmem. */
2202 __mod_lruvec_page_state(hpage, NR_SHMEM, nr_none);
f3f0e1d2 2203 }
12904d95
JY
2204 /* Join all the small entries into a single multi-index entry. */
2205 xas_set_order(&xas, start, HPAGE_PMD_ORDER);
2206 xas_store(&xas, hpage);
2207 xas_unlock_irq(&xas);
f3f0e1d2 2208
284a344e
VMO
2209 folio = page_folio(hpage);
2210 folio_mark_uptodate(folio);
2211 folio_ref_add(folio, HPAGE_PMD_NR - 1);
2212
6058eaec 2213 if (is_shmem)
284a344e
VMO
2214 folio_mark_dirty(folio);
2215 folio_add_lru(folio);
f3f0e1d2 2216
042a3082
HD
2217 /*
2218 * Remove pte page tables, so we can re-fault the page as huge.
2219 */
34488399
ZK
2220 result = retract_page_tables(mapping, start, mm, addr, hpage,
2221 cc);
50ad2f24
ZK
2222 unlock_page(hpage);
2223 hpage = NULL;
f3f0e1d2 2224 } else {
77da9389 2225 /* Something went wrong: roll back page cache changes */
77da9389 2226 xas_lock_irq(&xas);
2f55f070
ML
2227 if (nr_none) {
2228 mapping->nrpages -= nr_none;
99cb0dbd 2229 shmem_uncharge(mapping->host, nr_none);
2f55f070 2230 }
aaa52e34 2231
77da9389
MW
2232 xas_set(&xas, start);
2233 xas_for_each(&xas, page, end - 1) {
f3f0e1d2
KS
2234 page = list_first_entry_or_null(&pagelist,
2235 struct page, lru);
77da9389 2236 if (!page || xas.xa_index < page->index) {
f3f0e1d2
KS
2237 if (!nr_none)
2238 break;
f3f0e1d2 2239 nr_none--;
59749e6c 2240 /* Put holes back where they were */
77da9389 2241 xas_store(&xas, NULL);
f3f0e1d2
KS
2242 continue;
2243 }
2244
77da9389 2245 VM_BUG_ON_PAGE(page->index != xas.xa_index, page);
f3f0e1d2
KS
2246
2247 /* Unfreeze the page. */
2248 list_del(&page->lru);
2249 page_ref_unfreeze(page, 2);
77da9389
MW
2250 xas_store(&xas, page);
2251 xas_pause(&xas);
2252 xas_unlock_irq(&xas);
f3f0e1d2 2253 unlock_page(page);
042a3082 2254 putback_lru_page(page);
77da9389 2255 xas_lock_irq(&xas);
f3f0e1d2
KS
2256 }
2257 VM_BUG_ON(nr_none);
12904d95
JY
2258 /*
2259 * Undo the updates of filemap_nr_thps_inc for non-SHMEM
2260 * file only. This undo is not needed unless failure is
2261 * due to SCAN_COPY_MC.
2262 */
2263 if (!is_shmem && result == SCAN_COPY_MC) {
2264 filemap_nr_thps_dec(mapping);
2265 /*
2266 * Paired with smp_mb() in do_dentry_open() to
2267 * ensure the update to nr_thps is visible.
2268 */
2269 smp_mb();
2270 }
2271
77da9389 2272 xas_unlock_irq(&xas);
f3f0e1d2 2273
50ad2f24 2274 hpage->mapping = NULL;
f3f0e1d2 2275 }
042a3082 2276
50ad2f24
ZK
2277 if (hpage)
2278 unlock_page(hpage);
f3f0e1d2
KS
2279out:
2280 VM_BUG_ON(!list_empty(&pagelist));
7cb1d7ef 2281 if (hpage)
50ad2f24 2282 put_page(hpage);
4c9473e8
GM
2283
2284 trace_mm_khugepaged_collapse_file(mm, hpage, index, is_shmem, addr, file, nr, result);
50ad2f24 2285 return result;
f3f0e1d2
KS
2286}
2287
34488399
ZK
2288static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
2289 struct file *file, pgoff_t start,
2290 struct collapse_control *cc)
f3f0e1d2
KS
2291{
2292 struct page *page = NULL;
579c571e 2293 struct address_space *mapping = file->f_mapping;
85b392db 2294 XA_STATE(xas, &mapping->i_pages, start);
f3f0e1d2
KS
2295 int present, swap;
2296 int node = NUMA_NO_NODE;
2297 int result = SCAN_SUCCEED;
2298
2299 present = 0;
2300 swap = 0;
34d6b470 2301 memset(cc->node_load, 0, sizeof(cc->node_load));
e031ff96 2302 nodes_clear(cc->alloc_nmask);
f3f0e1d2 2303 rcu_read_lock();
85b392db
MW
2304 xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) {
2305 if (xas_retry(&xas, page))
f3f0e1d2 2306 continue;
f3f0e1d2 2307
85b392db 2308 if (xa_is_value(page)) {
d8ea7cc8
ZK
2309 ++swap;
2310 if (cc->is_khugepaged &&
2311 swap > khugepaged_max_ptes_swap) {
f3f0e1d2 2312 result = SCAN_EXCEED_SWAP_PTE;
e9ea874a 2313 count_vm_event(THP_SCAN_EXCEED_SWAP_PTE);
f3f0e1d2
KS
2314 break;
2315 }
2316 continue;
2317 }
2318
6b24ca4a 2319 /*
58ac9a89 2320 * TODO: khugepaged should compact smaller compound pages
6b24ca4a
MWO
2321 * into a PMD sized page
2322 */
f3f0e1d2 2323 if (PageTransCompound(page)) {
58ac9a89
ZK
2324 struct page *head = compound_head(page);
2325
2326 result = compound_order(head) == HPAGE_PMD_ORDER &&
2327 head->index == start
2328 /* Maybe PMD-mapped */
2329 ? SCAN_PTE_MAPPED_HUGEPAGE
2330 : SCAN_PAGE_COMPOUND;
2331 /*
2332 * For SCAN_PTE_MAPPED_HUGEPAGE, further processing
2333 * by the caller won't touch the page cache, and so
2334 * it's safe to skip LRU and refcount checks before
2335 * returning.
2336 */
f3f0e1d2
KS
2337 break;
2338 }
2339
2340 node = page_to_nid(page);
7d2c4385 2341 if (hpage_collapse_scan_abort(node, cc)) {
f3f0e1d2
KS
2342 result = SCAN_SCAN_ABORT;
2343 break;
2344 }
34d6b470 2345 cc->node_load[node]++;
f3f0e1d2
KS
2346
2347 if (!PageLRU(page)) {
2348 result = SCAN_PAGE_LRU;
2349 break;
2350 }
2351
99cb0dbd
SL
2352 if (page_count(page) !=
2353 1 + page_mapcount(page) + page_has_private(page)) {
f3f0e1d2
KS
2354 result = SCAN_PAGE_COUNT;
2355 break;
2356 }
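		/*
		 * The refcount check above bails out with SCAN_PAGE_COUNT if
		 * the page has any reference beyond the page cache, its pte
		 * mappings (page_mapcount()) and fs-private data
		 * (page_has_private()): an extra pin means someone else is
		 * using the page and collapsing would be unsafe.
		 */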
2357
2358 /*
2359 * We probably should check if the page is referenced here, but
2360 * nobody would transfer pte_young() to PageReferenced() for us.
2361 * And rmap walk here is just too costly...
2362 */
2363
2364 present++;
2365
2366 if (need_resched()) {
85b392db 2367 xas_pause(&xas);
f3f0e1d2 2368 cond_resched_rcu();
f3f0e1d2
KS
2369 }
2370 }
2371 rcu_read_unlock();
2372
2373 if (result == SCAN_SUCCEED) {
d8ea7cc8
ZK
2374 if (cc->is_khugepaged &&
2375 present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
f3f0e1d2 2376 result = SCAN_EXCEED_NONE_PTE;
e9ea874a 2377 count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
f3f0e1d2 2378 } else {
34488399 2379 result = collapse_file(mm, addr, file, start, cc);
f3f0e1d2
KS
2380 }
2381 }
2382
045634ff 2383 trace_mm_khugepaged_scan_file(mm, page, file, present, swap, result);
50ad2f24 2384 return result;
f3f0e1d2
KS
2385}
2386#else
34488399
ZK
2387static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
2388 struct file *file, pgoff_t start,
2389 struct collapse_control *cc)
f3f0e1d2
KS
2390{
2391 BUILD_BUG();
2392}
27e1f827 2393
b26e2701 2394static void khugepaged_collapse_pte_mapped_thps(struct khugepaged_mm_slot *mm_slot)
27e1f827 2395{
27e1f827 2396}
58ac9a89
ZK
2397
2398static bool khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
2399 unsigned long addr)
2400{
2401 return false;
2402}
f3f0e1d2
KS
2403#endif
2404
50ad2f24 2405static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
34d6b470 2406 struct collapse_control *cc)
b46e756f
KS
2407 __releases(&khugepaged_mm_lock)
2408 __acquires(&khugepaged_mm_lock)
2409{
68540502 2410 struct vma_iterator vmi;
b26e2701
QZ
2411 struct khugepaged_mm_slot *mm_slot;
2412 struct mm_slot *slot;
b46e756f
KS
2413 struct mm_struct *mm;
2414 struct vm_area_struct *vma;
2415 int progress = 0;
2416
2417 VM_BUG_ON(!pages);
35f3aa39 2418 lockdep_assert_held(&khugepaged_mm_lock);
50ad2f24 2419 *result = SCAN_FAIL;
b46e756f 2420
b26e2701 2421 if (khugepaged_scan.mm_slot) {
b46e756f 2422 mm_slot = khugepaged_scan.mm_slot;
b26e2701
QZ
2423 slot = &mm_slot->slot;
2424 } else {
2425 slot = list_entry(khugepaged_scan.mm_head.next,
b46e756f 2426 struct mm_slot, mm_node);
b26e2701 2427 mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
b46e756f
KS
2428 khugepaged_scan.address = 0;
2429 khugepaged_scan.mm_slot = mm_slot;
2430 }
2431 spin_unlock(&khugepaged_mm_lock);
27e1f827 2432 khugepaged_collapse_pte_mapped_thps(mm_slot);
b46e756f 2433
b26e2701 2434 mm = slot->mm;
3b454ad3
YS
2435 /*
2436 * Don't wait for semaphore (to avoid long wait times). Just move to
2437 * the next mm on the list.
2438 */
2439 vma = NULL;
d8ed45c5 2440 if (unlikely(!mmap_read_trylock(mm)))
c1e8d7c6 2441 goto breakouterloop_mmap_lock;
b46e756f
KS
2442
2443 progress++;
68540502
MWO
2444 if (unlikely(hpage_collapse_test_exit(mm)))
2445 goto breakouterloop;
2446
2447 vma_iter_init(&vmi, mm, khugepaged_scan.address);
2448 for_each_vma(vmi, vma) {
b46e756f
KS
2449 unsigned long hstart, hend;
2450
2451 cond_resched();
7d2c4385 2452 if (unlikely(hpage_collapse_test_exit(mm))) {
b46e756f
KS
2453 progress++;
2454 break;
2455 }
a7f4e6e4 2456 if (!hugepage_vma_check(vma, vma->vm_flags, false, false, true)) {
b46e756f
KS
2457skip:
2458 progress++;
2459 continue;
2460 }
4fa6893f
YS
2461 hstart = round_up(vma->vm_start, HPAGE_PMD_SIZE);
2462 hend = round_down(vma->vm_end, HPAGE_PMD_SIZE);
b46e756f
KS
2463 if (khugepaged_scan.address > hend)
2464 goto skip;
2465 if (khugepaged_scan.address < hstart)
2466 khugepaged_scan.address = hstart;
2467 VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
2468
2469 while (khugepaged_scan.address < hend) {
50ad2f24
ZK
2470 bool mmap_locked = true;
2471
b46e756f 2472 cond_resched();
7d2c4385 2473 if (unlikely(hpage_collapse_test_exit(mm)))
b46e756f
KS
2474 goto breakouterloop;
2475
2476 VM_BUG_ON(khugepaged_scan.address < hstart ||
2477 khugepaged_scan.address + HPAGE_PMD_SIZE >
2478 hend);
99cb0dbd 2479 if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
396bcc52 2480 struct file *file = get_file(vma->vm_file);
f3f0e1d2
KS
2481 pgoff_t pgoff = linear_page_index(vma,
2482 khugepaged_scan.address);
99cb0dbd 2483
d8ed45c5 2484 mmap_read_unlock(mm);
34488399
ZK
2485 *result = hpage_collapse_scan_file(mm,
2486 khugepaged_scan.address,
2487 file, pgoff, cc);
50ad2f24 2488 mmap_locked = false;
f3f0e1d2
KS
2489 fput(file);
2490 } else {
7d2c4385
ZK
2491 *result = hpage_collapse_scan_pmd(mm, vma,
2492 khugepaged_scan.address,
2493 &mmap_locked,
2494 cc);
f3f0e1d2 2495 }
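			/*
			 * File and shmem VMAs are scanned through the page
			 * cache, so mmap_lock is dropped first and mmap_locked
			 * cleared; anonymous ranges are scanned in place, and
			 * hpage_collapse_scan_pmd() reports through
			 * mmap_locked whether it dropped the lock itself.
			 */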
58ac9a89
ZK
2496 switch (*result) {
2497 case SCAN_PTE_MAPPED_HUGEPAGE: {
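				/*
				 * The scan found a PMD-sized compound page
				 * that is still mapped by ptes.  If the pmd
				 * still points at a pte table, remember the
				 * address so khugepaged_collapse_pte_mapped_thps()
				 * can retract it later under the mmap write
				 * lock, then fall through and count it as
				 * collapsed.
				 */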
2498 pmd_t *pmd;
2499
2500 *result = find_pmd_or_thp_or_none(mm,
2501 khugepaged_scan.address,
2502 &pmd);
2503 if (*result != SCAN_SUCCEED)
2504 break;
2505 if (!khugepaged_add_pte_mapped_thp(mm,
2506 khugepaged_scan.address))
2507 break;
2508 } fallthrough;
2509 case SCAN_SUCCEED:
50ad2f24 2510 ++khugepaged_pages_collapsed;
58ac9a89
ZK
2511 break;
2512 default:
2513 break;
f3f0e1d2 2514 }
58ac9a89 2515
b46e756f
KS
2516 /* move to next address */
2517 khugepaged_scan.address += HPAGE_PMD_SIZE;
2518 progress += HPAGE_PMD_NR;
50ad2f24
ZK
2519 if (!mmap_locked)
2520 /*
2521				 * We released mmap_lock so break the loop. Note
2522				 * that we drop mmap_lock before all hugepage
2523				 * allocations, so if allocation fails, we are
2524				 * guaranteed to break here and report the
2525				 * correct result back to the caller.
2526 */
c1e8d7c6 2527 goto breakouterloop_mmap_lock;
b46e756f
KS
2528 if (progress >= pages)
2529 goto breakouterloop;
2530 }
2531 }
2532breakouterloop:
d8ed45c5 2533 mmap_read_unlock(mm); /* exit_mmap will destroy ptes after this */
c1e8d7c6 2534breakouterloop_mmap_lock:
b46e756f
KS
2535
2536 spin_lock(&khugepaged_mm_lock);
2537 VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
2538 /*
2539 * Release the current mm_slot if this mm is about to die, or
2540 * if we scanned all vmas of this mm.
2541 */
7d2c4385 2542 if (hpage_collapse_test_exit(mm) || !vma) {
b46e756f
KS
2543 /*
2544 * Make sure that if mm_users is reaching zero while
2545 * khugepaged runs here, khugepaged_exit will find
2546 * mm_slot not pointing to the exiting mm.
2547 */
b26e2701
QZ
2548 if (slot->mm_node.next != &khugepaged_scan.mm_head) {
2549 slot = list_entry(slot->mm_node.next,
2550 struct mm_slot, mm_node);
2551 khugepaged_scan.mm_slot =
2552 mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
b46e756f
KS
2553 khugepaged_scan.address = 0;
2554 } else {
2555 khugepaged_scan.mm_slot = NULL;
2556 khugepaged_full_scans++;
2557 }
2558
2559 collect_mm_slot(mm_slot);
2560 }
2561
2562 return progress;
2563}
2564
2565static int khugepaged_has_work(void)
2566{
2567 return !list_empty(&khugepaged_scan.mm_head) &&
1064026b 2568 hugepage_flags_enabled();
b46e756f
KS
2569}
2570
2571static int khugepaged_wait_event(void)
2572{
2573 return !list_empty(&khugepaged_scan.mm_head) ||
2574 kthread_should_stop();
2575}
2576
34d6b470 2577static void khugepaged_do_scan(struct collapse_control *cc)
b46e756f 2578{
b46e756f 2579 unsigned int progress = 0, pass_through_head = 0;
89dc6a96 2580 unsigned int pages = READ_ONCE(khugepaged_pages_to_scan);
b46e756f 2581 bool wait = true;
50ad2f24 2582 int result = SCAN_SUCCEED;
b46e756f 2583
a980df33
KS
2584 lru_add_drain_all();
2585
c6a7f445 2586 while (true) {
b46e756f
KS
2587 cond_resched();
2588
2589 if (unlikely(kthread_should_stop() || try_to_freeze()))
2590 break;
2591
2592 spin_lock(&khugepaged_mm_lock);
2593 if (!khugepaged_scan.mm_slot)
2594 pass_through_head++;
2595 if (khugepaged_has_work() &&
2596 pass_through_head < 2)
2597 progress += khugepaged_scan_mm_slot(pages - progress,
50ad2f24 2598 &result, cc);
b46e756f
KS
2599 else
2600 progress = pages;
2601 spin_unlock(&khugepaged_mm_lock);
b46e756f 2602
c6a7f445
YS
2603 if (progress >= pages)
2604 break;
2605
50ad2f24 2606 if (result == SCAN_ALLOC_HUGE_PAGE_FAIL) {
c6a7f445
YS
2607 /*
2608			 * If we fail to allocate the first time, try to sleep for
2609 * a while. When hit again, cancel the scan.
2610 */
2611 if (!wait)
2612 break;
2613 wait = false;
c6a7f445
YS
2614 khugepaged_alloc_sleep();
2615 }
2616 }
b46e756f
KS
2617}
2618
2619static bool khugepaged_should_wakeup(void)
2620{
2621 return kthread_should_stop() ||
2622 time_after_eq(jiffies, khugepaged_sleep_expire);
2623}
2624
2625static void khugepaged_wait_work(void)
2626{
2627 if (khugepaged_has_work()) {
2628 const unsigned long scan_sleep_jiffies =
2629 msecs_to_jiffies(khugepaged_scan_sleep_millisecs);
2630
2631 if (!scan_sleep_jiffies)
2632 return;
2633
2634 khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
2635 wait_event_freezable_timeout(khugepaged_wait,
2636 khugepaged_should_wakeup(),
2637 scan_sleep_jiffies);
2638 return;
2639 }
2640
1064026b 2641 if (hugepage_flags_enabled())
b46e756f
KS
2642 wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
2643}
2644
2645static int khugepaged(void *none)
2646{
b26e2701 2647 struct khugepaged_mm_slot *mm_slot;
b46e756f
KS
2648
2649 set_freezable();
2650 set_user_nice(current, MAX_NICE);
2651
2652 while (!kthread_should_stop()) {
34d6b470 2653 khugepaged_do_scan(&khugepaged_collapse_control);
b46e756f
KS
2654 khugepaged_wait_work();
2655 }
2656
2657 spin_lock(&khugepaged_mm_lock);
2658 mm_slot = khugepaged_scan.mm_slot;
2659 khugepaged_scan.mm_slot = NULL;
2660 if (mm_slot)
2661 collect_mm_slot(mm_slot);
2662 spin_unlock(&khugepaged_mm_lock);
2663 return 0;
2664}
2665
2666static void set_recommended_min_free_kbytes(void)
2667{
2668 struct zone *zone;
2669 int nr_zones = 0;
2670 unsigned long recommended_min;
2671
1064026b 2672 if (!hugepage_flags_enabled()) {
bd3400ea
LF
2673 calculate_min_free_kbytes();
2674 goto update_wmarks;
2675 }
2676
b7d349c7
JK
2677 for_each_populated_zone(zone) {
2678 /*
2679 * We don't need to worry about fragmentation of
2680 * ZONE_MOVABLE since it only has movable pages.
2681 */
2682 if (zone_idx(zone) > gfp_zone(GFP_USER))
2683 continue;
2684
b46e756f 2685 nr_zones++;
b7d349c7 2686 }
b46e756f
KS
2687
2688 /* Ensure 2 pageblocks are free to assist fragmentation avoidance */
2689 recommended_min = pageblock_nr_pages * nr_zones * 2;
2690
2691 /*
2692 * Make sure that on average at least two pageblocks are almost free
2693 * of another type, one for a migratetype to fall back to and a
2694	 * second to avoid subsequent fallbacks of other types. There are 3
2695 * MIGRATE_TYPES we care about.
2696 */
2697 recommended_min += pageblock_nr_pages * nr_zones *
2698 MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
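	/*
	 * For example, assuming 4 KiB pages, 2 MiB pageblocks
	 * (pageblock_nr_pages == 512) and the 3 pcp migratetypes, this works
	 * out to 512 * (2 + 3 * 3) = 5632 pages, i.e. roughly 22 MiB per
	 * populated zone, before the 5%-of-lowmem cap below.
	 */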
2699
2700 /* don't ever allow to reserve more than 5% of the lowmem */
2701 recommended_min = min(recommended_min,
2702 (unsigned long) nr_free_buffer_pages() / 20);
2703 recommended_min <<= (PAGE_SHIFT-10);
2704
2705 if (recommended_min > min_free_kbytes) {
2706 if (user_min_free_kbytes >= 0)
2707 pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
2708 min_free_kbytes, recommended_min);
2709
2710 min_free_kbytes = recommended_min;
2711 }
bd3400ea
LF
2712
2713update_wmarks:
b46e756f
KS
2714 setup_per_zone_wmarks();
2715}
2716
2717int start_stop_khugepaged(void)
2718{
b46e756f
KS
2719 int err = 0;
2720
2721 mutex_lock(&khugepaged_mutex);
1064026b 2722 if (hugepage_flags_enabled()) {
b46e756f
KS
2723 if (!khugepaged_thread)
2724 khugepaged_thread = kthread_run(khugepaged, NULL,
2725 "khugepaged");
2726 if (IS_ERR(khugepaged_thread)) {
2727 pr_err("khugepaged: kthread_run(khugepaged) failed\n");
2728 err = PTR_ERR(khugepaged_thread);
2729 khugepaged_thread = NULL;
2730 goto fail;
2731 }
2732
2733 if (!list_empty(&khugepaged_scan.mm_head))
2734 wake_up_interruptible(&khugepaged_wait);
b46e756f
KS
2735 } else if (khugepaged_thread) {
2736 kthread_stop(khugepaged_thread);
2737 khugepaged_thread = NULL;
2738 }
bd3400ea 2739 set_recommended_min_free_kbytes();
b46e756f
KS
2740fail:
2741 mutex_unlock(&khugepaged_mutex);
2742 return err;
2743}
4aab2be0
VB
2744
2745void khugepaged_min_free_kbytes_update(void)
2746{
2747 mutex_lock(&khugepaged_mutex);
1064026b 2748 if (hugepage_flags_enabled() && khugepaged_thread)
4aab2be0
VB
2749 set_recommended_min_free_kbytes();
2750 mutex_unlock(&khugepaged_mutex);
2751}
7d8faaf1 2752
57e9cc50
JW
2753bool current_is_khugepaged(void)
2754{
2755 return kthread_func(current) == khugepaged;
2756}
2757
7d8faaf1
ZK
2758static int madvise_collapse_errno(enum scan_result r)
2759{
2760 /*
2761 * MADV_COLLAPSE breaks from existing madvise(2) conventions to provide
2762 * actionable feedback to caller, so they may take an appropriate
2763 * fallback measure depending on the nature of the failure.
2764 */
2765 switch (r) {
2766 case SCAN_ALLOC_HUGE_PAGE_FAIL:
2767 return -ENOMEM;
2768 case SCAN_CGROUP_CHARGE_FAIL:
2769 return -EBUSY;
2770	/* Resource temporarily unavailable - trying again might succeed */
ae63c898 2771 case SCAN_PAGE_COUNT:
7d8faaf1
ZK
2772 case SCAN_PAGE_LOCK:
2773 case SCAN_PAGE_LRU:
0f3e2a2c 2774 case SCAN_DEL_PAGE_LRU:
7d8faaf1
ZK
2775 return -EAGAIN;
2776 /*
2777 * Other: Trying again likely not to succeed / error intrinsic to
2778 * specified memory range. khugepaged likely won't be able to collapse
2779 * either.
2780 */
2781 default:
2782 return -EINVAL;
2783 }
2784}
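/*
 * Illustrative only - a hypothetical userspace helper (not part of this file)
 * acting on the errno convention above: EAGAIN is worth a retry, the other
 * errnos generally are not.  Assumes a libc/kernel that define MADV_COLLAPSE.
 *
 *	#include <sys/mman.h>
 *	#include <errno.h>
 *
 *	int try_collapse(void *addr, size_t len)
 *	{
 *		int retries = 2;
 *
 *		while (madvise(addr, len, MADV_COLLAPSE)) {
 *			if (errno == EAGAIN && retries--)
 *				continue;
 *			return -1;
 *		}
 *		return 0;
 *	}
 */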
2785
2786int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev,
2787 unsigned long start, unsigned long end)
2788{
2789 struct collapse_control *cc;
2790 struct mm_struct *mm = vma->vm_mm;
2791 unsigned long hstart, hend, addr;
2792 int thps = 0, last_fail = SCAN_FAIL;
2793 bool mmap_locked = true;
2794
2795 BUG_ON(vma->vm_start > start);
2796 BUG_ON(vma->vm_end < end);
2797
2798 *prev = vma;
2799
7d8faaf1
ZK
2800 if (!hugepage_vma_check(vma, vma->vm_flags, false, false, false))
2801 return -EINVAL;
2802
2803 cc = kmalloc(sizeof(*cc), GFP_KERNEL);
2804 if (!cc)
2805 return -ENOMEM;
2806 cc->is_khugepaged = false;
7d8faaf1
ZK
2807
2808 mmgrab(mm);
2809 lru_add_drain_all();
2810
2811 hstart = (start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2812 hend = end & HPAGE_PMD_MASK;
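	/*
	 * hstart/hend above round the requested range inward to PMD
	 * boundaries (start rounded up, end rounded down), so only huge page
	 * ranges fully covered by [start, end) are attempted.
	 */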
2813
2814 for (addr = hstart; addr < hend; addr += HPAGE_PMD_SIZE) {
2815 int result = SCAN_FAIL;
2816
2817 if (!mmap_locked) {
2818 cond_resched();
2819 mmap_read_lock(mm);
2820 mmap_locked = true;
34488399
ZK
2821 result = hugepage_vma_revalidate(mm, addr, false, &vma,
2822 cc);
7d8faaf1
ZK
2823 if (result != SCAN_SUCCEED) {
2824 last_fail = result;
2825 goto out_nolock;
2826 }
4d24de94 2827
52dc0310 2828 hend = min(hend, vma->vm_end & HPAGE_PMD_MASK);
7d8faaf1
ZK
2829 }
2830 mmap_assert_locked(mm);
2831 memset(cc->node_load, 0, sizeof(cc->node_load));
e031ff96 2832 nodes_clear(cc->alloc_nmask);
34488399
ZK
2833 if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
2834 struct file *file = get_file(vma->vm_file);
2835 pgoff_t pgoff = linear_page_index(vma, addr);
2836
2837 mmap_read_unlock(mm);
2838 mmap_locked = false;
2839 result = hpage_collapse_scan_file(mm, addr, file, pgoff,
2840 cc);
2841 fput(file);
2842 } else {
2843 result = hpage_collapse_scan_pmd(mm, vma, addr,
2844 &mmap_locked, cc);
2845 }
7d8faaf1
ZK
2846 if (!mmap_locked)
2847 *prev = NULL; /* Tell caller we dropped mmap_lock */
2848
34488399 2849handle_result:
7d8faaf1
ZK
2850 switch (result) {
2851 case SCAN_SUCCEED:
2852 case SCAN_PMD_MAPPED:
2853 ++thps;
2854 break;
34488399
ZK
2855 case SCAN_PTE_MAPPED_HUGEPAGE:
2856 BUG_ON(mmap_locked);
2857 BUG_ON(*prev);
2858 mmap_write_lock(mm);
2859 result = collapse_pte_mapped_thp(mm, addr, true);
2860 mmap_write_unlock(mm);
2861 goto handle_result;
7d8faaf1
ZK
2862 /* Whitelisted set of results where continuing OK */
2863 case SCAN_PMD_NULL:
2864 case SCAN_PTE_NON_PRESENT:
2865 case SCAN_PTE_UFFD_WP:
2866 case SCAN_PAGE_RO:
2867 case SCAN_LACK_REFERENCED_PAGE:
2868 case SCAN_PAGE_NULL:
2869 case SCAN_PAGE_COUNT:
2870 case SCAN_PAGE_LOCK:
2871 case SCAN_PAGE_COMPOUND:
2872 case SCAN_PAGE_LRU:
0f3e2a2c 2873 case SCAN_DEL_PAGE_LRU:
7d8faaf1
ZK
2874 last_fail = result;
2875 break;
2876 default:
2877 last_fail = result;
2878 /* Other error, exit */
2879 goto out_maybelock;
2880 }
2881 }
2882
2883out_maybelock:
2884 /* Caller expects us to hold mmap_lock on return */
2885 if (!mmap_locked)
2886 mmap_read_lock(mm);
2887out_nolock:
2888 mmap_assert_locked(mm);
2889 mmdrop(mm);
2890 kfree(cc);
2891
2892 return thps == ((hend - hstart) >> HPAGE_PMD_SHIFT) ? 0
2893 : madvise_collapse_errno(last_fail);
2894}