1 // SPDX-License-Identifier: GPL-2.0
2 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
3
4 #include <linux/mm.h>
5 #include <linux/sched.h>
6 #include <linux/sched/mm.h>
7 #include <linux/sched/coredump.h>
8 #include <linux/mmu_notifier.h>
9 #include <linux/rmap.h>
10 #include <linux/swap.h>
11 #include <linux/mm_inline.h>
12 #include <linux/kthread.h>
13 #include <linux/khugepaged.h>
14 #include <linux/freezer.h>
15 #include <linux/mman.h>
16 #include <linux/hashtable.h>
17 #include <linux/userfaultfd_k.h>
18 #include <linux/page_idle.h>
19 #include <linux/page_table_check.h>
20 #include <linux/rcupdate_wait.h>
21 #include <linux/swapops.h>
22 #include <linux/shmem_fs.h>
23 #include <linux/ksm.h>
24
25 #include <asm/tlb.h>
26 #include <asm/pgalloc.h>
27 #include "internal.h"
28 #include "mm_slot.h"
29
30 enum scan_result {
31         SCAN_FAIL,
32         SCAN_SUCCEED,
33         SCAN_PMD_NULL,
34         SCAN_PMD_NONE,
35         SCAN_PMD_MAPPED,
36         SCAN_EXCEED_NONE_PTE,
37         SCAN_EXCEED_SWAP_PTE,
38         SCAN_EXCEED_SHARED_PTE,
39         SCAN_PTE_NON_PRESENT,
40         SCAN_PTE_UFFD_WP,
41         SCAN_PTE_MAPPED_HUGEPAGE,
42         SCAN_PAGE_RO,
43         SCAN_LACK_REFERENCED_PAGE,
44         SCAN_PAGE_NULL,
45         SCAN_SCAN_ABORT,
46         SCAN_PAGE_COUNT,
47         SCAN_PAGE_LRU,
48         SCAN_PAGE_LOCK,
49         SCAN_PAGE_ANON,
50         SCAN_PAGE_COMPOUND,
51         SCAN_ANY_PROCESS,
52         SCAN_VMA_NULL,
53         SCAN_VMA_CHECK,
54         SCAN_ADDRESS_RANGE,
55         SCAN_DEL_PAGE_LRU,
56         SCAN_ALLOC_HUGE_PAGE_FAIL,
57         SCAN_CGROUP_CHARGE_FAIL,
58         SCAN_TRUNCATED,
59         SCAN_PAGE_HAS_PRIVATE,
60         SCAN_STORE_FAILED,
61         SCAN_COPY_MC,
62         SCAN_PAGE_FILLED,
63 };
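
/*
 * These result codes are reported through the huge_memory tracepoints
 * created just below (CREATE_TRACE_POINTS + <trace/events/huge_memory.h>),
 * which also translate them to human-readable status strings.
 */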
64
65 #define CREATE_TRACE_POINTS
66 #include <trace/events/huge_memory.h>
67
68 static struct task_struct *khugepaged_thread __read_mostly;
69 static DEFINE_MUTEX(khugepaged_mutex);
70
71 /* default: scan 8*512 PTEs (or VMAs) every 10 seconds */
72 static unsigned int khugepaged_pages_to_scan __read_mostly;
73 static unsigned int khugepaged_pages_collapsed;
74 static unsigned int khugepaged_full_scans;
75 static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
76 /* during fragmentation poll the hugepage allocator once every minute */
77 static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
78 static unsigned long khugepaged_sleep_expire;
79 static DEFINE_SPINLOCK(khugepaged_mm_lock);
80 static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
81 /*
82  * By default, collapse a hugepage if there is at least one pte mapped
83  * the way it would have been had the vma been large enough at
84  * page-fault time.
85  *
86  * Note that these limits are only respected if collapse was initiated by khugepaged.
87  */
88 static unsigned int khugepaged_max_ptes_none __read_mostly;
89 static unsigned int khugepaged_max_ptes_swap __read_mostly;
90 static unsigned int khugepaged_max_ptes_shared __read_mostly;
91
92 #define MM_SLOTS_HASH_BITS 10
93 static DEFINE_READ_MOSTLY_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
94
95 static struct kmem_cache *mm_slot_cache __ro_after_init;
96
97 struct collapse_control {
98         bool is_khugepaged;
99
100         /* Num pages scanned per node */
101         u32 node_load[MAX_NUMNODES];
102
103         /* nodemask for allocation fallback */
104         nodemask_t alloc_nmask;
105 };
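
/*
 * cc->is_khugepaged distinguishes collapses driven by the khugepaged
 * daemon from synchronous collapses requested by userspace (e.g.
 * MADV_COLLAPSE): the max_ptes_* limits and the young/referenced pte
 * heuristics below are only applied when it is true.
 */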
106
107 /**
108  * struct khugepaged_mm_slot - khugepaged information per mm that is being scanned
109  * @slot: hash lookup from mm to mm_slot
110  */
111 struct khugepaged_mm_slot {
112         struct mm_slot slot;
113 };
114
115 /**
116  * struct khugepaged_scan - cursor for scanning
117  * @mm_head: the head of the mm list to scan
118  * @mm_slot: the current mm_slot we are scanning
119  * @address: the next address inside that to be scanned
120  *
121  * There is only the one khugepaged_scan instance of this cursor structure.
122  */
123 struct khugepaged_scan {
124         struct list_head mm_head;
125         struct khugepaged_mm_slot *mm_slot;
126         unsigned long address;
127 };
128
129 static struct khugepaged_scan khugepaged_scan = {
130         .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
131 };
132
133 #ifdef CONFIG_SYSFS
134 static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
135                                          struct kobj_attribute *attr,
136                                          char *buf)
137 {
138         return sysfs_emit(buf, "%u\n", khugepaged_scan_sleep_millisecs);
139 }
140
141 static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
142                                           struct kobj_attribute *attr,
143                                           const char *buf, size_t count)
144 {
145         unsigned int msecs;
146         int err;
147
148         err = kstrtouint(buf, 10, &msecs);
149         if (err)
150                 return -EINVAL;
151
152         khugepaged_scan_sleep_millisecs = msecs;
153         khugepaged_sleep_expire = 0;
154         wake_up_interruptible(&khugepaged_wait);
155
156         return count;
157 }
158 static struct kobj_attribute scan_sleep_millisecs_attr =
159         __ATTR_RW(scan_sleep_millisecs);
160
161 static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
162                                           struct kobj_attribute *attr,
163                                           char *buf)
164 {
165         return sysfs_emit(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
166 }
167
168 static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
169                                            struct kobj_attribute *attr,
170                                            const char *buf, size_t count)
171 {
172         unsigned int msecs;
173         int err;
174
175         err = kstrtouint(buf, 10, &msecs);
176         if (err)
177                 return -EINVAL;
178
179         khugepaged_alloc_sleep_millisecs = msecs;
180         khugepaged_sleep_expire = 0;
181         wake_up_interruptible(&khugepaged_wait);
182
183         return count;
184 }
185 static struct kobj_attribute alloc_sleep_millisecs_attr =
186         __ATTR_RW(alloc_sleep_millisecs);
187
188 static ssize_t pages_to_scan_show(struct kobject *kobj,
189                                   struct kobj_attribute *attr,
190                                   char *buf)
191 {
192         return sysfs_emit(buf, "%u\n", khugepaged_pages_to_scan);
193 }
194 static ssize_t pages_to_scan_store(struct kobject *kobj,
195                                    struct kobj_attribute *attr,
196                                    const char *buf, size_t count)
197 {
198         unsigned int pages;
199         int err;
200
201         err = kstrtouint(buf, 10, &pages);
202         if (err || !pages)
203                 return -EINVAL;
204
205         khugepaged_pages_to_scan = pages;
206
207         return count;
208 }
209 static struct kobj_attribute pages_to_scan_attr =
210         __ATTR_RW(pages_to_scan);
211
212 static ssize_t pages_collapsed_show(struct kobject *kobj,
213                                     struct kobj_attribute *attr,
214                                     char *buf)
215 {
216         return sysfs_emit(buf, "%u\n", khugepaged_pages_collapsed);
217 }
218 static struct kobj_attribute pages_collapsed_attr =
219         __ATTR_RO(pages_collapsed);
220
221 static ssize_t full_scans_show(struct kobject *kobj,
222                                struct kobj_attribute *attr,
223                                char *buf)
224 {
225         return sysfs_emit(buf, "%u\n", khugepaged_full_scans);
226 }
227 static struct kobj_attribute full_scans_attr =
228         __ATTR_RO(full_scans);
229
230 static ssize_t defrag_show(struct kobject *kobj,
231                            struct kobj_attribute *attr, char *buf)
232 {
233         return single_hugepage_flag_show(kobj, attr, buf,
234                                          TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
235 }
236 static ssize_t defrag_store(struct kobject *kobj,
237                             struct kobj_attribute *attr,
238                             const char *buf, size_t count)
239 {
240         return single_hugepage_flag_store(kobj, attr, buf, count,
241                                  TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
242 }
243 static struct kobj_attribute khugepaged_defrag_attr =
244         __ATTR_RW(defrag);
245
246 /*
247  * max_ptes_none controls how many unmapped ptes khugepaged may collapse
248  * a hugepage over, and thus how much it may increase the memory
249  * footprint of a vma. When max_ptes_none is 0, khugepaged will not
250  * reduce the free memory available in the system as it runs.
251  * Increasing max_ptes_none lets khugepaged instantiate more memory
252  * (reducing free memory) during its scan in exchange for more collapses.
253  */
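/*
 * For illustration (assuming HPAGE_PMD_NR == 512, i.e. 4K pages and a 2M
 * PMD): the khugepaged default of 511 allows collapsing a range backed by
 * a single present page, instantiating up to 511 previously unmapped
 * pages, whereas a value of 0 only collapses fully populated ranges.
 */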
254 static ssize_t max_ptes_none_show(struct kobject *kobj,
255                                   struct kobj_attribute *attr,
256                                   char *buf)
257 {
258         return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_none);
259 }
260 static ssize_t max_ptes_none_store(struct kobject *kobj,
261                                    struct kobj_attribute *attr,
262                                    const char *buf, size_t count)
263 {
264         int err;
265         unsigned long max_ptes_none;
266
267         err = kstrtoul(buf, 10, &max_ptes_none);
268         if (err || max_ptes_none > HPAGE_PMD_NR - 1)
269                 return -EINVAL;
270
271         khugepaged_max_ptes_none = max_ptes_none;
272
273         return count;
274 }
275 static struct kobj_attribute khugepaged_max_ptes_none_attr =
276         __ATTR_RW(max_ptes_none);
277
278 static ssize_t max_ptes_swap_show(struct kobject *kobj,
279                                   struct kobj_attribute *attr,
280                                   char *buf)
281 {
282         return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_swap);
283 }
284
285 static ssize_t max_ptes_swap_store(struct kobject *kobj,
286                                    struct kobj_attribute *attr,
287                                    const char *buf, size_t count)
288 {
289         int err;
290         unsigned long max_ptes_swap;
291
292         err  = kstrtoul(buf, 10, &max_ptes_swap);
293         if (err || max_ptes_swap > HPAGE_PMD_NR - 1)
294                 return -EINVAL;
295
296         khugepaged_max_ptes_swap = max_ptes_swap;
297
298         return count;
299 }
300
301 static struct kobj_attribute khugepaged_max_ptes_swap_attr =
302         __ATTR_RW(max_ptes_swap);
303
304 static ssize_t max_ptes_shared_show(struct kobject *kobj,
305                                     struct kobj_attribute *attr,
306                                     char *buf)
307 {
308         return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_shared);
309 }
310
311 static ssize_t max_ptes_shared_store(struct kobject *kobj,
312                                      struct kobj_attribute *attr,
313                                      const char *buf, size_t count)
314 {
315         int err;
316         unsigned long max_ptes_shared;
317
318         err  = kstrtoul(buf, 10, &max_ptes_shared);
319         if (err || max_ptes_shared > HPAGE_PMD_NR - 1)
320                 return -EINVAL;
321
322         khugepaged_max_ptes_shared = max_ptes_shared;
323
324         return count;
325 }
326
327 static struct kobj_attribute khugepaged_max_ptes_shared_attr =
328         __ATTR_RW(max_ptes_shared);
329
330 static struct attribute *khugepaged_attr[] = {
331         &khugepaged_defrag_attr.attr,
332         &khugepaged_max_ptes_none_attr.attr,
333         &khugepaged_max_ptes_swap_attr.attr,
334         &khugepaged_max_ptes_shared_attr.attr,
335         &pages_to_scan_attr.attr,
336         &pages_collapsed_attr.attr,
337         &full_scans_attr.attr,
338         &scan_sleep_millisecs_attr.attr,
339         &alloc_sleep_millisecs_attr.attr,
340         NULL,
341 };
342
343 struct attribute_group khugepaged_attr_group = {
344         .attrs = khugepaged_attr,
345         .name = "khugepaged",
346 };
347 #endif /* CONFIG_SYSFS */
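
/*
 * Usage sketch (assuming this group is registered under the
 * transparent_hugepage kobject, as done by hugepage_init()): the knobs
 * above show up as
 *
 *   /sys/kernel/mm/transparent_hugepage/khugepaged/pages_to_scan
 *   /sys/kernel/mm/transparent_hugepage/khugepaged/scan_sleep_millisecs
 *   /sys/kernel/mm/transparent_hugepage/khugepaged/max_ptes_none
 *   ...
 *
 * e.g. "echo 256 > .../pages_to_scan" limits each scan pass to 256 ptes.
 */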
348
349 int hugepage_madvise(struct vm_area_struct *vma,
350                      unsigned long *vm_flags, int advice)
351 {
352         switch (advice) {
353         case MADV_HUGEPAGE:
354 #ifdef CONFIG_S390
355                 /*
356                  * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
357                  * can't handle this properly after s390_enable_sie, so we simply
358                  * ignore the madvise to prevent qemu from causing a SIGSEGV.
359                  */
360                 if (mm_has_pgste(vma->vm_mm))
361                         return 0;
362 #endif
363                 *vm_flags &= ~VM_NOHUGEPAGE;
364                 *vm_flags |= VM_HUGEPAGE;
365                 /*
366                  * If the vma becomes eligible for khugepaged to scan,
367                  * register it here without waiting for a page fault
368                  * that may not happen any time soon.
369                  */
370                 khugepaged_enter_vma(vma, *vm_flags);
371                 break;
372         case MADV_NOHUGEPAGE:
373                 *vm_flags &= ~VM_HUGEPAGE;
374                 *vm_flags |= VM_NOHUGEPAGE;
375                 /*
376                  * Setting VM_NOHUGEPAGE prevents khugepaged from scanning
377                  * this vma, even if the mm stays registered in khugepaged
378                  * because it was registered before VM_NOHUGEPAGE was set.
379                  */
380                 break;
381         }
382
383         return 0;
384 }
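
/*
 * Illustrative userspace sketch (not kernel code) of how this path is
 * reached via madvise(2):
 *
 *   void *buf = mmap(NULL, 16 << 20, PROT_READ | PROT_WRITE,
 *                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *   madvise(buf, 16 << 20, MADV_HUGEPAGE);
 *
 * This sets VM_HUGEPAGE on the vma and, when THP is enabled, registers
 * the mm with khugepaged via khugepaged_enter_vma() above.
 */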
385
386 int __init khugepaged_init(void)
387 {
388         mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
389                                           sizeof(struct khugepaged_mm_slot),
390                                           __alignof__(struct khugepaged_mm_slot),
391                                           0, NULL);
392         if (!mm_slot_cache)
393                 return -ENOMEM;
394
395         khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
396         khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
397         khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
398         khugepaged_max_ptes_shared = HPAGE_PMD_NR / 2;
399
400         return 0;
401 }
402
403 void __init khugepaged_destroy(void)
404 {
405         kmem_cache_destroy(mm_slot_cache);
406 }
407
408 static inline int hpage_collapse_test_exit(struct mm_struct *mm)
409 {
410         return atomic_read(&mm->mm_users) == 0;
411 }
412
413 static inline int hpage_collapse_test_exit_or_disable(struct mm_struct *mm)
414 {
415         return hpage_collapse_test_exit(mm) ||
416                test_bit(MMF_DISABLE_THP, &mm->flags);
417 }
418
419 void __khugepaged_enter(struct mm_struct *mm)
420 {
421         struct khugepaged_mm_slot *mm_slot;
422         struct mm_slot *slot;
423         int wakeup;
424
425         /* __khugepaged_exit() must not run from under us */
426         VM_BUG_ON_MM(hpage_collapse_test_exit(mm), mm);
427         if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags)))
428                 return;
429
430         mm_slot = mm_slot_alloc(mm_slot_cache);
431         if (!mm_slot)
432                 return;
433
434         slot = &mm_slot->slot;
435
436         spin_lock(&khugepaged_mm_lock);
437         mm_slot_insert(mm_slots_hash, mm, slot);
438         /*
439          * Insert just behind the scanning cursor, to let the area settle
440          * down a little.
441          */
442         wakeup = list_empty(&khugepaged_scan.mm_head);
443         list_add_tail(&slot->mm_node, &khugepaged_scan.mm_head);
444         spin_unlock(&khugepaged_mm_lock);
445
446         mmgrab(mm);
447         if (wakeup)
448                 wake_up_interruptible(&khugepaged_wait);
449 }
450
451 void khugepaged_enter_vma(struct vm_area_struct *vma,
452                           unsigned long vm_flags)
453 {
454         if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
455             hugepage_flags_enabled()) {
456                 if (thp_vma_allowable_order(vma, vm_flags, TVA_ENFORCE_SYSFS,
457                                             PMD_ORDER))
458                         __khugepaged_enter(vma->vm_mm);
459         }
460 }
461
462 void __khugepaged_exit(struct mm_struct *mm)
463 {
464         struct khugepaged_mm_slot *mm_slot;
465         struct mm_slot *slot;
466         int free = 0;
467
468         spin_lock(&khugepaged_mm_lock);
469         slot = mm_slot_lookup(mm_slots_hash, mm);
470         mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
471         if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
472                 hash_del(&slot->hash);
473                 list_del(&slot->mm_node);
474                 free = 1;
475         }
476         spin_unlock(&khugepaged_mm_lock);
477
478         if (free) {
479                 clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
480                 mm_slot_free(mm_slot_cache, mm_slot);
481                 mmdrop(mm);
482         } else if (mm_slot) {
483                 /*
484                  * This is required to serialize against
485                  * hpage_collapse_test_exit() (which is guaranteed to run
486                  * under mmap_lock read mode). Stop here (all page tables
487                  * will be destroyed after we return) until khugepaged has
488                  * finished working on the page tables under the mmap_lock.
489                  */
490                 mmap_write_lock(mm);
491                 mmap_write_unlock(mm);
492         }
493 }
494
495 static void release_pte_folio(struct folio *folio)
496 {
497         node_stat_mod_folio(folio,
498                         NR_ISOLATED_ANON + folio_is_file_lru(folio),
499                         -folio_nr_pages(folio));
500         folio_unlock(folio);
501         folio_putback_lru(folio);
502 }
503
504 static void release_pte_pages(pte_t *pte, pte_t *_pte,
505                 struct list_head *compound_pagelist)
506 {
507         struct folio *folio, *tmp;
508
509         while (--_pte >= pte) {
510                 pte_t pteval = ptep_get(_pte);
511                 unsigned long pfn;
512
513                 if (pte_none(pteval))
514                         continue;
515                 pfn = pte_pfn(pteval);
516                 if (is_zero_pfn(pfn))
517                         continue;
518                 folio = pfn_folio(pfn);
519                 if (folio_test_large(folio))
520                         continue;
521                 release_pte_folio(folio);
522         }
523
524         list_for_each_entry_safe(folio, tmp, compound_pagelist, lru) {
525                 list_del(&folio->lru);
526                 release_pte_folio(folio);
527         }
528 }
529
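/*
 * A folio that is only referenced by its page-table mappings (plus, if it
 * is in the swap cache, one reference per page) has a predictable
 * reference count. Any extra reference indicates a GUP or other external
 * pin, which makes the folio unsuitable for collapse.
 */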
530 static bool is_refcount_suitable(struct folio *folio)
531 {
532         int expected_refcount;
533
534         expected_refcount = folio_mapcount(folio);
535         if (folio_test_swapcache(folio))
536                 expected_refcount += folio_nr_pages(folio);
537
538         return folio_ref_count(folio) == expected_refcount;
539 }
540
541 static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
542                                         unsigned long address,
543                                         pte_t *pte,
544                                         struct collapse_control *cc,
545                                         struct list_head *compound_pagelist)
546 {
547         struct page *page = NULL;
548         struct folio *folio = NULL;
549         pte_t *_pte;
550         int none_or_zero = 0, shared = 0, result = SCAN_FAIL, referenced = 0;
551         bool writable = false;
552
553         for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
554              _pte++, address += PAGE_SIZE) {
555                 pte_t pteval = ptep_get(_pte);
556                 if (pte_none(pteval) || (pte_present(pteval) &&
557                                 is_zero_pfn(pte_pfn(pteval)))) {
558                         ++none_or_zero;
559                         if (!userfaultfd_armed(vma) &&
560                             (!cc->is_khugepaged ||
561                              none_or_zero <= khugepaged_max_ptes_none)) {
562                                 continue;
563                         } else {
564                                 result = SCAN_EXCEED_NONE_PTE;
565                                 count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
566                                 goto out;
567                         }
568                 }
569                 if (!pte_present(pteval)) {
570                         result = SCAN_PTE_NON_PRESENT;
571                         goto out;
572                 }
573                 if (pte_uffd_wp(pteval)) {
574                         result = SCAN_PTE_UFFD_WP;
575                         goto out;
576                 }
577                 page = vm_normal_page(vma, address, pteval);
578                 if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
579                         result = SCAN_PAGE_NULL;
580                         goto out;
581                 }
582
583                 folio = page_folio(page);
584                 VM_BUG_ON_FOLIO(!folio_test_anon(folio), folio);
585
586                 /* See hpage_collapse_scan_pmd(). */
587                 if (folio_likely_mapped_shared(folio)) {
588                         ++shared;
589                         if (cc->is_khugepaged &&
590                             shared > khugepaged_max_ptes_shared) {
591                                 result = SCAN_EXCEED_SHARED_PTE;
592                                 count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
593                                 goto out;
594                         }
595                 }
596
597                 if (folio_test_large(folio)) {
598                         struct folio *f;
599
600                         /*
601                          * Check if we have dealt with the compound page
602                          * already
603                          */
604                         list_for_each_entry(f, compound_pagelist, lru) {
605                                 if (folio == f)
606                                         goto next;
607                         }
608                 }
609
610                 /*
611                  * We can take the folio lock before folio_isolate_lru()
612                  * because the folio can't be freed from under us. NOTE: the
613                  * folio lock is needed to serialize against split_huge_page
614                  * when invoked from the VM.
615                  */
616                 if (!folio_trylock(folio)) {
617                         result = SCAN_PAGE_LOCK;
618                         goto out;
619                 }
620
621                 /*
622                  * Check if the page has any GUP (or other external) pins.
623                  *
624                  * The page table that maps the page has been already unlinked
625                  * from the page table tree and this process cannot get
626                  * an additional pin on the page.
627                  *
628                  * New pins can come later if the page is shared across fork,
629                  * but not from this process. The other process cannot write to
630                  * the page, only trigger CoW.
631                  */
632                 if (!is_refcount_suitable(folio)) {
633                         folio_unlock(folio);
634                         result = SCAN_PAGE_COUNT;
635                         goto out;
636                 }
637
638                 /*
639                  * Isolate the folio to avoid collapsing a hugepage
640                  * currently in use by the VM.
641                  */
642                 if (!folio_isolate_lru(folio)) {
643                         folio_unlock(folio);
644                         result = SCAN_DEL_PAGE_LRU;
645                         goto out;
646                 }
647                 node_stat_mod_folio(folio,
648                                 NR_ISOLATED_ANON + folio_is_file_lru(folio),
649                                 folio_nr_pages(folio));
650                 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
651                 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
652
653                 if (folio_test_large(folio))
654                         list_add_tail(&folio->lru, compound_pagelist);
655 next:
656                 /*
657                  * If collapse was initiated by khugepaged, check that there
658                  * are enough young ptes to justify collapsing the page.
659                  */
660                 if (cc->is_khugepaged &&
661                     (pte_young(pteval) || folio_test_young(folio) ||
662                      folio_test_referenced(folio) || mmu_notifier_test_young(vma->vm_mm,
663                                                                      address)))
664                         referenced++;
665
666                 if (pte_write(pteval))
667                         writable = true;
668         }
669
670         if (unlikely(!writable)) {
671                 result = SCAN_PAGE_RO;
672         } else if (unlikely(cc->is_khugepaged && !referenced)) {
673                 result = SCAN_LACK_REFERENCED_PAGE;
674         } else {
675                 result = SCAN_SUCCEED;
676                 trace_mm_collapse_huge_page_isolate(&folio->page, none_or_zero,
677                                                     referenced, writable, result);
678                 return result;
679         }
680 out:
681         release_pte_pages(pte, _pte, compound_pagelist);
682         trace_mm_collapse_huge_page_isolate(&folio->page, none_or_zero,
683                                             referenced, writable, result);
684         return result;
685 }
686
687 static void __collapse_huge_page_copy_succeeded(pte_t *pte,
688                                                 struct vm_area_struct *vma,
689                                                 unsigned long address,
690                                                 spinlock_t *ptl,
691                                                 struct list_head *compound_pagelist)
692 {
693         struct folio *src, *tmp;
694         pte_t *_pte;
695         pte_t pteval;
696
697         for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
698              _pte++, address += PAGE_SIZE) {
699                 pteval = ptep_get(_pte);
700                 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
701                         add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
702                         if (is_zero_pfn(pte_pfn(pteval))) {
703                                 /*
704                                  * ptl mostly unnecessary.
705                                  */
706                                 spin_lock(ptl);
707                                 ptep_clear(vma->vm_mm, address, _pte);
708                                 spin_unlock(ptl);
709                                 ksm_might_unmap_zero_page(vma->vm_mm, pteval);
710                         }
711                 } else {
712                         struct page *src_page = pte_page(pteval);
713
714                         src = page_folio(src_page);
715                         if (!folio_test_large(src))
716                                 release_pte_folio(src);
717                         /*
718                          * ptl mostly unnecessary, but preempt has to
719                          * be disabled to update the per-cpu stats
720                          * inside folio_remove_rmap_pte().
721                          */
722                         spin_lock(ptl);
723                         ptep_clear(vma->vm_mm, address, _pte);
724                         folio_remove_rmap_pte(src, src_page, vma);
725                         spin_unlock(ptl);
726                         free_page_and_swap_cache(src_page);
727                 }
728         }
729
730         list_for_each_entry_safe(src, tmp, compound_pagelist, lru) {
731                 list_del(&src->lru);
732                 node_stat_sub_folio(src, NR_ISOLATED_ANON +
733                                 folio_is_file_lru(src));
734                 folio_unlock(src);
735                 free_swap_cache(src);
736                 folio_putback_lru(src);
737         }
738 }
739
740 static void __collapse_huge_page_copy_failed(pte_t *pte,
741                                              pmd_t *pmd,
742                                              pmd_t orig_pmd,
743                                              struct vm_area_struct *vma,
744                                              struct list_head *compound_pagelist)
745 {
746         spinlock_t *pmd_ptl;
747
748         /*
749          * Re-establish the PMD to point to the original page table
750          * entry. Restoring PMD needs to be done prior to releasing
751          * pages. Since pages are still isolated and locked here,
752          * acquiring anon_vma_lock_write is unnecessary.
753          */
754         pmd_ptl = pmd_lock(vma->vm_mm, pmd);
755         pmd_populate(vma->vm_mm, pmd, pmd_pgtable(orig_pmd));
756         spin_unlock(pmd_ptl);
757         /*
758          * Release both raw and compound pages isolated
759          * in __collapse_huge_page_isolate.
760          */
761         release_pte_pages(pte, pte + HPAGE_PMD_NR, compound_pagelist);
762 }
763
764 /*
765  * __collapse_huge_page_copy - attempts to copy memory contents from raw
766  * pages to a hugepage. Cleans up the raw pages if copying succeeds;
767  * otherwise restores the original page table and releases isolated raw pages.
768  * Returns SCAN_SUCCEED if copying succeeds, otherwise returns SCAN_COPY_MC.
769  *
770  * @pte: start of the PTEs to copy from
771  * @folio: the new hugepage to copy contents to
772  * @pmd: pointer to the new hugepage's PMD
773  * @orig_pmd: the original raw pages' PMD
774  * @vma: the original raw pages' virtual memory area
775  * @address: starting address to copy
776  * @ptl: lock on raw pages' PTEs
777  * @compound_pagelist: list that stores compound pages
778  */
779 static int __collapse_huge_page_copy(pte_t *pte, struct folio *folio,
780                 pmd_t *pmd, pmd_t orig_pmd, struct vm_area_struct *vma,
781                 unsigned long address, spinlock_t *ptl,
782                 struct list_head *compound_pagelist)
783 {
784         unsigned int i;
785         int result = SCAN_SUCCEED;
786
787         /*
788          * Copying the pages' contents may hit a memory-poisoned page at any iteration.
789          */
790         for (i = 0; i < HPAGE_PMD_NR; i++) {
791                 pte_t pteval = ptep_get(pte + i);
792                 struct page *page = folio_page(folio, i);
793                 unsigned long src_addr = address + i * PAGE_SIZE;
794                 struct page *src_page;
795
796                 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
797                         clear_user_highpage(page, src_addr);
798                         continue;
799                 }
800                 src_page = pte_page(pteval);
801                 if (copy_mc_user_highpage(page, src_page, src_addr, vma) > 0) {
802                         result = SCAN_COPY_MC;
803                         break;
804                 }
805         }
806
807         if (likely(result == SCAN_SUCCEED))
808                 __collapse_huge_page_copy_succeeded(pte, vma, address, ptl,
809                                                     compound_pagelist);
810         else
811                 __collapse_huge_page_copy_failed(pte, pmd, orig_pmd, vma,
812                                                  compound_pagelist);
813
814         return result;
815 }
816
817 static void khugepaged_alloc_sleep(void)
818 {
819         DEFINE_WAIT(wait);
820
821         add_wait_queue(&khugepaged_wait, &wait);
822         __set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
823         schedule_timeout(msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
824         remove_wait_queue(&khugepaged_wait, &wait);
825 }
826
827 struct collapse_control khugepaged_collapse_control = {
828         .is_khugepaged = true,
829 };
830
831 static bool hpage_collapse_scan_abort(int nid, struct collapse_control *cc)
832 {
833         int i;
834
835         /*
836          * If node_reclaim_mode is disabled, then no extra effort is made to
837          * allocate memory locally.
838          */
839         if (!node_reclaim_enabled())
840                 return false;
841
842         /* If there is a count for this node already, it must be acceptable */
843         if (cc->node_load[nid])
844                 return false;
845
846         for (i = 0; i < MAX_NUMNODES; i++) {
847                 if (!cc->node_load[i])
848                         continue;
849                 if (node_distance(nid, i) > node_reclaim_distance)
850                         return true;
851         }
852         return false;
853 }
854
855 #define khugepaged_defrag()                                     \
856         (transparent_hugepage_flags &                           \
857          (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG))
858
859 /* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
860 static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
861 {
862         return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
863 }
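
/*
 * Roughly speaking: GFP_TRANSHUGE allows entering direct reclaim and
 * compaction, while GFP_TRANSHUGE_LIGHT does not, so with defrag disabled
 * khugepaged only collapses when a free hugepage is readily available.
 */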
864
865 #ifdef CONFIG_NUMA
866 static int hpage_collapse_find_target_node(struct collapse_control *cc)
867 {
868         int nid, target_node = 0, max_value = 0;
869
870         /* find first node with max normal pages hit */
871         for (nid = 0; nid < MAX_NUMNODES; nid++)
872                 if (cc->node_load[nid] > max_value) {
873                         max_value = cc->node_load[nid];
874                         target_node = nid;
875                 }
876
877         for_each_online_node(nid) {
878                 if (max_value == cc->node_load[nid])
879                         node_set(nid, cc->alloc_nmask);
880         }
881
882         return target_node;
883 }
884 #else
885 static int hpage_collapse_find_target_node(struct collapse_control *cc)
886 {
887         return 0;
888 }
889 #endif
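
/*
 * For illustration: if a scan finds 300 ptes backed by node 0 and 212
 * backed by node 1, node_load[] ends up as {300, 212, ...}, the hugepage
 * is allocated on node 0, and every node tied for the maximum is also set
 * in cc->alloc_nmask as an allocation fallback.
 */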
890
891 /*
892  * If the mmap_lock was temporarily dropped, revalidate the vma
893  * after re-acquiring the mmap_lock.
894  * Returns an enum scan_result value.
895  */
896
897 static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
898                                    bool expect_anon,
899                                    struct vm_area_struct **vmap,
900                                    struct collapse_control *cc)
901 {
902         struct vm_area_struct *vma;
903         unsigned long tva_flags = cc->is_khugepaged ? TVA_ENFORCE_SYSFS : 0;
904
905         if (unlikely(hpage_collapse_test_exit_or_disable(mm)))
906                 return SCAN_ANY_PROCESS;
907
908         *vmap = vma = find_vma(mm, address);
909         if (!vma)
910                 return SCAN_VMA_NULL;
911
912         if (!thp_vma_suitable_order(vma, address, PMD_ORDER))
913                 return SCAN_ADDRESS_RANGE;
914         if (!thp_vma_allowable_order(vma, vma->vm_flags, tva_flags, PMD_ORDER))
915                 return SCAN_VMA_CHECK;
916         /*
917          * An anon VMA is expected: the address may have been unmapped and
918          * then remapped to a file after khugepaged re-acquired the mmap_lock.
919          *
920          * thp_vma_allowable_order may return true for qualified file
921          * vmas.
922          */
923         if (expect_anon && (!(*vmap)->anon_vma || !vma_is_anonymous(*vmap)))
924                 return SCAN_PAGE_ANON;
925         return SCAN_SUCCEED;
926 }
927
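/*
 * Returns SCAN_SUCCEED when the PMD points at a normal page table,
 * SCAN_PMD_MAPPED when the range is already mapped by a huge PMD,
 * SCAN_PMD_NONE for a none PMD, and SCAN_PMD_NULL when there is no usable
 * PMD (missing, non-present, devmap or bad).
 */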
928 static int find_pmd_or_thp_or_none(struct mm_struct *mm,
929                                    unsigned long address,
930                                    pmd_t **pmd)
931 {
932         pmd_t pmde;
933
934         *pmd = mm_find_pmd(mm, address);
935         if (!*pmd)
936                 return SCAN_PMD_NULL;
937
938         pmde = pmdp_get_lockless(*pmd);
939         if (pmd_none(pmde))
940                 return SCAN_PMD_NONE;
941         if (!pmd_present(pmde))
942                 return SCAN_PMD_NULL;
943         if (pmd_trans_huge(pmde))
944                 return SCAN_PMD_MAPPED;
945         if (pmd_devmap(pmde))
946                 return SCAN_PMD_NULL;
947         if (pmd_bad(pmde))
948                 return SCAN_PMD_NULL;
949         return SCAN_SUCCEED;
950 }
951
952 static int check_pmd_still_valid(struct mm_struct *mm,
953                                  unsigned long address,
954                                  pmd_t *pmd)
955 {
956         pmd_t *new_pmd;
957         int result = find_pmd_or_thp_or_none(mm, address, &new_pmd);
958
959         if (result != SCAN_SUCCEED)
960                 return result;
961         if (new_pmd != pmd)
962                 return SCAN_FAIL;
963         return SCAN_SUCCEED;
964 }
965
966 /*
967  * Bring missing pages in from swap, to complete THP collapse.
968  * Only done if hpage_collapse_scan_pmd believes it is worthwhile.
969  *
970  * Called and returns without pte mapped or spinlocks held.
971  * Returns result: if not SCAN_SUCCEED, mmap_lock has been released.
972  */
973 static int __collapse_huge_page_swapin(struct mm_struct *mm,
974                                        struct vm_area_struct *vma,
975                                        unsigned long haddr, pmd_t *pmd,
976                                        int referenced)
977 {
978         int swapped_in = 0;
979         vm_fault_t ret = 0;
980         unsigned long address, end = haddr + (HPAGE_PMD_NR * PAGE_SIZE);
981         int result;
982         pte_t *pte = NULL;
983         spinlock_t *ptl;
984
985         for (address = haddr; address < end; address += PAGE_SIZE) {
986                 struct vm_fault vmf = {
987                         .vma = vma,
988                         .address = address,
989                         .pgoff = linear_page_index(vma, address),
990                         .flags = FAULT_FLAG_ALLOW_RETRY,
991                         .pmd = pmd,
992                 };
993
994                 if (!pte++) {
995                         pte = pte_offset_map_nolock(mm, pmd, address, &ptl);
996                         if (!pte) {
997                                 mmap_read_unlock(mm);
998                                 result = SCAN_PMD_NULL;
999                                 goto out;
1000                         }
1001                 }
1002
1003                 vmf.orig_pte = ptep_get_lockless(pte);
1004                 if (!is_swap_pte(vmf.orig_pte))
1005                         continue;
1006
1007                 vmf.pte = pte;
1008                 vmf.ptl = ptl;
1009                 ret = do_swap_page(&vmf);
1010                 /* Which unmaps pte (after perhaps re-checking the entry) */
1011                 pte = NULL;
1012
1013                 /*
1014                  * do_swap_page returns VM_FAULT_RETRY with the mmap_lock released.
1015                  * Note we treat VM_FAULT_RETRY as VM_FAULT_ERROR here because
1016                  * we do not retry, and the swap entry will remain in the page
1017                  * table, resulting in a later failure.
1018                  */
1019                 if (ret & VM_FAULT_RETRY) {
1020                         /* Likely, but not guaranteed, that page lock failed */
1021                         result = SCAN_PAGE_LOCK;
1022                         goto out;
1023                 }
1024                 if (ret & VM_FAULT_ERROR) {
1025                         mmap_read_unlock(mm);
1026                         result = SCAN_FAIL;
1027                         goto out;
1028                 }
1029                 swapped_in++;
1030         }
1031
1032         if (pte)
1033                 pte_unmap(pte);
1034
1035         /* Drain LRU cache to remove extra pin on the swapped in pages */
1036         if (swapped_in)
1037                 lru_add_drain();
1038
1039         result = SCAN_SUCCEED;
1040 out:
1041         trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, result);
1042         return result;
1043 }
1044
1045 static int alloc_charge_folio(struct folio **foliop, struct mm_struct *mm,
1046                               struct collapse_control *cc)
1047 {
1048         gfp_t gfp = (cc->is_khugepaged ? alloc_hugepage_khugepaged_gfpmask() :
1049                      GFP_TRANSHUGE);
1050         int node = hpage_collapse_find_target_node(cc);
1051         struct folio *folio;
1052
1053         folio = __folio_alloc(gfp, HPAGE_PMD_ORDER, node, &cc->alloc_nmask);
1054         if (!folio) {
1055                 *foliop = NULL;
1056                 count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
1057                 return SCAN_ALLOC_HUGE_PAGE_FAIL;
1058         }
1059
1060         count_vm_event(THP_COLLAPSE_ALLOC);
1061         if (unlikely(mem_cgroup_charge(folio, mm, gfp))) {
1062                 folio_put(folio);
1063                 *foliop = NULL;
1064                 return SCAN_CGROUP_CHARGE_FAIL;
1065         }
1066
1067         count_memcg_folio_events(folio, THP_COLLAPSE_ALLOC, 1);
1068
1069         *foliop = folio;
1070         return SCAN_SUCCEED;
1071 }
1072
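/*
 * Locking sequence, as implemented below: drop the caller's mmap_lock
 * (read) around the hugepage allocation, re-take it in read mode for
 * revalidation and the optional swap-in, then take it in write mode
 * (plus the vma and anon_vma write locks) for the actual PMD collapse.
 * Returns with the mmap_lock released in all cases.
 */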
1073 static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
1074                               int referenced, int unmapped,
1075                               struct collapse_control *cc)
1076 {
1077         LIST_HEAD(compound_pagelist);
1078         pmd_t *pmd, _pmd;
1079         pte_t *pte;
1080         pgtable_t pgtable;
1081         struct folio *folio;
1082         spinlock_t *pmd_ptl, *pte_ptl;
1083         int result = SCAN_FAIL;
1084         struct vm_area_struct *vma;
1085         struct mmu_notifier_range range;
1086
1087         VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1088
1089         /*
1090          * Before allocating the hugepage, release the mmap_lock read lock.
1091          * The allocation can take potentially a long time if it involves
1092          * sync compaction, and we do not need to hold the mmap_lock during
1093          * that. We will recheck the vma after taking it again in write mode.
1094          */
1095         mmap_read_unlock(mm);
1096
1097         result = alloc_charge_folio(&folio, mm, cc);
1098         if (result != SCAN_SUCCEED)
1099                 goto out_nolock;
1100
1101         mmap_read_lock(mm);
1102         result = hugepage_vma_revalidate(mm, address, true, &vma, cc);
1103         if (result != SCAN_SUCCEED) {
1104                 mmap_read_unlock(mm);
1105                 goto out_nolock;
1106         }
1107
1108         result = find_pmd_or_thp_or_none(mm, address, &pmd);
1109         if (result != SCAN_SUCCEED) {
1110                 mmap_read_unlock(mm);
1111                 goto out_nolock;
1112         }
1113
1114         if (unmapped) {
1115                 /*
1116                  * __collapse_huge_page_swapin will return with the mmap_lock
1117                  * released when it fails, so we jump straight to out_nolock in
1118                  * that case.  Continuing the collapse would cause inconsistency.
1119                  */
1120                 result = __collapse_huge_page_swapin(mm, vma, address, pmd,
1121                                                      referenced);
1122                 if (result != SCAN_SUCCEED)
1123                         goto out_nolock;
1124         }
1125
1126         mmap_read_unlock(mm);
1127         /*
1128          * Prevent all access to the page tables, with the exception of
1129          * gup_fast (handled later by the ptep_clear_flush) and the VM
1130          * (handled by the anon_vma lock + PG_lock).
1131          *
1132          * UFFDIO_MOVE is prevented from racing as well thanks to the
1133          * mmap_lock.
1134          */
1135         mmap_write_lock(mm);
1136         result = hugepage_vma_revalidate(mm, address, true, &vma, cc);
1137         if (result != SCAN_SUCCEED)
1138                 goto out_up_write;
1139         /* check if the pmd is still valid */
1140         result = check_pmd_still_valid(mm, address, pmd);
1141         if (result != SCAN_SUCCEED)
1142                 goto out_up_write;
1143
1144         vma_start_write(vma);
1145         anon_vma_lock_write(vma->anon_vma);
1146
1147         mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, address,
1148                                 address + HPAGE_PMD_SIZE);
1149         mmu_notifier_invalidate_range_start(&range);
1150
1151         pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
1152         /*
1153          * This removes any huge TLB entry from the CPU so we don't allow
1154          * huge and small TLB entries for the same virtual address, to
1155          * avoid the risk of CPU bugs in that area.
1156          *
1157          * Parallel GUP-fast is fine since GUP-fast will back off when
1158          * it detects the PMD has changed.
1159          */
1160         _pmd = pmdp_collapse_flush(vma, address, pmd);
1161         spin_unlock(pmd_ptl);
1162         mmu_notifier_invalidate_range_end(&range);
1163         tlb_remove_table_sync_one();
1164
1165         pte = pte_offset_map_lock(mm, &_pmd, address, &pte_ptl);
1166         if (pte) {
1167                 result = __collapse_huge_page_isolate(vma, address, pte, cc,
1168                                                       &compound_pagelist);
1169                 spin_unlock(pte_ptl);
1170         } else {
1171                 result = SCAN_PMD_NULL;
1172         }
1173
1174         if (unlikely(result != SCAN_SUCCEED)) {
1175                 if (pte)
1176                         pte_unmap(pte);
1177                 spin_lock(pmd_ptl);
1178                 BUG_ON(!pmd_none(*pmd));
1179                 /*
1180                  * We can only use set_pmd_at when establishing
1181                  * huge pmds, never for establishing regular pmds that
1182                  * point to regular page tables. Use pmd_populate for that.
1183                  */
1184                 pmd_populate(mm, pmd, pmd_pgtable(_pmd));
1185                 spin_unlock(pmd_ptl);
1186                 anon_vma_unlock_write(vma->anon_vma);
1187                 goto out_up_write;
1188         }
1189
1190         /*
1191          * All pages are isolated and locked so anon_vma rmap
1192          * can't run anymore.
1193          */
1194         anon_vma_unlock_write(vma->anon_vma);
1195
1196         result = __collapse_huge_page_copy(pte, folio, pmd, _pmd,
1197                                            vma, address, pte_ptl,
1198                                            &compound_pagelist);
1199         pte_unmap(pte);
1200         if (unlikely(result != SCAN_SUCCEED))
1201                 goto out_up_write;
1202
1203         /*
1204          * The smp_wmb() inside __folio_mark_uptodate() ensures the
1205          * writes that copied the page contents become visible before
1206          * the set_pmd_at() write.
1207          */
1208         __folio_mark_uptodate(folio);
1209         pgtable = pmd_pgtable(_pmd);
1210
1211         _pmd = mk_huge_pmd(&folio->page, vma->vm_page_prot);
1212         _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
1213
1214         spin_lock(pmd_ptl);
1215         BUG_ON(!pmd_none(*pmd));
1216         folio_add_new_anon_rmap(folio, vma, address);
1217         folio_add_lru_vma(folio, vma);
1218         pgtable_trans_huge_deposit(mm, pmd, pgtable);
1219         set_pmd_at(mm, address, pmd, _pmd);
1220         update_mmu_cache_pmd(vma, address, pmd);
1221         spin_unlock(pmd_ptl);
1222
1223         folio = NULL;
1224
1225         result = SCAN_SUCCEED;
1226 out_up_write:
1227         mmap_write_unlock(mm);
1228 out_nolock:
1229         if (folio)
1230                 folio_put(folio);
1231         trace_mm_collapse_huge_page(mm, result == SCAN_SUCCEED, result);
1232         return result;
1233 }
1234
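/*
 * Scan the ptes mapped by one PMD with the mmap_lock held in read mode,
 * bailing out on anything that rules out a collapse (swap/none/shared pte
 * limits, uffd-wp, non-LRU or pinned pages) and recording node placement
 * in cc->node_load[]. On SCAN_SUCCEED this calls collapse_huge_page(),
 * which releases the mmap_lock (reflected via *mmap_locked).
 */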
1235 static int hpage_collapse_scan_pmd(struct mm_struct *mm,
1236                                    struct vm_area_struct *vma,
1237                                    unsigned long address, bool *mmap_locked,
1238                                    struct collapse_control *cc)
1239 {
1240         pmd_t *pmd;
1241         pte_t *pte, *_pte;
1242         int result = SCAN_FAIL, referenced = 0;
1243         int none_or_zero = 0, shared = 0;
1244         struct page *page = NULL;
1245         struct folio *folio = NULL;
1246         unsigned long _address;
1247         spinlock_t *ptl;
1248         int node = NUMA_NO_NODE, unmapped = 0;
1249         bool writable = false;
1250
1251         VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1252
1253         result = find_pmd_or_thp_or_none(mm, address, &pmd);
1254         if (result != SCAN_SUCCEED)
1255                 goto out;
1256
1257         memset(cc->node_load, 0, sizeof(cc->node_load));
1258         nodes_clear(cc->alloc_nmask);
1259         pte = pte_offset_map_lock(mm, pmd, address, &ptl);
1260         if (!pte) {
1261                 result = SCAN_PMD_NULL;
1262                 goto out;
1263         }
1264
1265         for (_address = address, _pte = pte; _pte < pte + HPAGE_PMD_NR;
1266              _pte++, _address += PAGE_SIZE) {
1267                 pte_t pteval = ptep_get(_pte);
1268                 if (is_swap_pte(pteval)) {
1269                         ++unmapped;
1270                         if (!cc->is_khugepaged ||
1271                             unmapped <= khugepaged_max_ptes_swap) {
1272                                 /*
1273                                  * Always be strict with uffd-wp
1274                                  * enabled swap entries.  Please see
1275                                  * comment below for pte_uffd_wp().
1276                                  */
1277                                 if (pte_swp_uffd_wp_any(pteval)) {
1278                                         result = SCAN_PTE_UFFD_WP;
1279                                         goto out_unmap;
1280                                 }
1281                                 continue;
1282                         } else {
1283                                 result = SCAN_EXCEED_SWAP_PTE;
1284                                 count_vm_event(THP_SCAN_EXCEED_SWAP_PTE);
1285                                 goto out_unmap;
1286                         }
1287                 }
1288                 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
1289                         ++none_or_zero;
1290                         if (!userfaultfd_armed(vma) &&
1291                             (!cc->is_khugepaged ||
1292                              none_or_zero <= khugepaged_max_ptes_none)) {
1293                                 continue;
1294                         } else {
1295                                 result = SCAN_EXCEED_NONE_PTE;
1296                                 count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
1297                                 goto out_unmap;
1298                         }
1299                 }
1300                 if (pte_uffd_wp(pteval)) {
1301                         /*
1302                          * Don't collapse the page if any of the small
1303                          * PTEs are armed with uffd write protection.
1304                          * We could instead mark the new huge pmd as
1305                          * write protected if any of the small ones is
1306                          * marked, but that could bring unexpected
1307                          * userfault messages that fall outside of
1308                          * the registered range.  So, just keep it simple.
1309                          */
1310                         result = SCAN_PTE_UFFD_WP;
1311                         goto out_unmap;
1312                 }
1313                 if (pte_write(pteval))
1314                         writable = true;
1315
1316                 page = vm_normal_page(vma, _address, pteval);
1317                 if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
1318                         result = SCAN_PAGE_NULL;
1319                         goto out_unmap;
1320                 }
1321                 folio = page_folio(page);
1322
1323                 if (!folio_test_anon(folio)) {
1324                         result = SCAN_PAGE_ANON;
1325                         goto out_unmap;
1326                 }
1327
1328                 /*
1329                  * We treat a single page as shared if any part of the THP
1330                  * is shared. "False negatives" from
1331                  * folio_likely_mapped_shared() are not expected to matter
1332                  * much in practice.
1333                  */
1334                 if (folio_likely_mapped_shared(folio)) {
1335                         ++shared;
1336                         if (cc->is_khugepaged &&
1337                             shared > khugepaged_max_ptes_shared) {
1338                                 result = SCAN_EXCEED_SHARED_PTE;
1339                                 count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
1340                                 goto out_unmap;
1341                         }
1342                 }
1343
1344                 /*
1345                  * Record which node the original page is from and save this
1346                  * information to cc->node_load[].
1347                  * Khugepaged will allocate the hugepage from the node with
1348                  * the most hits.
1349                  */
1350                 node = folio_nid(folio);
1351                 if (hpage_collapse_scan_abort(node, cc)) {
1352                         result = SCAN_SCAN_ABORT;
1353                         goto out_unmap;
1354                 }
1355                 cc->node_load[node]++;
1356                 if (!folio_test_lru(folio)) {
1357                         result = SCAN_PAGE_LRU;
1358                         goto out_unmap;
1359                 }
1360                 if (folio_test_locked(folio)) {
1361                         result = SCAN_PAGE_LOCK;
1362                         goto out_unmap;
1363                 }
1364
1365                 /*
1366                  * Check if the page has any GUP (or other external) pins.
1367                  *
1368                  * Here the check may be racy:
1369                  * it may see folio_mapcount() > folio_ref_count().
1370                  * But such a case is ephemeral, so we can always retry the
1371                  * collapse later.  However, it may report a false positive if
1372                  * the page has excessive GUP pins (i.e. 512).  The same check
1373                  * is done again later, so the risk seems low.
1374                  */
1375                 if (!is_refcount_suitable(folio)) {
1376                         result = SCAN_PAGE_COUNT;
1377                         goto out_unmap;
1378                 }
1379
1380                 /*
1381                  * If collapse was initiated by khugepaged, check that there
1382                  * are enough young ptes to justify collapsing the page.
1383                  */
1384                 if (cc->is_khugepaged &&
1385                     (pte_young(pteval) || folio_test_young(folio) ||
1386                      folio_test_referenced(folio) || mmu_notifier_test_young(vma->vm_mm,
1387                                                                      address)))
1388                         referenced++;
1389         }
1390         if (!writable) {
1391                 result = SCAN_PAGE_RO;
1392         } else if (cc->is_khugepaged &&
1393                    (!referenced ||
1394                     (unmapped && referenced < HPAGE_PMD_NR / 2))) {
1395                 result = SCAN_LACK_REFERENCED_PAGE;
1396         } else {
1397                 result = SCAN_SUCCEED;
1398         }
1399 out_unmap:
1400         pte_unmap_unlock(pte, ptl);
1401         if (result == SCAN_SUCCEED) {
1402                 result = collapse_huge_page(mm, address, referenced,
1403                                             unmapped, cc);
1404                 /* collapse_huge_page will return with the mmap_lock released */
1405                 *mmap_locked = false;
1406         }
1407 out:
1408         trace_mm_khugepaged_scan_pmd(mm, &folio->page, writable, referenced,
1409                                      none_or_zero, result, unmapped);
1410         return result;
1411 }
1412
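/*
 * Called with khugepaged_mm_lock held: if the mm has exited, unhash and
 * free the slot and drop the mm reference taken in __khugepaged_enter().
 */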
1413 static void collect_mm_slot(struct khugepaged_mm_slot *mm_slot)
1414 {
1415         struct mm_slot *slot = &mm_slot->slot;
1416         struct mm_struct *mm = slot->mm;
1417
1418         lockdep_assert_held(&khugepaged_mm_lock);
1419
1420         if (hpage_collapse_test_exit(mm)) {
1421                 /* free mm_slot */
1422                 hash_del(&slot->hash);
1423                 list_del(&slot->mm_node);
1424
1425                 /*
1426                  * Not strictly needed because the mm exited already.
1427                  *
1428                  * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
1429                  */
1430
1431                 /* khugepaged_mm_lock actually not necessary for the below */
1432                 mm_slot_free(mm_slot_cache, mm_slot);
1433                 mmdrop(mm);
1434         }
1435 }
1436
1437 #ifdef CONFIG_SHMEM
1438 /* hpage must be locked, and mmap_lock must be held */
1439 static int set_huge_pmd(struct vm_area_struct *vma, unsigned long addr,
1440                         pmd_t *pmdp, struct page *hpage)
1441 {
1442         struct vm_fault vmf = {
1443                 .vma = vma,
1444                 .address = addr,
1445                 .flags = 0,
1446                 .pmd = pmdp,
1447         };
1448
1449         VM_BUG_ON(!PageTransHuge(hpage));
1450         mmap_assert_locked(vma->vm_mm);
1451
1452         if (do_set_pmd(&vmf, hpage))
1453                 return SCAN_FAIL;
1454
1455         get_page(hpage);
1456         return SCAN_SUCCEED;
1457 }
1458
1459 /**
1460  * collapse_pte_mapped_thp - Try to collapse a pte-mapped THP for mm at
1461  * address haddr.
1462  *
1463  * @mm: process address space where collapse happens
1464  * @addr: THP collapse address
1465  * @install_pmd: If a huge PMD should be installed
1466  *
1467  * This function checks whether all the PTEs in the PMD are pointing to the
1468  * right THP. If so, retract the page table so the THP can be refaulted in
1469  * as pmd-mapped. Possibly install a huge PMD mapping the THP.
1470  */
1471 int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
1472                             bool install_pmd)
1473 {
1474         struct mmu_notifier_range range;
1475         bool notified = false;
1476         unsigned long haddr = addr & HPAGE_PMD_MASK;
1477         struct vm_area_struct *vma = vma_lookup(mm, haddr);
1478         struct folio *folio;
1479         pte_t *start_pte, *pte;
1480         pmd_t *pmd, pgt_pmd;
1481         spinlock_t *pml = NULL, *ptl;
1482         int nr_ptes = 0, result = SCAN_FAIL;
1483         int i;
1484
1485         mmap_assert_locked(mm);
1486
1487         /* First check that a VMA was found, in case page tables are being torn down */
1488         if (!vma || !vma->vm_file ||
1489             !range_in_vma(vma, haddr, haddr + HPAGE_PMD_SIZE))
1490                 return SCAN_VMA_CHECK;
1491
1492         /* Fast check before locking page if already PMD-mapped */
1493         result = find_pmd_or_thp_or_none(mm, haddr, &pmd);
1494         if (result == SCAN_PMD_MAPPED)
1495                 return result;
1496
1497         /*
1498          * If we are here, we've succeeded in replacing all the native pages
1499          * in the page cache with a single hugepage. If a mm were to fault-in
1500          * this memory (mapped by a suitably aligned VMA), we'd get the hugepage
1501          * and map it by a PMD, regardless of sysfs THP settings. As such, let's
1502          * analogously elide sysfs THP settings here.
1503          */
1504         if (!thp_vma_allowable_order(vma, vma->vm_flags, 0, PMD_ORDER))
1505                 return SCAN_VMA_CHECK;
1506
1507         /* Keep pmd pgtable for uffd-wp; see comment in retract_page_tables() */
1508         if (userfaultfd_wp(vma))
1509                 return SCAN_PTE_UFFD_WP;
1510
1511         folio = filemap_lock_folio(vma->vm_file->f_mapping,
1512                                linear_page_index(vma, haddr));
1513         if (IS_ERR(folio))
1514                 return SCAN_PAGE_NULL;
1515
1516         if (folio_order(folio) != HPAGE_PMD_ORDER) {
1517                 result = SCAN_PAGE_COMPOUND;
1518                 goto drop_folio;
1519         }
1520
1521         result = find_pmd_or_thp_or_none(mm, haddr, &pmd);
1522         switch (result) {
1523         case SCAN_SUCCEED:
1524                 break;
1525         case SCAN_PMD_NONE:
1526                 /*
1527                  * All pte entries have been removed and pmd cleared.
1528                  * Skip all the pte checks and just update the pmd mapping.
1529                  */
1530                 goto maybe_install_pmd;
1531         default:
1532                 goto drop_folio;
1533         }
1534
1535         result = SCAN_FAIL;
1536         start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
1537         if (!start_pte)         /* mmap_lock + page lock should prevent this */
1538                 goto drop_folio;
1539
1540         /* step 1: check all mapped PTEs point to the right huge page */
1541         for (i = 0, addr = haddr, pte = start_pte;
1542              i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
1543                 struct page *page;
1544                 pte_t ptent = ptep_get(pte);
1545
1546                 /* empty pte, skip */
1547                 if (pte_none(ptent))
1548                         continue;
1549
1550                 /* page swapped out, abort */
1551                 if (!pte_present(ptent)) {
1552                         result = SCAN_PTE_NON_PRESENT;
1553                         goto abort;
1554                 }
1555
1556                 page = vm_normal_page(vma, addr, ptent);
1557                 if (WARN_ON_ONCE(page && is_zone_device_page(page)))
1558                         page = NULL;
1559                 /*
1560                  * Note that uprobe, debugger, or MAP_PRIVATE may change the
1561                  * page table, but the new page will not be a subpage of hpage.
1562                  */
1563                 if (folio_page(folio, i) != page)
1564                         goto abort;
1565         }
1566
1567         pte_unmap_unlock(start_pte, ptl);
1568         mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
1569                                 haddr, haddr + HPAGE_PMD_SIZE);
1570         mmu_notifier_invalidate_range_start(&range);
1571         notified = true;
1572
1573         /*
1574          * pmd_lock covers a wider range than ptl, and (if split from mm's
1575          * page_table_lock) ptl nests inside pml. The less time we hold pml,
1576          * the better; but userfaultfd's mfill_atomic_pte() on a private VMA
1577          * inserts a valid as-if-COWed PTE without even looking up page cache.
1578          * So the folio's page lock does not protect against it; we must not drop
1579          * ptl before pgt_pmd is removed, so uffd-private VMAs need pml taken now.
1580          */
1581         if (userfaultfd_armed(vma) && !(vma->vm_flags & VM_SHARED))
1582                 pml = pmd_lock(mm, pmd);
1583
1584         start_pte = pte_offset_map_nolock(mm, pmd, haddr, &ptl);
1585         if (!start_pte)         /* mmap_lock + page lock should prevent this */
1586                 goto abort;
1587         if (!pml)
1588                 spin_lock(ptl);
1589         else if (ptl != pml)
1590                 spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
1591
1592         /* step 2: clear page table and adjust rmap */
1593         for (i = 0, addr = haddr, pte = start_pte;
1594              i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
1595                 struct page *page;
1596                 pte_t ptent = ptep_get(pte);
1597
1598                 if (pte_none(ptent))
1599                         continue;
1600                 /*
1601                  * We dropped ptl after the first scan, to do the mmu_notifier:
1602                  * page lock stops more PTEs of the folio being faulted in, but
1603                  * does not stop write faults COWing anon copies from existing
1604                  * PTEs; and does not stop those being swapped out or migrated.
1605                  */
1606                 if (!pte_present(ptent)) {
1607                         result = SCAN_PTE_NON_PRESENT;
1608                         goto abort;
1609                 }
1610                 page = vm_normal_page(vma, addr, ptent);
1611                 if (folio_page(folio, i) != page)
1612                         goto abort;
1613
1614                 /*
1615                  * Must clear entry, or a racing truncate may re-remove it.
1616                  * TLB flush can be left until pmdp_collapse_flush() does it.
1617                  * PTE dirty? Shmem page is already dirty; file is read-only.
1618                  */
1619                 ptep_clear(mm, addr, pte);
1620                 folio_remove_rmap_pte(folio, page, vma);
1621                 nr_ptes++;
1622         }
1623
1624         pte_unmap(start_pte);
1625         if (!pml)
1626                 spin_unlock(ptl);
1627
1628         /* step 3: set proper refcount and mm_counters. */
1629         if (nr_ptes) {
1630                 folio_ref_sub(folio, nr_ptes);
1631                 add_mm_counter(mm, mm_counter_file(folio), -nr_ptes);
1632         }
1633
1634         /* step 4: remove empty page table */
1635         if (!pml) {
1636                 pml = pmd_lock(mm, pmd);
1637                 if (ptl != pml)
1638                         spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
1639         }
1640         pgt_pmd = pmdp_collapse_flush(vma, haddr, pmd);
1641         pmdp_get_lockless_sync();
1642         if (ptl != pml)
1643                 spin_unlock(ptl);
1644         spin_unlock(pml);
1645
1646         mmu_notifier_invalidate_range_end(&range);
1647
1648         mm_dec_nr_ptes(mm);
1649         page_table_check_pte_clear_range(mm, haddr, pgt_pmd);
1650         pte_free_defer(mm, pmd_pgtable(pgt_pmd));
1651
1652 maybe_install_pmd:
1653         /* step 5: install pmd entry */
1654         result = install_pmd
1655                         ? set_huge_pmd(vma, haddr, pmd, &folio->page)
1656                         : SCAN_SUCCEED;
1657         goto drop_folio;
1658 abort:
1659         if (nr_ptes) {
1660                 flush_tlb_mm(mm);
1661                 folio_ref_sub(folio, nr_ptes);
1662                 add_mm_counter(mm, mm_counter_file(folio), -nr_ptes);
1663         }
1664         if (start_pte)
1665                 pte_unmap_unlock(start_pte, ptl);
1666         if (pml && pml != ptl)
1667                 spin_unlock(pml);
1668         if (notified)
1669                 mmu_notifier_invalidate_range_end(&range);
1670 drop_folio:
1671         folio_unlock(folio);
1672         folio_put(folio);
1673         return result;
1674 }
1675
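     /*
      * Walk every VMA that maps @pgoff in @mapping and, where a suitably
      * aligned file-backed range has no anon pages and is not registered for
      * uffd-wp, detach and free the page table covering it so that a later
      * fault can install a huge PMD mapping of the collapsed folio.
      */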
1676 static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
1677 {
1678         struct vm_area_struct *vma;
1679
1680         i_mmap_lock_read(mapping);
1681         vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
1682                 struct mmu_notifier_range range;
1683                 struct mm_struct *mm;
1684                 unsigned long addr;
1685                 pmd_t *pmd, pgt_pmd;
1686                 spinlock_t *pml;
1687                 spinlock_t *ptl;
1688                 bool skipped_uffd = false;
1689
1690                 /*
1691                  * Check vma->anon_vma to exclude MAP_PRIVATE mappings that
1692                  * got written to. These VMAs are likely not worth removing
1693                  * page tables from, as PMD-mapping is likely to be split later.
1694                  */
1695                 if (READ_ONCE(vma->anon_vma))
1696                         continue;
1697
1698                 addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
1699                 if (addr & ~HPAGE_PMD_MASK ||
1700                     vma->vm_end < addr + HPAGE_PMD_SIZE)
1701                         continue;
1702
1703                 mm = vma->vm_mm;
1704                 if (find_pmd_or_thp_or_none(mm, addr, &pmd) != SCAN_SUCCEED)
1705                         continue;
1706
1707                 if (hpage_collapse_test_exit(mm))
1708                         continue;
1709                 /*
1710                  * When a vma is registered with uffd-wp, we cannot recycle
1711                  * the page table because there may be pte markers installed.
1712                  * Other vmas can still have the same file mapped hugely, but
1713                  * skip this one: it will always be mapped in small page size
1714                  * for uffd-wp registered ranges.
1715                  */
1716                 if (userfaultfd_wp(vma))
1717                         continue;
1718
1719                 /* PTEs were notified when unmapped; but now for the PMD? */
1720                 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
1721                                         addr, addr + HPAGE_PMD_SIZE);
1722                 mmu_notifier_invalidate_range_start(&range);
1723
1724                 pml = pmd_lock(mm, pmd);
1725                 ptl = pte_lockptr(mm, pmd);
1726                 if (ptl != pml)
1727                         spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
1728
1729                 /*
1730                  * Huge page lock is still held, so normally the page table
1731                  * must remain empty; and we have already skipped anon_vma
1732                  * and userfaultfd_wp() vmas.  But since the mmap_lock is not
1733                  * held, it is still possible for a racing userfaultfd_ioctl()
1734                  * to have inserted ptes or markers.  Now that we hold ptlock,
1735                  * repeating the anon_vma check protects from one category,
1736                  * and repeating the userfaultfd_wp() check from another.
1737                  */
1738                 if (unlikely(vma->anon_vma || userfaultfd_wp(vma))) {
1739                         skipped_uffd = true;
1740                 } else {
1741                         pgt_pmd = pmdp_collapse_flush(vma, addr, pmd);
1742                         pmdp_get_lockless_sync();
1743                 }
1744
1745                 if (ptl != pml)
1746                         spin_unlock(ptl);
1747                 spin_unlock(pml);
1748
1749                 mmu_notifier_invalidate_range_end(&range);
1750
1751                 if (!skipped_uffd) {
1752                         mm_dec_nr_ptes(mm);
1753                         page_table_check_pte_clear_range(mm, addr, pgt_pmd);
1754                         pte_free_defer(mm, pmd_pgtable(pgt_pmd));
1755                 }
1756         }
1757         i_mmap_unlock_read(mapping);
1758 }
1759
1760 /**
1761  * collapse_file - collapse small filemap/tmpfs/shmem pages into a huge one.
1762  *
1763  * @mm: process address space where collapse happens
1764  * @addr: virtual collapse start address
1765  * @file: file that the collapse operates on
1766  * @start: page cache index where the collapse starts
1767  * @cc: collapse context and scratchpad
1768  *
1769  * Basic scheme is simple, details are more complex:
1770  *  - allocate and lock a new huge page;
1771  *  - scan page cache, locking old pages
1772  *    + swap/gup in pages if necessary;
1773  *  - copy data to new page
1774  *  - handle shmem holes
1775  *    + re-validate that holes weren't filled by someone else
1776  *    + check for userfaultfd
1777  *  - finalize updates to the page cache;
1778  *  - if replacing succeeds:
1779  *    + unlock huge page;
1780  *    + free old pages;
1781  *  - if replacing fails:
1782  *    + unlock old pages
1783  *    + unlock and free huge page;
1784  */
1785 static int collapse_file(struct mm_struct *mm, unsigned long addr,
1786                          struct file *file, pgoff_t start,
1787                          struct collapse_control *cc)
1788 {
1789         struct address_space *mapping = file->f_mapping;
1790         struct page *dst;
1791         struct folio *folio, *tmp, *new_folio;
1792         pgoff_t index = 0, end = start + HPAGE_PMD_NR;
1793         LIST_HEAD(pagelist);
1794         XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
1795         int nr_none = 0, result = SCAN_SUCCEED;
1796         bool is_shmem = shmem_file(file);
1797
1798         VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
1799         VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
1800
1801         result = alloc_charge_folio(&new_folio, mm, cc);
1802         if (result != SCAN_SUCCEED)
1803                 goto out;
1804
1805         __folio_set_locked(new_folio);
1806         if (is_shmem)
1807                 __folio_set_swapbacked(new_folio);
1808         new_folio->index = start;
1809         new_folio->mapping = mapping;
1810
1811         /*
1812          * Ensure we have slots for all the pages in the range.  This is
1813          * almost certainly a no-op because most of the pages must be present.
1814          */
1815         do {
1816                 xas_lock_irq(&xas);
1817                 xas_create_range(&xas);
1818                 if (!xas_error(&xas))
1819                         break;
1820                 xas_unlock_irq(&xas);
1821                 if (!xas_nomem(&xas, GFP_KERNEL)) {
1822                         result = SCAN_FAIL;
1823                         goto rollback;
1824                 }
1825         } while (1);
1826
1827         for (index = start; index < end; index++) {
1828                 xas_set(&xas, index);
1829                 folio = xas_load(&xas);
1830
1831                 VM_BUG_ON(index != xas.xa_index);
1832                 if (is_shmem) {
1833                         if (!folio) {
1834                                 /*
1835                                  * Stop if extent has been truncated or
1836                                  * hole-punched, and is now completely
1837                                  * empty.
1838                                  */
1839                                 if (index == start) {
1840                                         if (!xas_next_entry(&xas, end - 1)) {
1841                                                 result = SCAN_TRUNCATED;
1842                                                 goto xa_locked;
1843                                         }
1844                                 }
1845                                 nr_none++;
1846                                 continue;
1847                         }
1848
1849                         if (xa_is_value(folio) || !folio_test_uptodate(folio)) {
1850                                 xas_unlock_irq(&xas);
1851                                 /* swap in or instantiate fallocated page */
1852                                 if (shmem_get_folio(mapping->host, index,
1853                                                 &folio, SGP_NOALLOC)) {
1854                                         result = SCAN_FAIL;
1855                                         goto xa_unlocked;
1856                                 }
1857                                 /* drain lru cache to help folio_isolate_lru() */
1858                                 lru_add_drain();
1859                         } else if (folio_trylock(folio)) {
1860                                 folio_get(folio);
1861                                 xas_unlock_irq(&xas);
1862                         } else {
1863                                 result = SCAN_PAGE_LOCK;
1864                                 goto xa_locked;
1865                         }
1866                 } else {        /* !is_shmem */
1867                         if (!folio || xa_is_value(folio)) {
1868                                 xas_unlock_irq(&xas);
1869                                 page_cache_sync_readahead(mapping, &file->f_ra,
1870                                                           file, index,
1871                                                           end - index);
1872                                 /* drain lru cache to help folio_isolate_lru() */
1873                                 lru_add_drain();
1874                                 folio = filemap_lock_folio(mapping, index);
1875                                 if (IS_ERR(folio)) {
1876                                         result = SCAN_FAIL;
1877                                         goto xa_unlocked;
1878                                 }
1879                         } else if (folio_test_dirty(folio)) {
1880                                 /*
1881                                  * khugepaged only works on a read-only fd,
1882                                  * so this page is dirty because it hasn't
1883                                  * been flushed since first write. There
1884                                  * won't be new dirty pages.
1885                                  *
1886                                  * Trigger async flush here and hope the
1887                                  * writeback is done when khugepaged
1888                                  * revisits this page.
1889                                  *
1890                                  * This is a one-off situation. We are not
1891                                  * forcing writeback in a loop.
1892                                  */
1893                                 xas_unlock_irq(&xas);
1894                                 filemap_flush(mapping);
1895                                 result = SCAN_FAIL;
1896                                 goto xa_unlocked;
1897                         } else if (folio_test_writeback(folio)) {
1898                                 xas_unlock_irq(&xas);
1899                                 result = SCAN_FAIL;
1900                                 goto xa_unlocked;
1901                         } else if (folio_trylock(folio)) {
1902                                 folio_get(folio);
1903                                 xas_unlock_irq(&xas);
1904                         } else {
1905                                 result = SCAN_PAGE_LOCK;
1906                                 goto xa_locked;
1907                         }
1908                 }
1909
1910                 /*
1911                  * The folio must be locked, so we can drop the i_pages lock
1912                  * without racing with truncate.
1913                  */
1914                 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
1915
1916                 /* make sure the folio is up to date */
1917                 if (unlikely(!folio_test_uptodate(folio))) {
1918                         result = SCAN_FAIL;
1919                         goto out_unlock;
1920                 }
1921
1922                 /*
1923                  * If file was truncated then extended, or hole-punched, before
1924                  * we locked the first folio, then a THP might be there already.
1925                  * This will be discovered on the first iteration.
1926                  */
1927                 if (folio_test_large(folio)) {
1928                         result = folio_order(folio) == HPAGE_PMD_ORDER &&
1929                                         folio->index == start
1930                                         /* Maybe PMD-mapped */
1931                                         ? SCAN_PTE_MAPPED_HUGEPAGE
1932                                         : SCAN_PAGE_COMPOUND;
1933                         goto out_unlock;
1934                 }
1935
1936                 if (folio_mapping(folio) != mapping) {
1937                         result = SCAN_TRUNCATED;
1938                         goto out_unlock;
1939                 }
1940
1941                 if (!is_shmem && (folio_test_dirty(folio) ||
1942                                   folio_test_writeback(folio))) {
1943                         /*
1944                          * khugepaged only works on a read-only fd, so this
1945                          * folio is dirty because it hasn't been flushed
1946                          * since first write.
1947                          */
1948                         result = SCAN_FAIL;
1949                         goto out_unlock;
1950                 }
1951
1952                 if (!folio_isolate_lru(folio)) {
1953                         result = SCAN_DEL_PAGE_LRU;
1954                         goto out_unlock;
1955                 }
1956
1957                 if (!filemap_release_folio(folio, GFP_KERNEL)) {
1958                         result = SCAN_PAGE_HAS_PRIVATE;
1959                         folio_putback_lru(folio);
1960                         goto out_unlock;
1961                 }
1962
1963                 if (folio_mapped(folio))
1964                         try_to_unmap(folio,
1965                                         TTU_IGNORE_MLOCK | TTU_BATCH_FLUSH);
1966
1967                 xas_lock_irq(&xas);
1968
1969                 VM_BUG_ON_FOLIO(folio != xa_load(xas.xa, index), folio);
1970
1971                 /*
1972                  * We control three references to the folio:
1973                  *  - we hold a pin on it;
1974                  *  - one reference from page cache;
1975                  *  - one from folio_isolate_lru();
1976                  * If those are the only references, then any new usage
1977                  * of the folio will have to fetch it from the page
1978                  * cache. That requires locking the folio to handle
1979                  * truncate, so any new usage will be blocked until we
1980                  * unlock folio after collapse/during rollback.
1981                  */
1982                 if (folio_ref_count(folio) != 3) {
1983                         result = SCAN_PAGE_COUNT;
1984                         xas_unlock_irq(&xas);
1985                         folio_putback_lru(folio);
1986                         goto out_unlock;
1987                 }
1988
1989                 /*
1990                  * Accumulate the folios that are being collapsed.
1991                  */
1992                 list_add_tail(&folio->lru, &pagelist);
1993                 continue;
1994 out_unlock:
1995                 folio_unlock(folio);
1996                 folio_put(folio);
1997                 goto xa_unlocked;
1998         }
1999
2000         if (!is_shmem) {
2001                 filemap_nr_thps_inc(mapping);
2002                 /*
2003                  * Paired with smp_mb() in do_dentry_open() to ensure
2004                  * i_writecount is up to date and the update to nr_thps is
2005                  * visible. Ensures the page cache will be truncated if the
2006                  * file is opened writable.
2007                  */
2008                 smp_mb();
2009                 if (inode_is_open_for_write(mapping->host)) {
2010                         result = SCAN_FAIL;
2011                         filemap_nr_thps_dec(mapping);
2012                 }
2013         }
2014
2015 xa_locked:
2016         xas_unlock_irq(&xas);
2017 xa_unlocked:
2018
2019         /*
2020          * If collapse is successful, flush must be done now before copying.
2021          * If collapse is unsuccessful, does flush actually need to be done?
2022          * Do it anyway, to clear the state.
2023          */
2024         try_to_unmap_flush();
2025
2026         if (result == SCAN_SUCCEED && nr_none &&
2027             !shmem_charge(mapping->host, nr_none))
2028                 result = SCAN_FAIL;
2029         if (result != SCAN_SUCCEED) {
2030                 nr_none = 0;
2031                 goto rollback;
2032         }
2033
2034         /*
2035          * The old folios are locked, so they won't change anymore.
2036          */
2037         index = start;
2038         dst = folio_page(new_folio, 0);
2039         list_for_each_entry(folio, &pagelist, lru) {
2040                 while (index < folio->index) {
2041                         clear_highpage(dst);
2042                         index++;
2043                         dst++;
2044                 }
2045                 if (copy_mc_highpage(dst, folio_page(folio, 0)) > 0) {
2046                         result = SCAN_COPY_MC;
2047                         goto rollback;
2048                 }
2049                 index++;
2050                 dst++;
2051         }
2052         while (index < end) {
2053                 clear_highpage(dst);
2054                 index++;
2055                 dst++;
2056         }
2057
2058         if (nr_none) {
2059                 struct vm_area_struct *vma;
2060                 int nr_none_check = 0;
2061
2062                 i_mmap_lock_read(mapping);
2063                 xas_lock_irq(&xas);
2064
2065                 xas_set(&xas, start);
2066                 for (index = start; index < end; index++) {
2067                         if (!xas_next(&xas)) {
2068                                 xas_store(&xas, XA_RETRY_ENTRY);
2069                                 if (xas_error(&xas)) {
2070                                         result = SCAN_STORE_FAILED;
2071                                         goto immap_locked;
2072                                 }
2073                                 nr_none_check++;
2074                         }
2075                 }
2076
2077                 if (nr_none != nr_none_check) {
2078                         result = SCAN_PAGE_FILLED;
2079                         goto immap_locked;
2080                 }
2081
2082                 /*
2083                  * If userspace observed a missing page in a VMA with
2084                  * a MODE_MISSING userfaultfd, then it might expect a
2085                  * UFFD_EVENT_PAGEFAULT for that page. If so, we need to
2086                  * roll back to avoid suppressing such an event. Wp/minor
2087                  * userfaultfds don't give userspace any guarantees that
2088                  * the kernel won't fill a missing page with a zero page,
2089                  * so they don't matter here.
2090                  *
2091                  * Any userfaultfds registered after this point will
2092                  * not be able to observe any missing pages due to the
2093                  * previously inserted retry entries.
2094                  */
2095                 vma_interval_tree_foreach(vma, &mapping->i_mmap, start, end) {
2096                         if (userfaultfd_missing(vma)) {
2097                                 result = SCAN_EXCEED_NONE_PTE;
2098                                 goto immap_locked;
2099                         }
2100                 }
2101
2102 immap_locked:
2103                 i_mmap_unlock_read(mapping);
2104                 if (result != SCAN_SUCCEED) {
2105                         xas_set(&xas, start);
2106                         for (index = start; index < end; index++) {
2107                                 if (xas_next(&xas) == XA_RETRY_ENTRY)
2108                                         xas_store(&xas, NULL);
2109                         }
2110
2111                         xas_unlock_irq(&xas);
2112                         goto rollback;
2113                 }
2114         } else {
2115                 xas_lock_irq(&xas);
2116         }
2117
2118         if (is_shmem)
2119                 __lruvec_stat_mod_folio(new_folio, NR_SHMEM_THPS, HPAGE_PMD_NR);
2120         else
2121                 __lruvec_stat_mod_folio(new_folio, NR_FILE_THPS, HPAGE_PMD_NR);
2122
2123         if (nr_none) {
2124                 __lruvec_stat_mod_folio(new_folio, NR_FILE_PAGES, nr_none);
2125                 /* nr_none is always 0 for non-shmem. */
2126                 __lruvec_stat_mod_folio(new_folio, NR_SHMEM, nr_none);
2127         }
2128
2129         /*
2130          * Mark new_folio as uptodate before inserting it into the
2131          * page cache so that it isn't mistaken for a fallocated but
2132          * unwritten page.
2133          */
2134         folio_mark_uptodate(new_folio);
2135         folio_ref_add(new_folio, HPAGE_PMD_NR - 1);
2136
2137         if (is_shmem)
2138                 folio_mark_dirty(new_folio);
2139         folio_add_lru(new_folio);
2140
2141         /* Join all the small entries into a single multi-index entry. */
2142         xas_set_order(&xas, start, HPAGE_PMD_ORDER);
2143         xas_store(&xas, new_folio);
2144         WARN_ON_ONCE(xas_error(&xas));
2145         xas_unlock_irq(&xas);
2146
2147         /*
2148          * Remove pte page tables, so we can re-fault the page as huge.
2149          * If MADV_COLLAPSE, adjust result to call collapse_pte_mapped_thp().
2150          */
2151         retract_page_tables(mapping, start);
2152         if (cc && !cc->is_khugepaged)
2153                 result = SCAN_PTE_MAPPED_HUGEPAGE;
2154         folio_unlock(new_folio);
2155
2156         /*
2157          * The collapse has succeeded, so free the old folios.
2158          */
2159         list_for_each_entry_safe(folio, tmp, &pagelist, lru) {
2160                 list_del(&folio->lru);
2161                 folio->mapping = NULL;
2162                 folio_clear_active(folio);
2163                 folio_clear_unevictable(folio);
2164                 folio_unlock(folio);
2165                 folio_put_refs(folio, 3);
2166         }
2167
2168         goto out;
2169
2170 rollback:
2171         /* Something went wrong: roll back page cache changes */
2172         if (nr_none) {
2173                 xas_lock_irq(&xas);
2174                 mapping->nrpages -= nr_none;
2175                 xas_unlock_irq(&xas);
2176                 shmem_uncharge(mapping->host, nr_none);
2177         }
2178
2179         list_for_each_entry_safe(folio, tmp, &pagelist, lru) {
2180                 list_del(&folio->lru);
2181                 folio_unlock(folio);
2182                 folio_putback_lru(folio);
2183                 folio_put(folio);
2184         }
2185         /*
2186          * Undo the updates of filemap_nr_thps_inc for non-SHMEM
2187          * files only. This undo is not needed unless failure is
2188          * due to SCAN_COPY_MC.
2189          */
2190         if (!is_shmem && result == SCAN_COPY_MC) {
2191                 filemap_nr_thps_dec(mapping);
2192                 /*
2193                  * Paired with smp_mb() in do_dentry_open() to
2194                  * ensure the update to nr_thps is visible.
2195                  */
2196                 smp_mb();
2197         }
2198
2199         new_folio->mapping = NULL;
2200
2201         folio_unlock(new_folio);
2202         folio_put(new_folio);
2203 out:
2204         VM_BUG_ON(!list_empty(&pagelist));
2205         trace_mm_khugepaged_collapse_file(mm, new_folio, index, is_shmem, addr, file, HPAGE_PMD_NR, result);
2206         return result;
2207 }
2208
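     /*
      * Scan the HPAGE_PMD_NR page cache entries starting at @start under RCU,
      * counting present pages and swap entries and recording their nodes in
      * cc->node_load[].  Bail out on large folios, unexpected refcounts, pages
      * off the LRU, or (for khugepaged) too many swap entries or too few
      * present pages; otherwise try collapse_file().
      */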
2209 static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
2210                                     struct file *file, pgoff_t start,
2211                                     struct collapse_control *cc)
2212 {
2213         struct folio *folio = NULL;
2214         struct address_space *mapping = file->f_mapping;
2215         XA_STATE(xas, &mapping->i_pages, start);
2216         int present, swap;
2217         int node = NUMA_NO_NODE;
2218         int result = SCAN_SUCCEED;
2219
2220         present = 0;
2221         swap = 0;
2222         memset(cc->node_load, 0, sizeof(cc->node_load));
2223         nodes_clear(cc->alloc_nmask);
2224         rcu_read_lock();
2225         xas_for_each(&xas, folio, start + HPAGE_PMD_NR - 1) {
2226                 if (xas_retry(&xas, folio))
2227                         continue;
2228
2229                 if (xa_is_value(folio)) {
2230                         ++swap;
2231                         if (cc->is_khugepaged &&
2232                             swap > khugepaged_max_ptes_swap) {
2233                                 result = SCAN_EXCEED_SWAP_PTE;
2234                                 count_vm_event(THP_SCAN_EXCEED_SWAP_PTE);
2235                                 break;
2236                         }
2237                         continue;
2238                 }
2239
2240                 /*
2241                  * TODO: khugepaged should compact smaller compound pages
2242                  * into a PMD-sized page
2243                  */
2244                 if (folio_test_large(folio)) {
2245                         result = folio_order(folio) == HPAGE_PMD_ORDER &&
2246                                         folio->index == start
2247                                         /* Maybe PMD-mapped */
2248                                         ? SCAN_PTE_MAPPED_HUGEPAGE
2249                                         : SCAN_PAGE_COMPOUND;
2250                         /*
2251                          * For SCAN_PTE_MAPPED_HUGEPAGE, further processing
2252                          * by the caller won't touch the page cache, and so
2253                          * it's safe to skip LRU and refcount checks before
2254                          * returning.
2255                          */
2256                         break;
2257                 }
2258
2259                 node = folio_nid(folio);
2260                 if (hpage_collapse_scan_abort(node, cc)) {
2261                         result = SCAN_SCAN_ABORT;
2262                         break;
2263                 }
2264                 cc->node_load[node]++;
2265
2266                 if (!folio_test_lru(folio)) {
2267                         result = SCAN_PAGE_LRU;
2268                         break;
2269                 }
2270
2271                 if (folio_ref_count(folio) !=
2272                     1 + folio_mapcount(folio) + folio_test_private(folio)) {
2273                         result = SCAN_PAGE_COUNT;
2274                         break;
2275                 }
2276
2277                 /*
2278                  * We probably should check if the folio is referenced
2279                  * here, but nobody would transfer pte_young() to
2280                  * folio_test_referenced() for us.  And rmap walk here
2281                  * is just too costly...
2282                  */
2283
2284                 present++;
2285
2286                 if (need_resched()) {
2287                         xas_pause(&xas);
2288                         cond_resched_rcu();
2289                 }
2290         }
2291         rcu_read_unlock();
2292
2293         if (result == SCAN_SUCCEED) {
2294                 if (cc->is_khugepaged &&
2295                     present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
2296                         result = SCAN_EXCEED_NONE_PTE;
2297                         count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
2298                 } else {
2299                         result = collapse_file(mm, addr, file, start, cc);
2300                 }
2301         }
2302
2303         trace_mm_khugepaged_scan_file(mm, folio, file, present, swap, result);
2304         return result;
2305 }
2306 #else
2307 static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
2308                                     struct file *file, pgoff_t start,
2309                                     struct collapse_control *cc)
2310 {
2311         BUILD_BUG();
2312 }
2313 #endif
2314
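     /*
      * Scan up to @pages worth of ptes, resuming from khugepaged_scan.mm_slot
      * and khugepaged_scan.address.  For each eligible VMA, try the file or
      * anon collapse path on every PMD-sized step.  Returns the progress made
      * and records the last collapse result in *result; the current mm_slot is
      * released when its mm exits or has been fully scanned.
      */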
2315 static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
2316                                             struct collapse_control *cc)
2317         __releases(&khugepaged_mm_lock)
2318         __acquires(&khugepaged_mm_lock)
2319 {
2320         struct vma_iterator vmi;
2321         struct khugepaged_mm_slot *mm_slot;
2322         struct mm_slot *slot;
2323         struct mm_struct *mm;
2324         struct vm_area_struct *vma;
2325         int progress = 0;
2326
2327         VM_BUG_ON(!pages);
2328         lockdep_assert_held(&khugepaged_mm_lock);
2329         *result = SCAN_FAIL;
2330
2331         if (khugepaged_scan.mm_slot) {
2332                 mm_slot = khugepaged_scan.mm_slot;
2333                 slot = &mm_slot->slot;
2334         } else {
2335                 slot = list_entry(khugepaged_scan.mm_head.next,
2336                                      struct mm_slot, mm_node);
2337                 mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
2338                 khugepaged_scan.address = 0;
2339                 khugepaged_scan.mm_slot = mm_slot;
2340         }
2341         spin_unlock(&khugepaged_mm_lock);
2342
2343         mm = slot->mm;
2344         /*
2345          * Don't wait for semaphore (to avoid long wait times).  Just move to
2346          * the next mm on the list.
2347          */
2348         vma = NULL;
2349         if (unlikely(!mmap_read_trylock(mm)))
2350                 goto breakouterloop_mmap_lock;
2351
2352         progress++;
2353         if (unlikely(hpage_collapse_test_exit_or_disable(mm)))
2354                 goto breakouterloop;
2355
2356         vma_iter_init(&vmi, mm, khugepaged_scan.address);
2357         for_each_vma(vmi, vma) {
2358                 unsigned long hstart, hend;
2359
2360                 cond_resched();
2361                 if (unlikely(hpage_collapse_test_exit_or_disable(mm))) {
2362                         progress++;
2363                         break;
2364                 }
2365                 if (!thp_vma_allowable_order(vma, vma->vm_flags,
2366                                         TVA_ENFORCE_SYSFS, PMD_ORDER)) {
2367 skip:
2368                         progress++;
2369                         continue;
2370                 }
2371                 hstart = round_up(vma->vm_start, HPAGE_PMD_SIZE);
2372                 hend = round_down(vma->vm_end, HPAGE_PMD_SIZE);
2373                 if (khugepaged_scan.address > hend)
2374                         goto skip;
2375                 if (khugepaged_scan.address < hstart)
2376                         khugepaged_scan.address = hstart;
2377                 VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
2378
2379                 while (khugepaged_scan.address < hend) {
2380                         bool mmap_locked = true;
2381
2382                         cond_resched();
2383                         if (unlikely(hpage_collapse_test_exit_or_disable(mm)))
2384                                 goto breakouterloop;
2385
2386                         VM_BUG_ON(khugepaged_scan.address < hstart ||
2387                                   khugepaged_scan.address + HPAGE_PMD_SIZE >
2388                                   hend);
2389                         if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
2390                                 struct file *file = get_file(vma->vm_file);
2391                                 pgoff_t pgoff = linear_page_index(vma,
2392                                                 khugepaged_scan.address);
2393
2394                                 mmap_read_unlock(mm);
2395                                 mmap_locked = false;
2396                                 *result = hpage_collapse_scan_file(mm,
2397                                         khugepaged_scan.address, file, pgoff, cc);
2398                                 fput(file);
2399                                 if (*result == SCAN_PTE_MAPPED_HUGEPAGE) {
2400                                         mmap_read_lock(mm);
2401                                         if (hpage_collapse_test_exit_or_disable(mm))
2402                                                 goto breakouterloop;
2403                                         *result = collapse_pte_mapped_thp(mm,
2404                                                 khugepaged_scan.address, false);
2405                                         if (*result == SCAN_PMD_MAPPED)
2406                                                 *result = SCAN_SUCCEED;
2407                                         mmap_read_unlock(mm);
2408                                 }
2409                         } else {
2410                                 *result = hpage_collapse_scan_pmd(mm, vma,
2411                                         khugepaged_scan.address, &mmap_locked, cc);
2412                         }
2413
2414                         if (*result == SCAN_SUCCEED)
2415                                 ++khugepaged_pages_collapsed;
2416
2417                         /* move to next address */
2418                         khugepaged_scan.address += HPAGE_PMD_SIZE;
2419                         progress += HPAGE_PMD_NR;
2420                         if (!mmap_locked)
2421                                 /*
2422                                  * We released mmap_lock so break loop.  Note
2423                                  * that we drop mmap_lock before all hugepage
2424                                  * allocations, so if allocation fails, we are
2425                                  * guaranteed to break here and report the
2426                                  * correct result back to caller.
2427                                  */
2428                                 goto breakouterloop_mmap_lock;
2429                         if (progress >= pages)
2430                                 goto breakouterloop;
2431                 }
2432         }
2433 breakouterloop:
2434         mmap_read_unlock(mm); /* exit_mmap will destroy ptes after this */
2435 breakouterloop_mmap_lock:
2436
2437         spin_lock(&khugepaged_mm_lock);
2438         VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
2439         /*
2440          * Release the current mm_slot if this mm is about to die, or
2441          * if we scanned all vmas of this mm.
2442          */
2443         if (hpage_collapse_test_exit(mm) || !vma) {
2444                 /*
2445                  * Make sure that if mm_users is reaching zero while
2446                  * khugepaged runs here, khugepaged_exit will find
2447                  * mm_slot not pointing to the exiting mm.
2448                  */
2449                 if (slot->mm_node.next != &khugepaged_scan.mm_head) {
2450                         slot = list_entry(slot->mm_node.next,
2451                                           struct mm_slot, mm_node);
2452                         khugepaged_scan.mm_slot =
2453                                 mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
2454                         khugepaged_scan.address = 0;
2455                 } else {
2456                         khugepaged_scan.mm_slot = NULL;
2457                         khugepaged_full_scans++;
2458                 }
2459
2460                 collect_mm_slot(mm_slot);
2461         }
2462
2463         return progress;
2464 }
2465
2466 static int khugepaged_has_work(void)
2467 {
2468         return !list_empty(&khugepaged_scan.mm_head) &&
2469                 hugepage_flags_enabled();
2470 }
2471
2472 static int khugepaged_wait_event(void)
2473 {
2474         return !list_empty(&khugepaged_scan.mm_head) ||
2475                 kthread_should_stop();
2476 }
2477
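     /*
      * One scan pass: drain the LRU caches, then repeatedly pull work from
      * khugepaged_scan_mm_slot() until pages_to_scan ptes have been covered,
      * the thread is asked to stop, or hugepage allocation keeps failing.
      */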
2478 static void khugepaged_do_scan(struct collapse_control *cc)
2479 {
2480         unsigned int progress = 0, pass_through_head = 0;
2481         unsigned int pages = READ_ONCE(khugepaged_pages_to_scan);
2482         bool wait = true;
2483         int result = SCAN_SUCCEED;
2484
2485         lru_add_drain_all();
2486
2487         while (true) {
2488                 cond_resched();
2489
2490                 if (unlikely(kthread_should_stop()))
2491                         break;
2492
2493                 spin_lock(&khugepaged_mm_lock);
2494                 if (!khugepaged_scan.mm_slot)
2495                         pass_through_head++;
2496                 if (khugepaged_has_work() &&
2497                     pass_through_head < 2)
2498                         progress += khugepaged_scan_mm_slot(pages - progress,
2499                                                             &result, cc);
2500                 else
2501                         progress = pages;
2502                 spin_unlock(&khugepaged_mm_lock);
2503
2504                 if (progress >= pages)
2505                         break;
2506
2507                 if (result == SCAN_ALLOC_HUGE_PAGE_FAIL) {
2508                         /*
2509                          * If we fail to allocate the first time, try to sleep for
2510                          * a while.  If it fails again, cancel the scan.
2511                          */
2512                         if (!wait)
2513                                 break;
2514                         wait = false;
2515                         khugepaged_alloc_sleep();
2516                 }
2517         }
2518 }
2519
2520 static bool khugepaged_should_wakeup(void)
2521 {
2522         return kthread_should_stop() ||
2523                time_after_eq(jiffies, khugepaged_sleep_expire);
2524 }
2525
2526 static void khugepaged_wait_work(void)
2527 {
2528         if (khugepaged_has_work()) {
2529                 const unsigned long scan_sleep_jiffies =
2530                         msecs_to_jiffies(khugepaged_scan_sleep_millisecs);
2531
2532                 if (!scan_sleep_jiffies)
2533                         return;
2534
2535                 khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
2536                 wait_event_freezable_timeout(khugepaged_wait,
2537                                              khugepaged_should_wakeup(),
2538                                              scan_sleep_jiffies);
2539                 return;
2540         }
2541
2542         if (hugepage_flags_enabled())
2543                 wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
2544 }
2545
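     /*
      * Main loop of the khugepaged kernel thread: alternate between scanning
      * and sleeping until asked to stop, then release any mm_slot still held.
      */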
2546 static int khugepaged(void *none)
2547 {
2548         struct khugepaged_mm_slot *mm_slot;
2549
2550         set_freezable();
2551         set_user_nice(current, MAX_NICE);
2552
2553         while (!kthread_should_stop()) {
2554                 khugepaged_do_scan(&khugepaged_collapse_control);
2555                 khugepaged_wait_work();
2556         }
2557
2558         spin_lock(&khugepaged_mm_lock);
2559         mm_slot = khugepaged_scan.mm_slot;
2560         khugepaged_scan.mm_slot = NULL;
2561         if (mm_slot)
2562                 collect_mm_slot(mm_slot);
2563         spin_unlock(&khugepaged_mm_lock);
2564         return 0;
2565 }
2566
2567 static void set_recommended_min_free_kbytes(void)
2568 {
2569         struct zone *zone;
2570         int nr_zones = 0;
2571         unsigned long recommended_min;
2572
2573         if (!hugepage_flags_enabled()) {
2574                 calculate_min_free_kbytes();
2575                 goto update_wmarks;
2576         }
2577
2578         for_each_populated_zone(zone) {
2579                 /*
2580                  * We don't need to worry about fragmentation of
2581                  * ZONE_MOVABLE since it only has movable pages.
2582                  */
2583                 if (zone_idx(zone) > gfp_zone(GFP_USER))
2584                         continue;
2585
2586                 nr_zones++;
2587         }
2588
2589         /* Ensure 2 pageblocks are free to assist fragmentation avoidance */
2590         recommended_min = pageblock_nr_pages * nr_zones * 2;
2591
2592         /*
2593          * Make sure that on average at least two pageblocks are almost free
2594          * of another type, one for a migratetype to fall back to and a
2595          * second to avoid subsequent fallbacks of other types. There are 3
2596          * MIGRATE_TYPES we care about.
2597          */
2598         recommended_min += pageblock_nr_pages * nr_zones *
2599                            MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
2600
2601         /* don't ever allow reserving more than 5% of the lowmem */
2602         recommended_min = min(recommended_min,
2603                               (unsigned long) nr_free_buffer_pages() / 20);
2604         recommended_min <<= (PAGE_SHIFT-10);
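             /*
              * Illustrative example (assuming 4KiB pages, pageblock_nr_pages
              * of 512, MIGRATE_PCPTYPES of 3 and two eligible zones):
              * 512 * 2 * 2 + 512 * 2 * 3 * 3 = 11264 pages before the 5%
              * lowmem clamp, which the shift above converts to
              * 11264 << 2 = 45056 kB.
              */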
2605
2606         if (recommended_min > min_free_kbytes) {
2607                 if (user_min_free_kbytes >= 0)
2608                         pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
2609                                 min_free_kbytes, recommended_min);
2610
2611                 min_free_kbytes = recommended_min;
2612         }
2613
2614 update_wmarks:
2615         setup_per_zone_wmarks();
2616 }
2617
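     /*
      * Start the khugepaged thread if transparent hugepages are enabled and it
      * is not running yet, or stop it if they have been disabled; in either
      * case refresh the recommended min_free_kbytes.
      */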
2618 int start_stop_khugepaged(void)
2619 {
2620         int err = 0;
2621
2622         mutex_lock(&khugepaged_mutex);
2623         if (hugepage_flags_enabled()) {
2624                 if (!khugepaged_thread)
2625                         khugepaged_thread = kthread_run(khugepaged, NULL,
2626                                                         "khugepaged");
2627                 if (IS_ERR(khugepaged_thread)) {
2628                         pr_err("khugepaged: kthread_run(khugepaged) failed\n");
2629                         err = PTR_ERR(khugepaged_thread);
2630                         khugepaged_thread = NULL;
2631                         goto fail;
2632                 }
2633
2634                 if (!list_empty(&khugepaged_scan.mm_head))
2635                         wake_up_interruptible(&khugepaged_wait);
2636         } else if (khugepaged_thread) {
2637                 kthread_stop(khugepaged_thread);
2638                 khugepaged_thread = NULL;
2639         }
2640         set_recommended_min_free_kbytes();
2641 fail:
2642         mutex_unlock(&khugepaged_mutex);
2643         return err;
2644 }
2645
2646 void khugepaged_min_free_kbytes_update(void)
2647 {
2648         mutex_lock(&khugepaged_mutex);
2649         if (hugepage_flags_enabled() && khugepaged_thread)
2650                 set_recommended_min_free_kbytes();
2651         mutex_unlock(&khugepaged_mutex);
2652 }
2653
2654 bool current_is_khugepaged(void)
2655 {
2656         return kthread_func(current) == khugepaged;
2657 }
2658
2659 static int madvise_collapse_errno(enum scan_result r)
2660 {
2661         /*
2662          * MADV_COLLAPSE breaks from existing madvise(2) conventions to provide
2663  * actionable feedback to callers, so they may take an appropriate
2664          * fallback measure depending on the nature of the failure.
2665          */
2666         switch (r) {
2667         case SCAN_ALLOC_HUGE_PAGE_FAIL:
2668                 return -ENOMEM;
2669         case SCAN_CGROUP_CHARGE_FAIL:
2670         case SCAN_EXCEED_NONE_PTE:
2671                 return -EBUSY;
2672         /* Resource temporarily unavailable - trying again might succeed */
2673         case SCAN_PAGE_COUNT:
2674         case SCAN_PAGE_LOCK:
2675         case SCAN_PAGE_LRU:
2676         case SCAN_DEL_PAGE_LRU:
2677         case SCAN_PAGE_FILLED:
2678                 return -EAGAIN;
2679         /*
2680          * Other: Trying again likely not to succeed / error intrinsic to
2681          * specified memory range. khugepaged likely won't be able to collapse
2682          * either.
2683          */
2684         default:
2685                 return -EINVAL;
2686         }
2687 }
2688
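     /*
      * Entry point for madvise(MADV_COLLAPSE): synchronously attempt to
      * collapse each PMD-aligned, PMD-sized region of [start, end), bypassing
      * the sysfs enabled setting and the max_ptes_* limits that gate
      * khugepaged.  Illustrative userspace usage, assuming buf is a populated
      * mapping aligned to HPAGE_PMD_SIZE (typically 2MB):
      *
      *	if (madvise(buf, 2UL << 20, MADV_COLLAPSE))
      *		perror("madvise(MADV_COLLAPSE)");
      */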
2689 int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev,
2690                      unsigned long start, unsigned long end)
2691 {
2692         struct collapse_control *cc;
2693         struct mm_struct *mm = vma->vm_mm;
2694         unsigned long hstart, hend, addr;
2695         int thps = 0, last_fail = SCAN_FAIL;
2696         bool mmap_locked = true;
2697
2698         BUG_ON(vma->vm_start > start);
2699         BUG_ON(vma->vm_end < end);
2700
2701         *prev = vma;
2702
2703         if (!thp_vma_allowable_order(vma, vma->vm_flags, 0, PMD_ORDER))
2704                 return -EINVAL;
2705
2706         cc = kmalloc(sizeof(*cc), GFP_KERNEL);
2707         if (!cc)
2708                 return -ENOMEM;
2709         cc->is_khugepaged = false;
2710
2711         mmgrab(mm);
2712         lru_add_drain_all();
2713
2714         hstart = (start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2715         hend = end & HPAGE_PMD_MASK;
2716
2717         for (addr = hstart; addr < hend; addr += HPAGE_PMD_SIZE) {
2718                 int result = SCAN_FAIL;
2719
2720                 if (!mmap_locked) {
2721                         cond_resched();
2722                         mmap_read_lock(mm);
2723                         mmap_locked = true;
2724                         result = hugepage_vma_revalidate(mm, addr, false, &vma,
2725                                                          cc);
2726                         if (result != SCAN_SUCCEED) {
2727                                 last_fail = result;
2728                                 goto out_nolock;
2729                         }
2730
2731                         hend = min(hend, vma->vm_end & HPAGE_PMD_MASK);
2732                 }
2733                 mmap_assert_locked(mm);
2734                 memset(cc->node_load, 0, sizeof(cc->node_load));
2735                 nodes_clear(cc->alloc_nmask);
2736                 if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
2737                         struct file *file = get_file(vma->vm_file);
2738                         pgoff_t pgoff = linear_page_index(vma, addr);
2739
2740                         mmap_read_unlock(mm);
2741                         mmap_locked = false;
2742                         result = hpage_collapse_scan_file(mm, addr, file, pgoff,
2743                                                           cc);
2744                         fput(file);
2745                 } else {
2746                         result = hpage_collapse_scan_pmd(mm, vma, addr,
2747                                                          &mmap_locked, cc);
2748                 }
2749                 if (!mmap_locked)
2750                         *prev = NULL;  /* Tell caller we dropped mmap_lock */
2751
2752 handle_result:
2753                 switch (result) {
2754                 case SCAN_SUCCEED:
2755                 case SCAN_PMD_MAPPED:
2756                         ++thps;
2757                         break;
2758                 case SCAN_PTE_MAPPED_HUGEPAGE:
2759                         BUG_ON(mmap_locked);
2760                         BUG_ON(*prev);
2761                         mmap_read_lock(mm);
2762                         result = collapse_pte_mapped_thp(mm, addr, true);
2763                         mmap_read_unlock(mm);
2764                         goto handle_result;
2765                 /* Whitelisted set of results where continuing is OK */
2766                 case SCAN_PMD_NULL:
2767                 case SCAN_PTE_NON_PRESENT:
2768                 case SCAN_PTE_UFFD_WP:
2769                 case SCAN_PAGE_RO:
2770                 case SCAN_LACK_REFERENCED_PAGE:
2771                 case SCAN_PAGE_NULL:
2772                 case SCAN_PAGE_COUNT:
2773                 case SCAN_PAGE_LOCK:
2774                 case SCAN_PAGE_COMPOUND:
2775                 case SCAN_PAGE_LRU:
2776                 case SCAN_DEL_PAGE_LRU:
2777                         last_fail = result;
2778                         break;
2779                 default:
2780                         last_fail = result;
2781                         /* Other error, exit */
2782                         goto out_maybelock;
2783                 }
2784         }
2785
2786 out_maybelock:
2787         /* Caller expects us to hold mmap_lock on return */
2788         if (!mmap_locked)
2789                 mmap_read_lock(mm);
2790 out_nolock:
2791         mmap_assert_locked(mm);
2792         mmdrop(mm);
2793         kfree(cc);
2794
2795         return thps == ((hend - hstart) >> HPAGE_PMD_SHIFT) ? 0
2796                         : madvise_collapse_errno(last_fail);
2797 }