1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *      linux/mm/madvise.c
4  *
5  * Copyright (C) 1999  Linus Torvalds
6  * Copyright (C) 2002  Christoph Hellwig
7  */
8
9 #include <linux/mman.h>
10 #include <linux/pagemap.h>
11 #include <linux/syscalls.h>
12 #include <linux/mempolicy.h>
13 #include <linux/page-isolation.h>
14 #include <linux/page_idle.h>
15 #include <linux/userfaultfd_k.h>
16 #include <linux/hugetlb.h>
17 #include <linux/falloc.h>
18 #include <linux/fadvise.h>
19 #include <linux/sched.h>
20 #include <linux/sched/mm.h>
21 #include <linux/mm_inline.h>
22 #include <linux/string.h>
23 #include <linux/uio.h>
24 #include <linux/ksm.h>
25 #include <linux/fs.h>
26 #include <linux/file.h>
27 #include <linux/blkdev.h>
28 #include <linux/backing-dev.h>
29 #include <linux/pagewalk.h>
30 #include <linux/swap.h>
31 #include <linux/swapops.h>
32 #include <linux/shmem_fs.h>
33 #include <linux/mmu_notifier.h>
34
35 #include <asm/tlb.h>
36
37 #include "internal.h"
38 #include "swap.h"
39
40 struct madvise_walk_private {
41         struct mmu_gather *tlb;
42         bool pageout;
43 };
44
45 /*
46  * Any behaviour which results in changes to the vma->vm_flags needs to
47  * take mmap_lock for writing. Others, which simply traverse vmas, need
48  * to only take it for reading.
49  */
50 static int madvise_need_mmap_write(int behavior)
51 {
52         switch (behavior) {
53         case MADV_REMOVE:
54         case MADV_WILLNEED:
55         case MADV_DONTNEED:
56         case MADV_DONTNEED_LOCKED:
57         case MADV_COLD:
58         case MADV_PAGEOUT:
59         case MADV_FREE:
60         case MADV_POPULATE_READ:
61         case MADV_POPULATE_WRITE:
62         case MADV_COLLAPSE:
63                 return 0;
64         default:
65                 /* be safe, default to 1. list exceptions explicitly */
66                 return 1;
67         }
68 }
69
70 #ifdef CONFIG_ANON_VMA_NAME
71 struct anon_vma_name *anon_vma_name_alloc(const char *name)
72 {
73         struct anon_vma_name *anon_name;
74         size_t count;
75
76         /* Add 1 for NUL terminator at the end of the anon_name->name */
77         count = strlen(name) + 1;
78         anon_name = kmalloc(struct_size(anon_name, name, count), GFP_KERNEL);
79         if (anon_name) {
80                 kref_init(&anon_name->kref);
81                 memcpy(anon_name->name, name, count);
82         }
83
84         return anon_name;
85 }
86
87 void anon_vma_name_free(struct kref *kref)
88 {
89         struct anon_vma_name *anon_name =
90                         container_of(kref, struct anon_vma_name, kref);
91         kfree(anon_name);
92 }
93
94 struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
95 {
96         mmap_assert_locked(vma->vm_mm);
97
98         return vma->anon_name;
99 }
100
101 /* mmap_lock should be write-locked */
102 static int replace_anon_vma_name(struct vm_area_struct *vma,
103                                  struct anon_vma_name *anon_name)
104 {
105         struct anon_vma_name *orig_name = anon_vma_name(vma);
106
107         if (!anon_name) {
108                 vma->anon_name = NULL;
109                 anon_vma_name_put(orig_name);
110                 return 0;
111         }
112
113         if (anon_vma_name_eq(orig_name, anon_name))
114                 return 0;
115
116         vma->anon_name = anon_vma_name_reuse(anon_name);
117         anon_vma_name_put(orig_name);
118
119         return 0;
120 }
121 #else /* CONFIG_ANON_VMA_NAME */
122 static int replace_anon_vma_name(struct vm_area_struct *vma,
123                                  struct anon_vma_name *anon_name)
124 {
125         if (anon_name)
126                 return -EINVAL;
127
128         return 0;
129 }
130 #endif /* CONFIG_ANON_VMA_NAME */
131 /*
132  * Update the vm_flags on a region of a vma, splitting it or merging it as
133  * necessary.  Must be called with mmap_lock held for writing; the caller
134  * should ensure anon_name stability by raising its refcount, even when
135  * anon_name belongs to a valid vma, because this function might free that vma.
136  */
137 static int madvise_update_vma(struct vm_area_struct *vma,
138                               struct vm_area_struct **prev, unsigned long start,
139                               unsigned long end, unsigned long new_flags,
140                               struct anon_vma_name *anon_name)
141 {
142         struct mm_struct *mm = vma->vm_mm;
143         int error;
144         VMA_ITERATOR(vmi, mm, start);
145
146         if (new_flags == vma->vm_flags && anon_vma_name_eq(anon_vma_name(vma), anon_name)) {
147                 *prev = vma;
148                 return 0;
149         }
150
151         vma = vma_modify_flags_name(&vmi, *prev, vma, start, end, new_flags,
152                                     anon_name);
153         if (IS_ERR(vma))
154                 return PTR_ERR(vma);
155
156         *prev = vma;
157
158         /* vm_flags is protected by the mmap_lock held in write mode. */
159         vma_start_write(vma);
160         vm_flags_reset(vma, new_flags);
161         if (!vma->vm_file || vma_is_anon_shmem(vma)) {
162                 error = replace_anon_vma_name(vma, anon_name);
163                 if (error)
164                         return error;
165         }
166
167         return 0;
168 }
169
170 #ifdef CONFIG_SWAP
171 static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
172                 unsigned long end, struct mm_walk *walk)
173 {
174         struct vm_area_struct *vma = walk->private;
175         struct swap_iocb *splug = NULL;
176         pte_t *ptep = NULL;
177         spinlock_t *ptl;
178         unsigned long addr;
179
180         for (addr = start; addr < end; addr += PAGE_SIZE) {
181                 pte_t pte;
182                 swp_entry_t entry;
183                 struct folio *folio;
184
185                 if (!ptep++) {
186                         ptep = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
187                         if (!ptep)
188                                 break;
189                 }
190
191                 pte = ptep_get(ptep);
192                 if (!is_swap_pte(pte))
193                         continue;
194                 entry = pte_to_swp_entry(pte);
195                 if (unlikely(non_swap_entry(entry)))
196                         continue;
197
198                 pte_unmap_unlock(ptep, ptl);
199                 ptep = NULL;
200
201                 folio = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
202                                              vma, addr, &splug);
203                 if (folio)
204                         folio_put(folio);
205         }
206
207         if (ptep)
208                 pte_unmap_unlock(ptep, ptl);
209         swap_read_unplug(splug);
210         cond_resched();
211
212         return 0;
213 }
214
215 static const struct mm_walk_ops swapin_walk_ops = {
216         .pmd_entry              = swapin_walk_pmd_entry,
217         .walk_lock              = PGWALK_RDLOCK,
218 };
219
220 static void shmem_swapin_range(struct vm_area_struct *vma,
221                 unsigned long start, unsigned long end,
222                 struct address_space *mapping)
223 {
224         XA_STATE(xas, &mapping->i_pages, linear_page_index(vma, start));
225         pgoff_t end_index = linear_page_index(vma, end) - 1;
226         struct folio *folio;
227         struct swap_iocb *splug = NULL;
228
229         rcu_read_lock();
230         xas_for_each(&xas, folio, end_index) {
231                 unsigned long addr;
232                 swp_entry_t entry;
233
234                 if (!xa_is_value(folio))
235                         continue;
236                 entry = radix_to_swp_entry(folio);
237                 /* There might be swapin error entries in shmem mapping. */
238                 if (non_swap_entry(entry))
239                         continue;
240
241                 addr = vma->vm_start +
242                         ((xas.xa_index - vma->vm_pgoff) << PAGE_SHIFT);
243                 xas_pause(&xas);
244                 rcu_read_unlock();
245
246                 folio = read_swap_cache_async(entry, mapping_gfp_mask(mapping),
247                                              vma, addr, &splug);
248                 if (folio)
249                         folio_put(folio);
250
251                 rcu_read_lock();
252         }
253         rcu_read_unlock();
254         swap_read_unplug(splug);
255 }
256 #endif          /* CONFIG_SWAP */
257
258 /*
259  * Schedule all required I/O operations.  Do not wait for completion.
260  */
261 static long madvise_willneed(struct vm_area_struct *vma,
262                              struct vm_area_struct **prev,
263                              unsigned long start, unsigned long end)
264 {
265         struct mm_struct *mm = vma->vm_mm;
266         struct file *file = vma->vm_file;
267         loff_t offset;
268
269         *prev = vma;
270 #ifdef CONFIG_SWAP
271         if (!file) {
272                 walk_page_range(vma->vm_mm, start, end, &swapin_walk_ops, vma);
273                 lru_add_drain(); /* Push any new pages onto the LRU now */
274                 return 0;
275         }
276
277         if (shmem_mapping(file->f_mapping)) {
278                 shmem_swapin_range(vma, start, end, file->f_mapping);
279                 lru_add_drain(); /* Push any new pages onto the LRU now */
280                 return 0;
281         }
282 #else
283         if (!file)
284                 return -EBADF;
285 #endif
286
287         if (IS_DAX(file_inode(file))) {
288                 /* no bad return value, but ignore advice */
289                 return 0;
290         }
291
292         /*
293          * Filesystem's fadvise may need to take various locks.  We need to
294          * explicitly grab a reference because the vma (and hence the
295          * vma's reference to the file) can go away as soon as we drop
296          * mmap_lock.
297          */
298         *prev = NULL;   /* tell sys_madvise we drop mmap_lock */
299         get_file(file);
300         offset = (loff_t)(start - vma->vm_start)
301                         + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
302         mmap_read_unlock(mm);
303         vfs_fadvise(file, offset, end - start, POSIX_FADV_WILLNEED);
304         fput(file);
305         mmap_read_lock(mm);
306         return 0;
307 }
308
309 static inline bool can_do_file_pageout(struct vm_area_struct *vma)
310 {
311         if (!vma->vm_file)
312                 return false;
313         /*
314          * paging out pagecache only for non-anonymous mappings that correspond
315          * to files the calling process could open for writing (if it tried);
316          * otherwise we'd be including shared non-exclusive mappings, which
317          * opens a side channel.
318          */
319         return inode_owner_or_capable(&nop_mnt_idmap,
320                                       file_inode(vma->vm_file)) ||
321                file_permission(vma->vm_file, MAY_WRITE) == 0;
322 }
323
324 static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
325                                 unsigned long addr, unsigned long end,
326                                 struct mm_walk *walk)
327 {
328         struct madvise_walk_private *private = walk->private;
329         struct mmu_gather *tlb = private->tlb;
330         bool pageout = private->pageout;
331         struct mm_struct *mm = tlb->mm;
332         struct vm_area_struct *vma = walk->vma;
333         pte_t *start_pte, *pte, ptent;
334         spinlock_t *ptl;
335         struct folio *folio = NULL;
336         LIST_HEAD(folio_list);
337         bool pageout_anon_only_filter;
338         unsigned int batch_count = 0;
339
340         if (fatal_signal_pending(current))
341                 return -EINTR;
342
343         pageout_anon_only_filter = pageout && !vma_is_anonymous(vma) &&
344                                         !can_do_file_pageout(vma);
345
346 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
347         if (pmd_trans_huge(*pmd)) {
348                 pmd_t orig_pmd;
349                 unsigned long next = pmd_addr_end(addr, end);
350
351                 tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
352                 ptl = pmd_trans_huge_lock(pmd, vma);
353                 if (!ptl)
354                         return 0;
355
356                 orig_pmd = *pmd;
357                 if (is_huge_zero_pmd(orig_pmd))
358                         goto huge_unlock;
359
360                 if (unlikely(!pmd_present(orig_pmd))) {
361                         VM_BUG_ON(thp_migration_supported() &&
362                                         !is_pmd_migration_entry(orig_pmd));
363                         goto huge_unlock;
364                 }
365
366                 folio = pmd_folio(orig_pmd);
367
368                 /* Do not interfere with other mappings of this folio */
369                 if (folio_likely_mapped_shared(folio))
370                         goto huge_unlock;
371
372                 if (pageout_anon_only_filter && !folio_test_anon(folio))
373                         goto huge_unlock;
374
375                 if (next - addr != HPAGE_PMD_SIZE) {
376                         int err;
377
378                         folio_get(folio);
379                         spin_unlock(ptl);
380                         folio_lock(folio);
381                         err = split_folio(folio);
382                         folio_unlock(folio);
383                         folio_put(folio);
384                         if (!err)
385                                 goto regular_folio;
386                         return 0;
387                 }
388
389                 if (!pageout && pmd_young(orig_pmd)) {
390                         pmdp_invalidate(vma, addr, pmd);
391                         orig_pmd = pmd_mkold(orig_pmd);
392
393                         set_pmd_at(mm, addr, pmd, orig_pmd);
394                         tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
395                 }
396
397                 folio_clear_referenced(folio);
398                 folio_test_clear_young(folio);
399                 if (folio_test_active(folio))
400                         folio_set_workingset(folio);
401                 if (pageout) {
402                         if (folio_isolate_lru(folio)) {
403                                 if (folio_test_unevictable(folio))
404                                         folio_putback_lru(folio);
405                                 else
406                                         list_add(&folio->lru, &folio_list);
407                         }
408                 } else
409                         folio_deactivate(folio);
410 huge_unlock:
411                 spin_unlock(ptl);
412                 if (pageout)
413                         reclaim_pages(&folio_list, true);
414                 return 0;
415         }
416
417 regular_folio:
418 #endif
419         tlb_change_page_size(tlb, PAGE_SIZE);
420 restart:
421         start_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
422         if (!start_pte)
423                 return 0;
424         flush_tlb_batched_pending(mm);
425         arch_enter_lazy_mmu_mode();
426         for (; addr < end; pte++, addr += PAGE_SIZE) {
427                 ptent = ptep_get(pte);
428
429                 if (++batch_count == SWAP_CLUSTER_MAX) {
430                         batch_count = 0;
431                         if (need_resched()) {
432                                 arch_leave_lazy_mmu_mode();
433                                 pte_unmap_unlock(start_pte, ptl);
434                                 cond_resched();
435                                 goto restart;
436                         }
437                 }
438
439                 if (pte_none(ptent))
440                         continue;
441
442                 if (!pte_present(ptent))
443                         continue;
444
445                 folio = vm_normal_folio(vma, addr, ptent);
446                 if (!folio || folio_is_zone_device(folio))
447                         continue;
448
449                 /*
450                  * Creating a THP page is expensive, so split it only if we
451                  * are sure it's worth it. Split it only if we are the sole owner.
452                  */
453                 if (folio_test_large(folio)) {
454                         int err;
455
456                         if (folio_likely_mapped_shared(folio))
457                                 break;
458                         if (pageout_anon_only_filter && !folio_test_anon(folio))
459                                 break;
460                         if (!folio_trylock(folio))
461                                 break;
462                         folio_get(folio);
463                         arch_leave_lazy_mmu_mode();
464                         pte_unmap_unlock(start_pte, ptl);
465                         start_pte = NULL;
466                         err = split_folio(folio);
467                         folio_unlock(folio);
468                         folio_put(folio);
469                         if (err)
470                                 break;
471                         start_pte = pte =
472                                 pte_offset_map_lock(mm, pmd, addr, &ptl);
473                         if (!start_pte)
474                                 break;
475                         arch_enter_lazy_mmu_mode();
476                         pte--;
477                         addr -= PAGE_SIZE;
478                         continue;
479                 }
480
481                 /*
482                  * Do not interfere with other mappings of this folio, and
483                  * skip non-LRU folios.
484                  */
485                 if (!folio_test_lru(folio) || folio_mapcount(folio) != 1)
486                         continue;
487
488                 if (pageout_anon_only_filter && !folio_test_anon(folio))
489                         continue;
490
491                 VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
492
493                 if (!pageout && pte_young(ptent)) {
494                         ptent = ptep_get_and_clear_full(mm, addr, pte,
495                                                         tlb->fullmm);
496                         ptent = pte_mkold(ptent);
497                         set_pte_at(mm, addr, pte, ptent);
498                         tlb_remove_tlb_entry(tlb, pte, addr);
499                 }
500
501                 /*
502                  * We are deactivating a folio to accelerate its reclaim.
503                  * The VM can't reclaim the folio unless we clear PG_young.
504                  * As a side effect, this confuses idle-page tracking,
505                  * which will miss the recent reference history.
506                  */
507                 folio_clear_referenced(folio);
508                 folio_test_clear_young(folio);
509                 if (folio_test_active(folio))
510                         folio_set_workingset(folio);
511                 if (pageout) {
512                         if (folio_isolate_lru(folio)) {
513                                 if (folio_test_unevictable(folio))
514                                         folio_putback_lru(folio);
515                                 else
516                                         list_add(&folio->lru, &folio_list);
517                         }
518                 } else
519                         folio_deactivate(folio);
520         }
521
522         if (start_pte) {
523                 arch_leave_lazy_mmu_mode();
524                 pte_unmap_unlock(start_pte, ptl);
525         }
526         if (pageout)
527                 reclaim_pages(&folio_list, true);
528         cond_resched();
529
530         return 0;
531 }
532
533 static const struct mm_walk_ops cold_walk_ops = {
534         .pmd_entry = madvise_cold_or_pageout_pte_range,
535         .walk_lock = PGWALK_RDLOCK,
536 };
537
538 static void madvise_cold_page_range(struct mmu_gather *tlb,
539                              struct vm_area_struct *vma,
540                              unsigned long addr, unsigned long end)
541 {
542         struct madvise_walk_private walk_private = {
543                 .pageout = false,
544                 .tlb = tlb,
545         };
546
547         tlb_start_vma(tlb, vma);
548         walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private);
549         tlb_end_vma(tlb, vma);
550 }
551
552 static inline bool can_madv_lru_vma(struct vm_area_struct *vma)
553 {
554         return !(vma->vm_flags & (VM_LOCKED|VM_PFNMAP|VM_HUGETLB));
555 }
556
557 static long madvise_cold(struct vm_area_struct *vma,
558                         struct vm_area_struct **prev,
559                         unsigned long start_addr, unsigned long end_addr)
560 {
561         struct mm_struct *mm = vma->vm_mm;
562         struct mmu_gather tlb;
563
564         *prev = vma;
565         if (!can_madv_lru_vma(vma))
566                 return -EINVAL;
567
568         lru_add_drain();
569         tlb_gather_mmu(&tlb, mm);
570         madvise_cold_page_range(&tlb, vma, start_addr, end_addr);
571         tlb_finish_mmu(&tlb);
572
573         return 0;
574 }
575
576 static void madvise_pageout_page_range(struct mmu_gather *tlb,
577                              struct vm_area_struct *vma,
578                              unsigned long addr, unsigned long end)
579 {
580         struct madvise_walk_private walk_private = {
581                 .pageout = true,
582                 .tlb = tlb,
583         };
584
585         tlb_start_vma(tlb, vma);
586         walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private);
587         tlb_end_vma(tlb, vma);
588 }
589
590 static long madvise_pageout(struct vm_area_struct *vma,
591                         struct vm_area_struct **prev,
592                         unsigned long start_addr, unsigned long end_addr)
593 {
594         struct mm_struct *mm = vma->vm_mm;
595         struct mmu_gather tlb;
596
597         *prev = vma;
598         if (!can_madv_lru_vma(vma))
599                 return -EINVAL;
600
601         /*
602          * If the VMA belongs to a private file mapping, there can be private
603          * dirty pages that can be paged out even if this process is neither
604          * the owner of the file nor capable of writing to it. So we still
605          * allow private file mappings to page out their dirty anon pages.
606          */
607         if (!vma_is_anonymous(vma) && (!can_do_file_pageout(vma) &&
608                                 (vma->vm_flags & VM_MAYSHARE)))
609                 return 0;
610
611         lru_add_drain();
612         tlb_gather_mmu(&tlb, mm);
613         madvise_pageout_page_range(&tlb, vma, start_addr, end_addr);
614         tlb_finish_mmu(&tlb);
615
616         return 0;
617 }
618
619 static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
620                                 unsigned long end, struct mm_walk *walk)
621
622 {
623         struct mmu_gather *tlb = walk->private;
624         struct mm_struct *mm = tlb->mm;
625         struct vm_area_struct *vma = walk->vma;
626         spinlock_t *ptl;
627         pte_t *start_pte, *pte, ptent;
628         struct folio *folio;
629         int nr_swap = 0;
630         unsigned long next;
631         int nr, max_nr;
632
633         next = pmd_addr_end(addr, end);
634         if (pmd_trans_huge(*pmd))
635                 if (madvise_free_huge_pmd(tlb, vma, pmd, addr, next))
636                         return 0;
637
638         tlb_change_page_size(tlb, PAGE_SIZE);
639         start_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
640         if (!start_pte)
641                 return 0;
642         flush_tlb_batched_pending(mm);
643         arch_enter_lazy_mmu_mode();
644         for (; addr != end; pte += nr, addr += PAGE_SIZE * nr) {
645                 nr = 1;
646                 ptent = ptep_get(pte);
647
648                 if (pte_none(ptent))
649                         continue;
650                 /*
651                  * If the pte holds a swap entry, just clear the page table
652                  * entry to prevent swap-in, which is more expensive than
653                  * (page allocation + zeroing).
654                  */
655                 if (!pte_present(ptent)) {
656                         swp_entry_t entry;
657
658                         entry = pte_to_swp_entry(ptent);
659                         if (!non_swap_entry(entry)) {
660                                 max_nr = (end - addr) / PAGE_SIZE;
661                                 nr = swap_pte_batch(pte, max_nr, ptent);
662                                 nr_swap -= nr;
663                                 free_swap_and_cache_nr(entry, nr);
664                                 clear_not_present_full_ptes(mm, addr, pte, nr, tlb->fullmm);
665                         } else if (is_hwpoison_entry(entry) ||
666                                    is_poisoned_swp_entry(entry)) {
667                                 pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
668                         }
669                         continue;
670                 }
671
672                 folio = vm_normal_folio(vma, addr, ptent);
673                 if (!folio || folio_is_zone_device(folio))
674                         continue;
675
676                 /*
677                  * If the pmd isn't transhuge but the folio is large and
678                  * owned only by this process, split it and
679                  * deactivate all of its pages.
680                  */
681                 if (folio_test_large(folio)) {
682                         int err;
683
684                         if (folio_likely_mapped_shared(folio))
685                                 break;
686                         if (!folio_trylock(folio))
687                                 break;
688                         folio_get(folio);
689                         arch_leave_lazy_mmu_mode();
690                         pte_unmap_unlock(start_pte, ptl);
691                         start_pte = NULL;
692                         err = split_folio(folio);
693                         folio_unlock(folio);
694                         folio_put(folio);
695                         if (err)
696                                 break;
697                         start_pte = pte =
698                                 pte_offset_map_lock(mm, pmd, addr, &ptl);
699                         if (!start_pte)
700                                 break;
701                         arch_enter_lazy_mmu_mode();
702                         pte--;
703                         addr -= PAGE_SIZE;
704                         continue;
705                 }
706
707                 if (folio_test_swapcache(folio) || folio_test_dirty(folio)) {
708                         if (!folio_trylock(folio))
709                                 continue;
710                         /*
711                          * If folio is shared with others, we mustn't clear
712                          * the folio's dirty flag.
713                          */
714                         if (folio_mapcount(folio) != 1) {
715                                 folio_unlock(folio);
716                                 continue;
717                         }
718
719                         if (folio_test_swapcache(folio) &&
720                             !folio_free_swap(folio)) {
721                                 folio_unlock(folio);
722                                 continue;
723                         }
724
725                         folio_clear_dirty(folio);
726                         folio_unlock(folio);
727                 }
728
729                 if (pte_young(ptent) || pte_dirty(ptent)) {
730                         /*
731                          * Some architectures (e.g. PPC) don't update the TLB
732                          * with set_pte_at() and tlb_remove_tlb_entry(), so for
733                          * portability, remap the pte as old and clean
734                          * after clearing it.
735                          */
736                         ptent = ptep_get_and_clear_full(mm, addr, pte,
737                                                         tlb->fullmm);
738
739                         ptent = pte_mkold(ptent);
740                         ptent = pte_mkclean(ptent);
741                         set_pte_at(mm, addr, pte, ptent);
742                         tlb_remove_tlb_entry(tlb, pte, addr);
743                 }
744                 folio_mark_lazyfree(folio);
745         }
746
747         if (nr_swap)
748                 add_mm_counter(mm, MM_SWAPENTS, nr_swap);
749         if (start_pte) {
750                 arch_leave_lazy_mmu_mode();
751                 pte_unmap_unlock(start_pte, ptl);
752         }
753         cond_resched();
754
755         return 0;
756 }
757
758 static const struct mm_walk_ops madvise_free_walk_ops = {
759         .pmd_entry              = madvise_free_pte_range,
760         .walk_lock              = PGWALK_RDLOCK,
761 };
762
763 static int madvise_free_single_vma(struct vm_area_struct *vma,
764                         unsigned long start_addr, unsigned long end_addr)
765 {
766         struct mm_struct *mm = vma->vm_mm;
767         struct mmu_notifier_range range;
768         struct mmu_gather tlb;
769
770         /* MADV_FREE only works for anonymous vmas at the moment */
771         if (!vma_is_anonymous(vma))
772                 return -EINVAL;
773
774         range.start = max(vma->vm_start, start_addr);
775         if (range.start >= vma->vm_end)
776                 return -EINVAL;
777         range.end = min(vma->vm_end, end_addr);
778         if (range.end <= vma->vm_start)
779                 return -EINVAL;
780         mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
781                                 range.start, range.end);
782
783         lru_add_drain();
784         tlb_gather_mmu(&tlb, mm);
785         update_hiwater_rss(mm);
786
787         mmu_notifier_invalidate_range_start(&range);
788         tlb_start_vma(&tlb, vma);
789         walk_page_range(vma->vm_mm, range.start, range.end,
790                         &madvise_free_walk_ops, &tlb);
791         tlb_end_vma(&tlb, vma);
792         mmu_notifier_invalidate_range_end(&range);
793         tlb_finish_mmu(&tlb);
794
795         return 0;
796 }
797
798 /*
799  * Application no longer needs these pages.  If the pages are dirty,
800  * it's OK to just throw them away.  The app will be more careful about
801  * data it wants to keep.  Be sure to free swap resources too.  The
802  * zap_page_range_single call sets things up for shrink_active_list to actually
803  * free these pages later if no one else has touched them in the meantime,
804  * although we could add these pages to a global reuse list for
805  * shrink_active_list to pick up before reclaiming other pages.
806  *
807  * NB: This interface discards data rather than pushes it out to swap,
808  * as some implementations do.  This has performance implications for
809  * applications like large transactional databases which want to discard
810  * pages in anonymous maps after committing to backing store the data
811  * that was kept in them.  There is no reason to write this data out to
812  * the swap area if the application is discarding it.
813  *
814  * An interface that causes the system to free clean pages and flush
815  * dirty pages is already available as msync(MS_INVALIDATE).
816  */
817 static long madvise_dontneed_single_vma(struct vm_area_struct *vma,
818                                         unsigned long start, unsigned long end)
819 {
820         zap_page_range_single(vma, start, end - start, NULL);
821         return 0;
822 }
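
/*
 * Illustrative userspace sketch (not part of this file): how an allocator
 * might hand an anonymous range back to the kernel with MADV_DONTNEED, as
 * described in the comment above.  The helper name is made up for the
 * example; the range stays mapped, and the next touch simply faults in
 * fresh zero-filled (or re-read file-backed) pages.
 *
 *	#include <stddef.h>
 *	#include <sys/mman.h>
 *
 *	static void release_range(void *buf, size_t len)
 *	{
 *		// Dirty contents are discarded rather than written to swap.
 *		(void)madvise(buf, len, MADV_DONTNEED);
 *	}
 */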
823
824 static bool madvise_dontneed_free_valid_vma(struct vm_area_struct *vma,
825                                             unsigned long start,
826                                             unsigned long *end,
827                                             int behavior)
828 {
829         if (!is_vm_hugetlb_page(vma)) {
830                 unsigned int forbidden = VM_PFNMAP;
831
832                 if (behavior != MADV_DONTNEED_LOCKED)
833                         forbidden |= VM_LOCKED;
834
835                 return !(vma->vm_flags & forbidden);
836         }
837
838         if (behavior != MADV_DONTNEED && behavior != MADV_DONTNEED_LOCKED)
839                 return false;
840         if (start & ~huge_page_mask(hstate_vma(vma)))
841                 return false;
842
843         /*
844          * Madvise callers expect the length to be rounded up to PAGE_SIZE
845          * boundaries, and may be unaware that this VMA uses huge pages.
846          * Avoid unexpected data loss by rounding down the number of
847          * huge pages freed.
848          */
849         *end = ALIGN_DOWN(*end, huge_page_size(hstate_vma(vma)));
850
851         return true;
852 }
853
854 static long madvise_dontneed_free(struct vm_area_struct *vma,
855                                   struct vm_area_struct **prev,
856                                   unsigned long start, unsigned long end,
857                                   int behavior)
858 {
859         struct mm_struct *mm = vma->vm_mm;
860
861         *prev = vma;
862         if (!madvise_dontneed_free_valid_vma(vma, start, &end, behavior))
863                 return -EINVAL;
864
865         if (start == end)
866                 return 0;
867
868         if (!userfaultfd_remove(vma, start, end)) {
869                 *prev = NULL; /* mmap_lock has been dropped, prev is stale */
870
871                 mmap_read_lock(mm);
872                 vma = vma_lookup(mm, start);
873                 if (!vma)
874                         return -ENOMEM;
875                 /*
876                  * Potential end adjustment for hugetlb vma is OK as
877                  * the check below keeps end within vma.
878                  */
879                 if (!madvise_dontneed_free_valid_vma(vma, start, &end,
880                                                      behavior))
881                         return -EINVAL;
882                 if (end > vma->vm_end) {
883                         /*
884                          * Don't fail if end > vma->vm_end. If the old
885                          * vma was split while the mmap_lock was
886                          * released, the effect of the concurrent
887                          * operation does not leave madvise() with an
888                          * undefined result. There may be an
889                          * adjacent next vma that we'll walk
890                          * next. userfaultfd_remove() will generate
891                          * a UFFD_EVENT_REMOVE repetition on the
892                          * end-vma->vm_end range, but the manager can
893                          * handle a repetition fine.
894                          */
895                         end = vma->vm_end;
896                 }
897                 VM_WARN_ON(start >= end);
898         }
899
900         if (behavior == MADV_DONTNEED || behavior == MADV_DONTNEED_LOCKED)
901                 return madvise_dontneed_single_vma(vma, start, end);
902         else if (behavior == MADV_FREE)
903                 return madvise_free_single_vma(vma, start, end);
904         else
905                 return -EINVAL;
906 }
907
908 static long madvise_populate(struct mm_struct *mm, unsigned long start,
909                 unsigned long end, int behavior)
910 {
911         const bool write = behavior == MADV_POPULATE_WRITE;
912         int locked = 1;
913         long pages;
914
915         while (start < end) {
916                 /* Populate (prefault) page tables readable/writable. */
917                 pages = faultin_page_range(mm, start, end, write, &locked);
918                 if (!locked) {
919                         mmap_read_lock(mm);
920                         locked = 1;
921                 }
922                 if (pages < 0) {
923                         switch (pages) {
924                         case -EINTR:
925                                 return -EINTR;
926                         case -EINVAL: /* Incompatible mappings / permissions. */
927                                 return -EINVAL;
928                         case -EHWPOISON:
929                                 return -EHWPOISON;
930                         case -EFAULT: /* VM_FAULT_SIGBUS or VM_FAULT_SIGSEGV */
931                                 return -EFAULT;
932                         default:
933                                 pr_warn_once("%s: unhandled return value: %ld\n",
934                                              __func__, pages);
935                                 fallthrough;
936                         case -ENOMEM: /* No VMA or out of memory. */
937                                 return -ENOMEM;
938                         }
939                 }
940                 start += pages * PAGE_SIZE;
941         }
942         return 0;
943 }
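
/*
 * Illustrative userspace sketch (not part of this file): prefaulting a
 * mapping with MADV_POPULATE_WRITE, which ends up in madvise_populate()
 * above.  Assumes userspace headers new enough to define
 * MADV_POPULATE_WRITE (Linux 5.14+); the helper name and mapping
 * parameters are assumptions for the example only.
 *
 *	#include <stddef.h>
 *	#include <sys/mman.h>
 *
 *	static void *map_prefaulted(size_t len)
 *	{
 *		void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *		if (p == MAP_FAILED)
 *			return NULL;
 *		// Populate page tables writable up front so later stores
 *		// do not take minor faults; gaps in the range yield ENOMEM.
 *		(void)madvise(p, len, MADV_POPULATE_WRITE);
 *		return p;
 *	}
 */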
944
945 /*
946  * Application wants to free up the pages and associated backing store.
947  * This is effectively punching a hole into the middle of a file.
948  */
949 static long madvise_remove(struct vm_area_struct *vma,
950                                 struct vm_area_struct **prev,
951                                 unsigned long start, unsigned long end)
952 {
953         loff_t offset;
954         int error;
955         struct file *f;
956         struct mm_struct *mm = vma->vm_mm;
957
958         *prev = NULL;   /* tell sys_madvise we drop mmap_lock */
959
960         if (vma->vm_flags & VM_LOCKED)
961                 return -EINVAL;
962
963         f = vma->vm_file;
964
965         if (!f || !f->f_mapping || !f->f_mapping->host) {
966                 return -EINVAL;
967         }
968
969         if (!vma_is_shared_maywrite(vma))
970                 return -EACCES;
971
972         offset = (loff_t)(start - vma->vm_start)
973                         + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
974
975         /*
976          * Filesystem's fallocate may need to take i_rwsem.  We need to
977          * explicitly grab a reference because the vma (and hence the
978          * vma's reference to the file) can go away as soon as we drop
979          * mmap_lock.
980          */
981         get_file(f);
982         if (userfaultfd_remove(vma, start, end)) {
983                 /* mmap_lock was not released by userfaultfd_remove() */
984                 mmap_read_unlock(mm);
985         }
986         error = vfs_fallocate(f,
987                                 FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
988                                 offset, end - start);
989         fput(f);
990         mmap_read_lock(mm);
991         return error;
992 }
993
994 /*
995  * Apply an madvise behavior to a region of a vma.  madvise_update_vma
996  * will handle splitting a vm area into separate areas, each area with its own
997  * behavior.
998  */
999 static int madvise_vma_behavior(struct vm_area_struct *vma,
1000                                 struct vm_area_struct **prev,
1001                                 unsigned long start, unsigned long end,
1002                                 unsigned long behavior)
1003 {
1004         int error;
1005         struct anon_vma_name *anon_name;
1006         unsigned long new_flags = vma->vm_flags;
1007
1008         switch (behavior) {
1009         case MADV_REMOVE:
1010                 return madvise_remove(vma, prev, start, end);
1011         case MADV_WILLNEED:
1012                 return madvise_willneed(vma, prev, start, end);
1013         case MADV_COLD:
1014                 return madvise_cold(vma, prev, start, end);
1015         case MADV_PAGEOUT:
1016                 return madvise_pageout(vma, prev, start, end);
1017         case MADV_FREE:
1018         case MADV_DONTNEED:
1019         case MADV_DONTNEED_LOCKED:
1020                 return madvise_dontneed_free(vma, prev, start, end, behavior);
1021         case MADV_NORMAL:
1022                 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
1023                 break;
1024         case MADV_SEQUENTIAL:
1025                 new_flags = (new_flags & ~VM_RAND_READ) | VM_SEQ_READ;
1026                 break;
1027         case MADV_RANDOM:
1028                 new_flags = (new_flags & ~VM_SEQ_READ) | VM_RAND_READ;
1029                 break;
1030         case MADV_DONTFORK:
1031                 new_flags |= VM_DONTCOPY;
1032                 break;
1033         case MADV_DOFORK:
1034                 if (vma->vm_flags & VM_IO)
1035                         return -EINVAL;
1036                 new_flags &= ~VM_DONTCOPY;
1037                 break;
1038         case MADV_WIPEONFORK:
1039                 /* MADV_WIPEONFORK is only supported on anonymous memory. */
1040                 if (vma->vm_file || vma->vm_flags & VM_SHARED)
1041                         return -EINVAL;
1042                 new_flags |= VM_WIPEONFORK;
1043                 break;
1044         case MADV_KEEPONFORK:
1045                 new_flags &= ~VM_WIPEONFORK;
1046                 break;
1047         case MADV_DONTDUMP:
1048                 new_flags |= VM_DONTDUMP;
1049                 break;
1050         case MADV_DODUMP:
1051                 if (!is_vm_hugetlb_page(vma) && new_flags & VM_SPECIAL)
1052                         return -EINVAL;
1053                 new_flags &= ~VM_DONTDUMP;
1054                 break;
1055         case MADV_MERGEABLE:
1056         case MADV_UNMERGEABLE:
1057                 error = ksm_madvise(vma, start, end, behavior, &new_flags);
1058                 if (error)
1059                         goto out;
1060                 break;
1061         case MADV_HUGEPAGE:
1062         case MADV_NOHUGEPAGE:
1063                 error = hugepage_madvise(vma, &new_flags, behavior);
1064                 if (error)
1065                         goto out;
1066                 break;
1067         case MADV_COLLAPSE:
1068                 return madvise_collapse(vma, prev, start, end);
1069         }
1070
1071         anon_name = anon_vma_name(vma);
1072         anon_vma_name_get(anon_name);
1073         error = madvise_update_vma(vma, prev, start, end, new_flags,
1074                                    anon_name);
1075         anon_vma_name_put(anon_name);
1076
1077 out:
1078         /*
1079          * madvise() returns EAGAIN if kernel resources, such as
1080          * slab, are temporarily unavailable.
1081          */
1082         if (error == -ENOMEM)
1083                 error = -EAGAIN;
1084         return error;
1085 }
1086
1087 #ifdef CONFIG_MEMORY_FAILURE
1088 /*
1089  * Error injection support for memory error handling.
1090  */
1091 static int madvise_inject_error(int behavior,
1092                 unsigned long start, unsigned long end)
1093 {
1094         unsigned long size;
1095
1096         if (!capable(CAP_SYS_ADMIN))
1097                 return -EPERM;
1098
1099
1100         for (; start < end; start += size) {
1101                 unsigned long pfn;
1102                 struct page *page;
1103                 int ret;
1104
1105                 ret = get_user_pages_fast(start, 1, 0, &page);
1106                 if (ret != 1)
1107                         return ret;
1108                 pfn = page_to_pfn(page);
1109
1110                 /*
1111                  * When soft offlining hugepages, after migrating the page
1112                  * we dissolve it, therefore in the second loop "page" will
1113                  * no longer be a compound page.
1114                  */
1115                 size = page_size(compound_head(page));
1116
1117                 if (behavior == MADV_SOFT_OFFLINE) {
1118                         pr_info("Soft offlining pfn %#lx at process virtual address %#lx\n",
1119                                  pfn, start);
1120                         ret = soft_offline_page(pfn, MF_COUNT_INCREASED);
1121                 } else {
1122                         pr_info("Injecting memory failure for pfn %#lx at process virtual address %#lx\n",
1123                                  pfn, start);
1124                         ret = memory_failure(pfn, MF_COUNT_INCREASED | MF_SW_SIMULATED);
1125                         if (ret == -EOPNOTSUPP)
1126                                 ret = 0;
1127                 }
1128
1129                 if (ret)
1130                         return ret;
1131         }
1132
1133         return 0;
1134 }
1135 #endif
1136
1137 static bool
1138 madvise_behavior_valid(int behavior)
1139 {
1140         switch (behavior) {
1141         case MADV_DOFORK:
1142         case MADV_DONTFORK:
1143         case MADV_NORMAL:
1144         case MADV_SEQUENTIAL:
1145         case MADV_RANDOM:
1146         case MADV_REMOVE:
1147         case MADV_WILLNEED:
1148         case MADV_DONTNEED:
1149         case MADV_DONTNEED_LOCKED:
1150         case MADV_FREE:
1151         case MADV_COLD:
1152         case MADV_PAGEOUT:
1153         case MADV_POPULATE_READ:
1154         case MADV_POPULATE_WRITE:
1155 #ifdef CONFIG_KSM
1156         case MADV_MERGEABLE:
1157         case MADV_UNMERGEABLE:
1158 #endif
1159 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1160         case MADV_HUGEPAGE:
1161         case MADV_NOHUGEPAGE:
1162         case MADV_COLLAPSE:
1163 #endif
1164         case MADV_DONTDUMP:
1165         case MADV_DODUMP:
1166         case MADV_WIPEONFORK:
1167         case MADV_KEEPONFORK:
1168 #ifdef CONFIG_MEMORY_FAILURE
1169         case MADV_SOFT_OFFLINE:
1170         case MADV_HWPOISON:
1171 #endif
1172                 return true;
1173
1174         default:
1175                 return false;
1176         }
1177 }
1178
1179 static bool process_madvise_behavior_valid(int behavior)
1180 {
1181         switch (behavior) {
1182         case MADV_COLD:
1183         case MADV_PAGEOUT:
1184         case MADV_WILLNEED:
1185         case MADV_COLLAPSE:
1186                 return true;
1187         default:
1188                 return false;
1189         }
1190 }
1191
1192 /*
1193  * Walk the vmas in range [start,end), and call the visit function on each one.
1194  * The visit function will get start and end parameters that cover the overlap
1195  * between the current vma and the original range.  Any unmapped regions in the
1196  * original range will result in this function returning -ENOMEM while still
1197  * calling the visit function on all of the existing vmas in the range.
1198  * Must be called with the mmap_lock held for reading or writing.
1199  */
1200 static
1201 int madvise_walk_vmas(struct mm_struct *mm, unsigned long start,
1202                       unsigned long end, unsigned long arg,
1203                       int (*visit)(struct vm_area_struct *vma,
1204                                    struct vm_area_struct **prev, unsigned long start,
1205                                    unsigned long end, unsigned long arg))
1206 {
1207         struct vm_area_struct *vma;
1208         struct vm_area_struct *prev;
1209         unsigned long tmp;
1210         int unmapped_error = 0;
1211
1212         /*
1213          * If the interval [start,end) covers some unmapped address
1214          * ranges, just ignore them, but return -ENOMEM at the end.
1215          * - different from the way of handling in mlock etc.
1216          */
1217         vma = find_vma_prev(mm, start, &prev);
1218         if (vma && start > vma->vm_start)
1219                 prev = vma;
1220
1221         for (;;) {
1222                 int error;
1223
1224                 /* Still start < end. */
1225                 if (!vma)
1226                         return -ENOMEM;
1227
1228                 /* Here start < (end|vma->vm_end). */
1229                 if (start < vma->vm_start) {
1230                         unmapped_error = -ENOMEM;
1231                         start = vma->vm_start;
1232                         if (start >= end)
1233                                 break;
1234                 }
1235
1236                 /* Here vma->vm_start <= start < (end|vma->vm_end) */
1237                 tmp = vma->vm_end;
1238                 if (end < tmp)
1239                         tmp = end;
1240
1241                 /* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
1242                 error = visit(vma, &prev, start, tmp, arg);
1243                 if (error)
1244                         return error;
1245                 start = tmp;
1246                 if (prev && start < prev->vm_end)
1247                         start = prev->vm_end;
1248                 if (start >= end)
1249                         break;
1250                 if (prev)
1251                         vma = find_vma(mm, prev->vm_end);
1252                 else    /* madvise_remove dropped mmap_lock */
1253                         vma = find_vma(mm, start);
1254         }
1255
1256         return unmapped_error;
1257 }
1258
1259 #ifdef CONFIG_ANON_VMA_NAME
1260 static int madvise_vma_anon_name(struct vm_area_struct *vma,
1261                                  struct vm_area_struct **prev,
1262                                  unsigned long start, unsigned long end,
1263                                  unsigned long anon_name)
1264 {
1265         int error;
1266
1267         /* Only anonymous mappings can be named */
1268         if (vma->vm_file && !vma_is_anon_shmem(vma))
1269                 return -EBADF;
1270
1271         error = madvise_update_vma(vma, prev, start, end, vma->vm_flags,
1272                                    (struct anon_vma_name *)anon_name);
1273
1274         /*
1275          * madvise() returns EAGAIN if kernel resources, such as
1276          * slab, are temporarily unavailable.
1277          */
1278         if (error == -ENOMEM)
1279                 error = -EAGAIN;
1280         return error;
1281 }
1282
1283 int madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
1284                           unsigned long len_in, struct anon_vma_name *anon_name)
1285 {
1286         unsigned long end;
1287         unsigned long len;
1288
1289         if (start & ~PAGE_MASK)
1290                 return -EINVAL;
1291         len = (len_in + ~PAGE_MASK) & PAGE_MASK;
1292
1293         /* Check to see whether len was rounded up from small -ve to zero */
1294         if (len_in && !len)
1295                 return -EINVAL;
1296
1297         end = start + len;
1298         if (end < start)
1299                 return -EINVAL;
1300
1301         if (end == start)
1302                 return 0;
1303
1304         return madvise_walk_vmas(mm, start, end, (unsigned long)anon_name,
1305                                  madvise_vma_anon_name);
1306 }
1307 #endif /* CONFIG_ANON_VMA_NAME */
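
/*
 * Illustrative userspace sketch (not part of this file): naming an
 * anonymous mapping, which reaches madvise_set_anon_name() above through
 * prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, ...).  Requires a kernel built
 * with CONFIG_ANON_VMA_NAME; the fallback constant values below mirror
 * <linux/prctl.h> in case older userspace headers lack them.
 *
 *	#include <stddef.h>
 *	#include <sys/prctl.h>
 *
 *	#ifndef PR_SET_VMA
 *	#define PR_SET_VMA		0x53564d41
 *	#define PR_SET_VMA_ANON_NAME	0
 *	#endif
 *
 *	static int name_mapping(void *addr, size_t len, const char *name)
 *	{
 *		// On success the region shows up as "[anon:<name>]" in
 *		// /proc/<pid>/maps; the kernel copies the string.
 *		return prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME,
 *			     (unsigned long)addr, len, (unsigned long)name);
 *	}
 */
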
1308 /*
1309  * The madvise(2) system call.
1310  *
1311  * Applications can use madvise() to advise the kernel how it should
1312  * handle paging I/O in this VM area.  The idea is to help the kernel
1313  * use appropriate read-ahead and caching techniques.  The information
1314  * provided is advisory only, and can be safely disregarded by the
1315  * kernel without affecting the correct operation of the application.
1316  *
1317  * behavior values:
1318  *  MADV_NORMAL - the default behavior is to read clusters.  This
1319  *              results in some read-ahead and read-behind.
1320  *  MADV_RANDOM - the system should read the minimum amount of data
1321  *              on any access, since it is unlikely that the application
1322  *              will need more than what it asks for.
1323  *  MADV_SEQUENTIAL - pages in the given range will probably be accessed
1324  *              once, so they can be aggressively read ahead, and
1325  *              can be freed soon after they are accessed.
1326  *  MADV_WILLNEED - the application is notifying the system to read
1327  *              some pages ahead.
1328  *  MADV_DONTNEED - the application is finished with the given range,
1329  *              so the kernel can free resources associated with it.
1330  *  MADV_FREE - the application marks pages in the given range as lazy free,
1331  *              where actual purges are postponed until memory pressure happens.
1332  *  MADV_REMOVE - the application wants to free up the given range of
1333  *              pages and associated backing store.
1334  *  MADV_DONTFORK - omit this area from child's address space when forking:
1335  *              typically, to avoid COWing pages pinned by get_user_pages().
1336  *  MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking.
1337  *  MADV_WIPEONFORK - present the child process with zero-filled memory in this
1338  *              range after a fork.
1339  *  MADV_KEEPONFORK - undo the effect of MADV_WIPEONFORK
1340  *  MADV_HWPOISON - trigger memory error handler as if the given memory range
1341  *              were corrupted by unrecoverable hardware memory failure.
1342  *  MADV_SOFT_OFFLINE - try to soft-offline the given range of memory.
1343  *  MADV_MERGEABLE - the application recommends that KSM try to merge pages in
1344  *              this area with pages of identical content from other such areas.
1345  *  MADV_UNMERGEABLE- cancel MADV_MERGEABLE: no longer merge pages with others.
1346  *  MADV_HUGEPAGE - the application wants to back the given range by transparent
1347  *              huge pages in the future. Existing pages might be coalesced and
1348  *              new pages might be allocated as THP.
1349  *  MADV_NOHUGEPAGE - mark the given range as not worth being backed by
1350  *              transparent huge pages so the existing pages will not be
1351  *              coalesced into THP and new pages will not be allocated as THP.
1352  *  MADV_COLLAPSE - synchronously coalesce pages into new THP.
1353  *  MADV_DONTDUMP - the application wants to prevent pages in the given range
1354  *              from being included in its core dump.
1355  *  MADV_DODUMP - cancel MADV_DONTDUMP: no longer exclude from core dump.
1356  *  MADV_COLD - the application is not expected to use this memory soon,
1357  *              deactivate pages in this range so that they can be reclaimed
1358  *              easily if memory pressure happens.
1359  *  MADV_PAGEOUT - the application is not expected to use this memory soon,
1360  *              page out the pages in this range immediately.
1361  *  MADV_POPULATE_READ - populate (prefault) page tables readable by
1362  *              triggering read faults if required
1363  *  MADV_POPULATE_WRITE - populate (prefault) page tables writable by
1364  *              triggering write faults if required
1365  *
1366  * return values:
1367  *  zero    - success
1368  *  -EINVAL - start + len < 0, start is not page-aligned,
1369  *              "behavior" is not a valid value, or the application
1370  *              is attempting to release locked or shared pages,
1371  *              or the specified address range includes a file, hugetlb,
1372  *              MAP_SHARED or VM_PFNMAP range.
1373  *  -ENOMEM - addresses in the specified range are not currently
1374  *              mapped, or are outside the AS of the process.
1375  *  -EIO    - an I/O error occurred while paging in data.
1376  *  -EBADF  - map exists, but area maps something that isn't a file.
1377  *  -EAGAIN - a kernel resource was temporarily unavailable.
1378  */
1379 int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior)
1380 {
1381         unsigned long end;
1382         int error;
1383         int write;
1384         size_t len;
1385         struct blk_plug plug;
1386
1387         if (!madvise_behavior_valid(behavior))
1388                 return -EINVAL;
1389
1390         if (!PAGE_ALIGNED(start))
1391                 return -EINVAL;
1392         len = PAGE_ALIGN(len_in);
1393
1394         /* Check to see whether len was rounded up from small -ve to zero */
1395         if (len_in && !len)
1396                 return -EINVAL;
1397
1398         end = start + len;
1399         if (end < start)
1400                 return -EINVAL;
1401
1402         if (end == start)
1403                 return 0;
1404
1405 #ifdef CONFIG_MEMORY_FAILURE
1406         if (behavior == MADV_HWPOISON || behavior == MADV_SOFT_OFFLINE)
1407                 return madvise_inject_error(behavior, start, start + len_in);
1408 #endif
1409
1410         write = madvise_need_mmap_write(behavior);
1411         if (write) {
1412                 if (mmap_write_lock_killable(mm))
1413                         return -EINTR;
1414         } else {
1415                 mmap_read_lock(mm);
1416         }
1417
1418         start = untagged_addr_remote(mm, start);
1419         end = start + len;
1420
1421         blk_start_plug(&plug);
1422         switch (behavior) {
1423         case MADV_POPULATE_READ:
1424         case MADV_POPULATE_WRITE:
1425                 error = madvise_populate(mm, start, end, behavior);
1426                 break;
1427         default:
1428                 error = madvise_walk_vmas(mm, start, end, behavior,
1429                                           madvise_vma_behavior);
1430                 break;
1431         }
1432         blk_finish_plug(&plug);
1433         if (write)
1434                 mmap_write_unlock(mm);
1435         else
1436                 mmap_read_unlock(mm);
1437
1438         return error;
1439 }
1440
1441 SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
1442 {
1443         return do_madvise(current->mm, start, len_in, behavior);
1444 }
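
/*
 * Illustrative userspace sketch (not part of this file): a minimal caller
 * of the madvise() syscall defined above, using MADV_COLD as documented in
 * the comment before do_madvise().  The 64 MiB size is an arbitrary
 * assumption, and MADV_COLD needs Linux 5.4+ userspace headers.
 *
 *	#include <stdio.h>
 *	#include <sys/mman.h>
 *
 *	int main(void)
 *	{
 *		size_t len = 64UL << 20;
 *		void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *		if (p == MAP_FAILED)
 *			return 1;
 *		// Non-destructive hint: deactivate the range so it is
 *		// reclaimed first under memory pressure.
 *		if (madvise(p, len, MADV_COLD))
 *			perror("madvise");
 *		munmap(p, len);
 *		return 0;
 *	}
 */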
1445
1446 SYSCALL_DEFINE5(process_madvise, int, pidfd, const struct iovec __user *, vec,
1447                 size_t, vlen, int, behavior, unsigned int, flags)
1448 {
1449         ssize_t ret;
1450         struct iovec iovstack[UIO_FASTIOV];
1451         struct iovec *iov = iovstack;
1452         struct iov_iter iter;
1453         struct task_struct *task;
1454         struct mm_struct *mm;
1455         size_t total_len;
1456         unsigned int f_flags;
1457
1458         if (flags != 0) {
1459                 ret = -EINVAL;
1460                 goto out;
1461         }
1462
1463         ret = import_iovec(ITER_DEST, vec, vlen, ARRAY_SIZE(iovstack), &iov, &iter);
1464         if (ret < 0)
1465                 goto out;
1466
1467         task = pidfd_get_task(pidfd, &f_flags);
1468         if (IS_ERR(task)) {
1469                 ret = PTR_ERR(task);
1470                 goto free_iov;
1471         }
1472
1473         if (!process_madvise_behavior_valid(behavior)) {
1474                 ret = -EINVAL;
1475                 goto release_task;
1476         }
1477
1478         /* Require PTRACE_MODE_READ to avoid leaking ASLR metadata. */
1479         mm = mm_access(task, PTRACE_MODE_READ_FSCREDS);
1480         if (IS_ERR_OR_NULL(mm)) {
1481                 ret = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
1482                 goto release_task;
1483         }
1484
1485         /*
1486          * Require CAP_SYS_NICE for influencing process performance. Note that
1487          * only non-destructive hints are currently supported.
1488          */
1489         if (!capable(CAP_SYS_NICE)) {
1490                 ret = -EPERM;
1491                 goto release_mm;
1492         }
1493
1494         total_len = iov_iter_count(&iter);
1495
1496         while (iov_iter_count(&iter)) {
1497                 ret = do_madvise(mm, (unsigned long)iter_iov_addr(&iter),
1498                                         iter_iov_len(&iter), behavior);
1499                 if (ret < 0)
1500                         break;
1501                 iov_iter_advance(&iter, iter_iov_len(&iter));
1502         }
1503
1504         ret = (total_len - iov_iter_count(&iter)) ? : ret;
1505
1506 release_mm:
1507         mmput(mm);
1508 release_task:
1509         put_task_struct(task);
1510 free_iov:
1511         kfree(iov);
1512 out:
1513         return ret;
1514 }
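
/*
 * Illustrative userspace sketch (not part of this file): driving the
 * process_madvise() syscall above to page out a range in another process.
 * Raw syscall() is used since libc wrappers may be missing; it is assumed
 * that the kernel headers define __NR_pidfd_open and __NR_process_madvise,
 * and that the caller has PTRACE_MODE_READ access plus CAP_SYS_NICE, as
 * checked in the implementation above.
 *
 *	#include <sys/mman.h>
 *	#include <sys/syscall.h>
 *	#include <sys/types.h>
 *	#include <sys/uio.h>
 *	#include <unistd.h>
 *
 *	static int pageout_remote(pid_t pid, void *addr, size_t len)
 *	{
 *		struct iovec iov = { .iov_base = addr, .iov_len = len };
 *		long pidfd, ret;
 *
 *		pidfd = syscall(__NR_pidfd_open, pid, 0);
 *		if (pidfd < 0)
 *			return -1;
 *		// flags must be 0; the return value counts advised bytes.
 *		ret = syscall(__NR_process_madvise, (int)pidfd, &iov, 1,
 *			      MADV_PAGEOUT, 0);
 *		close((int)pidfd);
 *		return ret < 0 ? -1 : 0;
 *	}
 */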