[linux.git] / mm / memory.c
457c8996 1// SPDX-License-Identifier: GPL-2.0-only
1da177e4 2/*
3 * linux/mm/memory.c
4 *
5 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
6 */
7
8/*
9 * demand-loading started 01.12.91 - seems it is high on the list of
10 * things wanted, and it should be easy to implement. - Linus
11 */
12
13/*
14 * Ok, demand-loading was easy, shared pages a little bit trickier. Shared
15 * pages started 02.12.91, seems to work. - Linus.
16 *
17 * Tested sharing by executing about 30 /bin/sh: under the old kernel it
18 * would have taken more than the 6M I have free, but it worked well as
19 * far as I could see.
20 *
21 * Also corrected some "invalidate()"s - I wasn't doing enough of them.
22 */
23
24/*
25 * Real VM (paging to/from disk) started 18.12.91. Much more work and
26 * thought has to go into this. Oh, well..
27 * 19.12.91 - works, somewhat. Sometimes I get faults, don't know why.
28 * Found it. Everything seems to work now.
29 * 20.12.91 - Ok, making the swap-device changeable like the root.
30 */
31
32/*
33 * 05.04.94 - Multi-page memory management added for v1.1.
166f61b9 34 * Idea by Alex Bligh ([email protected])
1da177e4 35 *
36 * 16.07.99 - Support of BIGMEM added by Gerhard Wichert, Siemens AG
37 * ([email protected])
38 *
39 * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
40 */
41
42#include <linux/kernel_stat.h>
43#include <linux/mm.h>
36090def 44#include <linux/mm_inline.h>
6e84f315 45#include <linux/sched/mm.h>
f7ccbae4 46#include <linux/sched/coredump.h>
6a3827d7 47#include <linux/sched/numa_balancing.h>
29930025 48#include <linux/sched/task.h>
1da177e4 49#include <linux/hugetlb.h>
50#include <linux/mman.h>
51#include <linux/swap.h>
52#include <linux/highmem.h>
53#include <linux/pagemap.h>
5042db43 54#include <linux/memremap.h>
b073d7f8 55#include <linux/kmsan.h>
9a840895 56#include <linux/ksm.h>
1da177e4 57#include <linux/rmap.h>
b95f1b31 58#include <linux/export.h>
0ff92245 59#include <linux/delayacct.h>
1da177e4 60#include <linux/init.h>
01c8f1c4 61#include <linux/pfn_t.h>
edc79b2a 62#include <linux/writeback.h>
8a9f3ccd 63#include <linux/memcontrol.h>
cddb8a5c 64#include <linux/mmu_notifier.h>
3dc14741 65#include <linux/swapops.h>
66#include <linux/elf.h>
5a0e3ad6 67#include <linux/gfp.h>
4daae3b4 68#include <linux/migrate.h>
2fbc57c5 69#include <linux/string.h>
467b171a 70#include <linux/memory-tiers.h>
1592eef0 71#include <linux/debugfs.h>
6b251fc9 72#include <linux/userfaultfd_k.h>
bc2466e4 73#include <linux/dax.h>
6b31d595 74#include <linux/oom.h>
98fa15f3 75#include <linux/numa.h>
bce617ed 76#include <linux/perf_event.h>
77#include <linux/ptrace.h>
e80d3909 78#include <linux/vmalloc.h>
33024536 79#include <linux/sched/sysctl.h>
1da177e4 80
b3d1411b 81#include <trace/events/kmem.h>
82
6952b61d 83#include <asm/io.h>
33a709b2 84#include <asm/mmu_context.h>
1da177e4 85#include <asm/pgalloc.h>
7c0f6ba6 86#include <linux/uaccess.h>
1da177e4 87#include <asm/tlb.h>
88#include <asm/tlbflush.h>
1da177e4 89
e80d3909 90#include "pgalloc-track.h"
42b77728 91#include "internal.h"
014bb1de 92#include "swap.h"
42b77728 93
af27d940 94#if defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS) && !defined(CONFIG_COMPILE_TEST)
90572890 95#warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid.
75980e97 96#endif
97
a9ee6cf5 98#ifndef CONFIG_NUMA
1da177e4 99unsigned long max_mapnr;
1da177e4 100EXPORT_SYMBOL(max_mapnr);
166f61b9 101
102struct page *mem_map;
1da177e4 103EXPORT_SYMBOL(mem_map);
104#endif
105
5c041f5d 106static vm_fault_t do_fault(struct vm_fault *vmf);
2bad466c 107static vm_fault_t do_anonymous_page(struct vm_fault *vmf);
108static bool vmf_pte_changed(struct vm_fault *vmf);
109
110/*
111 * Return true if the original pte was a uffd-wp pte marker (so the pte was
112 * wr-protected).
113 */
114static bool vmf_orig_pte_uffd_wp(struct vm_fault *vmf)
115{
116 if (!(vmf->flags & FAULT_FLAG_ORIG_PTE_VALID))
117 return false;
118
119 return pte_marker_uffd_wp(vmf->orig_pte);
120}
5c041f5d 121
1da177e4 122/*
123 * A number of key systems in x86 including ioremap() rely on the assumption
124 * that high_memory defines the upper bound on direct map memory, the end
125 * of ZONE_NORMAL. Under CONFIG_DISCONTIG this means that max_low_pfn and
126 * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
127 * and ZONE_HIGHMEM.
128 */
166f61b9 129void *high_memory;
1da177e4 130EXPORT_SYMBOL(high_memory);
1da177e4 131
32a93233 132/*
133 * Randomize the address space (stacks, mmaps, brk, etc.).
134 *
135 * ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization,
136 * as ancient (libc5 based) binaries can segfault. )
137 */
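/*
 * The value is exposed as the "randomize_va_space" sysctl:
 *   0 - address-space randomization disabled
 *   1 - randomize mmap base, stack and VDSO placement
 *   2 - additionally randomize the heap (brk) start
 * (documented in Documentation/admin-guide/sysctl/kernel.rst)
 */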
138int randomize_va_space __read_mostly =
139#ifdef CONFIG_COMPAT_BRK
140 1;
141#else
142 2;
143#endif
a62eaf15 144
46bdb427 145#ifndef arch_wants_old_prefaulted_pte
146static inline bool arch_wants_old_prefaulted_pte(void)
147{
148 /*
149 * Transitioning a PTE from 'old' to 'young' can be expensive on
150 * some architectures, even if it's performed in hardware. By
151 * default, "false" means prefaulted entries will be 'young'.
152 */
153 return false;
154}
155#endif
156
a62eaf15 157static int __init disable_randmaps(char *s)
158{
159 randomize_va_space = 0;
9b41046c 160 return 1;
a62eaf15 161}
162__setup("norandmaps", disable_randmaps);
163
62eede62 164unsigned long zero_pfn __read_mostly;
0b70068e 165EXPORT_SYMBOL(zero_pfn);
166
166f61b9 167unsigned long highest_memmap_pfn __read_mostly;
168
a13ea5b7 169/*
170 * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
171 */
172static int __init init_zero_pfn(void)
173{
174 zero_pfn = page_to_pfn(ZERO_PAGE(0));
175 return 0;
176}
e720e7d0 177early_initcall(init_zero_pfn);
a62eaf15 178
f1a79412 179void mm_trace_rss_stat(struct mm_struct *mm, int member)
b3d1411b 180{
f1a79412 181 trace_rss_stat(mm, member);
b3d1411b 182}
d559db08 183
1da177e4
LT
184/*
185 * Note: this doesn't free the actual pages themselves. That
186 * has been handled earlier when unmapping all the memory regions.
187 */
9e1b32ca 188static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
189 unsigned long addr)
1da177e4 190{
2f569afd 191 pgtable_t token = pmd_pgtable(*pmd);
e0da382c 192 pmd_clear(pmd);
9e1b32ca 193 pte_free_tlb(tlb, token, addr);
c4812909 194 mm_dec_nr_ptes(tlb->mm);
1da177e4
LT
195}
196
e0da382c 197static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
198 unsigned long addr, unsigned long end,
199 unsigned long floor, unsigned long ceiling)
1da177e4
LT
200{
201 pmd_t *pmd;
202 unsigned long next;
e0da382c 203 unsigned long start;
1da177e4 204
e0da382c 205 start = addr;
1da177e4 206 pmd = pmd_offset(pud, addr);
1da177e4
LT
207 do {
208 next = pmd_addr_end(addr, end);
209 if (pmd_none_or_clear_bad(pmd))
210 continue;
9e1b32ca 211 free_pte_range(tlb, pmd, addr);
1da177e4
LT
212 } while (pmd++, addr = next, addr != end);
213
e0da382c
HD
214 start &= PUD_MASK;
215 if (start < floor)
216 return;
217 if (ceiling) {
218 ceiling &= PUD_MASK;
219 if (!ceiling)
220 return;
1da177e4 221 }
e0da382c
HD
222 if (end - 1 > ceiling - 1)
223 return;
224
225 pmd = pmd_offset(pud, start);
226 pud_clear(pud);
9e1b32ca 227 pmd_free_tlb(tlb, pmd, start);
dc6c9a35 228 mm_dec_nr_pmds(tlb->mm);
1da177e4
LT
229}
230
c2febafc 231static inline void free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
e0da382c
HD
232 unsigned long addr, unsigned long end,
233 unsigned long floor, unsigned long ceiling)
1da177e4
LT
234{
235 pud_t *pud;
236 unsigned long next;
e0da382c 237 unsigned long start;
1da177e4 238
e0da382c 239 start = addr;
c2febafc 240 pud = pud_offset(p4d, addr);
1da177e4
LT
241 do {
242 next = pud_addr_end(addr, end);
243 if (pud_none_or_clear_bad(pud))
244 continue;
e0da382c 245 free_pmd_range(tlb, pud, addr, next, floor, ceiling);
1da177e4
LT
246 } while (pud++, addr = next, addr != end);
247
c2febafc
KS
248 start &= P4D_MASK;
249 if (start < floor)
250 return;
251 if (ceiling) {
252 ceiling &= P4D_MASK;
253 if (!ceiling)
254 return;
255 }
256 if (end - 1 > ceiling - 1)
257 return;
258
259 pud = pud_offset(p4d, start);
260 p4d_clear(p4d);
261 pud_free_tlb(tlb, pud, start);
b4e98d9a 262 mm_dec_nr_puds(tlb->mm);
c2febafc
KS
263}
264
265static inline void free_p4d_range(struct mmu_gather *tlb, pgd_t *pgd,
266 unsigned long addr, unsigned long end,
267 unsigned long floor, unsigned long ceiling)
268{
269 p4d_t *p4d;
270 unsigned long next;
271 unsigned long start;
272
273 start = addr;
274 p4d = p4d_offset(pgd, addr);
275 do {
276 next = p4d_addr_end(addr, end);
277 if (p4d_none_or_clear_bad(p4d))
278 continue;
279 free_pud_range(tlb, p4d, addr, next, floor, ceiling);
280 } while (p4d++, addr = next, addr != end);
281
e0da382c
HD
282 start &= PGDIR_MASK;
283 if (start < floor)
284 return;
285 if (ceiling) {
286 ceiling &= PGDIR_MASK;
287 if (!ceiling)
288 return;
1da177e4 289 }
e0da382c
HD
290 if (end - 1 > ceiling - 1)
291 return;
292
c2febafc 293 p4d = p4d_offset(pgd, start);
e0da382c 294 pgd_clear(pgd);
c2febafc 295 p4d_free_tlb(tlb, p4d, start);
1da177e4
LT
296}
297
298/*
e0da382c 299 * This function frees user-level page tables of a process.
1da177e4 300 */
42b77728 301void free_pgd_range(struct mmu_gather *tlb,
e0da382c
HD
302 unsigned long addr, unsigned long end,
303 unsigned long floor, unsigned long ceiling)
1da177e4
LT
304{
305 pgd_t *pgd;
306 unsigned long next;
e0da382c
HD
307
308 /*
309 * The next few lines have given us lots of grief...
310 *
311 * Why are we testing PMD* at this top level? Because often
312 * there will be no work to do at all, and we'd prefer not to
313 * go all the way down to the bottom just to discover that.
314 *
315 * Why all these "- 1"s? Because 0 represents both the bottom
316 * of the address space and the top of it (using -1 for the
317 * top wouldn't help much: the masks would do the wrong thing).
318 * The rule is that addr 0 and floor 0 refer to the bottom of
319 * the address space, but end 0 and ceiling 0 refer to the top
320 * Comparisons need to use "end - 1" and "ceiling - 1" (though
321 * that end 0 case should be mythical).
322 *
323 * Wherever addr is brought up or ceiling brought down, we must
324 * be careful to reject "the opposite 0" before it confuses the
325 * subsequent tests. But what about where end is brought down
326 * by PMD_SIZE below? no, end can't go down to 0 there.
327 *
328 * Whereas we round start (addr) and ceiling down, by different
329 * masks at different levels, in order to test whether a table
330 * now has no other vmas using it, so can be freed, we don't
331 * bother to round floor or end up - the tests don't need that.
332 */
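 /*
  * Worked example: if a neighbouring vma below 'floor' still uses the
  * page table covering the first PMD-sized piece of [addr, end), then
  * "addr &= PMD_MASK" drops addr below floor, and the "addr += PMD_SIZE"
  * step below skips past that shared table so it is left in place.  The
  * ceiling checks protect, symmetrically, a table shared with a vma above.
  */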
1da177e4 333
e0da382c
HD
334 addr &= PMD_MASK;
335 if (addr < floor) {
336 addr += PMD_SIZE;
337 if (!addr)
338 return;
339 }
340 if (ceiling) {
341 ceiling &= PMD_MASK;
342 if (!ceiling)
343 return;
344 }
345 if (end - 1 > ceiling - 1)
346 end -= PMD_SIZE;
347 if (addr > end - 1)
348 return;
07e32661
AK
349 /*
350 * We add page table cache pages with PAGE_SIZE,
351 * (see pte_free_tlb()), flush the tlb if we need
352 */
ed6a7935 353 tlb_change_page_size(tlb, PAGE_SIZE);
42b77728 354 pgd = pgd_offset(tlb->mm, addr);
1da177e4
LT
355 do {
356 next = pgd_addr_end(addr, end);
357 if (pgd_none_or_clear_bad(pgd))
358 continue;
c2febafc 359 free_p4d_range(tlb, pgd, addr, next, floor, ceiling);
1da177e4 360 } while (pgd++, addr = next, addr != end);
e0da382c
HD
361}
362
fd892593 363void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
763ecb03 364 struct vm_area_struct *vma, unsigned long floor,
98e51a22 365 unsigned long ceiling, bool mm_wr_locked)
e0da382c 366{
763ecb03 367 do {
e0da382c 368 unsigned long addr = vma->vm_start;
763ecb03
LH
369 struct vm_area_struct *next;
370
371 /*
372 * Note: USER_PGTABLES_CEILING may be passed as ceiling and may
373 * be 0. This will underflow and is okay.
374 */
fd892593 375 next = mas_find(mas, ceiling - 1);
e0da382c 376
8f4f8c16 377 /*
25d9e2d1
NP
378 * Hide vma from rmap and truncate_pagecache before freeing
379 * pgtables
8f4f8c16 380 */
98e51a22
SB
381 if (mm_wr_locked)
382 vma_start_write(vma);
5beb4930 383 unlink_anon_vmas(vma);
8f4f8c16
HD
384 unlink_file_vma(vma);
385
9da61aef 386 if (is_vm_hugetlb_page(vma)) {
3bf5ee95 387 hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
166f61b9 388 floor, next ? next->vm_start : ceiling);
3bf5ee95
HD
389 } else {
390 /*
391 * Optimization: gather nearby vmas into one call down
392 */
393 while (next && next->vm_start <= vma->vm_end + PMD_SIZE
4866920b 394 && !is_vm_hugetlb_page(next)) {
3bf5ee95 395 vma = next;
fd892593 396 next = mas_find(mas, ceiling - 1);
98e51a22
SB
397 if (mm_wr_locked)
398 vma_start_write(vma);
5beb4930 399 unlink_anon_vmas(vma);
8f4f8c16 400 unlink_file_vma(vma);
3bf5ee95
HD
401 }
402 free_pgd_range(tlb, addr, vma->vm_end,
166f61b9 403 floor, next ? next->vm_start : ceiling);
3bf5ee95 404 }
e0da382c 405 vma = next;
763ecb03 406 } while (vma);
1da177e4
LT
407}
408
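/*
 * pmd_install() publishes a preallocated pte page: if *pmd is still empty
 * the page is installed and *pte is set to NULL; if another thread raced
 * in and populated the pmd first, *pte is left alone so the caller (see
 * __pte_alloc() below) can free the unused page.
 */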
03c4f204 409void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte)
1da177e4 410{
03c4f204 411 spinlock_t *ptl = pmd_lock(mm, pmd);
1bb3630e 412
8ac1f832 413 if (likely(pmd_none(*pmd))) { /* Has another populated it ? */
c4812909 414 mm_inc_nr_ptes(mm);
ed33b5a6
QZ
415 /*
416 * Ensure all pte setup (eg. pte page lock and page clearing) are
417 * visible before the pte is made visible to other CPUs by being
418 * put into page tables.
419 *
420 * The other side of the story is the pointer chasing in the page
421 * table walking code (when walking the page table without locking;
422 * ie. most of the time). Fortunately, these data accesses consist
423 * of a chain of data-dependent loads, meaning most CPUs (alpha
424 * being the notable exception) will already guarantee loads are
425 * seen in-order. See the alpha page table accessors for the
426 * smp_rmb() barriers in page table walking code.
427 */
428 smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
03c4f204
QZ
429 pmd_populate(mm, pmd, *pte);
430 *pte = NULL;
4b471e88 431 }
c4088ebd 432 spin_unlock(ptl);
03c4f204
QZ
433}
434
4cf58924 435int __pte_alloc(struct mm_struct *mm, pmd_t *pmd)
1da177e4 436{
4cf58924 437 pgtable_t new = pte_alloc_one(mm);
1bb3630e
HD
438 if (!new)
439 return -ENOMEM;
440
03c4f204 441 pmd_install(mm, pmd, &new);
2f569afd
MS
442 if (new)
443 pte_free(mm, new);
1bb3630e 444 return 0;
1da177e4
LT
445}
446
4cf58924 447int __pte_alloc_kernel(pmd_t *pmd)
1da177e4 448{
4cf58924 449 pte_t *new = pte_alloc_one_kernel(&init_mm);
1bb3630e
HD
450 if (!new)
451 return -ENOMEM;
452
453 spin_lock(&init_mm.page_table_lock);
8ac1f832 454 if (likely(pmd_none(*pmd))) { /* Has another populated it ? */
ed33b5a6 455 smp_wmb(); /* See comment in pmd_install() */
1bb3630e 456 pmd_populate_kernel(&init_mm, pmd, new);
2f569afd 457 new = NULL;
4b471e88 458 }
1bb3630e 459 spin_unlock(&init_mm.page_table_lock);
2f569afd
MS
460 if (new)
461 pte_free_kernel(&init_mm, new);
1bb3630e 462 return 0;
1da177e4
LT
463}
464
d559db08
KH
465static inline void init_rss_vec(int *rss)
466{
467 memset(rss, 0, sizeof(int) * NR_MM_COUNTERS);
468}
469
470static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss)
ae859762 471{
d559db08
KH
472 int i;
473
34e55232 474 if (current->mm == mm)
05af2e10 475 sync_mm_rss(mm);
d559db08
KH
476 for (i = 0; i < NR_MM_COUNTERS; i++)
477 if (rss[i])
478 add_mm_counter(mm, i, rss[i]);
ae859762
HD
479}
480
b5810039 481/*
6aab341e
LT
482 * This function is called to print an error when a bad pte
483 * is found. For example, we might have a PFN-mapped pte in
484 * a region that doesn't allow it.
b5810039
NP
485 *
486 * The calling function must still handle the error.
487 */
3dc14741 488static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
489 pte_t pte, struct page *page)
b5810039 490{
3dc14741 491 pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
c2febafc
KS
492 p4d_t *p4d = p4d_offset(pgd, addr);
493 pud_t *pud = pud_offset(p4d, addr);
3dc14741
HD
494 pmd_t *pmd = pmd_offset(pud, addr);
495 struct address_space *mapping;
496 pgoff_t index;
d936cf9b
HD
497 static unsigned long resume;
498 static unsigned long nr_shown;
499 static unsigned long nr_unshown;
500
501 /*
502 * Allow a burst of 60 reports, then keep quiet for that minute;
503 * or allow a steady drip of one report per second.
504 */
505 if (nr_shown == 60) {
506 if (time_before(jiffies, resume)) {
507 nr_unshown++;
508 return;
509 }
510 if (nr_unshown) {
1170532b
JP
511 pr_alert("BUG: Bad page map: %lu messages suppressed\n",
512 nr_unshown);
d936cf9b
HD
513 nr_unshown = 0;
514 }
515 nr_shown = 0;
516 }
517 if (nr_shown++ == 0)
518 resume = jiffies + 60 * HZ;
3dc14741
HD
519
520 mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL;
521 index = linear_page_index(vma, addr);
522
1170532b
JP
523 pr_alert("BUG: Bad page map in process %s pte:%08llx pmd:%08llx\n",
524 current->comm,
525 (long long)pte_val(pte), (long long)pmd_val(*pmd));
718a3821 526 if (page)
f0b791a3 527 dump_page(page, "bad pte");
6aa9b8b2 528 pr_alert("addr:%px vm_flags:%08lx anon_vma:%px mapping:%px index:%lx\n",
1170532b 529 (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
7e0a1265 530 pr_alert("file:%pD fault:%ps mmap:%ps read_folio:%ps\n",
2682582a
KK
531 vma->vm_file,
532 vma->vm_ops ? vma->vm_ops->fault : NULL,
533 vma->vm_file ? vma->vm_file->f_op->mmap : NULL,
7e0a1265 534 mapping ? mapping->a_ops->read_folio : NULL);
b5810039 535 dump_stack();
373d4d09 536 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
b5810039
NP
537}
538
ee498ed7 539/*
7e675137 540 * vm_normal_page -- This function gets the "struct page" associated with a pte.
6aab341e 541 *
7e675137
NP
542 * "Special" mappings do not wish to be associated with a "struct page" (either
543 * it doesn't exist, or it exists but they don't want to touch it). In this
544 * case, NULL is returned here. "Normal" mappings do have a struct page.
b379d790 545 *
7e675137
NP
546 * There are 2 broad cases. Firstly, an architecture may define a pte_special()
547 * pte bit, in which case this function is trivial. Secondly, an architecture
548 * may not have a spare pte bit, which requires a more complicated scheme,
549 * described below.
550 *
551 * A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a
552 * special mapping (even if there are underlying and valid "struct pages").
553 * COWed pages of a VM_PFNMAP are always normal.
6aab341e 554 *
b379d790
JH
555 * The way we recognize COWed pages within VM_PFNMAP mappings is through the
556 * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit
7e675137
NP
557 * set, and the vm_pgoff will point to the first PFN mapped: thus every special
558 * mapping will always honor the rule
6aab341e
LT
559 *
560 * pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
561 *
7e675137
NP
562 * And for normal mappings this is false.
563 *
564 * This restricts such mappings to be a linear translation from virtual address
565 * to pfn. To get around this restriction, we allow arbitrary mappings so long
566 * as the vma is not a COW mapping; in that case, we know that all ptes are
567 * special (because none can have been COWed).
b379d790 568 *
b379d790 569 *
7e675137 570 * In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP.
b379d790
JH
571 *
572 * VM_MIXEDMAP mappings can likewise contain memory with or without "struct
573 * page" backing, however the difference is that _all_ pages with a struct
574 * page (that is, those where pfn_valid is true) are refcounted and considered
575 * normal pages by the VM. The disadvantage is that pages are refcounted
576 * (which can be slower and simply not an option for some PFNMAP users). The
577 * advantage is that we don't have to follow the strict linearity rule of
578 * PFNMAP mappings in order to support COWable mappings.
579 *
ee498ed7 580 */
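/*
 * Concretely: remap_pfn_range() on a COW-able VM_PFNMAP mapping records the
 * first pfn in vm_pgoff, so an untouched pte at addr still satisfies
 * pfn == vm_pgoff + ((addr - vm_start) >> PAGE_SHIFT) and is treated as
 * special, while a page later COWed into the mapping gets a freshly
 * allocated pfn that breaks the identity and is reported as normal.
 */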
25b2995a 581struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
582 pte_t pte)
ee498ed7 583{
22b31eec 584 unsigned long pfn = pte_pfn(pte);
7e675137 585
00b3a331 586 if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL)) {
b38af472 587 if (likely(!pte_special(pte)))
22b31eec 588 goto check_pfn;
667a0a06
DV
589 if (vma->vm_ops && vma->vm_ops->find_special_page)
590 return vma->vm_ops->find_special_page(vma, addr);
a13ea5b7
HD
591 if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
592 return NULL;
df6ad698
JG
593 if (is_zero_pfn(pfn))
594 return NULL;
e1fb4a08 595 if (pte_devmap(pte))
3218f871
AS
596 /*
597 * NOTE: New users of ZONE_DEVICE will not set pte_devmap()
598 * and will have refcounts incremented on their struct pages
599 * when they are inserted into PTEs, thus they are safe to
600 * return here. Legacy ZONE_DEVICE pages that set pte_devmap()
601 * do not have refcounts. Example of legacy ZONE_DEVICE is
602 * MEMORY_DEVICE_FS_DAX type in pmem or virtio_fs drivers.
603 */
e1fb4a08
DJ
604 return NULL;
605
df6ad698 606 print_bad_pte(vma, addr, pte, NULL);
7e675137
NP
607 return NULL;
608 }
609
00b3a331 610 /* !CONFIG_ARCH_HAS_PTE_SPECIAL case follows: */
7e675137 611
b379d790
JH
612 if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
613 if (vma->vm_flags & VM_MIXEDMAP) {
614 if (!pfn_valid(pfn))
615 return NULL;
616 goto out;
617 } else {
7e675137
NP
618 unsigned long off;
619 off = (addr - vma->vm_start) >> PAGE_SHIFT;
b379d790
JH
620 if (pfn == vma->vm_pgoff + off)
621 return NULL;
622 if (!is_cow_mapping(vma->vm_flags))
623 return NULL;
624 }
6aab341e
LT
625 }
626
b38af472
HD
627 if (is_zero_pfn(pfn))
628 return NULL;
00b3a331 629
22b31eec
HD
630check_pfn:
631 if (unlikely(pfn > highest_memmap_pfn)) {
632 print_bad_pte(vma, addr, pte, NULL);
633 return NULL;
634 }
6aab341e
LT
635
636 /*
7e675137 637 * NOTE! We still have PageReserved() pages in the page tables.
7e675137 638 * eg. VDSO mappings can cause them to exist.
6aab341e 639 */
b379d790 640out:
6aab341e 641 return pfn_to_page(pfn);
ee498ed7
HD
642}
643
318e9342
VMO
644struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr,
645 pte_t pte)
646{
647 struct page *page = vm_normal_page(vma, addr, pte);
648
649 if (page)
650 return page_folio(page);
651 return NULL;
652}
653
28093f9f
GS
654#ifdef CONFIG_TRANSPARENT_HUGEPAGE
655struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
656 pmd_t pmd)
657{
658 unsigned long pfn = pmd_pfn(pmd);
659
660 /*
661 * There is no pmd_special() but there may be special pmds, e.g.
662 * in a direct-access (dax) mapping, so let's just replicate the
00b3a331 663 * !CONFIG_ARCH_HAS_PTE_SPECIAL case from vm_normal_page() here.
28093f9f
GS
664 */
665 if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
666 if (vma->vm_flags & VM_MIXEDMAP) {
667 if (!pfn_valid(pfn))
668 return NULL;
669 goto out;
670 } else {
671 unsigned long off;
672 off = (addr - vma->vm_start) >> PAGE_SHIFT;
673 if (pfn == vma->vm_pgoff + off)
674 return NULL;
675 if (!is_cow_mapping(vma->vm_flags))
676 return NULL;
677 }
678 }
679
e1fb4a08
DJ
680 if (pmd_devmap(pmd))
681 return NULL;
3cde287b 682 if (is_huge_zero_pmd(pmd))
28093f9f
GS
683 return NULL;
684 if (unlikely(pfn > highest_memmap_pfn))
685 return NULL;
686
687 /*
688 * NOTE! We still have PageReserved() pages in the page tables.
689 * eg. VDSO mappings can cause them to exist.
690 */
691out:
692 return pfn_to_page(pfn);
693}
694#endif
695
b756a3b5 696static void restore_exclusive_pte(struct vm_area_struct *vma,
697 struct page *page, unsigned long address,
698 pte_t *ptep)
699{
c33c7948 700 pte_t orig_pte;
b756a3b5
AP
701 pte_t pte;
702 swp_entry_t entry;
703
c33c7948 704 orig_pte = ptep_get(ptep);
b756a3b5 705 pte = pte_mkold(mk_pte(page, READ_ONCE(vma->vm_page_prot)));
c33c7948 706 if (pte_swp_soft_dirty(orig_pte))
b756a3b5
AP
707 pte = pte_mksoft_dirty(pte);
708
c33c7948
RR
709 entry = pte_to_swp_entry(orig_pte);
710 if (pte_swp_uffd_wp(orig_pte))
b756a3b5
AP
711 pte = pte_mkuffd_wp(pte);
712 else if (is_writable_device_exclusive_entry(entry))
713 pte = maybe_mkwrite(pte_mkdirty(pte), vma);
714
6c287605
DH
715 VM_BUG_ON(pte_write(pte) && !(PageAnon(page) && PageAnonExclusive(page)));
716
b756a3b5
AP
717 /*
718 * No need to take a page reference as one was already
719 * created when the swap entry was made.
720 */
721 if (PageAnon(page))
f1e2db12 722 page_add_anon_rmap(page, vma, address, RMAP_NONE);
b756a3b5
AP
723 else
724 /*
725 * Currently device exclusive access only supports anonymous
726 * memory so the entry shouldn't point to a filebacked page.
727 */
4d8ff640 728 WARN_ON_ONCE(1);
b756a3b5 729
1eba86c0
PT
730 set_pte_at(vma->vm_mm, address, ptep, pte);
731
b756a3b5
AP
732 /*
733 * No need to invalidate - it was non-present before. However
734 * secondary CPUs may have mappings that need invalidating.
735 */
736 update_mmu_cache(vma, address, ptep);
737}
738
739/*
740 * Tries to restore an exclusive pte if the page lock can be acquired without
741 * sleeping.
742 */
743static int
744try_restore_exclusive_pte(pte_t *src_pte, struct vm_area_struct *vma,
745 unsigned long addr)
746{
c33c7948 747 swp_entry_t entry = pte_to_swp_entry(ptep_get(src_pte));
b756a3b5
AP
748 struct page *page = pfn_swap_entry_to_page(entry);
749
750 if (trylock_page(page)) {
751 restore_exclusive_pte(vma, page, addr, src_pte);
752 unlock_page(page);
753 return 0;
754 }
755
756 return -EBUSY;
757}
758
1da177e4
LT
759/*
760 * copy one vm_area from one task to the other. Assumes the page tables
761 * already present in the new task to be cleared in the whole range
762 * covered by this vma.
1da177e4
LT
763 */
764
df3a57d1 765static unsigned long
766copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
8f34f1ea
PX
767 pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *dst_vma,
768 struct vm_area_struct *src_vma, unsigned long addr, int *rss)
1da177e4 769{
8f34f1ea 770 unsigned long vm_flags = dst_vma->vm_flags;
c33c7948
RR
771 pte_t orig_pte = ptep_get(src_pte);
772 pte_t pte = orig_pte;
1da177e4 773 struct page *page;
c33c7948 774 swp_entry_t entry = pte_to_swp_entry(orig_pte);
df3a57d1
LT
775
776 if (likely(!non_swap_entry(entry))) {
777 if (swap_duplicate(entry) < 0)
9a5cc85c 778 return -EIO;
df3a57d1
LT
779
780 /* make sure dst_mm is on swapoff's mmlist. */
781 if (unlikely(list_empty(&dst_mm->mmlist))) {
782 spin_lock(&mmlist_lock);
783 if (list_empty(&dst_mm->mmlist))
784 list_add(&dst_mm->mmlist,
785 &src_mm->mmlist);
786 spin_unlock(&mmlist_lock);
787 }
1493a191 788 /* Mark the swap entry as shared. */
c33c7948
RR
789 if (pte_swp_exclusive(orig_pte)) {
790 pte = pte_swp_clear_exclusive(orig_pte);
1493a191
DH
791 set_pte_at(src_mm, addr, src_pte, pte);
792 }
df3a57d1
LT
793 rss[MM_SWAPENTS]++;
794 } else if (is_migration_entry(entry)) {
af5cdaf8 795 page = pfn_swap_entry_to_page(entry);
1da177e4 796
df3a57d1 797 rss[mm_counter(page)]++;
5042db43 798
6c287605 799 if (!is_readable_migration_entry(entry) &&
df3a57d1 800 is_cow_mapping(vm_flags)) {
5042db43 801 /*
6c287605
DH
802 * COW mappings require pages in both parent and child
803 * to be set to read. A previously exclusive entry is
804 * now shared.
5042db43 805 */
4dd845b5
AP
806 entry = make_readable_migration_entry(
807 swp_offset(entry));
df3a57d1 808 pte = swp_entry_to_pte(entry);
c33c7948 809 if (pte_swp_soft_dirty(orig_pte))
df3a57d1 810 pte = pte_swp_mksoft_dirty(pte);
c33c7948 811 if (pte_swp_uffd_wp(orig_pte))
df3a57d1
LT
812 pte = pte_swp_mkuffd_wp(pte);
813 set_pte_at(src_mm, addr, src_pte, pte);
814 }
815 } else if (is_device_private_entry(entry)) {
af5cdaf8 816 page = pfn_swap_entry_to_page(entry);
5042db43 817
df3a57d1
LT
818 /*
819 * Update rss count even for unaddressable pages, as
820 * they should be treated just like normal pages in this
821 * respect.
822 *
823 * We will likely want to have some new rss counters
824 * for unaddressable pages, at some point. But for now
825 * keep things as they are.
826 */
827 get_page(page);
828 rss[mm_counter(page)]++;
fb3d824d
DH
829 /* Cannot fail as these pages cannot get pinned. */
830 BUG_ON(page_try_dup_anon_rmap(page, false, src_vma));
df3a57d1
LT
831
832 /*
833 * We do not preserve soft-dirty information, because so
834 * far, checkpoint/restore is the only feature that
835 * requires that. And checkpoint/restore does not work
836 * when a device driver is involved (you cannot easily
837 * save and restore device driver state).
838 */
4dd845b5 839 if (is_writable_device_private_entry(entry) &&
df3a57d1 840 is_cow_mapping(vm_flags)) {
4dd845b5
AP
841 entry = make_readable_device_private_entry(
842 swp_offset(entry));
df3a57d1 843 pte = swp_entry_to_pte(entry);
c33c7948 844 if (pte_swp_uffd_wp(orig_pte))
df3a57d1
LT
845 pte = pte_swp_mkuffd_wp(pte);
846 set_pte_at(src_mm, addr, src_pte, pte);
1da177e4 847 }
b756a3b5
AP
848 } else if (is_device_exclusive_entry(entry)) {
849 /*
850 * Make device exclusive entries present by restoring the
851 * original entry then copying as for a present pte. Device
852 * exclusive entries currently only support private writable
853 * (ie. COW) mappings.
854 */
855 VM_BUG_ON(!is_cow_mapping(src_vma->vm_flags));
856 if (try_restore_exclusive_pte(src_pte, src_vma, addr))
857 return -EBUSY;
858 return -ENOENT;
c56d1b62 859 } else if (is_pte_marker_entry(entry)) {
af19487f
AR
860 pte_marker marker = copy_pte_marker(entry, dst_vma);
861
862 if (marker)
863 set_pte_at(dst_mm, addr, dst_pte,
864 make_pte_marker(marker));
c56d1b62 865 return 0;
1da177e4 866 }
8f34f1ea
PX
867 if (!userfaultfd_wp(dst_vma))
868 pte = pte_swp_clear_uffd_wp(pte);
df3a57d1
LT
869 set_pte_at(dst_mm, addr, dst_pte, pte);
870 return 0;
871}
872
70e806e4 873/*
b51ad4f8 874 * Copy a present and normal page.
70e806e4 875 *
b51ad4f8
DH
876 * NOTE! The usual case is that this isn't required;
877 * instead, the caller can just increase the page refcount
878 * and re-use the pte the traditional way.
70e806e4
PX
879 *
880 * And if we need a pre-allocated page but don't yet have
881 * one, return a negative error to let the preallocation
882 * code know so that it can do so outside the page table
883 * lock.
884 */
885static inline int
c78f4636
PX
886copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
887 pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
edf50470 888 struct folio **prealloc, struct page *page)
70e806e4 889{
edf50470 890 struct folio *new_folio;
b51ad4f8 891 pte_t pte;
70e806e4 892
edf50470
MWO
893 new_folio = *prealloc;
894 if (!new_folio)
70e806e4
PX
895 return -EAGAIN;
896
897 /*
898 * We have a prealloc page, all good! Take it
899 * over and copy the page & arm it.
900 */
901 *prealloc = NULL;
edf50470
MWO
902 copy_user_highpage(&new_folio->page, page, addr, src_vma);
903 __folio_mark_uptodate(new_folio);
904 folio_add_new_anon_rmap(new_folio, dst_vma, addr);
905 folio_add_lru_vma(new_folio, dst_vma);
906 rss[MM_ANONPAGES]++;
70e806e4
PX
907
908 /* All done, just insert the new page copy in the child */
edf50470 909 pte = mk_pte(&new_folio->page, dst_vma->vm_page_prot);
c78f4636 910 pte = maybe_mkwrite(pte_mkdirty(pte), dst_vma);
c33c7948 911 if (userfaultfd_pte_wp(dst_vma, ptep_get(src_pte)))
8f34f1ea 912 /* Uffd-wp needs to be delivered to dest pte as well */
f1eb1bac 913 pte = pte_mkuffd_wp(pte);
c78f4636 914 set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);
70e806e4
PX
915 return 0;
916}
917
918/*
919 * Copy one pte. Returns 0 if succeeded, or -EAGAIN if one preallocated page
920 * is required to copy this pte.
921 */
922static inline int
c78f4636
PX
923copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
924 pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
edf50470 925 struct folio **prealloc)
df3a57d1 926{
c78f4636
PX
927 struct mm_struct *src_mm = src_vma->vm_mm;
928 unsigned long vm_flags = src_vma->vm_flags;
c33c7948 929 pte_t pte = ptep_get(src_pte);
df3a57d1 930 struct page *page;
14ddee41 931 struct folio *folio;
df3a57d1 932
c78f4636 933 page = vm_normal_page(src_vma, addr, pte);
14ddee41
MWO
934 if (page)
935 folio = page_folio(page);
936 if (page && folio_test_anon(folio)) {
b51ad4f8
DH
937 /*
938 * If this page may have been pinned by the parent process,
939 * copy the page immediately for the child so that we'll always
940 * guarantee the pinned page won't be randomly replaced in the
941 * future.
942 */
14ddee41 943 folio_get(folio);
fb3d824d 944 if (unlikely(page_try_dup_anon_rmap(page, false, src_vma))) {
14ddee41
MWO
945 /* Page may be pinned, we have to copy. */
946 folio_put(folio);
fb3d824d
DH
947 return copy_present_page(dst_vma, src_vma, dst_pte, src_pte,
948 addr, rss, prealloc, page);
949 }
edf50470 950 rss[MM_ANONPAGES]++;
b51ad4f8 951 } else if (page) {
14ddee41 952 folio_get(folio);
fb3d824d 953 page_dup_file_rmap(page, false);
edf50470 954 rss[mm_counter_file(page)]++;
70e806e4
PX
955 }
956
1da177e4
LT
957 /*
958 * If it's a COW mapping, write protect it both
959 * in the parent and the child
960 */
1b2de5d0 961 if (is_cow_mapping(vm_flags) && pte_write(pte)) {
1da177e4 962 ptep_set_wrprotect(src_mm, addr, src_pte);
3dc90795 963 pte = pte_wrprotect(pte);
1da177e4 964 }
14ddee41 965 VM_BUG_ON(page && folio_test_anon(folio) && PageAnonExclusive(page));
1da177e4
LT
966
967 /*
968 * If it's a shared mapping, mark it clean in
969 * the child
970 */
971 if (vm_flags & VM_SHARED)
972 pte = pte_mkclean(pte);
973 pte = pte_mkold(pte);
6aab341e 974
8f34f1ea 975 if (!userfaultfd_wp(dst_vma))
b569a176
PX
976 pte = pte_clear_uffd_wp(pte);
977
c78f4636 978 set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);
70e806e4
PX
979 return 0;
980}
981
edf50470
MWO
982static inline struct folio *page_copy_prealloc(struct mm_struct *src_mm,
983 struct vm_area_struct *vma, unsigned long addr)
70e806e4 984{
edf50470 985 struct folio *new_folio;
70e806e4 986
edf50470
MWO
987 new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr, false);
988 if (!new_folio)
70e806e4
PX
989 return NULL;
990
edf50470
MWO
991 if (mem_cgroup_charge(new_folio, src_mm, GFP_KERNEL)) {
992 folio_put(new_folio);
70e806e4 993 return NULL;
6aab341e 994 }
e601ded4 995 folio_throttle_swaprate(new_folio, GFP_KERNEL);
ae859762 996
edf50470 997 return new_folio;
1da177e4
LT
998}
999
c78f4636
PX
1000static int
1001copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
1002 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
1003 unsigned long end)
1da177e4 1004{
c78f4636
PX
1005 struct mm_struct *dst_mm = dst_vma->vm_mm;
1006 struct mm_struct *src_mm = src_vma->vm_mm;
c36987e2 1007 pte_t *orig_src_pte, *orig_dst_pte;
1da177e4 1008 pte_t *src_pte, *dst_pte;
c33c7948 1009 pte_t ptent;
c74df32c 1010 spinlock_t *src_ptl, *dst_ptl;
70e806e4 1011 int progress, ret = 0;
d559db08 1012 int rss[NR_MM_COUNTERS];
570a335b 1013 swp_entry_t entry = (swp_entry_t){0};
edf50470 1014 struct folio *prealloc = NULL;
1da177e4
LT
1015
1016again:
70e806e4 1017 progress = 0;
d559db08
KH
1018 init_rss_vec(rss);
1019
3db82b93
HD
1020 /*
1021 * copy_pmd_range()'s prior pmd_none_or_clear_bad(src_pmd), and the
1022 * error handling here, assume that exclusive mmap_lock on dst and src
1023 * protects anon from unexpected THP transitions; with shmem and file
1024 * protected by mmap_lock-less collapse skipping areas with anon_vma
1025 * (whereas vma_needs_copy() skips areas without anon_vma). A rework
1026 * can remove such assumptions later, but this is good enough for now.
1027 */
c74df32c 1028 dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
70e806e4
PX
1029 if (!dst_pte) {
1030 ret = -ENOMEM;
1031 goto out;
1032 }
3db82b93
HD
1033 src_pte = pte_offset_map_nolock(src_mm, src_pmd, addr, &src_ptl);
1034 if (!src_pte) {
1035 pte_unmap_unlock(dst_pte, dst_ptl);
1036 /* ret == 0 */
1037 goto out;
1038 }
f20dc5f7 1039 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
c36987e2
DN
1040 orig_src_pte = src_pte;
1041 orig_dst_pte = dst_pte;
6606c3e0 1042 arch_enter_lazy_mmu_mode();
1da177e4 1043
1da177e4
LT
1044 do {
1045 /*
1046 * We are holding two locks at this point - either of them
1047 * could generate latencies in another task on another CPU.
1048 */
e040f218
HD
1049 if (progress >= 32) {
1050 progress = 0;
1051 if (need_resched() ||
95c354fe 1052 spin_needbreak(src_ptl) || spin_needbreak(dst_ptl))
e040f218
HD
1053 break;
1054 }
c33c7948
RR
1055 ptent = ptep_get(src_pte);
1056 if (pte_none(ptent)) {
1da177e4
LT
1057 progress++;
1058 continue;
1059 }
c33c7948 1060 if (unlikely(!pte_present(ptent))) {
9a5cc85c
AP
1061 ret = copy_nonpresent_pte(dst_mm, src_mm,
1062 dst_pte, src_pte,
1063 dst_vma, src_vma,
1064 addr, rss);
1065 if (ret == -EIO) {
c33c7948 1066 entry = pte_to_swp_entry(ptep_get(src_pte));
79a1971c 1067 break;
b756a3b5
AP
1068 } else if (ret == -EBUSY) {
1069 break;
1070 } else if (!ret) {
1071 progress += 8;
1072 continue;
9a5cc85c 1073 }
b756a3b5
AP
1074
1075 /*
1076 * Device exclusive entry restored, continue by copying
1077 * the now present pte.
1078 */
1079 WARN_ON_ONCE(ret != -ENOENT);
79a1971c 1080 }
70e806e4 1081 /* copy_present_pte() will clear `*prealloc' if consumed */
c78f4636
PX
1082 ret = copy_present_pte(dst_vma, src_vma, dst_pte, src_pte,
1083 addr, rss, &prealloc);
70e806e4
PX
1084 /*
1085 * If we need a pre-allocated page for this pte, drop the
1086 * locks, allocate, and try again.
1087 */
1088 if (unlikely(ret == -EAGAIN))
1089 break;
1090 if (unlikely(prealloc)) {
1091 /*
1092 * The preallocated page cannot be reused for the next pte, so as
1093 * to strictly follow mempolicy (e.g., alloc_page_vma()
1094 * will allocate the page according to address). This
1095 * could only happen if one pinned pte changed.
1096 */
edf50470 1097 folio_put(prealloc);
70e806e4
PX
1098 prealloc = NULL;
1099 }
1da177e4
LT
1100 progress += 8;
1101 } while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
1da177e4 1102
6606c3e0 1103 arch_leave_lazy_mmu_mode();
3db82b93 1104 pte_unmap_unlock(orig_src_pte, src_ptl);
d559db08 1105 add_mm_rss_vec(dst_mm, rss);
c36987e2 1106 pte_unmap_unlock(orig_dst_pte, dst_ptl);
c74df32c 1107 cond_resched();
570a335b 1108
9a5cc85c
AP
1109 if (ret == -EIO) {
1110 VM_WARN_ON_ONCE(!entry.val);
70e806e4
PX
1111 if (add_swap_count_continuation(entry, GFP_KERNEL) < 0) {
1112 ret = -ENOMEM;
1113 goto out;
1114 }
1115 entry.val = 0;
b756a3b5
AP
1116 } else if (ret == -EBUSY) {
1117 goto out;
9a5cc85c 1118 } else if (ret == -EAGAIN) {
c78f4636 1119 prealloc = page_copy_prealloc(src_mm, src_vma, addr);
70e806e4 1120 if (!prealloc)
570a335b 1121 return -ENOMEM;
9a5cc85c
AP
1122 } else if (ret) {
1123 VM_WARN_ON_ONCE(1);
570a335b 1124 }
9a5cc85c
AP
1125
1126 /* We've captured and resolved the error. Reset, try again. */
1127 ret = 0;
1128
1da177e4
LT
1129 if (addr != end)
1130 goto again;
70e806e4
PX
1131out:
1132 if (unlikely(prealloc))
edf50470 1133 folio_put(prealloc);
70e806e4 1134 return ret;
1da177e4
LT
1135}
1136
c78f4636
PX
1137static inline int
1138copy_pmd_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
1139 pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
1140 unsigned long end)
1da177e4 1141{
c78f4636
PX
1142 struct mm_struct *dst_mm = dst_vma->vm_mm;
1143 struct mm_struct *src_mm = src_vma->vm_mm;
1da177e4
LT
1144 pmd_t *src_pmd, *dst_pmd;
1145 unsigned long next;
1146
1147 dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
1148 if (!dst_pmd)
1149 return -ENOMEM;
1150 src_pmd = pmd_offset(src_pud, addr);
1151 do {
1152 next = pmd_addr_end(addr, end);
84c3fc4e
ZY
1153 if (is_swap_pmd(*src_pmd) || pmd_trans_huge(*src_pmd)
1154 || pmd_devmap(*src_pmd)) {
71e3aac0 1155 int err;
c78f4636 1156 VM_BUG_ON_VMA(next-addr != HPAGE_PMD_SIZE, src_vma);
8f34f1ea
PX
1157 err = copy_huge_pmd(dst_mm, src_mm, dst_pmd, src_pmd,
1158 addr, dst_vma, src_vma);
71e3aac0
AA
1159 if (err == -ENOMEM)
1160 return -ENOMEM;
1161 if (!err)
1162 continue;
1163 /* fall through */
1164 }
1da177e4
LT
1165 if (pmd_none_or_clear_bad(src_pmd))
1166 continue;
c78f4636
PX
1167 if (copy_pte_range(dst_vma, src_vma, dst_pmd, src_pmd,
1168 addr, next))
1da177e4
LT
1169 return -ENOMEM;
1170 } while (dst_pmd++, src_pmd++, addr = next, addr != end);
1171 return 0;
1172}
1173
c78f4636
PX
1174static inline int
1175copy_pud_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
1176 p4d_t *dst_p4d, p4d_t *src_p4d, unsigned long addr,
1177 unsigned long end)
1da177e4 1178{
c78f4636
PX
1179 struct mm_struct *dst_mm = dst_vma->vm_mm;
1180 struct mm_struct *src_mm = src_vma->vm_mm;
1da177e4
LT
1181 pud_t *src_pud, *dst_pud;
1182 unsigned long next;
1183
c2febafc 1184 dst_pud = pud_alloc(dst_mm, dst_p4d, addr);
1da177e4
LT
1185 if (!dst_pud)
1186 return -ENOMEM;
c2febafc 1187 src_pud = pud_offset(src_p4d, addr);
1da177e4
LT
1188 do {
1189 next = pud_addr_end(addr, end);
a00cc7d9
MW
1190 if (pud_trans_huge(*src_pud) || pud_devmap(*src_pud)) {
1191 int err;
1192
c78f4636 1193 VM_BUG_ON_VMA(next-addr != HPAGE_PUD_SIZE, src_vma);
a00cc7d9 1194 err = copy_huge_pud(dst_mm, src_mm,
c78f4636 1195 dst_pud, src_pud, addr, src_vma);
a00cc7d9
MW
1196 if (err == -ENOMEM)
1197 return -ENOMEM;
1198 if (!err)
1199 continue;
1200 /* fall through */
1201 }
1da177e4
LT
1202 if (pud_none_or_clear_bad(src_pud))
1203 continue;
c78f4636
PX
1204 if (copy_pmd_range(dst_vma, src_vma, dst_pud, src_pud,
1205 addr, next))
1da177e4
LT
1206 return -ENOMEM;
1207 } while (dst_pud++, src_pud++, addr = next, addr != end);
1208 return 0;
1209}
1210
c78f4636
PX
1211static inline int
1212copy_p4d_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
1213 pgd_t *dst_pgd, pgd_t *src_pgd, unsigned long addr,
1214 unsigned long end)
c2febafc 1215{
c78f4636 1216 struct mm_struct *dst_mm = dst_vma->vm_mm;
c2febafc
KS
1217 p4d_t *src_p4d, *dst_p4d;
1218 unsigned long next;
1219
1220 dst_p4d = p4d_alloc(dst_mm, dst_pgd, addr);
1221 if (!dst_p4d)
1222 return -ENOMEM;
1223 src_p4d = p4d_offset(src_pgd, addr);
1224 do {
1225 next = p4d_addr_end(addr, end);
1226 if (p4d_none_or_clear_bad(src_p4d))
1227 continue;
c78f4636
PX
1228 if (copy_pud_range(dst_vma, src_vma, dst_p4d, src_p4d,
1229 addr, next))
c2febafc
KS
1230 return -ENOMEM;
1231 } while (dst_p4d++, src_p4d++, addr = next, addr != end);
1232 return 0;
1233}
1234
c56d1b62
PX
1235/*
1236 * Return true if the vma needs to copy the pgtable during this fork(). Return
1237 * false when we can speed up fork() by allowing lazy page faults later,
1238 * until the child accesses the memory range.
1239 */
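/*
 * For example, a big read-only file mapping with no anon_vma and no uffd-wp
 * protection is skipped entirely: copy_page_range() then copies no ptes and
 * the child populates the range lazily through minor faults.
 */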
bc70fbf2 1240static bool
c56d1b62
PX
1241vma_needs_copy(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
1242{
1243 /*
1244 * Always copy pgtables when dst_vma has uffd-wp enabled even if it's
1245 * file-backed (e.g. shmem). Because when uffd-wp is enabled, pgtable
1246 * contains uffd-wp protection information, that's something we can't
1247 * retrieve from page cache, and skip copying will lose those info.
1248 */
1249 if (userfaultfd_wp(dst_vma))
1250 return true;
1251
bcd51a3c 1252 if (src_vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
c56d1b62
PX
1253 return true;
1254
1255 if (src_vma->anon_vma)
1256 return true;
1257
1258 /*
1259 * Don't copy ptes where a page fault will fill them correctly. Fork
1260 * becomes much lighter when there are big shared or private readonly
1261 * mappings. The tradeoff is that copy_page_range is more efficient
1262 * than faulting.
1263 */
1264 return false;
1265}
1266
c78f4636
PX
1267int
1268copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
1da177e4
LT
1269{
1270 pgd_t *src_pgd, *dst_pgd;
1271 unsigned long next;
c78f4636
PX
1272 unsigned long addr = src_vma->vm_start;
1273 unsigned long end = src_vma->vm_end;
1274 struct mm_struct *dst_mm = dst_vma->vm_mm;
1275 struct mm_struct *src_mm = src_vma->vm_mm;
ac46d4f3 1276 struct mmu_notifier_range range;
2ec74c3e 1277 bool is_cow;
cddb8a5c 1278 int ret;
1da177e4 1279
c56d1b62 1280 if (!vma_needs_copy(dst_vma, src_vma))
0661a336 1281 return 0;
d992895b 1282
c78f4636 1283 if (is_vm_hugetlb_page(src_vma))
bc70fbf2 1284 return copy_hugetlb_page_range(dst_mm, src_mm, dst_vma, src_vma);
1da177e4 1285
c78f4636 1286 if (unlikely(src_vma->vm_flags & VM_PFNMAP)) {
2ab64037 1287 /*
1288 * We do not free on error cases below as remove_vma
1289 * gets called on error from higher level routine
1290 */
c78f4636 1291 ret = track_pfn_copy(src_vma);
2ab64037 1292 if (ret)
1293 return ret;
1294 }
1295
cddb8a5c
AA
1296 /*
1297 * We need to invalidate the secondary MMU mappings only when
1298 * there could be a permission downgrade on the ptes of the
1299 * parent mm. And a permission downgrade will only happen if
1300 * is_cow_mapping() returns true.
1301 */
c78f4636 1302 is_cow = is_cow_mapping(src_vma->vm_flags);
ac46d4f3
JG
1303
1304 if (is_cow) {
7269f999 1305 mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
7d4a8be0 1306 0, src_mm, addr, end);
ac46d4f3 1307 mmu_notifier_invalidate_range_start(&range);
57efa1fe
JG
1308 /*
1309 * Disabling preemption is not needed for the write side, as
1310 * the read side doesn't spin, but goes to the mmap_lock.
1311 *
1312 * Use the raw variant of the seqcount_t write API to avoid
1313 * lockdep complaining about preemptibility.
1314 */
e727bfd5 1315 vma_assert_write_locked(src_vma);
57efa1fe 1316 raw_write_seqcount_begin(&src_mm->write_protect_seq);
ac46d4f3 1317 }
cddb8a5c
AA
1318
1319 ret = 0;
1da177e4
LT
1320 dst_pgd = pgd_offset(dst_mm, addr);
1321 src_pgd = pgd_offset(src_mm, addr);
1322 do {
1323 next = pgd_addr_end(addr, end);
1324 if (pgd_none_or_clear_bad(src_pgd))
1325 continue;
c78f4636
PX
1326 if (unlikely(copy_p4d_range(dst_vma, src_vma, dst_pgd, src_pgd,
1327 addr, next))) {
d155df53 1328 untrack_pfn_clear(dst_vma);
cddb8a5c
AA
1329 ret = -ENOMEM;
1330 break;
1331 }
1da177e4 1332 } while (dst_pgd++, src_pgd++, addr = next, addr != end);
cddb8a5c 1333
57efa1fe
JG
1334 if (is_cow) {
1335 raw_write_seqcount_end(&src_mm->write_protect_seq);
ac46d4f3 1336 mmu_notifier_invalidate_range_end(&range);
57efa1fe 1337 }
cddb8a5c 1338 return ret;
1da177e4
LT
1339}
1340
5abfd71d
PX
1341/* Whether we should zap all COWed (private) pages too */
1342static inline bool should_zap_cows(struct zap_details *details)
1343{
1344 /* By default, zap all pages */
1345 if (!details)
1346 return true;
1347
1348 /* Or, we zap COWed pages only if the caller wants to */
2e148f1e 1349 return details->even_cows;
5abfd71d
PX
1350}
1351
2e148f1e 1352/* Decides whether we should zap this page with the page pointer specified */
254ab940 1353static inline bool should_zap_page(struct zap_details *details, struct page *page)
3506659e 1354{
5abfd71d
PX
1355 /* If we can make a decision without *page.. */
1356 if (should_zap_cows(details))
254ab940 1357 return true;
5abfd71d
PX
1358
1359 /* E.g. the caller passes NULL for the case of a zero page */
1360 if (!page)
254ab940 1361 return true;
3506659e 1362
2e148f1e
PX
1363 /* Otherwise we should only zap non-anon pages */
1364 return !PageAnon(page);
3506659e
MWO
1365}
1366
999dad82
PX
1367static inline bool zap_drop_file_uffd_wp(struct zap_details *details)
1368{
1369 if (!details)
1370 return false;
1371
1372 return details->zap_flags & ZAP_FLAG_DROP_MARKER;
1373}
1374
1375/*
1376 * This function makes sure that we'll replace the none pte with an uffd-wp
1377 * swap special pte marker when necessary. Must be called with the pgtable lock held.
1378 */
1379static inline void
1380zap_install_uffd_wp_if_needed(struct vm_area_struct *vma,
1381 unsigned long addr, pte_t *pte,
1382 struct zap_details *details, pte_t pteval)
1383{
2bad466c
PX
1384 /* Zap on anonymous always means dropping everything */
1385 if (vma_is_anonymous(vma))
1386 return;
1387
999dad82
PX
1388 if (zap_drop_file_uffd_wp(details))
1389 return;
1390
1391 pte_install_uffd_wp_if_needed(vma, addr, pte, pteval);
1392}
1393
51c6f666 1394static unsigned long zap_pte_range(struct mmu_gather *tlb,
b5810039 1395 struct vm_area_struct *vma, pmd_t *pmd,
1da177e4 1396 unsigned long addr, unsigned long end,
97a89413 1397 struct zap_details *details)
1da177e4 1398{
b5810039 1399 struct mm_struct *mm = tlb->mm;
d16dfc55 1400 int force_flush = 0;
d559db08 1401 int rss[NR_MM_COUNTERS];
97a89413 1402 spinlock_t *ptl;
5f1a1907 1403 pte_t *start_pte;
97a89413 1404 pte_t *pte;
8a5f14a2 1405 swp_entry_t entry;
d559db08 1406
ed6a7935 1407 tlb_change_page_size(tlb, PAGE_SIZE);
e303297e 1408 init_rss_vec(rss);
3db82b93
HD
1409 start_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
1410 if (!pte)
1411 return addr;
1412
3ea27719 1413 flush_tlb_batched_pending(mm);
6606c3e0 1414 arch_enter_lazy_mmu_mode();
1da177e4 1415 do {
c33c7948 1416 pte_t ptent = ptep_get(pte);
8018db85
PX
1417 struct page *page;
1418
166f61b9 1419 if (pte_none(ptent))
1da177e4 1420 continue;
6f5e6b9e 1421
7b167b68
MK
1422 if (need_resched())
1423 break;
1424
1da177e4 1425 if (pte_present(ptent)) {
5df397de
LT
1426 unsigned int delay_rmap;
1427
25b2995a 1428 page = vm_normal_page(vma, addr, ptent);
254ab940 1429 if (unlikely(!should_zap_page(details, page)))
91b61ef3 1430 continue;
b5810039 1431 ptent = ptep_get_and_clear_full(mm, addr, pte,
a600388d 1432 tlb->fullmm);
e5136e87 1433 arch_check_zapped_pte(vma, ptent);
1da177e4 1434 tlb_remove_tlb_entry(tlb, pte, addr);
999dad82
PX
1435 zap_install_uffd_wp_if_needed(vma, addr, pte, details,
1436 ptent);
e2942062 1437 if (unlikely(!page)) {
6080d19f 1438 ksm_might_unmap_zero_page(mm, ptent);
1da177e4 1439 continue;
e2942062 1440 }
eca56ff9 1441
5df397de 1442 delay_rmap = 0;
eca56ff9 1443 if (!PageAnon(page)) {
1cf35d47 1444 if (pte_dirty(ptent)) {
6237bcd9 1445 set_page_dirty(page);
5df397de
LT
1446 if (tlb_delay_rmap(tlb)) {
1447 delay_rmap = 1;
1448 force_flush = 1;
1449 }
1cf35d47 1450 }
8788f678 1451 if (pte_young(ptent) && likely(vma_has_recency(vma)))
bf3f3bc5 1452 mark_page_accessed(page);
6237bcd9 1453 }
eca56ff9 1454 rss[mm_counter(page)]--;
5df397de
LT
1455 if (!delay_rmap) {
1456 page_remove_rmap(page, vma, false);
1457 if (unlikely(page_mapcount(page) < 0))
1458 print_bad_pte(vma, addr, ptent, page);
1459 }
1460 if (unlikely(__tlb_remove_page(tlb, page, delay_rmap))) {
1cf35d47 1461 force_flush = 1;
ce9ec37b 1462 addr += PAGE_SIZE;
d16dfc55 1463 break;
1cf35d47 1464 }
1da177e4
LT
1465 continue;
1466 }
5042db43
JG
1467
1468 entry = pte_to_swp_entry(ptent);
b756a3b5
AP
1469 if (is_device_private_entry(entry) ||
1470 is_device_exclusive_entry(entry)) {
8018db85 1471 page = pfn_swap_entry_to_page(entry);
254ab940 1472 if (unlikely(!should_zap_page(details, page)))
91b61ef3 1473 continue;
999dad82
PX
1474 /*
1475 * Both device private/exclusive mappings should only
1476 * work with anonymous page so far, so we don't need to
1477 * consider uffd-wp bit when zap. For more information,
1478 * see zap_install_uffd_wp_if_needed().
1479 */
1480 WARN_ON_ONCE(!vma_is_anonymous(vma));
5042db43 1481 rss[mm_counter(page)]--;
b756a3b5 1482 if (is_device_private_entry(entry))
cea86fe2 1483 page_remove_rmap(page, vma, false);
5042db43 1484 put_page(page);
8018db85 1485 } else if (!non_swap_entry(entry)) {
5abfd71d
PX
1486 /* Genuine swap entry, hence a private anon page */
1487 if (!should_zap_cows(details))
1488 continue;
8a5f14a2 1489 rss[MM_SWAPENTS]--;
8018db85
PX
1490 if (unlikely(!free_swap_and_cache(entry)))
1491 print_bad_pte(vma, addr, ptent, NULL);
5abfd71d 1492 } else if (is_migration_entry(entry)) {
af5cdaf8 1493 page = pfn_swap_entry_to_page(entry);
254ab940 1494 if (!should_zap_page(details, page))
5abfd71d 1495 continue;
eca56ff9 1496 rss[mm_counter(page)]--;
999dad82 1497 } else if (pte_marker_entry_uffd_wp(entry)) {
2bad466c
PX
1498 /*
1499 * For anon: always drop the marker; for file: only
1500 * drop the marker if explicitly requested.
1501 */
1502 if (!vma_is_anonymous(vma) &&
1503 !zap_drop_file_uffd_wp(details))
999dad82 1504 continue;
9f186f9e 1505 } else if (is_hwpoison_entry(entry) ||
af19487f 1506 is_poisoned_swp_entry(entry)) {
5abfd71d
PX
1507 if (!should_zap_cows(details))
1508 continue;
1509 } else {
1510 /* We should have covered all the swap entry types */
1511 WARN_ON_ONCE(1);
b084d435 1512 }
9888a1ca 1513 pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
999dad82 1514 zap_install_uffd_wp_if_needed(vma, addr, pte, details, ptent);
97a89413 1515 } while (pte++, addr += PAGE_SIZE, addr != end);
ae859762 1516
d559db08 1517 add_mm_rss_vec(mm, rss);
6606c3e0 1518 arch_leave_lazy_mmu_mode();
51c6f666 1519
1cf35d47 1520 /* Do the actual TLB flush before dropping ptl */
5df397de 1521 if (force_flush) {
1cf35d47 1522 tlb_flush_mmu_tlbonly(tlb);
f036c818 1523 tlb_flush_rmaps(tlb, vma);
5df397de 1524 }
1cf35d47
LT
1525 pte_unmap_unlock(start_pte, ptl);
1526
1527 /*
1528 * If we forced a TLB flush (either due to running out of
1529 * batch buffers or because we needed to flush dirty TLB
1530 * entries before releasing the ptl), free the batched
3db82b93 1531 * memory too. Come back again if we didn't do everything.
1cf35d47 1532 */
3db82b93 1533 if (force_flush)
fa0aafb8 1534 tlb_flush_mmu(tlb);
d16dfc55 1535
51c6f666 1536 return addr;
1da177e4
LT
1537}
1538
51c6f666 1539static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
b5810039 1540 struct vm_area_struct *vma, pud_t *pud,
1da177e4 1541 unsigned long addr, unsigned long end,
97a89413 1542 struct zap_details *details)
1da177e4
LT
1543{
1544 pmd_t *pmd;
1545 unsigned long next;
1546
1547 pmd = pmd_offset(pud, addr);
1548 do {
1549 next = pmd_addr_end(addr, end);
84c3fc4e 1550 if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
53406ed1 1551 if (next - addr != HPAGE_PMD_SIZE)
fd60775a 1552 __split_huge_pmd(vma, pmd, addr, false, NULL);
3db82b93
HD
1553 else if (zap_huge_pmd(tlb, vma, pmd, addr)) {
1554 addr = next;
1555 continue;
1556 }
71e3aac0 1557 /* fall through */
3506659e
MWO
1558 } else if (details && details->single_folio &&
1559 folio_test_pmd_mappable(details->single_folio) &&
22061a1f
HD
1560 next - addr == HPAGE_PMD_SIZE && pmd_none(*pmd)) {
1561 spinlock_t *ptl = pmd_lock(tlb->mm, pmd);
1562 /*
1563 * Take and drop THP pmd lock so that we cannot return
1564 * prematurely, while zap_huge_pmd() has cleared *pmd,
1565 * but not yet decremented compound_mapcount().
1566 */
1567 spin_unlock(ptl);
71e3aac0 1568 }
3db82b93
HD
1569 if (pmd_none(*pmd)) {
1570 addr = next;
1571 continue;
1572 }
1573 addr = zap_pte_range(tlb, vma, pmd, addr, next, details);
1574 if (addr != next)
1575 pmd--;
1576 } while (pmd++, cond_resched(), addr != end);
51c6f666
RH
1577
1578 return addr;
1da177e4
LT
1579}
1580
51c6f666 1581static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
c2febafc 1582 struct vm_area_struct *vma, p4d_t *p4d,
1da177e4 1583 unsigned long addr, unsigned long end,
97a89413 1584 struct zap_details *details)
1da177e4
LT
1585{
1586 pud_t *pud;
1587 unsigned long next;
1588
c2febafc 1589 pud = pud_offset(p4d, addr);
1da177e4
LT
1590 do {
1591 next = pud_addr_end(addr, end);
a00cc7d9
MW
1592 if (pud_trans_huge(*pud) || pud_devmap(*pud)) {
1593 if (next - addr != HPAGE_PUD_SIZE) {
42fc5414 1594 mmap_assert_locked(tlb->mm);
a00cc7d9
MW
1595 split_huge_pud(vma, pud, addr);
1596 } else if (zap_huge_pud(tlb, vma, pud, addr))
1597 goto next;
1598 /* fall through */
1599 }
97a89413 1600 if (pud_none_or_clear_bad(pud))
1da177e4 1601 continue;
97a89413 1602 next = zap_pmd_range(tlb, vma, pud, addr, next, details);
a00cc7d9
MW
1603next:
1604 cond_resched();
97a89413 1605 } while (pud++, addr = next, addr != end);
51c6f666
RH
1606
1607 return addr;
1da177e4
LT
1608}
1609
c2febafc
KS
1610static inline unsigned long zap_p4d_range(struct mmu_gather *tlb,
1611 struct vm_area_struct *vma, pgd_t *pgd,
1612 unsigned long addr, unsigned long end,
1613 struct zap_details *details)
1614{
1615 p4d_t *p4d;
1616 unsigned long next;
1617
1618 p4d = p4d_offset(pgd, addr);
1619 do {
1620 next = p4d_addr_end(addr, end);
1621 if (p4d_none_or_clear_bad(p4d))
1622 continue;
1623 next = zap_pud_range(tlb, vma, p4d, addr, next, details);
1624 } while (p4d++, addr = next, addr != end);
1625
1626 return addr;
1627}
1628
aac45363 1629void unmap_page_range(struct mmu_gather *tlb,
038c7aa1
AV
1630 struct vm_area_struct *vma,
1631 unsigned long addr, unsigned long end,
1632 struct zap_details *details)
1da177e4
LT
1633{
1634 pgd_t *pgd;
1635 unsigned long next;
1636
1da177e4
LT
1637 BUG_ON(addr >= end);
1638 tlb_start_vma(tlb, vma);
1639 pgd = pgd_offset(vma->vm_mm, addr);
1640 do {
1641 next = pgd_addr_end(addr, end);
97a89413 1642 if (pgd_none_or_clear_bad(pgd))
1da177e4 1643 continue;
c2febafc 1644 next = zap_p4d_range(tlb, vma, pgd, addr, next, details);
97a89413 1645 } while (pgd++, addr = next, addr != end);
1da177e4
LT
1646 tlb_end_vma(tlb, vma);
1647}
51c6f666 1648
f5cc4eef
AV
1649
1650static void unmap_single_vma(struct mmu_gather *tlb,
1651 struct vm_area_struct *vma, unsigned long start_addr,
4f74d2c8 1652 unsigned long end_addr,
68f48381 1653 struct zap_details *details, bool mm_wr_locked)
f5cc4eef
AV
1654{
1655 unsigned long start = max(vma->vm_start, start_addr);
1656 unsigned long end;
1657
1658 if (start >= vma->vm_end)
1659 return;
1660 end = min(vma->vm_end, end_addr);
1661 if (end <= vma->vm_start)
1662 return;
1663
cbc91f71
SD
1664 if (vma->vm_file)
1665 uprobe_munmap(vma, start, end);
1666
b3b9c293 1667 if (unlikely(vma->vm_flags & VM_PFNMAP))
68f48381 1668 untrack_pfn(vma, 0, 0, mm_wr_locked);
f5cc4eef
AV
1669
1670 if (start != end) {
1671 if (unlikely(is_vm_hugetlb_page(vma))) {
1672 /*
 1673		 * It is undesirable to test vma->vm_file as it
 1674		 * should be non-null for a valid hugetlb area.
 1675		 * However, vm_file will be NULL in the error
7aa6b4ad 1676		 * cleanup path of mmap_region. When the
f5cc4eef 1677		 * hugetlbfs ->mmap method fails,
7aa6b4ad 1678		 * mmap_region() nullifies vma->vm_file
f5cc4eef
AV
 1679		 * before calling this function to clean up.
 1680		 * Since no pte has actually been set up, it is
 1681		 * safe to do nothing in this case.
1682 */
24669e58 1683 if (vma->vm_file) {
05e90bd0
PX
1684 zap_flags_t zap_flags = details ?
1685 details->zap_flags : 0;
2820b0f0 1686 __unmap_hugepage_range(tlb, vma, start, end,
05e90bd0 1687 NULL, zap_flags);
24669e58 1688 }
f5cc4eef
AV
1689 } else
1690 unmap_page_range(tlb, vma, start, end, details);
1691 }
1da177e4
LT
1692}
1693
1da177e4
LT
1694/**
1695 * unmap_vmas - unmap a range of memory covered by a list of vma's
0164f69d 1696 * @tlb: address of the caller's struct mmu_gather
6e412203 1697 * @mas: the maple state
1da177e4
LT
1698 * @vma: the starting vma
1699 * @start_addr: virtual address at which to start unmapping
1700 * @end_addr: virtual address at which to end unmapping
6e412203 1701 * @tree_end: The maximum index to check
809ef83c 1702 * @mm_wr_locked: true if the mmap_lock is held for writing
1da177e4 1703 *
508034a3 1704 * Unmap all pages in the vma list.
1da177e4 1705 *
1da177e4
LT
1706 * Only addresses between `start' and `end' will be unmapped.
1707 *
1708 * The VMA list must be sorted in ascending virtual address order.
1709 *
1710 * unmap_vmas() assumes that the caller will flush the whole unmapped address
1711 * range after unmap_vmas() returns. So the only responsibility here is to
1712 * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
1713 * drops the lock and schedules.
1714 */
fd892593 1715void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
1da177e4 1716 struct vm_area_struct *vma, unsigned long start_addr,
fd892593
LH
1717 unsigned long end_addr, unsigned long tree_end,
1718 bool mm_wr_locked)
1da177e4 1719{
ac46d4f3 1720 struct mmu_notifier_range range;
999dad82 1721 struct zap_details details = {
04ada095 1722 .zap_flags = ZAP_FLAG_DROP_MARKER | ZAP_FLAG_UNMAP,
999dad82
PX
1723 /* Careful - we need to zap private pages too! */
1724 .even_cows = true,
1725 };
1da177e4 1726
7d4a8be0 1727 mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma->vm_mm,
6f4f13e8 1728 start_addr, end_addr);
ac46d4f3 1729 mmu_notifier_invalidate_range_start(&range);
763ecb03 1730 do {
2820b0f0
RR
1731 unsigned long start = start_addr;
1732 unsigned long end = end_addr;
1733 hugetlb_zap_begin(vma, &start, &end);
1734 unmap_single_vma(tlb, vma, start, end, &details,
68f48381 1735 mm_wr_locked);
2820b0f0 1736 hugetlb_zap_end(vma, &details);
fd892593 1737 } while ((vma = mas_find(mas, tree_end - 1)) != NULL);
ac46d4f3 1738 mmu_notifier_invalidate_range_end(&range);
1da177e4
LT
1739}
1740
f5cc4eef
AV
1741/**
1742 * zap_page_range_single - remove user pages in a given range
1743 * @vma: vm_area_struct holding the applicable pages
1744 * @address: starting address of pages to zap
1745 * @size: number of bytes to zap
8a5f14a2 1746 * @details: details of shared cache invalidation
f5cc4eef
AV
1747 *
1748 * The range must fit into one VMA.
1da177e4 1749 */
21b85b09 1750void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
1da177e4
LT
1751 unsigned long size, struct zap_details *details)
1752{
21b85b09 1753 const unsigned long end = address + size;
ac46d4f3 1754 struct mmu_notifier_range range;
d16dfc55 1755 struct mmu_gather tlb;
1da177e4 1756
1da177e4 1757 lru_add_drain();
7d4a8be0 1758 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
21b85b09 1759 address, end);
2820b0f0 1760 hugetlb_zap_begin(vma, &range.start, &range.end);
a72afd87 1761 tlb_gather_mmu(&tlb, vma->vm_mm);
ac46d4f3
JG
1762 update_hiwater_rss(vma->vm_mm);
1763 mmu_notifier_invalidate_range_start(&range);
21b85b09
MK
1764 /*
1765 * unmap 'address-end' not 'range.start-range.end' as range
1766 * could have been expanded for hugetlb pmd sharing.
1767 */
68f48381 1768 unmap_single_vma(&tlb, vma, address, end, details, false);
ac46d4f3 1769 mmu_notifier_invalidate_range_end(&range);
ae8eba8b 1770 tlb_finish_mmu(&tlb);
2820b0f0 1771 hugetlb_zap_end(vma, details);
1da177e4
LT
1772}
1773
c627f9cc
JS
1774/**
1775 * zap_vma_ptes - remove ptes mapping the vma
1776 * @vma: vm_area_struct holding ptes to be zapped
1777 * @address: starting address of pages to zap
1778 * @size: number of bytes to zap
1779 *
1780 * This function only unmaps ptes assigned to VM_PFNMAP vmas.
1781 *
1782 * The entire address range must be fully contained within the vma.
1783 *
c627f9cc 1784 */
27d036e3 1785void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
c627f9cc
JS
1786 unsigned long size)
1787{
88a35912 1788 if (!range_in_vma(vma, address, address + size) ||
c627f9cc 1789 !(vma->vm_flags & VM_PFNMAP))
27d036e3
LR
1790 return;
1791
f5cc4eef 1792 zap_page_range_single(vma, address, size, NULL);
c627f9cc
JS
1793}
1794EXPORT_SYMBOL_GPL(zap_vma_ptes);
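/*
 * Editor's illustrative sketch (not part of mm/memory.c): a driver that
 * earlier inserted pfns into a VM_PFNMAP vma could tear those mappings
 * down again with zap_vma_ptes().  The my_dev structure and its fields
 * are hypothetical.
 */
#if 0
static void my_dev_revoke_mapping(struct my_dev *dev)
{
	struct vm_area_struct *vma = dev->vma;	/* hypothetical bookkeeping */

	/* The range must lie entirely within the vma. */
	zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
}
#endif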
1795
8cd3984d 1796static pmd_t *walk_to_pmd(struct mm_struct *mm, unsigned long addr)
c9cfcddf 1797{
c2febafc
KS
1798 pgd_t *pgd;
1799 p4d_t *p4d;
1800 pud_t *pud;
1801 pmd_t *pmd;
1802
1803 pgd = pgd_offset(mm, addr);
1804 p4d = p4d_alloc(mm, pgd, addr);
1805 if (!p4d)
1806 return NULL;
1807 pud = pud_alloc(mm, p4d, addr);
1808 if (!pud)
1809 return NULL;
1810 pmd = pmd_alloc(mm, pud, addr);
1811 if (!pmd)
1812 return NULL;
1813
1814 VM_BUG_ON(pmd_trans_huge(*pmd));
8cd3984d
AR
1815 return pmd;
1816}
1817
1818pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
1819 spinlock_t **ptl)
1820{
1821 pmd_t *pmd = walk_to_pmd(mm, addr);
1822
1823 if (!pmd)
1824 return NULL;
c2febafc 1825 return pte_alloc_map_lock(mm, pmd, addr, ptl);
c9cfcddf
LT
1826}
1827
8efd6f5b
AR
1828static int validate_page_before_insert(struct page *page)
1829{
1830 if (PageAnon(page) || PageSlab(page) || page_has_type(page))
1831 return -EINVAL;
1832 flush_dcache_page(page);
1833 return 0;
1834}
1835
cea86fe2 1836static int insert_page_into_pte_locked(struct vm_area_struct *vma, pte_t *pte,
8efd6f5b
AR
1837 unsigned long addr, struct page *page, pgprot_t prot)
1838{
c33c7948 1839 if (!pte_none(ptep_get(pte)))
8efd6f5b
AR
1840 return -EBUSY;
1841 /* Ok, finally just insert the thing.. */
1842 get_page(page);
f1a79412 1843 inc_mm_counter(vma->vm_mm, mm_counter_file(page));
cea86fe2
HD
1844 page_add_file_rmap(page, vma, false);
1845 set_pte_at(vma->vm_mm, addr, pte, mk_pte(page, prot));
8efd6f5b
AR
1846 return 0;
1847}
1848
238f58d8
LT
1849/*
1850 * This is the old fallback for page remapping.
1851 *
1852 * For historical reasons, it only allows reserved pages. Only
1853 * old drivers should use this, and they needed to mark their
1854 * pages reserved for the old functions anyway.
1855 */
423bad60
NP
1856static int insert_page(struct vm_area_struct *vma, unsigned long addr,
1857 struct page *page, pgprot_t prot)
238f58d8
LT
1858{
1859 int retval;
c9cfcddf 1860 pte_t *pte;
8a9f3ccd
BS
1861 spinlock_t *ptl;
1862
8efd6f5b
AR
1863 retval = validate_page_before_insert(page);
1864 if (retval)
5b4e655e 1865 goto out;
238f58d8 1866 retval = -ENOMEM;
cea86fe2 1867 pte = get_locked_pte(vma->vm_mm, addr, &ptl);
238f58d8 1868 if (!pte)
5b4e655e 1869 goto out;
cea86fe2 1870 retval = insert_page_into_pte_locked(vma, pte, addr, page, prot);
238f58d8
LT
1871 pte_unmap_unlock(pte, ptl);
1872out:
1873 return retval;
1874}
1875
cea86fe2 1876static int insert_page_in_batch_locked(struct vm_area_struct *vma, pte_t *pte,
8cd3984d
AR
1877 unsigned long addr, struct page *page, pgprot_t prot)
1878{
1879 int err;
1880
1881 if (!page_count(page))
1882 return -EINVAL;
1883 err = validate_page_before_insert(page);
7f70c2a6
AR
1884 if (err)
1885 return err;
cea86fe2 1886 return insert_page_into_pte_locked(vma, pte, addr, page, prot);
8cd3984d
AR
1887}
1888
1889/* insert_pages() amortizes the cost of spinlock operations
bb7dbaaf 1890 * when inserting pages in a loop.
8cd3984d
AR
1891 */
1892static int insert_pages(struct vm_area_struct *vma, unsigned long addr,
1893 struct page **pages, unsigned long *num, pgprot_t prot)
1894{
1895 pmd_t *pmd = NULL;
7f70c2a6
AR
1896 pte_t *start_pte, *pte;
1897 spinlock_t *pte_lock;
8cd3984d
AR
1898 struct mm_struct *const mm = vma->vm_mm;
1899 unsigned long curr_page_idx = 0;
1900 unsigned long remaining_pages_total = *num;
1901 unsigned long pages_to_write_in_pmd;
1902 int ret;
1903more:
1904 ret = -EFAULT;
1905 pmd = walk_to_pmd(mm, addr);
1906 if (!pmd)
1907 goto out;
1908
1909 pages_to_write_in_pmd = min_t(unsigned long,
1910 remaining_pages_total, PTRS_PER_PTE - pte_index(addr));
1911
1912 /* Allocate the PTE if necessary; takes PMD lock once only. */
1913 ret = -ENOMEM;
1914 if (pte_alloc(mm, pmd))
1915 goto out;
8cd3984d
AR
1916
1917 while (pages_to_write_in_pmd) {
1918 int pte_idx = 0;
1919 const int batch_size = min_t(int, pages_to_write_in_pmd, 8);
1920
7f70c2a6 1921 start_pte = pte_offset_map_lock(mm, pmd, addr, &pte_lock);
3db82b93
HD
1922 if (!start_pte) {
1923 ret = -EFAULT;
1924 goto out;
1925 }
7f70c2a6 1926 for (pte = start_pte; pte_idx < batch_size; ++pte, ++pte_idx) {
cea86fe2 1927 int err = insert_page_in_batch_locked(vma, pte,
8cd3984d
AR
1928 addr, pages[curr_page_idx], prot);
1929 if (unlikely(err)) {
7f70c2a6 1930 pte_unmap_unlock(start_pte, pte_lock);
8cd3984d
AR
1931 ret = err;
1932 remaining_pages_total -= pte_idx;
1933 goto out;
1934 }
1935 addr += PAGE_SIZE;
1936 ++curr_page_idx;
1937 }
7f70c2a6 1938 pte_unmap_unlock(start_pte, pte_lock);
8cd3984d
AR
1939 pages_to_write_in_pmd -= batch_size;
1940 remaining_pages_total -= batch_size;
1941 }
1942 if (remaining_pages_total)
1943 goto more;
1944 ret = 0;
1945out:
1946 *num = remaining_pages_total;
1947 return ret;
1948}
8cd3984d
AR
1949
1950/**
1951 * vm_insert_pages - insert multiple pages into user vma, batching the pmd lock.
1952 * @vma: user vma to map to
1953 * @addr: target start user address of these pages
1954 * @pages: source kernel pages
1955 * @num: in: number of pages to map. out: number of pages that were *not*
1956 * mapped. (0 means all pages were successfully mapped).
1957 *
1958 * Preferred over vm_insert_page() when inserting multiple pages.
1959 *
1960 * In case of error, we may have mapped a subset of the provided
1961 * pages. It is the caller's responsibility to account for this case.
1962 *
1963 * The same restrictions apply as in vm_insert_page().
1964 */
1965int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
1966 struct page **pages, unsigned long *num)
1967{
8cd3984d
AR
1968 const unsigned long end_addr = addr + (*num * PAGE_SIZE) - 1;
1969
1970 if (addr < vma->vm_start || end_addr >= vma->vm_end)
1971 return -EFAULT;
1972 if (!(vma->vm_flags & VM_MIXEDMAP)) {
d8ed45c5 1973 BUG_ON(mmap_read_trylock(vma->vm_mm));
8cd3984d 1974 BUG_ON(vma->vm_flags & VM_PFNMAP);
1c71222e 1975 vm_flags_set(vma, VM_MIXEDMAP);
8cd3984d
AR
1976 }
1977 /* Defer page refcount checking till we're about to map that page. */
1978 return insert_pages(vma, addr, pages, num, vma->vm_page_prot);
8cd3984d
AR
1979}
1980EXPORT_SYMBOL(vm_insert_pages);
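/*
 * Editor's illustrative sketch (not part of mm/memory.c): an mmap
 * handler mapping a whole kernel page array with one call, so the pte
 * lock is taken per PMD-sized batch instead of per page.  my_dev,
 * dev->pages and dev->npages are hypothetical.
 */
#if 0
static int my_dev_mmap_batched(struct file *file, struct vm_area_struct *vma)
{
	struct my_dev *dev = file->private_data;	/* hypothetical */
	unsigned long num = dev->npages;

	/* On failure, 'num' is left holding the count of pages not mapped. */
	return vm_insert_pages(vma, vma->vm_start, dev->pages, &num);
}
#endif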
1981
bfa5bf6d
REB
1982/**
1983 * vm_insert_page - insert single page into user vma
1984 * @vma: user vma to map to
1985 * @addr: target user address of this page
1986 * @page: source kernel page
1987 *
a145dd41
LT
1988 * This allows drivers to insert individual pages they've allocated
1989 * into a user vma.
1990 *
1991 * The page has to be a nice clean _individual_ kernel allocation.
1992 * If you allocate a compound page, you need to have marked it as
1993 * such (__GFP_COMP), or manually just split the page up yourself
8dfcc9ba 1994 * (see split_page()).
a145dd41
LT
1995 *
1996 * NOTE! Traditionally this was done with "remap_pfn_range()" which
1997 * took an arbitrary page protection parameter. This doesn't allow
1998 * that. Your vma protection will have to be set up correctly, which
1999 * means that if you want a shared writable mapping, you'd better
2000 * ask for a shared writable mapping!
2001 *
2002 * The page does not need to be reserved.
4b6e1e37
KK
2003 *
2004 * Usually this function is called from f_op->mmap() handler
c1e8d7c6 2005 * under mm->mmap_lock write-lock, so it can change vma->vm_flags.
4b6e1e37
KK
2006 * Caller must set VM_MIXEDMAP on vma if it wants to call this
2007 * function from other places, for example from page-fault handler.
a862f68a
MR
2008 *
2009 * Return: %0 on success, negative error code otherwise.
a145dd41 2010 */
423bad60
NP
2011int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
2012 struct page *page)
a145dd41
LT
2013{
2014 if (addr < vma->vm_start || addr >= vma->vm_end)
2015 return -EFAULT;
2016 if (!page_count(page))
2017 return -EINVAL;
4b6e1e37 2018 if (!(vma->vm_flags & VM_MIXEDMAP)) {
d8ed45c5 2019 BUG_ON(mmap_read_trylock(vma->vm_mm));
4b6e1e37 2020 BUG_ON(vma->vm_flags & VM_PFNMAP);
1c71222e 2021 vm_flags_set(vma, VM_MIXEDMAP);
4b6e1e37 2022 }
423bad60 2023 return insert_page(vma, addr, page, vma->vm_page_prot);
a145dd41 2024}
e3c3374f 2025EXPORT_SYMBOL(vm_insert_page);
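/*
 * Editor's illustrative sketch (not part of mm/memory.c): the typical
 * caller is an mmap handler exposing a single driver-allocated page.
 * my_dev and dev->shared_page (allocated with alloc_page(GFP_KERNEL))
 * are hypothetical.
 */
#if 0
static int my_dev_mmap_single(struct file *file, struct vm_area_struct *vma)
{
	struct my_dev *dev = file->private_data;	/* hypothetical */

	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;
	return vm_insert_page(vma, vma->vm_start, dev->shared_page);
}
#endif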
a145dd41 2026
a667d745
SJ
2027/*
2028 * __vm_map_pages - maps range of kernel pages into user vma
2029 * @vma: user vma to map to
2030 * @pages: pointer to array of source kernel pages
2031 * @num: number of pages in page array
2032 * @offset: user's requested vm_pgoff
2033 *
2034 * This allows drivers to map range of kernel pages into a user vma.
2035 *
2036 * Return: 0 on success and error code otherwise.
2037 */
2038static int __vm_map_pages(struct vm_area_struct *vma, struct page **pages,
2039 unsigned long num, unsigned long offset)
2040{
2041 unsigned long count = vma_pages(vma);
2042 unsigned long uaddr = vma->vm_start;
2043 int ret, i;
2044
2045 /* Fail if the user requested offset is beyond the end of the object */
96756fcb 2046 if (offset >= num)
a667d745
SJ
2047 return -ENXIO;
2048
2049 /* Fail if the user requested size exceeds available object size */
2050 if (count > num - offset)
2051 return -ENXIO;
2052
2053 for (i = 0; i < count; i++) {
2054 ret = vm_insert_page(vma, uaddr, pages[offset + i]);
2055 if (ret < 0)
2056 return ret;
2057 uaddr += PAGE_SIZE;
2058 }
2059
2060 return 0;
2061}
2062
2063/**
 2064 * vm_map_pages - map a range of kernel pages starting at a non-zero offset
2065 * @vma: user vma to map to
2066 * @pages: pointer to array of source kernel pages
2067 * @num: number of pages in page array
2068 *
2069 * Maps an object consisting of @num pages, catering for the user's
 2070 * requested vm_pgoff.
2071 *
2072 * If we fail to insert any page into the vma, the function will return
2073 * immediately leaving any previously inserted pages present. Callers
2074 * from the mmap handler may immediately return the error as their caller
2075 * will destroy the vma, removing any successfully inserted pages. Other
2076 * callers should make their own arrangements for calling unmap_region().
2077 *
2078 * Context: Process context. Called by mmap handlers.
2079 * Return: 0 on success and error code otherwise.
2080 */
2081int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
2082 unsigned long num)
2083{
2084 return __vm_map_pages(vma, pages, num, vma->vm_pgoff);
2085}
2086EXPORT_SYMBOL(vm_map_pages);
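/*
 * Editor's illustrative sketch (not part of mm/memory.c): with
 * vm_map_pages() a buffer-oriented driver hands over its whole page
 * array and lets the helper apply vma_pages() and vma->vm_pgoff.
 * my_buf, buf->pages and buf->num_pages are hypothetical.
 */
#if 0
static int my_buf_mmap(struct my_buf *buf, struct vm_area_struct *vma)
{
	/* Maps vma_pages(vma) pages, starting at buf->pages[vma->vm_pgoff]. */
	return vm_map_pages(vma, buf->pages, buf->num_pages);
}
#endif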
2087
2088/**
 2089 * vm_map_pages_zero - map a range of kernel pages starting at offset 0
2090 * @vma: user vma to map to
2091 * @pages: pointer to array of source kernel pages
2092 * @num: number of pages in page array
2093 *
2094 * Similar to vm_map_pages(), except that it explicitly sets the offset
 2095 * to 0. This function is intended for drivers that do not consider
2096 * vm_pgoff.
2097 *
2098 * Context: Process context. Called by mmap handlers.
2099 * Return: 0 on success and error code otherwise.
2100 */
2101int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
2102 unsigned long num)
2103{
2104 return __vm_map_pages(vma, pages, num, 0);
2105}
2106EXPORT_SYMBOL(vm_map_pages_zero);
2107
9b5a8e00 2108static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
b2770da6 2109 pfn_t pfn, pgprot_t prot, bool mkwrite)
423bad60
NP
2110{
2111 struct mm_struct *mm = vma->vm_mm;
423bad60
NP
2112 pte_t *pte, entry;
2113 spinlock_t *ptl;
2114
423bad60
NP
2115 pte = get_locked_pte(mm, addr, &ptl);
2116 if (!pte)
9b5a8e00 2117 return VM_FAULT_OOM;
c33c7948
RR
2118 entry = ptep_get(pte);
2119 if (!pte_none(entry)) {
b2770da6
RZ
2120 if (mkwrite) {
2121 /*
2122 * For read faults on private mappings the PFN passed
2123 * in may not match the PFN we have mapped if the
2124 * mapped PFN is a writeable COW page. In the mkwrite
2125 * case we are creating a writable PTE for a shared
f2c57d91
JK
2126 * mapping and we expect the PFNs to match. If they
2127 * don't match, we are likely racing with block
2128 * allocation and mapping invalidation so just skip the
2129 * update.
b2770da6 2130 */
c33c7948
RR
2131 if (pte_pfn(entry) != pfn_t_to_pfn(pfn)) {
2132 WARN_ON_ONCE(!is_zero_pfn(pte_pfn(entry)));
b2770da6 2133 goto out_unlock;
f2c57d91 2134 }
c33c7948 2135 entry = pte_mkyoung(entry);
cae85cb8
JK
2136 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2137 if (ptep_set_access_flags(vma, addr, pte, entry, 1))
2138 update_mmu_cache(vma, addr, pte);
2139 }
2140 goto out_unlock;
b2770da6 2141 }
423bad60
NP
2142
2143 /* Ok, finally just insert the thing.. */
01c8f1c4
DW
2144 if (pfn_t_devmap(pfn))
2145 entry = pte_mkdevmap(pfn_t_pte(pfn, prot));
2146 else
2147 entry = pte_mkspecial(pfn_t_pte(pfn, prot));
b2770da6 2148
b2770da6
RZ
2149 if (mkwrite) {
2150 entry = pte_mkyoung(entry);
2151 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2152 }
2153
423bad60 2154 set_pte_at(mm, addr, pte, entry);
4b3073e1 2155 update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */
423bad60 2156
423bad60
NP
2157out_unlock:
2158 pte_unmap_unlock(pte, ptl);
9b5a8e00 2159 return VM_FAULT_NOPAGE;
423bad60
NP
2160}
2161
f5e6d1d5
MW
2162/**
2163 * vmf_insert_pfn_prot - insert single pfn into user vma with specified pgprot
2164 * @vma: user vma to map to
2165 * @addr: target user address of this page
2166 * @pfn: source kernel pfn
2167 * @pgprot: pgprot flags for the inserted page
2168 *
a1a0aea5 2169 * This is exactly like vmf_insert_pfn(), except that it allows drivers
f5e6d1d5
MW
2170 * to override pgprot on a per-page basis.
2171 *
2172 * This only makes sense for IO mappings, and it makes no sense for
2173 * COW mappings. In general, using multiple vmas is preferable;
ae2b01f3 2174 * vmf_insert_pfn_prot should only be used if using multiple VMAs is
f5e6d1d5
MW
2175 * impractical.
2176 *
28d8b812
LS
2177 * pgprot typically only differs from @vma->vm_page_prot when drivers set
2178 * caching- and encryption bits different than those of @vma->vm_page_prot,
2179 * because the caching- or encryption mode may not be known at mmap() time.
2180 *
2181 * This is ok as long as @vma->vm_page_prot is not used by the core vm
2182 * to set caching and encryption bits for those vmas (except for COW pages).
2183 * This is ensured by core vm only modifying these page table entries using
2184 * functions that don't touch caching- or encryption bits, using pte_modify()
2185 * if needed. (See for example mprotect()).
2186 *
2187 * Also when new page-table entries are created, this is only done using the
2188 * fault() callback, and never using the value of vma->vm_page_prot,
2189 * except for page-table entries that point to anonymous pages as the result
2190 * of COW.
574c5b3d 2191 *
ae2b01f3 2192 * Context: Process context. May allocate using %GFP_KERNEL.
f5e6d1d5
MW
2193 * Return: vm_fault_t value.
2194 */
2195vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
2196 unsigned long pfn, pgprot_t pgprot)
2197{
6d958546
MW
2198 /*
2199 * Technically, architectures with pte_special can avoid all these
2200 * restrictions (same for remap_pfn_range). However we would like
2201 * consistency in testing and feature parity among all, so we should
2202 * try to keep these invariants in place for everybody.
2203 */
2204 BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
2205 BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
2206 (VM_PFNMAP|VM_MIXEDMAP));
2207 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
2208 BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
2209
2210 if (addr < vma->vm_start || addr >= vma->vm_end)
2211 return VM_FAULT_SIGBUS;
2212
2213 if (!pfn_modify_allowed(pfn, pgprot))
2214 return VM_FAULT_SIGBUS;
2215
2216 track_pfn_insert(vma, &pgprot, __pfn_to_pfn_t(pfn, PFN_DEV));
2217
9b5a8e00 2218 return insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot,
6d958546 2219 false);
f5e6d1d5
MW
2220}
2221EXPORT_SYMBOL(vmf_insert_pfn_prot);
e0dc0d8f 2222
ae2b01f3
MW
2223/**
2224 * vmf_insert_pfn - insert single pfn into user vma
2225 * @vma: user vma to map to
2226 * @addr: target user address of this page
2227 * @pfn: source kernel pfn
2228 *
2229 * Similar to vm_insert_page, this allows drivers to insert individual pages
2230 * they've allocated into a user vma. Same comments apply.
2231 *
2232 * This function should only be called from a vm_ops->fault handler, and
2233 * in that case the handler should return the result of this function.
2234 *
2235 * vma cannot be a COW mapping.
2236 *
2237 * As this is called only for pages that do not currently exist, we
2238 * do not need to flush old virtual caches or the TLB.
2239 *
2240 * Context: Process context. May allocate using %GFP_KERNEL.
2241 * Return: vm_fault_t value.
2242 */
2243vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
2244 unsigned long pfn)
2245{
2246 return vmf_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot);
2247}
2248EXPORT_SYMBOL(vmf_insert_pfn);
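/*
 * Editor's illustrative sketch (not part of mm/memory.c): a ->fault
 * handler for a VM_PFNMAP vma usually computes the pfn backing
 * vmf->address and returns the result of vmf_insert_pfn() directly.
 * my_dev and dev->phys_base are hypothetical.
 */
#if 0
static vm_fault_t my_dev_fault(struct vm_fault *vmf)
{
	struct my_dev *dev = vmf->vma->vm_private_data;	/* hypothetical */
	unsigned long pfn = (dev->phys_base >> PAGE_SHIFT) + vmf->pgoff;

	return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
}
#endif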
2249
785a3fab
DW
2250static bool vm_mixed_ok(struct vm_area_struct *vma, pfn_t pfn)
2251{
2252 /* these checks mirror the abort conditions in vm_normal_page */
2253 if (vma->vm_flags & VM_MIXEDMAP)
2254 return true;
2255 if (pfn_t_devmap(pfn))
2256 return true;
2257 if (pfn_t_special(pfn))
2258 return true;
2259 if (is_zero_pfn(pfn_t_to_pfn(pfn)))
2260 return true;
2261 return false;
2262}
2263
79f3aa5b 2264static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma,
28d8b812 2265 unsigned long addr, pfn_t pfn, bool mkwrite)
423bad60 2266{
28d8b812 2267 pgprot_t pgprot = vma->vm_page_prot;
79f3aa5b 2268 int err;
87744ab3 2269
785a3fab 2270 BUG_ON(!vm_mixed_ok(vma, pfn));
e0dc0d8f 2271
423bad60 2272 if (addr < vma->vm_start || addr >= vma->vm_end)
79f3aa5b 2273 return VM_FAULT_SIGBUS;
308a047c
BP
2274
2275 track_pfn_insert(vma, &pgprot, pfn);
e0dc0d8f 2276
42e4089c 2277 if (!pfn_modify_allowed(pfn_t_to_pfn(pfn), pgprot))
79f3aa5b 2278 return VM_FAULT_SIGBUS;
42e4089c 2279
423bad60
NP
2280 /*
2281 * If we don't have pte special, then we have to use the pfn_valid()
2282 * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
2283 * refcount the page if pfn_valid is true (hence insert_page rather
62eede62
HD
2284 * than insert_pfn). If a zero_pfn were inserted into a VM_MIXEDMAP
 2285	 * without pte special, it would then be refcounted as a normal page.
423bad60 2286 */
00b3a331
LD
2287 if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) &&
2288 !pfn_t_devmap(pfn) && pfn_t_valid(pfn)) {
423bad60
NP
2289 struct page *page;
2290
03fc2da6
DW
2291 /*
2292 * At this point we are committed to insert_page()
2293 * regardless of whether the caller specified flags that
2294 * result in pfn_t_has_page() == false.
2295 */
2296 page = pfn_to_page(pfn_t_to_pfn(pfn));
79f3aa5b
MW
2297 err = insert_page(vma, addr, page, pgprot);
2298 } else {
9b5a8e00 2299 return insert_pfn(vma, addr, pfn, pgprot, mkwrite);
423bad60 2300 }
b2770da6 2301
5d747637
MW
2302 if (err == -ENOMEM)
2303 return VM_FAULT_OOM;
2304 if (err < 0 && err != -EBUSY)
2305 return VM_FAULT_SIGBUS;
2306
2307 return VM_FAULT_NOPAGE;
e0dc0d8f 2308}
79f3aa5b
MW
2309
2310vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
2311 pfn_t pfn)
2312{
28d8b812 2313 return __vm_insert_mixed(vma, addr, pfn, false);
79f3aa5b 2314}
5d747637 2315EXPORT_SYMBOL(vmf_insert_mixed);
e0dc0d8f 2316
ab77dab4
SJ
2317/*
 2318 * If the insertion of the PTE failed because someone else already added a
 2319 * different entry in the meantime, we treat that as success, as we assume
2320 * the same entry was actually inserted.
2321 */
ab77dab4
SJ
2322vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
2323 unsigned long addr, pfn_t pfn)
b2770da6 2324{
28d8b812 2325 return __vm_insert_mixed(vma, addr, pfn, true);
b2770da6 2326}
ab77dab4 2327EXPORT_SYMBOL(vmf_insert_mixed_mkwrite);
b2770da6 2328
1da177e4
LT
2329/*
 2330 * Maps a range of physical memory into the requested pages. The old
 2331 * mappings are removed. Any references to nonexistent pages result
 2332 * in null mappings (currently treated as "copy-on-access").
2333 */
2334static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
2335 unsigned long addr, unsigned long end,
2336 unsigned long pfn, pgprot_t prot)
2337{
90a3e375 2338 pte_t *pte, *mapped_pte;
c74df32c 2339 spinlock_t *ptl;
42e4089c 2340 int err = 0;
1da177e4 2341
90a3e375 2342 mapped_pte = pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
1da177e4
LT
2343 if (!pte)
2344 return -ENOMEM;
6606c3e0 2345 arch_enter_lazy_mmu_mode();
1da177e4 2346 do {
c33c7948 2347 BUG_ON(!pte_none(ptep_get(pte)));
42e4089c
AK
2348 if (!pfn_modify_allowed(pfn, prot)) {
2349 err = -EACCES;
2350 break;
2351 }
7e675137 2352 set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
1da177e4
LT
2353 pfn++;
2354 } while (pte++, addr += PAGE_SIZE, addr != end);
6606c3e0 2355 arch_leave_lazy_mmu_mode();
90a3e375 2356 pte_unmap_unlock(mapped_pte, ptl);
42e4089c 2357 return err;
1da177e4
LT
2358}
2359
2360static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
2361 unsigned long addr, unsigned long end,
2362 unsigned long pfn, pgprot_t prot)
2363{
2364 pmd_t *pmd;
2365 unsigned long next;
42e4089c 2366 int err;
1da177e4
LT
2367
2368 pfn -= addr >> PAGE_SHIFT;
2369 pmd = pmd_alloc(mm, pud, addr);
2370 if (!pmd)
2371 return -ENOMEM;
f66055ab 2372 VM_BUG_ON(pmd_trans_huge(*pmd));
1da177e4
LT
2373 do {
2374 next = pmd_addr_end(addr, end);
42e4089c
AK
2375 err = remap_pte_range(mm, pmd, addr, next,
2376 pfn + (addr >> PAGE_SHIFT), prot);
2377 if (err)
2378 return err;
1da177e4
LT
2379 } while (pmd++, addr = next, addr != end);
2380 return 0;
2381}
2382
c2febafc 2383static inline int remap_pud_range(struct mm_struct *mm, p4d_t *p4d,
1da177e4
LT
2384 unsigned long addr, unsigned long end,
2385 unsigned long pfn, pgprot_t prot)
2386{
2387 pud_t *pud;
2388 unsigned long next;
42e4089c 2389 int err;
1da177e4
LT
2390
2391 pfn -= addr >> PAGE_SHIFT;
c2febafc 2392 pud = pud_alloc(mm, p4d, addr);
1da177e4
LT
2393 if (!pud)
2394 return -ENOMEM;
2395 do {
2396 next = pud_addr_end(addr, end);
42e4089c
AK
2397 err = remap_pmd_range(mm, pud, addr, next,
2398 pfn + (addr >> PAGE_SHIFT), prot);
2399 if (err)
2400 return err;
1da177e4
LT
2401 } while (pud++, addr = next, addr != end);
2402 return 0;
2403}
2404
c2febafc
KS
2405static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd,
2406 unsigned long addr, unsigned long end,
2407 unsigned long pfn, pgprot_t prot)
2408{
2409 p4d_t *p4d;
2410 unsigned long next;
42e4089c 2411 int err;
c2febafc
KS
2412
2413 pfn -= addr >> PAGE_SHIFT;
2414 p4d = p4d_alloc(mm, pgd, addr);
2415 if (!p4d)
2416 return -ENOMEM;
2417 do {
2418 next = p4d_addr_end(addr, end);
42e4089c
AK
2419 err = remap_pud_range(mm, p4d, addr, next,
2420 pfn + (addr >> PAGE_SHIFT), prot);
2421 if (err)
2422 return err;
c2febafc
KS
2423 } while (p4d++, addr = next, addr != end);
2424 return 0;
2425}
2426
74ffa5a3
CH
2427/*
2428 * Variant of remap_pfn_range that does not call track_pfn_remap. The caller
2429 * must have pre-validated the caching bits of the pgprot_t.
bfa5bf6d 2430 */
74ffa5a3
CH
2431int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
2432 unsigned long pfn, unsigned long size, pgprot_t prot)
1da177e4
LT
2433{
2434 pgd_t *pgd;
2435 unsigned long next;
2d15cab8 2436 unsigned long end = addr + PAGE_ALIGN(size);
1da177e4
LT
2437 struct mm_struct *mm = vma->vm_mm;
2438 int err;
2439
0c4123e3
AZ
2440 if (WARN_ON_ONCE(!PAGE_ALIGNED(addr)))
2441 return -EINVAL;
2442
1da177e4
LT
2443 /*
2444 * Physically remapped pages are special. Tell the
2445 * rest of the world about it:
2446 * VM_IO tells people not to look at these pages
2447 * (accesses can have side effects).
6aab341e
LT
2448 * VM_PFNMAP tells the core MM that the base pages are just
2449 * raw PFN mappings, and do not have a "struct page" associated
2450 * with them.
314e51b9
KK
2451 * VM_DONTEXPAND
2452 * Disable vma merging and expanding with mremap().
2453 * VM_DONTDUMP
2454 * Omit vma from core dump, even when VM_IO turned off.
fb155c16
LT
2455 *
2456 * There's a horrible special case to handle copy-on-write
2457 * behaviour that some programs depend on. We mark the "original"
2458 * un-COW'ed pages by matching them up with "vma->vm_pgoff".
b3b9c293 2459 * See vm_normal_page() for details.
1da177e4 2460 */
b3b9c293
KK
2461 if (is_cow_mapping(vma->vm_flags)) {
2462 if (addr != vma->vm_start || end != vma->vm_end)
2463 return -EINVAL;
fb155c16 2464 vma->vm_pgoff = pfn;
b3b9c293
KK
2465 }
2466
1c71222e 2467 vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
1da177e4
LT
2468
2469 BUG_ON(addr >= end);
2470 pfn -= addr >> PAGE_SHIFT;
2471 pgd = pgd_offset(mm, addr);
2472 flush_cache_range(vma, addr, end);
1da177e4
LT
2473 do {
2474 next = pgd_addr_end(addr, end);
c2febafc 2475 err = remap_p4d_range(mm, pgd, addr, next,
1da177e4
LT
2476 pfn + (addr >> PAGE_SHIFT), prot);
2477 if (err)
74ffa5a3 2478 return err;
1da177e4 2479 } while (pgd++, addr = next, addr != end);
2ab64037 2480
74ffa5a3
CH
2481 return 0;
2482}
2483
2484/**
2485 * remap_pfn_range - remap kernel memory to userspace
2486 * @vma: user vma to map to
2487 * @addr: target page aligned user address to start at
2488 * @pfn: page frame number of kernel physical memory address
2489 * @size: size of mapping area
2490 * @prot: page protection flags for this mapping
2491 *
2492 * Note: this is only safe if the mm semaphore is held when called.
2493 *
2494 * Return: %0 on success, negative error code otherwise.
2495 */
2496int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
2497 unsigned long pfn, unsigned long size, pgprot_t prot)
2498{
2499 int err;
2500
2501 err = track_pfn_remap(vma, &prot, pfn, addr, PAGE_ALIGN(size));
2ab64037 2502 if (err)
74ffa5a3 2503 return -EINVAL;
2ab64037 2504
74ffa5a3
CH
2505 err = remap_pfn_range_notrack(vma, addr, pfn, size, prot);
2506 if (err)
68f48381 2507 untrack_pfn(vma, pfn, PAGE_ALIGN(size), true);
1da177e4
LT
2508 return err;
2509}
2510EXPORT_SYMBOL(remap_pfn_range);
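/*
 * Editor's illustrative sketch (not part of mm/memory.c): the common
 * pattern is a single remap_pfn_range() call from a driver's mmap
 * handler, mapping a physical region (a hypothetical dev->mmio_phys)
 * with uncached protections.
 */
#if 0
static int my_dev_mmap_mmio(struct file *file, struct vm_area_struct *vma)
{
	struct my_dev *dev = file->private_data;	/* hypothetical */
	unsigned long size = vma->vm_end - vma->vm_start;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start,
			       dev->mmio_phys >> PAGE_SHIFT,
			       size, vma->vm_page_prot);
}
#endif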
2511
b4cbb197
LT
2512/**
2513 * vm_iomap_memory - remap memory to userspace
2514 * @vma: user vma to map to
abd69b9e 2515 * @start: start of the physical memory to be mapped
b4cbb197
LT
2516 * @len: size of area
2517 *
2518 * This is a simplified io_remap_pfn_range() for common driver use. The
2519 * driver just needs to give us the physical memory range to be mapped,
2520 * we'll figure out the rest from the vma information.
2521 *
 2522 * NOTE! Some drivers might want to tweak vma->vm_page_prot first to set up
 2523 * write-combining or similar caching attributes.
a862f68a
MR
2524 *
2525 * Return: %0 on success, negative error code otherwise.
b4cbb197
LT
2526 */
2527int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
2528{
2529 unsigned long vm_len, pfn, pages;
2530
2531 /* Check that the physical memory area passed in looks valid */
2532 if (start + len < start)
2533 return -EINVAL;
2534 /*
2535 * You *really* shouldn't map things that aren't page-aligned,
2536 * but we've historically allowed it because IO memory might
2537 * just have smaller alignment.
2538 */
2539 len += start & ~PAGE_MASK;
2540 pfn = start >> PAGE_SHIFT;
2541 pages = (len + ~PAGE_MASK) >> PAGE_SHIFT;
2542 if (pfn + pages < pfn)
2543 return -EINVAL;
2544
2545 /* We start the mapping 'vm_pgoff' pages into the area */
2546 if (vma->vm_pgoff > pages)
2547 return -EINVAL;
2548 pfn += vma->vm_pgoff;
2549 pages -= vma->vm_pgoff;
2550
2551 /* Can we fit all of the mapping? */
2552 vm_len = vma->vm_end - vma->vm_start;
2553 if (vm_len >> PAGE_SHIFT > pages)
2554 return -EINVAL;
2555
2556 /* Ok, let it rip */
2557 return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
2558}
2559EXPORT_SYMBOL(vm_iomap_memory);
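/*
 * Editor's illustrative sketch (not part of mm/memory.c): with
 * vm_iomap_memory() the driver only supplies the physical range of its
 * framebuffer/BAR and the helper derives pfn, offset and length from
 * the vma.  dev->fb_phys and dev->fb_len are hypothetical.
 */
#if 0
static int my_fb_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct my_dev *dev = file->private_data;	/* hypothetical */

	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	return vm_iomap_memory(vma, dev->fb_phys, dev->fb_len);
}
#endif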
2560
aee16b3c
JF
2561static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
2562 unsigned long addr, unsigned long end,
e80d3909
JR
2563 pte_fn_t fn, void *data, bool create,
2564 pgtbl_mod_mask *mask)
aee16b3c 2565{
8abb50c7 2566 pte_t *pte, *mapped_pte;
be1db475 2567 int err = 0;
3f649ab7 2568 spinlock_t *ptl;
aee16b3c 2569
be1db475 2570 if (create) {
8abb50c7 2571 mapped_pte = pte = (mm == &init_mm) ?
e80d3909 2572 pte_alloc_kernel_track(pmd, addr, mask) :
be1db475
DA
2573 pte_alloc_map_lock(mm, pmd, addr, &ptl);
2574 if (!pte)
2575 return -ENOMEM;
2576 } else {
8abb50c7 2577 mapped_pte = pte = (mm == &init_mm) ?
be1db475
DA
2578 pte_offset_kernel(pmd, addr) :
2579 pte_offset_map_lock(mm, pmd, addr, &ptl);
3db82b93
HD
2580 if (!pte)
2581 return -EINVAL;
be1db475 2582 }
aee16b3c 2583
38e0edb1
JF
2584 arch_enter_lazy_mmu_mode();
2585
eeb4a05f
CH
2586 if (fn) {
2587 do {
c33c7948 2588 if (create || !pte_none(ptep_get(pte))) {
eeb4a05f
CH
2589 err = fn(pte++, addr, data);
2590 if (err)
2591 break;
2592 }
2593 } while (addr += PAGE_SIZE, addr != end);
2594 }
e80d3909 2595 *mask |= PGTBL_PTE_MODIFIED;
aee16b3c 2596
38e0edb1
JF
2597 arch_leave_lazy_mmu_mode();
2598
aee16b3c 2599 if (mm != &init_mm)
8abb50c7 2600 pte_unmap_unlock(mapped_pte, ptl);
aee16b3c
JF
2601 return err;
2602}
2603
2604static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
2605 unsigned long addr, unsigned long end,
e80d3909
JR
2606 pte_fn_t fn, void *data, bool create,
2607 pgtbl_mod_mask *mask)
aee16b3c
JF
2608{
2609 pmd_t *pmd;
2610 unsigned long next;
be1db475 2611 int err = 0;
aee16b3c 2612
ceb86879
AK
2613 BUG_ON(pud_huge(*pud));
2614
be1db475 2615 if (create) {
e80d3909 2616 pmd = pmd_alloc_track(mm, pud, addr, mask);
be1db475
DA
2617 if (!pmd)
2618 return -ENOMEM;
2619 } else {
2620 pmd = pmd_offset(pud, addr);
2621 }
aee16b3c
JF
2622 do {
2623 next = pmd_addr_end(addr, end);
0c95cba4
NP
2624 if (pmd_none(*pmd) && !create)
2625 continue;
2626 if (WARN_ON_ONCE(pmd_leaf(*pmd)))
2627 return -EINVAL;
2628 if (!pmd_none(*pmd) && WARN_ON_ONCE(pmd_bad(*pmd))) {
2629 if (!create)
2630 continue;
2631 pmd_clear_bad(pmd);
be1db475 2632 }
0c95cba4
NP
2633 err = apply_to_pte_range(mm, pmd, addr, next,
2634 fn, data, create, mask);
2635 if (err)
2636 break;
aee16b3c 2637 } while (pmd++, addr = next, addr != end);
0c95cba4 2638
aee16b3c
JF
2639 return err;
2640}
2641
c2febafc 2642static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d,
aee16b3c 2643 unsigned long addr, unsigned long end,
e80d3909
JR
2644 pte_fn_t fn, void *data, bool create,
2645 pgtbl_mod_mask *mask)
aee16b3c
JF
2646{
2647 pud_t *pud;
2648 unsigned long next;
be1db475 2649 int err = 0;
aee16b3c 2650
be1db475 2651 if (create) {
e80d3909 2652 pud = pud_alloc_track(mm, p4d, addr, mask);
be1db475
DA
2653 if (!pud)
2654 return -ENOMEM;
2655 } else {
2656 pud = pud_offset(p4d, addr);
2657 }
aee16b3c
JF
2658 do {
2659 next = pud_addr_end(addr, end);
0c95cba4
NP
2660 if (pud_none(*pud) && !create)
2661 continue;
2662 if (WARN_ON_ONCE(pud_leaf(*pud)))
2663 return -EINVAL;
2664 if (!pud_none(*pud) && WARN_ON_ONCE(pud_bad(*pud))) {
2665 if (!create)
2666 continue;
2667 pud_clear_bad(pud);
be1db475 2668 }
0c95cba4
NP
2669 err = apply_to_pmd_range(mm, pud, addr, next,
2670 fn, data, create, mask);
2671 if (err)
2672 break;
aee16b3c 2673 } while (pud++, addr = next, addr != end);
0c95cba4 2674
aee16b3c
JF
2675 return err;
2676}
2677
c2febafc
KS
2678static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd,
2679 unsigned long addr, unsigned long end,
e80d3909
JR
2680 pte_fn_t fn, void *data, bool create,
2681 pgtbl_mod_mask *mask)
c2febafc
KS
2682{
2683 p4d_t *p4d;
2684 unsigned long next;
be1db475 2685 int err = 0;
c2febafc 2686
be1db475 2687 if (create) {
e80d3909 2688 p4d = p4d_alloc_track(mm, pgd, addr, mask);
be1db475
DA
2689 if (!p4d)
2690 return -ENOMEM;
2691 } else {
2692 p4d = p4d_offset(pgd, addr);
2693 }
c2febafc
KS
2694 do {
2695 next = p4d_addr_end(addr, end);
0c95cba4
NP
2696 if (p4d_none(*p4d) && !create)
2697 continue;
2698 if (WARN_ON_ONCE(p4d_leaf(*p4d)))
2699 return -EINVAL;
2700 if (!p4d_none(*p4d) && WARN_ON_ONCE(p4d_bad(*p4d))) {
2701 if (!create)
2702 continue;
2703 p4d_clear_bad(p4d);
be1db475 2704 }
0c95cba4
NP
2705 err = apply_to_pud_range(mm, p4d, addr, next,
2706 fn, data, create, mask);
2707 if (err)
2708 break;
c2febafc 2709 } while (p4d++, addr = next, addr != end);
0c95cba4 2710
c2febafc
KS
2711 return err;
2712}
2713
be1db475
DA
2714static int __apply_to_page_range(struct mm_struct *mm, unsigned long addr,
2715 unsigned long size, pte_fn_t fn,
2716 void *data, bool create)
aee16b3c
JF
2717{
2718 pgd_t *pgd;
e80d3909 2719 unsigned long start = addr, next;
57250a5b 2720 unsigned long end = addr + size;
e80d3909 2721 pgtbl_mod_mask mask = 0;
be1db475 2722 int err = 0;
aee16b3c 2723
9cb65bc3
MP
2724 if (WARN_ON(addr >= end))
2725 return -EINVAL;
2726
aee16b3c
JF
2727 pgd = pgd_offset(mm, addr);
2728 do {
2729 next = pgd_addr_end(addr, end);
0c95cba4 2730 if (pgd_none(*pgd) && !create)
be1db475 2731 continue;
0c95cba4
NP
2732 if (WARN_ON_ONCE(pgd_leaf(*pgd)))
2733 return -EINVAL;
2734 if (!pgd_none(*pgd) && WARN_ON_ONCE(pgd_bad(*pgd))) {
2735 if (!create)
2736 continue;
2737 pgd_clear_bad(pgd);
2738 }
2739 err = apply_to_p4d_range(mm, pgd, addr, next,
2740 fn, data, create, &mask);
aee16b3c
JF
2741 if (err)
2742 break;
2743 } while (pgd++, addr = next, addr != end);
57250a5b 2744
e80d3909
JR
2745 if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
2746 arch_sync_kernel_mappings(start, start + size);
2747
aee16b3c
JF
2748 return err;
2749}
be1db475
DA
2750
2751/*
2752 * Scan a region of virtual memory, filling in page tables as necessary
2753 * and calling a provided function on each leaf page table.
2754 */
2755int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
2756 unsigned long size, pte_fn_t fn, void *data)
2757{
2758 return __apply_to_page_range(mm, addr, size, fn, data, true);
2759}
aee16b3c
JF
2760EXPORT_SYMBOL_GPL(apply_to_page_range);
2761
be1db475
DA
2762/*
2763 * Scan a region of virtual memory, calling a provided function on
2764 * each leaf page table where it exists.
2765 *
2766 * Unlike apply_to_page_range, this does _not_ fill in page tables
2767 * where they are absent.
2768 */
2769int apply_to_existing_page_range(struct mm_struct *mm, unsigned long addr,
2770 unsigned long size, pte_fn_t fn, void *data)
2771{
2772 return __apply_to_page_range(mm, addr, size, fn, data, false);
2773}
2774EXPORT_SYMBOL_GPL(apply_to_existing_page_range);
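/*
 * Editor's illustrative sketch (not part of mm/memory.c): the pte_fn_t
 * callback is invoked once per leaf pte.  Here a hypothetical helper
 * counts present ptes in a kernel-mapped range using
 * apply_to_existing_page_range(), which skips absent page tables
 * instead of allocating them.
 */
#if 0
static int count_present_pte(pte_t *pte, unsigned long addr, void *data)
{
	unsigned long *count = data;

	if (!pte_none(ptep_get(pte)))
		(*count)++;
	return 0;
}

static unsigned long count_kernel_ptes(unsigned long addr, unsigned long size)
{
	unsigned long count = 0;

	apply_to_existing_page_range(&init_mm, addr, size,
				     count_present_pte, &count);
	return count;
}
#endif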
2775
8f4e2101 2776/*
9b4bdd2f
KS
2777 * handle_pte_fault chooses page fault handler according to an entry which was
2778 * read non-atomically. Before making any commitment, on those architectures
2779 * or configurations (e.g. i386 with PAE) which might give a mix of unmatched
2780 * parts, do_swap_page must check under lock before unmapping the pte and
2781 * proceeding (but do_wp_page is only called after already making such a check;
a335b2e1 2782 * and do_anonymous_page can safely check later on).
8f4e2101 2783 */
2ca99358 2784static inline int pte_unmap_same(struct vm_fault *vmf)
8f4e2101
HD
2785{
2786 int same = 1;
923717cb 2787#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPTION)
8f4e2101 2788 if (sizeof(pte_t) > sizeof(unsigned long)) {
c7ad0880 2789 spin_lock(vmf->ptl);
c33c7948 2790 same = pte_same(ptep_get(vmf->pte), vmf->orig_pte);
c7ad0880 2791 spin_unlock(vmf->ptl);
8f4e2101
HD
2792 }
2793#endif
2ca99358
PX
2794 pte_unmap(vmf->pte);
2795 vmf->pte = NULL;
8f4e2101
HD
2796 return same;
2797}
2798
a873dfe1
TL
2799/*
2800 * Return:
2801 * 0: copied succeeded
2802 * -EHWPOISON: copy failed due to hwpoison in source page
2803 * -EAGAIN: copied failed (some other reason)
2804 */
2805static inline int __wp_page_copy_user(struct page *dst, struct page *src,
2806 struct vm_fault *vmf)
6aab341e 2807{
a873dfe1 2808 int ret;
83d116c5
JH
2809 void *kaddr;
2810 void __user *uaddr;
83d116c5
JH
2811 struct vm_area_struct *vma = vmf->vma;
2812 struct mm_struct *mm = vma->vm_mm;
2813 unsigned long addr = vmf->address;
2814
83d116c5 2815 if (likely(src)) {
d302c239
TL
2816 if (copy_mc_user_highpage(dst, src, addr, vma)) {
2817 memory_failure_queue(page_to_pfn(src), 0);
a873dfe1 2818 return -EHWPOISON;
d302c239 2819 }
a873dfe1 2820 return 0;
83d116c5
JH
2821 }
2822
6aab341e
LT
2823 /*
2824 * If the source page was a PFN mapping, we don't have
2825 * a "struct page" for it. We do a best-effort copy by
2826 * just copying from the original user address. If that
2827 * fails, we just zero-fill it. Live with it.
2828 */
83d116c5
JH
2829 kaddr = kmap_atomic(dst);
2830 uaddr = (void __user *)(addr & PAGE_MASK);
2831
2832 /*
 2833	 * On architectures with software "accessed" bits, we would
 2834	 * otherwise take a double page fault, so mark it accessed here.
2835 */
3db82b93 2836 vmf->pte = NULL;
e1fd09e3 2837 if (!arch_has_hw_pte_young() && !pte_young(vmf->orig_pte)) {
83d116c5 2838 pte_t entry;
5d2a2dbb 2839
83d116c5 2840 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
c33c7948 2841 if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
83d116c5
JH
2842 /*
 2843			 * Another thread has already handled the fault;
7df67697 2844			 * just update the local TLB.
83d116c5 2845 */
a92cbb82
HD
2846 if (vmf->pte)
2847 update_mmu_tlb(vma, addr, vmf->pte);
a873dfe1 2848 ret = -EAGAIN;
83d116c5
JH
2849 goto pte_unlock;
2850 }
2851
2852 entry = pte_mkyoung(vmf->orig_pte);
2853 if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0))
5003a2bd 2854 update_mmu_cache_range(vmf, vma, addr, vmf->pte, 1);
83d116c5
JH
2855 }
2856
2857 /*
2858 * This really shouldn't fail, because the page is there
2859 * in the page tables. But it might just be unreadable,
2860 * in which case we just give up and fill the result with
2861 * zeroes.
2862 */
2863 if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
3db82b93 2864 if (vmf->pte)
c3e5ea6e
KS
2865 goto warn;
2866
2867 /* Re-validate under PTL if the page is still mapped */
2868 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
c33c7948 2869 if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
7df67697 2870			/* The PTE changed under us; just update the local TLB */
a92cbb82
HD
2871 if (vmf->pte)
2872 update_mmu_tlb(vma, addr, vmf->pte);
a873dfe1 2873 ret = -EAGAIN;
c3e5ea6e
KS
2874 goto pte_unlock;
2875 }
2876
5d2a2dbb 2877 /*
985ba004 2878		 * The same page may have been mapped back since the last copy attempt.
c3e5ea6e 2879 * Try to copy again under PTL.
5d2a2dbb 2880 */
c3e5ea6e
KS
2881 if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
2882 /*
 2883			 * Warn in case some obscure use-case
 2884			 * relies on this.
2885 */
2886warn:
2887 WARN_ON_ONCE(1);
2888 clear_page(kaddr);
2889 }
83d116c5
JH
2890 }
2891
a873dfe1 2892 ret = 0;
83d116c5
JH
2893
2894pte_unlock:
3db82b93 2895 if (vmf->pte)
83d116c5
JH
2896 pte_unmap_unlock(vmf->pte, vmf->ptl);
2897 kunmap_atomic(kaddr);
2898 flush_dcache_page(dst);
2899
2900 return ret;
6aab341e
LT
2901}
2902
c20cd45e
MH
2903static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma)
2904{
2905 struct file *vm_file = vma->vm_file;
2906
2907 if (vm_file)
2908 return mapping_gfp_mask(vm_file->f_mapping) | __GFP_FS | __GFP_IO;
2909
2910 /*
2911 * Special mappings (e.g. VDSO) do not have any file so fake
2912 * a default GFP_KERNEL for them.
2913 */
2914 return GFP_KERNEL;
2915}
2916
fb09a464
KS
2917/*
2918 * Notify the address space that the page is about to become writable so that
2919 * it can prohibit this or wait for the page to get into an appropriate state.
2920 *
2921 * We do this without the lock held, so that it can sleep if it needs to.
2922 */
86aa6998 2923static vm_fault_t do_page_mkwrite(struct vm_fault *vmf, struct folio *folio)
fb09a464 2924{
2b740303 2925 vm_fault_t ret;
38b8cb7f 2926 unsigned int old_flags = vmf->flags;
fb09a464 2927
38b8cb7f 2928 vmf->flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
fb09a464 2929
dc617f29
DW
2930 if (vmf->vma->vm_file &&
2931 IS_SWAPFILE(vmf->vma->vm_file->f_mapping->host))
2932 return VM_FAULT_SIGBUS;
2933
11bac800 2934 ret = vmf->vma->vm_ops->page_mkwrite(vmf);
38b8cb7f
JK
2935 /* Restore original flags so that caller is not surprised */
2936 vmf->flags = old_flags;
fb09a464
KS
2937 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
2938 return ret;
2939 if (unlikely(!(ret & VM_FAULT_LOCKED))) {
3d243659
SK
2940 folio_lock(folio);
2941 if (!folio->mapping) {
2942 folio_unlock(folio);
fb09a464
KS
2943 return 0; /* retry */
2944 }
2945 ret |= VM_FAULT_LOCKED;
2946 } else
3d243659 2947 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
fb09a464
KS
2948 return ret;
2949}
2950
97ba0c2b
JK
2951/*
2952 * Handle dirtying of a page in shared file mapping on a write fault.
2953 *
2954 * The function expects the page to be locked and unlocks it.
2955 */
89b15332 2956static vm_fault_t fault_dirty_shared_page(struct vm_fault *vmf)
97ba0c2b 2957{
89b15332 2958 struct vm_area_struct *vma = vmf->vma;
97ba0c2b 2959 struct address_space *mapping;
15b4919a 2960 struct folio *folio = page_folio(vmf->page);
97ba0c2b
JK
2961 bool dirtied;
2962 bool page_mkwrite = vma->vm_ops && vma->vm_ops->page_mkwrite;
2963
15b4919a
Z
2964 dirtied = folio_mark_dirty(folio);
2965 VM_BUG_ON_FOLIO(folio_test_anon(folio), folio);
97ba0c2b 2966 /*
15b4919a
Z
2967 * Take a local copy of the address_space - folio.mapping may be zeroed
2968 * by truncate after folio_unlock(). The address_space itself remains
2969 * pinned by vma->vm_file's reference. We rely on folio_unlock()'s
97ba0c2b
JK
2970 * release semantics to prevent the compiler from undoing this copying.
2971 */
15b4919a
Z
2972 mapping = folio_raw_mapping(folio);
2973 folio_unlock(folio);
97ba0c2b 2974
89b15332
JW
2975 if (!page_mkwrite)
2976 file_update_time(vma->vm_file);
2977
2978 /*
2979 * Throttle page dirtying rate down to writeback speed.
2980 *
2981 * mapping may be NULL here because some device drivers do not
2982 * set page.mapping but still dirty their pages
2983 *
c1e8d7c6 2984 * Drop the mmap_lock before waiting on IO, if we can. The file
89b15332
JW
2985 * is pinning the mapping, as per above.
2986 */
97ba0c2b 2987 if ((dirtied || page_mkwrite) && mapping) {
89b15332
JW
2988 struct file *fpin;
2989
2990 fpin = maybe_unlock_mmap_for_io(vmf, NULL);
97ba0c2b 2991 balance_dirty_pages_ratelimited(mapping);
89b15332
JW
2992 if (fpin) {
2993 fput(fpin);
d9272525 2994 return VM_FAULT_COMPLETED;
89b15332 2995 }
97ba0c2b
JK
2996 }
2997
89b15332 2998 return 0;
97ba0c2b
JK
2999}
3000
4e047f89
SR
3001/*
3002 * Handle write page faults for pages that can be reused in the current vma
3003 *
 3004 * This can happen either because the mapping has the VM_SHARED flag set,
 3005 * or because we hold the last remaining reference to the page. In either
3006 * case, all we need to do here is to mark the page as writable and update
3007 * any related book-keeping.
3008 */
997dd98d 3009static inline void wp_page_reuse(struct vm_fault *vmf)
82b0f8c3 3010 __releases(vmf->ptl)
4e047f89 3011{
82b0f8c3 3012 struct vm_area_struct *vma = vmf->vma;
a41b70d6 3013 struct page *page = vmf->page;
4e047f89 3014 pte_t entry;
6c287605 3015
c89357e2 3016 VM_BUG_ON(!(vmf->flags & FAULT_FLAG_WRITE));
cdb281e6 3017 VM_BUG_ON(page && PageAnon(page) && !PageAnonExclusive(page));
6c287605 3018
4e047f89
SR
3019 /*
 3020	 * Clear the page's cpupid information as the existing
3021 * information potentially belongs to a now completely
3022 * unrelated process.
3023 */
3024 if (page)
3025 page_cpupid_xchg_last(page, (1 << LAST_CPUPID_SHIFT) - 1);
3026
2994302b
JK
3027 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
3028 entry = pte_mkyoung(vmf->orig_pte);
4e047f89 3029 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
82b0f8c3 3030 if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1))
5003a2bd 3031 update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
82b0f8c3 3032 pte_unmap_unlock(vmf->pte, vmf->ptl);
798a6b87 3033 count_vm_event(PGREUSE);
4e047f89
SR
3034}
3035
2f38ab2c 3036/*
c89357e2
DH
3037 * Handle the case of a page which we actually need to copy to a new page,
3038 * either due to COW or unsharing.
2f38ab2c 3039 *
c1e8d7c6 3040 * Called with mmap_lock locked and the old page referenced, but
2f38ab2c
SR
3041 * without the ptl held.
3042 *
3043 * High level logic flow:
3044 *
3045 * - Allocate a page, copy the content of the old page to the new one.
3046 * - Handle book keeping and accounting - cgroups, mmu-notifiers, etc.
3047 * - Take the PTL. If the pte changed, bail out and release the allocated page
3048 * - If the pte is still the way we remember it, update the page table and all
3049 * relevant references. This includes dropping the reference the page-table
3050 * held to the old page, as well as updating the rmap.
3051 * - In any case, unlock the PTL and drop the reference we took to the old page.
3052 */
2b740303 3053static vm_fault_t wp_page_copy(struct vm_fault *vmf)
2f38ab2c 3054{
c89357e2 3055 const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
82b0f8c3 3056 struct vm_area_struct *vma = vmf->vma;
bae473a4 3057 struct mm_struct *mm = vma->vm_mm;
28d41a48
MWO
3058 struct folio *old_folio = NULL;
3059 struct folio *new_folio = NULL;
2f38ab2c
SR
3060 pte_t entry;
3061 int page_copied = 0;
ac46d4f3 3062 struct mmu_notifier_range range;
a873dfe1 3063 int ret;
2f38ab2c 3064
662ce1dc
YY
3065 delayacct_wpcopy_start();
3066
28d41a48
MWO
3067 if (vmf->page)
3068 old_folio = page_folio(vmf->page);
2f38ab2c
SR
3069 if (unlikely(anon_vma_prepare(vma)))
3070 goto oom;
3071
2994302b 3072 if (is_zero_pfn(pte_pfn(vmf->orig_pte))) {
6bc56a4d
MWO
3073 new_folio = vma_alloc_zeroed_movable_folio(vma, vmf->address);
3074 if (!new_folio)
2f38ab2c
SR
3075 goto oom;
3076 } else {
28d41a48
MWO
3077 new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma,
3078 vmf->address, false);
3079 if (!new_folio)
2f38ab2c 3080 goto oom;
83d116c5 3081
28d41a48 3082 ret = __wp_page_copy_user(&new_folio->page, vmf->page, vmf);
a873dfe1 3083 if (ret) {
83d116c5
JH
3084 /*
 3085			 * COW failed; if the fault was resolved by another
 3086			 * thread, that's fine. If not, userspace will re-fault
 3087			 * on the same address and we will handle the fault
 3088			 * on the second attempt.
a873dfe1 3089 * The -EHWPOISON case will not be retried.
83d116c5 3090 */
28d41a48
MWO
3091 folio_put(new_folio);
3092 if (old_folio)
3093 folio_put(old_folio);
662ce1dc
YY
3094
3095 delayacct_wpcopy_end();
a873dfe1 3096 return ret == -EHWPOISON ? VM_FAULT_HWPOISON : 0;
83d116c5 3097 }
28d41a48 3098 kmsan_copy_page_meta(&new_folio->page, vmf->page);
2f38ab2c 3099 }
2f38ab2c 3100
28d41a48 3101 if (mem_cgroup_charge(new_folio, mm, GFP_KERNEL))
2f38ab2c 3102 goto oom_free_new;
4d4f75bf 3103 folio_throttle_swaprate(new_folio, GFP_KERNEL);
2f38ab2c 3104
28d41a48 3105 __folio_mark_uptodate(new_folio);
eb3c24f3 3106
7d4a8be0 3107 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
6f4f13e8 3108 vmf->address & PAGE_MASK,
ac46d4f3
JG
3109 (vmf->address & PAGE_MASK) + PAGE_SIZE);
3110 mmu_notifier_invalidate_range_start(&range);
2f38ab2c
SR
3111
3112 /*
3113 * Re-check the pte - we dropped the lock
3114 */
82b0f8c3 3115 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl);
c33c7948 3116 if (likely(vmf->pte && pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
28d41a48
MWO
3117 if (old_folio) {
3118 if (!folio_test_anon(old_folio)) {
3119 dec_mm_counter(mm, mm_counter_file(&old_folio->page));
f1a79412 3120 inc_mm_counter(mm, MM_ANONPAGES);
2f38ab2c
SR
3121 }
3122 } else {
6080d19f 3123 ksm_might_unmap_zero_page(mm, vmf->orig_pte);
f1a79412 3124 inc_mm_counter(mm, MM_ANONPAGES);
2f38ab2c 3125 }
2994302b 3126 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
28d41a48 3127 entry = mk_pte(&new_folio->page, vma->vm_page_prot);
50c25ee9 3128 entry = pte_sw_mkyoung(entry);
c89357e2
DH
3129 if (unlikely(unshare)) {
3130 if (pte_soft_dirty(vmf->orig_pte))
3131 entry = pte_mksoft_dirty(entry);
3132 if (pte_uffd_wp(vmf->orig_pte))
3133 entry = pte_mkuffd_wp(entry);
3134 } else {
3135 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
3136 }
111fe718 3137
2f38ab2c
SR
3138 /*
3139 * Clear the pte entry and flush it first, before updating the
111fe718
NP
3140 * pte with the new entry, to keep TLBs on different CPUs in
3141 * sync. This code used to set the new PTE then flush TLBs, but
3142 * that left a window where the new PTE could be loaded into
3143 * some TLBs while the old PTE remains in others.
2f38ab2c 3144 */
ec8832d0 3145 ptep_clear_flush(vma, vmf->address, vmf->pte);
28d41a48
MWO
3146 folio_add_new_anon_rmap(new_folio, vma, vmf->address);
3147 folio_add_lru_vma(new_folio, vma);
2f38ab2c
SR
3148 /*
3149 * We call the notify macro here because, when using secondary
3150 * mmu page tables (such as kvm shadow page tables), we want the
3151 * new page to be mapped directly into the secondary page table.
3152 */
c89357e2 3153 BUG_ON(unshare && pte_write(entry));
82b0f8c3 3154 set_pte_at_notify(mm, vmf->address, vmf->pte, entry);
5003a2bd 3155 update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
28d41a48 3156 if (old_folio) {
2f38ab2c
SR
3157 /*
3158 * Only after switching the pte to the new page may
3159 * we remove the mapcount here. Otherwise another
3160 * process may come and find the rmap count decremented
3161 * before the pte is switched to the new page, and
3162 * "reuse" the old page writing into it while our pte
3163 * here still points into it and can be read by other
3164 * threads.
3165 *
3166 * The critical issue is to order this
3167 * page_remove_rmap with the ptp_clear_flush above.
3168 * Those stores are ordered by (if nothing else,)
3169 * the barrier present in the atomic_add_negative
3170 * in page_remove_rmap.
3171 *
3172 * Then the TLB flush in ptep_clear_flush ensures that
3173 * no process can access the old page before the
3174 * decremented mapcount is visible. And the old page
3175 * cannot be reused until after the decremented
3176 * mapcount is visible. So transitively, TLBs to
3177 * old page will be flushed before it can be reused.
3178 */
28d41a48 3179 page_remove_rmap(vmf->page, vma, false);
2f38ab2c
SR
3180 }
3181
3182 /* Free the old page.. */
28d41a48 3183 new_folio = old_folio;
2f38ab2c 3184 page_copied = 1;
3db82b93
HD
3185 pte_unmap_unlock(vmf->pte, vmf->ptl);
3186 } else if (vmf->pte) {
7df67697 3187 update_mmu_tlb(vma, vmf->address, vmf->pte);
3db82b93 3188 pte_unmap_unlock(vmf->pte, vmf->ptl);
2f38ab2c
SR
3189 }
3190
ec8832d0 3191 mmu_notifier_invalidate_range_end(&range);
3db82b93
HD
3192
3193 if (new_folio)
3194 folio_put(new_folio);
28d41a48 3195 if (old_folio) {
f4c4a3f4 3196 if (page_copied)
28d41a48
MWO
3197 free_swap_cache(&old_folio->page);
3198 folio_put(old_folio);
2f38ab2c 3199 }
662ce1dc
YY
3200
3201 delayacct_wpcopy_end();
cb8d8633 3202 return 0;
2f38ab2c 3203oom_free_new:
28d41a48 3204 folio_put(new_folio);
2f38ab2c 3205oom:
28d41a48
MWO
3206 if (old_folio)
3207 folio_put(old_folio);
662ce1dc
YY
3208
3209 delayacct_wpcopy_end();
2f38ab2c
SR
3210 return VM_FAULT_OOM;
3211}
3212
66a6197c
JK
3213/**
3214 * finish_mkwrite_fault - finish page fault for a shared mapping, making PTE
3215 * writeable once the page is prepared
3216 *
3217 * @vmf: structure describing the fault
3218 *
3219 * This function handles all that is needed to finish a write page fault in a
 3220 * shared mapping due to the PTE being read-only once the mapped page is prepared.
a862f68a 3221 * It handles locking of the PTE and modifying it.
66a6197c
JK
3222 *
3223 * The function expects the page to be locked or other protection against
3224 * concurrent faults / writeback (such as DAX radix tree locks).
a862f68a 3225 *
2797e79f 3226 * Return: %0 on success, %VM_FAULT_NOPAGE when PTE got changed before
a862f68a 3227 * we acquired PTE lock.
66a6197c 3228 */
2b740303 3229vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf)
66a6197c
JK
3230{
3231 WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED));
3232 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address,
3233 &vmf->ptl);
3db82b93
HD
3234 if (!vmf->pte)
3235 return VM_FAULT_NOPAGE;
66a6197c
JK
3236 /*
3237 * We might have raced with another page fault while we released the
3238 * pte_offset_map_lock.
3239 */
c33c7948 3240 if (!pte_same(ptep_get(vmf->pte), vmf->orig_pte)) {
7df67697 3241 update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
66a6197c 3242 pte_unmap_unlock(vmf->pte, vmf->ptl);
a19e2553 3243 return VM_FAULT_NOPAGE;
66a6197c
JK
3244 }
3245 wp_page_reuse(vmf);
a19e2553 3246 return 0;
66a6197c
JK
3247}
3248
dd906184
BH
3249/*
3250 * Handle write page faults for VM_MIXEDMAP or VM_PFNMAP for a VM_SHARED
3251 * mapping
3252 */
2b740303 3253static vm_fault_t wp_pfn_shared(struct vm_fault *vmf)
dd906184 3254{
82b0f8c3 3255 struct vm_area_struct *vma = vmf->vma;
bae473a4 3256
dd906184 3257 if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) {
2b740303 3258 vm_fault_t ret;
dd906184 3259
82b0f8c3 3260 pte_unmap_unlock(vmf->pte, vmf->ptl);
063e60d8
MWO
3261 if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
3262 vma_end_read(vmf->vma);
3263 return VM_FAULT_RETRY;
3264 }
3265
fe82221f 3266 vmf->flags |= FAULT_FLAG_MKWRITE;
11bac800 3267 ret = vma->vm_ops->pfn_mkwrite(vmf);
2f89dc12 3268 if (ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))
dd906184 3269 return ret;
66a6197c 3270 return finish_mkwrite_fault(vmf);
dd906184 3271 }
997dd98d 3272 wp_page_reuse(vmf);
cb8d8633 3273 return 0;
dd906184
BH
3274}
3275
5a97858b 3276static vm_fault_t wp_page_shared(struct vm_fault *vmf, struct folio *folio)
82b0f8c3 3277 __releases(vmf->ptl)
93e478d4 3278{
82b0f8c3 3279 struct vm_area_struct *vma = vmf->vma;
cb8d8633 3280 vm_fault_t ret = 0;
93e478d4 3281
5a97858b 3282 folio_get(folio);
93e478d4 3283
93e478d4 3284 if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
2b740303 3285 vm_fault_t tmp;
93e478d4 3286
82b0f8c3 3287 pte_unmap_unlock(vmf->pte, vmf->ptl);
063e60d8
MWO
3288 if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
3289 folio_put(folio);
3290 vma_end_read(vmf->vma);
3291 return VM_FAULT_RETRY;
3292 }
3293
86aa6998 3294 tmp = do_page_mkwrite(vmf, folio);
93e478d4
SR
3295 if (unlikely(!tmp || (tmp &
3296 (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
5a97858b 3297 folio_put(folio);
93e478d4
SR
3298 return tmp;
3299 }
66a6197c 3300 tmp = finish_mkwrite_fault(vmf);
a19e2553 3301 if (unlikely(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
5a97858b
SK
3302 folio_unlock(folio);
3303 folio_put(folio);
66a6197c 3304 return tmp;
93e478d4 3305 }
66a6197c
JK
3306 } else {
3307 wp_page_reuse(vmf);
5a97858b 3308 folio_lock(folio);
93e478d4 3309 }
89b15332 3310 ret |= fault_dirty_shared_page(vmf);
5a97858b 3311 folio_put(folio);
93e478d4 3312
89b15332 3313 return ret;
93e478d4
SR
3314}
3315
1da177e4 3316/*
c89357e2
DH
3317 * This routine handles present pages, when
3318 * * users try to write to a shared page (FAULT_FLAG_WRITE)
3319 * * GUP wants to take a R/O pin on a possibly shared anonymous page
3320 * (FAULT_FLAG_UNSHARE)
3321 *
3322 * It is done by copying the page to a new address and decrementing the
3323 * shared-page counter for the old page.
1da177e4 3324 *
1da177e4
LT
3325 * Note that this routine assumes that the protection checks have been
3326 * done by the caller (the low-level page fault routine in most cases).
c89357e2
DH
3327 * Thus, with FAULT_FLAG_WRITE, we can safely just mark it writable once we've
3328 * done any necessary COW.
1da177e4 3329 *
c89357e2
DH
3330 * In case of FAULT_FLAG_WRITE, we also mark the page dirty at this point even
3331 * though the page will change only once the write actually happens. This
3332 * avoids a few races, and potentially makes it more efficient.
1da177e4 3333 *
c1e8d7c6 3334 * We enter with non-exclusive mmap_lock (to exclude vma changes,
8f4e2101 3335 * but allow concurrent faults), with pte both mapped and locked.
c1e8d7c6 3336 * We return with mmap_lock still held, but pte unmapped and unlocked.
1da177e4 3337 */
2b740303 3338static vm_fault_t do_wp_page(struct vm_fault *vmf)
82b0f8c3 3339 __releases(vmf->ptl)
1da177e4 3340{
c89357e2 3341 const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
82b0f8c3 3342 struct vm_area_struct *vma = vmf->vma;
b9086fde 3343 struct folio *folio = NULL;
1da177e4 3344
c89357e2 3345 if (likely(!unshare)) {
c33c7948 3346 if (userfaultfd_pte_wp(vma, ptep_get(vmf->pte))) {
c89357e2
DH
3347 pte_unmap_unlock(vmf->pte, vmf->ptl);
3348 return handle_userfault(vmf, VM_UFFD_WP);
3349 }
3350
3351 /*
3352 * Userfaultfd write-protect can defer flushes. Ensure the TLB
3353 * is flushed in this case before copying.
3354 */
3355 if (unlikely(userfaultfd_wp(vmf->vma) &&
3356 mm_tlb_flush_pending(vmf->vma->vm_mm)))
3357 flush_tlb_page(vmf->vma, vmf->address);
3358 }
6ce64428 3359
a41b70d6 3360 vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte);
c89357e2 3361
5a97858b
SK
3362 if (vmf->page)
3363 folio = page_folio(vmf->page);
3364
b9086fde
DH
3365 /*
3366 * Shared mapping: we are guaranteed to have VM_WRITE and
3367 * FAULT_FLAG_WRITE set at this point.
3368 */
3369 if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) {
251b97f5 3370 /*
64e45507
PF
3371 * VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a
3372 * VM_PFNMAP VMA.
251b97f5
PZ
3373 *
3374 * We should not cow pages in a shared writeable mapping.
dd906184 3375 * Just mark the pages writable and/or call ops->pfn_mkwrite.
251b97f5 3376 */
b9086fde 3377 if (!vmf->page)
2994302b 3378 return wp_pfn_shared(vmf);
5a97858b 3379 return wp_page_shared(vmf, folio);
251b97f5 3380 }
1da177e4 3381
d08b3851 3382 /*
b9086fde
DH
3383 * Private mapping: create an exclusive anonymous page copy if reuse
3384 * is impossible. We might miss VM_WRITE for FOLL_FORCE handling.
d08b3851 3385 */
b9086fde 3386 if (folio && folio_test_anon(folio)) {
6c287605
DH
3387 /*
3388 * If the page is exclusive to this process we must reuse the
3389 * page without further checks.
3390 */
e4a2ed94 3391 if (PageAnonExclusive(vmf->page))
6c287605
DH
3392 goto reuse;
3393
53a05ad9 3394 /*
e4a2ed94
MWO
3395 * We have to verify under folio lock: these early checks are
3396 * just an optimization to avoid locking the folio and freeing
53a05ad9
DH
3397 * the swapcache if there is little hope that we can reuse.
3398 *
e4a2ed94 3399 * KSM doesn't necessarily raise the folio refcount.
53a05ad9 3400 */
e4a2ed94 3401 if (folio_test_ksm(folio) || folio_ref_count(folio) > 3)
d4c47097 3402 goto copy;
e4a2ed94 3403 if (!folio_test_lru(folio))
d4c47097 3404 /*
1fec6890
MWO
3405 * We cannot easily detect+handle references from
3406 * remote LRU caches or references to LRU folios.
d4c47097
DH
3407 */
3408 lru_add_drain();
e4a2ed94 3409 if (folio_ref_count(folio) > 1 + folio_test_swapcache(folio))
09854ba9 3410 goto copy;
e4a2ed94 3411 if (!folio_trylock(folio))
09854ba9 3412 goto copy;
e4a2ed94
MWO
3413 if (folio_test_swapcache(folio))
3414 folio_free_swap(folio);
3415 if (folio_test_ksm(folio) || folio_ref_count(folio) != 1) {
3416 folio_unlock(folio);
52d1e606 3417 goto copy;
b009c024 3418 }
09854ba9 3419 /*
e4a2ed94
MWO
3420 * Ok, we've got the only folio reference from our mapping
3421 * and the folio is locked, it's dark out, and we're wearing
53a05ad9 3422 * sunglasses. Hit it.
09854ba9 3423 */
e4a2ed94
MWO
3424 page_move_anon_rmap(vmf->page, vma);
3425 folio_unlock(folio);
6c287605 3426reuse:
c89357e2
DH
3427 if (unlikely(unshare)) {
3428 pte_unmap_unlock(vmf->pte, vmf->ptl);
3429 return 0;
3430 }
be068f29 3431 wp_page_reuse(vmf);
cb8d8633 3432 return 0;
1da177e4 3433 }
52d1e606 3434copy:
063e60d8
MWO
3435 if ((vmf->flags & FAULT_FLAG_VMA_LOCK) && !vma->anon_vma) {
3436 pte_unmap_unlock(vmf->pte, vmf->ptl);
3437 vma_end_read(vmf->vma);
3438 return VM_FAULT_RETRY;
3439 }
3440
1da177e4
LT
3441 /*
3442 * Ok, we need to copy. Oh, well..
3443 */
b9086fde
DH
3444 if (folio)
3445 folio_get(folio);
28766805 3446
82b0f8c3 3447 pte_unmap_unlock(vmf->pte, vmf->ptl);
94bfe85b 3448#ifdef CONFIG_KSM
b9086fde 3449 if (folio && folio_test_ksm(folio))
94bfe85b
YY
3450 count_vm_event(COW_KSM);
3451#endif
a41b70d6 3452 return wp_page_copy(vmf);
1da177e4
LT
3453}
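/*
 * Editor's illustrative sketch -- not part of memory.c. A userspace view of
 * the copy-on-write fault that do_wp_page() above resolves: after fork() the
 * anonymous page is mapped read-only in both processes, and the child's
 * store takes a write fault and gets a private copy, so the parent's data is
 * untouched. Error handling is intentionally minimal.
 */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;
	strcpy(p, "parent");

	if (fork() == 0) {
		strcpy(p, "child");	/* write fault -> child gets its own copy */
		_exit(0);
	}
	wait(NULL);
	printf("parent still sees \"%s\"\n", p);	/* prints "parent" */
	return 0;
}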
3454
97a89413 3455static void unmap_mapping_range_vma(struct vm_area_struct *vma,
1da177e4
LT
3456 unsigned long start_addr, unsigned long end_addr,
3457 struct zap_details *details)
3458{
f5cc4eef 3459 zap_page_range_single(vma, start_addr, end_addr - start_addr, details);
1da177e4
LT
3460}
3461
f808c13f 3462static inline void unmap_mapping_range_tree(struct rb_root_cached *root,
232a6a1c
PX
3463 pgoff_t first_index,
3464 pgoff_t last_index,
1da177e4
LT
3465 struct zap_details *details)
3466{
3467 struct vm_area_struct *vma;
1da177e4
LT
3468 pgoff_t vba, vea, zba, zea;
3469
232a6a1c 3470 vma_interval_tree_foreach(vma, root, first_index, last_index) {
1da177e4 3471 vba = vma->vm_pgoff;
d6e93217 3472 vea = vba + vma_pages(vma) - 1;
f9871da9
ML
3473 zba = max(first_index, vba);
3474 zea = min(last_index, vea);
1da177e4 3475
97a89413 3476 unmap_mapping_range_vma(vma,
1da177e4
LT
3477 ((zba - vba) << PAGE_SHIFT) + vma->vm_start,
3478 ((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start,
97a89413 3479 details);
1da177e4
LT
3480 }
3481}
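/*
 * Editor's illustrative sketch -- not part of memory.c. The pgoff-to-virtual
 * address arithmetic used by unmap_mapping_range_tree() above, replayed for
 * one made-up VMA so the clamping is easy to follow. A 4 KiB page size and
 * the example values are assumptions for the demo only.
 */
#include <stdio.h>

#define EX_PAGE_SHIFT 12

int main(void)
{
	/* A VMA mapping file pages 10..19 of some file at vm_start. */
	unsigned long vm_start = 0x700000000000UL;
	unsigned long vm_pgoff = 10, nr_pages = 10;

	/* The caller asked for file pages 0..14 to be unmapped. */
	unsigned long first_index = 0, last_index = 14;

	unsigned long vba = vm_pgoff;					/* 10 */
	unsigned long vea = vba + nr_pages - 1;				/* 19 */
	unsigned long zba = first_index > vba ? first_index : vba;	/* 10 */
	unsigned long zea = last_index < vea ? last_index : vea;	/* 14 */

	unsigned long start = ((zba - vba) << EX_PAGE_SHIFT) + vm_start;
	unsigned long end   = ((zea - vba + 1) << EX_PAGE_SHIFT) + vm_start;

	/* Only the overlapping file pages 10..14, i.e. the VMA's first 5 pages. */
	printf("zap [%#lx, %#lx), %lu pages\n",
	       start, end, (end - start) >> EX_PAGE_SHIFT);
	return 0;
}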
3482
22061a1f 3483/**
3506659e
MWO
3484 * unmap_mapping_folio() - Unmap single folio from processes.
3485 * @folio: The locked folio to be unmapped.
22061a1f 3486 *
3506659e 3487 * Unmap this folio from any userspace process which still has it mmaped.
22061a1f
HD
3488 * Typically, for efficiency, the range of nearby pages has already been
3489 * unmapped by unmap_mapping_pages() or unmap_mapping_range(). But once
3506659e
MWO
3490 * truncation or invalidation holds the lock on a folio, it may find that
3491 * the page has been remapped again: and then uses unmap_mapping_folio()
22061a1f
HD
3492 * to unmap it finally.
3493 */
3506659e 3494void unmap_mapping_folio(struct folio *folio)
22061a1f 3495{
3506659e 3496 struct address_space *mapping = folio->mapping;
22061a1f 3497 struct zap_details details = { };
232a6a1c
PX
3498 pgoff_t first_index;
3499 pgoff_t last_index;
22061a1f 3500
3506659e 3501 VM_BUG_ON(!folio_test_locked(folio));
22061a1f 3502
3506659e 3503 first_index = folio->index;
87b11f86 3504 last_index = folio_next_index(folio) - 1;
232a6a1c 3505
2e148f1e 3506 details.even_cows = false;
3506659e 3507 details.single_folio = folio;
999dad82 3508 details.zap_flags = ZAP_FLAG_DROP_MARKER;
22061a1f 3509
2c865995 3510 i_mmap_lock_read(mapping);
22061a1f 3511 if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
232a6a1c
PX
3512 unmap_mapping_range_tree(&mapping->i_mmap, first_index,
3513 last_index, &details);
2c865995 3514 i_mmap_unlock_read(mapping);
22061a1f
HD
3515}
3516
977fbdcd
MW
3517/**
3518 * unmap_mapping_pages() - Unmap pages from processes.
3519 * @mapping: The address space containing pages to be unmapped.
3520 * @start: Index of first page to be unmapped.
3521 * @nr: Number of pages to be unmapped. 0 to unmap to end of file.
3522 * @even_cows: Whether to unmap even private COWed pages.
3523 *
3524 * Unmap the pages in this address space from any userspace process which
3525 * has them mmaped. Generally, you want to remove COWed pages as well when
3526 * a file is being truncated, but not when invalidating pages from the page
3527 * cache.
3528 */
3529void unmap_mapping_pages(struct address_space *mapping, pgoff_t start,
3530 pgoff_t nr, bool even_cows)
3531{
3532 struct zap_details details = { };
232a6a1c
PX
3533 pgoff_t first_index = start;
3534 pgoff_t last_index = start + nr - 1;
977fbdcd 3535
2e148f1e 3536 details.even_cows = even_cows;
232a6a1c
PX
3537 if (last_index < first_index)
3538 last_index = ULONG_MAX;
977fbdcd 3539
2c865995 3540 i_mmap_lock_read(mapping);
977fbdcd 3541 if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
232a6a1c
PX
3542 unmap_mapping_range_tree(&mapping->i_mmap, first_index,
3543 last_index, &details);
2c865995 3544 i_mmap_unlock_read(mapping);
977fbdcd 3545}
6e0e99d5 3546EXPORT_SYMBOL_GPL(unmap_mapping_pages);
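/*
 * Editor's illustrative sketch -- not part of memory.c. How the index range
 * in unmap_mapping_pages() above is formed: with nr == 0 the last_index
 * computation wraps below first_index and is widened to ULONG_MAX, i.e.
 * "unmap to end of file", matching the kernel-doc for @nr.
 */
#include <stdio.h>
#include <limits.h>

int main(void)
{
	unsigned long start = 16, nr = 0;	/* "from page 16 to EOF" */
	unsigned long first_index = start;
	unsigned long last_index = start + nr - 1;	/* wraps to 15 */

	if (last_index < first_index)
		last_index = ULONG_MAX;

	printf("unmap file page indices %lu..%lu\n", first_index, last_index);
	return 0;
}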
977fbdcd 3547
1da177e4 3548/**
8a5f14a2 3549 * unmap_mapping_range - unmap the portion of all mmaps in the specified
977fbdcd 3550 * address_space corresponding to the specified byte range in the underlying
8a5f14a2
KS
3551 * file.
3552 *
3d41088f 3553 * @mapping: the address space containing mmaps to be unmapped.
1da177e4
LT
3554 * @holebegin: byte in first page to unmap, relative to the start of
3555 * the underlying file. This will be rounded down to a PAGE_SIZE
25d9e2d1 3556 * boundary. Note that this is different from truncate_pagecache(), which
1da177e4
LT
3557 * must keep the partial page. In contrast, we must get rid of
3558 * partial pages.
3559 * @holelen: size of prospective hole in bytes. This will be rounded
3560 * up to a PAGE_SIZE boundary. A holelen of zero truncates to the
3561 * end of the file.
3562 * @even_cows: 1 when truncating a file, unmap even private COWed pages;
3563 * but 0 when invalidating pagecache, don't throw away private data.
3564 */
3565void unmap_mapping_range(struct address_space *mapping,
3566 loff_t const holebegin, loff_t const holelen, int even_cows)
3567{
1da177e4
LT
3568 pgoff_t hba = holebegin >> PAGE_SHIFT;
3569 pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
3570
3571 /* Check for overflow. */
3572 if (sizeof(holelen) > sizeof(hlen)) {
3573 long long holeend =
3574 (holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
3575 if (holeend & ~(long long)ULONG_MAX)
3576 hlen = ULONG_MAX - hba + 1;
3577 }
3578
977fbdcd 3579 unmap_mapping_pages(mapping, hba, hlen, even_cows);
1da177e4
LT
3580}
3581EXPORT_SYMBOL(unmap_mapping_range);
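/*
 * Editor's illustrative sketch -- not part of memory.c. The byte-to-page
 * rounding done by unmap_mapping_range() above with concrete numbers: the
 * start is rounded down and the length rounded up, so partial pages at both
 * ends of the hole are included. A 4 KiB page size is assumed for the demo.
 */
#include <stdio.h>

#define EX_PAGE_SIZE	4096ULL
#define EX_PAGE_SHIFT	12

int main(void)
{
	unsigned long long holebegin = 5000;	/* byte offset of the hole */
	unsigned long long holelen   = 10000;	/* hole length in bytes */

	unsigned long long hba  = holebegin >> EX_PAGE_SHIFT;			  /* page 1 */
	unsigned long long hlen = (holelen + EX_PAGE_SIZE - 1) >> EX_PAGE_SHIFT; /* 3 pages */

	printf("unmap page indices [%llu, %llu)\n", hba, hba + hlen);
	return 0;
}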
3582
b756a3b5
AP
3583/*
3584 * Restore a potential device exclusive pte to a working pte entry
3585 */
3586static vm_fault_t remove_device_exclusive_entry(struct vm_fault *vmf)
3587{
19672a9e 3588 struct folio *folio = page_folio(vmf->page);
b756a3b5
AP
3589 struct vm_area_struct *vma = vmf->vma;
3590 struct mmu_notifier_range range;
fdc724d6 3591 vm_fault_t ret;
b756a3b5 3592
7c7b9629
AP
3593 /*
3594 * We need a reference to lock the folio because we don't hold
3595 * the PTL so a racing thread can remove the device-exclusive
3596 * entry and unmap it. If the folio is free the entry must
3597 * have been removed already. If it happens to have already
3598 * been re-allocated after being freed all we do is lock and
3599 * unlock it.
3600 */
3601 if (!folio_try_get(folio))
3602 return 0;
3603
fdc724d6
SB
3604 ret = folio_lock_or_retry(folio, vmf);
3605 if (ret) {
7c7b9629 3606 folio_put(folio);
fdc724d6 3607 return ret;
7c7b9629 3608 }
7d4a8be0 3609 mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0,
b756a3b5
AP
3610 vma->vm_mm, vmf->address & PAGE_MASK,
3611 (vmf->address & PAGE_MASK) + PAGE_SIZE, NULL);
3612 mmu_notifier_invalidate_range_start(&range);
3613
3614 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
3615 &vmf->ptl);
c33c7948 3616 if (likely(vmf->pte && pte_same(ptep_get(vmf->pte), vmf->orig_pte)))
19672a9e 3617 restore_exclusive_pte(vma, vmf->page, vmf->address, vmf->pte);
b756a3b5 3618
3db82b93
HD
3619 if (vmf->pte)
3620 pte_unmap_unlock(vmf->pte, vmf->ptl);
19672a9e 3621 folio_unlock(folio);
7c7b9629 3622 folio_put(folio);
b756a3b5
AP
3623
3624 mmu_notifier_invalidate_range_end(&range);
3625 return 0;
3626}
3627
a160e537 3628static inline bool should_try_to_free_swap(struct folio *folio,
c145e0b4
DH
3629 struct vm_area_struct *vma,
3630 unsigned int fault_flags)
3631{
a160e537 3632 if (!folio_test_swapcache(folio))
c145e0b4 3633 return false;
9202d527 3634 if (mem_cgroup_swap_full(folio) || (vma->vm_flags & VM_LOCKED) ||
a160e537 3635 folio_test_mlocked(folio))
c145e0b4
DH
3636 return true;
3637 /*
3638 * If we want to map a page that's in the swapcache writable, we
3639 * have to detect via the refcount if we're really the exclusive
3640 * user. Try freeing the swapcache to get rid of the swapcache
 3641	 * reference only in case it's likely that we'll be the exclusive user.
3642 */
a160e537
MWO
3643 return (fault_flags & FAULT_FLAG_WRITE) && !folio_test_ksm(folio) &&
3644 folio_ref_count(folio) == 2;
c145e0b4
DH
3645}
3646
9c28a205
PX
3647static vm_fault_t pte_marker_clear(struct vm_fault *vmf)
3648{
3649 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
3650 vmf->address, &vmf->ptl);
3db82b93
HD
3651 if (!vmf->pte)
3652 return 0;
9c28a205
PX
3653 /*
3654 * Be careful so that we will only recover a special uffd-wp pte into a
3655 * none pte. Otherwise it means the pte could have changed, so retry.
7e3ce3f8
PX
3656 *
3657 * This should also cover the case where e.g. the pte changed
af19487f 3658 * quickly from a PTE_MARKER_UFFD_WP into PTE_MARKER_POISONED.
7e3ce3f8 3659 * So is_pte_marker() check is not enough to safely drop the pte.
9c28a205 3660 */
c33c7948 3661 if (pte_same(vmf->orig_pte, ptep_get(vmf->pte)))
9c28a205
PX
3662 pte_clear(vmf->vma->vm_mm, vmf->address, vmf->pte);
3663 pte_unmap_unlock(vmf->pte, vmf->ptl);
3664 return 0;
3665}
3666
2bad466c
PX
3667static vm_fault_t do_pte_missing(struct vm_fault *vmf)
3668{
3669 if (vma_is_anonymous(vmf->vma))
3670 return do_anonymous_page(vmf);
3671 else
3672 return do_fault(vmf);
3673}
3674
9c28a205
PX
3675/*
3676 * This is actually a page-missing access, but with uffd-wp special pte
3677 * installed. It means this pte was wr-protected before being unmapped.
3678 */
3679static vm_fault_t pte_marker_handle_uffd_wp(struct vm_fault *vmf)
3680{
3681 /*
3682 * Just in case there're leftover special ptes even after the region
7a079ba2 3683 * got unregistered - we can simply clear them.
9c28a205 3684 */
2bad466c 3685 if (unlikely(!userfaultfd_wp(vmf->vma)))
9c28a205
PX
3686 return pte_marker_clear(vmf);
3687
2bad466c 3688 return do_pte_missing(vmf);
9c28a205
PX
3689}
3690
5c041f5d
PX
3691static vm_fault_t handle_pte_marker(struct vm_fault *vmf)
3692{
3693 swp_entry_t entry = pte_to_swp_entry(vmf->orig_pte);
3694 unsigned long marker = pte_marker_get(entry);
3695
3696 /*
ca92ea3d
PX
3697 * PTE markers should never be empty. If anything weird happened,
3698 * the best thing to do is to kill the process along with its mm.
5c041f5d 3699 */
ca92ea3d 3700 if (WARN_ON_ONCE(!marker))
5c041f5d
PX
3701 return VM_FAULT_SIGBUS;
3702
15520a3f 3703 /* Higher priority than uffd-wp when data corrupted */
af19487f
AR
3704 if (marker & PTE_MARKER_POISONED)
3705 return VM_FAULT_HWPOISON;
15520a3f 3706
9c28a205
PX
3707 if (pte_marker_entry_uffd_wp(entry))
3708 return pte_marker_handle_uffd_wp(vmf);
3709
3710 /* This is an unknown pte marker */
3711 return VM_FAULT_SIGBUS;
5c041f5d
PX
3712}
3713
1da177e4 3714/*
c1e8d7c6 3715 * We enter with non-exclusive mmap_lock (to exclude vma changes,
8f4e2101 3716 * but allow concurrent faults), and pte mapped but not yet locked.
9a95f3cf
PC
3717 * We return with pte unmapped and unlocked.
3718 *
c1e8d7c6 3719 * We return with the mmap_lock locked or unlocked in the same cases
9a95f3cf 3720 * as does filemap_fault().
1da177e4 3721 */
2b740303 3722vm_fault_t do_swap_page(struct vm_fault *vmf)
1da177e4 3723{
82b0f8c3 3724 struct vm_area_struct *vma = vmf->vma;
d4f9565a
MWO
3725 struct folio *swapcache, *folio = NULL;
3726 struct page *page;
2799e775 3727 struct swap_info_struct *si = NULL;
14f9135d 3728 rmap_t rmap_flags = RMAP_NONE;
1493a191 3729 bool exclusive = false;
65500d23 3730 swp_entry_t entry;
1da177e4 3731 pte_t pte;
2b740303 3732 vm_fault_t ret = 0;
aae466b0 3733 void *shadow = NULL;
1da177e4 3734
2ca99358 3735 if (!pte_unmap_same(vmf))
8f4e2101 3736 goto out;
65500d23 3737
2994302b 3738 entry = pte_to_swp_entry(vmf->orig_pte);
d1737fdb
AK
3739 if (unlikely(non_swap_entry(entry))) {
3740 if (is_migration_entry(entry)) {
82b0f8c3
JK
3741 migration_entry_wait(vma->vm_mm, vmf->pmd,
3742 vmf->address);
b756a3b5
AP
3743 } else if (is_device_exclusive_entry(entry)) {
3744 vmf->page = pfn_swap_entry_to_page(entry);
3745 ret = remove_device_exclusive_entry(vmf);
5042db43 3746 } else if (is_device_private_entry(entry)) {
1235ccd0
SB
3747 if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
3748 /*
3749 * migrate_to_ram is not yet ready to operate
3750 * under VMA lock.
3751 */
3752 vma_end_read(vma);
3753 ret = VM_FAULT_RETRY;
3754 goto out;
3755 }
3756
af5cdaf8 3757 vmf->page = pfn_swap_entry_to_page(entry);
16ce101d
AP
3758 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
3759 vmf->address, &vmf->ptl);
3db82b93 3760 if (unlikely(!vmf->pte ||
c33c7948
RR
3761 !pte_same(ptep_get(vmf->pte),
3762 vmf->orig_pte)))
3b65f437 3763 goto unlock;
16ce101d
AP
3764
3765 /*
3766 * Get a page reference while we know the page can't be
3767 * freed.
3768 */
3769 get_page(vmf->page);
3770 pte_unmap_unlock(vmf->pte, vmf->ptl);
4a955bed 3771 ret = vmf->page->pgmap->ops->migrate_to_ram(vmf);
16ce101d 3772 put_page(vmf->page);
d1737fdb
AK
3773 } else if (is_hwpoison_entry(entry)) {
3774 ret = VM_FAULT_HWPOISON;
5c041f5d
PX
3775 } else if (is_pte_marker_entry(entry)) {
3776 ret = handle_pte_marker(vmf);
d1737fdb 3777 } else {
2994302b 3778 print_bad_pte(vma, vmf->address, vmf->orig_pte, NULL);
d99be1a8 3779 ret = VM_FAULT_SIGBUS;
d1737fdb 3780 }
0697212a
CL
3781 goto out;
3782 }
0bcac06f 3783
2799e775
ML
3784 /* Prevent swapoff from happening to us. */
3785 si = get_swap_device(entry);
3786 if (unlikely(!si))
3787 goto out;
0bcac06f 3788
5a423081
MWO
3789 folio = swap_cache_get_folio(entry, vma, vmf->address);
3790 if (folio)
3791 page = folio_file_page(folio, swp_offset(entry));
d4f9565a 3792 swapcache = folio;
f8020772 3793
d4f9565a 3794 if (!folio) {
a449bf58
QC
3795 if (data_race(si->flags & SWP_SYNCHRONOUS_IO) &&
3796 __swap_count(entry) == 1) {
0bcac06f 3797 /* skip swapcache */
63ad4add
MWO
3798 folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0,
3799 vma, vmf->address, false);
3800 page = &folio->page;
3801 if (folio) {
3802 __folio_set_locked(folio);
3803 __folio_set_swapbacked(folio);
4c6355b2 3804
65995918 3805 if (mem_cgroup_swapin_charge_folio(folio,
63ad4add
MWO
3806 vma->vm_mm, GFP_KERNEL,
3807 entry)) {
545b1b07 3808 ret = VM_FAULT_OOM;
4c6355b2 3809 goto out_page;
545b1b07 3810 }
0add0c77 3811 mem_cgroup_swapin_uncharge_swap(entry);
4c6355b2 3812
aae466b0
JK
3813 shadow = get_shadow_from_swap_cache(entry);
3814 if (shadow)
63ad4add 3815 workingset_refault(folio, shadow);
0076f029 3816
63ad4add 3817 folio_add_lru(folio);
0add0c77
SB
3818
3819 /* To provide entry to swap_readpage() */
3d2c9087 3820 folio->swap = entry;
5169b844 3821 swap_readpage(page, true, NULL);
63ad4add 3822 folio->private = NULL;
0bcac06f 3823 }
aa8d22a1 3824 } else {
e9e9b7ec
MK
3825 page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
3826 vmf);
63ad4add
MWO
3827 if (page)
3828 folio = page_folio(page);
d4f9565a 3829 swapcache = folio;
0bcac06f
MK
3830 }
3831
d4f9565a 3832 if (!folio) {
1da177e4 3833 /*
8f4e2101
HD
3834 * Back out if somebody else faulted in this pte
3835 * while we released the pte lock.
1da177e4 3836 */
82b0f8c3
JK
3837 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
3838 vmf->address, &vmf->ptl);
c33c7948
RR
3839 if (likely(vmf->pte &&
3840 pte_same(ptep_get(vmf->pte), vmf->orig_pte)))
1da177e4 3841 ret = VM_FAULT_OOM;
65500d23 3842 goto unlock;
1da177e4
LT
3843 }
3844
3845 /* Had to read the page from swap area: Major fault */
3846 ret = VM_FAULT_MAJOR;
f8891e5e 3847 count_vm_event(PGMAJFAULT);
2262185c 3848 count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
d1737fdb 3849 } else if (PageHWPoison(page)) {
71f72525
WF
3850 /*
3851 * hwpoisoned dirty swapcache pages are kept for killing
3852 * owner processes (which may be unknown at hwpoison time)
3853 */
d1737fdb 3854 ret = VM_FAULT_HWPOISON;
4779cb31 3855 goto out_release;
1da177e4
LT
3856 }
3857
fdc724d6
SB
3858 ret |= folio_lock_or_retry(folio, vmf);
3859 if (ret & VM_FAULT_RETRY)
d065bd81 3860 goto out_release;
073e587e 3861
84d60fdd
DH
3862 if (swapcache) {
3863 /*
3b344157 3864 * Make sure folio_free_swap() or swapoff did not release the
84d60fdd
DH
3865 * swapcache from under us. The page pin, and pte_same test
3866 * below, are not enough to exclude that. Even if it is still
3867 * swapcache, we need to check that the page's swap has not
3868 * changed.
3869 */
63ad4add 3870 if (unlikely(!folio_test_swapcache(folio) ||
cfeed8ff 3871 page_swap_entry(page).val != entry.val))
84d60fdd
DH
3872 goto out_page;
3873
3874 /*
3875 * KSM sometimes has to copy on read faults, for example, if
3876 * page->index of !PageKSM() pages would be nonlinear inside the
3877 * anon VMA -- PageKSM() is lost on actual swapout.
3878 */
3879 page = ksm_might_need_to_copy(page, vma, vmf->address);
3880 if (unlikely(!page)) {
3881 ret = VM_FAULT_OOM;
84d60fdd 3882 goto out_page;
6b970599
KW
3883 } else if (unlikely(PTR_ERR(page) == -EHWPOISON)) {
3884 ret = VM_FAULT_HWPOISON;
3885 goto out_page;
84d60fdd 3886 }
63ad4add 3887 folio = page_folio(page);
c145e0b4
DH
3888
3889 /*
3890 * If we want to map a page that's in the swapcache writable, we
3891 * have to detect via the refcount if we're really the exclusive
3892 * owner. Try removing the extra reference from the local LRU
1fec6890 3893 * caches if required.
c145e0b4 3894 */
d4f9565a 3895 if ((vmf->flags & FAULT_FLAG_WRITE) && folio == swapcache &&
63ad4add 3896 !folio_test_ksm(folio) && !folio_test_lru(folio))
c145e0b4 3897 lru_add_drain();
5ad64688
HD
3898 }
3899
4231f842 3900 folio_throttle_swaprate(folio, GFP_KERNEL);
8a9f3ccd 3901
1da177e4 3902 /*
8f4e2101 3903 * Back out if somebody else already faulted in this pte.
1da177e4 3904 */
82b0f8c3
JK
3905 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
3906 &vmf->ptl);
c33c7948 3907 if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte)))
b8107480 3908 goto out_nomap;
b8107480 3909
63ad4add 3910 if (unlikely(!folio_test_uptodate(folio))) {
b8107480
KK
3911 ret = VM_FAULT_SIGBUS;
3912 goto out_nomap;
1da177e4
LT
3913 }
3914
78fbe906
DH
3915 /*
3916 * PG_anon_exclusive reuses PG_mappedtodisk for anon pages. A swap pte
3917 * must never point at an anonymous page in the swapcache that is
3918 * PG_anon_exclusive. Sanity check that this holds and especially, that
3919 * no filesystem set PG_mappedtodisk on a page in the swapcache. Sanity
3920 * check after taking the PT lock and making sure that nobody
3921 * concurrently faulted in this page and set PG_anon_exclusive.
3922 */
63ad4add
MWO
3923 BUG_ON(!folio_test_anon(folio) && folio_test_mappedtodisk(folio));
3924 BUG_ON(folio_test_anon(folio) && PageAnonExclusive(page));
78fbe906 3925
1493a191
DH
3926 /*
3927 * Check under PT lock (to protect against concurrent fork() sharing
3928 * the swap entry concurrently) for certainly exclusive pages.
3929 */
63ad4add 3930 if (!folio_test_ksm(folio)) {
1493a191 3931 exclusive = pte_swp_exclusive(vmf->orig_pte);
d4f9565a 3932 if (folio != swapcache) {
1493a191
DH
3933 /*
3934 * We have a fresh page that is not exposed to the
3935 * swapcache -> certainly exclusive.
3936 */
3937 exclusive = true;
63ad4add 3938 } else if (exclusive && folio_test_writeback(folio) &&
eacde327 3939 data_race(si->flags & SWP_STABLE_WRITES)) {
1493a191
DH
3940 /*
3941 * This is tricky: not all swap backends support
3942 * concurrent page modifications while under writeback.
3943 *
3944 * So if we stumble over such a page in the swapcache
3945 * we must not set the page exclusive, otherwise we can
3946 * map it writable without further checks and modify it
3947 * while still under writeback.
3948 *
3949 * For these problematic swap backends, simply drop the
3950 * exclusive marker: this is perfectly fine as we start
3951 * writeback only if we fully unmapped the page and
3952 * there are no unexpected references on the page after
3953 * unmapping succeeded. After fully unmapped, no
3954 * further GUP references (FOLL_GET and FOLL_PIN) can
3955 * appear, so dropping the exclusive marker and mapping
3956 * it only R/O is fine.
3957 */
3958 exclusive = false;
3959 }
3960 }
3961
6dca4ac6
PC
3962 /*
3963 * Some architectures may have to restore extra metadata to the page
3964 * when reading from swap. This metadata may be indexed by swap entry
3965 * so this must be called before swap_free().
3966 */
3967 arch_swap_restore(entry, folio);
3968
8c7c6e34 3969 /*
c145e0b4
DH
3970 * Remove the swap entry and conditionally try to free up the swapcache.
3971 * We're already holding a reference on the page but haven't mapped it
3972 * yet.
8c7c6e34 3973 */
c145e0b4 3974 swap_free(entry);
a160e537
MWO
3975 if (should_try_to_free_swap(folio, vma, vmf->flags))
3976 folio_free_swap(folio);
1da177e4 3977
f1a79412
SB
3978 inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
3979 dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
1da177e4 3980 pte = mk_pte(page, vma->vm_page_prot);
c145e0b4
DH
3981
3982 /*
1493a191
DH
3983 * Same logic as in do_wp_page(); however, optimize for pages that are
3984 * certainly not shared either because we just allocated them without
3985 * exposing them to the swapcache or because the swap entry indicates
3986 * exclusivity.
c145e0b4 3987 */
63ad4add
MWO
3988 if (!folio_test_ksm(folio) &&
3989 (exclusive || folio_ref_count(folio) == 1)) {
6c287605
DH
3990 if (vmf->flags & FAULT_FLAG_WRITE) {
3991 pte = maybe_mkwrite(pte_mkdirty(pte), vma);
3992 vmf->flags &= ~FAULT_FLAG_WRITE;
6c287605 3993 }
14f9135d 3994 rmap_flags |= RMAP_EXCLUSIVE;
1da177e4 3995 }
1da177e4 3996 flush_icache_page(vma, page);
2994302b 3997 if (pte_swp_soft_dirty(vmf->orig_pte))
179ef71c 3998 pte = pte_mksoft_dirty(pte);
f1eb1bac 3999 if (pte_swp_uffd_wp(vmf->orig_pte))
f45ec5ff 4000 pte = pte_mkuffd_wp(pte);
2994302b 4001 vmf->orig_pte = pte;
0bcac06f
MK
4002
4003 /* ksm created a completely new copy */
d4f9565a 4004 if (unlikely(folio != swapcache && swapcache)) {
40f2bbf7 4005 page_add_new_anon_rmap(page, vma, vmf->address);
63ad4add 4006 folio_add_lru_vma(folio, vma);
0bcac06f 4007 } else {
f1e2db12 4008 page_add_anon_rmap(page, vma, vmf->address, rmap_flags);
00501b53 4009 }
1da177e4 4010
63ad4add
MWO
4011 VM_BUG_ON(!folio_test_anon(folio) ||
4012 (pte_write(pte) && !PageAnonExclusive(page)));
1eba86c0
PT
4013 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
4014 arch_do_swap_page(vma->vm_mm, vma, vmf->address, pte, vmf->orig_pte);
4015
63ad4add 4016 folio_unlock(folio);
d4f9565a 4017 if (folio != swapcache && swapcache) {
4969c119
AA
4018 /*
 4019	 * Hold the lock to avoid the swap entry being reused
4020 * until we take the PT lock for the pte_same() check
4021 * (to avoid false positives from pte_same). For
4022 * further safety release the lock after the swap_free
4023 * so that the swap count won't change under a
4024 * parallel locked swapcache.
4025 */
d4f9565a
MWO
4026 folio_unlock(swapcache);
4027 folio_put(swapcache);
4969c119 4028 }
c475a8ab 4029
82b0f8c3 4030 if (vmf->flags & FAULT_FLAG_WRITE) {
2994302b 4031 ret |= do_wp_page(vmf);
61469f1d
HD
4032 if (ret & VM_FAULT_ERROR)
4033 ret &= VM_FAULT_ERROR;
1da177e4
LT
4034 goto out;
4035 }
4036
4037 /* No need to invalidate - it was non-present before */
5003a2bd 4038 update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
65500d23 4039unlock:
3db82b93
HD
4040 if (vmf->pte)
4041 pte_unmap_unlock(vmf->pte, vmf->ptl);
1da177e4 4042out:
2799e775
ML
4043 if (si)
4044 put_swap_device(si);
1da177e4 4045 return ret;
b8107480 4046out_nomap:
3db82b93
HD
4047 if (vmf->pte)
4048 pte_unmap_unlock(vmf->pte, vmf->ptl);
bc43f75c 4049out_page:
63ad4add 4050 folio_unlock(folio);
4779cb31 4051out_release:
63ad4add 4052 folio_put(folio);
d4f9565a
MWO
4053 if (folio != swapcache && swapcache) {
4054 folio_unlock(swapcache);
4055 folio_put(swapcache);
4969c119 4056 }
2799e775
ML
4057 if (si)
4058 put_swap_device(si);
65500d23 4059 return ret;
1da177e4
LT
4060}
4061
4062/*
c1e8d7c6 4063 * We enter with non-exclusive mmap_lock (to exclude vma changes,
8f4e2101 4064 * but allow concurrent faults), and pte mapped but not yet locked.
c1e8d7c6 4065 * We return with mmap_lock still held, but pte unmapped and unlocked.
1da177e4 4066 */
2b740303 4067static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
1da177e4 4068{
2bad466c 4069 bool uffd_wp = vmf_orig_pte_uffd_wp(vmf);
82b0f8c3 4070 struct vm_area_struct *vma = vmf->vma;
6bc56a4d 4071 struct folio *folio;
2b740303 4072 vm_fault_t ret = 0;
1da177e4 4073 pte_t entry;
1da177e4 4074
6b7339f4
KS
4075 /* File mapping without ->vm_ops ? */
4076 if (vma->vm_flags & VM_SHARED)
4077 return VM_FAULT_SIGBUS;
4078
7267ec00 4079 /*
3db82b93
HD
4080 * Use pte_alloc() instead of pte_alloc_map(), so that OOM can
4081 * be distinguished from a transient failure of pte_offset_map().
7267ec00 4082 */
4cf58924 4083 if (pte_alloc(vma->vm_mm, vmf->pmd))
7267ec00
KS
4084 return VM_FAULT_OOM;
4085
11ac5524 4086 /* Use the zero-page for reads */
82b0f8c3 4087 if (!(vmf->flags & FAULT_FLAG_WRITE) &&
bae473a4 4088 !mm_forbids_zeropage(vma->vm_mm)) {
82b0f8c3 4089 entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address),
62eede62 4090 vma->vm_page_prot));
82b0f8c3
JK
4091 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
4092 vmf->address, &vmf->ptl);
3db82b93
HD
4093 if (!vmf->pte)
4094 goto unlock;
2bad466c 4095 if (vmf_pte_changed(vmf)) {
7df67697 4096 update_mmu_tlb(vma, vmf->address, vmf->pte);
a13ea5b7 4097 goto unlock;
7df67697 4098 }
6b31d595
MH
4099 ret = check_stable_address_space(vma->vm_mm);
4100 if (ret)
4101 goto unlock;
6b251fc9
AA
4102 /* Deliver the page fault to userland, check inside PT lock */
4103 if (userfaultfd_missing(vma)) {
82b0f8c3
JK
4104 pte_unmap_unlock(vmf->pte, vmf->ptl);
4105 return handle_userfault(vmf, VM_UFFD_MISSING);
6b251fc9 4106 }
a13ea5b7
HD
4107 goto setpte;
4108 }
4109
557ed1fa 4110 /* Allocate our own private page. */
557ed1fa
NP
4111 if (unlikely(anon_vma_prepare(vma)))
4112 goto oom;
6bc56a4d
MWO
4113 folio = vma_alloc_zeroed_movable_folio(vma, vmf->address);
4114 if (!folio)
557ed1fa 4115 goto oom;
eb3c24f3 4116
6bc56a4d 4117 if (mem_cgroup_charge(folio, vma->vm_mm, GFP_KERNEL))
eb3c24f3 4118 goto oom_free_page;
e2bf3e2c 4119 folio_throttle_swaprate(folio, GFP_KERNEL);
eb3c24f3 4120
52f37629 4121 /*
cb3184de 4122 * The memory barrier inside __folio_mark_uptodate makes sure that
f4f5329d 4123 * preceding stores to the page contents become visible before
52f37629
MK
4124 * the set_pte_at() write.
4125 */
cb3184de 4126 __folio_mark_uptodate(folio);
8f4e2101 4127
cb3184de 4128 entry = mk_pte(&folio->page, vma->vm_page_prot);
50c25ee9 4129 entry = pte_sw_mkyoung(entry);
1ac0cb5d 4130 if (vma->vm_flags & VM_WRITE)
161e393c 4131 entry = pte_mkwrite(pte_mkdirty(entry), vma);
1da177e4 4132
82b0f8c3
JK
4133 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
4134 &vmf->ptl);
3db82b93
HD
4135 if (!vmf->pte)
4136 goto release;
2bad466c 4137 if (vmf_pte_changed(vmf)) {
bce8cb3c 4138 update_mmu_tlb(vma, vmf->address, vmf->pte);
557ed1fa 4139 goto release;
7df67697 4140 }
9ba69294 4141
6b31d595
MH
4142 ret = check_stable_address_space(vma->vm_mm);
4143 if (ret)
4144 goto release;
4145
6b251fc9
AA
4146 /* Deliver the page fault to userland, check inside PT lock */
4147 if (userfaultfd_missing(vma)) {
82b0f8c3 4148 pte_unmap_unlock(vmf->pte, vmf->ptl);
cb3184de 4149 folio_put(folio);
82b0f8c3 4150 return handle_userfault(vmf, VM_UFFD_MISSING);
6b251fc9
AA
4151 }
4152
f1a79412 4153 inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
cb3184de
MWO
4154 folio_add_new_anon_rmap(folio, vma, vmf->address);
4155 folio_add_lru_vma(folio, vma);
a13ea5b7 4156setpte:
2bad466c
PX
4157 if (uffd_wp)
4158 entry = pte_mkuffd_wp(entry);
82b0f8c3 4159 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
1da177e4
LT
4160
4161 /* No need to invalidate - it was non-present before */
5003a2bd 4162 update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
65500d23 4163unlock:
3db82b93
HD
4164 if (vmf->pte)
4165 pte_unmap_unlock(vmf->pte, vmf->ptl);
6b31d595 4166 return ret;
8f4e2101 4167release:
cb3184de 4168 folio_put(folio);
8f4e2101 4169 goto unlock;
8a9f3ccd 4170oom_free_page:
cb3184de 4171 folio_put(folio);
65500d23 4172oom:
1da177e4
LT
4173 return VM_FAULT_OOM;
4174}
4175
9a95f3cf 4176/*
c1e8d7c6 4177 * The mmap_lock must have been held on entry, and may have been
9a95f3cf
PC
4178 * released depending on flags and vma->vm_ops->fault() return value.
 4179 * See filemap_fault() and __folio_lock_or_retry().
4180 */
2b740303 4181static vm_fault_t __do_fault(struct vm_fault *vmf)
7eae74af 4182{
82b0f8c3 4183 struct vm_area_struct *vma = vmf->vma;
2b740303 4184 vm_fault_t ret;
7eae74af 4185
63f3655f
MH
4186 /*
4187 * Preallocate pte before we take page_lock because this might lead to
4188 * deadlocks for memcg reclaim which waits for pages under writeback:
4189 * lock_page(A)
4190 * SetPageWriteback(A)
4191 * unlock_page(A)
4192 * lock_page(B)
4193 * lock_page(B)
d383807a 4194 * pte_alloc_one
63f3655f
MH
4195 * shrink_page_list
4196 * wait_on_page_writeback(A)
4197 * SetPageWriteback(B)
4198 * unlock_page(B)
4199 * # flush A, B to clear the writeback
4200 */
4201 if (pmd_none(*vmf->pmd) && !vmf->prealloc_pte) {
a7069ee3 4202 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
63f3655f
MH
4203 if (!vmf->prealloc_pte)
4204 return VM_FAULT_OOM;
63f3655f
MH
4205 }
4206
11bac800 4207 ret = vma->vm_ops->fault(vmf);
3917048d 4208 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY |
b1aa812b 4209 VM_FAULT_DONE_COW)))
bc2466e4 4210 return ret;
7eae74af 4211
667240e0 4212 if (unlikely(PageHWPoison(vmf->page))) {
3149c79f 4213 struct page *page = vmf->page;
e53ac737
RR
4214 vm_fault_t poisonret = VM_FAULT_HWPOISON;
4215 if (ret & VM_FAULT_LOCKED) {
3149c79f
RR
4216 if (page_mapped(page))
4217 unmap_mapping_pages(page_mapping(page),
4218 page->index, 1, false);
e53ac737 4219 /* Retry if a clean page was removed from the cache. */
3149c79f
RR
4220 if (invalidate_inode_page(page))
4221 poisonret = VM_FAULT_NOPAGE;
4222 unlock_page(page);
e53ac737 4223 }
3149c79f 4224 put_page(page);
936ca80d 4225 vmf->page = NULL;
e53ac737 4226 return poisonret;
7eae74af
KS
4227 }
4228
4229 if (unlikely(!(ret & VM_FAULT_LOCKED)))
667240e0 4230 lock_page(vmf->page);
7eae74af 4231 else
667240e0 4232 VM_BUG_ON_PAGE(!PageLocked(vmf->page), vmf->page);
7eae74af 4233
7eae74af
KS
4234 return ret;
4235}
4236
396bcc52 4237#ifdef CONFIG_TRANSPARENT_HUGEPAGE
82b0f8c3 4238static void deposit_prealloc_pte(struct vm_fault *vmf)
953c66c2 4239{
82b0f8c3 4240 struct vm_area_struct *vma = vmf->vma;
953c66c2 4241
82b0f8c3 4242 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
953c66c2
AK
4243 /*
4244 * We are going to consume the prealloc table,
4245 * count that as nr_ptes.
4246 */
c4812909 4247 mm_inc_nr_ptes(vma->vm_mm);
7f2b6ce8 4248 vmf->prealloc_pte = NULL;
953c66c2
AK
4249}
4250
f9ce0be7 4251vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
10102459 4252{
82b0f8c3
JK
4253 struct vm_area_struct *vma = vmf->vma;
4254 bool write = vmf->flags & FAULT_FLAG_WRITE;
4255 unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
10102459 4256 pmd_t entry;
d01ac3c3 4257 vm_fault_t ret = VM_FAULT_FALLBACK;
10102459
KS
4258
4259 if (!transhuge_vma_suitable(vma, haddr))
d01ac3c3 4260 return ret;
10102459 4261
10102459 4262 page = compound_head(page);
d01ac3c3
MWO
4263 if (compound_order(page) != HPAGE_PMD_ORDER)
4264 return ret;
10102459 4265
eac96c3e
YS
4266 /*
4267 * Just backoff if any subpage of a THP is corrupted otherwise
4268 * the corrupted page may mapped by PMD silently to escape the
4269 * check. This kind of THP just can be PTE mapped. Access to
4270 * the corrupted subpage should trigger SIGBUS as expected.
4271 */
4272 if (unlikely(PageHasHWPoisoned(page)))
4273 return ret;
4274
953c66c2 4275 /*
f0953a1b 4276 * Archs like ppc64 need additional space to store information
953c66c2
AK
4277 * related to pte entry. Use the preallocated table for that.
4278 */
82b0f8c3 4279 if (arch_needs_pgtable_deposit() && !vmf->prealloc_pte) {
4cf58924 4280 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
82b0f8c3 4281 if (!vmf->prealloc_pte)
953c66c2 4282 return VM_FAULT_OOM;
953c66c2
AK
4283 }
4284
82b0f8c3
JK
4285 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
4286 if (unlikely(!pmd_none(*vmf->pmd)))
10102459
KS
4287 goto out;
4288
9f1f5b60 4289 flush_icache_pages(vma, page, HPAGE_PMD_NR);
10102459
KS
4290
4291 entry = mk_huge_pmd(page, vma->vm_page_prot);
4292 if (write)
f55e1014 4293 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
10102459 4294
fadae295 4295 add_mm_counter(vma->vm_mm, mm_counter_file(page), HPAGE_PMD_NR);
cea86fe2
HD
4296 page_add_file_rmap(page, vma, true);
4297
953c66c2
AK
4298 /*
4299 * deposit and withdraw with pmd lock held
4300 */
4301 if (arch_needs_pgtable_deposit())
82b0f8c3 4302 deposit_prealloc_pte(vmf);
10102459 4303
82b0f8c3 4304 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
10102459 4305
82b0f8c3 4306 update_mmu_cache_pmd(vma, haddr, vmf->pmd);
10102459
KS
4307
4308 /* fault is handled */
4309 ret = 0;
95ecedcd 4310 count_vm_event(THP_FILE_MAPPED);
10102459 4311out:
82b0f8c3 4312 spin_unlock(vmf->ptl);
10102459
KS
4313 return ret;
4314}
4315#else
f9ce0be7 4316vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
10102459 4317{
f9ce0be7 4318 return VM_FAULT_FALLBACK;
10102459
KS
4319}
4320#endif
4321
3bd786f7
YF
4322/**
4323 * set_pte_range - Set a range of PTEs to point to pages in a folio.
 4324 * @vmf: Fault description.
4325 * @folio: The folio that contains @page.
4326 * @page: The first page to create a PTE for.
4327 * @nr: The number of PTEs to create.
4328 * @addr: The first address to create a PTE for.
4329 */
4330void set_pte_range(struct vm_fault *vmf, struct folio *folio,
4331 struct page *page, unsigned int nr, unsigned long addr)
3bb97794 4332{
82b0f8c3 4333 struct vm_area_struct *vma = vmf->vma;
2bad466c 4334 bool uffd_wp = vmf_orig_pte_uffd_wp(vmf);
82b0f8c3 4335 bool write = vmf->flags & FAULT_FLAG_WRITE;
3bd786f7 4336 bool prefault = in_range(vmf->address, addr, nr * PAGE_SIZE);
3bb97794 4337 pte_t entry;
7267ec00 4338
3bd786f7 4339 flush_icache_pages(vma, page, nr);
3bb97794 4340 entry = mk_pte(page, vma->vm_page_prot);
46bdb427
WD
4341
4342 if (prefault && arch_wants_old_prefaulted_pte())
4343 entry = pte_mkold(entry);
50c25ee9
TB
4344 else
4345 entry = pte_sw_mkyoung(entry);
46bdb427 4346
3bb97794
KS
4347 if (write)
4348 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
9c28a205 4349 if (unlikely(uffd_wp))
f1eb1bac 4350 entry = pte_mkuffd_wp(entry);
bae473a4
KS
4351 /* copy-on-write page */
4352 if (write && !(vma->vm_flags & VM_SHARED)) {
3bd786f7
YF
4353 add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr);
4354 VM_BUG_ON_FOLIO(nr != 1, folio);
4355 folio_add_new_anon_rmap(folio, vma, addr);
4356 folio_add_lru_vma(folio, vma);
3bb97794 4357 } else {
3bd786f7
YF
4358 add_mm_counter(vma->vm_mm, mm_counter_file(page), nr);
4359 folio_add_file_rmap_range(folio, page, nr, vma, false);
3bb97794 4360 }
3bd786f7
YF
4361 set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr);
4362
4363 /* no need to invalidate: a not-present page won't be cached */
4364 update_mmu_cache_range(vmf, vma, addr, vmf->pte, nr);
3bb97794
KS
4365}
4366
f46f2ade
PX
4367static bool vmf_pte_changed(struct vm_fault *vmf)
4368{
4369 if (vmf->flags & FAULT_FLAG_ORIG_PTE_VALID)
c33c7948 4370 return !pte_same(ptep_get(vmf->pte), vmf->orig_pte);
f46f2ade 4371
c33c7948 4372 return !pte_none(ptep_get(vmf->pte));
f46f2ade
PX
4373}
4374
9118c0cb
JK
4375/**
4376 * finish_fault - finish page fault once we have prepared the page to fault
4377 *
4378 * @vmf: structure describing the fault
4379 *
4380 * This function handles all that is needed to finish a page fault once the
4381 * page to fault in is prepared. It handles locking of PTEs, inserts PTE for
4382 * given page, adds reverse page mapping, handles memcg charges and LRU
a862f68a 4383 * addition.
9118c0cb
JK
4384 *
4385 * The function expects the page to be locked and on success it consumes a
4386 * reference of a page being mapped (for the PTE which maps it).
a862f68a
MR
4387 *
4388 * Return: %0 on success, %VM_FAULT_ code in case of error.
9118c0cb 4389 */
2b740303 4390vm_fault_t finish_fault(struct vm_fault *vmf)
9118c0cb 4391{
f9ce0be7 4392 struct vm_area_struct *vma = vmf->vma;
9118c0cb 4393 struct page *page;
f9ce0be7 4394 vm_fault_t ret;
9118c0cb
JK
4395
4396 /* Did we COW the page? */
f9ce0be7 4397 if ((vmf->flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED))
9118c0cb
JK
4398 page = vmf->cow_page;
4399 else
4400 page = vmf->page;
6b31d595
MH
4401
4402 /*
4403 * check even for read faults because we might have lost our CoWed
4404 * page
4405 */
f9ce0be7
KS
4406 if (!(vma->vm_flags & VM_SHARED)) {
4407 ret = check_stable_address_space(vma->vm_mm);
4408 if (ret)
4409 return ret;
4410 }
4411
4412 if (pmd_none(*vmf->pmd)) {
4413 if (PageTransCompound(page)) {
4414 ret = do_set_pmd(vmf, page);
4415 if (ret != VM_FAULT_FALLBACK)
4416 return ret;
4417 }
4418
03c4f204
QZ
4419 if (vmf->prealloc_pte)
4420 pmd_install(vma->vm_mm, vmf->pmd, &vmf->prealloc_pte);
4421 else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd)))
f9ce0be7
KS
4422 return VM_FAULT_OOM;
4423 }
4424
f9ce0be7
KS
4425 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
4426 vmf->address, &vmf->ptl);
3db82b93
HD
4427 if (!vmf->pte)
4428 return VM_FAULT_NOPAGE;
70427f6e 4429
f9ce0be7 4430 /* Re-check under ptl */
70427f6e 4431 if (likely(!vmf_pte_changed(vmf))) {
3bd786f7 4432 struct folio *folio = page_folio(page);
70427f6e 4433
3bd786f7 4434 set_pte_range(vmf, folio, page, 1, vmf->address);
70427f6e
SA
4435 ret = 0;
4436 } else {
4437 update_mmu_tlb(vma, vmf->address, vmf->pte);
f9ce0be7 4438 ret = VM_FAULT_NOPAGE;
70427f6e 4439 }
f9ce0be7 4440
f9ce0be7 4441 pte_unmap_unlock(vmf->pte, vmf->ptl);
9118c0cb
JK
4442 return ret;
4443}
4444
53d36a56
LS
4445static unsigned long fault_around_pages __read_mostly =
4446 65536 >> PAGE_SHIFT;
a9b0f861 4447
a9b0f861
KS
4448#ifdef CONFIG_DEBUG_FS
4449static int fault_around_bytes_get(void *data, u64 *val)
1592eef0 4450{
53d36a56 4451 *val = fault_around_pages << PAGE_SHIFT;
1592eef0
KS
4452 return 0;
4453}
4454
b4903d6e 4455/*
da391d64
WK
4456 * fault_around_bytes must be rounded down to the nearest page order as it's
4457 * what do_fault_around() expects to see.
b4903d6e 4458 */
a9b0f861 4459static int fault_around_bytes_set(void *data, u64 val)
1592eef0 4460{
a9b0f861 4461 if (val / PAGE_SIZE > PTRS_PER_PTE)
1592eef0 4462 return -EINVAL;
53d36a56
LS
4463
4464 /*
4465 * The minimum value is 1 page, however this results in no fault-around
4466 * at all. See should_fault_around().
4467 */
4468 fault_around_pages = max(rounddown_pow_of_two(val) >> PAGE_SHIFT, 1UL);
4469
1592eef0
KS
4470 return 0;
4471}
0a1345f8 4472DEFINE_DEBUGFS_ATTRIBUTE(fault_around_bytes_fops,
a9b0f861 4473 fault_around_bytes_get, fault_around_bytes_set, "%llu\n");
1592eef0
KS
4474
4475static int __init fault_around_debugfs(void)
4476{
d9f7979c
GKH
4477 debugfs_create_file_unsafe("fault_around_bytes", 0644, NULL, NULL,
4478 &fault_around_bytes_fops);
1592eef0
KS
4479 return 0;
4480}
4481late_initcall(fault_around_debugfs);
1592eef0 4482#endif
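/*
 * Editor's illustrative sketch -- not part of memory.c. Tuning fault-around
 * from userspace through the debugfs file created above. It assumes debugfs
 * is mounted at the usual /sys/kernel/debug location, the kernel has
 * CONFIG_DEBUG_FS, and the process runs as root. Per fault_around_bytes_set()
 * the written value is rounded down to a power of two with a one-page
 * minimum, so e.g. 70000 ends up as 65536 on a 4 KiB-page system.
 */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/fault_around_bytes";
	unsigned long long val;
	FILE *f = fopen(path, "r+");

	if (!f) {
		perror(path);
		return 1;
	}
	if (fscanf(f, "%llu", &val) == 1)
		printf("current fault_around_bytes: %llu\n", val);

	rewind(f);
	fprintf(f, "65536\n");		/* 16 pages with 4 KiB pages */
	fclose(f);
	return 0;
}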
8c6e50b0 4483
1fdb412b
KS
4484/*
 4485 * do_fault_around() tries to map a few pages around the fault address. The hope
4486 * is that the pages will be needed soon and this will lower the number of
4487 * faults to handle.
4488 *
4489 * It uses vm_ops->map_pages() to map the pages, which skips the page if it's
4490 * not ready to be mapped: not up-to-date, locked, etc.
4491 *
9042599e
LS
4492 * This function doesn't cross VMA or page table boundaries, in order to call
4493 * map_pages() and acquire a PTE lock only once.
1fdb412b 4494 *
53d36a56 4495 * fault_around_pages defines how many pages we'll try to map.
da391d64
WK
4496 * do_fault_around() expects it to be set to a power of two less than or equal
4497 * to PTRS_PER_PTE.
1fdb412b 4498 *
da391d64 4499 * The virtual address of the area that we map is naturally aligned to
53d36a56 4500 * fault_around_pages * PAGE_SIZE rounded down to the machine page size
da391d64
WK
4501 * (and therefore to page order). This way it's easier to guarantee
4502 * that we don't cross page table boundaries.
1fdb412b 4503 */
2b740303 4504static vm_fault_t do_fault_around(struct vm_fault *vmf)
8c6e50b0 4505{
53d36a56 4506 pgoff_t nr_pages = READ_ONCE(fault_around_pages);
9042599e
LS
4507 pgoff_t pte_off = pte_index(vmf->address);
4508 /* The page offset of vmf->address within the VMA. */
4509 pgoff_t vma_off = vmf->pgoff - vmf->vma->vm_pgoff;
4510 pgoff_t from_pte, to_pte;
58ef47ef 4511 vm_fault_t ret;
8c6e50b0 4512
9042599e
LS
4513 /* The PTE offset of the start address, clamped to the VMA. */
4514 from_pte = max(ALIGN_DOWN(pte_off, nr_pages),
4515 pte_off - min(pte_off, vma_off));
aecd6f44 4516
9042599e
LS
4517 /* The PTE offset of the end address, clamped to the VMA and PTE. */
4518 to_pte = min3(from_pte + nr_pages, (pgoff_t)PTRS_PER_PTE,
4519 pte_off + vma_pages(vmf->vma) - vma_off) - 1;
8c6e50b0 4520
82b0f8c3 4521 if (pmd_none(*vmf->pmd)) {
4cf58924 4522 vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm);
82b0f8c3 4523 if (!vmf->prealloc_pte)
f9ce0be7 4524 return VM_FAULT_OOM;
8c6e50b0
KS
4525 }
4526
58ef47ef
MWO
4527 rcu_read_lock();
4528 ret = vmf->vma->vm_ops->map_pages(vmf,
4529 vmf->pgoff + from_pte - pte_off,
4530 vmf->pgoff + to_pte - pte_off);
4531 rcu_read_unlock();
4532
4533 return ret;
8c6e50b0
KS
4534}
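/*
 * Editor's illustrative sketch -- not part of memory.c. The window clamping
 * performed by do_fault_around() above, replayed in plain C for one made-up
 * fault. The 4 KiB page size, PTRS_PER_PTE of 512, fault_around_pages of 16
 * and all addresses are assumptions for the demo only.
 */
#include <stdio.h>

#define EX_PAGE_SHIFT	12
#define EX_PTRS_PER_PTE	512UL

static unsigned long ex_min(unsigned long a, unsigned long b) { return a < b ? a : b; }
static unsigned long ex_max(unsigned long a, unsigned long b) { return a > b ? a : b; }

int main(void)
{
	unsigned long nr_pages  = 16;			/* fault_around_pages */
	unsigned long address   = 0x7f0000012000UL;	/* faulting address */
	unsigned long vm_start  = 0x7f0000010000UL;	/* VMA start */
	unsigned long vma_pages = 8;			/* the VMA is only 8 pages long */

	/* pte_index(): slot of the faulting address within its page table. */
	unsigned long pte_off = (address >> EX_PAGE_SHIFT) & (EX_PTRS_PER_PTE - 1);
	/* Page offset of the fault within the VMA. */
	unsigned long vma_off = (address - vm_start) >> EX_PAGE_SHIFT;

	/* Window start: aligned down to nr_pages, clamped to the VMA start. */
	unsigned long from_pte = ex_max(pte_off & ~(nr_pages - 1),
					pte_off - ex_min(pte_off, vma_off));
	/* Window end: clamped to the page table and to the VMA end. */
	unsigned long to_pte = ex_min(ex_min(from_pte + nr_pages, EX_PTRS_PER_PTE),
				      pte_off + vma_pages - vma_off) - 1;

	/* Prints [16, 23]: the short VMA limits the 16-page window to 8 PTEs. */
	printf("fault at pte %lu -> map ptes [%lu, %lu]\n", pte_off, from_pte, to_pte);
	return 0;
}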
4535
9c28a205
PX
4536/* Return true if we should do read fault-around, false otherwise */
4537static inline bool should_fault_around(struct vm_fault *vmf)
4538{
4539 /* No ->map_pages? No way to fault around... */
4540 if (!vmf->vma->vm_ops->map_pages)
4541 return false;
4542
4543 if (uffd_disable_fault_around(vmf->vma))
4544 return false;
4545
53d36a56
LS
4546 /* A single page implies no faulting 'around' at all. */
4547 return fault_around_pages > 1;
9c28a205
PX
4548}
4549
2b740303 4550static vm_fault_t do_read_fault(struct vm_fault *vmf)
e655fb29 4551{
2b740303 4552 vm_fault_t ret = 0;
22d1e68f 4553 struct folio *folio;
8c6e50b0
KS
4554
4555 /*
4556 * Let's call ->map_pages() first and use ->fault() as fallback
4557 * if page by the offset is not ready to be mapped (cold cache or
4558 * something).
4559 */
9c28a205
PX
4560 if (should_fault_around(vmf)) {
4561 ret = do_fault_around(vmf);
4562 if (ret)
4563 return ret;
8c6e50b0 4564 }
e655fb29 4565
f5617ffe
MWO
4566 if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
4567 vma_end_read(vmf->vma);
4568 return VM_FAULT_RETRY;
4569 }
4570
936ca80d 4571 ret = __do_fault(vmf);
e655fb29
KS
4572 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
4573 return ret;
4574
9118c0cb 4575 ret |= finish_fault(vmf);
22d1e68f
SK
4576 folio = page_folio(vmf->page);
4577 folio_unlock(folio);
7267ec00 4578 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
22d1e68f 4579 folio_put(folio);
e655fb29
KS
4580 return ret;
4581}
4582
2b740303 4583static vm_fault_t do_cow_fault(struct vm_fault *vmf)
ec47c3b9 4584{
82b0f8c3 4585 struct vm_area_struct *vma = vmf->vma;
2b740303 4586 vm_fault_t ret;
ec47c3b9 4587
61a4b8d3
MWO
4588 if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
4589 vma_end_read(vma);
4590 return VM_FAULT_RETRY;
4591 }
4592
ec47c3b9
KS
4593 if (unlikely(anon_vma_prepare(vma)))
4594 return VM_FAULT_OOM;
4595
936ca80d
JK
4596 vmf->cow_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address);
4597 if (!vmf->cow_page)
ec47c3b9
KS
4598 return VM_FAULT_OOM;
4599
8f425e4e
MWO
4600 if (mem_cgroup_charge(page_folio(vmf->cow_page), vma->vm_mm,
4601 GFP_KERNEL)) {
936ca80d 4602 put_page(vmf->cow_page);
ec47c3b9
KS
4603 return VM_FAULT_OOM;
4604 }
68fa572b 4605 folio_throttle_swaprate(page_folio(vmf->cow_page), GFP_KERNEL);
ec47c3b9 4606
936ca80d 4607 ret = __do_fault(vmf);
ec47c3b9
KS
4608 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
4609 goto uncharge_out;
3917048d
JK
4610 if (ret & VM_FAULT_DONE_COW)
4611 return ret;
ec47c3b9 4612
b1aa812b 4613 copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma);
936ca80d 4614 __SetPageUptodate(vmf->cow_page);
ec47c3b9 4615
9118c0cb 4616 ret |= finish_fault(vmf);
b1aa812b
JK
4617 unlock_page(vmf->page);
4618 put_page(vmf->page);
7267ec00
KS
4619 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
4620 goto uncharge_out;
ec47c3b9
KS
4621 return ret;
4622uncharge_out:
936ca80d 4623 put_page(vmf->cow_page);
ec47c3b9
KS
4624 return ret;
4625}
4626
2b740303 4627static vm_fault_t do_shared_fault(struct vm_fault *vmf)
1da177e4 4628{
82b0f8c3 4629 struct vm_area_struct *vma = vmf->vma;
2b740303 4630 vm_fault_t ret, tmp;
6f609b7e 4631 struct folio *folio;
1d65f86d 4632
61a4b8d3
MWO
4633 if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
4634 vma_end_read(vma);
4635 return VM_FAULT_RETRY;
4636 }
1d65f86d 4637
936ca80d 4638 ret = __do_fault(vmf);
7eae74af 4639 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
f0c6d4d2 4640 return ret;
1da177e4 4641
6f609b7e
SK
4642 folio = page_folio(vmf->page);
4643
1da177e4 4644 /*
f0c6d4d2
KS
4645 * Check if the backing address space wants to know that the page is
4646 * about to become writable
1da177e4 4647 */
fb09a464 4648 if (vma->vm_ops->page_mkwrite) {
6f609b7e 4649 folio_unlock(folio);
86aa6998 4650 tmp = do_page_mkwrite(vmf, folio);
fb09a464
KS
4651 if (unlikely(!tmp ||
4652 (tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
6f609b7e 4653 folio_put(folio);
fb09a464 4654 return tmp;
4294621f 4655 }
fb09a464
KS
4656 }
4657
9118c0cb 4658 ret |= finish_fault(vmf);
7267ec00
KS
4659 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE |
4660 VM_FAULT_RETRY))) {
6f609b7e
SK
4661 folio_unlock(folio);
4662 folio_put(folio);
f0c6d4d2 4663 return ret;
1da177e4 4664 }
b827e496 4665
89b15332 4666 ret |= fault_dirty_shared_page(vmf);
1d65f86d 4667 return ret;
54cb8821 4668}
d00806b1 4669
9a95f3cf 4670/*
c1e8d7c6 4671 * We enter with non-exclusive mmap_lock (to exclude vma changes,
9a95f3cf 4672 * but allow concurrent faults).
c1e8d7c6 4673 * The mmap_lock may have been released depending on flags and our
9138e47e 4674 * return value. See filemap_fault() and __folio_lock_or_retry().
c1e8d7c6 4675 * If mmap_lock is released, vma may become invalid (for example
fc8efd2d 4676 * by other thread calling munmap()).
9a95f3cf 4677 */
2b740303 4678static vm_fault_t do_fault(struct vm_fault *vmf)
54cb8821 4679{
82b0f8c3 4680 struct vm_area_struct *vma = vmf->vma;
fc8efd2d 4681 struct mm_struct *vm_mm = vma->vm_mm;
2b740303 4682 vm_fault_t ret;
54cb8821 4683
ff09d7ec
AK
4684 /*
4685 * The VMA was not fully populated on mmap() or missing VM_DONTEXPAND
4686 */
4687 if (!vma->vm_ops->fault) {
3db82b93
HD
4688 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
4689 vmf->address, &vmf->ptl);
4690 if (unlikely(!vmf->pte))
ff09d7ec
AK
4691 ret = VM_FAULT_SIGBUS;
4692 else {
ff09d7ec
AK
4693 /*
4694 * Make sure this is not a temporary clearing of pte
4695 * by holding ptl and checking again. A R/M/W update
4696 * of pte involves: take ptl, clearing the pte so that
4697 * we don't have concurrent modification by hardware
4698 * followed by an update.
4699 */
c33c7948 4700 if (unlikely(pte_none(ptep_get(vmf->pte))))
ff09d7ec
AK
4701 ret = VM_FAULT_SIGBUS;
4702 else
4703 ret = VM_FAULT_NOPAGE;
4704
4705 pte_unmap_unlock(vmf->pte, vmf->ptl);
4706 }
4707 } else if (!(vmf->flags & FAULT_FLAG_WRITE))
b0b9b3df
HD
4708 ret = do_read_fault(vmf);
4709 else if (!(vma->vm_flags & VM_SHARED))
4710 ret = do_cow_fault(vmf);
4711 else
4712 ret = do_shared_fault(vmf);
4713
4714 /* preallocated pagetable is unused: free it */
4715 if (vmf->prealloc_pte) {
fc8efd2d 4716 pte_free(vm_mm, vmf->prealloc_pte);
7f2b6ce8 4717 vmf->prealloc_pte = NULL;
b0b9b3df
HD
4718 }
4719 return ret;
54cb8821
NP
4720}
4721
f4c0d836
YS
4722int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
4723 unsigned long addr, int page_nid, int *flags)
9532fec1
MG
4724{
4725 get_page(page);
4726
fc137c0d
R
 4727	/* Record the current PID accessing the VMA */
4728 vma_set_access_pid_bit(vma);
4729
9532fec1 4730 count_vm_numa_event(NUMA_HINT_FAULTS);
04bb2f94 4731 if (page_nid == numa_node_id()) {
9532fec1 4732 count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
04bb2f94
RR
4733 *flags |= TNF_FAULT_LOCAL;
4734 }
9532fec1
MG
4735
4736 return mpol_misplaced(page, vma, addr);
4737}
4738
2b740303 4739static vm_fault_t do_numa_page(struct vm_fault *vmf)
d10e63f2 4740{
82b0f8c3 4741 struct vm_area_struct *vma = vmf->vma;
4daae3b4 4742 struct page *page = NULL;
98fa15f3 4743 int page_nid = NUMA_NO_NODE;
6a56ccbc 4744 bool writable = false;
90572890 4745 int last_cpupid;
cbee9f88 4746 int target_nid;
04a86453 4747 pte_t pte, old_pte;
6688cc05 4748 int flags = 0;
d10e63f2
MG
4749
4750 /*
166f61b9
TH
4751 * The "pte" at this point cannot be used safely without
4752 * validation through pte_unmap_same(). It's of NUMA type but
4753 * the pfn may be bogus if the read is not atomic.
166f61b9 4754 */
82b0f8c3 4755 spin_lock(vmf->ptl);
c33c7948 4756 if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
82b0f8c3 4757 pte_unmap_unlock(vmf->pte, vmf->ptl);
4daae3b4
MG
4758 goto out;
4759 }
4760
b99a342d
YH
4761 /* Get the normal PTE */
4762 old_pte = ptep_get(vmf->pte);
04a86453 4763 pte = pte_modify(old_pte, vma->vm_page_prot);
d10e63f2 4764
6a56ccbc
DH
4765 /*
4766 * Detect now whether the PTE could be writable; this information
4767 * is only valid while holding the PT lock.
4768 */
4769 writable = pte_write(pte);
4770 if (!writable && vma_wants_manual_pte_write_upgrade(vma) &&
4771 can_change_pte_writable(vma, vmf->address, pte))
4772 writable = true;
4773
82b0f8c3 4774 page = vm_normal_page(vma, vmf->address, pte);
3218f871 4775 if (!page || is_zone_device_page(page))
b99a342d 4776 goto out_map;
d10e63f2 4777
e81c4802 4778 /* TODO: handle PTE-mapped THP */
b99a342d
YH
4779 if (PageCompound(page))
4780 goto out_map;
e81c4802 4781
6688cc05 4782 /*
bea66fbd
MG
4783 * Avoid grouping on RO pages in general. RO pages shouldn't hurt as
4784 * much anyway since they can be in shared cache state. This misses
4785 * the case where a mapping is writable but the process never writes
4786 * to it and pte_write gets cleared during protection updates, while
4787 * pte_dirty behaves unpredictably across PTE scan updates,
4788 * background writeback, dirty balancing and application behaviour.
6688cc05 4789 */
6a56ccbc 4790 if (!writable)
6688cc05
PZ
4791 flags |= TNF_NO_GROUP;
4792
dabe1d99
RR
4793 /*
4794 * Flag if the page is shared between multiple address spaces. This
4795 * is later used when determining whether to group tasks together
4796 */
4797 if (page_mapcount(page) > 1 && (vma->vm_flags & VM_SHARED))
4798 flags |= TNF_SHARED;
4799
8191acbd 4800 page_nid = page_to_nid(page);
33024536
YH
4801 /*
4802 * In memory tiering mode, the cpupid field of a slow-memory page is
4803 * used to record the page access time, so use the default value here.
4804 */
4805 if ((sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) &&
4806 !node_is_toptier(page_nid))
4807 last_cpupid = (-1 & LAST_CPUPID_MASK);
4808 else
4809 last_cpupid = page_cpupid_last(page);
82b0f8c3 4810 target_nid = numa_migrate_prep(page, vma, vmf->address, page_nid,
bae473a4 4811 &flags);
98fa15f3 4812 if (target_nid == NUMA_NO_NODE) {
4daae3b4 4813 put_page(page);
b99a342d 4814 goto out_map;
4daae3b4 4815 }
b99a342d 4816 pte_unmap_unlock(vmf->pte, vmf->ptl);
6a56ccbc 4817 writable = false;
4daae3b4
MG
4818
4819 /* Migrate to the requested node */
bf90ac19 4820 if (migrate_misplaced_page(page, vma, target_nid)) {
8191acbd 4821 page_nid = target_nid;
6688cc05 4822 flags |= TNF_MIGRATED;
b99a342d 4823 } else {
074c2381 4824 flags |= TNF_MIGRATE_FAIL;
c7ad0880
HD
4825 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
4826 vmf->address, &vmf->ptl);
4827 if (unlikely(!vmf->pte))
4828 goto out;
c33c7948 4829 if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
b99a342d
YH
4830 pte_unmap_unlock(vmf->pte, vmf->ptl);
4831 goto out;
4832 }
4833 goto out_map;
4834 }
4daae3b4
MG
4835
4836out:
98fa15f3 4837 if (page_nid != NUMA_NO_NODE)
6688cc05 4838 task_numa_fault(last_cpupid, page_nid, 1, flags);
d10e63f2 4839 return 0;
b99a342d
YH
4840out_map:
4841 /*
4842 * Make it present again. Depending on how the arch implements
4843 * non-accessible ptes, some may still allow access from kernel mode.
4844 */
4845 old_pte = ptep_modify_prot_start(vma, vmf->address, vmf->pte);
4846 pte = pte_modify(old_pte, vma->vm_page_prot);
4847 pte = pte_mkyoung(pte);
6a56ccbc 4848 if (writable)
161e393c 4849 pte = pte_mkwrite(pte, vma);
b99a342d 4850 ptep_modify_prot_commit(vma, vmf->address, vmf->pte, old_pte, pte);
5003a2bd 4851 update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
b99a342d
YH
4852 pte_unmap_unlock(vmf->pte, vmf->ptl);
4853 goto out;
d10e63f2
MG
4854}
4855
2b740303 4856static inline vm_fault_t create_huge_pmd(struct vm_fault *vmf)
b96375f7 4857{
8f5fd0e1
MWO
4858 struct vm_area_struct *vma = vmf->vma;
4859 if (vma_is_anonymous(vma))
82b0f8c3 4860 return do_huge_pmd_anonymous_page(vmf);
40d49a3c 4861 if (vma->vm_ops->huge_fault)
1d024e7a 4862 return vma->vm_ops->huge_fault(vmf, PMD_ORDER);
b96375f7
MW
4863 return VM_FAULT_FALLBACK;
4864}
4865
183f24aa 4866/* `inline' is required to avoid gcc 4.1.2 build error */
5db4f15c 4867static inline vm_fault_t wp_huge_pmd(struct vm_fault *vmf)
b96375f7 4868{
8f5fd0e1 4869 struct vm_area_struct *vma = vmf->vma;
c89357e2 4870 const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
aea06577 4871 vm_fault_t ret;
c89357e2 4872
8f5fd0e1 4873 if (vma_is_anonymous(vma)) {
c89357e2 4874 if (likely(!unshare) &&
8f5fd0e1 4875 userfaultfd_huge_pmd_wp(vma, vmf->orig_pmd))
529b930b 4876 return handle_userfault(vmf, VM_UFFD_WP);
5db4f15c 4877 return do_huge_pmd_wp_page(vmf);
529b930b 4878 }
327e9fd4 4879
8f5fd0e1
MWO
4880 if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) {
4881 if (vma->vm_ops->huge_fault) {
1d024e7a 4882 ret = vma->vm_ops->huge_fault(vmf, PMD_ORDER);
aea06577
DH
4883 if (!(ret & VM_FAULT_FALLBACK))
4884 return ret;
4885 }
327e9fd4 4886 }
af9e4d5f 4887
327e9fd4 4888 /* COW or write-notify handled on pte level: split pmd. */
8f5fd0e1 4889 __split_huge_pmd(vma, vmf->pmd, vmf->address, false, NULL);
af9e4d5f 4890
b96375f7
MW
4891 return VM_FAULT_FALLBACK;
4892}
4893
2b740303 4894static vm_fault_t create_huge_pud(struct vm_fault *vmf)
a00cc7d9 4895{
14c99d65
GJ
4896#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
4897 defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
c4fd825e 4898 struct vm_area_struct *vma = vmf->vma;
14c99d65 4899 /* No support for anonymous transparent PUD pages yet */
c4fd825e 4900 if (vma_is_anonymous(vma))
14c99d65 4901 return VM_FAULT_FALLBACK;
40d49a3c 4902 if (vma->vm_ops->huge_fault)
1d024e7a 4903 return vma->vm_ops->huge_fault(vmf, PUD_ORDER);
14c99d65
GJ
4904#endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
4905 return VM_FAULT_FALLBACK;
4906}
4907
4908static vm_fault_t wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud)
4909{
327e9fd4
THV
4910#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
4911 defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
c4fd825e 4912 struct vm_area_struct *vma = vmf->vma;
aea06577
DH
4913 vm_fault_t ret;
4914
a00cc7d9 4915 /* No support for anonymous transparent PUD pages yet */
c4fd825e 4916 if (vma_is_anonymous(vma))
327e9fd4 4917 goto split;
c4fd825e
MWO
4918 if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) {
4919 if (vma->vm_ops->huge_fault) {
1d024e7a 4920 ret = vma->vm_ops->huge_fault(vmf, PUD_ORDER);
aea06577
DH
4921 if (!(ret & VM_FAULT_FALLBACK))
4922 return ret;
4923 }
327e9fd4
THV
4924 }
4925split:
 4926 /* COW or write-notify not handled on PUD level: split pud. */
c4fd825e 4927 __split_huge_pud(vma, vmf->pud, vmf->address);
14c99d65 4928#endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
a00cc7d9
MW
4929 return VM_FAULT_FALLBACK;
4930}
4931
1da177e4
LT
4932/*
4933 * These routines also need to handle stuff like marking pages dirty
4934 * and/or accessed for architectures that don't do it in hardware (most
4935 * RISC architectures). The early dirtying is also good on the i386.
4936 *
4937 * There is also a hook called "update_mmu_cache()" that architectures
4938 * with external mmu caches can use to update those (ie the Sparc or
4939 * PowerPC hashed page tables that act as extended TLBs).
4940 *
c1e8d7c6 4941 * We enter with non-exclusive mmap_lock (to exclude vma changes, but allow
7267ec00 4942 * concurrent faults).
9a95f3cf 4943 *
c1e8d7c6 4944 * The mmap_lock may have been released depending on flags and our return value.
9138e47e 4945 * See filemap_fault() and __folio_lock_or_retry().
1da177e4 4946 */
2b740303 4947static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
1da177e4
LT
4948{
4949 pte_t entry;
4950
82b0f8c3 4951 if (unlikely(pmd_none(*vmf->pmd))) {
7267ec00
KS
4952 /*
4953 * Leave __pte_alloc() until later: because vm_ops->fault may
4954 * want to allocate a huge page, and if we expose the page table
4955 * for an instant, it will be difficult to retract it from
4956 * concurrent faults and from rmap lookups.
4957 */
82b0f8c3 4958 vmf->pte = NULL;
f46f2ade 4959 vmf->flags &= ~FAULT_FLAG_ORIG_PTE_VALID;
7267ec00 4960 } else {
7267ec00
KS
4961 /*
4962 * A regular pmd is established and it can't morph into a huge
c7ad0880
HD
4963 * pmd by anon khugepaged, since that takes mmap_lock in write
4964 * mode; but shmem or file collapse to THP could still morph
4965 * it into a huge pmd: just retry later if so.
7267ec00 4966 */
c7ad0880
HD
4967 vmf->pte = pte_offset_map_nolock(vmf->vma->vm_mm, vmf->pmd,
4968 vmf->address, &vmf->ptl);
4969 if (unlikely(!vmf->pte))
4970 return 0;
26e1a0c3 4971 vmf->orig_pte = ptep_get_lockless(vmf->pte);
f46f2ade 4972 vmf->flags |= FAULT_FLAG_ORIG_PTE_VALID;
7267ec00 4973
2994302b 4974 if (pte_none(vmf->orig_pte)) {
82b0f8c3
JK
4975 pte_unmap(vmf->pte);
4976 vmf->pte = NULL;
65500d23 4977 }
1da177e4
LT
4978 }
4979
2bad466c
PX
4980 if (!vmf->pte)
4981 return do_pte_missing(vmf);
7267ec00 4982
2994302b
JK
4983 if (!pte_present(vmf->orig_pte))
4984 return do_swap_page(vmf);
7267ec00 4985
2994302b
JK
4986 if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma))
4987 return do_numa_page(vmf);
d10e63f2 4988
82b0f8c3 4989 spin_lock(vmf->ptl);
2994302b 4990 entry = vmf->orig_pte;
c33c7948 4991 if (unlikely(!pte_same(ptep_get(vmf->pte), entry))) {
7df67697 4992 update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
8f4e2101 4993 goto unlock;
7df67697 4994 }
c89357e2 4995 if (vmf->flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) {
f6f37321 4996 if (!pte_write(entry))
2994302b 4997 return do_wp_page(vmf);
c89357e2
DH
4998 else if (likely(vmf->flags & FAULT_FLAG_WRITE))
4999 entry = pte_mkdirty(entry);
1da177e4
LT
5000 }
5001 entry = pte_mkyoung(entry);
82b0f8c3
JK
5002 if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry,
5003 vmf->flags & FAULT_FLAG_WRITE)) {
5003a2bd
MWO
5004 update_mmu_cache_range(vmf, vmf->vma, vmf->address,
5005 vmf->pte, 1);
1a44e149 5006 } else {
b7333b58
YS
5007 /* Skip spurious TLB flush for retried page fault */
5008 if (vmf->flags & FAULT_FLAG_TRIED)
5009 goto unlock;
1a44e149
AA
5010 /*
5011 * This is needed only for protection faults but the arch code
5012 * is not yet telling us if this is a protection fault or not.
5013 * This still avoids useless tlb flushes for .text page faults
5014 * with threads.
5015 */
82b0f8c3 5016 if (vmf->flags & FAULT_FLAG_WRITE)
99c29133
GS
5017 flush_tlb_fix_spurious_fault(vmf->vma, vmf->address,
5018 vmf->pte);
1a44e149 5019 }
8f4e2101 5020unlock:
82b0f8c3 5021 pte_unmap_unlock(vmf->pte, vmf->ptl);
83c54070 5022 return 0;
1da177e4
LT
5023}
5024
5025/*
4ec31152
MWO
5026 * On entry, we hold either the VMA lock or the mmap_lock
5027 * (FAULT_FLAG_VMA_LOCK tells you which). If VM_FAULT_RETRY is set in
5028 * the result, the mmap_lock is not held on exit. See filemap_fault()
5029 * and __folio_lock_or_retry().
1da177e4 5030 */
2b740303
SJ
5031static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
5032 unsigned long address, unsigned int flags)
1da177e4 5033{
82b0f8c3 5034 struct vm_fault vmf = {
bae473a4 5035 .vma = vma,
1a29d85e 5036 .address = address & PAGE_MASK,
824ddc60 5037 .real_address = address,
bae473a4 5038 .flags = flags,
0721ec8b 5039 .pgoff = linear_page_index(vma, address),
667240e0 5040 .gfp_mask = __get_fault_gfp_mask(vma),
bae473a4 5041 };
dcddffd4 5042 struct mm_struct *mm = vma->vm_mm;
7da4e2cb 5043 unsigned long vm_flags = vma->vm_flags;
1da177e4 5044 pgd_t *pgd;
c2febafc 5045 p4d_t *p4d;
2b740303 5046 vm_fault_t ret;
1da177e4 5047
1da177e4 5048 pgd = pgd_offset(mm, address);
c2febafc
KS
5049 p4d = p4d_alloc(mm, pgd, address);
5050 if (!p4d)
5051 return VM_FAULT_OOM;
a00cc7d9 5052
c2febafc 5053 vmf.pud = pud_alloc(mm, p4d, address);
a00cc7d9 5054 if (!vmf.pud)
c74df32c 5055 return VM_FAULT_OOM;
625110b5 5056retry_pud:
7da4e2cb 5057 if (pud_none(*vmf.pud) &&
a7f4e6e4 5058 hugepage_vma_check(vma, vm_flags, false, true, true)) {
a00cc7d9
MW
5059 ret = create_huge_pud(&vmf);
5060 if (!(ret & VM_FAULT_FALLBACK))
5061 return ret;
5062 } else {
5063 pud_t orig_pud = *vmf.pud;
5064
5065 barrier();
5066 if (pud_trans_huge(orig_pud) || pud_devmap(orig_pud)) {
a00cc7d9 5067
c89357e2
DH
5068 /*
5069 * TODO once we support anonymous PUDs: NUMA case and
5070 * FAULT_FLAG_UNSHARE handling.
5071 */
5072 if ((flags & FAULT_FLAG_WRITE) && !pud_write(orig_pud)) {
a00cc7d9
MW
5073 ret = wp_huge_pud(&vmf, orig_pud);
5074 if (!(ret & VM_FAULT_FALLBACK))
5075 return ret;
5076 } else {
5077 huge_pud_set_accessed(&vmf, orig_pud);
5078 return 0;
5079 }
5080 }
5081 }
5082
5083 vmf.pmd = pmd_alloc(mm, vmf.pud, address);
82b0f8c3 5084 if (!vmf.pmd)
c74df32c 5085 return VM_FAULT_OOM;
625110b5
TH
5086
5087 /* Huge pud page fault raced with pmd_alloc? */
5088 if (pud_trans_unstable(vmf.pud))
5089 goto retry_pud;
5090
7da4e2cb 5091 if (pmd_none(*vmf.pmd) &&
a7f4e6e4 5092 hugepage_vma_check(vma, vm_flags, false, true, true)) {
a2d58167 5093 ret = create_huge_pmd(&vmf);
c0292554
KS
5094 if (!(ret & VM_FAULT_FALLBACK))
5095 return ret;
71e3aac0 5096 } else {
26e1a0c3 5097 vmf.orig_pmd = pmdp_get_lockless(vmf.pmd);
1f1d06c3 5098
5db4f15c 5099 if (unlikely(is_swap_pmd(vmf.orig_pmd))) {
84c3fc4e 5100 VM_BUG_ON(thp_migration_supported() &&
5db4f15c
YS
5101 !is_pmd_migration_entry(vmf.orig_pmd));
5102 if (is_pmd_migration_entry(vmf.orig_pmd))
84c3fc4e
ZY
5103 pmd_migration_entry_wait(mm, vmf.pmd);
5104 return 0;
5105 }
5db4f15c
YS
5106 if (pmd_trans_huge(vmf.orig_pmd) || pmd_devmap(vmf.orig_pmd)) {
5107 if (pmd_protnone(vmf.orig_pmd) && vma_is_accessible(vma))
5108 return do_huge_pmd_numa_page(&vmf);
d10e63f2 5109
c89357e2
DH
5110 if ((flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) &&
5111 !pmd_write(vmf.orig_pmd)) {
5db4f15c 5112 ret = wp_huge_pmd(&vmf);
9845cbbd
KS
5113 if (!(ret & VM_FAULT_FALLBACK))
5114 return ret;
a1dd450b 5115 } else {
5db4f15c 5116 huge_pmd_set_accessed(&vmf);
9845cbbd 5117 return 0;
1f1d06c3 5118 }
71e3aac0
AA
5119 }
5120 }
5121
82b0f8c3 5122 return handle_pte_fault(&vmf);
1da177e4
LT
5123}
5124
bce617ed 5125/**
f0953a1b 5126 * mm_account_fault - Do page fault accounting
809ef83c 5127 * @mm: mm from which memcg should be extracted. It can be NULL.
bce617ed
PX
5128 * @regs: the pt_regs struct pointer. When NULL, perf event counter
5129 * accounting is skipped, but we still do the per-task accounting for
5130 * the task that triggered this page fault.
5131 * @address: the faulted address.
5132 * @flags: the fault flags.
5133 * @ret: the fault retcode.
5134 *
f0953a1b 5135 * This will take care of most of the page fault accounting. Meanwhile, it
bce617ed 5136 * will also include the PERF_COUNT_SW_PAGE_FAULTS_[MAJ|MIN] perf counter
f0953a1b 5137 * updates. However, note that the handling of PERF_COUNT_SW_PAGE_FAULTS should
bce617ed
PX
5138 * still be in per-arch page fault handlers at the entry of page fault.
5139 */
53156443 5140static inline void mm_account_fault(struct mm_struct *mm, struct pt_regs *regs,
bce617ed
PX
5141 unsigned long address, unsigned int flags,
5142 vm_fault_t ret)
5143{
5144 bool major;
5145
53156443
SB
5146 /* Incomplete faults will be accounted upon completion. */
5147 if (ret & VM_FAULT_RETRY)
5148 return;
5149
bce617ed 5150 /*
53156443
SB
5151 * To preserve the behavior of older kernels, PGFAULT counters record
5152 * both successful and failed faults, as opposed to perf counters,
5153 * which ignore failed cases.
bce617ed 5154 */
53156443
SB
5155 count_vm_event(PGFAULT);
5156 count_memcg_event_mm(mm, PGFAULT);
5157
5158 /*
5159 * Do not account for unsuccessful faults (e.g. when the address wasn't
5160 * valid). That includes arch_vma_access_permitted() failing before
5161 * reaching here. So this is not a "this many hardware page faults"
5162 * counter. We should use the hw profiling for that.
5163 */
5164 if (ret & VM_FAULT_ERROR)
bce617ed
PX
5165 return;
5166
5167 /*
5168 * We define the fault as a major fault when the final successful fault
5169 * is VM_FAULT_MAJOR, or if it retried (which implies that we couldn't
5170 * handle it immediately previously).
5171 */
5172 major = (ret & VM_FAULT_MAJOR) || (flags & FAULT_FLAG_TRIED);
5173
a2beb5f1
PX
5174 if (major)
5175 current->maj_flt++;
5176 else
5177 current->min_flt++;
5178
bce617ed 5179 /*
a2beb5f1
PX
5180 * If the fault is done for GUP, regs will be NULL. We only do the
5181 * accounting for the per thread fault counters who triggered the
5182 * fault, and we skip the perf event updates.
bce617ed
PX
5183 */
5184 if (!regs)
5185 return;
5186
a2beb5f1 5187 if (major)
bce617ed 5188 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
a2beb5f1 5189 else
bce617ed 5190 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
bce617ed
PX
5191}
5192
ec1c86b2
YZ
5193#ifdef CONFIG_LRU_GEN
5194static void lru_gen_enter_fault(struct vm_area_struct *vma)
5195{
8788f678
YZ
5196 /* the LRU algorithm only applies to accesses with recency */
5197 current->in_lru_fault = vma_has_recency(vma);
ec1c86b2
YZ
5198}
5199
5200static void lru_gen_exit_fault(void)
5201{
5202 current->in_lru_fault = false;
5203}
5204#else
5205static void lru_gen_enter_fault(struct vm_area_struct *vma)
5206{
5207}
5208
5209static void lru_gen_exit_fault(void)
5210{
5211}
5212#endif /* CONFIG_LRU_GEN */
5213
cdc5021c
DH
5214static vm_fault_t sanitize_fault_flags(struct vm_area_struct *vma,
5215 unsigned int *flags)
5216{
5217 if (unlikely(*flags & FAULT_FLAG_UNSHARE)) {
5218 if (WARN_ON_ONCE(*flags & FAULT_FLAG_WRITE))
5219 return VM_FAULT_SIGSEGV;
5220 /*
5221 * FAULT_FLAG_UNSHARE only applies to COW mappings. Let's
5222 * just treat it like an ordinary read-fault otherwise.
5223 */
5224 if (!is_cow_mapping(vma->vm_flags))
5225 *flags &= ~FAULT_FLAG_UNSHARE;
79881fed
DH
5226 } else if (*flags & FAULT_FLAG_WRITE) {
5227 /* Write faults on read-only mappings are impossible ... */
5228 if (WARN_ON_ONCE(!(vma->vm_flags & VM_MAYWRITE)))
5229 return VM_FAULT_SIGSEGV;
5230 /* ... and FOLL_FORCE only applies to COW mappings. */
5231 if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE) &&
5232 !is_cow_mapping(vma->vm_flags)))
5233 return VM_FAULT_SIGSEGV;
cdc5021c 5234 }
4089eef0
SB
5235#ifdef CONFIG_PER_VMA_LOCK
5236 /*
5237 * Per-VMA locks can't be used with FAULT_FLAG_RETRY_NOWAIT because of
5238 * the assumption that lock is dropped on VM_FAULT_RETRY.
5239 */
5240 if (WARN_ON_ONCE((*flags &
5241 (FAULT_FLAG_VMA_LOCK | FAULT_FLAG_RETRY_NOWAIT)) ==
5242 (FAULT_FLAG_VMA_LOCK | FAULT_FLAG_RETRY_NOWAIT)))
5243 return VM_FAULT_SIGSEGV;
5244#endif
5245
cdc5021c
DH
5246 return 0;
5247}
5248
9a95f3cf
PC
5249/*
5250 * By the time we get here, we already hold the mm semaphore
5251 *
c1e8d7c6 5252 * The mmap_lock may have been released depending on flags and our
9138e47e 5253 * return value. See filemap_fault() and __folio_lock_or_retry().
9a95f3cf 5254 */
2b740303 5255vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
bce617ed 5256 unsigned int flags, struct pt_regs *regs)
519e5247 5257{
53156443
SB
5258 /* If the fault handler drops the mmap_lock, vma may be freed */
5259 struct mm_struct *mm = vma->vm_mm;
2b740303 5260 vm_fault_t ret;
519e5247
JW
5261
5262 __set_current_state(TASK_RUNNING);
5263
cdc5021c
DH
5264 ret = sanitize_fault_flags(vma, &flags);
5265 if (ret)
53156443 5266 goto out;
cdc5021c 5267
de0c799b
LD
5268 if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE,
5269 flags & FAULT_FLAG_INSTRUCTION,
53156443
SB
5270 flags & FAULT_FLAG_REMOTE)) {
5271 ret = VM_FAULT_SIGSEGV;
5272 goto out;
5273 }
de0c799b 5274
519e5247
JW
5275 /*
5276 * Enable the memcg OOM handling for faults triggered in user
5277 * space. Kernel faults are handled more gracefully.
5278 */
5279 if (flags & FAULT_FLAG_USER)
29ef680a 5280 mem_cgroup_enter_user_fault();
519e5247 5281
ec1c86b2
YZ
5282 lru_gen_enter_fault(vma);
5283
bae473a4
KS
5284 if (unlikely(is_vm_hugetlb_page(vma)))
5285 ret = hugetlb_fault(vma->vm_mm, vma, address, flags);
5286 else
5287 ret = __handle_mm_fault(vma, address, flags);
519e5247 5288
ec1c86b2
YZ
5289 lru_gen_exit_fault();
5290
49426420 5291 if (flags & FAULT_FLAG_USER) {
29ef680a 5292 mem_cgroup_exit_user_fault();
166f61b9
TH
5293 /*
5294 * The task may have entered a memcg OOM situation but
5295 * if the allocation error was handled gracefully (no
5296 * VM_FAULT_OOM), there is no need to kill anything.
5297 * Just clean up the OOM state peacefully.
5298 */
5299 if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
5300 mem_cgroup_oom_synchronize(false);
49426420 5301 }
53156443
SB
5302out:
5303 mm_account_fault(mm, regs, address, flags, ret);
bce617ed 5304
519e5247
JW
5305 return ret;
5306}
e1d6d01a 5307EXPORT_SYMBOL_GPL(handle_mm_fault);
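/*
 * A minimal usage sketch (not part of this file): fault in a single user
 * address from a context that holds no locks yet, roughly following the
 * fixup_user_fault()/GUP calling convention. The function name and flag
 * choice are assumptions for illustration; real callers also check the
 * VMA's access permissions first, retry with FAULT_FLAG_TRIED and handle
 * pending signals.
 */
static vm_fault_t example_fault_in_address(struct mm_struct *mm,
					   unsigned long address, bool write)
{
	unsigned int flags = FAULT_FLAG_DEFAULT | (write ? FAULT_FLAG_WRITE : 0);
	struct vm_area_struct *vma;
	vm_fault_t ret;

	mmap_read_lock(mm);
	vma = vma_lookup(mm, address);
	if (!vma) {
		mmap_read_unlock(mm);
		return VM_FAULT_SIGSEGV;
	}

	/* regs == NULL: per-task fault accounting only, no perf events */
	ret = handle_mm_fault(vma, address, flags, NULL);

	/* VM_FAULT_RETRY and VM_FAULT_COMPLETED mean the lock is already gone */
	if (!(ret & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
		mmap_read_unlock(mm);
	return ret;
}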
519e5247 5308
c2508ec5
LT
5309#ifdef CONFIG_LOCK_MM_AND_FIND_VMA
5310#include <linux/extable.h>
5311
5312static inline bool get_mmap_lock_carefully(struct mm_struct *mm, struct pt_regs *regs)
5313{
4542057e 5314 if (likely(mmap_read_trylock(mm)))
c2508ec5 5315 return true;
c2508ec5
LT
5316
5317 if (regs && !user_mode(regs)) {
5318 unsigned long ip = instruction_pointer(regs);
5319 if (!search_exception_tables(ip))
5320 return false;
5321 }
5322
eda00472 5323 return !mmap_read_lock_killable(mm);
c2508ec5
LT
5324}
5325
5326static inline bool mmap_upgrade_trylock(struct mm_struct *mm)
5327{
5328 /*
5329 * We don't have this operation yet.
5330 *
5331 * It should be easy enough to do: it's basically an
5332 * atomic_long_try_cmpxchg_acquire()
5333 * from RWSEM_READER_BIAS -> RWSEM_WRITER_LOCKED, but
5334 * it also needs the proper lockdep magic etc.
5335 */
5336 return false;
5337}
5338
5339static inline bool upgrade_mmap_lock_carefully(struct mm_struct *mm, struct pt_regs *regs)
5340{
5341 mmap_read_unlock(mm);
5342 if (regs && !user_mode(regs)) {
5343 unsigned long ip = instruction_pointer(regs);
5344 if (!search_exception_tables(ip))
5345 return false;
5346 }
eda00472 5347 return !mmap_write_lock_killable(mm);
c2508ec5
LT
5348}
5349
5350/*
5351 * Helper for page fault handling.
5352 *
5353 * This is kind of equivalent to "mmap_read_lock()" followed
5354 * by "find_extend_vma()", except it's a lot more careful about
5355 * the locking (and will drop the lock on failure).
5356 *
5357 * For example, if we have a kernel bug that causes a page
5358 * fault, we don't want to just use mmap_read_lock() to get
5359 * the mm lock, because that would deadlock if the bug were
5360 * to happen while we're holding the mm lock for writing.
5361 *
5362 * So this checks the exception tables on kernel faults in
5363 * order to only do this all for instructions that are actually
5364 * expected to fault.
5365 *
5366 * We can also actually take the mm lock for writing if we
5367 * need to extend the vma, which helps the VM layer a lot.
5368 */
5369struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm,
5370 unsigned long addr, struct pt_regs *regs)
5371{
5372 struct vm_area_struct *vma;
5373
5374 if (!get_mmap_lock_carefully(mm, regs))
5375 return NULL;
5376
5377 vma = find_vma(mm, addr);
5378 if (likely(vma && (vma->vm_start <= addr)))
5379 return vma;
5380
5381 /*
5382 * Well, dang. We might still be successful, but only
5383 * if we can extend a vma to do so.
5384 */
5385 if (!vma || !(vma->vm_flags & VM_GROWSDOWN)) {
5386 mmap_read_unlock(mm);
5387 return NULL;
5388 }
5389
5390 /*
5391 * We can try to upgrade the mmap lock atomically,
5392 * in which case we can continue to use the vma
5393 * we already looked up.
5394 *
5395 * Otherwise we'll have to drop the mmap lock and
5396 * re-take it, and also look up the vma again,
5397 * re-checking it.
5398 */
5399 if (!mmap_upgrade_trylock(mm)) {
5400 if (!upgrade_mmap_lock_carefully(mm, regs))
5401 return NULL;
5402
5403 vma = find_vma(mm, addr);
5404 if (!vma)
5405 goto fail;
5406 if (vma->vm_start <= addr)
5407 goto success;
5408 if (!(vma->vm_flags & VM_GROWSDOWN))
5409 goto fail;
5410 }
5411
8d7071af 5412 if (expand_stack_locked(vma, addr))
c2508ec5
LT
5413 goto fail;
5414
5415success:
5416 mmap_write_downgrade(mm);
5417 return vma;
5418
5419fail:
5420 mmap_write_unlock(mm);
5421 return NULL;
5422}
5423#endif
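/*
 * A rough sketch of the arch-side caller this helper was written for
 * (requires CONFIG_LOCK_MM_AND_FIND_VMA). Signal delivery for the "no vma"
 * case, the access_error()-style permission check and the VM_FAULT_RETRY
 * loop are arch specific and elided; the function name is made up for
 * illustration.
 */
static vm_fault_t example_arch_fault(struct pt_regs *regs, struct mm_struct *mm,
				     unsigned long address, unsigned int flags)
{
	struct vm_area_struct *vma;
	vm_fault_t fault;

	vma = lock_mm_and_find_vma(mm, address, regs);
	if (!vma)		/* helper already dropped the mmap_lock */
		return VM_FAULT_SIGSEGV;

	fault = handle_mm_fault(vma, address, flags, regs);

	/* on retry or completion the mmap_lock was released for us */
	if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
		mmap_read_unlock(mm);
	return fault;
}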
5424
50ee3253
SB
5425#ifdef CONFIG_PER_VMA_LOCK
5426/*
5427 * Look up and lock a VMA under RCU protection. The returned VMA is guaranteed
5428 * to be stable and not isolated. If the VMA is not found or is being modified,
5429 * the function returns NULL.
5430 */
5431struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
5432 unsigned long address)
5433{
5434 MA_STATE(mas, &mm->mm_mt, address, address);
5435 struct vm_area_struct *vma;
5436
5437 rcu_read_lock();
5438retry:
5439 vma = mas_walk(&mas);
5440 if (!vma)
5441 goto inval;
5442
50ee3253
SB
5443 if (!vma_start_read(vma))
5444 goto inval;
5445
657b5146
JH
5446 /*
5447 * find_mergeable_anon_vma uses adjacent vmas which are not locked.
5448 * This check must happen after vma_start_read(); otherwise, a
5449 * concurrent mremap() with MREMAP_DONTUNMAP could dissociate the VMA
5450 * from its anon_vma.
5451 */
29a22b9e 5452 if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma))
657b5146 5453 goto inval_end_read;
444eeb17 5454
50ee3253 5455 /* Check since vm_start/vm_end might change before we lock the VMA */
657b5146
JH
5456 if (unlikely(address < vma->vm_start || address >= vma->vm_end))
5457 goto inval_end_read;
50ee3253
SB
5458
5459 /* Check if the VMA got isolated after we found it */
5460 if (vma->detached) {
5461 vma_end_read(vma);
52f23865 5462 count_vm_vma_lock_event(VMA_LOCK_MISS);
50ee3253
SB
5463 /* The area was replaced with another one */
5464 goto retry;
5465 }
5466
5467 rcu_read_unlock();
5468 return vma;
657b5146
JH
5469
5470inval_end_read:
5471 vma_end_read(vma);
50ee3253
SB
5472inval:
5473 rcu_read_unlock();
52f23865 5474 count_vm_vma_lock_event(VMA_LOCK_ABORT);
50ee3253
SB
5475 return NULL;
5476}
5477#endif /* CONFIG_PER_VMA_LOCK */
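/*
 * A sketch of the per-VMA-lock fast path as an arch fault handler might use
 * it (CONFIG_PER_VMA_LOCK). Returning false means "fall back to the
 * mmap_lock path"; the access_error() permission check and the
 * count_vm_vma_lock_event() statistics real handlers add are elided, and
 * the function name is illustrative only.
 */
static bool example_try_vma_locked_fault(struct mm_struct *mm,
					 unsigned long address,
					 unsigned int flags,
					 struct pt_regs *regs,
					 vm_fault_t *ret)
{
	struct vm_area_struct *vma;
	vm_fault_t fault;

	vma = lock_vma_under_rcu(mm, address);
	if (!vma)
		return false;

	fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs);
	if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
		vma_end_read(vma);

	if (fault & VM_FAULT_RETRY)
		return false;	/* retry under the mmap_lock with FAULT_FLAG_TRIED */

	*ret = fault;
	return true;
}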
5478
90eceff1
KS
5479#ifndef __PAGETABLE_P4D_FOLDED
5480/*
5481 * Allocate p4d page table.
5482 * We've already handled the fast-path in-line.
5483 */
5484int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
5485{
5486 p4d_t *new = p4d_alloc_one(mm, address);
5487 if (!new)
5488 return -ENOMEM;
5489
90eceff1 5490 spin_lock(&mm->page_table_lock);
ed33b5a6 5491 if (pgd_present(*pgd)) { /* Another has populated it */
90eceff1 5492 p4d_free(mm, new);
ed33b5a6
QZ
5493 } else {
5494 smp_wmb(); /* See comment in pmd_install() */
90eceff1 5495 pgd_populate(mm, pgd, new);
ed33b5a6 5496 }
90eceff1
KS
5497 spin_unlock(&mm->page_table_lock);
5498 return 0;
5499}
5500#endif /* __PAGETABLE_P4D_FOLDED */
5501
1da177e4
LT
5502#ifndef __PAGETABLE_PUD_FOLDED
5503/*
5504 * Allocate page upper directory.
872fec16 5505 * We've already handled the fast-path in-line.
1da177e4 5506 */
c2febafc 5507int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address)
1da177e4 5508{
c74df32c
HD
5509 pud_t *new = pud_alloc_one(mm, address);
5510 if (!new)
1bb3630e 5511 return -ENOMEM;
1da177e4 5512
872fec16 5513 spin_lock(&mm->page_table_lock);
b4e98d9a
KS
5514 if (!p4d_present(*p4d)) {
5515 mm_inc_nr_puds(mm);
ed33b5a6 5516 smp_wmb(); /* See comment in pmd_install() */
c2febafc 5517 p4d_populate(mm, p4d, new);
b4e98d9a 5518 } else /* Another has populated it */
5e541973 5519 pud_free(mm, new);
c74df32c 5520 spin_unlock(&mm->page_table_lock);
1bb3630e 5521 return 0;
1da177e4
LT
5522}
5523#endif /* __PAGETABLE_PUD_FOLDED */
5524
5525#ifndef __PAGETABLE_PMD_FOLDED
5526/*
5527 * Allocate page middle directory.
872fec16 5528 * We've already handled the fast-path in-line.
1da177e4 5529 */
1bb3630e 5530int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
1da177e4 5531{
a00cc7d9 5532 spinlock_t *ptl;
c74df32c
HD
5533 pmd_t *new = pmd_alloc_one(mm, address);
5534 if (!new)
1bb3630e 5535 return -ENOMEM;
1da177e4 5536
a00cc7d9 5537 ptl = pud_lock(mm, pud);
dc6c9a35
KS
5538 if (!pud_present(*pud)) {
5539 mm_inc_nr_pmds(mm);
ed33b5a6 5540 smp_wmb(); /* See comment in pmd_install() */
1bb3630e 5541 pud_populate(mm, pud, new);
ed33b5a6 5542 } else { /* Another has populated it */
5e541973 5543 pmd_free(mm, new);
ed33b5a6 5544 }
a00cc7d9 5545 spin_unlock(ptl);
1bb3630e 5546 return 0;
e0f39591 5547}
1da177e4
LT
5548#endif /* __PAGETABLE_PMD_FOLDED */
5549
0e5e64c0
MS
5550/**
5551 * follow_pte - look up PTE at a user virtual address
5552 * @mm: the mm_struct of the target address space
5553 * @address: user virtual address
5554 * @ptepp: location to store found PTE
5555 * @ptlp: location to store the lock for the PTE
5556 *
5557 * On a successful return, the pointer to the PTE is stored in @ptepp;
5558 * the corresponding lock is taken and its location is stored in @ptlp.
5559 * The contents of the PTE are only stable until @ptlp is released;
5560 * any further use, if any, must be protected against invalidation
5561 * with MMU notifiers.
5562 *
5563 * Only IO mappings and raw PFN mappings are allowed. The mmap semaphore
5564 * should be taken for read.
5565 *
5566 * KVM uses this function. While it is arguably less bad than ``follow_pfn``,
5567 * it is not a good general-purpose API.
5568 *
5569 * Return: zero on success, -ve otherwise.
5570 */
5571int follow_pte(struct mm_struct *mm, unsigned long address,
5572 pte_t **ptepp, spinlock_t **ptlp)
f8ad0f49
JW
5573{
5574 pgd_t *pgd;
c2febafc 5575 p4d_t *p4d;
f8ad0f49
JW
5576 pud_t *pud;
5577 pmd_t *pmd;
5578 pte_t *ptep;
5579
5580 pgd = pgd_offset(mm, address);
5581 if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
5582 goto out;
5583
c2febafc
KS
5584 p4d = p4d_offset(pgd, address);
5585 if (p4d_none(*p4d) || unlikely(p4d_bad(*p4d)))
5586 goto out;
5587
5588 pud = pud_offset(p4d, address);
f8ad0f49
JW
5589 if (pud_none(*pud) || unlikely(pud_bad(*pud)))
5590 goto out;
5591
5592 pmd = pmd_offset(pud, address);
f66055ab 5593 VM_BUG_ON(pmd_trans_huge(*pmd));
f8ad0f49 5594
f8ad0f49 5595 ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
3db82b93
HD
5596 if (!ptep)
5597 goto out;
c33c7948 5598 if (!pte_present(ptep_get(ptep)))
f8ad0f49
JW
5599 goto unlock;
5600 *ptepp = ptep;
5601 return 0;
5602unlock:
5603 pte_unmap_unlock(ptep, *ptlp);
5604out:
5605 return -EINVAL;
5606}
9fd6dad1
PB
5607EXPORT_SYMBOL_GPL(follow_pte);
5608
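/*
 * A minimal sketch of the call pattern described above (not a caller from
 * this file): the PTE value is only stable while the returned lock is held,
 * so copy out what is needed and drop the lock immediately. The surrounding
 * VM_IO | VM_PFNMAP checks expected of real callers are elided.
 */
static int example_peek_pfn(struct mm_struct *mm, unsigned long address,
			    unsigned long *pfn)
{
	spinlock_t *ptl;
	pte_t *ptep;
	int ret;

	ret = follow_pte(mm, address, &ptep, &ptl);
	if (ret)
		return ret;

	*pfn = pte_pfn(ptep_get(ptep));	/* valid only under ptl */
	pte_unmap_unlock(ptep, ptl);
	return 0;
}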
3b6748e2
JW
5609/**
5610 * follow_pfn - look up PFN at a user virtual address
5611 * @vma: memory mapping
5612 * @address: user virtual address
5613 * @pfn: location to store found PFN
5614 *
5615 * Only IO mappings and raw PFN mappings are allowed.
5616 *
9fd6dad1
PB
5617 * This function does not allow the caller to read the permissions
5618 * of the PTE. Do not use it.
5619 *
a862f68a 5620 * Return: zero and the pfn at @pfn on success, -ve otherwise.
3b6748e2
JW
5621 */
5622int follow_pfn(struct vm_area_struct *vma, unsigned long address,
5623 unsigned long *pfn)
5624{
5625 int ret = -EINVAL;
5626 spinlock_t *ptl;
5627 pte_t *ptep;
5628
5629 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
5630 return ret;
5631
9fd6dad1 5632 ret = follow_pte(vma->vm_mm, address, &ptep, &ptl);
3b6748e2
JW
5633 if (ret)
5634 return ret;
c33c7948 5635 *pfn = pte_pfn(ptep_get(ptep));
3b6748e2
JW
5636 pte_unmap_unlock(ptep, ptl);
5637 return 0;
5638}
5639EXPORT_SYMBOL(follow_pfn);
5640
28b2ee20 5641#ifdef CONFIG_HAVE_IOREMAP_PROT
d87fe660 5642int follow_phys(struct vm_area_struct *vma,
5643 unsigned long address, unsigned int flags,
5644 unsigned long *prot, resource_size_t *phys)
28b2ee20 5645{
03668a4d 5646 int ret = -EINVAL;
28b2ee20
RR
5647 pte_t *ptep, pte;
5648 spinlock_t *ptl;
28b2ee20 5649
d87fe660 5650 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
5651 goto out;
28b2ee20 5652
9fd6dad1 5653 if (follow_pte(vma->vm_mm, address, &ptep, &ptl))
d87fe660 5654 goto out;
c33c7948 5655 pte = ptep_get(ptep);
03668a4d 5656
f6f37321 5657 if ((flags & FOLL_WRITE) && !pte_write(pte))
28b2ee20 5658 goto unlock;
28b2ee20
RR
5659
5660 *prot = pgprot_val(pte_pgprot(pte));
03668a4d 5661 *phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;
28b2ee20 5662
03668a4d 5663 ret = 0;
28b2ee20
RR
5664unlock:
5665 pte_unmap_unlock(ptep, ptl);
5666out:
d87fe660 5667 return ret;
28b2ee20
RR
5668}
5669
96667f8a
SV
5670/**
5671 * generic_access_phys - generic implementation for iomem mmap access
5672 * @vma: the vma to access
f0953a1b 5673 * @addr: userspace address, not relative offset within @vma
96667f8a
SV
5674 * @buf: buffer to read/write
5675 * @len: length of transfer
5676 * @write: set to FOLL_WRITE when writing, otherwise reading
5677 *
5678 * This is a generic implementation for &vm_operations_struct.access for an
5679 * iomem mapping. This callback is used by access_process_vm() when the @vma is
5680 * not page based.
5681 */
28b2ee20
RR
5682int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
5683 void *buf, int len, int write)
5684{
5685 resource_size_t phys_addr;
5686 unsigned long prot = 0;
2bc7273b 5687 void __iomem *maddr;
96667f8a
SV
5688 pte_t *ptep, pte;
5689 spinlock_t *ptl;
5690 int offset = offset_in_page(addr);
5691 int ret = -EINVAL;
5692
5693 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
5694 return -EINVAL;
5695
5696retry:
e913a8cd 5697 if (follow_pte(vma->vm_mm, addr, &ptep, &ptl))
96667f8a 5698 return -EINVAL;
c33c7948 5699 pte = ptep_get(ptep);
96667f8a 5700 pte_unmap_unlock(ptep, ptl);
28b2ee20 5701
96667f8a
SV
5702 prot = pgprot_val(pte_pgprot(pte));
5703 phys_addr = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;
5704
5705 if ((write & FOLL_WRITE) && !pte_write(pte))
28b2ee20
RR
5706 return -EINVAL;
5707
9cb12d7b 5708 maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot);
24eee1e4 5709 if (!maddr)
5710 return -ENOMEM;
5711
e913a8cd 5712 if (follow_pte(vma->vm_mm, addr, &ptep, &ptl))
96667f8a
SV
5713 goto out_unmap;
5714
c33c7948 5715 if (!pte_same(pte, ptep_get(ptep))) {
96667f8a
SV
5716 pte_unmap_unlock(ptep, ptl);
5717 iounmap(maddr);
5718
5719 goto retry;
5720 }
5721
28b2ee20
RR
5722 if (write)
5723 memcpy_toio(maddr + offset, buf, len);
5724 else
5725 memcpy_fromio(buf, maddr + offset, len);
96667f8a
SV
5726 ret = len;
5727 pte_unmap_unlock(ptep, ptl);
5728out_unmap:
28b2ee20
RR
5729 iounmap(maddr);
5730
96667f8a 5731 return ret;
28b2ee20 5732}
5a73633e 5733EXPORT_SYMBOL_GPL(generic_access_phys);
28b2ee20
RR
5734#endif
5735
0ec76a11 5736/*
d3f5ffca 5737 * Access another process' address space as given in mm.
0ec76a11 5738 */
d3f5ffca
JH
5739int __access_remote_vm(struct mm_struct *mm, unsigned long addr, void *buf,
5740 int len, unsigned int gup_flags)
0ec76a11 5741{
0ec76a11 5742 void *old_buf = buf;
442486ec 5743 int write = gup_flags & FOLL_WRITE;
0ec76a11 5744
d8ed45c5 5745 if (mmap_read_lock_killable(mm))
1e426fe2
KK
5746 return 0;
5747
22883973
KS
5748 /* Untag the address before looking up the VMA */
5749 addr = untagged_addr_remote(mm, addr);
5750
eee9c708
LT
5751 /* Avoid triggering the temporary warning in __get_user_pages */
5752 if (!vma_lookup(mm, addr) && !expand_stack(mm, addr))
5753 return 0;
5754
183ff22b 5755 /* ignore errors, just check how much was successfully transferred */
0ec76a11 5756 while (len) {
ca5e8632 5757 int bytes, offset;
0ec76a11 5758 void *maddr;
ca5e8632
LS
5759 struct vm_area_struct *vma = NULL;
5760 struct page *page = get_user_page_vma_remote(mm, addr,
5761 gup_flags, &vma);
0ec76a11 5762
ca5e8632 5763 if (IS_ERR_OR_NULL(page)) {
9471f1f2
LT
5764 /* We might need to expand the stack to access it */
5765 vma = vma_lookup(mm, addr);
5766 if (!vma) {
5767 vma = expand_stack(mm, addr);
5768
5769 /* mmap_lock was dropped on failure */
5770 if (!vma)
5771 return buf - old_buf;
5772
5773 /* Try again if stack expansion worked */
5774 continue;
5775 }
5776
ca5e8632 5777
28b2ee20
RR
5778 /*
5779 * Check if this is a VM_IO | VM_PFNMAP VMA, which
5780 * we can access using slightly different code.
5781 */
9471f1f2
LT
5782 bytes = 0;
5783#ifdef CONFIG_HAVE_IOREMAP_PROT
28b2ee20 5784 if (vma->vm_ops && vma->vm_ops->access)
9471f1f2
LT
5785 bytes = vma->vm_ops->access(vma, addr, buf,
5786 len, write);
dbffcd03 5787#endif
9471f1f2
LT
5788 if (bytes <= 0)
5789 break;
0ec76a11 5790 } else {
28b2ee20
RR
5791 bytes = len;
5792 offset = addr & (PAGE_SIZE-1);
5793 if (bytes > PAGE_SIZE-offset)
5794 bytes = PAGE_SIZE-offset;
5795
5796 maddr = kmap(page);
5797 if (write) {
5798 copy_to_user_page(vma, page, addr,
5799 maddr + offset, buf, bytes);
5800 set_page_dirty_lock(page);
5801 } else {
5802 copy_from_user_page(vma, page, addr,
5803 buf, maddr + offset, bytes);
5804 }
5805 kunmap(page);
09cbfeaf 5806 put_page(page);
0ec76a11 5807 }
0ec76a11
DH
5808 len -= bytes;
5809 buf += bytes;
5810 addr += bytes;
5811 }
d8ed45c5 5812 mmap_read_unlock(mm);
0ec76a11
DH
5813
5814 return buf - old_buf;
5815}
03252919 5816
5ddd36b9 5817/**
ae91dbfc 5818 * access_remote_vm - access another process' address space
5ddd36b9
SW
5819 * @mm: the mm_struct of the target address space
5820 * @addr: start address to access
5821 * @buf: source or destination buffer
5822 * @len: number of bytes to transfer
6347e8d5 5823 * @gup_flags: flags modifying lookup behaviour
5ddd36b9
SW
5824 *
5825 * The caller must hold a reference on @mm.
a862f68a
MR
5826 *
5827 * Return: number of bytes copied from source to destination.
5ddd36b9
SW
5828 */
5829int access_remote_vm(struct mm_struct *mm, unsigned long addr,
6347e8d5 5830 void *buf, int len, unsigned int gup_flags)
5ddd36b9 5831{
d3f5ffca 5832 return __access_remote_vm(mm, addr, buf, len, gup_flags);
5ddd36b9
SW
5833}
5834
206cb636
SW
5835/*
5836 * Access another process' address space.
5837 * Source/target buffer must be kernel space,
5838 * Do not walk the page table directly, use get_user_pages
5839 */
5840int access_process_vm(struct task_struct *tsk, unsigned long addr,
f307ab6d 5841 void *buf, int len, unsigned int gup_flags)
206cb636
SW
5842{
5843 struct mm_struct *mm;
5844 int ret;
5845
5846 mm = get_task_mm(tsk);
5847 if (!mm)
5848 return 0;
5849
d3f5ffca 5850 ret = __access_remote_vm(mm, addr, buf, len, gup_flags);
442486ec 5851
206cb636
SW
5852 mmput(mm);
5853
5854 return ret;
5855}
fcd35857 5856EXPORT_SYMBOL_GPL(access_process_vm);
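/*
 * A small sketch of reading one word from another task, roughly what
 * ptrace(PTRACE_PEEKDATA) does via ptrace_access_vm(); FOLL_FORCE mirrors
 * ptrace semantics, other readers would pass 0. The helper name is made up.
 */
static int example_peek_word(struct task_struct *tsk, unsigned long addr,
			     unsigned long *val)
{
	int copied = access_process_vm(tsk, addr, val, sizeof(*val), FOLL_FORCE);

	return copied == sizeof(*val) ? 0 : -EIO;
}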
206cb636 5857
03252919
AK
5858/*
5859 * Print the name of a VMA.
5860 */
5861void print_vma_addr(char *prefix, unsigned long ip)
5862{
5863 struct mm_struct *mm = current->mm;
5864 struct vm_area_struct *vma;
5865
e8bff74a 5866 /*
0a7f682d 5867 * we might be running from an atomic context so we cannot sleep
e8bff74a 5868 */
d8ed45c5 5869 if (!mmap_read_trylock(mm))
e8bff74a
IM
5870 return;
5871
03252919
AK
5872 vma = find_vma(mm, ip);
5873 if (vma && vma->vm_file) {
5874 struct file *f = vma->vm_file;
0a7f682d 5875 char *buf = (char *)__get_free_page(GFP_NOWAIT);
03252919 5876 if (buf) {
2fbc57c5 5877 char *p;
03252919 5878
9bf39ab2 5879 p = file_path(f, buf, PAGE_SIZE);
03252919
AK
5880 if (IS_ERR(p))
5881 p = "?";
2fbc57c5 5882 printk("%s%s[%lx+%lx]", prefix, kbasename(p),
03252919
AK
5883 vma->vm_start,
5884 vma->vm_end - vma->vm_start);
5885 free_page((unsigned long)buf);
5886 }
5887 }
d8ed45c5 5888 mmap_read_unlock(mm);
03252919 5889}
3ee1afa3 5890
662bbcb2 5891#if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)
9ec23531 5892void __might_fault(const char *file, int line)
3ee1afa3 5893{
9ec23531 5894 if (pagefault_disabled())
662bbcb2 5895 return;
42a38756 5896 __might_sleep(file, line);
9ec23531 5897#if defined(CONFIG_DEBUG_ATOMIC_SLEEP)
662bbcb2 5898 if (current->mm)
da1c55f1 5899 might_lock_read(&current->mm->mmap_lock);
9ec23531 5900#endif
3ee1afa3 5901}
9ec23531 5902EXPORT_SYMBOL(__might_fault);
3ee1afa3 5903#endif
47ad8475
AA
5904
5905#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
c6ddfb6c
YH
5906/*
5907 * Process all subpages of the specified huge page with the specified
5908 * operation. The target subpage will be processed last to keep its
5909 * cache lines hot.
5910 */
1cb9dc4b 5911static inline int process_huge_page(
c6ddfb6c 5912 unsigned long addr_hint, unsigned int pages_per_huge_page,
1cb9dc4b 5913 int (*process_subpage)(unsigned long addr, int idx, void *arg),
c6ddfb6c 5914 void *arg)
47ad8475 5915{
1cb9dc4b 5916 int i, n, base, l, ret;
c79b57e4
YH
5917 unsigned long addr = addr_hint &
5918 ~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
47ad8475 5919
c6ddfb6c 5920 /* Process target subpage last to keep its cache lines hot */
47ad8475 5921 might_sleep();
c79b57e4
YH
5922 n = (addr_hint - addr) / PAGE_SIZE;
5923 if (2 * n <= pages_per_huge_page) {
c6ddfb6c 5924 /* If target subpage in first half of huge page */
c79b57e4
YH
5925 base = 0;
5926 l = n;
c6ddfb6c 5927 /* Process subpages at the end of huge page */
c79b57e4
YH
5928 for (i = pages_per_huge_page - 1; i >= 2 * n; i--) {
5929 cond_resched();
1cb9dc4b
LS
5930 ret = process_subpage(addr + i * PAGE_SIZE, i, arg);
5931 if (ret)
5932 return ret;
c79b57e4
YH
5933 }
5934 } else {
c6ddfb6c 5935 /* If target subpage in second half of huge page */
c79b57e4
YH
5936 base = pages_per_huge_page - 2 * (pages_per_huge_page - n);
5937 l = pages_per_huge_page - n;
c6ddfb6c 5938 /* Process subpages at the beginning of the huge page */
c79b57e4
YH
5939 for (i = 0; i < base; i++) {
5940 cond_resched();
1cb9dc4b
LS
5941 ret = process_subpage(addr + i * PAGE_SIZE, i, arg);
5942 if (ret)
5943 return ret;
c79b57e4
YH
5944 }
5945 }
5946 /*
c6ddfb6c
YH
5947 * Process remaining subpages in left-right-left-right pattern
5948 * towards the target subpage
c79b57e4
YH
5949 */
5950 for (i = 0; i < l; i++) {
5951 int left_idx = base + i;
5952 int right_idx = base + 2 * l - 1 - i;
5953
5954 cond_resched();
1cb9dc4b
LS
5955 ret = process_subpage(addr + left_idx * PAGE_SIZE, left_idx, arg);
5956 if (ret)
5957 return ret;
47ad8475 5958 cond_resched();
1cb9dc4b
LS
5959 ret = process_subpage(addr + right_idx * PAGE_SIZE, right_idx, arg);
5960 if (ret)
5961 return ret;
47ad8475 5962 }
1cb9dc4b 5963 return 0;
47ad8475
AA
5964}
5965
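/*
 * Worked example of the ordering implemented above: with
 * pages_per_huge_page == 8 and the faulting (target) subpage at index 5,
 * the target sits in the second half, so base = 8 - 2 * (8 - 5) = 2 and
 * l = 3. Subpages 0 and 1 are processed first, then the left/right pairs
 * (2,7), (3,6), (4,5) -- leaving the target subpage 5 for last so its
 * cache lines stay hot for the caller.
 */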
c6ddfb6c
YH
5966static void clear_gigantic_page(struct page *page,
5967 unsigned long addr,
5968 unsigned int pages_per_huge_page)
5969{
5970 int i;
14455eab 5971 struct page *p;
c6ddfb6c
YH
5972
5973 might_sleep();
14455eab
CL
5974 for (i = 0; i < pages_per_huge_page; i++) {
5975 p = nth_page(page, i);
c6ddfb6c
YH
5976 cond_resched();
5977 clear_user_highpage(p, addr + i * PAGE_SIZE);
5978 }
5979}
5980
1cb9dc4b 5981static int clear_subpage(unsigned long addr, int idx, void *arg)
c6ddfb6c
YH
5982{
5983 struct page *page = arg;
5984
5985 clear_user_highpage(page + idx, addr);
1cb9dc4b 5986 return 0;
c6ddfb6c
YH
5987}
5988
5989void clear_huge_page(struct page *page,
5990 unsigned long addr_hint, unsigned int pages_per_huge_page)
5991{
5992 unsigned long addr = addr_hint &
5993 ~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
5994
5995 if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
5996 clear_gigantic_page(page, addr, pages_per_huge_page);
5997 return;
5998 }
5999
6000 process_huge_page(addr_hint, pages_per_huge_page, clear_subpage, page);
6001}
6002
1cb9dc4b 6003static int copy_user_gigantic_page(struct folio *dst, struct folio *src,
c0e8150e
Z
6004 unsigned long addr,
6005 struct vm_area_struct *vma,
6006 unsigned int pages_per_huge_page)
47ad8475
AA
6007{
6008 int i;
c0e8150e
Z
6009 struct page *dst_page;
6010 struct page *src_page;
47ad8475 6011
14455eab 6012 for (i = 0; i < pages_per_huge_page; i++) {
c0e8150e
Z
6013 dst_page = folio_page(dst, i);
6014 src_page = folio_page(src, i);
14455eab 6015
47ad8475 6016 cond_resched();
1cb9dc4b
LS
6017 if (copy_mc_user_highpage(dst_page, src_page,
6018 addr + i*PAGE_SIZE, vma)) {
6019 memory_failure_queue(page_to_pfn(src_page), 0);
6020 return -EHWPOISON;
6021 }
47ad8475 6022 }
1cb9dc4b 6023 return 0;
47ad8475
AA
6024}
6025
c9f4cd71
YH
6026struct copy_subpage_arg {
6027 struct page *dst;
6028 struct page *src;
6029 struct vm_area_struct *vma;
6030};
6031
1cb9dc4b 6032static int copy_subpage(unsigned long addr, int idx, void *arg)
c9f4cd71
YH
6033{
6034 struct copy_subpage_arg *copy_arg = arg;
6035
1cb9dc4b
LS
6036 if (copy_mc_user_highpage(copy_arg->dst + idx, copy_arg->src + idx,
6037 addr, copy_arg->vma)) {
6038 memory_failure_queue(page_to_pfn(copy_arg->src + idx), 0);
6039 return -EHWPOISON;
6040 }
6041 return 0;
c9f4cd71
YH
6042}
6043
1cb9dc4b
LS
6044int copy_user_large_folio(struct folio *dst, struct folio *src,
6045 unsigned long addr_hint, struct vm_area_struct *vma)
47ad8475 6046{
c0e8150e 6047 unsigned int pages_per_huge_page = folio_nr_pages(dst);
c9f4cd71
YH
6048 unsigned long addr = addr_hint &
6049 ~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
6050 struct copy_subpage_arg arg = {
c0e8150e
Z
6051 .dst = &dst->page,
6052 .src = &src->page,
c9f4cd71
YH
6053 .vma = vma,
6054 };
47ad8475 6055
1cb9dc4b
LS
6056 if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES))
6057 return copy_user_gigantic_page(dst, src, addr, vma,
6058 pages_per_huge_page);
47ad8475 6059
1cb9dc4b 6060 return process_huge_page(addr_hint, pages_per_huge_page, copy_subpage, &arg);
47ad8475 6061}
fa4d75c1 6062
e87340ca
Z
6063long copy_folio_from_user(struct folio *dst_folio,
6064 const void __user *usr_src,
6065 bool allow_pagefault)
fa4d75c1 6066{
e87340ca 6067 void *kaddr;
fa4d75c1 6068 unsigned long i, rc = 0;
e87340ca
Z
6069 unsigned int nr_pages = folio_nr_pages(dst_folio);
6070 unsigned long ret_val = nr_pages * PAGE_SIZE;
14455eab 6071 struct page *subpage;
fa4d75c1 6072
e87340ca
Z
6073 for (i = 0; i < nr_pages; i++) {
6074 subpage = folio_page(dst_folio, i);
6075 kaddr = kmap_local_page(subpage);
0d508c1f
Z
6076 if (!allow_pagefault)
6077 pagefault_disable();
e87340ca 6078 rc = copy_from_user(kaddr, usr_src + i * PAGE_SIZE, PAGE_SIZE);
0d508c1f
Z
6079 if (!allow_pagefault)
6080 pagefault_enable();
e87340ca 6081 kunmap_local(kaddr);
fa4d75c1
MK
6082
6083 ret_val -= (PAGE_SIZE - rc);
6084 if (rc)
6085 break;
6086
e763243c
MS
6087 flush_dcache_page(subpage);
6088
fa4d75c1
MK
6089 cond_resched();
6090 }
6091 return ret_val;
6092}
47ad8475 6093#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
49076ec2 6094
40b64acd 6095#if USE_SPLIT_PTE_PTLOCKS && ALLOC_SPLIT_PTLOCKS
b35f1819
KS
6096
6097static struct kmem_cache *page_ptl_cachep;
6098
6099void __init ptlock_cache_init(void)
6100{
6101 page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(spinlock_t), 0,
6102 SLAB_PANIC, NULL);
6103}
6104
f5ecca06 6105bool ptlock_alloc(struct ptdesc *ptdesc)
49076ec2
KS
6106{
6107 spinlock_t *ptl;
6108
b35f1819 6109 ptl = kmem_cache_alloc(page_ptl_cachep, GFP_KERNEL);
49076ec2
KS
6110 if (!ptl)
6111 return false;
f5ecca06 6112 ptdesc->ptl = ptl;
49076ec2
KS
6113 return true;
6114}
6115
6ed1b8a0 6116void ptlock_free(struct ptdesc *ptdesc)
49076ec2 6117{
6ed1b8a0 6118 kmem_cache_free(page_ptl_cachep, ptdesc->ptl);
49076ec2
KS
6119}
6120#endif