Commit | Line | Data
---|---|---
4bbd4c77 KS |
1 | #include <linux/kernel.h> |
2 | #include <linux/errno.h> | |
3 | #include <linux/err.h> | |
4 | #include <linux/spinlock.h> | |
5 | ||
4bbd4c77 | 6 | #include <linux/mm.h> |
3565fce3 | 7 | #include <linux/memremap.h> |
4bbd4c77 KS |
8 | #include <linux/pagemap.h> |
9 | #include <linux/rmap.h> | |
10 | #include <linux/swap.h> | |
11 | #include <linux/swapops.h> | |
12 | ||
174cd4b1 | 13 | #include <linux/sched/signal.h> |
2667f50e | 14 | #include <linux/rwsem.h> |
f30c59e9 | 15 | #include <linux/hugetlb.h> |
1027e443 | 16 | |
33a709b2 | 17 | #include <asm/mmu_context.h> |
2667f50e | 18 | #include <asm/pgtable.h> |
1027e443 | 19 | #include <asm/tlbflush.h> |
2667f50e | 20 | |
4bbd4c77 KS |
21 | #include "internal.h" |
22 | ||
69e68b4f KS |
23 | static struct page *no_page_table(struct vm_area_struct *vma, |
24 | unsigned int flags) | |
4bbd4c77 | 25 | { |
69e68b4f KS |
26 | /* |
27 | * When core dumping an enormous anonymous area that nobody | |
28 | * has touched so far, we don't want to allocate unnecessary pages or | |
29 | * page tables. Return error instead of NULL to skip handle_mm_fault, | |
30 | * then get_dump_page() will return NULL to leave a hole in the dump. | |
31 | * But we can only make this optimization where a hole would surely | |
32 | * be zero-filled if handle_mm_fault() actually did handle it. | |
33 | */ | |
34 | if ((flags & FOLL_DUMP) && (!vma->vm_ops || !vma->vm_ops->fault)) | |
35 | return ERR_PTR(-EFAULT); | |
36 | return NULL; | |
37 | } | |
4bbd4c77 | 38 | |
1027e443 KS |
39 | static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address, |
40 | pte_t *pte, unsigned int flags) | |
41 | { | |
42 | /* No page to get reference */ | |
43 | if (flags & FOLL_GET) | |
44 | return -EFAULT; | |
45 | ||
46 | if (flags & FOLL_TOUCH) { | |
47 | pte_t entry = *pte; | |
48 | ||
49 | if (flags & FOLL_WRITE) | |
50 | entry = pte_mkdirty(entry); | |
51 | entry = pte_mkyoung(entry); | |
52 | ||
53 | if (!pte_same(*pte, entry)) { | |
54 | set_pte_at(vma->vm_mm, address, pte, entry); | |
55 | update_mmu_cache(vma, address, pte); | |
56 | } | |
57 | } | |
58 | ||
59 | /* Proper page table entry exists, but no corresponding struct page */ | |
60 | return -EEXIST; | |
61 | } | |
62 | ||
19be0eaf LT |
63 | /* |
64 | * FOLL_FORCE can write to even unwritable pte's, but only | |
65 | * after we've gone through a COW cycle and they are dirty. | |
66 | */ | |
67 | static inline bool can_follow_write_pte(pte_t pte, unsigned int flags) | |
68 | { | |
f6f37321 | 69 | return pte_write(pte) || |
19be0eaf LT |
70 | ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte)); |
71 | } | |
72 | ||
69e68b4f KS |
73 | static struct page *follow_page_pte(struct vm_area_struct *vma, |
74 | unsigned long address, pmd_t *pmd, unsigned int flags) | |
75 | { | |
76 | struct mm_struct *mm = vma->vm_mm; | |
3565fce3 | 77 | struct dev_pagemap *pgmap = NULL; |
69e68b4f KS |
78 | struct page *page; |
79 | spinlock_t *ptl; | |
80 | pte_t *ptep, pte; | |
4bbd4c77 | 81 | |
69e68b4f | 82 | retry: |
4bbd4c77 | 83 | if (unlikely(pmd_bad(*pmd))) |
69e68b4f | 84 | return no_page_table(vma, flags); |
4bbd4c77 KS |
85 | |
86 | ptep = pte_offset_map_lock(mm, pmd, address, &ptl); | |
4bbd4c77 KS |
87 | pte = *ptep; |
88 | if (!pte_present(pte)) { | |
89 | swp_entry_t entry; | |
90 | /* | |
91 | * KSM's break_ksm() relies upon recognizing a ksm page | |
92 | * even while it is being migrated, so for that case we | |
93 | * need migration_entry_wait(). | |
94 | */ | |
95 | if (likely(!(flags & FOLL_MIGRATION))) | |
96 | goto no_page; | |
0661a336 | 97 | if (pte_none(pte)) |
4bbd4c77 KS |
98 | goto no_page; |
99 | entry = pte_to_swp_entry(pte); | |
100 | if (!is_migration_entry(entry)) | |
101 | goto no_page; | |
102 | pte_unmap_unlock(ptep, ptl); | |
103 | migration_entry_wait(mm, pmd, address); | |
69e68b4f | 104 | goto retry; |
4bbd4c77 | 105 | } |
8a0516ed | 106 | if ((flags & FOLL_NUMA) && pte_protnone(pte)) |
4bbd4c77 | 107 | goto no_page; |
19be0eaf | 108 | if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags)) { |
69e68b4f KS |
109 | pte_unmap_unlock(ptep, ptl); |
110 | return NULL; | |
111 | } | |
4bbd4c77 KS |
112 | |
113 | page = vm_normal_page(vma, address, pte); | |
3565fce3 DW |
114 | if (!page && pte_devmap(pte) && (flags & FOLL_GET)) { |
115 | /* | |
116 | * Only return device mapping pages in the FOLL_GET case since | |
117 | * they are only valid while holding the pgmap reference. | |
118 | */ | |
119 | pgmap = get_dev_pagemap(pte_pfn(pte), NULL); | |
120 | if (pgmap) | |
121 | page = pte_page(pte); | |
122 | else | |
123 | goto no_page; | |
124 | } else if (unlikely(!page)) { | |
1027e443 KS |
125 | if (flags & FOLL_DUMP) { |
126 | /* Avoid special (like zero) pages in core dumps */ | |
127 | page = ERR_PTR(-EFAULT); | |
128 | goto out; | |
129 | } | |
130 | ||
131 | if (is_zero_pfn(pte_pfn(pte))) { | |
132 | page = pte_page(pte); | |
133 | } else { | |
134 | int ret; | |
135 | ||
136 | ret = follow_pfn_pte(vma, address, ptep, flags); | |
137 | page = ERR_PTR(ret); | |
138 | goto out; | |
139 | } | |
4bbd4c77 KS |
140 | } |
141 | ||
6742d293 KS |
142 | if (flags & FOLL_SPLIT && PageTransCompound(page)) { |
143 | int ret; | |
144 | get_page(page); | |
145 | pte_unmap_unlock(ptep, ptl); | |
146 | lock_page(page); | |
147 | ret = split_huge_page(page); | |
148 | unlock_page(page); | |
149 | put_page(page); | |
150 | if (ret) | |
151 | return ERR_PTR(ret); | |
152 | goto retry; | |
153 | } | |
154 | ||
3565fce3 | 155 | if (flags & FOLL_GET) { |
ddc58f27 | 156 | get_page(page); |
3565fce3 DW |
157 | |
158 | /* drop the pgmap reference now that we hold the page */ | |
159 | if (pgmap) { | |
160 | put_dev_pagemap(pgmap); | |
161 | pgmap = NULL; | |
162 | } | |
163 | } | |
4bbd4c77 KS |
164 | if (flags & FOLL_TOUCH) { |
165 | if ((flags & FOLL_WRITE) && | |
166 | !pte_dirty(pte) && !PageDirty(page)) | |
167 | set_page_dirty(page); | |
168 | /* | |
169 | * pte_mkyoung() would be more correct here, but atomic care | |
170 | * is needed to avoid losing the dirty bit: it is easier to use | |
171 | * mark_page_accessed(). | |
172 | */ | |
173 | mark_page_accessed(page); | |
174 | } | |
de60f5f1 | 175 | if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) { |
e90309c9 KS |
176 | /* Do not mlock pte-mapped THP */ |
177 | if (PageTransCompound(page)) | |
178 | goto out; | |
179 | ||
4bbd4c77 KS |
180 | /* |
181 | * The preliminary mapping check is mainly to avoid the | |
182 | * pointless overhead of lock_page on the ZERO_PAGE | |
183 | * which might bounce very badly if there is contention. | |
184 | * | |
185 | * If the page is already locked, we don't need to | |
186 | * handle it now - vmscan will handle it later if and | |
187 | * when it attempts to reclaim the page. | |
188 | */ | |
189 | if (page->mapping && trylock_page(page)) { | |
190 | lru_add_drain(); /* push cached pages to LRU */ | |
191 | /* | |
192 | * Because we lock page here, and migration is | |
193 | * blocked by the pte's page reference, and we | |
194 | * know the page is still mapped, we don't even | |
195 | * need to check for file-cache page truncation. | |
196 | */ | |
197 | mlock_vma_page(page); | |
198 | unlock_page(page); | |
199 | } | |
200 | } | |
1027e443 | 201 | out: |
4bbd4c77 | 202 | pte_unmap_unlock(ptep, ptl); |
4bbd4c77 | 203 | return page; |
4bbd4c77 KS |
204 | no_page: |
205 | pte_unmap_unlock(ptep, ptl); | |
206 | if (!pte_none(pte)) | |
69e68b4f KS |
207 | return NULL; |
208 | return no_page_table(vma, flags); | |
209 | } | |
210 | ||
080dbb61 AK |
211 | static struct page *follow_pmd_mask(struct vm_area_struct *vma, |
212 | unsigned long address, pud_t *pudp, | |
213 | unsigned int flags, unsigned int *page_mask) | |
69e68b4f | 214 | { |
69e68b4f KS |
215 | pmd_t *pmd; |
216 | spinlock_t *ptl; | |
217 | struct page *page; | |
218 | struct mm_struct *mm = vma->vm_mm; | |
219 | ||
080dbb61 | 220 | pmd = pmd_offset(pudp, address); |
69e68b4f KS |
221 | if (pmd_none(*pmd)) |
222 | return no_page_table(vma, flags); | |
223 | if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) { | |
e66f17ff NH |
224 | page = follow_huge_pmd(mm, address, pmd, flags); |
225 | if (page) | |
226 | return page; | |
227 | return no_page_table(vma, flags); | |
69e68b4f | 228 | } |
4dc71451 AK |
229 | if (is_hugepd(__hugepd(pmd_val(*pmd)))) { |
230 | page = follow_huge_pd(vma, address, | |
231 | __hugepd(pmd_val(*pmd)), flags, | |
232 | PMD_SHIFT); | |
233 | if (page) | |
234 | return page; | |
235 | return no_page_table(vma, flags); | |
236 | } | |
84c3fc4e ZY |
237 | retry: |
238 | if (!pmd_present(*pmd)) { | |
239 | if (likely(!(flags & FOLL_MIGRATION))) | |
240 | return no_page_table(vma, flags); | |
241 | VM_BUG_ON(thp_migration_supported() && | |
242 | !is_pmd_migration_entry(*pmd)); | |
243 | if (is_pmd_migration_entry(*pmd)) | |
244 | pmd_migration_entry_wait(mm, pmd); | |
245 | goto retry; | |
246 | } | |
3565fce3 DW |
247 | if (pmd_devmap(*pmd)) { |
248 | ptl = pmd_lock(mm, pmd); | |
249 | page = follow_devmap_pmd(vma, address, pmd, flags); | |
250 | spin_unlock(ptl); | |
251 | if (page) | |
252 | return page; | |
253 | } | |
6742d293 KS |
254 | if (likely(!pmd_trans_huge(*pmd))) |
255 | return follow_page_pte(vma, address, pmd, flags); | |
256 | ||
db08f203 AK |
257 | if ((flags & FOLL_NUMA) && pmd_protnone(*pmd)) |
258 | return no_page_table(vma, flags); | |
259 | ||
84c3fc4e | 260 | retry_locked: |
6742d293 | 261 | ptl = pmd_lock(mm, pmd); |
84c3fc4e ZY |
262 | if (unlikely(!pmd_present(*pmd))) { |
263 | spin_unlock(ptl); | |
264 | if (likely(!(flags & FOLL_MIGRATION))) | |
265 | return no_page_table(vma, flags); | |
266 | pmd_migration_entry_wait(mm, pmd); | |
267 | goto retry_locked; | |
268 | } | |
6742d293 KS |
269 | if (unlikely(!pmd_trans_huge(*pmd))) { |
270 | spin_unlock(ptl); | |
271 | return follow_page_pte(vma, address, pmd, flags); | |
272 | } | |
6742d293 KS |
273 | if (flags & FOLL_SPLIT) { |
274 | int ret; | |
275 | page = pmd_page(*pmd); | |
276 | if (is_huge_zero_page(page)) { | |
277 | spin_unlock(ptl); | |
278 | ret = 0; | |
78ddc534 | 279 | split_huge_pmd(vma, pmd, address); |
337d9abf NH |
280 | if (pmd_trans_unstable(pmd)) |
281 | ret = -EBUSY; | |
6742d293 KS |
282 | } else { |
283 | get_page(page); | |
69e68b4f | 284 | spin_unlock(ptl); |
6742d293 KS |
285 | lock_page(page); |
286 | ret = split_huge_page(page); | |
287 | unlock_page(page); | |
288 | put_page(page); | |
baa355fd KS |
289 | if (pmd_none(*pmd)) |
290 | return no_page_table(vma, flags); | |
6742d293 KS |
291 | } |
292 | ||
293 | return ret ? ERR_PTR(ret) : | |
294 | follow_page_pte(vma, address, pmd, flags); | |
69e68b4f | 295 | } |
6742d293 KS |
296 | page = follow_trans_huge_pmd(vma, address, pmd, flags); |
297 | spin_unlock(ptl); | |
298 | *page_mask = HPAGE_PMD_NR - 1; | |
299 | return page; | |
4bbd4c77 KS |
300 | } |
301 | ||
080dbb61 AK |
302 | |
303 | static struct page *follow_pud_mask(struct vm_area_struct *vma, | |
304 | unsigned long address, p4d_t *p4dp, | |
305 | unsigned int flags, unsigned int *page_mask) | |
306 | { | |
307 | pud_t *pud; | |
308 | spinlock_t *ptl; | |
309 | struct page *page; | |
310 | struct mm_struct *mm = vma->vm_mm; | |
311 | ||
312 | pud = pud_offset(p4dp, address); | |
313 | if (pud_none(*pud)) | |
314 | return no_page_table(vma, flags); | |
315 | if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) { | |
316 | page = follow_huge_pud(mm, address, pud, flags); | |
317 | if (page) | |
318 | return page; | |
319 | return no_page_table(vma, flags); | |
320 | } | |
4dc71451 AK |
321 | if (is_hugepd(__hugepd(pud_val(*pud)))) { |
322 | page = follow_huge_pd(vma, address, | |
323 | __hugepd(pud_val(*pud)), flags, | |
324 | PUD_SHIFT); | |
325 | if (page) | |
326 | return page; | |
327 | return no_page_table(vma, flags); | |
328 | } | |
080dbb61 AK |
329 | if (pud_devmap(*pud)) { |
330 | ptl = pud_lock(mm, pud); | |
331 | page = follow_devmap_pud(vma, address, pud, flags); | |
332 | spin_unlock(ptl); | |
333 | if (page) | |
334 | return page; | |
335 | } | |
336 | if (unlikely(pud_bad(*pud))) | |
337 | return no_page_table(vma, flags); | |
338 | ||
339 | return follow_pmd_mask(vma, address, pud, flags, page_mask); | |
340 | } | |
341 | ||
342 | ||
343 | static struct page *follow_p4d_mask(struct vm_area_struct *vma, | |
344 | unsigned long address, pgd_t *pgdp, | |
345 | unsigned int flags, unsigned int *page_mask) | |
346 | { | |
347 | p4d_t *p4d; | |
4dc71451 | 348 | struct page *page; |
080dbb61 AK |
349 | |
350 | p4d = p4d_offset(pgdp, address); | |
351 | if (p4d_none(*p4d)) | |
352 | return no_page_table(vma, flags); | |
353 | BUILD_BUG_ON(p4d_huge(*p4d)); | |
354 | if (unlikely(p4d_bad(*p4d))) | |
355 | return no_page_table(vma, flags); | |
356 | ||
4dc71451 AK |
357 | if (is_hugepd(__hugepd(p4d_val(*p4d)))) { |
358 | page = follow_huge_pd(vma, address, | |
359 | __hugepd(p4d_val(*p4d)), flags, | |
360 | P4D_SHIFT); | |
361 | if (page) | |
362 | return page; | |
363 | return no_page_table(vma, flags); | |
364 | } | |
080dbb61 AK |
365 | return follow_pud_mask(vma, address, p4d, flags, page_mask); |
366 | } | |
367 | ||
368 | /** | |
369 | * follow_page_mask - look up a page descriptor from a user-virtual address | |
370 | * @vma: vm_area_struct mapping @address | |
371 | * @address: virtual address to look up | |
372 | * @flags: flags modifying lookup behaviour | |
373 | * @page_mask: on output, *page_mask is set according to the size of the page | |
374 | * | |
375 | * @flags can have FOLL_ flags set, defined in <linux/mm.h> | |
376 | * | |
377 | * Returns the mapped (struct page *), %NULL if no mapping exists, or | |
378 | * an error pointer if there is a mapping to something not represented | |
379 | * by a page descriptor (see also vm_normal_page()). | |
380 | */ | |
381 | struct page *follow_page_mask(struct vm_area_struct *vma, | |
382 | unsigned long address, unsigned int flags, | |
383 | unsigned int *page_mask) | |
384 | { | |
385 | pgd_t *pgd; | |
386 | struct page *page; | |
387 | struct mm_struct *mm = vma->vm_mm; | |
388 | ||
389 | *page_mask = 0; | |
390 | ||
391 | /* make this handle hugepd */ | |
392 | page = follow_huge_addr(mm, address, flags & FOLL_WRITE); | |
393 | if (!IS_ERR(page)) { | |
394 | BUG_ON(flags & FOLL_GET); | |
395 | return page; | |
396 | } | |
397 | ||
398 | pgd = pgd_offset(mm, address); | |
399 | ||
400 | if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) | |
401 | return no_page_table(vma, flags); | |
402 | ||
faaa5b62 AK |
403 | if (pgd_huge(*pgd)) { |
404 | page = follow_huge_pgd(mm, address, pgd, flags); | |
405 | if (page) | |
406 | return page; | |
407 | return no_page_table(vma, flags); | |
408 | } | |
4dc71451 AK |
409 | if (is_hugepd(__hugepd(pgd_val(*pgd)))) { |
410 | page = follow_huge_pd(vma, address, | |
411 | __hugepd(pgd_val(*pgd)), flags, | |
412 | PGDIR_SHIFT); | |
413 | if (page) | |
414 | return page; | |
415 | return no_page_table(vma, flags); | |
416 | } | |
faaa5b62 | 417 | |
080dbb61 AK |
418 | return follow_p4d_mask(vma, address, pgd, flags, page_mask); |
419 | } | |
420 | ||
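The contract documented above has three distinct outcomes that callers must tell apart. Below is a minimal, hypothetical sketch of such a caller (`inspect_address` is not part of this file); it assumes mmap_sem is already held and uses only generic kernel helpers (`IS_ERR`, `page_to_pfn`, `put_page`, `pr_info`) that are not defined here.

```c
/*
 * Hypothetical caller, sketched against follow_page_mask() as defined
 * above; mmap_sem is assumed to be held by the caller.
 */
static int inspect_address(struct vm_area_struct *vma, unsigned long addr)
{
	unsigned int page_mask;
	struct page *page;

	page = follow_page_mask(vma, addr, FOLL_GET, &page_mask);
	if (!page)
		return -ENOENT;		/* no mapping at addr */
	if (IS_ERR(page))
		return PTR_ERR(page);	/* mapped, but no usable struct page */

	pr_info("addr %#lx -> pfn %#lx (page_mask %u)\n",
		addr, page_to_pfn(page), page_mask);
	put_page(page);			/* balance FOLL_GET */
	return 0;
}
```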
f2b495ca KS |
421 | static int get_gate_page(struct mm_struct *mm, unsigned long address, |
422 | unsigned int gup_flags, struct vm_area_struct **vma, | |
423 | struct page **page) | |
424 | { | |
425 | pgd_t *pgd; | |
c2febafc | 426 | p4d_t *p4d; |
f2b495ca KS |
427 | pud_t *pud; |
428 | pmd_t *pmd; | |
429 | pte_t *pte; | |
430 | int ret = -EFAULT; | |
431 | ||
432 | /* user gate pages are read-only */ | |
433 | if (gup_flags & FOLL_WRITE) | |
434 | return -EFAULT; | |
435 | if (address > TASK_SIZE) | |
436 | pgd = pgd_offset_k(address); | |
437 | else | |
438 | pgd = pgd_offset_gate(mm, address); | |
439 | BUG_ON(pgd_none(*pgd)); | |
c2febafc KS |
440 | p4d = p4d_offset(pgd, address); |
441 | BUG_ON(p4d_none(*p4d)); | |
442 | pud = pud_offset(p4d, address); | |
f2b495ca KS |
443 | BUG_ON(pud_none(*pud)); |
444 | pmd = pmd_offset(pud, address); | |
84c3fc4e | 445 | if (!pmd_present(*pmd)) |
f2b495ca KS |
446 | return -EFAULT; |
447 | VM_BUG_ON(pmd_trans_huge(*pmd)); | |
448 | pte = pte_offset_map(pmd, address); | |
449 | if (pte_none(*pte)) | |
450 | goto unmap; | |
451 | *vma = get_gate_vma(mm); | |
452 | if (!page) | |
453 | goto out; | |
454 | *page = vm_normal_page(*vma, address, *pte); | |
455 | if (!*page) { | |
456 | if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte))) | |
457 | goto unmap; | |
458 | *page = pte_page(*pte); | |
df6ad698 JG |
459 | |
460 | /* | |
461 | * This should never happen (a device public page in the gate | |
462 | * area). | |
463 | */ | |
464 | if (is_device_public_page(*page)) | |
465 | goto unmap; | |
f2b495ca KS |
466 | } |
467 | get_page(*page); | |
468 | out: | |
469 | ret = 0; | |
470 | unmap: | |
471 | pte_unmap(pte); | |
472 | return ret; | |
473 | } | |
474 | ||
9a95f3cf PC |
475 | /* |
476 | * mmap_sem must be held on entry. If @nonblocking != NULL and | |
477 | * *@flags does not include FOLL_NOWAIT, the mmap_sem may be released. | |
478 | * If it is, *@nonblocking will be set to 0 and -EBUSY returned. | |
479 | */ | |
16744483 KS |
480 | static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma, |
481 | unsigned long address, unsigned int *flags, int *nonblocking) | |
482 | { | |
16744483 KS |
483 | unsigned int fault_flags = 0; |
484 | int ret; | |
485 | ||
de60f5f1 EM |
486 | /* mlock all present pages, but do not fault in new pages */ |
487 | if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK) | |
488 | return -ENOENT; | |
16744483 KS |
489 | if (*flags & FOLL_WRITE) |
490 | fault_flags |= FAULT_FLAG_WRITE; | |
1b2ee126 DH |
491 | if (*flags & FOLL_REMOTE) |
492 | fault_flags |= FAULT_FLAG_REMOTE; | |
16744483 KS |
493 | if (nonblocking) |
494 | fault_flags |= FAULT_FLAG_ALLOW_RETRY; | |
495 | if (*flags & FOLL_NOWAIT) | |
496 | fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT; | |
234b239b ALC |
497 | if (*flags & FOLL_TRIED) { |
498 | VM_WARN_ON_ONCE(fault_flags & FAULT_FLAG_ALLOW_RETRY); | |
499 | fault_flags |= FAULT_FLAG_TRIED; | |
500 | } | |
16744483 | 501 | |
dcddffd4 | 502 | ret = handle_mm_fault(vma, address, fault_flags); |
16744483 | 503 | if (ret & VM_FAULT_ERROR) { |
9a291a7c JM |
504 | int err = vm_fault_to_errno(ret, *flags); |
505 | ||
506 | if (err) | |
507 | return err; | |
16744483 KS |
508 | BUG(); |
509 | } | |
510 | ||
511 | if (tsk) { | |
512 | if (ret & VM_FAULT_MAJOR) | |
513 | tsk->maj_flt++; | |
514 | else | |
515 | tsk->min_flt++; | |
516 | } | |
517 | ||
518 | if (ret & VM_FAULT_RETRY) { | |
96312e61 | 519 | if (nonblocking && !(fault_flags & FAULT_FLAG_RETRY_NOWAIT)) |
16744483 KS |
520 | *nonblocking = 0; |
521 | return -EBUSY; | |
522 | } | |
523 | ||
524 | /* | |
525 | * The VM_FAULT_WRITE bit tells us that do_wp_page has broken COW when | |
526 | * necessary, even if maybe_mkwrite decided not to set pte_write. We | |
527 | * can thus safely do subsequent page lookups as if they were reads. | |
528 | * But only do so when looping for pte_write is futile: in some cases | |
529 | * userspace may also be wanting to write to the gotten user page, | |
530 | * which a read fault here might prevent (a readonly page might get | |
531 | * reCOWed by userspace write). | |
532 | */ | |
533 | if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE)) | |
19be0eaf | 534 | *flags |= FOLL_COW; |
16744483 KS |
535 | return 0; |
536 | } | |
537 | ||
fa5bb209 KS |
538 | static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags) |
539 | { | |
540 | vm_flags_t vm_flags = vma->vm_flags; | |
1b2ee126 DH |
541 | int write = (gup_flags & FOLL_WRITE); |
542 | int foreign = (gup_flags & FOLL_REMOTE); | |
fa5bb209 KS |
543 | |
544 | if (vm_flags & (VM_IO | VM_PFNMAP)) | |
545 | return -EFAULT; | |
546 | ||
1b2ee126 | 547 | if (write) { |
fa5bb209 KS |
548 | if (!(vm_flags & VM_WRITE)) { |
549 | if (!(gup_flags & FOLL_FORCE)) | |
550 | return -EFAULT; | |
551 | /* | |
552 | * We used to let the write,force case do COW in a | |
553 | * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could | |
554 | * set a breakpoint in a read-only mapping of an | |
555 | * executable, without corrupting the file (yet only | |
556 | * when that file had been opened for writing!). | |
557 | * Anon pages in shared mappings are surprising: now | |
558 | * just reject it. | |
559 | */ | |
46435364 | 560 | if (!is_cow_mapping(vm_flags)) |
fa5bb209 | 561 | return -EFAULT; |
fa5bb209 KS |
562 | } |
563 | } else if (!(vm_flags & VM_READ)) { | |
564 | if (!(gup_flags & FOLL_FORCE)) | |
565 | return -EFAULT; | |
566 | /* | |
567 | * Is there actually any vma we can reach here which does not | |
568 | * have VM_MAYREAD set? | |
569 | */ | |
570 | if (!(vm_flags & VM_MAYREAD)) | |
571 | return -EFAULT; | |
572 | } | |
d61172b4 DH |
573 | /* |
574 | * gups are always data accesses, not instruction | |
575 | * fetches, so execute=false here | |
576 | */ | |
577 | if (!arch_vma_access_permitted(vma, write, false, foreign)) | |
33a709b2 | 578 | return -EFAULT; |
fa5bb209 KS |
579 | return 0; |
580 | } | |
581 | ||
4bbd4c77 KS |
582 | /** |
583 | * __get_user_pages() - pin user pages in memory | |
584 | * @tsk: task_struct of target task | |
585 | * @mm: mm_struct of target mm | |
586 | * @start: starting user address | |
587 | * @nr_pages: number of pages from start to pin | |
588 | * @gup_flags: flags modifying pin behaviour | |
589 | * @pages: array that receives pointers to the pages pinned. | |
590 | * Should be at least nr_pages long. Or NULL, if caller | |
591 | * only intends to ensure the pages are faulted in. | |
592 | * @vmas: array of pointers to vmas corresponding to each page. | |
593 | * Or NULL if the caller does not require them. | |
594 | * @nonblocking: whether waiting for disk IO or mmap_sem contention | |
595 | * | |
596 | * Returns number of pages pinned. This may be fewer than the number | |
597 | * requested. If nr_pages is 0 or negative, returns 0. If no pages | |
598 | * were pinned, returns -errno. Each page returned must be released | |
599 | * with a put_page() call when it is finished with. vmas will only | |
600 | * remain valid while mmap_sem is held. | |
601 | * | |
9a95f3cf | 602 | * Must be called with mmap_sem held. It may be released. See below. |
4bbd4c77 KS |
603 | * |
604 | * __get_user_pages walks a process's page tables and takes a reference to | |
605 | * each struct page that each user address corresponds to at a given | |
606 | * instant. That is, it takes the page that would be accessed if a user | |
607 | * thread accesses the given user virtual address at that instant. | |
608 | * | |
609 | * This does not guarantee that the page exists in the user mappings when | |
610 | * __get_user_pages returns, and there may even be a completely different | |
611 | * page there in some cases (eg. if mmapped pagecache has been invalidated | |
612 | * and subsequently re faulted). However it does guarantee that the page | |
613 | * won't be freed completely. And mostly callers simply care that the page | |
614 | * contains data that was valid *at some point in time*. Typically, an IO | |
615 | * or similar operation cannot guarantee anything stronger anyway because | |
616 | * locks can't be held over the syscall boundary. | |
617 | * | |
618 | * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If | |
619 | * the page is written to, set_page_dirty (or set_page_dirty_lock, as | |
620 | * appropriate) must be called after the page is finished with, and | |
621 | * before put_page is called. | |
622 | * | |
623 | * If @nonblocking != NULL, __get_user_pages will not wait for disk IO | |
624 | * or mmap_sem contention, and if waiting is needed to pin all pages, | |
9a95f3cf PC |
625 | * *@nonblocking will be set to 0. Further, if @gup_flags does not |
626 | * include FOLL_NOWAIT, the mmap_sem will be released via up_read() in | |
627 | * this case. | |
628 | * | |
629 | * A caller using such a combination of @nonblocking and @gup_flags | |
630 | * must therefore hold the mmap_sem for reading only, and recognize | |
631 | * when it's been released. Otherwise, it must be held for either | |
632 | * reading or writing and will not be released. | |
4bbd4c77 KS |
633 | * |
634 | * In most cases, get_user_pages or get_user_pages_fast should be used | |
635 | * instead of __get_user_pages. __get_user_pages should be used only if | |
636 | * you need some special @gup_flags. | |
637 | */ | |
0d731759 | 638 | static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, |
4bbd4c77 KS |
639 | unsigned long start, unsigned long nr_pages, |
640 | unsigned int gup_flags, struct page **pages, | |
641 | struct vm_area_struct **vmas, int *nonblocking) | |
642 | { | |
fa5bb209 | 643 | long i = 0; |
4bbd4c77 | 644 | unsigned int page_mask; |
fa5bb209 | 645 | struct vm_area_struct *vma = NULL; |
4bbd4c77 KS |
646 | |
647 | if (!nr_pages) | |
648 | return 0; | |
649 | ||
650 | VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET)); | |
651 | ||
652 | /* | |
653 | * If FOLL_FORCE is set then do not force a full fault as the hinting | |
654 | * fault information is unrelated to the reference behaviour of a task | |
655 | * using the address space | |
656 | */ | |
657 | if (!(gup_flags & FOLL_FORCE)) | |
658 | gup_flags |= FOLL_NUMA; | |
659 | ||
4bbd4c77 | 660 | do { |
fa5bb209 KS |
661 | struct page *page; |
662 | unsigned int foll_flags = gup_flags; | |
663 | unsigned int page_increm; | |
664 | ||
665 | /* first iteration or cross vma bound */ | |
666 | if (!vma || start >= vma->vm_end) { | |
667 | vma = find_extend_vma(mm, start); | |
668 | if (!vma && in_gate_area(mm, start)) { | |
669 | int ret; | |
670 | ret = get_gate_page(mm, start & PAGE_MASK, | |
671 | gup_flags, &vma, | |
672 | pages ? &pages[i] : NULL); | |
673 | if (ret) | |
674 | return i ? : ret; | |
675 | page_mask = 0; | |
676 | goto next_page; | |
677 | } | |
4bbd4c77 | 678 | |
fa5bb209 KS |
679 | if (!vma || check_vma_flags(vma, gup_flags)) |
680 | return i ? : -EFAULT; | |
681 | if (is_vm_hugetlb_page(vma)) { | |
682 | i = follow_hugetlb_page(mm, vma, pages, vmas, | |
683 | &start, &nr_pages, i, | |
87ffc118 | 684 | gup_flags, nonblocking); |
fa5bb209 | 685 | continue; |
4bbd4c77 | 686 | } |
fa5bb209 KS |
687 | } |
688 | retry: | |
689 | /* | |
690 | * If we have a pending SIGKILL, don't keep faulting pages and | |
691 | * potentially allocating memory. | |
692 | */ | |
693 | if (unlikely(fatal_signal_pending(current))) | |
694 | return i ? i : -ERESTARTSYS; | |
695 | cond_resched(); | |
696 | page = follow_page_mask(vma, start, foll_flags, &page_mask); | |
697 | if (!page) { | |
698 | int ret; | |
699 | ret = faultin_page(tsk, vma, start, &foll_flags, | |
700 | nonblocking); | |
701 | switch (ret) { | |
702 | case 0: | |
703 | goto retry; | |
704 | case -EFAULT: | |
705 | case -ENOMEM: | |
706 | case -EHWPOISON: | |
707 | return i ? i : ret; | |
708 | case -EBUSY: | |
709 | return i; | |
710 | case -ENOENT: | |
711 | goto next_page; | |
4bbd4c77 | 712 | } |
fa5bb209 | 713 | BUG(); |
1027e443 KS |
714 | } else if (PTR_ERR(page) == -EEXIST) { |
715 | /* | |
716 | * Proper page table entry exists, but no corresponding | |
717 | * struct page. | |
718 | */ | |
719 | goto next_page; | |
720 | } else if (IS_ERR(page)) { | |
fa5bb209 | 721 | return i ? i : PTR_ERR(page); |
1027e443 | 722 | } |
fa5bb209 KS |
723 | if (pages) { |
724 | pages[i] = page; | |
725 | flush_anon_page(vma, page, start); | |
726 | flush_dcache_page(page); | |
727 | page_mask = 0; | |
4bbd4c77 | 728 | } |
4bbd4c77 | 729 | next_page: |
fa5bb209 KS |
730 | if (vmas) { |
731 | vmas[i] = vma; | |
732 | page_mask = 0; | |
733 | } | |
734 | page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask); | |
735 | if (page_increm > nr_pages) | |
736 | page_increm = nr_pages; | |
737 | i += page_increm; | |
738 | start += page_increm * PAGE_SIZE; | |
739 | nr_pages -= page_increm; | |
4bbd4c77 KS |
740 | } while (nr_pages); |
741 | return i; | |
4bbd4c77 | 742 | } |
4bbd4c77 | 743 | |
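The dirty/release discipline spelled out in the comment above (call set_page_dirty or set_page_dirty_lock before the final put_page when FOLL_WRITE was used) is easy to get wrong. Here is a minimal sketch using the exported get_user_pages() wrapper defined later in this file, since __get_user_pages() itself is static; `pin_and_dirty` is a hypothetical name and error handling is abbreviated.

```c
static long pin_and_dirty(unsigned long start, unsigned long nr_pages,
			  struct page **pages)
{
	long i, pinned;

	down_read(&current->mm->mmap_sem);
	pinned = get_user_pages(start, nr_pages, FOLL_WRITE, pages, NULL);
	up_read(&current->mm->mmap_sem);
	if (pinned <= 0)
		return pinned;

	/* ... write into the pages here, e.g. via kmap() or DMA ... */

	for (i = 0; i < pinned; i++) {
		set_page_dirty_lock(pages[i]);	/* before the final put_page() */
		put_page(pages[i]);
	}
	return pinned;
}
```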
771ab430 TK |
744 | static bool vma_permits_fault(struct vm_area_struct *vma, |
745 | unsigned int fault_flags) | |
d4925e00 | 746 | { |
1b2ee126 DH |
747 | bool write = !!(fault_flags & FAULT_FLAG_WRITE); |
748 | bool foreign = !!(fault_flags & FAULT_FLAG_REMOTE); | |
33a709b2 | 749 | vm_flags_t vm_flags = write ? VM_WRITE : VM_READ; |
d4925e00 DH |
750 | |
751 | if (!(vm_flags & vma->vm_flags)) | |
752 | return false; | |
753 | ||
33a709b2 DH |
754 | /* |
755 | * The architecture might have a hardware protection | |
1b2ee126 | 756 | * mechanism other than read/write that can deny access. |
d61172b4 DH |
757 | * |
758 | * gup always represents data access, not instruction | |
759 | * fetches, so execute=false here: | |
33a709b2 | 760 | */ |
d61172b4 | 761 | if (!arch_vma_access_permitted(vma, write, false, foreign)) |
33a709b2 DH |
762 | return false; |
763 | ||
d4925e00 DH |
764 | return true; |
765 | } | |
766 | ||
4bbd4c77 KS |
767 | /* |
768 | * fixup_user_fault() - manually resolve a user page fault | |
769 | * @tsk: the task_struct to use for page fault accounting, or | |
770 | * NULL if faults are not to be recorded. | |
771 | * @mm: mm_struct of target mm | |
772 | * @address: user address | |
773 | * @fault_flags:flags to pass down to handle_mm_fault() | |
4a9e1cda DD |
774 | * @unlocked: did we unlock the mmap_sem while retrying, maybe NULL if caller |
775 | * does not allow retry | |
4bbd4c77 KS |
776 | * |
777 | * This is meant to be called in the specific scenario where for locking reasons | |
778 | * we try to access user memory in atomic context (within a pagefault_disable() | |
779 | * section), this returns -EFAULT, and we want to resolve the user fault before | |
780 | * trying again. | |
781 | * | |
782 | * Typically this is meant to be used by the futex code. | |
783 | * | |
784 | * The main difference with get_user_pages() is that this function will | |
785 | * unconditionally call handle_mm_fault() which will in turn perform all the | |
786 | * necessary SW fixup of the dirty and young bits in the PTE, while | |
4a9e1cda | 787 | * get_user_pages() only guarantees to update these in the struct page. |
4bbd4c77 KS |
788 | * |
789 | * This is important for some architectures where those bits also gate the | |
790 | * access permission to the page because they are maintained in software. On | |
791 | * such architectures, gup() will not be enough to make a subsequent access | |
792 | * succeed. | |
793 | * | |
4a9e1cda DD |
794 | * This function will not return with an unlocked mmap_sem. So it has not the |
795 | * same semantics wrt the @mm->mmap_sem as does filemap_fault(). | |
4bbd4c77 KS |
796 | */ |
797 | int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm, | |
4a9e1cda DD |
798 | unsigned long address, unsigned int fault_flags, |
799 | bool *unlocked) | |
4bbd4c77 KS |
800 | { |
801 | struct vm_area_struct *vma; | |
4a9e1cda DD |
802 | int ret, major = 0; |
803 | ||
804 | if (unlocked) | |
805 | fault_flags |= FAULT_FLAG_ALLOW_RETRY; | |
4bbd4c77 | 806 | |
4a9e1cda | 807 | retry: |
4bbd4c77 KS |
808 | vma = find_extend_vma(mm, address); |
809 | if (!vma || address < vma->vm_start) | |
810 | return -EFAULT; | |
811 | ||
d4925e00 | 812 | if (!vma_permits_fault(vma, fault_flags)) |
4bbd4c77 KS |
813 | return -EFAULT; |
814 | ||
dcddffd4 | 815 | ret = handle_mm_fault(vma, address, fault_flags); |
4a9e1cda | 816 | major |= ret & VM_FAULT_MAJOR; |
4bbd4c77 | 817 | if (ret & VM_FAULT_ERROR) { |
9a291a7c JM |
818 | int err = vm_fault_to_errno(ret, 0); |
819 | ||
820 | if (err) | |
821 | return err; | |
4bbd4c77 KS |
822 | BUG(); |
823 | } | |
4a9e1cda DD |
824 | |
825 | if (ret & VM_FAULT_RETRY) { | |
826 | down_read(&mm->mmap_sem); | |
827 | if (!(fault_flags & FAULT_FLAG_TRIED)) { | |
828 | *unlocked = true; | |
829 | fault_flags &= ~FAULT_FLAG_ALLOW_RETRY; | |
830 | fault_flags |= FAULT_FLAG_TRIED; | |
831 | goto retry; | |
832 | } | |
833 | } | |
834 | ||
4bbd4c77 | 835 | if (tsk) { |
4a9e1cda | 836 | if (major) |
4bbd4c77 KS |
837 | tsk->maj_flt++; |
838 | else | |
839 | tsk->min_flt++; | |
840 | } | |
841 | return 0; | |
842 | } | |
add6a0cd | 843 | EXPORT_SYMBOL_GPL(fixup_user_fault); |
4bbd4c77 | 844 | |
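A sketch of the futex-style pattern the comment above describes: attempt the access with page faults disabled, and on failure resolve the fault with fixup_user_fault() and retry. `read_user_word` is a hypothetical helper; `__get_user`, `pagefault_disable()`/`pagefault_enable()` and the mmap_sem locking are standard kernel APIs not defined in this file.

```c
static int read_user_word(u32 __user *uaddr, u32 *val)
{
	struct mm_struct *mm = current->mm;
	bool unlocked = false;
	int ret;

again:
	pagefault_disable();
	ret = __get_user(*val, uaddr);
	pagefault_enable();
	if (!ret)
		return 0;

	/* Resolve the fault the slow way, then retry the atomic access. */
	down_read(&mm->mmap_sem);
	ret = fixup_user_fault(current, mm, (unsigned long)uaddr, 0, &unlocked);
	up_read(&mm->mmap_sem);
	if (ret)
		return ret;
	goto again;
}
```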
f0818f47 AA |
845 | static __always_inline long __get_user_pages_locked(struct task_struct *tsk, |
846 | struct mm_struct *mm, | |
847 | unsigned long start, | |
848 | unsigned long nr_pages, | |
f0818f47 AA |
849 | struct page **pages, |
850 | struct vm_area_struct **vmas, | |
e716712f | 851 | int *locked, |
0fd71a56 | 852 | unsigned int flags) |
f0818f47 | 853 | { |
f0818f47 AA |
854 | long ret, pages_done; |
855 | bool lock_dropped; | |
856 | ||
857 | if (locked) { | |
858 | /* if VM_FAULT_RETRY can be returned, vmas become invalid */ | |
859 | BUG_ON(vmas); | |
860 | /* check caller initialized locked */ | |
861 | BUG_ON(*locked != 1); | |
862 | } | |
863 | ||
864 | if (pages) | |
865 | flags |= FOLL_GET; | |
f0818f47 AA |
866 | |
867 | pages_done = 0; | |
868 | lock_dropped = false; | |
869 | for (;;) { | |
870 | ret = __get_user_pages(tsk, mm, start, nr_pages, flags, pages, | |
871 | vmas, locked); | |
872 | if (!locked) | |
873 | /* VM_FAULT_RETRY couldn't trigger, bypass */ | |
874 | return ret; | |
875 | ||
876 | /* VM_FAULT_RETRY cannot return errors */ | |
877 | if (!*locked) { | |
878 | BUG_ON(ret < 0); | |
879 | BUG_ON(ret >= nr_pages); | |
880 | } | |
881 | ||
882 | if (!pages) | |
883 | /* If it's a prefault don't insist harder */ | |
884 | return ret; | |
885 | ||
886 | if (ret > 0) { | |
887 | nr_pages -= ret; | |
888 | pages_done += ret; | |
889 | if (!nr_pages) | |
890 | break; | |
891 | } | |
892 | if (*locked) { | |
96312e61 AA |
893 | /* |
894 | * VM_FAULT_RETRY didn't trigger or it was a | |
895 | * FOLL_NOWAIT. | |
896 | */ | |
f0818f47 AA |
897 | if (!pages_done) |
898 | pages_done = ret; | |
899 | break; | |
900 | } | |
901 | /* VM_FAULT_RETRY triggered, so seek to the faulting offset */ | |
902 | pages += ret; | |
903 | start += ret << PAGE_SHIFT; | |
904 | ||
905 | /* | |
906 | * Repeat on the address that fired VM_FAULT_RETRY | |
907 | * without FAULT_FLAG_ALLOW_RETRY but with | |
908 | * FAULT_FLAG_TRIED. | |
909 | */ | |
910 | *locked = 1; | |
911 | lock_dropped = true; | |
912 | down_read(&mm->mmap_sem); | |
913 | ret = __get_user_pages(tsk, mm, start, 1, flags | FOLL_TRIED, | |
914 | pages, NULL, NULL); | |
915 | if (ret != 1) { | |
916 | BUG_ON(ret > 1); | |
917 | if (!pages_done) | |
918 | pages_done = ret; | |
919 | break; | |
920 | } | |
921 | nr_pages--; | |
922 | pages_done++; | |
923 | if (!nr_pages) | |
924 | break; | |
925 | pages++; | |
926 | start += PAGE_SIZE; | |
927 | } | |
e716712f | 928 | if (lock_dropped && *locked) { |
f0818f47 AA |
929 | /* |
930 | * We must let the caller know we temporarily dropped the lock | |
931 | * and so the critical section protected by it was lost. | |
932 | */ | |
933 | up_read(&mm->mmap_sem); | |
934 | *locked = 0; | |
935 | } | |
936 | return pages_done; | |
937 | } | |
938 | ||
939 | /* | |
940 | * We can leverage the VM_FAULT_RETRY functionality in the page fault | |
941 | * paths better by using either get_user_pages_locked() or | |
942 | * get_user_pages_unlocked(). | |
943 | * | |
944 | * get_user_pages_locked() is suitable to replace the form: | |
945 | * | |
946 | * down_read(&mm->mmap_sem); | |
947 | * do_something() | |
948 | * get_user_pages(tsk, mm, ..., pages, NULL); | |
949 | * up_read(&mm->mmap_sem); | |
950 | * | |
951 | * to: | |
952 | * | |
953 | * int locked = 1; | |
954 | * down_read(&mm->mmap_sem); | |
955 | * do_something() | |
956 | * get_user_pages_locked(tsk, mm, ..., pages, &locked); | |
957 | * if (locked) | |
958 | * up_read(&mm->mmap_sem); | |
959 | */ | |
c12d2da5 | 960 | long get_user_pages_locked(unsigned long start, unsigned long nr_pages, |
3b913179 | 961 | unsigned int gup_flags, struct page **pages, |
f0818f47 AA |
962 | int *locked) |
963 | { | |
cde70140 | 964 | return __get_user_pages_locked(current, current->mm, start, nr_pages, |
e716712f | 965 | pages, NULL, locked, |
3b913179 | 966 | gup_flags | FOLL_TOUCH); |
f0818f47 | 967 | } |
c12d2da5 | 968 | EXPORT_SYMBOL(get_user_pages_locked); |
f0818f47 AA |
969 | |
970 | /* | |
971 | * get_user_pages_unlocked() is suitable to replace the form: | |
972 | * | |
973 | * down_read(&mm->mmap_sem); | |
974 | * get_user_pages(tsk, mm, ..., pages, NULL); | |
975 | * up_read(&mm->mmap_sem); | |
976 | * | |
977 | * with: | |
978 | * | |
979 | * get_user_pages_unlocked(tsk, mm, ..., pages); | |
980 | * | |
981 | * It is functionally equivalent to get_user_pages_fast so | |
80a79516 LS |
982 | * get_user_pages_fast should be used instead if specific gup_flags |
983 | * (e.g. FOLL_FORCE) are not required. | |
f0818f47 | 984 | */ |
c12d2da5 | 985 | long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, |
c164154f | 986 | struct page **pages, unsigned int gup_flags) |
f0818f47 | 987 | { |
c803c9c6 AV |
988 | struct mm_struct *mm = current->mm; |
989 | int locked = 1; | |
990 | long ret; | |
991 | ||
992 | down_read(&mm->mmap_sem); | |
993 | ret = __get_user_pages_locked(current, mm, start, nr_pages, pages, NULL, | |
e716712f | 994 | &locked, gup_flags | FOLL_TOUCH); |
c803c9c6 AV |
995 | if (locked) |
996 | up_read(&mm->mmap_sem); | |
997 | return ret; | |
f0818f47 | 998 | } |
c12d2da5 | 999 | EXPORT_SYMBOL(get_user_pages_unlocked); |
f0818f47 | 1000 | |
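For completeness, a one-page sketch of the calling convention: with get_user_pages_unlocked() the caller never touches mmap_sem itself. `grab_one_page` is a hypothetical name, and FOLL_WRITE is just an example of the "specific gup_flags" mentioned above.

```c
static struct page *grab_one_page(unsigned long start)
{
	struct page *page;

	if (get_user_pages_unlocked(start, 1, &page, FOLL_WRITE) != 1)
		return NULL;
	return page;	/* caller must put_page() when done */
}
```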
4bbd4c77 | 1001 | /* |
1e987790 | 1002 | * get_user_pages_remote() - pin user pages in memory |
4bbd4c77 KS |
1003 | * @tsk: the task_struct to use for page fault accounting, or |
1004 | * NULL if faults are not to be recorded. | |
1005 | * @mm: mm_struct of target mm | |
1006 | * @start: starting user address | |
1007 | * @nr_pages: number of pages from start to pin | |
9beae1ea | 1008 | * @gup_flags: flags modifying lookup behaviour |
4bbd4c77 KS |
1009 | * @pages: array that receives pointers to the pages pinned. |
1010 | * Should be at least nr_pages long. Or NULL, if caller | |
1011 | * only intends to ensure the pages are faulted in. | |
1012 | * @vmas: array of pointers to vmas corresponding to each page. | |
1013 | * Or NULL if the caller does not require them. | |
5b56d49f LS |
1014 | * @locked: pointer to lock flag indicating whether lock is held and |
1015 | * subsequently whether VM_FAULT_RETRY functionality can be | |
1016 | * utilised. Lock must initially be held. | |
4bbd4c77 KS |
1017 | * |
1018 | * Returns number of pages pinned. This may be fewer than the number | |
1019 | * requested. If nr_pages is 0 or negative, returns 0. If no pages | |
1020 | * were pinned, returns -errno. Each page returned must be released | |
1021 | * with a put_page() call when it is finished with. vmas will only | |
1022 | * remain valid while mmap_sem is held. | |
1023 | * | |
1024 | * Must be called with mmap_sem held for read or write. | |
1025 | * | |
1026 | * get_user_pages walks a process's page tables and takes a reference to | |
1027 | * each struct page that each user address corresponds to at a given | |
1028 | * instant. That is, it takes the page that would be accessed if a user | |
1029 | * thread accesses the given user virtual address at that instant. | |
1030 | * | |
1031 | * This does not guarantee that the page exists in the user mappings when | |
1032 | * get_user_pages returns, and there may even be a completely different | |
1033 | * page there in some cases (eg. if mmapped pagecache has been invalidated | |
1034 | * and subsequently re faulted). However it does guarantee that the page | |
1035 | * won't be freed completely. And mostly callers simply care that the page | |
1036 | * contains data that was valid *at some point in time*. Typically, an IO | |
1037 | * or similar operation cannot guarantee anything stronger anyway because | |
1038 | * locks can't be held over the syscall boundary. | |
1039 | * | |
9beae1ea LS |
1040 | * If gup_flags & FOLL_WRITE == 0, the page must not be written to. If the page |
1041 | * is written to, set_page_dirty (or set_page_dirty_lock, as appropriate) must | |
1042 | * be called after the page is finished with, and before put_page is called. | |
4bbd4c77 KS |
1043 | * |
1044 | * get_user_pages is typically used for fewer-copy IO operations, to get a | |
1045 | * handle on the memory by some means other than accesses via the user virtual | |
1046 | * addresses. The pages may be submitted for DMA to devices or accessed via | |
1047 | * their kernel linear mapping (via the kmap APIs). Care should be taken to | |
1048 | * use the correct cache flushing APIs. | |
1049 | * | |
1050 | * See also get_user_pages_fast, for performance critical applications. | |
f0818f47 AA |
1051 | * |
1052 | * get_user_pages should be phased out in favor of | |
1053 | * get_user_pages_locked|unlocked or get_user_pages_fast. Nothing | |
1054 | * should use get_user_pages because it cannot pass | |
1055 | * FAULT_FLAG_ALLOW_RETRY to handle_mm_fault. | |
4bbd4c77 | 1056 | */ |
1e987790 DH |
1057 | long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm, |
1058 | unsigned long start, unsigned long nr_pages, | |
9beae1ea | 1059 | unsigned int gup_flags, struct page **pages, |
5b56d49f | 1060 | struct vm_area_struct **vmas, int *locked) |
4bbd4c77 | 1061 | { |
859110d7 | 1062 | return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas, |
e716712f | 1063 | locked, |
9beae1ea | 1064 | gup_flags | FOLL_TOUCH | FOLL_REMOTE); |
1e987790 DH |
1065 | } |
1066 | EXPORT_SYMBOL(get_user_pages_remote); | |
1067 | ||
1068 | /* | |
d4edcf0d DH |
1069 | * This is the same as get_user_pages_remote(), just with a |
1070 | * less-flexible calling convention where we assume that the task | |
5b56d49f LS |
1071 | * and mm being operated on are the current task's and don't allow |
1072 | * passing of a locked parameter. We also obviously don't pass | |
1073 | * FOLL_REMOTE in here. | |
1e987790 | 1074 | */ |
c12d2da5 | 1075 | long get_user_pages(unsigned long start, unsigned long nr_pages, |
768ae309 | 1076 | unsigned int gup_flags, struct page **pages, |
1e987790 DH |
1077 | struct vm_area_struct **vmas) |
1078 | { | |
cde70140 | 1079 | return __get_user_pages_locked(current, current->mm, start, nr_pages, |
e716712f | 1080 | pages, vmas, NULL, |
768ae309 | 1081 | gup_flags | FOLL_TOUCH); |
4bbd4c77 | 1082 | } |
c12d2da5 | 1083 | EXPORT_SYMBOL(get_user_pages); |
4bbd4c77 | 1084 | |
2bb6d283 DW |
1085 | #ifdef CONFIG_FS_DAX |
1086 | /* | |
1087 | * This is the same as get_user_pages() in that it assumes we are | |
1088 | * operating on the current task's mm, but it goes further to validate | |
1089 | * that the vmas associated with the address range are suitable for | |
1090 | * longterm elevated page reference counts. For example, filesystem-dax | |
1091 | * mappings are subject to the lifetime enforced by the filesystem and | |
1092 | * we need guarantees that longterm users like RDMA and V4L2 only | |
1093 | * establish mappings that have a kernel enforced revocation mechanism. | |
1094 | * | |
1095 | * "longterm" == userspace controlled elevated page count lifetime. | |
1096 | * Contrast this to iov_iter_get_pages() usages which are transient. | |
1097 | */ | |
1098 | long get_user_pages_longterm(unsigned long start, unsigned long nr_pages, | |
1099 | unsigned int gup_flags, struct page **pages, | |
1100 | struct vm_area_struct **vmas_arg) | |
1101 | { | |
1102 | struct vm_area_struct **vmas = vmas_arg; | |
1103 | struct vm_area_struct *vma_prev = NULL; | |
1104 | long rc, i; | |
1105 | ||
1106 | if (!pages) | |
1107 | return -EINVAL; | |
1108 | ||
1109 | if (!vmas) { | |
1110 | vmas = kcalloc(nr_pages, sizeof(struct vm_area_struct *), | |
1111 | GFP_KERNEL); | |
1112 | if (!vmas) | |
1113 | return -ENOMEM; | |
1114 | } | |
1115 | ||
1116 | rc = get_user_pages(start, nr_pages, gup_flags, pages, vmas); | |
1117 | ||
1118 | for (i = 0; i < rc; i++) { | |
1119 | struct vm_area_struct *vma = vmas[i]; | |
1120 | ||
1121 | if (vma == vma_prev) | |
1122 | continue; | |
1123 | ||
1124 | vma_prev = vma; | |
1125 | ||
1126 | if (vma_is_fsdax(vma)) | |
1127 | break; | |
1128 | } | |
1129 | ||
1130 | /* | |
1131 | * Either get_user_pages() failed, or the vma validation | |
1132 | * succeeded, in either case we don't need to put_page() before | |
1133 | * returning. | |
1134 | */ | |
1135 | if (i >= rc) | |
1136 | goto out; | |
1137 | ||
1138 | for (i = 0; i < rc; i++) | |
1139 | put_page(pages[i]); | |
1140 | rc = -EOPNOTSUPP; | |
1141 | out: | |
1142 | if (vmas != vmas_arg) | |
1143 | kfree(vmas); | |
1144 | return rc; | |
1145 | } | |
1146 | EXPORT_SYMBOL(get_user_pages_longterm); | |
1147 | #endif /* CONFIG_FS_DAX */ | |
1148 | ||
acc3c8d1 KS |
1149 | /** |
1150 | * populate_vma_page_range() - populate a range of pages in the vma. | |
1151 | * @vma: target vma | |
1152 | * @start: start address | |
1153 | * @end: end address | |
1154 | * @nonblocking: | |
1155 | * | |
1156 | * This takes care of mlocking the pages too if VM_LOCKED is set. | |
1157 | * | |
1158 | * return 0 on success, negative error code on error. | |
1159 | * | |
1160 | * vma->vm_mm->mmap_sem must be held. | |
1161 | * | |
1162 | * If @nonblocking is NULL, it may be held for read or write and will | |
1163 | * be unperturbed. | |
1164 | * | |
1165 | * If @nonblocking is non-NULL, it must held for read only and may be | |
1166 | * released. If it's released, *@nonblocking will be set to 0. | |
1167 | */ | |
1168 | long populate_vma_page_range(struct vm_area_struct *vma, | |
1169 | unsigned long start, unsigned long end, int *nonblocking) | |
1170 | { | |
1171 | struct mm_struct *mm = vma->vm_mm; | |
1172 | unsigned long nr_pages = (end - start) / PAGE_SIZE; | |
1173 | int gup_flags; | |
1174 | ||
1175 | VM_BUG_ON(start & ~PAGE_MASK); | |
1176 | VM_BUG_ON(end & ~PAGE_MASK); | |
1177 | VM_BUG_ON_VMA(start < vma->vm_start, vma); | |
1178 | VM_BUG_ON_VMA(end > vma->vm_end, vma); | |
1179 | VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm); | |
1180 | ||
de60f5f1 EM |
1181 | gup_flags = FOLL_TOUCH | FOLL_POPULATE | FOLL_MLOCK; |
1182 | if (vma->vm_flags & VM_LOCKONFAULT) | |
1183 | gup_flags &= ~FOLL_POPULATE; | |
acc3c8d1 KS |
1184 | /* |
1185 | * We want to touch writable mappings with a write fault in order | |
1186 | * to break COW, except for shared mappings because these don't COW | |
1187 | * and we would not want to dirty them for nothing. | |
1188 | */ | |
1189 | if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE) | |
1190 | gup_flags |= FOLL_WRITE; | |
1191 | ||
1192 | /* | |
1193 | * We want mlock to succeed for regions that have any permissions | |
1194 | * other than PROT_NONE. | |
1195 | */ | |
1196 | if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) | |
1197 | gup_flags |= FOLL_FORCE; | |
1198 | ||
1199 | /* | |
1200 | * We made sure addr is within a VMA, so the following will | |
1201 | * not result in a stack expansion that recurses back here. | |
1202 | */ | |
1203 | return __get_user_pages(current, mm, start, nr_pages, gup_flags, | |
1204 | NULL, NULL, nonblocking); | |
1205 | } | |
1206 | ||
1207 | /* | |
1208 | * __mm_populate - populate and/or mlock pages within a range of address space. | |
1209 | * | |
1210 | * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap | |
1211 | * flags. VMAs must be already marked with the desired vm_flags, and | |
1212 | * mmap_sem must not be held. | |
1213 | */ | |
1214 | int __mm_populate(unsigned long start, unsigned long len, int ignore_errors) | |
1215 | { | |
1216 | struct mm_struct *mm = current->mm; | |
1217 | unsigned long end, nstart, nend; | |
1218 | struct vm_area_struct *vma = NULL; | |
1219 | int locked = 0; | |
1220 | long ret = 0; | |
1221 | ||
1222 | VM_BUG_ON(start & ~PAGE_MASK); | |
1223 | VM_BUG_ON(len != PAGE_ALIGN(len)); | |
1224 | end = start + len; | |
1225 | ||
1226 | for (nstart = start; nstart < end; nstart = nend) { | |
1227 | /* | |
1228 | * We want to fault in pages for [nstart; end) address range. | |
1229 | * Find first corresponding VMA. | |
1230 | */ | |
1231 | if (!locked) { | |
1232 | locked = 1; | |
1233 | down_read(&mm->mmap_sem); | |
1234 | vma = find_vma(mm, nstart); | |
1235 | } else if (nstart >= vma->vm_end) | |
1236 | vma = vma->vm_next; | |
1237 | if (!vma || vma->vm_start >= end) | |
1238 | break; | |
1239 | /* | |
1240 | * Set [nstart; nend) to intersection of desired address | |
1241 | * range with the first VMA. Also, skip undesirable VMA types. | |
1242 | */ | |
1243 | nend = min(end, vma->vm_end); | |
1244 | if (vma->vm_flags & (VM_IO | VM_PFNMAP)) | |
1245 | continue; | |
1246 | if (nstart < vma->vm_start) | |
1247 | nstart = vma->vm_start; | |
1248 | /* | |
1249 | * Now fault in a range of pages. populate_vma_page_range() | |
1250 | * double checks the vma flags, so that it won't mlock pages | |
1251 | * if the vma was already munlocked. | |
1252 | */ | |
1253 | ret = populate_vma_page_range(vma, nstart, nend, &locked); | |
1254 | if (ret < 0) { | |
1255 | if (ignore_errors) { | |
1256 | ret = 0; | |
1257 | continue; /* continue at next VMA */ | |
1258 | } | |
1259 | break; | |
1260 | } | |
1261 | nend = nstart + ret * PAGE_SIZE; | |
1262 | ret = 0; | |
1263 | } | |
1264 | if (locked) | |
1265 | up_read(&mm->mmap_sem); | |
1266 | return ret; /* 0 or negative error code */ | |
1267 | } | |
1268 | ||
4bbd4c77 KS |
1269 | /** |
1270 | * get_dump_page() - pin user page in memory while writing it to core dump | |
1271 | * @addr: user address | |
1272 | * | |
1273 | * Returns struct page pointer of user page pinned for dump, | |
ea1754a0 | 1274 | * to be freed afterwards by put_page(). |
4bbd4c77 KS |
1275 | * |
1276 | * Returns NULL on any kind of failure - a hole must then be inserted into | |
1277 | * the corefile, to preserve alignment with its headers; and also returns | |
1278 | * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found - | |
1279 | * allowing a hole to be left in the corefile to save diskspace. | |
1280 | * | |
1281 | * Called without mmap_sem, but after all other threads have been killed. | |
1282 | */ | |
1283 | #ifdef CONFIG_ELF_CORE | |
1284 | struct page *get_dump_page(unsigned long addr) | |
1285 | { | |
1286 | struct vm_area_struct *vma; | |
1287 | struct page *page; | |
1288 | ||
1289 | if (__get_user_pages(current, current->mm, addr, 1, | |
1290 | FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma, | |
1291 | NULL) < 1) | |
1292 | return NULL; | |
1293 | flush_cache_page(vma, addr, page_to_pfn(page)); | |
1294 | return page; | |
1295 | } | |
1296 | #endif /* CONFIG_ELF_CORE */ | |
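A sketch of how a core-dump writer consumes this, in the spirit of fs/binfmt_elf.c: a NULL return becomes a zero-filled hole in the corefile rather than an error. `dump_one_page` is a hypothetical helper; `dump_emit()`, `dump_skip()` and struct coredump_params are the coredump helpers from fs/coredump.c, assumed unchanged here.

```c
static int dump_one_page(struct coredump_params *cprm, unsigned long addr)
{
	struct page *page = get_dump_page(addr);
	int ok;

	if (!page)
		return dump_skip(cprm, PAGE_SIZE);	/* leave a hole in the corefile */

	ok = dump_emit(cprm, kmap(page), PAGE_SIZE);
	kunmap(page);
	put_page(page);
	return ok;
}
```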
2667f50e SC |
1297 | |
1298 | /* | |
e585513b | 1299 | * Generic Fast GUP |
2667f50e SC |
1300 | * |
1301 | * get_user_pages_fast attempts to pin user pages by walking the page | |
1302 | * tables directly and avoids taking locks. Thus the walker needs to be | |
1303 | * protected from page table pages being freed from under it, and should | |
1304 | * block any THP splits. | |
1305 | * | |
1306 | * One way to achieve this is to have the walker disable interrupts, and | |
1307 | * rely on IPIs from the TLB flushing code blocking before the page table | |
1308 | * pages are freed. This is unsuitable for architectures that do not need | |
1309 | * to broadcast an IPI when invalidating TLBs. | |
1310 | * | |
1311 | * Another way to achieve this is to batch up page table containing pages | |
1312 | * belonging to more than one mm_user, then rcu_sched a callback to free those | |
1313 | * pages. Disabling interrupts will allow the fast_gup walker to both block | |
1314 | * the rcu_sched callback, and an IPI that we broadcast for splitting THPs | |
1315 | * (which is a relatively rare event). The code below adopts this strategy. | |
1316 | * | |
1317 | * Before activating this code, please be aware that the following assumptions | |
1318 | * are currently made: | |
1319 | * | |
e585513b KS |
1320 | * *) Either HAVE_RCU_TABLE_FREE is enabled, and tlb_remove_table() is used to |
1321 | * free pages containing page tables or TLB flushing requires IPI broadcast. | |
2667f50e | 1322 | * |
2667f50e SC |
1323 | * *) ptes can be read atomically by the architecture. |
1324 | * | |
1325 | * *) access_ok is sufficient to validate userspace address ranges. | |
1326 | * | |
1327 | * The last two assumptions can be relaxed by the addition of helper functions. | |
1328 | * | |
1329 | * This code is based heavily on the PowerPC implementation by Nick Piggin. | |
1330 | */ | |
e585513b | 1331 | #ifdef CONFIG_HAVE_GENERIC_GUP |
2667f50e | 1332 | |
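A hedged sketch of the strategy just described, roughly the shape of an __get_user_pages_fast() entry point: interrupts are disabled across the walk so an IPI-based TLB flush cannot complete (and, with HAVE_RCU_TABLE_FREE, an RCU-sched callback cannot run) while page-table pages are being examined. `fast_gup_walk` and `gup_pgd_range` are placeholders standing in for the per-level walkers (gup_pte_range() and friends) built up below; the access_ok() form is the three-argument one this era of the kernel uses.

```c
static int fast_gup_walk(unsigned long start, unsigned long nr_pages,
			 int write, struct page **pages)
{
	unsigned long end = start + (nr_pages << PAGE_SHIFT);
	unsigned long flags;
	int nr = 0;

	if (!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
		       (void __user *)start, end - start))
		return 0;

	local_irq_save(flags);
	gup_pgd_range(start, end, write, pages, &nr);	/* walk while IRQs are off */
	local_irq_restore(flags);

	return nr;	/* number of pages pinned without taking any locks */
}
```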
0005d20b KS |
1333 | #ifndef gup_get_pte |
1334 | /* | |
1335 | * We assume that the PTE can be read atomically. If this is not the case for | |
1336 | * your architecture, please provide the helper. | |
1337 | */ | |
1338 | static inline pte_t gup_get_pte(pte_t *ptep) | |
1339 | { | |
1340 | return READ_ONCE(*ptep); | |
1341 | } | |
1342 | #endif | |
1343 | ||
b59f65fa KS |
1344 | static void undo_dev_pagemap(int *nr, int nr_start, struct page **pages) |
1345 | { | |
1346 | while ((*nr) - nr_start) { | |
1347 | struct page *page = pages[--(*nr)]; | |
1348 | ||
1349 | ClearPageReferenced(page); | |
1350 | put_page(page); | |
1351 | } | |
1352 | } | |
1353 | ||
2667f50e SC |
1354 | #ifdef __HAVE_ARCH_PTE_SPECIAL |
1355 | static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end, | |
1356 | int write, struct page **pages, int *nr) | |
1357 | { | |
b59f65fa KS |
1358 | struct dev_pagemap *pgmap = NULL; |
1359 | int nr_start = *nr, ret = 0; | |
2667f50e | 1360 | pte_t *ptep, *ptem; |
2667f50e SC |
1361 | |
1362 | ptem = ptep = pte_offset_map(&pmd, addr); | |
1363 | do { | |
0005d20b | 1364 | pte_t pte = gup_get_pte(ptep); |
7aef4172 | 1365 | struct page *head, *page; |
2667f50e SC |
1366 | |
1367 | /* | |
1368 | * Similar to the PMD case below, NUMA hinting must take slow | |
8a0516ed | 1369 | * path using the pte_protnone check. |
2667f50e | 1370 | */ |
e7884f8e KS |
1371 | if (pte_protnone(pte)) |
1372 | goto pte_unmap; | |
1373 | ||
1374 | if (!pte_access_permitted(pte, write)) | |
1375 | goto pte_unmap; | |
1376 | ||
b59f65fa KS |
1377 | if (pte_devmap(pte)) { |
1378 | pgmap = get_dev_pagemap(pte_pfn(pte), pgmap); | |
1379 | if (unlikely(!pgmap)) { | |
1380 | undo_dev_pagemap(nr, nr_start, pages); | |
1381 | goto pte_unmap; | |
1382 | } | |
1383 | } else if (pte_special(pte)) | |
2667f50e SC |
1384 | goto pte_unmap; |
1385 | ||
1386 | VM_BUG_ON(!pfn_valid(pte_pfn(pte))); | |
1387 | page = pte_page(pte); | |
7aef4172 | 1388 | head = compound_head(page); |
2667f50e | 1389 | |
7aef4172 | 1390 | if (!page_cache_get_speculative(head)) |
2667f50e SC |
1391 | goto pte_unmap; |
1392 | ||
1393 | if (unlikely(pte_val(pte) != pte_val(*ptep))) { | |
7aef4172 | 1394 | put_page(head); |
2667f50e SC |
1395 | goto pte_unmap; |
1396 | } | |
1397 | ||
7aef4172 | 1398 | VM_BUG_ON_PAGE(compound_head(page) != head, page); |
e9348053 KS |
1399 | |
1400 | SetPageReferenced(page); | |
2667f50e SC |
1401 | pages[*nr] = page; |
1402 | (*nr)++; | |
1403 | ||
1404 | } while (ptep++, addr += PAGE_SIZE, addr != end); | |
1405 | ||
1406 | ret = 1; | |
1407 | ||
1408 | pte_unmap: | |
832d7aa0 CH |
1409 | if (pgmap) |
1410 | put_dev_pagemap(pgmap); | |
2667f50e SC |
1411 | pte_unmap(ptem); |
1412 | return ret; | |
1413 | } | |
1414 | #else | |
1415 | ||
1416 | /* | |
1417 | * If we can't determine whether or not a pte is special, then fail immediately | |
1418 | * for ptes. Note, we can still pin HugeTLB and THP as these are guaranteed not | |
1419 | * to be special. | |
1420 | * | |
1421 | * For a futex to be placed on a THP tail page, get_futex_key requires a | |
1422 | * __get_user_pages_fast implementation that can pin pages. Thus it's still | |
1423 | * useful to have gup_huge_pmd even if we can't operate on ptes. | |
1424 | */ | |
1425 | static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end, | |
1426 | int write, struct page **pages, int *nr) | |
1427 | { | |
1428 | return 0; | |
1429 | } | |
1430 | #endif /* __HAVE_ARCH_PTE_SPECIAL */ | |
1431 | ||
09180ca4 | 1432 | #if defined(__HAVE_ARCH_PTE_DEVMAP) && defined(CONFIG_TRANSPARENT_HUGEPAGE) |
b59f65fa KS |
1433 | static int __gup_device_huge(unsigned long pfn, unsigned long addr, |
1434 | unsigned long end, struct page **pages, int *nr) | |
1435 | { | |
1436 | int nr_start = *nr; | |
1437 | struct dev_pagemap *pgmap = NULL; | |
1438 | ||
1439 | do { | |
1440 | struct page *page = pfn_to_page(pfn); | |
1441 | ||
1442 | pgmap = get_dev_pagemap(pfn, pgmap); | |
1443 | if (unlikely(!pgmap)) { | |
1444 | undo_dev_pagemap(nr, nr_start, pages); | |
1445 | return 0; | |
1446 | } | |
1447 | SetPageReferenced(page); | |
1448 | pages[*nr] = page; | |
1449 | get_page(page); | |
b59f65fa KS |
1450 | (*nr)++; |
1451 | pfn++; | |
1452 | } while (addr += PAGE_SIZE, addr != end); | |
832d7aa0 CH |
1453 | |
1454 | if (pgmap) | |
1455 | put_dev_pagemap(pgmap); | |
b59f65fa KS |
1456 | return 1; |
1457 | } | |
1458 | ||
1459 | static int __gup_device_huge_pmd(pmd_t pmd, unsigned long addr, | |
1460 | unsigned long end, struct page **pages, int *nr) | |
1461 | { | |
1462 | unsigned long fault_pfn; | |
1463 | ||
1464 | fault_pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT); | |
1465 | return __gup_device_huge(fault_pfn, addr, end, pages, nr); | |
1466 | } | |
1467 | ||
1468 | static int __gup_device_huge_pud(pud_t pud, unsigned long addr, | |
1469 | unsigned long end, struct page **pages, int *nr) | |
1470 | { | |
1471 | unsigned long fault_pfn; | |
1472 | ||
1473 | fault_pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT); | |
1474 | return __gup_device_huge(fault_pfn, addr, end, pages, nr); | |
1475 | } | |
1476 | #else | |
1477 | static int __gup_device_huge_pmd(pmd_t pmd, unsigned long addr, | |
1478 | unsigned long end, struct page **pages, int *nr) | |
1479 | { | |
1480 | BUILD_BUG(); | |
1481 | return 0; | |
1482 | } | |
1483 | ||
1484 | static int __gup_device_huge_pud(pud_t pud, unsigned long addr, | |
1485 | unsigned long end, struct page **pages, int *nr) | |
1486 | { | |
1487 | BUILD_BUG(); | |
1488 | return 0; | |
1489 | } | |
1490 | #endif | |
1491 | ||
2667f50e SC |
1492 | static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, |
1493 | unsigned long end, int write, struct page **pages, int *nr) | |
1494 | { | |
ddc58f27 | 1495 | struct page *head, *page; |
2667f50e SC |
1496 | int refs; |
1497 | ||
e7884f8e | 1498 | if (!pmd_access_permitted(orig, write)) |
2667f50e SC |
1499 | return 0; |
1500 | ||
b59f65fa KS |
1501 | if (pmd_devmap(orig)) |
1502 | return __gup_device_huge_pmd(orig, addr, end, pages, nr); | |
1503 | ||
2667f50e | 1504 | refs = 0; |
d63206ee | 1505 | page = pmd_page(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT); |
2667f50e | 1506 | do { |
2667f50e SC |
1507 | pages[*nr] = page; |
1508 | (*nr)++; | |
1509 | page++; | |
1510 | refs++; | |
1511 | } while (addr += PAGE_SIZE, addr != end); | |
1512 | ||
d63206ee | 1513 | head = compound_head(pmd_page(orig)); |
2667f50e SC |
1514 | if (!page_cache_add_speculative(head, refs)) { |
1515 | *nr -= refs; | |
1516 | return 0; | |
1517 | } | |
1518 | ||
1519 | if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) { | |
1520 | *nr -= refs; | |
1521 | while (refs--) | |
1522 | put_page(head); | |
1523 | return 0; | |
1524 | } | |
1525 | ||
e9348053 | 1526 | SetPageReferenced(head); |
2667f50e SC |
1527 | return 1; |
1528 | } | |
1529 | ||
1530 | static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr, | |
1531 | unsigned long end, int write, struct page **pages, int *nr) | |
1532 | { | |
ddc58f27 | 1533 | struct page *head, *page; |
2667f50e SC |
1534 | int refs; |
1535 | ||
e7884f8e | 1536 | if (!pud_access_permitted(orig, write)) |
2667f50e SC |
1537 | return 0; |
1538 | ||
b59f65fa KS |
1539 | if (pud_devmap(orig)) |
1540 | return __gup_device_huge_pud(orig, addr, end, pages, nr); | |
1541 | ||
2667f50e | 1542 | refs = 0; |
d63206ee | 1543 | page = pud_page(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT); |
2667f50e | 1544 | do { |
2667f50e SC |
1545 | pages[*nr] = page; |
1546 | (*nr)++; | |
1547 | page++; | |
1548 | refs++; | |
1549 | } while (addr += PAGE_SIZE, addr != end); | |
1550 | ||
d63206ee | 1551 | head = compound_head(pud_page(orig)); |
2667f50e SC |
1552 | if (!page_cache_add_speculative(head, refs)) { |
1553 | *nr -= refs; | |
1554 | return 0; | |
1555 | } | |
1556 | ||
1557 | if (unlikely(pud_val(orig) != pud_val(*pudp))) { | |
1558 | *nr -= refs; | |
1559 | while (refs--) | |
1560 | put_page(head); | |
1561 | return 0; | |
1562 | } | |
1563 | ||
e9348053 | 1564 | SetPageReferenced(head); |
2667f50e SC |
1565 | return 1; |
1566 | } | |
1567 | ||
f30c59e9 AK |
1568 | static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr, |
1569 | unsigned long end, int write, | |
1570 | struct page **pages, int *nr) | |
1571 | { | |
1572 | int refs; | |
ddc58f27 | 1573 | struct page *head, *page; |
f30c59e9 | 1574 | |
e7884f8e | 1575 | if (!pgd_access_permitted(orig, write)) |
f30c59e9 AK |
1576 | return 0; |
1577 | ||
b59f65fa | 1578 | BUILD_BUG_ON(pgd_devmap(orig)); |
f30c59e9 | 1579 | refs = 0; |
d63206ee | 1580 | page = pgd_page(orig) + ((addr & ~PGDIR_MASK) >> PAGE_SHIFT); |
f30c59e9 | 1581 | do { |
f30c59e9 AK |
1582 | pages[*nr] = page; |
1583 | (*nr)++; | |
1584 | page++; | |
1585 | refs++; | |
1586 | } while (addr += PAGE_SIZE, addr != end); | |
1587 | ||
d63206ee | 1588 | head = compound_head(pgd_page(orig)); |
f30c59e9 AK |
1589 | if (!page_cache_add_speculative(head, refs)) { |
1590 | *nr -= refs; | |
1591 | return 0; | |
1592 | } | |
1593 | ||
1594 | if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) { | |
1595 | *nr -= refs; | |
1596 | while (refs--) | |
1597 | put_page(head); | |
1598 | return 0; | |
1599 | } | |
1600 | ||
e9348053 | 1601 | SetPageReferenced(head); |
f30c59e9 AK |
1602 | return 1; |
1603 | } | |
1604 | ||
2667f50e SC |
1605 | static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end, |
1606 | int write, struct page **pages, int *nr) | |
1607 | { | |
1608 | unsigned long next; | |
1609 | pmd_t *pmdp; | |
1610 | ||
1611 | pmdp = pmd_offset(&pud, addr); | |
1612 | do { | |
38c5ce93 | 1613 | pmd_t pmd = READ_ONCE(*pmdp); |
2667f50e SC |
1614 | |
1615 | next = pmd_addr_end(addr, end); | |
84c3fc4e | 1616 | if (!pmd_present(pmd)) |
2667f50e SC |
1617 | return 0; |
1618 | ||
1619 | if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd))) { | |
1620 | /* | |
1621 | * NUMA hinting faults need to be handled in the GUP | |
1622 | * slowpath for accounting purposes and so that they | |
1623 | * can be serialised against THP migration. | |
1624 | */ | |
8a0516ed | 1625 | if (pmd_protnone(pmd)) |
2667f50e SC |
1626 | return 0; |
1627 | ||
1628 | if (!gup_huge_pmd(pmd, pmdp, addr, next, write, | |
1629 | pages, nr)) | |
1630 | return 0; | |
1631 | ||
f30c59e9 AK |
1632 | } else if (unlikely(is_hugepd(__hugepd(pmd_val(pmd))))) { |
1633 | /* | |
1634 | * Some architectures use a different pmd format for | 
1635 | * hugetlbfs than for THP. | 
1636 | */ | |
1637 | if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr, | |
1638 | PMD_SHIFT, next, write, pages, nr)) | |
1639 | return 0; | |
2667f50e SC |
1640 | } else if (!gup_pte_range(pmd, addr, next, write, pages, nr)) |
1641 | return 0; | |
1642 | } while (pmdp++, addr = next, addr != end); | |
1643 | ||
1644 | return 1; | |
1645 | } | |
1646 | ||
c2febafc | 1647 | static int gup_pud_range(p4d_t p4d, unsigned long addr, unsigned long end, |
f30c59e9 | 1648 | int write, struct page **pages, int *nr) |
2667f50e SC |
1649 | { |
1650 | unsigned long next; | |
1651 | pud_t *pudp; | |
1652 | ||
c2febafc | 1653 | pudp = pud_offset(&p4d, addr); |
2667f50e | 1654 | do { |
e37c6982 | 1655 | pud_t pud = READ_ONCE(*pudp); |
2667f50e SC |
1656 | |
1657 | next = pud_addr_end(addr, end); | |
1658 | if (pud_none(pud)) | |
1659 | return 0; | |
f30c59e9 | 1660 | if (unlikely(pud_huge(pud))) { |
2667f50e | 1661 | if (!gup_huge_pud(pud, pudp, addr, next, write, |
f30c59e9 AK |
1662 | pages, nr)) |
1663 | return 0; | |
1664 | } else if (unlikely(is_hugepd(__hugepd(pud_val(pud))))) { | |
1665 | if (!gup_huge_pd(__hugepd(pud_val(pud)), addr, | |
1666 | PUD_SHIFT, next, write, pages, nr)) | |
2667f50e SC |
1667 | return 0; |
1668 | } else if (!gup_pmd_range(pud, addr, next, write, pages, nr)) | |
1669 | return 0; | |
1670 | } while (pudp++, addr = next, addr != end); | |
1671 | ||
1672 | return 1; | |
1673 | } | |
1674 | ||
c2febafc KS |
1675 | static int gup_p4d_range(pgd_t pgd, unsigned long addr, unsigned long end, |
1676 | int write, struct page **pages, int *nr) | |
1677 | { | |
1678 | unsigned long next; | |
1679 | p4d_t *p4dp; | |
1680 | ||
1681 | p4dp = p4d_offset(&pgd, addr); | |
1682 | do { | |
1683 | p4d_t p4d = READ_ONCE(*p4dp); | |
1684 | ||
1685 | next = p4d_addr_end(addr, end); | |
1686 | if (p4d_none(p4d)) | |
1687 | return 0; | |
1688 | BUILD_BUG_ON(p4d_huge(p4d)); | |
1689 | if (unlikely(is_hugepd(__hugepd(p4d_val(p4d))))) { | |
1690 | if (!gup_huge_pd(__hugepd(p4d_val(p4d)), addr, | |
1691 | P4D_SHIFT, next, write, pages, nr)) | |
1692 | return 0; | |
ce70df08 | 1693 | } else if (!gup_pud_range(p4d, addr, next, write, pages, nr)) |
c2febafc KS |
1694 | return 0; |
1695 | } while (p4dp++, addr = next, addr != end); | |
1696 | ||
1697 | return 1; | |
1698 | } | |
1699 | ||
5b65c467 KS |
1700 | static void gup_pgd_range(unsigned long addr, unsigned long end, |
1701 | int write, struct page **pages, int *nr) | |
1702 | { | |
1703 | unsigned long next; | |
1704 | pgd_t *pgdp; | |
1705 | ||
1706 | pgdp = pgd_offset(current->mm, addr); | |
1707 | do { | |
1708 | pgd_t pgd = READ_ONCE(*pgdp); | |
1709 | ||
1710 | next = pgd_addr_end(addr, end); | |
1711 | if (pgd_none(pgd)) | |
1712 | return; | |
1713 | if (unlikely(pgd_huge(pgd))) { | |
1714 | if (!gup_huge_pgd(pgd, pgdp, addr, next, write, | |
1715 | pages, nr)) | |
1716 | return; | |
1717 | } else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) { | |
1718 | if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr, | |
1719 | PGDIR_SHIFT, next, write, pages, nr)) | |
1720 | return; | |
1721 | } else if (!gup_p4d_range(pgd, addr, next, write, pages, nr)) | |
1722 | return; | |
1723 | } while (pgdp++, addr = next, addr != end); | |
1724 | } | |
1725 | ||
1726 | #ifndef gup_fast_permitted | |
1727 | /* | |
1728 | * Check if it's allowed to use __get_user_pages_fast() for the range, | 
1729 | * or whether we need to fall back to the slow version: | 
1730 | */ | |
1731 | bool gup_fast_permitted(unsigned long start, int nr_pages, int write) | |
1732 | { | |
1733 | unsigned long len, end; | |
1734 | ||
1735 | len = (unsigned long) nr_pages << PAGE_SHIFT; | |
1736 | end = start + len; | |
1737 | return end >= start; | |
1738 | } | |
1739 | #endif | |
1740 | ||
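/*
 * Illustrative sketch (not part of this file): an architecture that
 * provides its own gup_fast_permitted() can apply a stricter check
 * than the generic overflow test above, for example rejecting ranges
 * that reach beyond its user address space limit. TASK_SIZE_MAX is
 * assumed here as a per-arch constant; the #define mirrors the
 * #ifndef override mechanism used by the generic version.
 */
#if 0 /* example only */
#define gup_fast_permitted gup_fast_permitted
static inline bool gup_fast_permitted(unsigned long start, int nr_pages,
				      int write)
{
	unsigned long len, end;

	len = (unsigned long) nr_pages << PAGE_SHIFT;
	end = start + len;

	/* Reject wrap-around and anything outside the user range. */
	return end >= start && end <= TASK_SIZE_MAX;
}
#endif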
2667f50e SC |
1741 | /* |
1742 | * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to | |
1743 | * the regular GUP. It will only return non-negative values. | |
1744 | */ | |
1745 | int __get_user_pages_fast(unsigned long start, int nr_pages, int write, | |
1746 | struct page **pages) | |
1747 | { | |
2667f50e | 1748 | unsigned long addr, len, end; |
5b65c467 | 1749 | unsigned long flags; |
2667f50e SC |
1750 | int nr = 0; |
1751 | ||
1752 | start &= PAGE_MASK; | |
1753 | addr = start; | |
1754 | len = (unsigned long) nr_pages << PAGE_SHIFT; | |
1755 | end = start + len; | |
1756 | ||
1757 | if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ, | |
aa2369f1 | 1758 | (void __user *)start, len))) |
2667f50e SC |
1759 | return 0; |
1760 | ||
1761 | /* | |
1762 | * Disable interrupts. We use the nested form as we can already have | |
1763 | * interrupts disabled by get_futex_key. | |
1764 | * | |
1765 | * With interrupts disabled, we block page table pages from being | |
1766 | * freed from under us. See mmu_gather_tlb in asm-generic/tlb.h | |
1767 | * for more details. | |
1768 | * | |
1769 | * We do not adopt an rcu_read_lock() here as we also want to | 
1770 | * block IPIs that come from THP splitting. | 
1771 | */ | |
1772 | ||
5b65c467 KS |
1773 | if (gup_fast_permitted(start, nr_pages, write)) { |
1774 | local_irq_save(flags); | |
1775 | gup_pgd_range(addr, end, write, pages, &nr); | |
1776 | local_irq_restore(flags); | |
1777 | } | |
2667f50e SC |
1778 | |
1779 | return nr; | |
1780 | } | |
1781 | ||
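/*
 * Illustrative sketch (not part of this file): a caller that cannot
 * sleep, e.g. one already running with interrupts disabled, can use
 * the IRQ-safe variant above to try to pin a single page and simply
 * give up (or defer to a sleepable context) when it returns 0. The
 * helper name is a placeholder, not a kernel symbol.
 */
#if 0 /* example only */
static int pin_one_page_atomic(unsigned long addr, struct page **pagep)
{
	/* Returns the number of pages pinned (0 or 1), never an error. */
	return __get_user_pages_fast(addr, 1, 1, pagep);
}
#endif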
1782 | /** | |
1783 | * get_user_pages_fast() - pin user pages in memory | |
1784 | * @start: starting user address | |
1785 | * @nr_pages: number of pages from start to pin | |
1786 | * @write: whether pages will be written to | |
1787 | * @pages: array that receives pointers to the pages pinned. | |
1788 | * Should be at least nr_pages long. | |
1789 | * | |
1790 | * Attempt to pin user pages in memory without taking mm->mmap_sem. | |
1791 | * If not successful, it will fall back to taking the lock and | |
1792 | * calling get_user_pages(). | |
1793 | * | |
1794 | * Returns number of pages pinned. This may be fewer than the number | |
1795 | * requested. If nr_pages is 0 or negative, returns 0. If no pages | |
1796 | * were pinned, returns -errno. | |
1797 | */ | |
1798 | int get_user_pages_fast(unsigned long start, int nr_pages, int write, | |
1799 | struct page **pages) | |
1800 | { | |
5b65c467 | 1801 | unsigned long addr, len, end; |
73e10a61 | 1802 | int nr = 0, ret = 0; |
2667f50e SC |
1803 | |
1804 | start &= PAGE_MASK; | |
5b65c467 KS |
1805 | addr = start; |
1806 | len = (unsigned long) nr_pages << PAGE_SHIFT; | |
1807 | end = start + len; | |
1808 | ||
1809 | if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ, | |
1810 | (void __user *)start, len))) | |
1811 | return 0; | |
73e10a61 KS |
1812 | |
1813 | if (gup_fast_permitted(start, nr_pages, write)) { | |
5b65c467 KS |
1814 | local_irq_disable(); |
1815 | gup_pgd_range(addr, end, write, pages, &nr); | |
1816 | local_irq_enable(); | |
73e10a61 KS |
1817 | ret = nr; |
1818 | } | |
2667f50e SC |
1819 | |
1820 | if (nr < nr_pages) { | |
1821 | /* Try to get the remaining pages with get_user_pages */ | |
1822 | start += nr << PAGE_SHIFT; | |
1823 | pages += nr; | |
1824 | ||
c164154f LS |
1825 | ret = get_user_pages_unlocked(start, nr_pages - nr, pages, |
1826 | write ? FOLL_WRITE : 0); | |
2667f50e SC |
1827 | |
1828 | /* Have to be a bit careful with return values */ | |
1829 | if (nr > 0) { | |
1830 | if (ret < 0) | |
1831 | ret = nr; | |
1832 | else | |
1833 | ret += nr; | |
1834 | } | |
1835 | } | |
1836 | ||
1837 | return ret; | |
1838 | } | |
1839 | ||
e585513b | 1840 | #endif /* CONFIG_HAVE_GENERIC_GUP */ |
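/*
 * Illustrative sketch (not part of this file): a typical caller pins a
 * writable user buffer with get_user_pages_fast(), operates on the
 * pinned pages, then drops each reference with put_page(). The helper
 * name and parameters (user_addr, npages, pages) are placeholders; the
 * pages array is assumed to be pre-allocated with npages entries. A
 * short pin is treated as a failure here purely for simplicity.
 */
#if 0 /* example only */
static int pin_user_buffer(unsigned long user_addr, int npages,
			   struct page **pages)
{
	int i, pinned;

	pinned = get_user_pages_fast(user_addr, npages, 1, pages);
	if (pinned < 0)
		return pinned;	/* no pages pinned: -errno */

	/* ... access the pinned pages here ... */

	for (i = 0; i < pinned; i++)
		put_page(pages[i]);

	return pinned == npages ? 0 : -EFAULT;
}
#endif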