Commit | Line | Data |
---|---|---|
457c8996 | 1 | // SPDX-License-Identifier: GPL-2.0-only |
4bbd4c77 KS |
2 | #include <linux/kernel.h> |
3 | #include <linux/errno.h> | |
4 | #include <linux/err.h> | |
5 | #include <linux/spinlock.h> | |
6 | ||
4bbd4c77 | 7 | #include <linux/mm.h> |
89c1905d | 8 | #include <linux/memfd.h> |
3565fce3 | 9 | #include <linux/memremap.h> |
4bbd4c77 KS |
10 | #include <linux/pagemap.h> |
11 | #include <linux/rmap.h> | |
12 | #include <linux/swap.h> | |
13 | #include <linux/swapops.h> | |
1507f512 | 14 | #include <linux/secretmem.h> |
4bbd4c77 | 15 | |
174cd4b1 | 16 | #include <linux/sched/signal.h> |
2667f50e | 17 | #include <linux/rwsem.h> |
f30c59e9 | 18 | #include <linux/hugetlb.h> |
9a4e9f3b AK |
19 | #include <linux/migrate.h> |
20 | #include <linux/mm_inline.h> | |
89c1905d | 21 | #include <linux/pagevec.h> |
9a4e9f3b | 22 | #include <linux/sched/mm.h> |
a6e79df9 | 23 | #include <linux/shmem_fs.h> |
1027e443 | 24 | |
33a709b2 | 25 | #include <asm/mmu_context.h> |
1027e443 | 26 | #include <asm/tlbflush.h> |
2667f50e | 27 | |
4bbd4c77 KS |
28 | #include "internal.h" |
29 | ||
df06b37f KB |
30 | struct follow_page_context { |
31 | struct dev_pagemap *pgmap; | |
32 | unsigned int page_mask; | |
33 | }; | |
34 | ||
b6a2619c DH |
35 | static inline void sanity_check_pinned_pages(struct page **pages, |
36 | unsigned long npages) | |
37 | { | |
38 | if (!IS_ENABLED(CONFIG_DEBUG_VM)) | |
39 | return; | |
40 | ||
41 | /* | |
42 | * We only pin anonymous pages if they are exclusive. Once pinned, the | |
43 | * pages can no longer become possibly shared, and PageAnonExclusive() will | |
44 | * stick around until the page is freed. | |
45 | * | |
46 | * We'd like to verify that our pinned anonymous pages are still mapped | |
47 | * exclusively. The issue with anon THP is that we don't know how | |
48 | * they are/were mapped when pinning them. However, for anon | |
49 | * THP we can assume that either the given page (PTE-mapped THP) or | |
50 | * the head page (PMD-mapped THP) should be PageAnonExclusive(). If | |
51 | * neither is the case, there is certainly something wrong. | |
52 | */ | |
53 | for (; npages; npages--, pages++) { | |
54 | struct page *page = *pages; | |
55 | struct folio *folio = page_folio(page); | |
56 | ||
c8070b78 DH |
57 | if (is_zero_page(page) || |
58 | !folio_test_anon(folio)) | |
b6a2619c DH |
59 | continue; |
60 | if (!folio_test_large(folio) || folio_test_hugetlb(folio)) | |
61 | VM_BUG_ON_PAGE(!PageAnonExclusive(&folio->page), page); | |
62 | else | |
63 | /* Either a PTE-mapped or a PMD-mapped THP. */ | |
64 | VM_BUG_ON_PAGE(!PageAnonExclusive(&folio->page) && | |
65 | !PageAnonExclusive(page), page); | |
66 | } | |
67 | } | |
68 | ||
cd1adf1b | 69 | /* |
ece1ed7b | 70 | * Return the folio with ref appropriately incremented, |
cd1adf1b | 71 | * or NULL if that failed. |
a707cdd5 | 72 | */ |
ece1ed7b | 73 | static inline struct folio *try_get_folio(struct page *page, int refs) |
a707cdd5 | 74 | { |
ece1ed7b | 75 | struct folio *folio; |
a707cdd5 | 76 | |
59409373 | 77 | retry: |
ece1ed7b MWO |
78 | folio = page_folio(page); |
79 | if (WARN_ON_ONCE(folio_ref_count(folio) < 0)) | |
a707cdd5 | 80 | return NULL; |
fa2690af | 81 | if (unlikely(!folio_ref_try_add(folio, refs))) |
a707cdd5 | 82 | return NULL; |
c24d3732 JH |
83 | |
84 | /* | |
ece1ed7b MWO |
85 | * At this point we have a stable reference to the folio; but it |
86 | * could be that between calling page_folio() and the refcount | |
87 | * increment, the folio was split, in which case we'd end up | |
88 | * holding a reference on a folio that has nothing to do with the page | |
c24d3732 | 89 | * we were given anymore. |
ece1ed7b MWO |
90 | * So now that the folio is stable, recheck that the page still |
91 | * belongs to this folio. | |
c24d3732 | 92 | */ |
ece1ed7b | 93 | if (unlikely(page_folio(page) != folio)) { |
53e45c4f | 94 | if (!put_devmap_managed_folio_refs(folio, refs)) |
f4f451a1 | 95 | folio_put_refs(folio, refs); |
59409373 | 96 | goto retry; |
c24d3732 JH |
97 | } |
98 | ||
ece1ed7b | 99 | return folio; |
a707cdd5 JH |
100 | } |
101 | ||
d8ddc099 | 102 | static void gup_put_folio(struct folio *folio, int refs, unsigned int flags) |
4509b42c JG |
103 | { |
104 | if (flags & FOLL_PIN) { | |
c8070b78 DH |
105 | if (is_zero_folio(folio)) |
106 | return; | |
d8ddc099 MWO |
107 | node_stat_mod_folio(folio, NR_FOLL_PIN_RELEASED, refs); |
108 | if (folio_test_large(folio)) | |
94688e8e | 109 | atomic_sub(refs, &folio->_pincount); |
4509b42c JG |
110 | else |
111 | refs *= GUP_PIN_COUNTING_BIAS; | |
112 | } | |
113 | ||
53e45c4f | 114 | if (!put_devmap_managed_folio_refs(folio, refs)) |
f4f451a1 | 115 | folio_put_refs(folio, refs); |
4509b42c JG |
116 | } |
117 | ||
3faa52c0 | 118 | /** |
f442fa61 YS |
119 | * try_grab_folio() - increment a folio's refcount by a flag-dependent amount |
120 | * @folio: pointer to folio to be grabbed | |
121 | * @refs: the value to (effectively) add to the folio's refcount | |
122 | * @flags: gup flags: these are the FOLL_* flag values | |
3faa52c0 JH |
123 | * |
124 | * This might not do anything at all, depending on the flags argument. | |
125 | * | |
126 | * "grab" names in this file mean, "look at flags to decide whether to use | |
f442fa61 | 127 | * FOLL_PIN or FOLL_GET behavior, when incrementing the folio's refcount." |
3faa52c0 | 128 | * |
3faa52c0 | 129 | * Either FOLL_PIN or FOLL_GET (or neither) may be set, but not both at the same |
f442fa61 | 130 | * time. |
3faa52c0 | 131 | * |
0f089235 LG |
132 | * Return: 0 for success, or if no action was required (if neither FOLL_PIN |
133 | * nor FOLL_GET was set, nothing is done). A negative error code for failure: | |
134 | * | |
f442fa61 | 135 | * -ENOMEM FOLL_GET or FOLL_PIN was set, but the folio could not |
0f089235 | 136 | * be grabbed. |
f442fa61 YS |
137 | * |
138 | * It is called when we have a stable reference to the folio, typically in |
139 | * the GUP slow path. |
3faa52c0 | 140 | */ |
f442fa61 YS |
141 | int __must_check try_grab_folio(struct folio *folio, int refs, |
142 | unsigned int flags) | |
3faa52c0 | 143 | { |
5fec0719 | 144 | if (WARN_ON_ONCE(folio_ref_count(folio) <= 0)) |
0f089235 | 145 | return -ENOMEM; |
3faa52c0 | 146 | |
f442fa61 | 147 | if (unlikely(!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(&folio->page))) |
4003f107 | 148 | return -EREMOTEIO; |
3faa52c0 | 149 | |
c36c04c2 | 150 | if (flags & FOLL_GET) |
f442fa61 | 151 | folio_ref_add(folio, refs); |
c36c04c2 | 152 | else if (flags & FOLL_PIN) { |
c8070b78 DH |
153 | /* |
154 | * Don't take a pin on the zero page - it's not going anywhere | |
155 | * and it is used in a *lot* of places. | |
156 | */ | |
f442fa61 | 157 | if (is_zero_folio(folio)) |
c8070b78 DH |
158 | return 0; |
159 | ||
c36c04c2 | 160 | /* |
f442fa61 | 161 | * Increment the normal page refcount field at least once, |
78d9d6ce | 162 | * so that the page really is pinned. |
c36c04c2 | 163 | */ |
5fec0719 | 164 | if (folio_test_large(folio)) { |
f442fa61 YS |
165 | folio_ref_add(folio, refs); |
166 | atomic_add(refs, &folio->_pincount); | |
8ea2979c | 167 | } else { |
f442fa61 | 168 | folio_ref_add(folio, refs * GUP_PIN_COUNTING_BIAS); |
8ea2979c | 169 | } |
c36c04c2 | 170 | |
f442fa61 | 171 | node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, refs); |
c36c04c2 JH |
172 | } |
173 | ||
0f089235 | 174 | return 0; |
3faa52c0 JH |
175 | } |
176 | ||
3faa52c0 JH |
177 | /** |
178 | * unpin_user_page() - release a dma-pinned page | |
179 | * @page: pointer to page to be released | |
180 | * | |
181 | * Pages that were pinned via pin_user_pages*() must be released via either | |
182 | * unpin_user_page(), or one of the unpin_user_pages*() routines. This is so | |
183 | * that such pages can be separately tracked and uniquely handled. In | |
184 | * particular, interactions with RDMA and filesystems need special handling. | |
185 | */ | |
186 | void unpin_user_page(struct page *page) | |
187 | { | |
b6a2619c | 188 | sanity_check_pinned_pages(&page, 1); |
d8ddc099 | 189 | gup_put_folio(page_folio(page), 1, FOLL_PIN); |
3faa52c0 JH |
190 | } |
191 | EXPORT_SYMBOL(unpin_user_page); | |
192 | ||
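
The release rule documented above is easy to get wrong, so here is a minimal sketch of the intended pairing. The helper name and its purpose are made up for illustration; pin_user_pages_fast(), kmap_local_page() and unpin_user_page() are the real APIs. The point is that a page pinned via a pin_user_pages*() call must be dropped with unpin_user_page(), not put_page().

```c
#include <linux/mm.h>
#include <linux/highmem.h>

/*
 * Illustrative sketch only: pin one user page, read a byte from it, then
 * drop the pin with unpin_user_page(). Releasing with put_page() instead
 * would leave the FOLL_PIN accounting unbalanced.
 */
static int peek_user_byte(unsigned long uaddr, u8 *out)
{
	struct page *page;
	void *kaddr;
	int ret;

	ret = pin_user_pages_fast(uaddr & PAGE_MASK, 1, 0, &page);
	if (ret != 1)
		return ret < 0 ? ret : -EFAULT;

	kaddr = kmap_local_page(page);
	*out = *((u8 *)kaddr + offset_in_page(uaddr));
	kunmap_local(kaddr);

	unpin_user_page(page);		/* pairs with the pin taken above */
	return 0;
}
```
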
6cc04054 VK |
193 | /** |
194 | * unpin_folio() - release a dma-pinned folio | |
195 | * @folio: pointer to folio to be released | |
196 | * | |
197 | * Folios that were pinned via memfd_pin_folios() or other similar routines | |
198 | * must be released either using unpin_folio() or unpin_folios(). | |
199 | */ | |
200 | void unpin_folio(struct folio *folio) | |
201 | { | |
202 | gup_put_folio(folio, 1, FOLL_PIN); | |
203 | } | |
204 | EXPORT_SYMBOL_GPL(unpin_folio); | |
205 | ||
1101fb8f DH |
206 | /** |
207 | * folio_add_pin - Try to get an additional pin on a pinned folio | |
208 | * @folio: The folio to be pinned | |
209 | * | |
210 | * Get an additional pin on a folio we already have a pin on. Makes no change | |
211 | * if the folio is a zero_page. | |
212 | */ | |
213 | void folio_add_pin(struct folio *folio) | |
214 | { | |
215 | if (is_zero_folio(folio)) | |
216 | return; | |
217 | ||
218 | /* | |
219 | * Similar to try_grab_folio(): be sure to *also* increment the normal | |
220 | * page refcount field at least once, so that the page really is | |
221 | * pinned. | |
222 | */ | |
223 | if (folio_test_large(folio)) { | |
224 | WARN_ON_ONCE(atomic_read(&folio->_pincount) < 1); | |
225 | folio_ref_inc(folio); | |
226 | atomic_inc(&folio->_pincount); | |
227 | } else { | |
228 | WARN_ON_ONCE(folio_ref_count(folio) < GUP_PIN_COUNTING_BIAS); | |
229 | folio_ref_add(folio, GUP_PIN_COUNTING_BIAS); | |
230 | } | |
231 | } | |
232 | ||
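
folio_add_pin() makes the two accounting schemes visible: small folios fold pins into the refcount in units of GUP_PIN_COUNTING_BIAS, while large folios keep an exact _pincount next to a single refcount increment. A hedged sketch of the consumer side, using the real folio_maybe_dma_pinned() helper (the wrapper itself is hypothetical):

```c
#include <linux/mm.h>

/*
 * Hypothetical wrapper around the real folio_maybe_dma_pinned() helper,
 * which hides the small-folio/large-folio difference seen above: for large
 * folios it reads the exact _pincount, for small folios it can only check
 * whether the refcount has reached GUP_PIN_COUNTING_BIAS, so it may report
 * false positives on small folios with very many plain references.
 */
static bool folio_probably_pinned(struct folio *folio)
{
	return folio_maybe_dma_pinned(folio);
}
```
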
659508f9 | 233 | static inline struct folio *gup_folio_range_next(struct page *start, |
8f39f5fc | 234 | unsigned long npages, unsigned long i, unsigned int *ntails) |
458a4f78 | 235 | { |
659508f9 MWO |
236 | struct page *next = nth_page(start, i); |
237 | struct folio *folio = page_folio(next); | |
458a4f78 JM |
238 | unsigned int nr = 1; |
239 | ||
659508f9 | 240 | if (folio_test_large(folio)) |
4c654229 | 241 | nr = min_t(unsigned int, npages - i, |
659508f9 | 242 | folio_nr_pages(folio) - folio_page_idx(folio, next)); |
458a4f78 | 243 | |
458a4f78 | 244 | *ntails = nr; |
659508f9 | 245 | return folio; |
458a4f78 JM |
246 | } |
247 | ||
12521c76 | 248 | static inline struct folio *gup_folio_next(struct page **list, |
28297dbc | 249 | unsigned long npages, unsigned long i, unsigned int *ntails) |
8745d7f6 | 250 | { |
12521c76 | 251 | struct folio *folio = page_folio(list[i]); |
8745d7f6 JM |
252 | unsigned int nr; |
253 | ||
8745d7f6 | 254 | for (nr = i + 1; nr < npages; nr++) { |
12521c76 | 255 | if (page_folio(list[nr]) != folio) |
8745d7f6 JM |
256 | break; |
257 | } | |
258 | ||
8745d7f6 | 259 | *ntails = nr - i; |
12521c76 | 260 | return folio; |
8745d7f6 JM |
261 | } |
262 | ||
fc1d8e7c | 263 | /** |
f1f6a7dd | 264 | * unpin_user_pages_dirty_lock() - release and optionally dirty gup-pinned pages |
2d15eb31 | 265 | * @pages: array of pages to be maybe marked dirty, and definitely released. |
fc1d8e7c | 266 | * @npages: number of pages in the @pages array. |
2d15eb31 | 267 | * @make_dirty: whether to mark the pages dirty |
fc1d8e7c JH |
268 | * |
269 | * "gup-pinned page" refers to a page that has had one of the pin_user_pages() |
270 | * variants called on that page. | |
271 | * | |
272 | * For each page in the @pages array, make that page (or its head page, if a | |
2d15eb31 | 273 | * compound page) dirty, if @make_dirty is true, and if the page was previously |
f1f6a7dd JH |
274 | * listed as clean. In any case, releases all pages using unpin_user_page(), |
275 | * possibly via unpin_user_pages(), for the non-dirty case. | |
fc1d8e7c | 276 | * |
f1f6a7dd | 277 | * Please see the unpin_user_page() documentation for details. |
fc1d8e7c | 278 | * |
2d15eb31 AM |
279 | * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is |
280 | * required, then the caller should a) verify that this is really correct, | |
281 | * because _lock() is usually required, and b) hand code it: | |
f1f6a7dd | 282 | * set_page_dirty(), unpin_user_page(). |
fc1d8e7c JH |
283 | * |
284 | */ | |
f1f6a7dd JH |
285 | void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages, |
286 | bool make_dirty) | |
fc1d8e7c | 287 | { |
12521c76 MWO |
288 | unsigned long i; |
289 | struct folio *folio; | |
290 | unsigned int nr; | |
2d15eb31 AM |
291 | |
292 | if (!make_dirty) { | |
f1f6a7dd | 293 | unpin_user_pages(pages, npages); |
2d15eb31 AM |
294 | return; |
295 | } | |
296 | ||
b6a2619c | 297 | sanity_check_pinned_pages(pages, npages); |
12521c76 MWO |
298 | for (i = 0; i < npages; i += nr) { |
299 | folio = gup_folio_next(pages, npages, i, &nr); | |
2d15eb31 AM |
300 | /* |
301 | * Checking PageDirty at this point may race with | |
302 | * clear_page_dirty_for_io(), but that's OK. Two key | |
303 | * cases: | |
304 | * | |
305 | * 1) This code sees the page as already dirty, so it | |
306 | * skips the call to set_page_dirty(). That could happen | |
307 | * because clear_page_dirty_for_io() called | |
a929e0d1 | 308 | * folio_mkclean(), followed by set_page_dirty(). |
2d15eb31 AM |
309 | * However, now the page is going to get written back, |
310 | * which meets the original intention of setting it | |
311 | * dirty, so all is well: clear_page_dirty_for_io() goes | |
312 | * on to call TestClearPageDirty(), and write the page | |
313 | * back. | |
314 | * | |
315 | * 2) This code sees the page as clean, so it calls | |
316 | * set_page_dirty(). The page stays dirty, despite being | |
317 | * written back, so it gets written back again in the | |
318 | * next writeback cycle. This is harmless. | |
319 | */ | |
12521c76 MWO |
320 | if (!folio_test_dirty(folio)) { |
321 | folio_lock(folio); | |
322 | folio_mark_dirty(folio); | |
323 | folio_unlock(folio); | |
324 | } | |
325 | gup_put_folio(folio, nr, FOLL_PIN); | |
2d15eb31 | 326 | } |
fc1d8e7c | 327 | } |
f1f6a7dd | 328 | EXPORT_SYMBOL(unpin_user_pages_dirty_lock); |
fc1d8e7c | 329 | |
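
A typical use of unpin_user_pages_dirty_lock() is the receive side of a driver: pin the user buffer writable, let the device fill it, then release and dirty the pages in one call. A sketch under those assumptions; my_device_dma_into_pages() is a hypothetical driver helper, the GUP calls are real:

```c
#include <linux/mm.h>

/* Hypothetical driver helper: DMA device data into the pinned pages. */
int my_device_dma_into_pages(struct page **pages, int npages);

/*
 * Sketch: pin the user buffer writable, let the device fill it, then
 * release the pins. Passing make_dirty == true tells the mm that the
 * pages now hold new data, so it is not lost to a racing writeback.
 */
static int fill_user_buffer(unsigned long uaddr, int npages,
			    struct page **pages)
{
	int pinned, ret;

	pinned = pin_user_pages_fast(uaddr, npages, FOLL_WRITE, pages);
	if (pinned <= 0)
		return pinned ? pinned : -EFAULT;

	ret = my_device_dma_into_pages(pages, pinned);

	/* Only dirty the pages if the device actually wrote them. */
	unpin_user_pages_dirty_lock(pages, pinned, ret == 0);
	return ret;
}
```
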
458a4f78 JM |
330 | /** |
331 | * unpin_user_page_range_dirty_lock() - release and optionally dirty | |
332 | * gup-pinned page range | |
333 | * | |
334 | * @page: the starting page of a range maybe marked dirty, and definitely released. | |
335 | * @npages: number of consecutive pages to release. | |
336 | * @make_dirty: whether to mark the pages dirty | |
337 | * | |
338 | * "gup-pinned page range" refers to a range of pages that has had one of the | |
339 | * pin_user_pages() variants called on that range. |
340 | * | |
341 | * For the page ranges defined by [page .. page+npages], make that range (or | |
342 | * its head pages, if a compound page) dirty, if @make_dirty is true, and if the | |
343 | * page range was previously listed as clean. | |
344 | * | |
345 | * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is | |
346 | * required, then the caller should a) verify that this is really correct, | |
347 | * because _lock() is usually required, and b) hand code it: | |
348 | * set_page_dirty(), unpin_user_page(). |
349 | * | |
350 | */ | |
351 | void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages, | |
352 | bool make_dirty) | |
353 | { | |
659508f9 MWO |
354 | unsigned long i; |
355 | struct folio *folio; | |
356 | unsigned int nr; | |
357 | ||
358 | for (i = 0; i < npages; i += nr) { | |
359 | folio = gup_folio_range_next(page, npages, i, &nr); | |
360 | if (make_dirty && !folio_test_dirty(folio)) { | |
361 | folio_lock(folio); | |
362 | folio_mark_dirty(folio); | |
363 | folio_unlock(folio); | |
364 | } | |
365 | gup_put_folio(folio, nr, FOLL_PIN); | |
458a4f78 JM |
366 | } |
367 | } | |
368 | EXPORT_SYMBOL(unpin_user_page_range_dirty_lock); | |
369 | ||
23babe19 | 370 | static void gup_fast_unpin_user_pages(struct page **pages, unsigned long npages) |
b6a2619c DH |
371 | { |
372 | unsigned long i; | |
373 | struct folio *folio; | |
374 | unsigned int nr; | |
375 | ||
376 | /* | |
377 | * Don't perform any sanity checks because we might have raced with | |
378 | * fork() and some anonymous pages might now actually be shared -- | |
379 | * which is why we're unpinning after all. | |
380 | */ | |
381 | for (i = 0; i < npages; i += nr) { | |
382 | folio = gup_folio_next(pages, npages, i, &nr); | |
383 | gup_put_folio(folio, nr, FOLL_PIN); | |
384 | } | |
385 | } | |
386 | ||
fc1d8e7c | 387 | /** |
f1f6a7dd | 388 | * unpin_user_pages() - release an array of gup-pinned pages. |
fc1d8e7c JH |
389 | * @pages: array of pages to be marked dirty and released. |
390 | * @npages: number of pages in the @pages array. | |
391 | * | |
f1f6a7dd | 392 | * For each page in the @pages array, release the page using unpin_user_page(). |
fc1d8e7c | 393 | * |
f1f6a7dd | 394 | * Please see the unpin_user_page() documentation for details. |
fc1d8e7c | 395 | */ |
f1f6a7dd | 396 | void unpin_user_pages(struct page **pages, unsigned long npages) |
fc1d8e7c | 397 | { |
12521c76 MWO |
398 | unsigned long i; |
399 | struct folio *folio; | |
400 | unsigned int nr; | |
fc1d8e7c | 401 | |
146608bb JH |
402 | /* |
403 | * If this WARN_ON() fires, then the system *might* be leaking pages (by | |
404 | * leaving them pinned), but probably not. More likely, gup/pup returned | |
405 | * a hard -ERRNO error to the caller, who erroneously passed it here. | |
406 | */ | |
407 | if (WARN_ON(IS_ERR_VALUE(npages))) | |
408 | return; | |
31b912de | 409 | |
b6a2619c | 410 | sanity_check_pinned_pages(pages, npages); |
12521c76 MWO |
411 | for (i = 0; i < npages; i += nr) { |
412 | folio = gup_folio_next(pages, npages, i, &nr); | |
413 | gup_put_folio(folio, nr, FOLL_PIN); | |
e7602748 | 414 | } |
fc1d8e7c | 415 | } |
f1f6a7dd | 416 | EXPORT_SYMBOL(unpin_user_pages); |
fc1d8e7c | 417 | |
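
The WARN_ON() above exists because gup/pup return values are easy to misuse. A sketch of the intended pattern (the wrapper is illustrative): check the return value first and unpin only the pages that were actually pinned.

```c
#include <linux/mm.h>

/*
 * Sketch of the pattern the WARN_ON() above guards against: never feed a
 * gup/pup return value straight into unpin_user_pages(). Check it first
 * and release only the pages that were actually pinned.
 */
static int pin_use_unpin(unsigned long uaddr, int nr, struct page **pages)
{
	int pinned = pin_user_pages_fast(uaddr, nr, 0, pages);

	if (pinned < 0)		/* hard error: nothing was pinned */
		return pinned;

	/* ... use pages[0 .. pinned) here ... */

	unpin_user_pages(pages, pinned);
	return pinned;
}
```
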
6cc04054 VK |
418 | /** |
419 | * unpin_folios() - release an array of gup-pinned folios. | |
420 | * @folios: array of folios to be marked dirty and released. | |
421 | * @nfolios: number of folios in the @folios array. | |
422 | * | |
423 | * For each folio in the @folios array, release the folio using gup_put_folio. | |
424 | * | |
425 | * Please see the unpin_folio() documentation for details. | |
426 | */ | |
427 | void unpin_folios(struct folio **folios, unsigned long nfolios) | |
428 | { | |
429 | unsigned long i = 0, j; | |
430 | ||
431 | /* | |
432 | * If this WARN_ON() fires, then the system *might* be leaking folios | |
433 | * (by leaving them pinned), but probably not. More likely, gup/pup | |
434 | * returned a hard -ERRNO error to the caller, who erroneously passed | |
435 | * it here. | |
436 | */ | |
437 | if (WARN_ON(IS_ERR_VALUE(nfolios))) | |
438 | return; | |
439 | ||
440 | while (i < nfolios) { | |
441 | for (j = i + 1; j < nfolios; j++) | |
442 | if (folios[i] != folios[j]) | |
443 | break; | |
444 | ||
445 | if (folios[i]) | |
446 | gup_put_folio(folios[i], j - i, FOLL_PIN); | |
447 | i = j; | |
448 | } | |
449 | } | |
450 | EXPORT_SYMBOL_GPL(unpin_folios); | |
451 | ||
a458b76a AA |
452 | /* |
453 | * Set MMF_HAS_PINNED if not set yet; once set, it stays for the mm's |
454 | * lifecycle. Avoid setting the bit unless necessary, or it might cause write | |
455 | * cache bouncing on large SMP machines for concurrent pinned gups. | |
456 | */ | |
457 | static inline void mm_set_has_pinned_flag(unsigned long *mm_flags) | |
458 | { | |
459 | if (!test_bit(MMF_HAS_PINNED, mm_flags)) | |
460 | set_bit(MMF_HAS_PINNED, mm_flags); | |
461 | } | |
462 | ||
050a9adc | 463 | #ifdef CONFIG_MMU |
a12083d7 | 464 | |
8268614b | 465 | #ifdef CONFIG_HAVE_GUP_FAST |
a12083d7 PX |
466 | static int record_subpages(struct page *page, unsigned long sz, |
467 | unsigned long addr, unsigned long end, | |
468 | struct page **pages) | |
469 | { | |
470 | struct page *start_page; | |
471 | int nr; | |
472 | ||
473 | start_page = nth_page(page, (addr & (sz - 1)) >> PAGE_SHIFT); | |
474 | for (nr = 0; addr != end; nr++, addr += PAGE_SIZE) | |
475 | pages[nr] = nth_page(start_page, nr); | |
476 | ||
477 | return nr; | |
478 | } | |
f442fa61 YS |
479 | |
480 | /** | |
481 | * try_grab_folio_fast() - Attempt to get or pin a folio in the fast path. |
482 | * @page: pointer to page to be grabbed | |
483 | * @refs: the value to (effectively) add to the folio's refcount | |
484 | * @flags: gup flags: these are the FOLL_* flag values. | |
485 | * | |
486 | * "grab" names in this file mean, "look at flags to decide whether to use | |
487 | * FOLL_PIN or FOLL_GET behavior, when incrementing the folio's refcount." |
488 | * | |
489 | * Either FOLL_PIN or FOLL_GET (or neither) must be set, but not both at the | |
490 | * same time. (That's true throughout the get_user_pages*() and | |
491 | * pin_user_pages*() APIs.) Cases: | |
492 | * | |
493 | * FOLL_GET: folio's refcount will be incremented by @refs. | |
494 | * | |
495 | * FOLL_PIN on large folios: folio's refcount will be incremented by | |
496 | * @refs, and its pincount will be incremented by @refs. | |
497 | * | |
498 | * FOLL_PIN on single-page folios: folio's refcount will be incremented by | |
499 | * @refs * GUP_PIN_COUNTING_BIAS. | |
500 | * | |
501 | * Return: The folio containing @page (with refcount appropriately | |
502 | * incremented) for success, or NULL upon failure. If neither FOLL_GET | |
503 | * nor FOLL_PIN was set, that's considered failure, and furthermore, | |
504 | * a likely bug in the caller, so a warning is also emitted. | |
505 | * | |
506 | * It elevates the folio refcount with an add-unless-zero operation and must |
507 | * only be called on the GUP fast path. |
508 | */ | |
509 | static struct folio *try_grab_folio_fast(struct page *page, int refs, | |
510 | unsigned int flags) | |
511 | { | |
512 | struct folio *folio; | |
513 | ||
514 | /* Warn if this is not called from the GUP fast path */ |
515 | VM_WARN_ON_ONCE(!irqs_disabled()); | |
516 | ||
517 | if (WARN_ON_ONCE((flags & (FOLL_GET | FOLL_PIN)) == 0)) | |
518 | return NULL; | |
519 | ||
520 | if (unlikely(!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page))) | |
521 | return NULL; | |
522 | ||
523 | if (flags & FOLL_GET) | |
524 | return try_get_folio(page, refs); | |
525 | ||
526 | /* FOLL_PIN is set */ | |
527 | ||
528 | /* | |
529 | * Don't take a pin on the zero page - it's not going anywhere | |
530 | * and it is used in a *lot* of places. | |
531 | */ | |
532 | if (is_zero_page(page)) | |
533 | return page_folio(page); | |
534 | ||
535 | folio = try_get_folio(page, refs); | |
536 | if (!folio) | |
537 | return NULL; | |
538 | ||
539 | /* | |
540 | * Can't do FOLL_LONGTERM + FOLL_PIN gup fast path if not in a | |
541 | * right zone, so fail and let the caller fall back to the slow | |
542 | * path. | |
543 | */ | |
544 | if (unlikely((flags & FOLL_LONGTERM) && | |
545 | !folio_is_longterm_pinnable(folio))) { | |
546 | if (!put_devmap_managed_folio_refs(folio, refs)) | |
547 | folio_put_refs(folio, refs); | |
548 | return NULL; | |
549 | } | |
550 | ||
551 | /* | |
552 | * When pinning a large folio, use an exact count to track it. | |
553 | * | |
554 | * However, be sure to *also* increment the normal folio | |
555 | * refcount field at least once, so that the folio really | |
556 | * is pinned. That's why the refcount from the earlier | |
557 | * try_get_folio() is left intact. | |
558 | */ | |
559 | if (folio_test_large(folio)) | |
560 | atomic_add(refs, &folio->_pincount); | |
561 | else | |
562 | folio_ref_add(folio, | |
563 | refs * (GUP_PIN_COUNTING_BIAS - 1)); | |
564 | /* | |
565 | * Adjust the pincount before re-checking the PTE for changes. | |
566 | * This is essentially a smp_mb() and is paired with a memory | |
567 | * barrier in folio_try_share_anon_rmap_*(). | |
568 | */ | |
569 | smp_mb__after_atomic(); | |
570 | ||
571 | node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, refs); | |
572 | ||
573 | return folio; | |
574 | } | |
8268614b | 575 | #endif /* CONFIG_HAVE_GUP_FAST */ |
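
To make the flag cases in the try_grab_folio_fast() kernel-doc concrete, here is the arithmetic, assuming GUP_PIN_COUNTING_BIAS is 1 << 10 as currently defined in <linux/mm.h>:

```c
/*
 * Worked example of the accounting described above, assuming
 * GUP_PIN_COUNTING_BIAS == 1 << 10 (its current definition in <linux/mm.h>):
 *
 *   FOLL_GET, refs = 2, any folio:     refcount += 2
 *   FOLL_PIN, refs = 2, small folio:   refcount += 2 * 1024
 *   FOLL_PIN, refs = 2, large folio:   refcount += 2, _pincount += 2
 *
 * The bias is what lets pin detection distinguish "many plain references"
 * from "at least one pin" on small folios, at the cost of possible false
 * positives once a small folio accumulates on the order of 1024 references.
 */
```
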
a12083d7 | 576 | |
69e68b4f | 577 | static struct page *no_page_table(struct vm_area_struct *vma, |
878b0c45 | 578 | unsigned int flags, unsigned long address) |
4bbd4c77 | 579 | { |
878b0c45 PX |
580 | if (!(flags & FOLL_DUMP)) |
581 | return NULL; | |
582 | ||
69e68b4f | 583 | /* |
878b0c45 | 584 | * When core dumping, we don't want to allocate unnecessary pages or |
69e68b4f KS |
585 | * page tables. Return error instead of NULL to skip handle_mm_fault, |
586 | * then get_dump_page() will return NULL to leave a hole in the dump. | |
587 | * But we can only make this optimization where a hole would surely | |
588 | * be zero-filled if handle_mm_fault() actually did handle it. | |
589 | */ | |
878b0c45 PX |
590 | if (is_vm_hugetlb_page(vma)) { |
591 | struct hstate *h = hstate_vma(vma); | |
592 | ||
593 | if (!hugetlbfs_pagecache_present(h, vma, address)) | |
594 | return ERR_PTR(-EFAULT); | |
595 | } else if ((vma_is_anonymous(vma) || !vma->vm_ops->fault)) { | |
69e68b4f | 596 | return ERR_PTR(-EFAULT); |
878b0c45 PX |
597 | } |
598 | ||
69e68b4f KS |
599 | return NULL; |
600 | } | |
4bbd4c77 | 601 | |
1b167618 PX |
602 | #ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES |
603 | static struct page *follow_huge_pud(struct vm_area_struct *vma, | |
604 | unsigned long addr, pud_t *pudp, | |
605 | int flags, struct follow_page_context *ctx) | |
606 | { | |
607 | struct mm_struct *mm = vma->vm_mm; | |
608 | struct page *page; | |
609 | pud_t pud = *pudp; | |
610 | unsigned long pfn = pud_pfn(pud); | |
611 | int ret; | |
612 | ||
613 | assert_spin_locked(pud_lockptr(mm, pudp)); | |
614 | ||
615 | if ((flags & FOLL_WRITE) && !pud_write(pud)) | |
616 | return NULL; | |
617 | ||
618 | if (!pud_present(pud)) | |
619 | return NULL; | |
620 | ||
621 | pfn += (addr & ~PUD_MASK) >> PAGE_SHIFT; | |
622 | ||
623 | if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) && | |
624 | pud_devmap(pud)) { | |
625 | /* | |
626 | * device mapped pages can only be returned if the caller | |
627 | * will manage the page reference count. | |
628 | * | |
629 | * At least one of FOLL_GET | FOLL_PIN must be set, so | |
630 | * assert that here: | |
631 | */ | |
632 | if (!(flags & (FOLL_GET | FOLL_PIN))) | |
633 | return ERR_PTR(-EEXIST); | |
634 | ||
635 | if (flags & FOLL_TOUCH) | |
636 | touch_pud(vma, addr, pudp, flags & FOLL_WRITE); | |
637 | ||
638 | ctx->pgmap = get_dev_pagemap(pfn, ctx->pgmap); | |
639 | if (!ctx->pgmap) | |
640 | return ERR_PTR(-EFAULT); | |
641 | } | |
642 | ||
643 | page = pfn_to_page(pfn); | |
644 | ||
645 | if (!pud_devmap(pud) && !pud_write(pud) && | |
646 | gup_must_unshare(vma, flags, page)) | |
647 | return ERR_PTR(-EMLINK); | |
648 | ||
f442fa61 | 649 | ret = try_grab_folio(page_folio(page), 1, flags); |
1b167618 PX |
650 | if (ret) |
651 | page = ERR_PTR(ret); | |
652 | else | |
653 | ctx->page_mask = HPAGE_PUD_NR - 1; | |
654 | ||
655 | return page; | |
656 | } | |
4418c522 PX |
657 | |
658 | /* FOLL_FORCE can write to even unwritable PMDs in COW mappings. */ | |
659 | static inline bool can_follow_write_pmd(pmd_t pmd, struct page *page, | |
660 | struct vm_area_struct *vma, | |
661 | unsigned int flags) | |
662 | { | |
663 | /* If the pmd is writable, we can write to the page. */ | |
664 | if (pmd_write(pmd)) | |
665 | return true; | |
666 | ||
667 | /* Maybe FOLL_FORCE is set to override it? */ | |
668 | if (!(flags & FOLL_FORCE)) | |
669 | return false; | |
670 | ||
671 | /* But FOLL_FORCE has no effect on shared mappings */ | |
672 | if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED)) | |
673 | return false; | |
674 | ||
675 | /* ... or read-only private ones */ | |
676 | if (!(vma->vm_flags & VM_MAYWRITE)) | |
677 | return false; | |
678 | ||
679 | /* ... or already writable ones that just need to take a write fault */ | |
680 | if (vma->vm_flags & VM_WRITE) | |
681 | return false; | |
682 | ||
683 | /* | |
684 | * See can_change_pte_writable(): we broke COW and could map the page | |
685 | * writable if we have an exclusive anonymous page ... | |
686 | */ | |
687 | if (!page || !PageAnon(page) || !PageAnonExclusive(page)) | |
688 | return false; | |
689 | ||
690 | /* ... and a write-fault isn't required for other reasons. */ | |
f38ee285 | 691 | if (pmd_needs_soft_dirty_wp(vma, pmd)) |
4418c522 PX |
692 | return false; |
693 | return !userfaultfd_huge_pmd_wp(vma, pmd); | |
694 | } | |
695 | ||
696 | static struct page *follow_huge_pmd(struct vm_area_struct *vma, | |
697 | unsigned long addr, pmd_t *pmd, | |
698 | unsigned int flags, | |
699 | struct follow_page_context *ctx) | |
700 | { | |
701 | struct mm_struct *mm = vma->vm_mm; | |
702 | pmd_t pmdval = *pmd; | |
703 | struct page *page; | |
704 | int ret; | |
705 | ||
706 | assert_spin_locked(pmd_lockptr(mm, pmd)); | |
707 | ||
708 | page = pmd_page(pmdval); | |
709 | if ((flags & FOLL_WRITE) && | |
710 | !can_follow_write_pmd(pmdval, page, vma, flags)) | |
711 | return NULL; | |
712 | ||
713 | /* Avoid dumping huge zero page */ | |
714 | if ((flags & FOLL_DUMP) && is_huge_zero_pmd(pmdval)) | |
715 | return ERR_PTR(-EFAULT); | |
716 | ||
717 | if (pmd_protnone(*pmd) && !gup_can_follow_protnone(vma, flags)) | |
718 | return NULL; | |
719 | ||
720 | if (!pmd_write(pmdval) && gup_must_unshare(vma, flags, page)) | |
721 | return ERR_PTR(-EMLINK); | |
722 | ||
723 | VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) && | |
724 | !PageAnonExclusive(page), page); | |
725 | ||
f442fa61 | 726 | ret = try_grab_folio(page_folio(page), 1, flags); |
4418c522 PX |
727 | if (ret) |
728 | return ERR_PTR(ret); | |
729 | ||
730 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | |
731 | if (pmd_trans_huge(pmdval) && (flags & FOLL_TOUCH)) | |
732 | touch_pmd(vma, addr, pmd, flags & FOLL_WRITE); | |
733 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ | |
734 | ||
735 | page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT; | |
736 | ctx->page_mask = HPAGE_PMD_NR - 1; | |
737 | ||
738 | return page; | |
739 | } | |
740 | ||
1b167618 PX |
741 | #else /* CONFIG_PGTABLE_HAS_HUGE_LEAVES */ |
742 | static struct page *follow_huge_pud(struct vm_area_struct *vma, | |
743 | unsigned long addr, pud_t *pudp, | |
744 | int flags, struct follow_page_context *ctx) | |
745 | { | |
746 | return NULL; | |
747 | } | |
4418c522 PX |
748 | |
749 | static struct page *follow_huge_pmd(struct vm_area_struct *vma, | |
750 | unsigned long addr, pmd_t *pmd, | |
751 | unsigned int flags, | |
752 | struct follow_page_context *ctx) | |
753 | { | |
754 | return NULL; | |
755 | } | |
1b167618 PX |
756 | #endif /* CONFIG_PGTABLE_HAS_HUGE_LEAVES */ |
757 | ||
1027e443 KS |
758 | static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address, |
759 | pte_t *pte, unsigned int flags) | |
760 | { | |
1027e443 | 761 | if (flags & FOLL_TOUCH) { |
c33c7948 RR |
762 | pte_t orig_entry = ptep_get(pte); |
763 | pte_t entry = orig_entry; | |
1027e443 KS |
764 | |
765 | if (flags & FOLL_WRITE) | |
766 | entry = pte_mkdirty(entry); | |
767 | entry = pte_mkyoung(entry); | |
768 | ||
c33c7948 | 769 | if (!pte_same(orig_entry, entry)) { |
1027e443 KS |
770 | set_pte_at(vma->vm_mm, address, pte, entry); |
771 | update_mmu_cache(vma, address, pte); | |
772 | } | |
773 | } | |
774 | ||
775 | /* Proper page table entry exists, but no corresponding struct page */ | |
776 | return -EEXIST; | |
777 | } | |
778 | ||
5535be30 DH |
779 | /* FOLL_FORCE can write to even unwritable PTEs in COW mappings. */ |
780 | static inline bool can_follow_write_pte(pte_t pte, struct page *page, | |
781 | struct vm_area_struct *vma, | |
782 | unsigned int flags) | |
19be0eaf | 783 | { |
5535be30 DH |
784 | /* If the pte is writable, we can write to the page. */ |
785 | if (pte_write(pte)) | |
786 | return true; | |
787 | ||
788 | /* Maybe FOLL_FORCE is set to override it? */ | |
789 | if (!(flags & FOLL_FORCE)) | |
790 | return false; | |
791 | ||
792 | /* But FOLL_FORCE has no effect on shared mappings */ | |
793 | if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED)) | |
794 | return false; | |
795 | ||
796 | /* ... or read-only private ones */ | |
797 | if (!(vma->vm_flags & VM_MAYWRITE)) | |
798 | return false; | |
799 | ||
800 | /* ... or already writable ones that just need to take a write fault */ | |
801 | if (vma->vm_flags & VM_WRITE) | |
802 | return false; | |
803 | ||
804 | /* | |
805 | * See can_change_pte_writable(): we broke COW and could map the page | |
806 | * writable if we have an exclusive anonymous page ... | |
807 | */ | |
808 | if (!page || !PageAnon(page) || !PageAnonExclusive(page)) | |
809 | return false; | |
810 | ||
811 | /* ... and a write-fault isn't required for other reasons. */ | |
f38ee285 | 812 | if (pte_needs_soft_dirty_wp(vma, pte)) |
5535be30 DH |
813 | return false; |
814 | return !userfaultfd_pte_wp(vma, pte); | |
19be0eaf LT |
815 | } |
816 | ||
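
The rules encoded in can_follow_write_pte() (and its PMD twin above) are what let a debugger plant breakpoints in a read-only private text mapping. A rough sketch of that classic FOLL_FORCE user; poke_text() is hypothetical, access_remote_vm() is the real entry point:

```c
#include <linux/mm.h>

/*
 * Rough sketch of the classic FOLL_FORCE user: poking a breakpoint into a
 * read-only, private (CoW) text mapping of another process, as ptrace does.
 * poke_text() is hypothetical; access_remote_vm() is the real entry point
 * and ends up subject to the can_follow_write_*() rules above.
 */
static int poke_text(struct mm_struct *mm, unsigned long addr,
		     const void *insn, int len)
{
	int copied = access_remote_vm(mm, addr, (void *)insn, len,
				      FOLL_FORCE | FOLL_WRITE);

	return copied == len ? 0 : -EFAULT;
}
```
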
69e68b4f | 817 | static struct page *follow_page_pte(struct vm_area_struct *vma, |
df06b37f KB |
818 | unsigned long address, pmd_t *pmd, unsigned int flags, |
819 | struct dev_pagemap **pgmap) | |
69e68b4f KS |
820 | { |
821 | struct mm_struct *mm = vma->vm_mm; | |
822 | struct page *page; | |
823 | spinlock_t *ptl; | |
824 | pte_t *ptep, pte; | |
f28d4363 | 825 | int ret; |
4bbd4c77 | 826 | |
eddb1c22 JH |
827 | /* FOLL_GET and FOLL_PIN are mutually exclusive. */ |
828 | if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) == | |
829 | (FOLL_PIN | FOLL_GET))) | |
830 | return ERR_PTR(-EINVAL); | |
4bbd4c77 KS |
831 | |
832 | ptep = pte_offset_map_lock(mm, pmd, address, &ptl); | |
04dee9e8 | 833 | if (!ptep) |
878b0c45 | 834 | return no_page_table(vma, flags, address); |
c33c7948 | 835 | pte = ptep_get(ptep); |
f7355e99 DH |
836 | if (!pte_present(pte)) |
837 | goto no_page; | |
d74943a2 | 838 | if (pte_protnone(pte) && !gup_can_follow_protnone(vma, flags)) |
4bbd4c77 | 839 | goto no_page; |
4bbd4c77 KS |
840 | |
841 | page = vm_normal_page(vma, address, pte); | |
5535be30 DH |
842 | |
843 | /* | |
844 | * We only care about anon pages in can_follow_write_pte() and don't | |
845 | * have to worry about pte_devmap() because they are never anon. | |
846 | */ | |
847 | if ((flags & FOLL_WRITE) && | |
848 | !can_follow_write_pte(pte, page, vma, flags)) { | |
849 | page = NULL; | |
850 | goto out; | |
851 | } | |
852 | ||
3faa52c0 | 853 | if (!page && pte_devmap(pte) && (flags & (FOLL_GET | FOLL_PIN))) { |
3565fce3 | 854 | /* |
3faa52c0 JH |
855 | * Only return device mapping pages in the FOLL_GET or FOLL_PIN |
856 | * case since they are only valid while holding the pgmap | |
857 | * reference. | |
3565fce3 | 858 | */ |
df06b37f KB |
859 | *pgmap = get_dev_pagemap(pte_pfn(pte), *pgmap); |
860 | if (*pgmap) | |
3565fce3 DW |
861 | page = pte_page(pte); |
862 | else | |
863 | goto no_page; | |
864 | } else if (unlikely(!page)) { | |
1027e443 KS |
865 | if (flags & FOLL_DUMP) { |
866 | /* Avoid special (like zero) pages in core dumps */ | |
867 | page = ERR_PTR(-EFAULT); | |
868 | goto out; | |
869 | } | |
870 | ||
871 | if (is_zero_pfn(pte_pfn(pte))) { | |
872 | page = pte_page(pte); | |
873 | } else { | |
1027e443 KS |
874 | ret = follow_pfn_pte(vma, address, ptep, flags); |
875 | page = ERR_PTR(ret); | |
876 | goto out; | |
877 | } | |
4bbd4c77 KS |
878 | } |
879 | ||
84209e87 | 880 | if (!pte_write(pte) && gup_must_unshare(vma, flags, page)) { |
a7f22660 DH |
881 | page = ERR_PTR(-EMLINK); |
882 | goto out; | |
883 | } | |
b6a2619c DH |
884 | |
885 | VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) && | |
886 | !PageAnonExclusive(page), page); | |
887 | ||
f442fa61 YS |
888 | /* try_grab_folio() does nothing unless FOLL_GET or FOLL_PIN is set. */ |
889 | ret = try_grab_folio(page_folio(page), 1, flags); | |
0f089235 LG |
890 | if (unlikely(ret)) { |
891 | page = ERR_PTR(ret); | |
3faa52c0 | 892 | goto out; |
8fde12ca | 893 | } |
4003f107 | 894 | |
f28d4363 CI |
895 | /* |
896 | * We need to make the page accessible if and only if we are going | |
897 | * to access its content (the FOLL_PIN case). Please see | |
898 | * Documentation/core-api/pin_user_pages.rst for details. | |
899 | */ | |
900 | if (flags & FOLL_PIN) { | |
901 | ret = arch_make_page_accessible(page); | |
902 | if (ret) { | |
903 | unpin_user_page(page); | |
904 | page = ERR_PTR(ret); | |
905 | goto out; | |
906 | } | |
907 | } | |
4bbd4c77 KS |
908 | if (flags & FOLL_TOUCH) { |
909 | if ((flags & FOLL_WRITE) && | |
910 | !pte_dirty(pte) && !PageDirty(page)) | |
911 | set_page_dirty(page); | |
912 | /* | |
913 | * pte_mkyoung() would be more correct here, but atomic care | |
914 | * is needed to avoid losing the dirty bit: it is easier to use | |
915 | * mark_page_accessed(). | |
916 | */ | |
917 | mark_page_accessed(page); | |
918 | } | |
1027e443 | 919 | out: |
4bbd4c77 | 920 | pte_unmap_unlock(ptep, ptl); |
4bbd4c77 | 921 | return page; |
4bbd4c77 KS |
922 | no_page: |
923 | pte_unmap_unlock(ptep, ptl); | |
924 | if (!pte_none(pte)) | |
69e68b4f | 925 | return NULL; |
878b0c45 | 926 | return no_page_table(vma, flags, address); |
69e68b4f KS |
927 | } |
928 | ||
080dbb61 AK |
929 | static struct page *follow_pmd_mask(struct vm_area_struct *vma, |
930 | unsigned long address, pud_t *pudp, | |
df06b37f KB |
931 | unsigned int flags, |
932 | struct follow_page_context *ctx) | |
69e68b4f | 933 | { |
68827280 | 934 | pmd_t *pmd, pmdval; |
69e68b4f KS |
935 | spinlock_t *ptl; |
936 | struct page *page; | |
937 | struct mm_struct *mm = vma->vm_mm; | |
938 | ||
080dbb61 | 939 | pmd = pmd_offset(pudp, address); |
26e1a0c3 | 940 | pmdval = pmdp_get_lockless(pmd); |
68827280 | 941 | if (pmd_none(pmdval)) |
878b0c45 | 942 | return no_page_table(vma, flags, address); |
f7355e99 | 943 | if (!pmd_present(pmdval)) |
878b0c45 | 944 | return no_page_table(vma, flags, address); |
68827280 | 945 | if (pmd_devmap(pmdval)) { |
3565fce3 | 946 | ptl = pmd_lock(mm, pmd); |
df06b37f | 947 | page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap); |
3565fce3 DW |
948 | spin_unlock(ptl); |
949 | if (page) | |
950 | return page; | |
878b0c45 | 951 | return no_page_table(vma, flags, address); |
3565fce3 | 952 | } |
4418c522 | 953 | if (likely(!pmd_leaf(pmdval))) |
df06b37f | 954 | return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap); |
6742d293 | 955 | |
d74943a2 | 956 | if (pmd_protnone(pmdval) && !gup_can_follow_protnone(vma, flags)) |
878b0c45 | 957 | return no_page_table(vma, flags, address); |
db08f203 | 958 | |
6742d293 | 959 | ptl = pmd_lock(mm, pmd); |
4418c522 PX |
960 | pmdval = *pmd; |
961 | if (unlikely(!pmd_present(pmdval))) { | |
84c3fc4e | 962 | spin_unlock(ptl); |
878b0c45 | 963 | return no_page_table(vma, flags, address); |
84c3fc4e | 964 | } |
4418c522 | 965 | if (unlikely(!pmd_leaf(pmdval))) { |
6742d293 | 966 | spin_unlock(ptl); |
df06b37f | 967 | return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap); |
6742d293 | 968 | } |
4418c522 | 969 | if (pmd_trans_huge(pmdval) && (flags & FOLL_SPLIT_PMD)) { |
2378118b HD |
970 | spin_unlock(ptl); |
971 | split_huge_pmd(vma, pmd, address); | |
972 | /* If pmd was left empty, stuff a page table in there quickly */ | |
973 | return pte_alloc(mm, pmd) ? ERR_PTR(-ENOMEM) : | |
df06b37f | 974 | follow_page_pte(vma, address, pmd, flags, &ctx->pgmap); |
69e68b4f | 975 | } |
4418c522 | 976 | page = follow_huge_pmd(vma, address, pmd, flags, ctx); |
6742d293 | 977 | spin_unlock(ptl); |
6742d293 | 978 | return page; |
4bbd4c77 KS |
979 | } |
980 | ||
080dbb61 AK |
981 | static struct page *follow_pud_mask(struct vm_area_struct *vma, |
982 | unsigned long address, p4d_t *p4dp, | |
df06b37f KB |
983 | unsigned int flags, |
984 | struct follow_page_context *ctx) | |
080dbb61 | 985 | { |
caf8cab7 | 986 | pud_t *pudp, pud; |
080dbb61 AK |
987 | spinlock_t *ptl; |
988 | struct page *page; | |
989 | struct mm_struct *mm = vma->vm_mm; | |
990 | ||
caf8cab7 PX |
991 | pudp = pud_offset(p4dp, address); |
992 | pud = READ_ONCE(*pudp); | |
1b167618 | 993 | if (!pud_present(pud)) |
878b0c45 | 994 | return no_page_table(vma, flags, address); |
1b167618 | 995 | if (pud_leaf(pud)) { |
caf8cab7 | 996 | ptl = pud_lock(mm, pudp); |
1b167618 | 997 | page = follow_huge_pud(vma, address, pudp, flags, ctx); |
080dbb61 AK |
998 | spin_unlock(ptl); |
999 | if (page) | |
1000 | return page; | |
878b0c45 | 1001 | return no_page_table(vma, flags, address); |
080dbb61 | 1002 | } |
caf8cab7 | 1003 | if (unlikely(pud_bad(pud))) |
878b0c45 | 1004 | return no_page_table(vma, flags, address); |
080dbb61 | 1005 | |
caf8cab7 | 1006 | return follow_pmd_mask(vma, address, pudp, flags, ctx); |
080dbb61 AK |
1007 | } |
1008 | ||
080dbb61 AK |
1009 | static struct page *follow_p4d_mask(struct vm_area_struct *vma, |
1010 | unsigned long address, pgd_t *pgdp, | |
df06b37f KB |
1011 | unsigned int flags, |
1012 | struct follow_page_context *ctx) | |
080dbb61 | 1013 | { |
e6fd5564 | 1014 | p4d_t *p4dp, p4d; |
080dbb61 | 1015 | |
e6fd5564 PX |
1016 | p4dp = p4d_offset(pgdp, address); |
1017 | p4d = READ_ONCE(*p4dp); | |
1965e933 | 1018 | BUILD_BUG_ON(p4d_leaf(p4d)); |
a12083d7 | 1019 | |
a12083d7 | 1020 | if (!p4d_present(p4d) || p4d_bad(p4d)) |
878b0c45 | 1021 | return no_page_table(vma, flags, address); |
080dbb61 | 1022 | |
e6fd5564 | 1023 | return follow_pud_mask(vma, address, p4dp, flags, ctx); |
080dbb61 AK |
1024 | } |
1025 | ||
1026 | /** | |
1027 | * follow_page_mask - look up a page descriptor from a user-virtual address | |
1028 | * @vma: vm_area_struct mapping @address | |
1029 | * @address: virtual address to look up | |
1030 | * @flags: flags modifying lookup behaviour | |
78179556 MR |
1031 | * @ctx: contains dev_pagemap for %ZONE_DEVICE memory pinning and a |
1032 | * pointer to output page_mask | |
080dbb61 AK |
1033 | * |
1034 | * @flags can have FOLL_ flags set, defined in <linux/mm.h> | |
1035 | * | |
78179556 MR |
1036 | * When getting pages from ZONE_DEVICE memory, the @ctx->pgmap caches |
1037 | * the device's dev_pagemap metadata to avoid repeating expensive lookups. | |
1038 | * | |
a7f22660 DH |
1039 | * When getting an anonymous page and the caller has to trigger unsharing |
1040 | * of a shared anonymous page first, -EMLINK is returned. The caller should | |
1041 | * trigger a fault with FAULT_FLAG_UNSHARE set. Note that unsharing is only | |
1042 | * relevant with FOLL_PIN and !FOLL_WRITE. | |
1043 | * | |
78179556 MR |
1044 | * On output, the @ctx->page_mask is set according to the size of the page. |
1045 | * | |
1046 | * Return: the mapped (struct page *), %NULL if no mapping exists, or | |
080dbb61 AK |
1047 | * an error pointer if there is a mapping to something not represented |
1048 | * by a page descriptor (see also vm_normal_page()). | |
1049 | */ | |
a7030aea | 1050 | static struct page *follow_page_mask(struct vm_area_struct *vma, |
080dbb61 | 1051 | unsigned long address, unsigned int flags, |
df06b37f | 1052 | struct follow_page_context *ctx) |
080dbb61 AK |
1053 | { |
1054 | pgd_t *pgd; | |
080dbb61 | 1055 | struct mm_struct *mm = vma->vm_mm; |
9cb28da5 | 1056 | struct page *page; |
080dbb61 | 1057 | |
9cb28da5 | 1058 | vma_pgtable_walk_begin(vma); |
080dbb61 | 1059 | |
9cb28da5 | 1060 | ctx->page_mask = 0; |
080dbb61 AK |
1061 | pgd = pgd_offset(mm, address); |
1062 | ||
8268614b | 1063 | if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) |
a12083d7 PX |
1064 | page = no_page_table(vma, flags, address); |
1065 | else | |
1066 | page = follow_p4d_mask(vma, address, pgd, flags, ctx); | |
080dbb61 | 1067 | |
9cb28da5 PX |
1068 | vma_pgtable_walk_end(vma); |
1069 | ||
a12083d7 | 1070 | return page; |
df06b37f KB |
1071 | } |
1072 | ||
1073 | struct page *follow_page(struct vm_area_struct *vma, unsigned long address, | |
1074 | unsigned int foll_flags) | |
1075 | { | |
1076 | struct follow_page_context ctx = { NULL }; | |
1077 | struct page *page; | |
1078 | ||
1507f512 MR |
1079 | if (vma_is_secretmem(vma)) |
1080 | return NULL; | |
1081 | ||
d64e2dbc | 1082 | if (WARN_ON_ONCE(foll_flags & FOLL_PIN)) |
8909691b DH |
1083 | return NULL; |
1084 | ||
d74943a2 DH |
1085 | /* |
1086 | * We never set FOLL_HONOR_NUMA_FAULT because callers don't expect | |
1087 | * to fail on PROT_NONE-mapped pages. | |
1088 | */ | |
df06b37f KB |
1089 | page = follow_page_mask(vma, address, foll_flags, &ctx); |
1090 | if (ctx.pgmap) | |
1091 | put_dev_pagemap(ctx.pgmap); | |
1092 | return page; | |
080dbb61 AK |
1093 | } |
1094 | ||
f2b495ca KS |
1095 | static int get_gate_page(struct mm_struct *mm, unsigned long address, |
1096 | unsigned int gup_flags, struct vm_area_struct **vma, | |
1097 | struct page **page) | |
1098 | { | |
1099 | pgd_t *pgd; | |
c2febafc | 1100 | p4d_t *p4d; |
f2b495ca KS |
1101 | pud_t *pud; |
1102 | pmd_t *pmd; | |
1103 | pte_t *pte; | |
c33c7948 | 1104 | pte_t entry; |
f2b495ca KS |
1105 | int ret = -EFAULT; |
1106 | ||
1107 | /* user gate pages are read-only */ | |
1108 | if (gup_flags & FOLL_WRITE) | |
1109 | return -EFAULT; | |
1110 | if (address > TASK_SIZE) | |
1111 | pgd = pgd_offset_k(address); | |
1112 | else | |
1113 | pgd = pgd_offset_gate(mm, address); | |
b5d1c39f AL |
1114 | if (pgd_none(*pgd)) |
1115 | return -EFAULT; | |
c2febafc | 1116 | p4d = p4d_offset(pgd, address); |
b5d1c39f AL |
1117 | if (p4d_none(*p4d)) |
1118 | return -EFAULT; | |
c2febafc | 1119 | pud = pud_offset(p4d, address); |
b5d1c39f AL |
1120 | if (pud_none(*pud)) |
1121 | return -EFAULT; | |
f2b495ca | 1122 | pmd = pmd_offset(pud, address); |
84c3fc4e | 1123 | if (!pmd_present(*pmd)) |
f2b495ca | 1124 | return -EFAULT; |
f2b495ca | 1125 | pte = pte_offset_map(pmd, address); |
04dee9e8 HD |
1126 | if (!pte) |
1127 | return -EFAULT; | |
c33c7948 RR |
1128 | entry = ptep_get(pte); |
1129 | if (pte_none(entry)) | |
f2b495ca KS |
1130 | goto unmap; |
1131 | *vma = get_gate_vma(mm); | |
1132 | if (!page) | |
1133 | goto out; | |
c33c7948 | 1134 | *page = vm_normal_page(*vma, address, entry); |
f2b495ca | 1135 | if (!*page) { |
c33c7948 | 1136 | if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(entry))) |
f2b495ca | 1137 | goto unmap; |
c33c7948 | 1138 | *page = pte_page(entry); |
f2b495ca | 1139 | } |
f442fa61 | 1140 | ret = try_grab_folio(page_folio(*page), 1, gup_flags); |
0f089235 | 1141 | if (unlikely(ret)) |
8fde12ca | 1142 | goto unmap; |
f2b495ca KS |
1143 | out: |
1144 | ret = 0; | |
1145 | unmap: | |
1146 | pte_unmap(pte); | |
1147 | return ret; | |
1148 | } | |
1149 | ||
9a95f3cf | 1150 | /* |
9a863a6a JG |
1151 | * mmap_lock must be held on entry. If @flags has FOLL_UNLOCKABLE but not |
1152 | * FOLL_NOWAIT, the mmap_lock may be released. If it is, *@locked will be set | |
1153 | * to 0 and -EBUSY returned. | |
9a95f3cf | 1154 | */ |
64019a2e | 1155 | static int faultin_page(struct vm_area_struct *vma, |
a7f22660 DH |
1156 | unsigned long address, unsigned int *flags, bool unshare, |
1157 | int *locked) | |
16744483 | 1158 | { |
16744483 | 1159 | unsigned int fault_flags = 0; |
2b740303 | 1160 | vm_fault_t ret; |
16744483 | 1161 | |
55b8fe70 AG |
1162 | if (*flags & FOLL_NOFAULT) |
1163 | return -EFAULT; | |
16744483 KS |
1164 | if (*flags & FOLL_WRITE) |
1165 | fault_flags |= FAULT_FLAG_WRITE; | |
1b2ee126 DH |
1166 | if (*flags & FOLL_REMOTE) |
1167 | fault_flags |= FAULT_FLAG_REMOTE; | |
f04740f5 | 1168 | if (*flags & FOLL_UNLOCKABLE) { |
71335f37 | 1169 | fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; |
93c5c61d PX |
1170 | /* |
1171 | * FAULT_FLAG_INTERRUPTIBLE is opt-in. GUP callers must set | |
1172 | * FOLL_INTERRUPTIBLE to enable FAULT_FLAG_INTERRUPTIBLE. | |
1173 | * That's because some callers may not be prepared to | |
1174 | * handle early exits caused by non-fatal signals. | |
1175 | */ | |
1176 | if (*flags & FOLL_INTERRUPTIBLE) | |
1177 | fault_flags |= FAULT_FLAG_INTERRUPTIBLE; | |
1178 | } | |
16744483 KS |
1179 | if (*flags & FOLL_NOWAIT) |
1180 | fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT; | |
234b239b | 1181 | if (*flags & FOLL_TRIED) { |
4426e945 PX |
1182 | /* |
1183 | * Note: FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_TRIED | |
1184 | * can co-exist | |
1185 | */ | |
234b239b ALC |
1186 | fault_flags |= FAULT_FLAG_TRIED; |
1187 | } | |
a7f22660 DH |
1188 | if (unshare) { |
1189 | fault_flags |= FAULT_FLAG_UNSHARE; | |
1190 | /* FAULT_FLAG_WRITE and FAULT_FLAG_UNSHARE are incompatible */ | |
1191 | VM_BUG_ON(fault_flags & FAULT_FLAG_WRITE); | |
1192 | } | |
16744483 | 1193 | |
bce617ed | 1194 | ret = handle_mm_fault(vma, address, fault_flags, NULL); |
d9272525 PX |
1195 | |
1196 | if (ret & VM_FAULT_COMPLETED) { | |
1197 | /* | |
1198 | * With FAULT_FLAG_RETRY_NOWAIT we'll never release the | |
1199 | * mmap lock in the page fault handler. Sanity check this. | |
1200 | */ | |
1201 | WARN_ON_ONCE(fault_flags & FAULT_FLAG_RETRY_NOWAIT); | |
9a863a6a JG |
1202 | *locked = 0; |
1203 | ||
d9272525 PX |
1204 | /* |
1205 | * We should do the same as VM_FAULT_RETRY, but let's not | |
1206 | * return -EBUSY since that's not reflecting the reality of | |
1207 | * what has happened - we've just fully completed a page | |
1208 | * fault, with the mmap lock released. Use -EAGAIN to show | |
1209 | * that we want to take the mmap lock _again_. | |
1210 | */ | |
1211 | return -EAGAIN; | |
1212 | } | |
1213 | ||
16744483 | 1214 | if (ret & VM_FAULT_ERROR) { |
9a291a7c JM |
1215 | int err = vm_fault_to_errno(ret, *flags); |
1216 | ||
1217 | if (err) | |
1218 | return err; | |
16744483 KS |
1219 | BUG(); |
1220 | } | |
1221 | ||
16744483 | 1222 | if (ret & VM_FAULT_RETRY) { |
9a863a6a | 1223 | if (!(fault_flags & FAULT_FLAG_RETRY_NOWAIT)) |
4f6da934 | 1224 | *locked = 0; |
16744483 KS |
1225 | return -EBUSY; |
1226 | } | |
1227 | ||
16744483 KS |
1228 | return 0; |
1229 | } | |
1230 | ||
8ac26843 LS |
1231 | /* |
1232 | * Writing to file-backed mappings which require folio dirty tracking using GUP | |
1233 | * is a fundamentally broken operation, as kernel write access to GUP mappings | |
1234 | * does not adhere to the semantics expected by a file system. |
1235 | * | |
1236 | * Consider the following scenario:- | |
1237 | * | |
1238 | * 1. A folio is written to via GUP which write-faults the memory, notifying | |
1239 | * the file system and dirtying the folio. | |
1240 | * 2. Later, writeback is triggered, resulting in the folio being cleaned and | |
1241 | * the PTE being marked read-only. | |
1242 | * 3. The GUP caller writes to the folio, as it is mapped read/write via the | |
1243 | * direct mapping. | |
1244 | * 4. The GUP caller, now done with the page, unpins it and sets it dirty | |
1245 | * (though it does not have to). | |
1246 | * | |
1247 | * This results in both data being written to a folio without writenotify, and | |
1248 | * the folio being dirtied unexpectedly (if the caller decides to do so). | |
1249 | */ | |
1250 | static bool writable_file_mapping_allowed(struct vm_area_struct *vma, | |
1251 | unsigned long gup_flags) | |
1252 | { | |
1253 | /* | |
1254 | * If we aren't pinning then no problematic write can occur. A long term | |
1255 | * pin is the most egregious case so this is the case we disallow. | |
1256 | */ | |
1257 | if ((gup_flags & (FOLL_PIN | FOLL_LONGTERM)) != | |
1258 | (FOLL_PIN | FOLL_LONGTERM)) | |
1259 | return true; | |
1260 | ||
1261 | /* | |
1262 | * If the VMA does not require dirty tracking then no problematic write | |
1263 | * can occur either. | |
1264 | */ | |
1265 | return !vma_needs_dirty_tracking(vma); | |
1266 | } | |
1267 | ||
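
For callers, the practical consequence of this check is that a writable, long-term pin of a shared, dirty-tracked file mapping fails up front. A sketch of what to expect (the wrapper and the mapping it targets are illustrative):

```c
#include <linux/mm.h>

/*
 * Illustrative only: a writable, long-term pin of a user address that lies
 * in a shared, dirty-tracked file mapping (say a writable MAP_SHARED ext4
 * mapping) is expected to fail with -EFAULT via check_vma_flags(), before
 * any page is pinned.
 */
static int try_longterm_pin(unsigned long uaddr, int nr, struct page **pages)
{
	return pin_user_pages_fast(uaddr, nr,
				   FOLL_WRITE | FOLL_LONGTERM, pages);
}
```
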
fa5bb209 KS |
1268 | static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags) |
1269 | { | |
1270 | vm_flags_t vm_flags = vma->vm_flags; | |
1b2ee126 DH |
1271 | int write = (gup_flags & FOLL_WRITE); |
1272 | int foreign = (gup_flags & FOLL_REMOTE); | |
8ac26843 | 1273 | bool vma_anon = vma_is_anonymous(vma); |
fa5bb209 KS |
1274 | |
1275 | if (vm_flags & (VM_IO | VM_PFNMAP)) | |
1276 | return -EFAULT; | |
1277 | ||
8ac26843 | 1278 | if ((gup_flags & FOLL_ANON) && !vma_anon) |
7f7ccc2c WT |
1279 | return -EFAULT; |
1280 | ||
52650c8b JG |
1281 | if ((gup_flags & FOLL_LONGTERM) && vma_is_fsdax(vma)) |
1282 | return -EOPNOTSUPP; | |
1283 | ||
1507f512 MR |
1284 | if (vma_is_secretmem(vma)) |
1285 | return -EFAULT; | |
1286 | ||
1b2ee126 | 1287 | if (write) { |
8ac26843 LS |
1288 | if (!vma_anon && |
1289 | !writable_file_mapping_allowed(vma, gup_flags)) | |
1290 | return -EFAULT; | |
1291 | ||
6beb9958 | 1292 | if (!(vm_flags & VM_WRITE) || (vm_flags & VM_SHADOW_STACK)) { |
fa5bb209 KS |
1293 | if (!(gup_flags & FOLL_FORCE)) |
1294 | return -EFAULT; | |
f347454d DH |
1295 | /* hugetlb does not support FOLL_FORCE|FOLL_WRITE. */ |
1296 | if (is_vm_hugetlb_page(vma)) | |
1297 | return -EFAULT; | |
fa5bb209 KS |
1298 | /* |
1299 | * We used to let the write,force case do COW in a | |
1300 | * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could | |
1301 | * set a breakpoint in a read-only mapping of an | |
1302 | * executable, without corrupting the file (yet only | |
1303 | * when that file had been opened for writing!). | |
1304 | * Anon pages in shared mappings are surprising: now | |
1305 | * just reject it. | |
1306 | */ | |
46435364 | 1307 | if (!is_cow_mapping(vm_flags)) |
fa5bb209 | 1308 | return -EFAULT; |
fa5bb209 KS |
1309 | } |
1310 | } else if (!(vm_flags & VM_READ)) { | |
1311 | if (!(gup_flags & FOLL_FORCE)) | |
1312 | return -EFAULT; | |
1313 | /* | |
1314 | * Is there actually any vma we can reach here which does not | |
1315 | * have VM_MAYREAD set? | |
1316 | */ | |
1317 | if (!(vm_flags & VM_MAYREAD)) | |
1318 | return -EFAULT; | |
1319 | } | |
d61172b4 DH |
1320 | /* |
1321 | * gups are always data accesses, not instruction | |
1322 | * fetches, so execute=false here | |
1323 | */ | |
1324 | if (!arch_vma_access_permitted(vma, write, false, foreign)) | |
33a709b2 | 1325 | return -EFAULT; |
fa5bb209 KS |
1326 | return 0; |
1327 | } | |
1328 | ||
6cd06ab1 LT |
1329 | /* |
1330 | * This is "vma_lookup()", but with a warning if we would have | |
1331 | * historically expanded the stack in the GUP code. | |
1332 | */ | |
1333 | static struct vm_area_struct *gup_vma_lookup(struct mm_struct *mm, | |
1334 | unsigned long addr) | |
1335 | { | |
1336 | #ifdef CONFIG_STACK_GROWSUP | |
1337 | return vma_lookup(mm, addr); | |
1338 | #else | |
1339 | static volatile unsigned long next_warn; | |
1340 | struct vm_area_struct *vma; | |
1341 | unsigned long now, next; | |
1342 | ||
1343 | vma = find_vma(mm, addr); | |
1344 | if (!vma || (addr >= vma->vm_start)) | |
1345 | return vma; | |
1346 | ||
1347 | /* Only warn for half-way relevant accesses */ | |
1348 | if (!(vma->vm_flags & VM_GROWSDOWN)) | |
1349 | return NULL; | |
1350 | if (vma->vm_start - addr > 65536) | |
1351 | return NULL; | |
1352 | ||
1353 | /* Let's not warn more than once an hour.. */ | |
1354 | now = jiffies; next = next_warn; | |
1355 | if (next && time_before(now, next)) | |
1356 | return NULL; | |
1357 | next_warn = now + 60*60*HZ; | |
1358 | ||
1359 | /* Let people know things may have changed. */ | |
1360 | pr_warn("GUP no longer grows the stack in %s (%d): %lx-%lx (%lx)\n", | |
1361 | current->comm, task_pid_nr(current), | |
1362 | vma->vm_start, vma->vm_end, addr); | |
1363 | dump_stack(); | |
1364 | return NULL; | |
1365 | #endif | |
1366 | } | |
1367 | ||
4bbd4c77 KS |
1368 | /** |
1369 | * __get_user_pages() - pin user pages in memory | |
4bbd4c77 KS |
1370 | * @mm: mm_struct of target mm |
1371 | * @start: starting user address | |
1372 | * @nr_pages: number of pages from start to pin | |
1373 | * @gup_flags: flags modifying pin behaviour | |
1374 | * @pages: array that receives pointers to the pages pinned. | |
1375 | * Should be at least nr_pages long. Or NULL, if caller | |
1376 | * only intends to ensure the pages are faulted in. | |
c1e8d7c6 | 1377 | * @locked: whether we're still with the mmap_lock held |
4bbd4c77 | 1378 | * |
d2dfbe47 LX |
1379 | * Returns either number of pages pinned (which may be less than the |
1380 | * number requested), or an error. Details about the return value: | |
1381 | * | |
1382 | * -- If nr_pages is 0, returns 0. | |
1383 | * -- If nr_pages is >0, but no pages were pinned, returns -errno. | |
1384 | * -- If nr_pages is >0, and some pages were pinned, returns the number of | |
1385 | * pages pinned. Again, this may be less than nr_pages. | |
2d3a36a4 | 1386 | * -- 0 return value is possible when the fault would need to be retried. |
d2dfbe47 LX |
1387 | * |
1388 | * The caller is responsible for releasing returned @pages, via put_page(). | |
1389 | * | |
c1e8d7c6 | 1390 | * Must be called with mmap_lock held. It may be released. See below. |
4bbd4c77 KS |
1391 | * |
1392 | * __get_user_pages walks a process's page tables and takes a reference to | |
1393 | * each struct page that each user address corresponds to at a given | |
1394 | * instant. That is, it takes the page that would be accessed if a user | |
1395 | * thread accesses the given user virtual address at that instant. | |
1396 | * | |
1397 | * This does not guarantee that the page exists in the user mappings when | |
1398 | * __get_user_pages returns, and there may even be a completely different | |
1399 | * page there in some cases (eg. if mmapped pagecache has been invalidated | |
c5acf1f6 | 1400 | * and subsequently re-faulted). However it does guarantee that the page |
4bbd4c77 KS |
1401 | * won't be freed completely. And mostly callers simply care that the page |
1402 | * contains data that was valid *at some point in time*. Typically, an IO | |
1403 | * or similar operation cannot guarantee anything stronger anyway because | |
1404 | * locks can't be held over the syscall boundary. | |
1405 | * | |
1406 | * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If | |
1407 | * the page is written to, set_page_dirty (or set_page_dirty_lock, as | |
1408 | * appropriate) must be called after the page is finished with, and | |
1409 | * before put_page is called. | |
1410 | * | |
9a863a6a JG |
1411 | * If FOLL_UNLOCKABLE is set without FOLL_NOWAIT then the mmap_lock may |
1412 | * be released. If this happens *@locked will be set to 0 on return. | |
9a95f3cf | 1413 | * |
9a863a6a JG |
1414 | * A caller using such a combination of @gup_flags must therefore hold the |
1415 | * mmap_lock for reading only, and recognize when it's been released. Otherwise, | |
1416 | * it must be held for either reading or writing and will not be released. | |
4bbd4c77 KS |
1417 | * |
1418 | * In most cases, get_user_pages or get_user_pages_fast should be used | |
1419 | * instead of __get_user_pages. __get_user_pages should be used only if | |
1420 | * you need some special @gup_flags. | |
1421 | */ | |
64019a2e | 1422 | static long __get_user_pages(struct mm_struct *mm, |
4bbd4c77 KS |
1423 | unsigned long start, unsigned long nr_pages, |
1424 | unsigned int gup_flags, struct page **pages, | |
b2cac248 | 1425 | int *locked) |
4bbd4c77 | 1426 | { |
df06b37f | 1427 | long ret = 0, i = 0; |
fa5bb209 | 1428 | struct vm_area_struct *vma = NULL; |
df06b37f | 1429 | struct follow_page_context ctx = { NULL }; |
4bbd4c77 KS |
1430 | |
1431 | if (!nr_pages) | |
1432 | return 0; | |
1433 | ||
428e106a | 1434 | start = untagged_addr_remote(mm, start); |
f9652594 | 1435 | |
eddb1c22 | 1436 | VM_BUG_ON(!!pages != !!(gup_flags & (FOLL_GET | FOLL_PIN))); |
4bbd4c77 | 1437 | |
4bbd4c77 | 1438 | do { |
fa5bb209 KS |
1439 | struct page *page; |
1440 | unsigned int foll_flags = gup_flags; | |
1441 | unsigned int page_increm; | |
1442 | ||
1443 | /* first iteration or cross vma bound */ | |
1444 | if (!vma || start >= vma->vm_end) { | |
631426ba DH |
1445 | /* |
1446 | * MADV_POPULATE_(READ|WRITE) wants to handle VMA | |
1447 | * lookups+error reporting differently. | |
1448 | */ | |
1449 | if (gup_flags & FOLL_MADV_POPULATE) { | |
1450 | vma = vma_lookup(mm, start); | |
1451 | if (!vma) { | |
1452 | ret = -ENOMEM; | |
1453 | goto out; | |
1454 | } | |
1455 | if (check_vma_flags(vma, gup_flags)) { | |
1456 | ret = -EINVAL; | |
1457 | goto out; | |
1458 | } | |
1459 | goto retry; | |
1460 | } | |
6cd06ab1 | 1461 | vma = gup_vma_lookup(mm, start); |
fa5bb209 | 1462 | if (!vma && in_gate_area(mm, start)) { |
fa5bb209 KS |
1463 | ret = get_gate_page(mm, start & PAGE_MASK, |
1464 | gup_flags, &vma, | |
ffe1e786 | 1465 | pages ? &page : NULL); |
fa5bb209 | 1466 | if (ret) |
08be37b7 | 1467 | goto out; |
df06b37f | 1468 | ctx.page_mask = 0; |
fa5bb209 KS |
1469 | goto next_page; |
1470 | } | |
4bbd4c77 | 1471 | |
52650c8b | 1472 | if (!vma) { |
df06b37f KB |
1473 | ret = -EFAULT; |
1474 | goto out; | |
1475 | } | |
52650c8b JG |
1476 | ret = check_vma_flags(vma, gup_flags); |
1477 | if (ret) | |
1478 | goto out; | |
fa5bb209 KS |
1479 | } |
1480 | retry: | |
1481 | /* | |
1482 | * If we have a pending SIGKILL, don't keep faulting pages and | |
1483 | * potentially allocating memory. | |
1484 | */ | |
fa45f116 | 1485 | if (fatal_signal_pending(current)) { |
d180870d | 1486 | ret = -EINTR; |
df06b37f KB |
1487 | goto out; |
1488 | } | |
fa5bb209 | 1489 | cond_resched(); |
df06b37f KB |
1490 | |
1491 | page = follow_page_mask(vma, start, foll_flags, &ctx); | |
a7f22660 DH |
1492 | if (!page || PTR_ERR(page) == -EMLINK) { |
1493 | ret = faultin_page(vma, start, &foll_flags, | |
1494 | PTR_ERR(page) == -EMLINK, locked); | |
fa5bb209 KS |
1495 | switch (ret) { |
1496 | case 0: | |
1497 | goto retry; | |
df06b37f | 1498 | case -EBUSY: |
d9272525 | 1499 | case -EAGAIN: |
df06b37f | 1500 | ret = 0; |
e4a9bc58 | 1501 | fallthrough; |
fa5bb209 KS |
1502 | case -EFAULT: |
1503 | case -ENOMEM: | |
1504 | case -EHWPOISON: | |
df06b37f | 1505 | goto out; |
4bbd4c77 | 1506 | } |
fa5bb209 | 1507 | BUG(); |
1027e443 KS |
1508 | } else if (PTR_ERR(page) == -EEXIST) { |
1509 | /* | |
1510 | * Proper page table entry exists, but no corresponding | |
65462462 JH |
1511 | * struct page. If the caller expects **pages to be |
1512 | * filled in, bail out now, because that can't be done | |
1513 | * for this page. | |
1027e443 | 1514 | */ |
65462462 JH |
1515 | if (pages) { |
1516 | ret = PTR_ERR(page); | |
1517 | goto out; | |
1518 | } | |
1027e443 | 1519 | } else if (IS_ERR(page)) { |
df06b37f KB |
1520 | ret = PTR_ERR(page); |
1521 | goto out; | |
1027e443 | 1522 | } |
ffe1e786 | 1523 | next_page: |
df06b37f | 1524 | page_increm = 1 + (~(start >> PAGE_SHIFT) & ctx.page_mask); |
fa5bb209 KS |
1525 | if (page_increm > nr_pages) |
1526 | page_increm = nr_pages; | |
57edfcfd PX |
1527 | |
1528 | if (pages) { | |
1529 | struct page *subpage; | |
1530 | unsigned int j; | |
1531 | ||
1532 | /* | |
1533 | * This must be a large folio (and doesn't need to | |
1534 | * be the whole folio; it can be part of it); do | |
1535 | * the refcount work for all the subpages too. | |
1536 | * | |
1537 | * NOTE: here the page may not be the head page | |
1538 | * e.g. when start addr is not thp-size aligned. | |
1539 | * try_grab_folio() should have taken care of tail | |
1540 | * pages. | |
1541 | */ | |
1542 | if (page_increm > 1) { | |
f442fa61 | 1543 | struct folio *folio = page_folio(page); |
57edfcfd PX |
1544 | |
1545 | /* | |
1546 | * Since we already hold refcount on the | |
1547 | * large folio, this should never fail. | |
1548 | */ | |
f442fa61 YS |
1549 | if (try_grab_folio(folio, page_increm - 1, |
1550 | foll_flags)) { | |
57edfcfd PX |
1551 | /* |
1552 | * Release the 1st page ref if the | |
1553 | * folio is problematic, fail hard. | |
1554 | */ | |
f442fa61 | 1555 | gup_put_folio(folio, 1, |
57edfcfd PX |
1556 | foll_flags); |
1557 | ret = -EFAULT; | |
1558 | goto out; | |
1559 | } | |
1560 | } | |
1561 | ||
1562 | for (j = 0; j < page_increm; j++) { | |
1563 | subpage = nth_page(page, j); | |
1564 | pages[i + j] = subpage; | |
1565 | flush_anon_page(vma, subpage, start + j * PAGE_SIZE); | |
1566 | flush_dcache_page(subpage); | |
1567 | } | |
1568 | } | |
1569 | ||
fa5bb209 KS |
1570 | i += page_increm; |
1571 | start += page_increm * PAGE_SIZE; | |
1572 | nr_pages -= page_increm; | |
4bbd4c77 | 1573 | } while (nr_pages); |
df06b37f KB |
1574 | out: |
1575 | if (ctx.pgmap) | |
1576 | put_dev_pagemap(ctx.pgmap); | |
1577 | return i ? i : ret; | |
4bbd4c77 | 1578 | } |
4bbd4c77 | 1579 | |
771ab430 TK |
1580 | static bool vma_permits_fault(struct vm_area_struct *vma, |
1581 | unsigned int fault_flags) | |
d4925e00 | 1582 | { |
1b2ee126 DH |
1583 | bool write = !!(fault_flags & FAULT_FLAG_WRITE); |
1584 | bool foreign = !!(fault_flags & FAULT_FLAG_REMOTE); | |
33a709b2 | 1585 | vm_flags_t vm_flags = write ? VM_WRITE : VM_READ; |
d4925e00 DH |
1586 | |
1587 | if (!(vm_flags & vma->vm_flags)) | |
1588 | return false; | |
1589 | ||
33a709b2 DH |
1590 | /* |
1591 | * The architecture might have a hardware protection | |
1b2ee126 | 1592 | * mechanism other than read/write that can deny access. |
d61172b4 DH |
1593 | * |
1594 | * gup always represents data access, not instruction | |
1595 | * fetches, so execute=false here: | |
33a709b2 | 1596 | */ |
d61172b4 | 1597 | if (!arch_vma_access_permitted(vma, write, false, foreign)) |
33a709b2 DH |
1598 | return false; |
1599 | ||
d4925e00 DH |
1600 | return true; |
1601 | } | |
1602 | ||
adc8cb40 | 1603 | /** |
4bbd4c77 | 1604 | * fixup_user_fault() - manually resolve a user page fault |
4bbd4c77 KS |
1605 | * @mm: mm_struct of target mm |
1606 | * @address: user address | |
1607 | * @fault_flags:flags to pass down to handle_mm_fault() | |
c1e8d7c6 | 1608 | * @unlocked: did we unlock the mmap_lock while retrying, maybe NULL if caller |
548b6a1e MC |
1609 | * does not allow retry. If NULL, the caller must guarantee |
1610 | * that fault_flags does not contain FAULT_FLAG_ALLOW_RETRY. | |
4bbd4c77 KS |
1611 | * |
1612 | * This is meant to be called in the specific scenario where for locking reasons | |
1613 | * we try to access user memory in atomic context (within a pagefault_disable() | |
1614 | * section), this returns -EFAULT, and we want to resolve the user fault before | |
1615 | * trying again. | |
1616 | * | |
1617 | * Typically this is meant to be used by the futex code. | |
1618 | * | |
1619 | * The main difference with get_user_pages() is that this function will | |
1620 | * unconditionally call handle_mm_fault() which will in turn perform all the | |
1621 | * necessary SW fixup of the dirty and young bits in the PTE, while | |
4a9e1cda | 1622 | * get_user_pages() only guarantees to update these in the struct page. |
4bbd4c77 KS |
1623 | * |
1624 | * This is important for some architectures where those bits also gate the | |
1625 | * access permission to the page because they are maintained in software. On | |
1626 | * such architectures, gup() will not be enough to make a subsequent access | |
1627 | * succeed. | |
1628 | * | |
c1e8d7c6 ML |
1629 | * This function will not return with an unlocked mmap_lock. So it does not have the |
1630 | * same semantics wrt the @mm->mmap_lock as does filemap_fault(). | |
4bbd4c77 | 1631 | */ |
64019a2e | 1632 | int fixup_user_fault(struct mm_struct *mm, |
4a9e1cda DD |
1633 | unsigned long address, unsigned int fault_flags, |
1634 | bool *unlocked) | |
4bbd4c77 KS |
1635 | { |
1636 | struct vm_area_struct *vma; | |
8fed2f3c | 1637 | vm_fault_t ret; |
4a9e1cda | 1638 | |
428e106a | 1639 | address = untagged_addr_remote(mm, address); |
f9652594 | 1640 | |
4a9e1cda | 1641 | if (unlocked) |
71335f37 | 1642 | fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; |
4bbd4c77 | 1643 | |
4a9e1cda | 1644 | retry: |
6cd06ab1 | 1645 | vma = gup_vma_lookup(mm, address); |
8d7071af | 1646 | if (!vma) |
4bbd4c77 KS |
1647 | return -EFAULT; |
1648 | ||
d4925e00 | 1649 | if (!vma_permits_fault(vma, fault_flags)) |
4bbd4c77 KS |
1650 | return -EFAULT; |
1651 | ||
475f4dfc PX |
1652 | if ((fault_flags & FAULT_FLAG_KILLABLE) && |
1653 | fatal_signal_pending(current)) | |
1654 | return -EINTR; | |
1655 | ||
bce617ed | 1656 | ret = handle_mm_fault(vma, address, fault_flags, NULL); |
d9272525 PX |
1657 | |
1658 | if (ret & VM_FAULT_COMPLETED) { | |
1659 | /* | |
1660 | * NOTE: it's a pity that we need to retake the lock here | |
1661 | * to pair with the unlock() in the callers. Ideally we | |
1662 | * could tell the callers so they do not need to unlock. | |
1663 | */ | |
1664 | mmap_read_lock(mm); | |
1665 | *unlocked = true; | |
1666 | return 0; | |
1667 | } | |
1668 | ||
4bbd4c77 | 1669 | if (ret & VM_FAULT_ERROR) { |
9a291a7c JM |
1670 | int err = vm_fault_to_errno(ret, 0); |
1671 | ||
1672 | if (err) | |
1673 | return err; | |
4bbd4c77 KS |
1674 | BUG(); |
1675 | } | |
4a9e1cda DD |
1676 | |
1677 | if (ret & VM_FAULT_RETRY) { | |
d8ed45c5 | 1678 | mmap_read_lock(mm); |
475f4dfc PX |
1679 | *unlocked = true; |
1680 | fault_flags |= FAULT_FLAG_TRIED; | |
1681 | goto retry; | |
4a9e1cda DD |
1682 | } |
1683 | ||
4bbd4c77 KS |
1684 | return 0; |
1685 | } | |
add6a0cd | 1686 | EXPORT_SYMBOL_GPL(fixup_user_fault); |
4bbd4c77 | 1687 | |
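A hedged usage sketch (editorial illustration, not part of gup.c): the futex-style pattern the comment above describes. The helper name is made up; fixup_user_fault(), put_user(), pagefault_disable() and the mmap_lock helpers are the interfaces being exercised.

/* Sketch only: resolve a fault taken by an atomic-context user store. */
static int example_atomic_user_store(u32 __user *uaddr, u32 val)
{
	struct mm_struct *mm = current->mm;
	int ret;

	for (;;) {
		/* The store itself must not sleep, so faults are disabled. */
		pagefault_disable();
		ret = put_user(val, uaddr);
		pagefault_enable();
		if (!ret)
			return 0;

		/* -EFAULT: fault the page in, then retry the store. */
		mmap_read_lock(mm);
		ret = fixup_user_fault(mm, (unsigned long)uaddr,
				       FAULT_FLAG_WRITE, NULL);
		mmap_read_unlock(mm);
		if (ret)
			return ret;
	}
}

Passing a NULL @unlocked is only valid because FAULT_FLAG_ALLOW_RETRY is not set here, matching the constraint documented above.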
93c5c61d PX |
1688 | /* |
1689 | * GUP always responds to fatal signals. When FOLL_INTERRUPTIBLE is | |
1690 | * specified, it'll also respond to generic signals. The caller of GUP | |
1691 | * that has FOLL_INTERRUPTIBLE should take care of the GUP interruption. | |
1692 | */ | |
1693 | static bool gup_signal_pending(unsigned int flags) | |
1694 | { | |
1695 | if (fatal_signal_pending(current)) | |
1696 | return true; | |
1697 | ||
1698 | if (!(flags & FOLL_INTERRUPTIBLE)) | |
1699 | return false; | |
1700 | ||
1701 | return signal_pending(current); | |
1702 | } | |
1703 | ||
2d3a36a4 | 1704 | /* |
b2a72dff JG |
1705 | * Locking: (*locked == 1) means that the mmap_lock has already been acquired by |
1706 | * the caller. This function may drop the mmap_lock. If it does so, then it will | |
1707 | * set (*locked = 0). | |
1708 | * | |
1709 | * (*locked == 0) means that the caller expects this function to acquire and | |
1710 | * drop the mmap_lock. Therefore, the value of *locked will still be zero when | |
1711 | * the function returns, even though it may have changed temporarily during | |
1712 | * function execution. | |
1713 | * | |
1714 | * Please note that this function, unlike __get_user_pages(), will not return 0 | |
1715 | * for nr_pages > 0, unless FOLL_NOWAIT is used. | |
2d3a36a4 | 1716 | */ |
64019a2e | 1717 | static __always_inline long __get_user_pages_locked(struct mm_struct *mm, |
f0818f47 AA |
1718 | unsigned long start, |
1719 | unsigned long nr_pages, | |
f0818f47 | 1720 | struct page **pages, |
e716712f | 1721 | int *locked, |
0fd71a56 | 1722 | unsigned int flags) |
f0818f47 | 1723 | { |
f0818f47 | 1724 | long ret, pages_done; |
b2a72dff | 1725 | bool must_unlock = false; |
f0818f47 | 1726 | |
9c4b2142 LS |
1727 | if (!nr_pages) |
1728 | return 0; | |
1729 | ||
b2a72dff JG |
1730 | /* |
1731 | * The internal caller expects GUP to manage the lock internally and the | |
1732 | * lock must be released when this returns. | |
1733 | */ | |
9a863a6a | 1734 | if (!*locked) { |
b2a72dff JG |
1735 | if (mmap_read_lock_killable(mm)) |
1736 | return -EAGAIN; | |
1737 | must_unlock = true; | |
1738 | *locked = 1; | |
f0818f47 | 1739 | } |
961ba472 JG |
1740 | else |
1741 | mmap_assert_locked(mm); | |
f0818f47 | 1742 | |
a458b76a AA |
1743 | if (flags & FOLL_PIN) |
1744 | mm_set_has_pinned_flag(&mm->flags); | |
008cfe44 | 1745 | |
eddb1c22 JH |
1746 | /* |
1747 | * FOLL_PIN and FOLL_GET are mutually exclusive. Traditional behavior | |
1748 | * is to set FOLL_GET if the caller wants pages[] filled in (but has | |
1749 | * carelessly failed to specify FOLL_GET), so keep doing that, but only | |
1750 | * for FOLL_GET, not for the newer FOLL_PIN. | |
1751 | * | |
1752 | * FOLL_PIN always expects pages to be non-null, but no need to assert | |
1753 | * that here, as any failures will be obvious enough. | |
1754 | */ | |
1755 | if (pages && !(flags & FOLL_PIN)) | |
f0818f47 | 1756 | flags |= FOLL_GET; |
f0818f47 AA |
1757 | |
1758 | pages_done = 0; | |
f0818f47 | 1759 | for (;;) { |
64019a2e | 1760 | ret = __get_user_pages(mm, start, nr_pages, flags, pages, |
b2cac248 | 1761 | locked); |
f04740f5 | 1762 | if (!(flags & FOLL_UNLOCKABLE)) { |
f0818f47 | 1763 | /* VM_FAULT_RETRY couldn't trigger, bypass */ |
f04740f5 JG |
1764 | pages_done = ret; |
1765 | break; | |
1766 | } | |
f0818f47 | 1767 | |
d9272525 | 1768 | /* VM_FAULT_RETRY or VM_FAULT_COMPLETED cannot return errors */ |
f0818f47 AA |
1769 | if (!*locked) { |
1770 | BUG_ON(ret < 0); | |
1771 | BUG_ON(ret >= nr_pages); | |
1772 | } | |
1773 | ||
f0818f47 AA |
1774 | if (ret > 0) { |
1775 | nr_pages -= ret; | |
1776 | pages_done += ret; | |
1777 | if (!nr_pages) | |
1778 | break; | |
1779 | } | |
1780 | if (*locked) { | |
96312e61 AA |
1781 | /* |
1782 | * VM_FAULT_RETRY didn't trigger or it was a | |
1783 | * FOLL_NOWAIT. | |
1784 | */ | |
f0818f47 AA |
1785 | if (!pages_done) |
1786 | pages_done = ret; | |
1787 | break; | |
1788 | } | |
df17277b MR |
1789 | /* |
1790 | * VM_FAULT_RETRY triggered, so seek to the faulting offset. | |
1791 | * For the prefault case (!pages) we only update counts. | |
1792 | */ | |
1793 | if (likely(pages)) | |
1794 | pages += ret; | |
f0818f47 | 1795 | start += ret << PAGE_SHIFT; |
b2a72dff JG |
1796 | |
1797 | /* The lock was temporarily dropped, so we must unlock later */ | |
1798 | must_unlock = true; | |
f0818f47 | 1799 | |
4426e945 | 1800 | retry: |
f0818f47 AA |
1801 | /* |
1802 | * Repeat on the address that fired VM_FAULT_RETRY | |
4426e945 PX |
1803 | * with both FAULT_FLAG_ALLOW_RETRY and |
1804 | * FAULT_FLAG_TRIED. Note that GUP can be interrupted | |
93c5c61d PX |
1805 | * by fatal signals or even common signals, depending on | |
1806 | * the caller's request. So we need to check it before we | |
4426e945 | 1807 | * start trying again otherwise it can loop forever. |
f0818f47 | 1808 | */ |
93c5c61d | 1809 | if (gup_signal_pending(flags)) { |
ae46d2aa HD |
1810 | if (!pages_done) |
1811 | pages_done = -EINTR; | |
4426e945 | 1812 | break; |
ae46d2aa | 1813 | } |
4426e945 | 1814 | |
d8ed45c5 | 1815 | ret = mmap_read_lock_killable(mm); |
71335f37 PX |
1816 | if (ret) { |
1817 | BUG_ON(ret > 0); | |
1818 | if (!pages_done) | |
1819 | pages_done = ret; | |
1820 | break; | |
1821 | } | |
4426e945 | 1822 | |
c7b6a566 | 1823 | *locked = 1; |
64019a2e | 1824 | ret = __get_user_pages(mm, start, 1, flags | FOLL_TRIED, |
b2cac248 | 1825 | pages, locked); |
4426e945 PX |
1826 | if (!*locked) { |
1827 | /* Continue to retry until we succeeded */ | |
1828 | BUG_ON(ret != 0); | |
1829 | goto retry; | |
1830 | } | |
f0818f47 AA |
1831 | if (ret != 1) { |
1832 | BUG_ON(ret > 1); | |
1833 | if (!pages_done) | |
1834 | pages_done = ret; | |
1835 | break; | |
1836 | } | |
1837 | nr_pages--; | |
1838 | pages_done++; | |
1839 | if (!nr_pages) | |
1840 | break; | |
df17277b MR |
1841 | if (likely(pages)) |
1842 | pages++; | |
f0818f47 AA |
1843 | start += PAGE_SIZE; |
1844 | } | |
b2a72dff | 1845 | if (must_unlock && *locked) { |
f0818f47 | 1846 | /* |
b2a72dff JG |
1847 | * We either temporarily dropped the lock, or the caller |
1848 | * requested that we both acquire and drop the lock. Either way, | |
1849 | * we must now unlock, and notify the caller of that state. | |
f0818f47 | 1850 | */ |
d8ed45c5 | 1851 | mmap_read_unlock(mm); |
f0818f47 AA |
1852 | *locked = 0; |
1853 | } | |
9c4b2142 LS |
1854 | |
1855 | /* | |
1856 | * Failing to pin anything implies something has gone wrong (except when | |
1857 | * FOLL_NOWAIT is specified). | |
1858 | */ | |
1859 | if (WARN_ON_ONCE(pages_done == 0 && !(flags & FOLL_NOWAIT))) | |
1860 | return -EFAULT; | |
1861 | ||
f0818f47 AA |
1862 | return pages_done; |
1863 | } | |
1864 | ||
d3649f68 CH |
1865 | /** |
1866 | * populate_vma_page_range() - populate a range of pages in the vma. | |
1867 | * @vma: target vma | |
1868 | * @start: start address | |
1869 | * @end: end address | |
c1e8d7c6 | 1870 | * @locked: whether the mmap_lock is still held |
d3649f68 CH |
1871 | * |
1872 | * This takes care of mlocking the pages too if VM_LOCKED is set. | |
1873 | * | |
0a36f7f8 TY |
1874 | * Return either number of pages pinned in the vma, or a negative error |
1875 | * code on error. | |
d3649f68 | 1876 | * |
c1e8d7c6 | 1877 | * vma->vm_mm->mmap_lock must be held. |
d3649f68 | 1878 | * |
4f6da934 | 1879 | * If @locked is NULL, it may be held for read or write and will |
d3649f68 CH |
1880 | * be unperturbed. |
1881 | * | |
4f6da934 PX |
1882 | * If @locked is non-NULL, it must be held for read only and may be |
1883 | * released. If it's released, *@locked will be set to 0. | |
d3649f68 CH |
1884 | */ |
1885 | long populate_vma_page_range(struct vm_area_struct *vma, | |
4f6da934 | 1886 | unsigned long start, unsigned long end, int *locked) |
d3649f68 CH |
1887 | { |
1888 | struct mm_struct *mm = vma->vm_mm; | |
1889 | unsigned long nr_pages = (end - start) / PAGE_SIZE; | |
9a863a6a | 1890 | int local_locked = 1; |
d3649f68 | 1891 | int gup_flags; |
ece369c7 | 1892 | long ret; |
d3649f68 | 1893 | |
be51eb18 ML |
1894 | VM_BUG_ON(!PAGE_ALIGNED(start)); |
1895 | VM_BUG_ON(!PAGE_ALIGNED(end)); | |
d3649f68 CH |
1896 | VM_BUG_ON_VMA(start < vma->vm_start, vma); |
1897 | VM_BUG_ON_VMA(end > vma->vm_end, vma); | |
42fc5414 | 1898 | mmap_assert_locked(mm); |
d3649f68 | 1899 | |
b67bf49c HD |
1900 | /* |
1901 | * Rightly or wrongly, the VM_LOCKONFAULT case has never used | |
1902 | * faultin_page() to break COW, so it has no work to do here. | |
1903 | */ | |
d3649f68 | 1904 | if (vma->vm_flags & VM_LOCKONFAULT) |
b67bf49c HD |
1905 | return nr_pages; |
1906 | ||
1096bc93 LT |
1907 | /* ... similarly, we've never faulted in PROT_NONE pages */ |
1908 | if (!vma_is_accessible(vma)) | |
1909 | return -EFAULT; | |
1910 | ||
b67bf49c | 1911 | gup_flags = FOLL_TOUCH; |
d3649f68 CH |
1912 | /* |
1913 | * We want to touch writable mappings with a write fault in order | |
1914 | * to break COW, except for shared mappings because these don't COW | |
1915 | * and we would not want to dirty them for nothing. | |
1096bc93 LT |
1916 | * |
1917 | * Otherwise, do a read fault, and use FOLL_FORCE in case it's not | |
1918 | * readable (ie write-only or executable). | |
d3649f68 CH |
1919 | */ |
1920 | if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE) | |
1921 | gup_flags |= FOLL_WRITE; | |
1096bc93 | 1922 | else |
d3649f68 CH |
1923 | gup_flags |= FOLL_FORCE; |
1924 | ||
f04740f5 JG |
1925 | if (locked) |
1926 | gup_flags |= FOLL_UNLOCKABLE; | |
1927 | ||
d3649f68 CH |
1928 | /* |
1929 | * We made sure addr is within a VMA, so the following will | |
1930 | * not result in a stack expansion that recurses back here. | |
1931 | */ | |
ece369c7 | 1932 | ret = __get_user_pages(mm, start, nr_pages, gup_flags, |
b2cac248 | 1933 | NULL, locked ? locked : &local_locked); |
ece369c7 HD |
1934 | lru_add_drain(); |
1935 | return ret; | |
d3649f68 CH |
1936 | } |
1937 | ||
4ca9b385 | 1938 | /* |
631426ba DH |
1939 | * faultin_page_range() - populate (prefault) page tables inside the |
1940 | * given range readable/writable | |
4ca9b385 DH |
1941 | * |
1942 | * This takes care of mlocking the pages, too, if VM_LOCKED is set. | |
1943 | * | |
631426ba | 1944 | * @mm: the mm to populate page tables in |
4ca9b385 DH |
1945 | * @start: start address |
1946 | * @end: end address | |
1947 | * @write: whether to prefault readable or writable | |
1948 | * @locked: whether the mmap_lock is still held | |
1949 | * | |
631426ba DH |
1950 | * Returns either number of processed pages in the MM, or a negative error |
1951 | * code on error (see __get_user_pages()). Note that this function reports | |
1952 | * errors related to VMAs, such as incompatible mappings, as expected by | |
1953 | * MADV_POPULATE_(READ|WRITE). | |
4ca9b385 | 1954 | * |
631426ba DH |
1955 | * The range must be page-aligned. |
1956 | * | |
1957 | * mm->mmap_lock must be held. If it's released, *@locked will be set to 0. | |
4ca9b385 | 1958 | */ |
631426ba DH |
1959 | long faultin_page_range(struct mm_struct *mm, unsigned long start, |
1960 | unsigned long end, bool write, int *locked) | |
4ca9b385 | 1961 | { |
4ca9b385 DH |
1962 | unsigned long nr_pages = (end - start) / PAGE_SIZE; |
1963 | int gup_flags; | |
ece369c7 | 1964 | long ret; |
4ca9b385 DH |
1965 | |
1966 | VM_BUG_ON(!PAGE_ALIGNED(start)); | |
1967 | VM_BUG_ON(!PAGE_ALIGNED(end)); | |
4ca9b385 DH |
1968 | mmap_assert_locked(mm); |
1969 | ||
1970 | /* | |
1971 | * FOLL_TOUCH: Mark page accessed and thereby young; will also mark | |
1972 | * the page dirty with FOLL_WRITE -- which doesn't make a | |
1973 | * difference with !FOLL_FORCE, because the page is writable | |
1974 | * in the page table. | |
1975 | * FOLL_HWPOISON: Return -EHWPOISON instead of -EFAULT when we hit | |
1976 | * a poisoned page. | |
4ca9b385 DH |
1977 | * !FOLL_FORCE: Require proper access permissions. |
1978 | */ | |
631426ba DH |
1979 | gup_flags = FOLL_TOUCH | FOLL_HWPOISON | FOLL_UNLOCKABLE | |
1980 | FOLL_MADV_POPULATE; | |
4ca9b385 DH |
1981 | if (write) |
1982 | gup_flags |= FOLL_WRITE; | |
1983 | ||
631426ba DH |
1984 | ret = __get_user_pages_locked(mm, start, nr_pages, NULL, locked, |
1985 | gup_flags); | |
ece369c7 HD |
1986 | lru_add_drain(); |
1987 | return ret; | |
4ca9b385 DH |
1988 | } |
1989 | ||
d3649f68 CH |
1990 | /* |
1991 | * __mm_populate - populate and/or mlock pages within a range of address space. | |
1992 | * | |
1993 | * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap | |
1994 | * flags. VMAs must be already marked with the desired vm_flags, and | |
c1e8d7c6 | 1995 | * mmap_lock must not be held. |
d3649f68 CH |
1996 | */ |
1997 | int __mm_populate(unsigned long start, unsigned long len, int ignore_errors) | |
1998 | { | |
1999 | struct mm_struct *mm = current->mm; | |
2000 | unsigned long end, nstart, nend; | |
2001 | struct vm_area_struct *vma = NULL; | |
2002 | int locked = 0; | |
2003 | long ret = 0; | |
2004 | ||
2005 | end = start + len; | |
2006 | ||
2007 | for (nstart = start; nstart < end; nstart = nend) { | |
2008 | /* | |
2009 | * We want to fault in pages for [nstart; end) address range. | |
2010 | * Find first corresponding VMA. | |
2011 | */ | |
2012 | if (!locked) { | |
2013 | locked = 1; | |
d8ed45c5 | 2014 | mmap_read_lock(mm); |
c4d1a92d | 2015 | vma = find_vma_intersection(mm, nstart, end); |
d3649f68 | 2016 | } else if (nstart >= vma->vm_end) |
c4d1a92d LH |
2017 | vma = find_vma_intersection(mm, vma->vm_end, end); |
2018 | ||
2019 | if (!vma) | |
d3649f68 CH |
2020 | break; |
2021 | /* | |
2022 | * Set [nstart; nend) to intersection of desired address | |
2023 | * range with the first VMA. Also, skip undesirable VMA types. | |
2024 | */ | |
2025 | nend = min(end, vma->vm_end); | |
2026 | if (vma->vm_flags & (VM_IO | VM_PFNMAP)) | |
2027 | continue; | |
2028 | if (nstart < vma->vm_start) | |
2029 | nstart = vma->vm_start; | |
2030 | /* | |
2031 | * Now fault in a range of pages. populate_vma_page_range() | |
2032 | * double checks the vma flags, so that it won't mlock pages | |
2033 | * if the vma was already munlocked. | |
2034 | */ | |
2035 | ret = populate_vma_page_range(vma, nstart, nend, &locked); | |
2036 | if (ret < 0) { | |
2037 | if (ignore_errors) { | |
2038 | ret = 0; | |
2039 | continue; /* continue at next VMA */ | |
2040 | } | |
2041 | break; | |
2042 | } | |
2043 | nend = nstart + ret * PAGE_SIZE; | |
2044 | ret = 0; | |
2045 | } | |
2046 | if (locked) | |
d8ed45c5 | 2047 | mmap_read_unlock(mm); |
d3649f68 CH |
2048 | return ret; /* 0 or negative error code */ |
2049 | } | |
050a9adc | 2050 | #else /* CONFIG_MMU */ |
64019a2e | 2051 | static long __get_user_pages_locked(struct mm_struct *mm, unsigned long start, |
050a9adc | 2052 | unsigned long nr_pages, struct page **pages, |
b2cac248 | 2053 | int *locked, unsigned int foll_flags) |
050a9adc CH |
2054 | { |
2055 | struct vm_area_struct *vma; | |
b2a72dff | 2056 | bool must_unlock = false; |
050a9adc | 2057 | unsigned long vm_flags; |
24dc20c7 | 2058 | long i; |
050a9adc | 2059 | |
b2a72dff JG |
2060 | if (!nr_pages) |
2061 | return 0; | |
2062 | ||
2063 | /* | |
2064 | * The internal caller expects GUP to manage the lock internally and the | |
2065 | * lock must be released when this returns. | |
2066 | */ | |
9a863a6a | 2067 | if (!*locked) { |
b2a72dff JG |
2068 | if (mmap_read_lock_killable(mm)) |
2069 | return -EAGAIN; | |
2070 | must_unlock = true; | |
2071 | *locked = 1; | |
2072 | } | |
2073 | ||
050a9adc CH |
2074 | /* calculate required read or write permissions. |
2075 | * If FOLL_FORCE is set, we only require the "MAY" flags. | |
2076 | */ | |
2077 | vm_flags = (foll_flags & FOLL_WRITE) ? | |
2078 | (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD); | |
2079 | vm_flags &= (foll_flags & FOLL_FORCE) ? | |
2080 | (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE); | |
2081 | ||
2082 | for (i = 0; i < nr_pages; i++) { | |
2083 | vma = find_vma(mm, start); | |
2084 | if (!vma) | |
b2a72dff | 2085 | break; |
050a9adc CH |
2086 | |
2087 | /* protect what we can, including chardevs */ | |
2088 | if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) || | |
2089 | !(vm_flags & vma->vm_flags)) | |
b2a72dff | 2090 | break; |
050a9adc CH |
2091 | |
2092 | if (pages) { | |
396a400b | 2093 | pages[i] = virt_to_page((void *)start); |
050a9adc CH |
2094 | if (pages[i]) |
2095 | get_page(pages[i]); | |
2096 | } | |
b2cac248 | 2097 | |
050a9adc CH |
2098 | start = (start + PAGE_SIZE) & PAGE_MASK; |
2099 | } | |
2100 | ||
b2a72dff JG |
2101 | if (must_unlock && *locked) { |
2102 | mmap_read_unlock(mm); | |
2103 | *locked = 0; | |
2104 | } | |
050a9adc | 2105 | |
050a9adc CH |
2106 | return i ? : -EFAULT; |
2107 | } | |
2108 | #endif /* !CONFIG_MMU */ | |
d3649f68 | 2109 | |
bb523b40 AG |
2110 | /** |
2111 | * fault_in_writeable - fault in userspace address range for writing | |
2112 | * @uaddr: start of address range | |
2113 | * @size: size of address range | |
2114 | * | |
2115 | * Returns the number of bytes not faulted in (like copy_to_user() and | |
2116 | * copy_from_user()). | |
2117 | */ | |
2118 | size_t fault_in_writeable(char __user *uaddr, size_t size) | |
2119 | { | |
2120 | char __user *start = uaddr, *end; | |
2121 | ||
2122 | if (unlikely(size == 0)) | |
2123 | return 0; | |
677b2a8c CL |
2124 | if (!user_write_access_begin(uaddr, size)) |
2125 | return size; | |
bb523b40 | 2126 | if (!PAGE_ALIGNED(uaddr)) { |
677b2a8c | 2127 | unsafe_put_user(0, uaddr, out); |
bb523b40 AG |
2128 | uaddr = (char __user *)PAGE_ALIGN((unsigned long)uaddr); |
2129 | } | |
2130 | end = (char __user *)PAGE_ALIGN((unsigned long)start + size); | |
2131 | if (unlikely(end < start)) | |
2132 | end = NULL; | |
2133 | while (uaddr != end) { | |
677b2a8c | 2134 | unsafe_put_user(0, uaddr, out); |
bb523b40 AG |
2135 | uaddr += PAGE_SIZE; |
2136 | } | |
2137 | ||
2138 | out: | |
677b2a8c | 2139 | user_write_access_end(); |
bb523b40 AG |
2140 | if (size > uaddr - start) |
2141 | return size - (uaddr - start); | |
2142 | return 0; | |
2143 | } | |
2144 | EXPORT_SYMBOL(fault_in_writeable); | |
2145 | ||
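A hedged sketch of the retry pattern fault_in_writeable() exists for (editorial illustration, not part of gup.c): a caller that cannot take page faults during the copy, e.g. because it holds locks, probes the missing tail and retries. The helper name is hypothetical.

/* Sketch only: copy out without faulting, falling back to a prefault. */
static int example_copy_out_nofault(char __user *dst, const void *src,
				    size_t len)
{
	size_t left;

	do {
		pagefault_disable();
		left = copy_to_user(dst, src, len);
		pagefault_enable();
		if (!left)
			return 0;

		/* Fault in the part that was missing, then retry the copy. */
	} while (fault_in_writeable(dst + (len - left), left) != left);

	/* Nothing could be faulted in: the range is genuinely bad. */
	return -EFAULT;
}

Zeroing the destination in fault_in_writeable() is harmless here because the retried copy_to_user() overwrites it anyway; callers that must not clobber existing bytes should use fault_in_safe_writeable() below instead.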
da32b581 CM |
2146 | /** |
2147 | * fault_in_subpage_writeable - fault in an address range for writing | |
2148 | * @uaddr: start of address range | |
2149 | * @size: size of address range | |
2150 | * | |
2151 | * Fault in a user address range for writing while checking for permissions at | |
2152 | * sub-page granularity (e.g. arm64 MTE). This function should be used when | |
2153 | * the caller cannot guarantee forward progress of a copy_to_user() loop. | |
2154 | * | |
2155 | * Returns the number of bytes not faulted in (like copy_to_user() and | |
2156 | * copy_from_user()). | |
2157 | */ | |
2158 | size_t fault_in_subpage_writeable(char __user *uaddr, size_t size) | |
2159 | { | |
2160 | size_t faulted_in; | |
2161 | ||
2162 | /* | |
2163 | * Attempt faulting in at page granularity first for page table | |
2164 | * permission checking. The arch-specific probe_subpage_writeable() | |
2165 | * functions may not check for this. | |
2166 | */ | |
2167 | faulted_in = size - fault_in_writeable(uaddr, size); | |
2168 | if (faulted_in) | |
2169 | faulted_in -= probe_subpage_writeable(uaddr, faulted_in); | |
2170 | ||
2171 | return size - faulted_in; | |
2172 | } | |
2173 | EXPORT_SYMBOL(fault_in_subpage_writeable); | |
2174 | ||
cdd591fc AG |
2175 | /* |
2176 | * fault_in_safe_writeable - fault in an address range for writing | |
2177 | * @uaddr: start of address range | |
2178 | * @size: length of address range | |
2179 | * | |
fe673d3f LT |
2180 | * Faults in an address range for writing. This is primarily useful when we |
2181 | * already know that some or all of the pages in the address range aren't in | |
2182 | * memory. | |
cdd591fc | 2183 | * |
fe673d3f | 2184 | * Unlike fault_in_writeable(), this function is non-destructive. |
cdd591fc AG |
2185 | * |
2186 | * Note that we don't pin or otherwise hold the pages referenced that we fault | |
2187 | * in. There's no guarantee that they'll stay in memory for any duration of | |
2188 | * time. | |
2189 | * | |
2190 | * Returns the number of bytes not faulted in, like copy_to_user() and | |
2191 | * copy_from_user(). | |
2192 | */ | |
2193 | size_t fault_in_safe_writeable(const char __user *uaddr, size_t size) | |
2194 | { | |
fe673d3f | 2195 | unsigned long start = (unsigned long)uaddr, end; |
cdd591fc | 2196 | struct mm_struct *mm = current->mm; |
fe673d3f | 2197 | bool unlocked = false; |
cdd591fc | 2198 | |
fe673d3f LT |
2199 | if (unlikely(size == 0)) |
2200 | return 0; | |
cdd591fc | 2201 | end = PAGE_ALIGN(start + size); |
fe673d3f | 2202 | if (end < start) |
cdd591fc | 2203 | end = 0; |
cdd591fc | 2204 | |
fe673d3f LT |
2205 | mmap_read_lock(mm); |
2206 | do { | |
2207 | if (fixup_user_fault(mm, start, FAULT_FLAG_WRITE, &unlocked)) | |
cdd591fc | 2208 | break; |
fe673d3f LT |
2209 | start = (start + PAGE_SIZE) & PAGE_MASK; |
2210 | } while (start != end); | |
2211 | mmap_read_unlock(mm); | |
2212 | ||
2213 | if (size > (unsigned long)uaddr - start) | |
2214 | return size - ((unsigned long)uaddr - start); | |
2215 | return 0; | |
cdd591fc AG |
2216 | } |
2217 | EXPORT_SYMBOL(fault_in_safe_writeable); | |
2218 | ||
bb523b40 AG |
2219 | /** |
2220 | * fault_in_readable - fault in userspace address range for reading | |
2221 | * @uaddr: start of user address range | |
2222 | * @size: size of user address range | |
2223 | * | |
2224 | * Returns the number of bytes not faulted in (like copy_to_user() and | |
2225 | * copy_from_user()). | |
2226 | */ | |
2227 | size_t fault_in_readable(const char __user *uaddr, size_t size) | |
2228 | { | |
2229 | const char __user *start = uaddr, *end; | |
2230 | volatile char c; | |
2231 | ||
2232 | if (unlikely(size == 0)) | |
2233 | return 0; | |
677b2a8c CL |
2234 | if (!user_read_access_begin(uaddr, size)) |
2235 | return size; | |
bb523b40 | 2236 | if (!PAGE_ALIGNED(uaddr)) { |
677b2a8c | 2237 | unsafe_get_user(c, uaddr, out); |
bb523b40 AG |
2238 | uaddr = (const char __user *)PAGE_ALIGN((unsigned long)uaddr); |
2239 | } | |
2240 | end = (const char __user *)PAGE_ALIGN((unsigned long)start + size); | |
2241 | if (unlikely(end < start)) | |
2242 | end = NULL; | |
2243 | while (uaddr != end) { | |
677b2a8c | 2244 | unsafe_get_user(c, uaddr, out); |
bb523b40 AG |
2245 | uaddr += PAGE_SIZE; |
2246 | } | |
2247 | ||
2248 | out: | |
677b2a8c | 2249 | user_read_access_end(); |
bb523b40 AG |
2250 | (void)c; |
2251 | if (size > uaddr - start) | |
2252 | return size - (uaddr - start); | |
2253 | return 0; | |
2254 | } | |
2255 | EXPORT_SYMBOL(fault_in_readable); | |
2256 | ||
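The read-side counterpart, again as a hedged editorial sketch rather than code from gup.c: here the user buffer is the source, as in write paths that copy from userspace while a held lock forbids taking page faults. The helper name is made up.

/* Sketch only: copy in without faulting, prefaulting the source on failure. */
static int example_copy_in_nofault(void *dst, const char __user *src,
				   size_t len)
{
	size_t left;

	do {
		pagefault_disable();
		left = copy_from_user(dst, src, len);
		pagefault_enable();
		if (!left)
			return 0;

		/* Fault the missing source bytes in, then retry the copy. */
	} while (fault_in_readable(src + (len - left), left) != left);

	return -EFAULT;
}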
8f942eea JH |
2257 | /** |
2258 | * get_dump_page() - pin user page in memory while writing it to core dump | |
2259 | * @addr: user address | |
2260 | * | |
2261 | * Returns struct page pointer of user page pinned for dump, | |
2262 | * to be freed afterwards by put_page(). | |
2263 | * | |
2264 | * Returns NULL on any kind of failure - a hole must then be inserted into | |
2265 | * the corefile, to preserve alignment with its headers; and also returns | |
2266 | * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found - | |
f0953a1b | 2267 | * allowing a hole to be left in the corefile to save disk space. |
8f942eea | 2268 | * |
7f3bfab5 | 2269 | * Called without mmap_lock (takes and releases the mmap_lock by itself). |
8f942eea JH |
2270 | */ |
2271 | #ifdef CONFIG_ELF_CORE | |
2272 | struct page *get_dump_page(unsigned long addr) | |
2273 | { | |
8f942eea | 2274 | struct page *page; |
b2a72dff | 2275 | int locked = 0; |
7f3bfab5 | 2276 | int ret; |
8f942eea | 2277 | |
b2cac248 | 2278 | ret = __get_user_pages_locked(current->mm, addr, 1, &page, &locked, |
7f3bfab5 | 2279 | FOLL_FORCE | FOLL_DUMP | FOLL_GET); |
7f3bfab5 | 2280 | return (ret == 1) ? page : NULL; |
8f942eea JH |
2281 | } |
2282 | #endif /* CONFIG_ELF_CORE */ | |
2283 | ||
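A hedged illustration of the coredump-style consumer get_dump_page() is written for (editorial sketch, not part of gup.c); it assumes dump_emit()/dump_skip() from <linux/coredump.h> and kmap_local_page() are available, and the helper name is made up.

/* Sketch only: emit one page at a time, turning NULL into a file hole. */
static int example_dump_user_range(struct coredump_params *cprm,
				   unsigned long start, unsigned long end)
{
	unsigned long addr;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		struct page *page = get_dump_page(addr);

		if (page) {
			void *kaddr = kmap_local_page(page);
			int stop = !dump_emit(cprm, kaddr, PAGE_SIZE);

			kunmap_local(kaddr);
			put_page(page);
			if (stop)
				return 0;
		} else if (!dump_skip(cprm, PAGE_SIZE)) {
			return 0;
		}
	}
	return 1;
}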
d1e153fe | 2284 | #ifdef CONFIG_MIGRATION |
f68749ec | 2285 | /* |
53ba78de | 2286 | * Returns the number of collected folios. Return value is always >= 0. |
f68749ec | 2287 | */ |
53ba78de VK |
2288 | static unsigned long collect_longterm_unpinnable_folios( |
2289 | struct list_head *movable_folio_list, | |
2290 | unsigned long nr_folios, | |
2291 | struct folio **folios) | |
9a4e9f3b | 2292 | { |
67e139b0 | 2293 | unsigned long i, collected = 0; |
1b7f7e58 | 2294 | struct folio *prev_folio = NULL; |
67e139b0 | 2295 | bool drain_allow = true; |
9a4e9f3b | 2296 | |
53ba78de VK |
2297 | for (i = 0; i < nr_folios; i++) { |
2298 | struct folio *folio = folios[i]; | |
f9f38f78 | 2299 | |
1b7f7e58 | 2300 | if (folio == prev_folio) |
83c02c23 | 2301 | continue; |
1b7f7e58 | 2302 | prev_folio = folio; |
f9f38f78 | 2303 | |
67e139b0 AP |
2304 | if (folio_is_longterm_pinnable(folio)) |
2305 | continue; | |
b05a79d4 | 2306 | |
67e139b0 | 2307 | collected++; |
b05a79d4 | 2308 | |
67e139b0 | 2309 | if (folio_is_device_coherent(folio)) |
f9f38f78 CH |
2310 | continue; |
2311 | ||
1b7f7e58 | 2312 | if (folio_test_hugetlb(folio)) { |
53ba78de | 2313 | isolate_hugetlb(folio, movable_folio_list); |
f9f38f78 CH |
2314 | continue; |
2315 | } | |
9a4e9f3b | 2316 | |
1b7f7e58 | 2317 | if (!folio_test_lru(folio) && drain_allow) { |
f9f38f78 CH |
2318 | lru_add_drain_all(); |
2319 | drain_allow = false; | |
2320 | } | |
2321 | ||
be2d5756 | 2322 | if (!folio_isolate_lru(folio)) |
f9f38f78 | 2323 | continue; |
67e139b0 | 2324 | |
53ba78de | 2325 | list_add_tail(&folio->lru, movable_folio_list); |
1b7f7e58 MWO |
2326 | node_stat_mod_folio(folio, |
2327 | NR_ISOLATED_ANON + folio_is_file_lru(folio), | |
2328 | folio_nr_pages(folio)); | |
9a4e9f3b AK |
2329 | } |
2330 | ||
67e139b0 AP |
2331 | return collected; |
2332 | } | |
2333 | ||
2334 | /* | |
53ba78de VK |
2335 | * Unpins all folios and migrates device coherent folios and those on movable_folio_list. |
2336 | * Returns -EAGAIN if all folios were successfully migrated or -errno for | |
2337 | * failure (or partial success). | |
67e139b0 | 2338 | */ |
53ba78de VK |
2339 | static int migrate_longterm_unpinnable_folios( |
2340 | struct list_head *movable_folio_list, | |
2341 | unsigned long nr_folios, | |
2342 | struct folio **folios) | |
67e139b0 AP |
2343 | { |
2344 | int ret; | |
2345 | unsigned long i; | |
6e7f34eb | 2346 | |
53ba78de VK |
2347 | for (i = 0; i < nr_folios; i++) { |
2348 | struct folio *folio = folios[i]; | |
67e139b0 AP |
2349 | |
2350 | if (folio_is_device_coherent(folio)) { | |
2351 | /* | |
53ba78de VK |
2352 | * Migration will fail if the folio is pinned, so |
2353 | * convert the pin on the source folio to a normal | |
2354 | * reference. | |
67e139b0 | 2355 | */ |
53ba78de | 2356 | folios[i] = NULL; |
67e139b0 AP |
2357 | folio_get(folio); |
2358 | gup_put_folio(folio, 1, FOLL_PIN); | |
2359 | ||
2360 | if (migrate_device_coherent_page(&folio->page)) { | |
2361 | ret = -EBUSY; | |
2362 | goto err; | |
2363 | } | |
2364 | ||
b05a79d4 | 2365 | continue; |
67e139b0 | 2366 | } |
b05a79d4 | 2367 | |
67e139b0 | 2368 | /* |
53ba78de | 2369 | * We can't migrate folios with unexpected references, so drop |
67e139b0 | 2370 | * the reference obtained by __get_user_pages_locked(). |
53ba78de | 2371 | * Migrating folios have been added to movable_folio_list after |
67e139b0 | 2372 | * calling folio_isolate_lru() which takes a reference so the |
53ba78de | 2373 | * folio won't be freed if it's migrating. |
67e139b0 | 2374 | */ |
53ba78de VK |
2375 | unpin_folio(folios[i]); |
2376 | folios[i] = NULL; | |
f68749ec | 2377 | } |
f9f38f78 | 2378 | |
53ba78de | 2379 | if (!list_empty(movable_folio_list)) { |
f9f38f78 CH |
2380 | struct migration_target_control mtc = { |
2381 | .nid = NUMA_NO_NODE, | |
2382 | .gfp_mask = GFP_USER | __GFP_NOWARN, | |
e42dfe4e | 2383 | .reason = MR_LONGTERM_PIN, |
f9f38f78 CH |
2384 | }; |
2385 | ||
53ba78de | 2386 | if (migrate_pages(movable_folio_list, alloc_migration_target, |
67e139b0 AP |
2387 | NULL, (unsigned long)&mtc, MIGRATE_SYNC, |
2388 | MR_LONGTERM_PIN, NULL)) { | |
f9f38f78 | 2389 | ret = -ENOMEM; |
67e139b0 AP |
2390 | goto err; |
2391 | } | |
9a4e9f3b AK |
2392 | } |
2393 | ||
53ba78de | 2394 | putback_movable_pages(movable_folio_list); |
67e139b0 AP |
2395 | |
2396 | return -EAGAIN; | |
2397 | ||
2398 | err: | |
53ba78de VK |
2399 | unpin_folios(folios, nr_folios); |
2400 | putback_movable_pages(movable_folio_list); | |
24a95998 | 2401 | |
67e139b0 AP |
2402 | return ret; |
2403 | } | |
2404 | ||
2405 | /* | |
53ba78de VK |
2406 | * Check whether all folios are *allowed* to be pinned indefinitely (longterm). |
2407 | * Rather confusingly, all folios in the range are required to be pinned via | |
2408 | * FOLL_PIN, before calling this routine. | |
67e139b0 | 2409 | * |
53ba78de VK |
2410 | * If any folios in the range are not allowed to be pinned, then this routine |
2411 | * will migrate those folios away, unpin all the folios in the range and return | |
67e139b0 AP |
2412 | * -EAGAIN. The caller should re-pin the entire range with FOLL_PIN and then |
2413 | * call this routine again. | |
2414 | * | |
2415 | * If an error other than -EAGAIN occurs, this indicates a migration failure. | |
2416 | * The caller should give up, and propagate the error back up the call stack. | |
2417 | * | |
53ba78de VK |
2418 | * If everything is OK and all folios in the range are allowed to be pinned, |
2419 | * then this routine leaves all folios pinned and returns zero for success. | |
67e139b0 | 2420 | */ |
53ba78de VK |
2421 | static long check_and_migrate_movable_folios(unsigned long nr_folios, |
2422 | struct folio **folios) | |
67e139b0 AP |
2423 | { |
2424 | unsigned long collected; | |
53ba78de | 2425 | LIST_HEAD(movable_folio_list); |
67e139b0 | 2426 | |
53ba78de VK |
2427 | collected = collect_longterm_unpinnable_folios(&movable_folio_list, |
2428 | nr_folios, folios); | |
67e139b0 AP |
2429 | if (!collected) |
2430 | return 0; | |
2431 | ||
53ba78de VK |
2432 | return migrate_longterm_unpinnable_folios(&movable_folio_list, |
2433 | nr_folios, folios); | |
2434 | } | |
2435 | ||
2436 | /* | |
2437 | * This routine just converts all the pages in the @pages array to folios and | |
2438 | * calls check_and_migrate_movable_folios() to do the heavy lifting. | |
2439 | * | |
2440 | * Please see the check_and_migrate_movable_folios() documentation for details. | |
2441 | */ | |
2442 | static long check_and_migrate_movable_pages(unsigned long nr_pages, | |
2443 | struct page **pages) | |
2444 | { | |
2445 | struct folio **folios; | |
2446 | long i, ret; | |
2447 | ||
2448 | folios = kmalloc_array(nr_pages, sizeof(*folios), GFP_KERNEL); | |
2449 | if (!folios) | |
2450 | return -ENOMEM; | |
2451 | ||
2452 | for (i = 0; i < nr_pages; i++) | |
2453 | folios[i] = page_folio(pages[i]); | |
2454 | ||
2455 | ret = check_and_migrate_movable_folios(nr_pages, folios); | |
2456 | ||
2457 | kfree(folios); | |
2458 | return ret; | |
9a4e9f3b AK |
2459 | } |
2460 | #else | |
f68749ec | 2461 | static long check_and_migrate_movable_pages(unsigned long nr_pages, |
f6d299ec | 2462 | struct page **pages) |
9a4e9f3b | 2463 | { |
24a95998 | 2464 | return 0; |
9a4e9f3b | 2465 | } |
53ba78de VK |
2466 | |
2467 | static long check_and_migrate_movable_folios(unsigned long nr_folios, | |
2468 | struct folio **folios) | |
2469 | { | |
2470 | return 0; | |
2471 | } | |
d1e153fe | 2472 | #endif /* CONFIG_MIGRATION */ |
9a4e9f3b | 2473 | |
2bb6d283 | 2474 | /* |
932f4a63 IW |
2475 | * __gup_longterm_locked() is a wrapper for __get_user_pages_locked which |
2476 | * allows us to process the FOLL_LONGTERM flag. | |
2bb6d283 | 2477 | */ |
64019a2e | 2478 | static long __gup_longterm_locked(struct mm_struct *mm, |
932f4a63 IW |
2479 | unsigned long start, |
2480 | unsigned long nr_pages, | |
2481 | struct page **pages, | |
53b2d09b | 2482 | int *locked, |
932f4a63 | 2483 | unsigned int gup_flags) |
2bb6d283 | 2484 | { |
f68749ec | 2485 | unsigned int flags; |
24a95998 | 2486 | long rc, nr_pinned_pages; |
2bb6d283 | 2487 | |
f68749ec | 2488 | if (!(gup_flags & FOLL_LONGTERM)) |
b2cac248 | 2489 | return __get_user_pages_locked(mm, start, nr_pages, pages, |
53b2d09b | 2490 | locked, gup_flags); |
67e139b0 | 2491 | |
f68749ec PT |
2492 | flags = memalloc_pin_save(); |
2493 | do { | |
24a95998 | 2494 | nr_pinned_pages = __get_user_pages_locked(mm, start, nr_pages, |
b2cac248 | 2495 | pages, locked, |
24a95998 AP |
2496 | gup_flags); |
2497 | if (nr_pinned_pages <= 0) { | |
2498 | rc = nr_pinned_pages; | |
f68749ec | 2499 | break; |
24a95998 | 2500 | } |
d64e2dbc JG |
2501 | |
2502 | /* FOLL_LONGTERM implies FOLL_PIN */ | |
f6d299ec | 2503 | rc = check_and_migrate_movable_pages(nr_pinned_pages, pages); |
24a95998 | 2504 | } while (rc == -EAGAIN); |
f68749ec | 2505 | memalloc_pin_restore(flags); |
24a95998 | 2506 | return rc ? rc : nr_pinned_pages; |
2bb6d283 | 2507 | } |
932f4a63 | 2508 | |
d64e2dbc JG |
2509 | /* |
2510 | * Check that the given flags are valid for the exported gup/pup interface, and | |
2511 | * update them with the required flags that the caller must have set. | |
2512 | */ | |
b2cac248 LS |
2513 | static bool is_valid_gup_args(struct page **pages, int *locked, |
2514 | unsigned int *gup_flags_p, unsigned int to_set) | |
447f3e45 | 2515 | { |
d64e2dbc JG |
2516 | unsigned int gup_flags = *gup_flags_p; |
2517 | ||
447f3e45 | 2518 | /* |
d64e2dbc JG |
2519 | * These flags are not allowed to be specified externally to the gup |
2520 | * interfaces: | |
0f20bba1 | 2521 | * - FOLL_TOUCH/FOLL_PIN/FOLL_TRIED/FOLL_FAST_ONLY are internal only |
d64e2dbc | 2522 | * - FOLL_REMOTE is internal only and used on follow_page() |
f04740f5 | 2523 | * - FOLL_UNLOCKABLE is internal only and used if locked is !NULL |
447f3e45 | 2524 | */ |
0f20bba1 | 2525 | if (WARN_ON_ONCE(gup_flags & INTERNAL_GUP_FLAGS)) |
d64e2dbc JG |
2526 | return false; |
2527 | ||
2528 | gup_flags |= to_set; | |
f04740f5 JG |
2529 | if (locked) { |
2530 | /* At the external interface locked must be set */ | |
2531 | if (WARN_ON_ONCE(*locked != 1)) | |
2532 | return false; | |
2533 | ||
2534 | gup_flags |= FOLL_UNLOCKABLE; | |
2535 | } | |
d64e2dbc JG |
2536 | |
2537 | /* FOLL_GET and FOLL_PIN are mutually exclusive. */ | |
2538 | if (WARN_ON_ONCE((gup_flags & (FOLL_PIN | FOLL_GET)) == | |
2539 | (FOLL_PIN | FOLL_GET))) | |
2540 | return false; | |
2541 | ||
2542 | /* LONGTERM can only be specified when pinning */ | |
2543 | if (WARN_ON_ONCE(!(gup_flags & FOLL_PIN) && (gup_flags & FOLL_LONGTERM))) | |
2544 | return false; | |
2545 | ||
2546 | /* Pages input must be given if using GET/PIN */ | |
2547 | if (WARN_ON_ONCE((gup_flags & (FOLL_GET | FOLL_PIN)) && !pages)) | |
447f3e45 | 2548 | return false; |
d64e2dbc | 2549 | |
d64e2dbc JG |
2550 | /* We want to allow the pgmap to be hot-unplugged at all times */ |
2551 | if (WARN_ON_ONCE((gup_flags & FOLL_LONGTERM) && | |
2552 | (gup_flags & FOLL_PCI_P2PDMA))) | |
2553 | return false; | |
2554 | ||
d64e2dbc | 2555 | *gup_flags_p = gup_flags; |
447f3e45 BS |
2556 | return true; |
2557 | } | |
2558 | ||
22bf29b6 | 2559 | #ifdef CONFIG_MMU |
adc8cb40 | 2560 | /** |
c4237f8b | 2561 | * get_user_pages_remote() - pin user pages in memory |
c4237f8b JH |
2562 | * @mm: mm_struct of target mm |
2563 | * @start: starting user address | |
2564 | * @nr_pages: number of pages from start to pin | |
2565 | * @gup_flags: flags modifying lookup behaviour | |
2566 | * @pages: array that receives pointers to the pages pinned. | |
2567 | * Should be at least nr_pages long. Or NULL, if caller | |
2568 | * only intends to ensure the pages are faulted in. | |
c4237f8b JH |
2569 | * @locked: pointer to lock flag indicating whether lock is held and |
2570 | * subsequently whether VM_FAULT_RETRY functionality can be | |
2571 | * utilised. Lock must initially be held. | |
2572 | * | |
2573 | * Returns either number of pages pinned (which may be less than the | |
2574 | * number requested), or an error. Details about the return value: | |
2575 | * | |
2576 | * -- If nr_pages is 0, returns 0. | |
2577 | * -- If nr_pages is >0, but no pages were pinned, returns -errno. | |
2578 | * -- If nr_pages is >0, and some pages were pinned, returns the number of | |
2579 | * pages pinned. Again, this may be less than nr_pages. | |
2580 | * | |
2581 | * The caller is responsible for releasing returned @pages, via put_page(). | |
2582 | * | |
c1e8d7c6 | 2583 | * Must be called with mmap_lock held for read or write. |
c4237f8b | 2584 | * |
adc8cb40 SJ |
2585 | * get_user_pages_remote walks a process's page tables and takes a reference |
2586 | * to each struct page that each user address corresponds to at a given | |
c4237f8b JH |
2587 | * instant. That is, it takes the page that would be accessed if a user |
2588 | * thread accesses the given user virtual address at that instant. | |
2589 | * | |
2590 | * This does not guarantee that the page exists in the user mappings when | |
adc8cb40 | 2591 | * get_user_pages_remote returns, and there may even be a completely different |
c4237f8b | 2592 | * page there in some cases (eg. if mmapped pagecache has been invalidated |
5da1a868 | 2593 | * and subsequently re-faulted). However it does guarantee that the page |
c4237f8b JH |
2594 | * won't be freed completely. And mostly callers simply care that the page |
2595 | * contains data that was valid *at some point in time*. Typically, an IO | |
2596 | * or similar operation cannot guarantee anything stronger anyway because | |
2597 | * locks can't be held over the syscall boundary. | |
2598 | * | |
2599 | * If gup_flags & FOLL_WRITE == 0, the page must not be written to. If the page | |
2600 | * is written to, set_page_dirty (or set_page_dirty_lock, as appropriate) must | |
2601 | * be called after the page is finished with, and before put_page is called. | |
2602 | * | |
adc8cb40 SJ |
2603 | * get_user_pages_remote is typically used for fewer-copy IO operations, |
2604 | * to get a handle on the memory by some means other than accesses | |
2605 | * via the user virtual addresses. The pages may be submitted for | |
2606 | * DMA to devices or accessed via their kernel linear mapping (via the | |
2607 | * kmap APIs). Care should be taken to use the correct cache flushing APIs. | |
c4237f8b JH |
2608 | * |
2609 | * See also get_user_pages_fast, for performance critical applications. | |
2610 | * | |
adc8cb40 | 2611 | * get_user_pages_remote should be phased out in favor of |
c4237f8b | 2612 | * get_user_pages_locked|unlocked or get_user_pages_fast. Nothing |
adc8cb40 | 2613 | * should use get_user_pages_remote because it cannot pass |
c4237f8b JH |
2614 | * FAULT_FLAG_ALLOW_RETRY to handle_mm_fault. |
2615 | */ | |
64019a2e | 2616 | long get_user_pages_remote(struct mm_struct *mm, |
c4237f8b JH |
2617 | unsigned long start, unsigned long nr_pages, |
2618 | unsigned int gup_flags, struct page **pages, | |
ca5e8632 | 2619 | int *locked) |
c4237f8b | 2620 | { |
9a863a6a JG |
2621 | int local_locked = 1; |
2622 | ||
b2cac248 | 2623 | if (!is_valid_gup_args(pages, locked, &gup_flags, |
d64e2dbc | 2624 | FOLL_TOUCH | FOLL_REMOTE)) |
eddb1c22 JH |
2625 | return -EINVAL; |
2626 | ||
b2cac248 | 2627 | return __get_user_pages_locked(mm, start, nr_pages, pages, |
9a863a6a | 2628 | locked ? locked : &local_locked, |
d64e2dbc | 2629 | gup_flags); |
c4237f8b JH |
2630 | } |
2631 | EXPORT_SYMBOL(get_user_pages_remote); | |
2632 | ||
eddb1c22 | 2633 | #else /* CONFIG_MMU */ |
64019a2e | 2634 | long get_user_pages_remote(struct mm_struct *mm, |
eddb1c22 JH |
2635 | unsigned long start, unsigned long nr_pages, |
2636 | unsigned int gup_flags, struct page **pages, | |
ca5e8632 | 2637 | int *locked) |
eddb1c22 JH |
2638 | { |
2639 | return 0; | |
2640 | } | |
2641 | #endif /* !CONFIG_MMU */ | |
2642 | ||
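A hedged sketch of a remote-pin caller (editorial illustration, not part of gup.c): the interesting part is honouring @locked, since GUP may have dropped the mmap_lock on the way out. The helper name is hypothetical.

/* Sketch only: pin pages of another mm and cope with a dropped lock. */
static long example_remote_pin(struct mm_struct *mm, unsigned long start,
			       unsigned long nr_pages, struct page **pages)
{
	int locked = 1;
	long pinned;

	mmap_read_lock(mm);
	pinned = get_user_pages_remote(mm, start & PAGE_MASK, nr_pages,
				       FOLL_WRITE, pages, &locked);
	if (locked)
		mmap_read_unlock(mm);

	/* On success the caller later drops each reference with put_page(). */
	return pinned;
}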
adc8cb40 SJ |
2643 | /** |
2644 | * get_user_pages() - pin user pages in memory | |
2645 | * @start: starting user address | |
2646 | * @nr_pages: number of pages from start to pin | |
2647 | * @gup_flags: flags modifying lookup behaviour | |
2648 | * @pages: array that receives pointers to the pages pinned. | |
2649 | * Should be at least nr_pages long. Or NULL, if caller | |
2650 | * only intends to ensure the pages are faulted in. | |
adc8cb40 | 2651 | * |
64019a2e PX |
2652 | * This is the same as get_user_pages_remote(), just with a less-flexible |
2653 | * calling convention where we assume that the mm being operated on belongs to | |
2654 | * the current task, and doesn't allow passing of a locked parameter. We also | |
2655 | * obviously don't pass FOLL_REMOTE in here. | |
932f4a63 IW |
2656 | */ |
2657 | long get_user_pages(unsigned long start, unsigned long nr_pages, | |
54d02069 | 2658 | unsigned int gup_flags, struct page **pages) |
932f4a63 | 2659 | { |
9a863a6a JG |
2660 | int locked = 1; |
2661 | ||
b2cac248 | 2662 | if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_TOUCH)) |
eddb1c22 JH |
2663 | return -EINVAL; |
2664 | ||
afa3c33e | 2665 | return __get_user_pages_locked(current->mm, start, nr_pages, pages, |
b2cac248 | 2666 | &locked, gup_flags); |
932f4a63 IW |
2667 | } |
2668 | EXPORT_SYMBOL(get_user_pages); | |
2bb6d283 | 2669 | |
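A hedged sketch of the common get_user_pages() caller pattern documented above (editorial illustration, not part of gup.c): pin, use, mark dirty if written, release. The helper name is made up.

/* Sketch only: pin pages of the current task and release them afterwards. */
static long example_pin_current(unsigned long uaddr, unsigned long nr_pages,
				struct page **pages, bool write)
{
	long pinned, i;

	mmap_read_lock(current->mm);
	pinned = get_user_pages(uaddr & PAGE_MASK, nr_pages,
				write ? FOLL_WRITE : 0, pages);
	mmap_read_unlock(current->mm);
	if (pinned <= 0)
		return pinned;

	/* ... access the pages, e.g. via kmap_local_page() ... */

	for (i = 0; i < pinned; i++) {
		if (write)
			set_page_dirty_lock(pages[i]);
		put_page(pages[i]);
	}
	return pinned;
}

For DMA or other long-lived access, FOLL_PIN via pin_user_pages() is generally the preferred interface; this sketch only mirrors the FOLL_GET contract spelled out in the comment above.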
acc3c8d1 | 2670 | /* |
d3649f68 | 2671 | * get_user_pages_unlocked() is suitable to replace the form: |
acc3c8d1 | 2672 | * |
3e4e28c5 | 2673 | * mmap_read_lock(mm); |
64019a2e | 2674 | * get_user_pages(mm, ..., pages, NULL); |
3e4e28c5 | 2675 | * mmap_read_unlock(mm); |
d3649f68 CH |
2676 | * |
2677 | * with: | |
2678 | * | |
64019a2e | 2679 | * get_user_pages_unlocked(mm, ..., pages); |
d3649f68 CH |
2680 | * |
2681 | * It is functionally equivalent to get_user_pages_fast, so | |
2682 | * get_user_pages_fast should be used instead if specific gup_flags | |
2683 | * (e.g. FOLL_FORCE) are not required. | |
acc3c8d1 | 2684 | */ |
d3649f68 CH |
2685 | long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, |
2686 | struct page **pages, unsigned int gup_flags) | |
acc3c8d1 | 2687 | { |
b2a72dff | 2688 | int locked = 0; |
acc3c8d1 | 2689 | |
b2cac248 | 2690 | if (!is_valid_gup_args(pages, NULL, &gup_flags, |
f04740f5 | 2691 | FOLL_TOUCH | FOLL_UNLOCKABLE)) |
d64e2dbc JG |
2692 | return -EINVAL; |
2693 | ||
afa3c33e | 2694 | return __get_user_pages_locked(current->mm, start, nr_pages, pages, |
b2cac248 | 2695 | &locked, gup_flags); |
4bbd4c77 | 2696 | } |
d3649f68 | 2697 | EXPORT_SYMBOL(get_user_pages_unlocked); |
2667f50e SC |
2698 | |
2699 | /* | |
23babe19 | 2700 | * GUP-fast |
2667f50e SC |
2701 | * |
2702 | * get_user_pages_fast attempts to pin user pages by walking the page | |
2703 | * tables directly and avoids taking locks. Thus the walker needs to be | |
2704 | * protected from page table pages being freed from under it, and should | |
2705 | * block any THP splits. | |
2706 | * | |
2707 | * One way to achieve this is to have the walker disable interrupts, and | |
2708 | * rely on IPIs from the TLB flushing code blocking before the page table | |
2709 | * pages are freed. This is unsuitable for architectures that do not need | |
2710 | * to broadcast an IPI when invalidating TLBs. | |
2711 | * | |
2712 | * Another way to achieve this is to batch up page table containing pages | |
2713 | * belonging to more than one mm_user, then rcu_sched a callback to free those | |
23babe19 | 2714 | * pages. Disabling interrupts will allow the gup_fast() walker to both block |
2667f50e SC |
2715 | * the rcu_sched callback, and an IPI that we broadcast for splitting THPs |
2716 | * (which is a relatively rare event). The code below adopts this strategy. | |
2717 | * | |
2718 | * Before activating this code, please be aware that the following assumptions | |
2719 | * are currently made: | |
2720 | * | |
ff2e6d72 | 2721 | * *) Either MMU_GATHER_RCU_TABLE_FREE is enabled, and tlb_remove_table() is used to |
e585513b | 2722 | * free pages containing page tables or TLB flushing requires IPI broadcast. |
2667f50e | 2723 | * |
2667f50e SC |
2724 | * *) ptes can be read atomically by the architecture. |
2725 | * | |
2726 | * *) access_ok is sufficient to validate userspace address ranges. | |
2727 | * | |
2728 | * The last two assumptions can be relaxed by the addition of helper functions. | |
2729 | * | |
2730 | * This code is based heavily on the PowerPC implementation by Nick Piggin. | |
2731 | */ | |
25176ad0 | 2732 | #ifdef CONFIG_HAVE_GUP_FAST |
a6e79df9 | 2733 | /* |
f002882c DH |
2734 | * Used in the GUP-fast path to determine whether GUP is permitted to work on |
2735 | * a specific folio. | |
a6e79df9 LS |
2736 | * |
2737 | * This call assumes the caller has pinned the folio, that the lowest page table | |
2738 | * level still points to this folio, and that interrupts have been disabled. | |
2739 | * | |
f002882c DH |
2740 | * GUP-fast must reject all secretmem folios. |
2741 | * | |
a6e79df9 LS |
2742 | * Writing to pinned file-backed dirty tracked folios is inherently problematic |
2743 | * (see comment describing the writable_file_mapping_allowed() function). We | |
2744 | * therefore try to avoid the most egregious case of a long-term mapping doing | |
2745 | * so. | |
2746 | * | |
2747 | * This function cannot be as thorough as that one as the VMA is not available | |
2748 | * in the fast path, so instead we whitelist known good cases and if in doubt, | |
2749 | * fall back to the slow path. | |
2750 | */ | |
f002882c | 2751 | static bool gup_fast_folio_allowed(struct folio *folio, unsigned int flags) |
a6e79df9 | 2752 | { |
f002882c | 2753 | bool reject_file_backed = false; |
a6e79df9 | 2754 | struct address_space *mapping; |
f002882c | 2755 | bool check_secretmem = false; |
a6e79df9 LS |
2756 | unsigned long mapping_flags; |
2757 | ||
2758 | /* | |
2759 | * If we aren't pinning then no problematic write can occur. A long term | |
2760 | * pin is the most egregious case, so this is the one we disallow. | 
2761 | */ | |
f002882c | 2762 | if ((flags & (FOLL_PIN | FOLL_LONGTERM | FOLL_WRITE)) == |
a6e79df9 | 2763 | (FOLL_PIN | FOLL_LONGTERM | FOLL_WRITE)) |
f002882c DH |
2764 | reject_file_backed = true; |
2765 | ||
2766 | /* We hold a folio reference, so we can safely access folio fields. */ | |
a6e79df9 | 2767 | |
f002882c DH |
2768 | /* secretmem folios are always order-0 folios. */ |
2769 | if (IS_ENABLED(CONFIG_SECRETMEM) && !folio_test_large(folio)) | |
2770 | check_secretmem = true; | |
2771 | ||
2772 | if (!reject_file_backed && !check_secretmem) | |
2773 | return true; | |
a6e79df9 LS |
2774 | |
2775 | if (WARN_ON_ONCE(folio_test_slab(folio))) | |
2776 | return false; | |
2777 | ||
f002882c | 2778 | /* hugetlb neither requires dirty-tracking nor can be secretmem. */ |
a6e79df9 LS |
2779 | if (folio_test_hugetlb(folio)) |
2780 | return true; | |
2781 | ||
2782 | /* | |
2783 | * GUP-fast disables IRQs. When IRQs are disabled, RCU grace periods | 
2784 | * cannot proceed, which means no actions performed under RCU can | |
2785 | * proceed either. | |
2786 | * | |
2787 | * inodes and thus their mappings are freed under RCU, which means the | |
2788 | * mapping cannot be freed beneath us and thus we can safely dereference | |
2789 | * it. | |
2790 | */ | |
2791 | lockdep_assert_irqs_disabled(); | |
2792 | ||
2793 | /* | |
2794 | * However, there may be operations which _alter_ the mapping, so ensure | |
2795 | * we read it once and only once. | |
2796 | */ | |
2797 | mapping = READ_ONCE(folio->mapping); | |
2798 | ||
2799 | /* | |
2800 | * The mapping may have been truncated; in any case we cannot determine | 
2801 | * if this mapping is safe - fall back to slow path to determine how to | |
2802 | * proceed. | |
2803 | */ | |
2804 | if (!mapping) | |
2805 | return false; | |
2806 | ||
2807 | /* Anonymous folios pose no problem. */ | |
2808 | mapping_flags = (unsigned long)mapping & PAGE_MAPPING_FLAGS; | |
2809 | if (mapping_flags) | |
2810 | return mapping_flags & PAGE_MAPPING_ANON; | |
2811 | ||
2812 | /* | |
2813 | * At this point, we know the mapping is non-null and points to an | |
f002882c | 2814 | * address_space object. |
a6e79df9 | 2815 | */ |
f002882c DH |
2816 | if (check_secretmem && secretmem_mapping(mapping)) |
2817 | return false; | |
2818 | /* The only remaining allowed file system is shmem. */ | |
2819 | return !reject_file_backed || shmem_mapping(mapping); | |
a6e79df9 LS |
2820 | } |
2821 | ||
23babe19 DH |
2822 | static void __maybe_unused gup_fast_undo_dev_pagemap(int *nr, int nr_start, |
2823 | unsigned int flags, struct page **pages) | |
b59f65fa KS |
2824 | { |
2825 | while ((*nr) - nr_start) { | |
9cbe4954 | 2826 | struct folio *folio = page_folio(pages[--(*nr)]); |
b59f65fa | 2827 | |
9cbe4954 MWO |
2828 | folio_clear_referenced(folio); |
2829 | gup_put_folio(folio, 1, flags); | |
b59f65fa KS |
2830 | } |
2831 | } | |
2832 | ||
3010a5ea | 2833 | #ifdef CONFIG_ARCH_HAS_PTE_SPECIAL |
70cbc3cc | 2834 | /* |
23babe19 | 2835 | * GUP-fast relies on pte change detection to avoid concurrent pgtable |
70cbc3cc YS |
2836 | * operations. |
2837 | * | |
23babe19 | 2838 | * To pin the page, GUP-fast needs to do below in order: |
70cbc3cc YS |
2839 | * (1) pin the page (by prefetching pte), then (2) check pte not changed. |
2840 | * | |
2841 | * For the rest of pgtable operations where pgtable updates can be racy | |
23babe19 | 2842 | * with GUP-fast, we need to do (1) clear pte, then (2) check whether page |
70cbc3cc YS |
2843 | * is pinned. |
2844 | * | |
2845 | * Above will work for all pte-level operations, including THP split. | |
2846 | * | |
23babe19 | 2847 | * For THP collapse, it's a bit more complicated because GUP-fast may be |
70cbc3cc YS |
2848 | * walking a pgtable page that is being freed (pte is still valid but pmd |
2849 | * can be cleared already). To avoid race in such condition, we need to | |
2850 | * also check pmd here to make sure pmd doesn't change (corresponds to | |
2851 | * pmdp_collapse_flush() in the THP collapse code path). | |
2852 | */ | |
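For contrast, here is a simplified, hypothetical sketch of the page-table-update side of this protocol (not lifted from any single call site): the update clears the pte first and only then inspects the pin state, so it cannot miss a concurrent GUP-fast pin.

	struct folio *folio;
	pte_t old;

	/* (1) Remove the pte (with the PTL held) so GUP-fast's recheck fails. */
	old = ptep_clear_flush(vma, addr, ptep);
	folio = page_folio(pte_page(old));

	/* (2) Only now look at the pin state. */
	if (folio_maybe_dma_pinned(folio)) {
		/* A pin exists: do not share the page, copy it or back off. */
	}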
23babe19 DH |
2853 | static int gup_fast_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr, |
2854 | unsigned long end, unsigned int flags, struct page **pages, | |
2855 | int *nr) | |
2667f50e | 2856 | { |
b59f65fa KS |
2857 | struct dev_pagemap *pgmap = NULL; |
2858 | int nr_start = *nr, ret = 0; | |
2667f50e | 2859 | pte_t *ptep, *ptem; |
2667f50e SC |
2860 | |
2861 | ptem = ptep = pte_offset_map(&pmd, addr); | |
04dee9e8 HD |
2862 | if (!ptep) |
2863 | return 0; | |
2667f50e | 2864 | do { |
2a4a06da | 2865 | pte_t pte = ptep_get_lockless(ptep); |
b0496fe4 MWO |
2866 | struct page *page; |
2867 | struct folio *folio; | |
2667f50e | 2868 | |
d74943a2 DH |
2869 | /* |
2870 | * Always fall back to ordinary GUP on PROT_NONE-mapped pages: | 
2871 | * pte_access_permitted() should reject these pages | 
2872 | * either way: otherwise, GUP-fast might succeed in | |
2873 | * cases where ordinary GUP would fail due to VMA access | |
2874 | * permissions. | |
2875 | */ | |
2876 | if (pte_protnone(pte)) | |
e7884f8e KS |
2877 | goto pte_unmap; |
2878 | ||
b798bec4 | 2879 | if (!pte_access_permitted(pte, flags & FOLL_WRITE)) |
e7884f8e KS |
2880 | goto pte_unmap; |
2881 | ||
b59f65fa | 2882 | if (pte_devmap(pte)) { |
7af75561 IW |
2883 | if (unlikely(flags & FOLL_LONGTERM)) |
2884 | goto pte_unmap; | |
2885 | ||
b59f65fa KS |
2886 | pgmap = get_dev_pagemap(pte_pfn(pte), pgmap); |
2887 | if (unlikely(!pgmap)) { | |
23babe19 | 2888 | gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages); |
b59f65fa KS |
2889 | goto pte_unmap; |
2890 | } | |
2891 | } else if (pte_special(pte)) | |
2667f50e SC |
2892 | goto pte_unmap; |
2893 | ||
2894 | VM_BUG_ON(!pfn_valid(pte_pfn(pte))); | |
2895 | page = pte_page(pte); | |
2896 | ||
f442fa61 | 2897 | folio = try_grab_folio_fast(page, 1, flags); |
b0496fe4 | 2898 | if (!folio) |
2667f50e SC |
2899 | goto pte_unmap; |
2900 | ||
70cbc3cc | 2901 | if (unlikely(pmd_val(pmd) != pmd_val(*pmdp)) || |
c33c7948 | 2902 | unlikely(pte_val(pte) != pte_val(ptep_get(ptep)))) { |
b0496fe4 | 2903 | gup_put_folio(folio, 1, flags); |
2667f50e SC |
2904 | goto pte_unmap; |
2905 | } | |
2906 | ||
f002882c | 2907 | if (!gup_fast_folio_allowed(folio, flags)) { |
b0496fe4 | 2908 | gup_put_folio(folio, 1, flags); |
2667f50e SC |
2909 | goto pte_unmap; |
2910 | } | |
2911 | ||
84209e87 | 2912 | if (!pte_write(pte) && gup_must_unshare(NULL, flags, page)) { |
a7f22660 DH |
2913 | gup_put_folio(folio, 1, flags); |
2914 | goto pte_unmap; | |
2915 | } | |
2916 | ||
f28d4363 CI |
2917 | /* |
2918 | * We need to make the page accessible if and only if we are | |
2919 | * going to access its content (the FOLL_PIN case). Please | |
2920 | * see Documentation/core-api/pin_user_pages.rst for | |
2921 | * details. | |
2922 | */ | |
2923 | if (flags & FOLL_PIN) { | |
2924 | ret = arch_make_page_accessible(page); | |
2925 | if (ret) { | |
b0496fe4 | 2926 | gup_put_folio(folio, 1, flags); |
f28d4363 CI |
2927 | goto pte_unmap; |
2928 | } | |
2929 | } | |
b0496fe4 | 2930 | folio_set_referenced(folio); |
2667f50e SC |
2931 | pages[*nr] = page; |
2932 | (*nr)++; | |
2667f50e SC |
2933 | } while (ptep++, addr += PAGE_SIZE, addr != end); |
2934 | ||
2935 | ret = 1; | |
2936 | ||
2937 | pte_unmap: | |
832d7aa0 CH |
2938 | if (pgmap) |
2939 | put_dev_pagemap(pgmap); | |
2667f50e SC |
2940 | pte_unmap(ptem); |
2941 | return ret; | |
2942 | } | |
2943 | #else | |
2944 | ||
2945 | /* | |
2946 | * If we can't determine whether or not a pte is special, then fail immediately | |
2947 | * for ptes. Note, we can still pin HugeTLB and THP as these are guaranteed not | |
2948 | * to be special. | |
2949 | * | |
2950 | * For a futex to be placed on a THP tail page, get_futex_key requires a | |
dadbb612 | 2951 | * get_user_pages_fast_only implementation that can pin pages. Thus it's still |
23babe19 | 2952 | * useful to have gup_fast_pmd_leaf even if we can't operate on ptes. |
2667f50e | 2953 | */ |
23babe19 DH |
2954 | static int gup_fast_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr, |
2955 | unsigned long end, unsigned int flags, struct page **pages, | |
2956 | int *nr) | |
2667f50e SC |
2957 | { |
2958 | return 0; | |
2959 | } | |
3010a5ea | 2960 | #endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */ |
2667f50e | 2961 | |
17596731 | 2962 | #if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && defined(CONFIG_TRANSPARENT_HUGEPAGE) |
23babe19 DH |
2963 | static int gup_fast_devmap_leaf(unsigned long pfn, unsigned long addr, |
2964 | unsigned long end, unsigned int flags, struct page **pages, int *nr) | |
b59f65fa KS |
2965 | { |
2966 | int nr_start = *nr; | |
2967 | struct dev_pagemap *pgmap = NULL; | |
2968 | ||
2969 | do { | |
9cbe4954 | 2970 | struct folio *folio; |
b59f65fa KS |
2971 | struct page *page = pfn_to_page(pfn); |
2972 | ||
2973 | pgmap = get_dev_pagemap(pfn, pgmap); | |
2974 | if (unlikely(!pgmap)) { | |
23babe19 | 2975 | gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages); |
6401c4eb | 2976 | break; |
b59f65fa | 2977 | } |
4003f107 LG |
2978 | |
2979 | if (!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page)) { | |
23babe19 | 2980 | gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages); |
4003f107 LG |
2981 | break; |
2982 | } | |
2983 | ||
f442fa61 | 2984 | folio = try_grab_folio_fast(page, 1, flags); |
9cbe4954 | 2985 | if (!folio) { |
23babe19 | 2986 | gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages); |
6401c4eb | 2987 | break; |
3faa52c0 | 2988 | } |
9cbe4954 MWO |
2989 | folio_set_referenced(folio); |
2990 | pages[*nr] = page; | |
b59f65fa KS |
2991 | (*nr)++; |
2992 | pfn++; | |
2993 | } while (addr += PAGE_SIZE, addr != end); | |
832d7aa0 | 2994 | |
6401c4eb | 2995 | put_dev_pagemap(pgmap); |
20b7fee7 | 2996 | return addr == end; |
b59f65fa KS |
2997 | } |
2998 | ||
23babe19 DH |
2999 | static int gup_fast_devmap_pmd_leaf(pmd_t orig, pmd_t *pmdp, unsigned long addr, |
3000 | unsigned long end, unsigned int flags, struct page **pages, | |
3001 | int *nr) | |
b59f65fa KS |
3002 | { |
3003 | unsigned long fault_pfn; | |
a9b6de77 DW |
3004 | int nr_start = *nr; |
3005 | ||
3006 | fault_pfn = pmd_pfn(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT); | |
23babe19 | 3007 | if (!gup_fast_devmap_leaf(fault_pfn, addr, end, flags, pages, nr)) |
a9b6de77 | 3008 | return 0; |
b59f65fa | 3009 | |
a9b6de77 | 3010 | if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) { |
23babe19 | 3011 | gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages); |
a9b6de77 DW |
3012 | return 0; |
3013 | } | |
3014 | return 1; | |
b59f65fa KS |
3015 | } |
3016 | ||
23babe19 DH |
3017 | static int gup_fast_devmap_pud_leaf(pud_t orig, pud_t *pudp, unsigned long addr, |
3018 | unsigned long end, unsigned int flags, struct page **pages, | |
3019 | int *nr) | |
b59f65fa KS |
3020 | { |
3021 | unsigned long fault_pfn; | |
a9b6de77 DW |
3022 | int nr_start = *nr; |
3023 | ||
3024 | fault_pfn = pud_pfn(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT); | |
23babe19 | 3025 | if (!gup_fast_devmap_leaf(fault_pfn, addr, end, flags, pages, nr)) |
a9b6de77 | 3026 | return 0; |
b59f65fa | 3027 | |
a9b6de77 | 3028 | if (unlikely(pud_val(orig) != pud_val(*pudp))) { |
23babe19 | 3029 | gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages); |
a9b6de77 DW |
3030 | return 0; |
3031 | } | |
3032 | return 1; | |
b59f65fa KS |
3033 | } |
3034 | #else | |
23babe19 DH |
3035 | static int gup_fast_devmap_pmd_leaf(pmd_t orig, pmd_t *pmdp, unsigned long addr, |
3036 | unsigned long end, unsigned int flags, struct page **pages, | |
3037 | int *nr) | |
b59f65fa KS |
3038 | { |
3039 | BUILD_BUG(); | |
3040 | return 0; | |
3041 | } | |
3042 | ||
23babe19 DH |
3043 | static int gup_fast_devmap_pud_leaf(pud_t pud, pud_t *pudp, unsigned long addr, |
3044 | unsigned long end, unsigned int flags, struct page **pages, | |
3045 | int *nr) | |
b59f65fa KS |
3046 | { |
3047 | BUILD_BUG(); | |
3048 | return 0; | |
3049 | } | |
3050 | #endif | |
3051 | ||
23babe19 DH |
3052 | static int gup_fast_pmd_leaf(pmd_t orig, pmd_t *pmdp, unsigned long addr, |
3053 | unsigned long end, unsigned int flags, struct page **pages, | |
3054 | int *nr) | |
2667f50e | 3055 | { |
667ed1f7 MWO |
3056 | struct page *page; |
3057 | struct folio *folio; | |
2667f50e SC |
3058 | int refs; |
3059 | ||
b798bec4 | 3060 | if (!pmd_access_permitted(orig, flags & FOLL_WRITE)) |
2667f50e SC |
3061 | return 0; |
3062 | ||
7af75561 IW |
3063 | if (pmd_devmap(orig)) { |
3064 | if (unlikely(flags & FOLL_LONGTERM)) | |
3065 | return 0; | |
23babe19 DH |
3066 | return gup_fast_devmap_pmd_leaf(orig, pmdp, addr, end, flags, |
3067 | pages, nr); | |
7af75561 | 3068 | } |
b59f65fa | 3069 | |
f3c94c62 PX |
3070 | page = pmd_page(orig); |
3071 | refs = record_subpages(page, PMD_SIZE, addr, end, pages + *nr); | |
2667f50e | 3072 | |
f442fa61 | 3073 | folio = try_grab_folio_fast(page, refs, flags); |
667ed1f7 | 3074 | if (!folio) |
2667f50e | 3075 | return 0; |
2667f50e SC |
3076 | |
3077 | if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) { | |
667ed1f7 | 3078 | gup_put_folio(folio, refs, flags); |
2667f50e SC |
3079 | return 0; |
3080 | } | |
3081 | ||
f002882c | 3082 | if (!gup_fast_folio_allowed(folio, flags)) { |
a6e79df9 LS |
3083 | gup_put_folio(folio, refs, flags); |
3084 | return 0; | |
3085 | } | |
84209e87 | 3086 | if (!pmd_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) { |
a7f22660 DH |
3087 | gup_put_folio(folio, refs, flags); |
3088 | return 0; | |
3089 | } | |
3090 | ||
a43e9820 | 3091 | *nr += refs; |
667ed1f7 | 3092 | folio_set_referenced(folio); |
2667f50e SC |
3093 | return 1; |
3094 | } | |
3095 | ||
23babe19 DH |
3096 | static int gup_fast_pud_leaf(pud_t orig, pud_t *pudp, unsigned long addr, |
3097 | unsigned long end, unsigned int flags, struct page **pages, | |
3098 | int *nr) | |
2667f50e | 3099 | { |
83afb52e MWO |
3100 | struct page *page; |
3101 | struct folio *folio; | |
2667f50e SC |
3102 | int refs; |
3103 | ||
b798bec4 | 3104 | if (!pud_access_permitted(orig, flags & FOLL_WRITE)) |
2667f50e SC |
3105 | return 0; |
3106 | ||
7af75561 IW |
3107 | if (pud_devmap(orig)) { |
3108 | if (unlikely(flags & FOLL_LONGTERM)) | |
3109 | return 0; | |
23babe19 DH |
3110 | return gup_fast_devmap_pud_leaf(orig, pudp, addr, end, flags, |
3111 | pages, nr); | |
7af75561 | 3112 | } |
b59f65fa | 3113 | |
f3c94c62 PX |
3114 | page = pud_page(orig); |
3115 | refs = record_subpages(page, PUD_SIZE, addr, end, pages + *nr); | |
2667f50e | 3116 | |
f442fa61 | 3117 | folio = try_grab_folio_fast(page, refs, flags); |
83afb52e | 3118 | if (!folio) |
2667f50e | 3119 | return 0; |
2667f50e SC |
3120 | |
3121 | if (unlikely(pud_val(orig) != pud_val(*pudp))) { | |
83afb52e | 3122 | gup_put_folio(folio, refs, flags); |
2667f50e SC |
3123 | return 0; |
3124 | } | |
3125 | ||
f002882c | 3126 | if (!gup_fast_folio_allowed(folio, flags)) { |
a6e79df9 LS |
3127 | gup_put_folio(folio, refs, flags); |
3128 | return 0; | |
3129 | } | |
3130 | ||
84209e87 | 3131 | if (!pud_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) { |
a7f22660 DH |
3132 | gup_put_folio(folio, refs, flags); |
3133 | return 0; | |
3134 | } | |
3135 | ||
a43e9820 | 3136 | *nr += refs; |
83afb52e | 3137 | folio_set_referenced(folio); |
2667f50e SC |
3138 | return 1; |
3139 | } | |
3140 | ||
23babe19 DH |
3141 | static int gup_fast_pgd_leaf(pgd_t orig, pgd_t *pgdp, unsigned long addr, |
3142 | unsigned long end, unsigned int flags, struct page **pages, | |
3143 | int *nr) | |
f30c59e9 AK |
3144 | { |
3145 | int refs; | |
2d7919a2 MWO |
3146 | struct page *page; |
3147 | struct folio *folio; | |
f30c59e9 | 3148 | |
b798bec4 | 3149 | if (!pgd_access_permitted(orig, flags & FOLL_WRITE)) |
f30c59e9 AK |
3150 | return 0; |
3151 | ||
b59f65fa | 3152 | BUILD_BUG_ON(pgd_devmap(orig)); |
a43e9820 | 3153 | |
f3c94c62 PX |
3154 | page = pgd_page(orig); |
3155 | refs = record_subpages(page, PGDIR_SIZE, addr, end, pages + *nr); | |
f30c59e9 | 3156 | |
f442fa61 | 3157 | folio = try_grab_folio_fast(page, refs, flags); |
2d7919a2 | 3158 | if (!folio) |
f30c59e9 | 3159 | return 0; |
f30c59e9 AK |
3160 | |
3161 | if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) { | |
2d7919a2 | 3162 | gup_put_folio(folio, refs, flags); |
f30c59e9 AK |
3163 | return 0; |
3164 | } | |
3165 | ||
31115034 LS |
3166 | if (!pgd_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) { |
3167 | gup_put_folio(folio, refs, flags); | |
3168 | return 0; | |
3169 | } | |
3170 | ||
f002882c | 3171 | if (!gup_fast_folio_allowed(folio, flags)) { |
a6e79df9 LS |
3172 | gup_put_folio(folio, refs, flags); |
3173 | return 0; | |
3174 | } | |
3175 | ||
a43e9820 | 3176 | *nr += refs; |
2d7919a2 | 3177 | folio_set_referenced(folio); |
f30c59e9 AK |
3178 | return 1; |
3179 | } | |
3180 | ||
23babe19 DH |
3181 | static int gup_fast_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr, |
3182 | unsigned long end, unsigned int flags, struct page **pages, | |
3183 | int *nr) | |
2667f50e SC |
3184 | { |
3185 | unsigned long next; | |
3186 | pmd_t *pmdp; | |
3187 | ||
d3f7b1bb | 3188 | pmdp = pmd_offset_lockless(pudp, pud, addr); |
2667f50e | 3189 | do { |
1180e732 | 3190 | pmd_t pmd = pmdp_get_lockless(pmdp); |
2667f50e SC |
3191 | |
3192 | next = pmd_addr_end(addr, end); | |
84c3fc4e | 3193 | if (!pmd_present(pmd)) |
2667f50e SC |
3194 | return 0; |
3195 | ||
7db86dc3 | 3196 | if (unlikely(pmd_leaf(pmd))) { |
23babe19 | 3197 | /* See gup_fast_pte_range() */ |
d74943a2 | 3198 | if (pmd_protnone(pmd)) |
2667f50e SC |
3199 | return 0; |
3200 | ||
23babe19 | 3201 | if (!gup_fast_pmd_leaf(pmd, pmdp, addr, next, flags, |
2667f50e SC |
3202 | pages, nr)) |
3203 | return 0; | |
3204 | ||
23babe19 DH |
3205 | } else if (!gup_fast_pte_range(pmd, pmdp, addr, next, flags, |
3206 | pages, nr)) | |
2923117b | 3207 | return 0; |
2667f50e SC |
3208 | } while (pmdp++, addr = next, addr != end); |
3209 | ||
3210 | return 1; | |
3211 | } | |
3212 | ||
23babe19 DH |
3213 | static int gup_fast_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr, |
3214 | unsigned long end, unsigned int flags, struct page **pages, | |
3215 | int *nr) | |
2667f50e SC |
3216 | { |
3217 | unsigned long next; | |
3218 | pud_t *pudp; | |
3219 | ||
d3f7b1bb | 3220 | pudp = pud_offset_lockless(p4dp, p4d, addr); |
2667f50e | 3221 | do { |
e37c6982 | 3222 | pud_t pud = READ_ONCE(*pudp); |
2667f50e SC |
3223 | |
3224 | next = pud_addr_end(addr, end); | |
15494520 | 3225 | if (unlikely(!pud_present(pud))) |
2667f50e | 3226 | return 0; |
7db86dc3 | 3227 | if (unlikely(pud_leaf(pud))) { |
23babe19 DH |
3228 | if (!gup_fast_pud_leaf(pud, pudp, addr, next, flags, |
3229 | pages, nr)) | |
f30c59e9 | 3230 | return 0; |
23babe19 DH |
3231 | } else if (!gup_fast_pmd_range(pudp, pud, addr, next, flags, |
3232 | pages, nr)) | |
2667f50e SC |
3233 | return 0; |
3234 | } while (pudp++, addr = next, addr != end); | |
3235 | ||
3236 | return 1; | |
3237 | } | |
3238 | ||
23babe19 DH |
3239 | static int gup_fast_p4d_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr, |
3240 | unsigned long end, unsigned int flags, struct page **pages, | |
3241 | int *nr) | |
c2febafc KS |
3242 | { |
3243 | unsigned long next; | |
3244 | p4d_t *p4dp; | |
3245 | ||
d3f7b1bb | 3246 | p4dp = p4d_offset_lockless(pgdp, pgd, addr); |
c2febafc KS |
3247 | do { |
3248 | p4d_t p4d = READ_ONCE(*p4dp); | |
3249 | ||
3250 | next = p4d_addr_end(addr, end); | |
089f9214 | 3251 | if (!p4d_present(p4d)) |
c2febafc | 3252 | return 0; |
1965e933 | 3253 | BUILD_BUG_ON(p4d_leaf(p4d)); |
8268614b CL |
3254 | if (!gup_fast_pud_range(p4dp, p4d, addr, next, flags, |
3255 | pages, nr)) | |
c2febafc KS |
3256 | return 0; |
3257 | } while (p4dp++, addr = next, addr != end); | |
3258 | ||
3259 | return 1; | |
3260 | } | |
3261 | ||
23babe19 | 3262 | static void gup_fast_pgd_range(unsigned long addr, unsigned long end, |
b798bec4 | 3263 | unsigned int flags, struct page **pages, int *nr) |
5b65c467 KS |
3264 | { |
3265 | unsigned long next; | |
3266 | pgd_t *pgdp; | |
3267 | ||
3268 | pgdp = pgd_offset(current->mm, addr); | |
3269 | do { | |
3270 | pgd_t pgd = READ_ONCE(*pgdp); | |
3271 | ||
3272 | next = pgd_addr_end(addr, end); | |
3273 | if (pgd_none(pgd)) | |
3274 | return; | |
7db86dc3 | 3275 | if (unlikely(pgd_leaf(pgd))) { |
23babe19 DH |
3276 | if (!gup_fast_pgd_leaf(pgd, pgdp, addr, next, flags, |
3277 | pages, nr)) | |
5b65c467 | 3278 | return; |
23babe19 DH |
3279 | } else if (!gup_fast_p4d_range(pgdp, pgd, addr, next, flags, |
3280 | pages, nr)) | |
5b65c467 KS |
3281 | return; |
3282 | } while (pgdp++, addr = next, addr != end); | |
3283 | } | |
050a9adc | 3284 | #else |
23babe19 | 3285 | static inline void gup_fast_pgd_range(unsigned long addr, unsigned long end, |
050a9adc CH |
3286 | unsigned int flags, struct page **pages, int *nr) |
3287 | { | |
3288 | } | |
25176ad0 | 3289 | #endif /* CONFIG_HAVE_GUP_FAST */ |
5b65c467 KS |
3290 | |
3291 | #ifndef gup_fast_permitted | |
3292 | /* | |
dadbb612 | 3293 | * Check if it's allowed to use get_user_pages_fast_only() for the range, or |
5b65c467 KS |
3294 | * we need to fall back to the slow version: |
3295 | */ | |
26f4c328 | 3296 | static bool gup_fast_permitted(unsigned long start, unsigned long end) |
5b65c467 | 3297 | { |
26f4c328 | 3298 | return true; |
5b65c467 KS |
3299 | } |
3300 | #endif | |
3301 | ||
23babe19 DH |
3302 | static unsigned long gup_fast(unsigned long start, unsigned long end, |
3303 | unsigned int gup_flags, struct page **pages) | |
c28b1fc7 JG |
3304 | { |
3305 | unsigned long flags; | |
3306 | int nr_pinned = 0; | |
57efa1fe | 3307 | unsigned seq; |
c28b1fc7 | 3308 | |
25176ad0 | 3309 | if (!IS_ENABLED(CONFIG_HAVE_GUP_FAST) || |
c28b1fc7 JG |
3310 | !gup_fast_permitted(start, end)) |
3311 | return 0; | |
3312 | ||
57efa1fe JG |
3313 | if (gup_flags & FOLL_PIN) { |
3314 | seq = raw_read_seqcount(¤t->mm->write_protect_seq); | |
3315 | if (seq & 1) | |
3316 | return 0; | |
3317 | } | |
3318 | ||
c28b1fc7 JG |
3319 | /* |
3320 | * Disable interrupts. The nested form is used, in order to allow full, | |
3321 | * general purpose use of this routine. | |
3322 | * | |
3323 | * With interrupts disabled, we block page table pages from being freed | |
3324 | * from under us. See struct mmu_table_batch comments in | |
3325 | * include/asm-generic/tlb.h for more details. | |
3326 | * | |
3327 | * We do not adopt an rcu_read_lock() here as we also want to block IPIs | |
3328 | * that come from THPs splitting. | |
3329 | */ | |
3330 | local_irq_save(flags); | |
23babe19 | 3331 | gup_fast_pgd_range(start, end, gup_flags, pages, &nr_pinned); |
c28b1fc7 | 3332 | local_irq_restore(flags); |
57efa1fe JG |
3333 | |
3334 | /* | |
3335 | * When pinning pages for DMA there could be a concurrent write protect | |
23babe19 | 3336 | * from fork() via copy_page_range(), in this case always fail GUP-fast. |
57efa1fe JG |
3337 | */ |
3338 | if (gup_flags & FOLL_PIN) { | |
3339 | if (read_seqcount_retry(¤t->mm->write_protect_seq, seq)) { | |
23babe19 | 3340 | gup_fast_unpin_user_pages(pages, nr_pinned); |
57efa1fe | 3341 | return 0; |
b6a2619c DH |
3342 | } else { |
3343 | sanity_check_pinned_pages(pages, nr_pinned); | |
57efa1fe JG |
3344 | } |
3345 | } | |
c28b1fc7 JG |
3346 | return nr_pinned; |
3347 | } | |
3348 | ||
23babe19 DH |
3349 | static int gup_fast_fallback(unsigned long start, unsigned long nr_pages, |
3350 | unsigned int gup_flags, struct page **pages) | |
2667f50e | 3351 | { |
c28b1fc7 JG |
3352 | unsigned long len, end; |
3353 | unsigned long nr_pinned; | |
b2a72dff | 3354 | int locked = 0; |
c28b1fc7 | 3355 | int ret; |
2667f50e | 3356 | |
f4000fdf | 3357 | if (WARN_ON_ONCE(gup_flags & ~(FOLL_WRITE | FOLL_LONGTERM | |
376a34ef | 3358 | FOLL_FORCE | FOLL_PIN | FOLL_GET | |
4003f107 | 3359 | FOLL_FAST_ONLY | FOLL_NOFAULT | |
d74943a2 | 3360 | FOLL_PCI_P2PDMA | FOLL_HONOR_NUMA_FAULT))) |
817be129 CH |
3361 | return -EINVAL; |
3362 | ||
a458b76a AA |
3363 | if (gup_flags & FOLL_PIN) |
3364 | mm_set_has_pinned_flag(¤t->mm->flags); | |
008cfe44 | 3365 | |
f81cd178 | 3366 | if (!(gup_flags & FOLL_FAST_ONLY)) |
da1c55f1 | 3367 | might_lock_read(¤t->mm->mmap_lock); |
f81cd178 | 3368 | |
f455c854 | 3369 | start = untagged_addr(start) & PAGE_MASK; |
c28b1fc7 JG |
3370 | len = nr_pages << PAGE_SHIFT; |
3371 | if (check_add_overflow(start, len, &end)) | |
9883c7f8 | 3372 | return -EOVERFLOW; |
6014bc27 LT |
3373 | if (end > TASK_SIZE_MAX) |
3374 | return -EFAULT; | |
96d4f267 | 3375 | if (unlikely(!access_ok((void __user *)start, len))) |
c61611f7 | 3376 | return -EFAULT; |
73e10a61 | 3377 | |
23babe19 | 3378 | nr_pinned = gup_fast(start, end, gup_flags, pages); |
c28b1fc7 JG |
3379 | if (nr_pinned == nr_pages || gup_flags & FOLL_FAST_ONLY) |
3380 | return nr_pinned; | |
2667f50e | 3381 | |
c28b1fc7 JG |
3382 | /* Slow path: try to get the remaining pages with get_user_pages */ |
3383 | start += nr_pinned << PAGE_SHIFT; | |
3384 | pages += nr_pinned; | |
b2a72dff | 3385 | ret = __gup_longterm_locked(current->mm, start, nr_pages - nr_pinned, |
b2cac248 | 3386 | pages, &locked, |
f04740f5 | 3387 | gup_flags | FOLL_TOUCH | FOLL_UNLOCKABLE); |
c28b1fc7 JG |
3388 | if (ret < 0) { |
3389 | /* | |
3390 | * The caller has to unpin the pages we already pinned so | |
3391 | * returning -errno is not an option | |
3392 | */ | |
3393 | if (nr_pinned) | |
3394 | return nr_pinned; | |
3395 | return ret; | |
2667f50e | 3396 | } |
c28b1fc7 | 3397 | return ret + nr_pinned; |
2667f50e | 3398 | } |
c28b1fc7 | 3399 | |
dadbb612 SJ |
3400 | /** |
3401 | * get_user_pages_fast_only() - pin user pages in memory | |
3402 | * @start: starting user address | |
3403 | * @nr_pages: number of pages from start to pin | |
3404 | * @gup_flags: flags modifying pin behaviour | |
3405 | * @pages: array that receives pointers to the pages pinned. | |
3406 | * Should be at least nr_pages long. | |
3407 | * | |
9e1f0580 JH |
3408 | * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to |
3409 | * the regular GUP. | |
9e1f0580 JH |
3410 | * |
3411 | * If the architecture does not support this function, simply return with no | |
3412 | * pages pinned. | |
3413 | * | |
3414 | * Careful, careful! COW breaking can go either way, so a non-write | |
3415 | * access can get ambiguous page results. If you call this function without | |
3416 | * 'write' set, you'd better be sure that you're ok with that ambiguity. | |
3417 | */ | |
dadbb612 SJ |
3418 | int get_user_pages_fast_only(unsigned long start, int nr_pages, |
3419 | unsigned int gup_flags, struct page **pages) | |
9e1f0580 | 3420 | { |
9e1f0580 JH |
3421 | /* |
3422 | * Internally (within mm/gup.c), gup fast variants must set FOLL_GET, | |
3423 | * because gup fast is always a "pin with a +1 page refcount" request. | |
376a34ef JH |
3424 | * |
3425 | * FOLL_FAST_ONLY is required in order to match the API description of | |
3426 | * this routine: no fall back to regular ("slow") GUP. | |
9e1f0580 | 3427 | */ |
b2cac248 | 3428 | if (!is_valid_gup_args(pages, NULL, &gup_flags, |
d64e2dbc JG |
3429 | FOLL_GET | FOLL_FAST_ONLY)) |
3430 | return -EINVAL; | |
9e1f0580 | 3431 | |
23babe19 | 3432 | return gup_fast_fallback(start, nr_pages, gup_flags, pages); |
9e1f0580 | 3433 | } |
dadbb612 | 3434 | EXPORT_SYMBOL_GPL(get_user_pages_fast_only); |
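A hedged sketch of an atomic-context caller (the single-page scope, uaddr, and the surrounding function are hypothetical):

	struct page *page;
	int ret;

	/* Safe with IRQs disabled; never falls back to the slow path. */
	ret = get_user_pages_fast_only(uaddr, 1, FOLL_WRITE, &page);
	if (ret != 1)
		return -EFAULT;	/* caller must retry from a sleepable context */

	/* ... use the page ... */
	put_page(page);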
9e1f0580 | 3435 | |
eddb1c22 JH |
3436 | /** |
3437 | * get_user_pages_fast() - pin user pages in memory | |
3faa52c0 JH |
3438 | * @start: starting user address |
3439 | * @nr_pages: number of pages from start to pin | |
3440 | * @gup_flags: flags modifying pin behaviour | |
3441 | * @pages: array that receives pointers to the pages pinned. | |
3442 | * Should be at least nr_pages long. | |
eddb1c22 | 3443 | * |
c1e8d7c6 | 3444 | * Attempt to pin user pages in memory without taking mm->mmap_lock. |
eddb1c22 JH |
3445 | * If not successful, it will fall back to taking the lock and |
3446 | * calling get_user_pages(). | |
3447 | * | |
3448 | * Returns number of pages pinned. This may be fewer than the number requested. | |
3449 | * If nr_pages is 0 or negative, returns 0. If no pages were pinned, returns | |
3450 | * -errno. | |
3451 | */ | |
3452 | int get_user_pages_fast(unsigned long start, int nr_pages, | |
3453 | unsigned int gup_flags, struct page **pages) | |
3454 | { | |
94202f12 JH |
3455 | /* |
3456 | * The caller may or may not have explicitly set FOLL_GET; either way is | |
3457 | * OK. However, internally (within mm/gup.c), gup fast variants must set | |
3458 | * FOLL_GET, because gup fast is always a "pin with a +1 page refcount" | |
3459 | * request. | |
3460 | */ | |
b2cac248 | 3461 | if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_GET)) |
d64e2dbc | 3462 | return -EINVAL; |
23babe19 | 3463 | return gup_fast_fallback(start, nr_pages, gup_flags, pages); |
eddb1c22 | 3464 | } |
050a9adc | 3465 | EXPORT_SYMBOL_GPL(get_user_pages_fast); |
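A hedged usage sketch showing the partial-pin case (uaddr and the array size are hypothetical); fewer pages than requested may be returned, and those must still be released:

	struct page *pages[8];
	int got, i;

	got = get_user_pages_fast(uaddr, ARRAY_SIZE(pages), 0, pages);
	if (got < 0)
		return got;
	if (got < ARRAY_SIZE(pages)) {
		/* Partial result: drop what we did get and report a fault. */
		for (i = 0; i < got; i++)
			put_page(pages[i]);
		return -EFAULT;
	}

	/* ... all requested pages are available ... */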
eddb1c22 JH |
3466 | |
3467 | /** | |
3468 | * pin_user_pages_fast() - pin user pages in memory without taking locks | |
3469 | * | |
3faa52c0 JH |
3470 | * @start: starting user address |
3471 | * @nr_pages: number of pages from start to pin | |
3472 | * @gup_flags: flags modifying pin behaviour | |
3473 | * @pages: array that receives pointers to the pages pinned. | |
3474 | * Should be at least nr_pages long. | |
3475 | * | |
3476 | * Nearly the same as get_user_pages_fast(), except that FOLL_PIN is set. See | |
3477 | * get_user_pages_fast() for documentation on the function arguments, because | |
3478 | * the arguments here are identical. | |
3479 | * | |
3480 | * FOLL_PIN means that the pages must be released via unpin_user_page(). Please | |
72ef5e52 | 3481 | * see Documentation/core-api/pin_user_pages.rst for further details. |
c8070b78 DH |
3482 | * |
3483 | * Note that if a zero_page is amongst the returned pages, it will not have | |
3484 | * pins in it and unpin_user_page() will not remove pins from it. | |
eddb1c22 JH |
3485 | */ |
3486 | int pin_user_pages_fast(unsigned long start, int nr_pages, | |
3487 | unsigned int gup_flags, struct page **pages) | |
3488 | { | |
b2cac248 | 3489 | if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_PIN)) |
3faa52c0 | 3490 | return -EINVAL; |
23babe19 | 3491 | return gup_fast_fallback(start, nr_pages, gup_flags, pages); |
eddb1c22 JH |
3492 | } |
3493 | EXPORT_SYMBOL_GPL(pin_user_pages_fast); | |
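A hedged sketch of the FOLL_PIN pattern (uaddr is hypothetical, DMA setup elided); pinned pages are released with unpin_user_page()/unpin_user_pages(), never put_page():

	struct page *pages[16];
	int npinned;

	npinned = pin_user_pages_fast(uaddr, ARRAY_SIZE(pages), FOLL_WRITE, pages);
	if (npinned < 0)
		return npinned;

	/* ... program the device / perform DMA against the pinned pages ... */

	unpin_user_pages(pages, npinned);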
3494 | ||
3495 | /** | |
64019a2e | 3496 | * pin_user_pages_remote() - pin pages of a remote process |
eddb1c22 | 3497 | * |
3faa52c0 JH |
3498 | * @mm: mm_struct of target mm |
3499 | * @start: starting user address | |
3500 | * @nr_pages: number of pages from start to pin | |
3501 | * @gup_flags: flags modifying lookup behaviour | |
3502 | * @pages: array that receives pointers to the pages pinned. | |
0768c8de | 3503 | * Should be at least nr_pages long. |
3faa52c0 JH |
3504 | * @locked: pointer to lock flag indicating whether lock is held and |
3505 | * subsequently whether VM_FAULT_RETRY functionality can be | |
3506 | * utilised. Lock must initially be held. | |
3507 | * | |
3508 | * Nearly the same as get_user_pages_remote(), except that FOLL_PIN is set. See | |
3509 | * get_user_pages_remote() for documentation on the function arguments, because | |
3510 | * the arguments here are identical. | |
3511 | * | |
3512 | * FOLL_PIN means that the pages must be released via unpin_user_page(). Please | |
72ef5e52 | 3513 | * see Documentation/core-api/pin_user_pages.rst for details. |
c8070b78 DH |
3514 | * |
3515 | * Note that if a zero_page is amongst the returned pages, it will not have | |
3516 | * pins in it and unpin_user_page*() will not remove pins from it. | |
eddb1c22 | 3517 | */ |
64019a2e | 3518 | long pin_user_pages_remote(struct mm_struct *mm, |
eddb1c22 JH |
3519 | unsigned long start, unsigned long nr_pages, |
3520 | unsigned int gup_flags, struct page **pages, | |
0b295316 | 3521 | int *locked) |
eddb1c22 | 3522 | { |
9a863a6a JG |
3523 | int local_locked = 1; |
3524 | ||
b2cac248 | 3525 | if (!is_valid_gup_args(pages, locked, &gup_flags, |
d64e2dbc JG |
3526 | FOLL_PIN | FOLL_TOUCH | FOLL_REMOTE)) |
3527 | return 0; | |
b2cac248 | 3528 | return __gup_longterm_locked(mm, start, nr_pages, pages, |
9a863a6a | 3529 | locked ? locked : &local_locked, |
d64e2dbc | 3530 | gup_flags); |
eddb1c22 JH |
3531 | } |
3532 | EXPORT_SYMBOL(pin_user_pages_remote); | |
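A hedged sketch of pinning against another process's mm (mm and addr are hypothetical), following a pattern similar to mm/process_vm_access.c: the caller takes mmap_read_lock() and re-checks @locked afterwards in case GUP dropped the lock.

	struct page *page;
	int locked = 1;
	long ret;

	mmap_read_lock(mm);
	ret = pin_user_pages_remote(mm, addr, 1, FOLL_WRITE, &page, &locked);
	if (locked)
		mmap_read_unlock(mm);
	if (ret != 1)
		return ret < 0 ? ret : -EFAULT;

	/* ... access the remote page ... */
	unpin_user_page(page);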
3533 | ||
3534 | /** | |
3535 | * pin_user_pages() - pin user pages in memory for use by other devices | |
3536 | * | |
3faa52c0 JH |
3537 | * @start: starting user address |
3538 | * @nr_pages: number of pages from start to pin | |
3539 | * @gup_flags: flags modifying lookup behaviour | |
3540 | * @pages: array that receives pointers to the pages pinned. | |
0768c8de | 3541 | * Should be at least nr_pages long. |
3faa52c0 JH |
3542 | * |
3543 | * Nearly the same as get_user_pages(), except that FOLL_TOUCH is not set, and | |
3544 | * FOLL_PIN is set. | |
3545 | * | |
3546 | * FOLL_PIN means that the pages must be released via unpin_user_page(). Please | |
72ef5e52 | 3547 | * see Documentation/core-api/pin_user_pages.rst for details. |
c8070b78 DH |
3548 | * |
3549 | * Note that if a zero_page is amongst the returned pages, it will not have | |
3550 | * pins in it and unpin_user_page*() will not remove pins from it. | |
eddb1c22 JH |
3551 | */ |
3552 | long pin_user_pages(unsigned long start, unsigned long nr_pages, | |
4c630f30 | 3553 | unsigned int gup_flags, struct page **pages) |
eddb1c22 | 3554 | { |
9a863a6a JG |
3555 | int locked = 1; |
3556 | ||
b2cac248 | 3557 | if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_PIN)) |
d64e2dbc | 3558 | return 0; |
64019a2e | 3559 | return __gup_longterm_locked(current->mm, start, nr_pages, |
b2cac248 | 3560 | pages, &locked, gup_flags); |
eddb1c22 JH |
3561 | } |
3562 | EXPORT_SYMBOL(pin_user_pages); | |
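A hedged sketch of a long-term pin via this interface (uaddr is hypothetical); unlike the *_fast and *_unlocked variants, the caller is expected to hold mmap_read_lock() across the call, as mm/gup_test.c does for its pin tests.

	struct page *pages[8];
	long pinned;

	mmap_read_lock(current->mm);
	pinned = pin_user_pages(uaddr, ARRAY_SIZE(pages),
				FOLL_WRITE | FOLL_LONGTERM, pages);
	mmap_read_unlock(current->mm);
	if (pinned < 0)
		return pinned;

	/* ... hand the pages to a device for long-lived DMA ... */

	unpin_user_pages(pages, pinned);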
91429023 JH |
3563 | |
3564 | /* | |
3565 | * pin_user_pages_unlocked() is the FOLL_PIN variant of | |
3566 | * get_user_pages_unlocked(). Behavior is the same, except that this one sets | |
3567 | * FOLL_PIN and rejects FOLL_GET. | |
c8070b78 DH |
3568 | * |
3569 | * Note that if a zero_page is amongst the returned pages, it will not have | |
3570 | * pins in it and unpin_user_page*() will not remove pins from it. | |
91429023 JH |
3571 | */ |
3572 | long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages, | |
3573 | struct page **pages, unsigned int gup_flags) | |
3574 | { | |
b2a72dff | 3575 | int locked = 0; |
91429023 | 3576 | |
b2cac248 | 3577 | if (!is_valid_gup_args(pages, NULL, &gup_flags, |
f04740f5 | 3578 | FOLL_PIN | FOLL_TOUCH | FOLL_UNLOCKABLE)) |
d64e2dbc | 3579 | return 0; |
0768c8de | 3580 | |
b2cac248 | 3581 | return __gup_longterm_locked(current->mm, start, nr_pages, pages, |
b2a72dff | 3582 | &locked, gup_flags); |
91429023 JH |
3583 | } |
3584 | EXPORT_SYMBOL(pin_user_pages_unlocked); | |

89c1905d VK |
3585 | |
3586 | /** | |
3587 | * memfd_pin_folios() - pin folios associated with a memfd | |
3588 | * @memfd: the memfd whose folios are to be pinned | |
3589 | * @start: the first memfd offset | |
3590 | * @end: the last memfd offset (inclusive) | |
3591 | * @folios: array that receives pointers to the folios pinned | |
3592 | * @max_folios: maximum number of entries in @folios | |
3593 | * @offset: the offset into the first folio | |
3594 | * | |
3595 | * Attempt to pin folios associated with a memfd in the contiguous range | |
3596 | * [start, end]. Given that a memfd is either backed by shmem or hugetlb, | |
3597 | * the folios can either be found in the page cache or need to be allocated | |
3598 | * if necessary. Once the folios are located, they are all pinned via | |
3599 | * FOLL_PIN and @offset is populated with the offset into the first folio. | 
3600 | * And, eventually, these pinned folios must be released either using | |
3601 | * unpin_folios() or unpin_folio(). | |
3602 | * | |
3603 | * It must be noted that the folios may be pinned for an indefinite amount | 
3604 | * of time. In most cases, how long they stay pinned is controlled by | 
3605 | * userspace. This behavior is effectively the | 
3606 | * same as using FOLL_LONGTERM with other GUP APIs. | |
3607 | * | |
3608 | * Returns number of folios pinned, which could be less than @max_folios | |
3609 | * as it depends on the folio sizes that cover the range [start, end]. | |
3610 | * If no folios were pinned, it returns -errno. | |
3611 | */ | |
3612 | long memfd_pin_folios(struct file *memfd, loff_t start, loff_t end, | |
3613 | struct folio **folios, unsigned int max_folios, | |
3614 | pgoff_t *offset) | |
3615 | { | |
3616 | unsigned int flags, nr_folios, nr_found; | |
3617 | unsigned int i, pgshift = PAGE_SHIFT; | |
3618 | pgoff_t start_idx, end_idx, next_idx; | |
3619 | struct folio *folio = NULL; | |
3620 | struct folio_batch fbatch; | |
3621 | struct hstate *h; | |
3622 | long ret = -EINVAL; | |
3623 | ||
3624 | if (start < 0 || start > end || !max_folios) | |
3625 | return -EINVAL; | |
3626 | ||
3627 | if (!memfd) | |
3628 | return -EINVAL; | |
3629 | ||
3630 | if (!shmem_file(memfd) && !is_file_hugepages(memfd)) | |
3631 | return -EINVAL; | |
3632 | ||
3633 | if (end >= i_size_read(file_inode(memfd))) | |
3634 | return -EINVAL; | |
3635 | ||
3636 | if (is_file_hugepages(memfd)) { | |
3637 | h = hstate_file(memfd); | |
3638 | pgshift = huge_page_shift(h); | |
3639 | } | |
3640 | ||
3641 | flags = memalloc_pin_save(); | |
3642 | do { | |
3643 | nr_folios = 0; | |
3644 | start_idx = start >> pgshift; | |
3645 | end_idx = end >> pgshift; | |
3646 | if (is_file_hugepages(memfd)) { | |
3647 | start_idx <<= huge_page_order(h); | |
3648 | end_idx <<= huge_page_order(h); | |
3649 | } | |
3650 | ||
3651 | folio_batch_init(&fbatch); | |
3652 | while (start_idx <= end_idx && nr_folios < max_folios) { | |
3653 | /* | |
3654 | * In most cases, we should be able to find the folios | |
3655 | * in the page cache. If we cannot find them for some | |
3656 | * reason, we try to allocate them and add them to the | |
3657 | * page cache. | |
3658 | */ | |
3659 | nr_found = filemap_get_folios_contig(memfd->f_mapping, | |
3660 | &start_idx, | |
3661 | end_idx, | |
3662 | &fbatch); | |
3663 | if (folio) { | |
3664 | folio_put(folio); | |
3665 | folio = NULL; | |
3666 | } | |
3667 | ||
3668 | next_idx = 0; | |
3669 | for (i = 0; i < nr_found; i++) { | |
3670 | /* | |
3671 | * As there can be multiple entries for a | |
3672 | * given folio in the batch returned by | |
3673 | * filemap_get_folios_contig(), the below | |
3674 | * check is to ensure that we pin and return a | |
3675 | * unique set of folios between start and end. | |
3676 | */ | |
3677 | if (next_idx && | |
3678 | next_idx != folio_index(fbatch.folios[i])) | |
3679 | continue; | |
3680 | ||
3681 | folio = page_folio(&fbatch.folios[i]->page); | |
3682 | ||
3683 | if (try_grab_folio(folio, 1, FOLL_PIN)) { | |
3684 | folio_batch_release(&fbatch); | |
3685 | ret = -EINVAL; | |
3686 | goto err; | |
3687 | } | |
3688 | ||
3689 | if (nr_folios == 0) | |
3690 | *offset = offset_in_folio(folio, start); | |
3691 | ||
3692 | folios[nr_folios] = folio; | |
3693 | next_idx = folio_next_index(folio); | |
3694 | if (++nr_folios == max_folios) | |
3695 | break; | |
3696 | } | |
3697 | ||
3698 | folio = NULL; | |
3699 | folio_batch_release(&fbatch); | |
3700 | if (!nr_found) { | |
3701 | folio = memfd_alloc_folio(memfd, start_idx); | |
3702 | if (IS_ERR(folio)) { | |
3703 | ret = PTR_ERR(folio); | |
3704 | if (ret != -EEXIST) | |
3705 | goto err; | |
3706 | } | |
3707 | } | |
3708 | } | |
3709 | ||
3710 | ret = check_and_migrate_movable_folios(nr_folios, folios); | |
3711 | } while (ret == -EAGAIN); | |
3712 | ||
3713 | memalloc_pin_restore(flags); | |
3714 | return ret ? ret : nr_folios; | |
3715 | err: | |
3716 | memalloc_pin_restore(flags); | |
3717 | unpin_folios(folios, nr_folios); | |
3718 | ||
3719 | return ret; | |
3720 | } | |
3721 | EXPORT_SYMBOL_GPL(memfd_pin_folios); |
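A hedged usage sketch (memfd_file, the 2 MB range, and the array size are all hypothetical); the pinned folios are later released with unpin_folios():

	struct folio *folios[16];
	pgoff_t off;
	long nr;

	/* Pin the folios backing memfd offsets [0, SZ_2M - 1]. */
	nr = memfd_pin_folios(memfd_file, 0, SZ_2M - 1, folios,
			      ARRAY_SIZE(folios), &off);
	if (nr < 0)
		return nr;

	/* folios[0] covers memfd offset 0 at intra-folio offset 'off'. */

	unpin_folios(folios, nr);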