#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/spinlock.h>

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include <linux/sched.h>
#include <linux/rwsem.h>
#include <linux/hugetlb.h>
#include <asm/pgtable.h>

#include "internal.h"

static struct page *no_page_table(struct vm_area_struct *vma,
		unsigned int flags)
{
	/*
	 * When core dumping an enormous anonymous area that nobody
	 * has touched so far, we don't want to allocate unnecessary pages or
	 * page tables. Return error instead of NULL to skip handle_mm_fault,
	 * then get_dump_page() will return NULL to leave a hole in the dump.
	 * But we can only make this optimization where a hole would surely
	 * be zero-filled if handle_mm_fault() actually did handle it.
	 */
	if ((flags & FOLL_DUMP) && (!vma->vm_ops || !vma->vm_ops->fault))
		return ERR_PTR(-EFAULT);
	return NULL;
}

static struct page *follow_page_pte(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmd, unsigned int flags)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;
	spinlock_t *ptl;
	pte_t *ptep, pte;

retry:
	if (unlikely(pmd_bad(*pmd)))
		return no_page_table(vma, flags);

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	pte = *ptep;
	if (!pte_present(pte)) {
		swp_entry_t entry;
		/*
		 * KSM's break_ksm() relies upon recognizing a ksm page
		 * even while it is being migrated, so for that case we
		 * need migration_entry_wait().
		 */
		if (likely(!(flags & FOLL_MIGRATION)))
			goto no_page;
		if (pte_none(pte))
			goto no_page;
		entry = pte_to_swp_entry(pte);
		if (!is_migration_entry(entry))
			goto no_page;
		pte_unmap_unlock(ptep, ptl);
		migration_entry_wait(mm, pmd, address);
		goto retry;
	}
	if ((flags & FOLL_NUMA) && pte_protnone(pte))
		goto no_page;
	if ((flags & FOLL_WRITE) && !pte_write(pte)) {
		pte_unmap_unlock(ptep, ptl);
		return NULL;
	}

	page = vm_normal_page(vma, address, pte);
	if (unlikely(!page)) {
		if ((flags & FOLL_DUMP) ||
		    !is_zero_pfn(pte_pfn(pte)))
			goto bad_page;
		page = pte_page(pte);
	}

	if (flags & FOLL_GET)
		get_page_foll(page);
	if (flags & FOLL_TOUCH) {
		if ((flags & FOLL_WRITE) &&
		    !pte_dirty(pte) && !PageDirty(page))
			set_page_dirty(page);
		/*
		 * pte_mkyoung() would be more correct here, but atomic care
		 * is needed to avoid losing the dirty bit: it is easier to use
		 * mark_page_accessed().
		 */
		mark_page_accessed(page);
	}
	if ((flags & FOLL_POPULATE) && (vma->vm_flags & VM_LOCKED)) {
		/*
		 * The preliminary mapping check is mainly to avoid the
		 * pointless overhead of lock_page on the ZERO_PAGE
		 * which might bounce very badly if there is contention.
		 *
		 * If the page is already locked, we don't need to
		 * handle it now - vmscan will handle it later if and
		 * when it attempts to reclaim the page.
		 */
		if (page->mapping && trylock_page(page)) {
			lru_add_drain(); /* push cached pages to LRU */
			/*
			 * Because we lock page here, and migration is
			 * blocked by the pte's page reference, and we
			 * know the page is still mapped, we don't even
			 * need to check for file-cache page truncation.
			 */
			mlock_vma_page(page);
			unlock_page(page);
		}
	}
	pte_unmap_unlock(ptep, ptl);
	return page;
bad_page:
	pte_unmap_unlock(ptep, ptl);
	return ERR_PTR(-EFAULT);

no_page:
	pte_unmap_unlock(ptep, ptl);
	if (!pte_none(pte))
		return NULL;
	return no_page_table(vma, flags);
}

/**
 * follow_page_mask - look up a page descriptor from a user-virtual address
 * @vma: vm_area_struct mapping @address
 * @address: virtual address to look up
 * @flags: flags modifying lookup behaviour
 * @page_mask: on output, *page_mask is set according to the size of the page
 *
 * @flags can have FOLL_ flags set, defined in <linux/mm.h>
 *
 * Returns the mapped (struct page *), %NULL if no mapping exists, or
 * an error pointer if there is a mapping to something not represented
 * by a page descriptor (see also vm_normal_page()).
 */
struct page *follow_page_mask(struct vm_area_struct *vma,
			      unsigned long address, unsigned int flags,
			      unsigned int *page_mask)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	*page_mask = 0;

	page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
	if (!IS_ERR(page)) {
		BUG_ON(flags & FOLL_GET);
		return page;
	}

	pgd = pgd_offset(mm, address);
	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		return no_page_table(vma, flags);

	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		return no_page_table(vma, flags);
	if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
		page = follow_huge_pud(mm, address, pud, flags);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if (unlikely(pud_bad(*pud)))
		return no_page_table(vma, flags);

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return no_page_table(vma, flags);
	if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) {
		page = follow_huge_pmd(mm, address, pmd, flags);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if ((flags & FOLL_NUMA) && pmd_protnone(*pmd))
		return no_page_table(vma, flags);
	if (pmd_trans_huge(*pmd)) {
		if (flags & FOLL_SPLIT) {
			split_huge_page_pmd(vma, address, pmd);
			return follow_page_pte(vma, address, pmd, flags);
		}
		ptl = pmd_lock(mm, pmd);
		if (likely(pmd_trans_huge(*pmd))) {
			if (unlikely(pmd_trans_splitting(*pmd))) {
				spin_unlock(ptl);
				wait_split_huge_page(vma->anon_vma, pmd);
			} else {
				page = follow_trans_huge_pmd(vma, address,
							     pmd, flags);
				spin_unlock(ptl);
				*page_mask = HPAGE_PMD_NR - 1;
				return page;
			}
		} else
			spin_unlock(ptl);
	}
	return follow_page_pte(vma, address, pmd, flags);
}

static int get_gate_page(struct mm_struct *mm, unsigned long address,
		unsigned int gup_flags, struct vm_area_struct **vma,
		struct page **page)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret = -EFAULT;

	/* user gate pages are read-only */
	if (gup_flags & FOLL_WRITE)
		return -EFAULT;
	if (address > TASK_SIZE)
		pgd = pgd_offset_k(address);
	else
		pgd = pgd_offset_gate(mm, address);
	BUG_ON(pgd_none(*pgd));
	pud = pud_offset(pgd, address);
	BUG_ON(pud_none(*pud));
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return -EFAULT;
	VM_BUG_ON(pmd_trans_huge(*pmd));
	pte = pte_offset_map(pmd, address);
	if (pte_none(*pte))
		goto unmap;
	*vma = get_gate_vma(mm);
	if (!page)
		goto out;
	*page = vm_normal_page(*vma, address, *pte);
	if (!*page) {
		if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte)))
			goto unmap;
		*page = pte_page(*pte);
	}
	get_page(*page);
out:
	ret = 0;
unmap:
	pte_unmap(pte);
	return ret;
}

/*
 * mmap_sem must be held on entry. If @nonblocking != NULL and
 * *@flags does not include FOLL_NOWAIT, the mmap_sem may be released.
 * If it is, *@nonblocking will be set to 0 and -EBUSY returned.
 */
static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
		unsigned long address, unsigned int *flags, int *nonblocking)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned int fault_flags = 0;
	int ret;

	/* For mm_populate(), just skip the stack guard page. */
	if ((*flags & FOLL_POPULATE) &&
			(stack_guard_page_start(vma, address) ||
			 stack_guard_page_end(vma, address + PAGE_SIZE)))
		return -ENOENT;
	if (*flags & FOLL_WRITE)
		fault_flags |= FAULT_FLAG_WRITE;
	if (nonblocking)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY;
	if (*flags & FOLL_NOWAIT)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
	if (*flags & FOLL_TRIED) {
		VM_WARN_ON_ONCE(fault_flags & FAULT_FLAG_ALLOW_RETRY);
		fault_flags |= FAULT_FLAG_TRIED;
	}

	ret = handle_mm_fault(mm, vma, address, fault_flags);
	if (ret & VM_FAULT_ERROR) {
		if (ret & VM_FAULT_OOM)
			return -ENOMEM;
		if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
			return *flags & FOLL_HWPOISON ? -EHWPOISON : -EFAULT;
		if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
			return -EFAULT;
		BUG();
	}

	if (tsk) {
		if (ret & VM_FAULT_MAJOR)
			tsk->maj_flt++;
		else
			tsk->min_flt++;
	}

	if (ret & VM_FAULT_RETRY) {
		if (nonblocking)
			*nonblocking = 0;
		return -EBUSY;
	}

	/*
	 * The VM_FAULT_WRITE bit tells us that do_wp_page has broken COW when
	 * necessary, even if maybe_mkwrite decided not to set pte_write. We
	 * can thus safely do subsequent page lookups as if they were reads.
	 * But only do so when looping for pte_write is futile: in some cases
	 * userspace may also be wanting to write to the gotten user page,
	 * which a read fault here might prevent (a readonly page might get
	 * reCOWed by userspace write).
	 */
	if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
		*flags &= ~FOLL_WRITE;
	return 0;
}

static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
{
	vm_flags_t vm_flags = vma->vm_flags;

	if (vm_flags & (VM_IO | VM_PFNMAP))
		return -EFAULT;

	if (gup_flags & FOLL_WRITE) {
		if (!(vm_flags & VM_WRITE)) {
			if (!(gup_flags & FOLL_FORCE))
				return -EFAULT;
			/*
			 * We used to let the write,force case do COW in a
			 * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could
			 * set a breakpoint in a read-only mapping of an
			 * executable, without corrupting the file (yet only
			 * when that file had been opened for writing!).
			 * Anon pages in shared mappings are surprising: now
			 * just reject it.
			 */
			if (!is_cow_mapping(vm_flags)) {
				WARN_ON_ONCE(vm_flags & VM_MAYWRITE);
				return -EFAULT;
			}
		}
	} else if (!(vm_flags & VM_READ)) {
		if (!(gup_flags & FOLL_FORCE))
			return -EFAULT;
		/*
		 * Is there actually any vma we can reach here which does not
		 * have VM_MAYREAD set?
		 */
		if (!(vm_flags & VM_MAYREAD))
			return -EFAULT;
	}
	return 0;
}

/**
 * __get_user_pages() - pin user pages in memory
 * @tsk: task_struct of target task
 * @mm: mm_struct of target mm
 * @start: starting user address
 * @nr_pages: number of pages from start to pin
 * @gup_flags: flags modifying pin behaviour
 * @pages: array that receives pointers to the pages pinned.
 *	Should be at least nr_pages long. Or NULL, if caller
 *	only intends to ensure the pages are faulted in.
 * @vmas: array of pointers to vmas corresponding to each page.
 *	Or NULL if the caller does not require them.
 * @nonblocking: whether waiting for disk IO or mmap_sem contention
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno. Each page returned must be released
 * with a put_page() call when it is finished with. vmas will only
 * remain valid while mmap_sem is held.
 *
 * Must be called with mmap_sem held. It may be released. See below.
 *
 * __get_user_pages walks a process's page tables and takes a reference to
 * each struct page that each user address corresponds to at a given
 * instant. That is, it takes the page that would be accessed if a user
 * thread accesses the given user virtual address at that instant.
 *
 * This does not guarantee that the page exists in the user mappings when
 * __get_user_pages returns, and there may even be a completely different
 * page there in some cases (eg. if mmapped pagecache has been invalidated
 * and subsequently re faulted). However it does guarantee that the page
 * won't be freed completely. And mostly callers simply care that the page
 * contains data that was valid *at some point in time*. Typically, an IO
 * or similar operation cannot guarantee anything stronger anyway because
 * locks can't be held over the syscall boundary.
 *
 * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
 * the page is written to, set_page_dirty (or set_page_dirty_lock, as
 * appropriate) must be called after the page is finished with, and
 * before put_page is called.
 *
 * If @nonblocking != NULL, __get_user_pages will not wait for disk IO
 * or mmap_sem contention, and if waiting is needed to pin all pages,
 * *@nonblocking will be set to 0. Further, if @gup_flags does not
 * include FOLL_NOWAIT, the mmap_sem will be released via up_read() in
 * this case.
 *
 * A caller using such a combination of @nonblocking and @gup_flags
 * must therefore hold the mmap_sem for reading only, and recognize
 * when it's been released. Otherwise, it must be held for either
 * reading or writing and will not be released.
 *
 * In most cases, get_user_pages or get_user_pages_fast should be used
 * instead of __get_user_pages. __get_user_pages should be used only if
 * you need some special @gup_flags.
 */
long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long start, unsigned long nr_pages,
		unsigned int gup_flags, struct page **pages,
		struct vm_area_struct **vmas, int *nonblocking)
{
	long i = 0;
	unsigned int page_mask;
	struct vm_area_struct *vma = NULL;

	if (!nr_pages)
		return 0;

	VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));

	/*
	 * If FOLL_FORCE is set then do not force a full fault as the hinting
	 * fault information is unrelated to the reference behaviour of a task
	 * using the address space
	 */
	if (!(gup_flags & FOLL_FORCE))
		gup_flags |= FOLL_NUMA;

	do {
		struct page *page;
		unsigned int foll_flags = gup_flags;
		unsigned int page_increm;

		/* first iteration or cross vma bound */
		if (!vma || start >= vma->vm_end) {
			vma = find_extend_vma(mm, start);
			if (!vma && in_gate_area(mm, start)) {
				int ret;
				ret = get_gate_page(mm, start & PAGE_MASK,
						gup_flags, &vma,
						pages ? &pages[i] : NULL);
				if (ret)
					return i ? : ret;
				page_mask = 0;
				goto next_page;
			}

			if (!vma || check_vma_flags(vma, gup_flags))
				return i ? : -EFAULT;
			if (is_vm_hugetlb_page(vma)) {
				i = follow_hugetlb_page(mm, vma, pages, vmas,
						&start, &nr_pages, i,
						gup_flags);
				continue;
			}
		}
retry:
		/*
		 * If we have a pending SIGKILL, don't keep faulting pages and
		 * potentially allocating memory.
		 */
		if (unlikely(fatal_signal_pending(current)))
			return i ? i : -ERESTARTSYS;
		cond_resched();
		page = follow_page_mask(vma, start, foll_flags, &page_mask);
		if (!page) {
			int ret;
			ret = faultin_page(tsk, vma, start, &foll_flags,
					nonblocking);
			switch (ret) {
			case 0:
				goto retry;
			case -EFAULT:
			case -ENOMEM:
			case -EHWPOISON:
				return i ? i : ret;
			case -EBUSY:
				return i;
			case -ENOENT:
				goto next_page;
			}
			BUG();
		}
		if (IS_ERR(page))
			return i ? i : PTR_ERR(page);
		if (pages) {
			pages[i] = page;
			flush_anon_page(vma, page, start);
			flush_dcache_page(page);
			page_mask = 0;
		}
next_page:
		if (vmas) {
			vmas[i] = vma;
			page_mask = 0;
		}
		page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask);
		if (page_increm > nr_pages)
			page_increm = nr_pages;
		i += page_increm;
		start += page_increm * PAGE_SIZE;
		nr_pages -= page_increm;
	} while (nr_pages);
	return i;
}
EXPORT_SYMBOL(__get_user_pages);

/*
 * fixup_user_fault() - manually resolve a user page fault
 * @tsk: the task_struct to use for page fault accounting, or
 *	NULL if faults are not to be recorded.
 * @mm: mm_struct of target mm
 * @address: user address
 * @fault_flags: flags to pass down to handle_mm_fault()
 *
 * This is meant to be called in the specific scenario where for locking reasons
 * we try to access user memory in atomic context (within a pagefault_disable()
 * section), this returns -EFAULT, and we want to resolve the user fault before
 * trying again.
 *
 * Typically this is meant to be used by the futex code.
 *
 * The main difference with get_user_pages() is that this function will
 * unconditionally call handle_mm_fault() which will in turn perform all the
 * necessary SW fixup of the dirty and young bits in the PTE, while
 * get_user_pages() only guarantees to update these in the struct page.
 *
 * This is important for some architectures where those bits also gate the
 * access permission to the page because they are maintained in software. On
 * such architectures, gup() will not be enough to make a subsequent access
 * succeed.
 *
 * This has the same semantics wrt the @mm->mmap_sem as does filemap_fault().
 */
int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
		     unsigned long address, unsigned int fault_flags)
{
	struct vm_area_struct *vma;
	vm_flags_t vm_flags;
	int ret;

	vma = find_extend_vma(mm, address);
	if (!vma || address < vma->vm_start)
		return -EFAULT;

	vm_flags = (fault_flags & FAULT_FLAG_WRITE) ? VM_WRITE : VM_READ;
	if (!(vm_flags & vma->vm_flags))
		return -EFAULT;

	ret = handle_mm_fault(mm, vma, address, fault_flags);
	if (ret & VM_FAULT_ERROR) {
		if (ret & VM_FAULT_OOM)
			return -ENOMEM;
		if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
			return -EHWPOISON;
		if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
			return -EFAULT;
		BUG();
	}
	if (tsk) {
		if (ret & VM_FAULT_MAJOR)
			tsk->maj_flt++;
		else
			tsk->min_flt++;
	}
	return 0;
}

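/*
 * Illustrative sketch, not part of upstream gup.c: the futex-style pattern
 * described in the comment above fixup_user_fault().  A value is read with
 * the pagefault handler disabled; on -EFAULT the fault is resolved with
 * fixup_user_fault() under mmap_sem and the access is retried.  The function
 * name is made up for the example, and a real caller would live in a file
 * that includes <linux/uaccess.h>.
 */
#if 0
static int example_read_u32_atomic(u32 __user *uaddr, u32 *val)
{
	int ret;

	do {
		pagefault_disable();
		ret = __get_user(*val, uaddr);	/* must not sleep here */
		pagefault_enable();
		if (!ret)
			return 0;
		/* Resolve the fault outside the atomic section, then retry. */
		down_read(&current->mm->mmap_sem);
		ret = fixup_user_fault(current, current->mm,
				       (unsigned long)uaddr, 0);
		up_read(&current->mm->mmap_sem);
	} while (!ret);

	return ret;
}
#endif
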
static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
						struct mm_struct *mm,
						unsigned long start,
						unsigned long nr_pages,
						int write, int force,
						struct page **pages,
						struct vm_area_struct **vmas,
						int *locked, bool notify_drop,
						unsigned int flags)
{
	long ret, pages_done;
	bool lock_dropped;

	if (locked) {
		/* if VM_FAULT_RETRY can be returned, vmas become invalid */
		BUG_ON(vmas);
		/* check caller initialized locked */
		BUG_ON(*locked != 1);
	}

	if (pages)
		flags |= FOLL_GET;
	if (write)
		flags |= FOLL_WRITE;
	if (force)
		flags |= FOLL_FORCE;

	pages_done = 0;
	lock_dropped = false;
	for (;;) {
		ret = __get_user_pages(tsk, mm, start, nr_pages, flags, pages,
				       vmas, locked);
		if (!locked)
			/* VM_FAULT_RETRY couldn't trigger, bypass */
			return ret;

		/* VM_FAULT_RETRY cannot return errors */
		if (!*locked) {
			BUG_ON(ret < 0);
			BUG_ON(ret >= nr_pages);
		}

		if (!pages)
			/* If it's a prefault don't insist harder */
			return ret;

		if (ret > 0) {
			nr_pages -= ret;
			pages_done += ret;
			if (!nr_pages)
				break;
		}
		if (*locked) {
			/* VM_FAULT_RETRY didn't trigger */
			if (!pages_done)
				pages_done = ret;
			break;
		}
		/* VM_FAULT_RETRY triggered, so seek to the faulting offset */
		pages += ret;
		start += ret << PAGE_SHIFT;

		/*
		 * Repeat on the address that fired VM_FAULT_RETRY
		 * without FAULT_FLAG_ALLOW_RETRY but with
		 * FAULT_FLAG_TRIED.
		 */
		*locked = 1;
		lock_dropped = true;
		down_read(&mm->mmap_sem);
		ret = __get_user_pages(tsk, mm, start, 1, flags | FOLL_TRIED,
				       pages, NULL, NULL);
		if (ret != 1) {
			BUG_ON(ret > 1);
			if (!pages_done)
				pages_done = ret;
			break;
		}
		nr_pages--;
		pages_done++;
		if (!nr_pages)
			break;
		pages++;
		start += PAGE_SIZE;
	}
	if (notify_drop && lock_dropped && *locked) {
		/*
		 * We must let the caller know we temporarily dropped the lock
		 * and so the critical section protected by it was lost.
		 */
		up_read(&mm->mmap_sem);
		*locked = 0;
	}
	return pages_done;
}

/*
 * We can leverage the VM_FAULT_RETRY functionality in the page fault
 * paths better by using either get_user_pages_locked() or
 * get_user_pages_unlocked().
 *
 * get_user_pages_locked() is suitable to replace the form:
 *
 *      down_read(&mm->mmap_sem);
 *      do_something()
 *      get_user_pages(tsk, mm, ..., pages, NULL);
 *      up_read(&mm->mmap_sem);
 *
 *  to:
 *
 *      int locked = 1;
 *      down_read(&mm->mmap_sem);
 *      do_something()
 *      get_user_pages_locked(tsk, mm, ..., pages, &locked);
 *      if (locked)
 *          up_read(&mm->mmap_sem);
 */
long get_user_pages_locked(struct task_struct *tsk, struct mm_struct *mm,
			   unsigned long start, unsigned long nr_pages,
			   int write, int force, struct page **pages,
			   int *locked)
{
	return __get_user_pages_locked(tsk, mm, start, nr_pages, write, force,
				       pages, NULL, locked, true, FOLL_TOUCH);
}
EXPORT_SYMBOL(get_user_pages_locked);

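/*
 * Illustrative sketch, not part of upstream gup.c: a compilable version of
 * the conversion shown in the comment above get_user_pages_locked().  The
 * caller takes mmap_sem for read and must check "locked" on the way out,
 * because the call may have dropped the semaphore while waiting for a fault.
 * The function name is made up for the example.
 */
#if 0
static long example_pin_with_locked(unsigned long start, unsigned long nr,
				    struct page **pages)
{
	struct mm_struct *mm = current->mm;
	int locked = 1;
	long ret;

	down_read(&mm->mmap_sem);
	/* ... work that needs mmap_sem held ... */
	ret = get_user_pages_locked(current, mm, start, nr,
				    0 /* write */, 0 /* force */,
				    pages, &locked);
	if (locked)
		up_read(&mm->mmap_sem);
	/* if !locked, mmap_sem was already released inside the call */
	return ret;
}
#endif
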
/*
 * Same as get_user_pages_unlocked(...., FOLL_TOUCH) but it allows to
 * pass additional gup_flags as last parameter (like FOLL_HWPOISON).
 *
 * NOTE: here FOLL_TOUCH is not set implicitly and must be set by the
 * caller if required (just like with __get_user_pages). "FOLL_GET",
 * "FOLL_WRITE" and "FOLL_FORCE" are set implicitly as needed
 * according to the parameters "pages", "write", "force"
 * respectively.
 */
__always_inline long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
					       unsigned long start, unsigned long nr_pages,
					       int write, int force, struct page **pages,
					       unsigned int gup_flags)
{
	long ret;
	int locked = 1;
	down_read(&mm->mmap_sem);
	ret = __get_user_pages_locked(tsk, mm, start, nr_pages, write, force,
				      pages, NULL, &locked, false, gup_flags);
	if (locked)
		up_read(&mm->mmap_sem);
	return ret;
}
EXPORT_SYMBOL(__get_user_pages_unlocked);

/*
 * get_user_pages_unlocked() is suitable to replace the form:
 *
 *      down_read(&mm->mmap_sem);
 *      get_user_pages(tsk, mm, ..., pages, NULL);
 *      up_read(&mm->mmap_sem);
 *
 *  with:
 *
 *      get_user_pages_unlocked(tsk, mm, ..., pages);
 *
 * It is functionally equivalent to get_user_pages_fast so
 * get_user_pages_fast should be used instead, if the two parameters
 * "tsk" and "mm" are respectively equal to current and current->mm,
 * or if "force" shall be set to 1 (get_user_pages_fast misses the
 * "force" parameter).
 */
long get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
			     unsigned long start, unsigned long nr_pages,
			     int write, int force, struct page **pages)
{
	return __get_user_pages_unlocked(tsk, mm, start, nr_pages, write,
					 force, pages, FOLL_TOUCH);
}
EXPORT_SYMBOL(get_user_pages_unlocked);

/*
 * get_user_pages() - pin user pages in memory
 * @tsk: the task_struct to use for page fault accounting, or
 *	NULL if faults are not to be recorded.
 * @mm: mm_struct of target mm
 * @start: starting user address
 * @nr_pages: number of pages from start to pin
 * @write: whether pages will be written to by the caller
 * @force: whether to force access even when user mapping is currently
 *	protected (but never forces write access to shared mapping).
 * @pages: array that receives pointers to the pages pinned.
 *	Should be at least nr_pages long. Or NULL, if caller
 *	only intends to ensure the pages are faulted in.
 * @vmas: array of pointers to vmas corresponding to each page.
 *	Or NULL if the caller does not require them.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno. Each page returned must be released
 * with a put_page() call when it is finished with. vmas will only
 * remain valid while mmap_sem is held.
 *
 * Must be called with mmap_sem held for read or write.
 *
 * get_user_pages walks a process's page tables and takes a reference to
 * each struct page that each user address corresponds to at a given
 * instant. That is, it takes the page that would be accessed if a user
 * thread accesses the given user virtual address at that instant.
 *
 * This does not guarantee that the page exists in the user mappings when
 * get_user_pages returns, and there may even be a completely different
 * page there in some cases (eg. if mmapped pagecache has been invalidated
 * and subsequently re faulted). However it does guarantee that the page
 * won't be freed completely. And mostly callers simply care that the page
 * contains data that was valid *at some point in time*. Typically, an IO
 * or similar operation cannot guarantee anything stronger anyway because
 * locks can't be held over the syscall boundary.
 *
 * If write=0, the page must not be written to. If the page is written to,
 * set_page_dirty (or set_page_dirty_lock, as appropriate) must be called
 * after the page is finished with, and before put_page is called.
 *
 * get_user_pages is typically used for fewer-copy IO operations, to get a
 * handle on the memory by some means other than accesses via the user virtual
 * addresses. The pages may be submitted for DMA to devices or accessed via
 * their kernel linear mapping (via the kmap APIs). Care should be taken to
 * use the correct cache flushing APIs.
 *
 * See also get_user_pages_fast, for performance critical applications.
 *
 * get_user_pages should be phased out in favor of
 * get_user_pages_locked|unlocked or get_user_pages_fast. Nothing
 * should use get_user_pages because it cannot pass
 * FAULT_FLAG_ALLOW_RETRY to handle_mm_fault.
 */
long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long start, unsigned long nr_pages, int write,
		int force, struct page **pages, struct vm_area_struct **vmas)
{
	return __get_user_pages_locked(tsk, mm, start, nr_pages, write, force,
				       pages, vmas, NULL, false, FOLL_TOUCH);
}
EXPORT_SYMBOL(get_user_pages);

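/*
 * Illustrative sketch, not part of upstream gup.c: the reference-counting
 * rules spelled out in the comment above get_user_pages().  Pages pinned
 * with write=1 are dirtied with set_page_dirty_lock() after use and released
 * with put_page().  The function name and the page-zeroing payload are made
 * up for the example; a real caller would include <linux/highmem.h> for
 * kmap()/kunmap().
 */
#if 0
static long example_zero_user_pages(unsigned long start, unsigned long nr,
				    struct page **pages)
{
	struct mm_struct *mm = current->mm;
	long got, i;

	down_read(&mm->mmap_sem);
	got = get_user_pages(current, mm, start, nr, 1 /* write */,
			     0 /* force */, pages, NULL);
	up_read(&mm->mmap_sem);
	if (got <= 0)
		return got;

	for (i = 0; i < got; i++) {
		void *kaddr = kmap(pages[i]);

		memset(kaddr, 0, PAGE_SIZE);	/* access via kernel mapping */
		kunmap(pages[i]);
		flush_dcache_page(pages[i]);
		set_page_dirty_lock(pages[i]);	/* written to: dirty before put */
		put_page(pages[i]);
	}
	return got;
}
#endif
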
/**
 * populate_vma_page_range() - populate a range of pages in the vma.
 * @vma: target vma
 * @start: start address
 * @end: end address
 * @nonblocking:
 *
 * This takes care of mlocking the pages too if VM_LOCKED is set.
 *
 * return 0 on success, negative error code on error.
 *
 * vma->vm_mm->mmap_sem must be held.
 *
 * If @nonblocking is NULL, it may be held for read or write and will
 * be unperturbed.
 *
 * If @nonblocking is non-NULL, it must be held for read only and may be
 * released. If it's released, *@nonblocking will be set to 0.
 */
long populate_vma_page_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, int *nonblocking)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long nr_pages = (end - start) / PAGE_SIZE;
	int gup_flags;

	VM_BUG_ON(start & ~PAGE_MASK);
	VM_BUG_ON(end & ~PAGE_MASK);
	VM_BUG_ON_VMA(start < vma->vm_start, vma);
	VM_BUG_ON_VMA(end > vma->vm_end, vma);
	VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);

	gup_flags = FOLL_TOUCH | FOLL_POPULATE;
	/*
	 * We want to touch writable mappings with a write fault in order
	 * to break COW, except for shared mappings because these don't COW
	 * and we would not want to dirty them for nothing.
	 */
	if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
		gup_flags |= FOLL_WRITE;

	/*
	 * We want mlock to succeed for regions that have any permissions
	 * other than PROT_NONE.
	 */
	if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))
		gup_flags |= FOLL_FORCE;

	/*
	 * We made sure addr is within a VMA, so the following will
	 * not result in a stack expansion that recurses back here.
	 */
	return __get_user_pages(current, mm, start, nr_pages, gup_flags,
				NULL, NULL, nonblocking);
}

/*
 * __mm_populate - populate and/or mlock pages within a range of address space.
 *
 * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
 * flags. VMAs must be already marked with the desired vm_flags, and
 * mmap_sem must not be held.
 */
int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
{
	struct mm_struct *mm = current->mm;
	unsigned long end, nstart, nend;
	struct vm_area_struct *vma = NULL;
	int locked = 0;
	long ret = 0;

	VM_BUG_ON(start & ~PAGE_MASK);
	VM_BUG_ON(len != PAGE_ALIGN(len));
	end = start + len;

	for (nstart = start; nstart < end; nstart = nend) {
		/*
		 * We want to fault in pages for [nstart; end) address range.
		 * Find first corresponding VMA.
		 */
		if (!locked) {
			locked = 1;
			down_read(&mm->mmap_sem);
			vma = find_vma(mm, nstart);
		} else if (nstart >= vma->vm_end)
			vma = vma->vm_next;
		if (!vma || vma->vm_start >= end)
			break;
		/*
		 * Set [nstart; nend) to intersection of desired address
		 * range with the first VMA. Also, skip undesirable VMA types.
		 */
		nend = min(end, vma->vm_end);
		if (vma->vm_flags & (VM_IO | VM_PFNMAP))
			continue;
		if (nstart < vma->vm_start)
			nstart = vma->vm_start;
		/*
		 * Now fault in a range of pages. populate_vma_page_range()
		 * double checks the vma flags, so that it won't mlock pages
		 * if the vma was already munlocked.
		 */
		ret = populate_vma_page_range(vma, nstart, nend, &locked);
		if (ret < 0) {
			if (ignore_errors) {
				ret = 0;
				continue;	/* continue at next VMA */
			}
			break;
		}
		nend = nstart + ret * PAGE_SIZE;
		ret = 0;
	}
	if (locked)
		up_read(&mm->mmap_sem);
	return ret;	/* 0 or negative error code */
}

/**
 * get_dump_page() - pin user page in memory while writing it to core dump
 * @addr: user address
 *
 * Returns struct page pointer of user page pinned for dump,
 * to be freed afterwards by page_cache_release() or put_page().
 *
 * Returns NULL on any kind of failure - a hole must then be inserted into
 * the corefile, to preserve alignment with its headers; and also returns
 * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
 * allowing a hole to be left in the corefile to save diskspace.
 *
 * Called without mmap_sem, but after all other threads have been killed.
 */
#ifdef CONFIG_ELF_CORE
struct page *get_dump_page(unsigned long addr)
{
	struct vm_area_struct *vma;
	struct page *page;

	if (__get_user_pages(current, current->mm, addr, 1,
			     FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma,
			     NULL) < 1)
		return NULL;
	flush_cache_page(vma, addr, page_to_pfn(page));
	return page;
}
#endif /* CONFIG_ELF_CORE */

/*
 * Generic RCU Fast GUP
 *
 * get_user_pages_fast attempts to pin user pages by walking the page
 * tables directly and avoids taking locks. Thus the walker needs to be
 * protected from page table pages being freed from under it, and should
 * block any THP splits.
 *
 * One way to achieve this is to have the walker disable interrupts, and
 * rely on IPIs from the TLB flushing code blocking before the page table
 * pages are freed. This is unsuitable for architectures that do not need
 * to broadcast an IPI when invalidating TLBs.
 *
 * Another way to achieve this is to batch up page table containing pages
 * belonging to more than one mm_user, then rcu_sched a callback to free those
 * pages. Disabling interrupts will allow the fast_gup walker to both block
 * the rcu_sched callback, and an IPI that we broadcast for splitting THPs
 * (which is a relatively rare event). The code below adopts this strategy.
 *
 * Before activating this code, please be aware that the following assumptions
 * are currently made:
 *
 *  *) HAVE_RCU_TABLE_FREE is enabled, and tlb_remove_table is used to free
 *      pages containing page tables.
 *
 *  *) THP splits will broadcast an IPI, this can be achieved by overriding
 *      pmdp_splitting_flush.
 *
 *  *) ptes can be read atomically by the architecture.
 *
 *  *) access_ok is sufficient to validate userspace address ranges.
 *
 * The last two assumptions can be relaxed by the addition of helper functions.
 *
 * This code is based heavily on the PowerPC implementation by Nick Piggin.
 */
#ifdef CONFIG_HAVE_GENERIC_RCU_GUP

#ifdef __HAVE_ARCH_PTE_SPECIAL
static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
			 int write, struct page **pages, int *nr)
{
	pte_t *ptep, *ptem;
	int ret = 0;

	ptem = ptep = pte_offset_map(&pmd, addr);
	do {
		/*
		 * In the line below we are assuming that the pte can be read
		 * atomically. If this is not the case for your architecture,
		 * please wrap this in a helper function!
		 *
		 * for an example see gup_get_pte in arch/x86/mm/gup.c
		 */
		pte_t pte = READ_ONCE(*ptep);
		struct page *page;

		/*
		 * Similar to the PMD case below, NUMA hinting must take slow
		 * path using the pte_protnone check.
		 */
		if (!pte_present(pte) || pte_special(pte) ||
			pte_protnone(pte) || (write && !pte_write(pte)))
			goto pte_unmap;

		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
		page = pte_page(pte);

		if (!page_cache_get_speculative(page))
			goto pte_unmap;

		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
			put_page(page);
			goto pte_unmap;
		}

		pages[*nr] = page;
		(*nr)++;

	} while (ptep++, addr += PAGE_SIZE, addr != end);

	ret = 1;

pte_unmap:
	pte_unmap(ptem);
	return ret;
}
#else

/*
 * If we can't determine whether or not a pte is special, then fail immediately
 * for ptes. Note, we can still pin HugeTLB and THP as these are guaranteed not
 * to be special.
 *
 * For a futex to be placed on a THP tail page, get_futex_key requires a
 * __get_user_pages_fast implementation that can pin pages. Thus it's still
 * useful to have gup_huge_pmd even if we can't operate on ptes.
 */
static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
			 int write, struct page **pages, int *nr)
{
	return 0;
}
#endif /* __HAVE_ARCH_PTE_SPECIAL */

static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	struct page *head, *page, *tail;
	int refs;

	if (write && !pmd_write(orig))
		return 0;

	refs = 0;
	head = pmd_page(orig);
	page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	tail = page;
	do {
		VM_BUG_ON_PAGE(compound_head(page) != head, page);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	/*
	 * Any tail pages need their mapcount reference taken before we
	 * return. (This allows the THP code to bump their ref count when
	 * they are split into base pages).
	 */
	while (refs--) {
		if (PageTail(tail))
			get_huge_page_tail(tail);
		tail++;
	}

	return 1;
}

static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	struct page *head, *page, *tail;
	int refs;

	if (write && !pud_write(orig))
		return 0;

	refs = 0;
	head = pud_page(orig);
	page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
	tail = page;
	do {
		VM_BUG_ON_PAGE(compound_head(page) != head, page);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pud_val(orig) != pud_val(*pudp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	while (refs--) {
		if (PageTail(tail))
			get_huge_page_tail(tail);
		tail++;
	}

	return 1;
}

static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
			unsigned long end, int write,
			struct page **pages, int *nr)
{
	int refs;
	struct page *head, *page, *tail;

	if (write && !pgd_write(orig))
		return 0;

	refs = 0;
	head = pgd_page(orig);
	page = head + ((addr & ~PGDIR_MASK) >> PAGE_SHIFT);
	tail = page;
	do {
		VM_BUG_ON_PAGE(compound_head(page) != head, page);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	while (refs--) {
		if (PageTail(tail))
			get_huge_page_tail(tail);
		tail++;
	}

	return 1;
}

static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
		int write, struct page **pages, int *nr)
{
	unsigned long next;
	pmd_t *pmdp;

	pmdp = pmd_offset(&pud, addr);
	do {
		pmd_t pmd = READ_ONCE(*pmdp);

		next = pmd_addr_end(addr, end);
		if (pmd_none(pmd) || pmd_trans_splitting(pmd))
			return 0;

		if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd))) {
			/*
			 * NUMA hinting faults need to be handled in the GUP
			 * slowpath for accounting purposes and so that they
			 * can be serialised against THP migration.
			 */
			if (pmd_protnone(pmd))
				return 0;

			if (!gup_huge_pmd(pmd, pmdp, addr, next, write,
					  pages, nr))
				return 0;

		} else if (unlikely(is_hugepd(__hugepd(pmd_val(pmd))))) {
			/*
			 * architectures can have a different format for the
			 * hugetlbfs pmd than for the THP pmd
			 */
			if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr,
					 PMD_SHIFT, next, write, pages, nr))
				return 0;
		} else if (!gup_pte_range(pmd, addr, next, write, pages, nr))
			return 0;
	} while (pmdp++, addr = next, addr != end);

	return 1;
}

static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
			 int write, struct page **pages, int *nr)
{
	unsigned long next;
	pud_t *pudp;

	pudp = pud_offset(&pgd, addr);
	do {
		pud_t pud = READ_ONCE(*pudp);

		next = pud_addr_end(addr, end);
		if (pud_none(pud))
			return 0;
		if (unlikely(pud_huge(pud))) {
			if (!gup_huge_pud(pud, pudp, addr, next, write,
					  pages, nr))
				return 0;
		} else if (unlikely(is_hugepd(__hugepd(pud_val(pud))))) {
			if (!gup_huge_pd(__hugepd(pud_val(pud)), addr,
					 PUD_SHIFT, next, write, pages, nr))
				return 0;
		} else if (!gup_pmd_range(pud, addr, next, write, pages, nr))
			return 0;
	} while (pudp++, addr = next, addr != end);

	return 1;
}

/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to
 * the regular GUP. It will only return non-negative values.
 */
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
			  struct page **pages)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr, len, end;
	unsigned long next, flags;
	pgd_t *pgdp;
	int nr = 0;

	start &= PAGE_MASK;
	addr = start;
	len = (unsigned long) nr_pages << PAGE_SHIFT;
	end = start + len;

	if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
					start, len)))
		return 0;

	/*
	 * Disable interrupts. We use the nested form as we can already have
	 * interrupts disabled by get_futex_key.
	 *
	 * With interrupts disabled, we block page table pages from being
	 * freed from under us. See mmu_gather_tlb in asm-generic/tlb.h
	 * for more details.
	 *
	 * We do not adopt an rcu_read_lock(.) here as we also want to
	 * block IPIs that come from THPs splitting.
	 */

	local_irq_save(flags);
	pgdp = pgd_offset(mm, addr);
	do {
		pgd_t pgd = READ_ONCE(*pgdp);

		next = pgd_addr_end(addr, end);
		if (pgd_none(pgd))
			break;
		if (unlikely(pgd_huge(pgd))) {
			if (!gup_huge_pgd(pgd, pgdp, addr, next, write,
					  pages, &nr))
				break;
		} else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) {
			if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
					 PGDIR_SHIFT, next, write, pages, &nr))
				break;
		} else if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
			break;
	} while (pgdp++, addr = next, addr != end);
	local_irq_restore(flags);

	return nr;
}

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start: starting user address
 * @nr_pages: number of pages from start to pin
 * @write: whether pages will be written to
 * @pages: array that receives pointers to the pages pinned.
 *	Should be at least nr_pages long.
 *
 * Attempt to pin user pages in memory without taking mm->mmap_sem.
 * If not successful, it will fall back to taking the lock and
 * calling get_user_pages().
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 */
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages)
{
	struct mm_struct *mm = current->mm;
	int nr, ret;

	start &= PAGE_MASK;
	nr = __get_user_pages_fast(start, nr_pages, write, pages);
	ret = nr;

	if (nr < nr_pages) {
		/* Try to get the remaining pages with get_user_pages */
		start += nr << PAGE_SHIFT;
		pages += nr;

		ret = get_user_pages_unlocked(current, mm, start,
					      nr_pages - nr, write, 0, pages);

		/* Have to be a bit careful with return values */
		if (nr > 0) {
			if (ret < 0)
				ret = nr;
			else
				ret += nr;
		}
	}

	return ret;
}

#endif /* CONFIG_HAVE_GENERIC_RCU_GUP */
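
/*
 * Illustrative sketch, not part of upstream gup.c: typical
 * get_user_pages_fast() usage for a read-only, direct-IO style pin.  The
 * caller does not take mmap_sem; the function falls back to the slow path
 * internally when the lockless walk cannot pin everything.  The function
 * name is made up for the example.
 */
#if 0
static int example_pin_buffer_fast(unsigned long uaddr, int nr_pages,
				   struct page **pages)
{
	int got, i;

	got = get_user_pages_fast(uaddr & PAGE_MASK, nr_pages,
				  0 /* write */, pages);
	if (got < 0)
		return got;

	/* ... hand the pages to a device or read them via kmap() ... */

	for (i = 0; i < got; i++)
		put_page(pages[i]);
	return got == nr_pages ? 0 : -EFAULT;
}
#endif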