]> Git Repo - linux.git/blob - fs/userfaultfd.c
mm: vmscan: avoid split during shrink_folio_list()
[linux.git] / fs / userfaultfd.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  fs/userfaultfd.c
4  *
5  *  Copyright (C) 2007  Davide Libenzi <[email protected]>
6  *  Copyright (C) 2008-2009 Red Hat, Inc.
7  *  Copyright (C) 2015  Red Hat, Inc.
8  *
9  *  Some part derived from fs/eventfd.c (anon inode setup) and
10  *  mm/ksm.c (mm hashing).
11  */
12
13 #include <linux/list.h>
14 #include <linux/hashtable.h>
15 #include <linux/sched/signal.h>
16 #include <linux/sched/mm.h>
17 #include <linux/mm.h>
18 #include <linux/mm_inline.h>
19 #include <linux/mmu_notifier.h>
20 #include <linux/poll.h>
21 #include <linux/slab.h>
22 #include <linux/seq_file.h>
23 #include <linux/file.h>
24 #include <linux/bug.h>
25 #include <linux/anon_inodes.h>
26 #include <linux/syscalls.h>
27 #include <linux/userfaultfd_k.h>
28 #include <linux/mempolicy.h>
29 #include <linux/ioctl.h>
30 #include <linux/security.h>
31 #include <linux/hugetlb.h>
32 #include <linux/swapops.h>
33 #include <linux/miscdevice.h>
34
35 static int sysctl_unprivileged_userfaultfd __read_mostly;
36
37 #ifdef CONFIG_SYSCTL
38 static struct ctl_table vm_userfaultfd_table[] = {
39         {
40                 .procname       = "unprivileged_userfaultfd",
41                 .data           = &sysctl_unprivileged_userfaultfd,
42                 .maxlen         = sizeof(sysctl_unprivileged_userfaultfd),
43                 .mode           = 0644,
44                 .proc_handler   = proc_dointvec_minmax,
45                 .extra1         = SYSCTL_ZERO,
46                 .extra2         = SYSCTL_ONE,
47         },
48 };
49 #endif
50
51 static struct kmem_cache *userfaultfd_ctx_cachep __ro_after_init;
52
53 struct userfaultfd_fork_ctx {
54         struct userfaultfd_ctx *orig;
55         struct userfaultfd_ctx *new;
56         struct list_head list;
57 };
58
59 struct userfaultfd_unmap_ctx {
60         struct userfaultfd_ctx *ctx;
61         unsigned long start;
62         unsigned long end;
63         struct list_head list;
64 };
65
66 struct userfaultfd_wait_queue {
67         struct uffd_msg msg;
68         wait_queue_entry_t wq;
69         struct userfaultfd_ctx *ctx;
70         bool waken;
71 };
72
73 struct userfaultfd_wake_range {
74         unsigned long start;
75         unsigned long len;
76 };
77
78 /* internal indication that UFFD_API ioctl was successfully executed */
79 #define UFFD_FEATURE_INITIALIZED                (1u << 31)
80
81 static bool userfaultfd_is_initialized(struct userfaultfd_ctx *ctx)
82 {
83         return ctx->features & UFFD_FEATURE_INITIALIZED;
84 }
85
86 static bool userfaultfd_wp_async_ctx(struct userfaultfd_ctx *ctx)
87 {
88         return ctx && (ctx->features & UFFD_FEATURE_WP_ASYNC);
89 }
90
91 /*
92  * Whether WP_UNPOPULATED is enabled on the uffd context.  It is only
93  * meaningful when userfaultfd_wp()==true on the vma and when it's
94  * anonymous.
95  */
96 bool userfaultfd_wp_unpopulated(struct vm_area_struct *vma)
97 {
98         struct userfaultfd_ctx *ctx = vma->vm_userfaultfd_ctx.ctx;
99
100         if (!ctx)
101                 return false;
102
103         return ctx->features & UFFD_FEATURE_WP_UNPOPULATED;
104 }
105
106 static void userfaultfd_set_vm_flags(struct vm_area_struct *vma,
107                                      vm_flags_t flags)
108 {
109         const bool uffd_wp_changed = (vma->vm_flags ^ flags) & VM_UFFD_WP;
110
111         vm_flags_reset(vma, flags);
112         /*
113          * For shared mappings, we want to enable writenotify while
114          * userfaultfd-wp is enabled (see vma_wants_writenotify()). We'll simply
115          * recalculate vma->vm_page_prot whenever userfaultfd-wp changes.
116          */
117         if ((vma->vm_flags & VM_SHARED) && uffd_wp_changed)
118                 vma_set_page_prot(vma);
119 }
120
121 static int userfaultfd_wake_function(wait_queue_entry_t *wq, unsigned mode,
122                                      int wake_flags, void *key)
123 {
124         struct userfaultfd_wake_range *range = key;
125         int ret;
126         struct userfaultfd_wait_queue *uwq;
127         unsigned long start, len;
128
129         uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
130         ret = 0;
131         /* len == 0 means wake all */
132         start = range->start;
133         len = range->len;
134         if (len && (start > uwq->msg.arg.pagefault.address ||
135                     start + len <= uwq->msg.arg.pagefault.address))
136                 goto out;
137         WRITE_ONCE(uwq->waken, true);
138         /*
139          * The Program-Order guarantees provided by the scheduler
140          * ensure uwq->waken is visible before the task is woken.
141          */
142         ret = wake_up_state(wq->private, mode);
143         if (ret) {
144                 /*
145                  * Wake only once, autoremove behavior.
146                  *
147                  * After the effect of list_del_init is visible to the other
148                  * CPUs, the waitqueue may disappear from under us, see the
149                  * !list_empty_careful() in handle_userfault().
150                  *
151                  * try_to_wake_up() has an implicit smp_mb(), and the
152                  * wq->private is read before calling the extern function
153                  * "wake_up_state" (which in turns calls try_to_wake_up).
154                  */
155                 list_del_init(&wq->entry);
156         }
157 out:
158         return ret;
159 }
160
161 /**
162  * userfaultfd_ctx_get - Acquires a reference to the internal userfaultfd
163  * context.
164  * @ctx: [in] Pointer to the userfaultfd context.
165  */
166 static void userfaultfd_ctx_get(struct userfaultfd_ctx *ctx)
167 {
168         refcount_inc(&ctx->refcount);
169 }
170
171 /**
172  * userfaultfd_ctx_put - Releases a reference to the internal userfaultfd
173  * context.
174  * @ctx: [in] Pointer to userfaultfd context.
175  *
176  * The userfaultfd context reference must have been previously acquired either
177  * with userfaultfd_ctx_get() or userfaultfd_ctx_fdget().
178  */
179 static void userfaultfd_ctx_put(struct userfaultfd_ctx *ctx)
180 {
181         if (refcount_dec_and_test(&ctx->refcount)) {
182                 VM_BUG_ON(spin_is_locked(&ctx->fault_pending_wqh.lock));
183                 VM_BUG_ON(waitqueue_active(&ctx->fault_pending_wqh));
184                 VM_BUG_ON(spin_is_locked(&ctx->fault_wqh.lock));
185                 VM_BUG_ON(waitqueue_active(&ctx->fault_wqh));
186                 VM_BUG_ON(spin_is_locked(&ctx->event_wqh.lock));
187                 VM_BUG_ON(waitqueue_active(&ctx->event_wqh));
188                 VM_BUG_ON(spin_is_locked(&ctx->fd_wqh.lock));
189                 VM_BUG_ON(waitqueue_active(&ctx->fd_wqh));
190                 mmdrop(ctx->mm);
191                 kmem_cache_free(userfaultfd_ctx_cachep, ctx);
192         }
193 }
194
195 static inline void msg_init(struct uffd_msg *msg)
196 {
197         BUILD_BUG_ON(sizeof(struct uffd_msg) != 32);
198         /*
199          * Must use memset to zero out the paddings or kernel data is
200          * leaked to userland.
201          */
202         memset(msg, 0, sizeof(struct uffd_msg));
203 }
204
205 static inline struct uffd_msg userfault_msg(unsigned long address,
206                                             unsigned long real_address,
207                                             unsigned int flags,
208                                             unsigned long reason,
209                                             unsigned int features)
210 {
211         struct uffd_msg msg;
212
213         msg_init(&msg);
214         msg.event = UFFD_EVENT_PAGEFAULT;
215
216         msg.arg.pagefault.address = (features & UFFD_FEATURE_EXACT_ADDRESS) ?
217                                     real_address : address;
218
219         /*
220          * These flags indicate why the userfault occurred:
221          * - UFFD_PAGEFAULT_FLAG_WP indicates a write protect fault.
222          * - UFFD_PAGEFAULT_FLAG_MINOR indicates a minor fault.
223          * - Neither of these flags being set indicates a MISSING fault.
224          *
225          * Separately, UFFD_PAGEFAULT_FLAG_WRITE indicates it was a write
226          * fault. Otherwise, it was a read fault.
227          */
228         if (flags & FAULT_FLAG_WRITE)
229                 msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_WRITE;
230         if (reason & VM_UFFD_WP)
231                 msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_WP;
232         if (reason & VM_UFFD_MINOR)
233                 msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_MINOR;
234         if (features & UFFD_FEATURE_THREAD_ID)
235                 msg.arg.pagefault.feat.ptid = task_pid_vnr(current);
236         return msg;
237 }
238
239 #ifdef CONFIG_HUGETLB_PAGE
240 /*
241  * Same functionality as userfaultfd_must_wait below with modifications for
242  * hugepmd ranges.
243  */
244 static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
245                                               struct vm_fault *vmf,
246                                               unsigned long reason)
247 {
248         struct vm_area_struct *vma = vmf->vma;
249         pte_t *ptep, pte;
250         bool ret = true;
251
252         assert_fault_locked(vmf);
253
254         ptep = hugetlb_walk(vma, vmf->address, vma_mmu_pagesize(vma));
255         if (!ptep)
256                 goto out;
257
258         ret = false;
259         pte = huge_ptep_get(ptep);
260
261         /*
262          * Lockless access: we're in a wait_event so it's ok if it
263          * changes under us.  PTE markers should be handled the same as none
264          * ptes here.
265          */
266         if (huge_pte_none_mostly(pte))
267                 ret = true;
268         if (!huge_pte_write(pte) && (reason & VM_UFFD_WP))
269                 ret = true;
270 out:
271         return ret;
272 }
273 #else
274 static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
275                                               struct vm_fault *vmf,
276                                               unsigned long reason)
277 {
278         return false;   /* should never get here */
279 }
280 #endif /* CONFIG_HUGETLB_PAGE */
281
282 /*
283  * Verify the pagetables are still not ok after having reigstered into
284  * the fault_pending_wqh to avoid userland having to UFFDIO_WAKE any
285  * userfault that has already been resolved, if userfaultfd_read and
286  * UFFDIO_COPY|ZEROPAGE are being run simultaneously on two different
287  * threads.
288  */
289 static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx,
290                                          struct vm_fault *vmf,
291                                          unsigned long reason)
292 {
293         struct mm_struct *mm = ctx->mm;
294         unsigned long address = vmf->address;
295         pgd_t *pgd;
296         p4d_t *p4d;
297         pud_t *pud;
298         pmd_t *pmd, _pmd;
299         pte_t *pte;
300         pte_t ptent;
301         bool ret = true;
302
303         assert_fault_locked(vmf);
304
305         pgd = pgd_offset(mm, address);
306         if (!pgd_present(*pgd))
307                 goto out;
308         p4d = p4d_offset(pgd, address);
309         if (!p4d_present(*p4d))
310                 goto out;
311         pud = pud_offset(p4d, address);
312         if (!pud_present(*pud))
313                 goto out;
314         pmd = pmd_offset(pud, address);
315 again:
316         _pmd = pmdp_get_lockless(pmd);
317         if (pmd_none(_pmd))
318                 goto out;
319
320         ret = false;
321         if (!pmd_present(_pmd) || pmd_devmap(_pmd))
322                 goto out;
323
324         if (pmd_trans_huge(_pmd)) {
325                 if (!pmd_write(_pmd) && (reason & VM_UFFD_WP))
326                         ret = true;
327                 goto out;
328         }
329
330         pte = pte_offset_map(pmd, address);
331         if (!pte) {
332                 ret = true;
333                 goto again;
334         }
335         /*
336          * Lockless access: we're in a wait_event so it's ok if it
337          * changes under us.  PTE markers should be handled the same as none
338          * ptes here.
339          */
340         ptent = ptep_get(pte);
341         if (pte_none_mostly(ptent))
342                 ret = true;
343         if (!pte_write(ptent) && (reason & VM_UFFD_WP))
344                 ret = true;
345         pte_unmap(pte);
346
347 out:
348         return ret;
349 }
350
351 static inline unsigned int userfaultfd_get_blocking_state(unsigned int flags)
352 {
353         if (flags & FAULT_FLAG_INTERRUPTIBLE)
354                 return TASK_INTERRUPTIBLE;
355
356         if (flags & FAULT_FLAG_KILLABLE)
357                 return TASK_KILLABLE;
358
359         return TASK_UNINTERRUPTIBLE;
360 }
361
362 /*
363  * The locking rules involved in returning VM_FAULT_RETRY depending on
364  * FAULT_FLAG_ALLOW_RETRY, FAULT_FLAG_RETRY_NOWAIT and
365  * FAULT_FLAG_KILLABLE are not straightforward. The "Caution"
366  * recommendation in __lock_page_or_retry is not an understatement.
367  *
368  * If FAULT_FLAG_ALLOW_RETRY is set, the mmap_lock must be released
369  * before returning VM_FAULT_RETRY only if FAULT_FLAG_RETRY_NOWAIT is
370  * not set.
371  *
372  * If FAULT_FLAG_ALLOW_RETRY is set but FAULT_FLAG_KILLABLE is not
373  * set, VM_FAULT_RETRY can still be returned if and only if there are
374  * fatal_signal_pending()s, and the mmap_lock must be released before
375  * returning it.
376  */
377 vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
378 {
379         struct vm_area_struct *vma = vmf->vma;
380         struct mm_struct *mm = vma->vm_mm;
381         struct userfaultfd_ctx *ctx;
382         struct userfaultfd_wait_queue uwq;
383         vm_fault_t ret = VM_FAULT_SIGBUS;
384         bool must_wait;
385         unsigned int blocking_state;
386
387         /*
388          * We don't do userfault handling for the final child pid update.
389          *
390          * We also don't do userfault handling during
391          * coredumping. hugetlbfs has the special
392          * hugetlb_follow_page_mask() to skip missing pages in the
393          * FOLL_DUMP case, anon memory also checks for FOLL_DUMP with
394          * the no_page_table() helper in follow_page_mask(), but the
395          * shmem_vm_ops->fault method is invoked even during
396          * coredumping and it ends up here.
397          */
398         if (current->flags & (PF_EXITING|PF_DUMPCORE))
399                 goto out;
400
401         assert_fault_locked(vmf);
402
403         ctx = vma->vm_userfaultfd_ctx.ctx;
404         if (!ctx)
405                 goto out;
406
407         BUG_ON(ctx->mm != mm);
408
409         /* Any unrecognized flag is a bug. */
410         VM_BUG_ON(reason & ~__VM_UFFD_FLAGS);
411         /* 0 or > 1 flags set is a bug; we expect exactly 1. */
412         VM_BUG_ON(!reason || (reason & (reason - 1)));
413
414         if (ctx->features & UFFD_FEATURE_SIGBUS)
415                 goto out;
416         if (!(vmf->flags & FAULT_FLAG_USER) && (ctx->flags & UFFD_USER_MODE_ONLY))
417                 goto out;
418
419         /*
420          * If it's already released don't get it. This avoids to loop
421          * in __get_user_pages if userfaultfd_release waits on the
422          * caller of handle_userfault to release the mmap_lock.
423          */
424         if (unlikely(READ_ONCE(ctx->released))) {
425                 /*
426                  * Don't return VM_FAULT_SIGBUS in this case, so a non
427                  * cooperative manager can close the uffd after the
428                  * last UFFDIO_COPY, without risking to trigger an
429                  * involuntary SIGBUS if the process was starting the
430                  * userfaultfd while the userfaultfd was still armed
431                  * (but after the last UFFDIO_COPY). If the uffd
432                  * wasn't already closed when the userfault reached
433                  * this point, that would normally be solved by
434                  * userfaultfd_must_wait returning 'false'.
435                  *
436                  * If we were to return VM_FAULT_SIGBUS here, the non
437                  * cooperative manager would be instead forced to
438                  * always call UFFDIO_UNREGISTER before it can safely
439                  * close the uffd.
440                  */
441                 ret = VM_FAULT_NOPAGE;
442                 goto out;
443         }
444
445         /*
446          * Check that we can return VM_FAULT_RETRY.
447          *
448          * NOTE: it should become possible to return VM_FAULT_RETRY
449          * even if FAULT_FLAG_TRIED is set without leading to gup()
450          * -EBUSY failures, if the userfaultfd is to be extended for
451          * VM_UFFD_WP tracking and we intend to arm the userfault
452          * without first stopping userland access to the memory. For
453          * VM_UFFD_MISSING userfaults this is enough for now.
454          */
455         if (unlikely(!(vmf->flags & FAULT_FLAG_ALLOW_RETRY))) {
456                 /*
457                  * Validate the invariant that nowait must allow retry
458                  * to be sure not to return SIGBUS erroneously on
459                  * nowait invocations.
460                  */
461                 BUG_ON(vmf->flags & FAULT_FLAG_RETRY_NOWAIT);
462 #ifdef CONFIG_DEBUG_VM
463                 if (printk_ratelimit()) {
464                         printk(KERN_WARNING
465                                "FAULT_FLAG_ALLOW_RETRY missing %x\n",
466                                vmf->flags);
467                         dump_stack();
468                 }
469 #endif
470                 goto out;
471         }
472
473         /*
474          * Handle nowait, not much to do other than tell it to retry
475          * and wait.
476          */
477         ret = VM_FAULT_RETRY;
478         if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
479                 goto out;
480
481         /* take the reference before dropping the mmap_lock */
482         userfaultfd_ctx_get(ctx);
483
484         init_waitqueue_func_entry(&uwq.wq, userfaultfd_wake_function);
485         uwq.wq.private = current;
486         uwq.msg = userfault_msg(vmf->address, vmf->real_address, vmf->flags,
487                                 reason, ctx->features);
488         uwq.ctx = ctx;
489         uwq.waken = false;
490
491         blocking_state = userfaultfd_get_blocking_state(vmf->flags);
492
493         /*
494          * Take the vma lock now, in order to safely call
495          * userfaultfd_huge_must_wait() later. Since acquiring the
496          * (sleepable) vma lock can modify the current task state, that
497          * must be before explicitly calling set_current_state().
498          */
499         if (is_vm_hugetlb_page(vma))
500                 hugetlb_vma_lock_read(vma);
501
502         spin_lock_irq(&ctx->fault_pending_wqh.lock);
503         /*
504          * After the __add_wait_queue the uwq is visible to userland
505          * through poll/read().
506          */
507         __add_wait_queue(&ctx->fault_pending_wqh, &uwq.wq);
508         /*
509          * The smp_mb() after __set_current_state prevents the reads
510          * following the spin_unlock to happen before the list_add in
511          * __add_wait_queue.
512          */
513         set_current_state(blocking_state);
514         spin_unlock_irq(&ctx->fault_pending_wqh.lock);
515
516         if (!is_vm_hugetlb_page(vma))
517                 must_wait = userfaultfd_must_wait(ctx, vmf, reason);
518         else
519                 must_wait = userfaultfd_huge_must_wait(ctx, vmf, reason);
520         if (is_vm_hugetlb_page(vma))
521                 hugetlb_vma_unlock_read(vma);
522         release_fault_lock(vmf);
523
524         if (likely(must_wait && !READ_ONCE(ctx->released))) {
525                 wake_up_poll(&ctx->fd_wqh, EPOLLIN);
526                 schedule();
527         }
528
529         __set_current_state(TASK_RUNNING);
530
531         /*
532          * Here we race with the list_del; list_add in
533          * userfaultfd_ctx_read(), however because we don't ever run
534          * list_del_init() to refile across the two lists, the prev
535          * and next pointers will never point to self. list_add also
536          * would never let any of the two pointers to point to
537          * self. So list_empty_careful won't risk to see both pointers
538          * pointing to self at any time during the list refile. The
539          * only case where list_del_init() is called is the full
540          * removal in the wake function and there we don't re-list_add
541          * and it's fine not to block on the spinlock. The uwq on this
542          * kernel stack can be released after the list_del_init.
543          */
544         if (!list_empty_careful(&uwq.wq.entry)) {
545                 spin_lock_irq(&ctx->fault_pending_wqh.lock);
546                 /*
547                  * No need of list_del_init(), the uwq on the stack
548                  * will be freed shortly anyway.
549                  */
550                 list_del(&uwq.wq.entry);
551                 spin_unlock_irq(&ctx->fault_pending_wqh.lock);
552         }
553
554         /*
555          * ctx may go away after this if the userfault pseudo fd is
556          * already released.
557          */
558         userfaultfd_ctx_put(ctx);
559
560 out:
561         return ret;
562 }
563
564 static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
565                                               struct userfaultfd_wait_queue *ewq)
566 {
567         struct userfaultfd_ctx *release_new_ctx;
568
569         if (WARN_ON_ONCE(current->flags & PF_EXITING))
570                 goto out;
571
572         ewq->ctx = ctx;
573         init_waitqueue_entry(&ewq->wq, current);
574         release_new_ctx = NULL;
575
576         spin_lock_irq(&ctx->event_wqh.lock);
577         /*
578          * After the __add_wait_queue the uwq is visible to userland
579          * through poll/read().
580          */
581         __add_wait_queue(&ctx->event_wqh, &ewq->wq);
582         for (;;) {
583                 set_current_state(TASK_KILLABLE);
584                 if (ewq->msg.event == 0)
585                         break;
586                 if (READ_ONCE(ctx->released) ||
587                     fatal_signal_pending(current)) {
588                         /*
589                          * &ewq->wq may be queued in fork_event, but
590                          * __remove_wait_queue ignores the head
591                          * parameter. It would be a problem if it
592                          * didn't.
593                          */
594                         __remove_wait_queue(&ctx->event_wqh, &ewq->wq);
595                         if (ewq->msg.event == UFFD_EVENT_FORK) {
596                                 struct userfaultfd_ctx *new;
597
598                                 new = (struct userfaultfd_ctx *)
599                                         (unsigned long)
600                                         ewq->msg.arg.reserved.reserved1;
601                                 release_new_ctx = new;
602                         }
603                         break;
604                 }
605
606                 spin_unlock_irq(&ctx->event_wqh.lock);
607
608                 wake_up_poll(&ctx->fd_wqh, EPOLLIN);
609                 schedule();
610
611                 spin_lock_irq(&ctx->event_wqh.lock);
612         }
613         __set_current_state(TASK_RUNNING);
614         spin_unlock_irq(&ctx->event_wqh.lock);
615
616         if (release_new_ctx) {
617                 struct vm_area_struct *vma;
618                 struct mm_struct *mm = release_new_ctx->mm;
619                 VMA_ITERATOR(vmi, mm, 0);
620
621                 /* the various vma->vm_userfaultfd_ctx still points to it */
622                 mmap_write_lock(mm);
623                 for_each_vma(vmi, vma) {
624                         if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx) {
625                                 vma_start_write(vma);
626                                 vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
627                                 userfaultfd_set_vm_flags(vma,
628                                                          vma->vm_flags & ~__VM_UFFD_FLAGS);
629                         }
630                 }
631                 mmap_write_unlock(mm);
632
633                 userfaultfd_ctx_put(release_new_ctx);
634         }
635
636         /*
637          * ctx may go away after this if the userfault pseudo fd is
638          * already released.
639          */
640 out:
641         atomic_dec(&ctx->mmap_changing);
642         VM_BUG_ON(atomic_read(&ctx->mmap_changing) < 0);
643         userfaultfd_ctx_put(ctx);
644 }
645
646 static void userfaultfd_event_complete(struct userfaultfd_ctx *ctx,
647                                        struct userfaultfd_wait_queue *ewq)
648 {
649         ewq->msg.event = 0;
650         wake_up_locked(&ctx->event_wqh);
651         __remove_wait_queue(&ctx->event_wqh, &ewq->wq);
652 }
653
654 int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs)
655 {
656         struct userfaultfd_ctx *ctx = NULL, *octx;
657         struct userfaultfd_fork_ctx *fctx;
658
659         octx = vma->vm_userfaultfd_ctx.ctx;
660         if (!octx)
661                 return 0;
662
663         if (!(octx->features & UFFD_FEATURE_EVENT_FORK)) {
664                 vma_start_write(vma);
665                 vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
666                 userfaultfd_set_vm_flags(vma, vma->vm_flags & ~__VM_UFFD_FLAGS);
667                 return 0;
668         }
669
670         list_for_each_entry(fctx, fcs, list)
671                 if (fctx->orig == octx) {
672                         ctx = fctx->new;
673                         break;
674                 }
675
676         if (!ctx) {
677                 fctx = kmalloc(sizeof(*fctx), GFP_KERNEL);
678                 if (!fctx)
679                         return -ENOMEM;
680
681                 ctx = kmem_cache_alloc(userfaultfd_ctx_cachep, GFP_KERNEL);
682                 if (!ctx) {
683                         kfree(fctx);
684                         return -ENOMEM;
685                 }
686
687                 refcount_set(&ctx->refcount, 1);
688                 ctx->flags = octx->flags;
689                 ctx->features = octx->features;
690                 ctx->released = false;
691                 init_rwsem(&ctx->map_changing_lock);
692                 atomic_set(&ctx->mmap_changing, 0);
693                 ctx->mm = vma->vm_mm;
694                 mmgrab(ctx->mm);
695
696                 userfaultfd_ctx_get(octx);
697                 down_write(&octx->map_changing_lock);
698                 atomic_inc(&octx->mmap_changing);
699                 up_write(&octx->map_changing_lock);
700                 fctx->orig = octx;
701                 fctx->new = ctx;
702                 list_add_tail(&fctx->list, fcs);
703         }
704
705         vma->vm_userfaultfd_ctx.ctx = ctx;
706         return 0;
707 }
708
709 static void dup_fctx(struct userfaultfd_fork_ctx *fctx)
710 {
711         struct userfaultfd_ctx *ctx = fctx->orig;
712         struct userfaultfd_wait_queue ewq;
713
714         msg_init(&ewq.msg);
715
716         ewq.msg.event = UFFD_EVENT_FORK;
717         ewq.msg.arg.reserved.reserved1 = (unsigned long)fctx->new;
718
719         userfaultfd_event_wait_completion(ctx, &ewq);
720 }
721
722 void dup_userfaultfd_complete(struct list_head *fcs)
723 {
724         struct userfaultfd_fork_ctx *fctx, *n;
725
726         list_for_each_entry_safe(fctx, n, fcs, list) {
727                 dup_fctx(fctx);
728                 list_del(&fctx->list);
729                 kfree(fctx);
730         }
731 }
732
733 void mremap_userfaultfd_prep(struct vm_area_struct *vma,
734                              struct vm_userfaultfd_ctx *vm_ctx)
735 {
736         struct userfaultfd_ctx *ctx;
737
738         ctx = vma->vm_userfaultfd_ctx.ctx;
739
740         if (!ctx)
741                 return;
742
743         if (ctx->features & UFFD_FEATURE_EVENT_REMAP) {
744                 vm_ctx->ctx = ctx;
745                 userfaultfd_ctx_get(ctx);
746                 down_write(&ctx->map_changing_lock);
747                 atomic_inc(&ctx->mmap_changing);
748                 up_write(&ctx->map_changing_lock);
749         } else {
750                 /* Drop uffd context if remap feature not enabled */
751                 vma_start_write(vma);
752                 vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
753                 userfaultfd_set_vm_flags(vma, vma->vm_flags & ~__VM_UFFD_FLAGS);
754         }
755 }
756
757 void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *vm_ctx,
758                                  unsigned long from, unsigned long to,
759                                  unsigned long len)
760 {
761         struct userfaultfd_ctx *ctx = vm_ctx->ctx;
762         struct userfaultfd_wait_queue ewq;
763
764         if (!ctx)
765                 return;
766
767         if (to & ~PAGE_MASK) {
768                 userfaultfd_ctx_put(ctx);
769                 return;
770         }
771
772         msg_init(&ewq.msg);
773
774         ewq.msg.event = UFFD_EVENT_REMAP;
775         ewq.msg.arg.remap.from = from;
776         ewq.msg.arg.remap.to = to;
777         ewq.msg.arg.remap.len = len;
778
779         userfaultfd_event_wait_completion(ctx, &ewq);
780 }
781
782 bool userfaultfd_remove(struct vm_area_struct *vma,
783                         unsigned long start, unsigned long end)
784 {
785         struct mm_struct *mm = vma->vm_mm;
786         struct userfaultfd_ctx *ctx;
787         struct userfaultfd_wait_queue ewq;
788
789         ctx = vma->vm_userfaultfd_ctx.ctx;
790         if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_REMOVE))
791                 return true;
792
793         userfaultfd_ctx_get(ctx);
794         down_write(&ctx->map_changing_lock);
795         atomic_inc(&ctx->mmap_changing);
796         up_write(&ctx->map_changing_lock);
797         mmap_read_unlock(mm);
798
799         msg_init(&ewq.msg);
800
801         ewq.msg.event = UFFD_EVENT_REMOVE;
802         ewq.msg.arg.remove.start = start;
803         ewq.msg.arg.remove.end = end;
804
805         userfaultfd_event_wait_completion(ctx, &ewq);
806
807         return false;
808 }
809
810 static bool has_unmap_ctx(struct userfaultfd_ctx *ctx, struct list_head *unmaps,
811                           unsigned long start, unsigned long end)
812 {
813         struct userfaultfd_unmap_ctx *unmap_ctx;
814
815         list_for_each_entry(unmap_ctx, unmaps, list)
816                 if (unmap_ctx->ctx == ctx && unmap_ctx->start == start &&
817                     unmap_ctx->end == end)
818                         return true;
819
820         return false;
821 }
822
823 int userfaultfd_unmap_prep(struct vm_area_struct *vma, unsigned long start,
824                            unsigned long end, struct list_head *unmaps)
825 {
826         struct userfaultfd_unmap_ctx *unmap_ctx;
827         struct userfaultfd_ctx *ctx = vma->vm_userfaultfd_ctx.ctx;
828
829         if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_UNMAP) ||
830             has_unmap_ctx(ctx, unmaps, start, end))
831                 return 0;
832
833         unmap_ctx = kzalloc(sizeof(*unmap_ctx), GFP_KERNEL);
834         if (!unmap_ctx)
835                 return -ENOMEM;
836
837         userfaultfd_ctx_get(ctx);
838         down_write(&ctx->map_changing_lock);
839         atomic_inc(&ctx->mmap_changing);
840         up_write(&ctx->map_changing_lock);
841         unmap_ctx->ctx = ctx;
842         unmap_ctx->start = start;
843         unmap_ctx->end = end;
844         list_add_tail(&unmap_ctx->list, unmaps);
845
846         return 0;
847 }
848
849 void userfaultfd_unmap_complete(struct mm_struct *mm, struct list_head *uf)
850 {
851         struct userfaultfd_unmap_ctx *ctx, *n;
852         struct userfaultfd_wait_queue ewq;
853
854         list_for_each_entry_safe(ctx, n, uf, list) {
855                 msg_init(&ewq.msg);
856
857                 ewq.msg.event = UFFD_EVENT_UNMAP;
858                 ewq.msg.arg.remove.start = ctx->start;
859                 ewq.msg.arg.remove.end = ctx->end;
860
861                 userfaultfd_event_wait_completion(ctx->ctx, &ewq);
862
863                 list_del(&ctx->list);
864                 kfree(ctx);
865         }
866 }
867
868 static int userfaultfd_release(struct inode *inode, struct file *file)
869 {
870         struct userfaultfd_ctx *ctx = file->private_data;
871         struct mm_struct *mm = ctx->mm;
872         struct vm_area_struct *vma, *prev;
873         /* len == 0 means wake all */
874         struct userfaultfd_wake_range range = { .len = 0, };
875         unsigned long new_flags;
876         VMA_ITERATOR(vmi, mm, 0);
877
878         WRITE_ONCE(ctx->released, true);
879
880         if (!mmget_not_zero(mm))
881                 goto wakeup;
882
883         /*
884          * Flush page faults out of all CPUs. NOTE: all page faults
885          * must be retried without returning VM_FAULT_SIGBUS if
886          * userfaultfd_ctx_get() succeeds but vma->vma_userfault_ctx
887          * changes while handle_userfault released the mmap_lock. So
888          * it's critical that released is set to true (above), before
889          * taking the mmap_lock for writing.
890          */
891         mmap_write_lock(mm);
892         prev = NULL;
893         for_each_vma(vmi, vma) {
894                 cond_resched();
895                 BUG_ON(!!vma->vm_userfaultfd_ctx.ctx ^
896                        !!(vma->vm_flags & __VM_UFFD_FLAGS));
897                 if (vma->vm_userfaultfd_ctx.ctx != ctx) {
898                         prev = vma;
899                         continue;
900                 }
901                 new_flags = vma->vm_flags & ~__VM_UFFD_FLAGS;
902                 vma = vma_modify_flags_uffd(&vmi, prev, vma, vma->vm_start,
903                                             vma->vm_end, new_flags,
904                                             NULL_VM_UFFD_CTX);
905
906                 vma_start_write(vma);
907                 userfaultfd_set_vm_flags(vma, new_flags);
908                 vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
909
910                 prev = vma;
911         }
912         mmap_write_unlock(mm);
913         mmput(mm);
914 wakeup:
915         /*
916          * After no new page faults can wait on this fault_*wqh, flush
917          * the last page faults that may have been already waiting on
918          * the fault_*wqh.
919          */
920         spin_lock_irq(&ctx->fault_pending_wqh.lock);
921         __wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, &range);
922         __wake_up(&ctx->fault_wqh, TASK_NORMAL, 1, &range);
923         spin_unlock_irq(&ctx->fault_pending_wqh.lock);
924
925         /* Flush pending events that may still wait on event_wqh */
926         wake_up_all(&ctx->event_wqh);
927
928         wake_up_poll(&ctx->fd_wqh, EPOLLHUP);
929         userfaultfd_ctx_put(ctx);
930         return 0;
931 }
932
933 /* fault_pending_wqh.lock must be hold by the caller */
934 static inline struct userfaultfd_wait_queue *find_userfault_in(
935                 wait_queue_head_t *wqh)
936 {
937         wait_queue_entry_t *wq;
938         struct userfaultfd_wait_queue *uwq;
939
940         lockdep_assert_held(&wqh->lock);
941
942         uwq = NULL;
943         if (!waitqueue_active(wqh))
944                 goto out;
945         /* walk in reverse to provide FIFO behavior to read userfaults */
946         wq = list_last_entry(&wqh->head, typeof(*wq), entry);
947         uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
948 out:
949         return uwq;
950 }
951
952 static inline struct userfaultfd_wait_queue *find_userfault(
953                 struct userfaultfd_ctx *ctx)
954 {
955         return find_userfault_in(&ctx->fault_pending_wqh);
956 }
957
958 static inline struct userfaultfd_wait_queue *find_userfault_evt(
959                 struct userfaultfd_ctx *ctx)
960 {
961         return find_userfault_in(&ctx->event_wqh);
962 }
963
964 static __poll_t userfaultfd_poll(struct file *file, poll_table *wait)
965 {
966         struct userfaultfd_ctx *ctx = file->private_data;
967         __poll_t ret;
968
969         poll_wait(file, &ctx->fd_wqh, wait);
970
971         if (!userfaultfd_is_initialized(ctx))
972                 return EPOLLERR;
973
974         /*
975          * poll() never guarantees that read won't block.
976          * userfaults can be waken before they're read().
977          */
978         if (unlikely(!(file->f_flags & O_NONBLOCK)))
979                 return EPOLLERR;
980         /*
981          * lockless access to see if there are pending faults
982          * __pollwait last action is the add_wait_queue but
983          * the spin_unlock would allow the waitqueue_active to
984          * pass above the actual list_add inside
985          * add_wait_queue critical section. So use a full
986          * memory barrier to serialize the list_add write of
987          * add_wait_queue() with the waitqueue_active read
988          * below.
989          */
990         ret = 0;
991         smp_mb();
992         if (waitqueue_active(&ctx->fault_pending_wqh))
993                 ret = EPOLLIN;
994         else if (waitqueue_active(&ctx->event_wqh))
995                 ret = EPOLLIN;
996
997         return ret;
998 }
999
1000 static const struct file_operations userfaultfd_fops;
1001
1002 static int resolve_userfault_fork(struct userfaultfd_ctx *new,
1003                                   struct inode *inode,
1004                                   struct uffd_msg *msg)
1005 {
1006         int fd;
1007
1008         fd = anon_inode_create_getfd("[userfaultfd]", &userfaultfd_fops, new,
1009                         O_RDONLY | (new->flags & UFFD_SHARED_FCNTL_FLAGS), inode);
1010         if (fd < 0)
1011                 return fd;
1012
1013         msg->arg.reserved.reserved1 = 0;
1014         msg->arg.fork.ufd = fd;
1015         return 0;
1016 }
1017
1018 static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait,
1019                                     struct uffd_msg *msg, struct inode *inode)
1020 {
1021         ssize_t ret;
1022         DECLARE_WAITQUEUE(wait, current);
1023         struct userfaultfd_wait_queue *uwq;
1024         /*
1025          * Handling fork event requires sleeping operations, so
1026          * we drop the event_wqh lock, then do these ops, then
1027          * lock it back and wake up the waiter. While the lock is
1028          * dropped the ewq may go away so we keep track of it
1029          * carefully.
1030          */
1031         LIST_HEAD(fork_event);
1032         struct userfaultfd_ctx *fork_nctx = NULL;
1033
1034         /* always take the fd_wqh lock before the fault_pending_wqh lock */
1035         spin_lock_irq(&ctx->fd_wqh.lock);
1036         __add_wait_queue(&ctx->fd_wqh, &wait);
1037         for (;;) {
1038                 set_current_state(TASK_INTERRUPTIBLE);
1039                 spin_lock(&ctx->fault_pending_wqh.lock);
1040                 uwq = find_userfault(ctx);
1041                 if (uwq) {
1042                         /*
1043                          * Use a seqcount to repeat the lockless check
1044                          * in wake_userfault() to avoid missing
1045                          * wakeups because during the refile both
1046                          * waitqueue could become empty if this is the
1047                          * only userfault.
1048                          */
1049                         write_seqcount_begin(&ctx->refile_seq);
1050
1051                         /*
1052                          * The fault_pending_wqh.lock prevents the uwq
1053                          * to disappear from under us.
1054                          *
1055                          * Refile this userfault from
1056                          * fault_pending_wqh to fault_wqh, it's not
1057                          * pending anymore after we read it.
1058                          *
1059                          * Use list_del() by hand (as
1060                          * userfaultfd_wake_function also uses
1061                          * list_del_init() by hand) to be sure nobody
1062                          * changes __remove_wait_queue() to use
1063                          * list_del_init() in turn breaking the
1064                          * !list_empty_careful() check in
1065                          * handle_userfault(). The uwq->wq.head list
1066                          * must never be empty at any time during the
1067                          * refile, or the waitqueue could disappear
1068                          * from under us. The "wait_queue_head_t"
1069                          * parameter of __remove_wait_queue() is unused
1070                          * anyway.
1071                          */
1072                         list_del(&uwq->wq.entry);
1073                         add_wait_queue(&ctx->fault_wqh, &uwq->wq);
1074
1075                         write_seqcount_end(&ctx->refile_seq);
1076
1077                         /* careful to always initialize msg if ret == 0 */
1078                         *msg = uwq->msg;
1079                         spin_unlock(&ctx->fault_pending_wqh.lock);
1080                         ret = 0;
1081                         break;
1082                 }
1083                 spin_unlock(&ctx->fault_pending_wqh.lock);
1084
1085                 spin_lock(&ctx->event_wqh.lock);
1086                 uwq = find_userfault_evt(ctx);
1087                 if (uwq) {
1088                         *msg = uwq->msg;
1089
1090                         if (uwq->msg.event == UFFD_EVENT_FORK) {
1091                                 fork_nctx = (struct userfaultfd_ctx *)
1092                                         (unsigned long)
1093                                         uwq->msg.arg.reserved.reserved1;
1094                                 list_move(&uwq->wq.entry, &fork_event);
1095                                 /*
1096                                  * fork_nctx can be freed as soon as
1097                                  * we drop the lock, unless we take a
1098                                  * reference on it.
1099                                  */
1100                                 userfaultfd_ctx_get(fork_nctx);
1101                                 spin_unlock(&ctx->event_wqh.lock);
1102                                 ret = 0;
1103                                 break;
1104                         }
1105
1106                         userfaultfd_event_complete(ctx, uwq);
1107                         spin_unlock(&ctx->event_wqh.lock);
1108                         ret = 0;
1109                         break;
1110                 }
1111                 spin_unlock(&ctx->event_wqh.lock);
1112
1113                 if (signal_pending(current)) {
1114                         ret = -ERESTARTSYS;
1115                         break;
1116                 }
1117                 if (no_wait) {
1118                         ret = -EAGAIN;
1119                         break;
1120                 }
1121                 spin_unlock_irq(&ctx->fd_wqh.lock);
1122                 schedule();
1123                 spin_lock_irq(&ctx->fd_wqh.lock);
1124         }
1125         __remove_wait_queue(&ctx->fd_wqh, &wait);
1126         __set_current_state(TASK_RUNNING);
1127         spin_unlock_irq(&ctx->fd_wqh.lock);
1128
1129         if (!ret && msg->event == UFFD_EVENT_FORK) {
1130                 ret = resolve_userfault_fork(fork_nctx, inode, msg);
1131                 spin_lock_irq(&ctx->event_wqh.lock);
1132                 if (!list_empty(&fork_event)) {
1133                         /*
1134                          * The fork thread didn't abort, so we can
1135                          * drop the temporary refcount.
1136                          */
1137                         userfaultfd_ctx_put(fork_nctx);
1138
1139                         uwq = list_first_entry(&fork_event,
1140                                                typeof(*uwq),
1141                                                wq.entry);
1142                         /*
1143                          * If fork_event list wasn't empty and in turn
1144                          * the event wasn't already released by fork
1145                          * (the event is allocated on fork kernel
1146                          * stack), put the event back to its place in
1147                          * the event_wq. fork_event head will be freed
1148                          * as soon as we return so the event cannot
1149                          * stay queued there no matter the current
1150                          * "ret" value.
1151                          */
1152                         list_del(&uwq->wq.entry);
1153                         __add_wait_queue(&ctx->event_wqh, &uwq->wq);
1154
1155                         /*
1156                          * Leave the event in the waitqueue and report
1157                          * error to userland if we failed to resolve
1158                          * the userfault fork.
1159                          */
1160                         if (likely(!ret))
1161                                 userfaultfd_event_complete(ctx, uwq);
1162                 } else {
1163                         /*
1164                          * Here the fork thread aborted and the
1165                          * refcount from the fork thread on fork_nctx
1166                          * has already been released. We still hold
1167                          * the reference we took before releasing the
1168                          * lock above. If resolve_userfault_fork
1169                          * failed we've to drop it because the
1170                          * fork_nctx has to be freed in such case. If
1171                          * it succeeded we'll hold it because the new
1172                          * uffd references it.
1173                          */
1174                         if (ret)
1175                                 userfaultfd_ctx_put(fork_nctx);
1176                 }
1177                 spin_unlock_irq(&ctx->event_wqh.lock);
1178         }
1179
1180         return ret;
1181 }
1182
1183 static ssize_t userfaultfd_read(struct file *file, char __user *buf,
1184                                 size_t count, loff_t *ppos)
1185 {
1186         struct userfaultfd_ctx *ctx = file->private_data;
1187         ssize_t _ret, ret = 0;
1188         struct uffd_msg msg;
1189         int no_wait = file->f_flags & O_NONBLOCK;
1190         struct inode *inode = file_inode(file);
1191
1192         if (!userfaultfd_is_initialized(ctx))
1193                 return -EINVAL;
1194
1195         for (;;) {
1196                 if (count < sizeof(msg))
1197                         return ret ? ret : -EINVAL;
1198                 _ret = userfaultfd_ctx_read(ctx, no_wait, &msg, inode);
1199                 if (_ret < 0)
1200                         return ret ? ret : _ret;
1201                 if (copy_to_user((__u64 __user *) buf, &msg, sizeof(msg)))
1202                         return ret ? ret : -EFAULT;
1203                 ret += sizeof(msg);
1204                 buf += sizeof(msg);
1205                 count -= sizeof(msg);
1206                 /*
1207                  * Allow to read more than one fault at time but only
1208                  * block if waiting for the very first one.
1209                  */
1210                 no_wait = O_NONBLOCK;
1211         }
1212 }
1213
1214 static void __wake_userfault(struct userfaultfd_ctx *ctx,
1215                              struct userfaultfd_wake_range *range)
1216 {
1217         spin_lock_irq(&ctx->fault_pending_wqh.lock);
1218         /* wake all in the range and autoremove */
1219         if (waitqueue_active(&ctx->fault_pending_wqh))
1220                 __wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL,
1221                                      range);
1222         if (waitqueue_active(&ctx->fault_wqh))
1223                 __wake_up(&ctx->fault_wqh, TASK_NORMAL, 1, range);
1224         spin_unlock_irq(&ctx->fault_pending_wqh.lock);
1225 }
1226
1227 static __always_inline void wake_userfault(struct userfaultfd_ctx *ctx,
1228                                            struct userfaultfd_wake_range *range)
1229 {
1230         unsigned seq;
1231         bool need_wakeup;
1232
1233         /*
1234          * To be sure waitqueue_active() is not reordered by the CPU
1235          * before the pagetable update, use an explicit SMP memory
1236          * barrier here. PT lock release or mmap_read_unlock(mm) still
1237          * have release semantics that can allow the
1238          * waitqueue_active() to be reordered before the pte update.
1239          */
1240         smp_mb();
1241
1242         /*
1243          * Use waitqueue_active because it's very frequent to
1244          * change the address space atomically even if there are no
1245          * userfaults yet. So we take the spinlock only when we're
1246          * sure we've userfaults to wake.
1247          */
1248         do {
1249                 seq = read_seqcount_begin(&ctx->refile_seq);
1250                 need_wakeup = waitqueue_active(&ctx->fault_pending_wqh) ||
1251                         waitqueue_active(&ctx->fault_wqh);
1252                 cond_resched();
1253         } while (read_seqcount_retry(&ctx->refile_seq, seq));
1254         if (need_wakeup)
1255                 __wake_userfault(ctx, range);
1256 }
1257
1258 static __always_inline int validate_unaligned_range(
1259         struct mm_struct *mm, __u64 start, __u64 len)
1260 {
1261         __u64 task_size = mm->task_size;
1262
1263         if (len & ~PAGE_MASK)
1264                 return -EINVAL;
1265         if (!len)
1266                 return -EINVAL;
1267         if (start < mmap_min_addr)
1268                 return -EINVAL;
1269         if (start >= task_size)
1270                 return -EINVAL;
1271         if (len > task_size - start)
1272                 return -EINVAL;
1273         if (start + len <= start)
1274                 return -EINVAL;
1275         return 0;
1276 }
1277
1278 static __always_inline int validate_range(struct mm_struct *mm,
1279                                           __u64 start, __u64 len)
1280 {
1281         if (start & ~PAGE_MASK)
1282                 return -EINVAL;
1283
1284         return validate_unaligned_range(mm, start, len);
1285 }
1286
1287 static int userfaultfd_register(struct userfaultfd_ctx *ctx,
1288                                 unsigned long arg)
1289 {
1290         struct mm_struct *mm = ctx->mm;
1291         struct vm_area_struct *vma, *prev, *cur;
1292         int ret;
1293         struct uffdio_register uffdio_register;
1294         struct uffdio_register __user *user_uffdio_register;
1295         unsigned long vm_flags, new_flags;
1296         bool found;
1297         bool basic_ioctls;
1298         unsigned long start, end, vma_end;
1299         struct vma_iterator vmi;
1300         bool wp_async = userfaultfd_wp_async_ctx(ctx);
1301
1302         user_uffdio_register = (struct uffdio_register __user *) arg;
1303
1304         ret = -EFAULT;
1305         if (copy_from_user(&uffdio_register, user_uffdio_register,
1306                            sizeof(uffdio_register)-sizeof(__u64)))
1307                 goto out;
1308
1309         ret = -EINVAL;
1310         if (!uffdio_register.mode)
1311                 goto out;
1312         if (uffdio_register.mode & ~UFFD_API_REGISTER_MODES)
1313                 goto out;
1314         vm_flags = 0;
1315         if (uffdio_register.mode & UFFDIO_REGISTER_MODE_MISSING)
1316                 vm_flags |= VM_UFFD_MISSING;
1317         if (uffdio_register.mode & UFFDIO_REGISTER_MODE_WP) {
1318 #ifndef CONFIG_HAVE_ARCH_USERFAULTFD_WP
1319                 goto out;
1320 #endif
1321                 vm_flags |= VM_UFFD_WP;
1322         }
1323         if (uffdio_register.mode & UFFDIO_REGISTER_MODE_MINOR) {
1324 #ifndef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
1325                 goto out;
1326 #endif
1327                 vm_flags |= VM_UFFD_MINOR;
1328         }
1329
1330         ret = validate_range(mm, uffdio_register.range.start,
1331                              uffdio_register.range.len);
1332         if (ret)
1333                 goto out;
1334
1335         start = uffdio_register.range.start;
1336         end = start + uffdio_register.range.len;
1337
1338         ret = -ENOMEM;
1339         if (!mmget_not_zero(mm))
1340                 goto out;
1341
1342         ret = -EINVAL;
1343         mmap_write_lock(mm);
1344         vma_iter_init(&vmi, mm, start);
1345         vma = vma_find(&vmi, end);
1346         if (!vma)
1347                 goto out_unlock;
1348
1349         /*
1350          * If the first vma contains huge pages, make sure start address
1351          * is aligned to huge page size.
1352          */
1353         if (is_vm_hugetlb_page(vma)) {
1354                 unsigned long vma_hpagesize = vma_kernel_pagesize(vma);
1355
1356                 if (start & (vma_hpagesize - 1))
1357                         goto out_unlock;
1358         }
1359
1360         /*
1361          * Search for not compatible vmas.
1362          */
1363         found = false;
1364         basic_ioctls = false;
1365         cur = vma;
1366         do {
1367                 cond_resched();
1368
1369                 BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^
1370                        !!(cur->vm_flags & __VM_UFFD_FLAGS));
1371
1372                 /* check not compatible vmas */
1373                 ret = -EINVAL;
1374                 if (!vma_can_userfault(cur, vm_flags, wp_async))
1375                         goto out_unlock;
1376
1377                 /*
1378                  * UFFDIO_COPY will fill file holes even without
1379                  * PROT_WRITE. This check enforces that if this is a
1380                  * MAP_SHARED, the process has write permission to the backing
1381                  * file. If VM_MAYWRITE is set it also enforces that on a
1382                  * MAP_SHARED vma: there is no F_WRITE_SEAL and no further
1383                  * F_WRITE_SEAL can be taken until the vma is destroyed.
1384                  */
1385                 ret = -EPERM;
1386                 if (unlikely(!(cur->vm_flags & VM_MAYWRITE)))
1387                         goto out_unlock;
1388
1389                 /*
1390                  * If this vma contains ending address, and huge pages
1391                  * check alignment.
1392                  */
1393                 if (is_vm_hugetlb_page(cur) && end <= cur->vm_end &&
1394                     end > cur->vm_start) {
1395                         unsigned long vma_hpagesize = vma_kernel_pagesize(cur);
1396
1397                         ret = -EINVAL;
1398
1399                         if (end & (vma_hpagesize - 1))
1400                                 goto out_unlock;
1401                 }
1402                 if ((vm_flags & VM_UFFD_WP) && !(cur->vm_flags & VM_MAYWRITE))
1403                         goto out_unlock;
1404
1405                 /*
1406                  * Check that this vma isn't already owned by a
1407                  * different userfaultfd. We can't allow more than one
1408                  * userfaultfd to own a single vma simultaneously or we
1409                  * wouldn't know which one to deliver the userfaults to.
1410                  */
1411                 ret = -EBUSY;
1412                 if (cur->vm_userfaultfd_ctx.ctx &&
1413                     cur->vm_userfaultfd_ctx.ctx != ctx)
1414                         goto out_unlock;
1415
1416                 /*
1417                  * Note vmas containing huge pages
1418                  */
1419                 if (is_vm_hugetlb_page(cur))
1420                         basic_ioctls = true;
1421
1422                 found = true;
1423         } for_each_vma_range(vmi, cur, end);
1424         BUG_ON(!found);
1425
1426         vma_iter_set(&vmi, start);
1427         prev = vma_prev(&vmi);
1428         if (vma->vm_start < start)
1429                 prev = vma;
1430
1431         ret = 0;
1432         for_each_vma_range(vmi, vma, end) {
1433                 cond_resched();
1434
1435                 BUG_ON(!vma_can_userfault(vma, vm_flags, wp_async));
1436                 BUG_ON(vma->vm_userfaultfd_ctx.ctx &&
1437                        vma->vm_userfaultfd_ctx.ctx != ctx);
1438                 WARN_ON(!(vma->vm_flags & VM_MAYWRITE));
1439
1440                 /*
1441                  * Nothing to do: this vma is already registered into this
1442                  * userfaultfd and with the right tracking mode too.
1443                  */
1444                 if (vma->vm_userfaultfd_ctx.ctx == ctx &&
1445                     (vma->vm_flags & vm_flags) == vm_flags)
1446                         goto skip;
1447
1448                 if (vma->vm_start > start)
1449                         start = vma->vm_start;
1450                 vma_end = min(end, vma->vm_end);
1451
1452                 new_flags = (vma->vm_flags & ~__VM_UFFD_FLAGS) | vm_flags;
1453                 vma = vma_modify_flags_uffd(&vmi, prev, vma, start, vma_end,
1454                                             new_flags,
1455                                             (struct vm_userfaultfd_ctx){ctx});
1456                 if (IS_ERR(vma)) {
1457                         ret = PTR_ERR(vma);
1458                         break;
1459                 }
1460
1461                 /*
1462                  * In the vma_merge() successful mprotect-like case 8:
1463                  * the next vma was merged into the current one and
1464                  * the current one has not been updated yet.
1465                  */
1466                 vma_start_write(vma);
1467                 userfaultfd_set_vm_flags(vma, new_flags);
1468                 vma->vm_userfaultfd_ctx.ctx = ctx;
1469
1470                 if (is_vm_hugetlb_page(vma) && uffd_disable_huge_pmd_share(vma))
1471                         hugetlb_unshare_all_pmds(vma);
1472
1473         skip:
1474                 prev = vma;
1475                 start = vma->vm_end;
1476         }
1477
1478 out_unlock:
1479         mmap_write_unlock(mm);
1480         mmput(mm);
1481         if (!ret) {
1482                 __u64 ioctls_out;
1483
1484                 ioctls_out = basic_ioctls ? UFFD_API_RANGE_IOCTLS_BASIC :
1485                     UFFD_API_RANGE_IOCTLS;
1486
1487                 /*
1488                  * Declare the WP ioctl only if the WP mode is
1489                  * specified and all checks passed with the range
1490                  */
1491                 if (!(uffdio_register.mode & UFFDIO_REGISTER_MODE_WP))
1492                         ioctls_out &= ~((__u64)1 << _UFFDIO_WRITEPROTECT);
1493
1494                 /* CONTINUE ioctl is only supported for MINOR ranges. */
1495                 if (!(uffdio_register.mode & UFFDIO_REGISTER_MODE_MINOR))
1496                         ioctls_out &= ~((__u64)1 << _UFFDIO_CONTINUE);
1497
1498                 /*
1499                  * Now that we scanned all vmas we can already tell
1500                  * userland which ioctls methods are guaranteed to
1501                  * succeed on this range.
1502                  */
1503                 if (put_user(ioctls_out, &user_uffdio_register->ioctls))
1504                         ret = -EFAULT;
1505         }
1506 out:
1507         return ret;
1508 }
1509
1510 static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
1511                                   unsigned long arg)
1512 {
1513         struct mm_struct *mm = ctx->mm;
1514         struct vm_area_struct *vma, *prev, *cur;
1515         int ret;
1516         struct uffdio_range uffdio_unregister;
1517         unsigned long new_flags;
1518         bool found;
1519         unsigned long start, end, vma_end;
1520         const void __user *buf = (void __user *)arg;
1521         struct vma_iterator vmi;
1522         bool wp_async = userfaultfd_wp_async_ctx(ctx);
1523
1524         ret = -EFAULT;
1525         if (copy_from_user(&uffdio_unregister, buf, sizeof(uffdio_unregister)))
1526                 goto out;
1527
1528         ret = validate_range(mm, uffdio_unregister.start,
1529                              uffdio_unregister.len);
1530         if (ret)
1531                 goto out;
1532
1533         start = uffdio_unregister.start;
1534         end = start + uffdio_unregister.len;
1535
1536         ret = -ENOMEM;
1537         if (!mmget_not_zero(mm))
1538                 goto out;
1539
1540         mmap_write_lock(mm);
1541         ret = -EINVAL;
1542         vma_iter_init(&vmi, mm, start);
1543         vma = vma_find(&vmi, end);
1544         if (!vma)
1545                 goto out_unlock;
1546
1547         /*
1548          * If the first vma contains huge pages, make sure start address
1549          * is aligned to huge page size.
1550          */
1551         if (is_vm_hugetlb_page(vma)) {
1552                 unsigned long vma_hpagesize = vma_kernel_pagesize(vma);
1553
1554                 if (start & (vma_hpagesize - 1))
1555                         goto out_unlock;
1556         }
1557
1558         /*
1559          * Search for not compatible vmas.
1560          */
1561         found = false;
1562         cur = vma;
1563         do {
1564                 cond_resched();
1565
1566                 BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^
1567                        !!(cur->vm_flags & __VM_UFFD_FLAGS));
1568
1569                 /*
1570                  * Check not compatible vmas, not strictly required
1571                  * here as not compatible vmas cannot have an
1572                  * userfaultfd_ctx registered on them, but this
1573                  * provides for more strict behavior to notice
1574                  * unregistration errors.
1575                  */
1576                 if (!vma_can_userfault(cur, cur->vm_flags, wp_async))
1577                         goto out_unlock;
1578
1579                 found = true;
1580         } for_each_vma_range(vmi, cur, end);
1581         BUG_ON(!found);
1582
1583         vma_iter_set(&vmi, start);
1584         prev = vma_prev(&vmi);
1585         if (vma->vm_start < start)
1586                 prev = vma;
1587
1588         ret = 0;
1589         for_each_vma_range(vmi, vma, end) {
1590                 cond_resched();
1591
1592                 BUG_ON(!vma_can_userfault(vma, vma->vm_flags, wp_async));
1593
1594                 /*
1595                  * Nothing to do: this vma is already registered into this
1596                  * userfaultfd and with the right tracking mode too.
1597                  */
1598                 if (!vma->vm_userfaultfd_ctx.ctx)
1599                         goto skip;
1600
1601                 WARN_ON(!(vma->vm_flags & VM_MAYWRITE));
1602
1603                 if (vma->vm_start > start)
1604                         start = vma->vm_start;
1605                 vma_end = min(end, vma->vm_end);
1606
1607                 if (userfaultfd_missing(vma)) {
1608                         /*
1609                          * Wake any concurrent pending userfault while
1610                          * we unregister, so they will not hang
1611                          * permanently and it avoids userland to call
1612                          * UFFDIO_WAKE explicitly.
1613                          */
1614                         struct userfaultfd_wake_range range;
1615                         range.start = start;
1616                         range.len = vma_end - start;
1617                         wake_userfault(vma->vm_userfaultfd_ctx.ctx, &range);
1618                 }
1619
1620                 /* Reset ptes for the whole vma range if wr-protected */
1621                 if (userfaultfd_wp(vma))
1622                         uffd_wp_range(vma, start, vma_end - start, false);
1623
1624                 new_flags = vma->vm_flags & ~__VM_UFFD_FLAGS;
1625                 vma = vma_modify_flags_uffd(&vmi, prev, vma, start, vma_end,
1626                                             new_flags, NULL_VM_UFFD_CTX);
1627                 if (IS_ERR(vma)) {
1628                         ret = PTR_ERR(vma);
1629                         break;
1630                 }
1631
1632                 /*
1633                  * In the vma_merge() successful mprotect-like case 8:
1634                  * the next vma was merged into the current one and
1635                  * the current one has not been updated yet.
1636                  */
1637                 vma_start_write(vma);
1638                 userfaultfd_set_vm_flags(vma, new_flags);
1639                 vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
1640
1641         skip:
1642                 prev = vma;
1643                 start = vma->vm_end;
1644         }
1645
1646 out_unlock:
1647         mmap_write_unlock(mm);
1648         mmput(mm);
1649 out:
1650         return ret;
1651 }
1652
1653 /*
1654  * userfaultfd_wake may be used in combination with the
1655  * UFFDIO_*_MODE_DONTWAKE to wakeup userfaults in batches.
1656  */
1657 static int userfaultfd_wake(struct userfaultfd_ctx *ctx,
1658                             unsigned long arg)
1659 {
1660         int ret;
1661         struct uffdio_range uffdio_wake;
1662         struct userfaultfd_wake_range range;
1663         const void __user *buf = (void __user *)arg;
1664
1665         ret = -EFAULT;
1666         if (copy_from_user(&uffdio_wake, buf, sizeof(uffdio_wake)))
1667                 goto out;
1668
1669         ret = validate_range(ctx->mm, uffdio_wake.start, uffdio_wake.len);
1670         if (ret)
1671                 goto out;
1672
1673         range.start = uffdio_wake.start;
1674         range.len = uffdio_wake.len;
1675
1676         /*
1677          * len == 0 means wake all and we don't want to wake all here,
1678          * so check it again to be sure.
1679          */
1680         VM_BUG_ON(!range.len);
1681
1682         wake_userfault(ctx, &range);
1683         ret = 0;
1684
1685 out:
1686         return ret;
1687 }
1688
1689 static int userfaultfd_copy(struct userfaultfd_ctx *ctx,
1690                             unsigned long arg)
1691 {
1692         __s64 ret;
1693         struct uffdio_copy uffdio_copy;
1694         struct uffdio_copy __user *user_uffdio_copy;
1695         struct userfaultfd_wake_range range;
1696         uffd_flags_t flags = 0;
1697
1698         user_uffdio_copy = (struct uffdio_copy __user *) arg;
1699
1700         ret = -EAGAIN;
1701         if (atomic_read(&ctx->mmap_changing))
1702                 goto out;
1703
1704         ret = -EFAULT;
1705         if (copy_from_user(&uffdio_copy, user_uffdio_copy,
1706                            /* don't copy "copy" last field */
1707                            sizeof(uffdio_copy)-sizeof(__s64)))
1708                 goto out;
1709
1710         ret = validate_unaligned_range(ctx->mm, uffdio_copy.src,
1711                                        uffdio_copy.len);
1712         if (ret)
1713                 goto out;
1714         ret = validate_range(ctx->mm, uffdio_copy.dst, uffdio_copy.len);
1715         if (ret)
1716                 goto out;
1717
1718         ret = -EINVAL;
1719         if (uffdio_copy.mode & ~(UFFDIO_COPY_MODE_DONTWAKE|UFFDIO_COPY_MODE_WP))
1720                 goto out;
1721         if (uffdio_copy.mode & UFFDIO_COPY_MODE_WP)
1722                 flags |= MFILL_ATOMIC_WP;
1723         if (mmget_not_zero(ctx->mm)) {
1724                 ret = mfill_atomic_copy(ctx, uffdio_copy.dst, uffdio_copy.src,
1725                                         uffdio_copy.len, flags);
1726                 mmput(ctx->mm);
1727         } else {
1728                 return -ESRCH;
1729         }
1730         if (unlikely(put_user(ret, &user_uffdio_copy->copy)))
1731                 return -EFAULT;
1732         if (ret < 0)
1733                 goto out;
1734         BUG_ON(!ret);
1735         /* len == 0 would wake all */
1736         range.len = ret;
1737         if (!(uffdio_copy.mode & UFFDIO_COPY_MODE_DONTWAKE)) {
1738                 range.start = uffdio_copy.dst;
1739                 wake_userfault(ctx, &range);
1740         }
1741         ret = range.len == uffdio_copy.len ? 0 : -EAGAIN;
1742 out:
1743         return ret;
1744 }
1745
1746 static int userfaultfd_zeropage(struct userfaultfd_ctx *ctx,
1747                                 unsigned long arg)
1748 {
1749         __s64 ret;
1750         struct uffdio_zeropage uffdio_zeropage;
1751         struct uffdio_zeropage __user *user_uffdio_zeropage;
1752         struct userfaultfd_wake_range range;
1753
1754         user_uffdio_zeropage = (struct uffdio_zeropage __user *) arg;
1755
1756         ret = -EAGAIN;
1757         if (atomic_read(&ctx->mmap_changing))
1758                 goto out;
1759
1760         ret = -EFAULT;
1761         if (copy_from_user(&uffdio_zeropage, user_uffdio_zeropage,
1762                            /* don't copy "zeropage" last field */
1763                            sizeof(uffdio_zeropage)-sizeof(__s64)))
1764                 goto out;
1765
1766         ret = validate_range(ctx->mm, uffdio_zeropage.range.start,
1767                              uffdio_zeropage.range.len);
1768         if (ret)
1769                 goto out;
1770         ret = -EINVAL;
1771         if (uffdio_zeropage.mode & ~UFFDIO_ZEROPAGE_MODE_DONTWAKE)
1772                 goto out;
1773
1774         if (mmget_not_zero(ctx->mm)) {
1775                 ret = mfill_atomic_zeropage(ctx, uffdio_zeropage.range.start,
1776                                            uffdio_zeropage.range.len);
1777                 mmput(ctx->mm);
1778         } else {
1779                 return -ESRCH;
1780         }
1781         if (unlikely(put_user(ret, &user_uffdio_zeropage->zeropage)))
1782                 return -EFAULT;
1783         if (ret < 0)
1784                 goto out;
1785         /* len == 0 would wake all */
1786         BUG_ON(!ret);
1787         range.len = ret;
1788         if (!(uffdio_zeropage.mode & UFFDIO_ZEROPAGE_MODE_DONTWAKE)) {
1789                 range.start = uffdio_zeropage.range.start;
1790                 wake_userfault(ctx, &range);
1791         }
1792         ret = range.len == uffdio_zeropage.range.len ? 0 : -EAGAIN;
1793 out:
1794         return ret;
1795 }
1796
1797 static int userfaultfd_writeprotect(struct userfaultfd_ctx *ctx,
1798                                     unsigned long arg)
1799 {
1800         int ret;
1801         struct uffdio_writeprotect uffdio_wp;
1802         struct uffdio_writeprotect __user *user_uffdio_wp;
1803         struct userfaultfd_wake_range range;
1804         bool mode_wp, mode_dontwake;
1805
1806         if (atomic_read(&ctx->mmap_changing))
1807                 return -EAGAIN;
1808
1809         user_uffdio_wp = (struct uffdio_writeprotect __user *) arg;
1810
1811         if (copy_from_user(&uffdio_wp, user_uffdio_wp,
1812                            sizeof(struct uffdio_writeprotect)))
1813                 return -EFAULT;
1814
1815         ret = validate_range(ctx->mm, uffdio_wp.range.start,
1816                              uffdio_wp.range.len);
1817         if (ret)
1818                 return ret;
1819
1820         if (uffdio_wp.mode & ~(UFFDIO_WRITEPROTECT_MODE_DONTWAKE |
1821                                UFFDIO_WRITEPROTECT_MODE_WP))
1822                 return -EINVAL;
1823
1824         mode_wp = uffdio_wp.mode & UFFDIO_WRITEPROTECT_MODE_WP;
1825         mode_dontwake = uffdio_wp.mode & UFFDIO_WRITEPROTECT_MODE_DONTWAKE;
1826
1827         if (mode_wp && mode_dontwake)
1828                 return -EINVAL;
1829
1830         if (mmget_not_zero(ctx->mm)) {
1831                 ret = mwriteprotect_range(ctx, uffdio_wp.range.start,
1832                                           uffdio_wp.range.len, mode_wp);
1833                 mmput(ctx->mm);
1834         } else {
1835                 return -ESRCH;
1836         }
1837
1838         if (ret)
1839                 return ret;
1840
1841         if (!mode_wp && !mode_dontwake) {
1842                 range.start = uffdio_wp.range.start;
1843                 range.len = uffdio_wp.range.len;
1844                 wake_userfault(ctx, &range);
1845         }
1846         return ret;
1847 }
1848
1849 static int userfaultfd_continue(struct userfaultfd_ctx *ctx, unsigned long arg)
1850 {
1851         __s64 ret;
1852         struct uffdio_continue uffdio_continue;
1853         struct uffdio_continue __user *user_uffdio_continue;
1854         struct userfaultfd_wake_range range;
1855         uffd_flags_t flags = 0;
1856
1857         user_uffdio_continue = (struct uffdio_continue __user *)arg;
1858
1859         ret = -EAGAIN;
1860         if (atomic_read(&ctx->mmap_changing))
1861                 goto out;
1862
1863         ret = -EFAULT;
1864         if (copy_from_user(&uffdio_continue, user_uffdio_continue,
1865                            /* don't copy the output fields */
1866                            sizeof(uffdio_continue) - (sizeof(__s64))))
1867                 goto out;
1868
1869         ret = validate_range(ctx->mm, uffdio_continue.range.start,
1870                              uffdio_continue.range.len);
1871         if (ret)
1872                 goto out;
1873
1874         ret = -EINVAL;
1875         if (uffdio_continue.mode & ~(UFFDIO_CONTINUE_MODE_DONTWAKE |
1876                                      UFFDIO_CONTINUE_MODE_WP))
1877                 goto out;
1878         if (uffdio_continue.mode & UFFDIO_CONTINUE_MODE_WP)
1879                 flags |= MFILL_ATOMIC_WP;
1880
1881         if (mmget_not_zero(ctx->mm)) {
1882                 ret = mfill_atomic_continue(ctx, uffdio_continue.range.start,
1883                                             uffdio_continue.range.len, flags);
1884                 mmput(ctx->mm);
1885         } else {
1886                 return -ESRCH;
1887         }
1888
1889         if (unlikely(put_user(ret, &user_uffdio_continue->mapped)))
1890                 return -EFAULT;
1891         if (ret < 0)
1892                 goto out;
1893
1894         /* len == 0 would wake all */
1895         BUG_ON(!ret);
1896         range.len = ret;
1897         if (!(uffdio_continue.mode & UFFDIO_CONTINUE_MODE_DONTWAKE)) {
1898                 range.start = uffdio_continue.range.start;
1899                 wake_userfault(ctx, &range);
1900         }
1901         ret = range.len == uffdio_continue.range.len ? 0 : -EAGAIN;
1902
1903 out:
1904         return ret;
1905 }
1906
1907 static inline int userfaultfd_poison(struct userfaultfd_ctx *ctx, unsigned long arg)
1908 {
1909         __s64 ret;
1910         struct uffdio_poison uffdio_poison;
1911         struct uffdio_poison __user *user_uffdio_poison;
1912         struct userfaultfd_wake_range range;
1913
1914         user_uffdio_poison = (struct uffdio_poison __user *)arg;
1915
1916         ret = -EAGAIN;
1917         if (atomic_read(&ctx->mmap_changing))
1918                 goto out;
1919
1920         ret = -EFAULT;
1921         if (copy_from_user(&uffdio_poison, user_uffdio_poison,
1922                            /* don't copy the output fields */
1923                            sizeof(uffdio_poison) - (sizeof(__s64))))
1924                 goto out;
1925
1926         ret = validate_range(ctx->mm, uffdio_poison.range.start,
1927                              uffdio_poison.range.len);
1928         if (ret)
1929                 goto out;
1930
1931         ret = -EINVAL;
1932         if (uffdio_poison.mode & ~UFFDIO_POISON_MODE_DONTWAKE)
1933                 goto out;
1934
1935         if (mmget_not_zero(ctx->mm)) {
1936                 ret = mfill_atomic_poison(ctx, uffdio_poison.range.start,
1937                                           uffdio_poison.range.len, 0);
1938                 mmput(ctx->mm);
1939         } else {
1940                 return -ESRCH;
1941         }
1942
1943         if (unlikely(put_user(ret, &user_uffdio_poison->updated)))
1944                 return -EFAULT;
1945         if (ret < 0)
1946                 goto out;
1947
1948         /* len == 0 would wake all */
1949         BUG_ON(!ret);
1950         range.len = ret;
1951         if (!(uffdio_poison.mode & UFFDIO_POISON_MODE_DONTWAKE)) {
1952                 range.start = uffdio_poison.range.start;
1953                 wake_userfault(ctx, &range);
1954         }
1955         ret = range.len == uffdio_poison.range.len ? 0 : -EAGAIN;
1956
1957 out:
1958         return ret;
1959 }
1960
1961 bool userfaultfd_wp_async(struct vm_area_struct *vma)
1962 {
1963         return userfaultfd_wp_async_ctx(vma->vm_userfaultfd_ctx.ctx);
1964 }
1965
1966 static inline unsigned int uffd_ctx_features(__u64 user_features)
1967 {
1968         /*
1969          * For the current set of features the bits just coincide. Set
1970          * UFFD_FEATURE_INITIALIZED to mark the features as enabled.
1971          */
1972         return (unsigned int)user_features | UFFD_FEATURE_INITIALIZED;
1973 }
1974
1975 static int userfaultfd_move(struct userfaultfd_ctx *ctx,
1976                             unsigned long arg)
1977 {
1978         __s64 ret;
1979         struct uffdio_move uffdio_move;
1980         struct uffdio_move __user *user_uffdio_move;
1981         struct userfaultfd_wake_range range;
1982         struct mm_struct *mm = ctx->mm;
1983
1984         user_uffdio_move = (struct uffdio_move __user *) arg;
1985
1986         if (atomic_read(&ctx->mmap_changing))
1987                 return -EAGAIN;
1988
1989         if (copy_from_user(&uffdio_move, user_uffdio_move,
1990                            /* don't copy "move" last field */
1991                            sizeof(uffdio_move)-sizeof(__s64)))
1992                 return -EFAULT;
1993
1994         /* Do not allow cross-mm moves. */
1995         if (mm != current->mm)
1996                 return -EINVAL;
1997
1998         ret = validate_range(mm, uffdio_move.dst, uffdio_move.len);
1999         if (ret)
2000                 return ret;
2001
2002         ret = validate_range(mm, uffdio_move.src, uffdio_move.len);
2003         if (ret)
2004                 return ret;
2005
2006         if (uffdio_move.mode & ~(UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES|
2007                                   UFFDIO_MOVE_MODE_DONTWAKE))
2008                 return -EINVAL;
2009
2010         if (mmget_not_zero(mm)) {
2011                 ret = move_pages(ctx, uffdio_move.dst, uffdio_move.src,
2012                                  uffdio_move.len, uffdio_move.mode);
2013                 mmput(mm);
2014         } else {
2015                 return -ESRCH;
2016         }
2017
2018         if (unlikely(put_user(ret, &user_uffdio_move->move)))
2019                 return -EFAULT;
2020         if (ret < 0)
2021                 goto out;
2022
2023         /* len == 0 would wake all */
2024         VM_WARN_ON(!ret);
2025         range.len = ret;
2026         if (!(uffdio_move.mode & UFFDIO_MOVE_MODE_DONTWAKE)) {
2027                 range.start = uffdio_move.dst;
2028                 wake_userfault(ctx, &range);
2029         }
2030         ret = range.len == uffdio_move.len ? 0 : -EAGAIN;
2031
2032 out:
2033         return ret;
2034 }
2035
2036 /*
2037  * userland asks for a certain API version and we return which bits
2038  * and ioctl commands are implemented in this kernel for such API
2039  * version or -EINVAL if unknown.
2040  */
2041 static int userfaultfd_api(struct userfaultfd_ctx *ctx,
2042                            unsigned long arg)
2043 {
2044         struct uffdio_api uffdio_api;
2045         void __user *buf = (void __user *)arg;
2046         unsigned int ctx_features;
2047         int ret;
2048         __u64 features;
2049
2050         ret = -EFAULT;
2051         if (copy_from_user(&uffdio_api, buf, sizeof(uffdio_api)))
2052                 goto out;
2053         features = uffdio_api.features;
2054         ret = -EINVAL;
2055         if (uffdio_api.api != UFFD_API || (features & ~UFFD_API_FEATURES))
2056                 goto err_out;
2057         ret = -EPERM;
2058         if ((features & UFFD_FEATURE_EVENT_FORK) && !capable(CAP_SYS_PTRACE))
2059                 goto err_out;
2060
2061         /* WP_ASYNC relies on WP_UNPOPULATED, choose it unconditionally */
2062         if (features & UFFD_FEATURE_WP_ASYNC)
2063                 features |= UFFD_FEATURE_WP_UNPOPULATED;
2064
2065         /* report all available features and ioctls to userland */
2066         uffdio_api.features = UFFD_API_FEATURES;
2067 #ifndef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
2068         uffdio_api.features &=
2069                 ~(UFFD_FEATURE_MINOR_HUGETLBFS | UFFD_FEATURE_MINOR_SHMEM);
2070 #endif
2071 #ifndef CONFIG_HAVE_ARCH_USERFAULTFD_WP
2072         uffdio_api.features &= ~UFFD_FEATURE_PAGEFAULT_FLAG_WP;
2073 #endif
2074 #ifndef CONFIG_PTE_MARKER_UFFD_WP
2075         uffdio_api.features &= ~UFFD_FEATURE_WP_HUGETLBFS_SHMEM;
2076         uffdio_api.features &= ~UFFD_FEATURE_WP_UNPOPULATED;
2077         uffdio_api.features &= ~UFFD_FEATURE_WP_ASYNC;
2078 #endif
2079         uffdio_api.ioctls = UFFD_API_IOCTLS;
2080         ret = -EFAULT;
2081         if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
2082                 goto out;
2083
2084         /* only enable the requested features for this uffd context */
2085         ctx_features = uffd_ctx_features(features);
2086         ret = -EINVAL;
2087         if (cmpxchg(&ctx->features, 0, ctx_features) != 0)
2088                 goto err_out;
2089
2090         ret = 0;
2091 out:
2092         return ret;
2093 err_out:
2094         memset(&uffdio_api, 0, sizeof(uffdio_api));
2095         if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
2096                 ret = -EFAULT;
2097         goto out;
2098 }
2099
2100 static long userfaultfd_ioctl(struct file *file, unsigned cmd,
2101                               unsigned long arg)
2102 {
2103         int ret = -EINVAL;
2104         struct userfaultfd_ctx *ctx = file->private_data;
2105
2106         if (cmd != UFFDIO_API && !userfaultfd_is_initialized(ctx))
2107                 return -EINVAL;
2108
2109         switch(cmd) {
2110         case UFFDIO_API:
2111                 ret = userfaultfd_api(ctx, arg);
2112                 break;
2113         case UFFDIO_REGISTER:
2114                 ret = userfaultfd_register(ctx, arg);
2115                 break;
2116         case UFFDIO_UNREGISTER:
2117                 ret = userfaultfd_unregister(ctx, arg);
2118                 break;
2119         case UFFDIO_WAKE:
2120                 ret = userfaultfd_wake(ctx, arg);
2121                 break;
2122         case UFFDIO_COPY:
2123                 ret = userfaultfd_copy(ctx, arg);
2124                 break;
2125         case UFFDIO_ZEROPAGE:
2126                 ret = userfaultfd_zeropage(ctx, arg);
2127                 break;
2128         case UFFDIO_MOVE:
2129                 ret = userfaultfd_move(ctx, arg);
2130                 break;
2131         case UFFDIO_WRITEPROTECT:
2132                 ret = userfaultfd_writeprotect(ctx, arg);
2133                 break;
2134         case UFFDIO_CONTINUE:
2135                 ret = userfaultfd_continue(ctx, arg);
2136                 break;
2137         case UFFDIO_POISON:
2138                 ret = userfaultfd_poison(ctx, arg);
2139                 break;
2140         }
2141         return ret;
2142 }
2143
2144 #ifdef CONFIG_PROC_FS
2145 static void userfaultfd_show_fdinfo(struct seq_file *m, struct file *f)
2146 {
2147         struct userfaultfd_ctx *ctx = f->private_data;
2148         wait_queue_entry_t *wq;
2149         unsigned long pending = 0, total = 0;
2150
2151         spin_lock_irq(&ctx->fault_pending_wqh.lock);
2152         list_for_each_entry(wq, &ctx->fault_pending_wqh.head, entry) {
2153                 pending++;
2154                 total++;
2155         }
2156         list_for_each_entry(wq, &ctx->fault_wqh.head, entry) {
2157                 total++;
2158         }
2159         spin_unlock_irq(&ctx->fault_pending_wqh.lock);
2160
2161         /*
2162          * If more protocols will be added, there will be all shown
2163          * separated by a space. Like this:
2164          *      protocols: aa:... bb:...
2165          */
2166         seq_printf(m, "pending:\t%lu\ntotal:\t%lu\nAPI:\t%Lx:%x:%Lx\n",
2167                    pending, total, UFFD_API, ctx->features,
2168                    UFFD_API_IOCTLS|UFFD_API_RANGE_IOCTLS);
2169 }
2170 #endif
2171
2172 static const struct file_operations userfaultfd_fops = {
2173 #ifdef CONFIG_PROC_FS
2174         .show_fdinfo    = userfaultfd_show_fdinfo,
2175 #endif
2176         .release        = userfaultfd_release,
2177         .poll           = userfaultfd_poll,
2178         .read           = userfaultfd_read,
2179         .unlocked_ioctl = userfaultfd_ioctl,
2180         .compat_ioctl   = compat_ptr_ioctl,
2181         .llseek         = noop_llseek,
2182 };
2183
2184 static void init_once_userfaultfd_ctx(void *mem)
2185 {
2186         struct userfaultfd_ctx *ctx = (struct userfaultfd_ctx *) mem;
2187
2188         init_waitqueue_head(&ctx->fault_pending_wqh);
2189         init_waitqueue_head(&ctx->fault_wqh);
2190         init_waitqueue_head(&ctx->event_wqh);
2191         init_waitqueue_head(&ctx->fd_wqh);
2192         seqcount_spinlock_init(&ctx->refile_seq, &ctx->fault_pending_wqh.lock);
2193 }
2194
2195 static int new_userfaultfd(int flags)
2196 {
2197         struct userfaultfd_ctx *ctx;
2198         int fd;
2199
2200         BUG_ON(!current->mm);
2201
2202         /* Check the UFFD_* constants for consistency.  */
2203         BUILD_BUG_ON(UFFD_USER_MODE_ONLY & UFFD_SHARED_FCNTL_FLAGS);
2204         BUILD_BUG_ON(UFFD_CLOEXEC != O_CLOEXEC);
2205         BUILD_BUG_ON(UFFD_NONBLOCK != O_NONBLOCK);
2206
2207         if (flags & ~(UFFD_SHARED_FCNTL_FLAGS | UFFD_USER_MODE_ONLY))
2208                 return -EINVAL;
2209
2210         ctx = kmem_cache_alloc(userfaultfd_ctx_cachep, GFP_KERNEL);
2211         if (!ctx)
2212                 return -ENOMEM;
2213
2214         refcount_set(&ctx->refcount, 1);
2215         ctx->flags = flags;
2216         ctx->features = 0;
2217         ctx->released = false;
2218         init_rwsem(&ctx->map_changing_lock);
2219         atomic_set(&ctx->mmap_changing, 0);
2220         ctx->mm = current->mm;
2221         /* prevent the mm struct to be freed */
2222         mmgrab(ctx->mm);
2223
2224         /* Create a new inode so that the LSM can block the creation.  */
2225         fd = anon_inode_create_getfd("[userfaultfd]", &userfaultfd_fops, ctx,
2226                         O_RDONLY | (flags & UFFD_SHARED_FCNTL_FLAGS), NULL);
2227         if (fd < 0) {
2228                 mmdrop(ctx->mm);
2229                 kmem_cache_free(userfaultfd_ctx_cachep, ctx);
2230         }
2231         return fd;
2232 }
2233
2234 static inline bool userfaultfd_syscall_allowed(int flags)
2235 {
2236         /* Userspace-only page faults are always allowed */
2237         if (flags & UFFD_USER_MODE_ONLY)
2238                 return true;
2239
2240         /*
2241          * The user is requesting a userfaultfd which can handle kernel faults.
2242          * Privileged users are always allowed to do this.
2243          */
2244         if (capable(CAP_SYS_PTRACE))
2245                 return true;
2246
2247         /* Otherwise, access to kernel fault handling is sysctl controlled. */
2248         return sysctl_unprivileged_userfaultfd;
2249 }
2250
2251 SYSCALL_DEFINE1(userfaultfd, int, flags)
2252 {
2253         if (!userfaultfd_syscall_allowed(flags))
2254                 return -EPERM;
2255
2256         return new_userfaultfd(flags);
2257 }
2258
2259 static long userfaultfd_dev_ioctl(struct file *file, unsigned int cmd, unsigned long flags)
2260 {
2261         if (cmd != USERFAULTFD_IOC_NEW)
2262                 return -EINVAL;
2263
2264         return new_userfaultfd(flags);
2265 }
2266
2267 static const struct file_operations userfaultfd_dev_fops = {
2268         .unlocked_ioctl = userfaultfd_dev_ioctl,
2269         .compat_ioctl = userfaultfd_dev_ioctl,
2270         .owner = THIS_MODULE,
2271         .llseek = noop_llseek,
2272 };
2273
2274 static struct miscdevice userfaultfd_misc = {
2275         .minor = MISC_DYNAMIC_MINOR,
2276         .name = "userfaultfd",
2277         .fops = &userfaultfd_dev_fops
2278 };
2279
2280 static int __init userfaultfd_init(void)
2281 {
2282         int ret;
2283
2284         ret = misc_register(&userfaultfd_misc);
2285         if (ret)
2286                 return ret;
2287
2288         userfaultfd_ctx_cachep = kmem_cache_create("userfaultfd_ctx_cache",
2289                                                 sizeof(struct userfaultfd_ctx),
2290                                                 0,
2291                                                 SLAB_HWCACHE_ALIGN|SLAB_PANIC,
2292                                                 init_once_userfaultfd_ctx);
2293 #ifdef CONFIG_SYSCTL
2294         register_sysctl_init("vm", vm_userfaultfd_table);
2295 #endif
2296         return 0;
2297 }
2298 __initcall(userfaultfd_init);
This page took 0.165361 seconds and 4 git commands to generate.