// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Refer to include/linux/hmm.h for information about heterogeneous memory
 * management or HMM for short.
 */
#include <linux/pagewalk.h>
#include <linux/hmm.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/sched/mm.h>
#include <linux/jump_label.h>
#include <linux/dma-mapping.h>
#include <linux/mmu_notifier.h>
#include <linux/memory_hotplug.h>

static struct mmu_notifier *hmm_alloc_notifier(struct mm_struct *mm)
        hmm = kzalloc(sizeof(*hmm), GFP_KERNEL);
                return ERR_PTR(-ENOMEM);

        init_waitqueue_head(&hmm->wq);
        INIT_LIST_HEAD(&hmm->mirrors);
        init_rwsem(&hmm->mirrors_sem);
        INIT_LIST_HEAD(&hmm->ranges);
        spin_lock_init(&hmm->ranges_lock);

        return &hmm->mmu_notifier;

static void hmm_free_notifier(struct mmu_notifier *mn)
        struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);

        WARN_ON(!list_empty(&hmm->ranges));
        WARN_ON(!list_empty(&hmm->mirrors));
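
/*
 * Note on lifetime: hmm_alloc_notifier()/hmm_free_notifier() are invoked by
 * the mmu_notifier core (via mmu_notifier_get_locked() and mmu_notifier_put()
 * below), so the struct hmm lives and dies with the notifier registration
 * rather than being allocated directly by drivers.
 */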

static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
        struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);
        struct hmm_mirror *mirror;

        /*
         * Since hmm_range_register() holds the mmget() lock, hmm_release() is
         * prevented as long as a range exists.
         */
        WARN_ON(!list_empty_careful(&hmm->ranges));

        down_read(&hmm->mirrors_sem);
        list_for_each_entry(mirror, &hmm->mirrors, list) {
                /*
                 * Note: The driver is not allowed to trigger
                 * hmm_mirror_unregister() from this thread.
                 */
                if (mirror->ops->release)
                        mirror->ops->release(mirror);
        up_read(&hmm->mirrors_sem);
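
/*
 * Illustrative sketch (not part of this file): a driver's ->release callback
 * may only mark its mirror dead and stop device access; it must not call
 * hmm_mirror_unregister() from this context, as noted above. The driver
 * names below (struct my_mirror, my_release) are hypothetical.
 */
#if 0
struct my_mirror {
        struct hmm_mirror mirror;
        bool dead;
};

static void my_release(struct hmm_mirror *mirror)
{
        struct my_mirror *m = container_of(mirror, struct my_mirror, mirror);

        /* Stop device access; defer hmm_mirror_unregister() to teardown. */
        WRITE_ONCE(m->dead, true);
}
#endif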

static void notifiers_decrement(struct hmm *hmm)
        spin_lock_irqsave(&hmm->ranges_lock, flags);
        if (!hmm->notifiers) {
                struct hmm_range *range;

                list_for_each_entry(range, &hmm->ranges, list) {
                wake_up_all(&hmm->wq);
        spin_unlock_irqrestore(&hmm->ranges_lock, flags);
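
/*
 * notifiers_decrement() pairs with the notifier count increment done in
 * hmm_invalidate_range_start(); once no invalidations are in flight the
 * registered ranges become valid again and waiters in
 * hmm_range_wait_until_valid() are woken through hmm->wq.
 */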

static int hmm_invalidate_range_start(struct mmu_notifier *mn,
                        const struct mmu_notifier_range *nrange)
        struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);
        struct hmm_mirror *mirror;
        struct hmm_range *range;

        spin_lock_irqsave(&hmm->ranges_lock, flags);
        list_for_each_entry(range, &hmm->ranges, list) {
                if (nrange->end < range->start || nrange->start >= range->end)
                range->valid = false;
        spin_unlock_irqrestore(&hmm->ranges_lock, flags);

        if (mmu_notifier_range_blockable(nrange))
                down_read(&hmm->mirrors_sem);
        else if (!down_read_trylock(&hmm->mirrors_sem)) {

        list_for_each_entry(mirror, &hmm->mirrors, list) {
                rc = mirror->ops->sync_cpu_device_pagetables(mirror, nrange);
                if (WARN_ON(mmu_notifier_range_blockable(nrange) ||
        up_read(&hmm->mirrors_sem);

        notifiers_decrement(hmm);

static void hmm_invalidate_range_end(struct mmu_notifier *mn,
                        const struct mmu_notifier_range *nrange)
        struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);

        notifiers_decrement(hmm);

static const struct mmu_notifier_ops hmm_mmu_notifier_ops = {
        .release = hmm_release,
        .invalidate_range_start = hmm_invalidate_range_start,
        .invalidate_range_end = hmm_invalidate_range_end,
        .alloc_notifier = hmm_alloc_notifier,
        .free_notifier = hmm_free_notifier,
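
/*
 * Illustrative sketch (hypothetical driver code, not part of this file): a
 * mirror's sync_cpu_device_pagetables() callback must invalidate the device's
 * copy of the affected range, and may return -EAGAIN only when the notifier
 * range is non-blockable (see the WARN_ON() in hmm_invalidate_range_start()
 * above). struct my_device, its pt_lock and my_device_invalidate() are
 * assumptions for the example.
 */
#if 0
static int my_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
                const struct mmu_notifier_range *nrange)
{
        struct my_device *mdev = container_of(mirror, struct my_device, mirror);

        if (!mmu_notifier_range_blockable(nrange)) {
                if (!mutex_trylock(&mdev->pt_lock))
                        return -EAGAIN;
        } else {
                mutex_lock(&mdev->pt_lock);
        }

        /* Drop device page table entries covering [start, end). */
        my_device_invalidate(mdev, nrange->start, nrange->end);
        mutex_unlock(&mdev->pt_lock);
        return 0;
}
#endif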

/*
 * hmm_mirror_register() - register a mirror against an mm
 *
 * @mirror: new mirror struct to register
 * @mm: mm to register against
 * Return: 0 on success, -ENOMEM if no memory, -EINVAL if invalid arguments
 *
 * To start mirroring a process address space, the device driver must register
 * an HMM mirror struct.
 *
 * The caller cannot unregister the hmm_mirror while any ranges are
 * registered.
 *
 * Callers using this function must put a call to mmu_notifier_synchronize()
 * in their module exit functions.
 */
int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm)
        struct mmu_notifier *mn;

        lockdep_assert_held_write(&mm->mmap_sem);

        if (!mm || !mirror || !mirror->ops)

        mn = mmu_notifier_get_locked(&hmm_mmu_notifier_ops, mm);

        mirror->hmm = container_of(mn, struct hmm, mmu_notifier);

        down_write(&mirror->hmm->mirrors_sem);
        list_add(&mirror->list, &mirror->hmm->mirrors);
        up_write(&mirror->hmm->mirrors_sem);

EXPORT_SYMBOL(hmm_mirror_register);
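
/*
 * Illustrative usage sketch (hypothetical driver code, not part of this
 * file): registration is done under the write side of mmap_sem, and a module
 * using this interface must call mmu_notifier_synchronize() on exit, as the
 * comment above requires. struct my_device and my_mirror_ops are assumptions.
 */
#if 0
static int my_mirror_setup(struct my_device *mdev, struct mm_struct *mm)
{
        int ret;

        mdev->mirror.ops = &my_mirror_ops;

        down_write(&mm->mmap_sem);
        ret = hmm_mirror_register(&mdev->mirror, mm);
        up_write(&mm->mmap_sem);

        return ret;
}

static void __exit my_driver_exit(void)
{
        /* Wait for any deferred hmm_free_notifier() calls to finish. */
        mmu_notifier_synchronize();
}
#endif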

/*
 * hmm_mirror_unregister() - unregister a mirror
 *
 * @mirror: mirror struct to unregister
 *
 * Stop mirroring a process address space, and cleanup.
 */
void hmm_mirror_unregister(struct hmm_mirror *mirror)
        struct hmm *hmm = mirror->hmm;

        down_write(&hmm->mirrors_sem);
        list_del(&mirror->list);
        up_write(&hmm->mirrors_sem);
        mmu_notifier_put(&hmm->mmu_notifier);
EXPORT_SYMBOL(hmm_mirror_unregister);

struct hmm_vma_walk {
        struct hmm_range *range;
        struct dev_pagemap *pgmap;

static int hmm_vma_do_fault(struct mm_walk *walk, unsigned long addr,
                            bool write_fault, uint64_t *pfn)
        unsigned int flags = FAULT_FLAG_REMOTE;
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        struct vm_area_struct *vma = walk->vma;

        if (hmm_vma_walk->flags & HMM_FAULT_ALLOW_RETRY)
                flags |= FAULT_FLAG_ALLOW_RETRY;
                flags |= FAULT_FLAG_WRITE;

        ret = handle_mm_fault(vma, addr, flags);
        if (ret & VM_FAULT_RETRY) {
                /* Note: handle_mm_fault() did up_read(&mm->mmap_sem) */
        if (ret & VM_FAULT_ERROR)
                *pfn = range->values[HMM_PFN_ERROR];

static int hmm_pfns_bad(unsigned long addr,
                        struct mm_walk *walk)
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        uint64_t *pfns = range->pfns;

        i = (addr - range->start) >> PAGE_SHIFT;
        for (; addr < end; addr += PAGE_SIZE, i++)
                pfns[i] = range->values[HMM_PFN_ERROR];

/*
 * hmm_vma_walk_hole_() - handle a range lacking valid pmd or pte(s)
 * @addr: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @fault: should we fault or not?
 * @write_fault: is it a write fault?
 * @walk: mm_walk structure
 * Return: 0 on success, -EBUSY after page fault, or page fault error
 *
 * This function will be called whenever pmd_none() or pte_none() returns true,
 * or whenever there is no page directory covering the virtual address range.
 */
static int hmm_vma_walk_hole_(unsigned long addr, unsigned long end,
                              bool fault, bool write_fault,
                              struct mm_walk *walk)
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        uint64_t *pfns = range->pfns;

        hmm_vma_walk->last = addr;
        i = (addr - range->start) >> PAGE_SHIFT;

        if (write_fault && walk->vma && !(walk->vma->vm_flags & VM_WRITE))

        for (; addr < end; addr += PAGE_SIZE, i++) {
                pfns[i] = range->values[HMM_PFN_NONE];
                if (fault || write_fault) {
                        ret = hmm_vma_do_fault(walk, addr, write_fault,

        return (fault || write_fault) ? -EBUSY : 0;
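
/*
 * Returning -EBUSY after recording hmm_vma_walk->last lets hmm_range_fault()
 * restart the walk from the last address it handled once the range is valid
 * again (see the retry loop in hmm_range_fault() below).
 */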

static inline void hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
                                      uint64_t pfns, uint64_t cpu_flags,
                                      bool *fault, bool *write_fault)
        struct hmm_range *range = hmm_vma_walk->range;

        if (hmm_vma_walk->flags & HMM_FAULT_SNAPSHOT)

        /*
         * We consider not only the individual per-page request in pfns[] but
         * also the default flags requested for the whole range. The API can
         * be used in two ways: either the HMM user coalesces multiple page
         * faults into one request and sets flags per pfn for those faults,
         * or the HMM user wants to pre-fault a range with specific flags.
         * For the latter it would be a waste to have the user pre-fill the
         * pfn array with default flags.
         */
        pfns = (pfns & range->pfn_flags_mask) | range->default_flags;
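
        /*
         * Worked example (illustrative): a caller that wants to pre-fault a
         * whole range with write permission can set
         *     range->default_flags = range->flags[HMM_PFN_VALID] |
         *                            range->flags[HMM_PFN_WRITE];
         *     range->pfn_flags_mask = 0;
         * and leave pfns[] zeroed; every entry is then treated as a valid
         * write-fault request regardless of its per-pfn value.
         */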

        /* We aren't asked to do anything ... */
        if (!(pfns & range->flags[HMM_PFN_VALID]))

        /* If this is device memory then only fault if explicitly requested */
        if ((cpu_flags & range->flags[HMM_PFN_DEVICE_PRIVATE])) {
                /* Do we fault on device memory? */
                if (pfns & range->flags[HMM_PFN_DEVICE_PRIVATE]) {
                        *write_fault = pfns & range->flags[HMM_PFN_WRITE];

        /* If the CPU page table is not valid then we need to fault */
        *fault = !(cpu_flags & range->flags[HMM_PFN_VALID]);
        /* Do we need to write fault? */
        if ((pfns & range->flags[HMM_PFN_WRITE]) &&
            !(cpu_flags & range->flags[HMM_PFN_WRITE])) {

static void hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
                                 const uint64_t *pfns, unsigned long npages,
                                 uint64_t cpu_flags, bool *fault,
        if (hmm_vma_walk->flags & HMM_FAULT_SNAPSHOT) {
                *fault = *write_fault = false;

        *fault = *write_fault = false;
        for (i = 0; i < npages; ++i) {
                hmm_pte_need_fault(hmm_vma_walk, pfns[i], cpu_flags,

static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
                             struct mm_walk *walk)
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        bool fault, write_fault;
        unsigned long i, npages;

        i = (addr - range->start) >> PAGE_SHIFT;
        npages = (end - addr) >> PAGE_SHIFT;
        pfns = &range->pfns[i];
        hmm_range_need_fault(hmm_vma_walk, pfns, npages,
                             0, &fault, &write_fault);
        return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);

static inline uint64_t pmd_to_hmm_pfn_flags(struct hmm_range *range, pmd_t pmd)
        if (pmd_protnone(pmd))
        return pmd_write(pmd) ? range->flags[HMM_PFN_VALID] |
                                range->flags[HMM_PFN_WRITE] :
                                range->flags[HMM_PFN_VALID];

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
                              unsigned long end, uint64_t *pfns, pmd_t pmd)
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        unsigned long pfn, npages, i;
        bool fault, write_fault;

        npages = (end - addr) >> PAGE_SHIFT;
        cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
        hmm_range_need_fault(hmm_vma_walk, pfns, npages, cpu_flags,
                             &fault, &write_fault);

        if (pmd_protnone(pmd) || fault || write_fault)
                return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);

        pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
        for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++) {
                if (pmd_devmap(pmd)) {
                        hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
                                              hmm_vma_walk->pgmap);
                        if (unlikely(!hmm_vma_walk->pgmap))
                pfns[i] = hmm_device_entry_from_pfn(range, pfn) | cpu_flags;
        if (hmm_vma_walk->pgmap) {
                put_dev_pagemap(hmm_vma_walk->pgmap);
                hmm_vma_walk->pgmap = NULL;
        hmm_vma_walk->last = end;
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
/* stub to allow the code below to compile */
int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
                       unsigned long end, uint64_t *pfns, pmd_t pmd);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
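
/*
 * When CONFIG_TRANSPARENT_HUGEPAGE is disabled the prototype above is never
 * reached at run time: pmd_devmap() and pmd_trans_huge() are compile-time
 * zero in hmm_vma_walk_pmd(), so the call is eliminated and the declaration
 * only exists to keep the code below compiling.
 */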

static inline uint64_t pte_to_hmm_pfn_flags(struct hmm_range *range, pte_t pte)
        if (pte_none(pte) || !pte_present(pte) || pte_protnone(pte))
        return pte_write(pte) ? range->flags[HMM_PFN_VALID] |
                                range->flags[HMM_PFN_WRITE] :
                                range->flags[HMM_PFN_VALID];

static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
                              unsigned long end, pmd_t *pmdp, pte_t *ptep,
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        bool fault, write_fault;
        uint64_t orig_pfn = *pfn;

        *pfn = range->values[HMM_PFN_NONE];
        fault = write_fault = false;

                hmm_pte_need_fault(hmm_vma_walk, orig_pfn, 0,
                                   &fault, &write_fault);
                if (fault || write_fault)

        if (!pte_present(pte)) {
                swp_entry_t entry = pte_to_swp_entry(pte);

                if (!non_swap_entry(entry)) {
                        cpu_flags = pte_to_hmm_pfn_flags(range, pte);
                        hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
                                           &fault, &write_fault);
                        if (fault || write_fault)

                /*
                 * This is a special swap entry: ignore migration, use the
                 * device entry, and report anything else as an error.
                 */
                if (is_device_private_entry(entry)) {
                        cpu_flags = range->flags[HMM_PFN_VALID] |
                                range->flags[HMM_PFN_DEVICE_PRIVATE];
                        cpu_flags |= is_write_device_private_entry(entry) ?
                                range->flags[HMM_PFN_WRITE] : 0;
                        hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
                                           &fault, &write_fault);
                        if (fault || write_fault)
                        *pfn = hmm_device_entry_from_pfn(range,

                if (is_migration_entry(entry)) {
                        if (fault || write_fault) {
                                hmm_vma_walk->last = addr;
                                migration_entry_wait(walk->mm, pmdp, addr);

                /* Report error for everything else */
                *pfn = range->values[HMM_PFN_ERROR];

        cpu_flags = pte_to_hmm_pfn_flags(range, pte);
        hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
                           &fault, &write_fault);
        if (fault || write_fault)

        if (pte_devmap(pte)) {
                hmm_vma_walk->pgmap = get_dev_pagemap(pte_pfn(pte),
                                      hmm_vma_walk->pgmap);
                if (unlikely(!hmm_vma_walk->pgmap))
        } else if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) && pte_special(pte)) {
                *pfn = range->values[HMM_PFN_SPECIAL];

        *pfn = hmm_device_entry_from_pfn(range, pte_pfn(pte)) | cpu_flags;

        if (hmm_vma_walk->pgmap) {
                put_dev_pagemap(hmm_vma_walk->pgmap);
                hmm_vma_walk->pgmap = NULL;

        /* Fault any virtual address we were asked to fault */
        return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);

static int hmm_vma_walk_pmd(pmd_t *pmdp,
                            struct mm_walk *walk)
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        uint64_t *pfns = range->pfns;
        unsigned long addr = start, i;

        pmd = READ_ONCE(*pmdp);
                return hmm_vma_walk_hole(start, end, walk);

        if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
                bool fault, write_fault;
                unsigned long npages;

                i = (addr - range->start) >> PAGE_SHIFT;
                npages = (end - addr) >> PAGE_SHIFT;
                pfns = &range->pfns[i];

                hmm_range_need_fault(hmm_vma_walk, pfns, npages,
                                     0, &fault, &write_fault);
                if (fault || write_fault) {
                        hmm_vma_walk->last = addr;
                        pmd_migration_entry_wait(walk->mm, pmdp);
        } else if (!pmd_present(pmd))
                return hmm_pfns_bad(start, end, walk);

        if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
                /*
                 * No need to take the pmd_lock here: even if some other
                 * thread is splitting the huge pmd, we will get that event
                 * through the mmu_notifier callback.
                 *
                 * So just read the pmd value and check again that it is a
                 * transparent huge or device mapping one, and compute the
                 * corresponding pfn values.
                 */
                pmd = pmd_read_atomic(pmdp);

                if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))

                i = (addr - range->start) >> PAGE_SHIFT;
                return hmm_vma_handle_pmd(walk, addr, end, &pfns[i], pmd);

        /*
         * We have handled all the valid cases above, i.e. either none,
         * migration, huge or transparent huge. At this point either it is a
         * valid pmd entry pointing to a pte directory or it is a bad pmd
         * that will not recover.
         */
                return hmm_pfns_bad(start, end, walk);

        ptep = pte_offset_map(pmdp, addr);
        i = (addr - range->start) >> PAGE_SHIFT;
        for (; addr < end; addr += PAGE_SIZE, ptep++, i++) {
                r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, &pfns[i]);
                        /* hmm_vma_handle_pte() already unmapped the pte directory */
                        hmm_vma_walk->last = addr;
        if (hmm_vma_walk->pgmap) {
                /*
                 * We do put_dev_pagemap() here and not in hmm_vma_handle_pte()
                 * so that we can leverage the get_dev_pagemap() optimization
                 * which will not re-take a reference on a pgmap if we already
                 * have one.
                 */
                put_dev_pagemap(hmm_vma_walk->pgmap);
                hmm_vma_walk->pgmap = NULL;
        hmm_vma_walk->last = addr;

#if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && \
    defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
static inline uint64_t pud_to_hmm_pfn_flags(struct hmm_range *range, pud_t pud)
        if (!pud_present(pud))
        return pud_write(pud) ? range->flags[HMM_PFN_VALID] |
                                range->flags[HMM_PFN_WRITE] :
                                range->flags[HMM_PFN_VALID];

static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
                            struct mm_walk *walk)
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        unsigned long addr = start, next;

        pud = READ_ONCE(*pudp);
                return hmm_vma_walk_hole(start, end, walk);

        if (pud_huge(pud) && pud_devmap(pud)) {
                unsigned long i, npages, pfn;
                uint64_t *pfns, cpu_flags;
                bool fault, write_fault;

                if (!pud_present(pud))
                        return hmm_vma_walk_hole(start, end, walk);

                i = (addr - range->start) >> PAGE_SHIFT;
                npages = (end - addr) >> PAGE_SHIFT;
                pfns = &range->pfns[i];

                cpu_flags = pud_to_hmm_pfn_flags(range, pud);
                hmm_range_need_fault(hmm_vma_walk, pfns, npages,
                                     cpu_flags, &fault, &write_fault);
                if (fault || write_fault)
                        return hmm_vma_walk_hole_(addr, end, fault,

                pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
                for (i = 0; i < npages; ++i, ++pfn) {
                        hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
                                              hmm_vma_walk->pgmap);
                        if (unlikely(!hmm_vma_walk->pgmap))
                        pfns[i] = hmm_device_entry_from_pfn(range, pfn) |
                if (hmm_vma_walk->pgmap) {
                        put_dev_pagemap(hmm_vma_walk->pgmap);
                        hmm_vma_walk->pgmap = NULL;
                hmm_vma_walk->last = end;

        split_huge_pud(walk->vma, pudp, addr);

        pmdp = pmd_offset(pudp, addr);
                next = pmd_addr_end(addr, end);
                ret = hmm_vma_walk_pmd(pmdp, addr, next, walk);
        } while (pmdp++, addr = next, addr != end);

#else
#define hmm_vma_walk_pud	NULL
#endif

#ifdef CONFIG_HUGETLB_PAGE
static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
                                      unsigned long start, unsigned long end,
                                      struct mm_walk *walk)
        unsigned long addr = start, i, pfn;
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        struct vm_area_struct *vma = walk->vma;
        uint64_t orig_pfn, cpu_flags;
        bool fault, write_fault;

        ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
        entry = huge_ptep_get(pte);

        i = (start - range->start) >> PAGE_SHIFT;
        orig_pfn = range->pfns[i];
        range->pfns[i] = range->values[HMM_PFN_NONE];
        cpu_flags = pte_to_hmm_pfn_flags(range, entry);
        fault = write_fault = false;
        hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
                           &fault, &write_fault);
        if (fault || write_fault) {

        pfn = pte_pfn(entry) + ((start & ~hmask) >> PAGE_SHIFT);
        for (; addr < end; addr += PAGE_SIZE, i++, pfn++)
                range->pfns[i] = hmm_device_entry_from_pfn(range, pfn) |
        hmm_vma_walk->last = end;

                return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
#else
#define hmm_vma_walk_hugetlb_entry NULL
#endif /* CONFIG_HUGETLB_PAGE */

static void hmm_pfns_clear(struct hmm_range *range,
        for (; addr < end; addr += PAGE_SIZE, pfns++)
                *pfns = range->values[HMM_PFN_NONE];

/*
 * hmm_range_register() - start tracking change to CPU page table over a range
 * @mm: the mm struct for the range of virtual address
 * Return: 0 on success, -EFAULT if the address space is no longer valid
 *
 * Track updates to the CPU page table; see include/linux/hmm.h for details.
 */
int hmm_range_register(struct hmm_range *range, struct hmm_mirror *mirror)
        struct hmm *hmm = mirror->hmm;

        range->valid = false;

        if ((range->start & (PAGE_SIZE - 1)) || (range->end & (PAGE_SIZE - 1)))
        if (range->start >= range->end)

        /* Prevent hmm_release() from running while the range is valid */
        if (!mmget_not_zero(hmm->mmu_notifier.mm))

        /* Initialize range to track CPU page table updates. */
        spin_lock_irqsave(&hmm->ranges_lock, flags);

        list_add(&range->list, &hmm->ranges);

        /*
         * If there are any concurrent notifiers we have to wait for them for
         * the range to be valid (see hmm_range_wait_until_valid()).
         */
        spin_unlock_irqrestore(&hmm->ranges_lock, flags);

EXPORT_SYMBOL(hmm_range_register);

/*
 * hmm_range_unregister() - stop tracking change to CPU page table over a range
 *
 * The range struct is used to track updates to the CPU page table after a call
 * to hmm_range_register(). See include/linux/hmm.h for how to use it.
 */
void hmm_range_unregister(struct hmm_range *range)
        struct hmm *hmm = range->hmm;

        spin_lock_irqsave(&hmm->ranges_lock, flags);
        list_del_init(&range->list);
        spin_unlock_irqrestore(&hmm->ranges_lock, flags);

        /* Drop the reference taken by hmm_range_register() */
        mmput(hmm->mmu_notifier.mm);

        /*
         * The range is now invalid and the ref on the hmm is dropped, so
         * poison the pointer.  Leave other fields in place, for the caller's
         * use.
         */
        range->valid = false;
        memset(&range->hmm, POISON_INUSE, sizeof(range->hmm));
EXPORT_SYMBOL(hmm_range_unregister);

static const struct mm_walk_ops hmm_walk_ops = {
        .pud_entry	= hmm_vma_walk_pud,
        .pmd_entry	= hmm_vma_walk_pmd,
        .pte_hole	= hmm_vma_walk_hole,
        .hugetlb_entry	= hmm_vma_walk_hugetlb_entry,
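
/*
 * These page table walk callbacks are driven by walk_page_range() from
 * hmm_range_fault() below; the struct hmm_vma_walk passed as the walk's
 * private data carries the range and the faulting policy between them.
 */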

/*
 * hmm_range_fault - try to fault some address in a virtual address range
 * @range: range being faulted
 * @flags: HMM_FAULT_* flags
 *
 * Return: the number of valid pages in range->pfns[] (from the range start
 * address), which may be zero. On error one of the following status codes
 * can be returned:
 *
 * -EINVAL: Invalid arguments or mm or virtual address is in an invalid vma
 *          (e.g., device file vma).
 * -ENOMEM: Out of memory.
 * -EPERM:  Invalid permission (e.g., asking for write and range is read
 *          only).
 * -EAGAIN: A page fault needs to be retried and mmap_sem was dropped.
 * -EBUSY:  The range has been invalidated and the caller needs to wait for
 *          the invalidation to finish.
 * -EFAULT: Invalid (i.e., either there is no valid vma or it is illegal to
 *          access) virtual address in the range.
 *
 * This is similar to a regular CPU page fault except that it will not trigger
 * any memory migration if the memory being faulted is not accessible by CPUs
 * and the caller does not ask for migration.
 *
 * On error, for one virtual address in the range, the function will mark the
 * corresponding HMM pfn entry with an error flag.
 */
long hmm_range_fault(struct hmm_range *range, unsigned int flags)
        const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
        unsigned long start = range->start, end;
        struct hmm_vma_walk hmm_vma_walk;
        struct hmm *hmm = range->hmm;
        struct vm_area_struct *vma;

        lockdep_assert_held(&hmm->mmu_notifier.mm->mmap_sem);

                /* If the range is no longer valid, force a retry. */

                vma = find_vma(hmm->mmu_notifier.mm, start);
                if (vma == NULL || (vma->vm_flags & device_vma))

                if (!(vma->vm_flags & VM_READ)) {
                        /*
                         * If the vma does not allow read access, then assume
                         * that it does not allow write access either. HMM
                         * does not support architectures that allow write
                         * without read access.
                         */
                        hmm_pfns_clear(range, range->pfns,
                                       range->start, range->end);

                hmm_vma_walk.pgmap = NULL;
                hmm_vma_walk.last = start;
                hmm_vma_walk.flags = flags;
                hmm_vma_walk.range = range;
                end = min(range->end, vma->vm_end);

                walk_page_range(vma->vm_mm, start, end, &hmm_walk_ops,
                                &hmm_vma_walk);

                        ret = walk_page_range(vma->vm_mm, start, end,
                                              &hmm_walk_ops, &hmm_vma_walk);
                        start = hmm_vma_walk.last;

                        /* Keep trying while the range is valid. */
                } while (ret == -EBUSY && range->valid);

                        i = (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
                        hmm_pfns_clear(range, &range->pfns[i],
                                       hmm_vma_walk.last, range->end);

        } while (start < range->end);

        return (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
EXPORT_SYMBOL(hmm_range_fault);
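
/*
 * Illustrative end-to-end usage sketch (hypothetical driver code, not part of
 * this file): register a range, wait for it to become valid, fault it under
 * mmap_sem, then re-check validity under the same driver lock that the
 * sync_cpu_device_pagetables() callback takes before committing range->pfns[]
 * to device page tables. struct my_device, pt_lock and my_device_map_range()
 * are assumptions for the example.
 */
#if 0
static long my_fault_range(struct my_device *mdev, struct mm_struct *mm,
                struct hmm_range *range)
{
        long ret;

        ret = hmm_range_register(range, &mdev->mirror);
        if (ret)
                return ret;

again:
        if (!hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT)) {
                ret = -EBUSY;
                goto out;
        }

        down_read(&mm->mmap_sem);
        ret = hmm_range_fault(range, 0);
        up_read(&mm->mmap_sem);
        if (ret == -EBUSY)
                goto again;
        if (ret < 0)
                goto out;

        /* Only commit while the range is still valid, under the driver lock. */
        mutex_lock(&mdev->pt_lock);
        if (!range->valid) {
                mutex_unlock(&mdev->pt_lock);
                goto again;
        }
        my_device_map_range(mdev, range);	/* hypothetical helper */
        mutex_unlock(&mdev->pt_lock);
        ret = 0;

out:
        hmm_range_unregister(range);
        return ret;
}
#endif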

/*
 * hmm_range_dma_map - hmm_range_fault() and dma map pages all in one.
 * @range: range being faulted
 * @device: device to map pages to
 * @daddrs: array of dma addresses for the mapped pages
 * @flags: HMM_FAULT_*
 *
 * Return: the number of pages mapped on success (including zero), or any
 * status return from hmm_range_fault() otherwise.
 */
long hmm_range_dma_map(struct hmm_range *range, struct device *device,
                       dma_addr_t *daddrs, unsigned int flags)
        unsigned long i, npages, mapped;

        ret = hmm_range_fault(range, flags);
                return ret ? ret : -EBUSY;

        npages = (range->end - range->start) >> PAGE_SHIFT;
        for (i = 0, mapped = 0; i < npages; ++i) {
                enum dma_data_direction dir = DMA_TO_DEVICE;

                /*
                 * FIXME: the DMA API needs to provide an invalid DMA address
                 * value instead of a function to test the dma address value.
                 * That would remove a lot of duplicated code across many
                 * architectures.
                 *
                 * For now setting it to 0 here is good enough as the pfns[]
                 * value is what is used to check what is valid and what isn't.
                 */

                page = hmm_device_entry_to_page(range, range->pfns[i]);

                /* Check if the range is being invalidated */

                /* If it is both readable and writable, map it bi-directional. */
                if (range->pfns[i] & range->flags[HMM_PFN_WRITE])
                        dir = DMA_BIDIRECTIONAL;

                daddrs[i] = dma_map_page(device, page, 0, PAGE_SIZE, dir);
                if (dma_mapping_error(device, daddrs[i])) {

        for (npages = i, i = 0; (i < npages) && mapped; ++i) {
                enum dma_data_direction dir = DMA_TO_DEVICE;

                page = hmm_device_entry_to_page(range, range->pfns[i]);

                if (dma_mapping_error(device, daddrs[i]))

                /* If it was both readable and writable, it was mapped bi-directional. */
                if (range->pfns[i] & range->flags[HMM_PFN_WRITE])
                        dir = DMA_BIDIRECTIONAL;

                dma_unmap_page(device, daddrs[i], PAGE_SIZE, dir);
EXPORT_SYMBOL(hmm_range_dma_map);
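
/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * pairing hmm_range_dma_map() with hmm_range_dma_unmap(). The daddrs array
 * must hold one dma_addr_t per page in the range; pass dirty = true only if
 * the device may have written through the mapping (matching the kernel-doc
 * below). mdev->dev is an assumption for the example.
 */
#if 0
static long my_dma_map_range(struct my_device *mdev, struct hmm_range *range,
                dma_addr_t *daddrs)
{
        return hmm_range_dma_map(range, mdev->dev, daddrs, 0);
}

static long my_dma_unmap_range(struct my_device *mdev, struct hmm_range *range,
                dma_addr_t *daddrs)
{
        /* dirty = true: set_page_dirty() on pages that were mapped writable. */
        return hmm_range_dma_unmap(range, mdev->dev, daddrs, true);
}
#endif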

/*
 * hmm_range_dma_unmap() - unmap a range that was mapped with hmm_range_dma_map()
 * @range: range being unmapped
 * @device: device against which the dma mapping was done
 * @daddrs: dma addresses of the mapped pages
 * @dirty: dirty the pages that were mapped with the write flag set
 * Return: number of pages unmapped on success, -EINVAL otherwise
 *
 * Note that the caller MUST abide by the mmu notifier (or use an HMM mirror)
 * and honor the sync_cpu_device_pagetables() callback, so that it is safe to
 * call set_page_dirty() here. The caller must also hold the appropriate locks
 * to prevent concurrent mmu notifier or sync_cpu_device_pagetables() callbacks
 * from making progress.
 */
long hmm_range_dma_unmap(struct hmm_range *range,
                         struct device *device,
        unsigned long i, npages;

        if (range->end <= range->start)

        npages = (range->end - range->start) >> PAGE_SHIFT;
        for (i = 0; i < npages; ++i) {
                enum dma_data_direction dir = DMA_TO_DEVICE;

                page = hmm_device_entry_to_page(range, range->pfns[i]);

                /* If it was both readable and writable, it was mapped bi-directional. */
                if (range->pfns[i] & range->flags[HMM_PFN_WRITE]) {
                        dir = DMA_BIDIRECTIONAL;

                        /*
                         * See the comments in the function description on why
                         * it is safe here to call set_page_dirty().
                         */
                                set_page_dirty(page);

                /* Unmap and clear pfns/dma address */
                dma_unmap_page(device, daddrs[i], PAGE_SIZE, dir);
                range->pfns[i] = range->values[HMM_PFN_NONE];
                /* FIXME: see the comments in hmm_range_dma_map() */
EXPORT_SYMBOL(hmm_range_dma_unmap);