/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pagevec.h>
#include <linux/pmem.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>
#include <linux/mmu_notifier.h>
#include <linux/iomap.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/fs_dax.h>

/* We choose 4096 entries - same as per-zone page wait tables */
#define DAX_WAIT_TABLE_BITS 12
#define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)

static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];

static int __init init_dax_wait_table(void)
{
	int i;

	for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
		init_waitqueue_head(wait_table + i);
	return 0;
}
fs_initcall(init_dax_wait_table);

static long dax_map_atomic(struct block_device *bdev, struct blk_dax_ctl *dax)
{
	struct request_queue *q = bdev->bd_queue;
	long rc = -EIO;

	dax->addr = ERR_PTR(-EIO);
	if (blk_queue_enter(q, true) != 0)
		return rc;

	rc = bdev_direct_access(bdev, dax);
	if (rc < 0) {
		dax->addr = ERR_PTR(rc);
		blk_queue_exit(q);
		return rc;
	}
	return rc;
}

static void dax_unmap_atomic(struct block_device *bdev,
		const struct blk_dax_ctl *dax)
{
	if (IS_ERR(dax->addr))
		return;
	blk_queue_exit(bdev->bd_queue);
}

static int dax_is_pmd_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_PMD;
}

static int dax_is_pte_entry(void *entry)
{
	return !((unsigned long)entry & RADIX_DAX_PMD);
}

static int dax_is_zero_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_HZP;
}

static int dax_is_empty_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_EMPTY;
}

struct page *read_dax_sector(struct block_device *bdev, sector_t n)
{
	struct page *page = alloc_pages(GFP_KERNEL, 0);
	struct blk_dax_ctl dax = {
		.size = PAGE_SIZE,
		.sector = n & ~((((int) PAGE_SIZE) / 512) - 1),
	};
	long rc;

	if (!page)
		return ERR_PTR(-ENOMEM);

	rc = dax_map_atomic(bdev, &dax);
	if (rc < 0)
		return ERR_PTR(rc);
	memcpy_from_pmem(page_address(page), dax.addr, PAGE_SIZE);
	dax_unmap_atomic(bdev, &dax);
	return page;
}

/*
 * DAX radix tree locking
 */
struct exceptional_entry_key {
	struct address_space *mapping;
	pgoff_t entry_start;
};

struct wait_exceptional_entry_queue {
	wait_queue_t wait;
	struct exceptional_entry_key key;
};

static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
		pgoff_t index, void *entry, struct exceptional_entry_key *key)
{
	unsigned long hash;

	/*
	 * If 'entry' is a PMD, align the 'index' that we use for the wait
	 * queue to the start of that PMD.  This ensures that all offsets in
	 * the range covered by the PMD map to the same bit lock.
	 */
	if (dax_is_pmd_entry(entry))
		index &= ~((1UL << (PMD_SHIFT - PAGE_SHIFT)) - 1);

	key->mapping = mapping;
	key->entry_start = index;

	hash = hash_long((unsigned long)mapping ^ index, DAX_WAIT_TABLE_BITS);
	return wait_table + hash;
}

static int wake_exceptional_entry_func(wait_queue_t *wait, unsigned int mode,
				       int sync, void *keyp)
{
	struct exceptional_entry_key *key = keyp;
	struct wait_exceptional_entry_queue *ewait =
		container_of(wait, struct wait_exceptional_entry_queue, wait);

	if (key->mapping != ewait->key.mapping ||
	    key->entry_start != ewait->key.entry_start)
		return 0;
	return autoremove_wake_function(wait, mode, sync, NULL);
}

/*
 * Check whether the given slot is locked.  The function must be called with
 * mapping->tree_lock held.
 */
static inline int slot_locked(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
	return entry & RADIX_DAX_ENTRY_LOCK;
}

/*
 * Mark the given slot as locked.  The function must be called with
 * mapping->tree_lock held.
 */
static inline void *lock_slot(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);

	entry |= RADIX_DAX_ENTRY_LOCK;
	radix_tree_replace_slot(&mapping->page_tree, slot, (void *)entry);
	return (void *)entry;
}

/*
 * Mark the given slot as unlocked.  The function must be called with
 * mapping->tree_lock held.
 */
static inline void *unlock_slot(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);

	entry &= ~(unsigned long)RADIX_DAX_ENTRY_LOCK;
	radix_tree_replace_slot(&mapping->page_tree, slot, (void *)entry);
	return (void *)entry;
}

/*
 * Look up an entry in the radix tree and, if it is an exceptional entry, wait
 * for it to become unlocked before returning it.  The caller must call
 * put_unlocked_mapping_entry() when it has decided not to lock the entry or
 * put_locked_mapping_entry() when it has locked the entry and now wants to
 * unlock it.
 *
 * The function must be called with mapping->tree_lock held.
 */
static void *get_unlocked_mapping_entry(struct address_space *mapping,
					pgoff_t index, void ***slotp)
{
	void *entry, **slot;
	struct wait_exceptional_entry_queue ewait;
	wait_queue_head_t *wq;

	init_wait(&ewait.wait);
	ewait.wait.func = wake_exceptional_entry_func;

	for (;;) {
		entry = __radix_tree_lookup(&mapping->page_tree, index, NULL,
					    &slot);
		if (!entry || !radix_tree_exceptional_entry(entry) ||
		    !slot_locked(mapping, slot)) {
			if (slotp)
				*slotp = slot;
			return entry;
		}

		wq = dax_entry_waitqueue(mapping, index, entry, &ewait.key);
		prepare_to_wait_exclusive(wq, &ewait.wait,
					  TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&mapping->tree_lock);
		schedule();
		finish_wait(wq, &ewait.wait);
		spin_lock_irq(&mapping->tree_lock);
	}
}

static void dax_unlock_mapping_entry(struct address_space *mapping,
				     pgoff_t index)
{
	void *entry, **slot;

	spin_lock_irq(&mapping->tree_lock);
	entry = __radix_tree_lookup(&mapping->page_tree, index, NULL, &slot);
	if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry) ||
			 !slot_locked(mapping, slot))) {
		spin_unlock_irq(&mapping->tree_lock);
		return;
	}
	unlock_slot(mapping, slot);
	spin_unlock_irq(&mapping->tree_lock);
	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
}

static void put_locked_mapping_entry(struct address_space *mapping,
				     pgoff_t index, void *entry)
{
	if (!radix_tree_exceptional_entry(entry)) {
		/* hole page: drop the page lock and the reference we hold */
		unlock_page(entry);
		put_page(entry);
	} else {
		dax_unlock_mapping_entry(mapping, index);
	}
}

/*
 * Called when we are done with the radix tree entry we looked up via
 * get_unlocked_mapping_entry() and which we didn't lock in the end.
 */
static void put_unlocked_mapping_entry(struct address_space *mapping,
				       pgoff_t index, void *entry)
{
	if (!radix_tree_exceptional_entry(entry))
		return;

	/* We have to wake up next waiter for the radix tree entry lock */
	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
}

/*
 * Find the radix tree entry at the given index.  If it points to a page,
 * return with the page locked.  If it points to an exceptional entry, return
 * with the radix tree entry locked.  If the radix tree doesn't contain the
 * given index, create an empty exceptional entry for the index and return
 * with it locked.
 *
 * When requesting an entry with size RADIX_DAX_PMD, grab_mapping_entry() will
 * either return that locked entry or will return an error.  This error will
 * happen if there are any 4k entries (either zero pages or DAX entries)
 * within the 2MiB range that we are requesting.
 *
 * We always favor 4k entries over 2MiB entries.  There isn't a flow where we
 * evict 4k entries in order to 'upgrade' them to a 2MiB entry.  A 2MiB
 * insertion will fail if it finds any 4k entries already in the tree, and a
 * 4k insertion will cause an existing 2MiB entry to be unmapped and
 * downgraded to 4k entries.  This happens for both 2MiB huge zero pages as
 * well as 2MiB empty entries.
 *
 * The exception to this downgrade path is for 2MiB DAX PMD entries that have
 * real storage backing them.  We will leave these real 2MiB DAX entries in
 * the tree, and PTE writes will simply dirty the entire 2MiB DAX entry.
 *
 * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags.  For
 * persistent memory the benefit is doubtful.  We can add that later if we can
 * show it helps.
 */
static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
		unsigned long size_flag)
{
	bool pmd_downgrade = false; /* splitting 2MiB entry into 4k entries? */
	void *entry, **slot;

restart:
	spin_lock_irq(&mapping->tree_lock);
	entry = get_unlocked_mapping_entry(mapping, index, &slot);

	if (entry) {
		if (size_flag & RADIX_DAX_PMD) {
			if (!radix_tree_exceptional_entry(entry) ||
			    dax_is_pte_entry(entry)) {
				put_unlocked_mapping_entry(mapping, index,
						entry);
				entry = ERR_PTR(-EEXIST);
				goto out_unlock;
			}
		} else { /* trying to grab a PTE entry */
			if (radix_tree_exceptional_entry(entry) &&
			    dax_is_pmd_entry(entry) &&
			    (dax_is_zero_entry(entry) ||
			     dax_is_empty_entry(entry))) {
				pmd_downgrade = true;
			}
		}
	}

	/* No entry for given index? Make sure radix tree is big enough. */
	if (!entry || pmd_downgrade) {
		int err;

		if (pmd_downgrade) {
			/*
			 * Make sure 'entry' remains valid while we drop
			 * mapping->tree_lock.
			 */
			entry = lock_slot(mapping, slot);
		}

		spin_unlock_irq(&mapping->tree_lock);
		/*
		 * Besides huge zero pages the only other thing that gets
		 * downgraded is empty entries, which don't need to be
		 * unmapped.
		 */
		if (pmd_downgrade && dax_is_zero_entry(entry))
			unmap_mapping_range(mapping,
				(index << PAGE_SHIFT) & PMD_MASK, PMD_SIZE, 0);

		err = radix_tree_preload(
				mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
		if (err) {
			if (pmd_downgrade)
				put_locked_mapping_entry(mapping, index, entry);
			return ERR_PTR(err);
		}
		spin_lock_irq(&mapping->tree_lock);

		if (pmd_downgrade) {
			radix_tree_delete(&mapping->page_tree, index);
			mapping->nrexceptional--;
			dax_wake_mapping_entry_waiter(mapping, index, entry,
					true);
		}

		entry = dax_radix_locked_entry(0, size_flag | RADIX_DAX_EMPTY);

		err = __radix_tree_insert(&mapping->page_tree, index,
				dax_radix_order(entry), entry);
		radix_tree_preload_end();
		if (err) {
			spin_unlock_irq(&mapping->tree_lock);
			/*
			 * Someone already created the entry?  This is a
			 * normal failure when inserting PMDs in a range
			 * that already contains PTEs.  In that case we want
			 * to return -EEXIST immediately.
			 */
			if (err == -EEXIST && !(size_flag & RADIX_DAX_PMD))
				goto restart;
			/*
			 * Our insertion of a DAX PMD entry failed, most
			 * likely because it collided with a PTE sized entry
			 * at a different index in the PMD range.  We haven't
			 * inserted anything into the radix tree and have no
			 * waiters to wake.
			 */
			return ERR_PTR(err);
		}
		/* Good, we have inserted empty locked entry into the tree. */
		mapping->nrexceptional++;
		spin_unlock_irq(&mapping->tree_lock);
		return entry;
	}
	/* Normal page in radix tree? */
	if (!radix_tree_exceptional_entry(entry)) {
		struct page *page = entry;

		get_page(page);
		spin_unlock_irq(&mapping->tree_lock);
		lock_page(page);
		/* Page got truncated? Retry... */
		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			put_page(page);
			goto restart;
		}
		return page;
	}
	entry = lock_slot(mapping, slot);
 out_unlock:
	spin_unlock_irq(&mapping->tree_lock);
	return entry;
}

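/*
 * Illustrative sketch (not part of the original file): the pairing that
 * callers of grab_mapping_entry() in this file follow.  A fault path grabs a
 * locked entry for the faulting index, installs its mapping while other
 * faults on the same index block in get_unlocked_mapping_entry(), and then
 * drops the lock with put_locked_mapping_entry().  The function name and the
 * simplified error handling are assumptions made for the sketch.
 */
static int dax_grab_put_example(struct address_space *mapping, pgoff_t index)
{
	void *entry;

	/* Returns with the radix tree entry (or hole page) locked. */
	entry = grab_mapping_entry(mapping, index, 0);
	if (IS_ERR(entry))
		return PTR_ERR(entry);

	/*
	 * ... insert a block mapping or a hole page here, while the entry
	 * lock keeps concurrent faults and fsync for this index waiting ...
	 */

	put_locked_mapping_entry(mapping, index, entry);
	return 0;
}
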
/*
 * We do not necessarily hold the mapping->tree_lock when we call this
 * function so it is possible that 'entry' is no longer a valid item in the
 * radix tree.  This is okay because all we really need to do is to find the
 * correct waitqueue where tasks might be waiting for that old 'entry' and
 * wake them.
 */
void dax_wake_mapping_entry_waiter(struct address_space *mapping,
		pgoff_t index, void *entry, bool wake_all)
{
	struct exceptional_entry_key key;
	wait_queue_head_t *wq;

	wq = dax_entry_waitqueue(mapping, index, entry, &key);

	/*
	 * Checking for locked entry and prepare_to_wait_exclusive() happens
	 * under mapping->tree_lock, ditto for entry handling in our callers.
	 * So at this point all tasks that could have seen our entry locked
	 * must be in the waitqueue and the following check will see them.
	 */
	if (waitqueue_active(wq))
		__wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
}

static int __dax_invalidate_mapping_entry(struct address_space *mapping,
					  pgoff_t index, bool trunc)
{
	int ret = 0;
	void *entry;
	struct radix_tree_root *page_tree = &mapping->page_tree;

	spin_lock_irq(&mapping->tree_lock);
	entry = get_unlocked_mapping_entry(mapping, index, NULL);
	if (!entry || !radix_tree_exceptional_entry(entry))
		goto out;
	if (!trunc &&
	    (radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_DIRTY) ||
	     radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE)))
		goto out;
	radix_tree_delete(page_tree, index);
	mapping->nrexceptional--;
	ret = 1;
out:
	put_unlocked_mapping_entry(mapping, index, entry);
	spin_unlock_irq(&mapping->tree_lock);
	return ret;
}

/*
 * Delete the exceptional DAX entry at @index from @mapping.  Wait for the
 * radix tree entry to get unlocked before deleting it.
 */
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	int ret = __dax_invalidate_mapping_entry(mapping, index, true);

	/*
	 * This gets called from the truncate / punch_hole path.  As such, the
	 * caller must hold locks protecting against concurrent modifications
	 * of the radix tree (usually fs-private i_mmap_sem for writing).
	 * Since the caller has seen an exceptional entry for this index, we
	 * had better find it at that index as well...
	 */
	WARN_ON_ONCE(!ret);
	return ret;
}

/*
 * Invalidate an exceptional DAX entry if it is easily possible.  This handles
 * DAX entries for invalidate_inode_pages() so we evict the entry only if we
 * can do so without blocking.
 */
int dax_invalidate_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	int ret = 0;
	void *entry, **slot;
	struct radix_tree_root *page_tree = &mapping->page_tree;

	spin_lock_irq(&mapping->tree_lock);
	entry = __radix_tree_lookup(page_tree, index, NULL, &slot);
	if (!entry || !radix_tree_exceptional_entry(entry) ||
	    slot_locked(mapping, slot))
		goto out;
	if (radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_DIRTY) ||
	    radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
		goto out;
	radix_tree_delete(page_tree, index);
	mapping->nrexceptional--;
	ret = 1;
out:
	spin_unlock_irq(&mapping->tree_lock);
	if (ret)
		dax_wake_mapping_entry_waiter(mapping, index, entry, true);
	return ret;
}

/*
 * Invalidate an exceptional DAX entry if it is clean.
 */
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
				      pgoff_t index)
{
	return __dax_invalidate_mapping_entry(mapping, index, false);
}

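/*
 * Illustrative sketch (not part of the original file): how the three
 * invalidation entry points above are intended to be used.  The caller shown
 * here is a simplified stand-in for the page cache truncation/invalidation
 * code; the function and parameter names are assumptions for the sketch.
 */
static int dax_invalidate_example(struct address_space *mapping, pgoff_t index,
				  bool truncating, bool must_succeed)
{
	if (truncating)
		/* The entry must go away: wait for the lock, ignore dirtiness. */
		return dax_delete_mapping_entry(mapping, index);
	if (must_succeed)
		/* Waits for the entry lock but keeps dirty/towrite entries. */
		return dax_invalidate_mapping_entry_sync(mapping, index);
	/* Best effort: bail out instead of blocking on a locked entry. */
	return dax_invalidate_mapping_entry(mapping, index);
}
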
/*
 * The user has performed a load from a hole in the file.  Allocating
 * a new page in the file would cause excessive storage usage for
 * workloads with sparse files.  We allocate a page cache page instead.
 * We'll kick it out of the page cache if it's ever written to,
 * otherwise it will simply fall out of the page cache under memory
 * pressure without ever having been dirtied.
 */
static int dax_load_hole(struct address_space *mapping, void **entry,
			 struct vm_fault *vmf)
{
	struct page *page;
	int ret;

	/* Hole page already exists? Return it... */
	if (!radix_tree_exceptional_entry(*entry)) {
		page = *entry;
		goto out;
	}

	/* This will replace locked radix tree entry with a hole page */
	page = find_or_create_page(mapping, vmf->pgoff,
				   vmf->gfp_mask | __GFP_ZERO);
	if (!page)
		return VM_FAULT_OOM;
out:
	vmf->page = page;
	ret = finish_fault(vmf);
	vmf->page = NULL;
	*entry = page;
	if (!ret) {
		/* Grab reference for PTE that is now referencing the page */
		get_page(page);
		return VM_FAULT_NOPAGE;
	}
	return ret;
}

static int copy_user_dax(struct block_device *bdev, sector_t sector, size_t size,
		struct page *to, unsigned long vaddr)
{
	struct blk_dax_ctl dax = {
		.sector = sector,
		.size = size,
	};
	void *vto;

	if (dax_map_atomic(bdev, &dax) < 0)
		return PTR_ERR(dax.addr);
	vto = kmap_atomic(to);
	copy_user_page(vto, (void __force *)dax.addr, vaddr, to);
	kunmap_atomic(vto);
	dax_unmap_atomic(bdev, &dax);
	return 0;
}

/*
 * By this point grab_mapping_entry() has ensured that we have a locked entry
 * of the appropriate size so we don't have to worry about downgrading PMDs to
 * PTEs.  If we happen to be trying to insert a PTE and there is a PMD
 * already in the tree, we will skip the insertion and just dirty the PMD as
 * appropriate.
 */
static void *dax_insert_mapping_entry(struct address_space *mapping,
				      struct vm_fault *vmf,
				      void *entry, sector_t sector,
				      unsigned long flags)
{
	struct radix_tree_root *page_tree = &mapping->page_tree;
	int error = 0;
	bool hole_fill = false;
	void *new_entry;
	pgoff_t index = vmf->pgoff;

	if (vmf->flags & FAULT_FLAG_WRITE)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	/* Replacing hole page with block mapping? */
	if (!radix_tree_exceptional_entry(entry)) {
		hole_fill = true;
		/*
		 * Unmap the page now before we remove it from page cache below.
		 * The page is locked so it cannot be faulted in again.
		 */
		unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
				    PAGE_SIZE, 0);
		error = radix_tree_preload(vmf->gfp_mask & ~__GFP_HIGHMEM);
		if (error)
			return ERR_PTR(error);
	} else if (dax_is_zero_entry(entry) && !(flags & RADIX_DAX_HZP)) {
		/* replacing huge zero page with PMD block mapping */
		unmap_mapping_range(mapping,
			(vmf->pgoff << PAGE_SHIFT) & PMD_MASK, PMD_SIZE, 0);
	}

	spin_lock_irq(&mapping->tree_lock);
	new_entry = dax_radix_locked_entry(sector, flags);

	if (hole_fill) {
		__delete_from_page_cache(entry, NULL);
		/* Drop pagecache reference */
		put_page(entry);
		error = __radix_tree_insert(page_tree, index,
				dax_radix_order(new_entry), new_entry);
		if (error) {
			new_entry = ERR_PTR(error);
			goto unlock;
		}
		mapping->nrexceptional++;
	} else if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
		/*
		 * Only swap our new entry into the radix tree if the current
		 * entry is a zero page or an empty entry.  If a normal PTE or
		 * PMD entry is already in the tree, we leave it alone.  This
		 * means that if we are trying to insert a PTE and the
		 * existing entry is a PMD, we will just leave the PMD in the
		 * tree and dirty it if necessary.
		 */
		struct radix_tree_node *node;
		void **slot;
		void *ret;

		ret = __radix_tree_lookup(page_tree, index, &node, &slot);
		WARN_ON_ONCE(ret != entry);
		__radix_tree_replace(page_tree, node, slot,
				     new_entry, NULL, NULL);
	}
	if (vmf->flags & FAULT_FLAG_WRITE)
		radix_tree_tag_set(page_tree, index, PAGECACHE_TAG_DIRTY);
 unlock:
	spin_unlock_irq(&mapping->tree_lock);
	if (hole_fill) {
		radix_tree_preload_end();
		/*
		 * We don't need hole page anymore, it has been replaced with
		 * locked radix tree entry now.
		 */
		if (mapping->a_ops->freepage)
			mapping->a_ops->freepage(entry);
		unlock_page(entry);
		put_page(entry);
	}
	return new_entry;
}

static inline unsigned long
pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
{
	unsigned long address;

	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
	return address;
}

/* Walk all mappings of a given index of a file and writeprotect them */
static void dax_mapping_entry_mkclean(struct address_space *mapping,
				      pgoff_t index, unsigned long pfn)
{
	struct vm_area_struct *vma;
	pte_t pte, *ptep = NULL;
	pmd_t *pmdp = NULL;
	spinlock_t *ptl;
	bool changed;

	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
		unsigned long address;

		cond_resched();

		if (!(vma->vm_flags & VM_SHARED))
			continue;

		address = pgoff_address(index, vma);
		changed = false;
		if (follow_pte_pmd(vma->vm_mm, address, &ptep, &pmdp, &ptl))
			continue;

		if (pmdp) {
#ifdef CONFIG_FS_DAX_PMD
			pmd_t pmd;

			if (pfn != pmd_pfn(*pmdp))
				goto unlock_pmd;
			if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
				goto unlock_pmd;

			flush_cache_page(vma, address, pfn);
			pmd = pmdp_huge_clear_flush(vma, address, pmdp);
			pmd = pmd_wrprotect(pmd);
			pmd = pmd_mkclean(pmd);
			set_pmd_at(vma->vm_mm, address, pmdp, pmd);
			changed = true;
unlock_pmd:
			spin_unlock(ptl);
#endif
		} else {
			if (pfn != pte_pfn(*ptep))
				goto unlock_pte;
			if (!pte_dirty(*ptep) && !pte_write(*ptep))
				goto unlock_pte;

			flush_cache_page(vma, address, pfn);
			pte = ptep_clear_flush(vma, address, ptep);
			pte = pte_wrprotect(pte);
			pte = pte_mkclean(pte);
			set_pte_at(vma->vm_mm, address, ptep, pte);
			changed = true;
unlock_pte:
			pte_unmap_unlock(ptep, ptl);
		}

		if (changed)
			mmu_notifier_invalidate_page(vma->vm_mm, address);
	}
	i_mmap_unlock_read(mapping);
}

static int dax_writeback_one(struct block_device *bdev,
		struct address_space *mapping, pgoff_t index, void *entry)
{
	struct radix_tree_root *page_tree = &mapping->page_tree;
	struct blk_dax_ctl dax;
	void *entry2, **slot;
	int ret = 0;

	/*
	 * A page got tagged dirty in DAX mapping? Something is seriously
	 * wrong.
	 */
	if (WARN_ON(!radix_tree_exceptional_entry(entry)))
		return -EIO;

	spin_lock_irq(&mapping->tree_lock);
	entry2 = get_unlocked_mapping_entry(mapping, index, &slot);
	/* Entry got punched out / reallocated? */
	if (!entry2 || !radix_tree_exceptional_entry(entry2))
		goto put_unlocked;
	/*
	 * Entry got reallocated elsewhere? No need to writeback. We have to
	 * compare sectors as we must not bail out due to difference in lockbit
	 * or entry type.
	 */
	if (dax_radix_sector(entry2) != dax_radix_sector(entry))
		goto put_unlocked;
	if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
				dax_is_zero_entry(entry))) {
		ret = -EIO;
		goto put_unlocked;
	}

	/* Another fsync thread may have already written back this entry */
	if (!radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
		goto put_unlocked;
	/* Lock the entry to serialize with page faults */
	entry = lock_slot(mapping, slot);
	/*
	 * We can clear the tag now but we have to be careful so that concurrent
	 * dax_writeback_one() calls for the same index cannot finish before we
	 * actually flush the caches. This is achieved as the calls will look
	 * at the entry only under tree_lock and once they do that they will
	 * see the entry locked and wait for it to unlock.
	 */
	radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_TOWRITE);
	spin_unlock_irq(&mapping->tree_lock);

	/*
	 * Even if dax_writeback_mapping_range() was given a wbc->range_start
	 * in the middle of a PMD, the 'index' we are given will be aligned to
	 * the start index of the PMD, as will the sector we pull from
	 * 'entry'.  This allows us to flush for PMD_SIZE and not have to
	 * worry about partial PMD writebacks.
	 */
	dax.sector = dax_radix_sector(entry);
	dax.size = PAGE_SIZE << dax_radix_order(entry);

	/*
	 * We cannot hold tree_lock while calling dax_map_atomic() because it
	 * eventually calls cond_resched().
	 */
	ret = dax_map_atomic(bdev, &dax);
	if (ret < 0) {
		put_locked_mapping_entry(mapping, index, entry);
		return ret;
	}

	if (WARN_ON_ONCE(ret < dax.size)) {
		ret = -EIO;
		goto unmap;
	}

	dax_mapping_entry_mkclean(mapping, index, pfn_t_to_pfn(dax.pfn));
	wb_cache_pmem(dax.addr, dax.size);
	/*
	 * After we have flushed the cache, we can clear the dirty tag. There
	 * cannot be new dirty data in the pfn after the flush has completed as
	 * the pfn mappings are writeprotected and fault waits for mapping
	 * entry lock.
	 */
	spin_lock_irq(&mapping->tree_lock);
	radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_DIRTY);
	spin_unlock_irq(&mapping->tree_lock);
 unmap:
	dax_unmap_atomic(bdev, &dax);
	put_locked_mapping_entry(mapping, index, entry);
	return ret;

 put_unlocked:
	put_unlocked_mapping_entry(mapping, index, entry2);
	spin_unlock_irq(&mapping->tree_lock);
	return ret;
}

/*
 * Flush the mapping to the persistent domain within the byte range of [start,
 * end]. This is required by data integrity operations to ensure file data is
 * on persistent storage prior to completion of the operation.
 */
int dax_writeback_mapping_range(struct address_space *mapping,
		struct block_device *bdev, struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	pgoff_t start_index, end_index;
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	bool done = false;
	int i, ret = 0;

	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
		return -EIO;

	if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
		return 0;

	start_index = wbc->range_start >> PAGE_SHIFT;
	end_index = wbc->range_end >> PAGE_SHIFT;

	tag_pages_for_writeback(mapping, start_index, end_index);

	pagevec_init(&pvec, 0);
	while (!done) {
		pvec.nr = find_get_entries_tag(mapping, start_index,
				PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE,
				pvec.pages, indices);

		if (pvec.nr == 0)
			break;

		for (i = 0; i < pvec.nr; i++) {
			if (indices[i] > end_index) {
				done = true;
				break;
			}

			ret = dax_writeback_one(bdev, mapping, indices[i],
					pvec.pages[i]);
			if (ret < 0)
				return ret;
		}
		start_index = indices[pvec.nr - 1] + 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);

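/*
 * Illustrative sketch (not part of the original file): a filesystem typically
 * calls dax_writeback_mapping_range() from its ->writepages() method when the
 * mapping is in DAX mode.  "example_dax_writepages" and the way the block
 * device is obtained are assumptions for the sketch, not any particular
 * filesystem's implementation.
 */
static int example_dax_writepages(struct address_space *mapping,
				  struct writeback_control *wbc)
{
	return dax_writeback_mapping_range(mapping,
			mapping->host->i_sb->s_bdev, wbc);
}
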
static int dax_insert_mapping(struct address_space *mapping,
		struct block_device *bdev, sector_t sector, size_t size,
		void **entryp, struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long vaddr = vmf->address;
	struct blk_dax_ctl dax = {
		.sector = sector,
		.size = size,
	};
	void *ret;
	void *entry = *entryp;

	if (dax_map_atomic(bdev, &dax) < 0)
		return PTR_ERR(dax.addr);
	dax_unmap_atomic(bdev, &dax);

	ret = dax_insert_mapping_entry(mapping, vmf, entry, dax.sector, 0);
	if (IS_ERR(ret))
		return PTR_ERR(ret);
	*entryp = ret;

	return vm_insert_mixed(vma, vaddr, dax.pfn);
}

/**
 * dax_pfn_mkwrite - handle first write to DAX page
 * @vmf: The description of the fault
 */
int dax_pfn_mkwrite(struct vm_fault *vmf)
{
	struct file *file = vmf->vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	void *entry, **slot;
	pgoff_t index = vmf->pgoff;

	spin_lock_irq(&mapping->tree_lock);
	entry = get_unlocked_mapping_entry(mapping, index, &slot);
	if (!entry || !radix_tree_exceptional_entry(entry)) {
		if (entry)
			put_unlocked_mapping_entry(mapping, index, entry);
		spin_unlock_irq(&mapping->tree_lock);
		return VM_FAULT_NOPAGE;
	}
	radix_tree_tag_set(&mapping->page_tree, index, PAGECACHE_TAG_DIRTY);
	entry = lock_slot(mapping, slot);
	spin_unlock_irq(&mapping->tree_lock);
	/*
	 * If we race with somebody updating the PTE and finish_mkwrite_fault()
	 * fails, we don't care. We need to return VM_FAULT_NOPAGE and retry
	 * the fault in either case.
	 */
	finish_mkwrite_fault(vmf);
	put_locked_mapping_entry(mapping, index, entry);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);

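/*
 * Illustrative sketch (not part of the original file): dax_pfn_mkwrite() is
 * meant to be called from a filesystem's ->pfn_mkwrite() handler, usually
 * after the filesystem has taken whatever lock it uses to serialize against
 * truncate.  "example_pfn_mkwrite" is an assumption for the sketch.
 */
static int example_pfn_mkwrite(struct vm_fault *vmf)
{
	int ret;

	/* filesystem-specific serialization against truncate is taken here */
	ret = dax_pfn_mkwrite(vmf);
	/* ... and dropped here */
	return ret;
}
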
static bool dax_range_is_aligned(struct block_device *bdev,
				 unsigned int offset, unsigned int length)
{
	unsigned short sector_size = bdev_logical_block_size(bdev);

	if (!IS_ALIGNED(offset, sector_size))
		return false;
	if (!IS_ALIGNED(length, sector_size))
		return false;

	return true;
}

int __dax_zero_page_range(struct block_device *bdev, sector_t sector,
		unsigned int offset, unsigned int length)
{
	struct blk_dax_ctl dax = {
		.sector		= sector,
		.size		= PAGE_SIZE,
	};

	if (dax_range_is_aligned(bdev, offset, length)) {
		sector_t start_sector = dax.sector + (offset >> 9);

		return blkdev_issue_zeroout(bdev, start_sector,
				length >> 9, GFP_NOFS, true);
	} else {
		if (dax_map_atomic(bdev, &dax) < 0)
			return PTR_ERR(dax.addr);
		clear_pmem(dax.addr + offset, length);
		dax_unmap_atomic(bdev, &dax);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(__dax_zero_page_range);

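/*
 * Illustrative sketch (not part of the original file): zeroing a sub-page
 * range of a DAX block from iomap-style zeroing code.  The sector arithmetic
 * mirrors dax_iomap_sector() below; "example_dax_zero" is an assumption for
 * the sketch.
 */
static int example_dax_zero(struct iomap *iomap, loff_t pos, unsigned offset,
			    unsigned bytes)
{
	sector_t sector = iomap->blkno +
		(((pos & PAGE_MASK) - iomap->offset) >> 9);

	return __dax_zero_page_range(iomap->bdev, sector, offset, bytes);
}
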
static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
{
	return iomap->blkno + (((pos & PAGE_MASK) - iomap->offset) >> 9);
}

static loff_t
dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct iov_iter *iter = data;
	loff_t end = pos + length, done = 0;
	ssize_t ret = 0;

	if (iov_iter_rw(iter) == READ) {
		end = min(end, i_size_read(inode));
		if (pos >= end)
			return 0;

		if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
			return iov_iter_zero(min(length, end - pos), iter);
	}

	if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
		return -EIO;

	/*
	 * Write can allocate block for an area which has a hole page mapped
	 * into page tables. We have to tear down these mappings so that data
	 * written by write(2) is visible in mmap.
	 */
	if ((iomap->flags & IOMAP_F_NEW) && inode->i_mapping->nrpages) {
		invalidate_inode_pages2_range(inode->i_mapping,
					      pos >> PAGE_SHIFT,
					      (end - 1) >> PAGE_SHIFT);
	}

	while (pos < end) {
		unsigned offset = pos & (PAGE_SIZE - 1);
		struct blk_dax_ctl dax = { 0 };
		ssize_t map_len;

		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		dax.sector = dax_iomap_sector(iomap, pos);
		dax.size = (length + offset + PAGE_SIZE - 1) & PAGE_MASK;
		map_len = dax_map_atomic(iomap->bdev, &dax);
		if (map_len < 0) {
			ret = map_len;
			break;
		}

		dax.addr += offset;
		map_len -= offset;
		if (map_len > end - pos)
			map_len = end - pos;

		if (iov_iter_rw(iter) == WRITE)
			map_len = copy_from_iter_pmem(dax.addr, map_len, iter);
		else
			map_len = copy_to_iter(dax.addr, map_len, iter);
		dax_unmap_atomic(iomap->bdev, &dax);
		if (map_len <= 0) {
			ret = map_len ? map_len : -EFAULT;
			break;
		}

		pos += map_len;
		length -= map_len;
		done += map_len;
	}

	return done ? done : ret;
}

/**
 * dax_iomap_rw - Perform I/O to a DAX file
 * @iocb: The control block for this I/O
 * @iter: The addresses to do I/O from or to
 * @ops: iomap ops passed from the file system
 *
 * This function performs read and write operations to directly mapped
 * persistent memory.  The caller needs to take care of read/write exclusion
 * and evicting any page cache pages in the region under I/O.
 */
ssize_t
dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	loff_t pos = iocb->ki_pos, ret = 0, done = 0;
	unsigned flags = 0;

	if (iov_iter_rw(iter) == WRITE) {
		lockdep_assert_held_exclusive(&inode->i_rwsem);
		flags |= IOMAP_WRITE;
	} else {
		lockdep_assert_held(&inode->i_rwsem);
	}

	while (iov_iter_count(iter)) {
		ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
				iter, dax_iomap_actor);
		if (ret <= 0)
			break;
		pos += ret;
		done += ret;
	}

	iocb->ki_pos += done;
	return done ? done : ret;
}
EXPORT_SYMBOL_GPL(dax_iomap_rw);

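/*
 * Illustrative sketch (not part of the original file): a filesystem's
 * ->read_iter() wrapper around dax_iomap_rw().  The iomap_ops pointer and the
 * function name are assumptions; a real caller also handles the non-DAX path
 * and the write side, and holds inode->i_rwsem as asserted above.
 */
static ssize_t example_dax_read_iter(struct kiocb *iocb, struct iov_iter *to,
				     const struct iomap_ops *ops)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	inode_lock_shared(inode);
	ret = dax_iomap_rw(iocb, to, ops);
	inode_unlock_shared(inode);

	if (ret > 0)
		file_accessed(iocb->ki_filp);
	return ret;
}
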
static int dax_fault_return(int error)
{
	if (error == 0)
		return VM_FAULT_NOPAGE;
	if (error == -ENOMEM)
		return VM_FAULT_OOM;
	return VM_FAULT_SIGBUS;
}

static int dax_iomap_pte_fault(struct vm_fault *vmf,
			       const struct iomap_ops *ops)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long vaddr = vmf->address;
	loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
	sector_t sector;
	struct iomap iomap = { 0 };
	unsigned flags = IOMAP_FAULT;
	int error, major = 0;
	int vmf_ret = 0;
	void *entry;

	/*
	 * Check whether offset isn't beyond end of file now. Caller is supposed
	 * to hold locks serializing us with truncate / punch hole so this is
	 * a reliable test.
	 */
	if (pos >= i_size_read(inode))
		return VM_FAULT_SIGBUS;

	if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page)
		flags |= IOMAP_WRITE;

	/*
	 * Note that we don't bother to use iomap_apply here: DAX requires
	 * the file system block size to be equal to the page size, which means
	 * that we never have to deal with more than a single extent here.
	 */
	error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
	if (error)
		return dax_fault_return(error);
	if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
		vmf_ret = dax_fault_return(-EIO);	/* fs corruption? */
		goto finish_iomap;
	}

	entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
	if (IS_ERR(entry)) {
		vmf_ret = dax_fault_return(PTR_ERR(entry));
		goto finish_iomap;
	}

	sector = dax_iomap_sector(&iomap, pos);

	if (vmf->cow_page) {
		switch (iomap.type) {
		case IOMAP_HOLE:
		case IOMAP_UNWRITTEN:
			clear_user_highpage(vmf->cow_page, vaddr);
			break;
		case IOMAP_MAPPED:
			error = copy_user_dax(iomap.bdev, sector, PAGE_SIZE,
					vmf->cow_page, vaddr);
			break;
		default:
			WARN_ON_ONCE(1);
			error = -EIO;
			break;
		}

		if (error)
			goto error_unlock_entry;

		__SetPageUptodate(vmf->cow_page);
		vmf_ret = finish_fault(vmf);
		if (!vmf_ret)
			vmf_ret = VM_FAULT_DONE_COW;
		goto unlock_entry;
	}

	switch (iomap.type) {
	case IOMAP_MAPPED:
		if (iomap.flags & IOMAP_F_NEW) {
			count_vm_event(PGMAJFAULT);
			mem_cgroup_count_vm_event(vmf->vma->vm_mm, PGMAJFAULT);
			major = VM_FAULT_MAJOR;
		}
		error = dax_insert_mapping(mapping, iomap.bdev, sector,
				PAGE_SIZE, &entry, vmf->vma, vmf);
		/* -EBUSY is fine, somebody else faulted on the same PTE */
		if (error == -EBUSY)
			error = 0;
		break;
	case IOMAP_UNWRITTEN:
	case IOMAP_HOLE:
		if (!(vmf->flags & FAULT_FLAG_WRITE)) {
			vmf_ret = dax_load_hole(mapping, &entry, vmf);
			goto unlock_entry;
		}
		/*FALLTHRU*/
	default:
		WARN_ON_ONCE(1);
		error = -EIO;
		break;
	}

 error_unlock_entry:
	vmf_ret = dax_fault_return(error) | major;
 unlock_entry:
	put_locked_mapping_entry(mapping, vmf->pgoff, entry);
 finish_iomap:
	if (ops->iomap_end) {
		int copied = PAGE_SIZE;

		if (vmf_ret & VM_FAULT_ERROR)
			copied = 0;
		/*
		 * The fault is done by now and there's no way back (other
		 * thread may be already happily using PTE we have installed).
		 * Just ignore error from ->iomap_end since we cannot do much
		 * with it.
		 */
		ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
	}
	return vmf_ret;
}

#ifdef CONFIG_FS_DAX_PMD
/*
 * The 'colour' (i.e. low bits) within a PMD of a page offset.  This comes up
 * more often than one might expect in the below functions.
 */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)

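/*
 * Illustrative sketch (not part of the original file): what the colour check
 * means.  dax_pmd_insert_mapping() below requires that the pfn backing the
 * start of the PMD is itself aligned to a full PMD worth of pages, i.e. that
 * its low "colour" bits are zero.  "example_pfn_is_pmd_aligned" is an
 * assumption for the sketch.
 */
static inline bool example_pfn_is_pmd_aligned(unsigned long pfn)
{
	return (pfn & PG_PMD_COLOUR) == 0;
}
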
static int dax_pmd_insert_mapping(struct vm_fault *vmf, struct iomap *iomap,
		loff_t pos, void **entryp)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	struct block_device *bdev = iomap->bdev;
	struct inode *inode = mapping->host;
	struct blk_dax_ctl dax = {
		.sector = dax_iomap_sector(iomap, pos),
		.size = PMD_SIZE,
	};
	long length = dax_map_atomic(bdev, &dax);
	void *ret = NULL;

	if (length < 0) /* dax_map_atomic() failed */
		goto fallback;
	if (length < PMD_SIZE)
		goto unmap_fallback;
	if (pfn_t_to_pfn(dax.pfn) & PG_PMD_COLOUR)
		goto unmap_fallback;
	if (!pfn_t_devmap(dax.pfn))
		goto unmap_fallback;

	dax_unmap_atomic(bdev, &dax);

	ret = dax_insert_mapping_entry(mapping, vmf, *entryp, dax.sector,
			RADIX_DAX_PMD);
	if (IS_ERR(ret))
		goto fallback;
	*entryp = ret;

	trace_dax_pmd_insert_mapping(inode, vmf, length, dax.pfn, ret);
	return vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd,
			dax.pfn, vmf->flags & FAULT_FLAG_WRITE);

 unmap_fallback:
	dax_unmap_atomic(bdev, &dax);
fallback:
	trace_dax_pmd_insert_mapping_fallback(inode, vmf, length,
			dax.pfn, ret);
	return VM_FAULT_FALLBACK;
}

static int dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap,
		void **entryp)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	struct inode *inode = mapping->host;
	struct page *zero_page;
	void *ret = NULL;
	spinlock_t *ptl;
	pmd_t pmd_entry;

	zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);

	if (unlikely(!zero_page))
		goto fallback;

	ret = dax_insert_mapping_entry(mapping, vmf, *entryp, 0,
			RADIX_DAX_PMD | RADIX_DAX_HZP);
	if (IS_ERR(ret))
		goto fallback;
	*entryp = ret;

	ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
	if (!pmd_none(*(vmf->pmd))) {
		spin_unlock(ptl);
		goto fallback;
	}

	pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
	pmd_entry = pmd_mkhuge(pmd_entry);
	set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
	spin_unlock(ptl);
	trace_dax_pmd_load_hole(inode, vmf, zero_page, ret);
	return VM_FAULT_NOPAGE;

fallback:
	trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, ret);
	return VM_FAULT_FALLBACK;
}

static int dax_iomap_pmd_fault(struct vm_fault *vmf,
			       const struct iomap_ops *ops)
{
	struct vm_area_struct *vma = vmf->vma;
	struct address_space *mapping = vma->vm_file->f_mapping;
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	bool write = vmf->flags & FAULT_FLAG_WRITE;
	unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
	struct inode *inode = mapping->host;
	int result = VM_FAULT_FALLBACK;
	struct iomap iomap = { 0 };
	pgoff_t max_pgoff, pgoff;
	void *entry;
	loff_t pos;
	int error;

	/*
	 * Check whether offset isn't beyond end of file now. Caller is
	 * supposed to hold locks serializing us with truncate / punch hole so
	 * this is a reliable test.
	 */
	pgoff = linear_page_index(vma, pmd_addr);
	max_pgoff = (i_size_read(inode) - 1) >> PAGE_SHIFT;

	trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);

	/* Fall back to PTEs if we're going to COW */
	if (write && !(vma->vm_flags & VM_SHARED))
		goto fallback;

	/* If the PMD would extend outside the VMA */
	if (pmd_addr < vma->vm_start)
		goto fallback;
	if ((pmd_addr + PMD_SIZE) > vma->vm_end)
		goto fallback;

	if (pgoff > max_pgoff) {
		result = VM_FAULT_SIGBUS;
		goto out;
	}

	/* If the PMD would extend beyond the file size */
	if ((pgoff | PG_PMD_COLOUR) > max_pgoff)
		goto fallback;

	/*
	 * Note that we don't use iomap_apply here.  We aren't doing I/O, only
	 * setting up a mapping, so really we're using iomap_begin() as a way
	 * to look up our filesystem block.
	 */
	pos = (loff_t)pgoff << PAGE_SHIFT;
	error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap);
	if (error)
		goto fallback;

	if (iomap.offset + iomap.length < pos + PMD_SIZE)
		goto finish_iomap;

	/*
	 * grab_mapping_entry() will make sure we get a 2M empty entry, a DAX
	 * PMD or a HZP entry.  If it can't (because a 4k page is already in
	 * the tree, for instance), it will return -EEXIST and we just fall
	 * back to 4k entries.
	 */
	entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD);
	if (IS_ERR(entry))
		goto finish_iomap;

	switch (iomap.type) {
	case IOMAP_MAPPED:
		result = dax_pmd_insert_mapping(vmf, &iomap, pos, &entry);
		break;
	case IOMAP_UNWRITTEN:
	case IOMAP_HOLE:
		if (WARN_ON_ONCE(write))
			goto unlock_entry;
		result = dax_pmd_load_hole(vmf, &iomap, &entry);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

 unlock_entry:
	put_locked_mapping_entry(mapping, pgoff, entry);
 finish_iomap:
	if (ops->iomap_end) {
		int copied = PMD_SIZE;

		if (result == VM_FAULT_FALLBACK)
			copied = 0;
		/*
		 * The fault is done by now and there's no way back (other
		 * thread may be already happily using PMD we have installed).
		 * Just ignore error from ->iomap_end since we cannot do much
		 * with it.
		 */
		ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags,
				&iomap);
	}
 fallback:
	if (result == VM_FAULT_FALLBACK) {
		split_huge_pmd(vma, vmf->pmd, vmf->address);
		count_vm_event(THP_FAULT_FALLBACK);
	}
out:
	trace_dax_pmd_fault_done(inode, vmf, max_pgoff, result);
	return result;
}
#else
static int dax_iomap_pmd_fault(struct vm_fault *vmf,
			       const struct iomap_ops *ops)
{
	return VM_FAULT_FALLBACK;
}
#endif /* CONFIG_FS_DAX_PMD */

/**
 * dax_iomap_fault - handle a page fault on a DAX file
 * @vmf: The description of the fault
 * @pe_size: Size of the page to fault in
 * @ops: iomap ops passed from the file system
 *
 * When a page fault occurs, filesystems may call this helper in
 * their fault handler for DAX files. dax_iomap_fault() assumes the caller
 * has done all the necessary locking for page fault to proceed
 * successfully.
 */
int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
		    const struct iomap_ops *ops)
{
	switch (pe_size) {
	case PE_SIZE_PTE:
		return dax_iomap_pte_fault(vmf, ops);
	case PE_SIZE_PMD:
		return dax_iomap_pmd_fault(vmf, ops);
	default:
		return VM_FAULT_FALLBACK;
	}
}
EXPORT_SYMBOL_GPL(dax_iomap_fault);
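
/*
 * Illustrative sketch (not part of the original file): how a filesystem wires
 * dax_iomap_fault() into its vm_operations_struct using the ->fault and
 * ->huge_fault split that comes with enum page_entry_size.  The "example_"
 * names and the iomap_ops pointer are assumptions; real callers also take
 * filesystem locks around these calls.
 */
static const struct iomap_ops *example_iomap_ops;

static int example_filemap_fault(struct vm_fault *vmf)
{
	return dax_iomap_fault(vmf, PE_SIZE_PTE, example_iomap_ops);
}

static int example_filemap_huge_fault(struct vm_fault *vmf,
				      enum page_entry_size pe_size)
{
	return dax_iomap_fault(vmf, pe_size, example_iomap_ops);
}

static const struct vm_operations_struct example_dax_vm_ops = {
	.fault		= example_filemap_fault,
	.huge_fault	= example_filemap_huge_fault,
	.page_mkwrite	= example_filemap_fault,
	.pfn_mkwrite	= dax_pfn_mkwrite,
};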