/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pagevec.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>
#include <linux/mmu_notifier.h>
#include <linux/iomap.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/fs_dax.h>

/* We choose 4096 entries - same as per-zone page wait tables */
#define DAX_WAIT_TABLE_BITS 12
#define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)

/* The 'colour' (i.e. low bits) within a PMD of a page offset. */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
#define PG_PMD_NR	(PMD_SIZE >> PAGE_SHIFT)

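/*
 * Worked example (illustrative, not from the original source): with 4k pages
 * and 2MiB PMDs - the common x86_64 configuration - PMD_SIZE >> PAGE_SHIFT is
 * 512, so PG_PMD_NR is 512 and PG_PMD_COLOUR is 511 (0x1ff), the mask of page
 * offsets that fall within a single PMD.
 */
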
static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];

static int __init init_dax_wait_table(void)
{
	int i;

	for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
		init_waitqueue_head(wait_table + i);
	return 0;
}
fs_initcall(init_dax_wait_table);

/*
 * We use lowest available bit in exceptional entry for locking, one bit for
 * the entry size (PMD) and two more to tell us if the entry is a zero page or
 * an empty entry that is just used for locking.  In total four special bits.
 *
 * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the ZERO_PAGE
 * and EMPTY bits aren't set the entry is a normal DAX entry with a filesystem
 * block allocation.
 */
#define RADIX_DAX_SHIFT		(RADIX_TREE_EXCEPTIONAL_SHIFT + 4)
#define RADIX_DAX_ENTRY_LOCK	(1 << RADIX_TREE_EXCEPTIONAL_SHIFT)
#define RADIX_DAX_PMD		(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 1))
#define RADIX_DAX_ZERO_PAGE	(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 2))
#define RADIX_DAX_EMPTY		(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 3))

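/*
 * Encoding illustration (editorial sketch, assuming RADIX_TREE_EXCEPTIONAL_SHIFT
 * is 2): a locked, PTE-sized entry for pfn 0x1234 is built roughly as
 *
 *	(void *)(RADIX_TREE_EXCEPTIONAL_ENTRY | (0x1234UL << RADIX_DAX_SHIFT) |
 *		 RADIX_DAX_ENTRY_LOCK)
 *
 * and the pfn is recovered by shifting right by RADIX_DAX_SHIFT, which is
 * exactly what the helpers below do.
 */
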
static unsigned long dax_radix_pfn(void *entry)
{
	return (unsigned long)entry >> RADIX_DAX_SHIFT;
}

static void *dax_radix_locked_entry(unsigned long pfn, unsigned long flags)
{
	return (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY | flags |
			(pfn << RADIX_DAX_SHIFT) | RADIX_DAX_ENTRY_LOCK);
}

static unsigned int dax_radix_order(void *entry)
{
	if ((unsigned long)entry & RADIX_DAX_PMD)
		return PMD_SHIFT - PAGE_SHIFT;
	return 0;
}

static int dax_is_pmd_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_PMD;
}

static int dax_is_pte_entry(void *entry)
{
	return !((unsigned long)entry & RADIX_DAX_PMD);
}

static int dax_is_zero_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_ZERO_PAGE;
}

static int dax_is_empty_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_EMPTY;
}

/*
 * DAX radix tree locking
 */
struct exceptional_entry_key {
	struct address_space *mapping;
	pgoff_t entry_start;
};

struct wait_exceptional_entry_queue {
	wait_queue_entry_t wait;
	struct exceptional_entry_key key;
};

static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
		pgoff_t index, void *entry, struct exceptional_entry_key *key)
{
	unsigned long hash;

	/*
	 * If 'entry' is a PMD, align the 'index' that we use for the wait
	 * queue to the start of that PMD.  This ensures that all offsets in
	 * the range covered by the PMD map to the same bit lock.
	 */
	if (dax_is_pmd_entry(entry))
		index &= ~PG_PMD_COLOUR;

	key->mapping = mapping;
	key->entry_start = index;

	hash = hash_long((unsigned long)mapping ^ index, DAX_WAIT_TABLE_BITS);
	return wait_table + hash;
}

static int wake_exceptional_entry_func(wait_queue_entry_t *wait, unsigned int mode,
				       int sync, void *keyp)
{
	struct exceptional_entry_key *key = keyp;
	struct wait_exceptional_entry_queue *ewait =
		container_of(wait, struct wait_exceptional_entry_queue, wait);

	if (key->mapping != ewait->key.mapping ||
	    key->entry_start != ewait->key.entry_start)
		return 0;
	return autoremove_wake_function(wait, mode, sync, NULL);
}

/*
 * We do not necessarily hold the mapping->tree_lock when we call this
 * function so it is possible that 'entry' is no longer a valid item in the
 * radix tree.  This is okay because all we really need to do is to find the
 * correct waitqueue where tasks might be waiting for that old 'entry' and
 * wake them.
 */
static void dax_wake_mapping_entry_waiter(struct address_space *mapping,
		pgoff_t index, void *entry, bool wake_all)
{
	struct exceptional_entry_key key;
	wait_queue_head_t *wq;

	wq = dax_entry_waitqueue(mapping, index, entry, &key);

	/*
	 * Checking for locked entry and prepare_to_wait_exclusive() happens
	 * under mapping->tree_lock, ditto for entry handling in our callers.
	 * So at this point all tasks that could have seen our entry locked
	 * must be in the waitqueue and the following check will see them.
	 */
	if (waitqueue_active(wq))
		__wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
}

/*
 * Check whether the given slot is locked.  The function must be called with
 * mapping->tree_lock held.
 */
static inline int slot_locked(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
	return entry & RADIX_DAX_ENTRY_LOCK;
}

/*
 * Mark the given slot as locked.  The function must be called with
 * mapping->tree_lock held.
 */
static inline void *lock_slot(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);

	entry |= RADIX_DAX_ENTRY_LOCK;
	radix_tree_replace_slot(&mapping->page_tree, slot, (void *)entry);
	return (void *)entry;
}

/*
 * Mark the given slot as unlocked.  The function must be called with
 * mapping->tree_lock held.
 */
static inline void *unlock_slot(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);

	entry &= ~(unsigned long)RADIX_DAX_ENTRY_LOCK;
	radix_tree_replace_slot(&mapping->page_tree, slot, (void *)entry);
	return (void *)entry;
}

/*
 * Lookup entry in radix tree, wait for it to become unlocked if it is
 * exceptional entry and return it.  The caller must call
 * put_unlocked_mapping_entry() when he decided not to lock the entry or
 * put_locked_mapping_entry() when he locked the entry and now wants to
 * unlock it.
 *
 * The function must be called with mapping->tree_lock held.
 */
static void *get_unlocked_mapping_entry(struct address_space *mapping,
		pgoff_t index, void ***slotp)
{
	void *entry, **slot;
	struct wait_exceptional_entry_queue ewait;
	wait_queue_head_t *wq;

	init_wait(&ewait.wait);
	ewait.wait.func = wake_exceptional_entry_func;

	for (;;) {
		entry = __radix_tree_lookup(&mapping->page_tree, index, NULL,
					    &slot);
		if (!entry ||
		    WARN_ON_ONCE(!radix_tree_exceptional_entry(entry)) ||
		    !slot_locked(mapping, slot)) {
			if (slotp)
				*slotp = slot;
			return entry;
		}

		wq = dax_entry_waitqueue(mapping, index, entry, &ewait.key);
		prepare_to_wait_exclusive(wq, &ewait.wait,
					  TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&mapping->tree_lock);
		schedule();
		finish_wait(wq, &ewait.wait);
		spin_lock_irq(&mapping->tree_lock);
	}
}

static void dax_unlock_mapping_entry(struct address_space *mapping,
				     pgoff_t index)
{
	void *entry, **slot;

	spin_lock_irq(&mapping->tree_lock);
	entry = __radix_tree_lookup(&mapping->page_tree, index, NULL, &slot);
	if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry) ||
			 !slot_locked(mapping, slot))) {
		spin_unlock_irq(&mapping->tree_lock);
		return;
	}
	unlock_slot(mapping, slot);
	spin_unlock_irq(&mapping->tree_lock);
	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
}

static void put_locked_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	dax_unlock_mapping_entry(mapping, index);
}

/*
 * Called when we are done with radix tree entry we looked up via
 * get_unlocked_mapping_entry() and which we didn't lock in the end.
 */
static void put_unlocked_mapping_entry(struct address_space *mapping,
				       pgoff_t index, void *entry)
{
	if (!entry)
		return;

	/* We have to wake up next waiter for the radix tree entry lock */
	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
}

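/*
 * Usage sketch for the entry-lock helpers above (illustrative only;
 * 'want_to_lock' stands for whatever condition a caller uses):
 *
 *	spin_lock_irq(&mapping->tree_lock);
 *	entry = get_unlocked_mapping_entry(mapping, index, &slot);
 *	if (want_to_lock)
 *		entry = lock_slot(mapping, slot);
 *	else
 *		put_unlocked_mapping_entry(mapping, index, entry);
 *	spin_unlock_irq(&mapping->tree_lock);
 *	...
 *	if (want_to_lock)
 *		put_locked_mapping_entry(mapping, index);
 */
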
static unsigned long dax_entry_size(void *entry)
{
	if (dax_is_zero_entry(entry))
		return 0;
	else if (dax_is_empty_entry(entry))
		return 0;
	else if (dax_is_pmd_entry(entry))
		return PMD_SIZE;
	else
		return PAGE_SIZE;
}

static unsigned long dax_radix_end_pfn(void *entry)
{
	return dax_radix_pfn(entry) + dax_entry_size(entry) / PAGE_SIZE;
}

/*
 * Iterate through all mapped pfns represented by an entry, i.e. skip
 * 'empty' and 'zero' entries.
 */
#define for_each_mapped_pfn(entry, pfn) \
	for (pfn = dax_radix_pfn(entry); \
			pfn < dax_radix_end_pfn(entry); pfn++)

static void dax_associate_entry(void *entry, struct address_space *mapping)
{
	unsigned long pfn;

	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return;

	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		WARN_ON_ONCE(page->mapping);
		page->mapping = mapping;
	}
}

static void dax_disassociate_entry(void *entry, struct address_space *mapping,
		bool trunc)
{
	unsigned long pfn;

	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return;

	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		WARN_ON_ONCE(trunc && page_ref_count(page) > 1);
		WARN_ON_ONCE(page->mapping && page->mapping != mapping);
		page->mapping = NULL;
	}
}

/*
 * Find radix tree entry at given index. If it points to an exceptional entry,
 * return it with the radix tree entry locked. If the radix tree doesn't
 * contain given index, create an empty exceptional entry for the index and
 * return with it locked.
 *
 * When requesting an entry with size RADIX_DAX_PMD, grab_mapping_entry() will
 * either return that locked entry or will return an error.  This error will
 * happen if there are any 4k entries within the 2MiB range that we are
 * requesting.
 *
 * We always favor 4k entries over 2MiB entries. There isn't a flow where we
 * evict 4k entries in order to 'upgrade' them to a 2MiB entry.  A 2MiB
 * insertion will fail if it finds any 4k entries already in the tree, and a
 * 4k insertion will cause an existing 2MiB entry to be unmapped and
 * downgraded to 4k entries.  This happens for both 2MiB huge zero pages as
 * well as 2MiB empty entries.
 *
 * The exception to this downgrade path is for 2MiB DAX PMD entries that have
 * real storage backing them.  We will leave these real 2MiB DAX entries in
 * the tree, and PTE writes will simply dirty the entire 2MiB DAX entry.
 *
 * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
 * persistent memory the benefit is doubtful. We can add that later if we can
 * show it helps.
 */
static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
		unsigned long size_flag)
{
	bool pmd_downgrade = false; /* splitting 2MiB entry into 4k entries? */
	void *entry, **slot;

restart:
	spin_lock_irq(&mapping->tree_lock);
	entry = get_unlocked_mapping_entry(mapping, index, &slot);

	if (WARN_ON_ONCE(entry && !radix_tree_exceptional_entry(entry))) {
		entry = ERR_PTR(-EIO);
		goto out_unlock;
	}

	if (entry) {
		if (size_flag & RADIX_DAX_PMD) {
			if (dax_is_pte_entry(entry)) {
				put_unlocked_mapping_entry(mapping, index,
						entry);
				entry = ERR_PTR(-EEXIST);
				goto out_unlock;
			}
		} else { /* trying to grab a PTE entry */
			if (dax_is_pmd_entry(entry) &&
			    (dax_is_zero_entry(entry) ||
			     dax_is_empty_entry(entry))) {
				pmd_downgrade = true;
			}
		}
	}

	/* No entry for given index? Make sure radix tree is big enough. */
	if (!entry || pmd_downgrade) {
		int err;

		if (pmd_downgrade) {
			/*
			 * Make sure 'entry' remains valid while we drop
			 * mapping->tree_lock.
			 */
			entry = lock_slot(mapping, slot);
		}

		spin_unlock_irq(&mapping->tree_lock);
		/*
		 * Besides huge zero pages the only other thing that gets
		 * downgraded are empty entries which don't need to be
		 * unmapped.
		 */
		if (pmd_downgrade && dax_is_zero_entry(entry))
			unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
							PG_PMD_NR, false);

		err = radix_tree_preload(
				mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
		if (err) {
			if (pmd_downgrade)
				put_locked_mapping_entry(mapping, index);
			return ERR_PTR(err);
		}
		spin_lock_irq(&mapping->tree_lock);

		if (!entry) {
			/*
			 * We needed to drop the page_tree lock while calling
			 * radix_tree_preload() and we didn't have an entry to
			 * lock.  See if another thread inserted an entry at
			 * our index during this time.
			 */
			entry = __radix_tree_lookup(&mapping->page_tree, index,
					NULL, &slot);
			if (entry) {
				radix_tree_preload_end();
				spin_unlock_irq(&mapping->tree_lock);
				goto restart;
			}
		}

		if (pmd_downgrade) {
			dax_disassociate_entry(entry, mapping, false);
			radix_tree_delete(&mapping->page_tree, index);
			mapping->nrexceptional--;
			dax_wake_mapping_entry_waiter(mapping, index, entry,
					true);
		}

		entry = dax_radix_locked_entry(0, size_flag | RADIX_DAX_EMPTY);

		err = __radix_tree_insert(&mapping->page_tree, index,
				dax_radix_order(entry), entry);
		radix_tree_preload_end();
		if (err) {
			spin_unlock_irq(&mapping->tree_lock);
			/*
			 * Our insertion of a DAX entry failed, most likely
			 * because we were inserting a PMD entry and it
			 * collided with a PTE sized entry at a different
			 * index in the PMD range.  We haven't inserted
			 * anything into the radix tree and have no waiters to
			 * wake.
			 */
			return ERR_PTR(err);
		}
		/* Good, we have inserted empty locked entry into the tree. */
		mapping->nrexceptional++;
		spin_unlock_irq(&mapping->tree_lock);
		return entry;
	}
	entry = lock_slot(mapping, slot);
out_unlock:
	spin_unlock_irq(&mapping->tree_lock);
	return entry;
}

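/*
 * Typical use in the fault paths below (editorial sketch, not verbatim code):
 *
 *	entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
 *	if (IS_ERR(entry))
 *		return dax_fault_return(PTR_ERR(entry));
 *	... look up the block and call dax_insert_mapping_entry() ...
 *	put_locked_mapping_entry(mapping, vmf->pgoff);
 */
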
static int __dax_invalidate_mapping_entry(struct address_space *mapping,
					  pgoff_t index, bool trunc)
{
	int ret = 0;
	void *entry;
	struct radix_tree_root *page_tree = &mapping->page_tree;

	spin_lock_irq(&mapping->tree_lock);
	entry = get_unlocked_mapping_entry(mapping, index, NULL);
	if (!entry || WARN_ON_ONCE(!radix_tree_exceptional_entry(entry)))
		goto out;
	if (!trunc &&
	    (radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_DIRTY) ||
	     radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE)))
		goto out;
	dax_disassociate_entry(entry, mapping, trunc);
	radix_tree_delete(page_tree, index);
	mapping->nrexceptional--;
	ret = 1;
out:
	put_unlocked_mapping_entry(mapping, index, entry);
	spin_unlock_irq(&mapping->tree_lock);
	return ret;
}

/*
 * Delete exceptional DAX entry at @index from @mapping.  Wait for radix tree
 * entry to get unlocked before deleting it.
 */
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	int ret = __dax_invalidate_mapping_entry(mapping, index, true);

	/*
	 * This gets called from truncate / punch_hole path. As such, the caller
	 * must hold locks protecting against concurrent modifications of the
	 * radix tree (usually fs-private i_mmap_sem for writing). Since the
	 * caller has seen exceptional entry for this index, we better find it
	 * at that index as well...
	 */
	WARN_ON_ONCE(!ret);
	return ret;
}

/*
 * Invalidate exceptional DAX entry if it is clean.
 */
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
				      pgoff_t index)
{
	return __dax_invalidate_mapping_entry(mapping, index, false);
}

static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
		sector_t sector, size_t size, struct page *to,
		unsigned long vaddr)
{
	void *vto, *kaddr;
	pgoff_t pgoff;
	pfn_t pfn;
	long rc;
	int id;

	rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
	if (rc)
		return rc;

	id = dax_read_lock();
	rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn);
	if (rc < 0) {
		dax_read_unlock(id);
		return rc;
	}
	vto = kmap_atomic(to);
	copy_user_page(vto, (void __force *)kaddr, vaddr, to);
	kunmap_atomic(vto);
	dax_read_unlock(id);
	return 0;
}

/*
 * By this point grab_mapping_entry() has ensured that we have a locked entry
 * of the appropriate size so we don't have to worry about downgrading PMDs to
 * PTEs.  If we happen to be trying to insert a PTE and there is a PMD
 * already in the tree, we will skip the insertion and just dirty the PMD as
 * appropriate.
 */
static void *dax_insert_mapping_entry(struct address_space *mapping,
				      struct vm_fault *vmf,
				      void *entry, pfn_t pfn_t,
				      unsigned long flags, bool dirty)
{
	struct radix_tree_root *page_tree = &mapping->page_tree;
	unsigned long pfn = pfn_t_to_pfn(pfn_t);
	pgoff_t index = vmf->pgoff;
	void *new_entry;

	if (dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	if (dax_is_zero_entry(entry) && !(flags & RADIX_DAX_ZERO_PAGE)) {
		/* we are replacing a zero page with block mapping */
		if (dax_is_pmd_entry(entry))
			unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
					PG_PMD_NR, false);
		else /* pte entry */
			unmap_mapping_pages(mapping, vmf->pgoff, 1, false);
	}

	spin_lock_irq(&mapping->tree_lock);
	new_entry = dax_radix_locked_entry(pfn, flags);
	if (dax_entry_size(entry) != dax_entry_size(new_entry)) {
		dax_disassociate_entry(entry, mapping, false);
		dax_associate_entry(new_entry, mapping);
	}

	if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
		/*
		 * Only swap our new entry into the radix tree if the current
		 * entry is a zero page or an empty entry.  If a normal PTE or
		 * PMD entry is already in the tree, we leave it alone.  This
		 * means that if we are trying to insert a PTE and the
		 * existing entry is a PMD, we will just leave the PMD in the
		 * tree and dirty it if necessary.
		 */
		struct radix_tree_node *node;
		void **slot;
		void *ret;

		ret = __radix_tree_lookup(page_tree, index, &node, &slot);
		WARN_ON_ONCE(ret != entry);
		__radix_tree_replace(page_tree, node, slot,
				     new_entry, NULL);
		entry = new_entry;
	}

	if (dirty)
		radix_tree_tag_set(page_tree, index, PAGECACHE_TAG_DIRTY);

	spin_unlock_irq(&mapping->tree_lock);
	return entry;
}

static inline unsigned long
pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
{
	unsigned long address;

	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
	return address;
}

/* Walk all mappings of a given index of a file and writeprotect them */
static void dax_mapping_entry_mkclean(struct address_space *mapping,
				      pgoff_t index, unsigned long pfn)
{
	struct vm_area_struct *vma;
	pte_t pte, *ptep = NULL;
	pmd_t *pmdp = NULL;
	spinlock_t *ptl;

	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
		unsigned long address, start, end;

		cond_resched();

		if (!(vma->vm_flags & VM_SHARED))
			continue;

		address = pgoff_address(index, vma);

		/*
		 * Note because we provide start/end to follow_pte_pmd it will
		 * call mmu_notifier_invalidate_range_start() on our behalf
		 * before taking any lock.
		 */
		if (follow_pte_pmd(vma->vm_mm, address, &start, &end, &ptep, &pmdp, &ptl))
			continue;

		/*
		 * No need to call mmu_notifier_invalidate_range() as we are
		 * downgrading page table protection not changing it to point
		 * to a new page.
		 *
		 * See Documentation/vm/mmu_notifier.txt
		 */
		if (pmdp) {
#ifdef CONFIG_FS_DAX_PMD
			pmd_t pmd;

			if (pfn != pmd_pfn(*pmdp))
				goto unlock_pmd;
			if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
				goto unlock_pmd;

			flush_cache_page(vma, address, pfn);
			pmd = pmdp_huge_clear_flush(vma, address, pmdp);
			pmd = pmd_wrprotect(pmd);
			pmd = pmd_mkclean(pmd);
			set_pmd_at(vma->vm_mm, address, pmdp, pmd);
unlock_pmd:
#endif
			spin_unlock(ptl);
		} else {
			if (pfn != pte_pfn(*ptep))
				goto unlock_pte;
			if (!pte_dirty(*ptep) && !pte_write(*ptep))
				goto unlock_pte;

			flush_cache_page(vma, address, pfn);
			pte = ptep_clear_flush(vma, address, ptep);
			pte = pte_wrprotect(pte);
			pte = pte_mkclean(pte);
			set_pte_at(vma->vm_mm, address, ptep, pte);
unlock_pte:
			pte_unmap_unlock(ptep, ptl);
		}

		mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);
	}
	i_mmap_unlock_read(mapping);
}

static int dax_writeback_one(struct dax_device *dax_dev,
		struct address_space *mapping, pgoff_t index, void *entry)
{
	struct radix_tree_root *page_tree = &mapping->page_tree;
	void *entry2, **slot;
	unsigned long pfn;
	long ret = 0;
	size_t size;

	/*
	 * A page got tagged dirty in DAX mapping? Something is seriously
	 * wrong if we try to write it to persistent memory and it is not a
	 * DAX entry.
	 */
	if (WARN_ON(!radix_tree_exceptional_entry(entry)))
		return -EIO;

	spin_lock_irq(&mapping->tree_lock);
	entry2 = get_unlocked_mapping_entry(mapping, index, &slot);
	/* Entry got punched out / reallocated? */
	if (!entry2 || WARN_ON_ONCE(!radix_tree_exceptional_entry(entry2)))
		goto put_unlocked;
	/*
	 * Entry got reallocated elsewhere? No need to writeback. We have to
	 * compare pfns as we must not bail out due to difference in lockbit
	 * or entry type.
	 */
	if (dax_radix_pfn(entry2) != dax_radix_pfn(entry))
		goto put_unlocked;
	if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
				dax_is_zero_entry(entry))) {
		ret = -EIO;
		goto put_unlocked;
	}

	/* Another fsync thread may have already written back this entry */
	if (!radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
		goto put_unlocked;
	/* Lock the entry to serialize with page faults */
	entry = lock_slot(mapping, slot);
	/*
	 * We can clear the tag now but we have to be careful so that concurrent
	 * dax_writeback_one() calls for the same index cannot finish before we
	 * actually flush the caches. This is achieved as the calls will look
	 * at the entry only under tree_lock and once they do that they will
	 * see the entry locked and wait for it to unlock.
	 */
	radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_TOWRITE);
	spin_unlock_irq(&mapping->tree_lock);

	/*
	 * Even if dax_writeback_mapping_range() was given a wbc->range_start
	 * in the middle of a PMD, the 'index' we are given will be aligned to
	 * the start index of the PMD, as will the pfn we pull from 'entry'.
	 * This allows us to flush for PMD_SIZE and not have to worry about
	 * partial PMD writebacks.
	 */
	pfn = dax_radix_pfn(entry);
	size = PAGE_SIZE << dax_radix_order(entry);

	dax_mapping_entry_mkclean(mapping, index, pfn);
	dax_flush(dax_dev, page_address(pfn_to_page(pfn)), size);
	/*
	 * After we have flushed the cache, we can clear the dirty tag. There
	 * cannot be new dirty data in the pfn after the flush has completed as
	 * the pfn mappings are writeprotected and fault waits for mapping
	 * entry lock.
	 */
	spin_lock_irq(&mapping->tree_lock);
	radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_DIRTY);
	spin_unlock_irq(&mapping->tree_lock);
	trace_dax_writeback_one(mapping->host, index, size >> PAGE_SHIFT);
	put_locked_mapping_entry(mapping, index);
	return ret;

put_unlocked:
	put_unlocked_mapping_entry(mapping, index, entry2);
	spin_unlock_irq(&mapping->tree_lock);
	return ret;
}

/*
 * Flush the mapping to the persistent domain within the byte range of [start,
 * end]. This is required by data integrity operations to ensure file data is
 * on persistent storage prior to completion of the operation.
 */
int dax_writeback_mapping_range(struct address_space *mapping,
		struct block_device *bdev, struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	pgoff_t start_index, end_index;
	pgoff_t indices[PAGEVEC_SIZE];
	struct dax_device *dax_dev;
	struct pagevec pvec;
	bool done = false;
	int i, ret = 0;

	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
		return -EIO;

	if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
		return 0;

	dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
	if (!dax_dev)
		return -EIO;

	start_index = wbc->range_start >> PAGE_SHIFT;
	end_index = wbc->range_end >> PAGE_SHIFT;

	trace_dax_writeback_range(inode, start_index, end_index);

	tag_pages_for_writeback(mapping, start_index, end_index);

	pagevec_init(&pvec);
	while (!done) {
		pvec.nr = find_get_entries_tag(mapping, start_index,
				PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE,
				pvec.pages, indices);

		if (pvec.nr == 0)
			break;

		for (i = 0; i < pvec.nr; i++) {
			if (indices[i] > end_index) {
				done = true;
				break;
			}

			ret = dax_writeback_one(dax_dev, mapping, indices[i],
					pvec.pages[i]);
			if (ret < 0) {
				mapping_set_error(mapping, ret);
				goto out;
			}
		}
		start_index = indices[pvec.nr - 1] + 1;
	}
out:
	put_dax(dax_dev);
	trace_dax_writeback_range_done(inode, start_index, end_index);
	return (ret < 0 ? ret : 0);
}
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);

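/*
 * Illustrative caller (hypothetical foo_* names, modelled on existing DAX
 * filesystems): a filesystem's ->writepages() for DAX files typically
 * reduces to
 *
 *	static int foo_dax_writepages(struct address_space *mapping,
 *				      struct writeback_control *wbc)
 *	{
 *		return dax_writeback_mapping_range(mapping,
 *				mapping->host->i_sb->s_bdev, wbc);
 *	}
 */
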
static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
{
	return (iomap->addr + (pos & PAGE_MASK) - iomap->offset) >> 9;
}

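/*
 * The '>> 9' above converts a byte offset into 512-byte sector units.  Worked
 * example (illustrative): for pos = 0x3000 within an extent whose file offset
 * is 0x1000 and whose disk address is 0x100000, the byte address is
 * 0x100000 + 0x3000 - 0x1000 = 0x102000, i.e. sector 0x810.
 */
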
static int dax_iomap_pfn(struct iomap *iomap, loff_t pos, size_t size,
			 pfn_t *pfnp)
{
	const sector_t sector = dax_iomap_sector(iomap, pos);
	pgoff_t pgoff;
	void *kaddr;
	int id, rc;
	long length;

	rc = bdev_dax_pgoff(iomap->bdev, sector, size, &pgoff);
	if (rc)
		return rc;
	id = dax_read_lock();
	length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
				   &kaddr, pfnp);
	if (length < 0) {
		rc = length;
		goto out;
	}
	rc = -EINVAL;
	if (PFN_PHYS(length) < size)
		goto out;
	if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1))
		goto out;
	/* For larger pages we need devmap */
	if (length > 1 && !pfn_t_devmap(*pfnp))
		goto out;
	rc = 0;
out:
	dax_read_unlock(id);
	return rc;
}

/*
 * The user has performed a load from a hole in the file.  Allocating a new
 * page in the file would cause excessive storage usage for workloads with
 * sparse files.  Instead we insert a read-only mapping of the 4k zero page.
 * If this page is ever written to we will re-fault and change the mapping to
 * point to real DAX storage instead.
 */
static int dax_load_hole(struct address_space *mapping, void *entry,
			 struct vm_fault *vmf)
{
	struct inode *inode = mapping->host;
	unsigned long vaddr = vmf->address;
	int ret = VM_FAULT_NOPAGE;
	struct page *zero_page;
	void *entry2;
	pfn_t pfn;

	zero_page = ZERO_PAGE(0);
	if (unlikely(!zero_page)) {
		ret = VM_FAULT_OOM;
		goto out;
	}

	pfn = page_to_pfn_t(zero_page);
	entry2 = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
			RADIX_DAX_ZERO_PAGE, false);
	if (IS_ERR(entry2)) {
		ret = VM_FAULT_SIGBUS;
		goto out;
	}

	vm_insert_mixed(vmf->vma, vaddr, pfn);
out:
	trace_dax_load_hole(inode, vmf, ret);
	return ret;
}

static bool dax_range_is_aligned(struct block_device *bdev,
				 unsigned int offset, unsigned int length)
{
	unsigned short sector_size = bdev_logical_block_size(bdev);

	if (!IS_ALIGNED(offset, sector_size))
		return false;
	if (!IS_ALIGNED(length, sector_size))
		return false;
	return true;
}

int __dax_zero_page_range(struct block_device *bdev,
		struct dax_device *dax_dev, sector_t sector,
		unsigned int offset, unsigned int size)
{
	if (dax_range_is_aligned(bdev, offset, size)) {
		sector_t start_sector = sector + (offset >> 9);

		return blkdev_issue_zeroout(bdev, start_sector,
				size >> 9, GFP_NOFS, 0);
	} else {
		pgoff_t pgoff;
		long rc, id;
		void *kaddr;
		pfn_t pfn;

		rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff);
		if (rc)
			return rc;

		id = dax_read_lock();
		rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr,
				&pfn);
		if (rc < 0) {
			dax_read_unlock(id);
			return rc;
		}
		memset(kaddr + offset, 0, size);
		dax_flush(dax_dev, kaddr + offset, size);
		dax_read_unlock(id);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(__dax_zero_page_range);

static loff_t
dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct block_device *bdev = iomap->bdev;
	struct dax_device *dax_dev = iomap->dax_dev;
	struct iov_iter *iter = data;
	loff_t end = pos + length, done = 0;
	ssize_t ret = 0;
	int id;

	if (iov_iter_rw(iter) == READ) {
		end = min(end, i_size_read(inode));
		if (pos >= end)
			return 0;

		if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
			return iov_iter_zero(min(length, end - pos), iter);
	}

	if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
		return -EIO;

	/*
	 * Write can allocate block for an area which has a hole page mapped
	 * into page tables. We have to tear down these mappings so that data
	 * written by write(2) is visible in mmap.
	 */
	if (iomap->flags & IOMAP_F_NEW) {
		invalidate_inode_pages2_range(inode->i_mapping,
					      pos >> PAGE_SHIFT,
					      (end - 1) >> PAGE_SHIFT);
	}

	id = dax_read_lock();
	while (pos < end) {
		unsigned offset = pos & (PAGE_SIZE - 1);
		const size_t size = ALIGN(length + offset, PAGE_SIZE);
		const sector_t sector = dax_iomap_sector(iomap, pos);
		ssize_t map_len;
		pgoff_t pgoff;
		void *kaddr;
		pfn_t pfn;

		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
		if (ret)
			break;

		map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
				&kaddr, &pfn);
		if (map_len < 0) {
			ret = map_len;
			break;
		}

		map_len = PFN_PHYS(map_len);
		kaddr += offset;
		map_len -= offset;
		if (map_len > end - pos)
			map_len = end - pos;

		/*
		 * The userspace address for the memory copy has already been
		 * validated via access_ok() in either vfs_read() or
		 * vfs_write(), depending on which operation we are doing.
		 */
		if (iov_iter_rw(iter) == WRITE)
			map_len = dax_copy_from_iter(dax_dev, pgoff, kaddr,
					map_len, iter);
		else
			map_len = copy_to_iter(kaddr, map_len, iter);
		if (map_len <= 0) {
			ret = map_len ? map_len : -EFAULT;
			break;
		}

		pos += map_len;
		length -= map_len;
		done += map_len;
	}
	dax_read_unlock(id);

	return done ? done : ret;
}

/**
 * dax_iomap_rw - Perform I/O to a DAX file
 * @iocb:	The control block for this I/O
 * @iter:	The addresses to do I/O from or to
 * @ops:	iomap ops passed from the file system
 *
 * This function performs read and write operations to directly mapped
 * persistent memory.  The caller needs to take care of read/write exclusion
 * and evicting any page cache pages in the region under I/O.
 */
ssize_t
dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	loff_t pos = iocb->ki_pos, ret = 0, done = 0;
	unsigned flags = 0;

	if (iov_iter_rw(iter) == WRITE) {
		lockdep_assert_held_exclusive(&inode->i_rwsem);
		flags |= IOMAP_WRITE;
	} else {
		lockdep_assert_held(&inode->i_rwsem);
	}

	while (iov_iter_count(iter)) {
		ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
				iter, dax_iomap_actor);
		if (ret <= 0)
			break;
		pos += ret;
		done += ret;
	}

	iocb->ki_pos += done;
	return done ? done : ret;
}
EXPORT_SYMBOL_GPL(dax_iomap_rw);

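/*
 * Illustrative caller (hypothetical foo_* names, modelled on existing DAX
 * filesystems): a ->read_iter() for DAX files is usually a thin wrapper,
 * e.g.
 *
 *	static ssize_t foo_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock_shared(inode);
 *		ret = dax_iomap_rw(iocb, to, &foo_iomap_ops);
 *		inode_unlock_shared(inode);
 *		return ret;
 *	}
 *
 * where foo_iomap_ops is the filesystem's struct iomap_ops.
 */
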
static int dax_fault_return(int error)
{
	if (error == 0)
		return VM_FAULT_NOPAGE;
	if (error == -ENOMEM)
		return VM_FAULT_OOM;
	return VM_FAULT_SIGBUS;
}

/*
 * MAP_SYNC on a dax mapping guarantees dirty metadata is
 * flushed on write-faults (non-cow), but not read-faults.
 */
static bool dax_fault_is_synchronous(unsigned long flags,
		struct vm_area_struct *vma, struct iomap *iomap)
{
	return (flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC)
		&& (iomap->flags & IOMAP_F_DIRTY);
}

static int dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       int *iomap_errp, const struct iomap_ops *ops)
{
	struct vm_area_struct *vma = vmf->vma;
	struct address_space *mapping = vma->vm_file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long vaddr = vmf->address;
	loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
	struct iomap iomap = { 0 };
	unsigned flags = IOMAP_FAULT;
	int error, major = 0;
	bool write = vmf->flags & FAULT_FLAG_WRITE;
	bool sync;
	int vmf_ret = 0;
	void *entry;
	pfn_t pfn;

	trace_dax_pte_fault(inode, vmf, vmf_ret);
	/*
	 * Check whether offset isn't beyond end of file now. Caller is supposed
	 * to hold locks serializing us with truncate / punch hole so this is
	 * a reliable test.
	 */
	if (pos >= i_size_read(inode)) {
		vmf_ret = VM_FAULT_SIGBUS;
		goto out;
	}

	if (write && !vmf->cow_page)
		flags |= IOMAP_WRITE;

	entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
	if (IS_ERR(entry)) {
		vmf_ret = dax_fault_return(PTR_ERR(entry));
		goto out;
	}

	/*
	 * It is possible, particularly with mixed reads & writes to private
	 * mappings, that we have raced with a PMD fault that overlaps with
	 * the PTE we need to set up.  If so just return and the fault will be
	 * retried.
	 */
	if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
		vmf_ret = VM_FAULT_NOPAGE;
		goto unlock_entry;
	}

	/*
	 * Note that we don't bother to use iomap_apply here: DAX required
	 * the file system block size to be equal the page size, which means
	 * that we never have to deal with more than a single extent here.
	 */
	error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
	if (iomap_errp)
		*iomap_errp = error;
	if (error) {
		vmf_ret = dax_fault_return(error);
		goto unlock_entry;
	}
	if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
		error = -EIO;	/* fs corruption? */
		goto error_finish_iomap;
	}

	if (vmf->cow_page) {
		sector_t sector = dax_iomap_sector(&iomap, pos);

		switch (iomap.type) {
		case IOMAP_HOLE:
		case IOMAP_UNWRITTEN:
			clear_user_highpage(vmf->cow_page, vaddr);
			break;
		case IOMAP_MAPPED:
			error = copy_user_dax(iomap.bdev, iomap.dax_dev,
					sector, PAGE_SIZE, vmf->cow_page, vaddr);
			break;
		default:
			WARN_ON_ONCE(1);
			error = -EIO;
			break;
		}

		if (error)
			goto error_finish_iomap;

		__SetPageUptodate(vmf->cow_page);
		vmf_ret = finish_fault(vmf);
		if (!vmf_ret)
			vmf_ret = VM_FAULT_DONE_COW;
		goto finish_iomap;
	}

	sync = dax_fault_is_synchronous(flags, vma, &iomap);

	switch (iomap.type) {
	case IOMAP_MAPPED:
		if (iomap.flags & IOMAP_F_NEW) {
			count_vm_event(PGMAJFAULT);
			count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
			major = VM_FAULT_MAJOR;
		}
		error = dax_iomap_pfn(&iomap, pos, PAGE_SIZE, &pfn);
		if (error < 0)
			goto error_finish_iomap;

		entry = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
						 0, write && !sync);
		if (IS_ERR(entry)) {
			error = PTR_ERR(entry);
			goto error_finish_iomap;
		}

		/*
		 * If we are doing synchronous page fault and inode needs fsync,
		 * we can insert PTE into page tables only after that happens.
		 * Skip insertion for now and return the pfn so that caller can
		 * insert it after fsync is done.
		 */
		if (sync) {
			if (WARN_ON_ONCE(!pfnp)) {
				error = -EIO;
				goto error_finish_iomap;
			}
			*pfnp = pfn;
			vmf_ret = VM_FAULT_NEEDDSYNC | major;
			goto finish_iomap;
		}
		trace_dax_insert_mapping(inode, vmf, entry);
		if (write)
			error = vm_insert_mixed_mkwrite(vma, vaddr, pfn);
		else
			error = vm_insert_mixed(vma, vaddr, pfn);

		/* -EBUSY is fine, somebody else faulted on the same PTE */
		if (error == -EBUSY)
			error = 0;
		break;
	case IOMAP_UNWRITTEN:
	case IOMAP_HOLE:
		if (!write) {
			vmf_ret = dax_load_hole(mapping, entry, vmf);
			goto finish_iomap;
		}
		/*FALLTHRU*/
	default:
		WARN_ON_ONCE(1);
		error = -EIO;
		break;
	}

 error_finish_iomap:
	vmf_ret = dax_fault_return(error) | major;
 finish_iomap:
	if (ops->iomap_end) {
		int copied = PAGE_SIZE;

		if (vmf_ret & VM_FAULT_ERROR)
			copied = 0;
		/*
		 * The fault is done by now and there's no way back (other
		 * thread may be already happily using PTE we have installed).
		 * Just ignore error from ->iomap_end since we cannot do much
		 * with it.
		 */
		ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
	}
 unlock_entry:
	put_locked_mapping_entry(mapping, vmf->pgoff);
 out:
	trace_dax_pte_fault_done(inode, vmf, vmf_ret);
	return vmf_ret;
}

#ifdef CONFIG_FS_DAX_PMD
static int dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap,
		void *entry)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	struct inode *inode = mapping->host;
	struct page *zero_page;
	void *ret = NULL;
	spinlock_t *ptl;
	pmd_t pmd_entry;
	pfn_t pfn;

	zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);

	if (unlikely(!zero_page))
		goto fallback;

	pfn = page_to_pfn_t(zero_page);
	ret = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
			RADIX_DAX_PMD | RADIX_DAX_ZERO_PAGE, false);
	if (IS_ERR(ret))
		goto fallback;

	ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
	if (!pmd_none(*(vmf->pmd))) {
		spin_unlock(ptl);
		goto fallback;
	}

	pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
	pmd_entry = pmd_mkhuge(pmd_entry);
	set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
	spin_unlock(ptl);
	trace_dax_pmd_load_hole(inode, vmf, zero_page, ret);
	return VM_FAULT_NOPAGE;

fallback:
	trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, ret);
	return VM_FAULT_FALLBACK;
}

static int dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       const struct iomap_ops *ops)
{
	struct vm_area_struct *vma = vmf->vma;
	struct address_space *mapping = vma->vm_file->f_mapping;
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	bool write = vmf->flags & FAULT_FLAG_WRITE;
	bool sync;
	unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
	struct inode *inode = mapping->host;
	int result = VM_FAULT_FALLBACK;
	struct iomap iomap = { 0 };
	pgoff_t max_pgoff, pgoff;
	void *entry;
	loff_t pos;
	int error;
	pfn_t pfn;

	/*
	 * Check whether offset isn't beyond end of file now. Caller is
	 * supposed to hold locks serializing us with truncate / punch hole so
	 * this is a reliable test.
	 */
	pgoff = linear_page_index(vma, pmd_addr);
	max_pgoff = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

	trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);

	/*
	 * Make sure that the faulting address's PMD offset (color) matches
	 * the PMD offset from the start of the file.  This is necessary so
	 * that a PMD range in the page table overlaps exactly with a PMD
	 * range in the radix tree.
	 */
	if ((vmf->pgoff & PG_PMD_COLOUR) !=
	    ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
		goto fallback;

	/* Fall back to PTEs if we're going to COW */
	if (write && !(vma->vm_flags & VM_SHARED))
		goto fallback;

	/* If the PMD would extend outside the VMA */
	if (pmd_addr < vma->vm_start)
		goto fallback;
	if ((pmd_addr + PMD_SIZE) > vma->vm_end)
		goto fallback;

	if (pgoff >= max_pgoff) {
		result = VM_FAULT_SIGBUS;
		goto out;
	}

	/* If the PMD would extend beyond the file size */
	if ((pgoff | PG_PMD_COLOUR) >= max_pgoff)
		goto fallback;

	/*
	 * grab_mapping_entry() will make sure we get a 2MiB empty entry, a
	 * 2MiB zero page entry or a DAX PMD.  If it can't (because a 4k page
	 * is already in the tree, for instance), it will return -EEXIST and
	 * we just fall back to 4k entries.
	 */
	entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD);
	if (IS_ERR(entry))
		goto fallback;

	/*
	 * It is possible, particularly with mixed reads & writes to private
	 * mappings, that we have raced with a PTE fault that overlaps with
	 * the PMD we need to set up.  If so just return and the fault will be
	 * retried.
	 */
	if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
			!pmd_devmap(*vmf->pmd)) {
		result = 0;
		goto unlock_entry;
	}

	/*
	 * Note that we don't use iomap_apply here.  We aren't doing I/O, only
	 * setting up a mapping, so really we're using iomap_begin() as a way
	 * to look up our filesystem block.
	 */
	pos = (loff_t)pgoff << PAGE_SHIFT;
	error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap);
	if (error)
		goto unlock_entry;

	if (iomap.offset + iomap.length < pos + PMD_SIZE)
		goto finish_iomap;

	sync = dax_fault_is_synchronous(iomap_flags, vma, &iomap);

	switch (iomap.type) {
	case IOMAP_MAPPED:
		error = dax_iomap_pfn(&iomap, pos, PMD_SIZE, &pfn);
		if (error < 0)
			goto finish_iomap;

		entry = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
						RADIX_DAX_PMD, write && !sync);
		if (IS_ERR(entry))
			goto finish_iomap;

		/*
		 * If we are doing synchronous page fault and inode needs fsync,
		 * we can insert PMD into page tables only after that happens.
		 * Skip insertion for now and return the pfn so that caller can
		 * insert it after fsync is done.
		 */
		if (sync) {
			if (WARN_ON_ONCE(!pfnp))
				goto finish_iomap;
			*pfnp = pfn;
			result = VM_FAULT_NEEDDSYNC;
			goto finish_iomap;
		}

		trace_dax_pmd_insert_mapping(inode, vmf, PMD_SIZE, pfn, entry);
		result = vmf_insert_pfn_pmd(vma, vmf->address, vmf->pmd, pfn,
					    write);
		break;
	case IOMAP_UNWRITTEN:
	case IOMAP_HOLE:
		if (WARN_ON_ONCE(write))
			break;
		result = dax_pmd_load_hole(vmf, &iomap, entry);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

 finish_iomap:
	if (ops->iomap_end) {
		int copied = PMD_SIZE;

		if (result == VM_FAULT_FALLBACK)
			copied = 0;
		/*
		 * The fault is done by now and there's no way back (other
		 * thread may be already happily using PMD we have installed).
		 * Just ignore error from ->iomap_end since we cannot do much
		 * with it.
		 */
		ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags,
				&iomap);
	}
 unlock_entry:
	put_locked_mapping_entry(mapping, pgoff);
 fallback:
	if (result == VM_FAULT_FALLBACK) {
		split_huge_pmd(vma, vmf->pmd, vmf->address);
		count_vm_event(THP_FAULT_FALLBACK);
	}
out:
	trace_dax_pmd_fault_done(inode, vmf, max_pgoff, result);
	return result;
}

#else
static int dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       const struct iomap_ops *ops)
{
	return VM_FAULT_FALLBACK;
}
#endif /* CONFIG_FS_DAX_PMD */

/**
 * dax_iomap_fault - handle a page fault on a DAX file
 * @vmf: The description of the fault
 * @pe_size: Size of the page to fault in
 * @pfnp: PFN to insert for synchronous faults if fsync is required
 * @iomap_errp: Storage for detailed error code in case of error
 * @ops: Iomap ops passed from the file system
 *
 * When a page fault occurs, filesystems may call this helper in
 * their fault handler for DAX files. dax_iomap_fault() assumes the caller
 * has done all the necessary locking for page fault to proceed
 * successfully.
 */
int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
		    pfn_t *pfnp, int *iomap_errp, const struct iomap_ops *ops)
{
	switch (pe_size) {
	case PE_SIZE_PTE:
		return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops);
	case PE_SIZE_PMD:
		return dax_iomap_pmd_fault(vmf, pfnp, ops);
	default:
		return VM_FAULT_FALLBACK;
	}
}
EXPORT_SYMBOL_GPL(dax_iomap_fault);

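/*
 * Illustrative caller (hypothetical foo_* names, modelled on existing DAX
 * filesystems): a ->huge_fault handler usually reduces to
 *
 *	static int foo_dax_huge_fault(struct vm_fault *vmf,
 *				      enum page_entry_size pe_size)
 *	{
 *		pfn_t pfn;
 *
 *		return dax_iomap_fault(vmf, pe_size, &pfn, NULL,
 *				       &foo_iomap_ops);
 *	}
 *
 * with the filesystem's own locking around the call.
 */
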
/*
 * dax_insert_pfn_mkwrite - insert PTE or PMD entry into page tables
 * @vmf: The description of the fault
 * @pe_size: Size of entry to be inserted
 * @pfn: PFN to insert
 *
 * This function inserts writeable PTE or PMD entry into page tables for mmaped
 * DAX file.  It takes care of marking corresponding radix tree entry as dirty
 * as well.
 */
static int dax_insert_pfn_mkwrite(struct vm_fault *vmf,
				  enum page_entry_size pe_size,
				  pfn_t pfn)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	void *entry, **slot;
	pgoff_t index = vmf->pgoff;
	int vmf_ret, error;

	spin_lock_irq(&mapping->tree_lock);
	entry = get_unlocked_mapping_entry(mapping, index, &slot);
	/* Did we race with someone splitting entry or so? */
	if (!entry ||
	    (pe_size == PE_SIZE_PTE && !dax_is_pte_entry(entry)) ||
	    (pe_size == PE_SIZE_PMD && !dax_is_pmd_entry(entry))) {
		put_unlocked_mapping_entry(mapping, index, entry);
		spin_unlock_irq(&mapping->tree_lock);
		trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
						      VM_FAULT_NOPAGE);
		return VM_FAULT_NOPAGE;
	}
	radix_tree_tag_set(&mapping->page_tree, index, PAGECACHE_TAG_DIRTY);
	entry = lock_slot(mapping, slot);
	spin_unlock_irq(&mapping->tree_lock);
	switch (pe_size) {
	case PE_SIZE_PTE:
		error = vm_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
		vmf_ret = dax_fault_return(error);
		break;
#ifdef CONFIG_FS_DAX_PMD
	case PE_SIZE_PMD:
		vmf_ret = vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd,
			pfn, true);
		break;
#endif
	default:
		vmf_ret = VM_FAULT_FALLBACK;
	}
	put_locked_mapping_entry(mapping, index);
	trace_dax_insert_pfn_mkwrite(mapping->host, vmf, vmf_ret);
	return vmf_ret;
}

/**
 * dax_finish_sync_fault - finish synchronous page fault
 * @vmf: The description of the fault
 * @pe_size: Size of entry to be inserted
 * @pfn: PFN to insert
 *
 * This function ensures that the file range touched by the page fault is
 * stored persistently on the media and handles inserting of appropriate page
 * table entry.
 */
int dax_finish_sync_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
			  pfn_t pfn)
{
	int err;
	loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT;
	size_t len = 0;

	if (pe_size == PE_SIZE_PTE)
		len = PAGE_SIZE;
	else if (pe_size == PE_SIZE_PMD)
		len = PMD_SIZE;
	else
		WARN_ON_ONCE(1);
	err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1);
	if (err)
		return VM_FAULT_SIGBUS;
	return dax_insert_pfn_mkwrite(vmf, pe_size, pfn);
}
EXPORT_SYMBOL_GPL(dax_finish_sync_fault);
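
/*
 * Sketch of the synchronous (MAP_SYNC) fault flow in a caller, assuming the
 * hypothetical foo_* wrapper shown after dax_iomap_fault() above:
 *
 *	result = dax_iomap_fault(vmf, pe_size, &pfn, NULL, &foo_iomap_ops);
 *	if (result & VM_FAULT_NEEDDSYNC)
 *		result = dax_finish_sync_fault(vmf, pe_size, pfn);
 *
 * i.e. the filesystem first persists the metadata with fsync and only then
 * lets the PTE/PMD be installed writeably.
 */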