/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pagevec.h>
#include <linux/pmem.h>
#include <linux/sched.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>
#include <linux/iomap.h>
#include "internal.h"
/* We choose 4096 entries - same as per-zone page wait tables */
#define DAX_WAIT_TABLE_BITS 12
#define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)

static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];

static int __init init_dax_wait_table(void)
{
	int i;

	for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
		init_waitqueue_head(wait_table + i);
	return 0;
}
fs_initcall(init_dax_wait_table);
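
/*
 * dax_map_atomic() and dax_unmap_atomic() bracket raw access to the device's
 * memory: they take a reference on the request queue so the device cannot go
 * away underneath us, translate the (sector, size) described by @dax into a
 * kernel virtual address and pfn via bdev_direct_access(), and drop the queue
 * reference again once the caller is done with the mapping.
 */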
static long dax_map_atomic(struct block_device *bdev, struct blk_dax_ctl *dax)
{
	struct request_queue *q = bdev->bd_queue;
	long rc = -EIO;

	dax->addr = ERR_PTR(-EIO);
	if (blk_queue_enter(q, true) != 0)
		return rc;

	rc = bdev_direct_access(bdev, dax);
	if (rc < 0) {
		dax->addr = ERR_PTR(rc);
		blk_queue_exit(q);
	}
	return rc;
}

static void dax_unmap_atomic(struct block_device *bdev,
		const struct blk_dax_ctl *dax)
{
	if (IS_ERR(dax->addr))
		return;
	blk_queue_exit(bdev->bd_queue);
}
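
/*
 * The predicates below inspect the flag bits encoded in the low bits of an
 * exceptional radix tree entry (a sketch only; see the RADIX_DAX_* definitions
 * for the authoritative layout): RADIX_DAX_ENTRY_LOCK marks the entry locked,
 * RADIX_DAX_PMD marks a 2MiB-sized entry, RADIX_DAX_HZP a huge zero page, and
 * RADIX_DAX_EMPTY an entry inserted only to hold the lock.  The remaining bits
 * carry the sector number used elsewhere via dax_radix_sector().
 */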
static int dax_is_pmd_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_PMD;
}

static int dax_is_pte_entry(void *entry)
{
	return !((unsigned long)entry & RADIX_DAX_PMD);
}

static int dax_is_zero_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_HZP;
}

static int dax_is_empty_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_EMPTY;
}
struct page *read_dax_sector(struct block_device *bdev, sector_t n)
{
	struct page *page = alloc_pages(GFP_KERNEL, 0);
	struct blk_dax_ctl dax = {
		.size = PAGE_SIZE,
		.sector = n & ~((((int) PAGE_SIZE) / 512) - 1),
	};
	long rc;

	if (!page)
		return ERR_PTR(-ENOMEM);

	rc = dax_map_atomic(bdev, &dax);
	if (rc < 0)
		return ERR_PTR(rc);
	memcpy_from_pmem(page_address(page), dax.addr, PAGE_SIZE);
	dax_unmap_atomic(bdev, &dax);
	return page;
}
/*
 * DAX radix tree locking
 */
struct exceptional_entry_key {
	struct address_space *mapping;
	pgoff_t entry_start;
};

struct wait_exceptional_entry_queue {
	wait_queue_t wait;
	struct exceptional_entry_key key;
};
static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
		pgoff_t index, void *entry, struct exceptional_entry_key *key)
{
	unsigned long hash;

	/*
	 * If 'entry' is a PMD, align the 'index' that we use for the wait
	 * queue to the start of that PMD.  This ensures that all offsets in
	 * the range covered by the PMD map to the same bit lock.
	 */
	if (dax_is_pmd_entry(entry))
		index &= ~((1UL << (PMD_SHIFT - PAGE_SHIFT)) - 1);

	key->mapping = mapping;
	key->entry_start = index;

	hash = hash_long((unsigned long)mapping ^ index, DAX_WAIT_TABLE_BITS);
	return wait_table + hash;
}
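
/*
 * For example, assuming x86-64 with 4k pages and 2M PMDs (so
 * PMD_SHIFT - PAGE_SHIFT == 9): a fault at index 0x203 that finds a PMD
 * entry waits with entry_start == 0x200, which is the same key the entry
 * holder uses when it wakes waiters, no matter which offset within the
 * 2MiB range it was working on.
 */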
static int wake_exceptional_entry_func(wait_queue_t *wait, unsigned int mode,
				       int sync, void *keyp)
{
	struct exceptional_entry_key *key = keyp;
	struct wait_exceptional_entry_queue *ewait =
		container_of(wait, struct wait_exceptional_entry_queue, wait);

	if (key->mapping != ewait->key.mapping ||
	    key->entry_start != ewait->key.entry_start)
		return 0;
	return autoremove_wake_function(wait, mode, sync, NULL);
}
/*
 * Check whether the given slot is locked. The function must be called with
 * mapping->tree_lock held.
 */
static inline int slot_locked(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
	return entry & RADIX_DAX_ENTRY_LOCK;
}

/*
 * Mark the given slot as locked. The function must be called with
 * mapping->tree_lock held.
 */
static inline void *lock_slot(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);

	entry |= RADIX_DAX_ENTRY_LOCK;
	radix_tree_replace_slot(slot, (void *)entry);
	return (void *)entry;
}

/*
 * Mark the given slot as unlocked. The function must be called with
 * mapping->tree_lock held.
 */
static inline void *unlock_slot(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);

	entry &= ~(unsigned long)RADIX_DAX_ENTRY_LOCK;
	radix_tree_replace_slot(slot, (void *)entry);
	return (void *)entry;
}
/*
 * Look up an entry in the radix tree and, if it is an exceptional entry,
 * wait for it to become unlocked before returning it. The caller must call
 * put_unlocked_mapping_entry() if it decides not to lock the entry, or
 * put_locked_mapping_entry() once it has locked the entry and later wants to
 * unlock it.
 *
 * The function must be called with mapping->tree_lock held.
 */
static void *get_unlocked_mapping_entry(struct address_space *mapping,
					pgoff_t index, void ***slotp)
{
	void *entry, **slot;
	struct wait_exceptional_entry_queue ewait;
	wait_queue_head_t *wq;

	init_wait(&ewait.wait);
	ewait.wait.func = wake_exceptional_entry_func;

	for (;;) {
		entry = __radix_tree_lookup(&mapping->page_tree, index, NULL,
					    &slot);
		if (!entry || !radix_tree_exceptional_entry(entry) ||
		    !slot_locked(mapping, slot)) {
			if (slotp)
				*slotp = slot;
			return entry;
		}

		wq = dax_entry_waitqueue(mapping, index, entry, &ewait.key);
		prepare_to_wait_exclusive(wq, &ewait.wait,
					  TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&mapping->tree_lock);
		schedule();
		finish_wait(wq, &ewait.wait);
		spin_lock_irq(&mapping->tree_lock);
	}
}
static void put_locked_mapping_entry(struct address_space *mapping,
				     pgoff_t index, void *entry)
{
	if (!radix_tree_exceptional_entry(entry)) {
		unlock_page(entry);
		put_page(entry);
	} else {
		dax_unlock_mapping_entry(mapping, index);
	}
}

/*
 * Called when we are done with the radix tree entry we looked up via
 * get_unlocked_mapping_entry() and which we didn't lock in the end.
 */
static void put_unlocked_mapping_entry(struct address_space *mapping,
				       pgoff_t index, void *entry)
{
	if (!radix_tree_exceptional_entry(entry))
		return;

	/* We have to wake up the next waiter for the radix tree entry lock */
	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
}
/*
 * Find the radix tree entry at the given index. If it points to a page,
 * return with the page locked. If it points to an exceptional entry, return
 * with the radix tree entry locked. If the radix tree doesn't contain the
 * given index, create an empty exceptional entry for the index and return
 * with it locked.
 *
 * When requesting an entry with size RADIX_DAX_PMD, grab_mapping_entry() will
 * either return that locked entry or will return an error.  This error will
 * happen if there are any 4k entries (either zero pages or DAX entries)
 * within the 2MiB range that we are requesting.
 *
 * We always favor 4k entries over 2MiB entries. There isn't a flow where we
 * evict 4k entries in order to 'upgrade' them to a 2MiB entry.  A 2MiB
 * insertion will fail if it finds any 4k entries already in the tree, and a
 * 4k insertion will cause an existing 2MiB entry to be unmapped and
 * downgraded to 4k entries.  This happens for both 2MiB huge zero pages as
 * well as 2MiB empty entries.
 *
 * The exception to this downgrade path is for 2MiB DAX PMD entries that have
 * real storage backing them.  We will leave these real 2MiB DAX entries in
 * the tree, and PTE writes will simply dirty the entire 2MiB DAX entry.
 *
 * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
 * persistent memory the benefit is doubtful. We can add that later if we can
 * observe it impacting workloads.
 */
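/*
 * A summary of the policy above (assuming 4k pages and 2MiB PMDs):
 *
 *	requested size	existing entry		result
 *	--------------	----------------------	------------------------------
 *	PTE		none			insert empty locked PTE entry
 *	PTE		page / PTE entry	lock and return it
 *	PTE		PMD zero/empty entry	downgrade to a PTE entry
 *	PTE		PMD entry with storage	lock and return the PMD entry
 *	PMD		none			insert empty locked PMD entry
 *	PMD		page / PTE entry	return ERR_PTR(-EEXIST)
 *	PMD		any PMD entry		lock and return it
 */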
static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
		unsigned long size_flag)
{
	bool pmd_downgrade = false; /* splitting 2MiB entry into 4k entries? */
	void *entry, **slot;

restart:
	spin_lock_irq(&mapping->tree_lock);
	entry = get_unlocked_mapping_entry(mapping, index, &slot);

	if (entry) {
		if (size_flag & RADIX_DAX_PMD) {
			if (!radix_tree_exceptional_entry(entry) ||
					dax_is_pte_entry(entry)) {
				put_unlocked_mapping_entry(mapping, index,
						entry);
				entry = ERR_PTR(-EEXIST);
				goto out_unlock;
			}
		} else { /* trying to grab a PTE entry */
			if (radix_tree_exceptional_entry(entry) &&
					dax_is_pmd_entry(entry) &&
					(dax_is_zero_entry(entry) ||
					 dax_is_empty_entry(entry))) {
				pmd_downgrade = true;
			}
		}
	}

	/* No entry for given index? Make sure radix tree is big enough. */
	if (!entry || pmd_downgrade) {
		int err;

		if (pmd_downgrade) {
			/*
			 * Make sure 'entry' remains valid while we drop
			 * mapping->tree_lock.
			 */
			entry = lock_slot(mapping, slot);
		}

		spin_unlock_irq(&mapping->tree_lock);
		err = radix_tree_preload(
				mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
		if (err) {
			if (pmd_downgrade)
				put_locked_mapping_entry(mapping, index, entry);
			return ERR_PTR(err);
		}

		/*
		 * Besides huge zero pages the only other thing that gets
		 * downgraded are empty entries which don't need to be
		 * unmapped.
		 */
		if (pmd_downgrade && dax_is_zero_entry(entry))
			unmap_mapping_range(mapping,
				(index << PAGE_SHIFT) & PMD_MASK, PMD_SIZE, 0);

		spin_lock_irq(&mapping->tree_lock);

		if (pmd_downgrade) {
			radix_tree_delete(&mapping->page_tree, index);
			mapping->nrexceptional--;
			dax_wake_mapping_entry_waiter(mapping, index, entry,
					true);
		}

		entry = dax_radix_locked_entry(0, size_flag | RADIX_DAX_EMPTY);

		err = __radix_tree_insert(&mapping->page_tree, index,
				dax_radix_order(entry), entry);
		radix_tree_preload_end();
		if (err) {
			spin_unlock_irq(&mapping->tree_lock);
			/*
			 * Someone already created the entry?  This is a
			 * normal failure when inserting PMDs in a range
			 * that already contains PTEs.  In that case we want
			 * to return -EEXIST immediately.
			 */
			if (err == -EEXIST && !(size_flag & RADIX_DAX_PMD))
				goto restart;
			/*
			 * Our insertion of a DAX PMD entry failed, most
			 * likely because it collided with a PTE sized entry
			 * at a different index in the PMD range.  We haven't
			 * inserted anything into the radix tree and have no
			 * waiters to wake.
			 */
			return ERR_PTR(err);
		}
		/* Good, we have inserted an empty locked entry into the tree. */
		mapping->nrexceptional++;
		spin_unlock_irq(&mapping->tree_lock);
		return entry;
	}
	/* Normal page in radix tree? */
	if (!radix_tree_exceptional_entry(entry)) {
		struct page *page = entry;

		get_page(page);
		spin_unlock_irq(&mapping->tree_lock);
		lock_page(page);
		/* Page got truncated? Retry... */
		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			put_page(page);
			goto restart;
		}
		return page;
	}
	entry = lock_slot(mapping, slot);
 out_unlock:
	spin_unlock_irq(&mapping->tree_lock);
	return entry;
}
/*
 * We do not necessarily hold the mapping->tree_lock when we call this
 * function so it is possible that 'entry' is no longer a valid item in the
 * radix tree.  This is okay because all we really need to do is to find the
 * correct waitqueue where tasks might be waiting for that old 'entry' and
 * wake them.
 */
void dax_wake_mapping_entry_waiter(struct address_space *mapping,
		pgoff_t index, void *entry, bool wake_all)
{
	struct exceptional_entry_key key;
	wait_queue_head_t *wq;

	wq = dax_entry_waitqueue(mapping, index, entry, &key);

	/*
	 * Checking for locked entry and prepare_to_wait_exclusive() happens
	 * under mapping->tree_lock, ditto for entry handling in our callers.
	 * So at this point all tasks that could have seen our entry locked
	 * must be in the waitqueue and the following check will see them.
	 */
	if (waitqueue_active(wq))
		__wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
}
void dax_unlock_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	void *entry, **slot;

	spin_lock_irq(&mapping->tree_lock);
	entry = __radix_tree_lookup(&mapping->page_tree, index, NULL, &slot);
	if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry) ||
			 !slot_locked(mapping, slot))) {
		spin_unlock_irq(&mapping->tree_lock);
		return;
	}
	unlock_slot(mapping, slot);
	spin_unlock_irq(&mapping->tree_lock);
	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
}
/*
 * Delete the exceptional DAX entry at @index from @mapping. Wait for the
 * radix tree entry to get unlocked before deleting it.
 */
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	void *entry;

	spin_lock_irq(&mapping->tree_lock);
	entry = get_unlocked_mapping_entry(mapping, index, NULL);
	/*
	 * This gets called from the truncate / punch_hole path. As such, the
	 * caller must hold locks protecting against concurrent modifications
	 * of the radix tree (usually fs-private i_mmap_sem for writing). Since
	 * the caller has seen an exceptional entry for this index, we better
	 * find it at that index as well...
	 */
	if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry))) {
		spin_unlock_irq(&mapping->tree_lock);
		return 0;
	}
	radix_tree_delete(&mapping->page_tree, index);
	mapping->nrexceptional--;
	spin_unlock_irq(&mapping->tree_lock);
	dax_wake_mapping_entry_waiter(mapping, index, entry, true);

	return 1;
}
/*
 * The user has performed a load from a hole in the file.  Allocating
 * a new page in the file would cause excessive storage usage for
 * workloads with sparse files.  We allocate a page cache page instead.
 * We'll kick it out of the page cache if it's ever written to,
 * otherwise it will simply fall out of the page cache under memory
 * pressure without ever having been dirtied.
 */
static int dax_load_hole(struct address_space *mapping, void *entry,
			 struct vm_fault *vmf)
{
	struct page *page;

	/* Hole page already exists? Return it... */
	if (!radix_tree_exceptional_entry(entry)) {
		vmf->page = entry;
		return VM_FAULT_LOCKED;
	}

	/* This will replace locked radix tree entry with a hole page */
	page = find_or_create_page(mapping, vmf->pgoff,
				   vmf->gfp_mask | __GFP_ZERO);
	if (!page) {
		put_locked_mapping_entry(mapping, vmf->pgoff, entry);
		return VM_FAULT_OOM;
	}
	vmf->page = page;
	return VM_FAULT_LOCKED;
}
static int copy_user_dax(struct block_device *bdev, sector_t sector, size_t size,
		struct page *to, unsigned long vaddr)
{
	struct blk_dax_ctl dax = {
		.sector = sector,
		.size = size,
	};
	void *vto;

	if (dax_map_atomic(bdev, &dax) < 0)
		return PTR_ERR(dax.addr);
	vto = kmap_atomic(to);
	copy_user_page(vto, (void __force *)dax.addr, vaddr, to);
	kunmap_atomic(vto);
	dax_unmap_atomic(bdev, &dax);
	return 0;
}
/*
 * By this point grab_mapping_entry() has ensured that we have a locked entry
 * of the appropriate size so we don't have to worry about downgrading PMDs to
 * PTEs.  If we happen to be trying to insert a PTE and there is a PMD
 * already in the tree, we will skip the insertion and just dirty the PMD as
 * appropriate.
 */
static void *dax_insert_mapping_entry(struct address_space *mapping,
				      struct vm_fault *vmf,
				      void *entry, sector_t sector,
				      unsigned long flags)
{
	struct radix_tree_root *page_tree = &mapping->page_tree;
	int error = 0;
	bool hole_fill = false;
	void *new_entry;
	pgoff_t index = vmf->pgoff;

	if (vmf->flags & FAULT_FLAG_WRITE)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	/* Replacing hole page with block mapping? */
	if (!radix_tree_exceptional_entry(entry)) {
		hole_fill = true;
		/*
		 * Unmap the page now before we remove it from page cache below.
		 * The page is locked so it cannot be faulted in again.
		 */
		unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
				    PAGE_SIZE, 0);
		error = radix_tree_preload(vmf->gfp_mask & ~__GFP_HIGHMEM);
		if (error)
			return ERR_PTR(error);
	} else if (dax_is_zero_entry(entry) && !(flags & RADIX_DAX_HZP)) {
		/* replacing huge zero page with PMD block mapping */
		unmap_mapping_range(mapping,
			(vmf->pgoff << PAGE_SHIFT) & PMD_MASK, PMD_SIZE, 0);
	}

	spin_lock_irq(&mapping->tree_lock);
	new_entry = dax_radix_locked_entry(sector, flags);

	if (hole_fill) {
		__delete_from_page_cache(entry, NULL);
		/* Drop pagecache reference */
		put_page(entry);
		error = __radix_tree_insert(page_tree, index,
				dax_radix_order(new_entry), new_entry);
		if (error) {
			new_entry = ERR_PTR(error);
			goto unlock;
		}
		mapping->nrexceptional++;
	} else if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
		/*
		 * Only swap our new entry into the radix tree if the current
		 * entry is a zero page or an empty entry.  If a normal PTE or
		 * PMD entry is already in the tree, we leave it alone.  This
		 * means that if we are trying to insert a PTE and the
		 * existing entry is a PMD, we will just leave the PMD in the
		 * tree and dirty it if necessary.
		 */
		void **slot;
		void *ret;

		ret = __radix_tree_lookup(page_tree, index, NULL, &slot);
		WARN_ON_ONCE(ret != entry);
		radix_tree_replace_slot(slot, new_entry);
	}
	if (vmf->flags & FAULT_FLAG_WRITE)
		radix_tree_tag_set(page_tree, index, PAGECACHE_TAG_DIRTY);
 unlock:
	spin_unlock_irq(&mapping->tree_lock);
	if (hole_fill) {
		radix_tree_preload_end();
		/*
		 * We don't need the hole page anymore, it has been replaced
		 * with a locked radix tree entry now.
		 */
		if (mapping->a_ops->freepage)
			mapping->a_ops->freepage(entry);
		unlock_page(entry);
		put_page(entry);
	}
	return new_entry;
}
static int dax_writeback_one(struct block_device *bdev,
		struct address_space *mapping, pgoff_t index, void *entry)
{
	struct radix_tree_root *page_tree = &mapping->page_tree;
	struct radix_tree_node *node;
	struct blk_dax_ctl dax;
	void **slot;
	int ret = 0;

	spin_lock_irq(&mapping->tree_lock);
	/*
	 * Regular page slots are stabilized by the page lock even
	 * without the tree itself locked.  These unlocked entries
	 * need verification under the tree lock.
	 */
	if (!__radix_tree_lookup(page_tree, index, &node, &slot))
		goto unlock;
	if (*slot != entry)
		goto unlock;

	/* another fsync thread may have already written back this entry */
	if (!radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
		goto unlock;

	if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
			 dax_is_zero_entry(entry))) {
		ret = -EIO;
		goto unlock;
	}

	/*
	 * Even if dax_writeback_mapping_range() was given a wbc->range_start
	 * in the middle of a PMD, the 'index' we are given will be aligned to
	 * the start index of the PMD, as will the sector we pull from
	 * 'entry'.  This allows us to flush for PMD_SIZE and not have to
	 * worry about partial PMD writebacks.
	 */
	dax.sector = dax_radix_sector(entry);
	dax.size = PAGE_SIZE << dax_radix_order(entry);
	spin_unlock_irq(&mapping->tree_lock);

	/*
	 * We cannot hold tree_lock while calling dax_map_atomic() because it
	 * eventually calls cond_resched().
	 */
	ret = dax_map_atomic(bdev, &dax);
	if (ret < 0)
		return ret;

	if (WARN_ON_ONCE(ret < dax.size)) {
		ret = -EIO;
		goto unmap;
	}

	wb_cache_pmem(dax.addr, dax.size);

	spin_lock_irq(&mapping->tree_lock);
	radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_TOWRITE);
	spin_unlock_irq(&mapping->tree_lock);
 unmap:
	dax_unmap_atomic(bdev, &dax);
	return ret;

 unlock:
	spin_unlock_irq(&mapping->tree_lock);
	return ret;
}
/*
 * Flush the mapping to the persistent domain within the byte range of [start,
 * end]. This is required by data integrity operations to ensure file data is
 * on persistent storage prior to completion of the operation.
 */
int dax_writeback_mapping_range(struct address_space *mapping,
		struct block_device *bdev, struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	pgoff_t start_index, end_index;
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	bool done = false;
	int i, ret = 0;

	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
		return -EIO;

	if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
		return 0;

	start_index = wbc->range_start >> PAGE_SHIFT;
	end_index = wbc->range_end >> PAGE_SHIFT;

	tag_pages_for_writeback(mapping, start_index, end_index);

	pagevec_init(&pvec, 0);
	while (!done) {
		pvec.nr = find_get_entries_tag(mapping, start_index,
				PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE,
				pvec.pages, indices);

		if (pvec.nr == 0)
			break;

		for (i = 0; i < pvec.nr; i++) {
			if (indices[i] > end_index) {
				done = true;
				break;
			}

			ret = dax_writeback_one(bdev, mapping, indices[i],
					pvec.pages[i]);
			if (ret < 0)
				return ret;
		}
	}
	return 0;
}
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
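
/*
 * A typical caller is a DAX-aware filesystem's ->writepages() method; a
 * minimal sketch (hypothetical "foo" filesystem, not a real implementation):
 *
 *	static int foo_writepages(struct address_space *mapping,
 *				  struct writeback_control *wbc)
 *	{
 *		return dax_writeback_mapping_range(mapping,
 *				mapping->host->i_sb->s_bdev, wbc);
 *	}
 */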
static int dax_insert_mapping(struct address_space *mapping,
		struct block_device *bdev, sector_t sector, size_t size,
		void **entryp, struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long vaddr = (unsigned long)vmf->virtual_address;
	struct blk_dax_ctl dax = {
		.sector = sector,
		.size = size,
	};
	void *ret;
	void *entry = *entryp;

	if (dax_map_atomic(bdev, &dax) < 0)
		return PTR_ERR(dax.addr);
	dax_unmap_atomic(bdev, &dax);

	ret = dax_insert_mapping_entry(mapping, vmf, entry, dax.sector, 0);
	if (IS_ERR(ret))
		return PTR_ERR(ret);
	*entryp = ret;

	return vm_insert_mixed(vma, vaddr, dax.pfn);
}
/**
 * dax_pfn_mkwrite - handle first write to DAX page
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 */
int dax_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	void *entry;
	pgoff_t index = vmf->pgoff;

	spin_lock_irq(&mapping->tree_lock);
	entry = get_unlocked_mapping_entry(mapping, index, NULL);
	if (!entry || !radix_tree_exceptional_entry(entry))
		goto out;
	radix_tree_tag_set(&mapping->page_tree, index, PAGECACHE_TAG_DIRTY);
	put_unlocked_mapping_entry(mapping, index, entry);
out:
	spin_unlock_irq(&mapping->tree_lock);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);
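
/*
 * Note how little dax_pfn_mkwrite() has to do: the pfn was already mapped
 * read-only by an earlier fault, so all that is needed here is to tag the
 * radix tree entry dirty so dax_writeback_mapping_range() will flush it on
 * the next sync; making the PTE itself writable is left to the core mm once
 * we return VM_FAULT_NOPAGE.
 */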
static bool dax_range_is_aligned(struct block_device *bdev,
				 unsigned int offset, unsigned int length)
{
	unsigned short sector_size = bdev_logical_block_size(bdev);

	if (!IS_ALIGNED(offset, sector_size))
		return false;
	if (!IS_ALIGNED(length, sector_size))
		return false;

	return true;
}

int __dax_zero_page_range(struct block_device *bdev, sector_t sector,
		unsigned int offset, unsigned int length)
{
	struct blk_dax_ctl dax = {
		.sector	= sector,
		.size	= PAGE_SIZE,
	};

	if (dax_range_is_aligned(bdev, offset, length)) {
		sector_t start_sector = dax.sector + (offset >> 9);

		return blkdev_issue_zeroout(bdev, start_sector,
				length >> 9, GFP_NOFS, true);
	} else {
		if (dax_map_atomic(bdev, &dax) < 0)
			return PTR_ERR(dax.addr);
		clear_pmem(dax.addr + offset, length);
		dax_unmap_atomic(bdev, &dax);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(__dax_zero_page_range);
#ifdef CONFIG_FS_IOMAP
static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
{
	return iomap->blkno + (((pos & PAGE_MASK) - iomap->offset) >> 9);
}
static loff_t
dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct iov_iter *iter = data;
	loff_t end = pos + length, done = 0;
	ssize_t ret = 0;

	if (iov_iter_rw(iter) == READ) {
		end = min(end, i_size_read(inode));
		if (pos >= end)
			return 0;

		if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
			return iov_iter_zero(min(length, end - pos), iter);
	}

	if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
		return -EIO;

	while (pos < end) {
		unsigned offset = pos & (PAGE_SIZE - 1);
		struct blk_dax_ctl dax = { 0 };
		ssize_t map_len;

		dax.sector = dax_iomap_sector(iomap, pos);
		dax.size = (length + offset + PAGE_SIZE - 1) & PAGE_MASK;
		map_len = dax_map_atomic(iomap->bdev, &dax);
		if (map_len < 0) {
			ret = map_len;
			break;
		}

		dax.addr += offset;
		map_len -= offset;
		if (map_len > end - pos)
			map_len = end - pos;

		if (iov_iter_rw(iter) == WRITE)
			map_len = copy_from_iter_pmem(dax.addr, map_len, iter);
		else
			map_len = copy_to_iter(dax.addr, map_len, iter);
		dax_unmap_atomic(iomap->bdev, &dax);
		if (map_len <= 0) {
			ret = map_len ? map_len : -EFAULT;
			break;
		}

		pos += map_len;
		length -= map_len;
		done += map_len;
	}

	return done ? done : ret;
}
/**
 * dax_iomap_rw - Perform I/O to a DAX file
 * @iocb:	The control block for this I/O
 * @iter:	The addresses to do I/O from or to
 * @ops:	iomap ops passed from the file system
 *
 * This function performs read and write operations to directly mapped
 * persistent memory.  The caller needs to take care of read/write exclusion
 * and evicting any page cache pages in the region under I/O.
 */
ssize_t
dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
		struct iomap_ops *ops)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	loff_t pos = iocb->ki_pos, ret = 0, done = 0;
	unsigned flags = 0;

	if (iov_iter_rw(iter) == WRITE)
		flags |= IOMAP_WRITE;

	/*
	 * Yes, even DAX files can have page cache attached to them:  A zeroed
	 * page is inserted into the pagecache when we have to serve a write
	 * fault on a hole.  It should never be dirtied and can simply be
	 * dropped from the pagecache once we get real data for the page.
	 *
	 * XXX: This is racy against mmap, and there's nothing we can do about
	 * it. We'll eventually need to shift this down even further so that
	 * we can check if we allocated blocks over a hole first.
	 */
	if (mapping->nrpages) {
		ret = invalidate_inode_pages2_range(mapping,
				pos >> PAGE_SHIFT,
				(pos + iov_iter_count(iter) - 1) >> PAGE_SHIFT);
		WARN_ON_ONCE(ret);
	}

	while (iov_iter_count(iter)) {
		ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
				iter, dax_iomap_actor);
		if (ret <= 0)
			break;
		pos += ret;
		done += ret;
	}

	iocb->ki_pos += done;
	return done ? done : ret;
}
EXPORT_SYMBOL_GPL(dax_iomap_rw);
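
/*
 * A minimal sketch of a caller (hypothetical "foo" filesystem; a real caller
 * must supply its own struct iomap_ops and handle write exclusion similarly):
 *
 *	static ssize_t foo_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock_shared(inode);
 *		ret = dax_iomap_rw(iocb, to, &foo_iomap_ops);
 *		inode_unlock_shared(inode);
 *		return ret;
 *	}
 */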
/**
 * dax_iomap_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @ops: iomap ops passed from the file system
 *
 * When a page fault occurs, filesystems may call this helper in their fault
 * or mkwrite handler for DAX files. Assumes the caller has done all the
 * necessary locking for the page fault to proceed successfully.
 */
int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
			struct iomap_ops *ops)
{
	struct address_space *mapping = vma->vm_file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long vaddr = (unsigned long)vmf->virtual_address;
	loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
	sector_t sector;
	struct iomap iomap = { 0 };
	unsigned flags = IOMAP_FAULT;
	int error, major = 0;
	int locked_status = 0;
	void *entry;

	/*
	 * Check whether the offset isn't beyond the end of file now. The
	 * caller is supposed to hold locks serializing us with truncate /
	 * punch hole so this is a reliable test.
	 */
	if (pos >= i_size_read(inode))
		return VM_FAULT_SIGBUS;

	entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
	if (IS_ERR(entry)) {
		error = PTR_ERR(entry);
		goto out;
	}

	if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page)
		flags |= IOMAP_WRITE;

	/*
	 * Note that we don't bother to use iomap_apply here: DAX requires
	 * the filesystem block size to be equal to the page size, which means
	 * that we never have to deal with more than a single extent here.
	 */
	error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
	if (error)
		goto unlock_entry;
	if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
		error = -EIO;		/* fs corruption? */
		goto finish_iomap;
	}

	sector = dax_iomap_sector(&iomap, pos);

	if (vmf->cow_page) {
		switch (iomap.type) {
		case IOMAP_HOLE:
		case IOMAP_UNWRITTEN:
			clear_user_highpage(vmf->cow_page, vaddr);
			break;
		case IOMAP_MAPPED:
			error = copy_user_dax(iomap.bdev, sector, PAGE_SIZE,
					vmf->cow_page, vaddr);
			break;
		default:
			WARN_ON_ONCE(1);
			error = -EIO;
			break;
		}

		if (error)
			goto finish_iomap;
		if (!radix_tree_exceptional_entry(entry)) {
			vmf->page = entry;
			locked_status = VM_FAULT_LOCKED;
		} else {
			vmf->entry = entry;
			locked_status = VM_FAULT_DAX_LOCKED;
		}
		goto finish_iomap;
	}

	switch (iomap.type) {
	case IOMAP_MAPPED:
		if (iomap.flags & IOMAP_F_NEW) {
			count_vm_event(PGMAJFAULT);
			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
			major = VM_FAULT_MAJOR;
		}
		error = dax_insert_mapping(mapping, iomap.bdev, sector,
				PAGE_SIZE, &entry, vma, vmf);
		break;
	case IOMAP_UNWRITTEN:
	case IOMAP_HOLE:
		if (!(vmf->flags & FAULT_FLAG_WRITE)) {
			locked_status = dax_load_hole(mapping, entry, vmf);
			break;
		}
		/*FALLTHRU*/
	default:
		WARN_ON_ONCE(1);
		error = -EIO;
		break;
	}

 finish_iomap:
	if (ops->iomap_end) {
		if (error) {
			/* keep previous error */
			ops->iomap_end(inode, pos, PAGE_SIZE, 0, flags,
					&iomap);
		} else {
			error = ops->iomap_end(inode, pos, PAGE_SIZE,
					PAGE_SIZE, flags, &iomap);
		}
	}
 unlock_entry:
	if (!locked_status || error)
		put_locked_mapping_entry(mapping, vmf->pgoff, entry);
 out:
	if (error == -ENOMEM)
		return VM_FAULT_OOM | major;
	/* -EBUSY is fine, somebody else faulted on the same PTE */
	if (error < 0 && error != -EBUSY)
		return VM_FAULT_SIGBUS | major;
	if (locked_status) {
		WARN_ON_ONCE(error); /* -EBUSY from ops->iomap_end? */
		return locked_status;
	}
	return VM_FAULT_NOPAGE | major;
}
EXPORT_SYMBOL_GPL(dax_iomap_fault);
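
/*
 * A sketch of how a filesystem might wire this up (hypothetical "foo"
 * filesystem; a .pmd_fault handler wrapping dax_iomap_pmd_fault() can be
 * added in the same way when CONFIG_FS_DAX_PMD is enabled):
 *
 *	static int foo_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 *	{
 *		return dax_iomap_fault(vma, vmf, &foo_iomap_ops);
 *	}
 *
 *	static const struct vm_operations_struct foo_dax_vm_ops = {
 *		.fault		= foo_dax_fault,
 *		.page_mkwrite	= foo_dax_fault,
 *		.pfn_mkwrite	= dax_pfn_mkwrite,
 *	};
 */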
#ifdef CONFIG_FS_DAX_PMD
/*
 * The 'colour' (i.e. low bits) within a PMD of a page offset.  This comes up
 * more often than one might expect in the below functions.
 */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
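
/*
 * For example, with 4k pages and 2M PMDs (x86-64), PG_PMD_COLOUR is 511
 * (0x1ff): a page offset or pfn can only sit at the head of a PMD mapping
 * if its low nine bits are zero, which is what the alignment checks below
 * test with '& PG_PMD_COLOUR' and '| PG_PMD_COLOUR'.
 */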
static int dax_pmd_insert_mapping(struct vm_area_struct *vma, pmd_t *pmd,
		struct vm_fault *vmf, unsigned long address,
		struct iomap *iomap, loff_t pos, bool write, void **entryp)
{
	struct address_space *mapping = vma->vm_file->f_mapping;
	struct block_device *bdev = iomap->bdev;
	struct blk_dax_ctl dax = {
		.sector = dax_iomap_sector(iomap, pos),
		.size = PMD_SIZE,
	};
	long length = dax_map_atomic(bdev, &dax);
	void *ret;

	if (length < 0) /* dax_map_atomic() failed */
		return VM_FAULT_FALLBACK;
	if (length < PMD_SIZE)
		goto unmap_fallback;
	if (pfn_t_to_pfn(dax.pfn) & PG_PMD_COLOUR)
		goto unmap_fallback;
	if (!pfn_t_devmap(dax.pfn))
		goto unmap_fallback;

	dax_unmap_atomic(bdev, &dax);

	ret = dax_insert_mapping_entry(mapping, vmf, *entryp, dax.sector,
			RADIX_DAX_PMD);
	if (IS_ERR(ret))
		return VM_FAULT_FALLBACK;
	*entryp = ret;

	return vmf_insert_pfn_pmd(vma, address, pmd, dax.pfn, write);

 unmap_fallback:
	dax_unmap_atomic(bdev, &dax);
	return VM_FAULT_FALLBACK;
}
static int dax_pmd_load_hole(struct vm_area_struct *vma, pmd_t *pmd,
		struct vm_fault *vmf, unsigned long address,
		struct iomap *iomap, void **entryp)
{
	struct address_space *mapping = vma->vm_file->f_mapping;
	unsigned long pmd_addr = address & PMD_MASK;
	struct page *zero_page;
	spinlock_t *ptl;
	pmd_t pmd_entry;
	void *ret;

	zero_page = mm_get_huge_zero_page(vma->vm_mm);

	if (unlikely(!zero_page))
		return VM_FAULT_FALLBACK;

	ret = dax_insert_mapping_entry(mapping, vmf, *entryp, 0,
			RADIX_DAX_PMD | RADIX_DAX_HZP);
	if (IS_ERR(ret))
		return VM_FAULT_FALLBACK;
	*entryp = ret;

	ptl = pmd_lock(vma->vm_mm, pmd);
	if (!pmd_none(*pmd)) {
		spin_unlock(ptl);
		return VM_FAULT_FALLBACK;
	}

	pmd_entry = mk_pmd(zero_page, vma->vm_page_prot);
	pmd_entry = pmd_mkhuge(pmd_entry);
	set_pmd_at(vma->vm_mm, pmd_addr, pmd, pmd_entry);
	spin_unlock(ptl);
	return VM_FAULT_NOPAGE;
}
int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address,
		pmd_t *pmd, unsigned int flags, struct iomap_ops *ops)
{
	struct address_space *mapping = vma->vm_file->f_mapping;
	unsigned long pmd_addr = address & PMD_MASK;
	bool write = flags & FAULT_FLAG_WRITE;
	unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
	struct inode *inode = mapping->host;
	int result = VM_FAULT_FALLBACK;
	struct iomap iomap = { 0 };
	pgoff_t max_pgoff, pgoff;
	struct vm_fault vmf;
	void *entry;
	loff_t pos;
	int error;

	/* Fall back to PTEs if we're going to COW */
	if (write && !(vma->vm_flags & VM_SHARED))
		goto fallback;

	/* If the PMD would extend outside the VMA */
	if (pmd_addr < vma->vm_start)
		goto fallback;
	if ((pmd_addr + PMD_SIZE) > vma->vm_end)
		goto fallback;

	/*
	 * Check whether the offset isn't beyond the end of file now. The
	 * caller is supposed to hold locks serializing us with truncate /
	 * punch hole so this is a reliable test.
	 */
	pgoff = linear_page_index(vma, pmd_addr);
	max_pgoff = (i_size_read(inode) - 1) >> PAGE_SHIFT;

	if (pgoff > max_pgoff)
		return VM_FAULT_SIGBUS;

	/* If the PMD would extend beyond the file size */
	if ((pgoff | PG_PMD_COLOUR) > max_pgoff)
		goto fallback;

	/*
	 * grab_mapping_entry() will make sure we get a 2M empty entry, a DAX
	 * PMD or a HZP entry.  If it can't (because a 4k page is already in
	 * the tree, for instance), it will return -EEXIST and we just fall
	 * back to 4k entries.
	 */
	entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD);
	if (IS_ERR(entry))
		goto fallback;

	/*
	 * Note that we don't use iomap_apply here.  We aren't doing I/O, only
	 * setting up a mapping, so really we're using iomap_begin() as a way
	 * to look up our filesystem block.
	 */
	pos = (loff_t)pgoff << PAGE_SHIFT;
	error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap);
	if (error)
		goto unlock_entry;
	if (iomap.offset + iomap.length < pos + PMD_SIZE)
		goto finish_iomap;

	vmf.pgoff = pgoff;
	vmf.flags = flags;
	vmf.gfp_mask = mapping_gfp_mask(mapping) | __GFP_IO;

	switch (iomap.type) {
	case IOMAP_MAPPED:
		result = dax_pmd_insert_mapping(vma, pmd, &vmf, address,
				&iomap, pos, write, &entry);
		break;
	case IOMAP_UNWRITTEN:
	case IOMAP_HOLE:
		if (WARN_ON_ONCE(write))
			goto finish_iomap;
		result = dax_pmd_load_hole(vma, pmd, &vmf, address, &iomap,
				&entry);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

 finish_iomap:
	if (ops->iomap_end) {
		if (result == VM_FAULT_FALLBACK) {
			ops->iomap_end(inode, pos, PMD_SIZE, 0, iomap_flags,
					&iomap);
		} else {
			error = ops->iomap_end(inode, pos, PMD_SIZE, PMD_SIZE,
					iomap_flags, &iomap);
			if (error)
				result = VM_FAULT_FALLBACK;
		}
	}
 unlock_entry:
	put_locked_mapping_entry(mapping, pgoff, entry);
 fallback:
	if (result == VM_FAULT_FALLBACK) {
		split_huge_pmd(vma, pmd, address);
		count_vm_event(THP_FAULT_FALLBACK);
	}
	return result;
}
EXPORT_SYMBOL_GPL(dax_iomap_pmd_fault);
#endif /* CONFIG_FS_DAX_PMD */
#endif /* CONFIG_FS_IOMAP */