// SPDX-License-Identifier: GPL-2.0-only
/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <[email protected]>
 * Author: Ross Zwisler <[email protected]>
 */

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pagevec.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>
#include <linux/mmu_notifier.h>
#include <linux/iomap.h>
#include <asm/pgalloc.h>

#define CREATE_TRACE_POINTS
#include <trace/events/fs_dax.h>

static inline unsigned int pe_order(enum page_entry_size pe_size)
{
	if (pe_size == PE_SIZE_PTE)
		return PAGE_SHIFT - PAGE_SHIFT;
	if (pe_size == PE_SIZE_PMD)
		return PMD_SHIFT - PAGE_SHIFT;
	if (pe_size == PE_SIZE_PUD)
		return PUD_SHIFT - PAGE_SHIFT;
	return ~0;
}
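
/*
 * Illustrative note (not part of the original source): with 4K pages on
 * x86-64, PAGE_SHIFT == 12, PMD_SHIFT == 21 and PUD_SHIFT == 30, so
 * pe_order() maps PE_SIZE_PTE, PE_SIZE_PMD and PE_SIZE_PUD to orders
 * 0, 9 and 18 respectively.
 */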

/* We choose 4096 entries - same as per-zone page wait tables */
#define DAX_WAIT_TABLE_BITS 12
#define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)

/* The 'colour' (ie low bits) within a PMD of a page offset. */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
#define PG_PMD_NR	(PMD_SIZE >> PAGE_SHIFT)

/* The order of a PMD entry */
#define PMD_ORDER	(PMD_SHIFT - PAGE_SHIFT)

static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];

static int __init init_dax_wait_table(void)
{
	int i;

	for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
		init_waitqueue_head(wait_table + i);
	return 0;
}
fs_initcall(init_dax_wait_table);

/*
 * DAX pagecache entries use XArray value entries so they can't be mistaken
 * for pages.  We use one bit for locking, one bit for the entry size (PMD)
 * and two more to tell us if the entry is a zero page or an empty entry that
 * is just used for locking.  In total four special bits.
 *
 * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the ZERO_PAGE
 * and EMPTY bits aren't set the entry is a normal DAX entry with a filesystem
 * block allocation.
 */
#define DAX_SHIFT	(4)
#define DAX_LOCKED	(1UL << 0)
#define DAX_PMD		(1UL << 1)
#define DAX_ZERO_PAGE	(1UL << 2)
#define DAX_EMPTY	(1UL << 3)

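/*
 * Illustrative sketch (not part of the original source): how a pfn
 * round-trips through the value-entry encoding above, for a hypothetical
 * pfn of 0x1234 stored as an unlocked PMD entry (helpers defined below):
 *
 *	void *entry = dax_make_entry(pfn_to_pfn_t(0x1234), DAX_PMD);
 *	dax_to_pfn(entry);	// == 0x1234
 *	dax_is_locked(entry);	// false: DAX_LOCKED is not set
 *	dax_entry_order(entry);	// == PMD_ORDER
 */
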
static unsigned long dax_to_pfn(void *entry)
{
	return xa_to_value(entry) >> DAX_SHIFT;
}

static void *dax_make_entry(pfn_t pfn, unsigned long flags)
{
	return xa_mk_value(flags | (pfn_t_to_pfn(pfn) << DAX_SHIFT));
}

static bool dax_is_locked(void *entry)
{
	return xa_to_value(entry) & DAX_LOCKED;
}

static unsigned int dax_entry_order(void *entry)
{
	if (xa_to_value(entry) & DAX_PMD)
		return PMD_ORDER;
	return 0;
}

static unsigned long dax_is_pmd_entry(void *entry)
{
	return xa_to_value(entry) & DAX_PMD;
}

static bool dax_is_pte_entry(void *entry)
{
	return !(xa_to_value(entry) & DAX_PMD);
}

static int dax_is_zero_entry(void *entry)
{
	return xa_to_value(entry) & DAX_ZERO_PAGE;
}

static int dax_is_empty_entry(void *entry)
{
	return xa_to_value(entry) & DAX_EMPTY;
}

/*
 * true if the entry that was found is of a smaller order than the entry
 * we were looking for
 */
static bool dax_is_conflict(void *entry)
{
	return entry == XA_RETRY_ENTRY;
}

/*
 * DAX page cache entry locking
 */
struct exceptional_entry_key {
	struct xarray *xa;
	pgoff_t entry_start;
};

struct wait_exceptional_entry_queue {
	wait_queue_entry_t wait;
	struct exceptional_entry_key key;
};

/**
 * enum dax_wake_mode: waitqueue wakeup behaviour
 * @WAKE_ALL: wake all waiters in the waitqueue
 * @WAKE_NEXT: wake only the first waiter in the waitqueue
 */
enum dax_wake_mode {
	WAKE_ALL,
	WAKE_NEXT,
};

static wait_queue_head_t *dax_entry_waitqueue(struct xa_state *xas,
		void *entry, struct exceptional_entry_key *key)
{
	unsigned long hash;
	unsigned long index = xas->xa_index;

	/*
	 * If 'entry' is a PMD, align the 'index' that we use for the wait
	 * queue to the start of that PMD.  This ensures that all offsets in
	 * the range covered by the PMD map to the same bit lock.
	 */
	if (dax_is_pmd_entry(entry))
		index &= ~PG_PMD_COLOUR;
	key->xa = xas->xa;
	key->entry_start = index;

	hash = hash_long((unsigned long)xas->xa ^ index, DAX_WAIT_TABLE_BITS);
	return wait_table + hash;
}

static int wake_exceptional_entry_func(wait_queue_entry_t *wait,
		unsigned int mode, int sync, void *keyp)
{
	struct exceptional_entry_key *key = keyp;
	struct wait_exceptional_entry_queue *ewait =
		container_of(wait, struct wait_exceptional_entry_queue, wait);

	if (key->xa != ewait->key.xa ||
	    key->entry_start != ewait->key.entry_start)
		return 0;
	return autoremove_wake_function(wait, mode, sync, NULL);
}

/*
 * @entry may no longer be the entry at the index in the mapping.
 * The important information it's conveying is whether the entry at
 * this index used to be a PMD entry.
 */
static void dax_wake_entry(struct xa_state *xas, void *entry,
			   enum dax_wake_mode mode)
{
	struct exceptional_entry_key key;
	wait_queue_head_t *wq;

	wq = dax_entry_waitqueue(xas, entry, &key);

	/*
	 * Checking for locked entry and prepare_to_wait_exclusive() happens
	 * under the i_pages lock, ditto for entry handling in our callers.
	 * So at this point all tasks that could have seen our entry locked
	 * must be in the waitqueue and the following check will see them.
	 */
	if (waitqueue_active(wq))
		__wake_up(wq, TASK_NORMAL, mode == WAKE_ALL ? 0 : 1, &key);
}

/*
 * Look up entry in page cache, wait for it to become unlocked if it
 * is a DAX entry and return it.  The caller must subsequently call
 * put_unlocked_entry() if it did not lock the entry or dax_unlock_entry()
 * if it did.  The entry returned may have a larger order than @order.
 * If @order is larger than the order of the entry found in i_pages, this
 * function returns a dax_is_conflict entry.
 *
 * Must be called with the i_pages lock held.
 */
static void *get_unlocked_entry(struct xa_state *xas, unsigned int order)
{
	void *entry;
	struct wait_exceptional_entry_queue ewait;
	wait_queue_head_t *wq;

	init_wait(&ewait.wait);
	ewait.wait.func = wake_exceptional_entry_func;

	for (;;) {
		entry = xas_find_conflict(xas);
		if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
			return entry;
		if (dax_entry_order(entry) < order)
			return XA_RETRY_ENTRY;
		if (!dax_is_locked(entry))
			return entry;

		wq = dax_entry_waitqueue(xas, entry, &ewait.key);
		prepare_to_wait_exclusive(wq, &ewait.wait,
					  TASK_UNINTERRUPTIBLE);
		xas_unlock_irq(xas);
		xas_reset(xas);
		schedule();
		finish_wait(wq, &ewait.wait);
		xas_lock_irq(xas);
	}
}

/*
 * The only thing keeping the address space around is the i_pages lock
 * (it's cycled in clear_inode() after removing the entries from i_pages)
 * After we call xas_unlock_irq(), we cannot touch xas->xa.
 */
static void wait_entry_unlocked(struct xa_state *xas, void *entry)
{
	struct wait_exceptional_entry_queue ewait;
	wait_queue_head_t *wq;

	init_wait(&ewait.wait);
	ewait.wait.func = wake_exceptional_entry_func;

	wq = dax_entry_waitqueue(xas, entry, &ewait.key);
	/*
	 * Unlike get_unlocked_entry() there is no guarantee that this
	 * path ever successfully retrieves an unlocked entry before an
	 * inode dies. Perform a non-exclusive wait in case this path
	 * never successfully performs its own wake up.
	 */
	prepare_to_wait(wq, &ewait.wait, TASK_UNINTERRUPTIBLE);
	xas_unlock_irq(xas);
	schedule();
	finish_wait(wq, &ewait.wait);
}

static void put_unlocked_entry(struct xa_state *xas, void *entry,
			       enum dax_wake_mode mode)
{
	if (entry && !dax_is_conflict(entry))
		dax_wake_entry(xas, entry, mode);
}

/*
 * We used the xa_state to get the entry, but then we locked the entry and
 * dropped the xa_lock, so we know the xa_state is stale and must be reset
 * before use.
 */
static void dax_unlock_entry(struct xa_state *xas, void *entry)
{
	void *old;

	BUG_ON(dax_is_locked(entry));
	xas_reset(xas);
	xas_lock_irq(xas);
	old = xas_store(xas, entry);
	xas_unlock_irq(xas);
	BUG_ON(!dax_is_locked(old));
	dax_wake_entry(xas, entry, WAKE_NEXT);
}

/*
 * Return: The entry stored at this location before it was locked.
 */
static void *dax_lock_entry(struct xa_state *xas, void *entry)
{
	unsigned long v = xa_to_value(entry);
	return xas_store(xas, xa_mk_value(v | DAX_LOCKED));
}

static unsigned long dax_entry_size(void *entry)
{
	if (dax_is_zero_entry(entry))
		return 0;
	else if (dax_is_empty_entry(entry))
		return 0;
	else if (dax_is_pmd_entry(entry))
		return PMD_SIZE;
	else
		return PAGE_SIZE;
}

static unsigned long dax_end_pfn(void *entry)
{
	return dax_to_pfn(entry) + dax_entry_size(entry) / PAGE_SIZE;
}

/*
 * Iterate through all mapped pfns represented by an entry, i.e. skip
 * 'empty' and 'zero' entries.
 */
#define for_each_mapped_pfn(entry, pfn) \
	for (pfn = dax_to_pfn(entry); \
			pfn < dax_end_pfn(entry); pfn++)

/*
 * TODO: for reflink+dax we need a way to associate a single page with
 * multiple address_space instances at different linear_page_index()
 * offsets.
 */
static void dax_associate_entry(void *entry, struct address_space *mapping,
		struct vm_area_struct *vma, unsigned long address)
{
	unsigned long size = dax_entry_size(entry), pfn, index;
	int i = 0;

	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return;

	index = linear_page_index(vma, address & ~(size - 1));
	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		WARN_ON_ONCE(page->mapping);
		page->mapping = mapping;
		page->index = index + i++;
	}
}

static void dax_disassociate_entry(void *entry, struct address_space *mapping,
		bool trunc)
{
	unsigned long pfn;

	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return;

	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		WARN_ON_ONCE(trunc && page_ref_count(page) > 1);
		WARN_ON_ONCE(page->mapping && page->mapping != mapping);
		page->mapping = NULL;
		page->index = 0;
	}
}

static struct page *dax_busy_page(void *entry)
{
	unsigned long pfn;

	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		if (page_ref_count(page) > 1)
			return page;
	}
	return NULL;
}

/**
 * dax_lock_page - Lock the DAX entry corresponding to a page
 * @page: The page whose entry we want to lock
 *
 * Context: Process context.
 * Return: A cookie to pass to dax_unlock_page() or 0 if the entry could
 * not be locked.
 */
dax_entry_t dax_lock_page(struct page *page)
{
	XA_STATE(xas, NULL, 0);
	void *entry;

	/* Ensure page->mapping isn't freed while we look at it */
	rcu_read_lock();
	for (;;) {
		struct address_space *mapping = READ_ONCE(page->mapping);

		entry = NULL;
		if (!mapping || !dax_mapping(mapping))
			break;

		/*
		 * In the device-dax case there's no need to lock, a
		 * struct dev_pagemap pin is sufficient to keep the
		 * inode alive, and we assume we have dev_pagemap pin
		 * otherwise we would not have a valid pfn_to_page()
		 * translation.
		 */
		entry = (void *)~0UL;
		if (S_ISCHR(mapping->host->i_mode))
			break;

		xas.xa = &mapping->i_pages;
		xas_lock_irq(&xas);
		if (mapping != page->mapping) {
			xas_unlock_irq(&xas);
			continue;
		}
		xas_set(&xas, page->index);
		entry = xas_load(&xas);
		if (dax_is_locked(entry)) {
			rcu_read_unlock();
			wait_entry_unlocked(&xas, entry);
			rcu_read_lock();
			continue;
		}
		dax_lock_entry(&xas, entry);
		xas_unlock_irq(&xas);
		break;
	}
	rcu_read_unlock();
	return (dax_entry_t)entry;
}

void dax_unlock_page(struct page *page, dax_entry_t cookie)
{
	struct address_space *mapping = page->mapping;
	XA_STATE(xas, &mapping->i_pages, page->index);

	if (S_ISCHR(mapping->host->i_mode))
		return;

	dax_unlock_entry(&xas, (void *)cookie);
}

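/*
 * Illustrative sketch (not part of the original source): the intended
 * pairing around the cookie returned by dax_lock_page(), e.g. from a
 * memory-failure style caller:
 *
 *	dax_entry_t cookie = dax_lock_page(page);
 *	if (!cookie)
 *		return -EBUSY;		// entry could not be locked
 *	// ... inspect the page while its entry is held locked ...
 *	dax_unlock_page(page, cookie);
 */
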
/*
 * Find page cache entry at given index. If it is a DAX entry, return it
 * with the entry locked. If the page cache doesn't contain an entry at
 * that index, add a locked empty entry.
 *
 * When requesting an entry with size DAX_PMD, grab_mapping_entry() will
 * either return that locked entry or will return VM_FAULT_FALLBACK.
 * This will happen if there are any PTE entries within the PMD range
 * that we are requesting.
 *
 * We always favor PTE entries over PMD entries. There isn't a flow where we
 * evict PTE entries in order to 'upgrade' them to a PMD entry.  A PMD
 * insertion will fail if it finds any PTE entries already in the tree, and a
 * PTE insertion will cause an existing PMD entry to be unmapped and
 * downgraded to PTE entries.  This happens for both PMD zero pages as
 * well as PMD empty entries.
 *
 * The exception to this downgrade path is for PMD entries that have
 * real storage backing them.  We will leave these real PMD entries in
 * the tree, and PTE writes will simply dirty the entire PMD entry.
 *
 * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
 * persistent memory the benefit is doubtful. We can add that later if we can
 * show it helps.
 *
 * On error, this function does not return an ERR_PTR.  Instead it returns
 * a VM_FAULT code, encoded as an xarray internal entry.  The ERR_PTR values
 * overlap with xarray value entries.
 */
static void *grab_mapping_entry(struct xa_state *xas,
		struct address_space *mapping, unsigned int order)
{
	unsigned long index = xas->xa_index;
	bool pmd_downgrade;	/* splitting PMD entry into PTE entries? */
	void *entry;

retry:
	pmd_downgrade = false;
	xas_lock_irq(xas);
	entry = get_unlocked_entry(xas, order);

	if (entry) {
		if (dax_is_conflict(entry))
			goto fallback;
		if (!xa_is_value(entry)) {
			xas_set_err(xas, -EIO);
			goto out_unlock;
		}

		if (order == 0) {
			if (dax_is_pmd_entry(entry) &&
			    (dax_is_zero_entry(entry) ||
			     dax_is_empty_entry(entry))) {
				pmd_downgrade = true;
			}
		}
	}

	if (pmd_downgrade) {
		/*
		 * Make sure 'entry' remains valid while we drop
		 * the i_pages lock.
		 */
		dax_lock_entry(xas, entry);

		/*
		 * Besides huge zero pages the only other thing that gets
		 * downgraded are empty entries which don't need to be
		 * unmapped.
		 */
		if (dax_is_zero_entry(entry)) {
			xas_unlock_irq(xas);
			unmap_mapping_pages(mapping,
					xas->xa_index & ~PG_PMD_COLOUR,
					PG_PMD_NR, false);
			xas_reset(xas);
			xas_lock_irq(xas);
		}

		dax_disassociate_entry(entry, mapping, false);
		xas_store(xas, NULL);	/* undo the PMD join */
		dax_wake_entry(xas, entry, WAKE_ALL);
		mapping->nrpages -= PG_PMD_NR;
		entry = NULL;
		xas_set(xas, index);
	}

	if (entry) {
		dax_lock_entry(xas, entry);
	} else {
		unsigned long flags = DAX_EMPTY;

		if (order > 0)
			flags |= DAX_PMD;
		entry = dax_make_entry(pfn_to_pfn_t(0), flags);
		dax_lock_entry(xas, entry);
		if (xas_error(xas))
			goto out_unlock;
		mapping->nrpages += 1UL << order;
	}

out_unlock:
	xas_unlock_irq(xas);
	if (xas_nomem(xas, mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM))
		goto retry;
	if (xas->xa_node == XA_ERROR(-ENOMEM))
		return xa_mk_internal(VM_FAULT_OOM);
	if (xas_error(xas))
		return xa_mk_internal(VM_FAULT_SIGBUS);
	return entry;
fallback:
	xas_unlock_irq(xas);
	return xa_mk_internal(VM_FAULT_FALLBACK);
}

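/*
 * Illustrative sketch (not part of the original source): how the fault
 * handlers below consume grab_mapping_entry()'s return convention:
 *
 *	entry = grab_mapping_entry(&xas, mapping, 0);
 *	if (xa_is_internal(entry))
 *		return xa_to_internal(entry);	// an encoded VM_FAULT_* code
 *	// ... use the locked entry, then dax_unlock_entry(&xas, entry);
 */
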
/**
 * dax_layout_busy_page_range - find first pinned page in @mapping
 * @mapping: address space to scan for a page with ref count > 1
 * @start: Starting offset. Page containing 'start' is included.
 * @end: End offset. Page containing 'end' is included. If 'end' is LLONG_MAX,
 *       pages from 'start' till the end of file are included.
 *
 * DAX requires ZONE_DEVICE mapped pages. These pages are never
 * 'onlined' to the page allocator so they are considered idle when
 * page->count == 1. A filesystem uses this interface to determine if
 * any page in the mapping is busy, i.e. for DMA, or other
 * get_user_pages() usages.
 *
 * It is expected that the filesystem is holding locks to block the
 * establishment of new mappings in this address_space. I.e. it expects
 * to be able to run unmap_mapping_range() and subsequently not race
 * mapping_mapped() becoming true.
 */
struct page *dax_layout_busy_page_range(struct address_space *mapping,
					loff_t start, loff_t end)
{
	void *entry;
	unsigned int scanned = 0;
	struct page *page = NULL;
	pgoff_t start_idx = start >> PAGE_SHIFT;
	pgoff_t end_idx;
	XA_STATE(xas, &mapping->i_pages, start_idx);

	/*
	 * In the 'limited' case get_user_pages() for dax is disabled.
	 */
	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return NULL;

	if (!dax_mapping(mapping) || !mapping_mapped(mapping))
		return NULL;

	/* If end == LLONG_MAX, all pages from start till end of file */
	if (end == LLONG_MAX)
		end_idx = ULONG_MAX;
	else
		end_idx = end >> PAGE_SHIFT;
	/*
	 * If we race get_user_pages_fast() here either we'll see the
	 * elevated page count in the iteration and wait, or
	 * get_user_pages_fast() will see that the page it took a reference
	 * against is no longer mapped in the page tables and bail to the
	 * get_user_pages() slow path.  The slow path is protected by
	 * pte_lock() and pmd_lock(). New references are not taken without
	 * holding those locks, and unmap_mapping_pages() will not zero the
	 * pte or pmd without holding the respective lock, so we are
	 * guaranteed to either see new references or prevent new
	 * references from being established.
	 */
	unmap_mapping_pages(mapping, start_idx, end_idx - start_idx + 1, 0);

	xas_lock_irq(&xas);
	xas_for_each(&xas, entry, end_idx) {
		if (WARN_ON_ONCE(!xa_is_value(entry)))
			continue;
		if (unlikely(dax_is_locked(entry)))
			entry = get_unlocked_entry(&xas, 0);
		if (entry)
			page = dax_busy_page(entry);
		put_unlocked_entry(&xas, entry, WAKE_NEXT);
		if (page)
			break;
		if (++scanned % XA_CHECK_SCHED)
			continue;

		xas_pause(&xas);
		xas_unlock_irq(&xas);
		cond_resched();
		xas_lock_irq(&xas);
	}
	xas_unlock_irq(&xas);
	return page;
}
EXPORT_SYMBOL_GPL(dax_layout_busy_page_range);

struct page *dax_layout_busy_page(struct address_space *mapping)
{
	return dax_layout_busy_page_range(mapping, 0, LLONG_MAX);
}
EXPORT_SYMBOL_GPL(dax_layout_busy_page);

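/*
 * Illustrative sketch (not part of the original source): a filesystem
 * typically loops on this helper before truncating or hole-punching a
 * DAX file, waiting for the busy page to become idle (xfs does something
 * very similar in its break-layouts path):
 *
 *	struct page *page = dax_layout_busy_page(inode->i_mapping);
 *	if (page)
 *		// wait for page_ref_count(page) to drop to 1, then retry
 */
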
static int __dax_invalidate_entry(struct address_space *mapping,
					  pgoff_t index, bool trunc)
{
	XA_STATE(xas, &mapping->i_pages, index);
	int ret = 0;
	void *entry;

	xas_lock_irq(&xas);
	entry = get_unlocked_entry(&xas, 0);
	if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
		goto out;
	if (!trunc &&
	    (xas_get_mark(&xas, PAGECACHE_TAG_DIRTY) ||
	     xas_get_mark(&xas, PAGECACHE_TAG_TOWRITE)))
		goto out;
	dax_disassociate_entry(entry, mapping, trunc);
	xas_store(&xas, NULL);
	mapping->nrpages -= 1UL << dax_entry_order(entry);
	ret = 1;
out:
	put_unlocked_entry(&xas, entry, WAKE_ALL);
	xas_unlock_irq(&xas);
	return ret;
}

/*
 * Delete DAX entry at @index from @mapping.  Wait for it
 * to be unlocked before deleting it.
 */
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	int ret = __dax_invalidate_entry(mapping, index, true);

	/*
	 * This gets called from truncate / punch_hole path. As such, the caller
	 * must hold locks protecting against concurrent modifications of the
	 * page cache (usually fs-private i_mmap_sem for writing). Since the
	 * caller has seen a DAX entry for this index, we better find it
	 * at that index as well...
	 */
	WARN_ON_ONCE(!ret);
	return ret;
}

/*
 * Invalidate DAX entry if it is clean.
 */
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
				      pgoff_t index)
{
	return __dax_invalidate_entry(mapping, index, false);
}

static pgoff_t dax_iomap_pgoff(const struct iomap *iomap, loff_t pos)
{
	return PHYS_PFN(iomap->addr + (pos & PAGE_MASK) - iomap->offset);
}
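
/*
 * Illustrative arithmetic (not part of the original source): for a
 * hypothetical extent with iomap->addr == 0x10000000 mapping file offset
 * iomap->offset == 0x200000, a position pos == 0x201000 gives
 * PHYS_PFN(0x10000000 + 0x201000 - 0x200000) == PHYS_PFN(0x10001000),
 * i.e. pfn 0x10001 with 4K pages.
 */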

static int copy_cow_page_dax(struct vm_fault *vmf, const struct iomap_iter *iter)
{
	pgoff_t pgoff = dax_iomap_pgoff(&iter->iomap, iter->pos);
	void *vto, *kaddr;
	long rc;
	int id;

	id = dax_read_lock();
	rc = dax_direct_access(iter->iomap.dax_dev, pgoff, 1, &kaddr, NULL);
	if (rc < 0) {
		dax_read_unlock(id);
		return rc;
	}
	vto = kmap_atomic(vmf->cow_page);
	copy_user_page(vto, kaddr, vmf->address, vmf->cow_page);
	kunmap_atomic(vto);
	dax_read_unlock(id);
	return 0;
}

/*
 * By this point grab_mapping_entry() has ensured that we have a locked entry
 * of the appropriate size so we don't have to worry about downgrading PMDs to
 * PTEs.  If we happen to be trying to insert a PTE and there is a PMD
 * already in the tree, we will skip the insertion and just dirty the PMD as
 * appropriate.
 */
static void *dax_insert_entry(struct xa_state *xas,
		struct address_space *mapping, struct vm_fault *vmf,
		void *entry, pfn_t pfn, unsigned long flags, bool dirty)
{
	void *new_entry = dax_make_entry(pfn, flags);

	if (dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	if (dax_is_zero_entry(entry) && !(flags & DAX_ZERO_PAGE)) {
		unsigned long index = xas->xa_index;
		/* we are replacing a zero page with block mapping */
		if (dax_is_pmd_entry(entry))
			unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
					PG_PMD_NR, false);
		else /* pte entry */
			unmap_mapping_pages(mapping, index, 1, false);
	}

	xas_reset(xas);
	xas_lock_irq(xas);
	if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
		void *old;

		dax_disassociate_entry(entry, mapping, false);
		dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address);
		/*
		 * Only swap our new entry into the page cache if the current
		 * entry is a zero page or an empty entry.  If a normal PTE or
		 * PMD entry is already in the cache, we leave it alone.  This
		 * means that if we are trying to insert a PTE and the
		 * existing entry is a PMD, we will just leave the PMD in the
		 * tree and dirty it if necessary.
		 */
		old = dax_lock_entry(xas, new_entry);
		WARN_ON_ONCE(old != xa_mk_value(xa_to_value(entry) |
					DAX_LOCKED));
		entry = new_entry;
	} else {
		xas_load(xas);	/* Walk the xa_state */
	}

	if (dirty)
		xas_set_mark(xas, PAGECACHE_TAG_DIRTY);

	xas_unlock_irq(xas);
	return entry;
}

static inline
unsigned long pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
{
	unsigned long address;

	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
	return address;
}

/* Walk all mappings of a given index of a file and writeprotect them */
static void dax_entry_mkclean(struct address_space *mapping, pgoff_t index,
		unsigned long pfn)
{
	struct vm_area_struct *vma;
	pte_t pte, *ptep = NULL;
	pmd_t *pmdp = NULL;
	spinlock_t *ptl;

	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
		struct mmu_notifier_range range;
		unsigned long address;

		cond_resched();

		if (!(vma->vm_flags & VM_SHARED))
			continue;

		address = pgoff_address(index, vma);

		/*
		 * follow_invalidate_pte() will use the range to call
		 * mmu_notifier_invalidate_range_start() on our behalf before
		 * taking any lock.
		 */
		if (follow_invalidate_pte(vma->vm_mm, address, &range, &ptep,
					  &pmdp, &ptl))
			continue;

		/*
		 * No need to call mmu_notifier_invalidate_range() as we are
		 * downgrading page table protection not changing it to point
		 * to a new page.
		 *
		 * See Documentation/vm/mmu_notifier.rst
		 */
		if (pmdp) {
#ifdef CONFIG_FS_DAX_PMD
			pmd_t pmd;

			if (pfn != pmd_pfn(*pmdp))
				goto unlock_pmd;
			if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
				goto unlock_pmd;

			flush_cache_page(vma, address, pfn);
			pmd = pmdp_invalidate(vma, address, pmdp);
			pmd = pmd_wrprotect(pmd);
			pmd = pmd_mkclean(pmd);
			set_pmd_at(vma->vm_mm, address, pmdp, pmd);
unlock_pmd:
#endif
			spin_unlock(ptl);
		} else {
			if (pfn != pte_pfn(*ptep))
				goto unlock_pte;
			if (!pte_dirty(*ptep) && !pte_write(*ptep))
				goto unlock_pte;

			flush_cache_page(vma, address, pfn);
			pte = ptep_clear_flush(vma, address, ptep);
			pte = pte_wrprotect(pte);
			pte = pte_mkclean(pte);
			set_pte_at(vma->vm_mm, address, ptep, pte);
unlock_pte:
			pte_unmap_unlock(ptep, ptl);
		}

		mmu_notifier_invalidate_range_end(&range);
	}
	i_mmap_unlock_read(mapping);
}

static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
		struct address_space *mapping, void *entry)
{
	unsigned long pfn, index, count;
	long ret = 0;

	/*
	 * A page got tagged dirty in DAX mapping? Something is seriously
	 * wrong.
	 */
	if (WARN_ON(!xa_is_value(entry)))
		return -EIO;

	if (unlikely(dax_is_locked(entry))) {
		void *old_entry = entry;

		entry = get_unlocked_entry(xas, 0);

		/* Entry got punched out / reallocated? */
		if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
			goto put_unlocked;
		/*
		 * Entry got reallocated elsewhere? No need to writeback.
		 * We have to compare pfns as we must not bail out due to
		 * difference in lockbit or entry type.
		 */
		if (dax_to_pfn(old_entry) != dax_to_pfn(entry))
			goto put_unlocked;
		if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
					dax_is_zero_entry(entry))) {
			ret = -EIO;
			goto put_unlocked;
		}

		/* Another fsync thread may have already done this entry */
		if (!xas_get_mark(xas, PAGECACHE_TAG_TOWRITE))
			goto put_unlocked;
	}

	/* Lock the entry to serialize with page faults */
	dax_lock_entry(xas, entry);

	/*
	 * We can clear the tag now but we have to be careful so that concurrent
	 * dax_writeback_one() calls for the same index cannot finish before we
	 * actually flush the caches. This is achieved as the calls will look
	 * at the entry only under the i_pages lock and once they do that
	 * they will see the entry locked and wait for it to unlock.
	 */
	xas_clear_mark(xas, PAGECACHE_TAG_TOWRITE);
	xas_unlock_irq(xas);

	/*
	 * If dax_writeback_mapping_range() was given a wbc->range_start
	 * in the middle of a PMD, the 'index' we use needs to be
	 * aligned to the start of the PMD.
	 * This allows us to flush for PMD_SIZE and not have to worry about
	 * partial PMD writebacks.
	 */
	pfn = dax_to_pfn(entry);
	count = 1UL << dax_entry_order(entry);
	index = xas->xa_index & ~(count - 1);

	dax_entry_mkclean(mapping, index, pfn);
	dax_flush(dax_dev, page_address(pfn_to_page(pfn)), count * PAGE_SIZE);
	/*
	 * After we have flushed the cache, we can clear the dirty tag. There
	 * cannot be new dirty data in the pfn after the flush has completed as
	 * the pfn mappings are writeprotected and fault waits for mapping
	 * entry lock.
	 */
	xas_reset(xas);
	xas_lock_irq(xas);
	xas_store(xas, entry);
	xas_clear_mark(xas, PAGECACHE_TAG_DIRTY);
	dax_wake_entry(xas, entry, WAKE_NEXT);

	trace_dax_writeback_one(mapping->host, index, count);
	return ret;

 put_unlocked:
	put_unlocked_entry(xas, entry, WAKE_NEXT);
	return ret;
}

/*
 * Flush the mapping to the persistent domain within the byte range of [start,
 * end]. This is required by data integrity operations to ensure file data is
 * on persistent storage prior to completion of the operation.
 */
int dax_writeback_mapping_range(struct address_space *mapping,
		struct dax_device *dax_dev, struct writeback_control *wbc)
{
	XA_STATE(xas, &mapping->i_pages, wbc->range_start >> PAGE_SHIFT);
	struct inode *inode = mapping->host;
	pgoff_t end_index = wbc->range_end >> PAGE_SHIFT;
	void *entry;
	int ret = 0;
	unsigned int scanned = 0;

	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
		return -EIO;

	if (mapping_empty(mapping) || wbc->sync_mode != WB_SYNC_ALL)
		return 0;

	trace_dax_writeback_range(inode, xas.xa_index, end_index);

	tag_pages_for_writeback(mapping, xas.xa_index, end_index);

	xas_lock_irq(&xas);
	xas_for_each_marked(&xas, entry, end_index, PAGECACHE_TAG_TOWRITE) {
		ret = dax_writeback_one(&xas, dax_dev, mapping, entry);
		if (ret < 0) {
			mapping_set_error(mapping, ret);
			break;
		}
		if (++scanned % XA_CHECK_SCHED)
			continue;

		xas_pause(&xas);
		xas_unlock_irq(&xas);
		cond_resched();
		xas_lock_irq(&xas);
	}
	xas_unlock_irq(&xas);
	trace_dax_writeback_range_done(inode, xas.xa_index, end_index);
	return ret;
}
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);

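/*
 * Illustrative sketch (not part of the original source): a filesystem
 * wires this up from its ->writepages() method, roughly as ext4 does;
 * 'foo_dax_writepages' and the way the dax_device is looked up are
 * assumptions for the example:
 *
 *	static int foo_dax_writepages(struct address_space *mapping,
 *				      struct writeback_control *wbc)
 *	{
 *		return dax_writeback_mapping_range(mapping,
 *				fs_dax_device(mapping->host), wbc);
 *	}
 */
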
static int dax_iomap_pfn(const struct iomap *iomap, loff_t pos, size_t size,
			 pfn_t *pfnp)
{
	pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
	int id, rc;
	long length;

	id = dax_read_lock();
	length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
				   NULL, pfnp);
	if (length < 0) {
		rc = length;
		goto out;
	}
	rc = -EINVAL;
	if (PFN_PHYS(length) < size)
		goto out;
	if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1))
		goto out;
	/* For larger pages we need devmap */
	if (length > 1 && !pfn_t_devmap(*pfnp))
		goto out;
	rc = 0;
out:
	dax_read_unlock(id);
	return rc;
}

/*
 * The user has performed a load from a hole in the file.  Allocating a new
 * page in the file would cause excessive storage usage for workloads with
 * sparse files.  Instead we insert a read-only mapping of the 4k zero page.
 * If this page is ever written to we will re-fault and change the mapping to
 * point to real DAX storage instead.
 */
static vm_fault_t dax_load_hole(struct xa_state *xas,
		struct address_space *mapping, void **entry,
		struct vm_fault *vmf)
{
	struct inode *inode = mapping->host;
	unsigned long vaddr = vmf->address;
	pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr));
	vm_fault_t ret;

	*entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
			DAX_ZERO_PAGE, false);

	ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
	trace_dax_load_hole(inode, vmf, ret);
	return ret;
}

#ifdef CONFIG_FS_DAX_PMD
static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
		const struct iomap *iomap, void **entry)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	struct vm_area_struct *vma = vmf->vma;
	struct inode *inode = mapping->host;
	pgtable_t pgtable = NULL;
	struct page *zero_page;
	spinlock_t *ptl;
	pmd_t pmd_entry;
	pfn_t pfn;

	zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);

	if (unlikely(!zero_page))
		goto fallback;

	pfn = page_to_pfn_t(zero_page);
	*entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
			DAX_PMD | DAX_ZERO_PAGE, false);

	if (arch_needs_pgtable_deposit()) {
		pgtable = pte_alloc_one(vma->vm_mm);
		if (!pgtable)
			return VM_FAULT_OOM;
	}

	ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
	if (!pmd_none(*(vmf->pmd))) {
		spin_unlock(ptl);
		goto fallback;
	}

	if (pgtable) {
		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
		mm_inc_nr_ptes(vma->vm_mm);
	}
	pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
	pmd_entry = pmd_mkhuge(pmd_entry);
	set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
	spin_unlock(ptl);
	trace_dax_pmd_load_hole(inode, vmf, zero_page, *entry);
	return VM_FAULT_NOPAGE;

fallback:
	if (pgtable)
		pte_free(vma->vm_mm, pgtable);
	trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, *entry);
	return VM_FAULT_FALLBACK;
}
#else
static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
		const struct iomap *iomap, void **entry)
{
	return VM_FAULT_FALLBACK;
}
#endif /* CONFIG_FS_DAX_PMD */

static int dax_memzero(struct dax_device *dax_dev, pgoff_t pgoff,
		unsigned int offset, size_t size)
{
	void *kaddr;
	long ret;

	ret = dax_direct_access(dax_dev, pgoff, 1, &kaddr, NULL);
	if (ret > 0) {
		memset(kaddr + offset, 0, size);
		dax_flush(dax_dev, kaddr + offset, size);
	}
	return ret;
}

static s64 dax_zero_iter(struct iomap_iter *iter, bool *did_zero)
{
	const struct iomap *iomap = &iter->iomap;
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	loff_t pos = iter->pos;
	u64 length = iomap_length(iter);
	s64 written = 0;

	/* already zeroed?  we're done. */
	if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
		return length;

	do {
		unsigned offset = offset_in_page(pos);
		unsigned size = min_t(u64, PAGE_SIZE - offset, length);
		pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
		long rc;
		int id;

		id = dax_read_lock();
		if (IS_ALIGNED(pos, PAGE_SIZE) && size == PAGE_SIZE)
			rc = dax_zero_page_range(iomap->dax_dev, pgoff, 1);
		else
			rc = dax_memzero(iomap->dax_dev, pgoff, offset, size);
		dax_read_unlock(id);

		if (rc < 0)
			return rc;
		pos += size;
		length -= size;
		written += size;
		if (did_zero)
			*did_zero = true;
	} while (length > 0);

	return written;
}

int dax_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
		const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode		= inode,
		.pos		= pos,
		.len		= len,
		.flags		= IOMAP_DAX | IOMAP_ZERO,
	};
	int ret;

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = dax_zero_iter(&iter, did_zero);
	return ret;
}
EXPORT_SYMBOL_GPL(dax_zero_range);

int dax_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
		const struct iomap_ops *ops)
{
	unsigned int blocksize = i_blocksize(inode);
	unsigned int off = pos & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!off)
		return 0;
	return dax_zero_range(inode, pos, blocksize - off, did_zero, ops);
}
EXPORT_SYMBOL_GPL(dax_truncate_page);

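/*
 * Illustrative sketch (not part of the original source): a filesystem
 * zeroes the partial tail block when shrinking a DAX file, roughly:
 *
 *	// 'foo_iomap_ops' is a hypothetical iomap_ops for the example
 *	error = dax_truncate_page(inode, newsize, NULL, &foo_iomap_ops);
 */
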
static loff_t dax_iomap_iter(const struct iomap_iter *iomi,
		struct iov_iter *iter)
{
	const struct iomap *iomap = &iomi->iomap;
	loff_t length = iomap_length(iomi);
	loff_t pos = iomi->pos;
	struct dax_device *dax_dev = iomap->dax_dev;
	loff_t end = pos + length, done = 0;
	ssize_t ret = 0;
	size_t xfer;
	int id;

	if (iov_iter_rw(iter) == READ) {
		end = min(end, i_size_read(iomi->inode));
		if (pos >= end)
			return 0;

		if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
			return iov_iter_zero(min(length, end - pos), iter);
	}

	if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
		return -EIO;

	/*
	 * Write can allocate block for an area which has a hole page mapped
	 * into page tables. We have to tear down these mappings so that data
	 * written by write(2) is visible in mmap.
	 */
	if (iomap->flags & IOMAP_F_NEW) {
		invalidate_inode_pages2_range(iomi->inode->i_mapping,
					      pos >> PAGE_SHIFT,
					      (end - 1) >> PAGE_SHIFT);
	}

	id = dax_read_lock();
	while (pos < end) {
		unsigned offset = pos & (PAGE_SIZE - 1);
		const size_t size = ALIGN(length + offset, PAGE_SIZE);
		pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
		ssize_t map_len;
		void *kaddr;

		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
				&kaddr, NULL);
		if (map_len < 0) {
			ret = map_len;
			break;
		}

		map_len = PFN_PHYS(map_len);
		kaddr += offset;
		map_len -= offset;
		if (map_len > end - pos)
			map_len = end - pos;

		if (iov_iter_rw(iter) == WRITE)
			xfer = dax_copy_from_iter(dax_dev, pgoff, kaddr,
					map_len, iter);
		else
			xfer = dax_copy_to_iter(dax_dev, pgoff, kaddr,
					map_len, iter);

		pos += xfer;
		length -= xfer;
		done += xfer;

		if (xfer == 0)
			ret = -EFAULT;
		if (xfer < map_len)
			break;
	}
	dax_read_unlock(id);

	return done ? done : ret;
}

/**
 * dax_iomap_rw - Perform I/O to a DAX file
 * @iocb:	The control block for this I/O
 * @iter:	The addresses to do I/O from or to
 * @ops:	iomap ops passed from the file system
 *
 * This function performs read and write operations to directly mapped
 * persistent memory.  The caller needs to take care of read/write exclusion
 * and evicting any page cache pages in the region under I/O.
 */
ssize_t
dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops)
{
	struct iomap_iter iomi = {
		.inode		= iocb->ki_filp->f_mapping->host,
		.pos		= iocb->ki_pos,
		.len		= iov_iter_count(iter),
		.flags		= IOMAP_DAX,
	};
	loff_t done = 0;
	int ret;

	if (iov_iter_rw(iter) == WRITE) {
		lockdep_assert_held_write(&iomi.inode->i_rwsem);
		iomi.flags |= IOMAP_WRITE;
	} else {
		lockdep_assert_held(&iomi.inode->i_rwsem);
	}

	if (iocb->ki_flags & IOCB_NOWAIT)
		iomi.flags |= IOMAP_NOWAIT;

	while ((ret = iomap_iter(&iomi, ops)) > 0)
		iomi.processed = dax_iomap_iter(&iomi, iter);

	done = iomi.pos - iocb->ki_pos;
	iocb->ki_pos = iomi.pos;
	return done ? done : ret;
}
EXPORT_SYMBOL_GPL(dax_iomap_rw);

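/*
 * Illustrative sketch (not part of the original source): a filesystem's
 * ->read_iter() for DAX files funnels into dax_iomap_rw(), roughly as
 * ext4 and xfs do ('foo_iomap_ops' is a hypothetical iomap_ops):
 *
 *	static ssize_t foo_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock_shared(inode);
 *		ret = dax_iomap_rw(iocb, to, &foo_iomap_ops);
 *		inode_unlock_shared(inode);
 *		return ret;
 *	}
 */
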
static vm_fault_t dax_fault_return(int error)
{
	if (error == 0)
		return VM_FAULT_NOPAGE;
	return vmf_error(error);
}

/*
 * MAP_SYNC on a dax mapping guarantees dirty metadata is
 * flushed on write-faults (non-cow), but not read-faults.
 */
static bool dax_fault_is_synchronous(unsigned long flags,
		struct vm_area_struct *vma, const struct iomap *iomap)
{
	return (flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC)
		&& (iomap->flags & IOMAP_F_DIRTY);
}

/*
 * When handling a synchronous page fault and the inode needs an fsync, we can
 * insert the PTE/PMD into page tables only after that fsync happened. Skip
 * insertion for now and return the pfn so that the caller can insert it after
 * the fsync is done.
 */
static vm_fault_t dax_fault_synchronous_pfnp(pfn_t *pfnp, pfn_t pfn)
{
	if (WARN_ON_ONCE(!pfnp))
		return VM_FAULT_SIGBUS;
	*pfnp = pfn;
	return VM_FAULT_NEEDDSYNC;
}

static vm_fault_t dax_fault_cow_page(struct vm_fault *vmf,
		const struct iomap_iter *iter)
{
	vm_fault_t ret;
	int error = 0;

	switch (iter->iomap.type) {
	case IOMAP_HOLE:
	case IOMAP_UNWRITTEN:
		clear_user_highpage(vmf->cow_page, vmf->address);
		break;
	case IOMAP_MAPPED:
		error = copy_cow_page_dax(vmf, iter);
		break;
	default:
		WARN_ON_ONCE(1);
		error = -EIO;
		break;
	}

	if (error)
		return dax_fault_return(error);

	__SetPageUptodate(vmf->cow_page);
	ret = finish_fault(vmf);
	if (!ret)
		return VM_FAULT_DONE_COW;
	return ret;
}

/**
 * dax_fault_iter - Common actor to handle pfn insertion in PTE/PMD fault.
 * @vmf:	vm fault instance
 * @iter:	iomap iter
 * @pfnp:	pfn to be returned
 * @xas:	the dax mapping tree of a file
 * @entry:	an unlocked dax entry to be inserted
 * @pmd:	distinguish whether it is a pmd fault
 */
static vm_fault_t dax_fault_iter(struct vm_fault *vmf,
		const struct iomap_iter *iter, pfn_t *pfnp,
		struct xa_state *xas, void **entry, bool pmd)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	const struct iomap *iomap = &iter->iomap;
	size_t size = pmd ? PMD_SIZE : PAGE_SIZE;
	loff_t pos = (loff_t)xas->xa_index << PAGE_SHIFT;
	bool write = vmf->flags & FAULT_FLAG_WRITE;
	bool sync = dax_fault_is_synchronous(iter->flags, vmf->vma, iomap);
	unsigned long entry_flags = pmd ? DAX_PMD : 0;
	int err = 0;
	pfn_t pfn;

	if (!pmd && vmf->cow_page)
		return dax_fault_cow_page(vmf, iter);

	/* if we are reading UNWRITTEN and HOLE, return a hole. */
	if (!write &&
	    (iomap->type == IOMAP_UNWRITTEN || iomap->type == IOMAP_HOLE)) {
		if (!pmd)
			return dax_load_hole(xas, mapping, entry, vmf);
		return dax_pmd_load_hole(xas, vmf, iomap, entry);
	}

	if (iomap->type != IOMAP_MAPPED) {
		WARN_ON_ONCE(1);
		return pmd ? VM_FAULT_FALLBACK : VM_FAULT_SIGBUS;
	}

	err = dax_iomap_pfn(&iter->iomap, pos, size, &pfn);
	if (err)
		return pmd ? VM_FAULT_FALLBACK : dax_fault_return(err);

	*entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn, entry_flags,
				  write && !sync);

	if (sync)
		return dax_fault_synchronous_pfnp(pfnp, pfn);

	/* insert PMD pfn */
	if (pmd)
		return vmf_insert_pfn_pmd(vmf, pfn, write);

	/* insert PTE pfn */
	if (write)
		return vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
	return vmf_insert_mixed(vmf->vma, vmf->address, pfn);
}

static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       int *iomap_errp, const struct iomap_ops *ops)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	XA_STATE(xas, &mapping->i_pages, vmf->pgoff);
	struct iomap_iter iter = {
		.inode		= mapping->host,
		.pos		= (loff_t)vmf->pgoff << PAGE_SHIFT,
		.len		= PAGE_SIZE,
		.flags		= IOMAP_DAX | IOMAP_FAULT,
	};
	vm_fault_t ret = 0;
	void *entry;
	int error;

	trace_dax_pte_fault(iter.inode, vmf, ret);
	/*
	 * Check whether offset isn't beyond end of file now. Caller is supposed
	 * to hold locks serializing us with truncate / punch hole so this is
	 * a reliable test.
	 */
	if (iter.pos >= i_size_read(iter.inode)) {
		ret = VM_FAULT_SIGBUS;
		goto out;
	}

	if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page)
		iter.flags |= IOMAP_WRITE;

	entry = grab_mapping_entry(&xas, mapping, 0);
	if (xa_is_internal(entry)) {
		ret = xa_to_internal(entry);
		goto out;
	}

	/*
	 * It is possible, particularly with mixed reads & writes to private
	 * mappings, that we have raced with a PMD fault that overlaps with
	 * the PTE we need to set up.  If so just return and the fault will be
	 * retried.
	 */
	if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
		ret = VM_FAULT_NOPAGE;
		goto unlock_entry;
	}

	while ((error = iomap_iter(&iter, ops)) > 0) {
		if (WARN_ON_ONCE(iomap_length(&iter) < PAGE_SIZE)) {
			iter.processed = -EIO;	/* fs corruption? */
			continue;
		}

		ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, false);
		if (ret != VM_FAULT_SIGBUS &&
		    (iter.iomap.flags & IOMAP_F_NEW)) {
			count_vm_event(PGMAJFAULT);
			count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
			ret |= VM_FAULT_MAJOR;
		}

		if (!(ret & VM_FAULT_ERROR))
			iter.processed = PAGE_SIZE;
	}

	if (iomap_errp)
		*iomap_errp = error;
	if (!ret && error)
		ret = dax_fault_return(error);

unlock_entry:
	dax_unlock_entry(&xas, entry);
out:
	trace_dax_pte_fault_done(iter.inode, vmf, ret);
	return ret;
}

#ifdef CONFIG_FS_DAX_PMD
static bool dax_fault_check_fallback(struct vm_fault *vmf, struct xa_state *xas,
		pgoff_t max_pgoff)
{
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	bool write = vmf->flags & FAULT_FLAG_WRITE;

	/*
	 * Make sure that the faulting address's PMD offset (color) matches
	 * the PMD offset from the start of the file.  This is necessary so
	 * that a PMD range in the page table overlaps exactly with a PMD
	 * range in the page cache.
	 */
	if ((vmf->pgoff & PG_PMD_COLOUR) !=
	    ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
		return true;

	/* Fall back to PTEs if we're going to COW */
	if (write && !(vmf->vma->vm_flags & VM_SHARED))
		return true;

	/* If the PMD would extend outside the VMA */
	if (pmd_addr < vmf->vma->vm_start)
		return true;
	if ((pmd_addr + PMD_SIZE) > vmf->vma->vm_end)
		return true;

	/* If the PMD would extend beyond the file size */
	if ((xas->xa_index | PG_PMD_COLOUR) >= max_pgoff)
		return true;

	return false;
}

static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       const struct iomap_ops *ops)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER);
	struct iomap_iter iter = {
		.inode		= mapping->host,
		.len		= PMD_SIZE,
		.flags		= IOMAP_DAX | IOMAP_FAULT,
	};
	vm_fault_t ret = VM_FAULT_FALLBACK;
	pgoff_t max_pgoff;
	void *entry;
	int error;

	if (vmf->flags & FAULT_FLAG_WRITE)
		iter.flags |= IOMAP_WRITE;

	/*
	 * Check whether offset isn't beyond end of file now. Caller is
	 * supposed to hold locks serializing us with truncate / punch hole so
	 * this is a reliable test.
	 */
	max_pgoff = DIV_ROUND_UP(i_size_read(iter.inode), PAGE_SIZE);

	trace_dax_pmd_fault(iter.inode, vmf, max_pgoff, 0);

	if (xas.xa_index >= max_pgoff) {
		ret = VM_FAULT_SIGBUS;
		goto out;
	}

	if (dax_fault_check_fallback(vmf, &xas, max_pgoff))
		goto fallback;

	/*
	 * grab_mapping_entry() will make sure we get an empty PMD entry,
	 * a zero PMD entry or a DAX PMD.  If it can't (because a PTE
	 * entry is already in the array, for instance), it will return
	 * VM_FAULT_FALLBACK.
	 */
	entry = grab_mapping_entry(&xas, mapping, PMD_ORDER);
	if (xa_is_internal(entry)) {
		ret = xa_to_internal(entry);
		goto fallback;
	}

	/*
	 * It is possible, particularly with mixed reads & writes to private
	 * mappings, that we have raced with a PTE fault that overlaps with
	 * the PMD we need to set up.  If so just return and the fault will be
	 * retried.
	 */
	if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
			!pmd_devmap(*vmf->pmd)) {
		ret = 0;
		goto unlock_entry;
	}

	iter.pos = (loff_t)xas.xa_index << PAGE_SHIFT;
	while ((error = iomap_iter(&iter, ops)) > 0) {
		if (iomap_length(&iter) < PMD_SIZE)
			continue; /* actually breaks out of the loop */

		ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, true);
		if (ret != VM_FAULT_FALLBACK)
			iter.processed = PMD_SIZE;
	}

unlock_entry:
	dax_unlock_entry(&xas, entry);
fallback:
	if (ret == VM_FAULT_FALLBACK) {
		split_huge_pmd(vmf->vma, vmf->pmd, vmf->address);
		count_vm_event(THP_FAULT_FALLBACK);
	}
out:
	trace_dax_pmd_fault_done(iter.inode, vmf, max_pgoff, ret);
	return ret;
}
#else
static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       const struct iomap_ops *ops)
{
	return VM_FAULT_FALLBACK;
}
#endif /* CONFIG_FS_DAX_PMD */

/**
 * dax_iomap_fault - handle a page fault on a DAX file
 * @vmf: The description of the fault
 * @pe_size: Size of the page to fault in
 * @pfnp: PFN to insert for synchronous faults if fsync is required
 * @iomap_errp: Storage for detailed error code in case of error
 * @ops: Iomap ops passed from the file system
 *
 * When a page fault occurs, filesystems may call this helper in
 * their fault handler for DAX files. dax_iomap_fault() assumes the caller
 * has done all the necessary locking for page fault to proceed
 * successfully.
 */
vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
		    pfn_t *pfnp, int *iomap_errp, const struct iomap_ops *ops)
{
	switch (pe_size) {
	case PE_SIZE_PTE:
		return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops);
	case PE_SIZE_PMD:
		return dax_iomap_pmd_fault(vmf, pfnp, ops);
	default:
		return VM_FAULT_FALLBACK;
	}
}
EXPORT_SYMBOL_GPL(dax_iomap_fault);

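/*
 * Illustrative sketch (not part of the original source): a filesystem's
 * vm_operations_struct forwards here from ->fault/->huge_fault, roughly
 * as ext4 and xfs do, omitting filesystem-specific locking
 * ('foo_iomap_ops' is a hypothetical iomap_ops):
 *
 *	static vm_fault_t foo_huge_fault(struct vm_fault *vmf,
 *					 enum page_entry_size pe_size)
 *	{
 *		pfn_t pfn;
 *		vm_fault_t ret;
 *
 *		ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL, &foo_iomap_ops);
 *		if (ret & VM_FAULT_NEEDDSYNC)
 *			ret = dax_finish_sync_fault(vmf, pe_size, pfn);
 *		return ret;
 *	}
 */
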
/*
 * dax_insert_pfn_mkwrite - insert PTE or PMD entry into page tables
 * @vmf: The description of the fault
 * @pfn: PFN to insert
 * @order: Order of entry to insert.
 *
 * This function inserts a writeable PTE or PMD entry into the page tables
 * for an mmaped DAX file.  It also marks the page cache entry as dirty.
 */
static vm_fault_t
dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order);
	void *entry;
	vm_fault_t ret;

	xas_lock_irq(&xas);
	entry = get_unlocked_entry(&xas, order);
	/* Did we race with someone splitting entry or so? */
	if (!entry || dax_is_conflict(entry) ||
	    (order == 0 && !dax_is_pte_entry(entry))) {
		put_unlocked_entry(&xas, entry, WAKE_NEXT);
		xas_unlock_irq(&xas);
		trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
						      VM_FAULT_NOPAGE);
		return VM_FAULT_NOPAGE;
	}
	xas_set_mark(&xas, PAGECACHE_TAG_DIRTY);
	dax_lock_entry(&xas, entry);
	xas_unlock_irq(&xas);
	if (order == 0)
		ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
#ifdef CONFIG_FS_DAX_PMD
	else if (order == PMD_ORDER)
		ret = vmf_insert_pfn_pmd(vmf, pfn, FAULT_FLAG_WRITE);
#endif
	else
		ret = VM_FAULT_FALLBACK;
	dax_unlock_entry(&xas, entry);
	trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret);
	return ret;
}

/**
 * dax_finish_sync_fault - finish synchronous page fault
 * @vmf: The description of the fault
 * @pe_size: Size of entry to be inserted
 * @pfn: PFN to insert
 *
 * This function ensures that the file range touched by the page fault is
 * stored persistently on the media and handles inserting of appropriate page
 * table entry.
 */
vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size, pfn_t pfn)
{
	int err;
	loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT;
	unsigned int order = pe_order(pe_size);
	size_t len = PAGE_SIZE << order;

	err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1);
	if (err)
		return VM_FAULT_SIGBUS;
	return dax_insert_pfn_mkwrite(vmf, pfn, order);
}
EXPORT_SYMBOL_GPL(dax_finish_sync_fault);