/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <[email protected]>
 * Author: Ross Zwisler <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pmem.h>
#include <linux/sched.h>
#include <linux/uio.h>
#include <linux/vmstat.h>

/*
 * dax_clear_blocks() zeroes @size bytes of storage starting at @block,
 * going straight to the device and bypassing the page cache.  It is called
 * from within transaction context from XFS, so the stack from this point
 * must follow GFP_NOFS semantics for all operations.
 */
int dax_clear_blocks(struct inode *inode, sector_t block, long size)
{
	struct block_device *bdev = inode->i_sb->s_bdev;
	sector_t sector = block << (inode->i_blkbits - 9);

	might_sleep();
	do {
		void __pmem *addr;
		unsigned long pfn;
		long count;

		count = bdev_direct_access(bdev, sector, &addr, &pfn, size);
		if (count < 0)
			return count;
		BUG_ON(size < count);
		while (count > 0) {
			unsigned pgsz = PAGE_SIZE - offset_in_page(addr);
			if (pgsz > count)
				pgsz = count;
			clear_pmem(addr, pgsz);
			addr += pgsz;
			size -= pgsz;
			count -= pgsz;
			BUG_ON(pgsz & 511);
			sector += pgsz / 512;
			cond_resched();
		}
	} while (size);

	wmb_pmem();
	return 0;
}
EXPORT_SYMBOL_GPL(dax_clear_blocks);

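/*
 * Translate the block behind @bh into a pmem kernel address.  Returns the
 * number of bytes addressable at *@addr (bounded by bh->b_size), or a
 * negative errno from bdev_direct_access().  The pfn is thrown away.
 */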
static long dax_get_addr(struct buffer_head *bh, void __pmem **addr,
		unsigned blkbits)
{
	unsigned long pfn;
	sector_t sector = bh->b_blocknr << (blkbits - 9);
	return bdev_direct_access(bh->b_bdev, sector, addr, &pfn, bh->b_size);
}

/* the clear_pmem() calls are ordered by a wmb_pmem() in the caller */
static void dax_new_buf(void __pmem *addr, unsigned size, unsigned first,
		loff_t pos, loff_t end)
{
	loff_t final = end - pos + first; /* The final byte of the buffer */

	if (first > 0)
		clear_pmem(addr, first);
	if (final < size)
		clear_pmem(addr + final, size - final);
}

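/*
 * A buffer is "written" if it is mapped and not marked unwritten: reads
 * from it return real data rather than zeroes.
 */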
static bool buffer_written(struct buffer_head *bh)
{
	return buffer_mapped(bh) && !buffer_unwritten(bh);
}

/*
 * When ext4 encounters a hole, it returns without modifying the buffer_head
 * which means that we can't trust b_size.  To cope with this, we set b_state
 * to 0 before calling get_block and, if any bit is set, we know we can trust
 * b_size.  Unfortunate, really, since ext4 knows precisely how long a hole is
 * and would save us time calling get_block repeatedly.
 */
static bool buffer_size_valid(struct buffer_head *bh)
{
	return bh->b_state != 0;
}

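/*
 * dax_io() is the core loop behind dax_do_io().  It walks the iov_iter,
 * using get_block() to map each file extent and dax_get_addr() to obtain
 * a pmem address for it, then copies directly between the iterator and
 * the device.  Reads from holes are satisfied by zeroing the iterator.
 */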
static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
		      loff_t start, loff_t end, get_block_t get_block,
		      struct buffer_head *bh)
{
	ssize_t retval = 0;
	loff_t pos = start;
	loff_t max = start;
	loff_t bh_max = start;
	void __pmem *addr;
	bool hole = false;
	bool need_wmb = false;

	if (iov_iter_rw(iter) != WRITE)
		end = min(end, i_size_read(inode));

	while (pos < end) {
		size_t len;
		if (pos == max) {
			unsigned blkbits = inode->i_blkbits;
			long page = pos >> PAGE_SHIFT;
			sector_t block = page << (PAGE_SHIFT - blkbits);
			unsigned first = pos - (block << blkbits);
			long size;

			if (pos == bh_max) {
				bh->b_size = PAGE_ALIGN(end - pos);
				bh->b_state = 0;
				retval = get_block(inode, block, bh,
						   iov_iter_rw(iter) == WRITE);
				if (retval)
					break;
				if (!buffer_size_valid(bh))
					bh->b_size = 1 << blkbits;
				bh_max = pos - first + bh->b_size;
			} else {
				unsigned done = bh->b_size -
						(bh_max - (pos - first));
				bh->b_blocknr += done >> blkbits;
				bh->b_size -= done;
			}

			hole = iov_iter_rw(iter) != WRITE && !buffer_written(bh);
			if (hole) {
				addr = NULL;
				size = bh->b_size - first;
			} else {
				retval = dax_get_addr(bh, &addr, blkbits);
				if (retval < 0)
					break;
				if (buffer_unwritten(bh) || buffer_new(bh)) {
					dax_new_buf(addr, retval, first, pos,
							end);
					need_wmb = true;
				}
				addr += first;
				size = retval - first;
			}
			max = min(pos + size, end);
		}

		if (iov_iter_rw(iter) == WRITE) {
			len = copy_from_iter_pmem(addr, max - pos, iter);
			need_wmb = true;
		} else if (!hole)
			len = copy_to_iter((void __force *)addr, max - pos,
					iter);
		else
			len = iov_iter_zero(max - pos, iter);

		if (!len) {
			retval = -EFAULT;
			break;
		}

		pos += len;
		addr += len;
	}

	if (need_wmb)
		wmb_pmem();

	return (pos == start) ? retval : pos - start;
}

/**
 * dax_do_io - Perform I/O to a DAX file
 * @iocb: The control block for this I/O
 * @inode: The file which the I/O is directed at
 * @iter: The addresses to do I/O from or to
 * @pos: The file offset where the I/O starts
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @end_io: A filesystem callback for I/O completion
 * @flags: See below
 *
 * This function uses the same locking scheme as do_blockdev_direct_IO:
 * If @flags has DIO_LOCKING set, we assume that the i_mutex is held by the
 * caller for writes.  For reads, we take and release the i_mutex ourselves.
 * If DIO_LOCKING is not set, the filesystem takes care of its own locking.
 * As with do_blockdev_direct_IO(), we increment i_dio_count while the I/O
 * is in progress.
 */
ssize_t dax_do_io(struct kiocb *iocb, struct inode *inode,
		  struct iov_iter *iter, loff_t pos, get_block_t get_block,
		  dio_iodone_t end_io, int flags)
{
	struct buffer_head bh;
	ssize_t retval = -EINVAL;
	loff_t end = pos + iov_iter_count(iter);

	memset(&bh, 0, sizeof(bh));

	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ) {
		struct address_space *mapping = inode->i_mapping;
		mutex_lock(&inode->i_mutex);
		retval = filemap_write_and_wait_range(mapping, pos, end - 1);
		if (retval) {
			mutex_unlock(&inode->i_mutex);
			goto out;
		}
	}

	/* Protects against truncate */
	if (!(flags & DIO_SKIP_DIO_COUNT))
		inode_dio_begin(inode);

	retval = dax_io(inode, iter, pos, end, get_block, &bh);

	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ)
		mutex_unlock(&inode->i_mutex);

	if ((retval > 0) && end_io)
		end_io(iocb, pos, retval, bh.b_private);

	if (!(flags & DIO_SKIP_DIO_COUNT))
		inode_dio_end(inode);
out:
	return retval;
}
EXPORT_SYMBOL_GPL(dax_do_io);

/*
 * The user has performed a load from a hole in the file.  Allocating
 * a new page in the file would cause excessive storage usage for
 * workloads with sparse files.  We allocate a page cache page instead.
 * We'll kick it out of the page cache if it's ever written to,
 * otherwise it will simply fall out of the page cache under memory
 * pressure without ever having been dirtied.
 */
static int dax_load_hole(struct address_space *mapping, struct page *page,
							struct vm_fault *vmf)
{
	unsigned long size;
	struct inode *inode = mapping->host;
	if (!page)
		page = find_or_create_page(mapping, vmf->pgoff,
						GFP_KERNEL | __GFP_ZERO);
	if (!page)
		return VM_FAULT_OOM;
	/* Recheck i_size under page lock to avoid truncate race */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size) {
		unlock_page(page);
		page_cache_release(page);
		return VM_FAULT_SIGBUS;
	}

	vmf->page = page;
	return VM_FAULT_LOCKED;
}

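/*
 * Copy one page of data from the pmem block behind @bh into @to, for use
 * when a write fault must copy existing file data into a COW page.
 */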
static int copy_user_bh(struct page *to, struct buffer_head *bh,
			unsigned blkbits, unsigned long vaddr)
{
	void __pmem *vfrom;
	void *vto;

	if (dax_get_addr(bh, &vfrom, blkbits) < 0)
		return -EIO;
	vto = kmap_atomic(to);
	copy_user_page(vto, (void __force *)vfrom, vaddr, to);
	kunmap_atomic(vto);
	return 0;
}

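/*
 * Insert the pfn of the block behind @bh into the page tables at the
 * faulting address.  Unwritten and freshly allocated blocks are zeroed
 * first so that stale media contents are never exposed to userspace.
 */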
static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
			struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct address_space *mapping = inode->i_mapping;
	sector_t sector = bh->b_blocknr << (inode->i_blkbits - 9);
	unsigned long vaddr = (unsigned long)vmf->virtual_address;
	void __pmem *addr;
	unsigned long pfn;
	pgoff_t size;
	int error;

	i_mmap_lock_read(mapping);

	/*
	 * Check truncate didn't happen while we were allocating a block.
	 * If it did, this block may or may not be still allocated to the
	 * file.  We can't tell the filesystem to free it because we can't
	 * take i_mutex here.  In the worst case, the file still has blocks
	 * allocated past the end of the file.
	 */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (unlikely(vmf->pgoff >= size)) {
		error = -EIO;
		goto out;
	}

	error = bdev_direct_access(bh->b_bdev, sector, &addr, &pfn, bh->b_size);
	if (error < 0)
		goto out;
	if (error < PAGE_SIZE) {
		error = -EIO;
		goto out;
	}

	if (buffer_unwritten(bh) || buffer_new(bh)) {
		clear_pmem(addr, PAGE_SIZE);
		wmb_pmem();
	}

	error = vm_insert_mixed(vma, vaddr, pfn);

out:
	i_mmap_unlock_read(mapping);

	return error;
}

/**
 * __dax_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @complete_unwritten: The filesystem method used to convert unwritten blocks
 *	to written so the data written to them is exposed.  This is required
 *	by write faults for filesystems that will return unwritten extent
 *	mappings from @get_block, but it is optional for reads as
 *	dax_insert_mapping() will always zero unwritten blocks.  If the fs
 *	does not support unwritten extents, then it should pass NULL.
 *
 * When a page fault occurs, filesystems may call this helper in their
 * fault handler for DAX files.  __dax_fault() assumes the caller has done all
 * the necessary locking for the page fault to proceed successfully.
 */
int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
			get_block_t get_block, dax_iodone_t complete_unwritten)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct page *page;
	struct buffer_head bh;
	unsigned long vaddr = (unsigned long)vmf->virtual_address;
	unsigned blkbits = inode->i_blkbits;
	sector_t block;
	pgoff_t size;
	int error;
	int major = 0;

	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size)
		return VM_FAULT_SIGBUS;

	memset(&bh, 0, sizeof(bh));
	block = (sector_t)vmf->pgoff << (PAGE_SHIFT - blkbits);
	bh.b_size = PAGE_SIZE;

repeat:
	page = find_get_page(mapping, vmf->pgoff);
	if (page) {
		if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
			page_cache_release(page);
			return VM_FAULT_RETRY;
		}
		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			page_cache_release(page);
			goto repeat;
		}
		size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
		if (unlikely(vmf->pgoff >= size)) {
			/*
			 * We have a struct page covering a hole in the file
			 * from a read fault and we've raced with a truncate
			 */
			error = -EIO;
			goto unlock_page;
		}
	}

	error = get_block(inode, block, &bh, 0);
	if (!error && (bh.b_size < PAGE_SIZE))
		error = -EIO;		/* fs corruption? */
	if (error)
		goto unlock_page;

	if (!buffer_mapped(&bh) && !buffer_unwritten(&bh) && !vmf->cow_page) {
		if (vmf->flags & FAULT_FLAG_WRITE) {
			error = get_block(inode, block, &bh, 1);
			count_vm_event(PGMAJFAULT);
			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
			major = VM_FAULT_MAJOR;
			if (!error && (bh.b_size < PAGE_SIZE))
				error = -EIO;
			if (error)
				goto unlock_page;
		} else {
			return dax_load_hole(mapping, page, vmf);
		}
	}

	if (vmf->cow_page) {
		struct page *new_page = vmf->cow_page;
		if (buffer_written(&bh))
			error = copy_user_bh(new_page, &bh, blkbits, vaddr);
		else
			clear_user_highpage(new_page, vaddr);
		if (error)
			goto unlock_page;
		vmf->page = page;
		if (!page) {
			i_mmap_lock_read(mapping);
			/* Check we didn't race with truncate */
			size = (i_size_read(inode) + PAGE_SIZE - 1) >>
								PAGE_SHIFT;
			if (vmf->pgoff >= size) {
				i_mmap_unlock_read(mapping);
				error = -EIO;
				goto out;
			}
		}
		return VM_FAULT_LOCKED;
	}

	/* Check we didn't race with a read fault installing a new page */
	if (!page && major)
		page = find_lock_page(mapping, vmf->pgoff);

	if (page) {
		unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
							PAGE_CACHE_SIZE, 0);
		delete_from_page_cache(page);
		unlock_page(page);
		page_cache_release(page);
	}

	/*
	 * If we successfully insert the new mapping over an unwritten extent,
	 * we need to ensure we convert the unwritten extent.  If there is an
	 * error inserting the mapping, the filesystem needs to leave it as
	 * unwritten to prevent exposure of the stale underlying data to
	 * userspace, but we still need to call the completion function so
	 * the private resources on the mapping buffer can be released.  We
	 * indicate what the callback should do via the uptodate variable, same
	 * as for normal BH based IO completions.
	 */
	error = dax_insert_mapping(inode, &bh, vma, vmf);
	if (buffer_unwritten(&bh)) {
		if (complete_unwritten)
			complete_unwritten(&bh, !error);
		else
			WARN_ON_ONCE(!(vmf->flags & FAULT_FLAG_WRITE));
	}

out:
	if (error == -ENOMEM)
		return VM_FAULT_OOM | major;
	/* -EBUSY is fine, somebody else faulted on the same PTE */
	if ((error < 0) && (error != -EBUSY))
		return VM_FAULT_SIGBUS | major;
	return VM_FAULT_NOPAGE | major;

unlock_page:
	if (page) {
		unlock_page(page);
		page_cache_release(page);
	}
	goto out;
}
EXPORT_SYMBOL(__dax_fault);

/**
 * dax_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @complete_unwritten: The filesystem method used to convert unwritten blocks
 *	to written; see __dax_fault()
 *
 * When a page fault occurs, filesystems may call this helper in their
 * fault handler for DAX files.
 */
int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
	      get_block_t get_block, dax_iodone_t complete_unwritten)
{
	int result;
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;

	if (vmf->flags & FAULT_FLAG_WRITE) {
		sb_start_pagefault(sb);
		file_update_time(vma->vm_file);
	}
	result = __dax_fault(vma, vmf, get_block, complete_unwritten);
	if (vmf->flags & FAULT_FLAG_WRITE)
		sb_end_pagefault(sb);

	return result;
}
EXPORT_SYMBOL_GPL(dax_fault);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * The 'colour' (ie low bits) within a PMD of a page offset.  This comes up
 * more often than one might expect in the below function.
 */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)

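/*
 * __dax_pmd_fault() is the PMD analogue of __dax_fault(): it tries to
 * install a single PMD-sized mapping for the faulting address, and returns
 * VM_FAULT_FALLBACK whenever a huge mapping is not possible so the caller
 * can retry with PTEs.
 */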
int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
		pmd_t *pmd, unsigned int flags, get_block_t get_block,
		dax_iodone_t complete_unwritten)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct buffer_head bh;
	unsigned blkbits = inode->i_blkbits;
	unsigned long pmd_addr = address & PMD_MASK;
	bool write = flags & FAULT_FLAG_WRITE;
	long length;
	void __pmem *kaddr;
	pgoff_t size, pgoff;
	sector_t block, sector;
	unsigned long pfn;
	int result = 0;

	/* dax pmd mappings are broken wrt gup and fork */
	if (!IS_ENABLED(CONFIG_FS_DAX_PMD))
		return VM_FAULT_FALLBACK;

	/* Fall back to PTEs if we're going to COW */
	if (write && !(vma->vm_flags & VM_SHARED))
		return VM_FAULT_FALLBACK;
	/* If the PMD would extend outside the VMA */
	if (pmd_addr < vma->vm_start)
		return VM_FAULT_FALLBACK;
	if ((pmd_addr + PMD_SIZE) > vma->vm_end)
		return VM_FAULT_FALLBACK;

	pgoff = linear_page_index(vma, pmd_addr);
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (pgoff >= size)
		return VM_FAULT_SIGBUS;
	/* If the PMD would cover blocks out of the file */
	if ((pgoff | PG_PMD_COLOUR) >= size)
		return VM_FAULT_FALLBACK;

	memset(&bh, 0, sizeof(bh));
	block = (sector_t)pgoff << (PAGE_SHIFT - blkbits);

	bh.b_size = PMD_SIZE;
	length = get_block(inode, block, &bh, write);
	if (length)
		return VM_FAULT_SIGBUS;
	i_mmap_lock_read(mapping);

	/*
	 * If the filesystem isn't willing to tell us the length of a hole,
	 * just fall back to PTEs.  Calling get_block 512 times in a loop
	 * would be silly.
	 */
	if (!buffer_size_valid(&bh) || bh.b_size < PMD_SIZE)
		goto fallback;

	/*
	 * If we allocated new storage, make sure no process has any
	 * zero pages covering this hole
	 */
	if (buffer_new(&bh)) {
		i_mmap_unlock_read(mapping);
		unmap_mapping_range(mapping, pgoff << PAGE_SHIFT, PMD_SIZE, 0);
		i_mmap_lock_read(mapping);
	}

	/*
	 * If a truncate happened while we were allocating blocks, we may
	 * leave blocks allocated to the file that are beyond EOF.  We can't
	 * take i_mutex here, so just leave them hanging; they'll be freed
	 * when the file is deleted.
	 */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (pgoff >= size) {
		result = VM_FAULT_SIGBUS;
		goto out;
	}
	if ((pgoff | PG_PMD_COLOUR) >= size)
		goto fallback;

	if (!write && !buffer_mapped(&bh) && buffer_uptodate(&bh)) {
		spinlock_t *ptl;
		pmd_t entry;
		struct page *zero_page = get_huge_zero_page();

		if (unlikely(!zero_page))
			goto fallback;

		ptl = pmd_lock(vma->vm_mm, pmd);
		if (!pmd_none(*pmd)) {
			spin_unlock(ptl);
			goto fallback;
		}

		entry = mk_pmd(zero_page, vma->vm_page_prot);
		entry = pmd_mkhuge(entry);
		set_pmd_at(vma->vm_mm, pmd_addr, pmd, entry);
		result = VM_FAULT_NOPAGE;
		spin_unlock(ptl);
	} else {
		sector = bh.b_blocknr << (blkbits - 9);
		length = bdev_direct_access(bh.b_bdev, sector, &kaddr, &pfn,
						bh.b_size);
		if (length < 0) {
			result = VM_FAULT_SIGBUS;
			goto out;
		}
		if ((length < PMD_SIZE) || (pfn & PG_PMD_COLOUR))
			goto fallback;

		/*
		 * TODO: teach vmf_insert_pfn_pmd() to support
		 * 'pte_special' for pmds
		 */
		if (pfn_valid(pfn))
			goto fallback;

		if (buffer_unwritten(&bh) || buffer_new(&bh)) {
			int i;
			for (i = 0; i < PTRS_PER_PMD; i++)
				clear_pmem(kaddr + i * PAGE_SIZE, PAGE_SIZE);
			wmb_pmem();
			count_vm_event(PGMAJFAULT);
			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
			result |= VM_FAULT_MAJOR;
		}

		result |= vmf_insert_pfn_pmd(vma, address, pmd, pfn, write);
	}

out:
	i_mmap_unlock_read(mapping);

	if (buffer_unwritten(&bh))
		complete_unwritten(&bh, !(result & VM_FAULT_ERROR));

	return result;

fallback:
	count_vm_event(THP_FAULT_FALLBACK);
	result = VM_FAULT_FALLBACK;
	goto out;
}
EXPORT_SYMBOL_GPL(__dax_pmd_fault);

/**
 * dax_pmd_fault - handle a PMD fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @address: The virtual address that faulted
 * @pmd: The PMD entry in the page tables
 * @flags: The FAULT_FLAG_* flags describing the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @complete_unwritten: The filesystem method used to convert unwritten blocks
 *	to written; see __dax_fault()
 *
 * When a page fault occurs, filesystems may call this helper in their
 * pmd_fault handler for DAX files.
 */
int dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
			pmd_t *pmd, unsigned int flags, get_block_t get_block,
			dax_iodone_t complete_unwritten)
{
	int result;
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;

	if (flags & FAULT_FLAG_WRITE) {
		sb_start_pagefault(sb);
		file_update_time(vma->vm_file);
	}
	result = __dax_pmd_fault(vma, address, pmd, flags, get_block,
				complete_unwritten);
	if (flags & FAULT_FLAG_WRITE)
		sb_end_pagefault(sb);

	return result;
}
EXPORT_SYMBOL_GPL(dax_pmd_fault);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/**
 * dax_pfn_mkwrite - handle first write to DAX page
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 */
int dax_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;

	sb_start_pagefault(sb);
	file_update_time(vma->vm_file);
	sb_end_pagefault(sb);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);

/**
 * dax_zero_page_range - zero a range within a page of a DAX file
 * @inode: The file in which the range is being zeroed
 * @from: The file offset at which zeroing starts
 * @length: The number of bytes to zero
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * This function can be called by a filesystem when it is zeroing part of a
 * page in a DAX file.  This is intended for hole-punch operations.  If
 * you are truncating a file, the helper function dax_truncate_page() may be
 * more convenient.
 *
 * We work in terms of PAGE_CACHE_SIZE here for commonality with
 * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
 * took care of disposing of the unnecessary blocks.  Even if the filesystem
 * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
 * since the file might be mmapped.
 */
int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length,
							get_block_t get_block)
{
	struct buffer_head bh;
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	int err;

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;
	BUG_ON((offset + length) > PAGE_CACHE_SIZE);

	memset(&bh, 0, sizeof(bh));
	bh.b_size = PAGE_CACHE_SIZE;
	err = get_block(inode, index, &bh, 0);
	if (err < 0)
		return err;
	if (buffer_written(&bh)) {
		void __pmem *addr;
		err = dax_get_addr(&bh, &addr, inode->i_blkbits);
		if (err < 0)
			return err;
		clear_pmem(addr + offset, length);
		wmb_pmem();
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dax_zero_page_range);

/**
 * dax_truncate_page - handle a partial page being truncated in a DAX file
 * @inode: The file being truncated
 * @from: The file offset that is being truncated to
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * Similar to block_truncate_page(), this function can be called by a
 * filesystem when it is truncating a DAX file to handle the partial page.
 *
 * We work in terms of PAGE_CACHE_SIZE here for commonality with
 * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
 * took care of disposing of the unnecessary blocks.  Even if the filesystem
 * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
 * since the file might be mmapped.
 */
int dax_truncate_page(struct inode *inode, loff_t from, get_block_t get_block)
{
	unsigned length = PAGE_CACHE_ALIGN(from) - from;
	return dax_zero_page_range(inode, from, length, get_block);
}
EXPORT_SYMBOL_GPL(dax_truncate_page);