/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <[email protected]>
 * Author: Ross Zwisler <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pmem.h>
#include <linux/sched.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>

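/*
 * dax_map_atomic() / dax_unmap_atomic() bracket every access to the
 * device's memory below.  A minimal usage sketch of that pairing
 * (illustrative; 'bdev' and 'some_sector' are placeholders):
 *
 *	struct blk_dax_ctl dax = {
 *		.sector = some_sector,
 *		.size = PAGE_SIZE,
 *	};
 *	long len = dax_map_atomic(bdev, &dax);
 *
 *	if (len < 0)
 *		return len;
 *	... use dax.addr and dax.pfn for up to 'len' bytes ...
 *	dax_unmap_atomic(bdev, &dax);
 *
 * dax_unmap_atomic() checks IS_ERR(dax->addr), so it is also safe to call
 * when the map failed -- dax_io() below relies on this.
 */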
static long dax_map_atomic(struct block_device *bdev, struct blk_dax_ctl *dax)
{
	struct request_queue *q = bdev->bd_queue;
	long rc = -EIO;

	dax->addr = (void __pmem *) ERR_PTR(-EIO);
	if (blk_queue_enter(q, true) != 0)
		return rc;

	rc = bdev_direct_access(bdev, dax);
	if (rc < 0) {
		dax->addr = (void __pmem *) ERR_PTR(rc);
		blk_queue_exit(q);
		return rc;
	}
	return rc;
}

static void dax_unmap_atomic(struct block_device *bdev,
		const struct blk_dax_ctl *dax)
{
	if (IS_ERR(dax->addr))
		return;
	blk_queue_exit(bdev->bd_queue);
}

/*
 * dax_clear_blocks() is called from within transaction context from XFS,
 * and hence this means the stack from this point must follow GFP_NOFS
 * semantics for all operations.
 */
int dax_clear_blocks(struct inode *inode, sector_t block, long _size)
{
	struct block_device *bdev = inode->i_sb->s_bdev;
	struct blk_dax_ctl dax = {
		.sector = block << (inode->i_blkbits - 9),
		.size = _size,
	};

	might_sleep();
	do {
		long count, sz;

		count = dax_map_atomic(bdev, &dax);
		if (count < 0)
			return count;
		sz = min_t(long, count, SZ_128K);
		clear_pmem(dax.addr, sz);
		dax.size -= sz;
		dax.sector += sz / 512;
		dax_unmap_atomic(bdev, &dax);
		cond_resched();
	} while (dax.size);

	wmb_pmem();
	return 0;
}
EXPORT_SYMBOL_GPL(dax_clear_blocks);
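
/*
 * Usage sketch (illustrative; 'first_block' and 'nr_blocks' are
 * placeholders): a filesystem that has just allocated blocks for a DAX
 * file can zero them before exposing them to readers:
 *
 *	err = dax_clear_blocks(inode, first_block,
 *			       nr_blocks << inode->i_blkbits);
 */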

/* the clear_pmem() calls are ordered by a wmb_pmem() in the caller */
static void dax_new_buf(void __pmem *addr, unsigned size, unsigned first,
		loff_t pos, loff_t end)
{
	loff_t final = end - pos + first; /* The final byte of the buffer */

	if (first > 0)
		clear_pmem(addr, first);
	if (final < size)
		clear_pmem(addr + final, size - final);
}

static bool buffer_written(struct buffer_head *bh)
{
	return buffer_mapped(bh) && !buffer_unwritten(bh);
}

/*
 * When ext4 encounters a hole, it returns without modifying the buffer_head
 * which means that we can't trust b_size.  To cope with this, we set b_state
 * to 0 before calling get_block and, if any bit is set, we know we can trust
 * b_size.  Unfortunate, really, since ext4 knows precisely how long a hole is
 * and would save us time calling get_block repeatedly.
 */
static bool buffer_size_valid(struct buffer_head *bh)
{
	return bh->b_state != 0;
}

static sector_t to_sector(const struct buffer_head *bh,
		const struct inode *inode)
{
	sector_t sector = bh->b_blocknr << (inode->i_blkbits - 9);

	return sector;
}
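
/*
 * Worked example (illustrative): with 4K filesystem blocks, i_blkbits is
 * 12, so the shift above is 12 - 9 = 3 and each block spans 2^3 = 8
 * 512-byte sectors; block 100 starts at sector 800.
 */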

static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
		loff_t start, loff_t end, get_block_t get_block,
		struct buffer_head *bh)
{
	loff_t pos = start, max = start, bh_max = start;
	bool hole = false, need_wmb = false;
	struct block_device *bdev = NULL;
	int rw = iov_iter_rw(iter), rc = 0;
	long map_len = 0;
	struct blk_dax_ctl dax = {
		.addr = (void __pmem *) ERR_PTR(-EIO),
	};

	if (rw == READ)
		end = min(end, i_size_read(inode));

	while (pos < end) {
		size_t len;
		if (pos == max) {
			unsigned blkbits = inode->i_blkbits;
			long page = pos >> PAGE_SHIFT;
			sector_t block = page << (PAGE_SHIFT - blkbits);
			unsigned first = pos - (block << blkbits);
			long size;

			if (pos == bh_max) {
				bh->b_size = PAGE_ALIGN(end - pos);
				bh->b_state = 0;
				rc = get_block(inode, block, bh, rw == WRITE);
				if (rc)
					break;
				if (!buffer_size_valid(bh))
					bh->b_size = 1 << blkbits;
				bh_max = pos - first + bh->b_size;
				bdev = bh->b_bdev;
			} else {
				unsigned done = bh->b_size -
						(bh_max - (pos - first));
				bh->b_blocknr += done >> blkbits;
				bh->b_size -= done;
			}

			hole = rw == READ && !buffer_written(bh);
			if (hole) {
				size = bh->b_size - first;
			} else {
				dax_unmap_atomic(bdev, &dax);
				dax.sector = to_sector(bh, inode);
				dax.size = bh->b_size;
				map_len = dax_map_atomic(bdev, &dax);
				if (map_len < 0) {
					rc = map_len;
					break;
				}
				if (buffer_unwritten(bh) || buffer_new(bh)) {
					dax_new_buf(dax.addr, map_len, first,
							pos, end);
					need_wmb = true;
				}
				dax.addr += first;
				size = map_len - first;
			}
			max = min(pos + size, end);
		}

		if (iov_iter_rw(iter) == WRITE) {
			len = copy_from_iter_pmem(dax.addr, max - pos, iter);
			need_wmb = true;
		} else if (!hole)
			len = copy_to_iter((void __force *) dax.addr, max - pos,
					iter);
		else
			len = iov_iter_zero(max - pos, iter);

		if (!len) {
			rc = -EFAULT;
			break;
		}

		pos += len;
		if (!IS_ERR(dax.addr))
			dax.addr += len;
	}

	if (need_wmb)
		wmb_pmem();
	dax_unmap_atomic(bdev, &dax);

	return (pos == start) ? rc : pos - start;
}

/**
 * dax_do_io - Perform I/O to a DAX file
 * @iocb: The control block for this I/O
 * @inode: The file which the I/O is directed at
 * @iter: The addresses to do I/O from or to
 * @pos: The file offset where the I/O starts
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @end_io: A filesystem callback for I/O completion
 * @flags: See below
 *
 * This function uses the same locking scheme as do_blockdev_direct_IO:
 * If @flags has DIO_LOCKING set, we assume that the i_mutex is held by the
 * caller for writes.  For reads, we take and release the i_mutex ourselves.
 * If DIO_LOCKING is not set, the filesystem takes care of its own locking.
 * As with do_blockdev_direct_IO(), we increment i_dio_count while the I/O
 * is in progress.
 */
ssize_t dax_do_io(struct kiocb *iocb, struct inode *inode,
		struct iov_iter *iter, loff_t pos, get_block_t get_block,
		dio_iodone_t end_io, int flags)
{
	struct buffer_head bh;
	ssize_t retval = -EINVAL;
	loff_t end = pos + iov_iter_count(iter);

	memset(&bh, 0, sizeof(bh));

	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ) {
		struct address_space *mapping = inode->i_mapping;
		mutex_lock(&inode->i_mutex);
		retval = filemap_write_and_wait_range(mapping, pos, end - 1);
		if (retval) {
			mutex_unlock(&inode->i_mutex);
			goto out;
		}
	}

	/* Protects against truncate */
	if (!(flags & DIO_SKIP_DIO_COUNT))
		inode_dio_begin(inode);

	retval = dax_io(inode, iter, pos, end, get_block, &bh);

	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ)
		mutex_unlock(&inode->i_mutex);

	if ((retval > 0) && end_io)
		end_io(iocb, pos, retval, bh.b_private);

	if (!(flags & DIO_SKIP_DIO_COUNT))
		inode_dio_end(inode);
 out:
	return retval;
}
EXPORT_SYMBOL_GPL(dax_do_io);
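
/*
 * Usage sketch (illustrative; example_get_block() is a placeholder for
 * the filesystem's real get_block_t): a filesystem typically wires this
 * into its ->direct_IO() method:
 *
 *	static ssize_t example_direct_IO(struct kiocb *iocb,
 *			struct iov_iter *iter, loff_t offset)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *
 *		return dax_do_io(iocb, inode, iter, offset,
 *				 example_get_block, NULL, DIO_LOCKING);
 *	}
 */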

/*
 * The user has performed a load from a hole in the file.  Allocating
 * a new page in the file would cause excessive storage usage for
 * workloads with sparse files.  We allocate a page cache page instead.
 * We'll kick it out of the page cache if it's ever written to,
 * otherwise it will simply fall out of the page cache under memory
 * pressure without ever having been dirtied.
 */
static int dax_load_hole(struct address_space *mapping, struct page *page,
		struct vm_fault *vmf)
{
	unsigned long size;
	struct inode *inode = mapping->host;
	if (!page)
		page = find_or_create_page(mapping, vmf->pgoff,
						GFP_KERNEL | __GFP_ZERO);
	if (!page)
		return VM_FAULT_OOM;
	/* Recheck i_size under page lock to avoid truncate race */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size) {
		unlock_page(page);
		page_cache_release(page);
		return VM_FAULT_SIGBUS;
	}

	vmf->page = page;
	return VM_FAULT_LOCKED;
}

static int copy_user_bh(struct page *to, struct inode *inode,
		struct buffer_head *bh, unsigned long vaddr)
{
	struct blk_dax_ctl dax = {
		.sector = to_sector(bh, inode),
		.size = bh->b_size,
	};
	struct block_device *bdev = bh->b_bdev;
	void *vto;

	if (dax_map_atomic(bdev, &dax) < 0)
		return PTR_ERR(dax.addr);
	vto = kmap_atomic(to);
	copy_user_page(vto, (void __force *)dax.addr, vaddr, to);
	kunmap_atomic(vto);
	dax_unmap_atomic(bdev, &dax);
	return 0;
}

static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
			struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long vaddr = (unsigned long)vmf->virtual_address;
	struct address_space *mapping = inode->i_mapping;
	struct block_device *bdev = bh->b_bdev;
	struct blk_dax_ctl dax = {
		.sector = to_sector(bh, inode),
		.size = bh->b_size,
	};
	pgoff_t size;
	int error;

	i_mmap_lock_read(mapping);

	/*
	 * Check truncate didn't happen while we were allocating a block.
	 * If it did, this block may or may not be still allocated to the
	 * file.  We can't tell the filesystem to free it because we can't
	 * take i_mutex here.  In the worst case, the file still has blocks
	 * allocated past the end of the file.
	 */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (unlikely(vmf->pgoff >= size)) {
		error = -EIO;
		goto out;
	}

	if (dax_map_atomic(bdev, &dax) < 0) {
		error = PTR_ERR(dax.addr);
		goto out;
	}

	if (buffer_unwritten(bh) || buffer_new(bh)) {
		clear_pmem(dax.addr, PAGE_SIZE);
		wmb_pmem();
	}
	dax_unmap_atomic(bdev, &dax);

	error = vm_insert_mixed(vma, vaddr, dax.pfn);

 out:
	i_mmap_unlock_read(mapping);

	return error;
}

/**
 * __dax_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @complete_unwritten: The filesystem method used to convert unwritten blocks
 *	to written so the data written to them is exposed.  This is required
 *	by write faults for filesystems that will return unwritten extent
 *	mappings from @get_block, but it is optional for reads as
 *	dax_insert_mapping() will always zero unwritten blocks.  If the fs
 *	does not support unwritten extents, it should pass NULL.
 *
 * When a page fault occurs, filesystems may call this helper in their
 * fault handler for DAX files.  __dax_fault() assumes the caller has done
 * all the necessary locking for the page fault to proceed successfully.
 */
int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
			get_block_t get_block, dax_iodone_t complete_unwritten)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct page *page;
	struct buffer_head bh;
	unsigned long vaddr = (unsigned long)vmf->virtual_address;
	unsigned blkbits = inode->i_blkbits;
	sector_t block;
	pgoff_t size;
	int error;
	int major = 0;

	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size)
		return VM_FAULT_SIGBUS;

	memset(&bh, 0, sizeof(bh));
	block = (sector_t)vmf->pgoff << (PAGE_SHIFT - blkbits);
	bh.b_size = PAGE_SIZE;

 repeat:
	page = find_get_page(mapping, vmf->pgoff);
	if (page) {
		if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
			page_cache_release(page);
			return VM_FAULT_RETRY;
		}
		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			page_cache_release(page);
			goto repeat;
		}
		size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
		if (unlikely(vmf->pgoff >= size)) {
			/*
			 * We have a struct page covering a hole in the file
			 * from a read fault and we've raced with a truncate
			 */
			error = -EIO;
			goto unlock_page;
		}
	}

	error = get_block(inode, block, &bh, 0);
	if (!error && (bh.b_size < PAGE_SIZE))
		error = -EIO;		/* fs corruption? */
	if (error)
		goto unlock_page;

	if (!buffer_mapped(&bh) && !buffer_unwritten(&bh) && !vmf->cow_page) {
		if (vmf->flags & FAULT_FLAG_WRITE) {
			error = get_block(inode, block, &bh, 1);
			count_vm_event(PGMAJFAULT);
			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
			major = VM_FAULT_MAJOR;
			if (!error && (bh.b_size < PAGE_SIZE))
				error = -EIO;
			if (error)
				goto unlock_page;
		} else {
			return dax_load_hole(mapping, page, vmf);
		}
	}

	if (vmf->cow_page) {
		struct page *new_page = vmf->cow_page;
		if (buffer_written(&bh))
			error = copy_user_bh(new_page, inode, &bh, vaddr);
		else
			clear_user_highpage(new_page, vaddr);
		if (error)
			goto unlock_page;
		vmf->page = page;
		if (!page) {
			i_mmap_lock_read(mapping);
			/* Check we didn't race with truncate */
			size = (i_size_read(inode) + PAGE_SIZE - 1) >>
								PAGE_SHIFT;
			if (vmf->pgoff >= size) {
				i_mmap_unlock_read(mapping);
				error = -EIO;
				goto out;
			}
		}
		return VM_FAULT_LOCKED;
	}

	/* Check we didn't race with a read fault installing a new page */
	if (!page && major)
		page = find_lock_page(mapping, vmf->pgoff);

	if (page) {
		unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
							PAGE_CACHE_SIZE, 0);
		delete_from_page_cache(page);
		unlock_page(page);
		page_cache_release(page);
	}

	/*
	 * If we successfully insert the new mapping over an unwritten extent,
	 * we need to ensure we convert the unwritten extent.  If there is an
	 * error inserting the mapping, the filesystem needs to leave it as
	 * unwritten to prevent exposure of the stale underlying data to
	 * userspace, but we still need to call the completion function so
	 * the private resources on the mapping buffer can be released.  We
	 * indicate what the callback should do via the uptodate variable,
	 * same as for normal BH based IO completions.
	 */
	error = dax_insert_mapping(inode, &bh, vma, vmf);
	if (buffer_unwritten(&bh)) {
		if (complete_unwritten)
			complete_unwritten(&bh, !error);
		else
			WARN_ON_ONCE(!(vmf->flags & FAULT_FLAG_WRITE));
	}

 out:
	if (error == -ENOMEM)
		return VM_FAULT_OOM | major;
	/* -EBUSY is fine, somebody else faulted on the same PTE */
	if ((error < 0) && (error != -EBUSY))
		return VM_FAULT_SIGBUS | major;
	return VM_FAULT_NOPAGE | major;

 unlock_page:
	if (page) {
		unlock_page(page);
		page_cache_release(page);
	}
	goto out;
}
EXPORT_SYMBOL(__dax_fault);

/**
 * dax_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @complete_unwritten: The filesystem method used to convert unwritten
 *	blocks to written (see __dax_fault())
 *
 * When a page fault occurs, filesystems may call this helper in their
 * fault handler for DAX files.
 */
int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
	      get_block_t get_block, dax_iodone_t complete_unwritten)
{
	int result;
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;

	if (vmf->flags & FAULT_FLAG_WRITE) {
		sb_start_pagefault(sb);
		file_update_time(vma->vm_file);
	}
	result = __dax_fault(vma, vmf, get_block, complete_unwritten);
	if (vmf->flags & FAULT_FLAG_WRITE)
		sb_end_pagefault(sb);

	return result;
}
EXPORT_SYMBOL_GPL(dax_fault);
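
/*
 * Usage sketch (illustrative; example_get_block() is a placeholder): a
 * filesystem hooks dax_fault() up through its vm_operations_struct:
 *
 *	static int example_dax_fault(struct vm_area_struct *vma,
 *			struct vm_fault *vmf)
 *	{
 *		return dax_fault(vma, vmf, example_get_block, NULL);
 *	}
 *
 *	static const struct vm_operations_struct example_dax_vm_ops = {
 *		.fault = example_dax_fault,
 *	};
 */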

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * The 'colour' (ie low bits) within a PMD of a page offset.  This comes up
 * more often than one might expect in the below function.
 */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
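
/*
 * Worked example (illustrative): on x86-64 with 4K pages and 2M PMDs,
 * PG_PMD_COLOUR is (2M >> 12) - 1 = 511, a mask for the low 9 bits of a
 * page offset.  A test such as (pgoff | PG_PMD_COLOUR) >= size therefore
 * asks whether the last page of a would-be PMD mapping lies beyond EOF.
 */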

int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
		pmd_t *pmd, unsigned int flags, get_block_t get_block,
		dax_iodone_t complete_unwritten)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct buffer_head bh;
	unsigned blkbits = inode->i_blkbits;
	unsigned long pmd_addr = address & PMD_MASK;
	bool write = flags & FAULT_FLAG_WRITE;
	struct block_device *bdev;
	pgoff_t size, pgoff;
	sector_t block;
	int result = 0;

	/* dax pmd mappings are broken wrt gup and fork */
	if (!IS_ENABLED(CONFIG_FS_DAX_PMD))
		return VM_FAULT_FALLBACK;

	/* Fall back to PTEs if we're going to COW */
	if (write && !(vma->vm_flags & VM_SHARED)) {
		split_huge_pmd(vma, pmd, address);
		return VM_FAULT_FALLBACK;
	}
	/* If the PMD would extend outside the VMA */
	if (pmd_addr < vma->vm_start)
		return VM_FAULT_FALLBACK;
	if ((pmd_addr + PMD_SIZE) > vma->vm_end)
		return VM_FAULT_FALLBACK;

	pgoff = linear_page_index(vma, pmd_addr);
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (pgoff >= size)
		return VM_FAULT_SIGBUS;
	/* If the PMD would cover blocks out of the file */
	if ((pgoff | PG_PMD_COLOUR) >= size)
		return VM_FAULT_FALLBACK;

	memset(&bh, 0, sizeof(bh));
	block = (sector_t)pgoff << (PAGE_SHIFT - blkbits);

	bh.b_size = PMD_SIZE;
	if (get_block(inode, block, &bh, write) != 0)
		return VM_FAULT_SIGBUS;
	bdev = bh.b_bdev;
	i_mmap_lock_read(mapping);

	/*
	 * If the filesystem isn't willing to tell us the length of a hole,
	 * just fall back to PTEs.  Calling get_block 512 times in a loop
	 * would be silly.
	 */
	if (!buffer_size_valid(&bh) || bh.b_size < PMD_SIZE)
		goto fallback;

	/*
	 * If we allocated new storage, make sure no process has any
	 * zero pages covering this hole
	 */
	if (buffer_new(&bh)) {
		i_mmap_unlock_read(mapping);
		unmap_mapping_range(mapping, pgoff << PAGE_SHIFT, PMD_SIZE, 0);
		i_mmap_lock_read(mapping);
	}

	/*
	 * If a truncate happened while we were allocating blocks, we may
	 * leave blocks allocated to the file that are beyond EOF.  We can't
	 * take i_mutex here, so just leave them hanging; they'll be freed
	 * when the file is deleted.
	 */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (pgoff >= size) {
		result = VM_FAULT_SIGBUS;
		goto out;
	}
	if ((pgoff | PG_PMD_COLOUR) >= size)
		goto fallback;

	if (!write && !buffer_mapped(&bh) && buffer_uptodate(&bh)) {
		spinlock_t *ptl;
		pmd_t entry;
		struct page *zero_page = get_huge_zero_page();

		if (unlikely(!zero_page))
			goto fallback;

		ptl = pmd_lock(vma->vm_mm, pmd);
		if (!pmd_none(*pmd)) {
			spin_unlock(ptl);
			goto fallback;
		}

		entry = mk_pmd(zero_page, vma->vm_page_prot);
		entry = pmd_mkhuge(entry);
		set_pmd_at(vma->vm_mm, pmd_addr, pmd, entry);
		result = VM_FAULT_NOPAGE;
		spin_unlock(ptl);
	} else {
		struct blk_dax_ctl dax = {
			.sector = to_sector(&bh, inode),
			.size = PMD_SIZE,
		};
		long length = dax_map_atomic(bdev, &dax);

		if (length < 0) {
			result = VM_FAULT_SIGBUS;
			goto out;
		}
		if (length < PMD_SIZE
				|| (pfn_t_to_pfn(dax.pfn) & PG_PMD_COLOUR)) {
			dax_unmap_atomic(bdev, &dax);
			goto fallback;
		}

		/*
		 * TODO: teach vmf_insert_pfn_pmd() to support
		 * 'pte_special' for pmds
		 */
		if (pfn_t_has_page(dax.pfn)) {
			dax_unmap_atomic(bdev, &dax);
			goto fallback;
		}

		if (buffer_unwritten(&bh) || buffer_new(&bh)) {
			clear_pmem(dax.addr, PMD_SIZE);
			wmb_pmem();
			count_vm_event(PGMAJFAULT);
			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
			result |= VM_FAULT_MAJOR;
		}
		dax_unmap_atomic(bdev, &dax);

		result |= vmf_insert_pfn_pmd(vma, address, pmd,
						dax.pfn, write);
	}

 out:
	i_mmap_unlock_read(mapping);

	if (buffer_unwritten(&bh))
		complete_unwritten(&bh, !(result & VM_FAULT_ERROR));

	return result;

 fallback:
	count_vm_event(THP_FAULT_FALLBACK);
	result = VM_FAULT_FALLBACK;
	goto out;
}
EXPORT_SYMBOL_GPL(__dax_pmd_fault);

/**
 * dax_pmd_fault - handle a PMD fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @address: The virtual address at which the fault occurred
 * @pmd: The PMD entry in the page table to fill in
 * @flags: The fault flags (e.g. FAULT_FLAG_WRITE)
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @complete_unwritten: The filesystem method used to convert unwritten
 *	blocks to written (see __dax_fault())
 *
 * When a page fault occurs, filesystems may call this helper in their
 * pmd_fault handler for DAX files.
 */
int dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
			pmd_t *pmd, unsigned int flags, get_block_t get_block,
			dax_iodone_t complete_unwritten)
{
	int result;
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;

	if (flags & FAULT_FLAG_WRITE) {
		sb_start_pagefault(sb);
		file_update_time(vma->vm_file);
	}
	result = __dax_pmd_fault(vma, address, pmd, flags, get_block,
				complete_unwritten);
	if (flags & FAULT_FLAG_WRITE)
		sb_end_pagefault(sb);

	return result;
}
EXPORT_SYMBOL_GPL(dax_pmd_fault);
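
/*
 * Usage sketch (illustrative; example_get_block() is a placeholder): a
 * filesystem's ->pmd_fault handler is a thin wrapper around this helper:
 *
 *	static int example_dax_pmd_fault(struct vm_area_struct *vma,
 *			unsigned long addr, pmd_t *pmd, unsigned int flags)
 *	{
 *		return dax_pmd_fault(vma, addr, pmd, flags,
 *				     example_get_block, NULL);
 *	}
 */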
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/**
 * dax_pfn_mkwrite - handle first write to DAX page
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 */
int dax_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;

	sb_start_pagefault(sb);
	file_update_time(vma->vm_file);
	sb_end_pagefault(sb);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);
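
/*
 * Usage sketch (illustrative; example_dax_fault() is the placeholder
 * wrapper sketched after dax_fault() above): dax_pfn_mkwrite() can be
 * used directly as a vm_operations_struct method so that the first write
 * through a read-only DAX mapping updates the file's mtime and takes
 * freeze protection:
 *
 *	static const struct vm_operations_struct example_dax_vm_ops = {
 *		.fault		= example_dax_fault,
 *		.pfn_mkwrite	= dax_pfn_mkwrite,
 *	};
 */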

/**
 * dax_zero_page_range - zero a range within a page of a DAX file
 * @inode: The file being zeroed
 * @from: The file offset at which zeroing starts
 * @length: The number of bytes to zero
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * This function can be called by a filesystem when it is zeroing part of a
 * page in a DAX file.  This is intended for hole-punch operations.  If
 * you are truncating a file, the helper function dax_truncate_page() may be
 * more convenient.
 *
 * We work in terms of PAGE_CACHE_SIZE here for commonality with
 * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
 * took care of disposing of the unnecessary blocks.  Even if the filesystem
 * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
 * since the file might be mmapped.
 */
int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length,
							get_block_t get_block)
{
	struct buffer_head bh;
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	int err;

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;
	BUG_ON((offset + length) > PAGE_CACHE_SIZE);

	memset(&bh, 0, sizeof(bh));
	bh.b_size = PAGE_CACHE_SIZE;
	err = get_block(inode, index, &bh, 0);
	if (err < 0)
		return err;
	if (buffer_written(&bh)) {
		struct block_device *bdev = bh.b_bdev;
		struct blk_dax_ctl dax = {
			.sector = to_sector(&bh, inode),
			.size = PAGE_CACHE_SIZE,
		};

		if (dax_map_atomic(bdev, &dax) < 0)
			return PTR_ERR(dax.addr);
		clear_pmem(dax.addr + offset, length);
		wmb_pmem();
		dax_unmap_atomic(bdev, &dax);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dax_zero_page_range);
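
/*
 * Usage sketch (illustrative; 'start', 'len' and example_get_block() are
 * placeholders): a hole-punch implementation can use this to zero the
 * partial page at each end of the punched range before freeing the whole
 * blocks in between:
 *
 *	err = dax_zero_page_range(inode, start, len, example_get_block);
 */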

/**
 * dax_truncate_page - handle a partial page being truncated in a DAX file
 * @inode: The file being truncated
 * @from: The file offset that is being truncated to
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * Similar to block_truncate_page(), this function can be called by a
 * filesystem when it is truncating a DAX file to handle the partial page.
 *
 * We work in terms of PAGE_CACHE_SIZE here for commonality with
 * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
 * took care of disposing of the unnecessary blocks.  Even if the filesystem
 * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
 * since the file might be mmapped.
 */
int dax_truncate_page(struct inode *inode, loff_t from, get_block_t get_block)
{
	unsigned length = PAGE_CACHE_ALIGN(from) - from;
	return dax_zero_page_range(inode, from, length, get_block);
}
EXPORT_SYMBOL_GPL(dax_truncate_page);
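
/*
 * Usage sketch (illustrative; example_get_block() is a placeholder): a
 * filesystem's truncate path zeroes the tail of the new last page:
 *
 *	if (IS_DAX(inode))
 *		error = dax_truncate_page(inode, newsize,
 *					  example_get_block);
 */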