// SPDX-License-Identifier: GPL-2.0
/*
 * dax: direct host memory access
 * Copyright (C) 2020 Red Hat, Inc.
 */

#include "fuse_i.h"

#include <linux/delay.h>
#include <linux/dax.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/pfn_t.h>
#include <linux/iomap.h>
#include <linux/interval_tree.h>

/*
 * Default memory range size. A power of 2 so it agrees with common FUSE_INIT
 * map_alignment values 4KB and 64KB.
 */
#define FUSE_DAX_SHIFT 21
#define FUSE_DAX_SZ (1 << FUSE_DAX_SHIFT)
#define FUSE_DAX_PAGES (FUSE_DAX_SZ / PAGE_SIZE)

/* Number of ranges reclaimer will try to free in one invocation */
#define FUSE_DAX_RECLAIM_CHUNK (10)

/*
 * DAX memory reclaim threshold as a percentage of total ranges. When the
 * number of free ranges drops below this threshold, reclaim can trigger.
 * Default is 20%.
 */
#define FUSE_DAX_RECLAIM_THRESHOLD (20)

/** Translation information for file offsets to DAX window offsets */
struct fuse_dax_mapping {
	/* Pointer to inode where this memory range is mapped */
	struct inode *inode;

	/* Will connect in fcd->free_ranges to keep track of free memory */
	struct list_head list;

	/* For interval tree in file/inode */
	struct interval_tree_node itn;

	/* Will connect in fc->busy_ranges to keep track of busy memory */
	struct list_head busy_list;

	/** Position in DAX window */
	u64 window_offset;

	/** Length of mapping, in bytes */
	loff_t length;

	/* Is this mapping read-only or read-write */
	bool writable;

	/* reference count when the mapping is used by dax iomap. */
	refcount_t refcnt;
};

/* Per-inode dax map */
struct fuse_inode_dax {
	/* Semaphore to protect modifications to the dmap tree */
	struct rw_semaphore sem;

	/* Sorted rb tree of struct fuse_dax_mapping elements */
	struct rb_root_cached tree;
	unsigned long nr;
};

struct fuse_conn_dax {
	/* DAX device */
	struct dax_device *dev;

	/* Lock protecting accesses to members of this structure */
	spinlock_t lock;

	/* List of memory ranges which are busy */
	unsigned long nr_busy_ranges;
	struct list_head busy_ranges;

	/* Worker to free up memory ranges */
	struct delayed_work free_work;

	/* Wait queue for a dax range to become free */
	wait_queue_head_t range_waitq;

	/* DAX Window Free Ranges */
	long nr_free_ranges;
	struct list_head free_ranges;

	unsigned long nr_ranges;
};

static inline struct fuse_dax_mapping *
node_to_dmap(struct interval_tree_node *node)
{
	if (!node)
		return NULL;

	return container_of(node, struct fuse_dax_mapping, itn);
}

static struct fuse_dax_mapping *
alloc_dax_mapping_reclaim(struct fuse_conn_dax *fcd, struct inode *inode);

static void
__kick_dmap_free_worker(struct fuse_conn_dax *fcd, unsigned long delay_ms)
{
	unsigned long free_threshold;

	/* If the number of free ranges is below threshold, start reclaim */
	free_threshold = max_t(unsigned long, fcd->nr_ranges * FUSE_DAX_RECLAIM_THRESHOLD / 100,
			       1);
	if (fcd->nr_free_ranges < free_threshold)
		queue_delayed_work(system_long_wq, &fcd->free_work,
				   msecs_to_jiffies(delay_ms));
}

static void kick_dmap_free_worker(struct fuse_conn_dax *fcd,
				  unsigned long delay_ms)
{
	spin_lock(&fcd->lock);
	__kick_dmap_free_worker(fcd, delay_ms);
	spin_unlock(&fcd->lock);
}

static struct fuse_dax_mapping *alloc_dax_mapping(struct fuse_conn_dax *fcd)
{
	struct fuse_dax_mapping *dmap;

	spin_lock(&fcd->lock);
	dmap = list_first_entry_or_null(&fcd->free_ranges,
					struct fuse_dax_mapping, list);
	if (dmap) {
		list_del_init(&dmap->list);
		WARN_ON(fcd->nr_free_ranges <= 0);
		fcd->nr_free_ranges--;
	}
	__kick_dmap_free_worker(fcd, 0);
	spin_unlock(&fcd->lock);

	return dmap;
}

/* This assumes fcd->lock is held */
static void __dmap_remove_busy_list(struct fuse_conn_dax *fcd,
				    struct fuse_dax_mapping *dmap)
{
	list_del_init(&dmap->busy_list);
	WARN_ON(fcd->nr_busy_ranges == 0);
	fcd->nr_busy_ranges--;
}

static void dmap_remove_busy_list(struct fuse_conn_dax *fcd,
				  struct fuse_dax_mapping *dmap)
{
	spin_lock(&fcd->lock);
	__dmap_remove_busy_list(fcd, dmap);
	spin_unlock(&fcd->lock);
}

/* This assumes fcd->lock is held */
static void __dmap_add_to_free_pool(struct fuse_conn_dax *fcd,
				    struct fuse_dax_mapping *dmap)
{
	list_add_tail(&dmap->list, &fcd->free_ranges);
	fcd->nr_free_ranges++;
	wake_up(&fcd->range_waitq);
}

static void dmap_add_to_free_pool(struct fuse_conn_dax *fcd,
				  struct fuse_dax_mapping *dmap)
{
	/* Return fuse_dax_mapping to free list */
	spin_lock(&fcd->lock);
	__dmap_add_to_free_pool(fcd, dmap);
	spin_unlock(&fcd->lock);
}

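/*
 * Ask the FUSE daemon (via FUSE_SETUPMAPPING) to map one FUSE_DAX_SZ range
 * of the file at @start_idx into the DAX window at dmap->window_offset.
 * Unless this is an upgrade of an existing read-only mapping to
 * read-write, the dmap is also inserted into the inode's interval tree
 * and the connection's busy list.
 */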
static int fuse_setup_one_mapping(struct inode *inode, unsigned long start_idx,
				  struct fuse_dax_mapping *dmap, bool writable,
				  bool upgrade)
{
	struct fuse_mount *fm = get_fuse_mount(inode);
	struct fuse_conn_dax *fcd = fm->fc->dax;
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_setupmapping_in inarg;
	loff_t offset = start_idx << FUSE_DAX_SHIFT;
	FUSE_ARGS(args);
	ssize_t err;

	WARN_ON(fcd->nr_free_ranges < 0);

	/* Ask fuse daemon to setup mapping */
	memset(&inarg, 0, sizeof(inarg));
	inarg.foffset = offset;
	inarg.fh = -1;
	inarg.moffset = dmap->window_offset;
	inarg.len = FUSE_DAX_SZ;
	inarg.flags |= FUSE_SETUPMAPPING_FLAG_READ;
	if (writable)
		inarg.flags |= FUSE_SETUPMAPPING_FLAG_WRITE;
	args.opcode = FUSE_SETUPMAPPING;
	args.nodeid = fi->nodeid;
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	err = fuse_simple_request(fm, &args);
	if (err < 0)
		return err;
	dmap->writable = writable;
	if (!upgrade) {
		/*
		 * We don't take a reference on inode. inode is valid right now
		 * and when inode is going away, cleanup logic should first
		 * cleanup dmap entries.
		 */
		dmap->inode = inode;
		dmap->itn.start = dmap->itn.last = start_idx;
		/* Protected by fi->dax->sem */
		interval_tree_insert(&dmap->itn, &fi->dax->tree);
		fi->dax->nr++;
		spin_lock(&fcd->lock);
		list_add_tail(&dmap->busy_list, &fcd->busy_ranges);
		fcd->nr_busy_ranges++;
		spin_unlock(&fcd->lock);
	}
	return 0;
}

static int fuse_send_removemapping(struct inode *inode,
				   struct fuse_removemapping_in *inargp,
				   struct fuse_removemapping_one *remove_one)
{
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_mount *fm = get_fuse_mount(inode);
	FUSE_ARGS(args);

	args.opcode = FUSE_REMOVEMAPPING;
	args.nodeid = fi->nodeid;
	args.in_numargs = 3;
	fuse_set_zero_arg0(&args);
	args.in_args[1].size = sizeof(*inargp);
	args.in_args[1].value = inargp;
	args.in_args[2].size = inargp->count * sizeof(*remove_one);
	args.in_args[2].value = remove_one;
	return fuse_simple_request(fm, &args);
}

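/*
 * Send FUSE_REMOVEMAPPING requests for all dmaps on @to_remove, batching
 * up to FUSE_REMOVEMAPPING_MAX_ENTRY entries per request.
 */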
static int dmap_removemapping_list(struct inode *inode, unsigned int num,
				   struct list_head *to_remove)
{
	struct fuse_removemapping_one *remove_one, *ptr;
	struct fuse_removemapping_in inarg;
	struct fuse_dax_mapping *dmap;
	int ret, i = 0, nr_alloc;

	nr_alloc = min_t(unsigned int, num, FUSE_REMOVEMAPPING_MAX_ENTRY);
	remove_one = kmalloc_array(nr_alloc, sizeof(*remove_one), GFP_NOFS);
	if (!remove_one)
		return -ENOMEM;

	ptr = remove_one;
	list_for_each_entry(dmap, to_remove, list) {
		ptr->moffset = dmap->window_offset;
		ptr->len = dmap->length;
		ptr++;
		i++;
		num--;
		if (i >= nr_alloc || num == 0) {
			memset(&inarg, 0, sizeof(inarg));
			inarg.count = i;
			ret = fuse_send_removemapping(inode, &inarg,
						      remove_one);
			if (ret)
				goto out;
			ptr = remove_one;
			i = 0;
		}
	}
out:
	kfree(remove_one);
	return ret;
}

/*
 * Cleanup dmap entry and add back to free list. This should be called with
 * fcd->lock held.
 */
static void dmap_reinit_add_to_free_pool(struct fuse_conn_dax *fcd,
					 struct fuse_dax_mapping *dmap)
{
	pr_debug("fuse: freeing memory range start_idx=0x%lx end_idx=0x%lx window_offset=0x%llx length=0x%llx\n",
		 dmap->itn.start, dmap->itn.last, dmap->window_offset,
		 dmap->length);
	__dmap_remove_busy_list(fcd, dmap);
	dmap->inode = NULL;
	dmap->itn.start = dmap->itn.last = 0;
	__dmap_add_to_free_pool(fcd, dmap);
}

/*
 * Free inode dmap entries whose range falls inside [start, end].
 * Does not take any locks. At this point of time it should only be
 * called from evict_inode() path where we know all dmap entries can be
 * reclaimed.
 */
static void inode_reclaim_dmap_range(struct fuse_conn_dax *fcd,
				     struct inode *inode,
				     loff_t start, loff_t end)
{
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_dax_mapping *dmap, *n;
	int err, num = 0;
	LIST_HEAD(to_remove);
	unsigned long start_idx = start >> FUSE_DAX_SHIFT;
	unsigned long end_idx = end >> FUSE_DAX_SHIFT;
	struct interval_tree_node *node;

	while (1) {
		node = interval_tree_iter_first(&fi->dax->tree, start_idx,
						end_idx);
		if (!node)
			break;
		dmap = node_to_dmap(node);
		/* inode is going away. There should not be any users of dmap */
		WARN_ON(refcount_read(&dmap->refcnt) > 1);
		interval_tree_remove(&dmap->itn, &fi->dax->tree);
		num++;
		list_add(&dmap->list, &to_remove);
	}

	/* Nothing to remove */
	if (list_empty(&to_remove))
		return;

	WARN_ON(fi->dax->nr < num);
	fi->dax->nr -= num;
	err = dmap_removemapping_list(inode, num, &to_remove);
	if (err && err != -ENOTCONN) {
		pr_warn("Failed to removemappings. start=0x%llx end=0x%llx\n",
			start, end);
	}
	spin_lock(&fcd->lock);
	list_for_each_entry_safe(dmap, n, &to_remove, list) {
		list_del_init(&dmap->list);
		dmap_reinit_add_to_free_pool(fcd, dmap);
	}
	spin_unlock(&fcd->lock);
}

static int dmap_removemapping_one(struct inode *inode,
				  struct fuse_dax_mapping *dmap)
{
	struct fuse_removemapping_one forget_one;
	struct fuse_removemapping_in inarg;

	memset(&inarg, 0, sizeof(inarg));
	inarg.count = 1;
	memset(&forget_one, 0, sizeof(forget_one));
	forget_one.moffset = dmap->window_offset;
	forget_one.len = dmap->length;

	return fuse_send_removemapping(inode, &inarg, &forget_one);
}

/*
 * This is called from evict_inode() and by that time the inode is going
 * away. So this function does not take any locks like fi->dax->sem for
 * traversing the fuse inode interval tree. If that lock were taken, the
 * lock validator would complain about a deadlock with the fs_reclaim lock.
 */
void fuse_dax_inode_cleanup(struct inode *inode)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);

	/*
	 * fuse_evict_inode() has already called truncate_inode_pages_final()
	 * before we arrive here. So we should not have to worry about any
	 * pages/exception entries still associated with inode.
	 */
	inode_reclaim_dmap_range(fc->dax, inode, 0, -1);
	WARN_ON(fi->dax->nr);
}

static void fuse_fill_iomap_hole(struct iomap *iomap, loff_t length)
{
	iomap->addr = IOMAP_NULL_ADDR;
	iomap->length = length;
	iomap->type = IOMAP_HOLE;
}

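/*
 * Translate a file range into the corresponding range in the DAX window
 * described by @dmap and fill @iomap accordingly. Ranges beyond end of
 * file are reported as holes. For mapped ranges a reference is taken on
 * @dmap and stashed in iomap->private so that reclaim leaves it alone;
 * fuse_iomap_end() drops that reference.
 */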
static void fuse_fill_iomap(struct inode *inode, loff_t pos, loff_t length,
			    struct iomap *iomap, struct fuse_dax_mapping *dmap,
			    unsigned int flags)
{
	loff_t offset, len;
	loff_t i_size = i_size_read(inode);

	offset = pos - (dmap->itn.start << FUSE_DAX_SHIFT);
	len = min(length, dmap->length - offset);

	/* If length is beyond end of file, truncate further */
	if (pos + len > i_size)
		len = i_size - pos;

	if (len > 0) {
		iomap->addr = dmap->window_offset + offset;
		iomap->length = len;
		if (flags & IOMAP_FAULT)
			iomap->length = ALIGN(len, PAGE_SIZE);
		iomap->type = IOMAP_MAPPED;
		/*
		 * Increase refcnt so that reclaim code knows this dmap is in
		 * use. This assumes fi->dax->sem is held either shared or
		 * exclusive.
		 */
		refcount_inc(&dmap->refcnt);

		/* iomap->private should be NULL */
		WARN_ON_ONCE(iomap->private);
		iomap->private = dmap;
	} else {
		/* Mapping beyond end of file is hole */
		fuse_fill_iomap_hole(iomap, length);
	}
}

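/*
 * Allocate a free memory range and ask the daemon to map it. In the fault
 * path a failed allocation returns -EAGAIN so that the fault handler can
 * wait for a range to become free; otherwise inline reclaim is attempted.
 */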
static int fuse_setup_new_dax_mapping(struct inode *inode, loff_t pos,
				      loff_t length, unsigned int flags,
				      struct iomap *iomap)
{
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_conn_dax *fcd = fc->dax;
	struct fuse_dax_mapping *dmap, *alloc_dmap = NULL;
	int ret;
	bool writable = flags & IOMAP_WRITE;
	unsigned long start_idx = pos >> FUSE_DAX_SHIFT;
	struct interval_tree_node *node;

	/*
	 * Can't do inline reclaim in fault path. We call
	 * dax_layout_busy_page() before we free a range. And
	 * fuse_wait_dax_page() drops mapping->invalidate_lock and requires it.
	 * In fault path we enter with mapping->invalidate_lock held and can't
	 * drop it. Also in fault path we hold mapping->invalidate_lock shared
	 * and not exclusive, so that creates further issues with
	 * fuse_wait_dax_page(). Hence return -EAGAIN and fuse_dax_fault()
	 * will wait for a memory range to become free and retry.
	 */
	if (flags & IOMAP_FAULT) {
		alloc_dmap = alloc_dax_mapping(fcd);
		if (!alloc_dmap)
			return -EAGAIN;
	} else {
		alloc_dmap = alloc_dax_mapping_reclaim(fcd, inode);
		if (IS_ERR(alloc_dmap))
			return PTR_ERR(alloc_dmap);
	}

	/* If we are here, we should have memory allocated */
	if (WARN_ON(!alloc_dmap))
		return -EIO;

	/*
	 * Take write lock so that only one caller can try to setup mapping
	 * and others wait.
	 */
	down_write(&fi->dax->sem);
	/*
	 * We dropped the lock. Check again if somebody else set up the
	 * mapping already.
	 */
	node = interval_tree_iter_first(&fi->dax->tree, start_idx, start_idx);
	if (node) {
		dmap = node_to_dmap(node);
		fuse_fill_iomap(inode, pos, length, iomap, dmap, flags);
		dmap_add_to_free_pool(fcd, alloc_dmap);
		up_write(&fi->dax->sem);
		return 0;
	}

	/* Setup one mapping */
	ret = fuse_setup_one_mapping(inode, pos >> FUSE_DAX_SHIFT, alloc_dmap,
				     writable, false);
	if (ret < 0) {
		dmap_add_to_free_pool(fcd, alloc_dmap);
		up_write(&fi->dax->sem);
		return ret;
	}
	fuse_fill_iomap(inode, pos, length, iomap, alloc_dmap, flags);
	up_write(&fi->dax->sem);
	return 0;
}

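/*
 * Upgrade an existing read-only mapping covering @pos to read-write.
 * Called from fuse_iomap_begin() with an extra reference held on the
 * dmap; that reference is dropped once fi->dax->sem is taken exclusively.
 */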
static int fuse_upgrade_dax_mapping(struct inode *inode, loff_t pos,
				    loff_t length, unsigned int flags,
				    struct iomap *iomap)
{
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_dax_mapping *dmap;
	int ret;
	unsigned long idx = pos >> FUSE_DAX_SHIFT;
	struct interval_tree_node *node;

	/*
	 * Take exclusive lock so that only one caller can try to setup
	 * mapping and others wait.
	 */
	down_write(&fi->dax->sem);
	node = interval_tree_iter_first(&fi->dax->tree, idx, idx);

	/* We are holding either inode lock or invalidate_lock, and that should
	 * ensure that dmap can't be truncated. We are holding a reference
	 * on dmap and that should make sure it can't be reclaimed. So dmap
	 * should still be there in tree despite the fact we dropped and
	 * re-acquired the fi->dax->sem lock.
	 */
	ret = -EIO;
	if (WARN_ON(!node))
		goto out_err;

	dmap = node_to_dmap(node);

	/* We took an extra reference on dmap to make sure it's not reclaimed.
	 * Now we hold fi->dax->sem lock and that reference is not needed
	 * anymore. Drop it.
	 */
	if (refcount_dec_and_test(&dmap->refcnt)) {
		/* refcount should not hit 0. This object only goes
		 * away when fuse connection goes away
		 */
		WARN_ON_ONCE(1);
	}

	/* Maybe another thread already upgraded mapping while we were not
	 * holding lock.
	 */
	if (dmap->writable) {
		ret = 0;
		goto out_fill_iomap;
	}

	ret = fuse_setup_one_mapping(inode, pos >> FUSE_DAX_SHIFT, dmap, true,
				     true);
	if (ret < 0)
		goto out_err;
out_fill_iomap:
	fuse_fill_iomap(inode, pos, length, iomap, dmap, flags);
out_err:
	up_write(&fi->dax->sem);
	return ret;
}

/* This is just for DAX and the mapping is ephemeral, do not use it for other
 * purposes since there is no block device with a permanent mapping.
 */
static int fuse_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
			    unsigned int flags, struct iomap *iomap,
			    struct iomap *srcmap)
{
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_dax_mapping *dmap;
	bool writable = flags & IOMAP_WRITE;
	unsigned long start_idx = pos >> FUSE_DAX_SHIFT;
	struct interval_tree_node *node;

	/* We don't support FIEMAP */
	if (WARN_ON(flags & IOMAP_REPORT))
		return -EIO;

	iomap->offset = pos;
	iomap->flags = 0;
	iomap->bdev = NULL;
	iomap->dax_dev = fc->dax->dev;

	/*
	 * Both read/write and mmap path can race here. So we need something
	 * to make sure that if we are setting up a mapping, then the other
	 * path waits.
	 *
	 * For now, use a semaphore for this. It probably needs to be
	 * optimized later.
	 */
	down_read(&fi->dax->sem);
	node = interval_tree_iter_first(&fi->dax->tree, start_idx, start_idx);
	if (node) {
		dmap = node_to_dmap(node);
		if (writable && !dmap->writable) {
			/* Upgrade read-only mapping to read-write. This will
			 * require exclusive fi->dax->sem lock as we don't want
			 * two threads to be trying to do this simultaneously
			 * for same dmap. So drop shared lock and acquire
			 * exclusive lock.
			 *
			 * Before dropping fi->dax->sem lock, take reference
			 * on dmap so that it's not freed by range reclaim.
			 */
			refcount_inc(&dmap->refcnt);
			up_read(&fi->dax->sem);
			pr_debug("%s: Upgrading mapping at offset 0x%llx length 0x%llx\n",
				 __func__, pos, length);
			return fuse_upgrade_dax_mapping(inode, pos, length,
							flags, iomap);
		} else {
			fuse_fill_iomap(inode, pos, length, iomap, dmap, flags);
			up_read(&fi->dax->sem);
			return 0;
		}
	} else {
		up_read(&fi->dax->sem);
		pr_debug("%s: no mapping at offset 0x%llx length 0x%llx\n",
			 __func__, pos, length);
		if (pos >= i_size_read(inode))
			goto iomap_hole;

		return fuse_setup_new_dax_mapping(inode, pos, length, flags,
						  iomap);
	}

	/*
	 * If read beyond end of file happens, fs code seems to return
	 * it as hole
	 */
iomap_hole:
	fuse_fill_iomap_hole(iomap, length);
	pr_debug("%s returning hole mapping. pos=0x%llx length_asked=0x%llx length_returned=0x%llx\n",
		 __func__, pos, length, iomap->length);
	return 0;
}

static int fuse_iomap_end(struct inode *inode, loff_t pos, loff_t length,
			  ssize_t written, unsigned int flags,
			  struct iomap *iomap)
{
	struct fuse_dax_mapping *dmap = iomap->private;

	if (dmap) {
		if (refcount_dec_and_test(&dmap->refcnt)) {
			/* refcount should not hit 0. This object only goes
			 * away when fuse connection goes away
			 */
			WARN_ON_ONCE(1);
		}
	}

	/* DAX writes beyond end-of-file aren't handled using iomap, so the
	 * file size is unchanged and there is nothing to do here.
	 */
	return 0;
}

static const struct iomap_ops fuse_iomap_ops = {
	.iomap_begin = fuse_iomap_begin,
	.iomap_end = fuse_iomap_end,
};

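/*
 * Helpers to drain in-flight references on DAX pages (e.g. from
 * get_user_pages()) before a range is reclaimed or truncated.
 * fuse_wait_dax_page() drops mapping->invalidate_lock while sleeping so
 * that whoever holds the page reference can make progress and drop it.
 */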
static void fuse_wait_dax_page(struct inode *inode)
{
	filemap_invalidate_unlock(inode->i_mapping);
	schedule();
	filemap_invalidate_lock(inode->i_mapping);
}

/* Should be called with mapping->invalidate_lock held exclusively */
static int __fuse_dax_break_layouts(struct inode *inode, bool *retry,
				    loff_t start, loff_t end)
{
	struct page *page;

	page = dax_layout_busy_page_range(inode->i_mapping, start, end);
	if (!page)
		return 0;

	*retry = true;
	return ___wait_var_event(&page->_refcount,
			atomic_read(&page->_refcount) == 1, TASK_INTERRUPTIBLE,
			0, 0, fuse_wait_dax_page(inode));
}

/* dmap_end == 0 leads to unmapping of whole file */
int fuse_dax_break_layouts(struct inode *inode, u64 dmap_start,
			   u64 dmap_end)
{
	bool retry;
	int ret;

	do {
		retry = false;
		ret = __fuse_dax_break_layouts(inode, &retry, dmap_start,
					       dmap_end);
	} while (ret == 0 && retry);

	return ret;
}

ssize_t fuse_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!inode_trylock_shared(inode))
			return -EAGAIN;
	} else {
		inode_lock_shared(inode);
	}

	ret = dax_iomap_rw(iocb, to, &fuse_iomap_ops);
	inode_unlock_shared(inode);

	/* TODO file_accessed(iocb->f_filp) */
	return ret;
}

static bool file_extending_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	return (iov_iter_rw(from) == WRITE &&
		((iocb->ki_pos) >= i_size_read(inode) ||
		 (iocb->ki_pos + iov_iter_count(from) > i_size_read(inode))));
}

static ssize_t fuse_dax_direct_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb);
	ssize_t ret;

	ret = fuse_direct_io(&io, from, &iocb->ki_pos, FUSE_DIO_WRITE);

	fuse_write_update_attr(inode, iocb->ki_pos, ret);
	return ret;
}

ssize_t fuse_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!inode_trylock(inode))
			return -EAGAIN;
	} else {
		inode_lock(inode);
	}

	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	ret = file_remove_privs(iocb->ki_filp);
	if (ret)
		goto out;
	/* TODO file_update_time() but we don't want metadata I/O */

	/* Do not use dax for file extending writes as write and on
	 * disk i_size increase are not atomic otherwise.
	 */
	if (file_extending_write(iocb, from))
		ret = fuse_dax_direct_write(iocb, from);
	else
		ret = dax_iomap_rw(iocb, from, &fuse_iomap_ops);

out:
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}

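/*
 * Common fault handler. If iomap_begin fails with -EAGAIN (no free memory
 * range, and inline reclaim is not possible in the fault path), wait on
 * fcd->range_waitq for the reclaim worker to free a range and then retry
 * the fault.
 */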
static vm_fault_t __fuse_dax_fault(struct vm_fault *vmf, unsigned int order,
				   bool write)
{
	vm_fault_t ret;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct super_block *sb = inode->i_sb;
	pfn_t pfn;
	int error = 0;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_conn_dax *fcd = fc->dax;
	bool retry = false;

	if (write)
		sb_start_pagefault(sb);
retry:
	if (retry && !(fcd->nr_free_ranges > 0))
		wait_event(fcd->range_waitq, (fcd->nr_free_ranges > 0));

	/*
	 * We need to serialize against not only truncate but also against
	 * fuse dax memory range reclaim. While a range is being reclaimed,
	 * we do not want any read/write/mmap to make progress and try
	 * to populate page cache or access memory we are trying to free.
	 */
	filemap_invalidate_lock_shared(inode->i_mapping);
	ret = dax_iomap_fault(vmf, order, &pfn, &error, &fuse_iomap_ops);
	if ((ret & VM_FAULT_ERROR) && error == -EAGAIN) {
		error = 0;
		retry = true;
		filemap_invalidate_unlock_shared(inode->i_mapping);
		goto retry;
	}

	if (ret & VM_FAULT_NEEDDSYNC)
		ret = dax_finish_sync_fault(vmf, order, pfn);
	filemap_invalidate_unlock_shared(inode->i_mapping);

	if (write)
		sb_end_pagefault(sb);

	return ret;
}

static vm_fault_t fuse_dax_fault(struct vm_fault *vmf)
{
	return __fuse_dax_fault(vmf, 0, vmf->flags & FAULT_FLAG_WRITE);
}

static vm_fault_t fuse_dax_huge_fault(struct vm_fault *vmf, unsigned int order)
{
	return __fuse_dax_fault(vmf, order, vmf->flags & FAULT_FLAG_WRITE);
}

static vm_fault_t fuse_dax_page_mkwrite(struct vm_fault *vmf)
{
	return __fuse_dax_fault(vmf, 0, true);
}

static vm_fault_t fuse_dax_pfn_mkwrite(struct vm_fault *vmf)
{
	return __fuse_dax_fault(vmf, 0, true);
}

static const struct vm_operations_struct fuse_dax_vm_ops = {
	.fault = fuse_dax_fault,
	.huge_fault = fuse_dax_huge_fault,
	.page_mkwrite = fuse_dax_page_mkwrite,
	.pfn_mkwrite = fuse_dax_pfn_mkwrite,
};

int fuse_dax_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &fuse_dax_vm_ops;
	vm_flags_set(vma, VM_MIXEDMAP | VM_HUGEPAGE);
	return 0;
}

static int dmap_writeback_invalidate(struct inode *inode,
				     struct fuse_dax_mapping *dmap)
{
	int ret;
	loff_t start_pos = dmap->itn.start << FUSE_DAX_SHIFT;
	loff_t end_pos = (start_pos + FUSE_DAX_SZ - 1);

	ret = filemap_fdatawrite_range(inode->i_mapping, start_pos, end_pos);
	if (ret) {
		pr_debug("fuse: filemap_fdatawrite_range() failed. err=%d start_pos=0x%llx, end_pos=0x%llx\n",
			 ret, start_pos, end_pos);
		return ret;
	}

	ret = invalidate_inode_pages2_range(inode->i_mapping,
					    start_pos >> PAGE_SHIFT,
					    end_pos >> PAGE_SHIFT);
	if (ret)
		pr_debug("fuse: invalidate_inode_pages2_range() failed err=%d\n",
			 ret);

	return ret;
}

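/*
 * Reclaim a single dmap: write back and invalidate its page cache range,
 * drop it from the inode's interval tree and tell the daemon to remove
 * the mapping. Caller must hold fi->dax->sem exclusively and must already
 * have broken DAX layouts for the range.
 */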
static int reclaim_one_dmap_locked(struct inode *inode,
				   struct fuse_dax_mapping *dmap)
{
	int ret;
	struct fuse_inode *fi = get_fuse_inode(inode);

	/*
	 * igrab() was done to make sure inode won't go under us, and this
	 * further avoids the race with evict().
	 */
	ret = dmap_writeback_invalidate(inode, dmap);
	if (ret)
		return ret;

	/* Remove dax mapping from inode interval tree now */
	interval_tree_remove(&dmap->itn, &fi->dax->tree);
	fi->dax->nr--;

	/* It is possible that umount/shutdown has killed the fuse connection
	 * and worker thread is trying to reclaim memory in parallel. Don't
	 * warn in that case.
	 */
	ret = dmap_removemapping_one(inode, dmap);
	if (ret && ret != -ENOTCONN) {
		pr_warn("Failed to remove mapping. offset=0x%llx len=0x%llx ret=%d\n",
			dmap->window_offset, dmap->length, ret);
	}
	return 0;
}

/* Find the first mapped dmap for an inode. Caller needs to hold
 * fi->dax->sem lock either shared or exclusive.
 */
static struct fuse_dax_mapping *inode_lookup_first_dmap(struct inode *inode)
{
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_dax_mapping *dmap;
	struct interval_tree_node *node;

	for (node = interval_tree_iter_first(&fi->dax->tree, 0, -1); node;
	     node = interval_tree_iter_next(node, 0, -1)) {
		dmap = node_to_dmap(node);
		/* still in use. */
		if (refcount_read(&dmap->refcnt) > 1)
			continue;

		return dmap;
	}

	return NULL;
}

/*
 * Find first mapping in the tree and free it and return it. Do not add
 * it back to free pool.
 */
static struct fuse_dax_mapping *
inode_inline_reclaim_one_dmap(struct fuse_conn_dax *fcd, struct inode *inode,
			      bool *retry)
{
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_dax_mapping *dmap;
	u64 dmap_start, dmap_end;
	unsigned long start_idx;
	int ret;
	struct interval_tree_node *node;

	filemap_invalidate_lock(inode->i_mapping);

	/* Lookup a dmap and corresponding file offset to reclaim. */
	down_read(&fi->dax->sem);
	dmap = inode_lookup_first_dmap(inode);
	if (dmap) {
		start_idx = dmap->itn.start;
		dmap_start = start_idx << FUSE_DAX_SHIFT;
		dmap_end = dmap_start + FUSE_DAX_SZ - 1;
	}
	up_read(&fi->dax->sem);

	if (!dmap)
		goto out_mmap_sem;
	/*
	 * Make sure there are no references to inode pages using
	 * get_user_pages()
	 */
	ret = fuse_dax_break_layouts(inode, dmap_start, dmap_end);
	if (ret) {
		pr_debug("fuse: fuse_dax_break_layouts() failed. err=%d\n",
			 ret);
		dmap = ERR_PTR(ret);
		goto out_mmap_sem;
	}

	down_write(&fi->dax->sem);
	node = interval_tree_iter_first(&fi->dax->tree, start_idx, start_idx);
	/* Range already got reclaimed by somebody else */
	if (!node) {
		if (retry)
			*retry = true;
		goto out_write_dmap_sem;
	}

	dmap = node_to_dmap(node);
	/* still in use. */
	if (refcount_read(&dmap->refcnt) > 1) {
		dmap = NULL;
		if (retry)
			*retry = true;
		goto out_write_dmap_sem;
	}

	ret = reclaim_one_dmap_locked(inode, dmap);
	if (ret < 0) {
		dmap = ERR_PTR(ret);
		goto out_write_dmap_sem;
	}

	/* Clean up dmap. Do not add back to free list */
	dmap_remove_busy_list(fcd, dmap);
	dmap->inode = NULL;
	dmap->itn.start = dmap->itn.last = 0;

	pr_debug("fuse: %s: inline reclaimed memory range. inode=%p, window_offset=0x%llx, length=0x%llx\n",
		 __func__, inode, dmap->window_offset, dmap->length);

out_write_dmap_sem:
	up_write(&fi->dax->sem);
out_mmap_sem:
	filemap_invalidate_unlock(inode->i_mapping);
	return dmap;
}

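/*
 * Allocate a free memory range, falling back to inline reclaim of one of
 * this inode's own ranges. If the inode has no range assigned at all,
 * sleep until the background worker frees one; see the comments below for
 * why sleeping with ranges assigned could deadlock with the worker.
 */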
static struct fuse_dax_mapping *
alloc_dax_mapping_reclaim(struct fuse_conn_dax *fcd, struct inode *inode)
{
	struct fuse_dax_mapping *dmap;
	struct fuse_inode *fi = get_fuse_inode(inode);

	while (1) {
		bool retry = false;

		dmap = alloc_dax_mapping(fcd);
		if (dmap)
			return dmap;

		dmap = inode_inline_reclaim_one_dmap(fcd, inode, &retry);
		/*
		 * Either we got a mapping or it is an error, return in both
		 * the cases.
		 */
		if (dmap)
			return dmap;

		/* If we could not reclaim a mapping because it
		 * had a reference or some other temporary failure,
		 * try again. We want to give up inline reclaim only
		 * if there is no range assigned to this node. Otherwise
		 * a deadlock is possible if we sleep with
		 * mapping->invalidate_lock held and the worker to free memory
		 * can't make progress due to unavailability of
		 * mapping->invalidate_lock. So sleep only if fi->dax->nr=0.
		 */
		if (retry)
			continue;
		/*
		 * There are no mappings which can be reclaimed. Wait for one.
		 * We are not holding fi->dax->sem. So it is possible
		 * that range gets added now. But as we are not holding
		 * mapping->invalidate_lock, worker should still be able to
		 * free up a range and wake us up.
		 */
		if (!fi->dax->nr && !(fcd->nr_free_ranges > 0)) {
			if (wait_event_killable_exclusive(fcd->range_waitq,
					(fcd->nr_free_ranges > 0))) {
				return ERR_PTR(-EINTR);
			}
		}
	}
}

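/*
 * Reclaim the dmap covering @start_idx, if any, and return it to the free
 * pool. Caller must hold fi->dax->sem exclusively. Returns 0 if the range
 * is already gone or is still in use.
 */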
static int lookup_and_reclaim_dmap_locked(struct fuse_conn_dax *fcd,
					  struct inode *inode,
					  unsigned long start_idx)
{
	int ret;
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_dax_mapping *dmap;
	struct interval_tree_node *node;

	/* Find fuse dax mapping at this file offset in the inode. */
	node = interval_tree_iter_first(&fi->dax->tree, start_idx, start_idx);

	/* Range already got cleaned up by somebody else */
	if (!node)
		return 0;
	dmap = node_to_dmap(node);

	/* still in use. */
	if (refcount_read(&dmap->refcnt) > 1)
		return 0;

	ret = reclaim_one_dmap_locked(inode, dmap);
	if (ret < 0)
		return ret;

	/* Cleanup dmap entry and add back to free list */
	spin_lock(&fcd->lock);
	dmap_reinit_add_to_free_pool(fcd, dmap);
	spin_unlock(&fcd->lock);
	return ret;
}

/*
 * Free a range of memory.
 * Locking:
 * 1. Take mapping->invalidate_lock to block dax faults.
 * 2. Take fi->dax->sem to protect interval tree and also to make sure
 *    read/write can not reuse a dmap which we might be freeing.
 */
static int lookup_and_reclaim_dmap(struct fuse_conn_dax *fcd,
				   struct inode *inode,
				   unsigned long start_idx,
				   unsigned long end_idx)
{
	int ret;
	struct fuse_inode *fi = get_fuse_inode(inode);
	loff_t dmap_start = start_idx << FUSE_DAX_SHIFT;
	loff_t dmap_end = (dmap_start + FUSE_DAX_SZ) - 1;

	filemap_invalidate_lock(inode->i_mapping);
	ret = fuse_dax_break_layouts(inode, dmap_start, dmap_end);
	if (ret) {
		pr_debug("virtio_fs: fuse_dax_break_layouts() failed. err=%d\n",
			 ret);
		goto out_mmap_sem;
	}

	down_write(&fi->dax->sem);
	ret = lookup_and_reclaim_dmap_locked(fcd, inode, start_idx);
	up_write(&fi->dax->sem);
out_mmap_sem:
	filemap_invalidate_unlock(inode->i_mapping);
	return ret;
}

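/*
 * Background reclaim: walk the global busy list and free up to
 * @nr_to_free ranges, one at a time, skipping ranges that are in use and
 * inodes that are being evicted.
 */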
static int try_to_free_dmap_chunks(struct fuse_conn_dax *fcd,
				   unsigned long nr_to_free)
{
	struct fuse_dax_mapping *dmap, *pos, *temp;
	int ret, nr_freed = 0;
	unsigned long start_idx = 0, end_idx = 0;
	struct inode *inode = NULL;

	/* Pick first busy range and free it for now */
	while (1) {
		if (nr_freed >= nr_to_free)
			break;

		dmap = NULL;
		spin_lock(&fcd->lock);

		if (!fcd->nr_busy_ranges) {
			spin_unlock(&fcd->lock);
			return 0;
		}

		list_for_each_entry_safe(pos, temp, &fcd->busy_ranges,
					 busy_list) {
			/* skip this range if it's in use. */
			if (refcount_read(&pos->refcnt) > 1)
				continue;

			inode = igrab(pos->inode);
			/*
			 * This inode is going away. That will free
			 * up all the ranges anyway, continue to
			 * next range.
			 */
			if (!inode)
				continue;
			/*
			 * Take this element off the list and add it to the
			 * tail. If this element can't be freed, it will help
			 * with selecting a new element in the next iteration
			 * of the loop.
			 */
			dmap = pos;
			list_move_tail(&dmap->busy_list, &fcd->busy_ranges);
			start_idx = end_idx = dmap->itn.start;
			break;
		}
		spin_unlock(&fcd->lock);
		if (!dmap)
			return 0;

		ret = lookup_and_reclaim_dmap(fcd, inode, start_idx, end_idx);
		iput(inode);
		if (ret)
			return ret;
		nr_freed++;
	}
	return 0;
}

static void fuse_dax_free_mem_worker(struct work_struct *work)
{
	int ret;
	struct fuse_conn_dax *fcd = container_of(work, struct fuse_conn_dax,
						 free_work.work);
	ret = try_to_free_dmap_chunks(fcd, FUSE_DAX_RECLAIM_CHUNK);
	if (ret) {
		pr_debug("fuse: try_to_free_dmap_chunks() failed with err=%d\n",
			 ret);
	}

	/* If the number of free ranges is still below threshold, requeue */
	kick_dmap_free_worker(fcd, 1);
}

static void fuse_free_dax_mem_ranges(struct list_head *mem_list)
{
	struct fuse_dax_mapping *range, *temp;

	/* Free all allocated elements */
	list_for_each_entry_safe(range, temp, mem_list, list) {
		list_del(&range->list);
		if (!list_empty(&range->busy_list))
			list_del(&range->busy_list);
		kfree(range);
	}
}

void fuse_dax_conn_free(struct fuse_conn *fc)
{
	if (fc->dax) {
		fuse_free_dax_mem_ranges(&fc->dax->free_ranges);
		kfree(fc->dax);
		fc->dax = NULL;
	}
}

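/*
 * Query the size of the DAX window with dax_direct_access() and carve it
 * into FUSE_DAX_SZ sized ranges, all initially placed on the free list.
 */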
static int fuse_dax_mem_range_init(struct fuse_conn_dax *fcd)
{
	long nr_pages, nr_ranges;
	struct fuse_dax_mapping *range;
	int ret, id;
	size_t dax_size = -1;
	unsigned long i;

	init_waitqueue_head(&fcd->range_waitq);
	INIT_LIST_HEAD(&fcd->free_ranges);
	INIT_LIST_HEAD(&fcd->busy_ranges);
	INIT_DELAYED_WORK(&fcd->free_work, fuse_dax_free_mem_worker);

	id = dax_read_lock();
	nr_pages = dax_direct_access(fcd->dev, 0, PHYS_PFN(dax_size),
				     DAX_ACCESS, NULL, NULL);
	dax_read_unlock(id);
	if (nr_pages < 0) {
		pr_debug("dax_direct_access() returned %ld\n", nr_pages);
		return nr_pages;
	}

	nr_ranges = nr_pages / FUSE_DAX_PAGES;
	pr_debug("%s: dax mapped %ld pages. nr_ranges=%ld\n",
		 __func__, nr_pages, nr_ranges);

	for (i = 0; i < nr_ranges; i++) {
		range = kzalloc(sizeof(struct fuse_dax_mapping), GFP_KERNEL);
		ret = -ENOMEM;
		if (!range)
			goto out_err;

		/* TODO: This offset only works if virtio-fs driver is not
		 * having some memory hidden at the beginning. This needs
		 * better handling
		 */
		range->window_offset = i * FUSE_DAX_SZ;
		range->length = FUSE_DAX_SZ;
		INIT_LIST_HEAD(&range->busy_list);
		refcount_set(&range->refcnt, 1);
		list_add_tail(&range->list, &fcd->free_ranges);
	}

	fcd->nr_free_ranges = nr_ranges;
	fcd->nr_ranges = nr_ranges;
	return 0;
out_err:
	/* Free all allocated elements */
	fuse_free_dax_mem_ranges(&fcd->free_ranges);
	return ret;
}

int fuse_dax_conn_alloc(struct fuse_conn *fc, enum fuse_dax_mode dax_mode,
			struct dax_device *dax_dev)
{
	struct fuse_conn_dax *fcd;
	int err;

	fc->dax_mode = dax_mode;

	if (!dax_dev)
		return 0;

	fcd = kzalloc(sizeof(*fcd), GFP_KERNEL);
	if (!fcd)
		return -ENOMEM;

	spin_lock_init(&fcd->lock);
	fcd->dev = dax_dev;
	err = fuse_dax_mem_range_init(fcd);
	if (err) {
		kfree(fcd);
		return err;
	}

	fc->dax = fcd;
	return 0;
}

bool fuse_dax_inode_alloc(struct super_block *sb, struct fuse_inode *fi)
{
	struct fuse_conn *fc = get_fuse_conn_super(sb);

	fi->dax = NULL;
	if (fc->dax) {
		fi->dax = kzalloc(sizeof(*fi->dax), GFP_KERNEL_ACCOUNT);
		if (!fi->dax)
			return false;

		init_rwsem(&fi->dax->sem);
		fi->dax->tree = RB_ROOT_CACHED;
	}

	return true;
}

static const struct address_space_operations fuse_dax_file_aops = {
	.direct_IO = noop_direct_IO,
	.dirty_folio = noop_dirty_folio,
};

static bool fuse_should_enable_dax(struct inode *inode, unsigned int flags)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	enum fuse_dax_mode dax_mode = fc->dax_mode;

	if (dax_mode == FUSE_DAX_NEVER)
		return false;

	/*
	 * fc->dax may be NULL in 'inode' mode when filesystem device doesn't
	 * support DAX, in which case it will silently fallback to 'never' mode.
	 */
	if (!fc->dax)
		return false;

	if (dax_mode == FUSE_DAX_ALWAYS)
		return true;

	/* dax_mode is FUSE_DAX_INODE* */
	return fc->inode_dax && (flags & FUSE_ATTR_DAX);
}

void fuse_dax_inode_init(struct inode *inode, unsigned int flags)
{
	if (!fuse_should_enable_dax(inode, flags))
		return;

	inode->i_flags |= S_DAX;
	inode->i_data.a_ops = &fuse_dax_file_aops;
}

void fuse_dax_dontcache(struct inode *inode, unsigned int flags)
{
	struct fuse_conn *fc = get_fuse_conn(inode);

	if (fuse_is_inode_dax_mode(fc->dax_mode) &&
	    ((bool) IS_DAX(inode) != (bool) (flags & FUSE_ATTR_DAX)))
		d_mark_dontcache(inode);
}

bool fuse_dax_check_alignment(struct fuse_conn *fc, unsigned int map_alignment)
{
	if (fc->dax && (map_alignment > FUSE_DAX_SHIFT)) {
		pr_warn("FUSE: map_alignment %u incompatible with dax mem range size %u\n",
			map_alignment, FUSE_DAX_SZ);
		return false;
	}
	return true;
}

void fuse_dax_cancel_work(struct fuse_conn *fc)
{
	struct fuse_conn_dax *fcd = fc->dax;

	if (fcd)
		cancel_delayed_work_sync(&fcd->free_work);
}
EXPORT_SYMBOL_GPL(fuse_dax_cancel_work);