Commit | Line | Data |
---|---|---|
1dd53957 VG |
1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* | |
3 | * dax: direct host memory access | |
4 | * Copyright (C) 2020 Red Hat, Inc. | |
5 | */ | |
6 | ||
7 | #include "fuse_i.h" | |
8 | ||
9a752d18 | 9 | #include <linux/delay.h> |
1dd53957 | 10 | #include <linux/dax.h> |
c2d0ad00 | 11 | #include <linux/uio.h> |
3a6b2162 | 12 | #include <linux/pagemap.h> |
45f2348e | 13 | #include <linux/pfn_t.h> |
c2d0ad00 VG |
14 | #include <linux/iomap.h> |
15 | #include <linux/interval_tree.h> | |
45f2348e | 16 | |
fd1a1dc6 SH |
17 | /* |
18 | * Default memory range size. A power of 2 so it agrees with common FUSE_INIT | |
19 | * map_alignment values 4KB and 64KB. | |
20 | */ | |
45f2348e VG |
21 | #define FUSE_DAX_SHIFT 21 |
22 | #define FUSE_DAX_SZ (1 << FUSE_DAX_SHIFT) | |
23 | #define FUSE_DAX_PAGES (FUSE_DAX_SZ / PAGE_SIZE) | |
24 | ||
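The constants above mean every range covers a 2 MiB, 2 MiB-aligned slice of both the file and the DAX window. A minimal user-space sketch of the index arithmetic the rest of the file relies on (the sample offset is purely illustrative, not taken from the driver):

```c
#include <stdio.h>
#include <stdint.h>

#define FUSE_DAX_SHIFT 21
#define FUSE_DAX_SZ    (1ULL << FUSE_DAX_SHIFT)   /* 2 MiB per range */

int main(void)
{
	/* Hypothetical file offset: 5 MiB + 4 KiB. */
	uint64_t pos = 5ULL * 1024 * 1024 + 4096;

	uint64_t start_idx = pos >> FUSE_DAX_SHIFT;                /* range index 2 */
	uint64_t range_off = pos - (start_idx << FUSE_DAX_SHIFT);  /* 1 MiB + 4 KiB */

	printf("range index %llu, offset 0x%llx within a 0x%llx-byte range\n",
	       (unsigned long long)start_idx,
	       (unsigned long long)range_off,
	       (unsigned long long)FUSE_DAX_SZ);
	return 0;
}
```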
9a752d18 VG |
25 | /* Number of ranges reclaimer will try to free in one invocation */ |
26 | #define FUSE_DAX_RECLAIM_CHUNK (10) | |
27 | ||
28 | /* | |
29 | * DAX memory reclaim threshold as a percentage of total ranges. When the |
30 | * number of free ranges drops below this threshold, reclaim can trigger. |
31 | * Default is 20%. |
32 | */ | |
33 | #define FUSE_DAX_RECLAIM_THRESHOLD (20) | |
34 | ||
45f2348e VG |
35 | /** Translation information for file offsets to DAX window offsets */ |
36 | struct fuse_dax_mapping { | |
9a752d18 VG |
37 | /* Pointer to inode where this memory range is mapped */ |
38 | struct inode *inode; | |
39 | ||
45f2348e VG |
40 | /* Will connect in fcd->free_ranges to keep track of free memory */ |
41 | struct list_head list; | |
42 | ||
c2d0ad00 VG |
43 | /* For interval tree in file/inode */ |
44 | struct interval_tree_node itn; | |
45 | ||
d0cfb9dc VG |
46 | /* Will connect in fcd->busy_ranges to keep track of busy memory */ |
47 | struct list_head busy_list; | |
48 | ||
45f2348e VG |
49 | /** Position in DAX window */ |
50 | u64 window_offset; | |
51 | ||
52 | /** Length of mapping, in bytes */ | |
53 | loff_t length; | |
c2d0ad00 VG |
54 | |
55 | /* Is this mapping read-only or read-write */ | |
56 | bool writable; | |
9a752d18 VG |
57 | |
58 | /* reference count when the mapping is used by dax iomap. */ | |
59 | refcount_t refcnt; | |
c2d0ad00 VG |
60 | }; |
61 | ||
62 | /* Per-inode dax map */ | |
63 | struct fuse_inode_dax { | |
64 | /* Semaphore to protect modifications to the dmap tree */ | |
65 | struct rw_semaphore sem; | |
66 | ||
67 | /* Sorted rb tree of struct fuse_dax_mapping elements */ | |
68 | struct rb_root_cached tree; | |
69 | unsigned long nr; | |
45f2348e | 70 | }; |
1dd53957 VG |
71 | |
72 | struct fuse_conn_dax { | |
73 | /* DAX device */ | |
74 | struct dax_device *dev; | |
45f2348e | 75 | |
c2d0ad00 VG |
76 | /* Lock protecting accesses to members of this structure */ |
77 | spinlock_t lock; | |
78 | ||
d0cfb9dc VG |
79 | /* List of memory ranges which are busy */ |
80 | unsigned long nr_busy_ranges; | |
81 | struct list_head busy_ranges; | |
82 | ||
9a752d18 VG |
83 | /* Worker to free up memory ranges */ |
84 | struct delayed_work free_work; | |
85 | ||
86 | /* Wait queue for a dax range to become free */ | |
87 | wait_queue_head_t range_waitq; | |
88 | ||
45f2348e VG |
89 | /* DAX Window Free Ranges */ |
90 | long nr_free_ranges; | |
91 | struct list_head free_ranges; | |
9a752d18 VG |
92 | |
93 | unsigned long nr_ranges; | |
1dd53957 VG |
94 | }; |
95 | ||
c2d0ad00 VG |
96 | static inline struct fuse_dax_mapping * |
97 | node_to_dmap(struct interval_tree_node *node) | |
98 | { | |
99 | if (!node) | |
100 | return NULL; | |
101 | ||
102 | return container_of(node, struct fuse_dax_mapping, itn); | |
103 | } | |
104 | ||
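node_to_dmap() recovers the enclosing fuse_dax_mapping from the interval_tree_node embedded inside it. A user-space sketch of the same container_of() pattern (the struct names here are stand-ins, not the kernel types):

```c
#include <stdio.h>
#include <stddef.h>

/* User-space equivalent of the kernel's container_of() */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct node { long start, last; };           /* stand-in for interval_tree_node */
struct mapping { int id; struct node itn; }; /* stand-in for fuse_dax_mapping   */

int main(void)
{
	struct mapping m = { .id = 42 };
	struct node *n = &m.itn;                 /* only the embedded node is known */

	struct mapping *back = container_of(n, struct mapping, itn);
	printf("recovered id = %d\n", back->id); /* prints 42 */
	return 0;
}
```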
9a752d18 VG |
105 | static struct fuse_dax_mapping * |
106 | alloc_dax_mapping_reclaim(struct fuse_conn_dax *fcd, struct inode *inode); | |
107 | ||
108 | static void | |
109 | __kick_dmap_free_worker(struct fuse_conn_dax *fcd, unsigned long delay_ms) | |
110 | { | |
111 | unsigned long free_threshold; | |
112 | ||
113 | /* If the number of free ranges is below the threshold, start reclaim */ |
114 | free_threshold = max_t(unsigned long, fcd->nr_ranges * FUSE_DAX_RECLAIM_THRESHOLD / 100, | |
115 | 1); | |
116 | if (fcd->nr_free_ranges < free_threshold) | |
117 | queue_delayed_work(system_long_wq, &fcd->free_work, | |
118 | msecs_to_jiffies(delay_ms)); | |
119 | } | |
120 | ||
121 | static void kick_dmap_free_worker(struct fuse_conn_dax *fcd, | |
122 | unsigned long delay_ms) | |
123 | { | |
124 | spin_lock(&fcd->lock); | |
125 | __kick_dmap_free_worker(fcd, delay_ms); | |
126 | spin_unlock(&fcd->lock); | |
127 | } | |
128 | ||
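The worker is only queued when the free range count drops below max(20% of all ranges, 1). A small stand-alone sketch of that threshold check (the range counts are made up for illustration):

```c
#include <stdio.h>

#define FUSE_DAX_RECLAIM_THRESHOLD 20	/* percent, as in the driver */

/* Returns nonzero when the background reclaim worker would be queued. */
int should_kick_reclaim(unsigned long nr_ranges, long nr_free_ranges)
{
	unsigned long free_threshold = nr_ranges * FUSE_DAX_RECLAIM_THRESHOLD / 100;

	if (free_threshold < 1)
		free_threshold = 1;	/* same effect as max_t(..., 1) */
	return nr_free_ranges < (long)free_threshold;
}

int main(void)
{
	/* 2048 ranges total -> threshold is 409 free ranges. */
	printf("%d\n", should_kick_reclaim(2048, 500));	/* 0: enough free ranges   */
	printf("%d\n", should_kick_reclaim(2048, 300));	/* 1: below the threshold  */
	return 0;
}
```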
c2d0ad00 VG |
129 | static struct fuse_dax_mapping *alloc_dax_mapping(struct fuse_conn_dax *fcd) |
130 | { | |
131 | struct fuse_dax_mapping *dmap; | |
132 | ||
133 | spin_lock(&fcd->lock); | |
134 | dmap = list_first_entry_or_null(&fcd->free_ranges, | |
135 | struct fuse_dax_mapping, list); | |
136 | if (dmap) { | |
137 | list_del_init(&dmap->list); | |
138 | WARN_ON(fcd->nr_free_ranges <= 0); | |
139 | fcd->nr_free_ranges--; | |
140 | } | |
47e30149 | 141 | __kick_dmap_free_worker(fcd, 0); |
c2d0ad00 | 142 | spin_unlock(&fcd->lock); |
9a752d18 | 143 | |
c2d0ad00 VG |
144 | return dmap; |
145 | } | |
146 | ||
d0cfb9dc VG |
147 | /* This assumes fcd->lock is held */ |
148 | static void __dmap_remove_busy_list(struct fuse_conn_dax *fcd, | |
149 | struct fuse_dax_mapping *dmap) | |
150 | { | |
151 | list_del_init(&dmap->busy_list); | |
152 | WARN_ON(fcd->nr_busy_ranges == 0); | |
153 | fcd->nr_busy_ranges--; | |
154 | } | |
155 | ||
9a752d18 VG |
156 | static void dmap_remove_busy_list(struct fuse_conn_dax *fcd, |
157 | struct fuse_dax_mapping *dmap) | |
158 | { | |
159 | spin_lock(&fcd->lock); | |
160 | __dmap_remove_busy_list(fcd, dmap); | |
161 | spin_unlock(&fcd->lock); | |
162 | } | |
163 | ||
c2d0ad00 VG |
164 | /* This assumes fcd->lock is held */ |
165 | static void __dmap_add_to_free_pool(struct fuse_conn_dax *fcd, | |
166 | struct fuse_dax_mapping *dmap) | |
167 | { | |
168 | list_add_tail(&dmap->list, &fcd->free_ranges); | |
169 | fcd->nr_free_ranges++; | |
9a752d18 | 170 | wake_up(&fcd->range_waitq); |
c2d0ad00 VG |
171 | } |
172 | ||
173 | static void dmap_add_to_free_pool(struct fuse_conn_dax *fcd, | |
174 | struct fuse_dax_mapping *dmap) | |
175 | { | |
176 | /* Return fuse_dax_mapping to free list */ | |
177 | spin_lock(&fcd->lock); | |
178 | __dmap_add_to_free_pool(fcd, dmap); | |
179 | spin_unlock(&fcd->lock); | |
180 | } | |
181 | ||
182 | static int fuse_setup_one_mapping(struct inode *inode, unsigned long start_idx, | |
183 | struct fuse_dax_mapping *dmap, bool writable, | |
184 | bool upgrade) | |
185 | { | |
fcee216b MR |
186 | struct fuse_mount *fm = get_fuse_mount(inode); |
187 | struct fuse_conn_dax *fcd = fm->fc->dax; | |
c2d0ad00 VG |
188 | struct fuse_inode *fi = get_fuse_inode(inode); |
189 | struct fuse_setupmapping_in inarg; | |
190 | loff_t offset = start_idx << FUSE_DAX_SHIFT; | |
191 | FUSE_ARGS(args); | |
192 | ssize_t err; | |
193 | ||
194 | WARN_ON(fcd->nr_free_ranges < 0); | |
195 | ||
196 | /* Ask fuse daemon to setup mapping */ | |
197 | memset(&inarg, 0, sizeof(inarg)); | |
198 | inarg.foffset = offset; | |
199 | inarg.fh = -1; | |
200 | inarg.moffset = dmap->window_offset; | |
201 | inarg.len = FUSE_DAX_SZ; | |
202 | inarg.flags |= FUSE_SETUPMAPPING_FLAG_READ; | |
203 | if (writable) | |
204 | inarg.flags |= FUSE_SETUPMAPPING_FLAG_WRITE; | |
205 | args.opcode = FUSE_SETUPMAPPING; | |
206 | args.nodeid = fi->nodeid; | |
207 | args.in_numargs = 1; | |
208 | args.in_args[0].size = sizeof(inarg); | |
209 | args.in_args[0].value = &inarg; | |
fcee216b | 210 | err = fuse_simple_request(fm, &args); |
c2d0ad00 VG |
211 | if (err < 0) |
212 | return err; | |
213 | dmap->writable = writable; | |
214 | if (!upgrade) { | |
9a752d18 | 215 | /* |
c4e0cd4e | 216 | * We don't take a reference on inode. inode is valid right now |
9a752d18 VG |
217 | * and when inode is going away, cleanup logic should first |
218 | * cleanup dmap entries. | |
219 | */ | |
220 | dmap->inode = inode; | |
c2d0ad00 VG |
221 | dmap->itn.start = dmap->itn.last = start_idx; |
222 | /* Protected by fi->dax->sem */ | |
223 | interval_tree_insert(&dmap->itn, &fi->dax->tree); | |
224 | fi->dax->nr++; | |
d0cfb9dc VG |
225 | spin_lock(&fcd->lock); |
226 | list_add_tail(&dmap->busy_list, &fcd->busy_ranges); | |
227 | fcd->nr_busy_ranges++; | |
228 | spin_unlock(&fcd->lock); | |
c2d0ad00 VG |
229 | } |
230 | return 0; | |
231 | } | |
232 | ||
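On the other end of the connection, a virtiofs daemon typically services FUSE_SETUPMAPPING by mapping the requested region of the file into the shared DAX window at the offset the kernel chose. A much-simplified, hypothetical user-space sketch of such a handler (window_base, the fd lookup and the error handling are assumptions, not virtiofsd's actual implementation):

```c
#include <sys/mman.h>
#include <errno.h>
#include <stdint.h>

/*
 * Hypothetical handler: map 'len' bytes of 'fd' at file offset 'foffset'
 * into the DAX window at window offset 'moffset'. 'window_base' is assumed
 * to be the daemon's mapping of the memory region exported to the guest.
 */
int setup_one_mapping(void *window_base, int fd, uint64_t foffset,
		      uint64_t moffset, uint64_t len, int writable)
{
	int prot = PROT_READ | (writable ? PROT_WRITE : 0);
	void *addr = (char *)window_base + moffset;

	/* MAP_FIXED replaces whatever was previously mapped at this slot. */
	if (mmap(addr, len, prot, MAP_SHARED | MAP_FIXED, fd, foffset) == MAP_FAILED)
		return -errno;
	return 0;
}
```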
233 | static int fuse_send_removemapping(struct inode *inode, | |
234 | struct fuse_removemapping_in *inargp, | |
235 | struct fuse_removemapping_one *remove_one) | |
236 | { | |
237 | struct fuse_inode *fi = get_fuse_inode(inode); | |
fcee216b | 238 | struct fuse_mount *fm = get_fuse_mount(inode); |
c2d0ad00 VG |
239 | FUSE_ARGS(args); |
240 | ||
241 | args.opcode = FUSE_REMOVEMAPPING; | |
242 | args.nodeid = fi->nodeid; | |
243 | args.in_numargs = 2; | |
244 | args.in_args[0].size = sizeof(*inargp); | |
245 | args.in_args[0].value = inargp; | |
246 | args.in_args[1].size = inargp->count * sizeof(*remove_one); | |
247 | args.in_args[1].value = remove_one; | |
fcee216b | 248 | return fuse_simple_request(fm, &args); |
c2d0ad00 VG |
249 | } |
250 | ||
251 | static int dmap_removemapping_list(struct inode *inode, unsigned int num, | |
252 | struct list_head *to_remove) | |
253 | { | |
254 | struct fuse_removemapping_one *remove_one, *ptr; | |
255 | struct fuse_removemapping_in inarg; | |
256 | struct fuse_dax_mapping *dmap; | |
257 | int ret, i = 0, nr_alloc; | |
258 | ||
259 | nr_alloc = min_t(unsigned int, num, FUSE_REMOVEMAPPING_MAX_ENTRY); | |
260 | remove_one = kmalloc_array(nr_alloc, sizeof(*remove_one), GFP_NOFS); | |
261 | if (!remove_one) | |
262 | return -ENOMEM; | |
263 | ||
264 | ptr = remove_one; | |
265 | list_for_each_entry(dmap, to_remove, list) { | |
266 | ptr->moffset = dmap->window_offset; | |
267 | ptr->len = dmap->length; | |
268 | ptr++; | |
269 | i++; | |
270 | num--; | |
271 | if (i >= nr_alloc || num == 0) { | |
272 | memset(&inarg, 0, sizeof(inarg)); | |
273 | inarg.count = i; | |
274 | ret = fuse_send_removemapping(inode, &inarg, | |
275 | remove_one); | |
276 | if (ret) | |
277 | goto out; | |
278 | ptr = remove_one; | |
279 | i = 0; | |
280 | } | |
281 | } | |
282 | out: | |
283 | kfree(remove_one); | |
284 | return ret; | |
285 | } | |
286 | ||
287 | /* | |
288 | * Cleanup dmap entry and add back to free list. This should be called with | |
289 | * fcd->lock held. | |
290 | */ | |
291 | static void dmap_reinit_add_to_free_pool(struct fuse_conn_dax *fcd, | |
292 | struct fuse_dax_mapping *dmap) | |
293 | { | |
294 | pr_debug("fuse: freeing memory range start_idx=0x%lx end_idx=0x%lx window_offset=0x%llx length=0x%llx\n", | |
295 | dmap->itn.start, dmap->itn.last, dmap->window_offset, | |
296 | dmap->length); | |
d0cfb9dc | 297 | __dmap_remove_busy_list(fcd, dmap); |
9a752d18 | 298 | dmap->inode = NULL; |
c2d0ad00 VG |
299 | dmap->itn.start = dmap->itn.last = 0; |
300 | __dmap_add_to_free_pool(fcd, dmap); | |
301 | } | |
302 | ||
303 | /* | |
304 | * Free inode dmap entries whose range falls inside [start, end]. | |
305 | * Does not take any locks. Currently it should only be |
306 | * called from the evict_inode() path, where we know all dmap entries can be |
307 | * reclaimed. | |
308 | */ | |
309 | static void inode_reclaim_dmap_range(struct fuse_conn_dax *fcd, | |
310 | struct inode *inode, | |
311 | loff_t start, loff_t end) | |
312 | { | |
313 | struct fuse_inode *fi = get_fuse_inode(inode); | |
314 | struct fuse_dax_mapping *dmap, *n; | |
315 | int err, num = 0; | |
316 | LIST_HEAD(to_remove); | |
317 | unsigned long start_idx = start >> FUSE_DAX_SHIFT; | |
318 | unsigned long end_idx = end >> FUSE_DAX_SHIFT; | |
319 | struct interval_tree_node *node; | |
320 | ||
321 | while (1) { | |
322 | node = interval_tree_iter_first(&fi->dax->tree, start_idx, | |
323 | end_idx); | |
324 | if (!node) | |
325 | break; | |
326 | dmap = node_to_dmap(node); | |
9a752d18 VG |
327 | /* inode is going away. There should not be any users of dmap */ |
328 | WARN_ON(refcount_read(&dmap->refcnt) > 1); | |
c2d0ad00 VG |
329 | interval_tree_remove(&dmap->itn, &fi->dax->tree); |
330 | num++; | |
331 | list_add(&dmap->list, &to_remove); | |
332 | } | |
333 | ||
334 | /* Nothing to remove */ | |
335 | if (list_empty(&to_remove)) | |
336 | return; | |
337 | ||
338 | WARN_ON(fi->dax->nr < num); | |
339 | fi->dax->nr -= num; | |
340 | err = dmap_removemapping_list(inode, num, &to_remove); | |
341 | if (err && err != -ENOTCONN) { | |
342 | pr_warn("Failed to removemappings. start=0x%llx end=0x%llx\n", | |
343 | start, end); | |
344 | } | |
345 | spin_lock(&fcd->lock); | |
346 | list_for_each_entry_safe(dmap, n, &to_remove, list) { | |
347 | list_del_init(&dmap->list); | |
348 | dmap_reinit_add_to_free_pool(fcd, dmap); | |
349 | } | |
350 | spin_unlock(&fcd->lock); | |
351 | } | |
352 | ||
9a752d18 VG |
353 | static int dmap_removemapping_one(struct inode *inode, |
354 | struct fuse_dax_mapping *dmap) | |
355 | { | |
356 | struct fuse_removemapping_one forget_one; | |
357 | struct fuse_removemapping_in inarg; | |
358 | ||
359 | memset(&inarg, 0, sizeof(inarg)); | |
360 | inarg.count = 1; | |
361 | memset(&forget_one, 0, sizeof(forget_one)); | |
362 | forget_one.moffset = dmap->window_offset; | |
363 | forget_one.len = dmap->length; | |
364 | ||
365 | return fuse_send_removemapping(inode, &inarg, &forget_one); | |
366 | } | |
367 | ||
c2d0ad00 VG |
368 | /* |
369 | * This is called from evict_inode(), and by that time the inode is going |
370 | * away. So this function does not take any locks like fi->dax->sem while |
371 | * traversing the fuse inode interval tree. If that lock were taken, lockdep |
372 | * would complain about a deadlock with the fs_reclaim lock. |
373 | */ | |
374 | void fuse_dax_inode_cleanup(struct inode *inode) | |
375 | { | |
376 | struct fuse_conn *fc = get_fuse_conn(inode); | |
377 | struct fuse_inode *fi = get_fuse_inode(inode); | |
378 | ||
379 | /* | |
380 | * fuse_evict_inode() has already called truncate_inode_pages_final() | |
381 | * before we arrive here. So we should not have to worry about any | |
382 | * pages/exception entries still associated with inode. | |
383 | */ | |
384 | inode_reclaim_dmap_range(fc->dax, inode, 0, -1); | |
385 | WARN_ON(fi->dax->nr); | |
386 | } | |
387 | ||
388 | static void fuse_fill_iomap_hole(struct iomap *iomap, loff_t length) | |
389 | { | |
390 | iomap->addr = IOMAP_NULL_ADDR; | |
391 | iomap->length = length; | |
392 | iomap->type = IOMAP_HOLE; | |
393 | } | |
394 | ||
395 | static void fuse_fill_iomap(struct inode *inode, loff_t pos, loff_t length, | |
396 | struct iomap *iomap, struct fuse_dax_mapping *dmap, | |
397 | unsigned int flags) | |
398 | { | |
399 | loff_t offset, len; | |
400 | loff_t i_size = i_size_read(inode); | |
401 | ||
402 | offset = pos - (dmap->itn.start << FUSE_DAX_SHIFT); | |
403 | len = min(length, dmap->length - offset); | |
404 | ||
405 | /* If length is beyond end of file, truncate further */ | |
406 | if (pos + len > i_size) | |
407 | len = i_size - pos; | |
408 | ||
409 | if (len > 0) { | |
410 | iomap->addr = dmap->window_offset + offset; | |
411 | iomap->length = len; | |
412 | if (flags & IOMAP_FAULT) | |
413 | iomap->length = ALIGN(len, PAGE_SIZE); | |
414 | iomap->type = IOMAP_MAPPED; | |
9a752d18 VG |
415 | /* |
416 | * Increase refcnt so that reclaim code knows this dmap is in |
417 | * use. This assumes fi->dax->sem is held either shared or |
418 | * exclusive. |
419 | */ | |
420 | refcount_inc(&dmap->refcnt); | |
421 | ||
422 | /* iomap->private should be NULL */ | |
423 | WARN_ON_ONCE(iomap->private); | |
424 | iomap->private = dmap; | |
c2d0ad00 VG |
425 | } else { |
426 | /* Mapping beyond end of file is hole */ | |
427 | fuse_fill_iomap_hole(iomap, length); | |
428 | } | |
429 | } | |
430 | ||
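A worked example of the clamping above, assuming a dmap at range index 2 (file offsets 4-6 MiB), a 4 MiB request starting at 5 MiB and a 5.5 MiB file; the window offset is an arbitrary illustrative value, not something the driver guarantees:

```c
#include <stdio.h>
#include <stdint.h>

#define FUSE_DAX_SHIFT 21

/* Mirrors the clamping in fuse_fill_iomap() for one hypothetical dmap. */
int main(void)
{
	uint64_t dmap_start_idx = 2;                      /* covers 4..6 MiB of the file */
	uint64_t dmap_window_offset = 0x40000000;         /* assumed slot in DAX window  */
	uint64_t dmap_length = 1ULL << FUSE_DAX_SHIFT;    /* 2 MiB                       */

	uint64_t pos = 5ULL * 1024 * 1024;                /* request at 5 MiB ...        */
	uint64_t length = 4ULL * 1024 * 1024;             /* ... for 4 MiB               */
	uint64_t i_size = 5ULL * 1024 * 1024 + 512 * 1024;/* file is 5.5 MiB long        */

	uint64_t offset = pos - (dmap_start_idx << FUSE_DAX_SHIFT); /* 1 MiB into range  */
	uint64_t len = length < dmap_length - offset ? length : dmap_length - offset;

	if (pos + len > i_size)                           /* clamp to end of file        */
		len = i_size - pos;

	printf("iomap.addr=0x%llx iomap.length=0x%llx\n",
	       (unsigned long long)(dmap_window_offset + offset),
	       (unsigned long long)len);                  /* 0x40100000, 0x80000 (512 KiB) */
	return 0;
}
```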
431 | static int fuse_setup_new_dax_mapping(struct inode *inode, loff_t pos, | |
432 | loff_t length, unsigned int flags, | |
433 | struct iomap *iomap) | |
434 | { | |
435 | struct fuse_inode *fi = get_fuse_inode(inode); | |
436 | struct fuse_conn *fc = get_fuse_conn(inode); | |
437 | struct fuse_conn_dax *fcd = fc->dax; | |
438 | struct fuse_dax_mapping *dmap, *alloc_dmap = NULL; | |
439 | int ret; | |
440 | bool writable = flags & IOMAP_WRITE; | |
441 | unsigned long start_idx = pos >> FUSE_DAX_SHIFT; | |
442 | struct interval_tree_node *node; | |
443 | ||
9a752d18 VG |
444 | /* |
445 | * Can't do inline reclaim in fault path. We call | |
446 | * dax_layout_busy_page() before we free a range. And | |
8bcbbe9c JK |
447 | * fuse_wait_dax_page() drops mapping->invalidate_lock and re-acquires it. |
448 | * In fault path we enter with mapping->invalidate_lock held and can't | |
449 | * drop it. Also in fault path we hold mapping->invalidate_lock shared | |
450 | * and not exclusive, so that creates further issues with | |
451 | * fuse_wait_dax_page(). Hence return -EAGAIN and fuse_dax_fault() | |
452 | * will wait for a memory range to become free and retry. | |
9a752d18 VG |
453 | */ |
454 | if (flags & IOMAP_FAULT) { | |
455 | alloc_dmap = alloc_dax_mapping(fcd); | |
456 | if (!alloc_dmap) | |
457 | return -EAGAIN; | |
458 | } else { | |
459 | alloc_dmap = alloc_dax_mapping_reclaim(fcd, inode); | |
460 | if (IS_ERR(alloc_dmap)) | |
461 | return PTR_ERR(alloc_dmap); | |
462 | } | |
463 | ||
464 | /* If we are here, we should have memory allocated */ | |
465 | if (WARN_ON(!alloc_dmap)) | |
c2d0ad00 VG |
466 | return -EIO; |
467 | ||
468 | /* | |
469 | * Take write lock so that only one caller can try to setup mapping | |
470 | * and others wait. |
471 | */ | |
472 | down_write(&fi->dax->sem); | |
473 | /* | |
474 | * We dropped the lock. Check again whether somebody else has set up |
475 | * the mapping already. |
476 | */ | |
477 | node = interval_tree_iter_first(&fi->dax->tree, start_idx, start_idx); | |
478 | if (node) { | |
479 | dmap = node_to_dmap(node); | |
480 | fuse_fill_iomap(inode, pos, length, iomap, dmap, flags); | |
481 | dmap_add_to_free_pool(fcd, alloc_dmap); | |
482 | up_write(&fi->dax->sem); | |
483 | return 0; | |
484 | } | |
485 | ||
486 | /* Setup one mapping */ | |
487 | ret = fuse_setup_one_mapping(inode, pos >> FUSE_DAX_SHIFT, alloc_dmap, | |
488 | writable, false); | |
489 | if (ret < 0) { | |
490 | dmap_add_to_free_pool(fcd, alloc_dmap); | |
491 | up_write(&fi->dax->sem); | |
492 | return ret; | |
493 | } | |
494 | fuse_fill_iomap(inode, pos, length, iomap, alloc_dmap, flags); | |
495 | up_write(&fi->dax->sem); | |
496 | return 0; | |
497 | } | |
498 | ||
499 | static int fuse_upgrade_dax_mapping(struct inode *inode, loff_t pos, | |
500 | loff_t length, unsigned int flags, | |
501 | struct iomap *iomap) | |
502 | { | |
503 | struct fuse_inode *fi = get_fuse_inode(inode); | |
504 | struct fuse_dax_mapping *dmap; | |
505 | int ret; | |
506 | unsigned long idx = pos >> FUSE_DAX_SHIFT; | |
507 | struct interval_tree_node *node; | |
508 | ||
509 | /* | |
510 | * Take exclusive lock so that only one caller can try to setup | |
511 | * mapping and others wait. | |
512 | */ | |
513 | down_write(&fi->dax->sem); | |
514 | node = interval_tree_iter_first(&fi->dax->tree, idx, idx); | |
515 | ||
8bcbbe9c | 516 | /* We are holding either inode lock or invalidate_lock, and that should |
9a752d18 VG |
517 | * ensure that dmap can't be truncated. We are holding a reference |
518 | * on dmap, and that should make sure it can't be reclaimed. So dmap |
519 | * should still be in the tree despite the fact that we dropped and |
520 | * re-acquired the fi->dax->sem lock. |
c2d0ad00 VG |
521 | */ |
522 | ret = -EIO; | |
523 | if (WARN_ON(!node)) | |
524 | goto out_err; | |
525 | ||
526 | dmap = node_to_dmap(node); | |
527 | ||
9a752d18 VG |
528 | /* We took an extra reference on dmap to make sure its not reclaimd. |
529 | * Now we hold fi->dax->sem lock and that reference is not needed | |
530 | * anymore. Drop it. | |
531 | */ | |
532 | if (refcount_dec_and_test(&dmap->refcnt)) { | |
533 | /* refcount should not hit 0. This object only goes | |
534 | * away when fuse connection goes away | |
535 | */ | |
536 | WARN_ON_ONCE(1); | |
537 | } | |
538 | ||
c2d0ad00 VG |
539 | /* Maybe another thread already upgraded mapping while we were not |
540 | * holding lock. | |
541 | */ | |
542 | if (dmap->writable) { | |
543 | ret = 0; | |
544 | goto out_fill_iomap; | |
545 | } | |
546 | ||
547 | ret = fuse_setup_one_mapping(inode, pos >> FUSE_DAX_SHIFT, dmap, true, | |
548 | true); | |
549 | if (ret < 0) | |
550 | goto out_err; | |
551 | out_fill_iomap: | |
552 | fuse_fill_iomap(inode, pos, length, iomap, dmap, flags); | |
553 | out_err: | |
554 | up_write(&fi->dax->sem); | |
555 | return ret; | |
556 | } | |
557 | ||
558 | /* This is just for DAX and the mapping is ephemeral, do not use it for other | |
559 | * purposes since there is no block device with a permanent mapping. | |
560 | */ | |
561 | static int fuse_iomap_begin(struct inode *inode, loff_t pos, loff_t length, | |
562 | unsigned int flags, struct iomap *iomap, | |
563 | struct iomap *srcmap) | |
564 | { | |
565 | struct fuse_inode *fi = get_fuse_inode(inode); | |
566 | struct fuse_conn *fc = get_fuse_conn(inode); | |
567 | struct fuse_dax_mapping *dmap; | |
568 | bool writable = flags & IOMAP_WRITE; | |
569 | unsigned long start_idx = pos >> FUSE_DAX_SHIFT; | |
570 | struct interval_tree_node *node; | |
571 | ||
572 | /* We don't support FIEMAP */ | |
573 | if (WARN_ON(flags & IOMAP_REPORT)) | |
574 | return -EIO; | |
575 | ||
576 | iomap->offset = pos; | |
577 | iomap->flags = 0; | |
578 | iomap->bdev = NULL; | |
579 | iomap->dax_dev = fc->dax->dev; | |
580 | ||
581 | /* | |
582 | * Both the read/write and mmap paths can race here. So we need something |
583 | * to make sure that if we are setting up a mapping, the other path waits. |
584 | * | |
585 | * For now, use a semaphore for this. It probably needs to be | |
586 | * optimized later. | |
587 | */ | |
588 | down_read(&fi->dax->sem); | |
589 | node = interval_tree_iter_first(&fi->dax->tree, start_idx, start_idx); | |
590 | if (node) { | |
591 | dmap = node_to_dmap(node); | |
592 | if (writable && !dmap->writable) { | |
593 | /* Upgrade read-only mapping to read-write. This will | |
594 | * require exclusive fi->dax->sem lock as we don't want | |
595 | * two threads trying to do this simultaneously |
596 | * for the same dmap. So drop the shared lock and acquire |
597 | * exclusive lock. | |
9a752d18 VG |
598 | * |
599 | * Before dropping fi->dax->sem lock, take reference | |
600 | * on dmap so that it's not freed by range reclaim. |
c2d0ad00 | 601 | */ |
9a752d18 | 602 | refcount_inc(&dmap->refcnt); |
c2d0ad00 VG |
603 | up_read(&fi->dax->sem); |
604 | pr_debug("%s: Upgrading mapping at offset 0x%llx length 0x%llx\n", | |
605 | __func__, pos, length); | |
606 | return fuse_upgrade_dax_mapping(inode, pos, length, | |
607 | flags, iomap); | |
608 | } else { | |
609 | fuse_fill_iomap(inode, pos, length, iomap, dmap, flags); | |
610 | up_read(&fi->dax->sem); | |
611 | return 0; | |
612 | } | |
613 | } else { | |
614 | up_read(&fi->dax->sem); | |
615 | pr_debug("%s: no mapping at offset 0x%llx length 0x%llx\n", | |
616 | __func__, pos, length); | |
617 | if (pos >= i_size_read(inode)) | |
618 | goto iomap_hole; | |
619 | ||
620 | return fuse_setup_new_dax_mapping(inode, pos, length, flags, | |
621 | iomap); | |
622 | } | |
623 | ||
624 | /* | |
c4e0cd4e | 625 | * If a read beyond end of file happens, fs code seems to return |
c2d0ad00 VG |
626 | * it as a hole |
627 | */ | |
628 | iomap_hole: | |
629 | fuse_fill_iomap_hole(iomap, length); | |
630 | pr_debug("%s returning hole mapping. pos=0x%llx length_asked=0x%llx length_returned=0x%llx\n", | |
631 | __func__, pos, length, iomap->length); | |
632 | return 0; | |
633 | } | |
634 | ||
635 | static int fuse_iomap_end(struct inode *inode, loff_t pos, loff_t length, | |
636 | ssize_t written, unsigned int flags, | |
637 | struct iomap *iomap) | |
638 | { | |
9a752d18 VG |
639 | struct fuse_dax_mapping *dmap = iomap->private; |
640 | ||
641 | if (dmap) { | |
642 | if (refcount_dec_and_test(&dmap->refcnt)) { | |
643 | /* refcount should not hit 0. This object only goes | |
644 | * away when fuse connection goes away | |
645 | */ | |
646 | WARN_ON_ONCE(1); | |
647 | } | |
648 | } | |
649 | ||
c2d0ad00 VG |
650 | /* DAX writes beyond end-of-file aren't handled using iomap, so the |
651 | * file size is unchanged and there is nothing to do here. | |
652 | */ | |
653 | return 0; | |
654 | } | |
655 | ||
656 | static const struct iomap_ops fuse_iomap_ops = { | |
657 | .iomap_begin = fuse_iomap_begin, | |
658 | .iomap_end = fuse_iomap_end, | |
659 | }; | |
660 | ||
6ae330ca VG |
661 | static void fuse_wait_dax_page(struct inode *inode) |
662 | { | |
8bcbbe9c | 663 | filemap_invalidate_unlock(inode->i_mapping); |
6ae330ca | 664 | schedule(); |
8bcbbe9c | 665 | filemap_invalidate_lock(inode->i_mapping); |
6ae330ca VG |
666 | } |
667 | ||
8bcbbe9c | 668 | /* Should be called with mapping->invalidate_lock held exclusively */ |
6ae330ca VG |
669 | static int __fuse_dax_break_layouts(struct inode *inode, bool *retry, |
670 | loff_t start, loff_t end) | |
671 | { | |
672 | struct page *page; | |
673 | ||
674 | page = dax_layout_busy_page_range(inode->i_mapping, start, end); | |
675 | if (!page) | |
676 | return 0; | |
677 | ||
678 | *retry = true; | |
679 | return ___wait_var_event(&page->_refcount, | |
680 | atomic_read(&page->_refcount) == 1, TASK_INTERRUPTIBLE, | |
681 | 0, 0, fuse_wait_dax_page(inode)); | |
682 | } | |
683 | ||
684 | /* dmap_end == 0 leads to unmapping of whole file */ | |
685 | int fuse_dax_break_layouts(struct inode *inode, u64 dmap_start, | |
686 | u64 dmap_end) | |
687 | { | |
688 | bool retry; | |
689 | int ret; | |
690 | ||
691 | do { | |
692 | retry = false; | |
693 | ret = __fuse_dax_break_layouts(inode, &retry, dmap_start, | |
694 | dmap_end); | |
695 | } while (ret == 0 && retry); | |
696 | ||
697 | return ret; | |
698 | } | |
699 | ||
c2d0ad00 VG |
700 | ssize_t fuse_dax_read_iter(struct kiocb *iocb, struct iov_iter *to) |
701 | { | |
702 | struct inode *inode = file_inode(iocb->ki_filp); | |
703 | ssize_t ret; | |
704 | ||
705 | if (iocb->ki_flags & IOCB_NOWAIT) { | |
706 | if (!inode_trylock_shared(inode)) | |
707 | return -EAGAIN; | |
708 | } else { | |
709 | inode_lock_shared(inode); | |
710 | } | |
711 | ||
712 | ret = dax_iomap_rw(iocb, to, &fuse_iomap_ops); | |
713 | inode_unlock_shared(inode); | |
714 | ||
715 | /* TODO file_accessed(iocb->ki_filp) */ | |
716 | return ret; | |
717 | } | |
718 | ||
719 | static bool file_extending_write(struct kiocb *iocb, struct iov_iter *from) | |
720 | { | |
721 | struct inode *inode = file_inode(iocb->ki_filp); | |
722 | ||
723 | return (iov_iter_rw(from) == WRITE && | |
724 | ((iocb->ki_pos) >= i_size_read(inode) || | |
725 | (iocb->ki_pos + iov_iter_count(from) > i_size_read(inode)))); | |
726 | } | |
727 | ||
728 | static ssize_t fuse_dax_direct_write(struct kiocb *iocb, struct iov_iter *from) | |
729 | { | |
730 | struct inode *inode = file_inode(iocb->ki_filp); | |
731 | struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb); | |
732 | ssize_t ret; | |
733 | ||
734 | ret = fuse_direct_io(&io, from, &iocb->ki_pos, FUSE_DIO_WRITE); | |
c2d0ad00 | 735 | |
d347739a | 736 | fuse_write_update_attr(inode, iocb->ki_pos, ret); |
c2d0ad00 VG |
737 | return ret; |
738 | } | |
739 | ||
740 | ssize_t fuse_dax_write_iter(struct kiocb *iocb, struct iov_iter *from) | |
741 | { | |
742 | struct inode *inode = file_inode(iocb->ki_filp); | |
743 | ssize_t ret; | |
744 | ||
745 | if (iocb->ki_flags & IOCB_NOWAIT) { | |
746 | if (!inode_trylock(inode)) | |
747 | return -EAGAIN; | |
748 | } else { | |
749 | inode_lock(inode); | |
750 | } | |
751 | ||
752 | ret = generic_write_checks(iocb, from); | |
753 | if (ret <= 0) | |
754 | goto out; | |
755 | ||
756 | ret = file_remove_privs(iocb->ki_filp); | |
757 | if (ret) | |
758 | goto out; | |
759 | /* TODO file_update_time() but we don't want metadata I/O */ | |
760 | ||
761 | /* Do not use dax for file-extending writes, as the write and the |
762 | * on-disk i_size increase are not atomic otherwise. |
763 | */ | |
764 | if (file_extending_write(iocb, from)) | |
765 | ret = fuse_dax_direct_write(iocb, from); | |
766 | else | |
767 | ret = dax_iomap_rw(iocb, from, &fuse_iomap_ops); | |
768 | ||
769 | out: | |
770 | inode_unlock(inode); | |
771 | ||
772 | if (ret > 0) | |
773 | ret = generic_write_sync(iocb, ret); | |
774 | return ret; | |
775 | } | |
776 | ||
9483e7d5 VG |
777 | static int fuse_dax_writepages(struct address_space *mapping, |
778 | struct writeback_control *wbc) | |
779 | { | |
780 | ||
781 | struct inode *inode = mapping->host; | |
782 | struct fuse_conn *fc = get_fuse_conn(inode); | |
783 | ||
784 | return dax_writeback_mapping_range(mapping, fc->dax->dev, wbc); | |
785 | } | |
786 | ||
1d024e7a MWO |
787 | static vm_fault_t __fuse_dax_fault(struct vm_fault *vmf, unsigned int order, |
788 | bool write) | |
2a9a609a SH |
789 | { |
790 | vm_fault_t ret; | |
791 | struct inode *inode = file_inode(vmf->vma->vm_file); | |
792 | struct super_block *sb = inode->i_sb; | |
793 | pfn_t pfn; | |
9a752d18 VG |
794 | int error = 0; |
795 | struct fuse_conn *fc = get_fuse_conn(inode); | |
796 | struct fuse_conn_dax *fcd = fc->dax; | |
797 | bool retry = false; | |
2a9a609a SH |
798 | |
799 | if (write) | |
800 | sb_start_pagefault(sb); | |
9a752d18 VG |
801 | retry: |
802 | if (retry && !(fcd->nr_free_ranges > 0)) | |
803 | wait_event(fcd->range_waitq, (fcd->nr_free_ranges > 0)); | |
2a9a609a | 804 | |
6ae330ca VG |
805 | /* |
806 | * We need to serialize against not only truncate but also against | |
807 | * fuse dax memory range reclaim. While a range is being reclaimed, | |
808 | * we do not want any read/write/mmap to make progress and try | |
809 | * to populate page cache or access memory we are trying to free. | |
810 | */ | |
8bcbbe9c | 811 | filemap_invalidate_lock_shared(inode->i_mapping); |
1d024e7a | 812 | ret = dax_iomap_fault(vmf, order, &pfn, &error, &fuse_iomap_ops); |
9a752d18 VG |
813 | if ((ret & VM_FAULT_ERROR) && error == -EAGAIN) { |
814 | error = 0; | |
815 | retry = true; | |
8bcbbe9c | 816 | filemap_invalidate_unlock_shared(inode->i_mapping); |
9a752d18 VG |
817 | goto retry; |
818 | } | |
2a9a609a SH |
819 | |
820 | if (ret & VM_FAULT_NEEDDSYNC) | |
1d024e7a | 821 | ret = dax_finish_sync_fault(vmf, order, pfn); |
8bcbbe9c | 822 | filemap_invalidate_unlock_shared(inode->i_mapping); |
2a9a609a SH |
823 | |
824 | if (write) | |
825 | sb_end_pagefault(sb); | |
826 | ||
827 | return ret; | |
828 | } | |
829 | ||
830 | static vm_fault_t fuse_dax_fault(struct vm_fault *vmf) | |
831 | { | |
1d024e7a | 832 | return __fuse_dax_fault(vmf, 0, vmf->flags & FAULT_FLAG_WRITE); |
2a9a609a SH |
833 | } |
834 | ||
1d024e7a | 835 | static vm_fault_t fuse_dax_huge_fault(struct vm_fault *vmf, unsigned int order) |
2a9a609a | 836 | { |
1d024e7a | 837 | return __fuse_dax_fault(vmf, order, vmf->flags & FAULT_FLAG_WRITE); |
2a9a609a SH |
838 | } |
839 | ||
840 | static vm_fault_t fuse_dax_page_mkwrite(struct vm_fault *vmf) | |
841 | { | |
1d024e7a | 842 | return __fuse_dax_fault(vmf, 0, true); |
2a9a609a SH |
843 | } |
844 | ||
845 | static vm_fault_t fuse_dax_pfn_mkwrite(struct vm_fault *vmf) | |
846 | { | |
1d024e7a | 847 | return __fuse_dax_fault(vmf, 0, true); |
2a9a609a SH |
848 | } |
849 | ||
850 | static const struct vm_operations_struct fuse_dax_vm_ops = { | |
851 | .fault = fuse_dax_fault, | |
852 | .huge_fault = fuse_dax_huge_fault, | |
853 | .page_mkwrite = fuse_dax_page_mkwrite, | |
854 | .pfn_mkwrite = fuse_dax_pfn_mkwrite, | |
855 | }; | |
856 | ||
857 | int fuse_dax_mmap(struct file *file, struct vm_area_struct *vma) | |
858 | { | |
859 | file_accessed(file); | |
860 | vma->vm_ops = &fuse_dax_vm_ops; | |
1c71222e | 861 | vm_flags_set(vma, VM_MIXEDMAP | VM_HUGEPAGE); |
2a9a609a SH |
862 | return 0; |
863 | } | |
864 | ||
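From user space nothing special is needed: a file on a DAX-enabled virtiofs mount is mmap()-ed like any other file, and the fault handlers above populate the mapping from the DAX window on demand. A minimal sketch (the mount path and file name are assumptions):

```c
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical file on a virtiofs mount with dax enabled. */
	int fd = open("/mnt/virtiofs/data.bin", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Faults on this mapping go through fuse_dax_fault() and friends. */
	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}

	memcpy(p, "hello", 5);	/* store goes straight to the DAX window */
	munmap(p, 4096);
	close(fd);
	return 0;
}
```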
9a752d18 VG |
865 | static int dmap_writeback_invalidate(struct inode *inode, |
866 | struct fuse_dax_mapping *dmap) | |
867 | { | |
868 | int ret; | |
869 | loff_t start_pos = dmap->itn.start << FUSE_DAX_SHIFT; | |
870 | loff_t end_pos = (start_pos + FUSE_DAX_SZ - 1); | |
871 | ||
872 | ret = filemap_fdatawrite_range(inode->i_mapping, start_pos, end_pos); | |
873 | if (ret) { | |
874 | pr_debug("fuse: filemap_fdatawrite_range() failed. err=%d start_pos=0x%llx, end_pos=0x%llx\n", | |
875 | ret, start_pos, end_pos); | |
876 | return ret; | |
877 | } | |
878 | ||
879 | ret = invalidate_inode_pages2_range(inode->i_mapping, | |
880 | start_pos >> PAGE_SHIFT, | |
881 | end_pos >> PAGE_SHIFT); | |
882 | if (ret) | |
883 | pr_debug("fuse: invalidate_inode_pages2_range() failed err=%d\n", | |
884 | ret); | |
885 | ||
886 | return ret; | |
887 | } | |
888 | ||
889 | static int reclaim_one_dmap_locked(struct inode *inode, | |
890 | struct fuse_dax_mapping *dmap) | |
891 | { | |
892 | int ret; | |
893 | struct fuse_inode *fi = get_fuse_inode(inode); | |
894 | ||
895 | /* | |
896 | * igrab() was done to make sure inode won't go under us, and this | |
897 | * further avoids the race with evict(). | |
898 | */ | |
899 | ret = dmap_writeback_invalidate(inode, dmap); | |
900 | if (ret) | |
901 | return ret; | |
902 | ||
903 | /* Remove dax mapping from inode interval tree now */ | |
904 | interval_tree_remove(&dmap->itn, &fi->dax->tree); | |
905 | fi->dax->nr--; | |
906 | ||
907 | /* It is possible that umount/shutdown has killed the fuse connection | |
908 | * and worker thread is trying to reclaim memory in parallel. Don't | |
909 | * warn in that case. | |
910 | */ | |
911 | ret = dmap_removemapping_one(inode, dmap); | |
912 | if (ret && ret != -ENOTCONN) { | |
913 | pr_warn("Failed to remove mapping. offset=0x%llx len=0x%llx ret=%d\n", | |
914 | dmap->window_offset, dmap->length, ret); | |
915 | } | |
916 | return 0; | |
917 | } | |
918 | ||
919 | /* Find the first mapped dmap for an inode and return it. Caller needs |
920 | * to hold fi->dax->sem either shared or exclusive. |
921 | */ | |
922 | static struct fuse_dax_mapping *inode_lookup_first_dmap(struct inode *inode) | |
923 | { | |
924 | struct fuse_inode *fi = get_fuse_inode(inode); | |
925 | struct fuse_dax_mapping *dmap; | |
926 | struct interval_tree_node *node; | |
927 | ||
928 | for (node = interval_tree_iter_first(&fi->dax->tree, 0, -1); node; | |
929 | node = interval_tree_iter_next(node, 0, -1)) { | |
930 | dmap = node_to_dmap(node); | |
931 | /* still in use. */ | |
932 | if (refcount_read(&dmap->refcnt) > 1) | |
933 | continue; | |
934 | ||
935 | return dmap; | |
936 | } | |
937 | ||
938 | return NULL; | |
939 | } | |
940 | ||
941 | /* | |
942 | * Find first mapping in the tree and free it and return it. Do not add | |
943 | * it back to free pool. | |
944 | */ | |
945 | static struct fuse_dax_mapping * | |
946 | inode_inline_reclaim_one_dmap(struct fuse_conn_dax *fcd, struct inode *inode, | |
947 | bool *retry) | |
948 | { | |
949 | struct fuse_inode *fi = get_fuse_inode(inode); | |
950 | struct fuse_dax_mapping *dmap; | |
951 | u64 dmap_start, dmap_end; | |
952 | unsigned long start_idx; | |
953 | int ret; | |
954 | struct interval_tree_node *node; | |
955 | ||
8bcbbe9c | 956 | filemap_invalidate_lock(inode->i_mapping); |
9a752d18 VG |
957 | |
958 | /* Lookup a dmap and corresponding file offset to reclaim. */ | |
959 | down_read(&fi->dax->sem); | |
960 | dmap = inode_lookup_first_dmap(inode); | |
961 | if (dmap) { | |
962 | start_idx = dmap->itn.start; | |
963 | dmap_start = start_idx << FUSE_DAX_SHIFT; | |
964 | dmap_end = dmap_start + FUSE_DAX_SZ - 1; | |
965 | } | |
966 | up_read(&fi->dax->sem); | |
967 | ||
968 | if (!dmap) | |
969 | goto out_mmap_sem; | |
970 | /* | |
971 | * Make sure there are no references to inode pages using | |
972 | * get_user_pages() | |
973 | */ | |
974 | ret = fuse_dax_break_layouts(inode, dmap_start, dmap_end); | |
975 | if (ret) { | |
976 | pr_debug("fuse: fuse_dax_break_layouts() failed. err=%d\n", | |
977 | ret); | |
978 | dmap = ERR_PTR(ret); | |
979 | goto out_mmap_sem; | |
980 | } | |
981 | ||
982 | down_write(&fi->dax->sem); | |
983 | node = interval_tree_iter_first(&fi->dax->tree, start_idx, start_idx); | |
984 | /* Range already got reclaimed by somebody else */ | |
985 | if (!node) { | |
986 | if (retry) | |
987 | *retry = true; | |
988 | goto out_write_dmap_sem; | |
989 | } | |
990 | ||
991 | dmap = node_to_dmap(node); | |
992 | /* still in use. */ | |
993 | if (refcount_read(&dmap->refcnt) > 1) { | |
994 | dmap = NULL; | |
995 | if (retry) | |
996 | *retry = true; | |
997 | goto out_write_dmap_sem; | |
998 | } | |
999 | ||
1000 | ret = reclaim_one_dmap_locked(inode, dmap); | |
1001 | if (ret < 0) { | |
1002 | dmap = ERR_PTR(ret); | |
1003 | goto out_write_dmap_sem; | |
1004 | } | |
1005 | ||
1006 | /* Clean up dmap. Do not add back to free list */ | |
1007 | dmap_remove_busy_list(fcd, dmap); | |
1008 | dmap->inode = NULL; | |
1009 | dmap->itn.start = dmap->itn.last = 0; | |
1010 | ||
1011 | pr_debug("fuse: %s: inline reclaimed memory range. inode=%p, window_offset=0x%llx, length=0x%llx\n", | |
1012 | __func__, inode, dmap->window_offset, dmap->length); | |
1013 | ||
1014 | out_write_dmap_sem: | |
1015 | up_write(&fi->dax->sem); | |
1016 | out_mmap_sem: | |
8bcbbe9c | 1017 | filemap_invalidate_unlock(inode->i_mapping); |
9a752d18 VG |
1018 | return dmap; |
1019 | } | |
1020 | ||
1021 | static struct fuse_dax_mapping * | |
1022 | alloc_dax_mapping_reclaim(struct fuse_conn_dax *fcd, struct inode *inode) | |
1023 | { | |
1024 | struct fuse_dax_mapping *dmap; | |
1025 | struct fuse_inode *fi = get_fuse_inode(inode); | |
1026 | ||
1027 | while (1) { | |
1028 | bool retry = false; | |
1029 | ||
1030 | dmap = alloc_dax_mapping(fcd); | |
1031 | if (dmap) | |
1032 | return dmap; | |
1033 | ||
1034 | dmap = inode_inline_reclaim_one_dmap(fcd, inode, &retry); | |
1035 | /* | |
1036 | * Either we got a mapping or it is an error, return in both | |
1037 | * the cases. | |
1038 | */ | |
1039 | if (dmap) | |
1040 | return dmap; | |
1041 | ||
1042 | /* If we could not reclaim a mapping because it |
1043 | * had a reference or some other temporary failure, |
1044 | * try again. We want to give up inline reclaim only |
1045 | * if there is no range assigned to this node. Otherwise |
8bcbbe9c JK |
1046 | * a deadlock is possible if we sleep with |
1047 | * mapping->invalidate_lock held and the worker that frees memory |
1048 | * can't make progress because |
1049 | * mapping->invalidate_lock is unavailable. So sleep only if fi->dax->nr == 0. |
9a752d18 VG |
1050 | */ |
1051 | if (retry) | |
1052 | continue; | |
1053 | /* | |
1054 | * There are no mappings which can be reclaimed. Wait for one. | |
1055 | * We are not holding fi->dax->sem. So it is possible | |
1056 | * that range gets added now. But as we are not holding | |
8bcbbe9c JK |
1057 | * mapping->invalidate_lock, worker should still be able to |
1058 | * free up a range and wake us up. | |
9a752d18 VG |
1059 | */ |
1060 | if (!fi->dax->nr && !(fcd->nr_free_ranges > 0)) { | |
1061 | if (wait_event_killable_exclusive(fcd->range_waitq, | |
1062 | (fcd->nr_free_ranges > 0))) { | |
1063 | return ERR_PTR(-EINTR); | |
1064 | } | |
1065 | } | |
1066 | } | |
1067 | } | |
1068 | ||
1069 | static int lookup_and_reclaim_dmap_locked(struct fuse_conn_dax *fcd, | |
1070 | struct inode *inode, | |
1071 | unsigned long start_idx) | |
1072 | { | |
1073 | int ret; | |
1074 | struct fuse_inode *fi = get_fuse_inode(inode); | |
1075 | struct fuse_dax_mapping *dmap; | |
1076 | struct interval_tree_node *node; | |
1077 | ||
1078 | /* Find the fuse dax mapping at this file offset in the inode. */ |
1079 | node = interval_tree_iter_first(&fi->dax->tree, start_idx, start_idx); | |
1080 | ||
1081 | /* Range already got cleaned up by somebody else */ | |
1082 | if (!node) | |
1083 | return 0; | |
1084 | dmap = node_to_dmap(node); | |
1085 | ||
1086 | /* still in use. */ | |
1087 | if (refcount_read(&dmap->refcnt) > 1) | |
1088 | return 0; | |
1089 | ||
1090 | ret = reclaim_one_dmap_locked(inode, dmap); | |
1091 | if (ret < 0) | |
1092 | return ret; | |
1093 | ||
1094 | /* Cleanup dmap entry and add back to free list */ | |
1095 | spin_lock(&fcd->lock); | |
1096 | dmap_reinit_add_to_free_pool(fcd, dmap); | |
1097 | spin_unlock(&fcd->lock); | |
1098 | return ret; | |
1099 | } | |
1100 | ||
1101 | /* | |
1102 | * Free a range of memory. | |
1103 | * Locking: | |
8bcbbe9c | 1104 | * 1. Take mapping->invalidate_lock to block dax faults. |
9a752d18 VG |
1105 | * 2. Take fi->dax->sem to protect interval tree and also to make sure |
1106 | * read/write can not reuse a dmap which we might be freeing. | |
1107 | */ | |
1108 | static int lookup_and_reclaim_dmap(struct fuse_conn_dax *fcd, | |
1109 | struct inode *inode, | |
1110 | unsigned long start_idx, | |
1111 | unsigned long end_idx) | |
1112 | { | |
1113 | int ret; | |
1114 | struct fuse_inode *fi = get_fuse_inode(inode); | |
1115 | loff_t dmap_start = start_idx << FUSE_DAX_SHIFT; | |
1116 | loff_t dmap_end = (dmap_start + FUSE_DAX_SZ) - 1; | |
1117 | ||
8bcbbe9c | 1118 | filemap_invalidate_lock(inode->i_mapping); |
9a752d18 VG |
1119 | ret = fuse_dax_break_layouts(inode, dmap_start, dmap_end); |
1120 | if (ret) { | |
1121 | pr_debug("virtio_fs: fuse_dax_break_layouts() failed. err=%d\n", | |
1122 | ret); | |
1123 | goto out_mmap_sem; | |
1124 | } | |
1125 | ||
1126 | down_write(&fi->dax->sem); | |
1127 | ret = lookup_and_reclaim_dmap_locked(fcd, inode, start_idx); | |
1128 | up_write(&fi->dax->sem); | |
1129 | out_mmap_sem: | |
8bcbbe9c | 1130 | filemap_invalidate_unlock(inode->i_mapping); |
9a752d18 VG |
1131 | return ret; |
1132 | } | |
1133 | ||
1134 | static int try_to_free_dmap_chunks(struct fuse_conn_dax *fcd, | |
1135 | unsigned long nr_to_free) | |
1136 | { | |
1137 | struct fuse_dax_mapping *dmap, *pos, *temp; | |
1138 | int ret, nr_freed = 0; | |
1139 | unsigned long start_idx = 0, end_idx = 0; | |
1140 | struct inode *inode = NULL; | |
1141 | ||
1142 | /* Pick the first busy range and free it for now */ |
1143 | while (1) { | |
1144 | if (nr_freed >= nr_to_free) | |
1145 | break; | |
1146 | ||
1147 | dmap = NULL; | |
1148 | spin_lock(&fcd->lock); | |
1149 | ||
1150 | if (!fcd->nr_busy_ranges) { | |
1151 | spin_unlock(&fcd->lock); | |
1152 | return 0; | |
1153 | } | |
1154 | ||
1155 | list_for_each_entry_safe(pos, temp, &fcd->busy_ranges, | |
1156 | busy_list) { | |
1157 | /* skip this range if it's in use. */ | |
1158 | if (refcount_read(&pos->refcnt) > 1) | |
1159 | continue; | |
1160 | ||
1161 | inode = igrab(pos->inode); | |
1162 | /* | |
1163 | * This inode is going away. That will free | |
1164 | * up all the ranges anyway, continue to | |
1165 | * next range. | |
1166 | */ | |
1167 | if (!inode) | |
1168 | continue; | |
1169 | /* | |
1170 | * Take this element off list and add it tail. If | |
1171 | * this element can't be freed, it will help with | |
1172 | * selecting new element in next iteration of loop. | |
1173 | */ | |
1174 | dmap = pos; | |
1175 | list_move_tail(&dmap->busy_list, &fcd->busy_ranges); | |
1176 | start_idx = end_idx = dmap->itn.start; | |
1177 | break; | |
1178 | } | |
1179 | spin_unlock(&fcd->lock); | |
1180 | if (!dmap) | |
1181 | return 0; | |
1182 | ||
1183 | ret = lookup_and_reclaim_dmap(fcd, inode, start_idx, end_idx); | |
1184 | iput(inode); | |
1185 | if (ret) | |
1186 | return ret; | |
1187 | nr_freed++; | |
1188 | } | |
1189 | return 0; | |
1190 | } | |
1191 | ||
1192 | static void fuse_dax_free_mem_worker(struct work_struct *work) | |
1193 | { | |
1194 | int ret; | |
1195 | struct fuse_conn_dax *fcd = container_of(work, struct fuse_conn_dax, | |
1196 | free_work.work); | |
1197 | ret = try_to_free_dmap_chunks(fcd, FUSE_DAX_RECLAIM_CHUNK); | |
1198 | if (ret) { | |
1199 | pr_debug("fuse: try_to_free_dmap_chunks() failed with err=%d\n", | |
1200 | ret); | |
1201 | } | |
1202 | ||
c4e0cd4e | 1203 | /* If the number of free ranges is still below the threshold, requeue */ |
9a752d18 VG |
1204 | kick_dmap_free_worker(fcd, 1); |
1205 | } | |
1206 | ||
45f2348e VG |
1207 | static void fuse_free_dax_mem_ranges(struct list_head *mem_list) |
1208 | { | |
1209 | struct fuse_dax_mapping *range, *temp; | |
1210 | ||
1211 | /* Free All allocated elements */ | |
1212 | list_for_each_entry_safe(range, temp, mem_list, list) { | |
1213 | list_del(&range->list); | |
d0cfb9dc VG |
1214 | if (!list_empty(&range->busy_list)) |
1215 | list_del(&range->busy_list); | |
45f2348e VG |
1216 | kfree(range); |
1217 | } | |
1218 | } | |
1219 | ||
1dd53957 VG |
1220 | void fuse_dax_conn_free(struct fuse_conn *fc) |
1221 | { | |
45f2348e VG |
1222 | if (fc->dax) { |
1223 | fuse_free_dax_mem_ranges(&fc->dax->free_ranges); | |
1224 | kfree(fc->dax); | |
1225 | } | |
1226 | } | |
1227 | ||
1228 | static int fuse_dax_mem_range_init(struct fuse_conn_dax *fcd) | |
1229 | { | |
1230 | long nr_pages, nr_ranges; | |
45f2348e VG |
1231 | struct fuse_dax_mapping *range; |
1232 | int ret, id; | |
1233 | size_t dax_size = -1; | |
1234 | unsigned long i; | |
1235 | ||
9a752d18 | 1236 | init_waitqueue_head(&fcd->range_waitq); |
45f2348e | 1237 | INIT_LIST_HEAD(&fcd->free_ranges); |
d0cfb9dc | 1238 | INIT_LIST_HEAD(&fcd->busy_ranges); |
9a752d18 VG |
1239 | INIT_DELAYED_WORK(&fcd->free_work, fuse_dax_free_mem_worker); |
1240 | ||
45f2348e | 1241 | id = dax_read_lock(); |
e511c4a3 JC |
1242 | nr_pages = dax_direct_access(fcd->dev, 0, PHYS_PFN(dax_size), |
1243 | DAX_ACCESS, NULL, NULL); | |
45f2348e VG |
1244 | dax_read_unlock(id); |
1245 | if (nr_pages < 0) { | |
1246 | pr_debug("dax_direct_access() returned %ld\n", nr_pages); | |
1247 | return nr_pages; | |
1248 | } | |
1249 | ||
1250 | nr_ranges = nr_pages/FUSE_DAX_PAGES; | |
1251 | pr_debug("%s: dax mapped %ld pages. nr_ranges=%ld\n", | |
1252 | __func__, nr_pages, nr_ranges); | |
1253 | ||
1254 | for (i = 0; i < nr_ranges; i++) { | |
1255 | range = kzalloc(sizeof(struct fuse_dax_mapping), GFP_KERNEL); | |
1256 | ret = -ENOMEM; | |
1257 | if (!range) | |
1258 | goto out_err; | |
1259 | ||
1260 | /* TODO: This offset only works if the virtio-fs driver is not |
1261 | * hiding some memory at the beginning. This needs |
1262 | * better handling. |
1263 | */ | |
1264 | range->window_offset = i * FUSE_DAX_SZ; | |
1265 | range->length = FUSE_DAX_SZ; | |
d0cfb9dc | 1266 | INIT_LIST_HEAD(&range->busy_list); |
9a752d18 | 1267 | refcount_set(&range->refcnt, 1); |
45f2348e VG |
1268 | list_add_tail(&range->list, &fcd->free_ranges); |
1269 | } | |
1270 | ||
1271 | fcd->nr_free_ranges = nr_ranges; | |
9a752d18 | 1272 | fcd->nr_ranges = nr_ranges; |
45f2348e VG |
1273 | return 0; |
1274 | out_err: | |
1275 | /* Free All allocated elements */ | |
1276 | fuse_free_dax_mem_ranges(&fcd->free_ranges); | |
1277 | return ret; | |
1dd53957 VG |
1278 | } |
1279 | ||
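As a concrete illustration of the split performed above, a 4 GiB DAX window (an assumed size, not something the driver mandates) yields 2048 free ranges of 2 MiB each:

```c
#include <stdio.h>

#define PAGE_SIZE       4096ULL
#define FUSE_DAX_SZ     (1ULL << 21)                /* 2 MiB */
#define FUSE_DAX_PAGES  (FUSE_DAX_SZ / PAGE_SIZE)   /* 512 pages per range */

int main(void)
{
	unsigned long long window_bytes = 4ULL << 30;   /* assumed 4 GiB window */
	unsigned long long nr_pages = window_bytes / PAGE_SIZE;
	unsigned long long nr_ranges = nr_pages / FUSE_DAX_PAGES;

	printf("%llu pages -> %llu ranges of 2 MiB\n", nr_pages, nr_ranges);
	return 0;
}
```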
780b1b95 JX |
1280 | int fuse_dax_conn_alloc(struct fuse_conn *fc, enum fuse_dax_mode dax_mode, |
1281 | struct dax_device *dax_dev) | |
1dd53957 VG |
1282 | { |
1283 | struct fuse_conn_dax *fcd; | |
45f2348e | 1284 | int err; |
1dd53957 | 1285 | |
780b1b95 JX |
1286 | fc->dax_mode = dax_mode; |
1287 | ||
1dd53957 VG |
1288 | if (!dax_dev) |
1289 | return 0; | |
1290 | ||
1291 | fcd = kzalloc(sizeof(*fcd), GFP_KERNEL); | |
1292 | if (!fcd) | |
1293 | return -ENOMEM; | |
1294 | ||
c2d0ad00 | 1295 | spin_lock_init(&fcd->lock); |
1dd53957 | 1296 | fcd->dev = dax_dev; |
45f2348e VG |
1297 | err = fuse_dax_mem_range_init(fcd); |
1298 | if (err) { | |
1299 | kfree(fcd); | |
1300 | return err; | |
1301 | } | |
1dd53957 VG |
1302 | |
1303 | fc->dax = fcd; | |
1304 | return 0; | |
1305 | } | |
fd1a1dc6 | 1306 | |
c2d0ad00 VG |
1307 | bool fuse_dax_inode_alloc(struct super_block *sb, struct fuse_inode *fi) |
1308 | { | |
1309 | struct fuse_conn *fc = get_fuse_conn_super(sb); | |
1310 | ||
1311 | fi->dax = NULL; | |
1312 | if (fc->dax) { | |
1313 | fi->dax = kzalloc(sizeof(*fi->dax), GFP_KERNEL_ACCOUNT); | |
1314 | if (!fi->dax) | |
1315 | return false; | |
1316 | ||
1317 | init_rwsem(&fi->dax->sem); | |
1318 | fi->dax->tree = RB_ROOT_CACHED; | |
1319 | } | |
1320 | ||
1321 | return true; | |
1322 | } | |
1323 | ||
9483e7d5 VG |
1324 | static const struct address_space_operations fuse_dax_file_aops = { |
1325 | .writepages = fuse_dax_writepages, | |
1326 | .direct_IO = noop_direct_IO, | |
46de8b97 | 1327 | .dirty_folio = noop_dirty_folio, |
9483e7d5 VG |
1328 | }; |
1329 | ||
93a497b9 | 1330 | static bool fuse_should_enable_dax(struct inode *inode, unsigned int flags) |
c2d0ad00 VG |
1331 | { |
1332 | struct fuse_conn *fc = get_fuse_conn(inode); | |
780b1b95 JX |
1333 | enum fuse_dax_mode dax_mode = fc->dax_mode; |
1334 | ||
1335 | if (dax_mode == FUSE_DAX_NEVER) | |
1336 | return false; | |
c2d0ad00 | 1337 | |
780b1b95 JX |
1338 | /* |
1339 | * fc->dax may be NULL in 'inode' mode when filesystem device doesn't | |
1340 | * support DAX, in which case it will silently fallback to 'never' mode. | |
1341 | */ | |
c2d0ad00 | 1342 | if (!fc->dax) |
cecd4916 JX |
1343 | return false; |
1344 | ||
93a497b9 JX |
1345 | if (dax_mode == FUSE_DAX_ALWAYS) |
1346 | return true; | |
1347 | ||
1348 | /* dax_mode is FUSE_DAX_INODE* */ | |
2ee019fa | 1349 | return fc->inode_dax && (flags & FUSE_ATTR_DAX); |
cecd4916 JX |
1350 | } |
1351 | ||
93a497b9 | 1352 | void fuse_dax_inode_init(struct inode *inode, unsigned int flags) |
cecd4916 | 1353 | { |
93a497b9 | 1354 | if (!fuse_should_enable_dax(inode, flags)) |
c2d0ad00 VG |
1355 | return; |
1356 | ||
1357 | inode->i_flags |= S_DAX; | |
9483e7d5 | 1358 | inode->i_data.a_ops = &fuse_dax_file_aops; |
c2d0ad00 VG |
1359 | } |
1360 | ||
c3cb6f93 JX |
1361 | void fuse_dax_dontcache(struct inode *inode, unsigned int flags) |
1362 | { | |
1363 | struct fuse_conn *fc = get_fuse_conn(inode); | |
1364 | ||
1365 | if (fuse_is_inode_dax_mode(fc->dax_mode) && | |
1366 | ((bool) IS_DAX(inode) != (bool) (flags & FUSE_ATTR_DAX))) | |
1367 | d_mark_dontcache(inode); | |
1368 | } | |
1369 | ||
fd1a1dc6 SH |
1370 | bool fuse_dax_check_alignment(struct fuse_conn *fc, unsigned int map_alignment) |
1371 | { | |
1372 | if (fc->dax && (map_alignment > FUSE_DAX_SHIFT)) { | |
1373 | pr_warn("FUSE: map_alignment %u incompatible with dax mem range size %u\n", | |
1374 | map_alignment, FUSE_DAX_SZ); | |
1375 | return false; | |
1376 | } | |
1377 | return true; | |
1378 | } | |
9a752d18 VG |
1379 | |
1380 | void fuse_dax_cancel_work(struct fuse_conn *fc) | |
1381 | { | |
1382 | struct fuse_conn_dax *fcd = fc->dax; | |
1383 | ||
1384 | if (fcd) | |
1385 | cancel_delayed_work_sync(&fcd->free_work); | |
1386 | ||
1387 | } | |
1388 | EXPORT_SYMBOL_GPL(fuse_dax_cancel_work); |