/* Cache page management and data I/O routines
 *
 * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells ([email protected])
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define FSCACHE_DEBUG_LEVEL PAGE
#include <linux/module.h>
#include <linux/fscache-cache.h>
#include <linux/buffer_head.h>
#include <linux/pagevec.h>
#include <linux/slab.h>
#include "internal.h"

/*
 * check to see if a page is being written to the cache
 */
bool __fscache_check_page_write(struct fscache_cookie *cookie, struct page *page)
{
	void *val;

	rcu_read_lock();
	val = radix_tree_lookup(&cookie->stores, page->index);
	rcu_read_unlock();
	trace_fscache_check_page(cookie, page, val, 0);

	return val != NULL;
}
EXPORT_SYMBOL(__fscache_check_page_write);

/*
 * wait for a page to finish being written to the cache
 */
void __fscache_wait_on_page_write(struct fscache_cookie *cookie, struct page *page)
{
	wait_queue_head_t *wq = bit_waitqueue(&cookie->flags, 0);

	trace_fscache_page(cookie, page, fscache_page_write_wait);

	wait_event(*wq, !__fscache_check_page_write(cookie, page));
}
EXPORT_SYMBOL(__fscache_wait_on_page_write);
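
/*
 * Illustrative sketch (not part of this file): a netfs typically waits for
 * any in-flight write to the cache before it invalidates or releases a page,
 * using the fscache_wait_on_page_write() and fscache_uncache_page() wrappers
 * from linux/fscache.h.  The "mynetfs" names below are hypothetical:
 *
 *	static void mynetfs_invalidate_page(struct page *page,
 *					    unsigned int offset,
 *					    unsigned int length)
 *	{
 *		struct fscache_cookie *cookie = mynetfs_page_cookie(page);
 *
 *		if (PageFsCache(page)) {
 *			fscache_wait_on_page_write(cookie, page);
 *			fscache_uncache_page(cookie, page);
 *		}
 *	}
 */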

/*
 * wait for a page to finish being written to the cache. Put a timeout here
 * since we might be called recursively via parent fs.
 */
static
bool release_page_wait_timeout(struct fscache_cookie *cookie, struct page *page)
{
	wait_queue_head_t *wq = bit_waitqueue(&cookie->flags, 0);

	return wait_event_timeout(*wq, !__fscache_check_page_write(cookie, page),
				  HZ);
}

/*
 * decide whether a page can be released, possibly by cancelling a store to it
 * - we're allowed to sleep if __GFP_DIRECT_RECLAIM is flagged
 */
bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
				  struct page *page,
				  gfp_t gfp)
{
	struct page *xpage;
	void *val;

	_enter("%p,%p,%x", cookie, page, gfp);

	trace_fscache_page(cookie, page, fscache_page_maybe_release);

try_again:
	rcu_read_lock();
	val = radix_tree_lookup(&cookie->stores, page->index);
	if (!val) {
		rcu_read_unlock();
		fscache_stat(&fscache_n_store_vmscan_not_storing);
		__fscache_uncache_page(cookie, page);
		return true;
	}

	/* see if the page is actually undergoing storage - if so we can't get
	 * rid of it till the cache has finished with it */
	if (radix_tree_tag_get(&cookie->stores, page->index,
			       FSCACHE_COOKIE_STORING_TAG)) {
		rcu_read_unlock();
		goto page_busy;
	}

	/* the page is pending storage, so we attempt to cancel the store and
	 * discard the store request so that the page can be reclaimed */
	spin_lock(&cookie->stores_lock);
	rcu_read_unlock();

	if (radix_tree_tag_get(&cookie->stores, page->index,
			       FSCACHE_COOKIE_STORING_TAG)) {
		/* the page started to undergo storage whilst we were looking,
		 * so now we can only wait or return */
		spin_unlock(&cookie->stores_lock);
		goto page_busy;
	}

	xpage = radix_tree_delete(&cookie->stores, page->index);
	trace_fscache_page(cookie, page, fscache_page_radix_delete);
	spin_unlock(&cookie->stores_lock);

	if (xpage) {
		fscache_stat(&fscache_n_store_vmscan_cancelled);
		fscache_stat(&fscache_n_store_radix_deletes);
		ASSERTCMP(xpage, ==, page);
	} else {
		fscache_stat(&fscache_n_store_vmscan_gone);
	}

	wake_up_bit(&cookie->flags, 0);
	trace_fscache_wake_cookie(cookie);
	if (xpage)
		put_page(xpage);
	__fscache_uncache_page(cookie, page);
	return true;

page_busy:
	/* We will wait here if we're allowed to, but that could deadlock the
	 * allocator as the work threads writing to the cache may all end up
	 * sleeping on memory allocation, so we may need to impose a timeout
	 * too. */
	if (!(gfp & __GFP_DIRECT_RECLAIM) || !(gfp & __GFP_FS)) {
		fscache_stat(&fscache_n_store_vmscan_busy);
		return false;
	}

	fscache_stat(&fscache_n_store_vmscan_wait);
	if (!release_page_wait_timeout(cookie, page))
		_debug("fscache writeout timeout page: %p{%lx}",
			page, page->index);

	gfp &= ~__GFP_DIRECT_RECLAIM;
	goto try_again;
}
EXPORT_SYMBOL(__fscache_maybe_release_page);
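
/*
 * Illustrative sketch (not part of this file): __fscache_maybe_release_page()
 * is normally reached through the fscache_maybe_release_page() wrapper from a
 * netfs ->releasepage() hook.  The "mynetfs" helper below is hypothetical:
 *
 *	static int mynetfs_release_page(struct page *page, gfp_t gfp)
 *	{
 *		struct fscache_cookie *cookie = mynetfs_page_cookie(page);
 *
 *		if (PageFsCache(page) &&
 *		    !fscache_maybe_release_page(cookie, page, gfp))
 *			return 0;
 *		return 1;
 *	}
 *
 * A false return means the cache is still using the page, so the netfs tells
 * the VM that the page cannot be released yet.
 */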

/*
 * note that a page has finished being written to the cache
 */
static void fscache_end_page_write(struct fscache_object *object,
				   struct page *page)
{
	struct fscache_cookie *cookie;
	struct page *xpage = NULL, *val;

	spin_lock(&object->lock);
	cookie = object->cookie;
	if (cookie) {
		/* delete the page from the tree if it is now no longer
		 * pending */
		spin_lock(&cookie->stores_lock);
		radix_tree_tag_clear(&cookie->stores, page->index,
				     FSCACHE_COOKIE_STORING_TAG);
		trace_fscache_page(cookie, page, fscache_page_radix_clear_store);
		if (!radix_tree_tag_get(&cookie->stores, page->index,
					FSCACHE_COOKIE_PENDING_TAG)) {
			fscache_stat(&fscache_n_store_radix_deletes);
			xpage = radix_tree_delete(&cookie->stores, page->index);
			trace_fscache_page(cookie, page, fscache_page_radix_delete);
			trace_fscache_page(cookie, page, fscache_page_write_end);

			val = radix_tree_lookup(&cookie->stores, page->index);
			trace_fscache_check_page(cookie, page, val, 1);
		} else {
			trace_fscache_page(cookie, page, fscache_page_write_end_pend);
		}
		spin_unlock(&cookie->stores_lock);
		wake_up_bit(&cookie->flags, 0);
		trace_fscache_wake_cookie(cookie);
	} else {
		trace_fscache_page(cookie, page, fscache_page_write_end_noc);
	}
	spin_unlock(&object->lock);
	if (xpage)
		put_page(xpage);
}

/*
 * actually apply the changed attributes to a cache object
 */
static void fscache_attr_changed_op(struct fscache_operation *op)
{
	struct fscache_object *object = op->object;
	int ret;

	_enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);

	fscache_stat(&fscache_n_attr_changed_calls);

	if (fscache_object_is_active(object)) {
		fscache_stat(&fscache_n_cop_attr_changed);
		ret = object->cache->ops->attr_changed(object);
		fscache_stat_d(&fscache_n_cop_attr_changed);
		if (ret < 0)
			fscache_abort_object(object);
		fscache_op_complete(op, ret < 0);
	} else {
		fscache_op_complete(op, true);
	}

	_leave("");
}

/*
 * notification that the attributes on an object have changed
 */
int __fscache_attr_changed(struct fscache_cookie *cookie)
{
	struct fscache_operation *op;
	struct fscache_object *object;
	bool wake_cookie = false;

	_enter("%p", cookie);

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);

	fscache_stat(&fscache_n_attr_changed);

	op = kzalloc(sizeof(*op), GFP_KERNEL);
	if (!op) {
		fscache_stat(&fscache_n_attr_changed_nomem);
		_leave(" = -ENOMEM");
		return -ENOMEM;
	}

	fscache_operation_init(cookie, op, fscache_attr_changed_op, NULL, NULL);
	trace_fscache_page_op(cookie, NULL, op, fscache_page_op_attr_changed);
	op->flags = FSCACHE_OP_ASYNC |
		(1 << FSCACHE_OP_EXCLUSIVE) |
		(1 << FSCACHE_OP_UNUSE_COOKIE);

	spin_lock(&cookie->lock);

	if (!fscache_cookie_enabled(cookie) ||
	    hlist_empty(&cookie->backing_objects))
		goto nobufs;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	__fscache_use_cookie(cookie);
	if (fscache_submit_exclusive_op(object, op) < 0)
		goto nobufs_dec;
	spin_unlock(&cookie->lock);
	fscache_stat(&fscache_n_attr_changed_ok);
	fscache_put_operation(op);
	_leave(" = 0");
	return 0;

nobufs_dec:
	wake_cookie = __fscache_unuse_cookie(cookie);
nobufs:
	spin_unlock(&cookie->lock);
	fscache_put_operation(op);
	if (wake_cookie)
		__fscache_wake_unused_cookie(cookie);
	fscache_stat(&fscache_n_attr_changed_nobufs);
	_leave(" = %d", -ENOBUFS);
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_attr_changed);
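
/*
 * Illustrative sketch (not part of this file): a netfs calls the
 * fscache_attr_changed() wrapper after an operation that changes attributes
 * the cache object depends on (typically the file size), for example from its
 * ->setattr() path.  The "mynetfs" names are hypothetical:
 *
 *	static int mynetfs_setattr(struct dentry *dentry, struct iattr *attr)
 *	{
 *		struct inode *inode = d_inode(dentry);
 *		int ret = mynetfs_do_setattr(inode, attr);
 *
 *		if (ret == 0 && (attr->ia_valid & ATTR_SIZE))
 *			fscache_attr_changed(mynetfs_i_cookie(inode));
 *		return ret;
 *	}
 *
 * A return of -ENOBUFS simply means there is currently no cache object to
 * update.
 */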

/*
 * Handle cancellation of a pending retrieval op
 */
static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
{
	struct fscache_retrieval *op =
		container_of(_op, struct fscache_retrieval, op);

	atomic_set(&op->n_pages, 0);
}

/*
 * release a retrieval op reference
 */
static void fscache_release_retrieval_op(struct fscache_operation *_op)
{
	struct fscache_retrieval *op =
		container_of(_op, struct fscache_retrieval, op);

	_enter("{OP%x}", op->op.debug_id);

	ASSERTIFCMP(op->op.state != FSCACHE_OP_ST_INITIALISED,
		    atomic_read(&op->n_pages), ==, 0);

	fscache_hist(fscache_retrieval_histogram, op->start_time);
	if (op->context)
		fscache_put_context(op->cookie, op->context);

	_leave("");
}

/*
 * allocate a retrieval op
 */
static struct fscache_retrieval *fscache_alloc_retrieval(
	struct fscache_cookie *cookie,
	struct address_space *mapping,
	fscache_rw_complete_t end_io_func,
	void *context)
{
	struct fscache_retrieval *op;

	/* allocate a retrieval operation and attempt to submit it */
	op = kzalloc(sizeof(*op), GFP_NOIO);
	if (!op) {
		fscache_stat(&fscache_n_retrievals_nomem);
		return NULL;
	}

	fscache_operation_init(cookie, &op->op, NULL,
			       fscache_do_cancel_retrieval,
			       fscache_release_retrieval_op);
	op->op.flags = FSCACHE_OP_MYTHREAD |
		(1UL << FSCACHE_OP_WAITING) |
		(1UL << FSCACHE_OP_UNUSE_COOKIE);
	op->cookie = cookie;
	op->mapping = mapping;
	op->end_io_func = end_io_func;
	op->context = context;
	op->start_time = jiffies;
	INIT_LIST_HEAD(&op->to_do);

	/* Pin the netfs read context in case we need to do the actual netfs
	 * read because we've encountered a cache read failure.
	 */
	if (context)
		fscache_get_context(op->cookie, context);
	return op;
}

/*
 * wait for a deferred lookup to complete
 */
int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
{
	unsigned long jif;

	_enter("");

	if (!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags)) {
		_leave(" = 0 [imm]");
		return 0;
	}

	fscache_stat(&fscache_n_retrievals_wait);

	jif = jiffies;
	if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
			TASK_INTERRUPTIBLE) != 0) {
		fscache_stat(&fscache_n_retrievals_intr);
		_leave(" = -ERESTARTSYS");
		return -ERESTARTSYS;
	}

	ASSERT(!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags));

	smp_rmb();
	fscache_hist(fscache_retrieval_delay_histogram, jif);
	_leave(" = 0 [dly]");
	return 0;
}

/*
 * wait for an object to become active (or dead)
 */
int fscache_wait_for_operation_activation(struct fscache_object *object,
					  struct fscache_operation *op,
					  atomic_t *stat_op_waits,
					  atomic_t *stat_object_dead)
{
	int ret;

	if (!test_bit(FSCACHE_OP_WAITING, &op->flags))
		goto check_if_dead;

	_debug(">>> WT");
	if (stat_op_waits)
		fscache_stat(stat_op_waits);
	if (wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
			TASK_INTERRUPTIBLE) != 0) {
		trace_fscache_op(object->cookie, op, fscache_op_signal);
		ret = fscache_cancel_op(op, false);
		if (ret == 0)
			return -ERESTARTSYS;

		/* it's been removed from the pending queue by another party,
		 * so we should get to run shortly */
		wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
			    TASK_UNINTERRUPTIBLE);
	}
	_debug("<<< GO");

check_if_dead:
	if (op->state == FSCACHE_OP_ST_CANCELLED) {
		if (stat_object_dead)
			fscache_stat(stat_object_dead);
		_leave(" = -ENOBUFS [cancelled]");
		return -ENOBUFS;
	}
	if (unlikely(fscache_object_is_dying(object) ||
		     fscache_cache_is_broken(object))) {
		enum fscache_operation_state state = op->state;
		trace_fscache_op(object->cookie, op, fscache_op_signal);
		fscache_cancel_op(op, true);
		if (stat_object_dead)
			fscache_stat(stat_object_dead);
		_leave(" = -ENOBUFS [obj dead %d]", state);
		return -ENOBUFS;
	}
	return 0;
}

/*
 * read a page from the cache or allocate a block in which to store it
 * - we return:
 *   -ENOMEM	- out of memory, nothing done
 *   -ERESTARTSYS - interrupted
 *   -ENOBUFS	- no backing object available in which to cache the block
 *   -ENODATA	- no data available in the backing object for this block
 *   0		- dispatched a read - it'll call end_io_func() when finished
 */
int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
				 struct page *page,
				 fscache_rw_complete_t end_io_func,
				 void *context,
				 gfp_t gfp)
{
	struct fscache_retrieval *op;
	struct fscache_object *object;
	bool wake_cookie = false;
	int ret;

	_enter("%p,%p,,,", cookie, page);

	fscache_stat(&fscache_n_retrievals);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;

	if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
		_leave(" = -ENOBUFS [invalidating]");
		return -ENOBUFS;
	}

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(page, !=, NULL);

	if (fscache_wait_for_deferred_lookup(cookie) < 0)
		return -ERESTARTSYS;

	op = fscache_alloc_retrieval(cookie, page->mapping,
				     end_io_func, context);
	if (!op) {
		_leave(" = -ENOMEM");
		return -ENOMEM;
	}
	atomic_set(&op->n_pages, 1);
	trace_fscache_page_op(cookie, page, &op->op, fscache_page_op_retr_one);

	spin_lock(&cookie->lock);

	if (!fscache_cookie_enabled(cookie) ||
	    hlist_empty(&cookie->backing_objects))
		goto nobufs_unlock;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	ASSERT(test_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags));

	__fscache_use_cookie(cookie);
	atomic_inc(&object->n_reads);
	__set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);

	if (fscache_submit_op(object, &op->op) < 0)
		goto nobufs_unlock_dec;
	spin_unlock(&cookie->lock);

	fscache_stat(&fscache_n_retrieval_ops);

	/* we wait for the operation to become active, and then process it
	 * *here*, in this thread, and not in the thread pool */
	ret = fscache_wait_for_operation_activation(
		object, &op->op,
		__fscache_stat(&fscache_n_retrieval_op_waits),
		__fscache_stat(&fscache_n_retrievals_object_dead));
	if (ret < 0)
		goto error;

	/* ask the cache to honour the operation */
	if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
		fscache_stat(&fscache_n_cop_allocate_page);
		ret = object->cache->ops->allocate_page(op, page, gfp);
		fscache_stat_d(&fscache_n_cop_allocate_page);
		if (ret == 0)
			ret = -ENODATA;
	} else {
		fscache_stat(&fscache_n_cop_read_or_alloc_page);
		ret = object->cache->ops->read_or_alloc_page(op, page, gfp);
		fscache_stat_d(&fscache_n_cop_read_or_alloc_page);
	}

error:
	if (ret == -ENOMEM)
		fscache_stat(&fscache_n_retrievals_nomem);
	else if (ret == -ERESTARTSYS)
		fscache_stat(&fscache_n_retrievals_intr);
	else if (ret == -ENODATA)
		fscache_stat(&fscache_n_retrievals_nodata);
	else if (ret < 0)
		fscache_stat(&fscache_n_retrievals_nobufs);
	else
		fscache_stat(&fscache_n_retrievals_ok);

	fscache_put_retrieval(op);
	_leave(" = %d", ret);
	return ret;

nobufs_unlock_dec:
	atomic_dec(&object->n_reads);
	wake_cookie = __fscache_unuse_cookie(cookie);
nobufs_unlock:
	spin_unlock(&cookie->lock);
	if (wake_cookie)
		__fscache_wake_unused_cookie(cookie);
	fscache_put_retrieval(op);
nobufs:
	fscache_stat(&fscache_n_retrievals_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_read_or_alloc_page);
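
/*
 * Illustrative sketch (not part of this file): a netfs ->readpage()
 * implementation usually tries the cache first via the
 * fscache_read_or_alloc_page() wrapper and falls back to the server on
 * -ENODATA or -ENOBUFS.  The "mynetfs" helpers are hypothetical;
 * mynetfs_read_done would be the netfs's fscache_rw_complete_t callback:
 *
 *	static int mynetfs_readpage(struct file *file, struct page *page)
 *	{
 *		struct inode *inode = page->mapping->host;
 *		int ret;
 *
 *		ret = fscache_read_or_alloc_page(mynetfs_i_cookie(inode), page,
 *						 mynetfs_read_done, NULL,
 *						 GFP_KERNEL);
 *		switch (ret) {
 *		case 0:
 *			return 0;
 *		case -ENOBUFS:
 *		case -ENODATA:
 *			return mynetfs_read_from_server(file, page);
 *		default:
 *			return ret;
 *		}
 *	}
 *
 * On a return of 0 the read has been dispatched and the page is completed by
 * the end_io callback when the cache backend finishes with it.
 */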

/*
 * read a list of pages from the cache or allocate a block in which to store
 * them
 * - we return:
 *   -ENOMEM	- out of memory, some pages may be being read
 *   -ERESTARTSYS - interrupted, some pages may be being read
 *   -ENOBUFS	- no backing object or space available in which to cache any
 *                pages not being read
 *   -ENODATA	- no data available in the backing object for some or all of
 *                the pages
 *   0		- dispatched a read on all pages
 *
 * end_io_func() will be called for each page read from the cache as it
 * finishes being read
 *
 * any pages for which a read is dispatched will be removed from pages and
 * nr_pages
 */
int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
				  struct address_space *mapping,
				  struct list_head *pages,
				  unsigned *nr_pages,
				  fscache_rw_complete_t end_io_func,
				  void *context,
				  gfp_t gfp)
{
	struct fscache_retrieval *op;
	struct fscache_object *object;
	bool wake_cookie = false;
	int ret;

	_enter("%p,,%d,,,", cookie, *nr_pages);

	fscache_stat(&fscache_n_retrievals);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;

	if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
		_leave(" = -ENOBUFS [invalidating]");
		return -ENOBUFS;
	}

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(*nr_pages, >, 0);
	ASSERT(!list_empty(pages));

	if (fscache_wait_for_deferred_lookup(cookie) < 0)
		return -ERESTARTSYS;

	op = fscache_alloc_retrieval(cookie, mapping, end_io_func, context);
	if (!op)
		return -ENOMEM;
	atomic_set(&op->n_pages, *nr_pages);
	trace_fscache_page_op(cookie, NULL, &op->op, fscache_page_op_retr_multi);

	spin_lock(&cookie->lock);

	if (!fscache_cookie_enabled(cookie) ||
	    hlist_empty(&cookie->backing_objects))
		goto nobufs_unlock;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	__fscache_use_cookie(cookie);
	atomic_inc(&object->n_reads);
	__set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);

	if (fscache_submit_op(object, &op->op) < 0)
		goto nobufs_unlock_dec;
	spin_unlock(&cookie->lock);

	fscache_stat(&fscache_n_retrieval_ops);

	/* we wait for the operation to become active, and then process it
	 * *here*, in this thread, and not in the thread pool */
	ret = fscache_wait_for_operation_activation(
		object, &op->op,
		__fscache_stat(&fscache_n_retrieval_op_waits),
		__fscache_stat(&fscache_n_retrievals_object_dead));
	if (ret < 0)
		goto error;

	/* ask the cache to honour the operation */
	if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
		fscache_stat(&fscache_n_cop_allocate_pages);
		ret = object->cache->ops->allocate_pages(
			op, pages, nr_pages, gfp);
		fscache_stat_d(&fscache_n_cop_allocate_pages);
	} else {
		fscache_stat(&fscache_n_cop_read_or_alloc_pages);
		ret = object->cache->ops->read_or_alloc_pages(
			op, pages, nr_pages, gfp);
		fscache_stat_d(&fscache_n_cop_read_or_alloc_pages);
	}

error:
	if (ret == -ENOMEM)
		fscache_stat(&fscache_n_retrievals_nomem);
	else if (ret == -ERESTARTSYS)
		fscache_stat(&fscache_n_retrievals_intr);
	else if (ret == -ENODATA)
		fscache_stat(&fscache_n_retrievals_nodata);
	else if (ret < 0)
		fscache_stat(&fscache_n_retrievals_nobufs);
	else
		fscache_stat(&fscache_n_retrievals_ok);

	fscache_put_retrieval(op);
	_leave(" = %d", ret);
	return ret;

nobufs_unlock_dec:
	atomic_dec(&object->n_reads);
	wake_cookie = __fscache_unuse_cookie(cookie);
nobufs_unlock:
	spin_unlock(&cookie->lock);
	fscache_put_retrieval(op);
	if (wake_cookie)
		__fscache_wake_unused_cookie(cookie);
nobufs:
	fscache_stat(&fscache_n_retrievals_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_read_or_alloc_pages);
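
/*
 * Illustrative sketch (not part of this file): the multi-page variant is
 * driven from a netfs ->readpages() hook via the fscache_read_or_alloc_pages()
 * wrapper; pages the cache takes on are removed from the list and *nr_pages
 * is reduced, leaving the remainder for the server read.  The "mynetfs" names
 * are hypothetical:
 *
 *	ret = fscache_read_or_alloc_pages(cookie, mapping, pages, &nr_pages,
 *					  mynetfs_read_done, NULL,
 *					  mapping_gfp_mask(mapping));
 *	if (ret == 0 && nr_pages == 0)
 *		return 0;
 *	return mynetfs_readpages_from_server(mapping, pages, nr_pages);
 */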

/*
 * allocate a block in the cache on which to store a page
 * - we return:
 *   -ENOMEM	- out of memory, nothing done
 *   -ERESTARTSYS - interrupted
 *   -ENOBUFS	- no backing object available in which to cache the block
 *   0		- block allocated
 */
int __fscache_alloc_page(struct fscache_cookie *cookie,
			 struct page *page,
			 gfp_t gfp)
{
	struct fscache_retrieval *op;
	struct fscache_object *object;
	bool wake_cookie = false;
	int ret;

	_enter("%p,%p,,,", cookie, page);

	fscache_stat(&fscache_n_allocs);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(page, !=, NULL);

	if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
		_leave(" = -ENOBUFS [invalidating]");
		return -ENOBUFS;
	}

	if (fscache_wait_for_deferred_lookup(cookie) < 0)
		return -ERESTARTSYS;

	op = fscache_alloc_retrieval(cookie, page->mapping, NULL, NULL);
	if (!op)
		return -ENOMEM;
	atomic_set(&op->n_pages, 1);
	trace_fscache_page_op(cookie, page, &op->op, fscache_page_op_alloc_one);

	spin_lock(&cookie->lock);

	if (!fscache_cookie_enabled(cookie) ||
	    hlist_empty(&cookie->backing_objects))
		goto nobufs_unlock;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	__fscache_use_cookie(cookie);
	if (fscache_submit_op(object, &op->op) < 0)
		goto nobufs_unlock_dec;
	spin_unlock(&cookie->lock);

	fscache_stat(&fscache_n_alloc_ops);

	ret = fscache_wait_for_operation_activation(
		object, &op->op,
		__fscache_stat(&fscache_n_alloc_op_waits),
		__fscache_stat(&fscache_n_allocs_object_dead));
	if (ret < 0)
		goto error;

	/* ask the cache to honour the operation */
	fscache_stat(&fscache_n_cop_allocate_page);
	ret = object->cache->ops->allocate_page(op, page, gfp);
	fscache_stat_d(&fscache_n_cop_allocate_page);

error:
	if (ret == -ERESTARTSYS)
		fscache_stat(&fscache_n_allocs_intr);
	else if (ret < 0)
		fscache_stat(&fscache_n_allocs_nobufs);
	else
		fscache_stat(&fscache_n_allocs_ok);

	fscache_put_retrieval(op);
	_leave(" = %d", ret);
	return ret;

nobufs_unlock_dec:
	wake_cookie = __fscache_unuse_cookie(cookie);
nobufs_unlock:
	spin_unlock(&cookie->lock);
	fscache_put_retrieval(op);
	if (wake_cookie)
		__fscache_wake_unused_cookie(cookie);
nobufs:
	fscache_stat(&fscache_n_allocs_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_alloc_page);

/*
 * Unmark pages allocated in the readahead code path (via:
 * fscache_readpages_or_alloc) after delegating to the base filesystem
 */
void __fscache_readpages_cancel(struct fscache_cookie *cookie,
				struct list_head *pages)
{
	struct page *page;

	list_for_each_entry(page, pages, lru) {
		if (PageFsCache(page))
			__fscache_uncache_page(cookie, page);
	}
}
EXPORT_SYMBOL(__fscache_readpages_cancel);

/*
 * release a write op reference
 */
static void fscache_release_write_op(struct fscache_operation *_op)
{
	_enter("{OP%x}", _op->debug_id);
}

/*
 * perform the background storage of a page into the cache
 */
static void fscache_write_op(struct fscache_operation *_op)
{
	struct fscache_storage *op =
		container_of(_op, struct fscache_storage, op);
	struct fscache_object *object = op->op.object;
	struct fscache_cookie *cookie;
	struct page *page;
	unsigned n;
	void *results[1];
	int ret;

	_enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage));

again:
	spin_lock(&object->lock);
	cookie = object->cookie;

	if (!fscache_object_is_active(object)) {
		/* If we get here, then the on-disk cache object likely no
		 * longer exists, so we should just cancel this write
		 * operation.
		 */
		spin_unlock(&object->lock);
		fscache_op_complete(&op->op, true);
		_leave(" [inactive]");
		return;
	}

	if (!cookie) {
		/* If we get here, then the cookie belonging to the object was
		 * detached, probably by the cookie being withdrawn due to
		 * memory pressure, which means that the pages we might write
		 * to the cache from no longer exist - therefore, we can just
		 * cancel this write operation.
		 */
		spin_unlock(&object->lock);
		fscache_op_complete(&op->op, true);
		_leave(" [cancel] op{f=%lx s=%u} obj{s=%s f=%lx}",
		       _op->flags, _op->state, object->state->short_name,
		       object->flags);
		return;
	}

	spin_lock(&cookie->stores_lock);

	fscache_stat(&fscache_n_store_calls);

	/* find a page to store */
	results[0] = NULL;
	page = NULL;
	n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0, 1,
				       FSCACHE_COOKIE_PENDING_TAG);
	trace_fscache_gang_lookup(cookie, &op->op, results, n, op->store_limit);
	if (n != 1)
		goto superseded;
	page = results[0];
	_debug("gang %d [%lx]", n, page->index);

	radix_tree_tag_set(&cookie->stores, page->index,
			   FSCACHE_COOKIE_STORING_TAG);
	radix_tree_tag_clear(&cookie->stores, page->index,
			     FSCACHE_COOKIE_PENDING_TAG);
	trace_fscache_page(cookie, page, fscache_page_radix_pend2store);

	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);

	if (page->index >= op->store_limit)
		goto discard_page;

	fscache_stat(&fscache_n_store_pages);
	fscache_stat(&fscache_n_cop_write_page);
	ret = object->cache->ops->write_page(op, page);
	fscache_stat_d(&fscache_n_cop_write_page);
	trace_fscache_wrote_page(cookie, page, &op->op, ret);
	fscache_end_page_write(object, page);
	if (ret < 0) {
		fscache_abort_object(object);
		fscache_op_complete(&op->op, true);
	} else {
		fscache_enqueue_operation(&op->op);
	}

	_leave("");
	return;

discard_page:
	fscache_stat(&fscache_n_store_pages_over_limit);
	trace_fscache_wrote_page(cookie, page, &op->op, -ENOBUFS);
	fscache_end_page_write(object, page);
	goto again;

superseded:
	/* this writer is going away and there aren't any more things to
	 * write */
	_debug("cease");
	spin_unlock(&cookie->stores_lock);
	clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
	spin_unlock(&object->lock);
	fscache_op_complete(&op->op, false);
	_leave("");
}
/*
 * Clear the pages pending writing for invalidation
 */
void fscache_invalidate_writes(struct fscache_cookie *cookie)
{
	struct page *page;
	void *results[16];
	int n, i;

	_enter("");

	for (;;) {
		spin_lock(&cookie->stores_lock);
		n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0,
					       ARRAY_SIZE(results),
					       FSCACHE_COOKIE_PENDING_TAG);
		if (n == 0) {
			spin_unlock(&cookie->stores_lock);
			break;
		}

		for (i = n - 1; i >= 0; i--) {
			page = results[i];
			radix_tree_delete(&cookie->stores, page->index);
			trace_fscache_page(cookie, page, fscache_page_radix_delete);
			trace_fscache_page(cookie, page, fscache_page_inval);
		}

		spin_unlock(&cookie->stores_lock);

		for (i = n - 1; i >= 0; i--)
			put_page(results[i]);
	}

	wake_up_bit(&cookie->flags, 0);
	trace_fscache_wake_cookie(cookie);

	_leave("");
}
/*
 * request a page be stored in the cache
 * - returns:
 *   -ENOMEM	- out of memory, nothing done
 *   -ENOBUFS	- no backing object available in which to cache the page
 *   0		- dispatched a write - it'll call end_io_func() when finished
 *
 * if the cookie still has a backing object at this point, that object can be
 * in one of a few states with respect to storage processing:
 *
 *  (1) negative lookup, object not yet created (FSCACHE_COOKIE_CREATING is
 *      set)
 *
 *	(a) no writes yet
 *
 *	(b) writes deferred till post-creation (mark page for writing and
 *	    return immediately)
 *
 *  (2) negative lookup, object created, initial fill being made from netfs
 *
 *	(a) fill point not yet reached this page (mark page for writing and
 *	    return)
 *
 *	(b) fill point passed this page (queue op to store this page)
 *
 *  (3) object extant (queue op to store this page)
 *
 * any other state is invalid
 */
int __fscache_write_page(struct fscache_cookie *cookie,
			 struct page *page,
			 loff_t object_size,
			 gfp_t gfp)
{
	struct fscache_storage *op;
	struct fscache_object *object;
	bool wake_cookie = false;
	int ret;

	_enter("%p,%x,", cookie, (u32) page->flags);

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERT(PageFsCache(page));

	fscache_stat(&fscache_n_stores);

	if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
		_leave(" = -ENOBUFS [invalidating]");
		return -ENOBUFS;
	}

	op = kzalloc(sizeof(*op), GFP_NOIO | __GFP_NOMEMALLOC | __GFP_NORETRY);
	if (!op)
		goto nomem;

	fscache_operation_init(cookie, &op->op, fscache_write_op, NULL,
			       fscache_release_write_op);
	op->op.flags = FSCACHE_OP_ASYNC |
		(1 << FSCACHE_OP_WAITING) |
		(1 << FSCACHE_OP_UNUSE_COOKIE);

	ret = radix_tree_maybe_preload(gfp & ~__GFP_HIGHMEM);
	if (ret < 0)
		goto nomem_free;

	trace_fscache_page_op(cookie, page, &op->op, fscache_page_op_write_one);

	ret = -ENOBUFS;
	spin_lock(&cookie->lock);

	if (!fscache_cookie_enabled(cookie) ||
	    hlist_empty(&cookie->backing_objects))
		goto nobufs;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);
	if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
		goto nobufs;

	trace_fscache_page(cookie, page, fscache_page_write);

	/* add the page to the pending-storage radix tree on the backing
	 * object */
	spin_lock(&object->lock);

	if (object->store_limit_l != object_size)
		fscache_set_store_limit(object, object_size);

	spin_lock(&cookie->stores_lock);

	_debug("store limit %llx", (unsigned long long) object->store_limit);

	ret = radix_tree_insert(&cookie->stores, page->index, page);
	if (ret < 0) {
		if (ret == -EEXIST)
			goto already_queued;
		_debug("insert failed %d", ret);
		goto nobufs_unlock_obj;
	}

	trace_fscache_page(cookie, page, fscache_page_radix_insert);
	radix_tree_tag_set(&cookie->stores, page->index,
			   FSCACHE_COOKIE_PENDING_TAG);
	trace_fscache_page(cookie, page, fscache_page_radix_set_pend);
	get_page(page);

	/* we only want one writer at a time, but we do need to queue new
	 * writers after exclusive ops */
	if (test_and_set_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags))
		goto already_pending;

	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);

	op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
	op->store_limit = object->store_limit;

	__fscache_use_cookie(cookie);
	if (fscache_submit_op(object, &op->op) < 0)
		goto submit_failed;

	spin_unlock(&cookie->lock);
	radix_tree_preload_end();
	fscache_stat(&fscache_n_store_ops);
	fscache_stat(&fscache_n_stores_ok);

	/* the work queue now carries its own ref on the object */
	fscache_put_operation(&op->op);
	_leave(" = 0");
	return 0;

already_queued:
	fscache_stat(&fscache_n_stores_again);
already_pending:
	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);
	spin_unlock(&cookie->lock);
	radix_tree_preload_end();
	fscache_put_operation(&op->op);
	fscache_stat(&fscache_n_stores_ok);
	_leave(" = 0");
	return 0;

submit_failed:
	spin_lock(&cookie->stores_lock);
	radix_tree_delete(&cookie->stores, page->index);
	trace_fscache_page(cookie, page, fscache_page_radix_delete);
	spin_unlock(&cookie->stores_lock);
	wake_cookie = __fscache_unuse_cookie(cookie);
	put_page(page);
	ret = -ENOBUFS;
	goto nobufs;

nobufs_unlock_obj:
	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);
nobufs:
	spin_unlock(&cookie->lock);
	radix_tree_preload_end();
	fscache_put_operation(&op->op);
	if (wake_cookie)
		__fscache_wake_unused_cookie(cookie);
	fscache_stat(&fscache_n_stores_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;

nomem_free:
	fscache_put_operation(&op->op);
nomem:
	fscache_stat(&fscache_n_stores_oom);
	_leave(" = -ENOMEM");
	return -ENOMEM;
}
EXPORT_SYMBOL(__fscache_write_page);
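
/*
 * Illustrative sketch (not part of this file): once a netfs has a page worth
 * caching (for example after a server read completes), it can push a copy
 * into the cache with the fscache_write_page() wrapper; if the store cannot
 * even be queued, the page should be uncached again so it does not stay
 * marked PG_fscache.  The object-size argument reflects this version of the
 * API and the "mynetfs" names are hypothetical:
 *
 *	static void mynetfs_write_to_cache(struct inode *inode, struct page *page)
 *	{
 *		struct fscache_cookie *cookie = mynetfs_i_cookie(inode);
 *		int ret;
 *
 *		ret = fscache_write_page(cookie, page, i_size_read(inode),
 *					 GFP_KERNEL);
 *		if (ret != 0)
 *			fscache_uncache_page(cookie, page);
 *	}
 */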

/*
 * remove a page from the cache
 */
void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
{
	struct fscache_object *object;

	_enter(",%p", page);

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(page, !=, NULL);

	fscache_stat(&fscache_n_uncaches);

	/* cache withdrawal may beat us to it */
	if (!PageFsCache(page))
		goto done;

	trace_fscache_page(cookie, page, fscache_page_uncache);

	/* get the object */
	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects)) {
		ClearPageFsCache(page);
		goto done_unlock;
	}

	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	/* there might now be stuff on disk we could read */
	clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);

	/* only invoke the cache backend if we managed to mark the page
	 * uncached here; this deals with synchronisation vs withdrawal */
	if (TestClearPageFsCache(page) &&
	    object->cache->ops->uncache_page) {
		/* the cache backend releases the cookie lock */
		fscache_stat(&fscache_n_cop_uncache_page);
		object->cache->ops->uncache_page(object, page);
		fscache_stat_d(&fscache_n_cop_uncache_page);
		goto done;
	}

done_unlock:
	spin_unlock(&cookie->lock);
done:
	_leave("");
}
EXPORT_SYMBOL(__fscache_uncache_page);

/**
 * fscache_mark_page_cached - Mark a page as being cached
 * @op: The retrieval op pages are being marked for
 * @page: The page to be marked
 *
 * Mark a netfs page as being cached.  After this is called, the netfs
 * must call fscache_uncache_page() to remove the mark.
 */
void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
{
	struct fscache_cookie *cookie = op->op.object->cookie;

#ifdef CONFIG_FSCACHE_STATS
	atomic_inc(&fscache_n_marks);
#endif

	trace_fscache_page(cookie, page, fscache_page_cached);

	_debug("- mark %p{%lx}", page, page->index);
	if (TestSetPageFsCache(page)) {
		static bool once_only;
		if (!once_only) {
			once_only = true;
			pr_warn("Cookie type %s marked page %lx multiple times\n",
				cookie->def->name, page->index);
		}
	}

	if (cookie->def->mark_page_cached)
		cookie->def->mark_page_cached(cookie->netfs_data,
					      op->mapping, page);
}
EXPORT_SYMBOL(fscache_mark_page_cached);

/**
 * fscache_mark_pages_cached - Mark pages as being cached
 * @op: The retrieval op pages are being marked for
 * @pagevec: The pages to be marked
 *
 * Mark a bunch of netfs pages as being cached.  After this is called,
 * the netfs must call fscache_uncache_page() to remove the mark.
 */
void fscache_mark_pages_cached(struct fscache_retrieval *op,
			       struct pagevec *pagevec)
{
	unsigned long loop;

	for (loop = 0; loop < pagevec->nr; loop++)
		fscache_mark_page_cached(op, pagevec->pages[loop]);

	pagevec_reinit(pagevec);
}
EXPORT_SYMBOL(fscache_mark_pages_cached);

/*
 * Uncache all the pages in an inode that are marked PG_fscache, assuming them
 * to be associated with the given cookie.
 */
void __fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
				       struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	struct pagevec pvec;
	pgoff_t next;
	int i;

	_enter("%p,%p", cookie, inode);

	if (!mapping || mapping->nrpages == 0) {
		_leave(" [no pages]");
		return;
	}

	pagevec_init(&pvec);
	next = 0;
	do {
		if (!pagevec_lookup(&pvec, mapping, &next))
			break;
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			if (PageFsCache(page)) {
				__fscache_wait_on_page_write(cookie, page);
				__fscache_uncache_page(cookie, page);
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	} while (next);

	_leave("");
}
EXPORT_SYMBOL(__fscache_uncache_all_inode_pages);
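
/*
 * Illustrative sketch (not part of this file): a netfs reaches this through
 * the fscache_uncache_all_inode_pages() wrapper when it stops caching an
 * inode while pages may still be marked PG_fscache; each such page is waited
 * on and uncached by the loop above.  The "mynetfs" names are hypothetical:
 *
 *	static void mynetfs_disable_caching_for_inode(struct inode *inode)
 *	{
 *		struct fscache_cookie *cookie = mynetfs_i_cookie(inode);
 *
 *		if (cookie)
 *			fscache_uncache_all_inode_pages(cookie, inode);
 *	}
 */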