/* FS-Cache object state machine handler
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells ([email protected])
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * See Documentation/filesystems/caching/object.txt for a description of the
 * object state machine and the in-kernel representations.
 */

#define FSCACHE_DEBUG_LEVEL COOKIE
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include "internal.h"

static const struct fscache_state *fscache_abort_initialisation(struct fscache_object *, int);
static const struct fscache_state *fscache_kill_dependents(struct fscache_object *, int);
static const struct fscache_state *fscache_drop_object(struct fscache_object *, int);
static const struct fscache_state *fscache_initialise_object(struct fscache_object *, int);
static const struct fscache_state *fscache_invalidate_object(struct fscache_object *, int);
static const struct fscache_state *fscache_jumpstart_dependents(struct fscache_object *, int);
static const struct fscache_state *fscache_kill_object(struct fscache_object *, int);
static const struct fscache_state *fscache_lookup_failure(struct fscache_object *, int);
static const struct fscache_state *fscache_look_up_object(struct fscache_object *, int);
static const struct fscache_state *fscache_object_available(struct fscache_object *, int);
static const struct fscache_state *fscache_parent_ready(struct fscache_object *, int);
static const struct fscache_state *fscache_update_object(struct fscache_object *, int);
static const struct fscache_state *fscache_object_dead(struct fscache_object *, int);

#define __STATE_NAME(n) fscache_osm_##n
#define STATE(n) (&__STATE_NAME(n))

/*
 * Define a work state. Work states are execution states. No event processing
 * is performed by them. The function attached to a work state returns a
 * pointer indicating the next state to which the state machine should
 * transition. Returning NO_TRANSIT repeats the current state, but goes back
 * to the scheduler first.
 */
#define WORK_STATE(n, sn, f) \
	const struct fscache_state __STATE_NAME(n) = {	\
		.name = #n,				\
		.short_name = sn,			\
		.work = f				\
	}

/*
 * Returns from work states.
 */
#define transit_to(state) ({ prefetch(&STATE(state)->work); STATE(state); })

#define NO_TRANSIT ((struct fscache_state *)NULL)

/*
 * Define a wait state. Wait states are event processing states. No execution
 * is performed by them. Wait states are just tables of "if event X occurs,
 * clear it and transition to state Y". The dispatcher returns to the
 * scheduler if none of the events in which the wait state has an interest are
 * currently pending.
 */
#define WAIT_STATE(n, sn, ...) \
	const struct fscache_state __STATE_NAME(n) = {		\
		.name = #n,					\
		.short_name = sn,				\
		.work = NULL,					\
		.transitions = { __VA_ARGS__, { 0, NULL } }	\
	}

#define TRANSIT_TO(state, emask) \
	{ .events = (emask), .transit_to = STATE(state) }

/*
 * The object state machine.
 */
static WORK_STATE(INIT_OBJECT, "INIT", fscache_initialise_object);
static WORK_STATE(PARENT_READY, "PRDY", fscache_parent_ready);
static WORK_STATE(ABORT_INIT, "ABRT", fscache_abort_initialisation);
static WORK_STATE(LOOK_UP_OBJECT, "LOOK", fscache_look_up_object);
static WORK_STATE(CREATE_OBJECT, "CRTO", fscache_look_up_object);
static WORK_STATE(OBJECT_AVAILABLE, "AVBL", fscache_object_available);
static WORK_STATE(JUMPSTART_DEPS, "JUMP", fscache_jumpstart_dependents);

static WORK_STATE(INVALIDATE_OBJECT, "INVL", fscache_invalidate_object);
static WORK_STATE(UPDATE_OBJECT, "UPDT", fscache_update_object);

static WORK_STATE(LOOKUP_FAILURE, "LCFL", fscache_lookup_failure);
static WORK_STATE(KILL_OBJECT, "KILL", fscache_kill_object);
static WORK_STATE(KILL_DEPENDENTS, "KDEP", fscache_kill_dependents);
static WORK_STATE(DROP_OBJECT, "DROP", fscache_drop_object);
static WORK_STATE(OBJECT_DEAD, "DEAD", fscache_object_dead);

static WAIT_STATE(WAIT_FOR_INIT, "?INI",
		  TRANSIT_TO(INIT_OBJECT, 1 << FSCACHE_OBJECT_EV_NEW_CHILD));

static WAIT_STATE(WAIT_FOR_PARENT, "?PRN",
		  TRANSIT_TO(PARENT_READY, 1 << FSCACHE_OBJECT_EV_PARENT_READY));

static WAIT_STATE(WAIT_FOR_CMD, "?CMD",
		  TRANSIT_TO(INVALIDATE_OBJECT, 1 << FSCACHE_OBJECT_EV_INVALIDATE),
		  TRANSIT_TO(UPDATE_OBJECT, 1 << FSCACHE_OBJECT_EV_UPDATE),
		  TRANSIT_TO(JUMPSTART_DEPS, 1 << FSCACHE_OBJECT_EV_NEW_CHILD));

static WAIT_STATE(WAIT_FOR_CLEARANCE, "?CLR",
		  TRANSIT_TO(KILL_OBJECT, 1 << FSCACHE_OBJECT_EV_CLEARED));

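/*
 * Illustration only (not part of the original source): given the macros
 * above, a wait-state definition such as WAIT_FOR_CLEARANCE expands to
 * roughly the following:
 *
 *	static const struct fscache_state fscache_osm_WAIT_FOR_CLEARANCE = {
 *		.name		= "WAIT_FOR_CLEARANCE",
 *		.short_name	= "?CLR",
 *		.work		= NULL,
 *		.transitions	= {
 *			{ .events	= 1 << FSCACHE_OBJECT_EV_CLEARED,
 *			  .transit_to	= &fscache_osm_KILL_OBJECT },
 *			{ 0, NULL }
 *		},
 *	};
 *
 * i.e. a table the dispatcher can scan for a pending event and the state to
 * move to when it finds one.
 */
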
/*
 * Out-of-band event transition tables. These are for handling unexpected
 * events, such as an I/O error. If an OOB event occurs, the state machine
 * clears and disables the event and forces a transition to the nominated work
 * state (a currently executing work state will complete first).
 *
 * In such a situation, object->state remembers the state the machine should
 * have been in or gone to, and returning NO_TRANSIT resumes from that state.
 */
static const struct fscache_transition fscache_osm_init_oob[] = {
	TRANSIT_TO(ABORT_INIT,
		   (1 << FSCACHE_OBJECT_EV_ERROR) |
		   (1 << FSCACHE_OBJECT_EV_KILL)),
	{ 0, NULL }
};

static const struct fscache_transition fscache_osm_lookup_oob[] = {
	TRANSIT_TO(LOOKUP_FAILURE,
		   (1 << FSCACHE_OBJECT_EV_ERROR) |
		   (1 << FSCACHE_OBJECT_EV_KILL)),
	{ 0, NULL }
};

static const struct fscache_transition fscache_osm_run_oob[] = {
	TRANSIT_TO(KILL_OBJECT,
		   (1 << FSCACHE_OBJECT_EV_ERROR) |
		   (1 << FSCACHE_OBJECT_EV_KILL)),
	{ 0, NULL }
};

static int  fscache_get_object(struct fscache_object *,
			       enum fscache_obj_ref_trace);
static void fscache_put_object(struct fscache_object *,
			       enum fscache_obj_ref_trace);
static bool fscache_enqueue_dependents(struct fscache_object *, int);
static void fscache_dequeue_object(struct fscache_object *);
static void fscache_update_aux_data(struct fscache_object *);

/*
 * We need to notify the parent when an op that we had outstanding on it
 * completes.
 */
static inline void fscache_done_parent_op(struct fscache_object *object)
{
	struct fscache_object *parent = object->parent;

	_enter("OBJ%x {OBJ%x,%x}",
	       object->debug_id, parent->debug_id, parent->n_ops);

	spin_lock_nested(&parent->lock, 1);
	parent->n_obj_ops--;
	parent->n_ops--;
	if (parent->n_ops == 0)
		fscache_raise_event(parent, FSCACHE_OBJECT_EV_CLEARED);
	spin_unlock(&parent->lock);
}

/*
 * Object state machine dispatcher.
 */
static void fscache_object_sm_dispatcher(struct fscache_object *object)
{
	const struct fscache_transition *t;
	const struct fscache_state *state, *new_state;
	unsigned long events, event_mask;
	bool oob;
	int event = -1;

	ASSERT(object != NULL);

	_enter("{OBJ%x,%s,%lx}",
	       object->debug_id, object->state->name, object->events);

	event_mask = object->event_mask;
restart:
	object->event_mask = 0; /* Mask normal event handling */
	state = object->state;
restart_masked:
	events = object->events;

	/* Handle any out-of-band events (typically an error) */
	if (events & object->oob_event_mask) {
		_debug("{OBJ%x} oob %lx",
		       object->debug_id, events & object->oob_event_mask);
		oob = true;
		for (t = object->oob_table; t->events; t++) {
			if (events & t->events) {
				state = t->transit_to;
				ASSERT(state->work != NULL);
				event = fls(events & t->events) - 1;
				__clear_bit(event, &object->oob_event_mask);
				clear_bit(event, &object->events);
				goto execute_work_state;
			}
		}
	}
	oob = false;

	/* Wait states are just transition tables */
	if (!state->work) {
		if (events & event_mask) {
			for (t = state->transitions; t->events; t++) {
				if (events & t->events) {
					new_state = t->transit_to;
					event = fls(events & t->events) - 1;
					trace_fscache_osm(object, state,
							  true, false, event);
					clear_bit(event, &object->events);
					_debug("{OBJ%x} ev %d: %s -> %s",
					       object->debug_id, event,
					       state->name, new_state->name);
					object->state = state = new_state;
					goto execute_work_state;
				}
			}

			/* The event mask didn't include all the tabled bits */
			BUG();
		}
		/* Randomly woke up */
		goto unmask_events;
	}

execute_work_state:
	_debug("{OBJ%x} exec %s", object->debug_id, state->name);

	trace_fscache_osm(object, state, false, oob, event);
	new_state = state->work(object, event);
	event = -1;
	if (new_state == NO_TRANSIT) {
		_debug("{OBJ%x} %s notrans", object->debug_id, state->name);
		if (unlikely(state == STATE(OBJECT_DEAD))) {
			_leave(" [dead]");
			return;
		}
		fscache_enqueue_object(object);
		event_mask = object->oob_event_mask;
		goto unmask_events;
	}

	_debug("{OBJ%x} %s -> %s",
	       object->debug_id, state->name, new_state->name);
	object->state = state = new_state;

	if (state->work) {
		if (unlikely(state == STATE(OBJECT_DEAD))) {
			_leave(" [dead]");
			return;
		}
		goto restart_masked;
	}

	/* Transited to wait state */
	event_mask = object->oob_event_mask;
	for (t = state->transitions; t->events; t++)
		event_mask |= t->events;

unmask_events:
	object->event_mask = event_mask;
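	/* Make the unmasking visible before we re-read ->events below so that
	 * an event raised whilst handling was masked is seen here rather than
	 * lost (presumably pairing with the bit operations performed by
	 * fscache_raise_event()).
	 */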
	smp_mb();
	events = object->events;
	if (events & event_mask)
		goto restart;
	_leave(" [msk %lx]", event_mask);
}

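/*
 * For orientation (an illustrative summary, not original commentary): events
 * are delivered to an object by fscache_raise_event(), which sets the
 * corresponding bit in object->events and, if that bit is currently enabled
 * in object->event_mask, enqueues the object so that the work function below
 * runs the dispatcher above.
 */
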
/*
 * execute an object
 */
static void fscache_object_work_func(struct work_struct *work)
{
	struct fscache_object *object =
		container_of(work, struct fscache_object, work);
	unsigned long start;

	_enter("{OBJ%x}", object->debug_id);

	start = jiffies;
	fscache_object_sm_dispatcher(object);
	fscache_hist(fscache_objs_histogram, start);
	fscache_put_object(object, fscache_obj_put_work);
}

/**
 * fscache_object_init - Initialise a cache object description
 * @object: Object description
 * @cookie: Cookie object will be attached to
 * @cache: Cache in which backing object will be found
 *
 * Initialise a cache object description to its basic values.
 *
 * See Documentation/filesystems/caching/backend-api.txt for a complete
 * description.
 */
void fscache_object_init(struct fscache_object *object,
			 struct fscache_cookie *cookie,
			 struct fscache_cache *cache)
{
	const struct fscache_transition *t;

	atomic_inc(&cache->object_count);

	object->state = STATE(WAIT_FOR_INIT);
	object->oob_table = fscache_osm_init_oob;
	object->flags = 1 << FSCACHE_OBJECT_IS_LIVE;
	spin_lock_init(&object->lock);
	INIT_LIST_HEAD(&object->cache_link);
	INIT_HLIST_NODE(&object->cookie_link);
	INIT_WORK(&object->work, fscache_object_work_func);
	INIT_LIST_HEAD(&object->dependents);
	INIT_LIST_HEAD(&object->dep_link);
	INIT_LIST_HEAD(&object->pending_ops);
	object->n_children = 0;
	object->n_ops = object->n_in_progress = object->n_exclusive = 0;
	object->events = 0;
	object->store_limit = 0;
	object->store_limit_l = 0;
	object->cache = cache;
	object->cookie = cookie;
	fscache_cookie_get(cookie, fscache_cookie_get_attach_object);
	object->parent = NULL;
#ifdef CONFIG_FSCACHE_OBJECT_LIST
	RB_CLEAR_NODE(&object->objlist_link);
#endif

	object->oob_event_mask = 0;
	for (t = object->oob_table; t->events; t++)
		object->oob_event_mask |= t->events;
	object->event_mask = object->oob_event_mask;
	for (t = object->state->transitions; t->events; t++)
		object->event_mask |= t->events;
}
EXPORT_SYMBOL(fscache_object_init);

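/*
 * Illustration only (hypothetical backend type names, not part of this file):
 * a cache backend normally embeds struct fscache_object in its own object
 * record and calls fscache_object_init() when it allocates one, along the
 * lines of:
 *
 *	struct mycache_object {
 *		struct fscache_object	fscache;
 *		// backend-private fields ...
 *	};
 *
 *	fscache_object_init(&obj->fscache, cookie, cache);
 *
 * The backend then manages the object until its ->drop_object() method is
 * called on it.
 */
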
/*
 * Mark the object as no longer being live, making sure that we synchronise
 * against op submission.
 */
static inline void fscache_mark_object_dead(struct fscache_object *object)
{
	spin_lock(&object->lock);
	clear_bit(FSCACHE_OBJECT_IS_LIVE, &object->flags);
	spin_unlock(&object->lock);
}

/*
 * Abort object initialisation before we start it.
 */
static const struct fscache_state *fscache_abort_initialisation(struct fscache_object *object,
								int event)
{
	_enter("{OBJ%x},%d", object->debug_id, event);

	object->oob_event_mask = 0;
	fscache_dequeue_object(object);
	return transit_to(KILL_OBJECT);
}

/*
 * initialise an object
 * - check the specified object's parent to see if we can make use of it
 *   immediately to do a creation
 * - we may need to start the process of creating a parent and we need to wait
 *   for the parent's lookup and creation to complete if it's not there yet
 */
static const struct fscache_state *fscache_initialise_object(struct fscache_object *object,
							      int event)
{
	struct fscache_object *parent;
	bool success;

	_enter("{OBJ%x},%d", object->debug_id, event);

	ASSERT(list_empty(&object->dep_link));

	parent = object->parent;
	if (!parent) {
		_leave(" [no parent]");
		return transit_to(DROP_OBJECT);
	}

	_debug("parent: %s of:%lx", parent->state->name, parent->flags);

	if (fscache_object_is_dying(parent)) {
		_leave(" [bad parent]");
		return transit_to(DROP_OBJECT);
	}

	if (fscache_object_is_available(parent)) {
		_leave(" [ready]");
		return transit_to(PARENT_READY);
	}

	_debug("wait");

	spin_lock(&parent->lock);
	fscache_stat(&fscache_n_cop_grab_object);
	success = false;
	if (fscache_object_is_live(parent) &&
	    object->cache->ops->grab_object(object, fscache_obj_get_add_to_deps)) {
		list_add(&object->dep_link, &parent->dependents);
		success = true;
	}
	fscache_stat_d(&fscache_n_cop_grab_object);
	spin_unlock(&parent->lock);
	if (!success) {
		_leave(" [grab failed]");
		return transit_to(DROP_OBJECT);
	}

	/* fscache_acquire_non_index_cookie() uses this
	 * to wake the chain up */
	fscache_raise_event(parent, FSCACHE_OBJECT_EV_NEW_CHILD);
	_leave(" [wait]");
	return transit_to(WAIT_FOR_PARENT);
}

/*
 * Once the parent object is ready, we should kick off our lookup op.
 */
static const struct fscache_state *fscache_parent_ready(struct fscache_object *object,
							 int event)
{
	struct fscache_object *parent = object->parent;

	_enter("{OBJ%x},%d", object->debug_id, event);

	ASSERT(parent != NULL);

	spin_lock(&parent->lock);
	parent->n_ops++;
	parent->n_obj_ops++;
	object->lookup_jif = jiffies;
	spin_unlock(&parent->lock);

	_leave("");
	return transit_to(LOOK_UP_OBJECT);
}

/*
 * look an object up in the cache from which it was allocated
 * - we hold an "access lock" on the parent object, so the parent object cannot
 *   be withdrawn by either party till we've finished
 */
static const struct fscache_state *fscache_look_up_object(struct fscache_object *object,
							   int event)
{
	struct fscache_cookie *cookie = object->cookie;
	struct fscache_object *parent = object->parent;
	int ret;

	_enter("{OBJ%x},%d", object->debug_id, event);

	object->oob_table = fscache_osm_lookup_oob;

	ASSERT(parent != NULL);
	ASSERTCMP(parent->n_ops, >, 0);
	ASSERTCMP(parent->n_obj_ops, >, 0);

	/* make sure the parent is still available */
	ASSERT(fscache_object_is_available(parent));

	if (fscache_object_is_dying(parent) ||
	    test_bit(FSCACHE_IOERROR, &object->cache->flags) ||
	    !fscache_use_cookie(object)) {
		_leave(" [unavailable]");
		return transit_to(LOOKUP_FAILURE);
	}

	_debug("LOOKUP \"%s\" in \"%s\"",
	       cookie->def->name, object->cache->tag->name);

	fscache_stat(&fscache_n_object_lookups);
	fscache_stat(&fscache_n_cop_lookup_object);
	ret = object->cache->ops->lookup_object(object);
	fscache_stat_d(&fscache_n_cop_lookup_object);

	fscache_unuse_cookie(object);

	if (ret == -ETIMEDOUT) {
		/* probably stuck behind another object, so move this one to
		 * the back of the queue */
		fscache_stat(&fscache_n_object_lookups_timed_out);
		_leave(" [timeout]");
		return NO_TRANSIT;
	}

	if (ret < 0) {
		_leave(" [error]");
		return transit_to(LOOKUP_FAILURE);
	}

	_leave(" [ok]");
	return transit_to(OBJECT_AVAILABLE);
}

/**
 * fscache_object_lookup_negative - Note negative cookie lookup
 * @object: Object pointing to cookie to mark
 *
 * Note negative lookup, permitting those waiting to read data from an already
 * existing backing object to continue as there's no data for them to read.
 */
void fscache_object_lookup_negative(struct fscache_object *object)
{
	struct fscache_cookie *cookie = object->cookie;

	_enter("{OBJ%x,%s}", object->debug_id, object->state->name);

	if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
		fscache_stat(&fscache_n_object_lookups_negative);

		/* Allow write requests to begin stacking up and read requests to begin
		 * returning ENODATA.
		 */
		set_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
		clear_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags);

		_debug("wake up lookup %p", &cookie->flags);
		clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
		wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
	}
	_leave("");
}
EXPORT_SYMBOL(fscache_object_lookup_negative);

/**
 * fscache_obtained_object - Note successful object lookup or creation
 * @object: Object pointing to cookie to mark
 *
 * Note successful lookup and/or creation, permitting those waiting to write
 * data to a backing object to continue.
 *
 * Note that after calling this, an object's cookie may be relinquished by the
 * netfs, and so must be accessed with object lock held.
 */
void fscache_obtained_object(struct fscache_object *object)
{
	struct fscache_cookie *cookie = object->cookie;

	_enter("{OBJ%x,%s}", object->debug_id, object->state->name);

	/* if we were still looking up, then we must have a positive lookup
	 * result, in which case there may be data available */
	if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
		fscache_stat(&fscache_n_object_lookups_positive);

		/* We do (presumably) have data */
		clear_bit_unlock(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
		clear_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags);

		/* Allow write requests to begin stacking up and read requests
		 * to begin shovelling data.
		 */
		clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
		wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
	} else {
		fscache_stat(&fscache_n_object_created);
	}

	set_bit(FSCACHE_OBJECT_IS_AVAILABLE, &object->flags);
	_leave("");
}
EXPORT_SYMBOL(fscache_obtained_object);

/*
 * handle an object that has just become available
 */
static const struct fscache_state *fscache_object_available(struct fscache_object *object,
							     int event)
{
	_enter("{OBJ%x},%d", object->debug_id, event);

	object->oob_table = fscache_osm_run_oob;

	spin_lock(&object->lock);

	fscache_done_parent_op(object);
	if (object->n_in_progress == 0) {
		if (object->n_ops > 0) {
			ASSERTCMP(object->n_ops, >=, object->n_obj_ops);
			fscache_start_operations(object);
		} else {
			ASSERT(list_empty(&object->pending_ops));
		}
	}
	spin_unlock(&object->lock);

	fscache_stat(&fscache_n_cop_lookup_complete);
	object->cache->ops->lookup_complete(object);
	fscache_stat_d(&fscache_n_cop_lookup_complete);

	fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
	fscache_stat(&fscache_n_object_avail);

	_leave("");
	return transit_to(JUMPSTART_DEPS);
}

/*
 * Wake up this object's dependent objects now that we've become available.
 */
static const struct fscache_state *fscache_jumpstart_dependents(struct fscache_object *object,
								int event)
{
	_enter("{OBJ%x},%d", object->debug_id, event);

	if (!fscache_enqueue_dependents(object, FSCACHE_OBJECT_EV_PARENT_READY))
		return NO_TRANSIT; /* Not finished; requeue */
	return transit_to(WAIT_FOR_CMD);
}

623 | /* |
624 | * Handle lookup or creation failute. | |
625 | */ | |
626 | static const struct fscache_state *fscache_lookup_failure(struct fscache_object *object, | |
627 | int event) | |
628 | { | |
629 | struct fscache_cookie *cookie; | |
6897e3df | 630 | |
caaef690 | 631 | _enter("{OBJ%x},%d", object->debug_id, event); |
36c95590 | 632 | |
caaef690 | 633 | object->oob_event_mask = 0; |
36c95590 | 634 | |
caaef690 DH |
635 | fscache_stat(&fscache_n_cop_lookup_complete); |
636 | object->cache->ops->lookup_complete(object); | |
637 | fscache_stat_d(&fscache_n_cop_lookup_complete); | |
36c95590 | 638 | |
6515d1db DH |
639 | set_bit(FSCACHE_OBJECT_KILLED_BY_CACHE, &object->flags); |
640 | ||
caaef690 DH |
641 | cookie = object->cookie; |
642 | set_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags); | |
1362729b | 643 | if (test_and_clear_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags)) |
caaef690 | 644 | wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP); |
36c95590 | 645 | |
caaef690 DH |
646 | fscache_done_parent_op(object); |
647 | return transit_to(KILL_OBJECT); | |
648 | } | |
649 | ||
650 | /* | |
651 | * Wait for completion of all active operations on this object and the death of | |
652 | * all child objects of this object. | |
653 | */ | |
654 | static const struct fscache_state *fscache_kill_object(struct fscache_object *object, | |
655 | int event) | |
656 | { | |
657 | _enter("{OBJ%x,%d,%d},%d", | |
658 | object->debug_id, object->n_ops, object->n_children, event); | |
659 | ||
f09b443d | 660 | fscache_mark_object_dead(object); |
1362729b | 661 | object->oob_event_mask = 0; |
caaef690 | 662 | |
6bdded59 DH |
663 | if (test_bit(FSCACHE_OBJECT_RETIRED, &object->flags)) { |
664 | /* Reject any new read/write ops and abort any that are pending. */ | |
665 | clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags); | |
666 | fscache_cancel_all_ops(object); | |
667 | } | |
668 | ||
caaef690 DH |
669 | if (list_empty(&object->dependents) && |
670 | object->n_ops == 0 && | |
671 | object->n_children == 0) | |
1362729b | 672 | return transit_to(DROP_OBJECT); |
caaef690 | 673 | |
1362729b DH |
674 | if (object->n_in_progress == 0) { |
675 | spin_lock(&object->lock); | |
676 | if (object->n_ops > 0 && object->n_in_progress == 0) | |
677 | fscache_start_operations(object); | |
678 | spin_unlock(&object->lock); | |
679 | } | |
caaef690 DH |
680 | |
681 | if (!list_empty(&object->dependents)) | |
682 | return transit_to(KILL_DEPENDENTS); | |
683 | ||
684 | return transit_to(WAIT_FOR_CLEARANCE); | |
36c95590 DH |
685 | } |
686 | ||
687 | /* | |
caaef690 | 688 | * Kill dependent objects. |
36c95590 | 689 | */ |
caaef690 DH |
690 | static const struct fscache_state *fscache_kill_dependents(struct fscache_object *object, |
691 | int event) | |
36c95590 | 692 | { |
caaef690 | 693 | _enter("{OBJ%x},%d", object->debug_id, event); |
36c95590 | 694 | |
caaef690 DH |
695 | if (!fscache_enqueue_dependents(object, FSCACHE_OBJECT_EV_KILL)) |
696 | return NO_TRANSIT; /* Not finished */ | |
697 | return transit_to(WAIT_FOR_CLEARANCE); | |
36c95590 DH |
698 | } |
699 | ||
36c95590 | 700 | /* |
caaef690 DH |
701 | * Drop an object's attachments |
702 | */ | |
703 | static const struct fscache_state *fscache_drop_object(struct fscache_object *object, | |
704 | int event) | |
36c95590 | 705 | { |
caaef690 | 706 | struct fscache_object *parent = object->parent; |
1362729b | 707 | struct fscache_cookie *cookie = object->cookie; |
caaef690 | 708 | struct fscache_cache *cache = object->cache; |
1362729b | 709 | bool awaken = false; |
36c95590 | 710 | |
caaef690 | 711 | _enter("{OBJ%x,%d},%d", object->debug_id, object->n_children, event); |
36c95590 | 712 | |
1362729b DH |
713 | ASSERT(cookie != NULL); |
714 | ASSERT(!hlist_unhashed(&object->cookie_link)); | |
715 | ||
402cb8dd DH |
716 | if (test_bit(FSCACHE_COOKIE_AUX_UPDATED, &cookie->flags)) { |
717 | _debug("final update"); | |
718 | fscache_update_aux_data(object); | |
719 | } | |
720 | ||
1362729b DH |
721 | /* Make sure the cookie no longer points here and that the netfs isn't |
722 | * waiting for us. | |
723 | */ | |
724 | spin_lock(&cookie->lock); | |
725 | hlist_del_init(&object->cookie_link); | |
94d30ae9 DH |
726 | if (hlist_empty(&cookie->backing_objects) && |
727 | test_and_clear_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) | |
1362729b DH |
728 | awaken = true; |
729 | spin_unlock(&cookie->lock); | |
730 | ||
731 | if (awaken) | |
732 | wake_up_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING); | |
caaef690 DH |
733 | |
734 | /* Prevent a race with our last child, which has to signal EV_CLEARED | |
735 | * before dropping our spinlock. | |
736 | */ | |
36c95590 | 737 | spin_lock(&object->lock); |
36c95590 DH |
738 | spin_unlock(&object->lock); |
739 | ||
caaef690 DH |
740 | /* Discard from the cache's collection of objects */ |
741 | spin_lock(&cache->object_list_lock); | |
742 | list_del_init(&object->cache_link); | |
743 | spin_unlock(&cache->object_list_lock); | |
744 | ||
745 | fscache_stat(&fscache_n_cop_drop_object); | |
746 | cache->ops->drop_object(object); | |
747 | fscache_stat_d(&fscache_n_cop_drop_object); | |
748 | ||
	/* The parent object wants to know when all its dependents have gone */
	if (parent) {
		_debug("release parent OBJ%x {%d}",
		       parent->debug_id, parent->n_children);

		spin_lock(&parent->lock);
		parent->n_children--;
		if (parent->n_children == 0)
			fscache_raise_event(parent, FSCACHE_OBJECT_EV_CLEARED);
		spin_unlock(&parent->lock);
		object->parent = NULL;
	}

	/* this just shifts the object release to the work processor */
	fscache_put_object(object, fscache_obj_put_drop_obj);
	fscache_stat(&fscache_n_object_dead);

	_leave("");
	return transit_to(OBJECT_DEAD);
}

/*
 * get a ref on an object
 */
static int fscache_get_object(struct fscache_object *object,
			      enum fscache_obj_ref_trace why)
{
	int ret;

	fscache_stat(&fscache_n_cop_grab_object);
	ret = object->cache->ops->grab_object(object, why) ? 0 : -EAGAIN;
	fscache_stat_d(&fscache_n_cop_grab_object);
	return ret;
}

/*
 * Discard a ref on an object
 */
static void fscache_put_object(struct fscache_object *object,
			       enum fscache_obj_ref_trace why)
{
	fscache_stat(&fscache_n_cop_put_object);
	object->cache->ops->put_object(object, why);
	fscache_stat_d(&fscache_n_cop_put_object);
}

/**
 * fscache_object_destroy - Note that a cache object is about to be destroyed
 * @object: The object to be destroyed
 *
 * Note the imminent destruction and deallocation of a cache object record.
 */
void fscache_object_destroy(struct fscache_object *object)
{
	fscache_objlist_remove(object);

	/* We can get rid of the cookie now */
	fscache_cookie_put(object->cookie, fscache_cookie_put_object);
	object->cookie = NULL;
}
EXPORT_SYMBOL(fscache_object_destroy);

/*
 * enqueue an object for metadata-type processing
 */
void fscache_enqueue_object(struct fscache_object *object)
{
	_enter("{OBJ%x}", object->debug_id);

	if (fscache_get_object(object, fscache_obj_get_queue) >= 0) {
		wait_queue_head_t *cong_wq =
			&get_cpu_var(fscache_object_cong_wait);

		if (queue_work(fscache_object_wq, &object->work)) {
			if (fscache_object_congested())
				wake_up(cong_wq);
		} else
			fscache_put_object(object, fscache_obj_put_queue);

		put_cpu_var(fscache_object_cong_wait);
	}
}

/**
 * fscache_object_sleep_till_congested - Sleep until object wq is congested
 * @timeoutp: Scheduler sleep timeout
 *
 * Allow an object handler to sleep until the object workqueue is congested.
 *
 * The caller must set up a wake up event before calling this and must have set
 * the appropriate sleep mode (such as TASK_UNINTERRUPTIBLE) and tested its own
 * condition before calling this function as no test is made here.
 *
 * %true is returned if the object wq is congested, %false otherwise.
 */
bool fscache_object_sleep_till_congested(signed long *timeoutp)
{
	wait_queue_head_t *cong_wq = this_cpu_ptr(&fscache_object_cong_wait);
	DEFINE_WAIT(wait);

	if (fscache_object_congested())
		return true;

	add_wait_queue_exclusive(cong_wq, &wait);
	if (!fscache_object_congested())
		*timeoutp = schedule_timeout(*timeoutp);
	finish_wait(cong_wq, &wait);

	return fscache_object_congested();
}
EXPORT_SYMBOL_GPL(fscache_object_sleep_till_congested);
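
/*
 * Illustrative caller pattern only (hypothetical names), matching the
 * requirements described in the kernel-doc above:
 *
 *	signed long timeout = 60 * HZ;
 *	DEFINE_WAIT(my_wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&my_wq, &my_wait, TASK_UNINTERRUPTIBLE);
 *		if (my_condition_became_true())
 *			break;
 *		if (fscache_object_sleep_till_congested(&timeout)) {
 *			// the object workqueue is congested - the caller may
 *			// prefer to requeue its own work and bail out here
 *			break;
 *		}
 *	}
 *	finish_wait(&my_wq, &my_wait);
 */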

/*
 * Enqueue the dependents of an object for metadata-type processing.
 *
 * If we don't manage to finish the list before the scheduler wants to run
 * again then return false immediately. We return true if the list was
 * cleared.
 */
static bool fscache_enqueue_dependents(struct fscache_object *object, int event)
{
	struct fscache_object *dep;
	bool ret = true;

	_enter("{OBJ%x}", object->debug_id);

	if (list_empty(&object->dependents))
		return true;

	spin_lock(&object->lock);

	while (!list_empty(&object->dependents)) {
		dep = list_entry(object->dependents.next,
				 struct fscache_object, dep_link);
		list_del_init(&dep->dep_link);

		fscache_raise_event(dep, event);
		fscache_put_object(dep, fscache_obj_put_enq_dep);

		if (!list_empty(&object->dependents) && need_resched()) {
			ret = false;
			break;
		}
	}

	spin_unlock(&object->lock);
	return ret;
}

/*
 * remove an object from whatever queue it's waiting on
 */
static void fscache_dequeue_object(struct fscache_object *object)
{
	_enter("{OBJ%x}", object->debug_id);

	if (!list_empty(&object->dep_link)) {
		spin_lock(&object->parent->lock);
		list_del_init(&object->dep_link);
		spin_unlock(&object->parent->lock);
	}

	_leave("");
}

/**
 * fscache_check_aux - Ask the netfs whether an object on disk is still valid
 * @object: The object to ask about
 * @data: The auxiliary data for the object
 * @datalen: The size of the auxiliary data
 * @object_size: The size of the object, passed through to the netfs's
 *	check_aux callback
 *
 * This function consults the netfs about the coherency state of an object.
 * The caller must be holding a ref on cookie->n_active (held by
 * fscache_look_up_object() on behalf of the cache backend during object lookup
 * and creation).
 */
enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
					const void *data, uint16_t datalen,
					loff_t object_size)
{
	enum fscache_checkaux result;

	if (!object->cookie->def->check_aux) {
		fscache_stat(&fscache_n_checkaux_none);
		return FSCACHE_CHECKAUX_OKAY;
	}

	result = object->cookie->def->check_aux(object->cookie->netfs_data,
						data, datalen, object_size);
	switch (result) {
		/* entry okay as is */
	case FSCACHE_CHECKAUX_OKAY:
		fscache_stat(&fscache_n_checkaux_okay);
		break;

		/* entry requires update */
	case FSCACHE_CHECKAUX_NEEDS_UPDATE:
		fscache_stat(&fscache_n_checkaux_update);
		break;

		/* entry requires deletion */
	case FSCACHE_CHECKAUX_OBSOLETE:
		fscache_stat(&fscache_n_checkaux_obsolete);
		break;

	default:
		BUG();
	}

	return result;
}
EXPORT_SYMBOL(fscache_check_aux);

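/*
 * Illustration only (hypothetical netfs code, not part of this file): a
 * netfs's check_aux callback has the signature used in the call above and
 * typically just compares the blob that was stored with the object against
 * its current coherency data, e.g.:
 *
 *	static enum fscache_checkaux example_check_aux(void *netfs_data,
 *						       const void *data,
 *						       uint16_t datalen,
 *						       loff_t object_size)
 *	{
 *		struct example_inode *ei = netfs_data;
 *
 *		if (datalen != sizeof(ei->cache_aux) ||
 *		    memcmp(data, &ei->cache_aux, datalen) != 0)
 *			return FSCACHE_CHECKAUX_OBSOLETE;
 *		return FSCACHE_CHECKAUX_OKAY;
 *	}
 */
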
/*
 * Asynchronously invalidate an object.
 */
static const struct fscache_state *_fscache_invalidate_object(struct fscache_object *object,
							       int event)
{
	struct fscache_operation *op;
	struct fscache_cookie *cookie = object->cookie;

	_enter("{OBJ%x},%d", object->debug_id, event);

	/* We're going to need the cookie. If the cookie is not available then
	 * retire the object instead.
	 */
	if (!fscache_use_cookie(object)) {
		ASSERT(radix_tree_empty(&object->cookie->stores));
		set_bit(FSCACHE_OBJECT_RETIRED, &object->flags);
		_leave(" [no cookie]");
		return transit_to(KILL_OBJECT);
	}

	/* Reject any new read/write ops and abort any that are pending. */
	fscache_invalidate_writes(cookie);
	clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
	fscache_cancel_all_ops(object);

	/* Now we have to wait for in-progress reads and writes */
	op = kzalloc(sizeof(*op), GFP_KERNEL);
	if (!op)
		goto nomem;

	fscache_operation_init(cookie, op, object->cache->ops->invalidate_object,
			       NULL, NULL);
	op->flags = FSCACHE_OP_ASYNC |
		    (1 << FSCACHE_OP_EXCLUSIVE) |
		    (1 << FSCACHE_OP_UNUSE_COOKIE);
	trace_fscache_page_op(cookie, NULL, op, fscache_page_op_invalidate);

	spin_lock(&cookie->lock);
	if (fscache_submit_exclusive_op(object, op) < 0)
		goto submit_op_failed;
	spin_unlock(&cookie->lock);
	fscache_put_operation(op);

	/* Once we've completed the invalidation, we know there will be no data
	 * stored in the cache and thus we can reinstate the data-check-skip
	 * optimisation.
	 */
	set_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);

	/* We can allow read and write requests to come in once again. They'll
	 * queue up behind our exclusive invalidation operation.
	 */
	if (test_and_clear_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags))
		wake_up_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING);
	_leave(" [ok]");
	return transit_to(UPDATE_OBJECT);

nomem:
	fscache_mark_object_dead(object);
	fscache_unuse_cookie(object);
	_leave(" [ENOMEM]");
	return transit_to(KILL_OBJECT);

submit_op_failed:
	fscache_mark_object_dead(object);
	spin_unlock(&cookie->lock);
	fscache_unuse_cookie(object);
	kfree(op);
	_leave(" [EIO]");
	return transit_to(KILL_OBJECT);
}

static const struct fscache_state *fscache_invalidate_object(struct fscache_object *object,
							      int event)
{
	const struct fscache_state *s;

	fscache_stat(&fscache_n_invalidates_run);
	fscache_stat(&fscache_n_cop_invalidate_object);
	s = _fscache_invalidate_object(object, event);
	fscache_stat_d(&fscache_n_cop_invalidate_object);
	return s;
}

/*
 * Update auxiliary data.
 */
static void fscache_update_aux_data(struct fscache_object *object)
{
	fscache_stat(&fscache_n_updates_run);
	fscache_stat(&fscache_n_cop_update_object);
	object->cache->ops->update_object(object);
	fscache_stat_d(&fscache_n_cop_update_object);
}

/*
 * Asynchronously update an object.
 */
static const struct fscache_state *fscache_update_object(struct fscache_object *object,
							  int event)
{
	_enter("{OBJ%x},%d", object->debug_id, event);

	fscache_update_aux_data(object);

	_leave("");
	return transit_to(WAIT_FOR_CMD);
}

/**
 * fscache_object_retrying_stale - Note retrying stale object
 * @object: The object that will be retried
 *
 * Note that an object lookup found an on-disk object that was adjudged to be
 * stale and has been deleted. The lookup will be retried.
 */
void fscache_object_retrying_stale(struct fscache_object *object)
{
	fscache_stat(&fscache_n_cache_no_space_reject);
}
EXPORT_SYMBOL(fscache_object_retrying_stale);

/**
 * fscache_object_mark_killed - Note that an object was killed
 * @object: The object that was culled
 * @why: The reason the object was killed.
 *
 * Note that an object was killed. If the object has already been marked as
 * killed by the cache, an error is logged and nothing else is done.
 */
void fscache_object_mark_killed(struct fscache_object *object,
				enum fscache_why_object_killed why)
{
	if (test_and_set_bit(FSCACHE_OBJECT_KILLED_BY_CACHE, &object->flags)) {
		pr_err("Error: Object already killed by cache [%s]\n",
		       object->cache->identifier);
		return;
	}

	switch (why) {
	case FSCACHE_OBJECT_NO_SPACE:
		fscache_stat(&fscache_n_cache_no_space_reject);
		break;
	case FSCACHE_OBJECT_IS_STALE:
		fscache_stat(&fscache_n_cache_stale_objects);
		break;
	case FSCACHE_OBJECT_WAS_RETIRED:
		fscache_stat(&fscache_n_cache_retired_objects);
		break;
	case FSCACHE_OBJECT_WAS_CULLED:
		fscache_stat(&fscache_n_cache_culled_objects);
		break;
	}
}
EXPORT_SYMBOL(fscache_object_mark_killed);

/*
 * The object is dead. We can get here if an object gets queued by an event
 * that would lead to its death (such as EV_KILL) when the dispatcher is
 * already running (and so can be requeued) but hasn't yet cleared the event
 * mask.
 */
static const struct fscache_state *fscache_object_dead(struct fscache_object *object,
						       int event)
{
	if (!test_and_set_bit(FSCACHE_OBJECT_RUN_AFTER_DEAD,
			      &object->flags))
		return NO_TRANSIT;

	WARN(true, "FS-Cache object redispatched after death");
	return NO_TRANSIT;
}