// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_trans_priv.h"
#include "xfs_error.h"
#include "xfs_trace.h"

/*
 * Check to see if a buffer matching the given parameters is already
 * a part of the given transaction.
 */
STATIC struct xfs_buf *
xfs_trans_buf_item_match(
        struct xfs_trans        *tp,
        struct xfs_buftarg      *target,
        struct xfs_buf_map      *map,
        int                     nmaps)
{
        struct xfs_log_item     *lip;
        struct xfs_buf_log_item *blip;
        int                     len = 0;
        int                     i;

        for (i = 0; i < nmaps; i++)
                len += map[i].bm_len;

        list_for_each_entry(lip, &tp->t_items, li_trans) {
                blip = (struct xfs_buf_log_item *)lip;
                if (blip->bli_item.li_type == XFS_LI_BUF &&
                    blip->bli_buf->b_target == target &&
                    XFS_BUF_ADDR(blip->bli_buf) == map[0].bm_bn &&
                    blip->bli_buf->b_length == len) {
                        ASSERT(blip->bli_buf->b_map_count == nmaps);
                        return blip->bli_buf;
                }
        }

        return NULL;
}

/*
 * Add the locked buffer to the transaction.
 *
 * The buffer must be locked, and it cannot be associated with any
 * transaction.
 *
 * If the buffer does not yet have a buf log item associated with it,
 * then allocate one for it. Then add the buf item to the transaction.
 */
STATIC void
_xfs_trans_bjoin(
        struct xfs_trans        *tp,
        struct xfs_buf          *bp,
        int                     reset_recur)
{
        struct xfs_buf_log_item *bip;

        ASSERT(bp->b_transp == NULL);

        /*
         * The xfs_buf_log_item pointer is stored in b_log_item. If
         * it doesn't have one yet, then allocate one and initialize it.
         * The checks to see if one is there are in xfs_buf_item_init().
         */
        xfs_buf_item_init(bp, tp->t_mountp);
        bip = bp->b_log_item;
        ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
        ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
        ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
        if (reset_recur)
                bip->bli_recur = 0;

        /*
         * Take a reference for this transaction on the buf item.
         */
        atomic_inc(&bip->bli_refcount);

        /*
         * Attach the item to the transaction so we can find it in
         * xfs_trans_get_buf() and friends.
         */
        xfs_trans_add_item(tp, &bip->bli_item);
        bp->b_transp = tp;
}

void
xfs_trans_bjoin(
        struct xfs_trans        *tp,
        struct xfs_buf          *bp)
{
        _xfs_trans_bjoin(tp, bp, 0);
        trace_xfs_trans_bjoin(bp->b_log_item);
}
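
/*
 * Usage sketch (illustrative only; blkno and numblks are placeholders):
 * xfs_trans_bjoin() is for buffers acquired outside the transaction,
 * e.g. via xfs_buf_get():
 *
 *	bp = xfs_buf_get(mp->m_ddev_targp, blkno, numblks);
 *	if (bp)
 *		xfs_trans_bjoin(tp, bp);	// tp now owns the locked buffer
 */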

/*
 * Get and lock the buffer for the caller if it is not already
 * locked within the given transaction. If it is already locked
 * within the transaction, just increment its lock recursion count
 * and return a pointer to it.
 *
 * If the transaction pointer is NULL, make this just a normal
 * get_buf() call.
 */
struct xfs_buf *
xfs_trans_get_buf_map(
        struct xfs_trans        *tp,
        struct xfs_buftarg      *target,
        struct xfs_buf_map      *map,
        int                     nmaps,
        xfs_buf_flags_t         flags)
{
        xfs_buf_t               *bp;
        struct xfs_buf_log_item *bip;

        if (!tp)
                return xfs_buf_get_map(target, map, nmaps, flags);

        /*
         * If we find the buffer in the cache with this transaction
         * pointer in its b_fsprivate2 field, then we know we already
         * have it locked. In this case we just increment the lock
         * recursion count and return the buffer to the caller.
         */
        bp = xfs_trans_buf_item_match(tp, target, map, nmaps);
        if (bp != NULL) {
                ASSERT(xfs_buf_islocked(bp));
                if (XFS_FORCED_SHUTDOWN(tp->t_mountp)) {
                        xfs_buf_stale(bp);
                        bp->b_flags |= XBF_DONE;
                }

                ASSERT(bp->b_transp == tp);
                bip = bp->b_log_item;
                ASSERT(bip != NULL);
                ASSERT(atomic_read(&bip->bli_refcount) > 0);
                bip->bli_recur++;
                trace_xfs_trans_get_buf_recur(bip);
                return bp;
        }

        bp = xfs_buf_get_map(target, map, nmaps, flags);
        if (bp == NULL)
                return NULL;

        ASSERT(!bp->b_error);

        _xfs_trans_bjoin(tp, bp, 1);
        trace_xfs_trans_get_buf(bp->b_log_item);
        return bp;
}
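
/*
 * Usage sketch (illustrative only; blkno and numblks are placeholders):
 * most callers go through the xfs_trans_get_buf() wrapper, which builds
 * the single-extent map and calls this function:
 *
 *	bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno, numblks, 0);
 *	if (!bp)
 *		return -ENOMEM;
 */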

/*
 * Get and lock the superblock buffer of this file system for the
 * given transaction.
 *
 * We don't need to use incore_match() here, because the superblock
 * buffer is a private buffer which we keep a pointer to in the
 * mount structure.
 */
xfs_buf_t *
xfs_trans_getsb(
        xfs_trans_t             *tp,
        struct xfs_mount        *mp,
        int                     flags)
{
        xfs_buf_t               *bp;
        struct xfs_buf_log_item *bip;

        /*
         * Default to just trying to lock the superblock buffer
         * if tp is NULL.
         */
        if (tp == NULL)
                return xfs_getsb(mp, flags);

        /*
         * If the superblock buffer already has this transaction
         * pointer in its b_fsprivate2 field, then we know we already
         * have it locked. In this case we just increment the lock
         * recursion count and return the buffer to the caller.
         */
        bp = mp->m_sb_bp;
        if (bp->b_transp == tp) {
                bip = bp->b_log_item;
                ASSERT(bip != NULL);
                ASSERT(atomic_read(&bip->bli_refcount) > 0);
                bip->bli_recur++;
                trace_xfs_trans_getsb_recur(bip);
                return bp;
        }

        bp = xfs_getsb(mp, flags);
        if (bp == NULL)
                return NULL;

        _xfs_trans_bjoin(tp, bp, 1);
        trace_xfs_trans_getsb(bp->b_log_item);
        return bp;
}

/*
 * Get and lock the buffer for the caller if it is not already
 * locked within the given transaction. If it has not yet been
 * read in, read it from disk. If it is already locked
 * within the transaction and already read in, just increment its
 * lock recursion count and return a pointer to it.
 *
 * If the transaction pointer is NULL, make this just a normal
 * read_buf() call.
 */
int
xfs_trans_read_buf_map(
        struct xfs_mount        *mp,
        struct xfs_trans        *tp,
        struct xfs_buftarg      *target,
        struct xfs_buf_map      *map,
        int                     nmaps,
        xfs_buf_flags_t         flags,
        struct xfs_buf          **bpp,
        const struct xfs_buf_ops *ops)
{
        struct xfs_buf          *bp = NULL;
        struct xfs_buf_log_item *bip;
        int                     error;

        *bpp = NULL;
        /*
         * If we find the buffer in the cache with this transaction
         * pointer in its b_fsprivate2 field, then we know we already
         * have it locked. If it is already read in we just increment
         * the lock recursion count and return the buffer to the caller.
         * If the buffer is not yet read in, then we read it in, increment
         * the lock recursion count, and return it to the caller.
         */
        if (tp)
                bp = xfs_trans_buf_item_match(tp, target, map, nmaps);
        if (bp) {
                ASSERT(xfs_buf_islocked(bp));
                ASSERT(bp->b_transp == tp);
                ASSERT(bp->b_log_item != NULL);
                ASSERT(!bp->b_error);
                ASSERT(bp->b_flags & XBF_DONE);

                /*
                 * We never locked this buf ourselves, so we shouldn't
                 * brelse it either. Just get out.
                 */
                if (XFS_FORCED_SHUTDOWN(mp)) {
                        trace_xfs_trans_read_buf_shut(bp, _RET_IP_);
                        return -EIO;
                }

                bip = bp->b_log_item;
                bip->bli_recur++;

                ASSERT(atomic_read(&bip->bli_refcount) > 0);
                trace_xfs_trans_read_buf_recur(bip);
                *bpp = bp;
                return 0;
        }

        bp = xfs_buf_read_map(target, map, nmaps, flags, ops);
        if (!bp) {
                if (!(flags & XBF_TRYLOCK))
                        return -ENOMEM;
                return tp ? 0 : -EAGAIN;
        }

        /*
         * If we've had a read error, then the contents of the buffer are
         * invalid and should not be used. To ensure that a followup read tries
         * to pull the buffer from disk again, we clear the XBF_DONE flag and
         * mark the buffer stale. This ensures that anyone who has a current
         * reference to the buffer will interpret its contents correctly and
         * future cache lookups will also treat it as an empty, uninitialised
         * buffer.
         */
        if (bp->b_error) {
                error = bp->b_error;
                if (!XFS_FORCED_SHUTDOWN(mp))
                        xfs_buf_ioerror_alert(bp, __func__);
                bp->b_flags &= ~XBF_DONE;
                xfs_buf_stale(bp);

                if (tp && (tp->t_flags & XFS_TRANS_DIRTY))
                        xfs_force_shutdown(tp->t_mountp, SHUTDOWN_META_IO_ERROR);
                xfs_buf_relse(bp);

                /* bad CRC means corrupted metadata */
                if (error == -EFSBADCRC)
                        error = -EFSCORRUPTED;
                return error;
        }

        if (XFS_FORCED_SHUTDOWN(mp)) {
                xfs_buf_relse(bp);
                trace_xfs_trans_read_buf_shut(bp, _RET_IP_);
                return -EIO;
        }

        if (tp) {
                _xfs_trans_bjoin(tp, bp, 1);
                trace_xfs_trans_read_buf(bp->b_log_item);
        }
        *bpp = bp;
        return 0;
}
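
/*
 * Usage sketch (illustrative only; blkno, numblks, first, last and ops
 * are placeholders): the common read-modify-log cycle built on this
 * function, via the xfs_trans_read_buf() wrapper:
 *
 *	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, blkno,
 *				   numblks, 0, &bp, ops);
 *	if (error)
 *		return error;
 *	... modify the buffer contents through bp->b_addr, then ...
 *	xfs_trans_log_buf(tp, bp, first, last);
 */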

/*
 * Release the buffer bp which was previously acquired with one of the
 * xfs_trans_... buffer allocation routines if the buffer has not
 * been modified within this transaction. If the buffer is modified
 * within this transaction, decrement the recursion count but do not
 * release the buffer even if the count goes to 0. If the buffer is not
 * modified within the transaction, decrement the recursion count and
 * release the buffer if the recursion count goes to 0.
 *
 * If the buffer is to be released and it was not modified before
 * this transaction began, then free the buf_log_item associated with it.
 *
 * If the transaction pointer is NULL, make this just a normal
 * brelse() call.
 */
void
xfs_trans_brelse(
        xfs_trans_t             *tp,
        xfs_buf_t               *bp)
{
        struct xfs_buf_log_item *bip;
        int                     freed;

        /*
         * Default to a normal brelse() call if the tp is NULL.
         */
        if (tp == NULL) {
                ASSERT(bp->b_transp == NULL);
                xfs_buf_relse(bp);
                return;
        }

        ASSERT(bp->b_transp == tp);
        bip = bp->b_log_item;
        ASSERT(bip->bli_item.li_type == XFS_LI_BUF);
        ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
        ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
        ASSERT(atomic_read(&bip->bli_refcount) > 0);

        trace_xfs_trans_brelse(bip);

        /*
         * If the release is just for a recursive lock,
         * then decrement the count and return.
         */
        if (bip->bli_recur > 0) {
                bip->bli_recur--;
                return;
        }

        /*
         * If the buffer is dirty within this transaction, we can't
         * release it until we commit.
         */
        if (test_bit(XFS_LI_DIRTY, &bip->bli_item.li_flags))
                return;

        /*
         * If the buffer has been invalidated, then we can't release
         * it until the transaction commits to disk unless it is re-dirtied
         * as part of this transaction. This prevents us from pulling
         * the item from the AIL before we should.
         */
        if (bip->bli_flags & XFS_BLI_STALE)
                return;

        ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));

        /*
         * Free up the log item descriptor tracking the released item.
         */
        xfs_trans_del_item(&bip->bli_item);

        /*
         * Clear the hold flag in the buf log item if it is set.
         * We wouldn't want the next user of the buffer to
         * get confused.
         */
        if (bip->bli_flags & XFS_BLI_HOLD)
                bip->bli_flags &= ~XFS_BLI_HOLD;

        /*
         * Drop our reference to the buf log item.
         */
        freed = atomic_dec_and_test(&bip->bli_refcount);

        /*
         * If the buf item is not tracking data in the log, then we must free it
         * before releasing the buffer back to the free pool.
         *
         * If the fs has shutdown and we dropped the last reference, it may fall
         * on us to release a (possibly dirty) bli if it never made it to the
         * AIL (e.g., the aborted unpin already happened and didn't release it
         * due to our reference). Since we're already shutdown and need
         * ail_lock, just force remove from the AIL and release the bli here.
         */
        if (XFS_FORCED_SHUTDOWN(tp->t_mountp) && freed) {
                xfs_trans_ail_remove(&bip->bli_item, SHUTDOWN_LOG_IO_ERROR);
                xfs_buf_item_relse(bp);
        } else if (!(bip->bli_flags & XFS_BLI_DIRTY)) {
/***
                ASSERT(bp->b_pincount == 0);
***/
                ASSERT(atomic_read(&bip->bli_refcount) == 0);
                ASSERT(!test_bit(XFS_LI_IN_AIL, &bip->bli_item.li_flags));
                ASSERT(!(bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF));
                xfs_buf_item_relse(bp);
        }

        bp->b_transp = NULL;
        xfs_buf_relse(bp);
}

/*
 * Mark the buffer as not needing to be unlocked when the buf item's
 * iop_unlock() routine is called. The buffer must already be locked
 * and associated with the given transaction.
 */
/* ARGSUSED */
void
xfs_trans_bhold(
        xfs_trans_t             *tp,
        xfs_buf_t               *bp)
{
        struct xfs_buf_log_item *bip = bp->b_log_item;

        ASSERT(bp->b_transp == tp);
        ASSERT(bip != NULL);
        ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
        ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
        ASSERT(atomic_read(&bip->bli_refcount) > 0);

        bip->bli_flags |= XFS_BLI_HOLD;
        trace_xfs_trans_bhold(bip);
}
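
/*
 * Usage sketch (illustrative only): a hold is typically used to keep a
 * buffer locked across a transaction roll so that it can be rejoined to
 * the follow-up transaction:
 *
 *	xfs_trans_bhold(tp, bp);
 *	error = xfs_trans_roll(&tp);	// commits tp, allocates a new one
 *	if (error)
 *		return error;
 *	xfs_trans_bjoin(tp, bp);	// rejoin the still-locked buffer
 */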

/*
 * Cancel the previous buffer hold request made on this buffer
 * for this transaction.
 */
void
xfs_trans_bhold_release(
        xfs_trans_t             *tp,
        xfs_buf_t               *bp)
{
        struct xfs_buf_log_item *bip = bp->b_log_item;

        ASSERT(bp->b_transp == tp);
        ASSERT(bip != NULL);
        ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
        ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
        ASSERT(atomic_read(&bip->bli_refcount) > 0);
        ASSERT(bip->bli_flags & XFS_BLI_HOLD);

        bip->bli_flags &= ~XFS_BLI_HOLD;
        trace_xfs_trans_bhold_release(bip);
}

/*
 * Mark a buffer dirty in the transaction.
 */
void
xfs_trans_dirty_buf(
        struct xfs_trans        *tp,
        struct xfs_buf          *bp)
{
        struct xfs_buf_log_item *bip = bp->b_log_item;

        ASSERT(bp->b_transp == tp);
        ASSERT(bip != NULL);
        ASSERT(bp->b_iodone == NULL ||
               bp->b_iodone == xfs_buf_iodone_callbacks);

        /*
         * Mark the buffer as needing to be written out eventually,
         * and set its iodone function to remove the buffer's buf log
         * item from the AIL and free it when the buffer is flushed
         * to disk. See xfs_buf_attach_iodone() for more details
         * on li_cb and xfs_buf_iodone_callbacks().
         * If we end up aborting this transaction, we trap this buffer
         * inside the b_bdstrat callback so that this won't get written to
         * disk.
         */
        bp->b_flags |= XBF_DONE;

        ASSERT(atomic_read(&bip->bli_refcount) > 0);
        bp->b_iodone = xfs_buf_iodone_callbacks;
        bip->bli_item.li_cb = xfs_buf_iodone;

        /*
         * If we invalidated the buffer within this transaction, then
         * cancel the invalidation now that we're dirtying the buffer
         * again. There are no races with the code in xfs_buf_item_unpin(),
         * because we have a reference to the buffer this entire time.
         */
        if (bip->bli_flags & XFS_BLI_STALE) {
                bip->bli_flags &= ~XFS_BLI_STALE;
                ASSERT(bp->b_flags & XBF_STALE);
                bp->b_flags &= ~XBF_STALE;
                bip->__bli_format.blf_flags &= ~XFS_BLF_CANCEL;
        }
        bip->bli_flags |= XFS_BLI_DIRTY | XFS_BLI_LOGGED;

        tp->t_flags |= XFS_TRANS_DIRTY;
        set_bit(XFS_LI_DIRTY, &bip->bli_item.li_flags);
}

/*
 * This is called to mark bytes first through last inclusive of the given
 * buffer as needing to be logged when the transaction is committed.
 * The buffer must already be associated with the given transaction.
 *
 * First and last are numbers relative to the beginning of this buffer,
 * so the first byte in the buffer is numbered 0 regardless of the
 * value of b_blkno.
 */
void
xfs_trans_log_buf(
        struct xfs_trans        *tp,
        struct xfs_buf          *bp,
        uint                    first,
        uint                    last)
{
        struct xfs_buf_log_item *bip = bp->b_log_item;

        ASSERT(first <= last && last < BBTOB(bp->b_length));
        ASSERT(!(bip->bli_flags & XFS_BLI_ORDERED));

        xfs_trans_dirty_buf(tp, bp);

        trace_xfs_trans_log_buf(bip);
        xfs_buf_item_log(bip, first, last);
}
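
/*
 * Usage sketch (illustrative only; "ptr" is a placeholder for a
 * structure inside the buffer): byte offsets are relative to the start
 * of the buffer, and the range is inclusive:
 *
 *	offset = (uint)((char *)ptr - (char *)bp->b_addr);
 *	xfs_trans_log_buf(tp, bp, offset, offset + sizeof(*ptr) - 1);
 */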

/*
 * Invalidate a buffer that is being used within a transaction.
 *
 * Typically this is because the blocks in the buffer are being freed, so we
 * need to prevent it from being written out when we're done. Allowing it
 * to be written again might overwrite data in the free blocks if they are
 * reallocated to a file.
 *
 * We prevent the buffer from being written out by marking it stale. We can't
 * get rid of the buf log item at this point because the buffer may still be
 * pinned by another transaction. If that is the case, then we'll wait until
 * the buffer is committed to disk for the last time (we can tell by the ref
 * count) and free it in xfs_buf_item_unpin(). Until that happens we will
 * keep the buffer locked so that the buffer and buf log item are not reused.
 *
 * We also set the XFS_BLF_CANCEL flag in the buf log format structure and log
 * the buf item. This will be used at recovery time to determine that copies
 * of the buffer in the log before this should not be replayed.
 *
 * We mark the item descriptor and the transaction dirty so that we'll hold
 * the buffer until after the commit.
 *
 * Since we're invalidating the buffer, we also clear the state about which
 * parts of the buffer have been logged. We also clear the flag indicating
 * that this is an inode buffer since the data in the buffer will no longer
 * be valid.
 *
 * We set the stale bit in the buffer as well since we're getting rid of it.
 */
void
xfs_trans_binval(
        xfs_trans_t             *tp,
        xfs_buf_t               *bp)
{
        struct xfs_buf_log_item *bip = bp->b_log_item;
        int                     i;

        ASSERT(bp->b_transp == tp);
        ASSERT(bip != NULL);
        ASSERT(atomic_read(&bip->bli_refcount) > 0);

        trace_xfs_trans_binval(bip);

        if (bip->bli_flags & XFS_BLI_STALE) {
                /*
                 * If the buffer is already invalidated, then
                 * just return.
                 */
                ASSERT(bp->b_flags & XBF_STALE);
                ASSERT(!(bip->bli_flags & (XFS_BLI_LOGGED | XFS_BLI_DIRTY)));
                ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_INODE_BUF));
                ASSERT(!(bip->__bli_format.blf_flags & XFS_BLFT_MASK));
                ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
                ASSERT(test_bit(XFS_LI_DIRTY, &bip->bli_item.li_flags));
                ASSERT(tp->t_flags & XFS_TRANS_DIRTY);
                return;
        }

        xfs_buf_stale(bp);

        bip->bli_flags |= XFS_BLI_STALE;
        bip->bli_flags &= ~(XFS_BLI_INODE_BUF | XFS_BLI_LOGGED | XFS_BLI_DIRTY);
        bip->__bli_format.blf_flags &= ~XFS_BLF_INODE_BUF;
        bip->__bli_format.blf_flags |= XFS_BLF_CANCEL;
        bip->__bli_format.blf_flags &= ~XFS_BLFT_MASK;
        for (i = 0; i < bip->bli_format_count; i++) {
                memset(bip->bli_formats[i].blf_data_map, 0,
                       (bip->bli_formats[i].blf_map_size * sizeof(uint)));
        }
        set_bit(XFS_LI_DIRTY, &bip->bli_item.li_flags);
        tp->t_flags |= XFS_TRANS_DIRTY;
}
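
/*
 * Usage sketch (illustrative only): when a transaction frees the blocks
 * backing a metadata buffer it already holds, it invalidates the buffer
 * so the stale contents can never be written over the reused blocks:
 *
 *	... free the extent backing bp within this transaction ...
 *	xfs_trans_binval(tp, bp);	// bp must be joined to tp
 */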

/*
 * This call is used to indicate that the buffer contains on-disk inodes which
 * must be handled specially during recovery. They require special handling
 * because only the di_next_unlinked from the inodes in the buffer should be
 * recovered. The rest of the data in the buffer is logged via the inodes
 * themselves.
 *
 * All we do is set the XFS_BLI_INODE_BUF flag in the item's flags so it can be
 * transferred to the buffer's log format structure so that we'll know what to
 * do at recovery time.
 */
void
xfs_trans_inode_buf(
        xfs_trans_t             *tp,
        xfs_buf_t               *bp)
{
        struct xfs_buf_log_item *bip = bp->b_log_item;

        ASSERT(bp->b_transp == tp);
        ASSERT(bip != NULL);
        ASSERT(atomic_read(&bip->bli_refcount) > 0);

        bip->bli_flags |= XFS_BLI_INODE_BUF;
        xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DINO_BUF);
}

/*
 * This call is used to indicate that the buffer is going to
 * be staled and was an inode buffer. This means it gets
 * special processing during unpin - where any inodes
 * associated with the buffer should be removed from the AIL.
 * There is also special processing during recovery: any
 * replay of the inodes in the buffer needs to be prevented,
 * as the buffer may have been reused.
 */
void
xfs_trans_stale_inode_buf(
        xfs_trans_t             *tp,
        xfs_buf_t               *bp)
{
        struct xfs_buf_log_item *bip = bp->b_log_item;

        ASSERT(bp->b_transp == tp);
        ASSERT(bip != NULL);
        ASSERT(atomic_read(&bip->bli_refcount) > 0);

        bip->bli_flags |= XFS_BLI_STALE_INODE;
        bip->bli_item.li_cb = xfs_buf_iodone;
        xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DINO_BUF);
}

/*
 * Mark the buffer as being one which contains newly allocated
 * inodes. We need to make sure that even if this buffer is
 * relogged as an 'inode buf' we still recover all of the inode
 * images in the face of a crash. This works in coordination with
 * xfs_buf_item_committed() to ensure that the buffer remains in the
 * AIL at its original location even after it has been relogged.
 */
/* ARGSUSED */
void
xfs_trans_inode_alloc_buf(
        xfs_trans_t             *tp,
        xfs_buf_t               *bp)
{
        struct xfs_buf_log_item *bip = bp->b_log_item;

        ASSERT(bp->b_transp == tp);
        ASSERT(bip != NULL);
        ASSERT(atomic_read(&bip->bli_refcount) > 0);

        bip->bli_flags |= XFS_BLI_INODE_ALLOC_BUF;
        xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DINO_BUF);
}

/*
 * Mark the buffer as ordered for this transaction. This means that the
 * contents of the buffer are not recorded in the transaction, but the buffer
 * is still tracked in the AIL as though it had been. This allows us to record
 * logical changes in transactions rather than the physical changes we make to
 * the buffer without changing writeback ordering constraints of metadata
 * buffers.
 */
bool
xfs_trans_ordered_buf(
        struct xfs_trans        *tp,
        struct xfs_buf          *bp)
{
        struct xfs_buf_log_item *bip = bp->b_log_item;

        ASSERT(bp->b_transp == tp);
        ASSERT(bip != NULL);
        ASSERT(atomic_read(&bip->bli_refcount) > 0);

        if (xfs_buf_item_dirty_format(bip))
                return false;

        bip->bli_flags |= XFS_BLI_ORDERED;
        trace_xfs_buf_item_ordered(bip);

        /*
         * We don't log a dirty range of an ordered buffer, but it still
         * needs to be marked dirty and marked as logged.
         */
        xfs_trans_dirty_buf(tp, bp);
        return true;
}

/*
 * Set the type of the buffer for log recovery so that recovery can correctly
 * identify the buffer and hence attach the correct buffer ops to it after
 * replay.
 */
void
xfs_trans_buf_set_type(
        struct xfs_trans        *tp,
        struct xfs_buf          *bp,
        enum xfs_blft           type)
{
        struct xfs_buf_log_item *bip = bp->b_log_item;

        if (!tp)
                return;

        ASSERT(bp->b_transp == tp);
        ASSERT(bip != NULL);
        ASSERT(atomic_read(&bip->bli_refcount) > 0);

        xfs_blft_to_flags(&bip->__bli_format, type);
}
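
/*
 * Usage sketch (illustrative only; blkno/numblks are placeholders, and
 * XFS_BLFT_AGF_BUF merely stands in for whatever type applies): a newly
 * allocated buffer has no contents from which recovery could infer a
 * type, so callers stamp it explicitly before logging it:
 *
 *	bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno, numblks, 0);
 *	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_AGF_BUF);
 */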

void
xfs_trans_buf_copy_type(
        struct xfs_buf          *dst_bp,
        struct xfs_buf          *src_bp)
{
        struct xfs_buf_log_item *sbip = src_bp->b_log_item;
        struct xfs_buf_log_item *dbip = dst_bp->b_log_item;
        enum xfs_blft           type;

        type = xfs_blft_from_flags(&sbip->__bli_format);
        xfs_blft_to_flags(&dbip->__bli_format, type);
}

/*
 * Similar to xfs_trans_inode_buf(), this marks the buffer as a cluster of
 * dquots. However, unlike in inode buffer recovery, dquot buffers get
 * recovered in their entirety. (Hence, no XFS_BLI_DQUOT_ALLOC_BUF flag).
 * The only thing that makes dquot buffers different from regular
 * buffers is that we must not replay dquot bufs when recovering
 * if a _corresponding_ quotaoff has happened. We also have to distinguish
 * between usr dquot bufs and grp dquot bufs, because usr and grp quotas
 * can be turned off independently.
 */
/* ARGSUSED */
void
xfs_trans_dquot_buf(
        xfs_trans_t             *tp,
        xfs_buf_t               *bp,
        uint                    type)
{
        struct xfs_buf_log_item *bip = bp->b_log_item;

        ASSERT(type == XFS_BLF_UDQUOT_BUF ||
               type == XFS_BLF_PDQUOT_BUF ||
               type == XFS_BLF_GDQUOT_BUF);

        bip->__bli_format.blf_flags |= type;

        switch (type) {
        case XFS_BLF_UDQUOT_BUF:
                type = XFS_BLFT_UDQUOT_BUF;
                break;
        case XFS_BLF_PDQUOT_BUF:
                type = XFS_BLFT_PDQUOT_BUF;
                break;
        case XFS_BLF_GDQUOT_BUF:
                type = XFS_BLFT_GDQUOT_BUF;
                break;
        default:
                type = XFS_BLFT_UNKNOWN_BUF;
                break;
        }

        xfs_trans_buf_set_type(tp, bp, type);
}