// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2010 Red Hat, Inc. All Rights Reserved.
 */

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_error.h"
#include "xfs_alloc.h"
#include "xfs_extent_busy.h"
#include "xfs_discard.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_trace.h"

struct workqueue_struct *xfs_discard_wq;

/*
 * Allocate a new ticket. Failing to get a new ticket makes it really hard to
 * recover, so we don't allow failure here. Also, we allocate in a context that
 * we don't want to be issuing transactions from, so we need to tell the
 * allocation code this as well.
 *
 * We don't reserve any space for the ticket - we are going to steal whatever
 * space we require from transactions as they commit. To ensure we reserve all
 * the space required, we need to set the current reservation of the ticket to
 * zero so that we know to steal the initial transaction overhead from the
 * first transaction commit.
 */
static struct xlog_ticket *
xlog_cil_ticket_alloc(
	struct xlog	*log)
{
	struct xlog_ticket *tic;

	tic = xlog_ticket_alloc(log, 0, 1, XFS_TRANSACTION, 0,
				KM_SLEEP|KM_NOFS);

	/*
	 * set the current reservation to zero so we know to steal the basic
	 * transaction overhead reservation from the first transaction commit.
	 */
	tic->t_curr_res = 0;
	return tic;
}

/*
 * After the first stage of log recovery is done, we know where the head and
 * tail of the log are. We need this log initialisation done before we can
 * initialise the first CIL checkpoint context.
 *
 * Here we allocate a log ticket to track space usage during a CIL push. This
 * ticket is passed to xlog_write() directly so that we don't slowly leak log
 * space by failing to account for space used by log headers and additional
 * region headers for split regions.
 */
void
xlog_cil_init_post_recovery(
	struct xlog	*log)
{
	log->l_cilp->xc_ctx->ticket = xlog_cil_ticket_alloc(log);
	log->l_cilp->xc_ctx->sequence = 1;
}

static inline int
xlog_cil_iovec_space(
	uint	niovecs)
{
	return round_up((sizeof(struct xfs_log_vec) +
					niovecs * sizeof(struct xfs_log_iovec)),
			sizeof(uint64_t));
}
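
/*
 * Worked example for the helper above. The struct sizes are assumed for
 * illustration only (plausible for an LP64 build, not values guaranteed by
 * this file): if the xfs_log_vec header is 56 bytes and each xfs_log_iovec
 * is 16 bytes, then niovecs = 2 gives round_up(56 + 2 * 16, 8) == 88 bytes
 * of header/iovec space, so the formatted data region starts 88 bytes into
 * the buffer and stays 64-bit aligned.
 */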

/*
 * Allocate or pin log vector buffers for CIL insertion.
 *
 * The CIL currently uses disposable buffers for copying a snapshot of the
 * modified items into the log during a push. The biggest problem with this is
 * the requirement to allocate the disposable buffer during the commit if:
 *	a) it does not exist; or
 *	b) it is too small
 *
 * If we do this allocation within xlog_cil_insert_format_items(), it is done
 * under the xc_ctx_lock, which means that a CIL push cannot occur during
 * the memory allocation. This means that we have a potential deadlock situation
 * under low memory conditions when we have lots of dirty metadata pinned in
 * the CIL and we need a CIL commit to occur to free memory.
 *
 * To avoid this, we need to move the memory allocation outside the
 * xc_ctx_lock, but because the log vector buffers are disposable, that opens
 * up a TOCTOU race condition w.r.t. the CIL committing and removing the log
 * vector buffers between the check and the formatting of the item into the
 * log vector buffer within the xc_ctx_lock.
 *
 * Because the log vector buffer needs to be unchanged during the CIL push
 * process, we cannot share the buffer between the transaction commit (which
 * modifies the buffer) and the CIL push context that is writing the changes
 * into the log. This means skipping preallocation of buffer space is
 * unreliable, but we most definitely do not want to be allocating and freeing
 * buffers unnecessarily during commits when overwrites can be done safely.
 *
 * The simplest solution to this problem is to allocate a shadow buffer when a
 * log item is committed for the second time, and then to only use this buffer
 * if necessary. The buffer can remain attached to the log item until such time
 * it is needed, and this is the buffer that is reallocated to match the size of
 * the incoming modification. Then during the formatting of the item we can swap
 * the active buffer with the new one if we can't reuse the existing buffer. We
 * don't free the old buffer as it may be reused on the next modification if
 * its size is right, otherwise we'll free and reallocate it at that point.
 *
 * This function builds a vector for the changes in each log item in the
 * transaction. It then works out the length of the buffer needed for each log
 * item, allocates them and attaches the vector to the log item in preparation
 * for the formatting step which occurs under the xc_ctx_lock.
 *
 * While this means the memory footprint goes up, it avoids the repeated
 * alloc/free pattern that repeated modifications of an item would otherwise
 * cause, and hence minimises the CPU overhead of such behaviour.
 */
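/*
 * Sketch of the shadow buffer layout built below (a reading aid, not a
 * normative diagram):
 *
 *   +---------------------+---------------------------+------------------+
 *   | struct xfs_log_vec  | lv_niovecs iovec slots    | 64-bit aligned   |
 *   | header              | (lv_iovecp = &lv[1])      | data (lv_buf)    |
 *   +---------------------+---------------------------+------------------+
 *
 * xlog_cil_iovec_space() covers the first two regions; lv_buf points at the
 * start of the third.
 */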
static void
xlog_cil_alloc_shadow_bufs(
	struct xlog		*log,
	struct xfs_trans	*tp)
{
	struct xfs_log_item	*lip;

	list_for_each_entry(lip, &tp->t_items, li_trans) {
		struct xfs_log_vec *lv;
		int	niovecs = 0;
		int	nbytes = 0;
		int	buf_size;
		bool	ordered = false;

		/* Skip items which aren't dirty in this transaction. */
		if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
			continue;

		/* get number of vecs and size of data to be stored */
		lip->li_ops->iop_size(lip, &niovecs, &nbytes);

		/*
		 * Ordered items need to be tracked but we do not wish to write
		 * them. We need a logvec to track the object, but we do not
		 * need an iovec or buffer to be allocated for copying data.
		 */
		if (niovecs == XFS_LOG_VEC_ORDERED) {
			ordered = true;
			niovecs = 0;
			nbytes = 0;
		}

		/*
		 * We 64-bit align the length of each iovec so that the start
		 * of the next one is naturally aligned. We'll need to
		 * account for that slack space here. Then round nbytes up
		 * to 64-bit alignment so that the initial buffer alignment is
		 * easy to calculate and verify.
		 */
		nbytes += niovecs * sizeof(uint64_t);
		nbytes = round_up(nbytes, sizeof(uint64_t));
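		/*
		 * Arithmetic sketch for the two lines above (values are
		 * illustrative): a single 53 byte iovec gives
		 * nbytes = 53 + 1 * 8 = 61, rounded up to 64; two iovecs of
		 * 60 and 100 bytes give 160 + 2 * 8 = 176, which is already
		 * 64-bit aligned.
		 */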

		/*
		 * The data buffer needs to start 64-bit aligned, so round up
		 * that space to ensure we can align it appropriately and not
		 * overrun the buffer.
		 */
		buf_size = nbytes + xlog_cil_iovec_space(niovecs);

		/*
		 * if we have no shadow buffer, or it is too small, we need to
		 * reallocate it.
		 */
		if (!lip->li_lv_shadow ||
		    buf_size > lip->li_lv_shadow->lv_size) {

			/*
			 * We free and allocate here as a realloc would copy
			 * unnecessary data. We don't use kmem_zalloc() for the
			 * same reason - we don't need to zero the data area in
			 * the buffer, only the log vector header and the iovec
			 * storage.
			 */
			kmem_free(lip->li_lv_shadow);

			lv = kmem_alloc_large(buf_size, KM_SLEEP | KM_NOFS);
			memset(lv, 0, xlog_cil_iovec_space(niovecs));

			lv->lv_item = lip;
			lv->lv_size = buf_size;
			if (ordered)
				lv->lv_buf_len = XFS_LOG_VEC_ORDERED;
			else
				lv->lv_iovecp = (struct xfs_log_iovec *)&lv[1];
			lip->li_lv_shadow = lv;
		} else {
			/* same or smaller, optimise common overwrite case */
			lv = lip->li_lv_shadow;
			if (ordered)
				lv->lv_buf_len = XFS_LOG_VEC_ORDERED;
			else
				lv->lv_buf_len = 0;
			lv->lv_bytes = 0;
			lv->lv_next = NULL;
		}

		/* Ensure the lv is set up according to ->iop_size */
		lv->lv_niovecs = niovecs;

		/* The allocated data region lies beyond the iovec region */
		lv->lv_buf = (char *)lv + xlog_cil_iovec_space(niovecs);
	}
}

/*
 * Prepare the log item for insertion into the CIL. Calculate the difference in
 * log space and vectors it will consume, and if it is a new item pin it as
 * well.
 */
STATIC void
xfs_cil_prepare_item(
	struct xlog		*log,
	struct xfs_log_vec	*lv,
	struct xfs_log_vec	*old_lv,
	int			*diff_len,
	int			*diff_iovecs)
{
	/* Account for the new LV being passed in */
	if (lv->lv_buf_len != XFS_LOG_VEC_ORDERED) {
		*diff_len += lv->lv_bytes;
		*diff_iovecs += lv->lv_niovecs;
	}

	/*
	 * If there is no old LV, this is the first time we've seen the item in
	 * this CIL context and so we need to pin it. If we are replacing the
	 * old_lv, then remove the space it accounts for and make it the shadow
	 * buffer for later freeing. In both cases we are now switching to the
	 * shadow buffer, so update the pointer to it appropriately.
	 */
	if (!old_lv) {
		lv->lv_item->li_ops->iop_pin(lv->lv_item);
		lv->lv_item->li_lv_shadow = NULL;
	} else if (old_lv != lv) {
		ASSERT(lv->lv_buf_len != XFS_LOG_VEC_ORDERED);

		*diff_len -= old_lv->lv_bytes;
		*diff_iovecs -= old_lv->lv_niovecs;
		lv->lv_item->li_lv_shadow = old_lv;
	}

	/* attach new log vector to log item */
	lv->lv_item->li_lv = lv;

	/*
	 * If this is the first time the item is being committed to the
	 * CIL, store the sequence number on the log item so we can
	 * tell in future commits whether this is the first checkpoint
	 * the item is being committed into.
	 */
	if (!lv->lv_item->li_seq)
		lv->lv_item->li_seq = log->l_cilp->xc_ctx->sequence;
}

/*
 * Format log items into a flat buffer.
 *
 * For delayed logging, we need to hold a formatted buffer containing all the
 * changes on the log item. This enables us to relog the item in memory and
 * write it out asynchronously without needing to relock the object that was
 * modified at the time it gets written into the iclog.
 *
 * This function takes the prepared log vectors attached to each log item, and
 * formats the changes into the log vector buffer. The buffer it uses is
 * dependent on the current state of the vector in the CIL - the shadow lv is
 * guaranteed to be large enough for the current modification, but we will only
 * use that if we can't reuse the existing lv. If we can't reuse the existing
 * lv, then simply swap it out for the shadow lv. We don't free it - that is
 * done lazily either by the next modification or the freeing of the log item.
 *
 * We don't set up region headers during this process; we simply copy the
 * regions into the flat buffer. We can do this because we still have to do a
 * formatting step to write the regions into the iclog buffer. Writing the
 * ophdrs during the iclog write means that we can support splitting large
 * regions across iclog boundaries without needing a change in the format of
 * the item/region encapsulation.
 *
 * Hence what we need to do now is rewrite the vector array to point to the
 * copied region inside the buffer we just allocated. This allows us to format
 * the regions into the iclog as though they are being formatted directly out
 * of the objects themselves.
 */
static void
xlog_cil_insert_format_items(
	struct xlog		*log,
	struct xfs_trans	*tp,
	int			*diff_len,
	int			*diff_iovecs)
{
	struct xfs_log_item	*lip;

	/* Bail out if we didn't find a log item. */
	if (list_empty(&tp->t_items)) {
		ASSERT(0);
		return;
	}

	list_for_each_entry(lip, &tp->t_items, li_trans) {
		struct xfs_log_vec *lv;
		struct xfs_log_vec *old_lv = NULL;
		struct xfs_log_vec *shadow;
		bool	ordered = false;

		/* Skip items which aren't dirty in this transaction. */
		if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
			continue;

		/*
		 * The formatting size information is already attached to
		 * the shadow lv on the log item.
		 */
		shadow = lip->li_lv_shadow;
		if (shadow->lv_buf_len == XFS_LOG_VEC_ORDERED)
			ordered = true;

		/* Skip items that do not have any vectors for writing */
		if (!shadow->lv_niovecs && !ordered)
			continue;

		/* compare to existing item size */
		old_lv = lip->li_lv;
		if (lip->li_lv && shadow->lv_size <= lip->li_lv->lv_size) {
			/* same or smaller, optimise common overwrite case */
			lv = lip->li_lv;
			lv->lv_next = NULL;

			if (ordered)
				goto insert;

			/*
			 * set the item up as though it is a new insertion so
			 * that the space reservation accounting is correct.
			 */
			*diff_iovecs -= lv->lv_niovecs;
			*diff_len -= lv->lv_bytes;

			/* Ensure the lv is set up according to ->iop_size */
			lv->lv_niovecs = shadow->lv_niovecs;

			/* reset the lv buffer information for new formatting */
			lv->lv_buf_len = 0;
			lv->lv_bytes = 0;
			lv->lv_buf = (char *)lv +
					xlog_cil_iovec_space(lv->lv_niovecs);
		} else {
			/* switch to shadow buffer! */
			lv = shadow;
			lv->lv_item = lip;
			if (ordered) {
				/* track as an ordered logvec */
				ASSERT(lip->li_lv == NULL);
				goto insert;
			}
		}

		ASSERT(IS_ALIGNED((unsigned long)lv->lv_buf, sizeof(uint64_t)));
		lip->li_ops->iop_format(lip, lv);
insert:
		xfs_cil_prepare_item(log, lv, old_lv, diff_len, diff_iovecs);
	}
}

/*
 * Insert the log items into the CIL and calculate the difference in space
 * consumed by the item. Add the space to the checkpoint ticket and calculate
 * if the change requires additional log metadata. If it does, take that space
 * as well. Remove the amount of space we added to the checkpoint ticket from
 * the current transaction ticket so that the accounting works out correctly.
 */
static void
xlog_cil_insert_items(
	struct xlog		*log,
	struct xfs_trans	*tp)
{
	struct xfs_cil		*cil = log->l_cilp;
	struct xfs_cil_ctx	*ctx = cil->xc_ctx;
	struct xfs_log_item	*lip;
	int			len = 0;
	int			diff_iovecs = 0;
	int			iclog_space;
	int			iovhdr_res = 0, split_res = 0, ctx_res = 0;

	ASSERT(tp);

	/*
	 * We can do this safely because the context can't checkpoint until we
	 * are done so it doesn't matter exactly how we update the CIL.
	 */
	xlog_cil_insert_format_items(log, tp, &len, &diff_iovecs);

	spin_lock(&cil->xc_cil_lock);

	/* account for space used by new iovec headers  */
	iovhdr_res = diff_iovecs * sizeof(xlog_op_header_t);
	len += iovhdr_res;
	ctx->nvecs += diff_iovecs;

	/* attach the transaction to the CIL if it has any busy extents */
	if (!list_empty(&tp->t_busy))
		list_splice_init(&tp->t_busy, &ctx->busy_extents);

	/*
	 * Now transfer enough transaction reservation to the context ticket
	 * for the checkpoint. The context ticket is special - the unit
	 * reservation has to grow as well as the current reservation as we
	 * steal from tickets so we can correctly determine the space used
	 * during the transaction commit.
	 */
	if (ctx->ticket->t_curr_res == 0) {
		ctx_res = ctx->ticket->t_unit_res;
		ctx->ticket->t_curr_res = ctx_res;
		tp->t_ticket->t_curr_res -= ctx_res;
	}

	/* do we need space for more log record headers? */
	iclog_space = log->l_iclog_size - log->l_iclog_hsize;
	if (len > 0 && (ctx->space_used / iclog_space !=
				(ctx->space_used + len) / iclog_space)) {
		split_res = (len + iclog_space - 1) / iclog_space;
		/* need to take into account split region headers, too */
		split_res *= log->l_iclog_hsize + sizeof(struct xlog_op_header);
		ctx->ticket->t_unit_res += split_res;
		ctx->ticket->t_curr_res += split_res;
		tp->t_ticket->t_curr_res -= split_res;
		ASSERT(tp->t_ticket->t_curr_res >= len);
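		/*
		 * Worked example for the split reservation above (all numbers
		 * assumed purely for illustration): with 32KB iclogs and a
		 * 512 byte iclog header, iclog_space is 32768 - 512 = 32256
		 * bytes. A len of 40000 bytes that crosses an iclog boundary
		 * gives split_res = (40000 + 32255) / 32256 = 2, so two extra
		 * record/op header reservations are stolen from the
		 * transaction ticket.
		 */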
	}
	tp->t_ticket->t_curr_res -= len;
	ctx->space_used += len;

	/*
	 * If we've overrun the reservation, dump the tx details before we move
	 * the log items. Shutdown is imminent...
	 */
	if (WARN_ON(tp->t_ticket->t_curr_res < 0)) {
		xfs_warn(log->l_mp, "Transaction log reservation overrun:");
		xfs_warn(log->l_mp,
			 "  log items: %d bytes (iov hdrs: %d bytes)",
			 len, iovhdr_res);
		xfs_warn(log->l_mp, "  split region headers: %d bytes",
			 split_res);
		xfs_warn(log->l_mp, "  ctx ticket: %d bytes", ctx_res);
		xlog_print_trans(tp);
	}

	/*
	 * Now (re-)position everything modified at the tail of the CIL.
	 * We do this here so we only need to take the CIL lock once during
	 * the transaction commit.
	 */
	list_for_each_entry(lip, &tp->t_items, li_trans) {

		/* Skip items which aren't dirty in this transaction. */
		if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
			continue;

		/*
		 * Only move the item if it isn't already at the tail. This is
		 * to prevent a transient list_empty() state when reinserting
		 * an item that is already the only item in the CIL.
		 */
		if (!list_is_last(&lip->li_cil, &cil->xc_cil))
			list_move_tail(&lip->li_cil, &cil->xc_cil);
	}

	spin_unlock(&cil->xc_cil_lock);

	if (tp->t_ticket->t_curr_res < 0)
		xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR);
}

static void
xlog_cil_free_logvec(
	struct xfs_log_vec	*log_vector)
{
	struct xfs_log_vec	*lv;

	for (lv = log_vector; lv; ) {
		struct xfs_log_vec *next = lv->lv_next;
		kmem_free(lv);
		lv = next;
	}
}

static void
xlog_discard_endio_work(
	struct work_struct	*work)
{
	struct xfs_cil_ctx	*ctx =
		container_of(work, struct xfs_cil_ctx, discard_endio_work);
	struct xfs_mount	*mp = ctx->cil->xc_log->l_mp;

	xfs_extent_busy_clear(mp, &ctx->busy_extents, false);
	kmem_free(ctx);
}

/*
 * Queue up the actual completion to a thread to avoid IRQ-safe locking for
 * pagb_lock. Note that we need an unbounded workqueue, otherwise we might
 * get the execution delayed up to 30 seconds for weird reasons.
 */
static void
xlog_discard_endio(
	struct bio		*bio)
{
	struct xfs_cil_ctx	*ctx = bio->bi_private;

	INIT_WORK(&ctx->discard_endio_work, xlog_discard_endio_work);
	queue_work(xfs_discard_wq, &ctx->discard_endio_work);
	bio_put(bio);
}

static void
xlog_discard_busy_extents(
	struct xfs_mount	*mp,
	struct xfs_cil_ctx	*ctx)
{
	struct list_head	*list = &ctx->busy_extents;
	struct xfs_extent_busy	*busyp;
	struct bio		*bio = NULL;
	struct blk_plug		plug;
	int			error = 0;

	ASSERT(mp->m_flags & XFS_MOUNT_DISCARD);

	blk_start_plug(&plug);
	list_for_each_entry(busyp, list, list) {
		trace_xfs_discard_extent(mp, busyp->agno, busyp->bno,
					 busyp->length);

		error = __blkdev_issue_discard(mp->m_ddev_targp->bt_bdev,
				XFS_AGB_TO_DADDR(mp, busyp->agno, busyp->bno),
				XFS_FSB_TO_BB(mp, busyp->length),
				GFP_NOFS, 0, &bio);
		if (error && error != -EOPNOTSUPP) {
			xfs_info(mp,
		"discard failed for extent [0x%llx,%u], error %d",
				 (unsigned long long)busyp->bno,
				 busyp->length,
				 error);
			break;
		}
	}

	if (bio) {
		bio->bi_private = ctx;
		bio->bi_end_io = xlog_discard_endio;
		submit_bio(bio);
	} else {
		xlog_discard_endio_work(&ctx->discard_endio_work);
	}
	blk_finish_plug(&plug);
}

/*
 * Mark all items committed and clear busy extents. We free the log vector
 * chains in a separate pass so that we unpin the log items as quickly as
 * possible.
 */
static void
xlog_cil_committed(
	void	*args,
	int	abort)
{
	struct xfs_cil_ctx	*ctx = args;
	struct xfs_mount	*mp = ctx->cil->xc_log->l_mp;

	xfs_trans_committed_bulk(ctx->cil->xc_log->l_ailp, ctx->lv_chain,
					ctx->start_lsn, abort);

	xfs_extent_busy_sort(&ctx->busy_extents);
	xfs_extent_busy_clear(mp, &ctx->busy_extents,
			     (mp->m_flags & XFS_MOUNT_DISCARD) && !abort);

	/*
	 * If we are aborting the commit, wake up anyone waiting on the
	 * committing list. If we don't, then during a shutdown we can leave
	 * processes waiting in xlog_cil_force_lsn() on a sequence commit
	 * that will never happen because we aborted it.
	 */
	spin_lock(&ctx->cil->xc_push_lock);
	if (abort)
		wake_up_all(&ctx->cil->xc_commit_wait);
	list_del(&ctx->committing);
	spin_unlock(&ctx->cil->xc_push_lock);

	xlog_cil_free_logvec(ctx->lv_chain);

	if (!list_empty(&ctx->busy_extents))
		xlog_discard_busy_extents(mp, ctx);
	else
		kmem_free(ctx);
}

/*
 * Push the Committed Item List to the log. If @push_seq is zero, then it is a
 * background flush and so we can choose to ignore it. Otherwise, if the
 * current sequence is the same as @push_seq we need to do a flush. If
 * @push_seq is less than the current sequence, then it has already been
 * flushed and we don't need to do anything - the caller will wait for it to
 * complete if necessary.
 *
 * @push_seq is a value rather than a flag because that allows us to do an
 * unlocked check of the sequence number for a match. Hence we can allow log
 * forces to run racily and not issue pushes for the same sequence twice. If we
 * get a race between multiple pushes for the same sequence they will block on
 * the first one and then abort, hence avoiding needless pushes.
 */
STATIC int
xlog_cil_push(
	struct xlog	*log)
{
	struct xfs_cil		*cil = log->l_cilp;
	struct xfs_log_vec	*lv;
	struct xfs_cil_ctx	*ctx;
	struct xfs_cil_ctx	*new_ctx;
	struct xlog_in_core	*commit_iclog;
	struct xlog_ticket	*tic;
	int			num_iovecs;
	int			error = 0;
	struct xfs_trans_header thdr;
	struct xfs_log_iovec	lhdr;
	struct xfs_log_vec	lvhdr = { NULL };
	xfs_lsn_t		commit_lsn;
	xfs_lsn_t		push_seq;

	if (!cil)
		return 0;

	new_ctx = kmem_zalloc(sizeof(*new_ctx), KM_SLEEP|KM_NOFS);
	new_ctx->ticket = xlog_cil_ticket_alloc(log);

	down_write(&cil->xc_ctx_lock);
	ctx = cil->xc_ctx;

	spin_lock(&cil->xc_push_lock);
	push_seq = cil->xc_push_seq;
	ASSERT(push_seq <= ctx->sequence);

	/*
	 * Check if we have anything to push. If there is nothing, then we
	 * don't move on to a new sequence number and so we have to be able to
	 * push this sequence again later.
	 */
	if (list_empty(&cil->xc_cil)) {
		cil->xc_push_seq = 0;
		spin_unlock(&cil->xc_push_lock);
		goto out_skip;
	}

	/* check for a previously pushed sequence */
	if (push_seq < cil->xc_ctx->sequence) {
		spin_unlock(&cil->xc_push_lock);
		goto out_skip;
	}

	/*
	 * We are now going to push this context, so add it to the committing
	 * list before we do anything else. This ensures that anyone waiting on
	 * this push can easily detect the difference between a "push in
	 * progress" and "CIL is empty, nothing to do".
	 *
	 * IOWs, a wait loop can now check for:
	 *	the current sequence not being found on the committing list;
	 *	an empty CIL; and
	 *	an unchanged sequence number
	 * to detect a push that had nothing to do and therefore does not need
	 * waiting on. If the CIL is not empty, we get put on the committing
	 * list before emptying the CIL and bumping the sequence number. Hence
	 * an empty CIL and an unchanged sequence number means we jumped out
	 * above after doing nothing.
	 *
	 * Hence the waiter will either find the commit sequence on the
	 * committing list or the sequence number will be unchanged and the CIL
	 * still dirty. In that latter case, the push has not yet started, and
	 * so the waiter will have to continue trying to check the CIL
	 * committing list until it is found. In extreme cases of delay, the
	 * sequence may fully commit between the waiter's attempts to wait on
	 * the commit sequence.
	 */
	list_add(&ctx->committing, &cil->xc_committing);
	spin_unlock(&cil->xc_push_lock);

	/*
	 * pull all the log vectors off the items in the CIL, and
	 * remove the items from the CIL. We don't need the CIL lock
	 * here because it's only needed on the transaction commit
	 * side which is currently locked out by the flush lock.
	 */
	lv = NULL;
	num_iovecs = 0;
	while (!list_empty(&cil->xc_cil)) {
		struct xfs_log_item	*item;

		item = list_first_entry(&cil->xc_cil,
					struct xfs_log_item, li_cil);
		list_del_init(&item->li_cil);
		if (!ctx->lv_chain)
			ctx->lv_chain = item->li_lv;
		else
			lv->lv_next = item->li_lv;
		lv = item->li_lv;
		item->li_lv = NULL;
		num_iovecs += lv->lv_niovecs;
	}

	/*
	 * initialise the new context and attach it to the CIL. Then attach
	 * the current context to the CIL committing list so it can be found
	 * during log forces to extract the commit lsn of the sequence that
	 * needs to be forced.
	 */
	INIT_LIST_HEAD(&new_ctx->committing);
	INIT_LIST_HEAD(&new_ctx->busy_extents);
	new_ctx->sequence = ctx->sequence + 1;
	new_ctx->cil = cil;
	cil->xc_ctx = new_ctx;

	/*
	 * The switch is now done, so we can drop the context lock and move out
	 * of a shared context. We can't just go straight to the commit record,
	 * though - we need to synchronise with previous and future commits so
	 * that the commit records are correctly ordered in the log to ensure
	 * that we process items during log IO completion in the correct order.
	 *
	 * For example, if we get an EFI in one checkpoint and the EFD in the
	 * next (e.g. due to log forces), we do not want the checkpoint with
	 * the EFD to be committed before the checkpoint with the EFI. Hence
	 * we must strictly order the commit records of the checkpoints so
	 * that: a) the checkpoint callbacks are attached to the iclogs in the
	 * correct order; and b) the checkpoints are replayed in correct order
	 * in log recovery.
	 *
	 * Hence we need to add this context to the committing context list so
	 * that higher sequences will wait for us to write out a commit record
	 * before they do.
	 *
	 * xfs_log_force_lsn requires us to mirror the new sequence into the
	 * cil structure atomically with the addition of this sequence to the
	 * committing list. This also ensures that we can do unlocked checks
	 * against the current sequence in log forces without risking
	 * dereferencing a freed context pointer.
	 */
	spin_lock(&cil->xc_push_lock);
	cil->xc_current_sequence = new_ctx->sequence;
	spin_unlock(&cil->xc_push_lock);
	up_write(&cil->xc_ctx_lock);

	/*
	 * Build a checkpoint transaction header and write it to the log to
	 * begin the transaction. We need to account for the space used by the
	 * transaction header here as it is not accounted for in xlog_write().
	 *
	 * The LSN we need to pass to the log items on transaction commit is
	 * the LSN reported by the first log vector write. If we use the commit
	 * record lsn then we can move the tail beyond the grant write head.
	 */
	tic = ctx->ticket;
	thdr.th_magic = XFS_TRANS_HEADER_MAGIC;
	thdr.th_type = XFS_TRANS_CHECKPOINT;
	thdr.th_tid = tic->t_tid;
	thdr.th_num_items = num_iovecs;
	lhdr.i_addr = &thdr;
	lhdr.i_len = sizeof(xfs_trans_header_t);
	lhdr.i_type = XLOG_REG_TYPE_TRANSHDR;
	tic->t_curr_res -= lhdr.i_len + sizeof(xlog_op_header_t);

	lvhdr.lv_niovecs = 1;
	lvhdr.lv_iovecp = &lhdr;
	lvhdr.lv_next = ctx->lv_chain;

	error = xlog_write(log, &lvhdr, tic, &ctx->start_lsn, NULL, 0);
	if (error)
		goto out_abort_free_ticket;

	/*
	 * now that we've written the checkpoint into the log, strictly
	 * order the commit records so replay will get them in the right order.
	 */
restart:
	spin_lock(&cil->xc_push_lock);
	list_for_each_entry(new_ctx, &cil->xc_committing, committing) {
		/*
		 * Avoid getting stuck in this loop because we were woken by the
		 * shutdown, but then went back to sleep once already in the
		 * shutdown state.
		 */
		if (XLOG_FORCED_SHUTDOWN(log)) {
			spin_unlock(&cil->xc_push_lock);
			goto out_abort_free_ticket;
		}

		/*
		 * Higher sequences will wait for this one so skip them.
		 * Don't wait for our own sequence, either.
		 */
		if (new_ctx->sequence >= ctx->sequence)
			continue;
		if (!new_ctx->commit_lsn) {
			/*
			 * It is still being pushed! Wait for the push to
			 * complete, then start again from the beginning.
			 */
			xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock);
			goto restart;
		}
	}
	spin_unlock(&cil->xc_push_lock);

	/* xfs_log_done always frees the ticket on error. */
	commit_lsn = xfs_log_done(log->l_mp, tic, &commit_iclog, false);
	if (commit_lsn == -1)
		goto out_abort;

	/* attach all the transactions w/ busy extents to iclog */
	ctx->log_cb.cb_func = xlog_cil_committed;
	ctx->log_cb.cb_arg = ctx;
	error = xfs_log_notify(commit_iclog, &ctx->log_cb);
	if (error)
		goto out_abort;

	/*
	 * now the checkpoint commit is complete and we've attached the
	 * callbacks to the iclog we can assign the commit LSN to the context
	 * and wake up anyone who is waiting for the commit to complete.
	 */
	spin_lock(&cil->xc_push_lock);
	ctx->commit_lsn = commit_lsn;
	wake_up_all(&cil->xc_commit_wait);
	spin_unlock(&cil->xc_push_lock);

	/* release the hounds! */
	return xfs_log_release_iclog(log->l_mp, commit_iclog);

out_skip:
	up_write(&cil->xc_ctx_lock);
	xfs_log_ticket_put(new_ctx->ticket);
	kmem_free(new_ctx);
	return 0;

out_abort_free_ticket:
	xfs_log_ticket_put(tic);
out_abort:
	xlog_cil_committed(ctx, XFS_LI_ABORTED);
	return -EIO;
}

static void
xlog_cil_push_work(
	struct work_struct	*work)
{
	struct xfs_cil *cil = container_of(work, struct xfs_cil,
							xc_push_work);
	xlog_cil_push(cil->xc_log);
}

/*
 * We need to push the CIL every so often so we don't cache more than we can
 * fit in the log. The limit really is that a checkpoint can't be more than
 * half the log (the current checkpoint is not allowed to overwrite the
 * previous checkpoint), but commit latency and memory usage limit this to a
 * smaller size.
 */
static void
xlog_cil_push_background(
	struct xlog	*log)
{
	struct xfs_cil	*cil = log->l_cilp;

	/*
	 * The cil won't be empty because we are called while holding the
	 * context lock so whatever we added to the CIL will still be there
	 */
	ASSERT(!list_empty(&cil->xc_cil));

	/*
	 * don't do a background push if we haven't used up all the
	 * space available yet.
	 */
	if (cil->xc_ctx->space_used < XLOG_CIL_SPACE_LIMIT(log))
		return;

	spin_lock(&cil->xc_push_lock);
	if (cil->xc_push_seq < cil->xc_current_sequence) {
		cil->xc_push_seq = cil->xc_current_sequence;
		queue_work(log->l_mp->m_cil_workqueue, &cil->xc_push_work);
	}
	spin_unlock(&cil->xc_push_lock);
}
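
/*
 * For scale, assuming XLOG_CIL_SPACE_LIMIT(log) is defined as one eighth of
 * the log size in this kernel's xfs_log_priv.h (an assumption stated here,
 * not something this file defines): a 2GB log would start background pushes
 * once the current checkpoint context has consumed roughly 256MB.
 */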

/*
 * xlog_cil_push_now() is used to trigger an immediate CIL push to the sequence
 * number that is passed. When it returns, the work will be queued for
 * @push_seq, but it won't be completed. The caller is expected to do any
 * waiting for push_seq to complete if it is required.
 */
static void
xlog_cil_push_now(
	struct xlog	*log,
	xfs_lsn_t	push_seq)
{
	struct xfs_cil	*cil = log->l_cilp;

	if (!cil)
		return;

	ASSERT(push_seq && push_seq <= cil->xc_current_sequence);

	/* start on any pending background push to minimise wait time on it */
	flush_work(&cil->xc_push_work);

	/*
	 * If the CIL is empty or we've already pushed the sequence then
	 * there's no work we need to do.
	 */
	spin_lock(&cil->xc_push_lock);
	if (list_empty(&cil->xc_cil) || push_seq <= cil->xc_push_seq) {
		spin_unlock(&cil->xc_push_lock);
		return;
	}

	cil->xc_push_seq = push_seq;
	queue_work(log->l_mp->m_cil_workqueue, &cil->xc_push_work);
	spin_unlock(&cil->xc_push_lock);
}

bool
xlog_cil_empty(
	struct xlog	*log)
{
	struct xfs_cil	*cil = log->l_cilp;
	bool		empty = false;

	spin_lock(&cil->xc_push_lock);
	if (list_empty(&cil->xc_cil))
		empty = true;
	spin_unlock(&cil->xc_push_lock);
	return empty;
}

/*
 * Commit a transaction with the given vector to the Committed Item List.
 *
 * To do this, we need to format the item, pin it in memory if required and
 * account for the space used by the transaction. Once we have done that we
 * need to release the unused reservation for the transaction, attach the
 * transaction to the checkpoint context so we carry the busy extents through
 * to checkpoint completion, and then unlock all the items in the transaction.
 *
 * Takes the context lock in read mode to lock out background commit, and
 * returns without it held once background commits are allowed again.
 */
void
xfs_log_commit_cil(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_lsn_t		*commit_lsn,
	bool			regrant)
{
	struct xlog		*log = mp->m_log;
	struct xfs_cil		*cil = log->l_cilp;
	xfs_lsn_t		xc_commit_lsn;

	/*
	 * Do all necessary memory allocation before we lock the CIL.
	 * This ensures the allocation does not deadlock with a CIL
	 * push in memory reclaim (e.g. from kswapd).
	 */
	xlog_cil_alloc_shadow_bufs(log, tp);

	/* lock out background commit */
	down_read(&cil->xc_ctx_lock);

	xlog_cil_insert_items(log, tp);

	xc_commit_lsn = cil->xc_ctx->sequence;
	if (commit_lsn)
		*commit_lsn = xc_commit_lsn;

	xfs_log_done(mp, tp->t_ticket, NULL, regrant);
	tp->t_ticket = NULL;
	xfs_trans_unreserve_and_mod_sb(tp);

	/*
	 * Once all the items of the transaction have been copied to the CIL,
	 * the items can be unlocked and freed.
	 *
	 * This needs to be done before we drop the CIL context lock because we
	 * have to update state in the log items and unlock them before they go
	 * to disk. If we don't, then the CIL checkpoint can race with us and
	 * we can run checkpoint completion before we've updated and unlocked
	 * the log items. This affects (at least) processing of stale buffers,
	 * inodes and EFIs.
	 */
	xfs_trans_free_items(tp, xc_commit_lsn, false);

	xlog_cil_push_background(log);

	up_read(&cil->xc_ctx_lock);
}

/*
 * Conditionally push the CIL based on the sequence passed in.
 *
 * We only need to push if we haven't already pushed the sequence
 * number given. Hence the only time we will trigger a push here is
 * if the push sequence is the same as the current context.
 *
 * We return the current commit lsn to allow the callers to determine if an
 * iclog flush is necessary following this call.
 */
xfs_lsn_t
xlog_cil_force_lsn(
	struct xlog	*log,
	xfs_lsn_t	sequence)
{
	struct xfs_cil		*cil = log->l_cilp;
	struct xfs_cil_ctx	*ctx;
	xfs_lsn_t		commit_lsn = NULLCOMMITLSN;

	ASSERT(sequence <= cil->xc_current_sequence);

	/*
	 * check to see if we need to force out the current context.
	 * xlog_cil_push() handles racing pushes for the same sequence,
	 * so no need to deal with it here.
	 */
restart:
	xlog_cil_push_now(log, sequence);

	/*
	 * See if we can find a previous sequence still committing.
	 * We need to wait for all previous sequence commits to complete
	 * before allowing the force of push_seq to go ahead. Hence block
	 * on commits for those as well.
	 */
	spin_lock(&cil->xc_push_lock);
	list_for_each_entry(ctx, &cil->xc_committing, committing) {
		/*
		 * Avoid getting stuck in this loop because we were woken by the
		 * shutdown, but then went back to sleep once already in the
		 * shutdown state.
		 */
		if (XLOG_FORCED_SHUTDOWN(log))
			goto out_shutdown;
		if (ctx->sequence > sequence)
			continue;
		if (!ctx->commit_lsn) {
			/*
			 * It is still being pushed! Wait for the push to
			 * complete, then start again from the beginning.
			 */
			xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock);
			goto restart;
		}
		if (ctx->sequence != sequence)
			continue;
		/* found it! */
		commit_lsn = ctx->commit_lsn;
	}

	/*
	 * The call to xlog_cil_push_now() executes the push in the background.
	 * Hence by the time we have got here our sequence may not have been
	 * pushed yet. This is true if the current sequence still matches the
	 * push sequence after the above wait loop and the CIL still contains
	 * dirty objects. This is guaranteed by the push code first adding the
	 * context to the committing list before emptying the CIL.
	 *
	 * Hence if we don't find the context in the committing list and the
	 * current sequence number is unchanged then the CIL contents are
	 * significant. If the CIL is empty, it means there was nothing to push
	 * and that means there is nothing to wait for. If the CIL is not
	 * empty, it means we haven't yet started the push, because if it had
	 * started we would have found the context on the committing list.
	 */
	if (sequence == cil->xc_current_sequence &&
	    !list_empty(&cil->xc_cil)) {
		spin_unlock(&cil->xc_push_lock);
		goto restart;
	}

	spin_unlock(&cil->xc_push_lock);
	return commit_lsn;

	/*
	 * We detected a shutdown in progress. We need to trigger the log force
	 * to pass through its iclog state machine error handling, even though
	 * we are already in a shutdown state. Hence we can't return
	 * NULLCOMMITLSN here as that has special meaning to log forces (i.e.
	 * LSN is already stable), so we return a zero LSN instead.
	 */
out_shutdown:
	spin_unlock(&cil->xc_push_lock);
	return 0;
}

/*
 * Check if the current log item was first committed in this sequence.
 * We can't rely on just the log item being in the CIL, we have to check
 * the recorded commit sequence number.
 *
 * Note: for this to be used in a non-racy manner, it has to be called with
 * CIL flushing locked out. As a result, it should only be used during the
 * transaction commit process when deciding what to format into the item.
 */
bool
xfs_log_item_in_current_chkpt(
	struct xfs_log_item *lip)
{
	struct xfs_cil_ctx *ctx;

	if (list_empty(&lip->li_cil))
		return false;

	ctx = lip->li_mountp->m_log->l_cilp->xc_ctx;

	/*
	 * li_seq is written on the first commit of a log item to record the
	 * first checkpoint it is written to. Hence if it is different to the
	 * current sequence, we're in a new checkpoint.
	 */
	if (XFS_LSN_CMP(lip->li_seq, ctx->sequence) != 0)
		return false;
	return true;
}
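
/*
 * Hypothetical caller sketch (not code from this file; the helpers named
 * below are invented for illustration): an item's ->iop_format method might
 * use the predicate above to log full state only on the item's first commit
 * into a checkpoint and a smaller delta on subsequent relogging:
 *
 *	if (!xfs_log_item_in_current_chkpt(lip))
 *		format_full_state(lip, lv);	// hypothetical helper
 *	else
 *		format_delta(lip, lv);		// hypothetical helper
 */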

/*
 * Perform initial CIL structure initialisation.
 */
int
xlog_cil_init(
	struct xlog	*log)
{
	struct xfs_cil	*cil;
	struct xfs_cil_ctx *ctx;

	cil = kmem_zalloc(sizeof(*cil), KM_SLEEP|KM_MAYFAIL);
	if (!cil)
		return -ENOMEM;

	ctx = kmem_zalloc(sizeof(*ctx), KM_SLEEP|KM_MAYFAIL);
	if (!ctx) {
		kmem_free(cil);
		return -ENOMEM;
	}

	INIT_WORK(&cil->xc_push_work, xlog_cil_push_work);
	INIT_LIST_HEAD(&cil->xc_cil);
	INIT_LIST_HEAD(&cil->xc_committing);
	spin_lock_init(&cil->xc_cil_lock);
	spin_lock_init(&cil->xc_push_lock);
	init_rwsem(&cil->xc_ctx_lock);
	init_waitqueue_head(&cil->xc_commit_wait);

	INIT_LIST_HEAD(&ctx->committing);
	INIT_LIST_HEAD(&ctx->busy_extents);
	ctx->sequence = 1;
	ctx->cil = cil;
	cil->xc_ctx = ctx;
	cil->xc_current_sequence = ctx->sequence;

	cil->xc_log = log;
	log->l_cilp = cil;
	return 0;
}

void
xlog_cil_destroy(
	struct xlog	*log)
{
	if (log->l_cilp->xc_ctx) {
		if (log->l_cilp->xc_ctx->ticket)
			xfs_log_ticket_put(log->l_cilp->xc_ctx->ticket);
		kmem_free(log->l_cilp->xc_ctx);
	}

	ASSERT(list_empty(&log->l_cilp->xc_cil));
	kmem_free(log->l_cilp);
}