]>
Commit | Line | Data |
---|---|---|
b3b94faa DT |
1 | /* |
2 | * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. | |
da6dd40d | 3 | * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved. |
b3b94faa DT |
4 | * |
5 | * This copyrighted material is made available to anyone wishing to use, | |
6 | * modify, copy, or redistribute it subject to the terms and conditions | |
e9fc2aa0 | 7 | * of the GNU General Public License version 2. |
b3b94faa DT |
8 | */ |
9 | ||
10 | #include <linux/sched.h> | |
11 | #include <linux/slab.h> | |
12 | #include <linux/spinlock.h> | |
13 | #include <linux/completion.h> | |
14 | #include <linux/buffer_head.h> | |
5c676f6d | 15 | #include <linux/gfs2_ondisk.h> |
71b86f56 | 16 | #include <linux/crc32.h> |
a25311c8 | 17 | #include <linux/delay.h> |
ec69b188 SW |
18 | #include <linux/kthread.h> |
19 | #include <linux/freezer.h> | |
254db57f | 20 | #include <linux/bio.h> |
885bceca | 21 | #include <linux/blkdev.h> |
4667a0ec | 22 | #include <linux/writeback.h> |
4a36d08d | 23 | #include <linux/list_sort.h> |
b3b94faa DT |
24 | |
25 | #include "gfs2.h" | |
5c676f6d | 26 | #include "incore.h" |
b3b94faa DT |
27 | #include "bmap.h" |
28 | #include "glock.h" | |
29 | #include "log.h" | |
30 | #include "lops.h" | |
31 | #include "meta_io.h" | |
5c676f6d | 32 | #include "util.h" |
71b86f56 | 33 | #include "dir.h" |
63997775 | 34 | #include "trace_gfs2.h" |
b3b94faa | 35 | |
b3b94faa DT |
36 | /** |
37 | * gfs2_struct2blk - compute stuff | |
38 | * @sdp: the filesystem | |
39 | * @nstruct: the number of structures | |
40 | * @ssize: the size of the structures | |
41 | * | |
42 | * Compute the number of log descriptor blocks needed to hold a certain number | |
43 | * of structures of a certain size. | |
44 | * | |
45 | * Returns: the number of blocks needed (minimum is always 1) | |
46 | */ | |
47 | ||
48 | unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct, | |
49 | unsigned int ssize) | |
50 | { | |
51 | unsigned int blks; | |
52 | unsigned int first, second; | |
53 | ||
54 | blks = 1; | |
faa31ce8 | 55 | first = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / ssize; |
b3b94faa DT |
56 | |
57 | if (nstruct > first) { | |
568f4c96 SW |
58 | second = (sdp->sd_sb.sb_bsize - |
59 | sizeof(struct gfs2_meta_header)) / ssize; | |
5c676f6d | 60 | blks += DIV_ROUND_UP(nstruct - first, second); |
b3b94faa DT |
61 | } |
62 | ||
63 | return blks; | |
64 | } | |
65 | ||
1e1a3d03 SW |
/**
 * gfs2_remove_from_ail - Remove an entry from the ail lists, updating counters
 * @bd: The gfs2_bufdata to remove
 *
 * The ail lock _must_ be held when calling this function
 *
 */

void gfs2_remove_from_ail(struct gfs2_bufdata *bd)
{
	/* Detach from the owning transaction and both AIL lists */
	bd->bd_tr = NULL;
	list_del_init(&bd->bd_ail_st_list);
	list_del_init(&bd->bd_ail_gl_list);
	/* One fewer AIL buffer held against this glock */
	atomic_dec(&bd->bd_gl->gl_ail_count);
	/* Drop the buffer_head reference taken when bd entered the AIL */
	brelse(bd->bd_bh);
}
83 | ||
ddacfaf7 SW |
/**
 * gfs2_ail1_start_one - Start I/O on a part of the AIL
 * @sdp: the filesystem
 * @wbc: The writeback control structure
 * @tr: The transaction whose ail1 list is to be written back
 *
 * Returns: 1 if sd_ail_lock was dropped to start writeback (the caller
 * must restart its scan of the ail1 lists), 0 otherwise.
 */

static int gfs2_ail1_start_one(struct gfs2_sbd *sdp,
			       struct writeback_control *wbc,
			       struct gfs2_trans *tr)
__releases(&sdp->sd_ail_lock)
__acquires(&sdp->sd_ail_lock)
{
	struct gfs2_glock *gl = NULL;
	struct address_space *mapping;
	struct gfs2_bufdata *bd, *s;
	struct buffer_head *bh;

	list_for_each_entry_safe_reverse(bd, s, &tr->tr_ail1_list, bd_ail_st_list) {
		bh = bd->bd_bh;

		gfs2_assert(sdp, bd->bd_tr == tr);

		if (!buffer_busy(bh)) {
			/* I/O already complete: promote the buffer to ail2 */
			if (!buffer_uptodate(bh))
				gfs2_io_error_bh(sdp, bh);
			list_move(&bd->bd_ail_st_list, &tr->tr_ail2_list);
			continue;
		}

		if (!buffer_dirty(bh))
			continue;
		/* Issue writeback at most once per glock's mapping */
		if (gl == bd->bd_gl)
			continue;
		gl = bd->bd_gl;
		list_move(&bd->bd_ail_st_list, &tr->tr_ail1_list);
		mapping = bh->b_page->mapping;
		if (!mapping)
			continue;
		/* generic_writepages() may sleep: drop the spinlock around it */
		spin_unlock(&sdp->sd_ail_lock);
		generic_writepages(mapping, wbc);
		spin_lock(&sdp->sd_ail_lock);
		if (wbc->nr_to_write <= 0)
			break;
		/* Lock was dropped; the lists may have changed under us */
		return 1;
	}

	return 0;
}
ddacfaf7 | 134 | |
ddacfaf7 | 135 | |
4667a0ec SW |
/**
 * gfs2_ail1_flush - start writeback of some ail1 entries
 * @sdp: The super block
 * @wbc: The writeback control structure
 *
 * Writes back some ail1 entries, according to the limits in the
 * writeback control structure
 */

void gfs2_ail1_flush(struct gfs2_sbd *sdp, struct writeback_control *wbc)
{
	struct list_head *head = &sdp->sd_ail1_list;
	struct gfs2_trans *tr;
	struct blk_plug plug;

	trace_gfs2_ail_flush(sdp, wbc, 1);
	/* Plug the block queue so the per-glock writes can be merged */
	blk_start_plug(&plug);
	spin_lock(&sdp->sd_ail_lock);
restart:
	/* Oldest transactions sit at the tail: walk in reverse */
	list_for_each_entry_reverse(tr, head, tr_list) {
		if (wbc->nr_to_write <= 0)
			break;
		/* Non-zero return means sd_ail_lock was dropped: rescan */
		if (gfs2_ail1_start_one(sdp, wbc, tr))
			goto restart;
	}
	spin_unlock(&sdp->sd_ail_lock);
	blk_finish_plug(&plug);
	trace_gfs2_ail_flush(sdp, wbc, 0);
}
165 | ||
166 | /** | |
167 | * gfs2_ail1_start - start writeback of all ail1 entries | |
168 | * @sdp: The superblock | |
169 | */ | |
170 | ||
171 | static void gfs2_ail1_start(struct gfs2_sbd *sdp) | |
172 | { | |
173 | struct writeback_control wbc = { | |
174 | .sync_mode = WB_SYNC_NONE, | |
175 | .nr_to_write = LONG_MAX, | |
176 | .range_start = 0, | |
177 | .range_end = LLONG_MAX, | |
178 | }; | |
179 | ||
180 | return gfs2_ail1_flush(sdp, &wbc); | |
ddacfaf7 SW |
181 | } |
182 | ||
/**
 * gfs2_ail1_empty_one - Check whether or not a trans in the AIL has been synced
 * @sdp: the filesystem
 * @tr: the AIL entry
 *
 * Moves every buffer whose I/O has completed from the transaction's
 * ail1 list to its ail2 list. Caller must hold sd_ail_lock.
 */

static void gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct gfs2_bufdata *bd, *s;
	struct buffer_head *bh;

	list_for_each_entry_safe_reverse(bd, s, &tr->tr_ail1_list,
					 bd_ail_st_list) {
		bh = bd->bd_bh;
		gfs2_assert(sdp, bd->bd_tr == tr);
		/* Locked or dirty means I/O is still outstanding */
		if (buffer_busy(bh))
			continue;
		if (!buffer_uptodate(bh))
			gfs2_io_error_bh(sdp, bh);
		list_move(&bd->bd_ail_st_list, &tr->tr_ail2_list);
	}

}
207 | ||
4667a0ec SW |
/**
 * gfs2_ail1_empty - Try to empty the ail1 lists
 * @sdp: The superblock
 *
 * Tries to empty the ail1 lists, starting with the oldest first
 *
 * Returns: non-zero if the ail1 list is now empty
 */

static int gfs2_ail1_empty(struct gfs2_sbd *sdp)
{
	struct gfs2_trans *tr, *s;
	int oldest_tr = 1;
	int ret;

	spin_lock(&sdp->sd_ail_lock);
	/* Walk oldest -> newest */
	list_for_each_entry_safe_reverse(tr, s, &sdp->sd_ail1_list, tr_list) {
		gfs2_ail1_empty_one(sdp, tr);
		/* Only a fully-synced prefix may move to ail2: stop
		   promoting once a transaction still has busy buffers */
		if (list_empty(&tr->tr_ail1_list) && oldest_tr)
			list_move(&tr->tr_list, &sdp->sd_ail2_list);
		else
			oldest_tr = 0;
	}
	ret = list_empty(&sdp->sd_ail1_list);
	spin_unlock(&sdp->sd_ail_lock);

	return ret;
}
234 | ||
26b06a69 SW |
/*
 * gfs2_ail1_wait - wait for in-flight ail1 I/O
 * @sdp: The superblock
 *
 * Waits on (at most) the first locked buffer found on the ail1 lists,
 * oldest transaction first, then returns.
 */
static void gfs2_ail1_wait(struct gfs2_sbd *sdp)
{
	struct gfs2_trans *tr;
	struct gfs2_bufdata *bd;
	struct buffer_head *bh;

	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_reverse(tr, &sdp->sd_ail1_list, tr_list) {
		list_for_each_entry(bd, &tr->tr_ail1_list, bd_ail_st_list) {
			bh = bd->bd_bh;
			if (!buffer_locked(bh))
				continue;
			/* Pin the bh so it cannot go away once we drop the lock */
			get_bh(bh);
			spin_unlock(&sdp->sd_ail_lock);
			wait_on_buffer(bh);
			brelse(bh);
			return;
		}
	}
	spin_unlock(&sdp->sd_ail_lock);
}
ddacfaf7 SW |
256 | |
/**
 * gfs2_ail2_empty_one - Check whether or not a trans in the AIL has been synced
 * @sdp: the filesystem
 * @tr: the AIL entry
 *
 * Drains the transaction's ail2 list completely. Caller must hold
 * sd_ail_lock (required by gfs2_remove_from_ail()).
 */

static void gfs2_ail2_empty_one(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head = &tr->tr_ail2_list;
	struct gfs2_bufdata *bd;

	while (!list_empty(head)) {
		/* Pop from the tail (oldest entry) each iteration */
		bd = list_entry(head->prev, struct gfs2_bufdata,
				bd_ail_st_list);
		gfs2_assert(sdp, bd->bd_tr == tr);
		gfs2_remove_from_ail(bd);
	}
}
276 | ||
b3b94faa DT |
/*
 * ail2_empty - Free the ail2 transactions that fall behind the new log tail
 * @sdp: the filesystem
 * @new_tail: the journal block number the tail is advancing to
 *
 * Any transaction whose first block lies in [old_tail, new_tail) -- taken
 * modulo the circular journal -- is fully on disk and can be freed.
 */
static void ail2_empty(struct gfs2_sbd *sdp, unsigned int new_tail)
{
	struct gfs2_trans *tr, *safe;
	unsigned int old_tail = sdp->sd_log_tail;
	/* The journal is circular: the tail may have wrapped past the end */
	int wrap = (new_tail < old_tail);
	int a, b, rm;

	spin_lock(&sdp->sd_ail_lock);

	list_for_each_entry_safe(tr, safe, &sdp->sd_ail2_list, tr_list) {
		a = (old_tail <= tr->tr_first);
		b = (tr->tr_first < new_tail);
		/* In the wrapped case the interval is the union of the two halves */
		rm = (wrap) ? (a || b) : (a && b);
		if (!rm)
			continue;

		gfs2_ail2_empty_one(sdp, tr);
		list_del(&tr->tr_list);
		gfs2_assert_warn(sdp, list_empty(&tr->tr_ail1_list));
		gfs2_assert_warn(sdp, list_empty(&tr->tr_ail2_list));
		kfree(tr);
	}

	spin_unlock(&sdp->sd_ail_lock);
}
302 | ||
24972557 BM |
/**
 * gfs2_log_release - Release a given number of log blocks
 * @sdp: The GFS2 superblock
 * @blks: The number of blocks
 *
 * Returns the blocks to the pool of free journal blocks and drops the
 * read side of sd_log_flush_lock taken by gfs2_log_reserve().
 */

void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks)
{

	atomic_add(blks, &sdp->sd_log_blks_free);
	trace_gfs2_log_blocks(sdp, blks);
	/* The free count can never exceed the size of the journal */
	gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
				  sdp->sd_jdesc->jd_blocks);
	up_read(&sdp->sd_log_flush_lock);
}
319 | ||
b3b94faa DT |
/**
 * gfs2_log_reserve - Make a log reservation
 * @sdp: The GFS2 superblock
 * @blks: The number of blocks to reserve
 *
 * Note that we never give out the last few blocks of the journal. Thats
 * due to the fact that there is a small number of header blocks
 * associated with each log flush. The exact number can't be known until
 * flush time, so we ensure that we have just enough free blocks at all
 * times to avoid running out during a log flush.
 *
 * We no longer flush the log here, instead we wake up logd to do that
 * for us. To avoid the thundering herd and to ensure that we deal fairly
 * with queued waiters, we use an exclusive wait. This means that when we
 * get woken with enough journal space to get our reservation, we need to
 * wake the next waiter on the list.
 *
 * On success the caller holds sd_log_flush_lock for read; it is released
 * by gfs2_log_release().
 *
 * Returns: errno
 */

int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks)
{
	int ret = 0;
	/* Headroom kept back for flush-time header blocks */
	unsigned reserved_blks = 7 * (4096 / sdp->sd_vfs->s_blocksize);
	unsigned wanted = blks + reserved_blks;
	DEFINE_WAIT(wait);
	int did_wait = 0;
	unsigned int free_blocks;

	if (gfs2_assert_warn(sdp, blks) ||
	    gfs2_assert_warn(sdp, blks <= sdp->sd_jdesc->jd_blocks))
		return -EINVAL;
retry:
	free_blocks = atomic_read(&sdp->sd_log_blks_free);
	if (unlikely(free_blocks <= wanted)) {
		/* Not enough space: poke logd and sleep until there is */
		do {
			prepare_to_wait_exclusive(&sdp->sd_log_waitq, &wait,
						  TASK_UNINTERRUPTIBLE);
			wake_up(&sdp->sd_logd_waitq);
			did_wait = 1;
			if (atomic_read(&sdp->sd_log_blks_free) <= wanted)
				io_schedule();
			free_blocks = atomic_read(&sdp->sd_log_blks_free);
		} while(free_blocks <= wanted);
		finish_wait(&sdp->sd_log_waitq, &wait);
	}
	atomic_inc(&sdp->sd_reserving_log);
	/* Claim the blocks; a lost cmpxchg race means someone else got
	   there first, so re-read and retry */
	if (atomic_cmpxchg(&sdp->sd_log_blks_free, free_blocks,
			   free_blocks - blks) != free_blocks) {
		if (atomic_dec_and_test(&sdp->sd_reserving_log))
			wake_up(&sdp->sd_reserving_log_wait);
		goto retry;
	}
	trace_gfs2_log_blocks(sdp, -blks);

	/*
	 * If we waited, then so might others, wake them up _after_ we get
	 * our share of the log.
	 */
	if (unlikely(did_wait))
		wake_up(&sdp->sd_log_waitq);

	down_read(&sdp->sd_log_flush_lock);
	/* Journal may have died while we slept: give the blocks back */
	if (unlikely(!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))) {
		gfs2_log_release(sdp, blks);
		ret = -EROFS;
	}
	if (atomic_dec_and_test(&sdp->sd_reserving_log))
		wake_up(&sdp->sd_reserving_log_wait);
	return ret;
}
391 | ||
b3b94faa DT |
392 | /** |
393 | * log_distance - Compute distance between two journal blocks | |
394 | * @sdp: The GFS2 superblock | |
395 | * @newer: The most recent journal block of the pair | |
396 | * @older: The older journal block of the pair | |
397 | * | |
398 | * Compute the distance (in the journal direction) between two | |
399 | * blocks in the journal | |
400 | * | |
401 | * Returns: the distance in blocks | |
402 | */ | |
403 | ||
faa31ce8 | 404 | static inline unsigned int log_distance(struct gfs2_sbd *sdp, unsigned int newer, |
b3b94faa DT |
405 | unsigned int older) |
406 | { | |
407 | int dist; | |
408 | ||
409 | dist = newer - older; | |
410 | if (dist < 0) | |
411 | dist += sdp->sd_jdesc->jd_blocks; | |
412 | ||
413 | return dist; | |
414 | } | |
415 | ||
2332c443 RP |
/**
 * calc_reserved - Calculate the number of blocks to reserve when
 *                 refunding a transaction's unused buffers.
 * @sdp: The GFS2 superblock
 *
 * This is complex. We need to reserve room for all our currently used
 * metadata buffers (e.g. normal file I/O rewriting file time stamps) and
 * all our journaled data buffers for journaled files (e.g. files in the
 * meta_fs like rindex, or files for which chattr +j was done.)
 * If we don't reserve enough space, gfs2_log_refund and gfs2_log_flush
 * will count it as free space (sd_log_blks_free) and corruption will follow.
 *
 * We can have metadata bufs and jdata bufs in the same journal. So each
 * type gets its own log header, for which we need to reserve a block.
 * In fact, each type has the potential for needing more than one header
 * in cases where we have more buffers than will fit on a journal page.
 * Metadata journal entries take up half the space of journaled buffer entries.
 * Thus, metadata entries have buf_limit (502) and journaled buffers have
 * databuf_limit (251) before they cause a wrap around.
 *
 * Also, we need to reserve blocks for revoke journal entries and one for an
 * overall header for the lot.
 *
 * Returns: the number of blocks reserved
 */
static unsigned int calc_reserved(struct gfs2_sbd *sdp)
{
	unsigned int reserved = 0;
	unsigned int mbuf;
	unsigned int dbuf;
	struct gfs2_trans *tr = sdp->sd_log_tr;

	if (tr) {
		mbuf = tr->tr_num_buf_new - tr->tr_num_buf_rm;
		dbuf = tr->tr_num_databuf_new - tr->tr_num_databuf_rm;
		reserved = mbuf + dbuf;
		/* Account for header blocks */
		reserved += DIV_ROUND_UP(mbuf, buf_limit(sdp));
		reserved += DIV_ROUND_UP(dbuf, databuf_limit(sdp));
	}

	/* Revoke entries are u64 block numbers packed into log blocks */
	if (sdp->sd_log_commited_revoke > 0)
		reserved += gfs2_struct2blk(sdp, sdp->sd_log_commited_revoke,
					    sizeof(u64));
	/* One for the overall header */
	if (reserved)
		reserved++;
	return reserved;
}
465 | ||
b3b94faa DT |
/*
 * current_tail - Find the current tail of the log
 * @sdp: The GFS2 superblock
 *
 * The tail is the first block of the oldest transaction still on the
 * ail1 list, or the log head when nothing is outstanding.
 *
 * Returns: the journal block number of the log tail
 */
static unsigned int current_tail(struct gfs2_sbd *sdp)
{
	struct gfs2_trans *tr;
	unsigned int tail;

	spin_lock(&sdp->sd_ail_lock);

	if (list_empty(&sdp->sd_ail1_list)) {
		tail = sdp->sd_log_head;
	} else {
		/* Oldest transaction sits at the back of the list */
		tr = list_entry(sdp->sd_ail1_list.prev, struct gfs2_trans,
				tr_list);
		tail = tr->tr_first;
	}

	spin_unlock(&sdp->sd_ail_lock);

	return tail;
}
485 | ||
2332c443 | 486 | static void log_pull_tail(struct gfs2_sbd *sdp, unsigned int new_tail) |
b3b94faa DT |
487 | { |
488 | unsigned int dist = log_distance(sdp, new_tail, sdp->sd_log_tail); | |
489 | ||
490 | ail2_empty(sdp, new_tail); | |
491 | ||
fd041f0b | 492 | atomic_add(dist, &sdp->sd_log_blks_free); |
63997775 | 493 | trace_gfs2_log_blocks(sdp, dist); |
5e687eac BM |
494 | gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <= |
495 | sdp->sd_jdesc->jd_blocks); | |
b3b94faa DT |
496 | |
497 | sdp->sd_log_tail = new_tail; | |
498 | } | |
499 | ||
b3b94faa | 500 | |
34cc1781 | 501 | static void log_flush_wait(struct gfs2_sbd *sdp) |
b3b94faa | 502 | { |
16615be1 SW |
503 | DEFINE_WAIT(wait); |
504 | ||
505 | if (atomic_read(&sdp->sd_log_in_flight)) { | |
506 | do { | |
507 | prepare_to_wait(&sdp->sd_log_flush_wait, &wait, | |
508 | TASK_UNINTERRUPTIBLE); | |
509 | if (atomic_read(&sdp->sd_log_in_flight)) | |
510 | io_schedule(); | |
511 | } while(atomic_read(&sdp->sd_log_in_flight)); | |
512 | finish_wait(&sdp->sd_log_flush_wait, &wait); | |
b3b94faa | 513 | } |
b3b94faa DT |
514 | } |
515 | ||
45138990 | 516 | static int ip_cmp(void *priv, struct list_head *a, struct list_head *b) |
4a36d08d | 517 | { |
45138990 | 518 | struct gfs2_inode *ipa, *ipb; |
4a36d08d | 519 | |
45138990 SW |
520 | ipa = list_entry(a, struct gfs2_inode, i_ordered); |
521 | ipb = list_entry(b, struct gfs2_inode, i_ordered); | |
4a36d08d | 522 | |
45138990 | 523 | if (ipa->i_no_addr < ipb->i_no_addr) |
4a36d08d | 524 | return -1; |
45138990 | 525 | if (ipa->i_no_addr > ipb->i_no_addr) |
4a36d08d BP |
526 | return 1; |
527 | return 0; | |
528 | } | |
529 | ||
d7b616e2 SW |
/*
 * gfs2_ordered_write - Start data writeback on all ordered-mode inodes
 * @sdp: The GFS2 superblock
 *
 * Kicks off dirty-page writeback for every inode on the ordered list so
 * that data reaches disk before the log commits.
 */
static void gfs2_ordered_write(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip;
	LIST_HEAD(written);

	spin_lock(&sdp->sd_ordered_lock);
	/* Sort by disk address so writeback is mostly sequential */
	list_sort(NULL, &sdp->sd_log_le_ordered, &ip_cmp);
	while (!list_empty(&sdp->sd_log_le_ordered)) {
		ip = list_entry(sdp->sd_log_le_ordered.next, struct gfs2_inode, i_ordered);
		list_move(&ip->i_ordered, &written);
		if (ip->i_inode.i_mapping->nrpages == 0)
			continue;
		/* fdatawrite may sleep: drop the spinlock around it */
		spin_unlock(&sdp->sd_ordered_lock);
		filemap_fdatawrite(ip->i_inode.i_mapping);
		spin_lock(&sdp->sd_ordered_lock);
	}
	/* Put everything back so gfs2_ordered_wait() can find it */
	list_splice(&written, &sdp->sd_log_le_ordered);
	spin_unlock(&sdp->sd_ordered_lock);
}
549 | ||
/*
 * gfs2_ordered_wait - Wait for ordered-mode data writeback to complete
 * @sdp: The GFS2 superblock
 *
 * Drains the ordered list, waiting on each inode's outstanding data I/O
 * and clearing its GIF_ORDERED flag as it goes.
 */
static void gfs2_ordered_wait(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip;

	spin_lock(&sdp->sd_ordered_lock);
	while (!list_empty(&sdp->sd_log_le_ordered)) {
		ip = list_entry(sdp->sd_log_le_ordered.next, struct gfs2_inode, i_ordered);
		list_del(&ip->i_ordered);
		WARN_ON(!test_and_clear_bit(GIF_ORDERED, &ip->i_flags));
		if (ip->i_inode.i_mapping->nrpages == 0)
			continue;
		/* fdatawait may sleep: drop the spinlock around it */
		spin_unlock(&sdp->sd_ordered_lock);
		filemap_fdatawait(ip->i_inode.i_mapping);
		spin_lock(&sdp->sd_ordered_lock);
	}
	spin_unlock(&sdp->sd_ordered_lock);
}
567 | ||
/*
 * gfs2_ordered_del_inode - Remove an inode from the ordered-write list
 * @ip: The inode to remove
 */
void gfs2_ordered_del_inode(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);

	spin_lock(&sdp->sd_ordered_lock);
	/* Only unlink if it is actually queued on the ordered list */
	if (test_and_clear_bit(GIF_ORDERED, &ip->i_flags))
		list_del(&ip->i_ordered);
	spin_unlock(&sdp->sd_ordered_lock);
}
577 | ||
5d054964 BM |
/*
 * gfs2_add_revoke - Turn a buffer's bufdata into a revoke log entry
 * @sdp: The GFS2 superblock
 * @bd: The gfs2_bufdata to convert
 *
 * Detaches bd from its buffer_head and AIL lists and queues it on the
 * revoke list for the next log flush.
 */
void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
{
	struct buffer_head *bh = bd->bd_bh;
	struct gfs2_glock *gl = bd->bd_gl;

	bh->b_private = NULL;
	/* Remember the block number before letting go of the bh */
	bd->bd_blkno = bh->b_blocknr;
	gfs2_remove_from_ail(bd); /* drops ref on bh */
	bd->bd_bh = NULL;
	bd->bd_ops = &gfs2_revoke_lops;
	sdp->sd_log_num_revoke++;
	atomic_inc(&gl->gl_revokes);
	set_bit(GLF_LFLUSH, &gl->gl_flags);
	list_add(&bd->bd_list, &sdp->sd_log_le_revoke);
}
593 | ||
/*
 * gfs2_write_revokes - Queue revokes for completed-but-unrevoked AIL buffers
 * @sdp: The GFS2 superblock
 *
 * Scans the ail lists for buffers whose I/O has completed (on ail2) but
 * which have not yet been revoked, and queues as many revokes as fit in
 * the log space available to this flush.
 */
void gfs2_write_revokes(struct gfs2_sbd *sdp)
{
	struct gfs2_trans *tr;
	struct gfs2_bufdata *bd, *tmp;
	int have_revokes = 0;
	/* Revokes (u64 block numbers) that fit in one log descriptor block */
	int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);

	gfs2_ail1_empty(sdp);
	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry(tr, &sdp->sd_ail1_list, tr_list) {
		list_for_each_entry(bd, &tr->tr_ail2_list, bd_ail_st_list) {
			/* An empty bd_list means it is not yet on the revoke list */
			if (list_empty(&bd->bd_list)) {
				have_revokes = 1;
				goto done;
			}
		}
	}
done:
	spin_unlock(&sdp->sd_ail_lock);
	if (have_revokes == 0)
		return;
	/* Continuation blocks carry only a meta header, so hold more entries */
	while (sdp->sd_log_num_revoke > max_revokes)
		max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64);
	max_revokes -= sdp->sd_log_num_revoke;
	if (!sdp->sd_log_num_revoke) {
		atomic_dec(&sdp->sd_log_blks_free);
		/* If no blocks have been reserved, we need to also
		 * reserve a block for the header */
		if (!sdp->sd_log_blks_reserved)
			atomic_dec(&sdp->sd_log_blks_free);
	}
	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry(tr, &sdp->sd_ail1_list, tr_list) {
		list_for_each_entry_safe(bd, tmp, &tr->tr_ail2_list, bd_ail_st_list) {
			if (max_revokes == 0)
				goto out_of_blocks;
			if (!list_empty(&bd->bd_list))
				continue;
			gfs2_add_revoke(sdp, bd);
			max_revokes--;
		}
	}
out_of_blocks:
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);

	/* Undo the block reservation if nothing was actually queued */
	if (!sdp->sd_log_num_revoke) {
		atomic_inc(&sdp->sd_log_blks_free);
		if (!sdp->sd_log_blks_reserved)
			atomic_inc(&sdp->sd_log_blks_free);
	}
}
647 | ||
34cc1781 SW |
/**
 * log_write_header - Get and initialize a journal header buffer
 * @sdp: The GFS2 superblock
 * @flags: Log header flags (e.g. unmount flag)
 *
 * Builds and writes the log header at the current flush head, waits for
 * the write to complete, then advances the log tail if it moved.
 */

static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
{
	struct gfs2_log_header *lh;
	unsigned int tail;
	u32 hash;
	/* FUA/flush so the header is durable on barrier-capable devices */
	int rw = WRITE_FLUSH_FUA | REQ_META;
	struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
	enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);
	lh = page_address(page);
	clear_page(lh);

	/* Must never write a header while the fs is frozen */
	gfs2_assert_withdraw(sdp, (state != SFS_FROZEN));

	tail = current_tail(sdp);

	lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH);
	lh->lh_header.__pad0 = cpu_to_be64(0);
	lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH);
	lh->lh_header.mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
	lh->lh_sequence = cpu_to_be64(sdp->sd_log_sequence++);
	lh->lh_flags = cpu_to_be32(flags);
	lh->lh_tail = cpu_to_be32(tail);
	lh->lh_blkno = cpu_to_be32(sdp->sd_log_flush_head);
	hash = gfs2_disk_hash(page_address(page), sizeof(struct gfs2_log_header));
	lh->lh_hash = cpu_to_be32(hash);

	if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags)) {
		/* No barrier support: drain all I/O first, use plain sync write */
		gfs2_ordered_wait(sdp);
		log_flush_wait(sdp);
		rw = WRITE_SYNC | REQ_META | REQ_PRIO;
	}

	sdp->sd_log_idle = (tail == sdp->sd_log_flush_head);
	gfs2_log_write_page(sdp, page);
	gfs2_log_flush_bio(sdp, rw);
	log_flush_wait(sdp);

	if (sdp->sd_log_tail != tail)
		log_pull_tail(sdp, tail);
}
696 | ||
b3b94faa | 697 | /** |
b09e593d | 698 | * gfs2_log_flush - flush incore transaction(s) |
b3b94faa DT |
699 | * @sdp: the filesystem |
700 | * @gl: The glock structure to flush. If NULL, flush the whole incore log | |
701 | * | |
702 | */ | |
703 | ||
24972557 BM |
704 | void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, |
705 | enum gfs2_flush_type type) | |
b3b94faa | 706 | { |
16ca9412 | 707 | struct gfs2_trans *tr; |
2e60d768 | 708 | enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state); |
b3b94faa | 709 | |
484adff8 | 710 | down_write(&sdp->sd_log_flush_lock); |
f55ab26a | 711 | |
2bcd610d SW |
712 | /* Log might have been flushed while we waited for the flush lock */ |
713 | if (gl && !test_bit(GLF_LFLUSH, &gl->gl_flags)) { | |
714 | up_write(&sdp->sd_log_flush_lock); | |
715 | return; | |
f55ab26a | 716 | } |
63997775 | 717 | trace_gfs2_log_flush(sdp, 1); |
f55ab26a | 718 | |
b1ab1e44 SW |
719 | sdp->sd_log_flush_head = sdp->sd_log_head; |
720 | sdp->sd_log_flush_wrapped = 0; | |
16ca9412 BM |
721 | tr = sdp->sd_log_tr; |
722 | if (tr) { | |
723 | sdp->sd_log_tr = NULL; | |
724 | INIT_LIST_HEAD(&tr->tr_ail1_list); | |
725 | INIT_LIST_HEAD(&tr->tr_ail2_list); | |
b1ab1e44 | 726 | tr->tr_first = sdp->sd_log_flush_head; |
2e60d768 BM |
727 | if (unlikely (state == SFS_FROZEN)) |
728 | gfs2_assert_withdraw(sdp, !tr->tr_num_buf_new && !tr->tr_num_databuf_new); | |
16ca9412 | 729 | } |
b3b94faa | 730 | |
2e60d768 BM |
731 | if (unlikely(state == SFS_FROZEN)) |
732 | gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke); | |
b3b94faa DT |
733 | gfs2_assert_withdraw(sdp, |
734 | sdp->sd_log_num_revoke == sdp->sd_log_commited_revoke); | |
735 | ||
d7b616e2 | 736 | gfs2_ordered_write(sdp); |
d69a3c65 | 737 | lops_before_commit(sdp, tr); |
e8c92ed7 | 738 | gfs2_log_flush_bio(sdp, WRITE); |
d7b616e2 | 739 | |
34cc1781 | 740 | if (sdp->sd_log_head != sdp->sd_log_flush_head) { |
428fd95d | 741 | log_flush_wait(sdp); |
fdb76a42 | 742 | log_write_header(sdp, 0); |
34cc1781 | 743 | } else if (sdp->sd_log_tail != current_tail(sdp) && !sdp->sd_log_idle){ |
fd041f0b | 744 | atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */ |
63997775 | 745 | trace_gfs2_log_blocks(sdp, -1); |
fdb76a42 | 746 | log_write_header(sdp, 0); |
2332c443 | 747 | } |
16ca9412 | 748 | lops_after_commit(sdp, tr); |
b09e593d | 749 | |
fe1a698f SW |
750 | gfs2_log_lock(sdp); |
751 | sdp->sd_log_head = sdp->sd_log_flush_head; | |
faa31ce8 | 752 | sdp->sd_log_blks_reserved = 0; |
faa31ce8 | 753 | sdp->sd_log_commited_revoke = 0; |
b3b94faa | 754 | |
d6a079e8 | 755 | spin_lock(&sdp->sd_ail_lock); |
16ca9412 BM |
756 | if (tr && !list_empty(&tr->tr_ail1_list)) { |
757 | list_add(&tr->tr_list, &sdp->sd_ail1_list); | |
758 | tr = NULL; | |
b3b94faa | 759 | } |
d6a079e8 | 760 | spin_unlock(&sdp->sd_ail_lock); |
b3b94faa | 761 | gfs2_log_unlock(sdp); |
24972557 | 762 | |
24972557 BM |
763 | if (type != NORMAL_FLUSH) { |
764 | if (!sdp->sd_log_idle) { | |
765 | for (;;) { | |
766 | gfs2_ail1_start(sdp); | |
767 | gfs2_ail1_wait(sdp); | |
768 | if (gfs2_ail1_empty(sdp)) | |
769 | break; | |
770 | } | |
771 | atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */ | |
772 | trace_gfs2_log_blocks(sdp, -1); | |
773 | sdp->sd_log_flush_wrapped = 0; | |
774 | log_write_header(sdp, 0); | |
775 | sdp->sd_log_head = sdp->sd_log_flush_head; | |
776 | } | |
777 | if (type == SHUTDOWN_FLUSH || type == FREEZE_FLUSH) | |
778 | gfs2_log_shutdown(sdp); | |
2e60d768 BM |
779 | if (type == FREEZE_FLUSH) |
780 | atomic_set(&sdp->sd_freeze_state, SFS_FROZEN); | |
24972557 BM |
781 | } |
782 | ||
63997775 | 783 | trace_gfs2_log_flush(sdp, 0); |
484adff8 | 784 | up_write(&sdp->sd_log_flush_lock); |
b3b94faa | 785 | |
16ca9412 | 786 | kfree(tr); |
b3b94faa DT |
787 | } |
788 | ||
d69a3c65 SW |
/**
 * gfs2_merge_trans - Merge a new transaction into a cached transaction
 * @old: Original transaction to be expanded
 * @new: New transaction to be merged
 */

static void gfs2_merge_trans(struct gfs2_trans *old, struct gfs2_trans *new)
{
	/* Only the attached (cached) transaction may absorb others */
	WARN_ON_ONCE(old->tr_attached != 1);

	/* Accumulate the new transaction's counters */
	old->tr_num_buf_new += new->tr_num_buf_new;
	old->tr_num_databuf_new += new->tr_num_databuf_new;
	old->tr_num_buf_rm += new->tr_num_buf_rm;
	old->tr_num_databuf_rm += new->tr_num_databuf_rm;
	old->tr_num_revoke += new->tr_num_revoke;
	old->tr_num_revoke_rm += new->tr_num_revoke_rm;

	/* Move the new transaction's buffer lists onto the cached one */
	list_splice_tail_init(&new->tr_databuf, &old->tr_databuf);
	list_splice_tail_init(&new->tr_buf, &old->tr_buf);
}
809 | ||
b3b94faa DT |
/*
 * log_refund - Return a transaction's unused reserved blocks to the log
 * @sdp: The GFS2 superblock
 * @tr: The committing transaction
 *
 * Attaches (or merges) the transaction into sd_log_tr, recomputes the
 * real reservation via calc_reserved() and gives any surplus back to
 * sd_log_blks_free.
 */
static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	unsigned int reserved;
	unsigned int unused;
	unsigned int maxres;

	gfs2_log_lock(sdp);

	if (sdp->sd_log_tr) {
		gfs2_merge_trans(sdp->sd_log_tr, tr);
	} else if (tr->tr_num_buf_new || tr->tr_num_databuf_new) {
		gfs2_assert_withdraw(sdp, tr->tr_alloced);
		sdp->sd_log_tr = tr;
		tr->tr_attached = 1;
	}

	sdp->sd_log_commited_revoke += tr->tr_num_revoke - tr->tr_num_revoke_rm;
	reserved = calc_reserved(sdp);
	maxres = sdp->sd_log_blks_reserved + tr->tr_reserved;
	gfs2_assert_withdraw(sdp, maxres >= reserved);
	/* Refund blocks that were reserved but turned out not to be needed */
	unused = maxres - reserved;
	atomic_add(unused, &sdp->sd_log_blks_free);
	trace_gfs2_log_blocks(sdp, unused);
	gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
			     sdp->sd_jdesc->jd_blocks);
	sdp->sd_log_blks_reserved = reserved;

	gfs2_log_unlock(sdp);
}
839 | ||
840 | /** | |
841 | * gfs2_log_commit - Commit a transaction to the log | |
842 | * @sdp: the filesystem | |
843 | * @tr: the transaction | |
844 | * | |
5e687eac BM |
845 | * We wake up gfs2_logd if the number of pinned blocks exceed thresh1 |
846 | * or the total number of used blocks (pinned blocks plus AIL blocks) | |
847 | * is greater than thresh2. | |
848 | * | |
849 | * At mount time thresh1 is 1/3rd of journal size, thresh2 is 2/3rd of | |
850 | * journal size. | |
851 | * | |
b3b94faa DT |
852 | * Returns: errno |
853 | */ | |
854 | ||
855 | void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr) | |
856 | { | |
857 | log_refund(sdp, tr); | |
b3b94faa | 858 | |
5e687eac BM |
859 | if (atomic_read(&sdp->sd_log_pinned) > atomic_read(&sdp->sd_log_thresh1) || |
860 | ((sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free)) > | |
861 | atomic_read(&sdp->sd_log_thresh2))) | |
862 | wake_up(&sdp->sd_logd_waitq); | |
b3b94faa DT |
863 | } |
864 | ||
/**
 * gfs2_log_shutdown - write a shutdown header into a journal
 * @sdp: the filesystem
 *
 * Called with the log quiesced: no reservations, no outstanding revokes
 * and an empty AIL1 list (asserted below — any violation withdraws the
 * filesystem).  Writes an UNMOUNT log header so a later mount knows the
 * journal is clean, then brings the in-core head/tail up to date.
 */

void gfs2_log_shutdown(struct gfs2_sbd *sdp)
{
	/* The log must be idle before we can mark it unmounted. */
	gfs2_assert_withdraw(sdp, !sdp->sd_log_blks_reserved);
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
	gfs2_assert_withdraw(sdp, list_empty(&sdp->sd_ail1_list));

	sdp->sd_log_flush_head = sdp->sd_log_head;
	sdp->sd_log_flush_wrapped = 0;

	/* Stamp the journal with an unmount header at the flush head. */
	log_write_header(sdp, GFS2_LOG_HEAD_UNMOUNT);

	/* After the header the log should be fully drained. */
	gfs2_assert_warn(sdp, sdp->sd_log_head == sdp->sd_log_tail);
	gfs2_assert_warn(sdp, list_empty(&sdp->sd_ail2_list));

	/* Head and tail now both point just past the unmount header. */
	sdp->sd_log_head = sdp->sd_log_flush_head;
	sdp->sd_log_tail = sdp->sd_log_head;
}
888 | ||
5e687eac BM |
889 | static inline int gfs2_jrnl_flush_reqd(struct gfs2_sbd *sdp) |
890 | { | |
2e60d768 | 891 | return (atomic_read(&sdp->sd_log_pinned) >= atomic_read(&sdp->sd_log_thresh1)); |
5e687eac BM |
892 | } |
893 | ||
894 | static inline int gfs2_ail_flush_reqd(struct gfs2_sbd *sdp) | |
895 | { | |
896 | unsigned int used_blocks = sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free); | |
897 | return used_blocks >= atomic_read(&sdp->sd_log_thresh2); | |
898 | } | |
ec69b188 SW |
899 | |
/**
 * gfs2_logd - Update log tail as Active Items get flushed to in-place blocks
 * @sdp: Pointer to GFS2 superblock
 *
 * Kernel thread main loop: flushes the journal when the pinned-block
 * threshold (thresh1) is hit or the periodic timeout expires, and pushes
 * the AIL when journal usage crosses thresh2.  Sleeps interruptibly on
 * sd_logd_waitq between rounds of work.
 *
 * Also, periodically check to make sure that we're using the most recent
 * journal index.
 */

int gfs2_logd(void *data)
{
	struct gfs2_sbd *sdp = data;
	unsigned long t = 1;	/* remaining timeout in jiffies; 0 => expired */
	DEFINE_WAIT(wait);

	while (!kthread_should_stop()) {

		/* Flush on thresh1 pressure or when the periodic timer fired. */
		if (gfs2_jrnl_flush_reqd(sdp) || t == 0) {
			gfs2_ail1_empty(sdp);
			gfs2_log_flush(sdp, NULL, NORMAL_FLUSH);
		}

		/* Journal is filling up: push AIL1 writeback, wait for the
		 * I/O, reap completed entries, then flush the log. */
		if (gfs2_ail_flush_reqd(sdp)) {
			gfs2_ail1_start(sdp);
			gfs2_ail1_wait(sdp);
			gfs2_ail1_empty(sdp);
			gfs2_log_flush(sdp, NULL, NORMAL_FLUSH);
		}

		/* Let reservation waiters retry now that space may be free. */
		if (!gfs2_ail_flush_reqd(sdp))
			wake_up(&sdp->sd_log_waitq);

		/* Reload the tunable flush interval for the next sleep. */
		t = gfs2_tune_get(sdp, gt_logd_secs) * HZ;

		try_to_freeze();

		/* Sleep until the timeout runs out, work becomes required,
		 * or the thread is asked to stop.  The condition is
		 * re-checked after prepare_to_wait() to avoid missed wakeups. */
		do {
			prepare_to_wait(&sdp->sd_logd_waitq, &wait,
					TASK_INTERRUPTIBLE);
			if (!gfs2_ail_flush_reqd(sdp) &&
			    !gfs2_jrnl_flush_reqd(sdp) &&
			    !kthread_should_stop())
				t = schedule_timeout(t);
		} while(t && !gfs2_ail_flush_reqd(sdp) &&
			!gfs2_jrnl_flush_reqd(sdp) &&
			!kthread_should_stop());
		finish_wait(&sdp->sd_logd_waitq, &wait);
	}

	return 0;
}
950 |