/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/bio.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "util.h"
#include "dir.h"
#include "trace_gfs2.h"

#define PULL 1

/**
 * gfs2_struct2blk - compute the number of log descriptor blocks needed
 * @sdp: the filesystem
 * @nstruct: the number of structures
 * @ssize: the size of the structures
 *
 * Compute the number of log descriptor blocks needed to hold a certain number
 * of structures of a certain size.
 *
 * Returns: the number of blocks needed (minimum is always 1)
 */

unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct,
			     unsigned int ssize)
{
	unsigned int blks;
	unsigned int first, second;

	blks = 1;
	first = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / ssize;

	if (nstruct > first) {
		second = (sdp->sd_sb.sb_bsize -
			  sizeof(struct gfs2_meta_header)) / ssize;
		blks += DIV_ROUND_UP(nstruct - first, second);
	}

	return blks;
}

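/*
 * Worked example (illustrative figures only, not the real header sizes):
 * if the first descriptor block has room for first = 499 entries and each
 * continuation block for second = 509, then nstruct = 1000 entries need
 * 1 + DIV_ROUND_UP(1000 - 499, 509) = 2 blocks.
 */
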
/**
 * gfs2_remove_from_ail - Remove an entry from the ail lists, updating counters
 * @bd: The gfs2_bufdata to remove
 *
 * The log lock _must_ be held when calling this function
 *
 */

void gfs2_remove_from_ail(struct gfs2_bufdata *bd)
{
	bd->bd_ail = NULL;
	list_del_init(&bd->bd_ail_st_list);
	list_del_init(&bd->bd_ail_gl_list);
	atomic_dec(&bd->bd_gl->gl_ail_count);
	brelse(bd->bd_bh);
}

/**
 * gfs2_ail1_start_one - Start I/O on a part of the AIL
 * @sdp: the filesystem
 * @ai: the AIL entry to start I/O on
 *
 */

static void gfs2_ail1_start_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
__releases(&sdp->sd_log_lock)
__acquires(&sdp->sd_log_lock)
{
	struct gfs2_bufdata *bd, *s;
	struct buffer_head *bh;
	int retry;

	do {
		retry = 0;

		list_for_each_entry_safe_reverse(bd, s, &ai->ai_ail1_list,
						 bd_ail_st_list) {
			bh = bd->bd_bh;

			gfs2_assert(sdp, bd->bd_ail == ai);

			if (!buffer_busy(bh)) {
				if (!buffer_uptodate(bh))
					gfs2_io_error_bh(sdp, bh);
				list_move(&bd->bd_ail_st_list, &ai->ai_ail2_list);
				continue;
			}

			if (!buffer_dirty(bh))
				continue;

			list_move(&bd->bd_ail_st_list, &ai->ai_ail1_list);

			get_bh(bh);
			gfs2_log_unlock(sdp);
			lock_buffer(bh);
			if (test_clear_buffer_dirty(bh)) {
				bh->b_end_io = end_buffer_write_sync;
				submit_bh(WRITE_SYNC_PLUG, bh);
			} else {
				unlock_buffer(bh);
				brelse(bh);
			}
			gfs2_log_lock(sdp);

			retry = 1;
			break;
		}
	} while (retry);
}

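/*
 * Note on gfs2_ail1_start_one() above: the log lock is dropped around
 * lock_buffer()/submit_bh(), which may sleep. Because the ail1 list can
 * change while the lock is not held, the scan restarts from the tail of
 * the list (retry) after every buffer handled in the unlocked region.
 */
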
/**
 * gfs2_ail1_empty_one - Check whether or not a trans in the AIL has been synced
 * @sdp: the filesystem
 * @ai: the AIL entry
 * @flags: DIO_ALL to keep checking beyond the first busy buffer
 *
 */

static int gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai, int flags)
{
	struct gfs2_bufdata *bd, *s;
	struct buffer_head *bh;

	list_for_each_entry_safe_reverse(bd, s, &ai->ai_ail1_list,
					 bd_ail_st_list) {
		bh = bd->bd_bh;

		gfs2_assert(sdp, bd->bd_ail == ai);

		if (buffer_busy(bh)) {
			if (flags & DIO_ALL)
				continue;
			else
				break;
		}

		if (!buffer_uptodate(bh))
			gfs2_io_error_bh(sdp, bh);

		list_move(&bd->bd_ail_st_list, &ai->ai_ail2_list);
	}

	return list_empty(&ai->ai_ail1_list);
}

static void gfs2_ail1_start(struct gfs2_sbd *sdp)
{
	struct list_head *head;
	u64 sync_gen;
	struct gfs2_ail *ai;
	int done = 0;

	gfs2_log_lock(sdp);
	head = &sdp->sd_ail1_list;
	if (list_empty(head)) {
		gfs2_log_unlock(sdp);
		return;
	}
	sync_gen = sdp->sd_ail_sync_gen++;

	while(!done) {
		done = 1;
		list_for_each_entry_reverse(ai, head, ai_list) {
			if (ai->ai_sync_gen >= sync_gen)
				continue;
			ai->ai_sync_gen = sync_gen;
			gfs2_ail1_start_one(sdp, ai); /* This may drop log lock */
			done = 0;
			break;
		}
	}

	gfs2_log_unlock(sdp);
}

static int gfs2_ail1_empty(struct gfs2_sbd *sdp, int flags)
{
	struct gfs2_ail *ai, *s;
	int ret;

	gfs2_log_lock(sdp);

	list_for_each_entry_safe_reverse(ai, s, &sdp->sd_ail1_list, ai_list) {
		if (gfs2_ail1_empty_one(sdp, ai, flags))
			list_move(&ai->ai_list, &sdp->sd_ail2_list);
		else if (!(flags & DIO_ALL))
			break;
	}

	ret = list_empty(&sdp->sd_ail1_list);

	gfs2_log_unlock(sdp);

	return ret;
}

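/*
 * Note on the DIO_ALL flag: with DIO_ALL set, gfs2_ail1_empty() scans every
 * AIL1 entry and gfs2_ail1_empty_one() skips over busy buffers, so the whole
 * list is checked; without it, both scans stop at the first buffer or entry
 * that has not yet been written back.
 */
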
/**
 * gfs2_ail2_empty_one - Remove all the entries from an AIL2 list
 * @sdp: the filesystem
 * @ai: the AIL entry
 *
 */

static void gfs2_ail2_empty_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct list_head *head = &ai->ai_ail2_list;
	struct gfs2_bufdata *bd;

	while (!list_empty(head)) {
		bd = list_entry(head->prev, struct gfs2_bufdata,
				bd_ail_st_list);
		gfs2_assert(sdp, bd->bd_ail == ai);
		gfs2_remove_from_ail(bd);
	}
}

static void ail2_empty(struct gfs2_sbd *sdp, unsigned int new_tail)
{
	struct gfs2_ail *ai, *safe;
	unsigned int old_tail = sdp->sd_log_tail;
	int wrap = (new_tail < old_tail);
	int a, b, rm;

	gfs2_log_lock(sdp);

	list_for_each_entry_safe(ai, safe, &sdp->sd_ail2_list, ai_list) {
		a = (old_tail <= ai->ai_first);
		b = (ai->ai_first < new_tail);
		rm = (wrap) ? (a || b) : (a && b);
		if (!rm)
			continue;

		gfs2_ail2_empty_one(sdp, ai);
		list_del(&ai->ai_list);
		gfs2_assert_warn(sdp, list_empty(&ai->ai_ail1_list));
		gfs2_assert_warn(sdp, list_empty(&ai->ai_ail2_list));
		kfree(ai);
	}

	gfs2_log_unlock(sdp);
}

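/*
 * Worked example of the wrap test in ail2_empty() (journal size assumed to
 * be 100 blocks): with old_tail = 10 and new_tail = 50 (no wrap), entries
 * whose ai_first lies in [10, 49] are freed; with old_tail = 90 and
 * new_tail = 10 (wrapped), entries whose ai_first lies in [90, 99] or
 * [0, 9] are freed.
 */
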
/**
 * gfs2_log_reserve - Make a log reservation
 * @sdp: The GFS2 superblock
 * @blks: The number of blocks to reserve
 *
 * Note that we never give out the last few blocks of the journal. That's
 * due to the fact that there is a small number of header blocks
 * associated with each log flush. The exact number can't be known until
 * flush time, so we ensure that we have just enough free blocks at all
 * times to avoid running out during a log flush.
 *
 * We no longer flush the log here, instead we wake up logd to do that
 * for us. To avoid the thundering herd and to ensure that we deal fairly
 * with queued waiters, we use an exclusive wait. This means that when we
 * get woken with enough journal space to get our reservation, we need to
 * wake the next waiter on the list.
 *
 * Returns: errno
 */

int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks)
{
	unsigned reserved_blks = 6 * (4096 / sdp->sd_vfs->s_blocksize);
	unsigned wanted = blks + reserved_blks;
	DEFINE_WAIT(wait);
	int did_wait = 0;
	unsigned int free_blocks;

	if (gfs2_assert_warn(sdp, blks) ||
	    gfs2_assert_warn(sdp, blks <= sdp->sd_jdesc->jd_blocks))
		return -EINVAL;
retry:
	free_blocks = atomic_read(&sdp->sd_log_blks_free);
	if (unlikely(free_blocks <= wanted)) {
		do {
			prepare_to_wait_exclusive(&sdp->sd_log_waitq, &wait,
						  TASK_UNINTERRUPTIBLE);
			wake_up(&sdp->sd_logd_waitq);
			did_wait = 1;
			if (atomic_read(&sdp->sd_log_blks_free) <= wanted)
				io_schedule();
			free_blocks = atomic_read(&sdp->sd_log_blks_free);
		} while(free_blocks <= wanted);
		finish_wait(&sdp->sd_log_waitq, &wait);
	}
	if (atomic_cmpxchg(&sdp->sd_log_blks_free, free_blocks,
			   free_blocks - blks) != free_blocks)
		goto retry;
	trace_gfs2_log_blocks(sdp, -blks);

	/*
	 * If we waited, then so might others, wake them up _after_ we get
	 * our share of the log.
	 */
	if (unlikely(did_wait))
		wake_up(&sdp->sd_log_waitq);

	down_read(&sdp->sd_log_flush_lock);

	return 0;
}

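/*
 * Notes on gfs2_log_reserve(): the reservation itself is lock-free; the
 * atomic_cmpxchg() retries from the top if another task changed
 * sd_log_blks_free between the read and the update. reserved_blks keeps
 * back the equivalent of six 4KB blocks (scaled by the filesystem block
 * size) for the flush-time headers described above. A successful return
 * leaves sd_log_flush_lock held for read; it is released again in
 * gfs2_log_commit().
 */
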
static u64 log_bmap(struct gfs2_sbd *sdp, unsigned int lbn)
{
	struct gfs2_journal_extent *je;

	list_for_each_entry(je, &sdp->sd_jdesc->extent_list, extent_list) {
		if (lbn >= je->lblock && lbn < je->lblock + je->blocks)
			return je->dblock + lbn - je->lblock;
	}

	return -1;
}

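/*
 * Example of the extent mapping in log_bmap() (numbers are made up): an
 * extent with lblock = 0, dblock = 2000, blocks = 8 maps journal block 5
 * to disk block 2000 + 5 - 0 = 2005. A logical block outside every extent
 * yields (u64)-1.
 */
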
/**
 * log_distance - Compute distance between two journal blocks
 * @sdp: The GFS2 superblock
 * @newer: The most recent journal block of the pair
 * @older: The older journal block of the pair
 *
 * Compute the distance (in the journal direction) between two
 * blocks in the journal
 *
 * Returns: the distance in blocks
 */

static inline unsigned int log_distance(struct gfs2_sbd *sdp, unsigned int newer,
					unsigned int older)
{
	int dist;

	dist = newer - older;
	if (dist < 0)
		dist += sdp->sd_jdesc->jd_blocks;

	return dist;
}

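/*
 * Example for log_distance() (assuming a 100-block journal): newer = 5,
 * older = 95 gives dist = 5 - 95 = -90, which wraps to -90 + 100 = 10.
 */
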
/**
 * calc_reserved - Calculate the number of blocks to reserve when
 *                 refunding a transaction's unused buffers.
 * @sdp: The GFS2 superblock
 *
 * This is complex. We need to reserve room for all our currently used
 * metadata buffers (e.g. normal file I/O rewriting file time stamps) and
 * all our journaled data buffers for journaled files (e.g. files in the
 * meta_fs like rindex, or files for which chattr +j was done.)
 * If we don't reserve enough space, gfs2_log_refund and gfs2_log_flush
 * will count it as free space (sd_log_blks_free) and corruption will follow.
 *
 * We can have metadata bufs and jdata bufs in the same journal. So each
 * type gets its own log header, for which we need to reserve a block.
 * In fact, each type has the potential for needing more than one header
 * in cases where we have more buffers than will fit on a journal page.
 * Metadata journal entries take up half the space of journaled buffer entries.
 * Thus, metadata entries have buf_limit (502) and journaled buffers have
 * databuf_limit (251) before they cause a wrap around.
 *
 * Also, we need to reserve blocks for revoke journal entries and one for an
 * overall header for the lot.
 *
 * Returns: the number of blocks reserved
 */
static unsigned int calc_reserved(struct gfs2_sbd *sdp)
{
	unsigned int reserved = 0;
	unsigned int mbuf_limit, metabufhdrs_needed;
	unsigned int dbuf_limit, databufhdrs_needed;
	unsigned int revokes = 0;

	mbuf_limit = buf_limit(sdp);
	metabufhdrs_needed = (sdp->sd_log_commited_buf +
			      (mbuf_limit - 1)) / mbuf_limit;
	dbuf_limit = databuf_limit(sdp);
	databufhdrs_needed = (sdp->sd_log_commited_databuf +
			      (dbuf_limit - 1)) / dbuf_limit;

	if (sdp->sd_log_commited_revoke > 0)
		revokes = gfs2_struct2blk(sdp, sdp->sd_log_commited_revoke,
					  sizeof(u64));

	reserved = sdp->sd_log_commited_buf + metabufhdrs_needed +
		   sdp->sd_log_commited_databuf + databufhdrs_needed +
		   revokes;
	/* One for the overall header */
	if (reserved)
		reserved++;
	return reserved;
}

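/*
 * Worked example for calc_reserved(), using the limits quoted above
 * (buf_limit = 502, databuf_limit = 251): 600 committed metadata buffers
 * need ceil(600/502) = 2 headers, 100 committed data buffers need 1 header,
 * and 10 revokes fit in 1 descriptor block, so reserved =
 * 600 + 2 + 100 + 1 + 1, plus 1 for the overall header = 705 blocks.
 */
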
static unsigned int current_tail(struct gfs2_sbd *sdp)
{
	struct gfs2_ail *ai;
	unsigned int tail;

	gfs2_log_lock(sdp);

	if (list_empty(&sdp->sd_ail1_list)) {
		tail = sdp->sd_log_head;
	} else {
		ai = list_entry(sdp->sd_ail1_list.prev, struct gfs2_ail, ai_list);
		tail = ai->ai_first;
	}

	gfs2_log_unlock(sdp);

	return tail;
}

void gfs2_log_incr_head(struct gfs2_sbd *sdp)
{
	if (sdp->sd_log_flush_head == sdp->sd_log_tail)
		BUG_ON(sdp->sd_log_flush_head != sdp->sd_log_head);

	if (++sdp->sd_log_flush_head == sdp->sd_jdesc->jd_blocks) {
		sdp->sd_log_flush_head = 0;
		sdp->sd_log_flush_wrapped = 1;
	}
}

/**
 * gfs2_log_write_endio - End of I/O for a log buffer
 * @bh: The buffer head
 * @uptodate: I/O Status
 *
 */

static void gfs2_log_write_endio(struct buffer_head *bh, int uptodate)
{
	struct gfs2_sbd *sdp = bh->b_private;
	bh->b_private = NULL;

	end_buffer_write_sync(bh, uptodate);
	if (atomic_dec_and_test(&sdp->sd_log_in_flight))
		wake_up(&sdp->sd_log_flush_wait);
}

/**
 * gfs2_log_get_buf - Get and initialize a buffer to use for log control data
 * @sdp: The GFS2 superblock
 *
 * Returns: the buffer_head
 */

struct buffer_head *gfs2_log_get_buf(struct gfs2_sbd *sdp)
{
	u64 blkno = log_bmap(sdp, sdp->sd_log_flush_head);
	struct buffer_head *bh;

	bh = sb_getblk(sdp->sd_vfs, blkno);
	lock_buffer(bh);
	memset(bh->b_data, 0, bh->b_size);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);
	gfs2_log_incr_head(sdp);
	atomic_inc(&sdp->sd_log_in_flight);
	bh->b_private = sdp;
	bh->b_end_io = gfs2_log_write_endio;

	return bh;
}

/**
 * gfs2_fake_write_endio - End of I/O for a fake log buffer
 * @bh: The buffer head
 * @uptodate: The I/O Status
 *
 */

static void gfs2_fake_write_endio(struct buffer_head *bh, int uptodate)
{
	struct buffer_head *real_bh = bh->b_private;
	struct gfs2_bufdata *bd = real_bh->b_private;
	struct gfs2_sbd *sdp = bd->bd_gl->gl_sbd;

	end_buffer_write_sync(bh, uptodate);
	free_buffer_head(bh);
	unlock_buffer(real_bh);
	brelse(real_bh);
	if (atomic_dec_and_test(&sdp->sd_log_in_flight))
		wake_up(&sdp->sd_log_flush_wait);
}

/**
 * gfs2_log_fake_buf - Build a fake buffer head to write metadata buffer to log
 * @sdp: the filesystem
 * @real: the buffer head containing the metadata to be written
 *
 * Returns: the log buffer descriptor
 */

struct buffer_head *gfs2_log_fake_buf(struct gfs2_sbd *sdp,
				      struct buffer_head *real)
{
	u64 blkno = log_bmap(sdp, sdp->sd_log_flush_head);
	struct buffer_head *bh;

	bh = alloc_buffer_head(GFP_NOFS | __GFP_NOFAIL);
	atomic_set(&bh->b_count, 1);
	bh->b_state = (1 << BH_Mapped) | (1 << BH_Uptodate) | (1 << BH_Lock);
	set_bh_page(bh, real->b_page, bh_offset(real));
	bh->b_blocknr = blkno;
	bh->b_size = sdp->sd_sb.sb_bsize;
	bh->b_bdev = sdp->sd_vfs->s_bdev;
	bh->b_private = real;
	bh->b_end_io = gfs2_fake_write_endio;

	gfs2_log_incr_head(sdp);
	atomic_inc(&sdp->sd_log_in_flight);

	return bh;
}

static void log_pull_tail(struct gfs2_sbd *sdp, unsigned int new_tail)
{
	unsigned int dist = log_distance(sdp, new_tail, sdp->sd_log_tail);

	ail2_empty(sdp, new_tail);

	atomic_add(dist, &sdp->sd_log_blks_free);
	trace_gfs2_log_blocks(sdp, dist);
	gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
			     sdp->sd_jdesc->jd_blocks);

	sdp->sd_log_tail = new_tail;
}

/**
 * log_write_header - Write a log header to the journal
 * @sdp: The GFS2 superblock
 * @flags: log header flags, e.g. GFS2_LOG_HEAD_UNMOUNT
 * @pull: non-zero if the caller expects the log tail to be pulled
 *
 */

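/*
 * The header is normally submitted as a barrier write so that all log
 * blocks issued before it reach the media first. If SDF_NOBARRIERS is set
 * the barrier is skipped, and if a barrier write comes back with
 * -EOPNOTSUPP, barriers are disabled for this filesystem and the header is
 * resubmitted as a plain synchronous write.
 */
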
static void log_write_header(struct gfs2_sbd *sdp, u32 flags, int pull)
{
	u64 blkno = log_bmap(sdp, sdp->sd_log_flush_head);
	struct buffer_head *bh;
	struct gfs2_log_header *lh;
	unsigned int tail;
	u32 hash;

	bh = sb_getblk(sdp->sd_vfs, blkno);
	lock_buffer(bh);
	memset(bh->b_data, 0, bh->b_size);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);

	gfs2_ail1_empty(sdp, 0);
	tail = current_tail(sdp);

	lh = (struct gfs2_log_header *)bh->b_data;
	memset(lh, 0, sizeof(struct gfs2_log_header));
	lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH);
	lh->lh_header.__pad0 = cpu_to_be64(0);
	lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH);
	lh->lh_header.mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
	lh->lh_sequence = cpu_to_be64(sdp->sd_log_sequence++);
	lh->lh_flags = cpu_to_be32(flags);
	lh->lh_tail = cpu_to_be32(tail);
	lh->lh_blkno = cpu_to_be32(sdp->sd_log_flush_head);
	hash = gfs2_disk_hash(bh->b_data, sizeof(struct gfs2_log_header));
	lh->lh_hash = cpu_to_be32(hash);

	bh->b_end_io = end_buffer_write_sync;
	if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags))
		goto skip_barrier;
	get_bh(bh);
	submit_bh(WRITE_SYNC | (1 << BIO_RW_BARRIER) | (1 << BIO_RW_META), bh);
	wait_on_buffer(bh);
	if (buffer_eopnotsupp(bh)) {
		clear_buffer_eopnotsupp(bh);
		set_buffer_uptodate(bh);
		fs_info(sdp, "barrier sync failed - disabling barriers\n");
		set_bit(SDF_NOBARRIERS, &sdp->sd_flags);
		lock_buffer(bh);
skip_barrier:
		get_bh(bh);
		submit_bh(WRITE_SYNC | (1 << BIO_RW_META), bh);
		wait_on_buffer(bh);
	}
	if (!buffer_uptodate(bh))
		gfs2_io_error_bh(sdp, bh);
	brelse(bh);

	if (sdp->sd_log_tail != tail)
		log_pull_tail(sdp, tail);
	else
		gfs2_assert_withdraw(sdp, !pull);

	sdp->sd_log_idle = (tail == sdp->sd_log_flush_head);
	gfs2_log_incr_head(sdp);
}

static void log_flush_commit(struct gfs2_sbd *sdp)
{
	DEFINE_WAIT(wait);

	if (atomic_read(&sdp->sd_log_in_flight)) {
		do {
			prepare_to_wait(&sdp->sd_log_flush_wait, &wait,
					TASK_UNINTERRUPTIBLE);
			if (atomic_read(&sdp->sd_log_in_flight))
				io_schedule();
		} while(atomic_read(&sdp->sd_log_in_flight));
		finish_wait(&sdp->sd_log_flush_wait, &wait);
	}

	log_write_header(sdp, 0, 0);
}

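/*
 * Ordered-data handling: gfs2_ordered_write() below starts write-out of the
 * dirty "ordered" data buffers attached to the log, and gfs2_ordered_wait()
 * waits for that I/O to finish. gfs2_log_flush() calls the two around
 * lops_before_commit() so the data reaches disk before the log header that
 * commits the metadata referring to it is written.
 */
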
static void gfs2_ordered_write(struct gfs2_sbd *sdp)
{
	struct gfs2_bufdata *bd;
	struct buffer_head *bh;
	LIST_HEAD(written);

	gfs2_log_lock(sdp);
	while (!list_empty(&sdp->sd_log_le_ordered)) {
		bd = list_entry(sdp->sd_log_le_ordered.next, struct gfs2_bufdata, bd_le.le_list);
		list_move(&bd->bd_le.le_list, &written);
		bh = bd->bd_bh;
		if (!buffer_dirty(bh))
			continue;
		get_bh(bh);
		gfs2_log_unlock(sdp);
		lock_buffer(bh);
		if (buffer_mapped(bh) && test_clear_buffer_dirty(bh)) {
			bh->b_end_io = end_buffer_write_sync;
			submit_bh(WRITE_SYNC_PLUG, bh);
		} else {
			unlock_buffer(bh);
			brelse(bh);
		}
		gfs2_log_lock(sdp);
	}
	list_splice(&written, &sdp->sd_log_le_ordered);
	gfs2_log_unlock(sdp);
}

static void gfs2_ordered_wait(struct gfs2_sbd *sdp)
{
	struct gfs2_bufdata *bd;
	struct buffer_head *bh;

	gfs2_log_lock(sdp);
	while (!list_empty(&sdp->sd_log_le_ordered)) {
		bd = list_entry(sdp->sd_log_le_ordered.prev, struct gfs2_bufdata, bd_le.le_list);
		bh = bd->bd_bh;
		if (buffer_locked(bh)) {
			get_bh(bh);
			gfs2_log_unlock(sdp);
			wait_on_buffer(bh);
			brelse(bh);
			gfs2_log_lock(sdp);
			continue;
		}
		list_del_init(&bd->bd_le.le_list);
	}
	gfs2_log_unlock(sdp);
}

/**
 * gfs2_log_flush - flush incore transaction(s)
 * @sdp: the filesystem
 * @gl: The glock structure to flush. If NULL, flush the whole incore log
 *
 */

void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl)
{
	struct gfs2_ail *ai;

	down_write(&sdp->sd_log_flush_lock);

	/* Log might have been flushed while we waited for the flush lock */
	if (gl && !test_bit(GLF_LFLUSH, &gl->gl_flags)) {
		up_write(&sdp->sd_log_flush_lock);
		return;
	}
	trace_gfs2_log_flush(sdp, 1);

	ai = kzalloc(sizeof(struct gfs2_ail), GFP_NOFS | __GFP_NOFAIL);
	INIT_LIST_HEAD(&ai->ai_ail1_list);
	INIT_LIST_HEAD(&ai->ai_ail2_list);

	if (sdp->sd_log_num_buf != sdp->sd_log_commited_buf) {
		printk(KERN_INFO "GFS2: log buf %u %u\n", sdp->sd_log_num_buf,
		       sdp->sd_log_commited_buf);
		gfs2_assert_withdraw(sdp, 0);
	}
	if (sdp->sd_log_num_databuf != sdp->sd_log_commited_databuf) {
		printk(KERN_INFO "GFS2: log databuf %u %u\n",
		       sdp->sd_log_num_databuf, sdp->sd_log_commited_databuf);
		gfs2_assert_withdraw(sdp, 0);
	}
	gfs2_assert_withdraw(sdp,
			sdp->sd_log_num_revoke == sdp->sd_log_commited_revoke);

	sdp->sd_log_flush_head = sdp->sd_log_head;
	sdp->sd_log_flush_wrapped = 0;
	ai->ai_first = sdp->sd_log_flush_head;

	gfs2_ordered_write(sdp);
	lops_before_commit(sdp);
	gfs2_ordered_wait(sdp);

	if (sdp->sd_log_head != sdp->sd_log_flush_head)
		log_flush_commit(sdp);
	else if (sdp->sd_log_tail != current_tail(sdp) && !sdp->sd_log_idle){
		gfs2_log_lock(sdp);
		atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */
		trace_gfs2_log_blocks(sdp, -1);
		gfs2_log_unlock(sdp);
		log_write_header(sdp, 0, PULL);
	}
	lops_after_commit(sdp, ai);

	gfs2_log_lock(sdp);
	sdp->sd_log_head = sdp->sd_log_flush_head;
	sdp->sd_log_blks_reserved = 0;
	sdp->sd_log_commited_buf = 0;
	sdp->sd_log_commited_databuf = 0;
	sdp->sd_log_commited_revoke = 0;

	if (!list_empty(&ai->ai_ail1_list)) {
		list_add(&ai->ai_list, &sdp->sd_ail1_list);
		ai = NULL;
	}
	gfs2_log_unlock(sdp);
	trace_gfs2_log_flush(sdp, 0);
	up_write(&sdp->sd_log_flush_lock);

	kfree(ai);
}

static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	unsigned int reserved;
	unsigned int unused;

	gfs2_log_lock(sdp);

	sdp->sd_log_commited_buf += tr->tr_num_buf_new - tr->tr_num_buf_rm;
	sdp->sd_log_commited_databuf += tr->tr_num_databuf_new -
		tr->tr_num_databuf_rm;
	gfs2_assert_withdraw(sdp, (((int)sdp->sd_log_commited_buf) >= 0) ||
			     (((int)sdp->sd_log_commited_databuf) >= 0));
	sdp->sd_log_commited_revoke += tr->tr_num_revoke - tr->tr_num_revoke_rm;
	reserved = calc_reserved(sdp);
	gfs2_assert_withdraw(sdp, sdp->sd_log_blks_reserved + tr->tr_reserved >= reserved);
	unused = sdp->sd_log_blks_reserved - reserved + tr->tr_reserved;
	atomic_add(unused, &sdp->sd_log_blks_free);
	trace_gfs2_log_blocks(sdp, unused);
	gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
			     sdp->sd_jdesc->jd_blocks);
	sdp->sd_log_blks_reserved = reserved;

	gfs2_log_unlock(sdp);
}

static void buf_lo_incore_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head = &tr->tr_list_buf;
	struct gfs2_bufdata *bd;

	gfs2_log_lock(sdp);
	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_list_tr);
		list_del_init(&bd->bd_list_tr);
		tr->tr_num_buf--;
	}
	gfs2_log_unlock(sdp);
	gfs2_assert_warn(sdp, !tr->tr_num_buf);
}

/**
 * gfs2_log_commit - Commit a transaction to the log
 * @sdp: the filesystem
 * @tr: the transaction
 *
 * We wake up gfs2_logd if the number of pinned blocks exceeds thresh1
 * or the total number of used blocks (pinned blocks plus AIL blocks)
 * is greater than thresh2.
 *
 * At mount time thresh1 is 1/3rd of journal size, thresh2 is 2/3rd of
 * journal size.
 *
 */

void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	log_refund(sdp, tr);
	buf_lo_incore_commit(sdp, tr);

	up_read(&sdp->sd_log_flush_lock);

	if (atomic_read(&sdp->sd_log_pinned) > atomic_read(&sdp->sd_log_thresh1) ||
	    ((sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free)) >
	    atomic_read(&sdp->sd_log_thresh2)))
		wake_up(&sdp->sd_logd_waitq);
}

/**
 * gfs2_log_shutdown - write a shutdown header into a journal
 * @sdp: the filesystem
 *
 */

void gfs2_log_shutdown(struct gfs2_sbd *sdp)
{
	down_write(&sdp->sd_log_flush_lock);

	gfs2_assert_withdraw(sdp, !sdp->sd_log_blks_reserved);
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_buf);
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_rg);
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_databuf);
	gfs2_assert_withdraw(sdp, list_empty(&sdp->sd_ail1_list));

	sdp->sd_log_flush_head = sdp->sd_log_head;
	sdp->sd_log_flush_wrapped = 0;

	log_write_header(sdp, GFS2_LOG_HEAD_UNMOUNT,
			 (sdp->sd_log_tail == current_tail(sdp)) ? 0 : PULL);

	gfs2_assert_warn(sdp, atomic_read(&sdp->sd_log_blks_free) == sdp->sd_jdesc->jd_blocks);
	gfs2_assert_warn(sdp, sdp->sd_log_head == sdp->sd_log_tail);
	gfs2_assert_warn(sdp, list_empty(&sdp->sd_ail2_list));

	sdp->sd_log_head = sdp->sd_log_flush_head;
	sdp->sd_log_tail = sdp->sd_log_head;

	up_write(&sdp->sd_log_flush_lock);
}

/**
 * gfs2_meta_syncfs - sync all the buffers in a filesystem
 * @sdp: the filesystem
 *
 */

void gfs2_meta_syncfs(struct gfs2_sbd *sdp)
{
	gfs2_log_flush(sdp, NULL);
	for (;;) {
		gfs2_ail1_start(sdp);
		if (gfs2_ail1_empty(sdp, DIO_ALL))
			break;
		msleep(10);
	}
}

static inline int gfs2_jrnl_flush_reqd(struct gfs2_sbd *sdp)
{
	return (atomic_read(&sdp->sd_log_pinned) >= atomic_read(&sdp->sd_log_thresh1));
}

static inline int gfs2_ail_flush_reqd(struct gfs2_sbd *sdp)
{
	unsigned int used_blocks = sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free);
	return used_blocks >= atomic_read(&sdp->sd_log_thresh2);
}

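/*
 * Example of the two thresholds (per the note above gfs2_log_commit(),
 * thresh1 defaults to 1/3 and thresh2 to 2/3 of the journal size at mount
 * time): for a 32768-block journal, logd flushes the log once roughly
 * 10922 blocks are pinned, and starts AIL write-back once roughly 21845
 * journal blocks are in use.
 */
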
/**
 * gfs2_logd - Update log tail as Active Items get flushed to in-place blocks
 * @sdp: Pointer to GFS2 superblock
 *
 * Also, periodically check to make sure that we're using the most recent
 * journal index.
 */

int gfs2_logd(void *data)
{
	struct gfs2_sbd *sdp = data;
	unsigned long t = 1;
	DEFINE_WAIT(wait);
	unsigned preflush;

	while (!kthread_should_stop()) {

		preflush = atomic_read(&sdp->sd_log_pinned);
		if (gfs2_jrnl_flush_reqd(sdp) || t == 0) {
			gfs2_ail1_empty(sdp, DIO_ALL);
			gfs2_log_flush(sdp, NULL);
			gfs2_ail1_empty(sdp, DIO_ALL);
		}

		if (gfs2_ail_flush_reqd(sdp)) {
			gfs2_ail1_start(sdp);
			io_schedule();
			gfs2_ail1_empty(sdp, 0);
			gfs2_log_flush(sdp, NULL);
			gfs2_ail1_empty(sdp, DIO_ALL);
		}

		wake_up(&sdp->sd_log_waitq);
		t = gfs2_tune_get(sdp, gt_logd_secs) * HZ;
		if (freezing(current))
			refrigerator();

		do {
			prepare_to_wait(&sdp->sd_logd_waitq, &wait,
					TASK_UNINTERRUPTIBLE);
			if (!gfs2_ail_flush_reqd(sdp) &&
			    !gfs2_jrnl_flush_reqd(sdp) &&
			    !kthread_should_stop())
				t = schedule_timeout(t);
		} while(t && !gfs2_ail_flush_reqd(sdp) &&
			!gfs2_jrnl_flush_reqd(sdp) &&
			!kthread_should_stop());
		finish_wait(&sdp->sd_logd_waitq, &wait);
	}

	return 0;
}