/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/gfs2_ondisk.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <trace/events/writeback.h>
#include <linux/sched/signal.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "trans.h"
#include "rgrp.h"
#include "super.h"
#include "util.h"
#include "glops.h"
#include "aops.h"

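/**
 * gfs2_page_add_databufs - Mark a range of a page's buffers as journaled data
 * @ip: The inode
 * @page: The page
 * @from: Offset of the first byte within the page
 * @len: Length of the byte range
 *
 * Marks each buffer head overlapping the byte range [@from, @from + @len)
 * uptodate and adds it to the running transaction via gfs2_trans_add_data().
 */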
void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
			    unsigned int from, unsigned int len)
{
	struct buffer_head *head = page_buffers(page);
	unsigned int bsize = head->b_size;
	struct buffer_head *bh;
	unsigned int to = from + len;
	unsigned int start, end;

	for (bh = head, start = 0; bh != head || !start;
	     bh = bh->b_this_page, start = end) {
		end = start + bsize;
		if (end <= from)
			continue;
		if (start >= to)
			break;
		set_buffer_uptodate(bh);
		gfs2_trans_add_data(ip->i_gl, bh);
	}
}

/**
 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add block to the file
 *
 * Returns: errno
 */

static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
				  struct buffer_head *bh_result, int create)
{
	int error;

	error = gfs2_block_map(inode, lblock, bh_result, 0);
	if (error)
		return error;
	if (!buffer_mapped(bh_result))
		return -EIO;
	return 0;
}

/**
 * gfs2_writepage_common - Common bits of writepage
 * @page: The page to be written
 * @wbc: The writeback control
 *
 * Returns: 1 if the caller should write the page, 0 if it was handled
 * here (redirtied or invalidated), or an error code.
 */

static int gfs2_writepage_common(struct page *page,
				 struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	loff_t i_size = i_size_read(inode);
	pgoff_t end_index = i_size >> PAGE_SHIFT;
	unsigned offset;

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
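	/*
	 * This task already has a transaction open (current->journal_info
	 * is set), so the page cannot be written back here; redirty it
	 * and let a later writeback pass handle it.
	 */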
	if (current->journal_info)
		goto redirty;
	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_SIZE-1);
	if (page->index > end_index || (page->index == end_index && !offset)) {
		page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
		goto out;
	}
	return 1;
redirty:
	redirty_page_for_writepage(wbc, page);
out:
	unlock_page(page);
	return 0;
}

/**
 * gfs2_writepage - Write page for writeback mappings
 * @page: The page
 * @wbc: The writeback control
 *
 */

static int gfs2_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret;

	ret = gfs2_writepage_common(page, wbc);
	if (ret <= 0)
		return ret;

	return nobh_writepage(page, gfs2_get_block_noalloc, wbc);
}

/* This is the same as calling block_write_full_page, but it also
 * writes pages outside of i_size
 */
static int gfs2_write_full_page(struct page *page, get_block_t *get_block,
				struct writeback_control *wbc)
{
	struct inode * const inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = i_size >> PAGE_SHIFT;
	unsigned offset;

	/*
	 * The page straddles i_size. It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped. "A file is mapped
	 * in multiples of the page size. For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	offset = i_size & (PAGE_SIZE-1);
	if (page->index == end_index && offset)
		zero_user_segment(page, offset, PAGE_SIZE);

	return __block_write_full_page(inode, page, get_block, wbc,
				       end_buffer_async_write);
}

/**
 * __gfs2_jdata_writepage - The core of jdata writepage
 * @page: The page to write
 * @wbc: The writeback control
 *
 * This is shared between writepage and writepages and implements the
 * core of the writepage operation. If a transaction is required then
 * PageChecked will have been set and the transaction will have
 * already been started before this is called.
 */

static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (PageChecked(page)) {
		ClearPageChecked(page);
		if (!page_has_buffers(page)) {
			create_empty_buffers(page, inode->i_sb->s_blocksize,
					     BIT(BH_Dirty)|BIT(BH_Uptodate));
		}
		gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize);
	}
	return gfs2_write_full_page(page, gfs2_get_block_noalloc, wbc);
}

/**
 * gfs2_jdata_writepage - Write complete page
 * @page: Page to write
 * @wbc: The writeback control
 *
 * Returns: errno
 *
 */

static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	int ret;

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
	if (PageChecked(page) || current->journal_info)
		goto out_ignore;
	ret = __gfs2_jdata_writepage(page, wbc);
	return ret;

out_ignore:
	redirty_page_for_writepage(wbc, page);
out:
	unlock_page(page);
	return 0;
}

/**
 * gfs2_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: Write-back control
 *
 * Used for both ordered and writeback modes.
 */
static int gfs2_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	int ret = mpage_writepages(mapping, wbc, gfs2_get_block_noalloc);

	/*
	 * Even if we didn't write any pages here, we might still be holding
	 * dirty pages in the ail. We forcibly flush the ail because we don't
	 * want balance_dirty_pages() to loop indefinitely trying to write out
	 * pages held in the ail that it can't find.
	 */
	if (ret == 0)
		set_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);

	return ret;
}

/**
 * gfs2_write_jdata_pagevec - Write back a pagevec's worth of pages
 * @mapping: The mapping
 * @wbc: The writeback control
 * @pvec: The vector of pages
 * @nr_pages: The number of pages to write
 * @done_index: Page index
 *
 * Returns: non-zero if loop should terminate, zero otherwise
 */

static int gfs2_write_jdata_pagevec(struct address_space *mapping,
				    struct writeback_control *wbc,
				    struct pagevec *pvec,
				    int nr_pages,
				    pgoff_t *done_index)
{
	struct inode *inode = mapping->host;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
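	/* Size one transaction to cover every block these pages could dirty. */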
	unsigned nrblocks = nr_pages * (PAGE_SIZE/inode->i_sb->s_blocksize);
	int i;
	int ret;

	ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
	if (ret < 0)
		return ret;

	for(i = 0; i < nr_pages; i++) {
		struct page *page = pvec->pages[i];

		*done_index = page->index;

		lock_page(page);

		if (unlikely(page->mapping != mapping)) {
continue_unlock:
			unlock_page(page);
			continue;
		}

		if (!PageDirty(page)) {
			/* someone wrote it for us */
			goto continue_unlock;
		}

		if (PageWriteback(page)) {
			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);
			else
				goto continue_unlock;
		}

		BUG_ON(PageWriteback(page));
		if (!clear_page_dirty_for_io(page))
			goto continue_unlock;

		trace_wbc_writepage(wbc, inode_to_bdi(inode));

		ret = __gfs2_jdata_writepage(page, wbc);
		if (unlikely(ret)) {
			if (ret == AOP_WRITEPAGE_ACTIVATE) {
				unlock_page(page);
				ret = 0;
			} else {

				/*
				 * done_index is set past this page,
				 * so media errors will not choke
				 * background writeout for the entire
				 * file. This has consequences for
				 * range_cyclic semantics (ie. it may
				 * not be suitable for data integrity
				 * writeout).
				 */
				*done_index = page->index + 1;
				ret = 1;
				break;
			}
		}

		/*
		 * We stop writing back only if we are not doing
		 * integrity sync. In case of integrity sync we have to
		 * keep going until we have written all the pages
		 * we tagged for writeback prior to entering this loop.
		 */
		if (--wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) {
			ret = 1;
			break;
		}

	}
	gfs2_trans_end(sdp);
	return ret;
}

/**
 * gfs2_write_cache_jdata - Like write_cache_pages but different
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * The reason that we use our own function here is that we need to
 * start transactions before we grab page locks. This allows us
 * to get the ordering right.
 */

static int gfs2_write_cache_jdata(struct address_space *mapping,
				  struct writeback_control *wbc)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t uninitialized_var(writeback_index);
	pgoff_t index;
	pgoff_t end;
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	xa_mark_t tag;

	pagevec_init(&pvec);
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;

retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
				tag);
		if (nr_pages == 0)
			break;

		ret = gfs2_write_jdata_pagevec(mapping, wbc, &pvec, nr_pages, &done_index);
		if (ret)
			done = 1;
		if (ret > 0)
			ret = 0;
		pagevec_release(&pvec);
		cond_resched();
	}

	if (!cycled && !done) {
		/*
		 * range_cyclic:
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}

/**
 * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 */

static int gfs2_jdata_writepages(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	int ret;

	ret = gfs2_write_cache_jdata(mapping, wbc);
	if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
		gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_JDATA_WPAGES);
		ret = gfs2_write_cache_jdata(mapping, wbc);
	}
	return ret;
}

/**
 * stuffed_readpage - Fill in a Linux page with stuffed file data
 * @ip: the inode
 * @page: the page
 *
 * Returns: errno
 */

int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
{
	struct buffer_head *dibh;
	u64 dsize = i_size_read(&ip->i_inode);
	void *kaddr;
	int error;

	/*
	 * Due to the order of unstuffing files and ->fault(), we can be
	 * asked for a zero page in the case of a stuffed file being extended,
	 * so we need to supply one here. It doesn't happen often.
	 */
	if (unlikely(page->index)) {
		zero_user(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		return 0;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	kaddr = kmap_atomic(page);
	if (dsize > gfs2_max_stuffed_size(ip))
		dsize = gfs2_max_stuffed_size(ip);
	memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
	memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
	kunmap_atomic(kaddr);
	flush_dcache_page(page);
	brelse(dibh);
	SetPageUptodate(page);

	return 0;
}

/**
 * __gfs2_readpage - readpage
 * @file: The file to read a page for
 * @page: The page to read
 *
 * This is the core of gfs2's readpage. It's used by the internal file
 * reading code as in that case we already hold the glock. Also it's
 * called by gfs2_readpage() once the required lock has been granted.
 */

static int __gfs2_readpage(void *file, struct page *page)
{
	struct gfs2_inode *ip = GFS2_I(page->mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
	int error;

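	/*
	 * Pages without buffer heads on a filesystem whose block size
	 * matches the page size can be read via iomap; stuffed inodes
	 * and everything else go through the buffer-head based paths.
	 */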
	if (i_blocksize(page->mapping->host) == PAGE_SIZE &&
	    !page_has_buffers(page)) {
		error = iomap_readpage(page, &gfs2_iomap_ops);
	} else if (gfs2_is_stuffed(ip)) {
		error = stuffed_readpage(ip, page);
		unlock_page(page);
	} else {
		error = mpage_readpage(page, gfs2_block_map);
	}

	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;

	return error;
}

/**
 * gfs2_readpage - read a page of a file
 * @file: The file to read
 * @page: The page of the file
 *
 * This deals with the locking required. We have to unlock and
 * relock the page in order to get the locking in the right
 * order.
 */

static int gfs2_readpage(struct file *file, struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder gh;
	int error;

	unlock_page(page);
	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	error = gfs2_glock_nq(&gh);
	if (unlikely(error))
		goto out;
	error = AOP_TRUNCATED_PAGE;
	lock_page(page);
	if (page->mapping == mapping && !PageUptodate(page))
		error = __gfs2_readpage(file, page);
	else
		unlock_page(page);
	gfs2_glock_dq(&gh);
out:
	gfs2_holder_uninit(&gh);
	if (error && error != AOP_TRUNCATED_PAGE)
		lock_page(page);
	return error;
}

/**
 * gfs2_internal_read - read an internal file
 * @ip: The gfs2 inode
 * @buf: The buffer to fill
 * @pos: The file position
 * @size: The amount to read
 *
 */

int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
		       unsigned size)
{
	struct address_space *mapping = ip->i_inode.i_mapping;
	unsigned long index = *pos / PAGE_SIZE;
	unsigned offset = *pos & (PAGE_SIZE - 1);
	unsigned copied = 0;
	unsigned amt;
	struct page *page;
	void *p;

	do {
		amt = size - copied;
		if (offset + size > PAGE_SIZE)
			amt = PAGE_SIZE - offset;
		page = read_cache_page(mapping, index, __gfs2_readpage, NULL);
		if (IS_ERR(page))
			return PTR_ERR(page);
		p = kmap_atomic(page);
		memcpy(buf + copied, p + offset, amt);
		kunmap_atomic(p);
		put_page(page);
		copied += amt;
		index++;
		offset = 0;
	} while(copied < size);
	(*pos) += size;
	return size;
}

/**
 * gfs2_readpages - Read a bunch of pages at once
 * @file: The file to read from
 * @mapping: Address space info
 * @pages: List of pages to read
 * @nr_pages: Number of pages to read
 *
 * Some notes:
 * 1. This is only for readahead, so we can simply ignore any things
 *    which are slightly inconvenient (such as locking conflicts between
 *    the page lock and the glock) and return having done no I/O. It's
 *    obviously not something we'd want to do on too regular a basis.
 *    Any I/O we ignore at this time will be done via readpage later.
 * 2. We don't handle stuffed files here; we let readpage do the honours.
 * 3. mpage_readpages() does most of the heavy lifting in the common case.
 * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places.
 */

static int gfs2_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_holder gh;
	int ret;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	ret = gfs2_glock_nq(&gh);
	if (unlikely(ret))
		goto out_uninit;
	if (!gfs2_is_stuffed(ip))
		ret = mpage_readpages(mapping, pages, nr_pages, gfs2_block_map);
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		ret = -EIO;
	return ret;
}

/**
 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
 * @inode: the rindex inode
 */
void adjust_fs_space(struct inode *inode)
{
	struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *m_bh, *l_bh;
	u64 fs_total, new_free;

	/* Total up the file system space, according to the latest rindex. */
	fs_total = gfs2_ri_total(sdp);
	if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0)
		return;

	spin_lock(&sdp->sd_statfs_spin);
	gfs2_statfs_change_in(m_sc, m_bh->b_data +
			      sizeof(struct gfs2_dinode));
	if (fs_total > (m_sc->sc_total + l_sc->sc_total))
		new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
	else
		new_free = 0;
	spin_unlock(&sdp->sd_statfs_spin);
	fs_warn(sdp, "File system extended by %llu blocks.\n",
		(unsigned long long)new_free);
	gfs2_statfs_change(sdp, new_free, new_free, 0);

	if (gfs2_meta_inode_buffer(l_ip, &l_bh) != 0)
		goto out;
	update_statfs(sdp, m_bh, l_bh);
	brelse(l_bh);
out:
	brelse(m_bh);
}

/**
 * gfs2_stuffed_write_end - Write end for stuffed files
 * @inode: The inode
 * @dibh: The buffer_head containing the on-disk inode
 * @pos: The file position
 * @copied: How much was actually copied by the VFS
 * @page: The page
 *
 * This copies the data from the page into the inode block after
 * the inode data structure itself.
 *
 * Returns: copied bytes or errno
 */
int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
			   loff_t pos, unsigned copied,
			   struct page *page)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	u64 to = pos + copied;
	void *kaddr;
	unsigned char *buf = dibh->b_data + sizeof(struct gfs2_dinode);

	BUG_ON(pos + copied > gfs2_max_stuffed_size(ip));

	kaddr = kmap_atomic(page);
	memcpy(buf + pos, kaddr + pos, copied);
	flush_dcache_page(page);
	kunmap_atomic(kaddr);

	WARN_ON(!PageUptodate(page));
	unlock_page(page);
	put_page(page);

	if (copied) {
		if (inode->i_size < to)
			i_size_write(inode, to);
		mark_inode_dirty(inode);
	}
	return copied;
}

/**
 * jdata_set_page_dirty - Page dirtying function
 * @page: The page to dirty
 *
 * Returns: 1 if it dirtied the page, or 0 otherwise
 */

static int jdata_set_page_dirty(struct page *page)
{
	SetPageChecked(page);
	return __set_page_dirty_buffers(page);
}

/**
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 *
 * Returns: The disk address for the block or 0 on hole or error
 */

static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder i_gh;
	sector_t dblock = 0;
	int error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
	if (error)
		return 0;

	if (!gfs2_is_stuffed(ip))
		dblock = generic_block_bmap(mapping, lblock, gfs2_block_map);

	gfs2_glock_dq_uninit(&i_gh);

	return dblock;
}

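/**
 * gfs2_discard - Detach a buffer from the journal prior to invalidation
 * @sdp: The superblock
 * @bh: The buffer head to discard
 *
 * Cleans the buffer and removes its bufdata from whichever journal list
 * it is still on, so that the page it belongs to can be invalidated.
 */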
static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	lock_buffer(bh);
	gfs2_log_lock(sdp);
	clear_buffer_dirty(bh);
	bd = bh->b_private;
	if (bd) {
		if (!list_empty(&bd->bd_list) && !buffer_pinned(bh))
			list_del_init(&bd->bd_list);
		else
			gfs2_remove_from_journal(bh, REMOVE_JDATA);
	}
	bh->b_bdev = NULL;
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	gfs2_log_unlock(sdp);
	unlock_buffer(bh);
}

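/**
 * gfs2_invalidatepage - Invalidate part or all of a page
 * @page: The (locked) page
 * @offset: Start of the byte range being invalidated
 * @length: Length of the byte range
 *
 * Discards each buffer that lies entirely within the invalidated range;
 * for a full-page invalidation, the page's buffers are released as well.
 */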
static void gfs2_invalidatepage(struct page *page, unsigned int offset,
				unsigned int length)
{
	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
	unsigned int stop = offset + length;
	int partial_page = (offset || length < PAGE_SIZE);
	struct buffer_head *bh, *head;
	unsigned long pos = 0;

	BUG_ON(!PageLocked(page));
	if (!partial_page)
		ClearPageChecked(page);
	if (!page_has_buffers(page))
		goto out;

	bh = head = page_buffers(page);
	do {
		if (pos + bh->b_size > stop)
			return;

		if (offset <= pos)
			gfs2_discard(sdp, bh);
		pos += bh->b_size;
		bh = bh->b_this_page;
	} while (bh != head);
out:
	if (!partial_page)
		try_to_release_page(page, 0);
}

/**
 * gfs2_releasepage - free the metadata associated with a page
 * @page: the page that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Calls try_to_free_buffers() to free the buffers and put the page if the
 * buffers can be released.
 *
 * Returns: 1 if the page was put or else 0
 */

int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
{
	struct address_space *mapping = page->mapping;
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct buffer_head *bh, *head;
	struct gfs2_bufdata *bd;

	if (!page_has_buffers(page))
		return 0;

	/*
	 * From xfs_vm_releasepage: mm accommodates an old ext3 case where
	 * clean pages might not have had the dirty bit cleared. Thus, it can
	 * send actual dirty pages to ->releasepage() via shrink_active_list().
	 *
	 * As a workaround, we skip pages that contain dirty buffers below.
	 * Once ->releasepage isn't called on dirty pages anymore, we can warn
	 * on dirty buffers like we used to here again.
	 */

	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	head = bh = page_buffers(page);
	do {
		if (atomic_read(&bh->b_count))
			goto cannot_release;
		bd = bh->b_private;
		if (bd && bd->bd_tr)
			goto cannot_release;
		if (buffer_dirty(bh) || WARN_ON(buffer_pinned(bh)))
			goto cannot_release;
		bh = bh->b_this_page;
	} while(bh != head);
	spin_unlock(&sdp->sd_ail_lock);

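	/*
	 * No buffer on the page is busy, dirty, or part of an active
	 * transaction, so detach any remaining bufdata from each buffer
	 * before handing the page back.
	 */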
	head = bh = page_buffers(page);
	do {
		bd = bh->b_private;
		if (bd) {
			gfs2_assert_warn(sdp, bd->bd_bh == bh);
			if (!list_empty(&bd->bd_list))
				list_del_init(&bd->bd_list);
			bd->bd_bh = NULL;
			bh->b_private = NULL;
			kmem_cache_free(gfs2_bufdata_cachep, bd);
		}

		bh = bh->b_this_page;
	} while (bh != head);
	gfs2_log_unlock(sdp);

	return try_to_free_buffers(page);

cannot_release:
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);
	return 0;
}

static const struct address_space_operations gfs2_writeback_aops = {
	.writepage = gfs2_writepage,
	.writepages = gfs2_writepages,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.direct_IO = noop_direct_IO,
	.migratepage = buffer_migrate_page,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

static const struct address_space_operations gfs2_ordered_aops = {
	.writepage = gfs2_writepage,
	.writepages = gfs2_writepages,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.set_page_dirty = __set_page_dirty_buffers,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.direct_IO = noop_direct_IO,
	.migratepage = buffer_migrate_page,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

static const struct address_space_operations gfs2_jdata_aops = {
	.writepage = gfs2_jdata_writepage,
	.writepages = gfs2_jdata_writepages,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.set_page_dirty = jdata_set_page_dirty,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

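/**
 * gfs2_set_aops - Install the address space operations matching the
 *                 inode's data journaling mode
 * @inode: The inode
 */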
void gfs2_set_aops(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (gfs2_is_jdata(ip))
		inode->i_mapping->a_ops = &gfs2_jdata_aops;
	else if (gfs2_is_writeback(sdp))
		inode->i_mapping->a_ops = &gfs2_writeback_aops;
	else if (gfs2_is_ordered(sdp))
		inode->i_mapping->a_ops = &gfs2_ordered_aops;
	else
		BUG();
}