// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/gfs2_ondisk.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <trace/events/writeback.h>
#include <linux/sched/signal.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "trans.h"
#include "rgrp.h"
#include "super.h"
#include "util.h"
#include "glops.h"
#include "aops.h"
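
/**
 * gfs2_trans_add_databufs - Add the data buffers of a folio to a transaction
 * @ip: The inode
 * @folio: The folio holding the buffers
 * @from: Offset of the first byte of the range within the folio
 * @len: Length of the range, in bytes
 *
 * Marks each buffer head that intersects the byte range uptodate and adds
 * it to the current transaction via gfs2_trans_add_data().
 */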
void gfs2_trans_add_databufs(struct gfs2_inode *ip, struct folio *folio,
			     size_t from, size_t len)
{
	struct buffer_head *head = folio_buffers(folio);
	unsigned int bsize = head->b_size;
	struct buffer_head *bh;
	size_t to = from + len;
	size_t start, end;

	for (bh = head, start = 0; bh != head || !start;
	     bh = bh->b_this_page, start = end) {
		end = start + bsize;
		if (end <= from)
			continue;
		if (start >= to)
			break;
		set_buffer_uptodate(bh);
		gfs2_trans_add_data(ip->i_gl, bh);
	}
}

/**
 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add block to the file
 *
 * Returns: errno
 */

static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
				  struct buffer_head *bh_result, int create)
{
	int error;

	error = gfs2_block_map(inode, lblock, bh_result, 0);
	if (error)
		return error;
	if (!buffer_mapped(bh_result))
		return -ENODATA;
	return 0;
}

/**
 * gfs2_write_jdata_folio - gfs2 jdata-specific version of block_write_full_folio
 * @folio: The folio to write
 * @wbc: The writeback control
 *
 * This is the same as calling block_write_full_folio, but it also
 * writes pages outside of i_size
 */
static int gfs2_write_jdata_folio(struct folio *folio,
				  struct writeback_control *wbc)
{
	struct inode * const inode = folio->mapping->host;
	loff_t i_size = i_size_read(inode);

	/*
	 * The folio straddles i_size.  It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped.  "A file is mapped
	 * in multiples of the page size.  For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	if (folio_pos(folio) < i_size &&
	    i_size < folio_pos(folio) + folio_size(folio))
		folio_zero_segment(folio, offset_in_folio(folio, i_size),
				folio_size(folio));

	return __block_write_full_folio(inode, folio, gfs2_get_block_noalloc,
			wbc);
}

/**
 * __gfs2_jdata_write_folio - The core of jdata writepage
 * @folio: The folio to write
 * @wbc: The writeback control
 *
 * Implements the core of write back. If a transaction is required then
 * the checked flag will have been set and the transaction will have
 * already been started before this is called.
 */
static int __gfs2_jdata_write_folio(struct folio *folio,
		struct writeback_control *wbc)
{
	struct inode *inode = folio->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);

	if (folio_test_checked(folio)) {
		folio_clear_checked(folio);
		if (!folio_buffers(folio)) {
			create_empty_buffers(folio,
					inode->i_sb->s_blocksize,
					BIT(BH_Dirty)|BIT(BH_Uptodate));
		}
		gfs2_trans_add_databufs(ip, folio, 0, folio_size(folio));
	}
	return gfs2_write_jdata_folio(folio, wbc);
}

/**
 * gfs2_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: Write-back control
 *
 * Used for both ordered and writeback modes.
 */
static int gfs2_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct iomap_writepage_ctx wpc = { };
	int ret;

	/*
	 * Even if we didn't write enough pages here, we might still be holding
	 * dirty pages in the ail. We forcibly flush the ail because we don't
	 * want balance_dirty_pages() to loop indefinitely trying to write out
	 * pages held in the ail that it can't find.
	 */
	ret = iomap_writepages(mapping, wbc, &wpc, &gfs2_writeback_ops);
	if (ret == 0 && wbc->nr_to_write > 0)
		set_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);
	return ret;
}

/**
 * gfs2_write_jdata_batch - Write back a folio batch's worth of folios
 * @mapping: The mapping
 * @wbc: The writeback control
 * @fbatch: The batch of folios
 * @done_index: Updated to point past the last folio written back
 *
 * Returns: non-zero if loop should terminate, zero otherwise
 */

static int gfs2_write_jdata_batch(struct address_space *mapping,
				  struct writeback_control *wbc,
				  struct folio_batch *fbatch,
				  pgoff_t *done_index)
{
	struct inode *inode = mapping->host;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	unsigned nrblocks;
	int i;
	int ret;
	size_t size = 0;
	int nr_folios = folio_batch_count(fbatch);

	for (i = 0; i < nr_folios; i++)
		size += folio_size(fbatch->folios[i]);
	nrblocks = size >> inode->i_blkbits;

	ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
	if (ret < 0)
		return ret;

	for (i = 0; i < nr_folios; i++) {
		struct folio *folio = fbatch->folios[i];

		*done_index = folio->index;

		folio_lock(folio);

		if (unlikely(folio->mapping != mapping)) {
continue_unlock:
			folio_unlock(folio);
			continue;
		}

		if (!folio_test_dirty(folio)) {
			/* someone wrote it for us */
			goto continue_unlock;
		}

		if (folio_test_writeback(folio)) {
			if (wbc->sync_mode != WB_SYNC_NONE)
				folio_wait_writeback(folio);
			else
				goto continue_unlock;
		}

		BUG_ON(folio_test_writeback(folio));
		if (!folio_clear_dirty_for_io(folio))
			goto continue_unlock;

		trace_wbc_writepage(wbc, inode_to_bdi(inode));

		ret = __gfs2_jdata_write_folio(folio, wbc);
		if (unlikely(ret)) {
			if (ret == AOP_WRITEPAGE_ACTIVATE) {
				folio_unlock(folio);
				ret = 0;
			} else {
				/*
				 * done_index is set past this page,
				 * so media errors will not choke
				 * background writeout for the entire
				 * file. This has consequences for
				 * range_cyclic semantics (i.e. it may
				 * not be suitable for data integrity
				 * writeout).
				 */
				*done_index = folio_next_index(folio);
				ret = 1;
				break;
			}
		}

		/*
		 * We stop writing back only if we are not doing
		 * integrity sync. In case of integrity sync we have to
		 * keep going until we have written all the pages
		 * we tagged for writeback prior to entering this loop.
		 */
		if (--wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) {
			ret = 1;
			break;
		}
	}
	gfs2_trans_end(sdp);
	return ret;
}

/**
 * gfs2_write_cache_jdata - Like write_cache_pages but different
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * The reason that we use our own function here is that we need to
 * start transactions before we grab page locks. This allows us
 * to get the ordering right.
 */

static int gfs2_write_cache_jdata(struct address_space *mapping,
				  struct writeback_control *wbc)
{
	int ret = 0;
	int done = 0;
	struct folio_batch fbatch;
	int nr_folios;
	pgoff_t writeback_index;
	pgoff_t index;
	pgoff_t end;
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	xa_mark_t tag;

	folio_batch_init(&fbatch);
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
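	/*
	 * For data integrity sync, work from the TOWRITE tag set by
	 * tag_pages_for_writeback() so that folios dirtied while the walk
	 * is in progress cannot livelock it.
	 */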
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;

retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		nr_folios = filemap_get_folios_tag(mapping, &index, end,
				tag, &fbatch);
		if (nr_folios == 0)
			break;

		ret = gfs2_write_jdata_batch(mapping, wbc, &fbatch,
				&done_index);
		if (ret)
			done = 1;
		if (ret > 0)
			ret = 0;
		folio_batch_release(&fbatch);
		cond_resched();
	}

	if (!cycled && !done) {
		/*
		 * range_cyclic:
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}

/**
 * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
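 * For a data integrity sync (WB_SYNC_ALL), the journal is flushed after
 * the first pass and writeback is retried, picking up any folios whose
 * buffers were still pinned in the journal the first time around.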
 */

static int gfs2_jdata_writepages(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	int ret;

	ret = gfs2_write_cache_jdata(mapping, wbc);
	if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
		gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_JDATA_WPAGES);
		ret = gfs2_write_cache_jdata(mapping, wbc);
	}
	return ret;
}

/**
 * stuffed_read_folio - Fill in a Linux folio with stuffed file data
 * @ip: the inode
 * @folio: the folio
 *
 * Returns: errno
 */
static int stuffed_read_folio(struct gfs2_inode *ip, struct folio *folio)
{
	struct buffer_head *dibh = NULL;
	size_t dsize = i_size_read(&ip->i_inode);
	void *from = NULL;
	int error = 0;

	/*
	 * Due to the order of unstuffing files and ->fault(), we can be
	 * asked for a zero folio in the case of a stuffed file being extended,
	 * so we need to supply one here. It doesn't happen often.
	 */
	if (unlikely(folio->index)) {
		dsize = 0;
	} else {
		error = gfs2_meta_inode_buffer(ip, &dibh);
		if (error)
			goto out;
		from = dibh->b_data + sizeof(struct gfs2_dinode);
	}

	folio_fill_tail(folio, 0, from, dsize);
	brelse(dibh);
out:
	folio_end_read(folio, error == 0);

	return error;
}

/**
 * gfs2_read_folio - read a folio from a file
 * @file: The file to read
 * @folio: The folio in the file
 */
static int gfs2_read_folio(struct file *file, struct folio *folio)
{
	struct inode *inode = folio->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	int error;

	if (!gfs2_is_jdata(ip) ||
	    (i_blocksize(inode) == PAGE_SIZE && !folio_buffers(folio))) {
		error = iomap_read_folio(folio, &gfs2_iomap_ops);
	} else if (gfs2_is_stuffed(ip)) {
		error = stuffed_read_folio(ip, folio);
	} else {
		error = mpage_read_folio(folio, gfs2_block_map);
	}

	if (gfs2_withdrawing_or_withdrawn(sdp))
		return -EIO;

	return error;
}

/**
 * gfs2_internal_read - read an internal file
 * @ip: The gfs2 inode
 * @buf: The buffer to fill
 * @pos: The file position
 * @size: The amount to read
 *
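 * Reads @size bytes from the page cache of an internal inode into @buf,
 * starting at *@pos, and advances *@pos by the amount read.
 *
 * Returns: the number of bytes read, or a negative errno on failure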
 */

ssize_t gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
			   size_t size)
{
	struct address_space *mapping = ip->i_inode.i_mapping;
	unsigned long index = *pos >> PAGE_SHIFT;
	size_t copied = 0;

	do {
		size_t offset, chunk;
		struct folio *folio;

		folio = read_cache_folio(mapping, index, gfs2_read_folio, NULL);
		if (IS_ERR(folio)) {
			if (PTR_ERR(folio) == -EINTR)
				continue;
			return PTR_ERR(folio);
		}
		offset = *pos + copied - folio_pos(folio);
		chunk = min(size - copied, folio_size(folio) - offset);
		memcpy_from_folio(buf + copied, folio, offset, chunk);
		index = folio_next_index(folio);
		folio_put(folio);
		copied += chunk;
	} while (copied < size);
	(*pos) += size;
	return size;
}

/**
 * gfs2_readahead - Read a bunch of pages at once
 * @rac: Read-ahead control structure
 *
 * Some notes:
 * 1. This is only for readahead, so we can simply ignore any things
 *    which are slightly inconvenient (such as locking conflicts between
 *    the page lock and the glock) and return having done no I/O. It's
 *    obviously not something we'd want to do on too regular a basis.
 *    Any I/O we ignore at this time will be done via readpage later.
 * 2. We don't handle stuffed files here; we let readpage do the honours.
 * 3. mpage_readahead() does most of the heavy lifting in the common case.
 * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places.
 */

static void gfs2_readahead(struct readahead_control *rac)
{
	struct inode *inode = rac->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);

	if (gfs2_is_stuffed(ip))
		;
	else if (gfs2_is_jdata(ip))
		mpage_readahead(rac, gfs2_block_map);
	else
		iomap_readahead(rac, &gfs2_iomap_ops);
}

/**
 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
 * @inode: the rindex inode
 */
void adjust_fs_space(struct inode *inode)
{
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *m_bh;
	u64 fs_total, new_free;

	if (gfs2_trans_begin(sdp, 2 * RES_STATFS, 0) != 0)
		return;

	/* Total up the file system space, according to the latest rindex. */
	fs_total = gfs2_ri_total(sdp);
	if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0)
		goto out;

	spin_lock(&sdp->sd_statfs_spin);
	gfs2_statfs_change_in(m_sc, m_bh->b_data +
			      sizeof(struct gfs2_dinode));
	if (fs_total > (m_sc->sc_total + l_sc->sc_total))
		new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
	else
		new_free = 0;
	spin_unlock(&sdp->sd_statfs_spin);
	fs_warn(sdp, "File system extended by %llu blocks.\n",
		(unsigned long long)new_free);
	gfs2_statfs_change(sdp, new_free, new_free, 0);

	update_statfs(sdp, m_bh);
	brelse(m_bh);
out:
	sdp->sd_rindex_uptodate = 0;
	gfs2_trans_end(sdp);
}
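
/**
 * jdata_dirty_folio - Mark a jdata folio dirty
 * @mapping: The mapping the folio belongs to
 * @folio: The folio being dirtied
 *
 * When a folio is dirtied from within a transaction, set its checked flag
 * so that __gfs2_jdata_write_folio() knows to add its buffers to the
 * transaction before writing it back.
 */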
static bool jdata_dirty_folio(struct address_space *mapping,
		struct folio *folio)
{
	if (current->journal_info)
		folio_set_checked(folio);
	return block_dirty_folio(mapping, folio);
}

/**
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 *
 * Returns: The disk address for the block or 0 on hole or error
 */

static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder i_gh;
	sector_t dblock = 0;
	int error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
	if (error)
		return 0;

	if (!gfs2_is_stuffed(ip))
		dblock = iomap_bmap(mapping, lblock, &gfs2_iomap_ops);

	gfs2_glock_dq_uninit(&i_gh);

	return dblock;
}
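
/**
 * gfs2_discard - Disconnect a buffer from the journal
 * @sdp: The superblock
 * @bh: The buffer head to discard
 *
 * Removes the buffer from any journal list it may be on and clears its
 * dirty, mapped, req and new state, leaving it effectively detached from
 * the filesystem.
 */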
static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	lock_buffer(bh);
	gfs2_log_lock(sdp);
	clear_buffer_dirty(bh);
	bd = bh->b_private;
	if (bd) {
		if (!list_empty(&bd->bd_list) && !buffer_pinned(bh))
			list_del_init(&bd->bd_list);
		else {
			spin_lock(&sdp->sd_ail_lock);
			gfs2_remove_from_journal(bh, REMOVE_JDATA);
			spin_unlock(&sdp->sd_ail_lock);
		}
	}
	bh->b_bdev = NULL;
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	gfs2_log_unlock(sdp);
	unlock_buffer(bh);
}
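
/**
 * gfs2_invalidate_folio - Invalidate part or all of a jdata folio
 * @folio: The folio being invalidated
 * @offset: Offset of the first byte to invalidate
 * @length: Length of the range to invalidate
 *
 * Discards each buffer head that lies entirely within the range and, when
 * the whole folio is invalidated, releases the folio's buffers as well.
 */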
static void gfs2_invalidate_folio(struct folio *folio, size_t offset,
				  size_t length)
{
	struct gfs2_sbd *sdp = GFS2_SB(folio->mapping->host);
	size_t stop = offset + length;
	int partial_page = (offset || length < folio_size(folio));
	struct buffer_head *bh, *head;
	unsigned long pos = 0;

	BUG_ON(!folio_test_locked(folio));
	if (!partial_page)
		folio_clear_checked(folio);
	head = folio_buffers(folio);
	if (!head)
		goto out;

	bh = head;
	do {
		if (pos + bh->b_size > stop)
			return;

		if (offset <= pos)
			gfs2_discard(sdp, bh);
		pos += bh->b_size;
		bh = bh->b_this_page;
	} while (bh != head);
out:
	if (!partial_page)
		filemap_release_folio(folio, 0);
}

/**
 * gfs2_release_folio - free the metadata associated with a folio
 * @folio: the folio that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Calls try_to_free_buffers() to free the buffers and put the folio if the
 * buffers can be released.
 *
 * Returns: true if the folio was put or else false
 */

bool gfs2_release_folio(struct folio *folio, gfp_t gfp_mask)
{
	struct address_space *mapping = folio->mapping;
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct buffer_head *bh, *head;
	struct gfs2_bufdata *bd;

	head = folio_buffers(folio);
	if (!head)
		return false;

	/*
	 * mm accommodates an old ext3 case where clean folios might
	 * not have had the dirty bit cleared.  Thus, it can send actual
	 * dirty folios to ->release_folio() via shrink_active_list().
	 *
	 * As a workaround, we skip folios that contain dirty buffers
	 * below.  Once ->release_folio isn't called on dirty folios
	 * anymore, we can warn on dirty buffers like we used to here
	 * again.
	 */

	gfs2_log_lock(sdp);
	bh = head;
	do {
		if (atomic_read(&bh->b_count))
			goto cannot_release;
		bd = bh->b_private;
		if (bd && bd->bd_tr)
			goto cannot_release;
		if (buffer_dirty(bh) || WARN_ON(buffer_pinned(bh)))
			goto cannot_release;
		bh = bh->b_this_page;
	} while (bh != head);

	bh = head;
	do {
		bd = bh->b_private;
		if (bd) {
			gfs2_assert_warn(sdp, bd->bd_bh == bh);
			bd->bd_bh = NULL;
			bh->b_private = NULL;
			/*
			 * The bd may still be queued as a revoke, in which
			 * case we must not dequeue nor free it.
			 */
			if (!bd->bd_blkno && !list_empty(&bd->bd_list))
				list_del_init(&bd->bd_list);
			if (list_empty(&bd->bd_list))
				kmem_cache_free(gfs2_bufdata_cachep, bd);
		}

		bh = bh->b_this_page;
	} while (bh != head);
	gfs2_log_unlock(sdp);

	return try_to_free_buffers(folio);

cannot_release:
	gfs2_log_unlock(sdp);
	return false;
}
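
/* Address space operations used in the ordered and writeback data modes */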
static const struct address_space_operations gfs2_aops = {
	.writepages = gfs2_writepages,
	.read_folio = gfs2_read_folio,
	.readahead = gfs2_readahead,
	.dirty_folio = iomap_dirty_folio,
	.release_folio = iomap_release_folio,
	.invalidate_folio = iomap_invalidate_folio,
	.bmap = gfs2_bmap,
	.migrate_folio = filemap_migrate_folio,
	.is_partially_uptodate = iomap_is_partially_uptodate,
	.error_remove_folio = generic_error_remove_folio,
};
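
/* Address space operations used for journaled-data (jdata) files */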
static const struct address_space_operations gfs2_jdata_aops = {
	.writepages = gfs2_jdata_writepages,
	.read_folio = gfs2_read_folio,
	.readahead = gfs2_readahead,
	.dirty_folio = jdata_dirty_folio,
	.bmap = gfs2_bmap,
	.migrate_folio = buffer_migrate_folio,
	.invalidate_folio = gfs2_invalidate_folio,
	.release_folio = gfs2_release_folio,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_folio = generic_error_remove_folio,
};
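
/**
 * gfs2_set_aops - Select the address space operations for an inode
 * @inode: The inode
 *
 * Journaled-data inodes get gfs2_jdata_aops; all other inodes get the
 * iomap-based gfs2_aops.
 */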
733 | ||
734 | void gfs2_set_aops(struct inode *inode) | |
735 | { | |
eadd7535 | 736 | if (gfs2_is_jdata(GFS2_I(inode))) |
977767a7 | 737 | inode->i_mapping->a_ops = &gfs2_jdata_aops; |
5561093e | 738 | else |
eadd7535 | 739 | inode->i_mapping->a_ops = &gfs2_aops; |
5561093e | 740 | } |