// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/gfs2_ondisk.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <trace/events/writeback.h>
#include <linux/sched/signal.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "trans.h"
#include "rgrp.h"
#include "super.h"
#include "util.h"
#include "glops.h"
#include "aops.h"

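/**
 * gfs2_trans_add_databufs - Add a folio's buffers within a byte range to the current transaction
 * @ip: The inode
 * @folio: The folio holding the buffers
 * @from: First byte of the range within the folio
 * @len: Length of the range in bytes
 *
 * Walks the buffer heads attached to @folio and, for each buffer that
 * overlaps [@from, @from + @len), marks it uptodate and adds it to the
 * running transaction as journaled data via gfs2_trans_add_data().
 */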
void gfs2_trans_add_databufs(struct gfs2_inode *ip, struct folio *folio,
			     size_t from, size_t len)
{
	struct buffer_head *head = folio_buffers(folio);
	unsigned int bsize = head->b_size;
	struct buffer_head *bh;
	size_t to = from + len;
	size_t start, end;

	for (bh = head, start = 0; bh != head || !start;
	     bh = bh->b_this_page, start = end) {
		end = start + bsize;
		if (end <= from)
			continue;
		if (start >= to)
			break;
		set_buffer_uptodate(bh);
		gfs2_trans_add_data(ip->i_gl, bh);
	}
}

/**
 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add block to the file
 *
 * Returns: errno
 */

static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
				  struct buffer_head *bh_result, int create)
{
	int error;

	error = gfs2_block_map(inode, lblock, bh_result, 0);
	if (error)
		return error;
	if (!buffer_mapped(bh_result))
		return -ENODATA;
	return 0;
}

/**
 * gfs2_write_jdata_folio - gfs2 jdata-specific version of block_write_full_page
 * @folio: The folio to write
 * @wbc: The writeback control
 *
 * This is the same as calling block_write_full_page, but it also
 * writes pages outside of i_size
 */
static int gfs2_write_jdata_folio(struct folio *folio,
				  struct writeback_control *wbc)
{
	struct inode * const inode = folio->mapping->host;
	loff_t i_size = i_size_read(inode);

	/*
	 * The folio straddles i_size.  It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped.  "A file is mapped
	 * in multiples of the page size.  For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	if (folio_pos(folio) < i_size &&
	    i_size < folio_pos(folio) + folio_size(folio))
		folio_zero_segment(folio, offset_in_folio(folio, i_size),
				   folio_size(folio));

	return __block_write_full_folio(inode, folio, gfs2_get_block_noalloc,
					wbc, end_buffer_async_write);
}

/**
 * __gfs2_jdata_write_folio - The core of jdata writepage
 * @folio: The folio to write
 * @wbc: The writeback control
 *
 * This is shared between writepage and writepages and implements the
 * core of the writepage operation. If a transaction is required then
 * the checked flag will have been set and the transaction will have
 * already been started before this is called.
 */
static int __gfs2_jdata_write_folio(struct folio *folio,
				    struct writeback_control *wbc)
{
	struct inode *inode = folio->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);

	if (folio_test_checked(folio)) {
		folio_clear_checked(folio);
		if (!folio_buffers(folio)) {
			folio_create_empty_buffers(folio,
					inode->i_sb->s_blocksize,
					BIT(BH_Dirty)|BIT(BH_Uptodate));
		}
		gfs2_trans_add_databufs(ip, folio, 0, folio_size(folio));
	}
	return gfs2_write_jdata_folio(folio, wbc);
}

/**
 * gfs2_jdata_writepage - Write complete page
 * @page: Page to write
 * @wbc: The writeback control
 *
 * Returns: errno
 *
 */

static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct folio *folio = page_folio(page);
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
	if (folio_test_checked(folio) || current->journal_info)
		goto out_ignore;
	return __gfs2_jdata_write_folio(folio, wbc);

out_ignore:
	folio_redirty_for_writepage(wbc, folio);
out:
	folio_unlock(folio);
	return 0;
}

/**
 * gfs2_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: Write-back control
 *
 * Used for both ordered and writeback modes.
 */
static int gfs2_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct iomap_writepage_ctx wpc = { };
	int ret;

	/*
	 * Even if we didn't write any pages here, we might still be holding
	 * dirty pages in the ail. We forcibly flush the ail because we don't
	 * want balance_dirty_pages() to loop indefinitely trying to write out
	 * pages held in the ail that it can't find.
	 */
	ret = iomap_writepages(mapping, wbc, &wpc, &gfs2_writeback_ops);
	if (ret == 0)
		set_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);
	return ret;
}

/**
 * gfs2_write_jdata_batch - Write back a folio batch's worth of folios
 * @mapping: The mapping
 * @wbc: The writeback control
 * @fbatch: The batch of folios
 * @done_index: Page index
 *
 * Returns: non-zero if loop should terminate, zero otherwise
 */

static int gfs2_write_jdata_batch(struct address_space *mapping,
				  struct writeback_control *wbc,
				  struct folio_batch *fbatch,
				  pgoff_t *done_index)
{
	struct inode *inode = mapping->host;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	unsigned nrblocks;
	int i;
	int ret;
	int nr_pages = 0;
	int nr_folios = folio_batch_count(fbatch);

	for (i = 0; i < nr_folios; i++)
		nr_pages += folio_nr_pages(fbatch->folios[i]);
	nrblocks = nr_pages * (PAGE_SIZE >> inode->i_blkbits);

	ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
	if (ret < 0)
		return ret;

	for (i = 0; i < nr_folios; i++) {
		struct folio *folio = fbatch->folios[i];

		*done_index = folio->index;

		folio_lock(folio);

		if (unlikely(folio->mapping != mapping)) {
continue_unlock:
			folio_unlock(folio);
			continue;
		}

		if (!folio_test_dirty(folio)) {
			/* someone wrote it for us */
			goto continue_unlock;
		}

		if (folio_test_writeback(folio)) {
			if (wbc->sync_mode != WB_SYNC_NONE)
				folio_wait_writeback(folio);
			else
				goto continue_unlock;
		}

		BUG_ON(folio_test_writeback(folio));
		if (!folio_clear_dirty_for_io(folio))
			goto continue_unlock;

		trace_wbc_writepage(wbc, inode_to_bdi(inode));

		ret = __gfs2_jdata_write_folio(folio, wbc);
		if (unlikely(ret)) {
			if (ret == AOP_WRITEPAGE_ACTIVATE) {
				folio_unlock(folio);
				ret = 0;
			} else {
				/*
				 * done_index is set past this page,
				 * so media errors will not choke
				 * background writeout for the entire
				 * file. This has consequences for
				 * range_cyclic semantics (ie. it may
				 * not be suitable for data integrity
				 * writeout).
				 */
				*done_index = folio->index +
					folio_nr_pages(folio);
				ret = 1;
				break;
			}
		}

		/*
		 * We stop writing back only if we are not doing
		 * integrity sync. In case of integrity sync we have to
		 * keep going until we have written all the pages
		 * we tagged for writeback prior to entering this loop.
		 */
		if (--wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) {
			ret = 1;
			break;
		}
	}
	gfs2_trans_end(sdp);
	return ret;
}

/**
 * gfs2_write_cache_jdata - Like write_cache_pages but different
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * The reason that we use our own function here is that we need to
 * start transactions before we grab page locks. This allows us
 * to get the ordering right.
 */

static int gfs2_write_cache_jdata(struct address_space *mapping,
				  struct writeback_control *wbc)
{
	int ret = 0;
	int done = 0;
	struct folio_batch fbatch;
	int nr_folios;
	pgoff_t writeback_index;
	pgoff_t index;
	pgoff_t end;
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	xa_mark_t tag;

	folio_batch_init(&fbatch);
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;

retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		nr_folios = filemap_get_folios_tag(mapping, &index, end,
				tag, &fbatch);
		if (nr_folios == 0)
			break;

		ret = gfs2_write_jdata_batch(mapping, wbc, &fbatch,
					     &done_index);
		if (ret)
			done = 1;
		if (ret > 0)
			ret = 0;
		folio_batch_release(&fbatch);
		cond_resched();
	}

	if (!cycled && !done) {
		/*
		 * range_cyclic:
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}


/**
 * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 */

static int gfs2_jdata_writepages(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	int ret;

	ret = gfs2_write_cache_jdata(mapping, wbc);
	if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
		gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_JDATA_WPAGES);
		ret = gfs2_write_cache_jdata(mapping, wbc);
	}
	return ret;
}

/**
 * stuffed_readpage - Fill in a Linux page with stuffed file data
 * @ip: the inode
 * @page: the page
 *
 * Returns: errno
 */
static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
{
	struct buffer_head *dibh;
	u64 dsize = i_size_read(&ip->i_inode);
	void *kaddr;
	int error;

	/*
	 * Due to the order of unstuffing files and ->fault(), we can be
	 * asked for a zero page in the case of a stuffed file being extended,
	 * so we need to supply one here. It doesn't happen often.
	 */
	if (unlikely(page->index)) {
		zero_user(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		return 0;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	kaddr = kmap_local_page(page);
	memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
	memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
	kunmap_local(kaddr);
	flush_dcache_page(page);
	brelse(dibh);
	SetPageUptodate(page);

	return 0;
}

/**
 * gfs2_read_folio - read a folio from a file
 * @file: The file to read
 * @folio: The folio in the file
 */
static int gfs2_read_folio(struct file *file, struct folio *folio)
{
	struct inode *inode = folio->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	int error;

	if (!gfs2_is_jdata(ip) ||
	    (i_blocksize(inode) == PAGE_SIZE && !folio_buffers(folio))) {
		error = iomap_read_folio(folio, &gfs2_iomap_ops);
	} else if (gfs2_is_stuffed(ip)) {
		error = stuffed_readpage(ip, &folio->page);
		folio_unlock(folio);
	} else {
		error = mpage_read_folio(folio, gfs2_block_map);
	}

	if (unlikely(gfs2_withdrawn(sdp)))
		return -EIO;

	return error;
}

/**
 * gfs2_internal_read - read an internal file
 * @ip: The gfs2 inode
 * @buf: The buffer to fill
 * @pos: The file position
 * @size: The amount to read
 *
 */

int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
		       unsigned size)
{
	struct address_space *mapping = ip->i_inode.i_mapping;
	unsigned long index = *pos >> PAGE_SHIFT;
	unsigned offset = *pos & (PAGE_SIZE - 1);
	unsigned copied = 0;
	unsigned amt;
	struct page *page;

	do {
		page = read_cache_page(mapping, index, gfs2_read_folio, NULL);
		if (IS_ERR(page)) {
			if (PTR_ERR(page) == -EINTR)
				continue;
			return PTR_ERR(page);
		}
		amt = size - copied;
		if (offset + size > PAGE_SIZE)
			amt = PAGE_SIZE - offset;
		memcpy_from_page(buf + copied, page, offset, amt);
		put_page(page);
		copied += amt;
		index++;
		offset = 0;
	} while(copied < size);
	(*pos) += size;
	return size;
}

/**
 * gfs2_readahead - Read a bunch of pages at once
 * @rac: Read-ahead control structure
 *
 * Some notes:
 * 1. This is only for readahead, so we can simply ignore any things
 *    which are slightly inconvenient (such as locking conflicts between
 *    the page lock and the glock) and return having done no I/O. It's
 *    obviously not something we'd want to do on too regular a basis.
 *    Any I/O we ignore at this time will be done via readpage later.
 * 2. We don't handle stuffed files here; we let readpage do the honours.
 * 3. mpage_readahead() does most of the heavy lifting in the common case.
 * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places.
 */

static void gfs2_readahead(struct readahead_control *rac)
{
	struct inode *inode = rac->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);

	if (gfs2_is_stuffed(ip))
		;
	else if (gfs2_is_jdata(ip))
		mpage_readahead(rac, gfs2_block_map);
	else
		iomap_readahead(rac, &gfs2_iomap_ops);
}

/**
 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
 * @inode: the rindex inode
 */
void adjust_fs_space(struct inode *inode)
{
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *m_bh;
	u64 fs_total, new_free;

	if (gfs2_trans_begin(sdp, 2 * RES_STATFS, 0) != 0)
		return;

	/* Total up the file system space, according to the latest rindex. */
	fs_total = gfs2_ri_total(sdp);
	if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0)
		goto out;

	spin_lock(&sdp->sd_statfs_spin);
	gfs2_statfs_change_in(m_sc, m_bh->b_data +
			      sizeof(struct gfs2_dinode));
	if (fs_total > (m_sc->sc_total + l_sc->sc_total))
		new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
	else
		new_free = 0;
	spin_unlock(&sdp->sd_statfs_spin);
	fs_warn(sdp, "File system extended by %llu blocks.\n",
		(unsigned long long)new_free);
	gfs2_statfs_change(sdp, new_free, new_free, 0);

	update_statfs(sdp, m_bh);
	brelse(m_bh);
out:
	sdp->sd_rindex_uptodate = 0;
	gfs2_trans_end(sdp);
}

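/**
 * jdata_dirty_folio - Mark a jdata folio dirty
 * @mapping: The mapping the folio belongs to
 * @folio: The folio being dirtied
 *
 * If the folio is dirtied while a transaction is active
 * (current->journal_info is set), the checked flag is set so that
 * writeback later adds the folio's buffers to a transaction.
 */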
static bool jdata_dirty_folio(struct address_space *mapping,
			      struct folio *folio)
{
	if (current->journal_info)
		folio_set_checked(folio);
	return block_dirty_folio(mapping, folio);
}

/**
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 *
 * Returns: The disk address for the block or 0 on hole or error
 */

static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder i_gh;
	sector_t dblock = 0;
	int error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
	if (error)
		return 0;

	if (!gfs2_is_stuffed(ip))
		dblock = iomap_bmap(mapping, lblock, &gfs2_iomap_ops);

	gfs2_glock_dq_uninit(&i_gh);

	return dblock;
}

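/**
 * gfs2_discard - Forget about a buffer that is being invalidated
 * @sdp: The superblock
 * @bh: The buffer head
 *
 * Clears the buffer's dirty state and unlinks it from the journal: a
 * queued but unpinned buffer is simply removed from its list, otherwise
 * it is removed from the journal/ail under sd_ail_lock.  The mapped,
 * req and new bits are cleared as well.
 */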
static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	lock_buffer(bh);
	gfs2_log_lock(sdp);
	clear_buffer_dirty(bh);
	bd = bh->b_private;
	if (bd) {
		if (!list_empty(&bd->bd_list) && !buffer_pinned(bh))
			list_del_init(&bd->bd_list);
		else {
			spin_lock(&sdp->sd_ail_lock);
			gfs2_remove_from_journal(bh, REMOVE_JDATA);
			spin_unlock(&sdp->sd_ail_lock);
		}
	}
	bh->b_bdev = NULL;
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	gfs2_log_unlock(sdp);
	unlock_buffer(bh);
}

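/**
 * gfs2_invalidate_folio - Invalidate part or all of a folio
 * @folio: The folio being invalidated
 * @offset: Start of the invalidated range within the folio
 * @length: Length of the invalidated range
 *
 * Discards the buffers that fall entirely inside the invalidated range.
 * When the whole folio is invalidated, the checked flag is cleared and
 * the folio's buffers are released.
 */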
static void gfs2_invalidate_folio(struct folio *folio, size_t offset,
				  size_t length)
{
	struct gfs2_sbd *sdp = GFS2_SB(folio->mapping->host);
	size_t stop = offset + length;
	int partial_page = (offset || length < folio_size(folio));
	struct buffer_head *bh, *head;
	unsigned long pos = 0;

	BUG_ON(!folio_test_locked(folio));
	if (!partial_page)
		folio_clear_checked(folio);
	head = folio_buffers(folio);
	if (!head)
		goto out;

	bh = head;
	do {
		if (pos + bh->b_size > stop)
			return;

		if (offset <= pos)
			gfs2_discard(sdp, bh);
		pos += bh->b_size;
		bh = bh->b_this_page;
	} while (bh != head);
out:
	if (!partial_page)
		filemap_release_folio(folio, 0);
}

/**
 * gfs2_release_folio - free the metadata associated with a folio
 * @folio: the folio that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Calls try_to_free_buffers() to free the buffers and put the folio if the
 * buffers can be released.
 *
 * Returns: true if the folio was put or else false
 */

bool gfs2_release_folio(struct folio *folio, gfp_t gfp_mask)
{
	struct address_space *mapping = folio->mapping;
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct buffer_head *bh, *head;
	struct gfs2_bufdata *bd;

	head = folio_buffers(folio);
	if (!head)
		return false;

	/*
	 * mm accommodates an old ext3 case where clean folios might
	 * not have had the dirty bit cleared.  Thus, it can send actual
	 * dirty folios to ->release_folio() via shrink_active_list().
	 *
	 * As a workaround, we skip folios that contain dirty buffers
	 * below.  Once ->release_folio isn't called on dirty folios
	 * anymore, we can warn on dirty buffers like we used to here
	 * again.
	 */

	gfs2_log_lock(sdp);
	bh = head;
	do {
		if (atomic_read(&bh->b_count))
			goto cannot_release;
		bd = bh->b_private;
		if (bd && bd->bd_tr)
			goto cannot_release;
		if (buffer_dirty(bh) || WARN_ON(buffer_pinned(bh)))
			goto cannot_release;
		bh = bh->b_this_page;
	} while (bh != head);

	bh = head;
	do {
		bd = bh->b_private;
		if (bd) {
			gfs2_assert_warn(sdp, bd->bd_bh == bh);
			bd->bd_bh = NULL;
			bh->b_private = NULL;
			/*
			 * The bd may still be queued as a revoke, in which
			 * case we must not dequeue nor free it.
			 */
			if (!bd->bd_blkno && !list_empty(&bd->bd_list))
				list_del_init(&bd->bd_list);
			if (list_empty(&bd->bd_list))
				kmem_cache_free(gfs2_bufdata_cachep, bd);
		}

		bh = bh->b_this_page;
	} while (bh != head);
	gfs2_log_unlock(sdp);

	return try_to_free_buffers(folio);

cannot_release:
	gfs2_log_unlock(sdp);
	return false;
}

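/*
 * Address space operations: gfs2_aops is used for regular (writeback and
 * ordered mode) inodes, gfs2_jdata_aops for inodes with journaled data.
 * See gfs2_set_aops() below.
 */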
static const struct address_space_operations gfs2_aops = {
	.writepages = gfs2_writepages,
	.read_folio = gfs2_read_folio,
	.readahead = gfs2_readahead,
	.dirty_folio = iomap_dirty_folio,
	.release_folio = iomap_release_folio,
	.invalidate_folio = iomap_invalidate_folio,
	.bmap = gfs2_bmap,
	.migrate_folio = filemap_migrate_folio,
	.is_partially_uptodate = iomap_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

static const struct address_space_operations gfs2_jdata_aops = {
	.writepage = gfs2_jdata_writepage,
	.writepages = gfs2_jdata_writepages,
	.read_folio = gfs2_read_folio,
	.readahead = gfs2_readahead,
	.dirty_folio = jdata_dirty_folio,
	.bmap = gfs2_bmap,
	.invalidate_folio = gfs2_invalidate_folio,
	.release_folio = gfs2_release_folio,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

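/**
 * gfs2_set_aops - Set the address space operations for an inode
 * @inode: The inode
 *
 * Selects gfs2_jdata_aops for inodes with journaled data and gfs2_aops
 * for everything else.
 */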
void gfs2_set_aops(struct inode *inode)
{
	if (gfs2_is_jdata(GFS2_I(inode)))
		inode->i_mapping->a_ops = &gfs2_jdata_aops;
	else
		inode->i_mapping->a_ops = &gfs2_aops;
}