// SPDX-License-Identifier: GPL-2.0+
/*
 * Buffer/page management specific to NILFS
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Written by Ryusuke Konishi and Seiji Kihara.
 */

#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/page-flags.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/gfp.h>
#include "nilfs.h"
#include "page.h"
#include "mdt.h"

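/*
 * Buffer state bits that are carried over when a buffer's contents are
 * copied to another cache (see nilfs_copy_buffer() and nilfs_copy_folio()).
 */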
#define NILFS_BUFFER_INHERENT_BITS					\
	(BIT(BH_Uptodate) | BIT(BH_Mapped) | BIT(BH_NILFS_Node) |	\
	 BIT(BH_NILFS_Volatile) | BIT(BH_NILFS_Checked))

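/*
 * Return the buffer head covering @block within @folio, creating empty
 * buffers on the folio first if it has none.  The returned buffer head
 * carries an extra reference; touch_buffer() marks it recently used and
 * wait_on_buffer() lets any pending I/O on it complete.
 */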
static struct buffer_head *__nilfs_get_folio_block(struct folio *folio,
		unsigned long block, pgoff_t index, int blkbits,
		unsigned long b_state)
{
	unsigned long first_block;
	struct buffer_head *bh = folio_buffers(folio);

	if (!bh)
		bh = create_empty_buffers(folio, 1 << blkbits, b_state);

	first_block = (unsigned long)index << (PAGE_SHIFT - blkbits);
	bh = get_nth_bh(bh, block - first_block);

	touch_buffer(bh);
	wait_on_buffer(bh);
	return bh;
}

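/**
 * nilfs_grab_buffer - get or create a buffer head for a block
 * @inode: inode to which the block belongs
 * @mapping: page cache in which the block is looked up
 * @blkoff: block offset within @mapping, in units of the inode's block size
 * @b_state: initial buffer state bits used when buffers have to be created
 *
 * Return: the buffer head covering @blkoff, with an elevated reference count
 * and its folio locked, or NULL on failure.
 */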
struct buffer_head *nilfs_grab_buffer(struct inode *inode,
				      struct address_space *mapping,
				      unsigned long blkoff,
				      unsigned long b_state)
{
	int blkbits = inode->i_blkbits;
	pgoff_t index = blkoff >> (PAGE_SHIFT - blkbits);
	struct folio *folio;
	struct buffer_head *bh;

	folio = filemap_grab_folio(mapping, index);
	if (IS_ERR(folio))
		return NULL;

	bh = __nilfs_get_folio_block(folio, blkoff, index, blkbits, b_state);
	if (unlikely(!bh)) {
		folio_unlock(folio);
		folio_put(folio);
		return NULL;
	}
	return bh;
}

/**
 * nilfs_forget_buffer - discard dirty state
 * @bh: buffer head of the buffer to be discarded
 */
void nilfs_forget_buffer(struct buffer_head *bh)
{
	struct page *page = bh->b_page;
	const unsigned long clear_bits =
		(BIT(BH_Uptodate) | BIT(BH_Dirty) | BIT(BH_Mapped) |
		 BIT(BH_Async_Write) | BIT(BH_NILFS_Volatile) |
		 BIT(BH_NILFS_Checked) | BIT(BH_NILFS_Redirected));

	lock_buffer(bh);
	set_mask_bits(&bh->b_state, clear_bits, 0);
	if (nilfs_page_buffers_clean(page))
		__nilfs_clear_page_dirty(page);

	bh->b_blocknr = -1;
	ClearPageUptodate(page);
	ClearPageMappedToDisk(page);
	unlock_buffer(bh);
	brelse(bh);
}

/**
 * nilfs_copy_buffer -- copy buffer data and flags
 * @dbh: destination buffer
 * @sbh: source buffer
 */
void nilfs_copy_buffer(struct buffer_head *dbh, struct buffer_head *sbh)
{
	void *kaddr0, *kaddr1;
	unsigned long bits;
	struct page *spage = sbh->b_page, *dpage = dbh->b_page;
	struct buffer_head *bh;

	kaddr0 = kmap_atomic(spage);
	kaddr1 = kmap_atomic(dpage);
	memcpy(kaddr1 + bh_offset(dbh), kaddr0 + bh_offset(sbh), sbh->b_size);
	kunmap_atomic(kaddr1);
	kunmap_atomic(kaddr0);

	dbh->b_state = sbh->b_state & NILFS_BUFFER_INHERENT_BITS;
	dbh->b_blocknr = sbh->b_blocknr;
	dbh->b_bdev = sbh->b_bdev;

	bh = dbh;
	bits = sbh->b_state & (BIT(BH_Uptodate) | BIT(BH_Mapped));
	while ((bh = bh->b_this_page) != dbh) {
		lock_buffer(bh);
		bits &= bh->b_state;
		unlock_buffer(bh);
	}
	if (bits & BIT(BH_Uptodate))
		SetPageUptodate(dpage);
	else
		ClearPageUptodate(dpage);
	if (bits & BIT(BH_Mapped))
		SetPageMappedToDisk(dpage);
	else
		ClearPageMappedToDisk(dpage);
}

/**
 * nilfs_page_buffers_clean - check if a page has dirty buffers or not.
 * @page: page to be checked
 *
 * nilfs_page_buffers_clean() returns zero if the page has dirty buffers.
 * Otherwise, it returns a non-zero value.
 */
int nilfs_page_buffers_clean(struct page *page)
{
	struct buffer_head *bh, *head;

	bh = head = page_buffers(page);
	do {
		if (buffer_dirty(bh))
			return 0;
		bh = bh->b_this_page;
	} while (bh != head);
	return 1;
}

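/*
 * nilfs_page_bug() dumps diagnostic information about @page and its buffer
 * heads to the kernel log; it is called when an inconsistent page state is
 * detected.
 */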
void nilfs_page_bug(struct page *page)
{
	struct address_space *m;
	unsigned long ino;

	if (unlikely(!page)) {
		printk(KERN_CRIT "NILFS_PAGE_BUG(NULL)\n");
		return;
	}

	m = page->mapping;
	ino = m ? m->host->i_ino : 0;

	printk(KERN_CRIT "NILFS_PAGE_BUG(%p): cnt=%d index#=%llu flags=0x%lx "
	       "mapping=%p ino=%lu\n",
	       page, page_ref_count(page),
	       (unsigned long long)page->index, page->flags, m, ino);

	if (page_has_buffers(page)) {
		struct buffer_head *bh, *head;
		int i = 0;

		bh = head = page_buffers(page);
		do {
			printk(KERN_CRIT
			       " BH[%d] %p: cnt=%d block#=%llu state=0x%lx\n",
			       i++, bh, atomic_read(&bh->b_count),
			       (unsigned long long)bh->b_blocknr, bh->b_state);
			bh = bh->b_this_page;
		} while (bh != head);
	}
}

/**
 * nilfs_copy_folio -- copy the folio with buffers
 * @dst: destination folio
 * @src: source folio
 * @copy_dirty: flag whether to copy dirty states on the folio's buffer heads.
 *
 * This function is for both data folios and btnode folios.  The dirty flag
 * must be handled by the caller.  The folios must not be under I/O, and
 * both @src and @dst must be locked.
 */
static void nilfs_copy_folio(struct folio *dst, struct folio *src,
		bool copy_dirty)
{
	struct buffer_head *dbh, *dbufs, *sbh;
	unsigned long mask = NILFS_BUFFER_INHERENT_BITS;

	BUG_ON(folio_test_writeback(dst));

	sbh = folio_buffers(src);
	dbh = folio_buffers(dst);
	if (!dbh)
		dbh = create_empty_buffers(dst, sbh->b_size, 0);

	if (copy_dirty)
		mask |= BIT(BH_Dirty);

	dbufs = dbh;
	do {
		lock_buffer(sbh);
		lock_buffer(dbh);
		dbh->b_state = sbh->b_state & mask;
		dbh->b_blocknr = sbh->b_blocknr;
		dbh->b_bdev = sbh->b_bdev;
		sbh = sbh->b_this_page;
		dbh = dbh->b_this_page;
	} while (dbh != dbufs);

	folio_copy(dst, src);

	if (folio_test_uptodate(src) && !folio_test_uptodate(dst))
		folio_mark_uptodate(dst);
	else if (!folio_test_uptodate(src) && folio_test_uptodate(dst))
		folio_clear_uptodate(dst);
	if (folio_test_mappedtodisk(src) && !folio_test_mappedtodisk(dst))
		folio_set_mappedtodisk(dst);
	else if (!folio_test_mappedtodisk(src) && folio_test_mappedtodisk(dst))
		folio_clear_mappedtodisk(dst);

	do {
		unlock_buffer(sbh);
		unlock_buffer(dbh);
		sbh = sbh->b_this_page;
		dbh = dbh->b_this_page;
	} while (dbh != dbufs);
}

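/**
 * nilfs_copy_dirty_pages - copy dirty pages from one page cache to another
 * @dmap: destination page cache
 * @smap: source page cache
 *
 * Every folio tagged dirty in @smap is copied, together with its buffers,
 * into @dmap and marked dirty there.
 *
 * Return: 0 on success, or a negative error code if a destination folio
 * could not be grabbed.
 */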
int nilfs_copy_dirty_pages(struct address_space *dmap,
			   struct address_space *smap)
{
	struct folio_batch fbatch;
	unsigned int i;
	pgoff_t index = 0;
	int err = 0;

	folio_batch_init(&fbatch);
repeat:
	if (!filemap_get_folios_tag(smap, &index, (pgoff_t)-1,
				PAGECACHE_TAG_DIRTY, &fbatch))
		return 0;

	for (i = 0; i < folio_batch_count(&fbatch); i++) {
		struct folio *folio = fbatch.folios[i], *dfolio;

		folio_lock(folio);
		if (unlikely(!folio_test_dirty(folio)))
			NILFS_PAGE_BUG(&folio->page, "inconsistent dirty state");

		dfolio = filemap_grab_folio(dmap, folio->index);
		if (unlikely(IS_ERR(dfolio))) {
			/* No empty page is added to the page cache */
			folio_unlock(folio);
			err = PTR_ERR(dfolio);
			break;
		}
		if (unlikely(!folio_buffers(folio)))
			NILFS_PAGE_BUG(&folio->page,
				       "found empty page in dat page cache");

		nilfs_copy_folio(dfolio, folio, true);
		filemap_dirty_folio(folio_mapping(dfolio), dfolio);

		folio_unlock(dfolio);
		folio_put(dfolio);
		folio_unlock(folio);
	}
	folio_batch_release(&fbatch);
	cond_resched();

	if (likely(!err))
		goto repeat;
	return err;
}

/**
 * nilfs_copy_back_pages -- copy back pages to original cache from shadow cache
 * @dmap: destination page cache
 * @smap: source page cache
 *
 * No pages must be added to the cache during this process.
 * This must be ensured by the caller.
 */
void nilfs_copy_back_pages(struct address_space *dmap,
			   struct address_space *smap)
{
	struct folio_batch fbatch;
	unsigned int i, n;
	pgoff_t start = 0;

	folio_batch_init(&fbatch);
repeat:
	n = filemap_get_folios(smap, &start, ~0UL, &fbatch);
	if (!n)
		return;

	for (i = 0; i < folio_batch_count(&fbatch); i++) {
		struct folio *folio = fbatch.folios[i], *dfolio;
		pgoff_t index = folio->index;

		folio_lock(folio);
		dfolio = filemap_lock_folio(dmap, index);
		if (!IS_ERR(dfolio)) {
			/* overwrite existing folio in the destination cache */
			WARN_ON(folio_test_dirty(dfolio));
			nilfs_copy_folio(dfolio, folio, false);
			folio_unlock(dfolio);
			folio_put(dfolio);
			/* Do we not need to remove folio from smap here? */
		} else {
			struct folio *f;

			/* move the folio to the destination cache */
			xa_lock_irq(&smap->i_pages);
			f = __xa_erase(&smap->i_pages, index);
			WARN_ON(folio != f);
			smap->nrpages--;
			xa_unlock_irq(&smap->i_pages);

			xa_lock_irq(&dmap->i_pages);
			f = __xa_store(&dmap->i_pages, index, folio, GFP_NOFS);
			if (unlikely(f)) {
				/* Probably -ENOMEM */
				folio->mapping = NULL;
				folio_put(folio);
			} else {
				folio->mapping = dmap;
				dmap->nrpages++;
				if (folio_test_dirty(folio))
					__xa_set_mark(&dmap->i_pages, index,
						      PAGECACHE_TAG_DIRTY);
			}
			xa_unlock_irq(&dmap->i_pages);
		}
		folio_unlock(folio);
	}
	folio_batch_release(&fbatch);
	cond_resched();

	goto repeat;
}

/**
 * nilfs_clear_dirty_pages - discard dirty pages in address space
 * @mapping: address space with dirty pages for discarding
 * @silent: suppress [true] or print [false] warning messages
 */
void nilfs_clear_dirty_pages(struct address_space *mapping, bool silent)
{
	struct folio_batch fbatch;
	unsigned int i;
	pgoff_t index = 0;

	folio_batch_init(&fbatch);

	while (filemap_get_folios_tag(mapping, &index, (pgoff_t)-1,
				PAGECACHE_TAG_DIRTY, &fbatch)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			struct folio *folio = fbatch.folios[i];

			folio_lock(folio);

			/*
			 * This folio may have been removed from the address
			 * space by truncation or invalidation when the lock
			 * was acquired.  Skip processing in that case.
			 */
			if (likely(folio->mapping == mapping))
				nilfs_clear_dirty_page(&folio->page, silent);

			folio_unlock(folio);
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}
}

/**
 * nilfs_clear_dirty_page - discard dirty page
 * @page: dirty page that will be discarded
 * @silent: suppress [true] or print [false] warning messages
 */
void nilfs_clear_dirty_page(struct page *page, bool silent)
{
	struct inode *inode = page->mapping->host;
	struct super_block *sb = inode->i_sb;

	BUG_ON(!PageLocked(page));

	if (!silent)
		nilfs_warn(sb, "discard dirty page: offset=%lld, ino=%lu",
			   page_offset(page), inode->i_ino);

	ClearPageUptodate(page);
	ClearPageMappedToDisk(page);

	if (page_has_buffers(page)) {
		struct buffer_head *bh, *head;
		const unsigned long clear_bits =
			(BIT(BH_Uptodate) | BIT(BH_Dirty) | BIT(BH_Mapped) |
			 BIT(BH_Async_Write) | BIT(BH_NILFS_Volatile) |
			 BIT(BH_NILFS_Checked) | BIT(BH_NILFS_Redirected));

		bh = head = page_buffers(page);
		do {
			lock_buffer(bh);
			if (!silent)
				nilfs_warn(sb,
					   "discard dirty block: blocknr=%llu, size=%zu",
					   (u64)bh->b_blocknr, bh->b_size);

			set_mask_bits(&bh->b_state, clear_bits, 0);
			unlock_buffer(bh);
		} while (bh = bh->b_this_page, bh != head);
	}

	__nilfs_clear_page_dirty(page);
}

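/*
 * Count the buffers on @page that overlap the byte range [@from, @to)
 * and are not dirty.
 */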
unsigned int nilfs_page_count_clean_buffers(struct page *page,
					    unsigned int from, unsigned int to)
{
	unsigned int block_start, block_end;
	struct buffer_head *bh, *head;
	unsigned int nc = 0;

	for (bh = head = page_buffers(page), block_start = 0;
	     bh != head || !block_start;
	     block_start = block_end, bh = bh->b_this_page) {
		block_end = block_start + bh->b_size;
		if (block_end > from && block_start < to && !buffer_dirty(bh))
			nc++;
	}
	return nc;
}

/*
 * NILFS2 needs clear_page_dirty() in the following two cases:
 *
 * 1) For B-tree node pages and data pages of DAT file, NILFS2 clears dirty
 *    flag of pages when it copies back pages from shadow cache to the
 *    original cache.
 *
 * 2) Some B-tree operations like insertion or deletion may dispose buffers
 *    in dirty state, and this needs to cancel the dirty state of their pages.
 */
int __nilfs_clear_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;

	if (mapping) {
		xa_lock_irq(&mapping->i_pages);
		if (test_bit(PG_dirty, &page->flags)) {
			__xa_clear_mark(&mapping->i_pages, page_index(page),
					PAGECACHE_TAG_DIRTY);
			xa_unlock_irq(&mapping->i_pages);
			return clear_page_dirty_for_io(page);
		}
		xa_unlock_irq(&mapping->i_pages);
		return 0;
	}
	return TestClearPageDirty(page);
}

/**
 * nilfs_find_uncommitted_extent - find extent of uncommitted data
 * @inode: inode
 * @start_blk: start block offset (in)
 * @blkoff: start offset of the found extent (out)
 *
 * This function searches an extent of buffers marked "delayed" which
 * starts from a block offset equal to or larger than @start_blk.  If
 * such an extent was found, this will store the start offset in
 * @blkoff and return its length in blocks.  Otherwise, zero is
 * returned.
 */
unsigned long nilfs_find_uncommitted_extent(struct inode *inode,
					    sector_t start_blk,
					    sector_t *blkoff)
{
	unsigned int i, nr_folios;
	pgoff_t index;
	unsigned long length = 0;
	struct folio_batch fbatch;
	struct folio *folio;

	if (inode->i_mapping->nrpages == 0)
		return 0;

	index = start_blk >> (PAGE_SHIFT - inode->i_blkbits);

	folio_batch_init(&fbatch);

repeat:
	nr_folios = filemap_get_folios_contig(inode->i_mapping, &index, ULONG_MAX,
			&fbatch);
	if (nr_folios == 0)
		return length;

	i = 0;
	do {
		folio = fbatch.folios[i];

		folio_lock(folio);
		if (folio_buffers(folio)) {
			struct buffer_head *bh, *head;
			sector_t b;

			b = folio->index << (PAGE_SHIFT - inode->i_blkbits);
			bh = head = folio_buffers(folio);
			do {
				if (b < start_blk)
					continue;
				if (buffer_delay(bh)) {
					if (length == 0)
						*blkoff = b;
					length++;
				} else if (length > 0) {
					goto out_locked;
				}
			} while (++b, bh = bh->b_this_page, bh != head);
		} else {
			if (length > 0)
				goto out_locked;
		}
		folio_unlock(folio);

	} while (++i < nr_folios);

	folio_batch_release(&fbatch);
	cond_resched();
	goto repeat;

out_locked:
	folio_unlock(folio);
	folio_batch_release(&fbatch);
	return length;
}