fs/nilfs2/page.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * Buffer/page management specific to NILFS
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Written by Ryusuke Konishi and Seiji Kihara.
 */

#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/page-flags.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/gfp.h>
#include "nilfs.h"
#include "page.h"
#include "mdt.h"

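/*
 * Buffer state bits that are carried over to the copy when a buffer or
 * a whole folio of buffers is duplicated (see nilfs_copy_buffer() and
 * nilfs_copy_folio() below).
 */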
#define NILFS_BUFFER_INHERENT_BITS                                      \
        (BIT(BH_Uptodate) | BIT(BH_Mapped) | BIT(BH_NILFS_Node) |       \
         BIT(BH_NILFS_Volatile) | BIT(BH_NILFS_Checked))

static struct buffer_head *__nilfs_get_folio_block(struct folio *folio,
                unsigned long block, pgoff_t index, int blkbits,
                unsigned long b_state)
{
        unsigned long first_block;
        struct buffer_head *bh = folio_buffers(folio);

        if (!bh)
                bh = create_empty_buffers(folio, 1 << blkbits, b_state);

        first_block = (unsigned long)index << (PAGE_SHIFT - blkbits);
        bh = get_nth_bh(bh, block - first_block);

        wait_on_buffer(bh);
        return bh;
}
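
/*
 * Worked example (illustrative, assuming 4 KiB pages): with 1 KiB blocks,
 * blkbits = 10 and PAGE_SHIFT - blkbits = 2, so an order-0 folio holds
 * four blocks.  Block 9 then lives in the folio at index 9 >> 2 = 2,
 * whose first_block is 2 << 2 = 8, and get_nth_bh() selects buffer
 * 9 - 8 = 1 within that folio.
 */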

struct buffer_head *nilfs_grab_buffer(struct inode *inode,
                                      struct address_space *mapping,
                                      unsigned long blkoff,
                                      unsigned long b_state)
{
        int blkbits = inode->i_blkbits;
        pgoff_t index = blkoff >> (PAGE_SHIFT - blkbits);
        struct folio *folio;
        struct buffer_head *bh;

        folio = filemap_grab_folio(mapping, index);
        if (IS_ERR(folio))
                return NULL;

        bh = __nilfs_get_folio_block(folio, blkoff, index, blkbits, b_state);
        if (unlikely(!bh)) {
                folio_unlock(folio);
                folio_put(folio);
                return NULL;
        }
        bh->b_bdev = inode->i_sb->s_bdev;
        return bh;
}
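
/*
 * Calling convention, as a minimal sketch (hypothetical caller; the real
 * users live elsewhere in nilfs2): on success the folio backing the
 * returned buffer is left locked and referenced, so the caller must
 * release both in addition to the buffer head itself:
 *
 *      bh = nilfs_grab_buffer(inode, inode->i_mapping, blkoff, 0);
 *      if (bh) {
 *              ... read or fill the block ...
 *              folio_unlock(bh->b_folio);
 *              folio_put(bh->b_folio);
 *              brelse(bh);
 *      }
 */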

/**
 * nilfs_forget_buffer - discard dirty state
 * @bh: buffer head of the buffer to be discarded
 *
 * Clears every state bit that marks @bh as live data, cancels the
 * folio's dirty state if no other buffer in it remains dirty, and
 * releases the caller's reference to the buffer head.
 */
void nilfs_forget_buffer(struct buffer_head *bh)
{
        struct folio *folio = bh->b_folio;
        const unsigned long clear_bits =
                (BIT(BH_Uptodate) | BIT(BH_Dirty) | BIT(BH_Mapped) |
                 BIT(BH_Async_Write) | BIT(BH_NILFS_Volatile) |
                 BIT(BH_NILFS_Checked) | BIT(BH_NILFS_Redirected) |
                 BIT(BH_Delay));

        lock_buffer(bh);
        set_mask_bits(&bh->b_state, clear_bits, 0);
        if (nilfs_folio_buffers_clean(folio))
                __nilfs_clear_folio_dirty(folio);

        bh->b_blocknr = -1;
        folio_clear_uptodate(folio);
        folio_clear_mappedtodisk(folio);
        unlock_buffer(bh);
        brelse(bh);
}

/**
 * nilfs_copy_buffer - copy buffer data and flags
 * @dbh: destination buffer
 * @sbh: source buffer
 */
void nilfs_copy_buffer(struct buffer_head *dbh, struct buffer_head *sbh)
{
        void *saddr, *daddr;
        unsigned long bits;
        struct folio *sfolio = sbh->b_folio, *dfolio = dbh->b_folio;
        struct buffer_head *bh;

        saddr = kmap_local_folio(sfolio, bh_offset(sbh));
        daddr = kmap_local_folio(dfolio, bh_offset(dbh));
        memcpy(daddr, saddr, sbh->b_size);
        kunmap_local(daddr);
        kunmap_local(saddr);

        dbh->b_state = sbh->b_state & NILFS_BUFFER_INHERENT_BITS;
        dbh->b_blocknr = sbh->b_blocknr;
        dbh->b_bdev = sbh->b_bdev;

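        /*
         * The folio-level Uptodate/Mapped flags must reflect the state
         * of every buffer in the destination folio, so AND together the
         * corresponding bits of all sibling buffers before deciding them.
         */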
        bh = dbh;
        bits = sbh->b_state & (BIT(BH_Uptodate) | BIT(BH_Mapped));
        while ((bh = bh->b_this_page) != dbh) {
                lock_buffer(bh);
                bits &= bh->b_state;
                unlock_buffer(bh);
        }
        if (bits & BIT(BH_Uptodate))
                folio_mark_uptodate(dfolio);
        else
                folio_clear_uptodate(dfolio);
        if (bits & BIT(BH_Mapped))
                folio_set_mappedtodisk(dfolio);
        else
                folio_clear_mappedtodisk(dfolio);
}

/**
 * nilfs_folio_buffers_clean - check if a folio has dirty buffers
 * @folio: folio to be checked
 *
 * Return: false if the folio has dirty buffers, true otherwise.
 */
bool nilfs_folio_buffers_clean(struct folio *folio)
{
        struct buffer_head *bh, *head;

        bh = head = folio_buffers(folio);
        do {
                if (buffer_dirty(bh))
                        return false;
                bh = bh->b_this_page;
        } while (bh != head);
        return true;
}

void nilfs_folio_bug(struct folio *folio)
{
        struct buffer_head *bh, *head;
        struct address_space *m;
        unsigned long ino;

        if (unlikely(!folio)) {
                printk(KERN_CRIT "NILFS_FOLIO_BUG(NULL)\n");
                return;
        }

        m = folio->mapping;
        ino = m ? m->host->i_ino : 0;

        printk(KERN_CRIT "NILFS_FOLIO_BUG(%p): cnt=%d index#=%llu flags=0x%lx "
               "mapping=%p ino=%lu\n",
               folio, folio_ref_count(folio),
               (unsigned long long)folio->index, folio->flags, m, ino);

        head = folio_buffers(folio);
        if (head) {
                int i = 0;

                bh = head;
                do {
                        printk(KERN_CRIT
                               " BH[%d] %p: cnt=%d block#=%llu state=0x%lx\n",
                               i++, bh, atomic_read(&bh->b_count),
                               (unsigned long long)bh->b_blocknr, bh->b_state);
                        bh = bh->b_this_page;
                } while (bh != head);
        }
}

/**
 * nilfs_copy_folio - copy the folio with buffers
 * @dst: destination folio
 * @src: source folio
 * @copy_dirty: flag whether to copy dirty states on the folio's buffer heads
 *
 * This function is for both data folios and btnode folios.  Handling of
 * the dirty flag is left to the caller.  The folios must not be under
 * I/O, and both @src and @dst must be locked.
 */
static void nilfs_copy_folio(struct folio *dst, struct folio *src,
                bool copy_dirty)
{
        struct buffer_head *dbh, *dbufs, *sbh;
        unsigned long mask = NILFS_BUFFER_INHERENT_BITS;

        BUG_ON(folio_test_writeback(dst));

        sbh = folio_buffers(src);
        dbh = folio_buffers(dst);
        if (!dbh)
                dbh = create_empty_buffers(dst, sbh->b_size, 0);

        if (copy_dirty)
                mask |= BIT(BH_Dirty);

        dbufs = dbh;
        do {
                lock_buffer(sbh);
                lock_buffer(dbh);
                dbh->b_state = sbh->b_state & mask;
                dbh->b_blocknr = sbh->b_blocknr;
                dbh->b_bdev = sbh->b_bdev;
                sbh = sbh->b_this_page;
                dbh = dbh->b_this_page;
        } while (dbh != dbufs);

        folio_copy(dst, src);

        if (folio_test_uptodate(src) && !folio_test_uptodate(dst))
                folio_mark_uptodate(dst);
        else if (!folio_test_uptodate(src) && folio_test_uptodate(dst))
                folio_clear_uptodate(dst);
        if (folio_test_mappedtodisk(src) && !folio_test_mappedtodisk(dst))
                folio_set_mappedtodisk(dst);
        else if (!folio_test_mappedtodisk(src) && folio_test_mappedtodisk(dst))
                folio_clear_mappedtodisk(dst);

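        /*
         * Both buffer rings wrapped back to their heads when the copy
         * loop above finished, so walking them once more revisits the
         * same buffers in order and drops the locks taken there.
         */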
        do {
                unlock_buffer(sbh);
                unlock_buffer(dbh);
                sbh = sbh->b_this_page;
                dbh = dbh->b_this_page;
        } while (dbh != dbufs);
}

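/**
 * nilfs_copy_dirty_pages - copy dirty folios between two page caches
 * @dmap: destination page cache
 * @smap: source page cache
 *
 * Walks @smap in batches of folios tagged dirty, mirrors each one into
 * @dmap (allocating the destination folio if it does not exist yet),
 * and marks the copy dirty as well.
 *
 * Return: 0 on success, or a negative error code on failure.
 */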
int nilfs_copy_dirty_pages(struct address_space *dmap,
                           struct address_space *smap)
{
        struct folio_batch fbatch;
        unsigned int i;
        pgoff_t index = 0;
        int err = 0;

        folio_batch_init(&fbatch);
repeat:
        if (!filemap_get_folios_tag(smap, &index, (pgoff_t)-1,
                                PAGECACHE_TAG_DIRTY, &fbatch))
                return 0;

        for (i = 0; i < folio_batch_count(&fbatch); i++) {
                struct folio *folio = fbatch.folios[i], *dfolio;

                folio_lock(folio);
                if (unlikely(!folio_test_dirty(folio)))
                        NILFS_FOLIO_BUG(folio, "inconsistent dirty state");

                dfolio = filemap_grab_folio(dmap, folio->index);
                if (IS_ERR(dfolio)) {
                        /* No empty page is added to the page cache */
                        folio_unlock(folio);
                        err = PTR_ERR(dfolio);
                        break;
                }
                if (unlikely(!folio_buffers(folio)))
                        NILFS_FOLIO_BUG(folio,
                                        "found empty page in dat page cache");

                nilfs_copy_folio(dfolio, folio, true);
                filemap_dirty_folio(folio_mapping(dfolio), dfolio);

                folio_unlock(dfolio);
                folio_put(dfolio);
                folio_unlock(folio);
        }
        folio_batch_release(&fbatch);
        cond_resched();

        if (likely(!err))
                goto repeat;
        return err;
}

/**
 * nilfs_copy_back_pages - copy back pages to original cache from shadow cache
 * @dmap: destination page cache
 * @smap: source page cache
 *
 * No pages must be added to the cache during this process.
 * This must be ensured by the caller.
 */
void nilfs_copy_back_pages(struct address_space *dmap,
                           struct address_space *smap)
{
        struct folio_batch fbatch;
        unsigned int i, n;
        pgoff_t start = 0;

        folio_batch_init(&fbatch);
repeat:
        n = filemap_get_folios(smap, &start, ~0UL, &fbatch);
        if (!n)
                return;

        for (i = 0; i < folio_batch_count(&fbatch); i++) {
                struct folio *folio = fbatch.folios[i], *dfolio;
                pgoff_t index = folio->index;

                folio_lock(folio);
                dfolio = filemap_lock_folio(dmap, index);
                if (!IS_ERR(dfolio)) {
                        /* overwrite existing folio in the destination cache */
                        WARN_ON(folio_test_dirty(dfolio));
                        nilfs_copy_folio(dfolio, folio, false);
                        folio_unlock(dfolio);
                        folio_put(dfolio);
                        /* Do we not need to remove folio from smap here? */
                } else {
                        struct folio *f;

                        /* move the folio to the destination cache */
                        xa_lock_irq(&smap->i_pages);
                        f = __xa_erase(&smap->i_pages, index);
                        WARN_ON(folio != f);
                        smap->nrpages--;
                        xa_unlock_irq(&smap->i_pages);

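                        /*
                         * __xa_store() does not carry xarray marks over
                         * from the source tree, so the dirty tag has to
                         * be re-applied by hand after the folio is
                         * inserted into the destination tree below.
                         */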
                        xa_lock_irq(&dmap->i_pages);
                        f = __xa_store(&dmap->i_pages, index, folio, GFP_NOFS);
                        if (unlikely(f)) {
                                /* Probably -ENOMEM */
                                folio->mapping = NULL;
                                folio_put(folio);
                        } else {
                                folio->mapping = dmap;
                                dmap->nrpages++;
                                if (folio_test_dirty(folio))
                                        __xa_set_mark(&dmap->i_pages, index,
                                                        PAGECACHE_TAG_DIRTY);
                        }
                        xa_unlock_irq(&dmap->i_pages);
                }
                folio_unlock(folio);
        }
        folio_batch_release(&fbatch);
        cond_resched();

        goto repeat;
}

/**
 * nilfs_clear_dirty_pages - discard dirty pages in address space
 * @mapping: address space with dirty pages for discarding
 */
void nilfs_clear_dirty_pages(struct address_space *mapping)
{
        struct folio_batch fbatch;
        unsigned int i;
        pgoff_t index = 0;

        folio_batch_init(&fbatch);

        while (filemap_get_folios_tag(mapping, &index, (pgoff_t)-1,
                                PAGECACHE_TAG_DIRTY, &fbatch)) {
                for (i = 0; i < folio_batch_count(&fbatch); i++) {
                        struct folio *folio = fbatch.folios[i];

                        folio_lock(folio);

                        /*
                         * This folio may have been removed from the address
                         * space by truncation or invalidation when the lock
                         * was acquired.  Skip processing in that case.
                         */
                        if (likely(folio->mapping == mapping))
                                nilfs_clear_folio_dirty(folio);

                        folio_unlock(folio);
                }
                folio_batch_release(&fbatch);
                cond_resched();
        }
}

/**
 * nilfs_clear_folio_dirty - discard dirty folio
 * @folio: dirty folio that will be discarded
 */
void nilfs_clear_folio_dirty(struct folio *folio)
{
        struct buffer_head *bh, *head;

        BUG_ON(!folio_test_locked(folio));

        folio_clear_uptodate(folio);
        folio_clear_mappedtodisk(folio);
        folio_clear_checked(folio);

        head = folio_buffers(folio);
        if (head) {
                const unsigned long clear_bits =
                        (BIT(BH_Uptodate) | BIT(BH_Dirty) | BIT(BH_Mapped) |
                         BIT(BH_Async_Write) | BIT(BH_NILFS_Volatile) |
                         BIT(BH_NILFS_Checked) | BIT(BH_NILFS_Redirected) |
                         BIT(BH_Delay));
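                /* Same set of state bits that nilfs_forget_buffer() clears. */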

                bh = head;
                do {
                        lock_buffer(bh);
                        set_mask_bits(&bh->b_state, clear_bits, 0);
                        unlock_buffer(bh);
                } while (bh = bh->b_this_page, bh != head);
        }

        __nilfs_clear_folio_dirty(folio);
}

unsigned int nilfs_page_count_clean_buffers(struct folio *folio,
                                            unsigned int from, unsigned int to)
{
        unsigned int block_start, block_end;
        struct buffer_head *bh, *head;
        unsigned int nc = 0;

        for (bh = head = folio_buffers(folio), block_start = 0;
             bh != head || !block_start;
             block_start = block_end, bh = bh->b_this_page) {
                block_end = block_start + bh->b_size;
                if (block_end > from && block_start < to && !buffer_dirty(bh))
                        nc++;
        }
        return nc;
}
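
/*
 * Worked example (illustrative): @from and @to are byte offsets within
 * the folio, and a buffer is counted when it overlaps [from, to) and is
 * not dirty.  With 1 KiB buffers, from = 0 and to = 2048, the buffers
 * covering [0, 1024) and [1024, 2048) are counted if clean, while the
 * buffer at [2048, 3072) fails the block_start < to test and is not.
 */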

/*
 * NILFS2 needs __nilfs_clear_folio_dirty() in the following two cases:
 *
 * 1) For B-tree node folios and data folios of the DAT file, NILFS2
 *    clears the dirty flag when it copies folios back from the shadow
 *    cache to the original cache.
 *
 * 2) Some B-tree operations like insertion or deletion may dispose of
 *    buffers in a dirty state, and this needs to cancel the dirty state
 *    of their folios.
 */
void __nilfs_clear_folio_dirty(struct folio *folio)
{
        struct address_space *mapping = folio->mapping;

        if (mapping) {
                xa_lock_irq(&mapping->i_pages);
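                /*
                 * folio_clear_dirty_for_io() only clears the folio flag
                 * and fixes up dirty accounting; the PAGECACHE_TAG_DIRTY
                 * mark in the xarray has to be cleared here by hand,
                 * under the tree lock, before dropping the flag.
                 */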
                if (folio_test_dirty(folio)) {
                        __xa_clear_mark(&mapping->i_pages, folio->index,
                                        PAGECACHE_TAG_DIRTY);
                        xa_unlock_irq(&mapping->i_pages);
                        folio_clear_dirty_for_io(folio);
                        return;
                }
                xa_unlock_irq(&mapping->i_pages);
                return;
        }
        folio_clear_dirty(folio);
}

/**
 * nilfs_find_uncommitted_extent - find extent of uncommitted data
 * @inode: inode
 * @start_blk: start block offset (in)
 * @blkoff: start offset of the found extent (out)
 *
 * This function searches for an extent of buffers marked "delayed" that
 * starts at a block offset equal to or larger than @start_blk.  If such
 * an extent is found, it stores the start offset in @blkoff and returns
 * the length of the extent in blocks.  Otherwise, zero is returned.
 */
unsigned long nilfs_find_uncommitted_extent(struct inode *inode,
                                            sector_t start_blk,
                                            sector_t *blkoff)
{
        unsigned int i, nr_folios;
        pgoff_t index;
        unsigned long length = 0;
        struct folio_batch fbatch;
        struct folio *folio;

        if (inode->i_mapping->nrpages == 0)
                return 0;

        index = start_blk >> (PAGE_SHIFT - inode->i_blkbits);

        folio_batch_init(&fbatch);

repeat:
        nr_folios = filemap_get_folios_contig(inode->i_mapping, &index,
                                              ULONG_MAX, &fbatch);
        if (nr_folios == 0)
                return length;

        i = 0;
        do {
                folio = fbatch.folios[i];

                folio_lock(folio);
                if (folio_buffers(folio)) {
                        struct buffer_head *bh, *head;
                        sector_t b;

                        b = folio->index << (PAGE_SHIFT - inode->i_blkbits);
                        bh = head = folio_buffers(folio);
                        do {
                                if (b < start_blk)
                                        continue;
                                if (buffer_delay(bh)) {
                                        if (length == 0)
                                                *blkoff = b;
                                        length++;
                                } else if (length > 0) {
                                        goto out_locked;
                                }
                        } while (++b, bh = bh->b_this_page, bh != head);
                } else {
                        if (length > 0)
                                goto out_locked;
                }
                folio_unlock(folio);

        } while (++i < nr_folios);

        folio_batch_release(&fbatch);
        cond_resched();
        goto repeat;

out_locked:
        folio_unlock(folio);
        folio_batch_release(&fbatch);
        return length;
}
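
/*
 * Illustrative example (hypothetical numbers): with delayed buffers
 * covering blocks 10..13, a call with start_blk = 8 stores 10 in
 * *blkoff and returns 4, while a call with start_blk = 12 stores 12
 * and returns 2.
 */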