1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  linux/fs/buffer.c
4  *
5  *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
6  */
7
8 /*
9  * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
10  *
11  * Removed a lot of unnecessary code and simplified things now that
12  * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
13  *
14  * Speed up hash, lru, and free list operations.  Use gfp() for allocating
15  * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
16  *
17  * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
18  *
19  * async buffer flushing, 1999 Andrea Arcangeli <[email protected]>
20  */
21
22 #include <linux/kernel.h>
23 #include <linux/sched/signal.h>
24 #include <linux/syscalls.h>
25 #include <linux/fs.h>
26 #include <linux/iomap.h>
27 #include <linux/mm.h>
28 #include <linux/percpu.h>
29 #include <linux/slab.h>
30 #include <linux/capability.h>
31 #include <linux/blkdev.h>
32 #include <linux/file.h>
33 #include <linux/quotaops.h>
34 #include <linux/highmem.h>
35 #include <linux/export.h>
36 #include <linux/backing-dev.h>
37 #include <linux/writeback.h>
38 #include <linux/hash.h>
39 #include <linux/suspend.h>
40 #include <linux/buffer_head.h>
41 #include <linux/task_io_accounting_ops.h>
42 #include <linux/bio.h>
43 #include <linux/cpu.h>
44 #include <linux/bitops.h>
45 #include <linux/mpage.h>
46 #include <linux/bit_spinlock.h>
47 #include <linux/pagevec.h>
48 #include <linux/sched/mm.h>
49 #include <trace/events/block.h>
50 #include <linux/fscrypt.h>
51 #include <linux/fsverity.h>
52 #include <linux/sched/isolation.h>
53
54 #include "internal.h"
55
56 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
57 static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
58                           enum rw_hint hint, struct writeback_control *wbc);
59
60 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
61
62 inline void touch_buffer(struct buffer_head *bh)
63 {
64         trace_block_touch_buffer(bh);
65         folio_mark_accessed(bh->b_folio);
66 }
67 EXPORT_SYMBOL(touch_buffer);
68
69 void __lock_buffer(struct buffer_head *bh)
70 {
71         wait_on_bit_lock_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
72 }
73 EXPORT_SYMBOL(__lock_buffer);
74
75 void unlock_buffer(struct buffer_head *bh)
76 {
77         clear_bit_unlock(BH_Lock, &bh->b_state);
78         smp_mb__after_atomic();
79         wake_up_bit(&bh->b_state, BH_Lock);
80 }
81 EXPORT_SYMBOL(unlock_buffer);
82
83 /*
84  * Returns whether the folio has dirty or writeback buffers. If all the buffers
85  * are unlocked and clean then the folio_test_dirty information is stale. If
86  * any of the buffers are locked, it is assumed they are locked for IO.
87  */
88 void buffer_check_dirty_writeback(struct folio *folio,
89                                      bool *dirty, bool *writeback)
90 {
91         struct buffer_head *head, *bh;
92         *dirty = false;
93         *writeback = false;
94
95         BUG_ON(!folio_test_locked(folio));
96
97         head = folio_buffers(folio);
98         if (!head)
99                 return;
100
101         if (folio_test_writeback(folio))
102                 *writeback = true;
103
104         bh = head;
105         do {
106                 if (buffer_locked(bh))
107                         *writeback = true;
108
109                 if (buffer_dirty(bh))
110                         *dirty = true;
111
112                 bh = bh->b_this_page;
113         } while (bh != head);
114 }
115
116 /*
117  * Block until a buffer comes unlocked.  This doesn't stop it
118  * from becoming locked again - you have to lock it yourself
119  * if you want to preserve its state.
120  */
121 void __wait_on_buffer(struct buffer_head * bh)
122 {
123         wait_on_bit_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
124 }
125 EXPORT_SYMBOL(__wait_on_buffer);
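
/*
 * Illustration (hypothetical helper, a minimal sketch, not part of this
 * file): waiting alone does not keep a buffer unlocked, so a caller that
 * must observe stable buffer state takes the lock itself rather than only
 * calling wait_on_buffer().
 */
static bool example_buffer_is_uptodate_stable(struct buffer_head *bh)
{
        bool uptodate;

        lock_buffer(bh);                /* may sleep in __lock_buffer() */
        uptodate = buffer_uptodate(bh); /* state cannot change while locked */
        unlock_buffer(bh);
        return uptodate;
}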
126
127 static void buffer_io_error(struct buffer_head *bh, char *msg)
128 {
129         if (!test_bit(BH_Quiet, &bh->b_state))
130                 printk_ratelimited(KERN_ERR
131                         "Buffer I/O error on dev %pg, logical block %llu%s\n",
132                         bh->b_bdev, (unsigned long long)bh->b_blocknr, msg);
133 }
134
135 /*
136  * End-of-IO handler helper function which does not touch the bh after
137  * unlocking it.
138  * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
139  * a race there is benign: unlock_buffer() only uses the bh's address for
140  * hashing after unlocking the buffer, so it doesn't actually touch the bh
141  * itself.
142  */
143 static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
144 {
145         if (uptodate) {
146                 set_buffer_uptodate(bh);
147         } else {
148                 /* This happens, due to failed read-ahead attempts. */
149                 clear_buffer_uptodate(bh);
150         }
151         unlock_buffer(bh);
152 }
153
154 /*
155  * Default synchronous end-of-IO handler.  Just mark it up-to-date and
156  * unlock the buffer.
157  */
158 void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
159 {
160         __end_buffer_read_notouch(bh, uptodate);
161         put_bh(bh);
162 }
163 EXPORT_SYMBOL(end_buffer_read_sync);
164
165 void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
166 {
167         if (uptodate) {
168                 set_buffer_uptodate(bh);
169         } else {
170                 buffer_io_error(bh, ", lost sync page write");
171                 mark_buffer_write_io_error(bh);
172                 clear_buffer_uptodate(bh);
173         }
174         unlock_buffer(bh);
175         put_bh(bh);
176 }
177 EXPORT_SYMBOL(end_buffer_write_sync);
178
179 /*
180  * Various filesystems appear to want __find_get_block to be non-blocking.
181  * But it's the page lock which protects the buffers.  To get around this,
182  * we get exclusion from try_to_free_buffers with the blockdev mapping's
183  * i_private_lock.
184  *
185  * Hack idea: for the blockdev mapping, i_private_lock contention
186  * may be quite high.  This code could TryLock the page, and if that
187  * succeeds, there is no need to take i_private_lock.
188  */
189 static struct buffer_head *
190 __find_get_block_slow(struct block_device *bdev, sector_t block)
191 {
192         struct address_space *bd_mapping = bdev->bd_mapping;
193         const int blkbits = bd_mapping->host->i_blkbits;
194         struct buffer_head *ret = NULL;
195         pgoff_t index;
196         struct buffer_head *bh;
197         struct buffer_head *head;
198         struct folio *folio;
199         int all_mapped = 1;
200         static DEFINE_RATELIMIT_STATE(last_warned, HZ, 1);
201
202         index = ((loff_t)block << blkbits) / PAGE_SIZE;
203         folio = __filemap_get_folio(bd_mapping, index, FGP_ACCESSED, 0);
204         if (IS_ERR(folio))
205                 goto out;
206
207         spin_lock(&bd_mapping->i_private_lock);
208         head = folio_buffers(folio);
209         if (!head)
210                 goto out_unlock;
211         bh = head;
212         do {
213                 if (!buffer_mapped(bh))
214                         all_mapped = 0;
215                 else if (bh->b_blocknr == block) {
216                         ret = bh;
217                         get_bh(bh);
218                         goto out_unlock;
219                 }
220                 bh = bh->b_this_page;
221         } while (bh != head);
222
223         /* we might be here because some of the buffers on this page are
224          * not mapped.  This is due to various races between
225          * file io on the block device and getblk.  It gets dealt with
226          * elsewhere, don't buffer_error if we had some unmapped buffers
227          */
228         ratelimit_set_flags(&last_warned, RATELIMIT_MSG_ON_RELEASE);
229         if (all_mapped && __ratelimit(&last_warned)) {
230                 printk("__find_get_block_slow() failed. block=%llu, "
231                        "b_blocknr=%llu, b_state=0x%08lx, b_size=%zu, "
232                        "device %pg blocksize: %d\n",
233                        (unsigned long long)block,
234                        (unsigned long long)bh->b_blocknr,
235                        bh->b_state, bh->b_size, bdev,
236                        1 << blkbits);
237         }
238 out_unlock:
239         spin_unlock(&bd_mapping->i_private_lock);
240         folio_put(folio);
241 out:
242         return ret;
243 }
244
245 static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
246 {
247         unsigned long flags;
248         struct buffer_head *first;
249         struct buffer_head *tmp;
250         struct folio *folio;
251         int folio_uptodate = 1;
252
253         BUG_ON(!buffer_async_read(bh));
254
255         folio = bh->b_folio;
256         if (uptodate) {
257                 set_buffer_uptodate(bh);
258         } else {
259                 clear_buffer_uptodate(bh);
260                 buffer_io_error(bh, ", async page read");
261         }
262
263         /*
264          * Be _very_ careful from here on. Bad things can happen if
265          * two buffer heads end IO at almost the same time and both
266          * decide that the page is now completely done.
267          */
268         first = folio_buffers(folio);
269         spin_lock_irqsave(&first->b_uptodate_lock, flags);
270         clear_buffer_async_read(bh);
271         unlock_buffer(bh);
272         tmp = bh;
273         do {
274                 if (!buffer_uptodate(tmp))
275                         folio_uptodate = 0;
276                 if (buffer_async_read(tmp)) {
277                         BUG_ON(!buffer_locked(tmp));
278                         goto still_busy;
279                 }
280                 tmp = tmp->b_this_page;
281         } while (tmp != bh);
282         spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
283
284         folio_end_read(folio, folio_uptodate);
285         return;
286
287 still_busy:
288         spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
289         return;
290 }
291
292 struct postprocess_bh_ctx {
293         struct work_struct work;
294         struct buffer_head *bh;
295 };
296
297 static void verify_bh(struct work_struct *work)
298 {
299         struct postprocess_bh_ctx *ctx =
300                 container_of(work, struct postprocess_bh_ctx, work);
301         struct buffer_head *bh = ctx->bh;
302         bool valid;
303
304         valid = fsverity_verify_blocks(bh->b_folio, bh->b_size, bh_offset(bh));
305         end_buffer_async_read(bh, valid);
306         kfree(ctx);
307 }
308
309 static bool need_fsverity(struct buffer_head *bh)
310 {
311         struct folio *folio = bh->b_folio;
312         struct inode *inode = folio->mapping->host;
313
314         return fsverity_active(inode) &&
315                 /* needed by ext4 */
316                 folio->index < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
317 }
318
319 static void decrypt_bh(struct work_struct *work)
320 {
321         struct postprocess_bh_ctx *ctx =
322                 container_of(work, struct postprocess_bh_ctx, work);
323         struct buffer_head *bh = ctx->bh;
324         int err;
325
326         err = fscrypt_decrypt_pagecache_blocks(bh->b_folio, bh->b_size,
327                                                bh_offset(bh));
328         if (err == 0 && need_fsverity(bh)) {
329                 /*
330                  * We use different work queues for decryption and for verity
331                  * because verity may require reading metadata pages that need
332                  * decryption, and we shouldn't recurse to the same workqueue.
333                  */
334                 INIT_WORK(&ctx->work, verify_bh);
335                 fsverity_enqueue_verify_work(&ctx->work);
336                 return;
337         }
338         end_buffer_async_read(bh, err == 0);
339         kfree(ctx);
340 }
341
342 /*
343  * I/O completion handler for block_read_full_folio() - pages
344  * which come unlocked at the end of I/O.
345  */
346 static void end_buffer_async_read_io(struct buffer_head *bh, int uptodate)
347 {
348         struct inode *inode = bh->b_folio->mapping->host;
349         bool decrypt = fscrypt_inode_uses_fs_layer_crypto(inode);
350         bool verify = need_fsverity(bh);
351
352         /* Decrypt (with fscrypt) and/or verify (with fsverity) if needed. */
353         if (uptodate && (decrypt || verify)) {
354                 struct postprocess_bh_ctx *ctx =
355                         kmalloc(sizeof(*ctx), GFP_ATOMIC);
356
357                 if (ctx) {
358                         ctx->bh = bh;
359                         if (decrypt) {
360                                 INIT_WORK(&ctx->work, decrypt_bh);
361                                 fscrypt_enqueue_decrypt_work(&ctx->work);
362                         } else {
363                                 INIT_WORK(&ctx->work, verify_bh);
364                                 fsverity_enqueue_verify_work(&ctx->work);
365                         }
366                         return;
367                 }
368                 uptodate = 0;
369         }
370         end_buffer_async_read(bh, uptodate);
371 }
372
373 /*
374  * Completion handler for block_write_full_folio() - folios which are unlocked
375  * during I/O, and which have the writeback flag cleared upon I/O completion.
376  */
377 static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
378 {
379         unsigned long flags;
380         struct buffer_head *first;
381         struct buffer_head *tmp;
382         struct folio *folio;
383
384         BUG_ON(!buffer_async_write(bh));
385
386         folio = bh->b_folio;
387         if (uptodate) {
388                 set_buffer_uptodate(bh);
389         } else {
390                 buffer_io_error(bh, ", lost async page write");
391                 mark_buffer_write_io_error(bh);
392                 clear_buffer_uptodate(bh);
393         }
394
395         first = folio_buffers(folio);
396         spin_lock_irqsave(&first->b_uptodate_lock, flags);
397
398         clear_buffer_async_write(bh);
399         unlock_buffer(bh);
400         tmp = bh->b_this_page;
401         while (tmp != bh) {
402                 if (buffer_async_write(tmp)) {
403                         BUG_ON(!buffer_locked(tmp));
404                         goto still_busy;
405                 }
406                 tmp = tmp->b_this_page;
407         }
408         spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
409         folio_end_writeback(folio);
410         return;
411
412 still_busy:
413         spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
414         return;
415 }
416
417 /*
418  * If a page's buffers are under async read I/O (end_buffer_async_read
419  * completion) then there is a possibility that another thread of
420  * control could lock one of the buffers after it has completed
421  * but while some of the other buffers have not completed.  This
422  * locked buffer would confuse end_buffer_async_read() into not unlocking
423  * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
424  * that this buffer is not under async I/O.
425  *
426  * The page comes unlocked when it has no locked buffer_async buffers
427  * left.
428  *
429  * PageLocked prevents anyone from starting new async I/O reads against
430  * any of the buffers.
431  *
432  * PageWriteback is used to prevent simultaneous writeout of the same
433  * page.
434  *
435  * PageLocked prevents anyone from starting writeback of a page which is
436  * under read I/O (PageWriteback is only ever set against a locked page).
437  */
438 static void mark_buffer_async_read(struct buffer_head *bh)
439 {
440         bh->b_end_io = end_buffer_async_read_io;
441         set_buffer_async_read(bh);
442 }
443
444 static void mark_buffer_async_write_endio(struct buffer_head *bh,
445                                           bh_end_io_t *handler)
446 {
447         bh->b_end_io = handler;
448         set_buffer_async_write(bh);
449 }
450
451 void mark_buffer_async_write(struct buffer_head *bh)
452 {
453         mark_buffer_async_write_endio(bh, end_buffer_async_write);
454 }
455 EXPORT_SYMBOL(mark_buffer_async_write);
456
457
458 /*
459  * fs/buffer.c contains helper functions for buffer-backed address space's
460  * fsync functions.  A common requirement for buffer-based filesystems is
461  * that certain data from the backing blockdev needs to be written out for
462  * a successful fsync().  For example, ext2 indirect blocks need to be
463  * written back and waited upon before fsync() returns.
464  *
465  * The functions mark_buffer_dirty_inode(), fsync_inode_buffers(),
466  * inode_has_buffers() and invalidate_inode_buffers() are provided for the
467  * management of a list of dependent buffers at ->i_mapping->i_private_list.
468  *
469  * Locking is a little subtle: try_to_free_buffers() will remove buffers
470  * from their controlling inode's queue when they are being freed.  But
471  * try_to_free_buffers() will be operating against the *blockdev* mapping
472  * at the time, not against the S_ISREG file which depends on those buffers.
473  * So the locking for i_private_list is via the i_private_lock in the address_space
474  * which backs the buffers.  Which is different from the address_space 
475  * against which the buffers are listed.  So for a particular address_space,
476  * mapping->i_private_lock does *not* protect mapping->i_private_list!  In fact,
477  * mapping->i_private_list will always be protected by the backing blockdev's
478  * ->i_private_lock.
479  *
480  * Which introduces a requirement: all buffers on an address_space's
481  * ->i_private_list must be from the same address_space: the blockdev's.
482  *
483  * address_spaces which do not place buffers at ->i_private_list via these
484  * utility functions are free to use i_private_lock and i_private_list for
485  * whatever they want.  The only requirement is that list_empty(i_private_list)
486  * be true at clear_inode() time.
487  *
488  * FIXME: clear_inode should not call invalidate_inode_buffers().  The
489  * filesystems should do that.  invalidate_inode_buffers() should just go
490  * BUG_ON(!list_empty).
491  *
492  * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
493  * take an address_space, not an inode.  And it should be called
494  * mark_buffer_dirty_fsync() to clearly define why those buffers are being
495  * queued up.
496  *
497  * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
498  * list if it is already on a list.  Because if the buffer is on a list,
499  * it *must* already be on the right one.  If not, the filesystem is being
500  * silly.  This will save a ton of locking.  But first we have to ensure
501  * that buffers are taken *off* the old inode's list when they are freed
502  * (presumably in truncate).  That requires careful auditing of all
503  * filesystems (do it inside bforget()).  It could also be done by bringing
504  * b_inode back.
505  */
506
507 /*
508  * The buffer's backing address_space's i_private_lock must be held
509  */
510 static void __remove_assoc_queue(struct buffer_head *bh)
511 {
512         list_del_init(&bh->b_assoc_buffers);
513         WARN_ON(!bh->b_assoc_map);
514         bh->b_assoc_map = NULL;
515 }
516
517 int inode_has_buffers(struct inode *inode)
518 {
519         return !list_empty(&inode->i_data.i_private_list);
520 }
521
522 /*
523  * osync is designed to support O_SYNC io.  It waits synchronously for
524  * all already-submitted IO to complete, but does not queue any new
525  * writes to the disk.
526  *
527  * To do O_SYNC writes, just queue the buffer writes with write_dirty_buffer
528  * as you dirty the buffers, and then use osync_inode_buffers to wait for
529  * completion.  Any other dirty buffers which are not yet queued for
530  * write will not be flushed to disk by the osync.
531  */
532 static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
533 {
534         struct buffer_head *bh;
535         struct list_head *p;
536         int err = 0;
537
538         spin_lock(lock);
539 repeat:
540         list_for_each_prev(p, list) {
541                 bh = BH_ENTRY(p);
542                 if (buffer_locked(bh)) {
543                         get_bh(bh);
544                         spin_unlock(lock);
545                         wait_on_buffer(bh);
546                         if (!buffer_uptodate(bh))
547                                 err = -EIO;
548                         brelse(bh);
549                         spin_lock(lock);
550                         goto repeat;
551                 }
552         }
553         spin_unlock(lock);
554         return err;
555 }
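
/*
 * A minimal sketch (hypothetical helper, not part of this file) of the
 * O_SYNC-style pattern described above: the write is queued with
 * write_dirty_buffer() as the buffer is dirtied, and only that
 * already-submitted I/O is waited upon afterwards.
 */
static int example_osync_one_buffer(struct buffer_head *bh)
{
        write_dirty_buffer(bh, REQ_SYNC);       /* queue the write now */
        wait_on_buffer(bh);                     /* wait for the submitted I/O only */
        return buffer_uptodate(bh) ? 0 : -EIO;
}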
556
557 /**
558  * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
559  * @mapping: the mapping which wants those buffers written
560  *
561  * Starts I/O against the buffers at mapping->i_private_list, and waits upon
562  * that I/O.
563  *
564  * Basically, this is a convenience function for fsync().
565  * @mapping is a file or directory which needs those buffers to be written for
566  * a successful fsync().
567  */
568 int sync_mapping_buffers(struct address_space *mapping)
569 {
570         struct address_space *buffer_mapping = mapping->i_private_data;
571
572         if (buffer_mapping == NULL || list_empty(&mapping->i_private_list))
573                 return 0;
574
575         return fsync_buffers_list(&buffer_mapping->i_private_lock,
576                                         &mapping->i_private_list);
577 }
578 EXPORT_SYMBOL(sync_mapping_buffers);
579
580 /**
581  * generic_buffers_fsync_noflush - generic buffer fsync implementation
582  * for simple filesystems with no inode lock
583  *
584  * @file:       file to synchronize
585  * @start:      start offset in bytes
586  * @end:        end offset in bytes (inclusive)
587  * @datasync:   only synchronize essential metadata if true
588  *
589  * This is a generic implementation of the fsync method for simple
590  * filesystems which track all non-inode metadata in the buffers list
591  * hanging off the address_space structure.
592  */
593 int generic_buffers_fsync_noflush(struct file *file, loff_t start, loff_t end,
594                                   bool datasync)
595 {
596         struct inode *inode = file->f_mapping->host;
597         int err;
598         int ret;
599
600         err = file_write_and_wait_range(file, start, end);
601         if (err)
602                 return err;
603
604         ret = sync_mapping_buffers(inode->i_mapping);
605         if (!(inode->i_state & I_DIRTY_ALL))
606                 goto out;
607         if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
608                 goto out;
609
610         err = sync_inode_metadata(inode, 1);
611         if (ret == 0)
612                 ret = err;
613
614 out:
615         /* check and advance again to catch errors after syncing out buffers */
616         err = file_check_and_advance_wb_err(file);
617         if (ret == 0)
618                 ret = err;
619         return ret;
620 }
621 EXPORT_SYMBOL(generic_buffers_fsync_noflush);
622
623 /**
624  * generic_buffers_fsync - generic buffer fsync implementation
625  * for simple filesystems with no inode lock
626  *
627  * @file:       file to synchronize
628  * @start:      start offset in bytes
629  * @end:        end offset in bytes (inclusive)
630  * @datasync:   only synchronize essential metadata if true
631  *
632  * This is a generic implementation of the fsync method for simple
633  * filesystems which track all non-inode metadata in the buffers list
634  * hanging off the address_space structure. This also makes sure that
635  * a device cache flush operation is called at the end.
636  */
637 int generic_buffers_fsync(struct file *file, loff_t start, loff_t end,
638                           bool datasync)
639 {
640         struct inode *inode = file->f_mapping->host;
641         int ret;
642
643         ret = generic_buffers_fsync_noflush(file, start, end, datasync);
644         if (!ret)
645                 ret = blkdev_issue_flush(inode->i_sb->s_bdev);
646         return ret;
647 }
648 EXPORT_SYMBOL(generic_buffers_fsync);
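
/*
 * Usage sketch (an assumption, not taken from a particular filesystem): a
 * simple buffer_head based filesystem can wire generic_buffers_fsync()
 * straight into its file_operations ->fsync method.
 */
static int example_file_fsync(struct file *file, loff_t start, loff_t end,
                              int datasync)
{
        return generic_buffers_fsync(file, start, end, datasync);
}

static const struct file_operations example_file_ops = {
        .fsync  = example_file_fsync,
        /* .read_iter, .write_iter, etc. omitted in this sketch */
};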
649
650 /*
651  * Called when we've recently written block `bblock', and it is known that
652  * `bblock' was for a buffer_boundary() buffer.  This means that the block at
653  * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
654  * dirty, schedule it for IO.  So that indirects merge nicely with their data.
655  */
656 void write_boundary_block(struct block_device *bdev,
657                         sector_t bblock, unsigned blocksize)
658 {
659         struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
660         if (bh) {
661                 if (buffer_dirty(bh))
662                         write_dirty_buffer(bh, 0);
663                 put_bh(bh);
664         }
665 }
666
667 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
668 {
669         struct address_space *mapping = inode->i_mapping;
670         struct address_space *buffer_mapping = bh->b_folio->mapping;
671
672         mark_buffer_dirty(bh);
673         if (!mapping->i_private_data) {
674                 mapping->i_private_data = buffer_mapping;
675         } else {
676                 BUG_ON(mapping->i_private_data != buffer_mapping);
677         }
678         if (!bh->b_assoc_map) {
679                 spin_lock(&buffer_mapping->i_private_lock);
680                 list_move_tail(&bh->b_assoc_buffers,
681                                 &mapping->i_private_list);
682                 bh->b_assoc_map = mapping;
683                 spin_unlock(&buffer_mapping->i_private_lock);
684         }
685 }
686 EXPORT_SYMBOL(mark_buffer_dirty_inode);
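
/*
 * Usage sketch (hypothetical helpers, assuming an ext2-like filesystem):
 * dependent metadata such as an indirect block is dirtied against the
 * regular file's mapping with mark_buffer_dirty_inode(), so that a later
 * sync_mapping_buffers() from the file's fsync path writes it back and
 * waits for it.
 */
static void example_dirty_indirect(struct buffer_head *bh, struct inode *inode)
{
        /* queue bh on inode->i_mapping->i_private_list */
        mark_buffer_dirty_inode(bh, inode);
}

static int example_fsync_metadata(struct inode *inode)
{
        /* write out & wait upon the buffers queued above */
        return sync_mapping_buffers(inode->i_mapping);
}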
687
688 /**
689  * block_dirty_folio - Mark a folio as dirty.
690  * @mapping: The address space containing this folio.
691  * @folio: The folio to mark dirty.
692  *
693  * Filesystems which use buffer_heads can use this function as their
694  * ->dirty_folio implementation.  Some filesystems need to do a little
695  * work before calling this function.  Filesystems which do not use
696  * buffer_heads should call filemap_dirty_folio() instead.
697  *
698  * If the folio has buffers, the uptodate buffers are set dirty, to
699  * preserve dirty-state coherency between the folio and the buffers.
700  * Buffers added to a dirty folio are created dirty.
701  *
702  * The buffers are dirtied before the folio is dirtied.  There's a small
703  * race window in which writeback may see the folio cleanness but not the
704  * buffer dirtiness.  That's fine.  If this code were to set the folio
705  * dirty before the buffers, writeback could clear the folio dirty flag,
706  * see a bunch of clean buffers and we'd end up with dirty buffers/clean
707  * folio on the dirty folio list.
708  *
709  * We use i_private_lock to lock against try_to_free_buffers() while
710  * using the folio's buffer list.  This also prevents clean buffers
711  * being added to the folio after it was set dirty.
712  *
713  * Context: May only be called from process context.  Does not sleep.
714  * Caller must ensure that @folio cannot be truncated during this call,
715  * typically by holding the folio lock or having a page in the folio
716  * mapped and holding the page table lock.
717  *
718  * Return: True if the folio was dirtied; false if it was already dirtied.
719  */
720 bool block_dirty_folio(struct address_space *mapping, struct folio *folio)
721 {
722         struct buffer_head *head;
723         bool newly_dirty;
724
725         spin_lock(&mapping->i_private_lock);
726         head = folio_buffers(folio);
727         if (head) {
728                 struct buffer_head *bh = head;
729
730                 do {
731                         set_buffer_dirty(bh);
732                         bh = bh->b_this_page;
733                 } while (bh != head);
734         }
735         /*
736          * Lock out page's memcg migration to keep PageDirty
737          * synchronized with per-memcg dirty page counters.
738          */
739         folio_memcg_lock(folio);
740         newly_dirty = !folio_test_set_dirty(folio);
741         spin_unlock(&mapping->i_private_lock);
742
743         if (newly_dirty)
744                 __folio_mark_dirty(folio, mapping, 1);
745
746         folio_memcg_unlock(folio);
747
748         if (newly_dirty)
749                 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
750
751         return newly_dirty;
752 }
753 EXPORT_SYMBOL(block_dirty_folio);
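
/*
 * Usage sketch (assumption): a buffer_head based filesystem typically plugs
 * block_dirty_folio() and block_invalidate_folio() directly into its
 * address_space_operations; the other methods are omitted in this sketch.
 */
static const struct address_space_operations example_aops = {
        .dirty_folio            = block_dirty_folio,
        .invalidate_folio       = block_invalidate_folio,
        /* .read_folio, .writepages, .write_begin, ... omitted */
};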
754
755 /*
756  * Write out and wait upon a list of buffers.
757  *
758  * We have conflicting pressures: we want to make sure that all
759  * initially dirty buffers get waited on, but that any subsequently
760  * dirtied buffers don't.  After all, we don't want fsync to last
761  * forever if somebody is actively writing to the file.
762  *
763  * Do this in two main stages: first we copy dirty buffers to a
764  * temporary inode list, queueing the writes as we go.  Then we clean
765  * up, waiting for those writes to complete.
766  * 
767  * During this second stage, any subsequent updates to the file may end
768  * up refiling the buffer on the original inode's dirty list again, so
769  * there is a chance we will end up with a buffer queued for write but
770  * not yet completed on that list.  So, as a final cleanup we go through
771  * the osync code to catch these locked, dirty buffers without requeuing
772  * any newly dirty buffers for write.
773  */
774 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
775 {
776         struct buffer_head *bh;
777         struct address_space *mapping;
778         int err = 0, err2;
779         struct blk_plug plug;
780         LIST_HEAD(tmp);
781
782         blk_start_plug(&plug);
783
784         spin_lock(lock);
785         while (!list_empty(list)) {
786                 bh = BH_ENTRY(list->next);
787                 mapping = bh->b_assoc_map;
788                 __remove_assoc_queue(bh);
789                 /* Avoid race with mark_buffer_dirty_inode() which does
790                  * a lockless check and we rely on seeing the dirty bit */
791                 smp_mb();
792                 if (buffer_dirty(bh) || buffer_locked(bh)) {
793                         list_add(&bh->b_assoc_buffers, &tmp);
794                         bh->b_assoc_map = mapping;
795                         if (buffer_dirty(bh)) {
796                                 get_bh(bh);
797                                 spin_unlock(lock);
798                                 /*
799                                  * Ensure any pending I/O completes so that
800                                  * write_dirty_buffer() actually writes the
801                                  * current contents - it is a noop if I/O is
802                                  * still in flight on potentially older
803                                  * contents.
804                                  */
805                                 write_dirty_buffer(bh, REQ_SYNC);
806
807                                 /*
808                                  * Kick off IO for the previous mapping. Note
809                                  * that we will not run the very last mapping,
810                                  * wait_on_buffer() will do that for us
811                                  * through sync_buffer().
812                                  */
813                                 brelse(bh);
814                                 spin_lock(lock);
815                         }
816                 }
817         }
818
819         spin_unlock(lock);
820         blk_finish_plug(&plug);
821         spin_lock(lock);
822
823         while (!list_empty(&tmp)) {
824                 bh = BH_ENTRY(tmp.prev);
825                 get_bh(bh);
826                 mapping = bh->b_assoc_map;
827                 __remove_assoc_queue(bh);
828                 /* Avoid race with mark_buffer_dirty_inode() which does
829                  * a lockless check and we rely on seeing the dirty bit */
830                 smp_mb();
831                 if (buffer_dirty(bh)) {
832                         list_add(&bh->b_assoc_buffers,
833                                  &mapping->i_private_list);
834                         bh->b_assoc_map = mapping;
835                 }
836                 spin_unlock(lock);
837                 wait_on_buffer(bh);
838                 if (!buffer_uptodate(bh))
839                         err = -EIO;
840                 brelse(bh);
841                 spin_lock(lock);
842         }
843         
844         spin_unlock(lock);
845         err2 = osync_buffers_list(lock, list);
846         if (err)
847                 return err;
848         else
849                 return err2;
850 }
851
852 /*
853  * Invalidate any and all dirty buffers on a given inode.  We are
854  * probably unmounting the fs, but that doesn't mean we have already
855  * done a sync().  Just drop the buffers from the inode list.
856  *
857  * NOTE: we take the inode's blockdev's mapping's i_private_lock.  Which
858  * assumes that all the buffers are against the blockdev.  Not true
859  * for reiserfs.
860  */
861 void invalidate_inode_buffers(struct inode *inode)
862 {
863         if (inode_has_buffers(inode)) {
864                 struct address_space *mapping = &inode->i_data;
865                 struct list_head *list = &mapping->i_private_list;
866                 struct address_space *buffer_mapping = mapping->i_private_data;
867
868                 spin_lock(&buffer_mapping->i_private_lock);
869                 while (!list_empty(list))
870                         __remove_assoc_queue(BH_ENTRY(list->next));
871                 spin_unlock(&buffer_mapping->i_private_lock);
872         }
873 }
874 EXPORT_SYMBOL(invalidate_inode_buffers);
875
876 /*
877  * Remove any clean buffers from the inode's buffer list.  This is called
878  * when we're trying to free the inode itself.  Those buffers can pin it.
879  *
880  * Returns true if all buffers were removed.
881  */
882 int remove_inode_buffers(struct inode *inode)
883 {
884         int ret = 1;
885
886         if (inode_has_buffers(inode)) {
887                 struct address_space *mapping = &inode->i_data;
888                 struct list_head *list = &mapping->i_private_list;
889                 struct address_space *buffer_mapping = mapping->i_private_data;
890
891                 spin_lock(&buffer_mapping->i_private_lock);
892                 while (!list_empty(list)) {
893                         struct buffer_head *bh = BH_ENTRY(list->next);
894                         if (buffer_dirty(bh)) {
895                                 ret = 0;
896                                 break;
897                         }
898                         __remove_assoc_queue(bh);
899                 }
900                 spin_unlock(&buffer_mapping->i_private_lock);
901         }
902         return ret;
903 }
904
905 /*
906  * Create the appropriate buffers when given a folio for the data area and
907  * the size of each buffer.  Use the bh->b_this_page linked list to
908  * follow the buffers created.  Return NULL if unable to create more
909  * buffers.
910  *
911  * The retry flag is used to differentiate async IO (paging, swapping),
912  * which may not fail, from ordinary buffer allocations.
913  */
914 struct buffer_head *folio_alloc_buffers(struct folio *folio, unsigned long size,
915                                         gfp_t gfp)
916 {
917         struct buffer_head *bh, *head;
918         long offset;
919         struct mem_cgroup *memcg, *old_memcg;
920
921         /* The folio lock pins the memcg */
922         memcg = folio_memcg(folio);
923         old_memcg = set_active_memcg(memcg);
924
925         head = NULL;
926         offset = folio_size(folio);
927         while ((offset -= size) >= 0) {
928                 bh = alloc_buffer_head(gfp);
929                 if (!bh)
930                         goto no_grow;
931
932                 bh->b_this_page = head;
933                 bh->b_blocknr = -1;
934                 head = bh;
935
936                 bh->b_size = size;
937
938                 /* Link the buffer to its folio */
939                 folio_set_bh(bh, folio, offset);
940         }
941 out:
942         set_active_memcg(old_memcg);
943         return head;
944 /*
945  * In case anything failed, we just free everything we got.
946  */
947 no_grow:
948         if (head) {
949                 do {
950                         bh = head;
951                         head = head->b_this_page;
952                         free_buffer_head(bh);
953                 } while (head);
954         }
955
956         goto out;
957 }
958 EXPORT_SYMBOL_GPL(folio_alloc_buffers);
959
960 struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size)
961 {
962         gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT;
963
964         return folio_alloc_buffers(page_folio(page), size, gfp);
965 }
966 EXPORT_SYMBOL_GPL(alloc_page_buffers);
967
968 static inline void link_dev_buffers(struct folio *folio,
969                 struct buffer_head *head)
970 {
971         struct buffer_head *bh, *tail;
972
973         bh = head;
974         do {
975                 tail = bh;
976                 bh = bh->b_this_page;
977         } while (bh);
978         tail->b_this_page = head;
979         folio_attach_private(folio, head);
980 }
981
982 static sector_t blkdev_max_block(struct block_device *bdev, unsigned int size)
983 {
984         sector_t retval = ~((sector_t)0);
985         loff_t sz = bdev_nr_bytes(bdev);
986
987         if (sz) {
988                 unsigned int sizebits = blksize_bits(size);
989                 retval = (sz >> sizebits);
990         }
991         return retval;
992 }
993
994 /*
995  * Initialise the state of a blockdev folio's buffers.
996  */ 
997 static sector_t folio_init_buffers(struct folio *folio,
998                 struct block_device *bdev, unsigned size)
999 {
1000         struct buffer_head *head = folio_buffers(folio);
1001         struct buffer_head *bh = head;
1002         bool uptodate = folio_test_uptodate(folio);
1003         sector_t block = div_u64(folio_pos(folio), size);
1004         sector_t end_block = blkdev_max_block(bdev, size);
1005
1006         do {
1007                 if (!buffer_mapped(bh)) {
1008                         bh->b_end_io = NULL;
1009                         bh->b_private = NULL;
1010                         bh->b_bdev = bdev;
1011                         bh->b_blocknr = block;
1012                         if (uptodate)
1013                                 set_buffer_uptodate(bh);
1014                         if (block < end_block)
1015                                 set_buffer_mapped(bh);
1016                 }
1017                 block++;
1018                 bh = bh->b_this_page;
1019         } while (bh != head);
1020
1021         /*
1022          * Caller needs to validate requested block against end of device.
1023          */
1024         return end_block;
1025 }
1026
1027 /*
1028  * Create the page-cache folio that contains the requested block.
1029  *
1030  * This is used purely for blockdev mappings.
1031  *
1032  * Returns false if we have a failure which cannot be cured by retrying
1033  * without sleeping.  Returns true if we succeeded, or the caller should retry.
1034  */
1035 static bool grow_dev_folio(struct block_device *bdev, sector_t block,
1036                 pgoff_t index, unsigned size, gfp_t gfp)
1037 {
1038         struct address_space *mapping = bdev->bd_mapping;
1039         struct folio *folio;
1040         struct buffer_head *bh;
1041         sector_t end_block = 0;
1042
1043         folio = __filemap_get_folio(mapping, index,
1044                         FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
1045         if (IS_ERR(folio))
1046                 return false;
1047
1048         bh = folio_buffers(folio);
1049         if (bh) {
1050                 if (bh->b_size == size) {
1051                         end_block = folio_init_buffers(folio, bdev, size);
1052                         goto unlock;
1053                 }
1054
1055                 /*
1056                  * Retrying may succeed; for example the folio may finish
1057                  * writeback, or buffers may be cleaned.  This should not
1058                  * happen very often; maybe we have old buffers attached to
1059                  * this blockdev's page cache and we're trying to change
1060                  * the block size?
1061                  */
1062                 if (!try_to_free_buffers(folio)) {
1063                         end_block = ~0ULL;
1064                         goto unlock;
1065                 }
1066         }
1067
1068         bh = folio_alloc_buffers(folio, size, gfp | __GFP_ACCOUNT);
1069         if (!bh)
1070                 goto unlock;
1071
1072         /*
1073          * Link the folio to the buffers and initialise them.  Take the
1074          * lock to be atomic wrt __find_get_block(), which does not
1075          * run under the folio lock.
1076          */
1077         spin_lock(&mapping->i_private_lock);
1078         link_dev_buffers(folio, bh);
1079         end_block = folio_init_buffers(folio, bdev, size);
1080         spin_unlock(&mapping->i_private_lock);
1081 unlock:
1082         folio_unlock(folio);
1083         folio_put(folio);
1084         return block < end_block;
1085 }
1086
1087 /*
1088  * Create buffers for the specified block device block's folio.  If
1089  * that folio was dirty, the buffers are set dirty also.  Returns false
1090  * if we've hit a permanent error.
1091  */
1092 static bool grow_buffers(struct block_device *bdev, sector_t block,
1093                 unsigned size, gfp_t gfp)
1094 {
1095         loff_t pos;
1096
1097         /*
1098          * Check for a block which lies outside our maximum possible
1099          * pagecache index.
1100          */
1101         if (check_mul_overflow(block, (sector_t)size, &pos) || pos > MAX_LFS_FILESIZE) {
1102                 printk(KERN_ERR "%s: requested out-of-range block %llu for device %pg\n",
1103                         __func__, (unsigned long long)block,
1104                         bdev);
1105                 return false;
1106         }
1107
1108         /* Create a folio with the proper size buffers */
1109         return grow_dev_folio(bdev, block, pos / PAGE_SIZE, size, gfp);
1110 }
1111
1112 static struct buffer_head *
1113 __getblk_slow(struct block_device *bdev, sector_t block,
1114              unsigned size, gfp_t gfp)
1115 {
1116         /* Size must be multiple of hard sectorsize */
1117         if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
1118                         (size < 512 || size > PAGE_SIZE))) {
1119                 printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1120                                         size);
1121                 printk(KERN_ERR "logical block size: %d\n",
1122                                         bdev_logical_block_size(bdev));
1123
1124                 dump_stack();
1125                 return NULL;
1126         }
1127
1128         for (;;) {
1129                 struct buffer_head *bh;
1130
1131                 bh = __find_get_block(bdev, block, size);
1132                 if (bh)
1133                         return bh;
1134
1135                 if (!grow_buffers(bdev, block, size, gfp))
1136                         return NULL;
1137         }
1138 }
1139
1140 /*
1141  * The relationship between dirty buffers and dirty pages:
1142  *
1143  * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1144  * the page is tagged dirty in the page cache.
1145  *
1146  * At all times, the dirtiness of the buffers represents the dirtiness of
1147  * subsections of the page.  If the page has buffers, the page dirty bit is
1148  * merely a hint about the true dirty state.
1149  *
1150  * When a page is set dirty in its entirety, all its buffers are marked dirty
1151  * (if the page has buffers).
1152  *
1153  * When a buffer is marked dirty, its page is dirtied, but the page's other
1154  * buffers are not.
1155  *
1156  * Also.  When blockdev buffers are explicitly read with bread(), they
1157  * individually become uptodate.  But their backing page remains not
1158  * uptodate - even if all of its buffers are uptodate.  A subsequent
1159  * block_read_full_folio() against that folio will discover all the uptodate
1160  * buffers, will set the folio uptodate and will perform no I/O.
1161  */
1162
1163 /**
1164  * mark_buffer_dirty - mark a buffer_head as needing writeout
1165  * @bh: the buffer_head to mark dirty
1166  *
1167  * mark_buffer_dirty() will set the dirty bit against the buffer, then set
1168  * its backing page dirty, then tag the page as dirty in the page cache
1169  * and then attach the address_space's inode to its superblock's dirty
1170  * inode list.
1171  *
1172  * mark_buffer_dirty() is atomic.  It takes bh->b_folio->mapping->i_private_lock,
1173  * i_pages lock and mapping->host->i_lock.
1174  */
1175 void mark_buffer_dirty(struct buffer_head *bh)
1176 {
1177         WARN_ON_ONCE(!buffer_uptodate(bh));
1178
1179         trace_block_dirty_buffer(bh);
1180
1181         /*
1182          * Very *carefully* optimize the it-is-already-dirty case.
1183          *
1184          * Don't let the final "is it dirty" escape to before we
1185          * perhaps modified the buffer.
1186          */
1187         if (buffer_dirty(bh)) {
1188                 smp_mb();
1189                 if (buffer_dirty(bh))
1190                         return;
1191         }
1192
1193         if (!test_set_buffer_dirty(bh)) {
1194                 struct folio *folio = bh->b_folio;
1195                 struct address_space *mapping = NULL;
1196
1197                 folio_memcg_lock(folio);
1198                 if (!folio_test_set_dirty(folio)) {
1199                         mapping = folio->mapping;
1200                         if (mapping)
1201                                 __folio_mark_dirty(folio, mapping, 0);
1202                 }
1203                 folio_memcg_unlock(folio);
1204                 if (mapping)
1205                         __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
1206         }
1207 }
1208 EXPORT_SYMBOL(mark_buffer_dirty);
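
/*
 * Illustration (hypothetical helper, a sketch only): modify a buffer, mark
 * it dirty so the writeback machinery picks it up, and optionally force it
 * out immediately with sync_dirty_buffer() when the caller needs the write
 * on stable storage before returning.
 */
static int example_update_and_sync(struct buffer_head *bh,
                                   const void *src, size_t len)
{
        lock_buffer(bh);
        memcpy(bh->b_data, src, len);
        set_buffer_uptodate(bh);
        unlock_buffer(bh);

        mark_buffer_dirty(bh);          /* dirties buffer, folio and inode */
        return sync_dirty_buffer(bh);   /* optional: write & wait right now */
}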
1209
1210 void mark_buffer_write_io_error(struct buffer_head *bh)
1211 {
1212         set_buffer_write_io_error(bh);
1213         /* FIXME: do we need to set this in both places? */
1214         if (bh->b_folio && bh->b_folio->mapping)
1215                 mapping_set_error(bh->b_folio->mapping, -EIO);
1216         if (bh->b_assoc_map) {
1217                 mapping_set_error(bh->b_assoc_map, -EIO);
1218                 errseq_set(&bh->b_assoc_map->host->i_sb->s_wb_err, -EIO);
1219         }
1220 }
1221 EXPORT_SYMBOL(mark_buffer_write_io_error);
1222
1223 /**
1224  * __brelse - Release a buffer.
1225  * @bh: The buffer to release.
1226  *
1227  * This variant of brelse() can be called if @bh is guaranteed to not be NULL.
1228  */
1229 void __brelse(struct buffer_head *bh)
1230 {
1231         if (atomic_read(&bh->b_count)) {
1232                 put_bh(bh);
1233                 return;
1234         }
1235         WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1236 }
1237 EXPORT_SYMBOL(__brelse);
1238
1239 /**
1240  * __bforget - Discard any dirty data in a buffer.
1241  * @bh: The buffer to forget.
1242  *
1243  * This variant of bforget() can be called if @bh is guaranteed to not
1244  * be NULL.
1245  */
1246 void __bforget(struct buffer_head *bh)
1247 {
1248         clear_buffer_dirty(bh);
1249         if (bh->b_assoc_map) {
1250                 struct address_space *buffer_mapping = bh->b_folio->mapping;
1251
1252                 spin_lock(&buffer_mapping->i_private_lock);
1253                 list_del_init(&bh->b_assoc_buffers);
1254                 bh->b_assoc_map = NULL;
1255                 spin_unlock(&buffer_mapping->i_private_lock);
1256         }
1257         __brelse(bh);
1258 }
1259 EXPORT_SYMBOL(__bforget);
1260
1261 static struct buffer_head *__bread_slow(struct buffer_head *bh)
1262 {
1263         lock_buffer(bh);
1264         if (buffer_uptodate(bh)) {
1265                 unlock_buffer(bh);
1266                 return bh;
1267         } else {
1268                 get_bh(bh);
1269                 bh->b_end_io = end_buffer_read_sync;
1270                 submit_bh(REQ_OP_READ, bh);
1271                 wait_on_buffer(bh);
1272                 if (buffer_uptodate(bh))
1273                         return bh;
1274         }
1275         brelse(bh);
1276         return NULL;
1277 }
1278
1279 /*
1280  * Per-cpu buffer LRU implementation, to reduce the cost of __find_get_block().
1281  * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
1282  * refcount elevated by one when they're in an LRU.  A buffer can only appear
1283  * once in a particular CPU's LRU.  A single buffer can be present in multiple
1284  * CPU's LRUs at the same time.
1285  *
1286  * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1287  * sb_find_get_block().
1288  *
1289  * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
1290  * a local interrupt disable for that.
1291  */
1292
1293 #define BH_LRU_SIZE     16
1294
1295 struct bh_lru {
1296         struct buffer_head *bhs[BH_LRU_SIZE];
1297 };
1298
1299 static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
1300
1301 #ifdef CONFIG_SMP
1302 #define bh_lru_lock()   local_irq_disable()
1303 #define bh_lru_unlock() local_irq_enable()
1304 #else
1305 #define bh_lru_lock()   preempt_disable()
1306 #define bh_lru_unlock() preempt_enable()
1307 #endif
1308
1309 static inline void check_irqs_on(void)
1310 {
1311 #ifdef irqs_disabled
1312         BUG_ON(irqs_disabled());
1313 #endif
1314 }
1315
1316 /*
1317  * Install a buffer_head into this cpu's LRU.  If not already in the LRU, it is
1318  * inserted at the front, and the buffer_head at the back if any is evicted.
1319  * Or, if already in the LRU it is moved to the front.
1320  */
1321 static void bh_lru_install(struct buffer_head *bh)
1322 {
1323         struct buffer_head *evictee = bh;
1324         struct bh_lru *b;
1325         int i;
1326
1327         check_irqs_on();
1328         bh_lru_lock();
1329
1330         /*
1331          * The refcount of a buffer_head in the bh_lru prevents dropping the
1332          * attached page (i.e., try_to_free_buffers()), so it could cause
1333          * page migration to fail.
1334          * Skip putting upcoming bh into bh_lru until migration is done.
1335          */
1336         if (lru_cache_disabled() || cpu_is_isolated(smp_processor_id())) {
1337                 bh_lru_unlock();
1338                 return;
1339         }
1340
1341         b = this_cpu_ptr(&bh_lrus);
1342         for (i = 0; i < BH_LRU_SIZE; i++) {
1343                 swap(evictee, b->bhs[i]);
1344                 if (evictee == bh) {
1345                         bh_lru_unlock();
1346                         return;
1347                 }
1348         }
1349
1350         get_bh(bh);
1351         bh_lru_unlock();
1352         brelse(evictee);
1353 }
1354
1355 /*
1356  * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
1357  */
1358 static struct buffer_head *
1359 lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
1360 {
1361         struct buffer_head *ret = NULL;
1362         unsigned int i;
1363
1364         check_irqs_on();
1365         bh_lru_lock();
1366         if (cpu_is_isolated(smp_processor_id())) {
1367                 bh_lru_unlock();
1368                 return NULL;
1369         }
1370         for (i = 0; i < BH_LRU_SIZE; i++) {
1371                 struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]);
1372
1373                 if (bh && bh->b_blocknr == block && bh->b_bdev == bdev &&
1374                     bh->b_size == size) {
1375                         if (i) {
1376                                 while (i) {
1377                                         __this_cpu_write(bh_lrus.bhs[i],
1378                                                 __this_cpu_read(bh_lrus.bhs[i - 1]));
1379                                         i--;
1380                                 }
1381                                 __this_cpu_write(bh_lrus.bhs[0], bh);
1382                         }
1383                         get_bh(bh);
1384                         ret = bh;
1385                         break;
1386                 }
1387         }
1388         bh_lru_unlock();
1389         return ret;
1390 }
1391
1392 /*
1393  * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
1394  * it in the LRU and mark it as accessed.  If it is not present then return
1395  * NULL
1396  */
1397 struct buffer_head *
1398 __find_get_block(struct block_device *bdev, sector_t block, unsigned size)
1399 {
1400         struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1401
1402         if (bh == NULL) {
1403                 /* __find_get_block_slow will mark the page accessed */
1404                 bh = __find_get_block_slow(bdev, block);
1405                 if (bh)
1406                         bh_lru_install(bh);
1407         } else
1408                 touch_buffer(bh);
1409
1410         return bh;
1411 }
1412 EXPORT_SYMBOL(__find_get_block);
1413
1414 /**
1415  * bdev_getblk - Get a buffer_head in a block device's buffer cache.
1416  * @bdev: The block device.
1417  * @block: The block number.
1418  * @size: The size of buffer_heads for this @bdev.
1419  * @gfp: The memory allocation flags to use.
1420  *
1421  * The returned buffer head has its reference count incremented, but is
1422  * not locked.  The caller should call brelse() when it has finished
1423  * with the buffer.  The buffer may not be uptodate.  If needed, the
1424  * caller can bring it uptodate either by reading it or overwriting it.
1425  *
1426  * Return: The buffer head, or NULL if memory could not be allocated.
1427  */
1428 struct buffer_head *bdev_getblk(struct block_device *bdev, sector_t block,
1429                 unsigned size, gfp_t gfp)
1430 {
1431         struct buffer_head *bh = __find_get_block(bdev, block, size);
1432
1433         might_alloc(gfp);
1434         if (bh)
1435                 return bh;
1436
1437         return __getblk_slow(bdev, block, size, gfp);
1438 }
1439 EXPORT_SYMBOL(bdev_getblk);
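
/*
 * Usage sketch (assumption, hypothetical helper): a caller that intends to
 * overwrite the whole block does not need it to be uptodate; bdev_getblk()
 * plus an overwrite is enough, and brelse() drops the reference taken here.
 */
static int example_overwrite_block(struct block_device *bdev, sector_t block,
                                   unsigned size, const void *src)
{
        struct buffer_head *bh = bdev_getblk(bdev, block, size,
                                             GFP_NOFS | __GFP_MOVABLE);

        if (!bh)
                return -ENOMEM;
        lock_buffer(bh);
        memcpy(bh->b_data, src, size);
        set_buffer_uptodate(bh);
        unlock_buffer(bh);
        mark_buffer_dirty(bh);
        brelse(bh);
        return 0;
}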
1440
1441 /*
1442  * Do async read-ahead on a buffer.
1443  */
1444 void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
1445 {
1446         struct buffer_head *bh = bdev_getblk(bdev, block, size,
1447                         GFP_NOWAIT | __GFP_MOVABLE);
1448
1449         if (likely(bh)) {
1450                 bh_readahead(bh, REQ_RAHEAD);
1451                 brelse(bh);
1452         }
1453 }
1454 EXPORT_SYMBOL(__breadahead);
1455
1456 /**
1457  * __bread_gfp() - Read a block.
1458  * @bdev: The block device to read from.
1459  * @block: Block number in units of block size.
1460  * @size: The block size of this device in bytes.
1461  * @gfp: Not page allocation flags; see below.
1462  *
1463  * You are not expected to call this function.  You should use one of
1464  * sb_bread(), sb_bread_unmovable() or __bread().
1465  *
1466  * Read a specified block, and return the buffer head that refers to it.
1467  * If @gfp is 0, the memory will be allocated using the block device's
1468  * default GFP flags.  If @gfp is __GFP_MOVABLE, the memory may be
1469  * allocated from a movable area.  Do not pass in a complete set of
1470  * GFP flags.
1471  *
1472  * The returned buffer head has its refcount increased.  The caller should
1473  * call brelse() when it has finished with the buffer.
1474  *
1475  * Context: May sleep waiting for I/O.
1476  * Return: NULL if the block was unreadable.
1477  */
1478 struct buffer_head *__bread_gfp(struct block_device *bdev, sector_t block,
1479                 unsigned size, gfp_t gfp)
1480 {
1481         struct buffer_head *bh;
1482
1483         gfp |= mapping_gfp_constraint(bdev->bd_mapping, ~__GFP_FS);
1484
1485         /*
1486          * Prefer looping in the allocator rather than here, at least that
1487          * code knows what it's doing.
1488          */
1489         gfp |= __GFP_NOFAIL;
1490
1491         bh = bdev_getblk(bdev, block, size, gfp);
1492
1493         if (likely(bh) && !buffer_uptodate(bh))
1494                 bh = __bread_slow(bh);
1495         return bh;
1496 }
1497 EXPORT_SYMBOL(__bread_gfp);
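
/*
 * Illustrative sketch, not part of the kernel source: reading filesystem
 * metadata through sb_bread(), one of the wrappers the comment above points
 * to.  The superblock and block number come from the hypothetical caller;
 * parsing of the on-disk data is elided.
 */
static int example_read_metadata(struct super_block *sb, sector_t block)
{
        struct buffer_head *bh = sb_bread(sb, block);

        if (!bh)
                return -EIO;    /* the block was unreadable */

        /* bh->b_data now holds sb->s_blocksize bytes of uptodate data. */
        /* ... interpret the on-disk structure here ... */

        brelse(bh);             /* drop the reference when finished */
        return 0;
}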
1498
1499 static void __invalidate_bh_lrus(struct bh_lru *b)
1500 {
1501         int i;
1502
1503         for (i = 0; i < BH_LRU_SIZE; i++) {
1504                 brelse(b->bhs[i]);
1505                 b->bhs[i] = NULL;
1506         }
1507 }
1508 /*
1509  * invalidate_bh_lrus() is called rarely - but not only at unmount.
1510  * This doesn't race because it runs in each cpu either in irq
1511  * or with preempt disabled.
1512  */
1513 static void invalidate_bh_lru(void *arg)
1514 {
1515         struct bh_lru *b = &get_cpu_var(bh_lrus);
1516
1517         __invalidate_bh_lrus(b);
1518         put_cpu_var(bh_lrus);
1519 }
1520
1521 bool has_bh_in_lru(int cpu, void *dummy)
1522 {
1523         struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu);
1524         int i;
1525
1526         for (i = 0; i < BH_LRU_SIZE; i++) {
1527                 if (b->bhs[i])
1528                         return true;
1529         }
1530
1531         return false;
1532 }
1533
1534 void invalidate_bh_lrus(void)
1535 {
1536         on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1);
1537 }
1538 EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
1539
1540 /*
1541  * It's called from workqueue context so we need a bh_lru_lock to close
1542  * the race with preemption/irq.
1543  */
1544 void invalidate_bh_lrus_cpu(void)
1545 {
1546         struct bh_lru *b;
1547
1548         bh_lru_lock();
1549         b = this_cpu_ptr(&bh_lrus);
1550         __invalidate_bh_lrus(b);
1551         bh_lru_unlock();
1552 }
1553
1554 void folio_set_bh(struct buffer_head *bh, struct folio *folio,
1555                   unsigned long offset)
1556 {
1557         bh->b_folio = folio;
1558         BUG_ON(offset >= folio_size(folio));
1559         if (folio_test_highmem(folio))
1560                 /*
1561                  * This catches illegal uses and preserves the offset:
1562                  */
1563                 bh->b_data = (char *)(0 + offset);
1564         else
1565                 bh->b_data = folio_address(folio) + offset;
1566 }
1567 EXPORT_SYMBOL(folio_set_bh);
1568
1569 /*
1570  * Called when truncating a buffer on a page completely.
1571  */
1572
1573 /* Bits that are cleared during an invalidate */
1574 #define BUFFER_FLAGS_DISCARD \
1575         (1 << BH_Mapped | 1 << BH_New | 1 << BH_Req | \
1576          1 << BH_Delay | 1 << BH_Unwritten)
1577
1578 static void discard_buffer(struct buffer_head * bh)
1579 {
1580         unsigned long b_state;
1581
1582         lock_buffer(bh);
1583         clear_buffer_dirty(bh);
1584         bh->b_bdev = NULL;
1585         b_state = READ_ONCE(bh->b_state);
1586         do {
1587         } while (!try_cmpxchg(&bh->b_state, &b_state,
1588                               b_state & ~BUFFER_FLAGS_DISCARD));
1589         unlock_buffer(bh);
1590 }
1591
1592 /**
1593  * block_invalidate_folio - Invalidate part or all of a buffer-backed folio.
1594  * @folio: The folio which is affected.
1595  * @offset: start of the range to invalidate
1596  * @length: length of the range to invalidate
1597  *
1598  * block_invalidate_folio() is called when all or part of the folio has been
1599  * invalidated by a truncate operation.
1600  *
1601  * block_invalidate_folio() does not have to release all buffers, but it must
1602  * ensure that no dirty buffer is left outside @offset and that no I/O
1603  * is underway against any of the blocks which are outside the truncation
1604  * point.  Because the caller is about to free (and possibly reuse) those
1605  * blocks on-disk.
1606  */
1607 void block_invalidate_folio(struct folio *folio, size_t offset, size_t length)
1608 {
1609         struct buffer_head *head, *bh, *next;
1610         size_t curr_off = 0;
1611         size_t stop = length + offset;
1612
1613         BUG_ON(!folio_test_locked(folio));
1614
1615         /*
1616          * Check for overflow
1617          */
1618         BUG_ON(stop > folio_size(folio) || stop < length);
1619
1620         head = folio_buffers(folio);
1621         if (!head)
1622                 return;
1623
1624         bh = head;
1625         do {
1626                 size_t next_off = curr_off + bh->b_size;
1627                 next = bh->b_this_page;
1628
1629                 /*
1630                  * Are we still fully in range ?
1631                  */
1632                 if (next_off > stop)
1633                         goto out;
1634
1635                 /*
1636                  * is this block fully invalidated?
1637                  */
1638                 if (offset <= curr_off)
1639                         discard_buffer(bh);
1640                 curr_off = next_off;
1641                 bh = next;
1642         } while (bh != head);
1643
1644         /*
1645          * We release buffers only if the entire folio is being invalidated.
1646          * The get_block cached value has been unconditionally invalidated,
1647          * so real IO is not possible anymore.
1648          */
1649         if (length == folio_size(folio))
1650                 filemap_release_folio(folio, 0);
1651 out:
1652         folio_clear_mappedtodisk(folio);
1653         return;
1654 }
1655 EXPORT_SYMBOL(block_invalidate_folio);
1656
1657 /*
1658  * We attach and possibly dirty the buffers atomically wrt
1659  * block_dirty_folio() via i_private_lock.  try_to_free_buffers
1660  * is already excluded via the folio lock.
1661  */
1662 struct buffer_head *create_empty_buffers(struct folio *folio,
1663                 unsigned long blocksize, unsigned long b_state)
1664 {
1665         struct buffer_head *bh, *head, *tail;
1666         gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT | __GFP_NOFAIL;
1667
1668         head = folio_alloc_buffers(folio, blocksize, gfp);
1669         bh = head;
1670         do {
1671                 bh->b_state |= b_state;
1672                 tail = bh;
1673                 bh = bh->b_this_page;
1674         } while (bh);
1675         tail->b_this_page = head;
1676
1677         spin_lock(&folio->mapping->i_private_lock);
1678         if (folio_test_uptodate(folio) || folio_test_dirty(folio)) {
1679                 bh = head;
1680                 do {
1681                         if (folio_test_dirty(folio))
1682                                 set_buffer_dirty(bh);
1683                         if (folio_test_uptodate(folio))
1684                                 set_buffer_uptodate(bh);
1685                         bh = bh->b_this_page;
1686                 } while (bh != head);
1687         }
1688         folio_attach_private(folio, head);
1689         spin_unlock(&folio->mapping->i_private_lock);
1690
1691         return head;
1692 }
1693 EXPORT_SYMBOL(create_empty_buffers);
1694
1695 /**
1696  * clean_bdev_aliases: clean a range of buffers in block device
1697  * @bdev: Block device to clean buffers in
1698  * @block: Start of a range of blocks to clean
1699  * @len: Number of blocks to clean
1700  *
1701  * We are about to use a range of blocks for data, and from the moment this
1702  * function returns until something explicitly marks a buffer dirty we do not
1703  * want writeback of any buffer-cache aliases of those blocks (ideally nothing
1704  * will dirty them until the blocks are freed again).  There is no need to mark
1705  * the aliases not-uptodate either - nobody can expect anything from a newly
1706  * allocated buffer anyway.  We used to use unmap_buffer() for this kind of
1707  * invalidation, but that was wrong: marking the alias unmapped, for example,
1708  * would confuse anyone who might pick it up with bread() afterwards.
1709  *
1710  * Also note that bforget() does not lock the buffer, so writeout I/O may still
1711  * be in flight against recently-freed buffers.  We do not wait for that I/O in
1712  * bforget() - it is more efficient to wait only if we really need to, and that
1713  * is what happens here.
1714  */
1715 void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t len)
1716 {
1717         struct address_space *bd_mapping = bdev->bd_mapping;
1718         const int blkbits = bd_mapping->host->i_blkbits;
1719         struct folio_batch fbatch;
1720         pgoff_t index = ((loff_t)block << blkbits) / PAGE_SIZE;
1721         pgoff_t end;
1722         int i, count;
1723         struct buffer_head *bh;
1724         struct buffer_head *head;
1725
1726         end = ((loff_t)(block + len - 1) << blkbits) / PAGE_SIZE;
1727         folio_batch_init(&fbatch);
1728         while (filemap_get_folios(bd_mapping, &index, end, &fbatch)) {
1729                 count = folio_batch_count(&fbatch);
1730                 for (i = 0; i < count; i++) {
1731                         struct folio *folio = fbatch.folios[i];
1732
1733                         if (!folio_buffers(folio))
1734                                 continue;
1735                         /*
1736                          * We use folio lock instead of bd_mapping->i_private_lock
1737                          * to pin buffers here since we can afford to sleep and
1738                          * it scales better than a global spinlock lock.
1739                          */
1740                         folio_lock(folio);
1741                         /* Recheck when the folio is locked which pins bhs */
1742                         head = folio_buffers(folio);
1743                         if (!head)
1744                                 goto unlock_page;
1745                         bh = head;
1746                         do {
1747                                 if (!buffer_mapped(bh) || (bh->b_blocknr < block))
1748                                         goto next;
1749                                 if (bh->b_blocknr >= block + len)
1750                                         break;
1751                                 clear_buffer_dirty(bh);
1752                                 wait_on_buffer(bh);
1753                                 clear_buffer_req(bh);
1754 next:
1755                                 bh = bh->b_this_page;
1756                         } while (bh != head);
1757 unlock_page:
1758                         folio_unlock(folio);
1759                 }
1760                 folio_batch_release(&fbatch);
1761                 cond_resched();
1762                 /* End of range already reached? */
1763                 if (index > end || !index)
1764                         break;
1765         }
1766 }
1767 EXPORT_SYMBOL(clean_bdev_aliases);
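
/*
 * Illustrative sketch, not part of the kernel source: a filesystem that has
 * just allocated a fresh run of blocks for file data would typically drop
 * any stale buffer-cache aliases covering them before mapping folios to
 * those blocks.  The allocation itself is hypothetical and elided.
 */
static void example_claim_new_extent(struct super_block *sb,
                                     sector_t first_block, sector_t nr_blocks)
{
        /* Forget stale buffer_heads over the freshly allocated range. */
        clean_bdev_aliases(sb->s_bdev, first_block, nr_blocks);

        /* ... the blocks can now be mapped for data without aliases ... */
}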
1768
1769 static struct buffer_head *folio_create_buffers(struct folio *folio,
1770                                                 struct inode *inode,
1771                                                 unsigned int b_state)
1772 {
1773         struct buffer_head *bh;
1774
1775         BUG_ON(!folio_test_locked(folio));
1776
1777         bh = folio_buffers(folio);
1778         if (!bh)
1779                 bh = create_empty_buffers(folio,
1780                                 1 << READ_ONCE(inode->i_blkbits), b_state);
1781         return bh;
1782 }
1783
1784 /*
1785  * NOTE! All mapped/uptodate combinations are valid:
1786  *
1787  *      Mapped  Uptodate        Meaning
1788  *
1789  *      No      No              "unknown" - must do get_block()
1790  *      No      Yes             "hole" - zero-filled
1791  *      Yes     No              "allocated" - allocated on disk, not read in
1792  *      Yes     Yes             "valid" - allocated and up-to-date in memory.
1793  *
1794  * "Dirty" is valid only with the last case (mapped+uptodate).
1795  */
1796
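/*
 * Illustrative sketch, not part of the kernel source: a minimal get_block_t
 * implementation, showing where the states in the table above come from.
 * The 1:1 mapping of file blocks onto device blocks is purely hypothetical -
 * a real filesystem consults (and, when @create is set, updates) its own
 * metadata.  Later sketches in this file reuse this routine.
 */
static int example_get_block(struct inode *inode, sector_t iblock,
                             struct buffer_head *bh_result, int create)
{
        sector_t nr_blocks = i_size_read(inode) >> inode->i_blkbits;

        if (iblock >= nr_blocks && !create)
                return 0;       /* left unmapped: a hole (or "unknown") */

        /* "allocated"/"valid": point the buffer_head at a disk block. */
        map_bh(bh_result, inode->i_sb, iblock);
        if (create && iblock >= nr_blocks)
                set_buffer_new(bh_result);      /* freshly allocated block */
        return 0;
}
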
1797 /*
1798  * While block_write_full_folio is writing back the dirty buffers under
1799  * the page lock, whoever dirtied the buffers may decide to clean them
1800  * again at any time.  We handle that by only looking at the buffer
1801  * state inside lock_buffer().
1802  *
1803  * If block_write_full_folio() is called for regular writeback
1804  * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1805  * locked buffer.   This only can happen if someone has written the buffer
1806  * directly, with submit_bh().  At the address_space level PageWriteback
1807  * prevents this contention from occurring.
1808  *
1809  * If block_write_full_folio() is called with wbc->sync_mode ==
1810  * WB_SYNC_ALL, the writes are posted using REQ_SYNC; this
1811  * causes the writes to be flagged as synchronous writes.
1812  */
1813 int __block_write_full_folio(struct inode *inode, struct folio *folio,
1814                         get_block_t *get_block, struct writeback_control *wbc)
1815 {
1816         int err;
1817         sector_t block;
1818         sector_t last_block;
1819         struct buffer_head *bh, *head;
1820         size_t blocksize;
1821         int nr_underway = 0;
1822         blk_opf_t write_flags = wbc_to_write_flags(wbc);
1823
1824         head = folio_create_buffers(folio, inode,
1825                                     (1 << BH_Dirty) | (1 << BH_Uptodate));
1826
1827         /*
1828          * Be very careful.  We have no exclusion from block_dirty_folio
1829          * here, and the (potentially unmapped) buffers may become dirty at
1830          * any time.  If a buffer becomes dirty here after we've inspected it
1831          * then we just miss that fact, and the folio stays dirty.
1832          *
1833          * Buffers outside i_size may be dirtied by block_dirty_folio;
1834          * handle that here by just cleaning them.
1835          */
1836
1837         bh = head;
1838         blocksize = bh->b_size;
1839
1840         block = div_u64(folio_pos(folio), blocksize);
1841         last_block = div_u64(i_size_read(inode) - 1, blocksize);
1842
1843         /*
1844          * Get all the dirty buffers mapped to disk addresses and
1845          * handle any aliases from the underlying blockdev's mapping.
1846          */
1847         do {
1848                 if (block > last_block) {
1849                         /*
1850                          * mapped buffers outside i_size will occur, because
1851                          * this folio can be outside i_size when there is a
1852                          * truncate in progress.
1853                          */
1854                         /*
1855                          * The buffer was zeroed by block_write_full_folio()
1856                          */
1857                         clear_buffer_dirty(bh);
1858                         set_buffer_uptodate(bh);
1859                 } else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
1860                            buffer_dirty(bh)) {
1861                         WARN_ON(bh->b_size != blocksize);
1862                         err = get_block(inode, block, bh, 1);
1863                         if (err)
1864                                 goto recover;
1865                         clear_buffer_delay(bh);
1866                         if (buffer_new(bh)) {
1867                                 /* blockdev mappings never come here */
1868                                 clear_buffer_new(bh);
1869                                 clean_bdev_bh_alias(bh);
1870                         }
1871                 }
1872                 bh = bh->b_this_page;
1873                 block++;
1874         } while (bh != head);
1875
1876         do {
1877                 if (!buffer_mapped(bh))
1878                         continue;
1879                 /*
1880                  * If it's a fully non-blocking write attempt and we cannot
1881                  * lock the buffer then redirty the folio.  Note that this can
1882                  * potentially cause a busy-wait loop from writeback threads
1883                  * and kswapd activity, but those code paths have their own
1884                  * higher-level throttling.
1885                  */
1886                 if (wbc->sync_mode != WB_SYNC_NONE) {
1887                         lock_buffer(bh);
1888                 } else if (!trylock_buffer(bh)) {
1889                         folio_redirty_for_writepage(wbc, folio);
1890                         continue;
1891                 }
1892                 if (test_clear_buffer_dirty(bh)) {
1893                         mark_buffer_async_write_endio(bh,
1894                                 end_buffer_async_write);
1895                 } else {
1896                         unlock_buffer(bh);
1897                 }
1898         } while ((bh = bh->b_this_page) != head);
1899
1900         /*
1901          * The folio and its buffers are protected by the writeback flag,
1902          * so we can drop the bh refcounts early.
1903          */
1904         BUG_ON(folio_test_writeback(folio));
1905         folio_start_writeback(folio);
1906
1907         do {
1908                 struct buffer_head *next = bh->b_this_page;
1909                 if (buffer_async_write(bh)) {
1910                         submit_bh_wbc(REQ_OP_WRITE | write_flags, bh,
1911                                       inode->i_write_hint, wbc);
1912                         nr_underway++;
1913                 }
1914                 bh = next;
1915         } while (bh != head);
1916         folio_unlock(folio);
1917
1918         err = 0;
1919 done:
1920         if (nr_underway == 0) {
1921                 /*
1922                  * The folio was marked dirty, but the buffers were
1923                  * clean.  Someone wrote them back by hand with
1924                  * write_dirty_buffer/submit_bh.  A rare case.
1925                  */
1926                 folio_end_writeback(folio);
1927
1928                 /*
1929                  * The folio and buffer_heads can be released at any time from
1930                  * here on.
1931                  */
1932         }
1933         return err;
1934
1935 recover:
1936         /*
1937          * ENOSPC, or some other error.  We may already have added some
1938          * blocks to the file, so we need to write these out to avoid
1939          * exposing stale data.
1940          * The folio is currently locked and not marked for writeback
1941          */
1942         bh = head;
1943         /* Recovery: lock and submit the mapped buffers */
1944         do {
1945                 if (buffer_mapped(bh) && buffer_dirty(bh) &&
1946                     !buffer_delay(bh)) {
1947                         lock_buffer(bh);
1948                         mark_buffer_async_write_endio(bh,
1949                                 end_buffer_async_write);
1950                 } else {
1951                         /*
1952                          * The buffer may have been set dirty during
1953                          * attachment to a dirty folio.
1954                          */
1955                         clear_buffer_dirty(bh);
1956                 }
1957         } while ((bh = bh->b_this_page) != head);
1958         BUG_ON(folio_test_writeback(folio));
1959         mapping_set_error(folio->mapping, err);
1960         folio_start_writeback(folio);
1961         do {
1962                 struct buffer_head *next = bh->b_this_page;
1963                 if (buffer_async_write(bh)) {
1964                         clear_buffer_dirty(bh);
1965                         submit_bh_wbc(REQ_OP_WRITE | write_flags, bh,
1966                                       inode->i_write_hint, wbc);
1967                         nr_underway++;
1968                 }
1969                 bh = next;
1970         } while (bh != head);
1971         folio_unlock(folio);
1972         goto done;
1973 }
1974 EXPORT_SYMBOL(__block_write_full_folio);
1975
1976 /*
1977  * If a folio has any new buffers, zero them out here, and mark them uptodate
1978  * and dirty so they'll be written out (in order to prevent uninitialised
1979  * block data from leaking). And clear the new bit.
1980  */
1981 void folio_zero_new_buffers(struct folio *folio, size_t from, size_t to)
1982 {
1983         size_t block_start, block_end;
1984         struct buffer_head *head, *bh;
1985
1986         BUG_ON(!folio_test_locked(folio));
1987         head = folio_buffers(folio);
1988         if (!head)
1989                 return;
1990
1991         bh = head;
1992         block_start = 0;
1993         do {
1994                 block_end = block_start + bh->b_size;
1995
1996                 if (buffer_new(bh)) {
1997                         if (block_end > from && block_start < to) {
1998                                 if (!folio_test_uptodate(folio)) {
1999                                         size_t start, xend;
2000
2001                                         start = max(from, block_start);
2002                                         xend = min(to, block_end);
2003
2004                                         folio_zero_segment(folio, start, xend);
2005                                         set_buffer_uptodate(bh);
2006                                 }
2007
2008                                 clear_buffer_new(bh);
2009                                 mark_buffer_dirty(bh);
2010                         }
2011                 }
2012
2013                 block_start = block_end;
2014                 bh = bh->b_this_page;
2015         } while (bh != head);
2016 }
2017 EXPORT_SYMBOL(folio_zero_new_buffers);
2018
2019 static int
2020 iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh,
2021                 const struct iomap *iomap)
2022 {
2023         loff_t offset = (loff_t)block << inode->i_blkbits;
2024
2025         bh->b_bdev = iomap->bdev;
2026
2027         /*
2028          * Block points to offset in file we need to map, iomap contains
2029          * the offset at which the map starts. If the map ends before the
2030          * current block, then do not map the buffer and let the caller
2031          * handle it.
2032          */
2033         if (offset >= iomap->offset + iomap->length)
2034                 return -EIO;
2035
2036         switch (iomap->type) {
2037         case IOMAP_HOLE:
2038                 /*
2039                  * If the buffer is not up to date or beyond the current EOF,
2040                  * we need to mark it as new to ensure sub-block zeroing is
2041                  * executed if necessary.
2042                  */
2043                 if (!buffer_uptodate(bh) ||
2044                     (offset >= i_size_read(inode)))
2045                         set_buffer_new(bh);
2046                 return 0;
2047         case IOMAP_DELALLOC:
2048                 if (!buffer_uptodate(bh) ||
2049                     (offset >= i_size_read(inode)))
2050                         set_buffer_new(bh);
2051                 set_buffer_uptodate(bh);
2052                 set_buffer_mapped(bh);
2053                 set_buffer_delay(bh);
2054                 return 0;
2055         case IOMAP_UNWRITTEN:
2056                 /*
2057                  * For unwritten regions, we always need to ensure that regions
2058                  * in the block we are not writing to are zeroed. Mark the
2059                  * buffer as new to ensure this.
2060                  */
2061                 set_buffer_new(bh);
2062                 set_buffer_unwritten(bh);
2063                 fallthrough;
2064         case IOMAP_MAPPED:
2065                 if ((iomap->flags & IOMAP_F_NEW) ||
2066                     offset >= i_size_read(inode)) {
2067                         /*
2068                          * This can happen if truncating the block device races
2069                          * with the check in the caller as i_size updates on
2070                          * block devices aren't synchronized by i_rwsem for
2071                          * block devices.
2072                          */
2073                         if (S_ISBLK(inode->i_mode))
2074                                 return -EIO;
2075                         set_buffer_new(bh);
2076                 }
2077                 bh->b_blocknr = (iomap->addr + offset - iomap->offset) >>
2078                                 inode->i_blkbits;
2079                 set_buffer_mapped(bh);
2080                 return 0;
2081         default:
2082                 WARN_ON_ONCE(1);
2083                 return -EIO;
2084         }
2085 }
2086
2087 int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len,
2088                 get_block_t *get_block, const struct iomap *iomap)
2089 {
2090         size_t from = offset_in_folio(folio, pos);
2091         size_t to = from + len;
2092         struct inode *inode = folio->mapping->host;
2093         size_t block_start, block_end;
2094         sector_t block;
2095         int err = 0;
2096         size_t blocksize;
2097         struct buffer_head *bh, *head, *wait[2], **wait_bh = wait;
2098
2099         BUG_ON(!folio_test_locked(folio));
2100         BUG_ON(to > folio_size(folio));
2101         BUG_ON(from > to);
2102
2103         head = folio_create_buffers(folio, inode, 0);
2104         blocksize = head->b_size;
2105         block = div_u64(folio_pos(folio), blocksize);
2106
2107         for (bh = head, block_start = 0; bh != head || !block_start;
2108             block++, block_start = block_end, bh = bh->b_this_page) {
2109                 block_end = block_start + blocksize;
2110                 if (block_end <= from || block_start >= to) {
2111                         if (folio_test_uptodate(folio)) {
2112                                 if (!buffer_uptodate(bh))
2113                                         set_buffer_uptodate(bh);
2114                         }
2115                         continue;
2116                 }
2117                 if (buffer_new(bh))
2118                         clear_buffer_new(bh);
2119                 if (!buffer_mapped(bh)) {
2120                         WARN_ON(bh->b_size != blocksize);
2121                         if (get_block)
2122                                 err = get_block(inode, block, bh, 1);
2123                         else
2124                                 err = iomap_to_bh(inode, block, bh, iomap);
2125                         if (err)
2126                                 break;
2127
2128                         if (buffer_new(bh)) {
2129                                 clean_bdev_bh_alias(bh);
2130                                 if (folio_test_uptodate(folio)) {
2131                                         clear_buffer_new(bh);
2132                                         set_buffer_uptodate(bh);
2133                                         mark_buffer_dirty(bh);
2134                                         continue;
2135                                 }
2136                                 if (block_end > to || block_start < from)
2137                                         folio_zero_segments(folio,
2138                                                 to, block_end,
2139                                                 block_start, from);
2140                                 continue;
2141                         }
2142                 }
2143                 if (folio_test_uptodate(folio)) {
2144                         if (!buffer_uptodate(bh))
2145                                 set_buffer_uptodate(bh);
2146                         continue;
2147                 }
2148                 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
2149                     !buffer_unwritten(bh) &&
2150                      (block_start < from || block_end > to)) {
2151                         bh_read_nowait(bh, 0);
2152                         *wait_bh++ = bh;
2153                 }
2154         }
2155         /*
2156          * If we issued read requests - let them complete.
2157          */
2158         while (wait_bh > wait) {
2159                 wait_on_buffer(*--wait_bh);
2160                 if (!buffer_uptodate(*wait_bh))
2161                         err = -EIO;
2162         }
2163         if (unlikely(err))
2164                 folio_zero_new_buffers(folio, from, to);
2165         return err;
2166 }
2167
2168 int __block_write_begin(struct folio *folio, loff_t pos, unsigned len,
2169                 get_block_t *get_block)
2170 {
2171         return __block_write_begin_int(folio, pos, len, get_block, NULL);
2172 }
2173 EXPORT_SYMBOL(__block_write_begin);
2174
2175 static void __block_commit_write(struct folio *folio, size_t from, size_t to)
2176 {
2177         size_t block_start, block_end;
2178         bool partial = false;
2179         unsigned blocksize;
2180         struct buffer_head *bh, *head;
2181
2182         bh = head = folio_buffers(folio);
2183         if (!bh)
2184                 return;
2185         blocksize = bh->b_size;
2186
2187         block_start = 0;
2188         do {
2189                 block_end = block_start + blocksize;
2190                 if (block_end <= from || block_start >= to) {
2191                         if (!buffer_uptodate(bh))
2192                                 partial = true;
2193                 } else {
2194                         set_buffer_uptodate(bh);
2195                         mark_buffer_dirty(bh);
2196                 }
2197                 if (buffer_new(bh))
2198                         clear_buffer_new(bh);
2199
2200                 block_start = block_end;
2201                 bh = bh->b_this_page;
2202         } while (bh != head);
2203
2204         /*
2205          * If this is a partial write which happened to make all buffers
2206          * uptodate then we can optimize away a bogus read_folio() for
2207          * the next read(). Here we 'discover' whether the folio went
2208          * uptodate as a result of this (potentially partial) write.
2209          */
2210         if (!partial)
2211                 folio_mark_uptodate(folio);
2212 }
2213
2214 /*
2215  * block_write_begin takes care of the basic task of block allocation and
2216  * bringing partial write blocks uptodate first.
2217  *
2218  * The filesystem needs to handle block truncation upon failure.
2219  */
2220 int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
2221                 struct folio **foliop, get_block_t *get_block)
2222 {
2223         pgoff_t index = pos >> PAGE_SHIFT;
2224         struct folio *folio;
2225         int status;
2226
2227         folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
2228                         mapping_gfp_mask(mapping));
2229         if (IS_ERR(folio))
2230                 return PTR_ERR(folio);
2231
2232         status = __block_write_begin_int(folio, pos, len, get_block, NULL);
2233         if (unlikely(status)) {
2234                 folio_unlock(folio);
2235                 folio_put(folio);
2236                 folio = NULL;
2237         }
2238
2239         *foliop = folio;
2240         return status;
2241 }
2242 EXPORT_SYMBOL(block_write_begin);
2243
2244 int block_write_end(struct file *file, struct address_space *mapping,
2245                         loff_t pos, unsigned len, unsigned copied,
2246                         struct folio *folio, void *fsdata)
2247 {
2248         size_t start = pos - folio_pos(folio);
2249
2250         if (unlikely(copied < len)) {
2251                 /*
2252                  * The buffers that were written will now be uptodate, so
2253                  * we don't have to worry about a read_folio reading them
2254                  * and overwriting a partial write. However if we have
2255                  * encountered a short write and only partially written
2256                  * into a buffer, it will not be marked uptodate, so a
2257                  * read_folio might come in and destroy our partial write.
2258                  *
2259                  * Do the simplest thing, and just treat any short write to a
2260                  * non uptodate folio as a zero-length write, and force the
2261                  * caller to redo the whole thing.
2262                  */
2263                 if (!folio_test_uptodate(folio))
2264                         copied = 0;
2265
2266                 folio_zero_new_buffers(folio, start+copied, start+len);
2267         }
2268         flush_dcache_folio(folio);
2269
2270         /* This could be a short (even 0-length) commit */
2271         __block_commit_write(folio, start, start + copied);
2272
2273         return copied;
2274 }
2275 EXPORT_SYMBOL(block_write_end);
2276
2277 int generic_write_end(struct file *file, struct address_space *mapping,
2278                         loff_t pos, unsigned len, unsigned copied,
2279                         struct folio *folio, void *fsdata)
2280 {
2281         struct inode *inode = mapping->host;
2282         loff_t old_size = inode->i_size;
2283         bool i_size_changed = false;
2284
2285         copied = block_write_end(file, mapping, pos, len, copied, folio, fsdata);
2286
2287         /*
2288          * No need to use i_size_read() here, the i_size cannot change under us
2289          * because we hold i_rwsem.
2290          *
2291          * But it's important to update i_size while still holding folio lock:
2292          * page writeout could otherwise come in and zero beyond i_size.
2293          */
2294         if (pos + copied > inode->i_size) {
2295                 i_size_write(inode, pos + copied);
2296                 i_size_changed = true;
2297         }
2298
2299         folio_unlock(folio);
2300         folio_put(folio);
2301
2302         if (old_size < pos)
2303                 pagecache_isize_extended(inode, old_size, pos);
2304         /*
2305          * Don't mark the inode dirty under page lock. First, it unnecessarily
2306          * makes the holding time of page lock longer. Second, it forces lock
2307          * ordering of page lock and transaction start for journaling
2308          * filesystems.
2309          */
2310         if (i_size_changed)
2311                 mark_inode_dirty(inode);
2312         return copied;
2313 }
2314 EXPORT_SYMBOL(generic_write_end);
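
/*
 * Illustrative sketch, not part of the kernel source: a buffered write path
 * built from the helpers above.  example_get_block is the hypothetical
 * mapping routine from the earlier sketch, and example_write_failed stands
 * in for the block-truncation-on-failure the block_write_begin() comment
 * asks filesystems to handle; ->write_end can usually be generic_write_end()
 * itself.
 */
static void example_write_failed(struct address_space *mapping, loff_t to)
{
        struct inode *inode = mapping->host;

        if (to > inode->i_size) {
                truncate_pagecache(inode, inode->i_size);
                /* ... also free any blocks allocated beyond i_size ... */
        }
}

static int example_write_begin(struct file *file, struct address_space *mapping,
                               loff_t pos, unsigned len,
                               struct folio **foliop, void **fsdata)
{
        int err;

        err = block_write_begin(mapping, pos, len, foliop, example_get_block);
        if (err)
                example_write_failed(mapping, pos + len);
        return err;
}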
2315
2316 /*
2317  * block_is_partially_uptodate checks whether buffers within a folio are
2318  * uptodate or not.
2319  *
2320  * Returns true if all buffers which correspond to the specified part
2321  * of the folio are uptodate.
2322  */
2323 bool block_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
2324 {
2325         unsigned block_start, block_end, blocksize;
2326         unsigned to;
2327         struct buffer_head *bh, *head;
2328         bool ret = true;
2329
2330         head = folio_buffers(folio);
2331         if (!head)
2332                 return false;
2333         blocksize = head->b_size;
2334         to = min_t(unsigned, folio_size(folio) - from, count);
2335         to = from + to;
2336         if (from < blocksize && to > folio_size(folio) - blocksize)
2337                 return false;
2338
2339         bh = head;
2340         block_start = 0;
2341         do {
2342                 block_end = block_start + blocksize;
2343                 if (block_end > from && block_start < to) {
2344                         if (!buffer_uptodate(bh)) {
2345                                 ret = false;
2346                                 break;
2347                         }
2348                         if (block_end >= to)
2349                                 break;
2350                 }
2351                 block_start = block_end;
2352                 bh = bh->b_this_page;
2353         } while (bh != head);
2354
2355         return ret;
2356 }
2357 EXPORT_SYMBOL(block_is_partially_uptodate);
2358
2359 /*
2360  * Generic "read_folio" function for block devices that have the normal
2361  * get_block functionality. This is most of the block device filesystems.
2362  * Reads the folio asynchronously --- the unlock_buffer() and
2363  * set/clear_buffer_uptodate() functions propagate buffer state into the
2364  * folio once IO has completed.
2365  */
2366 int block_read_full_folio(struct folio *folio, get_block_t *get_block)
2367 {
2368         struct inode *inode = folio->mapping->host;
2369         sector_t iblock, lblock;
2370         struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2371         size_t blocksize;
2372         int nr, i;
2373         int fully_mapped = 1;
2374         bool page_error = false;
2375         loff_t limit = i_size_read(inode);
2376
2377         /* This is needed for ext4. */
2378         if (IS_ENABLED(CONFIG_FS_VERITY) && IS_VERITY(inode))
2379                 limit = inode->i_sb->s_maxbytes;
2380
2381         VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
2382
2383         head = folio_create_buffers(folio, inode, 0);
2384         blocksize = head->b_size;
2385
2386         iblock = div_u64(folio_pos(folio), blocksize);
2387         lblock = div_u64(limit + blocksize - 1, blocksize);
2388         bh = head;
2389         nr = 0;
2390         i = 0;
2391
2392         do {
2393                 if (buffer_uptodate(bh))
2394                         continue;
2395
2396                 if (!buffer_mapped(bh)) {
2397                         int err = 0;
2398
2399                         fully_mapped = 0;
2400                         if (iblock < lblock) {
2401                                 WARN_ON(bh->b_size != blocksize);
2402                                 err = get_block(inode, iblock, bh, 0);
2403                                 if (err)
2404                                         page_error = true;
2405                         }
2406                         if (!buffer_mapped(bh)) {
2407                                 folio_zero_range(folio, i * blocksize,
2408                                                 blocksize);
2409                                 if (!err)
2410                                         set_buffer_uptodate(bh);
2411                                 continue;
2412                         }
2413                         /*
2414                          * get_block() might have updated the buffer
2415                          * synchronously
2416                          */
2417                         if (buffer_uptodate(bh))
2418                                 continue;
2419                 }
2420                 arr[nr++] = bh;
2421         } while (i++, iblock++, (bh = bh->b_this_page) != head);
2422
2423         if (fully_mapped)
2424                 folio_set_mappedtodisk(folio);
2425
2426         if (!nr) {
2427                 /*
2428                  * All buffers are uptodate or get_block() returned an
2429                  * error when trying to map them - we can finish the read.
2430                  */
2431                 folio_end_read(folio, !page_error);
2432                 return 0;
2433         }
2434
2435         /* Stage two: lock the buffers */
2436         for (i = 0; i < nr; i++) {
2437                 bh = arr[i];
2438                 lock_buffer(bh);
2439                 mark_buffer_async_read(bh);
2440         }
2441
2442         /*
2443          * Stage 3: start the IO.  Check for uptodateness
2444          * inside the buffer lock in case another process reading
2445          * the underlying blockdev brought it uptodate (the sct fix).
2446          */
2447         for (i = 0; i < nr; i++) {
2448                 bh = arr[i];
2449                 if (buffer_uptodate(bh))
2450                         end_buffer_async_read(bh, 1);
2451                 else
2452                         submit_bh(REQ_OP_READ, bh);
2453         }
2454         return 0;
2455 }
2456 EXPORT_SYMBOL(block_read_full_folio);
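
/*
 * Illustrative sketch, not part of the kernel source: the read side of a
 * buffer-head based filesystem is usually a one-line wrapper like the one
 * below, wired into address_space_operations alongside the generic helpers
 * from this file (block_dirty_folio, block_invalidate_folio,
 * block_is_partially_uptodate, try_to_free_buffers).  example_get_block is
 * the hypothetical mapping routine from the earlier sketch.
 */
static int example_read_folio(struct file *file, struct folio *folio)
{
        return block_read_full_folio(folio, example_get_block);
}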
2457
2458 /* utility function for filesystems that need to do work on expanding
2459  * truncates.  Uses filesystem pagecache writes to allow the filesystem to
2460  * deal with the hole.
2461  */
2462 int generic_cont_expand_simple(struct inode *inode, loff_t size)
2463 {
2464         struct address_space *mapping = inode->i_mapping;
2465         const struct address_space_operations *aops = mapping->a_ops;
2466         struct folio *folio;
2467         void *fsdata = NULL;
2468         int err;
2469
2470         err = inode_newsize_ok(inode, size);
2471         if (err)
2472                 goto out;
2473
2474         err = aops->write_begin(NULL, mapping, size, 0, &folio, &fsdata);
2475         if (err)
2476                 goto out;
2477
2478         err = aops->write_end(NULL, mapping, size, 0, 0, folio, fsdata);
2479         BUG_ON(err > 0);
2480
2481 out:
2482         return err;
2483 }
2484 EXPORT_SYMBOL(generic_cont_expand_simple);
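
/*
 * Illustrative sketch, not part of the kernel source: a filesystem that
 * cannot represent holes might call generic_cont_expand_simple() from its
 * ->setattr path when a file is being enlarged, before committing the new
 * size.  Locking and the rest of setattr are elided.
 */
static int example_expand(struct inode *inode, loff_t newsize)
{
        if (newsize <= i_size_read(inode))
                return 0;
        return generic_cont_expand_simple(inode, newsize);
}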
2485
2486 static int cont_expand_zero(struct file *file, struct address_space *mapping,
2487                             loff_t pos, loff_t *bytes)
2488 {
2489         struct inode *inode = mapping->host;
2490         const struct address_space_operations *aops = mapping->a_ops;
2491         unsigned int blocksize = i_blocksize(inode);
2492         struct folio *folio;
2493         void *fsdata = NULL;
2494         pgoff_t index, curidx;
2495         loff_t curpos;
2496         unsigned zerofrom, offset, len;
2497         int err = 0;
2498
2499         index = pos >> PAGE_SHIFT;
2500         offset = pos & ~PAGE_MASK;
2501
2502         while (index > (curidx = (curpos = *bytes)>>PAGE_SHIFT)) {
2503                 zerofrom = curpos & ~PAGE_MASK;
2504                 if (zerofrom & (blocksize-1)) {
2505                         *bytes |= (blocksize-1);
2506                         (*bytes)++;
2507                 }
2508                 len = PAGE_SIZE - zerofrom;
2509
2510                 err = aops->write_begin(file, mapping, curpos, len,
2511                                             &folio, &fsdata);
2512                 if (err)
2513                         goto out;
2514                 folio_zero_range(folio, offset_in_folio(folio, curpos), len);
2515                 err = aops->write_end(file, mapping, curpos, len, len,
2516                                                 folio, fsdata);
2517                 if (err < 0)
2518                         goto out;
2519                 BUG_ON(err != len);
2520                 err = 0;
2521
2522                 balance_dirty_pages_ratelimited(mapping);
2523
2524                 if (fatal_signal_pending(current)) {
2525                         err = -EINTR;
2526                         goto out;
2527                 }
2528         }
2529
2530         /* page covers the boundary, find the boundary offset */
2531         if (index == curidx) {
2532                 zerofrom = curpos & ~PAGE_MASK;
2533                 /* If we are expanding the file, the last block will be filled */
2534                 if (offset <= zerofrom) {
2535                         goto out;
2536                 }
2537                 if (zerofrom & (blocksize-1)) {
2538                         *bytes |= (blocksize-1);
2539                         (*bytes)++;
2540                 }
2541                 len = offset - zerofrom;
2542
2543                 err = aops->write_begin(file, mapping, curpos, len,
2544                                             &folio, &fsdata);
2545                 if (err)
2546                         goto out;
2547                 folio_zero_range(folio, offset_in_folio(folio, curpos), len);
2548                 err = aops->write_end(file, mapping, curpos, len, len,
2549                                                 folio, fsdata);
2550                 if (err < 0)
2551                         goto out;
2552                 BUG_ON(err != len);
2553                 err = 0;
2554         }
2555 out:
2556         return err;
2557 }
2558
2559 /*
2560  * For moronic filesystems that do not allow holes in files.
2561  * We may have to extend the file.
2562  */
2563 int cont_write_begin(struct file *file, struct address_space *mapping,
2564                         loff_t pos, unsigned len,
2565                         struct folio **foliop, void **fsdata,
2566                         get_block_t *get_block, loff_t *bytes)
2567 {
2568         struct inode *inode = mapping->host;
2569         unsigned int blocksize = i_blocksize(inode);
2570         unsigned int zerofrom;
2571         int err;
2572
2573         err = cont_expand_zero(file, mapping, pos, bytes);
2574         if (err)
2575                 return err;
2576
2577         zerofrom = *bytes & ~PAGE_MASK;
2578         if (pos+len > *bytes && zerofrom & (blocksize-1)) {
2579                 *bytes |= (blocksize-1);
2580                 (*bytes)++;
2581         }
2582
2583         return block_write_begin(mapping, pos, len, foliop, get_block);
2584 }
2585 EXPORT_SYMBOL(cont_write_begin);
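
/*
 * Illustrative sketch, not part of the kernel source: a ->write_begin for a
 * hole-less filesystem.  The "initialised so far" counter that
 * cont_write_begin() maintains would normally live in the filesystem's
 * per-inode info; a file-scope variable stands in for it here, and
 * example_get_block is the hypothetical mapping routine from the earlier
 * sketch.
 */
static loff_t example_initialised_bytes;        /* hypothetical per-inode state */

static int example_cont_write_begin(struct file *file,
                                    struct address_space *mapping,
                                    loff_t pos, unsigned len,
                                    struct folio **foliop, void **fsdata)
{
        /* Zero-fills from example_initialised_bytes up to @pos, then does
         * the usual block_write_begin() work for [pos, pos + len). */
        return cont_write_begin(file, mapping, pos, len, foliop, fsdata,
                                example_get_block, &example_initialised_bytes);
}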
2586
2587 void block_commit_write(struct page *page, unsigned from, unsigned to)
2588 {
2589         struct folio *folio = page_folio(page);
2590         __block_commit_write(folio, from, to);
2591 }
2592 EXPORT_SYMBOL(block_commit_write);
2593
2594 /*
2595  * block_page_mkwrite() is not allowed to change the file size as it gets
2596  * called from a page fault handler when a page is first dirtied. Hence we must
2597  * be careful to check for EOF conditions here. We set the page up correctly
2598  * for a written page which means we get ENOSPC checking when writing into
2599  * holes and correct delalloc and unwritten extent mapping on filesystems that
2600  * support these features.
2601  *
2602  * We are not allowed to take the i_mutex here so we have to play games to
2603  * protect against truncate races as the page could now be beyond EOF.  Because
2604  * truncate writes the inode size before removing pages, once we have the
2605  * page lock we can determine safely if the page is beyond EOF. If it is not
2606  * beyond EOF, then the page is guaranteed safe against truncation until we
2607  * unlock the page.
2608  *
2609  * Direct callers of this function should protect against filesystem freezing
2610  * using sb_start_pagefault() - sb_end_pagefault() functions.
2611  */
2612 int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
2613                          get_block_t get_block)
2614 {
2615         struct folio *folio = page_folio(vmf->page);
2616         struct inode *inode = file_inode(vma->vm_file);
2617         unsigned long end;
2618         loff_t size;
2619         int ret;
2620
2621         folio_lock(folio);
2622         size = i_size_read(inode);
2623         if ((folio->mapping != inode->i_mapping) ||
2624             (folio_pos(folio) >= size)) {
2625                 /* We overload EFAULT to mean page got truncated */
2626                 ret = -EFAULT;
2627                 goto out_unlock;
2628         }
2629
2630         end = folio_size(folio);
2631         /* folio is wholly or partially inside EOF */
2632         if (folio_pos(folio) + end > size)
2633                 end = size - folio_pos(folio);
2634
2635         ret = __block_write_begin_int(folio, 0, end, get_block, NULL);
2636         if (unlikely(ret))
2637                 goto out_unlock;
2638
2639         __block_commit_write(folio, 0, end);
2640
2641         folio_mark_dirty(folio);
2642         folio_wait_stable(folio);
2643         return 0;
2644 out_unlock:
2645         folio_unlock(folio);
2646         return ret;
2647 }
2648 EXPORT_SYMBOL(block_page_mkwrite);
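
/*
 * Illustrative sketch, not part of the kernel source: a ->page_mkwrite
 * handler built on block_page_mkwrite(), with the freeze protection the
 * comment above asks direct callers to provide.  example_get_block is the
 * hypothetical mapping routine from the earlier sketch; real filesystems
 * often use block_page_mkwrite_return() to map the error codes.
 */
static vm_fault_t example_page_mkwrite(struct vm_fault *vmf)
{
        struct inode *inode = file_inode(vmf->vma->vm_file);
        int err;

        sb_start_pagefault(inode->i_sb);
        file_update_time(vmf->vma->vm_file);
        err = block_page_mkwrite(vmf->vma, vmf, example_get_block);
        sb_end_pagefault(inode->i_sb);

        if (err == -EFAULT)             /* the folio was truncated under us */
                return VM_FAULT_NOPAGE;
        if (err < 0)
                return vmf_error(err);
        return VM_FAULT_LOCKED;         /* folio left locked, dirty and stable */
}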
2649
2650 int block_truncate_page(struct address_space *mapping,
2651                         loff_t from, get_block_t *get_block)
2652 {
2653         pgoff_t index = from >> PAGE_SHIFT;
2654         unsigned blocksize;
2655         sector_t iblock;
2656         size_t offset, length, pos;
2657         struct inode *inode = mapping->host;
2658         struct folio *folio;
2659         struct buffer_head *bh;
2660         int err = 0;
2661
2662         blocksize = i_blocksize(inode);
2663         length = from & (blocksize - 1);
2664
2665         /* Block boundary? Nothing to do */
2666         if (!length)
2667                 return 0;
2668
2669         length = blocksize - length;
2670         iblock = ((loff_t)index * PAGE_SIZE) >> inode->i_blkbits;
2671
2672         folio = filemap_grab_folio(mapping, index);
2673         if (IS_ERR(folio))
2674                 return PTR_ERR(folio);
2675
2676         bh = folio_buffers(folio);
2677         if (!bh)
2678                 bh = create_empty_buffers(folio, blocksize, 0);
2679
2680         /* Find the buffer that contains "offset" */
2681         offset = offset_in_folio(folio, from);
2682         pos = blocksize;
2683         while (offset >= pos) {
2684                 bh = bh->b_this_page;
2685                 iblock++;
2686                 pos += blocksize;
2687         }
2688
2689         if (!buffer_mapped(bh)) {
2690                 WARN_ON(bh->b_size != blocksize);
2691                 err = get_block(inode, iblock, bh, 0);
2692                 if (err)
2693                         goto unlock;
2694                 /* unmapped? It's a hole - nothing to do */
2695                 if (!buffer_mapped(bh))
2696                         goto unlock;
2697         }
2698
2699         /* Ok, it's mapped. Make sure it's up-to-date */
2700         if (folio_test_uptodate(folio))
2701                 set_buffer_uptodate(bh);
2702
2703         if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
2704                 err = bh_read(bh, 0);
2705                 /* Uhhuh. Read error. Complain and punt. */
2706                 if (err < 0)
2707                         goto unlock;
2708         }
2709
2710         folio_zero_range(folio, offset, length);
2711         mark_buffer_dirty(bh);
2712
2713 unlock:
2714         folio_unlock(folio);
2715         folio_put(folio);
2716
2717         return err;
2718 }
2719 EXPORT_SYMBOL(block_truncate_page);
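
/*
 * Illustrative sketch, not part of the kernel source: when shrinking a file,
 * a filesystem typically zeroes the tail of the new last block with
 * block_truncate_page() so stale data cannot leak past EOF, then updates
 * i_size and frees the now-unused blocks.  example_get_block is the
 * hypothetical mapping routine from the earlier sketch; block freeing is
 * elided.
 */
static int example_shrink(struct inode *inode, loff_t newsize)
{
        int err;

        err = block_truncate_page(inode->i_mapping, newsize, example_get_block);
        if (err)
                return err;

        truncate_setsize(inode, newsize);
        /* ... release the blocks beyond newsize in the filesystem ... */
        return 0;
}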
2720
2721 /*
2722  * The generic ->writepage function for buffer-backed address_spaces
2723  */
2724 int block_write_full_folio(struct folio *folio, struct writeback_control *wbc,
2725                 void *get_block)
2726 {
2727         struct inode * const inode = folio->mapping->host;
2728         loff_t i_size = i_size_read(inode);
2729
2730         /* Is the folio fully inside i_size? */
2731         if (folio_pos(folio) + folio_size(folio) <= i_size)
2732                 return __block_write_full_folio(inode, folio, get_block, wbc);
2733
2734         /* Is the folio fully outside i_size? (truncate in progress) */
2735         if (folio_pos(folio) >= i_size) {
2736                 folio_unlock(folio);
2737                 return 0; /* don't care */
2738         }
2739
2740         /*
2741          * The folio straddles i_size.  It must be zeroed out on each and every
2742          * writepage invocation because it may be mmapped.  "A file is mapped
2743          * in multiples of the page size.  For a file that is not a multiple of
2744          * the page size, the remaining memory is zeroed when mapped, and
2745          * writes to that region are not written out to the file."
2746          */
2747         folio_zero_segment(folio, offset_in_folio(folio, i_size),
2748                         folio_size(folio));
2749         return __block_write_full_folio(inode, folio, get_block, wbc);
2750 }
2751
2752 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2753                             get_block_t *get_block)
2754 {
2755         struct inode *inode = mapping->host;
2756         struct buffer_head tmp = {
2757                 .b_size = i_blocksize(inode),
2758         };
2759
2760         get_block(inode, block, &tmp, 0);
2761         return tmp.b_blocknr;
2762 }
2763 EXPORT_SYMBOL(generic_block_bmap);
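
/*
 * Illustrative sketch, not part of the kernel source: the usual ->bmap
 * wrapper for FIBMAP, using the hypothetical example_get_block from the
 * earlier sketch.
 */
static sector_t example_bmap(struct address_space *mapping, sector_t block)
{
        return generic_block_bmap(mapping, block, example_get_block);
}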
2764
2765 static void end_bio_bh_io_sync(struct bio *bio)
2766 {
2767         struct buffer_head *bh = bio->bi_private;
2768
2769         if (unlikely(bio_flagged(bio, BIO_QUIET)))
2770                 set_bit(BH_Quiet, &bh->b_state);
2771
2772         bh->b_end_io(bh, !bio->bi_status);
2773         bio_put(bio);
2774 }
2775
2776 static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
2777                           enum rw_hint write_hint,
2778                           struct writeback_control *wbc)
2779 {
2780         const enum req_op op = opf & REQ_OP_MASK;
2781         struct bio *bio;
2782
2783         BUG_ON(!buffer_locked(bh));
2784         BUG_ON(!buffer_mapped(bh));
2785         BUG_ON(!bh->b_end_io);
2786         BUG_ON(buffer_delay(bh));
2787         BUG_ON(buffer_unwritten(bh));
2788
2789         /*
2790          * Only clear out a write error when rewriting
2791          */
2792         if (test_set_buffer_req(bh) && (op == REQ_OP_WRITE))
2793                 clear_buffer_write_io_error(bh);
2794
2795         if (buffer_meta(bh))
2796                 opf |= REQ_META;
2797         if (buffer_prio(bh))
2798                 opf |= REQ_PRIO;
2799
2800         bio = bio_alloc(bh->b_bdev, 1, opf, GFP_NOIO);
2801
2802         fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
2803
2804         bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2805         bio->bi_write_hint = write_hint;
2806
2807         bio_add_folio_nofail(bio, bh->b_folio, bh->b_size, bh_offset(bh));
2808
2809         bio->bi_end_io = end_bio_bh_io_sync;
2810         bio->bi_private = bh;
2811
2812         /* Take care of bh's that straddle the end of the device */
2813         guard_bio_eod(bio);
2814
2815         if (wbc) {
2816                 wbc_init_bio(wbc, bio);
2817                 wbc_account_cgroup_owner(wbc, bh->b_folio, bh->b_size);
2818         }
2819
2820         submit_bio(bio);
2821 }
2822
2823 void submit_bh(blk_opf_t opf, struct buffer_head *bh)
2824 {
2825         submit_bh_wbc(opf, bh, WRITE_LIFE_NOT_SET, NULL);
2826 }
2827 EXPORT_SYMBOL(submit_bh);
2828
2829 void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags)
2830 {
2831         lock_buffer(bh);
2832         if (!test_clear_buffer_dirty(bh)) {
2833                 unlock_buffer(bh);
2834                 return;
2835         }
2836         bh->b_end_io = end_buffer_write_sync;
2837         get_bh(bh);
2838         submit_bh(REQ_OP_WRITE | op_flags, bh);
2839 }
2840 EXPORT_SYMBOL(write_dirty_buffer);
2841
2842 /*
2843  * For a data-integrity writeout, we need to wait upon any in-progress I/O,
2844  * then start new I/O and wait upon that.  The caller must have a ref on
2845  * the buffer_head.
2846  */
2847 int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags)
2848 {
2849         WARN_ON(atomic_read(&bh->b_count) < 1);
2850         lock_buffer(bh);
2851         if (test_clear_buffer_dirty(bh)) {
2852                 /*
2853                  * The bh should be mapped, but it might not be if the
2854                  * device was hot-removed. Not much we can do but fail the I/O.
2855                  */
2856                 if (!buffer_mapped(bh)) {
2857                         unlock_buffer(bh);
2858                         return -EIO;
2859                 }
2860
2861                 get_bh(bh);
2862                 bh->b_end_io = end_buffer_write_sync;
2863                 submit_bh(REQ_OP_WRITE | op_flags, bh);
2864                 wait_on_buffer(bh);
2865                 if (!buffer_uptodate(bh))
2866                         return -EIO;
2867         } else {
2868                 unlock_buffer(bh);
2869         }
2870         return 0;
2871 }
2872 EXPORT_SYMBOL(__sync_dirty_buffer);
2873
2874 int sync_dirty_buffer(struct buffer_head *bh)
2875 {
2876         return __sync_dirty_buffer(bh, REQ_SYNC);
2877 }
2878 EXPORT_SYMBOL(sync_dirty_buffer);
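
/*
 * Illustrative sketch, not part of the kernel source: synchronously
 * committing a modified metadata buffer, for example an on-disk super
 * block.  The caller is assumed to hold a reference on @bh and to have
 * filled in bh->b_data already.
 */
static int example_commit_metadata(struct buffer_head *bh)
{
        mark_buffer_dirty(bh);
        return sync_dirty_buffer(bh);   /* submits and waits for the write */
}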
2879
2880 static inline int buffer_busy(struct buffer_head *bh)
2881 {
2882         return atomic_read(&bh->b_count) |
2883                 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
2884 }
2885
2886 static bool
2887 drop_buffers(struct folio *folio, struct buffer_head **buffers_to_free)
2888 {
2889         struct buffer_head *head = folio_buffers(folio);
2890         struct buffer_head *bh;
2891
2892         bh = head;
2893         do {
2894                 if (buffer_busy(bh))
2895                         goto failed;
2896                 bh = bh->b_this_page;
2897         } while (bh != head);
2898
2899         do {
2900                 struct buffer_head *next = bh->b_this_page;
2901
2902                 if (bh->b_assoc_map)
2903                         __remove_assoc_queue(bh);
2904                 bh = next;
2905         } while (bh != head);
2906         *buffers_to_free = head;
2907         folio_detach_private(folio);
2908         return true;
2909 failed:
2910         return false;
2911 }
2912
2913 /**
2914  * try_to_free_buffers - Release buffers attached to this folio.
2915  * @folio: The folio.
2916  *
2917  * If any buffers are in use (dirty, under writeback, elevated refcount),
2918  * no buffers will be freed.
2919  *
2920  * If the folio is dirty but all the buffers are clean then we need to
2921  * be sure to mark the folio clean as well.  This is because the folio
2922  * may be against a block device, and a later reattachment of buffers
2923  * to a dirty folio will set *all* buffers dirty, which would corrupt
2924  * filesystem data on the same device.
2925  *
2926  * The same applies to regular filesystem folios: if all the buffers are
2927  * clean then we set the folio clean and proceed.  To do that, we require
2928  * total exclusion from block_dirty_folio().  That is obtained with
2929  * i_private_lock.
2930  *
2931  * Exclusion against try_to_free_buffers may be obtained by either
2932  * locking the folio or by holding its mapping's i_private_lock.
2933  *
2934  * Context: Process context.  @folio must be locked.  Will not sleep.
2935  * Return: true if all buffers attached to this folio were freed.
2936  */
2937 bool try_to_free_buffers(struct folio *folio)
2938 {
2939         struct address_space * const mapping = folio->mapping;
2940         struct buffer_head *buffers_to_free = NULL;
2941         bool ret = false;
2942
2943         BUG_ON(!folio_test_locked(folio));
2944         if (folio_test_writeback(folio))
2945                 return false;
2946
2947         if (mapping == NULL) {          /* can this still happen? */
2948                 ret = drop_buffers(folio, &buffers_to_free);
2949                 goto out;
2950         }
2951
2952         spin_lock(&mapping->i_private_lock);
2953         ret = drop_buffers(folio, &buffers_to_free);
2954
2955         /*
2956          * If the filesystem writes its buffers by hand (eg ext3)
2957          * then we can have clean buffers against a dirty folio.  We
2958          * clean the folio here; otherwise the VM will never notice
2959          * that the filesystem did any IO at all.
2960          *
2961          * Also, during truncate, discard_buffer will have marked all
2962          * the folio's buffers clean.  We discover that here and clean
2963          * the folio also.
2964          *
2965          * i_private_lock must be held over this entire operation in order
2966          * to synchronise against block_dirty_folio and prevent the
2967          * dirty bit from being lost.
2968          */
2969         if (ret)
2970                 folio_cancel_dirty(folio);
2971         spin_unlock(&mapping->i_private_lock);
2972 out:
2973         if (buffers_to_free) {
2974                 struct buffer_head *bh = buffers_to_free;
2975
2976                 do {
2977                         struct buffer_head *next = bh->b_this_page;
2978                         free_buffer_head(bh);
2979                         bh = next;
2980                 } while (bh != buffers_to_free);
2981         }
2982         return ret;
2983 }
2984 EXPORT_SYMBOL(try_to_free_buffers);
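/*
 * Illustrative sketch (not part of this file): block-based filesystems
 * commonly wire try_to_free_buffers() up as their ->release_folio()
 * method, which satisfies the locking rules described above because the
 * folio is locked when that method is called.  The function name below is
 * hypothetical; only try_to_free_buffers() itself is real.
 *
 *	static bool example_release_folio(struct folio *folio, gfp_t gfp)
 *	{
 *		// Folio is locked here, giving exclusion against
 *		// block_dirty_folio() as required.
 *		return try_to_free_buffers(folio);
 *	}
 */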
2985
2986 /*
2987  * Buffer-head allocation
2988  */
2989 static struct kmem_cache *bh_cachep __ro_after_init;
2990
2991 /*
2992  * Once the number of bh's in the machine exceeds this level, we start
2993  * stripping them in writeback.
2994  */
2995 static unsigned long max_buffer_heads __ro_after_init;
2996
2997 int buffer_heads_over_limit;
2998
2999 struct bh_accounting {
3000         int nr;                 /* Number of live bh's */
3001         int ratelimit;          /* Limit cacheline bouncing */
3002 };
3003
3004 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3005
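/*
 * Fold the per-CPU counts into the global buffer_heads_over_limit flag.
 * Summing across all online CPUs is comparatively expensive, so it is
 * rate-limited to roughly once per 4096 local allocations/frees.
 */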
3006 static void recalc_bh_state(void)
3007 {
3008         int i;
3009         int tot = 0;
3010
3011         if (__this_cpu_inc_return(bh_accounting.ratelimit) - 1 < 4096)
3012                 return;
3013         __this_cpu_write(bh_accounting.ratelimit, 0);
3014         for_each_online_cpu(i)
3015                 tot += per_cpu(bh_accounting, i).nr;
3016         buffer_heads_over_limit = (tot > max_buffer_heads);
3017 }
3018
3019 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
3020 {
3021         struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
3022         if (ret) {
3023                 INIT_LIST_HEAD(&ret->b_assoc_buffers);
3024                 spin_lock_init(&ret->b_uptodate_lock);
3025                 preempt_disable();
3026                 __this_cpu_inc(bh_accounting.nr);
3027                 recalc_bh_state();
3028                 preempt_enable();
3029         }
3030         return ret;
3031 }
3032 EXPORT_SYMBOL(alloc_buffer_head);
3033
3034 void free_buffer_head(struct buffer_head *bh)
3035 {
3036         BUG_ON(!list_empty(&bh->b_assoc_buffers));
3037         kmem_cache_free(bh_cachep, bh);
3038         preempt_disable();
3039         __this_cpu_dec(bh_accounting.nr);
3040         recalc_bh_state();
3041         preempt_enable();
3042 }
3043 EXPORT_SYMBOL(free_buffer_head);
3044
3045 static int buffer_exit_cpu_dead(unsigned int cpu)
3046 {
3047         int i;
3048         struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3049
3050         for (i = 0; i < BH_LRU_SIZE; i++) {
3051                 brelse(b->bhs[i]);
3052                 b->bhs[i] = NULL;
3053         }
3054         this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr);
3055         per_cpu(bh_accounting, cpu).nr = 0;
3056         return 0;
3057 }
3058
3059 /**
3060  * bh_uptodate_or_lock - Test whether the buffer is uptodate
3061  * @bh: struct buffer_head
3062  *
3063  * Return 1 if the buffer is up-to-date, or 0, with the buffer
3064  * locked, if it is not.
3065  */
3066 int bh_uptodate_or_lock(struct buffer_head *bh)
3067 {
3068         if (!buffer_uptodate(bh)) {
3069                 lock_buffer(bh);
3070                 if (!buffer_uptodate(bh))
3071                         return 0;
3072                 unlock_buffer(bh);
3073         }
3074         return 1;
3075 }
3076 EXPORT_SYMBOL(bh_uptodate_or_lock);
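/*
 * Illustrative sketch (not part of this file): the usual caller pattern
 * combines bh_uptodate_or_lock() with __bh_read() so the buffer is only
 * read from disk when necessary.  This mirrors the bh_read() helper in
 * <linux/buffer_head.h>; the function name here is hypothetical.
 *
 *	static int example_read_bh(struct buffer_head *bh)
 *	{
 *		if (bh_uptodate_or_lock(bh))
 *			return 0;		// already up-to-date
 *		// Buffer is now locked and not up-to-date: submit a
 *		// read and wait for it to complete.
 *		return __bh_read(bh, 0, true);
 *	}
 */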
3077
3078 /**
3079  * __bh_read - Submit read for a locked buffer
3080  * @bh: struct buffer_head
3081  * @op_flags: additional REQ_OP_* flags to apply besides REQ_OP_READ
3082  * @wait: wait until the read has finished
3083  *
3084  * Returns zero on success (or when @wait is false), and -EIO on error.
3085  */
3086 int __bh_read(struct buffer_head *bh, blk_opf_t op_flags, bool wait)
3087 {
3088         int ret = 0;
3089
3090         BUG_ON(!buffer_locked(bh));
3091
3092         get_bh(bh);
3093         bh->b_end_io = end_buffer_read_sync;
3094         submit_bh(REQ_OP_READ | op_flags, bh);
3095         if (wait) {
3096                 wait_on_buffer(bh);
3097                 if (!buffer_uptodate(bh))
3098                         ret = -EIO;
3099         }
3100         return ret;
3101 }
3102 EXPORT_SYMBOL(__bh_read);
3103
3104 /**
3105  * __bh_read_batch - Submit reads for a batch of unlocked buffers
3106  * @nr: number of entries in the buffer batch
3107  * @bhs: a batch of struct buffer_head
3108  * @op_flags: additional REQ_OP_* flags to apply besides REQ_OP_READ
3109  * @force_lock: if set, wait for each buffer's lock; otherwise skip any
3110  *              buffer that cannot be locked immediately.
3111  *
3112  * Reads are submitted asynchronously; completion is not awaited here.
3113  */
3114 void __bh_read_batch(int nr, struct buffer_head *bhs[],
3115                      blk_opf_t op_flags, bool force_lock)
3116 {
3117         int i;
3118
3119         for (i = 0; i < nr; i++) {
3120                 struct buffer_head *bh = bhs[i];
3121
3122                 if (buffer_uptodate(bh))
3123                         continue;
3124
3125                 if (force_lock)
3126                         lock_buffer(bh);
3127                 else
3128                         if (!trylock_buffer(bh))
3129                                 continue;
3130
3131                 if (buffer_uptodate(bh)) {
3132                         unlock_buffer(bh);
3133                         continue;
3134                 }
3135
3136                 bh->b_end_io = end_buffer_read_sync;
3137                 get_bh(bh);
3138                 submit_bh(REQ_OP_READ | op_flags, bh);
3139         }
3140 }
3141 EXPORT_SYMBOL(__bh_read_batch);
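/*
 * Illustrative sketch (not part of this file): __bh_read_batch() suits
 * opportunistic readahead, where contended or already up-to-date buffers
 * are simply skipped.  The helper name below is hypothetical; REQ_RAHEAD
 * and __bh_read_batch() are real.
 *
 *	static void example_readahead(struct buffer_head *bhs[], int nr)
 *	{
 *		// Do not wait for buffer locks and do not wait for
 *		// completion; end_buffer_read_sync() unlocks each buffer
 *		// as its read finishes.
 *		__bh_read_batch(nr, bhs, REQ_RAHEAD, false);
 *	}
 */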
3142
3143 void __init buffer_init(void)
3144 {
3145         unsigned long nrpages;
3146         int ret;
3147
3148         bh_cachep = KMEM_CACHE(buffer_head,
3149                                 SLAB_RECLAIM_ACCOUNT|SLAB_PANIC);
3150         /*
3151          * Limit the bh occupancy to 10% of ZONE_NORMAL
3152          */
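        /*
         * Worked example (figures are illustrative only): with 4 KiB pages
         * and a buffer_head of roughly 100 bytes, each counted page
         * translates into about 40 buffer heads, so 4 GiB of ZONE_NORMAL
         * allows on the order of four million buffer heads before
         * buffer_heads_over_limit is set.
         */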
3153         nrpages = (nr_free_buffer_pages() * 10) / 100;
3154         max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3155         ret = cpuhp_setup_state_nocalls(CPUHP_FS_BUFF_DEAD, "fs/buffer:dead",
3156                                         NULL, buffer_exit_cpu_dead);
3157         WARN_ON(ret < 0);
3158 }