// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
10 #include <linux/sched/mm.h>
11 #include <linux/mpage.h>
12 #include <linux/writeback.h>
13 #include <linux/pagevec.h>
14 #include <linux/blkdev.h>
15 #include <linux/bio.h>
16 #include <linux/blk-crypto.h>
17 #include <linux/swap.h>
18 #include <linux/prefetch.h>
19 #include <linux/uio.h>
20 #include <linux/sched/signal.h>
21 #include <linux/fiemap.h>
22 #include <linux/iomap.h>
#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "iostat.h"
#include <trace/events/f2fs.h>
30 #define NUM_PREALLOC_POST_READ_CTXS 128
32 static struct kmem_cache *bio_post_read_ctx_cache;
33 static struct kmem_cache *bio_entry_slab;
34 static mempool_t *bio_post_read_ctx_pool;
35 static struct bio_set f2fs_bioset;
37 #define F2FS_BIO_POOL_SIZE NR_CURSEG_TYPE
39 int __init f2fs_init_bioset(void)
41 return bioset_init(&f2fs_bioset, F2FS_BIO_POOL_SIZE,
42 0, BIOSET_NEED_BVECS);
45 void f2fs_destroy_bioset(void)
47 bioset_exit(&f2fs_bioset);
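/*
 * Return whether writeback of this page is guaranteed to be covered by a
 * checkpoint: meta/node pages, directory data, quota file data and pages
 * tagged for GC writes all qualify.
 */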
50 bool f2fs_is_cp_guaranteed(struct page *page)
52 struct address_space *mapping = page->mapping;
54 struct f2fs_sb_info *sbi;
59 inode = mapping->host;
60 sbi = F2FS_I_SB(inode);
62 if (inode->i_ino == F2FS_META_INO(sbi) ||
63 inode->i_ino == F2FS_NODE_INO(sbi) ||
64 S_ISDIR(inode->i_mode))
67 if ((S_ISREG(inode->i_mode) && IS_NOQUOTA(inode)) ||
68 page_private_gcing(page))
73 static enum count_type __read_io_type(struct page *page)
75 struct address_space *mapping = page_file_mapping(page);
78 struct inode *inode = mapping->host;
79 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
81 if (inode->i_ino == F2FS_META_INO(sbi))
84 if (inode->i_ino == F2FS_NODE_INO(sbi))
90 /* postprocessing steps for read bios */
91 enum bio_post_read_step {
92 #ifdef CONFIG_FS_ENCRYPTION
93 STEP_DECRYPT = BIT(0),
95 STEP_DECRYPT = 0, /* compile out the decryption-related code */
97 #ifdef CONFIG_F2FS_FS_COMPRESSION
98 STEP_DECOMPRESS = BIT(1),
100 STEP_DECOMPRESS = 0, /* compile out the decompression-related code */
102 #ifdef CONFIG_FS_VERITY
103 STEP_VERITY = BIT(2),
105 STEP_VERITY = 0, /* compile out the verity-related code */
struct bio_post_read_ctx {
	struct bio *bio;
	struct f2fs_sb_info *sbi;
	struct work_struct work;
	unsigned int enabled_steps;
	/*
	 * decompression_attempted keeps track of whether
	 * f2fs_end_read_compressed_page() has been called yet on the pages in
	 * the bio that belong to a compressed cluster.
	 */
	bool decompression_attempted;
	block_t fs_blkaddr;
};
/*
 * Update and unlock a bio's pages, and free the bio.
 *
 * This marks pages up-to-date only if there was no error in the bio (I/O error,
 * decryption error, or verity error), as indicated by bio->bi_status.
 *
 * "Compressed pages" (pagecache pages backed by a compressed cluster on-disk)
 * aren't marked up-to-date here, as decompression is done on a per-compression-
 * cluster basis rather than a per-bio basis.  Instead, we must do only two
 * things for each compressed page here: call f2fs_end_read_compressed_page()
 * with failed=true if an error occurred before it would have normally gotten
 * called (i.e., I/O error or decryption error, but *not* verity error), and
 * release the bio's reference to the decompress_io_ctx of the page's cluster.
 */
137 static void f2fs_finish_read_bio(struct bio *bio, bool in_task)
140 struct bvec_iter_all iter_all;
141 struct bio_post_read_ctx *ctx = bio->bi_private;
143 bio_for_each_segment_all(bv, bio, iter_all) {
144 struct page *page = bv->bv_page;
146 if (f2fs_is_compressed_page(page)) {
147 if (ctx && !ctx->decompression_attempted)
148 f2fs_end_read_compressed_page(page, true, 0,
150 f2fs_put_page_dic(page, in_task);
155 ClearPageUptodate(page);
157 SetPageUptodate(page);
158 dec_page_count(F2FS_P_SB(page), __read_io_type(page));
163 mempool_free(ctx, bio_post_read_ctx_pool);
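/*
 * Work function that verifies the data pages of a read bio with fs-verity and
 * then finishes the bio.  Scheduled via fsverity_enqueue_verify_work().
 */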
167 static void f2fs_verify_bio(struct work_struct *work)
169 struct bio_post_read_ctx *ctx =
170 container_of(work, struct bio_post_read_ctx, work);
171 struct bio *bio = ctx->bio;
172 bool may_have_compressed_pages = (ctx->enabled_steps & STEP_DECOMPRESS);
	/*
	 * fsverity_verify_bio() may call readahead() again, and while verity
	 * will be disabled for this, decryption and/or decompression may still
	 * be needed, resulting in another bio_post_read_ctx being allocated.
	 * So to prevent deadlocks we need to release the current ctx to the
	 * mempool first.  This assumes that verity is the last post-read step.
	 */
181 mempool_free(ctx, bio_post_read_ctx_pool);
182 bio->bi_private = NULL;
	/*
	 * Verify the bio's pages with fs-verity.  Exclude compressed pages,
	 * as those were handled separately by f2fs_end_read_compressed_page().
	 */
188 if (may_have_compressed_pages) {
190 struct bvec_iter_all iter_all;
192 bio_for_each_segment_all(bv, bio, iter_all) {
193 struct page *page = bv->bv_page;
195 if (!f2fs_is_compressed_page(page) &&
196 !fsverity_verify_page(page)) {
197 bio->bi_status = BLK_STS_IOERR;
202 fsverity_verify_bio(bio);
205 f2fs_finish_read_bio(bio, true);
/*
 * If the bio's data needs to be verified with fs-verity, then enqueue the
 * verity work for the bio.  Otherwise finish the bio now.
 *
 * Note that to avoid deadlocks, the verity work can't be done on the
 * decryption/decompression workqueue.  This is because verifying the data
 * pages can involve reading verity metadata pages from the file, and these
 * verity metadata pages may be encrypted and/or compressed.
 */
217 static void f2fs_verify_and_finish_bio(struct bio *bio, bool in_task)
219 struct bio_post_read_ctx *ctx = bio->bi_private;
221 if (ctx && (ctx->enabled_steps & STEP_VERITY)) {
222 INIT_WORK(&ctx->work, f2fs_verify_bio);
223 fsverity_enqueue_verify_work(&ctx->work);
225 f2fs_finish_read_bio(bio, in_task);
/*
 * Handle STEP_DECOMPRESS by decompressing any compressed clusters whose last
 * remaining page was read by @ctx->bio.
 *
 * Note that a bio may span clusters (even a mix of compressed and uncompressed
 * clusters) or be for just part of a cluster.  STEP_DECOMPRESS just indicates
 * that the bio includes at least one compressed page.  The actual decompression
 * is done on a per-cluster basis, not a per-bio basis.
 */
238 static void f2fs_handle_step_decompress(struct bio_post_read_ctx *ctx,
242 struct bvec_iter_all iter_all;
243 bool all_compressed = true;
244 block_t blkaddr = ctx->fs_blkaddr;
246 bio_for_each_segment_all(bv, ctx->bio, iter_all) {
247 struct page *page = bv->bv_page;
249 if (f2fs_is_compressed_page(page))
250 f2fs_end_read_compressed_page(page, false, blkaddr,
253 all_compressed = false;
258 ctx->decompression_attempted = true;
	/*
	 * Optimization: if all the bio's pages are compressed, then scheduling
	 * the per-bio verity work is unnecessary, as verity will be fully
	 * handled at the compression cluster level.
	 */
	if (all_compressed)
		ctx->enabled_steps &= ~STEP_VERITY;
269 static void f2fs_post_read_work(struct work_struct *work)
271 struct bio_post_read_ctx *ctx =
272 container_of(work, struct bio_post_read_ctx, work);
273 struct bio *bio = ctx->bio;
275 if ((ctx->enabled_steps & STEP_DECRYPT) && !fscrypt_decrypt_bio(bio)) {
276 f2fs_finish_read_bio(bio, true);
280 if (ctx->enabled_steps & STEP_DECOMPRESS)
281 f2fs_handle_step_decompress(ctx, true);
283 f2fs_verify_and_finish_bio(bio, true);
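/*
 * bi_end_io for read bios: update iostat, optionally inject a read error, and
 * either finish the bio immediately or kick off the post-read steps
 * (decryption, decompression, fs-verity).
 */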
286 static void f2fs_read_end_io(struct bio *bio)
288 struct f2fs_sb_info *sbi = F2FS_P_SB(bio_first_page_all(bio));
289 struct bio_post_read_ctx *ctx;
290 bool intask = in_task();
292 iostat_update_and_unbind_ctx(bio);
293 ctx = bio->bi_private;
295 if (time_to_inject(sbi, FAULT_READ_IO))
296 bio->bi_status = BLK_STS_IOERR;
298 if (bio->bi_status) {
299 f2fs_finish_read_bio(bio, intask);
304 unsigned int enabled_steps = ctx->enabled_steps &
305 (STEP_DECRYPT | STEP_DECOMPRESS);
		/*
		 * If decompression is the only post-read step needed (i.e. no
		 * decryption), handle it right here instead of queueing the
		 * post-read work.
		 */
311 if (enabled_steps == STEP_DECOMPRESS &&
312 !f2fs_low_mem_mode(sbi)) {
313 f2fs_handle_step_decompress(ctx, intask);
314 } else if (enabled_steps) {
315 INIT_WORK(&ctx->work, f2fs_post_read_work);
316 queue_work(ctx->sbi->post_read_wq, &ctx->work);
321 f2fs_verify_and_finish_bio(bio, intask);
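/*
 * bi_end_io for write bios: release fscrypt bounce pages, let compressed
 * pages be completed by the compression code, record mapping errors (and stop
 * checkpointing on a CP data write failure), and wake up checkpoint waiters
 * once all F2FS_WB_CP_DATA writeback has drained.
 */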
324 static void f2fs_write_end_io(struct bio *bio)
326 struct f2fs_sb_info *sbi;
327 struct bio_vec *bvec;
328 struct bvec_iter_all iter_all;
330 iostat_update_and_unbind_ctx(bio);
331 sbi = bio->bi_private;
333 if (time_to_inject(sbi, FAULT_WRITE_IO))
334 bio->bi_status = BLK_STS_IOERR;
336 bio_for_each_segment_all(bvec, bio, iter_all) {
337 struct page *page = bvec->bv_page;
338 enum count_type type = WB_DATA_TYPE(page, false);
340 fscrypt_finalize_bounce_page(&page);
342 #ifdef CONFIG_F2FS_FS_COMPRESSION
343 if (f2fs_is_compressed_page(page)) {
344 f2fs_compress_write_end_io(bio, page);
349 if (unlikely(bio->bi_status)) {
350 mapping_set_error(page->mapping, -EIO);
351 if (type == F2FS_WB_CP_DATA)
352 f2fs_stop_checkpoint(sbi, true,
353 STOP_CP_REASON_WRITE_FAIL);
356 f2fs_bug_on(sbi, page->mapping == NODE_MAPPING(sbi) &&
357 page_folio(page)->index != nid_of_node(page));
359 dec_page_count(sbi, type);
360 if (f2fs_in_warm_node_list(sbi, page))
361 f2fs_del_fsync_node_entry(sbi, page);
362 clear_page_private_gcing(page);
363 end_page_writeback(page);
365 if (!get_pages(sbi, F2FS_WB_CP_DATA) &&
366 wq_has_sleeper(&sbi->cp_wait))
367 wake_up(&sbi->cp_wait);
372 #ifdef CONFIG_BLK_DEV_ZONED
373 static void f2fs_zone_write_end_io(struct bio *bio)
375 struct f2fs_bio_info *io = (struct f2fs_bio_info *)bio->bi_private;
377 bio->bi_private = io->bi_private;
378 complete(&io->zone_wait);
379 f2fs_write_end_io(bio);
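/*
 * Map a filesystem block address to its backing block device and, optionally,
 * to the sector number on that device (multi-device aware).
 */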
383 struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
384 block_t blk_addr, sector_t *sector)
386 struct block_device *bdev = sbi->sb->s_bdev;
389 if (f2fs_is_multi_device(sbi)) {
390 for (i = 0; i < sbi->s_ndevs; i++) {
391 if (FDEV(i).start_blk <= blk_addr &&
392 FDEV(i).end_blk >= blk_addr) {
393 blk_addr -= FDEV(i).start_blk;
401 *sector = SECTOR_FROM_BLOCK(blk_addr);
405 int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr)
409 if (!f2fs_is_multi_device(sbi))
412 for (i = 0; i < sbi->s_ndevs; i++)
413 if (FDEV(i).start_blk <= blkaddr && FDEV(i).end_blk >= blkaddr)
418 static blk_opf_t f2fs_io_flags(struct f2fs_io_info *fio)
420 unsigned int temp_mask = GENMASK(NR_TEMP_TYPE - 1, 0);
421 unsigned int fua_flag, meta_flag, io_flag;
422 blk_opf_t op_flags = 0;
424 if (fio->op != REQ_OP_WRITE)
426 if (fio->type == DATA)
427 io_flag = fio->sbi->data_io_flag;
428 else if (fio->type == NODE)
429 io_flag = fio->sbi->node_io_flag;
433 fua_flag = io_flag & temp_mask;
434 meta_flag = (io_flag >> NR_TEMP_TYPE) & temp_mask;
	/*
	 * data/node io flag bits per temp:
	 *      REQ_META     |      REQ_FUA      |
	 *    5 |    4 |   3 |    2 |    1 |   0 |
	 * Cold | Warm | Hot | Cold | Warm | Hot |
	 */
442 if (BIT(fio->temp) & meta_flag)
443 op_flags |= REQ_META;
444 if (BIT(fio->temp) & fua_flag)
449 static struct bio *__bio_alloc(struct f2fs_io_info *fio, int npages)
451 struct f2fs_sb_info *sbi = fio->sbi;
452 struct block_device *bdev;
	bdev = f2fs_target_device(sbi, fio->new_blkaddr, &sector);
457 bio = bio_alloc_bioset(bdev, npages,
458 fio->op | fio->op_flags | f2fs_io_flags(fio),
459 GFP_NOIO, &f2fs_bioset);
460 bio->bi_iter.bi_sector = sector;
461 if (is_read_io(fio->op)) {
462 bio->bi_end_io = f2fs_read_end_io;
463 bio->bi_private = NULL;
465 bio->bi_end_io = f2fs_write_end_io;
466 bio->bi_private = sbi;
467 bio->bi_write_hint = f2fs_io_type_to_rw_hint(sbi,
468 fio->type, fio->temp);
470 iostat_alloc_and_bind_ctx(sbi, bio, NULL);
473 wbc_init_bio(fio->io_wbc, bio);
478 static void f2fs_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
480 const struct f2fs_io_info *fio,
	/*
	 * The f2fs garbage collector sets ->encrypted_page when it wants to
	 * read/write raw data without encryption.
	 */
487 if (!fio || !fio->encrypted_page)
488 fscrypt_set_bio_crypt_ctx(bio, inode, first_idx, gfp_mask);
491 static bool f2fs_crypt_mergeable_bio(struct bio *bio, const struct inode *inode,
493 const struct f2fs_io_info *fio)
	/*
	 * The f2fs garbage collector sets ->encrypted_page when it wants to
	 * read/write raw data without encryption.
	 */
499 if (fio && fio->encrypted_page)
500 return !bio_has_crypt_ctx(bio);
502 return fscrypt_mergeable_bio(bio, inode, next_idx);
505 void f2fs_submit_read_bio(struct f2fs_sb_info *sbi, struct bio *bio,
508 WARN_ON_ONCE(!is_read_io(bio_op(bio)));
509 trace_f2fs_submit_read_bio(sbi->sb, type, bio);
511 iostat_update_submit_ctx(bio, type);
515 static void f2fs_submit_write_bio(struct f2fs_sb_info *sbi, struct bio *bio,
518 WARN_ON_ONCE(is_read_io(bio_op(bio)));
520 if (f2fs_lfs_mode(sbi) && current->plug && PAGE_TYPE_ON_MAIN(type))
521 blk_finish_plug(current->plug);
523 trace_f2fs_submit_write_bio(sbi->sb, type, bio);
524 iostat_update_submit_ctx(bio, type);
528 static void __submit_merged_bio(struct f2fs_bio_info *io)
530 struct f2fs_io_info *fio = &io->fio;
535 if (is_read_io(fio->op)) {
536 trace_f2fs_prepare_read_bio(io->sbi->sb, fio->type, io->bio);
537 f2fs_submit_read_bio(io->sbi, io->bio, fio->type);
539 trace_f2fs_prepare_write_bio(io->sbi->sb, fio->type, io->bio);
540 f2fs_submit_write_bio(io->sbi, io->bio, fio->type);
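/*
 * Check whether @bio already contains a page belonging to @inode, matching
 * @page, or written for node @ino; bounce and compress control pages are
 * translated back to their pagecache pages first.
 */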
545 static bool __has_merged_page(struct bio *bio, struct inode *inode,
546 struct page *page, nid_t ino)
548 struct bio_vec *bvec;
549 struct bvec_iter_all iter_all;
554 if (!inode && !page && !ino)
557 bio_for_each_segment_all(bvec, bio, iter_all) {
558 struct page *target = bvec->bv_page;
560 if (fscrypt_is_bounce_page(target)) {
561 target = fscrypt_pagecache_page(target);
565 if (f2fs_is_compressed_page(target)) {
566 target = f2fs_compress_control_page(target);
571 if (inode && inode == target->mapping->host)
573 if (page && page == target)
575 if (ino && ino == ino_of_node(target))
582 int f2fs_init_write_merge_io(struct f2fs_sb_info *sbi)
586 for (i = 0; i < NR_PAGE_TYPE; i++) {
587 int n = (i == META) ? 1 : NR_TEMP_TYPE;
590 sbi->write_io[i] = f2fs_kmalloc(sbi,
591 array_size(n, sizeof(struct f2fs_bio_info)),
593 if (!sbi->write_io[i])
596 for (j = HOT; j < n; j++) {
597 struct f2fs_bio_info *io = &sbi->write_io[i][j];
599 init_f2fs_rwsem(&io->io_rwsem);
602 io->last_block_in_bio = 0;
603 spin_lock_init(&io->io_lock);
604 INIT_LIST_HEAD(&io->io_list);
605 INIT_LIST_HEAD(&io->bio_list);
606 init_f2fs_rwsem(&io->bio_list_lock);
607 #ifdef CONFIG_BLK_DEV_ZONED
608 init_completion(&io->zone_wait);
609 io->zone_pending_bio = NULL;
610 io->bi_private = NULL;
618 static void __f2fs_submit_merged_write(struct f2fs_sb_info *sbi,
619 enum page_type type, enum temp_type temp)
621 enum page_type btype = PAGE_TYPE_OF_BIO(type);
622 struct f2fs_bio_info *io = sbi->write_io[btype] + temp;
624 f2fs_down_write(&io->io_rwsem);
629 /* change META to META_FLUSH in the checkpoint procedure */
630 if (type >= META_FLUSH) {
631 io->fio.type = META_FLUSH;
632 io->bio->bi_opf |= REQ_META | REQ_PRIO | REQ_SYNC;
633 if (!test_opt(sbi, NOBARRIER))
634 io->bio->bi_opf |= REQ_PREFLUSH | REQ_FUA;
636 __submit_merged_bio(io);
638 f2fs_up_write(&io->io_rwsem);
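/*
 * Submit the merged write bios of @type for every temperature class, either
 * unconditionally (@force) or only when the bio holds a page matching
 * @inode/@page/@ino.
 */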
641 static void __submit_merged_write_cond(struct f2fs_sb_info *sbi,
642 struct inode *inode, struct page *page,
643 nid_t ino, enum page_type type, bool force)
648 for (temp = HOT; temp < NR_TEMP_TYPE; temp++) {
650 enum page_type btype = PAGE_TYPE_OF_BIO(type);
651 struct f2fs_bio_info *io = sbi->write_io[btype] + temp;
653 f2fs_down_read(&io->io_rwsem);
654 ret = __has_merged_page(io->bio, inode, page, ino);
655 f2fs_up_read(&io->io_rwsem);
658 __f2fs_submit_merged_write(sbi, type, temp);
660 /* TODO: use HOT temp only for meta pages now. */
666 void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type)
668 __submit_merged_write_cond(sbi, NULL, NULL, 0, type, true);
671 void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
672 struct inode *inode, struct page *page,
673 nid_t ino, enum page_type type)
675 __submit_merged_write_cond(sbi, inode, page, ino, type, false);
678 void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi)
680 f2fs_submit_merged_write(sbi, DATA);
681 f2fs_submit_merged_write(sbi, NODE);
682 f2fs_submit_merged_write(sbi, META);
/*
 * Fill the locked page with data located at the given block address.
 * A caller needs to unlock the page on failure.
 */
689 int f2fs_submit_page_bio(struct f2fs_io_info *fio)
692 struct page *page = fio->encrypted_page ?
693 fio->encrypted_page : fio->page;
695 if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
696 fio->is_por ? META_POR : (__is_meta_io(fio) ?
697 META_GENERIC : DATA_GENERIC_ENHANCE)))
698 return -EFSCORRUPTED;
700 trace_f2fs_submit_page_bio(page, fio);
702 /* Allocate a new bio */
703 bio = __bio_alloc(fio, 1);
705 f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
706 page_folio(fio->page)->index, fio, GFP_NOIO);
708 if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
713 if (fio->io_wbc && !is_read_io(fio->op))
714 wbc_account_cgroup_owner(fio->io_wbc, page_folio(fio->page),
717 inc_page_count(fio->sbi, is_read_io(fio->op) ?
718 __read_io_type(page) : WB_DATA_TYPE(fio->page, false));
720 if (is_read_io(bio_op(bio)))
721 f2fs_submit_read_bio(fio->sbi, bio, fio->type);
723 f2fs_submit_write_bio(fio->sbi, bio, fio->type);
727 static bool page_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
728 block_t last_blkaddr, block_t cur_blkaddr)
730 if (unlikely(sbi->max_io_bytes &&
731 bio->bi_iter.bi_size >= sbi->max_io_bytes))
733 if (last_blkaddr + 1 != cur_blkaddr)
735 return bio->bi_bdev == f2fs_target_device(sbi, cur_blkaddr, NULL);
738 static bool io_type_is_mergeable(struct f2fs_bio_info *io,
739 struct f2fs_io_info *fio)
741 if (io->fio.op != fio->op)
743 return io->fio.op_flags == fio->op_flags;
746 static bool io_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
747 struct f2fs_bio_info *io,
748 struct f2fs_io_info *fio,
749 block_t last_blkaddr,
752 if (!page_is_mergeable(sbi, bio, last_blkaddr, cur_blkaddr))
754 return io_type_is_mergeable(io, fio);
757 static void add_bio_entry(struct f2fs_sb_info *sbi, struct bio *bio,
758 struct page *page, enum temp_type temp)
760 struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
761 struct bio_entry *be;
763 be = f2fs_kmem_cache_alloc(bio_entry_slab, GFP_NOFS, true, NULL);
767 if (bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE)
770 f2fs_down_write(&io->bio_list_lock);
771 list_add_tail(&be->list, &io->bio_list);
772 f2fs_up_write(&io->bio_list_lock);
775 static void del_bio_entry(struct bio_entry *be)
778 kmem_cache_free(bio_entry_slab, be);
781 static int add_ipu_page(struct f2fs_io_info *fio, struct bio **bio,
784 struct f2fs_sb_info *sbi = fio->sbi;
789 for (temp = HOT; temp < NR_TEMP_TYPE && !found; temp++) {
790 struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
791 struct list_head *head = &io->bio_list;
792 struct bio_entry *be;
794 f2fs_down_write(&io->bio_list_lock);
795 list_for_each_entry(be, head, list) {
801 f2fs_bug_on(sbi, !page_is_mergeable(sbi, *bio,
804 if (f2fs_crypt_mergeable_bio(*bio,
805 fio->page->mapping->host,
806 page_folio(fio->page)->index, fio) &&
807 bio_add_page(*bio, page, PAGE_SIZE, 0) ==
813 /* page can't be merged into bio; submit the bio */
815 f2fs_submit_write_bio(sbi, *bio, DATA);
818 f2fs_up_write(&io->bio_list_lock);
829 void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
830 struct bio **bio, struct page *page)
834 struct bio *target = bio ? *bio : NULL;
836 f2fs_bug_on(sbi, !target && !page);
838 for (temp = HOT; temp < NR_TEMP_TYPE && !found; temp++) {
839 struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
840 struct list_head *head = &io->bio_list;
841 struct bio_entry *be;
843 if (list_empty(head))
846 f2fs_down_read(&io->bio_list_lock);
847 list_for_each_entry(be, head, list) {
849 found = (target == be->bio);
851 found = __has_merged_page(be->bio, NULL,
856 f2fs_up_read(&io->bio_list_lock);
863 f2fs_down_write(&io->bio_list_lock);
864 list_for_each_entry(be, head, list) {
866 found = (target == be->bio);
868 found = __has_merged_page(be->bio, NULL,
876 f2fs_up_write(&io->bio_list_lock);
880 f2fs_submit_write_bio(sbi, target, DATA);
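/*
 * Add one page to a caller-owned write bio for in-place updates, allocating
 * and registering a new bio when the page cannot be merged into the
 * existing one.
 */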
887 int f2fs_merge_page_bio(struct f2fs_io_info *fio)
889 struct bio *bio = *fio->bio;
890 struct page *page = fio->encrypted_page ?
891 fio->encrypted_page : fio->page;
893 if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
894 __is_meta_io(fio) ? META_GENERIC : DATA_GENERIC))
895 return -EFSCORRUPTED;
897 trace_f2fs_submit_page_bio(page, fio);
899 if (bio && !page_is_mergeable(fio->sbi, bio, *fio->last_block,
901 f2fs_submit_merged_ipu_write(fio->sbi, &bio, NULL);
904 bio = __bio_alloc(fio, BIO_MAX_VECS);
905 f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
906 page_folio(fio->page)->index, fio, GFP_NOIO);
908 add_bio_entry(fio->sbi, bio, page, fio->temp);
910 if (add_ipu_page(fio, &bio, page))
915 wbc_account_cgroup_owner(fio->io_wbc, page_folio(fio->page),
918 inc_page_count(fio->sbi, WB_DATA_TYPE(page, false));
920 *fio->last_block = fio->new_blkaddr;
926 #ifdef CONFIG_BLK_DEV_ZONED
927 static bool is_end_zone_blkaddr(struct f2fs_sb_info *sbi, block_t blkaddr)
929 struct block_device *bdev = sbi->sb->s_bdev;
932 if (f2fs_is_multi_device(sbi)) {
933 devi = f2fs_target_device_index(sbi, blkaddr);
934 if (blkaddr < FDEV(devi).start_blk ||
935 blkaddr > FDEV(devi).end_blk) {
936 f2fs_err(sbi, "Invalid block %x", blkaddr);
939 blkaddr -= FDEV(devi).start_blk;
940 bdev = FDEV(devi).bdev;
942 return bdev_is_zoned(bdev) &&
943 f2fs_blkz_is_seq(sbi, devi, blkaddr) &&
944 (blkaddr % sbi->blocks_per_blkz == sbi->blocks_per_blkz - 1);
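/*
 * Queue one page into the per-type, per-temperature merged write bio,
 * submitting the current bio first when the new page cannot be merged, and
 * taking care of zoned-device zone boundaries.
 */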
948 void f2fs_submit_page_write(struct f2fs_io_info *fio)
950 struct f2fs_sb_info *sbi = fio->sbi;
951 enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
952 struct f2fs_bio_info *io = sbi->write_io[btype] + fio->temp;
953 struct page *bio_page;
954 enum count_type type;
956 f2fs_bug_on(sbi, is_read_io(fio->op));
958 f2fs_down_write(&io->io_rwsem);
960 #ifdef CONFIG_BLK_DEV_ZONED
961 if (f2fs_sb_has_blkzoned(sbi) && btype < META && io->zone_pending_bio) {
962 wait_for_completion_io(&io->zone_wait);
963 bio_put(io->zone_pending_bio);
964 io->zone_pending_bio = NULL;
965 io->bi_private = NULL;
970 spin_lock(&io->io_lock);
971 if (list_empty(&io->io_list)) {
972 spin_unlock(&io->io_lock);
975 fio = list_first_entry(&io->io_list,
976 struct f2fs_io_info, list);
977 list_del(&fio->list);
978 spin_unlock(&io->io_lock);
981 verify_fio_blkaddr(fio);
983 if (fio->encrypted_page)
984 bio_page = fio->encrypted_page;
985 else if (fio->compressed_page)
986 bio_page = fio->compressed_page;
988 bio_page = fio->page;
990 /* set submitted = true as a return value */
993 type = WB_DATA_TYPE(bio_page, fio->compressed_page);
994 inc_page_count(sbi, type);
997 (!io_is_mergeable(sbi, io->bio, io, fio, io->last_block_in_bio,
999 !f2fs_crypt_mergeable_bio(io->bio, fio->page->mapping->host,
1000 page_folio(bio_page)->index, fio)))
1001 __submit_merged_bio(io);
1003 if (io->bio == NULL) {
1004 io->bio = __bio_alloc(fio, BIO_MAX_VECS);
1005 f2fs_set_bio_crypt_ctx(io->bio, fio->page->mapping->host,
1006 page_folio(bio_page)->index, fio, GFP_NOIO);
1010 if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) < PAGE_SIZE) {
1011 __submit_merged_bio(io);
1016 wbc_account_cgroup_owner(fio->io_wbc, page_folio(fio->page),
1019 io->last_block_in_bio = fio->new_blkaddr;
1021 trace_f2fs_submit_page_write(fio->page, fio);
1022 #ifdef CONFIG_BLK_DEV_ZONED
1023 if (f2fs_sb_has_blkzoned(sbi) && btype < META &&
1024 is_end_zone_blkaddr(sbi, fio->new_blkaddr)) {
1026 reinit_completion(&io->zone_wait);
1027 io->bi_private = io->bio->bi_private;
1028 io->bio->bi_private = io;
1029 io->bio->bi_end_io = f2fs_zone_write_end_io;
1030 io->zone_pending_bio = io->bio;
1031 __submit_merged_bio(io);
1037 if (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) ||
1038 !f2fs_is_checkpoint_ready(sbi))
1039 __submit_merged_bio(io);
1040 f2fs_up_write(&io->io_rwsem);
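/*
 * Allocate a read bio targeting @blkaddr and attach a bio_post_read_ctx when
 * decryption, fs-verity or (for compressed files) decompression may be needed
 * once the read completes.
 */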
1043 static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
1044 unsigned nr_pages, blk_opf_t op_flag,
1045 pgoff_t first_idx, bool for_write)
1047 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1049 struct bio_post_read_ctx *ctx = NULL;
1050 unsigned int post_read_steps = 0;
	struct block_device *bdev = f2fs_target_device(sbi, blkaddr, &sector);
1054 bio = bio_alloc_bioset(bdev, bio_max_segs(nr_pages),
1055 REQ_OP_READ | op_flag,
1056 for_write ? GFP_NOIO : GFP_KERNEL, &f2fs_bioset);
1058 return ERR_PTR(-ENOMEM);
1059 bio->bi_iter.bi_sector = sector;
1060 f2fs_set_bio_crypt_ctx(bio, inode, first_idx, NULL, GFP_NOFS);
1061 bio->bi_end_io = f2fs_read_end_io;
1063 if (fscrypt_inode_uses_fs_layer_crypto(inode))
1064 post_read_steps |= STEP_DECRYPT;
1066 if (f2fs_need_verity(inode, first_idx))
1067 post_read_steps |= STEP_VERITY;
	/*
	 * STEP_DECOMPRESS is handled specially, since a compressed file might
	 * contain both compressed and uncompressed clusters.  We'll allocate a
	 * bio_post_read_ctx if the file is compressed, but the caller is
	 * responsible for enabling STEP_DECOMPRESS if it's actually needed.
	 */
1076 if (post_read_steps || f2fs_compressed_file(inode)) {
1077 /* Due to the mempool, this never fails. */
1078 ctx = mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);
1081 ctx->enabled_steps = post_read_steps;
1082 ctx->fs_blkaddr = blkaddr;
1083 ctx->decompression_attempted = false;
1084 bio->bi_private = ctx;
1086 iostat_alloc_and_bind_ctx(sbi, bio, ctx);
1091 /* This can handle encryption stuffs */
1092 static int f2fs_submit_page_read(struct inode *inode, struct folio *folio,
1093 block_t blkaddr, blk_opf_t op_flags,
1096 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1099 bio = f2fs_grab_read_bio(inode, blkaddr, 1, op_flags,
1100 folio->index, for_write);
1102 return PTR_ERR(bio);
1104 /* wait for GCed page writeback via META_MAPPING */
1105 f2fs_wait_on_block_writeback(inode, blkaddr);
1107 if (!bio_add_folio(bio, folio, PAGE_SIZE, 0)) {
1108 iostat_update_and_unbind_ctx(bio);
1109 if (bio->bi_private)
1110 mempool_free(bio->bi_private, bio_post_read_ctx_pool);
1114 inc_page_count(sbi, F2FS_RD_DATA);
1115 f2fs_update_iostat(sbi, NULL, FS_DATA_READ_IO, F2FS_BLKSIZE);
1116 f2fs_submit_read_bio(sbi, bio, DATA);
1120 static void __set_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
1122 __le32 *addr = get_dnode_addr(dn->inode, dn->node_page);
1124 dn->data_blkaddr = blkaddr;
1125 addr[dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
1134 void f2fs_set_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
1136 f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);
1137 __set_data_blkaddr(dn, blkaddr);
1138 if (set_page_dirty(dn->node_page))
1139 dn->node_changed = true;
1142 void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
1144 f2fs_set_data_blkaddr(dn, blkaddr);
1145 f2fs_update_read_extent_cache(dn);
1148 /* dn->ofs_in_node will be returned with up-to-date last block pointer */
1149 int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
1151 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
1157 if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
1159 err = inc_valid_block_count(sbi, dn->inode, &count, true);
1163 trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
1164 dn->ofs_in_node, count);
1166 f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);
1168 for (; count > 0; dn->ofs_in_node++) {
1169 block_t blkaddr = f2fs_data_blkaddr(dn);
1171 if (blkaddr == NULL_ADDR) {
1172 __set_data_blkaddr(dn, NEW_ADDR);
1177 if (set_page_dirty(dn->node_page))
1178 dn->node_changed = true;
1182 /* Should keep dn->ofs_in_node unchanged */
1183 int f2fs_reserve_new_block(struct dnode_of_data *dn)
1185 unsigned int ofs_in_node = dn->ofs_in_node;
1188 ret = f2fs_reserve_new_blocks(dn, 1);
1189 dn->ofs_in_node = ofs_in_node;
1193 int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
1195 bool need_put = dn->inode_page ? false : true;
1198 err = f2fs_get_dnode_of_data(dn, index, ALLOC_NODE);
1202 if (dn->data_blkaddr == NULL_ADDR)
1203 err = f2fs_reserve_new_block(dn);
1204 if (err || need_put)
1209 struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
1210 blk_opf_t op_flags, bool for_write,
1211 pgoff_t *next_pgofs)
1213 struct address_space *mapping = inode->i_mapping;
1214 struct dnode_of_data dn;
1218 page = f2fs_grab_cache_page(mapping, index, for_write);
1220 return ERR_PTR(-ENOMEM);
1222 if (f2fs_lookup_read_extent_cache_block(inode, index,
1223 &dn.data_blkaddr)) {
1224 if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), dn.data_blkaddr,
1225 DATA_GENERIC_ENHANCE_READ)) {
1226 err = -EFSCORRUPTED;
1232 set_new_dnode(&dn, inode, NULL, NULL, 0);
1233 err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
1235 if (err == -ENOENT && next_pgofs)
1236 *next_pgofs = f2fs_get_next_page_offset(&dn, index);
1239 f2fs_put_dnode(&dn);
1241 if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
1244 *next_pgofs = index + 1;
1247 if (dn.data_blkaddr != NEW_ADDR &&
1248 !f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
1250 DATA_GENERIC_ENHANCE)) {
1251 err = -EFSCORRUPTED;
1255 if (PageUptodate(page)) {
	/*
	 * A new dentry page is allocated but not able to be written, since its
	 * new inode page couldn't be allocated due to -ENOSPC.
	 * In such a case, its blkaddr can remain as NEW_ADDR.
	 * See f2fs_add_link -> f2fs_get_new_data_page ->
	 * f2fs_init_inode_metadata.
	 */
1267 if (dn.data_blkaddr == NEW_ADDR) {
1268 zero_user_segment(page, 0, PAGE_SIZE);
1269 if (!PageUptodate(page))
1270 SetPageUptodate(page);
1275 err = f2fs_submit_page_read(inode, page_folio(page), dn.data_blkaddr,
1276 op_flags, for_write);
1282 f2fs_put_page(page, 1);
1283 return ERR_PTR(err);
1286 struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index,
1287 pgoff_t *next_pgofs)
1289 struct address_space *mapping = inode->i_mapping;
1292 page = find_get_page(mapping, index);
1293 if (page && PageUptodate(page))
1295 f2fs_put_page(page, 0);
1297 page = f2fs_get_read_data_page(inode, index, 0, false, next_pgofs);
1301 if (PageUptodate(page))
1304 wait_on_page_locked(page);
1305 if (unlikely(!PageUptodate(page))) {
1306 f2fs_put_page(page, 0);
1307 return ERR_PTR(-EIO);
/*
 * If it tries to access a hole, return an error, because the callers
 * (functions in dir.c and GC) need to be able to tell whether this page
 * exists or not.
 */
1317 struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
1320 struct address_space *mapping = inode->i_mapping;
1323 page = f2fs_get_read_data_page(inode, index, 0, for_write, NULL);
1327 /* wait for read completion */
1329 if (unlikely(page->mapping != mapping || !PageUptodate(page))) {
1330 f2fs_put_page(page, 1);
1331 return ERR_PTR(-EIO);
/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 * Note that ipage is set only by make_empty_dir, and if any error occurs,
 * ipage should be released by this function.
 */
1345 struct page *f2fs_get_new_data_page(struct inode *inode,
1346 struct page *ipage, pgoff_t index, bool new_i_size)
1348 struct address_space *mapping = inode->i_mapping;
1350 struct dnode_of_data dn;
1353 page = f2fs_grab_cache_page(mapping, index, true);
		/*
		 * before exiting, we should make sure ipage will be released
		 * if any error occurs.
		 */
1359 f2fs_put_page(ipage, 1);
1360 return ERR_PTR(-ENOMEM);
1363 set_new_dnode(&dn, inode, ipage, NULL, 0);
1364 err = f2fs_reserve_block(&dn, index);
1366 f2fs_put_page(page, 1);
1367 return ERR_PTR(err);
1370 f2fs_put_dnode(&dn);
1372 if (PageUptodate(page))
1375 if (dn.data_blkaddr == NEW_ADDR) {
1376 zero_user_segment(page, 0, PAGE_SIZE);
1377 if (!PageUptodate(page))
1378 SetPageUptodate(page);
1380 f2fs_put_page(page, 1);
1382 /* if ipage exists, blkaddr should be NEW_ADDR */
1383 f2fs_bug_on(F2FS_I_SB(inode), ipage);
1384 page = f2fs_get_lock_data_page(inode, index, true);
1389 if (new_i_size && i_size_read(inode) <
1390 ((loff_t)(index + 1) << PAGE_SHIFT))
1391 f2fs_i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
1395 static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
1397 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
1398 struct f2fs_summary sum;
1399 struct node_info ni;
1400 block_t old_blkaddr;
1404 if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
1407 err = f2fs_get_node_info(sbi, dn->nid, &ni, false);
1411 dn->data_blkaddr = f2fs_data_blkaddr(dn);
1412 if (dn->data_blkaddr == NULL_ADDR) {
1413 err = inc_valid_block_count(sbi, dn->inode, &count, true);
1418 set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
1419 old_blkaddr = dn->data_blkaddr;
1420 err = f2fs_allocate_data_block(sbi, NULL, old_blkaddr,
1421 &dn->data_blkaddr, &sum, seg_type, NULL);
1425 if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
1426 f2fs_invalidate_internal_cache(sbi, old_blkaddr);
1428 f2fs_update_data_blkaddr(dn, dn->data_blkaddr);
1432 static void f2fs_map_lock(struct f2fs_sb_info *sbi, int flag)
1434 if (flag == F2FS_GET_BLOCK_PRE_AIO)
1435 f2fs_down_read(&sbi->node_change);
1440 static void f2fs_map_unlock(struct f2fs_sb_info *sbi, int flag)
1442 if (flag == F2FS_GET_BLOCK_PRE_AIO)
1443 f2fs_up_read(&sbi->node_change);
1445 f2fs_unlock_op(sbi);
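/*
 * Reserve a block for @index under f2fs_map_lock() protection unless the read
 * extent cache already shows a mapped block there.
 */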
1448 int f2fs_get_block_locked(struct dnode_of_data *dn, pgoff_t index)
1450 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
1453 f2fs_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO);
1454 if (!f2fs_lookup_read_extent_cache_block(dn->inode, index,
1456 err = f2fs_reserve_block(dn, index);
1457 f2fs_map_unlock(sbi, F2FS_GET_BLOCK_PRE_AIO);
1462 static int f2fs_map_no_dnode(struct inode *inode,
1463 struct f2fs_map_blocks *map, struct dnode_of_data *dn,
1466 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	/*
	 * There is one exceptional case: read_node_page() may return -ENOENT
	 * because the filesystem has been shut down or hit a checkpoint error;
	 * return -EIO in that case.
	 */
1473 if (map->m_may_create &&
1474 (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) || f2fs_cp_error(sbi)))
1477 if (map->m_next_pgofs)
1478 *map->m_next_pgofs = f2fs_get_next_page_offset(dn, pgoff);
1479 if (map->m_next_extent)
1480 *map->m_next_extent = f2fs_get_next_page_offset(dn, pgoff);
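/*
 * Fast path of f2fs_map_blocks(): try to satisfy the request purely from the
 * read extent cache, adjusting the result for multi-device direct I/O.
 */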
1484 static bool f2fs_map_blocks_cached(struct inode *inode,
1485 struct f2fs_map_blocks *map, int flag)
1487 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1488 unsigned int maxblocks = map->m_len;
1489 pgoff_t pgoff = (pgoff_t)map->m_lblk;
1490 struct extent_info ei = {};
1492 if (!f2fs_lookup_read_extent_cache(inode, pgoff, &ei))
1495 map->m_pblk = ei.blk + pgoff - ei.fofs;
1496 map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgoff);
1497 map->m_flags = F2FS_MAP_MAPPED;
1498 if (map->m_next_extent)
1499 *map->m_next_extent = pgoff + map->m_len;
1501 /* for hardware encryption, but to avoid potential issue in future */
1502 if (flag == F2FS_GET_BLOCK_DIO)
1503 f2fs_wait_on_block_writeback_range(inode,
1504 map->m_pblk, map->m_len);
1506 if (f2fs_allow_multi_device_dio(sbi, flag)) {
1507 int bidx = f2fs_target_device_index(sbi, map->m_pblk);
1508 struct f2fs_dev_info *dev = &sbi->devs[bidx];
1510 map->m_bdev = dev->bdev;
1511 map->m_pblk -= dev->start_blk;
1512 map->m_len = min(map->m_len, dev->end_blk + 1 - map->m_pblk);
1514 map->m_bdev = inode->i_sb->s_bdev;
1519 static bool map_is_mergeable(struct f2fs_sb_info *sbi,
1520 struct f2fs_map_blocks *map,
1521 block_t blkaddr, int flag, int bidx,
1524 if (map->m_multidev_dio && map->m_bdev != FDEV(bidx).bdev)
1526 if (map->m_pblk != NEW_ADDR && blkaddr == (map->m_pblk + ofs))
1528 if (map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR)
1530 if (flag == F2FS_GET_BLOCK_PRE_DIO)
1532 if (flag == F2FS_GET_BLOCK_DIO &&
1533 map->m_pblk == NULL_ADDR && blkaddr == NULL_ADDR)
/*
 * f2fs_map_blocks() tries to find or build a mapping relationship which
 * maps contiguous logical blocks to physical blocks, and returns such
 * info via the f2fs_map_blocks structure.
 */
1543 int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, int flag)
1545 unsigned int maxblocks = map->m_len;
1546 struct dnode_of_data dn;
1547 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1548 int mode = map->m_may_create ? ALLOC_NODE : LOOKUP_NODE;
1549 pgoff_t pgofs, end_offset, end;
1550 int err = 0, ofs = 1;
1551 unsigned int ofs_in_node, last_ofs_in_node;
1554 unsigned int start_pgofs;
1561 if (!map->m_may_create && f2fs_map_blocks_cached(inode, map, flag))
1564 map->m_bdev = inode->i_sb->s_bdev;
1565 map->m_multidev_dio =
1566 f2fs_allow_multi_device_dio(F2FS_I_SB(inode), flag);
1571 /* it only supports block size == page size */
1572 pgofs = (pgoff_t)map->m_lblk;
1573 end = pgofs + maxblocks;
1576 if (map->m_may_create)
1577 f2fs_map_lock(sbi, flag);
1579 /* When reading holes, we need its node page */
1580 set_new_dnode(&dn, inode, NULL, NULL, 0);
1581 err = f2fs_get_dnode_of_data(&dn, pgofs, mode);
1583 if (flag == F2FS_GET_BLOCK_BMAP)
1586 err = f2fs_map_no_dnode(inode, map, &dn, pgofs);
1590 start_pgofs = pgofs;
1592 last_ofs_in_node = ofs_in_node = dn.ofs_in_node;
1593 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
1596 blkaddr = f2fs_data_blkaddr(&dn);
1597 is_hole = !__is_valid_data_blkaddr(blkaddr);
1599 !f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE)) {
1600 err = -EFSCORRUPTED;
1604 /* use out-place-update for direct IO under LFS mode */
1605 if (map->m_may_create && (is_hole ||
1606 (flag == F2FS_GET_BLOCK_DIO && f2fs_lfs_mode(sbi) &&
1607 !f2fs_is_pinned_file(inode)))) {
1608 if (unlikely(f2fs_cp_error(sbi))) {
1614 case F2FS_GET_BLOCK_PRE_AIO:
1615 if (blkaddr == NULL_ADDR) {
1617 last_ofs_in_node = dn.ofs_in_node;
1620 case F2FS_GET_BLOCK_PRE_DIO:
1621 case F2FS_GET_BLOCK_DIO:
1622 err = __allocate_data_block(&dn, map->m_seg_type);
1625 if (flag == F2FS_GET_BLOCK_PRE_DIO)
1626 file_need_truncate(inode);
1627 set_inode_flag(inode, FI_APPEND_WRITE);
1635 blkaddr = dn.data_blkaddr;
1637 map->m_flags |= F2FS_MAP_NEW;
1638 } else if (is_hole) {
1639 if (f2fs_compressed_file(inode) &&
1640 f2fs_sanity_check_cluster(&dn)) {
1641 err = -EFSCORRUPTED;
1642 f2fs_handle_error(sbi,
1643 ERROR_CORRUPTED_CLUSTER);
1648 case F2FS_GET_BLOCK_PRECACHE:
1650 case F2FS_GET_BLOCK_BMAP:
1653 case F2FS_GET_BLOCK_FIEMAP:
1654 if (blkaddr == NULL_ADDR) {
1655 if (map->m_next_pgofs)
1656 *map->m_next_pgofs = pgofs + 1;
1660 case F2FS_GET_BLOCK_DIO:
1661 if (map->m_next_pgofs)
1662 *map->m_next_pgofs = pgofs + 1;
1665 /* for defragment case */
1666 if (map->m_next_pgofs)
1667 *map->m_next_pgofs = pgofs + 1;
1672 if (flag == F2FS_GET_BLOCK_PRE_AIO)
1675 if (map->m_multidev_dio)
1676 bidx = f2fs_target_device_index(sbi, blkaddr);
1678 if (map->m_len == 0) {
1679 /* reserved delalloc block should be mapped for fiemap. */
1680 if (blkaddr == NEW_ADDR)
1681 map->m_flags |= F2FS_MAP_DELALLOC;
1682 /* DIO READ and hole case, should not map the blocks. */
1683 if (!(flag == F2FS_GET_BLOCK_DIO && is_hole && !map->m_may_create))
1684 map->m_flags |= F2FS_MAP_MAPPED;
1686 map->m_pblk = blkaddr;
1689 if (map->m_multidev_dio)
1690 map->m_bdev = FDEV(bidx).bdev;
1691 } else if (map_is_mergeable(sbi, map, blkaddr, flag, bidx, ofs)) {
1702 /* preallocate blocks in batch for one dnode page */
1703 if (flag == F2FS_GET_BLOCK_PRE_AIO &&
1704 (pgofs == end || dn.ofs_in_node == end_offset)) {
1706 dn.ofs_in_node = ofs_in_node;
1707 err = f2fs_reserve_new_blocks(&dn, prealloc);
1711 map->m_len += dn.ofs_in_node - ofs_in_node;
1712 if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) {
1716 dn.ofs_in_node = end_offset;
1719 if (flag == F2FS_GET_BLOCK_DIO && f2fs_lfs_mode(sbi) &&
1720 map->m_may_create) {
1721 /* the next block to be allocated may not be contiguous. */
1722 if (GET_SEGOFF_FROM_SEG0(sbi, blkaddr) % BLKS_PER_SEC(sbi) ==
1723 CAP_BLKS_PER_SEC(sbi) - 1)
1729 else if (dn.ofs_in_node < end_offset)
1732 if (flag == F2FS_GET_BLOCK_PRECACHE) {
1733 if (map->m_flags & F2FS_MAP_MAPPED) {
1734 unsigned int ofs = start_pgofs - map->m_lblk;
1736 f2fs_update_read_extent_cache_range(&dn,
1737 start_pgofs, map->m_pblk + ofs,
1742 f2fs_put_dnode(&dn);
1744 if (map->m_may_create) {
1745 f2fs_map_unlock(sbi, flag);
1746 f2fs_balance_fs(sbi, dn.node_changed);
1752 if (flag == F2FS_GET_BLOCK_DIO && map->m_flags & F2FS_MAP_MAPPED) {
		/*
		 * for hardware encryption, but to avoid potential issue
		 * in future
		 */
1757 f2fs_wait_on_block_writeback_range(inode,
1758 map->m_pblk, map->m_len);
1760 if (map->m_multidev_dio) {
1761 block_t blk_addr = map->m_pblk;
1763 bidx = f2fs_target_device_index(sbi, map->m_pblk);
1765 map->m_bdev = FDEV(bidx).bdev;
1766 map->m_pblk -= FDEV(bidx).start_blk;
1768 if (map->m_may_create)
1769 f2fs_update_device_state(sbi, inode->i_ino,
1770 blk_addr, map->m_len);
1772 f2fs_bug_on(sbi, blk_addr + map->m_len >
1773 FDEV(bidx).end_blk + 1);
1777 if (flag == F2FS_GET_BLOCK_PRECACHE) {
1778 if (map->m_flags & F2FS_MAP_MAPPED) {
1779 unsigned int ofs = start_pgofs - map->m_lblk;
1781 f2fs_update_read_extent_cache_range(&dn,
1782 start_pgofs, map->m_pblk + ofs,
1785 if (map->m_next_extent)
1786 *map->m_next_extent = pgofs + 1;
1788 f2fs_put_dnode(&dn);
1790 if (map->m_may_create) {
1791 f2fs_map_unlock(sbi, flag);
1792 f2fs_balance_fs(sbi, dn.node_changed);
1795 trace_f2fs_map_blocks(inode, map, flag, err);
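/*
 * Return true only if every block in [pos, pos + len) is already mapped, i.e.
 * the write can proceed without allocating new blocks.
 */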
1799 bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len)
1801 struct f2fs_map_blocks map;
1805 if (pos + len > i_size_read(inode))
1808 map.m_lblk = F2FS_BYTES_TO_BLK(pos);
1809 map.m_next_pgofs = NULL;
1810 map.m_next_extent = NULL;
1811 map.m_seg_type = NO_CHECK_TYPE;
1812 map.m_may_create = false;
1813 last_lblk = F2FS_BLK_ALIGN(pos + len);
1815 while (map.m_lblk < last_lblk) {
1816 map.m_len = last_lblk - map.m_lblk;
1817 err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_DEFAULT);
1818 if (err || map.m_len == 0)
1820 map.m_lblk += map.m_len;
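/*
 * FIEMAP_FLAG_XATTR helper: report the physical location of the inline xattr
 * area and/or the external xattr node block.
 */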
1825 static int f2fs_xattr_fiemap(struct inode *inode,
1826 struct fiemap_extent_info *fieinfo)
1828 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1830 struct node_info ni;
1831 __u64 phys = 0, len;
1833 nid_t xnid = F2FS_I(inode)->i_xattr_nid;
1836 if (f2fs_has_inline_xattr(inode)) {
1839 page = f2fs_grab_cache_page(NODE_MAPPING(sbi),
1840 inode->i_ino, false);
1844 err = f2fs_get_node_info(sbi, inode->i_ino, &ni, false);
1846 f2fs_put_page(page, 1);
1850 phys = F2FS_BLK_TO_BYTES(ni.blk_addr);
1851 offset = offsetof(struct f2fs_inode, i_addr) +
1852 sizeof(__le32) * (DEF_ADDRS_PER_INODE -
1853 get_inline_xattr_addrs(inode));
1856 len = inline_xattr_size(inode);
1858 f2fs_put_page(page, 1);
1860 flags = FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_NOT_ALIGNED;
1863 flags |= FIEMAP_EXTENT_LAST;
1865 err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);
1866 trace_f2fs_fiemap(inode, 0, phys, len, flags, err);
1872 page = f2fs_grab_cache_page(NODE_MAPPING(sbi), xnid, false);
1876 err = f2fs_get_node_info(sbi, xnid, &ni, false);
1878 f2fs_put_page(page, 1);
1882 phys = F2FS_BLK_TO_BYTES(ni.blk_addr);
1883 len = inode->i_sb->s_blocksize;
1885 f2fs_put_page(page, 1);
1887 flags = FIEMAP_EXTENT_LAST;
1891 err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);
1892 trace_f2fs_fiemap(inode, 0, phys, len, flags, err);
1895 return (err < 0 ? err : 0);
1898 int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
1901 struct f2fs_map_blocks map;
1902 sector_t start_blk, last_blk, blk_len, max_len;
1904 u64 logical = 0, phys = 0, size = 0;
1907 bool compr_cluster = false, compr_appended;
1908 unsigned int cluster_size = F2FS_I(inode)->i_cluster_size;
1909 unsigned int count_in_cluster = 0;
1912 if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
1913 ret = f2fs_precache_extents(inode);
1918 ret = fiemap_prep(inode, fieinfo, start, &len, FIEMAP_FLAG_XATTR);
1922 inode_lock_shared(inode);
1924 maxbytes = F2FS_BLK_TO_BYTES(max_file_blocks(inode));
1925 if (start > maxbytes) {
1930 if (len > maxbytes || (maxbytes - len) < start)
1931 len = maxbytes - start;
1933 if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
1934 ret = f2fs_xattr_fiemap(inode, fieinfo);
1938 if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
1939 ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len);
1944 start_blk = F2FS_BYTES_TO_BLK(start);
1945 last_blk = F2FS_BYTES_TO_BLK(start + len - 1);
1946 blk_len = last_blk - start_blk + 1;
1947 max_len = F2FS_BYTES_TO_BLK(maxbytes) - start_blk;
1950 memset(&map, 0, sizeof(map));
1951 map.m_lblk = start_blk;
1952 map.m_len = blk_len;
1953 map.m_next_pgofs = &next_pgofs;
1954 map.m_seg_type = NO_CHECK_TYPE;
1956 if (compr_cluster) {
1958 map.m_len = cluster_size - count_in_cluster;
1961 ret = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_FIEMAP);
1966 if (!compr_cluster && !(map.m_flags & F2FS_MAP_FLAGS)) {
1967 start_blk = next_pgofs;
1969 if (F2FS_BLK_TO_BYTES(start_blk) < maxbytes)
1972 flags |= FIEMAP_EXTENT_LAST;
		/*
		 * current extent may cross boundary of inquiry, increase len to
		 * cover it
		 */
1979 if (!compr_cluster && (map.m_flags & F2FS_MAP_MAPPED) &&
1980 map.m_lblk + map.m_len - 1 == last_blk &&
1981 blk_len != max_len) {
1986 compr_appended = false;
1987 /* In a case of compressed cluster, append this to the last extent */
1988 if (compr_cluster && ((map.m_flags & F2FS_MAP_DELALLOC) ||
1989 !(map.m_flags & F2FS_MAP_FLAGS))) {
1990 compr_appended = true;
1995 flags |= FIEMAP_EXTENT_MERGED;
1996 if (IS_ENCRYPTED(inode))
1997 flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;
1999 ret = fiemap_fill_next_extent(fieinfo, logical,
2001 trace_f2fs_fiemap(inode, logical, phys, size, flags, ret);
2007 if (start_blk > last_blk)
2011 if (map.m_pblk == COMPRESS_ADDR) {
2012 compr_cluster = true;
2013 count_in_cluster = 1;
2014 } else if (compr_appended) {
2015 unsigned int appended_blks = cluster_size -
2016 count_in_cluster + 1;
2017 size += F2FS_BLK_TO_BYTES(appended_blks);
2018 start_blk += appended_blks;
2019 compr_cluster = false;
2021 logical = F2FS_BLK_TO_BYTES(start_blk);
2022 phys = __is_valid_data_blkaddr(map.m_pblk) ?
2023 F2FS_BLK_TO_BYTES(map.m_pblk) : 0;
2024 size = F2FS_BLK_TO_BYTES(map.m_len);
2027 if (compr_cluster) {
2028 flags = FIEMAP_EXTENT_ENCODED;
2029 count_in_cluster += map.m_len;
2030 if (count_in_cluster == cluster_size) {
2031 compr_cluster = false;
2032 size += F2FS_BLKSIZE;
2034 } else if (map.m_flags & F2FS_MAP_DELALLOC) {
2035 flags = FIEMAP_EXTENT_UNWRITTEN;
2038 start_blk += F2FS_BYTES_TO_BLK(size);
2043 if (fatal_signal_pending(current))
2051 inode_unlock_shared(inode);
2055 static inline loff_t f2fs_readpage_limit(struct inode *inode)
2057 if (IS_ENABLED(CONFIG_FS_VERITY) && IS_VERITY(inode))
2058 return F2FS_BLK_TO_BYTES(max_file_blocks(inode));
2060 return i_size_read(inode);
2063 static inline blk_opf_t f2fs_ra_op_flags(struct readahead_control *rac)
2065 return rac ? REQ_RAHEAD : 0;
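/*
 * Read one uncompressed page for read/readahead, reusing the caller's block
 * map and bio where possible and growing the bio as pages merge.
 */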
2068 static int f2fs_read_single_page(struct inode *inode, struct folio *folio,
2070 struct f2fs_map_blocks *map,
2071 struct bio **bio_ret,
2072 sector_t *last_block_in_bio,
2073 struct readahead_control *rac)
2075 struct bio *bio = *bio_ret;
2076 const unsigned int blocksize = F2FS_BLKSIZE;
2077 sector_t block_in_file;
2078 sector_t last_block;
2079 sector_t last_block_in_file;
2081 pgoff_t index = folio_index(folio);
2084 block_in_file = (sector_t)index;
2085 last_block = block_in_file + nr_pages;
2086 last_block_in_file = F2FS_BYTES_TO_BLK(f2fs_readpage_limit(inode) +
2088 if (last_block > last_block_in_file)
2089 last_block = last_block_in_file;
2091 /* just zeroing out page which is beyond EOF */
2092 if (block_in_file >= last_block)
	/*
	 * Map blocks using the previous result first.
	 */
2097 if ((map->m_flags & F2FS_MAP_MAPPED) &&
2098 block_in_file > map->m_lblk &&
2099 block_in_file < (map->m_lblk + map->m_len))
	/*
	 * Then do more f2fs_map_blocks() calls until we are
	 * done with this page.
	 */
2106 map->m_lblk = block_in_file;
2107 map->m_len = last_block - block_in_file;
2109 ret = f2fs_map_blocks(inode, map, F2FS_GET_BLOCK_DEFAULT);
2113 if ((map->m_flags & F2FS_MAP_MAPPED)) {
2114 block_nr = map->m_pblk + block_in_file - map->m_lblk;
2115 folio_set_mappedtodisk(folio);
2117 if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), block_nr,
2118 DATA_GENERIC_ENHANCE_READ)) {
2119 ret = -EFSCORRUPTED;
2124 folio_zero_segment(folio, 0, folio_size(folio));
2125 if (f2fs_need_verity(inode, index) &&
2126 !fsverity_verify_folio(folio)) {
2130 if (!folio_test_uptodate(folio))
2131 folio_mark_uptodate(folio);
2132 folio_unlock(folio);
	/*
	 * This page will go to BIO.  Do we need to send this
	 * BIO off first?
	 */
2140 if (bio && (!page_is_mergeable(F2FS_I_SB(inode), bio,
2141 *last_block_in_bio, block_nr) ||
2142 !f2fs_crypt_mergeable_bio(bio, inode, index, NULL))) {
2144 f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA);
2148 bio = f2fs_grab_read_bio(inode, block_nr, nr_pages,
2149 f2fs_ra_op_flags(rac), index,
	/*
	 * If the page is under writeback, we need to wait for
	 * its completion to see the correct decrypted data.
	 */
2162 f2fs_wait_on_block_writeback(inode, block_nr);
2164 if (!bio_add_folio(bio, folio, blocksize, 0))
2165 goto submit_and_realloc;
2167 inc_page_count(F2FS_I_SB(inode), F2FS_RD_DATA);
2168 f2fs_update_iostat(F2FS_I_SB(inode), NULL, FS_DATA_READ_IO,
2170 *last_block_in_bio = block_nr;
2176 #ifdef CONFIG_F2FS_FS_COMPRESSION
2177 int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
2178 unsigned nr_pages, sector_t *last_block_in_bio,
2179 struct readahead_control *rac, bool for_write)
2181 struct dnode_of_data dn;
2182 struct inode *inode = cc->inode;
2183 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2184 struct bio *bio = *bio_ret;
2185 unsigned int start_idx = cc->cluster_idx << cc->log_cluster_size;
2186 sector_t last_block_in_file;
2187 const unsigned int blocksize = F2FS_BLKSIZE;
2188 struct decompress_io_ctx *dic = NULL;
2189 struct extent_info ei = {};
2190 bool from_dnode = true;
2194 f2fs_bug_on(sbi, f2fs_cluster_is_empty(cc));
2196 last_block_in_file = F2FS_BYTES_TO_BLK(f2fs_readpage_limit(inode) +
2199 /* get rid of pages beyond EOF */
2200 for (i = 0; i < cc->cluster_size; i++) {
2201 struct page *page = cc->rpages[i];
2202 struct folio *folio;
2207 folio = page_folio(page);
2208 if ((sector_t)folio->index >= last_block_in_file) {
2209 folio_zero_segment(folio, 0, folio_size(folio));
2210 if (!folio_test_uptodate(folio))
2211 folio_mark_uptodate(folio);
2212 } else if (!folio_test_uptodate(folio)) {
2215 folio_unlock(folio);
2218 cc->rpages[i] = NULL;
2222 /* we are done since all pages are beyond EOF */
2223 if (f2fs_cluster_is_empty(cc))
2226 if (f2fs_lookup_read_extent_cache(inode, start_idx, &ei))
2230 goto skip_reading_dnode;
2232 set_new_dnode(&dn, inode, NULL, NULL, 0);
2233 ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
2237 if (unlikely(f2fs_cp_error(sbi))) {
2241 f2fs_bug_on(sbi, dn.data_blkaddr != COMPRESS_ADDR);
2244 for (i = 1; i < cc->cluster_size; i++) {
2247 blkaddr = from_dnode ? data_blkaddr(dn.inode, dn.node_page,
2248 dn.ofs_in_node + i) :
2251 if (!__is_valid_data_blkaddr(blkaddr))
2254 if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC)) {
2260 if (!from_dnode && i >= ei.c_len)
2264 /* nothing to decompress */
2265 if (cc->nr_cpages == 0) {
2270 dic = f2fs_alloc_dic(cc);
2276 for (i = 0; i < cc->nr_cpages; i++) {
2277 struct folio *folio = page_folio(dic->cpages[i]);
2279 struct bio_post_read_ctx *ctx;
2281 blkaddr = from_dnode ? data_blkaddr(dn.inode, dn.node_page,
2282 dn.ofs_in_node + i + 1) :
2285 f2fs_wait_on_block_writeback(inode, blkaddr);
2287 if (f2fs_load_compressed_page(sbi, folio_page(folio, 0),
2289 if (atomic_dec_and_test(&dic->remaining_pages)) {
2290 f2fs_decompress_cluster(dic, true);
2296 if (bio && (!page_is_mergeable(sbi, bio,
2297 *last_block_in_bio, blkaddr) ||
2298 !f2fs_crypt_mergeable_bio(bio, inode, folio->index, NULL))) {
2300 f2fs_submit_read_bio(sbi, bio, DATA);
2305 bio = f2fs_grab_read_bio(inode, blkaddr, nr_pages,
2306 f2fs_ra_op_flags(rac),
2307 folio->index, for_write);
2310 f2fs_decompress_end_io(dic, ret, true);
2311 f2fs_put_dnode(&dn);
2317 if (!bio_add_folio(bio, folio, blocksize, 0))
2318 goto submit_and_realloc;
2320 ctx = get_post_read_ctx(bio);
2321 ctx->enabled_steps |= STEP_DECOMPRESS;
2322 refcount_inc(&dic->refcnt);
2324 inc_page_count(sbi, F2FS_RD_DATA);
2325 f2fs_update_iostat(sbi, inode, FS_DATA_READ_IO, F2FS_BLKSIZE);
2326 *last_block_in_bio = blkaddr;
2330 f2fs_put_dnode(&dn);
2337 f2fs_put_dnode(&dn);
2339 for (i = 0; i < cc->cluster_size; i++) {
2340 if (cc->rpages[i]) {
2341 ClearPageUptodate(cc->rpages[i]);
2342 unlock_page(cc->rpages[i]);
/*
 * This function was originally taken from fs/mpage.c, and customized for f2fs.
 * The major change follows from block_size == page_size in f2fs by default.
 */
2354 static int f2fs_mpage_readpages(struct inode *inode,
2355 struct readahead_control *rac, struct folio *folio)
2357 struct bio *bio = NULL;
2358 sector_t last_block_in_bio = 0;
2359 struct f2fs_map_blocks map;
2360 #ifdef CONFIG_F2FS_FS_COMPRESSION
2361 struct compress_ctx cc = {
2363 .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
2364 .cluster_size = F2FS_I(inode)->i_cluster_size,
2365 .cluster_idx = NULL_CLUSTER,
2371 pgoff_t nc_cluster_idx = NULL_CLUSTER;
2374 unsigned nr_pages = rac ? readahead_count(rac) : 1;
2375 unsigned max_nr_pages = nr_pages;
2382 map.m_next_pgofs = NULL;
2383 map.m_next_extent = NULL;
2384 map.m_seg_type = NO_CHECK_TYPE;
2385 map.m_may_create = false;
2387 for (; nr_pages; nr_pages--) {
2389 folio = readahead_folio(rac);
2390 prefetchw(&folio->flags);
2393 #ifdef CONFIG_F2FS_FS_COMPRESSION
2394 index = folio_index(folio);
2396 if (!f2fs_compressed_file(inode))
2397 goto read_single_page;
		/* there are remaining compressed pages, submit them */
2400 if (!f2fs_cluster_can_merge_page(&cc, index)) {
2401 ret = f2fs_read_multi_pages(&cc, &bio,
2405 f2fs_destroy_compress_ctx(&cc, false);
2407 goto set_error_page;
2409 if (cc.cluster_idx == NULL_CLUSTER) {
2410 if (nc_cluster_idx == index >> cc.log_cluster_size)
2411 goto read_single_page;
2413 ret = f2fs_is_compressed_cluster(inode, index);
2415 goto set_error_page;
2418 index >> cc.log_cluster_size;
2419 goto read_single_page;
2422 nc_cluster_idx = NULL_CLUSTER;
2424 ret = f2fs_init_compress_ctx(&cc);
2426 goto set_error_page;
2428 f2fs_compress_ctx_add_page(&cc, folio);
2434 ret = f2fs_read_single_page(inode, folio, max_nr_pages, &map,
2435 &bio, &last_block_in_bio, rac);
2437 #ifdef CONFIG_F2FS_FS_COMPRESSION
2440 folio_zero_segment(folio, 0, folio_size(folio));
2441 folio_unlock(folio);
2443 #ifdef CONFIG_F2FS_FS_COMPRESSION
2447 #ifdef CONFIG_F2FS_FS_COMPRESSION
2448 if (f2fs_compressed_file(inode)) {
2450 if (nr_pages == 1 && !f2fs_cluster_is_empty(&cc)) {
2451 ret = f2fs_read_multi_pages(&cc, &bio,
2455 f2fs_destroy_compress_ctx(&cc, false);
2461 f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA);
2465 static int f2fs_read_data_folio(struct file *file, struct folio *folio)
2467 struct inode *inode = folio_file_mapping(folio)->host;
2470 trace_f2fs_readpage(folio, DATA);
2472 if (!f2fs_is_compress_backend_ready(inode)) {
2473 folio_unlock(folio);
2477 /* If the file has inline data, try to read it directly */
2478 if (f2fs_has_inline_data(inode))
2479 ret = f2fs_read_inline_data(inode, folio);
2481 ret = f2fs_mpage_readpages(inode, NULL, folio);
2485 static void f2fs_readahead(struct readahead_control *rac)
2487 struct inode *inode = rac->mapping->host;
2489 trace_f2fs_readpages(inode, readahead_index(rac), readahead_count(rac));
2491 if (!f2fs_is_compress_backend_ready(inode))
2494 /* If the file has inline data, skip readahead */
2495 if (f2fs_has_inline_data(inode))
2498 f2fs_mpage_readpages(inode, rac, NULL);
2501 int f2fs_encrypt_one_page(struct f2fs_io_info *fio)
2503 struct inode *inode = fio->page->mapping->host;
2504 struct page *mpage, *page;
2505 gfp_t gfp_flags = GFP_NOFS;
2507 if (!f2fs_encrypted_file(inode))
2510 page = fio->compressed_page ? fio->compressed_page : fio->page;
2512 if (fscrypt_inode_uses_inline_crypto(inode))
2516 fio->encrypted_page = fscrypt_encrypt_pagecache_blocks(page,
2517 PAGE_SIZE, 0, gfp_flags);
2518 if (IS_ERR(fio->encrypted_page)) {
2519 /* flush pending IOs and wait for a while in the ENOMEM case */
2520 if (PTR_ERR(fio->encrypted_page) == -ENOMEM) {
2521 f2fs_flush_merged_writes(fio->sbi);
2522 memalloc_retry_wait(GFP_NOFS);
2523 gfp_flags |= __GFP_NOFAIL;
2526 return PTR_ERR(fio->encrypted_page);
2529 mpage = find_lock_page(META_MAPPING(fio->sbi), fio->old_blkaddr);
2531 if (PageUptodate(mpage))
2532 memcpy(page_address(mpage),
2533 page_address(fio->encrypted_page), PAGE_SIZE);
2534 f2fs_put_page(mpage, 1);
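/*
 * Apply the mount-time IPU policy bits to decide whether this write is
 * allowed to go in place.
 */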
2539 static inline bool check_inplace_update_policy(struct inode *inode,
2540 struct f2fs_io_info *fio)
2542 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2544 if (IS_F2FS_IPU_HONOR_OPU_WRITE(sbi) &&
2545 is_inode_flag_set(inode, FI_OPU_WRITE))
2547 if (IS_F2FS_IPU_FORCE(sbi))
2549 if (IS_F2FS_IPU_SSR(sbi) && f2fs_need_SSR(sbi))
2551 if (IS_F2FS_IPU_UTIL(sbi) && utilization(sbi) > SM_I(sbi)->min_ipu_util)
2553 if (IS_F2FS_IPU_SSR_UTIL(sbi) && f2fs_need_SSR(sbi) &&
2554 utilization(sbi) > SM_I(sbi)->min_ipu_util)
2558 * IPU for rewriting async pages
2560 if (IS_F2FS_IPU_ASYNC(sbi) && fio && fio->op == REQ_OP_WRITE &&
2561 !(fio->op_flags & REQ_SYNC) && !IS_ENCRYPTED(inode))
2564 /* this is only set during fdatasync */
2565 if (IS_F2FS_IPU_FSYNC(sbi) && is_inode_flag_set(inode, FI_NEED_IPU))
2568 if (unlikely(fio && is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
2569 !f2fs_is_checkpointed_data(sbi, fio->old_blkaddr)))
2575 bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio)
2577 /* swap file is migrating in aligned write mode */
2578 if (is_inode_flag_set(inode, FI_ALIGNED_WRITE))
2581 if (f2fs_is_pinned_file(inode))
2584 /* if this is a cold file, we should overwrite it to avoid fragmentation */
2585 if (file_is_cold(inode) && !is_inode_flag_set(inode, FI_OPU_WRITE))
2588 return check_inplace_update_policy(inode, fio);
2591 bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio)
2593 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2595 /* The below cases were checked when setting it. */
2596 if (f2fs_is_pinned_file(inode))
2598 if (fio && is_sbi_flag_set(sbi, SBI_NEED_FSCK))
2600 if (f2fs_lfs_mode(sbi))
2602 if (S_ISDIR(inode->i_mode))
2604 if (IS_NOQUOTA(inode))
2606 if (f2fs_used_in_atomic_write(inode))
2608 /* rewrite low-ratio compressed data with OPU mode to avoid fragmentation */
2609 if (f2fs_compressed_file(inode) &&
2610 F2FS_OPTION(sbi).compress_mode == COMPR_MODE_USER &&
2611 is_inode_flag_set(inode, FI_ENABLE_COMPRESS))
2614 /* swap file is migrating in aligned write mode */
2615 if (is_inode_flag_set(inode, FI_ALIGNED_WRITE))
2618 if (is_inode_flag_set(inode, FI_OPU_WRITE))
2622 if (page_private_gcing(fio->page))
2624 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
2625 f2fs_is_checkpointed_data(sbi, fio->old_blkaddr)))
2631 static inline bool need_inplace_update(struct f2fs_io_info *fio)
2633 struct inode *inode = fio->page->mapping->host;
2635 if (f2fs_should_update_outplace(inode, fio))
2638 return f2fs_should_update_inplace(inode, fio);
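/*
 * Write one dirty data page. The fast path does an in-place update (IPU)
 * when the policy allows it and the old block address is valid; otherwise
 * a dnode lookup is done under f2fs_lock_op() and the page is written
 * out-of-place (OPU) to a newly allocated block. Atomic writes use the
 * COW inode for the dnode lookup.
 */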
2641 int f2fs_do_write_data_page(struct f2fs_io_info *fio)
2643 struct folio *folio = page_folio(fio->page);
2644 struct inode *inode = folio->mapping->host;
2645 struct dnode_of_data dn;
2646 struct node_info ni;
2647 bool ipu_force = false;
2651 /* Use COW inode to make dnode_of_data for atomic write */
2652 atomic_commit = f2fs_is_atomic_file(inode) &&
2653 page_private_atomic(folio_page(folio, 0));
2655 set_new_dnode(&dn, F2FS_I(inode)->cow_inode, NULL, NULL, 0);
2657 set_new_dnode(&dn, inode, NULL, NULL, 0);
2659 if (need_inplace_update(fio) &&
2660 f2fs_lookup_read_extent_cache_block(inode, folio->index,
2661 &fio->old_blkaddr)) {
2662 if (!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
2663 DATA_GENERIC_ENHANCE))
2664 return -EFSCORRUPTED;
2667 fio->need_lock = LOCK_DONE;
2671 /* Avoid deadlock between page->lock and f2fs_lock_op */
2672 if (fio->need_lock == LOCK_REQ && !f2fs_trylock_op(fio->sbi))
2675 err = f2fs_get_dnode_of_data(&dn, folio->index, LOOKUP_NODE);
2679 fio->old_blkaddr = dn.data_blkaddr;
2681 /* This page is already truncated */
2682 if (fio->old_blkaddr == NULL_ADDR) {
2683 folio_clear_uptodate(folio);
2684 clear_page_private_gcing(folio_page(folio, 0));
2688 if (__is_valid_data_blkaddr(fio->old_blkaddr) &&
2689 !f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
2690 DATA_GENERIC_ENHANCE)) {
2691 err = -EFSCORRUPTED;
2695 /* wait for GCed page writeback via META_MAPPING */
2697 f2fs_wait_on_block_writeback(inode, fio->old_blkaddr);
2700 * If the current allocation needs SSR,
2701 * it is better to do in-place writes for the updated data.
2704 (__is_valid_data_blkaddr(fio->old_blkaddr) &&
2705 need_inplace_update(fio))) {
2706 err = f2fs_encrypt_one_page(fio);
2710 folio_start_writeback(folio);
2711 f2fs_put_dnode(&dn);
2712 if (fio->need_lock == LOCK_REQ)
2713 f2fs_unlock_op(fio->sbi);
2714 err = f2fs_inplace_write_data(fio);
2716 if (fscrypt_inode_uses_fs_layer_crypto(inode))
2717 fscrypt_finalize_bounce_page(&fio->encrypted_page);
2718 folio_end_writeback(folio);
2720 set_inode_flag(inode, FI_UPDATE_WRITE);
2722 trace_f2fs_do_write_data_page(folio, IPU);
2726 if (fio->need_lock == LOCK_RETRY) {
2727 if (!f2fs_trylock_op(fio->sbi)) {
2731 fio->need_lock = LOCK_REQ;
2734 err = f2fs_get_node_info(fio->sbi, dn.nid, &ni, false);
2738 fio->version = ni.version;
2740 err = f2fs_encrypt_one_page(fio);
2744 folio_start_writeback(folio);
2746 if (fio->compr_blocks && fio->old_blkaddr == COMPRESS_ADDR)
2747 f2fs_i_compr_blocks_update(inode, fio->compr_blocks - 1, false);
2749 /* LFS mode write path */
2750 f2fs_outplace_write_data(&dn, fio);
2751 trace_f2fs_do_write_data_page(folio, OPU);
2752 set_inode_flag(inode, FI_APPEND_WRITE);
2754 clear_page_private_atomic(folio_page(folio, 0));
2756 f2fs_put_dnode(&dn);
2758 if (fio->need_lock == LOCK_REQ)
2759 f2fs_unlock_op(fio->sbi);
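/*
 * Write a single data page on behalf of ->writepage/->writepages. Handles
 * zeroing beyond EOF, dentry/quota pages under node_write protection, and
 * inline data, and falls back from LOCK_RETRY to LOCK_REQ when the dnode
 * lookup needs f2fs_lock_op(). Returns AOP_WRITEPAGE_ACTIVATE when the
 * page is redirtied.
 */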
2763 int f2fs_write_single_data_page(struct folio *folio, int *submitted,
2765 sector_t *last_block,
2766 struct writeback_control *wbc,
2767 enum iostat_type io_type,
2771 struct inode *inode = folio->mapping->host;
2772 struct page *page = folio_page(folio, 0);
2773 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2774 loff_t i_size = i_size_read(inode);
2775 const pgoff_t end_index = ((unsigned long long)i_size)
2777 loff_t psize = (loff_t)(folio->index + 1) << PAGE_SHIFT;
2778 unsigned offset = 0;
2779 bool need_balance_fs = false;
2780 bool quota_inode = IS_NOQUOTA(inode);
2782 struct f2fs_io_info fio = {
2784 .ino = inode->i_ino,
2787 .op_flags = wbc_to_write_flags(wbc),
2788 .old_blkaddr = NULL_ADDR,
2790 .encrypted_page = NULL,
2792 .compr_blocks = compr_blocks,
2793 .need_lock = compr_blocks ? LOCK_DONE : LOCK_RETRY,
2794 .meta_gc = f2fs_meta_inode_gc_required(inode) ? 1 : 0,
2798 .last_block = last_block,
2801 trace_f2fs_writepage(folio, DATA);
2803 /* we should bypass data pages to let the kworker jobs proceed */
2804 if (unlikely(f2fs_cp_error(sbi))) {
2805 mapping_set_error(folio->mapping, -EIO);
2807 * don't drop any dirty dentry pages, to keep the latest
2808 * directory structure.
2810 if (S_ISDIR(inode->i_mode) &&
2811 !is_sbi_flag_set(sbi, SBI_IS_CLOSE))
2814 /* keep data pages in remount-ro mode */
2815 if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_READONLY)
2820 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
2823 if (folio->index < end_index ||
2824 f2fs_verity_in_progress(inode) ||
2829 * If the offset is beyond the file size,
2830 * this page does not have to be written to disk.
2832 offset = i_size & (PAGE_SIZE - 1);
2833 if ((folio->index >= end_index + 1) || !offset)
2836 folio_zero_segment(folio, offset, folio_size(folio));
2838 /* Dentry/quota blocks are controlled by checkpoint */
2839 if (S_ISDIR(inode->i_mode) || quota_inode) {
2841 * We need to wait for node_write to avoid block allocation during
2842 * checkpoint. This can only happen to quota writes which can cause
2843 * the below discard race condition.
2846 f2fs_down_read(&sbi->node_write);
2848 fio.need_lock = LOCK_DONE;
2849 err = f2fs_do_write_data_page(&fio);
2852 f2fs_up_read(&sbi->node_write);
2857 if (!wbc->for_reclaim)
2858 need_balance_fs = true;
2859 else if (has_not_enough_free_secs(sbi, 0, 0))
2862 set_inode_flag(inode, FI_HOT_DATA);
2865 if (f2fs_has_inline_data(inode)) {
2866 err = f2fs_write_inline_data(inode, folio);
2871 if (err == -EAGAIN) {
2872 err = f2fs_do_write_data_page(&fio);
2873 if (err == -EAGAIN) {
2874 f2fs_bug_on(sbi, compr_blocks);
2875 fio.need_lock = LOCK_REQ;
2876 err = f2fs_do_write_data_page(&fio);
2881 file_set_keep_isize(inode);
2883 spin_lock(&F2FS_I(inode)->i_size_lock);
2884 if (F2FS_I(inode)->last_disk_size < psize)
2885 F2FS_I(inode)->last_disk_size = psize;
2886 spin_unlock(&F2FS_I(inode)->i_size_lock);
2890 if (err && err != -ENOENT)
2894 inode_dec_dirty_pages(inode);
2896 folio_clear_uptodate(folio);
2897 clear_page_private_gcing(page);
2900 if (wbc->for_reclaim) {
2901 f2fs_submit_merged_write_cond(sbi, NULL, page, 0, DATA);
2902 clear_inode_flag(inode, FI_HOT_DATA);
2903 f2fs_remove_dirty_inode(inode);
2906 folio_unlock(folio);
2907 if (!S_ISDIR(inode->i_mode) && !IS_NOQUOTA(inode) &&
2908 !F2FS_I(inode)->wb_task && allow_balance)
2909 f2fs_balance_fs(sbi, need_balance_fs);
2911 if (unlikely(f2fs_cp_error(sbi))) {
2912 f2fs_submit_merged_write(sbi, DATA);
2914 f2fs_submit_merged_ipu_write(sbi, bio, NULL);
2919 *submitted = fio.submitted;
2924 folio_redirty_for_writepage(wbc, folio);
2926 * pageout() in MM translates EAGAIN, so it calls handle_write_error()
2927 * -> mapping_set_error() -> set_bit(AS_EIO, ...).
2928 * file_write_and_wait_range() will then see the EIO error, which is
2929 * critical for fsync() to report the atomic_write failure to userspace.
2931 if (!err || wbc->for_reclaim)
2932 return AOP_WRITEPAGE_ACTIVATE;
2933 folio_unlock(folio);
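/*
 * ->writepage entry point. Pages that belong to a compressed cluster are
 * redirtied here and left for ->writepages, which can write the whole
 * cluster at once.
 */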
2937 static int f2fs_write_data_page(struct page *page,
2938 struct writeback_control *wbc)
2940 struct folio *folio = page_folio(page);
2941 #ifdef CONFIG_F2FS_FS_COMPRESSION
2942 struct inode *inode = folio->mapping->host;
2944 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
2947 if (f2fs_compressed_file(inode)) {
2948 if (f2fs_is_compressed_cluster(inode, folio->index)) {
2949 folio_redirty_for_writepage(wbc, folio);
2950 return AOP_WRITEPAGE_ACTIVATE;
2956 return f2fs_write_single_data_page(folio, NULL, NULL, NULL,
2957 wbc, FS_DATA_IO, 0, true);
2961 * This function was copied from write_cache_pages in mm/page-writeback.c.
2962 * The major change is that it writes cold data pages separately from
2963 * warm/hot data pages.
2965 static int f2fs_write_cache_pages(struct address_space *mapping,
2966 struct writeback_control *wbc,
2967 enum iostat_type io_type)
2970 int done = 0, retry = 0;
2971 struct page *pages_local[F2FS_ONSTACK_PAGES];
2972 struct page **pages = pages_local;
2973 struct folio_batch fbatch;
2974 struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
2975 struct bio *bio = NULL;
2976 sector_t last_block;
2977 #ifdef CONFIG_F2FS_FS_COMPRESSION
2978 struct inode *inode = mapping->host;
2979 struct compress_ctx cc = {
2981 .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
2982 .cluster_size = F2FS_I(inode)->i_cluster_size,
2983 .cluster_idx = NULL_CLUSTER,
2987 .valid_nr_cpages = 0,
2990 .rlen = PAGE_SIZE * F2FS_I(inode)->i_cluster_size,
2994 int nr_folios, p, idx;
2996 unsigned int max_pages = F2FS_ONSTACK_PAGES;
2998 pgoff_t end; /* Inclusive */
3000 int range_whole = 0;
3006 #ifdef CONFIG_F2FS_FS_COMPRESSION
3007 if (f2fs_compressed_file(inode) &&
3008 1 << cc.log_cluster_size > F2FS_ONSTACK_PAGES) {
3009 pages = f2fs_kzalloc(sbi, sizeof(struct page *) <<
3010 cc.log_cluster_size, GFP_NOFS | __GFP_NOFAIL);
3011 max_pages = 1 << cc.log_cluster_size;
3015 folio_batch_init(&fbatch);
3017 if (get_dirty_pages(mapping->host) <=
3018 SM_I(F2FS_M_SB(mapping))->min_hot_blocks)
3019 set_inode_flag(mapping->host, FI_HOT_DATA);
3021 clear_inode_flag(mapping->host, FI_HOT_DATA);
3023 if (wbc->range_cyclic) {
3024 index = mapping->writeback_index; /* prev offset */
3027 index = wbc->range_start >> PAGE_SHIFT;
3028 end = wbc->range_end >> PAGE_SHIFT;
3029 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
3032 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
3033 tag = PAGECACHE_TAG_TOWRITE;
3035 tag = PAGECACHE_TAG_DIRTY;
3038 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
3039 tag_pages_for_writeback(mapping, index, end);
3041 while (!done && !retry && (index <= end)) {
3044 nr_folios = filemap_get_folios_tag(mapping, &index, end,
3046 if (nr_folios == 0) {
3052 for (i = 0; i < nr_folios; i++) {
3053 struct folio *folio = fbatch.folios[i];
3056 p = folio_nr_pages(folio);
3058 pages[nr_pages] = folio_page(folio, idx);
3060 if (++nr_pages == max_pages) {
3061 index = folio->index + idx + 1;
3062 folio_batch_release(&fbatch);
3068 folio_batch_release(&fbatch);
3071 for (i = 0; i < nr_pages; i++) {
3072 struct page *page = pages[i];
3073 struct folio *folio = page_folio(page);
3077 #ifdef CONFIG_F2FS_FS_COMPRESSION
3078 if (f2fs_compressed_file(inode)) {
3079 void *fsdata = NULL;
3083 ret = f2fs_init_compress_ctx(&cc);
3089 if (!f2fs_cluster_can_merge_page(&cc,
3091 ret = f2fs_write_multi_pages(&cc,
3092 &submitted, wbc, io_type);
3098 if (unlikely(f2fs_cp_error(sbi)))
3101 if (!f2fs_cluster_is_empty(&cc))
3104 if (f2fs_all_cluster_page_ready(&cc,
3105 pages, i, nr_pages, true))
3108 ret2 = f2fs_prepare_compress_overwrite(
3110 folio->index, &fsdata);
3116 (!f2fs_compress_write_end(inode,
3117 fsdata, folio->index, 1) ||
3118 !f2fs_all_cluster_page_ready(&cc,
3126 /* give priority to WB_SYNC threads */
3127 if (atomic_read(&sbi->wb_sync_req[DATA]) &&
3128 wbc->sync_mode == WB_SYNC_NONE) {
3132 #ifdef CONFIG_F2FS_FS_COMPRESSION
3135 done_index = folio->index;
3139 if (unlikely(folio->mapping != mapping)) {
3141 folio_unlock(folio);
3145 if (!folio_test_dirty(folio)) {
3146 /* someone wrote it for us */
3147 goto continue_unlock;
3150 if (folio_test_writeback(folio)) {
3151 if (wbc->sync_mode == WB_SYNC_NONE)
3152 goto continue_unlock;
3153 f2fs_wait_on_page_writeback(&folio->page, DATA, true, true);
3156 if (!folio_clear_dirty_for_io(folio))
3157 goto continue_unlock;
3159 #ifdef CONFIG_F2FS_FS_COMPRESSION
3160 if (f2fs_compressed_file(inode)) {
3162 f2fs_compress_ctx_add_page(&cc, folio);
3166 ret = f2fs_write_single_data_page(folio,
3167 &submitted, &bio, &last_block,
3168 wbc, io_type, 0, true);
3169 if (ret == AOP_WRITEPAGE_ACTIVATE)
3170 folio_unlock(folio);
3171 #ifdef CONFIG_F2FS_FS_COMPRESSION
3174 nwritten += submitted;
3175 wbc->nr_to_write -= submitted;
3177 if (unlikely(ret)) {
3179 * keep nr_to_write, since vfs uses this to
3180 * get # of written pages.
3182 if (ret == AOP_WRITEPAGE_ACTIVATE) {
3185 } else if (ret == -EAGAIN) {
3187 if (wbc->sync_mode == WB_SYNC_ALL) {
3188 f2fs_io_schedule_timeout(
3189 DEFAULT_IO_TIMEOUT);
3194 done_index = folio_next_index(folio);
3199 if (wbc->nr_to_write <= 0 &&
3200 wbc->sync_mode == WB_SYNC_NONE) {
3208 release_pages(pages, nr_pages);
3211 #ifdef CONFIG_F2FS_FS_COMPRESSION
3212 /* flush the remaining pages in the compress cluster */
3213 if (f2fs_compressed_file(inode) && !f2fs_cluster_is_empty(&cc)) {
3214 ret = f2fs_write_multi_pages(&cc, &submitted, wbc, io_type);
3215 nwritten += submitted;
3216 wbc->nr_to_write -= submitted;
3222 if (f2fs_compressed_file(inode))
3223 f2fs_destroy_compress_ctx(&cc, false);
3230 if (wbc->range_cyclic && !done)
3232 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
3233 mapping->writeback_index = done_index;
3236 f2fs_submit_merged_write_cond(F2FS_M_SB(mapping), mapping->host,
3238 /* submit cached bio of IPU write */
3240 f2fs_submit_merged_ipu_write(sbi, &bio, NULL);
3242 #ifdef CONFIG_F2FS_FS_COMPRESSION
3243 if (pages != pages_local)
3250 static inline bool __should_serialize_io(struct inode *inode,
3251 struct writeback_control *wbc)
3253 /* to avoid deadlock in the data flush path */
3254 if (F2FS_I(inode)->wb_task)
3257 if (!S_ISREG(inode->i_mode))
3259 if (IS_NOQUOTA(inode))
3262 if (f2fs_need_compress_data(inode))
3264 if (wbc->sync_mode != WB_SYNC_ALL)
3266 if (get_dirty_pages(inode) >= SM_I(F2FS_I_SB(inode))->min_seq_blocks)
3271 static int __f2fs_write_data_pages(struct address_space *mapping,
3272 struct writeback_control *wbc,
3273 enum iostat_type io_type)
3275 struct inode *inode = mapping->host;
3276 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3277 struct blk_plug plug;
3279 bool locked = false;
3281 /* deal with chardevs and other special files */
3282 if (!mapping->a_ops->writepage)
3285 /* skip writing if there is no dirty page in this inode */
3286 if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
3289 /* during POR, we don't need to trigger writepage at all. */
3290 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
3293 if ((S_ISDIR(inode->i_mode) || IS_NOQUOTA(inode)) &&
3294 wbc->sync_mode == WB_SYNC_NONE &&
3295 get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
3296 f2fs_available_free_memory(sbi, DIRTY_DENTS))
3299 /* skip writing during the file defragment preparation stage */
3300 if (is_inode_flag_set(inode, FI_SKIP_WRITES))
3303 trace_f2fs_writepages(mapping->host, wbc, DATA);
3305 /* to avoid splitting IOs due to mixed WB_SYNC_ALL and WB_SYNC_NONE */
3306 if (wbc->sync_mode == WB_SYNC_ALL)
3307 atomic_inc(&sbi->wb_sync_req[DATA]);
3308 else if (atomic_read(&sbi->wb_sync_req[DATA])) {
3309 /* to avoid potential deadlock */
3311 blk_finish_plug(current->plug);
3315 if (__should_serialize_io(inode, wbc)) {
3316 mutex_lock(&sbi->writepages);
3320 blk_start_plug(&plug);
3321 ret = f2fs_write_cache_pages(mapping, wbc, io_type);
3322 blk_finish_plug(&plug);
3325 mutex_unlock(&sbi->writepages);
3327 if (wbc->sync_mode == WB_SYNC_ALL)
3328 atomic_dec(&sbi->wb_sync_req[DATA]);
3330 * if some pages were truncated, we cannot guarantee that its mapping->host
3331 * can detect pending bios.
3334 f2fs_remove_dirty_inode(inode);
3338 wbc->pages_skipped += get_dirty_pages(inode);
3339 trace_f2fs_writepages(mapping->host, wbc, DATA);
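/*
 * ->writepages entry point: account the I/O as checkpoint data I/O when it
 * is issued by the checkpoint task, and as regular data I/O otherwise.
 */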
3343 static int f2fs_write_data_pages(struct address_space *mapping,
3344 struct writeback_control *wbc)
3346 struct inode *inode = mapping->host;
3348 return __f2fs_write_data_pages(mapping, wbc,
3349 F2FS_I(inode)->cp_task == current ?
3350 FS_CP_DATA_IO : FS_DATA_IO);
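/*
 * Called when a buffered write fails part way through: drop the page cache
 * and any blocks that were allocated beyond the old i_size. Quota files
 * are left untouched.
 */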
3353 void f2fs_write_failed(struct inode *inode, loff_t to)
3355 loff_t i_size = i_size_read(inode);
3357 if (IS_NOQUOTA(inode))
3360 /* In the fs-verity case, f2fs_end_enable_verity() does the truncate */
3361 if (to > i_size && !f2fs_verity_in_progress(inode)) {
3362 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3363 filemap_invalidate_lock(inode->i_mapping);
3365 truncate_pagecache(inode, i_size);
3366 f2fs_truncate_blocks(inode, i_size, true);
3368 filemap_invalidate_unlock(inode->i_mapping);
3369 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
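/*
 * Resolve the block address for a buffered write. Inline data may be read
 * back into the folio or converted to a regular block, and a new block is
 * reserved when the extent cache has no mapping; f2fs_map_lock() is taken
 * when the write may allocate blocks or convert inline data.
 */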
3373 static int prepare_write_begin(struct f2fs_sb_info *sbi,
3374 struct folio *folio, loff_t pos, unsigned int len,
3375 block_t *blk_addr, bool *node_changed)
3377 struct inode *inode = folio->mapping->host;
3378 pgoff_t index = folio->index;
3379 struct dnode_of_data dn;
3381 bool locked = false;
3382 int flag = F2FS_GET_BLOCK_PRE_AIO;
3386 * If a whole page is being written and we already preallocated all the
3387 * blocks, then there is no need to get a block address now.
3389 if (len == PAGE_SIZE && is_inode_flag_set(inode, FI_PREALLOCATED_ALL))
3392 /* f2fs_lock_op avoids race between write CP and convert_inline_page */
3393 if (f2fs_has_inline_data(inode)) {
3394 if (pos + len > MAX_INLINE_DATA(inode))
3395 flag = F2FS_GET_BLOCK_DEFAULT;
3396 f2fs_map_lock(sbi, flag);
3398 } else if ((pos & PAGE_MASK) >= i_size_read(inode)) {
3399 f2fs_map_lock(sbi, flag);
3404 /* check inline_data */
3405 ipage = f2fs_get_node_page(sbi, inode->i_ino);
3406 if (IS_ERR(ipage)) {
3407 err = PTR_ERR(ipage);
3411 set_new_dnode(&dn, inode, ipage, ipage, 0);
3413 if (f2fs_has_inline_data(inode)) {
3414 if (pos + len <= MAX_INLINE_DATA(inode)) {
3415 f2fs_do_read_inline_data(folio, ipage);
3416 set_inode_flag(inode, FI_DATA_EXIST);
3418 set_page_private_inline(ipage);
3421 err = f2fs_convert_inline_page(&dn, folio_page(folio, 0));
3422 if (err || dn.data_blkaddr != NULL_ADDR)
3426 if (!f2fs_lookup_read_extent_cache_block(inode, index,
3427 &dn.data_blkaddr)) {
3428 if (IS_DEVICE_ALIASING(inode)) {
3434 err = f2fs_reserve_block(&dn, index);
3439 err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
3440 if (!err && dn.data_blkaddr != NULL_ADDR)
3442 f2fs_put_dnode(&dn);
3443 f2fs_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO);
3444 WARN_ON(flag != F2FS_GET_BLOCK_PRE_AIO);
3450 /* convert_inline_page can make node_changed */
3451 *blk_addr = dn.data_blkaddr;
3452 *node_changed = dn.node_changed;
3454 f2fs_put_dnode(&dn);
3457 f2fs_map_unlock(sbi, flag);
3461 static int __find_data_block(struct inode *inode, pgoff_t index,
3464 struct dnode_of_data dn;
3468 ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
3470 return PTR_ERR(ipage);
3472 set_new_dnode(&dn, inode, ipage, ipage, 0);
3474 if (!f2fs_lookup_read_extent_cache_block(inode, index,
3475 &dn.data_blkaddr)) {
3477 err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
3479 dn.data_blkaddr = NULL_ADDR;
3483 *blk_addr = dn.data_blkaddr;
3484 f2fs_put_dnode(&dn);
3488 static int __reserve_data_block(struct inode *inode, pgoff_t index,
3489 block_t *blk_addr, bool *node_changed)
3491 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3492 struct dnode_of_data dn;
3496 f2fs_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO);
3498 ipage = f2fs_get_node_page(sbi, inode->i_ino);
3499 if (IS_ERR(ipage)) {
3500 err = PTR_ERR(ipage);
3503 set_new_dnode(&dn, inode, ipage, ipage, 0);
3505 if (!f2fs_lookup_read_extent_cache_block(dn.inode, index,
3507 err = f2fs_reserve_block(&dn, index);
3509 *blk_addr = dn.data_blkaddr;
3510 *node_changed = dn.node_changed;
3511 f2fs_put_dnode(&dn);
3514 f2fs_map_unlock(sbi, F2FS_GET_BLOCK_PRE_AIO);
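/*
 * write_begin for atomic (COW) writes: use the block already reserved in
 * the COW inode if there is one, otherwise reserve a new one there. When
 * the original inode still holds valid data for this index, its block
 * address is returned so the old contents can be read in first.
 */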
3518 static int prepare_atomic_write_begin(struct f2fs_sb_info *sbi,
3519 struct folio *folio, loff_t pos, unsigned int len,
3520 block_t *blk_addr, bool *node_changed, bool *use_cow)
3522 struct inode *inode = folio->mapping->host;
3523 struct inode *cow_inode = F2FS_I(inode)->cow_inode;
3524 pgoff_t index = folio->index;
3526 block_t ori_blk_addr = NULL_ADDR;
3528 /* If pos is beyond the end of file, reserve a new block in COW inode */
3529 if ((pos & PAGE_MASK) >= i_size_read(inode))
3532 /* Look for the block in COW inode first */
3533 err = __find_data_block(cow_inode, index, blk_addr);
3536 } else if (*blk_addr != NULL_ADDR) {
3541 if (is_inode_flag_set(inode, FI_ATOMIC_REPLACE))
3544 /* Look for the block in the original inode */
3545 err = __find_data_block(inode, index, &ori_blk_addr);
3550 /* Finally, we should reserve a new block in COW inode for the update */
3551 err = __reserve_data_block(cow_inode, index, blk_addr, node_changed);
3554 inc_atomic_write_cnt(inode);
3556 if (ori_blk_addr != NULL_ADDR)
3557 *blk_addr = ori_blk_addr;
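/*
 * ->write_begin: make sure a checkpoint is possible, convert inline data
 * when the write does not fit, lock the target folio, resolve its block
 * address (via the COW inode for atomic files), and read in the old block
 * when only part of the folio is being overwritten.
 */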
3561 static int f2fs_write_begin(struct file *file, struct address_space *mapping,
3562 loff_t pos, unsigned len, struct folio **foliop, void **fsdata)
3564 struct inode *inode = mapping->host;
3565 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3566 struct folio *folio;
3567 pgoff_t index = pos >> PAGE_SHIFT;
3568 bool need_balance = false;
3569 bool use_cow = false;
3570 block_t blkaddr = NULL_ADDR;
3573 trace_f2fs_write_begin(inode, pos, len);
3575 if (!f2fs_is_checkpoint_ready(sbi)) {
3581 * We should check this at this moment to avoid deadlock on inode page
3582 * and #0 page. The locking rule for inline_data conversion should be:
3583 * folio_lock(folio #0) -> folio_lock(inode_page)
3586 err = f2fs_convert_inline_inode(inode);
3591 #ifdef CONFIG_F2FS_FS_COMPRESSION
3592 if (f2fs_compressed_file(inode)) {
3598 if (len == PAGE_SIZE && !(f2fs_is_atomic_file(inode)))
3601 ret = f2fs_prepare_compress_overwrite(inode, &page,
3607 *foliop = page_folio(page);
3615 * Do not use FGP_STABLE, to avoid deadlock.
3616 * We will wait for writeback below, under our IO control.
3618 folio = __filemap_get_folio(mapping, index,
3619 FGP_LOCK | FGP_WRITE | FGP_CREAT, GFP_NOFS);
3620 if (IS_ERR(folio)) {
3621 err = PTR_ERR(folio);
3625 /* TODO: cluster can be compressed due to race with .writepage */
3629 if (f2fs_is_atomic_file(inode))
3630 err = prepare_atomic_write_begin(sbi, folio, pos, len,
3631 &blkaddr, &need_balance, &use_cow);
3633 err = prepare_write_begin(sbi, folio, pos, len,
3634 &blkaddr, &need_balance);
3638 if (need_balance && !IS_NOQUOTA(inode) &&
3639 has_not_enough_free_secs(sbi, 0, 0)) {
3640 folio_unlock(folio);
3641 f2fs_balance_fs(sbi, true);
3643 if (folio->mapping != mapping) {
3644 /* The folio got truncated from under us */
3645 folio_unlock(folio);
3651 f2fs_wait_on_page_writeback(&folio->page, DATA, false, true);
3653 if (len == folio_size(folio) || folio_test_uptodate(folio))
3656 if (!(pos & (PAGE_SIZE - 1)) && (pos + len) >= i_size_read(inode) &&
3657 !f2fs_verity_in_progress(inode)) {
3658 folio_zero_segment(folio, len, folio_size(folio));
3662 if (blkaddr == NEW_ADDR) {
3663 folio_zero_segment(folio, 0, folio_size(folio));
3664 folio_mark_uptodate(folio);
3666 if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
3667 DATA_GENERIC_ENHANCE_READ)) {
3668 err = -EFSCORRUPTED;
3671 err = f2fs_submit_page_read(use_cow ?
3672 F2FS_I(inode)->cow_inode : inode,
3673 folio, blkaddr, 0, true);
3678 if (unlikely(folio->mapping != mapping)) {
3679 folio_unlock(folio);
3683 if (unlikely(!folio_test_uptodate(folio))) {
3691 folio_unlock(folio);
3694 f2fs_write_failed(inode, pos + len);
3698 static int f2fs_write_end(struct file *file,
3699 struct address_space *mapping,
3700 loff_t pos, unsigned len, unsigned copied,
3701 struct folio *folio, void *fsdata)
3703 struct inode *inode = folio->mapping->host;
3705 trace_f2fs_write_end(inode, pos, len, copied);
3708 * This should come from len == PAGE_SIZE, so we expect copied to be
3709 * PAGE_SIZE as well. Otherwise, we treat it as zero copied and
3710 * let generic_perform_write() try to copy the data again via copied=0.
3712 if (!folio_test_uptodate(folio)) {
3713 if (unlikely(copied != len))
3716 folio_mark_uptodate(folio);
3719 #ifdef CONFIG_F2FS_FS_COMPRESSION
3720 /* overwrite compressed file */
3721 if (f2fs_compressed_file(inode) && fsdata) {
3722 f2fs_compress_write_end(inode, fsdata, folio->index, copied);
3723 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3725 if (pos + copied > i_size_read(inode) &&
3726 !f2fs_verity_in_progress(inode))
3727 f2fs_i_size_write(inode, pos + copied);
3735 folio_mark_dirty(folio);
3737 if (f2fs_is_atomic_file(inode))
3738 set_page_private_atomic(folio_page(folio, 0));
3740 if (pos + copied > i_size_read(inode) &&
3741 !f2fs_verity_in_progress(inode)) {
3742 f2fs_i_size_write(inode, pos + copied);
3743 if (f2fs_is_atomic_file(inode))
3744 f2fs_i_size_write(F2FS_I(inode)->cow_inode,
3748 folio_unlock(folio);
3750 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
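/*
 * ->invalidate_folio: drop the corresponding dirty accounting (meta, node
 * or data) for the folio being invalidated and clear all of its
 * page-private state. Partial invalidation is ignored for non-meta/node
 * folios.
 */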
3754 void f2fs_invalidate_folio(struct folio *folio, size_t offset, size_t length)
3756 struct inode *inode = folio->mapping->host;
3757 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3759 if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
3760 (offset || length != folio_size(folio)))
3763 if (folio_test_dirty(folio)) {
3764 if (inode->i_ino == F2FS_META_INO(sbi)) {
3765 dec_page_count(sbi, F2FS_DIRTY_META);
3766 } else if (inode->i_ino == F2FS_NODE_INO(sbi)) {
3767 dec_page_count(sbi, F2FS_DIRTY_NODES);
3769 inode_dec_dirty_pages(inode);
3770 f2fs_remove_dirty_inode(inode);
3773 clear_page_private_all(&folio->page);
3776 bool f2fs_release_folio(struct folio *folio, gfp_t wait)
3778 /* If this is a dirty folio, keep its private data */
3779 if (folio_test_dirty(folio))
3782 clear_page_private_all(&folio->page);
3786 static bool f2fs_dirty_data_folio(struct address_space *mapping,
3787 struct folio *folio)
3789 struct inode *inode = mapping->host;
3791 trace_f2fs_set_page_dirty(folio, DATA);
3793 if (!folio_test_uptodate(folio))
3794 folio_mark_uptodate(folio);
3795 BUG_ON(folio_test_swapcache(folio));
3797 if (filemap_dirty_folio(mapping, folio)) {
3798 f2fs_update_dirty_folio(inode, folio);
3805 static sector_t f2fs_bmap_compress(struct inode *inode, sector_t block)
3807 #ifdef CONFIG_F2FS_FS_COMPRESSION
3808 struct dnode_of_data dn;
3809 sector_t start_idx, blknr = 0;
3812 start_idx = round_down(block, F2FS_I(inode)->i_cluster_size);
3814 set_new_dnode(&dn, inode, NULL, NULL, 0);
3815 ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
3819 if (dn.data_blkaddr != COMPRESS_ADDR) {
3820 dn.ofs_in_node += block - start_idx;
3821 blknr = f2fs_data_blkaddr(&dn);
3822 if (!__is_valid_data_blkaddr(blknr))
3826 f2fs_put_dnode(&dn);
3834 static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
3836 struct inode *inode = mapping->host;
3839 if (f2fs_has_inline_data(inode))
3842 /* make sure blocks are allocated by writing back dirty pages */
3843 if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
3844 filemap_write_and_wait(mapping);
3846 /* the block number must be less than the F2FS max file blocks */
3847 if (unlikely(block >= max_file_blocks(inode)))
3850 if (f2fs_compressed_file(inode)) {
3851 blknr = f2fs_bmap_compress(inode, block);
3853 struct f2fs_map_blocks map;
3855 memset(&map, 0, sizeof(map));
3858 map.m_next_pgofs = NULL;
3859 map.m_seg_type = NO_CHECK_TYPE;
3861 if (!f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_BMAP))
3865 trace_f2fs_bmap(inode, block, blknr);
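/*
 * Rewrite a block range, one section at a time, with aligned OPU writes so
 * the data becomes section-aligned (used by swapfile activation when an
 * extent is misaligned). For each section a pinned section is allocated,
 * the pages are redirtied and the range is written back under
 * FI_ALIGNED_WRITE / FI_OPU_WRITE.
 */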
3870 static int f2fs_migrate_blocks(struct inode *inode, block_t start_blk,
3871 unsigned int blkcnt)
3873 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3874 unsigned int blkofs;
3875 unsigned int blk_per_sec = BLKS_PER_SEC(sbi);
3876 unsigned int end_blk = start_blk + blkcnt - 1;
3877 unsigned int secidx = start_blk / blk_per_sec;
3878 unsigned int end_sec;
3883 end_sec = end_blk / blk_per_sec;
3885 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3886 filemap_invalidate_lock(inode->i_mapping);
3888 set_inode_flag(inode, FI_ALIGNED_WRITE);
3889 set_inode_flag(inode, FI_OPU_WRITE);
3891 for (; secidx <= end_sec; secidx++) {
3892 unsigned int blkofs_end = secidx == end_sec ?
3893 end_blk % blk_per_sec : blk_per_sec - 1;
3895 f2fs_down_write(&sbi->pin_sem);
3897 ret = f2fs_allocate_pinning_section(sbi);
3899 f2fs_up_write(&sbi->pin_sem);
3903 set_inode_flag(inode, FI_SKIP_WRITES);
3905 for (blkofs = 0; blkofs <= blkofs_end; blkofs++) {
3907 unsigned int blkidx = secidx * blk_per_sec + blkofs;
3909 page = f2fs_get_lock_data_page(inode, blkidx, true);
3911 f2fs_up_write(&sbi->pin_sem);
3912 ret = PTR_ERR(page);
3916 set_page_dirty(page);
3917 f2fs_put_page(page, 1);
3920 clear_inode_flag(inode, FI_SKIP_WRITES);
3922 ret = filemap_fdatawrite(inode->i_mapping);
3924 f2fs_up_write(&sbi->pin_sem);
3931 clear_inode_flag(inode, FI_SKIP_WRITES);
3932 clear_inode_flag(inode, FI_OPU_WRITE);
3933 clear_inode_flag(inode, FI_ALIGNED_WRITE);
3935 filemap_invalidate_unlock(inode->i_mapping);
3936 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
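/*
 * Build the swap extent list for swapon: walk the file's mapped extents,
 * reject holes, migrate extents that are not section-aligned, and register
 * each aligned run with add_swap_extent().
 */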
3941 static int check_swap_activate(struct swap_info_struct *sis,
3942 struct file *swap_file, sector_t *span)
3944 struct address_space *mapping = swap_file->f_mapping;
3945 struct inode *inode = mapping->host;
3946 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3948 block_t last_lblock;
3950 block_t lowest_pblock = -1;
3951 block_t highest_pblock = 0;
3953 unsigned int nr_pblocks;
3954 unsigned int blks_per_sec = BLKS_PER_SEC(sbi);
3955 unsigned int not_aligned = 0;
3959 * Map all the blocks into the extent list. This code doesn't try
3963 last_lblock = F2FS_BYTES_TO_BLK(i_size_read(inode));
3965 while (cur_lblock < last_lblock && cur_lblock < sis->max) {
3966 struct f2fs_map_blocks map;
3970 memset(&map, 0, sizeof(map));
3971 map.m_lblk = cur_lblock;
3972 map.m_len = last_lblock - cur_lblock;
3973 map.m_next_pgofs = NULL;
3974 map.m_next_extent = NULL;
3975 map.m_seg_type = NO_CHECK_TYPE;
3976 map.m_may_create = false;
3978 ret = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_FIEMAP);
3983 if (!(map.m_flags & F2FS_MAP_FLAGS)) {
3984 f2fs_err(sbi, "Swapfile has holes");
3989 pblock = map.m_pblk;
3990 nr_pblocks = map.m_len;
3992 if ((pblock - SM_I(sbi)->main_blkaddr) % blks_per_sec ||
3993 nr_pblocks % blks_per_sec ||
3994 !f2fs_valid_pinned_area(sbi, pblock)) {
3995 bool last_extent = false;
3999 nr_pblocks = roundup(nr_pblocks, blks_per_sec);
4000 if (cur_lblock + nr_pblocks > sis->max)
4001 nr_pblocks -= blks_per_sec;
4003 /* this extent is the last one */
4005 nr_pblocks = last_lblock - cur_lblock;
4009 ret = f2fs_migrate_blocks(inode, cur_lblock,
4021 if (cur_lblock + nr_pblocks >= sis->max)
4022 nr_pblocks = sis->max - cur_lblock;
4024 if (cur_lblock) { /* exclude the header page */
4025 if (pblock < lowest_pblock)
4026 lowest_pblock = pblock;
4027 if (pblock + nr_pblocks - 1 > highest_pblock)
4028 highest_pblock = pblock + nr_pblocks - 1;
4032 * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
4034 ret = add_swap_extent(sis, cur_lblock, nr_pblocks, pblock);
4038 cur_lblock += nr_pblocks;
4041 *span = 1 + highest_pblock - lowest_pblock;
4042 if (cur_lblock == 0)
4043 cur_lblock = 1; /* force Empty message */
4044 sis->max = cur_lblock;
4045 sis->pages = cur_lblock - 1;
4046 sis->highest_bit = cur_lblock - 1;
4049 f2fs_warn(sbi, "Swapfile (%u) is not aligned to section: 1) creat(), 2) ioctl(F2FS_IOC_SET_PIN_FILE), 3) fallocate(%lu * N)",
4050 not_aligned, blks_per_sec * F2FS_BLKSIZE);
4054 static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
4057 struct inode *inode = file_inode(file);
4058 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4061 if (!S_ISREG(inode->i_mode))
4064 if (f2fs_readonly(sbi->sb))
4067 if (f2fs_lfs_mode(sbi) && !f2fs_sb_has_blkzoned(sbi)) {
4068 f2fs_err(sbi, "Swapfile not supported in LFS mode");
4072 ret = f2fs_convert_inline_inode(inode);
4076 if (!f2fs_disable_compressed_file(inode))
4079 ret = filemap_fdatawrite(inode->i_mapping);
4083 f2fs_precache_extents(inode);
4085 ret = check_swap_activate(sis, file, span);
4089 stat_inc_swapfile_inode(inode);
4090 set_inode_flag(inode, FI_PIN_FILE);
4091 f2fs_update_time(sbi, REQ_TIME);
4095 static void f2fs_swap_deactivate(struct file *file)
4097 struct inode *inode = file_inode(file);
4099 stat_dec_swapfile_inode(inode);
4100 clear_inode_flag(inode, FI_PIN_FILE);
4103 static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
4109 static void f2fs_swap_deactivate(struct file *file)
4114 const struct address_space_operations f2fs_dblock_aops = {
4115 .read_folio = f2fs_read_data_folio,
4116 .readahead = f2fs_readahead,
4117 .writepage = f2fs_write_data_page,
4118 .writepages = f2fs_write_data_pages,
4119 .write_begin = f2fs_write_begin,
4120 .write_end = f2fs_write_end,
4121 .dirty_folio = f2fs_dirty_data_folio,
4122 .migrate_folio = filemap_migrate_folio,
4123 .invalidate_folio = f2fs_invalidate_folio,
4124 .release_folio = f2fs_release_folio,
4126 .swap_activate = f2fs_swap_activate,
4127 .swap_deactivate = f2fs_swap_deactivate,
4130 void f2fs_clear_page_cache_dirty_tag(struct folio *folio)
4132 struct address_space *mapping = folio->mapping;
4133 unsigned long flags;
4135 xa_lock_irqsave(&mapping->i_pages, flags);
4136 __xa_clear_mark(&mapping->i_pages, folio->index,
4137 PAGECACHE_TAG_DIRTY);
4138 xa_unlock_irqrestore(&mapping->i_pages, flags);
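/*
 * Set up the slab cache and mempool used to attach post-read contexts
 * (decryption, decompression, verity) to read bios.
 */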
4141 int __init f2fs_init_post_read_processing(void)
4143 bio_post_read_ctx_cache =
4144 kmem_cache_create("f2fs_bio_post_read_ctx",
4145 sizeof(struct bio_post_read_ctx), 0, 0, NULL);
4146 if (!bio_post_read_ctx_cache)
4148 bio_post_read_ctx_pool =
4149 mempool_create_slab_pool(NUM_PREALLOC_POST_READ_CTXS,
4150 bio_post_read_ctx_cache);
4151 if (!bio_post_read_ctx_pool)
4152 goto fail_free_cache;
4156 kmem_cache_destroy(bio_post_read_ctx_cache);
4161 void f2fs_destroy_post_read_processing(void)
4163 mempool_destroy(bio_post_read_ctx_pool);
4164 kmem_cache_destroy(bio_post_read_ctx_cache);
4167 int f2fs_init_post_read_wq(struct f2fs_sb_info *sbi)
4169 if (!f2fs_sb_has_encrypt(sbi) &&
4170 !f2fs_sb_has_verity(sbi) &&
4171 !f2fs_sb_has_compression(sbi))
4174 sbi->post_read_wq = alloc_workqueue("f2fs_post_read_wq",
4175 WQ_UNBOUND | WQ_HIGHPRI,
4177 return sbi->post_read_wq ? 0 : -ENOMEM;
4180 void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi)
4182 if (sbi->post_read_wq)
4183 destroy_workqueue(sbi->post_read_wq);
4186 int __init f2fs_init_bio_entry_cache(void)
4188 bio_entry_slab = f2fs_kmem_cache_create("f2fs_bio_entry_slab",
4189 sizeof(struct bio_entry));
4190 return bio_entry_slab ? 0 : -ENOMEM;
4193 void f2fs_destroy_bio_entry_cache(void)
4195 kmem_cache_destroy(bio_entry_slab);
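/*
 * iomap_begin for direct I/O: translate the byte range into an f2fs block
 * mapping, clamp the length so inline-crypto DUNs stay contiguous, and
 * report the result as MAPPED, UNWRITTEN (preallocated) or a HOLE.
 */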
4198 static int f2fs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
4199 unsigned int flags, struct iomap *iomap,
4200 struct iomap *srcmap)
4202 struct f2fs_map_blocks map = {};
4203 pgoff_t next_pgofs = 0;
4206 map.m_lblk = F2FS_BYTES_TO_BLK(offset);
4207 map.m_len = F2FS_BYTES_TO_BLK(offset + length - 1) - map.m_lblk + 1;
4208 map.m_next_pgofs = &next_pgofs;
4209 map.m_seg_type = f2fs_rw_hint_to_seg_type(F2FS_I_SB(inode),
4210 inode->i_write_hint);
4211 if (flags & IOMAP_WRITE)
4212 map.m_may_create = true;
4214 err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_DIO);
4218 iomap->offset = F2FS_BLK_TO_BYTES(map.m_lblk);
4221 * When inline encryption is enabled, sometimes I/O to an encrypted file
4222 * has to be broken up to guarantee DUN contiguity. Handle this by
4223 * limiting the length of the mapping returned.
4225 map.m_len = fscrypt_limit_io_blocks(inode, map.m_lblk, map.m_len);
4228 * We should never see delalloc or compressed extents here based on
4229 * prior flushing and checks.
4231 if (WARN_ON_ONCE(map.m_pblk == COMPRESS_ADDR))
4234 if (map.m_flags & F2FS_MAP_MAPPED) {
4235 if (WARN_ON_ONCE(map.m_pblk == NEW_ADDR))
4238 iomap->length = F2FS_BLK_TO_BYTES(map.m_len);
4239 iomap->type = IOMAP_MAPPED;
4240 iomap->flags |= IOMAP_F_MERGED;
4241 iomap->bdev = map.m_bdev;
4242 iomap->addr = F2FS_BLK_TO_BYTES(map.m_pblk);
4244 if (flags & IOMAP_WRITE)
4247 if (map.m_pblk == NULL_ADDR) {
4248 iomap->length = F2FS_BLK_TO_BYTES(next_pgofs) -
4250 iomap->type = IOMAP_HOLE;
4251 } else if (map.m_pblk == NEW_ADDR) {
4252 iomap->length = F2FS_BLK_TO_BYTES(map.m_len);
4253 iomap->type = IOMAP_UNWRITTEN;
4255 f2fs_bug_on(F2FS_I_SB(inode), 1);
4257 iomap->addr = IOMAP_NULL_ADDR;
4260 if (map.m_flags & F2FS_MAP_NEW)
4261 iomap->flags |= IOMAP_F_NEW;
4262 if ((inode->i_state & I_DIRTY_DATASYNC) ||
4263 offset + length > i_size_read(inode))
4264 iomap->flags |= IOMAP_F_DIRTY;
4269 const struct iomap_ops f2fs_iomap_ops = {
4270 .iomap_begin = f2fs_iomap_begin,