// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ext4/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 *
 *  Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
 */

#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/time.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/dax.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/iomap.h>
#include <linux/iversion.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "truncate.h"

#include <trace/events/ext4.h>

static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw,
			     struct ext4_inode_info *ei)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	__u32 csum;
	__u16 dummy_csum = 0;
	int offset = offsetof(struct ext4_inode, i_checksum_lo);
	unsigned int csum_size = sizeof(dummy_csum);

	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw, offset);
	csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum, csum_size);
	offset += csum_size;
	csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
			   EXT4_GOOD_OLD_INODE_SIZE - offset);

	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
		offset = offsetof(struct ext4_inode, i_checksum_hi);
		csum = ext4_chksum(sbi, csum, (__u8 *)raw +
				   EXT4_GOOD_OLD_INODE_SIZE,
				   offset - EXT4_GOOD_OLD_INODE_SIZE);
		if (EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) {
			csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum,
					   csum_size);
			offset += csum_size;
		}
		csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
				   EXT4_INODE_SIZE(inode->i_sb) - offset);
	}

	return csum;
}

static int ext4_inode_csum_verify(struct inode *inode, struct ext4_inode *raw,
				  struct ext4_inode_info *ei)
{
	__u32 provided, calculated;

	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
	    cpu_to_le32(EXT4_OS_LINUX) ||
	    !ext4_has_metadata_csum(inode->i_sb))
		return 1;

	provided = le16_to_cpu(raw->i_checksum_lo);
	calculated = ext4_inode_csum(inode, raw, ei);
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
		provided |= ((__u32)le16_to_cpu(raw->i_checksum_hi)) << 16;
	else
		calculated &= 0xFFFF;

	return provided == calculated;
}

void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw,
			 struct ext4_inode_info *ei)
{
	__u32 csum;

	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
	    cpu_to_le32(EXT4_OS_LINUX) ||
	    !ext4_has_metadata_csum(inode->i_sb))
		return;

	csum = ext4_inode_csum(inode, raw, ei);
	raw->i_checksum_lo = cpu_to_le16(csum & 0xFFFF);
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
		raw->i_checksum_hi = cpu_to_le16(csum >> 16);
}
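
/*
 * Illustrative sketch (not part of this file): the three helpers above are
 * meant to bracket raw on-disk inode access. A reader verifies before
 * trusting the fields, and a writer recomputes the seeded checksum before
 * the buffer goes to the journal, roughly:
 *
 *	if (!ext4_inode_csum_verify(inode, raw_inode, ei))
 *		return -EFSBADCRC;		// checksum mismatch
 *	...update raw_inode fields...
 *	ext4_inode_csum_set(inode, raw_inode, ei);
 */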

static inline int ext4_begin_ordered_truncate(struct inode *inode,
					      loff_t new_size)
{
	trace_ext4_begin_ordered_truncate(inode, new_size);
	/*
	 * If jinode is zero, then we never opened the file for
	 * writing, so there's no need to call
	 * jbd2_journal_begin_ordered_truncate() since there are no
	 * outstanding writes we need to flush.
	 */
	if (!EXT4_I(inode)->jinode)
		return 0;
	return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode),
						   EXT4_I(inode)->jinode,
						   new_size);
}

static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
				  int pextents);

/*
 * Test whether an inode is a fast symlink.
 * A fast symlink has its symlink data stored in ext4_inode_info->i_data.
 */
int ext4_inode_is_fast_symlink(struct inode *inode)
{
	if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) {
		int ea_blocks = EXT4_I(inode)->i_file_acl ?
			EXT4_CLUSTER_SIZE(inode->i_sb) >> 9 : 0;

		if (ext4_has_inline_data(inode))
			return 0;

		return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
	}
	return S_ISLNK(inode->i_mode) && inode->i_size &&
	       (inode->i_size < EXT4_N_BLOCKS * 4);
}
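
/*
 * Informal example: with EXT4_N_BLOCKS == 15, a symlink target must be
 * shorter than 15 * 4 = 60 bytes to live directly in i_data; anything
 * longer is stored in an allocated block and is therefore a "slow" symlink.
 */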

/*
 * Called at the last iput() if i_nlink is zero.
 */
void ext4_evict_inode(struct inode *inode)
{
	handle_t *handle;
	int err;
	/*
	 * Credits for final inode cleanup and freeing:
	 * sb + inode (ext4_orphan_del()), block bitmap, group descriptor
	 * (xattr block freeing), bitmap, group descriptor (inode freeing)
	 */
	int extra_credits = 6;
	struct ext4_xattr_inode_array *ea_inode_array = NULL;
	bool freeze_protected = false;

	trace_ext4_evict_inode(inode);

	if (EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)
		ext4_evict_ea_inode(inode);
	if (inode->i_nlink) {
		truncate_inode_pages_final(&inode->i_data);

		goto no_delete;
	}

	if (is_bad_inode(inode))
		goto no_delete;
	dquot_initialize(inode);

	if (ext4_should_order_data(inode))
		ext4_begin_ordered_truncate(inode, 0);
	truncate_inode_pages_final(&inode->i_data);

	/*
	 * For inodes with journalled data, transaction commit could have
	 * dirtied the inode. And for inodes with dioread_nolock, the
	 * unwritten extent converting worker could merge extents and also
	 * have dirtied the inode. The flush worker ignores it because of the
	 * I_FREEING flag, but we still need to remove the inode from the
	 * writeback lists.
	 */
	if (!list_empty_careful(&inode->i_io_list))
		inode_io_list_del(inode);

	/*
	 * Protect us against freezing - iput() caller didn't have to have any
	 * protection against it. When we are in a running transaction though,
	 * we are already protected against freezing and we cannot grab further
	 * protection due to lock ordering constraints.
	 */
	if (!ext4_journal_current_handle()) {
		sb_start_intwrite(inode->i_sb);
		freeze_protected = true;
	}

	if (!IS_NOQUOTA(inode))
		extra_credits += EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb);

	/*
	 * Block bitmap, group descriptor, and inode are accounted in both
	 * ext4_blocks_for_truncate() and extra_credits. So subtract 3.
	 */
	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE,
			 ext4_blocks_for_truncate(inode) + extra_credits - 3);
	if (IS_ERR(handle)) {
		ext4_std_error(inode->i_sb, PTR_ERR(handle));
		/*
		 * If we're going to skip the normal cleanup, we still need to
		 * make sure that the in-core orphan linked list is properly
		 * cleaned up.
		 */
		ext4_orphan_del(NULL, inode);
		if (freeze_protected)
			sb_end_intwrite(inode->i_sb);
		goto no_delete;
	}

	if (IS_SYNC(inode))
		ext4_handle_sync(handle);

	/*
	 * Set inode->i_size to 0 before calling ext4_truncate(). We need
	 * special handling of symlinks here because i_size is used to
	 * determine whether ext4_inode_info->i_data contains symlink data or
	 * block mappings. Setting i_size to 0 will remove its fast symlink
	 * status. Erase i_data so that it becomes a valid empty block map.
	 */
	if (ext4_inode_is_fast_symlink(inode))
		memset(EXT4_I(inode)->i_data, 0, sizeof(EXT4_I(inode)->i_data));
	inode->i_size = 0;
	err = ext4_mark_inode_dirty(handle, inode);
	if (err) {
		ext4_warning(inode->i_sb,
			     "couldn't mark inode dirty (err %d)", err);
		goto stop_handle;
	}
	if (inode->i_blocks) {
		err = ext4_truncate(inode);
		if (err) {
			ext4_error_err(inode->i_sb, -err,
				       "couldn't truncate inode %lu (err %d)",
				       inode->i_ino, err);
			goto stop_handle;
		}
	}

	/* Remove xattr references. */
	err = ext4_xattr_delete_inode(handle, inode, &ea_inode_array,
				      extra_credits);
	if (err) {
		ext4_warning(inode->i_sb, "xattr delete (err %d)", err);
stop_handle:
		ext4_journal_stop(handle);
		ext4_orphan_del(NULL, inode);
		if (freeze_protected)
			sb_end_intwrite(inode->i_sb);
		ext4_xattr_inode_array_free(ea_inode_array);
		goto no_delete;
	}

	/*
	 * Kill off the orphan record which ext4_truncate created.
	 * AKPM: I think this can be inside the above `if'.
	 * Note that ext4_orphan_del() has to be able to cope with the
	 * deletion of a non-existent orphan - this is because we don't
	 * know if ext4_truncate() actually created an orphan record.
	 * (Well, we could do this if we need to, but heck - it works)
	 */
	ext4_orphan_del(handle, inode);
	EXT4_I(inode)->i_dtime	= (__u32)ktime_get_real_seconds();

	/*
	 * One subtle ordering requirement: if anything has gone wrong
	 * (transaction abort, IO errors, whatever), then we can still
	 * do these next steps (the fs will already have been marked as
	 * having errors), but we can't free the inode if the mark_dirty
	 * fails.
	 */
	if (ext4_mark_inode_dirty(handle, inode))
		/* If that failed, just do the required in-core inode clear. */
		ext4_clear_inode(inode);
	else
		ext4_free_inode(handle, inode);
	ext4_journal_stop(handle);
	if (freeze_protected)
		sb_end_intwrite(inode->i_sb);
	ext4_xattr_inode_array_free(ea_inode_array);
	return;
no_delete:
	/*
	 * Catch the case where something else accidentally dirtied the
	 * evicting inode, which could cause inode use-after-free issues
	 * later.
	 */
	WARN_ON_ONCE(!list_empty_careful(&inode->i_io_list));

	if (!list_empty(&EXT4_I(inode)->i_fc_list))
		ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_NOMEM, NULL);
	ext4_clear_inode(inode);	/* We must guarantee clearing of inode... */
}

#ifdef CONFIG_QUOTA
qsize_t *ext4_get_reserved_space(struct inode *inode)
{
	return &EXT4_I(inode)->i_reserved_quota;
}
#endif

/*
 * Called with i_data_sem down, which is important since we can call
 * ext4_discard_preallocations() from here.
 */
void ext4_da_update_reserve_space(struct inode *inode,
					int used, int quota_claim)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);

	spin_lock(&ei->i_block_reservation_lock);
	trace_ext4_da_update_reserve_space(inode, used, quota_claim);
	if (unlikely(used > ei->i_reserved_data_blocks)) {
		ext4_warning(inode->i_sb, "%s: ino %lu, used %d "
			 "with only %d reserved data blocks",
			 __func__, inode->i_ino, used,
			 ei->i_reserved_data_blocks);
		WARN_ON(1);
		used = ei->i_reserved_data_blocks;
	}

	/* Update per-inode reservations */
	ei->i_reserved_data_blocks -= used;
	percpu_counter_sub(&sbi->s_dirtyclusters_counter, used);

	spin_unlock(&ei->i_block_reservation_lock);

	/* Update quota subsystem for data blocks */
	if (quota_claim)
		dquot_claim_block(inode, EXT4_C2B(sbi, used));
	else {
		/*
		 * We did fallocate with an offset that is already delayed
		 * allocated. So on delayed allocated writeback we should
		 * not re-claim the quota for fallocated blocks.
		 */
		dquot_release_reservation_block(inode, EXT4_C2B(sbi, used));
	}

	/*
	 * If we have done all the pending block allocations and if
	 * there aren't any writers on the inode, we can discard the
	 * inode's preallocations.
	 */
	if ((ei->i_reserved_data_blocks == 0) &&
	    !inode_is_open_for_write(inode))
		ext4_discard_preallocations(inode, 0);
}
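
/*
 * Informal example of the accounting above: if a delalloc write reserved 8
 * clusters and writeback then maps 5 of them, we drop i_reserved_data_blocks
 * and s_dirtyclusters_counter by 5 and either claim those 5 clusters against
 * quota (quota_claim != 0) or release the reservation when the blocks were
 * already charged at fallocate time.
 */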

static int __check_block_validity(struct inode *inode, const char *func,
				unsigned int line,
				struct ext4_map_blocks *map)
{
	if (ext4_has_feature_journal(inode->i_sb) &&
	    (inode->i_ino ==
	     le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum)))
		return 0;
	if (!ext4_inode_block_valid(inode, map->m_pblk, map->m_len)) {
		ext4_error_inode(inode, func, line, map->m_pblk,
				 "lblock %lu mapped to illegal pblock %llu "
				 "(length %d)", (unsigned long) map->m_lblk,
				 map->m_pblk, map->m_len);
		return -EFSCORRUPTED;
	}
	return 0;
}

int ext4_issue_zeroout(struct inode *inode, ext4_lblk_t lblk, ext4_fsblk_t pblk,
		       ext4_lblk_t len)
{
	int ret;

	if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode))
		return fscrypt_zeroout_range(inode, lblk, pblk, len);

	ret = sb_issue_zeroout(inode->i_sb, pblk, len, GFP_NOFS);
	if (ret > 0)
		ret = 0;

	return ret;
}

#define check_block_validity(inode, map)	\
	__check_block_validity((inode), __func__, __LINE__, (map))

#ifdef ES_AGGRESSIVE_TEST
static void ext4_map_blocks_es_recheck(handle_t *handle,
				       struct inode *inode,
				       struct ext4_map_blocks *es_map,
				       struct ext4_map_blocks *map,
				       int flags)
{
	int retval;

	map->m_flags = 0;
	/*
	 * There is a race window in which the result is not the same,
	 * e.g. xfstests #223 when dioread_nolock is enabled. The reason
	 * is that we look up a block mapping in the extent status tree
	 * without taking i_data_sem, so in the meantime the unwritten
	 * extent could be converted.
	 */
	down_read(&EXT4_I(inode)->i_data_sem);
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, 0);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, 0);
	}
	up_read((&EXT4_I(inode)->i_data_sem));

	/*
	 * We don't check m_len because the extent will be collapsed in the
	 * status tree, so the m_len values might not be equal.
	 */
	if (es_map->m_lblk != map->m_lblk ||
	    es_map->m_flags != map->m_flags ||
	    es_map->m_pblk != map->m_pblk) {
		printk("ES cache assertion failed for inode: %lu "
		       "es_cached ex [%d/%d/%llu/%x] != "
		       "found ex [%d/%d/%llu/%x] retval %d flags %x\n",
		       inode->i_ino, es_map->m_lblk, es_map->m_len,
		       es_map->m_pblk, es_map->m_flags, map->m_lblk,
		       map->m_len, map->m_pblk, map->m_flags,
		       retval, flags);
	}
}
#endif /* ES_AGGRESSIVE_TEST */

/*
 * The ext4_map_blocks() function tries to look up the requested blocks,
 * and returns if the blocks are already mapped.
 *
 * Otherwise it takes the write lock of i_data_sem, allocates blocks,
 * stores the allocated blocks in the result buffer head, and marks it
 * mapped.
 *
 * If the file is extent-based, it will call ext4_ext_map_blocks();
 * otherwise it calls ext4_ind_map_blocks() to handle indirect-mapped
 * files.
 *
 * On success, it returns the number of blocks being mapped or allocated. If
 * create==0 and the blocks are pre-allocated and unwritten, the resulting
 * @map is marked as unwritten. If create == 1, it will mark @map as mapped.
 *
 * It returns 0 if a plain lookup failed (blocks have not been allocated); in
 * that case, @map is returned as unmapped but we still fill in map->m_len to
 * indicate the length of the hole starting at map->m_lblk.
 *
 * It returns the error in case of allocation failure.
 */
int ext4_map_blocks(handle_t *handle, struct inode *inode,
		    struct ext4_map_blocks *map, int flags)
{
	struct extent_status es;
	int retval;
	int ret = 0;
#ifdef ES_AGGRESSIVE_TEST
	struct ext4_map_blocks orig_map;

	memcpy(&orig_map, map, sizeof(*map));
#endif

	map->m_flags = 0;
	ext_debug(inode, "flag 0x%x, max_blocks %u, logical block %lu\n",
		  flags, map->m_len, (unsigned long) map->m_lblk);

	/*
	 * ext4_map_blocks returns an int, and m_len is an unsigned int
	 */
	if (unlikely(map->m_len > INT_MAX))
		map->m_len = INT_MAX;

	/* We can only handle block numbers below EXT_MAX_BLOCKS */
	if (unlikely(map->m_lblk >= EXT_MAX_BLOCKS))
		return -EFSCORRUPTED;

	/* Lookup extent status tree firstly */
	if (!(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY) &&
	    ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es)) {
		if (ext4_es_is_written(&es) || ext4_es_is_unwritten(&es)) {
			map->m_pblk = ext4_es_pblock(&es) +
					map->m_lblk - es.es_lblk;
			map->m_flags |= ext4_es_is_written(&es) ?
					EXT4_MAP_MAPPED : EXT4_MAP_UNWRITTEN;
			retval = es.es_len - (map->m_lblk - es.es_lblk);
			if (retval > map->m_len)
				retval = map->m_len;
			map->m_len = retval;
		} else if (ext4_es_is_delayed(&es) || ext4_es_is_hole(&es)) {
			map->m_pblk = 0;
			retval = es.es_len - (map->m_lblk - es.es_lblk);
			if (retval > map->m_len)
				retval = map->m_len;
			map->m_len = retval;
			retval = 0;
		} else {
			BUG();
		}

		if (flags & EXT4_GET_BLOCKS_CACHED_NOWAIT)
			return retval;
#ifdef ES_AGGRESSIVE_TEST
		ext4_map_blocks_es_recheck(handle, inode, map,
					   &orig_map, flags);
#endif
		goto found;
	}
	/*
	 * In the no-wait cache query mode, there is nothing more we can do
	 * if we cannot find the extent in the cache.
	 */
	if (flags & EXT4_GET_BLOCKS_CACHED_NOWAIT)
		return 0;

	/*
	 * Try to see if we can get the block without requesting a new
	 * file system block.
	 */
	down_read(&EXT4_I(inode)->i_data_sem);
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, 0);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, 0);
	}
	if (retval > 0) {
		unsigned int status;

		if (unlikely(retval != map->m_len)) {
			ext4_warning(inode->i_sb,
				     "ES len assertion failed for inode "
				     "%lu: retval %d != map->m_len %d",
				     inode->i_ino, retval, map->m_len);
			WARN_ON(1);
		}

		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
		if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
		    !(status & EXTENT_STATUS_WRITTEN) &&
		    ext4_es_scan_range(inode, &ext4_es_is_delayed, map->m_lblk,
				       map->m_lblk + map->m_len - 1))
			status |= EXTENT_STATUS_DELAYED;
		ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
				      map->m_pblk, status);
	}
	up_read((&EXT4_I(inode)->i_data_sem));

found:
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;
	}

	/* If it is only a block(s) look up */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
		return retval;

	/*
	 * Return if the blocks have already been allocated.
	 *
	 * Note that if blocks have been preallocated,
	 * ext4_ext_get_block() returns the create = 0
	 * case with the buffer head unmapped.
	 */
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
		/*
		 * If we need to convert the extent to unwritten
		 * we continue and do the actual work in
		 * ext4_ext_map_blocks()
		 */
		if (!(flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN))
			return retval;

	/*
	 * Here we clear m_flags because after allocating a new extent,
	 * it will be set again.
	 */
	map->m_flags &= ~EXT4_MAP_FLAGS;

	/*
	 * Allocating new blocks and/or writing to an unwritten extent
	 * will possibly result in updating i_data, so we take
	 * the write lock of i_data_sem, and call get_block()
	 * with create == 1 flag.
	 */
	down_write(&EXT4_I(inode)->i_data_sem);

	/*
	 * We need to check for EXT4 here because migrate
	 * could have changed the inode type in between.
	 */
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, flags);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, flags);

		if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
			/*
			 * We allocated new blocks which will result in
			 * i_data's format changing.  Force the migrate
			 * to fail by clearing migrate flags.
			 */
			ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
		}
	}

	if (retval > 0) {
		unsigned int status;

		if (unlikely(retval != map->m_len)) {
			ext4_warning(inode->i_sb,
				     "ES len assertion failed for inode "
				     "%lu: retval %d != map->m_len %d",
				     inode->i_ino, retval, map->m_len);
			WARN_ON(1);
		}

		/*
		 * We have to zeroout blocks before inserting them into extent
		 * status tree. Otherwise someone could look them up there and
		 * use them before they are really zeroed. We also have to
		 * unmap metadata before zeroing as otherwise writeback can
		 * overwrite zeros with stale data from block device.
		 */
		if (flags & EXT4_GET_BLOCKS_ZERO &&
		    map->m_flags & EXT4_MAP_MAPPED &&
		    map->m_flags & EXT4_MAP_NEW) {
			ret = ext4_issue_zeroout(inode, map->m_lblk,
						 map->m_pblk, map->m_len);
			if (ret) {
				retval = ret;
				goto out_sem;
			}
		}

		/*
		 * If the extent has been zeroed out, we don't need to update
		 * extent status tree.
		 */
		if ((flags & EXT4_GET_BLOCKS_PRE_IO) &&
		    ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es)) {
			if (ext4_es_is_written(&es))
				goto out_sem;
		}
		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
		if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
		    !(status & EXTENT_STATUS_WRITTEN) &&
		    ext4_es_scan_range(inode, &ext4_es_is_delayed, map->m_lblk,
				       map->m_lblk + map->m_len - 1))
			status |= EXTENT_STATUS_DELAYED;
		ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
				      map->m_pblk, status);
	}

out_sem:
	up_write(&EXT4_I(inode)->i_data_sem);
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;

		/*
		 * Inodes with freshly allocated blocks where contents will be
		 * visible after transaction commit must be on transaction's
		 * ordered data list.
		 */
		if (map->m_flags & EXT4_MAP_NEW &&
		    !(map->m_flags & EXT4_MAP_UNWRITTEN) &&
		    !(flags & EXT4_GET_BLOCKS_ZERO) &&
		    !ext4_is_quota_file(inode) &&
		    ext4_should_order_data(inode)) {
			loff_t start_byte =
				(loff_t)map->m_lblk << inode->i_blkbits;
			loff_t length = (loff_t)map->m_len << inode->i_blkbits;

			if (flags & EXT4_GET_BLOCKS_IO_SUBMIT)
				ret = ext4_jbd2_inode_add_wait(handle, inode,
						start_byte, length);
			else
				ret = ext4_jbd2_inode_add_write(handle, inode,
						start_byte, length);
			if (ret)
				return ret;
		}
	}
	if (retval > 0 && (map->m_flags & EXT4_MAP_UNWRITTEN ||
				map->m_flags & EXT4_MAP_MAPPED))
		ext4_fc_track_range(handle, inode, map->m_lblk,
					map->m_lblk + map->m_len - 1);
	if (retval < 0)
		ext_debug(inode, "failed with err %d\n", retval);
	return retval;
}
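
/*
 * Illustrative sketch (not part of this file): a lookup-only call maps one
 * logical block without allocating, matching the semantics documented above:
 *
 *	struct ext4_map_blocks map = { .m_lblk = lblk, .m_len = 1 };
 *
 *	ret = ext4_map_blocks(NULL, inode, &map, 0);	// lookup only
 *	if (ret > 0 && (map.m_flags & EXT4_MAP_MAPPED))
 *		pblk = map.m_pblk;
 *	else if (ret == 0)
 *		;	// hole of map.m_len blocks starting at lblk
 */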

/*
 * Update EXT4_MAP_FLAGS in bh->b_state. For buffer heads attached to pages
 * we have to be careful as someone else may be manipulating b_state as well.
 */
static void ext4_update_bh_state(struct buffer_head *bh, unsigned long flags)
{
	unsigned long old_state;
	unsigned long new_state;

	flags &= EXT4_MAP_FLAGS;

	/* Dummy buffer_head? Set non-atomically. */
	if (!bh->b_page) {
		bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | flags;
		return;
	}
	/*
	 * Someone else may be modifying b_state. Be careful! This is ugly but
	 * once we get rid of using bh as a container for mapping information
	 * to pass to / from get_block functions, this can go away.
	 */
	old_state = READ_ONCE(bh->b_state);
	do {
		new_state = (old_state & ~EXT4_MAP_FLAGS) | flags;
	} while (unlikely(!try_cmpxchg(&bh->b_state, &old_state, new_state)));
}

static int _ext4_get_block(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh, int flags)
{
	struct ext4_map_blocks map;
	int ret = 0;

	if (ext4_has_inline_data(inode))
		return -ERANGE;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;

	ret = ext4_map_blocks(ext4_journal_current_handle(), inode, &map,
			      flags);
	if (ret > 0) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		ext4_update_bh_state(bh, map.m_flags);
		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
		ret = 0;
	} else if (ret == 0) {
		/* hole case, need to fill in bh->b_size */
		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
	}
	return ret;
}

int ext4_get_block(struct inode *inode, sector_t iblock,
		   struct buffer_head *bh, int create)
{
	return _ext4_get_block(inode, iblock, bh,
			       create ? EXT4_GET_BLOCKS_CREATE : 0);
}
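
/*
 * Informal note: ext4_get_block() follows the get_block_t convention used by
 * the generic buffer-head helpers (for example __block_write_begin(), used
 * later in this file), which call it once for each block-sized region that
 * needs a disk mapping.
 */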

/*
 * Get block function used when preparing for a buffered write, where we
 * require creating an unwritten extent if blocks haven't been allocated.
 * The extent will be converted to written after the IO is complete.
 */
int ext4_get_block_unwritten(struct inode *inode, sector_t iblock,
			     struct buffer_head *bh_result, int create)
{
	int ret = 0;

	ext4_debug("ext4_get_block_unwritten: inode %lu, create flag %d\n",
		   inode->i_ino, create);
	ret = _ext4_get_block(inode, iblock, bh_result,
			      EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT);

	/*
	 * If the buffer is marked unwritten, mark it as new to make sure it is
	 * zeroed out correctly in case of partial writes. Otherwise, there is
	 * a chance of stale data getting exposed.
	 */
	if (ret == 0 && buffer_unwritten(bh_result))
		set_buffer_new(bh_result);

	return ret;
}

/* Maximum number of blocks we map for direct IO at once. */
#define DIO_MAX_BLOCKS 4096

/*
 * `handle' can be NULL if create is zero
 */
struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
				ext4_lblk_t block, int map_flags)
{
	struct ext4_map_blocks map;
	struct buffer_head *bh;
	int create = map_flags & EXT4_GET_BLOCKS_CREATE;
	bool nowait = map_flags & EXT4_GET_BLOCKS_CACHED_NOWAIT;
	int err;

	ASSERT((EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
		    || handle != NULL || create == 0);
	ASSERT(create == 0 || !nowait);

	map.m_lblk = block;
	map.m_len = 1;
	err = ext4_map_blocks(handle, inode, &map, map_flags);

	if (err == 0)
		return create ? ERR_PTR(-ENOSPC) : NULL;
	if (err < 0)
		return ERR_PTR(err);

	if (nowait)
		return sb_find_get_block(inode->i_sb, map.m_pblk);

	bh = sb_getblk(inode->i_sb, map.m_pblk);
	if (unlikely(!bh))
		return ERR_PTR(-ENOMEM);
	if (map.m_flags & EXT4_MAP_NEW) {
		ASSERT(create != 0);
		ASSERT((EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
			    || (handle != NULL));

		/*
		 * Now that we do not always journal data, we should
		 * keep in mind whether this should always journal the
		 * new buffer as metadata.  For now, regular file
		 * writes use ext4_get_block instead, so it's not a
		 * problem.
		 */
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		err = ext4_journal_get_create_access(handle, inode->i_sb, bh,
						     EXT4_JTR_NONE);
		if (unlikely(err)) {
			unlock_buffer(bh);
			goto errout;
		}
		if (!buffer_uptodate(bh)) {
			memset(bh->b_data, 0, inode->i_sb->s_blocksize);
			set_buffer_uptodate(bh);
		}
		unlock_buffer(bh);
		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (unlikely(err))
			goto errout;
	} else
		BUFFER_TRACE(bh, "not a new buffer");
	return bh;
errout:
	brelse(bh);
	return ERR_PTR(err);
}

struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
			       ext4_lblk_t block, int map_flags)
{
	struct buffer_head *bh;
	int ret;

	bh = ext4_getblk(handle, inode, block, map_flags);
	if (IS_ERR(bh))
		return bh;
	if (!bh || ext4_buffer_uptodate(bh))
		return bh;

	ret = ext4_read_bh_lock(bh, REQ_META | REQ_PRIO, true);
	if (ret) {
		put_bh(bh);
		return ERR_PTR(ret);
	}
	return bh;
}
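
/*
 * Illustrative sketch (not part of this file): callers that need the block
 * contents use ext4_bread(), which reads the buffer when necessary:
 *
 *	bh = ext4_bread(handle, dir, 0, 0);
 *	if (IS_ERR(bh))
 *		return PTR_ERR(bh);
 *	if (!bh)
 *		return -EFSCORRUPTED;	// unexpected hole
 *	...use bh->b_data...
 *	brelse(bh);
 */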

/* Read a contiguous batch of blocks. */
int ext4_bread_batch(struct inode *inode, ext4_lblk_t block, int bh_count,
		     bool wait, struct buffer_head **bhs)
{
	int i, err;

	for (i = 0; i < bh_count; i++) {
		bhs[i] = ext4_getblk(NULL, inode, block + i, 0 /* map_flags */);
		if (IS_ERR(bhs[i])) {
			err = PTR_ERR(bhs[i]);
			bh_count = i;
			goto out_brelse;
		}
	}

	for (i = 0; i < bh_count; i++)
		/* Note that NULL bhs[i] is valid because of holes. */
		if (bhs[i] && !ext4_buffer_uptodate(bhs[i]))
			ext4_read_bh_lock(bhs[i], REQ_META | REQ_PRIO, false);

	if (!wait)
		return 0;

	for (i = 0; i < bh_count; i++)
		if (bhs[i])
			wait_on_buffer(bhs[i]);

	for (i = 0; i < bh_count; i++) {
		if (bhs[i] && !buffer_uptodate(bhs[i])) {
			err = -EIO;
			goto out_brelse;
		}
	}
	return 0;

out_brelse:
	for (i = 0; i < bh_count; i++) {
		brelse(bhs[i]);
		bhs[i] = NULL;
	}
	return err;
}
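
/*
 * Informal usage note (an assumption about callers, not stated in this
 * file): directory lookup code can use ext4_bread_batch() with wait == false
 * to kick off readahead of several metadata blocks at once, then call
 * wait_on_buffer() itself only on the buffer it actually needs.
 */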

int ext4_walk_page_buffers(handle_t *handle, struct inode *inode,
			   struct buffer_head *head,
			   unsigned from,
			   unsigned to,
			   int *partial,
			   int (*fn)(handle_t *handle, struct inode *inode,
				     struct buffer_head *bh))
{
	struct buffer_head *bh;
	unsigned block_start, block_end;
	unsigned blocksize = head->b_size;
	int err, ret = 0;
	struct buffer_head *next;

	for (bh = head, block_start = 0;
	     ret == 0 && (bh != head || !block_start);
	     block_start = block_end, bh = next) {
		next = bh->b_this_page;
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (partial && !buffer_uptodate(bh))
				*partial = 1;
			continue;
		}
		err = (*fn)(handle, inode, bh);
		if (!ret)
			ret = err;
	}
	return ret;
}

/*
 * Helper for handling dirtying of journalled data. We also mark the folio as
 * dirty so that the writeback code knows that this page (and inode) contains
 * dirty data. ext4_writepages() then commits the appropriate transaction to
 * make the data stable on disk.
 */
static int ext4_dirty_journalled_data(handle_t *handle, struct buffer_head *bh)
{
	folio_mark_dirty(bh->b_folio);
	return ext4_handle_dirty_metadata(handle, NULL, bh);
}

int do_journal_get_write_access(handle_t *handle, struct inode *inode,
				struct buffer_head *bh)
{
	int dirty = buffer_dirty(bh);
	int ret;

	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	/*
	 * __block_write_begin() could have dirtied some buffers. Clean
	 * the dirty bit as jbd2_journal_get_write_access() could complain
	 * otherwise about fs integrity issues. Setting of the dirty bit
	 * by __block_write_begin() isn't a real problem here as we clear
	 * the bit before releasing a page lock and thus writeback cannot
	 * ever write the buffer.
	 */
	if (dirty)
		clear_buffer_dirty(bh);
	BUFFER_TRACE(bh, "get write access");
	ret = ext4_journal_get_write_access(handle, inode->i_sb, bh,
					    EXT4_JTR_NONE);
	if (!ret && dirty)
		ret = ext4_dirty_journalled_data(handle, bh);
	return ret;
}

#ifdef CONFIG_FS_ENCRYPTION
static int ext4_block_write_begin(struct folio *folio, loff_t pos, unsigned len,
				  get_block_t *get_block)
{
	unsigned from = pos & (PAGE_SIZE - 1);
	unsigned to = from + len;
	struct inode *inode = folio->mapping->host;
	unsigned block_start, block_end;
	sector_t block;
	int err = 0;
	unsigned blocksize = inode->i_sb->s_blocksize;
	unsigned bbits;
	struct buffer_head *bh, *head, *wait[2];
	int nr_wait = 0;
	int i;

	BUG_ON(!folio_test_locked(folio));
	BUG_ON(from > PAGE_SIZE);
	BUG_ON(to > PAGE_SIZE);
	BUG_ON(from > to);

	head = folio_buffers(folio);
	if (!head)
		head = create_empty_buffers(folio, blocksize, 0);
	bbits = ilog2(blocksize);
	block = (sector_t)folio->index << (PAGE_SHIFT - bbits);

	for (bh = head, block_start = 0; bh != head || !block_start;
	    block++, block_start = block_end, bh = bh->b_this_page) {
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (folio_test_uptodate(folio)) {
				set_buffer_uptodate(bh);
			}
			continue;
		}
		if (buffer_new(bh))
			clear_buffer_new(bh);
		if (!buffer_mapped(bh)) {
			WARN_ON(bh->b_size != blocksize);
			err = get_block(inode, block, bh, 1);
			if (err)
				break;
			if (buffer_new(bh)) {
				if (folio_test_uptodate(folio)) {
					clear_buffer_new(bh);
					set_buffer_uptodate(bh);
					mark_buffer_dirty(bh);
					continue;
				}
				if (block_end > to || block_start < from)
					folio_zero_segments(folio, to,
							    block_end,
							    block_start, from);
				continue;
			}
		}
		if (folio_test_uptodate(folio)) {
			set_buffer_uptodate(bh);
			continue;
		}
		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
		    !buffer_unwritten(bh) &&
		    (block_start < from || block_end > to)) {
			ext4_read_bh_lock(bh, 0, false);
			wait[nr_wait++] = bh;
		}
	}
	/*
	 * If we issued read requests, let them complete.
	 */
	for (i = 0; i < nr_wait; i++) {
		wait_on_buffer(wait[i]);
		if (!buffer_uptodate(wait[i]))
			err = -EIO;
	}
	if (unlikely(err)) {
		folio_zero_new_buffers(folio, from, to);
	} else if (fscrypt_inode_uses_fs_layer_crypto(inode)) {
		for (i = 0; i < nr_wait; i++) {
			int err2;

			err2 = fscrypt_decrypt_pagecache_blocks(folio,
						blocksize, bh_offset(wait[i]));
			if (err2) {
				clear_buffer_uptodate(wait[i]);
				err = err2;
			}
		}
	}

	return err;
}
#endif

/*
 * To preserve ordering, it is essential that the hole instantiation and
 * the data write be encapsulated in a single transaction.  We cannot
 * close off a transaction and start a new one between the ext4_get_block()
 * and the ext4_write_end().  So doing the jbd2_journal_start at the start of
 * ext4_write_begin() is the right place.
 */
static int ext4_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len,
			    struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	int ret, needed_blocks;
	handle_t *handle;
	int retries = 0;
	struct folio *folio;
	pgoff_t index;
	unsigned from, to;

	if (unlikely(ext4_forced_shutdown(inode->i_sb)))
		return -EIO;

	trace_ext4_write_begin(inode, pos, len);
	/*
	 * Reserve one block more for addition to orphan list in case
	 * we allocate blocks but write fails for some reason
	 */
	needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
	index = pos >> PAGE_SHIFT;
	from = pos & (PAGE_SIZE - 1);
	to = from + len;

	if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
		ret = ext4_try_to_write_inline_data(mapping, inode, pos, len,
						    pagep);
		if (ret < 0)
			return ret;
		if (ret == 1)
			return 0;
	}

	/*
	 * __filemap_get_folio() can take a long time if the
	 * system is thrashing due to memory pressure, or if the folio
	 * is being written back.  So grab it first before we start
	 * the transaction handle.  This also allows us to allocate
	 * the folio (if needed) without using GFP_NOFS.
	 */
retry_grab:
	folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
					mapping_gfp_mask(mapping));
	if (IS_ERR(folio))
		return PTR_ERR(folio);
	/*
	 * The same as page allocation, we prealloc buffer heads before
	 * starting the handle.
	 */
	if (!folio_buffers(folio))
		create_empty_buffers(folio, inode->i_sb->s_blocksize, 0);

	folio_unlock(folio);

retry_journal:
	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, needed_blocks);
	if (IS_ERR(handle)) {
		folio_put(folio);
		return PTR_ERR(handle);
	}

	folio_lock(folio);
	if (folio->mapping != mapping) {
		/* The folio got truncated from under us */
		folio_unlock(folio);
		folio_put(folio);
		ext4_journal_stop(handle);
		goto retry_grab;
	}
	/* In case writeback began while the folio was unlocked */
	folio_wait_stable(folio);

#ifdef CONFIG_FS_ENCRYPTION
	if (ext4_should_dioread_nolock(inode))
		ret = ext4_block_write_begin(folio, pos, len,
					     ext4_get_block_unwritten);
	else
		ret = ext4_block_write_begin(folio, pos, len, ext4_get_block);
#else
	if (ext4_should_dioread_nolock(inode))
		ret = __block_write_begin(&folio->page, pos, len,
					  ext4_get_block_unwritten);
	else
		ret = __block_write_begin(&folio->page, pos, len, ext4_get_block);
#endif
	if (!ret && ext4_should_journal_data(inode)) {
		ret = ext4_walk_page_buffers(handle, inode,
					     folio_buffers(folio), from, to,
					     NULL, do_journal_get_write_access);
	}

	if (ret) {
		bool extended = (pos + len > inode->i_size) &&
				!ext4_verity_in_progress(inode);

		folio_unlock(folio);
		/*
		 * __block_write_begin may have instantiated a few blocks
		 * outside i_size.  Trim these off again. Don't need
		 * i_size_read because we hold i_rwsem.
		 *
		 * Add inode to orphan list in case we crash before
		 * truncate finishes
		 */
		if (extended && ext4_can_truncate(inode))
			ext4_orphan_add(handle, inode);

		ext4_journal_stop(handle);
		if (extended) {
			ext4_truncate_failed_write(inode);
			/*
			 * If truncate failed early the inode might
			 * still be on the orphan list; we need to
			 * make sure the inode is removed from the
			 * orphan list in that case.
			 */
			if (inode->i_nlink)
				ext4_orphan_del(NULL, inode);
		}

		if (ret == -ENOSPC &&
		    ext4_should_retry_alloc(inode->i_sb, &retries))
			goto retry_journal;
		folio_put(folio);
		return ret;
	}
	*pagep = &folio->page;
	return 0;
}

/* For write_end() in data=journal mode */
static int write_end_fn(handle_t *handle, struct inode *inode,
			struct buffer_head *bh)
{
	int ret;

	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	set_buffer_uptodate(bh);
	ret = ext4_dirty_journalled_data(handle, bh);
	clear_buffer_meta(bh);
	clear_buffer_prio(bh);
	return ret;
}

/*
 * We need to pick up the new inode size which generic_commit_write gave us.
 * `file' can be NULL - eg, when called from page_symlink().
 *
 * ext4 never places buffers on inode->i_mapping->i_private_list.  Metadata
 * buffers are managed internally.
 */
static int ext4_write_end(struct file *file,
			  struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	struct folio *folio = page_folio(page);
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	loff_t old_size = inode->i_size;
	int ret = 0, ret2;
	int i_size_changed = 0;
	bool verity = ext4_verity_in_progress(inode);

	trace_ext4_write_end(inode, pos, len, copied);

	if (ext4_has_inline_data(inode) &&
	    ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA))
		return ext4_write_inline_data_end(inode, pos, len, copied,
						  folio);

	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
	/*
	 * it's important to update i_size while still holding folio lock:
	 * page writeout could otherwise come in and zero beyond i_size.
	 *
	 * If FS_IOC_ENABLE_VERITY is running on this inode, then Merkle tree
	 * blocks are being written past EOF, so skip the i_size update.
	 */
	if (!verity)
		i_size_changed = ext4_update_inode_size(inode, pos + copied);
	folio_unlock(folio);
	folio_put(folio);

	if (old_size < pos && !verity)
		pagecache_isize_extended(inode, old_size, pos);
	/*
	 * Don't mark the inode dirty under folio lock. First, it unnecessarily
	 * makes the holding time of folio lock longer. Second, it forces lock
	 * ordering of folio lock and transaction start for journaling
	 * filesystems.
	 */
	if (i_size_changed)
		ret = ext4_mark_inode_dirty(handle, inode);

	if (pos + len > inode->i_size && !verity && ext4_can_truncate(inode))
		/* If we have allocated more blocks than we copied, we
		 * will have blocks allocated outside inode->i_size,
		 * so truncate them.
		 */
		ext4_orphan_add(handle, inode);

	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;

	if (pos + len > inode->i_size && !verity) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}

/*
 * This is a private version of folio_zero_new_buffers() which doesn't
 * set the buffer to be dirty, since in data=journalled mode we need
 * to call ext4_dirty_journalled_data() instead.
 */
static void ext4_journalled_zero_new_buffers(handle_t *handle,
					    struct inode *inode,
					    struct folio *folio,
					    unsigned from, unsigned to)
{
	unsigned int block_start = 0, block_end;
	struct buffer_head *head, *bh;

	bh = head = folio_buffers(folio);
	do {
		block_end = block_start + bh->b_size;
		if (buffer_new(bh)) {
			if (block_end > from && block_start < to) {
				if (!folio_test_uptodate(folio)) {
					unsigned start, size;

					start = max(from, block_start);
					size = min(to, block_end) - start;

					folio_zero_range(folio, start, size);
					write_end_fn(handle, inode, bh);
				}
				clear_buffer_new(bh);
			}
		}
		block_start = block_end;
		bh = bh->b_this_page;
	} while (bh != head);
}

static int ext4_journalled_write_end(struct file *file,
				     struct address_space *mapping,
				     loff_t pos, unsigned len, unsigned copied,
				     struct page *page, void *fsdata)
{
	struct folio *folio = page_folio(page);
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	loff_t old_size = inode->i_size;
	int ret = 0, ret2;
	int partial = 0;
	unsigned from, to;
	int size_changed = 0;
	bool verity = ext4_verity_in_progress(inode);

	trace_ext4_journalled_write_end(inode, pos, len, copied);
	from = pos & (PAGE_SIZE - 1);
	to = from + len;

	BUG_ON(!ext4_handle_valid(handle));

	if (ext4_has_inline_data(inode))
		return ext4_write_inline_data_end(inode, pos, len, copied,
						  folio);

	if (unlikely(copied < len) && !folio_test_uptodate(folio)) {
		copied = 0;
		ext4_journalled_zero_new_buffers(handle, inode, folio,
						 from, to);
	} else {
		if (unlikely(copied < len))
			ext4_journalled_zero_new_buffers(handle, inode, folio,
							 from + copied, to);
		ret = ext4_walk_page_buffers(handle, inode,
					     folio_buffers(folio),
					     from, from + copied, &partial,
					     write_end_fn);
		if (!partial)
			folio_mark_uptodate(folio);
	}
	if (!verity)
		size_changed = ext4_update_inode_size(inode, pos + copied);
	EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
	folio_unlock(folio);
	folio_put(folio);

	if (old_size < pos && !verity)
		pagecache_isize_extended(inode, old_size, pos);

	if (size_changed) {
		ret2 = ext4_mark_inode_dirty(handle, inode);
		if (!ret)
			ret = ret2;
	}

	if (pos + len > inode->i_size && !verity && ext4_can_truncate(inode))
		/* If we have allocated more blocks than we copied, we
		 * will have blocks allocated outside inode->i_size,
		 * so truncate them.
		 */
		ext4_orphan_add(handle, inode);

	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;

	if (pos + len > inode->i_size && !verity) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}

/*
 * Reserve space for a single cluster
 */
static int ext4_da_reserve_space(struct inode *inode)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);
	int ret;

	/*
	 * We will charge metadata quota at writeout time; this saves
	 * us from metadata over-estimation, though we may go over by
	 * a small amount in the end.  Here we just reserve for data.
	 */
	ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1));
	if (ret)
		return ret;

	spin_lock(&ei->i_block_reservation_lock);
	if (ext4_claim_free_clusters(sbi, 1, 0)) {
		spin_unlock(&ei->i_block_reservation_lock);
		dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
		return -ENOSPC;
	}
	ei->i_reserved_data_blocks++;
	trace_ext4_da_reserve_space(inode);
	spin_unlock(&ei->i_block_reservation_lock);

	return 0;	/* success */
}

void ext4_da_release_space(struct inode *inode, int to_free)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);

	if (!to_free)
		return;		/* Nothing to release, exit */

	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);

	trace_ext4_da_release_space(inode, to_free);
	if (unlikely(to_free > ei->i_reserved_data_blocks)) {
		/*
		 * If there aren't enough reserved blocks, then the
		 * counter is messed up somewhere.  Since this
		 * function is called from the invalidate page path,
		 * it's harmless to return without any action.
		 */
		ext4_warning(inode->i_sb, "ext4_da_release_space: "
			 "ino %lu, to_free %d with only %d reserved "
			 "data blocks", inode->i_ino, to_free,
			 ei->i_reserved_data_blocks);
		WARN_ON(1);
		to_free = ei->i_reserved_data_blocks;
	}
	ei->i_reserved_data_blocks -= to_free;

	/* update fs dirty data blocks counter */
	percpu_counter_sub(&sbi->s_dirtyclusters_counter, to_free);

	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

	dquot_release_reservation_block(inode, EXT4_C2B(sbi, to_free));
}
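
/*
 * Informal note on pairing: ext4_da_reserve_space() charges one cluster to
 * i_reserved_data_blocks and s_dirtyclusters_counter at delalloc write time;
 * that reservation is later consumed by ext4_da_update_reserve_space() when
 * blocks are actually mapped, or returned by ext4_da_release_space() when
 * the dirty data is invalidated without ever being written.
 */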

/*
 * Delayed allocation stuff
 */

struct mpage_da_data {
	/* These are input fields for ext4_do_writepages() */
	struct inode *inode;
	struct writeback_control *wbc;
	unsigned int can_map:1;	/* Can writepages call map blocks? */

	/* These are internal state of ext4_do_writepages() */
	pgoff_t first_page;	/* The first page to write */
	pgoff_t next_page;	/* Current page to examine */
	pgoff_t last_page;	/* Last page to examine */
	/*
	 * Extent to map - this can be after first_page because that can be
	 * fully mapped. We somewhat abuse m_flags to store whether the extent
	 * is delalloc or unwritten.
	 */
	struct ext4_map_blocks map;
	struct ext4_io_submit io_submit;	/* IO submission data */
	unsigned int do_map:1;
	unsigned int scanned_until_end:1;
	unsigned int journalled_more_data:1;
};

static void mpage_release_unused_pages(struct mpage_da_data *mpd,
				       bool invalidate)
{
	unsigned nr, i;
	pgoff_t index, end;
	struct folio_batch fbatch;
	struct inode *inode = mpd->inode;
	struct address_space *mapping = inode->i_mapping;

	/* This is necessary when next_page == 0. */
	if (mpd->first_page >= mpd->next_page)
		return;

	mpd->scanned_until_end = 0;
	index = mpd->first_page;
	end   = mpd->next_page - 1;
	if (invalidate) {
		ext4_lblk_t start, last;
		start = index << (PAGE_SHIFT - inode->i_blkbits);
		last = end << (PAGE_SHIFT - inode->i_blkbits);

		/*
		 * avoid racing with extent status tree scans made by
		 * ext4_insert_delayed_block()
		 */
		down_write(&EXT4_I(inode)->i_data_sem);
		ext4_es_remove_extent(inode, start, last - start + 1);
		up_write(&EXT4_I(inode)->i_data_sem);
	}

	folio_batch_init(&fbatch);
	while (index <= end) {
		nr = filemap_get_folios(mapping, &index, end, &fbatch);
		if (nr == 0)
			break;
		for (i = 0; i < nr; i++) {
			struct folio *folio = fbatch.folios[i];

			if (folio->index < mpd->first_page)
				continue;
			if (folio_next_index(folio) - 1 > end)
				continue;
			BUG_ON(!folio_test_locked(folio));
			BUG_ON(folio_test_writeback(folio));
			if (invalidate) {
				if (folio_mapped(folio))
					folio_clear_dirty_for_io(folio);
				block_invalidate_folio(folio, 0,
						folio_size(folio));
				folio_clear_uptodate(folio);
			}
			folio_unlock(folio);
		}
		folio_batch_release(&fbatch);
	}
}

static void ext4_print_free_blocks(struct inode *inode)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct super_block *sb = inode->i_sb;
	struct ext4_inode_info *ei = EXT4_I(inode);

	ext4_msg(sb, KERN_CRIT, "Total free blocks count %lld",
	       EXT4_C2B(EXT4_SB(inode->i_sb),
			ext4_count_free_clusters(sb)));
	ext4_msg(sb, KERN_CRIT, "Free/Dirty block details");
	ext4_msg(sb, KERN_CRIT, "free_blocks=%lld",
	       (long long) EXT4_C2B(EXT4_SB(sb),
		percpu_counter_sum(&sbi->s_freeclusters_counter)));
	ext4_msg(sb, KERN_CRIT, "dirty_blocks=%lld",
	       (long long) EXT4_C2B(EXT4_SB(sb),
		percpu_counter_sum(&sbi->s_dirtyclusters_counter)));
	ext4_msg(sb, KERN_CRIT, "Block reservation details");
	ext4_msg(sb, KERN_CRIT, "i_reserved_data_blocks=%u",
		 ei->i_reserved_data_blocks);
	return;
}

/*
 * ext4_insert_delayed_block - adds a delayed block to the extents status
 *                             tree, incrementing the reserved cluster/block
 *                             count or making a pending reservation
 *                             where needed
 *
 * @inode - file containing the newly added block
 * @lblk - logical block to be added
 *
 * Returns 0 on success, negative error code on failure.
 */
static int ext4_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	int ret;
	bool allocated = false;

	/*
	 * If the cluster containing lblk is shared with a delayed,
	 * written, or unwritten extent in a bigalloc file system, it's
	 * already been accounted for and does not need to be reserved.
	 * A pending reservation must be made for the cluster if it's
	 * shared with a written or unwritten extent and doesn't already
	 * have one.  Written and unwritten extents can be purged from the
	 * extents status tree if the system is under memory pressure, so
	 * it's necessary to examine the extent tree if a search of the
	 * extents status tree doesn't get a match.
	 */
	if (sbi->s_cluster_ratio == 1) {
		ret = ext4_da_reserve_space(inode);
		if (ret != 0)   /* ENOSPC */
			return ret;
	} else {   /* bigalloc */
		if (!ext4_es_scan_clu(inode, &ext4_es_is_delonly, lblk)) {
			if (!ext4_es_scan_clu(inode,
					      &ext4_es_is_mapped, lblk)) {
				ret = ext4_clu_mapped(inode,
						      EXT4_B2C(sbi, lblk));
				if (ret < 0)
					return ret;
				if (ret == 0) {
					ret = ext4_da_reserve_space(inode);
					if (ret != 0)   /* ENOSPC */
						return ret;
				} else {
					allocated = true;
				}
			} else {
				allocated = true;
			}
		}
	}

	ext4_es_insert_delayed_block(inode, lblk, allocated);
	return 0;
}
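
/*
 * Informal bigalloc example: with a cluster ratio of 16, a delayed write to
 * lblk 20 lands in cluster 1 (blocks 16-31). If that cluster already backs
 * a written or unwritten extent, no new cluster reservation is needed and
 * the block is inserted with allocated == true.
 */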

/*
 * This function borrows code from the very beginning of ext4_map_blocks(),
 * but assumes that the caller is in the delayed write path. It looks up
 * the requested blocks and sets the buffer delay bit under the protection
 * of i_data_sem.
 */
static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
			      struct ext4_map_blocks *map,
			      struct buffer_head *bh)
{
	struct extent_status es;
	int retval;
	sector_t invalid_block = ~((sector_t) 0xffff);
#ifdef ES_AGGRESSIVE_TEST
	struct ext4_map_blocks orig_map;

	memcpy(&orig_map, map, sizeof(*map));
#endif

	if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))
		invalid_block = ~0;

	map->m_flags = 0;
	ext_debug(inode, "max_blocks %u, logical block %lu\n", map->m_len,
		  (unsigned long) map->m_lblk);

	/* Lookup extent status tree firstly */
	if (ext4_es_lookup_extent(inode, iblock, NULL, &es)) {
		if (ext4_es_is_hole(&es)) {
			retval = 0;
			down_read(&EXT4_I(inode)->i_data_sem);
			goto add_delayed;
		}

		/*
		 * Delayed extent could be allocated by fallocate.
		 * So we need to check it.
		 */
		if (ext4_es_is_delayed(&es) && !ext4_es_is_unwritten(&es)) {
			map_bh(bh, inode->i_sb, invalid_block);
			set_buffer_new(bh);
			set_buffer_delay(bh);
			return 0;
		}

		map->m_pblk = ext4_es_pblock(&es) + iblock - es.es_lblk;
		retval = es.es_len - (iblock - es.es_lblk);
		if (retval > map->m_len)
			retval = map->m_len;
		map->m_len = retval;
		if (ext4_es_is_written(&es))
			map->m_flags |= EXT4_MAP_MAPPED;
		else if (ext4_es_is_unwritten(&es))
			map->m_flags |= EXT4_MAP_UNWRITTEN;
		else
			BUG();

#ifdef ES_AGGRESSIVE_TEST
		ext4_map_blocks_es_recheck(NULL, inode, map, &orig_map, 0);
#endif
		return retval;
	}

	/*
	 * Try to see if we can get the block without requesting a new
	 * file system block.
	 */
	down_read(&EXT4_I(inode)->i_data_sem);
	if (ext4_has_inline_data(inode))
		retval = 0;
	else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		retval = ext4_ext_map_blocks(NULL, inode, map, 0);
	else
		retval = ext4_ind_map_blocks(NULL, inode, map, 0);

add_delayed:
	if (retval == 0) {
		int ret;

		/*
		 * XXX: __block_prepare_write() unmaps passed block,
		 * is it OK?
		 */

		ret = ext4_insert_delayed_block(inode, map->m_lblk);
		if (ret != 0) {
			retval = ret;
			goto out_unlock;
		}

		map_bh(bh, inode->i_sb, invalid_block);
		set_buffer_new(bh);
		set_buffer_delay(bh);
	} else if (retval > 0) {
		unsigned int status;

		if (unlikely(retval != map->m_len)) {
			ext4_warning(inode->i_sb,
				     "ES len assertion failed for inode "
				     "%lu: retval %d != map->m_len %d",
				     inode->i_ino, retval, map->m_len);
			WARN_ON(1);
		}

		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
		ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
				      map->m_pblk, status);
	}

out_unlock:
	up_read((&EXT4_I(inode)->i_data_sem));

	return retval;
}

/*
 * This is a special get_block_t callback which is used by
 * ext4_da_write_begin().  It will either return a mapped block or
 * reserve space for a single block.
 *
 * For a delayed buffer_head we have BH_Mapped, BH_New, BH_Delay set,
 * with b_blocknr = -1 and b_bdev initialized properly.
 *
 * For an unwritten buffer_head we have BH_Mapped, BH_New, BH_Unwritten set,
 * with b_blocknr set to the physical block that maps the unwritten extent
 * and b_bdev initialized properly.
 */
int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh, int create)
{
	struct ext4_map_blocks map;
	int ret = 0;

	BUG_ON(create == 0);
	BUG_ON(bh->b_size != inode->i_sb->s_blocksize);

	map.m_lblk = iblock;
	map.m_len = 1;

	/*
	 * First, we need to know whether the block is already allocated;
	 * preallocated blocks are unmapped but should be treated
	 * the same as allocated blocks.
	 */
	ret = ext4_da_map_blocks(inode, iblock, &map, bh);
	if (ret <= 0)
		return ret;

	map_bh(bh, inode->i_sb, map.m_pblk);
	ext4_update_bh_state(bh, map.m_flags);

	if (buffer_unwritten(bh)) {
		/* A delayed write to unwritten bh should be marked
		 * new and mapped.  Mapped ensures that we don't do
		 * get_block multiple times when we write to the same
		 * offset and new ensures that we do proper zero out
		 * for partial write.
		 */
		set_buffer_new(bh);
		set_buffer_mapped(bh);
	}
	return 0;
}
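
/*
 * Informal note: ext4_da_write_begin() passes this callback to the generic
 * write_begin helper, so a plain buffered write dirties the page while only
 * a reservation (BH_Delay) is taken; real block allocation is deferred to
 * writeback, where mpage_map_one_extent() below calls ext4_map_blocks()
 * with EXT4_GET_BLOCKS_DELALLOC_RESERVE.
 */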

static void mpage_folio_done(struct mpage_da_data *mpd, struct folio *folio)
{
	mpd->first_page += folio_nr_pages(folio);
	folio_unlock(folio);
}

static int mpage_submit_folio(struct mpage_da_data *mpd, struct folio *folio)
{
	size_t len;
	loff_t size;
	int err;

	BUG_ON(folio->index != mpd->first_page);
	folio_clear_dirty_for_io(folio);
	/*
	 * We have to be very careful here!  Nothing protects the writeback
	 * path against i_size changes and the page can be writeably mapped
	 * into page tables. So an application can be growing i_size and
	 * writing data through mmap while writeback runs.
	 * folio_clear_dirty_for_io() write-protects our page in page tables
	 * and the page cannot get written to again until we release the
	 * folio lock. So only after folio_clear_dirty_for_io() are we safe
	 * to sample i_size for ext4_bio_write_folio() to zero-out the tail
	 * of the written page. We rely on the barrier provided by
	 * folio_test_clear_dirty() in folio_clear_dirty_for_io() to make
	 * sure i_size is really sampled only after page tables are updated.
	 */
	size = i_size_read(mpd->inode);
	len = folio_size(folio);
	if (folio_pos(folio) + len > size &&
	    !ext4_verity_in_progress(mpd->inode))
		len = size & ~PAGE_MASK;
	err = ext4_bio_write_folio(&mpd->io_submit, folio, len);
	if (!err)
		mpd->wbc->nr_to_write--;

	return err;
}

#define BH_FLAGS (BIT(BH_Unwritten) | BIT(BH_Delay))

/*
 * mballoc gives us at most this number of blocks...
 * XXX: That seems to be only a limitation of ext4_mb_normalize_request().
 * The rest of mballoc seems to handle chunks up to full group size.
 */
#define MAX_WRITEPAGES_EXTENT_LEN 2048

/*
 * mpage_add_bh_to_extent - try to add bh to extent of blocks to map
 *
 * @mpd - extent of blocks
 * @lblk - logical number of the block in the file
 * @bh - buffer head we want to add to the extent
 *
 * The function is used to collect contiguous blocks in the same state. If the
 * buffer doesn't require mapping for writeback and we haven't started the
 * extent of buffers to map yet, the function returns 'true' immediately - the
 * caller can write the buffer right away. Otherwise the function returns true
 * if the block has been added to the extent, false if the block couldn't be
 * added.
 */
static bool mpage_add_bh_to_extent(struct mpage_da_data *mpd, ext4_lblk_t lblk,
				   struct buffer_head *bh)
{
	struct ext4_map_blocks *map = &mpd->map;

	/* Buffer that doesn't need mapping for writeback? */
	if (!buffer_dirty(bh) || !buffer_mapped(bh) ||
	    (!buffer_delay(bh) && !buffer_unwritten(bh))) {
		/* So far no extent to map => we write the buffer right away */
		if (map->m_len == 0)
			return true;
		return false;
	}

	/* First block in the extent? */
	if (map->m_len == 0) {
		/* We cannot map unless handle is started... */
		if (!mpd->do_map)
			return false;
		map->m_lblk = lblk;
		map->m_len = 1;
		map->m_flags = bh->b_state & BH_FLAGS;
		return true;
	}

	/* Don't go larger than mballoc is willing to allocate */
	if (map->m_len >= MAX_WRITEPAGES_EXTENT_LEN)
		return false;

	/* Can we merge the block to our big extent? */
	if (lblk == map->m_lblk + map->m_len &&
	    (bh->b_state & BH_FLAGS) == map->m_flags) {
		map->m_len++;
		return true;
	}
	return false;
}

/*
 * mpage_process_page_bufs - submit page buffers for IO or add them to extent
 *
 * @mpd - extent of blocks for mapping
 * @head - the first buffer in the page
 * @bh - buffer we should start processing from
 * @lblk - logical number of the block in the file corresponding to @bh
 *
 * Walk through page buffers from @bh up to @head (exclusive) and either submit
 * the page for IO if all buffers in this page were mapped and there's no
 * accumulated extent of buffers to map, or add buffers in the page to the
 * extent of buffers to map. The function returns 1 if the caller can continue
 * by processing the next page, 0 if it should stop adding buffers to the
 * extent to map because we cannot extend it anymore. It can also return value
 * < 0 in case of error during IO submission.
 */
static int mpage_process_page_bufs(struct mpage_da_data *mpd,
				   struct buffer_head *head,
				   struct buffer_head *bh,
				   ext4_lblk_t lblk)
{
	struct inode *inode = mpd->inode;
	int err;
	ext4_lblk_t blocks = (i_size_read(inode) + i_blocksize(inode) - 1)
							>> inode->i_blkbits;

	if (ext4_verity_in_progress(inode))
		blocks = EXT_MAX_BLOCKS;

	do {
		BUG_ON(buffer_locked(bh));

		if (lblk >= blocks || !mpage_add_bh_to_extent(mpd, lblk, bh)) {
			/* Found extent to map? */
			if (mpd->map.m_len)
				return 0;
			/* Buffer needs mapping and handle is not started? */
			if (!mpd->do_map)
				return 0;
			/* Everything mapped so far and we hit EOF */
			break;
		}
	} while (lblk++, (bh = bh->b_this_page) != head);
	/* So far everything mapped? Submit the page for IO. */
	if (mpd->map.m_len == 0) {
		err = mpage_submit_folio(mpd, head->b_folio);
		if (err < 0)
			return err;
		mpage_folio_done(mpd, head->b_folio);
	}
	if (lblk >= blocks) {
		mpd->scanned_until_end = 1;
		return 0;
	}
	return 1;
}

/*
 * mpage_process_folio - update folio buffers corresponding to changed extent
 *			 and may submit fully mapped page for IO
 * @mpd: description of extent to map, on return next extent to map
 * @folio: Contains these buffers.
 * @m_lblk: logical block mapping.
 * @m_pblk: corresponding physical mapping.
 * @map_bh: determines on return whether this page requires any further
 *	    mapping or not.
 *
 * Scan given folio buffers corresponding to changed extent and update buffer
 * state according to new extent state.
 * We map delalloc buffers to their physical location, clear unwritten bits.
 * If the given folio is not fully mapped, we update @mpd to the next extent in
 * the given folio that needs mapping & return @map_bh as true.
 */
static int mpage_process_folio(struct mpage_da_data *mpd, struct folio *folio,
			      ext4_lblk_t *m_lblk, ext4_fsblk_t *m_pblk,
			      bool *map_bh)
{
	struct buffer_head *head, *bh;
	ext4_io_end_t *io_end = mpd->io_submit.io_end;
	ext4_lblk_t lblk = *m_lblk;
	ext4_fsblk_t pblock = *m_pblk;
	int err = 0;
	int blkbits = mpd->inode->i_blkbits;
	ssize_t io_end_size = 0;
	struct ext4_io_end_vec *io_end_vec = ext4_last_io_end_vec(io_end);

	bh = head = folio_buffers(folio);
	do {
		if (lblk < mpd->map.m_lblk)
			continue;
		if (lblk >= mpd->map.m_lblk + mpd->map.m_len) {
			/*
			 * Buffer after end of mapped extent.
			 * Find next buffer in the folio to map.
			 */
			mpd->map.m_len = 0;
			mpd->map.m_flags = 0;
			io_end_vec->size += io_end_size;

			err = mpage_process_page_bufs(mpd, head, bh, lblk);
			if (err > 0)
				err = 0;
			if (!err && mpd->map.m_len && mpd->map.m_lblk > lblk) {
				io_end_vec = ext4_alloc_io_end_vec(io_end);
				if (IS_ERR(io_end_vec)) {
					err = PTR_ERR(io_end_vec);
					goto out;
				}
				io_end_vec->offset = (loff_t)mpd->map.m_lblk << blkbits;
			}
			*map_bh = true;
			goto out;
		}
		if (buffer_delay(bh)) {
			clear_buffer_delay(bh);
			bh->b_blocknr = pblock++;
		}
		clear_buffer_unwritten(bh);
		io_end_size += (1 << blkbits);
	} while (lblk++, (bh = bh->b_this_page) != head);

	io_end_vec->size += io_end_size;
	*map_bh = false;
out:
	*m_lblk = lblk;
	*m_pblk = pblock;
	return err;
}

/*
 * mpage_map_and_submit_buffers - update buffers corresponding to changed
 *				  extent and submit fully mapped pages for IO
 *
 * @mpd - description of extent to map, on return next extent to map
 *
 * Scan buffers corresponding to changed extent (we expect corresponding pages
 * to be already locked) and update buffer state according to new extent state.
 * We map delalloc buffers to their physical location, clear unwritten bits,
 * and mark buffers as uninit when we perform writes to unwritten extents
 * and do extent conversion after IO is finished. If the last page is not fully
 * mapped, we update @map to the next extent in the last page that needs
 * mapping. Otherwise we submit the page for IO.
 */
static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
{
	struct folio_batch fbatch;
	unsigned nr, i;
	struct inode *inode = mpd->inode;
	int bpp_bits = PAGE_SHIFT - inode->i_blkbits;
	pgoff_t start, end;
	ext4_lblk_t lblk;
	ext4_fsblk_t pblock;
	int err;
	bool map_bh = false;

	start = mpd->map.m_lblk >> bpp_bits;
	end = (mpd->map.m_lblk + mpd->map.m_len - 1) >> bpp_bits;
	lblk = start << bpp_bits;
	pblock = mpd->map.m_pblk;

	folio_batch_init(&fbatch);
	while (start <= end) {
		nr = filemap_get_folios(inode->i_mapping, &start, end, &fbatch);
		if (nr == 0)
			break;
		for (i = 0; i < nr; i++) {
			struct folio *folio = fbatch.folios[i];

			err = mpage_process_folio(mpd, folio, &lblk, &pblock,
						  &map_bh);
			/*
			 * If map_bh is true, the page may require further bh
			 * mapping, or maybe the page was submitted for IO.
			 * So we return to call further extent mapping.
			 */
			if (err < 0 || map_bh)
				goto out;
			/* Page fully mapped - let IO run! */
			err = mpage_submit_folio(mpd, folio);
			if (err < 0)
				goto out;
			mpage_folio_done(mpd, folio);
		}
		folio_batch_release(&fbatch);
	}
	/* Extent fully mapped and matches with page boundary. We are done. */
	mpd->map.m_len = 0;
	mpd->map.m_flags = 0;
	return 0;
out:
	folio_batch_release(&fbatch);
	return err;
}
2137 static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd)
2139 struct inode *inode = mpd->inode;
2140 struct ext4_map_blocks *map = &mpd->map;
2141 int get_blocks_flags;
2142 int err, dioread_nolock;
2144 trace_ext4_da_write_pages_extent(inode, map);
2146 * Call ext4_map_blocks() to allocate any delayed allocation blocks, or
2147 * to convert an unwritten extent to be initialized (in the case
2148 * where we have written into one or more preallocated blocks). It is
2149 * possible that we're going to need more metadata blocks than
2150 * previously reserved. However, we must not fail because we're in
2151 * writeback and there is nothing we can do about it, so it might result
2152 * in data loss. So use reserved blocks to allocate metadata if
2155 * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE if
2156 * the blocks in question are delalloc blocks. This indicates
2157 * that the blocks and quotas have already been checked when
2158 * the data was copied into the page cache.
2160 get_blocks_flags = EXT4_GET_BLOCKS_CREATE |
2161 EXT4_GET_BLOCKS_METADATA_NOFAIL |
2162 EXT4_GET_BLOCKS_IO_SUBMIT;
2163 dioread_nolock = ext4_should_dioread_nolock(inode);
2165 get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT;
2166 if (map->m_flags & BIT(BH_Delay))
2167 get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;
2169 err = ext4_map_blocks(handle, inode, map, get_blocks_flags);
2172 if (dioread_nolock && (map->m_flags & EXT4_MAP_UNWRITTEN)) {
2173 if (!mpd->io_submit.io_end->handle &&
2174 ext4_handle_valid(handle)) {
2175 mpd->io_submit.io_end->handle = handle->h_rsv_handle;
2176 handle->h_rsv_handle = NULL;
2178 ext4_set_io_unwritten_flag(inode, mpd->io_submit.io_end);
2181 BUG_ON(map->m_len == 0);
2186 * mpage_map_and_submit_extent - map extent starting at mpd->lblk of length
2187 * mpd->len and submit pages underlying it for IO
2189 * @handle - handle for journal operations
2190 * @mpd - extent to map
2191 * @give_up_on_write - we set this to true iff there is a fatal error and there
2192 * is no hope of writing the data. The caller should discard
2193 * dirty pages to avoid infinite loops.
2195 * The function maps extent starting at mpd->lblk of length mpd->len. If it is
2196 * delayed, blocks are allocated, if it is unwritten, we may need to convert
2197 * them to initialized or split the described range from larger unwritten
2198 * extent. Note that we need not map all the described range since allocation
2199 * can return fewer blocks or the range may be covered by more unwritten extents. We
2200 * cannot map more because we are limited by reserved transaction credits. On
2201 * the other hand we always make sure that the last touched page is fully
2202 * mapped so that it can be written out (and thus forward progress is
2203 * guaranteed). After mapping we submit all mapped pages for IO.
2205 static int mpage_map_and_submit_extent(handle_t *handle,
2206 struct mpage_da_data *mpd,
2207 bool *give_up_on_write)
2209 struct inode *inode = mpd->inode;
2210 struct ext4_map_blocks *map = &mpd->map;
2214 ext4_io_end_t *io_end = mpd->io_submit.io_end;
2215 struct ext4_io_end_vec *io_end_vec;
2217 io_end_vec = ext4_alloc_io_end_vec(io_end);
2218 if (IS_ERR(io_end_vec))
2219 return PTR_ERR(io_end_vec);
2220 io_end_vec->offset = ((loff_t)map->m_lblk) << inode->i_blkbits;
2222 err = mpage_map_one_extent(handle, mpd);
2224 struct super_block *sb = inode->i_sb;
2226 if (ext4_forced_shutdown(sb))
2227 goto invalidate_dirty_pages;
2229 * Let the upper layers retry transient errors.
2230 * In the case of ENOSPC, if ext4_count_free_clusters()
2231 * is non-zero, a commit should free up blocks.
2233 if ((err == -ENOMEM) ||
2234 (err == -ENOSPC && ext4_count_free_clusters(sb))) {
2236 goto update_disksize;
2239 ext4_msg(sb, KERN_CRIT,
2240 "Delayed block allocation failed for "
2241 "inode %lu at logical offset %llu with"
2242 " max blocks %u with error %d",
2244 (unsigned long long)map->m_lblk,
2245 (unsigned)map->m_len, -err);
2246 ext4_msg(sb, KERN_CRIT,
2247 "This should not happen!! Data will "
2250 ext4_print_free_blocks(inode);
2251 invalidate_dirty_pages:
2252 *give_up_on_write = true;
2257 * Update buffer state, submit mapped pages, and get us new
2260 err = mpage_map_and_submit_buffers(mpd);
2262 goto update_disksize;
2263 } while (map->m_len);
2267 * Update on-disk size after IO is submitted. Races with
2268 * truncate are avoided by checking i_size under i_data_sem.
2270 disksize = ((loff_t)mpd->first_page) << PAGE_SHIFT;
2271 if (disksize > READ_ONCE(EXT4_I(inode)->i_disksize)) {
2275 down_write(&EXT4_I(inode)->i_data_sem);
2276 i_size = i_size_read(inode);
2277 if (disksize > i_size)
2279 if (disksize > EXT4_I(inode)->i_disksize)
2280 EXT4_I(inode)->i_disksize = disksize;
2281 up_write(&EXT4_I(inode)->i_data_sem);
2282 err2 = ext4_mark_inode_dirty(handle, inode);
2284 ext4_error_err(inode->i_sb, -err2,
2285 "Failed to mark inode %lu dirty",
2295 * Calculate the total number of credits to reserve for one writepages
2296 * iteration. This is called from ext4_writepages(). We map an extent of
2297 * up to MAX_WRITEPAGES_EXTENT_LEN blocks and then we go on and finish mapping
2298 * the last partial page. So in total we can map MAX_WRITEPAGES_EXTENT_LEN +
2299 * bpp - 1 blocks in bpp different extents.
2301 static int ext4_da_writepages_trans_blocks(struct inode *inode)
2303 int bpp = ext4_journal_blocks_per_page(inode);
2305 return ext4_meta_trans_blocks(inode,
2306 MAX_WRITEPAGES_EXTENT_LEN + bpp - 1, bpp);
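/*
 * Worked example (illustrative): with 4K pages and 1K blocks,
 * bpp = 4, so one writepages iteration reserves enough credits to
 * map MAX_WRITEPAGES_EXTENT_LEN + 3 blocks spread over at most
 * four separate extents.
 */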
2309 static int ext4_journal_folio_buffers(handle_t *handle, struct folio *folio,
2312 struct buffer_head *page_bufs = folio_buffers(folio);
2313 struct inode *inode = folio->mapping->host;
2316 ret = ext4_walk_page_buffers(handle, inode, page_bufs, 0, len,
2317 NULL, do_journal_get_write_access);
2318 err = ext4_walk_page_buffers(handle, inode, page_bufs, 0, len,
2319 NULL, write_end_fn);
2322 err = ext4_jbd2_inode_add_write(handle, inode, folio_pos(folio), len);
2325 EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
2330 static int mpage_journal_page_buffers(handle_t *handle,
2331 struct mpage_da_data *mpd,
2332 struct folio *folio)
2334 struct inode *inode = mpd->inode;
2335 loff_t size = i_size_read(inode);
2336 size_t len = folio_size(folio);
2338 folio_clear_checked(folio);
2339 mpd->wbc->nr_to_write--;
2341 if (folio_pos(folio) + len > size &&
2342 !ext4_verity_in_progress(inode))
2343 len = size - folio_pos(folio);
2345 return ext4_journal_folio_buffers(handle, folio, len);
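/*
 * Illustrative example: for a 4K folio at file offset 8192 on an
 * inode with i_size 10000, len is trimmed from 4096 down to
 * 10000 - 8192 = 1808 so buffers beyond EOF are not journalled. The
 * ext4_verity_in_progress() check exists because fs-verity writes
 * pages past i_size by design.
 */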
2349 * mpage_prepare_extent_to_map - find & lock contiguous range of dirty pages
2350 * needing mapping, submit mapped pages
2352 * @mpd - where to look for pages
2354 * Walk dirty pages in the mapping. If they are fully mapped, submit them for
2355 * IO immediately. If we cannot map blocks, we submit just already mapped
2356 * buffers in the page for IO and keep page dirty. When we can map blocks and
2357 * we find a page which isn't mapped we start accumulating an extent of buffers
2358 * underlying these pages that needs mapping (formed by either delayed or
2359 * unwritten buffers). We also lock the pages containing these buffers. The
2360 * extent found is returned in @mpd structure (starting at mpd->lblk with
2361 * length mpd->len blocks).
2363 * Note that this function can attach bios to one io_end structure which are
2364 * neither logically nor physically contiguous. Although it may seem like an
2365 * unnecessary complication, it is actually inevitable in blocksize < pagesize
2366 * case as we need to track IO to all buffers underlying a page in one io_end.
2368 static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
2370 struct address_space *mapping = mpd->inode->i_mapping;
2371 struct folio_batch fbatch;
2372 unsigned int nr_folios;
2373 pgoff_t index = mpd->first_page;
2374 pgoff_t end = mpd->last_page;
2377 int blkbits = mpd->inode->i_blkbits;
2379 struct buffer_head *head;
2380 handle_t *handle = NULL;
2381 int bpp = ext4_journal_blocks_per_page(mpd->inode);
2383 if (mpd->wbc->sync_mode == WB_SYNC_ALL || mpd->wbc->tagged_writepages)
2384 tag = PAGECACHE_TAG_TOWRITE;
2386 tag = PAGECACHE_TAG_DIRTY;
2389 mpd->next_page = index;
2390 if (ext4_should_journal_data(mpd->inode)) {
2391 handle = ext4_journal_start(mpd->inode, EXT4_HT_WRITE_PAGE,
2394 return PTR_ERR(handle);
2396 folio_batch_init(&fbatch);
2397 while (index <= end) {
2398 nr_folios = filemap_get_folios_tag(mapping, &index, end,
2403 for (i = 0; i < nr_folios; i++) {
2404 struct folio *folio = fbatch.folios[i];
2407 * Accumulated enough dirty pages? This doesn't apply
2408 * to WB_SYNC_ALL mode. For integrity sync we have to
2409 * keep going because someone may be concurrently
2410 * dirtying pages, and we might have synced a lot of
2411 * newly appeared dirty pages, but have not synced all
2412 * of the old dirty pages.
2414 if (mpd->wbc->sync_mode == WB_SYNC_NONE &&
2415 mpd->wbc->nr_to_write <=
2416 mpd->map.m_len >> (PAGE_SHIFT - blkbits))
2419 /* If we can't merge this page, we are done. */
2420 if (mpd->map.m_len > 0 && mpd->next_page != folio->index)
2424 err = ext4_journal_ensure_credits(handle, bpp,
2432 * If the page is no longer dirty, or its mapping no
2433 * longer corresponds to the inode we are writing (which
2434 * means it has been truncated or invalidated), or the
2435 * page is already under writeback and we are not doing
2436 * a data integrity writeback, skip the page
2438 if (!folio_test_dirty(folio) ||
2439 (folio_test_writeback(folio) &&
2440 (mpd->wbc->sync_mode == WB_SYNC_NONE)) ||
2441 unlikely(folio->mapping != mapping)) {
2442 folio_unlock(folio);
2446 folio_wait_writeback(folio);
2447 BUG_ON(folio_test_writeback(folio));
2450 * Should never happen but for buggy code in
2451 * other subsystems that call
2452 * set_page_dirty() without properly warning
2453 * the file system first. See [1] for more
2456 * [1] https://lore.kernel.org/linux-mm/20180103100430.GE4911@quack2.suse.cz
2458 if (!folio_buffers(folio)) {
2459 ext4_warning_inode(mpd->inode, "page %lu does not have buffers attached", folio->index);
2460 folio_clear_dirty(folio);
2461 folio_unlock(folio);
2465 if (mpd->map.m_len == 0)
2466 mpd->first_page = folio->index;
2467 mpd->next_page = folio_next_index(folio);
2469 * Writeout when we cannot modify metadata is simple.
2470 * Just submit the page. For data=journal mode we
2471 * first handle writeout of the page for checkpoint and
2472 * only after that handle delayed page dirtying. This
2473 * makes sure current data is checkpointed to the final
2474 * location before possibly journalling it again which
2475 * is desirable when the page is frequently dirtied
2478 if (!mpd->can_map) {
2479 err = mpage_submit_folio(mpd, folio);
2482 /* Pending dirtying of journalled data? */
2483 if (folio_test_checked(folio)) {
2484 err = mpage_journal_page_buffers(handle,
2488 mpd->journalled_more_data = 1;
2490 mpage_folio_done(mpd, folio);
2492 /* Add all dirty buffers to mpd */
2493 lblk = ((ext4_lblk_t)folio->index) <<
2494 (PAGE_SHIFT - blkbits);
2495 head = folio_buffers(folio);
2496 err = mpage_process_page_bufs(mpd, head, head,
2503 folio_batch_release(&fbatch);
2506 mpd->scanned_until_end = 1;
2508 ext4_journal_stop(handle);
2511 folio_batch_release(&fbatch);
2513 ext4_journal_stop(handle);
2517 static int ext4_do_writepages(struct mpage_da_data *mpd)
2519 struct writeback_control *wbc = mpd->wbc;
2520 pgoff_t writeback_index = 0;
2521 long nr_to_write = wbc->nr_to_write;
2522 int range_whole = 0;
2524 handle_t *handle = NULL;
2525 struct inode *inode = mpd->inode;
2526 struct address_space *mapping = inode->i_mapping;
2527 int needed_blocks, rsv_blocks = 0, ret = 0;
2528 struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
2529 struct blk_plug plug;
2530 bool give_up_on_write = false;
2532 trace_ext4_writepages(inode, wbc);
2535 * No pages to write? This is mainly a kludge to avoid starting
2536 * a transaction for special inodes like journal inode on last iput()
2537 * because that could violate lock ordering on umount
2539 if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
2540 goto out_writepages;
2543 * If the filesystem has aborted, it is read-only, so return
2544 * right away instead of dumping stack traces later on that
2545 * will obscure the real source of the problem. We test
2546 * fs shutdown state instead of sb->s_flag's SB_RDONLY because
2547 * the latter could be true if the filesystem is mounted
2548 * read-only, and in that case, ext4_writepages should
2549 * *never* be called, so if that ever happens, we would want
2552 if (unlikely(ext4_forced_shutdown(mapping->host->i_sb))) {
2554 goto out_writepages;
2558 * If we have inline data and arrive here, it means that
2559 * we will soon create the block for the 1st page, so
2560 * we'd better clear the inline data here.
2562 if (ext4_has_inline_data(inode)) {
2563 /* Just inode will be modified... */
2564 handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
2565 if (IS_ERR(handle)) {
2566 ret = PTR_ERR(handle);
2567 goto out_writepages;
2569 BUG_ON(ext4_test_inode_state(inode,
2570 EXT4_STATE_MAY_INLINE_DATA));
2571 ext4_destroy_inline_data(handle, inode);
2572 ext4_journal_stop(handle);
2576 * data=journal mode does not do delalloc so we just need to writeout /
2577 * journal already mapped buffers. On the other hand we need to commit
2578 * transaction to make data stable. We expect all the data to be
2579 * already in the journal (the only exception are DMA pinned pages
2580 * dirtied behind our back) so we commit transaction here and run the
2581 * writeback loop to checkpoint them. The checkpointing is not actually
2582 * necessary to make data persistent *but* quite a few places (extent
2583 * shifting operations, fsverity, ...) depend on being able to drop
2584 * pagecache pages after calling filemap_write_and_wait() and for that
2585 * checkpointing needs to happen.
2587 if (ext4_should_journal_data(inode)) {
2589 if (wbc->sync_mode == WB_SYNC_ALL)
2590 ext4_fc_commit(sbi->s_journal,
2591 EXT4_I(inode)->i_datasync_tid);
2593 mpd->journalled_more_data = 0;
2595 if (ext4_should_dioread_nolock(inode)) {
2597 * We may need to convert up to one extent per block in
2598 * the page and we may dirty the inode.
2600 rsv_blocks = 1 + ext4_chunk_trans_blocks(inode,
2601 PAGE_SIZE >> inode->i_blkbits);
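/*
 * For instance (illustrative), with 1K blocks and 4K pages,
 * PAGE_SIZE >> i_blkbits = 4, so the reserved handle must be able to
 * convert up to four extents, plus one credit for dirtying the inode.
 */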
2604 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2607 if (wbc->range_cyclic) {
2608 writeback_index = mapping->writeback_index;
2609 if (writeback_index)
2611 mpd->first_page = writeback_index;
2612 mpd->last_page = -1;
2614 mpd->first_page = wbc->range_start >> PAGE_SHIFT;
2615 mpd->last_page = wbc->range_end >> PAGE_SHIFT;
2618 ext4_io_submit_init(&mpd->io_submit, wbc);
2620 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2621 tag_pages_for_writeback(mapping, mpd->first_page,
2623 blk_start_plug(&plug);
2626 * First writeback pages that don't need mapping - we can avoid
2627 * starting a transaction unnecessarily and also avoid being blocked
2628 * in the block layer on device congestion while having transaction
2632 mpd->scanned_until_end = 0;
2633 mpd->io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL);
2634 if (!mpd->io_submit.io_end) {
2638 ret = mpage_prepare_extent_to_map(mpd);
2639 /* Unlock pages we didn't use */
2640 mpage_release_unused_pages(mpd, false);
2641 /* Submit prepared bio */
2642 ext4_io_submit(&mpd->io_submit);
2643 ext4_put_io_end_defer(mpd->io_submit.io_end);
2644 mpd->io_submit.io_end = NULL;
2648 while (!mpd->scanned_until_end && wbc->nr_to_write > 0) {
2649 /* For each extent of pages we use new io_end */
2650 mpd->io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL);
2651 if (!mpd->io_submit.io_end) {
2656 WARN_ON_ONCE(!mpd->can_map);
2658 * We have two constraints: We find one extent to map and we
2659 * must always write out the whole page (makes a difference when
2660 * blocksize < pagesize) so that we don't block on IO when we
2661 * try to write out the rest of the page. Journalled mode is
2662 * not supported by delalloc.
2664 BUG_ON(ext4_should_journal_data(inode));
2665 needed_blocks = ext4_da_writepages_trans_blocks(inode);
2667 /* start a new transaction */
2668 handle = ext4_journal_start_with_reserve(inode,
2669 EXT4_HT_WRITE_PAGE, needed_blocks, rsv_blocks);
2670 if (IS_ERR(handle)) {
2671 ret = PTR_ERR(handle);
2672 ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: "
2673 "%ld pages, ino %lu; err %d", __func__,
2674 wbc->nr_to_write, inode->i_ino, ret);
2675 /* Release allocated io_end */
2676 ext4_put_io_end(mpd->io_submit.io_end);
2677 mpd->io_submit.io_end = NULL;
2682 trace_ext4_da_write_pages(inode, mpd->first_page, wbc);
2683 ret = mpage_prepare_extent_to_map(mpd);
2684 if (!ret && mpd->map.m_len)
2685 ret = mpage_map_and_submit_extent(handle, mpd,
2688 * Caution: If the handle is synchronous,
2689 * ext4_journal_stop() can wait for transaction commit
2690 * to finish which may depend on writeback of pages to
2691 * complete or on page lock to be released. In that
2692 * case, we have to wait until after we have
2693 * submitted all the IO, released page locks we hold,
2694 * and dropped io_end reference (for extent conversion
2695 * to be able to complete) before stopping the handle.
2697 if (!ext4_handle_valid(handle) || handle->h_sync == 0) {
2698 ext4_journal_stop(handle);
2702 /* Unlock pages we didn't use */
2703 mpage_release_unused_pages(mpd, give_up_on_write);
2704 /* Submit prepared bio */
2705 ext4_io_submit(&mpd->io_submit);
2708 * Drop our io_end reference we got from init. We have
2709 * to be careful and use deferred io_end finishing if
2710 * we are still holding the transaction as we can
2711 * release the last reference to io_end which may end
2712 * up doing unwritten extent conversion.
2715 ext4_put_io_end_defer(mpd->io_submit.io_end);
2716 ext4_journal_stop(handle);
2718 ext4_put_io_end(mpd->io_submit.io_end);
2719 mpd->io_submit.io_end = NULL;
2721 if (ret == -ENOSPC && sbi->s_journal) {
2723 * Commit the transaction which would
2724 * free blocks released in the transaction
2727 jbd2_journal_force_commit_nested(sbi->s_journal);
2731 /* Fatal error - ENOMEM, EIO... */
2736 blk_finish_plug(&plug);
2737 if (!ret && !cycled && wbc->nr_to_write > 0) {
2739 mpd->last_page = writeback_index - 1;
2740 mpd->first_page = 0;
2745 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2747 * Set the writeback_index so that range_cyclic
2748 * mode will write it back later
2750 mapping->writeback_index = mpd->first_page;
2753 trace_ext4_writepages_result(inode, wbc, ret,
2754 nr_to_write - wbc->nr_to_write);
2758 static int ext4_writepages(struct address_space *mapping,
2759 struct writeback_control *wbc)
2761 struct super_block *sb = mapping->host->i_sb;
2762 struct mpage_da_data mpd = {
2763 .inode = mapping->host,
2770 if (unlikely(ext4_forced_shutdown(sb)))
2773 alloc_ctx = ext4_writepages_down_read(sb);
2774 ret = ext4_do_writepages(&mpd);
2776 * For data=journal writeback we could have come across pages marked
2777 * for delayed dirtying (PageChecked) which were just added to the
2778 * running transaction. Try once more to get them to stable storage.
2780 if (!ret && mpd.journalled_more_data)
2781 ret = ext4_do_writepages(&mpd);
2782 ext4_writepages_up_read(sb, alloc_ctx);
2787 int ext4_normal_submit_inode_data_buffers(struct jbd2_inode *jinode)
2789 struct writeback_control wbc = {
2790 .sync_mode = WB_SYNC_ALL,
2791 .nr_to_write = LONG_MAX,
2792 .range_start = jinode->i_dirty_start,
2793 .range_end = jinode->i_dirty_end,
2795 struct mpage_da_data mpd = {
2796 .inode = jinode->i_vfs_inode,
2800 return ext4_do_writepages(&mpd);
2803 static int ext4_dax_writepages(struct address_space *mapping,
2804 struct writeback_control *wbc)
2807 long nr_to_write = wbc->nr_to_write;
2808 struct inode *inode = mapping->host;
2811 if (unlikely(ext4_forced_shutdown(inode->i_sb)))
2814 alloc_ctx = ext4_writepages_down_read(inode->i_sb);
2815 trace_ext4_writepages(inode, wbc);
2817 ret = dax_writeback_mapping_range(mapping,
2818 EXT4_SB(inode->i_sb)->s_daxdev, wbc);
2819 trace_ext4_writepages_result(inode, wbc, ret,
2820 nr_to_write - wbc->nr_to_write);
2821 ext4_writepages_up_read(inode->i_sb, alloc_ctx);
2825 static int ext4_nonda_switch(struct super_block *sb)
2827 s64 free_clusters, dirty_clusters;
2828 struct ext4_sb_info *sbi = EXT4_SB(sb);
2831 * Switch to non-delalloc mode if we are running low
2832 * on free blocks. The free block accounting via percpu
2833 * counters can get slightly wrong with percpu_counter_batch getting
2834 * accumulated on each CPU without updating global counters.
2835 * Delalloc needs accurate free block accounting. So switch
2836 * to non-delalloc when we are near the error range.
2839 percpu_counter_read_positive(&sbi->s_freeclusters_counter);
2841 percpu_counter_read_positive(&sbi->s_dirtyclusters_counter);
2843 * Start pushing delalloc when 1/2 of free blocks are dirty.
2845 if (dirty_clusters && (free_clusters < 2 * dirty_clusters))
2846 try_to_writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE);
2848 if (2 * free_clusters < 3 * dirty_clusters ||
2849 free_clusters < (dirty_clusters + EXT4_FREECLUSTERS_WATERMARK)) {
2851 * free block count is less than 150% of dirty blocks,
2852 * or free blocks are less than the watermark
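/*
 * Numeric sketch (illustrative): with free_clusters = 100 and
 * dirty_clusters = 70, writeback is already being pushed above
 * (70 > 100 / 2), and since 2 * 100 < 3 * 70 we fall back to
 * nodelalloc until the imbalance clears.
 */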
2859 static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
2860 loff_t pos, unsigned len,
2861 struct page **pagep, void **fsdata)
2863 int ret, retries = 0;
2864 struct folio *folio;
2866 struct inode *inode = mapping->host;
2868 if (unlikely(ext4_forced_shutdown(inode->i_sb)))
2871 index = pos >> PAGE_SHIFT;
2873 if (ext4_nonda_switch(inode->i_sb) || ext4_verity_in_progress(inode)) {
2874 *fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
2875 return ext4_write_begin(file, mapping, pos,
2876 len, pagep, fsdata);
2878 *fsdata = (void *)0;
2879 trace_ext4_da_write_begin(inode, pos, len);
2881 if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
2882 ret = ext4_da_write_inline_data_begin(mapping, inode, pos, len,
2891 folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
2892 mapping_gfp_mask(mapping));
2894 return PTR_ERR(folio);
2896 /* In case writeback began while the folio was unlocked */
2897 folio_wait_stable(folio);
2899 #ifdef CONFIG_FS_ENCRYPTION
2900 ret = ext4_block_write_begin(folio, pos, len, ext4_da_get_block_prep);
2902 ret = __block_write_begin(&folio->page, pos, len, ext4_da_get_block_prep);
2905 folio_unlock(folio);
2908 * block_write_begin may have instantiated a few blocks
2909 * outside i_size. Trim these off again. Don't need
2910 * i_size_read because we hold inode lock.
2912 if (pos + len > inode->i_size)
2913 ext4_truncate_failed_write(inode);
2915 if (ret == -ENOSPC &&
2916 ext4_should_retry_alloc(inode->i_sb, &retries))
2921 *pagep = &folio->page;
2926 * Check if we should update i_disksize
2927 * when writing to the end of the file without requiring block allocation
2929 static int ext4_da_should_update_i_disksize(struct folio *folio,
2930 unsigned long offset)
2932 struct buffer_head *bh;
2933 struct inode *inode = folio->mapping->host;
2937 bh = folio_buffers(folio);
2938 idx = offset >> inode->i_blkbits;
2940 for (i = 0; i < idx; i++)
2941 bh = bh->b_this_page;
2943 if (!buffer_mapped(bh) || (buffer_delay(bh)) || buffer_unwritten(bh))
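/*
 * Example (illustrative): with 1K blocks, an end-of-write offset of
 * 2500 within the folio gives idx = 2, so the walk above stops at the
 * third buffer_head; i_disksize should be updated only if that buffer
 * is mapped and neither delayed nor unwritten.
 */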
2948 static int ext4_da_do_write_end(struct address_space *mapping,
2949 loff_t pos, unsigned len, unsigned copied,
2950 struct folio *folio)
2952 struct inode *inode = mapping->host;
2953 loff_t old_size = inode->i_size;
2954 bool disksize_changed = false;
2958 * block_write_end() will mark the inode as dirty with I_DIRTY_PAGES
2959 * flag, which is all that's needed to trigger page writeback.
2961 copied = block_write_end(NULL, mapping, pos, len, copied,
2962 &folio->page, NULL);
2963 new_i_size = pos + copied;
2966 * It's important to update i_size while still holding folio lock,
2967 * because folio writeout could otherwise come in and zero beyond
2970 * Since we are holding inode lock, we are sure i_disksize <=
2971 * i_size. We also know that if i_disksize < i_size, there are
2972 * delalloc writes pending in the range up to i_size. If the end of
2973 * the current write is <= i_size, there's no need to touch
2974 * i_disksize since writeback will push i_disksize up to i_size
2975 * eventually. If the end of the current write is > i_size and
2976 * inside an allocated block which ext4_da_should_update_i_disksize()
2977 * checked, we need to update i_disksize here because certain
2978 * ext4_writepages() paths do not allocate blocks and hence never update i_disksize.
2980 if (new_i_size > inode->i_size) {
2983 i_size_write(inode, new_i_size);
2984 end = (new_i_size - 1) & (PAGE_SIZE - 1);
2985 if (copied && ext4_da_should_update_i_disksize(folio, end)) {
2986 ext4_update_i_disksize(inode, new_i_size);
2987 disksize_changed = true;
2991 folio_unlock(folio);
2995 pagecache_isize_extended(inode, old_size, pos);
2997 if (disksize_changed) {
3000 handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
3002 return PTR_ERR(handle);
3003 ext4_mark_inode_dirty(handle, inode);
3004 ext4_journal_stop(handle);
3010 static int ext4_da_write_end(struct file *file,
3011 struct address_space *mapping,
3012 loff_t pos, unsigned len, unsigned copied,
3013 struct page *page, void *fsdata)
3015 struct inode *inode = mapping->host;
3016 int write_mode = (int)(unsigned long)fsdata;
3017 struct folio *folio = page_folio(page);
3019 if (write_mode == FALL_BACK_TO_NONDELALLOC)
3020 return ext4_write_end(file, mapping, pos,
3021 len, copied, &folio->page, fsdata);
3023 trace_ext4_da_write_end(inode, pos, len, copied);
3025 if (write_mode != CONVERT_INLINE_DATA &&
3026 ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA) &&
3027 ext4_has_inline_data(inode))
3028 return ext4_write_inline_data_end(inode, pos, len, copied,
3031 if (unlikely(copied < len) && !folio_test_uptodate(folio))
3034 return ext4_da_do_write_end(mapping, pos, len, copied, folio);
3038 * Force all delayed allocation blocks to be allocated for a given inode.
3040 int ext4_alloc_da_blocks(struct inode *inode)
3042 trace_ext4_alloc_da_blocks(inode);
3044 if (!EXT4_I(inode)->i_reserved_data_blocks)
3048 * We do something simple for now. The filemap_flush() will
3049 * also start triggering a write of the data blocks, which is
3050 * not strictly speaking necessary (and for users of
3051 * laptop_mode, not even desirable). However, to do otherwise
3052 * would require replicating code paths in:
3054 * ext4_writepages() ->
3055 * write_cache_pages() ---> (via passed in callback function)
3056 * __mpage_da_writepage() -->
3057 * mpage_add_bh_to_extent()
3058 * mpage_da_map_blocks()
3060 * The problem is that write_cache_pages(), located in
3061 * mm/page-writeback.c, marks pages clean in preparation for
3062 * doing I/O, which is not desirable if we're not planning on
3065 * We could call write_cache_pages(), and then redirty all of
3066 * the pages by calling redirty_page_for_writepage() but that
3067 * would be ugly in the extreme. So instead we would need to
3068 * replicate parts of the code in the above functions,
3069 * simplifying them because we wouldn't actually intend to
3070 * write out the pages, but rather only collect contiguous
3071 * logical block extents, call the multi-block allocator, and
3072 * then update the buffer heads with the block allocations.
3074 * For now, though, we'll cheat by calling filemap_flush(),
3075 * which will map the blocks, and start the I/O, but not
3076 * actually wait for the I/O to complete.
3078 return filemap_flush(inode->i_mapping);
3082 * bmap() is special. It gets used by applications such as lilo and by
3083 * the swapper to find the on-disk block of a specific piece of data.
3085 * Naturally, this is dangerous if the block concerned is still in the
3086 * journal. If somebody makes a swapfile on an ext4 data-journaling
3087 * filesystem and enables swap, then they may get a nasty shock when the
3088 * data getting swapped to that swapfile suddenly gets overwritten by
3089 * the original zero's written out previously to the journal and
3090 * awaiting writeback in the kernel's buffer cache.
3092 * So, if we see any bmap calls here on a modified, data-journaled file,
3093 * take extra steps to flush any blocks which might be in the cache.
3095 static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
3097 struct inode *inode = mapping->host;
3100 inode_lock_shared(inode);
3102 * We can get here for an inline file via the FIBMAP ioctl
3104 if (ext4_has_inline_data(inode))
3107 if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
3108 (test_opt(inode->i_sb, DELALLOC) ||
3109 ext4_should_journal_data(inode))) {
3111 * With delalloc or journalled data we want to sync the file so
3112 * that we can make sure we allocate blocks for the file and the data
3113 * is in place for the user to see it
3115 filemap_write_and_wait(mapping);
3118 ret = iomap_bmap(mapping, block, &ext4_iomap_ops);
3121 inode_unlock_shared(inode);
3125 static int ext4_read_folio(struct file *file, struct folio *folio)
3128 struct inode *inode = folio->mapping->host;
3130 trace_ext4_read_folio(inode, folio);
3132 if (ext4_has_inline_data(inode))
3133 ret = ext4_readpage_inline(inode, folio);
3136 return ext4_mpage_readpages(inode, NULL, folio);
3141 static void ext4_readahead(struct readahead_control *rac)
3143 struct inode *inode = rac->mapping->host;
3145 /* If the file has inline data, no need to do readahead. */
3146 if (ext4_has_inline_data(inode))
3149 ext4_mpage_readpages(inode, rac, NULL);
3152 static void ext4_invalidate_folio(struct folio *folio, size_t offset,
3155 trace_ext4_invalidate_folio(folio, offset, length);
3157 /* No journalling happens on data buffers when this function is used */
3158 WARN_ON(folio_buffers(folio) && buffer_jbd(folio_buffers(folio)));
3160 block_invalidate_folio(folio, offset, length);
3163 static int __ext4_journalled_invalidate_folio(struct folio *folio,
3164 size_t offset, size_t length)
3166 journal_t *journal = EXT4_JOURNAL(folio->mapping->host);
3168 trace_ext4_journalled_invalidate_folio(folio, offset, length);
3171 * If it's a full truncate we just forget about the pending dirtying
3173 if (offset == 0 && length == folio_size(folio))
3174 folio_clear_checked(folio);
3176 return jbd2_journal_invalidate_folio(journal, folio, offset, length);
3179 /* Wrapper for aops... */
3180 static void ext4_journalled_invalidate_folio(struct folio *folio,
3184 WARN_ON(__ext4_journalled_invalidate_folio(folio, offset, length) < 0);
3187 static bool ext4_release_folio(struct folio *folio, gfp_t wait)
3189 struct inode *inode = folio->mapping->host;
3190 journal_t *journal = EXT4_JOURNAL(inode);
3192 trace_ext4_release_folio(inode, folio);
3194 /* Page has dirty journalled data -> cannot release */
3195 if (folio_test_checked(folio))
3198 return jbd2_journal_try_to_free_buffers(journal, folio);
3200 return try_to_free_buffers(folio);
3203 static bool ext4_inode_datasync_dirty(struct inode *inode)
3205 journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
3208 if (jbd2_transaction_committed(journal,
3209 EXT4_I(inode)->i_datasync_tid))
3211 if (test_opt2(inode->i_sb, JOURNAL_FAST_COMMIT))
3212 return !list_empty(&EXT4_I(inode)->i_fc_list);
3216 /* Any metadata buffers to write? */
3217 if (!list_empty(&inode->i_mapping->i_private_list))
3219 return inode->i_state & I_DIRTY_DATASYNC;
3222 static void ext4_set_iomap(struct inode *inode, struct iomap *iomap,
3223 struct ext4_map_blocks *map, loff_t offset,
3224 loff_t length, unsigned int flags)
3226 u8 blkbits = inode->i_blkbits;
3229 * Writes that span EOF might trigger an I/O size update on completion,
3230 * so consider them to be dirty for the purpose of O_DSYNC, even if
3231 * there are no other metadata changes being made or pending.
3234 if (ext4_inode_datasync_dirty(inode) ||
3235 offset + length > i_size_read(inode))
3236 iomap->flags |= IOMAP_F_DIRTY;
3238 if (map->m_flags & EXT4_MAP_NEW)
3239 iomap->flags |= IOMAP_F_NEW;
3241 if (flags & IOMAP_DAX)
3242 iomap->dax_dev = EXT4_SB(inode->i_sb)->s_daxdev;
3244 iomap->bdev = inode->i_sb->s_bdev;
3245 iomap->offset = (u64) map->m_lblk << blkbits;
3246 iomap->length = (u64) map->m_len << blkbits;
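/*
 * For example (illustrative): blkbits = 12 with map->m_lblk = 3 and
 * map->m_len = 2 yields iomap->offset = 12288 and iomap->length =
 * 8192, i.e. the byte range [12K, 20K) of the file.
 */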
3248 if ((map->m_flags & EXT4_MAP_MAPPED) &&
3249 !ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3250 iomap->flags |= IOMAP_F_MERGED;
3253 * Flags passed to ext4_map_blocks() for direct I/O writes can result
3254 * in m_flags having both EXT4_MAP_MAPPED and EXT4_MAP_UNWRITTEN bits
3255 * set. In order for any allocated unwritten extents to be converted
3256 * into written extents correctly within the ->end_io() handler, we
3257 * need to ensure that the iomap->type is set appropriately. Hence, the
3258 * reason why we need to check whether the EXT4_MAP_UNWRITTEN bit has
3261 if (map->m_flags & EXT4_MAP_UNWRITTEN) {
3262 iomap->type = IOMAP_UNWRITTEN;
3263 iomap->addr = (u64) map->m_pblk << blkbits;
3264 if (flags & IOMAP_DAX)
3265 iomap->addr += EXT4_SB(inode->i_sb)->s_dax_part_off;
3266 } else if (map->m_flags & EXT4_MAP_MAPPED) {
3267 iomap->type = IOMAP_MAPPED;
3268 iomap->addr = (u64) map->m_pblk << blkbits;
3269 if (flags & IOMAP_DAX)
3270 iomap->addr += EXT4_SB(inode->i_sb)->s_dax_part_off;
3272 iomap->type = IOMAP_HOLE;
3273 iomap->addr = IOMAP_NULL_ADDR;
3277 static int ext4_iomap_alloc(struct inode *inode, struct ext4_map_blocks *map,
3281 u8 blkbits = inode->i_blkbits;
3282 int ret, dio_credits, m_flags = 0, retries = 0;
3285 * Trim the mapping request to the maximum value that we can map at
3286 * once for direct I/O.
3288 if (map->m_len > DIO_MAX_BLOCKS)
3289 map->m_len = DIO_MAX_BLOCKS;
3290 dio_credits = ext4_chunk_trans_blocks(inode, map->m_len);
3294 * Either we allocate blocks and then don't get an unwritten extent, so
3295 * in that case we have reserved enough credits. Or, the blocks are
3296 * already allocated and unwritten. In that case, the extent conversion
3297 * fits into the credits as well.
3299 handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, dio_credits);
3301 return PTR_ERR(handle);
3304 * DAX and direct I/O are the only two operations that are currently
3305 * supported with IOMAP_WRITE.
3307 WARN_ON(!(flags & (IOMAP_DAX | IOMAP_DIRECT)));
3308 if (flags & IOMAP_DAX)
3309 m_flags = EXT4_GET_BLOCKS_CREATE_ZERO;
3311 * We use i_size instead of i_disksize here because delalloc writeback
3312 * can complete at any point during the I/O and subsequently push the
3313 * i_disksize out to i_size. This could be beyond where direct I/O is
3314 * happening and thus expose allocated blocks to direct I/O reads.
3316 else if (((loff_t)map->m_lblk << blkbits) >= i_size_read(inode))
3317 m_flags = EXT4_GET_BLOCKS_CREATE;
3318 else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3319 m_flags = EXT4_GET_BLOCKS_IO_CREATE_EXT;
3321 ret = ext4_map_blocks(handle, inode, map, m_flags);
3324 * We cannot fill holes in indirect tree based inodes as that could
3325 * expose stale data in the case of a crash. Use the magic error code
3326 * to fallback to buffered I/O.
3328 if (!m_flags && !ret)
3331 ext4_journal_stop(handle);
3332 if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
3339 static int ext4_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
3340 unsigned flags, struct iomap *iomap, struct iomap *srcmap)
3343 struct ext4_map_blocks map;
3344 u8 blkbits = inode->i_blkbits;
3346 if ((offset >> blkbits) > EXT4_MAX_LOGICAL_BLOCK)
3349 if (WARN_ON_ONCE(ext4_has_inline_data(inode)))
3353 * Calculate the first and last logical blocks respectively.
3355 map.m_lblk = offset >> blkbits;
3356 map.m_len = min_t(loff_t, (offset + length - 1) >> blkbits,
3357 EXT4_MAX_LOGICAL_BLOCK) - map.m_lblk + 1;
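/*
 * Illustrative numbers: for offset = 5000 and length = 10000 with 4K
 * blocks, map.m_lblk = 1 and the last byte (offset 14999) falls in
 * block 3, so map.m_len = 3 - 1 + 1 = 3 blocks.
 */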
3359 if (flags & IOMAP_WRITE) {
3361 * If the blocks are already allocated, we don't need to start a
3362 * journal txn and we can directly return
3363 * the mapping information. This could boost performance
3364 * especially in multi-threaded overwrite requests.
3366 if (offset + length <= i_size_read(inode)) {
3367 ret = ext4_map_blocks(NULL, inode, &map, 0);
3368 if (ret > 0 && (map.m_flags & EXT4_MAP_MAPPED))
3371 ret = ext4_iomap_alloc(inode, &map, flags);
3373 ret = ext4_map_blocks(NULL, inode, &map, 0);
3380 * When inline encryption is enabled, sometimes I/O to an encrypted file
3381 * has to be broken up to guarantee DUN contiguity. Handle this by
3382 * limiting the length of the mapping returned.
3384 map.m_len = fscrypt_limit_io_blocks(inode, map.m_lblk, map.m_len);
3386 ext4_set_iomap(inode, iomap, &map, offset, length, flags);
3391 static int ext4_iomap_overwrite_begin(struct inode *inode, loff_t offset,
3392 loff_t length, unsigned flags, struct iomap *iomap,
3393 struct iomap *srcmap)
3398 * Even for writes we don't need to allocate blocks, so just pretend
3399 * we are reading to save overhead of starting a transaction.
3401 flags &= ~IOMAP_WRITE;
3402 ret = ext4_iomap_begin(inode, offset, length, flags, iomap, srcmap);
3403 WARN_ON_ONCE(!ret && iomap->type != IOMAP_MAPPED);
3407 static int ext4_iomap_end(struct inode *inode, loff_t offset, loff_t length,
3408 ssize_t written, unsigned flags, struct iomap *iomap)
3411 * Check to see whether an error occurred while writing out the data to
3412 * the allocated blocks. If so, return the magic error code so that we
3413 * fallback to buffered I/O and attempt to complete the remainder of
3414 * the I/O. Any blocks that may have been allocated in preparation for
3415 * the direct I/O will be reused during buffered I/O.
3417 if (flags & (IOMAP_WRITE | IOMAP_DIRECT) && written == 0)
3423 const struct iomap_ops ext4_iomap_ops = {
3424 .iomap_begin = ext4_iomap_begin,
3425 .iomap_end = ext4_iomap_end,
3428 const struct iomap_ops ext4_iomap_overwrite_ops = {
3429 .iomap_begin = ext4_iomap_overwrite_begin,
3430 .iomap_end = ext4_iomap_end,
3433 static bool ext4_iomap_is_delalloc(struct inode *inode,
3434 struct ext4_map_blocks *map)
3436 struct extent_status es;
3437 ext4_lblk_t offset = 0, end = map->m_lblk + map->m_len - 1;
3439 ext4_es_find_extent_range(inode, &ext4_es_is_delayed,
3440 map->m_lblk, end, &es);
3442 if (!es.es_len || es.es_lblk > end)
3445 if (es.es_lblk > map->m_lblk) {
3446 map->m_len = es.es_lblk - map->m_lblk;
3450 offset = map->m_lblk - es.es_lblk;
3451 map->m_len = es.es_len - offset;
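/*
 * Illustrative case: if the map covers blocks 10..19 and the delayed
 * extent found spans blocks 5..16 (es_len = 12), then offset = 5 and
 * m_len = 12 - 5 = 7, so blocks 10..16 are reported as delalloc.
 */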
3456 static int ext4_iomap_begin_report(struct inode *inode, loff_t offset,
3457 loff_t length, unsigned int flags,
3458 struct iomap *iomap, struct iomap *srcmap)
3461 bool delalloc = false;
3462 struct ext4_map_blocks map;
3463 u8 blkbits = inode->i_blkbits;
3465 if ((offset >> blkbits) > EXT4_MAX_LOGICAL_BLOCK)
3468 if (ext4_has_inline_data(inode)) {
3469 ret = ext4_inline_data_iomap(inode, iomap);
3470 if (ret != -EAGAIN) {
3471 if (ret == 0 && offset >= iomap->length)
3478 * Calculate the first and last logical block respectively.
3480 map.m_lblk = offset >> blkbits;
3481 map.m_len = min_t(loff_t, (offset + length - 1) >> blkbits,
3482 EXT4_MAX_LOGICAL_BLOCK) - map.m_lblk + 1;
3485 * Fiemap callers may call for offset beyond s_bitmap_maxbytes.
3486 * So handle it here instead of querying ext4_map_blocks().
3487 * Since ext4_map_blocks() will warn about it and will return
3490 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
3491 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3493 if (offset >= sbi->s_bitmap_maxbytes) {
3499 ret = ext4_map_blocks(NULL, inode, &map, 0);
3503 delalloc = ext4_iomap_is_delalloc(inode, &map);
3506 ext4_set_iomap(inode, iomap, &map, offset, length, flags);
3507 if (delalloc && iomap->type == IOMAP_HOLE)
3508 iomap->type = IOMAP_DELALLOC;
3513 const struct iomap_ops ext4_iomap_report_ops = {
3514 .iomap_begin = ext4_iomap_begin_report,
3518 * For data=journal mode, folio should be marked dirty only when it was
3519 * writeably mapped. When that happens, it was already attached to the
3520 * transaction and marked as jbddirty (we take care of this in
3521 * ext4_page_mkwrite()). On transaction commit, we writeprotect page mappings
3522 * so we should have nothing to do here, except for the case when someone
3523 * had the page pinned and dirtied the page through this pin (e.g. by doing
3524 * direct IO to it). In that case we'd need to attach buffers here to the
3525 * transaction but we cannot due to lock ordering. We cannot just dirty the
3526 * folio and leave attached buffers clean, because the buffers' dirty state is
3527 * "definitive". We cannot just set the buffers dirty or jbddirty because all
3528 * the journalling code will explode. So what we do is to mark the folio
3529 * "pending dirty" and next time ext4_writepages() is called, attach buffers
3530 * to the transaction appropriately.
3532 static bool ext4_journalled_dirty_folio(struct address_space *mapping,
3533 struct folio *folio)
3535 WARN_ON_ONCE(!folio_buffers(folio));
3536 if (folio_maybe_dma_pinned(folio))
3537 folio_set_checked(folio);
3538 return filemap_dirty_folio(mapping, folio);
3541 static bool ext4_dirty_folio(struct address_space *mapping, struct folio *folio)
3543 WARN_ON_ONCE(!folio_test_locked(folio) && !folio_test_dirty(folio));
3544 WARN_ON_ONCE(!folio_buffers(folio));
3545 return block_dirty_folio(mapping, folio);
3548 static int ext4_iomap_swap_activate(struct swap_info_struct *sis,
3549 struct file *file, sector_t *span)
3551 return iomap_swapfile_activate(sis, file, span,
3552 &ext4_iomap_report_ops);
3555 static const struct address_space_operations ext4_aops = {
3556 .read_folio = ext4_read_folio,
3557 .readahead = ext4_readahead,
3558 .writepages = ext4_writepages,
3559 .write_begin = ext4_write_begin,
3560 .write_end = ext4_write_end,
3561 .dirty_folio = ext4_dirty_folio,
3563 .invalidate_folio = ext4_invalidate_folio,
3564 .release_folio = ext4_release_folio,
3565 .direct_IO = noop_direct_IO,
3566 .migrate_folio = buffer_migrate_folio,
3567 .is_partially_uptodate = block_is_partially_uptodate,
3568 .error_remove_folio = generic_error_remove_folio,
3569 .swap_activate = ext4_iomap_swap_activate,
3572 static const struct address_space_operations ext4_journalled_aops = {
3573 .read_folio = ext4_read_folio,
3574 .readahead = ext4_readahead,
3575 .writepages = ext4_writepages,
3576 .write_begin = ext4_write_begin,
3577 .write_end = ext4_journalled_write_end,
3578 .dirty_folio = ext4_journalled_dirty_folio,
3580 .invalidate_folio = ext4_journalled_invalidate_folio,
3581 .release_folio = ext4_release_folio,
3582 .direct_IO = noop_direct_IO,
3583 .migrate_folio = buffer_migrate_folio_norefs,
3584 .is_partially_uptodate = block_is_partially_uptodate,
3585 .error_remove_folio = generic_error_remove_folio,
3586 .swap_activate = ext4_iomap_swap_activate,
3589 static const struct address_space_operations ext4_da_aops = {
3590 .read_folio = ext4_read_folio,
3591 .readahead = ext4_readahead,
3592 .writepages = ext4_writepages,
3593 .write_begin = ext4_da_write_begin,
3594 .write_end = ext4_da_write_end,
3595 .dirty_folio = ext4_dirty_folio,
3597 .invalidate_folio = ext4_invalidate_folio,
3598 .release_folio = ext4_release_folio,
3599 .direct_IO = noop_direct_IO,
3600 .migrate_folio = buffer_migrate_folio,
3601 .is_partially_uptodate = block_is_partially_uptodate,
3602 .error_remove_folio = generic_error_remove_folio,
3603 .swap_activate = ext4_iomap_swap_activate,
3606 static const struct address_space_operations ext4_dax_aops = {
3607 .writepages = ext4_dax_writepages,
3608 .direct_IO = noop_direct_IO,
3609 .dirty_folio = noop_dirty_folio,
3611 .swap_activate = ext4_iomap_swap_activate,
3614 void ext4_set_aops(struct inode *inode)
3616 switch (ext4_inode_journal_mode(inode)) {
3617 case EXT4_INODE_ORDERED_DATA_MODE:
3618 case EXT4_INODE_WRITEBACK_DATA_MODE:
3620 case EXT4_INODE_JOURNAL_DATA_MODE:
3621 inode->i_mapping->a_ops = &ext4_journalled_aops;
3627 inode->i_mapping->a_ops = &ext4_dax_aops;
3628 else if (test_opt(inode->i_sb, DELALLOC))
3629 inode->i_mapping->a_ops = &ext4_da_aops;
3631 inode->i_mapping->a_ops = &ext4_aops;
3635 * Here we can't skip an unwritten buffer even though it usually reads zero
3636 * because it might have data in pagecache (e.g., if called from ext4_zero_range,
3637 * ext4_punch_hole, etc) which needs to be properly zeroed out. Otherwise a
3638 * racing writeback can come later and flush the stale pagecache to disk.
3640 static int __ext4_block_zero_page_range(handle_t *handle,
3641 struct address_space *mapping, loff_t from, loff_t length)
3643 ext4_fsblk_t index = from >> PAGE_SHIFT;
3644 unsigned offset = from & (PAGE_SIZE-1);
3645 unsigned blocksize, pos;
3647 struct inode *inode = mapping->host;
3648 struct buffer_head *bh;
3649 struct folio *folio;
3652 folio = __filemap_get_folio(mapping, from >> PAGE_SHIFT,
3653 FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
3654 mapping_gfp_constraint(mapping, ~__GFP_FS));
3656 return PTR_ERR(folio);
3658 blocksize = inode->i_sb->s_blocksize;
3660 iblock = index << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits);
3662 bh = folio_buffers(folio);
3664 bh = create_empty_buffers(folio, blocksize, 0);
3666 /* Find the buffer that contains "offset" */
3668 while (offset >= pos) {
3669 bh = bh->b_this_page;
3673 if (buffer_freed(bh)) {
3674 BUFFER_TRACE(bh, "freed: skip");
3677 if (!buffer_mapped(bh)) {
3678 BUFFER_TRACE(bh, "unmapped");
3679 ext4_get_block(inode, iblock, bh, 0);
3680 /* unmapped? It's a hole - nothing to do */
3681 if (!buffer_mapped(bh)) {
3682 BUFFER_TRACE(bh, "still unmapped");
3687 /* Ok, it's mapped. Make sure it's up-to-date */
3688 if (folio_test_uptodate(folio))
3689 set_buffer_uptodate(bh);
3691 if (!buffer_uptodate(bh)) {
3692 err = ext4_read_bh_lock(bh, 0, true);
3695 if (fscrypt_inode_uses_fs_layer_crypto(inode)) {
3696 /* We expect the key to be set. */
3697 BUG_ON(!fscrypt_has_encryption_key(inode));
3698 err = fscrypt_decrypt_pagecache_blocks(folio,
3702 clear_buffer_uptodate(bh);
3707 if (ext4_should_journal_data(inode)) {
3708 BUFFER_TRACE(bh, "get write access");
3709 err = ext4_journal_get_write_access(handle, inode->i_sb, bh,
3714 folio_zero_range(folio, offset, length);
3715 BUFFER_TRACE(bh, "zeroed end of block");
3717 if (ext4_should_journal_data(inode)) {
3718 err = ext4_dirty_journalled_data(handle, bh);
3721 mark_buffer_dirty(bh);
3722 if (ext4_should_order_data(inode))
3723 err = ext4_jbd2_inode_add_write(handle, inode, from,
3728 folio_unlock(folio);
3734 * ext4_block_zero_page_range() zeros out a mapping of length 'length'
3735 * starting from file offset 'from'. The range to be zeroed must
3736 * be contained within one block. If the specified range exceeds
3737 * the end of the block, it will be shortened to the end of the block
3738 * that corresponds to 'from'.
3740 static int ext4_block_zero_page_range(handle_t *handle,
3741 struct address_space *mapping, loff_t from, loff_t length)
3743 struct inode *inode = mapping->host;
3744 unsigned offset = from & (PAGE_SIZE-1);
3745 unsigned blocksize = inode->i_sb->s_blocksize;
3746 unsigned max = blocksize - (offset & (blocksize - 1));
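/*
 * Illustrative arithmetic (assuming 4K pages and 4K blocks): for
 * from = 5000, offset = 5000 & 4095 = 904 and max = 4096 - 904 =
 * 3192, the most that can be zeroed without leaving the block.
 */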
3749 * Correct the length if it does not fall between
3750 * 'from' and the end of the block.
3752 if (length > max || length < 0)
3755 if (IS_DAX(inode)) {
3756 return dax_zero_range(inode, from, length, NULL,
3759 return __ext4_block_zero_page_range(handle, mapping, from, length);
3763 * ext4_block_truncate_page() zeroes out a mapping from file offset `from'
3764 * up to the end of the block which corresponds to `from'.
3765 * This is required during truncate. We need to physically zero the tail end
3766 * of that block so it doesn't yield old data if the file is later grown.
3768 static int ext4_block_truncate_page(handle_t *handle,
3769 struct address_space *mapping, loff_t from)
3771 unsigned offset = from & (PAGE_SIZE-1);
3774 struct inode *inode = mapping->host;
3776 /* If we are processing an encrypted inode during orphan list handling */
3777 if (IS_ENCRYPTED(inode) && !fscrypt_has_encryption_key(inode))
3780 blocksize = inode->i_sb->s_blocksize;
3781 length = blocksize - (offset & (blocksize - 1));
3783 return ext4_block_zero_page_range(handle, mapping, from, length);
3786 int ext4_zero_partial_blocks(handle_t *handle, struct inode *inode,
3787 loff_t lstart, loff_t length)
3789 struct super_block *sb = inode->i_sb;
3790 struct address_space *mapping = inode->i_mapping;
3791 unsigned partial_start, partial_end;
3792 ext4_fsblk_t start, end;
3793 loff_t byte_end = (lstart + length - 1);
3796 partial_start = lstart & (sb->s_blocksize - 1);
3797 partial_end = byte_end & (sb->s_blocksize - 1);
3799 start = lstart >> sb->s_blocksize_bits;
3800 end = byte_end >> sb->s_blocksize_bits;
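/*
 * Worked example (illustrative): zeroing lstart = 1000, length = 500
 * with 4K blocks gives byte_end = 1499, partial_start = 1000,
 * partial_end = 1499 and start == end == 0, so the whole request is
 * served by a single in-block zero of bytes 1000..1499.
 */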
3802 /* Handle partial zero within the single block */
3804 (partial_start || (partial_end != sb->s_blocksize - 1))) {
3805 err = ext4_block_zero_page_range(handle, mapping,
3809 /* Handle partial zero out on the start of the range */
3810 if (partial_start) {
3811 err = ext4_block_zero_page_range(handle, mapping,
3812 lstart, sb->s_blocksize);
3816 /* Handle partial zero out on the end of the range */
3817 if (partial_end != sb->s_blocksize - 1)
3818 err = ext4_block_zero_page_range(handle, mapping,
3819 byte_end - partial_end,
3824 int ext4_can_truncate(struct inode *inode)
3826 if (S_ISREG(inode->i_mode))
3828 if (S_ISDIR(inode->i_mode))
3830 if (S_ISLNK(inode->i_mode))
3831 return !ext4_inode_is_fast_symlink(inode);
3836 * We have to make sure i_disksize gets properly updated before we truncate
3837 * page cache due to hole punching or zero range. Otherwise i_disksize update
3838 * can get lost as it may have been postponed to submission of writeback but
3839 * that will never happen after we truncate page cache.
3841 int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset,
3847 loff_t size = i_size_read(inode);
3849 WARN_ON(!inode_is_locked(inode));
3850 if (offset > size || offset + len < size)
3853 if (EXT4_I(inode)->i_disksize >= size)
3856 handle = ext4_journal_start(inode, EXT4_HT_MISC, 1);
3858 return PTR_ERR(handle);
3859 ext4_update_i_disksize(inode, size);
3860 ret = ext4_mark_inode_dirty(handle, inode);
3861 ext4_journal_stop(handle);
3866 static void ext4_wait_dax_page(struct inode *inode)
3868 filemap_invalidate_unlock(inode->i_mapping);
3870 filemap_invalidate_lock(inode->i_mapping);
3873 int ext4_break_layouts(struct inode *inode)
3878 if (WARN_ON_ONCE(!rwsem_is_locked(&inode->i_mapping->invalidate_lock)))
3882 page = dax_layout_busy_page(inode->i_mapping);
3886 error = ___wait_var_event(&page->_refcount,
3887 atomic_read(&page->_refcount) == 1,
3888 TASK_INTERRUPTIBLE, 0, 0,
3889 ext4_wait_dax_page(inode));
3890 } while (error == 0);
3896 * ext4_punch_hole: punches a hole in a file by releasing the blocks
3897 * associated with the given offset and length
3899 * @inode: File inode
3900 * @offset: The offset where the hole will begin
3901 * @len: The length of the hole
3903 * Returns: 0 on success or negative on failure
3906 int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
3908 struct inode *inode = file_inode(file);
3909 struct super_block *sb = inode->i_sb;
3910 ext4_lblk_t first_block, stop_block;
3911 struct address_space *mapping = inode->i_mapping;
3912 loff_t first_block_offset, last_block_offset, max_length;
3913 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3915 unsigned int credits;
3916 int ret = 0, ret2 = 0;
3918 trace_ext4_punch_hole(inode, offset, length, 0);
3921 * Write out all dirty pages to avoid race conditions,
3922 * then release them.
3924 if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
3925 ret = filemap_write_and_wait_range(mapping, offset,
3926 offset + length - 1);
3933 /* No need to punch hole beyond i_size */
3934 if (offset >= inode->i_size)
3938 * If the hole extends beyond i_size, set the hole
3939 * to end after the page that contains i_size
3941 if (offset + length > inode->i_size) {
3942 length = inode->i_size +
3943 PAGE_SIZE - (inode->i_size & (PAGE_SIZE - 1)) -
3948 * For punch hole, offset + length must stay within one block before
3949 * the last allowed range. Adjust the length if it goes beyond that limit.
3951 max_length = sbi->s_bitmap_maxbytes - inode->i_sb->s_blocksize;
3952 if (offset + length > max_length)
3953 length = max_length - offset;
3955 if (offset & (sb->s_blocksize - 1) ||
3956 (offset + length) & (sb->s_blocksize - 1)) {
3958 * Attach jinode to inode for jbd2 if we do any zeroing of
3961 ret = ext4_inode_attach_jinode(inode);
3967 /* Wait for all existing dio workers, newcomers will block on i_rwsem */
3968 inode_dio_wait(inode);
3970 ret = file_modified(file);
3975 * Prevent page faults from reinstantiating pages we have released from
3978 filemap_invalidate_lock(mapping);
3980 ret = ext4_break_layouts(inode);
3984 first_block_offset = round_up(offset, sb->s_blocksize);
3985 last_block_offset = round_down((offset + length), sb->s_blocksize) - 1;
3987 /* Now release the pages and zero the block-aligned part of pages */
3988 if (last_block_offset > first_block_offset) {
3989 ret = ext4_update_disksize_before_punch(inode, offset, length);
3992 truncate_pagecache_range(inode, first_block_offset,
3996 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3997 credits = ext4_writepage_trans_blocks(inode);
3999 credits = ext4_blocks_for_truncate(inode);
4000 handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
4001 if (IS_ERR(handle)) {
4002 ret = PTR_ERR(handle);
4003 ext4_std_error(sb, ret);
4007 ret = ext4_zero_partial_blocks(handle, inode, offset,
4012 first_block = (offset + sb->s_blocksize - 1) >>
4013 EXT4_BLOCK_SIZE_BITS(sb);
4014 stop_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb);
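/*
 * Illustrative example: punching offset = 1000, length = 10000 on a
 * 4K-block filesystem gives first_block = 1 and stop_block = 2, so
 * only block 1 (bytes 4096..8191) is released; the partial head and
 * tail were zeroed above by ext4_zero_partial_blocks().
 */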
4016 /* If there are blocks to remove, do it */
4017 if (stop_block > first_block) {
4019 down_write(&EXT4_I(inode)->i_data_sem);
4020 ext4_discard_preallocations(inode, 0);
4022 ext4_es_remove_extent(inode, first_block,
4023 stop_block - first_block);
4025 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
4026 ret = ext4_ext_remove_space(inode, first_block,
4029 ret = ext4_ind_remove_space(handle, inode, first_block,
4032 up_write(&EXT4_I(inode)->i_data_sem);
4034 ext4_fc_track_range(handle, inode, first_block, stop_block);
4036 ext4_handle_sync(handle);
4038 inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
4039 ret2 = ext4_mark_inode_dirty(handle, inode);
4043 ext4_update_inode_fsync_trans(handle, inode, 1);
4045 ext4_journal_stop(handle);
4047 filemap_invalidate_unlock(mapping);
4049 inode_unlock(inode);
4053 int ext4_inode_attach_jinode(struct inode *inode)
4055 struct ext4_inode_info *ei = EXT4_I(inode);
4056 struct jbd2_inode *jinode;
4058 if (ei->jinode || !EXT4_SB(inode->i_sb)->s_journal)
4061 jinode = jbd2_alloc_inode(GFP_KERNEL);
4062 spin_lock(&inode->i_lock);
4065 spin_unlock(&inode->i_lock);
4068 ei->jinode = jinode;
4069 jbd2_journal_init_jbd_inode(ei->jinode, inode);
4072 spin_unlock(&inode->i_lock);
4073 if (unlikely(jinode != NULL))
4074 jbd2_free_inode(jinode);
4081 * We block out ext4_get_block() block instantiations across the entire
4082 * transaction, and VFS/VM ensures that ext4_truncate() cannot run
4083 * simultaneously on behalf of the same inode.
4085 * As we work through the truncate and commit bits of it to the journal there
4086 * is one core, guiding principle: the file's tree must always be consistent on
4087 * disk. We must be able to restart the truncate after a crash.
4089 * The file's tree may be transiently inconsistent in memory (although it
4090 * probably isn't), but whenever we close off and commit a journal transaction,
4091 * the contents of (the filesystem + the journal) must be consistent and
4092 * restartable. It's pretty simple, really: bottom up, right to left (although
4093 * left-to-right works OK too).
4095 * Note that at recovery time, journal replay occurs *before* the restart of
4096 * truncate against the orphan inode list.
4098 * The committed inode has the new, desired i_size (which is the same as
4099 * i_disksize in this case). After a crash, ext4_orphan_cleanup() will see
4100 * that this inode's truncate did not complete and it will again call
4101 * ext4_truncate() to have another go. So there will be instantiated blocks
4102 * to the right of the truncation point in a crashed ext4 filesystem. But
4103 * that's fine - as long as they are linked from the inode, the post-crash
4104 * ext4_truncate() run will find them and release them.
4106 int ext4_truncate(struct inode *inode)
4108 struct ext4_inode_info *ei = EXT4_I(inode);
4109 unsigned int credits;
4112 struct address_space *mapping = inode->i_mapping;
4115 * There is a possibility that we're either freeing the inode
4116 * or it's a completely new inode. In those cases we might not
4117 * have i_rwsem locked because it's not necessary.
4119 if (!(inode->i_state & (I_NEW|I_FREEING)))
4120 WARN_ON(!inode_is_locked(inode));
4121 trace_ext4_truncate_enter(inode);
4123 if (!ext4_can_truncate(inode))
4126 if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC))
4127 ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
4129 if (ext4_has_inline_data(inode)) {
4132 err = ext4_inline_data_truncate(inode, &has_inline);
4133 if (err || has_inline)
4137	/* If we zero out the tail of the page, we have to create a jinode for jbd2 */
4138 if (inode->i_size & (inode->i_sb->s_blocksize - 1)) {
4139 err = ext4_inode_attach_jinode(inode);
4144 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
4145 credits = ext4_writepage_trans_blocks(inode);
4147 credits = ext4_blocks_for_truncate(inode);
4149 handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
4150 if (IS_ERR(handle)) {
4151 err = PTR_ERR(handle);
4155 if (inode->i_size & (inode->i_sb->s_blocksize - 1))
4156 ext4_block_truncate_page(handle, mapping, inode->i_size);
4159 * We add the inode to the orphan list, so that if this
4160 * truncate spans multiple transactions, and we crash, we will
4161 * resume the truncate when the filesystem recovers. It also
4162 * marks the inode dirty, to catch the new size.
4164 * Implication: the file must always be in a sane, consistent
4165 * truncatable state while each transaction commits.
4167 err = ext4_orphan_add(handle, inode);
4171 down_write(&EXT4_I(inode)->i_data_sem);
4173 ext4_discard_preallocations(inode, 0);
4175 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
4176 err = ext4_ext_truncate(handle, inode);
4178 ext4_ind_truncate(handle, inode);
4180 up_write(&ei->i_data_sem);
4185 ext4_handle_sync(handle);
4189 * If this was a simple ftruncate() and the file will remain alive,
4190 * then we need to clear up the orphan record which we created above.
4191 * However, if this was a real unlink then we were called by
4192 * ext4_evict_inode(), and we allow that function to clean up the
4193 * orphan info for us.
4196 ext4_orphan_del(handle, inode);
4198 inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
4199 err2 = ext4_mark_inode_dirty(handle, inode);
4200 if (unlikely(err2 && !err))
4202 ext4_journal_stop(handle);
4205 trace_ext4_truncate_exit(inode);
4209 static inline u64 ext4_inode_peek_iversion(const struct inode *inode)
4211 if (unlikely(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL))
4212 return inode_peek_iversion_raw(inode);
4214 return inode_peek_iversion(inode);
4217 static int ext4_inode_blocks_set(struct ext4_inode *raw_inode,
4218 struct ext4_inode_info *ei)
4220 struct inode *inode = &(ei->vfs_inode);
4221 u64 i_blocks = READ_ONCE(inode->i_blocks);
4222 struct super_block *sb = inode->i_sb;
4224 if (i_blocks <= ~0U) {
4226 * i_blocks can be represented in a 32 bit variable
4227		 * as a multiple of 512 bytes
4229 raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
4230 raw_inode->i_blocks_high = 0;
4231 ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
4236 * This should never happen since sb->s_maxbytes should not have
4237	 * allowed this; sb->s_maxbytes was set according to the huge_file
4238 * feature in ext4_fill_super().
4240 if (!ext4_has_feature_huge_file(sb))
4241 return -EFSCORRUPTED;
4243 if (i_blocks <= 0xffffffffffffULL) {
4245 * i_blocks can be represented in a 48 bit variable
4246		 * as a multiple of 512 bytes
4248 raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
4249 raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
4250 ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
4252 ext4_set_inode_flag(inode, EXT4_INODE_HUGE_FILE);
4253		/* i_blocks is stored in units of the file system block size */
4254 i_blocks = i_blocks >> (inode->i_blkbits - 9);
4255 raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
4256 raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
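/*
 * Worked example of the three encodings above (illustrative arithmetic,
 * assuming 512-byte units and a 4k block size):
 *
 *   - 1 TiB file:  2^40 / 512 = 2^31 sectors, fits in i_blocks_lo alone.
 *   - 4 TiB file:  2^42 / 512 = 2^33 sectors, needs the 48-bit lo+high
 *     split but not EXT4_INODE_HUGE_FILE.
 *   - beyond 2^48 - 1 sectors (~128 PiB): the HUGE_FILE flag is set and
 *     i_blocks is re-scaled to filesystem blocks, i.e. shifted right by
 *     (i_blkbits - 9) = 3 for 4k blocks, buying another factor of 8.
 */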
4261 static int ext4_fill_raw_inode(struct inode *inode, struct ext4_inode *raw_inode)
4263 struct ext4_inode_info *ei = EXT4_I(inode);
4270 err = ext4_inode_blocks_set(raw_inode, ei);
4272 raw_inode->i_mode = cpu_to_le16(inode->i_mode);
4273 i_uid = i_uid_read(inode);
4274 i_gid = i_gid_read(inode);
4275 i_projid = from_kprojid(&init_user_ns, ei->i_projid);
4276 if (!(test_opt(inode->i_sb, NO_UID32))) {
4277 raw_inode->i_uid_low = cpu_to_le16(low_16_bits(i_uid));
4278 raw_inode->i_gid_low = cpu_to_le16(low_16_bits(i_gid));
4280 * Fix up interoperability with old kernels. Otherwise,
4281		 * old inodes get re-used with the upper 16 bits of the uid/gid intact.
4284 if (ei->i_dtime && list_empty(&ei->i_orphan)) {
4285 raw_inode->i_uid_high = 0;
4286 raw_inode->i_gid_high = 0;
4288 raw_inode->i_uid_high =
4289 cpu_to_le16(high_16_bits(i_uid));
4290 raw_inode->i_gid_high =
4291 cpu_to_le16(high_16_bits(i_gid));
4294 raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(i_uid));
4295 raw_inode->i_gid_low = cpu_to_le16(fs_high2lowgid(i_gid));
4296 raw_inode->i_uid_high = 0;
4297 raw_inode->i_gid_high = 0;
4299 raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
4301 EXT4_INODE_SET_CTIME(inode, raw_inode);
4302 EXT4_INODE_SET_MTIME(inode, raw_inode);
4303 EXT4_INODE_SET_ATIME(inode, raw_inode);
4304 EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);
4306 raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
4307 raw_inode->i_flags = cpu_to_le32(ei->i_flags & 0xFFFFFFFF);
4308 if (likely(!test_opt2(inode->i_sb, HURD_COMPAT)))
4309 raw_inode->i_file_acl_high =
4310 cpu_to_le16(ei->i_file_acl >> 32);
4311 raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
4312 ext4_isize_set(raw_inode, ei->i_disksize);
4314 raw_inode->i_generation = cpu_to_le32(inode->i_generation);
4315 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
4316 if (old_valid_dev(inode->i_rdev)) {
4317 raw_inode->i_block[0] =
4318 cpu_to_le32(old_encode_dev(inode->i_rdev));
4319 raw_inode->i_block[1] = 0;
4321 raw_inode->i_block[0] = 0;
4322 raw_inode->i_block[1] =
4323 cpu_to_le32(new_encode_dev(inode->i_rdev));
4324 raw_inode->i_block[2] = 0;
4326 } else if (!ext4_has_inline_data(inode)) {
4327 for (block = 0; block < EXT4_N_BLOCKS; block++)
4328 raw_inode->i_block[block] = ei->i_data[block];
4331 if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) {
4332 u64 ivers = ext4_inode_peek_iversion(inode);
4334 raw_inode->i_disk_version = cpu_to_le32(ivers);
4335 if (ei->i_extra_isize) {
4336 if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
4337 raw_inode->i_version_hi =
4338 cpu_to_le32(ivers >> 32);
4339 raw_inode->i_extra_isize =
4340 cpu_to_le16(ei->i_extra_isize);
4344 if (i_projid != EXT4_DEF_PROJID &&
4345 !ext4_has_feature_project(inode->i_sb))
4346 err = err ?: -EFSCORRUPTED;
4348 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
4349 EXT4_FITS_IN_INODE(raw_inode, ei, i_projid))
4350 raw_inode->i_projid = cpu_to_le32(i_projid);
4352 ext4_inode_csum_set(inode, raw_inode, ei);
4357 * ext4_get_inode_loc returns with an extra refcount against the inode's
4358 * underlying buffer_head on success. If we pass 'inode' and it does not
4359 * have in-inode xattr, we have all inode data in memory that is needed
4360 * to recreate the on-disk version of this inode.
4362 static int __ext4_get_inode_loc(struct super_block *sb, unsigned long ino,
4363 struct inode *inode, struct ext4_iloc *iloc,
4364 ext4_fsblk_t *ret_block)
4366 struct ext4_group_desc *gdp;
4367 struct buffer_head *bh;
4369 struct blk_plug plug;
4370 int inodes_per_block, inode_offset;
4373 if (ino < EXT4_ROOT_INO ||
4374 ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))
4375 return -EFSCORRUPTED;
4377 iloc->block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
4378 gdp = ext4_get_group_desc(sb, iloc->block_group, NULL);
4383 * Figure out the offset within the block group inode table
4385 inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
4386 inode_offset = ((ino - 1) %
4387 EXT4_INODES_PER_GROUP(sb));
4388 iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb);
4390 block = ext4_inode_table(sb, gdp);
4391 if ((block <= le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) ||
4392 (block >= ext4_blocks_count(EXT4_SB(sb)->s_es))) {
4393 ext4_error(sb, "Invalid inode table block %llu in "
4394 "block_group %u", block, iloc->block_group);
4395 return -EFSCORRUPTED;
4397 block += (inode_offset / inodes_per_block);
4399 bh = sb_getblk(sb, block);
4402 if (ext4_buffer_uptodate(bh))
4406 if (ext4_buffer_uptodate(bh)) {
4407 /* Someone brought it uptodate while we waited */
4413 * If we have all information of the inode in memory and this
4414	 * is the only valid inode in the block, we need not read the block.
4417 if (inode && !ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
4418 struct buffer_head *bitmap_bh;
4421 start = inode_offset & ~(inodes_per_block - 1);
4423 /* Is the inode bitmap in cache? */
4424 bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp));
4425 if (unlikely(!bitmap_bh))
4429 * If the inode bitmap isn't in cache then the
4430 * optimisation may end up performing two reads instead
4431 * of one, so skip it.
4433 if (!buffer_uptodate(bitmap_bh)) {
4437 for (i = start; i < start + inodes_per_block; i++) {
4438 if (i == inode_offset)
4440 if (ext4_test_bit(i, bitmap_bh->b_data))
4444 if (i == start + inodes_per_block) {
4445 struct ext4_inode *raw_inode =
4446 (struct ext4_inode *) (bh->b_data + iloc->offset);
4448 /* all other inodes are free, so skip I/O */
4449 memset(bh->b_data, 0, bh->b_size);
4450 if (!ext4_test_inode_state(inode, EXT4_STATE_NEW))
4451 ext4_fill_raw_inode(inode, raw_inode);
4452 set_buffer_uptodate(bh);
4460 * If we need to do any I/O, try to pre-readahead extra
4461 * blocks from the inode table.
4463 blk_start_plug(&plug);
4464 if (EXT4_SB(sb)->s_inode_readahead_blks) {
4465 ext4_fsblk_t b, end, table;
4467 __u32 ra_blks = EXT4_SB(sb)->s_inode_readahead_blks;
4469 table = ext4_inode_table(sb, gdp);
4470 /* s_inode_readahead_blks is always a power of 2 */
4471 b = block & ~((ext4_fsblk_t) ra_blks - 1);
4475 num = EXT4_INODES_PER_GROUP(sb);
4476 if (ext4_has_group_desc_csum(sb))
4477 num -= ext4_itable_unused_count(sb, gdp);
4478 table += num / inodes_per_block;
4482 ext4_sb_breadahead_unmovable(sb, b++);
4486 * There are other valid inodes in the buffer, this inode
4487 * has in-inode xattrs, or we don't have this inode in memory.
4488 * Read the block from disk.
4490 trace_ext4_load_inode(sb, ino);
4491 ext4_read_bh_nowait(bh, REQ_META | REQ_PRIO, NULL);
4492 blk_finish_plug(&plug);
4494 ext4_simulate_fail_bh(sb, bh, EXT4_SIM_INODE_EIO);
4495 if (!buffer_uptodate(bh)) {
4506 static int __ext4_get_inode_loc_noinmem(struct inode *inode,
4507 struct ext4_iloc *iloc)
4509 ext4_fsblk_t err_blk = 0;
4512 ret = __ext4_get_inode_loc(inode->i_sb, inode->i_ino, NULL, iloc,
4516 ext4_error_inode_block(inode, err_blk, EIO,
4517 "unable to read itable block");
4522 int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
4524 ext4_fsblk_t err_blk = 0;
4527 ret = __ext4_get_inode_loc(inode->i_sb, inode->i_ino, inode, iloc,
4531 ext4_error_inode_block(inode, err_blk, EIO,
4532 "unable to read itable block");
4538 int ext4_get_fc_inode_loc(struct super_block *sb, unsigned long ino,
4539 struct ext4_iloc *iloc)
4541 return __ext4_get_inode_loc(sb, ino, NULL, iloc, NULL);
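/*
 * Illustration of the iloc arithmetic done by __ext4_get_inode_loc()
 * above (made-up example values: 4k blocks, 256-byte inodes, hence 16
 * inodes per block, and 8192 inodes per group):
 *
 *	ino = 8200:
 *	block_group  = (8200 - 1) / 8192	= 1
 *	inode_offset = (8200 - 1) % 8192	= 7
 *	iloc->offset = (7 % 16) * 256		= 1792 bytes into the bh
 *	block        = ext4_inode_table(sb, gdp) + 7 / 16
 *		     = first itable block of group 1
 */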
4544 static bool ext4_should_enable_dax(struct inode *inode)
4546 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4548 if (test_opt2(inode->i_sb, DAX_NEVER))
4550 if (!S_ISREG(inode->i_mode))
4552 if (ext4_should_journal_data(inode))
4554 if (ext4_has_inline_data(inode))
4556 if (ext4_test_inode_flag(inode, EXT4_INODE_ENCRYPT))
4558 if (ext4_test_inode_flag(inode, EXT4_INODE_VERITY))
4560 if (!test_bit(EXT4_FLAGS_BDEV_IS_DAX, &sbi->s_ext4_flags))
4562 if (test_opt(inode->i_sb, DAX_ALWAYS))
4565 return ext4_test_inode_flag(inode, EXT4_INODE_DAX);
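/*
 * To summarize the precedence implied by the checks above: the dax=never
 * mount option always wins; then any property incompatible with DAX
 * (non-regular file, data journalling, inline data, encryption, verity,
 * or a block device without DAX support) disables it; then dax=always
 * enables it unconditionally; otherwise the per-inode EXT4_INODE_DAX
 * flag decides.
 */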
4568 void ext4_set_inode_flags(struct inode *inode, bool init)
4570 unsigned int flags = EXT4_I(inode)->i_flags;
4571 unsigned int new_fl = 0;
4573 WARN_ON_ONCE(IS_DAX(inode) && init);
4575 if (flags & EXT4_SYNC_FL)
4577 if (flags & EXT4_APPEND_FL)
4579 if (flags & EXT4_IMMUTABLE_FL)
4580 new_fl |= S_IMMUTABLE;
4581 if (flags & EXT4_NOATIME_FL)
4582 new_fl |= S_NOATIME;
4583 if (flags & EXT4_DIRSYNC_FL)
4584 new_fl |= S_DIRSYNC;
4586 /* Because of the way inode_set_flags() works we must preserve S_DAX
4587 * here if already set. */
4588 new_fl |= (inode->i_flags & S_DAX);
4589 if (init && ext4_should_enable_dax(inode))
4592 if (flags & EXT4_ENCRYPT_FL)
4593 new_fl |= S_ENCRYPTED;
4594 if (flags & EXT4_CASEFOLD_FL)
4595 new_fl |= S_CASEFOLD;
4596 if (flags & EXT4_VERITY_FL)
4598 inode_set_flags(inode, new_fl,
4599 S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_DAX|
4600 S_ENCRYPTED|S_CASEFOLD|S_VERITY);
4603 static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
4604 struct ext4_inode_info *ei)
4607 struct inode *inode = &(ei->vfs_inode);
4608 struct super_block *sb = inode->i_sb;
4610 if (ext4_has_feature_huge_file(sb)) {
4611 /* we are using combined 48 bit field */
4612 i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 |
4613 le32_to_cpu(raw_inode->i_blocks_lo);
4614 if (ext4_test_inode_flag(inode, EXT4_INODE_HUGE_FILE)) {
4615 /* i_blocks represent file system block size */
4616 return i_blocks << (inode->i_blkbits - 9);
4621 return le32_to_cpu(raw_inode->i_blocks_lo);
4625 static inline int ext4_iget_extra_inode(struct inode *inode,
4626 struct ext4_inode *raw_inode,
4627 struct ext4_inode_info *ei)
4629 __le32 *magic = (void *)raw_inode +
4630 EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize;
4632 if (EXT4_INODE_HAS_XATTR_SPACE(inode) &&
4633 *magic == cpu_to_le32(EXT4_XATTR_MAGIC)) {
4636 ext4_set_inode_state(inode, EXT4_STATE_XATTR);
4637 err = ext4_find_inline_data_nolock(inode);
4638 if (!err && ext4_has_inline_data(inode))
4639 ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
4642 EXT4_I(inode)->i_inline_off = 0;
4646 int ext4_get_projid(struct inode *inode, kprojid_t *projid)
4648 if (!ext4_has_feature_project(inode->i_sb))
4650 *projid = EXT4_I(inode)->i_projid;
4655 * ext4 manages i_version itself for ea inodes: it stores the lower 32 bits of
4656 * the refcount in i_version, so use the raw value for EXT4_EA_INODE_FL inodes.
4659 static inline void ext4_inode_set_iversion_queried(struct inode *inode, u64 val)
4661 if (unlikely(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL))
4662 inode_set_iversion_raw(inode, val);
4664 inode_set_iversion_queried(inode, val);
4667 static const char *check_igot_inode(struct inode *inode, ext4_iget_flags flags)
4670 if (flags & EXT4_IGET_EA_INODE) {
4671 if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL))
4672 return "missing EA_INODE flag";
4673 if (ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
4674 EXT4_I(inode)->i_file_acl)
4675 return "ea_inode with extended attributes";
4677 if ((EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL))
4678 return "unexpected EA_INODE flag";
4680 if (is_bad_inode(inode) && !(flags & EXT4_IGET_BAD))
4681 return "unexpected bad inode w/o EXT4_IGET_BAD";
4685 struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
4686 ext4_iget_flags flags, const char *function,
4689 struct ext4_iloc iloc;
4690 struct ext4_inode *raw_inode;
4691 struct ext4_inode_info *ei;
4692 struct ext4_super_block *es = EXT4_SB(sb)->s_es;
4693 struct inode *inode;
4694 const char *err_str;
4695 journal_t *journal = EXT4_SB(sb)->s_journal;
4703 if ((!(flags & EXT4_IGET_SPECIAL) &&
4704 ((ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO) ||
4705 ino == le32_to_cpu(es->s_usr_quota_inum) ||
4706 ino == le32_to_cpu(es->s_grp_quota_inum) ||
4707 ino == le32_to_cpu(es->s_prj_quota_inum) ||
4708 ino == le32_to_cpu(es->s_orphan_file_inum))) ||
4709 (ino < EXT4_ROOT_INO) ||
4710 (ino > le32_to_cpu(es->s_inodes_count))) {
4711 if (flags & EXT4_IGET_HANDLE)
4712 return ERR_PTR(-ESTALE);
4713 __ext4_error(sb, function, line, false, EFSCORRUPTED, 0,
4714 "inode #%lu: comm %s: iget: illegal inode #",
4715 ino, current->comm);
4716 return ERR_PTR(-EFSCORRUPTED);
4719 inode = iget_locked(sb, ino);
4721 return ERR_PTR(-ENOMEM);
4722 if (!(inode->i_state & I_NEW)) {
4723 if ((err_str = check_igot_inode(inode, flags)) != NULL) {
4724 ext4_error_inode(inode, function, line, 0, err_str);
4726 return ERR_PTR(-EFSCORRUPTED);
4734 ret = __ext4_get_inode_loc_noinmem(inode, &iloc);
4737 raw_inode = ext4_raw_inode(&iloc);
4739 if ((flags & EXT4_IGET_HANDLE) &&
4740 (raw_inode->i_links_count == 0) && (raw_inode->i_mode == 0)) {
4745 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
4746 ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
4747 if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
4748 EXT4_INODE_SIZE(inode->i_sb) ||
4749 (ei->i_extra_isize & 3)) {
4750 ext4_error_inode(inode, function, line, 0,
4751 "iget: bad extra_isize %u "
4754 EXT4_INODE_SIZE(inode->i_sb));
4755 ret = -EFSCORRUPTED;
4759 ei->i_extra_isize = 0;
4761 /* Precompute checksum seed for inode metadata */
4762 if (ext4_has_metadata_csum(sb)) {
4763 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4765 __le32 inum = cpu_to_le32(inode->i_ino);
4766 __le32 gen = raw_inode->i_generation;
4767 csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum,
4769 ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen,
4773 if ((!ext4_inode_csum_verify(inode, raw_inode, ei) ||
4774 ext4_simulate_fail(sb, EXT4_SIM_INODE_CRC)) &&
4775 (!(EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY))) {
4776 ext4_error_inode_err(inode, function, line, 0,
4777 EFSBADCRC, "iget: checksum invalid");
4782 inode->i_mode = le16_to_cpu(raw_inode->i_mode);
4783 i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
4784 i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
4785 if (ext4_has_feature_project(sb) &&
4786 EXT4_INODE_SIZE(sb) > EXT4_GOOD_OLD_INODE_SIZE &&
4787 EXT4_FITS_IN_INODE(raw_inode, ei, i_projid))
4788 i_projid = (projid_t)le32_to_cpu(raw_inode->i_projid);
4790 i_projid = EXT4_DEF_PROJID;
4792 if (!(test_opt(inode->i_sb, NO_UID32))) {
4793 i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
4794 i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
4796 i_uid_write(inode, i_uid);
4797 i_gid_write(inode, i_gid);
4798 ei->i_projid = make_kprojid(&init_user_ns, i_projid);
4799 set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
4801 ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */
4802 ei->i_inline_off = 0;
4803 ei->i_dir_start_lookup = 0;
4804 ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
4805 /* We now have enough fields to check if the inode was active or not.
4806	 * This is needed because nfsd might try to access dead inodes;
4807	 * the test is the same one that e2fsck uses.
4808 * NeilBrown 1999oct15
4810 if (inode->i_nlink == 0) {
4811 if ((inode->i_mode == 0 || flags & EXT4_IGET_SPECIAL ||
4812 !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) &&
4813 ino != EXT4_BOOT_LOADER_INO) {
4814 /* this inode is deleted or unallocated */
4815 if (flags & EXT4_IGET_SPECIAL) {
4816 ext4_error_inode(inode, function, line, 0,
4817 "iget: special inode unallocated");
4818 ret = -EFSCORRUPTED;
4823 /* The only unlinked inodes we let through here have
4824 * valid i_mode and are being read by the orphan
4825 * recovery code: that's fine, we're about to complete
4826 * the process of deleting those.
4827 * OR it is the EXT4_BOOT_LOADER_INO which is
4828 * not initialized on a new filesystem. */
4830 ei->i_flags = le32_to_cpu(raw_inode->i_flags);
4831 ext4_set_inode_flags(inode, true);
4832 inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
4833 ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
4834 if (ext4_has_feature_64bit(sb))
4836 ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
4837 inode->i_size = ext4_isize(sb, raw_inode);
4838 if ((size = i_size_read(inode)) < 0) {
4839 ext4_error_inode(inode, function, line, 0,
4840 "iget: bad i_size value: %lld", size);
4841 ret = -EFSCORRUPTED;
4845	 * If dir_index is not enabled but there is a dir with the INDEX flag set,
4846	 * we'd normally treat htree data as empty space. But with metadata
4847	 * checksumming enabled that would corrupt the checksums, so forbid it.
4849 if (!ext4_has_feature_dir_index(sb) && ext4_has_metadata_csum(sb) &&
4850 ext4_test_inode_flag(inode, EXT4_INODE_INDEX)) {
4851 ext4_error_inode(inode, function, line, 0,
4852 "iget: Dir with htree data on filesystem without dir_index feature.");
4853 ret = -EFSCORRUPTED;
4856 ei->i_disksize = inode->i_size;
4858 ei->i_reserved_quota = 0;
4860 inode->i_generation = le32_to_cpu(raw_inode->i_generation);
4861 ei->i_block_group = iloc.block_group;
4862 ei->i_last_alloc_group = ~0;
4864 * NOTE! The in-memory inode i_data array is in little-endian order
4865 * even on big-endian machines: we do NOT byteswap the block numbers!
4867 for (block = 0; block < EXT4_N_BLOCKS; block++)
4868 ei->i_data[block] = raw_inode->i_block[block];
4869 INIT_LIST_HEAD(&ei->i_orphan);
4870 ext4_fc_init_inode(&ei->vfs_inode);
4873 * Set transaction id's of transactions that have to be committed
4874 * to finish f[data]sync. We set them to currently running transaction
4875 * as we cannot be sure that the inode or some of its metadata isn't
4876 * part of the transaction - the inode could have been reclaimed and
4877 * now it is reread from disk.
4880 transaction_t *transaction;
4883 read_lock(&journal->j_state_lock);
4884 if (journal->j_running_transaction)
4885 transaction = journal->j_running_transaction;
4887 transaction = journal->j_committing_transaction;
4889 tid = transaction->t_tid;
4891 tid = journal->j_commit_sequence;
4892 read_unlock(&journal->j_state_lock);
4893 ei->i_sync_tid = tid;
4894 ei->i_datasync_tid = tid;
4897 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
4898 if (ei->i_extra_isize == 0) {
4899 /* The extra space is currently unused. Use it. */
4900 BUILD_BUG_ON(sizeof(struct ext4_inode) & 3);
4901 ei->i_extra_isize = sizeof(struct ext4_inode) -
4902 EXT4_GOOD_OLD_INODE_SIZE;
4904 ret = ext4_iget_extra_inode(inode, raw_inode, ei);
4910 EXT4_INODE_GET_CTIME(inode, raw_inode);
4911 EXT4_INODE_GET_ATIME(inode, raw_inode);
4912 EXT4_INODE_GET_MTIME(inode, raw_inode);
4913 EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode);
4915 if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) {
4916 u64 ivers = le32_to_cpu(raw_inode->i_disk_version);
4918 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
4919 if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
4921 (__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32;
4923 ext4_inode_set_iversion_queried(inode, ivers);
4927 if (ei->i_file_acl &&
4928 !ext4_inode_block_valid(inode, ei->i_file_acl, 1)) {
4929 ext4_error_inode(inode, function, line, 0,
4930 "iget: bad extended attribute block %llu",
4932 ret = -EFSCORRUPTED;
4934 } else if (!ext4_has_inline_data(inode)) {
4935 /* validate the block references in the inode */
4936 if (!(EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY) &&
4937 (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
4938 (S_ISLNK(inode->i_mode) &&
4939 !ext4_inode_is_fast_symlink(inode)))) {
4940 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
4941 ret = ext4_ext_check_inode(inode);
4943 ret = ext4_ind_check_inode(inode);
4949 if (S_ISREG(inode->i_mode)) {
4950 inode->i_op = &ext4_file_inode_operations;
4951 inode->i_fop = &ext4_file_operations;
4952 ext4_set_aops(inode);
4953 } else if (S_ISDIR(inode->i_mode)) {
4954 inode->i_op = &ext4_dir_inode_operations;
4955 inode->i_fop = &ext4_dir_operations;
4956 } else if (S_ISLNK(inode->i_mode)) {
4957 /* VFS does not allow setting these so must be corruption */
4958 if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) {
4959 ext4_error_inode(inode, function, line, 0,
4960 "iget: immutable or append flags "
4961 "not allowed on symlinks");
4962 ret = -EFSCORRUPTED;
4965 if (IS_ENCRYPTED(inode)) {
4966 inode->i_op = &ext4_encrypted_symlink_inode_operations;
4967 } else if (ext4_inode_is_fast_symlink(inode)) {
4968 inode->i_link = (char *)ei->i_data;
4969 inode->i_op = &ext4_fast_symlink_inode_operations;
4970 nd_terminate_link(ei->i_data, inode->i_size,
4971 sizeof(ei->i_data) - 1);
4973 inode->i_op = &ext4_symlink_inode_operations;
4975 } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
4976 S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
4977 inode->i_op = &ext4_special_inode_operations;
4978 if (raw_inode->i_block[0])
4979 init_special_inode(inode, inode->i_mode,
4980 old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
4982 init_special_inode(inode, inode->i_mode,
4983 new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
4984 } else if (ino == EXT4_BOOT_LOADER_INO) {
4985 make_bad_inode(inode);
4987 ret = -EFSCORRUPTED;
4988 ext4_error_inode(inode, function, line, 0,
4989 "iget: bogus i_mode (%o)", inode->i_mode);
4992 if (IS_CASEFOLDED(inode) && !ext4_has_feature_casefold(inode->i_sb)) {
4993 ext4_error_inode(inode, function, line, 0,
4994 "casefold flag without casefold feature");
4995 ret = -EFSCORRUPTED;
4998 if ((err_str = check_igot_inode(inode, flags)) != NULL) {
4999 ext4_error_inode(inode, function, line, 0, err_str);
5000 ret = -EFSCORRUPTED;
5005 unlock_new_inode(inode);
5011 return ERR_PTR(ret);
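/*
 * Callers normally reach __ext4_iget() through the ext4_iget() wrapper
 * macro, which supplies __func__ and __LINE__ for error reporting. A
 * typical lookup (sketch; error handling trimmed) looks like:
 *
 *	inode = ext4_iget(sb, ino, EXT4_IGET_NORMAL);
 *	if (IS_ERR(inode))
 *		return PTR_ERR(inode);
 */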
5014 static void __ext4_update_other_inode_time(struct super_block *sb,
5015 unsigned long orig_ino,
5017 struct ext4_inode *raw_inode)
5019 struct inode *inode;
5021 inode = find_inode_by_ino_rcu(sb, ino);
5025 if (!inode_is_dirtytime_only(inode))
5028 spin_lock(&inode->i_lock);
5029 if (inode_is_dirtytime_only(inode)) {
5030 struct ext4_inode_info *ei = EXT4_I(inode);
5032 inode->i_state &= ~I_DIRTY_TIME;
5033 spin_unlock(&inode->i_lock);
5035 spin_lock(&ei->i_raw_lock);
5036 EXT4_INODE_SET_CTIME(inode, raw_inode);
5037 EXT4_INODE_SET_MTIME(inode, raw_inode);
5038 EXT4_INODE_SET_ATIME(inode, raw_inode);
5039 ext4_inode_csum_set(inode, raw_inode, ei);
5040 spin_unlock(&ei->i_raw_lock);
5041 trace_ext4_other_inode_update_time(inode, orig_ino);
5044 spin_unlock(&inode->i_lock);
5048 * Opportunistically update the other time fields for other inodes in
5049 * the same inode table block.
5051 static void ext4_update_other_inodes_time(struct super_block *sb,
5052 unsigned long orig_ino, char *buf)
5055 int i, inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
5056 int inode_size = EXT4_INODE_SIZE(sb);
5059 * Calculate the first inode in the inode table block. Inode
5060 * numbers are one-based. That is, the first inode in a block
5061 * (assuming 4k blocks and 256 byte inodes) is (n*16 + 1).
5063 ino = ((orig_ino - 1) & ~(inodes_per_block - 1)) + 1;
5065 for (i = 0; i < inodes_per_block; i++, ino++, buf += inode_size) {
5066 if (ino == orig_ino)
5068 __ext4_update_other_inode_time(sb, orig_ino, ino,
5069 (struct ext4_inode *)buf);
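/*
 * Example of the first-inode calculation above (illustrative numbers,
 * 16 inodes per block): for orig_ino = 50,
 *
 *	ino = ((50 - 1) & ~(16 - 1)) + 1 = (49 & ~15) + 1 = 48 + 1 = 49
 *
 * so the loop walks inodes 49..64, skipping 50 itself.
 */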
5075 * Post the struct inode info into an on-disk inode location in the
5076 * buffer-cache. This gobbles the caller's reference to the
5077 * buffer_head in the inode location struct.
5079 * The caller must have write access to iloc->bh.
5081 static int ext4_do_update_inode(handle_t *handle,
5082 struct inode *inode,
5083 struct ext4_iloc *iloc)
5085 struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
5086 struct ext4_inode_info *ei = EXT4_I(inode);
5087 struct buffer_head *bh = iloc->bh;
5088 struct super_block *sb = inode->i_sb;
5090 int need_datasync = 0, set_large_file = 0;
5092 spin_lock(&ei->i_raw_lock);
5095 * For fields not tracked in the in-memory inode, initialise them
5096 * to zero for new inodes.
5098 if (ext4_test_inode_state(inode, EXT4_STATE_NEW))
5099 memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
5101 if (READ_ONCE(ei->i_disksize) != ext4_isize(inode->i_sb, raw_inode))
5103 if (ei->i_disksize > 0x7fffffffULL) {
5104 if (!ext4_has_feature_large_file(sb) ||
5105 EXT4_SB(sb)->s_es->s_rev_level == cpu_to_le32(EXT4_GOOD_OLD_REV))
5109 err = ext4_fill_raw_inode(inode, raw_inode);
5110 spin_unlock(&ei->i_raw_lock);
5112 EXT4_ERROR_INODE(inode, "corrupted inode contents");
5116 if (inode->i_sb->s_flags & SB_LAZYTIME)
5117 ext4_update_other_inodes_time(inode->i_sb, inode->i_ino,
5120 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
5121 err = ext4_handle_dirty_metadata(handle, NULL, bh);
5124 ext4_clear_inode_state(inode, EXT4_STATE_NEW);
5125 if (set_large_file) {
5126 BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get write access");
5127 err = ext4_journal_get_write_access(handle, sb,
5132 lock_buffer(EXT4_SB(sb)->s_sbh);
5133 ext4_set_feature_large_file(sb);
5134 ext4_superblock_csum_set(sb);
5135 unlock_buffer(EXT4_SB(sb)->s_sbh);
5136 ext4_handle_sync(handle);
5137 err = ext4_handle_dirty_metadata(handle, NULL,
5138 EXT4_SB(sb)->s_sbh);
5140 ext4_update_inode_fsync_trans(handle, inode, need_datasync);
5142 ext4_std_error(inode->i_sb, err);
5149 * ext4_write_inode()
5151 * We are called from a few places:
5153 * - Within generic_file_aio_write() -> generic_write_sync() for O_SYNC files.
5154 * Here, there will be no transaction running. We wait for any running
5155 * transaction to commit.
5157 * - Within flush work (sys_sync(), kupdate and such).
5158 * We wait on commit, if told to.
5160 * - Within iput_final() -> write_inode_now()
5161 * We wait on commit, if told to.
5163 * In all cases it is actually safe for us to return without doing anything,
5164 * because the inode has been copied into a raw inode buffer in
5165 * ext4_mark_inode_dirty(). This is a correctness thing for WB_SYNC_ALL writeback.
5168 * Note that we are absolutely dependent upon all inode dirtiers doing the
5169 * right thing: they *must* call mark_inode_dirty() after dirtying info in
5170 * which we are interested.
5172 * It would be a bug for them to not do this. The code:
5174 *	mark_inode_dirty(inode)
5175 *	stuff();
5176 *	inode->i_size = expr;
5178 * is in error because write_inode() could occur while `stuff()' is running,
5179 * and the new i_size will be lost. Plus the inode will no longer be on the
5180 * superblock's dirty inode list.
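/*
 * For contrast with the buggy ordering shown above, the correct pattern
 * is simply:
 *
 *	stuff();
 *	inode->i_size = expr;
 *	mark_inode_dirty(inode);
 */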
5182 int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
5186 if (WARN_ON_ONCE(current->flags & PF_MEMALLOC))
5189 if (unlikely(ext4_forced_shutdown(inode->i_sb)))
5192 if (EXT4_SB(inode->i_sb)->s_journal) {
5193 if (ext4_journal_current_handle()) {
5194 ext4_debug("called recursively, non-PF_MEMALLOC!\n");
5200 * No need to force transaction in WB_SYNC_NONE mode. Also
5201		 * ext4_sync_fs() will force the commit after everything is written.
5204 if (wbc->sync_mode != WB_SYNC_ALL || wbc->for_sync)
5207 err = ext4_fc_commit(EXT4_SB(inode->i_sb)->s_journal,
5208 EXT4_I(inode)->i_sync_tid);
5210 struct ext4_iloc iloc;
5212 err = __ext4_get_inode_loc_noinmem(inode, &iloc);
5216 * sync(2) will flush the whole buffer cache. No need to do
5217 * it here separately for each inode.
5219 if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync)
5220 sync_dirty_buffer(iloc.bh);
5221 if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) {
5222 ext4_error_inode_block(inode, iloc.bh->b_blocknr, EIO,
5223 "IO error syncing inode");
5232 * In data=journal mode ext4_journalled_invalidate_folio() may fail to invalidate
5233 * buffers that are attached to a folio straddling i_size and are undergoing
5234 * commit. In that case we have to wait for commit to finish and try again.
5236 static void ext4_wait_for_tail_page_commit(struct inode *inode)
5239 journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
5240 tid_t commit_tid = 0;
5243 offset = inode->i_size & (PAGE_SIZE - 1);
5245 * If the folio is fully truncated, we don't need to wait for any commit
5246 * (and we even should not as __ext4_journalled_invalidate_folio() may
5247 * strip all buffers from the folio but keep the folio dirty which can then
5248 * confuse e.g. concurrent ext4_writepages() seeing dirty folio without
5249 * buffers). Also we don't need to wait for any commit if all buffers in
5250 * the folio remain valid. This is most beneficial for the common case of
5251 * blocksize == PAGESIZE.
5253 if (!offset || offset > (PAGE_SIZE - i_blocksize(inode)))
5256 struct folio *folio = filemap_lock_folio(inode->i_mapping,
5257 inode->i_size >> PAGE_SHIFT);
5260 ret = __ext4_journalled_invalidate_folio(folio, offset,
5261 folio_size(folio) - offset);
5262 folio_unlock(folio);
5267 read_lock(&journal->j_state_lock);
5268 if (journal->j_committing_transaction)
5269 commit_tid = journal->j_committing_transaction->t_tid;
5270 read_unlock(&journal->j_state_lock);
5272 jbd2_log_wait_commit(journal, commit_tid);
5279 * Called from notify_change.
5281 * We want to trap VFS attempts to truncate the file as soon as
5282 * possible. In particular, we want to make sure that when the VFS
5283 * shrinks i_size, we put the inode on the orphan list and modify
5284 * i_disksize immediately, so that during the subsequent flushing of
5285 * dirty pages and freeing of disk blocks, we can guarantee that any
5286 * commit will leave the blocks being flushed in an unused state on
5287 * disk. (On recovery, the inode will get truncated and the blocks will
5288 * be freed, so we have a strong guarantee that no future commit will
5289 * leave these blocks visible to the user.)
5291 * Another thing we have to assure is that if we are in ordered mode
5292 * and the inode is still attached to the committing transaction, we
5293 * must start writeout of all the dirty pages which are being truncated.
5294 * This way we are sure that all the data written in the previous
5295 * transaction are already on disk (truncate waits for pages under
5298 * Called with inode->i_rwsem down.
5300 int ext4_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
5303 struct inode *inode = d_inode(dentry);
5306 const unsigned int ia_valid = attr->ia_valid;
5307 bool inc_ivers = true;
5309 if (unlikely(ext4_forced_shutdown(inode->i_sb)))
5312 if (unlikely(IS_IMMUTABLE(inode)))
5315 if (unlikely(IS_APPEND(inode) &&
5316 (ia_valid & (ATTR_MODE | ATTR_UID |
5317 ATTR_GID | ATTR_TIMES_SET))))
5320 error = setattr_prepare(idmap, dentry, attr);
5324 error = fscrypt_prepare_setattr(dentry, attr);
5328 error = fsverity_prepare_setattr(dentry, attr);
5332 if (is_quota_modification(idmap, inode, attr)) {
5333 error = dquot_initialize(inode);
5338 if (i_uid_needs_update(idmap, attr, inode) ||
5339 i_gid_needs_update(idmap, attr, inode)) {
5342 /* (user+group)*(old+new) structure, inode write (sb,
5343 * inode block, ? - but truncate inode update has it) */
5344 handle = ext4_journal_start(inode, EXT4_HT_QUOTA,
5345 (EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb) +
5346 EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb)) + 3);
5347 if (IS_ERR(handle)) {
5348 error = PTR_ERR(handle);
5352 /* dquot_transfer() calls back ext4_get_inode_usage() which
5353 * counts xattr inode references.
5355 down_read(&EXT4_I(inode)->xattr_sem);
5356 error = dquot_transfer(idmap, inode, attr);
5357 up_read(&EXT4_I(inode)->xattr_sem);
5360 ext4_journal_stop(handle);
5363 /* Update corresponding info in inode so that everything is in
5364 * one transaction */
5365 i_uid_update(idmap, attr, inode);
5366 i_gid_update(idmap, attr, inode);
5367 error = ext4_mark_inode_dirty(handle, inode);
5368 ext4_journal_stop(handle);
5369 if (unlikely(error)) {
5374 if (attr->ia_valid & ATTR_SIZE) {
5376 loff_t oldsize = inode->i_size;
5377 loff_t old_disksize;
5378 int shrink = (attr->ia_size < inode->i_size);
5380 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
5381 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
5383 if (attr->ia_size > sbi->s_bitmap_maxbytes) {
5387 if (!S_ISREG(inode->i_mode)) {
5391 if (attr->ia_size == inode->i_size)
5395 if (ext4_should_order_data(inode)) {
5396 error = ext4_begin_ordered_truncate(inode,
5402 * Blocks are going to be removed from the inode. Wait
5403 * for dio in flight.
5405 inode_dio_wait(inode);
5408 filemap_invalidate_lock(inode->i_mapping);
5410 rc = ext4_break_layouts(inode);
5412 filemap_invalidate_unlock(inode->i_mapping);
5416 if (attr->ia_size != inode->i_size) {
5417 handle = ext4_journal_start(inode, EXT4_HT_INODE, 3);
5418 if (IS_ERR(handle)) {
5419 error = PTR_ERR(handle);
5422 if (ext4_handle_valid(handle) && shrink) {
5423 error = ext4_orphan_add(handle, inode);
5427 * Update c/mtime on truncate up, ext4_truncate() will
5428 * update c/mtime in shrink case below
5431 inode_set_mtime_to_ts(inode,
5432 inode_set_ctime_current(inode));
5435 ext4_fc_track_range(handle, inode,
5436 (attr->ia_size > 0 ? attr->ia_size - 1 : 0) >>
5437 inode->i_sb->s_blocksize_bits,
5438 EXT_MAX_BLOCKS - 1);
5440 ext4_fc_track_range(
5442 (oldsize > 0 ? oldsize - 1 : oldsize) >>
5443 inode->i_sb->s_blocksize_bits,
5444 (attr->ia_size > 0 ? attr->ia_size - 1 : 0) >>
5445 inode->i_sb->s_blocksize_bits);
5447 down_write(&EXT4_I(inode)->i_data_sem);
5448 old_disksize = EXT4_I(inode)->i_disksize;
5449 EXT4_I(inode)->i_disksize = attr->ia_size;
5450 rc = ext4_mark_inode_dirty(handle, inode);
5454 * We have to update i_size under i_data_sem together
5455 * with i_disksize to avoid races with writeback code
5456 * running ext4_wb_update_i_disksize().
5459 i_size_write(inode, attr->ia_size);
5461 EXT4_I(inode)->i_disksize = old_disksize;
5462 up_write(&EXT4_I(inode)->i_data_sem);
5463 ext4_journal_stop(handle);
5467 pagecache_isize_extended(inode, oldsize,
5469 } else if (ext4_should_journal_data(inode)) {
5470 ext4_wait_for_tail_page_commit(inode);
5475 * Truncate pagecache after we've waited for commit
5476 * in data=journal mode to make pages freeable.
5478 truncate_pagecache(inode, inode->i_size);
5480 * Call ext4_truncate() even if i_size didn't change to
5481 * truncate possible preallocated blocks.
5483 if (attr->ia_size <= oldsize) {
5484 rc = ext4_truncate(inode);
5489 filemap_invalidate_unlock(inode->i_mapping);
5494 inode_inc_iversion(inode);
5495 setattr_copy(idmap, inode, attr);
5496 mark_inode_dirty(inode);
5500 * If the call to ext4_truncate failed to get a transaction handle at
5501 * all, we need to clean up the in-core orphan list manually.
5503 if (orphan && inode->i_nlink)
5504 ext4_orphan_del(NULL, inode);
5506 if (!error && (ia_valid & ATTR_MODE))
5507 rc = posix_acl_chmod(idmap, dentry, inode->i_mode);
5511 ext4_std_error(inode->i_sb, error);
5517 u32 ext4_dio_alignment(struct inode *inode)
5519 if (fsverity_active(inode))
5521 if (ext4_should_journal_data(inode))
5523 if (ext4_has_inline_data(inode))
5525 if (IS_ENCRYPTED(inode)) {
5526 if (!fscrypt_dio_supported(inode))
5528 return i_blocksize(inode);
5530 return 1; /* use the iomap defaults */
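/*
 * So ext4_dio_alignment() has three kinds of return value: 0 means DIO
 * is not supported at all (verity, data journalling, inline data, or
 * unsupported encryption); 1 means "no fs-imposed restriction, fall
 * back to the block device defaults"; and any other value (the
 * filesystem block size, for supported encrypted files) is the required
 * alignment in bytes. ext4_getattr() below translates these into the
 * STATX_DIOALIGN fields.
 */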
5533 int ext4_getattr(struct mnt_idmap *idmap, const struct path *path,
5534 struct kstat *stat, u32 request_mask, unsigned int query_flags)
5536 struct inode *inode = d_inode(path->dentry);
5537 struct ext4_inode *raw_inode;
5538 struct ext4_inode_info *ei = EXT4_I(inode);
5541 if ((request_mask & STATX_BTIME) &&
5542 EXT4_FITS_IN_INODE(raw_inode, ei, i_crtime)) {
5543 stat->result_mask |= STATX_BTIME;
5544 stat->btime.tv_sec = ei->i_crtime.tv_sec;
5545 stat->btime.tv_nsec = ei->i_crtime.tv_nsec;
5549 * Return the DIO alignment restrictions if requested. We only return
5550 * this information when requested, since on encrypted files it might
5551 * take a fair bit of work to get if the file wasn't opened recently.
5553 if ((request_mask & STATX_DIOALIGN) && S_ISREG(inode->i_mode)) {
5554 u32 dio_align = ext4_dio_alignment(inode);
5556 stat->result_mask |= STATX_DIOALIGN;
5557 if (dio_align == 1) {
5558 struct block_device *bdev = inode->i_sb->s_bdev;
5560 /* iomap defaults */
5561 stat->dio_mem_align = bdev_dma_alignment(bdev) + 1;
5562 stat->dio_offset_align = bdev_logical_block_size(bdev);
5564 stat->dio_mem_align = dio_align;
5565 stat->dio_offset_align = dio_align;
5569 flags = ei->i_flags & EXT4_FL_USER_VISIBLE;
5570 if (flags & EXT4_APPEND_FL)
5571 stat->attributes |= STATX_ATTR_APPEND;
5572 if (flags & EXT4_COMPR_FL)
5573 stat->attributes |= STATX_ATTR_COMPRESSED;
5574 if (flags & EXT4_ENCRYPT_FL)
5575 stat->attributes |= STATX_ATTR_ENCRYPTED;
5576 if (flags & EXT4_IMMUTABLE_FL)
5577 stat->attributes |= STATX_ATTR_IMMUTABLE;
5578 if (flags & EXT4_NODUMP_FL)
5579 stat->attributes |= STATX_ATTR_NODUMP;
5580 if (flags & EXT4_VERITY_FL)
5581 stat->attributes |= STATX_ATTR_VERITY;
5583 stat->attributes_mask |= (STATX_ATTR_APPEND |
5584 STATX_ATTR_COMPRESSED |
5585 STATX_ATTR_ENCRYPTED |
5586 STATX_ATTR_IMMUTABLE |
5590 generic_fillattr(idmap, request_mask, inode, stat);
5594 int ext4_file_getattr(struct mnt_idmap *idmap,
5595 const struct path *path, struct kstat *stat,
5596 u32 request_mask, unsigned int query_flags)
5598 struct inode *inode = d_inode(path->dentry);
5599 u64 delalloc_blocks;
5601 ext4_getattr(idmap, path, stat, request_mask, query_flags);
5604 * If there is inline data in the inode, the inode will normally not
5605 * have data blocks allocated (it may have an external xattr block).
5606 * Report at least one sector for such files, so tools like tar, rsync,
5607 * others don't incorrectly think the file is completely sparse.
5609 if (unlikely(ext4_has_inline_data(inode)))
5610 stat->blocks += (stat->size + 511) >> 9;
5613	 * We can't update i_blocks if the block allocation is delayed;
5614	 * otherwise, in the case of a system crash before the real block
5615	 * allocation is done, we would have i_blocks inconsistent with
5616	 * the on-disk file blocks.
5617	 * We always keep i_blocks updated together with the real
5618	 * allocation. But so as not to confuse userspace, stat
5619	 * will return blocks that include the delayed allocation
5620	 * blocks for this file.
5622 delalloc_blocks = EXT4_C2B(EXT4_SB(inode->i_sb),
5623 EXT4_I(inode)->i_reserved_data_blocks);
5624 stat->blocks += delalloc_blocks << (inode->i_sb->s_blocksize_bits - 9);
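/*
 * Worked example (made-up numbers): a file with 8 blocks already
 * allocated and 3 blocks still delayed, on a 4k-block filesystem with
 * 1-block clusters, reports
 *
 *	stat->blocks = 8 * 8 + EXT4_C2B(sbi, 3) * 8 = 64 + 24 = 88
 *
 * 512-byte sectors, matching what the file will occupy once writeback
 * allocates the delayed blocks.
 */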
5628 static int ext4_index_trans_blocks(struct inode *inode, int lblocks,
5631 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
5632 return ext4_ind_trans_blocks(inode, lblocks);
5633 return ext4_ext_index_trans_blocks(inode, pextents);
5637 * Account for index blocks, block group bitmaps and block group
5638 * descriptor blocks if we modify data blocks and index blocks. In the
5639 * worst case, the index blocks spread over different block groups.
5641 * If data blocks are discontiguous, they may spread over
5642 * different block groups too. Even if they are contiguous, with flexbg
5643 * they could still cross a block group boundary.
5645 * Also account for superblock, inode, quota and xattr blocks
5647 static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
5650 ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb);
5656	 * How many index blocks do we need to touch to map @lblocks logical
5657	 * blocks to @pextents physical extents?
5659 idxblocks = ext4_index_trans_blocks(inode, lblocks, pextents);
5664 * Now let's see how many group bitmaps and group descriptors need
5667 groups = idxblocks + pextents;
5669 if (groups > ngroups)
5671 if (groups > EXT4_SB(inode->i_sb)->s_gdb_count)
5672 gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count;
5674 /* bitmaps and block group descriptor blocks */
5675 ret += groups + gdpblocks;
5677 /* Blocks for super block, inode, quota and xattr blocks */
5678 ret += EXT4_META_TRANS_BLOCKS(inode->i_sb);
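/*
 * Illustrative example of the accounting above (hypothetical values):
 * mapping lblocks = 4, pextents = 4 on an extent-based file where
 * ext4_ext_index_trans_blocks() returns, say, idxblocks = 5 gives
 *
 *	groups = 5 + 4 = 9 candidate block groups
 *	ret    = 5 + (9 bitmaps + min(9, s_gdb_count) gdt blocks)
 *		   + EXT4_META_TRANS_BLOCKS(sb)
 *
 * i.e. every touched group may cost one bitmap plus one descriptor
 * block, on top of the index blocks and the fixed superblock/inode/
 * quota/xattr overhead.
 */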
5684 * Calculate the total number of credits to reserve to fit
5685 * the modification of a single page into a single transaction,
5686 * which may include multiple chunks of block allocations.
5688 * This could be called via ext4_write_begin().
5690 * We need to consider the worst case, when
5691 * one new block is allocated per extent.
5693 int ext4_writepage_trans_blocks(struct inode *inode)
5695 int bpp = ext4_journal_blocks_per_page(inode);
5698 ret = ext4_meta_trans_blocks(inode, bpp, bpp);
5700 /* Account for data blocks for journalled mode */
5701 if (ext4_should_journal_data(inode))
5707 * Calculate the journal credits for a chunk of data modification.
5709 * This is called from DIO, fallocate or whoever calling
5710 * ext4_map_blocks() to map/allocate a chunk of contiguous disk blocks.
5712 * journal buffers for data blocks are not included here, as DIO
5713 * and fallocate do not need to journal data buffers.
5715 int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks)
5717 return ext4_meta_trans_blocks(inode, nrblocks, 1);
5721 * The caller must have previously called ext4_reserve_inode_write().
5722 * Given this, we know that the caller already has write access to iloc->bh.
5724 int ext4_mark_iloc_dirty(handle_t *handle,
5725 struct inode *inode, struct ext4_iloc *iloc)
5729 if (unlikely(ext4_forced_shutdown(inode->i_sb))) {
5733 ext4_fc_track_inode(handle, inode);
5735 /* the do_update_inode consumes one bh->b_count */
5738 /* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
5739 err = ext4_do_update_inode(handle, inode, iloc);
5745 * On success, we end up with an outstanding reference count against
5746 * iloc->bh. This _must_ be cleaned up later.
5750 ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
5751 struct ext4_iloc *iloc)
5755 if (unlikely(ext4_forced_shutdown(inode->i_sb)))
5758 err = ext4_get_inode_loc(inode, iloc);
5760 BUFFER_TRACE(iloc->bh, "get_write_access");
5761 err = ext4_journal_get_write_access(handle, inode->i_sb,
5762 iloc->bh, EXT4_JTR_NONE);
5768 ext4_std_error(inode->i_sb, err);
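/*
 * The canonical calling sequence for the two helpers above (a sketch;
 * real callers add their own error handling):
 *
 *	struct ext4_iloc iloc;
 *	int err;
 *
 *	err = ext4_reserve_inode_write(handle, inode, &iloc);
 *	if (err)
 *		return err;
 *	... modify the in-core inode ...
 *	return ext4_mark_iloc_dirty(handle, inode, &iloc);
 *
 * ext4_mark_iloc_dirty() consumes the bh reference taken by
 * ext4_reserve_inode_write(), so the pair must always be balanced.
 */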
5772 static int __ext4_expand_extra_isize(struct inode *inode,
5773 unsigned int new_extra_isize,
5774 struct ext4_iloc *iloc,
5775 handle_t *handle, int *no_expand)
5777 struct ext4_inode *raw_inode;
5778 struct ext4_xattr_ibody_header *header;
5779 unsigned int inode_size = EXT4_INODE_SIZE(inode->i_sb);
5780 struct ext4_inode_info *ei = EXT4_I(inode);
5783 /* this was checked at iget time, but double check for good measure */
5784 if ((EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize > inode_size) ||
5785 (ei->i_extra_isize & 3)) {
5786 EXT4_ERROR_INODE(inode, "bad extra_isize %u (inode size %u)",
5788 EXT4_INODE_SIZE(inode->i_sb));
5789 return -EFSCORRUPTED;
5791 if ((new_extra_isize < ei->i_extra_isize) ||
5792 (new_extra_isize < 4) ||
5793 (new_extra_isize > inode_size - EXT4_GOOD_OLD_INODE_SIZE))
5794 return -EINVAL; /* Should never happen */
5796 raw_inode = ext4_raw_inode(iloc);
5798 header = IHDR(inode, raw_inode);
5800 /* No extended attributes present */
5801 if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
5802 header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
5803 memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE +
5804 EXT4_I(inode)->i_extra_isize, 0,
5805 new_extra_isize - EXT4_I(inode)->i_extra_isize);
5806 EXT4_I(inode)->i_extra_isize = new_extra_isize;
5811 * We may need to allocate external xattr block so we need quotas
5812 * initialized. Here we can be called with various locks held so we
5813	 * cannot afford to initialize quotas ourselves. So just bail.
5815 if (dquot_initialize_needed(inode))
5818 /* try to expand with EAs present */
5819 error = ext4_expand_extra_isize_ea(inode, new_extra_isize,
5823 * Inode size expansion failed; don't try again
5832 * Expand an inode by new_extra_isize bytes.
5833 * Returns 0 on success or negative error number on failure.
5835 static int ext4_try_to_expand_extra_isize(struct inode *inode,
5836 unsigned int new_extra_isize,
5837 struct ext4_iloc iloc,
5843 if (ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND))
5847 * In nojournal mode, we can immediately attempt to expand
5848 * the inode. When journaled, we first need to obtain extra
5849 * buffer credits since we may write into the EA block
5850 * with this same handle. If journal_extend fails, then it will
5851 * only result in a minor loss of functionality for that inode.
5852 * If this is felt to be critical, then e2fsck should be run to
5853 * force a large enough s_min_extra_isize.
5855 if (ext4_journal_extend(handle,
5856 EXT4_DATA_TRANS_BLOCKS(inode->i_sb), 0) != 0)
5859 if (ext4_write_trylock_xattr(inode, &no_expand) == 0)
5862 error = __ext4_expand_extra_isize(inode, new_extra_isize, &iloc,
5863 handle, &no_expand);
5864 ext4_write_unlock_xattr(inode, &no_expand);
5869 int ext4_expand_extra_isize(struct inode *inode,
5870 unsigned int new_extra_isize,
5871 struct ext4_iloc *iloc)
5877 if (ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) {
5882 handle = ext4_journal_start(inode, EXT4_HT_INODE,
5883 EXT4_DATA_TRANS_BLOCKS(inode->i_sb));
5884 if (IS_ERR(handle)) {
5885 error = PTR_ERR(handle);
5890 ext4_write_lock_xattr(inode, &no_expand);
5892 BUFFER_TRACE(iloc->bh, "get_write_access");
5893 error = ext4_journal_get_write_access(handle, inode->i_sb, iloc->bh,
5900 error = __ext4_expand_extra_isize(inode, new_extra_isize, iloc,
5901 handle, &no_expand);
5903 rc = ext4_mark_iloc_dirty(handle, inode, iloc);
5908 ext4_write_unlock_xattr(inode, &no_expand);
5909 ext4_journal_stop(handle);
5914 * What we do here is to mark the in-core inode as clean with respect to inode
5915 * dirtiness (it may still be data-dirty).
5916 * This means that the in-core inode may be reaped by prune_icache
5917 * without having to perform any I/O. This is a very good thing,
5918 * because *any* task may call prune_icache - even ones which
5919 * have a transaction open against a different journal.
5921 * Is this cheating? Not really. Sure, we haven't written the
5922 * inode out, but prune_icache isn't a user-visible syncing function.
5923 * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
5924 * we start and wait on commits.
5926 int __ext4_mark_inode_dirty(handle_t *handle, struct inode *inode,
5927 const char *func, unsigned int line)
5929 struct ext4_iloc iloc;
5930 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
5934 trace_ext4_mark_inode_dirty(inode, _RET_IP_);
5935 err = ext4_reserve_inode_write(handle, inode, &iloc);
5939 if (EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize)
5940 ext4_try_to_expand_extra_isize(inode, sbi->s_want_extra_isize,
5943 err = ext4_mark_iloc_dirty(handle, inode, &iloc);
5946 ext4_error_inode_err(inode, func, line, 0, err,
5947 "mark_inode_dirty error");
5952 * ext4_dirty_inode() is called from __mark_inode_dirty()
5954 * We're really interested in the case where a file is being extended.
5955 * i_size has been changed by generic_commit_write() and we thus need
5956 * to include the updated inode in the current transaction.
5958 * Also, dquot_alloc_block() will always dirty the inode when blocks
5959 * are allocated to the file.
5961 * If the inode is marked synchronous, we don't honour that here - doing
5962 * so would cause a commit on atime updates, which we don't bother doing.
5963 * We handle synchronous inodes at the highest possible level.
5965 void ext4_dirty_inode(struct inode *inode, int flags)
5969 handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
5972 ext4_mark_inode_dirty(handle, inode);
5973 ext4_journal_stop(handle);
5976 int ext4_change_inode_journal_flag(struct inode *inode, int val)
5984 * We have to be very careful here: changing a data block's
5985 * journaling status dynamically is dangerous. If we write a
5986 * data block to the journal, change the status and then delete
5987 * that block, we risk forgetting to revoke the old log record
5988 * from the journal and so a subsequent replay can corrupt data.
5989 * So, first we make sure that the journal is empty and that
5990 * nobody is changing anything.
5993 journal = EXT4_JOURNAL(inode);
5996 if (is_journal_aborted(journal))
5999 /* Wait for all existing dio workers */
6000 inode_dio_wait(inode);
6003 * Before flushing the journal and switching inode's aops, we have
6004 * to flush all dirty data the inode has. There can be outstanding
6005 * delayed allocations, there can be unwritten extents created by
6006 * fallocate or buffered writes in dioread_nolock mode covered by
6007 * dirty data which can be converted only after flushing the dirty
6008 * data (and journalled aops don't know how to handle these cases).
6011 filemap_invalidate_lock(inode->i_mapping);
6012 err = filemap_write_and_wait(inode->i_mapping);
6014 filemap_invalidate_unlock(inode->i_mapping);
6019 alloc_ctx = ext4_writepages_down_write(inode->i_sb);
6020 jbd2_journal_lock_updates(journal);
6023 * OK, there are no updates running now, and all cached data is
6024 * synced to disk. We are now in a completely consistent state
6025 * which doesn't have anything in the journal, and we know that
6026 * no filesystem updates are running, so it is safe to modify
6027 * the inode's in-core data-journaling state flag now.
6031 ext4_set_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
6033 err = jbd2_journal_flush(journal, 0);
6035 jbd2_journal_unlock_updates(journal);
6036 ext4_writepages_up_write(inode->i_sb, alloc_ctx);
6039 ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
6041 ext4_set_aops(inode);
6043 jbd2_journal_unlock_updates(journal);
6044 ext4_writepages_up_write(inode->i_sb, alloc_ctx);
6047 filemap_invalidate_unlock(inode->i_mapping);
6049 /* Finally we can mark the inode as dirty. */
6051 handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
6053 return PTR_ERR(handle);
6055 ext4_fc_mark_ineligible(inode->i_sb,
6056 EXT4_FC_REASON_JOURNAL_FLAG_CHANGE, handle);
6057 err = ext4_mark_inode_dirty(handle, inode);
6058 ext4_handle_sync(handle);
6059 ext4_journal_stop(handle);
6060 ext4_std_error(inode->i_sb, err);
6065 static int ext4_bh_unmapped(handle_t *handle, struct inode *inode,
6066 struct buffer_head *bh)
6068 return !buffer_mapped(bh);
6071 vm_fault_t ext4_page_mkwrite(struct vm_fault *vmf)
6073 struct vm_area_struct *vma = vmf->vma;
6074 struct folio *folio = page_folio(vmf->page);
6079 struct file *file = vma->vm_file;
6080 struct inode *inode = file_inode(file);
6081 struct address_space *mapping = inode->i_mapping;
6083 get_block_t *get_block;
6086 if (unlikely(IS_IMMUTABLE(inode)))
6087 return VM_FAULT_SIGBUS;
6089 sb_start_pagefault(inode->i_sb);
6090 file_update_time(vma->vm_file);
6092 filemap_invalidate_lock_shared(mapping);
6094 err = ext4_convert_inline_data(inode);
6099 * On data journalling we skip straight to the transaction handle:
6100 * there's no delalloc; page truncated will be checked later; the
6101 * early return w/ all buffers mapped (calculates size/len) can't
6102 * be used; and there's no dioread_nolock, so only ext4_get_block.
6104 if (ext4_should_journal_data(inode))
6107 /* Delalloc case is easy... */
6108 if (test_opt(inode->i_sb, DELALLOC) &&
6109 !ext4_nonda_switch(inode->i_sb)) {
6111 err = block_page_mkwrite(vma, vmf,
6112 ext4_da_get_block_prep);
6113 } while (err == -ENOSPC &&
6114 ext4_should_retry_alloc(inode->i_sb, &retries));
6119 size = i_size_read(inode);
6120 /* Page got truncated from under us? */
6121 if (folio->mapping != mapping || folio_pos(folio) > size) {
6122 folio_unlock(folio);
6123 ret = VM_FAULT_NOPAGE;
6127 len = folio_size(folio);
6128 if (folio_pos(folio) + len > size)
6129 len = size - folio_pos(folio);
6131 * Return if we have all the buffers mapped. This avoids the need to do
6132	 * journal_start/journal_stop which can block and take a long time.
6134 * This cannot be done for data journalling, as we have to add the
6135 * inode to the transaction's list to writeprotect pages on commit.
6137 if (folio_buffers(folio)) {
6138 if (!ext4_walk_page_buffers(NULL, inode, folio_buffers(folio),
6140 ext4_bh_unmapped)) {
6141 /* Wait so that we don't change page under IO */
6142 folio_wait_stable(folio);
6143 ret = VM_FAULT_LOCKED;
6147 folio_unlock(folio);
6148 /* OK, we need to fill the hole... */
6149 if (ext4_should_dioread_nolock(inode))
6150 get_block = ext4_get_block_unwritten;
6152 get_block = ext4_get_block;
6154 handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
6155 ext4_writepage_trans_blocks(inode));
6156 if (IS_ERR(handle)) {
6157 ret = VM_FAULT_SIGBUS;
6161 * Data journalling can't use block_page_mkwrite() because it
6162 * will set_buffer_dirty() before do_journal_get_write_access()
6163 * thus might hit warning messages for dirty metadata buffers.
6165 if (!ext4_should_journal_data(inode)) {
6166 err = block_page_mkwrite(vma, vmf, get_block);
6169 size = i_size_read(inode);
6170 /* Page got truncated from under us? */
6171 if (folio->mapping != mapping || folio_pos(folio) > size) {
6172 ret = VM_FAULT_NOPAGE;
6176 len = folio_size(folio);
6177 if (folio_pos(folio) + len > size)
6178 len = size - folio_pos(folio);
6180 err = __block_write_begin(&folio->page, 0, len, ext4_get_block);
6182 ret = VM_FAULT_SIGBUS;
6183 if (ext4_journal_folio_buffers(handle, folio, len))
6186 folio_unlock(folio);
6189 ext4_journal_stop(handle);
6190 if (err == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
6193 ret = vmf_fs_error(err);
6195 filemap_invalidate_unlock_shared(mapping);
6196 sb_end_pagefault(inode->i_sb);
6199 folio_unlock(folio);
6200 ext4_journal_stop(handle);