// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ext4/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *      (jj@sunsite.ms.mff.cuni.cz)
 *
 *  Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
 */

#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/time.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/dax.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/iomap.h>
#include <linux/iversion.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "truncate.h"

#include <trace/events/ext4.h>

static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw,
                              struct ext4_inode_info *ei)
{
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        __u32 csum;
        __u16 dummy_csum = 0;
        int offset = offsetof(struct ext4_inode, i_checksum_lo);
        unsigned int csum_size = sizeof(dummy_csum);

        csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw, offset);
        csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum, csum_size);
        offset += csum_size;
        csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
                           EXT4_GOOD_OLD_INODE_SIZE - offset);

        if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
                offset = offsetof(struct ext4_inode, i_checksum_hi);
                csum = ext4_chksum(sbi, csum, (__u8 *)raw +
                                   EXT4_GOOD_OLD_INODE_SIZE,
                                   offset - EXT4_GOOD_OLD_INODE_SIZE);
                if (EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) {
                        csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum,
                                           csum_size);
                        offset += csum_size;
                }
                csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
                                   EXT4_INODE_SIZE(inode->i_sb) - offset);
        }

        return csum;
}

static int ext4_inode_csum_verify(struct inode *inode, struct ext4_inode *raw,
                                  struct ext4_inode_info *ei)
{
        __u32 provided, calculated;

        if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
            cpu_to_le32(EXT4_OS_LINUX) ||
            !ext4_has_metadata_csum(inode->i_sb))
                return 1;

        provided = le16_to_cpu(raw->i_checksum_lo);
        calculated = ext4_inode_csum(inode, raw, ei);
        if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
            EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
                provided |= ((__u32)le16_to_cpu(raw->i_checksum_hi)) << 16;
        else
                calculated &= 0xFFFF;

        return provided == calculated;
}

void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw,
                         struct ext4_inode_info *ei)
{
        __u32 csum;

        if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
            cpu_to_le32(EXT4_OS_LINUX) ||
            !ext4_has_metadata_csum(inode->i_sb))
                return;

        csum = ext4_inode_csum(inode, raw, ei);
        raw->i_checksum_lo = cpu_to_le16(csum & 0xFFFF);
        if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
            EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
                raw->i_checksum_hi = cpu_to_le16(csum >> 16);
}
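
/*
 * Illustrative sketch (not part of this file): the two on-disk checksum
 * fields used above simply split one 32-bit crc32c value into 16-bit
 * halves; a helper like this is assumed here for illustration only.
 */
static inline void example_split_inode_csum(__u32 csum, __le16 *lo, __le16 *hi)
{
        *lo = cpu_to_le16(csum & 0xFFFF);
        *hi = cpu_to_le16(csum >> 16);
}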

static inline int ext4_begin_ordered_truncate(struct inode *inode,
                                              loff_t new_size)
{
        trace_ext4_begin_ordered_truncate(inode, new_size);
        /*
         * If jinode is zero, then we never opened the file for
         * writing, so there's no need to call
         * jbd2_journal_begin_ordered_truncate() since there are no
         * outstanding writes we need to flush.
         */
        if (!EXT4_I(inode)->jinode)
                return 0;
        return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode),
                                                   EXT4_I(inode)->jinode,
                                                   new_size);
}

static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
                                  int pextents);

/*
 * Test whether an inode is a fast symlink.
 * A fast symlink has its symlink data stored in ext4_inode_info->i_data.
 */
int ext4_inode_is_fast_symlink(struct inode *inode)
{
        if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) {
                int ea_blocks = EXT4_I(inode)->i_file_acl ?
                                EXT4_CLUSTER_SIZE(inode->i_sb) >> 9 : 0;

                if (ext4_has_inline_data(inode))
                        return 0;

                return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
        }
        return S_ISLNK(inode->i_mode) && inode->i_size &&
               (inode->i_size < EXT4_N_BLOCKS * 4);
}

/*
 * Called at the last iput() if i_nlink is zero.
 */
void ext4_evict_inode(struct inode *inode)
{
        handle_t *handle;
        int err;
        /*
         * Credits for final inode cleanup and freeing:
         * sb + inode (ext4_orphan_del()), block bitmap, group descriptor
         * (xattr block freeing), bitmap, group descriptor (inode freeing)
         */
        int extra_credits = 6;
        struct ext4_xattr_inode_array *ea_inode_array = NULL;
        bool freeze_protected = false;

        trace_ext4_evict_inode(inode);

        if (EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)
                ext4_evict_ea_inode(inode);
        if (inode->i_nlink) {
                truncate_inode_pages_final(&inode->i_data);

                goto no_delete;
        }

        if (is_bad_inode(inode))
                goto no_delete;
        dquot_initialize(inode);

        if (ext4_should_order_data(inode))
                ext4_begin_ordered_truncate(inode, 0);
        truncate_inode_pages_final(&inode->i_data);

        /*
         * For inodes with journalled data, transaction commit could have
         * dirtied the inode. And for inodes with dioread_nolock, unwritten
         * extents converting worker could merge extents and also have dirtied
         * the inode. Flush worker is ignoring it because of I_FREEING flag but
         * we still need to remove the inode from the writeback lists.
         */
        if (!list_empty_careful(&inode->i_io_list))
                inode_io_list_del(inode);

        /*
         * Protect us against freezing - iput() caller didn't have to have any
         * protection against it. When we are in a running transaction though,
         * we are already protected against freezing and we cannot grab further
         * protection due to lock ordering constraints.
         */
        if (!ext4_journal_current_handle()) {
                sb_start_intwrite(inode->i_sb);
                freeze_protected = true;
        }

        if (!IS_NOQUOTA(inode))
                extra_credits += EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb);

        /*
         * Block bitmap, group descriptor, and inode are accounted in both
         * ext4_blocks_for_truncate() and extra_credits. So subtract 3.
         */
        handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE,
                         ext4_blocks_for_truncate(inode) + extra_credits - 3);
        if (IS_ERR(handle)) {
                ext4_std_error(inode->i_sb, PTR_ERR(handle));
                /*
                 * If we're going to skip the normal cleanup, we still need to
                 * make sure that the in-core orphan linked list is properly
                 * cleaned up.
                 */
                ext4_orphan_del(NULL, inode);
                if (freeze_protected)
                        sb_end_intwrite(inode->i_sb);
                goto no_delete;
        }

        if (IS_SYNC(inode))
                ext4_handle_sync(handle);

        /*
         * Set inode->i_size to 0 before calling ext4_truncate(). We need
         * special handling of symlinks here because i_size is used to
         * determine whether ext4_inode_info->i_data contains symlink data or
         * block mappings. Setting i_size to 0 will remove its fast symlink
         * status. Erase i_data so that it becomes a valid empty block map.
         */
        if (ext4_inode_is_fast_symlink(inode))
                memset(EXT4_I(inode)->i_data, 0, sizeof(EXT4_I(inode)->i_data));
        inode->i_size = 0;
        err = ext4_mark_inode_dirty(handle, inode);
        if (err) {
                ext4_warning(inode->i_sb,
                             "couldn't mark inode dirty (err %d)", err);
                goto stop_handle;
        }
        if (inode->i_blocks) {
                err = ext4_truncate(inode);
                if (err) {
                        ext4_error_err(inode->i_sb, -err,
                                       "couldn't truncate inode %lu (err %d)",
                                       inode->i_ino, err);
                        goto stop_handle;
                }
        }

        /* Remove xattr references. */
        err = ext4_xattr_delete_inode(handle, inode, &ea_inode_array,
                                      extra_credits);
        if (err) {
                ext4_warning(inode->i_sb, "xattr delete (err %d)", err);
stop_handle:
                ext4_journal_stop(handle);
                ext4_orphan_del(NULL, inode);
                if (freeze_protected)
                        sb_end_intwrite(inode->i_sb);
                ext4_xattr_inode_array_free(ea_inode_array);
                goto no_delete;
        }

        /*
         * Kill off the orphan record which ext4_truncate created.
         * AKPM: I think this can be inside the above `if'.
         * Note that ext4_orphan_del() has to be able to cope with the
         * deletion of a non-existent orphan - this is because we don't
         * know if ext4_truncate() actually created an orphan record.
         * (Well, we could do this if we need to, but heck - it works)
         */
        ext4_orphan_del(handle, inode);
        EXT4_I(inode)->i_dtime  = (__u32)ktime_get_real_seconds();

        /*
         * One subtle ordering requirement: if anything has gone wrong
         * (transaction abort, IO errors, whatever), then we can still
         * do these next steps (the fs will already have been marked as
         * having errors), but we can't free the inode if the mark_dirty
         * fails.
         */
        if (ext4_mark_inode_dirty(handle, inode))
                /* If that failed, just do the required in-core inode clear. */
                ext4_clear_inode(inode);
        else
                ext4_free_inode(handle, inode);
        ext4_journal_stop(handle);
        if (freeze_protected)
                sb_end_intwrite(inode->i_sb);
        ext4_xattr_inode_array_free(ea_inode_array);
        return;
no_delete:
        /*
         * Catch the cases where something else has dirtied the inode while
         * it is being evicted, which could otherwise cause inode
         * use-after-free issues later.
         */
        WARN_ON_ONCE(!list_empty_careful(&inode->i_io_list));

        if (!list_empty(&EXT4_I(inode)->i_fc_list))
                ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_NOMEM, NULL);
        ext4_clear_inode(inode);        /* We must guarantee clearing of inode... */
}

#ifdef CONFIG_QUOTA
qsize_t *ext4_get_reserved_space(struct inode *inode)
{
        return &EXT4_I(inode)->i_reserved_quota;
}
#endif

/*
 * Called with i_data_sem down, which is important since we can call
 * ext4_discard_preallocations() from here.
 */
void ext4_da_update_reserve_space(struct inode *inode,
                                        int used, int quota_claim)
{
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        struct ext4_inode_info *ei = EXT4_I(inode);

        spin_lock(&ei->i_block_reservation_lock);
        trace_ext4_da_update_reserve_space(inode, used, quota_claim);
        if (unlikely(used > ei->i_reserved_data_blocks)) {
                ext4_warning(inode->i_sb, "%s: ino %lu, used %d "
                         "with only %d reserved data blocks",
                         __func__, inode->i_ino, used,
                         ei->i_reserved_data_blocks);
                WARN_ON(1);
                used = ei->i_reserved_data_blocks;
        }

        /* Update per-inode reservations */
        ei->i_reserved_data_blocks -= used;
        percpu_counter_sub(&sbi->s_dirtyclusters_counter, used);

        spin_unlock(&ei->i_block_reservation_lock);

        /* Update quota subsystem for data blocks */
        if (quota_claim)
                dquot_claim_block(inode, EXT4_C2B(sbi, used));
        else {
                /*
                 * We did fallocate at an offset that was already delayed
                 * allocated, so on delayed-allocation writeback we should
                 * not re-claim the quota for fallocated blocks.
                 */
                dquot_release_reservation_block(inode, EXT4_C2B(sbi, used));
        }

        /*
         * If we have done all the pending block allocations and if
         * there aren't any writers on the inode, we can discard the
         * inode's preallocations.
         */
        if ((ei->i_reserved_data_blocks == 0) &&
            !inode_is_open_for_write(inode))
                ext4_discard_preallocations(inode, 0);
}

static int __check_block_validity(struct inode *inode, const char *func,
                                unsigned int line,
                                struct ext4_map_blocks *map)
{
        if (ext4_has_feature_journal(inode->i_sb) &&
            (inode->i_ino ==
             le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum)))
                return 0;
        if (!ext4_inode_block_valid(inode, map->m_pblk, map->m_len)) {
                ext4_error_inode(inode, func, line, map->m_pblk,
                                 "lblock %lu mapped to illegal pblock %llu "
                                 "(length %d)", (unsigned long) map->m_lblk,
                                 map->m_pblk, map->m_len);
                return -EFSCORRUPTED;
        }
        return 0;
}

int ext4_issue_zeroout(struct inode *inode, ext4_lblk_t lblk, ext4_fsblk_t pblk,
                       ext4_lblk_t len)
{
        int ret;

        if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode))
                return fscrypt_zeroout_range(inode, lblk, pblk, len);

        ret = sb_issue_zeroout(inode->i_sb, pblk, len, GFP_NOFS);
        if (ret > 0)
                ret = 0;

        return ret;
}

#define check_block_validity(inode, map)        \
        __check_block_validity((inode), __func__, __LINE__, (map))

#ifdef ES_AGGRESSIVE_TEST
static void ext4_map_blocks_es_recheck(handle_t *handle,
                                       struct inode *inode,
                                       struct ext4_map_blocks *es_map,
                                       struct ext4_map_blocks *map,
                                       int flags)
{
        int retval;

        map->m_flags = 0;
        /*
         * There is a race window in which the result may differ, e.g.
         * xfstests #223 when dioread_nolock is enabled.  The reason is
         * that we look up a block mapping in the extent status tree
         * without taking i_data_sem, so in the meantime the unwritten
         * extent could be converted.
         */
        down_read(&EXT4_I(inode)->i_data_sem);
        if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
                retval = ext4_ext_map_blocks(handle, inode, map, 0);
        } else {
                retval = ext4_ind_map_blocks(handle, inode, map, 0);
        }
        up_read((&EXT4_I(inode)->i_data_sem));

        /*
         * We don't check m_len because the extent will be collapsed in the
         * status tree, so the lengths might not be equal.
         */
        if (es_map->m_lblk != map->m_lblk ||
            es_map->m_flags != map->m_flags ||
            es_map->m_pblk != map->m_pblk) {
                printk("ES cache assertion failed for inode: %lu "
                       "es_cached ex [%d/%d/%llu/%x] != "
                       "found ex [%d/%d/%llu/%x] retval %d flags %x\n",
                       inode->i_ino, es_map->m_lblk, es_map->m_len,
                       es_map->m_pblk, es_map->m_flags, map->m_lblk,
                       map->m_len, map->m_pblk, map->m_flags,
                       retval, flags);
        }
}
#endif /* ES_AGGRESSIVE_TEST */

/*
 * The ext4_map_blocks() function tries to look up the requested blocks,
 * and returns them if the blocks are already mapped.
 *
 * Otherwise it takes the write lock of i_data_sem, allocates blocks,
 * stores the allocated blocks in the result buffer head and marks it
 * mapped.
 *
 * If the file is extent based, it calls ext4_ext_map_blocks();
 * otherwise it calls ext4_ind_map_blocks() to handle indirect-mapped
 * files.
 *
 * On success, it returns the number of blocks being mapped or allocated.  If
 * create == 0 and the blocks are pre-allocated and unwritten, the resulting
 * @map is marked as unwritten. If create == 1, it will mark @map as mapped.
 *
 * It returns 0 if a plain lookup failed (blocks have not been allocated); in
 * that case, @map is returned as unmapped but we still fill in map->m_len to
 * indicate the length of a hole starting at map->m_lblk.
 *
 * It returns the error in case of allocation failure.
 */
int ext4_map_blocks(handle_t *handle, struct inode *inode,
                    struct ext4_map_blocks *map, int flags)
{
        struct extent_status es;
        int retval;
        int ret = 0;
#ifdef ES_AGGRESSIVE_TEST
        struct ext4_map_blocks orig_map;

        memcpy(&orig_map, map, sizeof(*map));
#endif

        map->m_flags = 0;
        ext_debug(inode, "flag 0x%x, max_blocks %u, logical block %lu\n",
                  flags, map->m_len, (unsigned long) map->m_lblk);

        /*
         * ext4_map_blocks returns an int, and m_len is an unsigned int
         */
        if (unlikely(map->m_len > INT_MAX))
                map->m_len = INT_MAX;

        /* We can only handle block numbers less than EXT_MAX_BLOCKS */
        if (unlikely(map->m_lblk >= EXT_MAX_BLOCKS))
                return -EFSCORRUPTED;

        /* Look up the extent status tree first */
        if (!(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY) &&
            ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es)) {
                if (ext4_es_is_written(&es) || ext4_es_is_unwritten(&es)) {
                        map->m_pblk = ext4_es_pblock(&es) +
                                        map->m_lblk - es.es_lblk;
                        map->m_flags |= ext4_es_is_written(&es) ?
                                        EXT4_MAP_MAPPED : EXT4_MAP_UNWRITTEN;
                        retval = es.es_len - (map->m_lblk - es.es_lblk);
                        if (retval > map->m_len)
                                retval = map->m_len;
                        map->m_len = retval;
                } else if (ext4_es_is_delayed(&es) || ext4_es_is_hole(&es)) {
                        map->m_pblk = 0;
                        retval = es.es_len - (map->m_lblk - es.es_lblk);
                        if (retval > map->m_len)
                                retval = map->m_len;
                        map->m_len = retval;
                        retval = 0;
                } else {
                        BUG();
                }

                if (flags & EXT4_GET_BLOCKS_CACHED_NOWAIT)
                        return retval;
#ifdef ES_AGGRESSIVE_TEST
                ext4_map_blocks_es_recheck(handle, inode, map,
                                           &orig_map, flags);
#endif
                goto found;
        }
        /*
         * In the cached no-wait lookup mode there is nothing more we can
         * do if we cannot find the extent in the cache.
         */
        if (flags & EXT4_GET_BLOCKS_CACHED_NOWAIT)
                return 0;

        /*
         * Try to see if we can get the block without requesting a new
         * file system block.
         */
        down_read(&EXT4_I(inode)->i_data_sem);
        if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
                retval = ext4_ext_map_blocks(handle, inode, map, 0);
        } else {
                retval = ext4_ind_map_blocks(handle, inode, map, 0);
        }
        if (retval > 0) {
                unsigned int status;

                if (unlikely(retval != map->m_len)) {
                        ext4_warning(inode->i_sb,
                                     "ES len assertion failed for inode "
                                     "%lu: retval %d != map->m_len %d",
                                     inode->i_ino, retval, map->m_len);
                        WARN_ON(1);
                }

                status = map->m_flags & EXT4_MAP_UNWRITTEN ?
                                EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
                if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
                    !(status & EXTENT_STATUS_WRITTEN) &&
                    ext4_es_scan_range(inode, &ext4_es_is_delayed, map->m_lblk,
                                       map->m_lblk + map->m_len - 1))
                        status |= EXTENT_STATUS_DELAYED;
                ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
                                      map->m_pblk, status);
        }
        up_read((&EXT4_I(inode)->i_data_sem));

found:
        if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
                ret = check_block_validity(inode, map);
                if (ret != 0)
                        return ret;
        }

        /* If it is only a block(s) lookup */
        if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
                return retval;

        /*
         * Return if the blocks have already been allocated.
         *
         * Note that if blocks have been preallocated,
         * ext4_ext_get_block() returns with create == 0
         * and the buffer head unmapped.
         */
        if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
                /*
                 * If we need to convert the extent to unwritten
                 * we continue and do the actual work in
                 * ext4_ext_map_blocks()
                 */
                if (!(flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN))
                        return retval;

        /*
         * Here we clear m_flags because after allocating a new extent,
         * it will be set again.
         */
        map->m_flags &= ~EXT4_MAP_FLAGS;

        /*
         * Allocating new blocks and/or writing to an unwritten extent
         * will possibly result in updating i_data, so we take
         * the write lock of i_data_sem, and call get_block()
         * with create == 1 flag.
         */
        down_write(&EXT4_I(inode)->i_data_sem);

        /*
         * We have to re-check the EXTENTS flag here because a migration
         * could have changed the inode type in between.
         */
        if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
                retval = ext4_ext_map_blocks(handle, inode, map, flags);
        } else {
                retval = ext4_ind_map_blocks(handle, inode, map, flags);

                if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
                        /*
                         * We allocated new blocks which will result in
                         * i_data's format changing.  Force the migrate
                         * to fail by clearing migrate flags
                         */
                        ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
                }
        }

        if (retval > 0) {
                unsigned int status;

                if (unlikely(retval != map->m_len)) {
                        ext4_warning(inode->i_sb,
                                     "ES len assertion failed for inode "
                                     "%lu: retval %d != map->m_len %d",
                                     inode->i_ino, retval, map->m_len);
                        WARN_ON(1);
                }

                /*
                 * We have to zeroout blocks before inserting them into extent
                 * status tree. Otherwise someone could look them up there and
                 * use them before they are really zeroed. We also have to
                 * unmap metadata before zeroing as otherwise writeback can
                 * overwrite zeros with stale data from block device.
                 */
                if (flags & EXT4_GET_BLOCKS_ZERO &&
                    map->m_flags & EXT4_MAP_MAPPED &&
                    map->m_flags & EXT4_MAP_NEW) {
                        ret = ext4_issue_zeroout(inode, map->m_lblk,
                                                 map->m_pblk, map->m_len);
                        if (ret) {
                                retval = ret;
                                goto out_sem;
                        }
                }

                /*
                 * If the extent has been zeroed out, we don't need to update
                 * the extent status tree.
                 */
                if ((flags & EXT4_GET_BLOCKS_PRE_IO) &&
                    ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es)) {
                        if (ext4_es_is_written(&es))
                                goto out_sem;
                }
                status = map->m_flags & EXT4_MAP_UNWRITTEN ?
                                EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
                if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
                    !(status & EXTENT_STATUS_WRITTEN) &&
                    ext4_es_scan_range(inode, &ext4_es_is_delayed, map->m_lblk,
                                       map->m_lblk + map->m_len - 1))
                        status |= EXTENT_STATUS_DELAYED;
                ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
                                      map->m_pblk, status);
        }

out_sem:
        up_write((&EXT4_I(inode)->i_data_sem));
        if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
                ret = check_block_validity(inode, map);
                if (ret != 0)
                        return ret;

                /*
                 * Inodes with freshly allocated blocks where contents will be
                 * visible after transaction commit must be on transaction's
                 * ordered data list.
                 */
                if (map->m_flags & EXT4_MAP_NEW &&
                    !(map->m_flags & EXT4_MAP_UNWRITTEN) &&
                    !(flags & EXT4_GET_BLOCKS_ZERO) &&
                    !ext4_is_quota_file(inode) &&
                    ext4_should_order_data(inode)) {
                        loff_t start_byte =
                                (loff_t)map->m_lblk << inode->i_blkbits;
                        loff_t length = (loff_t)map->m_len << inode->i_blkbits;

                        if (flags & EXT4_GET_BLOCKS_IO_SUBMIT)
                                ret = ext4_jbd2_inode_add_wait(handle, inode,
                                                start_byte, length);
                        else
                                ret = ext4_jbd2_inode_add_write(handle, inode,
                                                start_byte, length);
                        if (ret)
                                return ret;
                }
        }
        if (retval > 0 && (map->m_flags & EXT4_MAP_UNWRITTEN ||
                                map->m_flags & EXT4_MAP_MAPPED))
                ext4_fc_track_range(handle, inode, map->m_lblk,
                                        map->m_lblk + map->m_len - 1);
        if (retval < 0)
                ext_debug(inode, "failed with err %d\n", retval);
        return retval;
}
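
/*
 * Illustrative sketch (not part of this file): a minimal lookup-only
 * caller of ext4_map_blocks().  With flags == 0 the call never allocates;
 * a return of 0 means a hole, < 0 an error, and > 0 the number of
 * contiguous blocks mapped starting at map.m_pblk.
 */
static ext4_fsblk_t example_lookup_pblk(struct inode *inode, ext4_lblk_t lblk)
{
        struct ext4_map_blocks map;
        int ret;

        map.m_lblk = lblk;
        map.m_len = 1;
        ret = ext4_map_blocks(NULL, inode, &map, 0);
        if (ret <= 0)
                return 0;       /* hole or error */
        return map.m_pblk;
}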

/*
 * Update EXT4_MAP_FLAGS in bh->b_state. For buffer heads attached to pages
 * we have to be careful as someone else may be manipulating b_state as well.
 */
static void ext4_update_bh_state(struct buffer_head *bh, unsigned long flags)
{
        unsigned long old_state;
        unsigned long new_state;

        flags &= EXT4_MAP_FLAGS;

        /* Dummy buffer_head? Set non-atomically. */
        if (!bh->b_page) {
                bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | flags;
                return;
        }
        /*
         * Someone else may be modifying b_state. Be careful! This is ugly but
         * once we get rid of using bh as a container for mapping information
         * to pass to / from get_block functions, this can go away.
         */
        old_state = READ_ONCE(bh->b_state);
        do {
                new_state = (old_state & ~EXT4_MAP_FLAGS) | flags;
        } while (unlikely(!try_cmpxchg(&bh->b_state, &old_state, new_state)));
}

static int _ext4_get_block(struct inode *inode, sector_t iblock,
                           struct buffer_head *bh, int flags)
{
        struct ext4_map_blocks map;
        int ret = 0;

        if (ext4_has_inline_data(inode))
                return -ERANGE;

        map.m_lblk = iblock;
        map.m_len = bh->b_size >> inode->i_blkbits;

        ret = ext4_map_blocks(ext4_journal_current_handle(), inode, &map,
                              flags);
        if (ret > 0) {
                map_bh(bh, inode->i_sb, map.m_pblk);
                ext4_update_bh_state(bh, map.m_flags);
                bh->b_size = inode->i_sb->s_blocksize * map.m_len;
                ret = 0;
        } else if (ret == 0) {
                /* hole case, need to fill in bh->b_size */
                bh->b_size = inode->i_sb->s_blocksize * map.m_len;
        }
        return ret;
}

int ext4_get_block(struct inode *inode, sector_t iblock,
                   struct buffer_head *bh, int create)
{
        return _ext4_get_block(inode, iblock, bh,
                               create ? EXT4_GET_BLOCKS_CREATE : 0);
}
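
/*
 * Illustrative sketch (not part of this file): the classic get_block_t
 * calling convention used above.  A dummy on-stack buffer head carries
 * the request size in and the mapping state out; create == 0 makes this
 * a pure lookup that never allocates.
 */
static bool example_block_is_mapped(struct inode *inode, sector_t iblock)
{
        struct buffer_head bh = { .b_size = i_blocksize(inode) };

        if (ext4_get_block(inode, iblock, &bh, 0) < 0)
                return false;
        return buffer_mapped(&bh);
}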

/*
 * Get block function used when preparing for buffered write if we require
 * creating an unwritten extent if blocks haven't been allocated.  The extent
 * will be converted to written after the IO is complete.
 */
int ext4_get_block_unwritten(struct inode *inode, sector_t iblock,
                             struct buffer_head *bh_result, int create)
{
        int ret = 0;

        ext4_debug("ext4_get_block_unwritten: inode %lu, create flag %d\n",
                   inode->i_ino, create);
        ret = _ext4_get_block(inode, iblock, bh_result,
                               EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT);

        /*
         * If the buffer is marked unwritten, mark it as new to make sure it is
         * zeroed out correctly in case of partial writes. Otherwise, there is
         * a chance of stale data getting exposed.
         */
        if (ret == 0 && buffer_unwritten(bh_result))
                set_buffer_new(bh_result);

        return ret;
}

/* Maximum number of blocks we map for direct IO at once. */
#define DIO_MAX_BLOCKS 4096

/*
 * `handle' can be NULL if create is zero
 */
struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
                                ext4_lblk_t block, int map_flags)
{
        struct ext4_map_blocks map;
        struct buffer_head *bh;
        int create = map_flags & EXT4_GET_BLOCKS_CREATE;
        bool nowait = map_flags & EXT4_GET_BLOCKS_CACHED_NOWAIT;
        int err;

        ASSERT((EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
                    || handle != NULL || create == 0);
        ASSERT(create == 0 || !nowait);

        map.m_lblk = block;
        map.m_len = 1;
        err = ext4_map_blocks(handle, inode, &map, map_flags);

        if (err == 0)
                return create ? ERR_PTR(-ENOSPC) : NULL;
        if (err < 0)
                return ERR_PTR(err);

        if (nowait)
                return sb_find_get_block(inode->i_sb, map.m_pblk);

        bh = sb_getblk(inode->i_sb, map.m_pblk);
        if (unlikely(!bh))
                return ERR_PTR(-ENOMEM);
        if (map.m_flags & EXT4_MAP_NEW) {
                ASSERT(create != 0);
                ASSERT((EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
                            || (handle != NULL));

                /*
                 * Now that we do not always journal data, we should
                 * keep in mind whether this should always journal the
                 * new buffer as metadata.  For now, regular file
                 * writes use ext4_get_block instead, so it's not a
                 * problem.
                 */
                lock_buffer(bh);
                BUFFER_TRACE(bh, "call get_create_access");
                err = ext4_journal_get_create_access(handle, inode->i_sb, bh,
                                                     EXT4_JTR_NONE);
                if (unlikely(err)) {
                        unlock_buffer(bh);
                        goto errout;
                }
                if (!buffer_uptodate(bh)) {
                        memset(bh->b_data, 0, inode->i_sb->s_blocksize);
                        set_buffer_uptodate(bh);
                }
                unlock_buffer(bh);
                BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
                err = ext4_handle_dirty_metadata(handle, inode, bh);
                if (unlikely(err))
                        goto errout;
        } else
                BUFFER_TRACE(bh, "not a new buffer");
        return bh;
errout:
        brelse(bh);
        return ERR_PTR(err);
}

struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
                               ext4_lblk_t block, int map_flags)
{
        struct buffer_head *bh;
        int ret;

        bh = ext4_getblk(handle, inode, block, map_flags);
        if (IS_ERR(bh))
                return bh;
        if (!bh || ext4_buffer_uptodate(bh))
                return bh;

        ret = ext4_read_bh_lock(bh, REQ_META | REQ_PRIO, true);
        if (ret) {
                put_bh(bh);
                return ERR_PTR(ret);
        }
        return bh;
}
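
/*
 * Illustrative sketch (not part of this file): reading a single logical
 * block with ext4_bread().  A NULL return (without an error) means the
 * block is a hole; IS_ERR() covers allocation and I/O failures.
 */
static int example_read_block(struct inode *inode, ext4_lblk_t block)
{
        struct buffer_head *bh = ext4_bread(NULL, inode, block, 0);

        if (IS_ERR(bh))
                return PTR_ERR(bh);
        if (!bh)
                return 0;       /* hole */
        /* ... inspect bh->b_data here ... */
        brelse(bh);
        return 0;
}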

/* Read a contiguous batch of blocks. */
int ext4_bread_batch(struct inode *inode, ext4_lblk_t block, int bh_count,
                     bool wait, struct buffer_head **bhs)
{
        int i, err;

        for (i = 0; i < bh_count; i++) {
                bhs[i] = ext4_getblk(NULL, inode, block + i, 0 /* map_flags */);
                if (IS_ERR(bhs[i])) {
                        err = PTR_ERR(bhs[i]);
                        bh_count = i;
                        goto out_brelse;
                }
        }

        for (i = 0; i < bh_count; i++)
                /* Note that NULL bhs[i] is valid because of holes. */
                if (bhs[i] && !ext4_buffer_uptodate(bhs[i]))
                        ext4_read_bh_lock(bhs[i], REQ_META | REQ_PRIO, false);

        if (!wait)
                return 0;

        for (i = 0; i < bh_count; i++)
                if (bhs[i])
                        wait_on_buffer(bhs[i]);

        for (i = 0; i < bh_count; i++) {
                if (bhs[i] && !buffer_uptodate(bhs[i])) {
                        err = -EIO;
                        goto out_brelse;
                }
        }
        return 0;

out_brelse:
        for (i = 0; i < bh_count; i++) {
                brelse(bhs[i]);
                bhs[i] = NULL;
        }
        return err;
}
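
/*
 * Illustrative sketch (not part of this file): a synchronous batched read
 * of four adjacent blocks.  On success every non-NULL bhs[i] is uptodate;
 * NULL entries denote holes, and brelse(NULL) is a no-op.
 */
static int example_read_four(struct inode *inode, ext4_lblk_t block)
{
        struct buffer_head *bhs[4] = {};
        int i, err;

        err = ext4_bread_batch(inode, block, 4, true /* wait */, bhs);
        if (err)
                return err;
        for (i = 0; i < 4; i++)
                brelse(bhs[i]);
        return 0;
}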

int ext4_walk_page_buffers(handle_t *handle, struct inode *inode,
                           struct buffer_head *head,
                           unsigned from,
                           unsigned to,
                           int *partial,
                           int (*fn)(handle_t *handle, struct inode *inode,
                                     struct buffer_head *bh))
{
        struct buffer_head *bh;
        unsigned block_start, block_end;
        unsigned blocksize = head->b_size;
        int err, ret = 0;
        struct buffer_head *next;

        for (bh = head, block_start = 0;
             ret == 0 && (bh != head || !block_start);
             block_start = block_end, bh = next) {
                next = bh->b_this_page;
                block_end = block_start + blocksize;
                if (block_end <= from || block_start >= to) {
                        if (partial && !buffer_uptodate(bh))
                                *partial = 1;
                        continue;
                }
                err = (*fn)(handle, inode, bh);
                if (!ret)
                        ret = err;
        }
        return ret;
}

/*
 * Helper for handling dirtying of journalled data. We also mark the folio as
 * dirty so that the writeback code knows that this folio (and inode) contains
 * dirty data. ext4_writepages() then commits the appropriate transaction to
 * make the data stable.
 */
static int ext4_dirty_journalled_data(handle_t *handle, struct buffer_head *bh)
{
        folio_mark_dirty(bh->b_folio);
        return ext4_handle_dirty_metadata(handle, NULL, bh);
}

int do_journal_get_write_access(handle_t *handle, struct inode *inode,
                                struct buffer_head *bh)
{
        int dirty = buffer_dirty(bh);
        int ret;

        if (!buffer_mapped(bh) || buffer_freed(bh))
                return 0;
        /*
         * __block_write_begin() could have dirtied some buffers. Clear
         * the dirty bit, as jbd2_journal_get_write_access() could otherwise
         * complain about fs integrity issues. Setting of the dirty bit
         * by __block_write_begin() isn't a real problem here as we clear
         * the bit before releasing a page lock and thus writeback cannot
         * ever write the buffer.
         */
        if (dirty)
                clear_buffer_dirty(bh);
        BUFFER_TRACE(bh, "get write access");
        ret = ext4_journal_get_write_access(handle, inode->i_sb, bh,
                                            EXT4_JTR_NONE);
        if (!ret && dirty)
                ret = ext4_dirty_journalled_data(handle, bh);
        return ret;
}

#ifdef CONFIG_FS_ENCRYPTION
static int ext4_block_write_begin(struct folio *folio, loff_t pos, unsigned len,
                                  get_block_t *get_block)
{
        unsigned from = pos & (PAGE_SIZE - 1);
        unsigned to = from + len;
        struct inode *inode = folio->mapping->host;
        unsigned block_start, block_end;
        sector_t block;
        int err = 0;
        unsigned blocksize = inode->i_sb->s_blocksize;
        unsigned bbits;
        struct buffer_head *bh, *head, *wait[2];
        int nr_wait = 0;
        int i;

        BUG_ON(!folio_test_locked(folio));
        BUG_ON(from > PAGE_SIZE);
        BUG_ON(to > PAGE_SIZE);
        BUG_ON(from > to);

        head = folio_buffers(folio);
        if (!head)
                head = create_empty_buffers(folio, blocksize, 0);
        bbits = ilog2(blocksize);
        block = (sector_t)folio->index << (PAGE_SHIFT - bbits);

        for (bh = head, block_start = 0; bh != head || !block_start;
            block++, block_start = block_end, bh = bh->b_this_page) {
                block_end = block_start + blocksize;
                if (block_end <= from || block_start >= to) {
                        if (folio_test_uptodate(folio)) {
                                set_buffer_uptodate(bh);
                        }
                        continue;
                }
                if (buffer_new(bh))
                        clear_buffer_new(bh);
                if (!buffer_mapped(bh)) {
                        WARN_ON(bh->b_size != blocksize);
                        err = get_block(inode, block, bh, 1);
                        if (err)
                                break;
                        if (buffer_new(bh)) {
                                if (folio_test_uptodate(folio)) {
                                        clear_buffer_new(bh);
                                        set_buffer_uptodate(bh);
                                        mark_buffer_dirty(bh);
                                        continue;
                                }
                                if (block_end > to || block_start < from)
                                        folio_zero_segments(folio, to,
                                                            block_end,
                                                            block_start, from);
                                continue;
                        }
                }
                if (folio_test_uptodate(folio)) {
                        set_buffer_uptodate(bh);
                        continue;
                }
                if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
                    !buffer_unwritten(bh) &&
                    (block_start < from || block_end > to)) {
                        ext4_read_bh_lock(bh, 0, false);
                        wait[nr_wait++] = bh;
                }
        }
        /*
         * If we issued read requests, let them complete.
         */
        for (i = 0; i < nr_wait; i++) {
                wait_on_buffer(wait[i]);
                if (!buffer_uptodate(wait[i]))
                        err = -EIO;
        }
        if (unlikely(err)) {
                folio_zero_new_buffers(folio, from, to);
        } else if (fscrypt_inode_uses_fs_layer_crypto(inode)) {
                for (i = 0; i < nr_wait; i++) {
                        int err2;

                        err2 = fscrypt_decrypt_pagecache_blocks(folio,
                                                blocksize, bh_offset(wait[i]));
                        if (err2) {
                                clear_buffer_uptodate(wait[i]);
                                err = err2;
                        }
                }
        }

        return err;
}
#endif

/*
 * To preserve ordering, it is essential that the hole instantiation and
 * the data write be encapsulated in a single transaction.  We cannot
 * close off a transaction and start a new one between the ext4_get_block()
 * and the ext4_write_end().  So doing the jbd2_journal_start at the start of
 * ext4_write_begin() is the right place.
 */
static int ext4_write_begin(struct file *file, struct address_space *mapping,
                            loff_t pos, unsigned len,
                            struct page **pagep, void **fsdata)
{
        struct inode *inode = mapping->host;
        int ret, needed_blocks;
        handle_t *handle;
        int retries = 0;
        struct folio *folio;
        pgoff_t index;
        unsigned from, to;

        if (unlikely(ext4_forced_shutdown(inode->i_sb)))
                return -EIO;

        trace_ext4_write_begin(inode, pos, len);
        /*
         * Reserve one extra block for addition to the orphan list in case
         * we allocate blocks but the write fails for some reason.
         */
        needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
        index = pos >> PAGE_SHIFT;
        from = pos & (PAGE_SIZE - 1);
        to = from + len;

        if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
                ret = ext4_try_to_write_inline_data(mapping, inode, pos, len,
                                                    pagep);
                if (ret < 0)
                        return ret;
                if (ret == 1)
                        return 0;
        }

        /*
         * __filemap_get_folio() can take a long time if the
         * system is thrashing due to memory pressure, or if the folio
         * is being written back.  So grab it first before we start
         * the transaction handle.  This also allows us to allocate
         * the folio (if needed) without using GFP_NOFS.
         */
retry_grab:
        folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
                                        mapping_gfp_mask(mapping));
        if (IS_ERR(folio))
                return PTR_ERR(folio);
        /*
         * As with the folio allocation, we preallocate the buffer heads
         * before starting the handle.
         */
        if (!folio_buffers(folio))
                create_empty_buffers(folio, inode->i_sb->s_blocksize, 0);

        folio_unlock(folio);

retry_journal:
        handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, needed_blocks);
        if (IS_ERR(handle)) {
                folio_put(folio);
                return PTR_ERR(handle);
        }

        folio_lock(folio);
        if (folio->mapping != mapping) {
                /* The folio got truncated from under us */
                folio_unlock(folio);
                folio_put(folio);
                ext4_journal_stop(handle);
                goto retry_grab;
        }
        /* In case writeback began while the folio was unlocked */
        folio_wait_stable(folio);

#ifdef CONFIG_FS_ENCRYPTION
        if (ext4_should_dioread_nolock(inode))
                ret = ext4_block_write_begin(folio, pos, len,
                                             ext4_get_block_unwritten);
        else
                ret = ext4_block_write_begin(folio, pos, len, ext4_get_block);
#else
        if (ext4_should_dioread_nolock(inode))
                ret = __block_write_begin(&folio->page, pos, len,
                                          ext4_get_block_unwritten);
        else
                ret = __block_write_begin(&folio->page, pos, len, ext4_get_block);
#endif
        if (!ret && ext4_should_journal_data(inode)) {
                ret = ext4_walk_page_buffers(handle, inode,
                                             folio_buffers(folio), from, to,
                                             NULL, do_journal_get_write_access);
        }

        if (ret) {
                bool extended = (pos + len > inode->i_size) &&
                                !ext4_verity_in_progress(inode);

                folio_unlock(folio);
                /*
                 * __block_write_begin may have instantiated a few blocks
                 * outside i_size.  Trim these off again. Don't need
                 * i_size_read because we hold i_rwsem.
                 *
                 * Add the inode to the orphan list in case we crash before
                 * truncate finishes.
                 */
                if (extended && ext4_can_truncate(inode))
                        ext4_orphan_add(handle, inode);

                ext4_journal_stop(handle);
                if (extended) {
                        ext4_truncate_failed_write(inode);
                        /*
                         * If truncate failed early the inode might
                         * still be on the orphan list; we need to
                         * make sure the inode is removed from the
                         * orphan list in that case.
                         */
                        if (inode->i_nlink)
                                ext4_orphan_del(NULL, inode);
                }

                if (ret == -ENOSPC &&
                    ext4_should_retry_alloc(inode->i_sb, &retries))
                        goto retry_journal;
                folio_put(folio);
                return ret;
        }
        *pagep = &folio->page;
        return ret;
}

/* For write_end() in data=journal mode */
static int write_end_fn(handle_t *handle, struct inode *inode,
                        struct buffer_head *bh)
{
        int ret;
        if (!buffer_mapped(bh) || buffer_freed(bh))
                return 0;
        set_buffer_uptodate(bh);
        ret = ext4_dirty_journalled_data(handle, bh);
        clear_buffer_meta(bh);
        clear_buffer_prio(bh);
        return ret;
}

/*
 * We need to pick up the new inode size which generic_commit_write gave us.
 * `file' can be NULL - eg, when called from page_symlink().
 *
 * ext4 never places buffers on inode->i_mapping->i_private_list.  Metadata
 * buffers are managed internally.
 */
static int ext4_write_end(struct file *file,
                          struct address_space *mapping,
                          loff_t pos, unsigned len, unsigned copied,
                          struct page *page, void *fsdata)
{
        struct folio *folio = page_folio(page);
        handle_t *handle = ext4_journal_current_handle();
        struct inode *inode = mapping->host;
        loff_t old_size = inode->i_size;
        int ret = 0, ret2;
        int i_size_changed = 0;
        bool verity = ext4_verity_in_progress(inode);

        trace_ext4_write_end(inode, pos, len, copied);

        if (ext4_has_inline_data(inode) &&
            ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA))
                return ext4_write_inline_data_end(inode, pos, len, copied,
                                                  folio);

        copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
        /*
         * It's important to update i_size while still holding the folio lock:
         * page writeout could otherwise come in and zero beyond i_size.
         *
         * If FS_IOC_ENABLE_VERITY is running on this inode, then Merkle tree
         * blocks are being written past EOF, so skip the i_size update.
         */
        if (!verity)
                i_size_changed = ext4_update_inode_size(inode, pos + copied);
        folio_unlock(folio);
        folio_put(folio);

        if (old_size < pos && !verity)
                pagecache_isize_extended(inode, old_size, pos);
        /*
         * Don't mark the inode dirty under folio lock. First, it unnecessarily
         * makes the holding time of folio lock longer. Second, it forces lock
         * ordering of folio lock and transaction start for journaling
         * filesystems.
         */
        if (i_size_changed)
                ret = ext4_mark_inode_dirty(handle, inode);

        if (pos + len > inode->i_size && !verity && ext4_can_truncate(inode))
                /*
                 * If we have allocated more blocks than we have copied,
                 * we will have blocks allocated outside inode->i_size,
                 * so truncate them.
                 */
                ext4_orphan_add(handle, inode);

        ret2 = ext4_journal_stop(handle);
        if (!ret)
                ret = ret2;

        if (pos + len > inode->i_size && !verity) {
                ext4_truncate_failed_write(inode);
                /*
                 * If truncate failed early the inode might still be
                 * on the orphan list; we need to make sure the inode
                 * is removed from the orphan list in that case.
                 */
                if (inode->i_nlink)
                        ext4_orphan_del(NULL, inode);
        }

        return ret ? ret : copied;
}

/*
 * This is a private version of folio_zero_new_buffers() which doesn't
 * set the buffer to be dirty, since in data=journalled mode we need
 * to call ext4_dirty_journalled_data() instead.
 */
static void ext4_journalled_zero_new_buffers(handle_t *handle,
                                            struct inode *inode,
                                            struct folio *folio,
                                            unsigned from, unsigned to)
{
        unsigned int block_start = 0, block_end;
        struct buffer_head *head, *bh;

        bh = head = folio_buffers(folio);
        do {
                block_end = block_start + bh->b_size;
                if (buffer_new(bh)) {
                        if (block_end > from && block_start < to) {
                                if (!folio_test_uptodate(folio)) {
                                        unsigned start, size;

                                        start = max(from, block_start);
                                        size = min(to, block_end) - start;

                                        folio_zero_range(folio, start, size);
                                        write_end_fn(handle, inode, bh);
                                }
                                clear_buffer_new(bh);
                        }
                }
                block_start = block_end;
                bh = bh->b_this_page;
        } while (bh != head);
}

static int ext4_journalled_write_end(struct file *file,
                                     struct address_space *mapping,
                                     loff_t pos, unsigned len, unsigned copied,
                                     struct page *page, void *fsdata)
{
        struct folio *folio = page_folio(page);
        handle_t *handle = ext4_journal_current_handle();
        struct inode *inode = mapping->host;
        loff_t old_size = inode->i_size;
        int ret = 0, ret2;
        int partial = 0;
        unsigned from, to;
        int size_changed = 0;
        bool verity = ext4_verity_in_progress(inode);

        trace_ext4_journalled_write_end(inode, pos, len, copied);
        from = pos & (PAGE_SIZE - 1);
        to = from + len;

        BUG_ON(!ext4_handle_valid(handle));

        if (ext4_has_inline_data(inode))
                return ext4_write_inline_data_end(inode, pos, len, copied,
                                                  folio);

        if (unlikely(copied < len) && !folio_test_uptodate(folio)) {
                copied = 0;
                ext4_journalled_zero_new_buffers(handle, inode, folio,
                                                 from, to);
        } else {
                if (unlikely(copied < len))
                        ext4_journalled_zero_new_buffers(handle, inode, folio,
                                                         from + copied, to);
                ret = ext4_walk_page_buffers(handle, inode,
                                             folio_buffers(folio),
                                             from, from + copied, &partial,
                                             write_end_fn);
                if (!partial)
                        folio_mark_uptodate(folio);
        }
        if (!verity)
                size_changed = ext4_update_inode_size(inode, pos + copied);
        EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
1414         folio_unlock(folio);
1415         folio_put(folio);
1416
1417         if (old_size < pos && !verity)
1418                 pagecache_isize_extended(inode, old_size, pos);
1419
1420         if (size_changed) {
1421                 ret2 = ext4_mark_inode_dirty(handle, inode);
1422                 if (!ret)
1423                         ret = ret2;
1424         }
1425
1426         if (pos + len > inode->i_size && !verity && ext4_can_truncate(inode))
1427                 /* If we have allocated more blocks than we have
1428                  * copied, we will have blocks allocated outside
1429                  * inode->i_size, so truncate them.
1430                  */
1431                 ext4_orphan_add(handle, inode);
1432
1433         ret2 = ext4_journal_stop(handle);
1434         if (!ret)
1435                 ret = ret2;
1436         if (pos + len > inode->i_size && !verity) {
1437                 ext4_truncate_failed_write(inode);
1438                 /*
1439                  * If truncate failed early the inode might still be
1440                  * on the orphan list; we need to make sure the inode
1441                  * is removed from the orphan list in that case.
1442                  */
1443                 if (inode->i_nlink)
1444                         ext4_orphan_del(NULL, inode);
1445         }
1446
1447         return ret ? ret : copied;
1448 }
1449
1450 /*
1451  * Reserve space for a single cluster
1452  */
1453 static int ext4_da_reserve_space(struct inode *inode)
1454 {
1455         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1456         struct ext4_inode_info *ei = EXT4_I(inode);
1457         int ret;
1458
1459         /*
1460          * We will charge metadata quota at writeout time; this saves
1461          * us from metadata over-estimation, though we may go over by
1462          * a small amount in the end.  Here we just reserve for data.
1463          */
1464         ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1));
1465         if (ret)
1466                 return ret;
1467
1468         spin_lock(&ei->i_block_reservation_lock);
1469         if (ext4_claim_free_clusters(sbi, 1, 0)) {
1470                 spin_unlock(&ei->i_block_reservation_lock);
1471                 dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
1472                 return -ENOSPC;
1473         }
1474         ei->i_reserved_data_blocks++;
1475         trace_ext4_da_reserve_space(inode);
1476         spin_unlock(&ei->i_block_reservation_lock);
1477
1478         return 0;       /* success */
1479 }
1480
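/*
 * ext4_da_release_space - release previously reserved delalloc clusters
 *
 * @inode - inode that made the reservation
 * @to_free - number of clusters to un-reserve
 *
 * Undoes the work of ext4_da_reserve_space(): drops the per-inode
 * reserved data block count, the fs-wide dirty clusters counter, and
 * the quota reservation for @to_free clusters.
 */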
1481 void ext4_da_release_space(struct inode *inode, int to_free)
1482 {
1483         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1484         struct ext4_inode_info *ei = EXT4_I(inode);
1485
1486         if (!to_free)
1487                 return;         /* Nothing to release, exit */
1488
1489         spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
1490
1491         trace_ext4_da_release_space(inode, to_free);
1492         if (unlikely(to_free > ei->i_reserved_data_blocks)) {
1493                 /*
1494                  * If there aren't enough reserved blocks, then the
1495                  * counter is messed up somewhere.  Since this
1496                  * function is called from the invalidate path, it's
1497                  * harmless to return without any action.
1498                  */
1499                 ext4_warning(inode->i_sb, "ext4_da_release_space: "
1500                          "ino %lu, to_free %d with only %d reserved "
1501                          "data blocks", inode->i_ino, to_free,
1502                          ei->i_reserved_data_blocks);
1503                 WARN_ON(1);
1504                 to_free = ei->i_reserved_data_blocks;
1505         }
1506         ei->i_reserved_data_blocks -= to_free;
1507
1508         /* update fs dirty data blocks counter */
1509         percpu_counter_sub(&sbi->s_dirtyclusters_counter, to_free);
1510
1511         spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
1512
1513         dquot_release_reservation_block(inode, EXT4_C2B(sbi, to_free));
1514 }
1515
1516 /*
1517  * Delayed allocation stuff
1518  */
1519
1520 struct mpage_da_data {
1521         /* These are input fields for ext4_do_writepages() */
1522         struct inode *inode;
1523         struct writeback_control *wbc;
1524         unsigned int can_map:1; /* Can writepages call map blocks? */
1525
1526         /* These are internal state of ext4_do_writepages() */
1527         pgoff_t first_page;     /* The first page to write */
1528         pgoff_t next_page;      /* Current page to examine */
1529         pgoff_t last_page;      /* Last page to examine */
1530         /*
1531          * Extent to map - this can be after first_page because that can be
1532          * fully mapped. We somewhat abuse m_flags to store whether the extent
1533          * is delalloc or unwritten.
1534          */
1535         struct ext4_map_blocks map;
1536         struct ext4_io_submit io_submit;        /* IO submission data */
1537         unsigned int do_map:1;  /* Can we map blocks (handle started)? */
1538         unsigned int scanned_until_end:1;       /* Scanned the whole range? */
1539         unsigned int journalled_more_data:1;    /* Journalled page data during writeback? */
1540 };
1541
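/*
 * mpage_release_unused_pages - unlock pages in the range we did not write
 *
 * @mpd - extent of pages to release
 * @invalidate - whether to also invalidate the page contents
 *
 * Unlocks the locked but unwritten pages in [first_page, next_page).
 * If @invalidate is true (we are giving up on this writeback range),
 * the range is also removed from the extent status tree and the page
 * contents are invalidated.
 */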
1542 static void mpage_release_unused_pages(struct mpage_da_data *mpd,
1543                                        bool invalidate)
1544 {
1545         unsigned nr, i;
1546         pgoff_t index, end;
1547         struct folio_batch fbatch;
1548         struct inode *inode = mpd->inode;
1549         struct address_space *mapping = inode->i_mapping;
1550
1551         /* This is necessary when next_page == 0. */
1552         if (mpd->first_page >= mpd->next_page)
1553                 return;
1554
1555         mpd->scanned_until_end = 0;
1556         index = mpd->first_page;
1557         end   = mpd->next_page - 1;
1558         if (invalidate) {
1559                 ext4_lblk_t start, last;
1560                 start = index << (PAGE_SHIFT - inode->i_blkbits);
1561                 last = end << (PAGE_SHIFT - inode->i_blkbits);
1562
1563                 /*
1564                  * avoid racing with extent status tree scans made by
1565                  * ext4_insert_delayed_block()
1566                  */
1567                 down_write(&EXT4_I(inode)->i_data_sem);
1568                 ext4_es_remove_extent(inode, start, last - start + 1);
1569                 up_write(&EXT4_I(inode)->i_data_sem);
1570         }
1571
1572         folio_batch_init(&fbatch);
1573         while (index <= end) {
1574                 nr = filemap_get_folios(mapping, &index, end, &fbatch);
1575                 if (nr == 0)
1576                         break;
1577                 for (i = 0; i < nr; i++) {
1578                         struct folio *folio = fbatch.folios[i];
1579
1580                         if (folio->index < mpd->first_page)
1581                                 continue;
1582                         if (folio_next_index(folio) - 1 > end)
1583                                 continue;
1584                         BUG_ON(!folio_test_locked(folio));
1585                         BUG_ON(folio_test_writeback(folio));
1586                         if (invalidate) {
1587                                 if (folio_mapped(folio))
1588                                         folio_clear_dirty_for_io(folio);
1589                                 block_invalidate_folio(folio, 0,
1590                                                 folio_size(folio));
1591                                 folio_clear_uptodate(folio);
1592                         }
1593                         folio_unlock(folio);
1594                 }
1595                 folio_batch_release(&fbatch);
1596         }
1597 }
1598
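/*
 * ext4_print_free_blocks - dump free/dirty cluster accounting to the log;
 * called below when delayed block allocation fails with ENOSPC.
 */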
1599 static void ext4_print_free_blocks(struct inode *inode)
1600 {
1601         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1602         struct super_block *sb = inode->i_sb;
1603         struct ext4_inode_info *ei = EXT4_I(inode);
1604
1605         ext4_msg(sb, KERN_CRIT, "Total free blocks count %lld",
1606                EXT4_C2B(EXT4_SB(inode->i_sb),
1607                         ext4_count_free_clusters(sb)));
1608         ext4_msg(sb, KERN_CRIT, "Free/Dirty block details");
1609         ext4_msg(sb, KERN_CRIT, "free_blocks=%lld",
1610                (long long) EXT4_C2B(EXT4_SB(sb),
1611                 percpu_counter_sum(&sbi->s_freeclusters_counter)));
1612         ext4_msg(sb, KERN_CRIT, "dirty_blocks=%lld",
1613                (long long) EXT4_C2B(EXT4_SB(sb),
1614                 percpu_counter_sum(&sbi->s_dirtyclusters_counter)));
1615         ext4_msg(sb, KERN_CRIT, "Block reservation details");
1616         ext4_msg(sb, KERN_CRIT, "i_reserved_data_blocks=%u",
1617                  ei->i_reserved_data_blocks);
1619 }
1620
1621 /*
1622  * ext4_insert_delayed_block - adds a delayed block to the extents status
1623  *                             tree, incrementing the reserved cluster/block
1624  *                             count or making a pending reservation
1625  *                             where needed
1626  *
1627  * @inode - file containing the newly added block
1628  * @lblk - logical block to be added
1629  *
1630  * Returns 0 on success, negative error code on failure.
1631  */
1632 static int ext4_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk)
1633 {
1634         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1635         int ret;
1636         bool allocated = false;
1637
1638         /*
1639          * If the cluster containing lblk is shared with a delayed,
1640          * written, or unwritten extent in a bigalloc file system, it's
1641          * already been accounted for and does not need to be reserved.
1642          * A pending reservation must be made for the cluster if it's
1643          * shared with a written or unwritten extent and doesn't already
1644          * have one.  Written and unwritten extents can be purged from the
1645          * extents status tree if the system is under memory pressure, so
1646          * it's necessary to examine the extent tree if a search of the
1647          * extents status tree doesn't get a match.
1648          */
1649         if (sbi->s_cluster_ratio == 1) {
1650                 ret = ext4_da_reserve_space(inode);
1651                 if (ret != 0)   /* ENOSPC */
1652                         return ret;
1653         } else {   /* bigalloc */
1654                 if (!ext4_es_scan_clu(inode, &ext4_es_is_delonly, lblk)) {
1655                         if (!ext4_es_scan_clu(inode,
1656                                               &ext4_es_is_mapped, lblk)) {
1657                                 ret = ext4_clu_mapped(inode,
1658                                                       EXT4_B2C(sbi, lblk));
1659                                 if (ret < 0)
1660                                         return ret;
1661                                 if (ret == 0) {
1662                                         ret = ext4_da_reserve_space(inode);
1663                                         if (ret != 0)   /* ENOSPC */
1664                                                 return ret;
1665                                 } else {
1666                                         allocated = true;
1667                                 }
1668                         } else {
1669                                 allocated = true;
1670                         }
1671                 }
1672         }
1673
1674         ext4_es_insert_delayed_block(inode, lblk, allocated);
1675         return 0;
1676 }
1677
1678 /*
1679  * This function grabs code from the very beginning of
1680  * ext4_map_blocks, but assumes that the caller is invoking it at
1681  * delayed-write time. It looks up the requested blocks and sets the
1682  * buffer delay bit under the protection of i_data_sem.
1683  */
1684 static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
1685                               struct ext4_map_blocks *map,
1686                               struct buffer_head *bh)
1687 {
1688         struct extent_status es;
1689         int retval;
1690         sector_t invalid_block = ~((sector_t) 0xffff);
1691 #ifdef ES_AGGRESSIVE_TEST
1692         struct ext4_map_blocks orig_map;
1693
1694         memcpy(&orig_map, map, sizeof(*map));
1695 #endif
1696
1697         if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))
1698                 invalid_block = ~0;
1699
1700         map->m_flags = 0;
1701         ext_debug(inode, "max_blocks %u, logical block %lu\n", map->m_len,
1702                   (unsigned long) map->m_lblk);
1703
1704         /* Lookup extent status tree firstly */
1705         if (ext4_es_lookup_extent(inode, iblock, NULL, &es)) {
1706                 if (ext4_es_is_hole(&es)) {
1707                         retval = 0;
1708                         down_read(&EXT4_I(inode)->i_data_sem);
1709                         goto add_delayed;
1710                 }
1711
1712                 /*
1713                  * A delayed extent could have been allocated by
1714                  * fallocate, so we need to check for that.
1715                  */
1716                 if (ext4_es_is_delayed(&es) && !ext4_es_is_unwritten(&es)) {
1717                         map_bh(bh, inode->i_sb, invalid_block);
1718                         set_buffer_new(bh);
1719                         set_buffer_delay(bh);
1720                         return 0;
1721                 }
1722
1723                 map->m_pblk = ext4_es_pblock(&es) + iblock - es.es_lblk;
1724                 retval = es.es_len - (iblock - es.es_lblk);
1725                 if (retval > map->m_len)
1726                         retval = map->m_len;
1727                 map->m_len = retval;
1728                 if (ext4_es_is_written(&es))
1729                         map->m_flags |= EXT4_MAP_MAPPED;
1730                 else if (ext4_es_is_unwritten(&es))
1731                         map->m_flags |= EXT4_MAP_UNWRITTEN;
1732                 else
1733                         BUG();
1734
1735 #ifdef ES_AGGRESSIVE_TEST
1736                 ext4_map_blocks_es_recheck(NULL, inode, map, &orig_map, 0);
1737 #endif
1738                 return retval;
1739         }
1740
1741         /*
1742          * Try to see if we can get the block without requesting a new
1743          * file system block.
1744          */
1745         down_read(&EXT4_I(inode)->i_data_sem);
1746         if (ext4_has_inline_data(inode))
1747                 retval = 0;
1748         else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
1749                 retval = ext4_ext_map_blocks(NULL, inode, map, 0);
1750         else
1751                 retval = ext4_ind_map_blocks(NULL, inode, map, 0);
1752
1753 add_delayed:
1754         if (retval == 0) {
1755                 int ret;
1756
1757                 /*
1758                  * XXX: __block_prepare_write() unmaps passed block,
1759                  * is it OK?
1760                  */
1761
1762                 ret = ext4_insert_delayed_block(inode, map->m_lblk);
1763                 if (ret != 0) {
1764                         retval = ret;
1765                         goto out_unlock;
1766                 }
1767
1768                 map_bh(bh, inode->i_sb, invalid_block);
1769                 set_buffer_new(bh);
1770                 set_buffer_delay(bh);
1771         } else if (retval > 0) {
1772                 unsigned int status;
1773
1774                 if (unlikely(retval != map->m_len)) {
1775                         ext4_warning(inode->i_sb,
1776                                      "ES len assertion failed for inode "
1777                                      "%lu: retval %d != map->m_len %d",
1778                                      inode->i_ino, retval, map->m_len);
1779                         WARN_ON(1);
1780                 }
1781
1782                 status = map->m_flags & EXT4_MAP_UNWRITTEN ?
1783                                 EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
1784                 ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
1785                                       map->m_pblk, status);
1786         }
1787
1788 out_unlock:
1789         up_read((&EXT4_I(inode)->i_data_sem));
1790
1791         return retval;
1792 }
1793
1794 /*
1795  * This is a special get_block_t callback which is used by
1796  * ext4_da_write_begin().  It will either return mapped block or
1797  * reserve space for a single block.
1798  *
1799  * For a delayed buffer_head we have BH_Mapped, BH_New, BH_Delay set.
1800  * We also have b_blocknr = -1 and b_bdev initialized properly.
1801  *
1802  * For an unwritten buffer_head we have BH_Mapped, BH_New, BH_Unwritten set.
1803  * We also have b_blocknr = the physical block mapping the unwritten extent
1804  * and b_bdev initialized properly.
1805  */
1806 int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
1807                            struct buffer_head *bh, int create)
1808 {
1809         struct ext4_map_blocks map;
1810         int ret = 0;
1811
1812         BUG_ON(create == 0);
1813         BUG_ON(bh->b_size != inode->i_sb->s_blocksize);
1814
1815         map.m_lblk = iblock;
1816         map.m_len = 1;
1817
1818         /*
1819          * First, we need to know whether the block is already allocated;
1820          * preallocated blocks are unmapped but should be treated
1821          * the same as allocated blocks.
1822          */
1823         ret = ext4_da_map_blocks(inode, iblock, &map, bh);
1824         if (ret <= 0)
1825                 return ret;
1826
1827         map_bh(bh, inode->i_sb, map.m_pblk);
1828         ext4_update_bh_state(bh, map.m_flags);
1829
1830         if (buffer_unwritten(bh)) {
1831                 /* A delayed write to unwritten bh should be marked
1832                  * new and mapped.  Mapped ensures that we don't do
1833                  * get_block multiple times when we write to the same
1834                  * offset and new ensures that we do proper zero out
1835                  * for partial write.
1836                  */
1837                 set_buffer_new(bh);
1838                 set_buffer_mapped(bh);
1839         }
1840         return 0;
1841 }
1842
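/* A folio has been fully handled; advance the scan window past it and unlock. */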
1843 static void mpage_folio_done(struct mpage_da_data *mpd, struct folio *folio)
1844 {
1845         mpd->first_page += folio_nr_pages(folio);
1846         folio_unlock(folio);
1847 }
1848
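/*
 * mpage_submit_folio - submit one folio for writeback, trimming the length
 * so we never write past i_size (unless fs-verity is building its Merkle
 * tree beyond EOF).
 */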
1849 static int mpage_submit_folio(struct mpage_da_data *mpd, struct folio *folio)
1850 {
1851         size_t len;
1852         loff_t size;
1853         int err;
1854
1855         BUG_ON(folio->index != mpd->first_page);
1856         folio_clear_dirty_for_io(folio);
1857         /*
1858          * We have to be very careful here!  Nothing protects writeback path
1859          * against i_size changes and the page can be writeably mapped into
1860          * page tables. So an application can be growing i_size and writing
1861          * data through mmap while writeback runs. folio_clear_dirty_for_io()
1862          * write-protects our page in page tables and the page cannot get
1863          * written to again until we release folio lock. So only after
1864          * folio_clear_dirty_for_io() we are safe to sample i_size for
1865          * ext4_bio_write_folio() to zero-out tail of the written page. We rely
1866          * on the barrier provided by folio_test_clear_dirty() in
1867          * folio_clear_dirty_for_io() to make sure i_size is really sampled only
1868          * after page tables are updated.
1869          */
1870         size = i_size_read(mpd->inode);
1871         len = folio_size(folio);
1872         if (folio_pos(folio) + len > size &&
1873             !ext4_verity_in_progress(mpd->inode))
1874                 len = size & ~PAGE_MASK;
1875         err = ext4_bio_write_folio(&mpd->io_submit, folio, len);
1876         if (!err)
1877                 mpd->wbc->nr_to_write--;
1878
1879         return err;
1880 }
1881
1882 #define BH_FLAGS (BIT(BH_Unwritten) | BIT(BH_Delay)) /* bh state bits recorded in mpd->map.m_flags */
1883
1884 /*
1885  * mballoc gives us at most this number of blocks...
1886  * XXX: That seems to be only a limitation of ext4_mb_normalize_request().
1887  * The rest of mballoc seems to handle chunks up to full group size.
1888  */
1889 #define MAX_WRITEPAGES_EXTENT_LEN 2048
1890
1891 /*
1892  * mpage_add_bh_to_extent - try to add bh to extent of blocks to map
1893  *
1894  * @mpd - extent of blocks
1895  * @lblk - logical number of the block in the file
1896  * @bh - buffer head we want to add to the extent
1897  *
1898  * The function is used to collect contiguous blocks in the same state. If the
1899  * buffer doesn't require mapping for writeback and we haven't started the
1900  * extent of buffers to map yet, the function returns 'true' immediately - the
1901  * caller can write the buffer right away. Otherwise the function returns true
1902  * if the block has been added to the extent, false if the block couldn't be
1903  * added.
1904  */
1905 static bool mpage_add_bh_to_extent(struct mpage_da_data *mpd, ext4_lblk_t lblk,
1906                                    struct buffer_head *bh)
1907 {
1908         struct ext4_map_blocks *map = &mpd->map;
1909
1910         /* Buffer that doesn't need mapping for writeback? */
1911         if (!buffer_dirty(bh) || !buffer_mapped(bh) ||
1912             (!buffer_delay(bh) && !buffer_unwritten(bh))) {
1913                 /* So far no extent to map => we write the buffer right away */
1914                 if (map->m_len == 0)
1915                         return true;
1916                 return false;
1917         }
1918
1919         /* First block in the extent? */
1920         if (map->m_len == 0) {
1921                 /* We cannot map unless handle is started... */
1922                 if (!mpd->do_map)
1923                         return false;
1924                 map->m_lblk = lblk;
1925                 map->m_len = 1;
1926                 map->m_flags = bh->b_state & BH_FLAGS;
1927                 return true;
1928         }
1929
1930         /* Don't go larger than mballoc is willing to allocate */
1931         if (map->m_len >= MAX_WRITEPAGES_EXTENT_LEN)
1932                 return false;
1933
1934         /* Can we merge the block to our big extent? */
1935         if (lblk == map->m_lblk + map->m_len &&
1936             (bh->b_state & BH_FLAGS) == map->m_flags) {
1937                 map->m_len++;
1938                 return true;
1939         }
1940         return false;
1941 }
1942
1943 /*
1944  * mpage_process_page_bufs - submit page buffers for IO or add them to extent
1945  *
1946  * @mpd - extent of blocks for mapping
1947  * @head - the first buffer in the page
1948  * @bh - buffer we should start processing from
1949  * @lblk - logical number of the block in the file corresponding to @bh
1950  *
1951  * Walk through page buffers from @bh up to @head (exclusive) and either submit
1952  * the page for IO if all buffers in this page were mapped and there's no
1953  * accumulated extent of buffers to map or add buffers in the page to the
1954  * extent of buffers to map. The function returns 1 if the caller can continue
1955  * by processing the next page, 0 if it should stop adding buffers to the
1956  * extent to map because we cannot extend it anymore. It can also return a
1957  * value < 0 in case of an error during IO submission.
1958  */
1959 static int mpage_process_page_bufs(struct mpage_da_data *mpd,
1960                                    struct buffer_head *head,
1961                                    struct buffer_head *bh,
1962                                    ext4_lblk_t lblk)
1963 {
1964         struct inode *inode = mpd->inode;
1965         int err;
1966         ext4_lblk_t blocks = (i_size_read(inode) + i_blocksize(inode) - 1)
1967                                                         >> inode->i_blkbits;
1968
1969         if (ext4_verity_in_progress(inode))
1970                 blocks = EXT_MAX_BLOCKS;
1971
1972         do {
1973                 BUG_ON(buffer_locked(bh));
1974
1975                 if (lblk >= blocks || !mpage_add_bh_to_extent(mpd, lblk, bh)) {
1976                         /* Found extent to map? */
1977                         if (mpd->map.m_len)
1978                                 return 0;
1979                         /* Buffer needs mapping and handle is not started? */
1980                         if (!mpd->do_map)
1981                                 return 0;
1982                         /* Everything mapped so far and we hit EOF */
1983                         break;
1984                 }
1985         } while (lblk++, (bh = bh->b_this_page) != head);
1986         /* So far everything mapped? Submit the page for IO. */
1987         if (mpd->map.m_len == 0) {
1988                 err = mpage_submit_folio(mpd, head->b_folio);
1989                 if (err < 0)
1990                         return err;
1991                 mpage_folio_done(mpd, head->b_folio);
1992         }
1993         if (lblk >= blocks) {
1994                 mpd->scanned_until_end = 1;
1995                 return 0;
1996         }
1997         return 1;
1998 }
1999
2000 /*
2001  * mpage_process_folio - update folio buffers corresponding to changed extent
2002  *                       and may submit the fully mapped folio for IO
2003  * @mpd: description of extent to map, on return next extent to map
2004  * @folio: Contains these buffers.
2005  * @m_lblk: logical block mapping.
2006  * @m_pblk: corresponding physical mapping.
2007  * @map_bh: determines on return whether this page requires any further
2008  *                mapping or not.
2009  *
2010  * Scan given folio buffers corresponding to changed extent and update buffer
2011  * state according to new extent state.
2012  * We map delalloc buffers to their physical location, clear unwritten bits.
2013  * If the given folio is not fully mapped, we update @mpd to the next extent in
2014  * the given folio that needs mapping and return @map_bh as true.
2015  */
2016 static int mpage_process_folio(struct mpage_da_data *mpd, struct folio *folio,
2017                               ext4_lblk_t *m_lblk, ext4_fsblk_t *m_pblk,
2018                               bool *map_bh)
2019 {
2020         struct buffer_head *head, *bh;
2021         ext4_io_end_t *io_end = mpd->io_submit.io_end;
2022         ext4_lblk_t lblk = *m_lblk;
2023         ext4_fsblk_t pblock = *m_pblk;
2024         int err = 0;
2025         int blkbits = mpd->inode->i_blkbits;
2026         ssize_t io_end_size = 0;
2027         struct ext4_io_end_vec *io_end_vec = ext4_last_io_end_vec(io_end);
2028
2029         bh = head = folio_buffers(folio);
2030         do {
2031                 if (lblk < mpd->map.m_lblk)
2032                         continue;
2033                 if (lblk >= mpd->map.m_lblk + mpd->map.m_len) {
2034                         /*
2035                          * Buffer after end of mapped extent.
2036                          * Find next buffer in the folio to map.
2037                          */
2038                         mpd->map.m_len = 0;
2039                         mpd->map.m_flags = 0;
2040                         io_end_vec->size += io_end_size;
2041
2042                         err = mpage_process_page_bufs(mpd, head, bh, lblk);
2043                         if (err > 0)
2044                                 err = 0;
2045                         if (!err && mpd->map.m_len && mpd->map.m_lblk > lblk) {
2046                                 io_end_vec = ext4_alloc_io_end_vec(io_end);
2047                                 if (IS_ERR(io_end_vec)) {
2048                                         err = PTR_ERR(io_end_vec);
2049                                         goto out;
2050                                 }
2051                                 io_end_vec->offset = (loff_t)mpd->map.m_lblk << blkbits;
2052                         }
2053                         *map_bh = true;
2054                         goto out;
2055                 }
2056                 if (buffer_delay(bh)) {
2057                         clear_buffer_delay(bh);
2058                         bh->b_blocknr = pblock++;
2059                 }
2060                 clear_buffer_unwritten(bh);
2061                 io_end_size += (1 << blkbits);
2062         } while (lblk++, (bh = bh->b_this_page) != head);
2063
2064         io_end_vec->size += io_end_size;
2065         *map_bh = false;
2066 out:
2067         *m_lblk = lblk;
2068         *m_pblk = pblock;
2069         return err;
2070 }
2071
2072 /*
2073  * mpage_map_buffers - update buffers corresponding to changed extent and
2074  *                     submit fully mapped pages for IO
2075  *
2076  * @mpd - description of extent to map, on return next extent to map
2077  *
2078  * Scan buffers corresponding to changed extent (we expect corresponding pages
2079  * to be already locked) and update buffer state according to new extent state.
2080  * We map delalloc buffers to their physical location, clear unwritten bits,
2081  * and mark buffers as uninit when we perform writes to unwritten extents
2082  * and do extent conversion after IO is finished. If the last page is not fully
2083  * mapped, we update @map to the next extent in the last page that needs
2084  * mapping. Otherwise we submit the page for IO.
2085  */
2086 static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
2087 {
2088         struct folio_batch fbatch;
2089         unsigned nr, i;
2090         struct inode *inode = mpd->inode;
2091         int bpp_bits = PAGE_SHIFT - inode->i_blkbits;
2092         pgoff_t start, end;
2093         ext4_lblk_t lblk;
2094         ext4_fsblk_t pblock;
2095         int err;
2096         bool map_bh = false;
2097
2098         start = mpd->map.m_lblk >> bpp_bits;
2099         end = (mpd->map.m_lblk + mpd->map.m_len - 1) >> bpp_bits;
2100         lblk = start << bpp_bits;
2101         pblock = mpd->map.m_pblk;
2102
2103         folio_batch_init(&fbatch);
2104         while (start <= end) {
2105                 nr = filemap_get_folios(inode->i_mapping, &start, end, &fbatch);
2106                 if (nr == 0)
2107                         break;
2108                 for (i = 0; i < nr; i++) {
2109                         struct folio *folio = fbatch.folios[i];
2110
2111                         err = mpage_process_folio(mpd, folio, &lblk, &pblock,
2112                                                  &map_bh);
2113                         /*
2114                          * If map_bh is true, the page may require further bh
2115                          * mapping, or the page may have been submitted for IO,
2116                          * so we return to the caller to continue extent mapping.
2117                          */
2118                         if (err < 0 || map_bh)
2119                                 goto out;
2120                         /* Page fully mapped - let IO run! */
2121                         err = mpage_submit_folio(mpd, folio);
2122                         if (err < 0)
2123                                 goto out;
2124                         mpage_folio_done(mpd, folio);
2125                 }
2126                 folio_batch_release(&fbatch);
2127         }
2128         /* Extent fully mapped and matches with page boundary. We are done. */
2129         mpd->map.m_len = 0;
2130         mpd->map.m_flags = 0;
2131         return 0;
2132 out:
2133         folio_batch_release(&fbatch);
2134         return err;
2135 }
2136
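/*
 * mpage_map_one_extent - allocate blocks for the extent described by mpd->map
 *
 * @handle - journal handle for the allocation
 * @mpd - extent to map
 *
 * Calls ext4_map_blocks() with CREATE and METADATA_NOFAIL set (see the
 * comment below for why), and, when dioread_nolock requires unwritten
 * extent conversion after IO completes, hands the reserved handle over
 * to the io_end.
 */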
2137 static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd)
2138 {
2139         struct inode *inode = mpd->inode;
2140         struct ext4_map_blocks *map = &mpd->map;
2141         int get_blocks_flags;
2142         int err, dioread_nolock;
2143
2144         trace_ext4_da_write_pages_extent(inode, map);
2145         /*
2146          * Call ext4_map_blocks() to allocate any delayed allocation blocks, or
2147          * to convert an unwritten extent to be initialized (in the case
2148          * where we have written into one or more preallocated blocks).  It is
2149          * possible that we're going to need more metadata blocks than
2150          * previously reserved. However we must not fail because we're in
2151          * writeback and there is nothing we can do about it, so a failure
2152          * might result in data loss.  So use reserved blocks to allocate metadata if
2153          * possible.
2154          *
2155          * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE if
2156          * the blocks in question are delalloc blocks.  This indicates
2157          * that the blocks and quotas have already been checked when
2158          * the data was copied into the page cache.
2159          */
2160         get_blocks_flags = EXT4_GET_BLOCKS_CREATE |
2161                            EXT4_GET_BLOCKS_METADATA_NOFAIL |
2162                            EXT4_GET_BLOCKS_IO_SUBMIT;
2163         dioread_nolock = ext4_should_dioread_nolock(inode);
2164         if (dioread_nolock)
2165                 get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT;
2166         if (map->m_flags & BIT(BH_Delay))
2167                 get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;
2168
2169         err = ext4_map_blocks(handle, inode, map, get_blocks_flags);
2170         if (err < 0)
2171                 return err;
2172         if (dioread_nolock && (map->m_flags & EXT4_MAP_UNWRITTEN)) {
2173                 if (!mpd->io_submit.io_end->handle &&
2174                     ext4_handle_valid(handle)) {
2175                         mpd->io_submit.io_end->handle = handle->h_rsv_handle;
2176                         handle->h_rsv_handle = NULL;
2177                 }
2178                 ext4_set_io_unwritten_flag(inode, mpd->io_submit.io_end);
2179         }
2180
2181         BUG_ON(map->m_len == 0);
2182         return 0;
2183 }
2184
2185 /*
2186  * mpage_map_and_submit_extent - map extent starting at mpd->lblk of length
2187  *                               mpd->len and submit pages underlying it for IO
2188  *
2189  * @handle - handle for journal operations
2190  * @mpd - extent to map
2191  * @give_up_on_write - we set this to true iff there is a fatal error and there
2192  *                     is no hope of writing the data. The caller should discard
2193  *                     dirty pages to avoid infinite loops.
2194  *
2195  * The function maps extent starting at mpd->lblk of length mpd->len. If it is
2196  * delayed, blocks are allocated, if it is unwritten, we may need to convert
2197  * them to initialized or split the described range from larger unwritten
2198  * extent. Note that we need not map all the described range since allocation
2199  * can return fewer blocks, or the range may be covered by more unwritten extents. We
2200  * cannot map more because we are limited by reserved transaction credits. On
2201  * the other hand we always make sure that the last touched page is fully
2202  * mapped so that it can be written out (and thus forward progress is
2203  * guaranteed). After mapping we submit all mapped pages for IO.
2204  */
2205 static int mpage_map_and_submit_extent(handle_t *handle,
2206                                        struct mpage_da_data *mpd,
2207                                        bool *give_up_on_write)
2208 {
2209         struct inode *inode = mpd->inode;
2210         struct ext4_map_blocks *map = &mpd->map;
2211         int err;
2212         loff_t disksize;
2213         int progress = 0;
2214         ext4_io_end_t *io_end = mpd->io_submit.io_end;
2215         struct ext4_io_end_vec *io_end_vec;
2216
2217         io_end_vec = ext4_alloc_io_end_vec(io_end);
2218         if (IS_ERR(io_end_vec))
2219                 return PTR_ERR(io_end_vec);
2220         io_end_vec->offset = ((loff_t)map->m_lblk) << inode->i_blkbits;
2221         do {
2222                 err = mpage_map_one_extent(handle, mpd);
2223                 if (err < 0) {
2224                         struct super_block *sb = inode->i_sb;
2225
2226                         if (ext4_forced_shutdown(sb))
2227                                 goto invalidate_dirty_pages;
2228                         /*
2229                          * Let the upper layers retry transient errors.
2230                          * In the case of ENOSPC, if ext4_count_free_clusters()
2231                          * is non-zero, a commit should free up blocks.
2232                          */
2233                         if ((err == -ENOMEM) ||
2234                             (err == -ENOSPC && ext4_count_free_clusters(sb))) {
2235                                 if (progress)
2236                                         goto update_disksize;
2237                                 return err;
2238                         }
2239                         ext4_msg(sb, KERN_CRIT,
2240                                  "Delayed block allocation failed for "
2241                                  "inode %lu at logical offset %llu with"
2242                                  " max blocks %u with error %d",
2243                                  inode->i_ino,
2244                                  (unsigned long long)map->m_lblk,
2245                                  (unsigned)map->m_len, -err);
2246                         ext4_msg(sb, KERN_CRIT,
2247                                  "This should not happen!! Data will "
2248                                  "be lost\n");
2249                         if (err == -ENOSPC)
2250                                 ext4_print_free_blocks(inode);
2251                 invalidate_dirty_pages:
2252                         *give_up_on_write = true;
2253                         return err;
2254                 }
2255                 progress = 1;
2256                 /*
2257                  * Update buffer state, submit mapped pages, and get us new
2258                  * extent to map
2259                  */
2260                 err = mpage_map_and_submit_buffers(mpd);
2261                 if (err < 0)
2262                         goto update_disksize;
2263         } while (map->m_len);
2264
2265 update_disksize:
2266         /*
2267          * Update on-disk size after IO is submitted.  Races with
2268          * truncate are avoided by checking i_size under i_data_sem.
2269          */
2270         disksize = ((loff_t)mpd->first_page) << PAGE_SHIFT;
2271         if (disksize > READ_ONCE(EXT4_I(inode)->i_disksize)) {
2272                 int err2;
2273                 loff_t i_size;
2274
2275                 down_write(&EXT4_I(inode)->i_data_sem);
2276                 i_size = i_size_read(inode);
2277                 if (disksize > i_size)
2278                         disksize = i_size;
2279                 if (disksize > EXT4_I(inode)->i_disksize)
2280                         EXT4_I(inode)->i_disksize = disksize;
2281                 up_write(&EXT4_I(inode)->i_data_sem);
2282                 err2 = ext4_mark_inode_dirty(handle, inode);
2283                 if (err2) {
2284                         ext4_error_err(inode->i_sb, -err2,
2285                                        "Failed to mark inode %lu dirty",
2286                                        inode->i_ino);
2287                 }
2288                 if (!err)
2289                         err = err2;
2290         }
2291         return err;
2292 }
2293
2294 /*
2295  * Calculate the total number of credits to reserve for one writepages
2296  * iteration. This is called from ext4_writepages(). We map an extent of
2297  * up to MAX_WRITEPAGES_EXTENT_LEN blocks and then we go on and finish mapping
2298  * the last partial page. So in total we can map MAX_WRITEPAGES_EXTENT_LEN +
2299  * bpp - 1 blocks in bpp different extents.
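 * For example, with 1k blocks on 4k pages bpp is 4, so one iteration can
 * map up to 2048 + 3 = 2051 blocks spread over at most 4 extents.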
2300  */
2301 static int ext4_da_writepages_trans_blocks(struct inode *inode)
2302 {
2303         int bpp = ext4_journal_blocks_per_page(inode);
2304
2305         return ext4_meta_trans_blocks(inode,
2306                                 MAX_WRITEPAGES_EXTENT_LEN + bpp - 1, bpp);
2307 }
2308
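/*
 * ext4_journal_folio_buffers - add folio buffers to the running transaction
 * for data=journal writeback: get write access, dirty the journalled data,
 * record the inode and range for fdatasync(), and note the transaction in
 * i_datasync_tid.
 */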
2309 static int ext4_journal_folio_buffers(handle_t *handle, struct folio *folio,
2310                                      size_t len)
2311 {
2312         struct buffer_head *page_bufs = folio_buffers(folio);
2313         struct inode *inode = folio->mapping->host;
2314         int ret, err;
2315
2316         ret = ext4_walk_page_buffers(handle, inode, page_bufs, 0, len,
2317                                      NULL, do_journal_get_write_access);
2318         err = ext4_walk_page_buffers(handle, inode, page_bufs, 0, len,
2319                                      NULL, write_end_fn);
2320         if (ret == 0)
2321                 ret = err;
2322         err = ext4_jbd2_inode_add_write(handle, inode, folio_pos(folio), len);
2323         if (ret == 0)
2324                 ret = err;
2325         EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
2326
2327         return ret;
2328 }
2329
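/*
 * Journal a single folio during writeback, trimming the length at i_size
 * unless fs-verity is writing Merkle tree blocks past EOF.
 */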
2330 static int mpage_journal_page_buffers(handle_t *handle,
2331                                       struct mpage_da_data *mpd,
2332                                       struct folio *folio)
2333 {
2334         struct inode *inode = mpd->inode;
2335         loff_t size = i_size_read(inode);
2336         size_t len = folio_size(folio);
2337
2338         folio_clear_checked(folio);
2339         mpd->wbc->nr_to_write--;
2340
2341         if (folio_pos(folio) + len > size &&
2342             !ext4_verity_in_progress(inode))
2343                 len = size - folio_pos(folio);
2344
2345         return ext4_journal_folio_buffers(handle, folio, len);
2346 }
2347
2348 /*
2349  * mpage_prepare_extent_to_map - find & lock contiguous range of dirty pages
2350  *                               needing mapping, submit mapped pages
2351  *
2352  * @mpd - where to look for pages
2353  *
2354  * Walk dirty pages in the mapping. If they are fully mapped, submit them for
2355  * IO immediately. If we cannot map blocks, we submit just the already mapped
2356  * buffers in the page for IO and keep the page dirty. When we can map blocks
2357  * and we find a page which isn't mapped we start accumulating an extent of buffers
2358  * underlying these pages that needs mapping (formed by either delayed or
2359  * unwritten buffers). We also lock the pages containing these buffers. The
2360  * extent found is returned in @mpd structure (starting at mpd->lblk with
2361  * length mpd->len blocks).
2362  *
2363  * Note that this function can attach bios to one io_end structure which are
2364  * neither logically nor physically contiguous. Although it may seem an
2365  * unnecessary complication, it is actually inevitable in blocksize < pagesize
2366  * case as we need to track IO to all buffers underlying a page in one io_end.
2367  */
2368 static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
2369 {
2370         struct address_space *mapping = mpd->inode->i_mapping;
2371         struct folio_batch fbatch;
2372         unsigned int nr_folios;
2373         pgoff_t index = mpd->first_page;
2374         pgoff_t end = mpd->last_page;
2375         xa_mark_t tag;
2376         int i, err = 0;
2377         int blkbits = mpd->inode->i_blkbits;
2378         ext4_lblk_t lblk;
2379         struct buffer_head *head;
2380         handle_t *handle = NULL;
2381         int bpp = ext4_journal_blocks_per_page(mpd->inode);
2382
2383         if (mpd->wbc->sync_mode == WB_SYNC_ALL || mpd->wbc->tagged_writepages)
2384                 tag = PAGECACHE_TAG_TOWRITE;
2385         else
2386                 tag = PAGECACHE_TAG_DIRTY;
2387
2388         mpd->map.m_len = 0;
2389         mpd->next_page = index;
2390         if (ext4_should_journal_data(mpd->inode)) {
2391                 handle = ext4_journal_start(mpd->inode, EXT4_HT_WRITE_PAGE,
2392                                             bpp);
2393                 if (IS_ERR(handle))
2394                         return PTR_ERR(handle);
2395         }
2396         folio_batch_init(&fbatch);
2397         while (index <= end) {
2398                 nr_folios = filemap_get_folios_tag(mapping, &index, end,
2399                                 tag, &fbatch);
2400                 if (nr_folios == 0)
2401                         break;
2402
2403                 for (i = 0; i < nr_folios; i++) {
2404                         struct folio *folio = fbatch.folios[i];
2405
2406                         /*
2407                          * Accumulated enough dirty pages? This doesn't apply
2408                          * to WB_SYNC_ALL mode. For integrity sync we have to
2409                          * keep going because someone may be concurrently
2410                          * dirtying pages, and we might have synced a lot of
2411                          * newly appeared dirty pages, but have not synced all
2412                          * of the old dirty pages.
2413                          */
2414                         if (mpd->wbc->sync_mode == WB_SYNC_NONE &&
2415                             mpd->wbc->nr_to_write <=
2416                             mpd->map.m_len >> (PAGE_SHIFT - blkbits))
2417                                 goto out;
2418
2419                         /* If we can't merge this page, we are done. */
2420                         if (mpd->map.m_len > 0 && mpd->next_page != folio->index)
2421                                 goto out;
2422
2423                         if (handle) {
2424                                 err = ext4_journal_ensure_credits(handle, bpp,
2425                                                                   0);
2426                                 if (err < 0)
2427                                         goto out;
2428                         }
2429
2430                         folio_lock(folio);
2431                         /*
2432                          * If the page is no longer dirty, or its mapping no
2433                          * longer corresponds to the inode we are writing (which
2434                          * means it has been truncated or invalidated), or the
2435                          * page is already under writeback and we are not doing
2436                          * a data integrity writeback, skip the page.
2437                          */
2438                         if (!folio_test_dirty(folio) ||
2439                             (folio_test_writeback(folio) &&
2440                              (mpd->wbc->sync_mode == WB_SYNC_NONE)) ||
2441                             unlikely(folio->mapping != mapping)) {
2442                                 folio_unlock(folio);
2443                                 continue;
2444                         }
2445
2446                         folio_wait_writeback(folio);
2447                         BUG_ON(folio_test_writeback(folio));
2448
2449                         /*
2450                          * Should never happen but for buggy code in
2451                          * other subsystems that call
2452                          * set_page_dirty() without properly warning
2453                          * the file system first.  See [1] for more
2454                          * information.
2455                          *
2456                          * [1] https://lore.kernel.org/linux-mm/20180103100430.GE4911@quack2.suse.cz
2457                          */
2458                         if (!folio_buffers(folio)) {
2459                                 ext4_warning_inode(mpd->inode, "page %lu does not have buffers attached", folio->index);
2460                                 folio_clear_dirty(folio);
2461                                 folio_unlock(folio);
2462                                 continue;
2463                         }
2464
2465                         if (mpd->map.m_len == 0)
2466                                 mpd->first_page = folio->index;
2467                         mpd->next_page = folio_next_index(folio);
2468                         /*
2469                          * Writeout when we cannot modify metadata is simple.
2470                          * Just submit the page. For data=journal mode we
2471                          * first handle writeout of the page for checkpoint and
2472                          * only after that handle delayed page dirtying. This
2473                          * makes sure current data is checkpointed to the final
2474                          * location before possibly journalling it again which
2475                          * is desirable when the page is frequently dirtied
2476                          * through a pin.
2477                          */
2478                         if (!mpd->can_map) {
2479                                 err = mpage_submit_folio(mpd, folio);
2480                                 if (err < 0)
2481                                         goto out;
2482                                 /* Pending dirtying of journalled data? */
2483                                 if (folio_test_checked(folio)) {
2484                                         err = mpage_journal_page_buffers(handle,
2485                                                 mpd, folio);
2486                                         if (err < 0)
2487                                                 goto out;
2488                                         mpd->journalled_more_data = 1;
2489                                 }
2490                                 mpage_folio_done(mpd, folio);
2491                         } else {
2492                                 /* Add all dirty buffers to mpd */
2493                                 lblk = ((ext4_lblk_t)folio->index) <<
2494                                         (PAGE_SHIFT - blkbits);
2495                                 head = folio_buffers(folio);
2496                                 err = mpage_process_page_bufs(mpd, head, head,
2497                                                 lblk);
2498                                 if (err <= 0)
2499                                         goto out;
2500                                 err = 0;
2501                         }
2502                 }
2503                 folio_batch_release(&fbatch);
2504                 cond_resched();
2505         }
2506         mpd->scanned_until_end = 1;
2507         if (handle)
2508                 ext4_journal_stop(handle);
2509         return 0;
2510 out:
2511         folio_batch_release(&fbatch);
2512         if (handle)
2513                 ext4_journal_stop(handle);
2514         return err;
2515 }
2516
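/*
 * ext4_do_writepages - main writeback loop: repeatedly collect an extent
 * of dirty pages, map the blocks underlying it, and submit the resulting
 * bios until the range described by @mpd->wbc is fully scanned or
 * nr_to_write is exhausted.
 */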
2517 static int ext4_do_writepages(struct mpage_da_data *mpd)
2518 {
2519         struct writeback_control *wbc = mpd->wbc;
2520         pgoff_t writeback_index = 0;
2521         long nr_to_write = wbc->nr_to_write;
2522         int range_whole = 0;
2523         int cycled = 1;
2524         handle_t *handle = NULL;
2525         struct inode *inode = mpd->inode;
2526         struct address_space *mapping = inode->i_mapping;
2527         int needed_blocks, rsv_blocks = 0, ret = 0;
2528         struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
2529         struct blk_plug plug;
2530         bool give_up_on_write = false;
2531
2532         trace_ext4_writepages(inode, wbc);
2533
2534         /*
2535          * No pages to write? This is mainly a kludge to avoid starting
2536          * a transaction for special inodes like journal inode on last iput()
2537          * because that could violate lock ordering on umount.
2538          */
2539         if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
2540                 goto out_writepages;
2541
2542         /*
2543          * If the filesystem has aborted, it is read-only, so return
2544          * right away instead of dumping stack traces later on that
2545          * will obscure the real source of the problem.  We test
2546          * fs shutdown state instead of sb->s_flag's SB_RDONLY because
2547          * the latter could be true if the filesystem is mounted
2548          * read-only, and in that case, ext4_writepages should
2549          * *never* be called, so if that ever happens, we would want
2550          * the stack trace.
2551          */
2552         if (unlikely(ext4_forced_shutdown(mapping->host->i_sb))) {
2553                 ret = -EROFS;
2554                 goto out_writepages;
2555         }
2556
2557         /*
2558          * If we have inline data and arrive here, it means that
2559          * we will soon create the block for the 1st page, so
2560          * we'd better clear the inline data here.
2561          */
2562         if (ext4_has_inline_data(inode)) {
2563                 /* Just inode will be modified... */
2564                 handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
2565                 if (IS_ERR(handle)) {
2566                         ret = PTR_ERR(handle);
2567                         goto out_writepages;
2568                 }
2569                 BUG_ON(ext4_test_inode_state(inode,
2570                                 EXT4_STATE_MAY_INLINE_DATA));
2571                 ext4_destroy_inline_data(handle, inode);
2572                 ext4_journal_stop(handle);
2573         }
2574
2575         /*
2576          * data=journal mode does not do delalloc, so we just need to write
2577          * out / journal the already-mapped buffers. On the other hand we need
2578          * to commit a transaction to make data stable. We expect all the data
2579          * to be already in the journal (the only exceptions are DMA pinned
2580          * pages dirtied behind our back), so we commit the transaction here
2581          * and run the writeback loop to checkpoint them. The checkpointing is
2582          * not actually necessary to make data persistent, *but* quite a few
2583          * places (extent shifting operations, fsverity, ...) depend on being
2584          * able to drop pagecache pages after calling filemap_write_and_wait()
2585          * and for that checkpointing needs to happen.
2586          */
2587         if (ext4_should_journal_data(inode)) {
2588                 mpd->can_map = 0;
2589                 if (wbc->sync_mode == WB_SYNC_ALL)
2590                         ext4_fc_commit(sbi->s_journal,
2591                                        EXT4_I(inode)->i_datasync_tid);
2592         }
2593         mpd->journalled_more_data = 0;
2594
2595         if (ext4_should_dioread_nolock(inode)) {
2596                 /*
2597                  * We may need to convert up to one extent per block in
2598                  * the page and we may dirty the inode.
2599                  */
2600                 rsv_blocks = 1 + ext4_chunk_trans_blocks(inode,
2601                                                 PAGE_SIZE >> inode->i_blkbits);
2602         }
2603
2604         if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2605                 range_whole = 1;
2606
2607         if (wbc->range_cyclic) {
2608                 writeback_index = mapping->writeback_index;
2609                 if (writeback_index)
2610                         cycled = 0;
2611                 mpd->first_page = writeback_index;
2612                 mpd->last_page = -1;
2613         } else {
2614                 mpd->first_page = wbc->range_start >> PAGE_SHIFT;
2615                 mpd->last_page = wbc->range_end >> PAGE_SHIFT;
2616         }
2617
2618         ext4_io_submit_init(&mpd->io_submit, wbc);
2619 retry:
2620         if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2621                 tag_pages_for_writeback(mapping, mpd->first_page,
2622                                         mpd->last_page);
2623         blk_start_plug(&plug);
2624
2625         /*
2626          * First write back pages that don't need mapping - we can avoid
2627          * starting a transaction unnecessarily and also avoid being blocked
2628          * in the block layer on device congestion while having a transaction
2629          * started.
2630          */
2631         mpd->do_map = 0;
2632         mpd->scanned_until_end = 0;
2633         mpd->io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL);
2634         if (!mpd->io_submit.io_end) {
2635                 ret = -ENOMEM;
2636                 goto unplug;
2637         }
2638         ret = mpage_prepare_extent_to_map(mpd);
2639         /* Unlock pages we didn't use */
2640         mpage_release_unused_pages(mpd, false);
2641         /* Submit prepared bio */
2642         ext4_io_submit(&mpd->io_submit);
2643         ext4_put_io_end_defer(mpd->io_submit.io_end);
2644         mpd->io_submit.io_end = NULL;
2645         if (ret < 0)
2646                 goto unplug;
2647
2648         while (!mpd->scanned_until_end && wbc->nr_to_write > 0) {
2649                 /* For each extent of pages we use new io_end */
2650                 mpd->io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL);
2651                 if (!mpd->io_submit.io_end) {
2652                         ret = -ENOMEM;
2653                         break;
2654                 }
2655
2656                 WARN_ON_ONCE(!mpd->can_map);
2657                 /*
2658                  * We have two constraints: we find one extent to map and we
2659                  * must always write out the whole page (which makes a difference
2660                  * when blocksize < pagesize) so that we don't block on IO when
2661                  * we try to write out the rest of the page. Journalled mode is
2662                  * not supported by delalloc.
2663                  */
2664                 BUG_ON(ext4_should_journal_data(inode));
2665                 needed_blocks = ext4_da_writepages_trans_blocks(inode);
2666
2667                 /* start a new transaction */
2668                 handle = ext4_journal_start_with_reserve(inode,
2669                                 EXT4_HT_WRITE_PAGE, needed_blocks, rsv_blocks);
2670                 if (IS_ERR(handle)) {
2671                         ret = PTR_ERR(handle);
2672                         ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: "
2673                                "%ld pages, ino %lu; err %d", __func__,
2674                                 wbc->nr_to_write, inode->i_ino, ret);
2675                         /* Release allocated io_end */
2676                         ext4_put_io_end(mpd->io_submit.io_end);
2677                         mpd->io_submit.io_end = NULL;
2678                         break;
2679                 }
2680                 mpd->do_map = 1;
2681
2682                 trace_ext4_da_write_pages(inode, mpd->first_page, wbc);
2683                 ret = mpage_prepare_extent_to_map(mpd);
2684                 if (!ret && mpd->map.m_len)
2685                         ret = mpage_map_and_submit_extent(handle, mpd,
2686                                         &give_up_on_write);
2687                 /*
2688                  * Caution: If the handle is synchronous,
2689                  * ext4_journal_stop() can wait for transaction commit
2690                  * to finish which may depend on writeback of pages to
2691                  * complete or on page lock to be released.  In that
2692                  * case, we have to wait until after we have
2693                  * submitted all the IO, released page locks we hold,
2694                  * and dropped io_end reference (for extent conversion
2695                  * to be able to complete) before stopping the handle.
2696                  */
2697                 if (!ext4_handle_valid(handle) || handle->h_sync == 0) {
2698                         ext4_journal_stop(handle);
2699                         handle = NULL;
2700                         mpd->do_map = 0;
2701                 }
2702                 /* Unlock pages we didn't use */
2703                 mpage_release_unused_pages(mpd, give_up_on_write);
2704                 /* Submit prepared bio */
2705                 ext4_io_submit(&mpd->io_submit);
2706
2707                 /*
2708                  * Drop the io_end reference we got from init. We have
2709                  * to be careful and use deferred io_end finishing if
2710                  * we are still holding the transaction, as we can
2711                  * release the last reference to io_end, which may end
2712                  * up doing unwritten extent conversion.
2713                  */
2714                 if (handle) {
2715                         ext4_put_io_end_defer(mpd->io_submit.io_end);
2716                         ext4_journal_stop(handle);
2717                 } else
2718                         ext4_put_io_end(mpd->io_submit.io_end);
2719                 mpd->io_submit.io_end = NULL;
2720
2721                 if (ret == -ENOSPC && sbi->s_journal) {
2722                         /*
2723                          * Commit the transaction, which would free the
2724                          * blocks released in the transaction,
2725                          * and try again.
2726                          */
2727                         jbd2_journal_force_commit_nested(sbi->s_journal);
2728                         ret = 0;
2729                         continue;
2730                 }
2731                 /* Fatal error - ENOMEM, EIO... */
2732                 if (ret)
2733                         break;
2734         }
2735 unplug:
2736         blk_finish_plug(&plug);
2737         if (!ret && !cycled && wbc->nr_to_write > 0) {
2738                 cycled = 1;
2739                 mpd->last_page = writeback_index - 1;
2740                 mpd->first_page = 0;
2741                 goto retry;
2742         }
2743
2744         /* Update index */
2745         if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2746                 /*
2747                  * Set the writeback_index so that range_cyclic
2748                  * mode will write it back later
2749                  */
2750                 mapping->writeback_index = mpd->first_page;
2751
2752 out_writepages:
2753         trace_ext4_writepages_result(inode, wbc, ret,
2754                                      nr_to_write - wbc->nr_to_write);
2755         return ret;
2756 }
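/*
 * Editor's illustration (a minimal sketch, not code from this file): the
 * machinery above is driven by a writeback_control describing the range
 * and sync mode. A whole-file, wait-for-completion flush might be set up
 * roughly like this (compare ext4_normal_submit_inode_data_buffers()
 * below, which builds a similar request with can_map = 0):
 *
 *	struct writeback_control wbc = {
 *		.sync_mode   = WB_SYNC_ALL,	// wait for I/O completion
 *		.nr_to_write = LONG_MAX,	// no page budget
 *		.range_start = 0,
 *		.range_end   = LLONG_MAX,	// triggers the range_whole case
 *	};
 *	struct mpage_da_data mpd = {
 *		.inode   = inode,
 *		.wbc     = &wbc,
 *		.can_map = 1,			// may allocate blocks
 *	};
 *	int err = ext4_do_writepages(&mpd);
 *
 * With range_start == 0 and range_end == LLONG_MAX, range_whole is set and
 * mapping->writeback_index is updated once the scan completes.
 */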
2757
2758 static int ext4_writepages(struct address_space *mapping,
2759                            struct writeback_control *wbc)
2760 {
2761         struct super_block *sb = mapping->host->i_sb;
2762         struct mpage_da_data mpd = {
2763                 .inode = mapping->host,
2764                 .wbc = wbc,
2765                 .can_map = 1,
2766         };
2767         int ret;
2768         int alloc_ctx;
2769
2770         if (unlikely(ext4_forced_shutdown(sb)))
2771                 return -EIO;
2772
2773         alloc_ctx = ext4_writepages_down_read(sb);
2774         ret = ext4_do_writepages(&mpd);
2775         /*
2776          * For data=journal writeback we could have come across pages marked
2777          * for delayed dirtying (PageChecked) which were just added to the
2778          * running transaction. Try once more to get them to stable storage.
2779          */
2780         if (!ret && mpd.journalled_more_data)
2781                 ret = ext4_do_writepages(&mpd);
2782         ext4_writepages_up_read(sb, alloc_ctx);
2783
2784         return ret;
2785 }
2786
2787 int ext4_normal_submit_inode_data_buffers(struct jbd2_inode *jinode)
2788 {
2789         struct writeback_control wbc = {
2790                 .sync_mode = WB_SYNC_ALL,
2791                 .nr_to_write = LONG_MAX,
2792                 .range_start = jinode->i_dirty_start,
2793                 .range_end = jinode->i_dirty_end,
2794         };
2795         struct mpage_da_data mpd = {
2796                 .inode = jinode->i_vfs_inode,
2797                 .wbc = &wbc,
2798                 .can_map = 0,
2799         };
2800         return ext4_do_writepages(&mpd);
2801 }
2802
2803 static int ext4_dax_writepages(struct address_space *mapping,
2804                                struct writeback_control *wbc)
2805 {
2806         int ret;
2807         long nr_to_write = wbc->nr_to_write;
2808         struct inode *inode = mapping->host;
2809         int alloc_ctx;
2810
2811         if (unlikely(ext4_forced_shutdown(inode->i_sb)))
2812                 return -EIO;
2813
2814         alloc_ctx = ext4_writepages_down_read(inode->i_sb);
2815         trace_ext4_writepages(inode, wbc);
2816
2817         ret = dax_writeback_mapping_range(mapping,
2818                                           EXT4_SB(inode->i_sb)->s_daxdev, wbc);
2819         trace_ext4_writepages_result(inode, wbc, ret,
2820                                      nr_to_write - wbc->nr_to_write);
2821         ext4_writepages_up_read(inode->i_sb, alloc_ctx);
2822         return ret;
2823 }
2824
2825 static int ext4_nonda_switch(struct super_block *sb)
2826 {
2827         s64 free_clusters, dirty_clusters;
2828         struct ext4_sb_info *sbi = EXT4_SB(sb);
2829
2830         /*
2831          * Switch to non-delalloc mode if we are running low on free
2832          * blocks. The free block accounting via percpu counters can get
2833          * slightly wrong with percpu_counter_batch getting accumulated
2834          * on each CPU without updating the global counters.
2835          * Delalloc needs accurate free block accounting. So switch
2836          * to non-delalloc when we are near the error range.
2837          */
2838         free_clusters =
2839                 percpu_counter_read_positive(&sbi->s_freeclusters_counter);
2840         dirty_clusters =
2841                 percpu_counter_read_positive(&sbi->s_dirtyclusters_counter);
2842         /*
2843          * Start pushing delalloc when 1/2 of free blocks are dirty.
2844          */
2845         if (dirty_clusters && (free_clusters < 2 * dirty_clusters))
2846                 try_to_writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE);
2847
2848         if (2 * free_clusters < 3 * dirty_clusters ||
2849             free_clusters < (dirty_clusters + EXT4_FREECLUSTERS_WATERMARK)) {
2850                 /*
2851                  * The free block count is less than 150% of the dirty
2852                  * blocks, or free blocks are below the watermark.
2853                  */
2854                 return 1;
2855         }
2856         return 0;
2857 }
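/*
 * Worked example (illustrative numbers, not from the source): with
 * free_clusters = 120 and dirty_clusters = 100, the writeback kick above
 * fires because 120 < 2 * 100, and the switch also fires because
 * 2 * 120 = 240 < 3 * 100 = 300 (free space below 150% of dirty), so
 * callers fall back to non-delalloc writes until the pressure eases.
 */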
2858
2859 static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
2860                                loff_t pos, unsigned len,
2861                                struct page **pagep, void **fsdata)
2862 {
2863         int ret, retries = 0;
2864         struct folio *folio;
2865         pgoff_t index;
2866         struct inode *inode = mapping->host;
2867
2868         if (unlikely(ext4_forced_shutdown(inode->i_sb)))
2869                 return -EIO;
2870
2871         index = pos >> PAGE_SHIFT;
2872
2873         if (ext4_nonda_switch(inode->i_sb) || ext4_verity_in_progress(inode)) {
2874                 *fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
2875                 return ext4_write_begin(file, mapping, pos,
2876                                         len, pagep, fsdata);
2877         }
2878         *fsdata = (void *)0;
2879         trace_ext4_da_write_begin(inode, pos, len);
2880
2881         if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
2882                 ret = ext4_da_write_inline_data_begin(mapping, inode, pos, len,
2883                                                       pagep, fsdata);
2884                 if (ret < 0)
2885                         return ret;
2886                 if (ret == 1)
2887                         return 0;
2888         }
2889
2890 retry:
2891         folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
2892                         mapping_gfp_mask(mapping));
2893         if (IS_ERR(folio))
2894                 return PTR_ERR(folio);
2895
2896         /* In case writeback began while the folio was unlocked */
2897         folio_wait_stable(folio);
2898
2899 #ifdef CONFIG_FS_ENCRYPTION
2900         ret = ext4_block_write_begin(folio, pos, len, ext4_da_get_block_prep);
2901 #else
2902         ret = __block_write_begin(&folio->page, pos, len, ext4_da_get_block_prep);
2903 #endif
2904         if (ret < 0) {
2905                 folio_unlock(folio);
2906                 folio_put(folio);
2907                 /*
2908                  * block_write_begin may have instantiated a few blocks
2909                  * outside i_size.  Trim these off again. We don't need
2910                  * i_size_read because we hold the inode lock.
2911                  */
2912                 if (pos + len > inode->i_size)
2913                         ext4_truncate_failed_write(inode);
2914
2915                 if (ret == -ENOSPC &&
2916                     ext4_should_retry_alloc(inode->i_sb, &retries))
2917                         goto retry;
2918                 return ret;
2919         }
2920
2921         *pagep = &folio->page;
2922         return ret;
2923 }
2924
2925 /*
2926  * Check if we should update i_disksize when a write to the end of the
2927  * file does not require block allocation.
2928  */
2929 static int ext4_da_should_update_i_disksize(struct folio *folio,
2930                                             unsigned long offset)
2931 {
2932         struct buffer_head *bh;
2933         struct inode *inode = folio->mapping->host;
2934         unsigned int idx;
2935         int i;
2936
2937         bh = folio_buffers(folio);
2938         idx = offset >> inode->i_blkbits;
2939
2940         for (i = 0; i < idx; i++)
2941                 bh = bh->b_this_page;
2942
2943         if (!buffer_mapped(bh) || buffer_delay(bh) || buffer_unwritten(bh))
2944                 return 0;
2945         return 1;
2946 }
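/*
 * Worked example (assumed geometry): with 1K blocks on 4K pages,
 * i_blkbits = 10, so offset 2500 gives idx = 2500 >> 10 = 2 and the loop
 * walks to the third buffer_head of the folio. Only if that buffer is
 * mapped and neither delayed nor unwritten does the function return 1,
 * i.e. i_disksize may be pushed forward immediately.
 */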
2947
2948 static int ext4_da_do_write_end(struct address_space *mapping,
2949                         loff_t pos, unsigned len, unsigned copied,
2950                         struct folio *folio)
2951 {
2952         struct inode *inode = mapping->host;
2953         loff_t old_size = inode->i_size;
2954         bool disksize_changed = false;
2955         loff_t new_i_size;
2956
2957         /*
2958          * block_write_end() will mark the inode as dirty with the I_DIRTY_PAGES
2959          * flag, which is all that's needed to trigger page writeback.
2960          */
2961         copied = block_write_end(NULL, mapping, pos, len, copied,
2962                         &folio->page, NULL);
2963         new_i_size = pos + copied;
2964
2965         /*
2966          * It's important to update i_size while still holding the folio
2967          * lock, because folio writeout could otherwise come in and zero
2968          * beyond i_size.
2969          *
2970          * Since we are holding the inode lock, we are sure i_disksize <=
2971          * i_size. We also know that if i_disksize < i_size, there are
2972          * delalloc writes pending in the range up to i_size. If the end of
2973          * the current write is <= i_size, there's no need to touch
2974          * i_disksize since writeback will push i_disksize up to i_size
2975          * eventually. If the end of the current write is > i_size and inside
2976          * an allocated block, which ext4_da_should_update_i_disksize() has
2977          * checked, we need to update i_disksize here because ext4_writepages()
2978          * paths that do not allocate blocks will not update i_disksize.
2979          */
2980         if (new_i_size > inode->i_size) {
2981                 unsigned long end;
2982
2983                 i_size_write(inode, new_i_size);
2984                 end = (new_i_size - 1) & (PAGE_SIZE - 1);
2985                 if (copied && ext4_da_should_update_i_disksize(folio, end)) {
2986                         ext4_update_i_disksize(inode, new_i_size);
2987                         disksize_changed = true;
2988                 }
2989         }
2990
2991         folio_unlock(folio);
2992         folio_put(folio);
2993
2994         if (old_size < pos)
2995                 pagecache_isize_extended(inode, old_size, pos);
2996
2997         if (disksize_changed) {
2998                 handle_t *handle;
2999
3000                 handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
3001                 if (IS_ERR(handle))
3002                         return PTR_ERR(handle);
3003                 ext4_mark_inode_dirty(handle, inode);
3004                 ext4_journal_stop(handle);
3005         }
3006
3007         return copied;
3008 }
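/*
 * Worked example (illustrative numbers): with i_size = i_disksize = 8192
 * and copied = 1000 bytes at pos = 8192, new_i_size = 9192 > i_size, so
 * i_size is pushed to 9192 under the folio lock. Whether i_disksize
 * follows immediately depends on ext4_da_should_update_i_disksize(): if
 * the buffer covering byte 9191 is delayed, i_disksize stays at 8192 and
 * writeback pushes it up later.
 */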
3009
3010 static int ext4_da_write_end(struct file *file,
3011                              struct address_space *mapping,
3012                              loff_t pos, unsigned len, unsigned copied,
3013                              struct page *page, void *fsdata)
3014 {
3015         struct inode *inode = mapping->host;
3016         int write_mode = (int)(unsigned long)fsdata;
3017         struct folio *folio = page_folio(page);
3018
3019         if (write_mode == FALL_BACK_TO_NONDELALLOC)
3020                 return ext4_write_end(file, mapping, pos,
3021                                       len, copied, &folio->page, fsdata);
3022
3023         trace_ext4_da_write_end(inode, pos, len, copied);
3024
3025         if (write_mode != CONVERT_INLINE_DATA &&
3026             ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA) &&
3027             ext4_has_inline_data(inode))
3028                 return ext4_write_inline_data_end(inode, pos, len, copied,
3029                                                   folio);
3030
3031         if (unlikely(copied < len) && !folio_test_uptodate(folio))
3032                 copied = 0;
3033
3034         return ext4_da_do_write_end(mapping, pos, len, copied, folio);
3035 }
3036
3037 /*
3038  * Force all delayed allocation blocks to be allocated for a given inode.
3039  */
3040 int ext4_alloc_da_blocks(struct inode *inode)
3041 {
3042         trace_ext4_alloc_da_blocks(inode);
3043
3044         if (!EXT4_I(inode)->i_reserved_data_blocks)
3045                 return 0;
3046
3047         /*
3048          * We do something simple for now.  The filemap_flush() will
3049          * also start triggering a write of the data blocks, which is
3050          * not strictly speaking necessary (and for users of
3051          * laptop_mode, not even desirable).  However, to do otherwise
3052          * would require replicating code paths in:
3053          *
3054          * ext4_writepages() ->
3055          *    write_cache_pages() ---> (via passed in callback function)
3056          *        __mpage_da_writepage() -->
3057          *           mpage_add_bh_to_extent()
3058          *           mpage_da_map_blocks()
3059          *
3060          * The problem is that write_cache_pages(), located in
3061          * mm/page-writeback.c, marks pages clean in preparation for
3062          * doing I/O, which is not desirable if we're not planning on
3063          * doing I/O at all.
3064          *
3065          * We could call write_cache_pages(), and then redirty all of
3066          * the pages by calling redirty_page_for_writepage() but that
3067          * would be ugly in the extreme.  So instead we would need to
3068          * replicate parts of the code in the above functions,
3069          * simplifying them because we wouldn't actually intend to
3070          * write out the pages, but rather only collect contiguous
3071          * logical block extents, call the multi-block allocator, and
3072          * then update the buffer heads with the block allocations.
3073          *
3074          * For now, though, we'll cheat by calling filemap_flush(),
3075          * which will map the blocks, and start the I/O, but not
3076          * actually wait for the I/O to complete.
3077          */
3078         return filemap_flush(inode->i_mapping);
3079 }
3080
3081 /*
3082  * bmap() is special.  It gets used by applications such as lilo and by
3083  * the swapper to find the on-disk block of a specific piece of data.
3084  *
3085  * Naturally, this is dangerous if the block concerned is still in the
3086  * journal.  If somebody makes a swapfile on an ext4 data-journaling
3087  * filesystem and enables swap, then they may get a nasty shock when the
3088  * data getting swapped to that swapfile suddenly gets overwritten by
3089  * the original zeros written out previously to the journal and
3090  * awaiting writeback in the kernel's buffer cache.
3091  *
3092  * So, if we see any bmap calls here on a modified, data-journaled file,
3093  * take extra steps to flush any blocks which might be in the cache.
3094  */
3095 static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
3096 {
3097         struct inode *inode = mapping->host;
3098         sector_t ret = 0;
3099
3100         inode_lock_shared(inode);
3101         /*
3102          * We can get here for an inline file via the FIBMAP ioctl
3103          */
3104         if (ext4_has_inline_data(inode))
3105                 goto out;
3106
3107         if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
3108             (test_opt(inode->i_sb, DELALLOC) ||
3109              ext4_should_journal_data(inode))) {
3110                 /*
3111                  * With delalloc or journalled data we want to sync the file so
3112                  * that we can make sure blocks are allocated for the file and
3113                  * the data is in place for the user to see.
3114                  */
3115                 filemap_write_and_wait(mapping);
3116         }
3117
3118         ret = iomap_bmap(mapping, block, &ext4_iomap_ops);
3119
3120 out:
3121         inode_unlock_shared(inode);
3122         return ret;
3123 }
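/*
 * Editor's illustration (hypothetical userspace sketch, not kernel code):
 * ext4_bmap() is what ultimately services the FIBMAP ioctl. A privileged
 * caller (CAP_SYS_RAWIO) mapping logical block 0 of a file might do:
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *
 *	int fd = open("/mnt/file", O_RDONLY);
 *	int blk = 0;			// in: logical block, out: physical
 *	if (fd >= 0 && ioctl(fd, FIBMAP, &blk) == 0)
 *		printf("physical block %d\n", blk);	// 0 means unmapped
 *
 * The path and error handling are placeholders; FIBMAP takes a pointer to
 * an int holding the logical block number and overwrites it in place.
 */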
3124
3125 static int ext4_read_folio(struct file *file, struct folio *folio)
3126 {
3127         int ret = -EAGAIN;
3128         struct inode *inode = folio->mapping->host;
3129
3130         trace_ext4_read_folio(inode, folio);
3131
3132         if (ext4_has_inline_data(inode))
3133                 ret = ext4_readpage_inline(inode, folio);
3134
3135         if (ret == -EAGAIN)
3136                 return ext4_mpage_readpages(inode, NULL, folio);
3137
3138         return ret;
3139 }
3140
3141 static void ext4_readahead(struct readahead_control *rac)
3142 {
3143         struct inode *inode = rac->mapping->host;
3144
3145         /* If the file has inline data, no need to do readahead. */
3146         if (ext4_has_inline_data(inode))
3147                 return;
3148
3149         ext4_mpage_readpages(inode, rac, NULL);
3150 }
3151
3152 static void ext4_invalidate_folio(struct folio *folio, size_t offset,
3153                                 size_t length)
3154 {
3155         trace_ext4_invalidate_folio(folio, offset, length);
3156
3157         /* No journalling happens on data buffers when this function is used */
3158         WARN_ON(folio_buffers(folio) && buffer_jbd(folio_buffers(folio)));
3159
3160         block_invalidate_folio(folio, offset, length);
3161 }
3162
3163 static int __ext4_journalled_invalidate_folio(struct folio *folio,
3164                                             size_t offset, size_t length)
3165 {
3166         journal_t *journal = EXT4_JOURNAL(folio->mapping->host);
3167
3168         trace_ext4_journalled_invalidate_folio(folio, offset, length);
3169
3170         /*
3171          * If it's a full truncate we just forget about the pending dirtying
3172          */
3173         if (offset == 0 && length == folio_size(folio))
3174                 folio_clear_checked(folio);
3175
3176         return jbd2_journal_invalidate_folio(journal, folio, offset, length);
3177 }
3178
3179 /* Wrapper for aops... */
3180 static void ext4_journalled_invalidate_folio(struct folio *folio,
3181                                            size_t offset,
3182                                            size_t length)
3183 {
3184         WARN_ON(__ext4_journalled_invalidate_folio(folio, offset, length) < 0);
3185 }
3186
3187 static bool ext4_release_folio(struct folio *folio, gfp_t wait)
3188 {
3189         struct inode *inode = folio->mapping->host;
3190         journal_t *journal = EXT4_JOURNAL(inode);
3191
3192         trace_ext4_release_folio(inode, folio);
3193
3194         /* Page has dirty journalled data -> cannot release */
3195         if (folio_test_checked(folio))
3196                 return false;
3197         if (journal)
3198                 return jbd2_journal_try_to_free_buffers(journal, folio);
3199         else
3200                 return try_to_free_buffers(folio);
3201 }
3202
3203 static bool ext4_inode_datasync_dirty(struct inode *inode)
3204 {
3205         journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
3206
3207         if (journal) {
3208                 if (jbd2_transaction_committed(journal,
3209                         EXT4_I(inode)->i_datasync_tid))
3210                         return false;
3211                 if (test_opt2(inode->i_sb, JOURNAL_FAST_COMMIT))
3212                         return !list_empty(&EXT4_I(inode)->i_fc_list);
3213                 return true;
3214         }
3215
3216         /* Any metadata buffers to write? */
3217         if (!list_empty(&inode->i_mapping->i_private_list))
3218                 return true;
3219         return inode->i_state & I_DIRTY_DATASYNC;
3220 }
3221
3222 static void ext4_set_iomap(struct inode *inode, struct iomap *iomap,
3223                            struct ext4_map_blocks *map, loff_t offset,
3224                            loff_t length, unsigned int flags)
3225 {
3226         u8 blkbits = inode->i_blkbits;
3227
3228         /*
3229          * Writes that span EOF might trigger an I/O size update on completion,
3230          * so consider them to be dirty for the purpose of O_DSYNC, even if
3231          * no other metadata changes are being made or are pending.
3232          */
3233         iomap->flags = 0;
3234         if (ext4_inode_datasync_dirty(inode) ||
3235             offset + length > i_size_read(inode))
3236                 iomap->flags |= IOMAP_F_DIRTY;
3237
3238         if (map->m_flags & EXT4_MAP_NEW)
3239                 iomap->flags |= IOMAP_F_NEW;
3240
3241         if (flags & IOMAP_DAX)
3242                 iomap->dax_dev = EXT4_SB(inode->i_sb)->s_daxdev;
3243         else
3244                 iomap->bdev = inode->i_sb->s_bdev;
3245         iomap->offset = (u64) map->m_lblk << blkbits;
3246         iomap->length = (u64) map->m_len << blkbits;
3247
3248         if ((map->m_flags & EXT4_MAP_MAPPED) &&
3249             !ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3250                 iomap->flags |= IOMAP_F_MERGED;
3251
3252         /*
3253          * Flags passed to ext4_map_blocks() for direct I/O writes can result
3254          * in m_flags having both the EXT4_MAP_MAPPED and EXT4_MAP_UNWRITTEN
3255          * bits set. In order for any allocated unwritten extents to be
3256          * converted into written extents correctly within the ->end_io()
3257          * handler, we need to ensure that iomap->type is set appropriately.
3258          * Hence we check whether the EXT4_MAP_UNWRITTEN bit has been set
3259          * first.
3260          */
3261         if (map->m_flags & EXT4_MAP_UNWRITTEN) {
3262                 iomap->type = IOMAP_UNWRITTEN;
3263                 iomap->addr = (u64) map->m_pblk << blkbits;
3264                 if (flags & IOMAP_DAX)
3265                         iomap->addr += EXT4_SB(inode->i_sb)->s_dax_part_off;
3266         } else if (map->m_flags & EXT4_MAP_MAPPED) {
3267                 iomap->type = IOMAP_MAPPED;
3268                 iomap->addr = (u64) map->m_pblk << blkbits;
3269                 if (flags & IOMAP_DAX)
3270                         iomap->addr += EXT4_SB(inode->i_sb)->s_dax_part_off;
3271         } else {
3272                 iomap->type = IOMAP_HOLE;
3273                 iomap->addr = IOMAP_NULL_ADDR;
3274         }
3275 }
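/*
 * Worked example: with 4K blocks (blkbits = 12), map->m_lblk = 3 and
 * map->m_len = 2 yield iomap->offset = 3 << 12 = 12288 and
 * iomap->length = 2 << 12 = 8192, i.e. the byte range [12288, 20480).
 */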
3276
3277 static int ext4_iomap_alloc(struct inode *inode, struct ext4_map_blocks *map,
3278                             unsigned int flags)
3279 {
3280         handle_t *handle;
3281         u8 blkbits = inode->i_blkbits;
3282         int ret, dio_credits, m_flags = 0, retries = 0;
3283
3284         /*
3285          * Trim the mapping request to the maximum value that we can map at
3286          * once for direct I/O.
3287          */
3288         if (map->m_len > DIO_MAX_BLOCKS)
3289                 map->m_len = DIO_MAX_BLOCKS;
3290         dio_credits = ext4_chunk_trans_blocks(inode, map->m_len);
3291
3292 retry:
3293         /*
3294          * Either we allocate blocks and then won't get an unwritten extent,
3295          * in which case we have reserved enough credits; or the blocks are
3296          * already allocated and unwritten, in which case the extent conversion
3297          * fits into the credits as well.
3298          */
3299         handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, dio_credits);
3300         if (IS_ERR(handle))
3301                 return PTR_ERR(handle);
3302
3303         /*
3304          * DAX and direct I/O are the only two operations that are currently
3305          * supported with IOMAP_WRITE.
3306          */
3307         WARN_ON(!(flags & (IOMAP_DAX | IOMAP_DIRECT)));
3308         if (flags & IOMAP_DAX)
3309                 m_flags = EXT4_GET_BLOCKS_CREATE_ZERO;
3310         /*
3311          * We use i_size instead of i_disksize here because delalloc writeback
3312          * can complete at any point during the I/O and subsequently push the
3313          * i_disksize out to i_size. This could be beyond where direct I/O is
3314          * happening and thus expose allocated blocks to direct I/O reads.
3315          */
3316         else if (((loff_t)map->m_lblk << blkbits) >= i_size_read(inode))
3317                 m_flags = EXT4_GET_BLOCKS_CREATE;
3318         else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3319                 m_flags = EXT4_GET_BLOCKS_IO_CREATE_EXT;
3320
3321         ret = ext4_map_blocks(handle, inode, map, m_flags);
3322
3323         /*
3324          * We cannot fill holes in indirect tree based inodes as that could
3325          * expose stale data in the case of a crash. Use the magic error code
3326          * to fall back to buffered I/O.
3327          */
3328         if (!m_flags && !ret)
3329                 ret = -ENOTBLK;
3330
3331         ext4_journal_stop(handle);
3332         if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
3333                 goto retry;
3334
3335         return ret;
3336 }
3337
3338
3339 static int ext4_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
3340                 unsigned flags, struct iomap *iomap, struct iomap *srcmap)
3341 {
3342         int ret;
3343         struct ext4_map_blocks map;
3344         u8 blkbits = inode->i_blkbits;
3345
3346         if ((offset >> blkbits) > EXT4_MAX_LOGICAL_BLOCK)
3347                 return -EINVAL;
3348
3349         if (WARN_ON_ONCE(ext4_has_inline_data(inode)))
3350                 return -ERANGE;
3351
3352         /*
3353          * Calculate the first and last logical blocks respectively.
3354          */
3355         map.m_lblk = offset >> blkbits;
3356         map.m_len = min_t(loff_t, (offset + length - 1) >> blkbits,
3357                           EXT4_MAX_LOGICAL_BLOCK) - map.m_lblk + 1;
3358
3359         if (flags & IOMAP_WRITE) {
3360                 /*
3361                  * We check here if the blocks are already allocated; if so, we
3362                  * don't need to start a journal txn and can directly return
3363                  * the mapping information. This can boost performance,
3364                  * especially for multi-threaded overwrite requests.
3365                  */
3366                 if (offset + length <= i_size_read(inode)) {
3367                         ret = ext4_map_blocks(NULL, inode, &map, 0);
3368                         if (ret > 0 && (map.m_flags & EXT4_MAP_MAPPED))
3369                                 goto out;
3370                 }
3371                 ret = ext4_iomap_alloc(inode, &map, flags);
3372         } else {
3373                 ret = ext4_map_blocks(NULL, inode, &map, 0);
3374         }
3375
3376         if (ret < 0)
3377                 return ret;
3378 out:
3379         /*
3380          * When inline encryption is enabled, sometimes I/O to an encrypted file
3381          * has to be broken up to guarantee DUN contiguity.  Handle this by
3382          * limiting the length of the mapping returned.
3383          */
3384         map.m_len = fscrypt_limit_io_blocks(inode, map.m_lblk, map.m_len);
3385
3386         ext4_set_iomap(inode, iomap, &map, offset, length, flags);
3387
3388         return 0;
3389 }
3390
3391 static int ext4_iomap_overwrite_begin(struct inode *inode, loff_t offset,
3392                 loff_t length, unsigned flags, struct iomap *iomap,
3393                 struct iomap *srcmap)
3394 {
3395         int ret;
3396
3397         /*
3398          * Even for writes we don't need to allocate blocks, so just pretend
3399          * we are reading to save the overhead of starting a transaction.
3400          */
3401         flags &= ~IOMAP_WRITE;
3402         ret = ext4_iomap_begin(inode, offset, length, flags, iomap, srcmap);
3403         WARN_ON_ONCE(!ret && iomap->type != IOMAP_MAPPED);
3404         return ret;
3405 }
3406
3407 static int ext4_iomap_end(struct inode *inode, loff_t offset, loff_t length,
3408                           ssize_t written, unsigned flags, struct iomap *iomap)
3409 {
3410         /*
3411          * Check to see whether an error occurred while writing out the data to
3412          * the allocated blocks. If so, return the magic error code so that we
3413          * fall back to buffered I/O and attempt to complete the remainder of
3414          * the I/O. Any blocks that may have been allocated in preparation for
3415          * the direct I/O will be reused during buffered I/O.
3416          */
3417         if (flags & (IOMAP_WRITE | IOMAP_DIRECT) && written == 0)
3418                 return -ENOTBLK;
3419
3420         return 0;
3421 }
3422
3423 const struct iomap_ops ext4_iomap_ops = {
3424         .iomap_begin            = ext4_iomap_begin,
3425         .iomap_end              = ext4_iomap_end,
3426 };
3427
3428 const struct iomap_ops ext4_iomap_overwrite_ops = {
3429         .iomap_begin            = ext4_iomap_overwrite_begin,
3430         .iomap_end              = ext4_iomap_end,
3431 };
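/*
 * Editor's note (hedged sketch): these ops tables are what ext4 hands to
 * the generic iomap layer elsewhere in the tree. A direct read path, for
 * example, would pass ext4_iomap_ops to iomap_dio_rw() along the lines of
 * the call below ("to" being the destination iov_iter); the exact
 * iomap_dio_rw() signature has changed across kernel releases, so treat
 * this as illustrative only:
 *
 *	ret = iomap_dio_rw(iocb, to, &ext4_iomap_ops, NULL, 0, NULL, 0);
 *
 * iomap then invokes ->iomap_begin() once for each mapping it needs and
 * ->iomap_end() when it is finished with each one.
 */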
3432
3433 static bool ext4_iomap_is_delalloc(struct inode *inode,
3434                                    struct ext4_map_blocks *map)
3435 {
3436         struct extent_status es;
3437         ext4_lblk_t offset = 0, end = map->m_lblk + map->m_len - 1;
3438
3439         ext4_es_find_extent_range(inode, &ext4_es_is_delayed,
3440                                   map->m_lblk, end, &es);
3441
3442         if (!es.es_len || es.es_lblk > end)
3443                 return false;
3444
3445         if (es.es_lblk > map->m_lblk) {
3446                 map->m_len = es.es_lblk - map->m_lblk;
3447                 return false;
3448         }
3449
3450         offset = map->m_lblk - es.es_lblk;
3451         map->m_len = es.es_len - offset;
3452
3453         return true;
3454 }
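/*
 * Worked example: for a query over blocks [10, 20) (m_lblk = 10,
 * m_len = 10) with a delayed extent starting at block 15, es.es_lblk (15)
 * > m_lblk (10), so m_len is trimmed to 5 and false is returned: [10, 15)
 * is reported as a plain hole and the delalloc part is found on the next
 * iteration. If the delayed extent instead covered [8, 18), offset = 2
 * and m_len = 8, so [10, 18) is reported as delalloc.
 */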
3455
3456 static int ext4_iomap_begin_report(struct inode *inode, loff_t offset,
3457                                    loff_t length, unsigned int flags,
3458                                    struct iomap *iomap, struct iomap *srcmap)
3459 {
3460         int ret;
3461         bool delalloc = false;
3462         struct ext4_map_blocks map;
3463         u8 blkbits = inode->i_blkbits;
3464
3465         if ((offset >> blkbits) > EXT4_MAX_LOGICAL_BLOCK)
3466                 return -EINVAL;
3467
3468         if (ext4_has_inline_data(inode)) {
3469                 ret = ext4_inline_data_iomap(inode, iomap);
3470                 if (ret != -EAGAIN) {
3471                         if (ret == 0 && offset >= iomap->length)
3472                                 ret = -ENOENT;
3473                         return ret;
3474                 }
3475         }
3476
3477         /*
3478          * Calculate the first and last logical block respectively.
3479          */
3480         map.m_lblk = offset >> blkbits;
3481         map.m_len = min_t(loff_t, (offset + length - 1) >> blkbits,
3482                           EXT4_MAX_LOGICAL_BLOCK) - map.m_lblk + 1;
3483
3484         /*
3485          * Fiemap callers may call for an offset beyond s_bitmap_maxbytes,
3486          * so handle it here ourselves instead of querying
3487          * ext4_map_blocks(), which would warn about it and return an
3488          * -EIO error.
3489          */
3490         if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
3491                 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3492
3493                 if (offset >= sbi->s_bitmap_maxbytes) {
3494                         map.m_flags = 0;
3495                         goto set_iomap;
3496                 }
3497         }
3498
3499         ret = ext4_map_blocks(NULL, inode, &map, 0);
3500         if (ret < 0)
3501                 return ret;
3502         if (ret == 0)
3503                 delalloc = ext4_iomap_is_delalloc(inode, &map);
3504
3505 set_iomap:
3506         ext4_set_iomap(inode, iomap, &map, offset, length, flags);
3507         if (delalloc && iomap->type == IOMAP_HOLE)
3508                 iomap->type = IOMAP_DELALLOC;
3509
3510         return 0;
3511 }
3512
3513 const struct iomap_ops ext4_iomap_report_ops = {
3514         .iomap_begin = ext4_iomap_begin_report,
3515 };
3516
3517 /*
3518  * For data=journal mode, a folio should be marked dirty only when it was
3519  * writeably mapped. When that happens, it was already attached to the
3520  * transaction and marked as jbddirty (we take care of this in
3521  * ext4_page_mkwrite()). On transaction commit, we writeprotect page mappings
3522  * so we should have nothing to do here, except for the case when someone
3523  * had the page pinned and dirtied the page through this pin (e.g. by doing
3524  * direct IO to it). In that case we'd need to attach buffers here to the
3525  * transaction but we cannot due to lock ordering.  We cannot just dirty the
3526  * folio and leave attached buffers clean, because the buffers' dirty state is
3527  * "definitive".  We cannot just set the buffers dirty or jbddirty because all
3528  * the journalling code will explode.  So what we do is to mark the folio
3529  * "pending dirty" and next time ext4_writepages() is called, attach buffers
3530  * to the transaction appropriately.
3531  */
3532 static bool ext4_journalled_dirty_folio(struct address_space *mapping,
3533                 struct folio *folio)
3534 {
3535         WARN_ON_ONCE(!folio_buffers(folio));
3536         if (folio_maybe_dma_pinned(folio))
3537                 folio_set_checked(folio);
3538         return filemap_dirty_folio(mapping, folio);
3539 }
3540
3541 static bool ext4_dirty_folio(struct address_space *mapping, struct folio *folio)
3542 {
3543         WARN_ON_ONCE(!folio_test_locked(folio) && !folio_test_dirty(folio));
3544         WARN_ON_ONCE(!folio_buffers(folio));
3545         return block_dirty_folio(mapping, folio);
3546 }
3547
3548 static int ext4_iomap_swap_activate(struct swap_info_struct *sis,
3549                                     struct file *file, sector_t *span)
3550 {
3551         return iomap_swapfile_activate(sis, file, span,
3552                                        &ext4_iomap_report_ops);
3553 }
3554
3555 static const struct address_space_operations ext4_aops = {
3556         .read_folio             = ext4_read_folio,
3557         .readahead              = ext4_readahead,
3558         .writepages             = ext4_writepages,
3559         .write_begin            = ext4_write_begin,
3560         .write_end              = ext4_write_end,
3561         .dirty_folio            = ext4_dirty_folio,
3562         .bmap                   = ext4_bmap,
3563         .invalidate_folio       = ext4_invalidate_folio,
3564         .release_folio          = ext4_release_folio,
3565         .direct_IO              = noop_direct_IO,
3566         .migrate_folio          = buffer_migrate_folio,
3567         .is_partially_uptodate  = block_is_partially_uptodate,
3568         .error_remove_folio     = generic_error_remove_folio,
3569         .swap_activate          = ext4_iomap_swap_activate,
3570 };
3571
3572 static const struct address_space_operations ext4_journalled_aops = {
3573         .read_folio             = ext4_read_folio,
3574         .readahead              = ext4_readahead,
3575         .writepages             = ext4_writepages,
3576         .write_begin            = ext4_write_begin,
3577         .write_end              = ext4_journalled_write_end,
3578         .dirty_folio            = ext4_journalled_dirty_folio,
3579         .bmap                   = ext4_bmap,
3580         .invalidate_folio       = ext4_journalled_invalidate_folio,
3581         .release_folio          = ext4_release_folio,
3582         .direct_IO              = noop_direct_IO,
3583         .migrate_folio          = buffer_migrate_folio_norefs,
3584         .is_partially_uptodate  = block_is_partially_uptodate,
3585         .error_remove_folio     = generic_error_remove_folio,
3586         .swap_activate          = ext4_iomap_swap_activate,
3587 };
3588
3589 static const struct address_space_operations ext4_da_aops = {
3590         .read_folio             = ext4_read_folio,
3591         .readahead              = ext4_readahead,
3592         .writepages             = ext4_writepages,
3593         .write_begin            = ext4_da_write_begin,
3594         .write_end              = ext4_da_write_end,
3595         .dirty_folio            = ext4_dirty_folio,
3596         .bmap                   = ext4_bmap,
3597         .invalidate_folio       = ext4_invalidate_folio,
3598         .release_folio          = ext4_release_folio,
3599         .direct_IO              = noop_direct_IO,
3600         .migrate_folio          = buffer_migrate_folio,
3601         .is_partially_uptodate  = block_is_partially_uptodate,
3602         .error_remove_folio     = generic_error_remove_folio,
3603         .swap_activate          = ext4_iomap_swap_activate,
3604 };
3605
3606 static const struct address_space_operations ext4_dax_aops = {
3607         .writepages             = ext4_dax_writepages,
3608         .direct_IO              = noop_direct_IO,
3609         .dirty_folio            = noop_dirty_folio,
3610         .bmap                   = ext4_bmap,
3611         .swap_activate          = ext4_iomap_swap_activate,
3612 };
3613
3614 void ext4_set_aops(struct inode *inode)
3615 {
3616         switch (ext4_inode_journal_mode(inode)) {
3617         case EXT4_INODE_ORDERED_DATA_MODE:
3618         case EXT4_INODE_WRITEBACK_DATA_MODE:
3619                 break;
3620         case EXT4_INODE_JOURNAL_DATA_MODE:
3621                 inode->i_mapping->a_ops = &ext4_journalled_aops;
3622                 return;
3623         default:
3624                 BUG();
3625         }
3626         if (IS_DAX(inode))
3627                 inode->i_mapping->a_ops = &ext4_dax_aops;
3628         else if (test_opt(inode->i_sb, DELALLOC))
3629                 inode->i_mapping->a_ops = &ext4_da_aops;
3630         else
3631                 inode->i_mapping->a_ops = &ext4_aops;
3632 }
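/*
 * Summary of the selection above, in the order the checks apply:
 *
 *	data=journal			-> ext4_journalled_aops
 *	DAX inode			-> ext4_dax_aops
 *	ordered/writeback + delalloc	-> ext4_da_aops
 *	ordered/writeback, nodelalloc	-> ext4_aops
 */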
3633
3634 /*
3635  * Here we can't skip an unwritten buffer even though it usually reads zero,
3636  * because it might have data in pagecache (e.g., if called from ext4_zero_range,
3637  * ext4_punch_hole, etc.) which needs to be properly zeroed out. Otherwise a
3638  * racing writeback can come later and flush the stale pagecache to disk.
3639  */
3640 static int __ext4_block_zero_page_range(handle_t *handle,
3641                 struct address_space *mapping, loff_t from, loff_t length)
3642 {
3643         ext4_fsblk_t index = from >> PAGE_SHIFT;
3644         unsigned offset = from & (PAGE_SIZE-1);
3645         unsigned blocksize, pos;
3646         ext4_lblk_t iblock;
3647         struct inode *inode = mapping->host;
3648         struct buffer_head *bh;
3649         struct folio *folio;
3650         int err = 0;
3651
3652         folio = __filemap_get_folio(mapping, from >> PAGE_SHIFT,
3653                                     FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
3654                                     mapping_gfp_constraint(mapping, ~__GFP_FS));
3655         if (IS_ERR(folio))
3656                 return PTR_ERR(folio);
3657
3658         blocksize = inode->i_sb->s_blocksize;
3659
3660         iblock = index << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits);
3661
3662         bh = folio_buffers(folio);
3663         if (!bh)
3664                 bh = create_empty_buffers(folio, blocksize, 0);
3665
3666         /* Find the buffer that contains "offset" */
3667         pos = blocksize;
3668         while (offset >= pos) {
3669                 bh = bh->b_this_page;
3670                 iblock++;
3671                 pos += blocksize;
3672         }
3673         if (buffer_freed(bh)) {
3674                 BUFFER_TRACE(bh, "freed: skip");
3675                 goto unlock;
3676         }
3677         if (!buffer_mapped(bh)) {
3678                 BUFFER_TRACE(bh, "unmapped");
3679                 ext4_get_block(inode, iblock, bh, 0);
3680                 /* unmapped? It's a hole - nothing to do */
3681                 if (!buffer_mapped(bh)) {
3682                         BUFFER_TRACE(bh, "still unmapped");
3683                         goto unlock;
3684                 }
3685         }
3686
3687         /* Ok, it's mapped. Make sure it's up-to-date */
3688         if (folio_test_uptodate(folio))
3689                 set_buffer_uptodate(bh);
3690
3691         if (!buffer_uptodate(bh)) {
3692                 err = ext4_read_bh_lock(bh, 0, true);
3693                 if (err)
3694                         goto unlock;
3695                 if (fscrypt_inode_uses_fs_layer_crypto(inode)) {
3696                         /* We expect the key to be set. */
3697                         BUG_ON(!fscrypt_has_encryption_key(inode));
3698                         err = fscrypt_decrypt_pagecache_blocks(folio,
3699                                                                blocksize,
3700                                                                bh_offset(bh));
3701                         if (err) {
3702                                 clear_buffer_uptodate(bh);
3703                                 goto unlock;
3704                         }
3705                 }
3706         }
3707         if (ext4_should_journal_data(inode)) {
3708                 BUFFER_TRACE(bh, "get write access");
3709                 err = ext4_journal_get_write_access(handle, inode->i_sb, bh,
3710                                                     EXT4_JTR_NONE);
3711                 if (err)
3712                         goto unlock;
3713         }
3714         folio_zero_range(folio, offset, length);
3715         BUFFER_TRACE(bh, "zeroed end of block");
3716
3717         if (ext4_should_journal_data(inode)) {
3718                 err = ext4_dirty_journalled_data(handle, bh);
3719         } else {
3720                 err = 0;
3721                 mark_buffer_dirty(bh);
3722                 if (ext4_should_order_data(inode))
3723                         err = ext4_jbd2_inode_add_write(handle, inode, from,
3724                                         length);
3725         }
3726
3727 unlock:
3728         folio_unlock(folio);
3729         folio_put(folio);
3730         return err;
3731 }
3732
3733 /*
3734  * ext4_block_zero_page_range() zeros out a mapping of length 'length'
3735  * starting from file offset 'from'.  The range to be zeroed must
3736  * be contained within one block.  If the specified range exceeds
3737  * the end of the block it will be shortened to the end of the block
3738  * that corresponds to 'from'.
3739  */
3740 static int ext4_block_zero_page_range(handle_t *handle,
3741                 struct address_space *mapping, loff_t from, loff_t length)
3742 {
3743         struct inode *inode = mapping->host;
3744         unsigned offset = from & (PAGE_SIZE-1);
3745         unsigned blocksize = inode->i_sb->s_blocksize;
3746         unsigned max = blocksize - (offset & (blocksize - 1));
3747
3748         /*
3749          * Correct the length if it does not fall between
3750          * 'from' and the end of the block.
3751          */
3752         if (length > max || length < 0)
3753                 length = max;
3754
3755         if (IS_DAX(inode)) {
3756                 return dax_zero_range(inode, from, length, NULL,
3757                                       &ext4_iomap_ops);
3758         }
3759         return __ext4_block_zero_page_range(handle, mapping, from, length);
3760 }
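/*
 * Worked example (assuming 4K pages and 4K blocks): for from = 5000,
 * offset = 5000 & 4095 = 904 and max = 4096 - 904 = 3192, so a caller
 * passing length = 8192 has it shortened to 3192 bytes and zeroing stops
 * at the end of the block containing offset 5000.
 */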
3761
3762 /*
3763  * ext4_block_truncate_page() zeroes out a mapping from file offset `from'
3764  * up to the end of the block which corresponds to `from'.
3765  * This is required during truncate. We need to physically zero the tail end
3766  * of that block so it doesn't yield old data if the file is later grown.
3767  */
3768 static int ext4_block_truncate_page(handle_t *handle,
3769                 struct address_space *mapping, loff_t from)
3770 {
3771         unsigned offset = from & (PAGE_SIZE-1);
3772         unsigned length;
3773         unsigned blocksize;
3774         struct inode *inode = mapping->host;
3775
3776         /* If we are processing an encrypted inode during orphan list handling */
3777         if (IS_ENCRYPTED(inode) && !fscrypt_has_encryption_key(inode))
3778                 return 0;
3779
3780         blocksize = inode->i_sb->s_blocksize;
3781         length = blocksize - (offset & (blocksize - 1));
3782
3783         return ext4_block_zero_page_range(handle, mapping, from, length);
3784 }
3785
3786 int ext4_zero_partial_blocks(handle_t *handle, struct inode *inode,
3787                              loff_t lstart, loff_t length)
3788 {
3789         struct super_block *sb = inode->i_sb;
3790         struct address_space *mapping = inode->i_mapping;
3791         unsigned partial_start, partial_end;
3792         ext4_fsblk_t start, end;
3793         loff_t byte_end = (lstart + length - 1);
3794         int err = 0;
3795
3796         partial_start = lstart & (sb->s_blocksize - 1);
3797         partial_end = byte_end & (sb->s_blocksize - 1);
3798
3799         start = lstart >> sb->s_blocksize_bits;
3800         end = byte_end >> sb->s_blocksize_bits;
3801
3802         /* Handle partial zero within the single block */
3803         if (start == end &&
3804             (partial_start || (partial_end != sb->s_blocksize - 1))) {
3805                 err = ext4_block_zero_page_range(handle, mapping,
3806                                                  lstart, length);
3807                 return err;
3808         }
3809         /* Handle partial zero out on the start of the range */
3810         if (partial_start) {
3811                 err = ext4_block_zero_page_range(handle, mapping,
3812                                                  lstart, sb->s_blocksize);
3813                 if (err)
3814                         return err;
3815         }
3816         /* Handle partial zero out on the end of the range */
3817         if (partial_end != sb->s_blocksize - 1)
3818                 err = ext4_block_zero_page_range(handle, mapping,
3819                                                  byte_end - partial_end,
3820                                                  partial_end + 1);
3821         return err;
3822 }
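/*
 * Worked example (1K block size): zeroing lstart = 1536, length = 2048
 * gives byte_end = 3583, partial_start = 512, partial_end = 511,
 * start = 1, end = 3. Since start != end, the single-block case is
 * skipped: the head call zeroes [1536, 2048) (shortened to the end of
 * block 1 by ext4_block_zero_page_range()) and the tail call zeroes
 * [3072, 3584). Block 2 is left for the caller to free whole.
 */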
3823
3824 int ext4_can_truncate(struct inode *inode)
3825 {
3826         if (S_ISREG(inode->i_mode))
3827                 return 1;
3828         if (S_ISDIR(inode->i_mode))
3829                 return 1;
3830         if (S_ISLNK(inode->i_mode))
3831                 return !ext4_inode_is_fast_symlink(inode);
3832         return 0;
3833 }
3834
3835 /*
3836  * We have to make sure i_disksize gets properly updated before we truncate
3837  * the page cache due to hole punching or zero range. Otherwise the i_disksize
3838  * update can get lost, as it may have been postponed until writeback
3839  * submission, and that will never happen once we truncate the page cache.
3840  */
3841 int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset,
3842                                       loff_t len)
3843 {
3844         handle_t *handle;
3845         int ret;
3846
3847         loff_t size = i_size_read(inode);
3848
3849         WARN_ON(!inode_is_locked(inode));
3850         if (offset > size || offset + len < size)
3851                 return 0;
3852
3853         if (EXT4_I(inode)->i_disksize >= size)
3854                 return 0;
3855
3856         handle = ext4_journal_start(inode, EXT4_HT_MISC, 1);
3857         if (IS_ERR(handle))
3858                 return PTR_ERR(handle);
3859         ext4_update_i_disksize(inode, size);
3860         ret = ext4_mark_inode_dirty(handle, inode);
3861         ext4_journal_stop(handle);
3862
3863         return ret;
3864 }
3865
3866 static void ext4_wait_dax_page(struct inode *inode)
3867 {
3868         filemap_invalidate_unlock(inode->i_mapping);
3869         schedule();
3870         filemap_invalidate_lock(inode->i_mapping);
3871 }
3872
3873 int ext4_break_layouts(struct inode *inode)
3874 {
3875         struct page *page;
3876         int error;
3877
3878         if (WARN_ON_ONCE(!rwsem_is_locked(&inode->i_mapping->invalidate_lock)))
3879                 return -EINVAL;
3880
3881         do {
3882                 page = dax_layout_busy_page(inode->i_mapping);
3883                 if (!page)
3884                         return 0;
3885
3886                 error = ___wait_var_event(&page->_refcount,
3887                                 atomic_read(&page->_refcount) == 1,
3888                                 TASK_INTERRUPTIBLE, 0, 0,
3889                                 ext4_wait_dax_page(inode));
3890         } while (error == 0);
3891
3892         return error;
3893 }
3894
3895 /*
3896  * ext4_punch_hole: punches a hole in a file by releasing the blocks
3897  * associated with the given offset and length
3898  *
3899  * @inode:  File inode
3900  * @offset: The offset where the hole will begin
3901  * @len:    The length of the hole
3902  *
3903  * Returns: 0 on success or negative on failure
3904  */
3905
3906 int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
3907 {
3908         struct inode *inode = file_inode(file);
3909         struct super_block *sb = inode->i_sb;
3910         ext4_lblk_t first_block, stop_block;
3911         struct address_space *mapping = inode->i_mapping;
3912         loff_t first_block_offset, last_block_offset, max_length;
3913         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3914         handle_t *handle;
3915         unsigned int credits;
3916         int ret = 0, ret2 = 0;
3917
3918         trace_ext4_punch_hole(inode, offset, length, 0);
3919
3920         /*
3921          * Write out all dirty pages to avoid race conditions,
3922          * then release them.
3923          */
3924         if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
3925                 ret = filemap_write_and_wait_range(mapping, offset,
3926                                                    offset + length - 1);
3927                 if (ret)
3928                         return ret;
3929         }
3930
3931         inode_lock(inode);
3932
3933         /* No need to punch hole beyond i_size */
3934         if (offset >= inode->i_size)
3935                 goto out_mutex;
3936
3937         /*
3938          * If the hole extends beyond i_size, set the hole
3939          * to end after the page that contains i_size.
3940          */
3941         if (offset + length > inode->i_size) {
3942                 length = inode->i_size +
3943                    PAGE_SIZE - (inode->i_size & (PAGE_SIZE - 1)) -
3944                    offset;
3945         }
3946
3947         /*
3948          * For punch hole, the length + offset needs to be within one block
3949          * before the last range. Adjust the length if it goes beyond that limit.
3950          */
3951         max_length = sbi->s_bitmap_maxbytes - inode->i_sb->s_blocksize;
3952         if (offset + length > max_length)
3953                 length = max_length - offset;
3954
3955         if (offset & (sb->s_blocksize - 1) ||
3956             (offset + length) & (sb->s_blocksize - 1)) {
3957                 /*
3958                  * Attach jinode to inode for jbd2 if we do any zeroing of
3959                  * a partial block
3960                  */
3961                 ret = ext4_inode_attach_jinode(inode);
3962                 if (ret < 0)
3963                         goto out_mutex;
3965         }
3966
3967         /* Wait for all existing dio workers; newcomers will block on i_rwsem */
3968         inode_dio_wait(inode);
3969
3970         ret = file_modified(file);
3971         if (ret)
3972                 goto out_mutex;
3973
3974         /*
3975          * Prevent page faults from reinstantiating pages we have released from
3976          * page cache.
3977          */
3978         filemap_invalidate_lock(mapping);
3979
3980         ret = ext4_break_layouts(inode);
3981         if (ret)
3982                 goto out_dio;
3983
3984         first_block_offset = round_up(offset, sb->s_blocksize);
3985         last_block_offset = round_down((offset + length), sb->s_blocksize) - 1;
3986
3987         /* Now release the pages and zero the block aligned part of pages */
3988         if (last_block_offset > first_block_offset) {
3989                 ret = ext4_update_disksize_before_punch(inode, offset, length);
3990                 if (ret)
3991                         goto out_dio;
3992                 truncate_pagecache_range(inode, first_block_offset,
3993                                          last_block_offset);
3994         }
3995
3996         if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3997                 credits = ext4_writepage_trans_blocks(inode);
3998         else
3999                 credits = ext4_blocks_for_truncate(inode);
4000         handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
4001         if (IS_ERR(handle)) {
4002                 ret = PTR_ERR(handle);
4003                 ext4_std_error(sb, ret);
4004                 goto out_dio;
4005         }
4006
4007         ret = ext4_zero_partial_blocks(handle, inode, offset,
4008                                        length);
4009         if (ret)
4010                 goto out_stop;
4011
4012         first_block = (offset + sb->s_blocksize - 1) >>
4013                 EXT4_BLOCK_SIZE_BITS(sb);
4014         stop_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb);
4015
4016         /* If there are blocks to remove, do it */
4017         if (stop_block > first_block) {
4019                 down_write(&EXT4_I(inode)->i_data_sem);
4020                 ext4_discard_preallocations(inode, 0);
4021
4022                 ext4_es_remove_extent(inode, first_block,
4023                                       stop_block - first_block);
4024
4025                 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
4026                         ret = ext4_ext_remove_space(inode, first_block,
4027                                                     stop_block - 1);
4028                 else
4029                         ret = ext4_ind_remove_space(handle, inode, first_block,
4030                                                     stop_block);
4031
4032                 up_write(&EXT4_I(inode)->i_data_sem);
4033         }
4034         ext4_fc_track_range(handle, inode, first_block, stop_block);
4035         if (IS_SYNC(inode))
4036                 ext4_handle_sync(handle);
4037
4038         inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
4039         ret2 = ext4_mark_inode_dirty(handle, inode);
4040         if (unlikely(ret2))
4041                 ret = ret2;
4042         if (ret >= 0)
4043                 ext4_update_inode_fsync_trans(handle, inode, 1);
4044 out_stop:
4045         ext4_journal_stop(handle);
4046 out_dio:
4047         filemap_invalidate_unlock(mapping);
4048 out_mutex:
4049         inode_unlock(inode);
4050         return ret;
4051 }
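
/*
 * Lock ordering in the hole-punch path above, as summarised from the
 * code (outermost first): i_rwsem, then mapping->invalidate_lock, then
 * the running journal handle, then i_data_sem.  The out_* labels
 * unwind in the reverse order.
 */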
4052
4053 int ext4_inode_attach_jinode(struct inode *inode)
4054 {
4055         struct ext4_inode_info *ei = EXT4_I(inode);
4056         struct jbd2_inode *jinode;
4057
4058         if (ei->jinode || !EXT4_SB(inode->i_sb)->s_journal)
4059                 return 0;
4060
4061         jinode = jbd2_alloc_inode(GFP_KERNEL);
4062         spin_lock(&inode->i_lock);
4063         if (!ei->jinode) {
4064                 if (!jinode) {
4065                         spin_unlock(&inode->i_lock);
4066                         return -ENOMEM;
4067                 }
4068                 ei->jinode = jinode;
4069                 jbd2_journal_init_jbd_inode(ei->jinode, inode);
4070                 jinode = NULL;
4071         }
4072         spin_unlock(&inode->i_lock);
4073         if (unlikely(jinode != NULL))
4074                 jbd2_free_inode(jinode);
4075         return 0;
4076 }
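
/*
 * The allocation in ext4_inode_attach_jinode() follows the usual
 * optimistic pattern: allocate without holding i_lock, re-check
 * ei->jinode under the lock, and free the unused jinode if another
 * task won the race.
 */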
4077
4078 /*
4079  * ext4_truncate()
4080  *
4081  * We block out ext4_get_block() block instantiations across the entire
4082  * transaction, and VFS/VM ensures that ext4_truncate() cannot run
4083  * simultaneously on behalf of the same inode.
4084  *
4085  * As we work through the truncate and commit bits of it to the journal there
4086  * is one core, guiding principle: the file's tree must always be consistent on
4087  * disk.  We must be able to restart the truncate after a crash.
4088  *
4089  * The file's tree may be transiently inconsistent in memory (although it
4090  * probably isn't), but whenever we close off and commit a journal transaction,
4091  * the contents of (the filesystem + the journal) must be consistent and
4092  * restartable.  It's pretty simple, really: bottom up, right to left (although
4093  * left-to-right works OK too).
4094  *
4095  * Note that at recovery time, journal replay occurs *before* the restart of
4096  * truncate against the orphan inode list.
4097  *
4098  * The committed inode has the new, desired i_size (which is the same as
4099  * i_disksize in this case).  After a crash, ext4_orphan_cleanup() will see
4100  * that this inode's truncate did not complete and it will again call
4101  * ext4_truncate() to have another go.  So there will be instantiated blocks
4102  * to the right of the truncation point in a crashed ext4 filesystem.  But
4103  * that's fine - as long as they are linked from the inode, the post-crash
4104  * ext4_truncate() run will find them and release them.
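 *
 * Crash timeline sketch (illustrative): a truncate commits a
 * transaction having freed only some of the blocks, the machine
 * crashes, journal replay restores that commit, and then
 * ext4_orphan_cleanup() finds the inode on the orphan list and calls
 * ext4_truncate() again to free the rest.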
4105  */
4106 int ext4_truncate(struct inode *inode)
4107 {
4108         struct ext4_inode_info *ei = EXT4_I(inode);
4109         unsigned int credits;
4110         int err = 0, err2;
4111         handle_t *handle;
4112         struct address_space *mapping = inode->i_mapping;
4113
4114         /*
4115          * There is a possibility that we're either freeing the inode
4116          * or it's a completely new inode. In those cases we might not
4117          * have i_rwsem locked because it's not necessary.
4118          */
4119         if (!(inode->i_state & (I_NEW|I_FREEING)))
4120                 WARN_ON(!inode_is_locked(inode));
4121         trace_ext4_truncate_enter(inode);
4122
4123         if (!ext4_can_truncate(inode))
4124                 goto out_trace;
4125
4126         if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC))
4127                 ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
4128
4129         if (ext4_has_inline_data(inode)) {
4130                 int has_inline = 1;
4131
4132                 err = ext4_inline_data_truncate(inode, &has_inline);
4133                 if (err || has_inline)
4134                         goto out_trace;
4135         }
4136
4137         /* If we zero out the tail of the page, we have to create a jinode for jbd2 */
4138         if (inode->i_size & (inode->i_sb->s_blocksize - 1)) {
4139                 err = ext4_inode_attach_jinode(inode);
4140                 if (err)
4141                         goto out_trace;
4142         }
4143
4144         if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
4145                 credits = ext4_writepage_trans_blocks(inode);
4146         else
4147                 credits = ext4_blocks_for_truncate(inode);
4148
4149         handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
4150         if (IS_ERR(handle)) {
4151                 err = PTR_ERR(handle);
4152                 goto out_trace;
4153         }
4154
4155         if (inode->i_size & (inode->i_sb->s_blocksize - 1))
4156                 ext4_block_truncate_page(handle, mapping, inode->i_size);
4157
4158         /*
4159          * We add the inode to the orphan list, so that if this
4160          * truncate spans multiple transactions, and we crash, we will
4161          * resume the truncate when the filesystem recovers.  It also
4162          * marks the inode dirty, to catch the new size.
4163          *
4164          * Implication: the file must always be in a sane, consistent
4165          * truncatable state while each transaction commits.
4166          */
4167         err = ext4_orphan_add(handle, inode);
4168         if (err)
4169                 goto out_stop;
4170
4171         down_write(&EXT4_I(inode)->i_data_sem);
4172
4173         ext4_discard_preallocations(inode, 0);
4174
4175         if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
4176                 err = ext4_ext_truncate(handle, inode);
4177         else
4178                 ext4_ind_truncate(handle, inode);
4179
4180         up_write(&ei->i_data_sem);
4181         if (err)
4182                 goto out_stop;
4183
4184         if (IS_SYNC(inode))
4185                 ext4_handle_sync(handle);
4186
4187 out_stop:
4188         /*
4189          * If this was a simple ftruncate() and the file will remain alive,
4190          * then we need to clear up the orphan record which we created above.
4191          * However, if this was a real unlink then we were called by
4192          * ext4_evict_inode(), and we allow that function to clean up the
4193          * orphan info for us.
4194          */
4195         if (inode->i_nlink)
4196                 ext4_orphan_del(handle, inode);
4197
4198         inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
4199         err2 = ext4_mark_inode_dirty(handle, inode);
4200         if (unlikely(err2 && !err))
4201                 err = err2;
4202         ext4_journal_stop(handle);
4203
4204 out_trace:
4205         trace_ext4_truncate_exit(inode);
4206         return err;
4207 }
4208
4209 static inline u64 ext4_inode_peek_iversion(const struct inode *inode)
4210 {
4211         if (unlikely(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL))
4212                 return inode_peek_iversion_raw(inode);
4213         else
4214                 return inode_peek_iversion(inode);
4215 }
4216
4217 static int ext4_inode_blocks_set(struct ext4_inode *raw_inode,
4218                                  struct ext4_inode_info *ei)
4219 {
4220         struct inode *inode = &(ei->vfs_inode);
4221         u64 i_blocks = READ_ONCE(inode->i_blocks);
4222         struct super_block *sb = inode->i_sb;
4223
4224         if (i_blocks <= ~0U) {
4225                 /*
4226                  * i_blocks can be represented in a 32 bit variable
4227                  * as multiple of 512 bytes
4228                  */
4229                 raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
4230                 raw_inode->i_blocks_high = 0;
4231                 ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
4232                 return 0;
4233         }
4234
4235         /*
4236          * This should never happen since sb->s_maxbytes should not have
4237          * allowed this; sb->s_maxbytes was set according to the huge_file
4238          * feature in ext4_fill_super().
4239          */
4240         if (!ext4_has_feature_huge_file(sb))
4241                 return -EFSCORRUPTED;
4242
4243         if (i_blocks <= 0xffffffffffffULL) {
4244                 /*
4245                  * i_blocks can be represented in a 48 bit variable
4246                  * as multiple of 512 bytes
4247                  */
4248                 raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
4249                 raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
4250                 ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
4251         } else {
4252                 ext4_set_inode_flag(inode, EXT4_INODE_HUGE_FILE);
4253                 /* i_blocks is stored in file system block size units */
4254                 i_blocks = i_blocks >> (inode->i_blkbits - 9);
4255                 raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
4256                 raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
4257         }
4258         return 0;
4259 }
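
/*
 * Encoding sketch for the branches above (assuming a 4k block size,
 * i.e. i_blkbits - 9 == 3): a count of 2^33 512-byte units does not
 * fit in 32 bits, so it is stored as i_blocks_lo = 0 and
 * i_blocks_high = 2.  Once the count no longer fits in 48 bits,
 * EXT4_INODE_HUGE_FILE is set and the same 48 bits hold the count in
 * filesystem-block units instead, i.e. the value shifted right by
 * i_blkbits - 9.
 */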
4260
4261 static int ext4_fill_raw_inode(struct inode *inode, struct ext4_inode *raw_inode)
4262 {
4263         struct ext4_inode_info *ei = EXT4_I(inode);
4264         uid_t i_uid;
4265         gid_t i_gid;
4266         projid_t i_projid;
4267         int block;
4268         int err;
4269
4270         err = ext4_inode_blocks_set(raw_inode, ei);
4271
4272         raw_inode->i_mode = cpu_to_le16(inode->i_mode);
4273         i_uid = i_uid_read(inode);
4274         i_gid = i_gid_read(inode);
4275         i_projid = from_kprojid(&init_user_ns, ei->i_projid);
4276         if (!(test_opt(inode->i_sb, NO_UID32))) {
4277                 raw_inode->i_uid_low = cpu_to_le16(low_16_bits(i_uid));
4278                 raw_inode->i_gid_low = cpu_to_le16(low_16_bits(i_gid));
4279                 /*
4280                  * Fix up interoperability with old kernels. Otherwise,
4281                  * old inodes get re-used with the upper 16 bits of the
4282                  * uid/gid intact.
4283                  */
4284                 if (ei->i_dtime && list_empty(&ei->i_orphan)) {
4285                         raw_inode->i_uid_high = 0;
4286                         raw_inode->i_gid_high = 0;
4287                 } else {
4288                         raw_inode->i_uid_high =
4289                                 cpu_to_le16(high_16_bits(i_uid));
4290                         raw_inode->i_gid_high =
4291                                 cpu_to_le16(high_16_bits(i_gid));
4292                 }
4293         } else {
4294                 raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(i_uid));
4295                 raw_inode->i_gid_low = cpu_to_le16(fs_high2lowgid(i_gid));
4296                 raw_inode->i_uid_high = 0;
4297                 raw_inode->i_gid_high = 0;
4298         }
4299         raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
4300
4301         EXT4_INODE_SET_CTIME(inode, raw_inode);
4302         EXT4_INODE_SET_MTIME(inode, raw_inode);
4303         EXT4_INODE_SET_ATIME(inode, raw_inode);
4304         EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);
4305
4306         raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
4307         raw_inode->i_flags = cpu_to_le32(ei->i_flags & 0xFFFFFFFF);
4308         if (likely(!test_opt2(inode->i_sb, HURD_COMPAT)))
4309                 raw_inode->i_file_acl_high =
4310                         cpu_to_le16(ei->i_file_acl >> 32);
4311         raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
4312         ext4_isize_set(raw_inode, ei->i_disksize);
4313
4314         raw_inode->i_generation = cpu_to_le32(inode->i_generation);
4315         if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
4316                 if (old_valid_dev(inode->i_rdev)) {
4317                         raw_inode->i_block[0] =
4318                                 cpu_to_le32(old_encode_dev(inode->i_rdev));
4319                         raw_inode->i_block[1] = 0;
4320                 } else {
4321                         raw_inode->i_block[0] = 0;
4322                         raw_inode->i_block[1] =
4323                                 cpu_to_le32(new_encode_dev(inode->i_rdev));
4324                         raw_inode->i_block[2] = 0;
4325                 }
4326         } else if (!ext4_has_inline_data(inode)) {
4327                 for (block = 0; block < EXT4_N_BLOCKS; block++)
4328                         raw_inode->i_block[block] = ei->i_data[block];
4329         }
4330
4331         if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) {
4332                 u64 ivers = ext4_inode_peek_iversion(inode);
4333
4334                 raw_inode->i_disk_version = cpu_to_le32(ivers);
4335                 if (ei->i_extra_isize) {
4336                         if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
4337                                 raw_inode->i_version_hi =
4338                                         cpu_to_le32(ivers >> 32);
4339                         raw_inode->i_extra_isize =
4340                                 cpu_to_le16(ei->i_extra_isize);
4341                 }
4342         }
4343
4344         if (i_projid != EXT4_DEF_PROJID &&
4345             !ext4_has_feature_project(inode->i_sb))
4346                 err = err ?: -EFSCORRUPTED;
4347
4348         if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
4349             EXT4_FITS_IN_INODE(raw_inode, ei, i_projid))
4350                 raw_inode->i_projid = cpu_to_le32(i_projid);
4351
4352         ext4_inode_csum_set(inode, raw_inode, ei);
4353         return err;
4354 }
4355
4356 /*
4357  * ext4_get_inode_loc returns with an extra refcount against the inode's
4358  * underlying buffer_head on success. If we pass 'inode' and it does not
4359  * have in-inode xattr, we have all inode data in memory that is needed
4360  * to recreate the on-disk version of this inode.
4361  */
4362 static int __ext4_get_inode_loc(struct super_block *sb, unsigned long ino,
4363                                 struct inode *inode, struct ext4_iloc *iloc,
4364                                 ext4_fsblk_t *ret_block)
4365 {
4366         struct ext4_group_desc  *gdp;
4367         struct buffer_head      *bh;
4368         ext4_fsblk_t            block;
4369         struct blk_plug         plug;
4370         int                     inodes_per_block, inode_offset;
4371
4372         iloc->bh = NULL;
4373         if (ino < EXT4_ROOT_INO ||
4374             ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))
4375                 return -EFSCORRUPTED;
4376
4377         iloc->block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
4378         gdp = ext4_get_group_desc(sb, iloc->block_group, NULL);
4379         if (!gdp)
4380                 return -EIO;
4381
4382         /*
4383          * Figure out the offset within the block group inode table
4384          */
4385         inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
4386         inode_offset = ((ino - 1) %
4387                         EXT4_INODES_PER_GROUP(sb));
4388         iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb);
4389
4390         block = ext4_inode_table(sb, gdp);
4391         if ((block <= le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) ||
4392             (block >= ext4_blocks_count(EXT4_SB(sb)->s_es))) {
4393                 ext4_error(sb, "Invalid inode table block %llu in "
4394                            "block_group %u", block, iloc->block_group);
4395                 return -EFSCORRUPTED;
4396         }
4397         block += (inode_offset / inodes_per_block);
4398
4399         bh = sb_getblk(sb, block);
4400         if (unlikely(!bh))
4401                 return -ENOMEM;
4402         if (ext4_buffer_uptodate(bh))
4403                 goto has_buffer;
4404
4405         lock_buffer(bh);
4406         if (ext4_buffer_uptodate(bh)) {
4407                 /* Someone brought it uptodate while we waited */
4408                 unlock_buffer(bh);
4409                 goto has_buffer;
4410         }
4411
4412         /*
4413          * If we have all of the inode's information in memory and this
4414          * is the only valid inode in the block, we need not read the
4415          * block.
4416          */
4417         if (inode && !ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
4418                 struct buffer_head *bitmap_bh;
4419                 int i, start;
4420
4421                 start = inode_offset & ~(inodes_per_block - 1);
4422
4423                 /* Is the inode bitmap in cache? */
4424                 bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp));
4425                 if (unlikely(!bitmap_bh))
4426                         goto make_io;
4427
4428                 /*
4429                  * If the inode bitmap isn't in cache then the
4430                  * optimisation may end up performing two reads instead
4431                  * of one, so skip it.
4432                  */
4433                 if (!buffer_uptodate(bitmap_bh)) {
4434                         brelse(bitmap_bh);
4435                         goto make_io;
4436                 }
4437                 for (i = start; i < start + inodes_per_block; i++) {
4438                         if (i == inode_offset)
4439                                 continue;
4440                         if (ext4_test_bit(i, bitmap_bh->b_data))
4441                                 break;
4442                 }
4443                 brelse(bitmap_bh);
4444                 if (i == start + inodes_per_block) {
4445                         struct ext4_inode *raw_inode =
4446                                 (struct ext4_inode *) (bh->b_data + iloc->offset);
4447
4448                         /* all other inodes are free, so skip I/O */
4449                         memset(bh->b_data, 0, bh->b_size);
4450                         if (!ext4_test_inode_state(inode, EXT4_STATE_NEW))
4451                                 ext4_fill_raw_inode(inode, raw_inode);
4452                         set_buffer_uptodate(bh);
4453                         unlock_buffer(bh);
4454                         goto has_buffer;
4455                 }
4456         }
4457
4458 make_io:
4459         /*
4460          * If we need to do any I/O, try to pre-readahead extra
4461          * blocks from the inode table.
4462          */
4463         blk_start_plug(&plug);
4464         if (EXT4_SB(sb)->s_inode_readahead_blks) {
4465                 ext4_fsblk_t b, end, table;
4466                 unsigned num;
4467                 __u32 ra_blks = EXT4_SB(sb)->s_inode_readahead_blks;
4468
4469                 table = ext4_inode_table(sb, gdp);
4470                 /* s_inode_readahead_blks is always a power of 2 */
4471                 b = block & ~((ext4_fsblk_t) ra_blks - 1);
4472                 if (table > b)
4473                         b = table;
4474                 end = b + ra_blks;
4475                 num = EXT4_INODES_PER_GROUP(sb);
4476                 if (ext4_has_group_desc_csum(sb))
4477                         num -= ext4_itable_unused_count(sb, gdp);
4478                 table += num / inodes_per_block;
4479                 if (end > table)
4480                         end = table;
4481                 while (b <= end)
4482                         ext4_sb_breadahead_unmovable(sb, b++);
4483         }
4484
4485         /*
4486          * There are other valid inodes in the buffer, this inode
4487          * has in-inode xattrs, or we don't have this inode in memory.
4488          * Read the block from disk.
4489          */
4490         trace_ext4_load_inode(sb, ino);
4491         ext4_read_bh_nowait(bh, REQ_META | REQ_PRIO, NULL);
4492         blk_finish_plug(&plug);
4493         wait_on_buffer(bh);
4494         ext4_simulate_fail_bh(sb, bh, EXT4_SIM_INODE_EIO);
4495         if (!buffer_uptodate(bh)) {
4496                 if (ret_block)
4497                         *ret_block = block;
4498                 brelse(bh);
4499                 return -EIO;
4500         }
4501 has_buffer:
4502         iloc->bh = bh;
4503         return 0;
4504 }
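
/*
 * Worked example for the lookup above (a sketch, assuming 8192 inodes
 * per group, 256-byte inodes and 4k blocks, i.e. 16 inodes per block):
 * ino = 20 gives block_group = (20 - 1) / 8192 = 0, inode_offset = 19,
 * iloc->offset = (19 % 16) * 256 = 768, and the buffer_head read is
 * for inode-table block 19 / 16 = 1 of that group.
 */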
4505
4506 static int __ext4_get_inode_loc_noinmem(struct inode *inode,
4507                                         struct ext4_iloc *iloc)
4508 {
4509         ext4_fsblk_t err_blk = 0;
4510         int ret;
4511
4512         ret = __ext4_get_inode_loc(inode->i_sb, inode->i_ino, NULL, iloc,
4513                                         &err_blk);
4514
4515         if (ret == -EIO)
4516                 ext4_error_inode_block(inode, err_blk, EIO,
4517                                         "unable to read itable block");
4518
4519         return ret;
4520 }
4521
4522 int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
4523 {
4524         ext4_fsblk_t err_blk = 0;
4525         int ret;
4526
4527         ret = __ext4_get_inode_loc(inode->i_sb, inode->i_ino, inode, iloc,
4528                                         &err_blk);
4529
4530         if (ret == -EIO)
4531                 ext4_error_inode_block(inode, err_blk, EIO,
4532                                         "unable to read itable block");
4533
4534         return ret;
4535 }
4536
4538 int ext4_get_fc_inode_loc(struct super_block *sb, unsigned long ino,
4539                           struct ext4_iloc *iloc)
4540 {
4541         return __ext4_get_inode_loc(sb, ino, NULL, iloc, NULL);
4542 }
4543
4544 static bool ext4_should_enable_dax(struct inode *inode)
4545 {
4546         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4547
4548         if (test_opt2(inode->i_sb, DAX_NEVER))
4549                 return false;
4550         if (!S_ISREG(inode->i_mode))
4551                 return false;
4552         if (ext4_should_journal_data(inode))
4553                 return false;
4554         if (ext4_has_inline_data(inode))
4555                 return false;
4556         if (ext4_test_inode_flag(inode, EXT4_INODE_ENCRYPT))
4557                 return false;
4558         if (ext4_test_inode_flag(inode, EXT4_INODE_VERITY))
4559                 return false;
4560         if (!test_bit(EXT4_FLAGS_BDEV_IS_DAX, &sbi->s_ext4_flags))
4561                 return false;
4562         if (test_opt(inode->i_sb, DAX_ALWAYS))
4563                 return true;
4564
4565         return ext4_test_inode_flag(inode, EXT4_INODE_DAX);
4566 }
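
/*
 * Precedence summary for the checks above: dax=never disables DAX
 * outright; journalled data, inline data, encryption, verity or a
 * non-DAX block device also disable it; dax=always then forces it on;
 * otherwise the per-inode EXT4_INODE_DAX flag decides.
 */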
4567
4568 void ext4_set_inode_flags(struct inode *inode, bool init)
4569 {
4570         unsigned int flags = EXT4_I(inode)->i_flags;
4571         unsigned int new_fl = 0;
4572
4573         WARN_ON_ONCE(IS_DAX(inode) && init);
4574
4575         if (flags & EXT4_SYNC_FL)
4576                 new_fl |= S_SYNC;
4577         if (flags & EXT4_APPEND_FL)
4578                 new_fl |= S_APPEND;
4579         if (flags & EXT4_IMMUTABLE_FL)
4580                 new_fl |= S_IMMUTABLE;
4581         if (flags & EXT4_NOATIME_FL)
4582                 new_fl |= S_NOATIME;
4583         if (flags & EXT4_DIRSYNC_FL)
4584                 new_fl |= S_DIRSYNC;
4585
4586         /* Because of the way inode_set_flags() works we must preserve S_DAX
4587          * here if already set. */
4588         new_fl |= (inode->i_flags & S_DAX);
4589         if (init && ext4_should_enable_dax(inode))
4590                 new_fl |= S_DAX;
4591
4592         if (flags & EXT4_ENCRYPT_FL)
4593                 new_fl |= S_ENCRYPTED;
4594         if (flags & EXT4_CASEFOLD_FL)
4595                 new_fl |= S_CASEFOLD;
4596         if (flags & EXT4_VERITY_FL)
4597                 new_fl |= S_VERITY;
4598         inode_set_flags(inode, new_fl,
4599                         S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_DAX|
4600                         S_ENCRYPTED|S_CASEFOLD|S_VERITY);
4601 }
4602
4603 static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
4604                                   struct ext4_inode_info *ei)
4605 {
4606         blkcnt_t i_blocks;
4607         struct inode *inode = &(ei->vfs_inode);
4608         struct super_block *sb = inode->i_sb;
4609
4610         if (ext4_has_feature_huge_file(sb)) {
4611                 /* we are using combined 48 bit field */
4612                 i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 |
4613                                         le32_to_cpu(raw_inode->i_blocks_lo);
4614                 if (ext4_test_inode_flag(inode, EXT4_INODE_HUGE_FILE)) {
4615                         /* i_blocks is in units of the file system block size */
4616                         return i_blocks << (inode->i_blkbits - 9);
4617                 } else {
4618                         return i_blocks;
4619                 }
4620         } else {
4621                 return le32_to_cpu(raw_inode->i_blocks_lo);
4622         }
4623 }
4624
4625 static inline int ext4_iget_extra_inode(struct inode *inode,
4626                                          struct ext4_inode *raw_inode,
4627                                          struct ext4_inode_info *ei)
4628 {
4629         __le32 *magic = (void *)raw_inode +
4630                         EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize;
4631
4632         if (EXT4_INODE_HAS_XATTR_SPACE(inode) &&
4633             *magic == cpu_to_le32(EXT4_XATTR_MAGIC)) {
4634                 int err;
4635
4636                 ext4_set_inode_state(inode, EXT4_STATE_XATTR);
4637                 err = ext4_find_inline_data_nolock(inode);
4638                 if (!err && ext4_has_inline_data(inode))
4639                         ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
4640                 return err;
4641         } else
4642                 EXT4_I(inode)->i_inline_off = 0;
4643         return 0;
4644 }
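
/*
 * In-inode layout the magic check above relies on (illustrative):
 *
 *	| 128-byte base inode | i_extra_isize bytes | __le32 magic | xattrs |
 *
 * i.e. EXT4_XATTR_MAGIC, when present, sits immediately after
 * EXT4_GOOD_OLD_INODE_SIZE + i_extra_isize.
 */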
4645
4646 int ext4_get_projid(struct inode *inode, kprojid_t *projid)
4647 {
4648         if (!ext4_has_feature_project(inode->i_sb))
4649                 return -EOPNOTSUPP;
4650         *projid = EXT4_I(inode)->i_projid;
4651         return 0;
4652 }
4653
4654 /*
4655  * ext4 has self-managed i_version for ea inodes; it stores the lower 32 bits
4656  * of the refcount in i_version, so use raw values if the inode has the
4657  * EXT4_EA_INODE_FL flag set.
4658  */
4659 static inline void ext4_inode_set_iversion_queried(struct inode *inode, u64 val)
4660 {
4661         if (unlikely(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL))
4662                 inode_set_iversion_raw(inode, val);
4663         else
4664                 inode_set_iversion_queried(inode, val);
4665 }
4666
4667 static const char *check_igot_inode(struct inode *inode, ext4_iget_flags flags)
4669 {
4670         if (flags & EXT4_IGET_EA_INODE) {
4671                 if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL))
4672                         return "missing EA_INODE flag";
4673                 if (ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
4674                     EXT4_I(inode)->i_file_acl)
4675                         return "ea_inode with extended attributes";
4676         } else {
4677                 if ((EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL))
4678                         return "unexpected EA_INODE flag";
4679         }
4680         if (is_bad_inode(inode) && !(flags & EXT4_IGET_BAD))
4681                 return "unexpected bad inode w/o EXT4_IGET_BAD";
4682         return NULL;
4683 }
4684
4685 struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
4686                           ext4_iget_flags flags, const char *function,
4687                           unsigned int line)
4688 {
4689         struct ext4_iloc iloc;
4690         struct ext4_inode *raw_inode;
4691         struct ext4_inode_info *ei;
4692         struct ext4_super_block *es = EXT4_SB(sb)->s_es;
4693         struct inode *inode;
4694         const char *err_str;
4695         journal_t *journal = EXT4_SB(sb)->s_journal;
4696         long ret;
4697         loff_t size;
4698         int block;
4699         uid_t i_uid;
4700         gid_t i_gid;
4701         projid_t i_projid;
4702
4703         if ((!(flags & EXT4_IGET_SPECIAL) &&
4704              ((ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO) ||
4705               ino == le32_to_cpu(es->s_usr_quota_inum) ||
4706               ino == le32_to_cpu(es->s_grp_quota_inum) ||
4707               ino == le32_to_cpu(es->s_prj_quota_inum) ||
4708               ino == le32_to_cpu(es->s_orphan_file_inum))) ||
4709             (ino < EXT4_ROOT_INO) ||
4710             (ino > le32_to_cpu(es->s_inodes_count))) {
4711                 if (flags & EXT4_IGET_HANDLE)
4712                         return ERR_PTR(-ESTALE);
4713                 __ext4_error(sb, function, line, false, EFSCORRUPTED, 0,
4714                              "inode #%lu: comm %s: iget: illegal inode #",
4715                              ino, current->comm);
4716                 return ERR_PTR(-EFSCORRUPTED);
4717         }
4718
4719         inode = iget_locked(sb, ino);
4720         if (!inode)
4721                 return ERR_PTR(-ENOMEM);
4722         if (!(inode->i_state & I_NEW)) {
4723                 if ((err_str = check_igot_inode(inode, flags)) != NULL) {
4724                         ext4_error_inode(inode, function, line, 0, err_str);
4725                         iput(inode);
4726                         return ERR_PTR(-EFSCORRUPTED);
4727                 }
4728                 return inode;
4729         }
4730
4731         ei = EXT4_I(inode);
4732         iloc.bh = NULL;
4733
4734         ret = __ext4_get_inode_loc_noinmem(inode, &iloc);
4735         if (ret < 0)
4736                 goto bad_inode;
4737         raw_inode = ext4_raw_inode(&iloc);
4738
4739         if ((flags & EXT4_IGET_HANDLE) &&
4740             (raw_inode->i_links_count == 0) && (raw_inode->i_mode == 0)) {
4741                 ret = -ESTALE;
4742                 goto bad_inode;
4743         }
4744
4745         if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
4746                 ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
4747                 if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
4748                         EXT4_INODE_SIZE(inode->i_sb) ||
4749                     (ei->i_extra_isize & 3)) {
4750                         ext4_error_inode(inode, function, line, 0,
4751                                          "iget: bad extra_isize %u "
4752                                          "(inode size %u)",
4753                                          ei->i_extra_isize,
4754                                          EXT4_INODE_SIZE(inode->i_sb));
4755                         ret = -EFSCORRUPTED;
4756                         goto bad_inode;
4757                 }
4758         } else
4759                 ei->i_extra_isize = 0;
4760
4761         /* Precompute checksum seed for inode metadata */
4762         if (ext4_has_metadata_csum(sb)) {
4763                 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4764                 __u32 csum;
4765                 __le32 inum = cpu_to_le32(inode->i_ino);
4766                 __le32 gen = raw_inode->i_generation;
4767                 csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum,
4768                                    sizeof(inum));
4769                 ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen,
4770                                               sizeof(gen));
4771         }
4772
4773         if ((!ext4_inode_csum_verify(inode, raw_inode, ei) ||
4774             ext4_simulate_fail(sb, EXT4_SIM_INODE_CRC)) &&
4775              (!(EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY))) {
4776                 ext4_error_inode_err(inode, function, line, 0,
4777                                 EFSBADCRC, "iget: checksum invalid");
4778                 ret = -EFSBADCRC;
4779                 goto bad_inode;
4780         }
4781
4782         inode->i_mode = le16_to_cpu(raw_inode->i_mode);
4783         i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
4784         i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
4785         if (ext4_has_feature_project(sb) &&
4786             EXT4_INODE_SIZE(sb) > EXT4_GOOD_OLD_INODE_SIZE &&
4787             EXT4_FITS_IN_INODE(raw_inode, ei, i_projid))
4788                 i_projid = (projid_t)le32_to_cpu(raw_inode->i_projid);
4789         else
4790                 i_projid = EXT4_DEF_PROJID;
4791
4792         if (!(test_opt(inode->i_sb, NO_UID32))) {
4793                 i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
4794                 i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
4795         }
4796         i_uid_write(inode, i_uid);
4797         i_gid_write(inode, i_gid);
4798         ei->i_projid = make_kprojid(&init_user_ns, i_projid);
4799         set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
4800
4801         ext4_clear_state_flags(ei);     /* Only relevant on 32-bit archs */
4802         ei->i_inline_off = 0;
4803         ei->i_dir_start_lookup = 0;
4804         ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
4805         /* We now have enough fields to check if the inode was active or not.
4806          * This is needed because nfsd might try to access dead inodes;
4807          * the test is the same one that e2fsck uses.
4808          * NeilBrown 1999oct15
4809          */
4810         if (inode->i_nlink == 0) {
4811                 if ((inode->i_mode == 0 || flags & EXT4_IGET_SPECIAL ||
4812                      !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) &&
4813                     ino != EXT4_BOOT_LOADER_INO) {
4814                         /* this inode is deleted or unallocated */
4815                         if (flags & EXT4_IGET_SPECIAL) {
4816                                 ext4_error_inode(inode, function, line, 0,
4817                                                  "iget: special inode unallocated");
4818                                 ret = -EFSCORRUPTED;
4819                         } else
4820                                 ret = -ESTALE;
4821                         goto bad_inode;
4822                 }
4823                 /* The only unlinked inodes we let through here have
4824                  * valid i_mode and are being read by the orphan
4825                  * recovery code: that's fine, we're about to complete
4826                  * the process of deleting those.
4827                  * OR it is the EXT4_BOOT_LOADER_INO which is
4828                  * not initialized on a new filesystem. */
4829         }
4830         ei->i_flags = le32_to_cpu(raw_inode->i_flags);
4831         ext4_set_inode_flags(inode, true);
4832         inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
4833         ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
4834         if (ext4_has_feature_64bit(sb))
4835                 ei->i_file_acl |=
4836                         ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
4837         inode->i_size = ext4_isize(sb, raw_inode);
4838         if ((size = i_size_read(inode)) < 0) {
4839                 ext4_error_inode(inode, function, line, 0,
4840                                  "iget: bad i_size value: %lld", size);
4841                 ret = -EFSCORRUPTED;
4842                 goto bad_inode;
4843         }
4844         /*
4845          * If dir_index is not enabled but there's a dir with the INDEX flag set,
4846          * we'd normally treat htree data as empty space. But with metadata
4847          * checksumming that corrupts checksums, so forbid that.
4848          */
4849         if (!ext4_has_feature_dir_index(sb) && ext4_has_metadata_csum(sb) &&
4850             ext4_test_inode_flag(inode, EXT4_INODE_INDEX)) {
4851                 ext4_error_inode(inode, function, line, 0,
4852                          "iget: Dir with htree data on filesystem without dir_index feature.");
4853                 ret = -EFSCORRUPTED;
4854                 goto bad_inode;
4855         }
4856         ei->i_disksize = inode->i_size;
4857 #ifdef CONFIG_QUOTA
4858         ei->i_reserved_quota = 0;
4859 #endif
4860         inode->i_generation = le32_to_cpu(raw_inode->i_generation);
4861         ei->i_block_group = iloc.block_group;
4862         ei->i_last_alloc_group = ~0;
4863         /*
4864          * NOTE! The in-memory inode i_data array is in little-endian order
4865          * even on big-endian machines: we do NOT byteswap the block numbers!
4866          */
4867         for (block = 0; block < EXT4_N_BLOCKS; block++)
4868                 ei->i_data[block] = raw_inode->i_block[block];
4869         INIT_LIST_HEAD(&ei->i_orphan);
4870         ext4_fc_init_inode(&ei->vfs_inode);
4871
4872         /*
4873          * Set transaction id's of transactions that have to be committed
4874          * to finish f[data]sync. We set them to the currently running transaction
4875          * as we cannot be sure that the inode or some of its metadata isn't
4876          * part of the transaction - the inode could have been reclaimed and
4877          * now it is reread from disk.
4878          */
4879         if (journal) {
4880                 transaction_t *transaction;
4881                 tid_t tid;
4882
4883                 read_lock(&journal->j_state_lock);
4884                 if (journal->j_running_transaction)
4885                         transaction = journal->j_running_transaction;
4886                 else
4887                         transaction = journal->j_committing_transaction;
4888                 if (transaction)
4889                         tid = transaction->t_tid;
4890                 else
4891                         tid = journal->j_commit_sequence;
4892                 read_unlock(&journal->j_state_lock);
4893                 ei->i_sync_tid = tid;
4894                 ei->i_datasync_tid = tid;
4895         }
4896
4897         if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
4898                 if (ei->i_extra_isize == 0) {
4899                         /* The extra space is currently unused. Use it. */
4900                         BUILD_BUG_ON(sizeof(struct ext4_inode) & 3);
4901                         ei->i_extra_isize = sizeof(struct ext4_inode) -
4902                                             EXT4_GOOD_OLD_INODE_SIZE;
4903                 } else {
4904                         ret = ext4_iget_extra_inode(inode, raw_inode, ei);
4905                         if (ret)
4906                                 goto bad_inode;
4907                 }
4908         }
4909
4910         EXT4_INODE_GET_CTIME(inode, raw_inode);
4911         EXT4_INODE_GET_ATIME(inode, raw_inode);
4912         EXT4_INODE_GET_MTIME(inode, raw_inode);
4913         EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode);
4914
4915         if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) {
4916                 u64 ivers = le32_to_cpu(raw_inode->i_disk_version);
4917
4918                 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
4919                         if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
4920                                 ivers |=
4921                     (__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32;
4922                 }
4923                 ext4_inode_set_iversion_queried(inode, ivers);
4924         }
4925
4926         ret = 0;
4927         if (ei->i_file_acl &&
4928             !ext4_inode_block_valid(inode, ei->i_file_acl, 1)) {
4929                 ext4_error_inode(inode, function, line, 0,
4930                                  "iget: bad extended attribute block %llu",
4931                                  ei->i_file_acl);
4932                 ret = -EFSCORRUPTED;
4933                 goto bad_inode;
4934         } else if (!ext4_has_inline_data(inode)) {
4935                 /* validate the block references in the inode */
4936                 if (!(EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY) &&
4937                         (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
4938                         (S_ISLNK(inode->i_mode) &&
4939                         !ext4_inode_is_fast_symlink(inode)))) {
4940                         if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
4941                                 ret = ext4_ext_check_inode(inode);
4942                         else
4943                                 ret = ext4_ind_check_inode(inode);
4944                 }
4945         }
4946         if (ret)
4947                 goto bad_inode;
4948
4949         if (S_ISREG(inode->i_mode)) {
4950                 inode->i_op = &ext4_file_inode_operations;
4951                 inode->i_fop = &ext4_file_operations;
4952                 ext4_set_aops(inode);
4953         } else if (S_ISDIR(inode->i_mode)) {
4954                 inode->i_op = &ext4_dir_inode_operations;
4955                 inode->i_fop = &ext4_dir_operations;
4956         } else if (S_ISLNK(inode->i_mode)) {
4957                 /* VFS does not allow setting these, so this must be corruption */
4958                 if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) {
4959                         ext4_error_inode(inode, function, line, 0,
4960                                          "iget: immutable or append flags "
4961                                          "not allowed on symlinks");
4962                         ret = -EFSCORRUPTED;
4963                         goto bad_inode;
4964                 }
4965                 if (IS_ENCRYPTED(inode)) {
4966                         inode->i_op = &ext4_encrypted_symlink_inode_operations;
4967                 } else if (ext4_inode_is_fast_symlink(inode)) {
4968                         inode->i_link = (char *)ei->i_data;
4969                         inode->i_op = &ext4_fast_symlink_inode_operations;
4970                         nd_terminate_link(ei->i_data, inode->i_size,
4971                                 sizeof(ei->i_data) - 1);
4972                 } else {
4973                         inode->i_op = &ext4_symlink_inode_operations;
4974                 }
4975         } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
4976               S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
4977                 inode->i_op = &ext4_special_inode_operations;
4978                 if (raw_inode->i_block[0])
4979                         init_special_inode(inode, inode->i_mode,
4980                            old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
4981                 else
4982                         init_special_inode(inode, inode->i_mode,
4983                            new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
4984         } else if (ino == EXT4_BOOT_LOADER_INO) {
4985                 make_bad_inode(inode);
4986         } else {
4987                 ret = -EFSCORRUPTED;
4988                 ext4_error_inode(inode, function, line, 0,
4989                                  "iget: bogus i_mode (%o)", inode->i_mode);
4990                 goto bad_inode;
4991         }
4992         if (IS_CASEFOLDED(inode) && !ext4_has_feature_casefold(inode->i_sb)) {
4993                 ext4_error_inode(inode, function, line, 0,
4994                                  "casefold flag without casefold feature");
4995                 ret = -EFSCORRUPTED;
4996                 goto bad_inode;
4997         }
4998         if ((err_str = check_igot_inode(inode, flags)) != NULL) {
4999                 ext4_error_inode(inode, function, line, 0, err_str);
5000                 ret = -EFSCORRUPTED;
5001                 goto bad_inode;
5002         }
5003
5004         brelse(iloc.bh);
5005         unlock_new_inode(inode);
5006         return inode;
5007
5008 bad_inode:
5009         brelse(iloc.bh);
5010         iget_failed(inode);
5011         return ERR_PTR(ret);
5012 }
5013
5014 static void __ext4_update_other_inode_time(struct super_block *sb,
5015                                            unsigned long orig_ino,
5016                                            unsigned long ino,
5017                                            struct ext4_inode *raw_inode)
5018 {
5019         struct inode *inode;
5020
5021         inode = find_inode_by_ino_rcu(sb, ino);
5022         if (!inode)
5023                 return;
5024
5025         if (!inode_is_dirtytime_only(inode))
5026                 return;
5027
5028         spin_lock(&inode->i_lock);
5029         if (inode_is_dirtytime_only(inode)) {
5030                 struct ext4_inode_info  *ei = EXT4_I(inode);
5031
5032                 inode->i_state &= ~I_DIRTY_TIME;
5033                 spin_unlock(&inode->i_lock);
5034
5035                 spin_lock(&ei->i_raw_lock);
5036                 EXT4_INODE_SET_CTIME(inode, raw_inode);
5037                 EXT4_INODE_SET_MTIME(inode, raw_inode);
5038                 EXT4_INODE_SET_ATIME(inode, raw_inode);
5039                 ext4_inode_csum_set(inode, raw_inode, ei);
5040                 spin_unlock(&ei->i_raw_lock);
5041                 trace_ext4_other_inode_update_time(inode, orig_ino);
5042                 return;
5043         }
5044         spin_unlock(&inode->i_lock);
5045 }
5046
5047 /*
5048  * Opportunistically update the other time fields for other inodes in
5049  * the same inode table block.
5050  */
5051 static void ext4_update_other_inodes_time(struct super_block *sb,
5052                                           unsigned long orig_ino, char *buf)
5053 {
5054         unsigned long ino;
5055         int i, inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
5056         int inode_size = EXT4_INODE_SIZE(sb);
5057
5058         /*
5059          * Calculate the first inode in the inode table block.  Inode
5060          * numbers are one-based.  That is, the first inode in a block
5061          * (assuming 4k blocks and 256 byte inodes) is (n*16 + 1).
5062          */
5063         ino = ((orig_ino - 1) & ~(inodes_per_block - 1)) + 1;
5064         rcu_read_lock();
5065         for (i = 0; i < inodes_per_block; i++, ino++, buf += inode_size) {
5066                 if (ino == orig_ino)
5067                         continue;
5068                 __ext4_update_other_inode_time(sb, orig_ino, ino,
5069                                                (struct ext4_inode *)buf);
5070         }
5071         rcu_read_unlock();
5072 }
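
/*
 * Example of the arithmetic above (a sketch, assuming 16 inodes per
 * block): orig_ino = 23 gives ((23 - 1) & ~15) + 1 = 17, so the loop
 * walks inodes 17..32 and skips 23 itself.
 */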
5073
5074 /*
5075  * Post the struct inode info into an on-disk inode location in the
5076  * buffer-cache.  This gobbles the caller's reference to the
5077  * buffer_head in the inode location struct.
5078  *
5079  * The caller must have write access to iloc->bh.
5080  */
5081 static int ext4_do_update_inode(handle_t *handle,
5082                                 struct inode *inode,
5083                                 struct ext4_iloc *iloc)
5084 {
5085         struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
5086         struct ext4_inode_info *ei = EXT4_I(inode);
5087         struct buffer_head *bh = iloc->bh;
5088         struct super_block *sb = inode->i_sb;
5089         int err;
5090         int need_datasync = 0, set_large_file = 0;
5091
5092         spin_lock(&ei->i_raw_lock);
5093
5094         /*
5095          * For fields not tracked in the in-memory inode, initialise them
5096          * to zero for new inodes.
5097          */
5098         if (ext4_test_inode_state(inode, EXT4_STATE_NEW))
5099                 memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
5100
5101         if (READ_ONCE(ei->i_disksize) != ext4_isize(inode->i_sb, raw_inode))
5102                 need_datasync = 1;
5103         if (ei->i_disksize > 0x7fffffffULL) {
5104                 if (!ext4_has_feature_large_file(sb) ||
5105                     EXT4_SB(sb)->s_es->s_rev_level == cpu_to_le32(EXT4_GOOD_OLD_REV))
5106                         set_large_file = 1;
5107         }
5108
5109         err = ext4_fill_raw_inode(inode, raw_inode);
5110         spin_unlock(&ei->i_raw_lock);
5111         if (err) {
5112                 EXT4_ERROR_INODE(inode, "corrupted inode contents");
5113                 goto out_brelse;
5114         }
5115
5116         if (inode->i_sb->s_flags & SB_LAZYTIME)
5117                 ext4_update_other_inodes_time(inode->i_sb, inode->i_ino,
5118                                               bh->b_data);
5119
5120         BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
5121         err = ext4_handle_dirty_metadata(handle, NULL, bh);
5122         if (err)
5123                 goto out_error;
5124         ext4_clear_inode_state(inode, EXT4_STATE_NEW);
5125         if (set_large_file) {
5126                 BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get write access");
5127                 err = ext4_journal_get_write_access(handle, sb,
5128                                                     EXT4_SB(sb)->s_sbh,
5129                                                     EXT4_JTR_NONE);
5130                 if (err)
5131                         goto out_error;
5132                 lock_buffer(EXT4_SB(sb)->s_sbh);
5133                 ext4_set_feature_large_file(sb);
5134                 ext4_superblock_csum_set(sb);
5135                 unlock_buffer(EXT4_SB(sb)->s_sbh);
5136                 ext4_handle_sync(handle);
5137                 err = ext4_handle_dirty_metadata(handle, NULL,
5138                                                  EXT4_SB(sb)->s_sbh);
5139         }
5140         ext4_update_inode_fsync_trans(handle, inode, need_datasync);
5141 out_error:
5142         ext4_std_error(inode->i_sb, err);
5143 out_brelse:
5144         brelse(bh);
5145         return err;
5146 }
5147
5148 /*
5149  * ext4_write_inode()
5150  *
5151  * We are called from a few places:
5152  *
5153  * - Within generic_file_aio_write() -> generic_write_sync() for O_SYNC files.
5154  *   Here, there will be no transaction running. We wait for any running
5155  *   transaction to commit.
5156  *
5157  * - Within flush work (sys_sync(), kupdate and such).
5158  *   We wait on commit, if told to.
5159  *
5160  * - Within iput_final() -> write_inode_now()
5161  *   We wait on commit, if told to.
5162  *
5163  * In all cases it is actually safe for us to return without doing anything,
5164  * because the inode has been copied into a raw inode buffer in
5165  * ext4_mark_inode_dirty().  This is a correctness thing for WB_SYNC_ALL
5166  * writeback.
5167  *
5168  * Note that we are absolutely dependent upon all inode dirtiers doing the
5169  * right thing: they *must* call mark_inode_dirty() after dirtying info in
5170  * which we are interested.
5171  *
5172  * It would be a bug for them to not do this.  The code:
5173  *
5174  *      mark_inode_dirty(inode)
5175  *      stuff();
5176  *      inode->i_size = expr;
5177  *
5178  * is in error because write_inode() could occur while `stuff()' is running,
5179  * and the new i_size will be lost.  Plus the inode will no longer be on the
5180  * superblock's dirty inode list.
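 *
 * The correct pattern dirties the inode only after the updates it must
 * capture:
 *
 *      stuff();
 *      inode->i_size = expr;
 *      mark_inode_dirty(inode);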
5181  */
5182 int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
5183 {
5184         int err;
5185
5186         if (WARN_ON_ONCE(current->flags & PF_MEMALLOC))
5187                 return 0;
5188
5189         if (unlikely(ext4_forced_shutdown(inode->i_sb)))
5190                 return -EIO;
5191
5192         if (EXT4_SB(inode->i_sb)->s_journal) {
5193                 if (ext4_journal_current_handle()) {
5194                         ext4_debug("called recursively, non-PF_MEMALLOC!\n");
5195                         dump_stack();
5196                         return -EIO;
5197                 }
5198
5199                 /*
5200                  * No need to force transaction in WB_SYNC_NONE mode. Also
5201                  * ext4_sync_fs() will force the commit after everything is
5202                  * written.
5203                  */
5204                 if (wbc->sync_mode != WB_SYNC_ALL || wbc->for_sync)
5205                         return 0;
5206
5207                 err = ext4_fc_commit(EXT4_SB(inode->i_sb)->s_journal,
5208                                                 EXT4_I(inode)->i_sync_tid);
5209         } else {
5210                 struct ext4_iloc iloc;
5211
5212                 err = __ext4_get_inode_loc_noinmem(inode, &iloc);
5213                 if (err)
5214                         return err;
5215                 /*
5216                  * sync(2) will flush the whole buffer cache. No need to do
5217                  * it here separately for each inode.
5218                  */
5219                 if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync)
5220                         sync_dirty_buffer(iloc.bh);
5221                 if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) {
5222                         ext4_error_inode_block(inode, iloc.bh->b_blocknr, EIO,
5223                                                "IO error syncing inode");
5224                         err = -EIO;
5225                 }
5226                 brelse(iloc.bh);
5227         }
5228         return err;
5229 }
5230
5231 /*
5232  * In data=journal mode ext4_journalled_invalidate_folio() may fail to invalidate
5233  * buffers that are attached to a folio straddling i_size and are undergoing
5234  * commit. In that case we have to wait for commit to finish and try again.
5235  */
5236 static void ext4_wait_for_tail_page_commit(struct inode *inode)
5237 {
5238         unsigned offset;
5239         journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
5240         tid_t commit_tid = 0;
5241         int ret;
5242
5243         offset = inode->i_size & (PAGE_SIZE - 1);
5244         /*
5245          * If the folio is fully truncated, we don't need to wait for any commit
5246          * (and indeed should not, as __ext4_journalled_invalidate_folio() may
5247          * strip all buffers from the folio but keep it dirty, which can then
5248          * confuse e.g. concurrent ext4_writepages() seeing a dirty folio without
5249          * buffers). Also we don't need to wait for any commit if all buffers in
5250          * the folio remain valid. This is most beneficial for the common case of
5251          * blocksize == PAGESIZE.
5252          */
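        /*
         * Illustrative numbers (editorial, assuming 4KiB pages): with
         * blocksize == 4KiB, PAGE_SIZE - i_blocksize(inode) == 0, so any
         * offset satisfies the check below and we never wait. With 1KiB
         * blocks, an i_size ending at page offsets 1..3072 straddles a
         * buffer boundary and we may have to wait for the committing
         * transaction.
         */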
5253         if (!offset || offset > (PAGE_SIZE - i_blocksize(inode)))
5254                 return;
5255         while (1) {
5256                 struct folio *folio = filemap_lock_folio(inode->i_mapping,
5257                                       inode->i_size >> PAGE_SHIFT);
5258                 if (IS_ERR(folio))
5259                         return;
5260                 ret = __ext4_journalled_invalidate_folio(folio, offset,
5261                                                 folio_size(folio) - offset);
5262                 folio_unlock(folio);
5263                 folio_put(folio);
5264                 if (ret != -EBUSY)
5265                         return;
5266                 commit_tid = 0;
5267                 read_lock(&journal->j_state_lock);
5268                 if (journal->j_committing_transaction)
5269                         commit_tid = journal->j_committing_transaction->t_tid;
5270                 read_unlock(&journal->j_state_lock);
5271                 if (commit_tid)
5272                         jbd2_log_wait_commit(journal, commit_tid);
5273         }
5274 }
5275
5276 /*
5277  * ext4_setattr()
5278  *
5279  * Called from notify_change.
5280  *
5281  * We want to trap VFS attempts to truncate the file as soon as
5282  * possible.  In particular, we want to make sure that when the VFS
5283  * shrinks i_size, we put the inode on the orphan list and modify
5284  * i_disksize immediately, so that during the subsequent flushing of
5285  * dirty pages and freeing of disk blocks, we can guarantee that any
5286  * commit will leave the blocks being flushed in an unused state on
5287  * disk.  (On recovery, the inode will get truncated and the blocks will
5288  * be freed, so we have a strong guarantee that no future commit will
5289  * leave these blocks visible to the user.)
5290  *
5291  * Another thing we have to ensure is that if we are in ordered mode
5292  * and the inode is still attached to the committing transaction, we
5293  * must start writeout of all the dirty pages which are being truncated.
5294  * This way we are sure that all the data written in the previous
5295  * transaction are already on disk (truncate waits for pages under
5296  * writeback).
5297  *
5298  * Called with inode->i_rwsem down.
5299  */
5300 int ext4_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
5301                  struct iattr *attr)
5302 {
5303         struct inode *inode = d_inode(dentry);
5304         int error, rc = 0;
5305         int orphan = 0;
5306         const unsigned int ia_valid = attr->ia_valid;
5307         bool inc_ivers = true;
5308
5309         if (unlikely(ext4_forced_shutdown(inode->i_sb)))
5310                 return -EIO;
5311
5312         if (unlikely(IS_IMMUTABLE(inode)))
5313                 return -EPERM;
5314
5315         if (unlikely(IS_APPEND(inode) &&
5316                      (ia_valid & (ATTR_MODE | ATTR_UID |
5317                                   ATTR_GID | ATTR_TIMES_SET))))
5318                 return -EPERM;
5319
5320         error = setattr_prepare(idmap, dentry, attr);
5321         if (error)
5322                 return error;
5323
5324         error = fscrypt_prepare_setattr(dentry, attr);
5325         if (error)
5326                 return error;
5327
5328         error = fsverity_prepare_setattr(dentry, attr);
5329         if (error)
5330                 return error;
5331
5332         if (is_quota_modification(idmap, inode, attr)) {
5333                 error = dquot_initialize(inode);
5334                 if (error)
5335                         return error;
5336         }
5337
5338         if (i_uid_needs_update(idmap, attr, inode) ||
5339             i_gid_needs_update(idmap, attr, inode)) {
5340                 handle_t *handle;
5341
5342                 /* (user+group)*(old+new) structure, inode write (sb,
5343                  * inode block, ? - but truncate inode update has it) */
5344                 handle = ext4_journal_start(inode, EXT4_HT_QUOTA,
5345                         (EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb) +
5346                          EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb)) + 3);
5347                 if (IS_ERR(handle)) {
5348                         error = PTR_ERR(handle);
5349                         goto err_out;
5350                 }
5351
5352                 /* dquot_transfer() calls back ext4_get_inode_usage() which
5353                  * counts xattr inode references.
5354                  */
5355                 down_read(&EXT4_I(inode)->xattr_sem);
5356                 error = dquot_transfer(idmap, inode, attr);
5357                 up_read(&EXT4_I(inode)->xattr_sem);
5358
5359                 if (error) {
5360                         ext4_journal_stop(handle);
5361                         return error;
5362                 }
5363                 /* Update corresponding info in inode so that everything is in
5364                  * one transaction */
5365                 i_uid_update(idmap, attr, inode);
5366                 i_gid_update(idmap, attr, inode);
5367                 error = ext4_mark_inode_dirty(handle, inode);
5368                 ext4_journal_stop(handle);
5369                 if (unlikely(error)) {
5370                         return error;
5371                 }
5372         }
5373
5374         if (attr->ia_valid & ATTR_SIZE) {
5375                 handle_t *handle;
5376                 loff_t oldsize = inode->i_size;
5377                 loff_t old_disksize;
5378                 int shrink = (attr->ia_size < inode->i_size);
5379
5380                 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
5381                         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
5382
5383                         if (attr->ia_size > sbi->s_bitmap_maxbytes) {
5384                                 return -EFBIG;
5385                         }
5386                 }
5387                 if (!S_ISREG(inode->i_mode)) {
5388                         return -EINVAL;
5389                 }
5390
5391                 if (attr->ia_size == inode->i_size)
5392                         inc_ivers = false;
5393
5394                 if (shrink) {
5395                         if (ext4_should_order_data(inode)) {
5396                                 error = ext4_begin_ordered_truncate(inode,
5397                                                             attr->ia_size);
5398                                 if (error)
5399                                         goto err_out;
5400                         }
5401                         /*
5402                          * Blocks are going to be removed from the inode. Wait
5403                          * for dio in flight.
5404                          */
5405                         inode_dio_wait(inode);
5406                 }
5407
5408                 filemap_invalidate_lock(inode->i_mapping);
5409
5410                 rc = ext4_break_layouts(inode);
5411                 if (rc) {
5412                         filemap_invalidate_unlock(inode->i_mapping);
5413                         goto err_out;
5414                 }
5415
5416                 if (attr->ia_size != inode->i_size) {
5417                         handle = ext4_journal_start(inode, EXT4_HT_INODE, 3);
5418                         if (IS_ERR(handle)) {
5419                                 error = PTR_ERR(handle);
5420                                 goto out_mmap_sem;
5421                         }
5422                         if (ext4_handle_valid(handle) && shrink) {
5423                                 error = ext4_orphan_add(handle, inode);
5424                                 orphan = 1;
5425                         }
5426                         /*
5427                          * Update c/mtime when truncating up; ext4_truncate()
5428                          * will update c/mtime in the shrink case below.
5429                          */
5430                         if (!shrink)
5431                                 inode_set_mtime_to_ts(inode,
5432                                                       inode_set_ctime_current(inode));
5433
5434                         if (shrink)
5435                                 ext4_fc_track_range(handle, inode,
5436                                         (attr->ia_size > 0 ? attr->ia_size - 1 : 0) >>
5437                                         inode->i_sb->s_blocksize_bits,
5438                                         EXT_MAX_BLOCKS - 1);
5439                         else
5440                                 ext4_fc_track_range(
5441                                         handle, inode,
5442                                         (oldsize > 0 ? oldsize - 1 : oldsize) >>
5443                                         inode->i_sb->s_blocksize_bits,
5444                                         (attr->ia_size > 0 ? attr->ia_size - 1 : 0) >>
5445                                         inode->i_sb->s_blocksize_bits);
5446
5447                         down_write(&EXT4_I(inode)->i_data_sem);
5448                         old_disksize = EXT4_I(inode)->i_disksize;
5449                         EXT4_I(inode)->i_disksize = attr->ia_size;
5450                         rc = ext4_mark_inode_dirty(handle, inode);
5451                         if (!error)
5452                                 error = rc;
5453                         /*
5454                          * We have to update i_size under i_data_sem together
5455                          * with i_disksize to avoid races with writeback code
5456                          * running ext4_wb_update_i_disksize().
5457                          */
5458                         if (!error)
5459                                 i_size_write(inode, attr->ia_size);
5460                         else
5461                                 EXT4_I(inode)->i_disksize = old_disksize;
5462                         up_write(&EXT4_I(inode)->i_data_sem);
5463                         ext4_journal_stop(handle);
5464                         if (error)
5465                                 goto out_mmap_sem;
5466                         if (!shrink) {
5467                                 pagecache_isize_extended(inode, oldsize,
5468                                                          inode->i_size);
5469                         } else if (ext4_should_journal_data(inode)) {
5470                                 ext4_wait_for_tail_page_commit(inode);
5471                         }
5472                 }
5473
5474                 /*
5475                  * Truncate pagecache after we've waited for commit
5476                  * in data=journal mode to make pages freeable.
5477                  */
5478                 truncate_pagecache(inode, inode->i_size);
5479                 /*
5480                  * Call ext4_truncate() even if i_size didn't change to
5481                  * truncate possible preallocated blocks.
5482                  */
5483                 if (attr->ia_size <= oldsize) {
5484                         rc = ext4_truncate(inode);
5485                         if (rc)
5486                                 error = rc;
5487                 }
5488 out_mmap_sem:
5489                 filemap_invalidate_unlock(inode->i_mapping);
5490         }
5491
5492         if (!error) {
5493                 if (inc_ivers)
5494                         inode_inc_iversion(inode);
5495                 setattr_copy(idmap, inode, attr);
5496                 mark_inode_dirty(inode);
5497         }
5498
5499         /*
5500          * If the call to ext4_truncate failed to get a transaction handle at
5501          * all, we need to clean up the in-core orphan list manually.
5502          */
5503         if (orphan && inode->i_nlink)
5504                 ext4_orphan_del(NULL, inode);
5505
5506         if (!error && (ia_valid & ATTR_MODE))
5507                 rc = posix_acl_chmod(idmap, dentry, inode->i_mode);
5508
5509 err_out:
5510         if  (error)
5511                 ext4_std_error(inode->i_sb, error);
5512         if (!error)
5513                 error = rc;
5514         return error;
5515 }
5516
5517 u32 ext4_dio_alignment(struct inode *inode)
5518 {
5519         if (fsverity_active(inode))
5520                 return 0;
5521         if (ext4_should_journal_data(inode))
5522                 return 0;
5523         if (ext4_has_inline_data(inode))
5524                 return 0;
5525         if (IS_ENCRYPTED(inode)) {
5526                 if (!fscrypt_dio_supported(inode))
5527                         return 0;
5528                 return i_blocksize(inode);
5529         }
5530         return 1; /* use the iomap defaults */
5531 }
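/*
 * Return-value summary (editorial note): 0 means direct I/O is not
 * supported at all; 1 means no extra restriction beyond the iomap/bdev
 * defaults; a larger value (the block size, for encrypted files) is the
 * alignment required for both user buffers and file offsets.
 */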
5532
5533 int ext4_getattr(struct mnt_idmap *idmap, const struct path *path,
5534                  struct kstat *stat, u32 request_mask, unsigned int query_flags)
5535 {
5536         struct inode *inode = d_inode(path->dentry);
5537         struct ext4_inode *raw_inode;
5538         struct ext4_inode_info *ei = EXT4_I(inode);
5539         unsigned int flags;
5540
5541         if ((request_mask & STATX_BTIME) &&
5542             EXT4_FITS_IN_INODE(raw_inode, ei, i_crtime)) {
5543                 stat->result_mask |= STATX_BTIME;
5544                 stat->btime.tv_sec = ei->i_crtime.tv_sec;
5545                 stat->btime.tv_nsec = ei->i_crtime.tv_nsec;
5546         }
5547
5548         /*
5549          * Return the DIO alignment restrictions if requested.  We only return
5550          * this information when requested, since on encrypted files it might
5551          * take a fair bit of work to get if the file wasn't opened recently.
5552          */
5553         if ((request_mask & STATX_DIOALIGN) && S_ISREG(inode->i_mode)) {
5554                 u32 dio_align = ext4_dio_alignment(inode);
5555
5556                 stat->result_mask |= STATX_DIOALIGN;
5557                 if (dio_align == 1) {
5558                         struct block_device *bdev = inode->i_sb->s_bdev;
5559
5560                         /* iomap defaults */
5561                         stat->dio_mem_align = bdev_dma_alignment(bdev) + 1;
5562                         stat->dio_offset_align = bdev_logical_block_size(bdev);
5563                 } else {
5564                         stat->dio_mem_align = dio_align;
5565                         stat->dio_offset_align = dio_align;
5566                 }
5567         }
5568
5569         flags = ei->i_flags & EXT4_FL_USER_VISIBLE;
5570         if (flags & EXT4_APPEND_FL)
5571                 stat->attributes |= STATX_ATTR_APPEND;
5572         if (flags & EXT4_COMPR_FL)
5573                 stat->attributes |= STATX_ATTR_COMPRESSED;
5574         if (flags & EXT4_ENCRYPT_FL)
5575                 stat->attributes |= STATX_ATTR_ENCRYPTED;
5576         if (flags & EXT4_IMMUTABLE_FL)
5577                 stat->attributes |= STATX_ATTR_IMMUTABLE;
5578         if (flags & EXT4_NODUMP_FL)
5579                 stat->attributes |= STATX_ATTR_NODUMP;
5580         if (flags & EXT4_VERITY_FL)
5581                 stat->attributes |= STATX_ATTR_VERITY;
5582
5583         stat->attributes_mask |= (STATX_ATTR_APPEND |
5584                                   STATX_ATTR_COMPRESSED |
5585                                   STATX_ATTR_ENCRYPTED |
5586                                   STATX_ATTR_IMMUTABLE |
5587                                   STATX_ATTR_NODUMP |
5588                                   STATX_ATTR_VERITY);
5589
5590         generic_fillattr(idmap, request_mask, inode, stat);
5591         return 0;
5592 }
5593
5594 int ext4_file_getattr(struct mnt_idmap *idmap,
5595                       const struct path *path, struct kstat *stat,
5596                       u32 request_mask, unsigned int query_flags)
5597 {
5598         struct inode *inode = d_inode(path->dentry);
5599         u64 delalloc_blocks;
5600
5601         ext4_getattr(idmap, path, stat, request_mask, query_flags);
5602
5603         /*
5604          * If there is inline data in the inode, the inode will normally not
5605          * have data blocks allocated (it may have an external xattr block).
5606          * Report at least one sector for such files, so tools like tar, rsync,
5607          * others don't incorrectly think the file is completely sparse.
5608          */
5609         if (unlikely(ext4_has_inline_data(inode)))
5610                 stat->blocks += (stat->size + 511) >> 9;
5611
5612         /*
5613          * We can't update i_blocks if the block allocation is delayed;
5614          * otherwise, in the case of a system crash before the real block
5615          * allocation is done, we would have i_blocks inconsistent with
5616          * the on-disk file blocks.
5617          * We always keep i_blocks updated together with the real
5618          * allocation. But to avoid confusing userspace, stat
5619          * will return block counts that include the delayed allocation
5620          * blocks for this file.
5621          */
5622         delalloc_blocks = EXT4_C2B(EXT4_SB(inode->i_sb),
5623                                    EXT4_I(inode)->i_reserved_data_blocks);
5624         stat->blocks += delalloc_blocks << (inode->i_sb->s_blocksize_bits - 9);
5625         return 0;
5626 }
5627
5628 static int ext4_index_trans_blocks(struct inode *inode, int lblocks,
5629                                    int pextents)
5630 {
5631         if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
5632                 return ext4_ind_trans_blocks(inode, lblocks);
5633         return ext4_ext_index_trans_blocks(inode, pextents);
5634 }
5635
5636 /*
5637  * Account for index blocks, block group bitmaps and block group
5638  * descriptor blocks if we modify data blocks and index blocks. In the
5639  * worst case, the index blocks spread over different block groups.
5640  *
5641  * If the data blocks are discontiguous, they may spread over different
5642  * block groups too. Even if they are contiguous, with flexbg they could
5643  * still cross a block group boundary.
5644  *
5645  * Also account for superblock, inode, quota and xattr blocks.
5646  */
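/*
 * Worked example (editorial, illustrative; assumes the index-block
 * estimate below returns 5 for a 4-extent mapping and s_gdb_count >= 9):
 * idxblocks = 5, so groups = 5 + 4 = 9 bitmap blocks (capped at
 * ngroups) and 9 group descriptor blocks (capped at s_gdb_count),
 * giving 5 + 9 + 9 + EXT4_META_TRANS_BLOCKS(sb) credits in total.
 */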
5647 static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
5648                                   int pextents)
5649 {
5650         ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb);
5651         int gdpblocks;
5652         int idxblocks;
5653         int ret;
5654
5655         /*
5656          * How many index blocks do we need to touch to map @lblocks
5657          * logical blocks to @pextents physical extents?
5658          */
5659         idxblocks = ext4_index_trans_blocks(inode, lblocks, pextents);
5660
5661         ret = idxblocks;
5662
5663         /*
5664          * Now let's see how many group bitmaps and group descriptors
5665          * need to be accounted for.
5666          */
5667         groups = idxblocks + pextents;
5668         gdpblocks = groups;
5669         if (groups > ngroups)
5670                 groups = ngroups;
5671         if (groups > EXT4_SB(inode->i_sb)->s_gdb_count)
5672                 gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count;
5673
5674         /* bitmaps and block group descriptor blocks */
5675         ret += groups + gdpblocks;
5676
5677         /* Blocks for super block, inode, quota and xattr blocks */
5678         ret += EXT4_META_TRANS_BLOCKS(inode->i_sb);
5679
5680         return ret;
5681 }
5682
5683 /*
5684  * Calculate the total number of credits to reserve to fit
5685  * the modification of a single page into a single transaction,
5686  * which may include multiple chunks of block allocations.
5687  *
5688  * This could be called via ext4_write_begin()
5689  *
5690  * We need to consider the worst case, when we allocate
5691  * one new block per extent.
5692  */
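/*
 * Example (editorial, illustrative): with 1KiB blocks and 4KiB pages,
 * bpp == 4, so we reserve enough credits to map four logical blocks to
 * four separate extents, plus four extra data-block credits in
 * data=journal mode.
 */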
5693 int ext4_writepage_trans_blocks(struct inode *inode)
5694 {
5695         int bpp = ext4_journal_blocks_per_page(inode);
5696         int ret;
5697
5698         ret = ext4_meta_trans_blocks(inode, bpp, bpp);
5699
5700         /* Account for data blocks for journalled mode */
5701         if (ext4_should_journal_data(inode))
5702                 ret += bpp;
5703         return ret;
5704 }
5705
5706 /*
5707  * Calculate the journal credits for a chunk of data modification.
5708  *
5709  * This is called from DIO, fallocate, or anything else that calls
5710  * ext4_map_blocks() to map/allocate a chunk of contiguous disk blocks.
5711  *
5712  * Journal buffers for data blocks are not included here, as DIO
5713  * and fallocate do not need to journal data buffers.
5714  */
5715 int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks)
5716 {
5717         return ext4_meta_trans_blocks(inode, nrblocks, 1);
5718 }
5719
5720 /*
5721  * The caller must have previously called ext4_reserve_inode_write().
5722  * Given this, we know that the caller already has write access to iloc->bh.
5723  */
5724 int ext4_mark_iloc_dirty(handle_t *handle,
5725                          struct inode *inode, struct ext4_iloc *iloc)
5726 {
5727         int err = 0;
5728
5729         if (unlikely(ext4_forced_shutdown(inode->i_sb))) {
5730                 put_bh(iloc->bh);
5731                 return -EIO;
5732         }
5733         ext4_fc_track_inode(handle, inode);
5734
5735         /* ext4_do_update_inode() consumes one bh reference (b_count) */
5736         get_bh(iloc->bh);
5737
5738         /* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
5739         err = ext4_do_update_inode(handle, inode, iloc);
5740         put_bh(iloc->bh);
5741         return err;
5742 }
5743
5744 /*
5745  * On success, we end up with an outstanding reference count against
5746  * iloc->bh.  This _must_ be cleaned up later.
5747  */
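/*
 * Typical pairing (editorial sketch, mirroring __ext4_mark_inode_dirty()
 * below):
 *
 *      err = ext4_reserve_inode_write(handle, inode, &iloc);
 *      if (!err) {
 *              ...modify the in-core inode...
 *              err = ext4_mark_iloc_dirty(handle, inode, &iloc);
 *      }
 *
 * ext4_mark_iloc_dirty() consumes the reference taken on iloc->bh here.
 */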
5748
5749 int
5750 ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
5751                          struct ext4_iloc *iloc)
5752 {
5753         int err;
5754
5755         if (unlikely(ext4_forced_shutdown(inode->i_sb)))
5756                 return -EIO;
5757
5758         err = ext4_get_inode_loc(inode, iloc);
5759         if (!err) {
5760                 BUFFER_TRACE(iloc->bh, "get_write_access");
5761                 err = ext4_journal_get_write_access(handle, inode->i_sb,
5762                                                     iloc->bh, EXT4_JTR_NONE);
5763                 if (err) {
5764                         brelse(iloc->bh);
5765                         iloc->bh = NULL;
5766                 }
5767         }
5768         ext4_std_error(inode->i_sb, err);
5769         return err;
5770 }
5771
5772 static int __ext4_expand_extra_isize(struct inode *inode,
5773                                      unsigned int new_extra_isize,
5774                                      struct ext4_iloc *iloc,
5775                                      handle_t *handle, int *no_expand)
5776 {
5777         struct ext4_inode *raw_inode;
5778         struct ext4_xattr_ibody_header *header;
5779         unsigned int inode_size = EXT4_INODE_SIZE(inode->i_sb);
5780         struct ext4_inode_info *ei = EXT4_I(inode);
5781         int error;
5782
5783         /* this was checked at iget time, but double check for good measure */
5784         if ((EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize > inode_size) ||
5785             (ei->i_extra_isize & 3)) {
5786                 EXT4_ERROR_INODE(inode, "bad extra_isize %u (inode size %u)",
5787                                  ei->i_extra_isize,
5788                                  EXT4_INODE_SIZE(inode->i_sb));
5789                 return -EFSCORRUPTED;
5790         }
5791         if ((new_extra_isize < ei->i_extra_isize) ||
5792             (new_extra_isize < 4) ||
5793             (new_extra_isize > inode_size - EXT4_GOOD_OLD_INODE_SIZE))
5794                 return -EINVAL; /* Should never happen */
5795
5796         raw_inode = ext4_raw_inode(iloc);
5797
5798         header = IHDR(inode, raw_inode);
5799
5800         /* No extended attributes present */
5801         if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
5802             header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
5803                 memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE +
5804                        EXT4_I(inode)->i_extra_isize, 0,
5805                        new_extra_isize - EXT4_I(inode)->i_extra_isize);
5806                 EXT4_I(inode)->i_extra_isize = new_extra_isize;
5807                 return 0;
5808         }
5809
5810         /*
5811          * We may need to allocate external xattr block so we need quotas
5812          * initialized. Here we can be called with various locks held so we
5813          * cannot afford to initialize quotas ourselves, so just bail.
5814          */
5815         if (dquot_initialize_needed(inode))
5816                 return -EAGAIN;
5817
5818         /* try to expand with EAs present */
5819         error = ext4_expand_extra_isize_ea(inode, new_extra_isize,
5820                                            raw_inode, handle);
5821         if (error) {
5822                 /*
5823                  * Inode size expansion failed; don't try again
5824                  */
5825                 *no_expand = 1;
5826         }
5827
5828         return error;
5829 }
5830
5831 /*
5832  * Expand an inode by new_extra_isize bytes.
5833  * Returns 0 on success or negative error number on failure.
5834  */
5835 static int ext4_try_to_expand_extra_isize(struct inode *inode,
5836                                           unsigned int new_extra_isize,
5837                                           struct ext4_iloc iloc,
5838                                           handle_t *handle)
5839 {
5840         int no_expand;
5841         int error;
5842
5843         if (ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND))
5844                 return -EOVERFLOW;
5845
5846         /*
5847          * In nojournal mode, we can immediately attempt to expand
5848          * the inode.  When journaled, we first need to obtain extra
5849          * buffer credits since we may write into the EA block
5850          * with this same handle. If journal_extend fails, then it will
5851          * only result in a minor loss of functionality for that inode.
5852          * If this is felt to be critical, then e2fsck should be run to
5853          * force a large enough s_min_extra_isize.
5854          */
5855         if (ext4_journal_extend(handle,
5856                                 EXT4_DATA_TRANS_BLOCKS(inode->i_sb), 0) != 0)
5857                 return -ENOSPC;
5858
5859         if (ext4_write_trylock_xattr(inode, &no_expand) == 0)
5860                 return -EBUSY;
5861
5862         error = __ext4_expand_extra_isize(inode, new_extra_isize, &iloc,
5863                                           handle, &no_expand);
5864         ext4_write_unlock_xattr(inode, &no_expand);
5865
5866         return error;
5867 }
5868
5869 int ext4_expand_extra_isize(struct inode *inode,
5870                             unsigned int new_extra_isize,
5871                             struct ext4_iloc *iloc)
5872 {
5873         handle_t *handle;
5874         int no_expand;
5875         int error, rc;
5876
5877         if (ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) {
5878                 brelse(iloc->bh);
5879                 return -EOVERFLOW;
5880         }
5881
5882         handle = ext4_journal_start(inode, EXT4_HT_INODE,
5883                                     EXT4_DATA_TRANS_BLOCKS(inode->i_sb));
5884         if (IS_ERR(handle)) {
5885                 error = PTR_ERR(handle);
5886                 brelse(iloc->bh);
5887                 return error;
5888         }
5889
5890         ext4_write_lock_xattr(inode, &no_expand);
5891
5892         BUFFER_TRACE(iloc->bh, "get_write_access");
5893         error = ext4_journal_get_write_access(handle, inode->i_sb, iloc->bh,
5894                                               EXT4_JTR_NONE);
5895         if (error) {
5896                 brelse(iloc->bh);
5897                 goto out_unlock;
5898         }
5899
5900         error = __ext4_expand_extra_isize(inode, new_extra_isize, iloc,
5901                                           handle, &no_expand);
5902
5903         rc = ext4_mark_iloc_dirty(handle, inode, iloc);
5904         if (!error)
5905                 error = rc;
5906
5907 out_unlock:
5908         ext4_write_unlock_xattr(inode, &no_expand);
5909         ext4_journal_stop(handle);
5910         return error;
5911 }
5912
5913 /*
5914  * What we do here is to mark the in-core inode as clean with respect to inode
5915  * dirtiness (it may still be data-dirty).
5916  * This means that the in-core inode may be reaped by prune_icache
5917  * without having to perform any I/O.  This is a very good thing,
5918  * because *any* task may call prune_icache - even ones which
5919  * have a transaction open against a different journal.
5920  *
5921  * Is this cheating?  Not really.  Sure, we haven't written the
5922  * inode out, but prune_icache isn't a user-visible syncing function.
5923  * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
5924  * we start and wait on commits.
5925  */
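/*
 * Usage sketch (editorial, illustrative; ext4_dirty_inode() below
 * follows this pattern): callers must already hold an open handle,
 *
 *      handle = ext4_journal_start(inode, EXT4_HT_INODE, credits);
 *      ...update the in-core inode...
 *      err = ext4_mark_inode_dirty(handle, inode);
 *      ext4_journal_stop(handle);
 */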
5926 int __ext4_mark_inode_dirty(handle_t *handle, struct inode *inode,
5927                                 const char *func, unsigned int line)
5928 {
5929         struct ext4_iloc iloc;
5930         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
5931         int err;
5932
5933         might_sleep();
5934         trace_ext4_mark_inode_dirty(inode, _RET_IP_);
5935         err = ext4_reserve_inode_write(handle, inode, &iloc);
5936         if (err)
5937                 goto out;
5938
5939         if (EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize)
5940                 ext4_try_to_expand_extra_isize(inode, sbi->s_want_extra_isize,
5941                                                iloc, handle);
5942
5943         err = ext4_mark_iloc_dirty(handle, inode, &iloc);
5944 out:
5945         if (unlikely(err))
5946                 ext4_error_inode_err(inode, func, line, 0, err,
5947                                         "mark_inode_dirty error");
5948         return err;
5949 }
5950
5951 /*
5952  * ext4_dirty_inode() is called from __mark_inode_dirty()
5953  *
5954  * We're really interested in the case where a file is being extended.
5955  * i_size has been changed by generic_commit_write() and we thus need
5956  * to include the updated inode in the current transaction.
5957  *
5958  * Also, dquot_alloc_block() will always dirty the inode when blocks
5959  * are allocated to the file.
5960  *
5961  * If the inode is marked synchronous, we don't honour that here - doing
5962  * so would cause a commit on atime updates, which we don't bother doing.
5963  * We handle synchronous inodes at the highest possible level.
5964  */
5965 void ext4_dirty_inode(struct inode *inode, int flags)
5966 {
5967         handle_t *handle;
5968
5969         handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
5970         if (IS_ERR(handle))
5971                 return;
5972         ext4_mark_inode_dirty(handle, inode);
5973         ext4_journal_stop(handle);
5974 }
5975
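/*
 * Editorial note: this is reached when the per-inode journal-data flag
 * is toggled (e.g. via the FS_IOC_SETFLAGS ioctl), switching the inode
 * between the ordered/writeback and data=journal address_space ops.
 */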
5976 int ext4_change_inode_journal_flag(struct inode *inode, int val)
5977 {
5978         journal_t *journal;
5979         handle_t *handle;
5980         int err;
5981         int alloc_ctx;
5982
5983         /*
5984          * We have to be very careful here: changing a data block's
5985          * journaling status dynamically is dangerous.  If we write a
5986          * data block to the journal, change the status and then delete
5987          * that block, we risk forgetting to revoke the old log record
5988          * from the journal and so a subsequent replay can corrupt data.
5989          * So, first we make sure that the journal is empty and that
5990          * nobody is changing anything.
5991          */
5992
5993         journal = EXT4_JOURNAL(inode);
5994         if (!journal)
5995                 return 0;
5996         if (is_journal_aborted(journal))
5997                 return -EROFS;
5998
5999         /* Wait for all existing dio workers */
6000         inode_dio_wait(inode);
6001
6002         /*
6003          * Before flushing the journal and switching inode's aops, we have
6004          * to flush all dirty data the inode has. There can be outstanding
6005          * delayed allocations, there can be unwritten extents created by
6006          * fallocate or buffered writes in dioread_nolock mode covered by
6007          * dirty data which can be converted only after flushing the dirty
6008          * data (and journalled aops don't know how to handle these cases).
6009          */
6010         if (val) {
6011                 filemap_invalidate_lock(inode->i_mapping);
6012                 err = filemap_write_and_wait(inode->i_mapping);
6013                 if (err < 0) {
6014                         filemap_invalidate_unlock(inode->i_mapping);
6015                         return err;
6016                 }
6017         }
6018
6019         alloc_ctx = ext4_writepages_down_write(inode->i_sb);
6020         jbd2_journal_lock_updates(journal);
6021
6022         /*
6023          * OK, there are no updates running now, and all cached data is
6024          * synced to disk.  We are now in a completely consistent state
6025          * which doesn't have anything in the journal, and we know that
6026          * no filesystem updates are running, so it is safe to modify
6027          * the inode's in-core data-journaling state flag now.
6028          */
6029
6030         if (val)
6031                 ext4_set_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
6032         else {
6033                 err = jbd2_journal_flush(journal, 0);
6034                 if (err < 0) {
6035                         jbd2_journal_unlock_updates(journal);
6036                         ext4_writepages_up_write(inode->i_sb, alloc_ctx);
6037                         return err;
6038                 }
6039                 ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
6040         }
6041         ext4_set_aops(inode);
6042
6043         jbd2_journal_unlock_updates(journal);
6044         ext4_writepages_up_write(inode->i_sb, alloc_ctx);
6045
6046         if (val)
6047                 filemap_invalidate_unlock(inode->i_mapping);
6048
6049         /* Finally we can mark the inode as dirty. */
6050
6051         handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
6052         if (IS_ERR(handle))
6053                 return PTR_ERR(handle);
6054
6055         ext4_fc_mark_ineligible(inode->i_sb,
6056                 EXT4_FC_REASON_JOURNAL_FLAG_CHANGE, handle);
6057         err = ext4_mark_inode_dirty(handle, inode);
6058         ext4_handle_sync(handle);
6059         ext4_journal_stop(handle);
6060         ext4_std_error(inode->i_sb, err);
6061
6062         return err;
6063 }
6064
6065 static int ext4_bh_unmapped(handle_t *handle, struct inode *inode,
6066                             struct buffer_head *bh)
6067 {
6068         return !buffer_mapped(bh);
6069 }
6070
6071 vm_fault_t ext4_page_mkwrite(struct vm_fault *vmf)
6072 {
6073         struct vm_area_struct *vma = vmf->vma;
6074         struct folio *folio = page_folio(vmf->page);
6075         loff_t size;
6076         unsigned long len;
6077         int err;
6078         vm_fault_t ret;
6079         struct file *file = vma->vm_file;
6080         struct inode *inode = file_inode(file);
6081         struct address_space *mapping = inode->i_mapping;
6082         handle_t *handle;
6083         get_block_t *get_block;
6084         int retries = 0;
6085
6086         if (unlikely(IS_IMMUTABLE(inode)))
6087                 return VM_FAULT_SIGBUS;
6088
6089         sb_start_pagefault(inode->i_sb);
6090         file_update_time(vma->vm_file);
6091
6092         filemap_invalidate_lock_shared(mapping);
6093
6094         err = ext4_convert_inline_data(inode);
6095         if (err)
6096                 goto out_ret;
6097
6098         /*
6099          * On data journalling we skip straight to the transaction handle:
6100          * there's no delalloc; page truncation will be checked later; the
6101          * early return w/ all buffers mapped (calculates size/len) can't
6102          * be used; and there's no dioread_nolock, so only ext4_get_block.
6103          */
6104         if (ext4_should_journal_data(inode))
6105                 goto retry_alloc;
6106
6107         /* Delalloc case is easy... */
6108         if (test_opt(inode->i_sb, DELALLOC) &&
6109             !ext4_nonda_switch(inode->i_sb)) {
6110                 do {
6111                         err = block_page_mkwrite(vma, vmf,
6112                                                    ext4_da_get_block_prep);
6113                 } while (err == -ENOSPC &&
6114                        ext4_should_retry_alloc(inode->i_sb, &retries));
6115                 goto out_ret;
6116         }
6117
6118         folio_lock(folio);
6119         size = i_size_read(inode);
6120         /* Page got truncated from under us? */
6121         if (folio->mapping != mapping || folio_pos(folio) > size) {
6122                 folio_unlock(folio);
6123                 ret = VM_FAULT_NOPAGE;
6124                 goto out;
6125         }
6126
6127         len = folio_size(folio);
6128         if (folio_pos(folio) + len > size)
6129                 len = size - folio_pos(folio);
6130         /*
6131          * Return if we have all the buffers mapped. This avoids the need to do
6132          * journal_start/journal_stop which can block and take a long time
6133          *
6134          * This cannot be done for data journalling, as we have to add the
6135          * inode to the transaction's list to writeprotect pages on commit.
6136          */
6137         if (folio_buffers(folio)) {
6138                 if (!ext4_walk_page_buffers(NULL, inode, folio_buffers(folio),
6139                                             0, len, NULL,
6140                                             ext4_bh_unmapped)) {
6141                         /* Wait so that we don't change page under IO */
6142                         folio_wait_stable(folio);
6143                         ret = VM_FAULT_LOCKED;
6144                         goto out;
6145                 }
6146         }
6147         folio_unlock(folio);
6148         /* OK, we need to fill the hole... */
6149         if (ext4_should_dioread_nolock(inode))
6150                 get_block = ext4_get_block_unwritten;
6151         else
6152                 get_block = ext4_get_block;
6153 retry_alloc:
6154         handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
6155                                     ext4_writepage_trans_blocks(inode));
6156         if (IS_ERR(handle)) {
6157                 ret = VM_FAULT_SIGBUS;
6158                 goto out;
6159         }
6160         /*
6161          * Data journalling can't use block_page_mkwrite() because it
6162          * will set_buffer_dirty() before do_journal_get_write_access()
6163          * thus might hit warning messages for dirty metadata buffers.
6164          */
6165         if (!ext4_should_journal_data(inode)) {
6166                 err = block_page_mkwrite(vma, vmf, get_block);
6167         } else {
6168                 folio_lock(folio);
6169                 size = i_size_read(inode);
6170                 /* Page got truncated from under us? */
6171                 if (folio->mapping != mapping || folio_pos(folio) > size) {
6172                         ret = VM_FAULT_NOPAGE;
6173                         goto out_error;
6174                 }
6175
6176                 len = folio_size(folio);
6177                 if (folio_pos(folio) + len > size)
6178                         len = size - folio_pos(folio);
6179
6180                 err = __block_write_begin(&folio->page, 0, len, ext4_get_block);
6181                 if (!err) {
6182                         ret = VM_FAULT_SIGBUS;
6183                         if (ext4_journal_folio_buffers(handle, folio, len))
6184                                 goto out_error;
6185                 } else {
6186                         folio_unlock(folio);
6187                 }
6188         }
6189         ext4_journal_stop(handle);
6190         if (err == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
6191                 goto retry_alloc;
6192 out_ret:
6193         ret = vmf_fs_error(err);
6194 out:
6195         filemap_invalidate_unlock_shared(mapping);
6196         sb_end_pagefault(inode->i_sb);
6197         return ret;
6198 out_error:
6199         folio_unlock(folio);
6200         ext4_journal_stop(handle);
6201         goto out;
6202 }