// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ext4/resize.c
 *
 * Support for resizing an ext4 filesystem while it is mounted.
 *
 * This could probably be made into a module, because it is not often in use.
 */

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/jiffies.h>

#include "ext4_jbd2.h"
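/*
 * The group descriptor buffer_head array can be walked locklessly (under
 * rcu_read_lock()) while a resize publishes a replacement, so the old array
 * must not be freed until a grace period has elapsed; the callback below
 * performs the deferred kvfree().
 */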
struct ext4_rcu_ptr {
	struct rcu_head rcu;
	void *ptr;
};

static void ext4_rcu_ptr_callback(struct rcu_head *head)
{
	struct ext4_rcu_ptr *ptr;

	ptr = container_of(head, struct ext4_rcu_ptr, rcu);
	kvfree(ptr->ptr);
	kfree(ptr);
}

void ext4_kvfree_array_rcu(void *to_free)
{
	struct ext4_rcu_ptr *ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);

	if (ptr) {
		ptr->ptr = to_free;
		call_rcu(&ptr->rcu, ext4_rcu_ptr_callback);
		return;
	}
	synchronize_rcu();
	kvfree(to_free);
}
int ext4_resize_begin(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int ret = 0;

	if (!capable(CAP_SYS_RESOURCE))
		return -EPERM;

	/*
	 * If the reserved GDT blocks is non-zero, the resize_inode feature
	 * should always be set.
	 */
	if (sbi->s_es->s_reserved_gdt_blocks &&
	    !ext4_has_feature_resize_inode(sb)) {
		ext4_error(sb, "resize_inode disabled but reserved GDT blocks non-zero");
		return -EFSCORRUPTED;
	}

	/*
	 * If we are not using the primary superblock/GDT copy don't resize,
	 * because the user tools have no way of handling this.  Probably a
	 * bad time to do it anyways.
	 */
	if (EXT4_B2C(sbi, sbi->s_sbh->b_blocknr) !=
	    le32_to_cpu(sbi->s_es->s_first_data_block)) {
		ext4_warning(sb, "won't resize using backup superblock at %llu",
			     (unsigned long long)sbi->s_sbh->b_blocknr);
		return -EPERM;
	}

	/*
	 * We are not allowed to do online-resizing on a filesystem mounted
	 * with error, because it can destroy the filesystem easily.
	 */
	if (sbi->s_mount_state & EXT4_ERROR_FS) {
		ext4_warning(sb, "There are errors in the filesystem, "
			     "so online resizing is not allowed");
		return -EPERM;
	}

	if (ext4_has_feature_sparse_super2(sb)) {
		ext4_msg(sb, KERN_ERR, "Online resizing not supported with sparse_super2");
		return -EOPNOTSUPP;
	}

	if (test_and_set_bit_lock(EXT4_FLAGS_RESIZING,
				  &EXT4_SB(sb)->s_ext4_flags))
		ret = -EBUSY;

	return ret;
}
int ext4_resize_end(struct super_block *sb, bool update_backups)
{
	clear_bit_unlock(EXT4_FLAGS_RESIZING, &EXT4_SB(sb)->s_ext4_flags);
	smp_mb__after_atomic();
	if (update_backups)
		return ext4_update_overhead(sb, true);
	return 0;
}
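/*
 * Rough usage sketch (the resize ioctl paths follow this shape; details such
 * as mnt_want_write_file() handling and journal flushing are omitted here):
 *
 *	err = ext4_resize_begin(sb);
 *	if (!err) {
 *		err = ext4_resize_fs(sb, n_blocks_count);
 *		err2 = ext4_resize_end(sb, true);
 *	}
 */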
static ext4_grpblk_t ext4_group_overhead_blocks(struct super_block *sb,
						ext4_group_t group)
{
	ext4_grpblk_t overhead;

	overhead = ext4_bg_num_gdb(sb, group);
	if (ext4_bg_has_super(sb, group))
		overhead += 1 +
			le16_to_cpu(EXT4_SB(sb)->s_es->s_reserved_gdt_blocks);

	return overhead;
}
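/*
 * Range helpers: both treat the range as half-open, i.e. @first is included
 * and @last is excluded.
 */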
#define outside(b, first, last)	((b) < (first) || (b) >= (last))
#define inside(b, first, last)	((b) >= (first) && (b) < (last))
static int verify_group_input(struct super_block *sb,
			      struct ext4_new_group_data *input)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	ext4_fsblk_t start = ext4_blocks_count(es);
	ext4_fsblk_t end = start + input->blocks_count;
	ext4_group_t group = input->group;
	ext4_fsblk_t itend = input->inode_table + sbi->s_itb_per_group;
	unsigned overhead;
	ext4_fsblk_t metaend;
	struct buffer_head *bh = NULL;
	ext4_grpblk_t free_blocks_count, offset;
	int err = -EINVAL;

	if (group != sbi->s_groups_count) {
		ext4_warning(sb, "Cannot add at group %u (only %u groups)",
			     input->group, sbi->s_groups_count);
		return -EINVAL;
	}

	overhead = ext4_group_overhead_blocks(sb, group);
	metaend = start + overhead;
	free_blocks_count = input->blocks_count - 2 - overhead -
			    sbi->s_itb_per_group;
	input->free_clusters_count = EXT4_B2C(sbi, free_blocks_count);

	if (test_opt(sb, DEBUG))
		printk(KERN_DEBUG "EXT4-fs: adding %s group %u: %u blocks "
		       "(%d free, %u reserved)\n",
		       ext4_bg_has_super(sb, input->group) ? "normal" :
		       "no-super", input->group, input->blocks_count,
		       free_blocks_count, input->reserved_blocks);

	ext4_get_group_no_and_offset(sb, start, NULL, &offset);
	if (offset != 0)
		ext4_warning(sb, "Last group not full");
	else if (input->reserved_blocks > input->blocks_count / 5)
		ext4_warning(sb, "Reserved blocks too high (%u)",
			     input->reserved_blocks);
	else if (free_blocks_count < 0)
		ext4_warning(sb, "Bad blocks count %u",
			     input->blocks_count);
	else if (IS_ERR(bh = ext4_sb_bread(sb, end - 1, 0))) {
		err = PTR_ERR(bh);
		bh = NULL;
		ext4_warning(sb, "Cannot read last block (%llu)",
			     end - 1);
	} else if (outside(input->block_bitmap, start, end))
		ext4_warning(sb, "Block bitmap not in group (block %llu)",
			     (unsigned long long)input->block_bitmap);
	else if (outside(input->inode_bitmap, start, end))
		ext4_warning(sb, "Inode bitmap not in group (block %llu)",
			     (unsigned long long)input->inode_bitmap);
	else if (outside(input->inode_table, start, end) ||
		 outside(itend - 1, start, end))
		ext4_warning(sb, "Inode table not in group (blocks %llu-%llu)",
			     (unsigned long long)input->inode_table, itend - 1);
	else if (input->inode_bitmap == input->block_bitmap)
		ext4_warning(sb, "Block bitmap same as inode bitmap (%llu)",
			     (unsigned long long)input->block_bitmap);
	else if (inside(input->block_bitmap, input->inode_table, itend))
		ext4_warning(sb, "Block bitmap (%llu) in inode table "
			     "(%llu-%llu)",
			     (unsigned long long)input->block_bitmap,
			     (unsigned long long)input->inode_table, itend - 1);
	else if (inside(input->inode_bitmap, input->inode_table, itend))
		ext4_warning(sb, "Inode bitmap (%llu) in inode table "
			     "(%llu-%llu)",
			     (unsigned long long)input->inode_bitmap,
			     (unsigned long long)input->inode_table, itend - 1);
	else if (inside(input->block_bitmap, start, metaend))
		ext4_warning(sb, "Block bitmap (%llu) in GDT table (%llu-%llu)",
			     (unsigned long long)input->block_bitmap,
			     start, metaend - 1);
	else if (inside(input->inode_bitmap, start, metaend))
		ext4_warning(sb, "Inode bitmap (%llu) in GDT table (%llu-%llu)",
			     (unsigned long long)input->inode_bitmap,
			     start, metaend - 1);
	else if (inside(input->inode_table, start, metaend) ||
		 inside(itend - 1, start, metaend))
		ext4_warning(sb, "Inode table (%llu-%llu) overlaps GDT table "
			     "(%llu-%llu)",
			     (unsigned long long)input->inode_table,
			     itend - 1, start, metaend - 1);
	else
		err = 0;
	brelse(bh);

	return err;
}
/*
 * ext4_new_flex_group_data is used by the 64bit-resize interface to add a
 * flex group.
 */
struct ext4_new_flex_group_data {
	struct ext4_new_group_data *groups;	/* new_group_data for groups
						   in the flex group */
	__u16 *bg_flags;			/* block group flags of groups
						   in @groups */
	ext4_group_t resize_bg;			/* number of allocated
						   new_group_data */
	ext4_group_t count;			/* number of groups in @groups */
};

/*
 * Avoid memory allocation failures due to too many groups being added in
 * one batch.
 */
#define MAX_RESIZE_BG				16384
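/*
 * Rough sizing: with struct ext4_new_group_data at a few dozen bytes, capping
 * one batch at 16384 groups keeps each kmalloc_array() in alloc_flex_gd()
 * around a megabyte at most (estimate).
 */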
/*
 * alloc_flex_gd() allocates an ext4_new_flex_group_data of size
 * flexbg_size.
 *
 * Returns NULL on failure, otherwise the address of the allocated structure.
 */
static struct ext4_new_flex_group_data *alloc_flex_gd(unsigned int flexbg_size,
				ext4_group_t o_group, ext4_group_t n_group)
{
	ext4_group_t last_group;
	struct ext4_new_flex_group_data *flex_gd;

	flex_gd = kmalloc(sizeof(*flex_gd), GFP_NOFS);
	if (flex_gd == NULL)
		goto out3;

	if (unlikely(flexbg_size > MAX_RESIZE_BG))
		flex_gd->resize_bg = MAX_RESIZE_BG;
	else
		flex_gd->resize_bg = flexbg_size;

	/* Avoid allocating a large 'groups' array if it is not needed */
	last_group = o_group | (flex_gd->resize_bg - 1);
	if (n_group <= last_group)
		flex_gd->resize_bg = 1 << fls(n_group - o_group + 1);
	else if (n_group - last_group < flex_gd->resize_bg)
		flex_gd->resize_bg = 1 << max(fls(last_group - o_group + 1),
					      fls(n_group - last_group));
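	/*
	 * Worked example: flexbg_size = 64, o_group = 100, n_group = 110.
	 * last_group = 100 | 63 = 127 >= n_group, so resize_bg shrinks to
	 * 1 << fls(11) = 16 instead of staying at 64.
	 */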

	flex_gd->groups = kmalloc_array(flex_gd->resize_bg,
					sizeof(struct ext4_new_group_data),
					GFP_NOFS);
	if (flex_gd->groups == NULL)
		goto out2;

	flex_gd->bg_flags = kmalloc_array(flex_gd->resize_bg, sizeof(__u16),
					  GFP_NOFS);
	if (flex_gd->bg_flags == NULL)
		goto out1;

	return flex_gd;

out1:
	kfree(flex_gd->groups);
out2:
	kfree(flex_gd);
out3:
	return NULL;
}
static void free_flex_gd(struct ext4_new_flex_group_data *flex_gd)
{
	kfree(flex_gd->bg_flags);
	kfree(flex_gd->groups);
	kfree(flex_gd);
}
/*
 * ext4_alloc_group_tables() allocates block bitmaps, inode bitmaps
 * and inode tables for a flex group.
 *
 * This function is used by 64bit-resize.  Note that this function allocates
 * group tables from the 1st group of the groups contained by @flexgd, which
 * may be only part of a flex group.
 *
 * @sb: super block of the fs to which the groups belong
 *
 * Returns 0 on a successful allocation of the metadata blocks in the
 * block group.
 */
static int ext4_alloc_group_tables(struct super_block *sb,
				struct ext4_new_flex_group_data *flex_gd,
				unsigned int flexbg_size)
{
	struct ext4_new_group_data *group_data = flex_gd->groups;
	ext4_fsblk_t start_blk;
	ext4_fsblk_t last_blk;
	ext4_group_t src_group;
	ext4_group_t bb_index = 0;
	ext4_group_t ib_index = 0;
	ext4_group_t it_index = 0;
	ext4_group_t group;
	ext4_group_t last_group;
	unsigned overhead;
	__u16 uninit_mask = (flexbg_size > 1) ? ~EXT4_BG_BLOCK_UNINIT : ~0;
	int i;

	BUG_ON(flex_gd->count == 0 || group_data == NULL);

	src_group = group_data[0].group;
	last_group = src_group + flex_gd->count - 1;

	BUG_ON((flexbg_size > 1) && ((src_group & ~(flexbg_size - 1)) !=
	       (last_group & ~(flexbg_size - 1))));
next_group:
	group = group_data[0].group;
	if (src_group >= group_data[0].group + flex_gd->count)
		return -ENOSPC;
	start_blk = ext4_group_first_block_no(sb, src_group);
	last_blk = start_blk + group_data[src_group - group].blocks_count;

	overhead = ext4_group_overhead_blocks(sb, src_group);

	start_blk += overhead;

	/* We collect contiguous blocks as much as possible. */
	src_group++;
	for (; src_group <= last_group; src_group++) {
		overhead = ext4_group_overhead_blocks(sb, src_group);
		if (overhead == 0)
			last_blk += group_data[src_group - group].blocks_count;
		else
			break;
	}

	/* Allocate block bitmaps */
	for (; bb_index < flex_gd->count; bb_index++) {
		if (start_blk >= last_blk)
			goto next_group;
		group_data[bb_index].block_bitmap = start_blk++;
		group = ext4_get_group_number(sb, start_blk - 1);
		group -= group_data[0].group;
		group_data[group].mdata_blocks++;
		flex_gd->bg_flags[group] &= uninit_mask;
	}

	/* Allocate inode bitmaps */
	for (; ib_index < flex_gd->count; ib_index++) {
		if (start_blk >= last_blk)
			goto next_group;
		group_data[ib_index].inode_bitmap = start_blk++;
		group = ext4_get_group_number(sb, start_blk - 1);
		group -= group_data[0].group;
		group_data[group].mdata_blocks++;
		flex_gd->bg_flags[group] &= uninit_mask;
	}

	/* Allocate inode tables */
	for (; it_index < flex_gd->count; it_index++) {
		unsigned int itb = EXT4_SB(sb)->s_itb_per_group;
		ext4_fsblk_t next_group_start;

		if (start_blk + itb > last_blk)
			goto next_group;
		group_data[it_index].inode_table = start_blk;
		group = ext4_get_group_number(sb, start_blk);
		next_group_start = ext4_group_first_block_no(sb, group + 1);
		group -= group_data[0].group;

		if (start_blk + itb > next_group_start) {
			flex_gd->bg_flags[group + 1] &= uninit_mask;
			overhead = start_blk + itb - next_group_start;
			group_data[group + 1].mdata_blocks += overhead;
			itb -= overhead;
		}

		group_data[group].mdata_blocks += itb;
		flex_gd->bg_flags[group] &= uninit_mask;
		start_blk += EXT4_SB(sb)->s_itb_per_group;
	}

	/* Update free clusters count to exclude metadata blocks */
	for (i = 0; i < flex_gd->count; i++) {
		group_data[i].free_clusters_count -=
				EXT4_NUM_B2C(EXT4_SB(sb),
					     group_data[i].mdata_blocks);
	}

	if (test_opt(sb, DEBUG)) {
		group = group_data[0].group;

		printk(KERN_DEBUG "EXT4-fs: adding a flex group with "
		       "%u groups, flexbg size is %u:\n", flex_gd->count,
		       flexbg_size);

		for (i = 0; i < flex_gd->count; i++) {
			ext4_debug(
			       "adding %s group %u: %u blocks (%u free, %u mdata blocks)\n",
			       ext4_bg_has_super(sb, group + i) ? "normal" :
			       "no-super", group + i,
			       group_data[i].blocks_count,
			       group_data[i].free_clusters_count,
			       group_data[i].mdata_blocks);
		}
	}
	return 0;
}
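/*
 * bclean() below returns a zeroed buffer already joined to the transaction;
 * callers set their bitmap bits in it and then mark it dirty themselves.
 */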
static struct buffer_head *bclean(handle_t *handle, struct super_block *sb,
				  ext4_fsblk_t blk)
{
	struct buffer_head *bh;
	int err;

	bh = sb_getblk(sb, blk);
	if (unlikely(!bh))
		return ERR_PTR(-ENOMEM);
	BUFFER_TRACE(bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, sb, bh, EXT4_JTR_NONE);
	if (err) {
		brelse(bh);
		return ERR_PTR(err);
	}
	memset(bh->b_data, 0, sb->s_blocksize);
	set_buffer_uptodate(bh);

	return bh;
}
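/*
 * Resize handles are extended in small batches: each call below asks for one
 * more credit, roughly meaning "ensure one credit is available, extending or
 * restarting the transaction with EXT4_MAX_TRANS_DATA credits when needed".
 */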
static int ext4_resize_ensure_credits_batch(handle_t *handle, int credits)
{
	return ext4_journal_ensure_credits_fn(handle, credits,
					      EXT4_MAX_TRANS_DATA, 0, 0);
}
/*
 * set_flexbg_block_bitmap() marks clusters [@first_cluster, @last_cluster]
 * used.
 *
 * Helper function for ext4_setup_new_group_blocks() which sets bits in the
 * block bitmaps for the blocks covered by this flex group.
 *
 * @sb: super block
 * @handle: journal handle
 * @flex_gd: flex group data
 */
static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
			struct ext4_new_flex_group_data *flex_gd,
			ext4_fsblk_t first_cluster, ext4_fsblk_t last_cluster)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_group_t count = last_cluster - first_cluster + 1;
	ext4_group_t count2;

	ext4_debug("mark clusters [%llu-%llu] used\n", first_cluster,
		   last_cluster);
	for (; count > 0; count -= count2, first_cluster += count2) {
		ext4_fsblk_t start;
		struct buffer_head *bh;
		ext4_group_t group;
		int err;

		group = ext4_get_group_number(sb, EXT4_C2B(sbi, first_cluster));
		start = EXT4_B2C(sbi, ext4_group_first_block_no(sb, group));
		group -= flex_gd->groups[0].group;

		count2 = EXT4_CLUSTERS_PER_GROUP(sb) - (first_cluster - start);
		if (count2 > count)
			count2 = count;

		if (flex_gd->bg_flags[group] & EXT4_BG_BLOCK_UNINIT) {
			BUG_ON(flex_gd->count > 1);
			continue;
		}

		err = ext4_resize_ensure_credits_batch(handle, 1);
		if (err < 0)
			return err;

		bh = sb_getblk(sb, flex_gd->groups[group].block_bitmap);
		if (unlikely(!bh))
			return -ENOMEM;

		BUFFER_TRACE(bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, sb, bh,
						    EXT4_JTR_NONE);
		if (err) {
			brelse(bh);
			return err;
		}
		ext4_debug("mark block bitmap %#04llx (+%llu/%u)\n",
			   first_cluster, first_cluster - start, count2);
		mb_set_bits(bh->b_data, first_cluster - start, count2);

		err = ext4_handle_dirty_metadata(handle, NULL, bh);
		brelse(bh);
		if (unlikely(err))
			return err;
	}

	return 0;
}
/*
 * Set up the block and inode bitmaps, and the inode table for the new groups.
 * This doesn't need to be part of the main transaction, since we are only
 * changing blocks outside the actual filesystem.  We still do journaling to
 * ensure the recovery is correct in case of a failure just after resize.
 * If any part of this fails, we simply abort the resize.
 *
 * setup_new_flex_group_blocks handles a flex group as follows:
 *  1. copy super block and GDT, and initialize group tables if necessary.
 *     In this step, we only set bits in block bitmaps for blocks taken by
 *     super block and GDT.
 *  2. allocate group tables in block bitmaps, that is, set bits in block
 *     bitmap for blocks taken by group tables.
 */
static int setup_new_flex_group_blocks(struct super_block *sb,
				struct ext4_new_flex_group_data *flex_gd)
{
	int group_table_count[] = {1, 1, EXT4_SB(sb)->s_itb_per_group};
	ext4_fsblk_t start;
	ext4_fsblk_t block;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct ext4_new_group_data *group_data = flex_gd->groups;
	__u16 *bg_flags = flex_gd->bg_flags;
	handle_t *handle;
	ext4_group_t group, count;
	struct buffer_head *bh = NULL;
	int reserved_gdb, i, j, err = 0, err2;
	int meta_bg;

	BUG_ON(!flex_gd->count || !group_data ||
	       group_data[0].group != sbi->s_groups_count);

	reserved_gdb = le16_to_cpu(es->s_reserved_gdt_blocks);
	meta_bg = ext4_has_feature_meta_bg(sb);

	/* This transaction may be extended/restarted along the way */
	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, EXT4_MAX_TRANS_DATA);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	group = group_data[0].group;
	for (i = 0; i < flex_gd->count; i++, group++) {
		unsigned long gdblocks;
		ext4_grpblk_t overhead;

		gdblocks = ext4_bg_num_gdb(sb, group);
		start = ext4_group_first_block_no(sb, group);

		if (meta_bg == 0 && !ext4_bg_has_super(sb, group))
			goto handle_itb;

		if (meta_bg == 1)
			goto handle_itb;

		block = start + ext4_bg_has_super(sb, group);
		/* Copy all of the GDT blocks into the backup in this group */
		for (j = 0; j < gdblocks; j++, block++) {
			struct buffer_head *gdb;

			ext4_debug("update backup group %#04llx\n", block);
			err = ext4_resize_ensure_credits_batch(handle, 1);
			if (err < 0)
				goto out;

			gdb = sb_getblk(sb, block);
			if (unlikely(!gdb)) {
				err = -ENOMEM;
				goto out;
			}

			BUFFER_TRACE(gdb, "get_write_access");
			err = ext4_journal_get_write_access(handle, sb, gdb,
							    EXT4_JTR_NONE);
			if (err) {
				brelse(gdb);
				goto out;
			}
			memcpy(gdb->b_data, sbi_array_rcu_deref(sbi,
				s_group_desc, j)->b_data, gdb->b_size);
			set_buffer_uptodate(gdb);

			err = ext4_handle_dirty_metadata(handle, NULL, gdb);
			if (unlikely(err)) {
				brelse(gdb);
				goto out;
			}
			brelse(gdb);
		}

		/* Zero out all of the reserved backup group descriptor
		 * table blocks
		 */
		if (ext4_bg_has_super(sb, group)) {
			err = sb_issue_zeroout(sb, gdblocks + start + 1,
					       reserved_gdb, GFP_NOFS);
			if (err)
				goto out;
		}

handle_itb:
		/* Initialize group tables of the group @group */
		if (!(bg_flags[i] & EXT4_BG_INODE_ZEROED))
			goto handle_bb;

		/* Zero out all of the inode table blocks */
		block = group_data[i].inode_table;
		ext4_debug("clear inode table blocks %#04llx -> %#04lx\n",
			   block, sbi->s_itb_per_group);
		err = sb_issue_zeroout(sb, block, sbi->s_itb_per_group,
				       GFP_NOFS);
		if (err)
			goto out;

handle_bb:
		if (bg_flags[i] & EXT4_BG_BLOCK_UNINIT)
			goto handle_ib;

		/* Initialize block bitmap of the @group */
		block = group_data[i].block_bitmap;
		err = ext4_resize_ensure_credits_batch(handle, 1);
		if (err < 0)
			goto out;

		bh = bclean(handle, sb, block);
		if (IS_ERR(bh)) {
			err = PTR_ERR(bh);
			goto out;
		}
		overhead = ext4_group_overhead_blocks(sb, group);
		if (overhead != 0) {
			ext4_debug("mark backup superblock %#04llx (+0)\n",
				   start);
			mb_set_bits(bh->b_data, 0,
				    EXT4_NUM_B2C(sbi, overhead));
		}
		ext4_mark_bitmap_end(EXT4_B2C(sbi, group_data[i].blocks_count),
				     sb->s_blocksize * 8, bh->b_data);
		err = ext4_handle_dirty_metadata(handle, NULL, bh);
		if (err)
			goto out;
		brelse(bh);

handle_ib:
		if (bg_flags[i] & EXT4_BG_INODE_UNINIT)
			continue;

		/* Initialize inode bitmap of the @group */
		block = group_data[i].inode_bitmap;
		err = ext4_resize_ensure_credits_batch(handle, 1);
		if (err < 0)
			goto out;
		/* Mark unused entries in inode bitmap used */
		bh = bclean(handle, sb, block);
		if (IS_ERR(bh)) {
			err = PTR_ERR(bh);
			goto out;
		}

		ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb),
				     sb->s_blocksize * 8, bh->b_data);
		err = ext4_handle_dirty_metadata(handle, NULL, bh);
		if (err)
			goto out;
		brelse(bh);
	}

	/* Mark group tables in block bitmap */
	for (j = 0; j < GROUP_TABLE_COUNT; j++) {
		count = group_table_count[j];
		start = (&group_data[0].block_bitmap)[j];
		block = start;
		for (i = 1; i < flex_gd->count; i++) {
			block += group_table_count[j];
			if (block == (&group_data[i].block_bitmap)[j]) {
				count += group_table_count[j];
				continue;
			}
			err = set_flexbg_block_bitmap(sb, handle,
						      flex_gd,
						      EXT4_B2C(sbi, start),
						      EXT4_B2C(sbi, start + count - 1));
			if (err)
				goto out;
			count = group_table_count[j];
			start = (&group_data[i].block_bitmap)[j];
			block = start;
		}

		err = set_flexbg_block_bitmap(sb, handle,
					      flex_gd,
					      EXT4_B2C(sbi, start),
					      EXT4_B2C(sbi, start + count - 1));
		if (err)
			goto out;
	}

out:
	err2 = ext4_journal_stop(handle);
	if (err2 && !err)
		err = err2;

	return err;
}
/*
 * Iterate through the groups which hold BACKUP superblock/GDT copies in an
 * ext4 filesystem.  The counters should be initialized to 1, 5, and 7 before
 * calling this for the first time.  In a sparse filesystem it will be the
 * sequence of powers of 3, 5, and 7: 1, 3, 5, 7, 9, 25, 27, 49, 81, ...
 * For a non-sparse filesystem it will be every group: 1, 2, 3, 4, ...
 */
unsigned int ext4_list_backups(struct super_block *sb, unsigned int *three,
			       unsigned int *five, unsigned int *seven)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
	unsigned int *min = three;
	int mult = 3;
	unsigned int ret;

	if (ext4_has_feature_sparse_super2(sb)) {
		do {
			if (*min > 2)
				return UINT_MAX;
			ret = le32_to_cpu(es->s_backup_bgs[*min - 1]);
			*min += 1;
		} while (!ret);
		return ret;
	}

	if (!ext4_has_feature_sparse_super(sb)) {
		ret = *min;
		*min += 1;
		return ret;
	}

	if (*five < *min) {
		min = five;
		mult = 5;
	}
	if (*seven < *min) {
		min = seven;
		mult = 7;
	}

	ret = *min;
	*min *= mult;

	return ret;
}
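/*
 * Typical consumer pattern (verify_reserved_gdb() and update_backups() below
 * both follow it):
 *
 *	unsigned three = 1, five = 5, seven = 7;
 *	unsigned grp;
 *
 *	while ((grp = ext4_list_backups(sb, &three, &five, &seven)) < end)
 *		...grp is the next group holding backup copies...
 */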
/*
 * Check that all of the backup GDT blocks are held in the primary GDT block.
 * It is assumed that they are stored in group order.  Returns the number of
 * groups in current filesystem that have BACKUPS, or -ve error code.
 */
static int verify_reserved_gdb(struct super_block *sb,
			       ext4_group_t end,
			       struct buffer_head *primary)
{
	const ext4_fsblk_t blk = primary->b_blocknr;
	unsigned three = 1;
	unsigned five = 5;
	unsigned seven = 7;
	unsigned grp;
	__le32 *p = (__le32 *)primary->b_data;
	int gdbackups = 0;

	while ((grp = ext4_list_backups(sb, &three, &five, &seven)) < end) {
		if (le32_to_cpu(*p++) !=
		    grp * EXT4_BLOCKS_PER_GROUP(sb) + blk) {
			ext4_warning(sb, "reserved GDT %llu"
				     " missing grp %d (%llu)",
				     blk, grp,
				     grp *
				     (ext4_fsblk_t)EXT4_BLOCKS_PER_GROUP(sb) +
				     blk);
			return -EINVAL;
		}
		if (++gdbackups > EXT4_ADDR_PER_BLOCK(sb))
			return -EFBIG;
	}

	return gdbackups;
}
/*
 * Called when we need to bring a reserved group descriptor table block into
 * use from the resize inode.  The primary copy of the new GDT block currently
 * is an indirect block (under the double indirect block in the resize inode).
 * The new backup GDT blocks will be stored as leaf blocks in this indirect
 * block, in group order.  Even though we know all the block numbers we need,
 * we check to ensure that the resize inode has actually reserved these blocks.
 *
 * Don't need to update the block bitmaps because the blocks are still in use.
 *
 * We get all of the error cases out of the way, so that we are sure to not
 * fail once we start modifying the data on disk, because JBD has no rollback.
 */
static int add_new_gdb(handle_t *handle, struct inode *inode,
		       ext4_group_t group)
{
	struct super_block *sb = inode->i_sb;
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
	unsigned long gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
	ext4_fsblk_t gdblock = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + gdb_num;
	struct buffer_head **o_group_desc, **n_group_desc = NULL;
	struct buffer_head *dind = NULL;
	struct buffer_head *gdb_bh = NULL;
	int gdbackups;
	struct ext4_iloc iloc = { .bh = NULL };
	__le32 *data;
	int err;

	if (test_opt(sb, DEBUG))
		printk(KERN_DEBUG
		       "EXT4-fs: ext4_add_new_gdb: adding group block %lu\n",
		       gdb_num);

	gdb_bh = ext4_sb_bread(sb, gdblock, 0);
	if (IS_ERR(gdb_bh))
		return PTR_ERR(gdb_bh);

	gdbackups = verify_reserved_gdb(sb, group, gdb_bh);
	if (gdbackups < 0) {
		err = gdbackups;
		goto errout;
	}

	data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK;
	dind = ext4_sb_bread(sb, le32_to_cpu(*data), 0);
	if (IS_ERR(dind)) {
		err = PTR_ERR(dind);
		dind = NULL;
		goto errout;
	}

	data = (__le32 *)dind->b_data;
	if (le32_to_cpu(data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)]) != gdblock) {
		ext4_warning(sb, "new group %u GDT block %llu not reserved",
			     group, gdblock);
		err = -EINVAL;
		goto errout;
	}

	BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access");
	err = ext4_journal_get_write_access(handle, sb, EXT4_SB(sb)->s_sbh,
					    EXT4_JTR_NONE);
	if (unlikely(err))
		goto errout;

	BUFFER_TRACE(gdb_bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, sb, gdb_bh, EXT4_JTR_NONE);
	if (unlikely(err))
		goto errout;

	BUFFER_TRACE(dind, "get_write_access");
	err = ext4_journal_get_write_access(handle, sb, dind, EXT4_JTR_NONE);
	if (unlikely(err)) {
		ext4_std_error(sb, err);
		goto errout;
	}

	/* ext4_reserve_inode_write() gets a reference on the iloc */
	err = ext4_reserve_inode_write(handle, inode, &iloc);
	if (unlikely(err))
		goto errout;

	n_group_desc = kvmalloc((gdb_num + 1) * sizeof(struct buffer_head *),
				GFP_KERNEL);
	if (!n_group_desc) {
		err = -ENOMEM;
		ext4_warning(sb, "not enough memory for %lu groups",
			     gdb_num + 1);
		goto errout;
	}

	/*
	 * Finally, we have all of the possible failures behind us...
	 *
	 * Remove new GDT block from inode double-indirect block and clear out
	 * the new GDT block for use (which also "frees" the backup GDT blocks
	 * from the reserved inode).  We don't need to change the bitmaps for
	 * these blocks, because they are marked as in-use from being in the
	 * reserved inode, and will become GDT blocks (primary and backup).
	 */
	data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)] = 0;
	err = ext4_handle_dirty_metadata(handle, NULL, dind);
	if (unlikely(err)) {
		ext4_std_error(sb, err);
		goto errout;
	}
	inode->i_blocks -= (gdbackups + 1) * sb->s_blocksize >>
			   (9 - EXT4_SB(sb)->s_cluster_bits);
	ext4_mark_iloc_dirty(handle, inode, &iloc);
	memset(gdb_bh->b_data, 0, sb->s_blocksize);
	err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
	if (unlikely(err)) {
		ext4_std_error(sb, err);
		iloc.bh = NULL;
		goto errout;
	}
	brelse(dind);

	rcu_read_lock();
	o_group_desc = rcu_dereference(EXT4_SB(sb)->s_group_desc);
	memcpy(n_group_desc, o_group_desc,
	       EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
	rcu_read_unlock();
	n_group_desc[gdb_num] = gdb_bh;
	rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc);
	EXT4_SB(sb)->s_gdb_count++;
	ext4_kvfree_array_rcu(o_group_desc);

	lock_buffer(EXT4_SB(sb)->s_sbh);
	le16_add_cpu(&es->s_reserved_gdt_blocks, -1);
	ext4_superblock_csum_set(sb);
	unlock_buffer(EXT4_SB(sb)->s_sbh);
	err = ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
	if (err)
		ext4_std_error(sb, err);
	return err;
errout:
	kvfree(n_group_desc);
	brelse(iloc.bh);
	brelse(dind);
	brelse(gdb_bh);

	ext4_debug("leaving with error %d\n", err);
	return err;
}
/*
 * If there is no available space in the existing block group descriptors for
 * the new block group and there are no reserved block group descriptors, then
 * the meta_bg feature will get enabled, and es->s_first_meta_bg will get set
 * to the first block group that is managed using meta_bg; s_first_meta_bg
 * must be a multiple of EXT4_DESC_PER_BLOCK(sb).
 * This function is called when the first group of a meta_bg is added, to
 * bring in the group descriptor block of the newly added meta_bg.
 */
static int add_new_gdb_meta_bg(struct super_block *sb,
			       handle_t *handle, ext4_group_t group)
{
	ext4_fsblk_t gdblock;
	struct buffer_head *gdb_bh;
	struct buffer_head **o_group_desc, **n_group_desc;
	unsigned long gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
	int err;

	gdblock = ext4_group_first_block_no(sb, group) +
		  ext4_bg_has_super(sb, group);
	gdb_bh = ext4_sb_bread(sb, gdblock, 0);
	if (IS_ERR(gdb_bh))
		return PTR_ERR(gdb_bh);
	n_group_desc = kvmalloc((gdb_num + 1) * sizeof(struct buffer_head *),
				GFP_KERNEL);
	if (!n_group_desc) {
		brelse(gdb_bh);
		err = -ENOMEM;
		ext4_warning(sb, "not enough memory for %lu groups",
			     gdb_num + 1);
		return err;
	}

	rcu_read_lock();
	o_group_desc = rcu_dereference(EXT4_SB(sb)->s_group_desc);
	memcpy(n_group_desc, o_group_desc,
	       EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
	rcu_read_unlock();
	n_group_desc[gdb_num] = gdb_bh;

	BUFFER_TRACE(gdb_bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, sb, gdb_bh, EXT4_JTR_NONE);
	if (err) {
		kvfree(n_group_desc);
		brelse(gdb_bh);
		return err;
	}

	rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc);
	EXT4_SB(sb)->s_gdb_count++;
	ext4_kvfree_array_rcu(o_group_desc);
	return err;
}
/*
 * Called when we are adding a new group which has a backup copy of each of
 * the GDT blocks (i.e. sparse group) and there are reserved GDT blocks.
 * We need to add these reserved backup GDT blocks to the resize inode, so
 * that they are kept for future resizing and not allocated to files.
 *
 * Each reserved backup GDT block will go into a different indirect block.
 * The indirect blocks are actually the primary reserved GDT blocks,
 * so we know in advance what their block numbers are.  We only get the
 * double-indirect block to verify it is pointing to the primary reserved
 * GDT blocks so we don't overwrite a data block by accident.  The reserved
 * backup GDT blocks are stored in their reserved primary GDT block.
 */
static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
			      ext4_group_t group)
{
	struct super_block *sb = inode->i_sb;
	int reserved_gdb = le16_to_cpu(EXT4_SB(sb)->s_es->s_reserved_gdt_blocks);
	int cluster_bits = EXT4_SB(sb)->s_cluster_bits;
	struct buffer_head **primary;
	struct buffer_head *dind;
	struct ext4_iloc iloc;
	ext4_fsblk_t blk;
	__le32 *data, *end;
	int gdbackups = 0;
	int res, i;
	int err;

	primary = kmalloc_array(reserved_gdb, sizeof(*primary), GFP_NOFS);
	if (!primary)
		return -ENOMEM;

	data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK;
	dind = ext4_sb_bread(sb, le32_to_cpu(*data), 0);
	if (IS_ERR(dind)) {
		err = PTR_ERR(dind);
		dind = NULL;
		goto exit_free;
	}

	blk = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + EXT4_SB(sb)->s_gdb_count;
	data = (__le32 *)dind->b_data + (EXT4_SB(sb)->s_gdb_count %
					 EXT4_ADDR_PER_BLOCK(sb));
	end = (__le32 *)dind->b_data + EXT4_ADDR_PER_BLOCK(sb);

	/* Get each reserved primary GDT block and verify it holds backups */
	for (res = 0; res < reserved_gdb; res++, blk++) {
		if (le32_to_cpu(*data) != blk) {
			ext4_warning(sb, "reserved block %llu"
				     " not at offset %ld",
				     blk,
				     (long)(data - (__le32 *)dind->b_data));
			err = -EINVAL;
			goto exit_bh;
		}
		primary[res] = ext4_sb_bread(sb, blk, 0);
		if (IS_ERR(primary[res])) {
			err = PTR_ERR(primary[res]);
			primary[res] = NULL;
			goto exit_bh;
		}
		gdbackups = verify_reserved_gdb(sb, group, primary[res]);
		if (gdbackups < 0) {
			brelse(primary[res]);
			err = gdbackups;
			goto exit_bh;
		}
		if (++data >= end)
			data = (__le32 *)dind->b_data;
	}

	for (i = 0; i < reserved_gdb; i++) {
		BUFFER_TRACE(primary[i], "get_write_access");
		if ((err = ext4_journal_get_write_access(handle, sb, primary[i],
							 EXT4_JTR_NONE)))
			goto exit_bh;
	}

	if ((err = ext4_reserve_inode_write(handle, inode, &iloc)))
		goto exit_bh;

	/*
	 * Finally we can add each of the reserved backup GDT blocks from
	 * the new group to its reserved primary GDT block.
	 */
	blk = group * EXT4_BLOCKS_PER_GROUP(sb);
	for (i = 0; i < reserved_gdb; i++) {
		int err2;
		data = (__le32 *)primary[i]->b_data;
		data[gdbackups] = cpu_to_le32(blk + primary[i]->b_blocknr);
		err2 = ext4_handle_dirty_metadata(handle, NULL, primary[i]);
		if (!err)
			err = err2;
	}

	inode->i_blocks += reserved_gdb * sb->s_blocksize >> (9 - cluster_bits);
	ext4_mark_iloc_dirty(handle, inode, &iloc);

exit_bh:
	while (--res >= 0)
		brelse(primary[res]);
	brelse(dind);

exit_free:
	kfree(primary);

	return err;
}
static inline void ext4_set_block_group_nr(struct super_block *sb, char *data,
					   ext4_group_t group)
{
	struct ext4_super_block *es = (struct ext4_super_block *) data;

	es->s_block_group_nr = cpu_to_le16(group);
	if (ext4_has_metadata_csum(sb))
		es->s_checksum = ext4_superblock_csum(sb, es);
}
/*
 * Update the backup copies of the ext4 metadata.  These don't need to be part
 * of the main resize transaction, because e2fsck will re-write them if there
 * is a problem (basically only OOM will cause a problem).  However, we
 * _should_ update the backups if possible, in case the primary gets trashed
 * for some reason and we need to run e2fsck from a backup superblock.  The
 * important part is that the new block and inode counts are in the backup
 * superblocks, and the location of the new group metadata in the GDT backups.
 *
 * We do not need to take the s_resize_lock for this, because these
 * blocks are not otherwise touched by the filesystem code when it is
 * mounted.  We don't need to worry about last changing from
 * sbi->s_groups_count, because the worst that can happen is that we
 * do not copy the full number of backups at this time.  The resize
 * which changed s_groups_count will backup again.
 */
static void update_backups(struct super_block *sb, sector_t blk_off, char *data,
			   int size, int meta_bg)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_group_t last;
	const int bpg = EXT4_BLOCKS_PER_GROUP(sb);
	unsigned three = 1;
	unsigned five = 5;
	unsigned seven = 7;
	ext4_group_t group = 0;
	int rest = sb->s_blocksize - size;
	handle_t *handle;
	int err = 0, err2;

	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, EXT4_MAX_TRANS_DATA);
	if (IS_ERR(handle)) {
		group = 1;
		err = PTR_ERR(handle);
		goto exit_err;
	}

	if (meta_bg == 0) {
		group = ext4_list_backups(sb, &three, &five, &seven);
		last = sbi->s_groups_count;
	} else {
		group = ext4_get_group_number(sb, blk_off) + 1;
		last = (ext4_group_t)(group + EXT4_DESC_PER_BLOCK(sb) - 2);
	}

	while (group < sbi->s_groups_count) {
		struct buffer_head *bh;
		ext4_fsblk_t backup_block;
		int has_super = ext4_bg_has_super(sb, group);
		ext4_fsblk_t first_block = ext4_group_first_block_no(sb, group);

		/* Out of journal space, and can't get more - abort - so sad */
		err = ext4_resize_ensure_credits_batch(handle, 1);
		if (err < 0)
			break;

		if (meta_bg == 0)
			backup_block = ((ext4_fsblk_t)group) * bpg + blk_off;
		else
			backup_block = first_block + has_super;

		bh = sb_getblk(sb, backup_block);
		if (unlikely(!bh)) {
			err = -ENOMEM;
			break;
		}
		ext4_debug("update metadata backup %llu(+%llu)\n",
			   backup_block, backup_block -
			   ext4_group_first_block_no(sb, group));
		BUFFER_TRACE(bh, "get_write_access");
		if ((err = ext4_journal_get_write_access(handle, sb, bh,
							 EXT4_JTR_NONE)))
			break;
		lock_buffer(bh);
		memcpy(bh->b_data, data, size);
		if (rest)
			memset(bh->b_data + size, 0, rest);
		if (has_super && (backup_block == first_block))
			ext4_set_block_group_nr(sb, bh->b_data, group);
		set_buffer_uptodate(bh);
		unlock_buffer(bh);
		err = ext4_handle_dirty_metadata(handle, NULL, bh);
		if (unlikely(err))
			ext4_std_error(sb, err);
		brelse(bh);

		if (meta_bg == 0)
			group = ext4_list_backups(sb, &three, &five, &seven);
		else if (group == last)
			break;
		else
			group = last;
	}
	if ((err2 = ext4_journal_stop(handle)) && !err)
		err = err2;

	/*
	 * Ugh! Need to have e2fsck write the backup copies.  It is too
	 * late to revert the resize, we shouldn't fail just because of
	 * the backup copies (they are only needed in case of corruption).
	 *
	 * However, if we got here we have a journal problem too, so we
	 * can't really start a transaction to mark the superblock.
	 * Chicken out and just set the flag on the hope it will be written
	 * to disk, and if not - we will simply wait until next fsck.
	 */
exit_err:
	if (err) {
		ext4_warning(sb, "can't update backup for group %u (err %d), "
			     "forcing fsck on next reboot", group, err);
		sbi->s_mount_state &= ~EXT4_VALID_FS;
		sbi->s_es->s_state &= cpu_to_le16(~EXT4_VALID_FS);
		mark_buffer_dirty(sbi->s_sbh);
	}
}
/*
 * ext4_add_new_descs() adds @count group descriptors, for groups
 * starting at @group.
 *
 * @handle: journal handle
 * @sb: super block
 * @group: the group no. of the first group desc to be added
 * @resize_inode: the resize inode
 * @count: number of group descriptors to be added
 */
static int ext4_add_new_descs(handle_t *handle, struct super_block *sb,
			      ext4_group_t group, struct inode *resize_inode,
			      ext4_group_t count)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct buffer_head *gdb_bh;
	int i, gdb_off, gdb_num, err = 0;
	int meta_bg;

	meta_bg = ext4_has_feature_meta_bg(sb);
	for (i = 0; i < count; i++, group++) {
		int reserved_gdb = ext4_bg_has_super(sb, group) ?
			le16_to_cpu(es->s_reserved_gdt_blocks) : 0;

		gdb_off = group % EXT4_DESC_PER_BLOCK(sb);
		gdb_num = group / EXT4_DESC_PER_BLOCK(sb);

		/*
		 * We will only either add reserved group blocks to a backup group
		 * or remove reserved blocks for the first group in a new group block.
		 * Doing both would mean more complex code, and sane people don't
		 * use non-sparse filesystems anymore.  This is already checked above.
		 */
		if (gdb_off) {
			gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc,
						     gdb_num);
			BUFFER_TRACE(gdb_bh, "get_write_access");
			err = ext4_journal_get_write_access(handle, sb, gdb_bh,
							    EXT4_JTR_NONE);

			if (!err && reserved_gdb && ext4_bg_num_gdb(sb, group))
				err = reserve_backup_gdb(handle, resize_inode, group);
		} else if (meta_bg != 0) {
			err = add_new_gdb_meta_bg(sb, handle, group);
		} else {
			err = add_new_gdb(handle, resize_inode, group);
		}
		if (err)
			break;
	}
	return err;
}
static struct buffer_head *ext4_get_bitmap(struct super_block *sb, __u64 block)
{
	struct buffer_head *bh = sb_getblk(sb, block);

	if (unlikely(!bh))
		return NULL;
	if (!bh_uptodate_or_lock(bh)) {
		if (ext4_read_bh(bh, 0, NULL) < 0) {
			brelse(bh);
			return NULL;
		}
	}

	return bh;
}
static int ext4_set_bitmap_checksums(struct super_block *sb,
				     struct ext4_group_desc *gdp,
				     struct ext4_new_group_data *group_data)
{
	struct buffer_head *bh;

	if (!ext4_has_metadata_csum(sb))
		return 0;

	bh = ext4_get_bitmap(sb, group_data->inode_bitmap);
	if (!bh)
		return -EIO;
	ext4_inode_bitmap_csum_set(sb, gdp, bh,
				   EXT4_INODES_PER_GROUP(sb) / 8);
	brelse(bh);

	bh = ext4_get_bitmap(sb, group_data->block_bitmap);
	if (!bh)
		return -EIO;
	ext4_block_bitmap_csum_set(sb, gdp, bh);
	brelse(bh);

	return 0;
}
/*
 * ext4_setup_new_descs() will set up the group descriptors of a flex bg.
 */
static int ext4_setup_new_descs(handle_t *handle, struct super_block *sb,
				struct ext4_new_flex_group_data *flex_gd)
{
	struct ext4_new_group_data *group_data = flex_gd->groups;
	struct ext4_group_desc *gdp;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct buffer_head *gdb_bh;
	ext4_group_t group;
	__u16 *bg_flags = flex_gd->bg_flags;
	int i, gdb_off, gdb_num, err = 0;

	for (i = 0; i < flex_gd->count; i++, group_data++, bg_flags++) {
		group = group_data->group;

		gdb_off = group % EXT4_DESC_PER_BLOCK(sb);
		gdb_num = group / EXT4_DESC_PER_BLOCK(sb);

		/*
		 * get_write_access() has been called on gdb_bh by ext4_add_new_desc().
		 */
		gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc, gdb_num);
		/* Update group descriptor block for new group */
		gdp = (struct ext4_group_desc *)(gdb_bh->b_data +
						 gdb_off * EXT4_DESC_SIZE(sb));

		memset(gdp, 0, EXT4_DESC_SIZE(sb));
		ext4_block_bitmap_set(sb, gdp, group_data->block_bitmap);
		ext4_inode_bitmap_set(sb, gdp, group_data->inode_bitmap);
		err = ext4_set_bitmap_checksums(sb, gdp, group_data);
		if (err) {
			ext4_std_error(sb, err);
			break;
		}

		ext4_inode_table_set(sb, gdp, group_data->inode_table);
		ext4_free_group_clusters_set(sb, gdp,
					     group_data->free_clusters_count);
		ext4_free_inodes_set(sb, gdp, EXT4_INODES_PER_GROUP(sb));
		if (ext4_has_group_desc_csum(sb))
			ext4_itable_unused_set(sb, gdp,
					       EXT4_INODES_PER_GROUP(sb));
		gdp->bg_flags = cpu_to_le16(*bg_flags);
		ext4_group_desc_csum_set(sb, group, gdp);

		err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
		if (unlikely(err)) {
			ext4_std_error(sb, err);
			break;
		}

		/*
		 * We can allocate memory for mb_alloc based on the new group
		 * descriptor
		 */
		err = ext4_mb_add_groupinfo(sb, group, gdp);
		if (err)
			break;
	}
	return err;
}
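/*
 * Fold extra overhead into the cached count and the on-disk field; the
 * caller (ext4_update_super() below) already holds the superblock buffer
 * locked when this runs.
 */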
static void ext4_add_overhead(struct super_block *sb,
			      const ext4_fsblk_t overhead)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;

	sbi->s_overhead += overhead;
	es->s_overhead_clusters = cpu_to_le32(sbi->s_overhead);
	smp_wmb();
}
/*
 * ext4_update_super() updates the super block so that the newly added
 * groups can be seen by the filesystem.
 *
 * @sb: super block
 * @flex_gd: new added groups
 */
static void ext4_update_super(struct super_block *sb,
			      struct ext4_new_flex_group_data *flex_gd)
{
	ext4_fsblk_t blocks_count = 0;
	ext4_fsblk_t free_blocks = 0;
	ext4_fsblk_t reserved_blocks = 0;
	struct ext4_new_group_data *group_data = flex_gd->groups;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	int i;

	BUG_ON(flex_gd->count == 0 || group_data == NULL);
	/*
	 * Make the new blocks and inodes valid next.  We do this before
	 * increasing the group count so that once the group is enabled,
	 * all of its blocks and inodes are already valid.
	 *
	 * We always allocate group-by-group, then block-by-block or
	 * inode-by-inode within a group, so enabling these
	 * blocks/inodes before the group is live won't actually let us
	 * allocate the new space yet.
	 */
	for (i = 0; i < flex_gd->count; i++) {
		blocks_count += group_data[i].blocks_count;
		free_blocks += EXT4_C2B(sbi, group_data[i].free_clusters_count);
	}

	reserved_blocks = ext4_r_blocks_count(es) * 100;
	reserved_blocks = div64_u64(reserved_blocks, ext4_blocks_count(es));
	reserved_blocks *= blocks_count;
	do_div(reserved_blocks, 100);

	lock_buffer(sbi->s_sbh);
	ext4_blocks_count_set(es, ext4_blocks_count(es) + blocks_count);
	ext4_free_blocks_count_set(es, ext4_free_blocks_count(es) + free_blocks);
	le32_add_cpu(&es->s_inodes_count, EXT4_INODES_PER_GROUP(sb) *
		     flex_gd->count);
	le32_add_cpu(&es->s_free_inodes_count, EXT4_INODES_PER_GROUP(sb) *
		     flex_gd->count);

	ext4_debug("free blocks count %llu", ext4_free_blocks_count(es));
	/*
	 * We need to protect s_groups_count against other CPUs seeing
	 * inconsistent state in the superblock.
	 *
	 * The precise rules we use are:
	 *
	 * * Writers must perform a smp_wmb() after updating all
	 *   dependent data and before modifying the groups count
	 *
	 * * Readers must perform an smp_rmb() after reading the groups
	 *   count and before reading any dependent data.
	 *
	 * NB. These rules can be relaxed when checking the group count
	 * while freeing data, as we can only allocate from a block
	 * group after serialising against the group count, and we can
	 * only then free after serialising in turn against that
	 * allocation.
	 */
	smp_wmb();

	/* Update the global fs size fields */
	sbi->s_groups_count += flex_gd->count;
	sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count,
			(EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));

	/* Update the reserved block counts only once the new group is
	 * active. */
	ext4_r_blocks_count_set(es, ext4_r_blocks_count(es) +
				reserved_blocks);

	/* Update the free space counts */
	percpu_counter_add(&sbi->s_freeclusters_counter,
			   EXT4_NUM_B2C(sbi, free_blocks));
	percpu_counter_add(&sbi->s_freeinodes_counter,
			   EXT4_INODES_PER_GROUP(sb) * flex_gd->count);

	ext4_debug("free blocks count %llu",
		   percpu_counter_read(&sbi->s_freeclusters_counter));
	if (ext4_has_feature_flex_bg(sb) && sbi->s_log_groups_per_flex) {
		ext4_group_t flex_group;
		struct flex_groups *fg;

		flex_group = ext4_flex_group(sbi, group_data[0].group);
		fg = sbi_array_rcu_deref(sbi, s_flex_groups, flex_group);
		atomic64_add(EXT4_NUM_B2C(sbi, free_blocks),
			     &fg->free_clusters);
		atomic_add(EXT4_INODES_PER_GROUP(sb) * flex_gd->count,
			   &fg->free_inodes);
	}

	/*
	 * Update the fs overhead information.
	 *
	 * For bigalloc, if the superblock already has a properly calculated
	 * overhead, update it with a value based on numbers already computed
	 * above for the newly allocated capacity.
	 */
	if (ext4_has_feature_bigalloc(sb) && (sbi->s_overhead != 0))
		ext4_add_overhead(sb,
			EXT4_NUM_B2C(sbi, blocks_count - free_blocks));
	else {
		ext4_calculate_overhead(sb);
		es->s_overhead_clusters = cpu_to_le32(sbi->s_overhead);
	}
	ext4_superblock_csum_set(sb);
	unlock_buffer(sbi->s_sbh);
	if (test_opt(sb, DEBUG))
		printk(KERN_DEBUG "EXT4-fs: added group %u:"
		       "%llu blocks(%llu free %llu reserved)\n", flex_gd->count,
		       blocks_count, free_blocks, reserved_blocks);
}
/* Add a flex group to an fs. Ensure we handle all possible error conditions
 * _before_ we start modifying the filesystem, because we cannot abort the
 * transaction and not have it write the data to disk.
 */
static int ext4_flex_group_add(struct super_block *sb,
			       struct inode *resize_inode,
			       struct ext4_new_flex_group_data *flex_gd)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	ext4_fsblk_t o_blocks_count;
	ext4_grpblk_t last;
	ext4_group_t group;
	handle_t *handle;
	unsigned reserved_gdb;
	int err = 0, err2 = 0, credit;

	BUG_ON(!flex_gd->count || !flex_gd->groups || !flex_gd->bg_flags);

	reserved_gdb = le16_to_cpu(es->s_reserved_gdt_blocks);
	o_blocks_count = ext4_blocks_count(es);
	ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last);
	BUG_ON(last);

	err = setup_new_flex_group_blocks(sb, flex_gd);
	if (err)
		goto exit;
	/*
	 * We will always be modifying at least the superblock and GDT
	 * blocks.  If we are adding a group past the last current GDT block,
	 * we will also modify the inode and the dindirect block.  If we
	 * are adding a group with superblock/GDT backups we will also
	 * modify each of the reserved GDT dindirect blocks.
	 */
	credit = 3;	/* sb, resize inode, resize inode dindirect */
	/* GDT blocks */
	credit += 1 + DIV_ROUND_UP(flex_gd->count, EXT4_DESC_PER_BLOCK(sb));
	credit += reserved_gdb;	/* Reserved GDT dindirect blocks */
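	/*
	 * Worked example: adding 64 groups with 128 descriptors per GDT
	 * block and 32 reserved GDT blocks asks for 3 + (1 + 1) + 32 = 37
	 * credits up front.
	 */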
	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, credit);
	if (IS_ERR(handle)) {
		err = PTR_ERR(handle);
		goto exit;
	}

	BUFFER_TRACE(sbi->s_sbh, "get_write_access");
	err = ext4_journal_get_write_access(handle, sb, sbi->s_sbh,
					    EXT4_JTR_NONE);
	if (err)
		goto exit_journal;

	group = flex_gd->groups[0].group;
	BUG_ON(group != sbi->s_groups_count);
	err = ext4_add_new_descs(handle, sb, group,
				 resize_inode, flex_gd->count);
	if (err)
		goto exit_journal;

	err = ext4_setup_new_descs(handle, sb, flex_gd);
	if (err)
		goto exit_journal;

	ext4_update_super(sb, flex_gd);

	err = ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);

exit_journal:
	err2 = ext4_journal_stop(handle);
	if (err2 && !err)
		err = err2;

	if (!err) {
		int gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
		int gdb_num_end = ((group + flex_gd->count - 1) /
				   EXT4_DESC_PER_BLOCK(sb));
		int meta_bg = ext4_has_feature_meta_bg(sb) &&
			      gdb_num >= le32_to_cpu(es->s_first_meta_bg);
		sector_t padding_blocks = meta_bg ? 0 : sbi->s_sbh->b_blocknr -
					  ext4_group_first_block_no(sb, 0);

		update_backups(sb, ext4_group_first_block_no(sb, 0),
			       (char *)es, sizeof(struct ext4_super_block), 0);
		for (; gdb_num <= gdb_num_end; gdb_num++) {
			struct buffer_head *gdb_bh;

			gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc,
						     gdb_num);
			update_backups(sb, gdb_bh->b_blocknr - padding_blocks,
				       gdb_bh->b_data, gdb_bh->b_size, meta_bg);
		}
	}
exit:
	return err;
}
static int ext4_setup_next_flex_gd(struct super_block *sb,
				   struct ext4_new_flex_group_data *flex_gd,
				   ext4_fsblk_t n_blocks_count)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct ext4_new_group_data *group_data = flex_gd->groups;
	ext4_fsblk_t o_blocks_count;
	ext4_group_t n_group;
	ext4_group_t group;
	ext4_group_t last_group;
	ext4_grpblk_t last;
	ext4_grpblk_t clusters_per_group;
	unsigned long i;

	clusters_per_group = EXT4_CLUSTERS_PER_GROUP(sb);

	o_blocks_count = ext4_blocks_count(es);

	if (o_blocks_count == n_blocks_count)
		return 0;

	ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last);
	BUG_ON(last);
	ext4_get_group_no_and_offset(sb, n_blocks_count - 1, &n_group, &last);

	last_group = group | (flex_gd->resize_bg - 1);
	if (last_group > n_group)
		last_group = n_group;

	flex_gd->count = last_group - group + 1;

	for (i = 0; i < flex_gd->count; i++) {
		ext4_grpblk_t overhead;

		group_data[i].group = group + i;
		group_data[i].blocks_count = EXT4_BLOCKS_PER_GROUP(sb);
		overhead = ext4_group_overhead_blocks(sb, group + i);
		group_data[i].mdata_blocks = overhead;
		group_data[i].free_clusters_count = EXT4_CLUSTERS_PER_GROUP(sb);
		if (ext4_has_group_desc_csum(sb)) {
			flex_gd->bg_flags[i] = EXT4_BG_BLOCK_UNINIT |
					       EXT4_BG_INODE_UNINIT;
			if (!test_opt(sb, INIT_INODE_TABLE))
				flex_gd->bg_flags[i] |= EXT4_BG_INODE_ZEROED;
		} else
			flex_gd->bg_flags[i] = EXT4_BG_INODE_ZEROED;
	}

	if (last_group == n_group && ext4_has_group_desc_csum(sb))
		/* We need to initialize block bitmap of last group. */
		flex_gd->bg_flags[i - 1] &= ~EXT4_BG_BLOCK_UNINIT;

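	/*
	 * Example: if n_blocks_count ends 100 clusters into this last group,
	 * last == 99 below, so blocks_count becomes EXT4_C2B(sbi, 100) and
	 * free_clusters_count loses the missing tail clusters.
	 */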
	if ((last_group == n_group) && (last != clusters_per_group - 1)) {
		group_data[i - 1].blocks_count = EXT4_C2B(sbi, last + 1);
		group_data[i - 1].free_clusters_count -= clusters_per_group -
							 last - 1;
	}

	return 1;
}
/* Add group descriptor data to an existing or new group descriptor block.
 * Ensure we handle all possible error conditions _before_ we start modifying
 * the filesystem, because we cannot abort the transaction and not have it
 * write the data to disk.
 *
 * If we are on a GDT block boundary, we need to get the reserved GDT block.
 * Otherwise, we may need to add backup GDT blocks for a sparse group.
 *
 * We only need to hold the superblock lock while we are actually adding
 * in the new group's counts to the superblock.  Prior to that we have
 * not really "added" the group at all.  We re-check that we are still
 * adding in the last group in case things have changed since verifying.
 */
int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
{
	struct ext4_new_flex_group_data flex_gd;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	int reserved_gdb = ext4_bg_has_super(sb, input->group) ?
		le16_to_cpu(es->s_reserved_gdt_blocks) : 0;
	struct inode *inode = NULL;
	int gdb_off;
	int err;
	__u16 bg_flags = 0;

	gdb_off = input->group % EXT4_DESC_PER_BLOCK(sb);

	if (gdb_off == 0 && !ext4_has_feature_sparse_super(sb)) {
		ext4_warning(sb, "Can't resize non-sparse filesystem further");
		return -EPERM;
	}

	if (ext4_blocks_count(es) + input->blocks_count <
	    ext4_blocks_count(es)) {
		ext4_warning(sb, "blocks_count overflow");
		return -EINVAL;
	}

	if (le32_to_cpu(es->s_inodes_count) + EXT4_INODES_PER_GROUP(sb) <
	    le32_to_cpu(es->s_inodes_count)) {
		ext4_warning(sb, "inodes_count overflow");
		return -EINVAL;
	}

	if (reserved_gdb || gdb_off == 0) {
		if (!ext4_has_feature_resize_inode(sb) ||
		    !le16_to_cpu(es->s_reserved_gdt_blocks)) {
			ext4_warning(sb,
				     "No reserved GDT blocks, can't resize");
			return -EPERM;
		}
		inode = ext4_iget(sb, EXT4_RESIZE_INO, EXT4_IGET_SPECIAL);
		if (IS_ERR(inode)) {
			ext4_warning(sb, "Error opening resize inode");
			return PTR_ERR(inode);
		}
	}

	err = verify_group_input(sb, input);
	if (err)
		goto out;

	err = ext4_alloc_flex_bg_array(sb, input->group + 1);
	if (err)
		goto out;

	err = ext4_mb_alloc_groupinfo(sb, input->group + 1);
	if (err)
		goto out;

	flex_gd.count = 1;
	flex_gd.groups = input;
	flex_gd.bg_flags = &bg_flags;
	err = ext4_flex_group_add(sb, inode, &flex_gd);
out:
	iput(inode);
	return err;
} /* ext4_group_add */
/*
 * Extend a group without checking, assuming that checking has been done.
 */
static int ext4_group_extend_no_check(struct super_block *sb,
				      ext4_fsblk_t o_blocks_count, ext4_grpblk_t add)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
	handle_t *handle;
	int err = 0, err2;

	/* We will update the superblock, one block bitmap, and
	 * one group descriptor via ext4_group_add_blocks().
	 */
	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, 3);
	if (IS_ERR(handle)) {
		err = PTR_ERR(handle);
		ext4_warning(sb, "error %d on journal start", err);
		return err;
	}

	BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access");
	err = ext4_journal_get_write_access(handle, sb, EXT4_SB(sb)->s_sbh,
					    EXT4_JTR_NONE);
	if (err) {
		ext4_warning(sb, "error %d on journal write access", err);
		goto errout;
	}

	lock_buffer(EXT4_SB(sb)->s_sbh);
	ext4_blocks_count_set(es, o_blocks_count + add);
	ext4_free_blocks_count_set(es, ext4_free_blocks_count(es) + add);
	ext4_superblock_csum_set(sb);
	unlock_buffer(EXT4_SB(sb)->s_sbh);
	ext4_debug("freeing blocks %llu through %llu\n", o_blocks_count,
		   o_blocks_count + add);
	/* We add the blocks to the bitmap and set the group need init bit */
	err = ext4_group_add_blocks(handle, sb, o_blocks_count, add);
	if (err)
		goto errout;
	ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
	ext4_debug("freed blocks %llu through %llu\n", o_blocks_count,
		   o_blocks_count + add);
errout:
	err2 = ext4_journal_stop(handle);
	if (err2 && !err)
		err = err2;

	if (!err) {
		if (test_opt(sb, DEBUG))
			printk(KERN_DEBUG "EXT4-fs: extended group to %llu "
			       "blocks\n", ext4_blocks_count(es));
		update_backups(sb, ext4_group_first_block_no(sb, 0),
			       (char *)es, sizeof(struct ext4_super_block), 0);
	}
	return err;
}
/*
 * Extend the filesystem to the new number of blocks specified.  This entry
 * point is only used to extend the current filesystem to the end of the last
 * existing group.  It can be accessed via ioctl, or by "remount,resize=<size>"
 * for emergencies (because it has no dependencies on reserved blocks).
 *
 * If we _really_ wanted, we could use default values to call ext4_group_add()
 * and allow the "remount" trick to work for arbitrary resizing, assuming
 * enough GDT blocks are reserved to grow to the desired size.
 */
int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
		      ext4_fsblk_t n_blocks_count)
{
	ext4_fsblk_t o_blocks_count;
	ext4_grpblk_t last;
	ext4_grpblk_t add;
	struct buffer_head *bh;
	ext4_group_t group;

	o_blocks_count = ext4_blocks_count(es);

	if (test_opt(sb, DEBUG))
		ext4_msg(sb, KERN_DEBUG,
			 "extending last group from %llu to %llu blocks",
			 o_blocks_count, n_blocks_count);

	if (n_blocks_count == 0 || n_blocks_count == o_blocks_count)
		return 0;

	if (n_blocks_count > (sector_t)(~0ULL) >> (sb->s_blocksize_bits - 9)) {
		ext4_msg(sb, KERN_ERR,
			 "filesystem too large to resize to %llu blocks safely",
			 n_blocks_count);
		return -EINVAL;
	}

	if (n_blocks_count < o_blocks_count) {
		ext4_warning(sb, "can't shrink FS - resize aborted");
		return -EINVAL;
	}

	/* Handle the remaining blocks in the last group only. */
	ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last);

	if (last == 0) {
		ext4_warning(sb, "need to use ext2online to resize further");
		return -EPERM;
	}

	add = EXT4_BLOCKS_PER_GROUP(sb) - last;

	if (o_blocks_count + add < o_blocks_count) {
		ext4_warning(sb, "blocks_count overflow");
		return -EINVAL;
	}

	if (o_blocks_count + add > n_blocks_count)
		add = n_blocks_count - o_blocks_count;

	if (o_blocks_count + add < n_blocks_count)
		ext4_warning(sb, "will only finish group (%llu blocks, %u new)",
			     o_blocks_count + add, add);

	/* See if the device is actually as big as what was requested */
	bh = ext4_sb_bread(sb, o_blocks_count + add - 1, 0);
	if (IS_ERR(bh)) {
		ext4_warning(sb, "can't read last block, resize aborted");
		return -ENOSPC;
	}
	brelse(bh);

	return ext4_group_extend_no_check(sb, o_blocks_count, add);
} /* ext4_group_extend */
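/* Number of group descriptor blocks needed to describe @groups groups. */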
static int num_desc_blocks(struct super_block *sb, ext4_group_t groups)
{
	return (groups + EXT4_DESC_PER_BLOCK(sb) - 1) / EXT4_DESC_PER_BLOCK(sb);
}
/*
 * Release the resize inode and drop the resize_inode feature if there
 * are no more reserved gdt blocks, and then convert the file system
 * to enable meta_bg.
 */
static int ext4_convert_meta_bg(struct super_block *sb, struct inode *inode)
{
	handle_t *handle;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct ext4_inode_info *ei = EXT4_I(inode);
	ext4_fsblk_t nr;
	int i, ret, err = 0;
	int credits = 1;

	ext4_msg(sb, KERN_INFO, "Converting file system to meta_bg");
	if (inode) {
		if (es->s_reserved_gdt_blocks) {
			ext4_error(sb, "Unexpected non-zero "
				   "s_reserved_gdt_blocks");
			return -EPERM;
		}

		/* Do a quick sanity check of the resize inode */
		if (inode->i_blocks != 1 << (inode->i_blkbits -
					     (9 - sbi->s_cluster_bits)))
			goto invalid_resize_inode;
		for (i = 0; i < EXT4_N_BLOCKS; i++) {
			if (i == EXT4_DIND_BLOCK) {
				if (ei->i_data[i])
					continue;
				else
					goto invalid_resize_inode;
			}
			if (ei->i_data[i])
				goto invalid_resize_inode;
		}
		credits += 3;	/* block bitmap, bg descriptor, resize inode */
	}

	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, credits);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	BUFFER_TRACE(sbi->s_sbh, "get_write_access");
	err = ext4_journal_get_write_access(handle, sb, sbi->s_sbh,
					    EXT4_JTR_NONE);
	if (err)
		goto errout;

	lock_buffer(sbi->s_sbh);
	ext4_clear_feature_resize_inode(sb);
	ext4_set_feature_meta_bg(sb);
	sbi->s_es->s_first_meta_bg =
		cpu_to_le32(num_desc_blocks(sb, sbi->s_groups_count));
	ext4_superblock_csum_set(sb);
	unlock_buffer(sbi->s_sbh);

	err = ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);
	if (err) {
		ext4_std_error(sb, err);
		goto errout;
	}

	if (inode) {
		nr = le32_to_cpu(ei->i_data[EXT4_DIND_BLOCK]);
		ext4_free_blocks(handle, inode, NULL, nr, 1,
				 EXT4_FREE_BLOCKS_METADATA |
				 EXT4_FREE_BLOCKS_FORGET);
		ei->i_data[EXT4_DIND_BLOCK] = 0;
		inode->i_blocks = 0;

		err = ext4_mark_inode_dirty(handle, inode);
		if (err)
			ext4_std_error(sb, err);
	}

errout:
	ret = ext4_journal_stop(handle);
	return err ? err : ret;

invalid_resize_inode:
	ext4_error(sb, "corrupted/inconsistent resize inode");
	return -EINVAL;
}
/*
 * ext4_resize_fs() resizes a fs to the new size specified by @n_blocks_count
 *
 * @sb: super block of the fs to be resized
 * @n_blocks_count: the number of blocks residing in the resized fs
 */
int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count)
{
	struct ext4_new_flex_group_data *flex_gd = NULL;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct buffer_head *bh;
	struct inode *resize_inode = NULL;
	ext4_grpblk_t add, offset;
	unsigned long n_desc_blocks;
	unsigned long o_desc_blocks;
	ext4_group_t o_group;
	ext4_group_t n_group;
	ext4_fsblk_t o_blocks_count;
	ext4_fsblk_t n_blocks_count_retry = 0;
	unsigned long last_update_time = 0;
	int err = 0;
	int meta_bg;
	unsigned int flexbg_size = ext4_flex_bg_size(sbi);

	/* See if the device is actually as big as what was requested */
	bh = ext4_sb_bread(sb, n_blocks_count - 1, 0);
	if (IS_ERR(bh)) {
		ext4_warning(sb, "can't read last block, resize aborted");
		return -ENOSPC;
	}
	brelse(bh);

	/*
	 * For bigalloc, trim the requested size to the nearest cluster
	 * boundary to avoid creating an unusable filesystem. We do this
	 * silently, instead of returning an error, to avoid breaking
	 * callers that blindly resize the filesystem to the full size of
	 * the underlying block device.
	 */
	if (ext4_has_feature_bigalloc(sb))
		n_blocks_count &= ~((1 << EXT4_CLUSTER_BITS(sb)) - 1);
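	/*
	 * e.g. with 16 blocks per cluster (EXT4_CLUSTER_BITS(sb) == 4) a
	 * request for 1000003 blocks is quietly rounded down to 1000000.
	 */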

retry:
	o_blocks_count = ext4_blocks_count(es);

	ext4_msg(sb, KERN_INFO, "resizing filesystem from %llu "
		 "to %llu blocks", o_blocks_count, n_blocks_count);

	if (n_blocks_count < o_blocks_count) {
		/* On-line shrinking not supported */
		ext4_warning(sb, "can't shrink FS - resize aborted");
		return -EINVAL;
	}

	if (n_blocks_count == o_blocks_count)
		/* Nothing need to do */
		return 0;

	n_group = ext4_get_group_number(sb, n_blocks_count - 1);
	if (n_group >= (0xFFFFFFFFUL / EXT4_INODES_PER_GROUP(sb))) {
		ext4_warning(sb, "resize would cause inodes_count overflow");
		return -EINVAL;
	}
	ext4_get_group_no_and_offset(sb, o_blocks_count - 1, &o_group, &offset);

	n_desc_blocks = num_desc_blocks(sb, n_group + 1);
	o_desc_blocks = num_desc_blocks(sb, sbi->s_groups_count);

	meta_bg = ext4_has_feature_meta_bg(sb);

	if (ext4_has_feature_resize_inode(sb)) {
		if (meta_bg) {
			ext4_error(sb, "resize_inode and meta_bg enabled "
				   "simultaneously");
			return -EINVAL;
		}
		if (n_desc_blocks > o_desc_blocks +
		    le16_to_cpu(es->s_reserved_gdt_blocks)) {
			n_blocks_count_retry = n_blocks_count;
			n_desc_blocks = o_desc_blocks +
				le16_to_cpu(es->s_reserved_gdt_blocks);
			n_group = n_desc_blocks * EXT4_DESC_PER_BLOCK(sb);
			n_blocks_count = (ext4_fsblk_t)n_group *
				EXT4_BLOCKS_PER_GROUP(sb) +
				le32_to_cpu(es->s_first_data_block);
			n_group--; /* set to last group number */
		}

		if (!resize_inode)
			resize_inode = ext4_iget(sb, EXT4_RESIZE_INO,
						 EXT4_IGET_SPECIAL);
		if (IS_ERR(resize_inode)) {
			ext4_warning(sb, "Error opening resize inode");
			return PTR_ERR(resize_inode);
		}
	}

	if ((!resize_inode && !meta_bg && n_desc_blocks > o_desc_blocks) ||
	    n_blocks_count == o_blocks_count) {
		err = ext4_convert_meta_bg(sb, resize_inode);
		if (err)
			goto out;
		if (resize_inode) {
			iput(resize_inode);
			resize_inode = NULL;
		}
		if (n_blocks_count_retry) {
			n_blocks_count = n_blocks_count_retry;
			n_blocks_count_retry = 0;
			goto retry;
		}
	}

	/*
	 * Make sure the last group has enough space so that it's
	 * guaranteed to have enough space for all metadata blocks
	 * that it might need to hold.  (We might not need to store
	 * the inode table blocks in the last block group, but there
	 * will be cases where this might be needed.)
	 */
	if ((ext4_group_first_block_no(sb, n_group) +
	     ext4_group_overhead_blocks(sb, n_group) + 2 +
	     sbi->s_itb_per_group + sbi->s_cluster_ratio) >= n_blocks_count) {
		n_blocks_count = ext4_group_first_block_no(sb, n_group);
		n_group--;
		n_blocks_count_retry = 0;
		if (resize_inode) {
			iput(resize_inode);
			resize_inode = NULL;
		}
		goto retry;
	}

	/* extend the last group */
	if (n_group == o_group)
		add = n_blocks_count - o_blocks_count;
	else
		add = EXT4_C2B(sbi, EXT4_CLUSTERS_PER_GROUP(sb) - (offset + 1));
	if (add > 0) {
		err = ext4_group_extend_no_check(sb, o_blocks_count, add);
		if (err)
			goto out;
	}

	if (ext4_blocks_count(es) == n_blocks_count && n_blocks_count_retry == 0)
		goto out;

	err = ext4_alloc_flex_bg_array(sb, n_group + 1);
	if (err)
		goto out;

	err = ext4_mb_alloc_groupinfo(sb, n_group + 1);
	if (err)
		goto out;

	flex_gd = alloc_flex_gd(flexbg_size, o_group, n_group);
	if (flex_gd == NULL) {
		err = -ENOMEM;
		goto out;
	}

	/* Add flex groups. Note that a regular group is a
	 * flex group with 1 group.
	 */
	while (ext4_setup_next_flex_gd(sb, flex_gd, n_blocks_count)) {
		if (time_is_before_jiffies(last_update_time + HZ * 10)) {
			if (last_update_time)
				ext4_msg(sb, KERN_INFO,
					 "resized to %llu blocks",
					 ext4_blocks_count(es));
			last_update_time = jiffies;
		}
		if (ext4_alloc_group_tables(sb, flex_gd, flexbg_size) != 0)
			break;
		err = ext4_flex_group_add(sb, resize_inode, flex_gd);
		if (unlikely(err))
			break;
	}

	if (!err && n_blocks_count_retry) {
		n_blocks_count = n_blocks_count_retry;
		n_blocks_count_retry = 0;
		free_flex_gd(flex_gd);
		flex_gd = NULL;
		if (resize_inode) {
			iput(resize_inode);
			resize_inode = NULL;
		}
		goto retry;
	}

out:
	if (flex_gd)
		free_flex_gd(flex_gd);
	if (resize_inode != NULL)
		iput(resize_inode);
	if (err)
		ext4_warning(sb, "error (%d) occurred during "
			     "file system resize", err);
	ext4_msg(sb, KERN_INFO, "resized filesystem to %llu",
		 ext4_blocks_count(es));
	return err;
}