// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/blkdev.h>
#include <linux/radix-tree.h>
#include <linux/writeback.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/migrate.h>
#include <linux/ratelimit.h>
#include <linux/uuid.h>
#include <linux/semaphore.h>
#include <linux/error-injection.h>
#include <linux/crc32c.h>
#include <linux/sched/mm.h>
#include <asm/unaligned.h>
#include <crypto/hash.h>
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "free-space-cache.h"
#include "free-space-tree.h"
#include "dev-replace.h"
#include "compression.h"
#include "tree-checker.h"
#include "ref-verify.h"
#include "block-group.h"
#include "space-info.h"
#include "accessors.h"
#include "extent-tree.h"
#include "root-tree.h"
#include "uuid-tree.h"
#include "relocation.h"
#define BTRFS_SUPER_FLAG_SUPP   (BTRFS_HEADER_FLAG_WRITTEN |\
                                 BTRFS_HEADER_FLAG_RELOC |\
                                 BTRFS_SUPER_FLAG_ERROR |\
                                 BTRFS_SUPER_FLAG_SEEDING |\
                                 BTRFS_SUPER_FLAG_METADUMP |\
                                 BTRFS_SUPER_FLAG_METADUMP_V2)
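/*
 * Note: this mask is the set of header/super flags accepted at mount time; a
 * superblock carrying any flag outside BTRFS_SUPER_FLAG_SUPP is rejected as
 * invalid during the early superblock validity checks.
 */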
static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info);
static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info);

static void btrfs_free_csum_hash(struct btrfs_fs_info *fs_info)
{
        if (fs_info->csum_shash)
                crypto_free_shash(fs_info->csum_shash);
}
/*
 * Compute the csum of a btree block and store the result in the provided
 * buffer.
 */
static void csum_tree_block(struct extent_buffer *buf, u8 *result)
{
        struct btrfs_fs_info *fs_info = buf->fs_info;
        int num_pages;
        u32 first_page_part;
        SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
        char *kaddr;
        int i;

        shash->tfm = fs_info->csum_shash;
        crypto_shash_init(shash);

        if (buf->addr) {
                /* Pages are contiguous, handle them as one big chunk. */
                kaddr = buf->addr;
                first_page_part = fs_info->nodesize;
                num_pages = 1;
        } else {
                kaddr = folio_address(buf->folios[0]);
                first_page_part = min_t(u32, PAGE_SIZE, fs_info->nodesize);
                num_pages = num_extent_pages(buf);
        }

        crypto_shash_update(shash, kaddr + BTRFS_CSUM_SIZE,
                            first_page_part - BTRFS_CSUM_SIZE);

        /*
         * Only the multiple single-page folios case reaches here; both
         * nodesize <= PAGE_SIZE and a single large folio are already fully
         * handled by the crypto_shash_update() above.
         */
        for (i = 1; i < num_pages && INLINE_EXTENT_BUFFER_PAGES > 1; i++) {
                kaddr = folio_address(buf->folios[i]);
                crypto_shash_update(shash, kaddr, PAGE_SIZE);
        }
        memset(result, 0, BTRFS_CSUM_SIZE);
        crypto_shash_final(shash, result);
}
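/*
 * Layout note: the first BTRFS_CSUM_SIZE bytes of every tree block header
 * hold the checksum itself, which is why hashing starts at
 * kaddr + BTRFS_CSUM_SIZE; the result computed above is what eventually gets
 * written back into that reserved area before IO.
 */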
/*
 * We can't consider a given block up to date unless the transid of the
 * block matches the transid in the parent node's pointer.  This is how we
 * detect blocks that either didn't get written at all or got written in the
 * wrong place.
 */
int btrfs_buffer_uptodate(struct extent_buffer *eb, u64 parent_transid, int atomic)
{
        if (!extent_buffer_uptodate(eb))
                return 0;

        if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
                return 1;

        if (atomic)
                return -EAGAIN;

        if (!extent_buffer_uptodate(eb) ||
            btrfs_header_generation(eb) != parent_transid) {
                btrfs_err_rl(eb->fs_info,
"parent transid verify failed on logical %llu mirror %u wanted %llu found %llu",
                        eb->start, eb->read_mirror,
                        parent_transid, btrfs_header_generation(eb));
                clear_extent_buffer_uptodate(eb);
                return 0;
        }
        return 1;
}
static bool btrfs_supported_super_csum(u16 csum_type)
{
        switch (csum_type) {
        case BTRFS_CSUM_TYPE_CRC32:
        case BTRFS_CSUM_TYPE_XXHASH:
        case BTRFS_CSUM_TYPE_SHA256:
        case BTRFS_CSUM_TYPE_BLAKE2:
                return true;
        default:
                return false;
        }
}
/*
 * Return 0 if the superblock checksum type matches the checksum value of that
 * algorithm.  Pass the raw disk superblock data.
 */
int btrfs_check_super_csum(struct btrfs_fs_info *fs_info,
                           const struct btrfs_super_block *disk_sb)
{
        char result[BTRFS_CSUM_SIZE];
        SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);

        shash->tfm = fs_info->csum_shash;

        /*
         * The super_block structure does not span the whole
         * BTRFS_SUPER_INFO_SIZE range, we expect that the unused space is
         * filled with zeros and is included in the checksum.
         */
        crypto_shash_digest(shash, (const u8 *)disk_sb + BTRFS_CSUM_SIZE,
                            BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE, result);

        if (memcmp(disk_sb->csum, result, fs_info->csum_size))
                return 1;

        return 0;
}
static int btrfs_repair_eb_io_failure(const struct extent_buffer *eb,
                                      int mirror_num)
{
        struct btrfs_fs_info *fs_info = eb->fs_info;
        int num_folios = num_extent_folios(eb);
        int ret = 0;

        if (sb_rdonly(fs_info->sb))
                return -EROFS;

        for (int i = 0; i < num_folios; i++) {
                struct folio *folio = eb->folios[i];
                u64 start = max_t(u64, eb->start, folio_pos(folio));
                u64 end = min_t(u64, eb->start + eb->len,
                                folio_pos(folio) + eb->folio_size);
                u32 len = end - start;

                ret = btrfs_repair_io_failure(fs_info, 0, start, len,
                                              start, folio,
                                              offset_in_folio(folio, start),
                                              mirror_num);
                if (ret)
                        break;
        }

        return ret;
}
/*
 * Helper to read a given tree block, doing retries as required when
 * the checksums don't match and we have alternate mirrors to try.
 *
 * @check:      expected tree parentness check, see the comments of the
 *              structure for details.
 */
int btrfs_read_extent_buffer(struct extent_buffer *eb,
                             const struct btrfs_tree_parent_check *check)
{
        struct btrfs_fs_info *fs_info = eb->fs_info;
        int failed = 0;
        int ret;
        int num_copies = 0;
        int mirror_num = 0;
        int failed_mirror = 0;

        ASSERT(check);

        while (1) {
                clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
                ret = read_extent_buffer_pages(eb, WAIT_COMPLETE, mirror_num, check);
                if (!ret)
                        break;

                num_copies = btrfs_num_copies(fs_info,
                                              eb->start, eb->len);
                if (num_copies == 1)
                        break;

                if (!failed_mirror) {
                        failed = 1;
                        failed_mirror = eb->read_mirror;
                }

                mirror_num++;
                if (mirror_num == failed_mirror)
                        mirror_num++;

                if (mirror_num > num_copies)
                        break;
        }

        if (failed && !ret && failed_mirror)
                btrfs_repair_eb_io_failure(eb, failed_mirror);

        return ret;
}
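/*
 * Note on the retry scheme above: mirrors are tried in order, skipping the
 * one that already failed; once a good copy is found after at least one
 * failure, the failed mirror is rewritten from the good copy via
 * btrfs_repair_eb_io_failure().
 */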
/*
 * Checksum a dirty tree block before IO.
 */
blk_status_t btree_csum_one_bio(struct btrfs_bio *bbio)
{
        struct extent_buffer *eb = bbio->private;
        struct btrfs_fs_info *fs_info = eb->fs_info;
        u64 found_start = btrfs_header_bytenr(eb);
        u64 last_trans;
        u8 result[BTRFS_CSUM_SIZE];
        int ret;

        /* Btree blocks are always contiguous on disk. */
        if (WARN_ON_ONCE(bbio->file_offset != eb->start))
                return BLK_STS_IOERR;
        if (WARN_ON_ONCE(bbio->bio.bi_iter.bi_size != eb->len))
                return BLK_STS_IOERR;

        /*
         * If an extent_buffer is marked as EXTENT_BUFFER_ZONED_ZEROOUT, don't
         * checksum it but zero-out its content. This is done to preserve
         * ordering of I/O without unnecessarily writing out data.
         */
        if (test_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &eb->bflags)) {
                memzero_extent_buffer(eb, 0, eb->len);
                return BLK_STS_OK;
        }

        if (WARN_ON_ONCE(found_start != eb->start))
                return BLK_STS_IOERR;
        if (WARN_ON(!btrfs_folio_test_uptodate(fs_info, eb->folios[0],
                                               eb->start, eb->len)))
                return BLK_STS_IOERR;

        ASSERT(memcmp_extent_buffer(eb, fs_info->fs_devices->metadata_uuid,
                                    offsetof(struct btrfs_header, fsid),
                                    BTRFS_FSID_SIZE) == 0);
        csum_tree_block(eb, result);

        if (btrfs_header_level(eb))
                ret = btrfs_check_node(eb);
        else
                ret = btrfs_check_leaf(eb);
        if (ret < 0)
                goto error;

        /*
         * Also check the generation: an eb that reaches here must be newer
         * than the last committed transaction, or something seriously wrong
         * has happened.
         */
        last_trans = btrfs_get_last_trans_committed(fs_info);
        if (unlikely(btrfs_header_generation(eb) <= last_trans)) {
                ret = -EUCLEAN;
                btrfs_err(fs_info,
                        "block=%llu bad generation, have %llu expect > %llu",
                          eb->start, btrfs_header_generation(eb), last_trans);
                goto error;
        }
        write_extent_buffer(eb, result, 0, fs_info->csum_size);

        return BLK_STS_OK;

error:
        btrfs_print_tree(eb, 0);
        btrfs_err(fs_info, "block=%llu write time tree block corruption detected",
                  eb->start);
        /*
         * Be noisy if this is an extent buffer from a log tree. We don't abort
         * a transaction in case there's a bad log tree extent buffer, we just
         * fall back to a transaction commit. Still we want to know when there
         * is a bad log tree extent buffer, as that may signal a bug somewhere.
         */
        WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG) ||
                btrfs_header_owner(eb) == BTRFS_TREE_LOG_OBJECTID);
        return errno_to_blk_status(ret);
}
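/*
 * Returns true if the fsid recorded in @eb's header matches neither this
 * filesystem's metadata_uuid nor the fsid of any of its seed devices.
 */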
static bool check_tree_block_fsid(struct extent_buffer *eb)
{
        struct btrfs_fs_info *fs_info = eb->fs_info;
        struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
        u8 fsid[BTRFS_FSID_SIZE];

        read_extent_buffer(eb, fsid, offsetof(struct btrfs_header, fsid),
                           BTRFS_FSID_SIZE);

        /*
         * alloc_fsid_devices() copies the fsid into fs_devices::metadata_uuid.
         * This is then overwritten by metadata_uuid if it is present in the
         * device_list_add(). The same is true for a seed device as well. So
         * use of fs_devices::metadata_uuid is appropriate here.
         */
        if (memcmp(fsid, fs_info->fs_devices->metadata_uuid, BTRFS_FSID_SIZE) == 0)
                return false;

        list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list)
                if (!memcmp(fsid, seed_devs->fsid, BTRFS_FSID_SIZE))
                        return false;

        return true;
}
/* Do basic extent buffer checks at read time */
int btrfs_validate_extent_buffer(struct extent_buffer *eb,
                                 const struct btrfs_tree_parent_check *check)
{
        struct btrfs_fs_info *fs_info = eb->fs_info;
        u64 found_start;
        const u32 csum_size = fs_info->csum_size;
        u8 found_level;
        u8 result[BTRFS_CSUM_SIZE];
        const u8 *header_csum;
        int ret = 0;
        const bool ignore_csum = btrfs_test_opt(fs_info, IGNOREMETACSUMS);

        ASSERT(check);

        found_start = btrfs_header_bytenr(eb);
        if (found_start != eb->start) {
                btrfs_err_rl(fs_info,
                        "bad tree block start, mirror %u want %llu have %llu",
                             eb->read_mirror, eb->start, found_start);
                ret = -EIO;
                goto out;
        }
        if (check_tree_block_fsid(eb)) {
                btrfs_err_rl(fs_info, "bad fsid on logical %llu mirror %u",
                             eb->start, eb->read_mirror);
                ret = -EIO;
                goto out;
        }
        found_level = btrfs_header_level(eb);
        if (found_level >= BTRFS_MAX_LEVEL) {
                btrfs_err(fs_info,
                        "bad tree block level, mirror %u level %d on logical %llu",
                        eb->read_mirror, btrfs_header_level(eb), eb->start);
                ret = -EIO;
                goto out;
        }

        csum_tree_block(eb, result);
        header_csum = folio_address(eb->folios[0]) +
                get_eb_offset_in_folio(eb, offsetof(struct btrfs_header, csum));

        if (memcmp(result, header_csum, csum_size) != 0) {
                btrfs_warn_rl(fs_info,
"checksum verify failed on logical %llu mirror %u wanted " CSUM_FMT " found " CSUM_FMT " level %d%s",
                              eb->start, eb->read_mirror,
                              CSUM_FMT_VALUE(csum_size, header_csum),
                              CSUM_FMT_VALUE(csum_size, result),
                              btrfs_header_level(eb),
                              ignore_csum ? ", ignored" : "");
                if (!ignore_csum) {
                        ret = -EUCLEAN;
                        goto out;
                }
        }

        if (found_level != check->level) {
                btrfs_err(fs_info,
                "level verify failed on logical %llu mirror %u wanted %u found %u",
                          eb->start, eb->read_mirror, check->level, found_level);
                ret = -EIO;
                goto out;
        }
        if (unlikely(check->transid &&
                     btrfs_header_generation(eb) != check->transid)) {
                btrfs_err_rl(eb->fs_info,
"parent transid verify failed on logical %llu mirror %u wanted %llu found %llu",
                                eb->start, eb->read_mirror, check->transid,
                                btrfs_header_generation(eb));
                ret = -EIO;
                goto out;
        }
        if (check->has_first_key) {
                const struct btrfs_key *expect_key = &check->first_key;
                struct btrfs_key found_key;

                if (found_level)
                        btrfs_node_key_to_cpu(eb, &found_key, 0);
                else
                        btrfs_item_key_to_cpu(eb, &found_key, 0);
                if (unlikely(btrfs_comp_cpu_keys(expect_key, &found_key))) {
                        btrfs_err(fs_info,
"tree first key mismatch detected, bytenr=%llu parent_transid=%llu key expected=(%llu,%u,%llu) has=(%llu,%u,%llu)",
                                  eb->start, check->transid,
                                  expect_key->objectid,
                                  expect_key->type, expect_key->offset,
                                  found_key.objectid, found_key.type,
                                  found_key.offset);
                        ret = -EUCLEAN;
                        goto out;
                }
        }
        if (check->owner_root) {
                ret = btrfs_check_eb_owner(eb, check->owner_root);
                if (ret < 0)
                        goto out;
        }

        /*
         * If this is a leaf block and it is corrupt, set the corrupt bit so
         * that we don't try and read the other copies of this block, just
         * return -EIO.
         */
        if (found_level == 0 && btrfs_check_leaf(eb)) {
                set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
                ret = -EIO;
        }

        if (found_level > 0 && btrfs_check_node(eb))
                ret = -EIO;

        if (ret)
                btrfs_err(fs_info,
                "read time tree block corruption detected on logical %llu mirror %u",
                          eb->start, eb->read_mirror);
out:
        return ret;
}
#ifdef CONFIG_MIGRATION
static int btree_migrate_folio(struct address_space *mapping,
                struct folio *dst, struct folio *src, enum migrate_mode mode)
{
        /*
         * We can't safely write a btree page from here,
         * we haven't done the locking hook.
         */
        if (folio_test_dirty(src))
                return -EAGAIN;
        /*
         * Buffers may be managed in a filesystem specific way.
         * We must have no buffers or drop them.
         */
        if (folio_get_private(src) &&
            !filemap_release_folio(src, GFP_KERNEL))
                return -EAGAIN;
        return migrate_folio(mapping, dst, src, mode);
}
#else
#define btree_migrate_folio NULL
#endif
static int btree_writepages(struct address_space *mapping,
                            struct writeback_control *wbc)
{
        int ret;

        if (wbc->sync_mode == WB_SYNC_NONE) {
                struct btrfs_fs_info *fs_info;

                if (wbc->for_kupdate)
                        return 0;

                fs_info = inode_to_fs_info(mapping->host);
                /* This is a bit racy, but that's ok. */
                ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes,
                                               BTRFS_DIRTY_METADATA_THRESH,
                                               fs_info->dirty_metadata_batch);
                if (ret < 0)
                        return 0;
        }
        return btree_write_cache_pages(mapping, wbc);
}

static bool btree_release_folio(struct folio *folio, gfp_t gfp_flags)
{
        if (folio_test_writeback(folio) || folio_test_dirty(folio))
                return false;

        return try_release_extent_buffer(folio);
}
static void btree_invalidate_folio(struct folio *folio, size_t offset,
                                   size_t length)
{
        struct extent_io_tree *tree;

        tree = &folio_to_inode(folio)->io_tree;
        extent_invalidate_folio(tree, folio, offset);
        btree_release_folio(folio, GFP_NOFS);
        if (folio_get_private(folio)) {
                btrfs_warn(folio_to_fs_info(folio),
                           "folio private not zero on folio %llu",
                           (unsigned long long)folio_pos(folio));
                folio_detach_private(folio);
        }
}
#ifdef DEBUG
static bool btree_dirty_folio(struct address_space *mapping,
                struct folio *folio)
{
        struct btrfs_fs_info *fs_info = inode_to_fs_info(mapping->host);
        struct btrfs_subpage_info *spi = fs_info->subpage_info;
        struct btrfs_subpage *subpage;
        struct extent_buffer *eb;
        int cur_bit = 0;
        u64 page_start = folio_pos(folio);

        if (fs_info->sectorsize == PAGE_SIZE) {
                eb = folio_get_private(folio);
                BUG_ON(!eb);
                BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
                BUG_ON(!atomic_read(&eb->refs));
                btrfs_assert_tree_write_locked(eb);
                return filemap_dirty_folio(mapping, folio);
        }

        ASSERT(spi);
        subpage = folio_get_private(folio);

        for (cur_bit = spi->dirty_offset;
             cur_bit < spi->dirty_offset + spi->bitmap_nr_bits;
             cur_bit++) {
                unsigned long flags;
                u64 cur;

                spin_lock_irqsave(&subpage->lock, flags);
                if (!test_bit(cur_bit, subpage->bitmaps)) {
                        spin_unlock_irqrestore(&subpage->lock, flags);
                        continue;
                }
                spin_unlock_irqrestore(&subpage->lock, flags);
                cur = page_start + cur_bit * fs_info->sectorsize;

                eb = find_extent_buffer(fs_info, cur);
                ASSERT(eb);
                ASSERT(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
                ASSERT(atomic_read(&eb->refs));
                btrfs_assert_tree_write_locked(eb);
                free_extent_buffer(eb);

                cur_bit += (fs_info->nodesize >> fs_info->sectorsize_bits) - 1;
        }
        return filemap_dirty_folio(mapping, folio);
}
#else
#define btree_dirty_folio filemap_dirty_folio
#endif
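/*
 * Note on the DEBUG variant above: every sector marked dirty in the folio's
 * subpage bitmap must be covered by a dirty, write-locked extent buffer; the
 * loop then skips ahead by a nodesize worth of sectors after each buffer it
 * verifies, since all sectors of one tree block share the same state.
 */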
static const struct address_space_operations btree_aops = {
        .writepages       = btree_writepages,
        .release_folio    = btree_release_folio,
        .invalidate_folio = btree_invalidate_folio,
        .migrate_folio    = btree_migrate_folio,
        .dirty_folio      = btree_dirty_folio,
};

struct extent_buffer *btrfs_find_create_tree_block(
                                                struct btrfs_fs_info *fs_info,
                                                u64 bytenr, u64 owner_root,
                                                int level)
{
        if (btrfs_is_testing(fs_info))
                return alloc_test_extent_buffer(fs_info, bytenr);
        return alloc_extent_buffer(fs_info, bytenr, owner_root, level);
}
/*
 * Read tree block at logical address @bytenr and do various basic but
 * critical verifications.
 *
 * @check:      expected tree parentness check, see comments of the
 *              structure for details.
 */
struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr,
                                      struct btrfs_tree_parent_check *check)
{
        struct extent_buffer *buf = NULL;
        int ret;

        ASSERT(check);

        buf = btrfs_find_create_tree_block(fs_info, bytenr, check->owner_root,
                                           check->level);
        if (IS_ERR(buf))
                return buf;

        ret = btrfs_read_extent_buffer(buf, check);
        if (ret) {
                free_extent_buffer_stale(buf);
                return ERR_PTR(ret);
        }
        return buf;
}
static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
                         u64 objectid)
{
        bool dummy = btrfs_is_testing(fs_info);

        memset(&root->root_key, 0, sizeof(root->root_key));
        memset(&root->root_item, 0, sizeof(root->root_item));
        memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
        root->fs_info = fs_info;
        root->root_key.objectid = objectid;
        root->node = NULL;
        root->commit_root = NULL;
        root->state = 0;
        RB_CLEAR_NODE(&root->rb_node);

        btrfs_set_root_last_trans(root, 0);
        root->free_objectid = 0;
        root->nr_delalloc_inodes = 0;
        root->nr_ordered_extents = 0;
        xa_init(&root->inodes);
        xa_init(&root->delayed_nodes);

        btrfs_init_root_block_rsv(root);

        INIT_LIST_HEAD(&root->dirty_list);
        INIT_LIST_HEAD(&root->root_list);
        INIT_LIST_HEAD(&root->delalloc_inodes);
        INIT_LIST_HEAD(&root->delalloc_root);
        INIT_LIST_HEAD(&root->ordered_extents);
        INIT_LIST_HEAD(&root->ordered_root);
        INIT_LIST_HEAD(&root->reloc_dirty_list);
        spin_lock_init(&root->delalloc_lock);
        spin_lock_init(&root->ordered_extent_lock);
        spin_lock_init(&root->accounting_lock);
        spin_lock_init(&root->qgroup_meta_rsv_lock);
        mutex_init(&root->objectid_mutex);
        mutex_init(&root->log_mutex);
        mutex_init(&root->ordered_extent_mutex);
        mutex_init(&root->delalloc_mutex);
        init_waitqueue_head(&root->qgroup_flush_wait);
        init_waitqueue_head(&root->log_writer_wait);
        init_waitqueue_head(&root->log_commit_wait[0]);
        init_waitqueue_head(&root->log_commit_wait[1]);
        INIT_LIST_HEAD(&root->log_ctxs[0]);
        INIT_LIST_HEAD(&root->log_ctxs[1]);
        atomic_set(&root->log_commit[0], 0);
        atomic_set(&root->log_commit[1], 0);
        atomic_set(&root->log_writers, 0);
        atomic_set(&root->log_batch, 0);
        refcount_set(&root->refs, 1);
        atomic_set(&root->snapshot_force_cow, 0);
        atomic_set(&root->nr_swapfiles, 0);
        btrfs_set_root_log_transid(root, 0);
        root->log_transid_committed = -1;
        btrfs_set_root_last_log_commit(root, 0);
        root->anon_dev = 0;
        if (!dummy) {
                extent_io_tree_init(fs_info, &root->dirty_log_pages,
                                    IO_TREE_ROOT_DIRTY_LOG_PAGES);
                extent_io_tree_init(fs_info, &root->log_csum_range,
                                    IO_TREE_LOG_CSUM_RANGE);
        }

        spin_lock_init(&root->root_item_lock);
        btrfs_qgroup_init_swapped_blocks(&root->swapped_blocks);
#ifdef CONFIG_BTRFS_DEBUG
        INIT_LIST_HEAD(&root->leak_list);
        spin_lock(&fs_info->fs_roots_radix_lock);
        list_add_tail(&root->leak_list, &fs_info->allocated_roots);
        spin_unlock(&fs_info->fs_roots_radix_lock);
#endif
}
static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info,
                                           u64 objectid, gfp_t flags)
{
        struct btrfs_root *root = kzalloc(sizeof(*root), flags);

        if (root)
                __setup_root(root, fs_info, objectid);
        return root;
}

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
/* Should only be used by the testing infrastructure */
struct btrfs_root *btrfs_alloc_dummy_root(struct btrfs_fs_info *fs_info)
{
        struct btrfs_root *root;

        if (!fs_info)
                return ERR_PTR(-EINVAL);

        root = btrfs_alloc_root(fs_info, BTRFS_ROOT_TREE_OBJECTID, GFP_KERNEL);
        if (!root)
                return ERR_PTR(-ENOMEM);

        /* We don't use the stripesize in selftest, set it as sectorsize */
        root->alloc_bytenr = 0;

        return root;
}
#endif
static int global_root_cmp(struct rb_node *a_node, const struct rb_node *b_node)
{
        const struct btrfs_root *a = rb_entry(a_node, struct btrfs_root, rb_node);
        const struct btrfs_root *b = rb_entry(b_node, struct btrfs_root, rb_node);

        return btrfs_comp_cpu_keys(&a->root_key, &b->root_key);
}

static int global_root_key_cmp(const void *k, const struct rb_node *node)
{
        const struct btrfs_key *key = k;
        const struct btrfs_root *root = rb_entry(node, struct btrfs_root, rb_node);

        return btrfs_comp_cpu_keys(key, &root->root_key);
}

int btrfs_global_root_insert(struct btrfs_root *root)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct rb_node *tmp;
        int ret = 0;

        write_lock(&fs_info->global_root_lock);
        tmp = rb_find_add(&root->rb_node, &fs_info->global_root_tree, global_root_cmp);
        write_unlock(&fs_info->global_root_lock);

        if (tmp) {
                ret = -EEXIST;
                btrfs_warn(fs_info, "global root %llu %llu already exists",
                           btrfs_root_id(root), root->root_key.offset);
        }
        return ret;
}

void btrfs_global_root_delete(struct btrfs_root *root)
{
        struct btrfs_fs_info *fs_info = root->fs_info;

        write_lock(&fs_info->global_root_lock);
        rb_erase(&root->rb_node, &fs_info->global_root_tree);
        write_unlock(&fs_info->global_root_lock);
}
struct btrfs_root *btrfs_global_root(struct btrfs_fs_info *fs_info,
                                     struct btrfs_key *key)
{
        struct rb_node *node;
        struct btrfs_root *root = NULL;

        read_lock(&fs_info->global_root_lock);
        node = rb_find(key, &fs_info->global_root_tree, global_root_key_cmp);
        if (node)
                root = container_of(node, struct btrfs_root, rb_node);
        read_unlock(&fs_info->global_root_lock);

        return root;
}

static u64 btrfs_global_root_id(struct btrfs_fs_info *fs_info, u64 bytenr)
{
        struct btrfs_block_group *block_group;
        u64 ret;

        if (!btrfs_fs_incompat(fs_info, EXTENT_TREE_V2))
                return 0;

        if (bytenr)
                block_group = btrfs_lookup_block_group(fs_info, bytenr);
        else
                block_group = btrfs_lookup_first_block_group(fs_info, bytenr);
        ASSERT(block_group);
        if (!block_group)
                return 0;

        ret = block_group->global_root_id;
        btrfs_put_block_group(block_group);

        return ret;
}
struct btrfs_root *btrfs_csum_root(struct btrfs_fs_info *fs_info, u64 bytenr)
{
        struct btrfs_key key = {
                .objectid = BTRFS_CSUM_TREE_OBJECTID,
                .type = BTRFS_ROOT_ITEM_KEY,
                .offset = btrfs_global_root_id(fs_info, bytenr),
        };

        return btrfs_global_root(fs_info, &key);
}

struct btrfs_root *btrfs_extent_root(struct btrfs_fs_info *fs_info, u64 bytenr)
{
        struct btrfs_key key = {
                .objectid = BTRFS_EXTENT_TREE_OBJECTID,
                .type = BTRFS_ROOT_ITEM_KEY,
                .offset = btrfs_global_root_id(fs_info, bytenr),
        };

        return btrfs_global_root(fs_info, &key);
}
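/*
 * Note on the two helpers above: with the EXTENT_TREE_V2 incompat feature
 * there can be multiple extent and csum trees, told apart by the key offset,
 * which is the global_root_id stored in the block group that @bytenr falls
 * into.  On non-v2 filesystems btrfs_global_root_id() returns 0, so lookups
 * degrade to the single global root.
 */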
struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
                                     u64 objectid)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct extent_buffer *leaf;
        struct btrfs_root *tree_root = fs_info->tree_root;
        struct btrfs_root *root;
        struct btrfs_key key;
        unsigned int nofs_flag;
        int ret = 0;

        /*
         * We're holding a transaction handle, so use a NOFS memory allocation
         * context to avoid deadlock if reclaim happens.
         */
        nofs_flag = memalloc_nofs_save();
        root = btrfs_alloc_root(fs_info, objectid, GFP_KERNEL);
        memalloc_nofs_restore(nofs_flag);
        if (!root)
                return ERR_PTR(-ENOMEM);

        root->root_key.objectid = objectid;
        root->root_key.type = BTRFS_ROOT_ITEM_KEY;
        root->root_key.offset = 0;

        leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0,
                                      0, BTRFS_NESTING_NORMAL);
        if (IS_ERR(leaf)) {
                ret = PTR_ERR(leaf);
                leaf = NULL;
                goto fail;
        }

        root->node = leaf;
        btrfs_mark_buffer_dirty(trans, leaf);

        root->commit_root = btrfs_root_node(root);
        set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);

        btrfs_set_root_flags(&root->root_item, 0);
        btrfs_set_root_limit(&root->root_item, 0);
        btrfs_set_root_bytenr(&root->root_item, leaf->start);
        btrfs_set_root_generation(&root->root_item, trans->transid);
        btrfs_set_root_level(&root->root_item, 0);
        btrfs_set_root_refs(&root->root_item, 1);
        btrfs_set_root_used(&root->root_item, leaf->len);
        btrfs_set_root_last_snapshot(&root->root_item, 0);
        btrfs_set_root_dirid(&root->root_item, 0);
        if (is_fstree(objectid))
                generate_random_guid(root->root_item.uuid);
        else
                export_guid(root->root_item.uuid, &guid_null);
        btrfs_set_root_drop_level(&root->root_item, 0);

        btrfs_tree_unlock(leaf);

        key.objectid = objectid;
        key.type = BTRFS_ROOT_ITEM_KEY;
        key.offset = 0;
        ret = btrfs_insert_root(trans, tree_root, &key, &root->root_item);
        if (ret)
                goto fail;

        return root;

fail:
        btrfs_put_root(root);

        return ERR_PTR(ret);
}
static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
                                         struct btrfs_fs_info *fs_info)
{
        struct btrfs_root *root;

        root = btrfs_alloc_root(fs_info, BTRFS_TREE_LOG_OBJECTID, GFP_NOFS);
        if (!root)
                return ERR_PTR(-ENOMEM);

        root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
        root->root_key.type = BTRFS_ROOT_ITEM_KEY;
        root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;

        return root;
}

int btrfs_alloc_log_tree_node(struct btrfs_trans_handle *trans,
                              struct btrfs_root *root)
{
        struct extent_buffer *leaf;

        /*
         * DON'T set SHAREABLE bit for log trees.
         *
         * Log trees are not exposed to user space thus can't be snapshotted,
         * and they go away before a real commit is actually done.
         *
         * They do store pointers to file data extents, and those reference
         * counts still get updated (along with back refs to the log tree).
         */

        leaf = btrfs_alloc_tree_block(trans, root, 0, BTRFS_TREE_LOG_OBJECTID,
                        NULL, 0, 0, 0, 0, BTRFS_NESTING_NORMAL);
        if (IS_ERR(leaf))
                return PTR_ERR(leaf);

        root->node = leaf;

        btrfs_mark_buffer_dirty(trans, root->node);
        btrfs_tree_unlock(root->node);

        return 0;
}
int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
                             struct btrfs_fs_info *fs_info)
{
        struct btrfs_root *log_root;

        log_root = alloc_log_tree(trans, fs_info);
        if (IS_ERR(log_root))
                return PTR_ERR(log_root);

        if (!btrfs_is_zoned(fs_info)) {
                int ret = btrfs_alloc_log_tree_node(trans, log_root);

                if (ret) {
                        btrfs_put_root(log_root);
                        return ret;
                }
        }

        WARN_ON(fs_info->log_root_tree);
        fs_info->log_root_tree = log_root;
        return 0;
}

int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
                       struct btrfs_root *root)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_root *log_root;
        struct btrfs_inode_item *inode_item;
        int ret;

        log_root = alloc_log_tree(trans, fs_info);
        if (IS_ERR(log_root))
                return PTR_ERR(log_root);

        ret = btrfs_alloc_log_tree_node(trans, log_root);
        if (ret) {
                btrfs_put_root(log_root);
                return ret;
        }

        btrfs_set_root_last_trans(log_root, trans->transid);
        log_root->root_key.offset = btrfs_root_id(root);

        inode_item = &log_root->root_item.inode;
        btrfs_set_stack_inode_generation(inode_item, 1);
        btrfs_set_stack_inode_size(inode_item, 3);
        btrfs_set_stack_inode_nlink(inode_item, 1);
        btrfs_set_stack_inode_nbytes(inode_item,
                                     fs_info->nodesize);
        btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755);

        btrfs_set_root_node(&log_root->root_item, log_root->node);

        WARN_ON(root->log_root);
        root->log_root = log_root;
        btrfs_set_root_log_transid(root, 0);
        root->log_transid_committed = -1;
        btrfs_set_root_last_log_commit(root, 0);
        return 0;
}
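/*
 * Note on the log root key set up above: every per-subvolume log root reuses
 * BTRFS_TREE_LOG_OBJECTID as the key objectid and records the owning
 * subvolume's root id in the key offset, which is why log trees can't be
 * looked up through the generic root lookup paths below.
 */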
static struct btrfs_root *read_tree_root_path(struct btrfs_root *tree_root,
                                              struct btrfs_path *path,
                                              const struct btrfs_key *key)
{
        struct btrfs_root *root;
        struct btrfs_tree_parent_check check = { 0 };
        struct btrfs_fs_info *fs_info = tree_root->fs_info;
        u64 generation;
        int level;
        int ret;

        root = btrfs_alloc_root(fs_info, key->objectid, GFP_NOFS);
        if (!root)
                return ERR_PTR(-ENOMEM);

        ret = btrfs_find_root(tree_root, key, path,
                              &root->root_item, &root->root_key);
        if (ret) {
                if (ret > 0)
                        ret = -ENOENT;
                goto fail;
        }

        generation = btrfs_root_generation(&root->root_item);
        level = btrfs_root_level(&root->root_item);
        check.level = level;
        check.transid = generation;
        check.owner_root = key->objectid;

        root->node = read_tree_block(fs_info, btrfs_root_bytenr(&root->root_item),
                                     &check);
        if (IS_ERR(root->node)) {
                ret = PTR_ERR(root->node);
                root->node = NULL;
                goto fail;
        }
        if (!btrfs_buffer_uptodate(root->node, generation, 0)) {
                ret = -EIO;
                goto fail;
        }

        /*
         * For a real fs, and not log/reloc trees, the root owner must
         * match its root node owner.
         */
        if (!btrfs_is_testing(fs_info) &&
            btrfs_root_id(root) != BTRFS_TREE_LOG_OBJECTID &&
            btrfs_root_id(root) != BTRFS_TREE_RELOC_OBJECTID &&
            btrfs_root_id(root) != btrfs_header_owner(root->node)) {
                btrfs_crit(fs_info,
"root=%llu block=%llu, tree root owner mismatch, have %llu expect %llu",
                           btrfs_root_id(root), root->node->start,
                           btrfs_header_owner(root->node),
                           btrfs_root_id(root));
                ret = -EUCLEAN;
                goto fail;
        }
        root->commit_root = btrfs_root_node(root);
        return root;
fail:
        btrfs_put_root(root);
        return ERR_PTR(ret);
}

struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root,
                                        const struct btrfs_key *key)
{
        struct btrfs_root *root;
        struct btrfs_path *path;

        path = btrfs_alloc_path();
        if (!path)
                return ERR_PTR(-ENOMEM);
        root = read_tree_root_path(tree_root, path, key);
        btrfs_free_path(path);

        return root;
}
/*
 * Initialize a subvolume root in-memory structure.
 *
 * @anon_dev:   anonymous device to attach to the root, if zero, allocate new
 */
static int btrfs_init_fs_root(struct btrfs_root *root, dev_t anon_dev)
{
        int ret;

        btrfs_drew_lock_init(&root->snapshot_lock);

        if (btrfs_root_id(root) != BTRFS_TREE_LOG_OBJECTID &&
            !btrfs_is_data_reloc_root(root) &&
            is_fstree(btrfs_root_id(root))) {
                set_bit(BTRFS_ROOT_SHAREABLE, &root->state);
                btrfs_check_and_init_root_item(&root->root_item);
        }

        /*
         * Don't assign an anonymous block device to roots that are not
         * exposed to userspace, the id pool is limited to 1M.
         */
        if (is_fstree(btrfs_root_id(root)) &&
            btrfs_root_refs(&root->root_item) > 0) {
                if (!anon_dev) {
                        ret = get_anon_bdev(&root->anon_dev);
                        if (ret)
                                goto fail;
                } else {
                        root->anon_dev = anon_dev;
                }
        }

        mutex_lock(&root->objectid_mutex);
        ret = btrfs_init_root_free_objectid(root);
        if (ret) {
                mutex_unlock(&root->objectid_mutex);
                goto fail;
        }

        ASSERT(root->free_objectid <= BTRFS_LAST_FREE_OBJECTID);

        mutex_unlock(&root->objectid_mutex);

        return 0;
fail:
        /* The caller is responsible for calling btrfs_free_fs_root(). */
        return ret;
}
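/*
 * Look up a cached subvolume root in the fs_roots radix tree by root id and
 * return it with an extra reference grabbed, or NULL when it is not cached.
 */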
static struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
                                               u64 root_id)
{
        struct btrfs_root *root;

        spin_lock(&fs_info->fs_roots_radix_lock);
        root = radix_tree_lookup(&fs_info->fs_roots_radix,
                                 (unsigned long)root_id);
        root = btrfs_grab_root(root);
        spin_unlock(&fs_info->fs_roots_radix_lock);
        return root;
}
static struct btrfs_root *btrfs_get_global_root(struct btrfs_fs_info *fs_info,
                                                u64 objectid)
{
        struct btrfs_key key = {
                .objectid = objectid,
                .type = BTRFS_ROOT_ITEM_KEY,
                .offset = 0,
        };

        switch (objectid) {
        case BTRFS_ROOT_TREE_OBJECTID:
                return btrfs_grab_root(fs_info->tree_root);
        case BTRFS_EXTENT_TREE_OBJECTID:
                return btrfs_grab_root(btrfs_global_root(fs_info, &key));
        case BTRFS_CHUNK_TREE_OBJECTID:
                return btrfs_grab_root(fs_info->chunk_root);
        case BTRFS_DEV_TREE_OBJECTID:
                return btrfs_grab_root(fs_info->dev_root);
        case BTRFS_CSUM_TREE_OBJECTID:
                return btrfs_grab_root(btrfs_global_root(fs_info, &key));
        case BTRFS_QUOTA_TREE_OBJECTID:
                return btrfs_grab_root(fs_info->quota_root);
        case BTRFS_UUID_TREE_OBJECTID:
                return btrfs_grab_root(fs_info->uuid_root);
        case BTRFS_BLOCK_GROUP_TREE_OBJECTID:
                return btrfs_grab_root(fs_info->block_group_root);
        case BTRFS_FREE_SPACE_TREE_OBJECTID:
                return btrfs_grab_root(btrfs_global_root(fs_info, &key));
        case BTRFS_RAID_STRIPE_TREE_OBJECTID:
                return btrfs_grab_root(fs_info->stripe_root);
        default:
                return NULL;
        }
}
int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info,
                         struct btrfs_root *root)
{
        int ret;

        ret = radix_tree_preload(GFP_NOFS);
        if (ret)
                return ret;

        spin_lock(&fs_info->fs_roots_radix_lock);
        ret = radix_tree_insert(&fs_info->fs_roots_radix,
                                (unsigned long)btrfs_root_id(root),
                                root);
        if (ret == 0) {
                btrfs_grab_root(root);
                set_bit(BTRFS_ROOT_IN_RADIX, &root->state);
        }
        spin_unlock(&fs_info->fs_roots_radix_lock);
        radix_tree_preload_end();

        return ret;
}
void btrfs_check_leaked_roots(const struct btrfs_fs_info *fs_info)
{
#ifdef CONFIG_BTRFS_DEBUG
        struct btrfs_root *root;

        while (!list_empty(&fs_info->allocated_roots)) {
                char buf[BTRFS_ROOT_NAME_BUF_LEN];

                root = list_first_entry(&fs_info->allocated_roots,
                                        struct btrfs_root, leak_list);
                btrfs_err(fs_info, "leaked root %s refcount %d",
                          btrfs_root_name(&root->root_key, buf),
                          refcount_read(&root->refs));
                while (refcount_read(&root->refs) > 1)
                        btrfs_put_root(root);
                btrfs_put_root(root);
        }
#endif
}

static void free_global_roots(struct btrfs_fs_info *fs_info)
{
        struct btrfs_root *root;
        struct rb_node *node;

        while ((node = rb_first_postorder(&fs_info->global_root_tree)) != NULL) {
                root = rb_entry(node, struct btrfs_root, rb_node);
                rb_erase(&root->rb_node, &fs_info->global_root_tree);
                btrfs_put_root(root);
        }
}
void btrfs_free_fs_info(struct btrfs_fs_info *fs_info)
{
        struct percpu_counter *em_counter = &fs_info->evictable_extent_maps;

        percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
        percpu_counter_destroy(&fs_info->delalloc_bytes);
        percpu_counter_destroy(&fs_info->ordered_bytes);
        if (percpu_counter_initialized(em_counter))
                ASSERT(percpu_counter_sum_positive(em_counter) == 0);
        percpu_counter_destroy(em_counter);
        percpu_counter_destroy(&fs_info->dev_replace.bio_counter);
        btrfs_free_csum_hash(fs_info);
        btrfs_free_stripe_hash_table(fs_info);
        btrfs_free_ref_cache(fs_info);
        kfree(fs_info->balance_ctl);
        kfree(fs_info->delayed_root);
        free_global_roots(fs_info);
        btrfs_put_root(fs_info->tree_root);
        btrfs_put_root(fs_info->chunk_root);
        btrfs_put_root(fs_info->dev_root);
        btrfs_put_root(fs_info->quota_root);
        btrfs_put_root(fs_info->uuid_root);
        btrfs_put_root(fs_info->fs_root);
        btrfs_put_root(fs_info->data_reloc_root);
        btrfs_put_root(fs_info->block_group_root);
        btrfs_put_root(fs_info->stripe_root);
        btrfs_check_leaked_roots(fs_info);
        btrfs_extent_buffer_leak_debug_check(fs_info);
        kfree(fs_info->super_copy);
        kfree(fs_info->super_for_commit);
        kvfree(fs_info);
}
/*
 * Get an in-memory reference of a root structure.
 *
 * For essential trees like root/extent tree, we grab it from fs_info directly.
 * For subvolume trees, we check the cached filesystem roots first. If not
 * found, then read it from disk and add it to cached fs roots.
 *
 * Caller should release the root by calling btrfs_put_root() after the usage.
 *
 * NOTE: Reloc and log trees can't be read by this function as they share the
 * same root objectid.
 *
 * @objectid:   root id
 * @anon_dev:   preallocated anonymous block device number for new roots,
 *              pass NULL for a new allocation.
 * @check_ref:  whether to check root item references; if true, return -ENOENT
 *              for orphan roots
 */
static struct btrfs_root *btrfs_get_root_ref(struct btrfs_fs_info *fs_info,
                                             u64 objectid, dev_t *anon_dev,
                                             bool check_ref)
{
        struct btrfs_root *root;
        struct btrfs_path *path;
        struct btrfs_key key;
        int ret;

        root = btrfs_get_global_root(fs_info, objectid);
        if (root)
                return root;

        /*
         * If we're called for non-subvolume trees, and the above function
         * didn't find one, do not try to read it from disk.
         *
         * This is namely for free-space-tree and quota tree, which can change
         * at runtime and should only be grabbed from fs_info.
         */
        if (!is_fstree(objectid) && objectid != BTRFS_DATA_RELOC_TREE_OBJECTID)
                return ERR_PTR(-ENOENT);
again:
        root = btrfs_lookup_fs_root(fs_info, objectid);
        if (root) {
                /*
                 * Some other caller may have read out the newly inserted
                 * subvolume already (for things like backref walk etc).  Not
                 * that common but still possible.  In that case, we just need
                 * to free the anon_dev.
                 */
                if (unlikely(anon_dev && *anon_dev)) {
                        free_anon_bdev(*anon_dev);
                        *anon_dev = 0;
                }

                if (check_ref && btrfs_root_refs(&root->root_item) == 0) {
                        btrfs_put_root(root);
                        return ERR_PTR(-ENOENT);
                }
                return root;
        }

        key.objectid = objectid;
        key.type = BTRFS_ROOT_ITEM_KEY;
        key.offset = (u64)-1;
        root = btrfs_read_tree_root(fs_info->tree_root, &key);
        if (IS_ERR(root))
                return root;

        if (check_ref && btrfs_root_refs(&root->root_item) == 0) {
                ret = -ENOENT;
                goto fail;
        }

        ret = btrfs_init_fs_root(root, anon_dev ? *anon_dev : 0);
        if (ret)
                goto fail;

        path = btrfs_alloc_path();
        if (!path) {
                ret = -ENOMEM;
                goto fail;
        }
        key.objectid = BTRFS_ORPHAN_OBJECTID;
        key.type = BTRFS_ORPHAN_ITEM_KEY;
        key.offset = objectid;

        ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
        btrfs_free_path(path);
        if (ret < 0)
                goto fail;
        if (ret == 0)
                set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state);

        ret = btrfs_insert_fs_root(fs_info, root);
        if (ret) {
                if (ret == -EEXIST) {
                        btrfs_put_root(root);
                        goto again;
                }
                goto fail;
        }
        return root;
fail:
        /*
         * If our caller provided us an anonymous device, then it's their
         * responsibility to free it in case we fail. So we have to set our
         * root's anon_dev to 0 to avoid a double free, once by btrfs_put_root()
         * and once again by our caller.
         */
        if (anon_dev && *anon_dev)
                root->anon_dev = 0;
        btrfs_put_root(root);
        return ERR_PTR(ret);
}
/*
 * Get an in-memory reference of a root structure.
 *
 * @objectid:   tree objectid
 * @check_ref:  if set, verify that the tree exists and the item has at least
 *              one reference
 */
struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
                                     u64 objectid, bool check_ref)
{
        return btrfs_get_root_ref(fs_info, objectid, NULL, check_ref);
}

/*
 * Get an in-memory reference of a root structure, created as new, optionally
 * passing the anonymous block device id.
 *
 * @objectid:   tree objectid
 * @anon_dev:   if NULL, allocate a new anonymous block device; otherwise use
 *              the value it points to
 */
struct btrfs_root *btrfs_get_new_fs_root(struct btrfs_fs_info *fs_info,
                                         u64 objectid, dev_t *anon_dev)
{
        return btrfs_get_root_ref(fs_info, objectid, anon_dev, true);
}
/*
 * Return a root for the given objectid.
 *
 * @fs_info:    the fs_info
 * @objectid:   the objectid we need to look up
 *
 * This is exclusively used for backref walking, and exists specifically because
 * of how qgroups does lookups. Qgroups will do a backref lookup at delayed ref
 * creation time, which means we may have to read the tree_root in order to look
 * up a fs root that is not in memory. If the root is not in memory we will
 * read the tree root commit root and look up the fs root from there. This is a
 * temporary root, it will not be inserted into the radix tree as it doesn't
 * have the most uptodate information, it'll simply be discarded once the
 * backref code is finished using the root.
 */
struct btrfs_root *btrfs_get_fs_root_commit_root(struct btrfs_fs_info *fs_info,
                                                 struct btrfs_path *path,
                                                 u64 objectid)
{
        struct btrfs_root *root;
        struct btrfs_key key;

        ASSERT(path->search_commit_root && path->skip_locking);

        /*
         * This can return -ENOENT if we ask for a root that doesn't exist, but
         * since this is called via the backref walking code we won't be looking
         * up a root that doesn't exist, unless there's corruption.  So if root
         * != NULL just return it.
         */
        root = btrfs_get_global_root(fs_info, objectid);
        if (root)
                return root;

        root = btrfs_lookup_fs_root(fs_info, objectid);
        if (root)
                return root;

        key.objectid = objectid;
        key.type = BTRFS_ROOT_ITEM_KEY;
        key.offset = (u64)-1;
        root = read_tree_root_path(fs_info->tree_root, path, &key);
        btrfs_release_path(path);

        return root;
}
static int cleaner_kthread(void *arg)
{
        struct btrfs_fs_info *fs_info = arg;
        int again;

        while (1) {
                again = 0;

                set_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags);

                /* Make the cleaner go to sleep early. */
                if (btrfs_need_cleaner_sleep(fs_info))
                        goto sleep;

                /*
                 * Do not do anything if we might cause open_ctree() to block
                 * before we have finished mounting the filesystem.
                 */
                if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
                        goto sleep;

                if (!mutex_trylock(&fs_info->cleaner_mutex))
                        goto sleep;

                /*
                 * Guard against the fs status changing between the check
                 * above and the trylock.
                 */
                if (btrfs_need_cleaner_sleep(fs_info)) {
                        mutex_unlock(&fs_info->cleaner_mutex);
                        goto sleep;
                }

                if (test_and_clear_bit(BTRFS_FS_FEATURE_CHANGED, &fs_info->flags))
                        btrfs_sysfs_feature_update(fs_info);

                btrfs_run_delayed_iputs(fs_info);

                again = btrfs_clean_one_deleted_snapshot(fs_info);
                mutex_unlock(&fs_info->cleaner_mutex);

                /*
                 * The defragger has dealt with the R/O remount and umount,
                 * so we needn't do anything special here.
                 */
                btrfs_run_defrag_inodes(fs_info);

                /*
                 * Acquires fs_info->reclaim_bgs_lock to avoid racing
                 * with relocation (btrfs_relocate_chunk) and relocation
                 * acquires fs_info->cleaner_mutex (btrfs_relocate_block_group)
                 * after acquiring fs_info->reclaim_bgs_lock. So we
                 * can't hold, nor need to, fs_info->cleaner_mutex when deleting
                 * unused block groups.
                 */
                btrfs_delete_unused_bgs(fs_info);

                /*
                 * Reclaim block groups in the reclaim_bgs list after we deleted
                 * all unused block_groups. This possibly gives us some more free
                 * space.
                 */
                btrfs_reclaim_bgs(fs_info);
sleep:
                clear_and_wake_up_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags);
                if (kthread_should_park())
                        kthread_parkme();
                if (kthread_should_stop())
                        return 0;
                if (!again) {
                        set_current_state(TASK_INTERRUPTIBLE);
                        schedule();
                        __set_current_state(TASK_RUNNING);
                }
        }
}
static int transaction_kthread(void *arg)
{
        struct btrfs_root *root = arg;
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_trans_handle *trans;
        struct btrfs_transaction *cur;
        u64 transid;
        time64_t delta;
        unsigned long delay;
        bool cannot_commit;

        do {
                cannot_commit = false;
                delay = msecs_to_jiffies(fs_info->commit_interval * 1000);
                mutex_lock(&fs_info->transaction_kthread_mutex);

                spin_lock(&fs_info->trans_lock);
                cur = fs_info->running_transaction;
                if (!cur) {
                        spin_unlock(&fs_info->trans_lock);
                        goto sleep;
                }

                delta = ktime_get_seconds() - cur->start_time;
                if (!test_and_clear_bit(BTRFS_FS_COMMIT_TRANS, &fs_info->flags) &&
                    cur->state < TRANS_STATE_COMMIT_PREP &&
                    delta < fs_info->commit_interval) {
                        spin_unlock(&fs_info->trans_lock);
                        delay -= msecs_to_jiffies((delta - 1) * 1000);
                        delay = min(delay,
                                    msecs_to_jiffies(fs_info->commit_interval * 1000));
                        goto sleep;
                }
                transid = cur->transid;
                spin_unlock(&fs_info->trans_lock);

                /* If the file system is aborted, this will always fail. */
                trans = btrfs_attach_transaction(root);
                if (IS_ERR(trans)) {
                        if (PTR_ERR(trans) != -ENOENT)
                                cannot_commit = true;
                        goto sleep;
                }
                if (transid == trans->transid)
                        btrfs_commit_transaction(trans);
                else
                        btrfs_end_transaction(trans);
sleep:
                wake_up_process(fs_info->cleaner_kthread);
                mutex_unlock(&fs_info->transaction_kthread_mutex);

                if (BTRFS_FS_ERROR(fs_info))
                        btrfs_cleanup_transaction(fs_info);
                if (!kthread_should_stop() &&
                    (!btrfs_transaction_blocked(fs_info) ||
                     cannot_commit))
                        schedule_timeout_interruptible(delay);
        } while (!kthread_should_stop());
        return 0;
}
/*
 * This will find the highest generation in the array of root backups.  The
 * index of the highest array entry is returned, or -EINVAL if we can't find
 * anything.
 *
 * We check to make sure the array is valid by comparing the
 * generation of the latest root in the array with the generation
 * in the super block.  If they don't match we pitch it.
 */
static int find_newest_super_backup(struct btrfs_fs_info *info)
{
        const u64 newest_gen = btrfs_super_generation(info->super_copy);
        u64 cur;
        struct btrfs_root_backup *root_backup;
        int i;

        for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) {
                root_backup = info->super_copy->super_roots + i;
                cur = btrfs_backup_tree_root_gen(root_backup);
                if (cur == newest_gen)
                        return i;
        }

        return -EINVAL;
}
/*
 * Copy all the root pointers into the super backup array.  This will bump
 * the backup pointer by one when it is done.
 */
static void backup_super_roots(struct btrfs_fs_info *info)
{
        const int next_backup = info->backup_root_index;
        struct btrfs_root_backup *root_backup;

        root_backup = info->super_for_commit->super_roots + next_backup;

        /*
         * Make sure all of our padding and empty slots get zero filled
         * regardless of which ones we use today.
         */
        memset(root_backup, 0, sizeof(*root_backup));

        info->backup_root_index = (next_backup + 1) % BTRFS_NUM_BACKUP_ROOTS;

        btrfs_set_backup_tree_root(root_backup, info->tree_root->node->start);
        btrfs_set_backup_tree_root_gen(root_backup,
                        btrfs_header_generation(info->tree_root->node));

        btrfs_set_backup_tree_root_level(root_backup,
                        btrfs_header_level(info->tree_root->node));

        btrfs_set_backup_chunk_root(root_backup, info->chunk_root->node->start);
        btrfs_set_backup_chunk_root_gen(root_backup,
                        btrfs_header_generation(info->chunk_root->node));
        btrfs_set_backup_chunk_root_level(root_backup,
                        btrfs_header_level(info->chunk_root->node));

        if (!btrfs_fs_compat_ro(info, BLOCK_GROUP_TREE)) {
                struct btrfs_root *extent_root = btrfs_extent_root(info, 0);
                struct btrfs_root *csum_root = btrfs_csum_root(info, 0);

                btrfs_set_backup_extent_root(root_backup,
                                             extent_root->node->start);
                btrfs_set_backup_extent_root_gen(root_backup,
                                btrfs_header_generation(extent_root->node));
                btrfs_set_backup_extent_root_level(root_backup,
                                btrfs_header_level(extent_root->node));

                btrfs_set_backup_csum_root(root_backup, csum_root->node->start);
                btrfs_set_backup_csum_root_gen(root_backup,
                                btrfs_header_generation(csum_root->node));
                btrfs_set_backup_csum_root_level(root_backup,
                                btrfs_header_level(csum_root->node));
        }

        /*
         * We might commit during log recovery, which happens before we set
         * the fs_root.  Make sure it is valid before we fill it in.
         */
        if (info->fs_root && info->fs_root->node) {
                btrfs_set_backup_fs_root(root_backup,
                                         info->fs_root->node->start);
                btrfs_set_backup_fs_root_gen(root_backup,
                                btrfs_header_generation(info->fs_root->node));
                btrfs_set_backup_fs_root_level(root_backup,
                                btrfs_header_level(info->fs_root->node));
        }

        btrfs_set_backup_dev_root(root_backup, info->dev_root->node->start);
        btrfs_set_backup_dev_root_gen(root_backup,
                        btrfs_header_generation(info->dev_root->node));
        btrfs_set_backup_dev_root_level(root_backup,
                        btrfs_header_level(info->dev_root->node));

        btrfs_set_backup_total_bytes(root_backup,
                        btrfs_super_total_bytes(info->super_copy));
        btrfs_set_backup_bytes_used(root_backup,
                        btrfs_super_bytes_used(info->super_copy));
        btrfs_set_backup_num_devices(root_backup,
                        btrfs_super_num_devices(info->super_copy));

        /*
         * If we don't copy this out to the super_copy, it won't get remembered
         * for the next commit.
         */
        memcpy(&info->super_copy->super_roots,
               &info->super_for_commit->super_roots,
               sizeof(*root_backup) * BTRFS_NUM_BACKUP_ROOTS);
}
/*
 * Reads a backup root based on the passed priority. Prio 0 is the newest, prio
 * 1/2/3 are 2nd newest/3rd newest/4th (oldest) backup roots.
 *
 * @fs_info:  filesystem whose backup roots need to be read
 * @priority: priority of backup root required
 *
 * Returns backup root index on success and -EINVAL otherwise.
 */
static int read_backup_root(struct btrfs_fs_info *fs_info, u8 priority)
{
        int backup_index = find_newest_super_backup(fs_info);
        struct btrfs_super_block *super = fs_info->super_copy;
        struct btrfs_root_backup *root_backup;

        if (priority < BTRFS_NUM_BACKUP_ROOTS && backup_index >= 0) {
                if (priority == 0)
                        return backup_index;

                backup_index = backup_index + BTRFS_NUM_BACKUP_ROOTS - priority;
                backup_index %= BTRFS_NUM_BACKUP_ROOTS;
        } else {
                return -EINVAL;
        }

        root_backup = super->super_roots + backup_index;

        btrfs_set_super_generation(super,
                                   btrfs_backup_tree_root_gen(root_backup));
        btrfs_set_super_root(super, btrfs_backup_tree_root(root_backup));
        btrfs_set_super_root_level(super,
                                   btrfs_backup_tree_root_level(root_backup));
        btrfs_set_super_bytes_used(super, btrfs_backup_bytes_used(root_backup));

        /*
         * Fixme: the total bytes and num_devices need to match or we should
         * need a fsck.
         */
        btrfs_set_super_total_bytes(super, btrfs_backup_total_bytes(root_backup));
        btrfs_set_super_num_devices(super, btrfs_backup_num_devices(root_backup));

        return backup_index;
}
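/*
 * Worked example for the index arithmetic above (hypothetical values): with
 * BTRFS_NUM_BACKUP_ROOTS == 4 and the newest backup at index 2, priority 1
 * selects slot (2 + 4 - 1) % 4 == 1, i.e. the backup written one commit
 * before the newest one.
 */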
/* Helper to clean up the workers. */
static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
{
        btrfs_destroy_workqueue(fs_info->fixup_workers);
        btrfs_destroy_workqueue(fs_info->delalloc_workers);
        btrfs_destroy_workqueue(fs_info->workers);
        if (fs_info->endio_workers)
                destroy_workqueue(fs_info->endio_workers);
        if (fs_info->rmw_workers)
                destroy_workqueue(fs_info->rmw_workers);
        if (fs_info->compressed_write_workers)
                destroy_workqueue(fs_info->compressed_write_workers);
        btrfs_destroy_workqueue(fs_info->endio_write_workers);
        btrfs_destroy_workqueue(fs_info->endio_freespace_worker);
        btrfs_destroy_workqueue(fs_info->delayed_workers);
        btrfs_destroy_workqueue(fs_info->caching_workers);
        btrfs_destroy_workqueue(fs_info->flush_workers);
        btrfs_destroy_workqueue(fs_info->qgroup_rescan_workers);
        if (fs_info->discard_ctl.discard_workers)
                destroy_workqueue(fs_info->discard_ctl.discard_workers);
        /*
         * Now that all other work queues are destroyed, we can safely destroy
         * the queues used for metadata I/O, since tasks from those other work
         * queues can do metadata I/O operations.
         */
        if (fs_info->endio_meta_workers)
                destroy_workqueue(fs_info->endio_meta_workers);
}
static void free_root_extent_buffers(struct btrfs_root *root)
{
        if (root) {
                free_extent_buffer(root->node);
                free_extent_buffer(root->commit_root);
                root->node = NULL;
                root->commit_root = NULL;
        }
}

static void free_global_root_pointers(struct btrfs_fs_info *fs_info)
{
        struct btrfs_root *root, *tmp;

        rbtree_postorder_for_each_entry_safe(root, tmp,
                                             &fs_info->global_root_tree,
                                             rb_node)
                free_root_extent_buffers(root);
}

/* Helper to clean up tree roots. */
static void free_root_pointers(struct btrfs_fs_info *info, bool free_chunk_root)
{
        free_root_extent_buffers(info->tree_root);
        free_global_root_pointers(info);
        free_root_extent_buffers(info->dev_root);
        free_root_extent_buffers(info->quota_root);
        free_root_extent_buffers(info->uuid_root);
        free_root_extent_buffers(info->fs_root);
        free_root_extent_buffers(info->data_reloc_root);
        free_root_extent_buffers(info->block_group_root);
        free_root_extent_buffers(info->stripe_root);
        if (free_chunk_root)
                free_root_extent_buffers(info->chunk_root);
}
void btrfs_put_root(struct btrfs_root *root)
{
        if (!root)
                return;

        if (refcount_dec_and_test(&root->refs)) {
                if (WARN_ON(!xa_empty(&root->inodes)))
                        xa_destroy(&root->inodes);
                WARN_ON(test_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state));
                if (root->anon_dev)
                        free_anon_bdev(root->anon_dev);
                free_root_extent_buffers(root);
#ifdef CONFIG_BTRFS_DEBUG
                spin_lock(&root->fs_info->fs_roots_radix_lock);
                list_del_init(&root->leak_list);
                spin_unlock(&root->fs_info->fs_roots_radix_lock);
#endif
                kfree(root);
        }
}
void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info)
{
        int ret;
        struct btrfs_root *gang[8];
        int i;

        while (!list_empty(&fs_info->dead_roots)) {
                gang[0] = list_entry(fs_info->dead_roots.next,
                                     struct btrfs_root, root_list);
                list_del(&gang[0]->root_list);

                if (test_bit(BTRFS_ROOT_IN_RADIX, &gang[0]->state))
                        btrfs_drop_and_free_fs_root(fs_info, gang[0]);
                btrfs_put_root(gang[0]);
        }

        while (1) {
                ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
                                             (void **)gang, 0,
                                             ARRAY_SIZE(gang));
                if (!ret)
                        break;
                for (i = 0; i < ret; i++)
                        btrfs_drop_and_free_fs_root(fs_info, gang[i]);
        }
}
static void btrfs_init_scrub(struct btrfs_fs_info *fs_info)
{
        mutex_init(&fs_info->scrub_lock);
        atomic_set(&fs_info->scrubs_running, 0);
        atomic_set(&fs_info->scrub_pause_req, 0);
        atomic_set(&fs_info->scrubs_paused, 0);
        atomic_set(&fs_info->scrub_cancel_req, 0);
        init_waitqueue_head(&fs_info->scrub_pause_wait);
        refcount_set(&fs_info->scrub_workers_refcnt, 0);
}

static void btrfs_init_balance(struct btrfs_fs_info *fs_info)
{
        spin_lock_init(&fs_info->balance_lock);
        mutex_init(&fs_info->balance_mutex);
        atomic_set(&fs_info->balance_pause_req, 0);
        atomic_set(&fs_info->balance_cancel_req, 0);
        fs_info->balance_ctl = NULL;
        init_waitqueue_head(&fs_info->balance_wait_q);
        atomic_set(&fs_info->reloc_cancel_req, 0);
}
static int btrfs_init_btree_inode(struct super_block *sb)
{
        struct btrfs_fs_info *fs_info = btrfs_sb(sb);
        unsigned long hash = btrfs_inode_hash(BTRFS_BTREE_INODE_OBJECTID,
                                              fs_info->tree_root);
        struct inode *inode;

        inode = new_inode(sb);
        if (!inode)
                return -ENOMEM;

        btrfs_set_inode_number(BTRFS_I(inode), BTRFS_BTREE_INODE_OBJECTID);
        set_nlink(inode, 1);
        /*
         * We set the i_size on the btree inode to the max possible int.
         * The real end of the address space is determined by all of
         * the devices in the system.
         */
        inode->i_size = OFFSET_MAX;
        inode->i_mapping->a_ops = &btree_aops;
        mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);

        extent_io_tree_init(fs_info, &BTRFS_I(inode)->io_tree,
                            IO_TREE_BTREE_INODE_IO);
        extent_map_tree_init(&BTRFS_I(inode)->extent_tree);

        BTRFS_I(inode)->root = btrfs_grab_root(fs_info->tree_root);
        set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
        __insert_inode_hash(inode, hash);
        fs_info->btree_inode = inode;

        return 0;
}
static void btrfs_init_dev_replace_locks(struct btrfs_fs_info *fs_info)
{
        mutex_init(&fs_info->dev_replace.lock_finishing_cancel_unmount);
        init_rwsem(&fs_info->dev_replace.rwsem);
        init_waitqueue_head(&fs_info->dev_replace.replace_wait);
}

static void btrfs_init_qgroup(struct btrfs_fs_info *fs_info)
{
        spin_lock_init(&fs_info->qgroup_lock);
        mutex_init(&fs_info->qgroup_ioctl_lock);
        fs_info->qgroup_tree = RB_ROOT;
        INIT_LIST_HEAD(&fs_info->dirty_qgroups);
        fs_info->qgroup_seq = 1;
        fs_info->qgroup_ulist = NULL;
        fs_info->qgroup_rescan_running = false;
        fs_info->qgroup_drop_subtree_thres = BTRFS_MAX_LEVEL;
        mutex_init(&fs_info->qgroup_rescan_lock);
}
static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info)
{
        u32 max_active = fs_info->thread_pool_size;
        unsigned int flags = WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND;
        unsigned int ordered_flags = WQ_MEM_RECLAIM | WQ_FREEZABLE;

        fs_info->workers =
                btrfs_alloc_workqueue(fs_info, "worker", flags, max_active, 16);

        fs_info->delalloc_workers =
                btrfs_alloc_workqueue(fs_info, "delalloc",
                                      flags, max_active, 2);

        fs_info->flush_workers =
                btrfs_alloc_workqueue(fs_info, "flush_delalloc",
                                      flags, max_active, 0);

        fs_info->caching_workers =
                btrfs_alloc_workqueue(fs_info, "cache", flags, max_active, 0);

        fs_info->fixup_workers =
                btrfs_alloc_ordered_workqueue(fs_info, "fixup", ordered_flags);

        fs_info->endio_workers =
                alloc_workqueue("btrfs-endio", flags, max_active);
        fs_info->endio_meta_workers =
                alloc_workqueue("btrfs-endio-meta", flags, max_active);
        fs_info->rmw_workers = alloc_workqueue("btrfs-rmw", flags, max_active);
        fs_info->endio_write_workers =
                btrfs_alloc_workqueue(fs_info, "endio-write", flags,
                                      max_active, 2);
        fs_info->compressed_write_workers =
                alloc_workqueue("btrfs-compressed-write", flags, max_active);
        fs_info->endio_freespace_worker =
                btrfs_alloc_workqueue(fs_info, "freespace-write", flags,
                                      max_active, 0);
        fs_info->delayed_workers =
                btrfs_alloc_workqueue(fs_info, "delayed-meta", flags,
                                      max_active, 0);
        fs_info->qgroup_rescan_workers =
                btrfs_alloc_ordered_workqueue(fs_info, "qgroup-rescan",
                                              ordered_flags);
        fs_info->discard_ctl.discard_workers =
                alloc_ordered_workqueue("btrfs_discard", WQ_FREEZABLE);

        if (!(fs_info->workers &&
              fs_info->delalloc_workers && fs_info->flush_workers &&
              fs_info->endio_workers && fs_info->endio_meta_workers &&
              fs_info->compressed_write_workers &&
              fs_info->endio_write_workers &&
              fs_info->endio_freespace_worker && fs_info->rmw_workers &&
              fs_info->caching_workers && fs_info->fixup_workers &&
              fs_info->delayed_workers && fs_info->qgroup_rescan_workers &&
              fs_info->discard_ctl.discard_workers)) {
                return -ENOMEM;
        }

        return 0;
}
static int btrfs_init_csum_hash(struct btrfs_fs_info *fs_info, u16 csum_type)
{
        struct crypto_shash *csum_shash;
        const char *csum_driver = btrfs_super_csum_driver(csum_type);

        csum_shash = crypto_alloc_shash(csum_driver, 0, 0);
        if (IS_ERR(csum_shash)) {
                btrfs_err(fs_info, "error allocating %s hash for checksum",
                          csum_driver);
                return PTR_ERR(csum_shash);
        }

        fs_info->csum_shash = csum_shash;

        /*
         * Check if the checksum implementation is a fast accelerated one.
         * As-is this is a bit of a hack and should be replaced once the csum
         * implementations provide that information themselves.
         */
        switch (csum_type) {
        case BTRFS_CSUM_TYPE_CRC32:
                if (!strstr(crypto_shash_driver_name(csum_shash), "generic"))
                        set_bit(BTRFS_FS_CSUM_IMPL_FAST, &fs_info->flags);
                break;
        case BTRFS_CSUM_TYPE_XXHASH:
                set_bit(BTRFS_FS_CSUM_IMPL_FAST, &fs_info->flags);
                break;
        default:
                break;
        }

        btrfs_info(fs_info, "using %s (%s) checksum algorithm",
                   btrfs_super_csum_name(csum_type),
                   crypto_shash_driver_name(csum_shash));
        return 0;
}
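/*
 * For example (the exact driver names depend on the kernel's crypto config):
 * a CRC32C backed by "crc32c-intel" is flagged fast above, while the software
 * "crc32c-generic" fallback is not.
 */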
static int btrfs_replay_log(struct btrfs_fs_info *fs_info,
                            struct btrfs_fs_devices *fs_devices)
{
        int ret;
        struct btrfs_tree_parent_check check = { 0 };
        struct btrfs_root *log_tree_root;
        struct btrfs_super_block *disk_super = fs_info->super_copy;
        u64 bytenr = btrfs_super_log_root(disk_super);
        int level = btrfs_super_log_root_level(disk_super);

        if (fs_devices->rw_devices == 0) {
                btrfs_warn(fs_info, "log replay required on RO media");
                return -EIO;
        }

        log_tree_root = btrfs_alloc_root(fs_info, BTRFS_TREE_LOG_OBJECTID,
                                         GFP_KERNEL);
        if (!log_tree_root)
                return -ENOMEM;

        check.level = level;
        check.transid = fs_info->generation + 1;
        check.owner_root = BTRFS_TREE_LOG_OBJECTID;
        log_tree_root->node = read_tree_block(fs_info, bytenr, &check);
        if (IS_ERR(log_tree_root->node)) {
                btrfs_warn(fs_info, "failed to read log tree");
                ret = PTR_ERR(log_tree_root->node);
                log_tree_root->node = NULL;
                btrfs_put_root(log_tree_root);
                return ret;
        }
        if (!extent_buffer_uptodate(log_tree_root->node)) {
                btrfs_err(fs_info, "failed to read log tree");
                btrfs_put_root(log_tree_root);
                return -EIO;
        }

        /* Returns with log_tree_root freed on success. */
        ret = btrfs_recover_log_trees(log_tree_root);
        if (ret) {
                btrfs_handle_fs_error(fs_info, ret,
                                      "Failed to recover log tree");
                btrfs_put_root(log_tree_root);
                return ret;
        }

        if (sb_rdonly(fs_info->sb)) {
                ret = btrfs_commit_super(fs_info);
                if (ret)
                        return ret;
        }

        return 0;
}
2119 static int load_global_roots_objectid(struct btrfs_root *tree_root,
2120 struct btrfs_path *path, u64 objectid,
2123 struct btrfs_fs_info *fs_info = tree_root->fs_info;
2124 struct btrfs_root *root;
2125 u64 max_global_id = 0;
2127 struct btrfs_key key = {
2128 .objectid = objectid,
2129 .type = BTRFS_ROOT_ITEM_KEY,
2134 /* If we have IGNOREDATACSUMS skip loading these roots. */
2135 if (objectid == BTRFS_CSUM_TREE_OBJECTID &&
2136 btrfs_test_opt(fs_info, IGNOREDATACSUMS)) {
2137 set_bit(BTRFS_FS_STATE_NO_DATA_CSUMS, &fs_info->fs_state);
2142 ret = btrfs_search_slot(NULL, tree_root, &key, path, 0, 0);
2146 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
2147 ret = btrfs_next_leaf(tree_root, path);
2156 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
2157 if (key.objectid != objectid)
2159 btrfs_release_path(path);
2162 * Just worry about this for extent tree, it'll be the same for
2163 * everybody.
2165 if (objectid == BTRFS_EXTENT_TREE_OBJECTID)
2166 max_global_id = max(max_global_id, key.offset);
2169 root = read_tree_root_path(tree_root, path, &key);
2171 if (!btrfs_test_opt(fs_info, IGNOREBADROOTS))
2172 ret = PTR_ERR(root);
2175 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2176 ret = btrfs_global_root_insert(root);
2178 btrfs_put_root(root);
2183 btrfs_release_path(path);
2185 if (objectid == BTRFS_EXTENT_TREE_OBJECTID)
2186 fs_info->nr_global_roots = max_global_id + 1;
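/*
 * Note on the accounting above: with multiple global roots (the
 * extent-tree-v2 work) each root item for a given objectid carries its
 * global root id in key.offset, so the total is the highest id plus 1.
 */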
2188 if (!found || ret) {
2189 if (objectid == BTRFS_CSUM_TREE_OBJECTID)
2190 set_bit(BTRFS_FS_STATE_NO_DATA_CSUMS, &fs_info->fs_state);
2192 if (!btrfs_test_opt(fs_info, IGNOREBADROOTS))
2193 ret = ret ? ret : -ENOENT;
2196 btrfs_err(fs_info, "failed to load root %s", name);
2201 static int load_global_roots(struct btrfs_root *tree_root)
2203 struct btrfs_path *path;
2206 path = btrfs_alloc_path();
2210 ret = load_global_roots_objectid(tree_root, path,
2211 BTRFS_EXTENT_TREE_OBJECTID, "extent");
2214 ret = load_global_roots_objectid(tree_root, path,
2215 BTRFS_CSUM_TREE_OBJECTID, "csum");
2218 if (!btrfs_fs_compat_ro(tree_root->fs_info, FREE_SPACE_TREE))
2220 ret = load_global_roots_objectid(tree_root, path,
2221 BTRFS_FREE_SPACE_TREE_OBJECTID,
2224 btrfs_free_path(path);
2228 static int btrfs_read_roots(struct btrfs_fs_info *fs_info)
2230 struct btrfs_root *tree_root = fs_info->tree_root;
2231 struct btrfs_root *root;
2232 struct btrfs_key location;
2235 ASSERT(fs_info->tree_root);
2237 ret = load_global_roots(tree_root);
2241 location.type = BTRFS_ROOT_ITEM_KEY;
2242 location.offset = 0;
2244 if (btrfs_fs_compat_ro(fs_info, BLOCK_GROUP_TREE)) {
2245 location.objectid = BTRFS_BLOCK_GROUP_TREE_OBJECTID;
2246 root = btrfs_read_tree_root(tree_root, &location);
2248 if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) {
2249 ret = PTR_ERR(root);
2253 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2254 fs_info->block_group_root = root;
2258 location.objectid = BTRFS_DEV_TREE_OBJECTID;
2259 root = btrfs_read_tree_root(tree_root, &location);
2261 if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) {
2262 ret = PTR_ERR(root);
2266 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2267 fs_info->dev_root = root;
2269 /* Initialize fs_info for all devices in any case */
2270 ret = btrfs_init_devices_late(fs_info);
2275 * This tree can share blocks with some other fs tree during relocation
2276 * and we need a proper setup by btrfs_get_fs_root
2278 root = btrfs_get_fs_root(tree_root->fs_info,
2279 BTRFS_DATA_RELOC_TREE_OBJECTID, true);
2281 if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) {
2282 ret = PTR_ERR(root);
2286 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2287 fs_info->data_reloc_root = root;
2290 location.objectid = BTRFS_QUOTA_TREE_OBJECTID;
2291 root = btrfs_read_tree_root(tree_root, &location);
2292 if (!IS_ERR(root)) {
2293 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2294 fs_info->quota_root = root;
2297 location.objectid = BTRFS_UUID_TREE_OBJECTID;
2298 root = btrfs_read_tree_root(tree_root, &location);
2300 if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) {
2301 ret = PTR_ERR(root);
2306 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2307 fs_info->uuid_root = root;
2310 if (btrfs_fs_incompat(fs_info, RAID_STRIPE_TREE)) {
2311 location.objectid = BTRFS_RAID_STRIPE_TREE_OBJECTID;
2312 root = btrfs_read_tree_root(tree_root, &location);
2314 if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) {
2315 ret = PTR_ERR(root);
2319 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2320 fs_info->stripe_root = root;
2326 btrfs_warn(fs_info, "failed to read root (objectid=%llu): %d",
2327 location.objectid, ret);
2332 * Real super block validation
2333 * NOTE: super csum type and incompat features will not be checked here.
2335 * @sb: super block to check
2336 * @mirror_num: the super block copy number whose bytenr is checked:
2337 * 0 the primary (1st) sb
2338 * 1, 2 2nd and 3rd backup copy
2339 * -1 skip bytenr check
2341 int btrfs_validate_super(const struct btrfs_fs_info *fs_info,
2342 const struct btrfs_super_block *sb, int mirror_num)
2344 u64 nodesize = btrfs_super_nodesize(sb);
2345 u64 sectorsize = btrfs_super_sectorsize(sb);
2347 const bool ignore_flags = btrfs_test_opt(fs_info, IGNORESUPERFLAGS);
2349 if (btrfs_super_magic(sb) != BTRFS_MAGIC) {
2350 btrfs_err(fs_info, "no valid FS found");
2353 if ((btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP)) {
2354 if (!ignore_flags) {
2356 "unrecognized or unsupported super flag 0x%llx",
2357 btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP);
2361 "unrecognized or unsupported super flags: 0x%llx, ignored",
2362 btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP);
2365 if (btrfs_super_root_level(sb) >= BTRFS_MAX_LEVEL) {
2366 btrfs_err(fs_info, "tree_root level too big: %d >= %d",
2367 btrfs_super_root_level(sb), BTRFS_MAX_LEVEL);
2370 if (btrfs_super_chunk_root_level(sb) >= BTRFS_MAX_LEVEL) {
2371 btrfs_err(fs_info, "chunk_root level too big: %d >= %d",
2372 btrfs_super_chunk_root_level(sb), BTRFS_MAX_LEVEL);
2375 if (btrfs_super_log_root_level(sb) >= BTRFS_MAX_LEVEL) {
2376 btrfs_err(fs_info, "log_root level too big: %d >= %d",
2377 btrfs_super_log_root_level(sb), BTRFS_MAX_LEVEL);
2382 * Check sectorsize and nodesize first, other checks will need them.
2383 * Check all possible sectorsizes (4K, 8K, 16K, 32K, 64K) here.
2385 if (!is_power_of_2(sectorsize) || sectorsize < 4096 ||
2386 sectorsize > BTRFS_MAX_METADATA_BLOCKSIZE) {
2387 btrfs_err(fs_info, "invalid sectorsize %llu", sectorsize);
2392 * We only support at most two sectorsizes: 4K and PAGE_SIZE.
2394 * We can support 16K sectorsize with 64K page size without problem,
2395 * but such sectorsize/pagesize combination doesn't make much sense.
2396 * 4K will be our future standard, PAGE_SIZE is supported from the very
2397 * beginning.
2399 if (sectorsize > PAGE_SIZE || (sectorsize != SZ_4K && sectorsize != PAGE_SIZE)) {
2401 "sectorsize %llu not yet supported for page size %lu",
2402 sectorsize, PAGE_SIZE);
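/*
 * Worked example of the check above: with 4K pages only a 4096 byte
 * sectorsize passes; with 64K pages both 4096 (subpage mode) and 65536
 * pass, while e.g. 16K is rejected.
 */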
2406 if (!is_power_of_2(nodesize) || nodesize < sectorsize ||
2407 nodesize > BTRFS_MAX_METADATA_BLOCKSIZE) {
2408 btrfs_err(fs_info, "invalid nodesize %llu", nodesize);
2411 if (nodesize != le32_to_cpu(sb->__unused_leafsize)) {
2412 btrfs_err(fs_info, "invalid leafsize %u, should be %llu",
2413 le32_to_cpu(sb->__unused_leafsize), nodesize);
2417 /* Root alignment check */
2418 if (!IS_ALIGNED(btrfs_super_root(sb), sectorsize)) {
2419 btrfs_warn(fs_info, "tree_root block unaligned: %llu",
2420 btrfs_super_root(sb));
2423 if (!IS_ALIGNED(btrfs_super_chunk_root(sb), sectorsize)) {
2424 btrfs_warn(fs_info, "chunk_root block unaligned: %llu",
2425 btrfs_super_chunk_root(sb));
2428 if (!IS_ALIGNED(btrfs_super_log_root(sb), sectorsize)) {
2429 btrfs_warn(fs_info, "log_root block unaligned: %llu",
2430 btrfs_super_log_root(sb));
2434 if (!fs_info->fs_devices->temp_fsid &&
2435 memcmp(fs_info->fs_devices->fsid, sb->fsid, BTRFS_FSID_SIZE) != 0) {
2437 "superblock fsid doesn't match fsid of fs_devices: %pU != %pU",
2438 sb->fsid, fs_info->fs_devices->fsid);
2442 if (memcmp(fs_info->fs_devices->metadata_uuid, btrfs_sb_fsid_ptr(sb),
2443 BTRFS_FSID_SIZE) != 0) {
2445 "superblock metadata_uuid doesn't match metadata uuid of fs_devices: %pU != %pU",
2446 btrfs_sb_fsid_ptr(sb), fs_info->fs_devices->metadata_uuid);
2450 if (memcmp(fs_info->fs_devices->metadata_uuid, sb->dev_item.fsid,
2451 BTRFS_FSID_SIZE) != 0) {
2453 "dev_item UUID does not match metadata fsid: %pU != %pU",
2454 fs_info->fs_devices->metadata_uuid, sb->dev_item.fsid);
2459 * Artificial requirement for block-group-tree to force newer features
2460 * (free-space-tree, no-holes) so the test matrix is smaller.
2462 if (btrfs_fs_compat_ro(fs_info, BLOCK_GROUP_TREE) &&
2463 (!btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID) ||
2464 !btrfs_fs_incompat(fs_info, NO_HOLES))) {
2466 "block-group-tree feature requires free-space-tree and no-holes");
2471 * Hint to catch really bogus numbers, bitflips or so, more exact checks are
2472 * done later.
2474 if (btrfs_super_bytes_used(sb) < 6 * btrfs_super_nodesize(sb)) {
2475 btrfs_err(fs_info, "bytes_used is too small %llu",
2476 btrfs_super_bytes_used(sb));
2479 if (!is_power_of_2(btrfs_super_stripesize(sb))) {
2480 btrfs_err(fs_info, "invalid stripesize %u",
2481 btrfs_super_stripesize(sb));
2484 if (btrfs_super_num_devices(sb) > (1UL << 31))
2485 btrfs_warn(fs_info, "suspicious number of devices: %llu",
2486 btrfs_super_num_devices(sb));
2487 if (btrfs_super_num_devices(sb) == 0) {
2488 btrfs_err(fs_info, "number of devices is 0");
2492 if (mirror_num >= 0 &&
2493 btrfs_super_bytenr(sb) != btrfs_sb_offset(mirror_num)) {
2494 btrfs_err(fs_info, "super offset mismatch %llu != %llu",
2495 btrfs_super_bytenr(sb), btrfs_sb_offset(mirror_num));
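/*
 * For reference: btrfs_sb_offset() maps the copies to fixed locations,
 * 64KiB for the primary super block and 64MiB / 256GiB for the two
 * backup copies (mirror_num 1 and 2).
 */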
2500 * Obvious sys_chunk_array corruptions, it must hold at least one key
2501 * and one chunk.
2503 if (btrfs_super_sys_array_size(sb) > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
2504 btrfs_err(fs_info, "system chunk array too big %u > %u",
2505 btrfs_super_sys_array_size(sb),
2506 BTRFS_SYSTEM_CHUNK_ARRAY_SIZE);
2509 if (btrfs_super_sys_array_size(sb) < sizeof(struct btrfs_disk_key)
2510 + sizeof(struct btrfs_chunk)) {
2511 btrfs_err(fs_info, "system chunk array too small %u < %zu",
2512 btrfs_super_sys_array_size(sb),
2513 sizeof(struct btrfs_disk_key)
2514 + sizeof(struct btrfs_chunk));
2519 * The generation is a global counter, we'll trust it more than the others
2520 * but it's still possible that it's the one that's wrong.
2522 if (btrfs_super_generation(sb) < btrfs_super_chunk_root_generation(sb))
2524 "suspicious: generation < chunk_root_generation: %llu < %llu",
2525 btrfs_super_generation(sb),
2526 btrfs_super_chunk_root_generation(sb));
2527 if (btrfs_super_generation(sb) < btrfs_super_cache_generation(sb)
2528 && btrfs_super_cache_generation(sb) != (u64)-1)
2530 "suspicious: generation < cache_generation: %llu < %llu",
2531 btrfs_super_generation(sb),
2532 btrfs_super_cache_generation(sb));
2538 * Validation of super block at mount time.
2539 * Some checks already done early at mount time, like the csum type and
2540 * incompat flags, will be skipped.
2542 static int btrfs_validate_mount_super(struct btrfs_fs_info *fs_info)
2544 return btrfs_validate_super(fs_info, fs_info->super_copy, 0);
2548 * Validation of super block at write time.
2549 * Some checks like the bytenr check will be skipped as their values will be
2550 * overwritten soon.
2551 * Extra checks like csum type and incompat flags will be done here.
2553 static int btrfs_validate_write_super(struct btrfs_fs_info *fs_info,
2554 struct btrfs_super_block *sb)
2558 ret = btrfs_validate_super(fs_info, sb, -1);
2561 if (!btrfs_supported_super_csum(btrfs_super_csum_type(sb))) {
2563 btrfs_err(fs_info, "invalid csum type, has %u want %u",
2564 btrfs_super_csum_type(sb), BTRFS_CSUM_TYPE_CRC32);
2567 if (btrfs_super_incompat_flags(sb) & ~BTRFS_FEATURE_INCOMPAT_SUPP) {
2570 "invalid incompat flags, has 0x%llx valid mask 0x%llx",
2571 btrfs_super_incompat_flags(sb),
2572 (unsigned long long)BTRFS_FEATURE_INCOMPAT_SUPP);
2578 "super block corruption detected before writing it to disk");
2582 static int load_super_root(struct btrfs_root *root, u64 bytenr, u64 gen, int level)
2584 struct btrfs_tree_parent_check check = {
2587 .owner_root = btrfs_root_id(root)
2591 root->node = read_tree_block(root->fs_info, bytenr, &check);
2592 if (IS_ERR(root->node)) {
2593 ret = PTR_ERR(root->node);
2597 if (!extent_buffer_uptodate(root->node)) {
2598 free_extent_buffer(root->node);
2603 btrfs_set_root_node(&root->root_item, root->node);
2604 root->commit_root = btrfs_root_node(root);
2605 btrfs_set_root_refs(&root->root_item, 1);
2609 static int load_important_roots(struct btrfs_fs_info *fs_info)
2611 struct btrfs_super_block *sb = fs_info->super_copy;
2615 bytenr = btrfs_super_root(sb);
2616 gen = btrfs_super_generation(sb);
2617 level = btrfs_super_root_level(sb);
2618 ret = load_super_root(fs_info->tree_root, bytenr, gen, level);
2620 btrfs_warn(fs_info, "couldn't read tree root");
2626 static int __cold init_tree_roots(struct btrfs_fs_info *fs_info)
2628 int backup_index = find_newest_super_backup(fs_info);
2629 struct btrfs_super_block *sb = fs_info->super_copy;
2630 struct btrfs_root *tree_root = fs_info->tree_root;
2631 bool handle_error = false;
2635 for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) {
2637 if (!IS_ERR(tree_root->node))
2638 free_extent_buffer(tree_root->node);
2639 tree_root->node = NULL;
2641 if (!btrfs_test_opt(fs_info, USEBACKUPROOT))
2644 free_root_pointers(fs_info, 0);
2647 * Don't use the log in recovery mode, it won't be
2648 * valid anyway.
2650 btrfs_set_super_log_root(sb, 0);
2652 btrfs_warn(fs_info, "try to load backup roots slot %d", i);
2653 ret = read_backup_root(fs_info, i);
2659 ret = load_important_roots(fs_info);
2661 handle_error = true;
2666 * No need to hold btrfs_root::objectid_mutex since the fs
2667 * hasn't been fully initialised and we are the only user
2669 ret = btrfs_init_root_free_objectid(tree_root);
2671 handle_error = true;
2675 ASSERT(tree_root->free_objectid <= BTRFS_LAST_FREE_OBJECTID);
2677 ret = btrfs_read_roots(fs_info);
2679 handle_error = true;
2683 /* All successful */
2684 fs_info->generation = btrfs_header_generation(tree_root->node);
2685 btrfs_set_last_trans_committed(fs_info, fs_info->generation);
2686 fs_info->last_reloc_trans = 0;
2688 /* Always begin writing backup roots after the one being used */
2689 if (backup_index < 0) {
2690 fs_info->backup_root_index = 0;
2692 fs_info->backup_root_index = backup_index + 1;
2693 fs_info->backup_root_index %= BTRFS_NUM_BACKUP_ROOTS;
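/*
 * Example of the rotation above: with BTRFS_NUM_BACKUP_ROOTS == 4 and
 * the newest backup found in slot 2, the next backup root is written
 * to slot (2 + 1) % 4 == 3.
 */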
2701 void btrfs_init_fs_info(struct btrfs_fs_info *fs_info)
2703 INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
2704 INIT_RADIX_TREE(&fs_info->buffer_radix, GFP_ATOMIC);
2705 INIT_LIST_HEAD(&fs_info->trans_list);
2706 INIT_LIST_HEAD(&fs_info->dead_roots);
2707 INIT_LIST_HEAD(&fs_info->delayed_iputs);
2708 INIT_LIST_HEAD(&fs_info->delalloc_roots);
2709 INIT_LIST_HEAD(&fs_info->caching_block_groups);
2710 spin_lock_init(&fs_info->delalloc_root_lock);
2711 spin_lock_init(&fs_info->trans_lock);
2712 spin_lock_init(&fs_info->fs_roots_radix_lock);
2713 spin_lock_init(&fs_info->delayed_iput_lock);
2714 spin_lock_init(&fs_info->defrag_inodes_lock);
2715 spin_lock_init(&fs_info->super_lock);
2716 spin_lock_init(&fs_info->buffer_lock);
2717 spin_lock_init(&fs_info->unused_bgs_lock);
2718 spin_lock_init(&fs_info->treelog_bg_lock);
2719 spin_lock_init(&fs_info->zone_active_bgs_lock);
2720 spin_lock_init(&fs_info->relocation_bg_lock);
2721 rwlock_init(&fs_info->tree_mod_log_lock);
2722 rwlock_init(&fs_info->global_root_lock);
2723 mutex_init(&fs_info->unused_bg_unpin_mutex);
2724 mutex_init(&fs_info->reclaim_bgs_lock);
2725 mutex_init(&fs_info->reloc_mutex);
2726 mutex_init(&fs_info->delalloc_root_mutex);
2727 mutex_init(&fs_info->zoned_meta_io_lock);
2728 mutex_init(&fs_info->zoned_data_reloc_io_lock);
2729 seqlock_init(&fs_info->profiles_lock);
2731 btrfs_lockdep_init_map(fs_info, btrfs_trans_num_writers);
2732 btrfs_lockdep_init_map(fs_info, btrfs_trans_num_extwriters);
2733 btrfs_lockdep_init_map(fs_info, btrfs_trans_pending_ordered);
2734 btrfs_lockdep_init_map(fs_info, btrfs_ordered_extent);
2735 btrfs_state_lockdep_init_map(fs_info, btrfs_trans_commit_prep,
2736 BTRFS_LOCKDEP_TRANS_COMMIT_PREP);
2737 btrfs_state_lockdep_init_map(fs_info, btrfs_trans_unblocked,
2738 BTRFS_LOCKDEP_TRANS_UNBLOCKED);
2739 btrfs_state_lockdep_init_map(fs_info, btrfs_trans_super_committed,
2740 BTRFS_LOCKDEP_TRANS_SUPER_COMMITTED);
2741 btrfs_state_lockdep_init_map(fs_info, btrfs_trans_completed,
2742 BTRFS_LOCKDEP_TRANS_COMPLETED);
2744 INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
2745 INIT_LIST_HEAD(&fs_info->space_info);
2746 INIT_LIST_HEAD(&fs_info->tree_mod_seq_list);
2747 INIT_LIST_HEAD(&fs_info->unused_bgs);
2748 INIT_LIST_HEAD(&fs_info->reclaim_bgs);
2749 INIT_LIST_HEAD(&fs_info->zone_active_bgs);
2750 #ifdef CONFIG_BTRFS_DEBUG
2751 INIT_LIST_HEAD(&fs_info->allocated_roots);
2752 INIT_LIST_HEAD(&fs_info->allocated_ebs);
2753 spin_lock_init(&fs_info->eb_leak_lock);
2755 fs_info->mapping_tree = RB_ROOT_CACHED;
2756 rwlock_init(&fs_info->mapping_tree_lock);
2757 btrfs_init_block_rsv(&fs_info->global_block_rsv,
2758 BTRFS_BLOCK_RSV_GLOBAL);
2759 btrfs_init_block_rsv(&fs_info->trans_block_rsv, BTRFS_BLOCK_RSV_TRANS);
2760 btrfs_init_block_rsv(&fs_info->chunk_block_rsv, BTRFS_BLOCK_RSV_CHUNK);
2761 btrfs_init_block_rsv(&fs_info->empty_block_rsv, BTRFS_BLOCK_RSV_EMPTY);
2762 btrfs_init_block_rsv(&fs_info->delayed_block_rsv,
2763 BTRFS_BLOCK_RSV_DELOPS);
2764 btrfs_init_block_rsv(&fs_info->delayed_refs_rsv,
2765 BTRFS_BLOCK_RSV_DELREFS);
2767 atomic_set(&fs_info->async_delalloc_pages, 0);
2768 atomic_set(&fs_info->defrag_running, 0);
2769 atomic_set(&fs_info->nr_delayed_iputs, 0);
2770 atomic64_set(&fs_info->tree_mod_seq, 0);
2771 fs_info->global_root_tree = RB_ROOT;
2772 fs_info->max_inline = BTRFS_DEFAULT_MAX_INLINE;
2773 fs_info->metadata_ratio = 0;
2774 fs_info->defrag_inodes = RB_ROOT;
2775 atomic64_set(&fs_info->free_chunk_space, 0);
2776 fs_info->tree_mod_log = RB_ROOT;
2777 fs_info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL;
2778 btrfs_init_ref_verify(fs_info);
2780 fs_info->thread_pool_size = min_t(unsigned long,
2781 num_online_cpus() + 2, 8);
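/*
 * Example: a 4-CPU machine gets min(4 + 2, 8) == 6 worker threads by
 * default; 6 or more CPUs hit the cap of 8.
 */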
2783 INIT_LIST_HEAD(&fs_info->ordered_roots);
2784 spin_lock_init(&fs_info->ordered_root_lock);
2786 btrfs_init_scrub(fs_info);
2787 btrfs_init_balance(fs_info);
2788 btrfs_init_async_reclaim_work(fs_info);
2790 rwlock_init(&fs_info->block_group_cache_lock);
2791 fs_info->block_group_cache_tree = RB_ROOT_CACHED;
2793 extent_io_tree_init(fs_info, &fs_info->excluded_extents,
2794 IO_TREE_FS_EXCLUDED_EXTENTS);
2796 mutex_init(&fs_info->ordered_operations_mutex);
2797 mutex_init(&fs_info->tree_log_mutex);
2798 mutex_init(&fs_info->chunk_mutex);
2799 mutex_init(&fs_info->transaction_kthread_mutex);
2800 mutex_init(&fs_info->cleaner_mutex);
2801 mutex_init(&fs_info->ro_block_group_mutex);
2802 init_rwsem(&fs_info->commit_root_sem);
2803 init_rwsem(&fs_info->cleanup_work_sem);
2804 init_rwsem(&fs_info->subvol_sem);
2805 sema_init(&fs_info->uuid_tree_rescan_sem, 1);
2807 btrfs_init_dev_replace_locks(fs_info);
2808 btrfs_init_qgroup(fs_info);
2809 btrfs_discard_init(fs_info);
2811 btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);
2812 btrfs_init_free_cluster(&fs_info->data_alloc_cluster);
2814 init_waitqueue_head(&fs_info->transaction_throttle);
2815 init_waitqueue_head(&fs_info->transaction_wait);
2816 init_waitqueue_head(&fs_info->transaction_blocked_wait);
2817 init_waitqueue_head(&fs_info->async_submit_wait);
2818 init_waitqueue_head(&fs_info->delayed_iputs_wait);
2820 /* Usable values until the real ones are cached from the superblock */
2821 fs_info->nodesize = 4096;
2822 fs_info->sectorsize = 4096;
2823 fs_info->sectorsize_bits = ilog2(4096);
2824 fs_info->stripesize = 4096;
2826 /* Default compress algorithm when the user does -o compress */
2827 fs_info->compress_type = BTRFS_COMPRESS_ZLIB;
2829 fs_info->max_extent_size = BTRFS_MAX_EXTENT_SIZE;
2831 spin_lock_init(&fs_info->swapfile_pins_lock);
2832 fs_info->swapfile_pins = RB_ROOT;
2834 fs_info->bg_reclaim_threshold = BTRFS_DEFAULT_RECLAIM_THRESH;
2835 INIT_WORK(&fs_info->reclaim_bgs_work, btrfs_reclaim_bgs_work);
2838 static int init_mount_fs_info(struct btrfs_fs_info *fs_info, struct super_block *sb)
2843 /* Temporary fixed values for block size until we read the superblock. */
2844 sb->s_blocksize = BTRFS_BDEV_BLOCKSIZE;
2845 sb->s_blocksize_bits = blksize_bits(BTRFS_BDEV_BLOCKSIZE);
2847 ret = percpu_counter_init(&fs_info->ordered_bytes, 0, GFP_KERNEL);
2851 ret = percpu_counter_init(&fs_info->evictable_extent_maps, 0, GFP_KERNEL);
2855 spin_lock_init(&fs_info->extent_map_shrinker_lock);
2857 ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0, GFP_KERNEL);
2861 fs_info->dirty_metadata_batch = PAGE_SIZE *
2862 (1 + ilog2(nr_cpu_ids));
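/*
 * Example: with 4K pages and nr_cpu_ids == 8 the batch is
 * 4096 * (1 + ilog2(8)) = 16384 bytes.
 */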
2864 ret = percpu_counter_init(&fs_info->delalloc_bytes, 0, GFP_KERNEL);
2868 ret = percpu_counter_init(&fs_info->dev_replace.bio_counter, 0,
2873 fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root),
2875 if (!fs_info->delayed_root)
2877 btrfs_init_delayed_root(fs_info->delayed_root);
2880 set_bit(BTRFS_FS_STATE_RO, &fs_info->fs_state);
2881 if (btrfs_test_opt(fs_info, IGNOREMETACSUMS))
2882 set_bit(BTRFS_FS_STATE_SKIP_META_CSUMS, &fs_info->fs_state);
2884 return btrfs_alloc_stripe_hash_table(fs_info);
2887 static int btrfs_uuid_rescan_kthread(void *data)
2889 struct btrfs_fs_info *fs_info = data;
2893 * 1st step is to iterate through the existing UUID tree and
2894 * to delete all entries that contain outdated data.
2895 * 2nd step is to add all missing entries to the UUID tree.
2897 ret = btrfs_uuid_tree_iterate(fs_info);
2900 btrfs_warn(fs_info, "iterating uuid_tree failed %d",
2902 up(&fs_info->uuid_tree_rescan_sem);
2905 return btrfs_uuid_scan_kthread(data);
2908 static int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info)
2910 struct task_struct *task;
2912 down(&fs_info->uuid_tree_rescan_sem);
2913 task = kthread_run(btrfs_uuid_rescan_kthread, fs_info, "btrfs-uuid");
2915 /* fs_info->update_uuid_tree_gen remains 0 in all error cases */
2916 btrfs_warn(fs_info, "failed to start uuid_rescan task");
2917 up(&fs_info->uuid_tree_rescan_sem);
2918 return PTR_ERR(task);
2924 static int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
2926 u64 root_objectid = 0;
2927 struct btrfs_root *gang[8];
2933 spin_lock(&fs_info->fs_roots_radix_lock);
2934 found = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
2935 (void **)gang, root_objectid,
2938 spin_unlock(&fs_info->fs_roots_radix_lock);
2941 root_objectid = btrfs_root_id(gang[found - 1]) + 1;
2943 for (int i = 0; i < found; i++) {
2944 /* Avoid grabbing roots in dead_roots. */
2945 if (btrfs_root_refs(&gang[i]->root_item) == 0) {
2949 /* Grab all the search results for later use. */
2950 gang[i] = btrfs_grab_root(gang[i]);
2952 spin_unlock(&fs_info->fs_roots_radix_lock);
2954 for (int i = 0; i < found; i++) {
2957 root_objectid = btrfs_root_id(gang[i]);
2959 * Continue to release the remaining roots after the first
2960 * error without cleanup and preserve the first error
2961 * for the return.
2964 ret = btrfs_orphan_cleanup(gang[i]);
2965 btrfs_put_root(gang[i]);
2976 * Mounting logic specific to read-write file systems. Shared by open_ctree
2977 * and btrfs_remount when remounting from read-only to read-write.
2979 int btrfs_start_pre_rw_mount(struct btrfs_fs_info *fs_info)
2982 const bool cache_opt = btrfs_test_opt(fs_info, SPACE_CACHE);
2983 bool rebuild_free_space_tree = false;
2985 if (btrfs_test_opt(fs_info, CLEAR_CACHE) &&
2986 btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
2987 if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2))
2989 "'clear_cache' option is ignored with extent tree v2");
2991 rebuild_free_space_tree = true;
2992 } else if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
2993 !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID)) {
2994 btrfs_warn(fs_info, "free space tree is invalid");
2995 rebuild_free_space_tree = true;
2998 if (rebuild_free_space_tree) {
2999 btrfs_info(fs_info, "rebuilding free space tree");
3000 ret = btrfs_rebuild_free_space_tree(fs_info);
3003 "failed to rebuild free space tree: %d", ret);
3008 if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
3009 !btrfs_test_opt(fs_info, FREE_SPACE_TREE)) {
3010 btrfs_info(fs_info, "disabling free space tree");
3011 ret = btrfs_delete_free_space_tree(fs_info);
3014 "failed to disable free space tree: %d", ret);
3020 * btrfs_find_orphan_roots() is responsible for finding all the dead
3021 * roots (with 0 refs), flag them with BTRFS_ROOT_DEAD_TREE and load
3022 * them into the fs_info->fs_roots_radix tree. This must be done before
3023 * calling btrfs_orphan_cleanup() on the tree root. If we don't do it
3024 * first, then btrfs_orphan_cleanup() will delete a dead root's orphan
3025 * item before the root's tree is deleted - this means that if we unmount
3026 * or crash before the deletion completes, on the next mount we will not
3027 * delete what remains of the tree because the orphan item does not
3028 * exist anymore, which is what tells us we have a pending deletion.
3030 ret = btrfs_find_orphan_roots(fs_info);
3034 ret = btrfs_cleanup_fs_roots(fs_info);
3038 down_read(&fs_info->cleanup_work_sem);
3039 if ((ret = btrfs_orphan_cleanup(fs_info->fs_root)) ||
3040 (ret = btrfs_orphan_cleanup(fs_info->tree_root))) {
3041 up_read(&fs_info->cleanup_work_sem);
3044 up_read(&fs_info->cleanup_work_sem);
3046 mutex_lock(&fs_info->cleaner_mutex);
3047 ret = btrfs_recover_relocation(fs_info);
3048 mutex_unlock(&fs_info->cleaner_mutex);
3050 btrfs_warn(fs_info, "failed to recover relocation: %d", ret);
3054 if (btrfs_test_opt(fs_info, FREE_SPACE_TREE) &&
3055 !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
3056 btrfs_info(fs_info, "creating free space tree");
3057 ret = btrfs_create_free_space_tree(fs_info);
3060 "failed to create free space tree: %d", ret);
3065 if (cache_opt != btrfs_free_space_cache_v1_active(fs_info)) {
3066 ret = btrfs_set_free_space_cache_v1_active(fs_info, cache_opt);
3071 ret = btrfs_resume_balance_async(fs_info);
3075 ret = btrfs_resume_dev_replace_async(fs_info);
3077 btrfs_warn(fs_info, "failed to resume dev_replace");
3081 btrfs_qgroup_rescan_resume(fs_info);
3083 if (!fs_info->uuid_root) {
3084 btrfs_info(fs_info, "creating UUID tree");
3085 ret = btrfs_create_uuid_tree(fs_info);
3088 "failed to create the UUID tree %d", ret);
3098 * Do various sanity and dependency checks of different features.
3100 * @is_rw_mount: If the mount is read-write.
3102 * This is the place for less strict checks (like for subpage or artificial
3103 * feature dependencies).
3105 * For strict checks or possible corruption detection, see
3106 * btrfs_validate_super().
3108 * This should be called after btrfs_parse_options(), as some mount options
3109 * (space cache related) can modify the on-disk format, like the free space
3110 * tree, and screw up certain feature dependencies.
3112 int btrfs_check_features(struct btrfs_fs_info *fs_info, bool is_rw_mount)
3114 struct btrfs_super_block *disk_super = fs_info->super_copy;
3115 u64 incompat = btrfs_super_incompat_flags(disk_super);
3116 const u64 compat_ro = btrfs_super_compat_ro_flags(disk_super);
3117 const u64 compat_ro_unsupp = (compat_ro & ~BTRFS_FEATURE_COMPAT_RO_SUPP);
3119 if (incompat & ~BTRFS_FEATURE_INCOMPAT_SUPP) {
3121 "cannot mount because of unknown incompat features (0x%llx)",
3126 /* Runtime limitation for mixed block groups. */
3127 if ((incompat & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) &&
3128 (fs_info->sectorsize != fs_info->nodesize)) {
3130 "unequal nodesize/sectorsize (%u != %u) are not allowed for mixed block groups",
3131 fs_info->nodesize, fs_info->sectorsize);
3135 /* Mixed backref is an always-enabled feature. */
3136 incompat |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
3138 /* Set compression related flags just in case. */
3139 if (fs_info->compress_type == BTRFS_COMPRESS_LZO)
3140 incompat |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
3141 else if (fs_info->compress_type == BTRFS_COMPRESS_ZSTD)
3142 incompat |= BTRFS_FEATURE_INCOMPAT_COMPRESS_ZSTD;
3145 * An ancient flag, which should really be marked deprecated.
3146 * Such a runtime limitation doesn't really need an incompat flag.
3148 if (btrfs_super_nodesize(disk_super) > PAGE_SIZE)
3149 incompat |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
3151 if (compat_ro_unsupp && is_rw_mount) {
3153 "cannot mount read-write because of unknown compat_ro features (0x%llx)",
3159 * We have unsupported RO compat features: although RO mounted, we
3160 * should not cause any metadata writes, including log replay,
3161 * or we could screw up whatever the new feature requires.
3163 if (compat_ro_unsupp && btrfs_super_log_root(disk_super) &&
3164 !btrfs_test_opt(fs_info, NOLOGREPLAY)) {
3166 "cannot replay dirty log with unsupported compat_ro features (0x%llx), try rescue=nologreplay",
3172 * Artificial limitations for block group tree, to force
3173 * block-group-tree to rely on no-holes and free-space-tree.
3175 if (btrfs_fs_compat_ro(fs_info, BLOCK_GROUP_TREE) &&
3176 (!btrfs_fs_incompat(fs_info, NO_HOLES) ||
3177 !btrfs_test_opt(fs_info, FREE_SPACE_TREE))) {
3179 "block-group-tree feature requires no-holes and free-space-tree features");
3184 * Subpage runtime limitation on v1 cache.
3186 * V1 space cache still has some hard-coded PAGE_SIZE usage, while
3187 * we're already defaulting to v2 cache, no need to bother v1 as it's
3188 * going to be deprecated anyway.
3190 if (fs_info->sectorsize < PAGE_SIZE && btrfs_test_opt(fs_info, SPACE_CACHE)) {
3192 "v1 space cache is not supported for page size %lu with sectorsize %u",
3193 PAGE_SIZE, fs_info->sectorsize);
3197 /* This can be called by remount, we need to protect the super block. */
3198 spin_lock(&fs_info->super_lock);
3199 btrfs_set_super_incompat_flags(disk_super, incompat);
3200 spin_unlock(&fs_info->super_lock);
3205 int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_devices,
3206 const char *options)
3213 struct btrfs_super_block *disk_super;
3214 struct btrfs_fs_info *fs_info = btrfs_sb(sb);
3215 struct btrfs_root *tree_root;
3216 struct btrfs_root *chunk_root;
3220 ret = init_mount_fs_info(fs_info, sb);
3224 /* These need to be init'ed before we start creating inodes and such. */
3225 tree_root = btrfs_alloc_root(fs_info, BTRFS_ROOT_TREE_OBJECTID,
3227 fs_info->tree_root = tree_root;
3228 chunk_root = btrfs_alloc_root(fs_info, BTRFS_CHUNK_TREE_OBJECTID,
3230 fs_info->chunk_root = chunk_root;
3231 if (!tree_root || !chunk_root) {
3236 ret = btrfs_init_btree_inode(sb);
3240 invalidate_bdev(fs_devices->latest_dev->bdev);
3243 * Read super block and check the signature bytes only
3245 disk_super = btrfs_read_dev_super(fs_devices->latest_dev->bdev);
3246 if (IS_ERR(disk_super)) {
3247 ret = PTR_ERR(disk_super);
3251 btrfs_info(fs_info, "first mount of filesystem %pU", disk_super->fsid);
3253 * Verify the type first; if that or the checksum value is
3254 * corrupted, we'll find out.
3256 csum_type = btrfs_super_csum_type(disk_super);
3257 if (!btrfs_supported_super_csum(csum_type)) {
3258 btrfs_err(fs_info, "unsupported checksum algorithm: %u",
3261 btrfs_release_disk_super(disk_super);
3265 fs_info->csum_size = btrfs_super_csum_size(disk_super);
3267 ret = btrfs_init_csum_hash(fs_info, csum_type);
3269 btrfs_release_disk_super(disk_super);
3274 * We want to check superblock checksum, the type is stored inside.
3275 * Pass the whole disk block of size BTRFS_SUPER_INFO_SIZE (4k).
3277 if (btrfs_check_super_csum(fs_info, disk_super)) {
3278 btrfs_err(fs_info, "superblock checksum mismatch");
3280 btrfs_release_disk_super(disk_super);
3285 * super_copy is zeroed at allocation time and we never touch the
3286 * following bytes up to INFO_SIZE; the checksum is calculated from
3287 * the whole block of INFO_SIZE.
3289 memcpy(fs_info->super_copy, disk_super, sizeof(*fs_info->super_copy));
3290 btrfs_release_disk_super(disk_super);
3292 disk_super = fs_info->super_copy;
3294 memcpy(fs_info->super_for_commit, fs_info->super_copy,
3295 sizeof(*fs_info->super_for_commit));
3297 ret = btrfs_validate_mount_super(fs_info);
3299 btrfs_err(fs_info, "superblock contains fatal errors");
3304 if (!btrfs_super_root(disk_super)) {
3305 btrfs_err(fs_info, "invalid superblock tree root bytenr");
3310 /* Check the FS state, i.e. whether the FS is broken. */
3311 if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_ERROR)
3312 WRITE_ONCE(fs_info->fs_error, -EUCLEAN);
3314 /* Set up fs_info before parsing mount options */
3315 nodesize = btrfs_super_nodesize(disk_super);
3316 sectorsize = btrfs_super_sectorsize(disk_super);
3317 stripesize = sectorsize;
3318 fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids));
3319 fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
3321 fs_info->nodesize = nodesize;
3322 fs_info->sectorsize = sectorsize;
3323 fs_info->sectorsize_bits = ilog2(sectorsize);
3324 fs_info->sectors_per_page = (PAGE_SIZE >> fs_info->sectorsize_bits);
3325 fs_info->csums_per_leaf = BTRFS_MAX_ITEM_SIZE(fs_info) / fs_info->csum_size;
3326 fs_info->stripesize = stripesize;
3329 * Handle the space caching options appropriately now that we have the
3330 * super block loaded and validated.
3332 btrfs_set_free_space_cache_settings(fs_info);
3334 if (!btrfs_check_options(fs_info, &fs_info->mount_opt, sb->s_flags)) {
3339 ret = btrfs_check_features(fs_info, !sb_rdonly(sb));
3344 * At this point our mount options are validated, if we set ->max_inline
3345 * to something non-standard make sure we truncate it to sectorsize.
3347 fs_info->max_inline = min_t(u64, fs_info->max_inline, fs_info->sectorsize);
3349 if (sectorsize < PAGE_SIZE)
3351 "read-write for sector size %u with page size %lu is experimental",
3352 sectorsize, PAGE_SIZE);
3354 ret = btrfs_init_workqueues(fs_info);
3356 goto fail_sb_buffer;
3358 sb->s_bdi->ra_pages *= btrfs_super_num_devices(disk_super);
3359 sb->s_bdi->ra_pages = max(sb->s_bdi->ra_pages, SZ_4M / PAGE_SIZE);
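/*
 * Readahead scales with the number of devices but is floored at 4MiB
 * worth of pages, e.g. 1024 pages with 4K pages.
 */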
3361 /* Update the values for the current filesystem. */
3362 sb->s_blocksize = sectorsize;
3363 sb->s_blocksize_bits = blksize_bits(sectorsize);
3364 memcpy(&sb->s_uuid, fs_info->fs_devices->fsid, BTRFS_FSID_SIZE);
3366 mutex_lock(&fs_info->chunk_mutex);
3367 ret = btrfs_read_sys_array(fs_info);
3368 mutex_unlock(&fs_info->chunk_mutex);
3370 btrfs_err(fs_info, "failed to read the system array: %d", ret);
3371 goto fail_sb_buffer;
3374 generation = btrfs_super_chunk_root_generation(disk_super);
3375 level = btrfs_super_chunk_root_level(disk_super);
3376 ret = load_super_root(chunk_root, btrfs_super_chunk_root(disk_super),
3379 btrfs_err(fs_info, "failed to read chunk root");
3380 goto fail_tree_roots;
3383 read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
3384 offsetof(struct btrfs_header, chunk_tree_uuid),
3387 ret = btrfs_read_chunk_tree(fs_info);
3389 btrfs_err(fs_info, "failed to read chunk tree: %d", ret);
3390 goto fail_tree_roots;
3394 * At this point we know all the devices that make this filesystem,
3395 * including the seed devices but we don't know yet if the replace
3396 * target is required. So free devices that are not part of this
3397 * filesystem but skip the replace target device which is checked
3398 * below in btrfs_init_dev_replace().
3400 btrfs_free_extra_devids(fs_devices);
3401 if (!fs_devices->latest_dev->bdev) {
3402 btrfs_err(fs_info, "failed to read devices");
3404 goto fail_tree_roots;
3407 ret = init_tree_roots(fs_info);
3409 goto fail_tree_roots;
3412 * Get zone type information of zoned block devices. This will also
3413 * handle emulation of a zoned filesystem if a regular device has the
3414 * zoned incompat feature flag set.
3416 ret = btrfs_get_dev_zone_info_all_devices(fs_info);
3419 "zoned: failed to read device zone info: %d", ret);
3420 goto fail_block_groups;
3424 * If we have a uuid root and we're not being told to rescan we need to
3425 * check the generation here so we can set the
3426 * BTRFS_FS_UPDATE_UUID_TREE_GEN bit. Otherwise we could commit the
3427 * transaction during a balance or the log replay without updating the
3428 * uuid generation, and then if we crash we would rescan the uuid tree,
3429 * even though it was perfectly fine.
3431 if (fs_info->uuid_root && !btrfs_test_opt(fs_info, RESCAN_UUID_TREE) &&
3432 fs_info->generation == btrfs_super_uuid_tree_generation(disk_super))
3433 set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
3435 ret = btrfs_verify_dev_extents(fs_info);
3438 "failed to verify dev extents against chunks: %d",
3440 goto fail_block_groups;
3442 ret = btrfs_recover_balance(fs_info);
3444 btrfs_err(fs_info, "failed to recover balance: %d", ret);
3445 goto fail_block_groups;
3448 ret = btrfs_init_dev_stats(fs_info);
3450 btrfs_err(fs_info, "failed to init dev_stats: %d", ret);
3451 goto fail_block_groups;
3454 ret = btrfs_init_dev_replace(fs_info);
3456 btrfs_err(fs_info, "failed to init dev_replace: %d", ret);
3457 goto fail_block_groups;
3460 ret = btrfs_check_zoned_mode(fs_info);
3462 btrfs_err(fs_info, "failed to initialize zoned mode: %d",
3464 goto fail_block_groups;
3467 ret = btrfs_sysfs_add_fsid(fs_devices);
3469 btrfs_err(fs_info, "failed to init sysfs fsid interface: %d",
3471 goto fail_block_groups;
3474 ret = btrfs_sysfs_add_mounted(fs_info);
3476 btrfs_err(fs_info, "failed to init sysfs interface: %d", ret);
3477 goto fail_fsdev_sysfs;
3480 ret = btrfs_init_space_info(fs_info);
3482 btrfs_err(fs_info, "failed to initialize space info: %d", ret);
3486 ret = btrfs_read_block_groups(fs_info);
3488 btrfs_err(fs_info, "failed to read block groups: %d", ret);
3492 btrfs_free_zone_cache(fs_info);
3494 btrfs_check_active_zone_reservation(fs_info);
3496 if (!sb_rdonly(sb) && fs_info->fs_devices->missing_devices &&
3497 !btrfs_check_rw_degradable(fs_info, NULL)) {
3499 "writable mount is not allowed due to too many missing devices");
3504 fs_info->cleaner_kthread = kthread_run(cleaner_kthread, fs_info,
3506 if (IS_ERR(fs_info->cleaner_kthread)) {
3507 ret = PTR_ERR(fs_info->cleaner_kthread);
3511 fs_info->transaction_kthread = kthread_run(transaction_kthread,
3513 "btrfs-transaction");
3514 if (IS_ERR(fs_info->transaction_kthread)) {
3515 ret = PTR_ERR(fs_info->transaction_kthread);
3519 ret = btrfs_read_qgroup_config(fs_info);
3521 goto fail_trans_kthread;
3523 if (btrfs_build_ref_tree(fs_info))
3524 btrfs_err(fs_info, "couldn't build ref tree");
3526 /* Do not make disk changes in a broken FS or when nologreplay is given. */
3527 if (btrfs_super_log_root(disk_super) != 0 &&
3528 !btrfs_test_opt(fs_info, NOLOGREPLAY)) {
3529 btrfs_info(fs_info, "start tree-log replay");
3530 ret = btrfs_replay_log(fs_info, fs_devices);
3535 fs_info->fs_root = btrfs_get_fs_root(fs_info, BTRFS_FS_TREE_OBJECTID, true);
3536 if (IS_ERR(fs_info->fs_root)) {
3537 ret = PTR_ERR(fs_info->fs_root);
3538 btrfs_warn(fs_info, "failed to read fs tree: %d", ret);
3539 fs_info->fs_root = NULL;
3546 ret = btrfs_start_pre_rw_mount(fs_info);
3548 close_ctree(fs_info);
3551 btrfs_discard_resume(fs_info);
3553 if (fs_info->uuid_root &&
3554 (btrfs_test_opt(fs_info, RESCAN_UUID_TREE) ||
3555 fs_info->generation != btrfs_super_uuid_tree_generation(disk_super))) {
3556 btrfs_info(fs_info, "checking UUID tree");
3557 ret = btrfs_check_uuid_tree(fs_info);
3560 "failed to check the UUID tree: %d", ret);
3561 close_ctree(fs_info);
3566 set_bit(BTRFS_FS_OPEN, &fs_info->flags);
3568 /* Kick the cleaner thread so it'll start deleting snapshots. */
3569 if (test_bit(BTRFS_FS_UNFINISHED_DROPS, &fs_info->flags))
3570 wake_up_process(fs_info->cleaner_kthread);
3575 btrfs_free_qgroup_config(fs_info);
3577 kthread_stop(fs_info->transaction_kthread);
3578 btrfs_cleanup_transaction(fs_info);
3579 btrfs_free_fs_roots(fs_info);
3581 kthread_stop(fs_info->cleaner_kthread);
3584 * make sure we're done with the btree inode before we stop our
3585 * kthreads.
3587 filemap_write_and_wait(fs_info->btree_inode->i_mapping);
3590 btrfs_sysfs_remove_mounted(fs_info);
3593 btrfs_sysfs_remove_fsid(fs_info->fs_devices);
3596 btrfs_put_block_group_cache(fs_info);
3599 if (fs_info->data_reloc_root)
3600 btrfs_drop_and_free_fs_root(fs_info, fs_info->data_reloc_root);
3601 free_root_pointers(fs_info, true);
3602 invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
3605 btrfs_stop_all_workers(fs_info);
3606 btrfs_free_block_groups(fs_info);
3608 btrfs_mapping_tree_free(fs_info);
3610 iput(fs_info->btree_inode);
3612 btrfs_close_devices(fs_info->fs_devices);
3616 ALLOW_ERROR_INJECTION(open_ctree, ERRNO);
3618 static void btrfs_end_super_write(struct bio *bio)
3620 struct btrfs_device *device = bio->bi_private;
3621 struct folio_iter fi;
3623 bio_for_each_folio_all(fi, bio) {
3624 if (bio->bi_status) {
3625 btrfs_warn_rl_in_rcu(device->fs_info,
3626 "lost super block write due to IO error on %s (%d)",
3627 btrfs_dev_name(device),
3628 blk_status_to_errno(bio->bi_status));
3629 btrfs_dev_stat_inc_and_print(device,
3630 BTRFS_DEV_STAT_WRITE_ERRS);
3631 /* Ensure failure if the primary sb fails. */
3632 if (bio->bi_opf & REQ_FUA)
3633 atomic_add(BTRFS_SUPER_PRIMARY_WRITE_ERROR,
3634 &device->sb_write_errors);
3636 atomic_inc(&device->sb_write_errors);
3638 folio_unlock(fi.folio);
3639 folio_put(fi.folio);
3645 struct btrfs_super_block *btrfs_read_dev_one_super(struct block_device *bdev,
3646 int copy_num, bool drop_cache)
3648 struct btrfs_super_block *super;
3650 u64 bytenr, bytenr_orig;
3651 struct address_space *mapping = bdev->bd_mapping;
3654 bytenr_orig = btrfs_sb_offset(copy_num);
3655 ret = btrfs_sb_log_location_bdev(bdev, copy_num, READ, &bytenr);
3657 return ERR_PTR(-EINVAL);
3659 return ERR_PTR(ret);
3661 if (bytenr + BTRFS_SUPER_INFO_SIZE >= bdev_nr_bytes(bdev))
3662 return ERR_PTR(-EINVAL);
3665 /* This should only be called with the primary sb. */
3666 ASSERT(copy_num == 0);
3669 * Drop the page of the primary superblock, so a later read will
3670 * always read from the device.
3672 invalidate_inode_pages2_range(mapping,
3673 bytenr >> PAGE_SHIFT,
3674 (bytenr + BTRFS_SUPER_INFO_SIZE) >> PAGE_SHIFT);
3677 page = read_cache_page_gfp(mapping, bytenr >> PAGE_SHIFT, GFP_NOFS);
3679 return ERR_CAST(page);
3681 super = page_address(page);
3682 if (btrfs_super_magic(super) != BTRFS_MAGIC) {
3683 btrfs_release_disk_super(super);
3684 return ERR_PTR(-ENODATA);
3687 if (btrfs_super_bytenr(super) != bytenr_orig) {
3688 btrfs_release_disk_super(super);
3689 return ERR_PTR(-EINVAL);
3696 struct btrfs_super_block *btrfs_read_dev_super(struct block_device *bdev)
3698 struct btrfs_super_block *super, *latest = NULL;
3702 /* We would like to check all the supers, but that would make
3703 * a btrfs mount succeed after a mkfs from a different FS.
3704 * So, we need to add a special mount option to scan for
3705 * later supers, using BTRFS_SUPER_MIRROR_MAX instead.
3707 for (i = 0; i < 1; i++) {
3708 super = btrfs_read_dev_one_super(bdev, i, false);
3712 if (!latest || btrfs_super_generation(super) > transid) {
3714 btrfs_release_disk_super(super);
3717 transid = btrfs_super_generation(super);
3725 * Write superblock @sb to the @device. Do not wait for completion, all the
3726 * folios we use for writing are locked.
3728 * Write @max_mirrors copies of the superblock, where 0 means the default
3729 * (all copies that fit the expected device size at commit time). Note that
3730 * max_mirrors must be the same for the write and wait phases.
3732 * Return number of errors when folio is not found or submission fails.
3734 static int write_dev_supers(struct btrfs_device *device,
3735 struct btrfs_super_block *sb, int max_mirrors)
3737 struct btrfs_fs_info *fs_info = device->fs_info;
3738 struct address_space *mapping = device->bdev->bd_mapping;
3739 SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
3742 u64 bytenr, bytenr_orig;
3744 atomic_set(&device->sb_write_errors, 0);
3746 if (max_mirrors == 0)
3747 max_mirrors = BTRFS_SUPER_MIRROR_MAX;
3749 shash->tfm = fs_info->csum_shash;
3751 for (i = 0; i < max_mirrors; i++) {
3752 struct folio *folio;
3754 struct btrfs_super_block *disk_super;
3757 bytenr_orig = btrfs_sb_offset(i);
3758 ret = btrfs_sb_log_location(device, i, WRITE, &bytenr);
3759 if (ret == -ENOENT) {
3761 } else if (ret < 0) {
3762 btrfs_err(device->fs_info,
3763 "couldn't get super block location for mirror %d",
3765 atomic_inc(&device->sb_write_errors);
3768 if (bytenr + BTRFS_SUPER_INFO_SIZE >=
3769 device->commit_total_bytes)
3772 btrfs_set_super_bytenr(sb, bytenr_orig);
3774 crypto_shash_digest(shash, (const char *)sb + BTRFS_CSUM_SIZE,
3775 BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE,
3778 folio = __filemap_get_folio(mapping, bytenr >> PAGE_SHIFT,
3779 FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
3781 if (IS_ERR(folio)) {
3782 btrfs_err(device->fs_info,
3783 "couldn't get super block page for bytenr %llu",
3785 atomic_inc(&device->sb_write_errors);
3788 ASSERT(folio_order(folio) == 0);
3790 offset = offset_in_folio(folio, bytenr);
3791 disk_super = folio_address(folio) + offset;
3792 memcpy(disk_super, sb, BTRFS_SUPER_INFO_SIZE);
3795 * Directly use bios here instead of relying on the page cache
3796 * to do I/O, so we don't lose the ability to do integrity
3797 * checking.
3799 bio = bio_alloc(device->bdev, 1,
3800 REQ_OP_WRITE | REQ_SYNC | REQ_META | REQ_PRIO,
3802 bio->bi_iter.bi_sector = bytenr >> SECTOR_SHIFT;
3803 bio->bi_private = device;
3804 bio->bi_end_io = btrfs_end_super_write;
3805 bio_add_folio_nofail(bio, folio, BTRFS_SUPER_INFO_SIZE, offset);
3808 * We FUA only the first super block. The others we allow to
3809 * go down lazily and there's a short window where the on-disk
3810 * copies might still contain the older version.
3812 if (i == 0 && !btrfs_test_opt(device->fs_info, NOBARRIER))
3813 bio->bi_opf |= REQ_FUA;
3816 if (btrfs_advance_sb_log(device, i))
3817 atomic_inc(&device->sb_write_errors);
3819 return atomic_read(&device->sb_write_errors) < i ? 0 : -1;
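/*
 * Reading of the return above: the write phase counts as successful as
 * long as fewer errors than attempted copies were recorded, i.e. at
 * least one super block was submitted cleanly; completion errors are
 * fully accounted in wait_dev_supers().
 */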
3823 * Wait for write completion of superblocks done by write_dev_supers,
3824 * @max_mirrors must be the same for the write and wait phases.
3826 * Return -1 if primary super block write failed or when there were no super block
3827 * copies written. Otherwise 0.
3829 static int wait_dev_supers(struct btrfs_device *device, int max_mirrors)
3833 bool primary_failed = false;
3837 if (max_mirrors == 0)
3838 max_mirrors = BTRFS_SUPER_MIRROR_MAX;
3840 for (i = 0; i < max_mirrors; i++) {
3841 struct folio *folio;
3843 ret = btrfs_sb_log_location(device, i, READ, &bytenr);
3844 if (ret == -ENOENT) {
3846 } else if (ret < 0) {
3849 primary_failed = true;
3852 if (bytenr + BTRFS_SUPER_INFO_SIZE >=
3853 device->commit_total_bytes)
3856 folio = filemap_get_folio(device->bdev->bd_mapping,
3857 bytenr >> PAGE_SHIFT);
3858 /* If the folio has been removed, then we know it completed. */
3861 ASSERT(folio_order(folio) == 0);
3863 /* Folio will be unlocked once the write completes. */
3864 folio_wait_locked(folio);
3868 errors += atomic_read(&device->sb_write_errors);
3869 if (errors >= BTRFS_SUPER_PRIMARY_WRITE_ERROR)
3870 primary_failed = true;
3871 if (primary_failed) {
3872 btrfs_err(device->fs_info, "error writing primary super block to device %llu",
3877 return errors < i ? 0 : -1;
3881 * Endio for write_dev_flush; this will wake anyone waiting
3882 * for the barrier when it is done.
3884 static void btrfs_end_empty_barrier(struct bio *bio)
3887 complete(bio->bi_private);
3891 * Submit a flush request to the device if it supports it. Error handling is
3892 * done in the waiting counterpart.
3894 static void write_dev_flush(struct btrfs_device *device)
3896 struct bio *bio = &device->flush_bio;
3898 device->last_flush_error = BLK_STS_OK;
3900 bio_init(bio, device->bdev, NULL, 0,
3901 REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH);
3902 bio->bi_end_io = btrfs_end_empty_barrier;
3903 init_completion(&device->flush_wait);
3904 bio->bi_private = &device->flush_wait;
3906 set_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state);
3910 * If the flush bio has been submitted by write_dev_flush, wait for it.
3911 * Return true for any error, and false otherwise.
3913 static bool wait_dev_flush(struct btrfs_device *device)
3915 struct bio *bio = &device->flush_bio;
3917 if (!test_and_clear_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state))
3920 wait_for_completion_io(&device->flush_wait);
3922 if (bio->bi_status) {
3923 device->last_flush_error = bio->bi_status;
3924 btrfs_dev_stat_inc_and_print(device, BTRFS_DEV_STAT_FLUSH_ERRS);
3932 * Send an empty flush down to each device in parallel,
3933 * then wait for them.
3935 static int barrier_all_devices(struct btrfs_fs_info *info)
3937 struct list_head *head;
3938 struct btrfs_device *dev;
3939 int errors_wait = 0;
3941 lockdep_assert_held(&info->fs_devices->device_list_mutex);
3942 /* send down all the barriers */
3943 head = &info->fs_devices->devices;
3944 list_for_each_entry(dev, head, dev_list) {
3945 if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state))
3949 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
3950 !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
3953 write_dev_flush(dev);
3956 /* wait for all the barriers */
3957 list_for_each_entry(dev, head, dev_list) {
3958 if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state))
3964 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
3965 !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
3968 if (wait_dev_flush(dev))
3973 * Checks last_flush_error of disks in order to determine the device
3974 * state.
3976 if (errors_wait && !btrfs_check_rw_degradable(info, NULL))
3982 int btrfs_get_num_tolerated_disk_barrier_failures(u64 flags)
3985 int min_tolerated = INT_MAX;
3987 if ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 ||
3988 (flags & BTRFS_AVAIL_ALLOC_BIT_SINGLE))
3989 min_tolerated = min_t(int, min_tolerated,
3990 btrfs_raid_array[BTRFS_RAID_SINGLE].
3991 tolerated_failures);
3993 for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
3994 if (raid_type == BTRFS_RAID_SINGLE)
3996 if (!(flags & btrfs_raid_array[raid_type].bg_flag))
3998 min_tolerated = min_t(int, min_tolerated,
3999 btrfs_raid_array[raid_type].
4000 tolerated_failures);
4003 if (min_tolerated == INT_MAX) {
4004 pr_warn("BTRFS: unknown raid flag: %llu", flags);
4008 return min_tolerated;
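/*
 * Example: RAID1 tolerates 1 failed barrier, RAID6 tolerates 2, and
 * SINGLE/DUP tolerate 0; with mixed profiles the minimum across all
 * profiles present in @flags wins.
 */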
4011 int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors)
4013 struct list_head *head;
4014 struct btrfs_device *dev;
4015 struct btrfs_super_block *sb;
4016 struct btrfs_dev_item *dev_item;
4020 int total_errors = 0;
4023 do_barriers = !btrfs_test_opt(fs_info, NOBARRIER);
4026 * max_mirrors == 0 indicates we're from commit_transaction,
4027 * not from fsync where the tree roots in fs_info have not
4028 * been consistent on disk.
4030 if (max_mirrors == 0)
4031 backup_super_roots(fs_info);
4033 sb = fs_info->super_for_commit;
4034 dev_item = &sb->dev_item;
4036 mutex_lock(&fs_info->fs_devices->device_list_mutex);
4037 head = &fs_info->fs_devices->devices;
4038 max_errors = btrfs_super_num_devices(fs_info->super_copy) - 1;
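/*
 * With N devices the checks against max_errors below tolerate super
 * block write failures on up to N - 1 of them, i.e. the super block
 * must land on at least one device.
 */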
4041 ret = barrier_all_devices(fs_info);
4044 &fs_info->fs_devices->device_list_mutex);
4045 btrfs_handle_fs_error(fs_info, ret,
4046 "errors while submitting device barriers.");
4051 list_for_each_entry(dev, head, dev_list) {
4056 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
4057 !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
4060 btrfs_set_stack_device_generation(dev_item, 0);
4061 btrfs_set_stack_device_type(dev_item, dev->type);
4062 btrfs_set_stack_device_id(dev_item, dev->devid);
4063 btrfs_set_stack_device_total_bytes(dev_item,
4064 dev->commit_total_bytes);
4065 btrfs_set_stack_device_bytes_used(dev_item,
4066 dev->commit_bytes_used);
4067 btrfs_set_stack_device_io_align(dev_item, dev->io_align);
4068 btrfs_set_stack_device_io_width(dev_item, dev->io_width);
4069 btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
4070 memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
4071 memcpy(dev_item->fsid, dev->fs_devices->metadata_uuid,
4074 flags = btrfs_super_flags(sb);
4075 btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);
4077 ret = btrfs_validate_write_super(fs_info, sb);
4079 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4080 btrfs_handle_fs_error(fs_info, -EUCLEAN,
4081 "unexpected superblock corruption detected");
4085 ret = write_dev_supers(dev, sb, max_mirrors);
4089 if (total_errors > max_errors) {
4090 btrfs_err(fs_info, "%d errors while writing supers",
4092 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4094 /* FUA is masked off if unsupported and can't be the reason */
4095 btrfs_handle_fs_error(fs_info, -EIO,
4096 "%d errors while writing supers",
4102 list_for_each_entry(dev, head, dev_list) {
4105 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
4106 !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
4109 ret = wait_dev_supers(dev, max_mirrors);
4113 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4114 if (total_errors > max_errors) {
4115 btrfs_handle_fs_error(fs_info, -EIO,
4116 "%d errors while writing supers",
4123 /* Drop a fs root from the radix tree and free it. */
4124 void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
4125 struct btrfs_root *root)
4127 bool drop_ref = false;
4129 spin_lock(&fs_info->fs_roots_radix_lock);
4130 radix_tree_delete(&fs_info->fs_roots_radix,
4131 (unsigned long)btrfs_root_id(root));
4132 if (test_and_clear_bit(BTRFS_ROOT_IN_RADIX, &root->state))
4134 spin_unlock(&fs_info->fs_roots_radix_lock);
4136 if (BTRFS_FS_ERROR(fs_info)) {
4137 ASSERT(root->log_root == NULL);
4138 if (root->reloc_root) {
4139 btrfs_put_root(root->reloc_root);
4140 root->reloc_root = NULL;
4145 btrfs_put_root(root);
4148 int btrfs_commit_super(struct btrfs_fs_info *fs_info)
4150 mutex_lock(&fs_info->cleaner_mutex);
4151 btrfs_run_delayed_iputs(fs_info);
4152 mutex_unlock(&fs_info->cleaner_mutex);
4153 wake_up_process(fs_info->cleaner_kthread);
4155 /* Wait until ongoing cleanup work is done. */
4156 down_write(&fs_info->cleanup_work_sem);
4157 up_write(&fs_info->cleanup_work_sem);
4159 return btrfs_commit_current_transaction(fs_info->tree_root);
4162 static void warn_about_uncommitted_trans(struct btrfs_fs_info *fs_info)
4164 struct btrfs_transaction *trans;
4165 struct btrfs_transaction *tmp;
4169 * This function is only called at the very end of close_ctree(),
4170 * thus there is no other running transaction and no need to take trans_lock.
4172 ASSERT(test_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags));
4173 list_for_each_entry_safe(trans, tmp, &fs_info->trans_list, list) {
4174 struct extent_state *cached = NULL;
4175 u64 dirty_bytes = 0;
4181 while (find_first_extent_bit(&trans->dirty_pages, cur,
4182 &found_start, &found_end, EXTENT_DIRTY, &cached)) {
4183 dirty_bytes += found_end + 1 - found_start;
4184 cur = found_end + 1;
4187 "transaction %llu (with %llu dirty metadata bytes) is not committed",
4188 trans->transid, dirty_bytes);
4189 btrfs_cleanup_one_transaction(trans, fs_info);
4191 if (trans == fs_info->running_transaction)
4192 fs_info->running_transaction = NULL;
4193 list_del_init(&trans->list);
4195 btrfs_put_transaction(trans);
4196 trace_btrfs_transaction_commit(fs_info);
4201 void __cold close_ctree(struct btrfs_fs_info *fs_info)
4205 set_bit(BTRFS_FS_CLOSING_START, &fs_info->flags);
4208 * If we had UNFINISHED_DROPS we could still be processing them, so
4209 * clear that bit and wake up relocation so it can stop.
4210 * We must do this before stopping the block group reclaim task, because
4211 * at btrfs_relocate_block_group() we wait for this bit, and after the
4212 * wait we stop with -EINTR if btrfs_fs_closing() returns non-zero - we
4213 * have just set BTRFS_FS_CLOSING_START, so btrfs_fs_closing() will
4214 * return 1.
4216 btrfs_wake_unfinished_drop(fs_info);
4219 * We may have the reclaim task running and relocating a data block group,
4220 * in which case it may create delayed iputs. So stop it before we park
4221 * the cleaner kthread, otherwise we can get new delayed iputs after
4222 * parking the cleaner, and that can make the async reclaim task hang
4223 * if it's waiting for delayed iputs to complete, since the cleaner is
4224 * parked and can not run delayed iputs - this will make us hang when
4225 * trying to stop the async reclaim task.
4227 cancel_work_sync(&fs_info->reclaim_bgs_work);
4229 * We don't want the cleaner to start new transactions, add more delayed
4230 * iputs, etc. while we're closing. We can't use kthread_stop() yet
4231 * because that frees the task_struct, and the transaction kthread might
4232 * still try to wake up the cleaner.
4234 kthread_park(fs_info->cleaner_kthread);
4236 /* wait for the qgroup rescan worker to stop */
4237 btrfs_qgroup_wait_for_completion(fs_info, false);
4239 /* wait for the uuid_scan task to finish */
4240 down(&fs_info->uuid_tree_rescan_sem);
4242 /* Avoid complaints from lockdep et al., set sem back to initial state. */
4242 up(&fs_info->uuid_tree_rescan_sem);
4244 /* pause restriper - we want to resume on mount */
4245 btrfs_pause_balance(fs_info);
4247 btrfs_dev_replace_suspend_for_unmount(fs_info);
4249 btrfs_scrub_cancel(fs_info);
4251 /* wait for any defraggers to finish */
4252 wait_event(fs_info->transaction_wait,
4253 (atomic_read(&fs_info->defrag_running) == 0));
4255 /* clear out the rbtree of defraggable inodes */
4256 btrfs_cleanup_defrag_inodes(fs_info);
4259 * After we parked the cleaner kthread, ordered extents may have
4260 * completed and created new delayed iputs. If one of the async reclaim
4261 * tasks is running and in the RUN_DELAYED_IPUTS flush state, then we
4262 * can hang forever trying to stop it, because if a delayed iput is
4263 * added after it ran btrfs_run_delayed_iputs() and before it called
4264 * btrfs_wait_on_delayed_iputs(), it will hang forever since there is
4265 * no one else to run iputs.
4267 * So wait for all ongoing ordered extents to complete and then run
4268 * delayed iputs. This works because once we reach this point no one
4269 * can either create new ordered extents nor create delayed iputs
4270 * through some other means.
4272 * Also note that btrfs_wait_ordered_roots() is not safe here, because
4273 * it waits for BTRFS_ORDERED_COMPLETE to be set on an ordered extent,
4274 * but the delayed iput for the respective inode is made only when doing
4275 * the final btrfs_put_ordered_extent() (which must happen at
4276 * btrfs_finish_ordered_io() when we are unmounting).
4278 btrfs_flush_workqueue(fs_info->endio_write_workers);
4279 /* Ordered extents for free space inodes. */
4280 btrfs_flush_workqueue(fs_info->endio_freespace_worker);
4281 btrfs_run_delayed_iputs(fs_info);
4283 cancel_work_sync(&fs_info->async_reclaim_work);
4284 cancel_work_sync(&fs_info->async_data_reclaim_work);
4285 cancel_work_sync(&fs_info->preempt_reclaim_work);
4287 /* Cancel or finish ongoing discard work */
4288 btrfs_discard_cleanup(fs_info);
4290 if (!sb_rdonly(fs_info->sb)) {
4292 * The cleaner kthread is stopped, so do one final pass over
4293 * unused block groups.
4295 btrfs_delete_unused_bgs(fs_info);
4298 * There might be existing delayed inode workers still running
4299 * and holding an empty delayed inode item. We must wait for
4300 * them to complete first because they can create a transaction.
4301 * This happens when someone calls btrfs_balance_delayed_items()
4302 * and then a transaction commit runs the same delayed nodes
4303 * before any delayed worker has done something with the nodes.
4304 * We must wait for any worker here and not at transaction
4305 * commit time since that could cause a deadlock.
4306 * This is a very rare case.
4308 btrfs_flush_workqueue(fs_info->delayed_workers);
4310 ret = btrfs_commit_super(fs_info);
4312 btrfs_err(fs_info, "commit super ret %d", ret);
4315 if (BTRFS_FS_ERROR(fs_info))
4316 btrfs_error_commit_super(fs_info);
4318 kthread_stop(fs_info->transaction_kthread);
4319 kthread_stop(fs_info->cleaner_kthread);
4321 ASSERT(list_empty(&fs_info->delayed_iputs));
4322 set_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags);
4324 if (btrfs_check_quota_leak(fs_info)) {
4325 WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
4326 btrfs_err(fs_info, "qgroup reserved space leaked");
4329 btrfs_free_qgroup_config(fs_info);
4330 ASSERT(list_empty(&fs_info->delalloc_roots));
4332 if (percpu_counter_sum(&fs_info->delalloc_bytes)) {
4333 btrfs_info(fs_info, "at unmount delalloc count %lld",
4334 percpu_counter_sum(&fs_info->delalloc_bytes));
4337 if (percpu_counter_sum(&fs_info->ordered_bytes))
4338 btrfs_info(fs_info, "at unmount dio bytes count %lld",
4339 percpu_counter_sum(&fs_info->ordered_bytes));
4341 btrfs_sysfs_remove_mounted(fs_info);
4342 btrfs_sysfs_remove_fsid(fs_info->fs_devices);
4344 btrfs_put_block_group_cache(fs_info);
4347 * we must make sure there is not any read request to
4348 * submit after we stopping all workers.
4350 invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
4351 btrfs_stop_all_workers(fs_info);
4353 /* We shouldn't have any transaction open at this point */
4354 warn_about_uncommitted_trans(fs_info);
4356 clear_bit(BTRFS_FS_OPEN, &fs_info->flags);
4357 free_root_pointers(fs_info, true);
4358 btrfs_free_fs_roots(fs_info);
4361 * We must free the block groups after dropping the fs_roots as we could
4362 * have had an IO error and have left over tree log blocks that aren't
4363 * cleaned up until the fs roots are freed. This makes the block group
4364 * accounting appear to be wrong because there's pending reserved bytes,
4365 * so make sure we do the block group cleanup afterwards.
4367 btrfs_free_block_groups(fs_info);
4369 iput(fs_info->btree_inode);
4371 btrfs_mapping_tree_free(fs_info);
4372 btrfs_close_devices(fs_info->fs_devices);
void btrfs_mark_buffer_dirty(struct btrfs_trans_handle *trans,
			     struct extent_buffer *buf)
{
	struct btrfs_fs_info *fs_info = buf->fs_info;
	u64 transid = btrfs_header_generation(buf);

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
	/*
	 * This is a fast path so only do this check if we have sanity tests
	 * enabled. Normal people shouldn't be using unmapped buffers as dirty
	 * outside of the sanity tests.
	 */
	if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &buf->bflags)))
		return;
#endif
	/* This is an active transaction (its state < TRANS_STATE_UNBLOCKED). */
	ASSERT(trans->transid == fs_info->generation);
	btrfs_assert_tree_write_locked(buf);
	if (unlikely(transid != fs_info->generation)) {
		btrfs_abort_transaction(trans, -EUCLEAN);
		btrfs_crit(fs_info,
"dirty buffer transid mismatch, logical %llu found transid %llu running transid %llu",
			   buf->start, transid, fs_info->generation);
	}
	set_extent_buffer_dirty(buf);
}
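
/*
 * Throttle the caller when the amount of dirty btree metadata exceeds
 * BTRFS_DIRTY_METADATA_THRESH, optionally flushing delayed items first.
 */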
static void __btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info,
					int flush_delayed)
{
	/*
	 * Looks as though older kernels can get into trouble with this code,
	 * they end up stuck in balance_dirty_pages forever.
	 */
	int ret;

	if (current->flags & PF_MEMALLOC)
		return;

	if (flush_delayed)
		btrfs_balance_delayed_items(fs_info);

	ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes,
				       BTRFS_DIRTY_METADATA_THRESH,
				       fs_info->dirty_metadata_batch);
	if (ret > 0)
		balance_dirty_pages_ratelimited(fs_info->btree_inode->i_mapping);
}

void btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info)
{
	__btrfs_btree_balance_dirty(fs_info, 1);
}

void btrfs_btree_balance_dirty_nodelay(struct btrfs_fs_info *fs_info)
{
	__btrfs_btree_balance_dirty(fs_info, 0);
}
static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info)
{
	/* Clean up the FS via a transaction. */
	btrfs_cleanup_transaction(fs_info);

	mutex_lock(&fs_info->cleaner_mutex);
	btrfs_run_delayed_iputs(fs_info);
	mutex_unlock(&fs_info->cleaner_mutex);

	down_write(&fs_info->cleanup_work_sem);
	up_write(&fs_info->cleanup_work_sem);
}
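
/*
 * Free the log trees of all fs roots and then the log root tree itself.
 * Used on error cleanup, where the logs are dropped instead of replayed.
 */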
static void btrfs_drop_all_logs(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *gang[8];
	u64 root_objectid = 0;
	int ret;

	spin_lock(&fs_info->fs_roots_radix_lock);
	while ((ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
					     (void **)gang, root_objectid,
					     ARRAY_SIZE(gang))) != 0) {
		int i;

		for (i = 0; i < ret; i++)
			gang[i] = btrfs_grab_root(gang[i]);
		spin_unlock(&fs_info->fs_roots_radix_lock);

		for (i = 0; i < ret; i++) {
			if (!gang[i])
				continue;
			root_objectid = btrfs_root_id(gang[i]);
			btrfs_free_log(NULL, gang[i]);
			btrfs_put_root(gang[i]);
		}
		root_objectid++;
		spin_lock(&fs_info->fs_roots_radix_lock);
	}
	spin_unlock(&fs_info->fs_roots_radix_lock);
	btrfs_free_log_root_tree(NULL, fs_info);
}
static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
{
	struct btrfs_ordered_extent *ordered;

	spin_lock(&root->ordered_extent_lock);
	/*
	 * This will just short circuit the ordered completion stuff which will
	 * make sure the ordered extent gets properly cleaned up.
	 */
	list_for_each_entry(ordered, &root->ordered_extents,
			    root_extent_list)
		set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
	spin_unlock(&root->ordered_extent_lock);
}
static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;
	LIST_HEAD(splice);

	spin_lock(&fs_info->ordered_root_lock);
	list_splice_init(&fs_info->ordered_roots, &splice);
	while (!list_empty(&splice)) {
		root = list_first_entry(&splice, struct btrfs_root,
					ordered_root);
		list_move_tail(&root->ordered_root,
			       &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);
		btrfs_destroy_ordered_extents(root);

		cond_resched();
		spin_lock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&fs_info->ordered_root_lock);

	/*
	 * We need this here because if we've been flipped read-only we won't
	 * get sync() from the umount, so we need to make sure any ordered
	 * extents that haven't had their dirty pages IO start writeout yet
	 * actually get run and error out properly.
	 */
	btrfs_wait_ordered_roots(fs_info, U64_MAX, NULL);
}
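
/*
 * Drop all delayed refs of the given transaction. For heads that had their
 * space reserved (must_insert_reserved), move the bytes back to the pinned
 * counters and unpin the range so the space accounting stays consistent.
 */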
static void btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
				       struct btrfs_fs_info *fs_info)
{
	struct rb_node *node;
	struct btrfs_delayed_ref_root *delayed_refs = &trans->delayed_refs;
	struct btrfs_delayed_ref_node *ref;

	spin_lock(&delayed_refs->lock);
	while ((node = rb_first_cached(&delayed_refs->href_root)) != NULL) {
		struct btrfs_delayed_ref_head *head;
		struct rb_node *n;
		bool pin_bytes = false;

		head = rb_entry(node, struct btrfs_delayed_ref_head,
				href_node);
		if (btrfs_delayed_ref_lock(delayed_refs, head))
			continue;

		spin_lock(&head->lock);
		while ((n = rb_first_cached(&head->ref_tree)) != NULL) {
			ref = rb_entry(n, struct btrfs_delayed_ref_node,
				       ref_node);
			rb_erase_cached(&ref->ref_node, &head->ref_tree);
			RB_CLEAR_NODE(&ref->ref_node);
			if (!list_empty(&ref->add_list))
				list_del(&ref->add_list);
			atomic_dec(&delayed_refs->num_entries);
			btrfs_put_delayed_ref(ref);
			btrfs_delayed_refs_rsv_release(fs_info, 1, 0);
		}
		if (head->must_insert_reserved)
			pin_bytes = true;
		btrfs_free_delayed_extent_op(head->extent_op);
		btrfs_delete_ref_head(delayed_refs, head);
		spin_unlock(&head->lock);
		spin_unlock(&delayed_refs->lock);
		mutex_unlock(&head->mutex);

		if (pin_bytes) {
			struct btrfs_block_group *cache;

			cache = btrfs_lookup_block_group(fs_info, head->bytenr);
			BUG_ON(!cache);

			spin_lock(&cache->space_info->lock);
			spin_lock(&cache->lock);
			cache->pinned += head->num_bytes;
			btrfs_space_info_update_bytes_pinned(fs_info,
				cache->space_info, head->num_bytes);
			cache->reserved -= head->num_bytes;
			cache->space_info->bytes_reserved -= head->num_bytes;
			spin_unlock(&cache->lock);
			spin_unlock(&cache->space_info->lock);

			btrfs_put_block_group(cache);

			btrfs_error_unpin_extent_range(fs_info, head->bytenr,
				head->bytenr + head->num_bytes - 1);
		}
		btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head);
		btrfs_put_delayed_ref_head(head);
		cond_resched();
		spin_lock(&delayed_refs->lock);
	}
	btrfs_qgroup_destroy_extent_records(trans);

	spin_unlock(&delayed_refs->lock);
}
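
/*
 * Remove all inodes from the root's delalloc list and invalidate their page
 * caches, so that no more writeback is started for them during error unmount.
 */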
static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
{
	struct btrfs_inode *btrfs_inode;
	LIST_HEAD(splice);

	spin_lock(&root->delalloc_lock);
	list_splice_init(&root->delalloc_inodes, &splice);

	while (!list_empty(&splice)) {
		struct inode *inode = NULL;

		btrfs_inode = list_first_entry(&splice, struct btrfs_inode,
					       delalloc_inodes);
		btrfs_del_delalloc_inode(btrfs_inode);
		spin_unlock(&root->delalloc_lock);

		/*
		 * Make sure we get a live inode and that it'll not disappear
		 * meanwhile.
		 */
		inode = igrab(&btrfs_inode->vfs_inode);
		if (inode) {
			unsigned int nofs_flag;

			nofs_flag = memalloc_nofs_save();
			invalidate_inode_pages2(inode->i_mapping);
			memalloc_nofs_restore(nofs_flag);
			iput(inode);
		}
		spin_lock(&root->delalloc_lock);
	}
	spin_unlock(&root->delalloc_lock);
}
static void btrfs_destroy_all_delalloc_inodes(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;
	LIST_HEAD(splice);

	spin_lock(&fs_info->delalloc_root_lock);
	list_splice_init(&fs_info->delalloc_roots, &splice);
	while (!list_empty(&splice)) {
		root = list_first_entry(&splice, struct btrfs_root,
					delalloc_root);
		root = btrfs_grab_root(root);
		BUG_ON(!root);
		spin_unlock(&fs_info->delalloc_root_lock);

		btrfs_destroy_delalloc_inodes(root);
		btrfs_put_root(root);

		spin_lock(&fs_info->delalloc_root_lock);
	}
	spin_unlock(&fs_info->delalloc_root_lock);
}
static void btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
					 struct extent_io_tree *dirty_pages,
					 int mark)
{
	struct extent_buffer *eb;
	u64 start = 0;
	u64 end;

	while (find_first_extent_bit(dirty_pages, start, &start, &end,
				     mark, NULL)) {
		clear_extent_bits(dirty_pages, start, end, mark);
		while (start <= end) {
			eb = find_extent_buffer(fs_info, start);
			start += fs_info->nodesize;
			if (!eb)
				continue;

			btrfs_tree_lock(eb);
			wait_on_extent_buffer_writeback(eb);
			btrfs_clear_buffer_dirty(NULL, eb);
			btrfs_tree_unlock(eb);

			free_extent_buffer_stale(eb);
		}
	}
}
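
/*
 * Clear all EXTENT_DIRTY ranges from the pinned extents tree and return the
 * ranges to the free space accounting via btrfs_error_unpin_extent_range().
 */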
static void btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
					struct extent_io_tree *unpin)
{
	u64 start;
	u64 end;

	while (1) {
		struct extent_state *cached_state = NULL;

		/*
		 * btrfs_finish_extent_commit() may get the same range as
		 * ours between find_first_extent_bit() and clear_extent_dirty().
		 * Hence, hold the unused_bg_unpin_mutex to avoid double
		 * unpinning the same extent range.
		 */
		mutex_lock(&fs_info->unused_bg_unpin_mutex);
		if (!find_first_extent_bit(unpin, 0, &start, &end,
					   EXTENT_DIRTY, &cached_state)) {
			mutex_unlock(&fs_info->unused_bg_unpin_mutex);
			break;
		}

		clear_extent_dirty(unpin, start, end, &cached_state);
		free_extent_state(cached_state);
		btrfs_error_unpin_extent_range(fs_info, start, end);
		mutex_unlock(&fs_info->unused_bg_unpin_mutex);
		cond_resched();
	}
}
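
/*
 * Drop the free space cache inode of a block group, invalidating its pages
 * so that no stale cache is written back, then release the block group.
 */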
static void btrfs_cleanup_bg_io(struct btrfs_block_group *cache)
{
	struct inode *inode;

	inode = cache->io_ctl.inode;
	if (inode) {
		unsigned int nofs_flag;

		nofs_flag = memalloc_nofs_save();
		invalidate_inode_pages2(inode->i_mapping);
		memalloc_nofs_restore(nofs_flag);

		BTRFS_I(inode)->generation = 0;
		cache->io_ctl.inode = NULL;
		iput(inode);
	}
	ASSERT(cache->io_ctl.pages == NULL);
	btrfs_put_block_group(cache);
}
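
/*
 * Clean up the dirty and in-flight block groups of an aborted transaction,
 * marking their space cache as broken (BTRFS_DC_ERROR) and dropping the
 * references the transaction held on them.
 */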
void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans,
			     struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_group *cache;

	spin_lock(&cur_trans->dirty_bgs_lock);
	while (!list_empty(&cur_trans->dirty_bgs)) {
		cache = list_first_entry(&cur_trans->dirty_bgs,
					 struct btrfs_block_group,
					 dirty_list);

		if (!list_empty(&cache->io_list)) {
			spin_unlock(&cur_trans->dirty_bgs_lock);
			list_del_init(&cache->io_list);
			btrfs_cleanup_bg_io(cache);
			spin_lock(&cur_trans->dirty_bgs_lock);
		}

		list_del_init(&cache->dirty_list);
		spin_lock(&cache->lock);
		cache->disk_cache_state = BTRFS_DC_ERROR;
		spin_unlock(&cache->lock);

		spin_unlock(&cur_trans->dirty_bgs_lock);
		btrfs_put_block_group(cache);
		btrfs_dec_delayed_refs_rsv_bg_updates(fs_info);
		spin_lock(&cur_trans->dirty_bgs_lock);
	}
	spin_unlock(&cur_trans->dirty_bgs_lock);

	/*
	 * Refer to the definition of the io_bgs member for details on why it's
	 * safe to use it without any locking.
	 */
	while (!list_empty(&cur_trans->io_bgs)) {
		cache = list_first_entry(&cur_trans->io_bgs,
					 struct btrfs_block_group,
					 io_list);

		list_del_init(&cache->io_list);
		spin_lock(&cache->lock);
		cache->disk_cache_state = BTRFS_DC_ERROR;
		spin_unlock(&cache->lock);
		btrfs_cleanup_bg_io(cache);
	}
}
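
/*
 * Free the per-transaction qgroup metadata reservations of all roots that
 * are still tagged as part of the transaction and clear the tag.
 */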
static void btrfs_free_all_qgroup_pertrans(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *gang[8];
	int i;
	int ret;

	spin_lock(&fs_info->fs_roots_radix_lock);
	while (1) {
		ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
						 (void **)gang, 0,
						 ARRAY_SIZE(gang),
						 BTRFS_ROOT_TRANS_TAG);
		if (ret == 0)
			break;
		for (i = 0; i < ret; i++) {
			struct btrfs_root *root = gang[i];

			btrfs_qgroup_free_meta_all_pertrans(root);
			radix_tree_tag_clear(&fs_info->fs_roots_radix,
					     (unsigned long)btrfs_root_id(root),
					     BTRFS_ROOT_TRANS_TAG);
		}
	}
	spin_unlock(&fs_info->fs_roots_radix_lock);
}
void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
				   struct btrfs_fs_info *fs_info)
{
	struct btrfs_device *dev, *tmp;

	btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
	ASSERT(list_empty(&cur_trans->dirty_bgs));
	ASSERT(list_empty(&cur_trans->io_bgs));

	list_for_each_entry_safe(dev, tmp, &cur_trans->dev_update_list,
				 post_commit_list) {
		list_del_init(&dev->post_commit_list);
	}

	btrfs_destroy_delayed_refs(cur_trans, fs_info);

	cur_trans->state = TRANS_STATE_COMMIT_START;
	wake_up(&fs_info->transaction_blocked_wait);

	cur_trans->state = TRANS_STATE_UNBLOCKED;
	wake_up(&fs_info->transaction_wait);

	btrfs_destroy_marked_extents(fs_info, &cur_trans->dirty_pages,
				     EXTENT_DIRTY);
	btrfs_destroy_pinned_extent(fs_info, &cur_trans->pinned_extents);

	cur_trans->state = TRANS_STATE_COMPLETED;
	wake_up(&cur_trans->commit_wait);
}
static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info)
{
	struct btrfs_transaction *t;

	mutex_lock(&fs_info->transaction_kthread_mutex);

	spin_lock(&fs_info->trans_lock);
	while (!list_empty(&fs_info->trans_list)) {
		t = list_first_entry(&fs_info->trans_list,
				     struct btrfs_transaction, list);
		if (t->state >= TRANS_STATE_COMMIT_PREP) {
			refcount_inc(&t->use_count);
			spin_unlock(&fs_info->trans_lock);
			btrfs_wait_for_commit(fs_info, t->transid);
			btrfs_put_transaction(t);
			spin_lock(&fs_info->trans_lock);
			continue;
		}
		if (t == fs_info->running_transaction) {
			t->state = TRANS_STATE_COMMIT_DOING;
			spin_unlock(&fs_info->trans_lock);
			/*
			 * We wait for 0 num_writers since we don't hold a trans
			 * handle open currently for this transaction.
			 */
			wait_event(t->writer_wait,
				   atomic_read(&t->num_writers) == 0);
		} else {
			spin_unlock(&fs_info->trans_lock);
		}
		btrfs_cleanup_one_transaction(t, fs_info);

		spin_lock(&fs_info->trans_lock);
		if (t == fs_info->running_transaction)
			fs_info->running_transaction = NULL;
		list_del_init(&t->list);
		spin_unlock(&fs_info->trans_lock);

		btrfs_put_transaction(t);
		trace_btrfs_transaction_commit(fs_info);
		spin_lock(&fs_info->trans_lock);
	}
	spin_unlock(&fs_info->trans_lock);
	btrfs_destroy_all_ordered_extents(fs_info);
	btrfs_destroy_delayed_inodes(fs_info);
	btrfs_assert_delayed_root_empty(fs_info);
	btrfs_destroy_all_delalloc_inodes(fs_info);
	btrfs_drop_all_logs(fs_info);
	btrfs_free_all_qgroup_pertrans(fs_info);
	mutex_unlock(&fs_info->transaction_kthread_mutex);

	return 0;
}
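
/*
 * Find the highest objectid currently used in the root's tree and initialize
 * root->free_objectid to the first free objectid after it.
 */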
int btrfs_init_root_free_objectid(struct btrfs_root *root)
{
	struct btrfs_path *path;
	int ret;
	struct extent_buffer *l;
	struct btrfs_key search_key;
	struct btrfs_key found_key;
	int slot;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	search_key.objectid = BTRFS_LAST_FREE_OBJECTID;
	search_key.type = -1;
	search_key.offset = (u64)-1;
	ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	if (ret < 0)
		goto error;
	if (ret == 0) {
		/*
		 * Key with offset -1 found, there would have to exist a root
		 * with such an id, but this is out of the valid range.
		 */
		ret = -EUCLEAN;
		goto error;
	}
	if (path->slots[0] > 0) {
		slot = path->slots[0] - 1;
		l = path->nodes[0];
		btrfs_item_key_to_cpu(l, &found_key, slot);
		root->free_objectid = max_t(u64, found_key.objectid + 1,
					    BTRFS_FIRST_FREE_OBJECTID);
	} else {
		root->free_objectid = BTRFS_FIRST_FREE_OBJECTID;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
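
/*
 * Hand out the next free objectid of the root. Returns -ENOSPC once the
 * objectid space is exhausted (BTRFS_LAST_FREE_OBJECTID reached).
 *
 * Typical usage (a sketch, error handling elided):
 *
 *	u64 objectid;
 *	int ret = btrfs_get_free_objectid(root, &objectid);
 *	if (ret)
 *		return ret;
 *	(use objectid for a new inode or similar item in this root)
 */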
int btrfs_get_free_objectid(struct btrfs_root *root, u64 *objectid)
{
	int ret;

	mutex_lock(&root->objectid_mutex);
	if (unlikely(root->free_objectid >= BTRFS_LAST_FREE_OBJECTID)) {
		btrfs_warn(root->fs_info,
			   "the objectid of root %llu reaches its highest value",
			   btrfs_root_id(root));
		ret = -ENOSPC;
		goto out;
	}

	*objectid = root->free_objectid++;
	ret = 0;
out:
	mutex_unlock(&root->objectid_mutex);
	return ret;
}