* ref_head. Must clean this mess up later.
*/
struct btrfs_delayed_ref_node {
- /*
- * ref_head use rb tree, stored in ref_root->href.
- * indexed by bytenr
- */
- struct rb_node rb_node;
-
/*data/tree ref use list, stored in ref_head->ref_list. */
struct list_head list;
+ /*
+ * If action is BTRFS_ADD_DELAYED_REF, also link this node to
+ * ref_head->ref_add_list, so that we do not need to iterate the
+ * whole ref_head->ref_list to find BTRFS_ADD_DELAYED_REF nodes.
+ */
+ struct list_head add_list;
/* the starting bytenr of the extent */
u64 bytenr;
spinlock_t lock;
struct list_head ref_list;
+ /* accumulate BTRFS_ADD_DELAYED_REF nodes in this ref_add_list. */
+ struct list_head ref_add_list;
struct rb_node href_node;
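
To make the new lists concrete, here is a minimal consumer-side sketch (not part of the patch; it assumes the usual struct btrfs_delayed_ref_head and the standard <linux/list.h> helpers): instead of filtering ref_list for BTRFS_ADD_DELAYED_REF entries, a caller can take the first node straight off ref_add_list.

static struct btrfs_delayed_ref_node *
first_add_ref(struct btrfs_delayed_ref_head *head)
{
	/* empty means no BTRFS_ADD_DELAYED_REF node is queued */
	if (list_empty(&head->ref_add_list))
		return NULL;
	return list_first_entry(&head->ref_add_list,
				struct btrfs_delayed_ref_node, add_list);
}
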
int read_only);
static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
- struct btrfs_root *root);
+ struct btrfs_fs_info *fs_info);
static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root);
-static int btrfs_destroy_marked_extents(struct btrfs_root *root,
+static int btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
struct extent_io_tree *dirty_pages,
int mark);
-static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
+static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
struct extent_io_tree *pinned_extents);
-static int btrfs_cleanup_transaction(struct btrfs_root *root);
-static void btrfs_error_commit_super(struct btrfs_root *root);
+static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info);
+static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info);
/*
* btrfs_end_io_wq structs are used to do processing in task context when an IO
struct page *page, size_t pg_offset, u64 start, u64 len,
int create)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
struct extent_map *em;
int ret;
read_lock(&em_tree->lock);
em = lookup_extent_mapping(em_tree, start, len);
if (em) {
- em->bdev =
- BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
+ em->bdev = fs_info->fs_devices->latest_bdev;
read_unlock(&em_tree->lock);
goto out;
}
em->len = (u64)-1;
em->block_len = (u64)-1;
em->block_start = 0;
- em->bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
+ em->bdev = fs_info->fs_devices->latest_bdev;
write_lock(&em_tree->lock);
ret = add_extent_mapping(em_tree, em, 0);
return btrfs_crc32c(seed, data, len);
}
-void btrfs_csum_final(u32 crc, char *result)
+void btrfs_csum_final(u32 crc, u8 *result)
{
put_unaligned_le32(~crc, result);
}
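
For context, a minimal sketch of how the two checksum helpers fit together (the wrapper name is made up for illustration, and it assumes btrfs_csum_data() keeps its usual (data, seed, len) signature): the crc32c is seeded with ~0, accumulated over the data, then inverted and stored as a little-endian 32-bit value by btrfs_csum_final().

static void csum_bytes(char *data, size_t len, u8 result[4])
{
	u32 crc = ~(u32)0;		/* initial crc32c seed */

	crc = btrfs_csum_data(data, crc, len);
	btrfs_csum_final(crc, result);	/* writes ~crc, little-endian */
}
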
* helper to read a given tree block, doing retries as required when
* the checksums don't match and we have alternate mirrors to try.
*/
-static int btree_read_extent_buffer_pages(struct btrfs_root *root,
+static int btree_read_extent_buffer_pages(struct btrfs_fs_info *fs_info,
struct extent_buffer *eb,
u64 parent_transid)
{
int failed_mirror = 0;
clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
- io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
+ io_tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
while (1) {
ret = read_extent_buffer_pages(io_tree, eb, WAIT_COMPLETE,
btree_get_extent, mirror_num);
if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags))
break;
- num_copies = btrfs_num_copies(root->fs_info,
+ num_copies = btrfs_num_copies(fs_info,
eb->start, eb->len);
if (num_copies == 1)
break;
}
if (failed && !ret && failed_mirror)
- repair_eb_io_failure(root, eb, failed_mirror);
+ repair_eb_io_failure(fs_info, eb, failed_mirror);
return ret;
}
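
The mirror stepping used by the retry loop above can be summarized in a small, self-contained helper (illustrative only, not part of the patch): retries start at mirror 1, the mirror that already failed is skipped, and 0 is returned once all copies have been tried.

static int next_mirror(int mirror_num, int failed_mirror, int num_copies)
{
	mirror_num++;
	if (mirror_num == failed_mirror)
		mirror_num++;
	return mirror_num > num_copies ? 0 : mirror_num;
}
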
return ret;
}
-#define CORRUPT(reason, eb, root, slot) \
- btrfs_crit(root->fs_info, "corrupt %s, %s: block=%llu," \
- " root=%llu, slot=%d", \
- btrfs_header_level(eb) == 0 ? "leaf" : "node",\
+#define CORRUPT(reason, eb, root, slot) \
+ btrfs_crit(root->fs_info, \
+ "corrupt %s, %s: block=%llu, root=%llu, slot=%d", \
+ btrfs_header_level(eb) == 0 ? "leaf" : "node", \
reason, btrfs_header_bytenr(eb), root->objectid, slot)
static noinline int check_leaf(struct btrfs_root *root,
struct extent_buffer *leaf)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_key key;
struct btrfs_key leaf_key;
u32 nritems = btrfs_header_nritems(leaf);
int slot;
- if (nritems == 0) {
+ /*
+ * Extent buffers from a relocation tree have an owner field that
+ * corresponds to the subvolume tree they are based on. So just from an
+ * extent buffer alone we cannot find out the id of the corresponding
+ * subvolume tree, and therefore cannot tell whether the extent buffer
+ * corresponds to the root of the relocation tree or not. So skip this
+ * check for relocation trees.
+ */
+ if (nritems == 0 && !btrfs_header_flag(leaf, BTRFS_HEADER_FLAG_RELOC)) {
struct btrfs_root *check_root;
key.objectid = btrfs_header_owner(leaf);
key.type = BTRFS_ROOT_ITEM_KEY;
key.offset = (u64)-1;
- check_root = btrfs_get_fs_root(root->fs_info, &key, false);
+ check_root = btrfs_get_fs_root(fs_info, &key, false);
/*
* The only reason we also check NULL here is that during
* open_ctree() some roots have not yet been set up.
*/
if (!IS_ERR_OR_NULL(check_root)) {
+ struct extent_buffer *eb;
+
+ eb = btrfs_root_node(check_root);
/* if leaf is the root, then it's fine */
- if (leaf->start !=
- btrfs_root_bytenr(&check_root->root_item)) {
+ if (leaf != eb) {
CORRUPT("non-root leaf's nritems is 0",
- leaf, root, 0);
+ leaf, check_root, 0);
+ free_extent_buffer(eb);
return -EIO;
}
+ free_extent_buffer(eb);
}
return 0;
}
+ if (nritems == 0)
+ return 0;
+
/* Check the 0 item */
if (btrfs_item_offset_nr(leaf, 0) + btrfs_item_size_nr(leaf, 0) !=
- BTRFS_LEAF_DATA_SIZE(root)) {
+ BTRFS_LEAF_DATA_SIZE(fs_info)) {
CORRUPT("invalid item offset size pair", leaf, root, 0);
return -EIO;
}
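
The reason item 0 gets this dedicated check: leaf item data is packed from the end of the block toward the front, so the first item's data must end exactly at BTRFS_LEAF_DATA_SIZE. For example (numbers made up for illustration), if BTRFS_LEAF_DATA_SIZE(fs_info) is 16256 and item 0 carries 161 bytes of data, its offset must be 16256 - 161 = 16095; any other offset/size pair means the leaf is corrupt.
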
* all point outside of the leaf.
*/
if (btrfs_item_end_nr(leaf, slot) >
- BTRFS_LEAF_DATA_SIZE(root)) {
+ BTRFS_LEAF_DATA_SIZE(fs_info)) {
CORRUPT("slot end outside of leaf", leaf, root, slot);
return -EIO;
}
u64 bytenr;
int ret = 0;
- if (nr == 0 || nr > BTRFS_NODEPTRS_PER_BLOCK(root)) {
+ if (nr == 0 || nr > BTRFS_NODEPTRS_PER_BLOCK(root->fs_info)) {
btrfs_crit(root->fs_info,
"corrupt node: block %llu root %llu nritems %lu",
node->start, root->objectid, nr);
err:
if (reads_done &&
test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
- btree_readahead_hook(fs_info, eb, eb->start, ret);
+ btree_readahead_hook(fs_info, eb, ret);
if (ret) {
/*
eb->read_mirror = failed_mirror;
atomic_dec(&eb->io_pages);
if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
- btree_readahead_hook(eb->fs_info, eb, eb->start, -EIO);
+ btree_readahead_hook(eb->fs_info, eb, -EIO);
return -EIO; /* we fixed nothing */
}
* when we're called for a write, we're already in the async
* submission context. Just jump into btrfs_map_bio
*/
- ret = btrfs_map_bio(BTRFS_I(inode)->root, bio, mirror_num, 1);
+ ret = btrfs_map_bio(btrfs_sb(inode->i_sb), bio, mirror_num, 1);
if (ret) {
bio->bi_error = ret;
bio_endio(bio);
int mirror_num, unsigned long bio_flags,
u64 bio_offset)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
int async = check_async_write(inode, bio_flags);
int ret;
* called for a read, do the setup so that checksum validation
* can happen in the async kernel threads
*/
- ret = btrfs_bio_wq_end_io(BTRFS_I(inode)->root->fs_info,
- bio, BTRFS_WQ_ENDIO_METADATA);
+ ret = btrfs_bio_wq_end_io(fs_info, bio,
+ BTRFS_WQ_ENDIO_METADATA);
if (ret)
goto out_w_error;
- ret = btrfs_map_bio(BTRFS_I(inode)->root, bio, mirror_num, 0);
+ ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
} else if (!async) {
ret = btree_csum_one_bio(bio);
if (ret)
goto out_w_error;
- ret = btrfs_map_bio(BTRFS_I(inode)->root, bio, mirror_num, 0);
+ ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
} else {
/*
* kthread helpers are used to submit writes so that
* checksumming can happen in parallel across all CPUs
*/
- ret = btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
- inode, bio, mirror_num, 0,
+ ret = btrfs_wq_submit_bio(fs_info, inode, bio, mirror_num, 0,
bio_offset,
__btree_submit_bio_start,
__btree_submit_bio_done);
.set_page_dirty = btree_set_page_dirty,
};
-void readahead_tree_block(struct btrfs_root *root, u64 bytenr)
+void readahead_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr)
{
struct extent_buffer *buf = NULL;
- struct inode *btree_inode = root->fs_info->btree_inode;
+ struct inode *btree_inode = fs_info->btree_inode;
- buf = btrfs_find_create_tree_block(root, bytenr);
+ buf = btrfs_find_create_tree_block(fs_info, bytenr);
if (IS_ERR(buf))
return;
read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
free_extent_buffer(buf);
}
-int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr,
+int reada_tree_block_flagged(struct btrfs_fs_info *fs_info, u64 bytenr,
int mirror_num, struct extent_buffer **eb)
{
struct extent_buffer *buf = NULL;
- struct inode *btree_inode = root->fs_info->btree_inode;
+ struct inode *btree_inode = fs_info->btree_inode;
struct extent_io_tree *io_tree = &BTRFS_I(btree_inode)->io_tree;
int ret;
- buf = btrfs_find_create_tree_block(root, bytenr);
+ buf = btrfs_find_create_tree_block(fs_info, bytenr);
if (IS_ERR(buf))
return 0;
return 0;
}
-struct extent_buffer *btrfs_find_tree_block(struct btrfs_fs_info *fs_info,
- u64 bytenr)
-{
- return find_extent_buffer(fs_info, bytenr);
-}
-
-struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
- u64 bytenr)
+struct extent_buffer *btrfs_find_create_tree_block(
+ struct btrfs_fs_info *fs_info,
+ u64 bytenr)
{
- if (btrfs_is_testing(root->fs_info))
- return alloc_test_extent_buffer(root->fs_info, bytenr,
- root->nodesize);
- return alloc_extent_buffer(root->fs_info, bytenr);
+ if (btrfs_is_testing(fs_info))
+ return alloc_test_extent_buffer(fs_info, bytenr);
+ return alloc_extent_buffer(fs_info, bytenr);
}
buf->start, buf->start + buf->len - 1);
}
-struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
+struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr,
u64 parent_transid)
{
struct extent_buffer *buf = NULL;
int ret;
- buf = btrfs_find_create_tree_block(root, bytenr);
+ buf = btrfs_find_create_tree_block(fs_info, bytenr);
if (IS_ERR(buf))
return buf;
- ret = btree_read_extent_buffer_pages(root, buf, parent_transid);
+ ret = btree_read_extent_buffer_pages(fs_info, buf, parent_transid);
if (ret) {
free_extent_buffer(buf);
return ERR_PTR(ret);
kfree(writers);
}
-static void __setup_root(u32 nodesize, u32 sectorsize, u32 stripesize,
- struct btrfs_root *root, struct btrfs_fs_info *fs_info,
+static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
u64 objectid)
{
bool dummy = test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state);
root->node = NULL;
root->commit_root = NULL;
- root->sectorsize = sectorsize;
- root->nodesize = nodesize;
- root->stripesize = stripesize;
root->state = 0;
root->orphan_cleanup_state = 0;
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
/* Should only be used by the testing infrastructure */
-struct btrfs_root *btrfs_alloc_dummy_root(struct btrfs_fs_info *fs_info,
- u32 sectorsize, u32 nodesize)
+struct btrfs_root *btrfs_alloc_dummy_root(struct btrfs_fs_info *fs_info)
{
struct btrfs_root *root;
root = btrfs_alloc_root(fs_info, GFP_KERNEL);
if (!root)
return ERR_PTR(-ENOMEM);
+
/* We don't use the stripesize in selftest, set it as sectorsize */
- __setup_root(nodesize, sectorsize, sectorsize, root, fs_info,
- BTRFS_ROOT_TREE_OBJECTID);
+ __setup_root(root, fs_info, BTRFS_ROOT_TREE_OBJECTID);
root->alloc_bytenr = 0;
return root;
if (!root)
return ERR_PTR(-ENOMEM);
- __setup_root(tree_root->nodesize, tree_root->sectorsize,
- tree_root->stripesize, root, fs_info, objectid);
+ __setup_root(root, fs_info, objectid);
root->root_key.objectid = objectid;
root->root_key.type = BTRFS_ROOT_ITEM_KEY;
root->root_key.offset = 0;
goto fail;
}
- memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header));
+ memzero_extent_buffer(leaf, 0, sizeof(struct btrfs_header));
btrfs_set_header_bytenr(leaf, leaf->start);
btrfs_set_header_generation(leaf, trans->transid);
btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
btrfs_set_header_owner(leaf, objectid);
root->node = leaf;
- write_extent_buffer(leaf, fs_info->fsid, btrfs_header_fsid(),
- BTRFS_FSID_SIZE);
- write_extent_buffer(leaf, fs_info->chunk_tree_uuid,
- btrfs_header_chunk_tree_uuid(leaf),
- BTRFS_UUID_SIZE);
+ write_extent_buffer_fsid(leaf, fs_info->fsid);
+ write_extent_buffer_chunk_tree_uuid(leaf, fs_info->chunk_tree_uuid);
btrfs_mark_buffer_dirty(leaf);
root->commit_root = btrfs_root_node(root);
struct btrfs_fs_info *fs_info)
{
struct btrfs_root *root;
- struct btrfs_root *tree_root = fs_info->tree_root;
struct extent_buffer *leaf;
root = btrfs_alloc_root(fs_info, GFP_NOFS);
if (!root)
return ERR_PTR(-ENOMEM);
- __setup_root(tree_root->nodesize, tree_root->sectorsize,
- tree_root->stripesize, root, fs_info,
- BTRFS_TREE_LOG_OBJECTID);
+ __setup_root(root, fs_info, BTRFS_TREE_LOG_OBJECTID);
root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
root->root_key.type = BTRFS_ROOT_ITEM_KEY;
return ERR_CAST(leaf);
}
- memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header));
+ memzero_extent_buffer(leaf, 0, sizeof(struct btrfs_header));
btrfs_set_header_bytenr(leaf, leaf->start);
btrfs_set_header_generation(leaf, trans->transid);
btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
btrfs_set_header_owner(leaf, BTRFS_TREE_LOG_OBJECTID);
root->node = leaf;
- write_extent_buffer(root->node, root->fs_info->fsid,
- btrfs_header_fsid(), BTRFS_FSID_SIZE);
+ write_extent_buffer_fsid(root->node, fs_info->fsid);
btrfs_mark_buffer_dirty(root->node);
btrfs_tree_unlock(root->node);
return root;
int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_root *log_root;
struct btrfs_inode_item *inode_item;
- log_root = alloc_log_tree(trans, root->fs_info);
+ log_root = alloc_log_tree(trans, fs_info);
if (IS_ERR(log_root))
return PTR_ERR(log_root);
btrfs_set_stack_inode_generation(inode_item, 1);
btrfs_set_stack_inode_size(inode_item, 3);
btrfs_set_stack_inode_nlink(inode_item, 1);
- btrfs_set_stack_inode_nbytes(inode_item, root->nodesize);
+ btrfs_set_stack_inode_nbytes(inode_item,
+ fs_info->nodesize);
btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755);
btrfs_set_root_node(&log_root->root_item, log_root->node);
goto alloc_fail;
}
- __setup_root(tree_root->nodesize, tree_root->sectorsize,
- tree_root->stripesize, root, fs_info, key->objectid);
+ __setup_root(root, fs_info, key->objectid);
ret = btrfs_find_root(tree_root, key, path,
&root->root_item, &root->root_key);
}
generation = btrfs_root_generation(&root->root_item);
- root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
+ root->node = read_tree_block(fs_info,
+ btrfs_root_bytenr(&root->root_item),
generation);
if (IS_ERR(root->node)) {
ret = PTR_ERR(root->node);
static int cleaner_kthread(void *arg)
{
struct btrfs_root *root = arg;
+ struct btrfs_fs_info *fs_info = root->fs_info;
int again;
struct btrfs_trans_handle *trans;
again = 0;
/* Make the cleaner go to sleep early. */
- if (btrfs_need_cleaner_sleep(root))
+ if (btrfs_need_cleaner_sleep(fs_info))
goto sleep;
/*
* Do not do anything if we might cause open_ctree() to block
* before we have finished mounting the filesystem.
*/
- if (!test_bit(BTRFS_FS_OPEN, &root->fs_info->flags))
+ if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
goto sleep;
- if (!mutex_trylock(&root->fs_info->cleaner_mutex))
+ if (!mutex_trylock(&fs_info->cleaner_mutex))
goto sleep;
/*
* Avoid the problem that we change the status of the fs
* during the above check and trylock.
*/
- if (btrfs_need_cleaner_sleep(root)) {
- mutex_unlock(&root->fs_info->cleaner_mutex);
+ if (btrfs_need_cleaner_sleep(fs_info)) {
+ mutex_unlock(&fs_info->cleaner_mutex);
goto sleep;
}
- mutex_lock(&root->fs_info->cleaner_delayed_iput_mutex);
- btrfs_run_delayed_iputs(root);
- mutex_unlock(&root->fs_info->cleaner_delayed_iput_mutex);
+ mutex_lock(&fs_info->cleaner_delayed_iput_mutex);
+ btrfs_run_delayed_iputs(fs_info);
+ mutex_unlock(&fs_info->cleaner_delayed_iput_mutex);
again = btrfs_clean_one_deleted_snapshot(root);
- mutex_unlock(&root->fs_info->cleaner_mutex);
+ mutex_unlock(&fs_info->cleaner_mutex);
/*
* The defragger has dealt with the R/O remount and umount,
* needn't do anything special here.
*/
- btrfs_run_defrag_inodes(root->fs_info);
+ btrfs_run_defrag_inodes(fs_info);
/*
* Acquires fs_info->delete_unused_bgs_mutex to avoid racing
* can't hold, nor need to, fs_info->cleaner_mutex when deleting
* unused block groups.
*/
- btrfs_delete_unused_bgs(root->fs_info);
+ btrfs_delete_unused_bgs(fs_info);
sleep:
if (!again) {
set_current_state(TASK_INTERRUPTIBLE);
trans = btrfs_attach_transaction(root);
if (IS_ERR(trans)) {
if (PTR_ERR(trans) != -ENOENT)
- btrfs_err(root->fs_info,
+ btrfs_err(fs_info,
"cleaner transaction attach returned %ld",
PTR_ERR(trans));
} else {
int ret;
- ret = btrfs_commit_transaction(trans, root);
+ ret = btrfs_commit_transaction(trans);
if (ret)
- btrfs_err(root->fs_info,
+ btrfs_err(fs_info,
"cleaner open transaction commit returned %d",
ret);
}
static int transaction_kthread(void *arg)
{
struct btrfs_root *root = arg;
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_trans_handle *trans;
struct btrfs_transaction *cur;
u64 transid;
do {
cannot_commit = false;
- delay = HZ * root->fs_info->commit_interval;
- mutex_lock(&root->fs_info->transaction_kthread_mutex);
+ delay = HZ * fs_info->commit_interval;
+ mutex_lock(&fs_info->transaction_kthread_mutex);
- spin_lock(&root->fs_info->trans_lock);
- cur = root->fs_info->running_transaction;
+ spin_lock(&fs_info->trans_lock);
+ cur = fs_info->running_transaction;
if (!cur) {
- spin_unlock(&root->fs_info->trans_lock);
+ spin_unlock(&fs_info->trans_lock);
goto sleep;
}
now = get_seconds();
if (cur->state < TRANS_STATE_BLOCKED &&
(now < cur->start_time ||
- now - cur->start_time < root->fs_info->commit_interval)) {
- spin_unlock(&root->fs_info->trans_lock);
+ now - cur->start_time < fs_info->commit_interval)) {
+ spin_unlock(&fs_info->trans_lock);
delay = HZ * 5;
goto sleep;
}
transid = cur->transid;
- spin_unlock(&root->fs_info->trans_lock);
+ spin_unlock(&fs_info->trans_lock);
/* If the file system is aborted, this will always fail. */
trans = btrfs_attach_transaction(root);
goto sleep;
}
if (transid == trans->transid) {
- btrfs_commit_transaction(trans, root);
+ btrfs_commit_transaction(trans);
} else {
- btrfs_end_transaction(trans, root);
+ btrfs_end_transaction(trans);
}
sleep:
- wake_up_process(root->fs_info->cleaner_kthread);
- mutex_unlock(&root->fs_info->transaction_kthread_mutex);
+ wake_up_process(fs_info->cleaner_kthread);
+ mutex_unlock(&fs_info->transaction_kthread_mutex);
if (unlikely(test_bit(BTRFS_FS_STATE_ERROR,
- &root->fs_info->fs_state)))
- btrfs_cleanup_transaction(root);
+ &fs_info->fs_state)))
+ btrfs_cleanup_transaction(fs_info);
set_current_state(TASK_INTERRUPTIBLE);
if (!kthread_should_stop() &&
- (!btrfs_transaction_blocked(root->fs_info) ||
+ (!btrfs_transaction_blocked(fs_info) ||
cannot_commit))
schedule_timeout(delay);
__set_current_state(TASK_RUNNING);
if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
btrfs_free_log_root_tree(NULL, fs_info);
- btrfs_destroy_pinned_extent(fs_info->tree_root,
- fs_info->pinned_extents);
+ btrfs_destroy_pinned_extent(fs_info, fs_info->pinned_extents);
}
}
init_waitqueue_head(&fs_info->balance_wait_q);
}
-static void btrfs_init_btree_inode(struct btrfs_fs_info *fs_info,
- struct btrfs_root *tree_root)
+static void btrfs_init_btree_inode(struct btrfs_fs_info *fs_info)
{
- fs_info->btree_inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
- set_nlink(fs_info->btree_inode, 1);
+ struct inode *inode = fs_info->btree_inode;
+
+ inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
+ set_nlink(inode, 1);
/*
* we set the i_size on the btree inode to the max possible int.
* the real end of the address space is determined by all of
* the devices in the system
*/
- fs_info->btree_inode->i_size = OFFSET_MAX;
- fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
+ inode->i_size = OFFSET_MAX;
+ inode->i_mapping->a_ops = &btree_aops;
- RB_CLEAR_NODE(&BTRFS_I(fs_info->btree_inode)->rb_node);
- extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree,
- fs_info->btree_inode->i_mapping);
- BTRFS_I(fs_info->btree_inode)->io_tree.track_uptodate = 0;
- extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree);
+ RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
+ extent_io_tree_init(&BTRFS_I(inode)->io_tree, inode->i_mapping);
+ BTRFS_I(inode)->io_tree.track_uptodate = 0;
+ extent_map_tree_init(&BTRFS_I(inode)->extent_tree);
- BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops;
+ BTRFS_I(inode)->io_tree.ops = &btree_extent_io_ops;
- BTRFS_I(fs_info->btree_inode)->root = tree_root;
- memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
- sizeof(struct btrfs_key));
- set_bit(BTRFS_INODE_DUMMY,
- &BTRFS_I(fs_info->btree_inode)->runtime_flags);
- btrfs_insert_inode_hash(fs_info->btree_inode);
+ BTRFS_I(inode)->root = fs_info->tree_root;
+ memset(&BTRFS_I(inode)->location, 0, sizeof(struct btrfs_key));
+ set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
+ btrfs_insert_inode_hash(inode);
}
static void btrfs_init_dev_replace_locks(struct btrfs_fs_info *fs_info)
struct btrfs_fs_devices *fs_devices)
{
int ret;
- struct btrfs_root *tree_root = fs_info->tree_root;
struct btrfs_root *log_tree_root;
struct btrfs_super_block *disk_super = fs_info->super_copy;
u64 bytenr = btrfs_super_log_root(disk_super);
if (!log_tree_root)
return -ENOMEM;
- __setup_root(tree_root->nodesize, tree_root->sectorsize,
- tree_root->stripesize, log_tree_root, fs_info,
- BTRFS_TREE_LOG_OBJECTID);
+ __setup_root(log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID);
- log_tree_root->node = read_tree_block(tree_root, bytenr,
- fs_info->generation + 1);
+ log_tree_root->node = read_tree_block(fs_info, bytenr,
+ fs_info->generation + 1);
if (IS_ERR(log_tree_root->node)) {
btrfs_warn(fs_info, "failed to read log tree");
ret = PTR_ERR(log_tree_root->node);
/* returns with log_tree_root freed on success */
ret = btrfs_recover_log_trees(log_tree_root);
if (ret) {
- btrfs_handle_fs_error(tree_root->fs_info, ret,
- "Failed to recover log tree");
+ btrfs_handle_fs_error(fs_info, ret,
+ "Failed to recover log tree");
free_extent_buffer(log_tree_root->node);
kfree(log_tree_root);
return ret;
}
if (fs_info->sb->s_flags & MS_RDONLY) {
- ret = btrfs_commit_super(tree_root);
+ ret = btrfs_commit_super(fs_info);
if (ret)
return ret;
}
return 0;
}
-static int btrfs_read_roots(struct btrfs_fs_info *fs_info,
- struct btrfs_root *tree_root)
+static int btrfs_read_roots(struct btrfs_fs_info *fs_info)
{
+ struct btrfs_root *tree_root = fs_info->tree_root;
struct btrfs_root *root;
struct btrfs_key location;
int ret;
+ BUG_ON(!fs_info->tree_root);
+
location.objectid = BTRFS_EXTENT_TREE_OBJECTID;
location.type = BTRFS_ROOT_ITEM_KEY;
location.offset = 0;
sb->s_blocksize_bits = blksize_bits(4096);
sb->s_bdi = &fs_info->bdi;
- btrfs_init_btree_inode(fs_info, tree_root);
+ btrfs_init_btree_inode(fs_info);
spin_lock_init(&fs_info->block_group_cache_lock);
fs_info->block_group_cache_tree = RB_ROOT;
INIT_LIST_HEAD(&fs_info->pinned_chunks);
+ /* Usable values until the real ones are cached from the superblock */
+ fs_info->nodesize = 4096;
+ fs_info->sectorsize = 4096;
+ fs_info->stripesize = 4096;
+
ret = btrfs_alloc_stripe_hash_table(fs_info);
if (ret) {
err = ret;
goto fail_alloc;
}
- __setup_root(4096, 4096, 4096, tree_root,
- fs_info, BTRFS_ROOT_TREE_OBJECTID);
+ __setup_root(tree_root, fs_info, BTRFS_ROOT_TREE_OBJECTID);
invalidate_bdev(fs_devices->latest_bdev);
*/
fs_info->compress_type = BTRFS_COMPRESS_ZLIB;
- ret = btrfs_parse_options(tree_root, options, sb->s_flags);
+ ret = btrfs_parse_options(fs_info, options, sb->s_flags);
if (ret) {
err = ret;
goto fail_alloc;
features = btrfs_super_incompat_flags(disk_super);
features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
- if (tree_root->fs_info->compress_type == BTRFS_COMPRESS_LZO)
+ if (fs_info->compress_type == BTRFS_COMPRESS_LZO)
features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA)
fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids));
fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
+ /* Cache block sizes */
+ fs_info->nodesize = nodesize;
+ fs_info->sectorsize = sectorsize;
+ fs_info->stripesize = stripesize;
+
/*
* mixed block groups end up with duplicate but slightly offset
* extent buffers for the same range. It leads to corruptions
fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
SZ_4M / PAGE_SIZE);
- tree_root->nodesize = nodesize;
- tree_root->sectorsize = sectorsize;
- tree_root->stripesize = stripesize;
-
sb->s_blocksize = sectorsize;
sb->s_blocksize_bits = blksize_bits(sectorsize);
mutex_lock(&fs_info->chunk_mutex);
- ret = btrfs_read_sys_array(tree_root);
+ ret = btrfs_read_sys_array(fs_info);
mutex_unlock(&fs_info->chunk_mutex);
if (ret) {
btrfs_err(fs_info, "failed to read the system array: %d", ret);
generation = btrfs_super_chunk_root_generation(disk_super);
- __setup_root(nodesize, sectorsize, stripesize, chunk_root,
- fs_info, BTRFS_CHUNK_TREE_OBJECTID);
+ __setup_root(chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID);
- chunk_root->node = read_tree_block(chunk_root,
+ chunk_root->node = read_tree_block(fs_info,
btrfs_super_chunk_root(disk_super),
generation);
if (IS_ERR(chunk_root->node) ||
read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
btrfs_header_chunk_tree_uuid(chunk_root->node), BTRFS_UUID_SIZE);
- ret = btrfs_read_chunk_tree(chunk_root);
+ ret = btrfs_read_chunk_tree(fs_info);
if (ret) {
btrfs_err(fs_info, "failed to read chunk tree: %d", ret);
goto fail_tree_roots;
retry_root_backup:
generation = btrfs_super_generation(disk_super);
- tree_root->node = read_tree_block(tree_root,
+ tree_root->node = read_tree_block(fs_info,
btrfs_super_root(disk_super),
generation);
if (IS_ERR(tree_root->node) ||
mutex_unlock(&tree_root->objectid_mutex);
- ret = btrfs_read_roots(fs_info, tree_root);
+ ret = btrfs_read_roots(fs_info);
if (ret)
goto recovery_tree_root;
goto fail_sysfs;
}
- ret = btrfs_read_block_groups(fs_info->extent_root);
+ ret = btrfs_read_block_groups(fs_info);
if (ret) {
btrfs_err(fs_info, "failed to read block groups: %d", ret);
goto fail_sysfs;
if (IS_ERR(fs_info->transaction_kthread))
goto fail_cleaner;
- if (!btrfs_test_opt(tree_root->fs_info, SSD) &&
- !btrfs_test_opt(tree_root->fs_info, NOSSD) &&
+ if (!btrfs_test_opt(fs_info, SSD) &&
+ !btrfs_test_opt(fs_info, NOSSD) &&
!fs_info->fs_devices->rotating) {
btrfs_info(fs_info, "detected SSD devices, enabling SSD mode");
btrfs_set_opt(fs_info->mount_opt, SSD);
btrfs_apply_pending_changes(fs_info);
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
- if (btrfs_test_opt(tree_root->fs_info, CHECK_INTEGRITY)) {
- ret = btrfsic_mount(tree_root, fs_devices,
- btrfs_test_opt(tree_root->fs_info,
+ if (btrfs_test_opt(fs_info, CHECK_INTEGRITY)) {
+ ret = btrfsic_mount(fs_info, fs_devices,
+ btrfs_test_opt(fs_info,
CHECK_INTEGRITY_INCLUDING_EXTENT_DATA) ?
1 : 0,
fs_info->check_integrity_print_mask);
/* do not make disk changes in broken FS or nologreplay is given */
if (btrfs_super_log_root(disk_super) != 0 &&
- !btrfs_test_opt(tree_root->fs_info, NOLOGREPLAY)) {
+ !btrfs_test_opt(fs_info, NOLOGREPLAY)) {
ret = btrfs_replay_log(fs_info, fs_devices);
if (ret) {
err = ret;
}
}
- ret = btrfs_find_orphan_roots(tree_root);
+ ret = btrfs_find_orphan_roots(fs_info);
if (ret)
goto fail_qgroup;
if (ret) {
btrfs_warn(fs_info,
"failed to clear free space tree: %d", ret);
- close_ctree(tree_root);
+ close_ctree(fs_info);
return ret;
}
}
- if (btrfs_test_opt(tree_root->fs_info, FREE_SPACE_TREE) &&
+ if (btrfs_test_opt(fs_info, FREE_SPACE_TREE) &&
!btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
btrfs_info(fs_info, "creating free space tree");
ret = btrfs_create_free_space_tree(fs_info);
if (ret) {
btrfs_warn(fs_info,
"failed to create free space tree: %d", ret);
- close_ctree(tree_root);
+ close_ctree(fs_info);
return ret;
}
}
if ((ret = btrfs_orphan_cleanup(fs_info->fs_root)) ||
(ret = btrfs_orphan_cleanup(fs_info->tree_root))) {
up_read(&fs_info->cleanup_work_sem);
- close_ctree(tree_root);
+ close_ctree(fs_info);
return ret;
}
up_read(&fs_info->cleanup_work_sem);
ret = btrfs_resume_balance_async(fs_info);
if (ret) {
btrfs_warn(fs_info, "failed to resume balance: %d", ret);
- close_ctree(tree_root);
+ close_ctree(fs_info);
return ret;
}
ret = btrfs_resume_dev_replace_async(fs_info);
if (ret) {
btrfs_warn(fs_info, "failed to resume device replace: %d", ret);
- close_ctree(tree_root);
+ close_ctree(fs_info);
return ret;
}
if (ret) {
btrfs_warn(fs_info,
"failed to create the UUID tree: %d", ret);
- close_ctree(tree_root);
+ close_ctree(fs_info);
return ret;
}
- } else if (btrfs_test_opt(tree_root->fs_info, RESCAN_UUID_TREE) ||
+ } else if (btrfs_test_opt(fs_info, RESCAN_UUID_TREE) ||
fs_info->generation !=
btrfs_super_uuid_tree_generation(disk_super)) {
btrfs_info(fs_info, "checking UUID tree");
if (ret) {
btrfs_warn(fs_info,
"failed to check the UUID tree: %d", ret);
- close_ctree(tree_root);
+ close_ctree(fs_info);
return ret;
}
} else {
btrfs_free_qgroup_config(fs_info);
fail_trans_kthread:
kthread_stop(fs_info->transaction_kthread);
- btrfs_cleanup_transaction(fs_info->tree_root);
+ btrfs_cleanup_transaction(fs_info);
btrfs_free_fs_roots(fs_info);
fail_cleaner:
kthread_stop(fs_info->cleaner_kthread);
return err;
recovery_tree_root:
- if (!btrfs_test_opt(tree_root->fs_info, USEBACKUPROOT))
+ if (!btrfs_test_opt(fs_info, USEBACKUPROOT))
goto fail_tree_roots;
free_root_pointers(fs_info, 0);
struct btrfs_device *device = (struct btrfs_device *)
bh->b_private;
- btrfs_warn_rl_in_rcu(device->dev_root->fs_info,
+ btrfs_warn_rl_in_rcu(device->fs_info,
"lost page write due to IO error on %s",
rcu_str_deref(device->name));
/* note, we don't set_buffer_write_io_error because we have
bh = __getblk(device->bdev, bytenr / 4096,
BTRFS_SUPER_INFO_SIZE);
if (!bh) {
- btrfs_err(device->dev_root->fs_info,
+ btrfs_err(device->fs_info,
"couldn't get super buffer head for bytenr %llu",
bytenr);
errors++;
return num_tolerated_disk_barrier_failures;
}
-static int write_all_supers(struct btrfs_root *root, int max_mirrors)
+static int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors)
{
struct list_head *head;
struct btrfs_device *dev;
int total_errors = 0;
u64 flags;
- do_barriers = !btrfs_test_opt(root->fs_info, NOBARRIER);
- backup_super_roots(root->fs_info);
+ do_barriers = !btrfs_test_opt(fs_info, NOBARRIER);
+ backup_super_roots(fs_info);
- sb = root->fs_info->super_for_commit;
+ sb = fs_info->super_for_commit;
dev_item = &sb->dev_item;
- mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
- head = &root->fs_info->fs_devices->devices;
- max_errors = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
+ mutex_lock(&fs_info->fs_devices->device_list_mutex);
+ head = &fs_info->fs_devices->devices;
+ max_errors = btrfs_super_num_devices(fs_info->super_copy) - 1;
if (do_barriers) {
- ret = barrier_all_devices(root->fs_info);
+ ret = barrier_all_devices(fs_info);
if (ret) {
mutex_unlock(
- &root->fs_info->fs_devices->device_list_mutex);
- btrfs_handle_fs_error(root->fs_info, ret,
- "errors while submitting device barriers.");
+ &fs_info->fs_devices->device_list_mutex);
+ btrfs_handle_fs_error(fs_info, ret,
+ "errors while submitting device barriers.");
return ret;
}
}
total_errors++;
}
if (total_errors > max_errors) {
- btrfs_err(root->fs_info, "%d errors while writing supers",
- total_errors);
- mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
+ btrfs_err(fs_info, "%d errors while writing supers",
+ total_errors);
+ mutex_unlock(&fs_info->fs_devices->device_list_mutex);
/* FUA is masked off if unsupported and can't be the reason */
- btrfs_handle_fs_error(root->fs_info, -EIO,
- "%d errors while writing supers", total_errors);
+ btrfs_handle_fs_error(fs_info, -EIO,
+ "%d errors while writing supers",
+ total_errors);
return -EIO;
}
if (ret)
total_errors++;
}
- mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
+ mutex_unlock(&fs_info->fs_devices->device_list_mutex);
if (total_errors > max_errors) {
- btrfs_handle_fs_error(root->fs_info, -EIO,
- "%d errors while writing supers", total_errors);
+ btrfs_handle_fs_error(fs_info, -EIO,
+ "%d errors while writing supers",
+ total_errors);
return -EIO;
}
return 0;
}
int write_ctree_super(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, int max_mirrors)
+ struct btrfs_fs_info *fs_info, int max_mirrors)
{
- return write_all_supers(root, max_mirrors);
+ return write_all_supers(fs_info, max_mirrors);
}
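
A quick numeric check of the error tolerance above: max_errors is the device count minus one, so with, say, three devices up to two per-device failures are tolerated, and write_all_supers() only returns -EIO when not even one device got a good superblock write.
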
/* Drop a fs root from the radix tree and free it. */
{
iput(root->ino_cache_inode);
WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
- btrfs_free_block_rsv(root, root->orphan_block_rsv);
+ btrfs_free_block_rsv(root->fs_info, root->orphan_block_rsv);
root->orphan_block_rsv = NULL;
if (root->anon_dev)
free_anon_bdev(root->anon_dev);
return err;
}
-int btrfs_commit_super(struct btrfs_root *root)
+int btrfs_commit_super(struct btrfs_fs_info *fs_info)
{
+ struct btrfs_root *root = fs_info->tree_root;
struct btrfs_trans_handle *trans;
- mutex_lock(&root->fs_info->cleaner_mutex);
- btrfs_run_delayed_iputs(root);
- mutex_unlock(&root->fs_info->cleaner_mutex);
- wake_up_process(root->fs_info->cleaner_kthread);
+ mutex_lock(&fs_info->cleaner_mutex);
+ btrfs_run_delayed_iputs(fs_info);
+ mutex_unlock(&fs_info->cleaner_mutex);
+ wake_up_process(fs_info->cleaner_kthread);
/* wait until ongoing cleanup work is done */
- down_write(&root->fs_info->cleanup_work_sem);
- up_write(&root->fs_info->cleanup_work_sem);
+ down_write(&fs_info->cleanup_work_sem);
+ up_write(&fs_info->cleanup_work_sem);
trans = btrfs_join_transaction(root);
if (IS_ERR(trans))
return PTR_ERR(trans);
- return btrfs_commit_transaction(trans, root);
+ return btrfs_commit_transaction(trans);
}
-void close_ctree(struct btrfs_root *root)
+void close_ctree(struct btrfs_fs_info *fs_info)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
+ struct btrfs_root *root = fs_info->tree_root;
int ret;
set_bit(BTRFS_FS_CLOSING_START, &fs_info->flags);
* block groups queued for removal, the deletion will be
* skipped when we quit the cleaner thread.
*/
- btrfs_delete_unused_bgs(root->fs_info);
+ btrfs_delete_unused_bgs(fs_info);
- ret = btrfs_commit_super(root);
+ ret = btrfs_commit_super(fs_info);
if (ret)
btrfs_err(fs_info, "commit super ret %d", ret);
}
if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
- btrfs_error_commit_super(root);
+ btrfs_error_commit_super(fs_info);
kthread_stop(fs_info->transaction_kthread);
kthread_stop(fs_info->cleaner_kthread);
iput(fs_info->btree_inode);
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
- if (btrfs_test_opt(root->fs_info, CHECK_INTEGRITY))
- btrfsic_unmount(root, fs_info->fs_devices);
+ if (btrfs_test_opt(fs_info, CHECK_INTEGRITY))
+ btrfsic_unmount(fs_info->fs_devices);
#endif
btrfs_close_devices(fs_info->fs_devices);
__btrfs_free_block_rsv(root->orphan_block_rsv);
root->orphan_block_rsv = NULL;
- lock_chunks(root);
+ mutex_lock(&fs_info->chunk_mutex);
while (!list_empty(&fs_info->pinned_chunks)) {
struct extent_map *em;
list_del_init(&em->list);
free_extent_map(em);
}
- unlock_chunks(root);
+ mutex_unlock(&fs_info->chunk_mutex);
}
int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
{
+ struct btrfs_fs_info *fs_info;
struct btrfs_root *root;
u64 transid = btrfs_header_generation(buf);
int was_dirty;
return;
#endif
root = BTRFS_I(buf->pages[0]->mapping->host)->root;
+ fs_info = root->fs_info;
btrfs_assert_tree_locked(buf);
- if (transid != root->fs_info->generation)
+ if (transid != fs_info->generation)
WARN(1, KERN_CRIT "btrfs transid mismatch buffer %llu, found %llu running %llu\n",
- buf->start, transid, root->fs_info->generation);
+ buf->start, transid, fs_info->generation);
was_dirty = set_extent_buffer_dirty(buf);
if (!was_dirty)
- __percpu_counter_add(&root->fs_info->dirty_metadata_bytes,
+ __percpu_counter_add(&fs_info->dirty_metadata_bytes,
buf->len,
- root->fs_info->dirty_metadata_batch);
+ fs_info->dirty_metadata_batch);
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
if (btrfs_header_level(buf) == 0 && check_leaf(root, buf)) {
- btrfs_print_leaf(root, buf);
+ btrfs_print_leaf(fs_info, buf);
ASSERT(0);
}
#endif
}
-static void __btrfs_btree_balance_dirty(struct btrfs_root *root,
+static void __btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info,
int flush_delayed)
{
/*
return;
if (flush_delayed)
- btrfs_balance_delayed_items(root);
+ btrfs_balance_delayed_items(fs_info);
- ret = percpu_counter_compare(&root->fs_info->dirty_metadata_bytes,
+ ret = percpu_counter_compare(&fs_info->dirty_metadata_bytes,
BTRFS_DIRTY_METADATA_THRESH);
if (ret > 0) {
- balance_dirty_pages_ratelimited(
- root->fs_info->btree_inode->i_mapping);
+ balance_dirty_pages_ratelimited(fs_info->btree_inode->i_mapping);
}
}
-void btrfs_btree_balance_dirty(struct btrfs_root *root)
+void btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info)
{
- __btrfs_btree_balance_dirty(root, 1);
+ __btrfs_btree_balance_dirty(fs_info, 1);
}
-void btrfs_btree_balance_dirty_nodelay(struct btrfs_root *root)
+void btrfs_btree_balance_dirty_nodelay(struct btrfs_fs_info *fs_info)
{
- __btrfs_btree_balance_dirty(root, 0);
+ __btrfs_btree_balance_dirty(fs_info, 0);
}
int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
{
struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
- return btree_read_extent_buffer_pages(root, buf, parent_transid);
+ struct btrfs_fs_info *fs_info = root->fs_info;
+
+ return btree_read_extent_buffer_pages(fs_info, buf, parent_transid);
}
static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
return ret;
}
-static void btrfs_error_commit_super(struct btrfs_root *root)
+static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info)
{
- mutex_lock(&root->fs_info->cleaner_mutex);
- btrfs_run_delayed_iputs(root);
- mutex_unlock(&root->fs_info->cleaner_mutex);
+ mutex_lock(&fs_info->cleaner_mutex);
+ btrfs_run_delayed_iputs(fs_info);
+ mutex_unlock(&fs_info->cleaner_mutex);
- down_write(&root->fs_info->cleanup_work_sem);
- up_write(&root->fs_info->cleanup_work_sem);
+ down_write(&fs_info->cleanup_work_sem);
+ up_write(&fs_info->cleanup_work_sem);
/* cleanup FS via transaction */
- btrfs_cleanup_transaction(root);
+ btrfs_cleanup_transaction(fs_info);
}
static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
}
static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
- struct btrfs_root *root)
+ struct btrfs_fs_info *fs_info)
{
struct rb_node *node;
struct btrfs_delayed_ref_root *delayed_refs;
spin_lock(&delayed_refs->lock);
if (atomic_read(&delayed_refs->num_entries) == 0) {
spin_unlock(&delayed_refs->lock);
- btrfs_info(root->fs_info, "delayed_refs has NO entry");
+ btrfs_info(fs_info, "delayed_refs has NO entry");
return ret;
}
list) {
ref->in_tree = 0;
list_del(&ref->list);
+ if (!list_empty(&ref->add_list))
+ list_del(&ref->add_list);
atomic_dec(&delayed_refs->num_entries);
btrfs_put_delayed_ref(ref);
}
mutex_unlock(&head->mutex);
if (pin_bytes)
- btrfs_pin_extent(root, head->node.bytenr,
+ btrfs_pin_extent(fs_info, head->node.bytenr,
head->node.num_bytes, 1);
btrfs_put_delayed_ref(&head->node);
cond_resched();
spin_unlock(&fs_info->delalloc_root_lock);
}
-static int btrfs_destroy_marked_extents(struct btrfs_root *root,
+static int btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
struct extent_io_tree *dirty_pages,
int mark)
{
clear_extent_bits(dirty_pages, start, end, mark);
while (start <= end) {
- eb = btrfs_find_tree_block(root->fs_info, start);
- start += root->nodesize;
+ eb = find_extent_buffer(fs_info, start);
+ start += fs_info->nodesize;
if (!eb)
continue;
wait_on_extent_buffer_writeback(eb);
return ret;
}
-static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
+static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
struct extent_io_tree *pinned_extents)
{
struct extent_io_tree *unpin;
break;
clear_extent_dirty(unpin, start, end);
- btrfs_error_unpin_extent_range(root, start, end);
+ btrfs_error_unpin_extent_range(fs_info, start, end);
cond_resched();
}
if (loop) {
- if (unpin == &root->fs_info->freed_extents[0])
- unpin = &root->fs_info->freed_extents[1];
+ if (unpin == &fs_info->freed_extents[0])
+ unpin = &fs_info->freed_extents[1];
else
- unpin = &root->fs_info->freed_extents[0];
+ unpin = &fs_info->freed_extents[0];
loop = false;
goto again;
}
}
void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans,
- struct btrfs_root *root)
+ struct btrfs_fs_info *fs_info)
{
struct btrfs_block_group_cache *cache;
struct btrfs_block_group_cache,
dirty_list);
if (!cache) {
- btrfs_err(root->fs_info,
- "orphan block group dirty_bgs list");
+ btrfs_err(fs_info, "orphan block group dirty_bgs list");
spin_unlock(&cur_trans->dirty_bgs_lock);
return;
}
struct btrfs_block_group_cache,
io_list);
if (!cache) {
- btrfs_err(root->fs_info,
- "orphan block group on io_bgs list");
+ btrfs_err(fs_info, "orphan block group on io_bgs list");
return;
}
}
void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
- struct btrfs_root *root)
+ struct btrfs_fs_info *fs_info)
{
- btrfs_cleanup_dirty_bgs(cur_trans, root);
+ btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
ASSERT(list_empty(&cur_trans->dirty_bgs));
ASSERT(list_empty(&cur_trans->io_bgs));
- btrfs_destroy_delayed_refs(cur_trans, root);
+ btrfs_destroy_delayed_refs(cur_trans, fs_info);
cur_trans->state = TRANS_STATE_COMMIT_START;
- wake_up(&root->fs_info->transaction_blocked_wait);
+ wake_up(&fs_info->transaction_blocked_wait);
cur_trans->state = TRANS_STATE_UNBLOCKED;
- wake_up(&root->fs_info->transaction_wait);
+ wake_up(&fs_info->transaction_wait);
- btrfs_destroy_delayed_inodes(root);
- btrfs_assert_delayed_root_empty(root);
+ btrfs_destroy_delayed_inodes(fs_info);
+ btrfs_assert_delayed_root_empty(fs_info);
- btrfs_destroy_marked_extents(root, &cur_trans->dirty_pages,
+ btrfs_destroy_marked_extents(fs_info, &cur_trans->dirty_pages,
EXTENT_DIRTY);
- btrfs_destroy_pinned_extent(root,
- root->fs_info->pinned_extents);
+ btrfs_destroy_pinned_extent(fs_info,
+ fs_info->pinned_extents);
cur_trans->state = TRANS_STATE_COMPLETED;
wake_up(&cur_trans->commit_wait);
*/
}
-static int btrfs_cleanup_transaction(struct btrfs_root *root)
+static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info)
{
struct btrfs_transaction *t;
- mutex_lock(&root->fs_info->transaction_kthread_mutex);
+ mutex_lock(&fs_info->transaction_kthread_mutex);
- spin_lock(&root->fs_info->trans_lock);
- while (!list_empty(&root->fs_info->trans_list)) {
- t = list_first_entry(&root->fs_info->trans_list,
+ spin_lock(&fs_info->trans_lock);
+ while (!list_empty(&fs_info->trans_list)) {
+ t = list_first_entry(&fs_info->trans_list,
struct btrfs_transaction, list);
if (t->state >= TRANS_STATE_COMMIT_START) {
atomic_inc(&t->use_count);
- spin_unlock(&root->fs_info->trans_lock);
- btrfs_wait_for_commit(root, t->transid);
+ spin_unlock(&fs_info->trans_lock);
+ btrfs_wait_for_commit(fs_info, t->transid);
btrfs_put_transaction(t);
- spin_lock(&root->fs_info->trans_lock);
+ spin_lock(&fs_info->trans_lock);
continue;
}
- if (t == root->fs_info->running_transaction) {
+ if (t == fs_info->running_transaction) {
t->state = TRANS_STATE_COMMIT_DOING;
- spin_unlock(&root->fs_info->trans_lock);
+ spin_unlock(&fs_info->trans_lock);
/*
* We wait for 0 num_writers since we don't hold a trans
* handle open currently for this transaction.
wait_event(t->writer_wait,
atomic_read(&t->num_writers) == 0);
} else {
- spin_unlock(&root->fs_info->trans_lock);
+ spin_unlock(&fs_info->trans_lock);
}
- btrfs_cleanup_one_transaction(t, root);
+ btrfs_cleanup_one_transaction(t, fs_info);
- spin_lock(&root->fs_info->trans_lock);
- if (t == root->fs_info->running_transaction)
- root->fs_info->running_transaction = NULL;
+ spin_lock(&fs_info->trans_lock);
+ if (t == fs_info->running_transaction)
+ fs_info->running_transaction = NULL;
list_del_init(&t->list);
- spin_unlock(&root->fs_info->trans_lock);
+ spin_unlock(&fs_info->trans_lock);
btrfs_put_transaction(t);
- trace_btrfs_transaction_commit(root);
- spin_lock(&root->fs_info->trans_lock);
- }
- spin_unlock(&root->fs_info->trans_lock);
- btrfs_destroy_all_ordered_extents(root->fs_info);
- btrfs_destroy_delayed_inodes(root);
- btrfs_assert_delayed_root_empty(root);
- btrfs_destroy_pinned_extent(root, root->fs_info->pinned_extents);
- btrfs_destroy_all_delalloc_inodes(root->fs_info);
- mutex_unlock(&root->fs_info->transaction_kthread_mutex);
+ trace_btrfs_transaction_commit(fs_info->tree_root);
+ spin_lock(&fs_info->trans_lock);
+ }
+ spin_unlock(&fs_info->trans_lock);
+ btrfs_destroy_all_ordered_extents(fs_info);
+ btrfs_destroy_delayed_inodes(fs_info);
+ btrfs_assert_delayed_root_empty(fs_info);
+ btrfs_destroy_pinned_extent(fs_info, fs_info->pinned_extents);
+ btrfs_destroy_all_delalloc_inodes(fs_info);
+ mutex_unlock(&fs_info->transaction_kthread_mutex);
return 0;
}
#include <linux/falloc.h>
#include <linux/swap.h>
#include <linux/writeback.h>
-#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/btrfs.h>
static int __btrfs_add_inode_defrag(struct inode *inode,
struct inode_defrag *defrag)
{
- struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct inode_defrag *entry;
struct rb_node **p;
struct rb_node *parent = NULL;
int ret;
- p = &root->fs_info->defrag_inodes.rb_node;
+ p = &fs_info->defrag_inodes.rb_node;
while (*p) {
parent = *p;
entry = rb_entry(parent, struct inode_defrag, rb_node);
}
set_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
rb_link_node(&defrag->rb_node, parent, p);
- rb_insert_color(&defrag->rb_node, &root->fs_info->defrag_inodes);
+ rb_insert_color(&defrag->rb_node, &fs_info->defrag_inodes);
return 0;
}
-static inline int __need_auto_defrag(struct btrfs_root *root)
+static inline int __need_auto_defrag(struct btrfs_fs_info *fs_info)
{
- if (!btrfs_test_opt(root->fs_info, AUTO_DEFRAG))
+ if (!btrfs_test_opt(fs_info, AUTO_DEFRAG))
return 0;
- if (btrfs_fs_closing(root->fs_info))
+ if (btrfs_fs_closing(fs_info))
return 0;
return 1;
int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
struct inode *inode)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct inode_defrag *defrag;
u64 transid;
int ret;
- if (!__need_auto_defrag(root))
+ if (!__need_auto_defrag(fs_info))
return 0;
if (test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags))
defrag->transid = transid;
defrag->root = root->root_key.objectid;
- spin_lock(&root->fs_info->defrag_inodes_lock);
+ spin_lock(&fs_info->defrag_inodes_lock);
if (!test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags)) {
/*
* If we set IN_DEFRAG flag and evict the inode from memory,
} else {
kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
}
- spin_unlock(&root->fs_info->defrag_inodes_lock);
+ spin_unlock(&fs_info->defrag_inodes_lock);
return 0;
}
static void btrfs_requeue_inode_defrag(struct inode *inode,
struct inode_defrag *defrag)
{
- struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
int ret;
- if (!__need_auto_defrag(root))
+ if (!__need_auto_defrag(fs_info))
goto out;
/*
* Here we don't check the IN_DEFRAG flag, because we need to merge
* them together.
*/
- spin_lock(&root->fs_info->defrag_inodes_lock);
+ spin_lock(&fs_info->defrag_inodes_lock);
ret = __btrfs_add_inode_defrag(inode, defrag);
- spin_unlock(&root->fs_info->defrag_inodes_lock);
+ spin_unlock(&fs_info->defrag_inodes_lock);
if (ret)
goto out;
return;
&fs_info->fs_state))
break;
- if (!__need_auto_defrag(fs_info->tree_root))
+ if (!__need_auto_defrag(fs_info))
break;
/* find an inode to defrag */
* this also makes the decision about creating an inline extent vs
* doing real data extents, marking pages dirty and delalloc as required.
*/
-int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
- struct page **pages, size_t num_pages,
- loff_t pos, size_t write_bytes,
- struct extent_state **cached)
+int btrfs_dirty_pages(struct inode *inode, struct page **pages,
+ size_t num_pages, loff_t pos, size_t write_bytes,
+ struct extent_state **cached)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
int err = 0;
int i;
u64 num_bytes;
u64 end_pos = pos + write_bytes;
loff_t isize = i_size_read(inode);
- start_pos = pos & ~((u64)root->sectorsize - 1);
- num_bytes = round_up(write_bytes + pos - start_pos, root->sectorsize);
+ start_pos = pos & ~((u64) fs_info->sectorsize - 1);
+ num_bytes = round_up(write_bytes + pos - start_pos,
+ fs_info->sectorsize);
end_of_last_block = start_pos + num_bytes - 1;
err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
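
As a quick sanity check of the rounding above (numbers are just an example): with a 4096-byte sectorsize, pos = 6000 and write_bytes = 3000 give start_pos = 4096 and num_bytes = round_up(3000 + 6000 - 4096, 4096) = 8192, so the delalloc range set up here covers exactly the two sectors touched by the write.
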
u32 extent_item_size,
int *key_inserted)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_buffer *leaf;
struct btrfs_file_extent_item *fi;
struct btrfs_key key;
u64 num_bytes = 0;
u64 extent_offset = 0;
u64 extent_end = 0;
+ u64 last_end = start;
int del_nr = 0;
int del_slot = 0;
int extent_type;
modify_tree = 0;
update_refs = (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
- root == root->fs_info->tree_root);
+ root == fs_info->tree_root);
while (1) {
recow = 0;
ret = btrfs_lookup_file_extent(trans, root, path, ino,
* extent item in the call to setup_items_for_insert() later
* in this function.
*/
- if (extent_end == key.offset && extent_end >= search_start)
+ if (extent_end == key.offset && extent_end >= search_start) {
+ last_end = extent_end;
goto delete_extent_item;
+ }
if (extent_end <= search_start) {
path->slots[0]++;
btrfs_mark_buffer_dirty(leaf);
if (update_refs && disk_bytenr > 0) {
- ret = btrfs_inc_extent_ref(trans, root,
+ ret = btrfs_inc_extent_ref(trans, fs_info,
disk_bytenr, num_bytes, 0,
root->root_key.objectid,
new_key.objectid,
}
key.offset = start;
}
+ /*
+ * From here on out we will have actually dropped something, so
+ * last_end can be updated.
+ */
+ last_end = extent_end;
+
/*
* | ---- range to drop ----- |
* | -------- extent -------- |
memcpy(&new_key, &key, sizeof(new_key));
new_key.offset = end;
- btrfs_set_item_key_safe(root->fs_info, path, &new_key);
+ btrfs_set_item_key_safe(fs_info, path, &new_key);
extent_offset += end - key.offset;
btrfs_set_file_extent_offset(leaf, fi, extent_offset);
inode_sub_bytes(inode,
extent_end - key.offset);
extent_end = ALIGN(extent_end,
- root->sectorsize);
+ fs_info->sectorsize);
} else if (update_refs && disk_bytenr > 0) {
- ret = btrfs_free_extent(trans, root,
+ ret = btrfs_free_extent(trans, fs_info,
disk_bytenr, num_bytes, 0,
root->root_key.objectid,
key.objectid, key.offset -
if (!ret && replace_extent && leafs_visited == 1 &&
(path->locks[0] == BTRFS_WRITE_LOCK_BLOCKING ||
path->locks[0] == BTRFS_WRITE_LOCK) &&
- btrfs_leaf_free_space(root, leaf) >=
+ btrfs_leaf_free_space(fs_info, leaf) >=
sizeof(struct btrfs_item) + extent_item_size) {
key.objectid = ino;
if (!replace_extent || !(*key_inserted))
btrfs_release_path(path);
if (drop_end)
- *drop_end = found ? min(end, extent_end) : end;
+ *drop_end = found ? min(end, last_end) : end;
return ret;
}
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
struct inode *inode, u64 start, u64 end)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct extent_buffer *leaf;
struct btrfs_path *path;
ino, bytenr, orig_offset,
&other_start, &other_end)) {
new_key.offset = end;
- btrfs_set_item_key_safe(root->fs_info, path, &new_key);
+ btrfs_set_item_key_safe(fs_info, path, &new_key);
fi = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
btrfs_set_file_extent_generation(leaf, fi,
trans->transid);
path->slots[0]++;
new_key.offset = start;
- btrfs_set_item_key_safe(root->fs_info, path, &new_key);
+ btrfs_set_item_key_safe(fs_info, path, &new_key);
fi = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
extent_end - split);
btrfs_mark_buffer_dirty(leaf);
- ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
- root->root_key.objectid,
+ ret = btrfs_inc_extent_ref(trans, fs_info, bytenr, num_bytes,
+ 0, root->root_key.objectid,
ino, orig_offset);
if (ret) {
btrfs_abort_transaction(trans, ret);
extent_end = other_end;
del_slot = path->slots[0] + 1;
del_nr++;
- ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
+ ret = btrfs_free_extent(trans, fs_info, bytenr, num_bytes,
0, root->root_key.objectid,
ino, orig_offset);
if (ret) {
key.offset = other_start;
del_slot = path->slots[0];
del_nr++;
- ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
+ ret = btrfs_free_extent(trans, fs_info, bytenr, num_bytes,
0, root->root_key.objectid,
ino, orig_offset);
if (ret) {
u64 *lockstart, u64 *lockend,
struct extent_state **cached_state)
{
- struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
u64 start_pos;
u64 last_pos;
int i;
int ret = 0;
- start_pos = round_down(pos, root->sectorsize);
+ start_pos = round_down(pos, fs_info->sectorsize);
last_pos = start_pos
- + round_up(pos + write_bytes - start_pos, root->sectorsize) - 1;
+ + round_up(pos + write_bytes - start_pos,
+ fs_info->sectorsize) - 1;
if (start_pos < inode->i_size) {
struct btrfs_ordered_extent *ordered;
static noinline int check_can_nocow(struct inode *inode, loff_t pos,
size_t *write_bytes)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_ordered_extent *ordered;
u64 lockstart, lockend;
if (!ret)
return -ENOSPC;
- lockstart = round_down(pos, root->sectorsize);
- lockend = round_up(pos + *write_bytes, root->sectorsize) - 1;
+ lockstart = round_down(pos, fs_info->sectorsize);
+ lockend = round_up(pos + *write_bytes,
+ fs_info->sectorsize) - 1;
while (1) {
lock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend);
loff_t pos)
{
struct inode *inode = file_inode(file);
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct page **pages = NULL;
struct extent_state *cached_state = NULL;
break;
}
- sector_offset = pos & (root->sectorsize - 1);
+ sector_offset = pos & (fs_info->sectorsize - 1);
reserve_bytes = round_up(write_bytes + sector_offset,
- root->sectorsize);
+ fs_info->sectorsize);
ret = btrfs_check_data_free_space(inode, pos, write_bytes);
if (ret < 0) {
PAGE_SIZE);
reserve_bytes = round_up(write_bytes +
sector_offset,
- root->sectorsize);
+ fs_info->sectorsize);
} else {
break;
}
copied = btrfs_copy_from_user(pos, write_bytes, pages, i);
- num_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info,
- reserve_bytes);
+ num_sectors = BTRFS_BYTES_TO_BLKS(fs_info, reserve_bytes);
dirty_sectors = round_up(copied + sector_offset,
- root->sectorsize);
- dirty_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info,
- dirty_sectors);
+ fs_info->sectorsize);
+ dirty_sectors = BTRFS_BYTES_TO_BLKS(fs_info, dirty_sectors);
/*
* if we have trouble faulting in the pages, fall
* managed to copy.
*/
if (num_sectors > dirty_sectors) {
-
/* release everything except the sectors we dirtied */
release_bytes -= dirty_sectors <<
- root->fs_info->sb->s_blocksize_bits;
-
+ fs_info->sb->s_blocksize_bits;
if (copied > 0) {
spin_lock(&BTRFS_I(inode)->lock);
BTRFS_I(inode)->outstanding_extents++;
} else {
u64 __pos;
- __pos = round_down(pos, root->sectorsize) +
+ __pos = round_down(pos,
+ fs_info->sectorsize) +
(dirty_pages << PAGE_SHIFT);
btrfs_delalloc_release_space(inode, __pos,
release_bytes);
}
release_bytes = round_up(copied + sector_offset,
- root->sectorsize);
+ fs_info->sectorsize);
if (copied > 0)
- ret = btrfs_dirty_pages(root, inode, pages,
- dirty_pages, pos, copied,
- NULL);
+ ret = btrfs_dirty_pages(inode, pages, dirty_pages,
+ pos, copied, NULL);
if (need_unlock)
unlock_extent_cached(&BTRFS_I(inode)->io_tree,
lockstart, lockend, &cached_state,
btrfs_end_write_no_snapshoting(root);
if (only_release_metadata && copied > 0) {
- lockstart = round_down(pos, root->sectorsize);
- lockend = round_up(pos + copied, root->sectorsize) - 1;
+ lockstart = round_down(pos,
+ fs_info->sectorsize);
+ lockend = round_up(pos + copied,
+ fs_info->sectorsize) - 1;
set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
lockend, EXTENT_NORESERVE, NULL,
cond_resched();
balance_dirty_pages_ratelimited(inode->i_mapping);
- if (dirty_pages < (root->nodesize >> PAGE_SHIFT) + 1)
- btrfs_btree_balance_dirty(root);
+ if (dirty_pages < (fs_info->nodesize >> PAGE_SHIFT) + 1)
+ btrfs_btree_balance_dirty(fs_info);
pos += copied;
num_written += copied;
btrfs_delalloc_release_metadata(inode, release_bytes);
} else {
btrfs_delalloc_release_space(inode,
- round_down(pos, root->sectorsize),
+ round_down(pos, fs_info->sectorsize),
release_bytes);
}
}
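When the copy from user space comes up short, the loop above releases the reservation for sectors that were never dirtied; the shift by s_blocksize_bits converts sector counts back into bytes. A small stand-alone sketch of that bookkeeping, assuming a 4096-byte sectorsize (so s_blocksize_bits is 12) and made-up byte counts:

#include <stdio.h>
#include <stdint.h>

#define ROUND_UP(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

int main(void)
{
    uint64_t sectorsize = 4096;           /* assumed fs_info->sectorsize */
    unsigned int blocksize_bits = 12;     /* log2(sectorsize) */
    uint64_t sector_offset = 512;         /* pos & (sectorsize - 1) */
    uint64_t write_bytes = 20000, copied = 9000;

    uint64_t reserve_bytes = ROUND_UP(write_bytes + sector_offset, sectorsize);
    uint64_t release_bytes = reserve_bytes;
    uint64_t num_sectors = reserve_bytes >> blocksize_bits;
    uint64_t dirty_sectors = ROUND_UP(copied + sector_offset, sectorsize)
                 >> blocksize_bits;

    if (num_sectors > dirty_sectors)
        /* release everything except the sectors we dirtied */
        release_bytes -= dirty_sectors << blocksize_bits;

    /* reserved 24576 bytes (6 sectors), dirtied 3, so 12288 bytes go back */
    printf("reserve=%llu dirty_sectors=%llu release=%llu\n",
           (unsigned long long)reserve_bytes,
           (unsigned long long)dirty_sectors,
           (unsigned long long)release_bytes);
    return 0;
}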
if (IS_NOCMTIME(inode))
return;
- now = current_fs_time(inode->i_sb);
+ now = current_time(inode);
if (!timespec_equal(&inode->i_mtime, &now))
inode->i_mtime = now;
{
struct file *file = iocb->ki_filp;
struct inode *inode = file_inode(file);
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
u64 start_pos;
u64 end_pos;
* although we have opened a file as writable, we have
* to stop this write operation to ensure FS consistency.
*/
- if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state)) {
+ if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
inode_unlock(inode);
err = -EROFS;
goto out;
pos = iocb->ki_pos;
count = iov_iter_count(from);
- start_pos = round_down(pos, root->sectorsize);
+ start_pos = round_down(pos, fs_info->sectorsize);
oldsize = i_size_read(inode);
if (start_pos > oldsize) {
/* Expand hole size to cover write data, preventing empty gap */
- end_pos = round_up(pos + count, root->sectorsize);
+ end_pos = round_up(pos + count,
+ fs_info->sectorsize);
err = btrfs_cont_expand(inode, oldsize, end_pos);
if (err) {
inode_unlock(inode);
goto out;
}
- if (start_pos > round_up(oldsize, root->sectorsize))
+ if (start_pos > round_up(oldsize, fs_info->sectorsize))
clean_page = 1;
}
{
struct dentry *dentry = file_dentry(file);
struct inode *inode = d_inode(dentry);
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans;
struct btrfs_log_ctx ctx;
* commit does not start nor waits for ordered extents to complete.
*/
smp_mb();
- if (btrfs_inode_in_log(inode, root->fs_info->generation) ||
+ if (btrfs_inode_in_log(inode, fs_info->generation) ||
(full_sync && BTRFS_I(inode)->last_trans <=
- root->fs_info->last_trans_committed) ||
+ fs_info->last_trans_committed) ||
(!btrfs_have_ordered_extents_in_range(inode, start, len) &&
BTRFS_I(inode)->last_trans
- <= root->fs_info->last_trans_committed)) {
+ <= fs_info->last_trans_committed)) {
/*
* We've had everything committed since the last time we were
* modified so clear this flag in case it was set for whatever
* flags for any errors that might have happened while doing
* writeback of file data.
*/
- ret = btrfs_inode_check_errors(inode);
+ ret = filemap_check_errors(inode->i_mapping);
inode_unlock(inode);
goto out;
}
* which are indicated by ctx.io_err.
*/
if (ctx.io_err) {
- btrfs_end_transaction(trans, root);
+ btrfs_end_transaction(trans);
ret = ctx.io_err;
goto out;
}
if (!ret) {
ret = btrfs_sync_log(trans, root, &ctx);
if (!ret) {
- ret = btrfs_end_transaction(trans, root);
+ ret = btrfs_end_transaction(trans);
goto out;
}
}
if (!full_sync) {
ret = btrfs_wait_ordered_range(inode, start, len);
if (ret) {
- btrfs_end_transaction(trans, root);
+ btrfs_end_transaction(trans);
goto out;
}
}
- ret = btrfs_commit_transaction(trans, root);
+ ret = btrfs_commit_transaction(trans);
} else {
- ret = btrfs_end_transaction(trans, root);
+ ret = btrfs_end_transaction(trans);
}
out:
return ret > 0 ? -EIO : ret;
static int fill_holes(struct btrfs_trans_handle *trans, struct inode *inode,
struct btrfs_path *path, u64 offset, u64 end)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct extent_buffer *leaf;
struct btrfs_file_extent_item *fi;
struct btrfs_key key;
int ret;
- if (btrfs_fs_incompat(root->fs_info, NO_HOLES))
+ if (btrfs_fs_incompat(fs_info, NO_HOLES))
goto out;
key.objectid = btrfs_ino(inode);
key.offset = offset;
ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
- if (ret < 0)
+ if (ret <= 0) {
+ /*
+ * We should have dropped this offset, so if we find it then
+ * something has gone horribly wrong.
+ */
+ if (ret == 0)
+ ret = -EINVAL;
return ret;
- BUG_ON(!ret);
+ }
leaf = path->nodes[0];
if (hole_mergeable(inode, leaf, path->slots[0]-1, offset, end)) {
u64 num_bytes;
key.offset = offset;
- btrfs_set_item_key_safe(root->fs_info, path, &key);
+ btrfs_set_item_key_safe(fs_info, path, &key);
fi = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + end -
hole_em->block_start = EXTENT_MAP_HOLE;
hole_em->block_len = 0;
hole_em->orig_block_len = 0;
- hole_em->bdev = root->fs_info->fs_devices->latest_bdev;
+ hole_em->bdev = fs_info->fs_devices->latest_bdev;
hole_em->compress_type = BTRFS_COMPRESS_NONE;
hole_em->generation = trans->transid;
static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct extent_state *cached_state = NULL;
struct btrfs_path *path;
u64 tail_len;
u64 orig_start = offset;
u64 cur_offset;
- u64 min_size = btrfs_calc_trunc_metadata_size(fs_info, 1);
- u64 min_size = btrfs_calc_trans_metadata_size(root, 1);
++ u64 min_size = btrfs_calc_trans_metadata_size(fs_info, 1);
u64 drop_end;
int ret = 0;
int err = 0;
unsigned int rsv_count;
bool same_block;
- bool no_holes = btrfs_fs_incompat(root->fs_info, NO_HOLES);
+ bool no_holes = btrfs_fs_incompat(fs_info, NO_HOLES);
u64 ino_size;
bool truncated_block = false;
bool updated_inode = false;
return ret;
inode_lock(inode);
- ino_size = round_up(inode->i_size, root->sectorsize);
+ ino_size = round_up(inode->i_size, fs_info->sectorsize);
ret = find_first_non_hole(inode, &offset, &len);
if (ret < 0)
goto out_only_mutex;
goto out_only_mutex;
}
- lockstart = round_up(offset, BTRFS_I(inode)->root->sectorsize);
+ lockstart = round_up(offset, btrfs_inode_sectorsize(inode));
lockend = round_down(offset + len,
- BTRFS_I(inode)->root->sectorsize) - 1;
- same_block = (BTRFS_BYTES_TO_BLKS(root->fs_info, offset))
- == (BTRFS_BYTES_TO_BLKS(root->fs_info, offset + len - 1));
+ btrfs_inode_sectorsize(inode)) - 1;
+ same_block = (BTRFS_BYTES_TO_BLKS(fs_info, offset))
+ == (BTRFS_BYTES_TO_BLKS(fs_info, offset + len - 1));
/*
* We needn't truncate any block which is beyond the end of the file
* because we are sure there is no data there.
* Only do this if we are in the same block and we aren't doing the
* entire block.
*/
- if (same_block && len < root->sectorsize) {
+ if (same_block && len < fs_info->sectorsize) {
if (offset < ino_size) {
truncated_block = true;
ret = btrfs_truncate_block(inode, offset, len, 0);
goto out;
}
- rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
+ rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
if (!rsv) {
ret = -ENOMEM;
goto out_free;
}
- rsv->size = btrfs_calc_trunc_metadata_size(fs_info, 1);
- rsv->size = btrfs_calc_trans_metadata_size(root, 1);
++ rsv->size = btrfs_calc_trans_metadata_size(fs_info, 1);
rsv->failfast = 1;
/*
goto out_free;
}
- ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, rsv,
+ ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv,
min_size, 0);
BUG_ON(ret);
trans->block_rsv = rsv;
if (ret != -ENOSPC)
break;
- trans->block_rsv = &root->fs_info->trans_block_rsv;
+ trans->block_rsv = &fs_info->trans_block_rsv;
- if (cur_offset < ino_size) {
+ if (cur_offset < drop_end && cur_offset < ino_size) {
ret = fill_holes(trans, inode, path, cur_offset,
drop_end);
if (ret) {
+ /*
+ * If we failed then we didn't insert our hole
+ * entries for the area we dropped, so the fs is
+ * now corrupted and we must abort the
+ * transaction.
+ */
+ btrfs_abort_transaction(trans, ret);
err = ret;
break;
}
break;
}
- btrfs_end_transaction(trans, root);
- btrfs_btree_balance_dirty(root);
+ btrfs_end_transaction(trans);
+ btrfs_btree_balance_dirty(fs_info);
trans = btrfs_start_transaction(root, rsv_count);
if (IS_ERR(trans)) {
break;
}
- ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv,
+ ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
rsv, min_size, 0);
BUG_ON(ret); /* shouldn't happen */
trans->block_rsv = rsv;
goto out_trans;
}
- trans->block_rsv = &root->fs_info->trans_block_rsv;
+ trans->block_rsv = &fs_info->trans_block_rsv;
/*
* If we are using the NO_HOLES feature we might have had already an
* hole that overlaps a part of the region [lockstart, lockend] and
if (cur_offset < ino_size && cur_offset < drop_end) {
ret = fill_holes(trans, inode, path, cur_offset, drop_end);
if (ret) {
+ /* Same comment as above. */
+ btrfs_abort_transaction(trans, ret);
err = ret;
goto out_trans;
}
goto out_free;
inode_inc_iversion(inode);
- inode->i_mtime = inode->i_ctime = current_fs_time(inode->i_sb);
+ inode->i_mtime = inode->i_ctime = current_time(inode);
- trans->block_rsv = &root->fs_info->trans_block_rsv;
+ trans->block_rsv = &fs_info->trans_block_rsv;
ret = btrfs_update_inode(trans, root, inode);
updated_inode = true;
- btrfs_end_transaction(trans, root);
- btrfs_btree_balance_dirty(root);
+ btrfs_end_transaction(trans);
+ btrfs_btree_balance_dirty(fs_info);
out_free:
btrfs_free_path(path);
- btrfs_free_block_rsv(root, rsv);
+ btrfs_free_block_rsv(fs_info, rsv);
out:
unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
&cached_state, GFP_NOFS);
err = PTR_ERR(trans);
} else {
err = btrfs_update_inode(trans, root, inode);
- ret = btrfs_end_transaction(trans, root);
+ ret = btrfs_end_transaction(trans);
}
}
inode_unlock(inode);
u64 locked_end;
u64 actual_end = 0;
struct extent_map *em;
- int blocksize = BTRFS_I(inode)->root->sectorsize;
+ int blocksize = btrfs_inode_sectorsize(inode);
int ret;
alloc_start = round_down(offset, blocksize);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
} else {
- inode->i_ctime = current_fs_time(inode->i_sb);
+ inode->i_ctime = current_time(inode);
i_size_write(inode, actual_end);
btrfs_ordered_update_i_size(inode, actual_end, NULL);
ret = btrfs_update_inode(trans, root, inode);
if (ret)
- btrfs_end_transaction(trans, root);
+ btrfs_end_transaction(trans);
else
- ret = btrfs_end_transaction(trans, root);
+ ret = btrfs_end_transaction(trans);
}
}
out_unlock:
static int find_desired_extent(struct inode *inode, loff_t *offset, int whence)
{
- struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct extent_map *em = NULL;
struct extent_state *cached_state = NULL;
u64 lockstart;
*/
start = max_t(loff_t, 0, *offset);
- lockstart = round_down(start, root->sectorsize);
- lockend = round_up(i_size_read(inode), root->sectorsize);
+ lockstart = round_down(start, fs_info->sectorsize);
+ lockend = round_up(i_size_read(inode),
+ fs_info->sectorsize);
if (lockend <= lockstart)
- lockend = lockstart + root->sectorsize;
+ lockend = lockstart + fs_info->sectorsize;
lockend--;
len = lockend - lockstart + 1;
struct btrfs_qgroup *member;
};
-#define ptr_to_u64(x) ((u64)(uintptr_t)x)
-#define u64_to_ptr(x) ((struct btrfs_qgroup *)(uintptr_t)x)
+static inline u64 qgroup_to_aux(struct btrfs_qgroup *qg)
+{
+ return (u64)(uintptr_t)qg;
+}
+
+static inline struct btrfs_qgroup* unode_aux_to_qgroup(struct ulist_node *n)
+{
+ return (struct btrfs_qgroup *)(uintptr_t)n->aux;
+}
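These helpers replace the old ptr_to_u64()/u64_to_ptr() macros with typed inline functions; the underlying trick is just stashing a qgroup pointer in the ulist node's u64 aux field through uintptr_t. A self-contained sketch of that round-trip, using stub structs rather than the real btrfs types:

#include <assert.h>
#include <stdint.h>

struct qgroup { uint64_t qgroupid; };               /* stand-in for btrfs_qgroup */
struct ulist_node { uint64_t val; uint64_t aux; };  /* stand-in for the real node */

static inline uint64_t qgroup_to_aux(struct qgroup *qg)
{
    return (uint64_t)(uintptr_t)qg;
}

static inline struct qgroup *unode_aux_to_qgroup(struct ulist_node *n)
{
    return (struct qgroup *)(uintptr_t)n->aux;
}

int main(void)
{
    struct qgroup qg = { .qgroupid = 5 };
    struct ulist_node node = { .val = qg.qgroupid, .aux = qgroup_to_aux(&qg) };

    /* the cast through uintptr_t preserves the pointer exactly */
    assert(unode_aux_to_qgroup(&node) == &qg);
    return 0;
}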
static int
qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
list_del("a_root->dirty_list);
btrfs_tree_lock(quota_root->node);
- clean_tree_block(trans, tree_root->fs_info, quota_root->node);
+ clean_tree_block(trans, fs_info, quota_root->node);
btrfs_tree_unlock(quota_root->node);
btrfs_free_tree_block(trans, quota_root, quota_root->node, 0, 1);
/* Get all of the parent groups that contain this qgroup */
list_for_each_entry(glist, &qgroup->groups, next_group) {
ret = ulist_add(tmp, glist->group->qgroupid,
- ptr_to_u64(glist->group), GFP_ATOMIC);
+ qgroup_to_aux(glist->group), GFP_ATOMIC);
if (ret < 0)
goto out;
}
/* Iterate all of the parents and adjust their reference counts */
ULIST_ITER_INIT(&uiter);
while ((unode = ulist_next(tmp, &uiter))) {
- qgroup = u64_to_ptr(unode->aux);
+ qgroup = unode_aux_to_qgroup(unode);
qgroup->rfer += sign * num_bytes;
qgroup->rfer_cmpr += sign * num_bytes;
WARN_ON(sign < 0 && qgroup->excl < num_bytes);
/* Add any parents of the parents */
list_for_each_entry(glist, &qgroup->groups, next_group) {
ret = ulist_add(tmp, glist->group->qgroupid,
- ptr_to_u64(glist->group), GFP_ATOMIC);
+ qgroup_to_aux(glist->group), GFP_ATOMIC);
if (ret < 0)
goto out;
}
}
spin_lock(&fs_info->qgroup_lock);
- ret = add_relation_rb(quota_root->fs_info, src, dst);
+ ret = add_relation_rb(fs_info, src, dst);
if (ret < 0) {
spin_unlock(&fs_info->qgroup_lock);
goto out;
}
spin_lock(&fs_info->qgroup_lock);
- del_qgroup_rb(quota_root->fs_info, qgroupid);
+ del_qgroup_rb(fs_info, qgroupid);
spin_unlock(&fs_info->qgroup_lock);
out:
mutex_unlock(&fs_info->qgroup_ioctl_lock);
return ret;
}
-int btrfs_qgroup_insert_dirty_extent_nolock(struct btrfs_fs_info *fs_info,
+int btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_root *delayed_refs,
struct btrfs_qgroup_extent_record *record)
{
u64 bytenr = record->bytenr;
assert_spin_locked(&delayed_refs->lock);
- trace_btrfs_qgroup_insert_dirty_extent(fs_info, record);
+ trace_btrfs_qgroup_trace_extent(fs_info, record);
while (*p) {
parent_node = *p;
return 0;
}
-int btrfs_qgroup_insert_dirty_extent(struct btrfs_trans_handle *trans,
+int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info, u64 bytenr, u64 num_bytes,
gfp_t gfp_flag)
{
record->old_roots = NULL;
spin_lock(&delayed_refs->lock);
- ret = btrfs_qgroup_insert_dirty_extent_nolock(fs_info, delayed_refs,
- record);
+ ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, record);
spin_unlock(&delayed_refs->lock);
if (ret > 0)
kfree(record);
return 0;
}
+int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info,
+ struct extent_buffer *eb)
+{
+ int nr = btrfs_header_nritems(eb);
+ int i, extent_type, ret;
+ struct btrfs_key key;
+ struct btrfs_file_extent_item *fi;
+ u64 bytenr, num_bytes;
+
+ /* We can be called directly from walk_up_proc() */
+ if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
+ return 0;
+
+ for (i = 0; i < nr; i++) {
+ btrfs_item_key_to_cpu(eb, &key, i);
+
+ if (key.type != BTRFS_EXTENT_DATA_KEY)
+ continue;
+
+ fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
+ /* filter out non qgroup-accountable extents */
+ extent_type = btrfs_file_extent_type(eb, fi);
+
+ if (extent_type == BTRFS_FILE_EXTENT_INLINE)
+ continue;
+
+ bytenr = btrfs_file_extent_disk_bytenr(eb, fi);
+ if (!bytenr)
+ continue;
+
+ num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);
+
+ ret = btrfs_qgroup_trace_extent(trans, fs_info, bytenr,
+ num_bytes, GFP_NOFS);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+/*
+ * Walk up the tree from the bottom, freeing leaves and any interior
+ * nodes which have had all slots visited. If a node (leaf or
+ * interior) is freed, the node above it will have its slot
+ * incremented. The root node will never be freed.
+ *
+ * At the end of this function, we should have a path which has all
+ * slots incremented to the next position for a search. If we need to
+ * read a new node it will be NULL and the node above it will have the
+ * correct slot selected for a later read.
+ *
+ * If we increment the root node's slot counter past the number of
+ * elements, 1 is returned to signal completion of the search.
+ */
+static int adjust_slots_upwards(struct btrfs_root *root,
+ struct btrfs_path *path, int root_level)
+{
+ int level = 0;
+ int nr, slot;
+ struct extent_buffer *eb;
+
+ if (root_level == 0)
+ return 1;
+
+ while (level <= root_level) {
+ eb = path->nodes[level];
+ nr = btrfs_header_nritems(eb);
+ path->slots[level]++;
+ slot = path->slots[level];
+ if (slot >= nr || level == 0) {
+ /*
+ * Don't free the root - we will detect this
+ * condition after our loop and return a
+ * positive value for caller to stop walking the tree.
+ */
+ if (level != root_level) {
+ btrfs_tree_unlock_rw(eb, path->locks[level]);
+ path->locks[level] = 0;
+
+ free_extent_buffer(eb);
+ path->nodes[level] = NULL;
+ path->slots[level] = 0;
+ }
+ } else {
+ /*
+ * We have a valid slot to walk back down
+ * from. Stop here so caller can process these
+ * new nodes.
+ */
+ break;
+ }
+
+ level++;
+ }
+
+ eb = path->nodes[root_level];
+ if (path->slots[root_level] >= btrfs_header_nritems(eb))
+ return 1;
+
+ return 0;
+}
+
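adjust_slots_upwards() behaves like a carry-propagating counter over the path: bump the slot at each level, reset and move up when a node is exhausted, and report completion once the root's slot runs past its item count. A toy user-space model of that logic over an array of per-level item counts (hypothetical nritems values, no extent buffers or locking):

#include <stdio.h>

#define MAX_LEVEL 3

/* slots[level] is the next child index at that level; nritems[level] is how
 * many children the node currently on the path at that level has. */
static int adjust_slots_upwards(int *slots, const int *nritems, int root_level)
{
    int level;

    if (root_level == 0)
        return 1;

    for (level = 0; level <= root_level; level++) {
        slots[level]++;
        if (slots[level] < nritems[level] && level != 0)
            break;            /* valid slot found, walk back down from here */
        if (level != root_level)
            slots[level] = 0; /* node exhausted, keep carrying upwards */
    }

    return slots[root_level] >= nritems[root_level];
}

int main(void)
{
    int nritems[MAX_LEVEL] = { 2, 2, 2 };  /* leaf, node, root */
    int slots[MAX_LEVEL] = { 0, 0, 0 };
    int done = 0, visited = 0;

    while (!done) {
        visited++;            /* "process" the current leaf here */
        done = adjust_slots_upwards(slots, nritems, MAX_LEVEL - 1);
    }
    /* 2 * 2 = 4 leaves are visited before the root's slot runs past the end */
    printf("visited %d leaves\n", visited);
    return 0;
}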
+int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct extent_buffer *root_eb,
+ u64 root_gen, int root_level)
+{
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ int ret = 0;
+ int level;
+ struct extent_buffer *eb = root_eb;
+ struct btrfs_path *path = NULL;
+
+ BUG_ON(root_level < 0 || root_level > BTRFS_MAX_LEVEL);
+ BUG_ON(root_eb == NULL);
+
+ if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
+ return 0;
+
+ if (!extent_buffer_uptodate(root_eb)) {
+ ret = btrfs_read_buffer(root_eb, root_gen);
+ if (ret)
+ goto out;
+ }
+
+ if (root_level == 0) {
+ ret = btrfs_qgroup_trace_leaf_items(trans, fs_info, root_eb);
+ goto out;
+ }
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+
+ /*
+ * Walk down the tree. Missing extent blocks are filled in as
+ * we go. Metadata is accounted every time we read a new
+ * extent block.
+ *
+ * When we reach a leaf, we account for file extent items in it,
+ * walk back up the tree (adjusting slot pointers as we go)
+ * and restart the search process.
+ */
+ extent_buffer_get(root_eb); /* For path */
+ path->nodes[root_level] = root_eb;
+ path->slots[root_level] = 0;
+ path->locks[root_level] = 0; /* so release_path doesn't try to unlock */
+walk_down:
+ level = root_level;
+ while (level >= 0) {
+ if (path->nodes[level] == NULL) {
+ int parent_slot;
+ u64 child_gen;
+ u64 child_bytenr;
+
+ /*
+ * We need to get child blockptr/gen from parent before
+ * we can read it.
+ */
+ eb = path->nodes[level + 1];
+ parent_slot = path->slots[level + 1];
+ child_bytenr = btrfs_node_blockptr(eb, parent_slot);
+ child_gen = btrfs_node_ptr_generation(eb, parent_slot);
+
+ eb = read_tree_block(fs_info, child_bytenr, child_gen);
+ if (IS_ERR(eb)) {
+ ret = PTR_ERR(eb);
+ goto out;
+ } else if (!extent_buffer_uptodate(eb)) {
+ free_extent_buffer(eb);
+ ret = -EIO;
+ goto out;
+ }
+
+ path->nodes[level] = eb;
+ path->slots[level] = 0;
+
+ btrfs_tree_read_lock(eb);
+ btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
+ path->locks[level] = BTRFS_READ_LOCK_BLOCKING;
+
+ ret = btrfs_qgroup_trace_extent(trans, fs_info,
+ child_bytenr,
+ fs_info->nodesize,
+ GFP_NOFS);
+ if (ret)
+ goto out;
+ }
+
+ if (level == 0) {
+ ret = btrfs_qgroup_trace_leaf_items(trans, fs_info,
+ path->nodes[level]);
+ if (ret)
+ goto out;
+
+ /* Nonzero return here means we completed our search */
+ ret = adjust_slots_upwards(root, path, root_level);
+ if (ret)
+ break;
+
+ /* Restart search with new slots */
+ goto walk_down;
+ }
+
+ level--;
+ }
+
+ ret = 0;
+out:
+ btrfs_free_path(path);
+
+ return ret;
+}
+
#define UPDATE_NEW 0
#define UPDATE_OLD 1
/*
continue;
ulist_reinit(tmp);
- ret = ulist_add(qgroups, qg->qgroupid, ptr_to_u64(qg),
+ ret = ulist_add(qgroups, qg->qgroupid, qgroup_to_aux(qg),
GFP_ATOMIC);
if (ret < 0)
return ret;
- ret = ulist_add(tmp, qg->qgroupid, ptr_to_u64(qg), GFP_ATOMIC);
+ ret = ulist_add(tmp, qg->qgroupid, qgroup_to_aux(qg), GFP_ATOMIC);
if (ret < 0)
return ret;
ULIST_ITER_INIT(&tmp_uiter);
while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
struct btrfs_qgroup_list *glist;
- qg = u64_to_ptr(tmp_unode->aux);
+ qg = unode_aux_to_qgroup(tmp_unode);
if (update_old)
btrfs_qgroup_update_old_refcnt(qg, seq, 1);
else
btrfs_qgroup_update_new_refcnt(qg, seq, 1);
list_for_each_entry(glist, &qg->groups, next_group) {
ret = ulist_add(qgroups, glist->group->qgroupid,
- ptr_to_u64(glist->group),
+ qgroup_to_aux(glist->group),
GFP_ATOMIC);
if (ret < 0)
return ret;
ret = ulist_add(tmp, glist->group->qgroupid,
- ptr_to_u64(glist->group),
+ qgroup_to_aux(glist->group),
GFP_ATOMIC);
if (ret < 0)
return ret;
while ((unode = ulist_next(qgroups, &uiter))) {
bool dirty = false;
- qg = u64_to_ptr(unode->aux);
+ qg = unode_aux_to_qgroup(unode);
cur_old_count = btrfs_qgroup_get_old_refcnt(qg, seq);
cur_new_count = btrfs_qgroup_get_new_refcnt(qg, seq);
}
rcu_read_lock();
- level_size = srcroot->nodesize;
+ level_size = fs_info->nodesize;
rcu_read_unlock();
}
i_qgroups = (u64 *)(inherit + 1);
for (i = 0; i < inherit->num_qgroups; ++i) {
if (*i_qgroups) {
- ret = add_relation_rb(quota_root->fs_info, objectid,
- *i_qgroups);
+ ret = add_relation_rb(fs_info, objectid, *i_qgroups);
if (ret)
goto unlock;
}
struct btrfs_qgroup *qg;
struct btrfs_qgroup_list *glist;
- qg = u64_to_ptr(unode->aux);
+ qg = unode_aux_to_qgroup(unode);
if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
qg->reserved + (s64)qg->rfer + num_bytes >
while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
struct btrfs_qgroup *qg;
- qg = u64_to_ptr(unode->aux);
+ qg = unode_aux_to_qgroup(unode);
qg->reserved += num_bytes;
}
struct btrfs_qgroup *qg;
struct btrfs_qgroup_list *glist;
- qg = u64_to_ptr(unode->aux);
+ qg = unode_aux_to_qgroup(unode);
qg->reserved -= num_bytes;
found.type != BTRFS_METADATA_ITEM_KEY)
continue;
if (found.type == BTRFS_METADATA_ITEM_KEY)
- num_bytes = fs_info->extent_root->nodesize;
+ num_bytes = fs_info->nodesize;
else
num_bytes = found.offset;
int err = -ENOMEM;
int ret = 0;
- mutex_lock(&fs_info->qgroup_rescan_lock);
- fs_info->qgroup_rescan_running = true;
- mutex_unlock(&fs_info->qgroup_rescan_lock);
-
path = btrfs_alloc_path();
if (!path)
goto out;
err = qgroup_rescan_leaf(fs_info, path, trans);
}
if (err > 0)
- btrfs_commit_transaction(trans, fs_info->fs_root);
+ btrfs_commit_transaction(trans);
else
- btrfs_end_transaction(trans, fs_info->fs_root);
+ btrfs_end_transaction(trans);
}
out:
err = ret;
btrfs_err(fs_info, "fail to update qgroup status: %d", err);
}
- btrfs_end_transaction(trans, fs_info->quota_root);
+ btrfs_end_transaction(trans);
if (btrfs_fs_closing(fs_info)) {
btrfs_info(fs_info, "qgroup scan paused");
sizeof(fs_info->qgroup_rescan_progress));
fs_info->qgroup_rescan_progress.objectid = progress_objectid;
init_completion(&fs_info->qgroup_rescan_completion);
+ fs_info->qgroup_rescan_running = true;
spin_unlock(&fs_info->qgroup_lock);
mutex_unlock(&fs_info->qgroup_rescan_lock);
fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
return PTR_ERR(trans);
}
- ret = btrfs_commit_transaction(trans, fs_info->fs_root);
+ ret = btrfs_commit_transaction(trans);
if (ret) {
fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
return ret;
int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
- if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags) ||
+ if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
!is_fstree(root->objectid) || num_bytes == 0)
return 0;
- BUG_ON(num_bytes != round_down(num_bytes, root->nodesize));
+ BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
ret = qgroup_reserve(root, num_bytes);
if (ret < 0)
return ret;
void btrfs_qgroup_free_meta_all(struct btrfs_root *root)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
int reserved;
- if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags) ||
+ if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
!is_fstree(root->objectid))
return;
void btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes)
{
- if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags) ||
+ struct btrfs_fs_info *fs_info = root->fs_info;
+
+ if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
!is_fstree(root->objectid))
return;
- BUG_ON(num_bytes != round_down(num_bytes, root->nodesize));
+ BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
WARN_ON(atomic_read(&root->qgroup_meta_rsv) < num_bytes);
atomic_sub(num_bytes, &root->qgroup_meta_rsv);
qgroup_free(root, num_bytes);
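The reserve/free pair above keeps a per-root atomic byte counter (qgroup_meta_rsv) in step with the qgroup reservation, warning if a free would underflow it. A toy C11 model of that pattern, with an assumed 16 KiB nodesize and an assert standing in for the WARN_ON:

#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>

/* Toy model of the qgroup_meta_rsv accounting: reservations are added to an
 * atomic counter and handed back in (possibly smaller) chunks later. */
static atomic_int meta_rsv;

static void reserve_meta(int num_bytes)
{
    atomic_fetch_add(&meta_rsv, num_bytes);
}

static void free_meta(int num_bytes)
{
    /* mirrors the WARN_ON(): never free more than is currently reserved */
    assert(atomic_load(&meta_rsv) >= num_bytes);
    atomic_fetch_sub(&meta_rsv, num_bytes);
}

int main(void)
{
    int nodesize = 16384;      /* assumed fs_info->nodesize */

    reserve_meta(2 * nodesize);
    free_meta(nodesize);
    free_meta(nodesize);
    printf("remaining reservation: %d\n", atomic_load(&meta_rsv)); /* 0 */
    return 0;
}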
*/
static int __must_check __add_reloc_root(struct btrfs_root *root)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct rb_node *rb_node;
struct mapping_node *node;
- struct reloc_control *rc = root->fs_info->reloc_ctl;
+ struct reloc_control *rc = fs_info->reloc_ctl;
node = kmalloc(sizeof(*node), GFP_NOFS);
if (!node)
node->bytenr, &node->rb_node);
spin_unlock(&rc->reloc_root_tree.lock);
if (rb_node) {
- btrfs_panic(root->fs_info, -EEXIST,
+ btrfs_panic(fs_info, -EEXIST,
"Duplicate root found for start=%llu while inserting into relocation tree",
node->bytenr);
kfree(node);
*/
static void __del_reloc_root(struct btrfs_root *root)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct rb_node *rb_node;
struct mapping_node *node = NULL;
- struct reloc_control *rc = root->fs_info->reloc_ctl;
+ struct reloc_control *rc = fs_info->reloc_ctl;
spin_lock(&rc->reloc_root_tree.lock);
rb_node = tree_search(&rc->reloc_root_tree.rb_root,
return;
BUG_ON((struct btrfs_root *)node->data != root);
- spin_lock(&root->fs_info->trans_lock);
+ spin_lock(&fs_info->trans_lock);
list_del_init(&root->root_list);
- spin_unlock(&root->fs_info->trans_lock);
+ spin_unlock(&fs_info->trans_lock);
kfree(node);
}
*/
static int __update_reloc_root(struct btrfs_root *root, u64 new_bytenr)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct rb_node *rb_node;
struct mapping_node *node = NULL;
- struct reloc_control *rc = root->fs_info->reloc_ctl;
+ struct reloc_control *rc = fs_info->reloc_ctl;
spin_lock(&rc->reloc_root_tree.lock);
rb_node = tree_search(&rc->reloc_root_tree.rb_root,
static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
struct btrfs_root *root, u64 objectid)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_root *reloc_root;
struct extent_buffer *eb;
struct btrfs_root_item *root_item;
struct btrfs_key root_key;
- u64 last_snap = 0;
int ret;
root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
root_key.offset = objectid;
if (root->root_key.objectid == objectid) {
+ u64 commit_root_gen;
+
/* called by btrfs_init_reloc_root */
ret = btrfs_copy_root(trans, root, root->commit_root, &eb,
BTRFS_TREE_RELOC_OBJECTID);
BUG_ON(ret);
-
- last_snap = btrfs_root_last_snapshot(&root->root_item);
- btrfs_set_root_last_snapshot(&root->root_item,
- trans->transid - 1);
+ /*
+ * Set the last_snapshot field to the generation of the commit
+ * root - like this ctree.c:btrfs_block_can_be_shared() behaves
+ * correctly (returns true) when the relocation root is created
+ * either inside the critical section of a transaction commit
+ * (through transaction.c:qgroup_account_snapshot()) or when
+ * it's created before the transaction commit is started.
+ */
+ commit_root_gen = btrfs_header_generation(root->commit_root);
+ btrfs_set_root_last_snapshot(&root->root_item, commit_root_gen);
} else {
/*
* called by btrfs_reloc_post_snapshot_hook.
memset(&root_item->drop_progress, 0,
sizeof(struct btrfs_disk_key));
root_item->drop_level = 0;
- /*
- * abuse rtransid, it is safe because it is impossible to
- * receive data into a relocation tree.
- */
- btrfs_set_root_rtransid(root_item, last_snap);
- btrfs_set_root_otransid(root_item, trans->transid);
}
btrfs_tree_unlock(eb);
free_extent_buffer(eb);
- ret = btrfs_insert_root(trans, root->fs_info->tree_root,
+ ret = btrfs_insert_root(trans, fs_info->tree_root,
&root_key, root_item);
BUG_ON(ret);
kfree(root_item);
- reloc_root = btrfs_read_fs_root(root->fs_info->tree_root, &root_key);
+ reloc_root = btrfs_read_fs_root(fs_info->tree_root, &root_key);
BUG_ON(IS_ERR(reloc_root));
reloc_root->last_trans = trans->transid;
return reloc_root;
int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_root *reloc_root;
- struct reloc_control *rc = root->fs_info->reloc_ctl;
+ struct reloc_control *rc = fs_info->reloc_ctl;
struct btrfs_block_rsv *rsv;
int clear_rsv = 0;
int ret;
int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_root *reloc_root;
struct btrfs_root_item *root_item;
int ret;
reloc_root = root->reloc_root;
root_item = &reloc_root->root_item;
- if (root->fs_info->reloc_ctl->merge_reloc_tree &&
+ if (fs_info->reloc_ctl->merge_reloc_tree &&
btrfs_root_refs(root_item) == 0) {
root->reloc_root = NULL;
__del_reloc_root(reloc_root);
reloc_root->commit_root = btrfs_root_node(reloc_root);
}
- ret = btrfs_update_root(trans, root->fs_info->tree_root,
+ ret = btrfs_update_root(trans, fs_info->tree_root,
&reloc_root->root_key, root_item);
BUG_ON(ret);
struct btrfs_root *root,
struct extent_buffer *leaf)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_key key;
struct btrfs_file_extent_item *fi;
struct inode *inode = NULL;
end = key.offset +
btrfs_file_extent_num_bytes(leaf, fi);
WARN_ON(!IS_ALIGNED(key.offset,
- root->sectorsize));
- WARN_ON(!IS_ALIGNED(end, root->sectorsize));
+ fs_info->sectorsize));
+ WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize));
end--;
ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
key.offset, end);
dirty = 1;
key.offset -= btrfs_file_extent_offset(leaf, fi);
- ret = btrfs_inc_extent_ref(trans, root, new_bytenr,
+ ret = btrfs_inc_extent_ref(trans, fs_info, new_bytenr,
num_bytes, parent,
btrfs_header_owner(leaf),
key.objectid, key.offset);
break;
}
- ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
+ ret = btrfs_free_extent(trans, fs_info, bytenr, num_bytes,
parent, btrfs_header_owner(leaf),
key.objectid, key.offset);
if (ret) {
struct btrfs_path *path, struct btrfs_key *next_key,
int lowest_level, int max_level)
{
+ struct btrfs_fs_info *fs_info = dest->fs_info;
struct extent_buffer *eb;
struct extent_buffer *parent;
struct btrfs_key key;
btrfs_node_key_to_cpu(parent, next_key, slot + 1);
old_bytenr = btrfs_node_blockptr(parent, slot);
- blocksize = dest->nodesize;
+ blocksize = fs_info->nodesize;
old_ptr_gen = btrfs_node_ptr_generation(parent, slot);
if (level <= max_level) {
break;
}
- eb = read_tree_block(dest, old_bytenr, old_ptr_gen);
+ eb = read_tree_block(fs_info, old_bytenr, old_ptr_gen);
if (IS_ERR(eb)) {
ret = PTR_ERR(eb);
break;
path->lowest_level = 0;
BUG_ON(ret);
+ /*
+ * Inform qgroup to trace both subtrees.
+ *
+ * We must trace both trees:
+ * 1) Tree reloc subtree
+ * If not traced, we will leak data numbers.
+ * 2) Fs subtree
+ * If not traced, we will double count old data and tree block
+ * numbers if the current transaction doesn't free the data
+ * reloc tree inode.
+ */
+ ret = btrfs_qgroup_trace_subtree(trans, src, parent,
+ btrfs_header_generation(parent),
+ btrfs_header_level(parent));
+ if (ret < 0)
+ break;
+ ret = btrfs_qgroup_trace_subtree(trans, dest,
+ path->nodes[level],
+ btrfs_header_generation(path->nodes[level]),
+ btrfs_header_level(path->nodes[level]));
+ if (ret < 0)
+ break;
+
/*
* swap blocks in fs tree and reloc tree.
*/
path->slots[level], old_ptr_gen);
btrfs_mark_buffer_dirty(path->nodes[level]);
- ret = btrfs_inc_extent_ref(trans, src, old_bytenr, blocksize,
- path->nodes[level]->start,
+ ret = btrfs_inc_extent_ref(trans, fs_info, old_bytenr,
+ blocksize, path->nodes[level]->start,
src->root_key.objectid, level - 1, 0);
BUG_ON(ret);
- ret = btrfs_inc_extent_ref(trans, dest, new_bytenr, blocksize,
- 0, dest->root_key.objectid, level - 1,
- 0);
+ ret = btrfs_inc_extent_ref(trans, fs_info, new_bytenr,
+ blocksize, 0, dest->root_key.objectid,
+ level - 1, 0);
BUG_ON(ret);
- ret = btrfs_free_extent(trans, src, new_bytenr, blocksize,
+ ret = btrfs_free_extent(trans, fs_info, new_bytenr, blocksize,
path->nodes[level]->start,
src->root_key.objectid, level - 1, 0);
BUG_ON(ret);
- ret = btrfs_free_extent(trans, dest, old_bytenr, blocksize,
+ ret = btrfs_free_extent(trans, fs_info, old_bytenr, blocksize,
0, dest->root_key.objectid, level - 1,
0);
BUG_ON(ret);
int walk_down_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
int *level)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_buffer *eb = NULL;
int i;
u64 bytenr;
}
bytenr = btrfs_node_blockptr(eb, path->slots[i]);
- eb = read_tree_block(root, bytenr, ptr_gen);
+ eb = read_tree_block(fs_info, bytenr, ptr_gen);
if (IS_ERR(eb)) {
return PTR_ERR(eb);
} else if (!extent_buffer_uptodate(eb)) {
struct btrfs_key *min_key,
struct btrfs_key *max_key)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct inode *inode = NULL;
u64 objectid;
u64 start, end;
start = 0;
else {
start = min_key->offset;
- WARN_ON(!IS_ALIGNED(start, root->sectorsize));
+ WARN_ON(!IS_ALIGNED(start, fs_info->sectorsize));
}
} else {
start = 0;
if (max_key->offset == 0)
continue;
end = max_key->offset;
- WARN_ON(!IS_ALIGNED(end, root->sectorsize));
+ WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize));
end--;
}
} else {
static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
struct btrfs_root *root)
{
+ struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
LIST_HEAD(inode_list);
struct btrfs_key key;
struct btrfs_key next_key;
btrfs_unlock_up_safe(path, 0);
}
- min_reserved = root->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
+ min_reserved = fs_info->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
memset(&next_key, 0, sizeof(next_key));
while (1) {
path->slots[level]);
root_item->drop_level = level;
- btrfs_end_transaction_throttle(trans, root);
+ btrfs_end_transaction_throttle(trans);
trans = NULL;
- btrfs_btree_balance_dirty(root);
+ btrfs_btree_balance_dirty(fs_info);
if (replaced && rc->stage == UPDATE_DATA_PTRS)
invalidate_extent_cache(root, &key, &next_key);
}
if (trans)
- btrfs_end_transaction_throttle(trans, root);
+ btrfs_end_transaction_throttle(trans);
- btrfs_btree_balance_dirty(root);
+ btrfs_btree_balance_dirty(fs_info);
if (replaced && rc->stage == UPDATE_DATA_PTRS)
invalidate_extent_cache(root, &key, &next_key);
int prepare_to_merge(struct reloc_control *rc, int err)
{
struct btrfs_root *root = rc->extent_root;
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_root *reloc_root;
struct btrfs_trans_handle *trans;
LIST_HEAD(reloc_roots);
u64 num_bytes = 0;
int ret;
- mutex_lock(&root->fs_info->reloc_mutex);
- rc->merging_rsv_size += root->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
+ mutex_lock(&fs_info->reloc_mutex);
+ rc->merging_rsv_size += fs_info->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
rc->merging_rsv_size += rc->nodes_relocated * 2;
- mutex_unlock(&root->fs_info->reloc_mutex);
+ mutex_unlock(&fs_info->reloc_mutex);
again:
if (!err) {
trans = btrfs_join_transaction(rc->extent_root);
if (IS_ERR(trans)) {
if (!err)
- btrfs_block_rsv_release(rc->extent_root,
- rc->block_rsv, num_bytes);
+ btrfs_block_rsv_release(fs_info, rc->block_rsv,
+ num_bytes);
return PTR_ERR(trans);
}
if (!err) {
if (num_bytes != rc->merging_rsv_size) {
- btrfs_end_transaction(trans, rc->extent_root);
- btrfs_block_rsv_release(rc->extent_root,
- rc->block_rsv, num_bytes);
+ btrfs_end_transaction(trans);
+ btrfs_block_rsv_release(fs_info, rc->block_rsv,
+ num_bytes);
goto again;
}
}
struct btrfs_root, root_list);
list_del_init(&reloc_root->root_list);
- root = read_fs_root(reloc_root->fs_info,
- reloc_root->root_key.offset);
+ root = read_fs_root(fs_info, reloc_root->root_key.offset);
BUG_ON(IS_ERR(root));
BUG_ON(root->reloc_root != reloc_root);
list_splice(&reloc_roots, &rc->reloc_roots);
if (!err)
- btrfs_commit_transaction(trans, rc->extent_root);
+ btrfs_commit_transaction(trans);
else
- btrfs_end_transaction(trans, rc->extent_root);
+ btrfs_end_transaction(trans);
return err;
}
static noinline_for_stack
void merge_reloc_roots(struct reloc_control *rc)
{
+ struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
struct btrfs_root *root;
struct btrfs_root *reloc_root;
- u64 last_snap;
- u64 otransid;
- u64 objectid;
LIST_HEAD(reloc_roots);
int found = 0;
int ret = 0;
* adding their roots to the list while we are
* doing this splice
*/
- mutex_lock(&root->fs_info->reloc_mutex);
+ mutex_lock(&fs_info->reloc_mutex);
list_splice_init(&rc->reloc_roots, &reloc_roots);
- mutex_unlock(&root->fs_info->reloc_mutex);
+ mutex_unlock(&fs_info->reloc_mutex);
while (!list_empty(&reloc_roots)) {
found = 1;
struct btrfs_root, root_list);
if (btrfs_root_refs(&reloc_root->root_item) > 0) {
- root = read_fs_root(reloc_root->fs_info,
+ root = read_fs_root(fs_info,
reloc_root->root_key.offset);
BUG_ON(IS_ERR(root));
BUG_ON(root->reloc_root != reloc_root);
list_del_init(&reloc_root->root_list);
}
- /*
- * we keep the old last snapshot transid in rtranid when we
- * created the relocation tree.
- */
- last_snap = btrfs_root_rtransid(&reloc_root->root_item);
- otransid = btrfs_root_otransid(&reloc_root->root_item);
- objectid = reloc_root->root_key.offset;
-
ret = btrfs_drop_snapshot(reloc_root, rc->block_rsv, 0, 1);
if (ret < 0) {
if (list_empty(&reloc_root->root_list))
}
out:
if (ret) {
- btrfs_handle_fs_error(root->fs_info, ret, NULL);
+ btrfs_handle_fs_error(fs_info, ret, NULL);
if (!list_empty(&reloc_roots))
free_reloc_roots(&reloc_roots);
/* new reloc root may be added */
- mutex_lock(&root->fs_info->reloc_mutex);
+ mutex_lock(&fs_info->reloc_mutex);
list_splice_init(&rc->reloc_roots, &reloc_roots);
- mutex_unlock(&root->fs_info->reloc_mutex);
+ mutex_unlock(&fs_info->reloc_mutex);
if (!list_empty(&reloc_roots))
free_reloc_roots(&reloc_roots);
}
static int record_reloc_root_in_trans(struct btrfs_trans_handle *trans,
struct btrfs_root *reloc_root)
{
+ struct btrfs_fs_info *fs_info = reloc_root->fs_info;
struct btrfs_root *root;
if (reloc_root->last_trans == trans->transid)
return 0;
- root = read_fs_root(reloc_root->fs_info, reloc_root->root_key.offset);
+ root = read_fs_root(fs_info, reloc_root->root_key.offset);
BUG_ON(IS_ERR(root));
BUG_ON(root->reloc_root != reloc_root);
u64 calcu_metadata_size(struct reloc_control *rc,
struct backref_node *node, int reserve)
{
+ struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
struct backref_node *next = node;
struct backref_edge *edge;
struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
if (next->processed && (reserve || next != node))
break;
- num_bytes += rc->extent_root->nodesize;
+ num_bytes += fs_info->nodesize;
if (list_empty(&next->upper))
break;
struct backref_node *node)
{
struct btrfs_root *root = rc->extent_root;
+ struct btrfs_fs_info *fs_info = root->fs_info;
u64 num_bytes;
int ret;
u64 tmp;
ret = btrfs_block_rsv_refill(root, rc->block_rsv, num_bytes,
BTRFS_RESERVE_FLUSH_LIMIT);
if (ret) {
- tmp = rc->extent_root->nodesize * RELOCATION_RESERVED_NODES;
+ tmp = fs_info->nodesize * RELOCATION_RESERVED_NODES;
while (tmp <= rc->reserved_bytes)
tmp <<= 1;
/*
* space for relocation and we will return earlier in
* enospc case.
*/
- rc->block_rsv->size = tmp + rc->extent_root->nodesize *
- RELOCATION_RESERVED_NODES;
+ rc->block_rsv->size = tmp + fs_info->nodesize *
+ RELOCATION_RESERVED_NODES;
return -EAGAIN;
}
struct btrfs_key *key,
struct btrfs_path *path, int lowest)
{
+ struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
struct backref_node *upper;
struct backref_edge *edge;
struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
goto next;
}
- blocksize = root->nodesize;
+ blocksize = root->fs_info->nodesize;
generation = btrfs_node_ptr_generation(upper->eb, slot);
- eb = read_tree_block(root, bytenr, generation);
+ eb = read_tree_block(fs_info, bytenr, generation);
if (IS_ERR(eb)) {
err = PTR_ERR(eb);
goto next;
trans->transid);
btrfs_mark_buffer_dirty(upper->eb);
- ret = btrfs_inc_extent_ref(trans, root,
+ ret = btrfs_inc_extent_ref(trans, root->fs_info,
node->eb->start, blocksize,
upper->eb->start,
btrfs_header_owner(upper->eb),
u32 blocksize;
if (node->level == 0 ||
in_block_group(node->bytenr, rc->block_group)) {
- blocksize = rc->extent_root->nodesize;
+ blocksize = rc->extent_root->fs_info->nodesize;
mark_block_processed(rc, node->bytenr, blocksize);
}
node->processed = 1;
static int tree_block_processed(u64 bytenr, struct reloc_control *rc)
{
- u32 blocksize = rc->extent_root->nodesize;
+ u32 blocksize = rc->extent_root->fs_info->nodesize;
if (test_range_bit(&rc->processed_blocks, bytenr,
bytenr + blocksize - 1, EXTENT_DIRTY, 1, NULL))
return 0;
}
-static int get_tree_block_key(struct reloc_control *rc,
+static int get_tree_block_key(struct btrfs_fs_info *fs_info,
struct tree_block *block)
{
struct extent_buffer *eb;
BUG_ON(block->key_ready);
- eb = read_tree_block(rc->extent_root, block->bytenr,
- block->key.offset);
+ eb = read_tree_block(fs_info, block->bytenr, block->key.offset);
if (IS_ERR(eb)) {
return PTR_ERR(eb);
} else if (!extent_buffer_uptodate(eb)) {
int relocate_tree_blocks(struct btrfs_trans_handle *trans,
struct reloc_control *rc, struct rb_root *blocks)
{
+ struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
struct backref_node *node;
struct btrfs_path *path;
struct tree_block *block;
while (rb_node) {
block = rb_entry(rb_node, struct tree_block, rb_node);
if (!block->key_ready)
- readahead_tree_block(rc->extent_root, block->bytenr);
+ readahead_tree_block(fs_info, block->bytenr);
rb_node = rb_next(rb_node);
}
while (rb_node) {
block = rb_entry(rb_node, struct tree_block, rb_node);
if (!block->key_ready) {
- err = get_tree_block_key(rc, block);
+ err = get_tree_block_key(fs_info, block);
if (err)
goto out_free_path;
}
int setup_extent_mapping(struct inode *inode, u64 start, u64 end,
u64 block_start)
{
- struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
struct extent_map *em;
int ret = 0;
em->len = end + 1 - start;
em->block_len = em->len;
em->block_start = block_start;
- em->bdev = root->fs_info->fs_devices->latest_bdev;
+ em->bdev = fs_info->fs_devices->latest_bdev;
set_bit(EXTENT_FLAG_PINNED, &em->flags);
lock_extent(&BTRFS_I(inode)->io_tree, start, end);
static int relocate_file_extent_cluster(struct inode *inode,
struct file_extent_cluster *cluster)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
u64 page_start;
u64 page_end;
u64 offset = BTRFS_I(inode)->index_cnt;
index++;
balance_dirty_pages_ratelimited(inode->i_mapping);
- btrfs_throttle(BTRFS_I(inode)->root);
+ btrfs_throttle(fs_info);
}
WARN_ON(nr != cluster->nr);
out:
return -ENOMEM;
block->bytenr = extent_key->objectid;
- block->key.objectid = rc->extent_root->nodesize;
+ block->key.objectid = rc->extent_root->fs_info->nodesize;
block->key.offset = generation;
block->level = level;
block->key_ready = 0;
u64 bytenr, u32 blocksize,
struct rb_root *blocks)
{
+ struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
struct btrfs_path *path;
struct btrfs_key key;
int ret;
- bool skinny = btrfs_fs_incompat(rc->extent_root->fs_info,
- SKINNY_METADATA);
+ bool skinny = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
if (tree_block_processed(bytenr, rc))
return 0;
btrfs_header_backref_rev(eb) < BTRFS_MIXED_BACKREF_REV)
return 1;
- ret = btrfs_lookup_extent_info(NULL, rc->extent_root,
+ ret = btrfs_lookup_extent_info(NULL, rc->extent_root->fs_info,
eb->start, btrfs_header_level(eb), 1,
NULL, &flags);
BUG_ON(ret);
}
truncate:
- ret = btrfs_check_trunc_cache_free_space(root,
+ ret = btrfs_check_trunc_cache_free_space(fs_info,
&fs_info->global_block_rsv);
if (ret)
goto out;
ret = btrfs_truncate_free_space_cache(root, trans, block_group, inode);
- btrfs_end_transaction(trans, root);
- btrfs_btree_balance_dirty(root);
+ btrfs_end_transaction(trans);
+ btrfs_btree_balance_dirty(fs_info);
out:
iput(inode);
return ret;
struct btrfs_extent_data_ref *ref,
struct rb_root *blocks)
{
+ struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
struct btrfs_path *path;
struct tree_block *block;
struct btrfs_root *root;
* it and redo the search.
*/
if (ref_root == BTRFS_ROOT_TREE_OBJECTID) {
- ret = delete_block_group_cache(rc->extent_root->fs_info,
- rc->block_group,
+ ret = delete_block_group_cache(fs_info, rc->block_group,
NULL, ref_objectid);
if (ret != -ENOENT)
return ret;
return -ENOMEM;
path->reada = READA_FORWARD;
- root = read_fs_root(rc->extent_root->fs_info, ref_root);
+ root = read_fs_root(fs_info, ref_root);
if (IS_ERR(root)) {
err = PTR_ERR(root);
goto out;
struct btrfs_extent_inline_ref *iref;
unsigned long ptr;
unsigned long end;
- u32 blocksize = rc->extent_root->nodesize;
+ u32 blocksize = rc->extent_root->fs_info->nodesize;
int ret = 0;
int err = 0;
int find_next_extent(struct reloc_control *rc, struct btrfs_path *path,
struct btrfs_key *extent_key)
{
+ struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
struct btrfs_key key;
struct extent_buffer *leaf;
u64 start, end, last;
}
if (key.type == BTRFS_METADATA_ITEM_KEY &&
- key.objectid + rc->extent_root->nodesize <=
+ key.objectid + fs_info->nodesize <=
rc->search_start) {
path->slots[0]++;
goto next;
rc->search_start = key.objectid + key.offset;
else
rc->search_start = key.objectid +
- rc->extent_root->nodesize;
+ fs_info->nodesize;
memcpy(extent_key, &key, sizeof(key));
return 0;
}
struct btrfs_trans_handle *trans;
int ret;
- rc->block_rsv = btrfs_alloc_block_rsv(rc->extent_root,
+ rc->block_rsv = btrfs_alloc_block_rsv(rc->extent_root->fs_info,
BTRFS_BLOCK_RSV_TEMP);
if (!rc->block_rsv)
return -ENOMEM;
rc->nodes_relocated = 0;
rc->merging_rsv_size = 0;
rc->reserved_bytes = 0;
- rc->block_rsv->size = rc->extent_root->nodesize *
+ rc->block_rsv->size = rc->extent_root->fs_info->nodesize *
RELOCATION_RESERVED_NODES;
ret = btrfs_block_rsv_refill(rc->extent_root,
rc->block_rsv, rc->block_rsv->size,
*/
return PTR_ERR(trans);
}
- btrfs_commit_transaction(trans, rc->extent_root);
+ btrfs_commit_transaction(trans);
return 0;
}
-/*
- * Qgroup fixer for data chunk relocation.
- * The data relocation is done in the following steps
- * 1) Copy data extents into data reloc tree
- * 2) Create tree reloc tree(special snapshot) for related subvolumes
- * 3) Modify file extents in tree reloc tree
- * 4) Merge tree reloc tree with original fs tree, by swapping tree blocks
- *
- * The problem is, data and tree reloc tree are not accounted to qgroup,
- * and 4) will only info qgroup to track tree blocks change, not file extents
- * in the tree blocks.
- *
- * The good news is, related data extents are all in data reloc tree, so we
- * only need to info qgroup to track all file extents in data reloc tree
- * before commit trans.
- */
-static int qgroup_fix_relocated_data_extents(struct btrfs_trans_handle *trans,
- struct reloc_control *rc)
-{
- struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
- struct inode *inode = rc->data_inode;
- struct btrfs_root *data_reloc_root = BTRFS_I(inode)->root;
- struct btrfs_path *path;
- struct btrfs_key key;
- int ret = 0;
-
- if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
- return 0;
-
- /*
- * Only for stage where we update data pointers the qgroup fix is
- * valid.
- * For MOVING_DATA stage, we will miss the timing of swapping tree
- * blocks, and won't fix it.
- */
- if (!(rc->stage == UPDATE_DATA_PTRS && rc->extents_found))
- return 0;
-
- path = btrfs_alloc_path();
- if (!path)
- return -ENOMEM;
- key.objectid = btrfs_ino(inode);
- key.type = BTRFS_EXTENT_DATA_KEY;
- key.offset = 0;
-
- ret = btrfs_search_slot(NULL, data_reloc_root, &key, path, 0, 0);
- if (ret < 0)
- goto out;
-
- lock_extent(&BTRFS_I(inode)->io_tree, 0, (u64)-1);
- while (1) {
- struct btrfs_file_extent_item *fi;
-
- btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
- if (key.objectid > btrfs_ino(inode))
- break;
- if (key.type != BTRFS_EXTENT_DATA_KEY)
- goto next;
- fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
- struct btrfs_file_extent_item);
- if (btrfs_file_extent_type(path->nodes[0], fi) !=
- BTRFS_FILE_EXTENT_REG)
- goto next;
- ret = btrfs_qgroup_insert_dirty_extent(trans, fs_info,
- btrfs_file_extent_disk_bytenr(path->nodes[0], fi),
- btrfs_file_extent_disk_num_bytes(path->nodes[0], fi),
- GFP_NOFS);
- if (ret < 0)
- break;
-next:
- ret = btrfs_next_item(data_reloc_root, path);
- if (ret < 0)
- break;
- if (ret > 0) {
- ret = 0;
- break;
- }
- }
- unlock_extent(&BTRFS_I(inode)->io_tree, 0 , (u64)-1);
-out:
- btrfs_free_path(path);
- return ret;
-}
-
static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
{
+ struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
struct rb_root blocks = RB_ROOT;
struct btrfs_key key;
struct btrfs_trans_handle *trans = NULL;
}
restart:
if (update_backref_cache(trans, &rc->backref_cache)) {
- btrfs_end_transaction(trans, rc->extent_root);
+ btrfs_end_transaction(trans);
continue;
}
}
}
- btrfs_end_transaction_throttle(trans, rc->extent_root);
- btrfs_btree_balance_dirty(rc->extent_root);
+ btrfs_end_transaction_throttle(trans);
+ btrfs_btree_balance_dirty(fs_info);
trans = NULL;
if (rc->stage == MOVE_DATA_EXTENTS &&
}
}
if (trans && progress && err == -ENOSPC) {
- ret = btrfs_force_chunk_alloc(trans, rc->extent_root,
+ ret = btrfs_force_chunk_alloc(trans, fs_info,
rc->block_group->flags);
if (ret == 1) {
err = 0;
clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY);
if (trans) {
- btrfs_end_transaction_throttle(trans, rc->extent_root);
- btrfs_btree_balance_dirty(rc->extent_root);
+ btrfs_end_transaction_throttle(trans);
+ btrfs_btree_balance_dirty(fs_info);
}
if (!err) {
set_reloc_control(rc);
backref_cache_cleanup(&rc->backref_cache);
- btrfs_block_rsv_release(rc->extent_root, rc->block_rsv, (u64)-1);
+ btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1);
err = prepare_to_merge(rc, err);
rc->merge_reloc_tree = 0;
unset_reloc_control(rc);
- btrfs_block_rsv_release(rc->extent_root, rc->block_rsv, (u64)-1);
+ btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1);
/* get rid of pinned extents */
trans = btrfs_join_transaction(rc->extent_root);
err = PTR_ERR(trans);
goto out_free;
}
- ret = qgroup_fix_relocated_data_extents(trans, rc);
- if (ret < 0) {
- btrfs_abort_transaction(trans, ret);
- if (!err)
- err = ret;
- goto out_free;
- }
- btrfs_commit_transaction(trans, rc->extent_root);
+ btrfs_commit_transaction(trans);
out_free:
- btrfs_free_block_rsv(rc->extent_root, rc->block_rsv);
+ btrfs_free_block_rsv(fs_info, rc->block_rsv);
btrfs_free_path(path);
return err;
}
leaf = path->nodes[0];
item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item);
- memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));
+ memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item));
btrfs_set_inode_generation(leaf, item, 1);
btrfs_set_inode_size(leaf, item, 0);
btrfs_set_inode_mode(leaf, item, S_IFREG | 0600);
key.objectid = objectid;
key.type = BTRFS_INODE_ITEM_KEY;
key.offset = 0;
- inode = btrfs_iget(root->fs_info->sb, &key, root, NULL);
+ inode = btrfs_iget(fs_info->sb, &key, root, NULL);
BUG_ON(IS_ERR(inode) || is_bad_inode(inode));
BTRFS_I(inode)->index_cnt = group->key.objectid;
err = btrfs_orphan_add(trans, inode);
out:
- btrfs_end_transaction(trans, root);
- btrfs_btree_balance_dirty(root);
+ btrfs_end_transaction(trans);
+ btrfs_btree_balance_dirty(fs_info);
if (err) {
if (inode)
iput(inode);
return rc;
}
+/*
+ * Print the block group being relocated
+ */
+static void describe_relocation(struct btrfs_fs_info *fs_info,
+ struct btrfs_block_group_cache *block_group)
+{
+ char buf[128]; /* prefixed by a '|' that'll be dropped */
+ u64 flags = block_group->flags;
+
+ /* Shouldn't happen */
+ if (!flags) {
+ strcpy(buf, "|NONE");
+ } else {
+ char *bp = buf;
+
+#define DESCRIBE_FLAG(f, d) \
+ if (flags & BTRFS_BLOCK_GROUP_##f) { \
+ bp += snprintf(bp, buf - bp + sizeof(buf), "|%s", d); \
+ flags &= ~BTRFS_BLOCK_GROUP_##f; \
+ }
+ DESCRIBE_FLAG(DATA, "data");
+ DESCRIBE_FLAG(SYSTEM, "system");
+ DESCRIBE_FLAG(METADATA, "metadata");
+ DESCRIBE_FLAG(RAID0, "raid0");
+ DESCRIBE_FLAG(RAID1, "raid1");
+ DESCRIBE_FLAG(DUP, "dup");
+ DESCRIBE_FLAG(RAID10, "raid10");
+ DESCRIBE_FLAG(RAID5, "raid5");
+ DESCRIBE_FLAG(RAID6, "raid6");
+ if (flags)
+ snprintf(bp, buf - bp + sizeof(buf), "|0x%llx", flags);
+#undef DESCRIBE_FLAG
+ }
+
+ btrfs_info(fs_info,
+ "relocating block group %llu flags %s",
+ block_group->key.objectid, buf + 1);
+}
+
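describe_relocation() above builds the human-readable flag string by appending "|name" for each known bit and printing from buf + 1 so the leading '|' is dropped. The same pattern as a stand-alone program, with made-up flag bits rather than the real BTRFS_BLOCK_GROUP_* values:

#include <stdio.h>
#include <stdint.h>

#define FLAG_DATA     (1ULL << 0)
#define FLAG_METADATA (1ULL << 1)
#define FLAG_DUP      (1ULL << 2)

static void describe_flags(uint64_t flags, char *buf, size_t size)
{
    char *bp = buf;

    if (!flags) {
        snprintf(buf, size, "|NONE");
        return;
    }
#define DESCRIBE_FLAG(f, d)                                     \
    if (flags & (f)) {                                          \
        bp += snprintf(bp, size - (bp - buf), "|%s", (d));      \
        flags &= ~(uint64_t)(f);                                \
    }
    DESCRIBE_FLAG(FLAG_DATA, "data");
    DESCRIBE_FLAG(FLAG_METADATA, "metadata");
    DESCRIBE_FLAG(FLAG_DUP, "dup");
    if (flags)    /* leftover unknown bits, printed in hex */
        snprintf(bp, size - (bp - buf), "|0x%llx",
             (unsigned long long)flags);
#undef DESCRIBE_FLAG
}

int main(void)
{
    char buf[128];

    describe_flags(FLAG_METADATA | FLAG_DUP, buf, sizeof(buf));
    printf("%s\n", buf + 1);    /* prints "metadata|dup" */
    return 0;
}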
/*
* function to relocate all extents in a block group.
*/
-int btrfs_relocate_block_group(struct btrfs_root *extent_root, u64 group_start)
+int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
{
- struct btrfs_fs_info *fs_info = extent_root->fs_info;
+ struct btrfs_root *extent_root = fs_info->extent_root;
struct reloc_control *rc;
struct inode *inode;
struct btrfs_path *path;
goto out;
}
- btrfs_info(extent_root->fs_info,
- "relocating block group %llu flags %llu",
- rc->block_group->key.objectid, rc->block_group->flags);
+ describe_relocation(fs_info, rc->block_group);
btrfs_wait_block_group_reservations(rc->block_group);
btrfs_wait_nocow_writers(rc->block_group);
if (rc->extents_found == 0)
break;
- btrfs_info(extent_root->fs_info, "found %llu extents",
- rc->extents_found);
+ btrfs_info(fs_info, "found %llu extents", rc->extents_found);
if (rc->stage == MOVE_DATA_EXTENTS && rc->found_file_extent) {
ret = btrfs_wait_ordered_range(rc->data_inode, 0,
WARN_ON(btrfs_block_group_used(&rc->block_group->item) > 0);
out:
if (err && rw)
- btrfs_dec_block_group_ro(extent_root, rc->block_group);
+ btrfs_dec_block_group_ro(rc->block_group);
iput(rc->data_inode);
btrfs_put_block_group(rc->block_group);
kfree(rc);
static noinline_for_stack int mark_garbage_root(struct btrfs_root *root)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_trans_handle *trans;
int ret, err;
- trans = btrfs_start_transaction(root->fs_info->tree_root, 0);
+ trans = btrfs_start_transaction(fs_info->tree_root, 0);
if (IS_ERR(trans))
return PTR_ERR(trans);
sizeof(root->root_item.drop_progress));
root->root_item.drop_level = 0;
btrfs_set_root_refs(&root->root_item, 0);
- ret = btrfs_update_root(trans, root->fs_info->tree_root,
+ ret = btrfs_update_root(trans, fs_info->tree_root,
&root->root_key, &root->root_item);
- err = btrfs_end_transaction(trans, root->fs_info->tree_root);
+ err = btrfs_end_transaction(trans);
if (err)
return err;
return ret;
*/
int btrfs_recover_relocation(struct btrfs_root *root)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
LIST_HEAD(reloc_roots);
struct btrfs_key key;
struct btrfs_root *fs_root;
key.offset = (u64)-1;
while (1) {
- ret = btrfs_search_slot(NULL, root->fs_info->tree_root, &key,
+ ret = btrfs_search_slot(NULL, fs_info->tree_root, &key,
path, 0, 0);
if (ret < 0) {
err = ret;
list_add(&reloc_root->root_list, &reloc_roots);
if (btrfs_root_refs(&reloc_root->root_item) > 0) {
- fs_root = read_fs_root(root->fs_info,
+ fs_root = read_fs_root(fs_info,
reloc_root->root_key.offset);
if (IS_ERR(fs_root)) {
ret = PTR_ERR(fs_root);
if (list_empty(&reloc_roots))
goto out;
- rc = alloc_reloc_control(root->fs_info);
+ rc = alloc_reloc_control(fs_info);
if (!rc) {
err = -ENOMEM;
goto out;
}
- rc->extent_root = root->fs_info->extent_root;
+ rc->extent_root = fs_info->extent_root;
set_reloc_control(rc);
continue;
}
- fs_root = read_fs_root(root->fs_info,
- reloc_root->root_key.offset);
+ fs_root = read_fs_root(fs_info, reloc_root->root_key.offset);
if (IS_ERR(fs_root)) {
err = PTR_ERR(fs_root);
goto out_free;
fs_root->reloc_root = reloc_root;
}
- err = btrfs_commit_transaction(trans, rc->extent_root);
+ err = btrfs_commit_transaction(trans);
if (err)
goto out_free;
err = PTR_ERR(trans);
goto out_free;
}
- err = qgroup_fix_relocated_data_extents(trans, rc);
- if (err < 0) {
- btrfs_abort_transaction(trans, err);
- goto out_free;
- }
- err = btrfs_commit_transaction(trans, rc->extent_root);
+ err = btrfs_commit_transaction(trans);
out_free:
kfree(rc);
out:
if (err == 0) {
/* cleanup orphan inode in data relocation tree */
- fs_root = read_fs_root(root->fs_info,
- BTRFS_DATA_RELOC_TREE_OBJECTID);
+ fs_root = read_fs_root(fs_info, BTRFS_DATA_RELOC_TREE_OBJECTID);
if (IS_ERR(fs_root))
err = PTR_ERR(fs_root);
else
*/
int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_ordered_sum *sums;
struct btrfs_ordered_extent *ordered;
- struct btrfs_root *root = BTRFS_I(inode)->root;
int ret;
u64 disk_bytenr;
u64 new_bytenr;
BUG_ON(ordered->file_offset != file_pos || ordered->len != len);
disk_bytenr = file_pos + BTRFS_I(inode)->index_cnt;
- ret = btrfs_lookup_csums_range(root->fs_info->csum_root, disk_bytenr,
+ ret = btrfs_lookup_csums_range(fs_info->csum_root, disk_bytenr,
disk_bytenr + len - 1, &list, 0);
if (ret)
goto out;
struct btrfs_root *root, struct extent_buffer *buf,
struct extent_buffer *cow)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct reloc_control *rc;
struct backref_node *node;
int first_cow = 0;
int level;
int ret = 0;
- rc = root->fs_info->reloc_ctl;
+ rc = fs_info->reloc_ctl;
if (!rc)
return 0;
struct btrfs_root *root,
struct btrfs_log_ctx *ctx)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
int ret = 0;
mutex_lock(&root->log_mutex);
if (root->log_root) {
- if (btrfs_need_log_full_commit(root->fs_info, trans)) {
+ if (btrfs_need_log_full_commit(fs_info, trans)) {
ret = -EAGAIN;
goto out;
}
set_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
}
} else {
- mutex_lock(&root->fs_info->tree_log_mutex);
- if (!root->fs_info->log_root_tree)
- ret = btrfs_init_log_root_tree(trans, root->fs_info);
- mutex_unlock(&root->fs_info->tree_log_mutex);
+ mutex_lock(&fs_info->tree_log_mutex);
+ if (!fs_info->log_root_tree)
+ ret = btrfs_init_log_root_tree(trans, fs_info);
+ mutex_unlock(&fs_info->tree_log_mutex);
if (ret)
goto out;
struct extent_buffer *eb,
struct walk_control *wc, u64 gen)
{
+ struct btrfs_fs_info *fs_info = log->fs_info;
int ret = 0;
/*
* If this fs is mixed then we need to be able to process the leaves to
* pin down any logged extents, so we have to read the block.
*/
- if (btrfs_fs_incompat(log->fs_info, MIXED_GROUPS)) {
+ if (btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
ret = btrfs_read_buffer(eb, gen);
if (ret)
return ret;
}
if (wc->pin)
- ret = btrfs_pin_extent_for_log_replay(log->fs_info->extent_root,
- eb->start, eb->len);
+ ret = btrfs_pin_extent_for_log_replay(fs_info, eb->start,
+ eb->len);
if (!ret && btrfs_buffer_uptodate(eb, gen, 0)) {
if (wc->pin && btrfs_header_level(eb) == 0)
- ret = btrfs_exclude_logged_extents(log, eb);
+ ret = btrfs_exclude_logged_extents(fs_info, eb);
if (wc->write)
btrfs_write_tree_block(eb);
if (wc->wait)
struct extent_buffer *eb, int slot,
struct btrfs_key *key)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
u32 item_size;
u64 saved_i_size = 0;
found_size = btrfs_item_size_nr(path->nodes[0],
path->slots[0]);
if (found_size > item_size)
- btrfs_truncate_item(root, path, item_size, 1);
+ btrfs_truncate_item(fs_info, path, item_size, 1);
else if (found_size < item_size)
- btrfs_extend_item(root, path,
+ btrfs_extend_item(fs_info, path,
item_size - found_size);
} else if (ret) {
return ret;
struct extent_buffer *eb, int slot,
struct btrfs_key *key)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
int found_type;
u64 extent_end;
u64 start = key->offset;
} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
size = btrfs_file_extent_inline_len(eb, slot, item);
nbytes = btrfs_file_extent_ram_bytes(eb, item);
- extent_end = ALIGN(start + size, root->sectorsize);
+ extent_end = ALIGN(start + size,
+ fs_info->sectorsize);
} else {
ret = 0;
goto out;
* as the owner of the file extent changed from log tree
* (doesn't affect qgroup) to fs/file tree (affects qgroup)
*/
- ret = btrfs_qgroup_insert_dirty_extent(trans, root->fs_info,
+ ret = btrfs_qgroup_trace_extent(trans, fs_info,
btrfs_file_extent_disk_bytenr(eb, item),
btrfs_file_extent_disk_num_bytes(eb, item),
GFP_NOFS);
* is this extent already allocated in the extent
* allocation tree? If so, just add a reference
*/
- ret = btrfs_lookup_data_extent(root, ins.objectid,
+ ret = btrfs_lookup_data_extent(fs_info, ins.objectid,
ins.offset);
if (ret == 0) {
- ret = btrfs_inc_extent_ref(trans, root,
+ ret = btrfs_inc_extent_ref(trans, fs_info,
ins.objectid, ins.offset,
0, root->root_key.objectid,
key->objectid, offset);
* allocation tree
*/
ret = btrfs_alloc_logged_file_extent(trans,
- root, root->root_key.objectid,
+ fs_info,
+ root->root_key.objectid,
key->objectid, offset, &ins);
if (ret)
goto out;
struct btrfs_ordered_sum,
list);
if (!ret)
- ret = btrfs_del_csums(trans,
- root->fs_info->csum_root,
- sums->bytenr,
- sums->len);
+ ret = btrfs_del_csums(trans, fs_info,
+ sums->bytenr,
+ sums->len);
if (!ret)
ret = btrfs_csum_file_blocks(trans,
- root->fs_info->csum_root,
- sums);
+ fs_info->csum_root, sums);
list_del(&sums->list);
kfree(sums);
}
struct inode *dir,
struct btrfs_dir_item *di)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct inode *inode;
char *name;
int name_len;
if (ret)
goto out;
else
- ret = btrfs_run_delayed_items(trans, root);
+ ret = btrfs_run_delayed_items(trans, fs_info);
out:
kfree(name);
iput(inode);
u64 ref_index, char *name, int namelen,
int *search_done)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
char *victim_name;
int victim_name_len;
kfree(victim_name);
if (ret)
return ret;
- ret = btrfs_run_delayed_items(trans, root);
+ ret = btrfs_run_delayed_items(trans, fs_info);
if (ret)
return ret;
*search_done = 1;
victim_name_len);
if (!ret)
ret = btrfs_run_delayed_items(
- trans, root);
+ trans,
+ fs_info);
}
iput(victim_parent);
kfree(victim_name);
struct extent_buffer *eb, int slot,
struct btrfs_key *key)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
int ret = 0;
u32 item_size = btrfs_item_size_nr(eb, slot);
struct btrfs_dir_item *di;
ptr_end = ptr + item_size;
while (ptr < ptr_end) {
di = (struct btrfs_dir_item *)ptr;
- if (verify_dir_item(root, eb, di))
+ if (verify_dir_item(fs_info, eb, di))
return -EIO;
name_len = btrfs_dir_name_len(eb, di);
ret = replay_one_name(trans, root, path, eb, di, key);
next:
/* check the next slot in the tree to see if it is a valid item */
nritems = btrfs_header_nritems(path->nodes[0]);
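+ /*
+ * Advance the slot before the bounds check so that, once the last
+ * item in this leaf has been processed, we move on to the next leaf
+ * instead of reading a key past the end of the current one.
+ */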
+ path->slots[0]++;
if (path->slots[0] >= nritems) {
ret = btrfs_next_leaf(root, path);
if (ret)
goto out;
- } else {
- path->slots[0]++;
}
btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
struct inode *dir,
struct btrfs_key *dir_key)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
struct extent_buffer *eb;
int slot;
ptr_end = ptr + item_size;
while (ptr < ptr_end) {
di = (struct btrfs_dir_item *)ptr;
- if (verify_dir_item(root, eb, di)) {
+ if (verify_dir_item(fs_info, eb, di)) {
ret = -EIO;
goto out;
}
ret = btrfs_unlink_inode(trans, root, dir, inode,
name, name_len);
if (!ret)
- ret = btrfs_run_delayed_items(trans, root);
+ ret = btrfs_run_delayed_items(trans, fs_info);
kfree(name);
iput(inode);
if (ret)
struct btrfs_path *path, int *level,
struct walk_control *wc)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
u64 root_owner;
u64 bytenr;
u64 ptr_gen;
bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
- blocksize = root->nodesize;
+ blocksize = fs_info->nodesize;
parent = path->nodes[*level];
root_owner = btrfs_header_owner(parent);
- next = btrfs_find_create_tree_block(root, bytenr);
+ next = btrfs_find_create_tree_block(fs_info, bytenr);
if (IS_ERR(next))
return PTR_ERR(next);
if (trans) {
btrfs_tree_lock(next);
btrfs_set_lock_blocking(next);
- clean_tree_block(trans, root->fs_info,
- next);
+ clean_tree_block(trans, fs_info, next);
btrfs_wait_tree_block_writeback(next);
btrfs_tree_unlock(next);
}
WARN_ON(root_owner !=
BTRFS_TREE_LOG_OBJECTID);
- ret = btrfs_free_and_pin_reserved_extent(root,
- bytenr, blocksize);
+ ret = btrfs_free_and_pin_reserved_extent(
+ fs_info, bytenr,
+ blocksize);
if (ret) {
free_extent_buffer(next);
return ret;
struct btrfs_path *path, int *level,
struct walk_control *wc)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
u64 root_owner;
int i;
int slot;
if (trans) {
btrfs_tree_lock(next);
btrfs_set_lock_blocking(next);
- clean_tree_block(trans, root->fs_info,
- next);
+ clean_tree_block(trans, fs_info, next);
btrfs_wait_tree_block_writeback(next);
btrfs_tree_unlock(next);
}
WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID);
- ret = btrfs_free_and_pin_reserved_extent(root,
+ ret = btrfs_free_and_pin_reserved_extent(
+ fs_info,
path->nodes[*level]->start,
path->nodes[*level]->len);
if (ret)
static int walk_log_tree(struct btrfs_trans_handle *trans,
struct btrfs_root *log, struct walk_control *wc)
{
+ struct btrfs_fs_info *fs_info = log->fs_info;
int ret = 0;
int wret;
int level;
if (trans) {
btrfs_tree_lock(next);
btrfs_set_lock_blocking(next);
- clean_tree_block(trans, log->fs_info, next);
+ clean_tree_block(trans, fs_info, next);
btrfs_wait_tree_block_writeback(next);
btrfs_tree_unlock(next);
}
WARN_ON(log->root_key.objectid !=
BTRFS_TREE_LOG_OBJECTID);
- ret = btrfs_free_and_pin_reserved_extent(log, next->start,
- next->len);
+ ret = btrfs_free_and_pin_reserved_extent(fs_info,
+ next->start, next->len);
if (ret)
goto out;
}
static int update_log_root(struct btrfs_trans_handle *trans,
struct btrfs_root *log)
{
+ struct btrfs_fs_info *fs_info = log->fs_info;
int ret;
if (log->log_transid == 1) {
/* insert root item on the first sync */
- ret = btrfs_insert_root(trans, log->fs_info->log_root_tree,
+ ret = btrfs_insert_root(trans, fs_info->log_root_tree,
&log->root_key, &log->root_item);
} else {
- ret = btrfs_update_root(trans, log->fs_info->log_root_tree,
+ ret = btrfs_update_root(trans, fs_info->log_root_tree,
&log->root_key, &log->root_item);
}
return ret;
int index2;
int mark;
int ret;
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_root *log = root->log_root;
- struct btrfs_root *log_root_tree = root->fs_info->log_root_tree;
+ struct btrfs_root *log_root_tree = fs_info->log_root_tree;
int log_transid = 0;
struct btrfs_log_ctx root_log_ctx;
struct blk_plug plug;
while (1) {
int batch = atomic_read(&root->log_batch);
/* when we're on an ssd, just kick the log commit out */
- if (!btrfs_test_opt(root->fs_info, SSD) &&
+ if (!btrfs_test_opt(fs_info, SSD) &&
test_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state)) {
mutex_unlock(&root->log_mutex);
schedule_timeout_uninterruptible(1);
}
/* bail out if we need to do a full commit */
- if (btrfs_need_log_full_commit(root->fs_info, trans)) {
+ if (btrfs_need_log_full_commit(fs_info, trans)) {
ret = -EAGAIN;
btrfs_free_logged_extents(log, log_transid);
mutex_unlock(&root->log_mutex);
* wait for them until later.
*/
blk_start_plug(&plug);
- ret = btrfs_write_marked_extents(log, &log->dirty_log_pages, mark);
+ ret = btrfs_write_marked_extents(fs_info, &log->dirty_log_pages, mark);
if (ret) {
blk_finish_plug(&plug);
btrfs_abort_transaction(trans, ret);
btrfs_free_logged_extents(log, log_transid);
- btrfs_set_log_full_commit(root->fs_info, trans);
+ btrfs_set_log_full_commit(fs_info, trans);
mutex_unlock(&root->log_mutex);
goto out;
}
list_del_init(&root_log_ctx.list);
blk_finish_plug(&plug);
- btrfs_set_log_full_commit(root->fs_info, trans);
+ btrfs_set_log_full_commit(fs_info, trans);
if (ret != -ENOSPC) {
btrfs_abort_transaction(trans, ret);
mutex_unlock(&log_root_tree->log_mutex);
goto out;
}
- btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
+ btrfs_wait_tree_log_extents(log, mark);
btrfs_free_logged_extents(log, log_transid);
mutex_unlock(&log_root_tree->log_mutex);
ret = -EAGAIN;
index2 = root_log_ctx.log_transid % 2;
if (atomic_read(&log_root_tree->log_commit[index2])) {
blk_finish_plug(&plug);
- ret = btrfs_wait_marked_extents(log, &log->dirty_log_pages,
- mark);
+ ret = btrfs_wait_tree_log_extents(log, mark);
btrfs_wait_logged_extents(trans, log, log_transid);
wait_log_commit(log_root_tree,
root_log_ctx.log_transid);
* now that we've moved on to the tree of log tree roots,
* check the full commit flag again
*/
- if (btrfs_need_log_full_commit(root->fs_info, trans)) {
+ if (btrfs_need_log_full_commit(fs_info, trans)) {
blk_finish_plug(&plug);
- btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
+ btrfs_wait_tree_log_extents(log, mark);
btrfs_free_logged_extents(log, log_transid);
mutex_unlock(&log_root_tree->log_mutex);
ret = -EAGAIN;
goto out_wake_log_root;
}
- ret = btrfs_write_marked_extents(log_root_tree,
+ ret = btrfs_write_marked_extents(fs_info,
&log_root_tree->dirty_log_pages,
EXTENT_DIRTY | EXTENT_NEW);
blk_finish_plug(&plug);
if (ret) {
- btrfs_set_log_full_commit(root->fs_info, trans);
+ btrfs_set_log_full_commit(fs_info, trans);
btrfs_abort_transaction(trans, ret);
btrfs_free_logged_extents(log, log_transid);
mutex_unlock(&log_root_tree->log_mutex);
goto out_wake_log_root;
}
- ret = btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
+ ret = btrfs_wait_tree_log_extents(log, mark);
if (!ret)
- ret = btrfs_wait_marked_extents(log_root_tree,
- &log_root_tree->dirty_log_pages,
- EXTENT_NEW | EXTENT_DIRTY);
+ ret = btrfs_wait_tree_log_extents(log_root_tree,
+ EXTENT_NEW | EXTENT_DIRTY);
if (ret) {
- btrfs_set_log_full_commit(root->fs_info, trans);
+ btrfs_set_log_full_commit(fs_info, trans);
btrfs_free_logged_extents(log, log_transid);
mutex_unlock(&log_root_tree->log_mutex);
goto out_wake_log_root;
}
btrfs_wait_logged_extents(trans, log, log_transid);
- btrfs_set_super_log_root(root->fs_info->super_for_commit,
- log_root_tree->node->start);
- btrfs_set_super_log_root_level(root->fs_info->super_for_commit,
- btrfs_header_level(log_root_tree->node));
+ btrfs_set_super_log_root(fs_info->super_for_commit,
+ log_root_tree->node->start);
+ btrfs_set_super_log_root_level(fs_info->super_for_commit,
+ btrfs_header_level(log_root_tree->node));
log_root_tree->log_transid++;
mutex_unlock(&log_root_tree->log_mutex);
* the running transaction open, so a full commit can't hop
* in and cause problems either.
*/
- ret = write_ctree_super(trans, root->fs_info->tree_root, 1);
+ ret = write_ctree_super(trans, fs_info, 1);
if (ret) {
- btrfs_set_log_full_commit(root->fs_info, trans);
+ btrfs_set_log_full_commit(fs_info, trans);
btrfs_abort_transaction(trans, ret);
goto out_wake_log_root;
}
const char *name, int name_len,
struct inode *inode, u64 dirid)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_root *log;
u64 index;
int ret;
dirid, &index);
mutex_unlock(&BTRFS_I(inode)->log_mutex);
if (ret == -ENOSPC) {
- btrfs_set_log_full_commit(root->fs_info, trans);
+ btrfs_set_log_full_commit(fs_info, trans);
ret = 0;
} else if (ret < 0 && ret != -ENOENT)
btrfs_abort_transaction(trans, ret);
int start_slot, int nr, int inode_only,
u64 logged_isize)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
unsigned long src_offset;
unsigned long dst_offset;
struct btrfs_root *log = BTRFS_I(inode)->root->log_root;
}
ret = btrfs_lookup_csums_range(
- log->fs_info->csum_root,
+ fs_info->csum_root,
ds + cs, ds + cs + cl - 1,
&ordered_sums, 0);
if (ret) {
src_path->slots[0],
extent);
*last_extent = ALIGN(key.offset + len,
- log->sectorsize);
+ fs_info->sectorsize);
} else {
len = btrfs_file_extent_num_bytes(src, extent);
*last_extent = key.offset + len;
if (btrfs_file_extent_type(src, extent) ==
BTRFS_FILE_EXTENT_INLINE) {
len = btrfs_file_extent_inline_len(src, i, extent);
- extent_end = ALIGN(key.offset + len, log->sectorsize);
+ extent_end = ALIGN(key.offset + len,
+ fs_info->sectorsize);
} else {
len = btrfs_file_extent_num_bytes(src, extent);
extent_end = key.offset + len;
const struct list_head *logged_list,
bool *ordered_io_error)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_ordered_extent *ordered;
struct btrfs_root *log = root->log_root;
u64 mod_start = em->mod_start;
* i_mapping flags, so that the next fsync won't get
* an outdated io error too.
*/
- btrfs_inode_check_errors(inode);
+ filemap_check_errors(inode->i_mapping);
*ordered_io_error = true;
break;
}
}
/* block start is already adjusted for the file extent offset. */
- ret = btrfs_lookup_csums_range(log->fs_info->csum_root,
+ ret = btrfs_lookup_csums_range(fs_info->csum_root,
em->block_start + csum_offset,
em->block_start + csum_offset +
csum_len - 1, &ordered_sums, 0);
* without writing to the log tree and the fsync must report the
* file data write error and not commit the current transaction.
*/
- ret = btrfs_inode_check_errors(inode);
+ ret = filemap_check_errors(inode->i_mapping);
if (ret)
ctx->io_err = ret;
process:
struct inode *inode,
struct btrfs_path *path)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
struct btrfs_key key;
u64 hole_start;
const u64 ino = btrfs_ino(inode);
const u64 i_size = i_size_read(inode);
- if (!btrfs_fs_incompat(root->fs_info, NO_HOLES))
+ if (!btrfs_fs_incompat(fs_info, NO_HOLES))
return 0;
key.objectid = ino;
if (hole_size == 0)
return 0;
- hole_size = ALIGN(hole_size, root->sectorsize);
+ hole_size = ALIGN(hole_size, fs_info->sectorsize);
ret = btrfs_insert_file_extent(trans, log, ino, hole_start, 0, 0,
hole_size, 0, hole_size, 0, 0, 0);
return ret;
const loff_t end,
struct btrfs_log_ctx *ctx)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_path *path;
struct btrfs_path *dst_path;
struct btrfs_key min_key;
* fixup (create temporary BTRFS_TREE_LOG_FIXUP_OBJECTID items).
*/
if (S_ISDIR(inode->i_mode) ||
- BTRFS_I(inode)->generation > root->fs_info->last_trans_committed)
+ BTRFS_I(inode)->generation > fs_info->last_trans_committed)
ret = btrfs_commit_inode_delayed_items(trans, inode);
else
ret = btrfs_commit_inode_delayed_inode(inode);
inode_key.objectid = other_ino;
inode_key.type = BTRFS_INODE_ITEM_KEY;
inode_key.offset = 0;
- other_inode = btrfs_iget(root->fs_info->sb,
+ other_inode = btrfs_iget(fs_info->sb,
&inode_key, root,
NULL);
/*
struct inode *start_inode,
struct btrfs_log_ctx *ctx)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_root *log = root->log_root;
struct btrfs_path *path;
LIST_HEAD(dir_list);
if (di_key.type == BTRFS_ROOT_ITEM_KEY)
continue;
- di_inode = btrfs_iget(root->fs_info->sb, &di_key,
- root, NULL);
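+ /*
+ * Drop the search path before looking up and logging the inode; once
+ * the path is released the remaining items of this leaf can no longer
+ * be walked, so the loop below breaks out instead of continuing.
+ */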
+ btrfs_release_path(path);
+ di_inode = btrfs_iget(fs_info->sb, &di_key, root, NULL);
if (IS_ERR(di_inode)) {
ret = PTR_ERR(di_inode);
goto next_dir_inode;
if (btrfs_inode_in_log(di_inode, trans->transid)) {
iput(di_inode);
- continue;
+ break;
}
ctx->log_new_dentries = false;
if (type == BTRFS_FT_DIR || type == BTRFS_FT_SYMLINK)
log_mode = LOG_INODE_ALL;
- btrfs_release_path(path);
ret = btrfs_log_inode(trans, root, di_inode,
log_mode, 0, LLONG_MAX, ctx);
if (!ret &&
struct inode *inode,
struct btrfs_log_ctx *ctx)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
int ret;
struct btrfs_path *path;
struct btrfs_key key;
cur_offset = item_size;
}
- dir_inode = btrfs_iget(root->fs_info->sb, &inode_key,
+ dir_inode = btrfs_iget(fs_info->sb, &inode_key,
root, NULL);
/* If parent inode was deleted, skip it. */
if (IS_ERR(dir_inode))
int exists_only,
struct btrfs_log_ctx *ctx)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
int inode_only = exists_only ? LOG_INODE_EXISTS : LOG_INODE_ALL;
struct super_block *sb;
struct dentry *old_parent = NULL;
int ret = 0;
- u64 last_committed = root->fs_info->last_trans_committed;
+ u64 last_committed = fs_info->last_trans_committed;
bool log_dentries = false;
struct inode *orig_inode = inode;
sb = inode->i_sb;
- if (btrfs_test_opt(root->fs_info, NOTREELOG)) {
+ if (btrfs_test_opt(fs_info, NOTREELOG)) {
ret = 1;
goto end_no_trans;
}
* The previous transaction commit didn't complete, so we need to do a
* full commit ourselves.
*/
- if (root->fs_info->last_trans_log_full_commit >
- root->fs_info->last_trans_committed) {
+ if (fs_info->last_trans_log_full_commit >
+ fs_info->last_trans_committed) {
ret = 1;
goto end_no_trans;
}
end_trans:
dput(old_parent);
if (ret < 0) {
- btrfs_set_log_full_commit(root->fs_info, trans);
+ btrfs_set_log_full_commit(fs_info, trans);
ret = 1;
}
btrfs_free_path(path);
/* step 4: commit the transaction, which also unpins the blocks */
- ret = btrfs_commit_transaction(trans, fs_info->tree_root);
+ ret = btrfs_commit_transaction(trans);
if (ret)
return ret;
return 0;
error:
if (wc.trans)
- btrfs_end_transaction(wc.trans, fs_info->tree_root);
+ btrfs_end_transaction(wc.trans);
btrfs_free_path(path);
return ret;
}
struct inode *inode, struct inode *old_dir,
struct dentry *parent)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root * root = BTRFS_I(inode)->root;
/*
* from hasn't been logged, we don't need to log it
*/
if (BTRFS_I(inode)->logged_trans <=
- root->fs_info->last_trans_committed &&
+ fs_info->last_trans_committed &&
(!old_dir || BTRFS_I(old_dir)->logged_trans <=
- root->fs_info->last_trans_committed))
+ fs_info->last_trans_committed))
return 0;
return btrfs_log_inode_parent(trans, root, inode, parent, 0,