if (value) {
acl = posix_acl_from_xattr(value, size);
+ if (IS_ERR(acl))
+ return PTR_ERR(acl);
+
if (acl) {
ret = posix_acl_valid(acl);
if (ret)
goto out;
- } else if (IS_ERR(acl)) {
- return PTR_ERR(acl);
}
}
return 0;
acl = btrfs_get_acl(inode, ACL_TYPE_ACCESS);
- if (IS_ERR(acl) || !acl)
+ if (IS_ERR_OR_NULL(acl))
return PTR_ERR(acl);
clone = posix_acl_clone(acl, GFP_KERNEL);
kunmap_atomic(kaddr, KM_USER0);
if (csum != *cb_sum) {
- printk(KERN_INFO "btrfs csum failed ino %lu "
+ printk(KERN_INFO "btrfs csum failed ino %llu "
"extent %llu csum %u "
- "wanted %u mirror %d\n", inode->i_ino,
+ "wanted %u mirror %d\n",
+ (unsigned long long)btrfs_ino(inode),
(unsigned long long)disk_start,
csum, *cb_sum, cb->mirror_num);
ret = -EIO;
struct compressed_bio *cb;
unsigned long bytes_left;
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
- int page_index = 0;
+ int pg_index = 0;
struct page *page;
u64 first_byte = disk_start;
struct block_device *bdev;
/* create and submit bios for the compressed pages */
bytes_left = compressed_len;
- for (page_index = 0; page_index < cb->nr_pages; page_index++) {
- page = compressed_pages[page_index];
+ for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
+ page = compressed_pages[pg_index];
page->mapping = inode->i_mapping;
if (bio->bi_size)
ret = io_tree->ops->merge_bio_hook(page, 0,
struct compressed_bio *cb)
{
unsigned long end_index;
- unsigned long page_index;
+ unsigned long pg_index;
u64 last_offset;
u64 isize = i_size_read(inode);
int ret;
end_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;
while (last_offset < compressed_end) {
- page_index = last_offset >> PAGE_CACHE_SHIFT;
+ pg_index = last_offset >> PAGE_CACHE_SHIFT;
- if (page_index > end_index)
+ if (pg_index > end_index)
break;
rcu_read_lock();
- page = radix_tree_lookup(&mapping->page_tree, page_index);
+ page = radix_tree_lookup(&mapping->page_tree, pg_index);
rcu_read_unlock();
if (page) {
misses++;
if (!page)
break;
- if (add_to_page_cache_lru(page, mapping, page_index,
+ if (add_to_page_cache_lru(page, mapping, pg_index,
GFP_NOFS)) {
page_cache_release(page);
goto next;
unsigned long uncompressed_len = bio->bi_vcnt * PAGE_CACHE_SIZE;
unsigned long compressed_len;
unsigned long nr_pages;
- unsigned long page_index;
+ unsigned long pg_index;
struct page *page;
struct block_device *bdev;
struct bio *comp_bio;
bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
- for (page_index = 0; page_index < nr_pages; page_index++) {
- cb->compressed_pages[page_index] = alloc_page(GFP_NOFS |
+ for (pg_index = 0; pg_index < nr_pages; pg_index++) {
+ cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS |
__GFP_HIGHMEM);
- if (!cb->compressed_pages[page_index])
+ if (!cb->compressed_pages[pg_index])
goto fail2;
}
cb->nr_pages = nr_pages;
comp_bio->bi_end_io = end_compressed_bio_read;
atomic_inc(&cb->pending_bios);
- for (page_index = 0; page_index < nr_pages; page_index++) {
- page = cb->compressed_pages[page_index];
+ for (pg_index = 0; pg_index < nr_pages; pg_index++) {
+ page = cb->compressed_pages[pg_index];
page->mapping = inode->i_mapping;
page->index = em_start >> PAGE_CACHE_SHIFT;
return 0;
fail2:
- for (page_index = 0; page_index < nr_pages; page_index++)
- free_page((unsigned long)cb->compressed_pages[page_index]);
+ for (pg_index = 0; pg_index < nr_pages; pg_index++)
+ free_page((unsigned long)cb->compressed_pages[pg_index]);
kfree(cb->compressed_pages);
fail1:
int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
unsigned long total_out, u64 disk_start,
struct bio_vec *bvec, int vcnt,
- unsigned long *page_index,
+ unsigned long *pg_index,
unsigned long *pg_offset)
{
unsigned long buf_offset;
unsigned long working_bytes = total_out - buf_start;
unsigned long bytes;
char *kaddr;
- struct page *page_out = bvec[*page_index].bv_page;
+ struct page *page_out = bvec[*pg_index].bv_page;
/*
* start byte is the first byte of the page we're currently
/* check if we need to pick another page */
if (*pg_offset == PAGE_CACHE_SIZE) {
- (*page_index)++;
- if (*page_index >= vcnt)
+ (*pg_index)++;
+ if (*pg_index >= vcnt)
return 0;
- page_out = bvec[*page_index].bv_page;
+ page_out = bvec[*pg_index].bv_page;
*pg_offset = 0;
start_byte = page_offset(page_out) - disk_start;
struct extent_buffer *src_buf);
static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
struct btrfs_path *path, int level, int slot);
-static int setup_items_for_insert(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct btrfs_path *path,
- struct btrfs_key *cpu_key, u32 *data_size,
- u32 total_data, u32 total_size, int nr);
-
struct btrfs_path *btrfs_alloc_path(void)
{
* retake all the spinlocks in the path. You can safely use NULL
* for held
*/
-static noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
- struct extent_buffer *held)
+noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
+ struct extent_buffer *held)
{
int i;
{
if (!p)
return;
- btrfs_release_path(NULL, p);
+ btrfs_release_path(p);
kmem_cache_free(btrfs_path_cachep, p);
}
*
 * It is safe to call this on paths that have no locks or extent buffers held.
*/
- noinline void btrfs_release_path(struct btrfs_root *root, struct btrfs_path *p)
+ noinline void btrfs_release_path(struct btrfs_path *p)
{
int i;
ret = -EAGAIN;
/* release the whole path */
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
/* read the blocks */
if (block1)
return 0;
}
free_extent_buffer(tmp);
- btrfs_release_path(NULL, p);
+ btrfs_release_path(p);
return -EIO;
}
}
if (p->reada)
reada_for_search(root, p, level, slot, key->objectid);
- btrfs_release_path(NULL, p);
+ btrfs_release_path(p);
ret = -EAGAIN;
tmp = read_tree_block(root, blocknr, blocksize, 0);
}
b = p->nodes[level];
if (!b) {
- btrfs_release_path(NULL, p);
+ btrfs_release_path(p);
goto again;
}
BUG_ON(btrfs_header_nritems(b) == 1);
if (!p->leave_spinning)
btrfs_set_path_blocking(p);
if (ret < 0)
- btrfs_release_path(root, p);
+ btrfs_release_path(p);
return ret;
}
struct btrfs_file_extent_item);
extent_len = btrfs_file_extent_num_bytes(leaf, fi);
}
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
path->keep_locks = 1;
path->search_for_split = 1;
* to save stack depth by doing the bulk of the work in a function
* that doesn't call btrfs_search_slot
*/
-static noinline_for_stack int
-setup_items_for_insert(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct btrfs_path *path,
- struct btrfs_key *cpu_key, u32 *data_size,
- u32 total_data, u32 total_size, int nr)
+int setup_items_for_insert(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, struct btrfs_path *path,
+ struct btrfs_key *cpu_key, u32 *data_size,
+ u32 total_data, u32 total_size, int nr)
{
struct btrfs_item *item;
int i;
ret = 0;
if (slot == 0) {
- struct btrfs_disk_key disk_key;
btrfs_cpu_key_to_disk(&disk_key, cpu_key);
ret = fixup_low_keys(trans, root, path, &disk_key, 1);
}
else
return 1;
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
return ret;
sret = btrfs_find_next_key(root, path, min_key, level,
cache_only, min_trans);
if (sret == 0) {
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
goto again;
} else {
goto out;
btrfs_node_key_to_cpu(c, &cur_key, slot);
orig_lowest = path->lowest_level;
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
path->lowest_level = level;
ret = btrfs_search_slot(NULL, root, &cur_key, path,
0, 0);
again:
level = 1;
next = NULL;
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
path->keep_locks = 1;
goto again;
if (ret < 0) {
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
goto done;
}
goto again;
if (ret < 0) {
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
goto done;
}
/* For storing free space cache */
#define BTRFS_FREE_SPACE_OBJECTID -11ULL
+/*
+ * The inode number assigned to the special inode for storing
+ * the free ino cache
+ */
+#define BTRFS_FREE_INO_OBJECTID -12ULL
+
/* dummy objectid represents multiple objectids */
#define BTRFS_MULTIPLE_OBJECTIDS -255ULL
*/
unsigned long reservation_progress;
- int full:1; /* indicates that we cannot allocate any more
+ unsigned int full:1; /* indicates that we cannot allocate any more
chunks for this space */
- int chunk_alloc:1; /* set if we are allocating a chunk */
+ unsigned int chunk_alloc:1; /* set if we are allocating a chunk */
- int force_alloc; /* set if we need to force a chunk alloc for
- this space */
+ unsigned int force_alloc; /* set if we need to force a chunk
+ alloc for this space */
struct list_head list;
u64 bytes_super;
u64 flags;
u64 sectorsize;
- int extents_thresh;
- int free_extents;
- int total_bitmaps;
unsigned int ro:1;
unsigned int dirty:1;
unsigned int iref:1;
struct btrfs_space_info *space_info;
/* free space cache stuff */
- spinlock_t tree_lock;
- struct rb_root free_space_offset;
- u64 free_space;
+ struct btrfs_free_space_ctl *free_space_ctl;
/* block group cache stuff */
struct rb_node cache_node;
struct reloc_control;
struct btrfs_device;
struct btrfs_fs_devices;
+struct btrfs_delayed_root;
struct btrfs_fs_info {
u8 fsid[BTRFS_FSID_SIZE];
u8 chunk_tree_uuid[BTRFS_UUID_SIZE];
/* logical->physical extent mapping */
struct btrfs_mapping_tree mapping_tree;
- /* block reservation for extent, checksum and root tree */
+ /*
+ * block reservation for extent, checksum, root tree and
+ * delayed dir index item
+ */
struct btrfs_block_rsv global_block_rsv;
/* block reservation for delay allocation */
struct btrfs_block_rsv delalloc_block_rsv;
* for the sys_munmap function call path
*/
struct btrfs_workers fixup_workers;
+ struct btrfs_workers delayed_workers;
struct task_struct *transaction_kthread;
struct task_struct *cleaner_kthread;
int thread_pool_size;
/* filesystem state */
u64 fs_state;
+
+ struct btrfs_delayed_root *delayed_root;
};
/*
spinlock_t accounting_lock;
struct btrfs_block_rsv *block_rsv;
+ /* free ino cache stuff */
+ struct mutex fs_commit_mutex;
+ struct btrfs_free_space_ctl *free_ino_ctl;
+ enum btrfs_caching_type cached;
+ spinlock_t cache_lock;
+ wait_queue_head_t cache_wait;
+ struct btrfs_free_space_ctl *free_ino_pinned;
+ u64 cache_progress;
+ struct inode *cache_inode;
+
struct mutex log_mutex;
wait_queue_head_t log_writer_wait;
wait_queue_head_t log_commit_wait[2];
/* red-black tree that keeps track of in-memory inodes */
struct rb_root inode_tree;
+ /*
+ * radix tree that keeps track of delayed nodes of every inode,
+ * protected by inode_lock
+ */
+ struct radix_tree_root delayed_nodes_tree;
/*
* right now this just gets used so that a root has its own devid
* for stat. It may be used for more later
return btrfs_stripe_offset(eb, btrfs_stripe_nr(c, nr));
}
- static inline void btrfs_set_stripe_offset_nr(struct extent_buffer *eb,
- struct btrfs_chunk *c, int nr,
- u64 val)
- {
- btrfs_set_stripe_offset(eb, btrfs_stripe_nr(c, nr), val);
- }
-
static inline u64 btrfs_stripe_devid_nr(struct extent_buffer *eb,
struct btrfs_chunk *c, int nr)
{
return btrfs_stripe_devid(eb, btrfs_stripe_nr(c, nr));
}
- static inline void btrfs_set_stripe_devid_nr(struct extent_buffer *eb,
- struct btrfs_chunk *c, int nr,
- u64 val)
- {
- btrfs_set_stripe_devid(eb, btrfs_stripe_nr(c, nr), val);
- }
-
/* struct btrfs_block_group_item */
BTRFS_SETGET_STACK_FUNCS(block_group_used, struct btrfs_block_group_item,
used, 64);
return (struct btrfs_timespec *)ptr;
}
- static inline struct btrfs_timespec *
- btrfs_inode_otime(struct btrfs_inode_item *inode_item)
- {
- unsigned long ptr = (unsigned long)inode_item;
- ptr += offsetof(struct btrfs_inode_item, otime);
- return (struct btrfs_timespec *)ptr;
- }
-
BTRFS_SETGET_FUNCS(timespec_sec, struct btrfs_timespec, sec, 64);
BTRFS_SETGET_FUNCS(timespec_nsec, struct btrfs_timespec, nsec, 32);
return (u8 *)ptr;
}
- static inline u8 *btrfs_super_fsid(struct extent_buffer *eb)
- {
- unsigned long ptr = offsetof(struct btrfs_super_block, fsid);
- return (u8 *)ptr;
- }
-
- static inline u8 *btrfs_header_csum(struct extent_buffer *eb)
- {
- unsigned long ptr = offsetof(struct btrfs_header, csum);
- return (u8 *)ptr;
- }
-
- static inline struct btrfs_node *btrfs_buffer_node(struct extent_buffer *eb)
- {
- return NULL;
- }
-
- static inline struct btrfs_leaf *btrfs_buffer_leaf(struct extent_buffer *eb)
- {
- return NULL;
- }
-
- static inline struct btrfs_header *btrfs_buffer_header(struct extent_buffer *eb)
- {
- return NULL;
- }
-
static inline int btrfs_is_leaf(struct extent_buffer *eb)
{
return btrfs_header_level(eb) == 0;
return sb->s_fs_info;
}
- static inline int btrfs_set_root_name(struct btrfs_root *root,
- const char *name, int len)
- {
- /* if we already have a name just free it */
- kfree(root->name);
-
- root->name = kmalloc(len+1, GFP_KERNEL);
- if (!root->name)
- return -ENOMEM;
-
- memcpy(root->name, name, len);
- root->name[len] = '\0';
-
- return 0;
- }
-
static inline u32 btrfs_level_size(struct btrfs_root *root, int level)
{
if (level == 0)
}
/* extent-tree.c */
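+/*
+ * Worst-case metadata footprint of touching num_items items: assume a
+ * full tree path (one leaf plus BTRFS_MAX_LEVEL - 1 nodes) is COWed per
+ * item, times 3 as headroom for node splits and for the other trees a
+ * change may touch.
+ */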
+static inline u64 btrfs_calc_trans_metadata_size(struct btrfs_root *root,
+ int num_items)
+{
+ return (root->leafsize + root->nodesize * (BTRFS_MAX_LEVEL - 1)) *
+ 3 * num_items;
+}
+
void btrfs_put_block_group(struct btrfs_block_group_cache *cache);
int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
struct btrfs_root *root, unsigned long count);
u64 num_bytes, u64 *refs, u64 *flags);
int btrfs_pin_extent(struct btrfs_root *root,
u64 bytenr, u64 num, int reserved);
- int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct extent_buffer *leaf);
int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
u64 objectid, u64 offset, u64 bytenr);
- int btrfs_copy_pinned(struct btrfs_root *root, struct extent_io_tree *copy);
struct btrfs_block_group_cache *btrfs_lookup_block_group(
struct btrfs_fs_info *info,
u64 bytenr);
struct btrfs_root *root, struct extent_buffer *parent,
int start_slot, int cache_only, u64 *last_ret,
struct btrfs_key *progress);
- void btrfs_release_path(struct btrfs_root *root, struct btrfs_path *p);
+ void btrfs_release_path(struct btrfs_path *p);
struct btrfs_path *btrfs_alloc_path(void);
void btrfs_free_path(struct btrfs_path *p);
void btrfs_set_path_blocking(struct btrfs_path *p);
+void btrfs_clear_path_blocking(struct btrfs_path *p,
+ struct extent_buffer *held);
void btrfs_unlock_up_safe(struct btrfs_path *p, int level);
int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
return btrfs_del_items(trans, root, path, path->slots[0], 1);
}
+int setup_items_for_insert(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, struct btrfs_path *path,
+ struct btrfs_key *cpu_key, u32 *data_size,
+ u32 total_data, u32 total_size, int nr);
int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
*root, struct btrfs_key *key, void *data, u32 data_size);
- int btrfs_insert_some_items(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_path *path,
- struct btrfs_key *cpu_key, u32 *data_size,
- int nr);
int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
*item);
int btrfs_find_last_root(struct btrfs_root *root, u64 objectid, struct
btrfs_root_item *item, struct btrfs_key *key);
- int btrfs_search_root(struct btrfs_root *root, u64 search_start,
- u64 *found_objectid);
int btrfs_find_dead_roots(struct btrfs_root *root, u64 objectid);
int btrfs_find_orphan_roots(struct btrfs_root *tree_root);
int btrfs_set_root_node(struct btrfs_root_item *item,
/* dir-item.c */
int btrfs_insert_dir_item(struct btrfs_trans_handle *trans,
struct btrfs_root *root, const char *name,
- int name_len, u64 dir,
+ int name_len, struct inode *dir,
struct btrfs_key *location, u8 type, u64 index);
struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_root *root, u64 offset);
int btrfs_find_orphan_item(struct btrfs_root *root, u64 offset);
-/* inode-map.c */
-int btrfs_find_free_objectid(struct btrfs_trans_handle *trans,
- struct btrfs_root *fs_root,
- u64 dirid, u64 *objectid);
-int btrfs_find_highest_inode(struct btrfs_root *fs_root, u64 *objectid);
-
/* inode-item.c */
int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_ordered_sum *sums);
int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
struct bio *bio, u64 file_start, int contig);
- int btrfs_csum_file_bytes(struct btrfs_root *root, struct inode *inode,
- u64 start, unsigned long len);
struct btrfs_csum_item *btrfs_lookup_csum(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
u64 bytenr, int cow);
- int btrfs_csum_truncate(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct btrfs_path *path,
- u64 isize);
int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start,
u64 end, struct list_head *list);
/* inode.c */
u32 min_type);
int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput);
- int btrfs_start_one_delalloc_inode(struct btrfs_root *root, int delay_iput,
- int sync);
int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
struct extent_state **cached_state);
int btrfs_writepages(struct address_space *mapping,
int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
int btrfs_readpage(struct file *file, struct page *page);
void btrfs_evict_inode(struct inode *inode);
- void btrfs_put_inode(struct inode *inode);
int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc);
void btrfs_dirty_inode(struct inode *inode);
struct inode *btrfs_alloc_inode(struct super_block *sb);
long btrfs_ioctl_trans_end(struct file *file);
struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
struct btrfs_root *root, int *was_new);
- int btrfs_commit_write(struct file *file, struct page *page,
- unsigned from, unsigned to);
struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
- size_t page_offset, u64 start, u64 end,
+ size_t pg_offset, u64 start, u64 end,
int create);
int btrfs_update_inode(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
int btrfs_sync_file(struct file *file, int datasync);
int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
int skip_pinned);
- int btrfs_check_file(struct btrfs_root *root, struct inode *inode);
extern const struct file_operations btrfs_file_operations;
int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode,
u64 start, u64 end, u64 *hint_byte, int drop_cache);
/* sysfs.c */
int btrfs_init_sysfs(void);
void btrfs_exit_sysfs(void);
- int btrfs_sysfs_add_super(struct btrfs_fs_info *fs);
- int btrfs_sysfs_add_root(struct btrfs_root *root);
- void btrfs_sysfs_del_root(struct btrfs_root *root);
- void btrfs_sysfs_del_super(struct btrfs_fs_info *root);
/* xattr.c */
ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size);
--- /dev/null
+/*
+ * Copyright (C) 2011 Fujitsu. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include <linux/slab.h>
+#include "delayed-inode.h"
+#include "disk-io.h"
+#include "transaction.h"
+
+#define BTRFS_DELAYED_WRITEBACK 400
+#define BTRFS_DELAYED_BACKGROUND 100
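+
+/*
+ * Rough thresholds on the number of queued delayed items: above
+ * BTRFS_DELAYED_BACKGROUND the items are flushed asynchronously, and
+ * above BTRFS_DELAYED_WRITEBACK the caller also waits briefly for the
+ * backlog to drain below the background level.
+ */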
+
+static struct kmem_cache *delayed_node_cache;
+
+int __init btrfs_delayed_inode_init(void)
+{
+ delayed_node_cache = kmem_cache_create("delayed_node",
+ sizeof(struct btrfs_delayed_node),
+ 0,
+ SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
+ NULL);
+ if (!delayed_node_cache)
+ return -ENOMEM;
+ return 0;
+}
+
+void btrfs_delayed_inode_exit(void)
+{
+ if (delayed_node_cache)
+ kmem_cache_destroy(delayed_node_cache);
+}
+
+static inline void btrfs_init_delayed_node(
+ struct btrfs_delayed_node *delayed_node,
+ struct btrfs_root *root, u64 inode_id)
+{
+ delayed_node->root = root;
+ delayed_node->inode_id = inode_id;
+ atomic_set(&delayed_node->refs, 0);
+ delayed_node->count = 0;
+ delayed_node->in_list = 0;
+ delayed_node->inode_dirty = 0;
+ delayed_node->ins_root = RB_ROOT;
+ delayed_node->del_root = RB_ROOT;
+ mutex_init(&delayed_node->mutex);
+ delayed_node->index_cnt = 0;
+ INIT_LIST_HEAD(&delayed_node->n_list);
+ INIT_LIST_HEAD(&delayed_node->p_list);
+ delayed_node->bytes_reserved = 0;
+}
+
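+/*
+ * Two delayed items are "continuous" if they are both dir index keys
+ * for the same inode with consecutive offsets; such runs can be
+ * inserted or deleted in one batched leaf operation.
+ */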
+static inline int btrfs_is_continuous_delayed_item(
+ struct btrfs_delayed_item *item1,
+ struct btrfs_delayed_item *item2)
+{
+ if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
+ item1->key.objectid == item2->key.objectid &&
+ item1->key.type == item2->key.type &&
+ item1->key.offset + 1 == item2->key.offset)
+ return 1;
+ return 0;
+}
+
+static inline struct btrfs_delayed_root *btrfs_get_delayed_root(
+ struct btrfs_root *root)
+{
+ return root->fs_info->delayed_root;
+}
+
+static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
+ struct inode *inode)
+{
+ struct btrfs_delayed_node *node;
+ struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
+ struct btrfs_root *root = btrfs_inode->root;
+ u64 ino = btrfs_ino(inode);
+ int ret;
+
+again:
+ node = ACCESS_ONCE(btrfs_inode->delayed_node);
+ if (node) {
+ atomic_inc(&node->refs); /* can be accessed */
+ return node;
+ }
+
+ spin_lock(&root->inode_lock);
+ node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
+ if (node) {
+ if (btrfs_inode->delayed_node) {
+ spin_unlock(&root->inode_lock);
+ goto again;
+ }
+ btrfs_inode->delayed_node = node;
+ atomic_inc(&node->refs); /* can be accessed */
+ atomic_inc(&node->refs); /* cached in the inode */
+ spin_unlock(&root->inode_lock);
+ return node;
+ }
+ spin_unlock(&root->inode_lock);
+
+ node = kmem_cache_alloc(delayed_node_cache, GFP_NOFS);
+ if (!node)
+ return ERR_PTR(-ENOMEM);
+ btrfs_init_delayed_node(node, root, ino);
+
+ atomic_inc(&node->refs); /* cached in the btrfs inode */
+ atomic_inc(&node->refs); /* can be accessed */
+
+ ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
+ if (ret) {
+ kmem_cache_free(delayed_node_cache, node);
+ return ERR_PTR(ret);
+ }
+
+ spin_lock(&root->inode_lock);
+ ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
+ if (ret == -EEXIST) {
+ kmem_cache_free(delayed_node_cache, node);
+ spin_unlock(&root->inode_lock);
+ radix_tree_preload_end();
+ goto again;
+ }
+ btrfs_inode->delayed_node = node;
+ spin_unlock(&root->inode_lock);
+ radix_tree_preload_end();
+
+ return node;
+}
+
+/*
+ * Call this while holding delayed_node->mutex.
+ *
+ * If mod = 1, also add this node to the prepare list.
+ */
+static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
+ struct btrfs_delayed_node *node,
+ int mod)
+{
+ spin_lock(&root->lock);
+ if (node->in_list) {
+ if (!list_empty(&node->p_list))
+ list_move_tail(&node->p_list, &root->prepare_list);
+ else if (mod)
+ list_add_tail(&node->p_list, &root->prepare_list);
+ } else {
+ list_add_tail(&node->n_list, &root->node_list);
+ list_add_tail(&node->p_list, &root->prepare_list);
+ atomic_inc(&node->refs); /* inserted into list */
+ root->nodes++;
+ node->in_list = 1;
+ }
+ spin_unlock(&root->lock);
+}
+
+/* Call this while holding delayed_node->mutex */
+static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
+ struct btrfs_delayed_node *node)
+{
+ spin_lock(&root->lock);
+ if (node->in_list) {
+ root->nodes--;
+ atomic_dec(&node->refs); /* not in the list */
+ list_del_init(&node->n_list);
+ if (!list_empty(&node->p_list))
+ list_del_init(&node->p_list);
+ node->in_list = 0;
+ }
+ spin_unlock(&root->lock);
+}
+
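+/*
+ * Grab a reference to the first queued delayed node; the caller must
+ * drop it with btrfs_release_delayed_node().
+ */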
+struct btrfs_delayed_node *btrfs_first_delayed_node(
+ struct btrfs_delayed_root *delayed_root)
+{
+ struct list_head *p;
+ struct btrfs_delayed_node *node = NULL;
+
+ spin_lock(&delayed_root->lock);
+ if (list_empty(&delayed_root->node_list))
+ goto out;
+
+ p = delayed_root->node_list.next;
+ node = list_entry(p, struct btrfs_delayed_node, n_list);
+ atomic_inc(&node->refs);
+out:
+ spin_unlock(&delayed_root->lock);
+
+ return node;
+}
+
+struct btrfs_delayed_node *btrfs_next_delayed_node(
+ struct btrfs_delayed_node *node)
+{
+ struct btrfs_delayed_root *delayed_root;
+ struct list_head *p;
+ struct btrfs_delayed_node *next = NULL;
+
+ delayed_root = node->root->fs_info->delayed_root;
+ spin_lock(&delayed_root->lock);
+ if (!node->in_list) { /* not in the list */
+ if (list_empty(&delayed_root->node_list))
+ goto out;
+ p = delayed_root->node_list.next;
+ } else if (list_is_last(&node->n_list, &delayed_root->node_list))
+ goto out;
+ else
+ p = node->n_list.next;
+
+ next = list_entry(p, struct btrfs_delayed_node, n_list);
+ atomic_inc(&next->refs);
+out:
+ spin_unlock(&delayed_root->lock);
+
+ return next;
+}
+
+static void __btrfs_release_delayed_node(
+ struct btrfs_delayed_node *delayed_node,
+ int mod)
+{
+ struct btrfs_delayed_root *delayed_root;
+
+ if (!delayed_node)
+ return;
+
+ delayed_root = delayed_node->root->fs_info->delayed_root;
+
+ mutex_lock(&delayed_node->mutex);
+ if (delayed_node->count)
+ btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
+ else
+ btrfs_dequeue_delayed_node(delayed_root, delayed_node);
+ mutex_unlock(&delayed_node->mutex);
+
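+	/*
+	 * The last reference can race with a concurrent lookup in the
+	 * radix tree, so recheck the refcount under inode_lock before
+	 * freeing the node.
+	 */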
+ if (atomic_dec_and_test(&delayed_node->refs)) {
+ struct btrfs_root *root = delayed_node->root;
+ spin_lock(&root->inode_lock);
+ if (atomic_read(&delayed_node->refs) == 0) {
+ radix_tree_delete(&root->delayed_nodes_tree,
+ delayed_node->inode_id);
+ kmem_cache_free(delayed_node_cache, delayed_node);
+ }
+ spin_unlock(&root->inode_lock);
+ }
+}
+
+static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
+{
+ __btrfs_release_delayed_node(node, 0);
+}
+
+struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
+ struct btrfs_delayed_root *delayed_root)
+{
+ struct list_head *p;
+ struct btrfs_delayed_node *node = NULL;
+
+ spin_lock(&delayed_root->lock);
+ if (list_empty(&delayed_root->prepare_list))
+ goto out;
+
+ p = delayed_root->prepare_list.next;
+ list_del_init(p);
+ node = list_entry(p, struct btrfs_delayed_node, p_list);
+ atomic_inc(&node->refs);
+out:
+ spin_unlock(&delayed_root->lock);
+
+ return node;
+}
+
+static inline void btrfs_release_prepared_delayed_node(
+ struct btrfs_delayed_node *node)
+{
+ __btrfs_release_delayed_node(node, 1);
+}
+
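+/*
+ * Allocate a delayed item with data_len bytes of payload stored inline
+ * right after the struct.
+ */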
+struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
+{
+ struct btrfs_delayed_item *item;
+ item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
+ if (item) {
+ item->data_len = data_len;
+ item->ins_or_del = 0;
+ item->bytes_reserved = 0;
+ item->block_rsv = NULL;
+ item->delayed_node = NULL;
+ atomic_set(&item->refs, 1);
+ }
+ return item;
+}
+
+/*
+ * __btrfs_lookup_delayed_item - look up the delayed item by key
+ * @delayed_node: pointer to the delayed node
+ * @key: the key to look up
+ * @prev: used to store the prev item if the right item isn't found
+ * @next: used to store the next item if the right item isn't found
+ *
+ * Note: if we don't find the right item, we will return the prev item and
+ * the next item.
+ */
+static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
+ struct rb_root *root,
+ struct btrfs_key *key,
+ struct btrfs_delayed_item **prev,
+ struct btrfs_delayed_item **next)
+{
+ struct rb_node *node, *prev_node = NULL;
+ struct btrfs_delayed_item *delayed_item = NULL;
+ int ret = 0;
+
+ node = root->rb_node;
+
+ while (node) {
+ delayed_item = rb_entry(node, struct btrfs_delayed_item,
+ rb_node);
+ prev_node = node;
+ ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
+ if (ret < 0)
+ node = node->rb_right;
+ else if (ret > 0)
+ node = node->rb_left;
+ else
+ return delayed_item;
+ }
+
+ if (prev) {
+ if (!prev_node)
+ *prev = NULL;
+ else if (ret < 0)
+ *prev = delayed_item;
+ else if ((node = rb_prev(prev_node)) != NULL) {
+ *prev = rb_entry(node, struct btrfs_delayed_item,
+ rb_node);
+ } else
+ *prev = NULL;
+ }
+
+ if (next) {
+ if (!prev_node)
+ *next = NULL;
+ else if (ret > 0)
+ *next = delayed_item;
+ else if ((node = rb_next(prev_node)) != NULL) {
+ *next = rb_entry(node, struct btrfs_delayed_item,
+ rb_node);
+ } else
+ *next = NULL;
+ }
+ return NULL;
+}
+
+struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
+ struct btrfs_delayed_node *delayed_node,
+ struct btrfs_key *key)
+{
+ struct btrfs_delayed_item *item;
+
+ item = __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
+ NULL, NULL);
+ return item;
+}
+
+struct btrfs_delayed_item *__btrfs_lookup_delayed_deletion_item(
+ struct btrfs_delayed_node *delayed_node,
+ struct btrfs_key *key)
+{
+ struct btrfs_delayed_item *item;
+
+ item = __btrfs_lookup_delayed_item(&delayed_node->del_root, key,
+ NULL, NULL);
+ return item;
+}
+
+struct btrfs_delayed_item *__btrfs_search_delayed_insertion_item(
+ struct btrfs_delayed_node *delayed_node,
+ struct btrfs_key *key)
+{
+ struct btrfs_delayed_item *item, *next;
+
+ item = __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
+ NULL, &next);
+ if (!item)
+ item = next;
+
+ return item;
+}
+
+struct btrfs_delayed_item *__btrfs_search_delayed_deletion_item(
+ struct btrfs_delayed_node *delayed_node,
+ struct btrfs_key *key)
+{
+ struct btrfs_delayed_item *item, *next;
+
+ item = __btrfs_lookup_delayed_item(&delayed_node->del_root, key,
+ NULL, &next);
+ if (!item)
+ item = next;
+
+ return item;
+}
+
+static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
+ struct btrfs_delayed_item *ins,
+ int action)
+{
+ struct rb_node **p, *node;
+ struct rb_node *parent_node = NULL;
+ struct rb_root *root;
+ struct btrfs_delayed_item *item;
+ int cmp;
+
+ if (action == BTRFS_DELAYED_INSERTION_ITEM)
+ root = &delayed_node->ins_root;
+ else if (action == BTRFS_DELAYED_DELETION_ITEM)
+ root = &delayed_node->del_root;
+ else
+ BUG();
+ p = &root->rb_node;
+ node = &ins->rb_node;
+
+ while (*p) {
+ parent_node = *p;
+ item = rb_entry(parent_node, struct btrfs_delayed_item,
+ rb_node);
+
+ cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
+ if (cmp < 0)
+ p = &(*p)->rb_right;
+ else if (cmp > 0)
+ p = &(*p)->rb_left;
+ else
+ return -EEXIST;
+ }
+
+ rb_link_node(node, parent_node, p);
+ rb_insert_color(node, root);
+ ins->delayed_node = delayed_node;
+ ins->ins_or_del = action;
+
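+	/* keep index_cnt pointing one past the highest queued dir index */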
+ if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
+ action == BTRFS_DELAYED_INSERTION_ITEM &&
+ ins->key.offset >= delayed_node->index_cnt)
+ delayed_node->index_cnt = ins->key.offset + 1;
+
+ delayed_node->count++;
+ atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
+ return 0;
+}
+
+static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
+ struct btrfs_delayed_item *item)
+{
+ return __btrfs_add_delayed_item(node, item,
+ BTRFS_DELAYED_INSERTION_ITEM);
+}
+
+static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
+ struct btrfs_delayed_item *item)
+{
+ return __btrfs_add_delayed_item(node, item,
+ BTRFS_DELAYED_DELETION_ITEM);
+}
+
+static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
+{
+ struct rb_root *root;
+ struct btrfs_delayed_root *delayed_root;
+
+ delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;
+
+ BUG_ON(!delayed_root);
+ BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
+ delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);
+
+ if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
+ root = &delayed_item->delayed_node->ins_root;
+ else
+ root = &delayed_item->delayed_node->del_root;
+
+ rb_erase(&delayed_item->rb_node, root);
+ delayed_item->delayed_node->count--;
+ atomic_dec(&delayed_root->items);
+ if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND &&
+ waitqueue_active(&delayed_root->wait))
+ wake_up(&delayed_root->wait);
+}
+
+static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
+{
+ if (item) {
+ __btrfs_remove_delayed_item(item);
+ if (atomic_dec_and_test(&item->refs))
+ kfree(item);
+ }
+}
+
+struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
+ struct btrfs_delayed_node *delayed_node)
+{
+ struct rb_node *p;
+ struct btrfs_delayed_item *item = NULL;
+
+ p = rb_first(&delayed_node->ins_root);
+ if (p)
+ item = rb_entry(p, struct btrfs_delayed_item, rb_node);
+
+ return item;
+}
+
+struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
+ struct btrfs_delayed_node *delayed_node)
+{
+ struct rb_node *p;
+ struct btrfs_delayed_item *item = NULL;
+
+ p = rb_first(&delayed_node->del_root);
+ if (p)
+ item = rb_entry(p, struct btrfs_delayed_item, rb_node);
+
+ return item;
+}
+
+struct btrfs_delayed_item *__btrfs_next_delayed_item(
+ struct btrfs_delayed_item *item)
+{
+ struct rb_node *p;
+ struct btrfs_delayed_item *next = NULL;
+
+ p = rb_next(&item->rb_node);
+ if (p)
+ next = rb_entry(p, struct btrfs_delayed_item, rb_node);
+
+ return next;
+}
+
+static inline struct btrfs_delayed_node *btrfs_get_delayed_node(
+ struct inode *inode)
+{
+ struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
+ struct btrfs_delayed_node *delayed_node;
+
+ delayed_node = btrfs_inode->delayed_node;
+ if (delayed_node)
+ atomic_inc(&delayed_node->refs);
+
+ return delayed_node;
+}
+
+static inline struct btrfs_root *btrfs_get_fs_root(struct btrfs_root *root,
+ u64 root_id)
+{
+ struct btrfs_key root_key;
+
+ if (root->objectid == root_id)
+ return root;
+
+ root_key.objectid = root_id;
+ root_key.type = BTRFS_ROOT_ITEM_KEY;
+ root_key.offset = (u64)-1;
+ return btrfs_read_fs_root_no_name(root->fs_info, &root_key);
+}
+
+static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_delayed_item *item)
+{
+ struct btrfs_block_rsv *src_rsv;
+ struct btrfs_block_rsv *dst_rsv;
+ u64 num_bytes;
+ int ret;
+
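+	/* nothing was reserved on this handle (e.g. a joined transaction) */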
+ if (!trans->bytes_reserved)
+ return 0;
+
+ src_rsv = trans->block_rsv;
+ dst_rsv = &root->fs_info->global_block_rsv;
+
+ num_bytes = btrfs_calc_trans_metadata_size(root, 1);
+ ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
+ if (!ret) {
+ item->bytes_reserved = num_bytes;
+ item->block_rsv = dst_rsv;
+ }
+
+ return ret;
+}
+
+static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
+ struct btrfs_delayed_item *item)
+{
+ if (!item->bytes_reserved)
+ return;
+
+ btrfs_block_rsv_release(root, item->block_rsv,
+ item->bytes_reserved);
+}
+
+static int btrfs_delayed_inode_reserve_metadata(
+ struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_delayed_node *node)
+{
+ struct btrfs_block_rsv *src_rsv;
+ struct btrfs_block_rsv *dst_rsv;
+ u64 num_bytes;
+ int ret;
+
+ if (!trans->bytes_reserved)
+ return 0;
+
+ src_rsv = trans->block_rsv;
+ dst_rsv = &root->fs_info->global_block_rsv;
+
+ num_bytes = btrfs_calc_trans_metadata_size(root, 1);
+ ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
+ if (!ret)
+ node->bytes_reserved = num_bytes;
+
+ return ret;
+}
+
+static void btrfs_delayed_inode_release_metadata(struct btrfs_root *root,
+ struct btrfs_delayed_node *node)
+{
+ struct btrfs_block_rsv *rsv;
+
+ if (!node->bytes_reserved)
+ return;
+
+ rsv = &root->fs_info->global_block_rsv;
+ btrfs_block_rsv_release(root, rsv,
+ node->bytes_reserved);
+ node->bytes_reserved = 0;
+}
+
+/*
+ * This helper will insert some continuous items into the same leaf according
+ * to the free space of the leaf.
+ */
+static int btrfs_batch_insert_items(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_path *path,
+ struct btrfs_delayed_item *item)
+{
+ struct btrfs_delayed_item *curr, *next;
+ int free_space;
+ int total_data_size = 0, total_size = 0;
+ struct extent_buffer *leaf;
+ char *data_ptr;
+ struct btrfs_key *keys;
+ u32 *data_size;
+ struct list_head head;
+ int slot;
+ int nitems;
+ int i;
+ int ret = 0;
+
+ BUG_ON(!path->nodes[0]);
+
+ leaf = path->nodes[0];
+ free_space = btrfs_leaf_free_space(root, leaf);
+ INIT_LIST_HEAD(&head);
+
+	next = item;
+	nitems = 0;
+
+ /*
+	 * count the number of continuous items that we can insert in one batch
+ */
+ while (total_size + next->data_len + sizeof(struct btrfs_item) <=
+ free_space) {
+ total_data_size += next->data_len;
+ total_size += next->data_len + sizeof(struct btrfs_item);
+ list_add_tail(&next->tree_list, &head);
+ nitems++;
+
+ curr = next;
+ next = __btrfs_next_delayed_item(curr);
+ if (!next)
+ break;
+
+ if (!btrfs_is_continuous_delayed_item(curr, next))
+ break;
+ }
+
+ if (!nitems) {
+ ret = 0;
+ goto out;
+ }
+
+ /*
+	 * we need to allocate some memory, but that might cause the task
+	 * to sleep, so we set all locked nodes in the path to blocking locks
+	 * first.
+ */
+ btrfs_set_path_blocking(path);
+
+ keys = kmalloc(sizeof(struct btrfs_key) * nitems, GFP_NOFS);
+ if (!keys) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ data_size = kmalloc(sizeof(u32) * nitems, GFP_NOFS);
+ if (!data_size) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ /* get keys of all the delayed items */
+ i = 0;
+ list_for_each_entry(next, &head, tree_list) {
+ keys[i] = next->key;
+ data_size[i] = next->data_len;
+ i++;
+ }
+
+	/* reset all the locked nodes in the path to spinning locks. */
+ btrfs_clear_path_blocking(path, NULL);
+
+ /* insert the keys of the items */
+ ret = setup_items_for_insert(trans, root, path, keys, data_size,
+ total_data_size, total_size, nitems);
+ if (ret)
+ goto error;
+
+ /* insert the dir index items */
+ slot = path->slots[0];
+ list_for_each_entry_safe(curr, next, &head, tree_list) {
+ data_ptr = btrfs_item_ptr(leaf, slot, char);
+ write_extent_buffer(leaf, &curr->data,
+ (unsigned long)data_ptr,
+ curr->data_len);
+ slot++;
+
+ btrfs_delayed_item_release_metadata(root, curr);
+
+ list_del(&curr->tree_list);
+ btrfs_release_delayed_item(curr);
+ }
+
+error:
+ kfree(data_size);
+ kfree(keys);
+out:
+ return ret;
+}
+
+/*
+ * This helper handles simple insertions that needn't extend the item for
+ * new data, such as directory name index insertion and inode insertion.
+ */
+static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_path *path,
+ struct btrfs_delayed_item *delayed_item)
+{
+ struct extent_buffer *leaf;
+ struct btrfs_item *item;
+ char *ptr;
+ int ret;
+
+ ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
+ delayed_item->data_len);
+ if (ret < 0 && ret != -EEXIST)
+ return ret;
+
+ leaf = path->nodes[0];
+
+ item = btrfs_item_nr(leaf, path->slots[0]);
+ ptr = btrfs_item_ptr(leaf, path->slots[0], char);
+
+ write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
+ delayed_item->data_len);
+ btrfs_mark_buffer_dirty(leaf);
+
+ btrfs_delayed_item_release_metadata(root, delayed_item);
+ return 0;
+}
+
+/*
+ * we insert an item first, then if there are some continuous items, we try
+ * to insert those items into the same leaf.
+ */
+static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
+ struct btrfs_path *path,
+ struct btrfs_root *root,
+ struct btrfs_delayed_node *node)
+{
+ struct btrfs_delayed_item *curr, *prev;
+ int ret = 0;
+
+do_again:
+ mutex_lock(&node->mutex);
+ curr = __btrfs_first_delayed_insertion_item(node);
+ if (!curr)
+ goto insert_end;
+
+ ret = btrfs_insert_delayed_item(trans, root, path, curr);
+ if (ret < 0) {
- btrfs_release_path(root, path);
++ btrfs_release_path(path);
+ goto insert_end;
+ }
+
+ prev = curr;
+ curr = __btrfs_next_delayed_item(prev);
+ if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
+ /* insert the continuous items into the same leaf */
+ path->slots[0]++;
+ btrfs_batch_insert_items(trans, root, path, curr);
+ }
+ btrfs_release_delayed_item(prev);
+ btrfs_mark_buffer_dirty(path->nodes[0]);
+
- btrfs_release_path(root, path);
++ btrfs_release_path(path);
+ mutex_unlock(&node->mutex);
+ goto do_again;
+
+insert_end:
+ mutex_unlock(&node->mutex);
+ return ret;
+}
+
+static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_path *path,
+ struct btrfs_delayed_item *item)
+{
+ struct btrfs_delayed_item *curr, *next;
+ struct extent_buffer *leaf;
+ struct btrfs_key key;
+ struct list_head head;
+ int nitems, i, last_item;
+ int ret = 0;
+
+ BUG_ON(!path->nodes[0]);
+
+ leaf = path->nodes[0];
+
+ i = path->slots[0];
+ last_item = btrfs_header_nritems(leaf) - 1;
+ if (i > last_item)
+ return -ENOENT; /* FIXME: Is errno suitable? */
+
+ next = item;
+ INIT_LIST_HEAD(&head);
+ btrfs_item_key_to_cpu(leaf, &key, i);
+ nitems = 0;
+ /*
+	 * count the number of dir index items that we can delete in one batch
+ */
+ while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
+ list_add_tail(&next->tree_list, &head);
+ nitems++;
+
+ curr = next;
+ next = __btrfs_next_delayed_item(curr);
+ if (!next)
+ break;
+
+ if (!btrfs_is_continuous_delayed_item(curr, next))
+ break;
+
+ i++;
+ if (i > last_item)
+ break;
+ btrfs_item_key_to_cpu(leaf, &key, i);
+ }
+
+ if (!nitems)
+ return 0;
+
+ ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
+ if (ret)
+ goto out;
+
+ list_for_each_entry_safe(curr, next, &head, tree_list) {
+ btrfs_delayed_item_release_metadata(root, curr);
+ list_del(&curr->tree_list);
+ btrfs_release_delayed_item(curr);
+ }
+
+out:
+ return ret;
+}
+
+static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
+ struct btrfs_path *path,
+ struct btrfs_root *root,
+ struct btrfs_delayed_node *node)
+{
+ struct btrfs_delayed_item *curr, *prev;
+ int ret = 0;
+
+do_again:
+ mutex_lock(&node->mutex);
+ curr = __btrfs_first_delayed_deletion_item(node);
+ if (!curr)
+ goto delete_fail;
+
+ ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
+ if (ret < 0)
+ goto delete_fail;
+ else if (ret > 0) {
+ /*
+ * can't find the item which the node points to, so this node
+ * is invalid, just drop it.
+ */
+ prev = curr;
+ curr = __btrfs_next_delayed_item(prev);
+ btrfs_release_delayed_item(prev);
+ ret = 0;
- btrfs_release_path(root, path);
++ btrfs_release_path(path);
+ if (curr)
+ goto do_again;
+ else
+ goto delete_fail;
+ }
+
+ btrfs_batch_delete_items(trans, root, path, curr);
- btrfs_release_path(root, path);
++ btrfs_release_path(path);
+ mutex_unlock(&node->mutex);
+ goto do_again;
+
+delete_fail:
- btrfs_release_path(root, path);
++ btrfs_release_path(path);
+ mutex_unlock(&node->mutex);
+ return ret;
+}
+
+static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
+{
+ struct btrfs_delayed_root *delayed_root;
+
+ if (delayed_node && delayed_node->inode_dirty) {
+ BUG_ON(!delayed_node->root);
+ delayed_node->inode_dirty = 0;
+ delayed_node->count--;
+
+ delayed_root = delayed_node->root->fs_info->delayed_root;
+ atomic_dec(&delayed_root->items);
+ if (atomic_read(&delayed_root->items) <
+ BTRFS_DELAYED_BACKGROUND &&
+ waitqueue_active(&delayed_root->wait))
+ wake_up(&delayed_root->wait);
+ }
+}
+
+static int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_path *path,
+ struct btrfs_delayed_node *node)
+{
+ struct btrfs_key key;
+ struct btrfs_inode_item *inode_item;
+ struct extent_buffer *leaf;
+ int ret;
+
+ mutex_lock(&node->mutex);
+ if (!node->inode_dirty) {
+ mutex_unlock(&node->mutex);
+ return 0;
+ }
+
+ key.objectid = node->inode_id;
+ btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
+ key.offset = 0;
+ ret = btrfs_lookup_inode(trans, root, path, &key, 1);
+ if (ret > 0) {
- btrfs_release_path(root, path);
++ btrfs_release_path(path);
+ mutex_unlock(&node->mutex);
+ return -ENOENT;
+ } else if (ret < 0) {
+ mutex_unlock(&node->mutex);
+ return ret;
+ }
+
+ btrfs_unlock_up_safe(path, 1);
+ leaf = path->nodes[0];
+ inode_item = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_inode_item);
+ write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
+ sizeof(struct btrfs_inode_item));
+ btrfs_mark_buffer_dirty(leaf);
++ btrfs_release_path(path);
+
+ btrfs_delayed_inode_release_metadata(root, node);
+ btrfs_release_delayed_inode(node);
+ mutex_unlock(&node->mutex);
+
+ return 0;
+}
+
+/* Called when committing the transaction. */
+int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root)
+{
+ struct btrfs_delayed_root *delayed_root;
+ struct btrfs_delayed_node *curr_node, *prev_node;
+ struct btrfs_path *path;
+ int ret = 0;
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+ path->leave_spinning = 1;
+
+ delayed_root = btrfs_get_delayed_root(root);
+
+ curr_node = btrfs_first_delayed_node(delayed_root);
+ while (curr_node) {
+ root = curr_node->root;
+ ret = btrfs_insert_delayed_items(trans, path, root,
+ curr_node);
+ if (!ret)
+ ret = btrfs_delete_delayed_items(trans, path, root,
+ curr_node);
+ if (!ret)
+ ret = btrfs_update_delayed_inode(trans, root, path,
+ curr_node);
+ if (ret) {
+ btrfs_release_delayed_node(curr_node);
+ break;
+ }
+
+ prev_node = curr_node;
+ curr_node = btrfs_next_delayed_node(curr_node);
+ btrfs_release_delayed_node(prev_node);
+ }
+
+ btrfs_free_path(path);
+ return ret;
+}
+
+static int __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
+ struct btrfs_delayed_node *node)
+{
+ struct btrfs_path *path;
+ int ret;
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+ path->leave_spinning = 1;
+
+ ret = btrfs_insert_delayed_items(trans, path, node->root, node);
+ if (!ret)
+ ret = btrfs_delete_delayed_items(trans, path, node->root, node);
+ if (!ret)
+ ret = btrfs_update_delayed_inode(trans, node->root, path, node);
+ btrfs_free_path(path);
+
+ return ret;
+}
+
+int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
+ struct inode *inode)
+{
+ struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
+ int ret;
+
+ if (!delayed_node)
+ return 0;
+
+ mutex_lock(&delayed_node->mutex);
+ if (!delayed_node->count) {
+ mutex_unlock(&delayed_node->mutex);
+ btrfs_release_delayed_node(delayed_node);
+ return 0;
+ }
+ mutex_unlock(&delayed_node->mutex);
+
+ ret = __btrfs_commit_inode_delayed_items(trans, delayed_node);
+ btrfs_release_delayed_node(delayed_node);
+ return ret;
+}
+
+void btrfs_remove_delayed_node(struct inode *inode)
+{
+ struct btrfs_delayed_node *delayed_node;
+
+ delayed_node = ACCESS_ONCE(BTRFS_I(inode)->delayed_node);
+ if (!delayed_node)
+ return;
+
+ BTRFS_I(inode)->delayed_node = NULL;
+ btrfs_release_delayed_node(delayed_node);
+}
+
+struct btrfs_async_delayed_node {
+ struct btrfs_root *root;
+ struct btrfs_delayed_node *delayed_node;
+ struct btrfs_work work;
+};
+
+static void btrfs_async_run_delayed_node_done(struct btrfs_work *work)
+{
+ struct btrfs_async_delayed_node *async_node;
+ struct btrfs_trans_handle *trans;
+ struct btrfs_path *path;
+ struct btrfs_delayed_node *delayed_node = NULL;
+ struct btrfs_root *root;
+ unsigned long nr = 0;
+ int need_requeue = 0;
+ int ret;
+
+ async_node = container_of(work, struct btrfs_async_delayed_node, work);
+
+ path = btrfs_alloc_path();
+ if (!path)
+ goto out;
+ path->leave_spinning = 1;
+
+ delayed_node = async_node->delayed_node;
+ root = delayed_node->root;
+
+ trans = btrfs_join_transaction(root, 0);
+ if (IS_ERR(trans))
+ goto free_path;
+
+ ret = btrfs_insert_delayed_items(trans, path, root, delayed_node);
+ if (!ret)
+ ret = btrfs_delete_delayed_items(trans, path, root,
+ delayed_node);
+
+ if (!ret)
+ btrfs_update_delayed_inode(trans, root, path, delayed_node);
+
+	/*
+	 * Maybe new delayed items have been inserted, so we need to requeue
+	 * the work. Besides that, we must dequeue the empty delayed nodes
+	 * to avoid the race between delayed items balance and the worker.
+	 * The race is like this:
+	 *	Task1				Worker thread
+	 *					count == 0, needn't requeue,
+	 *					  also needn't insert the
+	 *					  delayed node into the prepare
+	 *					  list again.
+	 *	add lots of delayed items
+	 *	queue the delayed node
+	 *	  already in the list,
+	 *	  and not in the prepare
+	 *	  list, it means the delayed
+	 *	  node is being dealt with
+	 *	  by the worker.
+	 *	do delayed items balance
+	 *	  the delayed node is being
+	 *	  dealt with by the worker
+	 *	  now, just wait.
+	 *	the worker goes idle.
+	 * Task1 will sleep until the transaction is committed.
+	 */
+ mutex_lock(&delayed_node->mutex);
+ if (delayed_node->count)
+ need_requeue = 1;
+ else
+ btrfs_dequeue_delayed_node(root->fs_info->delayed_root,
+ delayed_node);
+ mutex_unlock(&delayed_node->mutex);
+
+ nr = trans->blocks_used;
+
+ btrfs_end_transaction_dmeta(trans, root);
+ __btrfs_btree_balance_dirty(root, nr);
+free_path:
+ btrfs_free_path(path);
+out:
+ if (need_requeue)
+ btrfs_requeue_work(&async_node->work);
+ else {
+ btrfs_release_prepared_delayed_node(delayed_node);
+ kfree(async_node);
+ }
+}
+
+static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
+ struct btrfs_root *root, int all)
+{
+ struct btrfs_async_delayed_node *async_node;
+ struct btrfs_delayed_node *curr;
+ int count = 0;
+
+again:
+ curr = btrfs_first_prepared_delayed_node(delayed_root);
+ if (!curr)
+ return 0;
+
+ async_node = kmalloc(sizeof(*async_node), GFP_NOFS);
+ if (!async_node) {
+ btrfs_release_prepared_delayed_node(curr);
+ return -ENOMEM;
+ }
+
+ async_node->root = root;
+ async_node->delayed_node = curr;
+
+ async_node->work.func = btrfs_async_run_delayed_node_done;
+ async_node->work.flags = 0;
+
+ btrfs_queue_worker(&root->fs_info->delayed_workers, &async_node->work);
+ count++;
+
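+	/* in background mode, flush at most 4 prepared nodes per call */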
+ if (all || count < 4)
+ goto again;
+
+ return 0;
+}
+
+void btrfs_balance_delayed_items(struct btrfs_root *root)
+{
+ struct btrfs_delayed_root *delayed_root;
+
+ delayed_root = btrfs_get_delayed_root(root);
+
+ if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
+ return;
+
+ if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
+ int ret;
+ ret = btrfs_wq_run_delayed_node(delayed_root, root, 1);
+ if (ret)
+ return;
+
+ wait_event_interruptible_timeout(
+ delayed_root->wait,
+ (atomic_read(&delayed_root->items) <
+ BTRFS_DELAYED_BACKGROUND),
+ HZ);
+ return;
+ }
+
+ btrfs_wq_run_delayed_node(delayed_root, root, 0);
+}
+
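+/*
+ * Queue a dir index insertion in the delayed node of the directory
+ * instead of inserting it into the btree directly; the item is written
+ * out at transaction commit time or by the async worker.
+ */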
+int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, const char *name,
+ int name_len, struct inode *dir,
+ struct btrfs_disk_key *disk_key, u8 type,
+ u64 index)
+{
+ struct btrfs_delayed_node *delayed_node;
+ struct btrfs_delayed_item *delayed_item;
+ struct btrfs_dir_item *dir_item;
+ int ret;
+
+ delayed_node = btrfs_get_or_create_delayed_node(dir);
+ if (IS_ERR(delayed_node))
+ return PTR_ERR(delayed_node);
+
+ delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
+ if (!delayed_item) {
+ ret = -ENOMEM;
+ goto release_node;
+ }
+
+ ret = btrfs_delayed_item_reserve_metadata(trans, root, delayed_item);
+ /*
+	 * we reserved enough space when we started a new transaction,
+	 * so a metadata reservation failure here is impossible
+ */
+ BUG_ON(ret);
+
+ delayed_item->key.objectid = btrfs_ino(dir);
+ btrfs_set_key_type(&delayed_item->key, BTRFS_DIR_INDEX_KEY);
+ delayed_item->key.offset = index;
+
+ dir_item = (struct btrfs_dir_item *)delayed_item->data;
+ dir_item->location = *disk_key;
+ dir_item->transid = cpu_to_le64(trans->transid);
+ dir_item->data_len = 0;
+ dir_item->name_len = cpu_to_le16(name_len);
+ dir_item->type = type;
+ memcpy((char *)(dir_item + 1), name, name_len);
+
+ mutex_lock(&delayed_node->mutex);
+ ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
+ if (unlikely(ret)) {
+		printk(KERN_ERR "error adding delayed dir index item (name: %s) into "
+		       "the insertion tree of the delayed node "
+ "(root id: %llu, inode id: %llu, errno: %d)\n",
+ name,
+ (unsigned long long)delayed_node->root->objectid,
+ (unsigned long long)delayed_node->inode_id,
+ ret);
+ BUG();
+ }
+ mutex_unlock(&delayed_node->mutex);
+
+release_node:
+ btrfs_release_delayed_node(delayed_node);
+ return ret;
+}
+
+static int btrfs_delete_delayed_insertion_item(struct btrfs_root *root,
+ struct btrfs_delayed_node *node,
+ struct btrfs_key *key)
+{
+ struct btrfs_delayed_item *item;
+
+ mutex_lock(&node->mutex);
+ item = __btrfs_lookup_delayed_insertion_item(node, key);
+ if (!item) {
+ mutex_unlock(&node->mutex);
+ return 1;
+ }
+
+ btrfs_delayed_item_release_metadata(root, item);
+ btrfs_release_delayed_item(item);
+ mutex_unlock(&node->mutex);
+ return 0;
+}
+
+int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, struct inode *dir,
+ u64 index)
+{
+ struct btrfs_delayed_node *node;
+ struct btrfs_delayed_item *item;
+ struct btrfs_key item_key;
+ int ret;
+
+ node = btrfs_get_or_create_delayed_node(dir);
+ if (IS_ERR(node))
+ return PTR_ERR(node);
+
+ item_key.objectid = btrfs_ino(dir);
+ btrfs_set_key_type(&item_key, BTRFS_DIR_INDEX_KEY);
+ item_key.offset = index;
+
+ ret = btrfs_delete_delayed_insertion_item(root, node, &item_key);
+ if (!ret)
+ goto end;
+
+ item = btrfs_alloc_delayed_item(0);
+ if (!item) {
+ ret = -ENOMEM;
+ goto end;
+ }
+
+ item->key = item_key;
+
+ ret = btrfs_delayed_item_reserve_metadata(trans, root, item);
+ /*
+	 * we reserved enough space when we started a new transaction,
+	 * so a metadata reservation failure here is impossible.
+ */
+ BUG_ON(ret);
+
+ mutex_lock(&node->mutex);
+ ret = __btrfs_add_delayed_deletion_item(node, item);
+ if (unlikely(ret)) {
+		printk(KERN_ERR "error adding delayed dir index item (index: %llu) "
+		       "into the deletion tree of the delayed node "
+ "(root id: %llu, inode id: %llu, errno: %d)\n",
+ (unsigned long long)index,
+ (unsigned long long)node->root->objectid,
+ (unsigned long long)node->inode_id,
+ ret);
+ BUG();
+ }
+ mutex_unlock(&node->mutex);
+end:
+ btrfs_release_delayed_node(node);
+ return ret;
+}
+
+int btrfs_inode_delayed_dir_index_count(struct inode *inode)
+{
+ struct btrfs_delayed_node *delayed_node = BTRFS_I(inode)->delayed_node;
+ int ret = 0;
+
+ if (!delayed_node)
+ return -ENOENT;
+
+ /*
+	 * Since we hold i_mutex of this directory, no new directory index
+	 * can be added to the delayed node and index_cnt cannot be updated
+	 * now, so we needn't lock the delayed node.
+ */
+ if (!delayed_node->index_cnt)
+ return -EINVAL;
+
+ BTRFS_I(inode)->index_cnt = delayed_node->index_cnt;
+ return ret;
+}
+
+void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list,
+ struct list_head *del_list)
+{
+ struct btrfs_delayed_node *delayed_node;
+ struct btrfs_delayed_item *item;
+
+ delayed_node = btrfs_get_delayed_node(inode);
+ if (!delayed_node)
+ return;
+
+ mutex_lock(&delayed_node->mutex);
+ item = __btrfs_first_delayed_insertion_item(delayed_node);
+ while (item) {
+ atomic_inc(&item->refs);
+ list_add_tail(&item->readdir_list, ins_list);
+ item = __btrfs_next_delayed_item(item);
+ }
+
+ item = __btrfs_first_delayed_deletion_item(delayed_node);
+ while (item) {
+ atomic_inc(&item->refs);
+ list_add_tail(&item->readdir_list, del_list);
+ item = __btrfs_next_delayed_item(item);
+ }
+ mutex_unlock(&delayed_node->mutex);
+ /*
+	 * This delayed node is still cached in the btrfs inode, so refs
+	 * must be > 1 now, and we needn't check whether it is going to
+	 * be freed.
+ *
+ * Besides that, this function is used to read dir, we do not
+ * insert/delete delayed items in this period. So we also needn't
+ * requeue or dequeue this delayed node.
+ */
+ atomic_dec(&delayed_node->refs);
+}
+
+void btrfs_put_delayed_items(struct list_head *ins_list,
+ struct list_head *del_list)
+{
+ struct btrfs_delayed_item *curr, *next;
+
+ list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
+ list_del(&curr->readdir_list);
+ if (atomic_dec_and_test(&curr->refs))
+ kfree(curr);
+ }
+
+ list_for_each_entry_safe(curr, next, del_list, readdir_list) {
+ list_del(&curr->readdir_list);
+ if (atomic_dec_and_test(&curr->refs))
+ kfree(curr);
+ }
+}
+
+int btrfs_should_delete_dir_index(struct list_head *del_list,
+ u64 index)
+{
+ struct btrfs_delayed_item *curr, *next;
+ int ret;
+
+ if (list_empty(del_list))
+ return 0;
+
+ list_for_each_entry_safe(curr, next, del_list, readdir_list) {
+ if (curr->key.offset > index)
+ break;
+
+ list_del(&curr->readdir_list);
+ ret = (curr->key.offset == index);
+
+ if (atomic_dec_and_test(&curr->refs))
+ kfree(curr);
+
+ if (ret)
+ return 1;
+ else
+ continue;
+ }
+ return 0;
+}
+
+/*
+ * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree
+ *
+ * Walk the pending insertion items collected by btrfs_get_delayed_items()
+ * and feed them to filldir, so readdir also sees directory entries that
+ * have not been written into the btree yet. Returns 1 when filldir
+ * reports that the buffer is full.
+ */
+int btrfs_readdir_delayed_dir_index(struct file *filp, void *dirent,
+ filldir_t filldir,
+ struct list_head *ins_list)
+{
+ struct btrfs_dir_item *di;
+ struct btrfs_delayed_item *curr, *next;
+ struct btrfs_key location;
+ char *name;
+ int name_len;
+ int over = 0;
+ unsigned char d_type;
+
+ if (list_empty(ins_list))
+ return 0;
+
+ /*
+ * The data of a delayed item is never changed, so we need not
+ * lock the items. And since we hold i_mutex of the directory,
+ * nobody can delete any directory index right now.
+ */
+ list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
+ list_del(&curr->readdir_list);
+
+ if (curr->key.offset < filp->f_pos) {
+ if (atomic_dec_and_test(&curr->refs))
+ kfree(curr);
+ continue;
+ }
+
+ filp->f_pos = curr->key.offset;
+
+ di = (struct btrfs_dir_item *)curr->data;
+ name = (char *)(di + 1);
+ name_len = le16_to_cpu(di->name_len);
+
+ d_type = btrfs_filetype_table[di->type];
+ btrfs_disk_key_to_cpu(&location, &di->location);
+
+ over = filldir(dirent, name, name_len, curr->key.offset,
+ location.objectid, d_type);
+
+ if (atomic_dec_and_test(&curr->refs))
+ kfree(curr);
+
+ if (over)
+ return 1;
+ }
+ return 0;
+}
+
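+/*
+ * The BTRFS_SETGET_STACK_FUNCS calls below generate endian-safe get/set
+ * helpers for a btrfs_inode_item that lives in ordinary memory (on the
+ * stack) instead of in an extent buffer.
+ */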
+BTRFS_SETGET_STACK_FUNCS(stack_inode_generation, struct btrfs_inode_item,
+ generation, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_inode_sequence, struct btrfs_inode_item,
+ sequence, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_inode_transid, struct btrfs_inode_item,
+ transid, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_inode_size, struct btrfs_inode_item, size, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_inode_nbytes, struct btrfs_inode_item,
+ nbytes, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_inode_block_group, struct btrfs_inode_item,
+ block_group, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_inode_nlink, struct btrfs_inode_item, nlink, 32);
+BTRFS_SETGET_STACK_FUNCS(stack_inode_uid, struct btrfs_inode_item, uid, 32);
+BTRFS_SETGET_STACK_FUNCS(stack_inode_gid, struct btrfs_inode_item, gid, 32);
+BTRFS_SETGET_STACK_FUNCS(stack_inode_mode, struct btrfs_inode_item, mode, 32);
+BTRFS_SETGET_STACK_FUNCS(stack_inode_rdev, struct btrfs_inode_item, rdev, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_inode_flags, struct btrfs_inode_item, flags, 64);
+
+BTRFS_SETGET_STACK_FUNCS(stack_timespec_sec, struct btrfs_timespec, sec, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_timespec_nsec, struct btrfs_timespec, nsec, 32);
+
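+/*
+ * Copy the fields of the VFS inode into a stack btrfs_inode_item using
+ * the helpers generated above, so the item can later be written into the
+ * btree without touching the in-memory inode again.
+ */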
+static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
+ struct btrfs_inode_item *inode_item,
+ struct inode *inode)
+{
+ btrfs_set_stack_inode_uid(inode_item, inode->i_uid);
+ btrfs_set_stack_inode_gid(inode_item, inode->i_gid);
+ btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
+ btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
+ btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
+ btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
+ btrfs_set_stack_inode_generation(inode_item,
+ BTRFS_I(inode)->generation);
+ btrfs_set_stack_inode_sequence(inode_item, BTRFS_I(inode)->sequence);
+ btrfs_set_stack_inode_transid(inode_item, trans->transid);
+ btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
+ btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
+ btrfs_set_stack_inode_block_group(inode_item,
+ BTRFS_I(inode)->block_group);
+
+ btrfs_set_stack_timespec_sec(btrfs_inode_atime(inode_item),
+ inode->i_atime.tv_sec);
+ btrfs_set_stack_timespec_nsec(btrfs_inode_atime(inode_item),
+ inode->i_atime.tv_nsec);
+
+ btrfs_set_stack_timespec_sec(btrfs_inode_mtime(inode_item),
+ inode->i_mtime.tv_sec);
+ btrfs_set_stack_timespec_nsec(btrfs_inode_mtime(inode_item),
+ inode->i_mtime.tv_nsec);
+
+ btrfs_set_stack_timespec_sec(btrfs_inode_ctime(inode_item),
+ inode->i_ctime.tv_sec);
+ btrfs_set_stack_timespec_nsec(btrfs_inode_ctime(inode_item),
+ inode->i_ctime.tv_nsec);
+}
+
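+/*
+ * Record an inode update in the delayed node instead of cow-ing the fs
+ * tree right away. If the node already holds a dirty copy of the inode
+ * item we only refresh it; otherwise we reserve metadata space first and
+ * then mark the node dirty.
+ */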
+int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, struct inode *inode)
+{
+ struct btrfs_delayed_node *delayed_node;
+ int ret;
+
+ delayed_node = btrfs_get_or_create_delayed_node(inode);
+ if (IS_ERR(delayed_node))
+ return PTR_ERR(delayed_node);
+
+ mutex_lock(&delayed_node->mutex);
+ if (delayed_node->inode_dirty) {
+ fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
+ goto release_node;
+ }
+
+ ret = btrfs_delayed_inode_reserve_metadata(trans, root, delayed_node);
+ /*
+ * We reserved enough space when we started the transaction, so
+ * failing to reserve metadata here is impossible.
+ */
+ BUG_ON(ret);
+
+ fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
+ delayed_node->inode_dirty = 1;
+ delayed_node->count++;
+ atomic_inc(&root->fs_info->delayed_root->items);
+release_node:
+ mutex_unlock(&delayed_node->mutex);
+ btrfs_release_delayed_node(delayed_node);
+ return ret;
+}
+
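+/*
+ * Release every pending insertion and deletion item of a delayed node,
+ * along with their metadata reservations, and drop the dirty inode item
+ * if there is one. Used when the inode (or the whole root) is going away.
+ */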
+static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
+{
+ struct btrfs_root *root = delayed_node->root;
+ struct btrfs_delayed_item *curr_item, *prev_item;
+
+ mutex_lock(&delayed_node->mutex);
+ curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
+ while (curr_item) {
+ btrfs_delayed_item_release_metadata(root, curr_item);
+ prev_item = curr_item;
+ curr_item = __btrfs_next_delayed_item(prev_item);
+ btrfs_release_delayed_item(prev_item);
+ }
+
+ curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
+ while (curr_item) {
+ btrfs_delayed_item_release_metadata(root, curr_item);
+ prev_item = curr_item;
+ curr_item = __btrfs_next_delayed_item(prev_item);
+ btrfs_release_delayed_item(prev_item);
+ }
+
+ if (delayed_node->inode_dirty) {
+ btrfs_delayed_inode_release_metadata(root, delayed_node);
+ btrfs_release_delayed_inode(delayed_node);
+ }
+ mutex_unlock(&delayed_node->mutex);
+}
+
+void btrfs_kill_delayed_inode_items(struct inode *inode)
+{
+ struct btrfs_delayed_node *delayed_node;
+
+ delayed_node = btrfs_get_delayed_node(inode);
+ if (!delayed_node)
+ return;
+
+ __btrfs_kill_delayed_node(delayed_node);
+ btrfs_release_delayed_node(delayed_node);
+}
+
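+/*
+ * Walk the per-root radix tree of delayed nodes in batches of up to
+ * ARRAY_SIZE(delayed_nodes) entries, taking a reference on each node
+ * under inode_lock so it cannot be freed while we kill it outside the
+ * lock.
+ */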
+void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
+{
+ u64 inode_id = 0;
+ struct btrfs_delayed_node *delayed_nodes[8];
+ int i, n;
+
+ while (1) {
+ spin_lock(&root->inode_lock);
+ n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
+ (void **)delayed_nodes, inode_id,
+ ARRAY_SIZE(delayed_nodes));
+ if (!n) {
+ spin_unlock(&root->inode_lock);
+ break;
+ }
+
+ inode_id = delayed_nodes[n - 1]->inode_id + 1;
+
+ for (i = 0; i < n; i++)
+ atomic_inc(&delayed_nodes[i]->refs);
+ spin_unlock(&root->inode_lock);
+
+ for (i = 0; i < n; i++) {
+ __btrfs_kill_delayed_node(delayed_nodes[i]);
+ btrfs_release_delayed_node(delayed_nodes[i]);
+ }
+ }
+}
* to use for the second index (if one is created).
*/
int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root
- *root, const char *name, int name_len, u64 dir,
- struct btrfs_key *location, u8 type, u64 index)
+ *root, const char *name, int name_len,
+ struct inode *dir, struct btrfs_key *location,
+ u8 type, u64 index)
{
int ret = 0;
int ret2 = 0;
struct btrfs_disk_key disk_key;
u32 data_size;
- key.objectid = dir;
+ key.objectid = btrfs_ino(dir);
btrfs_set_key_type(&key, BTRFS_DIR_ITEM_KEY);
key.offset = btrfs_name_hash(name, name_len);
path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
path->leave_spinning = 1;
+ btrfs_cpu_key_to_disk(&disk_key, location);
+
data_size = sizeof(*dir_item) + name_len;
dir_item = insert_with_overflow(trans, root, path, &key, data_size,
name, name_len);
}
leaf = path->nodes[0];
- btrfs_cpu_key_to_disk(&disk_key, location);
btrfs_set_dir_item_key(leaf, dir_item, &disk_key);
btrfs_set_dir_type(leaf, dir_item, type);
btrfs_set_dir_data_len(leaf, dir_item, 0);
ret = 0;
goto out_free;
}
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
- btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY);
- key.offset = index;
- dir_item = insert_with_overflow(trans, root, path, &key, data_size,
- name, name_len);
- if (IS_ERR(dir_item)) {
- ret2 = PTR_ERR(dir_item);
- goto out_free;
- }
- leaf = path->nodes[0];
- btrfs_cpu_key_to_disk(&disk_key, location);
- btrfs_set_dir_item_key(leaf, dir_item, &disk_key);
- btrfs_set_dir_type(leaf, dir_item, type);
- btrfs_set_dir_data_len(leaf, dir_item, 0);
- btrfs_set_dir_name_len(leaf, dir_item, name_len);
- btrfs_set_dir_transid(leaf, dir_item, trans->transid);
- name_ptr = (unsigned long)(dir_item + 1);
- write_extent_buffer(leaf, name, name_ptr, name_len);
- btrfs_mark_buffer_dirty(leaf);
-
+ ret2 = btrfs_insert_delayed_dir_index(trans, root, name, name_len, dir,
+ &disk_key, type, index);
out_free:
-
btrfs_free_path(path);
if (ret)
return ret;
#include <linux/crc32c.h>
#include <linux/slab.h>
#include <linux/migrate.h>
+ #include <linux/ratelimit.h>
#include <asm/unaligned.h>
#include "compat.h"
#include "ctree.h"
#include "locking.h"
#include "tree-log.h"
#include "free-space-cache.h"
+#include "inode-map.h"
static struct extent_io_ops btree_extent_io_ops;
static void end_workqueue_fn(struct btrfs_work *work);
* that covers the entire device
*/
static struct extent_map *btree_get_extent(struct inode *inode,
- struct page *page, size_t page_offset, u64 start, u64 len,
+ struct page *page, size_t pg_offset, u64 start, u64 len,
int create)
{
struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
}
read_unlock(&em_tree->lock);
- em = alloc_extent_map(GFP_NOFS);
+ em = alloc_extent_map();
if (!em) {
em = ERR_PTR(-ENOMEM);
goto out;
memcpy(&found, result, csum_size);
read_extent_buffer(buf, &val, 0, csum_size);
- if (printk_ratelimit()) {
- printk(KERN_INFO "btrfs: %s checksum verify "
+ printk_ratelimited(KERN_INFO "btrfs: %s checksum verify "
"failed on %llu wanted %X found %X "
"level %d\n",
root->fs_info->sb->s_id,
(unsigned long long)buf->start, val, found,
btrfs_header_level(buf));
- }
if (result != (char *)&inline_result)
kfree(result);
return 1;
ret = 0;
goto out;
}
- if (printk_ratelimit()) {
- printk("parent transid verify failed on %llu wanted %llu "
+ printk_ratelimited("parent transid verify failed on %llu wanted %llu "
"found %llu\n",
(unsigned long long)eb->start,
(unsigned long long)parent_transid,
(unsigned long long)btrfs_header_generation(eb));
- }
ret = 1;
clear_extent_buffer_uptodate(io_tree, eb, &cached_state);
out:
len = page->private >> 2;
WARN_ON(len == 0);
- eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS);
+ eb = alloc_extent_buffer(tree, start, len, page);
if (eb == NULL) {
WARN_ON(1);
goto out;
len = page->private >> 2;
WARN_ON(len == 0);
- eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS);
+ eb = alloc_extent_buffer(tree, start, len, page);
if (eb == NULL) {
ret = -EIO;
goto out;
found_start = btrfs_header_bytenr(eb);
if (found_start != start) {
- if (printk_ratelimit()) {
- printk(KERN_INFO "btrfs bad tree block start "
+ printk_ratelimited(KERN_INFO "btrfs bad tree block start "
"%llu %llu\n",
(unsigned long long)found_start,
(unsigned long long)eb->start);
- }
ret = -EIO;
goto err;
}
goto err;
}
if (check_tree_block_fsid(root, eb)) {
- if (printk_ratelimit()) {
- printk(KERN_INFO "btrfs bad fsid on block %llu\n",
+ printk_ratelimited(KERN_INFO "btrfs bad fsid on block %llu\n",
(unsigned long long)eb->start);
- }
ret = -EIO;
goto err;
}
return 256 * limit;
}
- int btrfs_congested_async(struct btrfs_fs_info *info, int iodone)
- {
- return atomic_read(&info->nr_async_bios) >
- btrfs_async_submit_limit(info);
- }
-
static void run_one_async_start(struct btrfs_work *work)
{
struct async_submit_bio *async;
struct inode *btree_inode = root->fs_info->btree_inode;
struct extent_buffer *eb;
eb = find_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
- bytenr, blocksize, GFP_NOFS);
+ bytenr, blocksize);
return eb;
}
struct extent_buffer *eb;
eb = alloc_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
- bytenr, blocksize, NULL, GFP_NOFS);
+ bytenr, blocksize, NULL);
return eb;
}
root->name = NULL;
root->in_sysfs = 0;
root->inode_tree = RB_ROOT;
+ INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC);
root->block_rsv = NULL;
root->orphan_block_rsv = NULL;
root->log_transid = 0;
root->last_log_commit = 0;
extent_io_tree_init(&root->dirty_log_pages,
- fs_info->btree_inode->i_mapping, GFP_NOFS);
+ fs_info->btree_inode->i_mapping);
memset(&root->root_key, 0, sizeof(root->root_key));
memset(&root->root_item, 0, sizeof(root->root_item));
return root;
}
- struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
- u64 root_objectid)
- {
- struct btrfs_root *root;
-
- if (root_objectid == BTRFS_ROOT_TREE_OBJECTID)
- return fs_info->tree_root;
- if (root_objectid == BTRFS_EXTENT_TREE_OBJECTID)
- return fs_info->extent_root;
-
- root = radix_tree_lookup(&fs_info->fs_roots_radix,
- (unsigned long)root_objectid);
- return root;
- }
-
struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
struct btrfs_key *location)
{
if (IS_ERR(root))
return root;
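+ /*
+ * Set up the free-inode-number caches used by the cached inode
+ * number allocator (inode-map.c); free_ino_pinned tracks numbers
+ * that cannot be reused until the running transaction commits.
+ */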
+ root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS);
+ if (!root->free_ino_ctl)
+ goto fail;
+ root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned),
+ GFP_NOFS);
+ if (!root->free_ino_pinned)
+ goto fail;
+
+ btrfs_init_free_ino_ctl(root);
+ mutex_init(&root->fs_commit_mutex);
+ spin_lock_init(&root->cache_lock);
+ init_waitqueue_head(&root->cache_wait);
+
set_anon_super(&root->anon_super, NULL);
if (btrfs_root_refs(&root->root_item) == 0) {
return ERR_PTR(ret);
}
- struct btrfs_root *btrfs_read_fs_root(struct btrfs_fs_info *fs_info,
- struct btrfs_key *location,
- const char *name, int namelen)
- {
- return btrfs_read_fs_root_no_name(fs_info, location);
- #if 0
- struct btrfs_root *root;
- int ret;
-
- root = btrfs_read_fs_root_no_name(fs_info, location);
- if (!root)
- return NULL;
-
- if (root->in_sysfs)
- return root;
-
- ret = btrfs_set_root_name(root, name, namelen);
- if (ret) {
- free_extent_buffer(root->node);
- kfree(root);
- return ERR_PTR(ret);
- }
-
- ret = btrfs_sysfs_add_root(root);
- if (ret) {
- free_extent_buffer(root->node);
- kfree(root->name);
- kfree(root);
- return ERR_PTR(ret);
- }
- root->in_sysfs = 1;
- return root;
- #endif
- }
-
static int btrfs_congested_fn(void *congested_data, int bdi_bits)
{
struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
struct btrfs_root *csum_root = kzalloc(sizeof(struct btrfs_root),
GFP_NOFS);
struct btrfs_root *tree_root = btrfs_sb(sb);
- struct btrfs_fs_info *fs_info = tree_root->fs_info;
+ struct btrfs_fs_info *fs_info = NULL;
struct btrfs_root *chunk_root = kzalloc(sizeof(struct btrfs_root),
GFP_NOFS);
struct btrfs_root *dev_root = kzalloc(sizeof(struct btrfs_root),
struct btrfs_super_block *disk_super;
- if (!extent_root || !tree_root || !fs_info ||
+ if (!extent_root || !tree_root || !tree_root->fs_info ||
!chunk_root || !dev_root || !csum_root) {
err = -ENOMEM;
goto fail;
}
+ fs_info = tree_root->fs_info;
ret = init_srcu_struct(&fs_info->subvol_srcu);
if (ret) {
INIT_LIST_HEAD(&fs_info->ordered_extents);
spin_lock_init(&fs_info->ordered_extent_lock);
+ fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root),
+ GFP_NOFS);
+ if (!fs_info->delayed_root) {
+ err = -ENOMEM;
+ goto fail_iput;
+ }
+ btrfs_init_delayed_root(fs_info->delayed_root);
sb->s_blocksize = 4096;
sb->s_blocksize_bits = blksize_bits(4096);
RB_CLEAR_NODE(&BTRFS_I(fs_info->btree_inode)->rb_node);
extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree,
- fs_info->btree_inode->i_mapping,
- GFP_NOFS);
- extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree,
- GFP_NOFS);
+ fs_info->btree_inode->i_mapping);
+ extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree);
BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops;
fs_info->block_group_cache_tree = RB_ROOT;
extent_io_tree_init(&fs_info->freed_extents[0],
- fs_info->btree_inode->i_mapping, GFP_NOFS);
+ fs_info->btree_inode->i_mapping);
extent_io_tree_init(&fs_info->freed_extents[1],
- fs_info->btree_inode->i_mapping, GFP_NOFS);
+ fs_info->btree_inode->i_mapping);
fs_info->pinned_extents = &fs_info->freed_extents[0];
fs_info->do_barriers = 1;
bh = btrfs_read_dev_super(fs_devices->latest_bdev);
if (!bh) {
err = -EINVAL;
- goto fail_iput;
+ goto fail_alloc;
}
memcpy(&fs_info->super_copy, bh->b_data, sizeof(fs_info->super_copy));
disk_super = &fs_info->super_copy;
if (!btrfs_super_root(disk_super))
- goto fail_iput;
+ goto fail_alloc;
/* check FS state, whether FS is broken. */
fs_info->fs_state |= btrfs_super_flags(disk_super);
ret = btrfs_parse_options(tree_root, options);
if (ret) {
err = ret;
- goto fail_iput;
+ goto fail_alloc;
}
features = btrfs_super_incompat_flags(disk_super) &
"unsupported optional features (%Lx).\n",
(unsigned long long)features);
err = -EINVAL;
- goto fail_iput;
+ goto fail_alloc;
}
features = btrfs_super_incompat_flags(disk_super);
"unsupported option features (%Lx).\n",
(unsigned long long)features);
err = -EINVAL;
- goto fail_iput;
+ goto fail_alloc;
}
btrfs_init_workers(&fs_info->generic_worker,
&fs_info->generic_worker);
btrfs_init_workers(&fs_info->endio_freespace_worker, "freespace-write",
1, &fs_info->generic_worker);
+ btrfs_init_workers(&fs_info->delayed_workers, "delayed-meta",
+ fs_info->thread_pool_size,
+ &fs_info->generic_worker);
/*
* endios are largely parallel and should have a very
btrfs_start_workers(&fs_info->endio_meta_write_workers, 1);
btrfs_start_workers(&fs_info->endio_write_workers, 1);
btrfs_start_workers(&fs_info->endio_freespace_worker, 1);
+ btrfs_start_workers(&fs_info->delayed_workers, 1);
fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
btrfs_stop_workers(&fs_info->endio_write_workers);
btrfs_stop_workers(&fs_info->endio_freespace_worker);
btrfs_stop_workers(&fs_info->submit_workers);
+ btrfs_stop_workers(&fs_info->delayed_workers);
+fail_alloc:
+ kfree(fs_info->delayed_root);
fail_iput:
invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
iput(fs_info->btree_inode);
if (uptodate) {
set_buffer_uptodate(bh);
} else {
- if (printk_ratelimit()) {
- printk(KERN_WARNING "lost page write due to "
+ printk_ratelimited(KERN_WARNING "lost page write due to "
"I/O error on %s\n",
bdevname(bh->b_bdev, b));
- }
/* note, we don't set_buffer_write_io_error because we have
* our own ways of dealing with the IO errors
*/
if (btrfs_root_refs(&root->root_item) == 0)
synchronize_srcu(&fs_info->subvol_srcu);
+ __btrfs_remove_free_space_cache(root->free_ino_pinned);
+ __btrfs_remove_free_space_cache(root->free_ino_ctl);
free_fs_root(root);
return 0;
}
static void free_fs_root(struct btrfs_root *root)
{
+ iput(root->cache_inode);
WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
if (root->anon_super.s_dev) {
down_write(&root->anon_super.s_umount);
}
free_extent_buffer(root->node);
free_extent_buffer(root->commit_root);
+ kfree(root->free_ino_ctl);
+ kfree(root->free_ino_pinned);
kfree(root->name);
kfree(root);
}
del_fs_roots(fs_info);
iput(fs_info->btree_inode);
+ kfree(fs_info->delayed_root);
btrfs_stop_workers(&fs_info->generic_worker);
btrfs_stop_workers(&fs_info->fixup_workers);
btrfs_stop_workers(&fs_info->endio_write_workers);
btrfs_stop_workers(&fs_info->endio_freespace_worker);
btrfs_stop_workers(&fs_info->submit_workers);
+ btrfs_stop_workers(&fs_info->delayed_workers);
btrfs_close_devices(fs_info->fs_devices);
btrfs_mapping_tree_free(&fs_info->mapping_tree);
}
void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
+{
+ /*
+ * It looks as though older kernels can get into trouble with
+ * this code; they end up stuck in balance_dirty_pages forever.
+ */
+ u64 num_dirty;
+ unsigned long thresh = 32 * 1024 * 1024;
+
+ if (current->flags & PF_MEMALLOC)
+ return;
+
+ btrfs_balance_delayed_items(root);
+
+ num_dirty = root->fs_info->dirty_metadata_bytes;
+
+ if (num_dirty > thresh) {
+ balance_dirty_pages_ratelimited_nr(
+ root->fs_info->btree_inode->i_mapping, 1);
+ }
+}
+
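+/*
+ * Same as btrfs_btree_balance_dirty() above but, presumably for callers
+ * that are themselves flushing delayed items, without the call to
+ * btrfs_balance_delayed_items().
+ */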
+void __btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
{
/*
* looks as though older kernels can get into trouble with
goto out;
len = page->private >> 2;
- eb = find_extent_buffer(io_tree, bytenr, len, GFP_NOFS);
+ eb = find_extent_buffer(io_tree, bytenr, len);
if (!eb)
goto out;
int btrfs_error_commit_super(struct btrfs_root *root);
struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
u64 bytenr, u32 blocksize);
- struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
- u64 root_objectid);
- struct btrfs_root *btrfs_read_fs_root(struct btrfs_fs_info *fs_info,
- struct btrfs_key *location,
- const char *name, int namelen);
struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
struct btrfs_key *location);
struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
struct btrfs_key *location);
int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info);
- int btrfs_insert_dev_radix(struct btrfs_root *root,
- struct block_device *bdev,
- u64 device_id,
- u64 block_start,
- u64 num_blocks);
void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr);
+void __btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr);
int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root);
void btrfs_mark_buffer_dirty(struct extent_buffer *buf);
- void btrfs_mark_buffer_dirty_nonblocking(struct extent_buffer *buf);
int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid);
int btrfs_set_buffer_uptodate(struct extent_buffer *buf);
- int wait_on_tree_block_writeback(struct btrfs_root *root,
- struct extent_buffer *buf);
int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid);
u32 btrfs_csum_data(struct btrfs_root *root, char *data, u32 seed, size_t len);
void btrfs_csum_final(u32 crc, char *result);
- int btrfs_open_device(struct btrfs_device *dev);
- int btrfs_verify_block_csum(struct btrfs_root *root,
- struct extent_buffer *buf);
int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
int metadata);
int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
unsigned long bio_flags, u64 bio_offset,
extent_submit_bio_hook_t *submit_bio_start,
extent_submit_bio_hook_t *submit_bio_done);
-
- int btrfs_congested_async(struct btrfs_fs_info *info, int iodone);
unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info);
int btrfs_write_tree_block(struct extent_buffer *buf);
int btrfs_wait_tree_block_writeback(struct extent_buffer *buf);
return (cache->flags & bits) == bits;
}
- void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
+ static void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
atomic_inc(&cache->count);
}
WARN_ON(cache->pinned > 0);
WARN_ON(cache->reserved > 0);
WARN_ON(cache->reserved_pinned > 0);
+ kfree(cache->free_space_ctl);
kfree(cache);
}
}
break;
caching_ctl->progress = last;
- btrfs_release_path(extent_root, path);
+ btrfs_release_path(path);
up_read(&fs_info->extent_commit_sem);
mutex_unlock(&caching_ctl->mutex);
if (btrfs_transaction_in_commit(fs_info))
atomic_inc(&head->node.refs);
spin_unlock(&delayed_refs->lock);
- btrfs_release_path(root->fs_info->extent_root, path);
+ btrfs_release_path(path);
+ /*
+ * Mutex was contended, block until it's released and try
+ * again
+ */
mutex_lock(&head->mutex);
mutex_unlock(&head->mutex);
btrfs_put_delayed_ref(&head->node);
break;
}
}
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
if (owner < BTRFS_FIRST_FREE_OBJECTID)
new_size += sizeof(*bi);
return 0;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
key.type = BTRFS_EXTENT_REF_V0_KEY;
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
if (ret < 0) {
err = ret;
if (match_extent_data_ref(leaf, ref, root_objectid,
owner, offset)) {
if (recow) {
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
goto again;
}
err = 0;
if (match_extent_data_ref(leaf, ref, root_objectid,
owner, offset))
break;
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
key.offset++;
ret = btrfs_insert_empty_item(trans, root, path, &key,
size);
btrfs_mark_buffer_dirty(leaf);
ret = 0;
fail:
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
return ret;
}
ret = -ENOENT;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
if (ret == -ENOENT && parent) {
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
key.type = BTRFS_EXTENT_REF_V0_KEY;
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
if (ret > 0)
}
ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
return ret;
}
if (ret != -ENOENT)
return ret;
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
*ref_ret = NULL;
if (owner < BTRFS_FIRST_FREE_OBJECTID) {
__run_delayed_extent_op(extent_op, leaf, item);
btrfs_mark_buffer_dirty(leaf);
- btrfs_release_path(root->fs_info->extent_root, path);
+ btrfs_release_path(path);
path->reada = 1;
path->leave_spinning = 1;
atomic_inc(&ref->refs);
spin_unlock(&delayed_refs->lock);
+ /*
+ * Mutex was contended, block until it's
+ * released and try again
+ */
mutex_lock(&head->mutex);
mutex_unlock(&head->mutex);
atomic_inc(&head->node.refs);
spin_unlock(&delayed_refs->lock);
- btrfs_release_path(root->fs_info->extent_root, path);
+ btrfs_release_path(path);
+ /*
+ * Mutex was contended, block until it's released and let
+ * caller try again
+ */
mutex_lock(&head->mutex);
mutex_unlock(&head->mutex);
btrfs_put_delayed_ref(&head->node);
return ret;
}
- #if 0
- int btrfs_cache_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
- struct extent_buffer *buf, u32 nr_extents)
- {
- struct btrfs_key key;
- struct btrfs_file_extent_item *fi;
- u64 root_gen;
- u32 nritems;
- int i;
- int level;
- int ret = 0;
- int shared = 0;
-
- if (!root->ref_cows)
- return 0;
-
- if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
- shared = 0;
- root_gen = root->root_key.offset;
- } else {
- shared = 1;
- root_gen = trans->transid - 1;
- }
-
- level = btrfs_header_level(buf);
- nritems = btrfs_header_nritems(buf);
-
- if (level == 0) {
- struct btrfs_leaf_ref *ref;
- struct btrfs_extent_info *info;
-
- ref = btrfs_alloc_leaf_ref(root, nr_extents);
- if (!ref) {
- ret = -ENOMEM;
- goto out;
- }
-
- ref->root_gen = root_gen;
- ref->bytenr = buf->start;
- ref->owner = btrfs_header_owner(buf);
- ref->generation = btrfs_header_generation(buf);
- ref->nritems = nr_extents;
- info = ref->extents;
-
- for (i = 0; nr_extents > 0 && i < nritems; i++) {
- u64 disk_bytenr;
- btrfs_item_key_to_cpu(buf, &key, i);
- if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
- continue;
- fi = btrfs_item_ptr(buf, i,
- struct btrfs_file_extent_item);
- if (btrfs_file_extent_type(buf, fi) ==
- BTRFS_FILE_EXTENT_INLINE)
- continue;
- disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
- if (disk_bytenr == 0)
- continue;
-
- info->bytenr = disk_bytenr;
- info->num_bytes =
- btrfs_file_extent_disk_num_bytes(buf, fi);
- info->objectid = key.objectid;
- info->offset = key.offset;
- info++;
- }
-
- ret = btrfs_add_leaf_ref(root, ref, shared);
- if (ret == -EEXIST && shared) {
- struct btrfs_leaf_ref *old;
- old = btrfs_lookup_leaf_ref(root, ref->bytenr);
- BUG_ON(!old);
- btrfs_remove_leaf_ref(root, old);
- btrfs_free_leaf_ref(root, old);
- ret = btrfs_add_leaf_ref(root, ref, shared);
- }
- WARN_ON(ret);
- btrfs_free_leaf_ref(root, ref);
- }
- out:
- return ret;
- }
-
- /* when a block goes through cow, we update the reference counts of
- * everything that block points to. The internal pointers of the block
- * can be in just about any order, and it is likely to have clusters of
- * things that are close together and clusters of things that are not.
- *
- * To help reduce the seeks that come with updating all of these reference
- * counts, sort them by byte number before actual updates are done.
- *
- * struct refsort is used to match byte number to slot in the btree block.
- * we sort based on the byte number and then use the slot to actually
- * find the item.
- *
- * struct refsort is smaller than strcut btrfs_item and smaller than
- * struct btrfs_key_ptr. Since we're currently limited to the page size
- * for a btree block, there's no way for a kmalloc of refsorts for a
- * single node to be bigger than a page.
- */
- struct refsort {
- u64 bytenr;
- u32 slot;
- };
-
- /*
- * for passing into sort()
- */
- static int refsort_cmp(const void *a_void, const void *b_void)
- {
- const struct refsort *a = a_void;
- const struct refsort *b = b_void;
-
- if (a->bytenr < b->bytenr)
- return -1;
- if (a->bytenr > b->bytenr)
- return 1;
- return 0;
- }
- #endif
-
static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct extent_buffer *buf,
bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
btrfs_mark_buffer_dirty(leaf);
- btrfs_release_path(extent_root, path);
+ btrfs_release_path(path);
fail:
if (ret)
return ret;
inode = lookup_free_space_inode(root, block_group, path);
if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
ret = PTR_ERR(inode);
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
goto out;
}
out_put:
iput(inode);
out_free:
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
out:
spin_lock(&block_group->lock);
block_group->disk_cache_state = dcs;
/* make sure bytes are sectorsize aligned */
bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
- if (root == root->fs_info->tree_root) {
+ if (root == root->fs_info->tree_root ||
+ BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID) {
alloc_chunk = 0;
committed = 1;
}
goto again;
}
- #if 0 /* I hope we never need this code again, just in case */
- printk(KERN_ERR "no space left, need %llu, %llu bytes_used, "
- "%llu bytes_reserved, " "%llu bytes_pinned, "
- "%llu bytes_readonly, %llu may use %llu total\n",
- (unsigned long long)bytes,
- (unsigned long long)data_sinfo->bytes_used,
- (unsigned long long)data_sinfo->bytes_reserved,
- (unsigned long long)data_sinfo->bytes_pinned,
- (unsigned long long)data_sinfo->bytes_readonly,
- (unsigned long long)data_sinfo->bytes_may_use,
- (unsigned long long)data_sinfo->total_bytes);
- #endif
return -ENOSPC;
}
data_sinfo->bytes_may_use += bytes;
spin_unlock(&block_rsv->lock);
}
- void block_rsv_release_bytes(struct btrfs_block_rsv *block_rsv,
- struct btrfs_block_rsv *dest, u64 num_bytes)
+ static void block_rsv_release_bytes(struct btrfs_block_rsv *block_rsv,
+ struct btrfs_block_rsv *dest, u64 num_bytes)
{
struct btrfs_space_info *space_info = block_rsv->space_info;
u64 meta_used;
u64 data_used;
int csum_size = btrfs_super_csum_size(&fs_info->super_copy);
- #if 0
- /*
- * per tree used space accounting can be inaccuracy, so we
- * can't rely on it.
- */
- spin_lock(&fs_info->extent_root->accounting_lock);
- num_bytes = btrfs_root_used(&fs_info->extent_root->root_item);
- spin_unlock(&fs_info->extent_root->accounting_lock);
- spin_lock(&fs_info->csum_root->accounting_lock);
- num_bytes += btrfs_root_used(&fs_info->csum_root->root_item);
- spin_unlock(&fs_info->csum_root->accounting_lock);
-
- spin_lock(&fs_info->tree_root->accounting_lock);
- num_bytes += btrfs_root_used(&fs_info->tree_root->root_item);
- spin_unlock(&fs_info->tree_root->accounting_lock);
- #endif
sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
spin_lock(&sinfo->lock);
data_used = sinfo->bytes_used;
block_rsv->reserved = block_rsv->size;
block_rsv->full = 1;
}
- #if 0
- printk(KERN_INFO"global block rsv size %llu reserved %llu\n",
- block_rsv->size, block_rsv->reserved);
- #endif
+
spin_unlock(&sinfo->lock);
spin_unlock(&block_rsv->lock);
}
WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
}
-static u64 calc_trans_metadata_size(struct btrfs_root *root, int num_items)
-{
- return (root->leafsize + root->nodesize * (BTRFS_MAX_LEVEL - 1)) *
- 3 * num_items;
-}
-
int btrfs_trans_reserve_metadata(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
int num_items)
if (num_items == 0 || root->fs_info->chunk_root == root)
return 0;
- num_bytes = calc_trans_metadata_size(root, num_items);
+ num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
ret = btrfs_block_rsv_add(trans, root, &root->fs_info->trans_block_rsv,
num_bytes);
if (!ret) {
* If all of the metadata space is used, we can commit
* transaction and use space it freed.
*/
- u64 num_bytes = calc_trans_metadata_size(root, 4);
+ u64 num_bytes = btrfs_calc_trans_metadata_size(root, 4);
return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
}
void btrfs_orphan_release_metadata(struct inode *inode)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
- u64 num_bytes = calc_trans_metadata_size(root, 4);
+ u64 num_bytes = btrfs_calc_trans_metadata_size(root, 4);
btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
}
* two for root back/forward refs, two for directory entries
* and one for root of the snapshot.
*/
- u64 num_bytes = calc_trans_metadata_size(root, 5);
+ u64 num_bytes = btrfs_calc_trans_metadata_size(root, 5);
dst_rsv->space_info = src_rsv->space_info;
return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
}
if (nr_extents > reserved_extents) {
nr_extents -= reserved_extents;
- to_reserve = calc_trans_metadata_size(root, nr_extents);
+ to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
} else {
nr_extents = 0;
to_reserve = 0;
to_free = calc_csum_metadata_size(inode, num_bytes);
if (nr_extents > 0)
- to_free += calc_trans_metadata_size(root, nr_extents);
+ to_free += btrfs_calc_trans_metadata_size(root, nr_extents);
btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
to_free);
NULL, refs_to_drop,
is_data);
BUG_ON(ret);
- btrfs_release_path(extent_root, path);
+ btrfs_release_path(path);
path->leave_spinning = 1;
key.objectid = bytenr;
owner_objectid, 0);
BUG_ON(ret < 0);
- btrfs_release_path(extent_root, path);
+ btrfs_release_path(path);
path->leave_spinning = 1;
key.objectid = bytenr;
ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
num_to_del);
BUG_ON(ret);
- btrfs_release_path(extent_root, path);
+ btrfs_release_path(path);
if (is_data) {
ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
return 0;
wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
- (cache->free_space >= num_bytes));
+ (cache->free_space_ctl->free_space >= num_bytes));
put_caching_control(caching_ctl);
return 0;
trans->block_rsv = block_rsv;
}
}
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
BUG_ON(err);
ret = btrfs_del_root(trans, tree_root, &root->root_key);
return ret;
}
- #if 0
- static unsigned long calc_ra(unsigned long start, unsigned long last,
- unsigned long nr)
- {
- return min(last, start + nr - 1);
- }
-
- static noinline int relocate_inode_pages(struct inode *inode, u64 start,
- u64 len)
- {
- u64 page_start;
- u64 page_end;
- unsigned long first_index;
- unsigned long last_index;
- unsigned long i;
- struct page *page;
- struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
- struct file_ra_state *ra;
- struct btrfs_ordered_extent *ordered;
- unsigned int total_read = 0;
- unsigned int total_dirty = 0;
- int ret = 0;
-
- ra = kzalloc(sizeof(*ra), GFP_NOFS);
- if (!ra)
- return -ENOMEM;
-
- mutex_lock(&inode->i_mutex);
- first_index = start >> PAGE_CACHE_SHIFT;
- last_index = (start + len - 1) >> PAGE_CACHE_SHIFT;
-
- /* make sure the dirty trick played by the caller work */
- ret = invalidate_inode_pages2_range(inode->i_mapping,
- first_index, last_index);
- if (ret)
- goto out_unlock;
-
- file_ra_state_init(ra, inode->i_mapping);
-
- for (i = first_index ; i <= last_index; i++) {
- if (total_read % ra->ra_pages == 0) {
- btrfs_force_ra(inode->i_mapping, ra, NULL, i,
- calc_ra(i, last_index, ra->ra_pages));
- }
- total_read++;
- again:
- if (((u64)i << PAGE_CACHE_SHIFT) > i_size_read(inode))
- BUG_ON(1);
- page = grab_cache_page(inode->i_mapping, i);
- if (!page) {
- ret = -ENOMEM;
- goto out_unlock;
- }
- if (!PageUptodate(page)) {
- btrfs_readpage(NULL, page);
- lock_page(page);
- if (!PageUptodate(page)) {
- unlock_page(page);
- page_cache_release(page);
- ret = -EIO;
- goto out_unlock;
- }
- }
- wait_on_page_writeback(page);
-
- page_start = (u64)page->index << PAGE_CACHE_SHIFT;
- page_end = page_start + PAGE_CACHE_SIZE - 1;
- lock_extent(io_tree, page_start, page_end, GFP_NOFS);
-
- ordered = btrfs_lookup_ordered_extent(inode, page_start);
- if (ordered) {
- unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
- unlock_page(page);
- page_cache_release(page);
- btrfs_start_ordered_extent(inode, ordered, 1);
- btrfs_put_ordered_extent(ordered);
- goto again;
- }
- set_page_extent_mapped(page);
-
- if (i == first_index)
- set_extent_bits(io_tree, page_start, page_end,
- EXTENT_BOUNDARY, GFP_NOFS);
- btrfs_set_extent_delalloc(inode, page_start, page_end);
-
- set_page_dirty(page);
- total_dirty++;
-
- unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
- unlock_page(page);
- page_cache_release(page);
- }
-
- out_unlock:
- kfree(ra);
- mutex_unlock(&inode->i_mutex);
- balance_dirty_pages_ratelimited_nr(inode->i_mapping, total_dirty);
- return ret;
- }
-
- static noinline int relocate_data_extent(struct inode *reloc_inode,
- struct btrfs_key *extent_key,
- u64 offset)
- {
- struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
- struct extent_map_tree *em_tree = &BTRFS_I(reloc_inode)->extent_tree;
- struct extent_map *em;
- u64 start = extent_key->objectid - offset;
- u64 end = start + extent_key->offset - 1;
-
- em = alloc_extent_map(GFP_NOFS);
- BUG_ON(!em);
-
- em->start = start;
- em->len = extent_key->offset;
- em->block_len = extent_key->offset;
- em->block_start = extent_key->objectid;
- em->bdev = root->fs_info->fs_devices->latest_bdev;
- set_bit(EXTENT_FLAG_PINNED, &em->flags);
-
- /* setup extent map to cheat btrfs_readpage */
- lock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);
- while (1) {
- int ret;
- write_lock(&em_tree->lock);
- ret = add_extent_mapping(em_tree, em);
- write_unlock(&em_tree->lock);
- if (ret != -EEXIST) {
- free_extent_map(em);
- break;
- }
- btrfs_drop_extent_cache(reloc_inode, start, end, 0);
- }
- unlock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);
-
- return relocate_inode_pages(reloc_inode, start, extent_key->offset);
- }
-
- struct btrfs_ref_path {
- u64 extent_start;
- u64 nodes[BTRFS_MAX_LEVEL];
- u64 root_objectid;
- u64 root_generation;
- u64 owner_objectid;
- u32 num_refs;
- int lowest_level;
- int current_level;
- int shared_level;
-
- struct btrfs_key node_keys[BTRFS_MAX_LEVEL];
- u64 new_nodes[BTRFS_MAX_LEVEL];
- };
-
- struct disk_extent {
- u64 ram_bytes;
- u64 disk_bytenr;
- u64 disk_num_bytes;
- u64 offset;
- u64 num_bytes;
- u8 compression;
- u8 encryption;
- u16 other_encoding;
- };
-
- static int is_cowonly_root(u64 root_objectid)
- {
- if (root_objectid == BTRFS_ROOT_TREE_OBJECTID ||
- root_objectid == BTRFS_EXTENT_TREE_OBJECTID ||
- root_objectid == BTRFS_CHUNK_TREE_OBJECTID ||
- root_objectid == BTRFS_DEV_TREE_OBJECTID ||
- root_objectid == BTRFS_TREE_LOG_OBJECTID ||
- root_objectid == BTRFS_CSUM_TREE_OBJECTID)
- return 1;
- return 0;
- }
-
- static noinline int __next_ref_path(struct btrfs_trans_handle *trans,
- struct btrfs_root *extent_root,
- struct btrfs_ref_path *ref_path,
- int first_time)
- {
- struct extent_buffer *leaf;
- struct btrfs_path *path;
- struct btrfs_extent_ref *ref;
- struct btrfs_key key;
- struct btrfs_key found_key;
- u64 bytenr;
- u32 nritems;
- int level;
- int ret = 1;
-
- path = btrfs_alloc_path();
- if (!path)
- return -ENOMEM;
-
- if (first_time) {
- ref_path->lowest_level = -1;
- ref_path->current_level = -1;
- ref_path->shared_level = -1;
- goto walk_up;
- }
- walk_down:
- level = ref_path->current_level - 1;
- while (level >= -1) {
- u64 parent;
- if (level < ref_path->lowest_level)
- break;
-
- if (level >= 0)
- bytenr = ref_path->nodes[level];
- else
- bytenr = ref_path->extent_start;
- BUG_ON(bytenr == 0);
-
- parent = ref_path->nodes[level + 1];
- ref_path->nodes[level + 1] = 0;
- ref_path->current_level = level;
- BUG_ON(parent == 0);
-
- key.objectid = bytenr;
- key.offset = parent + 1;
- key.type = BTRFS_EXTENT_REF_KEY;
-
- ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0);
- if (ret < 0)
- goto out;
- BUG_ON(ret == 0);
-
- leaf = path->nodes[0];
- nritems = btrfs_header_nritems(leaf);
- if (path->slots[0] >= nritems) {
- ret = btrfs_next_leaf(extent_root, path);
- if (ret < 0)
- goto out;
- if (ret > 0)
- goto next;
- leaf = path->nodes[0];
- }
-
- btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
- if (found_key.objectid == bytenr &&
- found_key.type == BTRFS_EXTENT_REF_KEY) {
- if (level < ref_path->shared_level)
- ref_path->shared_level = level;
- goto found;
- }
- next:
- level--;
- btrfs_release_path(extent_root, path);
- cond_resched();
- }
- /* reached lowest level */
- ret = 1;
- goto out;
- walk_up:
- level = ref_path->current_level;
- while (level < BTRFS_MAX_LEVEL - 1) {
- u64 ref_objectid;
-
- if (level >= 0)
- bytenr = ref_path->nodes[level];
- else
- bytenr = ref_path->extent_start;
-
- BUG_ON(bytenr == 0);
-
- key.objectid = bytenr;
- key.offset = 0;
- key.type = BTRFS_EXTENT_REF_KEY;
-
- ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0);
- if (ret < 0)
- goto out;
-
- leaf = path->nodes[0];
- nritems = btrfs_header_nritems(leaf);
- if (path->slots[0] >= nritems) {
- ret = btrfs_next_leaf(extent_root, path);
- if (ret < 0)
- goto out;
- if (ret > 0) {
- /* the extent was freed by someone */
- if (ref_path->lowest_level == level)
- goto out;
- btrfs_release_path(extent_root, path);
- goto walk_down;
- }
- leaf = path->nodes[0];
- }
-
- btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
- if (found_key.objectid != bytenr ||
- found_key.type != BTRFS_EXTENT_REF_KEY) {
- /* the extent was freed by someone */
- if (ref_path->lowest_level == level) {
- ret = 1;
- goto out;
- }
- btrfs_release_path(extent_root, path);
- goto walk_down;
- }
- found:
- ref = btrfs_item_ptr(leaf, path->slots[0],
- struct btrfs_extent_ref);
- ref_objectid = btrfs_ref_objectid(leaf, ref);
- if (ref_objectid < BTRFS_FIRST_FREE_OBJECTID) {
- if (first_time) {
- level = (int)ref_objectid;
- BUG_ON(level >= BTRFS_MAX_LEVEL);
- ref_path->lowest_level = level;
- ref_path->current_level = level;
- ref_path->nodes[level] = bytenr;
- } else {
- WARN_ON(ref_objectid != level);
- }
- } else {
- WARN_ON(level != -1);
- }
- first_time = 0;
-
- if (ref_path->lowest_level == level) {
- ref_path->owner_objectid = ref_objectid;
- ref_path->num_refs = btrfs_ref_num_refs(leaf, ref);
- }
-
- /*
- * the block is tree root or the block isn't in reference
- * counted tree.
- */
- if (found_key.objectid == found_key.offset ||
- is_cowonly_root(btrfs_ref_root(leaf, ref))) {
- ref_path->root_objectid = btrfs_ref_root(leaf, ref);
- ref_path->root_generation =
- btrfs_ref_generation(leaf, ref);
- if (level < 0) {
- /* special reference from the tree log */
- ref_path->nodes[0] = found_key.offset;
- ref_path->current_level = 0;
- }
- ret = 0;
- goto out;
- }
-
- level++;
- BUG_ON(ref_path->nodes[level] != 0);
- ref_path->nodes[level] = found_key.offset;
- ref_path->current_level = level;
-
- /*
- * the reference was created in the running transaction,
- * no need to continue walking up.
- */
- if (btrfs_ref_generation(leaf, ref) == trans->transid) {
- ref_path->root_objectid = btrfs_ref_root(leaf, ref);
- ref_path->root_generation =
- btrfs_ref_generation(leaf, ref);
- ret = 0;
- goto out;
- }
-
- btrfs_release_path(extent_root, path);
- cond_resched();
- }
- /* reached max tree level, but no tree root found. */
- BUG();
- out:
- btrfs_free_path(path);
- return ret;
- }
-
- static int btrfs_first_ref_path(struct btrfs_trans_handle *trans,
- struct btrfs_root *extent_root,
- struct btrfs_ref_path *ref_path,
- u64 extent_start)
- {
- memset(ref_path, 0, sizeof(*ref_path));
- ref_path->extent_start = extent_start;
-
- return __next_ref_path(trans, extent_root, ref_path, 1);
- }
-
- static int btrfs_next_ref_path(struct btrfs_trans_handle *trans,
- struct btrfs_root *extent_root,
- struct btrfs_ref_path *ref_path)
- {
- return __next_ref_path(trans, extent_root, ref_path, 0);
- }
-
- static noinline int get_new_locations(struct inode *reloc_inode,
- struct btrfs_key *extent_key,
- u64 offset, int no_fragment,
- struct disk_extent **extents,
- int *nr_extents)
- {
- struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
- struct btrfs_path *path;
- struct btrfs_file_extent_item *fi;
- struct extent_buffer *leaf;
- struct disk_extent *exts = *extents;
- struct btrfs_key found_key;
- u64 cur_pos;
- u64 last_byte;
- u32 nritems;
- int nr = 0;
- int max = *nr_extents;
- int ret;
-
- WARN_ON(!no_fragment && *extents);
- if (!exts) {
- max = 1;
- exts = kmalloc(sizeof(*exts) * max, GFP_NOFS);
- if (!exts)
- return -ENOMEM;
- }
-
- path = btrfs_alloc_path();
- if (!path) {
- if (exts != *extents)
- kfree(exts);
- return -ENOMEM;
- }
-
- cur_pos = extent_key->objectid - offset;
- last_byte = extent_key->objectid + extent_key->offset;
- ret = btrfs_lookup_file_extent(NULL, root, path,
- btrfs_ino(reloc_inode), cur_pos, 0);
- if (ret < 0)
- goto out;
- if (ret > 0) {
- ret = -ENOENT;
- goto out;
- }
-
- while (1) {
- leaf = path->nodes[0];
- nritems = btrfs_header_nritems(leaf);
- if (path->slots[0] >= nritems) {
- ret = btrfs_next_leaf(root, path);
- if (ret < 0)
- goto out;
- if (ret > 0)
- break;
- leaf = path->nodes[0];
- }
-
- btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
- if (found_key.offset != cur_pos ||
- found_key.type != BTRFS_EXTENT_DATA_KEY ||
- found_key.objectid != btrfs_ino(reloc_inode))
- break;
-
- fi = btrfs_item_ptr(leaf, path->slots[0],
- struct btrfs_file_extent_item);
- if (btrfs_file_extent_type(leaf, fi) !=
- BTRFS_FILE_EXTENT_REG ||
- btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
- break;
-
- if (nr == max) {
- struct disk_extent *old = exts;
- max *= 2;
- exts = kzalloc(sizeof(*exts) * max, GFP_NOFS);
- if (!exts) {
- ret = -ENOMEM;
- goto out;
- }
- memcpy(exts, old, sizeof(*exts) * nr);
- if (old != *extents)
- kfree(old);
- }
-
- exts[nr].disk_bytenr =
- btrfs_file_extent_disk_bytenr(leaf, fi);
- exts[nr].disk_num_bytes =
- btrfs_file_extent_disk_num_bytes(leaf, fi);
- exts[nr].offset = btrfs_file_extent_offset(leaf, fi);
- exts[nr].num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
- exts[nr].ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
- exts[nr].compression = btrfs_file_extent_compression(leaf, fi);
- exts[nr].encryption = btrfs_file_extent_encryption(leaf, fi);
- exts[nr].other_encoding = btrfs_file_extent_other_encoding(leaf,
- fi);
- BUG_ON(exts[nr].offset > 0);
- BUG_ON(exts[nr].compression || exts[nr].encryption);
- BUG_ON(exts[nr].num_bytes != exts[nr].disk_num_bytes);
-
- cur_pos += exts[nr].num_bytes;
- nr++;
-
- if (cur_pos + offset >= last_byte)
- break;
-
- if (no_fragment) {
- ret = 1;
- goto out;
- }
- path->slots[0]++;
- }
-
- BUG_ON(cur_pos + offset > last_byte);
- if (cur_pos + offset < last_byte) {
- ret = -ENOENT;
- goto out;
- }
- ret = 0;
- out:
- btrfs_free_path(path);
- if (ret) {
- if (exts != *extents)
- kfree(exts);
- } else {
- *extents = exts;
- *nr_extents = nr;
- }
- return ret;
- }
-
- static noinline int replace_one_extent(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_path *path,
- struct btrfs_key *extent_key,
- struct btrfs_key *leaf_key,
- struct btrfs_ref_path *ref_path,
- struct disk_extent *new_extents,
- int nr_extents)
- {
- struct extent_buffer *leaf;
- struct btrfs_file_extent_item *fi;
- struct inode *inode = NULL;
- struct btrfs_key key;
- u64 lock_start = 0;
- u64 lock_end = 0;
- u64 num_bytes;
- u64 ext_offset;
- u64 search_end = (u64)-1;
- u32 nritems;
- int nr_scaned = 0;
- int extent_locked = 0;
- int extent_type;
- int ret;
-
- memcpy(&key, leaf_key, sizeof(key));
- if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
- if (key.objectid < ref_path->owner_objectid ||
- (key.objectid == ref_path->owner_objectid &&
- key.type < BTRFS_EXTENT_DATA_KEY)) {
- key.objectid = ref_path->owner_objectid;
- key.type = BTRFS_EXTENT_DATA_KEY;
- key.offset = 0;
- }
- }
-
- while (1) {
- ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
- if (ret < 0)
- goto out;
-
- leaf = path->nodes[0];
- nritems = btrfs_header_nritems(leaf);
- next:
- if (extent_locked && ret > 0) {
- /*
- * the file extent item was modified by someone
- * before the extent got locked.
- */
- unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
- lock_end, GFP_NOFS);
- extent_locked = 0;
- }
-
- if (path->slots[0] >= nritems) {
- if (++nr_scaned > 2)
- break;
-
- BUG_ON(extent_locked);
- ret = btrfs_next_leaf(root, path);
- if (ret < 0)
- goto out;
- if (ret > 0)
- break;
- leaf = path->nodes[0];
- nritems = btrfs_header_nritems(leaf);
- }
-
- btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
-
- if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
- if ((key.objectid > ref_path->owner_objectid) ||
- (key.objectid == ref_path->owner_objectid &&
- key.type > BTRFS_EXTENT_DATA_KEY) ||
- key.offset >= search_end)
- break;
- }
-
- if (inode && key.objectid != btrfs_ino(inode)) {
- BUG_ON(extent_locked);
- btrfs_release_path(root, path);
- mutex_unlock(&inode->i_mutex);
- iput(inode);
- inode = NULL;
- continue;
- }
-
- if (key.type != BTRFS_EXTENT_DATA_KEY) {
- path->slots[0]++;
- ret = 1;
- goto next;
- }
- fi = btrfs_item_ptr(leaf, path->slots[0],
- struct btrfs_file_extent_item);
- extent_type = btrfs_file_extent_type(leaf, fi);
- if ((extent_type != BTRFS_FILE_EXTENT_REG &&
- extent_type != BTRFS_FILE_EXTENT_PREALLOC) ||
- (btrfs_file_extent_disk_bytenr(leaf, fi) !=
- extent_key->objectid)) {
- path->slots[0]++;
- ret = 1;
- goto next;
- }
-
- num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
- ext_offset = btrfs_file_extent_offset(leaf, fi);
-
- if (search_end == (u64)-1) {
- search_end = key.offset - ext_offset +
- btrfs_file_extent_ram_bytes(leaf, fi);
- }
-
- if (!extent_locked) {
- lock_start = key.offset;
- lock_end = lock_start + num_bytes - 1;
- } else {
- if (lock_start > key.offset ||
- lock_end + 1 < key.offset + num_bytes) {
- unlock_extent(&BTRFS_I(inode)->io_tree,
- lock_start, lock_end, GFP_NOFS);
- extent_locked = 0;
- }
- }
-
- if (!inode) {
- btrfs_release_path(root, path);
-
- inode = btrfs_iget_locked(root->fs_info->sb,
- key.objectid, root);
- if (inode->i_state & I_NEW) {
- BTRFS_I(inode)->root = root;
- BTRFS_I(inode)->location.objectid =
- key.objectid;
- BTRFS_I(inode)->location.type =
- BTRFS_INODE_ITEM_KEY;
- BTRFS_I(inode)->location.offset = 0;
- btrfs_read_locked_inode(inode);
- unlock_new_inode(inode);
- }
- /*
- * some code call btrfs_commit_transaction while
- * holding the i_mutex, so we can't use mutex_lock
- * here.
- */
- if (is_bad_inode(inode) ||
- !mutex_trylock(&inode->i_mutex)) {
- iput(inode);
- inode = NULL;
- key.offset = (u64)-1;
- goto skip;
- }
- }
-
- if (!extent_locked) {
- struct btrfs_ordered_extent *ordered;
-
- btrfs_release_path(root, path);
-
- lock_extent(&BTRFS_I(inode)->io_tree, lock_start,
- lock_end, GFP_NOFS);
- ordered = btrfs_lookup_first_ordered_extent(inode,
- lock_end);
- if (ordered &&
- ordered->file_offset <= lock_end &&
- ordered->file_offset + ordered->len > lock_start) {
- unlock_extent(&BTRFS_I(inode)->io_tree,
- lock_start, lock_end, GFP_NOFS);
- btrfs_start_ordered_extent(inode, ordered, 1);
- btrfs_put_ordered_extent(ordered);
- key.offset += num_bytes;
- goto skip;
- }
- if (ordered)
- btrfs_put_ordered_extent(ordered);
-
- extent_locked = 1;
- continue;
- }
-
- if (nr_extents == 1) {
- /* update extent pointer in place */
- btrfs_set_file_extent_disk_bytenr(leaf, fi,
- new_extents[0].disk_bytenr);
- btrfs_set_file_extent_disk_num_bytes(leaf, fi,
- new_extents[0].disk_num_bytes);
- btrfs_mark_buffer_dirty(leaf);
-
- btrfs_drop_extent_cache(inode, key.offset,
- key.offset + num_bytes - 1, 0);
-
- ret = btrfs_inc_extent_ref(trans, root,
- new_extents[0].disk_bytenr,
- new_extents[0].disk_num_bytes,
- leaf->start,
- root->root_key.objectid,
- trans->transid,
- key.objectid);
- BUG_ON(ret);
-
- ret = btrfs_free_extent(trans, root,
- extent_key->objectid,
- extent_key->offset,
- leaf->start,
- btrfs_header_owner(leaf),
- btrfs_header_generation(leaf),
- key.objectid, 0);
- BUG_ON(ret);
-
- btrfs_release_path(root, path);
- key.offset += num_bytes;
- } else {
- BUG_ON(1);
- #if 0
- u64 alloc_hint;
- u64 extent_len;
- int i;
- /*
- * drop old extent pointer at first, then insert the
- * new pointers one bye one
- */
- btrfs_release_path(root, path);
- ret = btrfs_drop_extents(trans, root, inode, key.offset,
- key.offset + num_bytes,
- key.offset, &alloc_hint);
- BUG_ON(ret);
-
- for (i = 0; i < nr_extents; i++) {
- if (ext_offset >= new_extents[i].num_bytes) {
- ext_offset -= new_extents[i].num_bytes;
- continue;
- }
- extent_len = min(new_extents[i].num_bytes -
- ext_offset, num_bytes);
-
- ret = btrfs_insert_empty_item(trans, root,
- path, &key,
- sizeof(*fi));
- BUG_ON(ret);
-
- leaf = path->nodes[0];
- fi = btrfs_item_ptr(leaf, path->slots[0],
- struct btrfs_file_extent_item);
- btrfs_set_file_extent_generation(leaf, fi,
- trans->transid);
- btrfs_set_file_extent_type(leaf, fi,
- BTRFS_FILE_EXTENT_REG);
- btrfs_set_file_extent_disk_bytenr(leaf, fi,
- new_extents[i].disk_bytenr);
- btrfs_set_file_extent_disk_num_bytes(leaf, fi,
- new_extents[i].disk_num_bytes);
- btrfs_set_file_extent_ram_bytes(leaf, fi,
- new_extents[i].ram_bytes);
-
- btrfs_set_file_extent_compression(leaf, fi,
- new_extents[i].compression);
- btrfs_set_file_extent_encryption(leaf, fi,
- new_extents[i].encryption);
- btrfs_set_file_extent_other_encoding(leaf, fi,
- new_extents[i].other_encoding);
-
- btrfs_set_file_extent_num_bytes(leaf, fi,
- extent_len);
- ext_offset += new_extents[i].offset;
- btrfs_set_file_extent_offset(leaf, fi,
- ext_offset);
- btrfs_mark_buffer_dirty(leaf);
-
- btrfs_drop_extent_cache(inode, key.offset,
- key.offset + extent_len - 1, 0);
-
- ret = btrfs_inc_extent_ref(trans, root,
- new_extents[i].disk_bytenr,
- new_extents[i].disk_num_bytes,
- leaf->start,
- root->root_key.objectid,
- trans->transid, key.objectid);
- BUG_ON(ret);
- btrfs_release_path(root, path);
-
- inode_add_bytes(inode, extent_len);
-
- ext_offset = 0;
- num_bytes -= extent_len;
- key.offset += extent_len;
-
- if (num_bytes == 0)
- break;
- }
- BUG_ON(i >= nr_extents);
- #endif
- }
-
- if (extent_locked) {
- unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
- lock_end, GFP_NOFS);
- extent_locked = 0;
- }
- skip:
- if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS &&
- key.offset >= search_end)
- break;
-
- cond_resched();
- }
- ret = 0;
- out:
- btrfs_release_path(root, path);
- if (inode) {
- mutex_unlock(&inode->i_mutex);
- if (extent_locked) {
- unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
- lock_end, GFP_NOFS);
- }
- iput(inode);
- }
- return ret;
- }
-
- int btrfs_reloc_tree_cache_ref(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct extent_buffer *buf, u64 orig_start)
- {
- int level;
- int ret;
-
- BUG_ON(btrfs_header_generation(buf) != trans->transid);
- BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
-
- level = btrfs_header_level(buf);
- if (level == 0) {
- struct btrfs_leaf_ref *ref;
- struct btrfs_leaf_ref *orig_ref;
-
- orig_ref = btrfs_lookup_leaf_ref(root, orig_start);
- if (!orig_ref)
- return -ENOENT;
-
- ref = btrfs_alloc_leaf_ref(root, orig_ref->nritems);
- if (!ref) {
- btrfs_free_leaf_ref(root, orig_ref);
- return -ENOMEM;
- }
-
- ref->nritems = orig_ref->nritems;
- memcpy(ref->extents, orig_ref->extents,
- sizeof(ref->extents[0]) * ref->nritems);
-
- btrfs_free_leaf_ref(root, orig_ref);
-
- ref->root_gen = trans->transid;
- ref->bytenr = buf->start;
- ref->owner = btrfs_header_owner(buf);
- ref->generation = btrfs_header_generation(buf);
-
- ret = btrfs_add_leaf_ref(root, ref, 0);
- WARN_ON(ret);
- btrfs_free_leaf_ref(root, ref);
- }
- return 0;
- }
-
- static noinline int invalidate_extent_cache(struct btrfs_root *root,
- struct extent_buffer *leaf,
- struct btrfs_block_group_cache *group,
- struct btrfs_root *target_root)
- {
- struct btrfs_key key;
- struct inode *inode = NULL;
- struct btrfs_file_extent_item *fi;
- struct extent_state *cached_state = NULL;
- u64 num_bytes;
- u64 skip_objectid = 0;
- u32 nritems;
- u32 i;
-
- nritems = btrfs_header_nritems(leaf);
- for (i = 0; i < nritems; i++) {
- btrfs_item_key_to_cpu(leaf, &key, i);
- if (key.objectid == skip_objectid ||
- key.type != BTRFS_EXTENT_DATA_KEY)
- continue;
- fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
- if (btrfs_file_extent_type(leaf, fi) ==
- BTRFS_FILE_EXTENT_INLINE)
- continue;
- if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
- continue;
- if (!inode || btrfs_ino(inode) != key.objectid) {
- iput(inode);
- inode = btrfs_ilookup(target_root->fs_info->sb,
- key.objectid, target_root, 1);
- }
- if (!inode) {
- skip_objectid = key.objectid;
- continue;
- }
- num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
-
- lock_extent_bits(&BTRFS_I(inode)->io_tree, key.offset,
- key.offset + num_bytes - 1, 0, &cached_state,
- GFP_NOFS);
- btrfs_drop_extent_cache(inode, key.offset,
- key.offset + num_bytes - 1, 1);
- unlock_extent_cached(&BTRFS_I(inode)->io_tree, key.offset,
- key.offset + num_bytes - 1, &cached_state,
- GFP_NOFS);
- cond_resched();
- }
- iput(inode);
- return 0;
- }
-
- static noinline int replace_extents_in_leaf(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct extent_buffer *leaf,
- struct btrfs_block_group_cache *group,
- struct inode *reloc_inode)
- {
- struct btrfs_key key;
- struct btrfs_key extent_key;
- struct btrfs_file_extent_item *fi;
- struct btrfs_leaf_ref *ref;
- struct disk_extent *new_extent;
- u64 bytenr;
- u64 num_bytes;
- u32 nritems;
- u32 i;
- int ext_index;
- int nr_extent;
- int ret;
-
- new_extent = kmalloc(sizeof(*new_extent), GFP_NOFS);
- if (!new_extent)
- return -ENOMEM;
-
- ref = btrfs_lookup_leaf_ref(root, leaf->start);
- BUG_ON(!ref);
-
- ext_index = -1;
- nritems = btrfs_header_nritems(leaf);
- for (i = 0; i < nritems; i++) {
- btrfs_item_key_to_cpu(leaf, &key, i);
- if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
- continue;
- fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
- if (btrfs_file_extent_type(leaf, fi) ==
- BTRFS_FILE_EXTENT_INLINE)
- continue;
- bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
- num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
- if (bytenr == 0)
- continue;
-
- ext_index++;
- if (bytenr >= group->key.objectid + group->key.offset ||
- bytenr + num_bytes <= group->key.objectid)
- continue;
-
- extent_key.objectid = bytenr;
- extent_key.offset = num_bytes;
- extent_key.type = BTRFS_EXTENT_ITEM_KEY;
- nr_extent = 1;
- ret = get_new_locations(reloc_inode, &extent_key,
- group->key.objectid, 1,
- &new_extent, &nr_extent);
- if (ret > 0)
- continue;
- BUG_ON(ret < 0);
-
- BUG_ON(ref->extents[ext_index].bytenr != bytenr);
- BUG_ON(ref->extents[ext_index].num_bytes != num_bytes);
- ref->extents[ext_index].bytenr = new_extent->disk_bytenr;
- ref->extents[ext_index].num_bytes = new_extent->disk_num_bytes;
-
- btrfs_set_file_extent_disk_bytenr(leaf, fi,
- new_extent->disk_bytenr);
- btrfs_set_file_extent_disk_num_bytes(leaf, fi,
- new_extent->disk_num_bytes);
- btrfs_mark_buffer_dirty(leaf);
-
- ret = btrfs_inc_extent_ref(trans, root,
- new_extent->disk_bytenr,
- new_extent->disk_num_bytes,
- leaf->start,
- root->root_key.objectid,
- trans->transid, key.objectid);
- BUG_ON(ret);
-
- ret = btrfs_free_extent(trans, root,
- bytenr, num_bytes, leaf->start,
- btrfs_header_owner(leaf),
- btrfs_header_generation(leaf),
- key.objectid, 0);
- BUG_ON(ret);
- cond_resched();
- }
- kfree(new_extent);
- BUG_ON(ext_index + 1 != ref->nritems);
- btrfs_free_leaf_ref(root, ref);
- return 0;
- }
-
- int btrfs_free_reloc_root(struct btrfs_trans_handle *trans,
- struct btrfs_root *root)
- {
- struct btrfs_root *reloc_root;
- int ret;
-
- if (root->reloc_root) {
- reloc_root = root->reloc_root;
- root->reloc_root = NULL;
- list_add(&reloc_root->dead_list,
- &root->fs_info->dead_reloc_roots);
-
- btrfs_set_root_bytenr(&reloc_root->root_item,
- reloc_root->node->start);
- btrfs_set_root_level(&root->root_item,
- btrfs_header_level(reloc_root->node));
- memset(&reloc_root->root_item.drop_progress, 0,
- sizeof(struct btrfs_disk_key));
- reloc_root->root_item.drop_level = 0;
-
- ret = btrfs_update_root(trans, root->fs_info->tree_root,
- &reloc_root->root_key,
- &reloc_root->root_item);
- BUG_ON(ret);
- }
- return 0;
- }
-
- int btrfs_drop_dead_reloc_roots(struct btrfs_root *root)
- {
- struct btrfs_trans_handle *trans;
- struct btrfs_root *reloc_root;
- struct btrfs_root *prev_root = NULL;
- struct list_head dead_roots;
- int ret;
- unsigned long nr;
-
- INIT_LIST_HEAD(&dead_roots);
- list_splice_init(&root->fs_info->dead_reloc_roots, &dead_roots);
-
- while (!list_empty(&dead_roots)) {
- reloc_root = list_entry(dead_roots.prev,
- struct btrfs_root, dead_list);
- list_del_init(&reloc_root->dead_list);
-
- BUG_ON(reloc_root->commit_root != NULL);
- while (1) {
- trans = btrfs_join_transaction(root, 1);
- BUG_ON(IS_ERR(trans));
-
- mutex_lock(&root->fs_info->drop_mutex);
- ret = btrfs_drop_snapshot(trans, reloc_root);
- if (ret != -EAGAIN)
- break;
- mutex_unlock(&root->fs_info->drop_mutex);
-
- nr = trans->blocks_used;
- ret = btrfs_end_transaction(trans, root);
- BUG_ON(ret);
- btrfs_btree_balance_dirty(root, nr);
- }
-
- free_extent_buffer(reloc_root->node);
-
- ret = btrfs_del_root(trans, root->fs_info->tree_root,
- &reloc_root->root_key);
- BUG_ON(ret);
- mutex_unlock(&root->fs_info->drop_mutex);
-
- nr = trans->blocks_used;
- ret = btrfs_end_transaction(trans, root);
- BUG_ON(ret);
- btrfs_btree_balance_dirty(root, nr);
-
- kfree(prev_root);
- prev_root = reloc_root;
- }
- if (prev_root) {
- btrfs_remove_leaf_refs(prev_root, (u64)-1, 0);
- kfree(prev_root);
- }
- return 0;
- }
-
- int btrfs_add_dead_reloc_root(struct btrfs_root *root)
- {
- list_add(&root->dead_list, &root->fs_info->dead_reloc_roots);
- return 0;
- }
-
- int btrfs_cleanup_reloc_trees(struct btrfs_root *root)
- {
- struct btrfs_root *reloc_root;
- struct btrfs_trans_handle *trans;
- struct btrfs_key location;
- int found;
- int ret;
-
- mutex_lock(&root->fs_info->tree_reloc_mutex);
- ret = btrfs_find_dead_roots(root, BTRFS_TREE_RELOC_OBJECTID, NULL);
- BUG_ON(ret);
- found = !list_empty(&root->fs_info->dead_reloc_roots);
- mutex_unlock(&root->fs_info->tree_reloc_mutex);
-
- if (found) {
- trans = btrfs_start_transaction(root, 1);
- BUG_ON(IS_ERR(trans));
- ret = btrfs_commit_transaction(trans, root);
- BUG_ON(ret);
- }
-
- location.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID;
- location.offset = (u64)-1;
- location.type = BTRFS_ROOT_ITEM_KEY;
-
- reloc_root = btrfs_read_fs_root_no_name(root->fs_info, &location);
- BUG_ON(!reloc_root);
- ret = btrfs_orphan_cleanup(reloc_root);
- BUG_ON(ret);
- return 0;
- }
-
- static noinline int init_reloc_tree(struct btrfs_trans_handle *trans,
- struct btrfs_root *root)
- {
- struct btrfs_root *reloc_root;
- struct extent_buffer *eb;
- struct btrfs_root_item *root_item;
- struct btrfs_key root_key;
- int ret;
-
- BUG_ON(!root->ref_cows);
- if (root->reloc_root)
- return 0;
-
- root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
- if (!root_item)
- return -ENOMEM;
-
- ret = btrfs_copy_root(trans, root, root->commit_root,
- &eb, BTRFS_TREE_RELOC_OBJECTID);
- BUG_ON(ret);
-
- root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
- root_key.offset = root->root_key.objectid;
- root_key.type = BTRFS_ROOT_ITEM_KEY;
-
- memcpy(root_item, &root->root_item, sizeof(*root_item));
- btrfs_set_root_refs(root_item, 0);
- btrfs_set_root_bytenr(root_item, eb->start);
- btrfs_set_root_level(root_item, btrfs_header_level(eb));
- btrfs_set_root_generation(root_item, trans->transid);
-
- btrfs_tree_unlock(eb);
- free_extent_buffer(eb);
-
- ret = btrfs_insert_root(trans, root->fs_info->tree_root,
- &root_key, root_item);
- BUG_ON(ret);
- kfree(root_item);
-
- reloc_root = btrfs_read_fs_root_no_radix(root->fs_info->tree_root,
- &root_key);
- BUG_ON(IS_ERR(reloc_root));
- reloc_root->last_trans = trans->transid;
- reloc_root->commit_root = NULL;
- reloc_root->ref_tree = &root->fs_info->reloc_ref_tree;
-
- root->reloc_root = reloc_root;
- return 0;
- }
-
- /*
- * Core function of space balance.
- *
- * The idea is using reloc trees to relocate tree blocks in reference
- * counted roots. There is one reloc tree for each subvol, and all
- * reloc trees share same root key objectid. Reloc trees are snapshots
- * of the latest committed roots of subvols (root->commit_root).
- *
- * To relocate a tree block referenced by a subvol, there are two steps.
- * COW the block through subvol's reloc tree, then update block pointer
- * in the subvol to point to the new block. Since all reloc trees share
- * same root key objectid, doing special handing for tree blocks owned
- * by them is easy. Once a tree block has been COWed in one reloc tree,
- * we can use the resulting new block directly when the same block is
- * required to COW again through other reloc trees. By this way, relocated
- * tree blocks are shared between reloc trees, so they are also shared
- * between subvols.
- */
- static noinline int relocate_one_path(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_path *path,
- struct btrfs_key *first_key,
- struct btrfs_ref_path *ref_path,
- struct btrfs_block_group_cache *group,
- struct inode *reloc_inode)
- {
- struct btrfs_root *reloc_root;
- struct extent_buffer *eb = NULL;
- struct btrfs_key *keys;
- u64 *nodes;
- int level;
- int shared_level;
- int lowest_level = 0;
- int ret;
-
- if (ref_path->owner_objectid < BTRFS_FIRST_FREE_OBJECTID)
- lowest_level = ref_path->owner_objectid;
-
- if (!root->ref_cows) {
- path->lowest_level = lowest_level;
- ret = btrfs_search_slot(trans, root, first_key, path, 0, 1);
- BUG_ON(ret < 0);
- path->lowest_level = 0;
- btrfs_release_path(root, path);
- return 0;
- }
-
- mutex_lock(&root->fs_info->tree_reloc_mutex);
- ret = init_reloc_tree(trans, root);
- BUG_ON(ret);
- reloc_root = root->reloc_root;
-
- shared_level = ref_path->shared_level;
- ref_path->shared_level = BTRFS_MAX_LEVEL - 1;
-
- keys = ref_path->node_keys;
- nodes = ref_path->new_nodes;
- memset(&keys[shared_level + 1], 0,
- sizeof(*keys) * (BTRFS_MAX_LEVEL - shared_level - 1));
- memset(&nodes[shared_level + 1], 0,
- sizeof(*nodes) * (BTRFS_MAX_LEVEL - shared_level - 1));
-
- if (nodes[lowest_level] == 0) {
- path->lowest_level = lowest_level;
- ret = btrfs_search_slot(trans, reloc_root, first_key, path,
- 0, 1);
- BUG_ON(ret);
- for (level = lowest_level; level < BTRFS_MAX_LEVEL; level++) {
- eb = path->nodes[level];
- if (!eb || eb == reloc_root->node)
- break;
- nodes[level] = eb->start;
- if (level == 0)
- btrfs_item_key_to_cpu(eb, &keys[level], 0);
- else
- btrfs_node_key_to_cpu(eb, &keys[level], 0);
- }
- if (nodes[0] &&
- ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
- eb = path->nodes[0];
- ret = replace_extents_in_leaf(trans, reloc_root, eb,
- group, reloc_inode);
- BUG_ON(ret);
- }
- btrfs_release_path(reloc_root, path);
- } else {
- ret = btrfs_merge_path(trans, reloc_root, keys, nodes,
- lowest_level);
- BUG_ON(ret);
- }
-
- /*
- * replace tree blocks in the fs tree with tree blocks in
- * the reloc tree.
- */
- ret = btrfs_merge_path(trans, root, keys, nodes, lowest_level);
- BUG_ON(ret < 0);
-
- if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
- ret = btrfs_search_slot(trans, reloc_root, first_key, path,
- 0, 0);
- BUG_ON(ret);
- extent_buffer_get(path->nodes[0]);
- eb = path->nodes[0];
- btrfs_release_path(reloc_root, path);
- ret = invalidate_extent_cache(reloc_root, eb, group, root);
- BUG_ON(ret);
- free_extent_buffer(eb);
- }
-
- mutex_unlock(&root->fs_info->tree_reloc_mutex);
- path->lowest_level = 0;
- return 0;
- }
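
The removed comment above describes the property that made reloc trees cheap:
once a block has been COWed through any reloc tree, every other reloc tree
reuses that copy instead of COWing it again. A minimal user-space sketch of
that memoization (illustrative only, not part of the patch; cow_shared() is a
made-up stand-in for the real COW path):

#include <stdint.h>
#include <stdio.h>

#define MAX_BLOCKS 16

static uint64_t old_bytenr[MAX_BLOCKS];
static uint64_t new_bytenr[MAX_BLOCKS];
static int nr_cowed;
static uint64_t next_free = 0x1000;

/* COW a block once; later callers (other reloc trees) reuse the copy */
static uint64_t cow_shared(uint64_t bytenr)
{
	int i;

	for (i = 0; i < nr_cowed; i++)
		if (old_bytenr[i] == bytenr)
			return new_bytenr[i];	/* already relocated */
	old_bytenr[nr_cowed] = bytenr;
	new_bytenr[nr_cowed] = next_free;
	next_free += 0x1000;
	return new_bytenr[nr_cowed++];
}

int main(void)
{
	/* two subvols referencing the same block get the same new copy */
	printf("subvol A: 0x%llx\n", (unsigned long long)cow_shared(0xabc000));
	printf("subvol B: 0x%llx\n", (unsigned long long)cow_shared(0xabc000));
	return 0;
}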
-
- static noinline int relocate_tree_block(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_path *path,
- struct btrfs_key *first_key,
- struct btrfs_ref_path *ref_path)
- {
- int ret;
-
- ret = relocate_one_path(trans, root, path, first_key,
- ref_path, NULL, NULL);
- BUG_ON(ret);
-
- return 0;
- }
-
- static noinline int del_extent_zero(struct btrfs_trans_handle *trans,
- struct btrfs_root *extent_root,
- struct btrfs_path *path,
- struct btrfs_key *extent_key)
- {
- int ret;
-
- ret = btrfs_search_slot(trans, extent_root, extent_key, path, -1, 1);
- if (ret)
- goto out;
- ret = btrfs_del_item(trans, extent_root, path);
- out:
- btrfs_release_path(extent_root, path);
- return ret;
- }
-
- static noinline struct btrfs_root *read_ref_root(struct btrfs_fs_info *fs_info,
- struct btrfs_ref_path *ref_path)
- {
- struct btrfs_key root_key;
-
- root_key.objectid = ref_path->root_objectid;
- root_key.type = BTRFS_ROOT_ITEM_KEY;
- if (is_cowonly_root(ref_path->root_objectid))
- root_key.offset = 0;
- else
- root_key.offset = (u64)-1;
-
- return btrfs_read_fs_root_no_name(fs_info, &root_key);
- }
-
- static noinline int relocate_one_extent(struct btrfs_root *extent_root,
- struct btrfs_path *path,
- struct btrfs_key *extent_key,
- struct btrfs_block_group_cache *group,
- struct inode *reloc_inode, int pass)
- {
- struct btrfs_trans_handle *trans;
- struct btrfs_root *found_root;
- struct btrfs_ref_path *ref_path = NULL;
- struct disk_extent *new_extents = NULL;
- int nr_extents = 0;
- int loops;
- int ret;
- int level;
- struct btrfs_key first_key;
- u64 prev_block = 0;
-
-
- trans = btrfs_start_transaction(extent_root, 1);
- BUG_ON(IS_ERR(trans));
-
- if (extent_key->objectid == 0) {
- ret = del_extent_zero(trans, extent_root, path, extent_key);
- goto out;
- }
-
- ref_path = kmalloc(sizeof(*ref_path), GFP_NOFS);
- if (!ref_path) {
- ret = -ENOMEM;
- goto out;
- }
-
- for (loops = 0; ; loops++) {
- if (loops == 0) {
- ret = btrfs_first_ref_path(trans, extent_root, ref_path,
- extent_key->objectid);
- } else {
- ret = btrfs_next_ref_path(trans, extent_root, ref_path);
- }
- if (ret < 0)
- goto out;
- if (ret > 0)
- break;
-
- if (ref_path->root_objectid == BTRFS_TREE_LOG_OBJECTID ||
- ref_path->root_objectid == BTRFS_TREE_RELOC_OBJECTID)
- continue;
-
- found_root = read_ref_root(extent_root->fs_info, ref_path);
- BUG_ON(!found_root);
- /*
- * for reference counted tree, only process reference paths
- * rooted at the latest committed root.
- */
- if (found_root->ref_cows &&
- ref_path->root_generation != found_root->root_key.offset)
- continue;
-
- if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
- if (pass == 0) {
- /*
- * copy data extents to new locations
- */
- u64 group_start = group->key.objectid;
- ret = relocate_data_extent(reloc_inode,
- extent_key,
- group_start);
- if (ret < 0)
- goto out;
- break;
- }
- level = 0;
- } else {
- level = ref_path->owner_objectid;
- }
-
- if (prev_block != ref_path->nodes[level]) {
- struct extent_buffer *eb;
- u64 block_start = ref_path->nodes[level];
- u64 block_size = btrfs_level_size(found_root, level);
-
- eb = read_tree_block(found_root, block_start,
- block_size, 0);
- if (!eb) {
- ret = -EIO;
- goto out;
- }
- btrfs_tree_lock(eb);
- BUG_ON(level != btrfs_header_level(eb));
-
- if (level == 0)
- btrfs_item_key_to_cpu(eb, &first_key, 0);
- else
- btrfs_node_key_to_cpu(eb, &first_key, 0);
-
- btrfs_tree_unlock(eb);
- free_extent_buffer(eb);
- prev_block = block_start;
- }
-
- mutex_lock(&extent_root->fs_info->trans_mutex);
- btrfs_record_root_in_trans(found_root);
- mutex_unlock(&extent_root->fs_info->trans_mutex);
- if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
- /*
- * try to update data extent references while
- * keeping metadata shared between snapshots.
- */
- if (pass == 1) {
- ret = relocate_one_path(trans, found_root,
- path, &first_key, ref_path,
- group, reloc_inode);
- if (ret < 0)
- goto out;
- continue;
- }
- /*
- * use fallback method to process the remaining
- * references.
- */
- if (!new_extents) {
- u64 group_start = group->key.objectid;
- new_extents = kmalloc(sizeof(*new_extents),
- GFP_NOFS);
- if (!new_extents) {
- ret = -ENOMEM;
- goto out;
- }
- nr_extents = 1;
- ret = get_new_locations(reloc_inode,
- extent_key,
- group_start, 1,
- &new_extents,
- &nr_extents);
- if (ret)
- goto out;
- }
- ret = replace_one_extent(trans, found_root,
- path, extent_key,
- &first_key, ref_path,
- new_extents, nr_extents);
- } else {
- ret = relocate_tree_block(trans, found_root, path,
- &first_key, ref_path);
- }
- if (ret < 0)
- goto out;
- }
- ret = 0;
- out:
- btrfs_end_transaction(trans, extent_root);
- kfree(new_extents);
- kfree(ref_path);
- return ret;
- }
- #endif
-
static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
{
u64 num_devices;
ret = -ENOMEM;
goto error;
}
+ cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
+ GFP_NOFS);
+ if (!cache->free_space_ctl) {
+ kfree(cache);
+ ret = -ENOMEM;
+ goto error;
+ }
atomic_set(&cache->count, 1);
spin_lock_init(&cache->lock);
- spin_lock_init(&cache->tree_lock);
cache->fs_info = info;
INIT_LIST_HEAD(&cache->list);
INIT_LIST_HEAD(&cache->cluster_list);
if (need_clear)
cache->disk_cache_state = BTRFS_DC_CLEAR;
- /*
- * we only want to have 32k of ram per block group for keeping
- * track of free space, and if we pass 1/2 of that we want to
- * start converting things over to using bitmaps
- */
- cache->extents_thresh = ((1024 * 32) / 2) /
- sizeof(struct btrfs_free_space);
-
read_extent_buffer(leaf, &cache->item,
btrfs_item_ptr_offset(leaf, path->slots[0]),
sizeof(cache->item));
memcpy(&cache->key, &found_key, sizeof(found_key));
key.objectid = found_key.objectid + found_key.offset;
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
cache->flags = btrfs_block_group_flags(&cache->item);
cache->sectorsize = root->sectorsize;
+ btrfs_init_free_space_ctl(cache);
+
/*
* We need to exclude the super stripes now so that the space
* info has super bytes accounted for, otherwise we'll think
cache = kzalloc(sizeof(*cache), GFP_NOFS);
if (!cache)
return -ENOMEM;
+ cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
+ GFP_NOFS);
+ if (!cache->free_space_ctl) {
+ kfree(cache);
+ return -ENOMEM;
+ }
cache->key.objectid = chunk_offset;
cache->key.offset = size;
cache->sectorsize = root->sectorsize;
cache->fs_info = root->fs_info;
- /*
- * we only want to have 32k of ram per block group for keeping track
- * of free space, and if we pass 1/2 of that we want to start
- * converting things over to using bitmaps
- */
- cache->extents_thresh = ((1024 * 32) / 2) /
- sizeof(struct btrfs_free_space);
atomic_set(&cache->count, 1);
spin_lock_init(&cache->lock);
- spin_lock_init(&cache->tree_lock);
INIT_LIST_HEAD(&cache->list);
INIT_LIST_HEAD(&cache->cluster_list);
+ btrfs_init_free_space_ctl(cache);
+
btrfs_set_block_group_used(&cache->item, bytes_used);
btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
cache->flags = type;
if (ret < 0)
goto out;
if (ret > 0)
- btrfs_release_path(tree_root, path);
+ btrfs_release_path(path);
if (ret == 0) {
ret = btrfs_del_item(trans, tree_root, path);
if (ret)
goto out;
- btrfs_release_path(tree_root, path);
+ btrfs_release_path(path);
}
spin_lock(&root->fs_info->block_group_cache_lock);
int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
{
struct btrfs_space_info *space_info;
+ struct btrfs_super_block *disk_super;
+ u64 features;
+ u64 flags;
+ int mixed = 0;
int ret;
- ret = update_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM, 0, 0,
- &space_info);
- if (ret)
- return ret;
+ disk_super = &fs_info->super_copy;
+ if (!btrfs_super_root(disk_super))
+ return 1;
- ret = update_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA, 0, 0,
- &space_info);
- if (ret)
- return ret;
+ features = btrfs_super_incompat_flags(disk_super);
+ if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
+ mixed = 1;
- ret = update_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA, 0, 0,
- &space_info);
+ flags = BTRFS_BLOCK_GROUP_SYSTEM;
+ ret = update_space_info(fs_info, flags, 0, 0, &space_info);
if (ret)
- return ret;
+ goto out;
+ if (mixed) {
+ flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
+ ret = update_space_info(fs_info, flags, 0, 0, &space_info);
+ } else {
+ flags = BTRFS_BLOCK_GROUP_METADATA;
+ ret = update_space_info(fs_info, flags, 0, 0, &space_info);
+ if (ret)
+ goto out;
+
+ flags = BTRFS_BLOCK_GROUP_DATA;
+ ret = update_space_info(fs_info, flags, 0, 0, &space_info);
+ }
+out:
return ret;
}
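
The rewritten btrfs_init_space_info() keys off the MIXED_GROUPS incompat bit:
mixed filesystems get one combined data+metadata space info instead of two
separate ones. A user-space sketch of the flag selection (illustrative only,
not part of the patch; the bit values mirror the kernel definitions but are
assumptions here):

#include <stdint.h>
#include <stdio.h>

/* assumed values, mirroring the kernel's block group and feature bits */
#define GROUP_DATA		(1ULL << 0)
#define GROUP_SYSTEM		(1ULL << 1)
#define GROUP_METADATA		(1ULL << 2)
#define INCOMPAT_MIXED_GROUPS	(1ULL << 2)

static void create_space_info(uint64_t flags)
{
	printf("space info created for flags 0x%llx\n",
	       (unsigned long long)flags);
}

int main(void)
{
	uint64_t features = INCOMPAT_MIXED_GROUPS;	/* pretend mkfs set it */

	create_space_info(GROUP_SYSTEM);
	if (features & INCOMPAT_MIXED_GROUPS) {
		create_space_info(GROUP_METADATA | GROUP_DATA);
	} else {
		create_space_info(GROUP_METADATA);
		create_space_info(GROUP_DATA);
	}
	return 0;
}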
}
void extent_io_tree_init(struct extent_io_tree *tree,
- struct address_space *mapping, gfp_t mask)
+ struct address_space *mapping)
{
tree->state = RB_ROOT;
INIT_RADIX_TREE(&tree->buffer, GFP_ATOMIC);
NULL, mask);
}
- static int clear_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
- gfp_t mask)
- {
- return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0,
- NULL, mask);
- }
-
int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
struct extent_state **cached_state, gfp_t mask)
{
cached_state, mask);
}
- int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end)
- {
- return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
- }
-
/*
 * either insert or lock the state struct between start and end; use mask to
 * tell us if waiting is desired.
mask);
}
- /*
- * helper function to set pages and extents in the tree dirty
- */
- int set_range_dirty(struct extent_io_tree *tree, u64 start, u64 end)
- {
- unsigned long index = start >> PAGE_CACHE_SHIFT;
- unsigned long end_index = end >> PAGE_CACHE_SHIFT;
- struct page *page;
-
- while (index <= end_index) {
- page = find_get_page(tree->mapping, index);
- BUG_ON(!page);
- __set_page_dirty_nobuffers(page);
- page_cache_release(page);
- index++;
- }
- return 0;
- }
-
/*
* helper function to set both pages and extents in the tree writeback
*/
bio_put(bio);
}
- /*
- * IO done from prepare_write is pretty simple, we just unlock
- * the structs in the extent tree when done, and set the uptodate bits
- * as appropriate.
- */
- static void end_bio_extent_preparewrite(struct bio *bio, int err)
- {
- const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
- struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
- struct extent_io_tree *tree;
- u64 start;
- u64 end;
-
- do {
- struct page *page = bvec->bv_page;
- struct extent_state *cached = NULL;
- tree = &BTRFS_I(page->mapping->host)->io_tree;
-
- start = ((u64)page->index << PAGE_CACHE_SHIFT) +
- bvec->bv_offset;
- end = start + bvec->bv_len - 1;
-
- if (--bvec >= bio->bi_io_vec)
- prefetchw(&bvec->bv_page->flags);
-
- if (uptodate) {
- set_extent_uptodate(tree, start, end, &cached,
- GFP_ATOMIC);
- } else {
- ClearPageUptodate(page);
- SetPageError(page);
- }
-
- unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
-
- } while (bvec >= bio->bi_io_vec);
-
- bio_put(bio);
- }
-
struct bio *
btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
gfp_t gfp_flags)
struct btrfs_ordered_extent *ordered;
int ret;
int nr = 0;
- size_t page_offset = 0;
+ size_t pg_offset = 0;
size_t iosize;
size_t disk_io_size;
size_t blocksize = inode->i_sb->s_blocksize;
char *userpage;
struct extent_state *cached = NULL;
- iosize = PAGE_CACHE_SIZE - page_offset;
+ iosize = PAGE_CACHE_SIZE - pg_offset;
userpage = kmap_atomic(page, KM_USER0);
- memset(userpage + page_offset, 0, iosize);
+ memset(userpage + pg_offset, 0, iosize);
flush_dcache_page(page);
kunmap_atomic(userpage, KM_USER0);
set_extent_uptodate(tree, cur, cur + iosize - 1,
&cached, GFP_NOFS);
break;
}
- em = get_extent(inode, page, page_offset, cur,
+ em = get_extent(inode, page, pg_offset, cur,
end - cur + 1, 0);
- if (IS_ERR(em) || !em) {
+ if (IS_ERR_OR_NULL(em)) {
SetPageError(page);
unlock_extent(tree, cur, end, GFP_NOFS);
break;
struct extent_state *cached = NULL;
userpage = kmap_atomic(page, KM_USER0);
- memset(userpage + page_offset, 0, iosize);
+ memset(userpage + pg_offset, 0, iosize);
flush_dcache_page(page);
kunmap_atomic(userpage, KM_USER0);
unlock_extent_cached(tree, cur, cur + iosize - 1,
&cached, GFP_NOFS);
cur = cur + iosize;
- page_offset += iosize;
+ pg_offset += iosize;
continue;
}
/* the get_extent function already copied into the page */
check_page_uptodate(tree, page);
unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
cur = cur + iosize;
- page_offset += iosize;
+ pg_offset += iosize;
continue;
}
/* we have an inline extent but it didn't get marked up
SetPageError(page);
unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
cur = cur + iosize;
- page_offset += iosize;
+ pg_offset += iosize;
continue;
}
unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
pnr -= page->index;
ret = submit_extent_page(READ, tree, page,
- sector, disk_io_size, page_offset,
+ sector, disk_io_size, pg_offset,
bdev, bio, pnr,
end_bio_extent_readpage, mirror_num,
*bio_flags,
if (ret)
SetPageError(page);
cur = cur + iosize;
- page_offset += iosize;
+ pg_offset += iosize;
}
if (!nr) {
if (!PageError(page))
}
em = epd->get_extent(inode, page, pg_offset, cur,
end - cur + 1, 1);
- if (IS_ERR(em) || !em) {
+ if (IS_ERR_OR_NULL(em)) {
SetPageError(page);
break;
}
return 0;
}
- /*
- * simple commit_write call, set_range_dirty is used to mark both
- * the pages and the extent records as dirty
- */
- int extent_commit_write(struct extent_io_tree *tree,
- struct inode *inode, struct page *page,
- unsigned from, unsigned to)
- {
- loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
-
- set_page_extent_mapped(page);
- set_page_dirty(page);
-
- if (pos > inode->i_size) {
- i_size_write(inode, pos);
- mark_inode_dirty(inode);
- }
- return 0;
- }
-
- int extent_prepare_write(struct extent_io_tree *tree,
- struct inode *inode, struct page *page,
- unsigned from, unsigned to, get_extent_t *get_extent)
- {
- u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
- u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
- u64 block_start;
- u64 orig_block_start;
- u64 block_end;
- u64 cur_end;
- struct extent_map *em;
- unsigned blocksize = 1 << inode->i_blkbits;
- size_t page_offset = 0;
- size_t block_off_start;
- size_t block_off_end;
- int err = 0;
- int iocount = 0;
- int ret = 0;
- int isnew;
-
- set_page_extent_mapped(page);
-
- block_start = (page_start + from) & ~((u64)blocksize - 1);
- block_end = (page_start + to - 1) | (blocksize - 1);
- orig_block_start = block_start;
-
- lock_extent(tree, page_start, page_end, GFP_NOFS);
- while (block_start <= block_end) {
- em = get_extent(inode, page, page_offset, block_start,
- block_end - block_start + 1, 1);
- if (IS_ERR(em) || !em)
- goto err;
-
- cur_end = min(block_end, extent_map_end(em) - 1);
- block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
- block_off_end = block_off_start + blocksize;
- isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);
-
- if (!PageUptodate(page) && isnew &&
- (block_off_end > to || block_off_start < from)) {
- void *kaddr;
-
- kaddr = kmap_atomic(page, KM_USER0);
- if (block_off_end > to)
- memset(kaddr + to, 0, block_off_end - to);
- if (block_off_start < from)
- memset(kaddr + block_off_start, 0,
- from - block_off_start);
- flush_dcache_page(page);
- kunmap_atomic(kaddr, KM_USER0);
- }
- if ((em->block_start != EXTENT_MAP_HOLE &&
- em->block_start != EXTENT_MAP_INLINE) &&
- !isnew && !PageUptodate(page) &&
- (block_off_end > to || block_off_start < from) &&
- !test_range_bit(tree, block_start, cur_end,
- EXTENT_UPTODATE, 1, NULL)) {
- u64 sector;
- u64 extent_offset = block_start - em->start;
- size_t iosize;
- sector = (em->block_start + extent_offset) >> 9;
- iosize = (cur_end - block_start + blocksize) &
- ~((u64)blocksize - 1);
- /*
- * we've already got the extent locked, but we
- * need to split the state such that our end_bio
- * handler can clear the lock.
- */
- set_extent_bit(tree, block_start,
- block_start + iosize - 1,
- EXTENT_LOCKED, 0, NULL, NULL, GFP_NOFS);
- ret = submit_extent_page(READ, tree, page,
- sector, iosize, page_offset, em->bdev,
- NULL, 1,
- end_bio_extent_preparewrite, 0,
- 0, 0);
- if (ret && !err)
- err = ret;
- iocount++;
- block_start = block_start + iosize;
- } else {
- struct extent_state *cached = NULL;
-
- set_extent_uptodate(tree, block_start, cur_end, &cached,
- GFP_NOFS);
- unlock_extent_cached(tree, block_start, cur_end,
- &cached, GFP_NOFS);
- block_start = cur_end + 1;
- }
- page_offset = block_start & (PAGE_CACHE_SIZE - 1);
- free_extent_map(em);
- }
- if (iocount) {
- wait_extent_bit(tree, orig_block_start,
- block_end, EXTENT_LOCKED);
- }
- check_page_uptodate(tree, page);
- err:
- /* FIXME, zero out newly allocated blocks on error */
- return err;
- }
-
/*
* a helper for releasepage, this tests for areas of the page that
* are locked or under IO and drops the related state bits if it is safe
len = end - start + 1;
write_lock(&map->lock);
em = lookup_extent_mapping(map, start, len);
- if (!em || IS_ERR(em)) {
+ if (IS_ERR_OR_NULL(em)) {
write_unlock(&map->lock);
break;
}
return try_release_extent_state(map, tree, page, mask);
}
- sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
- get_extent_t *get_extent)
- {
- struct inode *inode = mapping->host;
- struct extent_state *cached_state = NULL;
- u64 start = iblock << inode->i_blkbits;
- sector_t sector = 0;
- size_t blksize = (1 << inode->i_blkbits);
- struct extent_map *em;
-
- lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + blksize - 1,
- 0, &cached_state, GFP_NOFS);
- em = get_extent(inode, NULL, 0, start, blksize, 0);
- unlock_extent_cached(&BTRFS_I(inode)->io_tree, start,
- start + blksize - 1, &cached_state, GFP_NOFS);
- if (!em || IS_ERR(em))
- return 0;
-
- if (em->block_start > EXTENT_MAP_LAST_BYTE)
- goto out;
-
- sector = (em->block_start + start - em->start) >> inode->i_blkbits;
- out:
- free_extent_map(em);
- return sector;
- }
-
/*
* helper function for fiemap, which doesn't want to see any holes.
* This maps until we find something past 'last'
break;
len = (len + sectorsize - 1) & ~(sectorsize - 1);
em = get_extent(inode, NULL, 0, offset, len, 0);
- if (!em || IS_ERR(em))
+ if (IS_ERR_OR_NULL(em))
return em;
/* if this isn't a hole return it */
* because there might be preallocation past i_size
*/
ret = btrfs_lookup_file_extent(NULL, BTRFS_I(inode)->root,
- path, inode->i_ino, -1, 0);
+ path, btrfs_ino(inode), -1, 0);
if (ret < 0) {
btrfs_free_path(path);
return ret;
found_type = btrfs_key_type(&found_key);
/* No extents, but there might be delalloc bits */
- if (found_key.objectid != inode->i_ino ||
+ if (found_key.objectid != btrfs_ino(inode) ||
found_type != BTRFS_EXTENT_DATA_KEY) {
/* have to trust i_size as the end */
last = (u64)-1;
struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
u64 start, unsigned long len,
- struct page *page0,
- gfp_t mask)
+ struct page *page0)
{
unsigned long num_pages = num_extent_pages(start, len);
unsigned long i;
}
rcu_read_unlock();
- eb = __alloc_extent_buffer(tree, start, len, mask);
+ eb = __alloc_extent_buffer(tree, start, len, GFP_NOFS);
if (!eb)
return NULL;
i = 0;
}
for (; i < num_pages; i++, index++) {
- p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
+ p = find_or_create_page(mapping, index, GFP_NOFS | __GFP_HIGHMEM);
if (!p) {
WARN_ON(1);
goto free_eb;
}
struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
- u64 start, unsigned long len,
- gfp_t mask)
+ u64 start, unsigned long len)
{
struct extent_buffer *eb;
return 0;
}
- int wait_on_extent_buffer_writeback(struct extent_io_tree *tree,
- struct extent_buffer *eb)
- {
- return wait_on_extent_writeback(tree, eb->start,
- eb->start + eb->len - 1);
- }
-
int set_extent_buffer_dirty(struct extent_io_tree *tree,
struct extent_buffer *eb)
{
u32 item_size;
if (item)
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
item = btrfs_lookup_csum(NULL, root->fs_info->csum_root,
path, disk_bytenr, 0);
if (IS_ERR(item)) {
EXTENT_NODATASUM, GFP_NOFS);
} else {
printk(KERN_INFO "btrfs no csum found "
- "for inode %lu start %llu\n",
- inode->i_ino,
+ "for inode %llu start %llu\n",
+ (unsigned long long)
+ btrfs_ino(inode),
(unsigned long long)offset);
}
item = NULL;
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
goto found;
}
btrfs_item_key_to_cpu(path->nodes[0], &found_key,
if (key.offset < bytenr)
break;
}
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
}
out:
btrfs_free_path(path);
* at this point, we know the tree has an item, but it isn't big
* enough yet to put our csum in. Grow it
*/
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
ret = btrfs_search_slot(trans, root, &file_key, path,
csum_size, 1);
if (ret < 0)
}
insert:
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
csum_offset = 0;
if (found_next) {
u64 tmp = total_bytes + root->sectorsize;
}
btrfs_mark_buffer_dirty(path->nodes[0]);
if (total_bytes < sums->len) {
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
cond_resched();
goto again;
}
}
while (1) {
if (!split)
- split = alloc_extent_map(GFP_NOFS);
+ split = alloc_extent_map();
if (!split2)
- split2 = alloc_extent_map(GFP_NOFS);
+ split2 = alloc_extent_map();
BUG_ON(!split || !split2);
write_lock(&em_tree->lock);
struct btrfs_path *path;
struct btrfs_key key;
struct btrfs_key new_key;
+ u64 ino = btrfs_ino(inode);
u64 search_start = start;
u64 disk_bytenr = 0;
u64 num_bytes = 0;
while (1) {
recow = 0;
- ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
+ ret = btrfs_lookup_file_extent(trans, root, path, ino,
search_start, -1);
if (ret < 0)
break;
if (ret > 0 && path->slots[0] > 0 && search_start == start) {
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
- if (key.objectid == inode->i_ino &&
+ if (key.objectid == ino &&
key.type == BTRFS_EXTENT_DATA_KEY)
path->slots[0]--;
}
}
btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
- if (key.objectid > inode->i_ino ||
+ if (key.objectid > ino ||
key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
break;
search_start = max(key.offset, start);
if (recow) {
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
continue;
}
ret = btrfs_duplicate_item(trans, root, path,
&new_key);
if (ret == -EAGAIN) {
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
continue;
}
if (ret < 0)
del_nr = 0;
del_slot = 0;
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
continue;
}
int del_slot = 0;
int recow;
int ret;
+ u64 ino = btrfs_ino(inode);
btrfs_drop_extent_cache(inode, start, end - 1, 0);
again:
recow = 0;
split = start;
- key.objectid = inode->i_ino;
+ key.objectid = ino;
key.type = BTRFS_EXTENT_DATA_KEY;
key.offset = split;
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
- BUG_ON(key.objectid != inode->i_ino ||
- key.type != BTRFS_EXTENT_DATA_KEY);
+ BUG_ON(key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY);
fi = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
BUG_ON(btrfs_file_extent_type(leaf, fi) !=
other_start = 0;
other_end = start;
if (extent_mergeable(leaf, path->slots[0] - 1,
- inode->i_ino, bytenr, orig_offset,
+ ino, bytenr, orig_offset,
&other_start, &other_end)) {
new_key.offset = end;
btrfs_set_item_key_safe(trans, root, path, &new_key);
other_start = end;
other_end = 0;
if (extent_mergeable(leaf, path->slots[0] + 1,
- inode->i_ino, bytenr, orig_offset,
+ ino, bytenr, orig_offset,
&other_start, &other_end)) {
fi = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
new_key.offset = split;
ret = btrfs_duplicate_item(trans, root, path, &new_key);
if (ret == -EAGAIN) {
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
goto again;
}
BUG_ON(ret < 0);
ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
root->root_key.objectid,
- inode->i_ino, orig_offset);
+ ino, orig_offset);
BUG_ON(ret);
if (split == start) {
other_start = end;
other_end = 0;
if (extent_mergeable(leaf, path->slots[0] + 1,
- inode->i_ino, bytenr, orig_offset,
+ ino, bytenr, orig_offset,
&other_start, &other_end)) {
if (recow) {
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
goto again;
}
extent_end = other_end;
del_nr++;
ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
0, root->root_key.objectid,
- inode->i_ino, orig_offset);
+ ino, orig_offset);
BUG_ON(ret);
}
other_start = 0;
other_end = start;
if (extent_mergeable(leaf, path->slots[0] - 1,
- inode->i_ino, bytenr, orig_offset,
+ ino, bytenr, orig_offset,
&other_start, &other_end)) {
if (recow) {
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
goto again;
}
key.offset = other_start;
del_nr++;
ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
0, root->root_key.objectid,
- inode->i_ino, orig_offset);
+ ino, orig_offset);
BUG_ON(ret);
}
if (del_nr == 0) {
while (1) {
em = btrfs_get_extent(inode, NULL, 0, cur_offset,
alloc_end - cur_offset, 0);
- BUG_ON(IS_ERR(em) || !em);
+ BUG_ON(IS_ERR_OR_NULL(em));
last_byte = min(extent_map_end(em), alloc_end);
last_byte = (last_byte + mask) & ~mask;
if (em->block_start == EXTENT_MAP_HOLE ||
#include "transaction.h"
#include "disk-io.h"
#include "extent_io.h"
+#include "inode-map.h"
#define BITS_PER_BITMAP (PAGE_CACHE_SIZE * 8)
#define MAX_CACHE_BYTES_PER_GIG (32 * 1024)
-static void recalculate_thresholds(struct btrfs_block_group_cache
- *block_group);
-static int link_free_space(struct btrfs_block_group_cache *block_group,
+static int link_free_space(struct btrfs_free_space_ctl *ctl,
struct btrfs_free_space *info);
-struct inode *lookup_free_space_inode(struct btrfs_root *root,
- struct btrfs_block_group_cache
- *block_group, struct btrfs_path *path)
+static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
+ struct btrfs_path *path,
+ u64 offset)
{
struct btrfs_key key;
struct btrfs_key location;
struct inode *inode = NULL;
int ret;
- spin_lock(&block_group->lock);
- if (block_group->inode)
- inode = igrab(block_group->inode);
- spin_unlock(&block_group->lock);
- if (inode)
- return inode;
-
key.objectid = BTRFS_FREE_SPACE_OBJECTID;
- key.offset = block_group->key.objectid;
+ key.offset = offset;
key.type = 0;
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
return ERR_PTR(ret);
if (ret > 0) {
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
return ERR_PTR(-ENOENT);
}
struct btrfs_free_space_header);
btrfs_free_space_key(leaf, header, &disk_key);
btrfs_disk_key_to_cpu(&location, &disk_key);
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
inode = btrfs_iget(root->fs_info->sb, &location, root, NULL);
if (!inode)
inode->i_mapping->flags &= ~__GFP_FS;
+ return inode;
+}
+
+struct inode *lookup_free_space_inode(struct btrfs_root *root,
+ struct btrfs_block_group_cache
+ *block_group, struct btrfs_path *path)
+{
+ struct inode *inode = NULL;
+
+ spin_lock(&block_group->lock);
+ if (block_group->inode)
+ inode = igrab(block_group->inode);
+ spin_unlock(&block_group->lock);
+ if (inode)
+ return inode;
+
+ inode = __lookup_free_space_inode(root, path,
+ block_group->key.objectid);
+ if (IS_ERR(inode))
+ return inode;
+
spin_lock(&block_group->lock);
if (!root->fs_info->closing) {
block_group->inode = igrab(inode);
return inode;
}
-int create_free_space_inode(struct btrfs_root *root,
- struct btrfs_trans_handle *trans,
- struct btrfs_block_group_cache *block_group,
- struct btrfs_path *path)
+int __create_free_space_inode(struct btrfs_root *root,
+ struct btrfs_trans_handle *trans,
+ struct btrfs_path *path, u64 ino, u64 offset)
{
struct btrfs_key key;
struct btrfs_disk_key disk_key;
struct btrfs_free_space_header *header;
struct btrfs_inode_item *inode_item;
struct extent_buffer *leaf;
- u64 objectid;
int ret;
- ret = btrfs_find_free_objectid(trans, root, 0, &objectid);
- if (ret < 0)
- return ret;
-
- ret = btrfs_insert_empty_inode(trans, root, path, objectid);
+ ret = btrfs_insert_empty_inode(trans, root, path, ino);
if (ret)
return ret;
BTRFS_INODE_PREALLOC | BTRFS_INODE_NODATASUM);
btrfs_set_inode_nlink(leaf, inode_item, 1);
btrfs_set_inode_transid(leaf, inode_item, trans->transid);
- btrfs_set_inode_block_group(leaf, inode_item,
- block_group->key.objectid);
+ btrfs_set_inode_block_group(leaf, inode_item, offset);
btrfs_mark_buffer_dirty(leaf);
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
key.objectid = BTRFS_FREE_SPACE_OBJECTID;
- key.offset = block_group->key.objectid;
+ key.offset = offset;
key.type = 0;
ret = btrfs_insert_empty_item(trans, root, path, &key,
sizeof(struct btrfs_free_space_header));
if (ret < 0) {
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
return ret;
}
leaf = path->nodes[0];
memset_extent_buffer(leaf, 0, (unsigned long)header, sizeof(*header));
btrfs_set_free_space_key(leaf, header, &disk_key);
btrfs_mark_buffer_dirty(leaf);
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
return 0;
}
+int create_free_space_inode(struct btrfs_root *root,
+ struct btrfs_trans_handle *trans,
+ struct btrfs_block_group_cache *block_group,
+ struct btrfs_path *path)
+{
+ int ret;
+ u64 ino;
+
+ ret = btrfs_find_free_objectid(root, &ino);
+ if (ret < 0)
+ return ret;
+
+ return __create_free_space_inode(root, trans, path, ino,
+ block_group->key.objectid);
+}
+
int btrfs_truncate_free_space_cache(struct btrfs_root *root,
struct btrfs_trans_handle *trans,
struct btrfs_path *path,
return ret;
}
- return btrfs_update_inode(trans, root, inode);
+ ret = btrfs_update_inode(trans, root, inode);
+ return ret;
}
static int readahead_cache(struct inode *inode)
return 0;
}
-int load_free_space_cache(struct btrfs_fs_info *fs_info,
- struct btrfs_block_group_cache *block_group)
+int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
+ struct btrfs_free_space_ctl *ctl,
+ struct btrfs_path *path, u64 offset)
{
- struct btrfs_root *root = fs_info->tree_root;
- struct inode *inode;
struct btrfs_free_space_header *header;
struct extent_buffer *leaf;
struct page *page;
- struct btrfs_path *path;
u32 *checksums = NULL, *crc;
char *disk_crcs = NULL;
struct btrfs_key key;
u64 num_entries;
u64 num_bitmaps;
u64 generation;
- u64 used = btrfs_block_group_used(&block_group->item);
u32 cur_crc = ~(u32)0;
pgoff_t index = 0;
unsigned long first_page_offset;
int num_checksums;
- int ret = 0;
-
- /*
- * If we're unmounting then just return, since this does a search on the
- * normal root and not the commit root and we could deadlock.
- */
- smp_mb();
- if (fs_info->closing)
- return 0;
-
- /*
- * If this block group has been marked to be cleared for one reason or
- * another then we can't trust the on disk cache, so just return.
- */
- spin_lock(&block_group->lock);
- if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
- spin_unlock(&block_group->lock);
- return 0;
- }
- spin_unlock(&block_group->lock);
+ int ret = 0, ret2;
INIT_LIST_HEAD(&bitmaps);
- path = btrfs_alloc_path();
- if (!path)
- return 0;
-
- inode = lookup_free_space_inode(root, block_group, path);
- if (IS_ERR(inode)) {
- btrfs_free_path(path);
- return 0;
- }
-
/* Nothing in the space cache, goodbye */
- if (!i_size_read(inode)) {
- btrfs_free_path(path);
+ if (!i_size_read(inode))
goto out;
- }
key.objectid = BTRFS_FREE_SPACE_OBJECTID;
- key.offset = block_group->key.objectid;
+ key.offset = offset;
key.type = 0;
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
- if (ret) {
- btrfs_free_path(path);
+ if (ret < 0)
+ goto out;
+ else if (ret > 0) {
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
+ ret = 0;
goto out;
}
+ ret = -1;
+
leaf = path->nodes[0];
header = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_free_space_header);
num_entries = btrfs_free_space_entries(leaf, header);
num_bitmaps = btrfs_free_space_bitmaps(leaf, header);
generation = btrfs_free_space_generation(leaf, header);
- btrfs_release_path(root, path);
- btrfs_free_path(path);
+ btrfs_release_path(path);
if (BTRFS_I(inode)->generation != generation) {
printk(KERN_ERR "btrfs: free space inode generation (%llu) did"
- " not match free space cache generation (%llu) for "
- "block group %llu\n",
+ " not match free space cache generation (%llu)\n",
(unsigned long long)BTRFS_I(inode)->generation,
- (unsigned long long)generation,
- (unsigned long long)block_group->key.objectid);
- goto free_cache;
+ (unsigned long long)generation);
+ goto out;
}
if (!num_entries)
goto out;
ret = readahead_cache(inode);
- if (ret) {
- ret = 0;
+ if (ret)
goto out;
- }
while (1) {
struct btrfs_free_space_entry *entry;
}
page = grab_cache_page(inode->i_mapping, index);
- if (!page) {
- ret = 0;
+ if (!page)
goto free_cache;
- }
if (!PageUptodate(page)) {
btrfs_readpage(NULL, page);
unlock_page(page);
page_cache_release(page);
printk(KERN_ERR "btrfs: error reading free "
- "space cache: %llu\n",
- (unsigned long long)
- block_group->key.objectid);
+ "space cache\n");
goto free_cache;
}
}
gen = addr + (sizeof(u32) * num_checksums);
if (*gen != BTRFS_I(inode)->generation) {
printk(KERN_ERR "btrfs: space cache generation"
- " (%llu) does not match inode (%llu) "
- "for block group %llu\n",
+ " (%llu) does not match inode (%llu)\n",
(unsigned long long)*gen,
(unsigned long long)
- BTRFS_I(inode)->generation,
- (unsigned long long)
- block_group->key.objectid);
+ BTRFS_I(inode)->generation);
kunmap(page);
unlock_page(page);
page_cache_release(page);
PAGE_CACHE_SIZE - start_offset);
btrfs_csum_final(cur_crc, (char *)&cur_crc);
if (cur_crc != *crc) {
- printk(KERN_ERR "btrfs: crc mismatch for page %lu in "
- "block group %llu\n", index,
- (unsigned long long)block_group->key.objectid);
+ printk(KERN_ERR "btrfs: crc mismatch for page %lu\n",
+ index);
kunmap(page);
unlock_page(page);
page_cache_release(page);
}
if (entry->type == BTRFS_FREE_SPACE_EXTENT) {
- spin_lock(&block_group->tree_lock);
- ret = link_free_space(block_group, e);
- spin_unlock(&block_group->tree_lock);
+ spin_lock(&ctl->tree_lock);
+ ret = link_free_space(ctl, e);
+ spin_unlock(&ctl->tree_lock);
BUG_ON(ret);
} else {
e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
page_cache_release(page);
goto free_cache;
}
- spin_lock(&block_group->tree_lock);
- ret = link_free_space(block_group, e);
- block_group->total_bitmaps++;
- recalculate_thresholds(block_group);
- spin_unlock(&block_group->tree_lock);
+ spin_lock(&ctl->tree_lock);
+ ret2 = link_free_space(ctl, e);
+ ctl->total_bitmaps++;
+ ctl->op->recalc_thresholds(ctl);
+ spin_unlock(&ctl->tree_lock);
list_add_tail(&e->list, &bitmaps);
}
index++;
}
- spin_lock(&block_group->tree_lock);
- if (block_group->free_space != (block_group->key.offset - used -
- block_group->bytes_super)) {
- spin_unlock(&block_group->tree_lock);
- printk(KERN_ERR "block group %llu has an wrong amount of free "
- "space\n", block_group->key.objectid);
- ret = 0;
- goto free_cache;
- }
- spin_unlock(&block_group->tree_lock);
-
ret = 1;
out:
kfree(checksums);
kfree(disk_crcs);
- iput(inode);
return ret;
-
free_cache:
- /* This cache is bogus, make sure it gets cleared */
+ __btrfs_remove_free_space_cache(ctl);
+ goto out;
+}
+
+int load_free_space_cache(struct btrfs_fs_info *fs_info,
+ struct btrfs_block_group_cache *block_group)
+{
+ struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
+ struct btrfs_root *root = fs_info->tree_root;
+ struct inode *inode;
+ struct btrfs_path *path;
+ int ret;
+ bool matched;
+ u64 used = btrfs_block_group_used(&block_group->item);
+
+ /*
+ * If we're unmounting then just return, since this does a search on the
+ * normal root and not the commit root and we could deadlock.
+ */
+ smp_mb();
+ if (fs_info->closing)
+ return 0;
+
+ /*
+ * If this block group has been marked to be cleared for one reason or
+ * another then we can't trust the on disk cache, so just return.
+ */
spin_lock(&block_group->lock);
- block_group->disk_cache_state = BTRFS_DC_CLEAR;
+ if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
+ spin_unlock(&block_group->lock);
+ return 0;
+ }
spin_unlock(&block_group->lock);
- btrfs_remove_free_space_cache(block_group);
- goto out;
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return 0;
+
+ inode = lookup_free_space_inode(root, block_group, path);
+ if (IS_ERR(inode)) {
+ btrfs_free_path(path);
+ return 0;
+ }
+
+ ret = __load_free_space_cache(fs_info->tree_root, inode, ctl,
+ path, block_group->key.objectid);
+ btrfs_free_path(path);
+ if (ret <= 0)
+ goto out;
+
+ spin_lock(&ctl->tree_lock);
+ matched = (ctl->free_space == (block_group->key.offset - used -
+ block_group->bytes_super));
+ spin_unlock(&ctl->tree_lock);
+
+ if (!matched) {
+ __btrfs_remove_free_space_cache(ctl);
+ printk(KERN_ERR "block group %llu has an wrong amount of free "
+ "space\n", block_group->key.objectid);
+ ret = -1;
+ }
+out:
+ if (ret < 0) {
+ /* This cache is bogus, make sure it gets cleared */
+ spin_lock(&block_group->lock);
+ block_group->disk_cache_state = BTRFS_DC_CLEAR;
+ spin_unlock(&block_group->lock);
+ ret = 0;
+
+ printk(KERN_ERR "btrfs: failed to load free space cache "
+ "for block group %llu\n", block_group->key.objectid);
+ }
+
+ iput(inode);
+ return ret;
}
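
The new wrapper validates what __load_free_space_cache() built: the cached
total must equal the group size minus the allocated bytes minus the space
pinned by superblock mirrors, otherwise the cache is dropped and rebuilt. A
worked example of that check (illustrative only, not part of the patch; the
numbers are made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t group_size  = 1024ULL << 20;	/* key.offset, 1GiB */
	uint64_t used        =  600ULL << 20;	/* from the block group item */
	uint64_t bytes_super =   16ULL << 20;	/* super stripe overlap */
	uint64_t cached_free =  408ULL << 20;	/* summed from the cache */
	int matched;

	matched = cached_free == group_size - used - bytes_super;
	printf("cache %s\n", matched ? "accepted" : "rejected, rebuilt");
	return 0;
}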
-int btrfs_write_out_cache(struct btrfs_root *root,
- struct btrfs_trans_handle *trans,
- struct btrfs_block_group_cache *block_group,
- struct btrfs_path *path)
+int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
+ struct btrfs_free_space_ctl *ctl,
+ struct btrfs_block_group_cache *block_group,
+ struct btrfs_trans_handle *trans,
+ struct btrfs_path *path, u64 offset)
{
struct btrfs_free_space_header *header;
struct extent_buffer *leaf;
- struct inode *inode;
struct rb_node *node;
struct list_head *pos, *n;
struct page **pages;
int index = 0, num_pages = 0;
int entries = 0;
int bitmaps = 0;
- int ret = 0;
+ int ret = -1;
bool next_page = false;
bool out_of_space = false;
- root = root->fs_info->tree_root;
-
INIT_LIST_HEAD(&bitmap_list);
- spin_lock(&block_group->lock);
- if (block_group->disk_cache_state < BTRFS_DC_SETUP) {
- spin_unlock(&block_group->lock);
- return 0;
- }
- spin_unlock(&block_group->lock);
-
- inode = lookup_free_space_inode(root, block_group, path);
- if (IS_ERR(inode))
- return 0;
-
- if (!i_size_read(inode)) {
- iput(inode);
+ node = rb_first(&ctl->free_space_offset);
+ if (!node)
return 0;
- }
- node = rb_first(&block_group->free_space_offset);
- if (!node) {
- iput(inode);
- return 0;
- }
+ if (!i_size_read(inode))
+ return -1;
num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
PAGE_CACHE_SHIFT;
/* We need a checksum per page. */
crc = checksums = kzalloc(sizeof(u32) * num_pages, GFP_NOFS);
- if (!crc) {
- iput(inode);
- return 0;
- }
+ if (!crc)
+ return -1;
pages = kzalloc(sizeof(struct page *) * num_pages, GFP_NOFS);
if (!pages) {
kfree(crc);
- iput(inode);
- return 0;
+ return -1;
}
/* Since the first page has all of our checksums and our generation we
first_page_offset = (sizeof(u32) * num_pages) + sizeof(u64);
/* Get the cluster for this block_group if it exists */
- if (!list_empty(&block_group->cluster_list))
+ if (block_group && !list_empty(&block_group->cluster_list))
cluster = list_entry(block_group->cluster_list.next,
struct btrfs_free_cluster,
block_group_list);
* When searching for pinned extents, we need to start at our start
* offset.
*/
- start = block_group->key.objectid;
+ if (block_group)
+ start = block_group->key.objectid;
/* Write out the extent entries */
do {
* We want to add any pinned extents to our free space cache
* so we don't leak the space
*/
- while (!next_page && (start < block_group->key.objectid +
- block_group->key.offset)) {
+ while (block_group && !next_page &&
+ (start < block_group->key.objectid +
+ block_group->key.offset)) {
ret = find_first_extent_bit(unpin, start, &start, &end,
EXTENT_DIRTY);
if (ret) {
filemap_write_and_wait(inode->i_mapping);
key.objectid = BTRFS_FREE_SPACE_OBJECTID;
- key.offset = block_group->key.objectid;
+ key.offset = offset;
key.type = 0;
ret = btrfs_search_slot(trans, root, &key, path, 1, 1);
if (ret < 0) {
- ret = 0;
+ ret = -1;
clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1,
EXTENT_DIRTY | EXTENT_DELALLOC |
EXTENT_DO_ACCOUNTING, 0, 0, NULL, GFP_NOFS);
path->slots[0]--;
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID ||
- found_key.offset != block_group->key.objectid) {
- ret = 0;
+ found_key.offset != offset) {
+ ret = -1;
clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1,
EXTENT_DIRTY | EXTENT_DELALLOC |
EXTENT_DO_ACCOUNTING, 0, 0, NULL,
GFP_NOFS);
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
goto out_free;
}
}
btrfs_set_free_space_bitmaps(leaf, header, bitmaps);
btrfs_set_free_space_generation(leaf, header, trans->transid);
btrfs_mark_buffer_dirty(leaf);
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
ret = 1;
out_free:
- if (ret == 0) {
+ if (ret != 1) {
invalidate_inode_pages2_range(inode->i_mapping, 0, index);
- spin_lock(&block_group->lock);
- block_group->disk_cache_state = BTRFS_DC_ERROR;
- spin_unlock(&block_group->lock);
BTRFS_I(inode)->generation = 0;
}
kfree(checksums);
kfree(pages);
btrfs_update_inode(trans, root, inode);
+ return ret;
+}
+
+int btrfs_write_out_cache(struct btrfs_root *root,
+ struct btrfs_trans_handle *trans,
+ struct btrfs_block_group_cache *block_group,
+ struct btrfs_path *path)
+{
+ struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
+ struct inode *inode;
+ int ret = 0;
+
+ root = root->fs_info->tree_root;
+
+ spin_lock(&block_group->lock);
+ if (block_group->disk_cache_state < BTRFS_DC_SETUP) {
+ spin_unlock(&block_group->lock);
+ return 0;
+ }
+ spin_unlock(&block_group->lock);
+
+ inode = lookup_free_space_inode(root, block_group, path);
+ if (IS_ERR(inode))
+ return 0;
+
+ ret = __btrfs_write_out_cache(root, inode, ctl, block_group, trans,
+ path, block_group->key.objectid);
+ if (ret < 0) {
+ spin_lock(&block_group->lock);
+ block_group->disk_cache_state = BTRFS_DC_ERROR;
+ spin_unlock(&block_group->lock);
+ ret = 0;
+
+ printk(KERN_ERR "btrfs: failed to write free space cace "
+ "for block group %llu\n", block_group->key.objectid);
+ }
+
iput(inode);
return ret;
}
-static inline unsigned long offset_to_bit(u64 bitmap_start, u64 sectorsize,
+static inline unsigned long offset_to_bit(u64 bitmap_start, u32 unit,
u64 offset)
{
BUG_ON(offset < bitmap_start);
offset -= bitmap_start;
- return (unsigned long)(div64_u64(offset, sectorsize));
+ return (unsigned long)(div_u64(offset, unit));
}
-static inline unsigned long bytes_to_bits(u64 bytes, u64 sectorsize)
+static inline unsigned long bytes_to_bits(u64 bytes, u32 unit)
{
- return (unsigned long)(div64_u64(bytes, sectorsize));
+ return (unsigned long)(div_u64(bytes, unit));
}
-static inline u64 offset_to_bitmap(struct btrfs_block_group_cache *block_group,
+static inline u64 offset_to_bitmap(struct btrfs_free_space_ctl *ctl,
u64 offset)
{
u64 bitmap_start;
u64 bytes_per_bitmap;
- bytes_per_bitmap = BITS_PER_BITMAP * block_group->sectorsize;
- bitmap_start = offset - block_group->key.objectid;
+ bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit;
+ bitmap_start = offset - ctl->start;
bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap);
bitmap_start *= bytes_per_bitmap;
- bitmap_start += block_group->key.objectid;
+ bitmap_start += ctl->start;
return bitmap_start;
}
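
With the free-space code made generic, ctl->unit replaces the block group's
sectorsize and ctl->start replaces key.objectid, but the geometry is
unchanged: one bitmap covers BITS_PER_BITMAP * unit bytes, and offsets round
down to the bitmap containing them. A user-space sketch of that rounding
(illustrative only, not part of the patch; assumes 4K pages and sectors):

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_BITMAP (4096 * 8)	/* PAGE_CACHE_SIZE * 8 for 4K pages */

static uint64_t offset_to_bitmap(uint64_t start, uint32_t unit, uint64_t offset)
{
	uint64_t bytes_per_bitmap = (uint64_t)BITS_PER_BITMAP * unit;

	/* round the offset down to the bitmap that covers it */
	return start + (offset - start) / bytes_per_bitmap * bytes_per_bitmap;
}

int main(void)
{
	/* with 4K sectors one bitmap spans 128MiB, so an offset 200MiB into
	 * the group lands in the second bitmap, at start + 128MiB */
	printf("%llu\n", (unsigned long long)
	       offset_to_bitmap(0, 4096, 200ULL << 20));
	return 0;
}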
* offset.
*/
static struct btrfs_free_space *
-tree_search_offset(struct btrfs_block_group_cache *block_group,
+tree_search_offset(struct btrfs_free_space_ctl *ctl,
u64 offset, int bitmap_only, int fuzzy)
{
- struct rb_node *n = block_group->free_space_offset.rb_node;
+ struct rb_node *n = ctl->free_space_offset.rb_node;
struct btrfs_free_space *entry, *prev = NULL;
/* find entry that is closest to the 'offset' */
break;
}
}
- if (entry->offset + BITS_PER_BITMAP *
- block_group->sectorsize > offset)
+ if (entry->offset + BITS_PER_BITMAP * ctl->unit > offset)
return entry;
} else if (entry->offset + entry->bytes > offset)
return entry;
while (1) {
if (entry->bitmap) {
if (entry->offset + BITS_PER_BITMAP *
- block_group->sectorsize > offset)
+ ctl->unit > offset)
break;
} else {
if (entry->offset + entry->bytes > offset)
}
static inline void
-__unlink_free_space(struct btrfs_block_group_cache *block_group,
+__unlink_free_space(struct btrfs_free_space_ctl *ctl,
struct btrfs_free_space *info)
{
- rb_erase(&info->offset_index, &block_group->free_space_offset);
- block_group->free_extents--;
+ rb_erase(&info->offset_index, &ctl->free_space_offset);
+ ctl->free_extents--;
}
-static void unlink_free_space(struct btrfs_block_group_cache *block_group,
+static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
struct btrfs_free_space *info)
{
- __unlink_free_space(block_group, info);
- block_group->free_space -= info->bytes;
+ __unlink_free_space(ctl, info);
+ ctl->free_space -= info->bytes;
}
-static int link_free_space(struct btrfs_block_group_cache *block_group,
+static int link_free_space(struct btrfs_free_space_ctl *ctl,
struct btrfs_free_space *info)
{
int ret = 0;
BUG_ON(!info->bitmap && !info->bytes);
- ret = tree_insert_offset(&block_group->free_space_offset, info->offset,
+ ret = tree_insert_offset(&ctl->free_space_offset, info->offset,
&info->offset_index, (info->bitmap != NULL));
if (ret)
return ret;
- block_group->free_space += info->bytes;
- block_group->free_extents++;
+ ctl->free_space += info->bytes;
+ ctl->free_extents++;
return ret;
}
-static void recalculate_thresholds(struct btrfs_block_group_cache *block_group)
+static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
{
+ struct btrfs_block_group_cache *block_group = ctl->private;
u64 max_bytes;
u64 bitmap_bytes;
u64 extent_bytes;
u64 size = block_group->key.offset;
+ u64 bytes_per_bg = BITS_PER_BITMAP * block_group->sectorsize;
+ int max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg);
+
+ BUG_ON(ctl->total_bitmaps > max_bitmaps);
/*
* The goal is to keep the total amount of memory used per 1gb of space
* sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as
* we add more bitmaps.
*/
- bitmap_bytes = (block_group->total_bitmaps + 1) * PAGE_CACHE_SIZE;
+ bitmap_bytes = (ctl->total_bitmaps + 1) * PAGE_CACHE_SIZE;
if (bitmap_bytes >= max_bytes) {
- block_group->extents_thresh = 0;
+ ctl->extents_thresh = 0;
return;
}
extent_bytes = max_bytes - bitmap_bytes;
extent_bytes = min_t(u64, extent_bytes, div64_u64(max_bytes, 2));
- block_group->extents_thresh =
+ ctl->extents_thresh =
div64_u64(extent_bytes, (sizeof(struct btrfs_free_space)));
}
-static void bitmap_clear_bits(struct btrfs_block_group_cache *block_group,
+static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
struct btrfs_free_space *info, u64 offset,
u64 bytes)
{
- unsigned long start, end;
- unsigned long i;
+ unsigned long start, count;
- start = offset_to_bit(info->offset, block_group->sectorsize, offset);
- end = start + bytes_to_bits(bytes, block_group->sectorsize);
- BUG_ON(end > BITS_PER_BITMAP);
+ start = offset_to_bit(info->offset, ctl->unit, offset);
+ count = bytes_to_bits(bytes, ctl->unit);
+ BUG_ON(start + count > BITS_PER_BITMAP);
- for (i = start; i < end; i++)
- clear_bit(i, info->bitmap);
+ bitmap_clear(info->bitmap, start, count);
info->bytes -= bytes;
- block_group->free_space -= bytes;
+ ctl->free_space -= bytes;
}
-static void bitmap_set_bits(struct btrfs_block_group_cache *block_group,
+static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl,
struct btrfs_free_space *info, u64 offset,
u64 bytes)
{
- unsigned long start, end;
- unsigned long i;
+ unsigned long start, count;
- start = offset_to_bit(info->offset, block_group->sectorsize, offset);
- end = start + bytes_to_bits(bytes, block_group->sectorsize);
- BUG_ON(end > BITS_PER_BITMAP);
+ start = offset_to_bit(info->offset, ctl->unit, offset);
+ count = bytes_to_bits(bytes, ctl->unit);
+ BUG_ON(start + count > BITS_PER_BITMAP);
- for (i = start; i < end; i++)
- set_bit(i, info->bitmap);
+ bitmap_set(info->bitmap, start, count);
info->bytes += bytes;
- block_group->free_space += bytes;
+ ctl->free_space += bytes;
}
-static int search_bitmap(struct btrfs_block_group_cache *block_group,
+static int search_bitmap(struct btrfs_free_space_ctl *ctl,
struct btrfs_free_space *bitmap_info, u64 *offset,
u64 *bytes)
{
unsigned long bits, i;
unsigned long next_zero;
- i = offset_to_bit(bitmap_info->offset, block_group->sectorsize,
+ i = offset_to_bit(bitmap_info->offset, ctl->unit,
max_t(u64, *offset, bitmap_info->offset));
- bits = bytes_to_bits(*bytes, block_group->sectorsize);
+ bits = bytes_to_bits(*bytes, ctl->unit);
for (i = find_next_bit(bitmap_info->bitmap, BITS_PER_BITMAP, i);
i < BITS_PER_BITMAP;
}
if (found_bits) {
- *offset = (u64)(i * block_group->sectorsize) +
- bitmap_info->offset;
- *bytes = (u64)(found_bits) * block_group->sectorsize;
+ *offset = (u64)(i * ctl->unit) + bitmap_info->offset;
+ *bytes = (u64)(found_bits) * ctl->unit;
return 0;
}
return -1;
}
-static struct btrfs_free_space *find_free_space(struct btrfs_block_group_cache
- *block_group, u64 *offset,
- u64 *bytes, int debug)
+static struct btrfs_free_space *
+find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes)
{
struct btrfs_free_space *entry;
struct rb_node *node;
int ret;
- if (!block_group->free_space_offset.rb_node)
+ if (!ctl->free_space_offset.rb_node)
return NULL;
- entry = tree_search_offset(block_group,
- offset_to_bitmap(block_group, *offset),
- 0, 1);
+ entry = tree_search_offset(ctl, offset_to_bitmap(ctl, *offset), 0, 1);
if (!entry)
return NULL;
continue;
if (entry->bitmap) {
- ret = search_bitmap(block_group, entry, offset, bytes);
+ ret = search_bitmap(ctl, entry, offset, bytes);
if (!ret)
return entry;
continue;
return NULL;
}
-static void add_new_bitmap(struct btrfs_block_group_cache *block_group,
+static void add_new_bitmap(struct btrfs_free_space_ctl *ctl,
struct btrfs_free_space *info, u64 offset)
{
- u64 bytes_per_bg = BITS_PER_BITMAP * block_group->sectorsize;
- int max_bitmaps = (int)div64_u64(block_group->key.offset +
- bytes_per_bg - 1, bytes_per_bg);
- BUG_ON(block_group->total_bitmaps >= max_bitmaps);
-
- info->offset = offset_to_bitmap(block_group, offset);
+ info->offset = offset_to_bitmap(ctl, offset);
info->bytes = 0;
- link_free_space(block_group, info);
- block_group->total_bitmaps++;
+ link_free_space(ctl, info);
+ ctl->total_bitmaps++;
- recalculate_thresholds(block_group);
+ ctl->op->recalc_thresholds(ctl);
}
-static void free_bitmap(struct btrfs_block_group_cache *block_group,
+static void free_bitmap(struct btrfs_free_space_ctl *ctl,
struct btrfs_free_space *bitmap_info)
{
- unlink_free_space(block_group, bitmap_info);
+ unlink_free_space(ctl, bitmap_info);
kfree(bitmap_info->bitmap);
kmem_cache_free(btrfs_free_space_cachep, bitmap_info);
- block_group->total_bitmaps--;
- recalculate_thresholds(block_group);
+ ctl->total_bitmaps--;
+ ctl->op->recalc_thresholds(ctl);
}
-static noinline int remove_from_bitmap(struct btrfs_block_group_cache *block_group,
+static noinline int remove_from_bitmap(struct btrfs_free_space_ctl *ctl,
struct btrfs_free_space *bitmap_info,
u64 *offset, u64 *bytes)
{
int ret;
again:
- end = bitmap_info->offset +
- (u64)(BITS_PER_BITMAP * block_group->sectorsize) - 1;
+ end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit) - 1;
/*
* XXX - this can go away after a few releases.
search_start = *offset;
search_bytes = *bytes;
search_bytes = min(search_bytes, end - search_start + 1);
- ret = search_bitmap(block_group, bitmap_info, &search_start,
- &search_bytes);
+ ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes);
BUG_ON(ret < 0 || search_start != *offset);
if (*offset > bitmap_info->offset && *offset + *bytes > end) {
- bitmap_clear_bits(block_group, bitmap_info, *offset,
- end - *offset + 1);
+ bitmap_clear_bits(ctl, bitmap_info, *offset, end - *offset + 1);
*bytes -= end - *offset + 1;
*offset = end + 1;
} else if (*offset >= bitmap_info->offset && *offset + *bytes <= end) {
- bitmap_clear_bits(block_group, bitmap_info, *offset, *bytes);
+ bitmap_clear_bits(ctl, bitmap_info, *offset, *bytes);
*bytes = 0;
}
if (*bytes) {
struct rb_node *next = rb_next(&bitmap_info->offset_index);
if (!bitmap_info->bytes)
- free_bitmap(block_group, bitmap_info);
+ free_bitmap(ctl, bitmap_info);
/*
* no entry after this bitmap, but we still have bytes to
*/
search_start = *offset;
search_bytes = *bytes;
- ret = search_bitmap(block_group, bitmap_info, &search_start,
+ ret = search_bitmap(ctl, bitmap_info, &search_start,
&search_bytes);
if (ret < 0 || search_start != *offset)
return -EAGAIN;
goto again;
} else if (!bitmap_info->bytes)
- free_bitmap(block_group, bitmap_info);
+ free_bitmap(ctl, bitmap_info);
return 0;
}
-static int insert_into_bitmap(struct btrfs_block_group_cache *block_group,
- struct btrfs_free_space *info)
+static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
+ struct btrfs_free_space *info)
{
- struct btrfs_free_space *bitmap_info;
- int added = 0;
- u64 bytes, offset, end;
- int ret;
+ struct btrfs_block_group_cache *block_group = ctl->private;
/*
* If we are below the extents threshold then we can add this as an
* extent, and don't have to deal with the bitmap
*/
- if (block_group->free_extents < block_group->extents_thresh) {
+ if (ctl->free_extents < ctl->extents_thresh) {
/*
* If this block group has some small extents we don't want to
* use up all of our free slots in the cache with them, we want
* the overhead of a bitmap if we don't have to.
*/
if (info->bytes <= block_group->sectorsize * 4) {
- if (block_group->free_extents * 2 <=
- block_group->extents_thresh)
- return 0;
+ if (ctl->free_extents * 2 <= ctl->extents_thresh)
+ return false;
} else {
- return 0;
+ return false;
}
}
*/
if (BITS_PER_BITMAP * block_group->sectorsize >
block_group->key.offset)
- return 0;
+ return false;
+
+ return true;
+}
+
+static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl,
+ struct btrfs_free_space *info)
+{
+ struct btrfs_free_space *bitmap_info;
+ int added = 0;
+ u64 bytes, offset, end;
+ int ret;
bytes = info->bytes;
offset = info->offset;
+ if (!ctl->op->use_bitmap(ctl, info))
+ return 0;
+
again:
- bitmap_info = tree_search_offset(block_group,
- offset_to_bitmap(block_group, offset),
+ bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
1, 0);
if (!bitmap_info) {
BUG_ON(added);
goto new_bitmap;
}
- end = bitmap_info->offset +
- (u64)(BITS_PER_BITMAP * block_group->sectorsize);
+ end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit);
if (offset >= bitmap_info->offset && offset + bytes > end) {
- bitmap_set_bits(block_group, bitmap_info, offset,
- end - offset);
+ bitmap_set_bits(ctl, bitmap_info, offset, end - offset);
bytes -= end - offset;
offset = end;
added = 0;
} else if (offset >= bitmap_info->offset && offset + bytes <= end) {
- bitmap_set_bits(block_group, bitmap_info, offset, bytes);
+ bitmap_set_bits(ctl, bitmap_info, offset, bytes);
bytes = 0;
} else {
BUG();
new_bitmap:
if (info && info->bitmap) {
- add_new_bitmap(block_group, info, offset);
+ add_new_bitmap(ctl, info, offset);
added = 1;
info = NULL;
goto again;
} else {
- spin_unlock(&block_group->tree_lock);
+ spin_unlock(&ctl->tree_lock);
/* no pre-allocated info, allocate a new one */
if (!info) {
info = kmem_cache_zalloc(btrfs_free_space_cachep,
GFP_NOFS);
if (!info) {
- spin_lock(&block_group->tree_lock);
+ spin_lock(&ctl->tree_lock);
ret = -ENOMEM;
goto out;
}
/* allocate the bitmap */
info->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
- spin_lock(&block_group->tree_lock);
+ spin_lock(&ctl->tree_lock);
if (!info->bitmap) {
ret = -ENOMEM;
goto out;
return ret;
}
-static bool try_merge_free_space(struct btrfs_block_group_cache *block_group,
+static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl,
struct btrfs_free_space *info, bool update_stat)
{
struct btrfs_free_space *left_info;
struct btrfs_free_space *right_info;
* are adding, if there is remove that struct and add a new one to
* cover the entire range
*/
- right_info = tree_search_offset(block_group, offset + bytes, 0, 0);
+ right_info = tree_search_offset(ctl, offset + bytes, 0, 0);
if (right_info && rb_prev(&right_info->offset_index))
left_info = rb_entry(rb_prev(&right_info->offset_index),
struct btrfs_free_space, offset_index);
else
- left_info = tree_search_offset(block_group, offset - 1, 0, 0);
+ left_info = tree_search_offset(ctl, offset - 1, 0, 0);
if (right_info && !right_info->bitmap) {
if (update_stat)
- unlink_free_space(block_group, right_info);
+ unlink_free_space(ctl, right_info);
else
- __unlink_free_space(block_group, right_info);
+ __unlink_free_space(ctl, right_info);
info->bytes += right_info->bytes;
kmem_cache_free(btrfs_free_space_cachep, right_info);
merged = true;
if (left_info && !left_info->bitmap &&
left_info->offset + left_info->bytes == offset) {
if (update_stat)
- unlink_free_space(block_group, left_info);
+ unlink_free_space(ctl, left_info);
else
- __unlink_free_space(block_group, left_info);
+ __unlink_free_space(ctl, left_info);
info->offset = left_info->offset;
info->bytes += left_info->bytes;
kmem_cache_free(btrfs_free_space_cachep, left_info);
return merged;
}
-int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
- u64 offset, u64 bytes)
+int __btrfs_add_free_space(struct btrfs_free_space_ctl *ctl,
+ u64 offset, u64 bytes)
{
struct btrfs_free_space *info;
int ret = 0;
info->offset = offset;
info->bytes = bytes;
- spin_lock(&block_group->tree_lock);
+ spin_lock(&ctl->tree_lock);
- if (try_merge_free_space(block_group, info, true))
+ if (try_merge_free_space(ctl, info, true))
goto link;
/*
* extent then we know we're going to have to allocate a new extent, so
* before we do that see if we need to drop this into a bitmap
*/
- ret = insert_into_bitmap(block_group, info);
+ ret = insert_into_bitmap(ctl, info);
if (ret < 0) {
goto out;
} else if (ret) {
goto out;
}
link:
- ret = link_free_space(block_group, info);
+ ret = link_free_space(ctl, info);
if (ret)
kmem_cache_free(btrfs_free_space_cachep, info);
out:
- spin_unlock(&block_group->tree_lock);
+ spin_unlock(&ctl->tree_lock);
if (ret) {
printk(KERN_CRIT "btrfs: unable to add free space :%d\n", ret);
int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
u64 offset, u64 bytes)
{
+ struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
struct btrfs_free_space *info;
struct btrfs_free_space *next_info = NULL;
int ret = 0;
- spin_lock(&block_group->tree_lock);
+ spin_lock(&ctl->tree_lock);
again:
- info = tree_search_offset(block_group, offset, 0, 0);
+ info = tree_search_offset(ctl, offset, 0, 0);
if (!info) {
/*
* oops didn't find an extent that matched the space we wanted
* to remove, look for a bitmap instead
*/
- info = tree_search_offset(block_group,
- offset_to_bitmap(block_group, offset),
+ info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
1, 0);
if (!info) {
WARN_ON(1);
offset_index);
if (next_info->bitmap)
- end = next_info->offset + BITS_PER_BITMAP *
- block_group->sectorsize - 1;
+ end = next_info->offset +
+ BITS_PER_BITMAP * ctl->unit - 1;
else
end = next_info->offset + next_info->bytes;
}
if (info->bytes == bytes) {
- unlink_free_space(block_group, info);
+ unlink_free_space(ctl, info);
if (info->bitmap) {
kfree(info->bitmap);
- block_group->total_bitmaps--;
+ ctl->total_bitmaps--;
}
kmem_cache_free(btrfs_free_space_cachep, info);
goto out_lock;
}
if (!info->bitmap && info->offset == offset) {
- unlink_free_space(block_group, info);
+ unlink_free_space(ctl, info);
info->offset += bytes;
info->bytes -= bytes;
- link_free_space(block_group, info);
+ link_free_space(ctl, info);
goto out_lock;
}
* first unlink the old info and then
* insert it again after the hole we're creating
*/
- unlink_free_space(block_group, info);
+ unlink_free_space(ctl, info);
if (offset + bytes < info->offset + info->bytes) {
u64 old_end = info->offset + info->bytes;
info->offset = offset + bytes;
info->bytes = old_end - info->offset;
- ret = link_free_space(block_group, info);
+ ret = link_free_space(ctl, info);
WARN_ON(ret);
if (ret)
goto out_lock;
*/
kmem_cache_free(btrfs_free_space_cachep, info);
}
- spin_unlock(&block_group->tree_lock);
+ spin_unlock(&ctl->tree_lock);
/* step two, insert a new info struct to cover
* anything before the hole
goto out;
}
- ret = remove_from_bitmap(block_group, info, &offset, &bytes);
+ ret = remove_from_bitmap(ctl, info, &offset, &bytes);
if (ret == -EAGAIN)
goto again;
BUG_ON(ret);
out_lock:
- spin_unlock(&block_group->tree_lock);
+ spin_unlock(&ctl->tree_lock);
out:
return ret;
}
void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
u64 bytes)
{
+ struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
struct btrfs_free_space *info;
struct rb_node *n;
int count = 0;
- for (n = rb_first(&block_group->free_space_offset); n; n = rb_next(n)) {
+ for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
info = rb_entry(n, struct btrfs_free_space, offset_index);
if (info->bytes >= bytes)
count++;
"\n", count);
}
+static struct btrfs_free_space_op free_space_op = {
+ .recalc_thresholds = recalculate_thresholds,
+ .use_bitmap = use_bitmap,
+};
+
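+/*
+ * Wire a block group's free space ctl to the generic helpers above:
+ * offsets are tracked in units of sectorsize, starting at the block
+ * group's objectid, using the block-group flavoured ops.
+ */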
+void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group)
+{
+ struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
+
+ spin_lock_init(&ctl->tree_lock);
+ ctl->unit = block_group->sectorsize;
+ ctl->start = block_group->key.objectid;
+ ctl->private = block_group;
+ ctl->op = &free_space_op;
+
+ /*
+ * we only want to have 32k of ram per block group for keeping
+ * track of free space, and if we pass 1/2 of that we want to
+ * start converting things over to using bitmaps
+ */
+ ctl->extents_thresh = ((1024 * 32) / 2) /
+ sizeof(struct btrfs_free_space);
+}
+
/*
* for a given cluster, put all of its extents back into the free
* space cache. If the block group passed doesn't match the block group
struct btrfs_block_group_cache *block_group,
struct btrfs_free_cluster *cluster)
{
+ struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
struct btrfs_free_space *entry;
struct rb_node *node;
bitmap = (entry->bitmap != NULL);
if (!bitmap)
- try_merge_free_space(block_group, entry, false);
- tree_insert_offset(&block_group->free_space_offset,
+ try_merge_free_space(ctl, entry, false);
+ tree_insert_offset(&ctl->free_space_offset,
entry->offset, &entry->offset_index, bitmap);
}
cluster->root = RB_ROOT;
return 0;
}
-void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
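+/*
+ * Free every entry in the tree, dropping tree_lock periodically so we
+ * don't monopolize the CPU while tearing down a large cache.
+ */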
+void __btrfs_remove_free_space_cache_locked(struct btrfs_free_space_ctl *ctl)
{
struct btrfs_free_space *info;
struct rb_node *node;
+
+ while ((node = rb_last(&ctl->free_space_offset)) != NULL) {
+ info = rb_entry(node, struct btrfs_free_space, offset_index);
+ unlink_free_space(ctl, info);
+ kfree(info->bitmap);
+ kmem_cache_free(btrfs_free_space_cachep, info);
+ if (need_resched()) {
+ spin_unlock(&ctl->tree_lock);
+ cond_resched();
+ spin_lock(&ctl->tree_lock);
+ }
+ }
+}
+
+void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl)
+{
+ spin_lock(&ctl->tree_lock);
+ __btrfs_remove_free_space_cache_locked(ctl);
+ spin_unlock(&ctl->tree_lock);
+}
+
+void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
+{
+ struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
struct btrfs_free_cluster *cluster;
struct list_head *head;
- spin_lock(&block_group->tree_lock);
+ spin_lock(&ctl->tree_lock);
while ((head = block_group->cluster_list.next) !=
&block_group->cluster_list) {
cluster = list_entry(head, struct btrfs_free_cluster,
WARN_ON(cluster->block_group != block_group);
__btrfs_return_cluster_to_free_space(block_group, cluster);
if (need_resched()) {
- spin_unlock(&block_group->tree_lock);
+ spin_unlock(&ctl->tree_lock);
cond_resched();
- spin_lock(&block_group->tree_lock);
+ spin_lock(&ctl->tree_lock);
}
}
+ __btrfs_remove_free_space_cache_locked(ctl);
+ spin_unlock(&ctl->tree_lock);
- while ((node = rb_last(&block_group->free_space_offset)) != NULL) {
- info = rb_entry(node, struct btrfs_free_space, offset_index);
- if (!info->bitmap) {
- unlink_free_space(block_group, info);
- kmem_cache_free(btrfs_free_space_cachep, info);
- } else {
- free_bitmap(block_group, info);
- }
-
- if (need_resched()) {
- spin_unlock(&block_group->tree_lock);
- cond_resched();
- spin_lock(&block_group->tree_lock);
- }
- }
-
- spin_unlock(&block_group->tree_lock);
}
u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
u64 offset, u64 bytes, u64 empty_size)
{
+ struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
struct btrfs_free_space *entry = NULL;
u64 bytes_search = bytes + empty_size;
u64 ret = 0;
- spin_lock(&block_group->tree_lock);
- entry = find_free_space(block_group, &offset, &bytes_search, 0);
+ spin_lock(&ctl->tree_lock);
+ entry = find_free_space(ctl, &offset, &bytes_search);
if (!entry)
goto out;
ret = offset;
if (entry->bitmap) {
- bitmap_clear_bits(block_group, entry, offset, bytes);
+ bitmap_clear_bits(ctl, entry, offset, bytes);
if (!entry->bytes)
- free_bitmap(block_group, entry);
+ free_bitmap(ctl, entry);
} else {
- unlink_free_space(block_group, entry);
+ unlink_free_space(ctl, entry);
entry->offset += bytes;
entry->bytes -= bytes;
if (!entry->bytes)
kmem_cache_free(btrfs_free_space_cachep, entry);
else
- link_free_space(block_group, entry);
+ link_free_space(ctl, entry);
}
out:
- spin_unlock(&block_group->tree_lock);
+ spin_unlock(&ctl->tree_lock);
return ret;
}
struct btrfs_block_group_cache *block_group,
struct btrfs_free_cluster *cluster)
{
+ struct btrfs_free_space_ctl *ctl;
int ret;
/* first, get a safe pointer to the block group */
atomic_inc(&block_group->count);
spin_unlock(&cluster->lock);
+ ctl = block_group->free_space_ctl;
+
/* now return any extents the cluster had on it */
- spin_lock(&block_group->tree_lock);
+ spin_lock(&ctl->tree_lock);
ret = __btrfs_return_cluster_to_free_space(block_group, cluster);
- spin_unlock(&block_group->tree_lock);
+ spin_unlock(&ctl->tree_lock);
/* finally drop our ref */
btrfs_put_block_group(block_group);
struct btrfs_free_space *entry,
u64 bytes, u64 min_start)
{
+ struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
int err;
u64 search_start = cluster->window_start;
u64 search_bytes = bytes;
search_start = min_start;
search_bytes = bytes;
- err = search_bitmap(block_group, entry, &search_start,
- &search_bytes);
+ err = search_bitmap(ctl, entry, &search_start, &search_bytes);
if (err)
return 0;
ret = search_start;
- bitmap_clear_bits(block_group, entry, ret, bytes);
+ bitmap_clear_bits(ctl, entry, ret, bytes);
return ret;
}
struct btrfs_free_cluster *cluster, u64 bytes,
u64 min_start)
{
+ struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
struct btrfs_free_space *entry = NULL;
struct rb_node *node;
u64 ret = 0;
while(1) {
if (entry->bytes < bytes ||
(!entry->bitmap && entry->offset < min_start)) {
- struct rb_node *node;
-
node = rb_next(&entry->offset_index);
if (!node)
break;
cluster, entry, bytes,
min_start);
if (ret == 0) {
- struct rb_node *node;
node = rb_next(&entry->offset_index);
if (!node)
break;
if (!ret)
return 0;
- spin_lock(&block_group->tree_lock);
+ spin_lock(&ctl->tree_lock);
- block_group->free_space -= bytes;
+ ctl->free_space -= bytes;
if (entry->bytes == 0) {
- block_group->free_extents--;
+ ctl->free_extents--;
if (entry->bitmap) {
kfree(entry->bitmap);
- block_group->total_bitmaps--;
- recalculate_thresholds(block_group);
+ ctl->total_bitmaps--;
+ ctl->op->recalc_thresholds(ctl);
}
kmem_cache_free(btrfs_free_space_cachep, entry);
}
- spin_unlock(&block_group->tree_lock);
+ spin_unlock(&ctl->tree_lock);
return ret;
}
struct btrfs_free_cluster *cluster,
u64 offset, u64 bytes, u64 min_bytes)
{
+ struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
unsigned long next_zero;
unsigned long i;
unsigned long search_bits;
cluster->window_start = start * block_group->sectorsize +
entry->offset;
- rb_erase(&entry->offset_index, &block_group->free_space_offset);
+ rb_erase(&entry->offset_index, &ctl->free_space_offset);
ret = tree_insert_offset(&cluster->root, entry->offset,
&entry->offset_index, 1);
BUG_ON(ret);
struct btrfs_free_cluster *cluster,
u64 offset, u64 bytes, u64 min_bytes)
{
+ struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
struct btrfs_free_space *first = NULL;
struct btrfs_free_space *entry = NULL;
struct btrfs_free_space *prev = NULL;
u64 max_extent;
u64 max_gap = 128 * 1024;
- entry = tree_search_offset(block_group, offset, 0, 1);
+ entry = tree_search_offset(ctl, offset, 0, 1);
if (!entry)
return -ENOSPC;
if (entry->bitmap)
continue;
- rb_erase(&entry->offset_index, &block_group->free_space_offset);
+ rb_erase(&entry->offset_index, &ctl->free_space_offset);
ret = tree_insert_offset(&cluster->root, entry->offset,
&entry->offset_index, 0);
BUG_ON(ret);
struct btrfs_free_cluster *cluster,
u64 offset, u64 bytes, u64 min_bytes)
{
+ struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
struct btrfs_free_space *entry;
struct rb_node *node;
int ret = -ENOSPC;
- if (block_group->total_bitmaps == 0)
+ if (ctl->total_bitmaps == 0)
return -ENOSPC;
- entry = tree_search_offset(block_group,
- offset_to_bitmap(block_group, offset),
- 0, 1);
+ entry = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), 0, 1);
if (!entry)
return -ENOSPC;
struct btrfs_free_cluster *cluster,
u64 offset, u64 bytes, u64 empty_size)
{
+ struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
u64 min_bytes;
int ret;
} else
min_bytes = max(bytes, (bytes + empty_size) >> 2);
- spin_lock(&block_group->tree_lock);
+ spin_lock(&ctl->tree_lock);
/*
* If we know we don't have enough space to make a cluster don't even
* bother doing all the work to try and find one.
*/
- if (block_group->free_space < min_bytes) {
- spin_unlock(&block_group->tree_lock);
+ if (ctl->free_space < min_bytes) {
+ spin_unlock(&ctl->tree_lock);
return -ENOSPC;
}
}
out:
spin_unlock(&cluster->lock);
- spin_unlock(&block_group->tree_lock);
+ spin_unlock(&ctl->tree_lock);
return ret;
}
int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
u64 *trimmed, u64 start, u64 end, u64 minlen)
{
+ struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
struct btrfs_free_space *entry = NULL;
struct btrfs_fs_info *fs_info = block_group->fs_info;
u64 bytes = 0;
*trimmed = 0;
while (start < end) {
- spin_lock(&block_group->tree_lock);
+ spin_lock(&ctl->tree_lock);
- if (block_group->free_space < minlen) {
- spin_unlock(&block_group->tree_lock);
+ if (ctl->free_space < minlen) {
+ spin_unlock(&ctl->tree_lock);
break;
}
- entry = tree_search_offset(block_group, start, 0, 1);
+ entry = tree_search_offset(ctl, start, 0, 1);
if (!entry)
- entry = tree_search_offset(block_group,
- offset_to_bitmap(block_group,
- start),
+ entry = tree_search_offset(ctl,
+ offset_to_bitmap(ctl, start),
1, 1);
if (!entry || entry->offset >= end) {
- spin_unlock(&block_group->tree_lock);
+ spin_unlock(&ctl->tree_lock);
break;
}
if (entry->bitmap) {
- ret = search_bitmap(block_group, entry, &start, &bytes);
+ ret = search_bitmap(ctl, entry, &start, &bytes);
if (!ret) {
if (start >= end) {
- spin_unlock(&block_group->tree_lock);
+ spin_unlock(&ctl->tree_lock);
break;
}
bytes = min(bytes, end - start);
- bitmap_clear_bits(block_group, entry,
- start, bytes);
+ bitmap_clear_bits(ctl, entry, start, bytes);
if (entry->bytes == 0)
- free_bitmap(block_group, entry);
+ free_bitmap(ctl, entry);
} else {
start = entry->offset + BITS_PER_BITMAP *
block_group->sectorsize;
- spin_unlock(&block_group->tree_lock);
+ spin_unlock(&ctl->tree_lock);
ret = 0;
continue;
}
} else {
start = entry->offset;
bytes = min(entry->bytes, end - start);
- unlink_free_space(block_group, entry);
+ unlink_free_space(ctl, entry);
kmem_cache_free(btrfs_free_space_cachep, entry);
}
- spin_unlock(&block_group->tree_lock);
+ spin_unlock(&ctl->tree_lock);
if (bytes >= minlen) {
int update_ret;
bytes,
&actually_trimmed);
- btrfs_add_free_space(block_group,
- start, bytes);
+ btrfs_add_free_space(block_group, start, bytes);
if (!update_ret)
btrfs_update_reserved_bytes(block_group,
bytes, 0, 1);
return ret;
}
+
+/*
+ * Find the left-most item in the cache tree, and then return the
+ * smallest inode number in the item.
+ *
+ * Note: the returned inode number may not be the smallest one in
+ * the tree, if the left-most item is a bitmap.
+ */
+u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root)
+{
+ struct btrfs_free_space_ctl *ctl = fs_root->free_ino_ctl;
+ struct btrfs_free_space *entry = NULL;
+ u64 ino = 0;
+
+ spin_lock(&ctl->tree_lock);
+
+ if (RB_EMPTY_ROOT(&ctl->free_space_offset))
+ goto out;
+
+ entry = rb_entry(rb_first(&ctl->free_space_offset),
+ struct btrfs_free_space, offset_index);
+
+ if (!entry->bitmap) {
+ ino = entry->offset;
+
+ unlink_free_space(ctl, entry);
+ entry->offset++;
+ entry->bytes--;
+ if (!entry->bytes)
+ kmem_cache_free(btrfs_free_space_cachep, entry);
+ else
+ link_free_space(ctl, entry);
+ } else {
+ u64 offset = 0;
+ u64 count = 1;
+ int ret;
+
+ ret = search_bitmap(ctl, entry, &offset, &count);
+ BUG_ON(ret);
+
+ ino = offset;
+ bitmap_clear_bits(ctl, entry, offset, 1);
+ if (entry->bytes == 0)
+ free_bitmap(ctl, entry);
+ }
+out:
+ spin_unlock(&ctl->tree_lock);
+
+ return ino;
+}
+
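+/*
+ * Return the inode that backs the free-ino cache for this root, caching
+ * it in root->cache_inode so later lookups can skip the disk search.
+ */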
+struct inode *lookup_free_ino_inode(struct btrfs_root *root,
+ struct btrfs_path *path)
+{
+ struct inode *inode = NULL;
+
+ spin_lock(&root->cache_lock);
+ if (root->cache_inode)
+ inode = igrab(root->cache_inode);
+ spin_unlock(&root->cache_lock);
+ if (inode)
+ return inode;
+
+ inode = __lookup_free_space_inode(root, path, 0);
+ if (IS_ERR(inode))
+ return inode;
+
+ spin_lock(&root->cache_lock);
+ if (!root->fs_info->closing)
+ root->cache_inode = igrab(inode);
+ spin_unlock(&root->cache_lock);
+
+ return inode;
+}
+
+int create_free_ino_inode(struct btrfs_root *root,
+ struct btrfs_trans_handle *trans,
+ struct btrfs_path *path)
+{
+ return __create_free_space_inode(root, trans, path,
+ BTRFS_FREE_INO_OBJECTID, 0);
+}
+
+int load_free_ino_cache(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
+{
+ struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
+ struct btrfs_path *path;
+ struct inode *inode;
+ int ret = 0;
+ u64 root_gen = btrfs_root_generation(&root->root_item);
+
+ /*
+ * If we're unmounting then just return, since this does a search on the
+ * normal root and not the commit root and we could deadlock.
+ */
+ smp_mb();
+ if (fs_info->closing)
+ return 0;
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return 0;
+
+ inode = lookup_free_ino_inode(root, path);
+ if (IS_ERR(inode))
+ goto out;
+
+ if (root_gen != BTRFS_I(inode)->generation)
+ goto out_put;
+
+ ret = __load_free_space_cache(root, inode, ctl, path, 0);
+
+ if (ret < 0)
+ printk(KERN_ERR "btrfs: failed to load free ino cache for "
+ "root %llu\n", root->root_key.objectid);
+out_put:
+ iput(inode);
+out:
+ btrfs_free_path(path);
+ return ret;
+}
+
+int btrfs_write_out_ino_cache(struct btrfs_root *root,
+ struct btrfs_trans_handle *trans,
+ struct btrfs_path *path)
+{
+ struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
+ struct inode *inode;
+ int ret;
+
+ inode = lookup_free_ino_inode(root, path);
+ if (IS_ERR(inode))
+ return 0;
+
+ ret = __btrfs_write_out_cache(root, inode, ctl, NULL, trans, path, 0);
+ if (ret < 0)
+ printk(KERN_ERR "btrfs: failed to write free ino cache "
+ "for root %llu\n", root->root_key.objectid);
+
+ iput(inode);
+ return ret;
+}
* Boston, MA 021110-1307, USA.
*/
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/pagemap.h>
+
#include "ctree.h"
#include "disk-io.h"
+#include "free-space-cache.h"
+#include "inode-map.h"
#include "transaction.h"
-int btrfs_find_highest_inode(struct btrfs_root *root, u64 *objectid)
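+/*
+ * Scan the inode items in the commit root of the fs tree, recording each
+ * gap between allocated inode numbers as free space in the free_ino ctl.
+ */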
+static int caching_kthread(void *data)
+{
+ struct btrfs_root *root = data;
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
+ struct btrfs_key key;
+ struct btrfs_path *path;
+ struct extent_buffer *leaf;
+ u64 last = (u64)-1;
+ int slot;
+ int ret;
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+
+ /* Since the commit root is read-only, we can safely skip locking. */
+ path->skip_locking = 1;
+ path->search_commit_root = 1;
+ path->reada = 2;
+
+ key.objectid = BTRFS_FIRST_FREE_OBJECTID;
+ key.offset = 0;
+ key.type = BTRFS_INODE_ITEM_KEY;
+again:
+ /* need to make sure the commit_root doesn't disappear */
+ mutex_lock(&root->fs_commit_mutex);
+
+ ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+ if (ret < 0)
+ goto out;
+
+ while (1) {
+ smp_mb();
+ if (fs_info->closing > 1)
+ goto out;
+
+ leaf = path->nodes[0];
+ slot = path->slots[0];
+ if (path->slots[0] >= btrfs_header_nritems(leaf)) {
+ ret = btrfs_next_leaf(root, path);
+ if (ret < 0)
+ goto out;
+ else if (ret > 0)
+ break;
+
+ if (need_resched() ||
+ btrfs_transaction_in_commit(fs_info)) {
+ leaf = path->nodes[0];
+
+ if (btrfs_header_nritems(leaf) == 0) {
+ WARN_ON(1);
+ break;
+ }
+
+ /*
+ * Save the key so we can advance forward
+ * in the next search.
+ */
+ btrfs_item_key_to_cpu(leaf, &key, 0);
+ btrfs_release_path(path);
+ root->cache_progress = last;
+ mutex_unlock(&root->fs_commit_mutex);
+ schedule_timeout(1);
+ goto again;
+ } else
+ continue;
+ }
+
+ btrfs_item_key_to_cpu(leaf, &key, slot);
+
+ if (key.type != BTRFS_INODE_ITEM_KEY)
+ goto next;
+
+ if (key.objectid >= BTRFS_LAST_FREE_OBJECTID)
+ break;
+
+ if (last != (u64)-1 && last + 1 != key.objectid) {
+ __btrfs_add_free_space(ctl, last + 1,
+ key.objectid - last - 1);
+ wake_up(&root->cache_wait);
+ }
+
+ last = key.objectid;
+next:
+ path->slots[0]++;
+ }
+
+ if (last < BTRFS_LAST_FREE_OBJECTID - 1) {
+ __btrfs_add_free_space(ctl, last + 1,
+ BTRFS_LAST_FREE_OBJECTID - last - 1);
+ }
+
+ spin_lock(&root->cache_lock);
+ root->cached = BTRFS_CACHE_FINISHED;
+ spin_unlock(&root->cache_lock);
+
+ root->cache_progress = (u64)-1;
+ btrfs_unpin_free_ino(root);
+out:
+ wake_up(&root->cache_wait);
+ mutex_unlock(&root->fs_commit_mutex);
+
+ btrfs_free_path(path);
+
+ return ret;
+}
+
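+/*
+ * Make sure the free-ino cache is being populated: try to load it from
+ * the on-disk cache first, and fall back to scanning in a kthread.
+ */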
+static void start_caching(struct btrfs_root *root)
+{
+ struct task_struct *tsk;
+ int ret;
+
+ spin_lock(&root->cache_lock);
+ if (root->cached != BTRFS_CACHE_NO) {
+ spin_unlock(&root->cache_lock);
+ return;
+ }
+
+ root->cached = BTRFS_CACHE_STARTED;
+ spin_unlock(&root->cache_lock);
+
+ ret = load_free_ino_cache(root->fs_info, root);
+ if (ret == 1) {
+ spin_lock(&root->cache_lock);
+ root->cached = BTRFS_CACHE_FINISHED;
+ spin_unlock(&root->cache_lock);
+ return;
+ }
+
+ tsk = kthread_run(caching_kthread, root, "btrfs-ino-cache-%llu",
+ (unsigned long long)root->root_key.objectid);
+ BUG_ON(IS_ERR(tsk));
+}
+
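+/*
+ * Hand out a free inode number, kicking off (and waiting on) the caching
+ * work if the in-memory tree has nothing to offer yet.
+ */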
+int btrfs_find_free_ino(struct btrfs_root *root, u64 *objectid)
+{
+again:
+ *objectid = btrfs_find_ino_for_alloc(root);
+
+ if (*objectid != 0)
+ return 0;
+
+ start_caching(root);
+
+ wait_event(root->cache_wait,
+ root->cached == BTRFS_CACHE_FINISHED ||
+ root->free_ino_ctl->free_space > 0);
+
+ if (root->cached == BTRFS_CACHE_FINISHED &&
+ root->free_ino_ctl->free_space == 0)
+ return -ENOSPC;
+ else
+ goto again;
+}
+
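+/*
+ * Give an inode number back to the cache, pinning it if the caching
+ * thread may still walk past it in the commit root.
+ */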
+void btrfs_return_ino(struct btrfs_root *root, u64 objectid)
+{
+ struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
+ struct btrfs_free_space_ctl *pinned = root->free_ino_pinned;
+again:
+ if (root->cached == BTRFS_CACHE_FINISHED) {
+ __btrfs_add_free_space(ctl, objectid, 1);
+ } else {
+ /*
+ * If we are in the process of caching free ino chunks,
+ * then to avoid adding the same inode number to the
+ * free_ino tree twice across transactions, we leave it
+ * in the pinned tree until the transaction is committed
+ * or the caching work is done.
+ */
+
+ mutex_lock(&root->fs_commit_mutex);
+ spin_lock(&root->cache_lock);
+ if (root->cached == BTRFS_CACHE_FINISHED) {
+ spin_unlock(&root->cache_lock);
+ mutex_unlock(&root->fs_commit_mutex);
+ goto again;
+ }
+ spin_unlock(&root->cache_lock);
+
+ start_caching(root);
+
+ if (objectid <= root->cache_progress)
+ __btrfs_add_free_space(ctl, objectid, 1);
+ else
+ __btrfs_add_free_space(pinned, objectid, 1);
+
+ mutex_unlock(&root->fs_commit_mutex);
+ }
+}
+
+/*
+ * When a transaction is committed, we'll move those inode numbers which
+ * are smaller than root->cache_progress from the pinned tree to the
+ * free_ino tree, and the others will just be dropped, because the
+ * commit root we were searching has changed.
+ *
+ * Must be called with root->fs_commit_mutex held
+ */
+void btrfs_unpin_free_ino(struct btrfs_root *root)
+{
+ struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
+ struct rb_root *rbroot = &root->free_ino_pinned->free_space_offset;
+ struct btrfs_free_space *info;
+ struct rb_node *n;
+ u64 count;
+
+ while (1) {
+ n = rb_first(rbroot);
+ if (!n)
+ break;
+
+ info = rb_entry(n, struct btrfs_free_space, offset_index);
+ BUG_ON(info->bitmap);
+
+ if (info->offset > root->cache_progress)
+ goto free;
+ else if (info->offset + info->bytes > root->cache_progress)
+ count = root->cache_progress - info->offset + 1;
+ else
+ count = info->bytes;
+
+ __btrfs_add_free_space(ctl, info->offset, count);
+free:
+ rb_erase(&info->offset_index, rbroot);
+ kfree(info);
+ }
+}
+
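+/*
+ * INIT_THRESHOLD allows ~16K of ram in extent entries; with a unit of 1,
+ * a single bitmap page covers PAGE_CACHE_SIZE * 8 inode numbers.
+ */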
+#define INIT_THRESHOLD (((1024 * 32) / 2) / sizeof(struct btrfs_free_space))
+#define INODES_PER_BITMAP (PAGE_CACHE_SIZE * 8)
+
+/*
+ * The goal is to keep the memory used by the free_ino tree from
+ * exceeding what we would use if we stored everything in bitmaps.
+ */
+static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
+{
+ struct btrfs_free_space *info;
+ struct rb_node *n;
+ int max_ino;
+ int max_bitmaps;
+
+ n = rb_last(&ctl->free_space_offset);
+ if (!n) {
+ ctl->extents_thresh = INIT_THRESHOLD;
+ return;
+ }
+ info = rb_entry(n, struct btrfs_free_space, offset_index);
+
+ /*
+ * Find the maximum inode number in the filesystem. Note we
+ * ignore the fact that this can be a bitmap, because we are
+ * not doing precise calculation.
+ */
+ max_ino = info->bytes - 1;
+
+ max_bitmaps = ALIGN(max_ino, INODES_PER_BITMAP) / INODES_PER_BITMAP;
+ if (max_bitmaps <= ctl->total_bitmaps) {
+ ctl->extents_thresh = 0;
+ return;
+ }
+
+ ctl->extents_thresh = (max_bitmaps - ctl->total_bitmaps) *
+ PAGE_CACHE_SIZE / sizeof(*info);
+}
+
+/*
+ * We don't fall back to a bitmap if we are below the extents threshold
+ * or if this chunk of inode numbers is a big one.
+ */
+static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
+ struct btrfs_free_space *info)
+{
+ if (ctl->free_extents < ctl->extents_thresh ||
+ info->bytes > INODES_PER_BITMAP / 10)
+ return false;
+
+ return true;
+}
+
+static struct btrfs_free_space_op free_ino_op = {
+ .recalc_thresholds = recalculate_thresholds,
+ .use_bitmap = use_bitmap,
+};
+
+static void pinned_recalc_thresholds(struct btrfs_free_space_ctl *ctl)
+{
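+ /* Nothing to recalculate: the pinned tree never converts to bitmaps. */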
+}
+
+static bool pinned_use_bitmap(struct btrfs_free_space_ctl *ctl,
+ struct btrfs_free_space *info)
+{
+ /*
+ * We always use extents for two reasons:
+ *
+ * - The pinned tree is only used during the process of caching
+ * work.
+ * - Make code simpler. See btrfs_unpin_free_ino().
+ */
+ return false;
+}
+
+static struct btrfs_free_space_op pinned_free_ino_op = {
+ .recalc_thresholds = pinned_recalc_thresholds,
+ .use_bitmap = pinned_use_bitmap,
+};
+
+void btrfs_init_free_ino_ctl(struct btrfs_root *root)
+{
+ struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
+ struct btrfs_free_space_ctl *pinned = root->free_ino_pinned;
+
+ spin_lock_init(&ctl->tree_lock);
+ ctl->unit = 1;
+ ctl->start = 0;
+ ctl->private = NULL;
+ ctl->op = &free_ino_op;
+
+ /*
+ * Initially we allow the use of 16K of ram to cache chunks of
+ * inode numbers before we resort to bitmaps. This is somewhat
+ * arbitrary, but it will be adjusted at runtime.
+ */
+ ctl->extents_thresh = INIT_THRESHOLD;
+
+ spin_lock_init(&pinned->tree_lock);
+ pinned->unit = 1;
+ pinned->start = 0;
+ pinned->private = NULL;
+ pinned->extents_thresh = 0;
+ pinned->op = &pinned_free_ino_op;
+}
+
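+/*
+ * Persist the in-memory free-ino cache through its cache inode: truncate
+ * any stale copy, preallocate room for the new one, and write it out.
+ */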
+int btrfs_save_ino_cache(struct btrfs_root *root,
+ struct btrfs_trans_handle *trans)
+{
+ struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
+ struct btrfs_path *path;
+ struct inode *inode;
+ u64 alloc_hint = 0;
+ int ret;
+ int prealloc;
+ bool retry = false;
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+again:
+ inode = lookup_free_ino_inode(root, path);
+ if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
+ ret = PTR_ERR(inode);
+ goto out;
+ }
+
+ if (IS_ERR(inode)) {
+ BUG_ON(retry);
+ retry = true;
+
+ ret = create_free_ino_inode(root, trans, path);
+ if (ret)
+ goto out;
+ goto again;
+ }
+
+ BTRFS_I(inode)->generation = 0;
+ ret = btrfs_update_inode(trans, root, inode);
+ WARN_ON(ret);
+
+ if (i_size_read(inode) > 0) {
+ ret = btrfs_truncate_free_space_cache(root, trans, path, inode);
+ if (ret)
+ goto out_put;
+ }
+
+ spin_lock(&root->cache_lock);
+ if (root->cached != BTRFS_CACHE_FINISHED) {
+ ret = -1;
+ spin_unlock(&root->cache_lock);
+ goto out_put;
+ }
+ spin_unlock(&root->cache_lock);
+
+ spin_lock(&ctl->tree_lock);
+ prealloc = sizeof(struct btrfs_free_space) * ctl->free_extents;
+ prealloc = ALIGN(prealloc, PAGE_CACHE_SIZE);
+ prealloc += ctl->total_bitmaps * PAGE_CACHE_SIZE;
+ spin_unlock(&ctl->tree_lock);
+
+ /* Just to make sure we have enough space */
+ prealloc += 8 * PAGE_CACHE_SIZE;
+
+ ret = btrfs_check_data_free_space(inode, prealloc);
+ if (ret)
+ goto out_put;
+
+ ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, prealloc,
+ prealloc, prealloc, &alloc_hint);
+ if (ret)
+ goto out_put;
+ btrfs_free_reserved_data_space(inode, prealloc);
+
+out_put:
+ iput(inode);
+out:
+ if (ret == 0)
+ ret = btrfs_write_out_ino_cache(root, trans, path);
+
+ btrfs_free_path(path);
+ return ret;
+}
+
+static int btrfs_find_highest_objectid(struct btrfs_root *root, u64 *objectid)
{
struct btrfs_path *path;
int ret;
return ret;
}
-int btrfs_find_free_objectid(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- u64 dirid, u64 *objectid)
+int btrfs_find_free_objectid(struct btrfs_root *root, u64 *objectid)
{
int ret;
mutex_lock(&root->objectid_mutex);
if (unlikely(root->highest_objectid < BTRFS_FIRST_FREE_OBJECTID)) {
- ret = btrfs_find_highest_inode(root, &root->highest_objectid);
+ ret = btrfs_find_highest_objectid(root,
+ &root->highest_objectid);
if (ret)
goto out;
}
#include <linux/posix_acl.h>
#include <linux/falloc.h>
#include <linux/slab.h>
+ #include <linux/ratelimit.h>
#include "compat.h"
#include "ctree.h"
#include "disk-io.h"
#include "compression.h"
#include "locking.h"
#include "free-space-cache.h"
+#include "inode-map.h"
struct btrfs_iget_args {
u64 ino;
path->leave_spinning = 1;
btrfs_set_trans_block_group(trans, inode);
- key.objectid = inode->i_ino;
+ key.objectid = btrfs_ino(inode);
key.offset = start;
btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
datasize = btrfs_file_extent_calc_inline_size(cur_size);
async_extent->start +
async_extent->ram_size - 1, 0);
- em = alloc_extent_map(GFP_NOFS);
+ em = alloc_extent_map();
BUG_ON(!em);
em->start = async_extent->start;
em->len = async_extent->ram_size;
return alloc_hint;
}
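+/*
+ * Inodes backing the free space and free-ino caches are updated directly
+ * at transaction commit time, so they get special nolock treatment below.
+ */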
+static inline bool is_free_space_inode(struct btrfs_root *root,
+ struct inode *inode)
+{
+ if (root == root->fs_info->tree_root ||
+ BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID)
+ return true;
+ return false;
+}
+
/*
* when extent_io.c finds a delayed allocation range in the file,
* the call backs end up in this code. The basic idea is to
struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
int ret = 0;
- BUG_ON(root == root->fs_info->tree_root);
+ BUG_ON(is_free_space_inode(root, inode));
trans = btrfs_join_transaction(root, 1);
BUG_ON(IS_ERR(trans));
btrfs_set_trans_block_group(trans, inode);
(u64)-1, &ins, 1);
BUG_ON(ret);
- em = alloc_extent_map(GFP_NOFS);
+ em = alloc_extent_map();
BUG_ON(!em);
em->start = start;
em->orig_start = em->start;
int type;
int nocow;
int check_prev = 1;
- bool nolock = false;
+ bool nolock;
+ u64 ino = btrfs_ino(inode);
path = btrfs_alloc_path();
BUG_ON(!path);
- if (root == root->fs_info->tree_root) {
- nolock = true;
+
+ nolock = is_free_space_inode(root, inode);
+
+ if (nolock)
trans = btrfs_join_transaction_nolock(root, 1);
- } else {
+ else
trans = btrfs_join_transaction(root, 1);
- }
BUG_ON(IS_ERR(trans));
cow_start = (u64)-1;
cur_offset = start;
while (1) {
- ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
+ ret = btrfs_lookup_file_extent(trans, root, path, ino,
cur_offset, 0);
BUG_ON(ret < 0);
if (ret > 0 && path->slots[0] > 0 && check_prev) {
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &found_key,
path->slots[0] - 1);
- if (found_key.objectid == inode->i_ino &&
+ if (found_key.objectid == ino &&
found_key.type == BTRFS_EXTENT_DATA_KEY)
path->slots[0]--;
}
num_bytes = 0;
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
- if (found_key.objectid > inode->i_ino ||
+ if (found_key.objectid > ino ||
found_key.type > BTRFS_EXTENT_DATA_KEY ||
found_key.offset > end)
break;
goto out_check;
if (btrfs_extent_readonly(root, disk_bytenr))
goto out_check;
- if (btrfs_cross_ref_exist(trans, root, inode->i_ino,
+ if (btrfs_cross_ref_exist(trans, root, ino,
found_key.offset -
extent_offset, disk_bytenr))
goto out_check;
goto next_slot;
}
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
if (cow_start != (u64)-1) {
ret = cow_file_range(inode, locked_page, cow_start,
found_key.offset - 1, page_started,
struct extent_map *em;
struct extent_map_tree *em_tree;
em_tree = &BTRFS_I(inode)->extent_tree;
- em = alloc_extent_map(GFP_NOFS);
+ em = alloc_extent_map();
BUG_ON(!em);
em->start = cur_offset;
em->orig_start = em->start;
if (cur_offset > end)
break;
}
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
if (cur_offset <= end && cow_start == (u64)-1)
cow_start = cur_offset;
if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
struct btrfs_root *root = BTRFS_I(inode)->root;
u64 len = state->end + 1 - state->start;
- int do_list = (root->root_key.objectid !=
- BTRFS_ROOT_TREE_OBJECTID);
+ bool do_list = !is_free_space_inode(root, inode);
if (*bits & EXTENT_FIRST_DELALLOC)
*bits &= ~EXTENT_FIRST_DELALLOC;
if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
struct btrfs_root *root = BTRFS_I(inode)->root;
u64 len = state->end + 1 - state->start;
- int do_list = (root->root_key.objectid !=
- BTRFS_ROOT_TREE_OBJECTID);
+ bool do_list = !is_free_space_inode(root, inode);
if (*bits & EXTENT_FIRST_DELALLOC)
*bits &= ~EXTENT_FIRST_DELALLOC;
skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
- if (root == root->fs_info->tree_root)
+ if (is_free_space_inode(root, inode))
ret = btrfs_bio_wq_end_io(root->fs_info, bio, 2);
else
ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
&hint, 0);
BUG_ON(ret);
- ins.objectid = inode->i_ino;
+ ins.objectid = btrfs_ino(inode);
ins.offset = file_pos;
ins.type = BTRFS_EXTENT_DATA_KEY;
ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi));
ins.type = BTRFS_EXTENT_ITEM_KEY;
ret = btrfs_alloc_reserved_file_extent(trans, root,
root->root_key.objectid,
- inode->i_ino, file_pos, &ins);
+ btrfs_ino(inode), file_pos, &ins);
BUG_ON(ret);
btrfs_free_path(path);
struct extent_state *cached_state = NULL;
int compress_type = 0;
int ret;
- bool nolock = false;
+ bool nolock;
ret = btrfs_dec_test_ordered_pending(inode, &ordered_extent, start,
end - start + 1);
return 0;
BUG_ON(!ordered_extent);
- nolock = (root == root->fs_info->tree_root);
+ nolock = is_free_space_inode(root, inode);
if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
BUG_ON(!list_empty(&ordered_extent->list));
}
read_unlock(&em_tree->lock);
- if (!em || IS_ERR(em)) {
+ if (IS_ERR_OR_NULL(em)) {
kfree(failrec);
return -EIO;
}
return 0;
zeroit:
- if (printk_ratelimit()) {
- printk(KERN_INFO "btrfs csum failed ino %lu off %llu csum %u "
- "private %llu\n", page->mapping->host->i_ino,
+ printk_ratelimited(KERN_INFO "btrfs csum failed ino %llu off %llu csum %u "
+ "private %llu\n",
+ (unsigned long long)btrfs_ino(page->mapping->host),
(unsigned long long)start, csum,
(unsigned long long)private);
- }
memset(kaddr + offset, 1, end - start + 1);
flush_dcache_page(page);
kunmap_atomic(kaddr, KM_USER0);
/* insert an orphan item to track this unlinked/truncated file */
if (insert >= 1) {
- ret = btrfs_insert_orphan_item(trans, root, inode->i_ino);
+ ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode));
BUG_ON(ret);
}
spin_unlock(&root->orphan_lock);
if (trans && delete_item) {
- ret = btrfs_del_orphan_item(trans, root, inode->i_ino);
+ ret = btrfs_del_orphan_item(trans, root, btrfs_ino(inode));
BUG_ON(ret);
}
break;
/* release the path since we're done with it */
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
/*
* this is where we are basically btrfs_lookup, without the
* try to precache a NULL acl entry for files that don't have
* any xattrs or acls
*/
- maybe_acls = acls_after_inode_item(leaf, path->slots[0], inode->i_ino);
+ maybe_acls = acls_after_inode_item(leaf, path->slots[0],
+ btrfs_ino(inode));
if (!maybe_acls)
cache_no_acl(inode);
struct extent_buffer *leaf;
int ret;
+ /*
+ * If this is a free space inode, it is updated directly when the
+ * transaction is committed, so its update must not be delayed,
+ * or a deadlock would occur.
+ */
+ if (!is_free_space_inode(root, inode)) {
+ ret = btrfs_delayed_update_inode(trans, root, inode);
+ if (!ret)
+ btrfs_set_inode_last_trans(trans, inode);
+ return ret;
+ }
+
path = btrfs_alloc_path();
- BUG_ON(!path);
+ if (!path)
+ return -ENOMEM;
+
path->leave_spinning = 1;
- ret = btrfs_lookup_inode(trans, root, path,
- &BTRFS_I(inode)->location, 1);
+ ret = btrfs_lookup_inode(trans, root, path, &BTRFS_I(inode)->location,
+ 1);
if (ret) {
if (ret > 0)
ret = -ENOENT;
btrfs_unlock_up_safe(path, 1);
leaf = path->nodes[0];
inode_item = btrfs_item_ptr(leaf, path->slots[0],
- struct btrfs_inode_item);
+ struct btrfs_inode_item);
fill_inode_item(trans, leaf, inode_item, inode);
btrfs_mark_buffer_dirty(leaf);
return ret;
}
-
/*
* unlink helper that gets used here in inode.c and in the tree logging
* recovery code. It remove a link in a directory with a given name, and
struct btrfs_dir_item *di;
struct btrfs_key key;
u64 index;
+ u64 ino = btrfs_ino(inode);
+ u64 dir_ino = btrfs_ino(dir);
path = btrfs_alloc_path();
if (!path) {
}
path->leave_spinning = 1;
- di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
+ di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
name, name_len, -1);
if (IS_ERR(di)) {
ret = PTR_ERR(di);
ret = btrfs_delete_one_dir_name(trans, root, path, di);
if (ret)
goto err;
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
- ret = btrfs_del_inode_ref(trans, root, name, name_len,
- inode->i_ino,
- dir->i_ino, &index);
+ ret = btrfs_del_inode_ref(trans, root, name, name_len, ino,
+ dir_ino, &index);
if (ret) {
printk(KERN_INFO "btrfs failed to delete reference to %.*s, "
- "inode %lu parent %lu\n", name_len, name,
- inode->i_ino, dir->i_ino);
+ "inode %llu parent %llu\n", name_len, name,
+ (unsigned long long)ino, (unsigned long long)dir_ino);
goto err;
}
- di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino,
- index, name, name_len, -1);
- if (IS_ERR(di)) {
- ret = PTR_ERR(di);
- goto err;
- }
- if (!di) {
- ret = -ENOENT;
+ ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
+ if (ret)
goto err;
- }
- ret = btrfs_delete_one_dir_name(trans, root, path, di);
- btrfs_release_path(path);
ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len,
- inode, dir->i_ino);
+ inode, dir_ino);
BUG_ON(ret != 0 && ret != -ENOENT);
ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len,
int check_link = 1;
int err = -ENOSPC;
int ret;
+ u64 ino = btrfs_ino(inode);
+ u64 dir_ino = btrfs_ino(dir);
trans = btrfs_start_transaction(root, 10);
if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC)
return trans;
- if (inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
+ if (ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
return ERR_PTR(-ENOSPC);
/* check if there is someone else holds reference */
} else {
check_link = 0;
}
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
ret = btrfs_lookup_inode(trans, root, path,
&BTRFS_I(inode)->location, 0);
} else {
check_link = 0;
}
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
if (ret == 0 && S_ISREG(inode->i_mode)) {
ret = btrfs_lookup_file_extent(trans, root, path,
- inode->i_ino, (u64)-1, 0);
+ ino, (u64)-1, 0);
if (ret < 0) {
err = ret;
goto out;
BUG_ON(ret == 0);
if (check_path_shared(root, path))
goto out;
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
}
if (!check_link) {
goto out;
}
- di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
+ di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
dentry->d_name.name, dentry->d_name.len, 0);
if (IS_ERR(di)) {
err = PTR_ERR(di);
err = 0;
goto out;
}
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
ref = btrfs_lookup_inode_ref(trans, root, path,
dentry->d_name.name, dentry->d_name.len,
- inode->i_ino, dir->i_ino, 0);
+ ino, dir_ino, 0);
if (IS_ERR(ref)) {
err = PTR_ERR(ref);
goto out;
if (check_path_shared(root, path))
goto out;
index = btrfs_inode_ref_index(path->nodes[0], ref);
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
- di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino, index,
+ /*
+ * This is a commit root search. If we can look up the inode item and
+ * other related items in the commit root, it means the transaction
+ * that created the dir/file has been committed, and the dir index item
+ * whose insertion we delayed has also been inserted into the commit
+ * root. So we needn't worry about the delayed insertion of the dir
+ * index item here.
+ */
+ di = btrfs_lookup_dir_index_item(trans, root, path, dir_ino, index,
dentry->d_name.name, dentry->d_name.len, 0);
if (IS_ERR(di)) {
err = PTR_ERR(di);
struct btrfs_key key;
u64 index;
int ret;
+ u64 dir_ino = btrfs_ino(dir);
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
- di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
+ di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
name, name_len, -1);
- BUG_ON(!di || IS_ERR(di));
+ BUG_ON(IS_ERR_OR_NULL(di));
leaf = path->nodes[0];
btrfs_dir_item_key_to_cpu(leaf, di, &key);
WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
ret = btrfs_delete_one_dir_name(trans, root, path, di);
BUG_ON(ret);
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
ret = btrfs_del_root_ref(trans, root->fs_info->tree_root,
objectid, root->root_key.objectid,
- dir->i_ino, &index, name, name_len);
+ dir_ino, &index, name, name_len);
if (ret < 0) {
BUG_ON(ret != -ENOENT);
- di = btrfs_search_dir_index_item(root, path, dir->i_ino,
+ di = btrfs_search_dir_index_item(root, path, dir_ino,
name, name_len);
- BUG_ON(!di || IS_ERR(di));
+ BUG_ON(IS_ERR_OR_NULL(di));
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
index = key.offset;
}
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
- di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino,
- index, name, name_len, -1);
- BUG_ON(IS_ERR_OR_NULL(di));
-
- leaf = path->nodes[0];
- btrfs_dir_item_key_to_cpu(leaf, di, &key);
- WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
- ret = btrfs_delete_one_dir_name(trans, root, path, di);
+ ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
BUG_ON(ret);
- btrfs_release_path(path);
btrfs_i_size_write(dir, dir->i_size - name_len * 2);
dir->i_mtime = dir->i_ctime = CURRENT_TIME;
ret = btrfs_update_inode(trans, root, dir);
BUG_ON(ret);
- btrfs_free_path(path);
return 0;
}
unsigned long nr = 0;
if (inode->i_size > BTRFS_EMPTY_DIR_SIZE ||
- inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
+ btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID)
return -ENOTEMPTY;
trans = __unlink_start_trans(dir, dentry);
btrfs_set_trans_block_group(trans, dir);
- if (unlikely(inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
+ if (unlikely(btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
err = btrfs_unlink_subvol(trans, root, dir,
BTRFS_I(inode)->location.objectid,
dentry->d_name.name,
return err;
}
- #if 0
- /*
- * when truncating bytes in a file, it is possible to avoid reading
- * the leaves that contain only checksum items. This can be the
- * majority of the IO required to delete a large file, but it must
- * be done carefully.
- *
- * The keys in the level just above the leaves are checked to make sure
- * the lowest key in a given leaf is a csum key, and starts at an offset
- * after the new size.
- *
- * Then the key for the next leaf is checked to make sure it also has
- * a checksum item for the same file. If it does, we know our target leaf
- * contains only checksum items, and it can be safely freed without reading
- * it.
- *
- * This is just an optimization targeted at large files. It may do
- * nothing. It will return 0 unless things went badly.
- */
- static noinline int drop_csum_leaves(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_path *path,
- struct inode *inode, u64 new_size)
- {
- struct btrfs_key key;
- int ret;
- int nritems;
- struct btrfs_key found_key;
- struct btrfs_key other_key;
- struct btrfs_leaf_ref *ref;
- u64 leaf_gen;
- u64 leaf_start;
-
- path->lowest_level = 1;
- key.objectid = inode->i_ino;
- key.type = BTRFS_CSUM_ITEM_KEY;
- key.offset = new_size;
- again:
- ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
- if (ret < 0)
- goto out;
-
- if (path->nodes[1] == NULL) {
- ret = 0;
- goto out;
- }
- ret = 0;
- btrfs_node_key_to_cpu(path->nodes[1], &found_key, path->slots[1]);
- nritems = btrfs_header_nritems(path->nodes[1]);
-
- if (!nritems)
- goto out;
-
- if (path->slots[1] >= nritems)
- goto next_node;
-
- /* did we find a key greater than anything we want to delete? */
- if (found_key.objectid > inode->i_ino ||
- (found_key.objectid == inode->i_ino && found_key.type > key.type))
- goto out;
-
- /* we check the next key in the node to make sure the leave contains
- * only checksum items. This comparison doesn't work if our
- * leaf is the last one in the node
- */
- if (path->slots[1] + 1 >= nritems) {
- next_node:
- /* search forward from the last key in the node, this
- * will bring us into the next node in the tree
- */
- btrfs_node_key_to_cpu(path->nodes[1], &found_key, nritems - 1);
-
- /* unlikely, but we inc below, so check to be safe */
- if (found_key.offset == (u64)-1)
- goto out;
-
- /* search_forward needs a path with locks held, do the
- * search again for the original key. It is possible
- * this will race with a balance and return a path that
- * we could modify, but this drop is just an optimization
- * and is allowed to miss some leaves.
- */
- btrfs_release_path(root, path);
- found_key.offset++;
-
- /* setup a max key for search_forward */
- other_key.offset = (u64)-1;
- other_key.type = key.type;
- other_key.objectid = key.objectid;
-
- path->keep_locks = 1;
- ret = btrfs_search_forward(root, &found_key, &other_key,
- path, 0, 0);
- path->keep_locks = 0;
- if (ret || found_key.objectid != key.objectid ||
- found_key.type != key.type) {
- ret = 0;
- goto out;
- }
-
- key.offset = found_key.offset;
- btrfs_release_path(root, path);
- cond_resched();
- goto again;
- }
-
- /* we know there's one more slot after us in the tree,
- * read that key so we can verify it is also a checksum item
- */
- btrfs_node_key_to_cpu(path->nodes[1], &other_key, path->slots[1] + 1);
-
- if (found_key.objectid < inode->i_ino)
- goto next_key;
-
- if (found_key.type != key.type || found_key.offset < new_size)
- goto next_key;
-
- /*
- * if the key for the next leaf isn't a csum key from this objectid,
- * we can't be sure there aren't good items inside this leaf.
- * Bail out
- */
- if (other_key.objectid != inode->i_ino || other_key.type != key.type)
- goto out;
-
- leaf_start = btrfs_node_blockptr(path->nodes[1], path->slots[1]);
- leaf_gen = btrfs_node_ptr_generation(path->nodes[1], path->slots[1]);
- /*
- * it is safe to delete this leaf, it contains only
- * csum items from this inode at an offset >= new_size
- */
- ret = btrfs_del_leaf(trans, root, path, leaf_start);
- BUG_ON(ret);
-
- if (root->ref_cows && leaf_gen < trans->transid) {
- ref = btrfs_alloc_leaf_ref(root, 0);
- if (ref) {
- ref->root_gen = root->root_key.offset;
- ref->bytenr = leaf_start;
- ref->owner = 0;
- ref->generation = leaf_gen;
- ref->nritems = 0;
-
- btrfs_sort_leaf_ref(ref);
-
- ret = btrfs_add_leaf_ref(root, ref, 0);
- WARN_ON(ret);
- btrfs_free_leaf_ref(root, ref);
- } else {
- WARN_ON(1);
- }
- }
- next_key:
- btrfs_release_path(root, path);
-
- if (other_key.objectid == inode->i_ino &&
- other_key.type == key.type && other_key.offset > key.offset) {
- key.offset = other_key.offset;
- cond_resched();
- goto again;
- }
- ret = 0;
- out:
- /* fixup any changes we've made to the path */
- path->lowest_level = 0;
- path->keep_locks = 0;
- btrfs_release_path(root, path);
- return ret;
- }
-
- #endif
-
/*
* this can truncate away extent items, csum items and directory items.
* It starts at a high offset and removes keys until it can't find
int encoding;
int ret;
int err = 0;
+ u64 ino = btrfs_ino(inode);
BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);
if (root->ref_cows || root == root->fs_info->tree_root)
btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0);
+ /*
+ * This function is also used to drop the items in the log tree before
+ * we relog the inode, so if root != BTRFS_I(inode)->root, it means
+ * it is used to drop the logged items. So we shouldn't kill the delayed
+ * items.
+ */
+ if (min_type == 0 && root == BTRFS_I(inode)->root)
+ btrfs_kill_delayed_inode_items(inode);
+
path = btrfs_alloc_path();
BUG_ON(!path);
path->reada = -1;
- key.objectid = inode->i_ino;
+ key.objectid = ino;
key.offset = (u64)-1;
key.type = (u8)-1;
found_type = btrfs_key_type(&found_key);
encoding = 0;
- if (found_key.objectid != inode->i_ino)
+ if (found_key.objectid != ino)
break;
if (found_type < min_type)
ret = btrfs_free_extent(trans, root, extent_start,
extent_num_bytes, 0,
btrfs_header_owner(leaf),
- inode->i_ino, extent_offset);
+ ino, extent_offset);
BUG_ON(ret);
}
if (path->slots[0] == 0 ||
path->slots[0] != pending_del_slot) {
- if (root->ref_cows) {
+ if (root->ref_cows &&
+ BTRFS_I(inode)->location.objectid !=
+ BTRFS_FREE_INO_OBJECTID) {
err = -EAGAIN;
goto out;
}
BUG_ON(ret);
pending_del_nr = 0;
}
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
goto search_again;
} else {
path->slots[0]--;
while (1) {
em = btrfs_get_extent(inode, NULL, 0, cur_offset,
block_end - cur_offset, 0);
- BUG_ON(IS_ERR(em) || !em);
+ BUG_ON(IS_ERR_OR_NULL(em));
last_byte = min(extent_map_end(em), block_end);
last_byte = (last_byte + mask) & ~mask;
if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
break;
err = btrfs_insert_file_extent(trans, root,
- inode->i_ino, cur_offset, 0,
+ btrfs_ino(inode), cur_offset, 0,
0, hole_size, 0, hole_size,
0, 0, 0);
if (err)
truncate_inode_pages(&inode->i_data, 0);
if (inode->i_nlink && (btrfs_root_refs(&root->root_item) != 0 ||
- root == root->fs_info->tree_root))
+ is_free_space_inode(root, inode)))
goto no_delete;
if (is_bad_inode(inode)) {
BUG_ON(ret);
}
+ if (!(root == root->fs_info->tree_root ||
+ root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID))
+ btrfs_return_ino(root, btrfs_ino(inode));
+
nr = trans->blocks_used;
btrfs_end_transaction(trans, root);
btrfs_btree_balance_dirty(root, nr);
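is_free_space_inode(), used in the checks above, widens the old bare root == tree_root test to also cover the new per-root inode-number cache; a sketch matching both call sites:

static int is_free_space_inode(struct btrfs_root *root, struct inode *inode)
{
	/* block-group space cache inodes live in the tree root; the
	 * per-root inode cache uses the reserved BTRFS_FREE_INO_OBJECTID
	 */
	if (root == root->fs_info->tree_root ||
	    BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID)
		return 1;
	return 0;
}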
path = btrfs_alloc_path();
BUG_ON(!path);
- di = btrfs_lookup_dir_item(NULL, root, path, dir->i_ino, name,
+ di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir), name,
namelen, 0);
if (IS_ERR(di))
ret = PTR_ERR(di);
- if (!di || IS_ERR(di))
+ if (IS_ERR_OR_NULL(di))
goto out_err;
btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
leaf = path->nodes[0];
ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
- if (btrfs_root_ref_dirid(leaf, ref) != dir->i_ino ||
+ if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(dir) ||
btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len)
goto out;
if (ret)
goto out;
- btrfs_release_path(root->fs_info->tree_root, path);
+ btrfs_release_path(path);
new_root = btrfs_read_fs_root_no_name(root->fs_info, location);
if (IS_ERR(new_root)) {
struct btrfs_inode *entry;
struct rb_node **p;
struct rb_node *parent;
+ u64 ino = btrfs_ino(inode);
again:
p = &root->inode_tree.rb_node;
parent = NULL;
parent = *p;
entry = rb_entry(parent, struct btrfs_inode, rb_node);
- if (inode->i_ino < entry->vfs_inode.i_ino)
+ if (ino < btrfs_ino(&entry->vfs_inode))
p = &parent->rb_left;
- else if (inode->i_ino > entry->vfs_inode.i_ino)
+ else if (ino > btrfs_ino(&entry->vfs_inode))
p = &parent->rb_right;
else {
WARN_ON(!(entry->vfs_inode.i_state &
prev = node;
entry = rb_entry(node, struct btrfs_inode, rb_node);
- if (objectid < entry->vfs_inode.i_ino)
+ if (objectid < btrfs_ino(&entry->vfs_inode))
node = node->rb_left;
- else if (objectid > entry->vfs_inode.i_ino)
+ else if (objectid > btrfs_ino(&entry->vfs_inode))
node = node->rb_right;
else
break;
if (!node) {
while (prev) {
entry = rb_entry(prev, struct btrfs_inode, rb_node);
- if (objectid <= entry->vfs_inode.i_ino) {
+ if (objectid <= btrfs_ino(&entry->vfs_inode)) {
node = prev;
break;
}
}
while (node) {
entry = rb_entry(node, struct btrfs_inode, rb_node);
- objectid = entry->vfs_inode.i_ino + 1;
+ objectid = btrfs_ino(&entry->vfs_inode) + 1;
inode = igrab(&entry->vfs_inode);
if (inode) {
spin_unlock(&root->inode_lock);
static int btrfs_find_actor(struct inode *inode, void *opaque)
{
struct btrfs_iget_args *args = opaque;
- return args->ino == inode->i_ino &&
+ return args->ino == btrfs_ino(inode) &&
args->root == BTRFS_I(inode)->root;
}
return d_splice_alias(inode, dentry);
}
-static unsigned char btrfs_filetype_table[] = {
+unsigned char btrfs_filetype_table[] = {
DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
};
struct btrfs_key key;
struct btrfs_key found_key;
struct btrfs_path *path;
+ struct list_head ins_list;
+ struct list_head del_list;
int ret;
struct extent_buffer *leaf;
int slot;
char tmp_name[32];
char *name_ptr;
int name_len;
+ int is_curr = 0; /* filp->f_pos points to the current index? */
/* FIXME, use a real flag for deciding about the key type */
if (root->fs_info->tree_root == root)
/* special case for "." */
if (filp->f_pos == 0) {
- over = filldir(dirent, ".", 1,
- 1, inode->i_ino,
- DT_DIR);
+ over = filldir(dirent, ".", 1, 1, btrfs_ino(inode), DT_DIR);
if (over)
return 0;
filp->f_pos = 1;
filp->f_pos = 2;
}
path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
path->reada = 2;
+ if (key_type == BTRFS_DIR_INDEX_KEY) {
+ INIT_LIST_HEAD(&ins_list);
+ INIT_LIST_HEAD(&del_list);
+ btrfs_get_delayed_items(inode, &ins_list, &del_list);
+ }
+
btrfs_set_key_type(&key, key_type);
key.offset = filp->f_pos;
- key.objectid = inode->i_ino;
+ key.objectid = btrfs_ino(inode);
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
break;
if (found_key.offset < filp->f_pos)
goto next;
+ if (key_type == BTRFS_DIR_INDEX_KEY &&
+ btrfs_should_delete_dir_index(&del_list,
+ found_key.offset))
+ goto next;
filp->f_pos = found_key.offset;
+ is_curr = 1;
di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
di_cur = 0;
path->slots[0]++;
}
+ if (key_type == BTRFS_DIR_INDEX_KEY) {
+ if (is_curr)
+ filp->f_pos++;
+ ret = btrfs_readdir_delayed_dir_index(filp, dirent, filldir,
+ &ins_list);
+ if (ret)
+ goto nopos;
+ }
+
/* Reached end of directory/root. Bump pos past the last item. */
if (key_type == BTRFS_DIR_INDEX_KEY)
/*
nopos:
ret = 0;
err:
+ if (key_type == BTRFS_DIR_INDEX_KEY)
+ btrfs_put_delayed_items(&ins_list, &del_list);
btrfs_free_path(path);
return ret;
}
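The readdir changes above merge entries from the delayed-item lists with the on-disk index; in outline (helper names from the calls above, behavior inferred from how they are used):

/*
 * btrfs_get_delayed_items()         - pin the pending delayed insertions
 *                                     (ins_list) and deletions (del_list)
 * btrfs_should_delete_dir_index()   - while walking the on-disk index,
 *                                     skip entries with a pending delete
 * btrfs_readdir_delayed_dir_index() - after the on-disk walk, emit the
 *                                     pending insertions not yet on disk
 * btrfs_put_delayed_items()         - drop the references taken above
 */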
return 0;
smp_mb();
- nolock = (root->fs_info->closing && root == root->fs_info->tree_root);
+ if (root->fs_info->closing && is_free_space_inode(root, inode))
+ nolock = true;
if (wbc->sync_mode == WB_SYNC_ALL) {
if (nolock)
btrfs_end_transaction(trans, root);
trans = btrfs_start_transaction(root, 1);
if (IS_ERR(trans)) {
- if (printk_ratelimit()) {
- printk(KERN_ERR "btrfs: fail to "
- "dirty inode %lu error %ld\n",
- inode->i_ino, PTR_ERR(trans));
- }
+ printk_ratelimited(KERN_ERR "btrfs: fail to "
+ "dirty inode %llu error %ld\n",
+ (unsigned long long)btrfs_ino(inode),
+ PTR_ERR(trans));
return;
}
btrfs_set_trans_block_group(trans, inode);
ret = btrfs_update_inode(trans, root, inode);
if (ret) {
- if (printk_ratelimit()) {
- printk(KERN_ERR "btrfs: fail to "
- "dirty inode %lu error %d\n",
- inode->i_ino, ret);
- }
+ printk_ratelimited(KERN_ERR "btrfs: fail to "
+ "dirty inode %llu error %d\n",
+ (unsigned long long)btrfs_ino(inode),
+ ret);
}
}
btrfs_end_transaction(trans, root);
+ if (BTRFS_I(inode)->delayed_node)
+ btrfs_balance_delayed_items(root);
}
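printk_ratelimited() folds the old printk_ratelimit() guard into the call site; roughly, per include/linux/printk.h of this era:

#define printk_ratelimited(fmt, ...)  ({				\
	static DEFINE_RATELIMIT_STATE(_rs,				\
				      DEFAULT_RATELIMIT_INTERVAL,	\
				      DEFAULT_RATELIMIT_BURST);		\
									\
	if (__ratelimit(&_rs))						\
		printk(fmt, ##__VA_ARGS__);				\
})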
/*
struct extent_buffer *leaf;
int ret;
- key.objectid = inode->i_ino;
+ key.objectid = btrfs_ino(inode);
btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY);
key.offset = (u64)-1;
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
- if (found_key.objectid != inode->i_ino ||
+ if (found_key.objectid != btrfs_ino(inode) ||
btrfs_key_type(&found_key) != BTRFS_DIR_INDEX_KEY) {
BTRFS_I(inode)->index_cnt = 2;
goto out;
int ret = 0;
if (BTRFS_I(dir)->index_cnt == (u64)-1) {
- ret = btrfs_set_inode_index_count(dir);
- if (ret)
- return ret;
+ ret = btrfs_inode_delayed_dir_index_count(dir);
+ if (ret) {
+ ret = btrfs_set_inode_index_count(dir);
+ if (ret)
+ return ret;
+ }
}
*index = BTRFS_I(dir)->index_cnt;
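The fallback order matters: btrfs_inode_delayed_dir_index_count() only succeeds when an in-memory delayed node already tracks the directory, saving the tree search in btrfs_set_inode_index_count(). A hypothetical sketch (the field and helper names are assumptions based on the delayed-inode design, not taken from this patch):

int btrfs_inode_delayed_dir_index_count(struct inode *inode)
{
	struct btrfs_delayed_node *node = btrfs_get_delayed_node(inode);

	if (!node)
		return -ENOENT;	/* no delayed node: caller hits the tree */
	if (!node->index_cnt) {
		btrfs_release_delayed_node(node);
		return -EINVAL;
	}
	BTRFS_I(inode)->index_cnt = node->index_cnt;
	btrfs_release_delayed_node(node);
	return 0;
}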
return ERR_PTR(-ENOMEM);
}
+ /*
+ * we have to initialize this early, so we can reclaim the inode
+ * number if we fail afterwards in this function.
+ */
+ inode->i_ino = objectid;
+
if (dir) {
trace_btrfs_inode_request(dir);
goto fail;
inode_init_owner(inode, dir, mode);
- inode->i_ino = objectid;
inode_set_bytes(inode, 0);
inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
int ret = 0;
struct btrfs_key key;
struct btrfs_root *root = BTRFS_I(parent_inode)->root;
+ u64 ino = btrfs_ino(inode);
+ u64 parent_ino = btrfs_ino(parent_inode);
- if (unlikely(inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) {
+ if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
memcpy(&key, &BTRFS_I(inode)->root->root_key, sizeof(key));
} else {
- key.objectid = inode->i_ino;
+ key.objectid = ino;
btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
key.offset = 0;
}
- if (unlikely(inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) {
+ if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
ret = btrfs_add_root_ref(trans, root->fs_info->tree_root,
key.objectid, root->root_key.objectid,
- parent_inode->i_ino,
- index, name, name_len);
+ parent_ino, index, name, name_len);
} else if (add_backref) {
- ret = btrfs_insert_inode_ref(trans, root,
- name, name_len, inode->i_ino,
- parent_inode->i_ino, index);
+ ret = btrfs_insert_inode_ref(trans, root, name, name_len, ino,
+ parent_ino, index);
}
if (ret == 0) {
ret = btrfs_insert_dir_item(trans, root, name, name_len,
- parent_inode->i_ino, &key,
+ parent_inode, &key,
btrfs_inode_type(inode), index);
BUG_ON(ret);
if (!new_valid_dev(rdev))
return -EINVAL;
- err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid);
- if (err)
- return err;
-
/*
* 2 for inode item and ref
* 2 for dir items
btrfs_set_trans_block_group(trans, dir);
+ err = btrfs_find_free_ino(root, &objectid);
+ if (err)
+ goto out_unlock;
+
inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
- dentry->d_name.len, dir->i_ino, objectid,
+ dentry->d_name.len, btrfs_ino(dir), objectid,
BTRFS_I(dir)->block_group, mode, &index);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
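btrfs_find_free_ino() draws inode numbers from the per-root free-ino cache; the plain high-water allocator it replaces at these call sites loses its transaction and dirid arguments, and sketches as:

int btrfs_find_free_objectid(struct btrfs_root *root, u64 *objectid)
{
	int ret;

	mutex_lock(&root->objectid_mutex);
	if (unlikely(root->highest_objectid >= BTRFS_LAST_FREE_OBJECTID)) {
		ret = -ENOSPC;
		goto out;
	}
	*objectid = ++root->highest_objectid;
	ret = 0;
out:
	mutex_unlock(&root->objectid_mutex);
	return ret;
}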
u64 objectid;
u64 index = 0;
- err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid);
- if (err)
- return err;
/*
* 2 for inode item and ref
* 2 for dir items
btrfs_set_trans_block_group(trans, dir);
+ err = btrfs_find_free_ino(root, &objectid);
+ if (err)
+ goto out_unlock;
+
inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
- dentry->d_name.len, dir->i_ino, objectid,
+ dentry->d_name.len, btrfs_ino(dir), objectid,
BTRFS_I(dir)->block_group, mode, &index);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
u64 index = 0;
unsigned long nr = 1;
- err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid);
- if (err)
- return err;
-
/*
* 2 items for inode and ref
* 2 items for dir items
return PTR_ERR(trans);
btrfs_set_trans_block_group(trans, dir);
+ err = btrfs_find_free_ino(root, &objectid);
+ if (err)
+ goto out_fail;
+
inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
- dentry->d_name.len, dir->i_ino, objectid,
+ dentry->d_name.len, btrfs_ino(dir), objectid,
BTRFS_I(dir)->block_group, S_IFDIR | mode,
&index);
if (IS_ERR(inode)) {
u64 bytenr;
u64 extent_start = 0;
u64 extent_end = 0;
- u64 objectid = inode->i_ino;
+ u64 objectid = btrfs_ino(inode);
u32 found_type;
struct btrfs_path *path = NULL;
struct btrfs_root *root = BTRFS_I(inode)->root;
else
goto out;
}
- em = alloc_extent_map(GFP_NOFS);
+ em = alloc_extent_map();
if (!em) {
err = -ENOMEM;
goto out;
kunmap(page);
free_extent_map(em);
em = NULL;
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
trans = btrfs_join_transaction(root, 1);
if (IS_ERR(trans))
return ERR_CAST(trans);
em->block_start = EXTENT_MAP_HOLE;
set_bit(EXTENT_FLAG_VACANCY, &em->flags);
insert:
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
if (em->start > start || extent_map_end(em) <= start) {
printk(KERN_ERR "Btrfs: bad extent! em: [%llu %llu] passed "
"[%llu %llu]\n", (unsigned long long)em->start,
u64 hole_start = start;
u64 hole_len = len;
- em = alloc_extent_map(GFP_NOFS);
+ em = alloc_extent_map();
if (!em) {
err = -ENOMEM;
goto out;
}
if (!em) {
- em = alloc_extent_map(GFP_NOFS);
+ em = alloc_extent_map();
if (!em) {
em = ERR_PTR(-ENOMEM);
goto out;
if (!path)
return -ENOMEM;
- ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
+ ret = btrfs_lookup_file_extent(trans, root, path, btrfs_ino(inode),
offset, 0);
if (ret < 0)
goto out;
ret = 0;
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &key, slot);
- if (key.objectid != inode->i_ino ||
+ if (key.objectid != btrfs_ino(inode) ||
key.type != BTRFS_EXTENT_DATA_KEY) {
/* not our file or wrong item type, must cow */
goto out;
* look for other files referencing this extent, if we
* find any we must cow
*/
- if (btrfs_cross_ref_exist(trans, root, inode->i_ino,
+ if (btrfs_cross_ref_exist(trans, root, btrfs_ino(inode),
key.offset - backref_offset, disk_bytenr))
goto out;
flush_dcache_page(bvec->bv_page);
if (csum != *private) {
- printk(KERN_ERR "btrfs csum failed ino %lu off"
+ printk(KERN_ERR "btrfs csum failed ino %llu off"
" %llu csum %u private %u\n",
- inode->i_ino, (unsigned long long)start,
+ (unsigned long long)btrfs_ino(inode),
+ (unsigned long long)start,
csum, *private);
err = -EIO;
}
struct btrfs_dio_private *dip = bio->bi_private;
if (err) {
- printk(KERN_ERR "btrfs direct IO failed ino %lu rw %lu "
+ printk(KERN_ERR "btrfs direct IO failed ino %llu rw %lu "
"sector %#Lx len %u err no %d\n",
- dip->inode->i_ino, bio->bi_rw,
+ (unsigned long long)btrfs_ino(dip->inode), bio->bi_rw,
(unsigned long long)bio->bi_sector, bio->bi_size, err);
dip->errors = 1;
ei->dummy_inode = 0;
ei->force_compress = BTRFS_COMPRESS_NONE;
+ ei->delayed_node = NULL;
+
inode = &ei->vfs_inode;
- extent_map_tree_init(&ei->extent_tree, GFP_NOFS);
- extent_io_tree_init(&ei->io_tree, &inode->i_data, GFP_NOFS);
- extent_io_tree_init(&ei->io_failure_tree, &inode->i_data, GFP_NOFS);
+ extent_map_tree_init(&ei->extent_tree);
+ extent_io_tree_init(&ei->io_tree, &inode->i_data);
+ extent_io_tree_init(&ei->io_failure_tree, &inode->i_data);
mutex_init(&ei->log_mutex);
btrfs_ordered_inode_tree_init(&ei->ordered_tree);
INIT_LIST_HEAD(&ei->i_orphan);
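The init/alloc hunks here and the earlier alloc_extent_map() call sites reflect a gfp_t cleanup, apparently because callers passed GFP_NOFS uniformly. The signatures as used in this series:

/*
 * struct extent_map *alloc_extent_map(void);
 * void extent_map_tree_init(struct extent_map_tree *tree);
 * void extent_io_tree_init(struct extent_io_tree *tree,
 *			    struct address_space *mapping);
 */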
spin_lock(&root->orphan_lock);
if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
- printk(KERN_INFO "BTRFS: inode %lu still on the orphan list\n",
- inode->i_ino);
+ printk(KERN_INFO "BTRFS: inode %llu still on the orphan list\n",
+ (unsigned long long)btrfs_ino(inode));
list_del_init(&BTRFS_I(inode)->i_orphan);
}
spin_unlock(&root->orphan_lock);
inode_tree_del(inode);
btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
free:
+ btrfs_remove_delayed_node(inode);
call_rcu(&inode->i_rcu, btrfs_i_callback);
}
struct btrfs_root *root = BTRFS_I(inode)->root;
if (btrfs_root_refs(&root->root_item) == 0 &&
- root != root->fs_info->tree_root)
+ !is_free_space_inode(root, inode))
return 1;
else
return generic_drop_inode(inode);
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(old_dir)->root;
struct btrfs_root *dest = BTRFS_I(new_dir)->root;
- struct inode *newinode = new_dentry->d_inode;
+ struct inode *new_inode = new_dentry->d_inode;
struct inode *old_inode = old_dentry->d_inode;
struct timespec ctime = CURRENT_TIME;
u64 index = 0;
u64 root_objectid;
int ret;
+ u64 old_ino = btrfs_ino(old_inode);
- if (new_dir->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
+ if (btrfs_ino(new_dir) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
return -EPERM;
/* we only allow rename subvolume link between subvolumes */
- if (old_inode->i_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
+ if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
return -EXDEV;
- if (old_inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
- (newinode && newinode->i_ino == BTRFS_FIRST_FREE_OBJECTID))
+ if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
+ (new_inode && btrfs_ino(new_inode) == BTRFS_FIRST_FREE_OBJECTID))
return -ENOTEMPTY;
- if (S_ISDIR(old_inode->i_mode) && newinode &&
- newinode->i_size > BTRFS_EMPTY_DIR_SIZE)
+ if (S_ISDIR(old_inode->i_mode) && new_inode &&
+ new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
return -ENOTEMPTY;
/*
* we're using rename to replace one file with another.
* and the replacement file is large. Start IO on it now so
* we don't add too much work to the end of the transaction
*/
- if (newinode && S_ISREG(old_inode->i_mode) && newinode->i_size &&
+ if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size &&
old_inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
filemap_flush(old_inode->i_mapping);
/* close the racy window with snapshot create/destroy ioctl */
- if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
+ if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
down_read(&root->fs_info->subvol_sem);
/*
* We want to reserve the absolute worst case amount of items. So if
if (ret)
goto out_fail;
- if (unlikely(old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) {
+ if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
/* force full log commit if subvolume involved. */
root->fs_info->last_trans_log_full_commit = trans->transid;
} else {
ret = btrfs_insert_inode_ref(trans, dest,
new_dentry->d_name.name,
new_dentry->d_name.len,
- old_inode->i_ino,
- new_dir->i_ino, index);
+ old_ino,
+ btrfs_ino(new_dir), index);
if (ret)
goto out_fail;
/*
* make sure the inode gets flushed if it is replacing
* something.
*/
- if (newinode && newinode->i_size &&
- old_inode && S_ISREG(old_inode->i_mode)) {
+ if (new_inode && new_inode->i_size && S_ISREG(old_inode->i_mode))
btrfs_add_ordered_operation(trans, root, old_inode);
- }
old_dir->i_ctime = old_dir->i_mtime = ctime;
new_dir->i_ctime = new_dir->i_mtime = ctime;
if (old_dentry->d_parent != new_dentry->d_parent)
btrfs_record_unlink_dir(trans, old_dir, old_inode, 1);
- if (unlikely(old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) {
+ if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
root_objectid = BTRFS_I(old_inode)->root->root_key.objectid;
ret = btrfs_unlink_subvol(trans, root, old_dir, root_objectid,
old_dentry->d_name.name,
}
BUG_ON(ret);
- if (newinode) {
- newinode->i_ctime = CURRENT_TIME;
- if (unlikely(newinode->i_ino ==
+ if (new_inode) {
+ new_inode->i_ctime = CURRENT_TIME;
+ if (unlikely(btrfs_ino(new_inode) ==
BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
- root_objectid = BTRFS_I(newinode)->location.objectid;
+ root_objectid = BTRFS_I(new_inode)->location.objectid;
ret = btrfs_unlink_subvol(trans, dest, new_dir,
root_objectid,
new_dentry->d_name.name,
new_dentry->d_name.len);
- BUG_ON(newinode->i_nlink == 0);
+ BUG_ON(new_inode->i_nlink == 0);
} else {
ret = btrfs_unlink_inode(trans, dest, new_dir,
new_dentry->d_inode,
new_dentry->d_name.len);
}
BUG_ON(ret);
- if (newinode->i_nlink == 0) {
+ if (new_inode->i_nlink == 0) {
ret = btrfs_orphan_add(trans, new_dentry->d_inode);
BUG_ON(ret);
}
new_dentry->d_name.len, 0, index);
BUG_ON(ret);
- if (old_inode->i_ino != BTRFS_FIRST_FREE_OBJECTID) {
+ if (old_ino != BTRFS_FIRST_FREE_OBJECTID) {
struct dentry *parent = dget_parent(new_dentry);
btrfs_log_new_name(trans, old_inode, old_dir, parent);
dput(parent);
out_fail:
btrfs_end_transaction_throttle(trans, root);
out_notrans:
- if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
+ if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
up_read(&root->fs_info->subvol_sem);
return ret;
return 0;
}
- int btrfs_start_one_delalloc_inode(struct btrfs_root *root, int delay_iput,
- int sync)
- {
- struct btrfs_inode *binode;
- struct inode *inode = NULL;
-
- spin_lock(&root->fs_info->delalloc_lock);
- while (!list_empty(&root->fs_info->delalloc_inodes)) {
- binode = list_entry(root->fs_info->delalloc_inodes.next,
- struct btrfs_inode, delalloc_inodes);
- inode = igrab(&binode->vfs_inode);
- if (inode) {
- list_move_tail(&binode->delalloc_inodes,
- &root->fs_info->delalloc_inodes);
- break;
- }
-
- list_del_init(&binode->delalloc_inodes);
- cond_resched_lock(&root->fs_info->delalloc_lock);
- }
- spin_unlock(&root->fs_info->delalloc_lock);
-
- if (inode) {
- if (sync) {
- filemap_write_and_wait(inode->i_mapping);
- /*
- * We have to do this because compression doesn't
- * actually set PG_writeback until it submits the pages
- * for IO, which happens in an async thread, so we could
- * race and not actually wait for any writeback pages
- * because they've not been submitted yet. Technically
- * this could still be the case for the ordered stuff
- * since the async thread may not have started to do its
- * work yet. If this becomes the case then we need to
- * figure out a way to make sure that in writepage we
- * wait for any async pages to be submitted before
- * returning so that fdatawait does what its supposed to
- * do.
- */
- btrfs_wait_ordered_range(inode, 0, (u64)-1);
- } else {
- filemap_flush(inode->i_mapping);
- }
- if (delay_iput)
- btrfs_add_delayed_iput(inode);
- else
- iput(inode);
- return 1;
- }
- return 0;
- }
-
static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
const char *symname)
{
if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
return -ENAMETOOLONG;
- err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid);
- if (err)
- return err;
/*
* 2 items for inode item and ref
* 2 items for dir items
btrfs_set_trans_block_group(trans, dir);
+ err = btrfs_find_free_ino(root, &objectid);
+ if (err)
+ goto out_unlock;
+
inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
- dentry->d_name.len, dir->i_ino, objectid,
+ dentry->d_name.len, btrfs_ino(dir), objectid,
BTRFS_I(dir)->block_group, S_IFLNK|S_IRWXUGO,
&index);
if (IS_ERR(inode)) {
path = btrfs_alloc_path();
BUG_ON(!path);
- key.objectid = inode->i_ino;
+ key.objectid = btrfs_ino(inode);
key.offset = 0;
btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
datasize = btrfs_file_extent_calc_inline_size(name_len);
#include "print-tree.h"
#include "volumes.h"
#include "locking.h"
+#include "inode-map.h"
/* Mask out flags that are inappropriate for the given type of inode. */
static inline __u32 btrfs_mask_flags(umode_t mode, __u32 flags)
iflags |= FS_NOATIME_FL;
if (flags & BTRFS_INODE_DIRSYNC)
iflags |= FS_DIRSYNC_FL;
+ if (flags & BTRFS_INODE_NODATACOW)
+ iflags |= FS_NOCOW_FL;
+
+ if ((flags & BTRFS_INODE_COMPRESS) && !(flags & BTRFS_INODE_NOCOMPRESS))
+ iflags |= FS_COMPR_FL;
+ else if (flags & BTRFS_INODE_NOCOMPRESS)
+ iflags |= FS_NOCOMP_FL;
return iflags;
}
if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \
FS_NOATIME_FL | FS_NODUMP_FL | \
FS_SYNC_FL | FS_DIRSYNC_FL | \
- FS_NOCOMP_FL | FS_COMPR_FL | \
- FS_NOCOW_FL | FS_COW_FL))
+ FS_NOCOMP_FL | FS_COMPR_FL |
+ FS_NOCOW_FL))
return -EOPNOTSUPP;
if ((flags & FS_NOCOMP_FL) && (flags & FS_COMPR_FL))
return -EINVAL;
- if ((flags & FS_NOCOW_FL) && (flags & FS_COW_FL))
- return -EINVAL;
-
return 0;
}
ip->flags |= BTRFS_INODE_DIRSYNC;
else
ip->flags &= ~BTRFS_INODE_DIRSYNC;
+ if (flags & FS_NOCOW_FL)
+ ip->flags |= BTRFS_INODE_NODATACOW;
+ else
+ ip->flags &= ~BTRFS_INODE_NODATACOW;
/*
* The COMPRESS flag can only be changed by users, while the NOCOMPRESS
} else if (flags & FS_COMPR_FL) {
ip->flags |= BTRFS_INODE_COMPRESS;
ip->flags &= ~BTRFS_INODE_NOCOMPRESS;
+ } else {
+ ip->flags &= ~(BTRFS_INODE_COMPRESS | BTRFS_INODE_NOCOMPRESS);
}
- if (flags & FS_NOCOW_FL)
- ip->flags |= BTRFS_INODE_NODATACOW;
- else if (flags & FS_COW_FL)
- ip->flags &= ~BTRFS_INODE_NODATACOW;
trans = btrfs_join_transaction(root, 1);
BUG_ON(IS_ERR(trans));
u64 new_dirid = BTRFS_FIRST_FREE_OBJECTID;
u64 index = 0;
- ret = btrfs_find_free_objectid(NULL, root->fs_info->tree_root,
- 0, &objectid);
+ ret = btrfs_find_free_objectid(root->fs_info->tree_root, &objectid);
if (ret) {
dput(parent);
return ret;
BUG_ON(ret);
ret = btrfs_insert_dir_item(trans, root,
- name, namelen, dir->i_ino, &key,
+ name, namelen, dir, &key,
BTRFS_FT_DIR, index);
if (ret)
goto fail;
ret = btrfs_add_root_ref(trans, root->fs_info->tree_root,
objectid, root->root_key.objectid,
- dir->i_ino, index, name, namelen);
+ btrfs_ino(dir), index, name, namelen);
BUG_ON(ret);
int ret = 0;
u64 flags = 0;
- if (inode->i_ino != BTRFS_FIRST_FREE_OBJECTID)
+ if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID)
return -EINVAL;
down_read(&root->fs_info->subvol_sem);
if (root->fs_info->sb->s_flags & MS_RDONLY)
return -EROFS;
- if (inode->i_ino != BTRFS_FIRST_FREE_OBJECTID)
+ if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID)
return -EINVAL;
if (copy_from_user(&flags, arg, sizeof(flags)))
}
ret = copy_to_sk(root, path, &key, sk, args->buf,
&sk_offset, &num_found);
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
if (ret || num_found >= sk->nr_items)
break;
if (key.offset == BTRFS_FIRST_FREE_OBJECTID)
break;
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
key.objectid = key.offset;
key.offset = (u64)-1;
dirid = key.objectid;
goto out_dput;
}
- if (inode->i_ino != BTRFS_FIRST_FREE_OBJECTID) {
+ if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID) {
err = -EINVAL;
goto out_dput;
}
}
/* clone data */
- key.objectid = src->i_ino;
+ key.objectid = btrfs_ino(src);
key.type = BTRFS_EXTENT_DATA_KEY;
key.offset = 0;
btrfs_item_key_to_cpu(leaf, &key, slot);
if (btrfs_key_type(&key) > BTRFS_EXTENT_DATA_KEY ||
- key.objectid != src->i_ino)
+ key.objectid != btrfs_ino(src))
break;
if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY) {
datal = btrfs_file_extent_ram_bytes(leaf,
extent);
}
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
if (key.offset + datal <= off ||
key.offset >= off+len)
goto next;
memcpy(&new_key, &key, sizeof(new_key));
- new_key.objectid = inode->i_ino;
+ new_key.objectid = btrfs_ino(inode);
if (off <= key.offset)
new_key.offset = key.offset + destoff - off;
else
ret = btrfs_inc_extent_ref(trans, root,
disko, diskl, 0,
root->root_key.objectid,
- inode->i_ino,
+ btrfs_ino(inode),
new_key.offset - datao);
BUG_ON(ret);
}
}
btrfs_mark_buffer_dirty(leaf);
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
inode->i_mtime = inode->i_ctime = CURRENT_TIME;
btrfs_end_transaction(trans, root);
}
next:
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
key.offset++;
}
ret = 0;
out:
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
unlock_extent(&BTRFS_I(src)->io_tree, off, off+len, GFP_NOFS);
out_unlock:
mutex_unlock(&src->i_mutex);
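The single most repeated edit in this series is btrfs_release_path() dropping its never-used root argument. The body is unchanged by the signature cleanup; a sketch of its shape for orientation:

void btrfs_release_path(struct btrfs_path *p)
{
	int i;

	/* drop locks and extent-buffer references at every level */
	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		p->slots[i] = 0;
		if (!p->nodes[i])
			continue;
		if (p->locks[i]) {
			btrfs_tree_unlock(p->nodes[i]);
			p->locks[i] = 0;
		}
		free_extent_buffer(p->nodes[i]);
		p->nodes[i] = NULL;
	}
}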
#include "btrfs_inode.h"
#include "async-thread.h"
#include "free-space-cache.h"
+#include "inode-map.h"
/*
* backref_node, mapping_node and tree_block start with this
return 1;
}
+
static int should_ignore_root(struct btrfs_root *root)
{
struct btrfs_root *reloc_root;
*/
return 1;
}
-
/*
* find reloc tree by address of tree root
*/
lower = upper;
upper = NULL;
}
- btrfs_release_path(root, path2);
+ btrfs_release_path(path2);
next:
if (ptr < end) {
ptr += btrfs_extent_inline_ref_size(key.type);
if (ptr >= end)
path1->slots[0]++;
}
- btrfs_release_path(rc->extent_root, path1);
+ btrfs_release_path(path1);
cur->checked = 1;
WARN_ON(exist);
prev = node;
entry = rb_entry(node, struct btrfs_inode, rb_node);
- if (objectid < entry->vfs_inode.i_ino)
+ if (objectid < btrfs_ino(&entry->vfs_inode))
node = node->rb_left;
- else if (objectid > entry->vfs_inode.i_ino)
+ else if (objectid > btrfs_ino(&entry->vfs_inode))
node = node->rb_right;
else
break;
if (!node) {
while (prev) {
entry = rb_entry(prev, struct btrfs_inode, rb_node);
- if (objectid <= entry->vfs_inode.i_ino) {
+ if (objectid <= btrfs_ino(&entry->vfs_inode)) {
node = prev;
break;
}
return inode;
}
- objectid = entry->vfs_inode.i_ino + 1;
+ objectid = btrfs_ino(&entry->vfs_inode) + 1;
if (cond_resched_lock(&root->inode_lock))
goto again;
return -ENOMEM;
bytenr -= BTRFS_I(reloc_inode)->index_cnt;
- ret = btrfs_lookup_file_extent(NULL, root, path, reloc_inode->i_ino,
+ ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(reloc_inode),
bytenr, 0);
if (ret < 0)
goto out;
if (first) {
inode = find_next_inode(root, key.objectid);
first = 0;
- } else if (inode && inode->i_ino < key.objectid) {
+ } else if (inode && btrfs_ino(inode) < key.objectid) {
btrfs_add_delayed_iput(inode);
inode = find_next_inode(root, key.objectid);
}
- if (inode && inode->i_ino == key.objectid) {
+ if (inode && btrfs_ino(inode) == key.objectid) {
end = key.offset +
btrfs_file_extent_num_bytes(leaf, fi);
WARN_ON(!IS_ALIGNED(key.offset,
btrfs_node_key_to_cpu(path->nodes[level], &key,
path->slots[level]);
- btrfs_release_path(src, path);
+ btrfs_release_path(path);
path->lowest_level = level;
ret = btrfs_search_slot(trans, src, &key, path, 0, 1);
struct inode *inode = NULL;
u64 objectid;
u64 start, end;
+ u64 ino;
objectid = min_key->objectid;
while (1) {
inode = find_next_inode(root, objectid);
if (!inode)
break;
+ ino = btrfs_ino(inode);
- if (inode->i_ino > max_key->objectid) {
+ if (ino > max_key->objectid) {
iput(inode);
break;
}
- objectid = inode->i_ino + 1;
+ objectid = ino + 1;
if (!S_ISREG(inode->i_mode))
continue;
- if (unlikely(min_key->objectid == inode->i_ino)) {
+ if (unlikely(min_key->objectid == ino)) {
if (min_key->type > BTRFS_EXTENT_DATA_KEY)
continue;
if (min_key->type < BTRFS_EXTENT_DATA_KEY)
start = 0;
}
- if (unlikely(max_key->objectid == inode->i_ino)) {
+ if (unlikely(max_key->objectid == ino)) {
if (max_key->type < BTRFS_EXTENT_DATA_KEY)
continue;
if (max_key->type > BTRFS_EXTENT_DATA_KEY) {
path->locks[upper->level] = 0;
slot = path->slots[upper->level];
- btrfs_release_path(NULL, path);
+ btrfs_release_path(path);
} else {
ret = btrfs_bin_search(upper->eb, key, upper->level,
&slot);
} else {
path->lowest_level = node->level;
ret = btrfs_search_slot(trans, root, key, path, 0, 1);
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
if (ret > 0)
ret = 0;
}
struct extent_map *em;
int ret = 0;
- em = alloc_extent_map(GFP_NOFS);
+ em = alloc_extent_map();
if (!em)
return -ENOMEM;
#endif
}
- btrfs_release_path(rc->extent_root, path);
+ btrfs_release_path(path);
BUG_ON(level == -1);
key.offset = 0;
inode = btrfs_iget(fs_info->sb, &key, root, NULL);
- if (!inode || IS_ERR(inode) || is_bad_inode(inode)) {
+ if (IS_ERR_OR_NULL(inode) || is_bad_inode(inode)) {
if (inode && !IS_ERR(inode))
iput(inode);
return -ENOENT;
}
path->slots[0]++;
}
- btrfs_release_path(rc->extent_root, path);
+ btrfs_release_path(path);
if (err)
free_block_list(blocks);
return err;
EXTENT_DIRTY);
if (ret == 0 && start <= key.objectid) {
- btrfs_release_path(rc->extent_root, path);
+ btrfs_release_path(path);
rc->search_start = end + 1;
} else {
rc->search_start = key.objectid + key.offset;
return 0;
}
}
- btrfs_release_path(rc->extent_root, path);
+ btrfs_release_path(path);
return ret;
}
flags = BTRFS_EXTENT_FLAG_DATA;
if (path_change) {
- btrfs_release_path(rc->extent_root, path);
+ btrfs_release_path(path);
path->search_commit_root = 1;
path->skip_locking = 1;
(flags & BTRFS_EXTENT_FLAG_DATA)) {
ret = add_data_references(rc, &key, path, &blocks);
} else {
- btrfs_release_path(rc->extent_root, path);
+ btrfs_release_path(path);
ret = 0;
}
if (ret < 0) {
}
}
- btrfs_release_path(rc->extent_root, path);
+ btrfs_release_path(path);
clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY,
GFP_NOFS);
btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS |
BTRFS_INODE_PREALLOC);
btrfs_mark_buffer_dirty(leaf);
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
out:
btrfs_free_path(path);
return ret;
if (IS_ERR(trans))
return ERR_CAST(trans);
- err = btrfs_find_free_objectid(trans, root, objectid, &objectid);
+ err = btrfs_find_free_objectid(root, &objectid);
if (err)
goto out;
INIT_LIST_HEAD(&rc->reloc_roots);
backref_cache_init(&rc->backref_cache);
mapping_tree_init(&rc->reloc_root_tree);
- extent_io_tree_init(&rc->processed_blocks, NULL, GFP_NOFS);
+ extent_io_tree_init(&rc->processed_blocks, NULL);
return rc;
}
}
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
- btrfs_release_path(root->fs_info->tree_root, path);
+ btrfs_release_path(path);
if (key.objectid != BTRFS_TREE_RELOC_OBJECTID ||
key.type != BTRFS_ROOT_ITEM_KEY)
key.offset--;
}
- btrfs_release_path(root->fs_info->tree_root, path);
+ btrfs_release_path(path);
if (list_empty(&reloc_roots))
goto out;
#include <linux/magic.h>
#include <linux/slab.h>
#include "compat.h"
+#include "delayed-inode.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
* for multiple device setup. Make sure to keep it in sync.
*/
static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *data)
+ const char *device_name, void *data)
{
struct block_device *bdev = NULL;
struct super_block *s;
if (error)
return ERR_PTR(error);
- error = btrfs_scan_one_device(dev_name, mode, fs_type, &fs_devices);
+ error = btrfs_scan_one_device(device_name, mode, fs_type, &fs_devices);
if (error)
goto error_free_subvol_name;
if (err)
goto free_extent_io;
- err = btrfs_interface_init();
+ err = btrfs_delayed_inode_init();
if (err)
goto free_extent_map;
+ err = btrfs_interface_init();
+ if (err)
+ goto free_delayed_inode;
+
err = register_filesystem(&btrfs_fs_type);
if (err)
goto unregister_ioctl;
unregister_ioctl:
btrfs_interface_exit();
+free_delayed_inode:
+ btrfs_delayed_inode_exit();
free_extent_map:
extent_map_exit();
free_extent_io:
static void __exit exit_btrfs_fs(void)
{
btrfs_destroy_cachep();
+ btrfs_delayed_inode_exit();
extent_map_exit();
extent_io_exit();
btrfs_interface_exit();
.store = btrfs_root_attr_store,
};
-static struct kobj_type btrfs_root_ktype = {
- .default_attrs = btrfs_root_attrs,
- .sysfs_ops = &btrfs_root_attr_ops,
- .release = btrfs_root_release,
-};
-
-static struct kobj_type btrfs_super_ktype = {
- .default_attrs = btrfs_super_attrs,
- .sysfs_ops = &btrfs_super_attr_ops,
- .release = btrfs_super_release,
-};
-
/* /sys/fs/btrfs/ entry */
static struct kset *btrfs_kset;
- int btrfs_sysfs_add_super(struct btrfs_fs_info *fs)
- {
- int error;
- char *name;
- char c;
- int len = strlen(fs->sb->s_id) + 1;
- int i;
-
- name = kmalloc(len, GFP_NOFS);
- if (!name) {
- error = -ENOMEM;
- goto fail;
- }
-
- for (i = 0; i < len; i++) {
- c = fs->sb->s_id[i];
- if (c == '/' || c == '\\')
- c = '!';
- name[i] = c;
- }
- name[len] = '\0';
-
- fs->super_kobj.kset = btrfs_kset;
- error = kobject_init_and_add(&fs->super_kobj, &btrfs_super_ktype,
- NULL, "%s", name);
- kfree(name);
- if (error)
- goto fail;
-
- return 0;
-
- fail:
- printk(KERN_ERR "btrfs: sysfs creation for super failed\n");
- return error;
- }
-
- int btrfs_sysfs_add_root(struct btrfs_root *root)
- {
- int error;
-
- error = kobject_init_and_add(&root->root_kobj, &btrfs_root_ktype,
- &root->fs_info->super_kobj,
- "%s", root->name);
- if (error)
- goto fail;
-
- return 0;
-
- fail:
- printk(KERN_ERR "btrfs: sysfs creation for root failed\n");
- return error;
- }
-
- void btrfs_sysfs_del_root(struct btrfs_root *root)
- {
- kobject_put(&root->root_kobj);
- wait_for_completion(&root->kobj_unregister);
- }
-
- void btrfs_sysfs_del_super(struct btrfs_fs_info *fs)
- {
- kobject_put(&fs->super_kobj);
- wait_for_completion(&fs->kobj_unregister);
- }
-
int btrfs_init_sysfs(void)
{
btrfs_kset = kset_create_and_add("btrfs", NULL, fs_kobj);
#include "transaction.h"
#include "locking.h"
#include "tree-log.h"
+#include "inode-map.h"
#define BTRFS_ROOT_TRANS_TAG 0
INIT_LIST_HEAD(&cur_trans->pending_snapshots);
list_add_tail(&cur_trans->list, &root->fs_info->trans_list);
extent_io_tree_init(&cur_trans->dirty_pages,
- root->fs_info->btree_inode->i_mapping,
- GFP_NOFS);
+ root->fs_info->btree_inode->i_mapping);
spin_lock(&root->fs_info->new_trans_lock);
root->fs_info->running_transaction = cur_trans;
spin_unlock(&root->fs_info->new_trans_lock);
return ret;
}
- #if 0
- /*
- * rate limit against the drop_snapshot code. This helps to slow down new
- * operations if the drop_snapshot code isn't able to keep up.
- */
- static void throttle_on_drops(struct btrfs_root *root)
- {
- struct btrfs_fs_info *info = root->fs_info;
- int harder_count = 0;
-
- harder:
- if (atomic_read(&info->throttles)) {
- DEFINE_WAIT(wait);
- int thr;
- thr = atomic_read(&info->throttle_gen);
-
- do {
- prepare_to_wait(&info->transaction_throttle,
- &wait, TASK_UNINTERRUPTIBLE);
- if (!atomic_read(&info->throttles)) {
- finish_wait(&info->transaction_throttle, &wait);
- break;
- }
- schedule();
- finish_wait(&info->transaction_throttle, &wait);
- } while (thr == atomic_read(&info->throttle_gen));
- harder_count++;
-
- if (root->fs_info->total_ref_cache_size > 1 * 1024 * 1024 &&
- harder_count < 2)
- goto harder;
-
- if (root->fs_info->total_ref_cache_size > 5 * 1024 * 1024 &&
- harder_count < 10)
- goto harder;
-
- if (root->fs_info->total_ref_cache_size > 10 * 1024 * 1024 &&
- harder_count < 20)
- goto harder;
- }
- }
- #endif
-
void btrfs_throttle(struct btrfs_root *root)
{
mutex_lock(&root->fs_info->trans_mutex);
int btrfs_end_transaction(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
- return __btrfs_end_transaction(trans, root, 0, 1);
+ int ret;
+
+ ret = __btrfs_end_transaction(trans, root, 0, 1);
+ if (ret)
+ return ret;
+ return 0;
}
int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
- return __btrfs_end_transaction(trans, root, 1, 1);
+ int ret;
+
+ ret = __btrfs_end_transaction(trans, root, 1, 1);
+ if (ret)
+ return ret;
+ return 0;
}
int btrfs_end_transaction_nolock(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
- return __btrfs_end_transaction(trans, root, 0, 0);
+ int ret;
+
+ ret = __btrfs_end_transaction(trans, root, 0, 0);
+ if (ret)
+ return ret;
+ return 0;
+}
+
+int btrfs_end_transaction_dmeta(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root)
+{
+ return __btrfs_end_transaction(trans, root, 1, 1);
}
/*
btrfs_update_reloc_root(trans, root);
btrfs_orphan_commit_root(trans, root);
+ btrfs_save_ino_cache(root, trans);
+
if (root->commit_root != root->node) {
+ mutex_lock(&root->fs_commit_mutex);
switch_commit_root(root);
+ btrfs_unpin_free_ino(root);
+ mutex_unlock(&root->fs_commit_mutex);
+
btrfs_set_root_node(&root->root_item,
root->node);
}
return ret;
}
- #if 0
- /*
- * when dropping snapshots, we generate a ton of delayed refs, and it makes
- * sense not to join the transaction while it is trying to flush the current
- * queue of delayed refs out.
- *
- * This is used by the drop snapshot code only
- */
- static noinline int wait_transaction_pre_flush(struct btrfs_fs_info *info)
- {
- DEFINE_WAIT(wait);
-
- mutex_lock(&info->trans_mutex);
- while (info->running_transaction &&
- info->running_transaction->delayed_refs.flushing) {
- prepare_to_wait(&info->transaction_wait, &wait,
- TASK_UNINTERRUPTIBLE);
- mutex_unlock(&info->trans_mutex);
-
- schedule();
-
- mutex_lock(&info->trans_mutex);
- finish_wait(&info->transaction_wait, &wait);
- }
- mutex_unlock(&info->trans_mutex);
- return 0;
- }
-
- /*
- * Given a list of roots that need to be deleted, call btrfs_drop_snapshot on
- * all of them
- */
- int btrfs_drop_dead_root(struct btrfs_root *root)
- {
- struct btrfs_trans_handle *trans;
- struct btrfs_root *tree_root = root->fs_info->tree_root;
- unsigned long nr;
- int ret;
-
- while (1) {
- /*
- * we don't want to jump in and create a bunch of
- * delayed refs if the transaction is starting to close
- */
- wait_transaction_pre_flush(tree_root->fs_info);
- trans = btrfs_start_transaction(tree_root, 1);
-
- /*
- * we've joined a transaction, make sure it isn't
- * closing right now
- */
- if (trans->transaction->delayed_refs.flushing) {
- btrfs_end_transaction(trans, tree_root);
- continue;
- }
-
- ret = btrfs_drop_snapshot(trans, root);
- if (ret != -EAGAIN)
- break;
-
- ret = btrfs_update_root(trans, tree_root,
- &root->root_key,
- &root->root_item);
- if (ret)
- break;
-
- nr = trans->blocks_used;
- ret = btrfs_end_transaction(trans, tree_root);
- BUG_ON(ret);
-
- btrfs_btree_balance_dirty(tree_root, nr);
- cond_resched();
- }
- BUG_ON(ret);
-
- ret = btrfs_del_root(trans, tree_root, &root->root_key);
- BUG_ON(ret);
-
- nr = trans->blocks_used;
- ret = btrfs_end_transaction(trans, tree_root);
- BUG_ON(ret);
-
- free_extent_buffer(root->node);
- free_extent_buffer(root->commit_root);
- kfree(root);
-
- btrfs_btree_balance_dirty(tree_root, nr);
- return ret;
- }
- #endif
-
/*
* new snapshots need to be created at a very specific time in the
* transaction commit. This does the actual creation
goto fail;
}
- ret = btrfs_find_free_objectid(trans, tree_root, 0, &objectid);
+ ret = btrfs_find_free_objectid(tree_root, &objectid);
if (ret) {
pending->error = ret;
goto fail;
BUG_ON(ret);
ret = btrfs_insert_dir_item(trans, parent_root,
dentry->d_name.name, dentry->d_name.len,
- parent_inode->i_ino, &key,
+ parent_inode, &key,
BTRFS_FT_DIR, index);
BUG_ON(ret);
*/
ret = btrfs_add_root_ref(trans, tree_root, objectid,
parent_root->root_key.objectid,
- parent_inode->i_ino, index,
+ btrfs_ino(parent_inode), index,
dentry->d_name.name, dentry->d_name.len);
BUG_ON(ret);
dput(parent);
int ret;
list_for_each_entry(pending, head, list) {
+ /*
+ * We must deal with the delayed items before creating
+ * snapshots, or we will create a snapshot with inconsistent
+ * information.
+ */
+ ret = btrfs_run_delayed_items(trans, fs_info->fs_root);
+ BUG_ON(ret);
+
ret = create_pending_snapshot(trans, fs_info, pending);
BUG_ON(ret);
}
BUG_ON(ret);
}
+ ret = btrfs_run_delayed_items(trans, root);
+ BUG_ON(ret);
+
/*
* rename don't use btrfs_join_transaction, so, once we
* set the transaction to blocked above, we aren't going
ret = create_pending_snapshots(trans, root->fs_info);
BUG_ON(ret);
+ ret = btrfs_run_delayed_items(trans, root);
+ BUG_ON(ret);
+
ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
BUG_ON(ret);
root = list_entry(list.next, struct btrfs_root, root_list);
list_del(&root->root_list);
+ btrfs_kill_all_delayed_nodes(root);
+
if (btrfs_header_backref_rev(root->node) <
BTRFS_MIXED_BACKREF_REV)
btrfs_drop_snapshot(root, NULL, 0);
int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid);
int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
- int btrfs_commit_tree_roots(struct btrfs_trans_handle *trans,
- struct btrfs_root *root);
int btrfs_add_dead_root(struct btrfs_root *root);
- int btrfs_drop_dead_root(struct btrfs_root *root);
int btrfs_defrag_root(struct btrfs_root *root, int cacheonly);
int btrfs_clean_old_snapshots(struct btrfs_root *root);
int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
int wait_for_unblock);
int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
+int btrfs_end_transaction_dmeta(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root);
int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
void btrfs_throttle(struct btrfs_root *root);
goto insert;
if (item_size == 0) {
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
return 0;
}
dst_copy = kmalloc(item_size, GFP_NOFS);
src_copy = kmalloc(item_size, GFP_NOFS);
if (!dst_copy || !src_copy) {
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
kfree(dst_copy);
kfree(src_copy);
return -ENOMEM;
* sync
*/
if (ret == 0) {
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
return 0;
}
}
insert:
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
/* try to insert the key into the destination tree */
ret = btrfs_insert_empty_item(trans, root, path,
key, item_size);
}
no_copy:
btrfs_mark_buffer_dirty(path->nodes[0]);
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
return 0;
}
* file. This must be done before the btrfs_drop_extents run
* so we don't try to drop this extent.
*/
- ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
+ ret = btrfs_lookup_file_extent(trans, root, path, btrfs_ino(inode),
start, 0);
if (ret == 0 &&
* we don't have to do anything
*/
if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) {
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
goto out;
}
}
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
saved_nbytes = inode_get_bytes(inode);
/* drop any overlapping extents */
key->objectid, offset, &ins);
BUG_ON(ret);
}
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
if (btrfs_file_extent_compression(eb, item)) {
csum_start = ins.objectid;
kfree(sums);
}
} else {
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
}
} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
/* inline extents are easy, we just overwrite them */
return -ENOMEM;
read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len);
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
inode = read_one_inode(root, location.objectid);
BUG_ON(!inode);
goto out;
} else
goto out;
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0);
if (di && !IS_ERR(di)) {
goto out;
match = 1;
out:
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
return match;
}
read_extent_buffer(eb, name, (unsigned long)(ref + 1), namelen);
/* if we already have a perfect match, we're done */
- if (inode_in_dir(root, path, dir->i_ino, inode->i_ino,
+ if (inode_in_dir(root, path, btrfs_ino(dir), btrfs_ino(inode),
btrfs_inode_ref_index(eb, ref),
name, namelen)) {
goto out;
if (!backref_in_log(log, key, victim_name,
victim_name_len)) {
btrfs_inc_nlink(inode);
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
ret = btrfs_unlink_inode(trans, root, dir,
inode, victim_name,
*/
search_done = 1;
}
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
insert:
/* insert our name */
BUG_ON(ret);
out_nowrite:
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
iput(dir);
iput(inode);
return 0;
unsigned long ptr;
unsigned long ptr_end;
int name_len;
+ u64 ino = btrfs_ino(inode);
- key.objectid = inode->i_ino;
+ key.objectid = ino;
key.type = BTRFS_INODE_REF_KEY;
key.offset = (u64)-1;
}
btrfs_item_key_to_cpu(path->nodes[0], &key,
path->slots[0]);
- if (key.objectid != inode->i_ino ||
+ if (key.objectid != ino ||
key.type != BTRFS_INODE_REF_KEY)
break;
ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
if (key.offset == 0)
break;
key.offset--;
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
}
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
if (nlink != inode->i_nlink) {
inode->i_nlink = nlink;
btrfs_update_inode(trans, root, inode);
if (inode->i_nlink == 0) {
if (S_ISDIR(inode->i_mode)) {
ret = replay_dir_deletes(trans, root, NULL, path,
- inode->i_ino, 1);
+ ino, 1);
BUG_ON(ret);
}
- ret = insert_orphan_item(trans, root, inode->i_ino);
+ ret = insert_orphan_item(trans, root, ino);
BUG_ON(ret);
}
btrfs_free_path(path);
ret = btrfs_del_item(trans, root, path);
BUG_ON(ret);
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
inode = read_one_inode(root, key.offset);
BUG_ON(!inode);
*/
key.offset = (u64)-1;
}
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
return 0;
}
ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
if (ret == 0) {
btrfs_inc_nlink(inode);
btrfs_update_inode(trans, root, inode);
exists = 1;
else
exists = 0;
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
if (key->type == BTRFS_DIR_ITEM_KEY) {
dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid,
} else {
BUG();
}
- if (!dst_di || IS_ERR(dst_di)) {
+ if (IS_ERR_OR_NULL(dst_di)) {
/* we need a sequence number to insert, so we only
* do inserts for the BTRFS_DIR_INDEX_KEY types
*/
if (key->type == BTRFS_DIR_INDEX_KEY)
goto insert;
out:
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
kfree(name);
iput(dir);
return 0;
insert:
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
ret = insert_one_name(trans, root, path, key->objectid, key->offset,
name, name_len, log_type, &log_key);
*end_ret = found_end;
ret = 0;
out:
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
return ret;
}
dir_key->offset,
name, name_len, 0);
}
- if (!log_di || IS_ERR(log_di)) {
+ if (IS_ERR_OR_NULL(log_di)) {
btrfs_dir_item_key_to_cpu(eb, di, &location);
- btrfs_release_path(root, path);
- btrfs_release_path(log, log_path);
+ btrfs_release_path(path);
+ btrfs_release_path(log_path);
inode = read_one_inode(root, location.objectid);
BUG_ON(!inode);
ret = 0;
goto out;
}
- btrfs_release_path(log, log_path);
+ btrfs_release_path(log_path);
kfree(name);
ptr = (unsigned long)(di + 1);
}
ret = 0;
out:
- btrfs_release_path(root, path);
- btrfs_release_path(log, log_path);
+ btrfs_release_path(path);
+ btrfs_release_path(log_path);
return ret;
}
break;
dir_key.offset = found_key.offset + 1;
}
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
if (range_end == (u64)-1)
break;
range_start = range_end + 1;
if (key_type == BTRFS_DIR_LOG_ITEM_KEY) {
key_type = BTRFS_DIR_LOG_INDEX_KEY;
dir_key.type = BTRFS_DIR_INDEX_KEY;
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
goto again;
}
out:
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
btrfs_free_path(log_path);
iput(dir);
return ret;
int ret;
int err = 0;
int bytes_del = 0;
+ u64 dir_ino = btrfs_ino(dir);
if (BTRFS_I(dir)->logged_trans < trans->transid)
return 0;
goto out_unlock;
}
- di = btrfs_lookup_dir_item(trans, log, path, dir->i_ino,
+ di = btrfs_lookup_dir_item(trans, log, path, dir_ino,
name, name_len, -1);
if (IS_ERR(di)) {
err = PTR_ERR(di);
bytes_del += name_len;
BUG_ON(ret);
}
- btrfs_release_path(log, path);
+ btrfs_release_path(path);
- di = btrfs_lookup_dir_index_item(trans, log, path, dir->i_ino,
+ di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino,
index, name, name_len, -1);
if (IS_ERR(di)) {
err = PTR_ERR(di);
if (bytes_del) {
struct btrfs_key key;
- key.objectid = dir->i_ino;
+ key.objectid = dir_ino;
key.offset = 0;
key.type = BTRFS_INODE_ITEM_KEY;
- btrfs_release_path(log, path);
+ btrfs_release_path(path);
ret = btrfs_search_slot(trans, log, &key, path, 0, 1);
if (ret < 0) {
btrfs_mark_buffer_dirty(path->nodes[0]);
} else
ret = 0;
- btrfs_release_path(log, path);
+ btrfs_release_path(path);
}
fail:
btrfs_free_path(path);
log = root->log_root;
mutex_lock(&BTRFS_I(inode)->log_mutex);
- ret = btrfs_del_inode_ref(trans, log, name, name_len, inode->i_ino,
+ ret = btrfs_del_inode_ref(trans, log, name, name_len, btrfs_ino(inode),
dirid, &index);
mutex_unlock(&BTRFS_I(inode)->log_mutex);
if (ret == -ENOSPC) {
struct btrfs_dir_log_item);
btrfs_set_dir_log_end(path->nodes[0], item, last_offset);
btrfs_mark_buffer_dirty(path->nodes[0]);
- btrfs_release_path(log, path);
+ btrfs_release_path(path);
return 0;
}
int nritems;
u64 first_offset = min_offset;
u64 last_offset = (u64)-1;
+ u64 ino = btrfs_ino(inode);
log = root->log_root;
- max_key.objectid = inode->i_ino;
+ max_key.objectid = ino;
max_key.offset = (u64)-1;
max_key.type = key_type;
- min_key.objectid = inode->i_ino;
+ min_key.objectid = ino;
min_key.type = key_type;
min_key.offset = min_offset;
* we didn't find anything from this transaction, see if there
* is anything at all
*/
- if (ret != 0 || min_key.objectid != inode->i_ino ||
- min_key.type != key_type) {
- min_key.objectid = inode->i_ino;
+ if (ret != 0 || min_key.objectid != ino || min_key.type != key_type) {
+ min_key.objectid = ino;
min_key.type = key_type;
min_key.offset = (u64)-1;
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
if (ret < 0) {
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
return ret;
}
- ret = btrfs_previous_item(root, path, inode->i_ino, key_type);
+ ret = btrfs_previous_item(root, path, ino, key_type);
/* if ret == 0 there are items for this type,
* create a range to tell us the last key of this type.
}
/* go backward to find any previous key */
- ret = btrfs_previous_item(root, path, inode->i_ino, key_type);
+ ret = btrfs_previous_item(root, path, ino, key_type);
if (ret == 0) {
struct btrfs_key tmp;
btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
}
}
}
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
/* find the first key from this transaction again */
ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
for (i = path->slots[0]; i < nritems; i++) {
btrfs_item_key_to_cpu(src, &min_key, i);
- if (min_key.objectid != inode->i_ino ||
- min_key.type != key_type)
+ if (min_key.objectid != ino || min_key.type != key_type)
goto done;
ret = overwrite_item(trans, log, dst_path, src, i,
&min_key);
goto done;
}
btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
- if (tmp.objectid != inode->i_ino || tmp.type != key_type) {
+ if (tmp.objectid != ino || tmp.type != key_type) {
last_offset = (u64)-1;
goto done;
}
}
}
done:
- btrfs_release_path(root, path);
- btrfs_release_path(log, dst_path);
+ btrfs_release_path(path);
+ btrfs_release_path(dst_path);
if (err == 0) {
*last_offset_ret = last_offset;
* is valid
*/
ret = insert_dir_log_key(trans, log, path, key_type,
- inode->i_ino, first_offset,
- last_offset);
+ ino, first_offset, last_offset);
if (ret)
err = ret;
}
ret = btrfs_del_item(trans, log, path);
BUG_ON(ret);
- btrfs_release_path(log, path);
+ btrfs_release_path(path);
}
- btrfs_release_path(log, path);
+ btrfs_release_path(path);
return ret;
}
}
btrfs_mark_buffer_dirty(dst_path->nodes[0]);
- btrfs_release_path(log, dst_path);
+ btrfs_release_path(dst_path);
kfree(ins_data);
/*
int nritems;
int ins_start_slot = 0;
int ins_nr;
+ u64 ino = btrfs_ino(inode);
log = root->log_root;
return -ENOMEM;
}
- min_key.objectid = inode->i_ino;
+ min_key.objectid = ino;
min_key.type = BTRFS_INODE_ITEM_KEY;
min_key.offset = 0;
- max_key.objectid = inode->i_ino;
+ max_key.objectid = ino;
/* today the code can only do partial logging of directories */
if (!S_ISDIR(inode->i_mode))
max_key.type = (u8)-1;
max_key.offset = (u64)-1;
+ ret = btrfs_commit_inode_delayed_items(trans, inode);
+ if (ret) {
+ btrfs_free_path(path);
+ btrfs_free_path(dst_path);
+ return ret;
+ }
+
mutex_lock(&BTRFS_I(inode)->log_mutex);
/*
if (inode_only == LOG_INODE_EXISTS)
max_key_type = BTRFS_XATTR_ITEM_KEY;
- ret = drop_objectid_items(trans, log, path,
- inode->i_ino, max_key_type);
+ ret = drop_objectid_items(trans, log, path, ino, max_key_type);
} else {
ret = btrfs_truncate_inode_items(trans, log, inode, 0, 0);
}
break;
again:
/* note, ins_nr might be > 0 here, cleanup outside the loop */
- if (min_key.objectid != inode->i_ino)
+ if (min_key.objectid != ino)
break;
if (min_key.type > max_key.type)
break;
}
ins_nr = 0;
}
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
if (min_key.offset < (u64)-1)
min_key.offset++;
}
WARN_ON(ins_nr);
if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->i_mode)) {
- btrfs_release_path(root, path);
- btrfs_release_path(log, dst_path);
+ btrfs_release_path(path);
+ btrfs_release_path(dst_path);
ret = log_directory_changes(trans, root, inode, path, dst_path);
if (ret) {
err = ret;
}
btrfs_item_key_to_cpu(path->nodes[0], &found_key,
path->slots[0]);
- btrfs_release_path(log_root_tree, path);
+ btrfs_release_path(path);
if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID)
break;
if (found_key.offset == 0)
break;
}
- btrfs_release_path(log_root_tree, path);
+ btrfs_release_path(path);
/* step one is to pin it all, step two is to replay just inodes */
if (wc.pin) {
return -ENOMEM;
/* lookup the xattr by name */
- di = btrfs_lookup_xattr(NULL, root, path, inode->i_ino, name,
+ di = btrfs_lookup_xattr(NULL, root, path, btrfs_ino(inode), name,
strlen(name), 0);
if (!di) {
ret = -ENODATA;
return -ENOMEM;
/* first lets see if we already have this xattr */
- di = btrfs_lookup_xattr(trans, root, path, inode->i_ino, name,
+ di = btrfs_lookup_xattr(trans, root, path, btrfs_ino(inode), name,
strlen(name), -1);
if (IS_ERR(di)) {
ret = PTR_ERR(di);
ret = btrfs_delete_one_dir_name(trans, root, path, di);
BUG_ON(ret);
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
/* if we don't have a value then we are removing the xattr */
if (!value)
goto out;
} else {
- btrfs_release_path(root, path);
+ btrfs_release_path(path);
if (flags & XATTR_REPLACE) {
/* we couldn't find the attr to replace */
}
/* ok we have to create a completely new xattr */
- ret = btrfs_insert_xattr_item(trans, root, path, inode->i_ino,
+ ret = btrfs_insert_xattr_item(trans, root, path, btrfs_ino(inode),
name, name_len, value, size);
BUG_ON(ret);
out:
* NOTE: we set key.offset = 0; because we want to start with the
* first xattr that we find and walk forward
*/
- key.objectid = inode->i_ino;
+ key.objectid = btrfs_ino(inode);
btrfs_set_key_type(&key, BTRFS_XATTR_ITEM_KEY);
key.offset = 0;