/* SPDX-License-Identifier: GPL-2.0 */

/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#ifndef BTRFS_CTREE_H
#define BTRFS_CTREE_H

#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/rbtree.h>
#include <linux/mutex.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/atomic.h>
#include <linux/xarray.h>
#include <linux/refcount.h>
#include <uapi/linux/btrfs_tree.h>
#include "locking.h"
#include "fs.h"
#include "accessors.h"
#include "extent-io-tree.h"

struct extent_buffer;
struct btrfs_block_rsv;
struct btrfs_trans_handle;
struct btrfs_block_group;

/* Read ahead values for struct btrfs_path.reada */
enum {
	READA_NONE,
	READA_BACK,
	READA_FORWARD,
	/*
	 * Similar to READA_FORWARD but unlike it:
	 *
	 * 1) It will trigger readahead even for leaves that are not close to
	 *    each other on disk;
	 * 2) It also triggers readahead for nodes;
	 * 3) During a search, even when a node or leaf is already in memory,
	 *    it will still trigger readahead for other nodes and leaves that
	 *    follow it.
	 *
	 * This is meant to be used only when we know we are iterating over the
	 * entire tree or a very large part of it.
	 */
	READA_FORWARD_ALWAYS,
};
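
/*
 * Illustrative sketch (not part of the original header): a whole-tree scan
 * would typically opt in to the aggressive readahead mode right after
 * allocating its path, e.g.:
 *
 *	struct btrfs_path *path = btrfs_alloc_path();
 *
 *	if (!path)
 *		return -ENOMEM;
 *	path->reada = READA_FORWARD_ALWAYS;
 */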

/*
 * btrfs_paths remember the path taken from the root down to the leaf.
 * level 0 is always the leaf, and nodes[1...BTRFS_MAX_LEVEL] will point
 * to any other levels that are present.
 *
 * The slots array records the index of the item or block pointer
 * used while walking the tree.
 */
struct btrfs_path {
	struct extent_buffer *nodes[BTRFS_MAX_LEVEL];
	int slots[BTRFS_MAX_LEVEL];
	/* if there is real range locking, this locks field will change */
	u8 locks[BTRFS_MAX_LEVEL];
	u8 reada;
	/* keep some upper locks as we walk down */
	u8 lowest_level;

	/*
	 * set by btrfs_split_item, tells search_slot to keep all locks
	 * and to force calls to keep space in the nodes
	 */
	unsigned int search_for_split:1;
	unsigned int keep_locks:1;
	unsigned int skip_locking:1;
	unsigned int search_commit_root:1;
	unsigned int need_commit_sem:1;
	unsigned int skip_release_on_error:1;
	/*
	 * Indicate that the new item (btrfs_search_slot) is extending an
	 * already existing item, and that ins_len contains only the data size
	 * and not the item header (ie. sizeof(struct btrfs_item) is not
	 * included).
	 */
	unsigned int search_for_extension:1;
	/* Stop the search if any locks need to be taken (for read) */
	unsigned int nowait:1;
};
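
/*
 * Illustrative sketch (not part of the original header; 'ino' and the error
 * handling are placeholders): the common allocate/search/free life cycle of
 * a path, doing a read-only search (no transaction, ins_len == 0, cow == 0):
 *
 *	struct btrfs_path *path = btrfs_alloc_path();
 *	struct btrfs_key key;
 *	int ret;
 *
 *	if (!path)
 *		return -ENOMEM;
 *	key.objectid = ino;
 *	key.type = BTRFS_INODE_ITEM_KEY;
 *	key.offset = 0;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret == 0) {
 *		struct extent_buffer *leaf = path->nodes[0];
 *		int slot = path->slots[0];
 *
 *		... read the item at (leaf, slot) via the accessors ...
 *	}
 *	btrfs_free_path(path);
 */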

/*
 * The state of btrfs root
 */
enum {
	/*
	 * btrfs_record_root_in_trans is a multi-step process, and it can race
	 * with the balancing code. But the race is very small, and it only
	 * matters the first time the root is added to each transaction. So
	 * IN_TRANS_SETUP is used to tell us when more checks are required.
	 */
	BTRFS_ROOT_IN_TRANS_SETUP,

	/*
	 * Set if tree blocks of this root can be shared by other roots.
	 * Only subvolume trees and their reloc trees have this bit set.
	 * Conflicts with the TRACK_DIRTY bit.
	 *
	 * This affects two things:
	 *
	 * - How balance works
	 *   For shareable roots, we need to use the reloc tree and do path
	 *   replacement for balance, and need various pre/post hooks for
	 *   snapshot creation to handle them.
	 *
	 *   For non-shareable trees, we just do a simple tree search with
	 *   path replacement.
	 *
	 * - How dirty roots are tracked
	 *   For shareable roots, btrfs_record_root_in_trans() is needed to
	 *   track them, while non-subvolume roots have the TRACK_DIRTY bit
	 *   and don't need to be tracked manually.
	 */
	BTRFS_ROOT_SHAREABLE,
	BTRFS_ROOT_TRACK_DIRTY,
	BTRFS_ROOT_ORPHAN_ITEM_INSERTED,
	BTRFS_ROOT_DEFRAG_RUNNING,
	BTRFS_ROOT_FORCE_COW,
	BTRFS_ROOT_MULTI_LOG_TASKS,

	/*
	 * Reloc tree is orphan, only kept here for qgroup delayed subtree scan.
	 *
	 * Set for the subvolume tree owning the reloc tree.
	 */
	BTRFS_ROOT_DEAD_RELOC_TREE,
	/* Mark dead root stored on device whose cleanup needs to be resumed */
	BTRFS_ROOT_DEAD_TREE,
	/* The root has a log tree. Used for subvolume roots and the tree root. */
	BTRFS_ROOT_HAS_LOG_TREE,
	/* Qgroup flushing is in progress */
	BTRFS_ROOT_QGROUP_FLUSHING,
	/* We started the orphan cleanup for this root. */
	BTRFS_ROOT_ORPHAN_CLEANUP,
	/* This root has a drop operation that was started previously. */
	BTRFS_ROOT_UNFINISHED_DROP,
	/* This reloc root needs to have its buffers' lockdep class reset. */
	BTRFS_ROOT_RESET_LOCKDEP_CLASS,
};

/*
 * Record swapped tree blocks of a subvolume tree for the delayed subtree
 * trace code. For details see the comment in fs/btrfs/qgroup.c.
 */
struct btrfs_qgroup_swapped_blocks {
	spinlock_t lock;
	/* RB_EMPTY_ROOT() of above blocks[] */
	bool swapped;
	struct rb_root blocks[BTRFS_MAX_LEVEL];
};

/*
 * In-memory representation of the tree. The extent_root is used for all
 * allocations, and for the extent tree itself.
 */
struct btrfs_root {
	struct rb_node rb_node;

	struct extent_buffer *node;

	struct extent_buffer *commit_root;
	struct btrfs_root *log_root;
	struct btrfs_root *reloc_root;

	unsigned long state;
	struct btrfs_root_item root_item;
	struct btrfs_key root_key;
	struct btrfs_fs_info *fs_info;
	struct extent_io_tree dirty_log_pages;

	struct mutex objectid_mutex;

	spinlock_t accounting_lock;
	struct btrfs_block_rsv *block_rsv;

	struct mutex log_mutex;
	wait_queue_head_t log_writer_wait;
	wait_queue_head_t log_commit_wait[2];
	struct list_head log_ctxs[2];
	/* Used only for log trees of subvolumes, not for the log root tree */
	atomic_t log_writers;
	atomic_t log_commit[2];
	/* Used only for log trees of subvolumes, not for the log root tree */
	atomic_t log_batch;
	/*
	 * Protected by the 'log_mutex' lock but can be read without holding
	 * that lock to avoid unnecessary lock contention, in which case it
	 * should be read using btrfs_get_root_log_transid() except if it's a
	 * log tree, in which case it can be directly accessed. Updates to this
	 * field should always use btrfs_set_root_log_transid(), except for log
	 * trees where the field can be updated directly.
	 */
	int log_transid;
	/* No matter whether the commit succeeds or not */
	int log_transid_committed;
	/*
	 * Only updated when the commit succeeds. Use
	 * btrfs_get_root_last_log_commit() and btrfs_set_root_last_log_commit()
	 * to access this field.
	 */
	int last_log_commit;

	u64 last_trans;

	struct btrfs_key defrag_progress;
	struct btrfs_key defrag_max;

	/* The dirty list is only used by non-shareable roots */
	struct list_head dirty_list;

	struct list_head root_list;

	/*
	 * Xarray that keeps track of in-memory inodes, protected by the lock
	 * @inode_lock.
	 */
	struct xarray inodes;

	/*
	 * Xarray that keeps track of delayed nodes of every inode, protected
	 * by @inode_lock.
	 */
	struct xarray delayed_nodes;
	/*
	 * Right now this just gets used so that a root has its own devid
	 * for stat. It may be used for more later.
	 */
	dev_t anon_dev;

	spinlock_t root_item_lock;
	refcount_t refs;

	struct mutex delalloc_mutex;
	spinlock_t delalloc_lock;
	/*
	 * All of the inodes that have delalloc bytes. It is possible for
	 * this list to be empty even when there are still dirty data=ordered
	 * extents waiting to finish IO.
	 */
	struct list_head delalloc_inodes;
	struct list_head delalloc_root;
	u64 nr_delalloc_inodes;

	struct mutex ordered_extent_mutex;
	/*
	 * This is used by the balancing code to wait for all the pending
	 * ordered extents.
	 */
	spinlock_t ordered_extent_lock;

	/*
	 * All of the data=ordered extents pending writeback. These can span
	 * multiple transactions and basically include every dirty data page
	 * that isn't from nodatacow.
	 */
	struct list_head ordered_extents;
	struct list_head ordered_root;
	u64 nr_ordered_extents;

	/*
	 * Not empty if this subvolume root has gone through tree block swap
	 * (relocation).
	 *
	 * Will be used by reloc_control::dirty_subvol_roots.
	 */
	struct list_head reloc_dirty_list;

	/*
	 * Number of currently running SEND ioctls, to prevent manipulation
	 * of the read-only status via SUBVOL_SETFLAGS.
	 */
	int send_in_progress;
	/*
	 * Number of currently running deduplication operations that have a
	 * destination inode belonging to this root. Protected by the lock
	 * root_item_lock.
	 */
	int dedupe_in_progress;
	/* For exclusion of snapshot creation and nocow writes */
	struct btrfs_drew_lock snapshot_lock;

	atomic_t snapshot_force_cow;

	/* For qgroup metadata reserved space */
	spinlock_t qgroup_meta_rsv_lock;
	u64 qgroup_meta_rsv_pertrans;
	u64 qgroup_meta_rsv_prealloc;
	wait_queue_head_t qgroup_flush_wait;

	/* Number of active swapfiles */
	atomic_t nr_swapfiles;

	/* Record pairs of swapped blocks for qgroup */
	struct btrfs_qgroup_swapped_blocks swapped_blocks;

	/* Used only by log trees, when logging csum items */
	struct extent_io_tree log_csum_range;

	/* Used in simple quotas, tracks the root during relocation. */
	u64 relocation_src_root;

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
	u64 alloc_bytenr;
#endif

#ifdef CONFIG_BTRFS_DEBUG
	struct list_head leak_list;
#endif
};

static inline bool btrfs_root_readonly(const struct btrfs_root *root)
{
	/* Byte-swap the constant at compile time, root_item::flags is LE */
	return (root->root_item.flags & cpu_to_le64(BTRFS_ROOT_SUBVOL_RDONLY)) != 0;
}

static inline bool btrfs_root_dead(const struct btrfs_root *root)
{
	/* Byte-swap the constant at compile time, root_item::flags is LE */
	return (root->root_item.flags & cpu_to_le64(BTRFS_ROOT_SUBVOL_DEAD)) != 0;
}

static inline u64 btrfs_root_id(const struct btrfs_root *root)
{
	return root->root_key.objectid;
}

static inline int btrfs_get_root_log_transid(const struct btrfs_root *root)
{
	return READ_ONCE(root->log_transid);
}

static inline void btrfs_set_root_log_transid(struct btrfs_root *root, int log_transid)
{
	WRITE_ONCE(root->log_transid, log_transid);
}

static inline int btrfs_get_root_last_log_commit(const struct btrfs_root *root)
{
	return READ_ONCE(root->last_log_commit);
}

static inline void btrfs_set_root_last_log_commit(struct btrfs_root *root, int commit_id)
{
	WRITE_ONCE(root->last_log_commit, commit_id);
}

static inline u64 btrfs_get_root_last_trans(const struct btrfs_root *root)
{
	return READ_ONCE(root->last_trans);
}

static inline void btrfs_set_root_last_trans(struct btrfs_root *root, u64 transid)
{
	WRITE_ONCE(root->last_trans, transid);
}
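
/*
 * Illustrative sketch (not part of the original header): per the comment on
 * 'log_transid' above, readers outside of a log tree go through the helper
 * rather than touching the field directly:
 *
 *	int log_transid = btrfs_get_root_log_transid(root);
 */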

/*
 * Structure that conveys information about an extent that is going to replace
 * all the extents in a file range.
 */
struct btrfs_replace_extent_info {
	u64 disk_offset;
	u64 disk_len;
	u64 data_offset;
	u64 data_len;
	u64 file_offset;
	/* Pointer to a file extent item of type regular or prealloc. */
	char *extent_buf;
	/*
	 * Set to true when attempting to replace a file range with a new extent
	 * described by this structure, set to false when attempting to clone an
	 * existing extent into a file range.
	 */
	bool is_new_extent;
	/* Indicate if we should update the inode's mtime and ctime. */
	bool update_times;
	/* Meaningful only if is_new_extent is true. */
	int qgroup_reserved;
	/*
	 * Meaningful only if is_new_extent is true.
	 * Used to track how many extent items we have already inserted in a
	 * subvolume tree that refer to the extent described by this structure,
	 * so that we know when to create a new delayed ref or update an
	 * existing one.
	 */
	int insertions;
};

/* Arguments for btrfs_drop_extents() */
struct btrfs_drop_extents_args {
	/* Input parameters */

	/*
	 * If NULL, btrfs_drop_extents() will allocate and free its own path.
	 * If 'replace_extent' is true, this must not be NULL. Also the path
	 * is always released except if 'replace_extent' is true and
	 * btrfs_drop_extents() sets 'extent_inserted' to true, in which case
	 * the path is kept locked.
	 */
	struct btrfs_path *path;
	/* Start offset of the range to drop extents from */
	u64 start;
	/* End (exclusive, last byte + 1) of the range to drop extents from */
	u64 end;
	/* If true, drop all the extent maps in the range */
	bool drop_cache;
	/*
	 * If true, it means we want to insert a new extent after dropping all
	 * the extents in the range. If this is true, the 'extent_item_size'
	 * parameter must be set as well and the 'extent_inserted' field will
	 * be set to true by btrfs_drop_extents() if it could insert the new
	 * extent.
	 * Note: when this is set to true the path must not be NULL.
	 */
	bool replace_extent;
	/*
	 * Used if 'replace_extent' is true. Size of the file extent item to
	 * insert after dropping all existing extents in the range.
	 */
	u32 extent_item_size;

	/* Output parameters */

	/*
	 * Set to the minimum between the input parameter 'end' and the end
	 * (exclusive, last byte + 1) of the last dropped extent. This is always
	 * set even if btrfs_drop_extents() returns an error.
	 */
	u64 drop_end;
	/*
	 * The number of allocated bytes found in the range. This can be smaller
	 * than the range's length when there are holes in the range.
	 */
	u64 bytes_found;
	/*
	 * Only set if 'replace_extent' is true. Set to true if we were able
	 * to insert a replacement extent after dropping all extents in the
	 * range, otherwise set to false by btrfs_drop_extents().
	 * Also, if btrfs_drop_extents() has set this to true it means it
	 * returned with the path locked, otherwise if it has set this to
	 * false it has returned with the path released.
	 */
	bool extent_inserted;
};
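
/*
 * Illustrative sketch (not part of the original header; 'start' and 'len'
 * are placeholders, and btrfs_drop_extents() itself is declared elsewhere,
 * in file.h in recent trees): dropping all the extents in a range without
 * inserting a replacement extent:
 *
 *	struct btrfs_drop_extents_args args = { 0 };
 *	int ret;
 *
 *	args.start = start;
 *	args.end = start + len;
 *	args.drop_cache = true;
 *	ret = btrfs_drop_extents(trans, root, inode, &args);
 */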

struct btrfs_file_private {
	void *filldir_buf;
	u64 last_index;
	struct extent_state *llseek_cached_state;
	bool fsync_skip_inode_lock;
};

static inline u32 BTRFS_LEAF_DATA_SIZE(const struct btrfs_fs_info *info)
{
	return info->nodesize - sizeof(struct btrfs_header);
}

static inline u32 BTRFS_MAX_ITEM_SIZE(const struct btrfs_fs_info *info)
{
	return BTRFS_LEAF_DATA_SIZE(info) - sizeof(struct btrfs_item);
}

static inline u32 BTRFS_NODEPTRS_PER_BLOCK(const struct btrfs_fs_info *info)
{
	return BTRFS_LEAF_DATA_SIZE(info) / sizeof(struct btrfs_key_ptr);
}

static inline u32 BTRFS_MAX_XATTR_SIZE(const struct btrfs_fs_info *info)
{
	return BTRFS_MAX_ITEM_SIZE(info) - sizeof(struct btrfs_dir_item);
}

#define BTRFS_BYTES_TO_BLKS(fs_info, bytes) \
				((bytes) >> (fs_info)->sectorsize_bits)
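
/*
 * Illustrative note (not part of the original header): with a 4 KiB sector
 * size, sectorsize_bits is 12, so BTRFS_BYTES_TO_BLKS(fs_info, 8192)
 * evaluates to 8192 >> 12 == 2 blocks.
 */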

static inline gfp_t btrfs_alloc_write_mask(struct address_space *mapping)
{
	return mapping_gfp_constraint(mapping, ~__GFP_FS);
}

void btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info, u64 start, u64 end);
int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
			 u64 num_bytes, u64 *actual_bytes);
int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range);

/* ctree.c */
int __init btrfs_ctree_init(void);
void __cold btrfs_ctree_exit(void);

int btrfs_bin_search(struct extent_buffer *eb, int first_slot,
		     const struct btrfs_key *key, int *slot);

int __pure btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2);

#ifdef __LITTLE_ENDIAN

/*
 * Compare two keys: on little-endian the disk order is the same as the CPU
 * order and we can avoid the conversion.
 */
static inline int btrfs_comp_keys(const struct btrfs_disk_key *disk_key,
				  const struct btrfs_key *k2)
{
	const struct btrfs_key *k1 = (const struct btrfs_key *)disk_key;

	return btrfs_comp_cpu_keys(k1, k2);
}

#else

/* Compare two keys in a memcmp fashion. */
static inline int btrfs_comp_keys(const struct btrfs_disk_key *disk,
				  const struct btrfs_key *k2)
{
	struct btrfs_key k1;

	btrfs_disk_key_to_cpu(&k1, disk);

	return btrfs_comp_cpu_keys(&k1, k2);
}

#endif

int btrfs_previous_item(struct btrfs_root *root,
			struct btrfs_path *path, u64 min_objectid,
			int type);
int btrfs_previous_extent_item(struct btrfs_root *root,
			       struct btrfs_path *path, u64 min_objectid);
void btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
			     struct btrfs_path *path,
			     const struct btrfs_key *new_key);
struct extent_buffer *btrfs_root_node(struct btrfs_root *root);
int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
			struct btrfs_key *key, int lowest_level,
			u64 min_trans);
int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
			 struct btrfs_path *path,
			 u64 min_trans);
struct extent_buffer *btrfs_read_node_slot(struct extent_buffer *parent,
					   int slot);
int btrfs_cow_block(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root, struct extent_buffer *buf,
		    struct extent_buffer *parent, int parent_slot,
		    struct extent_buffer **cow_ret,
		    enum btrfs_lock_nesting nest);
int btrfs_force_cow_block(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root,
			  struct extent_buffer *buf,
			  struct extent_buffer *parent, int parent_slot,
			  struct extent_buffer **cow_ret,
			  u64 search_start, u64 empty_size,
			  enum btrfs_lock_nesting nest);
int btrfs_copy_root(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root,
		    struct extent_buffer *buf,
		    struct extent_buffer **cow_ret, u64 new_root_objectid);
bool btrfs_block_can_be_shared(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct extent_buffer *buf);
int btrfs_del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		  struct btrfs_path *path, int level, int slot);
void btrfs_extend_item(struct btrfs_trans_handle *trans,
		       struct btrfs_path *path, u32 data_size);
void btrfs_truncate_item(struct btrfs_trans_handle *trans,
			 struct btrfs_path *path, u32 new_size, int from_end);
int btrfs_split_item(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_path *path,
		     const struct btrfs_key *new_key,
		     unsigned long split_offset);
int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 struct btrfs_path *path,
			 const struct btrfs_key *new_key);
int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path,
		    u64 inum, u64 ioff, u8 key_type, struct btrfs_key *found_key);
int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      const struct btrfs_key *key, struct btrfs_path *p,
		      int ins_len, int cow);
int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key,
			  struct btrfs_path *p, u64 time_seq);
int btrfs_search_slot_for_read(struct btrfs_root *root,
			       const struct btrfs_key *key,
			       struct btrfs_path *p, int find_higher,
			       int return_any);
void btrfs_release_path(struct btrfs_path *p);
struct btrfs_path *btrfs_alloc_path(void);
void btrfs_free_path(struct btrfs_path *p);

int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		    struct btrfs_path *path, int slot, int nr);
static inline int btrfs_del_item(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path)
{
	return btrfs_del_items(trans, root, path, path->slots[0], 1);
}

/*
 * Describes a batch of items to insert in a btree. This is used by
 * btrfs_insert_empty_items().
 */
struct btrfs_item_batch {
	/*
	 * Pointer to an array containing the keys of the items to insert (in
	 * sorted order).
	 */
	const struct btrfs_key *keys;
	/* Pointer to an array containing the data size for each item to insert. */
	const u32 *data_sizes;
	/*
	 * The sum of data sizes for all items. The caller can compute this while
	 * setting up the data_sizes array, so it ends up being more efficient
	 * than having btrfs_insert_empty_items() or setup_item_for_insert()
	 * doing it, as it would avoid an extra loop over a potentially large
	 * array, and in the case of setup_item_for_insert(), we would be doing
	 * it while holding a write lock on a leaf and often on upper level nodes
	 * too, unnecessarily increasing the size of a critical section.
	 */
	u32 total_data_size;
	/* Size of the keys and data_sizes arrays (number of items in the batch). */
	int nr;
};
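
/*
 * Illustrative sketch (not part of the original header; 'first_size' and
 * 'second_size' are placeholders): inserting two empty items in one batch.
 * The keys must be in sorted order:
 *
 *	struct btrfs_item_batch batch;
 *	struct btrfs_key keys[2];
 *	u32 sizes[2] = { first_size, second_size };
 *	int ret;
 *
 *	... fill in keys[0] and keys[1], in sorted order ...
 *	batch.keys = keys;
 *	batch.data_sizes = sizes;
 *	batch.total_data_size = sizes[0] + sizes[1];
 *	batch.nr = 2;
 *	ret = btrfs_insert_empty_items(trans, root, path, &batch);
 */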

void btrfs_setup_item_for_insert(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 const struct btrfs_key *key,
				 u32 data_size);
int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      const struct btrfs_key *key, void *data, u32 data_size);
int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path,
			     const struct btrfs_item_batch *batch);

static inline int btrfs_insert_empty_item(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  const struct btrfs_key *key,
					  u32 data_size)
{
	struct btrfs_item_batch batch;

	batch.keys = key;
	batch.data_sizes = &data_size;
	batch.total_data_size = data_size;
	batch.nr = 1;

	return btrfs_insert_empty_items(trans, root, path, &batch);
}

int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
			u64 time_seq);

int btrfs_search_backwards(struct btrfs_root *root, struct btrfs_key *key,
			   struct btrfs_path *path);

int btrfs_get_next_valid_item(struct btrfs_root *root, struct btrfs_key *key,
			      struct btrfs_path *path);

/*
 * Search in @root for a given @key, and store the slot found in @found_key.
 *
 * @root:	The root node of the tree.
 * @key:	The key we are looking for.
 * @found_key:	Will hold the found item.
 * @path:	Holds the current slot/leaf.
 * @iter_ret:	Contains the value returned from btrfs_search_slot or
 *		btrfs_get_next_valid_item, whichever was executed last.
 *
 * The @iter_ret is an output variable that will contain the return value of
 * btrfs_search_slot, if it encountered an error, or the value returned from
 * btrfs_get_next_valid_item otherwise. That return value can be 0, if a valid
 * slot was found, 1 if there were no more leaves, and <0 if there was an error.
 *
 * It's recommended to use a separate variable for iter_ret and then use it to
 * set the function return value, so there's no confusion of the 0/1/errno
 * values stemming from btrfs_search_slot.
 */
#define btrfs_for_each_slot(root, key, found_key, path, iter_ret)		\
	for (iter_ret = btrfs_search_slot(NULL, (root), (key), (path), 0, 0);	\
		(iter_ret) >= 0 &&						\
			(iter_ret = btrfs_get_next_valid_item((root), (found_key), (path))) == 0; \
		(path)->slots[0]++)
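
/*
 * Illustrative sketch (not part of the original header): a typical iteration,
 * keeping iter_ret separate from the function's return value as recommended
 * above:
 *
 *	struct btrfs_key key = { .objectid = 0, .type = 0, .offset = 0 };
 *	struct btrfs_key found_key;
 *	int iter_ret = 0;
 *	int ret = 0;
 *
 *	btrfs_for_each_slot(root, &key, &found_key, path, iter_ret) {
 *		... process the item at path->nodes[0] / path->slots[0] ...
 *	}
 *	if (iter_ret < 0)
 *		ret = iter_ret;
 */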

int btrfs_next_old_item(struct btrfs_root *root, struct btrfs_path *path, u64 time_seq);

/*
 * Search the tree again to find a leaf with greater keys.
 *
 * Returns 0 if it found something or 1 if there are no greater leaves.
 * Returns < 0 on error.
 */
static inline int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
{
	return btrfs_next_old_leaf(root, path, 0);
}

static inline int btrfs_next_item(struct btrfs_root *root, struct btrfs_path *p)
{
	return btrfs_next_old_item(root, p, 0);
}

int btrfs_leaf_free_space(const struct extent_buffer *leaf);

static inline int is_fstree(u64 rootid)
{
	if (rootid == BTRFS_FS_TREE_OBJECTID ||
	    ((s64)rootid >= (s64)BTRFS_FIRST_FREE_OBJECTID &&
	      !btrfs_qgroup_level(rootid)))
		return 1;
	return 0;
}

static inline bool btrfs_is_data_reloc_root(const struct btrfs_root *root)
{
	return root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID;
}

u16 btrfs_csum_type_size(u16 type);
int btrfs_super_csum_size(const struct btrfs_super_block *s);
const char *btrfs_super_csum_name(u16 csum_type);
const char *btrfs_super_csum_driver(u16 csum_type);
size_t __attribute_const__ btrfs_get_num_csums(void);

/*
 * We use the page's Private2 status to indicate there is an ordered extent
 * with unfinished IO.
 *
 * Rename the Private2 accessors to Ordered, to improve readability.
 */
#define PageOrdered(page)		PagePrivate2(page)
#define SetPageOrdered(page)		SetPagePrivate2(page)
#define ClearPageOrdered(page)		ClearPagePrivate2(page)
#define folio_test_ordered(folio)	folio_test_private_2(folio)
#define folio_set_ordered(folio)	folio_set_private_2(folio)
#define folio_clear_ordered(folio)	folio_clear_private_2(folio)

#endif