/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#ifndef BTRFS_CTREE_H
#define BTRFS_CTREE_H

#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/highmem.h>
#include <linux/fs.h>
#include <linux/rwsem.h>
#include <linux/semaphore.h>
#include <linux/completion.h>
#include <linux/backing-dev.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <trace/events/btrfs.h>
#include <asm/unaligned.h>
#include <linux/pagemap.h>
#include <linux/btrfs.h>
#include <linux/btrfs_tree.h>
#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/sizes.h>
#include <linux/dynamic_debug.h>
#include <linux/refcount.h>
#include <linux/crc32c.h>
#include <linux/iomap.h>
#include "extent-io-tree.h"
#include "extent_io.h"
#include "extent_map.h"
#include "async-thread.h"
#include "block-rsv.h"
struct btrfs_trans_handle;
struct btrfs_transaction;
struct btrfs_pending_snapshot;
struct btrfs_delayed_ref_root;
struct btrfs_space_info;
struct btrfs_block_group;
struct btrfs_ordered_sum;
struct btrfs_ref;
struct btrfs_bio;
struct btrfs_ioctl_encoded_io_args;
struct btrfs_device;
struct btrfs_fs_devices;
struct btrfs_balance_control;
struct btrfs_delayed_root;
#define BTRFS_OLDEST_GENERATION	0ULL

#define BTRFS_EMPTY_DIR_SIZE 0

#define BTRFS_DIRTY_METADATA_THRESH	SZ_32M

#define BTRFS_MAX_EXTENT_SIZE SZ_128M
static inline unsigned long btrfs_chunk_item_size(int num_stripes)
{
	BUG_ON(num_stripes == 0);
	return sizeof(struct btrfs_chunk) +
		sizeof(struct btrfs_stripe) * (num_stripes - 1);
}
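/*
 * A worked example (illustrative, not part of the on-disk format definition):
 * struct btrfs_chunk already embeds one struct btrfs_stripe, which is why
 * only num_stripes - 1 extra stripe entries are added.  With num_stripes == 2
 * the item size is sizeof(struct btrfs_chunk) plus one additional
 * sizeof(struct btrfs_stripe).
 */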
#define BTRFS_SUPER_INFO_OFFSET			SZ_64K
#define BTRFS_SUPER_INFO_SIZE			4096
static_assert(sizeof(struct btrfs_super_block) == BTRFS_SUPER_INFO_SIZE);
/*
 * The reserved space at the beginning of each device.  It covers the primary
 * super block and leaves space for potential use by other tools like
 * bootloaders, or to lower the potential damage from an accidental overwrite.
 */
#define BTRFS_DEVICE_RANGE_RESERVED	(SZ_1M)
/* Read ahead values for struct btrfs_path.reada */
enum {
	READA_NONE,
	READA_BACK,
	READA_FORWARD,
	/*
	 * Similar to READA_FORWARD but unlike it:
	 *
	 * 1) It will trigger readahead even for leaves that are not close to
	 *    each other on disk;
	 * 2) It also triggers readahead for nodes;
	 * 3) During a search, even when a node or leaf is already in memory,
	 *    it will still trigger readahead for other nodes and leaves that
	 *    follow it.
	 *
	 * This is meant to be used only when we know we are iterating over the
	 * entire tree or a very large part of it.
	 */
	READA_FORWARD_ALWAYS,
};
/*
 * btrfs_paths remember the path taken from the root down to the leaf.
 * level 0 is always the leaf, and nodes[1...BTRFS_MAX_LEVEL] will point
 * to any other levels that are present.
 *
 * The slots array records the index of the item or block pointer
 * used while walking the tree.
 */
struct btrfs_path {
	struct extent_buffer *nodes[BTRFS_MAX_LEVEL];
	int slots[BTRFS_MAX_LEVEL];
	/* if there is real range locking, this locks field will change */
	u8 locks[BTRFS_MAX_LEVEL];
	u8 reada;
	/* keep some upper locks as we walk down */
	u8 lowest_level;

	/*
	 * set by btrfs_split_item, tells search_slot to keep all locks
	 * and to force calls to keep space in the nodes
	 */
	unsigned int search_for_split:1;
	unsigned int keep_locks:1;
	unsigned int skip_locking:1;
	unsigned int search_commit_root:1;
	unsigned int need_commit_sem:1;
	unsigned int skip_release_on_error:1;
	/*
	 * Indicate that the new item (btrfs_search_slot) is extending an
	 * already existing item and ins_len contains only the data size and
	 * not the item header (ie. sizeof(struct btrfs_item) is not included).
	 */
	unsigned int search_for_extension:1;
	/* Stop search if any locks need to be taken (for read) */
	unsigned int nowait:1;
};
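/*
 * Typical read-only usage, as a rough sketch (error handling trimmed, the
 * key values are placeholders):
 *
 *	struct btrfs_path *path = btrfs_alloc_path();
 *	struct btrfs_key key = {
 *		.objectid = ino,
 *		.type = BTRFS_INODE_ITEM_KEY,
 *		.offset = 0,
 *	};
 *	int ret;
 *
 *	if (!path)
 *		return -ENOMEM;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	btrfs_free_path(path);
 *
 * On ret == 0, path->nodes[0] is the leaf holding the item and
 * path->slots[0] is its slot in that leaf.
 */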
struct btrfs_dev_replace {
	u64 replace_state;	/* see #define above */
	time64_t time_started;	/* seconds since 1-Jan-1970 */
	time64_t time_stopped;	/* seconds since 1-Jan-1970 */
	atomic64_t num_write_errors;
	atomic64_t num_uncorrectable_read_errors;

	u64 cursor_left;
	u64 committed_cursor_left;
	u64 cursor_left_last_write_of_item;
	u64 cursor_right;

	u64 cont_reading_from_srcdev_mode;	/* see #define above */

	int is_valid;
	int item_needs_writeback;
	struct btrfs_device *srcdev;
	struct btrfs_device *tgtdev;

	struct mutex lock_finishing_cancel_unmount;
	struct rw_semaphore rwsem;

	struct btrfs_scrub_progress scrub_progress;

	struct percpu_counter bio_counter;
	wait_queue_head_t replace_wait;
};
/*
 * free clusters are used to claim free space in relatively large chunks,
 * allowing us to do less seeky writes.  They are used for all metadata
 * allocations.  In ssd_spread mode they are also used for data allocations.
 */
struct btrfs_free_cluster {
	spinlock_t lock;
	spinlock_t refill_lock;
	struct rb_root root;

	/* largest extent in this cluster */
	u64 max_size;

	/* first extent starting offset */
	u64 window_start;

	/* We did a full search and couldn't create a cluster */
	bool fragmented;

	struct btrfs_block_group *block_group;
	/*
	 * when a cluster is allocated from a block group, we put the cluster
	 * onto a list in the block group so that it can be freed before the
	 * block group is freed.
	 */
	struct list_head block_group_list;
};
/* Discard control. */
/*
 * Async discard uses multiple lists to differentiate the discard filter
 * parameters.  Index 0 is for completely free block groups where we need to
 * ensure the entire block group is trimmed without being lossy.  Indices
 * afterwards represent monotonically decreasing discard filter sizes to
 * prioritize what should be discarded next.
 */
#define BTRFS_NR_DISCARD_LISTS		3
#define BTRFS_DISCARD_INDEX_UNUSED	0
#define BTRFS_DISCARD_INDEX_START	1
struct btrfs_discard_ctl {
	struct workqueue_struct *discard_workers;
	struct delayed_work work;
	spinlock_t lock;
	struct btrfs_block_group *block_group;
	struct list_head discard_list[BTRFS_NR_DISCARD_LISTS];
	u64 prev_discard;
	u64 prev_discard_time;
	atomic_t discardable_extents;
	atomic64_t discardable_bytes;
	u64 max_discard_size;
	u64 delay_ms;
	u32 iops_limit;
	u32 kbps_limit;
	u64 discard_extent_bytes;
	u64 discard_bitmap_bytes;
	atomic64_t discard_bytes_saved;
};
/*
 * Exclusive operations (device replace, resize, device add/remove, balance)
 */
enum btrfs_exclusive_operation {
	BTRFS_EXCLOP_NONE,
	BTRFS_EXCLOP_BALANCE_PAUSED,
	BTRFS_EXCLOP_BALANCE,
	BTRFS_EXCLOP_DEV_ADD,
	BTRFS_EXCLOP_DEV_REMOVE,
	BTRFS_EXCLOP_DEV_REPLACE,
	BTRFS_EXCLOP_RESIZE,
	BTRFS_EXCLOP_SWAP_ACTIVATE,
};
/* Store data about transaction commits, exported via sysfs. */
struct btrfs_commit_stats {
	/* Total number of commits */
	u64 commit_count;
	/* The maximum commit duration so far in ns */
	u64 max_commit_dur;
	/* The last commit duration in ns */
	u64 last_commit_dur;
	/* The total commit duration in ns */
	u64 total_commit_dur;
};
struct btrfs_fs_info {
	u8 chunk_tree_uuid[BTRFS_UUID_SIZE];
	unsigned long flags;
	struct btrfs_root *tree_root;
	struct btrfs_root *chunk_root;
	struct btrfs_root *dev_root;
	struct btrfs_root *fs_root;
	struct btrfs_root *quota_root;
	struct btrfs_root *uuid_root;
	struct btrfs_root *data_reloc_root;
	struct btrfs_root *block_group_root;

	/* the log root tree is a directory of all the other log roots */
	struct btrfs_root *log_root_tree;

	/* The tree that holds the global roots (csum, extent, etc) */
	rwlock_t global_root_lock;
	struct rb_root global_root_tree;

	spinlock_t fs_roots_radix_lock;
	struct radix_tree_root fs_roots_radix;

	/* block group cache stuff */
	rwlock_t block_group_cache_lock;
	struct rb_root_cached block_group_cache_tree;

	/* keep track of unallocated space */
	atomic64_t free_chunk_space;

	/* Track ranges which are used by log trees blocks/logged data extents */
	struct extent_io_tree excluded_extents;

	/* logical->physical extent mapping */
	struct extent_map_tree mapping_tree;

	/*
	 * block reservation for extent, checksum, root tree and
	 * delayed dir index item
	 */
	struct btrfs_block_rsv global_block_rsv;
	/* block reservation for metadata operations */
	struct btrfs_block_rsv trans_block_rsv;
	/* block reservation for chunk tree */
	struct btrfs_block_rsv chunk_block_rsv;
	/* block reservation for delayed operations */
	struct btrfs_block_rsv delayed_block_rsv;
	/* block reservation for delayed refs */
	struct btrfs_block_rsv delayed_refs_rsv;

	struct btrfs_block_rsv empty_block_rsv;

	u64 generation;
	u64 last_trans_committed;
	/*
	 * Generation of the last transaction used for block group relocation
	 * since the filesystem was last mounted (or 0 if none happened yet).
	 * Must be written and read while holding btrfs_fs_info::commit_root_sem.
	 */
	u64 last_reloc_trans;
	u64 avg_delayed_ref_runtime;

	/*
	 * this is updated to the current trans every time a full commit
	 * is required instead of the faster short fsync log commits
	 */
	u64 last_trans_log_full_commit;
	unsigned long mount_opt;
	unsigned long compress_type:4;
	unsigned int compress_level;
	u32 commit_interval;
	/*
	 * This is an advisory value; the read side is safe even if it sees a
	 * stale number, because we will write out the data into a regular
	 * extent anyway.  The write side (mount/remount) is under the
	 * ->s_umount lock, so it is also safe.
	 */
	u64 max_inline;

	struct btrfs_transaction *running_transaction;
	wait_queue_head_t transaction_throttle;
	wait_queue_head_t transaction_wait;
	wait_queue_head_t transaction_blocked_wait;
	wait_queue_head_t async_submit_wait;

	/*
	 * Used to protect the incompat_flags, compat_flags, compat_ro_flags
	 * when they are updated.
	 *
	 * Because we never clear the flags, we don't need to take the lock on
	 * the read side.
	 *
	 * We also don't need the lock at mount time, because there is no
	 * other task that could be updating the flags then.
	 */
	spinlock_t super_lock;
	struct btrfs_super_block *super_copy;
	struct btrfs_super_block *super_for_commit;
	struct super_block *sb;
	struct inode *btree_inode;
	struct mutex tree_log_mutex;
	struct mutex transaction_kthread_mutex;
	struct mutex cleaner_mutex;
	struct mutex chunk_mutex;
	/*
	 * this is taken to make sure we don't set block groups ro after
	 * the free space cache has been allocated on them
	 */
	struct mutex ro_block_group_mutex;

	/* this is used during read/modify/write to make sure
	 * no two ios are trying to mod the same stripe at the same
	 * time
	 */
	struct btrfs_stripe_hash_table *stripe_hash_table;

	/*
	 * this protects the ordered operations list only while we are
	 * processing all of the entries on it.  This way we make
	 * sure the commit code doesn't find the list temporarily empty
	 * because another function happens to be doing non-waiting preflush
	 * before jumping into the main commit.
	 */
	struct mutex ordered_operations_mutex;

	struct rw_semaphore commit_root_sem;

	struct rw_semaphore cleanup_work_sem;

	struct rw_semaphore subvol_sem;

	spinlock_t trans_lock;
	/*
	 * the reloc mutex goes with the trans lock, it is taken
	 * during commit to protect us from the relocation code
	 */
	struct mutex reloc_mutex;

	struct list_head trans_list;
	struct list_head dead_roots;
	struct list_head caching_block_groups;

	spinlock_t delayed_iput_lock;
	struct list_head delayed_iputs;
	atomic_t nr_delayed_iputs;
	wait_queue_head_t delayed_iputs_wait;

	atomic64_t tree_mod_seq;

	/* this protects tree_mod_log and tree_mod_seq_list */
	rwlock_t tree_mod_log_lock;
	struct rb_root tree_mod_log;
	struct list_head tree_mod_seq_list;

	atomic_t async_delalloc_pages;
	/*
	 * this is used to protect the following list -- ordered_roots.
	 */
	spinlock_t ordered_root_lock;

	/*
	 * all fs/file tree roots in which there are data=ordered extents
	 * pending writeback are added into this list.
	 *
	 * these can span multiple transactions and basically include
	 * every dirty data page that isn't from nodatacow
	 */
	struct list_head ordered_roots;

	struct mutex delalloc_root_mutex;
	spinlock_t delalloc_root_lock;
	/* all fs/file tree roots that have delalloc inodes. */
	struct list_head delalloc_roots;

	/*
	 * there is a pool of worker threads for checksumming during writes
	 * and a pool for checksumming after reads.  This is because readers
	 * can run with FS locks held, and the writers may be waiting for
	 * those locks.  We don't want ordering in the pending list to cause
	 * deadlocks, and so the two are serviced separately.
	 *
	 * A third pool does submit_bio to avoid deadlocking with the other
	 * two.
	 */
	struct btrfs_workqueue *workers;
	struct btrfs_workqueue *hipri_workers;
	struct btrfs_workqueue *delalloc_workers;
	struct btrfs_workqueue *flush_workers;
	struct workqueue_struct *endio_workers;
	struct workqueue_struct *endio_meta_workers;
	struct workqueue_struct *endio_raid56_workers;
	struct workqueue_struct *rmw_workers;
	struct workqueue_struct *compressed_write_workers;
	struct btrfs_workqueue *endio_write_workers;
	struct btrfs_workqueue *endio_freespace_worker;
	struct btrfs_workqueue *caching_workers;

	/*
	 * fixup workers take dirty pages that didn't properly go through the
	 * cow mechanism and make them safe to write.  It happens for the
	 * sys_munmap function call path.
	 */
	struct btrfs_workqueue *fixup_workers;
	struct btrfs_workqueue *delayed_workers;

	struct task_struct *transaction_kthread;
	struct task_struct *cleaner_kthread;
	u32 thread_pool_size;

	struct kobject *space_info_kobj;
	struct kobject *qgroups_kobj;
	struct kobject *discard_kobj;

	/* used to keep from writing metadata until there is a nice batch */
	struct percpu_counter dirty_metadata_bytes;
	struct percpu_counter delalloc_bytes;
	struct percpu_counter ordered_bytes;
	s32 dirty_metadata_batch;
	s32 delalloc_batch;
	struct list_head dirty_cowonly_roots;

	struct btrfs_fs_devices *fs_devices;

	/*
	 * The space_info list is effectively read only after initial setup.
	 * It is populated at mount time and cleaned up after all block groups
	 * are removed.  RCU is used to protect it.
	 */
	struct list_head space_info;

	struct btrfs_space_info *data_sinfo;

	struct reloc_control *reloc_ctl;

	/* data_alloc_cluster is only used in ssd_spread mode */
	struct btrfs_free_cluster data_alloc_cluster;

	/* all metadata allocations go through this cluster */
	struct btrfs_free_cluster meta_alloc_cluster;

	/* auto defrag inodes go here */
	spinlock_t defrag_inodes_lock;
	struct rb_root defrag_inodes;
	atomic_t defrag_running;

	/* Used to protect avail_{data, metadata, system}_alloc_bits */
	seqlock_t profiles_lock;
	/*
	 * these three are in extended format (availability of single
	 * chunks is denoted by BTRFS_AVAIL_ALLOC_BIT_SINGLE bit, other
	 * types are denoted by corresponding BTRFS_BLOCK_GROUP_* bits)
	 */
	u64 avail_data_alloc_bits;
	u64 avail_metadata_alloc_bits;
	u64 avail_system_alloc_bits;

	/* restriper state */
	spinlock_t balance_lock;
	struct mutex balance_mutex;
	atomic_t balance_pause_req;
	atomic_t balance_cancel_req;
	struct btrfs_balance_control *balance_ctl;
	wait_queue_head_t balance_wait_q;

	/* Cancellation requests for chunk relocation */
	atomic_t reloc_cancel_req;

	u32 data_chunk_allocations;
	u32 metadata_ratio;

	void *bdev_holder;

	/* private scrub information */
	struct mutex scrub_lock;
	atomic_t scrubs_running;
	atomic_t scrub_pause_req;
	atomic_t scrubs_paused;
	atomic_t scrub_cancel_req;
	wait_queue_head_t scrub_pause_wait;
	/*
	 * The worker pointers are NULL iff the refcount is 0, ie. scrub is
	 * not running.
	 */
	refcount_t scrub_workers_refcnt;
	struct workqueue_struct *scrub_workers;
	struct workqueue_struct *scrub_wr_completion_workers;
	struct workqueue_struct *scrub_parity_workers;
	struct btrfs_subpage_info *subpage_info;

	struct btrfs_discard_ctl discard_ctl;

#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
	u32 check_integrity_print_mask;
#endif
	/* is qgroup tracking in a consistent state? */
	u64 qgroup_flags;

	/* holds configuration and tracking.  Protected by qgroup_lock */
	struct rb_root qgroup_tree;
	spinlock_t qgroup_lock;

	/*
	 * used to avoid frequently calling ulist_alloc()/ulist_free()
	 * when doing qgroup accounting, it must be protected by qgroup_lock.
	 */
	struct ulist *qgroup_ulist;

	/*
	 * Protect user change for quota operations.  If a transaction is
	 * needed, it must be started before locking this lock.
	 */
	struct mutex qgroup_ioctl_lock;

	/* list of dirty qgroups to be written at next commit */
	struct list_head dirty_qgroups;

	/* used by qgroup for an efficient tree traversal */
	u64 qgroup_seq;

	/* qgroup rescan items */
	struct mutex qgroup_rescan_lock; /* protects the progress item */
	struct btrfs_key qgroup_rescan_progress;
	struct btrfs_workqueue *qgroup_rescan_workers;
	struct completion qgroup_rescan_completion;
	struct btrfs_work qgroup_rescan_work;
	bool qgroup_rescan_running;	/* protected by qgroup_rescan_lock */
	u8 qgroup_drop_subtree_thres;
	/* filesystem state */
	unsigned long fs_state;

	struct btrfs_delayed_root *delayed_root;

	/* Extent buffer radix tree */
	spinlock_t buffer_lock;
	/* Entries are eb->start / sectorsize */
	struct radix_tree_root buffer_radix;

	/* next backup root to be overwritten */
	int backup_root_index;

	/* device replace state */
	struct btrfs_dev_replace dev_replace;

	struct semaphore uuid_tree_rescan_sem;

	/* Used to reclaim the metadata space in the background. */
	struct work_struct async_reclaim_work;
	struct work_struct async_data_reclaim_work;
	struct work_struct preempt_reclaim_work;

	/* Reclaim partially filled block groups in the background */
	struct work_struct reclaim_bgs_work;
	struct list_head reclaim_bgs;
	int bg_reclaim_threshold;

	spinlock_t unused_bgs_lock;
	struct list_head unused_bgs;
	struct mutex unused_bg_unpin_mutex;
	/* Protect block groups that are going to be deleted */
	struct mutex reclaim_bgs_lock;

	/* Cached block sizes */
	u32 nodesize;
	u32 sectorsize;
	/* ilog2 of sectorsize, use to avoid 64bit division */
	u32 sectorsize_bits;
	u32 csum_size;
	u32 csums_per_leaf;
	u32 stripesize;

	/*
	 * Maximum size of an extent.  BTRFS_MAX_EXTENT_SIZE on a regular
	 * filesystem; on zoned it depends on the device constraints.
	 */
	u64 max_extent_size;

	/* Block groups and devices containing active swapfiles. */
	spinlock_t swapfile_pins_lock;
	struct rb_root swapfile_pins;

	struct crypto_shash *csum_shash;

	/* Type of exclusive operation running, protected by super_lock */
	enum btrfs_exclusive_operation exclusive_operation;

	/*
	 * Zone size > 0 when in ZONED mode, otherwise it's used for a check
	 * if the mode is enabled
	 */
	u64 zone_size;

	/* Max size to emit ZONE_APPEND write command */
	u64 max_zone_append_size;
	struct mutex zoned_meta_io_lock;
	spinlock_t treelog_bg_lock;
	u64 treelog_bg;

	/*
	 * Start of the dedicated data relocation block group, protected by
	 * relocation_bg_lock.
	 */
	spinlock_t relocation_bg_lock;
	u64 data_reloc_bg;
	struct mutex zoned_data_reloc_io_lock;

	u64 nr_global_roots;

	spinlock_t zone_active_bgs_lock;
	struct list_head zone_active_bgs;

	/* Updates are not protected by any lock */
	struct btrfs_commit_stats commit_stats;

	/*
	 * Last generation where we dropped a non-relocation root.
	 * Use btrfs_set_last_root_drop_gen() and btrfs_get_last_root_drop_gen()
	 * to change it and to read it, respectively.
	 */
	u64 last_root_drop_gen;
	/*
	 * Annotations for transaction events (structures are empty when
	 * compiled without lockdep).
	 */
	struct lockdep_map btrfs_trans_num_writers_map;
	struct lockdep_map btrfs_trans_num_extwriters_map;
	struct lockdep_map btrfs_state_change_map[4];
	struct lockdep_map btrfs_trans_pending_ordered_map;
	struct lockdep_map btrfs_ordered_extent_map;

#ifdef CONFIG_BTRFS_FS_REF_VERIFY
	spinlock_t ref_verify_lock;
	struct rb_root block_tree;
#endif

#ifdef CONFIG_BTRFS_DEBUG
	struct kobject *debug_kobj;
	struct list_head allocated_roots;

	spinlock_t eb_leak_lock;
	struct list_head allocated_ebs;
#endif
};
static inline void btrfs_set_last_root_drop_gen(struct btrfs_fs_info *fs_info,
						u64 gen)
{
	WRITE_ONCE(fs_info->last_root_drop_gen, gen);
}

static inline u64 btrfs_get_last_root_drop_gen(const struct btrfs_fs_info *fs_info)
{
	return READ_ONCE(fs_info->last_root_drop_gen);
}

static inline struct btrfs_fs_info *btrfs_sb(struct super_block *sb)
{
	return sb->s_fs_info;
}
/*
 * Take the number of bytes to be checksummed and figure out how many leaves
 * it would require to store the csums for that many bytes.
 */
static inline u64 btrfs_csum_bytes_to_leaves(
			const struct btrfs_fs_info *fs_info, u64 csum_bytes)
{
	const u64 num_csums = csum_bytes >> fs_info->sectorsize_bits;

	return DIV_ROUND_UP_ULL(num_csums, fs_info->csums_per_leaf);
}
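/*
 * A worked example with illustrative numbers: with a 4 KiB sectorsize, 1 MiB
 * of data needs 256 checksums (1 MiB >> 12).  If csums_per_leaf came out to,
 * say, 2000 on the mounted filesystem (it is derived from the leaf size and
 * checksum size at mount time), those 256 csums fit in one leaf and this
 * returns 1.
 */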
/*
 * Use this if we would be adding new items, as we could split nodes as we cow
 * down the tree.
 */
static inline u64 btrfs_calc_insert_metadata_size(struct btrfs_fs_info *fs_info,
						  unsigned num_items)
{
	return (u64)fs_info->nodesize * BTRFS_MAX_LEVEL * 2 * num_items;
}

/*
 * Doing a truncate or a modification won't result in new nodes or leaves,
 * just what we need for COW.
 */
static inline u64 btrfs_calc_metadata_size(struct btrfs_fs_info *fs_info,
					   unsigned num_items)
{
	return (u64)fs_info->nodesize * BTRFS_MAX_LEVEL * num_items;
}
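/*
 * Illustrative numbers: with a 16 KiB nodesize and BTRFS_MAX_LEVEL == 8,
 * btrfs_calc_insert_metadata_size() reserves 16K * 8 * 2 = 256 KiB per item,
 * covering a worst case where every level of the path is both COWed and
 * split.  btrfs_calc_metadata_size() reserves half of that (128 KiB per
 * item), since plain modifications only COW the existing path.
 */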
#define BTRFS_MAX_EXTENT_ITEM_SIZE(r) ((BTRFS_LEAF_DATA_SIZE(r->fs_info) >> 4) - \
					sizeof(struct btrfs_item))

static inline bool btrfs_is_zoned(const struct btrfs_fs_info *fs_info)
{
	return fs_info->zone_size > 0;
}
/*
 * Count how many fs_info->max_extent_size cover the @size
 */
static inline u32 count_max_extents(struct btrfs_fs_info *fs_info, u64 size)
{
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
	if (!fs_info)
		return div_u64(size + BTRFS_MAX_EXTENT_SIZE - 1, BTRFS_MAX_EXTENT_SIZE);
#endif

	return div_u64(size + fs_info->max_extent_size - 1, fs_info->max_extent_size);
}
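/*
 * For example (illustrative): on a regular (non-zoned) filesystem
 * max_extent_size is BTRFS_MAX_EXTENT_SIZE (128 MiB), so a 300 MiB delalloc
 * range counts as DIV_ROUND_UP(300M, 128M) = 3 extents.
 */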
bool btrfs_exclop_start(struct btrfs_fs_info *fs_info,
			enum btrfs_exclusive_operation type);
bool btrfs_exclop_start_try_lock(struct btrfs_fs_info *fs_info,
				 enum btrfs_exclusive_operation type);
void btrfs_exclop_start_unlock(struct btrfs_fs_info *fs_info);
void btrfs_exclop_finish(struct btrfs_fs_info *fs_info);
void btrfs_exclop_balance(struct btrfs_fs_info *fs_info,
			  enum btrfs_exclusive_operation op);
/*
 * The state of btrfs root
 */
enum {
	/*
	 * btrfs_record_root_in_trans is a multi-step process, and it can race
	 * with the balancing code.  But the race is very small, and only the
	 * first time the root is added to each transaction.  So IN_TRANS_SETUP
	 * is used to tell us when more checks are required.
	 */
	BTRFS_ROOT_IN_TRANS_SETUP,

	/*
	 * Set if tree blocks of this root can be shared by other roots.
	 * Only subvolume trees and their reloc trees have this bit set.
	 * Conflicts with TRACK_DIRTY bit.
	 *
	 * This affects two things:
	 *
	 * - How balance works
	 *   For shareable roots, we need to use reloc tree and do path
	 *   replacement for balance, and need various pre/post hooks for
	 *   snapshot creation to handle them.
	 *
	 *   While for non-shareable trees, we just simply do a tree search
	 *   with path replacement.
	 *
	 * - How dirty roots are tracked
	 *   For shareable roots, btrfs_record_root_in_trans() is needed to
	 *   track them, while non-subvolume roots have TRACK_DIRTY bit, they
	 *   don't need to set this manually.
	 */
	BTRFS_ROOT_SHAREABLE,
	BTRFS_ROOT_TRACK_DIRTY,
	BTRFS_ROOT_IN_RADIX,
	BTRFS_ROOT_ORPHAN_ITEM_INSERTED,
	BTRFS_ROOT_DEFRAG_RUNNING,
	BTRFS_ROOT_FORCE_COW,
	BTRFS_ROOT_MULTI_LOG_TASKS,
	BTRFS_ROOT_DIRTY,
	BTRFS_ROOT_DELETING,

	/*
	 * Reloc tree is orphan, only kept here for qgroup delayed subtree scan.
	 *
	 * Set for the subvolume tree owning the reloc tree.
	 */
	BTRFS_ROOT_DEAD_RELOC_TREE,
	/* Mark dead root stored on device whose cleanup needs to be resumed */
	BTRFS_ROOT_DEAD_TREE,
	/* The root has a log tree. Used for subvolume roots and the tree root. */
	BTRFS_ROOT_HAS_LOG_TREE,
	/* Qgroup flushing is in progress */
	BTRFS_ROOT_QGROUP_FLUSHING,
	/* We started the orphan cleanup for this root. */
	BTRFS_ROOT_ORPHAN_CLEANUP,
	/* This root has a drop operation that was started previously. */
	BTRFS_ROOT_UNFINISHED_DROP,
	/* This reloc root needs to have its buffers lockdep class reset. */
	BTRFS_ROOT_RESET_LOCKDEP_CLASS,
};
enum btrfs_lockdep_trans_states {
	BTRFS_LOCKDEP_TRANS_COMMIT_START,
	BTRFS_LOCKDEP_TRANS_UNBLOCKED,
	BTRFS_LOCKDEP_TRANS_SUPER_COMMITTED,
	BTRFS_LOCKDEP_TRANS_COMPLETED,
};
/*
 * Lockdep annotation for wait events.
 *
 * @owner:  The struct where the lockdep map is defined
 * @lock:   The lockdep map corresponding to a wait event
 *
 * This macro is used to annotate a wait event.  In this case a thread acquires
 * the lockdep map as writer (exclusive lock) because it has to block until all
 * the threads that hold the lock as readers signal the condition for the wait
 * event and release their locks.
 */
#define btrfs_might_wait_for_event(owner, lock)				\
	do {								\
		rwsem_acquire(&owner->lock##_map, 0, 0, _THIS_IP_);	\
		rwsem_release(&owner->lock##_map, _THIS_IP_);		\
	} while (0)
/*
 * Protection for the resource/condition of a wait event.
 *
 * @owner:  The struct where the lockdep map is defined
 * @lock:   The lockdep map corresponding to a wait event
 *
 * Many threads can modify the condition for the wait event at the same time
 * and signal the threads that block on the wait event.  The threads that
 * modify the condition and do the signaling acquire the lock as readers
 * (shared lock).
 */
#define btrfs_lockdep_acquire(owner, lock)				\
	rwsem_acquire_read(&owner->lock##_map, 0, 0, _THIS_IP_)

/*
 * Used after signaling the condition for a wait event to release the lockdep
 * map held by a reader thread.
 */
#define btrfs_lockdep_release(owner, lock)				\
	rwsem_release(&owner->lock##_map, _THIS_IP_)
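/*
 * How the two sides pair up, as a rough sketch (btrfs_trans_num_writers is
 * used here as a stand-in wait event; the wait queue and condition are
 * placeholders):
 *
 *	Thread changing the condition and signaling:
 *
 *		btrfs_lockdep_acquire(fs_info, btrfs_trans_num_writers);
 *		(modify the condition, e.g. drop a writer count)
 *		btrfs_lockdep_release(fs_info, btrfs_trans_num_writers);
 *		wake_up(&wait_queue);
 *
 *	Thread about to block on the wait event:
 *
 *		btrfs_might_wait_for_event(fs_info, btrfs_trans_num_writers);
 *		wait_event(wait_queue, condition);
 *
 * With lockdep enabled this reports a deadlock if a thread that may block on
 * the event is itself holding the reader side of the map.
 */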
/*
 * Macros for the transaction states wait events, similar to the generic wait
 * event macros above.
 */
#define btrfs_might_wait_for_state(owner, i)					\
	do {									\
		rwsem_acquire(&owner->btrfs_state_change_map[i], 0, 0, _THIS_IP_); \
		rwsem_release(&owner->btrfs_state_change_map[i], _THIS_IP_);	\
	} while (0)

#define btrfs_trans_state_lockdep_acquire(owner, i)				\
	rwsem_acquire_read(&owner->btrfs_state_change_map[i], 0, 0, _THIS_IP_)

#define btrfs_trans_state_lockdep_release(owner, i)				\
	rwsem_release(&owner->btrfs_state_change_map[i], _THIS_IP_)
/* Initialization of the lockdep map */
#define btrfs_lockdep_init_map(owner, lock)					\
	do {									\
		static struct lock_class_key lock##_key;			\
		lockdep_init_map(&owner->lock##_map, #lock, &lock##_key, 0);	\
	} while (0)

/* Initialization of the transaction states lockdep maps. */
#define btrfs_state_lockdep_init_map(owner, lock, state)			\
	do {									\
		static struct lock_class_key lock##_key;			\
		lockdep_init_map(&owner->btrfs_state_change_map[state], #lock,	\
				 &lock##_key, 0);				\
	} while (0)
/*
 * Record swapped tree blocks of a subvolume tree for delayed subtree trace
 * code.  For details check the comment in fs/btrfs/qgroup.c.
 */
struct btrfs_qgroup_swapped_blocks {
	spinlock_t lock;
	/* RB_EMPTY_ROOT() of the blocks[] above */
	bool swapped;
	struct rb_root blocks[BTRFS_MAX_LEVEL];
};
/*
 * In-RAM representation of a tree.  extent_root is used for all allocations
 * and for the extent tree's own root.
 */
struct btrfs_root {
	struct rb_node rb_node;

	struct extent_buffer *node;

	struct extent_buffer *commit_root;
	struct btrfs_root *log_root;
	struct btrfs_root *reloc_root;

	unsigned long state;
	struct btrfs_root_item root_item;
	struct btrfs_key root_key;
	struct btrfs_fs_info *fs_info;
	struct extent_io_tree dirty_log_pages;

	struct mutex objectid_mutex;

	spinlock_t accounting_lock;
	struct btrfs_block_rsv *block_rsv;

	struct mutex log_mutex;
	wait_queue_head_t log_writer_wait;
	wait_queue_head_t log_commit_wait[2];
	struct list_head log_ctxs[2];
	/* Used only for log trees of subvolumes, not for the log root tree */
	atomic_t log_writers;
	atomic_t log_commit[2];
	/* Used only for log trees of subvolumes, not for the log root tree */
	atomic_t log_batch;
	int log_transid;
	/* Updated no matter whether the commit succeeds or not. */
	int log_transid_committed;
	/* Only updated when the commit succeeds. */
	int last_log_commit;
	pid_t log_start_pid;

	u64 last_trans;

	u32 type;

	u64 free_objectid;

	struct btrfs_key defrag_progress;
	struct btrfs_key defrag_max;

	/* The dirty list is only used by non-shareable roots */
	struct list_head dirty_list;

	struct list_head root_list;

	spinlock_t log_extents_lock[2];
	struct list_head logged_list[2];

	spinlock_t inode_lock;
	/* red-black tree that keeps track of in-memory inodes */
	struct rb_root inode_tree;

	/*
	 * radix tree that keeps track of delayed nodes of every inode,
	 * protected by inode_lock
	 */
	struct radix_tree_root delayed_nodes_tree;
	/*
	 * right now this just gets used so that a root has its own devid
	 * for stat.  It may be used for more later
	 */
	dev_t anon_dev;

	spinlock_t root_item_lock;
	refcount_t refs;

	struct mutex delalloc_mutex;
	spinlock_t delalloc_lock;
	/*
	 * all of the inodes that have delalloc bytes.  It is possible for
	 * this list to be empty even when there is still dirty data=ordered
	 * extents waiting to finish IO.
	 */
	struct list_head delalloc_inodes;
	struct list_head delalloc_root;
	u64 nr_delalloc_inodes;

	struct mutex ordered_extent_mutex;
	/*
	 * this is used by the balancing code to wait for all the pending
	 * ordered extents
	 */
	spinlock_t ordered_extent_lock;

	/*
	 * all of the data=ordered extents pending writeback
	 * these can span multiple transactions and basically include
	 * every dirty data page that isn't from nodatacow
	 */
	struct list_head ordered_extents;
	struct list_head ordered_root;
	u64 nr_ordered_extents;

	/*
	 * Not empty if this subvolume root has gone through tree block swap
	 * (relocation).
	 *
	 * Will be used by reloc_control::dirty_subvol_roots.
	 */
	struct list_head reloc_dirty_list;

	/*
	 * Number of currently running SEND ioctls to prevent
	 * manipulation with the read-only status via SUBVOL_SETFLAGS
	 */
	int send_in_progress;
	/*
	 * Number of currently running deduplication operations that have a
	 * destination inode belonging to this root.  Protected by
	 * root_item_lock.
	 */
	int dedupe_in_progress;
	/* For exclusion of snapshot creation and nocow writes */
	struct btrfs_drew_lock snapshot_lock;

	atomic_t snapshot_force_cow;

	/* For qgroup metadata reserved space */
	spinlock_t qgroup_meta_rsv_lock;
	u64 qgroup_meta_rsv_pertrans;
	u64 qgroup_meta_rsv_prealloc;
	wait_queue_head_t qgroup_flush_wait;

	/* Number of active swapfiles */
	atomic_t nr_swapfiles;

	/* Record pairs of swapped blocks for qgroup */
	struct btrfs_qgroup_swapped_blocks swapped_blocks;

	/* Used only by log trees, when logging csum items */
	struct extent_io_tree log_csum_range;

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
	u64 alloc_bytenr;
#endif

#ifdef CONFIG_BTRFS_DEBUG
	struct list_head leak_list;
#endif
};
/*
 * Structure that conveys information about an extent that is going to replace
 * all the extents in a file range.
 */
struct btrfs_replace_extent_info {
	u64 disk_offset;
	u64 disk_len;
	u64 data_offset;
	u64 data_len;
	u64 file_offset;
	/* Pointer to a file extent item of type regular or prealloc. */
	char *extent_buf;
	/*
	 * Set to true when attempting to replace a file range with a new extent
	 * described by this structure, set to false when attempting to clone an
	 * existing extent into a file range.
	 */
	bool is_new_extent;
	/* Indicate if we should update the inode's mtime and ctime. */
	bool update_times;
	/* Meaningful only if is_new_extent is true. */
	int qgroup_reserved;
	/*
	 * Meaningful only if is_new_extent is true.
	 * Used to track how many extent items we have already inserted in a
	 * subvolume tree that refer to the extent described by this structure,
	 * so that we know when to create a new delayed ref or to update an
	 * existing one.
	 */
	int insertions;
};
/* Arguments for btrfs_drop_extents() */
struct btrfs_drop_extents_args {
	/* Input parameters */

	/*
	 * If NULL, btrfs_drop_extents() will allocate and free its own path.
	 * If 'replace_extent' is true, this must not be NULL.  Also the path
	 * is always released except if 'replace_extent' is true and
	 * btrfs_drop_extents() sets 'extent_inserted' to true, in which case
	 * the path is kept locked.
	 */
	struct btrfs_path *path;
	/* Start offset of the range to drop extents from */
	u64 start;
	/* End (exclusive, last byte + 1) of the range to drop extents from */
	u64 end;
	/* If true drop all the extent maps in the range */
	bool drop_cache;
	/*
	 * If true it means we want to insert a new extent after dropping all
	 * the extents in the range.  If this is true, the 'extent_item_size'
	 * parameter must be set as well and the 'extent_inserted' field will
	 * be set to true by btrfs_drop_extents() if it could insert the new
	 * extent.
	 * Note: when this is set to true the path must not be NULL.
	 */
	bool replace_extent;
	/*
	 * Used if 'replace_extent' is true.  Size of the file extent item to
	 * insert after dropping all existing extents in the range.
	 */
	u32 extent_item_size;

	/* Output parameters */

	/*
	 * Set to the minimum between the input parameter 'end' and the end
	 * (exclusive, last byte + 1) of the last dropped extent.  This is
	 * always set even if btrfs_drop_extents() returns an error.
	 */
	u64 drop_end;
	/*
	 * The number of allocated bytes found in the range.  This can be
	 * smaller than the range's length when there are holes in the range.
	 */
	u64 bytes_found;
	/*
	 * Only set if 'replace_extent' is true.  Set to true if we were able
	 * to insert a replacement extent after dropping all extents in the
	 * range, otherwise set to false by btrfs_drop_extents().
	 * Also, if btrfs_drop_extents() has set this to true it means it
	 * returned with the path locked, otherwise if it has set this to
	 * false it has returned with the path released.
	 */
	bool extent_inserted;
};
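/*
 * Minimal usage sketch (error handling trimmed; the transaction, inode and
 * range are placeholders):
 *
 *	struct btrfs_drop_extents_args args = { 0 };
 *
 *	args.start = start;
 *	args.end = start + len;
 *	args.drop_cache = true;
 *	ret = btrfs_drop_extents(trans, root, inode, &args);
 *
 * On success, args.drop_end and args.bytes_found describe what was actually
 * dropped in the range.
 */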
struct btrfs_file_private {
	void *filldir_buf;
};

static inline u32 BTRFS_LEAF_DATA_SIZE(const struct btrfs_fs_info *info)
{
	return info->nodesize - sizeof(struct btrfs_header);
}

static inline u32 BTRFS_MAX_ITEM_SIZE(const struct btrfs_fs_info *info)
{
	return BTRFS_LEAF_DATA_SIZE(info) - sizeof(struct btrfs_item);
}

static inline u32 BTRFS_NODEPTRS_PER_BLOCK(const struct btrfs_fs_info *info)
{
	return BTRFS_LEAF_DATA_SIZE(info) / sizeof(struct btrfs_key_ptr);
}

#define BTRFS_FILE_EXTENT_INLINE_DATA_START		\
		(offsetof(struct btrfs_file_extent_item, disk_bytenr))
static inline u32 BTRFS_MAX_INLINE_DATA_SIZE(const struct btrfs_fs_info *info)
{
	return BTRFS_MAX_ITEM_SIZE(info) -
	       BTRFS_FILE_EXTENT_INLINE_DATA_START;
}

static inline u32 BTRFS_MAX_XATTR_SIZE(const struct btrfs_fs_info *info)
{
	return BTRFS_MAX_ITEM_SIZE(info) - sizeof(struct btrfs_dir_item);
}
#define BTRFS_BYTES_TO_BLKS(fs_info, bytes) \
				((bytes) >> (fs_info)->sectorsize_bits)

static inline u32 btrfs_crc32c(u32 crc, const void *address, unsigned length)
{
	return crc32c(crc, address, length);
}

static inline void btrfs_crc32c_final(u32 crc, u8 *result)
{
	put_unaligned_le32(~crc, result);
}

static inline u64 btrfs_name_hash(const char *name, int len)
{
	return crc32c((u32)~1, name, len);
}

/*
 * Figure the key offset of an extended inode ref
 */
static inline u64 btrfs_extref_hash(u64 parent_objectid, const char *name,
				    int len)
{
	return (u64)crc32c(parent_objectid, name, len);
}

static inline gfp_t btrfs_alloc_write_mask(struct address_space *mapping)
{
	return mapping_gfp_constraint(mapping, ~__GFP_FS);
}
enum btrfs_inline_ref_type {
	BTRFS_REF_TYPE_INVALID,
	BTRFS_REF_TYPE_BLOCK,
	BTRFS_REF_TYPE_DATA,
	BTRFS_REF_TYPE_ANY,
};

int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb,
				     struct btrfs_extent_inline_ref *iref,
				     enum btrfs_inline_ref_type is_data);
u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset);
int btrfs_add_excluded_extent(struct btrfs_fs_info *fs_info,
			      u64 start, u64 num_bytes);
void btrfs_free_excluded_extents(struct btrfs_block_group *cache);
int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
			   unsigned long count);
void btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info,
				  struct btrfs_delayed_ref_root *delayed_refs,
				  struct btrfs_delayed_ref_head *head);
int btrfs_lookup_data_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len);
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info, u64 bytenr,
			     u64 offset, int metadata, u64 *refs, u64 *flags);
int btrfs_pin_extent(struct btrfs_trans_handle *trans, u64 bytenr, u64 num,
		     int reserved);
int btrfs_pin_extent_for_log_replay(struct btrfs_trans_handle *trans,
				    u64 bytenr, u64 num_bytes);
int btrfs_exclude_logged_extents(struct extent_buffer *eb);
int btrfs_cross_ref_exist(struct btrfs_root *root,
			  u64 objectid, u64 offset, u64 bytenr, bool strict,
			  struct btrfs_path *path);
struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     u64 parent, u64 root_objectid,
					     const struct btrfs_disk_key *key,
					     int level, u64 hint,
					     u64 empty_size,
					     enum btrfs_lock_nesting nest);
void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
			   u64 root_id,
			   struct extent_buffer *buf,
			   u64 parent, int last_ref);
int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root, u64 owner,
				     u64 offset, u64 ram_bytes,
				     struct btrfs_key *ins);
int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
				   u64 root_objectid, u64 owner, u64 offset,
				   struct btrfs_key *ins);
int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes, u64 num_bytes,
			 u64 min_alloc_size, u64 empty_size, u64 hint_byte,
			 struct btrfs_key *ins, int is_data, int delalloc);
int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		  struct extent_buffer *buf, int full_backref);
int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		  struct extent_buffer *buf, int full_backref);
int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
				struct extent_buffer *eb, u64 flags, int level);
int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_ref *ref);

int btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info,
			       u64 start, u64 len, int delalloc);
int btrfs_pin_reserved_extent(struct btrfs_trans_handle *trans, u64 start,
			      u64 len);
int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans);
int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
			 struct btrfs_ref *generic_ref);

void btrfs_clear_space_info_full(struct btrfs_fs_info *info);

int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
				     struct btrfs_block_rsv *rsv,
				     int nitems, bool use_global_rsv);
void btrfs_subvolume_release_metadata(struct btrfs_root *root,
				      struct btrfs_block_rsv *rsv);
void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes);

int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes,
				    u64 disk_num_bytes, bool noflush);
u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo);
int btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info,
				   u64 start, u64 end);
int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
			 u64 num_bytes, u64 *actual_bytes);
int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range);

int btrfs_init_space_info(struct btrfs_fs_info *fs_info);
int btrfs_delayed_refs_qgroup_accounting(struct btrfs_trans_handle *trans,
					 struct btrfs_fs_info *fs_info);
int btrfs_start_write_no_snapshotting(struct btrfs_root *root);
void btrfs_end_write_no_snapshotting(struct btrfs_root *root);
void btrfs_wait_for_snapshot_creation(struct btrfs_root *root);
/* ctree.c */
int __init btrfs_ctree_init(void);
void __cold btrfs_ctree_exit(void);
int btrfs_bin_search(struct extent_buffer *eb, const struct btrfs_key *key,
		     int *slot);
int __pure btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2);
int btrfs_previous_item(struct btrfs_root *root,
			struct btrfs_path *path, u64 min_objectid,
			int type);
int btrfs_previous_extent_item(struct btrfs_root *root,
			       struct btrfs_path *path, u64 min_objectid);
void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
			     struct btrfs_path *path,
			     const struct btrfs_key *new_key);
struct extent_buffer *btrfs_root_node(struct btrfs_root *root);
int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
			struct btrfs_key *key, int lowest_level,
			u64 min_trans);
int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
			 struct btrfs_path *path,
			 u64 min_trans);
struct extent_buffer *btrfs_read_node_slot(struct extent_buffer *parent,
					   int slot);
int btrfs_cow_block(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root, struct extent_buffer *buf,
		    struct extent_buffer *parent, int parent_slot,
		    struct extent_buffer **cow_ret,
		    enum btrfs_lock_nesting nest);
int btrfs_copy_root(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root,
		    struct extent_buffer *buf,
		    struct extent_buffer **cow_ret, u64 new_root_objectid);
int btrfs_block_can_be_shared(struct btrfs_root *root,
			      struct extent_buffer *buf);
void btrfs_extend_item(struct btrfs_path *path, u32 data_size);
void btrfs_truncate_item(struct btrfs_path *path, u32 new_size, int from_end);
int btrfs_split_item(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_path *path,
		     const struct btrfs_key *new_key,
		     unsigned long split_offset);
int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 struct btrfs_path *path,
			 const struct btrfs_key *new_key);
int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path,
		    u64 inum, u64 ioff, u8 key_type, struct btrfs_key *found_key);
int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      const struct btrfs_key *key, struct btrfs_path *p,
		      int ins_len, int cow);
int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key,
			  struct btrfs_path *p, u64 time_seq);
int btrfs_search_slot_for_read(struct btrfs_root *root,
			       const struct btrfs_key *key,
			       struct btrfs_path *p, int find_higher,
			       int return_any);
int btrfs_realloc_node(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct extent_buffer *parent,
		       int start_slot, u64 *last_ret,
		       struct btrfs_key *progress);
void btrfs_release_path(struct btrfs_path *p);
struct btrfs_path *btrfs_alloc_path(void);
void btrfs_free_path(struct btrfs_path *p);

int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		    struct btrfs_path *path, int slot, int nr);
static inline int btrfs_del_item(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path)
{
	return btrfs_del_items(trans, root, path, path->slots[0], 1);
}
/*
 * Describes a batch of items to insert in a btree.  This is used by
 * btrfs_insert_empty_items().
 */
struct btrfs_item_batch {
	/*
	 * Pointer to an array containing the keys of the items to insert (in
	 * sorted order).
	 */
	const struct btrfs_key *keys;
	/* Pointer to an array containing the data size for each item to insert. */
	const u32 *data_sizes;
	/*
	 * The sum of data sizes for all items.  The caller can compute this while
	 * setting up the data_sizes array, so it ends up being more efficient
	 * than having btrfs_insert_empty_items() or setup_item_for_insert()
	 * doing it, as it would avoid an extra loop over a potentially large
	 * array, and in the case of setup_item_for_insert(), we would be doing
	 * it while holding a write lock on a leaf and often on upper level nodes
	 * too, unnecessarily increasing the size of a critical section.
	 */
	u32 total_data_size;
	/* Size of the keys and data_sizes arrays (number of items in the batch). */
	int nr;
};
void btrfs_setup_item_for_insert(struct btrfs_root *root,
				 struct btrfs_path *path,
				 const struct btrfs_key *key,
				 u32 data_size);
int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      const struct btrfs_key *key, void *data, u32 data_size);
int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path,
			     const struct btrfs_item_batch *batch);

static inline int btrfs_insert_empty_item(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  const struct btrfs_key *key,
					  u32 data_size)
{
	struct btrfs_item_batch batch;

	batch.keys = key;
	batch.data_sizes = &data_size;
	batch.total_data_size = data_size;
	batch.nr = 1;

	return btrfs_insert_empty_items(trans, root, path, &batch);
}
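/*
 * Building a batch of more than one item, as a sketch (the two keys are
 * assumed to be already initialized in sorted order, the sizes are
 * placeholders):
 *
 *	struct btrfs_item_batch batch;
 *	struct btrfs_key keys[2];
 *	u32 sizes[2] = { first_size, second_size };
 *
 *	batch.keys = keys;
 *	batch.data_sizes = sizes;
 *	batch.total_data_size = first_size + second_size;
 *	batch.nr = 2;
 *	ret = btrfs_insert_empty_items(trans, root, path, &batch);
 */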
int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path);
int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
			u64 time_seq);

int btrfs_search_backwards(struct btrfs_root *root, struct btrfs_key *key,
			   struct btrfs_path *path);

int btrfs_get_next_valid_item(struct btrfs_root *root, struct btrfs_key *key,
			      struct btrfs_path *path);
/*
 * Search in @root for a given @key, and store the slot found in @found_key.
 *
 * @root:	The root node of the tree.
 * @key:	The key we are looking for.
 * @found_key:	Will hold the found item.
 * @path:	Holds the current slot/leaf.
 * @iter_ret:	Contains the value returned from btrfs_search_slot or
 *		btrfs_get_next_valid_item, whichever was executed last.
 *
 * The @iter_ret is an output variable that will contain the return value of
 * btrfs_search_slot, if it encountered an error, or the value returned from
 * btrfs_get_next_valid_item otherwise.  That return value can be 0, if a valid
 * slot was found, 1 if there were no more leaves, and <0 if there was an error.
 *
 * It's recommended to use a separate variable for iter_ret and then use it to
 * set the function return value so there's no confusion of the 0/1/errno
 * values stemming from btrfs_search_slot.
 */
#define btrfs_for_each_slot(root, key, found_key, path, iter_ret)		\
	for (iter_ret = btrfs_search_slot(NULL, (root), (key), (path), 0, 0);	\
		(iter_ret) >= 0 &&						\
		(iter_ret = btrfs_get_next_valid_item((root), (found_key), (path))) == 0; \
		(path)->slots[0]++						\
	)
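/*
 * A sketch following the recommendation above (the key range and the item
 * processing are placeholders):
 *
 *	struct btrfs_key key = { .objectid = objectid, .type = key_type };
 *	struct btrfs_key found_key;
 *	int iter_ret = 0;
 *	int ret;
 *
 *	btrfs_for_each_slot(root, &key, &found_key, path, iter_ret) {
 *		(process the item at path->nodes[0], path->slots[0])
 *	}
 *	ret = (iter_ret < 0) ? iter_ret : 0;
 *
 * After the loop, iter_ret is 1 when the tree was exhausted and < 0 on error.
 */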
int btrfs_next_old_item(struct btrfs_root *root, struct btrfs_path *path, u64 time_seq);

/*
 * Search the tree again to find a leaf with greater keys.
 *
 * Returns 0 if it found something or 1 if there are no greater leaves.
 * Returns < 0 on error.
 */
static inline int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
{
	return btrfs_next_old_leaf(root, path, 0);
}

static inline int btrfs_next_item(struct btrfs_root *root, struct btrfs_path *p)
{
	return btrfs_next_old_item(root, p, 0);
}
int btrfs_leaf_free_space(struct extent_buffer *leaf);
int __must_check btrfs_drop_snapshot(struct btrfs_root *root, int update_ref,
				     int for_reloc);
int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root,
		       struct extent_buffer *node,
		       struct extent_buffer *parent);
/* root-item.c */
int btrfs_add_root_ref(struct btrfs_trans_handle *trans, u64 root_id,
		       u64 ref_id, u64 dirid, u64 sequence,
		       const struct qstr *name);
int btrfs_del_root_ref(struct btrfs_trans_handle *trans, u64 root_id,
		       u64 ref_id, u64 dirid, u64 *sequence,
		       const struct qstr *name);
int btrfs_del_root(struct btrfs_trans_handle *trans,
		   const struct btrfs_key *key);
int btrfs_insert_root(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      const struct btrfs_key *key,
		      struct btrfs_root_item *item);
int __must_check btrfs_update_root(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_key *key,
				   struct btrfs_root_item *item);
int btrfs_find_root(struct btrfs_root *root, const struct btrfs_key *search_key,
		    struct btrfs_path *path, struct btrfs_root_item *root_item,
		    struct btrfs_key *root_key);
int btrfs_find_orphan_roots(struct btrfs_fs_info *fs_info);
void btrfs_set_root_node(struct btrfs_root_item *item,
			 struct extent_buffer *node);
void btrfs_check_and_init_root_item(struct btrfs_root_item *item);
void btrfs_update_root_times(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root);

/* uuid-tree.c */
int btrfs_uuid_tree_add(struct btrfs_trans_handle *trans, u8 *uuid, u8 type,
			u64 subid);
int btrfs_uuid_tree_remove(struct btrfs_trans_handle *trans, u8 *uuid, u8 type,
			   u64 subid);
int btrfs_uuid_tree_iterate(struct btrfs_fs_info *fs_info);
/* dir-item.c */
int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir,
				   const struct qstr *name);
int btrfs_insert_dir_item(struct btrfs_trans_handle *trans,
			  const struct qstr *name, struct btrfs_inode *dir,
			  struct btrfs_key *location, u8 type, u64 index);
struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     struct btrfs_path *path, u64 dir,
					     const struct qstr *name, int mod);
struct btrfs_dir_item *
btrfs_lookup_dir_index_item(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    struct btrfs_path *path, u64 dir,
			    u64 index, const struct qstr *name, int mod);
struct btrfs_dir_item *
btrfs_search_dir_index_item(struct btrfs_root *root,
			    struct btrfs_path *path, u64 dirid,
			    const struct qstr *name);
int btrfs_delete_one_dir_name(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct btrfs_path *path,
			      struct btrfs_dir_item *di);
int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    struct btrfs_path *path, u64 objectid,
			    const char *name, u16 name_len,
			    const void *data, u16 data_len);
struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path, u64 dir,
					  const char *name, u16 name_len,
					  int mod);
struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_fs_info *fs_info,
						 struct btrfs_path *path,
						 const char *name,
						 int name_len);
/* orphan.c */
int btrfs_insert_orphan_item(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, u64 offset);
int btrfs_del_orphan_item(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, u64 offset);
int btrfs_find_orphan_item(struct btrfs_root *root, u64 offset);

/* file-item.c */
int btrfs_del_csums(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root, u64 bytenr, u64 len);
blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u8 *dst);
int btrfs_insert_hole_extent(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, u64 objectid, u64 pos,
			     u64 num_bytes);
int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path, u64 objectid,
			     u64 bytenr, int mod);
int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_ordered_sum *sums);
blk_status_t btrfs_csum_one_bio(struct btrfs_inode *inode, struct bio *bio,
				u64 offset, bool one_ordered);
int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
			     struct list_head *list, int search_commit,
			     bool nowait);
void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode,
				     const struct btrfs_path *path,
				     struct btrfs_file_extent_item *fi,
				     const bool new_inline,
				     struct extent_map *em);
int btrfs_inode_clear_file_extent_range(struct btrfs_inode *inode, u64 start,
					u64 len);
int btrfs_inode_set_file_extent_range(struct btrfs_inode *inode, u64 start,
				      u64 len);
void btrfs_inode_safe_disk_i_size_write(struct btrfs_inode *inode, u64 new_i_size);
u64 btrfs_file_extent_end(const struct btrfs_path *path);
/* inode.c */
void btrfs_submit_data_write_bio(struct inode *inode, struct bio *bio, int mirror_num);
void btrfs_submit_data_read_bio(struct inode *inode, struct bio *bio,
				int mirror_num, enum btrfs_compression_type compress_type);
int btrfs_check_sector_csum(struct btrfs_fs_info *fs_info, struct page *page,
			    u32 pgoff, u8 *csum, const u8 * const csum_expected);
int btrfs_check_data_csum(struct inode *inode, struct btrfs_bio *bbio,
			  u32 bio_offset, struct page *page, u32 pgoff);
unsigned int btrfs_verify_data_csum(struct btrfs_bio *bbio,
				    u32 bio_offset, struct page *page,
				    u64 start, u64 end);
noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
			      u64 *orig_start, u64 *orig_block_len,
			      u64 *ram_bytes, bool nowait, bool strict);

void __btrfs_del_delalloc_inode(struct btrfs_root *root,
				struct btrfs_inode *inode);
struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry);
int btrfs_set_inode_index(struct btrfs_inode *dir, u64 *index);
int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
		       struct btrfs_inode *dir, struct btrfs_inode *inode,
		       const struct qstr *name);
int btrfs_add_link(struct btrfs_trans_handle *trans,
		   struct btrfs_inode *parent_inode, struct btrfs_inode *inode,
		   const struct qstr *name, int add_backref, u64 index);
int btrfs_delete_subvolume(struct inode *dir, struct dentry *dentry);
int btrfs_truncate_block(struct btrfs_inode *inode, loff_t from, loff_t len,
			 int front);

int btrfs_start_delalloc_snapshot(struct btrfs_root *root, bool in_reclaim_context);
int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, long nr,
			       bool in_reclaim_context);
int btrfs_set_extent_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
			      unsigned int extra_bits,
			      struct extent_state **cached_state);
struct btrfs_new_inode_args {
	/* Input */
	struct inode *dir;
	struct dentry *dentry;
	struct inode *inode;
	bool orphan;
	bool subvol;

	/*
	 * Output from btrfs_new_inode_prepare(), input to
	 * btrfs_create_new_inode().
	 */
	struct posix_acl *default_acl;
	struct posix_acl *acl;
};
int btrfs_new_inode_prepare(struct btrfs_new_inode_args *args,
			    unsigned int *trans_num_items);
int btrfs_create_new_inode(struct btrfs_trans_handle *trans,
			   struct btrfs_new_inode_args *args);
void btrfs_new_inode_args_destroy(struct btrfs_new_inode_args *args);
struct inode *btrfs_new_subvol_inode(struct user_namespace *mnt_userns,
				     struct inode *dir);
1685 void btrfs_set_delalloc_extent(struct inode *inode, struct extent_state *state,
1687 void btrfs_clear_delalloc_extent(struct inode *inode,
1688 struct extent_state *state, u32 bits);
1689 void btrfs_merge_delalloc_extent(struct inode *inode, struct extent_state *new,
1690 struct extent_state *other);
1691 void btrfs_split_delalloc_extent(struct inode *inode,
1692 struct extent_state *orig, u64 split);
1693 void btrfs_set_range_writeback(struct btrfs_inode *inode, u64 start, u64 end);
1694 vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf);
1695 void btrfs_evict_inode(struct inode *inode);
1696 struct inode *btrfs_alloc_inode(struct super_block *sb);
1697 void btrfs_destroy_inode(struct inode *inode);
1698 void btrfs_free_inode(struct inode *inode);
1699 int btrfs_drop_inode(struct inode *inode);
1700 int __init btrfs_init_cachep(void);
1701 void __cold btrfs_destroy_cachep(void);
1702 struct inode *btrfs_iget_path(struct super_block *s, u64 ino,
1703 struct btrfs_root *root, struct btrfs_path *path);
1704 struct inode *btrfs_iget(struct super_block *s, u64 ino, struct btrfs_root *root);
1705 struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
1706 struct page *page, size_t pg_offset,
1707 u64 start, u64 end);
1708 int btrfs_update_inode(struct btrfs_trans_handle *trans,
1709 struct btrfs_root *root, struct btrfs_inode *inode);
1710 int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
1711 struct btrfs_root *root, struct btrfs_inode *inode);
1712 int btrfs_orphan_add(struct btrfs_trans_handle *trans,
1713 struct btrfs_inode *inode);
1714 int btrfs_orphan_cleanup(struct btrfs_root *root);
1715 int btrfs_cont_expand(struct btrfs_inode *inode, loff_t oldsize, loff_t size);
1716 void btrfs_add_delayed_iput(struct inode *inode);
1717 void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info);
1718 int btrfs_wait_on_delayed_iputs(struct btrfs_fs_info *fs_info);
1719 int btrfs_prealloc_file_range(struct inode *inode, int mode,
1720 u64 start, u64 num_bytes, u64 min_size,
1721 loff_t actual_len, u64 *alloc_hint);
1722 int btrfs_prealloc_file_range_trans(struct inode *inode,
1723 struct btrfs_trans_handle *trans, int mode,
1724 u64 start, u64 num_bytes, u64 min_size,
1725 loff_t actual_len, u64 *alloc_hint);
1726 int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct page *locked_page,
1727 u64 start, u64 end, int *page_started, unsigned long *nr_written,
1728 struct writeback_control *wbc);
1729 int btrfs_writepage_cow_fixup(struct page *page);
1730 void btrfs_writepage_endio_finish_ordered(struct btrfs_inode *inode,
1731 struct page *page, u64 start,
1732 u64 end, bool uptodate);
1733 int btrfs_encoded_io_compression_from_extent(struct btrfs_fs_info *fs_info,
int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
					  u64 file_offset, u64 disk_bytenr,
					  u64 disk_io_size,
					  struct page **pages);
ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter,
			   struct btrfs_ioctl_encoded_io_args *encoded);
ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
			       const struct btrfs_ioctl_encoded_io_args *encoded);

ssize_t btrfs_dio_read(struct kiocb *iocb, struct iov_iter *iter,
		       size_t done_before);
struct iomap_dio *btrfs_dio_write(struct kiocb *iocb, struct iov_iter *iter,
				  size_t done_before);

extern const struct dentry_operations btrfs_dentry_operations;
/* Inode locking type flags, by default the exclusive lock is taken. */
enum btrfs_ilock_type {
	ENUM_BIT(BTRFS_ILOCK_SHARED),
	ENUM_BIT(BTRFS_ILOCK_TRY),
	ENUM_BIT(BTRFS_ILOCK_MMAP),
};

int btrfs_inode_lock(struct inode *inode, unsigned int ilock_flags);
void btrfs_inode_unlock(struct inode *inode, unsigned int ilock_flags);
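/*
 * Usage sketch (illustrative, not from this header): take the inode lock in
 * shared mode without blocking and fall back to a blocking acquisition when
 * contended. btrfs_inode_lock() returns non-zero when BTRFS_ILOCK_TRY is
 * requested and the lock cannot be taken immediately.
 *
 *	if (btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED | BTRFS_ILOCK_TRY))
 *		btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED);
 *	... read-side work ...
 *	btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
 */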
void btrfs_update_inode_bytes(struct btrfs_inode *inode,
			      const u64 add_bytes,
			      const u64 del_bytes);
void btrfs_assert_inode_range_clean(struct btrfs_inode *inode, u64 start, u64 end);
/* ioctl.c */
long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
long btrfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
int btrfs_fileattr_get(struct dentry *dentry, struct fileattr *fa);
int btrfs_fileattr_set(struct user_namespace *mnt_userns,
		       struct dentry *dentry, struct fileattr *fa);
int btrfs_ioctl_get_supported_features(void __user *arg);
void btrfs_sync_inode_flags_to_i_flags(struct inode *inode);
int __pure btrfs_is_empty_uuid(u8 *uuid);
int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra,
		      struct btrfs_ioctl_defrag_range_args *range,
		      u64 newer_than, unsigned long max_to_defrag);
void btrfs_get_block_group_info(struct list_head *groups_list,
				struct btrfs_ioctl_space_info *space);
void btrfs_update_ioctl_balance_args(struct btrfs_fs_info *fs_info,
				     struct btrfs_ioctl_balance_args *bargs);
/* file.c */
int __init btrfs_auto_defrag_init(void);
void __cold btrfs_auto_defrag_exit(void);
int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
			   struct btrfs_inode *inode, u32 extent_thresh);
int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info);
void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info);
int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync);
extern const struct file_operations btrfs_file_operations;
int btrfs_drop_extents(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct btrfs_inode *inode,
		       struct btrfs_drop_extents_args *args);
int btrfs_replace_file_extents(struct btrfs_inode *inode,
			       struct btrfs_path *path, const u64 start,
			       const u64 end,
			       struct btrfs_replace_extent_info *extent_info,
			       struct btrfs_trans_handle **trans_out);
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
			      struct btrfs_inode *inode, u64 start, u64 end);
ssize_t btrfs_do_write_iter(struct kiocb *iocb, struct iov_iter *from,
			    const struct btrfs_ioctl_encoded_io_args *encoded);
int btrfs_release_file(struct inode *inode, struct file *file);
int btrfs_dirty_pages(struct btrfs_inode *inode, struct page **pages,
		      size_t num_pages, loff_t pos, size_t write_bytes,
		      struct extent_state **cached, bool noreserve);
int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end);
int btrfs_check_nocow_lock(struct btrfs_inode *inode, loff_t pos,
			   size_t *write_bytes, bool nowait);
void btrfs_check_nocow_unlock(struct btrfs_inode *inode);
bool btrfs_find_delalloc_in_range(struct btrfs_inode *inode, u64 start, u64 end,
				  u64 *delalloc_start_ret, u64 *delalloc_end_ret);
/* tree-defrag.c */
int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
			struct btrfs_root *root);
/* super.c */
int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
			unsigned long new_flags);
int btrfs_sync_fs(struct super_block *sb, int wait);
char *btrfs_get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info,
					  u64 subvol_objectid);
#if BITS_PER_LONG == 32
#define BTRFS_32BIT_MAX_FILE_SIZE	(((u64)ULONG_MAX + 1) << PAGE_SHIFT)
/*
 * The warning threshold is 5/8th of the MAX_LFS_FILESIZE that limits the
 * logical addresses of extents.
 *
 * For 4K page size it's about 10T, for 64K it's 160T.
 */
#define BTRFS_32BIT_EARLY_WARN_THRESHOLD (BTRFS_32BIT_MAX_FILE_SIZE * 5 / 8)
void btrfs_warn_32bit_limit(struct btrfs_fs_info *fs_info);
void btrfs_err_32bit_limit(struct btrfs_fs_info *fs_info);
#endif
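/*
 * Worked example for the thresholds above (arithmetic only): on 32-bit,
 * (u64)ULONG_MAX + 1 == 2^32, so with 4K pages (PAGE_SHIFT == 12) the
 * maximum file size is 2^32 << 12 == 2^44 == 16T, and 5/8 of that is 10T.
 * With 64K pages (PAGE_SHIFT == 16) it is 2^32 << 16 == 2^48 == 256T, and
 * 5/8 of that is 160T, matching the numbers quoted in the comment.
 */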
/*
 * Get the correct offset inside the page of extent buffer.
 *
 * @eb:		target extent buffer
 * @offset:	offset inside the extent buffer
 *
 * Will handle both sectorsize == PAGE_SIZE and sectorsize < PAGE_SIZE cases.
 */
static inline size_t get_eb_offset_in_page(const struct extent_buffer *eb,
					   unsigned long offset)
{
	/*
	 * For the sectorsize == PAGE_SIZE case, eb->start is always aligned
	 * to PAGE_SIZE, thus adding it won't cause any difference.
	 *
	 * For sectorsize < PAGE_SIZE, we must only read the data that belongs
	 * to the eb, thus we have to take eb->start into consideration.
	 */
	return offset_in_page(offset + eb->start);
}
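/*
 * Worked example (illustrative, subpage case): with 64K pages and 16K
 * nodesize, an extent buffer starting at bytenr 0x14000 lives inside the
 * 64K page covering [0x10000, 0x20000). For offset 0x100 inside the eb,
 * this returns offset_in_page(0x100 + 0x14000) == 0x4100, pointing at the
 * eb's data within the shared page rather than at the page's beginning.
 */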
static inline unsigned long get_eb_page_index(unsigned long offset)
{
	/*
	 * For the sectorsize == PAGE_SIZE case, plain >> PAGE_SHIFT is enough.
	 *
	 * For the sectorsize < PAGE_SIZE case, we only support 64K PAGE_SIZE,
	 * and have ensured that all tree blocks are contained in one page,
	 * thus we always get index == 0.
	 */
	return offset >> PAGE_SHIFT;
}
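/*
 * Worked example (illustrative): with 4K pages, offset 0x5000 inside an
 * extent buffer maps to page index 0x5000 >> 12 == 5 in the eb's pages
 * array. With 64K pages every tree block fits in one page, so the result
 * is always 0.
 */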
/*
 * Use that for functions that are conditionally exported for sanity tests but
 * otherwise static.
 */
#ifndef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
#define EXPORT_FOR_TESTS static
#else
#define EXPORT_FOR_TESTS
#endif
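/*
 * Usage sketch (illustrative; the helper name is made up): a function that
 * is file-local in normal builds but gains external linkage when the sanity
 * tests are built in, so a test file can call it directly:
 *
 *	EXPORT_FOR_TESTS int btrfs_example_helper(struct btrfs_inode *inode);
 */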
/* acl.c */
#ifdef CONFIG_BTRFS_FS_POSIX_ACL
struct posix_acl *btrfs_get_acl(struct inode *inode, int type, bool rcu);
int btrfs_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
		  struct posix_acl *acl, int type);
int __btrfs_set_acl(struct btrfs_trans_handle *trans, struct inode *inode,
		    struct posix_acl *acl, int type);
#else
#define btrfs_get_acl NULL
#define btrfs_set_acl NULL
static inline int __btrfs_set_acl(struct btrfs_trans_handle *trans,
				  struct inode *inode, struct posix_acl *acl,
				  int type)
{
	return -EOPNOTSUPP;
}
#endif
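/*
 * Design note with a small sketch (illustrative): defining btrfs_get_acl and
 * btrfs_set_acl to NULL when ACL support is compiled out lets the
 * inode_operations initializers stay unconditional, since the VFS treats a
 * NULL ->get_acl/->set_acl method as "not supported":
 *
 *	static const struct inode_operations btrfs_dir_inode_operations = {
 *		...
 *		.get_acl	= btrfs_get_acl,
 *		.set_acl	= btrfs_set_acl,
 *		...
 *	};
 */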
/* relocation.c */
int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start);
int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root);
int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root);
int btrfs_recover_relocation(struct btrfs_fs_info *fs_info);
int btrfs_reloc_clone_csums(struct btrfs_inode *inode, u64 file_pos, u64 len);
int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct extent_buffer *buf,
			  struct extent_buffer *cow);
void btrfs_reloc_pre_snapshot(struct btrfs_pending_snapshot *pending,
			      u64 *bytes_to_reserve);
int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
			      struct btrfs_pending_snapshot *pending);
int btrfs_should_cancel_balance(struct btrfs_fs_info *fs_info);
struct btrfs_root *find_reloc_root(struct btrfs_fs_info *fs_info,
				   u64 bytenr);
int btrfs_should_ignore_reloc_root(struct btrfs_root *root);
/* scrub.c */
int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
		    u64 end, struct btrfs_scrub_progress *progress,
		    int readonly, int is_dev_replace);
void btrfs_scrub_pause(struct btrfs_fs_info *fs_info);
void btrfs_scrub_continue(struct btrfs_fs_info *fs_info);
int btrfs_scrub_cancel(struct btrfs_fs_info *info);
int btrfs_scrub_cancel_dev(struct btrfs_device *dev);
int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid,
			 struct btrfs_scrub_progress *progress);
/* dev-replace.c */
void btrfs_bio_counter_inc_blocked(struct btrfs_fs_info *fs_info);
void btrfs_bio_counter_sub(struct btrfs_fs_info *fs_info, s64 amount);

static inline void btrfs_bio_counter_dec(struct btrfs_fs_info *fs_info)
{
	btrfs_bio_counter_sub(fs_info, 1);
}
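/*
 * Usage sketch (illustrative): the counter brackets in-flight IO so that
 * dev-replace can drain it before switching devices. A typical caller does:
 *
 *	btrfs_bio_counter_inc_blocked(fs_info);
 *	... map the logical range and submit the bio ...
 *	btrfs_bio_counter_dec(fs_info);
 *
 * The "blocked" variant waits if dev-replace currently holds the counter.
 */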
static inline int is_fstree(u64 rootid)
{
	if (rootid == BTRFS_FS_TREE_OBJECTID ||
	    ((s64)rootid >= (s64)BTRFS_FIRST_FREE_OBJECTID &&
	     !btrfs_qgroup_level(rootid)))
		return 1;
	return 0;
}
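/*
 * Worked examples (illustrative): is_fstree(BTRFS_FS_TREE_OBJECTID) and
 * is_fstree(256) (the first subvolume id, BTRFS_FIRST_FREE_OBJECTID) both
 * return 1, while metadata trees such as the extent tree
 * (BTRFS_EXTENT_TREE_OBJECTID == 2) return 0. Root ids with a non-zero
 * qgroup level (the high 16 bits) are qgroup ids, not file trees, and
 * also return 0.
 */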
static inline int btrfs_defrag_cancelled(struct btrfs_fs_info *fs_info)
{
	return signal_pending(current);
}
/* verity.c */
#ifdef CONFIG_FS_VERITY

extern const struct fsverity_operations btrfs_verityops;
int btrfs_drop_verity_items(struct btrfs_inode *inode);
int btrfs_get_verity_descriptor(struct inode *inode, void *buf, size_t buf_size);

#else

static inline int btrfs_drop_verity_items(struct btrfs_inode *inode)
{
	return 0;
}

static inline int btrfs_get_verity_descriptor(struct inode *inode, void *buf,
					      size_t buf_size)
{
	return -EPERM;
}

#endif
/* Sanity test specific functions */
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
void btrfs_test_destroy_inode(struct inode *inode);
#endif
static inline bool btrfs_is_data_reloc_root(const struct btrfs_root *root)
{
	return root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID;
}
/*
 * We use page status Private2 to indicate there is an ordered extent with
 * unfinished IO.
 *
 * Rename the Private2 accessors to Ordered, to improve readability.
 */
#define PageOrdered(page)		PagePrivate2(page)
#define SetPageOrdered(page)		SetPagePrivate2(page)
#define ClearPageOrdered(page)		ClearPagePrivate2(page)
#define folio_test_ordered(folio)	folio_test_private_2(folio)
#define folio_set_ordered(folio)	folio_set_private_2(folio)
#define folio_clear_ordered(folio)	folio_clear_private_2(folio)
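/*
 * Usage sketch (illustrative): the flag is set when an ordered extent is
 * created over a page and cleared when its IO finishes, so IO helpers can
 * ask "does this page still have pending ordered IO?" without looking up
 * the ordered extent itself:
 *
 *	SetPageOrdered(page);		// ordered extent created for the page
 *	...
 *	if (PageOrdered(page))		// pending ordered IO on this page?
 *		...
 *	ClearPageOrdered(page);		// the ordered IO has finished
 */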