/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_FS_H
#define BTRFS_FS_H
#include <linux/blkdev.h>
#include <linux/btrfs_tree.h>
#include <linux/sizes.h>
#include "extent-io-tree.h"
#include "extent_map.h"
#include "async-thread.h"
#include "block-rsv.h"
#define BTRFS_MAX_EXTENT_SIZE		SZ_128M

#define BTRFS_OLDEST_GENERATION	0ULL

#define BTRFS_EMPTY_DIR_SIZE		0

#define BTRFS_DIRTY_METADATA_THRESH	SZ_32M

#define BTRFS_SUPER_INFO_OFFSET	SZ_64K
#define BTRFS_SUPER_INFO_SIZE		4096

static_assert(sizeof(struct btrfs_super_block) == BTRFS_SUPER_INFO_SIZE);
/*
 * The reserved space at the beginning of each device. It covers the primary
 * super block and leaves space for potential use by other tools like
 * bootloaders, and lowers the potential damage from an accidental overwrite.
 */
#define BTRFS_DEVICE_RANGE_RESERVED	(SZ_1M)
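/*
 * A minimal sketch of the invariant this define encodes (the helper below is
 * hypothetical, not an existing kernel API): device bytes below
 * BTRFS_DEVICE_RANGE_RESERVED must never be handed out by the allocator,
 * since that range holds the primary super block at BTRFS_SUPER_INFO_OFFSET.
 */
static inline bool btrfs_dev_offset_is_reserved(u64 physical)
{
	return physical < BTRFS_DEVICE_RANGE_RESERVED;
}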
/*
 * Runtime (in-memory) states of filesystem
 */
enum {
	/* Global indicator of serious filesystem errors */
	BTRFS_FS_STATE_ERROR,
	/*
	 * Filesystem is being remounted, allow skipping some operations, like
	 * defrag
	 */
	BTRFS_FS_STATE_REMOUNTING,
	/* Filesystem in RO mode */
	BTRFS_FS_STATE_RO,
	/* Track if a transaction abort has been reported on this filesystem */
	BTRFS_FS_STATE_TRANS_ABORTED,
	/*
	 * Bio operations should be blocked on this filesystem because a source
	 * or target device is being destroyed as part of a device replace
	 * operation.
	 */
	BTRFS_FS_STATE_DEV_REPLACING,
	/* The btrfs_fs_info created for self-tests */
	BTRFS_FS_STATE_DUMMY_FS_INFO,

	BTRFS_FS_STATE_NO_CSUMS,
	/* Indicates there was an error cleaning up a log tree. */
	BTRFS_FS_STATE_LOG_CLEANUP_ERROR,

	BTRFS_FS_STATE_COUNT
};

enum {
	BTRFS_FS_CLOSING_START,
	BTRFS_FS_CLOSING_DONE,
	BTRFS_FS_LOG_RECOVERING,

	BTRFS_FS_QUOTA_ENABLED,
	BTRFS_FS_UPDATE_UUID_TREE_GEN,
	BTRFS_FS_CREATING_FREE_SPACE_TREE,

	BTRFS_FS_QUOTA_OVERRIDE,
	/* Used to record internally whether fs has been frozen */
	BTRFS_FS_FROZEN,
	/*
	 * Indicate that balance has been set up from the ioctl and is in the
	 * main phase. The fs_info::balance_ctl is initialized.
	 */
	BTRFS_FS_BALANCE_RUNNING,

	/*
	 * Indicate that relocation of a chunk has started; it's set per chunk
	 * and is toggled between chunks.
	 */
	BTRFS_FS_RELOC_RUNNING,

	/* Indicate that the cleaner thread is awake and doing something. */
	BTRFS_FS_CLEANER_RUNNING,

	/*
	 * The checksumming has an optimized version and is considered fast,
	 * so we don't need to offload checksums to workqueues (see the sketch
	 * after this enum).
	 */
	BTRFS_FS_CSUM_IMPL_FAST,
	/* Indicate that the discard workqueue can service discards. */
	BTRFS_FS_DISCARD_RUNNING,

	/* Indicate that we need to clean up space cache v1 */
	BTRFS_FS_CLEANUP_SPACE_CACHE_V1,

	/* Indicate that we can't trust the free space tree for caching yet */
	BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED,

	/* Indicate whether there are any tree modification log users */
	BTRFS_FS_TREE_MOD_LOG_USERS,

	/* Indicate that we want the transaction kthread to commit right now. */
	BTRFS_FS_COMMIT_TRANS,

	/* Indicate we have half completed snapshot deletions pending. */
	BTRFS_FS_UNFINISHED_DROPS,

	/* Indicate we have to finish a zone to do next allocation. */
	BTRFS_FS_NEED_ZONE_FINISH,

	/* Indicate that we want to commit the transaction. */
	BTRFS_FS_NEED_TRANS_COMMIT,

	/* This is set when active zone tracking is needed. */
	BTRFS_FS_ACTIVE_ZONE_TRACKING,

	/*
	 * Indicate if we have some features changed, this is mostly for
	 * cleaner thread to update the sysfs interface.
	 */
	BTRFS_FS_FEATURE_CHANGED,
#if BITS_PER_LONG == 32
	/* Indicate if we have error/warn message printed on 32bit systems */
	BTRFS_FS_32BIT_ERROR,
	BTRFS_FS_32BIT_WARN,
#endif
};
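/*
 * A minimal sketch, not the actual bio submission logic, of how the
 * BTRFS_FS_CSUM_IMPL_FAST bit is meant to be consumed: with a fast (e.g.
 * hardware accelerated) checksum implementation the csum can be computed
 * inline, otherwise it should be punted to a workqueue. The macro name is
 * hypothetical.
 */
#define BTRFS_CSUM_NEEDS_OFFLOAD(fs_flags) \
	(!test_bit(BTRFS_FS_CSUM_IMPL_FAST, (fs_flags)))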
/*
 * Flags for mount options.
 *
 * Note: don't forget to add new options to btrfs_show_options()
 */
enum {
	BTRFS_MOUNT_NODATASUM			= (1UL << 0),
	BTRFS_MOUNT_NODATACOW			= (1UL << 1),
	BTRFS_MOUNT_NOBARRIER			= (1UL << 2),
	BTRFS_MOUNT_SSD				= (1UL << 3),
	BTRFS_MOUNT_DEGRADED			= (1UL << 4),
	BTRFS_MOUNT_COMPRESS			= (1UL << 5),
	BTRFS_MOUNT_NOTREELOG			= (1UL << 6),
	BTRFS_MOUNT_FLUSHONCOMMIT		= (1UL << 7),
	BTRFS_MOUNT_SSD_SPREAD			= (1UL << 8),
	BTRFS_MOUNT_NOSSD			= (1UL << 9),
	BTRFS_MOUNT_DISCARD_SYNC		= (1UL << 10),
	BTRFS_MOUNT_FORCE_COMPRESS		= (1UL << 11),
	BTRFS_MOUNT_SPACE_CACHE			= (1UL << 12),
	BTRFS_MOUNT_CLEAR_CACHE		= (1UL << 13),
	BTRFS_MOUNT_USER_SUBVOL_RM_ALLOWED	= (1UL << 14),
	BTRFS_MOUNT_ENOSPC_DEBUG		= (1UL << 15),
	BTRFS_MOUNT_AUTO_DEFRAG		= (1UL << 16),
	BTRFS_MOUNT_USEBACKUPROOT		= (1UL << 17),
	BTRFS_MOUNT_SKIP_BALANCE		= (1UL << 18),
	BTRFS_MOUNT_CHECK_INTEGRITY		= (1UL << 19),
	BTRFS_MOUNT_CHECK_INTEGRITY_DATA	= (1UL << 20),
	BTRFS_MOUNT_PANIC_ON_FATAL_ERROR	= (1UL << 21),
	BTRFS_MOUNT_RESCAN_UUID_TREE		= (1UL << 22),
	BTRFS_MOUNT_FRAGMENT_DATA		= (1UL << 23),
	BTRFS_MOUNT_FRAGMENT_METADATA		= (1UL << 24),
	BTRFS_MOUNT_FREE_SPACE_TREE		= (1UL << 25),
	BTRFS_MOUNT_NOLOGREPLAY		= (1UL << 26),
	BTRFS_MOUNT_REF_VERIFY			= (1UL << 27),
	BTRFS_MOUNT_DISCARD_ASYNC		= (1UL << 28),
	BTRFS_MOUNT_IGNOREBADROOTS		= (1UL << 29),
	BTRFS_MOUNT_IGNOREDATACSUMS		= (1UL << 30),
	BTRFS_MOUNT_NODISCARD			= (1UL << 31),
};
/*
 * Compat flags that we support. If any incompat flags are set other than the
 * ones specified below then we will fail to mount.
 */
#define BTRFS_FEATURE_COMPAT_SUPP		0ULL
#define BTRFS_FEATURE_COMPAT_SAFE_SET		0ULL
#define BTRFS_FEATURE_COMPAT_SAFE_CLEAR	0ULL
#define BTRFS_FEATURE_COMPAT_RO_SUPP				\
	(BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE |		\
	 BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE_VALID |	\
	 BTRFS_FEATURE_COMPAT_RO_VERITY |			\
	 BTRFS_FEATURE_COMPAT_RO_BLOCK_GROUP_TREE)

#define BTRFS_FEATURE_COMPAT_RO_SAFE_SET	0ULL
#define BTRFS_FEATURE_COMPAT_RO_SAFE_CLEAR	0ULL
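/*
 * A simplified sketch of the mount-time gate these masks drive (the real
 * checks live in the mount path, and the helper name here is hypothetical):
 * a super block carrying compat_ro bits outside BTRFS_FEATURE_COMPAT_RO_SUPP
 * can only be mounted read-only. The field is read directly here instead of
 * through the accessor helpers, which are declared elsewhere.
 */
static inline bool btrfs_has_unknown_compat_ro(const struct btrfs_super_block *sb)
{
	return (le64_to_cpu(sb->compat_ro_flags) &
		~BTRFS_FEATURE_COMPAT_RO_SUPP) != 0;
}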
#ifdef CONFIG_BTRFS_DEBUG
/*
 * Extent tree v2 supported only with CONFIG_BTRFS_DEBUG
 */
#define BTRFS_FEATURE_INCOMPAT_SUPP			\
	(BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF |	\
	 BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL |	\
	 BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS |		\
	 BTRFS_FEATURE_INCOMPAT_BIG_METADATA |		\
	 BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO |		\
	 BTRFS_FEATURE_INCOMPAT_COMPRESS_ZSTD |	\
	 BTRFS_FEATURE_INCOMPAT_RAID56 |		\
	 BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF |	\
	 BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA |	\
	 BTRFS_FEATURE_INCOMPAT_NO_HOLES |		\
	 BTRFS_FEATURE_INCOMPAT_METADATA_UUID |	\
	 BTRFS_FEATURE_INCOMPAT_RAID1C34 |		\
	 BTRFS_FEATURE_INCOMPAT_ZONED |		\
	 BTRFS_FEATURE_INCOMPAT_EXTENT_TREE_V2)
#else
#define BTRFS_FEATURE_INCOMPAT_SUPP			\
	(BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF |	\
	 BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL |	\
	 BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS |		\
	 BTRFS_FEATURE_INCOMPAT_BIG_METADATA |		\
	 BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO |		\
	 BTRFS_FEATURE_INCOMPAT_COMPRESS_ZSTD |	\
	 BTRFS_FEATURE_INCOMPAT_RAID56 |		\
	 BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF |	\
	 BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA |	\
	 BTRFS_FEATURE_INCOMPAT_NO_HOLES |		\
	 BTRFS_FEATURE_INCOMPAT_METADATA_UUID |	\
	 BTRFS_FEATURE_INCOMPAT_RAID1C34 |		\
	 BTRFS_FEATURE_INCOMPAT_ZONED)
#endif
#define BTRFS_FEATURE_INCOMPAT_SAFE_SET	\
	(BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF)
#define BTRFS_FEATURE_INCOMPAT_SAFE_CLEAR		0ULL

#define BTRFS_DEFAULT_COMMIT_INTERVAL	(30)
#define BTRFS_DEFAULT_MAX_INLINE	(2048)
struct btrfs_dev_replace {
	/* See #define above */
	u64 replace_state;
	/* Seconds since 1-Jan-1970 */
	time64_t time_started;
	/* Seconds since 1-Jan-1970 */
	time64_t time_stopped;
	atomic64_t num_write_errors;
	atomic64_t num_uncorrectable_read_errors;

	u64 cursor_left;
	u64 committed_cursor_left;
	u64 cursor_left_last_write_of_item;
	u64 cursor_right;

	/* See #define above */
	u64 cont_reading_from_srcdev_mode;

	int is_valid;
	int item_needs_writeback;
	struct btrfs_device *srcdev;
	struct btrfs_device *tgtdev;

	struct mutex lock_finishing_cancel_unmount;
	struct rw_semaphore rwsem;

	struct btrfs_scrub_progress scrub_progress;

	struct percpu_counter bio_counter;
	wait_queue_head_t replace_wait;
};
/*
 * Free clusters are used to claim free space in relatively large chunks,
 * allowing us to do less seeky writes. They are used for all metadata
 * allocations. In ssd_spread mode they are also used for data allocations.
 */
struct btrfs_free_cluster {
	spinlock_t lock;
	spinlock_t refill_lock;
	struct rb_root root;

	/* Largest extent in this cluster */
	u64 max_size;

	/* First extent starting offset */
	u64 window_start;

	/* We did a full search and couldn't create a cluster */
	bool fragmented;

	struct btrfs_block_group *block_group;
	/*
	 * When a cluster is allocated from a block group, we put the cluster
	 * onto a list in the block group so that it can be freed before the
	 * block group is freed.
	 */
	struct list_head block_group_list;
};
/* Discard control. */
/*
 * Async discard uses multiple lists to differentiate the discard filter
 * parameters. Index 0 is for completely free block groups where we need to
 * ensure the entire block group is trimmed without being lossy. Indices
 * afterwards represent monotonically decreasing discard filter sizes to
 * prioritize what should be discarded next.
 */
#define BTRFS_NR_DISCARD_LISTS		3
#define BTRFS_DISCARD_INDEX_UNUSED	0
#define BTRFS_DISCARD_INDEX_START	1
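/*
 * A minimal sketch of the list layout described above (hypothetical helper,
 * not the discard.c implementation): a completely unused block group is
 * filed under index 0 so it can be trimmed in full, while every other block
 * group enters at BTRFS_DISCARD_INDEX_START and moves to higher indices as
 * the discard filter size shrinks.
 */
static inline int btrfs_discard_index_sketch(bool block_group_is_unused)
{
	return block_group_is_unused ? BTRFS_DISCARD_INDEX_UNUSED :
				       BTRFS_DISCARD_INDEX_START;
}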
struct btrfs_discard_ctl {
	struct workqueue_struct *discard_workers;
	struct delayed_work work;
	spinlock_t lock;
	struct btrfs_block_group *block_group;
	struct list_head discard_list[BTRFS_NR_DISCARD_LISTS];
	u64 prev_discard;
	u64 prev_discard_time;
	atomic_t discardable_extents;
	atomic64_t discardable_bytes;
	u64 max_discard_size;
	u64 delay_ms;
	u32 iops_limit;
	u32 kbps_limit;
	u64 discard_extent_bytes;
	u64 discard_bitmap_bytes;
	atomic64_t discard_bytes_saved;
};
/*
 * Exclusive operations (device replace, resize, device add/remove, balance)
 */
enum btrfs_exclusive_operation {
	BTRFS_EXCLOP_NONE,
	BTRFS_EXCLOP_BALANCE_PAUSED,
	BTRFS_EXCLOP_BALANCE,
	BTRFS_EXCLOP_DEV_ADD,
	BTRFS_EXCLOP_DEV_REMOVE,
	BTRFS_EXCLOP_DEV_REPLACE,
	BTRFS_EXCLOP_RESIZE,
	BTRFS_EXCLOP_SWAP_ACTIVATE,
};
/* Store data about transaction commits, exported via sysfs. */
struct btrfs_commit_stats {
	/* Total number of commits */
	u64 commit_count;
	/* The maximum commit duration so far in ns */
	u64 max_commit_dur;
	/* The last commit duration in ns */
	u64 last_commit_dur;
	/* The total commit duration in ns */
	u64 total_commit_dur;
};
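/*
 * A minimal sketch (hypothetical helper) of how a finished commit's duration
 * would fold into these counters; the real update happens at the end of the
 * transaction commit path. max() comes from linux/minmax.h.
 */
static inline void btrfs_update_commit_stats_sketch(struct btrfs_commit_stats *stats,
						    u64 interval_ns)
{
	stats->commit_count++;
	stats->last_commit_dur = interval_ns;
	stats->max_commit_dur = max(stats->max_commit_dur, interval_ns);
	stats->total_commit_dur += interval_ns;
}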
struct btrfs_fs_info {
	u8 chunk_tree_uuid[BTRFS_UUID_SIZE];

	struct btrfs_root *tree_root;
	struct btrfs_root *chunk_root;
	struct btrfs_root *dev_root;
	struct btrfs_root *fs_root;
	struct btrfs_root *quota_root;
	struct btrfs_root *uuid_root;
	struct btrfs_root *data_reloc_root;
	struct btrfs_root *block_group_root;

	/* The log root tree is a directory of all the other log roots */
	struct btrfs_root *log_root_tree;

	/* The tree that holds the global roots (csum, extent, etc) */
	rwlock_t global_root_lock;
	struct rb_root global_root_tree;

	spinlock_t fs_roots_radix_lock;
	struct radix_tree_root fs_roots_radix;

	/* Block group cache stuff */
	rwlock_t block_group_cache_lock;
	struct rb_root_cached block_group_cache_tree;

	/* Keep track of unallocated space */
	atomic64_t free_chunk_space;

	/* Track ranges which are used by log trees blocks/logged data extents */
	struct extent_io_tree excluded_extents;

	/* logical->physical extent mapping */
	struct extent_map_tree mapping_tree;
	/*
	 * Block reservation for extent, checksum, root tree and delayed dir
	 * index item.
	 */
	struct btrfs_block_rsv global_block_rsv;
	/* Block reservation for metadata operations */
	struct btrfs_block_rsv trans_block_rsv;
	/* Block reservation for chunk tree */
	struct btrfs_block_rsv chunk_block_rsv;
	/* Block reservation for delayed operations */
	struct btrfs_block_rsv delayed_block_rsv;
	/* Block reservation for delayed refs */
	struct btrfs_block_rsv delayed_refs_rsv;

	struct btrfs_block_rsv empty_block_rsv;
	u64 last_trans_committed;
	/*
	 * Generation of the last transaction used for block group relocation
	 * since the filesystem was last mounted (or 0 if none happened yet).
	 * Must be written and read while holding btrfs_fs_info::commit_root_sem.
	 */
	u64 last_reloc_trans;
	u64 avg_delayed_ref_runtime;

	/*
	 * This is updated to the current transaction every time a full commit
	 * is required instead of the faster short fsync log commits.
	 */
	u64 last_trans_log_full_commit;
	unsigned long mount_opt;
	unsigned long compress_type:4;
	unsigned int compress_level;
	u32 commit_interval;
	/*
	 * It is a suggestive number, the read side is safe even if it gets a
	 * wrong number because we will write out the data into a regular
	 * extent. The write side (mount/remount) is under ->s_umount lock,
	 * so it is also safe.
	 */
	u64 max_inline;
	struct btrfs_transaction *running_transaction;
	wait_queue_head_t transaction_throttle;
	wait_queue_head_t transaction_wait;
	wait_queue_head_t transaction_blocked_wait;
	wait_queue_head_t async_submit_wait;

	/*
	 * Used to protect the incompat_flags, compat_flags, compat_ro_flags
	 * when they are updated.
	 *
	 * Because we never clear the flags, we don't need the lock on the
	 * read side.
	 *
	 * We also don't need the lock when we mount the fs, because there is
	 * no other task which can update the flags then.
	 */
	spinlock_t super_lock;
	struct btrfs_super_block *super_copy;
	struct btrfs_super_block *super_for_commit;
	struct super_block *sb;
	struct inode *btree_inode;
	struct mutex tree_log_mutex;
	struct mutex transaction_kthread_mutex;
	struct mutex cleaner_mutex;
	struct mutex chunk_mutex;

	/*
	 * This is taken to make sure we don't set block groups ro after the
	 * free space cache has been allocated on them.
	 */
	struct mutex ro_block_group_mutex;

	/*
	 * This is used during read/modify/write to make sure no two ios are
	 * trying to mod the same stripe at the same time.
	 */
	struct btrfs_stripe_hash_table *stripe_hash_table;

	/*
	 * This protects the ordered operations list only while we are
	 * processing all of the entries on it. This way we make sure the
	 * commit code doesn't find the list temporarily empty because another
	 * function happens to be doing non-waiting preflush before jumping
	 * into the main commit.
	 */
	struct mutex ordered_operations_mutex;
	struct rw_semaphore commit_root_sem;

	struct rw_semaphore cleanup_work_sem;

	struct rw_semaphore subvol_sem;

	spinlock_t trans_lock;
	/*
	 * The reloc mutex goes with the trans lock, it is taken during commit
	 * to protect us from the relocation code.
	 */
	struct mutex reloc_mutex;

	struct list_head trans_list;
	struct list_head dead_roots;
	struct list_head caching_block_groups;

	spinlock_t delayed_iput_lock;
	struct list_head delayed_iputs;
	atomic_t nr_delayed_iputs;
	wait_queue_head_t delayed_iputs_wait;

	atomic64_t tree_mod_seq;

	/* This protects tree_mod_log and tree_mod_seq_list */
	rwlock_t tree_mod_log_lock;
	struct rb_root tree_mod_log;
	struct list_head tree_mod_seq_list;

	atomic_t async_delalloc_pages;

	/* This is used to protect the following list -- ordered_roots. */
	spinlock_t ordered_root_lock;
	/*
	 * All fs/file tree roots in which there are data=ordered extents
	 * pending writeback are added into this list.
	 *
	 * These can span multiple transactions and basically include every
	 * dirty data page that isn't from nodatacow.
	 */
	struct list_head ordered_roots;

	struct mutex delalloc_root_mutex;
	spinlock_t delalloc_root_lock;
	/* All fs/file tree roots that have delalloc inodes. */
	struct list_head delalloc_roots;
	/*
	 * There is a pool of worker threads for checksumming during writes and
	 * a pool for checksumming after reads. This is because readers can
	 * run with FS locks held, and the writers may be waiting for those
	 * locks. We don't want ordering in the pending list to cause
	 * deadlocks, and so the two are serviced separately.
	 *
	 * A third pool does submit_bio to avoid deadlocking with the other two.
	 */
	struct btrfs_workqueue *workers;
	struct btrfs_workqueue *hipri_workers;
	struct btrfs_workqueue *delalloc_workers;
	struct btrfs_workqueue *flush_workers;
	struct workqueue_struct *endio_workers;
	struct workqueue_struct *endio_meta_workers;
	struct workqueue_struct *rmw_workers;
	struct workqueue_struct *compressed_write_workers;
	struct btrfs_workqueue *endio_write_workers;
	struct btrfs_workqueue *endio_freespace_worker;
	struct btrfs_workqueue *caching_workers;

	/*
	 * Fixup workers take dirty pages that didn't properly go through the
	 * cow mechanism and make them safe to write. It happens for the
	 * sys_munmap function call path.
	 */
	struct btrfs_workqueue *fixup_workers;
	struct btrfs_workqueue *delayed_workers;

	struct task_struct *transaction_kthread;
	struct task_struct *cleaner_kthread;
	u32 thread_pool_size;

	struct kobject *space_info_kobj;
	struct kobject *qgroups_kobj;
	struct kobject *discard_kobj;
	/* Used to keep from writing metadata until there is a nice batch */
	struct percpu_counter dirty_metadata_bytes;
	struct percpu_counter delalloc_bytes;
	struct percpu_counter ordered_bytes;
	s32 dirty_metadata_batch;
	s32 delalloc_batch;

	struct list_head dirty_cowonly_roots;
	struct btrfs_fs_devices *fs_devices;

	/*
	 * The space_info list is effectively read only after initial setup.
	 * It is populated at mount time and cleaned up after all block groups
	 * are removed. RCU is used to protect it.
	 */
	struct list_head space_info;

	struct btrfs_space_info *data_sinfo;

	struct reloc_control *reloc_ctl;

	/* data_alloc_cluster is only used in ssd_spread mode */
	struct btrfs_free_cluster data_alloc_cluster;

	/* All metadata allocations go through this cluster. */
	struct btrfs_free_cluster meta_alloc_cluster;

	/* Auto defrag inodes go here. */
	spinlock_t defrag_inodes_lock;
	struct rb_root defrag_inodes;
	atomic_t defrag_running;

	/* Used to protect avail_{data, metadata, system}_alloc_bits */
	seqlock_t profiles_lock;
	/*
	 * These three are in extended format (availability of single chunks is
	 * denoted by BTRFS_AVAIL_ALLOC_BIT_SINGLE bit, other types are denoted
	 * by corresponding BTRFS_BLOCK_GROUP_* bits)
	 */
	u64 avail_data_alloc_bits;
	u64 avail_metadata_alloc_bits;
	u64 avail_system_alloc_bits;
	spinlock_t balance_lock;
	struct mutex balance_mutex;
	atomic_t balance_pause_req;
	atomic_t balance_cancel_req;
	struct btrfs_balance_control *balance_ctl;
	wait_queue_head_t balance_wait_q;

	/* Cancellation requests for chunk relocation */
	atomic_t reloc_cancel_req;

	u32 data_chunk_allocations;
	u32 metadata_ratio;

	void *bdev_holder;
	/* Private scrub information */
	struct mutex scrub_lock;
	atomic_t scrubs_running;
	atomic_t scrub_pause_req;
	atomic_t scrubs_paused;
	atomic_t scrub_cancel_req;
	wait_queue_head_t scrub_pause_wait;
	/*
	 * The worker pointers are NULL iff the refcount is 0, ie. scrub is not
	 * running.
	 */
	refcount_t scrub_workers_refcnt;
	struct workqueue_struct *scrub_workers;
	struct workqueue_struct *scrub_wr_completion_workers;
	struct workqueue_struct *scrub_parity_workers;
	struct btrfs_subpage_info *subpage_info;

	struct btrfs_discard_ctl discard_ctl;
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
	u32 check_integrity_print_mask;
#endif
	/* Is qgroup tracking in a consistent state? */
	u64 qgroup_flags;

	/* Holds configuration and tracking. Protected by qgroup_lock. */
	struct rb_root qgroup_tree;
	spinlock_t qgroup_lock;
	/*
	 * Used to avoid frequently calling ulist_alloc()/ulist_free()
	 * when doing qgroup accounting, it must be protected by qgroup_lock.
	 */
	struct ulist *qgroup_ulist;

	/*
	 * Protect user change for quota operations. If a transaction is needed,
	 * it must be started before locking this lock.
	 */
	struct mutex qgroup_ioctl_lock;

	/* List of dirty qgroups to be written at next commit. */
	struct list_head dirty_qgroups;

	/* Used by qgroup for an efficient tree traversal. */
	u64 qgroup_seq;
	/* Qgroup rescan items. */
	/* Protects the progress item */
	struct mutex qgroup_rescan_lock;
	struct btrfs_key qgroup_rescan_progress;
	struct btrfs_workqueue *qgroup_rescan_workers;
	struct completion qgroup_rescan_completion;
	struct btrfs_work qgroup_rescan_work;
	/* Protected by qgroup_rescan_lock */
	bool qgroup_rescan_running;
	u8 qgroup_drop_subtree_thres;

	/* Filesystem state */
	unsigned long fs_state;

	struct btrfs_delayed_root *delayed_root;

	/* Extent buffer radix tree */
	spinlock_t buffer_lock;
	/* Entries are eb->start / sectorsize */
	struct radix_tree_root buffer_radix;

	/* Next backup root to be overwritten */
	int backup_root_index;
	/* Device replace state */
	struct btrfs_dev_replace dev_replace;

	struct semaphore uuid_tree_rescan_sem;

	/* Used to reclaim the metadata space in the background. */
	struct work_struct async_reclaim_work;
	struct work_struct async_data_reclaim_work;
	struct work_struct preempt_reclaim_work;

	/* Reclaim partially filled block groups in the background */
	struct work_struct reclaim_bgs_work;
	struct list_head reclaim_bgs;
	int bg_reclaim_threshold;

	spinlock_t unused_bgs_lock;
	struct list_head unused_bgs;
	struct mutex unused_bg_unpin_mutex;
	/* Protect block groups that are going to be deleted */
	struct mutex reclaim_bgs_lock;
	/* Cached block sizes */
	u32 nodesize;
	u32 sectorsize;
	/* ilog2 of sectorsize, use to avoid 64bit division */
	u32 sectorsize_bits;
	u32 csum_size;
	u32 csums_per_leaf;
	u32 stripesize;

	/*
	 * Maximum size of an extent. BTRFS_MAX_EXTENT_SIZE on a regular
	 * filesystem, on zoned it depends on the device constraints.
	 */
	u64 max_extent_size;
	/* Block groups and devices containing active swapfiles. */
	spinlock_t swapfile_pins_lock;
	struct rb_root swapfile_pins;

	struct crypto_shash *csum_shash;

	/* Type of exclusive operation running, protected by super_lock */
	enum btrfs_exclusive_operation exclusive_operation;

	/*
	 * Zone size > 0 when in ZONED mode, otherwise it's used for a check
	 * if the mode is enabled.
	 */
	u64 zone_size;

	/* Constraints for ZONE_APPEND commands: */
	struct queue_limits limits;
	u64 max_zone_append_size;
	struct mutex zoned_meta_io_lock;
	spinlock_t treelog_bg_lock;
	u64 treelog_bg;

	/*
	 * Start of the dedicated data relocation block group, protected by
	 * relocation_bg_lock.
	 */
	spinlock_t relocation_bg_lock;
	u64 data_reloc_bg;
	struct mutex zoned_data_reloc_io_lock;

	u64 nr_global_roots;

	spinlock_t zone_active_bgs_lock;
	struct list_head zone_active_bgs;
	/* Updates are not protected by any lock */
	struct btrfs_commit_stats commit_stats;

	/*
	 * Last generation where we dropped a non-relocation root.
	 * Use btrfs_set_last_root_drop_gen() and btrfs_get_last_root_drop_gen()
	 * to change it and to read it, respectively.
	 */
	u64 last_root_drop_gen;

	/*
	 * Annotations for transaction events (structures are empty when
	 * compiled without lockdep).
	 */
	struct lockdep_map btrfs_trans_num_writers_map;
	struct lockdep_map btrfs_trans_num_extwriters_map;
	struct lockdep_map btrfs_state_change_map[4];
	struct lockdep_map btrfs_trans_pending_ordered_map;
	struct lockdep_map btrfs_ordered_extent_map;
#ifdef CONFIG_BTRFS_FS_REF_VERIFY
	spinlock_t ref_verify_lock;
	struct rb_root block_tree;
#endif

#ifdef CONFIG_BTRFS_DEBUG
	struct kobject *debug_kobj;
	struct list_head allocated_roots;

	spinlock_t eb_leak_lock;
	struct list_head allocated_ebs;
#endif
};
static inline void btrfs_set_last_root_drop_gen(struct btrfs_fs_info *fs_info,
						u64 gen)
{
	WRITE_ONCE(fs_info->last_root_drop_gen, gen);
}

static inline u64 btrfs_get_last_root_drop_gen(const struct btrfs_fs_info *fs_info)
{
	return READ_ONCE(fs_info->last_root_drop_gen);
}
/*
 * Take the number of bytes to be checksummed and figure out how many leaves
 * it would require to store the csums for that many bytes.
 */
static inline u64 btrfs_csum_bytes_to_leaves(
			const struct btrfs_fs_info *fs_info, u64 csum_bytes)
{
	const u64 num_csums = csum_bytes >> fs_info->sectorsize_bits;

	return DIV_ROUND_UP_ULL(num_csums, fs_info->csums_per_leaf);
}
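/*
 * Worked example (illustrative numbers only): with 4K sectors, checksumming
 * 1M of data covers 1M >> 12 = 256 sectors, i.e. 256 csum items. If, say,
 * csums_per_leaf were 128, that would need DIV_ROUND_UP(256, 128) = 2
 * leaves. The real csums_per_leaf value depends on nodesize and csum_size.
 */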
/*
 * Use this if we would be adding new items, as we could split nodes as we cow
 * down the tree.
 */
static inline u64 btrfs_calc_insert_metadata_size(struct btrfs_fs_info *fs_info,
						  unsigned num_items)
{
	return (u64)fs_info->nodesize * BTRFS_MAX_LEVEL * 2 * num_items;
}
/*
 * Doing a truncate or a modification won't result in new nodes or leaves, just
 * what we need for COW.
 */
static inline u64 btrfs_calc_metadata_size(struct btrfs_fs_info *fs_info,
					   unsigned num_items)
{
	return (u64)fs_info->nodesize * BTRFS_MAX_LEVEL * num_items;
}
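/*
 * Worked example: with the common 16K nodesize and BTRFS_MAX_LEVEL == 8,
 * btrfs_calc_metadata_size() reserves 16K * 8 = 128K per item to COW one
 * path from root to leaf, and btrfs_calc_insert_metadata_size() doubles that
 * to 256K per item to cover possible node splits on insertion.
 */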
#define BTRFS_MAX_EXTENT_ITEM_SIZE(r) ((BTRFS_LEAF_DATA_SIZE(r->fs_info) >> 4) - \
					sizeof(struct btrfs_item))

static inline bool btrfs_is_zoned(const struct btrfs_fs_info *fs_info)
{
	return fs_info->zone_size > 0;
}
/*
 * Count how many fs_info->max_extent_size cover the @size
 */
static inline u32 count_max_extents(struct btrfs_fs_info *fs_info, u64 size)
{
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
	if (!fs_info)
		return div_u64(size + BTRFS_MAX_EXTENT_SIZE - 1, BTRFS_MAX_EXTENT_SIZE);
#endif

	return div_u64(size + fs_info->max_extent_size - 1, fs_info->max_extent_size);
}
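/*
 * Worked example: on a regular (non-zoned) filesystem max_extent_size is
 * BTRFS_MAX_EXTENT_SIZE (128M), so a 300M delalloc range counts as
 * DIV_ROUND_UP(300M, 128M) = 3 extents for reservation purposes.
 */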
bool btrfs_exclop_start(struct btrfs_fs_info *fs_info,
			enum btrfs_exclusive_operation type);
bool btrfs_exclop_start_try_lock(struct btrfs_fs_info *fs_info,
				 enum btrfs_exclusive_operation type);
void btrfs_exclop_start_unlock(struct btrfs_fs_info *fs_info);
void btrfs_exclop_finish(struct btrfs_fs_info *fs_info);
void btrfs_exclop_balance(struct btrfs_fs_info *fs_info,
			  enum btrfs_exclusive_operation op);
/* Compatibility and incompatibility defines */
void __btrfs_set_fs_incompat(struct btrfs_fs_info *fs_info, u64 flag,
			     const char *name);
void __btrfs_clear_fs_incompat(struct btrfs_fs_info *fs_info, u64 flag,
			       const char *name);
void __btrfs_set_fs_compat_ro(struct btrfs_fs_info *fs_info, u64 flag,
			      const char *name);
void __btrfs_clear_fs_compat_ro(struct btrfs_fs_info *fs_info, u64 flag,
				const char *name);
#define __btrfs_fs_incompat(fs_info, flags) \
	(!!(btrfs_super_incompat_flags((fs_info)->super_copy) & (flags)))

#define __btrfs_fs_compat_ro(fs_info, flags) \
	(!!(btrfs_super_compat_ro_flags((fs_info)->super_copy) & (flags)))

#define btrfs_set_fs_incompat(__fs_info, opt) \
	__btrfs_set_fs_incompat((__fs_info), BTRFS_FEATURE_INCOMPAT_##opt, #opt)

#define btrfs_clear_fs_incompat(__fs_info, opt) \
	__btrfs_clear_fs_incompat((__fs_info), BTRFS_FEATURE_INCOMPAT_##opt, #opt)

#define btrfs_fs_incompat(fs_info, opt) \
	__btrfs_fs_incompat((fs_info), BTRFS_FEATURE_INCOMPAT_##opt)

#define btrfs_set_fs_compat_ro(__fs_info, opt) \
	__btrfs_set_fs_compat_ro((__fs_info), BTRFS_FEATURE_COMPAT_RO_##opt, #opt)

#define btrfs_clear_fs_compat_ro(__fs_info, opt) \
	__btrfs_clear_fs_compat_ro((__fs_info), BTRFS_FEATURE_COMPAT_RO_##opt, #opt)

#define btrfs_fs_compat_ro(fs_info, opt) \
	__btrfs_fs_compat_ro((fs_info), BTRFS_FEATURE_COMPAT_RO_##opt)
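/*
 * Example of how the token pasting above is used: checking and then setting
 * the RAID1C34 incompat bit from code would look like
 *
 *	if (!btrfs_fs_incompat(fs_info, RAID1C34))
 *		btrfs_set_fs_incompat(fs_info, RAID1C34);
 *
 * which expands to __btrfs_fs_incompat(fs_info,
 * BTRFS_FEATURE_INCOMPAT_RAID1C34) and __btrfs_set_fs_incompat(fs_info,
 * BTRFS_FEATURE_INCOMPAT_RAID1C34, "RAID1C34") respectively.
 */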
#define btrfs_clear_opt(o, opt)	((o) &= ~BTRFS_MOUNT_##opt)
#define btrfs_set_opt(o, opt)		((o) |= BTRFS_MOUNT_##opt)
#define btrfs_raw_test_opt(o, opt)	((o) & BTRFS_MOUNT_##opt)
#define btrfs_test_opt(fs_info, opt)	((fs_info)->mount_opt & \
					 BTRFS_MOUNT_##opt)
#define btrfs_set_and_info(fs_info, opt, fmt, args...)			\
do {									\
	if (!btrfs_test_opt(fs_info, opt))				\
		btrfs_info(fs_info, fmt, ##args);			\
	btrfs_set_opt(fs_info->mount_opt, opt);				\
} while (0)

#define btrfs_clear_and_info(fs_info, opt, fmt, args...)		\
do {									\
	if (btrfs_test_opt(fs_info, opt))				\
		btrfs_info(fs_info, fmt, ##args);			\
	btrfs_clear_opt(fs_info->mount_opt, opt);			\
} while (0)
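/*
 * Usage sketch, in the style of the mount option parsing in super.c (the
 * call below is illustrative, not copied from there):
 *
 *	btrfs_set_and_info(info, FLUSHONCOMMIT, "turning on flush-on-commit");
 *
 * This logs the message only on an actual 0 -> 1 transition of the option
 * bit, then sets it.
 */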
static inline int btrfs_fs_closing(struct btrfs_fs_info *fs_info)
{
	/* Do it this way so we only ever do one test_bit in the normal case. */
	if (test_bit(BTRFS_FS_CLOSING_START, &fs_info->flags)) {
		if (test_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags))
			return 2;
		return 1;
	}
	return 0;
}
/*
 * If we remount the fs to be R/O or umount the fs, the cleaner needn't do
 * anything except sleeping. This function is used to check the status of
 * the fs.
 *
 * We check for BTRFS_FS_STATE_RO to avoid races with a concurrent remount,
 * since setting and checking for SB_RDONLY in the superblock's flags is not
 * atomic.
 */
static inline int btrfs_need_cleaner_sleep(struct btrfs_fs_info *fs_info)
{
	return test_bit(BTRFS_FS_STATE_RO, &fs_info->fs_state) ||
	       btrfs_fs_closing(fs_info);
}
static inline void btrfs_wake_unfinished_drop(struct btrfs_fs_info *fs_info)
{
	clear_and_wake_up_bit(BTRFS_FS_UNFINISHED_DROPS, &fs_info->flags);
}
#define BTRFS_FS_ERROR(fs_info)	(unlikely(test_bit(BTRFS_FS_STATE_ERROR, \
						   &(fs_info)->fs_state)))
#define BTRFS_FS_LOG_CLEANUP_ERROR(fs_info)				\
	(unlikely(test_bit(BTRFS_FS_STATE_LOG_CLEANUP_ERROR,		\
			   &(fs_info)->fs_state)))
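/*
 * Typical use, sketched: write paths bail out once the filesystem has been
 * flipped into the error state, e.g.
 *
 *	if (BTRFS_FS_ERROR(fs_info))
 *		return -EROFS;
 *
 * The unlikely() hint keeps the healthy path cheap.
 */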
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS

#define EXPORT_FOR_TESTS

static inline int btrfs_is_testing(struct btrfs_fs_info *fs_info)
{
	return test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state);
}

void btrfs_test_destroy_inode(struct inode *inode);

#else

#define EXPORT_FOR_TESTS static

static inline int btrfs_is_testing(struct btrfs_fs_info *fs_info)