/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_FS_H
#define BTRFS_FS_H

#include <linux/blkdev.h>
#include <linux/sizes.h>
#include <linux/time64.h>
#include <linux/compiler.h>
#include <linux/math.h>
#include <linux/atomic.h>
#include <linux/percpu_counter.h>
#include <linux/completion.h>
#include <linux/lockdep.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/rwlock_types.h>
#include <linux/rwsem.h>
#include <linux/semaphore.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <linux/wait_bit.h>
#include <linux/sched.h>
#include <linux/rbtree.h>
#include <uapi/linux/btrfs.h>
#include <uapi/linux/btrfs_tree.h>
#include "extent-io-tree.h"
#include "async-thread.h"
#include "block-rsv.h"

struct inode;
struct super_block;
struct kobject;
struct reloc_control;
struct crypto_shash;
struct ulist;
struct btrfs_device;
struct btrfs_block_group;
struct btrfs_root;
struct btrfs_fs_devices;
struct btrfs_transaction;
struct btrfs_delayed_root;
struct btrfs_balance_control;
struct btrfs_subpage_info;
struct btrfs_stripe_hash_table;
struct btrfs_space_info;

#define BTRFS_MAX_EXTENT_SIZE		SZ_128M

#define BTRFS_OLDEST_GENERATION	0ULL

#define BTRFS_EMPTY_DIR_SIZE		0

#define BTRFS_DIRTY_METADATA_THRESH	SZ_32M

#define BTRFS_SUPER_INFO_OFFSET	SZ_64K
#define BTRFS_SUPER_INFO_SIZE		4096
static_assert(sizeof(struct btrfs_super_block) == BTRFS_SUPER_INFO_SIZE);

/*
 * Number of metadata items necessary for an unlink operation:
 *
 * 1 for the possible orphan item
 * 1 for the dir item
 * 1 for the dir index
 * 1 for the inode ref
 * 1 for the inode
 * 1 for the parent inode
 */
#define BTRFS_UNLINK_METADATA_UNITS	6

/*
 * The reserved space at the beginning of each device. It covers the primary
 * super block and leaves room for potential use by other tools like
 * bootloaders, and lowers the potential damage of an accidental overwrite.
 */
#define BTRFS_DEVICE_RANGE_RESERVED	(SZ_1M)

/*
 * Runtime (in-memory) states of a filesystem.
 */
enum {
	/*
	 * Filesystem is being remounted, allow to skip some operations, like
	 * defrag.
	 */
	BTRFS_FS_STATE_REMOUNTING,
	/* Filesystem in RO mode */
	BTRFS_FS_STATE_RO,
	/* Track if a transaction abort has been reported on this filesystem */
	BTRFS_FS_STATE_TRANS_ABORTED,
	/*
	 * Bio operations should be blocked on this filesystem because a source
	 * or target device is being destroyed as part of a device replace.
	 */
	BTRFS_FS_STATE_DEV_REPLACING,
	/* The btrfs_fs_info created for self-tests */
	BTRFS_FS_STATE_DUMMY_FS_INFO,

	BTRFS_FS_STATE_NO_CSUMS,

	/* Indicates there was an error cleaning up a log tree. */
	BTRFS_FS_STATE_LOG_CLEANUP_ERROR,

	BTRFS_FS_STATE_COUNT
};

enum {
	BTRFS_FS_CLOSING_START,
	BTRFS_FS_CLOSING_DONE,
	BTRFS_FS_LOG_RECOVERING,
	BTRFS_FS_OPEN,
	BTRFS_FS_QUOTA_ENABLED,
	BTRFS_FS_UPDATE_UUID_TREE_GEN,
	BTRFS_FS_CREATING_FREE_SPACE_TREE,
	BTRFS_FS_BTREE_ERR,
	BTRFS_FS_LOG1_ERR,
	BTRFS_FS_LOG2_ERR,
	BTRFS_FS_QUOTA_OVERRIDE,
	/* Used to record internally whether the fs has been frozen */
	BTRFS_FS_FROZEN,
	/*
	 * Indicate that balance has been set up from the ioctl and is in the
	 * main phase. The fs_info::balance_ctl is initialized.
	 */
	BTRFS_FS_BALANCE_RUNNING,

	/*
	 * Indicate that relocation of a chunk has started; it's set per chunk
	 * and is toggled between chunks.
	 */
	BTRFS_FS_RELOC_RUNNING,

	/* Indicate that the cleaner thread is awake and doing something. */
	BTRFS_FS_CLEANER_RUNNING,

	/*
	 * The checksumming has an optimized version and is considered fast,
	 * so we don't need to offload checksums to workqueues.
	 */
	BTRFS_FS_CSUM_IMPL_FAST,

	/* Indicate that the discard workqueue can service discards. */
	BTRFS_FS_DISCARD_RUNNING,

	/* Indicate that we need to clean up space cache v1 */
	BTRFS_FS_CLEANUP_SPACE_CACHE_V1,

	/* Indicate that we can't trust the free space tree for caching yet */
	BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED,

	/* Indicate whether there are any tree modification log users */
	BTRFS_FS_TREE_MOD_LOG_USERS,

	/* Indicate that we want the transaction kthread to commit right now. */
	BTRFS_FS_COMMIT_TRANS,

	/* Indicate we have half completed snapshot deletions pending. */
	BTRFS_FS_UNFINISHED_DROPS,

	/* Indicate we have to finish a zone to do next allocation. */
	BTRFS_FS_NEED_ZONE_FINISH,

	/* Indicate that we want to commit the transaction. */
	BTRFS_FS_NEED_TRANS_COMMIT,

	/* This is set when active zone tracking is needed. */
	BTRFS_FS_ACTIVE_ZONE_TRACKING,

	/*
	 * Indicate that some features have changed; this is mostly for the
	 * cleaner thread to update the sysfs interface.
	 */
	BTRFS_FS_FEATURE_CHANGED,

	/*
	 * Indicate that we have found a tree block which is only aligned to
	 * sectorsize, but not to nodesize. This should be rare nowadays.
	 */
	BTRFS_FS_UNALIGNED_TREE_BLOCK,

#if BITS_PER_LONG == 32
	/* Indicate if we have error/warn message printed on 32bit systems */
	BTRFS_FS_32BIT_ERROR,
	BTRFS_FS_32BIT_WARN,
#endif
};

/*
 * Flags for mount options.
 *
 * Note: don't forget to add new options to btrfs_show_options()
 */
enum {
	BTRFS_MOUNT_NODATASUM			= (1UL << 0),
	BTRFS_MOUNT_NODATACOW			= (1UL << 1),
	BTRFS_MOUNT_NOBARRIER			= (1UL << 2),
	BTRFS_MOUNT_SSD				= (1UL << 3),
	BTRFS_MOUNT_DEGRADED			= (1UL << 4),
	BTRFS_MOUNT_COMPRESS			= (1UL << 5),
	BTRFS_MOUNT_NOTREELOG			= (1UL << 6),
	BTRFS_MOUNT_FLUSHONCOMMIT		= (1UL << 7),
	BTRFS_MOUNT_SSD_SPREAD			= (1UL << 8),
	BTRFS_MOUNT_NOSSD			= (1UL << 9),
	BTRFS_MOUNT_DISCARD_SYNC		= (1UL << 10),
	BTRFS_MOUNT_FORCE_COMPRESS		= (1UL << 11),
	BTRFS_MOUNT_SPACE_CACHE			= (1UL << 12),
	BTRFS_MOUNT_CLEAR_CACHE			= (1UL << 13),
	BTRFS_MOUNT_USER_SUBVOL_RM_ALLOWED	= (1UL << 14),
	BTRFS_MOUNT_ENOSPC_DEBUG		= (1UL << 15),
	BTRFS_MOUNT_AUTO_DEFRAG			= (1UL << 16),
	BTRFS_MOUNT_USEBACKUPROOT		= (1UL << 17),
	BTRFS_MOUNT_SKIP_BALANCE		= (1UL << 18),
	BTRFS_MOUNT_PANIC_ON_FATAL_ERROR	= (1UL << 19),
	BTRFS_MOUNT_RESCAN_UUID_TREE		= (1UL << 20),
	BTRFS_MOUNT_FRAGMENT_DATA		= (1UL << 21),
	BTRFS_MOUNT_FRAGMENT_METADATA		= (1UL << 22),
	BTRFS_MOUNT_FREE_SPACE_TREE		= (1UL << 23),
	BTRFS_MOUNT_NOLOGREPLAY			= (1UL << 24),
	BTRFS_MOUNT_REF_VERIFY			= (1UL << 25),
	BTRFS_MOUNT_DISCARD_ASYNC		= (1UL << 26),
	BTRFS_MOUNT_IGNOREBADROOTS		= (1UL << 27),
	BTRFS_MOUNT_IGNOREDATACSUMS		= (1UL << 28),
	BTRFS_MOUNT_NODISCARD			= (1UL << 29),
	BTRFS_MOUNT_NOSPACECACHE		= (1UL << 30),
};

/*
 * Compat flags that we support. If any incompat flags are set other than the
 * ones specified below then we will fail to mount.
 */
#define BTRFS_FEATURE_COMPAT_SUPP		0ULL
#define BTRFS_FEATURE_COMPAT_SAFE_SET		0ULL
#define BTRFS_FEATURE_COMPAT_SAFE_CLEAR	0ULL

#define BTRFS_FEATURE_COMPAT_RO_SUPP			\
	(BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE |	\
	 BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE_VALID | \
	 BTRFS_FEATURE_COMPAT_RO_VERITY |		\
	 BTRFS_FEATURE_COMPAT_RO_BLOCK_GROUP_TREE)

#define BTRFS_FEATURE_COMPAT_RO_SAFE_SET	0ULL
#define BTRFS_FEATURE_COMPAT_RO_SAFE_CLEAR	0ULL

#define BTRFS_FEATURE_INCOMPAT_SUPP_STABLE		\
	(BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF |		\
	 BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL |	\
	 BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS |		\
	 BTRFS_FEATURE_INCOMPAT_BIG_METADATA |		\
	 BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO |		\
	 BTRFS_FEATURE_INCOMPAT_COMPRESS_ZSTD |	\
	 BTRFS_FEATURE_INCOMPAT_RAID56 |		\
	 BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF |	\
	 BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA |	\
	 BTRFS_FEATURE_INCOMPAT_NO_HOLES |		\
	 BTRFS_FEATURE_INCOMPAT_METADATA_UUID |	\
	 BTRFS_FEATURE_INCOMPAT_RAID1C34 |		\
	 BTRFS_FEATURE_INCOMPAT_ZONED |		\
	 BTRFS_FEATURE_INCOMPAT_SIMPLE_QUOTA)

#ifdef CONFIG_BTRFS_DEBUG
/*
 * Features under development, like extent tree v2, are enabled only under
 * CONFIG_BTRFS_DEBUG.
 */
#define BTRFS_FEATURE_INCOMPAT_SUPP			\
	(BTRFS_FEATURE_INCOMPAT_SUPP_STABLE |		\
	 BTRFS_FEATURE_INCOMPAT_RAID_STRIPE_TREE |	\
	 BTRFS_FEATURE_INCOMPAT_EXTENT_TREE_V2)

#else

#define BTRFS_FEATURE_INCOMPAT_SUPP			\
	(BTRFS_FEATURE_INCOMPAT_SUPP_STABLE)

#endif

#define BTRFS_FEATURE_INCOMPAT_SAFE_SET		\
	(BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF)
#define BTRFS_FEATURE_INCOMPAT_SAFE_CLEAR	0ULL

#define BTRFS_DEFAULT_COMMIT_INTERVAL	(30)
#define BTRFS_DEFAULT_MAX_INLINE	(2048)

struct btrfs_dev_replace {
	/* See #define above */
	u64 replace_state;
	/* Seconds since 1-Jan-1970 */
	time64_t time_started;
	/* Seconds since 1-Jan-1970 */
	time64_t time_stopped;
	atomic64_t num_write_errors;
	atomic64_t num_uncorrectable_read_errors;

	u64 cursor_left;
	u64 committed_cursor_left;
	u64 cursor_left_last_write_of_item;
	u64 cursor_right;

	/* See #define above */
	u64 cont_reading_from_srcdev_mode;

	int is_valid;
	int item_needs_writeback;
	struct btrfs_device *srcdev;
	struct btrfs_device *tgtdev;

	struct mutex lock_finishing_cancel_unmount;
	struct rw_semaphore rwsem;

	struct btrfs_scrub_progress scrub_progress;

	struct percpu_counter bio_counter;
	wait_queue_head_t replace_wait;
};

/*
 * Free clusters are used to claim free space in relatively large chunks,
 * allowing us to do less seeky writes. They are used for all metadata
 * allocations. In ssd_spread mode they are also used for data allocations.
 */
struct btrfs_free_cluster {
	spinlock_t lock;
	spinlock_t refill_lock;
	struct rb_root root;

	/* Largest extent in this cluster */
	u64 max_size;

	/* First extent starting offset */
	u64 window_start;

	/* We did a full search and couldn't create a cluster */
	bool fragmented;

	struct btrfs_block_group *block_group;
	/*
	 * When a cluster is allocated from a block group, we put the cluster
	 * onto a list in the block group so that it can be freed before the
	 * block group is freed.
	 */
	struct list_head block_group_list;
};

/* Discard control. */
/*
 * Async discard uses multiple lists to differentiate the discard filter
 * parameters. Index 0 is for completely free block groups where we need to
 * ensure the entire block group is trimmed without being lossy. Indices
 * afterwards represent monotonically decreasing discard filter sizes to
 * prioritize what should be discarded next.
 */
#define BTRFS_NR_DISCARD_LISTS		3
#define BTRFS_DISCARD_INDEX_UNUSED	0
#define BTRFS_DISCARD_INDEX_START	1

struct btrfs_discard_ctl {
	struct workqueue_struct *discard_workers;
	struct delayed_work work;
	spinlock_t lock;
	struct btrfs_block_group *block_group;
	struct list_head discard_list[BTRFS_NR_DISCARD_LISTS];
	u64 prev_discard;
	u64 prev_discard_time;
	atomic_t discardable_extents;
	atomic64_t discardable_bytes;
	u64 max_discard_size;
	u64 delay_ms;
	u32 iops_limit;
	u32 kbps_limit;
	u64 discard_extent_bytes;
	u64 discard_bitmap_bytes;
	atomic64_t discard_bytes_saved;
};

/*
 * Exclusive operations (device replace, resize, device add/remove, balance)
 */
enum btrfs_exclusive_operation {
	BTRFS_EXCLOP_NONE,
	BTRFS_EXCLOP_BALANCE_PAUSED,
	BTRFS_EXCLOP_BALANCE,
	BTRFS_EXCLOP_DEV_ADD,
	BTRFS_EXCLOP_DEV_REMOVE,
	BTRFS_EXCLOP_DEV_REPLACE,
	BTRFS_EXCLOP_RESIZE,
	BTRFS_EXCLOP_SWAP_ACTIVATE,
};

/* Store data about transaction commits, exported via sysfs. */
struct btrfs_commit_stats {
	/* Total number of commits */
	u64 commit_count;
	/* The maximum commit duration so far in ns */
	u64 max_commit_dur;
	/* The last commit duration in ns */
	u64 last_commit_dur;
	/* The total commit duration in ns */
	u64 total_commit_dur;
};

struct btrfs_fs_info {
	u8 chunk_tree_uuid[BTRFS_UUID_SIZE];
	unsigned long flags;
	struct btrfs_root *tree_root;
	struct btrfs_root *chunk_root;
	struct btrfs_root *dev_root;
	struct btrfs_root *fs_root;
	struct btrfs_root *quota_root;
	struct btrfs_root *uuid_root;
	struct btrfs_root *data_reloc_root;
	struct btrfs_root *block_group_root;
	struct btrfs_root *stripe_root;

	/* The log root tree is a directory of all the other log roots */
	struct btrfs_root *log_root_tree;

	/* The tree that holds the global roots (csum, extent, etc) */
	rwlock_t global_root_lock;
	struct rb_root global_root_tree;

	spinlock_t fs_roots_radix_lock;
	struct radix_tree_root fs_roots_radix;

	/* Block group cache stuff */
	rwlock_t block_group_cache_lock;
	struct rb_root_cached block_group_cache_tree;

	/* Keep track of unallocated space */
	atomic64_t free_chunk_space;

	/* Track ranges which are used by log trees blocks/logged data extents */
	struct extent_io_tree excluded_extents;

	/* logical->physical extent mapping */
	struct rb_root_cached mapping_tree;
	rwlock_t mapping_tree_lock;

	/*
	 * Block reservation for extent, checksum, root tree and delayed dir
	 * index item.
	 */
	struct btrfs_block_rsv global_block_rsv;
	/* Block reservation for metadata operations */
	struct btrfs_block_rsv trans_block_rsv;
	/* Block reservation for chunk tree */
	struct btrfs_block_rsv chunk_block_rsv;
	/* Block reservation for delayed operations */
	struct btrfs_block_rsv delayed_block_rsv;
	/* Block reservation for delayed refs */
	struct btrfs_block_rsv delayed_refs_rsv;

	struct btrfs_block_rsv empty_block_rsv;

	/*
	 * Updated while holding the lock 'trans_lock'. Due to the life cycle
	 * of a transaction, it can be directly read while holding a
	 * transaction handle; everywhere else it must be read with
	 * btrfs_get_fs_generation(). Should always be updated using
	 * btrfs_set_fs_generation().
	 */
	u64 generation;
	/*
	 * Always use btrfs_get_last_trans_committed() and
	 * btrfs_set_last_trans_committed() to read and update this field.
	 */
	u64 last_trans_committed;
	/*
	 * Generation of the last transaction used for block group relocation
	 * since the filesystem was last mounted (or 0 if none happened yet).
	 * Must be written and read while holding btrfs_fs_info::commit_root_sem.
	 */
	u64 last_reloc_trans;

	/*
	 * This is updated to the current trans every time a full commit is
	 * required instead of the faster short fsync log commits.
	 */
	u64 last_trans_log_full_commit;
	unsigned long mount_opt;

	unsigned long compress_type:4;
	unsigned int compress_level;
	u32 commit_interval;
	/*
	 * This is an advisory value; the read side is safe even if it sees a
	 * stale number, because we will simply write the data out as a
	 * regular extent. The write side (mount/remount) is under the
	 * ->s_umount lock, so it is also safe.
	 */
	u64 max_inline;

	struct btrfs_transaction *running_transaction;
	wait_queue_head_t transaction_throttle;
	wait_queue_head_t transaction_wait;
	wait_queue_head_t transaction_blocked_wait;
	wait_queue_head_t async_submit_wait;

	/*
	 * Used to protect the incompat_flags, compat_flags, compat_ro_flags
	 * when they are updated.
	 *
	 * Because we never clear the flags, we don't need the lock on the
	 * read side.
	 *
	 * We also don't need the lock when we mount the fs, because there is
	 * no other task that could be updating the flags then.
	 */
	spinlock_t super_lock;
	struct btrfs_super_block *super_copy;
	struct btrfs_super_block *super_for_commit;
	struct super_block *sb;
	struct inode *btree_inode;
	struct mutex tree_log_mutex;
	struct mutex transaction_kthread_mutex;
	struct mutex cleaner_mutex;
	struct mutex chunk_mutex;

	/*
	 * This is taken to make sure we don't set block groups ro after the
	 * free space cache has been allocated on them.
	 */
	struct mutex ro_block_group_mutex;

	/*
	 * This is used during read/modify/write to make sure no two ios are
	 * trying to mod the same stripe at the same time.
	 */
	struct btrfs_stripe_hash_table *stripe_hash_table;

	/*
	 * This protects the ordered operations list only while we are
	 * processing all of the entries on it. This way we make sure the
	 * commit code doesn't find the list temporarily empty because another
	 * function happens to be doing non-waiting preflush before jumping
	 * into the main commit.
	 */
	struct mutex ordered_operations_mutex;

	struct rw_semaphore commit_root_sem;

	struct rw_semaphore cleanup_work_sem;

	struct rw_semaphore subvol_sem;

	spinlock_t trans_lock;
	/*
	 * The reloc mutex goes with the trans lock; it is taken during commit
	 * to protect us from the relocation code.
	 */
	struct mutex reloc_mutex;

	struct list_head trans_list;
	struct list_head dead_roots;
	struct list_head caching_block_groups;

	spinlock_t delayed_iput_lock;
	struct list_head delayed_iputs;
	atomic_t nr_delayed_iputs;
	wait_queue_head_t delayed_iputs_wait;

	atomic64_t tree_mod_seq;

	/* This protects tree_mod_log and tree_mod_seq_list */
	rwlock_t tree_mod_log_lock;
	struct rb_root tree_mod_log;
	struct list_head tree_mod_seq_list;

	atomic_t async_delalloc_pages;

	/* This is used to protect the following list -- ordered_roots. */
	spinlock_t ordered_root_lock;

	/*
	 * All fs/file tree roots in which there are data=ordered extents
	 * pending writeback are added into this list.
	 *
	 * These can span multiple transactions and basically include every
	 * dirty data page that isn't from nodatacow.
	 */
	struct list_head ordered_roots;

	struct mutex delalloc_root_mutex;
	spinlock_t delalloc_root_lock;
	/* All fs/file tree roots that have delalloc inodes. */
	struct list_head delalloc_roots;

	/*
	 * There is a pool of worker threads for checksumming during writes and
	 * a pool for checksumming after reads. This is because readers can
	 * run with FS locks held, and the writers may be waiting for those
	 * locks. We don't want ordering in the pending list to cause
	 * deadlocks, and so the two are serviced separately.
	 *
	 * A third pool does submit_bio to avoid deadlocking with the other two.
	 */
	struct btrfs_workqueue *workers;
	struct btrfs_workqueue *delalloc_workers;
	struct btrfs_workqueue *flush_workers;
	struct workqueue_struct *endio_workers;
	struct workqueue_struct *endio_meta_workers;
	struct workqueue_struct *rmw_workers;
	struct workqueue_struct *compressed_write_workers;
	struct btrfs_workqueue *endio_write_workers;
	struct btrfs_workqueue *endio_freespace_worker;
	struct btrfs_workqueue *caching_workers;

	/*
	 * Fixup workers take dirty pages that didn't properly go through the
	 * cow mechanism and make them safe to write. It happens for the
	 * sys_munmap function call path.
	 */
	struct btrfs_workqueue *fixup_workers;
	struct btrfs_workqueue *delayed_workers;

	struct task_struct *transaction_kthread;
	struct task_struct *cleaner_kthread;
	u32 thread_pool_size;

	struct kobject *space_info_kobj;
	struct kobject *qgroups_kobj;
	struct kobject *discard_kobj;

	/* Used to keep from writing metadata until there is a nice batch */
	struct percpu_counter dirty_metadata_bytes;
	struct percpu_counter delalloc_bytes;
	struct percpu_counter ordered_bytes;
	s32 dirty_metadata_batch;
	s32 delalloc_batch;

	struct percpu_counter evictable_extent_maps;
	u64 extent_map_shrinker_last_root;
	u64 extent_map_shrinker_last_ino;

	/* Protected by 'trans_lock'. */
	struct list_head dirty_cowonly_roots;

	struct btrfs_fs_devices *fs_devices;

	/*
	 * The space_info list is effectively read only after initial setup.
	 * It is populated at mount time and cleaned up after all block groups
	 * are removed. RCU is used to protect it.
	 */
	struct list_head space_info;

	struct btrfs_space_info *data_sinfo;

	struct reloc_control *reloc_ctl;

	/* data_alloc_cluster is only used in ssd_spread mode */
	struct btrfs_free_cluster data_alloc_cluster;

	/* All metadata allocations go through this cluster. */
	struct btrfs_free_cluster meta_alloc_cluster;

	/* Auto defrag inodes go here. */
	spinlock_t defrag_inodes_lock;
	struct rb_root defrag_inodes;
	atomic_t defrag_running;

	/* Used to protect avail_{data, metadata, system}_alloc_bits */
	seqlock_t profiles_lock;
	/*
	 * These three are in extended format (availability of single chunks is
	 * denoted by BTRFS_AVAIL_ALLOC_BIT_SINGLE bit, other types are denoted
	 * by corresponding BTRFS_BLOCK_GROUP_* bits)
	 */
	u64 avail_data_alloc_bits;
	u64 avail_metadata_alloc_bits;
	u64 avail_system_alloc_bits;

	/* Balance state */
	spinlock_t balance_lock;
	struct mutex balance_mutex;
	atomic_t balance_pause_req;
	atomic_t balance_cancel_req;
	struct btrfs_balance_control *balance_ctl;
	wait_queue_head_t balance_wait_q;

	/* Cancellation requests for chunk relocation */
	atomic_t reloc_cancel_req;

	u32 data_chunk_allocations;
	u32 metadata_ratio;

	void *bdev_holder;

	/* Private scrub information */
	struct mutex scrub_lock;
	atomic_t scrubs_running;
	atomic_t scrub_pause_req;
	atomic_t scrubs_paused;
	atomic_t scrub_cancel_req;
	wait_queue_head_t scrub_pause_wait;
	/*
	 * The worker pointers are NULL iff the refcount is 0, ie. scrub is not
	 * running.
	 */
	refcount_t scrub_workers_refcnt;
	struct workqueue_struct *scrub_workers;
	struct btrfs_subpage_info *subpage_info;

	struct btrfs_discard_ctl discard_ctl;

	/* Is qgroup tracking in a consistent state? */
	u64 qgroup_flags;

	/* Holds configuration and tracking. Protected by qgroup_lock. */
	struct rb_root qgroup_tree;
	spinlock_t qgroup_lock;

	/*
	 * Used to avoid frequently calling ulist_alloc()/ulist_free()
	 * when doing qgroup accounting, it must be protected by qgroup_lock.
	 */
	struct ulist *qgroup_ulist;

	/*
	 * Protect user change for quota operations. If a transaction is needed,
	 * it must be started before locking this lock.
	 */
	struct mutex qgroup_ioctl_lock;

	/* List of dirty qgroups to be written at next commit. */
	struct list_head dirty_qgroups;

	/* Used by qgroup for an efficient tree traversal. */
	u64 qgroup_seq;

	/* Qgroup rescan items. */
	/* Protects the progress item */
	struct mutex qgroup_rescan_lock;
	struct btrfs_key qgroup_rescan_progress;
	struct btrfs_workqueue *qgroup_rescan_workers;
	struct completion qgroup_rescan_completion;
	struct btrfs_work qgroup_rescan_work;
	/* Protected by qgroup_rescan_lock */
	bool qgroup_rescan_running;
	u8 qgroup_drop_subtree_thres;
	u64 qgroup_enable_gen;

	/*
	 * If this is not 0, then it indicates a serious filesystem error has
	 * happened and it contains that error (negative errno value).
	 */
	int fs_error;

	/* Filesystem state */
	unsigned long fs_state;

	struct btrfs_delayed_root *delayed_root;

	/* Extent buffer radix tree */
	spinlock_t buffer_lock;
	/* Entries are eb->start / sectorsize */
	struct radix_tree_root buffer_radix;

	/* Next backup root to be overwritten */
	int backup_root_index;

	/* Device replace state */
	struct btrfs_dev_replace dev_replace;

	struct semaphore uuid_tree_rescan_sem;

	/* Used to reclaim the metadata space in the background. */
	struct work_struct async_reclaim_work;
	struct work_struct async_data_reclaim_work;
	struct work_struct preempt_reclaim_work;

	/* Reclaim partially filled block groups in the background */
	struct work_struct reclaim_bgs_work;
	/* Protected by unused_bgs_lock. */
	struct list_head reclaim_bgs;
	int bg_reclaim_threshold;

	/* Protects the lists unused_bgs and reclaim_bgs. */
	spinlock_t unused_bgs_lock;
	/* Protected by unused_bgs_lock. */
	struct list_head unused_bgs;
	struct mutex unused_bg_unpin_mutex;
	/* Protect block groups that are going to be deleted */
	struct mutex reclaim_bgs_lock;

	/* Cached block sizes */
	u32 nodesize;
	u32 sectorsize;
	/* ilog2 of sectorsize, used to avoid 64bit division */
	u32 sectorsize_bits;
	u32 csum_size;
	u32 csums_per_leaf;
	u32 stripesize;

	/*
	 * Maximum size of an extent. BTRFS_MAX_EXTENT_SIZE on a regular
	 * filesystem; on zoned it depends on the device constraints.
	 */
	u64 max_extent_size;

	/* Block groups and devices containing active swapfiles. */
	spinlock_t swapfile_pins_lock;
	struct rb_root swapfile_pins;

	struct crypto_shash *csum_shash;

	/* Type of exclusive operation running, protected by super_lock */
	enum btrfs_exclusive_operation exclusive_operation;

	/*
	 * Zone size > 0 when in ZONED mode, otherwise it's used for a check
	 * if the mode is enabled.
	 */
	u64 zone_size;

	/* Constraints for ZONE_APPEND commands: */
	struct queue_limits limits;
	u64 max_zone_append_size;

	struct mutex zoned_meta_io_lock;
	spinlock_t treelog_bg_lock;
	u64 treelog_bg;

	/*
	 * Start of the dedicated data relocation block group, protected by
	 * relocation_bg_lock.
	 */
	spinlock_t relocation_bg_lock;
	u64 data_reloc_bg;
	struct mutex zoned_data_reloc_io_lock;

	struct btrfs_block_group *active_meta_bg;
	struct btrfs_block_group *active_system_bg;

	u64 nr_global_roots;

	spinlock_t zone_active_bgs_lock;
	struct list_head zone_active_bgs;

	/* Updates are not protected by any lock */
	struct btrfs_commit_stats commit_stats;

	/*
	 * Last generation where we dropped a non-relocation root.
	 * Use btrfs_set_last_root_drop_gen() and btrfs_get_last_root_drop_gen()
	 * to change it and to read it, respectively.
	 */
	u64 last_root_drop_gen;

	/*
	 * Annotations for transaction events (structures are empty when
	 * compiled without lockdep).
	 */
	struct lockdep_map btrfs_trans_num_writers_map;
	struct lockdep_map btrfs_trans_num_extwriters_map;
	struct lockdep_map btrfs_state_change_map[4];
	struct lockdep_map btrfs_trans_pending_ordered_map;
	struct lockdep_map btrfs_ordered_extent_map;

#ifdef CONFIG_BTRFS_FS_REF_VERIFY
	spinlock_t ref_verify_lock;
	struct rb_root block_tree;
#endif

#ifdef CONFIG_BTRFS_DEBUG
	struct kobject *debug_kobj;
	struct list_head allocated_roots;

	spinlock_t eb_leak_lock;
	struct list_head allocated_ebs;
#endif
};
874 | ||
c8293894 DS |
875 | #define page_to_inode(_page) (BTRFS_I(_Generic((_page), \ |
876 | struct page *: (_page))->mapping->host)) | |
877 | #define folio_to_inode(_folio) (BTRFS_I(_Generic((_folio), \ | |
878 | struct folio *: (_folio))->mapping->host)) | |
879 | ||
b33d2e53 DS |
880 | #define page_to_fs_info(_page) (page_to_inode(_page)->root->fs_info) |
881 | #define folio_to_fs_info(_folio) (folio_to_inode(_folio)->root->fs_info) | |
882 | ||
41044b41 DS |
883 | #define inode_to_fs_info(_inode) (BTRFS_I(_Generic((_inode), \ |
884 | struct inode *: (_inode)))->root->fs_info) | |
885 | ||
4a4f8fe2 FM |
static inline u64 btrfs_get_fs_generation(const struct btrfs_fs_info *fs_info)
{
	return READ_ONCE(fs_info->generation);
}

static inline void btrfs_set_fs_generation(struct btrfs_fs_info *fs_info, u64 gen)
{
	WRITE_ONCE(fs_info->generation, gen);
}

static inline u64 btrfs_get_last_trans_committed(const struct btrfs_fs_info *fs_info)
{
	return READ_ONCE(fs_info->last_trans_committed);
}

static inline void btrfs_set_last_trans_committed(struct btrfs_fs_info *fs_info, u64 gen)
{
	WRITE_ONCE(fs_info->last_trans_committed, gen);
}

static inline void btrfs_set_last_root_drop_gen(struct btrfs_fs_info *fs_info,
						u64 gen)
{
	WRITE_ONCE(fs_info->last_root_drop_gen, gen);
}

static inline u64 btrfs_get_last_root_drop_gen(const struct btrfs_fs_info *fs_info)
{
	return READ_ONCE(fs_info->last_root_drop_gen);
}

/*
 * Take the number of bytes to be checksummed and figure out how many leaves
 * it would require to store the csums for that many bytes.
 */
static inline u64 btrfs_csum_bytes_to_leaves(
			const struct btrfs_fs_info *fs_info, u64 csum_bytes)
{
	const u64 num_csums = csum_bytes >> fs_info->sectorsize_bits;

	return DIV_ROUND_UP_ULL(num_csums, fs_info->csums_per_leaf);
}

/*
 * Use this if we would be adding new items, as we could split nodes as we cow
 * down the tree.
 */
static inline u64 btrfs_calc_insert_metadata_size(const struct btrfs_fs_info *fs_info,
						  unsigned num_items)
{
	return (u64)fs_info->nodesize * BTRFS_MAX_LEVEL * 2 * num_items;
}

/*
 * Doing a truncate or a modification won't result in new nodes or leaves,
 * just what we need for COW.
 */
static inline u64 btrfs_calc_metadata_size(const struct btrfs_fs_info *fs_info,
					   unsigned num_items)
{
	return (u64)fs_info->nodesize * BTRFS_MAX_LEVEL * num_items;
}

#define BTRFS_MAX_EXTENT_ITEM_SIZE(r) ((BTRFS_LEAF_DATA_SIZE(r->fs_info) >> 4) - \
					sizeof(struct btrfs_item))

static inline bool btrfs_is_zoned(const struct btrfs_fs_info *fs_info)
{
	return IS_ENABLED(CONFIG_BLK_DEV_ZONED) && fs_info->zone_size > 0;
}

/*
 * Count how many fs_info->max_extent_size cover the @size
 */
static inline u32 count_max_extents(struct btrfs_fs_info *fs_info, u64 size)
{
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
	if (!fs_info)
		return div_u64(size + BTRFS_MAX_EXTENT_SIZE - 1, BTRFS_MAX_EXTENT_SIZE);
#endif

	return div_u64(size + fs_info->max_extent_size - 1, fs_info->max_extent_size);
}

bool btrfs_exclop_start(struct btrfs_fs_info *fs_info,
			enum btrfs_exclusive_operation type);
bool btrfs_exclop_start_try_lock(struct btrfs_fs_info *fs_info,
				 enum btrfs_exclusive_operation type);
void btrfs_exclop_start_unlock(struct btrfs_fs_info *fs_info);
void btrfs_exclop_finish(struct btrfs_fs_info *fs_info);
void btrfs_exclop_balance(struct btrfs_fs_info *fs_info,
			  enum btrfs_exclusive_operation op);

int btrfs_check_ioctl_vol_args_path(const struct btrfs_ioctl_vol_args *vol_args);

/* Compatibility and incompatibility defines */
void __btrfs_set_fs_incompat(struct btrfs_fs_info *fs_info, u64 flag,
			     const char *name);
void __btrfs_clear_fs_incompat(struct btrfs_fs_info *fs_info, u64 flag,
			       const char *name);
void __btrfs_set_fs_compat_ro(struct btrfs_fs_info *fs_info, u64 flag,
			      const char *name);
void __btrfs_clear_fs_compat_ro(struct btrfs_fs_info *fs_info, u64 flag,
				const char *name);

#define __btrfs_fs_incompat(fs_info, flags)				\
	(!!(btrfs_super_incompat_flags((fs_info)->super_copy) & (flags)))

#define __btrfs_fs_compat_ro(fs_info, flags)				\
	(!!(btrfs_super_compat_ro_flags((fs_info)->super_copy) & (flags)))

#define btrfs_set_fs_incompat(__fs_info, opt)				\
	__btrfs_set_fs_incompat((__fs_info), BTRFS_FEATURE_INCOMPAT_##opt, #opt)

#define btrfs_clear_fs_incompat(__fs_info, opt)			\
	__btrfs_clear_fs_incompat((__fs_info), BTRFS_FEATURE_INCOMPAT_##opt, #opt)

#define btrfs_fs_incompat(fs_info, opt)				\
	__btrfs_fs_incompat((fs_info), BTRFS_FEATURE_INCOMPAT_##opt)

#define btrfs_set_fs_compat_ro(__fs_info, opt)				\
	__btrfs_set_fs_compat_ro((__fs_info), BTRFS_FEATURE_COMPAT_RO_##opt, #opt)

#define btrfs_clear_fs_compat_ro(__fs_info, opt)			\
	__btrfs_clear_fs_compat_ro((__fs_info), BTRFS_FEATURE_COMPAT_RO_##opt, #opt)

#define btrfs_fs_compat_ro(fs_info, opt)				\
	__btrfs_fs_compat_ro((fs_info), BTRFS_FEATURE_COMPAT_RO_##opt)

#define btrfs_clear_opt(o, opt)	((o) &= ~BTRFS_MOUNT_##opt)
#define btrfs_set_opt(o, opt)		((o) |= BTRFS_MOUNT_##opt)
#define btrfs_raw_test_opt(o, opt)	((o) & BTRFS_MOUNT_##opt)
#define btrfs_test_opt(fs_info, opt)	((fs_info)->mount_opt & \
					 BTRFS_MOUNT_##opt)

static inline int btrfs_fs_closing(struct btrfs_fs_info *fs_info)
{
	/* Do it this way so we only ever do one test_bit in the normal case. */
	if (test_bit(BTRFS_FS_CLOSING_START, &fs_info->flags)) {
		if (test_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags))
			return 2;
		return 1;
	}
	return 0;
}

/*
 * If we remount the fs to be R/O or umount the fs, the cleaner needn't do
 * anything except sleep. This function is used to check the status of the fs.
 *
 * We check for BTRFS_FS_STATE_RO to avoid races with a concurrent remount,
 * since setting and checking for SB_RDONLY in the superblock's flags is not
 * atomic.
 */
static inline int btrfs_need_cleaner_sleep(struct btrfs_fs_info *fs_info)
{
	return test_bit(BTRFS_FS_STATE_RO, &fs_info->fs_state) ||
		btrfs_fs_closing(fs_info);
}

static inline void btrfs_wake_unfinished_drop(struct btrfs_fs_info *fs_info)
{
	clear_and_wake_up_bit(BTRFS_FS_UNFINISHED_DROPS, &fs_info->flags);
}

#define BTRFS_FS_ERROR(fs_info)	(READ_ONCE((fs_info)->fs_error))

#define BTRFS_FS_LOG_CLEANUP_ERROR(fs_info)				\
	(unlikely(test_bit(BTRFS_FS_STATE_LOG_CLEANUP_ERROR,		\
			   &(fs_info)->fs_state)))

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS

#define EXPORT_FOR_TESTS

static inline int btrfs_is_testing(struct btrfs_fs_info *fs_info)
{
	return test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state);
}

void btrfs_test_destroy_inode(struct inode *inode);

#else

#define EXPORT_FOR_TESTS static

static inline int btrfs_is_testing(struct btrfs_fs_info *fs_info)
{
	return 0;
}
#endif

#endif /* BTRFS_FS_H */