/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_FS_H
#define BTRFS_FS_H

#include <linux/blkdev.h>
#include <linux/fs.h>
#include <linux/btrfs_tree.h>
#include <linux/sizes.h>
#include "extent-io-tree.h"
#include "extent_map.h"
#include "async-thread.h"
#include "block-rsv.h"

#define BTRFS_MAX_EXTENT_SIZE SZ_128M

#define BTRFS_OLDEST_GENERATION 0ULL

#define BTRFS_EMPTY_DIR_SIZE 0

#define BTRFS_DIRTY_METADATA_THRESH SZ_32M

#define BTRFS_SUPER_INFO_OFFSET SZ_64K
#define BTRFS_SUPER_INFO_SIZE 4096
static_assert(sizeof(struct btrfs_super_block) == BTRFS_SUPER_INFO_SIZE);

/*
 * Number of metadata items necessary for an unlink operation:
 *
 * 1 for the possible orphan item
 * 1 for the dir item
 * 1 for the dir index
 * 1 for the inode ref
 * 1 for the inode
 * 1 for the parent inode
 */
#define BTRFS_UNLINK_METADATA_UNITS 6

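/*
 * Illustrative note, not part of the original header: this unit count is
 * intended to be passed as the num_items argument when reserving transaction
 * space, e.g. (hypothetical caller, error handling omitted):
 *
 *	trans = btrfs_start_transaction(root, BTRFS_UNLINK_METADATA_UNITS);
 */
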
/*
 * The reserved space at the beginning of each device. It covers the primary
 * super block and leaves space for potential use by other tools like
 * bootloaders, and lowers the potential damage from an accidental overwrite.
 */
#define BTRFS_DEVICE_RANGE_RESERVED (SZ_1M)
/*
 * Runtime (in-memory) states of filesystem
 */
enum {
	/*
	 * Filesystem is being remounted, allow to skip some operations, like
	 * defrag
	 */
	BTRFS_FS_STATE_REMOUNTING,
	/* Filesystem in RO mode */
	BTRFS_FS_STATE_RO,
	/* Track if a transaction abort has been reported on this filesystem */
	BTRFS_FS_STATE_TRANS_ABORTED,
	/*
	 * Bio operations should be blocked on this filesystem because a source
	 * or target device is being destroyed as part of a device replace
	 */
	BTRFS_FS_STATE_DEV_REPLACING,
	/* The btrfs_fs_info created for self-tests */
	BTRFS_FS_STATE_DUMMY_FS_INFO,

	BTRFS_FS_STATE_NO_CSUMS,

	/* Indicates there was an error cleaning up a log tree. */
	BTRFS_FS_STATE_LOG_CLEANUP_ERROR,

	BTRFS_FS_STATE_COUNT
};

enum {
	BTRFS_FS_CLOSING_START,
	BTRFS_FS_CLOSING_DONE,
	BTRFS_FS_LOG_RECOVERING,
	BTRFS_FS_OPEN,
	BTRFS_FS_QUOTA_ENABLED,
	BTRFS_FS_UPDATE_UUID_TREE_GEN,
	BTRFS_FS_CREATING_FREE_SPACE_TREE,
	BTRFS_FS_BTREE_ERR,
	BTRFS_FS_LOG1_ERR,
	BTRFS_FS_LOG2_ERR,
	BTRFS_FS_QUOTA_OVERRIDE,
	/* Used to record internally whether fs has been frozen */
	BTRFS_FS_FROZEN,
	/*
	 * Indicate that balance has been set up from the ioctl and is in the
	 * main phase. The fs_info::balance_ctl is initialized.
	 */
	BTRFS_FS_BALANCE_RUNNING,

	/*
	 * Indicate that relocation of a chunk has started; it's set per chunk
	 * and is toggled between chunks.
	 */
	BTRFS_FS_RELOC_RUNNING,

	/* Indicate that the cleaner thread is awake and doing something. */
	BTRFS_FS_CLEANER_RUNNING,

	/*
	 * The checksumming has an optimized version and is considered fast,
	 * so we don't need to offload checksums to workqueues.
	 */
	BTRFS_FS_CSUM_IMPL_FAST,

	/* Indicate that the discard workqueue can service discards. */
	BTRFS_FS_DISCARD_RUNNING,

	/* Indicate that we need to clean up space cache v1 */
	BTRFS_FS_CLEANUP_SPACE_CACHE_V1,

	/* Indicate that we can't trust the free space tree for caching yet */
	BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED,

	/* Indicate whether there are any tree modification log users */
	BTRFS_FS_TREE_MOD_LOG_USERS,

	/* Indicate that we want the transaction kthread to commit right now. */
	BTRFS_FS_COMMIT_TRANS,

	/* Indicate we have half completed snapshot deletions pending. */
	BTRFS_FS_UNFINISHED_DROPS,

	/* Indicate we have to finish a zone to do next allocation. */
	BTRFS_FS_NEED_ZONE_FINISH,

	/* Indicate that we want to commit the transaction. */
	BTRFS_FS_NEED_TRANS_COMMIT,

	/* This is set when active zone tracking is needed. */
	BTRFS_FS_ACTIVE_ZONE_TRACKING,

	/*
	 * Indicate that some features have changed; this is mostly for the
	 * cleaner thread to update the sysfs interface.
	 */
	BTRFS_FS_FEATURE_CHANGED,

	/*
	 * Indicate that we have found a tree block which is only aligned to
	 * sectorsize, but not to nodesize. This should be rare nowadays.
	 */
	BTRFS_FS_UNALIGNED_TREE_BLOCK,

#if BITS_PER_LONG == 32
	/* Indicate if we have error/warn message printed on 32bit systems */
	BTRFS_FS_32BIT_ERROR,
	BTRFS_FS_32BIT_WARN,
#endif
};

/*
 * Flags for mount options.
 *
 * Note: don't forget to add new options to btrfs_show_options()
 */
enum {
	BTRFS_MOUNT_NODATASUM = (1UL << 0),
	BTRFS_MOUNT_NODATACOW = (1UL << 1),
	BTRFS_MOUNT_NOBARRIER = (1UL << 2),
	BTRFS_MOUNT_SSD = (1UL << 3),
	BTRFS_MOUNT_DEGRADED = (1UL << 4),
	BTRFS_MOUNT_COMPRESS = (1UL << 5),
	BTRFS_MOUNT_NOTREELOG = (1UL << 6),
	BTRFS_MOUNT_FLUSHONCOMMIT = (1UL << 7),
	BTRFS_MOUNT_SSD_SPREAD = (1UL << 8),
	BTRFS_MOUNT_NOSSD = (1UL << 9),
	BTRFS_MOUNT_DISCARD_SYNC = (1UL << 10),
	BTRFS_MOUNT_FORCE_COMPRESS = (1UL << 11),
	BTRFS_MOUNT_SPACE_CACHE = (1UL << 12),
	BTRFS_MOUNT_CLEAR_CACHE = (1UL << 13),
	BTRFS_MOUNT_USER_SUBVOL_RM_ALLOWED = (1UL << 14),
	BTRFS_MOUNT_ENOSPC_DEBUG = (1UL << 15),
	BTRFS_MOUNT_AUTO_DEFRAG = (1UL << 16),
	BTRFS_MOUNT_USEBACKUPROOT = (1UL << 17),
	BTRFS_MOUNT_SKIP_BALANCE = (1UL << 18),
	BTRFS_MOUNT_PANIC_ON_FATAL_ERROR = (1UL << 19),
	BTRFS_MOUNT_RESCAN_UUID_TREE = (1UL << 20),
	BTRFS_MOUNT_FRAGMENT_DATA = (1UL << 21),
	BTRFS_MOUNT_FRAGMENT_METADATA = (1UL << 22),
	BTRFS_MOUNT_FREE_SPACE_TREE = (1UL << 23),
	BTRFS_MOUNT_NOLOGREPLAY = (1UL << 24),
	BTRFS_MOUNT_REF_VERIFY = (1UL << 25),
	BTRFS_MOUNT_DISCARD_ASYNC = (1UL << 26),
	BTRFS_MOUNT_IGNOREBADROOTS = (1UL << 27),
	BTRFS_MOUNT_IGNOREDATACSUMS = (1UL << 28),
	BTRFS_MOUNT_NODISCARD = (1UL << 29),
	BTRFS_MOUNT_NOSPACECACHE = (1UL << 30),
};

/*
 * Compat flags that we support. If any incompat flags are set other than the
 * ones specified below, then we will fail to mount.
 */
#define BTRFS_FEATURE_COMPAT_SUPP 0ULL
#define BTRFS_FEATURE_COMPAT_SAFE_SET 0ULL
#define BTRFS_FEATURE_COMPAT_SAFE_CLEAR 0ULL

#define BTRFS_FEATURE_COMPAT_RO_SUPP \
	(BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE | \
	 BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE_VALID | \
	 BTRFS_FEATURE_COMPAT_RO_VERITY | \
	 BTRFS_FEATURE_COMPAT_RO_BLOCK_GROUP_TREE)

#define BTRFS_FEATURE_COMPAT_RO_SAFE_SET 0ULL
#define BTRFS_FEATURE_COMPAT_RO_SAFE_CLEAR 0ULL

#define BTRFS_FEATURE_INCOMPAT_SUPP_STABLE \
	(BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF | \
	 BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL | \
	 BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS | \
	 BTRFS_FEATURE_INCOMPAT_BIG_METADATA | \
	 BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO | \
	 BTRFS_FEATURE_INCOMPAT_COMPRESS_ZSTD | \
	 BTRFS_FEATURE_INCOMPAT_RAID56 | \
	 BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF | \
	 BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA | \
	 BTRFS_FEATURE_INCOMPAT_NO_HOLES | \
	 BTRFS_FEATURE_INCOMPAT_METADATA_UUID | \
	 BTRFS_FEATURE_INCOMPAT_RAID1C34 | \
	 BTRFS_FEATURE_INCOMPAT_ZONED | \
	 BTRFS_FEATURE_INCOMPAT_SIMPLE_QUOTA)

#ifdef CONFIG_BTRFS_DEBUG
/*
 * Features under development, like extent tree v2, are enabled only under
 * CONFIG_BTRFS_DEBUG.
 */
#define BTRFS_FEATURE_INCOMPAT_SUPP \
	(BTRFS_FEATURE_INCOMPAT_SUPP_STABLE | \
	 BTRFS_FEATURE_INCOMPAT_RAID_STRIPE_TREE | \
	 BTRFS_FEATURE_INCOMPAT_EXTENT_TREE_V2)

#else

#define BTRFS_FEATURE_INCOMPAT_SUPP \
	(BTRFS_FEATURE_INCOMPAT_SUPP_STABLE)

#endif

#define BTRFS_FEATURE_INCOMPAT_SAFE_SET \
	(BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF)
#define BTRFS_FEATURE_INCOMPAT_SAFE_CLEAR 0ULL

#define BTRFS_DEFAULT_COMMIT_INTERVAL (30)
#define BTRFS_DEFAULT_MAX_INLINE (2048)

struct btrfs_dev_replace {
	/* See #define above */
	u64 replace_state;
	/* Seconds since 1-Jan-1970 */
	time64_t time_started;
	/* Seconds since 1-Jan-1970 */
	time64_t time_stopped;
	atomic64_t num_write_errors;
	atomic64_t num_uncorrectable_read_errors;

	u64 cursor_left;
	u64 committed_cursor_left;
	u64 cursor_left_last_write_of_item;
	u64 cursor_right;

	/* See #define above */
	u64 cont_reading_from_srcdev_mode;

	int is_valid;
	int item_needs_writeback;
	struct btrfs_device *srcdev;
	struct btrfs_device *tgtdev;

	struct mutex lock_finishing_cancel_unmount;
	struct rw_semaphore rwsem;

	struct btrfs_scrub_progress scrub_progress;

	struct percpu_counter bio_counter;
	wait_queue_head_t replace_wait;
};

/*
 * Free clusters are used to claim free space in relatively large chunks,
 * allowing us to do fewer seeky writes. They are used for all metadata
 * allocations. In ssd_spread mode they are also used for data allocations.
 */
struct btrfs_free_cluster {
	spinlock_t lock;
	spinlock_t refill_lock;
	struct rb_root root;

	/* Largest extent in this cluster */
	u64 max_size;

	/* First extent starting offset */
	u64 window_start;

	/* We did a full search and couldn't create a cluster */
	bool fragmented;

	struct btrfs_block_group *block_group;
	/*
	 * When a cluster is allocated from a block group, we put the cluster
	 * onto a list in the block group so that it can be freed before the
	 * block group is freed.
	 */
	struct list_head block_group_list;
};

/* Discard control. */
/*
 * Async discard uses multiple lists to differentiate the discard filter
 * parameters. Index 0 is for completely free block groups where we need to
 * ensure the entire block group is trimmed without being lossy. Indices
 * afterwards represent monotonically decreasing discard filter sizes to
 * prioritize what should be discarded next.
 */
#define BTRFS_NR_DISCARD_LISTS 3
#define BTRFS_DISCARD_INDEX_UNUSED 0
#define BTRFS_DISCARD_INDEX_START 1

struct btrfs_discard_ctl {
	struct workqueue_struct *discard_workers;
	struct delayed_work work;
	spinlock_t lock;
	struct btrfs_block_group *block_group;
	struct list_head discard_list[BTRFS_NR_DISCARD_LISTS];
	u64 prev_discard;
	u64 prev_discard_time;
	atomic_t discardable_extents;
	atomic64_t discardable_bytes;
	u64 max_discard_size;
	u64 delay_ms;
	u32 iops_limit;
	u32 kbps_limit;
	u64 discard_extent_bytes;
	u64 discard_bitmap_bytes;
	atomic64_t discard_bytes_saved;
};

/*
 * Exclusive operations (device replace, resize, device add/remove, balance)
 */
enum btrfs_exclusive_operation {
	BTRFS_EXCLOP_NONE,
	BTRFS_EXCLOP_BALANCE_PAUSED,
	BTRFS_EXCLOP_BALANCE,
	BTRFS_EXCLOP_DEV_ADD,
	BTRFS_EXCLOP_DEV_REMOVE,
	BTRFS_EXCLOP_DEV_REPLACE,
	BTRFS_EXCLOP_RESIZE,
	BTRFS_EXCLOP_SWAP_ACTIVATE,
};

/* Store data about transaction commits, exported via sysfs. */
struct btrfs_commit_stats {
	/* Total number of commits */
	u64 commit_count;
	/* The maximum commit duration so far in ns */
	u64 max_commit_dur;
	/* The last commit duration in ns */
	u64 last_commit_dur;
	/* The total commit duration in ns */
	u64 total_commit_dur;
};

struct btrfs_fs_info {
	u8 chunk_tree_uuid[BTRFS_UUID_SIZE];
	unsigned long flags;
	struct btrfs_root *tree_root;
	struct btrfs_root *chunk_root;
	struct btrfs_root *dev_root;
	struct btrfs_root *fs_root;
	struct btrfs_root *quota_root;
	struct btrfs_root *uuid_root;
	struct btrfs_root *data_reloc_root;
	struct btrfs_root *block_group_root;
	struct btrfs_root *stripe_root;

	/* The log root tree is a directory of all the other log roots */
	struct btrfs_root *log_root_tree;

	/* The tree that holds the global roots (csum, extent, etc) */
	rwlock_t global_root_lock;
	struct rb_root global_root_tree;

	spinlock_t fs_roots_radix_lock;
	struct radix_tree_root fs_roots_radix;

	/* Block group cache stuff */
	rwlock_t block_group_cache_lock;
	struct rb_root_cached block_group_cache_tree;

	/* Keep track of unallocated space */
	atomic64_t free_chunk_space;

	/* Track ranges which are used by log trees blocks/logged data extents */
	struct extent_io_tree excluded_extents;

	/* logical->physical extent mapping */
	struct rb_root_cached mapping_tree;
	rwlock_t mapping_tree_lock;

	/*
	 * Block reservation for extent, checksum, root tree and delayed dir
	 * index item.
	 */
	struct btrfs_block_rsv global_block_rsv;
	/* Block reservation for metadata operations */
	struct btrfs_block_rsv trans_block_rsv;
	/* Block reservation for chunk tree */
	struct btrfs_block_rsv chunk_block_rsv;
	/* Block reservation for delayed operations */
	struct btrfs_block_rsv delayed_block_rsv;
	/* Block reservation for delayed refs */
	struct btrfs_block_rsv delayed_refs_rsv;

	struct btrfs_block_rsv empty_block_rsv;

	/*
	 * Updated while holding the lock 'trans_lock'. Due to the life cycle of
	 * a transaction, it can be directly read while holding a transaction
	 * handle, everywhere else must be read with btrfs_get_fs_generation().
	 * Should always be updated using btrfs_set_fs_generation().
	 */
	u64 generation;
	/*
	 * Always use btrfs_get_last_trans_committed() and
	 * btrfs_set_last_trans_committed() to read and update this field.
	 */
	u64 last_trans_committed;
	/*
	 * Generation of the last transaction used for block group relocation
	 * since the filesystem was last mounted (or 0 if none happened yet).
	 * Must be written and read while holding btrfs_fs_info::commit_root_sem.
	 */
	u64 last_reloc_trans;

	/*
	 * This is updated to the current trans every time a full commit is
	 * required instead of the faster short fsync log commits
	 */
	u64 last_trans_log_full_commit;
	unsigned long mount_opt;

	unsigned long compress_type:4;
	unsigned int compress_level;
	u32 commit_interval;
	/*
	 * This is an advisory value; the read side is safe even if it sees a
	 * stale number, because we will write out the data into a regular
	 * extent anyway. The write side (mount/remount) is under the
	 * ->s_umount lock, so it is also safe.
	 */
	u64 max_inline;

	struct btrfs_transaction *running_transaction;
	wait_queue_head_t transaction_throttle;
	wait_queue_head_t transaction_wait;
	wait_queue_head_t transaction_blocked_wait;
	wait_queue_head_t async_submit_wait;

	/*
	 * Used to protect the incompat_flags, compat_flags, compat_ro_flags
	 * when they are updated.
	 *
	 * Because we never clear the flags, we don't need the lock on the
	 * read side.
	 *
	 * We also don't need the lock when we mount the fs, because there is
	 * no other task which can update the flags then.
	 */
	spinlock_t super_lock;
	struct btrfs_super_block *super_copy;
	struct btrfs_super_block *super_for_commit;
	struct super_block *sb;
	struct inode *btree_inode;
	struct mutex tree_log_mutex;
	struct mutex transaction_kthread_mutex;
	struct mutex cleaner_mutex;
	struct mutex chunk_mutex;

	/*
	 * This is taken to make sure we don't set block groups ro after the
	 * free space cache has been allocated on them.
	 */
	struct mutex ro_block_group_mutex;

	/*
	 * This is used during read/modify/write to make sure no two ios are
	 * trying to mod the same stripe at the same time.
	 */
	struct btrfs_stripe_hash_table *stripe_hash_table;

	/*
	 * This protects the ordered operations list only while we are
	 * processing all of the entries on it. This way we make sure the
	 * commit code doesn't find the list temporarily empty because another
	 * function happens to be doing non-waiting preflush before jumping
	 * into the main commit.
	 */
	struct mutex ordered_operations_mutex;

	struct rw_semaphore commit_root_sem;

	struct rw_semaphore cleanup_work_sem;

	struct rw_semaphore subvol_sem;

	spinlock_t trans_lock;
	/*
	 * The reloc mutex goes with the trans lock, it is taken during commit
	 * to protect us from the relocation code.
	 */
	struct mutex reloc_mutex;

	struct list_head trans_list;
	struct list_head dead_roots;
	struct list_head caching_block_groups;

	spinlock_t delayed_iput_lock;
	struct list_head delayed_iputs;
	atomic_t nr_delayed_iputs;
	wait_queue_head_t delayed_iputs_wait;

	atomic64_t tree_mod_seq;

	/* This protects tree_mod_log and tree_mod_seq_list */
	rwlock_t tree_mod_log_lock;
	struct rb_root tree_mod_log;
	struct list_head tree_mod_seq_list;

	atomic_t async_delalloc_pages;

	/* This is used to protect the following list -- ordered_roots. */
	spinlock_t ordered_root_lock;

	/*
	 * All fs/file tree roots in which there are data=ordered extents
	 * pending writeback are added into this list.
	 *
	 * These can span multiple transactions and basically include every
	 * dirty data page that isn't from nodatacow.
	 */
	struct list_head ordered_roots;

	struct mutex delalloc_root_mutex;
	spinlock_t delalloc_root_lock;
	/* All fs/file tree roots that have delalloc inodes. */
	struct list_head delalloc_roots;

	/*
	 * There is a pool of worker threads for checksumming during writes and
	 * a pool for checksumming after reads. This is because readers can
	 * run with FS locks held, and the writers may be waiting for those
	 * locks. We don't want ordering in the pending list to cause
	 * deadlocks, and so the two are serviced separately.
	 *
	 * A third pool does submit_bio to avoid deadlocking with the other two.
	 */
	struct btrfs_workqueue *workers;
	struct btrfs_workqueue *delalloc_workers;
	struct btrfs_workqueue *flush_workers;
	struct workqueue_struct *endio_workers;
	struct workqueue_struct *endio_meta_workers;
	struct workqueue_struct *rmw_workers;
	struct workqueue_struct *compressed_write_workers;
	struct btrfs_workqueue *endio_write_workers;
	struct btrfs_workqueue *endio_freespace_worker;
	struct btrfs_workqueue *caching_workers;

	/*
	 * Fixup workers take dirty pages that didn't properly go through the
	 * cow mechanism and make them safe to write. It happens for the
	 * sys_munmap function call path.
	 */
	struct btrfs_workqueue *fixup_workers;
	struct btrfs_workqueue *delayed_workers;

	struct task_struct *transaction_kthread;
	struct task_struct *cleaner_kthread;
	u32 thread_pool_size;

	struct kobject *space_info_kobj;
	struct kobject *qgroups_kobj;
	struct kobject *discard_kobj;

	/* Used to keep from writing metadata until there is a nice batch */
	struct percpu_counter dirty_metadata_bytes;
	struct percpu_counter delalloc_bytes;
	struct percpu_counter ordered_bytes;
	s32 dirty_metadata_batch;
	s32 delalloc_batch;

	/* Protected by 'trans_lock'. */
	struct list_head dirty_cowonly_roots;

	struct btrfs_fs_devices *fs_devices;

	/*
	 * The space_info list is effectively read only after initial setup.
	 * It is populated at mount time and cleaned up after all block groups
	 * are removed. RCU is used to protect it.
	 */
	struct list_head space_info;

	struct btrfs_space_info *data_sinfo;

	struct reloc_control *reloc_ctl;

	/* data_alloc_cluster is only used in ssd_spread mode */
	struct btrfs_free_cluster data_alloc_cluster;

	/* All metadata allocations go through this cluster. */
	struct btrfs_free_cluster meta_alloc_cluster;

	/* Auto defrag inodes go here. */
	spinlock_t defrag_inodes_lock;
	struct rb_root defrag_inodes;
	atomic_t defrag_running;

	/* Used to protect avail_{data, metadata, system}_alloc_bits */
	seqlock_t profiles_lock;
	/*
	 * These three are in extended format (availability of single chunks is
	 * denoted by BTRFS_AVAIL_ALLOC_BIT_SINGLE bit, other types are denoted
	 * by corresponding BTRFS_BLOCK_GROUP_* bits)
	 */
	u64 avail_data_alloc_bits;
	u64 avail_metadata_alloc_bits;
	u64 avail_system_alloc_bits;

	/* Balance state */
	spinlock_t balance_lock;
	struct mutex balance_mutex;
	atomic_t balance_pause_req;
	atomic_t balance_cancel_req;
	struct btrfs_balance_control *balance_ctl;
	wait_queue_head_t balance_wait_q;

	/* Cancellation requests for chunk relocation */
	atomic_t reloc_cancel_req;

	u32 data_chunk_allocations;
	u32 metadata_ratio;

	void *bdev_holder;

	/* Private scrub information */
	struct mutex scrub_lock;
	atomic_t scrubs_running;
	atomic_t scrub_pause_req;
	atomic_t scrubs_paused;
	atomic_t scrub_cancel_req;
	wait_queue_head_t scrub_pause_wait;
	/*
	 * The worker pointers are NULL iff the refcount is 0, i.e. scrub is
	 * not running.
	 */
	refcount_t scrub_workers_refcnt;
	struct workqueue_struct *scrub_workers;
	struct btrfs_subpage_info *subpage_info;

	struct btrfs_discard_ctl discard_ctl;

	/* Is qgroup tracking in a consistent state? */
	u64 qgroup_flags;

	/* Holds configuration and tracking. Protected by qgroup_lock. */
	struct rb_root qgroup_tree;
	spinlock_t qgroup_lock;

	/*
	 * Used to avoid frequently calling ulist_alloc()/ulist_free()
	 * when doing qgroup accounting, it must be protected by qgroup_lock.
	 */
	struct ulist *qgroup_ulist;

	/*
	 * Protect user change for quota operations. If a transaction is needed,
	 * it must be started before locking this lock.
	 */
	struct mutex qgroup_ioctl_lock;

	/* List of dirty qgroups to be written at next commit. */
	struct list_head dirty_qgroups;

	/* Used by qgroup for an efficient tree traversal. */
	u64 qgroup_seq;

	/* Qgroup rescan items. */
	/* Protects the progress item */
	struct mutex qgroup_rescan_lock;
	struct btrfs_key qgroup_rescan_progress;
	struct btrfs_workqueue *qgroup_rescan_workers;
	struct completion qgroup_rescan_completion;
	struct btrfs_work qgroup_rescan_work;
	/* Protected by qgroup_rescan_lock */
	bool qgroup_rescan_running;
	u8 qgroup_drop_subtree_thres;
	u64 qgroup_enable_gen;

	/*
	 * If this is not 0, then it indicates a serious filesystem error has
	 * happened and it contains that error (negative errno value).
	 */
	int fs_error;

	/* Filesystem state */
	unsigned long fs_state;

	struct btrfs_delayed_root *delayed_root;

	/* Extent buffer radix tree */
	spinlock_t buffer_lock;
	/* Entries are eb->start / sectorsize */
	struct radix_tree_root buffer_radix;

	/* Next backup root to be overwritten */
	int backup_root_index;

	/* Device replace state */
	struct btrfs_dev_replace dev_replace;

	struct semaphore uuid_tree_rescan_sem;

	/* Used to reclaim the metadata space in the background. */
	struct work_struct async_reclaim_work;
	struct work_struct async_data_reclaim_work;
	struct work_struct preempt_reclaim_work;

	/* Reclaim partially filled block groups in the background */
	struct work_struct reclaim_bgs_work;
	struct list_head reclaim_bgs;
	int bg_reclaim_threshold;

	spinlock_t unused_bgs_lock;
	struct list_head unused_bgs;
	struct mutex unused_bg_unpin_mutex;
	/* Protect block groups that are going to be deleted */
	struct mutex reclaim_bgs_lock;

	/* Cached block sizes */
	u32 nodesize;
	u32 sectorsize;
	/* ilog2 of sectorsize, used to avoid 64bit division */
	u32 sectorsize_bits;
	u32 csum_size;
	u32 csums_per_leaf;
	u32 stripesize;

	/*
	 * Maximum size of an extent. BTRFS_MAX_EXTENT_SIZE on a regular
	 * filesystem, on zoned it depends on the device constraints.
	 */
	u64 max_extent_size;

	/* Block groups and devices containing active swapfiles. */
	spinlock_t swapfile_pins_lock;
	struct rb_root swapfile_pins;

	struct crypto_shash *csum_shash;

	/* Type of exclusive operation running, protected by super_lock */
	enum btrfs_exclusive_operation exclusive_operation;

	/*
	 * Zone size > 0 when in ZONED mode, otherwise it's used for a check
	 * if the mode is enabled
	 */
	u64 zone_size;

	/* Constraints for ZONE_APPEND commands: */
	struct queue_limits limits;
	u64 max_zone_append_size;

	struct mutex zoned_meta_io_lock;
	spinlock_t treelog_bg_lock;
	u64 treelog_bg;

	/*
	 * Start of the dedicated data relocation block group, protected by
	 * relocation_bg_lock.
	 */
	spinlock_t relocation_bg_lock;
	u64 data_reloc_bg;
	struct mutex zoned_data_reloc_io_lock;

	struct btrfs_block_group *active_meta_bg;
	struct btrfs_block_group *active_system_bg;

	u64 nr_global_roots;

	spinlock_t zone_active_bgs_lock;
	struct list_head zone_active_bgs;

	/* Updates are not protected by any lock */
	struct btrfs_commit_stats commit_stats;

	/*
	 * Last generation where we dropped a non-relocation root.
	 * Use btrfs_set_last_root_drop_gen() and btrfs_get_last_root_drop_gen()
	 * to change it and to read it, respectively.
	 */
	u64 last_root_drop_gen;

	/*
	 * Annotations for transaction events (structures are empty when
	 * compiled without lockdep).
	 */
	struct lockdep_map btrfs_trans_num_writers_map;
	struct lockdep_map btrfs_trans_num_extwriters_map;
	struct lockdep_map btrfs_state_change_map[4];
	struct lockdep_map btrfs_trans_pending_ordered_map;
	struct lockdep_map btrfs_ordered_extent_map;

#ifdef CONFIG_BTRFS_FS_REF_VERIFY
	spinlock_t ref_verify_lock;
	struct rb_root block_tree;
#endif

#ifdef CONFIG_BTRFS_DEBUG
	struct kobject *debug_kobj;
	struct list_head allocated_roots;

	spinlock_t eb_leak_lock;
	struct list_head allocated_ebs;
#endif
};

static inline u64 btrfs_get_fs_generation(const struct btrfs_fs_info *fs_info)
{
	return READ_ONCE(fs_info->generation);
}

static inline void btrfs_set_fs_generation(struct btrfs_fs_info *fs_info, u64 gen)
{
	WRITE_ONCE(fs_info->generation, gen);
}

static inline u64 btrfs_get_last_trans_committed(const struct btrfs_fs_info *fs_info)
{
	return READ_ONCE(fs_info->last_trans_committed);
}

static inline void btrfs_set_last_trans_committed(struct btrfs_fs_info *fs_info, u64 gen)
{
	WRITE_ONCE(fs_info->last_trans_committed, gen);
}

static inline void btrfs_set_last_root_drop_gen(struct btrfs_fs_info *fs_info,
						u64 gen)
{
	WRITE_ONCE(fs_info->last_root_drop_gen, gen);
}

static inline u64 btrfs_get_last_root_drop_gen(const struct btrfs_fs_info *fs_info)
{
	return READ_ONCE(fs_info->last_root_drop_gen);
}

/*
 * Take the number of bytes to be checksummed and figure out how many leaves
 * it would require to store the csums for that many bytes.
 */
static inline u64 btrfs_csum_bytes_to_leaves(
			const struct btrfs_fs_info *fs_info, u64 csum_bytes)
{
	const u64 num_csums = csum_bytes >> fs_info->sectorsize_bits;

	return DIV_ROUND_UP_ULL(num_csums, fs_info->csums_per_leaf);
}

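/*
 * Worked example (illustrative, with assumed values): on a filesystem with
 * 4KiB sectors (sectorsize_bits == 12) and, say, 126 csums fitting into one
 * leaf, checksumming 16MiB of data needs 16MiB >> 12 = 4096 csums, i.e.
 * DIV_ROUND_UP(4096, 126) = 33 leaves.
 */
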
/*
 * Use this if we would be adding new items, as we could split nodes as we cow
 * down the tree.
 */
static inline u64 btrfs_calc_insert_metadata_size(const struct btrfs_fs_info *fs_info,
						  unsigned num_items)
{
	return (u64)fs_info->nodesize * BTRFS_MAX_LEVEL * 2 * num_items;
}

/*
 * Doing a truncate or a modification won't result in new nodes or leaves, just
 * what we need for COW.
 */
static inline u64 btrfs_calc_metadata_size(const struct btrfs_fs_info *fs_info,
					   unsigned num_items)
{
	return (u64)fs_info->nodesize * BTRFS_MAX_LEVEL * num_items;
}

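/*
 * Worked example (illustrative, with assumed values): with a 16KiB nodesize
 * and BTRFS_MAX_LEVEL == 8, one item costs 16KiB * 8 = 128KiB for plain COW
 * (btrfs_calc_metadata_size()) and twice that, 256KiB, when insertions may
 * split nodes on the way down (btrfs_calc_insert_metadata_size()).
 */
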
#define BTRFS_MAX_EXTENT_ITEM_SIZE(r) ((BTRFS_LEAF_DATA_SIZE(r->fs_info) >> 4) - \
					sizeof(struct btrfs_item))

static inline bool btrfs_is_zoned(const struct btrfs_fs_info *fs_info)
{
	return IS_ENABLED(CONFIG_BLK_DEV_ZONED) && fs_info->zone_size > 0;
}

/*
 * Count how many fs_info->max_extent_size units cover the given @size.
 */
static inline u32 count_max_extents(struct btrfs_fs_info *fs_info, u64 size)
{
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
	if (!fs_info)
		return div_u64(size + BTRFS_MAX_EXTENT_SIZE - 1, BTRFS_MAX_EXTENT_SIZE);
#endif

	return div_u64(size + fs_info->max_extent_size - 1, fs_info->max_extent_size);
}

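/*
 * Illustrative note: this is a plain round-up division. E.g. with the
 * default max_extent_size of BTRFS_MAX_EXTENT_SIZE (128MiB), a 300MiB range
 * counts as (300 + 128 - 1) / 128 = 3 extents.
 */
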
bool btrfs_exclop_start(struct btrfs_fs_info *fs_info,
			enum btrfs_exclusive_operation type);
bool btrfs_exclop_start_try_lock(struct btrfs_fs_info *fs_info,
				 enum btrfs_exclusive_operation type);
void btrfs_exclop_start_unlock(struct btrfs_fs_info *fs_info);
void btrfs_exclop_finish(struct btrfs_fs_info *fs_info);
void btrfs_exclop_balance(struct btrfs_fs_info *fs_info,
			  enum btrfs_exclusive_operation op);

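/*
 * Typical usage sketch (illustrative only, error handling and the exact
 * return value are hypothetical): an ioctl claims the exclusive operation
 * slot before doing work and releases it when done, e.g.:
 *
 *	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_RESIZE))
 *		return -EBUSY;
 *	... do the resize ...
 *	btrfs_exclop_finish(fs_info);
 */
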
/* Compatibility and incompatibility defines */
void __btrfs_set_fs_incompat(struct btrfs_fs_info *fs_info, u64 flag,
			     const char *name);
void __btrfs_clear_fs_incompat(struct btrfs_fs_info *fs_info, u64 flag,
			       const char *name);
void __btrfs_set_fs_compat_ro(struct btrfs_fs_info *fs_info, u64 flag,
			      const char *name);
void __btrfs_clear_fs_compat_ro(struct btrfs_fs_info *fs_info, u64 flag,
				const char *name);

#define __btrfs_fs_incompat(fs_info, flags) \
	(!!(btrfs_super_incompat_flags((fs_info)->super_copy) & (flags)))

#define __btrfs_fs_compat_ro(fs_info, flags) \
	(!!(btrfs_super_compat_ro_flags((fs_info)->super_copy) & (flags)))

#define btrfs_set_fs_incompat(__fs_info, opt) \
	__btrfs_set_fs_incompat((__fs_info), BTRFS_FEATURE_INCOMPAT_##opt, #opt)

#define btrfs_clear_fs_incompat(__fs_info, opt) \
	__btrfs_clear_fs_incompat((__fs_info), BTRFS_FEATURE_INCOMPAT_##opt, #opt)

#define btrfs_fs_incompat(fs_info, opt) \
	__btrfs_fs_incompat((fs_info), BTRFS_FEATURE_INCOMPAT_##opt)

#define btrfs_set_fs_compat_ro(__fs_info, opt) \
	__btrfs_set_fs_compat_ro((__fs_info), BTRFS_FEATURE_COMPAT_RO_##opt, #opt)

#define btrfs_clear_fs_compat_ro(__fs_info, opt) \
	__btrfs_clear_fs_compat_ro((__fs_info), BTRFS_FEATURE_COMPAT_RO_##opt, #opt)

#define btrfs_fs_compat_ro(fs_info, opt) \
	__btrfs_fs_compat_ro((fs_info), BTRFS_FEATURE_COMPAT_RO_##opt)

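/*
 * Illustrative note: the option name is token-pasted onto the feature
 * prefix, so callers use the short form, e.g.:
 *
 *	if (btrfs_fs_incompat(fs_info, ZONED))
 *		...
 *
 * which expands to a test of BTRFS_FEATURE_INCOMPAT_ZONED against the flags
 * in the superblock copy.
 */
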
#define btrfs_clear_opt(o, opt) ((o) &= ~BTRFS_MOUNT_##opt)
#define btrfs_set_opt(o, opt) ((o) |= BTRFS_MOUNT_##opt)
#define btrfs_raw_test_opt(o, opt) ((o) & BTRFS_MOUNT_##opt)
#define btrfs_test_opt(fs_info, opt) ((fs_info)->mount_opt & \
				      BTRFS_MOUNT_##opt)

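/*
 * Illustrative usage (hypothetical caller): mount options follow the same
 * token-pasting scheme as the feature macros above, e.g.:
 *
 *	if (btrfs_test_opt(fs_info, SSD))
 *		...
 *	btrfs_set_opt(fs_info->mount_opt, NOSSD);
 */
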
static inline int btrfs_fs_closing(struct btrfs_fs_info *fs_info)
{
	/* Do it this way so we only ever do one test_bit in the normal case. */
	if (test_bit(BTRFS_FS_CLOSING_START, &fs_info->flags)) {
		if (test_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags))
			return 2;
		return 1;
	}
	return 0;
}

/*
 * If we remount the fs to be R/O or umount the fs, the cleaner doesn't need
 * to do anything except sleep. This function is used to check the status of
 * the fs.
 * We check for BTRFS_FS_STATE_RO to avoid races with a concurrent remount,
 * since setting and checking for SB_RDONLY in the superblock's flags is not
 * atomic.
 */
static inline int btrfs_need_cleaner_sleep(struct btrfs_fs_info *fs_info)
{
	return test_bit(BTRFS_FS_STATE_RO, &fs_info->fs_state) ||
	       btrfs_fs_closing(fs_info);
}

static inline void btrfs_wake_unfinished_drop(struct btrfs_fs_info *fs_info)
{
	clear_and_wake_up_bit(BTRFS_FS_UNFINISHED_DROPS, &fs_info->flags);
}

#define BTRFS_FS_ERROR(fs_info) (READ_ONCE((fs_info)->fs_error))

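/*
 * Illustrative usage (hypothetical caller): fs_error is either 0 or a
 * negative errno, so the macro works both as a predicate and as an error
 * value:
 *
 *	if (BTRFS_FS_ERROR(fs_info))
 *		return BTRFS_FS_ERROR(fs_info);
 */
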
#define BTRFS_FS_LOG_CLEANUP_ERROR(fs_info) \
	(unlikely(test_bit(BTRFS_FS_STATE_LOG_CLEANUP_ERROR, \
			   &(fs_info)->fs_state)))

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS

#define EXPORT_FOR_TESTS

static inline int btrfs_is_testing(struct btrfs_fs_info *fs_info)
{
	return test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state);
}

void btrfs_test_destroy_inode(struct inode *inode);

#else

#define EXPORT_FOR_TESTS static

static inline int btrfs_is_testing(struct btrfs_fs_info *fs_info)
{
	return 0;
}
#endif

#endif