// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/radix-tree.h>
#include <linux/writeback.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/migrate.h>
#include <linux/ratelimit.h>
#include <linux/uuid.h>
#include <linux/semaphore.h>
#include <linux/error-injection.h>
#include <linux/crc32c.h>
#include <linux/sched/mm.h>
#include <asm/unaligned.h>
#include <crypto/hash.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "print-tree.h"
#include "locking.h"
#include "tree-log.h"
#include "free-space-cache.h"
#include "free-space-tree.h"
#include "inode-map.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "dev-replace.h"
#include "raid56.h"
#include "sysfs.h"
#include "qgroup.h"
#include "compression.h"
#include "tree-checker.h"
#include "ref-verify.h"
#include "block-group.h"
#include "discard.h"
#include "space-info.h"

#define BTRFS_SUPER_FLAG_SUPP   (BTRFS_HEADER_FLAG_WRITTEN |\
                                 BTRFS_HEADER_FLAG_RELOC |\
                                 BTRFS_SUPER_FLAG_ERROR |\
                                 BTRFS_SUPER_FLAG_SEEDING |\
                                 BTRFS_SUPER_FLAG_METADUMP |\
                                 BTRFS_SUPER_FLAG_METADUMP_V2)

static void end_workqueue_fn(struct btrfs_work *work);
static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
                                      struct btrfs_fs_info *fs_info);
static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root);
static int btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
                                        struct extent_io_tree *dirty_pages,
                                        int mark);
static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
                                       struct extent_io_tree *pinned_extents);
static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info);
static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info);

/*
 * btrfs_end_io_wq structs are used to do processing in task context when an IO
 * is complete.  This is used during reads to verify checksums, and it is used
 * by writes to insert metadata for new file extents after IO is complete.
 */
struct btrfs_end_io_wq {
        struct bio *bio;
        bio_end_io_t *end_io;
        void *private;
        struct btrfs_fs_info *info;
        blk_status_t status;
        enum btrfs_wq_endio_type metadata;
        struct btrfs_work work;
};

static struct kmem_cache *btrfs_end_io_wq_cache;

int __init btrfs_end_io_wq_init(void)
{
        btrfs_end_io_wq_cache = kmem_cache_create("btrfs_end_io_wq",
                                        sizeof(struct btrfs_end_io_wq),
                                        0,
                                        SLAB_MEM_SPREAD,
                                        NULL);
        if (!btrfs_end_io_wq_cache)
                return -ENOMEM;
        return 0;
}

void __cold btrfs_end_io_wq_exit(void)
{
        kmem_cache_destroy(btrfs_end_io_wq_cache);
}

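/*
 * Illustrative sketch, not part of disk-io.c: how the init/exit pair above is
 * meant to be used.  The real call site (the btrfs module init path in
 * super.c) and its unwind ordering are assumptions of this sketch.
 */
#if 0
static int __init example_module_init(void)
{
        int ret;

        ret = btrfs_end_io_wq_init();
        if (ret)
                return ret;
        /* ... initialize the remaining caches and workqueues ... */
        return 0;
}

static void __exit example_module_exit(void)
{
        /* ... tear down later state first, then the end_io_wq cache ... */
        btrfs_end_io_wq_exit();
}
#endif
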
static void btrfs_free_csum_hash(struct btrfs_fs_info *fs_info)
{
        if (fs_info->csum_shash)
                crypto_free_shash(fs_info->csum_shash);
}

/*
 * async submit bios are used to offload expensive checksumming
 * onto the worker threads.  They checksum file and metadata bios
 * just before they are sent down the IO stack.
 */
struct async_submit_bio {
        void *private_data;
        struct bio *bio;
        extent_submit_bio_start_t *submit_bio_start;
        int mirror_num;
        /*
         * bio_offset is optional, can be used if the pages in the bio
         * can't tell us where in the file the bio should go
         */
        u64 bio_offset;
        struct btrfs_work work;
        blk_status_t status;
};

/*
 * Lockdep class keys for extent_buffer->lock's in this root.  For a given
 * eb, the lockdep key is determined by the btrfs_root it belongs to and
 * the level the eb occupies in the tree.
 *
 * Different roots are used for different purposes and may nest inside each
 * other and they require separate keysets.  As lockdep keys should be
 * static, assign keysets according to the purpose of the root as indicated
 * by btrfs_root->root_key.objectid.  This ensures that all special purpose
 * roots have separate keysets.
 *
 * Lock-nesting across peer nodes is always done with the immediate parent
 * node locked thus preventing deadlock.  As lockdep doesn't know this, use
 * subclass to avoid triggering lockdep warning in such cases.
 *
 * The key is set by the readpage_end_io_hook after the buffer has passed
 * csum validation but before the pages are unlocked.  It is also set by
 * btrfs_init_new_buffer on freshly allocated blocks.
 *
 * We also add a check to make sure the highest level of the tree is the
 * same as our lockdep setup here.  If BTRFS_MAX_LEVEL changes, this code
 * needs update as well.
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# if BTRFS_MAX_LEVEL != 8
#  error
# endif

static struct btrfs_lockdep_keyset {
        u64                     id;             /* root objectid */
        const char              *name_stem;     /* lock name stem */
        char                    names[BTRFS_MAX_LEVEL + 1][20];
        struct lock_class_key   keys[BTRFS_MAX_LEVEL + 1];
} btrfs_lockdep_keysets[] = {
        { .id = BTRFS_ROOT_TREE_OBJECTID,       .name_stem = "root"     },
        { .id = BTRFS_EXTENT_TREE_OBJECTID,     .name_stem = "extent"   },
        { .id = BTRFS_CHUNK_TREE_OBJECTID,      .name_stem = "chunk"    },
        { .id = BTRFS_DEV_TREE_OBJECTID,        .name_stem = "dev"      },
        { .id = BTRFS_FS_TREE_OBJECTID,         .name_stem = "fs"       },
        { .id = BTRFS_CSUM_TREE_OBJECTID,       .name_stem = "csum"     },
        { .id = BTRFS_QUOTA_TREE_OBJECTID,      .name_stem = "quota"    },
        { .id = BTRFS_TREE_LOG_OBJECTID,        .name_stem = "log"      },
        { .id = BTRFS_TREE_RELOC_OBJECTID,      .name_stem = "treloc"   },
        { .id = BTRFS_DATA_RELOC_TREE_OBJECTID, .name_stem = "dreloc"   },
        { .id = BTRFS_UUID_TREE_OBJECTID,       .name_stem = "uuid"     },
        { .id = BTRFS_FREE_SPACE_TREE_OBJECTID, .name_stem = "free-space" },
        { .id = 0,                              .name_stem = "tree"     },
};

void __init btrfs_init_lockdep(void)
{
        int i, j;

        /* initialize lockdep class names */
        for (i = 0; i < ARRAY_SIZE(btrfs_lockdep_keysets); i++) {
                struct btrfs_lockdep_keyset *ks = &btrfs_lockdep_keysets[i];

                for (j = 0; j < ARRAY_SIZE(ks->names); j++)
                        snprintf(ks->names[j], sizeof(ks->names[j]),
                                 "btrfs-%s-%02d", ks->name_stem, j);
        }
}

void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb,
                                    int level)
{
        struct btrfs_lockdep_keyset *ks;

        BUG_ON(level >= ARRAY_SIZE(ks->keys));

        /* find the matching keyset, id 0 is the default entry */
        for (ks = btrfs_lockdep_keysets; ks->id; ks++)
                if (ks->id == objectid)
                        break;

        lockdep_set_class_and_name(&eb->lock,
                                   &ks->keys[level], ks->names[level]);
}

#endif

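/*
 * Illustrative sketch, not part of disk-io.c: what the table above produces.
 * An extent buffer owned by the extent tree at level 1 gets the lockdep class
 * named "btrfs-extent-01"; an objectid with no dedicated entry falls through
 * to the ".id = 0" row and gets "btrfs-tree-<level>".  Callers typically pass
 * the owner and level taken from the tree block header:
 */
#if 0
static void example_set_lockdep_class(struct extent_buffer *eb)
{
        btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb), eb,
                                       btrfs_header_level(eb));
}
#endif
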
/*
 * Compute the csum of a btree block and store the result to provided buffer.
 */
static void csum_tree_block(struct extent_buffer *buf, u8 *result)
{
        struct btrfs_fs_info *fs_info = buf->fs_info;
        const int num_pages = fs_info->nodesize >> PAGE_SHIFT;
        SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
        char *kaddr;
        int i;

        shash->tfm = fs_info->csum_shash;
        crypto_shash_init(shash);
        kaddr = page_address(buf->pages[0]);
        crypto_shash_update(shash, kaddr + BTRFS_CSUM_SIZE,
                            PAGE_SIZE - BTRFS_CSUM_SIZE);

        for (i = 1; i < num_pages; i++) {
                kaddr = page_address(buf->pages[i]);
                crypto_shash_update(shash, kaddr, PAGE_SIZE);
        }
        memset(result, 0, BTRFS_CSUM_SIZE);
        crypto_shash_final(shash, result);
}

/*
 * we can't consider a given block up to date unless the transid of the
 * block matches the transid in the parent node's pointer.  This is how we
 * detect blocks that either didn't get written at all or got written
 * in the wrong place.
 */
static int verify_parent_transid(struct extent_io_tree *io_tree,
                                 struct extent_buffer *eb, u64 parent_transid,
                                 int atomic)
{
        struct extent_state *cached_state = NULL;
        int ret;
        bool need_lock = (current->journal_info == BTRFS_SEND_TRANS_STUB);

        if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
                return 0;

        if (atomic)
                return -EAGAIN;

        if (need_lock) {
                btrfs_tree_read_lock(eb);
                btrfs_set_lock_blocking_read(eb);
        }

        lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
                         &cached_state);
        if (extent_buffer_uptodate(eb) &&
            btrfs_header_generation(eb) == parent_transid) {
                ret = 0;
                goto out;
        }
        btrfs_err_rl(eb->fs_info,
                "parent transid verify failed on %llu wanted %llu found %llu",
                     eb->start,
                     parent_transid, btrfs_header_generation(eb));
        ret = 1;

        /*
         * Things reading via commit roots that don't have normal protection,
         * like send, can have a really old block in cache that may point at a
         * block that has been freed and re-allocated.  So don't clear uptodate
         * if we find an eb that is under IO (dirty/writeback) because we could
         * end up reading in the stale data and then writing it back out and
         * making everybody very sad.
         */
        if (!extent_buffer_under_io(eb))
                clear_extent_buffer_uptodate(eb);
out:
        unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
                             &cached_state);
        if (need_lock)
                btrfs_tree_read_unlock_blocking(eb);
        return ret;
}

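/*
 * Illustrative sketch, not part of disk-io.c: where parent_transid comes
 * from.  The parent node stores, next to each child pointer, the generation
 * the child was written with; a tree walker passes that value down so the
 * child's header generation can be compared against it.  The accessor name
 * btrfs_node_ptr_generation() comes from ctree.h and is an assumption here.
 */
#if 0
static int example_check_child(struct extent_io_tree *io_tree,
                               struct extent_buffer *parent, int slot,
                               struct extent_buffer *child)
{
        u64 expected = btrfs_node_ptr_generation(parent, slot);

        /* passing 0 would skip the check; 1 means a stale or misplaced block */
        return verify_parent_transid(io_tree, child, expected, 0);
}
#endif
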
static bool btrfs_supported_super_csum(u16 csum_type)
{
        switch (csum_type) {
        case BTRFS_CSUM_TYPE_CRC32:
        case BTRFS_CSUM_TYPE_XXHASH:
        case BTRFS_CSUM_TYPE_SHA256:
        case BTRFS_CSUM_TYPE_BLAKE2:
                return true;
        default:
                return false;
        }
}

/*
 * Return 0 if the superblock checksum type matches the checksum value of that
 * algorithm. Pass the raw disk superblock data.
 */
static int btrfs_check_super_csum(struct btrfs_fs_info *fs_info,
                                  char *raw_disk_sb)
{
        struct btrfs_super_block *disk_sb =
                (struct btrfs_super_block *)raw_disk_sb;
        char result[BTRFS_CSUM_SIZE];
        SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);

        shash->tfm = fs_info->csum_shash;

        /*
         * The super_block structure does not span the whole
         * BTRFS_SUPER_INFO_SIZE range, we expect that the unused space is
         * filled with zeros and is included in the checksum.
         */
        crypto_shash_digest(shash, raw_disk_sb + BTRFS_CSUM_SIZE,
                            BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE, result);

        if (memcmp(disk_sb->csum, result, btrfs_super_csum_size(disk_sb)))
                return 1;

        return 0;
}

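/*
 * Illustrative sketch, not part of disk-io.c: how a mount-time caller is
 * expected to combine the two helpers above.  The checksum *type* advertised
 * by the superblock is validated first, then the checksum *value* over the
 * raw BTRFS_SUPER_INFO_SIZE buffer.  btrfs_super_csum_type() is an accessor
 * from ctree.h and, like the chosen error codes, is an assumption of this
 * sketch.
 */
#if 0
static int example_validate_super(struct btrfs_fs_info *fs_info,
                                  char *raw_disk_sb)
{
        struct btrfs_super_block *sb = (struct btrfs_super_block *)raw_disk_sb;

        if (!btrfs_supported_super_csum(btrfs_super_csum_type(sb)))
                return -EUCLEAN;        /* unknown checksum algorithm */
        if (btrfs_check_super_csum(fs_info, raw_disk_sb))
                return -EINVAL;         /* checksum mismatch */
        return 0;
}
#endif
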
int btrfs_verify_level_key(struct extent_buffer *eb, int level,
                           struct btrfs_key *first_key, u64 parent_transid)
{
        struct btrfs_fs_info *fs_info = eb->fs_info;
        int found_level;
        struct btrfs_key found_key;
        int ret;

        found_level = btrfs_header_level(eb);
        if (found_level != level) {
                WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG),
                     KERN_ERR "BTRFS: tree level check failed\n");
                btrfs_err(fs_info,
"tree level mismatch detected, bytenr=%llu level expected=%u has=%u",
                          eb->start, level, found_level);
                return -EIO;
        }

        if (!first_key)
                return 0;

        /*
         * For live tree blocks (new tree blocks in the current transaction),
         * we need proper lock context to avoid races, which is impossible
         * here.  So we only check tree blocks which are read from disk, whose
         * generation <= fs_info->last_trans_committed.
         */
        if (btrfs_header_generation(eb) > fs_info->last_trans_committed)
                return 0;

        /* We have @first_key, so this @eb must have at least one item */
        if (btrfs_header_nritems(eb) == 0) {
                btrfs_err(fs_info,
                "invalid tree nritems, bytenr=%llu nritems=0 expect >0",
                          eb->start);
                WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
                return -EUCLEAN;
        }

        if (found_level)
                btrfs_node_key_to_cpu(eb, &found_key, 0);
        else
                btrfs_item_key_to_cpu(eb, &found_key, 0);
        ret = btrfs_comp_cpu_keys(first_key, &found_key);

        if (ret) {
                WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG),
                     KERN_ERR "BTRFS: tree first key check failed\n");
                btrfs_err(fs_info,
"tree first key mismatch detected, bytenr=%llu parent_transid=%llu key expected=(%llu,%u,%llu) has=(%llu,%u,%llu)",
                          eb->start, parent_transid, first_key->objectid,
                          first_key->type, first_key->offset,
                          found_key.objectid, found_key.type,
                          found_key.offset);
        }
        return ret;
}

/*
 * helper to read a given tree block, doing retries as required when
 * the checksums don't match and we have alternate mirrors to try.
 *
 * @parent_transid:     expected transid, skip check if 0
 * @level:              expected level, mandatory check
 * @first_key:          expected key of first slot, skip check if NULL
 */
static int btree_read_extent_buffer_pages(struct extent_buffer *eb,
                                          u64 parent_transid, int level,
                                          struct btrfs_key *first_key)
{
        struct btrfs_fs_info *fs_info = eb->fs_info;
        struct extent_io_tree *io_tree;
        int failed = 0;
        int ret;
        int num_copies = 0;
        int mirror_num = 0;
        int failed_mirror = 0;

        io_tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
        while (1) {
                clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
                ret = read_extent_buffer_pages(eb, WAIT_COMPLETE, mirror_num);
                if (!ret) {
                        if (verify_parent_transid(io_tree, eb,
                                                  parent_transid, 0))
                                ret = -EIO;
                        else if (btrfs_verify_level_key(eb, level,
                                                first_key, parent_transid))
                                ret = -EUCLEAN;
                        else
                                break;
                }

                num_copies = btrfs_num_copies(fs_info,
                                              eb->start, eb->len);
                if (num_copies == 1)
                        break;

                if (!failed_mirror) {
                        failed = 1;
                        failed_mirror = eb->read_mirror;
                }

                mirror_num++;
                if (mirror_num == failed_mirror)
                        mirror_num++;

                if (mirror_num > num_copies)
                        break;
        }

        if (failed && !ret && failed_mirror)
                btrfs_repair_eb_io_failure(eb, failed_mirror);

        return ret;
}

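/*
 * Illustrative sketch, not part of disk-io.c: the mirror iteration order used
 * by btree_read_extent_buffer_pages() above.  mirror_num 0 means "default
 * copy"; after a failure the explicit copies 1..num_copies are tried while
 * skipping the mirror that produced the first bad read, so a good copy (if
 * one exists) can later be used to repair it.
 */
#if 0
static int example_next_mirror(int mirror_num, int failed_mirror,
                               int num_copies)
{
        mirror_num++;
        if (mirror_num == failed_mirror)
                mirror_num++;
        if (mirror_num > num_copies)
                return -1;      /* every copy has been tried */
        return mirror_num;
}
#endif
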
/*
 * checksum a dirty tree block before IO.  This has extra checks to make sure
 * we only fill in the checksum field in the first page of a multi-page block
 */

static int csum_dirty_buffer(struct btrfs_fs_info *fs_info, struct page *page)
{
        u64 start = page_offset(page);
        u64 found_start;
        u8 result[BTRFS_CSUM_SIZE];
        u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
        struct extent_buffer *eb;
        int ret;

        eb = (struct extent_buffer *)page->private;
        if (page != eb->pages[0])
                return 0;

        found_start = btrfs_header_bytenr(eb);
        /*
         * Please do not consolidate these warnings into a single if.
         * It is useful to know what went wrong.
         */
        if (WARN_ON(found_start != start))
                return -EUCLEAN;
        if (WARN_ON(!PageUptodate(page)))
                return -EUCLEAN;

        ASSERT(memcmp_extent_buffer(eb, fs_info->fs_devices->metadata_uuid,
                                    offsetof(struct btrfs_header, fsid),
                                    BTRFS_FSID_SIZE) == 0);

        csum_tree_block(eb, result);

        if (btrfs_header_level(eb))
                ret = btrfs_check_node(eb);
        else
                ret = btrfs_check_leaf_full(eb);

        if (ret < 0) {
                btrfs_print_tree(eb, 0);
                btrfs_err(fs_info,
                        "block=%llu write time tree block corruption detected",
                        eb->start);
                WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
                return ret;
        }
        write_extent_buffer(eb, result, 0, csum_size);

        return 0;
}

static int check_tree_block_fsid(struct extent_buffer *eb)
{
        struct btrfs_fs_info *fs_info = eb->fs_info;
        struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
        u8 fsid[BTRFS_FSID_SIZE];
        u8 *metadata_uuid;

        read_extent_buffer(eb, fsid, offsetof(struct btrfs_header, fsid),
                           BTRFS_FSID_SIZE);
        /*
         * Checking the incompat flag is only valid for the current fs. For
         * seed devices it's forbidden to have their uuid changed so reading
         * ->fsid in this case is fine
         */
        if (btrfs_fs_incompat(fs_info, METADATA_UUID))
                metadata_uuid = fs_devices->metadata_uuid;
        else
                metadata_uuid = fs_devices->fsid;

        if (!memcmp(fsid, metadata_uuid, BTRFS_FSID_SIZE))
                return 0;

        list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list)
                if (!memcmp(fsid, seed_devs->fsid, BTRFS_FSID_SIZE))
                        return 0;

        return 1;
}

int btrfs_validate_metadata_buffer(struct btrfs_io_bio *io_bio, u64 phy_offset,
                                   struct page *page, u64 start, u64 end,
                                   int mirror)
{
        u64 found_start;
        int found_level;
        struct extent_buffer *eb;
        struct btrfs_fs_info *fs_info;
        u16 csum_size;
        int ret = 0;
        u8 result[BTRFS_CSUM_SIZE];
        int reads_done;

        if (!page->private)
                goto out;

        eb = (struct extent_buffer *)page->private;
        fs_info = eb->fs_info;
        csum_size = btrfs_super_csum_size(fs_info->super_copy);

        /*
         * The pending IO might have been the only thing that kept this buffer
         * in memory.  Make sure we have a ref for all these other checks.
         */
        atomic_inc(&eb->refs);

        reads_done = atomic_dec_and_test(&eb->io_pages);
        if (!reads_done)
                goto err;

        eb->read_mirror = mirror;
        if (test_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags)) {
                ret = -EIO;
                goto err;
        }

        found_start = btrfs_header_bytenr(eb);
        if (found_start != eb->start) {
                btrfs_err_rl(fs_info, "bad tree block start, want %llu have %llu",
                             eb->start, found_start);
                ret = -EIO;
                goto err;
        }
        if (check_tree_block_fsid(eb)) {
                btrfs_err_rl(fs_info, "bad fsid on block %llu",
                             eb->start);
                ret = -EIO;
                goto err;
        }
        found_level = btrfs_header_level(eb);
        if (found_level >= BTRFS_MAX_LEVEL) {
                btrfs_err(fs_info, "bad tree block level %d on %llu",
                          (int)btrfs_header_level(eb), eb->start);
                ret = -EIO;
                goto err;
        }

        btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb),
                                       eb, found_level);

        csum_tree_block(eb, result);

        if (memcmp_extent_buffer(eb, result, 0, csum_size)) {
                u8 val[BTRFS_CSUM_SIZE] = { 0 };

                read_extent_buffer(eb, &val, 0, csum_size);
                btrfs_warn_rl(fs_info,
        "%s checksum verify failed on %llu wanted " CSUM_FMT " found " CSUM_FMT " level %d",
                              fs_info->sb->s_id, eb->start,
                              CSUM_FMT_VALUE(csum_size, val),
                              CSUM_FMT_VALUE(csum_size, result),
                              btrfs_header_level(eb));
                ret = -EUCLEAN;
                goto err;
        }

        /*
         * If this is a leaf block and it is corrupt, set the corrupt bit so
         * that we don't try and read the other copies of this block, just
         * return -EIO.
         */
        if (found_level == 0 && btrfs_check_leaf_full(eb)) {
                set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
                ret = -EIO;
        }

        if (found_level > 0 && btrfs_check_node(eb))
                ret = -EIO;

        if (!ret)
                set_extent_buffer_uptodate(eb);
        else
                btrfs_err(fs_info,
                          "block=%llu read time tree block corruption detected",
                          eb->start);
err:
        if (reads_done &&
            test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
                btree_readahead_hook(eb, ret);

        if (ret) {
                /*
                 * our io error hook is going to dec the io pages
                 * again, we have to make sure it has something
                 * to decrement
                 */
                atomic_inc(&eb->io_pages);
                clear_extent_buffer_uptodate(eb);
        }
        free_extent_buffer(eb);
out:
        return ret;
}

static void end_workqueue_bio(struct bio *bio)
{
        struct btrfs_end_io_wq *end_io_wq = bio->bi_private;
        struct btrfs_fs_info *fs_info;
        struct btrfs_workqueue *wq;

        fs_info = end_io_wq->info;
        end_io_wq->status = bio->bi_status;

        if (bio_op(bio) == REQ_OP_WRITE) {
                if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA)
                        wq = fs_info->endio_meta_write_workers;
                else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE)
                        wq = fs_info->endio_freespace_worker;
                else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
                        wq = fs_info->endio_raid56_workers;
                else
                        wq = fs_info->endio_write_workers;
        } else {
                if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
                        wq = fs_info->endio_raid56_workers;
                else if (end_io_wq->metadata)
                        wq = fs_info->endio_meta_workers;
                else
                        wq = fs_info->endio_workers;
        }

        btrfs_init_work(&end_io_wq->work, end_workqueue_fn, NULL, NULL);
        btrfs_queue_work(wq, &end_io_wq->work);
}

blk_status_t btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
                                 enum btrfs_wq_endio_type metadata)
{
        struct btrfs_end_io_wq *end_io_wq;

        end_io_wq = kmem_cache_alloc(btrfs_end_io_wq_cache, GFP_NOFS);
        if (!end_io_wq)
                return BLK_STS_RESOURCE;

        end_io_wq->private = bio->bi_private;
        end_io_wq->end_io = bio->bi_end_io;
        end_io_wq->info = info;
        end_io_wq->status = 0;
        end_io_wq->bio = bio;
        end_io_wq->metadata = metadata;

        bio->bi_private = end_io_wq;
        bio->bi_end_io = end_workqueue_bio;
        return 0;
}

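/*
 * Illustrative sketch, not part of this excerpt: end_workqueue_fn() is only
 * forward-declared above; its assumed shape is shown here to make the round
 * trip visible.  The worker restores the bi_end_io/bi_private saved by
 * btrfs_bio_wq_end_io(), frees the wrapper, and completes the bio from
 * process context.
 */
#if 0
static void example_end_workqueue_fn(struct btrfs_work *work)
{
        struct btrfs_end_io_wq *end_io_wq =
                container_of(work, struct btrfs_end_io_wq, work);
        struct bio *bio = end_io_wq->bio;

        bio->bi_status = end_io_wq->status;
        bio->bi_private = end_io_wq->private;
        bio->bi_end_io = end_io_wq->end_io;
        kmem_cache_free(btrfs_end_io_wq_cache, end_io_wq);
        bio_endio(bio);
}
#endif
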
static void run_one_async_start(struct btrfs_work *work)
{
        struct async_submit_bio *async;
        blk_status_t ret;

        async = container_of(work, struct async_submit_bio, work);
        ret = async->submit_bio_start(async->private_data, async->bio,
                                      async->bio_offset);
        if (ret)
                async->status = ret;
}

/*
 * In order to insert checksums into the metadata in large chunks, we wait
 * until bio submission time.  All the pages in the bio are checksummed and
 * sums are attached onto the ordered extent record.
 *
 * At IO completion time the csums attached on the ordered extent record are
 * inserted into the tree.
 */
static void run_one_async_done(struct btrfs_work *work)
{
        struct async_submit_bio *async;
        struct inode *inode;
        blk_status_t ret;

        async = container_of(work, struct async_submit_bio, work);
        inode = async->private_data;

        /* If an error occurred we just want to clean up the bio and move on */
        if (async->status) {
                async->bio->bi_status = async->status;
                bio_endio(async->bio);
                return;
        }

        /*
         * All of the bios that pass through here are from async helpers.
         * Use REQ_CGROUP_PUNT to issue them from the owning cgroup's context.
         * This changes nothing when cgroups aren't in use.
         */
        async->bio->bi_opf |= REQ_CGROUP_PUNT;
        ret = btrfs_map_bio(btrfs_sb(inode->i_sb), async->bio, async->mirror_num);
        if (ret) {
                async->bio->bi_status = ret;
                bio_endio(async->bio);
        }
}

static void run_one_async_free(struct btrfs_work *work)
{
        struct async_submit_bio *async;

        async = container_of(work, struct async_submit_bio, work);
        kfree(async);
}

blk_status_t btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
                                 int mirror_num, unsigned long bio_flags,
                                 u64 bio_offset, void *private_data,
                                 extent_submit_bio_start_t *submit_bio_start)
{
        struct async_submit_bio *async;

        async = kmalloc(sizeof(*async), GFP_NOFS);
        if (!async)
                return BLK_STS_RESOURCE;

        async->private_data = private_data;
        async->bio = bio;
        async->mirror_num = mirror_num;
        async->submit_bio_start = submit_bio_start;

        btrfs_init_work(&async->work, run_one_async_start, run_one_async_done,
                        run_one_async_free);

        async->bio_offset = bio_offset;

        async->status = 0;

        if (op_is_sync(bio->bi_opf))
                btrfs_set_work_high_priority(&async->work);

        btrfs_queue_work(fs_info->workers, &async->work);
        return 0;
}

static blk_status_t btree_csum_one_bio(struct bio *bio)
{
        struct bio_vec *bvec;
        struct btrfs_root *root;
        int ret = 0;
        struct bvec_iter_all iter_all;

        ASSERT(!bio_flagged(bio, BIO_CLONED));
        bio_for_each_segment_all(bvec, bio, iter_all) {
                root = BTRFS_I(bvec->bv_page->mapping->host)->root;
                ret = csum_dirty_buffer(root->fs_info, bvec->bv_page);
                if (ret)
                        break;
        }

        return errno_to_blk_status(ret);
}

static blk_status_t btree_submit_bio_start(void *private_data, struct bio *bio,
                                           u64 bio_offset)
{
        /*
         * when we're called for a write, we're already in the async
         * submission context.  Just jump into btrfs_map_bio
         */
        return btree_csum_one_bio(bio);
}

static int check_async_write(struct btrfs_fs_info *fs_info,
                             struct btrfs_inode *bi)
{
        if (atomic_read(&bi->sync_writers))
                return 0;
        if (test_bit(BTRFS_FS_CSUM_IMPL_FAST, &fs_info->flags))
                return 0;
        return 1;
}

blk_status_t btrfs_submit_metadata_bio(struct inode *inode, struct bio *bio,
                                       int mirror_num, unsigned long bio_flags)
{
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        int async = check_async_write(fs_info, BTRFS_I(inode));
        blk_status_t ret;

        if (bio_op(bio) != REQ_OP_WRITE) {
                /*
                 * called for a read, do the setup so that checksum validation
                 * can happen in the async kernel threads
                 */
                ret = btrfs_bio_wq_end_io(fs_info, bio,
                                          BTRFS_WQ_ENDIO_METADATA);
                if (ret)
                        goto out_w_error;
                ret = btrfs_map_bio(fs_info, bio, mirror_num);
        } else if (!async) {
                ret = btree_csum_one_bio(bio);
                if (ret)
                        goto out_w_error;
                ret = btrfs_map_bio(fs_info, bio, mirror_num);
        } else {
                /*
                 * kthread helpers are used to submit writes so that
                 * checksumming can happen in parallel across all CPUs
                 */
                ret = btrfs_wq_submit_bio(fs_info, bio, mirror_num, 0,
                                          0, inode, btree_submit_bio_start);
        }

        if (ret)
                goto out_w_error;
        return 0;

out_w_error:
        bio->bi_status = ret;
        bio_endio(bio);
        return ret;
}

#ifdef CONFIG_MIGRATION
static int btree_migratepage(struct address_space *mapping,
                             struct page *newpage, struct page *page,
                             enum migrate_mode mode)
{
        /*
         * we can't safely write a btree page from here,
         * we haven't done the locking hook
         */
        if (PageDirty(page))
                return -EAGAIN;
        /*
         * Buffers may be managed in a filesystem specific way.
         * We must have no buffers or drop them.
         */
        if (page_has_private(page) &&
            !try_to_release_page(page, GFP_KERNEL))
                return -EAGAIN;
        return migrate_page(mapping, newpage, page, mode);
}
#endif

static int btree_writepages(struct address_space *mapping,
                            struct writeback_control *wbc)
{
        struct btrfs_fs_info *fs_info;
        int ret;

        if (wbc->sync_mode == WB_SYNC_NONE) {

                if (wbc->for_kupdate)
                        return 0;

                fs_info = BTRFS_I(mapping->host)->root->fs_info;
                /* this is a bit racy, but that's ok */
                ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes,
                                               BTRFS_DIRTY_METADATA_THRESH,
                                               fs_info->dirty_metadata_batch);
                if (ret < 0)
                        return 0;
        }
        return btree_write_cache_pages(mapping, wbc);
}

static int btree_releasepage(struct page *page, gfp_t gfp_flags)
{
        if (PageWriteback(page) || PageDirty(page))
                return 0;

        return try_release_extent_buffer(page);
}

static void btree_invalidatepage(struct page *page, unsigned int offset,
                                 unsigned int length)
{
        struct extent_io_tree *tree;

        tree = &BTRFS_I(page->mapping->host)->io_tree;
        extent_invalidatepage(tree, page, offset);
        btree_releasepage(page, GFP_NOFS);
        if (PagePrivate(page)) {
                btrfs_warn(BTRFS_I(page->mapping->host)->root->fs_info,
                           "page private not zero on page %llu",
                           (unsigned long long)page_offset(page));
                detach_page_private(page);
        }
}

static int btree_set_page_dirty(struct page *page)
{
#ifdef DEBUG
        struct extent_buffer *eb;

        BUG_ON(!PagePrivate(page));
        eb = (struct extent_buffer *)page->private;
        BUG_ON(!eb);
        BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
        BUG_ON(!atomic_read(&eb->refs));
        btrfs_assert_tree_locked(eb);
#endif
        return __set_page_dirty_nobuffers(page);
}

static const struct address_space_operations btree_aops = {
        .writepages     = btree_writepages,
        .releasepage    = btree_releasepage,
        .invalidatepage = btree_invalidatepage,
#ifdef CONFIG_MIGRATION
        .migratepage    = btree_migratepage,
#endif
        .set_page_dirty = btree_set_page_dirty,
};

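/*
 * Illustrative sketch, not part of this excerpt: the operations table above
 * is attached to the per-filesystem btree inode when fs_info is set up
 * (elsewhere in disk-io.c), roughly as below.  The helper name and the GFP
 * mask choice are assumptions of this sketch.
 */
#if 0
static void example_init_btree_inode_mapping(struct btrfs_fs_info *fs_info)
{
        struct inode *inode = fs_info->btree_inode;

        inode->i_mapping->a_ops = &btree_aops;
        mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
}
#endif
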
void readahead_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr)
{
        struct extent_buffer *buf = NULL;
        int ret;

        buf = btrfs_find_create_tree_block(fs_info, bytenr);
        if (IS_ERR(buf))
                return;

        ret = read_extent_buffer_pages(buf, WAIT_NONE, 0);
        if (ret < 0)
                free_extent_buffer_stale(buf);
        else
                free_extent_buffer(buf);
}

struct extent_buffer *btrfs_find_create_tree_block(
                                                struct btrfs_fs_info *fs_info,
                                                u64 bytenr)
{
        if (btrfs_is_testing(fs_info))
                return alloc_test_extent_buffer(fs_info, bytenr);
        return alloc_extent_buffer(fs_info, bytenr);
}

/*
 * Read tree block at logical address @bytenr and do basic but critical
 * verification.
 *
 * @parent_transid:     expected transid of this tree block, skip check if 0
 * @level:              expected level, mandatory check
 * @first_key:          expected key in slot 0, skip check if NULL
 */
struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr,
                                      u64 parent_transid, int level,
                                      struct btrfs_key *first_key)
{
        struct extent_buffer *buf = NULL;
        int ret;

        buf = btrfs_find_create_tree_block(fs_info, bytenr);
        if (IS_ERR(buf))
                return buf;

        ret = btree_read_extent_buffer_pages(buf, parent_transid,
                                             level, first_key);
        if (ret) {
                free_extent_buffer_stale(buf);
                return ERR_PTR(ret);
        }
        return buf;
}

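/*
 * Illustrative sketch, not part of disk-io.c: how a tree walker fills in the
 * three verification arguments when descending from a parent node.  The
 * accessors btrfs_node_blockptr() and btrfs_node_ptr_generation() come from
 * ctree.h and are assumptions of this sketch.
 */
#if 0
static struct extent_buffer *example_read_child(struct btrfs_fs_info *fs_info,
                                                struct extent_buffer *parent,
                                                int slot)
{
        struct btrfs_key first_key;

        btrfs_node_key_to_cpu(parent, &first_key, slot);
        return read_tree_block(fs_info, btrfs_node_blockptr(parent, slot),
                               btrfs_node_ptr_generation(parent, slot),
                               btrfs_header_level(parent) - 1, &first_key);
}
#endif
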
void btrfs_clean_tree_block(struct extent_buffer *buf)
{
        struct btrfs_fs_info *fs_info = buf->fs_info;

        if (btrfs_header_generation(buf) ==
            fs_info->running_transaction->transid) {
                btrfs_assert_tree_locked(buf);

                if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) {
                        percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
                                                 -buf->len,
                                                 fs_info->dirty_metadata_batch);
                        /* ugh, clear_extent_buffer_dirty needs to lock the page */
                        btrfs_set_lock_blocking_write(buf);
                        clear_extent_buffer_dirty(buf);
                }
        }
}

static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
                         u64 objectid)
{
        bool dummy = test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state);

        root->fs_info = fs_info;
        root->node = NULL;
        root->commit_root = NULL;
        root->state = 0;
        root->orphan_cleanup_state = 0;

        root->last_trans = 0;
        root->highest_objectid = 0;
        root->nr_delalloc_inodes = 0;
        root->nr_ordered_extents = 0;
        root->inode_tree = RB_ROOT;
        INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC);
        root->block_rsv = NULL;

        INIT_LIST_HEAD(&root->dirty_list);
        INIT_LIST_HEAD(&root->root_list);
        INIT_LIST_HEAD(&root->delalloc_inodes);
        INIT_LIST_HEAD(&root->delalloc_root);
        INIT_LIST_HEAD(&root->ordered_extents);
        INIT_LIST_HEAD(&root->ordered_root);
        INIT_LIST_HEAD(&root->reloc_dirty_list);
        INIT_LIST_HEAD(&root->logged_list[0]);
        INIT_LIST_HEAD(&root->logged_list[1]);
        spin_lock_init(&root->inode_lock);
        spin_lock_init(&root->delalloc_lock);
        spin_lock_init(&root->ordered_extent_lock);
        spin_lock_init(&root->accounting_lock);
        spin_lock_init(&root->log_extents_lock[0]);
        spin_lock_init(&root->log_extents_lock[1]);
        spin_lock_init(&root->qgroup_meta_rsv_lock);
        mutex_init(&root->objectid_mutex);
        mutex_init(&root->log_mutex);
        mutex_init(&root->ordered_extent_mutex);
        mutex_init(&root->delalloc_mutex);
        init_waitqueue_head(&root->qgroup_flush_wait);
        init_waitqueue_head(&root->log_writer_wait);
        init_waitqueue_head(&root->log_commit_wait[0]);
        init_waitqueue_head(&root->log_commit_wait[1]);
        INIT_LIST_HEAD(&root->log_ctxs[0]);
        INIT_LIST_HEAD(&root->log_ctxs[1]);
        atomic_set(&root->log_commit[0], 0);
        atomic_set(&root->log_commit[1], 0);
        atomic_set(&root->log_writers, 0);
        atomic_set(&root->log_batch, 0);
        refcount_set(&root->refs, 1);
        atomic_set(&root->snapshot_force_cow, 0);
        atomic_set(&root->nr_swapfiles, 0);
        root->log_transid = 0;
        root->log_transid_committed = -1;
        root->last_log_commit = 0;
        if (!dummy) {
                extent_io_tree_init(fs_info, &root->dirty_log_pages,
                                    IO_TREE_ROOT_DIRTY_LOG_PAGES, NULL);
                extent_io_tree_init(fs_info, &root->log_csum_range,
                                    IO_TREE_LOG_CSUM_RANGE, NULL);
        }

        memset(&root->root_key, 0, sizeof(root->root_key));
        memset(&root->root_item, 0, sizeof(root->root_item));
        memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
        root->root_key.objectid = objectid;
        root->anon_dev = 0;

        spin_lock_init(&root->root_item_lock);
        btrfs_qgroup_init_swapped_blocks(&root->swapped_blocks);
#ifdef CONFIG_BTRFS_DEBUG
        INIT_LIST_HEAD(&root->leak_list);
        spin_lock(&fs_info->fs_roots_radix_lock);
        list_add_tail(&root->leak_list, &fs_info->allocated_roots);
        spin_unlock(&fs_info->fs_roots_radix_lock);
#endif
}

static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info,
                                           u64 objectid, gfp_t flags)
{
        struct btrfs_root *root = kzalloc(sizeof(*root), flags);

        if (root)
                __setup_root(root, fs_info, objectid);
        return root;
}

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
/* Should only be used by the testing infrastructure */
struct btrfs_root *btrfs_alloc_dummy_root(struct btrfs_fs_info *fs_info)
{
        struct btrfs_root *root;

        if (!fs_info)
                return ERR_PTR(-EINVAL);

        root = btrfs_alloc_root(fs_info, BTRFS_ROOT_TREE_OBJECTID, GFP_KERNEL);
        if (!root)
                return ERR_PTR(-ENOMEM);

        /* We don't use the stripesize in selftest, set it as sectorsize */
        root->alloc_bytenr = 0;

        return root;
}
#endif

struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
                                     u64 objectid)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct extent_buffer *leaf;
        struct btrfs_root *tree_root = fs_info->tree_root;
        struct btrfs_root *root;
        struct btrfs_key key;
        unsigned int nofs_flag;
        int ret = 0;

        /*
         * We're holding a transaction handle, so use a NOFS memory allocation
         * context to avoid deadlock if reclaim happens.
         */
        nofs_flag = memalloc_nofs_save();
        root = btrfs_alloc_root(fs_info, objectid, GFP_KERNEL);
        memalloc_nofs_restore(nofs_flag);
        if (!root)
                return ERR_PTR(-ENOMEM);

        root->root_key.objectid = objectid;
        root->root_key.type = BTRFS_ROOT_ITEM_KEY;
        root->root_key.offset = 0;

        leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0,
                                      BTRFS_NESTING_NORMAL);
        if (IS_ERR(leaf)) {
                ret = PTR_ERR(leaf);
                leaf = NULL;
                goto fail;
        }

        root->node = leaf;
        btrfs_mark_buffer_dirty(leaf);

        root->commit_root = btrfs_root_node(root);
        set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);

        root->root_item.flags = 0;
        root->root_item.byte_limit = 0;
        btrfs_set_root_bytenr(&root->root_item, leaf->start);
        btrfs_set_root_generation(&root->root_item, trans->transid);
        btrfs_set_root_level(&root->root_item, 0);
        btrfs_set_root_refs(&root->root_item, 1);
        btrfs_set_root_used(&root->root_item, leaf->len);
        btrfs_set_root_last_snapshot(&root->root_item, 0);
        btrfs_set_root_dirid(&root->root_item, 0);
        if (is_fstree(objectid))
                generate_random_guid(root->root_item.uuid);
        else
                export_guid(root->root_item.uuid, &guid_null);
        root->root_item.drop_level = 0;

        key.objectid = objectid;
        key.type = BTRFS_ROOT_ITEM_KEY;
        key.offset = 0;
        ret = btrfs_insert_root(trans, tree_root, &key, &root->root_item);
        if (ret)
                goto fail;

        btrfs_tree_unlock(leaf);

        return root;

fail:
        if (leaf)
                btrfs_tree_unlock(leaf);
        btrfs_put_root(root);

        return ERR_PTR(ret);
}

static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
                                         struct btrfs_fs_info *fs_info)
{
        struct btrfs_root *root;
        struct extent_buffer *leaf;

        root = btrfs_alloc_root(fs_info, BTRFS_TREE_LOG_OBJECTID, GFP_NOFS);
        if (!root)
                return ERR_PTR(-ENOMEM);

        root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
        root->root_key.type = BTRFS_ROOT_ITEM_KEY;
        root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;

        /*
         * DON'T set SHAREABLE bit for log trees.
         *
         * Log trees are not exposed to user space thus can't be snapshotted,
         * and they go away before a real commit is actually done.
         *
         * They do store pointers to file data extents, and those reference
         * counts still get updated (along with back refs to the log tree).
         */

        leaf = btrfs_alloc_tree_block(trans, root, 0, BTRFS_TREE_LOG_OBJECTID,
                                      NULL, 0, 0, 0, BTRFS_NESTING_NORMAL);
        if (IS_ERR(leaf)) {
                btrfs_put_root(root);
                return ERR_CAST(leaf);
        }

        root->node = leaf;

        btrfs_mark_buffer_dirty(root->node);
        btrfs_tree_unlock(root->node);
        return root;
}

int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
                             struct btrfs_fs_info *fs_info)
{
        struct btrfs_root *log_root;

        log_root = alloc_log_tree(trans, fs_info);
        if (IS_ERR(log_root))
                return PTR_ERR(log_root);
        WARN_ON(fs_info->log_root_tree);
        fs_info->log_root_tree = log_root;
        return 0;
}

int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
                       struct btrfs_root *root)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_root *log_root;
        struct btrfs_inode_item *inode_item;

        log_root = alloc_log_tree(trans, fs_info);
        if (IS_ERR(log_root))
                return PTR_ERR(log_root);

        log_root->last_trans = trans->transid;
        log_root->root_key.offset = root->root_key.objectid;

        inode_item = &log_root->root_item.inode;
        btrfs_set_stack_inode_generation(inode_item, 1);
        btrfs_set_stack_inode_size(inode_item, 3);
        btrfs_set_stack_inode_nlink(inode_item, 1);
        btrfs_set_stack_inode_nbytes(inode_item,
                                     fs_info->nodesize);
        btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755);

        btrfs_set_root_node(&log_root->root_item, log_root->node);

        WARN_ON(root->log_root);
        root->log_root = log_root;
        root->log_transid = 0;
        root->log_transid_committed = -1;
        root->last_log_commit = 0;
        return 0;
}

static struct btrfs_root *read_tree_root_path(struct btrfs_root *tree_root,
                                              struct btrfs_path *path,
                                              struct btrfs_key *key)
{
        struct btrfs_root *root;
        struct btrfs_fs_info *fs_info = tree_root->fs_info;
        u64 generation;
        int ret;
        int level;

        root = btrfs_alloc_root(fs_info, key->objectid, GFP_NOFS);
        if (!root)
                return ERR_PTR(-ENOMEM);

        ret = btrfs_find_root(tree_root, key, path,
                              &root->root_item, &root->root_key);
        if (ret) {
                if (ret > 0)
                        ret = -ENOENT;
                goto fail;
        }

        generation = btrfs_root_generation(&root->root_item);
        level = btrfs_root_level(&root->root_item);
        root->node = read_tree_block(fs_info,
                                     btrfs_root_bytenr(&root->root_item),
                                     generation, level, NULL);
        if (IS_ERR(root->node)) {
                ret = PTR_ERR(root->node);
                root->node = NULL;
                goto fail;
        } else if (!btrfs_buffer_uptodate(root->node, generation, 0)) {
                ret = -EIO;
                goto fail;
        }
        root->commit_root = btrfs_root_node(root);
        return root;
fail:
        btrfs_put_root(root);
        return ERR_PTR(ret);
}

struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root,
                                        struct btrfs_key *key)
{
        struct btrfs_root *root;
        struct btrfs_path *path;

        path = btrfs_alloc_path();
        if (!path)
                return ERR_PTR(-ENOMEM);
        root = read_tree_root_path(tree_root, path, key);
        btrfs_free_path(path);

        return root;
}

2dfb1e43 QW |
1341 | /* |
1342 | * Initialize subvolume root in-memory structure | |
1343 | * | |
1344 | * @anon_dev: anonymous device to attach to the root, if zero, allocate new | |
1345 | */ | |
1346 | static int btrfs_init_fs_root(struct btrfs_root *root, dev_t anon_dev) | |
cb517eab MX |
1347 | { |
1348 | int ret; | |
dcc3eb96 | 1349 | unsigned int nofs_flag; |
cb517eab MX |
1350 | |
1351 | root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS); | |
1352 | root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned), | |
1353 | GFP_NOFS); | |
1354 | if (!root->free_ino_pinned || !root->free_ino_ctl) { | |
1355 | ret = -ENOMEM; | |
1356 | goto fail; | |
1357 | } | |
1358 | ||
dcc3eb96 NB |
1359 | /* |
1360 | * We might be called under a transaction (e.g. indirect backref | |
1361 | * resolution) which could deadlock if it triggers memory reclaim | |
1362 | */ | |
1363 | nofs_flag = memalloc_nofs_save(); | |
1364 | ret = btrfs_drew_lock_init(&root->snapshot_lock); | |
1365 | memalloc_nofs_restore(nofs_flag); | |
1366 | if (ret) | |
8257b2dc | 1367 | goto fail; |
8257b2dc | 1368 | |
aeb935a4 QW |
1369 | if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID && |
1370 | root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID) { | |
92a7cc42 | 1371 | set_bit(BTRFS_ROOT_SHAREABLE, &root->state); |
f39e4571 JB |
1372 | btrfs_check_and_init_root_item(&root->root_item); |
1373 | } | |
1374 | ||
cb517eab | 1375 | btrfs_init_free_ino_ctl(root); |
57cdc8db DS |
1376 | spin_lock_init(&root->ino_cache_lock); |
1377 | init_waitqueue_head(&root->ino_cache_wait); | |
cb517eab | 1378 | |
851fd730 QW |
1379 | /* |
1380 | * Don't assign an anonymous block device to roots that are not exposed to | |
1381 | * userspace; the id pool is limited to 1M | |
1382 | */ | |
1383 | if (is_fstree(root->root_key.objectid) && | |
1384 | btrfs_root_refs(&root->root_item) > 0) { | |
2dfb1e43 QW |
1385 | if (!anon_dev) { |
1386 | ret = get_anon_bdev(&root->anon_dev); | |
1387 | if (ret) | |
1388 | goto fail; | |
1389 | } else { | |
1390 | root->anon_dev = anon_dev; | |
1391 | } | |
851fd730 | 1392 | } |
f32e48e9 CR |
1393 | |
1394 | mutex_lock(&root->objectid_mutex); | |
1395 | ret = btrfs_find_highest_objectid(root, | |
1396 | &root->highest_objectid); | |
1397 | if (ret) { | |
1398 | mutex_unlock(&root->objectid_mutex); | |
876d2cf1 | 1399 | goto fail; |
f32e48e9 CR |
1400 | } |
1401 | ||
1402 | ASSERT(root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID); | |
1403 | ||
1404 | mutex_unlock(&root->objectid_mutex); | |
1405 | ||
cb517eab MX |
1406 | return 0; |
1407 | fail: | |
84db5ccf | 1408 | /* The caller is responsible for calling btrfs_free_fs_root */ |
cb517eab MX |
1409 | return ret; |
1410 | } | |
1411 | ||
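The NOFS scoping used above is a general pattern: when an allocation-heavy step may run while the caller holds a transaction, wrapping it in memalloc_nofs_save()/memalloc_nofs_restore() makes every allocation in that scope behave as GFP_NOFS, so reclaim cannot re-enter the filesystem and deadlock. A minimal sketch of the pattern, assuming kernel context and the surrounding btrfs headers (the helper name is illustrative only):

#include <linux/sched/mm.h>

/* Illustrative only: run an init step that allocates memory while a
 * transaction may be held by the caller. */
static int example_init_under_transaction(struct btrfs_root *root)
{
        unsigned int nofs_flag;
        int ret;

        /* All allocations in this scope implicitly drop __GFP_FS. */
        nofs_flag = memalloc_nofs_save();
        ret = btrfs_drew_lock_init(&root->snapshot_lock);
        memalloc_nofs_restore(nofs_flag);

        return ret;
}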
a98db0f3 JB |
1412 | static struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info, |
1413 | u64 root_id) | |
cb517eab MX |
1414 | { |
1415 | struct btrfs_root *root; | |
1416 | ||
1417 | spin_lock(&fs_info->fs_roots_radix_lock); | |
1418 | root = radix_tree_lookup(&fs_info->fs_roots_radix, | |
1419 | (unsigned long)root_id); | |
bc44d7c4 | 1420 | if (root) |
00246528 | 1421 | root = btrfs_grab_root(root); |
cb517eab MX |
1422 | spin_unlock(&fs_info->fs_roots_radix_lock); |
1423 | return root; | |
1424 | } | |
1425 | ||
49d11bea JB |
1426 | static struct btrfs_root *btrfs_get_global_root(struct btrfs_fs_info *fs_info, |
1427 | u64 objectid) | |
1428 | { | |
1429 | if (objectid == BTRFS_ROOT_TREE_OBJECTID) | |
1430 | return btrfs_grab_root(fs_info->tree_root); | |
1431 | if (objectid == BTRFS_EXTENT_TREE_OBJECTID) | |
1432 | return btrfs_grab_root(fs_info->extent_root); | |
1433 | if (objectid == BTRFS_CHUNK_TREE_OBJECTID) | |
1434 | return btrfs_grab_root(fs_info->chunk_root); | |
1435 | if (objectid == BTRFS_DEV_TREE_OBJECTID) | |
1436 | return btrfs_grab_root(fs_info->dev_root); | |
1437 | if (objectid == BTRFS_CSUM_TREE_OBJECTID) | |
1438 | return btrfs_grab_root(fs_info->csum_root); | |
1439 | if (objectid == BTRFS_QUOTA_TREE_OBJECTID) | |
1440 | return btrfs_grab_root(fs_info->quota_root) ? | |
1441 | fs_info->quota_root : ERR_PTR(-ENOENT); | |
1442 | if (objectid == BTRFS_UUID_TREE_OBJECTID) | |
1443 | return btrfs_grab_root(fs_info->uuid_root) ? | |
1444 | fs_info->uuid_root : ERR_PTR(-ENOENT); | |
1445 | if (objectid == BTRFS_FREE_SPACE_TREE_OBJECTID) | |
1446 | return btrfs_grab_root(fs_info->free_space_root) ? | |
1447 | fs_info->free_space_root : ERR_PTR(-ENOENT); | |
1448 | return NULL; | |
1449 | } | |
1450 | ||
cb517eab MX |
1451 | int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info, |
1452 | struct btrfs_root *root) | |
1453 | { | |
1454 | int ret; | |
1455 | ||
e1860a77 | 1456 | ret = radix_tree_preload(GFP_NOFS); |
cb517eab MX |
1457 | if (ret) |
1458 | return ret; | |
1459 | ||
1460 | spin_lock(&fs_info->fs_roots_radix_lock); | |
1461 | ret = radix_tree_insert(&fs_info->fs_roots_radix, | |
1462 | (unsigned long)root->root_key.objectid, | |
1463 | root); | |
af01d2e5 | 1464 | if (ret == 0) { |
00246528 | 1465 | btrfs_grab_root(root); |
27cdeb70 | 1466 | set_bit(BTRFS_ROOT_IN_RADIX, &root->state); |
af01d2e5 | 1467 | } |
cb517eab MX |
1468 | spin_unlock(&fs_info->fs_roots_radix_lock); |
1469 | radix_tree_preload_end(); | |
1470 | ||
1471 | return ret; | |
1472 | } | |
1473 | ||
bd647ce3 JB |
1474 | void btrfs_check_leaked_roots(struct btrfs_fs_info *fs_info) |
1475 | { | |
1476 | #ifdef CONFIG_BTRFS_DEBUG | |
1477 | struct btrfs_root *root; | |
1478 | ||
1479 | while (!list_empty(&fs_info->allocated_roots)) { | |
457f1864 JB |
1480 | char buf[BTRFS_ROOT_NAME_BUF_LEN]; |
1481 | ||
bd647ce3 JB |
1482 | root = list_first_entry(&fs_info->allocated_roots, |
1483 | struct btrfs_root, leak_list); | |
457f1864 JB |
1484 | btrfs_err(fs_info, "leaked root %s refcount %d", |
1485 | btrfs_root_name(root->root_key.objectid, buf), | |
bd647ce3 JB |
1486 | refcount_read(&root->refs)); |
1487 | while (refcount_read(&root->refs) > 1) | |
00246528 JB |
1488 | btrfs_put_root(root); |
1489 | btrfs_put_root(root); | |
bd647ce3 JB |
1490 | } |
1491 | #endif | |
1492 | } | |
1493 | ||
0d4b0463 JB |
1494 | void btrfs_free_fs_info(struct btrfs_fs_info *fs_info) |
1495 | { | |
141386e1 JB |
1496 | percpu_counter_destroy(&fs_info->dirty_metadata_bytes); |
1497 | percpu_counter_destroy(&fs_info->delalloc_bytes); | |
1498 | percpu_counter_destroy(&fs_info->dio_bytes); | |
1499 | percpu_counter_destroy(&fs_info->dev_replace.bio_counter); | |
1500 | btrfs_free_csum_hash(fs_info); | |
1501 | btrfs_free_stripe_hash_table(fs_info); | |
1502 | btrfs_free_ref_cache(fs_info); | |
0d4b0463 JB |
1503 | kfree(fs_info->balance_ctl); |
1504 | kfree(fs_info->delayed_root); | |
00246528 JB |
1505 | btrfs_put_root(fs_info->extent_root); |
1506 | btrfs_put_root(fs_info->tree_root); | |
1507 | btrfs_put_root(fs_info->chunk_root); | |
1508 | btrfs_put_root(fs_info->dev_root); | |
1509 | btrfs_put_root(fs_info->csum_root); | |
1510 | btrfs_put_root(fs_info->quota_root); | |
1511 | btrfs_put_root(fs_info->uuid_root); | |
1512 | btrfs_put_root(fs_info->free_space_root); | |
1513 | btrfs_put_root(fs_info->fs_root); | |
aeb935a4 | 1514 | btrfs_put_root(fs_info->data_reloc_root); |
bd647ce3 | 1515 | btrfs_check_leaked_roots(fs_info); |
3fd63727 | 1516 | btrfs_extent_buffer_leak_debug_check(fs_info); |
0d4b0463 JB |
1517 | kfree(fs_info->super_copy); |
1518 | kfree(fs_info->super_for_commit); | |
1519 | kvfree(fs_info); | |
1520 | } | |
1521 | ||
1522 | ||
2dfb1e43 QW |
1523 | /* |
1524 | * Get an in-memory reference of a root structure. | |
1525 | * | |
1526 | * For essential trees like root/extent tree, we grab it from fs_info directly. | |
1527 | * For subvolume trees, we check the cached filesystem roots first. If not | |
1528 | * found, then read it from disk and add it to cached fs roots. | |
1529 | * | |
1530 | * Caller should release the root by calling btrfs_put_root() after the usage. | |
1531 | * | |
1532 | * NOTE: Reloc and log trees can't be read by this function as they share the | |
1533 | * same root objectid. | |
1534 | * | |
1535 | * @objectid: root id | |
1536 | * @anon_dev: preallocated anonymous block device number for new roots, | |
1537 | * pass 0 for new allocation. | |
1538 | * @check_ref: whether to check root item references; if true, return -ENOENT | |
1539 | * for orphan roots | |
1540 | */ | |
1541 | static struct btrfs_root *btrfs_get_root_ref(struct btrfs_fs_info *fs_info, | |
1542 | u64 objectid, dev_t anon_dev, | |
1543 | bool check_ref) | |
5eda7b5e CM |
1544 | { |
1545 | struct btrfs_root *root; | |
381cf658 | 1546 | struct btrfs_path *path; |
1d4c08e0 | 1547 | struct btrfs_key key; |
5eda7b5e CM |
1548 | int ret; |
1549 | ||
49d11bea JB |
1550 | root = btrfs_get_global_root(fs_info, objectid); |
1551 | if (root) | |
1552 | return root; | |
4df27c4d | 1553 | again: |
56e9357a | 1554 | root = btrfs_lookup_fs_root(fs_info, objectid); |
48475471 | 1555 | if (root) { |
2dfb1e43 QW |
1556 | /* Shouldn't get preallocated anon_dev for cached roots */ |
1557 | ASSERT(!anon_dev); | |
bc44d7c4 | 1558 | if (check_ref && btrfs_root_refs(&root->root_item) == 0) { |
00246528 | 1559 | btrfs_put_root(root); |
48475471 | 1560 | return ERR_PTR(-ENOENT); |
bc44d7c4 | 1561 | } |
5eda7b5e | 1562 | return root; |
48475471 | 1563 | } |
5eda7b5e | 1564 | |
56e9357a DS |
1565 | key.objectid = objectid; |
1566 | key.type = BTRFS_ROOT_ITEM_KEY; | |
1567 | key.offset = (u64)-1; | |
1568 | root = btrfs_read_tree_root(fs_info->tree_root, &key); | |
5eda7b5e CM |
1569 | if (IS_ERR(root)) |
1570 | return root; | |
3394e160 | 1571 | |
c00869f1 | 1572 | if (check_ref && btrfs_root_refs(&root->root_item) == 0) { |
cb517eab | 1573 | ret = -ENOENT; |
581bb050 | 1574 | goto fail; |
35a30d7c | 1575 | } |
581bb050 | 1576 | |
2dfb1e43 | 1577 | ret = btrfs_init_fs_root(root, anon_dev); |
ac08aedf CM |
1578 | if (ret) |
1579 | goto fail; | |
3394e160 | 1580 | |
381cf658 DS |
1581 | path = btrfs_alloc_path(); |
1582 | if (!path) { | |
1583 | ret = -ENOMEM; | |
1584 | goto fail; | |
1585 | } | |
1d4c08e0 DS |
1586 | key.objectid = BTRFS_ORPHAN_OBJECTID; |
1587 | key.type = BTRFS_ORPHAN_ITEM_KEY; | |
56e9357a | 1588 | key.offset = objectid; |
1d4c08e0 DS |
1589 | |
1590 | ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); | |
381cf658 | 1591 | btrfs_free_path(path); |
d68fc57b YZ |
1592 | if (ret < 0) |
1593 | goto fail; | |
1594 | if (ret == 0) | |
27cdeb70 | 1595 | set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state); |
d68fc57b | 1596 | |
cb517eab | 1597 | ret = btrfs_insert_fs_root(fs_info, root); |
0f7d52f4 | 1598 | if (ret) { |
00246528 | 1599 | btrfs_put_root(root); |
4785e24f | 1600 | if (ret == -EEXIST) |
4df27c4d | 1601 | goto again; |
4df27c4d | 1602 | goto fail; |
0f7d52f4 | 1603 | } |
edbd8d4e | 1604 | return root; |
4df27c4d | 1605 | fail: |
8c38938c | 1606 | btrfs_put_root(root); |
4df27c4d | 1607 | return ERR_PTR(ret); |
edbd8d4e CM |
1608 | } |
1609 | ||
2dfb1e43 QW |
1610 | /* |
1611 | * Get in-memory reference of a root structure | |
1612 | * | |
1613 | * @objectid: tree objectid | |
1614 | * @check_ref: if set, verify that the tree exists and the item has at least | |
1615 | * one reference | |
1616 | */ | |
1617 | struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info, | |
1618 | u64 objectid, bool check_ref) | |
1619 | { | |
1620 | return btrfs_get_root_ref(fs_info, objectid, 0, check_ref); | |
1621 | } | |
1622 | ||
1623 | /* | |
1624 | * Get in-memory reference of a root structure, created as new, optionally pass | |
1625 | * the anonymous block device id | |
1626 | * | |
1627 | * @objectid: tree objectid | |
1628 | * @anon_dev: if zero, allocate a new anonymous block device; otherwise use | |
1629 | * the passed-in value | |
1630 | */ | |
1631 | struct btrfs_root *btrfs_get_new_fs_root(struct btrfs_fs_info *fs_info, | |
1632 | u64 objectid, dev_t anon_dev) | |
1633 | { | |
1634 | return btrfs_get_root_ref(fs_info, objectid, anon_dev, true); | |
1635 | } | |
1636 | ||
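As a usage illustration (a sketch assuming kernel context, the surrounding btrfs headers, and a made-up subvolume id), a caller grabs a subvolume root through btrfs_get_fs_root(), uses it, and drops the reference with btrfs_put_root():

/* Illustrative only: look up subvolume 257 and release it again. */
static int example_use_subvol_root(struct btrfs_fs_info *fs_info)
{
        struct btrfs_root *root;

        root = btrfs_get_fs_root(fs_info, 257ULL, true);
        if (IS_ERR(root))
                return PTR_ERR(root);

        /* ... work with root->root_item, start transactions, etc. ... */

        btrfs_put_root(root);
        return 0;
}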
49d11bea JB |
1637 | /* |
1638 | * btrfs_get_fs_root_commit_root - return a root for the given objectid | |
1639 | * @fs_info: the fs_info | |
1640 | * @objectid: the objectid we need to lookup | |
1641 | * | |
1642 | * This is exclusively used for backref walking, and exists specifically because | |
1643 | * of how qgroups do lookups. Qgroups will do a backref lookup at delayed ref | |
1644 | * creation time, which means we may have to read the tree_root in order to look | |
1645 | * up a fs root that is not in memory. If the root is not in memory we will | |
1646 | * read the tree root commit root and look up the fs root from there. This is a | |
1647 | * temporary root, it will not be inserted into the radix tree as it doesn't | |
1648 | * temporary root; it will not be inserted into the radix tree as it doesn't | |
1649 | * have the most up-to-date information, and it'll simply be discarded once the | |
1650 | */ | |
1651 | struct btrfs_root *btrfs_get_fs_root_commit_root(struct btrfs_fs_info *fs_info, | |
1652 | struct btrfs_path *path, | |
1653 | u64 objectid) | |
1654 | { | |
1655 | struct btrfs_root *root; | |
1656 | struct btrfs_key key; | |
1657 | ||
1658 | ASSERT(path->search_commit_root && path->skip_locking); | |
1659 | ||
1660 | /* | |
1661 | * This can return -ENOENT if we ask for a root that doesn't exist, but | |
1662 | * since this is called via the backref walking code we won't be looking | |
1663 | * up a root that doesn't exist, unless there's corruption. So if root | |
1664 | * != NULL just return it. | |
1665 | */ | |
1666 | root = btrfs_get_global_root(fs_info, objectid); | |
1667 | if (root) | |
1668 | return root; | |
1669 | ||
1670 | root = btrfs_lookup_fs_root(fs_info, objectid); | |
1671 | if (root) | |
1672 | return root; | |
1673 | ||
1674 | key.objectid = objectid; | |
1675 | key.type = BTRFS_ROOT_ITEM_KEY; | |
1676 | key.offset = (u64)-1; | |
1677 | root = read_tree_root_path(fs_info->tree_root, path, &key); | |
1678 | btrfs_release_path(path); | |
1679 | ||
1680 | return root; | |
1681 | } | |
1682 | ||
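A sketch of how the backref code is expected to call this (assuming kernel context and the surrounding btrfs headers; the caller name is illustrative): the path must already be set up for a commit-root, lock-free search, and the returned root is dropped rather than cached:

/* Illustrative only: resolve a root for backref walking. */
static int example_backref_lookup_root(struct btrfs_fs_info *fs_info,
                                       u64 root_objectid)
{
        struct btrfs_path *path;
        struct btrfs_root *root;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
        path->search_commit_root = 1;
        path->skip_locking = 1;

        root = btrfs_get_fs_root_commit_root(fs_info, path, root_objectid);
        btrfs_free_path(path);
        if (IS_ERR(root))
                return PTR_ERR(root);

        /* ... walk backrefs using root ... */

        btrfs_put_root(root);
        return 0;
}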
8b712842 CM |
1683 | /* |
1684 | * called by the kthread helper functions to finally call the bio end_io | |
1685 | * functions. This is where read checksum verification actually happens | |
1686 | */ | |
1687 | static void end_workqueue_fn(struct btrfs_work *work) | |
ce9adaa5 | 1688 | { |
ce9adaa5 | 1689 | struct bio *bio; |
97eb6b69 | 1690 | struct btrfs_end_io_wq *end_io_wq; |
ce9adaa5 | 1691 | |
97eb6b69 | 1692 | end_io_wq = container_of(work, struct btrfs_end_io_wq, work); |
8b712842 | 1693 | bio = end_io_wq->bio; |
ce9adaa5 | 1694 | |
4e4cbee9 | 1695 | bio->bi_status = end_io_wq->status; |
8b712842 CM |
1696 | bio->bi_private = end_io_wq->private; |
1697 | bio->bi_end_io = end_io_wq->end_io; | |
4246a0b6 | 1698 | bio_endio(bio); |
9be490f1 | 1699 | kmem_cache_free(btrfs_end_io_wq_cache, end_io_wq); |
44b8bd7e CM |
1700 | } |
1701 | ||
a74a4b97 CM |
1702 | static int cleaner_kthread(void *arg) |
1703 | { | |
1704 | struct btrfs_root *root = arg; | |
0b246afa | 1705 | struct btrfs_fs_info *fs_info = root->fs_info; |
d0278245 | 1706 | int again; |
a74a4b97 | 1707 | |
d6fd0ae2 | 1708 | while (1) { |
d0278245 | 1709 | again = 0; |
a74a4b97 | 1710 | |
fd340d0f JB |
1711 | set_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags); |
1712 | ||
d0278245 | 1713 | /* Make the cleaner go to sleep early. */ |
2ff7e61e | 1714 | if (btrfs_need_cleaner_sleep(fs_info)) |
d0278245 MX |
1715 | goto sleep; |
1716 | ||
90c711ab ZB |
1717 | /* |
1718 | * Do not do anything if we might cause open_ctree() to block | |
1719 | * before we have finished mounting the filesystem. | |
1720 | */ | |
0b246afa | 1721 | if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags)) |
90c711ab ZB |
1722 | goto sleep; |
1723 | ||
0b246afa | 1724 | if (!mutex_trylock(&fs_info->cleaner_mutex)) |
d0278245 MX |
1725 | goto sleep; |
1726 | ||
dc7f370c MX |
1727 | /* |
1728 | * Avoid the case where the status of the fs changes between | |
1729 | * the check above and the trylock. | |
1730 | */ | |
2ff7e61e | 1731 | if (btrfs_need_cleaner_sleep(fs_info)) { |
0b246afa | 1732 | mutex_unlock(&fs_info->cleaner_mutex); |
dc7f370c | 1733 | goto sleep; |
76dda93c | 1734 | } |
a74a4b97 | 1735 | |
2ff7e61e | 1736 | btrfs_run_delayed_iputs(fs_info); |
c2d6cb16 | 1737 | |
d0278245 | 1738 | again = btrfs_clean_one_deleted_snapshot(root); |
0b246afa | 1739 | mutex_unlock(&fs_info->cleaner_mutex); |
d0278245 MX |
1740 | |
1741 | /* | |
05323cd1 MX |
1742 | * The defragger has dealt with the R/O remount and umount, so we | |
1743 | * needn't do anything special here. | |
d0278245 | 1744 | */ |
0b246afa | 1745 | btrfs_run_defrag_inodes(fs_info); |
67c5e7d4 FM |
1746 | |
1747 | /* | |
1748 | * Acquires fs_info->delete_unused_bgs_mutex to avoid racing | |
1749 | * with relocation (btrfs_relocate_chunk) and relocation | |
1750 | * acquires fs_info->cleaner_mutex (btrfs_relocate_block_group) | |
1751 | * after acquiring fs_info->delete_unused_bgs_mutex. So we | |
1752 | * can't hold, nor need to, fs_info->cleaner_mutex when deleting | |
1753 | * unused block groups. | |
1754 | */ | |
0b246afa | 1755 | btrfs_delete_unused_bgs(fs_info); |
d0278245 | 1756 | sleep: |
fd340d0f | 1757 | clear_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags); |
d6fd0ae2 OS |
1758 | if (kthread_should_park()) |
1759 | kthread_parkme(); | |
1760 | if (kthread_should_stop()) | |
1761 | return 0; | |
838fe188 | 1762 | if (!again) { |
a74a4b97 | 1763 | set_current_state(TASK_INTERRUPTIBLE); |
d6fd0ae2 | 1764 | schedule(); |
a74a4b97 CM |
1765 | __set_current_state(TASK_RUNNING); |
1766 | } | |
da288d28 | 1767 | } |
a74a4b97 CM |
1768 | } |
1769 | ||
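The cleaner loop above relies on a common shape: test a cheap condition without the lock, try to take the lock without blocking, then re-test under the lock because the state may have changed in between. A minimal sketch of that shape with a plain kernel mutex (names are illustrative, not btrfs API):

#include <linux/mutex.h>
#include <linux/types.h>

/* Illustrative only: the check -> trylock -> re-check shape. */
static bool example_try_do_work(struct mutex *lock, bool (*need_sleep)(void))
{
        if (need_sleep())               /* cheap check, lock not held */
                return false;
        if (!mutex_trylock(lock))       /* don't block if someone else holds it */
                return false;
        if (need_sleep()) {             /* state may have changed, re-check */
                mutex_unlock(lock);
                return false;
        }
        /* ... do the real work under the lock ... */
        mutex_unlock(lock);
        return true;
}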
1770 | static int transaction_kthread(void *arg) | |
1771 | { | |
1772 | struct btrfs_root *root = arg; | |
0b246afa | 1773 | struct btrfs_fs_info *fs_info = root->fs_info; |
a74a4b97 CM |
1774 | struct btrfs_trans_handle *trans; |
1775 | struct btrfs_transaction *cur; | |
8929ecfa | 1776 | u64 transid; |
a944442c | 1777 | time64_t now; |
a74a4b97 | 1778 | unsigned long delay; |
914b2007 | 1779 | bool cannot_commit; |
a74a4b97 CM |
1780 | |
1781 | do { | |
914b2007 | 1782 | cannot_commit = false; |
0b246afa JM |
1783 | delay = HZ * fs_info->commit_interval; |
1784 | mutex_lock(&fs_info->transaction_kthread_mutex); | |
a74a4b97 | 1785 | |
0b246afa JM |
1786 | spin_lock(&fs_info->trans_lock); |
1787 | cur = fs_info->running_transaction; | |
a74a4b97 | 1788 | if (!cur) { |
0b246afa | 1789 | spin_unlock(&fs_info->trans_lock); |
a74a4b97 CM |
1790 | goto sleep; |
1791 | } | |
31153d81 | 1792 | |
afd48513 | 1793 | now = ktime_get_seconds(); |
3296bf56 | 1794 | if (cur->state < TRANS_STATE_COMMIT_START && |
8b87dc17 | 1795 | (now < cur->start_time || |
0b246afa JM |
1796 | now - cur->start_time < fs_info->commit_interval)) { |
1797 | spin_unlock(&fs_info->trans_lock); | |
a74a4b97 CM |
1798 | delay = HZ * 5; |
1799 | goto sleep; | |
1800 | } | |
8929ecfa | 1801 | transid = cur->transid; |
0b246afa | 1802 | spin_unlock(&fs_info->trans_lock); |
56bec294 | 1803 | |
79787eaa | 1804 | /* If the file system is aborted, this will always fail. */ |
354aa0fb | 1805 | trans = btrfs_attach_transaction(root); |
914b2007 | 1806 | if (IS_ERR(trans)) { |
354aa0fb MX |
1807 | if (PTR_ERR(trans) != -ENOENT) |
1808 | cannot_commit = true; | |
79787eaa | 1809 | goto sleep; |
914b2007 | 1810 | } |
8929ecfa | 1811 | if (transid == trans->transid) { |
3a45bb20 | 1812 | btrfs_commit_transaction(trans); |
8929ecfa | 1813 | } else { |
3a45bb20 | 1814 | btrfs_end_transaction(trans); |
8929ecfa | 1815 | } |
a74a4b97 | 1816 | sleep: |
0b246afa JM |
1817 | wake_up_process(fs_info->cleaner_kthread); |
1818 | mutex_unlock(&fs_info->transaction_kthread_mutex); | |
a74a4b97 | 1819 | |
4e121c06 | 1820 | if (unlikely(test_bit(BTRFS_FS_STATE_ERROR, |
0b246afa | 1821 | &fs_info->fs_state))) |
2ff7e61e | 1822 | btrfs_cleanup_transaction(fs_info); |
ce63f891 | 1823 | if (!kthread_should_stop() && |
0b246afa | 1824 | (!btrfs_transaction_blocked(fs_info) || |
ce63f891 | 1825 | cannot_commit)) |
bc5511d0 | 1826 | schedule_timeout_interruptible(delay); |
a74a4b97 CM |
1827 | } while (!kthread_should_stop()); |
1828 | return 0; | |
1829 | } | |
1830 | ||
af31f5e5 | 1831 | /* |
01f0f9da NB |
1832 | * This will find the highest generation in the array of root backups. The | |
1833 | * index of the newest backup slot is returned, or -EINVAL if we can't find | |
1834 | * anything. | |
af31f5e5 CM |
1835 | * |
1836 | * We check to make sure the array is valid by comparing the | |
1837 | * generation of the latest root in the array with the generation | |
1838 | * in the super block. If they don't match we pitch it. | |
1839 | */ | |
01f0f9da | 1840 | static int find_newest_super_backup(struct btrfs_fs_info *info) |
af31f5e5 | 1841 | { |
01f0f9da | 1842 | const u64 newest_gen = btrfs_super_generation(info->super_copy); |
af31f5e5 | 1843 | u64 cur; |
af31f5e5 CM |
1844 | struct btrfs_root_backup *root_backup; |
1845 | int i; | |
1846 | ||
1847 | for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) { | |
1848 | root_backup = info->super_copy->super_roots + i; | |
1849 | cur = btrfs_backup_tree_root_gen(root_backup); | |
1850 | if (cur == newest_gen) | |
01f0f9da | 1851 | return i; |
af31f5e5 CM |
1852 | } |
1853 | ||
01f0f9da | 1854 | return -EINVAL; |
af31f5e5 CM |
1855 | } |
1856 | ||
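The scan above only accepts a backup slot whose recorded tree-root generation equals the superblock generation; anything else means the backup array can't be trusted. A standalone illustration of the same selection logic (plain userspace C; the array values are made up, and NUM_BACKUP_ROOTS mirrors BTRFS_NUM_BACKUP_ROOTS, which is 4):

#include <stdio.h>

#define NUM_BACKUP_ROOTS 4

/* Return the slot whose generation matches, or -1 if none does. */
static int newest_backup(const unsigned long long gens[],
                         unsigned long long super_gen)
{
        for (int i = 0; i < NUM_BACKUP_ROOTS; i++)
                if (gens[i] == super_gen)
                        return i;
        return -1;
}

int main(void)
{
        unsigned long long gens[NUM_BACKUP_ROOTS] = { 97, 98, 99, 96 };

        printf("newest slot: %d\n", newest_backup(gens, 99));  /* prints 2 */
        return 0;
}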
af31f5e5 CM |
1857 | /* |
1858 | * copy all the root pointers into the super backup array. | |
1859 | * this will bump the backup pointer by one when it is | |
1860 | * done | |
1861 | */ | |
1862 | static void backup_super_roots(struct btrfs_fs_info *info) | |
1863 | { | |
6ef108dd | 1864 | const int next_backup = info->backup_root_index; |
af31f5e5 | 1865 | struct btrfs_root_backup *root_backup; |
af31f5e5 CM |
1866 | |
1867 | root_backup = info->super_for_commit->super_roots + next_backup; | |
1868 | ||
1869 | /* | |
1870 | * make sure all of our padding and empty slots get zero filled | |
1871 | * regardless of which ones we use today | |
1872 | */ | |
1873 | memset(root_backup, 0, sizeof(*root_backup)); | |
1874 | ||
1875 | info->backup_root_index = (next_backup + 1) % BTRFS_NUM_BACKUP_ROOTS; | |
1876 | ||
1877 | btrfs_set_backup_tree_root(root_backup, info->tree_root->node->start); | |
1878 | btrfs_set_backup_tree_root_gen(root_backup, | |
1879 | btrfs_header_generation(info->tree_root->node)); | |
1880 | ||
1881 | btrfs_set_backup_tree_root_level(root_backup, | |
1882 | btrfs_header_level(info->tree_root->node)); | |
1883 | ||
1884 | btrfs_set_backup_chunk_root(root_backup, info->chunk_root->node->start); | |
1885 | btrfs_set_backup_chunk_root_gen(root_backup, | |
1886 | btrfs_header_generation(info->chunk_root->node)); | |
1887 | btrfs_set_backup_chunk_root_level(root_backup, | |
1888 | btrfs_header_level(info->chunk_root->node)); | |
1889 | ||
1890 | btrfs_set_backup_extent_root(root_backup, info->extent_root->node->start); | |
1891 | btrfs_set_backup_extent_root_gen(root_backup, | |
1892 | btrfs_header_generation(info->extent_root->node)); | |
1893 | btrfs_set_backup_extent_root_level(root_backup, | |
1894 | btrfs_header_level(info->extent_root->node)); | |
1895 | ||
7c7e82a7 CM |
1896 | /* |
1897 | * we might commit during log recovery, which happens before we set | |
1898 | * the fs_root. Make sure it is valid before we fill it in. | |
1899 | */ | |
1900 | if (info->fs_root && info->fs_root->node) { | |
1901 | btrfs_set_backup_fs_root(root_backup, | |
1902 | info->fs_root->node->start); | |
1903 | btrfs_set_backup_fs_root_gen(root_backup, | |
af31f5e5 | 1904 | btrfs_header_generation(info->fs_root->node)); |
7c7e82a7 | 1905 | btrfs_set_backup_fs_root_level(root_backup, |
af31f5e5 | 1906 | btrfs_header_level(info->fs_root->node)); |
7c7e82a7 | 1907 | } |
af31f5e5 CM |
1908 | |
1909 | btrfs_set_backup_dev_root(root_backup, info->dev_root->node->start); | |
1910 | btrfs_set_backup_dev_root_gen(root_backup, | |
1911 | btrfs_header_generation(info->dev_root->node)); | |
1912 | btrfs_set_backup_dev_root_level(root_backup, | |
1913 | btrfs_header_level(info->dev_root->node)); | |
1914 | ||
1915 | btrfs_set_backup_csum_root(root_backup, info->csum_root->node->start); | |
1916 | btrfs_set_backup_csum_root_gen(root_backup, | |
1917 | btrfs_header_generation(info->csum_root->node)); | |
1918 | btrfs_set_backup_csum_root_level(root_backup, | |
1919 | btrfs_header_level(info->csum_root->node)); | |
1920 | ||
1921 | btrfs_set_backup_total_bytes(root_backup, | |
1922 | btrfs_super_total_bytes(info->super_copy)); | |
1923 | btrfs_set_backup_bytes_used(root_backup, | |
1924 | btrfs_super_bytes_used(info->super_copy)); | |
1925 | btrfs_set_backup_num_devices(root_backup, | |
1926 | btrfs_super_num_devices(info->super_copy)); | |
1927 | ||
1928 | /* | |
1929 | * if we don't copy this out to the super_copy, it won't get remembered | |
1930 | * for the next commit | |
1931 | */ | |
1932 | memcpy(&info->super_copy->super_roots, | |
1933 | &info->super_for_commit->super_roots, | |
1934 | sizeof(*root_backup) * BTRFS_NUM_BACKUP_ROOTS); | |
1935 | } | |
1936 | ||
bd2336b2 NB |
1937 | /* |
1938 | * read_backup_root - Reads a backup root based on the passed priority. Prio 0 | |
1939 | * is the newest, prio 1/2/3 are 2nd newest/3rd newest/4th (oldest) backup roots | |
1940 | * | |
1941 | * fs_info - filesystem whose backup roots need to be read | |
1942 | * priority - priority of backup root required | |
1943 | * | |
1944 | * Returns backup root index on success and -EINVAL otherwise. | |
1945 | */ | |
1946 | static int read_backup_root(struct btrfs_fs_info *fs_info, u8 priority) | |
1947 | { | |
1948 | int backup_index = find_newest_super_backup(fs_info); | |
1949 | struct btrfs_super_block *super = fs_info->super_copy; | |
1950 | struct btrfs_root_backup *root_backup; | |
1951 | ||
1952 | if (priority < BTRFS_NUM_BACKUP_ROOTS && backup_index >= 0) { | |
1953 | if (priority == 0) | |
1954 | return backup_index; | |
1955 | ||
1956 | backup_index = backup_index + BTRFS_NUM_BACKUP_ROOTS - priority; | |
1957 | backup_index %= BTRFS_NUM_BACKUP_ROOTS; | |
1958 | } else { | |
1959 | return -EINVAL; | |
1960 | } | |
1961 | ||
1962 | root_backup = super->super_roots + backup_index; | |
1963 | ||
1964 | btrfs_set_super_generation(super, | |
1965 | btrfs_backup_tree_root_gen(root_backup)); | |
1966 | btrfs_set_super_root(super, btrfs_backup_tree_root(root_backup)); | |
1967 | btrfs_set_super_root_level(super, | |
1968 | btrfs_backup_tree_root_level(root_backup)); | |
1969 | btrfs_set_super_bytes_used(super, btrfs_backup_bytes_used(root_backup)); | |
1970 | ||
1971 | /* | |
1972 | * Fixme: the total bytes and num_devices need to match, or else we | |
1973 | * need a fsck | |
1974 | */ | |
1975 | btrfs_set_super_total_bytes(super, btrfs_backup_total_bytes(root_backup)); | |
1976 | btrfs_set_super_num_devices(super, btrfs_backup_num_devices(root_backup)); | |
1977 | ||
1978 | return backup_index; | |
1979 | } | |
1980 | ||
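The index math above walks backwards from the newest slot with wrap-around: priority 1 is one slot behind the newest, priority 3 is the oldest. A standalone illustration of the same modular arithmetic (plain userspace C; NUM_BACKUP_ROOTS mirrors BTRFS_NUM_BACKUP_ROOTS):

#include <stdio.h>

#define NUM_BACKUP_ROOTS 4

/* Map (newest slot, priority) to the slot holding that backup. */
static int backup_slot(int newest, int priority)
{
        return (newest + NUM_BACKUP_ROOTS - priority) % NUM_BACKUP_ROOTS;
}

int main(void)
{
        /* With slot 2 the newest: prio 0 -> 2, 1 -> 1, 2 -> 0, 3 -> 3 (oldest). */
        for (int prio = 0; prio < NUM_BACKUP_ROOTS; prio++)
                printf("priority %d -> slot %d\n", prio, backup_slot(2, prio));
        return 0;
}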
7abadb64 LB |
1981 | /* helper to cleanup workers */ |
1982 | static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info) | |
1983 | { | |
dc6e3209 | 1984 | btrfs_destroy_workqueue(fs_info->fixup_workers); |
afe3d242 | 1985 | btrfs_destroy_workqueue(fs_info->delalloc_workers); |
5cdc7ad3 | 1986 | btrfs_destroy_workqueue(fs_info->workers); |
fccb5d86 | 1987 | btrfs_destroy_workqueue(fs_info->endio_workers); |
fccb5d86 | 1988 | btrfs_destroy_workqueue(fs_info->endio_raid56_workers); |
d05a33ac | 1989 | btrfs_destroy_workqueue(fs_info->rmw_workers); |
fccb5d86 QW |
1990 | btrfs_destroy_workqueue(fs_info->endio_write_workers); |
1991 | btrfs_destroy_workqueue(fs_info->endio_freespace_worker); | |
5b3bc44e | 1992 | btrfs_destroy_workqueue(fs_info->delayed_workers); |
e66f0bb1 | 1993 | btrfs_destroy_workqueue(fs_info->caching_workers); |
736cfa15 | 1994 | btrfs_destroy_workqueue(fs_info->readahead_workers); |
a44903ab | 1995 | btrfs_destroy_workqueue(fs_info->flush_workers); |
fc97fab0 | 1996 | btrfs_destroy_workqueue(fs_info->qgroup_rescan_workers); |
b0643e59 DZ |
1997 | if (fs_info->discard_ctl.discard_workers) |
1998 | destroy_workqueue(fs_info->discard_ctl.discard_workers); | |
a9b9477d FM |
1999 | /* |
2000 | * Now that all other work queues are destroyed, we can safely destroy | |
2001 | * the queues used for metadata I/O, since tasks from those other work | |
2002 | * queues can do metadata I/O operations. | |
2003 | */ | |
2004 | btrfs_destroy_workqueue(fs_info->endio_meta_workers); | |
2005 | btrfs_destroy_workqueue(fs_info->endio_meta_write_workers); | |
7abadb64 LB |
2006 | } |
2007 | ||
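The ordering above matters because work items running on the general-purpose queues can themselves submit metadata I/O; destroying (and thereby flushing) those queues first guarantees nothing can queue new work on the metadata endio queues while they are torn down. A minimal sketch of the same teardown rule with plain kernel workqueues (the struct and names are illustrative only):

#include <linux/workqueue.h>

struct example_queues {
        struct workqueue_struct *producers;     /* work here may queue onto 'consumers' */
        struct workqueue_struct *consumers;
};

/* Illustrative only: destroy producers before the queue they feed. */
static void example_teardown(struct example_queues *q)
{
        destroy_workqueue(q->producers);        /* drains its pending work first */
        destroy_workqueue(q->consumers);        /* now nothing can requeue onto it */
}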
2e9f5954 R |
2008 | static void free_root_extent_buffers(struct btrfs_root *root) |
2009 | { | |
2010 | if (root) { | |
2011 | free_extent_buffer(root->node); | |
2012 | free_extent_buffer(root->commit_root); | |
2013 | root->node = NULL; | |
2014 | root->commit_root = NULL; | |
2015 | } | |
2016 | } | |
2017 | ||
af31f5e5 | 2018 | /* helper to cleanup tree roots */ |
4273eaff | 2019 | static void free_root_pointers(struct btrfs_fs_info *info, bool free_chunk_root) |
af31f5e5 | 2020 | { |
2e9f5954 | 2021 | free_root_extent_buffers(info->tree_root); |
655b09fe | 2022 | |
2e9f5954 R |
2023 | free_root_extent_buffers(info->dev_root); |
2024 | free_root_extent_buffers(info->extent_root); | |
2025 | free_root_extent_buffers(info->csum_root); | |
2026 | free_root_extent_buffers(info->quota_root); | |
2027 | free_root_extent_buffers(info->uuid_root); | |
8c38938c | 2028 | free_root_extent_buffers(info->fs_root); |
aeb935a4 | 2029 | free_root_extent_buffers(info->data_reloc_root); |
4273eaff | 2030 | if (free_chunk_root) |
2e9f5954 | 2031 | free_root_extent_buffers(info->chunk_root); |
70f6d82e | 2032 | free_root_extent_buffers(info->free_space_root); |
af31f5e5 CM |
2033 | } |
2034 | ||
8c38938c JB |
2035 | void btrfs_put_root(struct btrfs_root *root) |
2036 | { | |
2037 | if (!root) | |
2038 | return; | |
2039 | ||
2040 | if (refcount_dec_and_test(&root->refs)) { | |
2041 | WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree)); | |
1dae7e0e | 2042 | WARN_ON(test_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state)); |
8c38938c JB |
2043 | if (root->anon_dev) |
2044 | free_anon_bdev(root->anon_dev); | |
2045 | btrfs_drew_lock_destroy(&root->snapshot_lock); | |
923eb523 | 2046 | free_root_extent_buffers(root); |
8c38938c JB |
2047 | kfree(root->free_ino_ctl); |
2048 | kfree(root->free_ino_pinned); | |
2049 | #ifdef CONFIG_BTRFS_DEBUG | |
2050 | spin_lock(&root->fs_info->fs_roots_radix_lock); | |
2051 | list_del_init(&root->leak_list); | |
2052 | spin_unlock(&root->fs_info->fs_roots_radix_lock); | |
2053 | #endif | |
2054 | kfree(root); | |
2055 | } | |
2056 | } | |
2057 | ||
faa2dbf0 | 2058 | void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info) |
171f6537 JB |
2059 | { |
2060 | int ret; | |
2061 | struct btrfs_root *gang[8]; | |
2062 | int i; | |
2063 | ||
2064 | while (!list_empty(&fs_info->dead_roots)) { | |
2065 | gang[0] = list_entry(fs_info->dead_roots.next, | |
2066 | struct btrfs_root, root_list); | |
2067 | list_del(&gang[0]->root_list); | |
2068 | ||
8c38938c | 2069 | if (test_bit(BTRFS_ROOT_IN_RADIX, &gang[0]->state)) |
cb517eab | 2070 | btrfs_drop_and_free_fs_root(fs_info, gang[0]); |
dc9492c1 | 2071 | btrfs_put_root(gang[0]); |
171f6537 JB |
2072 | } |
2073 | ||
2074 | while (1) { | |
2075 | ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix, | |
2076 | (void **)gang, 0, | |
2077 | ARRAY_SIZE(gang)); | |
2078 | if (!ret) | |
2079 | break; | |
2080 | for (i = 0; i < ret; i++) | |
cb517eab | 2081 | btrfs_drop_and_free_fs_root(fs_info, gang[i]); |
171f6537 JB |
2082 | } |
2083 | } | |
af31f5e5 | 2084 | |
638aa7ed ES |
2085 | static void btrfs_init_scrub(struct btrfs_fs_info *fs_info) |
2086 | { | |
2087 | mutex_init(&fs_info->scrub_lock); | |
2088 | atomic_set(&fs_info->scrubs_running, 0); | |
2089 | atomic_set(&fs_info->scrub_pause_req, 0); | |
2090 | atomic_set(&fs_info->scrubs_paused, 0); | |
2091 | atomic_set(&fs_info->scrub_cancel_req, 0); | |
2092 | init_waitqueue_head(&fs_info->scrub_pause_wait); | |
ff09c4ca | 2093 | refcount_set(&fs_info->scrub_workers_refcnt, 0); |
638aa7ed ES |
2094 | } |
2095 | ||
779a65a4 ES |
2096 | static void btrfs_init_balance(struct btrfs_fs_info *fs_info) |
2097 | { | |
2098 | spin_lock_init(&fs_info->balance_lock); | |
2099 | mutex_init(&fs_info->balance_mutex); | |
779a65a4 ES |
2100 | atomic_set(&fs_info->balance_pause_req, 0); |
2101 | atomic_set(&fs_info->balance_cancel_req, 0); | |
2102 | fs_info->balance_ctl = NULL; | |
2103 | init_waitqueue_head(&fs_info->balance_wait_q); | |
2104 | } | |
2105 | ||
6bccf3ab | 2106 | static void btrfs_init_btree_inode(struct btrfs_fs_info *fs_info) |
f37938e0 | 2107 | { |
2ff7e61e JM |
2108 | struct inode *inode = fs_info->btree_inode; |
2109 | ||
2110 | inode->i_ino = BTRFS_BTREE_INODE_OBJECTID; | |
2111 | set_nlink(inode, 1); | |
f37938e0 ES |
2112 | /* |
2113 | * We set the i_size on the btree inode to the max possible offset. | |
2114 | * The real end of the address space is determined by all of | |
2115 | * the devices in the system. | |
2116 | */ | |
2ff7e61e JM |
2117 | inode->i_size = OFFSET_MAX; |
2118 | inode->i_mapping->a_ops = &btree_aops; | |
f37938e0 | 2119 | |
2ff7e61e | 2120 | RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node); |
43eb5f29 | 2121 | extent_io_tree_init(fs_info, &BTRFS_I(inode)->io_tree, |
2c53a14d | 2122 | IO_TREE_BTREE_INODE_IO, inode); |
7b439738 | 2123 | BTRFS_I(inode)->io_tree.track_uptodate = false; |
2ff7e61e | 2124 | extent_map_tree_init(&BTRFS_I(inode)->extent_tree); |
f37938e0 | 2125 | |
5c8fd99f | 2126 | BTRFS_I(inode)->root = btrfs_grab_root(fs_info->tree_root); |
2ff7e61e JM |
2127 | memset(&BTRFS_I(inode)->location, 0, sizeof(struct btrfs_key)); |
2128 | set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags); | |
2129 | btrfs_insert_inode_hash(inode); | |
f37938e0 ES |
2130 | } |
2131 | ||
ad618368 ES |
2132 | static void btrfs_init_dev_replace_locks(struct btrfs_fs_info *fs_info) |
2133 | { | |
ad618368 | 2134 | mutex_init(&fs_info->dev_replace.lock_finishing_cancel_unmount); |
129827e3 | 2135 | init_rwsem(&fs_info->dev_replace.rwsem); |
7f8d236a | 2136 | init_waitqueue_head(&fs_info->dev_replace.replace_wait); |
ad618368 ES |
2137 | } |
2138 | ||
f9e92e40 ES |
2139 | static void btrfs_init_qgroup(struct btrfs_fs_info *fs_info) |
2140 | { | |
2141 | spin_lock_init(&fs_info->qgroup_lock); | |
2142 | mutex_init(&fs_info->qgroup_ioctl_lock); | |
2143 | fs_info->qgroup_tree = RB_ROOT; | |
f9e92e40 ES |
2144 | INIT_LIST_HEAD(&fs_info->dirty_qgroups); |
2145 | fs_info->qgroup_seq = 1; | |
f9e92e40 | 2146 | fs_info->qgroup_ulist = NULL; |
d2c609b8 | 2147 | fs_info->qgroup_rescan_running = false; |
f9e92e40 ES |
2148 | mutex_init(&fs_info->qgroup_rescan_lock); |
2149 | } | |
2150 | ||
2a458198 ES |
2151 | static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info, |
2152 | struct btrfs_fs_devices *fs_devices) | |
2153 | { | |
f7b885be | 2154 | u32 max_active = fs_info->thread_pool_size; |
6f011058 | 2155 | unsigned int flags = WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND; |
2a458198 ES |
2156 | |
2157 | fs_info->workers = | |
cb001095 JM |
2158 | btrfs_alloc_workqueue(fs_info, "worker", |
2159 | flags | WQ_HIGHPRI, max_active, 16); | |
2a458198 ES |
2160 | |
2161 | fs_info->delalloc_workers = | |
cb001095 JM |
2162 | btrfs_alloc_workqueue(fs_info, "delalloc", |
2163 | flags, max_active, 2); | |
2a458198 ES |
2164 | |
2165 | fs_info->flush_workers = | |
cb001095 JM |
2166 | btrfs_alloc_workqueue(fs_info, "flush_delalloc", |
2167 | flags, max_active, 0); | |
2a458198 ES |
2168 | |
2169 | fs_info->caching_workers = | |
cb001095 | 2170 | btrfs_alloc_workqueue(fs_info, "cache", flags, max_active, 0); |
2a458198 | 2171 | |
2a458198 | 2172 | fs_info->fixup_workers = |
cb001095 | 2173 | btrfs_alloc_workqueue(fs_info, "fixup", flags, 1, 0); |
2a458198 ES |
2174 | |
2175 | /* | |
2176 | * endios are largely parallel and should have a very | |
2177 | * low idle thresh | |
2178 | */ | |
2179 | fs_info->endio_workers = | |
cb001095 | 2180 | btrfs_alloc_workqueue(fs_info, "endio", flags, max_active, 4); |
2a458198 | 2181 | fs_info->endio_meta_workers = |
cb001095 JM |
2182 | btrfs_alloc_workqueue(fs_info, "endio-meta", flags, |
2183 | max_active, 4); | |
2a458198 | 2184 | fs_info->endio_meta_write_workers = |
cb001095 JM |
2185 | btrfs_alloc_workqueue(fs_info, "endio-meta-write", flags, |
2186 | max_active, 2); | |
2a458198 | 2187 | fs_info->endio_raid56_workers = |
cb001095 JM |
2188 | btrfs_alloc_workqueue(fs_info, "endio-raid56", flags, |
2189 | max_active, 4); | |
2a458198 | 2190 | fs_info->rmw_workers = |
cb001095 | 2191 | btrfs_alloc_workqueue(fs_info, "rmw", flags, max_active, 2); |
2a458198 | 2192 | fs_info->endio_write_workers = |
cb001095 JM |
2193 | btrfs_alloc_workqueue(fs_info, "endio-write", flags, |
2194 | max_active, 2); | |
2a458198 | 2195 | fs_info->endio_freespace_worker = |
cb001095 JM |
2196 | btrfs_alloc_workqueue(fs_info, "freespace-write", flags, |
2197 | max_active, 0); | |
2a458198 | 2198 | fs_info->delayed_workers = |
cb001095 JM |
2199 | btrfs_alloc_workqueue(fs_info, "delayed-meta", flags, |
2200 | max_active, 0); | |
2a458198 | 2201 | fs_info->readahead_workers = |
cb001095 JM |
2202 | btrfs_alloc_workqueue(fs_info, "readahead", flags, |
2203 | max_active, 2); | |
2a458198 | 2204 | fs_info->qgroup_rescan_workers = |
cb001095 | 2205 | btrfs_alloc_workqueue(fs_info, "qgroup-rescan", flags, 1, 0); |
b0643e59 DZ |
2206 | fs_info->discard_ctl.discard_workers = |
2207 | alloc_workqueue("btrfs_discard", WQ_UNBOUND | WQ_FREEZABLE, 1); | |
2a458198 ES |
2208 | |
2209 | if (!(fs_info->workers && fs_info->delalloc_workers && | |
ba8a9d07 | 2210 | fs_info->flush_workers && |
2a458198 ES |
2211 | fs_info->endio_workers && fs_info->endio_meta_workers && |
2212 | fs_info->endio_meta_write_workers && | |
2a458198 ES |
2213 | fs_info->endio_write_workers && fs_info->endio_raid56_workers && |
2214 | fs_info->endio_freespace_worker && fs_info->rmw_workers && | |
2215 | fs_info->caching_workers && fs_info->readahead_workers && | |
2216 | fs_info->fixup_workers && fs_info->delayed_workers && | |
b0643e59 DZ |
2217 | fs_info->qgroup_rescan_workers && |
2218 | fs_info->discard_ctl.discard_workers)) { | |
2a458198 ES |
2219 | return -ENOMEM; |
2220 | } | |
2221 | ||
2222 | return 0; | |
2223 | } | |
2224 | ||
6d97c6e3 JT |
2225 | static int btrfs_init_csum_hash(struct btrfs_fs_info *fs_info, u16 csum_type) |
2226 | { | |
2227 | struct crypto_shash *csum_shash; | |
b4e967be | 2228 | const char *csum_driver = btrfs_super_csum_driver(csum_type); |
6d97c6e3 | 2229 | |
b4e967be | 2230 | csum_shash = crypto_alloc_shash(csum_driver, 0, 0); |
6d97c6e3 JT |
2231 | |
2232 | if (IS_ERR(csum_shash)) { | |
2233 | btrfs_err(fs_info, "error allocating %s hash for checksum", | |
b4e967be | 2234 | csum_driver); |
6d97c6e3 JT |
2235 | return PTR_ERR(csum_shash); |
2236 | } | |
2237 | ||
2238 | fs_info->csum_shash = csum_shash; | |
2239 | ||
2240 | return 0; | |
2241 | } | |
2242 | ||
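Once fs_info->csum_shash is set up, checksums are computed through the kernel's synchronous hash API. A sketch of how such a tfm is typically used (assuming kernel context; the helper name, buffer and length are illustrative):

#include <crypto/hash.h>

/* Illustrative only: checksum 'len' bytes of 'data' with the fs csum tfm. */
static int example_csum_data(struct crypto_shash *tfm, const u8 *data,
                             unsigned int len, u8 *result)
{
        SHASH_DESC_ON_STACK(shash, tfm);

        shash->tfm = tfm;
        return crypto_shash_digest(shash, data, len, result);
}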
63443bf5 ES |
2243 | static int btrfs_replay_log(struct btrfs_fs_info *fs_info, |
2244 | struct btrfs_fs_devices *fs_devices) | |
2245 | { | |
2246 | int ret; | |
63443bf5 ES |
2247 | struct btrfs_root *log_tree_root; |
2248 | struct btrfs_super_block *disk_super = fs_info->super_copy; | |
2249 | u64 bytenr = btrfs_super_log_root(disk_super); | |
581c1760 | 2250 | int level = btrfs_super_log_root_level(disk_super); |
63443bf5 ES |
2251 | |
2252 | if (fs_devices->rw_devices == 0) { | |
f14d104d | 2253 | btrfs_warn(fs_info, "log replay required on RO media"); |
63443bf5 ES |
2254 | return -EIO; |
2255 | } | |
2256 | ||
96dfcb46 JB |
2257 | log_tree_root = btrfs_alloc_root(fs_info, BTRFS_TREE_LOG_OBJECTID, |
2258 | GFP_KERNEL); | |
63443bf5 ES |
2259 | if (!log_tree_root) |
2260 | return -ENOMEM; | |
2261 | ||
2ff7e61e | 2262 | log_tree_root->node = read_tree_block(fs_info, bytenr, |
581c1760 QW |
2263 | fs_info->generation + 1, |
2264 | level, NULL); | |
64c043de | 2265 | if (IS_ERR(log_tree_root->node)) { |
f14d104d | 2266 | btrfs_warn(fs_info, "failed to read log tree"); |
0eeff236 | 2267 | ret = PTR_ERR(log_tree_root->node); |
8c38938c | 2268 | log_tree_root->node = NULL; |
00246528 | 2269 | btrfs_put_root(log_tree_root); |
0eeff236 | 2270 | return ret; |
64c043de | 2271 | } else if (!extent_buffer_uptodate(log_tree_root->node)) { |
f14d104d | 2272 | btrfs_err(fs_info, "failed to read log tree"); |
00246528 | 2273 | btrfs_put_root(log_tree_root); |
63443bf5 ES |
2274 | return -EIO; |
2275 | } | |
2276 | /* returns with log_tree_root freed on success */ | |
2277 | ret = btrfs_recover_log_trees(log_tree_root); | |
2278 | if (ret) { | |
0b246afa JM |
2279 | btrfs_handle_fs_error(fs_info, ret, |
2280 | "Failed to recover log tree"); | |
00246528 | 2281 | btrfs_put_root(log_tree_root); |
63443bf5 ES |
2282 | return ret; |
2283 | } | |
2284 | ||
bc98a42c | 2285 | if (sb_rdonly(fs_info->sb)) { |
6bccf3ab | 2286 | ret = btrfs_commit_super(fs_info); |
63443bf5 ES |
2287 | if (ret) |
2288 | return ret; | |
2289 | } | |
2290 | ||
2291 | return 0; | |
2292 | } | |
2293 | ||
6bccf3ab | 2294 | static int btrfs_read_roots(struct btrfs_fs_info *fs_info) |
4bbcaa64 | 2295 | { |
6bccf3ab | 2296 | struct btrfs_root *tree_root = fs_info->tree_root; |
a4f3d2c4 | 2297 | struct btrfs_root *root; |
4bbcaa64 ES |
2298 | struct btrfs_key location; |
2299 | int ret; | |
2300 | ||
6bccf3ab JM |
2301 | BUG_ON(!fs_info->tree_root); |
2302 | ||
4bbcaa64 ES |
2303 | location.objectid = BTRFS_EXTENT_TREE_OBJECTID; |
2304 | location.type = BTRFS_ROOT_ITEM_KEY; | |
2305 | location.offset = 0; | |
2306 | ||
a4f3d2c4 | 2307 | root = btrfs_read_tree_root(tree_root, &location); |
f50f4353 LB |
2308 | if (IS_ERR(root)) { |
2309 | ret = PTR_ERR(root); | |
2310 | goto out; | |
2311 | } | |
a4f3d2c4 DS |
2312 | set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state); |
2313 | fs_info->extent_root = root; | |
4bbcaa64 ES |
2314 | |
2315 | location.objectid = BTRFS_DEV_TREE_OBJECTID; | |
a4f3d2c4 | 2316 | root = btrfs_read_tree_root(tree_root, &location); |
f50f4353 LB |
2317 | if (IS_ERR(root)) { |
2318 | ret = PTR_ERR(root); | |
2319 | goto out; | |
2320 | } | |
a4f3d2c4 DS |
2321 | set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state); |
2322 | fs_info->dev_root = root; | |
4bbcaa64 ES |
2323 | btrfs_init_devices_late(fs_info); |
2324 | ||
2325 | location.objectid = BTRFS_CSUM_TREE_OBJECTID; | |
a4f3d2c4 | 2326 | root = btrfs_read_tree_root(tree_root, &location); |
f50f4353 LB |
2327 | if (IS_ERR(root)) { |
2328 | ret = PTR_ERR(root); | |
2329 | goto out; | |
2330 | } | |
a4f3d2c4 DS |
2331 | set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state); |
2332 | fs_info->csum_root = root; | |
4bbcaa64 | 2333 | |
aeb935a4 QW |
2334 | /* |
2335 | * This tree can share blocks with some other fs tree during relocation | |
2336 | * and we need a proper setup by btrfs_get_fs_root | |
2337 | */ | |
56e9357a DS |
2338 | root = btrfs_get_fs_root(tree_root->fs_info, |
2339 | BTRFS_DATA_RELOC_TREE_OBJECTID, true); | |
aeb935a4 QW |
2340 | if (IS_ERR(root)) { |
2341 | ret = PTR_ERR(root); | |
2342 | goto out; | |
2343 | } | |
2344 | set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state); | |
2345 | fs_info->data_reloc_root = root; | |
2346 | ||
4bbcaa64 | 2347 | location.objectid = BTRFS_QUOTA_TREE_OBJECTID; |
a4f3d2c4 DS |
2348 | root = btrfs_read_tree_root(tree_root, &location); |
2349 | if (!IS_ERR(root)) { | |
2350 | set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state); | |
afcdd129 | 2351 | set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags); |
a4f3d2c4 | 2352 | fs_info->quota_root = root; |
4bbcaa64 ES |
2353 | } |
2354 | ||
2355 | location.objectid = BTRFS_UUID_TREE_OBJECTID; | |
a4f3d2c4 DS |
2356 | root = btrfs_read_tree_root(tree_root, &location); |
2357 | if (IS_ERR(root)) { | |
2358 | ret = PTR_ERR(root); | |
4bbcaa64 | 2359 | if (ret != -ENOENT) |
f50f4353 | 2360 | goto out; |
4bbcaa64 | 2361 | } else { |
a4f3d2c4 DS |
2362 | set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state); |
2363 | fs_info->uuid_root = root; | |
4bbcaa64 ES |
2364 | } |
2365 | ||
70f6d82e OS |
2366 | if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) { |
2367 | location.objectid = BTRFS_FREE_SPACE_TREE_OBJECTID; | |
2368 | root = btrfs_read_tree_root(tree_root, &location); | |
f50f4353 LB |
2369 | if (IS_ERR(root)) { |
2370 | ret = PTR_ERR(root); | |
2371 | goto out; | |
2372 | } | |
70f6d82e OS |
2373 | set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state); |
2374 | fs_info->free_space_root = root; | |
2375 | } | |
2376 | ||
4bbcaa64 | 2377 | return 0; |
f50f4353 LB |
2378 | out: |
2379 | btrfs_warn(fs_info, "failed to read root (objectid=%llu): %d", | |
2380 | location.objectid, ret); | |
2381 | return ret; | |
4bbcaa64 ES |
2382 | } |
2383 | ||
069ec957 QW |
2384 | /* |
2385 | * Real super block validation | |
2386 | * NOTE: super csum type and incompat features will not be checked here. | |
2387 | * | |
2388 | * @sb: super block to check | |
2389 | * @mirror_num: the super block number to check its bytenr: | |
2390 | * 0 the primary (1st) sb | |
2391 | * 1, 2 2nd and 3rd backup copy | |
2392 | * -1 skip bytenr check | |
2393 | */ | |
2394 | static int validate_super(struct btrfs_fs_info *fs_info, | |
2395 | struct btrfs_super_block *sb, int mirror_num) | |
21a852b0 | 2396 | { |
21a852b0 QW |
2397 | u64 nodesize = btrfs_super_nodesize(sb); |
2398 | u64 sectorsize = btrfs_super_sectorsize(sb); | |
2399 | int ret = 0; | |
2400 | ||
2401 | if (btrfs_super_magic(sb) != BTRFS_MAGIC) { | |
2402 | btrfs_err(fs_info, "no valid FS found"); | |
2403 | ret = -EINVAL; | |
2404 | } | |
2405 | if (btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP) { | |
2406 | btrfs_err(fs_info, "unrecognized or unsupported super flag: %llu", | |
2407 | btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP); | |
2408 | ret = -EINVAL; | |
2409 | } | |
2410 | if (btrfs_super_root_level(sb) >= BTRFS_MAX_LEVEL) { | |
2411 | btrfs_err(fs_info, "tree_root level too big: %d >= %d", | |
2412 | btrfs_super_root_level(sb), BTRFS_MAX_LEVEL); | |
2413 | ret = -EINVAL; | |
2414 | } | |
2415 | if (btrfs_super_chunk_root_level(sb) >= BTRFS_MAX_LEVEL) { | |
2416 | btrfs_err(fs_info, "chunk_root level too big: %d >= %d", | |
2417 | btrfs_super_chunk_root_level(sb), BTRFS_MAX_LEVEL); | |
2418 | ret = -EINVAL; | |
2419 | } | |
2420 | if (btrfs_super_log_root_level(sb) >= BTRFS_MAX_LEVEL) { | |
2421 | btrfs_err(fs_info, "log_root level too big: %d >= %d", | |
2422 | btrfs_super_log_root_level(sb), BTRFS_MAX_LEVEL); | |
2423 | ret = -EINVAL; | |
2424 | } | |
2425 | ||
2426 | /* | |
2427 | * Check sectorsize and nodesize first; other checks will need them. | |
2428 | * Check all possible sectorsizes (4K, 8K, 16K, 32K, 64K) here. | |
2429 | */ | |
2430 | if (!is_power_of_2(sectorsize) || sectorsize < 4096 || | |
2431 | sectorsize > BTRFS_MAX_METADATA_BLOCKSIZE) { | |
2432 | btrfs_err(fs_info, "invalid sectorsize %llu", sectorsize); | |
2433 | ret = -EINVAL; | |
2434 | } | |
2435 | /* Only PAGE_SIZE is supported so far */ | |
2436 | if (sectorsize != PAGE_SIZE) { | |
2437 | btrfs_err(fs_info, | |
2438 | "sectorsize %llu not supported yet, only support %lu", | |
2439 | sectorsize, PAGE_SIZE); | |
2440 | ret = -EINVAL; | |
2441 | } | |
2442 | if (!is_power_of_2(nodesize) || nodesize < sectorsize || | |
2443 | nodesize > BTRFS_MAX_METADATA_BLOCKSIZE) { | |
2444 | btrfs_err(fs_info, "invalid nodesize %llu", nodesize); | |
2445 | ret = -EINVAL; | |
2446 | } | |
2447 | if (nodesize != le32_to_cpu(sb->__unused_leafsize)) { | |
2448 | btrfs_err(fs_info, "invalid leafsize %u, should be %llu", | |
2449 | le32_to_cpu(sb->__unused_leafsize), nodesize); | |
2450 | ret = -EINVAL; | |
2451 | } | |
2452 | ||
2453 | /* Root alignment check */ | |
2454 | if (!IS_ALIGNED(btrfs_super_root(sb), sectorsize)) { | |
2455 | btrfs_warn(fs_info, "tree_root block unaligned: %llu", | |
2456 | btrfs_super_root(sb)); | |
2457 | ret = -EINVAL; | |
2458 | } | |
2459 | if (!IS_ALIGNED(btrfs_super_chunk_root(sb), sectorsize)) { | |
2460 | btrfs_warn(fs_info, "chunk_root block unaligned: %llu", | |
2461 | btrfs_super_chunk_root(sb)); | |
2462 | ret = -EINVAL; | |
2463 | } | |
2464 | if (!IS_ALIGNED(btrfs_super_log_root(sb), sectorsize)) { | |
2465 | btrfs_warn(fs_info, "log_root block unaligned: %llu", | |
2466 | btrfs_super_log_root(sb)); | |
2467 | ret = -EINVAL; | |
2468 | } | |
2469 | ||
de37aa51 | 2470 | if (memcmp(fs_info->fs_devices->metadata_uuid, sb->dev_item.fsid, |
7239ff4b | 2471 | BTRFS_FSID_SIZE) != 0) { |
21a852b0 | 2472 | btrfs_err(fs_info, |
7239ff4b | 2473 | "dev_item UUID does not match metadata fsid: %pU != %pU", |
de37aa51 | 2474 | fs_info->fs_devices->metadata_uuid, sb->dev_item.fsid); |
21a852b0 QW |
2475 | ret = -EINVAL; |
2476 | } | |
2477 | ||
2478 | /* | |
2479 | * Hint to catch really bogus numbers, bit flips and the like; more exact | |
2480 | * checks are done later | |
2481 | */ | |
2482 | if (btrfs_super_bytes_used(sb) < 6 * btrfs_super_nodesize(sb)) { | |
2483 | btrfs_err(fs_info, "bytes_used is too small %llu", | |
2484 | btrfs_super_bytes_used(sb)); | |
2485 | ret = -EINVAL; | |
2486 | } | |
2487 | if (!is_power_of_2(btrfs_super_stripesize(sb))) { | |
2488 | btrfs_err(fs_info, "invalid stripesize %u", | |
2489 | btrfs_super_stripesize(sb)); | |
2490 | ret = -EINVAL; | |
2491 | } | |
2492 | if (btrfs_super_num_devices(sb) > (1UL << 31)) | |
2493 | btrfs_warn(fs_info, "suspicious number of devices: %llu", | |
2494 | btrfs_super_num_devices(sb)); | |
2495 | if (btrfs_super_num_devices(sb) == 0) { | |
2496 | btrfs_err(fs_info, "number of devices is 0"); | |
2497 | ret = -EINVAL; | |
2498 | } | |
2499 | ||
069ec957 QW |
2500 | if (mirror_num >= 0 && |
2501 | btrfs_super_bytenr(sb) != btrfs_sb_offset(mirror_num)) { | |
21a852b0 QW |
2502 | btrfs_err(fs_info, "super offset mismatch %llu != %u", |
2503 | btrfs_super_bytenr(sb), BTRFS_SUPER_INFO_OFFSET); | |
2504 | ret = -EINVAL; | |
2505 | } | |
2506 | ||
2507 | /* | |
2508 | * Obvious sys_chunk_array corruptions, it must hold at least one key | |
2509 | * and one chunk | |
2510 | */ | |
2511 | if (btrfs_super_sys_array_size(sb) > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) { | |
2512 | btrfs_err(fs_info, "system chunk array too big %u > %u", | |
2513 | btrfs_super_sys_array_size(sb), | |
2514 | BTRFS_SYSTEM_CHUNK_ARRAY_SIZE); | |
2515 | ret = -EINVAL; | |
2516 | } | |
2517 | if (btrfs_super_sys_array_size(sb) < sizeof(struct btrfs_disk_key) | |
2518 | + sizeof(struct btrfs_chunk)) { | |
2519 | btrfs_err(fs_info, "system chunk array too small %u < %zu", | |
2520 | btrfs_super_sys_array_size(sb), | |
2521 | sizeof(struct btrfs_disk_key) | |
2522 | + sizeof(struct btrfs_chunk)); | |
2523 | ret = -EINVAL; | |
2524 | } | |
2525 | ||
2526 | /* | |
2527 | * The generation is a global counter; we'll trust it more than the others, | |
2528 | * but it's still possible that it's the one that's wrong. | |
2529 | */ | |
2530 | if (btrfs_super_generation(sb) < btrfs_super_chunk_root_generation(sb)) | |
2531 | btrfs_warn(fs_info, | |
2532 | "suspicious: generation < chunk_root_generation: %llu < %llu", | |
2533 | btrfs_super_generation(sb), | |
2534 | btrfs_super_chunk_root_generation(sb)); | |
2535 | if (btrfs_super_generation(sb) < btrfs_super_cache_generation(sb) | |
2536 | && btrfs_super_cache_generation(sb) != (u64)-1) | |
2537 | btrfs_warn(fs_info, | |
2538 | "suspicious: generation < cache_generation: %llu < %llu", | |
2539 | btrfs_super_generation(sb), | |
2540 | btrfs_super_cache_generation(sb)); | |
2541 | ||
2542 | return ret; | |
2543 | } | |
2544 | ||
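Note the structure of validate_super(): instead of returning at the first bad field it keeps going, logging every problem and only remembering that ret became -EINVAL, so a single mount attempt reports all detectable corruption at once. A standalone illustration of that accumulate-and-report shape (plain userspace C with made-up fields, not btrfs code):

#include <stdio.h>
#include <errno.h>

struct fake_super {
        unsigned long long sectorsize;
        unsigned long long nodesize;
};

/* Check every field, report each failure, return -EINVAL if any failed. */
static int validate(const struct fake_super *sb)
{
        int ret = 0;

        if (sb->sectorsize == 0 || (sb->sectorsize & (sb->sectorsize - 1))) {
                fprintf(stderr, "invalid sectorsize %llu\n", sb->sectorsize);
                ret = -EINVAL;
        }
        if (sb->nodesize < sb->sectorsize) {
                fprintf(stderr, "invalid nodesize %llu\n", sb->nodesize);
                ret = -EINVAL;
        }
        return ret;
}

int main(void)
{
        struct fake_super bad = { .sectorsize = 3000, .nodesize = 0 };

        return validate(&bad) ? 1 : 0;  /* prints both complaints */
}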
069ec957 QW |
2545 | /* |
2546 | * Validation of super block at mount time. | |
2547 | * Some checks that were already done early at mount time, like csum type | |
2548 | * and incompat flags, will be skipped. | |
2549 | */ | |
2550 | static int btrfs_validate_mount_super(struct btrfs_fs_info *fs_info) | |
2551 | { | |
2552 | return validate_super(fs_info, fs_info->super_copy, 0); | |
2553 | } | |
2554 | ||
75cb857d QW |
2555 | /* |
2556 | * Validation of super block at write time. | |
2557 | * Some checks, like the bytenr check, will be skipped as their values will be | |
2558 | * overwritten soon. | |
2559 | * Extra checks like csum type and incompat flags will be done here. | |
2560 | */ | |
2561 | static int btrfs_validate_write_super(struct btrfs_fs_info *fs_info, | |
2562 | struct btrfs_super_block *sb) | |
2563 | { | |
2564 | int ret; | |
2565 | ||
2566 | ret = validate_super(fs_info, sb, -1); | |
2567 | if (ret < 0) | |
2568 | goto out; | |
e7e16f48 | 2569 | if (!btrfs_supported_super_csum(btrfs_super_csum_type(sb))) { |
75cb857d QW |
2570 | ret = -EUCLEAN; |
2571 | btrfs_err(fs_info, "invalid csum type, has %u want %u", | |
2572 | btrfs_super_csum_type(sb), BTRFS_CSUM_TYPE_CRC32); | |
2573 | goto out; | |
2574 | } | |
2575 | if (btrfs_super_incompat_flags(sb) & ~BTRFS_FEATURE_INCOMPAT_SUPP) { | |
2576 | ret = -EUCLEAN; | |
2577 | btrfs_err(fs_info, | |
2578 | "invalid incompat flags, has 0x%llx valid mask 0x%llx", | |
2579 | btrfs_super_incompat_flags(sb), | |
2580 | (unsigned long long)BTRFS_FEATURE_INCOMPAT_SUPP); | |
2581 | goto out; | |
2582 | } | |
2583 | out: | |
2584 | if (ret < 0) | |
2585 | btrfs_err(fs_info, | |
2586 | "super block corruption detected before writing it to disk"); | |
2587 | return ret; | |
2588 | } | |
2589 | ||
6ef108dd | 2590 | static int __cold init_tree_roots(struct btrfs_fs_info *fs_info) |
b8522a1e | 2591 | { |
6ef108dd | 2592 | int backup_index = find_newest_super_backup(fs_info); |
b8522a1e NB |
2593 | struct btrfs_super_block *sb = fs_info->super_copy; |
2594 | struct btrfs_root *tree_root = fs_info->tree_root; | |
2595 | bool handle_error = false; | |
2596 | int ret = 0; | |
2597 | int i; | |
2598 | ||
2599 | for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) { | |
2600 | u64 generation; | |
2601 | int level; | |
2602 | ||
2603 | if (handle_error) { | |
2604 | if (!IS_ERR(tree_root->node)) | |
2605 | free_extent_buffer(tree_root->node); | |
2606 | tree_root->node = NULL; | |
2607 | ||
2608 | if (!btrfs_test_opt(fs_info, USEBACKUPROOT)) | |
2609 | break; | |
2610 | ||
2611 | free_root_pointers(fs_info, 0); | |
2612 | ||
2613 | /* | |
2614 | * Don't use the log in recovery mode, it won't be | |
2615 | * valid | |
2616 | */ | |
2617 | btrfs_set_super_log_root(sb, 0); | |
2618 | ||
2619 | /* We can't trust the free space cache either */ | |
2620 | btrfs_set_opt(fs_info->mount_opt, CLEAR_CACHE); | |
2621 | ||
2622 | ret = read_backup_root(fs_info, i); | |
6ef108dd | 2623 | backup_index = ret; |
b8522a1e NB |
2624 | if (ret < 0) |
2625 | return ret; | |
2626 | } | |
2627 | generation = btrfs_super_generation(sb); | |
2628 | level = btrfs_super_root_level(sb); | |
2629 | tree_root->node = read_tree_block(fs_info, btrfs_super_root(sb), | |
2630 | generation, level, NULL); | |
217f5004 | 2631 | if (IS_ERR(tree_root->node)) { |
b8522a1e | 2632 | handle_error = true; |
217f5004 NB |
2633 | ret = PTR_ERR(tree_root->node); |
2634 | tree_root->node = NULL; | |
2635 | btrfs_warn(fs_info, "couldn't read tree root"); | |
2636 | continue; | |
b8522a1e | 2637 | |
217f5004 NB |
2638 | } else if (!extent_buffer_uptodate(tree_root->node)) { |
2639 | handle_error = true; | |
2640 | ret = -EIO; | |
2641 | btrfs_warn(fs_info, "error while reading tree root"); | |
b8522a1e NB |
2642 | continue; |
2643 | } | |
2644 | ||
2645 | btrfs_set_root_node(&tree_root->root_item, tree_root->node); | |
2646 | tree_root->commit_root = btrfs_root_node(tree_root); | |
2647 | btrfs_set_root_refs(&tree_root->root_item, 1); | |
2648 | ||
336a0d8d NB |
2649 | /* |
2650 | * No need to hold btrfs_root::objectid_mutex since the fs | |
2651 | * hasn't been fully initialised and we are the only user | |
2652 | */ | |
b8522a1e NB |
2653 | ret = btrfs_find_highest_objectid(tree_root, |
2654 | &tree_root->highest_objectid); | |
2655 | if (ret < 0) { | |
b8522a1e NB |
2656 | handle_error = true; |
2657 | continue; | |
2658 | } | |
2659 | ||
2660 | ASSERT(tree_root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID); | |
b8522a1e NB |
2661 | |
2662 | ret = btrfs_read_roots(fs_info); | |
2663 | if (ret < 0) { | |
2664 | handle_error = true; | |
2665 | continue; | |
2666 | } | |
2667 | ||
2668 | /* All successful */ | |
2669 | fs_info->generation = generation; | |
2670 | fs_info->last_trans_committed = generation; | |
6ef108dd NB |
2671 | |
2672 | /* Always begin writing backup roots after the one being used */ | |
2673 | if (backup_index < 0) { | |
2674 | fs_info->backup_root_index = 0; | |
2675 | } else { | |
2676 | fs_info->backup_root_index = backup_index + 1; | |
2677 | fs_info->backup_root_index %= BTRFS_NUM_BACKUP_ROOTS; | |
2678 | } | |
b8522a1e NB |
2679 | break; |
2680 | } | |
2681 | ||
2682 | return ret; | |
2683 | } | |
2684 | ||
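The loop above implements a simple retry ladder: try the roots the superblock points at; on failure, and only if USEBACKUPROOT is set, reset the in-memory state, pick the next-older backup and try again, for at most BTRFS_NUM_BACKUP_ROOTS attempts. A much-simplified standalone sketch of the control flow (plain userspace C; the read step is simulated and the state resets, log and cache handling are omitted):

#include <stdio.h>
#include <stdbool.h>

#define NUM_BACKUP_ROOTS 4

/* Simulated read: pretend only the backup at attempt 2 is readable. */
static bool read_roots(int attempt)
{
        return attempt == 2;
}

/* Try the primary roots first, then successively older backups. */
static int mount_tree_roots(bool use_backup_root)
{
        for (int i = 0; i < NUM_BACKUP_ROOTS; i++) {
                if (i > 0 && !use_backup_root)
                        return -1;      /* give up after the primary attempt */
                if (read_roots(i)) {
                        printf("mounted using %s\n",
                               i ? "a backup root" : "the primary root");
                        return 0;
                }
        }
        return -1;
}

int main(void)
{
        return mount_tree_roots(true) ? 1 : 0;
}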
8260edba | 2685 | void btrfs_init_fs_info(struct btrfs_fs_info *fs_info) |
2e635a27 | 2686 | { |
76dda93c | 2687 | INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC); |
f28491e0 | 2688 | INIT_RADIX_TREE(&fs_info->buffer_radix, GFP_ATOMIC); |
8fd17795 | 2689 | INIT_LIST_HEAD(&fs_info->trans_list); |
facda1e7 | 2690 | INIT_LIST_HEAD(&fs_info->dead_roots); |
24bbcf04 | 2691 | INIT_LIST_HEAD(&fs_info->delayed_iputs); |
eb73c1b7 | 2692 | INIT_LIST_HEAD(&fs_info->delalloc_roots); |
11833d66 | 2693 | INIT_LIST_HEAD(&fs_info->caching_block_groups); |
eb73c1b7 | 2694 | spin_lock_init(&fs_info->delalloc_root_lock); |
a4abeea4 | 2695 | spin_lock_init(&fs_info->trans_lock); |
76dda93c | 2696 | spin_lock_init(&fs_info->fs_roots_radix_lock); |
24bbcf04 | 2697 | spin_lock_init(&fs_info->delayed_iput_lock); |
4cb5300b | 2698 | spin_lock_init(&fs_info->defrag_inodes_lock); |
ceda0864 | 2699 | spin_lock_init(&fs_info->super_lock); |
f28491e0 | 2700 | spin_lock_init(&fs_info->buffer_lock); |
47ab2a6c | 2701 | spin_lock_init(&fs_info->unused_bgs_lock); |
f29021b2 | 2702 | rwlock_init(&fs_info->tree_mod_log_lock); |
d7c15171 | 2703 | mutex_init(&fs_info->unused_bg_unpin_mutex); |
67c5e7d4 | 2704 | mutex_init(&fs_info->delete_unused_bgs_mutex); |
7585717f | 2705 | mutex_init(&fs_info->reloc_mutex); |
573bfb72 | 2706 | mutex_init(&fs_info->delalloc_root_mutex); |
de98ced9 | 2707 | seqlock_init(&fs_info->profiles_lock); |
19c00ddc | 2708 | |
0b86a832 | 2709 | INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots); |
6324fbf3 | 2710 | INIT_LIST_HEAD(&fs_info->space_info); |
f29021b2 | 2711 | INIT_LIST_HEAD(&fs_info->tree_mod_seq_list); |
47ab2a6c | 2712 | INIT_LIST_HEAD(&fs_info->unused_bgs); |
bd647ce3 JB |
2713 | #ifdef CONFIG_BTRFS_DEBUG |
2714 | INIT_LIST_HEAD(&fs_info->allocated_roots); | |
3fd63727 JB |
2715 | INIT_LIST_HEAD(&fs_info->allocated_ebs); |
2716 | spin_lock_init(&fs_info->eb_leak_lock); | |
bd647ce3 | 2717 | #endif |
c8bf1b67 | 2718 | extent_map_tree_init(&fs_info->mapping_tree); |
66d8f3dd MX |
2719 | btrfs_init_block_rsv(&fs_info->global_block_rsv, |
2720 | BTRFS_BLOCK_RSV_GLOBAL); | |
66d8f3dd MX |
2721 | btrfs_init_block_rsv(&fs_info->trans_block_rsv, BTRFS_BLOCK_RSV_TRANS); |
2722 | btrfs_init_block_rsv(&fs_info->chunk_block_rsv, BTRFS_BLOCK_RSV_CHUNK); | |
2723 | btrfs_init_block_rsv(&fs_info->empty_block_rsv, BTRFS_BLOCK_RSV_EMPTY); | |
2724 | btrfs_init_block_rsv(&fs_info->delayed_block_rsv, | |
2725 | BTRFS_BLOCK_RSV_DELOPS); | |
ba2c4d4e JB |
2726 | btrfs_init_block_rsv(&fs_info->delayed_refs_rsv, |
2727 | BTRFS_BLOCK_RSV_DELREFS); | |
2728 | ||
771ed689 | 2729 | atomic_set(&fs_info->async_delalloc_pages, 0); |
4cb5300b | 2730 | atomic_set(&fs_info->defrag_running, 0); |
2fefd558 | 2731 | atomic_set(&fs_info->reada_works_cnt, 0); |
034f784d | 2732 | atomic_set(&fs_info->nr_delayed_iputs, 0); |
fc36ed7e | 2733 | atomic64_set(&fs_info->tree_mod_seq, 0); |
95ac567a | 2734 | fs_info->max_inline = BTRFS_DEFAULT_MAX_INLINE; |
9ed74f2d | 2735 | fs_info->metadata_ratio = 0; |
4cb5300b | 2736 | fs_info->defrag_inodes = RB_ROOT; |
a5ed45f8 | 2737 | atomic64_set(&fs_info->free_chunk_space, 0); |
f29021b2 | 2738 | fs_info->tree_mod_log = RB_ROOT; |
8b87dc17 | 2739 | fs_info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL; |
f8c269d7 | 2740 | fs_info->avg_delayed_ref_runtime = NSEC_PER_SEC >> 6; /* div by 64 */ |
90519d66 | 2741 | /* readahead state */ |
d0164adc | 2742 | INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_DIRECT_RECLAIM); |
90519d66 | 2743 | spin_lock_init(&fs_info->reada_lock); |
fd708b81 | 2744 | btrfs_init_ref_verify(fs_info); |
c8b97818 | 2745 | |
b34b086c CM |
2746 | fs_info->thread_pool_size = min_t(unsigned long, |
2747 | num_online_cpus() + 2, 8); | |
0afbaf8c | 2748 | |
199c2a9c MX |
2749 | INIT_LIST_HEAD(&fs_info->ordered_roots); |
2750 | spin_lock_init(&fs_info->ordered_root_lock); | |
69fe2d75 | 2751 | |
638aa7ed | 2752 | btrfs_init_scrub(fs_info); |
21adbd5c SB |
2753 | #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY |
2754 | fs_info->check_integrity_print_mask = 0; | |
2755 | #endif | |
779a65a4 | 2756 | btrfs_init_balance(fs_info); |
57056740 | 2757 | btrfs_init_async_reclaim_work(fs_info); |
a2de733c | 2758 | |
0f9dd46c | 2759 | spin_lock_init(&fs_info->block_group_cache_lock); |
6bef4d31 | 2760 | fs_info->block_group_cache_tree = RB_ROOT; |
a1897fdd | 2761 | fs_info->first_logical_byte = (u64)-1; |
0f9dd46c | 2762 | |
fe119a6e NB |
2763 | extent_io_tree_init(fs_info, &fs_info->excluded_extents, |
2764 | IO_TREE_FS_EXCLUDED_EXTENTS, NULL); | |
afcdd129 | 2765 | set_bit(BTRFS_FS_BARRIER, &fs_info->flags); |
39279cc3 | 2766 | |
5a3f23d5 | 2767 | mutex_init(&fs_info->ordered_operations_mutex); |
e02119d5 | 2768 | mutex_init(&fs_info->tree_log_mutex); |
925baedd | 2769 | mutex_init(&fs_info->chunk_mutex); |
a74a4b97 CM |
2770 | mutex_init(&fs_info->transaction_kthread_mutex); |
2771 | mutex_init(&fs_info->cleaner_mutex); | |
1bbc621e | 2772 | mutex_init(&fs_info->ro_block_group_mutex); |
9e351cc8 | 2773 | init_rwsem(&fs_info->commit_root_sem); |
c71bf099 | 2774 | init_rwsem(&fs_info->cleanup_work_sem); |
76dda93c | 2775 | init_rwsem(&fs_info->subvol_sem); |
803b2f54 | 2776 | sema_init(&fs_info->uuid_tree_rescan_sem, 1); |
fa9c0d79 | 2777 | |
ad618368 | 2778 | btrfs_init_dev_replace_locks(fs_info); |
f9e92e40 | 2779 | btrfs_init_qgroup(fs_info); |
b0643e59 | 2780 | btrfs_discard_init(fs_info); |
416ac51d | 2781 | |
fa9c0d79 CM |
2782 | btrfs_init_free_cluster(&fs_info->meta_alloc_cluster); |
2783 | btrfs_init_free_cluster(&fs_info->data_alloc_cluster); | |
2784 | ||
e6dcd2dc | 2785 | init_waitqueue_head(&fs_info->transaction_throttle); |
f9295749 | 2786 | init_waitqueue_head(&fs_info->transaction_wait); |
bb9c12c9 | 2787 | init_waitqueue_head(&fs_info->transaction_blocked_wait); |
4854ddd0 | 2788 | init_waitqueue_head(&fs_info->async_submit_wait); |
034f784d | 2789 | init_waitqueue_head(&fs_info->delayed_iputs_wait); |
3768f368 | 2790 | |
da17066c JM |
2791 | /* Usable values until the real ones are cached from the superblock */ |
2792 | fs_info->nodesize = 4096; | |
2793 | fs_info->sectorsize = 4096; | |
2794 | fs_info->stripesize = 4096; | |
2795 | ||
eede2bf3 OS |
2796 | spin_lock_init(&fs_info->swapfile_pins_lock); |
2797 | fs_info->swapfile_pins = RB_ROOT; | |
2798 | ||
9e967495 | 2799 | fs_info->send_in_progress = 0; |
8260edba JB |
2800 | } |
2801 | ||
2802 | static int init_mount_fs_info(struct btrfs_fs_info *fs_info, struct super_block *sb) | |
2803 | { | |
2804 | int ret; | |
2805 | ||
2806 | fs_info->sb = sb; | |
2807 | sb->s_blocksize = BTRFS_BDEV_BLOCKSIZE; | |
2808 | sb->s_blocksize_bits = blksize_bits(BTRFS_BDEV_BLOCKSIZE); | |
9e967495 | 2809 | |
ae18c37a JB |
2810 | ret = percpu_counter_init(&fs_info->dio_bytes, 0, GFP_KERNEL); |
2811 | if (ret) | |
c75e8394 | 2812 | return ret; |
ae18c37a JB |
2813 | |
2814 | ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0, GFP_KERNEL); | |
2815 | if (ret) | |
c75e8394 | 2816 | return ret; |
ae18c37a JB |
2817 | |
2818 | fs_info->dirty_metadata_batch = PAGE_SIZE * | |
2819 | (1 + ilog2(nr_cpu_ids)); | |
2820 | ||
2821 | ret = percpu_counter_init(&fs_info->delalloc_bytes, 0, GFP_KERNEL); | |
2822 | if (ret) | |
c75e8394 | 2823 | return ret; |
ae18c37a JB |
2824 | |
2825 | ret = percpu_counter_init(&fs_info->dev_replace.bio_counter, 0, | |
2826 | GFP_KERNEL); | |
2827 | if (ret) | |
c75e8394 | 2828 | return ret; |
ae18c37a JB |
2829 | |
2830 | fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root), | |
2831 | GFP_KERNEL); | |
c75e8394 JB |
2832 | if (!fs_info->delayed_root) |
2833 | return -ENOMEM; | |
ae18c37a JB |
2834 | btrfs_init_delayed_root(fs_info->delayed_root); |
2835 | ||
c75e8394 | 2836 | return btrfs_alloc_stripe_hash_table(fs_info); |
ae18c37a JB |
2837 | } |
2838 | ||
97f4dd09 NB |
2839 | static int btrfs_uuid_rescan_kthread(void *data) |
2840 | { | |
2841 | struct btrfs_fs_info *fs_info = (struct btrfs_fs_info *)data; | |
2842 | int ret; | |
2843 | ||
2844 | /* | |
2845 | * 1st step is to iterate through the existing UUID tree and | |
2846 | * to delete all entries that contain outdated data. | |
2847 | * 2nd step is to add all missing entries to the UUID tree. | |
2848 | */ | |
2849 | ret = btrfs_uuid_tree_iterate(fs_info); | |
2850 | if (ret < 0) { | |
c94bec2c JB |
2851 | if (ret != -EINTR) |
2852 | btrfs_warn(fs_info, "iterating uuid_tree failed %d", | |
2853 | ret); | |
97f4dd09 NB |
2854 | up(&fs_info->uuid_tree_rescan_sem); |
2855 | return ret; | |
2856 | } | |
2857 | return btrfs_uuid_scan_kthread(data); | |
2858 | } | |
2859 | ||
2860 | static int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info) | |
2861 | { | |
2862 | struct task_struct *task; | |
2863 | ||
2864 | down(&fs_info->uuid_tree_rescan_sem); | |
2865 | task = kthread_run(btrfs_uuid_rescan_kthread, fs_info, "btrfs-uuid"); | |
2866 | if (IS_ERR(task)) { | |
2867 | /* fs_info->update_uuid_tree_gen remains 0 in all error cases */ | |
2868 | btrfs_warn(fs_info, "failed to start uuid_rescan task"); | |
2869 | up(&fs_info->uuid_tree_rescan_sem); | |
2870 | return PTR_ERR(task); | |
2871 | } | |
2872 | ||
2873 | return 0; | |
2874 | } | |
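
btrfs_check_uuid_tree() takes uuid_tree_rescan_sem before starting the rescan task, the task releases it when it is done (the error path in btrfs_uuid_rescan_kthread() above does the same), and close_ctree() later waits for any in-flight rescan with a plain down()/up() pair. A rough userspace analogue of that handshake, using POSIX semaphores and pthreads with made-up names:

/* illustrative sketch only, userspace analogue of the semaphore handshake */
#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>
#include <unistd.h>

static sem_t rescan_sem;		/* stands in for uuid_tree_rescan_sem */

static void *rescan_worker(void *arg)
{
	(void)arg;
	sleep(1);			/* pretend to walk the UUID tree */
	printf("rescan finished\n");
	sem_post(&rescan_sem);		/* like up() when the kthread is done */
	return NULL;
}

int main(void)
{
	pthread_t t;

	sem_init(&rescan_sem, 0, 1);

	sem_wait(&rescan_sem);		/* down() before starting the worker */
	if (pthread_create(&t, NULL, rescan_worker, NULL) != 0) {
		sem_post(&rescan_sem);	/* error path: release right away */
		return 1;
	}

	/* "unmount": wait until any ongoing rescan has released the sem */
	sem_wait(&rescan_sem);
	sem_post(&rescan_sem);
	printf("safe to tear down\n");

	pthread_join(t, NULL);
	return 0;
}

Build with cc -pthread; the final sem_wait()/sem_post() pair is the same trick close_ctree() uses further down to wait for a rescan that may or may not be running.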
2875 | ||
ae18c37a JB |
2876 | int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_devices, |
2877 | char *options) | |
2878 | { | |
2879 | u32 sectorsize; | |
2880 | u32 nodesize; | |
2881 | u32 stripesize; | |
2882 | u64 generation; | |
2883 | u64 features; | |
2884 | u16 csum_type; | |
ae18c37a JB |
2885 | struct btrfs_super_block *disk_super; |
2886 | struct btrfs_fs_info *fs_info = btrfs_sb(sb); | |
2887 | struct btrfs_root *tree_root; | |
2888 | struct btrfs_root *chunk_root; | |
2889 | int ret; | |
2890 | int err = -EINVAL; | |
2891 | int clear_free_space_tree = 0; | |
2892 | int level; | |
2893 | ||
8260edba | 2894 | ret = init_mount_fs_info(fs_info, sb); |
53b381b3 | 2895 | if (ret) { |
83c8266a | 2896 | err = ret; |
ae18c37a | 2897 | goto fail; |
53b381b3 DW |
2898 | } |
2899 | ||
ae18c37a JB |
2900 | /* These need to be init'ed before we start creating inodes and such. */ |
2901 | tree_root = btrfs_alloc_root(fs_info, BTRFS_ROOT_TREE_OBJECTID, | |
2902 | GFP_KERNEL); | |
2903 | fs_info->tree_root = tree_root; | |
2904 | chunk_root = btrfs_alloc_root(fs_info, BTRFS_CHUNK_TREE_OBJECTID, | |
2905 | GFP_KERNEL); | |
2906 | fs_info->chunk_root = chunk_root; | |
2907 | if (!tree_root || !chunk_root) { | |
2908 | err = -ENOMEM; | |
c75e8394 | 2909 | goto fail; |
ae18c37a JB |
2910 | } |
2911 | ||
2912 | fs_info->btree_inode = new_inode(sb); | |
2913 | if (!fs_info->btree_inode) { | |
2914 | err = -ENOMEM; | |
c75e8394 | 2915 | goto fail; |
ae18c37a JB |
2916 | } |
2917 | mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS); | |
2918 | btrfs_init_btree_inode(fs_info); | |
2919 | ||
3c4bb26b | 2920 | invalidate_bdev(fs_devices->latest_bdev); |
1104a885 DS |
2921 | |
2922 | /* | |
2923 | * Read super block and check the signature bytes only | |
2924 | */ | |
8f32380d JT |
2925 | disk_super = btrfs_read_dev_super(fs_devices->latest_bdev); |
2926 | if (IS_ERR(disk_super)) { | |
2927 | err = PTR_ERR(disk_super); | |
16cdcec7 | 2928 | goto fail_alloc; |
20b45077 | 2929 | } |
39279cc3 | 2930 | |
8dc3f22c | 2931 | /* |
260db43c | 2932 | * Verify the checksum type first; if that or the checksum value is |
8dc3f22c JT |
2933 | * corrupted, we'll find out |
2934 | */ | |
8f32380d | 2935 | csum_type = btrfs_super_csum_type(disk_super); |
51bce6c9 | 2936 | if (!btrfs_supported_super_csum(csum_type)) { |
8dc3f22c | 2937 | btrfs_err(fs_info, "unsupported checksum algorithm: %u", |
51bce6c9 | 2938 | csum_type); |
8dc3f22c | 2939 | err = -EINVAL; |
8f32380d | 2940 | btrfs_release_disk_super(disk_super); |
8dc3f22c JT |
2941 | goto fail_alloc; |
2942 | } | |
2943 | ||
6d97c6e3 JT |
2944 | ret = btrfs_init_csum_hash(fs_info, csum_type); |
2945 | if (ret) { | |
2946 | err = ret; | |
8f32380d | 2947 | btrfs_release_disk_super(disk_super); |
6d97c6e3 JT |
2948 | goto fail_alloc; |
2949 | } | |
2950 | ||
1104a885 DS |
2951 | /* |
2952 | * We want to check superblock checksum, the type is stored inside. | |
2953 | * Pass the whole disk block of size BTRFS_SUPER_INFO_SIZE (4k). | |
2954 | */ | |
8f32380d | 2955 | if (btrfs_check_super_csum(fs_info, (u8 *)disk_super)) { |
05135f59 | 2956 | btrfs_err(fs_info, "superblock checksum mismatch"); |
1104a885 | 2957 | err = -EINVAL; |
8f32380d | 2958 | btrfs_release_disk_super(disk_super); |
141386e1 | 2959 | goto fail_alloc; |
1104a885 DS |
2960 | } |
2961 | ||
2962 | /* | |
2963 | * super_copy is zeroed at allocation time and we never touch the | |
2964 | * following bytes up to INFO_SIZE, the checksum is calculated from | |
2965 | * the whole block of INFO_SIZE | |
2966 | */ | |
8f32380d JT |
2967 | memcpy(fs_info->super_copy, disk_super, sizeof(*fs_info->super_copy)); |
2968 | btrfs_release_disk_super(disk_super); | |
5f39d397 | 2969 | |
fbc6feae NB |
2970 | disk_super = fs_info->super_copy; |
2971 | ||
de37aa51 NB |
2972 | ASSERT(!memcmp(fs_info->fs_devices->fsid, fs_info->super_copy->fsid, |
2973 | BTRFS_FSID_SIZE)); | |
2974 | ||
7239ff4b | 2975 | if (btrfs_fs_incompat(fs_info, METADATA_UUID)) { |
de37aa51 NB |
2976 | ASSERT(!memcmp(fs_info->fs_devices->metadata_uuid, |
2977 | fs_info->super_copy->metadata_uuid, | |
2978 | BTRFS_FSID_SIZE)); | |
7239ff4b | 2979 | } |
0b86a832 | 2980 | |
fbc6feae NB |
2981 | features = btrfs_super_flags(disk_super); |
2982 | if (features & BTRFS_SUPER_FLAG_CHANGING_FSID_V2) { | |
2983 | features &= ~BTRFS_SUPER_FLAG_CHANGING_FSID_V2; | |
2984 | btrfs_set_super_flags(disk_super, features); | |
2985 | btrfs_info(fs_info, | |
2986 | "found metadata UUID change in progress flag, clearing"); | |
2987 | } | |
2988 | ||
2989 | memcpy(fs_info->super_for_commit, fs_info->super_copy, | |
2990 | sizeof(*fs_info->super_for_commit)); | |
de37aa51 | 2991 | |
069ec957 | 2992 | ret = btrfs_validate_mount_super(fs_info); |
1104a885 | 2993 | if (ret) { |
05135f59 | 2994 | btrfs_err(fs_info, "superblock contains fatal errors"); |
1104a885 | 2995 | err = -EINVAL; |
141386e1 | 2996 | goto fail_alloc; |
1104a885 DS |
2997 | } |
2998 | ||
0f7d52f4 | 2999 | if (!btrfs_super_root(disk_super)) |
141386e1 | 3000 | goto fail_alloc; |
0f7d52f4 | 3001 | |
acce952b | 3002 | /* check FS state, whether FS is broken. */ |
87533c47 MX |
3003 | if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_ERROR) |
3004 | set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state); | |
acce952b | 3005 | |
75e7cb7f LB |
3006 | /* |
3007 | * In the long term, we'll store the compression type in the super | |
3008 | * block, and it'll be used for per file compression control. | |
3009 | */ | |
3010 | fs_info->compress_type = BTRFS_COMPRESS_ZLIB; | |
3011 | ||
2ff7e61e | 3012 | ret = btrfs_parse_options(fs_info, options, sb->s_flags); |
2b82032c YZ |
3013 | if (ret) { |
3014 | err = ret; | |
141386e1 | 3015 | goto fail_alloc; |
2b82032c | 3016 | } |
dfe25020 | 3017 | |
f2b636e8 JB |
3018 | features = btrfs_super_incompat_flags(disk_super) & |
3019 | ~BTRFS_FEATURE_INCOMPAT_SUPP; | |
3020 | if (features) { | |
05135f59 DS |
3021 | btrfs_err(fs_info, |
3022 | "cannot mount because of unsupported optional features (%llx)", | |
3023 | features); | |
f2b636e8 | 3024 | err = -EINVAL; |
141386e1 | 3025 | goto fail_alloc; |
f2b636e8 JB |
3026 | } |
3027 | ||
5d4f98a2 | 3028 | features = btrfs_super_incompat_flags(disk_super); |
a6fa6fae | 3029 | features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF; |
0b246afa | 3030 | if (fs_info->compress_type == BTRFS_COMPRESS_LZO) |
a6fa6fae | 3031 | features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO; |
5c1aab1d NT |
3032 | else if (fs_info->compress_type == BTRFS_COMPRESS_ZSTD) |
3033 | features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_ZSTD; | |
727011e0 | 3034 | |
3173a18f | 3035 | if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA) |
05135f59 | 3036 | btrfs_info(fs_info, "has skinny extents"); |
3173a18f | 3037 | |
727011e0 CM |
3038 | /* |
3039 | * flag our filesystem as having big metadata blocks if | |
3040 | * they are bigger than the page size | |
3041 | */ | |
09cbfeaf | 3042 | if (btrfs_super_nodesize(disk_super) > PAGE_SIZE) { |
727011e0 | 3043 | if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA)) |
05135f59 DS |
3044 | btrfs_info(fs_info, |
3045 | "flagging fs with big metadata feature"); | |
727011e0 CM |
3046 | features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA; |
3047 | } | |
3048 | ||
bc3f116f | 3049 | nodesize = btrfs_super_nodesize(disk_super); |
bc3f116f | 3050 | sectorsize = btrfs_super_sectorsize(disk_super); |
b7f67055 | 3051 | stripesize = sectorsize; |
707e8a07 | 3052 | fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids)); |
963d678b | 3053 | fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids)); |
bc3f116f | 3054 | |
da17066c JM |
3055 | /* Cache block sizes */ |
3056 | fs_info->nodesize = nodesize; | |
3057 | fs_info->sectorsize = sectorsize; | |
3058 | fs_info->stripesize = stripesize; | |
3059 | ||
bc3f116f CM |
3060 | /* |
3061 | * mixed block groups end up with duplicate but slightly offset | |
3062 | * extent buffers for the same range. This leads to corruption. | |
3063 | */ | |
3064 | if ((features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) && | |
707e8a07 | 3065 | (sectorsize != nodesize)) { |
05135f59 DS |
3066 | btrfs_err(fs_info, |
3067 | "unequal nodesize/sectorsize (%u != %u) are not allowed for mixed block groups", | |
3068 | nodesize, sectorsize); | |
141386e1 | 3069 | goto fail_alloc; |
bc3f116f CM |
3070 | } |
3071 | ||
ceda0864 MX |
3072 | /* |
3073 | * No need to take the lock because there is no other task that will |
3074 | * update the flag. | |
3075 | */ | |
a6fa6fae | 3076 | btrfs_set_super_incompat_flags(disk_super, features); |
5d4f98a2 | 3077 | |
f2b636e8 JB |
3078 | features = btrfs_super_compat_ro_flags(disk_super) & |
3079 | ~BTRFS_FEATURE_COMPAT_RO_SUPP; | |
bc98a42c | 3080 | if (!sb_rdonly(sb) && features) { |
05135f59 DS |
3081 | btrfs_err(fs_info, |
3082 | "cannot mount read-write because of unsupported optional features (%llx)", | |
c1c9ff7c | 3083 | features); |
f2b636e8 | 3084 | err = -EINVAL; |
141386e1 | 3085 | goto fail_alloc; |
f2b636e8 | 3086 | } |
61d92c32 | 3087 | |
2a458198 ES |
3088 | ret = btrfs_init_workqueues(fs_info, fs_devices); |
3089 | if (ret) { | |
3090 | err = ret; | |
0dc3b84a JB |
3091 | goto fail_sb_buffer; |
3092 | } | |
4543df7e | 3093 | |
9e11ceee JK |
3094 | sb->s_bdi->ra_pages *= btrfs_super_num_devices(disk_super); |
3095 | sb->s_bdi->ra_pages = max(sb->s_bdi->ra_pages, SZ_4M / PAGE_SIZE); | |
4575c9cc | 3096 | |
a061fc8d CM |
3097 | sb->s_blocksize = sectorsize; |
3098 | sb->s_blocksize_bits = blksize_bits(sectorsize); | |
de37aa51 | 3099 | memcpy(&sb->s_uuid, fs_info->fs_devices->fsid, BTRFS_FSID_SIZE); |
db94535d | 3100 | |
925baedd | 3101 | mutex_lock(&fs_info->chunk_mutex); |
6bccf3ab | 3102 | ret = btrfs_read_sys_array(fs_info); |
925baedd | 3103 | mutex_unlock(&fs_info->chunk_mutex); |
84eed90f | 3104 | if (ret) { |
05135f59 | 3105 | btrfs_err(fs_info, "failed to read the system array: %d", ret); |
5d4f98a2 | 3106 | goto fail_sb_buffer; |
84eed90f | 3107 | } |
0b86a832 | 3108 | |
84234f3a | 3109 | generation = btrfs_super_chunk_root_generation(disk_super); |
581c1760 | 3110 | level = btrfs_super_chunk_root_level(disk_super); |
0b86a832 | 3111 | |
2ff7e61e | 3112 | chunk_root->node = read_tree_block(fs_info, |
0b86a832 | 3113 | btrfs_super_chunk_root(disk_super), |
581c1760 | 3114 | generation, level, NULL); |
64c043de LB |
3115 | if (IS_ERR(chunk_root->node) || |
3116 | !extent_buffer_uptodate(chunk_root->node)) { | |
05135f59 | 3117 | btrfs_err(fs_info, "failed to read chunk root"); |
e5fffbac | 3118 | if (!IS_ERR(chunk_root->node)) |
3119 | free_extent_buffer(chunk_root->node); | |
95ab1f64 | 3120 | chunk_root->node = NULL; |
af31f5e5 | 3121 | goto fail_tree_roots; |
83121942 | 3122 | } |
5d4f98a2 YZ |
3123 | btrfs_set_root_node(&chunk_root->root_item, chunk_root->node); |
3124 | chunk_root->commit_root = btrfs_root_node(chunk_root); | |
0b86a832 | 3125 | |
e17cade2 | 3126 | read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid, |
c4ac7541 DS |
3127 | offsetof(struct btrfs_header, chunk_tree_uuid), |
3128 | BTRFS_UUID_SIZE); | |
e17cade2 | 3129 | |
5b4aacef | 3130 | ret = btrfs_read_chunk_tree(fs_info); |
2b82032c | 3131 | if (ret) { |
05135f59 | 3132 | btrfs_err(fs_info, "failed to read chunk tree: %d", ret); |
af31f5e5 | 3133 | goto fail_tree_roots; |
2b82032c | 3134 | } |
0b86a832 | 3135 | |
8dabb742 | 3136 | /* |
9b99b115 AJ |
3137 | * Keep the devid that is marked to be the target device for the |
3138 | * device replace procedure | |
8dabb742 | 3139 | */ |
9b99b115 | 3140 | btrfs_free_extra_devids(fs_devices, 0); |
dfe25020 | 3141 | |
a6b0d5c8 | 3142 | if (!fs_devices->latest_bdev) { |
05135f59 | 3143 | btrfs_err(fs_info, "failed to read devices"); |
a6b0d5c8 CM |
3144 | goto fail_tree_roots; |
3145 | } | |
3146 | ||
b8522a1e | 3147 | ret = init_tree_roots(fs_info); |
4bbcaa64 | 3148 | if (ret) |
b8522a1e | 3149 | goto fail_tree_roots; |
8929ecfa | 3150 | |
75ec1db8 JB |
3151 | /* |
3152 | * If we have a uuid root and we're not being told to rescan we need to | |
3153 | * check the generation here so we can set the | |
3154 | * BTRFS_FS_UPDATE_UUID_TREE_GEN bit. Otherwise we could commit the | |
3155 | * transaction during a balance or the log replay without updating the | |
3156 | * uuid generation, and then if we crash we would rescan the uuid tree, | |
3157 | * even though it was perfectly fine. | |
3158 | */ | |
3159 | if (fs_info->uuid_root && !btrfs_test_opt(fs_info, RESCAN_UUID_TREE) && | |
3160 | fs_info->generation == btrfs_super_uuid_tree_generation(disk_super)) | |
3161 | set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags); | |
3162 | ||
cf90d884 QW |
3163 | ret = btrfs_verify_dev_extents(fs_info); |
3164 | if (ret) { | |
3165 | btrfs_err(fs_info, | |
3166 | "failed to verify dev extents against chunks: %d", | |
3167 | ret); | |
3168 | goto fail_block_groups; | |
3169 | } | |
68310a5e ID |
3170 | ret = btrfs_recover_balance(fs_info); |
3171 | if (ret) { | |
05135f59 | 3172 | btrfs_err(fs_info, "failed to recover balance: %d", ret); |
68310a5e ID |
3173 | goto fail_block_groups; |
3174 | } | |
3175 | ||
733f4fbb SB |
3176 | ret = btrfs_init_dev_stats(fs_info); |
3177 | if (ret) { | |
05135f59 | 3178 | btrfs_err(fs_info, "failed to init dev_stats: %d", ret); |
733f4fbb SB |
3179 | goto fail_block_groups; |
3180 | } | |
3181 | ||
8dabb742 SB |
3182 | ret = btrfs_init_dev_replace(fs_info); |
3183 | if (ret) { | |
05135f59 | 3184 | btrfs_err(fs_info, "failed to init dev_replace: %d", ret); |
8dabb742 SB |
3185 | goto fail_block_groups; |
3186 | } | |
3187 | ||
9b99b115 | 3188 | btrfs_free_extra_devids(fs_devices, 1); |
8dabb742 | 3189 | |
c6761a9e | 3190 | ret = btrfs_sysfs_add_fsid(fs_devices); |
b7c35e81 | 3191 | if (ret) { |
05135f59 DS |
3192 | btrfs_err(fs_info, "failed to init sysfs fsid interface: %d", |
3193 | ret); | |
b7c35e81 AJ |
3194 | goto fail_block_groups; |
3195 | } | |
3196 | ||
96f3136e | 3197 | ret = btrfs_sysfs_add_mounted(fs_info); |
c59021f8 | 3198 | if (ret) { |
05135f59 | 3199 | btrfs_err(fs_info, "failed to init sysfs interface: %d", ret); |
b7c35e81 | 3200 | goto fail_fsdev_sysfs; |
c59021f8 | 3201 | } |
3202 | ||
c59021f8 | 3203 | ret = btrfs_init_space_info(fs_info); |
3204 | if (ret) { | |
05135f59 | 3205 | btrfs_err(fs_info, "failed to initialize space info: %d", ret); |
2365dd3c | 3206 | goto fail_sysfs; |
c59021f8 | 3207 | } |
3208 | ||
5b4aacef | 3209 | ret = btrfs_read_block_groups(fs_info); |
1b1d1f66 | 3210 | if (ret) { |
05135f59 | 3211 | btrfs_err(fs_info, "failed to read block groups: %d", ret); |
2365dd3c | 3212 | goto fail_sysfs; |
1b1d1f66 | 3213 | } |
4330e183 | 3214 | |
6528b99d | 3215 | if (!sb_rdonly(sb) && !btrfs_check_rw_degradable(fs_info, NULL)) { |
05135f59 | 3216 | btrfs_warn(fs_info, |
52042d8e | 3217 | "writable mount is not allowed due to too many missing devices"); |
2365dd3c | 3218 | goto fail_sysfs; |
292fd7fc | 3219 | } |
9078a3e1 | 3220 | |
a74a4b97 CM |
3221 | fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root, |
3222 | "btrfs-cleaner"); | |
57506d50 | 3223 | if (IS_ERR(fs_info->cleaner_kthread)) |
2365dd3c | 3224 | goto fail_sysfs; |
a74a4b97 CM |
3225 | |
3226 | fs_info->transaction_kthread = kthread_run(transaction_kthread, | |
3227 | tree_root, | |
3228 | "btrfs-transaction"); | |
57506d50 | 3229 | if (IS_ERR(fs_info->transaction_kthread)) |
3f157a2f | 3230 | goto fail_cleaner; |
a74a4b97 | 3231 | |
583b7231 | 3232 | if (!btrfs_test_opt(fs_info, NOSSD) && |
c289811c | 3233 | !fs_info->fs_devices->rotating) { |
583b7231 | 3234 | btrfs_set_and_info(fs_info, SSD, "enabling ssd optimizations"); |
c289811c CM |
3235 | } |
3236 | ||
572d9ab7 | 3237 | /* |
01327610 | 3238 | * Mount does not set all options immediately; we can do it now and do |
572d9ab7 DS |
3239 | * not have to wait for a transaction commit. |
3240 | */ | |
3241 | btrfs_apply_pending_changes(fs_info); | |
3818aea2 | 3242 | |
21adbd5c | 3243 | #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY |
0b246afa | 3244 | if (btrfs_test_opt(fs_info, CHECK_INTEGRITY)) { |
2ff7e61e | 3245 | ret = btrfsic_mount(fs_info, fs_devices, |
0b246afa | 3246 | btrfs_test_opt(fs_info, |
21adbd5c SB |
3247 | CHECK_INTEGRITY_INCLUDING_EXTENT_DATA) ? |
3248 | 1 : 0, | |
3249 | fs_info->check_integrity_print_mask); | |
3250 | if (ret) | |
05135f59 DS |
3251 | btrfs_warn(fs_info, |
3252 | "failed to initialize integrity check module: %d", | |
3253 | ret); | |
21adbd5c SB |
3254 | } |
3255 | #endif | |
bcef60f2 AJ |
3256 | ret = btrfs_read_qgroup_config(fs_info); |
3257 | if (ret) | |
3258 | goto fail_trans_kthread; | |
21adbd5c | 3259 | |
fd708b81 JB |
3260 | if (btrfs_build_ref_tree(fs_info)) |
3261 | btrfs_err(fs_info, "couldn't build ref tree"); | |
3262 | ||
96da0919 QW |
3263 | /* Do not make disk changes in a broken FS or when nologreplay is given */ | |
3264 | if (btrfs_super_log_root(disk_super) != 0 && | |
0b246afa | 3265 | !btrfs_test_opt(fs_info, NOLOGREPLAY)) { |
e8294f2f | 3266 | btrfs_info(fs_info, "start tree-log replay"); |
63443bf5 | 3267 | ret = btrfs_replay_log(fs_info, fs_devices); |
79787eaa | 3268 | if (ret) { |
63443bf5 | 3269 | err = ret; |
28c16cbb | 3270 | goto fail_qgroup; |
79787eaa | 3271 | } |
e02119d5 | 3272 | } |
1a40e23b | 3273 | |
6bccf3ab | 3274 | ret = btrfs_find_orphan_roots(fs_info); |
79787eaa | 3275 | if (ret) |
28c16cbb | 3276 | goto fail_qgroup; |
76dda93c | 3277 | |
bc98a42c | 3278 | if (!sb_rdonly(sb)) { |
d68fc57b | 3279 | ret = btrfs_cleanup_fs_roots(fs_info); |
44c44af2 | 3280 | if (ret) |
28c16cbb | 3281 | goto fail_qgroup; |
90c711ab ZB |
3282 | |
3283 | mutex_lock(&fs_info->cleaner_mutex); | |
5d4f98a2 | 3284 | ret = btrfs_recover_relocation(tree_root); |
90c711ab | 3285 | mutex_unlock(&fs_info->cleaner_mutex); |
d7ce5843 | 3286 | if (ret < 0) { |
05135f59 DS |
3287 | btrfs_warn(fs_info, "failed to recover relocation: %d", |
3288 | ret); | |
d7ce5843 | 3289 | err = -EINVAL; |
bcef60f2 | 3290 | goto fail_qgroup; |
d7ce5843 | 3291 | } |
7c2ca468 | 3292 | } |
1a40e23b | 3293 | |
56e9357a | 3294 | fs_info->fs_root = btrfs_get_fs_root(fs_info, BTRFS_FS_TREE_OBJECTID, true); |
3140c9a3 DC |
3295 | if (IS_ERR(fs_info->fs_root)) { |
3296 | err = PTR_ERR(fs_info->fs_root); | |
f50f4353 | 3297 | btrfs_warn(fs_info, "failed to read fs tree: %d", err); |
315bf8ef | 3298 | fs_info->fs_root = NULL; |
bcef60f2 | 3299 | goto fail_qgroup; |
3140c9a3 | 3300 | } |
c289811c | 3301 | |
bc98a42c | 3302 | if (sb_rdonly(sb)) |
2b6ba629 | 3303 | return 0; |
59641015 | 3304 | |
f8d468a1 OS |
3305 | if (btrfs_test_opt(fs_info, CLEAR_CACHE) && |
3306 | btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) { | |
6675df31 OS |
3307 | clear_free_space_tree = 1; |
3308 | } else if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) && | |
3309 | !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID)) { | |
3310 | btrfs_warn(fs_info, "free space tree is invalid"); | |
3311 | clear_free_space_tree = 1; | |
3312 | } | |
3313 | ||
3314 | if (clear_free_space_tree) { | |
f8d468a1 OS |
3315 | btrfs_info(fs_info, "clearing free space tree"); |
3316 | ret = btrfs_clear_free_space_tree(fs_info); | |
3317 | if (ret) { | |
3318 | btrfs_warn(fs_info, | |
3319 | "failed to clear free space tree: %d", ret); | |
6bccf3ab | 3320 | close_ctree(fs_info); |
f8d468a1 OS |
3321 | return ret; |
3322 | } | |
3323 | } | |
3324 | ||
0b246afa | 3325 | if (btrfs_test_opt(fs_info, FREE_SPACE_TREE) && |
511711af | 3326 | !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) { |
05135f59 | 3327 | btrfs_info(fs_info, "creating free space tree"); |
511711af CM |
3328 | ret = btrfs_create_free_space_tree(fs_info); |
3329 | if (ret) { | |
05135f59 DS |
3330 | btrfs_warn(fs_info, |
3331 | "failed to create free space tree: %d", ret); | |
6bccf3ab | 3332 | close_ctree(fs_info); |
511711af CM |
3333 | return ret; |
3334 | } | |
3335 | } | |
3336 | ||
2b6ba629 ID |
3337 | down_read(&fs_info->cleanup_work_sem); |
3338 | if ((ret = btrfs_orphan_cleanup(fs_info->fs_root)) || | |
3339 | (ret = btrfs_orphan_cleanup(fs_info->tree_root))) { | |
e3acc2a6 | 3340 | up_read(&fs_info->cleanup_work_sem); |
6bccf3ab | 3341 | close_ctree(fs_info); |
2b6ba629 ID |
3342 | return ret; |
3343 | } | |
3344 | up_read(&fs_info->cleanup_work_sem); | |
59641015 | 3345 | |
2b6ba629 ID |
3346 | ret = btrfs_resume_balance_async(fs_info); |
3347 | if (ret) { | |
05135f59 | 3348 | btrfs_warn(fs_info, "failed to resume balance: %d", ret); |
6bccf3ab | 3349 | close_ctree(fs_info); |
2b6ba629 | 3350 | return ret; |
e3acc2a6 JB |
3351 | } |
3352 | ||
8dabb742 SB |
3353 | ret = btrfs_resume_dev_replace_async(fs_info); |
3354 | if (ret) { | |
05135f59 | 3355 | btrfs_warn(fs_info, "failed to resume device replace: %d", ret); |
6bccf3ab | 3356 | close_ctree(fs_info); |
8dabb742 SB |
3357 | return ret; |
3358 | } | |
3359 | ||
b382a324 | 3360 | btrfs_qgroup_rescan_resume(fs_info); |
b0643e59 | 3361 | btrfs_discard_resume(fs_info); |
b382a324 | 3362 | |
4bbcaa64 | 3363 | if (!fs_info->uuid_root) { |
05135f59 | 3364 | btrfs_info(fs_info, "creating UUID tree"); |
f7a81ea4 SB |
3365 | ret = btrfs_create_uuid_tree(fs_info); |
3366 | if (ret) { | |
05135f59 DS |
3367 | btrfs_warn(fs_info, |
3368 | "failed to create the UUID tree: %d", ret); | |
6bccf3ab | 3369 | close_ctree(fs_info); |
f7a81ea4 SB |
3370 | return ret; |
3371 | } | |
0b246afa | 3372 | } else if (btrfs_test_opt(fs_info, RESCAN_UUID_TREE) || |
4bbcaa64 ES |
3373 | fs_info->generation != |
3374 | btrfs_super_uuid_tree_generation(disk_super)) { | |
05135f59 | 3375 | btrfs_info(fs_info, "checking UUID tree"); |
70f80175 SB |
3376 | ret = btrfs_check_uuid_tree(fs_info); |
3377 | if (ret) { | |
05135f59 DS |
3378 | btrfs_warn(fs_info, |
3379 | "failed to check the UUID tree: %d", ret); | |
6bccf3ab | 3380 | close_ctree(fs_info); |
70f80175 SB |
3381 | return ret; |
3382 | } | |
f7a81ea4 | 3383 | } |
afcdd129 | 3384 | set_bit(BTRFS_FS_OPEN, &fs_info->flags); |
47ab2a6c | 3385 | |
8dcddfa0 QW |
3386 | /* |
3387 | * backuproot only affects mount behavior, and if open_ctree succeeded, |
3388 | * there is no need to keep the flag |
3389 | */ | |
3390 | btrfs_clear_opt(fs_info->mount_opt, USEBACKUPROOT); | |
3391 | ||
ad2b2c80 | 3392 | return 0; |
39279cc3 | 3393 | |
bcef60f2 AJ |
3394 | fail_qgroup: |
3395 | btrfs_free_qgroup_config(fs_info); | |
7c2ca468 CM |
3396 | fail_trans_kthread: |
3397 | kthread_stop(fs_info->transaction_kthread); | |
2ff7e61e | 3398 | btrfs_cleanup_transaction(fs_info); |
faa2dbf0 | 3399 | btrfs_free_fs_roots(fs_info); |
3f157a2f | 3400 | fail_cleaner: |
a74a4b97 | 3401 | kthread_stop(fs_info->cleaner_kthread); |
7c2ca468 CM |
3402 | |
3403 | /* | |
3404 | * make sure we're done with the btree inode before we stop our | |
3405 | * kthreads | |
3406 | */ | |
3407 | filemap_write_and_wait(fs_info->btree_inode->i_mapping); | |
7c2ca468 | 3408 | |
2365dd3c | 3409 | fail_sysfs: |
6618a59b | 3410 | btrfs_sysfs_remove_mounted(fs_info); |
2365dd3c | 3411 | |
b7c35e81 AJ |
3412 | fail_fsdev_sysfs: |
3413 | btrfs_sysfs_remove_fsid(fs_info->fs_devices); | |
3414 | ||
1b1d1f66 | 3415 | fail_block_groups: |
54067ae9 | 3416 | btrfs_put_block_group_cache(fs_info); |
af31f5e5 CM |
3417 | |
3418 | fail_tree_roots: | |
9e3aa805 JB |
3419 | if (fs_info->data_reloc_root) |
3420 | btrfs_drop_and_free_fs_root(fs_info, fs_info->data_reloc_root); | |
4273eaff | 3421 | free_root_pointers(fs_info, true); |
2b8195bb | 3422 | invalidate_inode_pages2(fs_info->btree_inode->i_mapping); |
af31f5e5 | 3423 | |
39279cc3 | 3424 | fail_sb_buffer: |
7abadb64 | 3425 | btrfs_stop_all_workers(fs_info); |
5cdd7db6 | 3426 | btrfs_free_block_groups(fs_info); |
16cdcec7 | 3427 | fail_alloc: |
586e46e2 ID |
3428 | btrfs_mapping_tree_free(&fs_info->mapping_tree); |
3429 | ||
4543df7e | 3430 | iput(fs_info->btree_inode); |
7e662854 | 3431 | fail: |
586e46e2 | 3432 | btrfs_close_devices(fs_info->fs_devices); |
ad2b2c80 | 3433 | return err; |
eb60ceac | 3434 | } |
663faf9f | 3435 | ALLOW_ERROR_INJECTION(open_ctree, ERRNO); |
eb60ceac | 3436 | |
314b6dd0 | 3437 | static void btrfs_end_super_write(struct bio *bio) |
f2984462 | 3438 | { |
314b6dd0 JT |
3439 | struct btrfs_device *device = bio->bi_private; |
3440 | struct bio_vec *bvec; | |
3441 | struct bvec_iter_all iter_all; | |
3442 | struct page *page; | |
3443 | ||
3444 | bio_for_each_segment_all(bvec, bio, iter_all) { | |
3445 | page = bvec->bv_page; | |
3446 | ||
3447 | if (bio->bi_status) { | |
3448 | btrfs_warn_rl_in_rcu(device->fs_info, | |
3449 | "lost page write due to IO error on %s (%d)", | |
3450 | rcu_str_deref(device->name), | |
3451 | blk_status_to_errno(bio->bi_status)); | |
3452 | ClearPageUptodate(page); | |
3453 | SetPageError(page); | |
3454 | btrfs_dev_stat_inc_and_print(device, | |
3455 | BTRFS_DEV_STAT_WRITE_ERRS); | |
3456 | } else { | |
3457 | SetPageUptodate(page); | |
3458 | } | |
3459 | ||
3460 | put_page(page); | |
3461 | unlock_page(page); | |
f2984462 | 3462 | } |
314b6dd0 JT |
3463 | |
3464 | bio_put(bio); | |
f2984462 CM |
3465 | } |
3466 | ||
8f32380d JT |
3467 | struct btrfs_super_block *btrfs_read_dev_one_super(struct block_device *bdev, |
3468 | int copy_num) | |
29c36d72 | 3469 | { |
29c36d72 | 3470 | struct btrfs_super_block *super; |
8f32380d | 3471 | struct page *page; |
29c36d72 | 3472 | u64 bytenr; |
8f32380d | 3473 | struct address_space *mapping = bdev->bd_inode->i_mapping; |
29c36d72 AJ |
3474 | |
3475 | bytenr = btrfs_sb_offset(copy_num); | |
3476 | if (bytenr + BTRFS_SUPER_INFO_SIZE >= i_size_read(bdev->bd_inode)) | |
8f32380d | 3477 | return ERR_PTR(-EINVAL); |
29c36d72 | 3478 | |
8f32380d JT |
3479 | page = read_cache_page_gfp(mapping, bytenr >> PAGE_SHIFT, GFP_NOFS); |
3480 | if (IS_ERR(page)) | |
3481 | return ERR_CAST(page); | |
29c36d72 | 3482 | |
8f32380d | 3483 | super = page_address(page); |
96c2e067 AJ |
3484 | if (btrfs_super_magic(super) != BTRFS_MAGIC) { |
3485 | btrfs_release_disk_super(super); | |
3486 | return ERR_PTR(-ENODATA); | |
3487 | } | |
3488 | ||
3489 | if (btrfs_super_bytenr(super) != bytenr) { | |
8f32380d JT |
3490 | btrfs_release_disk_super(super); |
3491 | return ERR_PTR(-EINVAL); | |
29c36d72 AJ |
3492 | } |
3493 | ||
8f32380d | 3494 | return super; |
29c36d72 AJ |
3495 | } |
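
btrfs_read_dev_one_super() finds each copy through btrfs_sb_offset(copy_num) and skips copies that would fall past the end of the device. For reference, the copies live at 64KiB, 64MiB and 256GiB; the userspace sketch below re-derives those offsets, and the constants and shift trick in it are assumptions of the illustration rather than a copy of the kernel helper.

/* illustrative sketch only, not part of disk-io.c */
#include <inttypes.h>
#include <stdio.h>

#define SUPER_INFO_OFFSET	(64 * 1024ULL)	/* primary copy at 64KiB */
#define SUPER_MIRROR_MAX	3
#define SUPER_MIRROR_SHIFT	12

static uint64_t sb_offset(int mirror)
{
	uint64_t start = 16 * 1024ULL;

	if (mirror)
		return start << (SUPER_MIRROR_SHIFT * mirror);
	return SUPER_INFO_OFFSET;
}

int main(void)
{
	int i;

	for (i = 0; i < SUPER_MIRROR_MAX; i++)
		printf("super copy %d at byte %" PRIu64 "\n", i, sb_offset(i));
	return 0;
}

It prints 65536, 67108864 and 274877906944, matching the 64KiB/64MiB/256GiB locations that the i_size_read() check above guards against.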
3496 | ||
3497 | ||
8f32380d | 3498 | struct btrfs_super_block *btrfs_read_dev_super(struct block_device *bdev) |
a512bbf8 | 3499 | { |
8f32380d | 3500 | struct btrfs_super_block *super, *latest = NULL; |
a512bbf8 YZ |
3501 | int i; |
3502 | u64 transid = 0; | |
a512bbf8 YZ |
3503 | |
3504 | /* we would like to check all the supers, but that would make | |
3505 | * a btrfs mount succeed after a mkfs from a different FS. | |
3506 | * So, we need to add a special mount option to scan for | |
3507 | * later supers, using BTRFS_SUPER_MIRROR_MAX instead | |
3508 | */ | |
3509 | for (i = 0; i < 1; i++) { | |
8f32380d JT |
3510 | super = btrfs_read_dev_one_super(bdev, i); |
3511 | if (IS_ERR(super)) | |
a512bbf8 YZ |
3512 | continue; |
3513 | ||
a512bbf8 | 3514 | if (!latest || btrfs_super_generation(super) > transid) { |
8f32380d JT |
3515 | if (latest) |
3516 | btrfs_release_disk_super(super); | |
3517 | ||
3518 | latest = super; | |
a512bbf8 | 3519 | transid = btrfs_super_generation(super); |
a512bbf8 YZ |
3520 | } |
3521 | } | |
92fc03fb | 3522 | |
8f32380d | 3523 | return super; |
a512bbf8 YZ |
3524 | } |
3525 | ||
4eedeb75 | 3526 | /* |
abbb3b8e | 3527 | * Write superblock @sb to the @device. Do not wait for completion; all the |
314b6dd0 | 3528 | * pages we use for writing are locked. |
4eedeb75 | 3529 | * |
abbb3b8e DS |
3530 | * Write @max_mirrors copies of the superblock, where 0 means the default: all |
3531 | * copies that fit the expected device size at commit time. Note that | |
3532 | * max_mirrors must be the same for the write and wait phases. | |
4eedeb75 | 3533 | * |
314b6dd0 | 3534 | * Return number of errors when page is not found or submission fails. |
4eedeb75 | 3535 | */ |
a512bbf8 | 3536 | static int write_dev_supers(struct btrfs_device *device, |
abbb3b8e | 3537 | struct btrfs_super_block *sb, int max_mirrors) |
a512bbf8 | 3538 | { |
d5178578 | 3539 | struct btrfs_fs_info *fs_info = device->fs_info; |
314b6dd0 | 3540 | struct address_space *mapping = device->bdev->bd_inode->i_mapping; |
d5178578 | 3541 | SHASH_DESC_ON_STACK(shash, fs_info->csum_shash); |
a512bbf8 | 3542 | int i; |
a512bbf8 | 3543 | int errors = 0; |
a512bbf8 | 3544 | u64 bytenr; |
a512bbf8 YZ |
3545 | |
3546 | if (max_mirrors == 0) | |
3547 | max_mirrors = BTRFS_SUPER_MIRROR_MAX; | |
3548 | ||
d5178578 JT |
3549 | shash->tfm = fs_info->csum_shash; |
3550 | ||
a512bbf8 | 3551 | for (i = 0; i < max_mirrors; i++) { |
314b6dd0 JT |
3552 | struct page *page; |
3553 | struct bio *bio; | |
3554 | struct btrfs_super_block *disk_super; | |
3555 | ||
a512bbf8 | 3556 | bytenr = btrfs_sb_offset(i); |
935e5cc9 MX |
3557 | if (bytenr + BTRFS_SUPER_INFO_SIZE >= |
3558 | device->commit_total_bytes) | |
a512bbf8 YZ |
3559 | break; |
3560 | ||
abbb3b8e | 3561 | btrfs_set_super_bytenr(sb, bytenr); |
4eedeb75 | 3562 | |
fd08001f EB |
3563 | crypto_shash_digest(shash, (const char *)sb + BTRFS_CSUM_SIZE, |
3564 | BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE, | |
3565 | sb->csum); | |
4eedeb75 | 3566 | |
314b6dd0 JT |
3567 | page = find_or_create_page(mapping, bytenr >> PAGE_SHIFT, |
3568 | GFP_NOFS); | |
3569 | if (!page) { | |
abbb3b8e | 3570 | btrfs_err(device->fs_info, |
314b6dd0 | 3571 | "couldn't get super block page for bytenr %llu", |
abbb3b8e DS |
3572 | bytenr); |
3573 | errors++; | |
4eedeb75 | 3574 | continue; |
abbb3b8e | 3575 | } |
634554dc | 3576 | |
314b6dd0 JT |
3577 | /* Bump the refcount for wait_dev_supers() */ |
3578 | get_page(page); | |
a512bbf8 | 3579 | |
314b6dd0 JT |
3580 | disk_super = page_address(page); |
3581 | memcpy(disk_super, sb, BTRFS_SUPER_INFO_SIZE); | |
4eedeb75 | 3582 | |
314b6dd0 JT |
3583 | /* |
3584 | * Directly use bios here instead of relying on the page cache | |
3585 | * to do I/O, so we don't lose the ability to do integrity | |
3586 | * checking. | |
3587 | */ | |
3588 | bio = bio_alloc(GFP_NOFS, 1); | |
3589 | bio_set_dev(bio, device->bdev); | |
3590 | bio->bi_iter.bi_sector = bytenr >> SECTOR_SHIFT; | |
3591 | bio->bi_private = device; | |
3592 | bio->bi_end_io = btrfs_end_super_write; | |
3593 | __bio_add_page(bio, page, BTRFS_SUPER_INFO_SIZE, | |
3594 | offset_in_page(bytenr)); | |
a512bbf8 | 3595 | |
387125fc | 3596 | /* |
314b6dd0 JT |
3597 | * We FUA only the first super block. The others we allow to |
3598 | * go down lazily and there's a short window where the on-disk |
3599 | * copies might still contain the older version. | |
387125fc | 3600 | */ |
314b6dd0 | 3601 | bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_META | REQ_PRIO; |
1b9e619c | 3602 | if (i == 0 && !btrfs_test_opt(device->fs_info, NOBARRIER)) |
314b6dd0 JT |
3603 | bio->bi_opf |= REQ_FUA; |
3604 | ||
3605 | btrfsic_submit_bio(bio); | |
a512bbf8 YZ |
3606 | } |
3607 | return errors < i ? 0 : -1; | |
3608 | } | |
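
Before each copy is submitted, write_dev_supers() computes the checksum with crypto_shash_digest() over everything past the first BTRFS_CSUM_SIZE bytes of the BTRFS_SUPER_INFO_SIZE (4KiB) block and stores the digest at the very front. A self-contained userspace sketch of that layout using a bitwise CRC32C, the default csum type; byte order and the other supported hashes are deliberately ignored here.

/* illustrative sketch only, not part of disk-io.c */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SUPER_INFO_SIZE	4096
#define CSUM_SIZE	32	/* reserved csum area; crc32c uses 4 of the 32 bytes */

/* bitwise CRC32C (Castagnoli), reflected polynomial 0x82F63B78 */
static uint32_t crc32c(const uint8_t *data, size_t len)
{
	uint32_t crc = 0xFFFFFFFFu;
	size_t i;
	int bit;

	for (i = 0; i < len; i++) {
		crc ^= data[i];
		for (bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ (0x82F63B78u & (0u - (crc & 1u)));
	}
	return ~crc;
}

int main(void)
{
	uint8_t sb[SUPER_INFO_SIZE] = { 0 };
	uint32_t csum;

	/* pretend the bytes after the csum area hold the superblock fields */
	memset(sb + CSUM_SIZE, 0xab, SUPER_INFO_SIZE - CSUM_SIZE);

	/*
	 * Digest everything after the csum area and store the result up
	 * front (endianness is ignored in this sketch).
	 */
	csum = crc32c(sb + CSUM_SIZE, SUPER_INFO_SIZE - CSUM_SIZE);
	memcpy(sb, &csum, sizeof(csum));

	printf("crc32c over %d bytes: 0x%08x\n", SUPER_INFO_SIZE - CSUM_SIZE, csum);
	return 0;
}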
3609 | ||
abbb3b8e DS |
3610 | /* |
3611 | * Wait for write completion of superblocks done by write_dev_supers, | |
3612 | * @max_mirrors same for write and wait phases. | |
3613 | * | |
314b6dd0 | 3614 | * Return number of errors when page is not found or not marked up to |
abbb3b8e DS |
3615 | * date. |
3616 | */ | |
3617 | static int wait_dev_supers(struct btrfs_device *device, int max_mirrors) | |
3618 | { | |
abbb3b8e DS |
3619 | int i; |
3620 | int errors = 0; | |
b6a535fa | 3621 | bool primary_failed = false; |
abbb3b8e DS |
3622 | u64 bytenr; |
3623 | ||
3624 | if (max_mirrors == 0) | |
3625 | max_mirrors = BTRFS_SUPER_MIRROR_MAX; | |
3626 | ||
3627 | for (i = 0; i < max_mirrors; i++) { | |
314b6dd0 JT |
3628 | struct page *page; |
3629 | ||
abbb3b8e DS |
3630 | bytenr = btrfs_sb_offset(i); |
3631 | if (bytenr + BTRFS_SUPER_INFO_SIZE >= | |
3632 | device->commit_total_bytes) | |
3633 | break; | |
3634 | ||
314b6dd0 JT |
3635 | page = find_get_page(device->bdev->bd_inode->i_mapping, |
3636 | bytenr >> PAGE_SHIFT); | |
3637 | if (!page) { | |
abbb3b8e | 3638 | errors++; |
b6a535fa HM |
3639 | if (i == 0) |
3640 | primary_failed = true; | |
abbb3b8e DS |
3641 | continue; |
3642 | } | |
314b6dd0 JT |
3643 | /* Page is submitted locked and unlocked once the IO completes */ |
3644 | wait_on_page_locked(page); | |
3645 | if (PageError(page)) { | |
abbb3b8e | 3646 | errors++; |
b6a535fa HM |
3647 | if (i == 0) |
3648 | primary_failed = true; | |
3649 | } | |
abbb3b8e | 3650 | |
314b6dd0 JT |
3651 | /* Drop our reference */ |
3652 | put_page(page); | |
abbb3b8e | 3653 | |
314b6dd0 JT |
3654 | /* Drop the reference from the writing run */ |
3655 | put_page(page); | |
abbb3b8e DS |
3656 | } |
3657 | ||
b6a535fa HM |
3658 | /* log error, force error return */ |
3659 | if (primary_failed) { | |
3660 | btrfs_err(device->fs_info, "error writing primary super block to device %llu", | |
3661 | device->devid); | |
3662 | return -1; | |
3663 | } | |
3664 | ||
abbb3b8e DS |
3665 | return errors < i ? 0 : -1; |
3666 | } | |
3667 | ||
387125fc CM |
3668 | /* |
3669 | * endio for write_dev_flush; this will wake anyone waiting |
3670 | * for the barrier when it is done | |
3671 | */ | |
4246a0b6 | 3672 | static void btrfs_end_empty_barrier(struct bio *bio) |
387125fc | 3673 | { |
e0ae9994 | 3674 | complete(bio->bi_private); |
387125fc CM |
3675 | } |
3676 | ||
3677 | /* | |
4fc6441a AJ |
3678 | * Submit a flush request to the device if it supports it. Error handling is |
3679 | * done in the waiting counterpart. | |
387125fc | 3680 | */ |
4fc6441a | 3681 | static void write_dev_flush(struct btrfs_device *device) |
387125fc | 3682 | { |
c2a9c7ab | 3683 | struct request_queue *q = bdev_get_queue(device->bdev); |
e0ae9994 | 3684 | struct bio *bio = device->flush_bio; |
387125fc | 3685 | |
c2a9c7ab | 3686 | if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags)) |
4fc6441a | 3687 | return; |
387125fc | 3688 | |
e0ae9994 | 3689 | bio_reset(bio); |
387125fc | 3690 | bio->bi_end_io = btrfs_end_empty_barrier; |
74d46992 | 3691 | bio_set_dev(bio, device->bdev); |
8d910125 | 3692 | bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH; |
387125fc CM |
3693 | init_completion(&device->flush_wait); |
3694 | bio->bi_private = &device->flush_wait; | |
387125fc | 3695 | |
43a01111 | 3696 | btrfsic_submit_bio(bio); |
1c3063b6 | 3697 | set_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state); |
4fc6441a | 3698 | } |
387125fc | 3699 | |
4fc6441a AJ |
3700 | /* |
3701 | * If the flush bio has been submitted by write_dev_flush, wait for it. | |
3702 | */ | |
8c27cb35 | 3703 | static blk_status_t wait_dev_flush(struct btrfs_device *device) |
4fc6441a | 3704 | { |
4fc6441a | 3705 | struct bio *bio = device->flush_bio; |
387125fc | 3706 | |
1c3063b6 | 3707 | if (!test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state)) |
58efbc9f | 3708 | return BLK_STS_OK; |
387125fc | 3709 | |
1c3063b6 | 3710 | clear_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state); |
2980d574 | 3711 | wait_for_completion_io(&device->flush_wait); |
387125fc | 3712 | |
8c27cb35 | 3713 | return bio->bi_status; |
387125fc | 3714 | } |
387125fc | 3715 | |
d10b82fe | 3716 | static int check_barrier_error(struct btrfs_fs_info *fs_info) |
401b41e5 | 3717 | { |
6528b99d | 3718 | if (!btrfs_check_rw_degradable(fs_info, NULL)) |
401b41e5 | 3719 | return -EIO; |
387125fc CM |
3720 | return 0; |
3721 | } | |
3722 | ||
3723 | /* | |
3724 | * send an empty flush down to each device in parallel, | |
3725 | * then wait for them | |
3726 | */ | |
3727 | static int barrier_all_devices(struct btrfs_fs_info *info) | |
3728 | { | |
3729 | struct list_head *head; | |
3730 | struct btrfs_device *dev; | |
5af3e8cc | 3731 | int errors_wait = 0; |
4e4cbee9 | 3732 | blk_status_t ret; |
387125fc | 3733 | |
1538e6c5 | 3734 | lockdep_assert_held(&info->fs_devices->device_list_mutex); |
387125fc CM |
3735 | /* send down all the barriers */ |
3736 | head = &info->fs_devices->devices; | |
1538e6c5 | 3737 | list_for_each_entry(dev, head, dev_list) { |
e6e674bd | 3738 | if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) |
f88ba6a2 | 3739 | continue; |
cea7c8bf | 3740 | if (!dev->bdev) |
387125fc | 3741 | continue; |
e12c9621 | 3742 | if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) || |
ebbede42 | 3743 | !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) |
387125fc CM |
3744 | continue; |
3745 | ||
4fc6441a | 3746 | write_dev_flush(dev); |
58efbc9f | 3747 | dev->last_flush_error = BLK_STS_OK; |
387125fc CM |
3748 | } |
3749 | ||
3750 | /* wait for all the barriers */ | |
1538e6c5 | 3751 | list_for_each_entry(dev, head, dev_list) { |
e6e674bd | 3752 | if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) |
f88ba6a2 | 3753 | continue; |
387125fc | 3754 | if (!dev->bdev) { |
5af3e8cc | 3755 | errors_wait++; |
387125fc CM |
3756 | continue; |
3757 | } | |
e12c9621 | 3758 | if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) || |
ebbede42 | 3759 | !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) |
387125fc CM |
3760 | continue; |
3761 | ||
4fc6441a | 3762 | ret = wait_dev_flush(dev); |
401b41e5 AJ |
3763 | if (ret) { |
3764 | dev->last_flush_error = ret; | |
66b4993e DS |
3765 | btrfs_dev_stat_inc_and_print(dev, |
3766 | BTRFS_DEV_STAT_FLUSH_ERRS); | |
5af3e8cc | 3767 | errors_wait++; |
401b41e5 AJ |
3768 | } |
3769 | } | |
3770 | ||
cea7c8bf | 3771 | if (errors_wait) { |
401b41e5 AJ |
3772 | /* |
3773 | * At some point we need the status of all disks | |
3774 | * to arrive at the volume status. So error checking | |
3775 | * is being pushed to a separate loop. | |
3776 | */ | |
d10b82fe | 3777 | return check_barrier_error(info); |
387125fc | 3778 | } |
387125fc CM |
3779 | return 0; |
3780 | } | |
3781 | ||
943c6e99 ZL |
3782 | int btrfs_get_num_tolerated_disk_barrier_failures(u64 flags) |
3783 | { | |
8789f4fe ZL |
3784 | int raid_type; |
3785 | int min_tolerated = INT_MAX; | |
943c6e99 | 3786 | |
8789f4fe ZL |
3787 | if ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 || |
3788 | (flags & BTRFS_AVAIL_ALLOC_BIT_SINGLE)) | |
8c3e3582 | 3789 | min_tolerated = min_t(int, min_tolerated, |
8789f4fe ZL |
3790 | btrfs_raid_array[BTRFS_RAID_SINGLE]. |
3791 | tolerated_failures); | |
943c6e99 | 3792 | |
8789f4fe ZL |
3793 | for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) { |
3794 | if (raid_type == BTRFS_RAID_SINGLE) | |
3795 | continue; | |
41a6e891 | 3796 | if (!(flags & btrfs_raid_array[raid_type].bg_flag)) |
8789f4fe | 3797 | continue; |
8c3e3582 | 3798 | min_tolerated = min_t(int, min_tolerated, |
8789f4fe ZL |
3799 | btrfs_raid_array[raid_type]. |
3800 | tolerated_failures); | |
3801 | } | |
943c6e99 | 3802 | |
8789f4fe | 3803 | if (min_tolerated == INT_MAX) { |
ab8d0fc4 | 3804 | pr_warn("BTRFS: unknown raid flag: %llu", flags); |
8789f4fe ZL |
3805 | min_tolerated = 0; |
3806 | } | |
3807 | ||
3808 | return min_tolerated; | |
943c6e99 ZL |
3809 | } |
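
btrfs_get_num_tolerated_disk_barrier_failures() boils the profile bits in @flags down to the smallest number of device failures any of the present profiles can survive, which is what the barrier error handling above compares against. The userspace sketch below shows the same min-reduction; the flag bit positions and the tolerance column are this illustration's assumptions, not the kernel's btrfs_raid_array.

/* illustrative sketch only, not part of disk-io.c */
#include <limits.h>
#include <stdio.h>

struct profile {
	const char *name;
	unsigned long long flag;	/* 0 stands for "single" */
	int tolerated;			/* assumed tolerated device failures */
};

static const struct profile profiles[] = {
	{ "single", 0,      0 },
	{ "raid0",  1 << 3, 0 },
	{ "raid1",  1 << 4, 1 },
	{ "dup",    1 << 5, 0 },
	{ "raid10", 1 << 6, 1 },
	{ "raid5",  1 << 7, 1 },
	{ "raid6",  1 << 8, 2 },
};

static int min_tolerated(unsigned long long flags)
{
	int min = INT_MAX;
	size_t i;

	for (i = 0; i < sizeof(profiles) / sizeof(profiles[0]); i++) {
		/* "single" only counts when no real profile bit is set */
		if (profiles[i].flag == 0 && flags != 0)
			continue;
		if (profiles[i].flag != 0 && !(flags & profiles[i].flag))
			continue;
		if (profiles[i].tolerated < min)
			min = profiles[i].tolerated;
	}
	return min == INT_MAX ? 0 : min;
}

int main(void)
{
	/* e.g. a mask that has both the raid1 and raid5 bits set */
	printf("min tolerated failures: %d\n",
	       min_tolerated((1ULL << 4) | (1ULL << 7)));
	return 0;
}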
3810 | ||
eece6a9c | 3811 | int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors) |
f2984462 | 3812 | { |
e5e9a520 | 3813 | struct list_head *head; |
f2984462 | 3814 | struct btrfs_device *dev; |
a061fc8d | 3815 | struct btrfs_super_block *sb; |
f2984462 | 3816 | struct btrfs_dev_item *dev_item; |
f2984462 CM |
3817 | int ret; |
3818 | int do_barriers; | |
a236aed1 CM |
3819 | int max_errors; |
3820 | int total_errors = 0; | |
a061fc8d | 3821 | u64 flags; |
f2984462 | 3822 | |
0b246afa | 3823 | do_barriers = !btrfs_test_opt(fs_info, NOBARRIER); |
fed3b381 LB |
3824 | |
3825 | /* | |
3826 | * max_mirrors == 0 indicates we're from commit_transaction, | |
3827 | * not from fsync where the tree roots in fs_info have not | |
3828 | * been consistent on disk. | |
3829 | */ | |
3830 | if (max_mirrors == 0) | |
3831 | backup_super_roots(fs_info); | |
f2984462 | 3832 | |
0b246afa | 3833 | sb = fs_info->super_for_commit; |
a061fc8d | 3834 | dev_item = &sb->dev_item; |
e5e9a520 | 3835 | |
0b246afa JM |
3836 | mutex_lock(&fs_info->fs_devices->device_list_mutex); |
3837 | head = &fs_info->fs_devices->devices; | |
3838 | max_errors = btrfs_super_num_devices(fs_info->super_copy) - 1; | |
387125fc | 3839 | |
5af3e8cc | 3840 | if (do_barriers) { |
0b246afa | 3841 | ret = barrier_all_devices(fs_info); |
5af3e8cc SB |
3842 | if (ret) { |
3843 | mutex_unlock( | |
0b246afa JM |
3844 | &fs_info->fs_devices->device_list_mutex); |
3845 | btrfs_handle_fs_error(fs_info, ret, | |
3846 | "errors while submitting device barriers."); | |
5af3e8cc SB |
3847 | return ret; |
3848 | } | |
3849 | } | |
387125fc | 3850 | |
1538e6c5 | 3851 | list_for_each_entry(dev, head, dev_list) { |
dfe25020 CM |
3852 | if (!dev->bdev) { |
3853 | total_errors++; | |
3854 | continue; | |
3855 | } | |
e12c9621 | 3856 | if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) || |
ebbede42 | 3857 | !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) |
dfe25020 CM |
3858 | continue; |
3859 | ||
2b82032c | 3860 | btrfs_set_stack_device_generation(dev_item, 0); |
a061fc8d CM |
3861 | btrfs_set_stack_device_type(dev_item, dev->type); |
3862 | btrfs_set_stack_device_id(dev_item, dev->devid); | |
7df69d3e | 3863 | btrfs_set_stack_device_total_bytes(dev_item, |
935e5cc9 | 3864 | dev->commit_total_bytes); |
ce7213c7 MX |
3865 | btrfs_set_stack_device_bytes_used(dev_item, |
3866 | dev->commit_bytes_used); | |
a061fc8d CM |
3867 | btrfs_set_stack_device_io_align(dev_item, dev->io_align); |
3868 | btrfs_set_stack_device_io_width(dev_item, dev->io_width); | |
3869 | btrfs_set_stack_device_sector_size(dev_item, dev->sector_size); | |
3870 | memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE); | |
7239ff4b NB |
3871 | memcpy(dev_item->fsid, dev->fs_devices->metadata_uuid, |
3872 | BTRFS_FSID_SIZE); | |
a512bbf8 | 3873 | |
a061fc8d CM |
3874 | flags = btrfs_super_flags(sb); |
3875 | btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN); | |
3876 | ||
75cb857d QW |
3877 | ret = btrfs_validate_write_super(fs_info, sb); |
3878 | if (ret < 0) { | |
3879 | mutex_unlock(&fs_info->fs_devices->device_list_mutex); | |
3880 | btrfs_handle_fs_error(fs_info, -EUCLEAN, | |
3881 | "unexpected superblock corruption detected"); | |
3882 | return -EUCLEAN; | |
3883 | } | |
3884 | ||
abbb3b8e | 3885 | ret = write_dev_supers(dev, sb, max_mirrors); |
a236aed1 CM |
3886 | if (ret) |
3887 | total_errors++; | |
f2984462 | 3888 | } |
a236aed1 | 3889 | if (total_errors > max_errors) { |
0b246afa JM |
3890 | btrfs_err(fs_info, "%d errors while writing supers", |
3891 | total_errors); | |
3892 | mutex_unlock(&fs_info->fs_devices->device_list_mutex); | |
79787eaa | 3893 | |
9d565ba4 | 3894 | /* FUA is masked off if unsupported and can't be the reason */ |
0b246afa JM |
3895 | btrfs_handle_fs_error(fs_info, -EIO, |
3896 | "%d errors while writing supers", | |
3897 | total_errors); | |
9d565ba4 | 3898 | return -EIO; |
a236aed1 | 3899 | } |
f2984462 | 3900 | |
a512bbf8 | 3901 | total_errors = 0; |
1538e6c5 | 3902 | list_for_each_entry(dev, head, dev_list) { |
dfe25020 CM |
3903 | if (!dev->bdev) |
3904 | continue; | |
e12c9621 | 3905 | if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) || |
ebbede42 | 3906 | !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) |
dfe25020 CM |
3907 | continue; |
3908 | ||
abbb3b8e | 3909 | ret = wait_dev_supers(dev, max_mirrors); |
a512bbf8 YZ |
3910 | if (ret) |
3911 | total_errors++; | |
f2984462 | 3912 | } |
0b246afa | 3913 | mutex_unlock(&fs_info->fs_devices->device_list_mutex); |
a236aed1 | 3914 | if (total_errors > max_errors) { |
0b246afa JM |
3915 | btrfs_handle_fs_error(fs_info, -EIO, |
3916 | "%d errors while writing supers", | |
3917 | total_errors); | |
79787eaa | 3918 | return -EIO; |
a236aed1 | 3919 | } |
f2984462 CM |
3920 | return 0; |
3921 | } | |
3922 | ||
cb517eab MX |
3923 | /* Drop a fs root from the radix tree and free it. */ |
3924 | void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info, | |
3925 | struct btrfs_root *root) | |
2619ba1f | 3926 | { |
4785e24f JB |
3927 | bool drop_ref = false; |
3928 | ||
4df27c4d | 3929 | spin_lock(&fs_info->fs_roots_radix_lock); |
2619ba1f CM |
3930 | radix_tree_delete(&fs_info->fs_roots_radix, |
3931 | (unsigned long)root->root_key.objectid); | |
af01d2e5 | 3932 | if (test_and_clear_bit(BTRFS_ROOT_IN_RADIX, &root->state)) |
4785e24f | 3933 | drop_ref = true; |
4df27c4d | 3934 | spin_unlock(&fs_info->fs_roots_radix_lock); |
76dda93c | 3935 | |
1c1ea4f7 | 3936 | if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) { |
ef67963d | 3937 | ASSERT(root->log_root == NULL); |
1c1ea4f7 | 3938 | if (root->reloc_root) { |
00246528 | 3939 | btrfs_put_root(root->reloc_root); |
1c1ea4f7 LB |
3940 | root->reloc_root = NULL; |
3941 | } | |
3942 | } | |
3321719e | 3943 | |
faa2dbf0 JB |
3944 | if (root->free_ino_pinned) |
3945 | __btrfs_remove_free_space_cache(root->free_ino_pinned); | |
3946 | if (root->free_ino_ctl) | |
3947 | __btrfs_remove_free_space_cache(root->free_ino_ctl); | |
0e996e7f JB |
3948 | if (root->ino_cache_inode) { |
3949 | iput(root->ino_cache_inode); | |
3950 | root->ino_cache_inode = NULL; | |
3951 | } | |
4785e24f JB |
3952 | if (drop_ref) |
3953 | btrfs_put_root(root); | |
2619ba1f CM |
3954 | } |
3955 | ||
c146afad | 3956 | int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info) |
cfaa7295 | 3957 | { |
c146afad YZ |
3958 | u64 root_objectid = 0; |
3959 | struct btrfs_root *gang[8]; | |
65d33fd7 QW |
3960 | int i = 0; |
3961 | int err = 0; | |
3962 | unsigned int ret = 0; | |
e089f05c | 3963 | |
c146afad | 3964 | while (1) { |
efc34534 | 3965 | spin_lock(&fs_info->fs_roots_radix_lock); |
c146afad YZ |
3966 | ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix, |
3967 | (void **)gang, root_objectid, | |
3968 | ARRAY_SIZE(gang)); | |
65d33fd7 | 3969 | if (!ret) { |
efc34534 | 3970 | spin_unlock(&fs_info->fs_roots_radix_lock); |
c146afad | 3971 | break; |
65d33fd7 | 3972 | } |
5d4f98a2 | 3973 | root_objectid = gang[ret - 1]->root_key.objectid + 1; |
65d33fd7 | 3974 | |
c146afad | 3975 | for (i = 0; i < ret; i++) { |
65d33fd7 QW |
3976 | /* Avoid to grab roots in dead_roots */ |
3977 | if (btrfs_root_refs(&gang[i]->root_item) == 0) { | |
3978 | gang[i] = NULL; | |
3979 | continue; | |
3980 | } | |
3981 | /* grab all the search result for later use */ | |
00246528 | 3982 | gang[i] = btrfs_grab_root(gang[i]); |
65d33fd7 | 3983 | } |
efc34534 | 3984 | spin_unlock(&fs_info->fs_roots_radix_lock); |
66b4ffd1 | 3985 | |
65d33fd7 QW |
3986 | for (i = 0; i < ret; i++) { |
3987 | if (!gang[i]) | |
3988 | continue; | |
c146afad | 3989 | root_objectid = gang[i]->root_key.objectid; |
66b4ffd1 JB |
3990 | err = btrfs_orphan_cleanup(gang[i]); |
3991 | if (err) | |
65d33fd7 | 3992 | break; |
00246528 | 3993 | btrfs_put_root(gang[i]); |
c146afad YZ |
3994 | } |
3995 | root_objectid++; | |
3996 | } | |
65d33fd7 QW |
3997 | |
3998 | /* release the uncleaned roots due to error */ | |
3999 | for (; i < ret; i++) { | |
4000 | if (gang[i]) | |
00246528 | 4001 | btrfs_put_root(gang[i]); |
65d33fd7 QW |
4002 | } |
4003 | return err; | |
c146afad | 4004 | } |
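
btrfs_cleanup_fs_roots() walks the fs_roots radix tree in gangs of eight: look up a batch at or after the current objectid while holding fs_roots_radix_lock, grab references, drop the lock, run the orphan cleanup, then resume from the last objectid seen plus one. A small userspace sketch of that batching and resume logic, with a sorted array standing in for the radix tree:

/* illustrative sketch only, not part of disk-io.c */
#include <stdio.h>

/* stand-in for the roots radix tree: sorted ids of live roots */
static const unsigned long roots[] = { 5, 256, 257, 260, 1000, 1001 };
#define NROOTS (sizeof(roots) / sizeof(roots[0]))

/* gang lookup: fill up to max ids that are >= start, return how many found */
static unsigned int gang_lookup(unsigned long start, unsigned long *out,
				unsigned int max)
{
	unsigned int n = 0;
	size_t i;

	for (i = 0; i < NROOTS && n < max; i++)
		if (roots[i] >= start)
			out[n++] = roots[i];
	return n;
}

int main(void)
{
	unsigned long gang[2];	/* tiny batch to make the resume step visible */
	unsigned long next = 0;
	unsigned int ret, i;

	while (1) {
		ret = gang_lookup(next, gang, 2);
		if (!ret)
			break;
		/* resume after the last id we saw, like root_objectid + 1 */
		next = gang[ret - 1] + 1;
		for (i = 0; i < ret; i++)
			printf("cleaning root %lu\n", gang[i]);
	}
	return 0;
}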
a2135011 | 4005 | |
6bccf3ab | 4006 | int btrfs_commit_super(struct btrfs_fs_info *fs_info) |
c146afad | 4007 | { |
6bccf3ab | 4008 | struct btrfs_root *root = fs_info->tree_root; |
c146afad | 4009 | struct btrfs_trans_handle *trans; |
a74a4b97 | 4010 | |
0b246afa | 4011 | mutex_lock(&fs_info->cleaner_mutex); |
2ff7e61e | 4012 | btrfs_run_delayed_iputs(fs_info); |
0b246afa JM |
4013 | mutex_unlock(&fs_info->cleaner_mutex); |
4014 | wake_up_process(fs_info->cleaner_kthread); | |
c71bf099 YZ |
4015 | |
4016 | /* wait until the ongoing cleanup work is done */ | |
0b246afa JM |
4017 | down_write(&fs_info->cleanup_work_sem); |
4018 | up_write(&fs_info->cleanup_work_sem); | |
c71bf099 | 4019 | |
7a7eaa40 | 4020 | trans = btrfs_join_transaction(root); |
3612b495 TI |
4021 | if (IS_ERR(trans)) |
4022 | return PTR_ERR(trans); | |
3a45bb20 | 4023 | return btrfs_commit_transaction(trans); |
c146afad YZ |
4024 | } |
4025 | ||
b105e927 | 4026 | void __cold close_ctree(struct btrfs_fs_info *fs_info) |
c146afad | 4027 | { |
c146afad YZ |
4028 | int ret; |
4029 | ||
afcdd129 | 4030 | set_bit(BTRFS_FS_CLOSING_START, &fs_info->flags); |
d6fd0ae2 OS |
4031 | /* |
4032 | * We don't want the cleaner to start new transactions, add more delayed | |
4033 | * iputs, etc. while we're closing. We can't use kthread_stop() yet | |
4034 | * because that frees the task_struct, and the transaction kthread might | |
4035 | * still try to wake up the cleaner. | |
4036 | */ | |
4037 | kthread_park(fs_info->cleaner_kthread); | |
c146afad | 4038 | |
7343dd61 | 4039 | /* wait for the qgroup rescan worker to stop */ |
d06f23d6 | 4040 | btrfs_qgroup_wait_for_completion(fs_info, false); |
7343dd61 | 4041 | |
803b2f54 SB |
4042 | /* wait for the uuid_scan task to finish */ |
4043 | down(&fs_info->uuid_tree_rescan_sem); | |
4044 | /* avoid complaints from lockdep et al., set sem back to initial state */ |
4045 | up(&fs_info->uuid_tree_rescan_sem); | |
4046 | ||
837d5b6e | 4047 | /* pause the restriper - we want it to resume on the next mount */ |
aa1b8cd4 | 4048 | btrfs_pause_balance(fs_info); |
837d5b6e | 4049 | |
8dabb742 SB |
4050 | btrfs_dev_replace_suspend_for_unmount(fs_info); |
4051 | ||
aa1b8cd4 | 4052 | btrfs_scrub_cancel(fs_info); |
4cb5300b CM |
4053 | |
4054 | /* wait for any defraggers to finish */ | |
4055 | wait_event(fs_info->transaction_wait, | |
4056 | (atomic_read(&fs_info->defrag_running) == 0)); | |
4057 | ||
4058 | /* clear out the rbtree of defraggable inodes */ | |
26176e7c | 4059 | btrfs_cleanup_defrag_inodes(fs_info); |
4cb5300b | 4060 | |
21c7e756 | 4061 | cancel_work_sync(&fs_info->async_reclaim_work); |
57056740 | 4062 | cancel_work_sync(&fs_info->async_data_reclaim_work); |
21c7e756 | 4063 | |
b0643e59 DZ |
4064 | /* Cancel or finish ongoing discard work */ |
4065 | btrfs_discard_cleanup(fs_info); | |
4066 | ||
bc98a42c | 4067 | if (!sb_rdonly(fs_info->sb)) { |
e44163e1 | 4068 | /* |
d6fd0ae2 OS |
4069 | * The cleaner kthread is parked, so do one final pass over |
4070 | * unused block groups. | |
e44163e1 | 4071 | */ |
0b246afa | 4072 | btrfs_delete_unused_bgs(fs_info); |
e44163e1 | 4073 | |
f0cc2cd7 FM |
4074 | /* |
4075 | * There might be existing delayed inode workers still running | |
4076 | * and holding an empty delayed inode item. We must wait for | |
4077 | * them to complete first because they can create a transaction. | |
4078 | * This happens when someone calls btrfs_balance_delayed_items() | |
4079 | * and then a transaction commit runs the same delayed nodes | |
4080 | * before any delayed worker has done something with the nodes. | |
4081 | * We must wait for any worker here and not at transaction | |
4082 | * commit time since that could cause a deadlock. | |
4083 | * This is a very rare case. | |
4084 | */ | |
4085 | btrfs_flush_workqueue(fs_info->delayed_workers); | |
4086 | ||
6bccf3ab | 4087 | ret = btrfs_commit_super(fs_info); |
acce952b | 4088 | if (ret) |
04892340 | 4089 | btrfs_err(fs_info, "commit super ret %d", ret); |
acce952b | 4090 | } |
4091 | ||
af722733 LB |
4092 | if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state) || |
4093 | test_bit(BTRFS_FS_STATE_TRANS_ABORTED, &fs_info->fs_state)) | |
2ff7e61e | 4094 | btrfs_error_commit_super(fs_info); |
0f7d52f4 | 4095 | |
e3029d9f AV |
4096 | kthread_stop(fs_info->transaction_kthread); |
4097 | kthread_stop(fs_info->cleaner_kthread); | |
8929ecfa | 4098 | |
e187831e | 4099 | ASSERT(list_empty(&fs_info->delayed_iputs)); |
afcdd129 | 4100 | set_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags); |
f25784b3 | 4101 | |
5958253c QW |
4102 | if (btrfs_check_quota_leak(fs_info)) { |
4103 | WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG)); | |
4104 | btrfs_err(fs_info, "qgroup reserved space leaked"); | |
4105 | } | |
4106 | ||
04892340 | 4107 | btrfs_free_qgroup_config(fs_info); |
fe816d0f | 4108 | ASSERT(list_empty(&fs_info->delalloc_roots)); |
bcef60f2 | 4109 | |
963d678b | 4110 | if (percpu_counter_sum(&fs_info->delalloc_bytes)) { |
04892340 | 4111 | btrfs_info(fs_info, "at unmount delalloc count %lld", |
963d678b | 4112 | percpu_counter_sum(&fs_info->delalloc_bytes)); |
b0c68f8b | 4113 | } |
bcc63abb | 4114 | |
4297ff84 JB |
4115 | if (percpu_counter_sum(&fs_info->dio_bytes)) |
4116 | btrfs_info(fs_info, "at unmount dio bytes count %lld", | |
4117 | percpu_counter_sum(&fs_info->dio_bytes)); | |
4118 | ||
6618a59b | 4119 | btrfs_sysfs_remove_mounted(fs_info); |
b7c35e81 | 4120 | btrfs_sysfs_remove_fsid(fs_info->fs_devices); |
5ac1d209 | 4121 | |
1a4319cc LB |
4122 | btrfs_put_block_group_cache(fs_info); |
4123 | ||
de348ee0 WS |
4124 | /* |
4125 | * we must make sure there is not any read request to | |
4126 | * submit after we stopping all workers. | |
4127 | */ | |
4128 | invalidate_inode_pages2(fs_info->btree_inode->i_mapping); | |
96192499 JB |
4129 | btrfs_stop_all_workers(fs_info); |
4130 | ||
afcdd129 | 4131 | clear_bit(BTRFS_FS_OPEN, &fs_info->flags); |
4273eaff | 4132 | free_root_pointers(fs_info, true); |
8c38938c | 4133 | btrfs_free_fs_roots(fs_info); |
9ad6b7bc | 4134 | |
4e19443d JB |
4135 | /* |
4136 | * We must free the block groups after dropping the fs_roots as we could | |
4137 | * have had an IO error and have left over tree log blocks that aren't | |
4138 | * cleaned up until the fs roots are freed. This makes the block group | |
4139 | * accounting appear to be wrong because there are pending reserved bytes, |
4140 | * so make sure we do the block group cleanup afterwards. | |
4141 | */ | |
4142 | btrfs_free_block_groups(fs_info); | |
4143 | ||
13e6c37b | 4144 | iput(fs_info->btree_inode); |
d6bfde87 | 4145 | |
21adbd5c | 4146 | #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY |
0b246afa | 4147 | if (btrfs_test_opt(fs_info, CHECK_INTEGRITY)) |
2ff7e61e | 4148 | btrfsic_unmount(fs_info->fs_devices); |
21adbd5c SB |
4149 | #endif |
4150 | ||
0b86a832 | 4151 | btrfs_mapping_tree_free(&fs_info->mapping_tree); |
68c94e55 | 4152 | btrfs_close_devices(fs_info->fs_devices); |
eb60ceac CM |
4153 | } |
4154 | ||
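/*
 * Check that @buf is up to date and that its generation matches
 * @parent_transid.  Returns 1 when both checks pass, 0 otherwise, and
 * may pass through -EAGAIN from verify_parent_transid() when @atomic
 * is set.
 */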
b9fab919 CM |
4155 | int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid, |
4156 | int atomic) | |
5f39d397 | 4157 | { |
1259ab75 | 4158 | int ret; |
727011e0 | 4159 | struct inode *btree_inode = buf->pages[0]->mapping->host; |
1259ab75 | 4160 | |
0b32f4bb | 4161 | ret = extent_buffer_uptodate(buf); |
1259ab75 CM |
4162 | if (!ret) |
4163 | return ret; | |
4164 | ||
4165 | ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf, | |
b9fab919 CM |
4166 | parent_transid, atomic); |
4167 | if (ret == -EAGAIN) | |
4168 | return ret; | |
1259ab75 | 4169 | return !ret; |
5f39d397 CM |
4170 | } |
4171 | ||
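/*
 * Mark a btree buffer dirty.  The buffer must be tree-locked, and a
 * mismatch between its generation and the running transaction's
 * generation triggers a WARN.  Newly dirtied buffers are accounted in
 * fs_info->dirty_metadata_bytes so that btrfs_btree_balance_dirty()
 * can throttle writers.
 */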
5f39d397 CM |
4172 | void btrfs_mark_buffer_dirty(struct extent_buffer *buf) |
4173 | { | |
0b246afa | 4174 | struct btrfs_fs_info *fs_info; |
06ea65a3 | 4175 | struct btrfs_root *root; |
5f39d397 | 4176 | u64 transid = btrfs_header_generation(buf); |
b9473439 | 4177 | int was_dirty; |
b4ce94de | 4178 | |
06ea65a3 JB |
4179 | #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS |
4180 | /* | |
4181 | * This is a fast path, so only do this check if we have the sanity |
52042d8e | 4182 | * tests enabled. Nothing outside of the sanity tests should be |
06ea65a3 JB |
4183 | * dirtying unmapped buffers. |
4184 | */ | |
b0132a3b | 4185 | if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &buf->bflags))) |
06ea65a3 JB |
4186 | return; |
4187 | #endif | |
4188 | root = BTRFS_I(buf->pages[0]->mapping->host)->root; | |
0b246afa | 4189 | fs_info = root->fs_info; |
b9447ef8 | 4190 | btrfs_assert_tree_locked(buf); |
0b246afa | 4191 | if (transid != fs_info->generation) |
5d163e0e | 4192 | WARN(1, KERN_CRIT "btrfs transid mismatch buffer %llu, found %llu running %llu\n", |
0b246afa | 4193 | buf->start, transid, fs_info->generation); |
0b32f4bb | 4194 | was_dirty = set_extent_buffer_dirty(buf); |
e2d84521 | 4195 | if (!was_dirty) |
104b4e51 NB |
4196 | percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, |
4197 | buf->len, | |
4198 | fs_info->dirty_metadata_batch); | |
1f21ef0a | 4199 | #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY |
69fc6cbb QW |
4200 | /* |
4201 | * btrfs_mark_buffer_dirty() can be called with the item pointer set |
4202 | * but the item data not yet updated, so only check item |
4203 | * pointers here, not item data. |
4204 | */ | |
4205 | if (btrfs_header_level(buf) == 0 && | |
cfdaad5e | 4206 | btrfs_check_leaf_relaxed(buf)) { |
a4f78750 | 4207 | btrfs_print_leaf(buf); |
1f21ef0a FM |
4208 | ASSERT(0); |
4209 | } | |
4210 | #endif | |
eb60ceac CM |
4211 | } |
4212 | ||
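/*
 * Throttle callers that are dirtying btree metadata.  If @flush_delayed
 * is set, delayed items are balanced first; then, when the amount of
 * dirty metadata exceeds BTRFS_DIRTY_METADATA_THRESH, push back via
 * balance_dirty_pages_ratelimited() on the btree inode.  Skipped for
 * PF_MEMALLOC tasks to avoid recursing into reclaim.
 *
 * Illustrative call pattern (a sketch, not copied from any particular
 * caller):
 *
 *	btrfs_mark_buffer_dirty(leaf);
 *	btrfs_release_path(path);
 *	btrfs_btree_balance_dirty(fs_info);
 */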
2ff7e61e | 4213 | static void __btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info, |
b53d3f5d | 4214 | int flush_delayed) |
16cdcec7 MX |
4215 | { |
4216 | /* | |
4217 | * It looks as though older kernels can get into trouble with |
4218 | * this code; they end up stuck in balance_dirty_pages() forever. |
4219 | */ | |
e2d84521 | 4220 | int ret; |
16cdcec7 MX |
4221 | |
4222 | if (current->flags & PF_MEMALLOC) | |
4223 | return; | |
4224 | ||
b53d3f5d | 4225 | if (flush_delayed) |
2ff7e61e | 4226 | btrfs_balance_delayed_items(fs_info); |
16cdcec7 | 4227 | |
d814a491 EL |
4228 | ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes, |
4229 | BTRFS_DIRTY_METADATA_THRESH, | |
4230 | fs_info->dirty_metadata_batch); | |
e2d84521 | 4231 | if (ret > 0) { |
0b246afa | 4232 | balance_dirty_pages_ratelimited(fs_info->btree_inode->i_mapping); |
16cdcec7 | 4233 | } |
16cdcec7 MX |
4234 | } |
4235 | ||
2ff7e61e | 4236 | void btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info) |
35b7e476 | 4237 | { |
2ff7e61e | 4238 | __btrfs_btree_balance_dirty(fs_info, 1); |
b53d3f5d | 4239 | } |
585ad2c3 | 4240 | |
2ff7e61e | 4241 | void btrfs_btree_balance_dirty_nodelay(struct btrfs_fs_info *fs_info) |
b53d3f5d | 4242 | { |
2ff7e61e | 4243 | __btrfs_btree_balance_dirty(fs_info, 0); |
35b7e476 | 4244 | } |
6b80053d | 4245 | |
581c1760 QW |
4246 | int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid, int level, |
4247 | struct btrfs_key *first_key) | |
6b80053d | 4248 | { |
5ab12d1f | 4249 | return btree_read_extent_buffer_pages(buf, parent_transid, |
581c1760 | 4250 | level, first_key); |
6b80053d | 4251 | } |
0da5468f | 4252 | |
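/*
 * Error-path counterpart of btrfs_commit_super(): instead of
 * committing, tear down any in-flight transactions, run delayed iputs
 * under the cleaner mutex and wait for outstanding cleanup work.
 * Called from close_ctree() when the fs is in an error or aborted
 * state.
 */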
2ff7e61e | 4253 | static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info) |
acce952b | 4254 | { |
fe816d0f NB |
4255 | /* Clean up the FS via the transaction cleanup path */ |
4256 | btrfs_cleanup_transaction(fs_info); | |
4257 | ||
0b246afa | 4258 | mutex_lock(&fs_info->cleaner_mutex); |
2ff7e61e | 4259 | btrfs_run_delayed_iputs(fs_info); |
0b246afa | 4260 | mutex_unlock(&fs_info->cleaner_mutex); |
acce952b | 4261 | |
0b246afa JM |
4262 | down_write(&fs_info->cleanup_work_sem); |
4263 | up_write(&fs_info->cleanup_work_sem); | |
acce952b | 4264 | } |
4265 | ||
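/*
 * Walk every root in fs_roots_radix and free its tree log, then free
 * the log root tree itself.  Called from btrfs_cleanup_transaction()
 * while tearing down after an error.
 */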
ef67963d JB |
4266 | static void btrfs_drop_all_logs(struct btrfs_fs_info *fs_info) |
4267 | { | |
4268 | struct btrfs_root *gang[8]; | |
4269 | u64 root_objectid = 0; | |
4270 | int ret; | |
4271 | ||
4272 | spin_lock(&fs_info->fs_roots_radix_lock); | |
4273 | while ((ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix, | |
4274 | (void **)gang, root_objectid, | |
4275 | ARRAY_SIZE(gang))) != 0) { | |
4276 | int i; | |
4277 | ||
4278 | for (i = 0; i < ret; i++) | |
4279 | gang[i] = btrfs_grab_root(gang[i]); | |
4280 | spin_unlock(&fs_info->fs_roots_radix_lock); | |
4281 | ||
4282 | for (i = 0; i < ret; i++) { | |
4283 | if (!gang[i]) | |
4284 | continue; | |
4285 | root_objectid = gang[i]->root_key.objectid; | |
4286 | btrfs_free_log(NULL, gang[i]); | |
4287 | btrfs_put_root(gang[i]); | |
4288 | } | |
4289 | root_objectid++; | |
4290 | spin_lock(&fs_info->fs_roots_radix_lock); | |
4291 | } | |
4292 | spin_unlock(&fs_info->fs_roots_radix_lock); | |
4293 | btrfs_free_log_root_tree(NULL, fs_info); | |
4294 | } | |
4295 | ||
143bede5 | 4296 | static void btrfs_destroy_ordered_extents(struct btrfs_root *root) |
acce952b | 4297 | { |
acce952b | 4298 | struct btrfs_ordered_extent *ordered; |
acce952b | 4299 | |
199c2a9c | 4300 | spin_lock(&root->ordered_extent_lock); |
779880ef JB |
4301 | /* |
4302 | * This just short-circuits the ordered completion code, which will |
4303 | * make sure the ordered extent gets properly cleaned up. |
4304 | */ | |
199c2a9c | 4305 | list_for_each_entry(ordered, &root->ordered_extents, |
779880ef JB |
4306 | root_extent_list) |
4307 | set_bit(BTRFS_ORDERED_IOERR, &ordered->flags); | |
199c2a9c MX |
4308 | spin_unlock(&root->ordered_extent_lock); |
4309 | } | |
4310 | ||
4311 | static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info) | |
4312 | { | |
4313 | struct btrfs_root *root; | |
4314 | struct list_head splice; | |
4315 | ||
4316 | INIT_LIST_HEAD(&splice); | |
4317 | ||
4318 | spin_lock(&fs_info->ordered_root_lock); | |
4319 | list_splice_init(&fs_info->ordered_roots, &splice); | |
4320 | while (!list_empty(&splice)) { | |
4321 | root = list_first_entry(&splice, struct btrfs_root, | |
4322 | ordered_root); | |
1de2cfde JB |
4323 | list_move_tail(&root->ordered_root, |
4324 | &fs_info->ordered_roots); | |
199c2a9c | 4325 | |
2a85d9ca | 4326 | spin_unlock(&fs_info->ordered_root_lock); |
199c2a9c MX |
4327 | btrfs_destroy_ordered_extents(root); |
4328 | ||
2a85d9ca LB |
4329 | cond_resched(); |
4330 | spin_lock(&fs_info->ordered_root_lock); | |
199c2a9c MX |
4331 | } |
4332 | spin_unlock(&fs_info->ordered_root_lock); | |
74d5d229 JB |
4333 | |
4334 | /* | |
4335 | * We need this here because if we've been flipped read-only we won't | |
4336 | * get a sync() from the umount, so we need to make sure any ordered |
4337 | * extents whose dirty pages haven't started writeout yet |
4338 | * actually get run and error out properly. | |
4339 | */ | |
4340 | btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1); | |
acce952b | 4341 | } |
4342 | ||
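/*
 * Throw away every delayed ref queued in @trans.  For heads that still
 * have their reservation (must_insert_reserved), move the bytes back
 * to pinned in the owning block group and unpin the range through the
 * error path so the space accounting stays consistent.
 */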
35a3621b | 4343 | static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans, |
2ff7e61e | 4344 | struct btrfs_fs_info *fs_info) |
acce952b | 4345 | { |
4346 | struct rb_node *node; | |
4347 | struct btrfs_delayed_ref_root *delayed_refs; | |
4348 | struct btrfs_delayed_ref_node *ref; | |
4349 | int ret = 0; | |
4350 | ||
4351 | delayed_refs = &trans->delayed_refs; | |
4352 | ||
4353 | spin_lock(&delayed_refs->lock); | |
d7df2c79 | 4354 | if (atomic_read(&delayed_refs->num_entries) == 0) { |
cfece4db | 4355 | spin_unlock(&delayed_refs->lock); |
b79ce3dd | 4356 | btrfs_debug(fs_info, "delayed_refs has no entries"); |
acce952b | 4357 | return ret; |
4358 | } | |
4359 | ||
5c9d028b | 4360 | while ((node = rb_first_cached(&delayed_refs->href_root)) != NULL) { |
d7df2c79 | 4361 | struct btrfs_delayed_ref_head *head; |
0e0adbcf | 4362 | struct rb_node *n; |
e78417d1 | 4363 | bool pin_bytes = false; |
acce952b | 4364 | |
d7df2c79 JB |
4365 | head = rb_entry(node, struct btrfs_delayed_ref_head, |
4366 | href_node); | |
3069bd26 | 4367 | if (btrfs_delayed_ref_lock(delayed_refs, head)) |
d7df2c79 | 4368 | continue; |
3069bd26 | 4369 | |
d7df2c79 | 4370 | spin_lock(&head->lock); |
e3d03965 | 4371 | while ((n = rb_first_cached(&head->ref_tree)) != NULL) { |
0e0adbcf JB |
4372 | ref = rb_entry(n, struct btrfs_delayed_ref_node, |
4373 | ref_node); | |
d7df2c79 | 4374 | ref->in_tree = 0; |
e3d03965 | 4375 | rb_erase_cached(&ref->ref_node, &head->ref_tree); |
0e0adbcf | 4376 | RB_CLEAR_NODE(&ref->ref_node); |
1d57ee94 WX |
4377 | if (!list_empty(&ref->add_list)) |
4378 | list_del(&ref->add_list); | |
d7df2c79 JB |
4379 | atomic_dec(&delayed_refs->num_entries); |
4380 | btrfs_put_delayed_ref(ref); | |
e78417d1 | 4381 | } |
d7df2c79 JB |
4382 | if (head->must_insert_reserved) |
4383 | pin_bytes = true; | |
4384 | btrfs_free_delayed_extent_op(head->extent_op); | |
fa781cea | 4385 | btrfs_delete_ref_head(delayed_refs, head); |
d7df2c79 JB |
4386 | spin_unlock(&head->lock); |
4387 | spin_unlock(&delayed_refs->lock); | |
4388 | mutex_unlock(&head->mutex); | |
acce952b | 4389 | |
f603bb94 NB |
4390 | if (pin_bytes) { |
4391 | struct btrfs_block_group *cache; | |
4392 | ||
4393 | cache = btrfs_lookup_block_group(fs_info, head->bytenr); | |
4394 | BUG_ON(!cache); | |
4395 | ||
4396 | spin_lock(&cache->space_info->lock); | |
4397 | spin_lock(&cache->lock); | |
4398 | cache->pinned += head->num_bytes; | |
4399 | btrfs_space_info_update_bytes_pinned(fs_info, | |
4400 | cache->space_info, head->num_bytes); | |
4401 | cache->reserved -= head->num_bytes; | |
4402 | cache->space_info->bytes_reserved -= head->num_bytes; | |
4403 | spin_unlock(&cache->lock); | |
4404 | spin_unlock(&cache->space_info->lock); | |
4405 | percpu_counter_add_batch( | |
4406 | &cache->space_info->total_bytes_pinned, | |
4407 | head->num_bytes, BTRFS_TOTAL_BYTES_PINNED_BATCH); | |
4408 | ||
4409 | btrfs_put_block_group(cache); | |
4410 | ||
4411 | btrfs_error_unpin_extent_range(fs_info, head->bytenr, | |
4412 | head->bytenr + head->num_bytes - 1); | |
4413 | } | |
31890da0 | 4414 | btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head); |
d278850e | 4415 | btrfs_put_delayed_ref_head(head); |
acce952b | 4416 | cond_resched(); |
4417 | spin_lock(&delayed_refs->lock); | |
4418 | } | |
81f7eb00 | 4419 | btrfs_qgroup_destroy_extent_records(trans); |
acce952b | 4420 | |
4421 | spin_unlock(&delayed_refs->lock); | |
4422 | ||
4423 | return ret; | |
4424 | } | |
4425 | ||
143bede5 | 4426 | static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root) |
acce952b | 4427 | { |
4428 | struct btrfs_inode *btrfs_inode; | |
4429 | struct list_head splice; | |
4430 | ||
4431 | INIT_LIST_HEAD(&splice); | |
4432 | ||
eb73c1b7 MX |
4433 | spin_lock(&root->delalloc_lock); |
4434 | list_splice_init(&root->delalloc_inodes, &splice); | |
acce952b | 4435 | |
4436 | while (!list_empty(&splice)) { | |
fe816d0f | 4437 | struct inode *inode = NULL; |
eb73c1b7 MX |
4438 | btrfs_inode = list_first_entry(&splice, struct btrfs_inode, |
4439 | delalloc_inodes); | |
fe816d0f | 4440 | __btrfs_del_delalloc_inode(root, btrfs_inode); |
eb73c1b7 | 4441 | spin_unlock(&root->delalloc_lock); |
acce952b | 4442 | |
fe816d0f NB |
4443 | /* |
4444 | * Make sure we get a live inode and that it won't disappear |
4445 | * in the meantime. |
4446 | */ | |
4447 | inode = igrab(&btrfs_inode->vfs_inode); | |
4448 | if (inode) { | |
4449 | invalidate_inode_pages2(inode->i_mapping); | |
4450 | iput(inode); | |
4451 | } | |
eb73c1b7 | 4452 | spin_lock(&root->delalloc_lock); |
acce952b | 4453 | } |
eb73c1b7 MX |
4454 | spin_unlock(&root->delalloc_lock); |
4455 | } | |
4456 | ||
4457 | static void btrfs_destroy_all_delalloc_inodes(struct btrfs_fs_info *fs_info) | |
4458 | { | |
4459 | struct btrfs_root *root; | |
4460 | struct list_head splice; | |
4461 | ||
4462 | INIT_LIST_HEAD(&splice); | |
4463 | ||
4464 | spin_lock(&fs_info->delalloc_root_lock); | |
4465 | list_splice_init(&fs_info->delalloc_roots, &splice); | |
4466 | while (!list_empty(&splice)) { | |
4467 | root = list_first_entry(&splice, struct btrfs_root, | |
4468 | delalloc_root); | |
00246528 | 4469 | root = btrfs_grab_root(root); |
eb73c1b7 MX |
4470 | BUG_ON(!root); |
4471 | spin_unlock(&fs_info->delalloc_root_lock); | |
4472 | ||
4473 | btrfs_destroy_delalloc_inodes(root); | |
00246528 | 4474 | btrfs_put_root(root); |
eb73c1b7 MX |
4475 | |
4476 | spin_lock(&fs_info->delalloc_root_lock); | |
4477 | } | |
4478 | spin_unlock(&fs_info->delalloc_root_lock); | |
acce952b | 4479 | } |
4480 | ||
2ff7e61e | 4481 | static int btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info, |
acce952b | 4482 | struct extent_io_tree *dirty_pages, |
4483 | int mark) | |
4484 | { | |
4485 | int ret; | |
acce952b | 4486 | struct extent_buffer *eb; |
4487 | u64 start = 0; | |
4488 | u64 end; | |
acce952b | 4489 | |
4490 | while (1) { | |
4491 | ret = find_first_extent_bit(dirty_pages, start, &start, &end, | |
e6138876 | 4492 | mark, NULL); |
acce952b | 4493 | if (ret) |
4494 | break; | |
4495 | ||
91166212 | 4496 | clear_extent_bits(dirty_pages, start, end, mark); |
acce952b | 4497 | while (start <= end) { |
0b246afa JM |
4498 | eb = find_extent_buffer(fs_info, start); |
4499 | start += fs_info->nodesize; | |
fd8b2b61 | 4500 | if (!eb) |
acce952b | 4501 | continue; |
fd8b2b61 | 4502 | wait_on_extent_buffer_writeback(eb); |
acce952b | 4503 | |
fd8b2b61 JB |
4504 | if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, |
4505 | &eb->bflags)) | |
4506 | clear_extent_buffer_dirty(eb); | |
4507 | free_extent_buffer_stale(eb); | |
acce952b | 4508 | } |
4509 | } | |
4510 | ||
4511 | return ret; | |
4512 | } | |
4513 | ||
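/*
 * Clear every EXTENT_DIRTY range in the pinned extents tree and hand
 * it to btrfs_error_unpin_extent_range().  unused_bg_unpin_mutex
 * serialises this against btrfs_finish_extent_commit() working on the
 * same ranges.
 */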
2ff7e61e | 4514 | static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info, |
fe119a6e | 4515 | struct extent_io_tree *unpin) |
acce952b | 4516 | { |
acce952b | 4517 | u64 start; |
4518 | u64 end; | |
4519 | int ret; | |
4520 | ||
acce952b | 4521 | while (1) { |
0e6ec385 FM |
4522 | struct extent_state *cached_state = NULL; |
4523 | ||
fcd5e742 LF |
4524 | /* |
4525 | * btrfs_finish_extent_commit() may pick up the same range as |
4526 | * ours between find_first_extent_bit() and clear_extent_dirty(). |
4527 | * Hence, hold the unused_bg_unpin_mutex to avoid unpinning |
4528 | * the same extent range twice. |
4529 | */ | |
4530 | mutex_lock(&fs_info->unused_bg_unpin_mutex); | |
acce952b | 4531 | ret = find_first_extent_bit(unpin, 0, &start, &end, |
0e6ec385 | 4532 | EXTENT_DIRTY, &cached_state); |
fcd5e742 LF |
4533 | if (ret) { |
4534 | mutex_unlock(&fs_info->unused_bg_unpin_mutex); | |
acce952b | 4535 | break; |
fcd5e742 | 4536 | } |
acce952b | 4537 | |
0e6ec385 FM |
4538 | clear_extent_dirty(unpin, start, end, &cached_state); |
4539 | free_extent_state(cached_state); | |
2ff7e61e | 4540 | btrfs_error_unpin_extent_range(fs_info, start, end); |
fcd5e742 | 4541 | mutex_unlock(&fs_info->unused_bg_unpin_mutex); |
acce952b | 4542 | cond_resched(); |
4543 | } | |
4544 | ||
4545 | return 0; | |
4546 | } | |
4547 | ||
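/*
 * Drop the free space cache inode that a dirty block group was writing
 * through (cache->io_ctl.inode) and drop a reference on the block
 * group.
 */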
32da5386 | 4548 | static void btrfs_cleanup_bg_io(struct btrfs_block_group *cache) |
c79a1751 LB |
4549 | { |
4550 | struct inode *inode; | |
4551 | ||
4552 | inode = cache->io_ctl.inode; | |
4553 | if (inode) { | |
4554 | invalidate_inode_pages2(inode->i_mapping); | |
4555 | BTRFS_I(inode)->generation = 0; | |
4556 | cache->io_ctl.inode = NULL; | |
4557 | iput(inode); | |
4558 | } | |
bbc37d6e | 4559 | ASSERT(cache->io_ctl.pages == NULL); |
c79a1751 LB |
4560 | btrfs_put_block_group(cache); |
4561 | } | |
4562 | ||
4563 | void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans, | |
2ff7e61e | 4564 | struct btrfs_fs_info *fs_info) |
c79a1751 | 4565 | { |
32da5386 | 4566 | struct btrfs_block_group *cache; |
c79a1751 LB |
4567 | |
4568 | spin_lock(&cur_trans->dirty_bgs_lock); | |
4569 | while (!list_empty(&cur_trans->dirty_bgs)) { | |
4570 | cache = list_first_entry(&cur_trans->dirty_bgs, | |
32da5386 | 4571 | struct btrfs_block_group, |
c79a1751 | 4572 | dirty_list); |
c79a1751 LB |
4573 | |
4574 | if (!list_empty(&cache->io_list)) { | |
4575 | spin_unlock(&cur_trans->dirty_bgs_lock); | |
4576 | list_del_init(&cache->io_list); | |
4577 | btrfs_cleanup_bg_io(cache); | |
4578 | spin_lock(&cur_trans->dirty_bgs_lock); | |
4579 | } | |
4580 | ||
4581 | list_del_init(&cache->dirty_list); | |
4582 | spin_lock(&cache->lock); | |
4583 | cache->disk_cache_state = BTRFS_DC_ERROR; | |
4584 | spin_unlock(&cache->lock); | |
4585 | ||
4586 | spin_unlock(&cur_trans->dirty_bgs_lock); | |
4587 | btrfs_put_block_group(cache); | |
ba2c4d4e | 4588 | btrfs_delayed_refs_rsv_release(fs_info, 1); |
c79a1751 LB |
4589 | spin_lock(&cur_trans->dirty_bgs_lock); |
4590 | } | |
4591 | spin_unlock(&cur_trans->dirty_bgs_lock); | |
4592 | ||
45ae2c18 NB |
4593 | /* |
4594 | * Refer to the definition of the io_bgs member for details on why |
4595 | * it's safe to use it without any locking. |
4596 | */ | |
c79a1751 LB |
4597 | while (!list_empty(&cur_trans->io_bgs)) { |
4598 | cache = list_first_entry(&cur_trans->io_bgs, | |
32da5386 | 4599 | struct btrfs_block_group, |
c79a1751 | 4600 | io_list); |
c79a1751 LB |
4601 | |
4602 | list_del_init(&cache->io_list); | |
4603 | spin_lock(&cache->lock); | |
4604 | cache->disk_cache_state = BTRFS_DC_ERROR; | |
4605 | spin_unlock(&cache->lock); | |
4606 | btrfs_cleanup_bg_io(cache); | |
4607 | } | |
4608 | } | |
4609 | ||
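/*
 * Abort a single transaction: throw away its dirty block groups,
 * device commit lists, delayed refs and delayed inodes, walk it
 * through the usual commit states so that waiters are woken, and
 * destroy its dirty and pinned extents before marking it completed.
 */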
49b25e05 | 4610 | void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans, |
2ff7e61e | 4611 | struct btrfs_fs_info *fs_info) |
49b25e05 | 4612 | { |
bbbf7243 NB |
4613 | struct btrfs_device *dev, *tmp; |
4614 | ||
2ff7e61e | 4615 | btrfs_cleanup_dirty_bgs(cur_trans, fs_info); |
c79a1751 LB |
4616 | ASSERT(list_empty(&cur_trans->dirty_bgs)); |
4617 | ASSERT(list_empty(&cur_trans->io_bgs)); | |
4618 | ||
bbbf7243 NB |
4619 | list_for_each_entry_safe(dev, tmp, &cur_trans->dev_update_list, |
4620 | post_commit_list) { | |
4621 | list_del_init(&dev->post_commit_list); | |
4622 | } | |
4623 | ||
2ff7e61e | 4624 | btrfs_destroy_delayed_refs(cur_trans, fs_info); |
49b25e05 | 4625 | |
4a9d8bde | 4626 | cur_trans->state = TRANS_STATE_COMMIT_START; |
0b246afa | 4627 | wake_up(&fs_info->transaction_blocked_wait); |
49b25e05 | 4628 | |
4a9d8bde | 4629 | cur_trans->state = TRANS_STATE_UNBLOCKED; |
0b246afa | 4630 | wake_up(&fs_info->transaction_wait); |
49b25e05 | 4631 | |
ccdf9b30 | 4632 | btrfs_destroy_delayed_inodes(fs_info); |
49b25e05 | 4633 | |
2ff7e61e | 4634 | btrfs_destroy_marked_extents(fs_info, &cur_trans->dirty_pages, |
49b25e05 | 4635 | EXTENT_DIRTY); |
fe119a6e | 4636 | btrfs_destroy_pinned_extent(fs_info, &cur_trans->pinned_extents); |
49b25e05 | 4637 | |
4a9d8bde MX |
4638 | cur_trans->state = TRANS_STATE_COMPLETED; |
4639 | wake_up(&cur_trans->commit_wait); | |
49b25e05 JM |
4640 | } |
4641 | ||
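/*
 * Error-path replacement for a normal commit: wait for transactions
 * that are already committing, push every other transaction through
 * btrfs_cleanup_one_transaction(), then destroy all remaining ordered
 * extents, delayed inodes, delalloc inodes and tree logs.
 */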
2ff7e61e | 4642 | static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info) |
acce952b | 4643 | { |
4644 | struct btrfs_transaction *t; | |
acce952b | 4645 | |
0b246afa | 4646 | mutex_lock(&fs_info->transaction_kthread_mutex); |
acce952b | 4647 | |
0b246afa JM |
4648 | spin_lock(&fs_info->trans_lock); |
4649 | while (!list_empty(&fs_info->trans_list)) { | |
4650 | t = list_first_entry(&fs_info->trans_list, | |
724e2315 JB |
4651 | struct btrfs_transaction, list); |
4652 | if (t->state >= TRANS_STATE_COMMIT_START) { | |
9b64f57d | 4653 | refcount_inc(&t->use_count); |
0b246afa | 4654 | spin_unlock(&fs_info->trans_lock); |
2ff7e61e | 4655 | btrfs_wait_for_commit(fs_info, t->transid); |
724e2315 | 4656 | btrfs_put_transaction(t); |
0b246afa | 4657 | spin_lock(&fs_info->trans_lock); |
724e2315 JB |
4658 | continue; |
4659 | } | |
0b246afa | 4660 | if (t == fs_info->running_transaction) { |
724e2315 | 4661 | t->state = TRANS_STATE_COMMIT_DOING; |
0b246afa | 4662 | spin_unlock(&fs_info->trans_lock); |
724e2315 JB |
4663 | /* |
4664 | * Wait for num_writers to reach 0 since we don't currently |
4665 | * hold an open trans handle for this transaction. |
4666 | */ | |
4667 | wait_event(t->writer_wait, | |
4668 | atomic_read(&t->num_writers) == 0); | |
4669 | } else { | |
0b246afa | 4670 | spin_unlock(&fs_info->trans_lock); |
724e2315 | 4671 | } |
2ff7e61e | 4672 | btrfs_cleanup_one_transaction(t, fs_info); |
4a9d8bde | 4673 | |
0b246afa JM |
4674 | spin_lock(&fs_info->trans_lock); |
4675 | if (t == fs_info->running_transaction) | |
4676 | fs_info->running_transaction = NULL; | |
acce952b | 4677 | list_del_init(&t->list); |
0b246afa | 4678 | spin_unlock(&fs_info->trans_lock); |
acce952b | 4679 | |
724e2315 | 4680 | btrfs_put_transaction(t); |
2ff7e61e | 4681 | trace_btrfs_transaction_commit(fs_info->tree_root); |
0b246afa | 4682 | spin_lock(&fs_info->trans_lock); |
724e2315 | 4683 | } |
0b246afa JM |
4684 | spin_unlock(&fs_info->trans_lock); |
4685 | btrfs_destroy_all_ordered_extents(fs_info); | |
ccdf9b30 JM |
4686 | btrfs_destroy_delayed_inodes(fs_info); |
4687 | btrfs_assert_delayed_root_empty(fs_info); | |
0b246afa | 4688 | btrfs_destroy_all_delalloc_inodes(fs_info); |
ef67963d | 4689 | btrfs_drop_all_logs(fs_info); |
0b246afa | 4690 | mutex_unlock(&fs_info->transaction_kthread_mutex); |
acce952b | 4691 | |
4692 | return 0; | |
4693 | } |