// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/radix-tree.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/migrate.h>
#include <linux/ratelimit.h>
#include <linux/uuid.h>
#include <linux/semaphore.h>
#include <linux/error-injection.h>
#include <linux/crc32c.h>
#include <asm/unaligned.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "print-tree.h"
#include "locking.h"
#include "tree-log.h"
#include "free-space-cache.h"
#include "free-space-tree.h"
#include "inode-map.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "dev-replace.h"
#include "raid56.h"
#include "sysfs.h"
#include "qgroup.h"
#include "compression.h"
#include "tree-checker.h"
#include "ref-verify.h"

#ifdef CONFIG_X86
#include <asm/cpufeature.h>
#endif

#define BTRFS_SUPER_FLAG_SUPP	(BTRFS_HEADER_FLAG_WRITTEN |\
				 BTRFS_HEADER_FLAG_RELOC |\
				 BTRFS_SUPER_FLAG_ERROR |\
				 BTRFS_SUPER_FLAG_SEEDING |\
				 BTRFS_SUPER_FLAG_METADUMP |\
				 BTRFS_SUPER_FLAG_METADUMP_V2)

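/*
 * Illustrative note, not a call site shown here: the mask above is what
 * superblock validation is expected to test incoming flags against, roughly
 *
 *	if (btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP)
 *		treat the superblock as invalid;
 *
 * The exact check lives in the superblock validation path later in this file
 * and may differ between kernel versions.
 */
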
static const struct extent_io_ops btree_extent_io_ops;
static void end_workqueue_fn(struct btrfs_work *work);
static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
				      struct btrfs_fs_info *fs_info);
static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root);
static int btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
					struct extent_io_tree *dirty_pages,
					int mark);
static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
				       struct extent_io_tree *pinned_extents);
static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info);
static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info);

/*
 * btrfs_end_io_wq structs are used to do processing in task context when an IO
 * is complete.  This is used during reads to verify checksums, and it is used
 * by writes to insert metadata for new file extents after IO is complete.
 */
struct btrfs_end_io_wq {
	struct bio *bio;
	bio_end_io_t *end_io;
	void *private;
	struct btrfs_fs_info *info;
	blk_status_t status;
	enum btrfs_wq_endio_type metadata;
	struct btrfs_work work;
};

static struct kmem_cache *btrfs_end_io_wq_cache;

int __init btrfs_end_io_wq_init(void)
{
	btrfs_end_io_wq_cache = kmem_cache_create("btrfs_end_io_wq",
					sizeof(struct btrfs_end_io_wq),
					0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!btrfs_end_io_wq_cache)
		return -ENOMEM;
	return 0;
}

void __cold btrfs_end_io_wq_exit(void)
{
	kmem_cache_destroy(btrfs_end_io_wq_cache);
}

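/*
 * A minimal usage sketch (assumed; the real callers are in the btrfs module
 * init/exit path in super.c, not in this file):
 *
 *	if (btrfs_end_io_wq_init())	-- once at module load
 *		return -ENOMEM;
 *	...
 *	btrfs_end_io_wq_exit();		-- once at module unload
 */
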
/*
 * async submit bios are used to offload expensive checksumming
 * onto the worker threads.  They checksum file and metadata bios
 * just before they are sent down the IO stack.
 */
struct async_submit_bio {
	void *private_data;
	struct bio *bio;
	extent_submit_bio_start_t *submit_bio_start;
	int mirror_num;
	/*
	 * bio_offset is optional, can be used if the pages in the bio
	 * can't tell us where in the file the bio should go
	 */
	u64 bio_offset;
	struct btrfs_work work;
	blk_status_t status;
};

/*
 * Lockdep class keys for extent_buffer->lock's in this root.  For a given
 * eb, the lockdep key is determined by the btrfs_root it belongs to and
 * the level the eb occupies in the tree.
 *
 * Different roots are used for different purposes and may nest inside each
 * other and they require separate keysets.  As lockdep keys should be
 * static, assign keysets according to the purpose of the root as indicated
 * by btrfs_root->root_key.objectid.  This ensures that all special purpose
 * roots have separate keysets.
 *
 * Lock-nesting across peer nodes is always done with the immediate parent
 * node locked thus preventing deadlock.  As lockdep doesn't know this, use
 * subclass to avoid triggering lockdep warning in such cases.
 *
 * The key is set by the readpage_end_io_hook after the buffer has passed
 * csum validation but before the pages are unlocked.  It is also set by
 * btrfs_init_new_buffer on freshly allocated blocks.
 *
 * We also add a check to make sure the highest level of the tree is the
 * same as our lockdep setup here.  If BTRFS_MAX_LEVEL changes, this code
 * needs update as well.
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# if BTRFS_MAX_LEVEL != 8
#  error
# endif

static struct btrfs_lockdep_keyset {
	u64			id;		/* root objectid */
	const char		*name_stem;	/* lock name stem */
	char			names[BTRFS_MAX_LEVEL + 1][20];
	struct lock_class_key	keys[BTRFS_MAX_LEVEL + 1];
} btrfs_lockdep_keysets[] = {
	{ .id = BTRFS_ROOT_TREE_OBJECTID,	.name_stem = "root"	},
	{ .id = BTRFS_EXTENT_TREE_OBJECTID,	.name_stem = "extent"	},
	{ .id = BTRFS_CHUNK_TREE_OBJECTID,	.name_stem = "chunk"	},
	{ .id = BTRFS_DEV_TREE_OBJECTID,	.name_stem = "dev"	},
	{ .id = BTRFS_FS_TREE_OBJECTID,		.name_stem = "fs"	},
	{ .id = BTRFS_CSUM_TREE_OBJECTID,	.name_stem = "csum"	},
	{ .id = BTRFS_QUOTA_TREE_OBJECTID,	.name_stem = "quota"	},
	{ .id = BTRFS_TREE_LOG_OBJECTID,	.name_stem = "log"	},
	{ .id = BTRFS_TREE_RELOC_OBJECTID,	.name_stem = "treloc"	},
	{ .id = BTRFS_DATA_RELOC_TREE_OBJECTID,	.name_stem = "dreloc"	},
	{ .id = BTRFS_UUID_TREE_OBJECTID,	.name_stem = "uuid"	},
	{ .id = BTRFS_FREE_SPACE_TREE_OBJECTID,	.name_stem = "free-space" },
	{ .id = 0,				.name_stem = "tree"	},
};

void __init btrfs_init_lockdep(void)
{
	int i, j;

	/* initialize lockdep class names */
	for (i = 0; i < ARRAY_SIZE(btrfs_lockdep_keysets); i++) {
		struct btrfs_lockdep_keyset *ks = &btrfs_lockdep_keysets[i];

		for (j = 0; j < ARRAY_SIZE(ks->names); j++)
			snprintf(ks->names[j], sizeof(ks->names[j]),
				 "btrfs-%s-%02d", ks->name_stem, j);
	}
}

void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb,
				    int level)
{
	struct btrfs_lockdep_keyset *ks;

	BUG_ON(level >= ARRAY_SIZE(ks->keys));

	/* find the matching keyset, id 0 is the default entry */
	for (ks = btrfs_lockdep_keysets; ks->id; ks++)
		if (ks->id == objectid)
			break;

	lockdep_set_class_and_name(&eb->lock,
				   &ks->keys[level], ks->names[level]);
}

#endif

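/*
 * A concrete example of the keyset naming above (illustration only): an
 * extent buffer at level 2 of the extent tree lands in lockdep class
 * "btrfs-extent-02", while a leaf of a subvolume tree whose objectid matches
 * no dedicated entry falls back to the id 0 entry and gets "btrfs-tree-00".
 * Both names follow directly from the "btrfs-%s-%02d" format used in
 * btrfs_init_lockdep().
 */
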
/*
 * extents on the btree inode are pretty simple, there's one extent
 * that covers the entire device
 */
struct extent_map *btree_get_extent(struct btrfs_inode *inode,
		struct page *page, size_t pg_offset, u64 start, u64 len,
		int create)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct extent_map_tree *em_tree = &inode->extent_tree;
	struct extent_map *em;
	int ret;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, len);
	if (em) {
		em->bdev = fs_info->fs_devices->latest_bdev;
		read_unlock(&em_tree->lock);
		goto out;
	}
	read_unlock(&em_tree->lock);

	em = alloc_extent_map();
	if (!em) {
		em = ERR_PTR(-ENOMEM);
		goto out;
	}
	em->start = 0;
	em->len = (u64)-1;
	em->block_len = (u64)-1;
	em->block_start = 0;
	em->bdev = fs_info->fs_devices->latest_bdev;

	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em, 0);
	if (ret == -EEXIST) {
		free_extent_map(em);
		em = lookup_extent_mapping(em_tree, start, len);
		if (!em)
			em = ERR_PTR(-EIO);
	} else if (ret) {
		free_extent_map(em);
		em = ERR_PTR(ret);
	}
	write_unlock(&em_tree->lock);

out:
	return em;
}

u32 btrfs_csum_data(const char *data, u32 seed, size_t len)
{
	return crc32c(seed, data, len);
}

void btrfs_csum_final(u32 crc, u8 *result)
{
	put_unaligned_le32(~crc, result);
}

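/*
 * Typical two-step use of the helpers above (a sketch of what the callers in
 * this file do): seed with ~0, feed the data, then invert and store:
 *
 *	u32 crc = ~(u32)0;
 *	crc = btrfs_csum_data(data, crc, len);
 *	btrfs_csum_final(crc, result);
 *
 * which yields the little-endian CRC32C value btrfs stores on disk.
 */
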
/*
 * compute the csum for a btree block, and either verify it or write it
 * into the csum field of the block.
 */
static int csum_tree_block(struct btrfs_fs_info *fs_info,
			   struct extent_buffer *buf,
			   int verify)
{
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
	char result[BTRFS_CSUM_SIZE];
	unsigned long len;
	unsigned long cur_len;
	unsigned long offset = BTRFS_CSUM_SIZE;
	char *kaddr;
	unsigned long map_start;
	unsigned long map_len;
	int err;
	u32 crc = ~(u32)0;

	len = buf->len - offset;
	while (len > 0) {
		err = map_private_extent_buffer(buf, offset, 32,
					&kaddr, &map_start, &map_len);
		if (err)
			return err;
		cur_len = min(len, map_len - (offset - map_start));
		crc = btrfs_csum_data(kaddr + offset - map_start,
				      crc, cur_len);
		len -= cur_len;
		offset += cur_len;
	}
	memset(result, 0, BTRFS_CSUM_SIZE);

	btrfs_csum_final(crc, result);

	if (verify) {
		if (memcmp_extent_buffer(buf, result, 0, csum_size)) {
			u32 val;
			u32 found = 0;
			memcpy(&found, result, csum_size);

			read_extent_buffer(buf, &val, 0, csum_size);
			btrfs_warn_rl(fs_info,
				"%s checksum verify failed on %llu wanted %X found %X level %d",
				fs_info->sb->s_id, buf->start,
				val, found, btrfs_header_level(buf));
			return -EUCLEAN;
		}
	} else {
		write_extent_buffer(buf, result, 0, csum_size);
	}

	return 0;
}

/*
 * we can't consider a given block up to date unless the transid of the
 * block matches the transid in the parent node's pointer.  This is how we
 * detect blocks that either didn't get written at all or got written
 * in the wrong place.
 */
static int verify_parent_transid(struct extent_io_tree *io_tree,
				 struct extent_buffer *eb, u64 parent_transid,
				 int atomic)
{
	struct extent_state *cached_state = NULL;
	int ret;
	bool need_lock = (current->journal_info == BTRFS_SEND_TRANS_STUB);

	if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
		return 0;

	if (atomic)
		return -EAGAIN;

	if (need_lock) {
		btrfs_tree_read_lock(eb);
		btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
	}

	lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
			 &cached_state);
	if (extent_buffer_uptodate(eb) &&
	    btrfs_header_generation(eb) == parent_transid) {
		ret = 0;
		goto out;
	}
	btrfs_err_rl(eb->fs_info,
		"parent transid verify failed on %llu wanted %llu found %llu",
			eb->start,
			parent_transid, btrfs_header_generation(eb));
	ret = 1;

	/*
	 * Things reading via commit roots that don't have normal protection,
	 * like send, can have a really old block in cache that may point at a
	 * block that has been freed and re-allocated.  So don't clear uptodate
	 * if we find an eb that is under IO (dirty/writeback) because we could
	 * end up reading in the stale data and then writing it back out and
	 * making everybody very sad.
	 */
	if (!extent_buffer_under_io(eb))
		clear_extent_buffer_uptodate(eb);
out:
	unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
			     &cached_state);
	if (need_lock)
		btrfs_tree_read_unlock_blocking(eb);
	return ret;
}

/*
 * Return 0 if the superblock checksum type matches the checksum value of that
 * algorithm. Pass the raw disk superblock data.
 */
static int btrfs_check_super_csum(struct btrfs_fs_info *fs_info,
				  char *raw_disk_sb)
{
	struct btrfs_super_block *disk_sb =
		(struct btrfs_super_block *)raw_disk_sb;
	u16 csum_type = btrfs_super_csum_type(disk_sb);
	int ret = 0;

	if (csum_type == BTRFS_CSUM_TYPE_CRC32) {
		u32 crc = ~(u32)0;
		char result[sizeof(crc)];

		/*
		 * The super_block structure does not span the whole
		 * BTRFS_SUPER_INFO_SIZE range, we expect that the unused space
		 * is filled with zeros and is included in the checksum.
		 */
		crc = btrfs_csum_data(raw_disk_sb + BTRFS_CSUM_SIZE,
				crc, BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
		btrfs_csum_final(crc, result);

		if (memcmp(raw_disk_sb, result, sizeof(result)))
			ret = 1;
	}

	if (csum_type >= ARRAY_SIZE(btrfs_csum_sizes)) {
		btrfs_err(fs_info, "unsupported checksum algorithm %u",
			  csum_type);
		ret = 1;
	}

	return ret;
}

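/*
 * Layout assumed by the check above: the first BTRFS_CSUM_SIZE bytes of the
 * BTRFS_SUPER_INFO_SIZE block hold the stored checksum and everything after
 * them is what gets checksummed.  A caller in the mount path is expected to
 * use it roughly as
 *
 *	if (btrfs_check_super_csum(fs_info, raw_superblock))
 *		fail the mount with -EINVAL;
 *
 * where raw_superblock is the superblock as read from disk (the exact call
 * site is elsewhere in this file).
 */
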
static int verify_level_key(struct btrfs_fs_info *fs_info,
			    struct extent_buffer *eb, int level,
			    struct btrfs_key *first_key, u64 parent_transid)
{
	int found_level;
	struct btrfs_key found_key;
	int ret;

	found_level = btrfs_header_level(eb);
	if (found_level != level) {
#ifdef CONFIG_BTRFS_DEBUG
		WARN_ON(1);
		btrfs_err(fs_info,
"tree level mismatch detected, bytenr=%llu level expected=%u has=%u",
			  eb->start, level, found_level);
#endif
		return -EIO;
	}

	if (!first_key)
		return 0;

	/*
	 * For live tree blocks (new tree blocks in the current transaction),
	 * we need proper lock context to avoid races, which is impossible
	 * here.  So we only check tree blocks which have been read from disk,
	 * whose generation <= fs_info->last_trans_committed.
	 */
	if (btrfs_header_generation(eb) > fs_info->last_trans_committed)
		return 0;
	if (found_level)
		btrfs_node_key_to_cpu(eb, &found_key, 0);
	else
		btrfs_item_key_to_cpu(eb, &found_key, 0);
	ret = btrfs_comp_cpu_keys(first_key, &found_key);

#ifdef CONFIG_BTRFS_DEBUG
	if (ret) {
		WARN_ON(1);
		btrfs_err(fs_info,
"tree first key mismatch detected, bytenr=%llu parent_transid=%llu key expected=(%llu,%u,%llu) has=(%llu,%u,%llu)",
			  eb->start, parent_transid, first_key->objectid,
			  first_key->type, first_key->offset,
			  found_key.objectid, found_key.type,
			  found_key.offset);
	}
#endif
	return ret;
}

/*
 * helper to read a given tree block, doing retries as required when
 * the checksums don't match and we have alternate mirrors to try.
 *
 * @parent_transid:	expected transid, skip check if 0
 * @level:		expected level, mandatory check
 * @first_key:		expected key of first slot, skip check if NULL
 */
static int btree_read_extent_buffer_pages(struct btrfs_fs_info *fs_info,
					  struct extent_buffer *eb,
					  u64 parent_transid, int level,
					  struct btrfs_key *first_key)
{
	struct extent_io_tree *io_tree;
	int failed = 0;
	int ret;
	int num_copies = 0;
	int mirror_num = 0;
	int failed_mirror = 0;

	io_tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
	while (1) {
		clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
		ret = read_extent_buffer_pages(io_tree, eb, WAIT_COMPLETE,
					       mirror_num);
		if (!ret) {
			if (verify_parent_transid(io_tree, eb,
						   parent_transid, 0))
				ret = -EIO;
			else if (verify_level_key(fs_info, eb, level,
						  first_key, parent_transid))
				ret = -EUCLEAN;
			else
				break;
		}

		num_copies = btrfs_num_copies(fs_info,
					      eb->start, eb->len);
		if (num_copies == 1)
			break;

		if (!failed_mirror) {
			failed = 1;
			failed_mirror = eb->read_mirror;
		}

		mirror_num++;
		if (mirror_num == failed_mirror)
			mirror_num++;

		if (mirror_num > num_copies)
			break;
	}

	if (failed && !ret && failed_mirror)
		repair_eb_io_failure(fs_info, eb, failed_mirror);

	return ret;
}

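/*
 * To make the retry loop above concrete (illustration only): on a two-copy
 * profile such as RAID1, num_copies is 2.  If the first read (mirror_num 0,
 * i.e. "any copy") fails verification with eb->read_mirror == 1, the loop
 * records mirror 1 as failed, skips it, retries mirror 2, and on success lets
 * repair_eb_io_failure() rewrite the bad copy from the good one.
 */
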
/*
 * checksum a dirty tree block before IO.  This has extra checks to make sure
 * we only fill in the checksum field in the first page of a multi-page block
 */

static int csum_dirty_buffer(struct btrfs_fs_info *fs_info, struct page *page)
{
	u64 start = page_offset(page);
	u64 found_start;
	struct extent_buffer *eb;

	eb = (struct extent_buffer *)page->private;
	if (page != eb->pages[0])
		return 0;

	found_start = btrfs_header_bytenr(eb);
	/*
	 * Please do not consolidate these warnings into a single if.
	 * It is useful to know what went wrong.
	 */
	if (WARN_ON(found_start != start))
		return -EUCLEAN;
	if (WARN_ON(!PageUptodate(page)))
		return -EUCLEAN;

	ASSERT(memcmp_extent_buffer(eb, fs_info->fsid,
			btrfs_header_fsid(), BTRFS_FSID_SIZE) == 0);

	return csum_tree_block(fs_info, eb, 0);
}

static int check_tree_block_fsid(struct btrfs_fs_info *fs_info,
				 struct extent_buffer *eb)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	u8 fsid[BTRFS_FSID_SIZE];
	int ret = 1;

	read_extent_buffer(eb, fsid, btrfs_header_fsid(), BTRFS_FSID_SIZE);
	while (fs_devices) {
		if (!memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE)) {
			ret = 0;
			break;
		}
		fs_devices = fs_devices->seed;
	}
	return ret;
}

static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
				      u64 phy_offset, struct page *page,
				      u64 start, u64 end, int mirror)
{
	u64 found_start;
	int found_level;
	struct extent_buffer *eb;
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret = 0;
	int reads_done;

	if (!page->private)
		goto out;

	eb = (struct extent_buffer *)page->private;

	/* the pending IO might have been the only thing that kept this buffer
	 * in memory.  Make sure we have a ref for all these other checks
	 */
	extent_buffer_get(eb);

	reads_done = atomic_dec_and_test(&eb->io_pages);
	if (!reads_done)
		goto err;

	eb->read_mirror = mirror;
	if (test_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags)) {
		ret = -EIO;
		goto err;
	}

	found_start = btrfs_header_bytenr(eb);
	if (found_start != eb->start) {
		btrfs_err_rl(fs_info, "bad tree block start, want %llu have %llu",
			     eb->start, found_start);
		ret = -EIO;
		goto err;
	}
	if (check_tree_block_fsid(fs_info, eb)) {
		btrfs_err_rl(fs_info, "bad fsid on block %llu",
			     eb->start);
		ret = -EIO;
		goto err;
	}
	found_level = btrfs_header_level(eb);
	if (found_level >= BTRFS_MAX_LEVEL) {
		btrfs_err(fs_info, "bad tree block level %d on %llu",
			  (int)btrfs_header_level(eb), eb->start);
		ret = -EIO;
		goto err;
	}

	btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb),
				       eb, found_level);

	ret = csum_tree_block(fs_info, eb, 1);
	if (ret)
		goto err;

	/*
	 * If this is a leaf block and it is corrupt, set the corrupt bit so
	 * that we don't try and read the other copies of this block, just
	 * return -EIO.
	 */
	if (found_level == 0 && btrfs_check_leaf_full(fs_info, eb)) {
		set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
		ret = -EIO;
	}

	if (found_level > 0 && btrfs_check_node(fs_info, eb))
		ret = -EIO;

	if (!ret)
		set_extent_buffer_uptodate(eb);
err:
	if (reads_done &&
	    test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
		btree_readahead_hook(eb, ret);

	if (ret) {
		/*
		 * our io error hook is going to dec the io pages
		 * again, we have to make sure it has something
		 * to decrement
		 */
		atomic_inc(&eb->io_pages);
		clear_extent_buffer_uptodate(eb);
	}
	free_extent_buffer(eb);
out:
	return ret;
}

static int btree_io_failed_hook(struct page *page, int failed_mirror)
{
	struct extent_buffer *eb;

	eb = (struct extent_buffer *)page->private;
	set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
	eb->read_mirror = failed_mirror;
	atomic_dec(&eb->io_pages);
	if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
		btree_readahead_hook(eb, -EIO);
	return -EIO;	/* we fixed nothing */
}

static void end_workqueue_bio(struct bio *bio)
{
	struct btrfs_end_io_wq *end_io_wq = bio->bi_private;
	struct btrfs_fs_info *fs_info;
	struct btrfs_workqueue *wq;
	btrfs_work_func_t func;

	fs_info = end_io_wq->info;
	end_io_wq->status = bio->bi_status;

	if (bio_op(bio) == REQ_OP_WRITE) {
		if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA) {
			wq = fs_info->endio_meta_write_workers;
			func = btrfs_endio_meta_write_helper;
		} else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE) {
			wq = fs_info->endio_freespace_worker;
			func = btrfs_freespace_write_helper;
		} else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) {
			wq = fs_info->endio_raid56_workers;
			func = btrfs_endio_raid56_helper;
		} else {
			wq = fs_info->endio_write_workers;
			func = btrfs_endio_write_helper;
		}
	} else {
		if (unlikely(end_io_wq->metadata ==
			     BTRFS_WQ_ENDIO_DIO_REPAIR)) {
			wq = fs_info->endio_repair_workers;
			func = btrfs_endio_repair_helper;
		} else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) {
			wq = fs_info->endio_raid56_workers;
			func = btrfs_endio_raid56_helper;
		} else if (end_io_wq->metadata) {
			wq = fs_info->endio_meta_workers;
			func = btrfs_endio_meta_helper;
		} else {
			wq = fs_info->endio_workers;
			func = btrfs_endio_helper;
		}
	}

	btrfs_init_work(&end_io_wq->work, func, end_workqueue_fn, NULL, NULL);
	btrfs_queue_work(wq, &end_io_wq->work);
}

blk_status_t btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
				 enum btrfs_wq_endio_type metadata)
{
	struct btrfs_end_io_wq *end_io_wq;

	end_io_wq = kmem_cache_alloc(btrfs_end_io_wq_cache, GFP_NOFS);
	if (!end_io_wq)
		return BLK_STS_RESOURCE;

	end_io_wq->private = bio->bi_private;
	end_io_wq->end_io = bio->bi_end_io;
	end_io_wq->info = info;
	end_io_wq->status = 0;
	end_io_wq->bio = bio;
	end_io_wq->metadata = metadata;

	bio->bi_private = end_io_wq;
	bio->bi_end_io = end_workqueue_bio;
	return 0;
}

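/*
 * The three callbacks below implement the async_submit_bio life cycle on
 * fs_info->workers: run_one_async_start() computes the checksums, then
 * run_one_async_done() either submits the bio or ends it with the recorded
 * error status, and run_one_async_free() drops the allocation.  They are only
 * ever wired together through btrfs_wq_submit_bio() further down.
 */
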
static void run_one_async_start(struct btrfs_work *work)
{
	struct async_submit_bio *async;
	blk_status_t ret;

	async = container_of(work, struct async_submit_bio, work);
	ret = async->submit_bio_start(async->private_data, async->bio,
				      async->bio_offset);
	if (ret)
		async->status = ret;
}

static void run_one_async_done(struct btrfs_work *work)
{
	struct async_submit_bio *async;

	async = container_of(work, struct async_submit_bio, work);

	/* If an error occurred we just want to clean up the bio and move on */
	if (async->status) {
		async->bio->bi_status = async->status;
		bio_endio(async->bio);
		return;
	}

	btrfs_submit_bio_done(async->private_data, async->bio, async->mirror_num);
}

static void run_one_async_free(struct btrfs_work *work)
{
	struct async_submit_bio *async;

	async = container_of(work, struct async_submit_bio, work);
	kfree(async);
}

blk_status_t btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
				 int mirror_num, unsigned long bio_flags,
				 u64 bio_offset, void *private_data,
				 extent_submit_bio_start_t *submit_bio_start)
{
	struct async_submit_bio *async;

	async = kmalloc(sizeof(*async), GFP_NOFS);
	if (!async)
		return BLK_STS_RESOURCE;

	async->private_data = private_data;
	async->bio = bio;
	async->mirror_num = mirror_num;
	async->submit_bio_start = submit_bio_start;

	btrfs_init_work(&async->work, btrfs_worker_helper, run_one_async_start,
			run_one_async_done, run_one_async_free);

	async->bio_offset = bio_offset;

	async->status = 0;

	if (op_is_sync(bio->bi_opf))
		btrfs_set_work_high_priority(&async->work);

	btrfs_queue_work(fs_info->workers, &async->work);
	return 0;
}

static blk_status_t btree_csum_one_bio(struct bio *bio)
{
	struct bio_vec *bvec;
	struct btrfs_root *root;
	int i, ret = 0;

	ASSERT(!bio_flagged(bio, BIO_CLONED));
	bio_for_each_segment_all(bvec, bio, i) {
		root = BTRFS_I(bvec->bv_page->mapping->host)->root;
		ret = csum_dirty_buffer(root->fs_info, bvec->bv_page);
		if (ret)
			break;
	}

	return errno_to_blk_status(ret);
}

static blk_status_t btree_submit_bio_start(void *private_data, struct bio *bio,
					   u64 bio_offset)
{
	/*
	 * when we're called for a write, we're already in the async
	 * submission context.  Just jump into btrfs_map_bio
	 */
	return btree_csum_one_bio(bio);
}

static int check_async_write(struct btrfs_inode *bi)
{
	if (atomic_read(&bi->sync_writers))
		return 0;
#ifdef CONFIG_X86
	if (static_cpu_has(X86_FEATURE_XMM4_2))
		return 0;
#endif
	return 1;
}

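/*
 * Summary of the decision made in btree_submit_bio_hook() below (for
 * orientation only): reads get an end-io workqueue hook so checksums are
 * verified in task context; writes are checksummed inline when
 * check_async_write() says the CPU can do CRC32C cheaply (or a sync writer is
 * active), and are otherwise pushed through btrfs_wq_submit_bio() so the
 * checksumming runs on the worker threads.
 */
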
static blk_status_t btree_submit_bio_hook(void *private_data, struct bio *bio,
					  int mirror_num, unsigned long bio_flags,
					  u64 bio_offset)
{
	struct inode *inode = private_data;
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	int async = check_async_write(BTRFS_I(inode));
	blk_status_t ret;

	if (bio_op(bio) != REQ_OP_WRITE) {
		/*
		 * called for a read, do the setup so that checksum validation
		 * can happen in the async kernel threads
		 */
		ret = btrfs_bio_wq_end_io(fs_info, bio,
					  BTRFS_WQ_ENDIO_METADATA);
		if (ret)
			goto out_w_error;
		ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
	} else if (!async) {
		ret = btree_csum_one_bio(bio);
		if (ret)
			goto out_w_error;
		ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
	} else {
		/*
		 * kthread helpers are used to submit writes so that
		 * checksumming can happen in parallel across all CPUs
		 */
		ret = btrfs_wq_submit_bio(fs_info, bio, mirror_num, 0,
					  bio_offset, private_data,
					  btree_submit_bio_start);
	}

	if (ret)
		goto out_w_error;
	return 0;

out_w_error:
	bio->bi_status = ret;
	bio_endio(bio);
	return ret;
}

#ifdef CONFIG_MIGRATION
static int btree_migratepage(struct address_space *mapping,
			struct page *newpage, struct page *page,
			enum migrate_mode mode)
{
	/*
	 * we can't safely write a btree page from here,
	 * we haven't done the locking hook
	 */
	if (PageDirty(page))
		return -EAGAIN;
	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
	 */
	if (page_has_private(page) &&
	    !try_to_release_page(page, GFP_KERNEL))
		return -EAGAIN;
	return migrate_page(mapping, newpage, page, mode);
}
#endif


static int btree_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct btrfs_fs_info *fs_info;
	int ret;

	if (wbc->sync_mode == WB_SYNC_NONE) {

		if (wbc->for_kupdate)
			return 0;

		fs_info = BTRFS_I(mapping->host)->root->fs_info;
		/* this is a bit racy, but that's ok */
		ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes,
					       BTRFS_DIRTY_METADATA_THRESH,
					       fs_info->dirty_metadata_batch);
		if (ret < 0)
			return 0;
	}
	return btree_write_cache_pages(mapping, wbc);
}

static int btree_readpage(struct file *file, struct page *page)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	return extent_read_full_page(tree, page, btree_get_extent, 0);
}

static int btree_releasepage(struct page *page, gfp_t gfp_flags)
{
	if (PageWriteback(page) || PageDirty(page))
		return 0;

	return try_release_extent_buffer(page);
}

static void btree_invalidatepage(struct page *page, unsigned int offset,
				 unsigned int length)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	extent_invalidatepage(tree, page, offset);
	btree_releasepage(page, GFP_NOFS);
	if (PagePrivate(page)) {
		btrfs_warn(BTRFS_I(page->mapping->host)->root->fs_info,
			   "page private not zero on page %llu",
			   (unsigned long long)page_offset(page));
		ClearPagePrivate(page);
		set_page_private(page, 0);
		put_page(page);
	}
}

static int btree_set_page_dirty(struct page *page)
{
#ifdef DEBUG
	struct extent_buffer *eb;

	BUG_ON(!PagePrivate(page));
	eb = (struct extent_buffer *)page->private;
	BUG_ON(!eb);
	BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
	BUG_ON(!atomic_read(&eb->refs));
	btrfs_assert_tree_locked(eb);
#endif
	return __set_page_dirty_nobuffers(page);
}

static const struct address_space_operations btree_aops = {
	.readpage	= btree_readpage,
	.writepages	= btree_writepages,
	.releasepage	= btree_releasepage,
	.invalidatepage = btree_invalidatepage,
#ifdef CONFIG_MIGRATION
	.migratepage	= btree_migratepage,
#endif
	.set_page_dirty = btree_set_page_dirty,
};

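/*
 * btree_aops is expected to be installed on the btree inode's mapping during
 * mount setup (conceptually: fs_info->btree_inode->i_mapping->a_ops =
 * &btree_aops), so every metadata page is read and written through the
 * callbacks above.  The install site is elsewhere in this file.
 */
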
void readahead_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct extent_buffer *buf = NULL;
	struct inode *btree_inode = fs_info->btree_inode;

	buf = btrfs_find_create_tree_block(fs_info, bytenr);
	if (IS_ERR(buf))
		return;
	read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
				 buf, WAIT_NONE, 0);
	free_extent_buffer(buf);
}

int reada_tree_block_flagged(struct btrfs_fs_info *fs_info, u64 bytenr,
			     int mirror_num, struct extent_buffer **eb)
{
	struct extent_buffer *buf = NULL;
	struct inode *btree_inode = fs_info->btree_inode;
	struct extent_io_tree *io_tree = &BTRFS_I(btree_inode)->io_tree;
	int ret;

	buf = btrfs_find_create_tree_block(fs_info, bytenr);
	if (IS_ERR(buf))
		return 0;

	set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags);

	ret = read_extent_buffer_pages(io_tree, buf, WAIT_PAGE_LOCK,
				       mirror_num);
	if (ret) {
		free_extent_buffer(buf);
		return ret;
	}

	if (test_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags)) {
		free_extent_buffer(buf);
		return -EIO;
	} else if (extent_buffer_uptodate(buf)) {
		*eb = buf;
	} else {
		free_extent_buffer(buf);
	}
	return 0;
}

struct extent_buffer *btrfs_find_create_tree_block(
						struct btrfs_fs_info *fs_info,
						u64 bytenr)
{
	if (btrfs_is_testing(fs_info))
		return alloc_test_extent_buffer(fs_info, bytenr);
	return alloc_extent_buffer(fs_info, bytenr);
}


int btrfs_write_tree_block(struct extent_buffer *buf)
{
	return filemap_fdatawrite_range(buf->pages[0]->mapping, buf->start,
					buf->start + buf->len - 1);
}

void btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
{
	filemap_fdatawait_range(buf->pages[0]->mapping,
				buf->start, buf->start + buf->len - 1);
}

/*
 * Read tree block at logical address @bytenr and do various basic but
 * critical verifications.
 *
 * @parent_transid:	expected transid of this tree block, skip check if 0
 * @level:		expected level, mandatory check
 * @first_key:		expected key in slot 0, skip check if NULL
 */
struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr,
				      u64 parent_transid, int level,
				      struct btrfs_key *first_key)
{
	struct extent_buffer *buf = NULL;
	int ret;

	buf = btrfs_find_create_tree_block(fs_info, bytenr);
	if (IS_ERR(buf))
		return buf;

	ret = btree_read_extent_buffer_pages(fs_info, buf, parent_transid,
					     level, first_key);
	if (ret) {
		free_extent_buffer(buf);
		return ERR_PTR(ret);
	}
	return buf;

}

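/*
 * A typical call pattern (sketch, not from this file): when following a
 * pointer out of a parent node, the caller derives all three expectations
 * from that parent slot, e.g.
 *
 *	btrfs_node_key_to_cpu(parent, &first_key, slot);
 *	eb = read_tree_block(fs_info, btrfs_node_blockptr(parent, slot),
 *			     btrfs_node_ptr_generation(parent, slot),
 *			     btrfs_header_level(parent) - 1, &first_key);
 *
 * which is roughly how the tree search helpers in ctree.c use it.
 */
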
void clean_tree_block(struct btrfs_fs_info *fs_info,
		      struct extent_buffer *buf)
{
	if (btrfs_header_generation(buf) ==
	    fs_info->running_transaction->transid) {
		btrfs_assert_tree_locked(buf);

		if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) {
			percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
						 -buf->len,
						 fs_info->dirty_metadata_batch);
			/* ugh, clear_extent_buffer_dirty needs to lock the page */
			btrfs_set_lock_blocking(buf);
			clear_extent_buffer_dirty(buf);
		}
	}
}

static struct btrfs_subvolume_writers *btrfs_alloc_subvolume_writers(void)
{
	struct btrfs_subvolume_writers *writers;
	int ret;

	writers = kmalloc(sizeof(*writers), GFP_NOFS);
	if (!writers)
		return ERR_PTR(-ENOMEM);

	ret = percpu_counter_init(&writers->counter, 0, GFP_NOFS);
	if (ret < 0) {
		kfree(writers);
		return ERR_PTR(ret);
	}

	init_waitqueue_head(&writers->wait);
	return writers;
}

static void
btrfs_free_subvolume_writers(struct btrfs_subvolume_writers *writers)
{
	percpu_counter_destroy(&writers->counter);
	kfree(writers);
}

static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
			 u64 objectid)
{
	bool dummy = test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state);
	root->node = NULL;
	root->commit_root = NULL;
	root->state = 0;
	root->orphan_cleanup_state = 0;

	root->last_trans = 0;
	root->highest_objectid = 0;
	root->nr_delalloc_inodes = 0;
	root->nr_ordered_extents = 0;
	root->inode_tree = RB_ROOT;
	INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC);
	root->block_rsv = NULL;

	INIT_LIST_HEAD(&root->dirty_list);
	INIT_LIST_HEAD(&root->root_list);
	INIT_LIST_HEAD(&root->delalloc_inodes);
	INIT_LIST_HEAD(&root->delalloc_root);
	INIT_LIST_HEAD(&root->ordered_extents);
	INIT_LIST_HEAD(&root->ordered_root);
	INIT_LIST_HEAD(&root->logged_list[0]);
	INIT_LIST_HEAD(&root->logged_list[1]);
	spin_lock_init(&root->inode_lock);
	spin_lock_init(&root->delalloc_lock);
	spin_lock_init(&root->ordered_extent_lock);
	spin_lock_init(&root->accounting_lock);
	spin_lock_init(&root->log_extents_lock[0]);
	spin_lock_init(&root->log_extents_lock[1]);
	spin_lock_init(&root->qgroup_meta_rsv_lock);
	mutex_init(&root->objectid_mutex);
	mutex_init(&root->log_mutex);
	mutex_init(&root->ordered_extent_mutex);
	mutex_init(&root->delalloc_mutex);
	init_waitqueue_head(&root->log_writer_wait);
	init_waitqueue_head(&root->log_commit_wait[0]);
	init_waitqueue_head(&root->log_commit_wait[1]);
	INIT_LIST_HEAD(&root->log_ctxs[0]);
	INIT_LIST_HEAD(&root->log_ctxs[1]);
	atomic_set(&root->log_commit[0], 0);
	atomic_set(&root->log_commit[1], 0);
	atomic_set(&root->log_writers, 0);
	atomic_set(&root->log_batch, 0);
	refcount_set(&root->refs, 1);
	atomic_set(&root->will_be_snapshotted, 0);
	atomic_set(&root->snapshot_force_cow, 0);
	root->log_transid = 0;
	root->log_transid_committed = -1;
	root->last_log_commit = 0;
	if (!dummy)
		extent_io_tree_init(&root->dirty_log_pages, NULL);

	memset(&root->root_key, 0, sizeof(root->root_key));
	memset(&root->root_item, 0, sizeof(root->root_item));
	memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
	if (!dummy)
		root->defrag_trans_start = fs_info->generation;
	else
		root->defrag_trans_start = 0;
	root->root_key.objectid = objectid;
	root->anon_dev = 0;

	spin_lock_init(&root->root_item_lock);
}

static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info,
					   gfp_t flags)
{
	struct btrfs_root *root = kzalloc(sizeof(*root), flags);
	if (root)
		root->fs_info = fs_info;
	return root;
}

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
/* Should only be used by the testing infrastructure */
struct btrfs_root *btrfs_alloc_dummy_root(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;

	if (!fs_info)
		return ERR_PTR(-EINVAL);

	root = btrfs_alloc_root(fs_info, GFP_KERNEL);
	if (!root)
		return ERR_PTR(-ENOMEM);

	/* We don't use the stripesize in selftest, set it as sectorsize */
	__setup_root(root, fs_info, BTRFS_ROOT_TREE_OBJECTID);
	root->alloc_bytenr = 0;

	return root;
}
#endif

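/*
 * For orientation (a sketch of typical callers elsewhere in btrfs, not of
 * code in this file): btrfs_create_tree() below is how brand-new trees such
 * as the quota tree or the free space tree come into existence, e.g. roughly
 *
 *	quota_root = btrfs_create_tree(trans, fs_info,
 *				       BTRFS_QUOTA_TREE_OBJECTID);
 *
 * inside the respective feature-enable paths.
 */
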
struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
				     struct btrfs_fs_info *fs_info,
				     u64 objectid)
{
	struct extent_buffer *leaf;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *root;
	struct btrfs_key key;
	int ret = 0;
	uuid_le uuid = NULL_UUID_LE;

	root = btrfs_alloc_root(fs_info, GFP_KERNEL);
	if (!root)
		return ERR_PTR(-ENOMEM);

	__setup_root(root, fs_info, objectid);
	root->root_key.objectid = objectid;
	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
	root->root_key.offset = 0;

	leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0);
	if (IS_ERR(leaf)) {
		ret = PTR_ERR(leaf);
		leaf = NULL;
		goto fail;
	}

	root->node = leaf;
	btrfs_mark_buffer_dirty(leaf);

	root->commit_root = btrfs_root_node(root);
	set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);

	root->root_item.flags = 0;
	root->root_item.byte_limit = 0;
	btrfs_set_root_bytenr(&root->root_item, leaf->start);
	btrfs_set_root_generation(&root->root_item, trans->transid);
	btrfs_set_root_level(&root->root_item, 0);
	btrfs_set_root_refs(&root->root_item, 1);
	btrfs_set_root_used(&root->root_item, leaf->len);
	btrfs_set_root_last_snapshot(&root->root_item, 0);
	btrfs_set_root_dirid(&root->root_item, 0);
	if (is_fstree(objectid))
		uuid_le_gen(&uuid);
	memcpy(root->root_item.uuid, uuid.b, BTRFS_UUID_SIZE);
	root->root_item.drop_level = 0;

	key.objectid = objectid;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = 0;
	ret = btrfs_insert_root(trans, tree_root, &key, &root->root_item);
	if (ret)
		goto fail;

	btrfs_tree_unlock(leaf);

	return root;

fail:
	if (leaf) {
		btrfs_tree_unlock(leaf);
		free_extent_buffer(root->commit_root);
		free_extent_buffer(leaf);
	}
	kfree(root);

	return ERR_PTR(ret);
}

static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
					 struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;
	struct extent_buffer *leaf;

	root = btrfs_alloc_root(fs_info, GFP_NOFS);
	if (!root)
		return ERR_PTR(-ENOMEM);

	__setup_root(root, fs_info, BTRFS_TREE_LOG_OBJECTID);

	root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
	root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;

	/*
	 * DON'T set REF_COWS for log trees
	 *
	 * log trees do not get reference counted because they go away
	 * before a real commit is actually done.  They do store pointers
	 * to file data extents, and those reference counts still get
	 * updated (along with back refs to the log tree).
	 */

	leaf = btrfs_alloc_tree_block(trans, root, 0, BTRFS_TREE_LOG_OBJECTID,
			NULL, 0, 0, 0);
	if (IS_ERR(leaf)) {
		kfree(root);
		return ERR_CAST(leaf);
	}

	root->node = leaf;

	btrfs_mark_buffer_dirty(root->node);
	btrfs_tree_unlock(root->node);
	return root;
}

int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *log_root;

	log_root = alloc_log_tree(trans, fs_info);
	if (IS_ERR(log_root))
		return PTR_ERR(log_root);
	WARN_ON(fs_info->log_root_tree);
	fs_info->log_root_tree = log_root;
	return 0;
}

int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *log_root;
	struct btrfs_inode_item *inode_item;

	log_root = alloc_log_tree(trans, fs_info);
	if (IS_ERR(log_root))
		return PTR_ERR(log_root);

	log_root->last_trans = trans->transid;
	log_root->root_key.offset = root->root_key.objectid;

	inode_item = &log_root->root_item.inode;
	btrfs_set_stack_inode_generation(inode_item, 1);
	btrfs_set_stack_inode_size(inode_item, 3);
	btrfs_set_stack_inode_nlink(inode_item, 1);
	btrfs_set_stack_inode_nbytes(inode_item,
				     fs_info->nodesize);
	btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755);

	btrfs_set_root_node(&log_root->root_item, log_root->node);

	WARN_ON(root->log_root);
	root->log_root = log_root;
	root->log_transid = 0;
	root->log_transid_committed = -1;
	root->last_log_commit = 0;
	return 0;
}

static struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root,
					       struct btrfs_key *key)
{
	struct btrfs_root *root;
	struct btrfs_fs_info *fs_info = tree_root->fs_info;
	struct btrfs_path *path;
	u64 generation;
	int ret;
	int level;

	path = btrfs_alloc_path();
	if (!path)
		return ERR_PTR(-ENOMEM);

	root = btrfs_alloc_root(fs_info, GFP_NOFS);
	if (!root) {
		ret = -ENOMEM;
		goto alloc_fail;
	}

	__setup_root(root, fs_info, key->objectid);

	ret = btrfs_find_root(tree_root, key, path,
			      &root->root_item, &root->root_key);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		goto find_fail;
	}

	generation = btrfs_root_generation(&root->root_item);
	level = btrfs_root_level(&root->root_item);
	root->node = read_tree_block(fs_info,
				     btrfs_root_bytenr(&root->root_item),
				     generation, level, NULL);
	if (IS_ERR(root->node)) {
		ret = PTR_ERR(root->node);
		goto find_fail;
	} else if (!btrfs_buffer_uptodate(root->node, generation, 0)) {
		ret = -EIO;
		free_extent_buffer(root->node);
		goto find_fail;
	}
	root->commit_root = btrfs_root_node(root);
out:
	btrfs_free_path(path);
	return root;

find_fail:
	kfree(root);
alloc_fail:
	root = ERR_PTR(ret);
	goto out;
}

struct btrfs_root *btrfs_read_fs_root(struct btrfs_root *tree_root,
				      struct btrfs_key *location)
{
	struct btrfs_root *root;

	root = btrfs_read_tree_root(tree_root, location);
	if (IS_ERR(root))
		return root;

	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
		set_bit(BTRFS_ROOT_REF_COWS, &root->state);
		btrfs_check_and_init_root_item(&root->root_item);
	}

	return root;
}

cb517eab MX |
1455 | int btrfs_init_fs_root(struct btrfs_root *root) |
1456 | { | |
1457 | int ret; | |
8257b2dc | 1458 | struct btrfs_subvolume_writers *writers; |
cb517eab MX |
1459 | |
1460 | root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS); | |
1461 | root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned), | |
1462 | GFP_NOFS); | |
1463 | if (!root->free_ino_pinned || !root->free_ino_ctl) { | |
1464 | ret = -ENOMEM; | |
1465 | goto fail; | |
1466 | } | |
1467 | ||
8257b2dc MX |
1468 | writers = btrfs_alloc_subvolume_writers(); |
1469 | if (IS_ERR(writers)) { | |
1470 | ret = PTR_ERR(writers); | |
1471 | goto fail; | |
1472 | } | |
1473 | root->subv_writers = writers; | |
1474 | ||
cb517eab | 1475 | btrfs_init_free_ino_ctl(root); |
57cdc8db DS |
1476 | spin_lock_init(&root->ino_cache_lock); |
1477 | init_waitqueue_head(&root->ino_cache_wait); | |
cb517eab MX |
1478 | |
1479 | ret = get_anon_bdev(&root->anon_dev); | |
1480 | if (ret) | |
876d2cf1 | 1481 | goto fail; |
f32e48e9 CR |
1482 | |
1483 | mutex_lock(&root->objectid_mutex); | |
1484 | ret = btrfs_find_highest_objectid(root, | |
1485 | &root->highest_objectid); | |
1486 | if (ret) { | |
1487 | mutex_unlock(&root->objectid_mutex); | |
876d2cf1 | 1488 | goto fail; |
f32e48e9 CR |
1489 | } |
1490 | ||
1491 | ASSERT(root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID); | |
1492 | ||
1493 | mutex_unlock(&root->objectid_mutex); | |
1494 | ||
cb517eab MX |
1495 | return 0; |
1496 | fail: | |
84db5ccf | 1497 | /* The caller is responsible for calling btrfs_free_fs_root */ |
cb517eab MX |
1498 | return ret; |
1499 | } | |
1500 | ||
35bbb97f JM |
1501 | struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info, |
1502 | u64 root_id) | |
cb517eab MX |
1503 | { |
1504 | struct btrfs_root *root; | |
1505 | ||
1506 | spin_lock(&fs_info->fs_roots_radix_lock); | |
1507 | root = radix_tree_lookup(&fs_info->fs_roots_radix, | |
1508 | (unsigned long)root_id); | |
1509 | spin_unlock(&fs_info->fs_roots_radix_lock); | |
1510 | return root; | |
1511 | } | |
1512 | ||
1513 | int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info, | |
1514 | struct btrfs_root *root) | |
1515 | { | |
1516 | int ret; | |
1517 | ||
e1860a77 | 1518 | ret = radix_tree_preload(GFP_NOFS); |
cb517eab MX |
1519 | if (ret) |
1520 | return ret; | |
1521 | ||
1522 | spin_lock(&fs_info->fs_roots_radix_lock); | |
1523 | ret = radix_tree_insert(&fs_info->fs_roots_radix, | |
1524 | (unsigned long)root->root_key.objectid, | |
1525 | root); | |
1526 | if (ret == 0) | |
27cdeb70 | 1527 | set_bit(BTRFS_ROOT_IN_RADIX, &root->state); |
cb517eab MX |
1528 | spin_unlock(&fs_info->fs_roots_radix_lock); |
1529 | radix_tree_preload_end(); | |
1530 | ||
1531 | return ret; | |
1532 | } | |
1533 | ||
c00869f1 MX |
1534 | struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info, |
1535 | struct btrfs_key *location, | |
1536 | bool check_ref) | |
5eda7b5e CM |
1537 | { |
1538 | struct btrfs_root *root; | |
381cf658 | 1539 | struct btrfs_path *path; |
1d4c08e0 | 1540 | struct btrfs_key key; |
5eda7b5e CM |
1541 | int ret; |
1542 | ||
edbd8d4e CM |
1543 | if (location->objectid == BTRFS_ROOT_TREE_OBJECTID) |
1544 | return fs_info->tree_root; | |
1545 | if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID) | |
1546 | return fs_info->extent_root; | |
8f18cf13 CM |
1547 | if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID) |
1548 | return fs_info->chunk_root; | |
1549 | if (location->objectid == BTRFS_DEV_TREE_OBJECTID) | |
1550 | return fs_info->dev_root; | |
0403e47e YZ |
1551 | if (location->objectid == BTRFS_CSUM_TREE_OBJECTID) |
1552 | return fs_info->csum_root; | |
bcef60f2 AJ |
1553 | if (location->objectid == BTRFS_QUOTA_TREE_OBJECTID) |
1554 | return fs_info->quota_root ? fs_info->quota_root : | |
1555 | ERR_PTR(-ENOENT); | |
f7a81ea4 SB |
1556 | if (location->objectid == BTRFS_UUID_TREE_OBJECTID) |
1557 | return fs_info->uuid_root ? fs_info->uuid_root : | |
1558 | ERR_PTR(-ENOENT); | |
70f6d82e OS |
1559 | if (location->objectid == BTRFS_FREE_SPACE_TREE_OBJECTID) |
1560 | return fs_info->free_space_root ? fs_info->free_space_root : | |
1561 | ERR_PTR(-ENOENT); | |
4df27c4d | 1562 | again: |
cb517eab | 1563 | root = btrfs_lookup_fs_root(fs_info, location->objectid); |
48475471 | 1564 | if (root) { |
c00869f1 | 1565 | if (check_ref && btrfs_root_refs(&root->root_item) == 0) |
48475471 | 1566 | return ERR_PTR(-ENOENT); |
5eda7b5e | 1567 | return root; |
48475471 | 1568 | } |
5eda7b5e | 1569 | |
cb517eab | 1570 | root = btrfs_read_fs_root(fs_info->tree_root, location); |
5eda7b5e CM |
1571 | if (IS_ERR(root)) |
1572 | return root; | |
3394e160 | 1573 | |
c00869f1 | 1574 | if (check_ref && btrfs_root_refs(&root->root_item) == 0) { |
cb517eab | 1575 | ret = -ENOENT; |
581bb050 | 1576 | goto fail; |
35a30d7c | 1577 | } |
581bb050 | 1578 | |
cb517eab | 1579 | ret = btrfs_init_fs_root(root); |
ac08aedf CM |
1580 | if (ret) |
1581 | goto fail; | |
3394e160 | 1582 | |
381cf658 DS |
1583 | path = btrfs_alloc_path(); |
1584 | if (!path) { | |
1585 | ret = -ENOMEM; | |
1586 | goto fail; | |
1587 | } | |
1d4c08e0 DS |
1588 | key.objectid = BTRFS_ORPHAN_OBJECTID; |
1589 | key.type = BTRFS_ORPHAN_ITEM_KEY; | |
1590 | key.offset = location->objectid; | |
1591 | ||
1592 | ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); | |
381cf658 | 1593 | btrfs_free_path(path); |
d68fc57b YZ |
1594 | if (ret < 0) |
1595 | goto fail; | |
1596 | if (ret == 0) | |
27cdeb70 | 1597 | set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state); |
d68fc57b | 1598 | |
cb517eab | 1599 | ret = btrfs_insert_fs_root(fs_info, root); |
0f7d52f4 | 1600 | if (ret) { |
4df27c4d | 1601 | if (ret == -EEXIST) { |
84db5ccf | 1602 | btrfs_free_fs_root(root); |
4df27c4d YZ |
1603 | goto again; |
1604 | } | |
1605 | goto fail; | |
0f7d52f4 | 1606 | } |
edbd8d4e | 1607 | return root; |
4df27c4d | 1608 | fail: |
84db5ccf | 1609 | btrfs_free_fs_root(root); |
4df27c4d | 1610 | return ERR_PTR(ret); |
edbd8d4e CM |
1611 | } |
1612 | ||
04160088 CM |
1613 | static int btrfs_congested_fn(void *congested_data, int bdi_bits) |
1614 | { | |
1615 | struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data; | |
1616 | int ret = 0; | |
04160088 CM |
1617 | struct btrfs_device *device; |
1618 | struct backing_dev_info *bdi; | |
b7967db7 | 1619 | |
1f78160c XG |
1620 | rcu_read_lock(); |
1621 | list_for_each_entry_rcu(device, &info->fs_devices->devices, dev_list) { | |
dfe25020 CM |
1622 | if (!device->bdev) |
1623 | continue; | |
efa7c9f9 | 1624 | bdi = device->bdev->bd_bdi; |
ff9ea323 | 1625 | if (bdi_congested(bdi, bdi_bits)) { |
04160088 CM |
1626 | ret = 1; |
1627 | break; | |
1628 | } | |
1629 | } | |
1f78160c | 1630 | rcu_read_unlock(); |
04160088 CM |
1631 | return ret; |
1632 | } | |
1633 | ||
8b712842 CM |
1634 | /* |
1635 | * called by the kthread helper functions to finally call the bio end_io | |
1636 | * functions. This is where read checksum verification actually happens | |
1637 | */ | |
1638 | static void end_workqueue_fn(struct btrfs_work *work) | |
ce9adaa5 | 1639 | { |
ce9adaa5 | 1640 | struct bio *bio; |
97eb6b69 | 1641 | struct btrfs_end_io_wq *end_io_wq; |
ce9adaa5 | 1642 | |
97eb6b69 | 1643 | end_io_wq = container_of(work, struct btrfs_end_io_wq, work); |
8b712842 | 1644 | bio = end_io_wq->bio; |
ce9adaa5 | 1645 | |
4e4cbee9 | 1646 | bio->bi_status = end_io_wq->status; |
8b712842 CM |
1647 | bio->bi_private = end_io_wq->private; |
1648 | bio->bi_end_io = end_io_wq->end_io; | |
97eb6b69 | 1649 | kmem_cache_free(btrfs_end_io_wq_cache, end_io_wq); |
4246a0b6 | 1650 | bio_endio(bio); |
44b8bd7e CM |
1651 | } |
1652 | ||
a74a4b97 CM |
1653 | static int cleaner_kthread(void *arg) |
1654 | { | |
1655 | struct btrfs_root *root = arg; | |
0b246afa | 1656 | struct btrfs_fs_info *fs_info = root->fs_info; |
d0278245 | 1657 | int again; |
a74a4b97 | 1658 | |
d6fd0ae2 | 1659 | while (1) { |
d0278245 | 1660 | again = 0; |
a74a4b97 | 1661 | |
d0278245 | 1662 | /* Make the cleaner go to sleep early. */ |
2ff7e61e | 1663 | if (btrfs_need_cleaner_sleep(fs_info)) |
d0278245 MX |
1664 | goto sleep; |
1665 | ||
90c711ab ZB |
1666 | /* |
1667 | * Do not do anything if we might cause open_ctree() to block | |
1668 | * before we have finished mounting the filesystem. | |
1669 | */ | |
0b246afa | 1670 | if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags)) |
90c711ab ZB |
1671 | goto sleep; |
1672 | ||
0b246afa | 1673 | if (!mutex_trylock(&fs_info->cleaner_mutex)) |
d0278245 MX |
1674 | goto sleep; |
1675 | ||
dc7f370c MX |
1676 | /* |
1677 | * Avoid the problem that we change the status of the fs | |
1678 | * during the above check and trylock. | |
1679 | */ | |
2ff7e61e | 1680 | if (btrfs_need_cleaner_sleep(fs_info)) { |
0b246afa | 1681 | mutex_unlock(&fs_info->cleaner_mutex); |
dc7f370c | 1682 | goto sleep; |
76dda93c | 1683 | } |
a74a4b97 | 1684 | |
0b246afa | 1685 | mutex_lock(&fs_info->cleaner_delayed_iput_mutex); |
2ff7e61e | 1686 | btrfs_run_delayed_iputs(fs_info); |
0b246afa | 1687 | mutex_unlock(&fs_info->cleaner_delayed_iput_mutex); |
c2d6cb16 | 1688 | |
d0278245 | 1689 | again = btrfs_clean_one_deleted_snapshot(root); |
0b246afa | 1690 | mutex_unlock(&fs_info->cleaner_mutex); |
d0278245 MX |
1691 | |
1692 | /* | |
05323cd1 MX |
1693 | * The defragger has dealt with the R/O remount and umount, |
1694 | * so we needn't do anything special here. |
d0278245 | 1695 | */ |
0b246afa | 1696 | btrfs_run_defrag_inodes(fs_info); |
67c5e7d4 FM |
1697 | |
1698 | /* | |
1699 | * btrfs_delete_unused_bgs() acquires fs_info->delete_unused_bgs_mutex |
1700 | * to avoid racing with relocation (btrfs_relocate_chunk), and |
1701 | * relocation acquires fs_info->cleaner_mutex |
1702 | * (btrfs_relocate_block_group) after taking |
1703 | * fs_info->delete_unused_bgs_mutex. So we neither can nor need to |
1704 | * hold fs_info->cleaner_mutex when deleting unused block groups. |
1705 | */ | |
0b246afa | 1706 | btrfs_delete_unused_bgs(fs_info); |
d0278245 | 1707 | sleep: |
d6fd0ae2 OS |
1708 | if (kthread_should_park()) |
1709 | kthread_parkme(); | |
1710 | if (kthread_should_stop()) | |
1711 | return 0; | |
838fe188 | 1712 | if (!again) { |
a74a4b97 | 1713 | set_current_state(TASK_INTERRUPTIBLE); |
d6fd0ae2 | 1714 | schedule(); |
a74a4b97 CM |
1715 | __set_current_state(TASK_RUNNING); |
1716 | } | |
da288d28 | 1717 | } |
a74a4b97 CM |
1718 | } |
1719 | ||
1720 | static int transaction_kthread(void *arg) | |
1721 | { | |
1722 | struct btrfs_root *root = arg; | |
0b246afa | 1723 | struct btrfs_fs_info *fs_info = root->fs_info; |
a74a4b97 CM |
1724 | struct btrfs_trans_handle *trans; |
1725 | struct btrfs_transaction *cur; | |
8929ecfa | 1726 | u64 transid; |
a944442c | 1727 | time64_t now; |
a74a4b97 | 1728 | unsigned long delay; |
914b2007 | 1729 | bool cannot_commit; |
a74a4b97 CM |
1730 | |
1731 | do { | |
914b2007 | 1732 | cannot_commit = false; |
0b246afa JM |
1733 | delay = HZ * fs_info->commit_interval; |
1734 | mutex_lock(&fs_info->transaction_kthread_mutex); | |
a74a4b97 | 1735 | |
0b246afa JM |
1736 | spin_lock(&fs_info->trans_lock); |
1737 | cur = fs_info->running_transaction; | |
a74a4b97 | 1738 | if (!cur) { |
0b246afa | 1739 | spin_unlock(&fs_info->trans_lock); |
a74a4b97 CM |
1740 | goto sleep; |
1741 | } | |
31153d81 | 1742 | |
afd48513 | 1743 | now = ktime_get_seconds(); |
4a9d8bde | 1744 | if (cur->state < TRANS_STATE_BLOCKED && |
a514d638 | 1745 | !test_bit(BTRFS_FS_NEED_ASYNC_COMMIT, &fs_info->flags) && |
8b87dc17 | 1746 | (now < cur->start_time || |
0b246afa JM |
1747 | now - cur->start_time < fs_info->commit_interval)) { |
1748 | spin_unlock(&fs_info->trans_lock); | |
a74a4b97 CM |
1749 | delay = HZ * 5; |
1750 | goto sleep; | |
1751 | } | |
8929ecfa | 1752 | transid = cur->transid; |
0b246afa | 1753 | spin_unlock(&fs_info->trans_lock); |
56bec294 | 1754 | |
79787eaa | 1755 | /* If the file system is aborted, this will always fail. */ |
354aa0fb | 1756 | trans = btrfs_attach_transaction(root); |
914b2007 | 1757 | if (IS_ERR(trans)) { |
354aa0fb MX |
1758 | if (PTR_ERR(trans) != -ENOENT) |
1759 | cannot_commit = true; | |
79787eaa | 1760 | goto sleep; |
914b2007 | 1761 | } |
8929ecfa | 1762 | if (transid == trans->transid) { |
3a45bb20 | 1763 | btrfs_commit_transaction(trans); |
8929ecfa | 1764 | } else { |
3a45bb20 | 1765 | btrfs_end_transaction(trans); |
8929ecfa | 1766 | } |
a74a4b97 | 1767 | sleep: |
0b246afa JM |
1768 | wake_up_process(fs_info->cleaner_kthread); |
1769 | mutex_unlock(&fs_info->transaction_kthread_mutex); | |
a74a4b97 | 1770 | |
4e121c06 | 1771 | if (unlikely(test_bit(BTRFS_FS_STATE_ERROR, |
0b246afa | 1772 | &fs_info->fs_state))) |
2ff7e61e | 1773 | btrfs_cleanup_transaction(fs_info); |
ce63f891 | 1774 | if (!kthread_should_stop() && |
0b246afa | 1775 | (!btrfs_transaction_blocked(fs_info) || |
ce63f891 | 1776 | cannot_commit)) |
bc5511d0 | 1777 | schedule_timeout_interruptible(delay); |
a74a4b97 CM |
1778 | } while (!kthread_should_stop()); |
1779 | return 0; | |
1780 | } | |
1781 | ||
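A small standalone sketch, with assumed names and constants, of the timing test transaction_kthread() applies above: the periodic commit is skipped (and retried after a short delay) while the running transaction is younger than the commit interval.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define COMMIT_INTERVAL 30	/* seconds; mirrors the default commit interval */

static bool should_commit(time_t now, time_t start_time)
{
	/* A start time in the future counts as "too young" as well. */
	if (now < start_time)
		return false;
	return now - start_time >= COMMIT_INTERVAL;
}

int main(void)
{
	time_t now = time(NULL);

	printf("transaction started 5s ago  -> commit? %d\n",
	       should_commit(now, now - 5));
	printf("transaction started 45s ago -> commit? %d\n",
	       should_commit(now, now - 45));
	return 0;
}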
af31f5e5 CM |
1782 | /* |
1783 | * This will find the highest generation in the array of |
1784 | * root backups. The index of the newest backup is returned, |
1785 | * or -1 if we can't find anything. |
1786 | * | |
1787 | * We check to make sure the array is valid by comparing the | |
1788 | * generation of the latest root in the array with the generation | |
1789 | * in the super block. If they don't match we pitch it. | |
1790 | */ | |
1791 | static int find_newest_super_backup(struct btrfs_fs_info *info, u64 newest_gen) | |
1792 | { | |
1793 | u64 cur; | |
1794 | int newest_index = -1; | |
1795 | struct btrfs_root_backup *root_backup; | |
1796 | int i; | |
1797 | ||
1798 | for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) { | |
1799 | root_backup = info->super_copy->super_roots + i; | |
1800 | cur = btrfs_backup_tree_root_gen(root_backup); | |
1801 | if (cur == newest_gen) | |
1802 | newest_index = i; | |
1803 | } | |
1804 | ||
1805 | /* check to see if we actually wrapped around */ | |
1806 | if (newest_index == BTRFS_NUM_BACKUP_ROOTS - 1) { | |
1807 | root_backup = info->super_copy->super_roots; | |
1808 | cur = btrfs_backup_tree_root_gen(root_backup); | |
1809 | if (cur == newest_gen) | |
1810 | newest_index = 0; | |
1811 | } | |
1812 | return newest_index; | |
1813 | } | |
1814 | ||
1815 | ||
1816 | /* | |
1817 | * find the oldest backup so we know where to store new entries | |
1818 | * in the backup array. This will set the backup_root_index | |
1819 | * field in the fs_info struct | |
1820 | */ | |
1821 | static void find_oldest_super_backup(struct btrfs_fs_info *info, | |
1822 | u64 newest_gen) | |
1823 | { | |
1824 | int newest_index = -1; | |
1825 | ||
1826 | newest_index = find_newest_super_backup(info, newest_gen); | |
1827 | /* if there was garbage in there, just move along */ | |
1828 | if (newest_index == -1) { | |
1829 | info->backup_root_index = 0; | |
1830 | } else { | |
1831 | info->backup_root_index = (newest_index + 1) % BTRFS_NUM_BACKUP_ROOTS; | |
1832 | } | |
1833 | } | |
1834 | ||
1835 | /* | |
1836 | * copy all the root pointers into the super backup array. | |
1837 | * this will bump the backup pointer by one when it is | |
1838 | * done | |
1839 | */ | |
1840 | static void backup_super_roots(struct btrfs_fs_info *info) | |
1841 | { | |
1842 | int next_backup; | |
1843 | struct btrfs_root_backup *root_backup; | |
1844 | int last_backup; | |
1845 | ||
1846 | next_backup = info->backup_root_index; | |
1847 | last_backup = (next_backup + BTRFS_NUM_BACKUP_ROOTS - 1) % | |
1848 | BTRFS_NUM_BACKUP_ROOTS; | |
1849 | ||
1850 | /* | |
1851 | * Just overwrite the last backup if we're at the same generation; |
1852 | * this happens only at umount. |
1853 | */ | |
1854 | root_backup = info->super_for_commit->super_roots + last_backup; | |
1855 | if (btrfs_backup_tree_root_gen(root_backup) == | |
1856 | btrfs_header_generation(info->tree_root->node)) | |
1857 | next_backup = last_backup; | |
1858 | ||
1859 | root_backup = info->super_for_commit->super_roots + next_backup; | |
1860 | ||
1861 | /* | |
1862 | * make sure all of our padding and empty slots get zero filled | |
1863 | * regardless of which ones we use today | |
1864 | */ | |
1865 | memset(root_backup, 0, sizeof(*root_backup)); | |
1866 | ||
1867 | info->backup_root_index = (next_backup + 1) % BTRFS_NUM_BACKUP_ROOTS; | |
1868 | ||
1869 | btrfs_set_backup_tree_root(root_backup, info->tree_root->node->start); | |
1870 | btrfs_set_backup_tree_root_gen(root_backup, | |
1871 | btrfs_header_generation(info->tree_root->node)); | |
1872 | ||
1873 | btrfs_set_backup_tree_root_level(root_backup, | |
1874 | btrfs_header_level(info->tree_root->node)); | |
1875 | ||
1876 | btrfs_set_backup_chunk_root(root_backup, info->chunk_root->node->start); | |
1877 | btrfs_set_backup_chunk_root_gen(root_backup, | |
1878 | btrfs_header_generation(info->chunk_root->node)); | |
1879 | btrfs_set_backup_chunk_root_level(root_backup, | |
1880 | btrfs_header_level(info->chunk_root->node)); | |
1881 | ||
1882 | btrfs_set_backup_extent_root(root_backup, info->extent_root->node->start); | |
1883 | btrfs_set_backup_extent_root_gen(root_backup, | |
1884 | btrfs_header_generation(info->extent_root->node)); | |
1885 | btrfs_set_backup_extent_root_level(root_backup, | |
1886 | btrfs_header_level(info->extent_root->node)); | |
1887 | ||
7c7e82a7 CM |
1888 | /* |
1889 | * we might commit during log recovery, which happens before we set | |
1890 | * the fs_root. Make sure it is valid before we fill it in. | |
1891 | */ | |
1892 | if (info->fs_root && info->fs_root->node) { | |
1893 | btrfs_set_backup_fs_root(root_backup, | |
1894 | info->fs_root->node->start); | |
1895 | btrfs_set_backup_fs_root_gen(root_backup, | |
af31f5e5 | 1896 | btrfs_header_generation(info->fs_root->node)); |
7c7e82a7 | 1897 | btrfs_set_backup_fs_root_level(root_backup, |
af31f5e5 | 1898 | btrfs_header_level(info->fs_root->node)); |
7c7e82a7 | 1899 | } |
af31f5e5 CM |
1900 | |
1901 | btrfs_set_backup_dev_root(root_backup, info->dev_root->node->start); | |
1902 | btrfs_set_backup_dev_root_gen(root_backup, | |
1903 | btrfs_header_generation(info->dev_root->node)); | |
1904 | btrfs_set_backup_dev_root_level(root_backup, | |
1905 | btrfs_header_level(info->dev_root->node)); | |
1906 | ||
1907 | btrfs_set_backup_csum_root(root_backup, info->csum_root->node->start); | |
1908 | btrfs_set_backup_csum_root_gen(root_backup, | |
1909 | btrfs_header_generation(info->csum_root->node)); | |
1910 | btrfs_set_backup_csum_root_level(root_backup, | |
1911 | btrfs_header_level(info->csum_root->node)); | |
1912 | ||
1913 | btrfs_set_backup_total_bytes(root_backup, | |
1914 | btrfs_super_total_bytes(info->super_copy)); | |
1915 | btrfs_set_backup_bytes_used(root_backup, | |
1916 | btrfs_super_bytes_used(info->super_copy)); | |
1917 | btrfs_set_backup_num_devices(root_backup, | |
1918 | btrfs_super_num_devices(info->super_copy)); | |
1919 | ||
1920 | /* | |
1921 | * if we don't copy this out to the super_copy, it won't get remembered | |
1922 | * for the next commit | |
1923 | */ | |
1924 | memcpy(&info->super_copy->super_roots, | |
1925 | &info->super_for_commit->super_roots, | |
1926 | sizeof(*root_backup) * BTRFS_NUM_BACKUP_ROOTS); | |
1927 | } | |
1928 | ||
1929 | /* | |
1930 | * this copies info out of the root backup array and back into | |
1931 | * the in-memory super block. It is meant to help iterate through | |
1932 | * the array, so you send it the number of backups you've already | |
1933 | * tried and the last backup index you used. | |
1934 | * | |
1935 | * this returns -1 when it has tried all the backups | |
1936 | */ | |
1937 | static noinline int next_root_backup(struct btrfs_fs_info *info, | |
1938 | struct btrfs_super_block *super, | |
1939 | int *num_backups_tried, int *backup_index) | |
1940 | { | |
1941 | struct btrfs_root_backup *root_backup; | |
1942 | int newest = *backup_index; | |
1943 | ||
1944 | if (*num_backups_tried == 0) { | |
1945 | u64 gen = btrfs_super_generation(super); | |
1946 | ||
1947 | newest = find_newest_super_backup(info, gen); | |
1948 | if (newest == -1) | |
1949 | return -1; | |
1950 | ||
1951 | *backup_index = newest; | |
1952 | *num_backups_tried = 1; | |
1953 | } else if (*num_backups_tried == BTRFS_NUM_BACKUP_ROOTS) { | |
1954 | /* we've tried all the backups, all done */ | |
1955 | return -1; | |
1956 | } else { | |
1957 | /* jump to the next oldest backup */ | |
1958 | newest = (*backup_index + BTRFS_NUM_BACKUP_ROOTS - 1) % | |
1959 | BTRFS_NUM_BACKUP_ROOTS; | |
1960 | *backup_index = newest; | |
1961 | *num_backups_tried += 1; | |
1962 | } | |
1963 | root_backup = super->super_roots + newest; | |
1964 | ||
1965 | btrfs_set_super_generation(super, | |
1966 | btrfs_backup_tree_root_gen(root_backup)); | |
1967 | btrfs_set_super_root(super, btrfs_backup_tree_root(root_backup)); | |
1968 | btrfs_set_super_root_level(super, | |
1969 | btrfs_backup_tree_root_level(root_backup)); | |
1970 | btrfs_set_super_bytes_used(super, btrfs_backup_bytes_used(root_backup)); | |
1971 | ||
1972 | /* | |
1973 | * FIXME: the total bytes and num_devices need to match, otherwise |
1974 | * a fsck should be required |
1975 | */ | |
1976 | btrfs_set_super_total_bytes(super, btrfs_backup_total_bytes(root_backup)); | |
1977 | btrfs_set_super_num_devices(super, btrfs_backup_num_devices(root_backup)); | |
1978 | return 0; | |
1979 | } | |
1980 | ||
7abadb64 LB |
1981 | /* helper to cleanup workers */ |
1982 | static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info) | |
1983 | { | |
dc6e3209 | 1984 | btrfs_destroy_workqueue(fs_info->fixup_workers); |
afe3d242 | 1985 | btrfs_destroy_workqueue(fs_info->delalloc_workers); |
5cdc7ad3 | 1986 | btrfs_destroy_workqueue(fs_info->workers); |
fccb5d86 | 1987 | btrfs_destroy_workqueue(fs_info->endio_workers); |
fccb5d86 | 1988 | btrfs_destroy_workqueue(fs_info->endio_raid56_workers); |
8b110e39 | 1989 | btrfs_destroy_workqueue(fs_info->endio_repair_workers); |
d05a33ac | 1990 | btrfs_destroy_workqueue(fs_info->rmw_workers); |
fccb5d86 QW |
1991 | btrfs_destroy_workqueue(fs_info->endio_write_workers); |
1992 | btrfs_destroy_workqueue(fs_info->endio_freespace_worker); | |
a8c93d4e | 1993 | btrfs_destroy_workqueue(fs_info->submit_workers); |
5b3bc44e | 1994 | btrfs_destroy_workqueue(fs_info->delayed_workers); |
e66f0bb1 | 1995 | btrfs_destroy_workqueue(fs_info->caching_workers); |
736cfa15 | 1996 | btrfs_destroy_workqueue(fs_info->readahead_workers); |
a44903ab | 1997 | btrfs_destroy_workqueue(fs_info->flush_workers); |
fc97fab0 | 1998 | btrfs_destroy_workqueue(fs_info->qgroup_rescan_workers); |
a79b7d4b | 1999 | btrfs_destroy_workqueue(fs_info->extent_workers); |
a9b9477d FM |
2000 | /* |
2001 | * Now that all other work queues are destroyed, we can safely destroy | |
2002 | * the queues used for metadata I/O, since tasks from those other work | |
2003 | * queues can do metadata I/O operations. | |
2004 | */ | |
2005 | btrfs_destroy_workqueue(fs_info->endio_meta_workers); | |
2006 | btrfs_destroy_workqueue(fs_info->endio_meta_write_workers); | |
7abadb64 LB |
2007 | } |
2008 | ||
2e9f5954 R |
2009 | static void free_root_extent_buffers(struct btrfs_root *root) |
2010 | { | |
2011 | if (root) { | |
2012 | free_extent_buffer(root->node); | |
2013 | free_extent_buffer(root->commit_root); | |
2014 | root->node = NULL; | |
2015 | root->commit_root = NULL; | |
2016 | } | |
2017 | } | |
2018 | ||
af31f5e5 CM |
2019 | /* helper to cleanup tree roots */ |
2020 | static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root) | |
2021 | { | |
2e9f5954 | 2022 | free_root_extent_buffers(info->tree_root); |
655b09fe | 2023 | |
2e9f5954 R |
2024 | free_root_extent_buffers(info->dev_root); |
2025 | free_root_extent_buffers(info->extent_root); | |
2026 | free_root_extent_buffers(info->csum_root); | |
2027 | free_root_extent_buffers(info->quota_root); | |
2028 | free_root_extent_buffers(info->uuid_root); | |
2029 | if (chunk_root) | |
2030 | free_root_extent_buffers(info->chunk_root); | |
70f6d82e | 2031 | free_root_extent_buffers(info->free_space_root); |
af31f5e5 CM |
2032 | } |
2033 | ||
faa2dbf0 | 2034 | void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info) |
171f6537 JB |
2035 | { |
2036 | int ret; | |
2037 | struct btrfs_root *gang[8]; | |
2038 | int i; | |
2039 | ||
2040 | while (!list_empty(&fs_info->dead_roots)) { | |
2041 | gang[0] = list_entry(fs_info->dead_roots.next, | |
2042 | struct btrfs_root, root_list); | |
2043 | list_del(&gang[0]->root_list); | |
2044 | ||
27cdeb70 | 2045 | if (test_bit(BTRFS_ROOT_IN_RADIX, &gang[0]->state)) { |
cb517eab | 2046 | btrfs_drop_and_free_fs_root(fs_info, gang[0]); |
171f6537 JB |
2047 | } else { |
2048 | free_extent_buffer(gang[0]->node); | |
2049 | free_extent_buffer(gang[0]->commit_root); | |
b0feb9d9 | 2050 | btrfs_put_fs_root(gang[0]); |
171f6537 JB |
2051 | } |
2052 | } | |
2053 | ||
2054 | while (1) { | |
2055 | ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix, | |
2056 | (void **)gang, 0, | |
2057 | ARRAY_SIZE(gang)); | |
2058 | if (!ret) | |
2059 | break; | |
2060 | for (i = 0; i < ret; i++) | |
cb517eab | 2061 | btrfs_drop_and_free_fs_root(fs_info, gang[i]); |
171f6537 | 2062 | } |
1a4319cc LB |
2063 | |
2064 | if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) { | |
2065 | btrfs_free_log_root_tree(NULL, fs_info); | |
2ff7e61e | 2066 | btrfs_destroy_pinned_extent(fs_info, fs_info->pinned_extents); |
1a4319cc | 2067 | } |
171f6537 | 2068 | } |
af31f5e5 | 2069 | |
638aa7ed ES |
2070 | static void btrfs_init_scrub(struct btrfs_fs_info *fs_info) |
2071 | { | |
2072 | mutex_init(&fs_info->scrub_lock); | |
2073 | atomic_set(&fs_info->scrubs_running, 0); | |
2074 | atomic_set(&fs_info->scrub_pause_req, 0); | |
2075 | atomic_set(&fs_info->scrubs_paused, 0); | |
2076 | atomic_set(&fs_info->scrub_cancel_req, 0); | |
2077 | init_waitqueue_head(&fs_info->scrub_pause_wait); | |
2078 | fs_info->scrub_workers_refcnt = 0; | |
2079 | } | |
2080 | ||
779a65a4 ES |
2081 | static void btrfs_init_balance(struct btrfs_fs_info *fs_info) |
2082 | { | |
2083 | spin_lock_init(&fs_info->balance_lock); | |
2084 | mutex_init(&fs_info->balance_mutex); | |
779a65a4 ES |
2085 | atomic_set(&fs_info->balance_pause_req, 0); |
2086 | atomic_set(&fs_info->balance_cancel_req, 0); | |
2087 | fs_info->balance_ctl = NULL; | |
2088 | init_waitqueue_head(&fs_info->balance_wait_q); | |
2089 | } | |
2090 | ||
6bccf3ab | 2091 | static void btrfs_init_btree_inode(struct btrfs_fs_info *fs_info) |
f37938e0 | 2092 | { |
2ff7e61e JM |
2093 | struct inode *inode = fs_info->btree_inode; |
2094 | ||
2095 | inode->i_ino = BTRFS_BTREE_INODE_OBJECTID; | |
2096 | set_nlink(inode, 1); | |
f37938e0 ES |
2097 | /* |
2098 | * We set the i_size on the btree inode to the max possible offset; |
2099 | * the real end of the address space is determined by all of | |
2100 | * the devices in the system | |
2101 | */ | |
2ff7e61e JM |
2102 | inode->i_size = OFFSET_MAX; |
2103 | inode->i_mapping->a_ops = &btree_aops; | |
f37938e0 | 2104 | |
2ff7e61e | 2105 | RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node); |
c6100a4b | 2106 | extent_io_tree_init(&BTRFS_I(inode)->io_tree, inode); |
2ff7e61e JM |
2107 | BTRFS_I(inode)->io_tree.track_uptodate = 0; |
2108 | extent_map_tree_init(&BTRFS_I(inode)->extent_tree); | |
f37938e0 | 2109 | |
2ff7e61e | 2110 | BTRFS_I(inode)->io_tree.ops = &btree_extent_io_ops; |
f37938e0 | 2111 | |
2ff7e61e JM |
2112 | BTRFS_I(inode)->root = fs_info->tree_root; |
2113 | memset(&BTRFS_I(inode)->location, 0, sizeof(struct btrfs_key)); | |
2114 | set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags); | |
2115 | btrfs_insert_inode_hash(inode); | |
f37938e0 ES |
2116 | } |
2117 | ||
ad618368 ES |
2118 | static void btrfs_init_dev_replace_locks(struct btrfs_fs_info *fs_info) |
2119 | { | |
ad618368 | 2120 | mutex_init(&fs_info->dev_replace.lock_finishing_cancel_unmount); |
73beece9 | 2121 | rwlock_init(&fs_info->dev_replace.lock); |
73beece9 | 2122 | atomic_set(&fs_info->dev_replace.blocking_readers, 0); |
7f8d236a | 2123 | init_waitqueue_head(&fs_info->dev_replace.replace_wait); |
73beece9 | 2124 | init_waitqueue_head(&fs_info->dev_replace.read_lock_wq); |
ad618368 ES |
2125 | } |
2126 | ||
f9e92e40 ES |
2127 | static void btrfs_init_qgroup(struct btrfs_fs_info *fs_info) |
2128 | { | |
2129 | spin_lock_init(&fs_info->qgroup_lock); | |
2130 | mutex_init(&fs_info->qgroup_ioctl_lock); | |
2131 | fs_info->qgroup_tree = RB_ROOT; | |
2132 | fs_info->qgroup_op_tree = RB_ROOT; | |
2133 | INIT_LIST_HEAD(&fs_info->dirty_qgroups); | |
2134 | fs_info->qgroup_seq = 1; | |
f9e92e40 | 2135 | fs_info->qgroup_ulist = NULL; |
d2c609b8 | 2136 | fs_info->qgroup_rescan_running = false; |
f9e92e40 ES |
2137 | mutex_init(&fs_info->qgroup_rescan_lock); |
2138 | } | |
2139 | ||
2a458198 ES |
2140 | static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info, |
2141 | struct btrfs_fs_devices *fs_devices) | |
2142 | { | |
f7b885be | 2143 | u32 max_active = fs_info->thread_pool_size; |
6f011058 | 2144 | unsigned int flags = WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND; |
2a458198 ES |
2145 | |
2146 | fs_info->workers = | |
cb001095 JM |
2147 | btrfs_alloc_workqueue(fs_info, "worker", |
2148 | flags | WQ_HIGHPRI, max_active, 16); | |
2a458198 ES |
2149 | |
2150 | fs_info->delalloc_workers = | |
cb001095 JM |
2151 | btrfs_alloc_workqueue(fs_info, "delalloc", |
2152 | flags, max_active, 2); | |
2a458198 ES |
2153 | |
2154 | fs_info->flush_workers = | |
cb001095 JM |
2155 | btrfs_alloc_workqueue(fs_info, "flush_delalloc", |
2156 | flags, max_active, 0); | |
2a458198 ES |
2157 | |
2158 | fs_info->caching_workers = | |
cb001095 | 2159 | btrfs_alloc_workqueue(fs_info, "cache", flags, max_active, 0); |
2a458198 ES |
2160 | |
2161 | /* | |
2162 | * a higher idle thresh on the submit workers makes it much more | |
2163 | * likely that bios will be sent down in a sane order to the |
2164 | * devices | |
2165 | */ | |
2166 | fs_info->submit_workers = | |
cb001095 | 2167 | btrfs_alloc_workqueue(fs_info, "submit", flags, |
2a458198 ES |
2168 | min_t(u64, fs_devices->num_devices, |
2169 | max_active), 64); | |
2170 | ||
2171 | fs_info->fixup_workers = | |
cb001095 | 2172 | btrfs_alloc_workqueue(fs_info, "fixup", flags, 1, 0); |
2a458198 ES |
2173 | |
2174 | /* | |
2175 | * endios are largely parallel and should have a very | |
2176 | * low idle thresh | |
2177 | */ | |
2178 | fs_info->endio_workers = | |
cb001095 | 2179 | btrfs_alloc_workqueue(fs_info, "endio", flags, max_active, 4); |
2a458198 | 2180 | fs_info->endio_meta_workers = |
cb001095 JM |
2181 | btrfs_alloc_workqueue(fs_info, "endio-meta", flags, |
2182 | max_active, 4); | |
2a458198 | 2183 | fs_info->endio_meta_write_workers = |
cb001095 JM |
2184 | btrfs_alloc_workqueue(fs_info, "endio-meta-write", flags, |
2185 | max_active, 2); | |
2a458198 | 2186 | fs_info->endio_raid56_workers = |
cb001095 JM |
2187 | btrfs_alloc_workqueue(fs_info, "endio-raid56", flags, |
2188 | max_active, 4); | |
2a458198 | 2189 | fs_info->endio_repair_workers = |
cb001095 | 2190 | btrfs_alloc_workqueue(fs_info, "endio-repair", flags, 1, 0); |
2a458198 | 2191 | fs_info->rmw_workers = |
cb001095 | 2192 | btrfs_alloc_workqueue(fs_info, "rmw", flags, max_active, 2); |
2a458198 | 2193 | fs_info->endio_write_workers = |
cb001095 JM |
2194 | btrfs_alloc_workqueue(fs_info, "endio-write", flags, |
2195 | max_active, 2); | |
2a458198 | 2196 | fs_info->endio_freespace_worker = |
cb001095 JM |
2197 | btrfs_alloc_workqueue(fs_info, "freespace-write", flags, |
2198 | max_active, 0); | |
2a458198 | 2199 | fs_info->delayed_workers = |
cb001095 JM |
2200 | btrfs_alloc_workqueue(fs_info, "delayed-meta", flags, |
2201 | max_active, 0); | |
2a458198 | 2202 | fs_info->readahead_workers = |
cb001095 JM |
2203 | btrfs_alloc_workqueue(fs_info, "readahead", flags, |
2204 | max_active, 2); | |
2a458198 | 2205 | fs_info->qgroup_rescan_workers = |
cb001095 | 2206 | btrfs_alloc_workqueue(fs_info, "qgroup-rescan", flags, 1, 0); |
2a458198 | 2207 | fs_info->extent_workers = |
cb001095 | 2208 | btrfs_alloc_workqueue(fs_info, "extent-refs", flags, |
2a458198 ES |
2209 | min_t(u64, fs_devices->num_devices, |
2210 | max_active), 8); | |
2211 | ||
2212 | if (!(fs_info->workers && fs_info->delalloc_workers && | |
2213 | fs_info->submit_workers && fs_info->flush_workers && | |
2214 | fs_info->endio_workers && fs_info->endio_meta_workers && | |
2215 | fs_info->endio_meta_write_workers && | |
2216 | fs_info->endio_repair_workers && | |
2217 | fs_info->endio_write_workers && fs_info->endio_raid56_workers && | |
2218 | fs_info->endio_freespace_worker && fs_info->rmw_workers && | |
2219 | fs_info->caching_workers && fs_info->readahead_workers && | |
2220 | fs_info->fixup_workers && fs_info->delayed_workers && | |
2221 | fs_info->extent_workers && | |
2222 | fs_info->qgroup_rescan_workers)) { | |
2223 | return -ENOMEM; | |
2224 | } | |
2225 | ||
2226 | return 0; | |
2227 | } | |
2228 | ||
63443bf5 ES |
2229 | static int btrfs_replay_log(struct btrfs_fs_info *fs_info, |
2230 | struct btrfs_fs_devices *fs_devices) | |
2231 | { | |
2232 | int ret; | |
63443bf5 ES |
2233 | struct btrfs_root *log_tree_root; |
2234 | struct btrfs_super_block *disk_super = fs_info->super_copy; | |
2235 | u64 bytenr = btrfs_super_log_root(disk_super); | |
581c1760 | 2236 | int level = btrfs_super_log_root_level(disk_super); |
63443bf5 ES |
2237 | |
2238 | if (fs_devices->rw_devices == 0) { | |
f14d104d | 2239 | btrfs_warn(fs_info, "log replay required on RO media"); |
63443bf5 ES |
2240 | return -EIO; |
2241 | } | |
2242 | ||
74e4d827 | 2243 | log_tree_root = btrfs_alloc_root(fs_info, GFP_KERNEL); |
63443bf5 ES |
2244 | if (!log_tree_root) |
2245 | return -ENOMEM; | |
2246 | ||
da17066c | 2247 | __setup_root(log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID); |
63443bf5 | 2248 | |
2ff7e61e | 2249 | log_tree_root->node = read_tree_block(fs_info, bytenr, |
581c1760 QW |
2250 | fs_info->generation + 1, |
2251 | level, NULL); | |
64c043de | 2252 | if (IS_ERR(log_tree_root->node)) { |
f14d104d | 2253 | btrfs_warn(fs_info, "failed to read log tree"); |
0eeff236 | 2254 | ret = PTR_ERR(log_tree_root->node); |
64c043de | 2255 | kfree(log_tree_root); |
0eeff236 | 2256 | return ret; |
64c043de | 2257 | } else if (!extent_buffer_uptodate(log_tree_root->node)) { |
f14d104d | 2258 | btrfs_err(fs_info, "failed to read log tree"); |
63443bf5 ES |
2259 | free_extent_buffer(log_tree_root->node); |
2260 | kfree(log_tree_root); | |
2261 | return -EIO; | |
2262 | } | |
2263 | /* returns with log_tree_root freed on success */ | |
2264 | ret = btrfs_recover_log_trees(log_tree_root); | |
2265 | if (ret) { | |
0b246afa JM |
2266 | btrfs_handle_fs_error(fs_info, ret, |
2267 | "Failed to recover log tree"); | |
63443bf5 ES |
2268 | free_extent_buffer(log_tree_root->node); |
2269 | kfree(log_tree_root); | |
2270 | return ret; | |
2271 | } | |
2272 | ||
bc98a42c | 2273 | if (sb_rdonly(fs_info->sb)) { |
6bccf3ab | 2274 | ret = btrfs_commit_super(fs_info); |
63443bf5 ES |
2275 | if (ret) |
2276 | return ret; | |
2277 | } | |
2278 | ||
2279 | return 0; | |
2280 | } | |
2281 | ||
6bccf3ab | 2282 | static int btrfs_read_roots(struct btrfs_fs_info *fs_info) |
4bbcaa64 | 2283 | { |
6bccf3ab | 2284 | struct btrfs_root *tree_root = fs_info->tree_root; |
a4f3d2c4 | 2285 | struct btrfs_root *root; |
4bbcaa64 ES |
2286 | struct btrfs_key location; |
2287 | int ret; | |
2288 | ||
6bccf3ab JM |
2289 | BUG_ON(!fs_info->tree_root); |
2290 | ||
4bbcaa64 ES |
2291 | location.objectid = BTRFS_EXTENT_TREE_OBJECTID; |
2292 | location.type = BTRFS_ROOT_ITEM_KEY; | |
2293 | location.offset = 0; | |
2294 | ||
a4f3d2c4 | 2295 | root = btrfs_read_tree_root(tree_root, &location); |
f50f4353 LB |
2296 | if (IS_ERR(root)) { |
2297 | ret = PTR_ERR(root); | |
2298 | goto out; | |
2299 | } | |
a4f3d2c4 DS |
2300 | set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state); |
2301 | fs_info->extent_root = root; | |
4bbcaa64 ES |
2302 | |
2303 | location.objectid = BTRFS_DEV_TREE_OBJECTID; | |
a4f3d2c4 | 2304 | root = btrfs_read_tree_root(tree_root, &location); |
f50f4353 LB |
2305 | if (IS_ERR(root)) { |
2306 | ret = PTR_ERR(root); | |
2307 | goto out; | |
2308 | } | |
a4f3d2c4 DS |
2309 | set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state); |
2310 | fs_info->dev_root = root; | |
4bbcaa64 ES |
2311 | btrfs_init_devices_late(fs_info); |
2312 | ||
2313 | location.objectid = BTRFS_CSUM_TREE_OBJECTID; | |
a4f3d2c4 | 2314 | root = btrfs_read_tree_root(tree_root, &location); |
f50f4353 LB |
2315 | if (IS_ERR(root)) { |
2316 | ret = PTR_ERR(root); | |
2317 | goto out; | |
2318 | } | |
a4f3d2c4 DS |
2319 | set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state); |
2320 | fs_info->csum_root = root; | |
4bbcaa64 ES |
2321 | |
2322 | location.objectid = BTRFS_QUOTA_TREE_OBJECTID; | |
a4f3d2c4 DS |
2323 | root = btrfs_read_tree_root(tree_root, &location); |
2324 | if (!IS_ERR(root)) { | |
2325 | set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state); | |
afcdd129 | 2326 | set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags); |
a4f3d2c4 | 2327 | fs_info->quota_root = root; |
4bbcaa64 ES |
2328 | } |
2329 | ||
2330 | location.objectid = BTRFS_UUID_TREE_OBJECTID; | |
a4f3d2c4 DS |
2331 | root = btrfs_read_tree_root(tree_root, &location); |
2332 | if (IS_ERR(root)) { | |
2333 | ret = PTR_ERR(root); | |
4bbcaa64 | 2334 | if (ret != -ENOENT) |
f50f4353 | 2335 | goto out; |
4bbcaa64 | 2336 | } else { |
a4f3d2c4 DS |
2337 | set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state); |
2338 | fs_info->uuid_root = root; | |
4bbcaa64 ES |
2339 | } |
2340 | ||
70f6d82e OS |
2341 | if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) { |
2342 | location.objectid = BTRFS_FREE_SPACE_TREE_OBJECTID; | |
2343 | root = btrfs_read_tree_root(tree_root, &location); | |
f50f4353 LB |
2344 | if (IS_ERR(root)) { |
2345 | ret = PTR_ERR(root); | |
2346 | goto out; | |
2347 | } | |
70f6d82e OS |
2348 | set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state); |
2349 | fs_info->free_space_root = root; | |
2350 | } | |
2351 | ||
4bbcaa64 | 2352 | return 0; |
f50f4353 LB |
2353 | out: |
2354 | btrfs_warn(fs_info, "failed to read root (objectid=%llu): %d", | |
2355 | location.objectid, ret); | |
2356 | return ret; | |
4bbcaa64 ES |
2357 | } |
2358 | ||
069ec957 QW |
2359 | /* |
2360 | * Real super block validation | |
2361 | * NOTE: super csum type and incompat features will not be checked here. | |
2362 | * | |
2363 | * @sb: super block to check | |
2364 | * @mirror_num: the super block number to check its bytenr: | |
2365 | * 0 the primary (1st) sb | |
2366 | * 1, 2 2nd and 3rd backup copy | |
2367 | * -1 skip bytenr check | |
2368 | */ | |
2369 | static int validate_super(struct btrfs_fs_info *fs_info, | |
2370 | struct btrfs_super_block *sb, int mirror_num) | |
21a852b0 | 2371 | { |
21a852b0 QW |
2372 | u64 nodesize = btrfs_super_nodesize(sb); |
2373 | u64 sectorsize = btrfs_super_sectorsize(sb); | |
2374 | int ret = 0; | |
2375 | ||
2376 | if (btrfs_super_magic(sb) != BTRFS_MAGIC) { | |
2377 | btrfs_err(fs_info, "no valid FS found"); | |
2378 | ret = -EINVAL; | |
2379 | } | |
2380 | if (btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP) { | |
2381 | btrfs_err(fs_info, "unrecognized or unsupported super flag: %llu", | |
2382 | btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP); | |
2383 | ret = -EINVAL; | |
2384 | } | |
2385 | if (btrfs_super_root_level(sb) >= BTRFS_MAX_LEVEL) { | |
2386 | btrfs_err(fs_info, "tree_root level too big: %d >= %d", | |
2387 | btrfs_super_root_level(sb), BTRFS_MAX_LEVEL); | |
2388 | ret = -EINVAL; | |
2389 | } | |
2390 | if (btrfs_super_chunk_root_level(sb) >= BTRFS_MAX_LEVEL) { | |
2391 | btrfs_err(fs_info, "chunk_root level too big: %d >= %d", | |
2392 | btrfs_super_chunk_root_level(sb), BTRFS_MAX_LEVEL); | |
2393 | ret = -EINVAL; | |
2394 | } | |
2395 | if (btrfs_super_log_root_level(sb) >= BTRFS_MAX_LEVEL) { | |
2396 | btrfs_err(fs_info, "log_root level too big: %d >= %d", | |
2397 | btrfs_super_log_root_level(sb), BTRFS_MAX_LEVEL); | |
2398 | ret = -EINVAL; | |
2399 | } | |
2400 | ||
2401 | /* | |
2402 | * Check sectorsize and nodesize first; other checks will need them. |
2403 | * Check all possible sectorsizes (4K, 8K, 16K, 32K, 64K) here. |
2404 | */ | |
2405 | if (!is_power_of_2(sectorsize) || sectorsize < 4096 || | |
2406 | sectorsize > BTRFS_MAX_METADATA_BLOCKSIZE) { | |
2407 | btrfs_err(fs_info, "invalid sectorsize %llu", sectorsize); | |
2408 | ret = -EINVAL; | |
2409 | } | |
2410 | /* Only sectorsize == PAGE_SIZE is supported for now */ |
2411 | if (sectorsize != PAGE_SIZE) { | |
2412 | btrfs_err(fs_info, | |
2413 | "sectorsize %llu not supported yet, only support %lu", | |
2414 | sectorsize, PAGE_SIZE); | |
2415 | ret = -EINVAL; | |
2416 | } | |
2417 | if (!is_power_of_2(nodesize) || nodesize < sectorsize || | |
2418 | nodesize > BTRFS_MAX_METADATA_BLOCKSIZE) { | |
2419 | btrfs_err(fs_info, "invalid nodesize %llu", nodesize); | |
2420 | ret = -EINVAL; | |
2421 | } | |
2422 | if (nodesize != le32_to_cpu(sb->__unused_leafsize)) { | |
2423 | btrfs_err(fs_info, "invalid leafsize %u, should be %llu", | |
2424 | le32_to_cpu(sb->__unused_leafsize), nodesize); | |
2425 | ret = -EINVAL; | |
2426 | } | |
2427 | ||
2428 | /* Root alignment check */ | |
2429 | if (!IS_ALIGNED(btrfs_super_root(sb), sectorsize)) { | |
2430 | btrfs_warn(fs_info, "tree_root block unaligned: %llu", | |
2431 | btrfs_super_root(sb)); | |
2432 | ret = -EINVAL; | |
2433 | } | |
2434 | if (!IS_ALIGNED(btrfs_super_chunk_root(sb), sectorsize)) { | |
2435 | btrfs_warn(fs_info, "chunk_root block unaligned: %llu", | |
2436 | btrfs_super_chunk_root(sb)); | |
2437 | ret = -EINVAL; | |
2438 | } | |
2439 | if (!IS_ALIGNED(btrfs_super_log_root(sb), sectorsize)) { | |
2440 | btrfs_warn(fs_info, "log_root block unaligned: %llu", | |
2441 | btrfs_super_log_root(sb)); | |
2442 | ret = -EINVAL; | |
2443 | } | |
2444 | ||
2445 | if (memcmp(fs_info->fsid, sb->dev_item.fsid, BTRFS_FSID_SIZE) != 0) { | |
2446 | btrfs_err(fs_info, | |
2447 | "dev_item UUID does not match fsid: %pU != %pU", | |
2448 | fs_info->fsid, sb->dev_item.fsid); | |
2449 | ret = -EINVAL; | |
2450 | } | |
2451 | ||
2452 | /* | |
2453 | * Hint to catch really bogus numbers, bit flips and the like; more exact |
2454 | * checks are done later |
2455 | */ | |
2456 | if (btrfs_super_bytes_used(sb) < 6 * btrfs_super_nodesize(sb)) { | |
2457 | btrfs_err(fs_info, "bytes_used is too small %llu", | |
2458 | btrfs_super_bytes_used(sb)); | |
2459 | ret = -EINVAL; | |
2460 | } | |
2461 | if (!is_power_of_2(btrfs_super_stripesize(sb))) { | |
2462 | btrfs_err(fs_info, "invalid stripesize %u", | |
2463 | btrfs_super_stripesize(sb)); | |
2464 | ret = -EINVAL; | |
2465 | } | |
2466 | if (btrfs_super_num_devices(sb) > (1UL << 31)) | |
2467 | btrfs_warn(fs_info, "suspicious number of devices: %llu", | |
2468 | btrfs_super_num_devices(sb)); | |
2469 | if (btrfs_super_num_devices(sb) == 0) { | |
2470 | btrfs_err(fs_info, "number of devices is 0"); | |
2471 | ret = -EINVAL; | |
2472 | } | |
2473 | ||
069ec957 QW |
2474 | if (mirror_num >= 0 && |
2475 | btrfs_super_bytenr(sb) != btrfs_sb_offset(mirror_num)) { | |
21a852b0 QW |
2476 | btrfs_err(fs_info, "super offset mismatch %llu != %u", |
2477 | btrfs_super_bytenr(sb), BTRFS_SUPER_INFO_OFFSET); | |
2478 | ret = -EINVAL; | |
2479 | } | |
2480 | ||
2481 | /* | |
2482 | * Catch obvious sys_chunk_array corruption: it must hold at least one key |
2483 | * and one chunk |
2484 | */ | |
2485 | if (btrfs_super_sys_array_size(sb) > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) { | |
2486 | btrfs_err(fs_info, "system chunk array too big %u > %u", | |
2487 | btrfs_super_sys_array_size(sb), | |
2488 | BTRFS_SYSTEM_CHUNK_ARRAY_SIZE); | |
2489 | ret = -EINVAL; | |
2490 | } | |
2491 | if (btrfs_super_sys_array_size(sb) < sizeof(struct btrfs_disk_key) | |
2492 | + sizeof(struct btrfs_chunk)) { | |
2493 | btrfs_err(fs_info, "system chunk array too small %u < %zu", | |
2494 | btrfs_super_sys_array_size(sb), | |
2495 | sizeof(struct btrfs_disk_key) | |
2496 | + sizeof(struct btrfs_chunk)); | |
2497 | ret = -EINVAL; | |
2498 | } | |
2499 | ||
2500 | /* | |
2501 | * The generation is a global counter; we'll trust it more than the others, |
2502 | * but it's still possible that it's the one that's wrong. |
2503 | */ | |
2504 | if (btrfs_super_generation(sb) < btrfs_super_chunk_root_generation(sb)) | |
2505 | btrfs_warn(fs_info, | |
2506 | "suspicious: generation < chunk_root_generation: %llu < %llu", | |
2507 | btrfs_super_generation(sb), | |
2508 | btrfs_super_chunk_root_generation(sb)); | |
2509 | if (btrfs_super_generation(sb) < btrfs_super_cache_generation(sb) | |
2510 | && btrfs_super_cache_generation(sb) != (u64)-1) | |
2511 | btrfs_warn(fs_info, | |
2512 | "suspicious: generation < cache_generation: %llu < %llu", | |
2513 | btrfs_super_generation(sb), | |
2514 | btrfs_super_cache_generation(sb)); | |
2515 | ||
2516 | return ret; | |
2517 | } | |
2518 | ||
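A userspace sketch of the size and alignment sanity checks validate_super() performs above: sector sizes must be powers of two within range, and root bytenrs must be sector aligned. The helper names and the 64K limit are assumptions mirroring the checks above, not the kernel API.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_METADATA_BLOCKSIZE	65536	/* mirrors BTRFS_MAX_METADATA_BLOCKSIZE */

static bool is_power_of_2(uint64_t n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

static bool valid_sectorsize(uint64_t sectorsize)
{
	return is_power_of_2(sectorsize) &&
	       sectorsize >= 4096 &&
	       sectorsize <= MAX_METADATA_BLOCKSIZE;
}

static bool root_aligned(uint64_t bytenr, uint64_t sectorsize)
{
	return (bytenr % sectorsize) == 0;	/* IS_ALIGNED() equivalent */
}

int main(void)
{
	printf("sectorsize 4096 valid: %d\n", valid_sectorsize(4096));
	printf("sectorsize 3000 valid: %d\n", valid_sectorsize(3000));
	printf("root at 1069547520 aligned to 4096: %d\n",
	       root_aligned(1069547520ULL, 4096));
	printf("root at 12345 aligned to 4096: %d\n",
	       root_aligned(12345ULL, 4096));
	return 0;
}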
069ec957 QW |
2519 | /* |
2520 | * Validation of super block at mount time. | |
2521 | * Checks already done early at mount time, like csum type and incompat |
2522 | * flags, will be skipped. |
2523 | */ | |
2524 | static int btrfs_validate_mount_super(struct btrfs_fs_info *fs_info) | |
2525 | { | |
2526 | return validate_super(fs_info, fs_info->super_copy, 0); | |
2527 | } | |
2528 | ||
75cb857d QW |
2529 | /* |
2530 | * Validation of super block at write time. | |
2531 | * Some checks, like the bytenr check, will be skipped as their values will be |
2532 | * overwritten soon. | |
2533 | * Extra checks like csum type and incompat flags will be done here. | |
2534 | */ | |
2535 | static int btrfs_validate_write_super(struct btrfs_fs_info *fs_info, | |
2536 | struct btrfs_super_block *sb) | |
2537 | { | |
2538 | int ret; | |
2539 | ||
2540 | ret = validate_super(fs_info, sb, -1); | |
2541 | if (ret < 0) | |
2542 | goto out; | |
2543 | if (btrfs_super_csum_type(sb) != BTRFS_CSUM_TYPE_CRC32) { | |
2544 | ret = -EUCLEAN; | |
2545 | btrfs_err(fs_info, "invalid csum type, has %u want %u", | |
2546 | btrfs_super_csum_type(sb), BTRFS_CSUM_TYPE_CRC32); | |
2547 | goto out; | |
2548 | } | |
2549 | if (btrfs_super_incompat_flags(sb) & ~BTRFS_FEATURE_INCOMPAT_SUPP) { | |
2550 | ret = -EUCLEAN; | |
2551 | btrfs_err(fs_info, | |
2552 | "invalid incompat flags, has 0x%llx valid mask 0x%llx", | |
2553 | btrfs_super_incompat_flags(sb), | |
2554 | (unsigned long long)BTRFS_FEATURE_INCOMPAT_SUPP); | |
2555 | goto out; | |
2556 | } | |
2557 | out: | |
2558 | if (ret < 0) | |
2559 | btrfs_err(fs_info, | |
2560 | "super block corruption detected before writing it to disk"); | |
2561 | return ret; | |
2562 | } | |
2563 | ||
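The write-time validation above rejects any incompat bit set outside the supported mask. Below is a tiny sketch of that bitmask test; the feature bits and names are made up for illustration.

#include <stdint.h>
#include <stdio.h>

#define FEATURE_A	(1ULL << 0)
#define FEATURE_B	(1ULL << 1)
#define FEATURE_C	(1ULL << 5)
#define SUPPORTED_MASK	(FEATURE_A | FEATURE_B | FEATURE_C)

static int check_incompat(uint64_t flags)
{
	uint64_t unsupported = flags & ~SUPPORTED_MASK;

	if (unsupported) {
		fprintf(stderr, "unsupported incompat bits: 0x%llx\n",
			(unsigned long long)unsupported);
		return -1;
	}
	return 0;
}

int main(void)
{
	printf("A|B     -> %d\n", check_incompat(FEATURE_A | FEATURE_B));
	printf("A|bit 9 -> %d\n", check_incompat(FEATURE_A | (1ULL << 9)));
	return 0;
}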
ad2b2c80 AV |
2564 | int open_ctree(struct super_block *sb, |
2565 | struct btrfs_fs_devices *fs_devices, | |
2566 | char *options) | |
2e635a27 | 2567 | { |
db94535d CM |
2568 | u32 sectorsize; |
2569 | u32 nodesize; | |
87ee04eb | 2570 | u32 stripesize; |
84234f3a | 2571 | u64 generation; |
f2b636e8 | 2572 | u64 features; |
3de4586c | 2573 | struct btrfs_key location; |
a061fc8d | 2574 | struct buffer_head *bh; |
4d34b278 | 2575 | struct btrfs_super_block *disk_super; |
815745cf | 2576 | struct btrfs_fs_info *fs_info = btrfs_sb(sb); |
f84a8bd6 | 2577 | struct btrfs_root *tree_root; |
4d34b278 | 2578 | struct btrfs_root *chunk_root; |
eb60ceac | 2579 | int ret; |
e58ca020 | 2580 | int err = -EINVAL; |
af31f5e5 CM |
2581 | int num_backups_tried = 0; |
2582 | int backup_index = 0; | |
6675df31 | 2583 | int clear_free_space_tree = 0; |
581c1760 | 2584 | int level; |
4543df7e | 2585 | |
74e4d827 DS |
2586 | tree_root = fs_info->tree_root = btrfs_alloc_root(fs_info, GFP_KERNEL); |
2587 | chunk_root = fs_info->chunk_root = btrfs_alloc_root(fs_info, GFP_KERNEL); | |
cb517eab | 2588 | if (!tree_root || !chunk_root) { |
39279cc3 CM |
2589 | err = -ENOMEM; |
2590 | goto fail; | |
2591 | } | |
76dda93c YZ |
2592 | |
2593 | ret = init_srcu_struct(&fs_info->subvol_srcu); | |
2594 | if (ret) { | |
2595 | err = ret; | |
2596 | goto fail; | |
2597 | } | |
2598 | ||
908c7f19 | 2599 | ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0, GFP_KERNEL); |
e2d84521 MX |
2600 | if (ret) { |
2601 | err = ret; | |
9e11ceee | 2602 | goto fail_srcu; |
e2d84521 | 2603 | } |
09cbfeaf | 2604 | fs_info->dirty_metadata_batch = PAGE_SIZE * |
e2d84521 MX |
2605 | (1 + ilog2(nr_cpu_ids)); |
2606 | ||
908c7f19 | 2607 | ret = percpu_counter_init(&fs_info->delalloc_bytes, 0, GFP_KERNEL); |
963d678b MX |
2608 | if (ret) { |
2609 | err = ret; | |
2610 | goto fail_dirty_metadata_bytes; | |
2611 | } | |
2612 | ||
7f8d236a DS |
2613 | ret = percpu_counter_init(&fs_info->dev_replace.bio_counter, 0, |
2614 | GFP_KERNEL); | |
c404e0dc MX |
2615 | if (ret) { |
2616 | err = ret; | |
2617 | goto fail_delalloc_bytes; | |
2618 | } | |
2619 | ||
76dda93c | 2620 | INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC); |
f28491e0 | 2621 | INIT_RADIX_TREE(&fs_info->buffer_radix, GFP_ATOMIC); |
8fd17795 | 2622 | INIT_LIST_HEAD(&fs_info->trans_list); |
facda1e7 | 2623 | INIT_LIST_HEAD(&fs_info->dead_roots); |
24bbcf04 | 2624 | INIT_LIST_HEAD(&fs_info->delayed_iputs); |
eb73c1b7 | 2625 | INIT_LIST_HEAD(&fs_info->delalloc_roots); |
11833d66 | 2626 | INIT_LIST_HEAD(&fs_info->caching_block_groups); |
75cb379d JM |
2627 | INIT_LIST_HEAD(&fs_info->pending_raid_kobjs); |
2628 | spin_lock_init(&fs_info->pending_raid_kobjs_lock); | |
eb73c1b7 | 2629 | spin_lock_init(&fs_info->delalloc_root_lock); |
a4abeea4 | 2630 | spin_lock_init(&fs_info->trans_lock); |
76dda93c | 2631 | spin_lock_init(&fs_info->fs_roots_radix_lock); |
24bbcf04 | 2632 | spin_lock_init(&fs_info->delayed_iput_lock); |
4cb5300b | 2633 | spin_lock_init(&fs_info->defrag_inodes_lock); |
f29021b2 | 2634 | spin_lock_init(&fs_info->tree_mod_seq_lock); |
ceda0864 | 2635 | spin_lock_init(&fs_info->super_lock); |
fcebe456 | 2636 | spin_lock_init(&fs_info->qgroup_op_lock); |
f28491e0 | 2637 | spin_lock_init(&fs_info->buffer_lock); |
47ab2a6c | 2638 | spin_lock_init(&fs_info->unused_bgs_lock); |
f29021b2 | 2639 | rwlock_init(&fs_info->tree_mod_log_lock); |
d7c15171 | 2640 | mutex_init(&fs_info->unused_bg_unpin_mutex); |
67c5e7d4 | 2641 | mutex_init(&fs_info->delete_unused_bgs_mutex); |
7585717f | 2642 | mutex_init(&fs_info->reloc_mutex); |
573bfb72 | 2643 | mutex_init(&fs_info->delalloc_root_mutex); |
c2d6cb16 | 2644 | mutex_init(&fs_info->cleaner_delayed_iput_mutex); |
de98ced9 | 2645 | seqlock_init(&fs_info->profiles_lock); |
19c00ddc | 2646 | |
0b86a832 | 2647 | INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots); |
6324fbf3 | 2648 | INIT_LIST_HEAD(&fs_info->space_info); |
f29021b2 | 2649 | INIT_LIST_HEAD(&fs_info->tree_mod_seq_list); |
47ab2a6c | 2650 | INIT_LIST_HEAD(&fs_info->unused_bgs); |
0b86a832 | 2651 | btrfs_mapping_init(&fs_info->mapping_tree); |
66d8f3dd MX |
2652 | btrfs_init_block_rsv(&fs_info->global_block_rsv, |
2653 | BTRFS_BLOCK_RSV_GLOBAL); | |
66d8f3dd MX |
2654 | btrfs_init_block_rsv(&fs_info->trans_block_rsv, BTRFS_BLOCK_RSV_TRANS); |
2655 | btrfs_init_block_rsv(&fs_info->chunk_block_rsv, BTRFS_BLOCK_RSV_CHUNK); | |
2656 | btrfs_init_block_rsv(&fs_info->empty_block_rsv, BTRFS_BLOCK_RSV_EMPTY); | |
2657 | btrfs_init_block_rsv(&fs_info->delayed_block_rsv, | |
2658 | BTRFS_BLOCK_RSV_DELOPS); | |
771ed689 | 2659 | atomic_set(&fs_info->async_delalloc_pages, 0); |
4cb5300b | 2660 | atomic_set(&fs_info->defrag_running, 0); |
fcebe456 | 2661 | atomic_set(&fs_info->qgroup_op_seq, 0); |
2fefd558 | 2662 | atomic_set(&fs_info->reada_works_cnt, 0); |
fc36ed7e | 2663 | atomic64_set(&fs_info->tree_mod_seq, 0); |
e20d96d6 | 2664 | fs_info->sb = sb; |
95ac567a | 2665 | fs_info->max_inline = BTRFS_DEFAULT_MAX_INLINE; |
9ed74f2d | 2666 | fs_info->metadata_ratio = 0; |
4cb5300b | 2667 | fs_info->defrag_inodes = RB_ROOT; |
a5ed45f8 | 2668 | atomic64_set(&fs_info->free_chunk_space, 0); |
f29021b2 | 2669 | fs_info->tree_mod_log = RB_ROOT; |
8b87dc17 | 2670 | fs_info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL; |
f8c269d7 | 2671 | fs_info->avg_delayed_ref_runtime = NSEC_PER_SEC >> 6; /* div by 64 */ |
90519d66 | 2672 | /* readahead state */ |
d0164adc | 2673 | INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_DIRECT_RECLAIM); |
90519d66 | 2674 | spin_lock_init(&fs_info->reada_lock); |
fd708b81 | 2675 | btrfs_init_ref_verify(fs_info); |
c8b97818 | 2676 | |
b34b086c CM |
2677 | fs_info->thread_pool_size = min_t(unsigned long, |
2678 | num_online_cpus() + 2, 8); | |
0afbaf8c | 2679 | |
199c2a9c MX |
2680 | INIT_LIST_HEAD(&fs_info->ordered_roots); |
2681 | spin_lock_init(&fs_info->ordered_root_lock); | |
69fe2d75 JB |
2682 | |
2683 | fs_info->btree_inode = new_inode(sb); | |
2684 | if (!fs_info->btree_inode) { | |
2685 | err = -ENOMEM; | |
2686 | goto fail_bio_counter; | |
2687 | } | |
2688 | mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS); | |
2689 | ||
16cdcec7 | 2690 | fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root), |
74e4d827 | 2691 | GFP_KERNEL); |
16cdcec7 MX |
2692 | if (!fs_info->delayed_root) { |
2693 | err = -ENOMEM; | |
2694 | goto fail_iput; | |
2695 | } | |
2696 | btrfs_init_delayed_root(fs_info->delayed_root); | |
3eaa2885 | 2697 | |
638aa7ed | 2698 | btrfs_init_scrub(fs_info); |
21adbd5c SB |
2699 | #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY |
2700 | fs_info->check_integrity_print_mask = 0; | |
2701 | #endif | |
779a65a4 | 2702 | btrfs_init_balance(fs_info); |
21c7e756 | 2703 | btrfs_init_async_reclaim_work(&fs_info->async_reclaim_work); |
a2de733c | 2704 | |
9f6d2510 DS |
2705 | sb->s_blocksize = BTRFS_BDEV_BLOCKSIZE; |
2706 | sb->s_blocksize_bits = blksize_bits(BTRFS_BDEV_BLOCKSIZE); | |
a061fc8d | 2707 | |
6bccf3ab | 2708 | btrfs_init_btree_inode(fs_info); |
76dda93c | 2709 | |
0f9dd46c | 2710 | spin_lock_init(&fs_info->block_group_cache_lock); |
6bef4d31 | 2711 | fs_info->block_group_cache_tree = RB_ROOT; |
a1897fdd | 2712 | fs_info->first_logical_byte = (u64)-1; |
0f9dd46c | 2713 | |
c6100a4b JB |
2714 | extent_io_tree_init(&fs_info->freed_extents[0], NULL); |
2715 | extent_io_tree_init(&fs_info->freed_extents[1], NULL); | |
11833d66 | 2716 | fs_info->pinned_extents = &fs_info->freed_extents[0]; |
afcdd129 | 2717 | set_bit(BTRFS_FS_BARRIER, &fs_info->flags); |
39279cc3 | 2718 | |
5a3f23d5 | 2719 | mutex_init(&fs_info->ordered_operations_mutex); |
e02119d5 | 2720 | mutex_init(&fs_info->tree_log_mutex); |
925baedd | 2721 | mutex_init(&fs_info->chunk_mutex); |
a74a4b97 CM |
2722 | mutex_init(&fs_info->transaction_kthread_mutex); |
2723 | mutex_init(&fs_info->cleaner_mutex); | |
1bbc621e | 2724 | mutex_init(&fs_info->ro_block_group_mutex); |
9e351cc8 | 2725 | init_rwsem(&fs_info->commit_root_sem); |
c71bf099 | 2726 | init_rwsem(&fs_info->cleanup_work_sem); |
76dda93c | 2727 | init_rwsem(&fs_info->subvol_sem); |
803b2f54 | 2728 | sema_init(&fs_info->uuid_tree_rescan_sem, 1); |
fa9c0d79 | 2729 | |
ad618368 | 2730 | btrfs_init_dev_replace_locks(fs_info); |
f9e92e40 | 2731 | btrfs_init_qgroup(fs_info); |
416ac51d | 2732 | |
fa9c0d79 CM |
2733 | btrfs_init_free_cluster(&fs_info->meta_alloc_cluster); |
2734 | btrfs_init_free_cluster(&fs_info->data_alloc_cluster); | |
2735 | ||
e6dcd2dc | 2736 | init_waitqueue_head(&fs_info->transaction_throttle); |
f9295749 | 2737 | init_waitqueue_head(&fs_info->transaction_wait); |
bb9c12c9 | 2738 | init_waitqueue_head(&fs_info->transaction_blocked_wait); |
4854ddd0 | 2739 | init_waitqueue_head(&fs_info->async_submit_wait); |
3768f368 | 2740 | |
04216820 FM |
2741 | INIT_LIST_HEAD(&fs_info->pinned_chunks); |
2742 | ||
da17066c JM |
2743 | /* Usable values until the real ones are cached from the superblock */ |
2744 | fs_info->nodesize = 4096; | |
2745 | fs_info->sectorsize = 4096; | |
2746 | fs_info->stripesize = 4096; | |
2747 | ||
53b381b3 DW |
2748 | ret = btrfs_alloc_stripe_hash_table(fs_info); |
2749 | if (ret) { | |
83c8266a | 2750 | err = ret; |
53b381b3 DW |
2751 | goto fail_alloc; |
2752 | } | |
2753 | ||
da17066c | 2754 | __setup_root(tree_root, fs_info, BTRFS_ROOT_TREE_OBJECTID); |
7eccb903 | 2755 | |
3c4bb26b | 2756 | invalidate_bdev(fs_devices->latest_bdev); |
1104a885 DS |
2757 | |
2758 | /* | |
2759 | * Read super block and check the signature bytes only | |
2760 | */ | |
a512bbf8 | 2761 | bh = btrfs_read_dev_super(fs_devices->latest_bdev); |
92fc03fb AJ |
2762 | if (IS_ERR(bh)) { |
2763 | err = PTR_ERR(bh); | |
16cdcec7 | 2764 | goto fail_alloc; |
20b45077 | 2765 | } |
39279cc3 | 2766 | |
1104a885 DS |
2767 | /* |
2768 | * We want to check the superblock checksum; the type is stored inside. | |
2769 | * Pass the whole disk block of size BTRFS_SUPER_INFO_SIZE (4k). | |
2770 | */ | |
ab8d0fc4 | 2771 | if (btrfs_check_super_csum(fs_info, bh->b_data)) { |
05135f59 | 2772 | btrfs_err(fs_info, "superblock checksum mismatch"); |
1104a885 | 2773 | err = -EINVAL; |
b2acdddf | 2774 | brelse(bh); |
1104a885 DS |
2775 | goto fail_alloc; |
2776 | } | |
2777 | ||
2778 | /* | |
2779 | * super_copy is zeroed at allocation time and we never touch the | |
2780 | * following bytes up to INFO_SIZE; the checksum is calculated from | |
2781 | * the whole block of INFO_SIZE | |
2782 | */ | |
6c41761f DS |
2783 | memcpy(fs_info->super_copy, bh->b_data, sizeof(*fs_info->super_copy)); |
2784 | memcpy(fs_info->super_for_commit, fs_info->super_copy, | |
2785 | sizeof(*fs_info->super_for_commit)); | |
a061fc8d | 2786 | brelse(bh); |
5f39d397 | 2787 | |
6c41761f | 2788 | memcpy(fs_info->fsid, fs_info->super_copy->fsid, BTRFS_FSID_SIZE); |
0b86a832 | 2789 | |
069ec957 | 2790 | ret = btrfs_validate_mount_super(fs_info); |
1104a885 | 2791 | if (ret) { |
05135f59 | 2792 | btrfs_err(fs_info, "superblock contains fatal errors"); |
1104a885 DS |
2793 | err = -EINVAL; |
2794 | goto fail_alloc; | |
2795 | } | |
2796 | ||
6c41761f | 2797 | disk_super = fs_info->super_copy; |
0f7d52f4 | 2798 | if (!btrfs_super_root(disk_super)) |
16cdcec7 | 2799 | goto fail_alloc; |
0f7d52f4 | 2800 | |
acce952b | 2801 | /* check the FS state to see whether the FS is broken */ |
87533c47 MX |
2802 | if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_ERROR) |
2803 | set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state); | |
acce952b | 2804 | |
af31f5e5 CM |
2805 | /* |
2806 | * run through our array of backup supers and set up | |
2807 | * our ring pointer to the oldest one | |
2808 | */ | |
2809 | generation = btrfs_super_generation(disk_super); | |
2810 | find_oldest_super_backup(fs_info, generation); | |
2811 | ||
75e7cb7f LB |
2812 | /* |
2813 | * In the long term, we'll store the compression type in the super | |
2814 | * block, and it'll be used for per file compression control. | |
2815 | */ | |
2816 | fs_info->compress_type = BTRFS_COMPRESS_ZLIB; | |
2817 | ||
2ff7e61e | 2818 | ret = btrfs_parse_options(fs_info, options, sb->s_flags); |
2b82032c YZ |
2819 | if (ret) { |
2820 | err = ret; | |
16cdcec7 | 2821 | goto fail_alloc; |
2b82032c | 2822 | } |
dfe25020 | 2823 | |
f2b636e8 JB |
2824 | features = btrfs_super_incompat_flags(disk_super) & |
2825 | ~BTRFS_FEATURE_INCOMPAT_SUPP; | |
2826 | if (features) { | |
05135f59 DS |
2827 | btrfs_err(fs_info, |
2828 | "cannot mount because of unsupported optional features (%llx)", | |
2829 | features); | |
f2b636e8 | 2830 | err = -EINVAL; |
16cdcec7 | 2831 | goto fail_alloc; |
f2b636e8 JB |
2832 | } |
2833 | ||
5d4f98a2 | 2834 | features = btrfs_super_incompat_flags(disk_super); |
a6fa6fae | 2835 | features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF; |
0b246afa | 2836 | if (fs_info->compress_type == BTRFS_COMPRESS_LZO) |
a6fa6fae | 2837 | features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO; |
5c1aab1d NT |
2838 | else if (fs_info->compress_type == BTRFS_COMPRESS_ZSTD) |
2839 | features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_ZSTD; | |
727011e0 | 2840 | |
3173a18f | 2841 | if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA) |
05135f59 | 2842 | btrfs_info(fs_info, "has skinny extents"); |
3173a18f | 2843 | |
727011e0 CM |
2844 | /* |
2845 | * flag our filesystem as having big metadata blocks if | |
2846 | * they are bigger than the page size | |
2847 | */ | |
09cbfeaf | 2848 | if (btrfs_super_nodesize(disk_super) > PAGE_SIZE) { |
727011e0 | 2849 | if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA)) |
05135f59 DS |
2850 | btrfs_info(fs_info, |
2851 | "flagging fs with big metadata feature"); | |
727011e0 CM |
2852 | features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA; |
2853 | } | |
2854 | ||
bc3f116f | 2855 | nodesize = btrfs_super_nodesize(disk_super); |
bc3f116f | 2856 | sectorsize = btrfs_super_sectorsize(disk_super); |
b7f67055 | 2857 | stripesize = sectorsize; |
707e8a07 | 2858 | fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids)); |
963d678b | 2859 | fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids)); |
bc3f116f | 2860 | |
da17066c JM |
2861 | /* Cache block sizes */ |
2862 | fs_info->nodesize = nodesize; | |
2863 | fs_info->sectorsize = sectorsize; | |
2864 | fs_info->stripesize = stripesize; | |
2865 | ||
bc3f116f CM |
2866 | /* |
2867 | * mixed block groups end up with duplicate but slightly offset | |
2868 | * extent buffers for the same range. This leads to corruption | |
2869 | */ | |
2870 | if ((features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) && | |
707e8a07 | 2871 | (sectorsize != nodesize)) { |
05135f59 DS |
2872 | btrfs_err(fs_info, |
2873 | "unequal nodesize/sectorsize (%u != %u) are not allowed for mixed block groups", | |
2874 | nodesize, sectorsize); | |
bc3f116f CM |
2875 | goto fail_alloc; |
2876 | } | |
2877 | ||
ceda0864 MX |
2878 | /* |
2879 | * There is no need to take the lock because no other task will | |
2880 | * update the flag. | |
2881 | */ | |
a6fa6fae | 2882 | btrfs_set_super_incompat_flags(disk_super, features); |
5d4f98a2 | 2883 | |
f2b636e8 JB |
2884 | features = btrfs_super_compat_ro_flags(disk_super) & |
2885 | ~BTRFS_FEATURE_COMPAT_RO_SUPP; | |
bc98a42c | 2886 | if (!sb_rdonly(sb) && features) { |
05135f59 DS |
2887 | btrfs_err(fs_info, |
2888 | "cannot mount read-write because of unsupported optional features (%llx)", | |
c1c9ff7c | 2889 | features); |
f2b636e8 | 2890 | err = -EINVAL; |
16cdcec7 | 2891 | goto fail_alloc; |
f2b636e8 | 2892 | } |
61d92c32 | 2893 | |
2a458198 ES |
2894 | ret = btrfs_init_workqueues(fs_info, fs_devices); |
2895 | if (ret) { | |
2896 | err = ret; | |
0dc3b84a JB |
2897 | goto fail_sb_buffer; |
2898 | } | |
4543df7e | 2899 | |
9e11ceee JK |
2900 | sb->s_bdi->congested_fn = btrfs_congested_fn; |
2901 | sb->s_bdi->congested_data = fs_info; | |
2902 | sb->s_bdi->capabilities |= BDI_CAP_CGROUP_WRITEBACK; | |
d4417e22 | 2903 | sb->s_bdi->ra_pages = VM_MAX_READAHEAD * SZ_1K / PAGE_SIZE; |
9e11ceee JK |
2904 | sb->s_bdi->ra_pages *= btrfs_super_num_devices(disk_super); |
2905 | sb->s_bdi->ra_pages = max(sb->s_bdi->ra_pages, SZ_4M / PAGE_SIZE); | |
4575c9cc | 2906 | |
a061fc8d CM |
2907 | sb->s_blocksize = sectorsize; |
2908 | sb->s_blocksize_bits = blksize_bits(sectorsize); | |
ee87cf5e | 2909 | memcpy(&sb->s_uuid, fs_info->fsid, BTRFS_FSID_SIZE); |
db94535d | 2910 | |
925baedd | 2911 | mutex_lock(&fs_info->chunk_mutex); |
6bccf3ab | 2912 | ret = btrfs_read_sys_array(fs_info); |
925baedd | 2913 | mutex_unlock(&fs_info->chunk_mutex); |
84eed90f | 2914 | if (ret) { |
05135f59 | 2915 | btrfs_err(fs_info, "failed to read the system array: %d", ret); |
5d4f98a2 | 2916 | goto fail_sb_buffer; |
84eed90f | 2917 | } |
0b86a832 | 2918 | |
84234f3a | 2919 | generation = btrfs_super_chunk_root_generation(disk_super); |
581c1760 | 2920 | level = btrfs_super_chunk_root_level(disk_super); |
0b86a832 | 2921 | |
da17066c | 2922 | __setup_root(chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID); |
0b86a832 | 2923 | |
2ff7e61e | 2924 | chunk_root->node = read_tree_block(fs_info, |
0b86a832 | 2925 | btrfs_super_chunk_root(disk_super), |
581c1760 | 2926 | generation, level, NULL); |
64c043de LB |
2927 | if (IS_ERR(chunk_root->node) || |
2928 | !extent_buffer_uptodate(chunk_root->node)) { | |
05135f59 | 2929 | btrfs_err(fs_info, "failed to read chunk root"); |
e5fffbac | 2930 | if (!IS_ERR(chunk_root->node)) |
2931 | free_extent_buffer(chunk_root->node); | |
95ab1f64 | 2932 | chunk_root->node = NULL; |
af31f5e5 | 2933 | goto fail_tree_roots; |
83121942 | 2934 | } |
5d4f98a2 YZ |
2935 | btrfs_set_root_node(&chunk_root->root_item, chunk_root->node); |
2936 | chunk_root->commit_root = btrfs_root_node(chunk_root); | |
0b86a832 | 2937 | |
e17cade2 | 2938 | read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid, |
b308bc2f | 2939 | btrfs_header_chunk_tree_uuid(chunk_root->node), BTRFS_UUID_SIZE); |
e17cade2 | 2940 | |
5b4aacef | 2941 | ret = btrfs_read_chunk_tree(fs_info); |
2b82032c | 2942 | if (ret) { |
05135f59 | 2943 | btrfs_err(fs_info, "failed to read chunk tree: %d", ret); |
af31f5e5 | 2944 | goto fail_tree_roots; |
2b82032c | 2945 | } |
0b86a832 | 2946 | |
8dabb742 | 2947 | /* |
9b99b115 AJ |
2948 | * Keep the devid that is marked to be the target device for the |
2949 | * device replace procedure | |
8dabb742 | 2950 | */ |
9b99b115 | 2951 | btrfs_free_extra_devids(fs_devices, 0); |
dfe25020 | 2952 | |
a6b0d5c8 | 2953 | if (!fs_devices->latest_bdev) { |
05135f59 | 2954 | btrfs_err(fs_info, "failed to read devices"); |
a6b0d5c8 CM |
2955 | goto fail_tree_roots; |
2956 | } | |
2957 | ||
af31f5e5 | 2958 | retry_root_backup: |
84234f3a | 2959 | generation = btrfs_super_generation(disk_super); |
581c1760 | 2960 | level = btrfs_super_root_level(disk_super); |
0b86a832 | 2961 | |
2ff7e61e | 2962 | tree_root->node = read_tree_block(fs_info, |
db94535d | 2963 | btrfs_super_root(disk_super), |
581c1760 | 2964 | generation, level, NULL); |
64c043de LB |
2965 | if (IS_ERR(tree_root->node) || |
2966 | !extent_buffer_uptodate(tree_root->node)) { | |
05135f59 | 2967 | btrfs_warn(fs_info, "failed to read tree root"); |
e5fffbac | 2968 | if (!IS_ERR(tree_root->node)) |
2969 | free_extent_buffer(tree_root->node); | |
95ab1f64 | 2970 | tree_root->node = NULL; |
af31f5e5 | 2971 | goto recovery_tree_root; |
83121942 | 2972 | } |
af31f5e5 | 2973 | |
5d4f98a2 YZ |
2974 | btrfs_set_root_node(&tree_root->root_item, tree_root->node); |
2975 | tree_root->commit_root = btrfs_root_node(tree_root); | |
69e9c6c6 | 2976 | btrfs_set_root_refs(&tree_root->root_item, 1); |
db94535d | 2977 | |
f32e48e9 CR |
2978 | mutex_lock(&tree_root->objectid_mutex); |
2979 | ret = btrfs_find_highest_objectid(tree_root, | |
2980 | &tree_root->highest_objectid); | |
2981 | if (ret) { | |
2982 | mutex_unlock(&tree_root->objectid_mutex); | |
2983 | goto recovery_tree_root; | |
2984 | } | |
2985 | ||
2986 | ASSERT(tree_root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID); | |
2987 | ||
2988 | mutex_unlock(&tree_root->objectid_mutex); | |
2989 | ||
6bccf3ab | 2990 | ret = btrfs_read_roots(fs_info); |
4bbcaa64 | 2991 | if (ret) |
af31f5e5 | 2992 | goto recovery_tree_root; |
f7a81ea4 | 2993 | |
8929ecfa YZ |
2994 | fs_info->generation = generation; |
2995 | fs_info->last_trans_committed = generation; | |
8929ecfa | 2996 | |
cf90d884 QW |
2997 | ret = btrfs_verify_dev_extents(fs_info); |
2998 | if (ret) { | |
2999 | btrfs_err(fs_info, | |
3000 | "failed to verify dev extents against chunks: %d", | |
3001 | ret); | |
3002 | goto fail_block_groups; | |
3003 | } | |
68310a5e ID |
3004 | ret = btrfs_recover_balance(fs_info); |
3005 | if (ret) { | |
05135f59 | 3006 | btrfs_err(fs_info, "failed to recover balance: %d", ret); |
68310a5e ID |
3007 | goto fail_block_groups; |
3008 | } | |
3009 | ||
733f4fbb SB |
3010 | ret = btrfs_init_dev_stats(fs_info); |
3011 | if (ret) { | |
05135f59 | 3012 | btrfs_err(fs_info, "failed to init dev_stats: %d", ret); |
733f4fbb SB |
3013 | goto fail_block_groups; |
3014 | } | |
3015 | ||
8dabb742 SB |
3016 | ret = btrfs_init_dev_replace(fs_info); |
3017 | if (ret) { | |
05135f59 | 3018 | btrfs_err(fs_info, "failed to init dev_replace: %d", ret); |
8dabb742 SB |
3019 | goto fail_block_groups; |
3020 | } | |
3021 | ||
9b99b115 | 3022 | btrfs_free_extra_devids(fs_devices, 1); |
8dabb742 | 3023 | |
b7c35e81 AJ |
3024 | ret = btrfs_sysfs_add_fsid(fs_devices, NULL); |
3025 | if (ret) { | |
05135f59 DS |
3026 | btrfs_err(fs_info, "failed to init sysfs fsid interface: %d", |
3027 | ret); | |
b7c35e81 AJ |
3028 | goto fail_block_groups; |
3029 | } | |
3030 | ||
3031 | ret = btrfs_sysfs_add_device(fs_devices); | |
3032 | if (ret) { | |
05135f59 DS |
3033 | btrfs_err(fs_info, "failed to init sysfs device interface: %d", |
3034 | ret); | |
b7c35e81 AJ |
3035 | goto fail_fsdev_sysfs; |
3036 | } | |
3037 | ||
96f3136e | 3038 | ret = btrfs_sysfs_add_mounted(fs_info); |
c59021f8 | 3039 | if (ret) { |
05135f59 | 3040 | btrfs_err(fs_info, "failed to init sysfs interface: %d", ret); |
b7c35e81 | 3041 | goto fail_fsdev_sysfs; |
c59021f8 | 3042 | } |
3043 | ||
c59021f8 | 3044 | ret = btrfs_init_space_info(fs_info); |
3045 | if (ret) { | |
05135f59 | 3046 | btrfs_err(fs_info, "failed to initialize space info: %d", ret); |
2365dd3c | 3047 | goto fail_sysfs; |
c59021f8 | 3048 | } |
3049 | ||
5b4aacef | 3050 | ret = btrfs_read_block_groups(fs_info); |
1b1d1f66 | 3051 | if (ret) { |
05135f59 | 3052 | btrfs_err(fs_info, "failed to read block groups: %d", ret); |
2365dd3c | 3053 | goto fail_sysfs; |
1b1d1f66 | 3054 | } |
4330e183 | 3055 | |
6528b99d | 3056 | if (!sb_rdonly(sb) && !btrfs_check_rw_degradable(fs_info, NULL)) { |
05135f59 | 3057 | btrfs_warn(fs_info, |
4330e183 | 3058 | "writeable mount is not allowed due to too many missing devices"); |
2365dd3c | 3059 | goto fail_sysfs; |
292fd7fc | 3060 | } |
9078a3e1 | 3061 | |
a74a4b97 CM |
3062 | fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root, |
3063 | "btrfs-cleaner"); | |
57506d50 | 3064 | if (IS_ERR(fs_info->cleaner_kthread)) |
2365dd3c | 3065 | goto fail_sysfs; |
a74a4b97 CM |
3066 | |
3067 | fs_info->transaction_kthread = kthread_run(transaction_kthread, | |
3068 | tree_root, | |
3069 | "btrfs-transaction"); | |
57506d50 | 3070 | if (IS_ERR(fs_info->transaction_kthread)) |
3f157a2f | 3071 | goto fail_cleaner; |
a74a4b97 | 3072 | |
583b7231 | 3073 | if (!btrfs_test_opt(fs_info, NOSSD) && |
c289811c | 3074 | !fs_info->fs_devices->rotating) { |
583b7231 | 3075 | btrfs_set_and_info(fs_info, SSD, "enabling ssd optimizations"); |
c289811c CM |
3076 | } |
3077 | ||
572d9ab7 | 3078 | /* |
01327610 | 3079 | * Mount does not set all options immediately; we can do it now and do |
572d9ab7 DS |
3080 | * not have to wait for transaction commit |
3081 | */ | |
3082 | btrfs_apply_pending_changes(fs_info); | |
3818aea2 | 3083 | |
21adbd5c | 3084 | #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY |
0b246afa | 3085 | if (btrfs_test_opt(fs_info, CHECK_INTEGRITY)) { |
2ff7e61e | 3086 | ret = btrfsic_mount(fs_info, fs_devices, |
0b246afa | 3087 | btrfs_test_opt(fs_info, |
21adbd5c SB |
3088 | CHECK_INTEGRITY_INCLUDING_EXTENT_DATA) ? |
3089 | 1 : 0, | |
3090 | fs_info->check_integrity_print_mask); | |
3091 | if (ret) | |
05135f59 DS |
3092 | btrfs_warn(fs_info, |
3093 | "failed to initialize integrity check module: %d", | |
3094 | ret); | |
21adbd5c SB |
3095 | } |
3096 | #endif | |
bcef60f2 AJ |
3097 | ret = btrfs_read_qgroup_config(fs_info); |
3098 | if (ret) | |
3099 | goto fail_trans_kthread; | |
21adbd5c | 3100 | |
fd708b81 JB |
3101 | if (btrfs_build_ref_tree(fs_info)) |
3102 | btrfs_err(fs_info, "couldn't build ref tree"); | |
3103 | ||
96da0919 QW |
3104 | /* do not make disk changes in a broken FS or when nologreplay is given */ |
3105 | if (btrfs_super_log_root(disk_super) != 0 && | |
0b246afa | 3106 | !btrfs_test_opt(fs_info, NOLOGREPLAY)) { |
63443bf5 | 3107 | ret = btrfs_replay_log(fs_info, fs_devices); |
79787eaa | 3108 | if (ret) { |
63443bf5 | 3109 | err = ret; |
28c16cbb | 3110 | goto fail_qgroup; |
79787eaa | 3111 | } |
e02119d5 | 3112 | } |
1a40e23b | 3113 | |
6bccf3ab | 3114 | ret = btrfs_find_orphan_roots(fs_info); |
79787eaa | 3115 | if (ret) |
28c16cbb | 3116 | goto fail_qgroup; |
76dda93c | 3117 | |
bc98a42c | 3118 | if (!sb_rdonly(sb)) { |
d68fc57b | 3119 | ret = btrfs_cleanup_fs_roots(fs_info); |
44c44af2 | 3120 | if (ret) |
28c16cbb | 3121 | goto fail_qgroup; |
90c711ab ZB |
3122 | |
3123 | mutex_lock(&fs_info->cleaner_mutex); | |
5d4f98a2 | 3124 | ret = btrfs_recover_relocation(tree_root); |
90c711ab | 3125 | mutex_unlock(&fs_info->cleaner_mutex); |
d7ce5843 | 3126 | if (ret < 0) { |
05135f59 DS |
3127 | btrfs_warn(fs_info, "failed to recover relocation: %d", |
3128 | ret); | |
d7ce5843 | 3129 | err = -EINVAL; |
bcef60f2 | 3130 | goto fail_qgroup; |
d7ce5843 | 3131 | } |
7c2ca468 | 3132 | } |
1a40e23b | 3133 | |
3de4586c CM |
3134 | location.objectid = BTRFS_FS_TREE_OBJECTID; |
3135 | location.type = BTRFS_ROOT_ITEM_KEY; | |
cb517eab | 3136 | location.offset = 0; |
3de4586c | 3137 | |
3de4586c | 3138 | fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location); |
3140c9a3 DC |
3139 | if (IS_ERR(fs_info->fs_root)) { |
3140 | err = PTR_ERR(fs_info->fs_root); | |
f50f4353 | 3141 | btrfs_warn(fs_info, "failed to read fs tree: %d", err); |
bcef60f2 | 3142 | goto fail_qgroup; |
3140c9a3 | 3143 | } |
c289811c | 3144 | |
bc98a42c | 3145 | if (sb_rdonly(sb)) |
2b6ba629 | 3146 | return 0; |
59641015 | 3147 | |
f8d468a1 OS |
3148 | if (btrfs_test_opt(fs_info, CLEAR_CACHE) && |
3149 | btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) { | |
6675df31 OS |
3150 | clear_free_space_tree = 1; |
3151 | } else if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) && | |
3152 | !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID)) { | |
3153 | btrfs_warn(fs_info, "free space tree is invalid"); | |
3154 | clear_free_space_tree = 1; | |
3155 | } | |
3156 | ||
3157 | if (clear_free_space_tree) { | |
f8d468a1 OS |
3158 | btrfs_info(fs_info, "clearing free space tree"); |
3159 | ret = btrfs_clear_free_space_tree(fs_info); | |
3160 | if (ret) { | |
3161 | btrfs_warn(fs_info, | |
3162 | "failed to clear free space tree: %d", ret); | |
6bccf3ab | 3163 | close_ctree(fs_info); |
f8d468a1 OS |
3164 | return ret; |
3165 | } | |
3166 | } | |
3167 | ||
0b246afa | 3168 | if (btrfs_test_opt(fs_info, FREE_SPACE_TREE) && |
511711af | 3169 | !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) { |
05135f59 | 3170 | btrfs_info(fs_info, "creating free space tree"); |
511711af CM |
3171 | ret = btrfs_create_free_space_tree(fs_info); |
3172 | if (ret) { | |
05135f59 DS |
3173 | btrfs_warn(fs_info, |
3174 | "failed to create free space tree: %d", ret); | |
6bccf3ab | 3175 | close_ctree(fs_info); |
511711af CM |
3176 | return ret; |
3177 | } | |
3178 | } | |
3179 | ||
2b6ba629 ID |
3180 | down_read(&fs_info->cleanup_work_sem); |
3181 | if ((ret = btrfs_orphan_cleanup(fs_info->fs_root)) || | |
3182 | (ret = btrfs_orphan_cleanup(fs_info->tree_root))) { | |
e3acc2a6 | 3183 | up_read(&fs_info->cleanup_work_sem); |
6bccf3ab | 3184 | close_ctree(fs_info); |
2b6ba629 ID |
3185 | return ret; |
3186 | } | |
3187 | up_read(&fs_info->cleanup_work_sem); | |
59641015 | 3188 | |
2b6ba629 ID |
3189 | ret = btrfs_resume_balance_async(fs_info); |
3190 | if (ret) { | |
05135f59 | 3191 | btrfs_warn(fs_info, "failed to resume balance: %d", ret); |
6bccf3ab | 3192 | close_ctree(fs_info); |
2b6ba629 | 3193 | return ret; |
e3acc2a6 JB |
3194 | } |
3195 | ||
8dabb742 SB |
3196 | ret = btrfs_resume_dev_replace_async(fs_info); |
3197 | if (ret) { | |
05135f59 | 3198 | btrfs_warn(fs_info, "failed to resume device replace: %d", ret); |
6bccf3ab | 3199 | close_ctree(fs_info); |
8dabb742 SB |
3200 | return ret; |
3201 | } | |
3202 | ||
b382a324 JS |
3203 | btrfs_qgroup_rescan_resume(fs_info); |
3204 | ||
4bbcaa64 | 3205 | if (!fs_info->uuid_root) { |
05135f59 | 3206 | btrfs_info(fs_info, "creating UUID tree"); |
f7a81ea4 SB |
3207 | ret = btrfs_create_uuid_tree(fs_info); |
3208 | if (ret) { | |
05135f59 DS |
3209 | btrfs_warn(fs_info, |
3210 | "failed to create the UUID tree: %d", ret); | |
6bccf3ab | 3211 | close_ctree(fs_info); |
f7a81ea4 SB |
3212 | return ret; |
3213 | } | |
0b246afa | 3214 | } else if (btrfs_test_opt(fs_info, RESCAN_UUID_TREE) || |
4bbcaa64 ES |
3215 | fs_info->generation != |
3216 | btrfs_super_uuid_tree_generation(disk_super)) { | |
05135f59 | 3217 | btrfs_info(fs_info, "checking UUID tree"); |
70f80175 SB |
3218 | ret = btrfs_check_uuid_tree(fs_info); |
3219 | if (ret) { | |
05135f59 DS |
3220 | btrfs_warn(fs_info, |
3221 | "failed to check the UUID tree: %d", ret); | |
6bccf3ab | 3222 | close_ctree(fs_info); |
70f80175 SB |
3223 | return ret; |
3224 | } | |
3225 | } else { | |
afcdd129 | 3226 | set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags); |
f7a81ea4 | 3227 | } |
afcdd129 | 3228 | set_bit(BTRFS_FS_OPEN, &fs_info->flags); |
47ab2a6c | 3229 | |
8dcddfa0 QW |
3230 | /* |
3231 | * backuproot only affects mount behavior, and if open_ctree succeeded, |
3232 | * no need to keep the flag | |
3233 | */ | |
3234 | btrfs_clear_opt(fs_info->mount_opt, USEBACKUPROOT); | |
3235 | ||
ad2b2c80 | 3236 | return 0; |
39279cc3 | 3237 | |
bcef60f2 AJ |
3238 | fail_qgroup: |
3239 | btrfs_free_qgroup_config(fs_info); | |
7c2ca468 CM |
3240 | fail_trans_kthread: |
3241 | kthread_stop(fs_info->transaction_kthread); | |
2ff7e61e | 3242 | btrfs_cleanup_transaction(fs_info); |
faa2dbf0 | 3243 | btrfs_free_fs_roots(fs_info); |
3f157a2f | 3244 | fail_cleaner: |
a74a4b97 | 3245 | kthread_stop(fs_info->cleaner_kthread); |
7c2ca468 CM |
3246 | |
3247 | /* | |
3248 | * make sure we're done with the btree inode before we stop our | |
3249 | * kthreads | |
3250 | */ | |
3251 | filemap_write_and_wait(fs_info->btree_inode->i_mapping); | |
7c2ca468 | 3252 | |
2365dd3c | 3253 | fail_sysfs: |
6618a59b | 3254 | btrfs_sysfs_remove_mounted(fs_info); |
2365dd3c | 3255 | |
b7c35e81 AJ |
3256 | fail_fsdev_sysfs: |
3257 | btrfs_sysfs_remove_fsid(fs_info->fs_devices); | |
3258 | ||
1b1d1f66 | 3259 | fail_block_groups: |
54067ae9 | 3260 | btrfs_put_block_group_cache(fs_info); |
af31f5e5 CM |
3261 | |
3262 | fail_tree_roots: | |
3263 | free_root_pointers(fs_info, 1); | |
2b8195bb | 3264 | invalidate_inode_pages2(fs_info->btree_inode->i_mapping); |
af31f5e5 | 3265 | |
39279cc3 | 3266 | fail_sb_buffer: |
7abadb64 | 3267 | btrfs_stop_all_workers(fs_info); |
5cdd7db6 | 3268 | btrfs_free_block_groups(fs_info); |
16cdcec7 | 3269 | fail_alloc: |
4543df7e | 3270 | fail_iput: |
586e46e2 ID |
3271 | btrfs_mapping_tree_free(&fs_info->mapping_tree); |
3272 | ||
4543df7e | 3273 | iput(fs_info->btree_inode); |
c404e0dc | 3274 | fail_bio_counter: |
7f8d236a | 3275 | percpu_counter_destroy(&fs_info->dev_replace.bio_counter); |
963d678b MX |
3276 | fail_delalloc_bytes: |
3277 | percpu_counter_destroy(&fs_info->delalloc_bytes); | |
e2d84521 MX |
3278 | fail_dirty_metadata_bytes: |
3279 | percpu_counter_destroy(&fs_info->dirty_metadata_bytes); | |
76dda93c YZ |
3280 | fail_srcu: |
3281 | cleanup_srcu_struct(&fs_info->subvol_srcu); | |
7e662854 | 3282 | fail: |
53b381b3 | 3283 | btrfs_free_stripe_hash_table(fs_info); |
586e46e2 | 3284 | btrfs_close_devices(fs_info->fs_devices); |
ad2b2c80 | 3285 | return err; |
af31f5e5 CM |
3286 | |
3287 | recovery_tree_root: | |
0b246afa | 3288 | if (!btrfs_test_opt(fs_info, USEBACKUPROOT)) |
af31f5e5 CM |
3289 | goto fail_tree_roots; |
3290 | ||
3291 | free_root_pointers(fs_info, 0); | |
3292 | ||
3293 | /* don't use the log in recovery mode; it won't be valid */ | |
3294 | btrfs_set_super_log_root(disk_super, 0); | |
3295 | ||
3296 | /* we can't trust the free space cache either */ | |
3297 | btrfs_set_opt(fs_info->mount_opt, CLEAR_CACHE); | |
3298 | ||
3299 | ret = next_root_backup(fs_info, fs_info->super_copy, | |
3300 | &num_backups_tried, &backup_index); | |
3301 | if (ret == -1) | |
3302 | goto fail_block_groups; | |
3303 | goto retry_root_backup; | |
eb60ceac | 3304 | } |
663faf9f | 3305 | ALLOW_ERROR_INJECTION(open_ctree, ERRNO); |
eb60ceac | 3306 | |
f2984462 CM |
3307 | static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate) |
3308 | { | |
f2984462 CM |
3309 | if (uptodate) { |
3310 | set_buffer_uptodate(bh); | |
3311 | } else { | |
442a4f63 SB |
3312 | struct btrfs_device *device = (struct btrfs_device *) |
3313 | bh->b_private; | |
3314 | ||
fb456252 | 3315 | btrfs_warn_rl_in_rcu(device->fs_info, |
b14af3b4 | 3316 | "lost page write due to IO error on %s", |
606686ee | 3317 | rcu_str_deref(device->name)); |
01327610 | 3318 | /* note, we don't set_buffer_write_io_error because we have |
1259ab75 CM |
3319 | * our own ways of dealing with the IO errors |
3320 | */ | |
f2984462 | 3321 | clear_buffer_uptodate(bh); |
442a4f63 | 3322 | btrfs_dev_stat_inc_and_print(device, BTRFS_DEV_STAT_WRITE_ERRS); |
f2984462 CM |
3323 | } |
3324 | unlock_buffer(bh); | |
3325 | put_bh(bh); | |
3326 | } | |
3327 | ||
29c36d72 AJ |
3328 | int btrfs_read_dev_one_super(struct block_device *bdev, int copy_num, |
3329 | struct buffer_head **bh_ret) | |
3330 | { | |
3331 | struct buffer_head *bh; | |
3332 | struct btrfs_super_block *super; | |
3333 | u64 bytenr; | |
3334 | ||
3335 | bytenr = btrfs_sb_offset(copy_num); | |
3336 | if (bytenr + BTRFS_SUPER_INFO_SIZE >= i_size_read(bdev->bd_inode)) | |
3337 | return -EINVAL; | |
3338 | ||
9f6d2510 | 3339 | bh = __bread(bdev, bytenr / BTRFS_BDEV_BLOCKSIZE, BTRFS_SUPER_INFO_SIZE); |
29c36d72 AJ |
3340 | /* |
3341 | * If we fail to read from the underlying devices, as of now | |
3342 | * the best option we have is to mark it EIO. | |
3343 | */ | |
3344 | if (!bh) | |
3345 | return -EIO; | |
3346 | ||
3347 | super = (struct btrfs_super_block *)bh->b_data; | |
3348 | if (btrfs_super_bytenr(super) != bytenr || | |
3349 | btrfs_super_magic(super) != BTRFS_MAGIC) { | |
3350 | brelse(bh); | |
3351 | return -EINVAL; | |
3352 | } | |
3353 | ||
3354 | *bh_ret = bh; | |
3355 | return 0; | |
3356 | } | |
3357 | ||
3358 | ||
a512bbf8 YZ |
3359 | struct buffer_head *btrfs_read_dev_super(struct block_device *bdev) |
3360 | { | |
3361 | struct buffer_head *bh; | |
3362 | struct buffer_head *latest = NULL; | |
3363 | struct btrfs_super_block *super; | |
3364 | int i; | |
3365 | u64 transid = 0; | |
92fc03fb | 3366 | int ret = -EINVAL; |
a512bbf8 YZ |
3367 | |
3368 | /* we would like to check all the supers, but that would make | |
3369 | * a btrfs mount succeed after a mkfs from a different FS. | |
3370 | * So, we need to add a special mount option to scan for | |
3371 | * later supers, using BTRFS_SUPER_MIRROR_MAX instead | |
3372 | */ | |
3373 | for (i = 0; i < 1; i++) { | |
29c36d72 AJ |
3374 | ret = btrfs_read_dev_one_super(bdev, i, &bh); |
3375 | if (ret) | |
a512bbf8 YZ |
3376 | continue; |
3377 | ||
3378 | super = (struct btrfs_super_block *)bh->b_data; | |
a512bbf8 YZ |
3379 | |
3380 | if (!latest || btrfs_super_generation(super) > transid) { | |
3381 | brelse(latest); | |
3382 | latest = bh; | |
3383 | transid = btrfs_super_generation(super); | |
3384 | } else { | |
3385 | brelse(bh); | |
3386 | } | |
3387 | } | |
92fc03fb AJ |
3388 | |
3389 | if (!latest) | |
3390 | return ERR_PTR(ret); | |
3391 | ||
a512bbf8 YZ |
3392 | return latest; |
3393 | } | |
3394 | ||
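The function above only examines the primary copy (the loop stops at i == 1) for the reason spelled out in its comment, but the selection policy is simply "keep the copy with the highest generation". Below is a minimal userspace sketch of that policy which scans all three mirror offsets; the offsets, the 4 KiB superblock size, the magic string and the struct prefix are assumptions taken from the published btrfs on-disk format rather than from this file, and the comparisons assume a little-endian host.

/*
 * Hypothetical userspace sketch: pick the superblock mirror with the
 * highest generation.  Offsets, sizes and the on-disk prefix layout are
 * assumptions from the published btrfs disk format, not from this file.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define SB_SIZE 4096			/* assumed BTRFS_SUPER_INFO_SIZE */

struct sb_prefix {			/* assumed start of the super block */
	uint8_t  csum[32];
	uint8_t  fsid[16];
	uint64_t bytenr;		/* must equal the mirror offset */
	uint64_t flags;
	char     magic[8];		/* "_BHRfS_M" */
	uint64_t generation;
} __attribute__((packed));

int main(int argc, char **argv)
{
	/* assumed btrfs_sb_offset() values: 64 KiB, 64 MiB, 256 GiB */
	const uint64_t offsets[3] = { 64ULL << 10, 64ULL << 20, 256ULL << 30 };
	union { uint8_t raw[SB_SIZE]; struct sb_prefix sb; } buf;
	uint64_t best_gen = 0, best_off = 0;
	int fd, found = 0;

	if (argc != 2 || (fd = open(argv[1], O_RDONLY)) < 0) {
		fprintf(stderr, "usage: %s <device-or-image>\n", argv[0]);
		return 1;
	}
	for (int i = 0; i < 3; i++) {
		if (pread(fd, buf.raw, SB_SIZE, offsets[i]) != SB_SIZE)
			continue;	/* mirror lies beyond the device */
		if (memcmp(buf.sb.magic, "_BHRfS_M", 8) ||
		    buf.sb.bytenr != offsets[i])
			continue;	/* same sanity checks as the code above */
		if (!found || buf.sb.generation > best_gen) {
			best_gen = buf.sb.generation;	/* LE host assumed */
			best_off = offsets[i];
			found = 1;
		}
	}
	close(fd);
	if (!found) {
		fprintf(stderr, "no valid superblock found\n");
		return 1;
	}
	printf("newest super at offset %llu, generation %llu\n",
	       (unsigned long long)best_off, (unsigned long long)best_gen);
	return 0;
}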
4eedeb75 | 3395 | /* |
abbb3b8e DS |
3396 | * Write superblock @sb to @device. Do not wait for completion; all the |
3397 | * buffer heads we write are pinned. | |
4eedeb75 | 3398 | * |
abbb3b8e DS |
3399 | * Write @max_mirrors copies of the superblock, where 0 means the default: as |
3400 | * many copies as fit the expected device size at commit time. Note that | |
3401 | * max_mirrors must be the same for the write and wait phases. | |
4eedeb75 | 3402 | * |
abbb3b8e | 3403 | * Return number of errors when buffer head is not found or submission fails. |
4eedeb75 | 3404 | */ |
a512bbf8 | 3405 | static int write_dev_supers(struct btrfs_device *device, |
abbb3b8e | 3406 | struct btrfs_super_block *sb, int max_mirrors) |
a512bbf8 YZ |
3407 | { |
3408 | struct buffer_head *bh; | |
3409 | int i; | |
3410 | int ret; | |
3411 | int errors = 0; | |
3412 | u32 crc; | |
3413 | u64 bytenr; | |
1b9e619c | 3414 | int op_flags; |
a512bbf8 YZ |
3415 | |
3416 | if (max_mirrors == 0) | |
3417 | max_mirrors = BTRFS_SUPER_MIRROR_MAX; | |
3418 | ||
a512bbf8 YZ |
3419 | for (i = 0; i < max_mirrors; i++) { |
3420 | bytenr = btrfs_sb_offset(i); | |
935e5cc9 MX |
3421 | if (bytenr + BTRFS_SUPER_INFO_SIZE >= |
3422 | device->commit_total_bytes) | |
a512bbf8 YZ |
3423 | break; |
3424 | ||
abbb3b8e | 3425 | btrfs_set_super_bytenr(sb, bytenr); |
4eedeb75 | 3426 | |
abbb3b8e DS |
3427 | crc = ~(u32)0; |
3428 | crc = btrfs_csum_data((const char *)sb + BTRFS_CSUM_SIZE, crc, | |
3429 | BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE); | |
3430 | btrfs_csum_final(crc, sb->csum); | |
4eedeb75 | 3431 | |
abbb3b8e | 3432 | /* One reference for us, and we leave it for the caller */ |
9f6d2510 | 3433 | bh = __getblk(device->bdev, bytenr / BTRFS_BDEV_BLOCKSIZE, |
abbb3b8e DS |
3434 | BTRFS_SUPER_INFO_SIZE); |
3435 | if (!bh) { | |
3436 | btrfs_err(device->fs_info, | |
3437 | "couldn't get super buffer head for bytenr %llu", | |
3438 | bytenr); | |
3439 | errors++; | |
4eedeb75 | 3440 | continue; |
abbb3b8e | 3441 | } |
634554dc | 3442 | |
abbb3b8e | 3443 | memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE); |
a512bbf8 | 3444 | |
abbb3b8e DS |
3445 | /* one reference for submit_bh */ |
3446 | get_bh(bh); | |
4eedeb75 | 3447 | |
abbb3b8e DS |
3448 | set_buffer_uptodate(bh); |
3449 | lock_buffer(bh); | |
3450 | bh->b_end_io = btrfs_end_buffer_write_sync; | |
3451 | bh->b_private = device; | |
a512bbf8 | 3452 | |
387125fc CM |
3453 | /* |
3454 | * we FUA the first super. The others we allow | |
3455 | * to go down lazily. | |
3456 | */ | |
1b9e619c OS |
3457 | op_flags = REQ_SYNC | REQ_META | REQ_PRIO; |
3458 | if (i == 0 && !btrfs_test_opt(device->fs_info, NOBARRIER)) | |
3459 | op_flags |= REQ_FUA; | |
3460 | ret = btrfsic_submit_bh(REQ_OP_WRITE, op_flags, bh); | |
4eedeb75 | 3461 | if (ret) |
a512bbf8 | 3462 | errors++; |
a512bbf8 YZ |
3463 | } |
3464 | return errors < i ? 0 : -1; | |
3465 | } | |
3466 | ||
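The checksum written into sb->csum above is, assuming the default crc32c checksum type, a CRC-32C over everything that follows the 32-byte csum area: btrfs_csum_data() is seeded with ~0 and btrfs_csum_final() stores the inverted result in little-endian form. A self-contained sketch of the same computation for a 4 KiB superblock buffer (the 4096/32 constants stand in for BTRFS_SUPER_INFO_SIZE/BTRFS_CSUM_SIZE):

#include <stddef.h>
#include <stdint.h>

/*
 * Bitwise CRC-32C (Castagnoli), reflected polynomial 0x82F63B78.  The
 * kernel uses an accelerated crc32c() implementation, but the resulting
 * value is the same.
 */
static uint32_t crc32c(uint32_t crc, const uint8_t *data, size_t len)
{
	while (len--) {
		crc ^= *data++;
		for (int k = 0; k < 8; k++)
			crc = (crc & 1) ? (crc >> 1) ^ 0x82F63B78u : crc >> 1;
	}
	return crc;
}

/*
 * Mirror of the write path above: seed with ~0 over the bytes after the
 * 32-byte csum area, invert, and compare against the stored value, which
 * is little-endian on disk.
 */
static int super_csum_matches(const uint8_t sb[4096])
{
	uint32_t crc = ~crc32c(~0u, sb + 32, 4096 - 32);
	uint32_t stored = (uint32_t)sb[0] | (uint32_t)sb[1] << 8 |
			  (uint32_t)sb[2] << 16 | (uint32_t)sb[3] << 24;

	return crc == stored;
}

The mount-time verification (btrfs_check_super_csum(), called earlier in open_ctree()) is essentially the same computation compared against the stored bytes.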
abbb3b8e DS |
3467 | /* |
3468 | * Wait for write completion of superblocks done by write_dev_supers; | |
3469 | * @max_mirrors must be the same for the write and wait phases. | |
3470 | * | |
3471 | * Return number of errors when buffer head is not found or not marked up to | |
3472 | * date. | |
3473 | */ | |
3474 | static int wait_dev_supers(struct btrfs_device *device, int max_mirrors) | |
3475 | { | |
3476 | struct buffer_head *bh; | |
3477 | int i; | |
3478 | int errors = 0; | |
b6a535fa | 3479 | bool primary_failed = false; |
abbb3b8e DS |
3480 | u64 bytenr; |
3481 | ||
3482 | if (max_mirrors == 0) | |
3483 | max_mirrors = BTRFS_SUPER_MIRROR_MAX; | |
3484 | ||
3485 | for (i = 0; i < max_mirrors; i++) { | |
3486 | bytenr = btrfs_sb_offset(i); | |
3487 | if (bytenr + BTRFS_SUPER_INFO_SIZE >= | |
3488 | device->commit_total_bytes) | |
3489 | break; | |
3490 | ||
9f6d2510 DS |
3491 | bh = __find_get_block(device->bdev, |
3492 | bytenr / BTRFS_BDEV_BLOCKSIZE, | |
abbb3b8e DS |
3493 | BTRFS_SUPER_INFO_SIZE); |
3494 | if (!bh) { | |
3495 | errors++; | |
b6a535fa HM |
3496 | if (i == 0) |
3497 | primary_failed = true; | |
abbb3b8e DS |
3498 | continue; |
3499 | } | |
3500 | wait_on_buffer(bh); | |
b6a535fa | 3501 | if (!buffer_uptodate(bh)) { |
abbb3b8e | 3502 | errors++; |
b6a535fa HM |
3503 | if (i == 0) |
3504 | primary_failed = true; | |
3505 | } | |
abbb3b8e DS |
3506 | |
3507 | /* drop our reference */ | |
3508 | brelse(bh); | |
3509 | ||
3510 | /* drop the reference from the writing run */ | |
3511 | brelse(bh); | |
3512 | } | |
3513 | ||
b6a535fa HM |
3514 | /* log error, force error return */ |
3515 | if (primary_failed) { | |
3516 | btrfs_err(device->fs_info, "error writing primary super block to device %llu", | |
3517 | device->devid); | |
3518 | return -1; | |
3519 | } | |
3520 | ||
abbb3b8e DS |
3521 | return errors < i ? 0 : -1; |
3522 | } | |
3523 | ||
387125fc CM |
3524 | /* |
3525 | * endio for write_dev_flush; this will wake anyone waiting | |
3526 | * for the barrier when it is done | |
3527 | */ | |
4246a0b6 | 3528 | static void btrfs_end_empty_barrier(struct bio *bio) |
387125fc | 3529 | { |
e0ae9994 | 3530 | complete(bio->bi_private); |
387125fc CM |
3531 | } |
3532 | ||
3533 | /* | |
4fc6441a AJ |
3534 | * Submit a flush request to the device if it supports it. Error handling is |
3535 | * done in the waiting counterpart. | |
387125fc | 3536 | */ |
4fc6441a | 3537 | static void write_dev_flush(struct btrfs_device *device) |
387125fc | 3538 | { |
c2a9c7ab | 3539 | struct request_queue *q = bdev_get_queue(device->bdev); |
e0ae9994 | 3540 | struct bio *bio = device->flush_bio; |
387125fc | 3541 | |
c2a9c7ab | 3542 | if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags)) |
4fc6441a | 3543 | return; |
387125fc | 3544 | |
e0ae9994 | 3545 | bio_reset(bio); |
387125fc | 3546 | bio->bi_end_io = btrfs_end_empty_barrier; |
74d46992 | 3547 | bio_set_dev(bio, device->bdev); |
8d910125 | 3548 | bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH; |
387125fc CM |
3549 | init_completion(&device->flush_wait); |
3550 | bio->bi_private = &device->flush_wait; | |
387125fc | 3551 | |
43a01111 | 3552 | btrfsic_submit_bio(bio); |
1c3063b6 | 3553 | set_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state); |
4fc6441a | 3554 | } |
387125fc | 3555 | |
4fc6441a AJ |
3556 | /* |
3557 | * If the flush bio has been submitted by write_dev_flush, wait for it. | |
3558 | */ | |
8c27cb35 | 3559 | static blk_status_t wait_dev_flush(struct btrfs_device *device) |
4fc6441a | 3560 | { |
4fc6441a | 3561 | struct bio *bio = device->flush_bio; |
387125fc | 3562 | |
1c3063b6 | 3563 | if (!test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state)) |
58efbc9f | 3564 | return BLK_STS_OK; |
387125fc | 3565 | |
1c3063b6 | 3566 | clear_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state); |
2980d574 | 3567 | wait_for_completion_io(&device->flush_wait); |
387125fc | 3568 | |
8c27cb35 | 3569 | return bio->bi_status; |
387125fc | 3570 | } |
387125fc | 3571 | |
d10b82fe | 3572 | static int check_barrier_error(struct btrfs_fs_info *fs_info) |
401b41e5 | 3573 | { |
6528b99d | 3574 | if (!btrfs_check_rw_degradable(fs_info, NULL)) |
401b41e5 | 3575 | return -EIO; |
387125fc CM |
3576 | return 0; |
3577 | } | |
3578 | ||
3579 | /* | |
3580 | * send an empty flush down to each device in parallel, | |
3581 | * then wait for them | |
3582 | */ | |
3583 | static int barrier_all_devices(struct btrfs_fs_info *info) | |
3584 | { | |
3585 | struct list_head *head; | |
3586 | struct btrfs_device *dev; | |
5af3e8cc | 3587 | int errors_wait = 0; |
4e4cbee9 | 3588 | blk_status_t ret; |
387125fc | 3589 | |
1538e6c5 | 3590 | lockdep_assert_held(&info->fs_devices->device_list_mutex); |
387125fc CM |
3591 | /* send down all the barriers */ |
3592 | head = &info->fs_devices->devices; | |
1538e6c5 | 3593 | list_for_each_entry(dev, head, dev_list) { |
e6e674bd | 3594 | if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) |
f88ba6a2 | 3595 | continue; |
cea7c8bf | 3596 | if (!dev->bdev) |
387125fc | 3597 | continue; |
e12c9621 | 3598 | if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) || |
ebbede42 | 3599 | !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) |
387125fc CM |
3600 | continue; |
3601 | ||
4fc6441a | 3602 | write_dev_flush(dev); |
58efbc9f | 3603 | dev->last_flush_error = BLK_STS_OK; |
387125fc CM |
3604 | } |
3605 | ||
3606 | /* wait for all the barriers */ | |
1538e6c5 | 3607 | list_for_each_entry(dev, head, dev_list) { |
e6e674bd | 3608 | if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) |
f88ba6a2 | 3609 | continue; |
387125fc | 3610 | if (!dev->bdev) { |
5af3e8cc | 3611 | errors_wait++; |
387125fc CM |
3612 | continue; |
3613 | } | |
e12c9621 | 3614 | if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) || |
ebbede42 | 3615 | !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) |
387125fc CM |
3616 | continue; |
3617 | ||
4fc6441a | 3618 | ret = wait_dev_flush(dev); |
401b41e5 AJ |
3619 | if (ret) { |
3620 | dev->last_flush_error = ret; | |
66b4993e DS |
3621 | btrfs_dev_stat_inc_and_print(dev, |
3622 | BTRFS_DEV_STAT_FLUSH_ERRS); | |
5af3e8cc | 3623 | errors_wait++; |
401b41e5 AJ |
3624 | } |
3625 | } | |
3626 | ||
cea7c8bf | 3627 | if (errors_wait) { |
401b41e5 AJ |
3628 | /* |
3629 | * At some point we need the status of all disks | |
3630 | * to arrive at the volume status. So error checking | |
3631 | * is being pushed to a separate loop. | |
3632 | */ | |
d10b82fe | 3633 | return check_barrier_error(info); |
387125fc | 3634 | } |
387125fc CM |
3635 | return 0; |
3636 | } | |
3637 | ||
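barrier_all_devices() is deliberately two-phase: the first loop only submits an empty flush to every usable, writeable device and the second loop waits and counts failures, so the flushes run in parallel instead of back to back. A rough userspace analogue of that shape, with one thread per descriptor and fsync() standing in for the empty flush bio (the struct and function names here are illustrative, not btrfs API):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

struct flush_job {
	int		fd;	/* open descriptor for one device/file */
	int		error;	/* errno from fsync(), 0 on success */
	int		started;
	pthread_t	thread;
};

static void *do_flush(void *arg)
{
	struct flush_job *job = arg;

	job->error = fsync(job->fd) ? errno : 0;
	return NULL;
}

/*
 * Phase 1 starts every flush without waiting; phase 2 waits for all of
 * them and only then counts failures -- the same submit-all-then-wait-all
 * shape as barrier_all_devices() above.
 */
static int flush_all(struct flush_job *jobs, int ndevs)
{
	int errors = 0;

	for (int i = 0; i < ndevs; i++)
		jobs[i].started = pthread_create(&jobs[i].thread, NULL,
						 do_flush, &jobs[i]) == 0;

	for (int i = 0; i < ndevs; i++) {
		if (!jobs[i].started) {
			errors++;
			continue;
		}
		pthread_join(jobs[i].thread, NULL);
		if (jobs[i].error) {
			fprintf(stderr, "flush of fd %d failed: %s\n",
				jobs[i].fd, strerror(jobs[i].error));
			errors++;
		}
	}
	return errors;
}

Compile with -pthread; a caller fills in jobs[].fd and treats a non-zero return the way errors_wait is treated above.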
943c6e99 ZL |
3638 | int btrfs_get_num_tolerated_disk_barrier_failures(u64 flags) |
3639 | { | |
8789f4fe ZL |
3640 | int raid_type; |
3641 | int min_tolerated = INT_MAX; | |
943c6e99 | 3642 | |
8789f4fe ZL |
3643 | if ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 || |
3644 | (flags & BTRFS_AVAIL_ALLOC_BIT_SINGLE)) | |
3645 | min_tolerated = min(min_tolerated, | |
3646 | btrfs_raid_array[BTRFS_RAID_SINGLE]. | |
3647 | tolerated_failures); | |
943c6e99 | 3648 | |
8789f4fe ZL |
3649 | for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) { |
3650 | if (raid_type == BTRFS_RAID_SINGLE) | |
3651 | continue; | |
41a6e891 | 3652 | if (!(flags & btrfs_raid_array[raid_type].bg_flag)) |
8789f4fe ZL |
3653 | continue; |
3654 | min_tolerated = min(min_tolerated, | |
3655 | btrfs_raid_array[raid_type]. | |
3656 | tolerated_failures); | |
3657 | } | |
943c6e99 | 3658 | |
8789f4fe | 3659 | if (min_tolerated == INT_MAX) { |
ab8d0fc4 | 3660 | pr_warn("BTRFS: unknown raid flag: %llu", flags); |
8789f4fe ZL |
3661 | min_tolerated = 0; |
3662 | } | |
3663 | ||
3664 | return min_tolerated; | |
943c6e99 ZL |
3665 | } |
3666 | ||
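btrfs_get_num_tolerated_disk_barrier_failures() reduces a combined profile flags word to its weakest member: the result is the minimum tolerated_failures over every profile bit that is set, with SINGLE (tolerating none) assumed when no profile bit is present. The sketch below mirrors that logic with an assumed tolerance table (single/raid0/dup 0, raid1/raid10/raid5 1, raid6 2, as in btrfs_raid_array); the bit values are illustrative and not the real BTRFS_BLOCK_GROUP_* constants. For example, metadata on raid6 plus data on raid1 yields min(2, 1) = 1.

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative profile bits and tolerances; the real values live in the
 * BTRFS_BLOCK_GROUP_* flags and btrfs_raid_array[], not here.
 */
enum { P_RAID0, P_RAID1, P_DUP, P_RAID10, P_RAID5, P_RAID6, P_NR };

static const struct {
	uint64_t bit;
	int tolerated;
} profiles[P_NR] = {
	[P_RAID0]  = { 1ULL << 0, 0 },
	[P_RAID1]  = { 1ULL << 1, 1 },
	[P_DUP]    = { 1ULL << 2, 0 },
	[P_RAID10] = { 1ULL << 3, 1 },
	[P_RAID5]  = { 1ULL << 4, 1 },
	[P_RAID6]  = { 1ULL << 5, 2 },
};

/* minimum over all set profile bits; no bit set means SINGLE, tolerating 0 */
static int tolerated_failures(uint64_t flags)
{
	int min_tol = INT_MAX;

	for (int i = 0; i < P_NR; i++)
		if ((flags & profiles[i].bit) &&
		    profiles[i].tolerated < min_tol)
			min_tol = profiles[i].tolerated;
	return min_tol == INT_MAX ? 0 : min_tol;
}

int main(void)
{
	/* e.g. metadata on raid6 plus data on raid1: min(2, 1) == 1 */
	uint64_t flags = profiles[P_RAID6].bit | profiles[P_RAID1].bit;

	printf("tolerated disk failures: %d\n", tolerated_failures(flags));
	return 0;
}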
eece6a9c | 3667 | int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors) |
f2984462 | 3668 | { |
e5e9a520 | 3669 | struct list_head *head; |
f2984462 | 3670 | struct btrfs_device *dev; |
a061fc8d | 3671 | struct btrfs_super_block *sb; |
f2984462 | 3672 | struct btrfs_dev_item *dev_item; |
f2984462 CM |
3673 | int ret; |
3674 | int do_barriers; | |
a236aed1 CM |
3675 | int max_errors; |
3676 | int total_errors = 0; | |
a061fc8d | 3677 | u64 flags; |
f2984462 | 3678 | |
0b246afa | 3679 | do_barriers = !btrfs_test_opt(fs_info, NOBARRIER); |
fed3b381 LB |
3680 | |
3681 | /* | |
3682 | * max_mirrors == 0 indicates we're from commit_transaction, | |
3683 | * not from fsync where the tree roots in fs_info have not | |
3684 | * been consistent on disk. | |
3685 | */ | |
3686 | if (max_mirrors == 0) | |
3687 | backup_super_roots(fs_info); | |
f2984462 | 3688 | |
0b246afa | 3689 | sb = fs_info->super_for_commit; |
a061fc8d | 3690 | dev_item = &sb->dev_item; |
e5e9a520 | 3691 | |
0b246afa JM |
3692 | mutex_lock(&fs_info->fs_devices->device_list_mutex); |
3693 | head = &fs_info->fs_devices->devices; | |
3694 | max_errors = btrfs_super_num_devices(fs_info->super_copy) - 1; | |
387125fc | 3695 | |
5af3e8cc | 3696 | if (do_barriers) { |
0b246afa | 3697 | ret = barrier_all_devices(fs_info); |
5af3e8cc SB |
3698 | if (ret) { |
3699 | mutex_unlock( | |
0b246afa JM |
3700 | &fs_info->fs_devices->device_list_mutex); |
3701 | btrfs_handle_fs_error(fs_info, ret, | |
3702 | "errors while submitting device barriers."); | |
5af3e8cc SB |
3703 | return ret; |
3704 | } | |
3705 | } | |
387125fc | 3706 | |
1538e6c5 | 3707 | list_for_each_entry(dev, head, dev_list) { |
dfe25020 CM |
3708 | if (!dev->bdev) { |
3709 | total_errors++; | |
3710 | continue; | |
3711 | } | |
e12c9621 | 3712 | if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) || |
ebbede42 | 3713 | !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) |
dfe25020 CM |
3714 | continue; |
3715 | ||
2b82032c | 3716 | btrfs_set_stack_device_generation(dev_item, 0); |
a061fc8d CM |
3717 | btrfs_set_stack_device_type(dev_item, dev->type); |
3718 | btrfs_set_stack_device_id(dev_item, dev->devid); | |
7df69d3e | 3719 | btrfs_set_stack_device_total_bytes(dev_item, |
935e5cc9 | 3720 | dev->commit_total_bytes); |
ce7213c7 MX |
3721 | btrfs_set_stack_device_bytes_used(dev_item, |
3722 | dev->commit_bytes_used); | |
a061fc8d CM |
3723 | btrfs_set_stack_device_io_align(dev_item, dev->io_align); |
3724 | btrfs_set_stack_device_io_width(dev_item, dev->io_width); | |
3725 | btrfs_set_stack_device_sector_size(dev_item, dev->sector_size); | |
3726 | memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE); | |
44880fdc | 3727 | memcpy(dev_item->fsid, dev->fs_devices->fsid, BTRFS_FSID_SIZE); |
a512bbf8 | 3728 | |
a061fc8d CM |
3729 | flags = btrfs_super_flags(sb); |
3730 | btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN); | |
3731 | ||
75cb857d QW |
3732 | ret = btrfs_validate_write_super(fs_info, sb); |
3733 | if (ret < 0) { | |
3734 | mutex_unlock(&fs_info->fs_devices->device_list_mutex); | |
3735 | btrfs_handle_fs_error(fs_info, -EUCLEAN, | |
3736 | "unexpected superblock corruption detected"); | |
3737 | return -EUCLEAN; | |
3738 | } | |
3739 | ||
abbb3b8e | 3740 | ret = write_dev_supers(dev, sb, max_mirrors); |
a236aed1 CM |
3741 | if (ret) |
3742 | total_errors++; | |
f2984462 | 3743 | } |
a236aed1 | 3744 | if (total_errors > max_errors) { |
0b246afa JM |
3745 | btrfs_err(fs_info, "%d errors while writing supers", |
3746 | total_errors); | |
3747 | mutex_unlock(&fs_info->fs_devices->device_list_mutex); | |
79787eaa | 3748 | |
9d565ba4 | 3749 | /* FUA is masked off if unsupported and can't be the reason */ |
0b246afa JM |
3750 | btrfs_handle_fs_error(fs_info, -EIO, |
3751 | "%d errors while writing supers", | |
3752 | total_errors); | |
9d565ba4 | 3753 | return -EIO; |
a236aed1 | 3754 | } |
f2984462 | 3755 | |
a512bbf8 | 3756 | total_errors = 0; |
1538e6c5 | 3757 | list_for_each_entry(dev, head, dev_list) { |
dfe25020 CM |
3758 | if (!dev->bdev) |
3759 | continue; | |
e12c9621 | 3760 | if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) || |
ebbede42 | 3761 | !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) |
dfe25020 CM |
3762 | continue; |
3763 | ||
abbb3b8e | 3764 | ret = wait_dev_supers(dev, max_mirrors); |
a512bbf8 YZ |
3765 | if (ret) |
3766 | total_errors++; | |
f2984462 | 3767 | } |
0b246afa | 3768 | mutex_unlock(&fs_info->fs_devices->device_list_mutex); |
a236aed1 | 3769 | if (total_errors > max_errors) { |
0b246afa JM |
3770 | btrfs_handle_fs_error(fs_info, -EIO, |
3771 | "%d errors while writing supers", | |
3772 | total_errors); | |
79787eaa | 3773 | return -EIO; |
a236aed1 | 3774 | } |
f2984462 CM |
3775 | return 0; |
3776 | } | |
3777 | ||
cb517eab MX |
3778 | /* Drop a fs root from the radix tree and free it. */ |
3779 | void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info, | |
3780 | struct btrfs_root *root) | |
2619ba1f | 3781 | { |
4df27c4d | 3782 | spin_lock(&fs_info->fs_roots_radix_lock); |
2619ba1f CM |
3783 | radix_tree_delete(&fs_info->fs_roots_radix, |
3784 | (unsigned long)root->root_key.objectid); | |
4df27c4d | 3785 | spin_unlock(&fs_info->fs_roots_radix_lock); |
76dda93c YZ |
3786 | |
3787 | if (btrfs_root_refs(&root->root_item) == 0) | |
3788 | synchronize_srcu(&fs_info->subvol_srcu); | |
3789 | ||
1c1ea4f7 | 3790 | if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) { |
3321719e | 3791 | btrfs_free_log(NULL, root); |
1c1ea4f7 LB |
3792 | if (root->reloc_root) { |
3793 | free_extent_buffer(root->reloc_root->node); | |
3794 | free_extent_buffer(root->reloc_root->commit_root); | |
3795 | btrfs_put_fs_root(root->reloc_root); | |
3796 | root->reloc_root = NULL; | |
3797 | } | |
3798 | } | |
3321719e | 3799 | |
faa2dbf0 JB |
3800 | if (root->free_ino_pinned) |
3801 | __btrfs_remove_free_space_cache(root->free_ino_pinned); | |
3802 | if (root->free_ino_ctl) | |
3803 | __btrfs_remove_free_space_cache(root->free_ino_ctl); | |
84db5ccf | 3804 | btrfs_free_fs_root(root); |
4df27c4d YZ |
3805 | } |
3806 | ||
84db5ccf | 3807 | void btrfs_free_fs_root(struct btrfs_root *root) |
4df27c4d | 3808 | { |
57cdc8db | 3809 | iput(root->ino_cache_inode); |
4df27c4d | 3810 | WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree)); |
0ee5dc67 AV |
3811 | if (root->anon_dev) |
3812 | free_anon_bdev(root->anon_dev); | |
8257b2dc MX |
3813 | if (root->subv_writers) |
3814 | btrfs_free_subvolume_writers(root->subv_writers); | |
4df27c4d YZ |
3815 | free_extent_buffer(root->node); |
3816 | free_extent_buffer(root->commit_root); | |
581bb050 LZ |
3817 | kfree(root->free_ino_ctl); |
3818 | kfree(root->free_ino_pinned); | |
b0feb9d9 | 3819 | btrfs_put_fs_root(root); |
2619ba1f CM |
3820 | } |
3821 | ||
c146afad | 3822 | int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info) |
cfaa7295 | 3823 | { |
c146afad YZ |
3824 | u64 root_objectid = 0; |
3825 | struct btrfs_root *gang[8]; | |
65d33fd7 QW |
3826 | int i = 0; |
3827 | int err = 0; | |
3828 | unsigned int ret = 0; | |
3829 | int index; | |
e089f05c | 3830 | |
c146afad | 3831 | while (1) { |
65d33fd7 | 3832 | index = srcu_read_lock(&fs_info->subvol_srcu); |
c146afad YZ |
3833 | ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix, |
3834 | (void **)gang, root_objectid, | |
3835 | ARRAY_SIZE(gang)); | |
65d33fd7 QW |
3836 | if (!ret) { |
3837 | srcu_read_unlock(&fs_info->subvol_srcu, index); | |
c146afad | 3838 | break; |
65d33fd7 | 3839 | } |
5d4f98a2 | 3840 | root_objectid = gang[ret - 1]->root_key.objectid + 1; |
65d33fd7 | 3841 | |
c146afad | 3842 | for (i = 0; i < ret; i++) { |
65d33fd7 QW |
3843 | /* Avoid grabbing roots in dead_roots */ |
3844 | if (btrfs_root_refs(&gang[i]->root_item) == 0) { | |
3845 | gang[i] = NULL; | |
3846 | continue; | |
3847 | } | |
3848 | /* grab all the search results for later use */ | |
3849 | gang[i] = btrfs_grab_fs_root(gang[i]); | |
3850 | } | |
3851 | srcu_read_unlock(&fs_info->subvol_srcu, index); | |
66b4ffd1 | 3852 | |
65d33fd7 QW |
3853 | for (i = 0; i < ret; i++) { |
3854 | if (!gang[i]) | |
3855 | continue; | |
c146afad | 3856 | root_objectid = gang[i]->root_key.objectid; |
66b4ffd1 JB |
3857 | err = btrfs_orphan_cleanup(gang[i]); |
3858 | if (err) | |
65d33fd7 QW |
3859 | break; |
3860 | btrfs_put_fs_root(gang[i]); | |
c146afad YZ |
3861 | } |
3862 | root_objectid++; | |
3863 | } | |
65d33fd7 QW |
3864 | |
3865 | /* release the uncleaned roots due to error */ | |
3866 | for (; i < ret; i++) { | |
3867 | if (gang[i]) | |
3868 | btrfs_put_fs_root(gang[i]); | |
3869 | } | |
3870 | return err; | |
c146afad | 3871 | } |
a2135011 | 3872 | |
6bccf3ab | 3873 | int btrfs_commit_super(struct btrfs_fs_info *fs_info) |
c146afad | 3874 | { |
6bccf3ab | 3875 | struct btrfs_root *root = fs_info->tree_root; |
c146afad | 3876 | struct btrfs_trans_handle *trans; |
a74a4b97 | 3877 | |
0b246afa | 3878 | mutex_lock(&fs_info->cleaner_mutex); |
2ff7e61e | 3879 | btrfs_run_delayed_iputs(fs_info); |
0b246afa JM |
3880 | mutex_unlock(&fs_info->cleaner_mutex); |
3881 | wake_up_process(fs_info->cleaner_kthread); | |
c71bf099 YZ |
3882 | |
3883 | /* wait until ongoing cleanup work is done */ | |
0b246afa JM |
3884 | down_write(&fs_info->cleanup_work_sem); |
3885 | up_write(&fs_info->cleanup_work_sem); | |
c71bf099 | 3886 | |
7a7eaa40 | 3887 | trans = btrfs_join_transaction(root); |
3612b495 TI |
3888 | if (IS_ERR(trans)) |
3889 | return PTR_ERR(trans); | |
3a45bb20 | 3890 | return btrfs_commit_transaction(trans); |
c146afad YZ |
3891 | } |
3892 | ||
6bccf3ab | 3893 | void close_ctree(struct btrfs_fs_info *fs_info) |
c146afad | 3894 | { |
c146afad YZ |
3895 | int ret; |
3896 | ||
afcdd129 | 3897 | set_bit(BTRFS_FS_CLOSING_START, &fs_info->flags); |
d6fd0ae2 OS |
3898 | /* |
3899 | * We don't want the cleaner to start new transactions, add more delayed | |
3900 | * iputs, etc. while we're closing. We can't use kthread_stop() yet | |
3901 | * because that frees the task_struct, and the transaction kthread might | |
3902 | * still try to wake up the cleaner. | |
3903 | */ | |
3904 | kthread_park(fs_info->cleaner_kthread); | |
c146afad | 3905 | |
7343dd61 | 3906 | /* wait for the qgroup rescan worker to stop */ |
d06f23d6 | 3907 | btrfs_qgroup_wait_for_completion(fs_info, false); |
7343dd61 | 3908 | |
803b2f54 SB |
3909 | /* wait for the uuid_scan task to finish */ |
3910 | down(&fs_info->uuid_tree_rescan_sem); | |
3911 | /* avoid complaints from lockdep et al.; set sem back to initial state */ | |
3912 | up(&fs_info->uuid_tree_rescan_sem); | |
3913 | ||
837d5b6e | 3914 | /* pause restriper - we want to resume on mount */ |
aa1b8cd4 | 3915 | btrfs_pause_balance(fs_info); |
837d5b6e | 3916 | |
8dabb742 SB |
3917 | btrfs_dev_replace_suspend_for_unmount(fs_info); |
3918 | ||
aa1b8cd4 | 3919 | btrfs_scrub_cancel(fs_info); |
4cb5300b CM |
3920 | |
3921 | /* wait for any defraggers to finish */ | |
3922 | wait_event(fs_info->transaction_wait, | |
3923 | (atomic_read(&fs_info->defrag_running) == 0)); | |
3924 | ||
3925 | /* clear out the rbtree of defraggable inodes */ | |
26176e7c | 3926 | btrfs_cleanup_defrag_inodes(fs_info); |
4cb5300b | 3927 | |
21c7e756 MX |
3928 | cancel_work_sync(&fs_info->async_reclaim_work); |
3929 | ||
bc98a42c | 3930 | if (!sb_rdonly(fs_info->sb)) { |
e44163e1 | 3931 | /* |
d6fd0ae2 OS |
3932 | * The cleaner kthread is stopped, so do one final pass over |
3933 | * unused block groups. | |
e44163e1 | 3934 | */ |
0b246afa | 3935 | btrfs_delete_unused_bgs(fs_info); |
e44163e1 | 3936 | |
6bccf3ab | 3937 | ret = btrfs_commit_super(fs_info); |
acce952b | 3938 | if (ret) |
04892340 | 3939 | btrfs_err(fs_info, "commit super ret %d", ret); |
acce952b | 3940 | } |
3941 | ||
af722733 LB |
3942 | if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state) || |
3943 | test_bit(BTRFS_FS_STATE_TRANS_ABORTED, &fs_info->fs_state)) | |
2ff7e61e | 3944 | btrfs_error_commit_super(fs_info); |
0f7d52f4 | 3945 | |
e3029d9f AV |
3946 | kthread_stop(fs_info->transaction_kthread); |
3947 | kthread_stop(fs_info->cleaner_kthread); | |
8929ecfa | 3948 | |
e187831e | 3949 | ASSERT(list_empty(&fs_info->delayed_iputs)); |
afcdd129 | 3950 | set_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags); |
f25784b3 | 3951 | |
04892340 | 3952 | btrfs_free_qgroup_config(fs_info); |
fe816d0f | 3953 | ASSERT(list_empty(&fs_info->delalloc_roots)); |
bcef60f2 | 3954 | |
963d678b | 3955 | if (percpu_counter_sum(&fs_info->delalloc_bytes)) { |
04892340 | 3956 | btrfs_info(fs_info, "at unmount delalloc count %lld", |
963d678b | 3957 | percpu_counter_sum(&fs_info->delalloc_bytes)); |
b0c68f8b | 3958 | } |
bcc63abb | 3959 | |
6618a59b | 3960 | btrfs_sysfs_remove_mounted(fs_info); |
b7c35e81 | 3961 | btrfs_sysfs_remove_fsid(fs_info->fs_devices); |
5ac1d209 | 3962 | |
faa2dbf0 | 3963 | btrfs_free_fs_roots(fs_info); |
d10c5f31 | 3964 | |
1a4319cc LB |
3965 | btrfs_put_block_group_cache(fs_info); |
3966 | ||
de348ee0 WS |
3967 | /* |
3968 | * we must make sure there are no read requests to | |
3969 | * submit after we stop all workers. | |
3970 | */ | |
3971 | invalidate_inode_pages2(fs_info->btree_inode->i_mapping); | |
96192499 JB |
3972 | btrfs_stop_all_workers(fs_info); |
3973 | ||
5cdd7db6 FM |
3974 | btrfs_free_block_groups(fs_info); |
3975 | ||
afcdd129 | 3976 | clear_bit(BTRFS_FS_OPEN, &fs_info->flags); |
13e6c37b | 3977 | free_root_pointers(fs_info, 1); |
9ad6b7bc | 3978 | |
13e6c37b | 3979 | iput(fs_info->btree_inode); |
d6bfde87 | 3980 | |
21adbd5c | 3981 | #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY |
0b246afa | 3982 | if (btrfs_test_opt(fs_info, CHECK_INTEGRITY)) |
2ff7e61e | 3983 | btrfsic_unmount(fs_info->fs_devices); |
21adbd5c SB |
3984 | #endif |
3985 | ||
dfe25020 | 3986 | btrfs_close_devices(fs_info->fs_devices); |
0b86a832 | 3987 | btrfs_mapping_tree_free(&fs_info->mapping_tree); |
b248a415 | 3988 | |
e2d84521 | 3989 | percpu_counter_destroy(&fs_info->dirty_metadata_bytes); |
963d678b | 3990 | percpu_counter_destroy(&fs_info->delalloc_bytes); |
7f8d236a | 3991 | percpu_counter_destroy(&fs_info->dev_replace.bio_counter); |
76dda93c | 3992 | cleanup_srcu_struct(&fs_info->subvol_srcu); |
0b86a832 | 3993 | |
53b381b3 | 3994 | btrfs_free_stripe_hash_table(fs_info); |
fd708b81 | 3995 | btrfs_free_ref_cache(fs_info); |
53b381b3 | 3996 | |
04216820 FM |
3997 | while (!list_empty(&fs_info->pinned_chunks)) { |
3998 | struct extent_map *em; | |
3999 | ||
4000 | em = list_first_entry(&fs_info->pinned_chunks, | |
4001 | struct extent_map, list); | |
4002 | list_del_init(&em->list); | |
4003 | free_extent_map(em); | |
4004 | } | |
eb60ceac CM |
4005 | } |
4006 | ||
b9fab919 CM |
4007 | int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid, |
4008 | int atomic) | |
5f39d397 | 4009 | { |
1259ab75 | 4010 | int ret; |
727011e0 | 4011 | struct inode *btree_inode = buf->pages[0]->mapping->host; |
1259ab75 | 4012 | |
0b32f4bb | 4013 | ret = extent_buffer_uptodate(buf); |
1259ab75 CM |
4014 | if (!ret) |
4015 | return ret; | |
4016 | ||
4017 | ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf, | |
b9fab919 CM |
4018 | parent_transid, atomic); |
4019 | if (ret == -EAGAIN) | |
4020 | return ret; | |
1259ab75 | 4021 | return !ret; |
5f39d397 CM |
4022 | } |
4023 | ||
5f39d397 CM |
4024 | void btrfs_mark_buffer_dirty(struct extent_buffer *buf) |
4025 | { | |
0b246afa | 4026 | struct btrfs_fs_info *fs_info; |
06ea65a3 | 4027 | struct btrfs_root *root; |
5f39d397 | 4028 | u64 transid = btrfs_header_generation(buf); |
b9473439 | 4029 | int was_dirty; |
b4ce94de | 4030 | |
06ea65a3 JB |
4031 | #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS |
4032 | /* | |
4033 | * This is a fast path so only do this check if we have sanity tests | |
b0132a3b | 4034 | * enabled. Normal people shouldn't be using unmapped buffers as dirty |
06ea65a3 JB |
4035 | * outside of the sanity tests. |
4036 | */ | |
b0132a3b | 4037 | if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &buf->bflags))) |
06ea65a3 JB |
4038 | return; |
4039 | #endif | |
4040 | root = BTRFS_I(buf->pages[0]->mapping->host)->root; | |
0b246afa | 4041 | fs_info = root->fs_info; |
b9447ef8 | 4042 | btrfs_assert_tree_locked(buf); |
0b246afa | 4043 | if (transid != fs_info->generation) |
5d163e0e | 4044 | WARN(1, KERN_CRIT "btrfs transid mismatch buffer %llu, found %llu running %llu\n", |
0b246afa | 4045 | buf->start, transid, fs_info->generation); |
0b32f4bb | 4046 | was_dirty = set_extent_buffer_dirty(buf); |
e2d84521 | 4047 | if (!was_dirty) |
104b4e51 NB |
4048 | percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, |
4049 | buf->len, | |
4050 | fs_info->dirty_metadata_batch); | |
1f21ef0a | 4051 | #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY |
69fc6cbb QW |
4052 | /* |
4053 | * Since btrfs_mark_buffer_dirty() can be called with item pointer set | |
4054 | * but item data not updated. | |
4055 | * So here we should only check item pointers, not item data. | |
4056 | */ | |
4057 | if (btrfs_header_level(buf) == 0 && | |
2f659546 | 4058 | btrfs_check_leaf_relaxed(fs_info, buf)) { |
a4f78750 | 4059 | btrfs_print_leaf(buf); |
1f21ef0a FM |
4060 | ASSERT(0); |
4061 | } | |
4062 | #endif | |
eb60ceac CM |
4063 | } |
4064 | ||
2ff7e61e | 4065 | static void __btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info, |
b53d3f5d | 4066 | int flush_delayed) |
16cdcec7 MX |
4067 | { |
4068 | /* | |
4069 | * looks as though older kernels can get into trouble with | |
4070 | * this code; they end up stuck in balance_dirty_pages forever | |
4071 | */ | |
e2d84521 | 4072 | int ret; |
16cdcec7 MX |
4073 | |
4074 | if (current->flags & PF_MEMALLOC) | |
4075 | return; | |
4076 | ||
b53d3f5d | 4077 | if (flush_delayed) |
2ff7e61e | 4078 | btrfs_balance_delayed_items(fs_info); |
16cdcec7 | 4079 | |
d814a491 EL |
4080 | ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes, |
4081 | BTRFS_DIRTY_METADATA_THRESH, | |
4082 | fs_info->dirty_metadata_batch); | |
e2d84521 | 4083 | if (ret > 0) { |
0b246afa | 4084 | balance_dirty_pages_ratelimited(fs_info->btree_inode->i_mapping); |
16cdcec7 | 4085 | } |
16cdcec7 MX |
4086 | } |
4087 | ||
2ff7e61e | 4088 | void btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info) |
35b7e476 | 4089 | { |
2ff7e61e | 4090 | __btrfs_btree_balance_dirty(fs_info, 1); |
b53d3f5d | 4091 | } |
585ad2c3 | 4092 | |
2ff7e61e | 4093 | void btrfs_btree_balance_dirty_nodelay(struct btrfs_fs_info *fs_info) |
b53d3f5d | 4094 | { |
2ff7e61e | 4095 | __btrfs_btree_balance_dirty(fs_info, 0); |
35b7e476 | 4096 | } |
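A small sketch of how a caller might pick between the two wrappers above (the helper name is hypothetical): the nodelay variant skips flushing delayed items and suits contexts that must not recurse into that work.

/* Editorial sketch only -- example_throttle_metadata() is hypothetical. */
static void example_throttle_metadata(struct btrfs_fs_info *fs_info,
				      bool can_flush_delayed_items)
{
	if (can_flush_delayed_items)
		btrfs_btree_balance_dirty(fs_info);
	else
		btrfs_btree_balance_dirty_nodelay(fs_info);
}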
6b80053d | 4097 | |
581c1760 QW |
4098 | int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid, int level, |
4099 | struct btrfs_key *first_key) | |
6b80053d | 4100 | { |
727011e0 | 4101 | struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root; |
2ff7e61e JM |
4102 | struct btrfs_fs_info *fs_info = root->fs_info; |
4103 | ||
581c1760 QW |
4104 | return btree_read_extent_buffer_pages(fs_info, buf, parent_transid, |
4105 | level, first_key); | |
6b80053d | 4106 | } |
0da5468f | 4107 | |
2ff7e61e | 4108 | static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info) |
acce952b | 4109 | { |
fe816d0f NB |
4110 | /* cleanup FS via transaction */ |
4111 | btrfs_cleanup_transaction(fs_info); | |
4112 | ||
0b246afa | 4113 | mutex_lock(&fs_info->cleaner_mutex); |
2ff7e61e | 4114 | btrfs_run_delayed_iputs(fs_info); |
0b246afa | 4115 | mutex_unlock(&fs_info->cleaner_mutex); |
acce952b | 4116 | |
0b246afa JM |
4117 | down_write(&fs_info->cleanup_work_sem); |
4118 | up_write(&fs_info->cleanup_work_sem); | |
acce952b | 4119 | } |
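The down_write()/up_write() pair above is a barrier idiom rather than data protection: a writer cannot take the rwsem until every current reader has released it. A generic sketch of the same idiom, with a hypothetical name:

/* Editorial sketch only -- wait_for_current_readers() is hypothetical. */
static void wait_for_current_readers(struct rw_semaphore *sem)
{
	/*
	 * The empty write-side critical section acts as a flush point:
	 * it returns only once all in-flight read-side holders are gone.
	 */
	down_write(sem);
	up_write(sem);
}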
4120 | ||
143bede5 | 4121 | static void btrfs_destroy_ordered_extents(struct btrfs_root *root) |
acce952b | 4122 | { |
acce952b | 4123 | struct btrfs_ordered_extent *ordered; |
acce952b | 4124 | |
199c2a9c | 4125 | spin_lock(&root->ordered_extent_lock); |
779880ef JB |
4126 | /* |
4127 | * This just short-circuits the ordered completion code, which will | |
4128 | * make sure the ordered extent gets properly cleaned up. | |
4129 | */ | |
199c2a9c | 4130 | list_for_each_entry(ordered, &root->ordered_extents, |
779880ef JB |
4131 | root_extent_list) |
4132 | set_bit(BTRFS_ORDERED_IOERR, &ordered->flags); | |
199c2a9c MX |
4133 | spin_unlock(&root->ordered_extent_lock); |
4134 | } | |
4135 | ||
4136 | static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info) | |
4137 | { | |
4138 | struct btrfs_root *root; | |
4139 | struct list_head splice; | |
4140 | ||
4141 | INIT_LIST_HEAD(&splice); | |
4142 | ||
4143 | spin_lock(&fs_info->ordered_root_lock); | |
4144 | list_splice_init(&fs_info->ordered_roots, &splice); | |
4145 | while (!list_empty(&splice)) { | |
4146 | root = list_first_entry(&splice, struct btrfs_root, | |
4147 | ordered_root); | |
1de2cfde JB |
4148 | list_move_tail(&root->ordered_root, |
4149 | &fs_info->ordered_roots); | |
199c2a9c | 4150 | |
2a85d9ca | 4151 | spin_unlock(&fs_info->ordered_root_lock); |
199c2a9c MX |
4152 | btrfs_destroy_ordered_extents(root); |
4153 | ||
2a85d9ca LB |
4154 | cond_resched(); |
4155 | spin_lock(&fs_info->ordered_root_lock); | |
199c2a9c MX |
4156 | } |
4157 | spin_unlock(&fs_info->ordered_root_lock); | |
acce952b | 4158 | } |
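The function above uses the common splice-then-walk pattern: move the whole list to a private head under the lock, then drop the lock around each step that can block. A generic sketch of the same shape, with hypothetical names and assuming the usual list/spinlock headers:

/* Editorial sketch only -- struct example_item and example_drain() are hypothetical. */
struct example_item {
	struct list_head link;
};

static void example_drain(spinlock_t *lock, struct list_head *source)
{
	struct example_item *item;
	LIST_HEAD(splice);

	spin_lock(lock);
	list_splice_init(source, &splice);
	while (!list_empty(&splice)) {
		item = list_first_entry(&splice, struct example_item, link);
		list_del_init(&item->link);
		spin_unlock(lock);

		/* blocking per-item work would go here */

		cond_resched();
		spin_lock(lock);
	}
	spin_unlock(lock);
}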
4159 | ||
35a3621b | 4160 | static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans, |
2ff7e61e | 4161 | struct btrfs_fs_info *fs_info) |
acce952b | 4162 | { |
4163 | struct rb_node *node; | |
4164 | struct btrfs_delayed_ref_root *delayed_refs; | |
4165 | struct btrfs_delayed_ref_node *ref; | |
4166 | int ret = 0; | |
4167 | ||
4168 | delayed_refs = &trans->delayed_refs; | |
4169 | ||
4170 | spin_lock(&delayed_refs->lock); | |
d7df2c79 | 4171 | if (atomic_read(&delayed_refs->num_entries) == 0) { |
cfece4db | 4172 | spin_unlock(&delayed_refs->lock); |
0b246afa | 4173 | btrfs_info(fs_info, "delayed_refs has NO entry"); |
acce952b | 4174 | return ret; |
4175 | } | |
4176 | ||
5c9d028b | 4177 | while ((node = rb_first_cached(&delayed_refs->href_root)) != NULL) { |
d7df2c79 | 4178 | struct btrfs_delayed_ref_head *head; |
0e0adbcf | 4179 | struct rb_node *n; |
e78417d1 | 4180 | bool pin_bytes = false; |
acce952b | 4181 | |
d7df2c79 JB |
4182 | head = rb_entry(node, struct btrfs_delayed_ref_head, |
4183 | href_node); | |
4184 | if (!mutex_trylock(&head->mutex)) { | |
d278850e | 4185 | refcount_inc(&head->refs); |
d7df2c79 | 4186 | spin_unlock(&delayed_refs->lock); |
eb12db69 | 4187 | |
d7df2c79 | 4188 | mutex_lock(&head->mutex); |
e78417d1 | 4189 | mutex_unlock(&head->mutex); |
d278850e | 4190 | btrfs_put_delayed_ref_head(head); |
d7df2c79 JB |
4191 | spin_lock(&delayed_refs->lock); |
4192 | continue; | |
4193 | } | |
4194 | spin_lock(&head->lock); | |
e3d03965 | 4195 | while ((n = rb_first_cached(&head->ref_tree)) != NULL) { |
0e0adbcf JB |
4196 | ref = rb_entry(n, struct btrfs_delayed_ref_node, |
4197 | ref_node); | |
d7df2c79 | 4198 | ref->in_tree = 0; |
e3d03965 | 4199 | rb_erase_cached(&ref->ref_node, &head->ref_tree); |
0e0adbcf | 4200 | RB_CLEAR_NODE(&ref->ref_node); |
1d57ee94 WX |
4201 | if (!list_empty(&ref->add_list)) |
4202 | list_del(&ref->add_list); | |
d7df2c79 JB |
4203 | atomic_dec(&delayed_refs->num_entries); |
4204 | btrfs_put_delayed_ref(ref); | |
e78417d1 | 4205 | } |
d7df2c79 JB |
4206 | if (head->must_insert_reserved) |
4207 | pin_bytes = true; | |
4208 | btrfs_free_delayed_extent_op(head->extent_op); | |
4209 | delayed_refs->num_heads--; | |
4210 | if (head->processing == 0) | |
4211 | delayed_refs->num_heads_ready--; | |
4212 | atomic_dec(&delayed_refs->num_entries); | |
5c9d028b | 4213 | rb_erase_cached(&head->href_node, &delayed_refs->href_root); |
d278850e | 4214 | RB_CLEAR_NODE(&head->href_node); |
d7df2c79 JB |
4215 | spin_unlock(&head->lock); |
4216 | spin_unlock(&delayed_refs->lock); | |
4217 | mutex_unlock(&head->mutex); | |
acce952b | 4218 | |
d7df2c79 | 4219 | if (pin_bytes) |
d278850e JB |
4220 | btrfs_pin_extent(fs_info, head->bytenr, |
4221 | head->num_bytes, 1); | |
4222 | btrfs_put_delayed_ref_head(head); | |
acce952b | 4223 | cond_resched(); |
4224 | spin_lock(&delayed_refs->lock); | |
4225 | } | |
4226 | ||
4227 | spin_unlock(&delayed_refs->lock); | |
4228 | ||
4229 | return ret; | |
4230 | } | |
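The loop above drains an rb_root_cached by repeatedly taking the leftmost node, erasing it and dropping its reference. A generic version of that drain, with a hypothetical entry type:

/* Editorial sketch only -- struct example_node is hypothetical. */
struct example_node {
	struct rb_node rb;
};

static void example_drain_tree(struct rb_root_cached *root)
{
	struct rb_node *node;
	struct example_node *entry;

	while ((node = rb_first_cached(root)) != NULL) {
		entry = rb_entry(node, struct example_node, rb);
		rb_erase_cached(&entry->rb, root);
		RB_CLEAR_NODE(&entry->rb);
		kfree(entry);
	}
}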
4231 | ||
143bede5 | 4232 | static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root) |
acce952b | 4233 | { |
4234 | struct btrfs_inode *btrfs_inode; | |
4235 | struct list_head splice; | |
4236 | ||
4237 | INIT_LIST_HEAD(&splice); | |
4238 | ||
eb73c1b7 MX |
4239 | spin_lock(&root->delalloc_lock); |
4240 | list_splice_init(&root->delalloc_inodes, &splice); | |
acce952b | 4241 | |
4242 | while (!list_empty(&splice)) { | |
fe816d0f | 4243 | struct inode *inode = NULL; |
eb73c1b7 MX |
4244 | btrfs_inode = list_first_entry(&splice, struct btrfs_inode, |
4245 | delalloc_inodes); | |
fe816d0f | 4246 | __btrfs_del_delalloc_inode(root, btrfs_inode); |
eb73c1b7 | 4247 | spin_unlock(&root->delalloc_lock); |
acce952b | 4248 | |
fe816d0f NB |
4249 | /* |
4250 | * Make sure we get a live inode and that it'll not disappear | |
4251 | * meanwhile. | |
4252 | */ | |
4253 | inode = igrab(&btrfs_inode->vfs_inode); | |
4254 | if (inode) { | |
4255 | invalidate_inode_pages2(inode->i_mapping); | |
4256 | iput(inode); | |
4257 | } | |
eb73c1b7 | 4258 | spin_lock(&root->delalloc_lock); |
acce952b | 4259 | } |
eb73c1b7 MX |
4260 | spin_unlock(&root->delalloc_lock); |
4261 | } | |
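The igrab()/iput() pair above is the usual way to pin a VFS inode that may be on its way out: igrab() returns NULL if the inode is already being evicted, and only a non-NULL result may be dereferenced. A hedged sketch with a hypothetical helper:

/* Editorial sketch only -- example_drop_pages() is hypothetical. */
static void example_drop_pages(struct btrfs_inode *binode)
{
	struct inode *inode = igrab(&binode->vfs_inode);

	if (!inode)	/* inode is being evicted, nothing to do */
		return;
	invalidate_inode_pages2(inode->i_mapping);
	iput(inode);
}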
4262 | ||
4263 | static void btrfs_destroy_all_delalloc_inodes(struct btrfs_fs_info *fs_info) | |
4264 | { | |
4265 | struct btrfs_root *root; | |
4266 | struct list_head splice; | |
4267 | ||
4268 | INIT_LIST_HEAD(&splice); | |
4269 | ||
4270 | spin_lock(&fs_info->delalloc_root_lock); | |
4271 | list_splice_init(&fs_info->delalloc_roots, &splice); | |
4272 | while (!list_empty(&splice)) { | |
4273 | root = list_first_entry(&splice, struct btrfs_root, | |
4274 | delalloc_root); | |
eb73c1b7 MX |
4275 | root = btrfs_grab_fs_root(root); |
4276 | BUG_ON(!root); | |
4277 | spin_unlock(&fs_info->delalloc_root_lock); | |
4278 | ||
4279 | btrfs_destroy_delalloc_inodes(root); | |
4280 | btrfs_put_fs_root(root); | |
4281 | ||
4282 | spin_lock(&fs_info->delalloc_root_lock); | |
4283 | } | |
4284 | spin_unlock(&fs_info->delalloc_root_lock); | |
acce952b | 4285 | } |
4286 | ||
2ff7e61e | 4287 | static int btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info, |
acce952b | 4288 | struct extent_io_tree *dirty_pages, |
4289 | int mark) | |
4290 | { | |
4291 | int ret; | |
acce952b | 4292 | struct extent_buffer *eb; |
4293 | u64 start = 0; | |
4294 | u64 end; | |
acce952b | 4295 | |
4296 | while (1) { | |
4297 | ret = find_first_extent_bit(dirty_pages, start, &start, &end, | |
e6138876 | 4298 | mark, NULL); |
acce952b | 4299 | if (ret) |
4300 | break; | |
4301 | ||
91166212 | 4302 | clear_extent_bits(dirty_pages, start, end, mark); |
acce952b | 4303 | while (start <= end) { |
0b246afa JM |
4304 | eb = find_extent_buffer(fs_info, start); |
4305 | start += fs_info->nodesize; | |
fd8b2b61 | 4306 | if (!eb) |
acce952b | 4307 | continue; |
fd8b2b61 | 4308 | wait_on_extent_buffer_writeback(eb); |
acce952b | 4309 | |
fd8b2b61 JB |
4310 | if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, |
4311 | &eb->bflags)) | |
4312 | clear_extent_buffer_dirty(eb); | |
4313 | free_extent_buffer_stale(eb); | |
acce952b | 4314 | } |
4315 | } | |
4316 | ||
4317 | return ret; | |
4318 | } | |
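The loop above walks an extent_io_tree range by range with find_first_extent_bit(), which returns 0 while matching ranges remain. A sketch of the same iteration used for a different purpose (the helper is hypothetical; ranges are inclusive, hence the +1):

/* Editorial sketch only -- example_count_marked() is hypothetical. */
static u64 example_count_marked(struct extent_io_tree *tree, unsigned int bits)
{
	u64 start = 0;
	u64 end;
	u64 total = 0;

	while (!find_first_extent_bit(tree, start, &start, &end, bits, NULL)) {
		total += end - start + 1;
		start = end + 1;
	}
	return total;
}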
4319 | ||
2ff7e61e | 4320 | static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info, |
acce952b | 4321 | struct extent_io_tree *pinned_extents) |
4322 | { | |
4323 | struct extent_io_tree *unpin; | |
4324 | u64 start; | |
4325 | u64 end; | |
4326 | int ret; | |
ed0eaa14 | 4327 | bool loop = true; |
acce952b | 4328 | |
4329 | unpin = pinned_extents; | |
ed0eaa14 | 4330 | again: |
acce952b | 4331 | while (1) { |
fcd5e742 LF |
4332 | /* |
4333 | * btrfs_finish_extent_commit() may pick up the same range as | |
4334 | * ours between find_first_extent_bit() and clear_extent_dirty(). | |
4335 | * Hence, hold the unused_bg_unpin_mutex to avoid unpinning the | |
4336 | * same extent range twice. | |
4337 | */ | |
4338 | mutex_lock(&fs_info->unused_bg_unpin_mutex); | |
acce952b | 4339 | ret = find_first_extent_bit(unpin, 0, &start, &end, |
e6138876 | 4340 | EXTENT_DIRTY, NULL); |
fcd5e742 LF |
4341 | if (ret) { |
4342 | mutex_unlock(&fs_info->unused_bg_unpin_mutex); | |
acce952b | 4343 | break; |
fcd5e742 | 4344 | } |
acce952b | 4345 | |
af6f8f60 | 4346 | clear_extent_dirty(unpin, start, end); |
2ff7e61e | 4347 | btrfs_error_unpin_extent_range(fs_info, start, end); |
fcd5e742 | 4348 | mutex_unlock(&fs_info->unused_bg_unpin_mutex); |
acce952b | 4349 | cond_resched(); |
4350 | } | |
4351 | ||
ed0eaa14 | 4352 | if (loop) { |
0b246afa JM |
4353 | if (unpin == &fs_info->freed_extents[0]) |
4354 | unpin = &fs_info->freed_extents[1]; | |
ed0eaa14 | 4355 | else |
0b246afa | 4356 | unpin = &fs_info->freed_extents[0]; |
ed0eaa14 LB |
4357 | loop = false; |
4358 | goto again; | |
4359 | } | |
4360 | ||
acce952b | 4361 | return 0; |
4362 | } | |
4363 | ||
c79a1751 LB |
4364 | static void btrfs_cleanup_bg_io(struct btrfs_block_group_cache *cache) |
4365 | { | |
4366 | struct inode *inode; | |
4367 | ||
4368 | inode = cache->io_ctl.inode; | |
4369 | if (inode) { | |
4370 | invalidate_inode_pages2(inode->i_mapping); | |
4371 | BTRFS_I(inode)->generation = 0; | |
4372 | cache->io_ctl.inode = NULL; | |
4373 | iput(inode); | |
4374 | } | |
4375 | btrfs_put_block_group(cache); | |
4376 | } | |
4377 | ||
4378 | void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans, | |
2ff7e61e | 4379 | struct btrfs_fs_info *fs_info) |
c79a1751 LB |
4380 | { |
4381 | struct btrfs_block_group_cache *cache; | |
4382 | ||
4383 | spin_lock(&cur_trans->dirty_bgs_lock); | |
4384 | while (!list_empty(&cur_trans->dirty_bgs)) { | |
4385 | cache = list_first_entry(&cur_trans->dirty_bgs, | |
4386 | struct btrfs_block_group_cache, | |
4387 | dirty_list); | |
c79a1751 LB |
4388 | |
4389 | if (!list_empty(&cache->io_list)) { | |
4390 | spin_unlock(&cur_trans->dirty_bgs_lock); | |
4391 | list_del_init(&cache->io_list); | |
4392 | btrfs_cleanup_bg_io(cache); | |
4393 | spin_lock(&cur_trans->dirty_bgs_lock); | |
4394 | } | |
4395 | ||
4396 | list_del_init(&cache->dirty_list); | |
4397 | spin_lock(&cache->lock); | |
4398 | cache->disk_cache_state = BTRFS_DC_ERROR; | |
4399 | spin_unlock(&cache->lock); | |
4400 | ||
4401 | spin_unlock(&cur_trans->dirty_bgs_lock); | |
4402 | btrfs_put_block_group(cache); | |
4403 | spin_lock(&cur_trans->dirty_bgs_lock); | |
4404 | } | |
4405 | spin_unlock(&cur_trans->dirty_bgs_lock); | |
4406 | ||
45ae2c18 NB |
4407 | /* |
4408 | * Refer to the definition of the io_bgs member for details on why | |
4409 | * it is safe to use it without any locking. | |
4410 | */ | |
c79a1751 LB |
4411 | while (!list_empty(&cur_trans->io_bgs)) { |
4412 | cache = list_first_entry(&cur_trans->io_bgs, | |
4413 | struct btrfs_block_group_cache, | |
4414 | io_list); | |
c79a1751 LB |
4415 | |
4416 | list_del_init(&cache->io_list); | |
4417 | spin_lock(&cache->lock); | |
4418 | cache->disk_cache_state = BTRFS_DC_ERROR; | |
4419 | spin_unlock(&cache->lock); | |
4420 | btrfs_cleanup_bg_io(cache); | |
4421 | } | |
4422 | } | |
4423 | ||
49b25e05 | 4424 | void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans, |
2ff7e61e | 4425 | struct btrfs_fs_info *fs_info) |
49b25e05 | 4426 | { |
2ff7e61e | 4427 | btrfs_cleanup_dirty_bgs(cur_trans, fs_info); |
c79a1751 LB |
4428 | ASSERT(list_empty(&cur_trans->dirty_bgs)); |
4429 | ASSERT(list_empty(&cur_trans->io_bgs)); | |
4430 | ||
2ff7e61e | 4431 | btrfs_destroy_delayed_refs(cur_trans, fs_info); |
49b25e05 | 4432 | |
4a9d8bde | 4433 | cur_trans->state = TRANS_STATE_COMMIT_START; |
0b246afa | 4434 | wake_up(&fs_info->transaction_blocked_wait); |
49b25e05 | 4435 | |
4a9d8bde | 4436 | cur_trans->state = TRANS_STATE_UNBLOCKED; |
0b246afa | 4437 | wake_up(&fs_info->transaction_wait); |
49b25e05 | 4438 | |
ccdf9b30 JM |
4439 | btrfs_destroy_delayed_inodes(fs_info); |
4440 | btrfs_assert_delayed_root_empty(fs_info); | |
49b25e05 | 4441 | |
2ff7e61e | 4442 | btrfs_destroy_marked_extents(fs_info, &cur_trans->dirty_pages, |
49b25e05 | 4443 | EXTENT_DIRTY); |
2ff7e61e | 4444 | btrfs_destroy_pinned_extent(fs_info, |
0b246afa | 4445 | fs_info->pinned_extents); |
49b25e05 | 4446 | |
4a9d8bde MX |
4447 | cur_trans->state = TRANS_STATE_COMPLETED; | |
4448 | wake_up(&cur_trans->commit_wait); | |
49b25e05 JM |
4449 | } |
4450 | ||
2ff7e61e | 4451 | static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info) |
acce952b | 4452 | { |
4453 | struct btrfs_transaction *t; | |
acce952b | 4454 | |
0b246afa | 4455 | mutex_lock(&fs_info->transaction_kthread_mutex); |
acce952b | 4456 | |
0b246afa JM |
4457 | spin_lock(&fs_info->trans_lock); |
4458 | while (!list_empty(&fs_info->trans_list)) { | |
4459 | t = list_first_entry(&fs_info->trans_list, | |
724e2315 JB |
4460 | struct btrfs_transaction, list); |
4461 | if (t->state >= TRANS_STATE_COMMIT_START) { | |
9b64f57d | 4462 | refcount_inc(&t->use_count); |
0b246afa | 4463 | spin_unlock(&fs_info->trans_lock); |
2ff7e61e | 4464 | btrfs_wait_for_commit(fs_info, t->transid); |
724e2315 | 4465 | btrfs_put_transaction(t); |
0b246afa | 4466 | spin_lock(&fs_info->trans_lock); |
724e2315 JB |
4467 | continue; |
4468 | } | |
0b246afa | 4469 | if (t == fs_info->running_transaction) { |
724e2315 | 4470 | t->state = TRANS_STATE_COMMIT_DOING; |
0b246afa | 4471 | spin_unlock(&fs_info->trans_lock); |
724e2315 JB |
4472 | /* |
4473 | * We wait for num_writers to reach 0 since we don't currently | |
4474 | * hold a trans handle open for this transaction. | |
4475 | */ | |
4476 | wait_event(t->writer_wait, | |
4477 | atomic_read(&t->num_writers) == 0); | |
4478 | } else { | |
0b246afa | 4479 | spin_unlock(&fs_info->trans_lock); |
724e2315 | 4480 | } |
2ff7e61e | 4481 | btrfs_cleanup_one_transaction(t, fs_info); |
4a9d8bde | 4482 | |
0b246afa JM |
4483 | spin_lock(&fs_info->trans_lock); |
4484 | if (t == fs_info->running_transaction) | |
4485 | fs_info->running_transaction = NULL; | |
acce952b | 4486 | list_del_init(&t->list); |
0b246afa | 4487 | spin_unlock(&fs_info->trans_lock); |
acce952b | 4488 | |
724e2315 | 4489 | btrfs_put_transaction(t); |
2ff7e61e | 4490 | trace_btrfs_transaction_commit(fs_info->tree_root); |
0b246afa | 4491 | spin_lock(&fs_info->trans_lock); |
724e2315 | 4492 | } |
0b246afa JM |
4493 | spin_unlock(&fs_info->trans_lock); |
4494 | btrfs_destroy_all_ordered_extents(fs_info); | |
ccdf9b30 JM |
4495 | btrfs_destroy_delayed_inodes(fs_info); |
4496 | btrfs_assert_delayed_root_empty(fs_info); | |
2ff7e61e | 4497 | btrfs_destroy_pinned_extent(fs_info, fs_info->pinned_extents); |
0b246afa JM |
4498 | btrfs_destroy_all_delalloc_inodes(fs_info); |
4499 | mutex_unlock(&fs_info->transaction_kthread_mutex); | |
acce952b | 4500 | |
4501 | return 0; | |
4502 | } | |
4503 | ||
e8c9f186 | 4504 | static const struct extent_io_ops btree_extent_io_ops = { |
4d53dddb | 4505 | /* mandatory callbacks */ |
0b86a832 | 4506 | .submit_bio_hook = btree_submit_bio_hook, |
4d53dddb | 4507 | .readpage_end_io_hook = btree_readpage_end_io_hook, |
20a7db8a | 4508 | .readpage_io_failed_hook = btree_io_failed_hook, |
4d53dddb DS |
4509 | |
4510 | /* optional callbacks */ | |
0da5468f | 4511 | }; |