// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/radix-tree.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/migrate.h>
#include <linux/ratelimit.h>
#include <linux/uuid.h>
#include <linux/semaphore.h>
#include <linux/error-injection.h>
#include <linux/crc32c.h>
#include <asm/unaligned.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "print-tree.h"
#include "locking.h"
#include "tree-log.h"
#include "free-space-cache.h"
#include "free-space-tree.h"
#include "inode-map.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "dev-replace.h"
#include "raid56.h"
#include "sysfs.h"
#include "qgroup.h"
#include "compression.h"
#include "tree-checker.h"
#include "ref-verify.h"

#ifdef CONFIG_X86
#include <asm/cpufeature.h>
#endif

#define BTRFS_SUPER_FLAG_SUPP   (BTRFS_HEADER_FLAG_WRITTEN |\
                                 BTRFS_HEADER_FLAG_RELOC |\
                                 BTRFS_SUPER_FLAG_ERROR |\
                                 BTRFS_SUPER_FLAG_SEEDING |\
                                 BTRFS_SUPER_FLAG_METADUMP |\
                                 BTRFS_SUPER_FLAG_METADUMP_V2)

static const struct extent_io_ops btree_extent_io_ops;
static void end_workqueue_fn(struct btrfs_work *work);
static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
                                      struct btrfs_fs_info *fs_info);
static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root);
static int btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
                                        struct extent_io_tree *dirty_pages,
                                        int mark);
static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
                                       struct extent_io_tree *pinned_extents);
static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info);
static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info);

/*
 * btrfs_end_io_wq structs are used to do processing in task context when an IO
 * is complete.  This is used during reads to verify checksums, and it is used
 * by writes to insert metadata for new file extents after IO is complete.
 */
struct btrfs_end_io_wq {
        struct bio *bio;
        bio_end_io_t *end_io;
        void *private;
        struct btrfs_fs_info *info;
        blk_status_t status;
        enum btrfs_wq_endio_type metadata;
        struct btrfs_work work;
};

static struct kmem_cache *btrfs_end_io_wq_cache;

int __init btrfs_end_io_wq_init(void)
{
        btrfs_end_io_wq_cache = kmem_cache_create("btrfs_end_io_wq",
                                        sizeof(struct btrfs_end_io_wq),
                                        0,
                                        SLAB_MEM_SPREAD,
                                        NULL);
        if (!btrfs_end_io_wq_cache)
                return -ENOMEM;
        return 0;
}

void __cold btrfs_end_io_wq_exit(void)
{
        kmem_cache_destroy(btrfs_end_io_wq_cache);
}

/*
 * async submit bios are used to offload expensive checksumming
 * onto the worker threads.  They checksum file and metadata bios
 * just before they are sent down the IO stack.
 */
struct async_submit_bio {
        void *private_data;
        struct bio *bio;
        extent_submit_bio_start_t *submit_bio_start;
        int mirror_num;
        /*
         * bio_offset is optional, can be used if the pages in the bio
         * can't tell us where in the file the bio should go
         */
        u64 bio_offset;
        struct btrfs_work work;
        blk_status_t status;
};

/*
 * Lockdep class keys for extent_buffer->lock's in this root.  For a given
 * eb, the lockdep key is determined by the btrfs_root it belongs to and
 * the level the eb occupies in the tree.
 *
 * Different roots are used for different purposes and may nest inside each
 * other and they require separate keysets.  As lockdep keys should be
 * static, assign keysets according to the purpose of the root as indicated
 * by btrfs_root->root_key.objectid.  This ensures that all special purpose
 * roots have separate keysets.
 *
 * Lock-nesting across peer nodes is always done with the immediate parent
 * node locked thus preventing deadlock.  As lockdep doesn't know this, use
 * subclass to avoid triggering lockdep warning in such cases.
 *
 * The key is set by the readpage_end_io_hook after the buffer has passed
 * csum validation but before the pages are unlocked.  It is also set by
 * btrfs_init_new_buffer on freshly allocated blocks.
 *
 * We also add a check to make sure the highest level of the tree is the
 * same as our lockdep setup here.  If BTRFS_MAX_LEVEL changes, this code
 * needs update as well.
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# if BTRFS_MAX_LEVEL != 8
#  error
# endif

static struct btrfs_lockdep_keyset {
        u64                     id;             /* root objectid */
        const char              *name_stem;     /* lock name stem */
        char                    names[BTRFS_MAX_LEVEL + 1][20];
        struct lock_class_key   keys[BTRFS_MAX_LEVEL + 1];
} btrfs_lockdep_keysets[] = {
        { .id = BTRFS_ROOT_TREE_OBJECTID,       .name_stem = "root"     },
        { .id = BTRFS_EXTENT_TREE_OBJECTID,     .name_stem = "extent"   },
        { .id = BTRFS_CHUNK_TREE_OBJECTID,      .name_stem = "chunk"    },
        { .id = BTRFS_DEV_TREE_OBJECTID,        .name_stem = "dev"      },
        { .id = BTRFS_FS_TREE_OBJECTID,         .name_stem = "fs"       },
        { .id = BTRFS_CSUM_TREE_OBJECTID,       .name_stem = "csum"     },
        { .id = BTRFS_QUOTA_TREE_OBJECTID,      .name_stem = "quota"    },
        { .id = BTRFS_TREE_LOG_OBJECTID,        .name_stem = "log"      },
        { .id = BTRFS_TREE_RELOC_OBJECTID,      .name_stem = "treloc"   },
        { .id = BTRFS_DATA_RELOC_TREE_OBJECTID, .name_stem = "dreloc"   },
        { .id = BTRFS_UUID_TREE_OBJECTID,       .name_stem = "uuid"     },
        { .id = BTRFS_FREE_SPACE_TREE_OBJECTID, .name_stem = "free-space" },
        { .id = 0,                              .name_stem = "tree"     },
};

void __init btrfs_init_lockdep(void)
{
        int i, j;

        /* initialize lockdep class names */
        for (i = 0; i < ARRAY_SIZE(btrfs_lockdep_keysets); i++) {
                struct btrfs_lockdep_keyset *ks = &btrfs_lockdep_keysets[i];

                for (j = 0; j < ARRAY_SIZE(ks->names); j++)
                        snprintf(ks->names[j], sizeof(ks->names[j]),
                                 "btrfs-%s-%02d", ks->name_stem, j);
        }
}

void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb,
                                    int level)
{
        struct btrfs_lockdep_keyset *ks;

        BUG_ON(level >= ARRAY_SIZE(ks->keys));

        /* find the matching keyset, id 0 is the default entry */
        for (ks = btrfs_lockdep_keysets; ks->id; ks++)
                if (ks->id == objectid)
                        break;

        lockdep_set_class_and_name(&eb->lock,
                                   &ks->keys[level], ks->names[level]);
}

#endif
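
/*
 * Illustrative note added by the editor, not part of the original source:
 * given the "btrfs-%s-%02d" format above, the generated lockdep class names
 * look like "btrfs-extent-00" ... "btrfs-extent-08" for the extent tree and
 * "btrfs-tree-03" for any root that falls through to the default entry, so
 * a lockdep report names both the tree purpose and the level involved.
 */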

/*
 * extents on the btree inode are pretty simple, there's one extent
 * that covers the entire device
 */
struct extent_map *btree_get_extent(struct btrfs_inode *inode,
                struct page *page, size_t pg_offset, u64 start, u64 len,
                int create)
{
        struct btrfs_fs_info *fs_info = inode->root->fs_info;
        struct extent_map_tree *em_tree = &inode->extent_tree;
        struct extent_map *em;
        int ret;

        read_lock(&em_tree->lock);
        em = lookup_extent_mapping(em_tree, start, len);
        if (em) {
                em->bdev = fs_info->fs_devices->latest_bdev;
                read_unlock(&em_tree->lock);
                goto out;
        }
        read_unlock(&em_tree->lock);

        em = alloc_extent_map();
        if (!em) {
                em = ERR_PTR(-ENOMEM);
                goto out;
        }
        em->start = 0;
        em->len = (u64)-1;
        em->block_len = (u64)-1;
        em->block_start = 0;
        em->bdev = fs_info->fs_devices->latest_bdev;

        write_lock(&em_tree->lock);
        ret = add_extent_mapping(em_tree, em, 0);
        if (ret == -EEXIST) {
                free_extent_map(em);
                em = lookup_extent_mapping(em_tree, start, len);
                if (!em)
                        em = ERR_PTR(-EIO);
        } else if (ret) {
                free_extent_map(em);
                em = ERR_PTR(ret);
        }
        write_unlock(&em_tree->lock);

out:
        return em;
}

u32 btrfs_csum_data(const char *data, u32 seed, size_t len)
{
        return crc32c(seed, data, len);
}

void btrfs_csum_final(u32 crc, u8 *result)
{
        put_unaligned_le32(~crc, result);
}
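
/*
 * Illustrative sketch (editor's note, not in the original file): the two
 * helpers above are meant to be chained, roughly
 *
 *      u32 crc = ~(u32)0;
 *      crc = btrfs_csum_data(buf_a, crc, len_a);
 *      crc = btrfs_csum_data(buf_b, crc, len_b);
 *      btrfs_csum_final(crc, csum);    (inverts crc, stores it little-endian)
 *
 * which is exactly the pattern csum_tree_block() below follows while it
 * walks an extent buffer one mapped chunk at a time.
 */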

/*
 * compute the csum for a btree block, and either verify it or write it
 * into the csum field of the block.
 */
static int csum_tree_block(struct btrfs_fs_info *fs_info,
                           struct extent_buffer *buf,
                           int verify)
{
        u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
        char result[BTRFS_CSUM_SIZE];
        unsigned long len;
        unsigned long cur_len;
        unsigned long offset = BTRFS_CSUM_SIZE;
        char *kaddr;
        unsigned long map_start;
        unsigned long map_len;
        int err;
        u32 crc = ~(u32)0;

        len = buf->len - offset;
        while (len > 0) {
                /*
                 * Note: we don't need to check for the err == 1 case here, as
                 * with the given combination of 'start = BTRFS_CSUM_SIZE (32)'
                 * and 'min_len = 32' and the currently implemented mapping
                 * algorithm we cannot cross a page boundary.
                 */
                err = map_private_extent_buffer(buf, offset, 32,
                                        &kaddr, &map_start, &map_len);
                if (err)
                        return err;
                cur_len = min(len, map_len - (offset - map_start));
                crc = btrfs_csum_data(kaddr + offset - map_start,
                                      crc, cur_len);
                len -= cur_len;
                offset += cur_len;
        }
        memset(result, 0, BTRFS_CSUM_SIZE);

        btrfs_csum_final(crc, result);

        if (verify) {
                if (memcmp_extent_buffer(buf, result, 0, csum_size)) {
                        u32 val;
                        u32 found = 0;
                        memcpy(&found, result, csum_size);

                        read_extent_buffer(buf, &val, 0, csum_size);
                        btrfs_warn_rl(fs_info,
                                "%s checksum verify failed on %llu wanted %X found %X level %d",
                                fs_info->sb->s_id, buf->start,
                                val, found, btrfs_header_level(buf));
                        return -EUCLEAN;
                }
        } else {
                write_extent_buffer(buf, result, 0, csum_size);
        }

        return 0;
}

/*
 * we can't consider a given block up to date unless the transid of the
 * block matches the transid in the parent node's pointer.  This is how we
 * detect blocks that either didn't get written at all or got written
 * in the wrong place.
 */
static int verify_parent_transid(struct extent_io_tree *io_tree,
                                 struct extent_buffer *eb, u64 parent_transid,
                                 int atomic)
{
        struct extent_state *cached_state = NULL;
        int ret;
        bool need_lock = (current->journal_info == BTRFS_SEND_TRANS_STUB);

        if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
                return 0;

        if (atomic)
                return -EAGAIN;

        if (need_lock) {
                btrfs_tree_read_lock(eb);
                btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
        }

        lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
                         &cached_state);
        if (extent_buffer_uptodate(eb) &&
            btrfs_header_generation(eb) == parent_transid) {
                ret = 0;
                goto out;
        }
        btrfs_err_rl(eb->fs_info,
                "parent transid verify failed on %llu wanted %llu found %llu",
                        eb->start,
                        parent_transid, btrfs_header_generation(eb));
        ret = 1;

        /*
         * Things reading via commit roots that don't have normal protection,
         * like send, can have a really old block in cache that may point at a
         * block that has been freed and re-allocated.  So don't clear uptodate
         * if we find an eb that is under IO (dirty/writeback) because we could
         * end up reading in the stale data and then writing it back out and
         * making everybody very sad.
         */
        if (!extent_buffer_under_io(eb))
                clear_extent_buffer_uptodate(eb);
out:
        unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
                             &cached_state);
        if (need_lock)
                btrfs_tree_read_unlock_blocking(eb);
        return ret;
}

/*
 * Return 0 if the superblock checksum type matches the checksum value of that
 * algorithm.  Pass the raw disk superblock data.
 */
static int btrfs_check_super_csum(struct btrfs_fs_info *fs_info,
                                  char *raw_disk_sb)
{
        struct btrfs_super_block *disk_sb =
                (struct btrfs_super_block *)raw_disk_sb;
        u16 csum_type = btrfs_super_csum_type(disk_sb);
        int ret = 0;

        if (csum_type == BTRFS_CSUM_TYPE_CRC32) {
                u32 crc = ~(u32)0;
                char result[sizeof(crc)];

                /*
                 * The super_block structure does not span the whole
                 * BTRFS_SUPER_INFO_SIZE range, we expect that the unused space
                 * is filled with zeros and is included in the checksum.
                 */
                crc = btrfs_csum_data(raw_disk_sb + BTRFS_CSUM_SIZE,
                                crc, BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
                btrfs_csum_final(crc, result);

                if (memcmp(raw_disk_sb, result, sizeof(result)))
                        ret = 1;
        }

        if (csum_type >= ARRAY_SIZE(btrfs_csum_sizes)) {
                btrfs_err(fs_info, "unsupported checksum algorithm %u",
                          csum_type);
                ret = 1;
        }

        return ret;
}
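
/*
 * Editor's note (illustrative, not from the original source): on disk the
 * first BTRFS_CSUM_SIZE bytes of the superblock hold the stored checksum,
 * and everything from byte BTRFS_CSUM_SIZE up to BTRFS_SUPER_INFO_SIZE is
 * what gets fed to btrfs_csum_data() above, including the zero filled space
 * past the end of struct btrfs_super_block mentioned in the comment.
 */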

static int verify_level_key(struct btrfs_fs_info *fs_info,
                            struct extent_buffer *eb, int level,
                            struct btrfs_key *first_key, u64 parent_transid)
{
        int found_level;
        struct btrfs_key found_key;
        int ret;

        found_level = btrfs_header_level(eb);
        if (found_level != level) {
#ifdef CONFIG_BTRFS_DEBUG
                WARN_ON(1);
                btrfs_err(fs_info,
"tree level mismatch detected, bytenr=%llu level expected=%u has=%u",
                          eb->start, level, found_level);
#endif
                return -EIO;
        }

        if (!first_key)
                return 0;

        /*
         * For live tree blocks (new tree blocks in the current transaction),
         * we need proper lock context to avoid races, which is impossible
         * here.  So we only check tree blocks which are read from disk, whose
         * generation <= fs_info->last_trans_committed.
         */
        if (btrfs_header_generation(eb) > fs_info->last_trans_committed)
                return 0;
        if (found_level)
                btrfs_node_key_to_cpu(eb, &found_key, 0);
        else
                btrfs_item_key_to_cpu(eb, &found_key, 0);
        ret = btrfs_comp_cpu_keys(first_key, &found_key);

#ifdef CONFIG_BTRFS_DEBUG
        if (ret) {
                WARN_ON(1);
                btrfs_err(fs_info,
"tree first key mismatch detected, bytenr=%llu parent_transid=%llu key expected=(%llu,%u,%llu) has=(%llu,%u,%llu)",
                          eb->start, parent_transid, first_key->objectid,
                          first_key->type, first_key->offset,
                          found_key.objectid, found_key.type,
                          found_key.offset);
        }
#endif
        return ret;
}

/*
 * helper to read a given tree block, doing retries as required when
 * the checksums don't match and we have alternate mirrors to try.
 *
 * @parent_transid:     expected transid, skip check if 0
 * @level:              expected level, mandatory check
 * @first_key:          expected key of first slot, skip check if NULL
 */
static int btree_read_extent_buffer_pages(struct btrfs_fs_info *fs_info,
                                          struct extent_buffer *eb,
                                          u64 parent_transid, int level,
                                          struct btrfs_key *first_key)
{
        struct extent_io_tree *io_tree;
        int failed = 0;
        int ret;
        int num_copies = 0;
        int mirror_num = 0;
        int failed_mirror = 0;

        io_tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
        while (1) {
                clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
                ret = read_extent_buffer_pages(io_tree, eb, WAIT_COMPLETE,
                                               mirror_num);
                if (!ret) {
                        if (verify_parent_transid(io_tree, eb,
                                                  parent_transid, 0))
                                ret = -EIO;
                        else if (verify_level_key(fs_info, eb, level,
                                                  first_key, parent_transid))
                                ret = -EUCLEAN;
                        else
                                break;
                }

                num_copies = btrfs_num_copies(fs_info,
                                              eb->start, eb->len);
                if (num_copies == 1)
                        break;

                if (!failed_mirror) {
                        failed = 1;
                        failed_mirror = eb->read_mirror;
                }

                mirror_num++;
                if (mirror_num == failed_mirror)
                        mirror_num++;

                if (mirror_num > num_copies)
                        break;
        }

        if (failed && !ret && failed_mirror)
                repair_eb_io_failure(fs_info, eb, failed_mirror);

        return ret;
}

/*
 * checksum a dirty tree block before IO.  This has extra checks to make sure
 * we only fill in the checksum field in the first page of a multi-page block
 */
static int csum_dirty_buffer(struct btrfs_fs_info *fs_info, struct page *page)
{
        u64 start = page_offset(page);
        u64 found_start;
        struct extent_buffer *eb;

        eb = (struct extent_buffer *)page->private;
        if (page != eb->pages[0])
                return 0;

        found_start = btrfs_header_bytenr(eb);
        /*
         * Please do not consolidate these warnings into a single if.
         * It is useful to know what went wrong.
         */
        if (WARN_ON(found_start != start))
                return -EUCLEAN;
        if (WARN_ON(!PageUptodate(page)))
                return -EUCLEAN;

        ASSERT(memcmp_extent_buffer(eb, fs_info->fs_devices->metadata_uuid,
                        btrfs_header_fsid(), BTRFS_FSID_SIZE) == 0);

        return csum_tree_block(fs_info, eb, 0);
}

static int check_tree_block_fsid(struct btrfs_fs_info *fs_info,
                                 struct extent_buffer *eb)
{
        struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
        u8 fsid[BTRFS_FSID_SIZE];
        int ret = 1;

        read_extent_buffer(eb, fsid, btrfs_header_fsid(), BTRFS_FSID_SIZE);
        while (fs_devices) {
                u8 *metadata_uuid;

                /*
                 * Checking the incompat flag is only valid for the current
                 * fs.  For seed devices it's forbidden to have their uuid
                 * changed so reading ->fsid in this case is fine
                 */
                if (fs_devices == fs_info->fs_devices &&
                    btrfs_fs_incompat(fs_info, METADATA_UUID))
                        metadata_uuid = fs_devices->metadata_uuid;
                else
                        metadata_uuid = fs_devices->fsid;

                if (!memcmp(fsid, metadata_uuid, BTRFS_FSID_SIZE)) {
                        ret = 0;
                        break;
                }
                fs_devices = fs_devices->seed;
        }
        return ret;
}

static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
                                      u64 phy_offset, struct page *page,
                                      u64 start, u64 end, int mirror)
{
        u64 found_start;
        int found_level;
        struct extent_buffer *eb;
        struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
        struct btrfs_fs_info *fs_info = root->fs_info;
        int ret = 0;
        int reads_done;

        if (!page->private)
                goto out;

        eb = (struct extent_buffer *)page->private;

        /*
         * the pending IO might have been the only thing that kept this buffer
         * in memory.  Make sure we have a ref for all these other checks
         */
        extent_buffer_get(eb);

        reads_done = atomic_dec_and_test(&eb->io_pages);
        if (!reads_done)
                goto err;

        eb->read_mirror = mirror;
        if (test_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags)) {
                ret = -EIO;
                goto err;
        }

        found_start = btrfs_header_bytenr(eb);
        if (found_start != eb->start) {
                btrfs_err_rl(fs_info, "bad tree block start, want %llu have %llu",
                             eb->start, found_start);
                ret = -EIO;
                goto err;
        }
        if (check_tree_block_fsid(fs_info, eb)) {
                btrfs_err_rl(fs_info, "bad fsid on block %llu",
                             eb->start);
                ret = -EIO;
                goto err;
        }
        found_level = btrfs_header_level(eb);
        if (found_level >= BTRFS_MAX_LEVEL) {
                btrfs_err(fs_info, "bad tree block level %d on %llu",
                          (int)btrfs_header_level(eb), eb->start);
                ret = -EIO;
                goto err;
        }

        btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb),
                                       eb, found_level);

        ret = csum_tree_block(fs_info, eb, 1);
        if (ret)
                goto err;

        /*
         * If this is a leaf block and it is corrupt, set the corrupt bit so
         * that we don't try and read the other copies of this block, just
         * return -EIO.
         */
        if (found_level == 0 && btrfs_check_leaf_full(fs_info, eb)) {
                set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
                ret = -EIO;
        }

        if (found_level > 0 && btrfs_check_node(fs_info, eb))
                ret = -EIO;

        if (!ret)
                set_extent_buffer_uptodate(eb);
err:
        if (reads_done &&
            test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
                btree_readahead_hook(eb, ret);

        if (ret) {
                /*
                 * our io error hook is going to dec the io pages
                 * again, we have to make sure it has something
                 * to decrement
                 */
                atomic_inc(&eb->io_pages);
                clear_extent_buffer_uptodate(eb);
        }
        free_extent_buffer(eb);
out:
        return ret;
}

static void end_workqueue_bio(struct bio *bio)
{
        struct btrfs_end_io_wq *end_io_wq = bio->bi_private;
        struct btrfs_fs_info *fs_info;
        struct btrfs_workqueue *wq;
        btrfs_work_func_t func;

        fs_info = end_io_wq->info;
        end_io_wq->status = bio->bi_status;

        if (bio_op(bio) == REQ_OP_WRITE) {
                if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA) {
                        wq = fs_info->endio_meta_write_workers;
                        func = btrfs_endio_meta_write_helper;
                } else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE) {
                        wq = fs_info->endio_freespace_worker;
                        func = btrfs_freespace_write_helper;
                } else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) {
                        wq = fs_info->endio_raid56_workers;
                        func = btrfs_endio_raid56_helper;
                } else {
                        wq = fs_info->endio_write_workers;
                        func = btrfs_endio_write_helper;
                }
        } else {
                if (unlikely(end_io_wq->metadata ==
                             BTRFS_WQ_ENDIO_DIO_REPAIR)) {
                        wq = fs_info->endio_repair_workers;
                        func = btrfs_endio_repair_helper;
                } else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) {
                        wq = fs_info->endio_raid56_workers;
                        func = btrfs_endio_raid56_helper;
                } else if (end_io_wq->metadata) {
                        wq = fs_info->endio_meta_workers;
                        func = btrfs_endio_meta_helper;
                } else {
                        wq = fs_info->endio_workers;
                        func = btrfs_endio_helper;
                }
        }

        btrfs_init_work(&end_io_wq->work, func, end_workqueue_fn, NULL, NULL);
        btrfs_queue_work(wq, &end_io_wq->work);
}

blk_status_t btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
                        enum btrfs_wq_endio_type metadata)
{
        struct btrfs_end_io_wq *end_io_wq;

        end_io_wq = kmem_cache_alloc(btrfs_end_io_wq_cache, GFP_NOFS);
        if (!end_io_wq)
                return BLK_STS_RESOURCE;

        end_io_wq->private = bio->bi_private;
        end_io_wq->end_io = bio->bi_end_io;
        end_io_wq->info = info;
        end_io_wq->status = 0;
        end_io_wq->bio = bio;
        end_io_wq->metadata = metadata;

        bio->bi_private = end_io_wq;
        bio->bi_end_io = end_workqueue_bio;
        return 0;
}
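
/*
 * Editor's sketch (illustrative, not part of the original file): the pair
 * above implements a completion-hook detour.  A caller typically does
 *
 *      btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_METADATA);
 *      btrfs_map_bio(fs_info, bio, mirror_num, 0);
 *
 * so that when the bio completes, end_workqueue_bio() runs in completion
 * context, picks a workqueue by operation and metadata type, and defers the
 * heavy completion work to end_workqueue_fn() in task context, where the
 * saved ->private and ->end_io are eventually handed back to the caller.
 */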

static void run_one_async_start(struct btrfs_work *work)
{
        struct async_submit_bio *async;
        blk_status_t ret;

        async = container_of(work, struct async_submit_bio, work);
        ret = async->submit_bio_start(async->private_data, async->bio,
                                      async->bio_offset);
        if (ret)
                async->status = ret;
}

/*
 * In order to insert checksums into the metadata in large chunks, we wait
 * until bio submission time.  All the pages in the bio are checksummed and
 * sums are attached onto the ordered extent record.
 *
 * At IO completion time the csums attached on the ordered extent record are
 * inserted into the tree.
 */
static void run_one_async_done(struct btrfs_work *work)
{
        struct async_submit_bio *async;
        struct inode *inode;
        blk_status_t ret;

        async = container_of(work, struct async_submit_bio, work);
        inode = async->private_data;

        /* If an error occurred we just want to clean up the bio and move on */
        if (async->status) {
                async->bio->bi_status = async->status;
                bio_endio(async->bio);
                return;
        }

        ret = btrfs_map_bio(btrfs_sb(inode->i_sb), async->bio,
                        async->mirror_num, 1);
        if (ret) {
                async->bio->bi_status = ret;
                bio_endio(async->bio);
        }
}

static void run_one_async_free(struct btrfs_work *work)
{
        struct async_submit_bio *async;

        async = container_of(work, struct async_submit_bio, work);
        kfree(async);
}

blk_status_t btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
                                 int mirror_num, unsigned long bio_flags,
                                 u64 bio_offset, void *private_data,
                                 extent_submit_bio_start_t *submit_bio_start)
{
        struct async_submit_bio *async;

        async = kmalloc(sizeof(*async), GFP_NOFS);
        if (!async)
                return BLK_STS_RESOURCE;

        async->private_data = private_data;
        async->bio = bio;
        async->mirror_num = mirror_num;
        async->submit_bio_start = submit_bio_start;

        btrfs_init_work(&async->work, btrfs_worker_helper, run_one_async_start,
                        run_one_async_done, run_one_async_free);

        async->bio_offset = bio_offset;

        async->status = 0;

        if (op_is_sync(bio->bi_opf))
                btrfs_set_work_high_priority(&async->work);

        btrfs_queue_work(fs_info->workers, &async->work);
        return 0;
}
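
/*
 * Editor's note (illustrative): btrfs_wq_submit_bio() wires the three
 * handlers above into a single btrfs_work item, so a queued bio goes through
 * run_one_async_start() (checksumming via ->submit_bio_start), then
 * run_one_async_done() (the actual btrfs_map_bio() submission, or bio_endio()
 * on error), then run_one_async_free().  Bios marked op_is_sync() are put on
 * the high-priority list so synchronous writers are served ahead of
 * background work.
 */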

static blk_status_t btree_csum_one_bio(struct bio *bio)
{
        struct bio_vec *bvec;
        struct btrfs_root *root;
        int i, ret = 0;

        ASSERT(!bio_flagged(bio, BIO_CLONED));
        bio_for_each_segment_all(bvec, bio, i) {
                root = BTRFS_I(bvec->bv_page->mapping->host)->root;
                ret = csum_dirty_buffer(root->fs_info, bvec->bv_page);
                if (ret)
                        break;
        }

        return errno_to_blk_status(ret);
}

static blk_status_t btree_submit_bio_start(void *private_data, struct bio *bio,
                                           u64 bio_offset)
{
        /*
         * when we're called for a write, we're already in the async
         * submission context.  Just jump into btrfs_map_bio
         */
        return btree_csum_one_bio(bio);
}

static int check_async_write(struct btrfs_inode *bi)
{
        if (atomic_read(&bi->sync_writers))
                return 0;
#ifdef CONFIG_X86
        if (static_cpu_has(X86_FEATURE_XMM4_2))
                return 0;
#endif
        return 1;
}

static blk_status_t btree_submit_bio_hook(void *private_data, struct bio *bio,
                                          int mirror_num, unsigned long bio_flags,
                                          u64 bio_offset)
{
        struct inode *inode = private_data;
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        int async = check_async_write(BTRFS_I(inode));
        blk_status_t ret;

        if (bio_op(bio) != REQ_OP_WRITE) {
                /*
                 * called for a read, do the setup so that checksum validation
                 * can happen in the async kernel threads
                 */
                ret = btrfs_bio_wq_end_io(fs_info, bio,
                                          BTRFS_WQ_ENDIO_METADATA);
                if (ret)
                        goto out_w_error;
                ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
        } else if (!async) {
                ret = btree_csum_one_bio(bio);
                if (ret)
                        goto out_w_error;
                ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
        } else {
                /*
                 * kthread helpers are used to submit writes so that
                 * checksumming can happen in parallel across all CPUs
                 */
                ret = btrfs_wq_submit_bio(fs_info, bio, mirror_num, 0,
                                          bio_offset, private_data,
                                          btree_submit_bio_start);
        }

        if (ret)
                goto out_w_error;
        return 0;

out_w_error:
        bio->bi_status = ret;
        bio_endio(bio);
        return ret;
}
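
/*
 * Editor's note (illustrative): the submit path above picks one of three
 * routes.  Reads get a task-context completion hook for checksum
 * verification; writes are checksummed inline when check_async_write() says
 * that is cheap (a waiting sync writer, or hardware-accelerated CRC32C via
 * SSE4.2 on x86); otherwise the write is handed to the worker pool so
 * checksumming can run in parallel across CPUs.
 */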

#ifdef CONFIG_MIGRATION
static int btree_migratepage(struct address_space *mapping,
                        struct page *newpage, struct page *page,
                        enum migrate_mode mode)
{
        /*
         * we can't safely write a btree page from here,
         * we haven't done the locking hook
         */
        if (PageDirty(page))
                return -EAGAIN;
        /*
         * Buffers may be managed in a filesystem specific way.
         * We must have no buffers or drop them.
         */
        if (page_has_private(page) &&
            !try_to_release_page(page, GFP_KERNEL))
                return -EAGAIN;
        return migrate_page(mapping, newpage, page, mode);
}
#endif

static int btree_writepages(struct address_space *mapping,
                            struct writeback_control *wbc)
{
        struct btrfs_fs_info *fs_info;
        int ret;

        if (wbc->sync_mode == WB_SYNC_NONE) {

                if (wbc->for_kupdate)
                        return 0;

                fs_info = BTRFS_I(mapping->host)->root->fs_info;
                /* this is a bit racy, but that's ok */
                ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes,
                                             BTRFS_DIRTY_METADATA_THRESH,
                                             fs_info->dirty_metadata_batch);
                if (ret < 0)
                        return 0;
        }
        return btree_write_cache_pages(mapping, wbc);
}

static int btree_readpage(struct file *file, struct page *page)
{
        struct extent_io_tree *tree;
        tree = &BTRFS_I(page->mapping->host)->io_tree;
        return extent_read_full_page(tree, page, btree_get_extent, 0);
}

static int btree_releasepage(struct page *page, gfp_t gfp_flags)
{
        if (PageWriteback(page) || PageDirty(page))
                return 0;

        return try_release_extent_buffer(page);
}

static void btree_invalidatepage(struct page *page, unsigned int offset,
                                 unsigned int length)
{
        struct extent_io_tree *tree;
        tree = &BTRFS_I(page->mapping->host)->io_tree;
        extent_invalidatepage(tree, page, offset);
        btree_releasepage(page, GFP_NOFS);
        if (PagePrivate(page)) {
                btrfs_warn(BTRFS_I(page->mapping->host)->root->fs_info,
                           "page private not zero on page %llu",
                           (unsigned long long)page_offset(page));
                ClearPagePrivate(page);
                set_page_private(page, 0);
                put_page(page);
        }
}

static int btree_set_page_dirty(struct page *page)
{
#ifdef DEBUG
        struct extent_buffer *eb;

        BUG_ON(!PagePrivate(page));
        eb = (struct extent_buffer *)page->private;
        BUG_ON(!eb);
        BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
        BUG_ON(!atomic_read(&eb->refs));
        btrfs_assert_tree_locked(eb);
#endif
        return __set_page_dirty_nobuffers(page);
}

static const struct address_space_operations btree_aops = {
        .readpage       = btree_readpage,
        .writepages     = btree_writepages,
        .releasepage    = btree_releasepage,
        .invalidatepage = btree_invalidatepage,
#ifdef CONFIG_MIGRATION
        .migratepage    = btree_migratepage,
#endif
        .set_page_dirty = btree_set_page_dirty,
};

void readahead_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr)
{
        struct extent_buffer *buf = NULL;
        struct inode *btree_inode = fs_info->btree_inode;

        buf = btrfs_find_create_tree_block(fs_info, bytenr);
        if (IS_ERR(buf))
                return;
        read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
                                 buf, WAIT_NONE, 0);
        free_extent_buffer(buf);
}

int reada_tree_block_flagged(struct btrfs_fs_info *fs_info, u64 bytenr,
                         int mirror_num, struct extent_buffer **eb)
{
        struct extent_buffer *buf = NULL;
        struct inode *btree_inode = fs_info->btree_inode;
        struct extent_io_tree *io_tree = &BTRFS_I(btree_inode)->io_tree;
        int ret;

        buf = btrfs_find_create_tree_block(fs_info, bytenr);
        if (IS_ERR(buf))
                return 0;

        set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags);

        ret = read_extent_buffer_pages(io_tree, buf, WAIT_PAGE_LOCK,
                                       mirror_num);
        if (ret) {
                free_extent_buffer(buf);
                return ret;
        }

        if (test_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags)) {
                free_extent_buffer(buf);
                return -EIO;
        } else if (extent_buffer_uptodate(buf)) {
                *eb = buf;
        } else {
                free_extent_buffer(buf);
        }
        return 0;
}

struct extent_buffer *btrfs_find_create_tree_block(
                                                struct btrfs_fs_info *fs_info,
                                                u64 bytenr)
{
        if (btrfs_is_testing(fs_info))
                return alloc_test_extent_buffer(fs_info, bytenr);
        return alloc_extent_buffer(fs_info, bytenr);
}

int btrfs_write_tree_block(struct extent_buffer *buf)
{
        return filemap_fdatawrite_range(buf->pages[0]->mapping, buf->start,
                                        buf->start + buf->len - 1);
}

void btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
{
        filemap_fdatawait_range(buf->pages[0]->mapping,
                                buf->start, buf->start + buf->len - 1);
}

/*
 * Read tree block at logical address @bytenr and do basic but critical
 * verification.
 *
 * @parent_transid:     expected transid of this tree block, skip check if 0
 * @level:              expected level, mandatory check
 * @first_key:          expected key in slot 0, skip check if NULL
 */
struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr,
                                      u64 parent_transid, int level,
                                      struct btrfs_key *first_key)
{
        struct extent_buffer *buf = NULL;
        int ret;

        buf = btrfs_find_create_tree_block(fs_info, bytenr);
        if (IS_ERR(buf))
                return buf;

        ret = btree_read_extent_buffer_pages(fs_info, buf, parent_transid,
                                             level, first_key);
        if (ret) {
                free_extent_buffer(buf);
                return ERR_PTR(ret);
        }
        return buf;

}

void clean_tree_block(struct btrfs_fs_info *fs_info,
                      struct extent_buffer *buf)
{
        if (btrfs_header_generation(buf) ==
            fs_info->running_transaction->transid) {
                btrfs_assert_tree_locked(buf);

                if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) {
                        percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
                                                 -buf->len,
                                                 fs_info->dirty_metadata_batch);
                        /* ugh, clear_extent_buffer_dirty needs to lock the page */
                        btrfs_set_lock_blocking(buf);
                        clear_extent_buffer_dirty(buf);
                }
        }
}

static struct btrfs_subvolume_writers *btrfs_alloc_subvolume_writers(void)
{
        struct btrfs_subvolume_writers *writers;
        int ret;

        writers = kmalloc(sizeof(*writers), GFP_NOFS);
        if (!writers)
                return ERR_PTR(-ENOMEM);

        ret = percpu_counter_init(&writers->counter, 0, GFP_NOFS);
        if (ret < 0) {
                kfree(writers);
                return ERR_PTR(ret);
        }

        init_waitqueue_head(&writers->wait);
        return writers;
}

static void
btrfs_free_subvolume_writers(struct btrfs_subvolume_writers *writers)
{
        percpu_counter_destroy(&writers->counter);
        kfree(writers);
}

static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
                         u64 objectid)
{
        bool dummy = test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state);
        root->node = NULL;
        root->commit_root = NULL;
        root->state = 0;
        root->orphan_cleanup_state = 0;

        root->last_trans = 0;
        root->highest_objectid = 0;
        root->nr_delalloc_inodes = 0;
        root->nr_ordered_extents = 0;
        root->inode_tree = RB_ROOT;
        INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC);
        root->block_rsv = NULL;

        INIT_LIST_HEAD(&root->dirty_list);
        INIT_LIST_HEAD(&root->root_list);
        INIT_LIST_HEAD(&root->delalloc_inodes);
        INIT_LIST_HEAD(&root->delalloc_root);
        INIT_LIST_HEAD(&root->ordered_extents);
        INIT_LIST_HEAD(&root->ordered_root);
        INIT_LIST_HEAD(&root->logged_list[0]);
        INIT_LIST_HEAD(&root->logged_list[1]);
        spin_lock_init(&root->inode_lock);
        spin_lock_init(&root->delalloc_lock);
        spin_lock_init(&root->ordered_extent_lock);
        spin_lock_init(&root->accounting_lock);
        spin_lock_init(&root->log_extents_lock[0]);
        spin_lock_init(&root->log_extents_lock[1]);
        spin_lock_init(&root->qgroup_meta_rsv_lock);
        mutex_init(&root->objectid_mutex);
        mutex_init(&root->log_mutex);
        mutex_init(&root->ordered_extent_mutex);
        mutex_init(&root->delalloc_mutex);
        init_waitqueue_head(&root->log_writer_wait);
        init_waitqueue_head(&root->log_commit_wait[0]);
        init_waitqueue_head(&root->log_commit_wait[1]);
        INIT_LIST_HEAD(&root->log_ctxs[0]);
        INIT_LIST_HEAD(&root->log_ctxs[1]);
        atomic_set(&root->log_commit[0], 0);
        atomic_set(&root->log_commit[1], 0);
        atomic_set(&root->log_writers, 0);
        atomic_set(&root->log_batch, 0);
        refcount_set(&root->refs, 1);
        atomic_set(&root->will_be_snapshotted, 0);
        atomic_set(&root->snapshot_force_cow, 0);
        atomic_set(&root->nr_swapfiles, 0);
        root->log_transid = 0;
        root->log_transid_committed = -1;
        root->last_log_commit = 0;
        if (!dummy)
                extent_io_tree_init(&root->dirty_log_pages, NULL);

        memset(&root->root_key, 0, sizeof(root->root_key));
        memset(&root->root_item, 0, sizeof(root->root_item));
        memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
        if (!dummy)
                root->defrag_trans_start = fs_info->generation;
        else
                root->defrag_trans_start = 0;
        root->root_key.objectid = objectid;
        root->anon_dev = 0;

        spin_lock_init(&root->root_item_lock);
}

static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info,
                                           gfp_t flags)
{
        struct btrfs_root *root = kzalloc(sizeof(*root), flags);
        if (root)
                root->fs_info = fs_info;
        return root;
}

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
/* Should only be used by the testing infrastructure */
struct btrfs_root *btrfs_alloc_dummy_root(struct btrfs_fs_info *fs_info)
{
        struct btrfs_root *root;

        if (!fs_info)
                return ERR_PTR(-EINVAL);

        root = btrfs_alloc_root(fs_info, GFP_KERNEL);
        if (!root)
                return ERR_PTR(-ENOMEM);

        /* We don't use the stripesize in selftest, set it as sectorsize */
        __setup_root(root, fs_info, BTRFS_ROOT_TREE_OBJECTID);
        root->alloc_bytenr = 0;

        return root;
}
#endif

struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
                                     struct btrfs_fs_info *fs_info,
                                     u64 objectid)
{
        struct extent_buffer *leaf;
        struct btrfs_root *tree_root = fs_info->tree_root;
        struct btrfs_root *root;
        struct btrfs_key key;
        int ret = 0;
        uuid_le uuid = NULL_UUID_LE;

        root = btrfs_alloc_root(fs_info, GFP_KERNEL);
        if (!root)
                return ERR_PTR(-ENOMEM);

        __setup_root(root, fs_info, objectid);
        root->root_key.objectid = objectid;
        root->root_key.type = BTRFS_ROOT_ITEM_KEY;
        root->root_key.offset = 0;

        leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0);
        if (IS_ERR(leaf)) {
                ret = PTR_ERR(leaf);
                leaf = NULL;
                goto fail;
        }

        root->node = leaf;
        btrfs_mark_buffer_dirty(leaf);

        root->commit_root = btrfs_root_node(root);
        set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);

        root->root_item.flags = 0;
        root->root_item.byte_limit = 0;
        btrfs_set_root_bytenr(&root->root_item, leaf->start);
        btrfs_set_root_generation(&root->root_item, trans->transid);
        btrfs_set_root_level(&root->root_item, 0);
        btrfs_set_root_refs(&root->root_item, 1);
        btrfs_set_root_used(&root->root_item, leaf->len);
        btrfs_set_root_last_snapshot(&root->root_item, 0);
        btrfs_set_root_dirid(&root->root_item, 0);
        if (is_fstree(objectid))
                uuid_le_gen(&uuid);
        memcpy(root->root_item.uuid, uuid.b, BTRFS_UUID_SIZE);
        root->root_item.drop_level = 0;

        key.objectid = objectid;
        key.type = BTRFS_ROOT_ITEM_KEY;
        key.offset = 0;
        ret = btrfs_insert_root(trans, tree_root, &key, &root->root_item);
        if (ret)
                goto fail;

        btrfs_tree_unlock(leaf);

        return root;

fail:
        if (leaf) {
                btrfs_tree_unlock(leaf);
                free_extent_buffer(root->commit_root);
                free_extent_buffer(leaf);
        }
        kfree(root);

        return ERR_PTR(ret);
}

static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
                                         struct btrfs_fs_info *fs_info)
{
        struct btrfs_root *root;
        struct extent_buffer *leaf;

        root = btrfs_alloc_root(fs_info, GFP_NOFS);
        if (!root)
                return ERR_PTR(-ENOMEM);

        __setup_root(root, fs_info, BTRFS_TREE_LOG_OBJECTID);

        root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
        root->root_key.type = BTRFS_ROOT_ITEM_KEY;
        root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;

        /*
         * DON'T set REF_COWS for log trees
         *
         * log trees do not get reference counted because they go away
         * before a real commit is actually done.  They do store pointers
         * to file data extents, and those reference counts still get
         * updated (along with back refs to the log tree).
         */

        leaf = btrfs_alloc_tree_block(trans, root, 0, BTRFS_TREE_LOG_OBJECTID,
                        NULL, 0, 0, 0);
        if (IS_ERR(leaf)) {
                kfree(root);
                return ERR_CAST(leaf);
        }

        root->node = leaf;

        btrfs_mark_buffer_dirty(root->node);
        btrfs_tree_unlock(root->node);
        return root;
}

int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
                             struct btrfs_fs_info *fs_info)
{
        struct btrfs_root *log_root;

        log_root = alloc_log_tree(trans, fs_info);
        if (IS_ERR(log_root))
                return PTR_ERR(log_root);
        WARN_ON(fs_info->log_root_tree);
        fs_info->log_root_tree = log_root;
        return 0;
}

int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
                       struct btrfs_root *root)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_root *log_root;
        struct btrfs_inode_item *inode_item;

        log_root = alloc_log_tree(trans, fs_info);
        if (IS_ERR(log_root))
                return PTR_ERR(log_root);

        log_root->last_trans = trans->transid;
        log_root->root_key.offset = root->root_key.objectid;

        inode_item = &log_root->root_item.inode;
        btrfs_set_stack_inode_generation(inode_item, 1);
        btrfs_set_stack_inode_size(inode_item, 3);
        btrfs_set_stack_inode_nlink(inode_item, 1);
        btrfs_set_stack_inode_nbytes(inode_item,
                                     fs_info->nodesize);
        btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755);

        btrfs_set_root_node(&log_root->root_item, log_root->node);

        WARN_ON(root->log_root);
        root->log_root = log_root;
        root->log_transid = 0;
        root->log_transid_committed = -1;
        root->last_log_commit = 0;
        return 0;
}
35a3621b SB |
1406 | static struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root, |
1407 | struct btrfs_key *key) | |
e02119d5 CM |
1408 | { |
1409 | struct btrfs_root *root; | |
1410 | struct btrfs_fs_info *fs_info = tree_root->fs_info; | |
0f7d52f4 | 1411 | struct btrfs_path *path; |
84234f3a | 1412 | u64 generation; |
cb517eab | 1413 | int ret; |
581c1760 | 1414 | int level; |
0f7d52f4 | 1415 | |
cb517eab MX |
1416 | path = btrfs_alloc_path(); |
1417 | if (!path) | |
0f7d52f4 | 1418 | return ERR_PTR(-ENOMEM); |
cb517eab | 1419 | |
74e4d827 | 1420 | root = btrfs_alloc_root(fs_info, GFP_NOFS); |
cb517eab MX |
1421 | if (!root) { |
1422 | ret = -ENOMEM; | |
1423 | goto alloc_fail; | |
0f7d52f4 CM |
1424 | } |
1425 | ||
da17066c | 1426 | __setup_root(root, fs_info, key->objectid); |
0f7d52f4 | 1427 | |
cb517eab MX |
1428 | ret = btrfs_find_root(tree_root, key, path, |
1429 | &root->root_item, &root->root_key); | |
0f7d52f4 | 1430 | if (ret) { |
13a8a7c8 YZ |
1431 | if (ret > 0) |
1432 | ret = -ENOENT; | |
cb517eab | 1433 | goto find_fail; |
0f7d52f4 | 1434 | } |
13a8a7c8 | 1435 | |
84234f3a | 1436 | generation = btrfs_root_generation(&root->root_item); |
581c1760 | 1437 | level = btrfs_root_level(&root->root_item); |
2ff7e61e JM |
1438 | root->node = read_tree_block(fs_info, |
1439 | btrfs_root_bytenr(&root->root_item), | |
581c1760 | 1440 | generation, level, NULL); |
64c043de LB |
1441 | if (IS_ERR(root->node)) { |
1442 | ret = PTR_ERR(root->node); | |
cb517eab MX |
1443 | goto find_fail; |
1444 | } else if (!btrfs_buffer_uptodate(root->node, generation, 0)) { | |
1445 | ret = -EIO; | |
64c043de LB |
1446 | free_extent_buffer(root->node); |
1447 | goto find_fail; | |
416bc658 | 1448 | } |
5d4f98a2 | 1449 | root->commit_root = btrfs_root_node(root); |
13a8a7c8 | 1450 | out: |
cb517eab MX |
1451 | btrfs_free_path(path); |
1452 | return root; | |
1453 | ||
cb517eab MX |
1454 | find_fail: |
1455 | kfree(root); | |
1456 | alloc_fail: | |
1457 | root = ERR_PTR(ret); | |
1458 | goto out; | |
1459 | } | |
1460 | ||
1461 | struct btrfs_root *btrfs_read_fs_root(struct btrfs_root *tree_root, | |
1462 | struct btrfs_key *location) | |
1463 | { | |
1464 | struct btrfs_root *root; | |
1465 | ||
1466 | root = btrfs_read_tree_root(tree_root, location); | |
1467 | if (IS_ERR(root)) | |
1468 | return root; | |
1469 | ||
1470 | if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) { | |
27cdeb70 | 1471 | set_bit(BTRFS_ROOT_REF_COWS, &root->state); |
08fe4db1 LZ |
1472 | btrfs_check_and_init_root_item(&root->root_item); |
1473 | } | |
13a8a7c8 | 1474 | |
5eda7b5e CM |
1475 | return root; |
1476 | } | |
1477 | ||
cb517eab MX |
1478 | int btrfs_init_fs_root(struct btrfs_root *root) |
1479 | { | |
1480 | int ret; | |
8257b2dc | 1481 | struct btrfs_subvolume_writers *writers; |
cb517eab MX |
1482 | |
1483 | root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS); | |
1484 | root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned), | |
1485 | GFP_NOFS); | |
1486 | if (!root->free_ino_pinned || !root->free_ino_ctl) { | |
1487 | ret = -ENOMEM; | |
1488 | goto fail; | |
1489 | } | |
1490 | ||
8257b2dc MX |
1491 | writers = btrfs_alloc_subvolume_writers(); |
1492 | if (IS_ERR(writers)) { | |
1493 | ret = PTR_ERR(writers); | |
1494 | goto fail; | |
1495 | } | |
1496 | root->subv_writers = writers; | |
1497 | ||
cb517eab | 1498 | btrfs_init_free_ino_ctl(root); |
57cdc8db DS |
1499 | spin_lock_init(&root->ino_cache_lock); |
1500 | init_waitqueue_head(&root->ino_cache_wait); | |
cb517eab MX |
1501 | |
1502 | ret = get_anon_bdev(&root->anon_dev); | |
1503 | if (ret) | |
876d2cf1 | 1504 | goto fail; |
f32e48e9 CR |
1505 | |
1506 | mutex_lock(&root->objectid_mutex); | |
1507 | ret = btrfs_find_highest_objectid(root, | |
1508 | &root->highest_objectid); | |
1509 | if (ret) { | |
1510 | mutex_unlock(&root->objectid_mutex); | |
876d2cf1 | 1511 | goto fail; |
f32e48e9 CR |
1512 | } |
1513 | ||
1514 | ASSERT(root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID); | |
1515 | ||
1516 | mutex_unlock(&root->objectid_mutex); | |
1517 | ||
cb517eab MX |
1518 | return 0; |
1519 | fail: | |
84db5ccf | 1520 | /* The caller is responsible to call btrfs_free_fs_root */ |
cb517eab MX |
1521 | return ret; |
1522 | } | |
1523 | ||
35bbb97f JM |
1524 | struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info, |
1525 | u64 root_id) | |
cb517eab MX |
1526 | { |
1527 | struct btrfs_root *root; | |
1528 | ||
1529 | spin_lock(&fs_info->fs_roots_radix_lock); | |
1530 | root = radix_tree_lookup(&fs_info->fs_roots_radix, | |
1531 | (unsigned long)root_id); | |
1532 | spin_unlock(&fs_info->fs_roots_radix_lock); | |
1533 | return root; | |
1534 | } | |
1535 | ||
1536 | int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info, | |
1537 | struct btrfs_root *root) | |
1538 | { | |
1539 | int ret; | |
1540 | ||
e1860a77 | 1541 | ret = radix_tree_preload(GFP_NOFS); |
cb517eab MX |
1542 | if (ret) |
1543 | return ret; | |
1544 | ||
1545 | spin_lock(&fs_info->fs_roots_radix_lock); | |
1546 | ret = radix_tree_insert(&fs_info->fs_roots_radix, | |
1547 | (unsigned long)root->root_key.objectid, | |
1548 | root); | |
1549 | if (ret == 0) | |
27cdeb70 | 1550 | set_bit(BTRFS_ROOT_IN_RADIX, &root->state); |
cb517eab MX |
1551 | spin_unlock(&fs_info->fs_roots_radix_lock); |
1552 | radix_tree_preload_end(); | |
1553 | ||
1554 | return ret; | |
1555 | } | |
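btrfs_insert_fs_root() follows the standard radix-tree insertion discipline: preallocate nodes while sleeping is still allowed, then do the insert under the spinlock where no allocation may happen. A minimal kernel-context sketch of that pattern; cached_insert, my_tree and my_lock are hypothetical names, not btrfs identifiers.

/*
 * Minimal sketch of the preload-then-insert pattern used above; a
 * kernel-context fragment, not part of disk-io.c.  radix_tree_preload()
 * may sleep and, on success, holds a per-CPU node reserve until
 * radix_tree_preload_end() is called, so the spinlocked insert cannot
 * fail for lack of memory.
 */
static int cached_insert(struct radix_tree_root *my_tree, spinlock_t *my_lock,
			 unsigned long index, void *item)
{
	int ret;

	ret = radix_tree_preload(GFP_NOFS);	/* may sleep, may return -ENOMEM */
	if (ret)
		return ret;

	spin_lock(my_lock);
	ret = radix_tree_insert(my_tree, index, item);	/* -EEXIST if already present */
	spin_unlock(my_lock);

	radix_tree_preload_end();		/* release the preloaded reserve */
	return ret;
}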
1556 | ||
c00869f1 MX |
1557 | struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info, |
1558 | struct btrfs_key *location, | |
1559 | bool check_ref) | |
5eda7b5e CM |
1560 | { |
1561 | struct btrfs_root *root; | |
381cf658 | 1562 | struct btrfs_path *path; |
1d4c08e0 | 1563 | struct btrfs_key key; |
5eda7b5e CM |
1564 | int ret; |
1565 | ||
edbd8d4e CM |
1566 | if (location->objectid == BTRFS_ROOT_TREE_OBJECTID) |
1567 | return fs_info->tree_root; | |
1568 | if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID) | |
1569 | return fs_info->extent_root; | |
8f18cf13 CM |
1570 | if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID) |
1571 | return fs_info->chunk_root; | |
1572 | if (location->objectid == BTRFS_DEV_TREE_OBJECTID) | |
1573 | return fs_info->dev_root; | |
0403e47e YZ |
1574 | if (location->objectid == BTRFS_CSUM_TREE_OBJECTID) |
1575 | return fs_info->csum_root; | |
bcef60f2 AJ |
1576 | if (location->objectid == BTRFS_QUOTA_TREE_OBJECTID) |
1577 | return fs_info->quota_root ? fs_info->quota_root : | |
1578 | ERR_PTR(-ENOENT); | |
f7a81ea4 SB |
1579 | if (location->objectid == BTRFS_UUID_TREE_OBJECTID) |
1580 | return fs_info->uuid_root ? fs_info->uuid_root : | |
1581 | ERR_PTR(-ENOENT); | |
70f6d82e OS |
1582 | if (location->objectid == BTRFS_FREE_SPACE_TREE_OBJECTID) |
1583 | return fs_info->free_space_root ? fs_info->free_space_root : | |
1584 | ERR_PTR(-ENOENT); | |
4df27c4d | 1585 | again: |
cb517eab | 1586 | root = btrfs_lookup_fs_root(fs_info, location->objectid); |
48475471 | 1587 | if (root) { |
c00869f1 | 1588 | if (check_ref && btrfs_root_refs(&root->root_item) == 0) |
48475471 | 1589 | return ERR_PTR(-ENOENT); |
5eda7b5e | 1590 | return root; |
48475471 | 1591 | } |
5eda7b5e | 1592 | |
cb517eab | 1593 | root = btrfs_read_fs_root(fs_info->tree_root, location); |
5eda7b5e CM |
1594 | if (IS_ERR(root)) |
1595 | return root; | |
3394e160 | 1596 | |
c00869f1 | 1597 | if (check_ref && btrfs_root_refs(&root->root_item) == 0) { |
cb517eab | 1598 | ret = -ENOENT; |
581bb050 | 1599 | goto fail; |
35a30d7c | 1600 | } |
581bb050 | 1601 | |
cb517eab | 1602 | ret = btrfs_init_fs_root(root); |
ac08aedf CM |
1603 | if (ret) |
1604 | goto fail; | |
3394e160 | 1605 | |
381cf658 DS |
1606 | path = btrfs_alloc_path(); |
1607 | if (!path) { | |
1608 | ret = -ENOMEM; | |
1609 | goto fail; | |
1610 | } | |
1d4c08e0 DS |
1611 | key.objectid = BTRFS_ORPHAN_OBJECTID; |
1612 | key.type = BTRFS_ORPHAN_ITEM_KEY; | |
1613 | key.offset = location->objectid; | |
1614 | ||
1615 | ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); | |
381cf658 | 1616 | btrfs_free_path(path); |
d68fc57b YZ |
1617 | if (ret < 0) |
1618 | goto fail; | |
1619 | if (ret == 0) | |
27cdeb70 | 1620 | set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state); |
d68fc57b | 1621 | |
cb517eab | 1622 | ret = btrfs_insert_fs_root(fs_info, root); |
0f7d52f4 | 1623 | if (ret) { |
4df27c4d | 1624 | if (ret == -EEXIST) { |
84db5ccf | 1625 | btrfs_free_fs_root(root); |
4df27c4d YZ |
1626 | goto again; |
1627 | } | |
1628 | goto fail; | |
0f7d52f4 | 1629 | } |
edbd8d4e | 1630 | return root; |
4df27c4d | 1631 | fail: |
84db5ccf | 1632 | btrfs_free_fs_root(root); |
4df27c4d | 1633 | return ERR_PTR(ret); |
edbd8d4e CM |
1634 | } |
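btrfs_get_fs_root() is effectively a read-through cache keyed by root objectid: a radix-tree hit returns immediately; otherwise the root is read from disk, initialized and inserted, and a racing inserter is detected via -EEXIST, in which case the freshly built root is dropped and the lookup retried. A condensed view of that control flow, using only the helpers shown above (the orphan-item probe and the check_ref handling are omitted for brevity; this is a sketch, not a drop-in replacement):

struct btrfs_root *get_root_cached(struct btrfs_fs_info *fs_info,
				   struct btrfs_key *location)
{
	struct btrfs_root *root;
	int ret;

again:
	root = btrfs_lookup_fs_root(fs_info, location->objectid);
	if (root)
		return root;				/* cache hit */

	root = btrfs_read_fs_root(fs_info->tree_root, location);
	if (IS_ERR(root))
		return root;				/* read from disk failed */

	ret = btrfs_init_fs_root(root);
	if (ret)
		goto fail;

	ret = btrfs_insert_fs_root(fs_info, root);
	if (ret == -EEXIST) {				/* somebody else won the race */
		btrfs_free_fs_root(root);
		goto again;
	}
	if (ret)
		goto fail;
	return root;
fail:
	btrfs_free_fs_root(root);
	return ERR_PTR(ret);
}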
1635 | ||
04160088 CM |
1636 | static int btrfs_congested_fn(void *congested_data, int bdi_bits) |
1637 | { | |
1638 | struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data; | |
1639 | int ret = 0; | |
04160088 CM |
1640 | struct btrfs_device *device; |
1641 | struct backing_dev_info *bdi; | |
b7967db7 | 1642 | |
1f78160c XG |
1643 | rcu_read_lock(); |
1644 | list_for_each_entry_rcu(device, &info->fs_devices->devices, dev_list) { | |
dfe25020 CM |
1645 | if (!device->bdev) |
1646 | continue; | |
efa7c9f9 | 1647 | bdi = device->bdev->bd_bdi; |
ff9ea323 | 1648 | if (bdi_congested(bdi, bdi_bits)) { |
04160088 CM |
1649 | ret = 1; |
1650 | break; | |
1651 | } | |
1652 | } | |
1f78160c | 1653 | rcu_read_unlock(); |
04160088 CM |
1654 | return ret; |
1655 | } | |
1656 | ||
8b712842 CM |
1657 | /* |
1658 | * called from the end_io work queues to finally call the bio end_io |
1659 | * functions. This is where read checksum verification actually happens. |
1660 | */ | |
1661 | static void end_workqueue_fn(struct btrfs_work *work) | |
ce9adaa5 | 1662 | { |
ce9adaa5 | 1663 | struct bio *bio; |
97eb6b69 | 1664 | struct btrfs_end_io_wq *end_io_wq; |
ce9adaa5 | 1665 | |
97eb6b69 | 1666 | end_io_wq = container_of(work, struct btrfs_end_io_wq, work); |
8b712842 | 1667 | bio = end_io_wq->bio; |
ce9adaa5 | 1668 | |
4e4cbee9 | 1669 | bio->bi_status = end_io_wq->status; |
8b712842 CM |
1670 | bio->bi_private = end_io_wq->private; |
1671 | bio->bi_end_io = end_io_wq->end_io; | |
97eb6b69 | 1672 | kmem_cache_free(btrfs_end_io_wq_cache, end_io_wq); |
4246a0b6 | 1673 | bio_endio(bio); |
44b8bd7e CM |
1674 | } |
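end_workqueue_fn() only restores state that was parked earlier, when the bio was first submitted. The sketch below shows that setup side in isolation; it is an assumption about the wrapper's counterpart (the real helper lives elsewhere in this file), and stash_bio_completion/wq_end_io are hypothetical names for the routine and the interrupt-context handler that queues the work item.

/*
 * Sketch of the setup side that end_workqueue_fn() undoes: the original
 * completion and private data are parked in the wrapper so the bio can
 * first be routed through a workqueue after the I/O finishes.
 */
static void stash_bio_completion(struct btrfs_end_io_wq *end_io_wq,
				 struct bio *bio, bio_end_io_t *wq_end_io)
{
	end_io_wq->private = bio->bi_private;	/* original caller data */
	end_io_wq->end_io = bio->bi_end_io;	/* original completion */
	end_io_wq->bio = bio;

	bio->bi_private = end_io_wq;		/* route back to the wrapper */
	bio->bi_end_io = wq_end_io;		/* fires at I/O completion */
}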
1675 | ||
a74a4b97 CM |
1676 | static int cleaner_kthread(void *arg) |
1677 | { | |
1678 | struct btrfs_root *root = arg; | |
0b246afa | 1679 | struct btrfs_fs_info *fs_info = root->fs_info; |
d0278245 | 1680 | int again; |
a74a4b97 | 1681 | |
d6fd0ae2 | 1682 | while (1) { |
d0278245 | 1683 | again = 0; |
a74a4b97 | 1684 | |
d0278245 | 1685 | /* Make the cleaner go to sleep early. */ |
2ff7e61e | 1686 | if (btrfs_need_cleaner_sleep(fs_info)) |
d0278245 MX |
1687 | goto sleep; |
1688 | ||
90c711ab ZB |
1689 | /* |
1690 | * Do not do anything if we might cause open_ctree() to block | |
1691 | * before we have finished mounting the filesystem. | |
1692 | */ | |
0b246afa | 1693 | if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags)) |
90c711ab ZB |
1694 | goto sleep; |
1695 | ||
0b246afa | 1696 | if (!mutex_trylock(&fs_info->cleaner_mutex)) |
d0278245 MX |
1697 | goto sleep; |
1698 | ||
dc7f370c MX |
1699 | /* |
1700 | * Avoid the problem that we change the status of the fs | |
1701 | * during the above check and trylock. | |
1702 | */ | |
2ff7e61e | 1703 | if (btrfs_need_cleaner_sleep(fs_info)) { |
0b246afa | 1704 | mutex_unlock(&fs_info->cleaner_mutex); |
dc7f370c | 1705 | goto sleep; |
76dda93c | 1706 | } |
a74a4b97 | 1707 | |
0b246afa | 1708 | mutex_lock(&fs_info->cleaner_delayed_iput_mutex); |
2ff7e61e | 1709 | btrfs_run_delayed_iputs(fs_info); |
0b246afa | 1710 | mutex_unlock(&fs_info->cleaner_delayed_iput_mutex); |
c2d6cb16 | 1711 | |
d0278245 | 1712 | again = btrfs_clean_one_deleted_snapshot(root); |
0b246afa | 1713 | mutex_unlock(&fs_info->cleaner_mutex); |
d0278245 MX |
1714 | |
1715 | /* | |
05323cd1 MX |
1716 | * The defragger has dealt with the R/O remount and umount, |
1717 | * so we needn't do anything special here. |
d0278245 | 1718 | */ |
0b246afa | 1719 | btrfs_run_defrag_inodes(fs_info); |
67c5e7d4 FM |
1720 | |
1721 | /* | |
1722 | * Acquires fs_info->delete_unused_bgs_mutex to avoid racing | |
1723 | * with relocation (btrfs_relocate_chunk) and relocation | |
1724 | * acquires fs_info->cleaner_mutex (btrfs_relocate_block_group) | |
1725 | * after acquiring fs_info->delete_unused_bgs_mutex. So we | |
1726 | * can't hold, nor need to, fs_info->cleaner_mutex when deleting | |
1727 | * unused block groups. | |
1728 | */ | |
0b246afa | 1729 | btrfs_delete_unused_bgs(fs_info); |
d0278245 | 1730 | sleep: |
d6fd0ae2 OS |
1731 | if (kthread_should_park()) |
1732 | kthread_parkme(); | |
1733 | if (kthread_should_stop()) | |
1734 | return 0; | |
838fe188 | 1735 | if (!again) { |
a74a4b97 | 1736 | set_current_state(TASK_INTERRUPTIBLE); |
d6fd0ae2 | 1737 | schedule(); |
a74a4b97 CM |
1738 | __set_current_state(TASK_RUNNING); |
1739 | } | |
da288d28 | 1740 | } |
a74a4b97 CM |
1741 | } |
1742 | ||
1743 | static int transaction_kthread(void *arg) | |
1744 | { | |
1745 | struct btrfs_root *root = arg; | |
0b246afa | 1746 | struct btrfs_fs_info *fs_info = root->fs_info; |
a74a4b97 CM |
1747 | struct btrfs_trans_handle *trans; |
1748 | struct btrfs_transaction *cur; | |
8929ecfa | 1749 | u64 transid; |
a944442c | 1750 | time64_t now; |
a74a4b97 | 1751 | unsigned long delay; |
914b2007 | 1752 | bool cannot_commit; |
a74a4b97 CM |
1753 | |
1754 | do { | |
914b2007 | 1755 | cannot_commit = false; |
0b246afa JM |
1756 | delay = HZ * fs_info->commit_interval; |
1757 | mutex_lock(&fs_info->transaction_kthread_mutex); | |
a74a4b97 | 1758 | |
0b246afa JM |
1759 | spin_lock(&fs_info->trans_lock); |
1760 | cur = fs_info->running_transaction; | |
a74a4b97 | 1761 | if (!cur) { |
0b246afa | 1762 | spin_unlock(&fs_info->trans_lock); |
a74a4b97 CM |
1763 | goto sleep; |
1764 | } | |
31153d81 | 1765 | |
afd48513 | 1766 | now = ktime_get_seconds(); |
4a9d8bde | 1767 | if (cur->state < TRANS_STATE_BLOCKED && |
a514d638 | 1768 | !test_bit(BTRFS_FS_NEED_ASYNC_COMMIT, &fs_info->flags) && |
8b87dc17 | 1769 | (now < cur->start_time || |
0b246afa JM |
1770 | now - cur->start_time < fs_info->commit_interval)) { |
1771 | spin_unlock(&fs_info->trans_lock); | |
a74a4b97 CM |
1772 | delay = HZ * 5; |
1773 | goto sleep; | |
1774 | } | |
8929ecfa | 1775 | transid = cur->transid; |
0b246afa | 1776 | spin_unlock(&fs_info->trans_lock); |
56bec294 | 1777 | |
79787eaa | 1778 | /* If the file system is aborted, this will always fail. */ |
354aa0fb | 1779 | trans = btrfs_attach_transaction(root); |
914b2007 | 1780 | if (IS_ERR(trans)) { |
354aa0fb MX |
1781 | if (PTR_ERR(trans) != -ENOENT) |
1782 | cannot_commit = true; | |
79787eaa | 1783 | goto sleep; |
914b2007 | 1784 | } |
8929ecfa | 1785 | if (transid == trans->transid) { |
3a45bb20 | 1786 | btrfs_commit_transaction(trans); |
8929ecfa | 1787 | } else { |
3a45bb20 | 1788 | btrfs_end_transaction(trans); |
8929ecfa | 1789 | } |
a74a4b97 | 1790 | sleep: |
0b246afa JM |
1791 | wake_up_process(fs_info->cleaner_kthread); |
1792 | mutex_unlock(&fs_info->transaction_kthread_mutex); | |
a74a4b97 | 1793 | |
4e121c06 | 1794 | if (unlikely(test_bit(BTRFS_FS_STATE_ERROR, |
0b246afa | 1795 | &fs_info->fs_state))) |
2ff7e61e | 1796 | btrfs_cleanup_transaction(fs_info); |
ce63f891 | 1797 | if (!kthread_should_stop() && |
0b246afa | 1798 | (!btrfs_transaction_blocked(fs_info) || |
ce63f891 | 1799 | cannot_commit)) |
bc5511d0 | 1800 | schedule_timeout_interruptible(delay); |
a74a4b97 CM |
1801 | } while (!kthread_should_stop()); |
1802 | return 0; | |
1803 | } | |
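The commit decision above reduces to "commit only once the running transaction is at least commit_interval seconds old, unless it is already blocked or an async commit was requested"; the now < cur->start_time clause guards against a clock that moved backwards. A tiny sketch of just that age test, with a hypothetical helper name:

/*
 * Sketch of the age test in transaction_kthread(); start_time and
 * commit_interval are in seconds, as above.  Returns true when the
 * periodic commit should be skipped for now.  Not a btrfs function.
 */
static bool too_young_to_commit(time64_t now, time64_t start_time,
				u32 commit_interval)
{
	if (now < start_time)			/* clock went backwards */
		return true;
	return now - start_time < commit_interval;
}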
1804 | ||
af31f5e5 CM |
1805 | /* |
1806 | * this will find the highest generation in the array of | |
1807 | * root backups. The index of the newest backup slot is returned, |
1808 | * or -1 if we can't find anything. | |
1809 | * | |
1810 | * We check to make sure the array is valid by comparing the | |
1811 | * generation of the latest root in the array with the generation | |
1812 | * in the super block. If they don't match we pitch it. | |
1813 | */ | |
1814 | static int find_newest_super_backup(struct btrfs_fs_info *info, u64 newest_gen) | |
1815 | { | |
1816 | u64 cur; | |
1817 | int newest_index = -1; | |
1818 | struct btrfs_root_backup *root_backup; | |
1819 | int i; | |
1820 | ||
1821 | for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) { | |
1822 | root_backup = info->super_copy->super_roots + i; | |
1823 | cur = btrfs_backup_tree_root_gen(root_backup); | |
1824 | if (cur == newest_gen) | |
1825 | newest_index = i; | |
1826 | } | |
1827 | ||
1828 | /* check to see if we actually wrapped around */ | |
1829 | if (newest_index == BTRFS_NUM_BACKUP_ROOTS - 1) { | |
1830 | root_backup = info->super_copy->super_roots; | |
1831 | cur = btrfs_backup_tree_root_gen(root_backup); | |
1832 | if (cur == newest_gen) | |
1833 | newest_index = 0; | |
1834 | } | |
1835 | return newest_index; | |
1836 | } | |
1837 | ||
1838 | ||
1839 | /* | |
1840 | * find the oldest backup so we know where to store new entries | |
1841 | * in the backup array. This will set the backup_root_index | |
1842 | * field in the fs_info struct | |
1843 | */ | |
1844 | static void find_oldest_super_backup(struct btrfs_fs_info *info, | |
1845 | u64 newest_gen) | |
1846 | { | |
1847 | int newest_index = -1; | |
1848 | ||
1849 | newest_index = find_newest_super_backup(info, newest_gen); | |
1850 | /* if there was garbage in there, just move along */ | |
1851 | if (newest_index == -1) { | |
1852 | info->backup_root_index = 0; | |
1853 | } else { | |
1854 | info->backup_root_index = (newest_index + 1) % BTRFS_NUM_BACKUP_ROOTS; | |
1855 | } | |
1856 | } | |
1857 | ||
1858 | /* | |
1859 | * copy all the root pointers into the super backup array. | |
1860 | * this will bump the backup pointer by one when it is | |
1861 | * done | |
1862 | */ | |
1863 | static void backup_super_roots(struct btrfs_fs_info *info) | |
1864 | { | |
1865 | int next_backup; | |
1866 | struct btrfs_root_backup *root_backup; | |
1867 | int last_backup; | |
1868 | ||
1869 | next_backup = info->backup_root_index; | |
1870 | last_backup = (next_backup + BTRFS_NUM_BACKUP_ROOTS - 1) % | |
1871 | BTRFS_NUM_BACKUP_ROOTS; | |
1872 | ||
1873 | /* | |
1874 | * just overwrite the last backup if we're at the same generation; |
1875 | * this happens only at umount |
1876 | */ | |
1877 | root_backup = info->super_for_commit->super_roots + last_backup; | |
1878 | if (btrfs_backup_tree_root_gen(root_backup) == | |
1879 | btrfs_header_generation(info->tree_root->node)) | |
1880 | next_backup = last_backup; | |
1881 | ||
1882 | root_backup = info->super_for_commit->super_roots + next_backup; | |
1883 | ||
1884 | /* | |
1885 | * make sure all of our padding and empty slots get zero filled | |
1886 | * regardless of which ones we use today | |
1887 | */ | |
1888 | memset(root_backup, 0, sizeof(*root_backup)); | |
1889 | ||
1890 | info->backup_root_index = (next_backup + 1) % BTRFS_NUM_BACKUP_ROOTS; | |
1891 | ||
1892 | btrfs_set_backup_tree_root(root_backup, info->tree_root->node->start); | |
1893 | btrfs_set_backup_tree_root_gen(root_backup, | |
1894 | btrfs_header_generation(info->tree_root->node)); | |
1895 | ||
1896 | btrfs_set_backup_tree_root_level(root_backup, | |
1897 | btrfs_header_level(info->tree_root->node)); | |
1898 | ||
1899 | btrfs_set_backup_chunk_root(root_backup, info->chunk_root->node->start); | |
1900 | btrfs_set_backup_chunk_root_gen(root_backup, | |
1901 | btrfs_header_generation(info->chunk_root->node)); | |
1902 | btrfs_set_backup_chunk_root_level(root_backup, | |
1903 | btrfs_header_level(info->chunk_root->node)); | |
1904 | ||
1905 | btrfs_set_backup_extent_root(root_backup, info->extent_root->node->start); | |
1906 | btrfs_set_backup_extent_root_gen(root_backup, | |
1907 | btrfs_header_generation(info->extent_root->node)); | |
1908 | btrfs_set_backup_extent_root_level(root_backup, | |
1909 | btrfs_header_level(info->extent_root->node)); | |
1910 | ||
7c7e82a7 CM |
1911 | /* |
1912 | * we might commit during log recovery, which happens before we set | |
1913 | * the fs_root. Make sure it is valid before we fill it in. | |
1914 | */ | |
1915 | if (info->fs_root && info->fs_root->node) { | |
1916 | btrfs_set_backup_fs_root(root_backup, | |
1917 | info->fs_root->node->start); | |
1918 | btrfs_set_backup_fs_root_gen(root_backup, | |
af31f5e5 | 1919 | btrfs_header_generation(info->fs_root->node)); |
7c7e82a7 | 1920 | btrfs_set_backup_fs_root_level(root_backup, |
af31f5e5 | 1921 | btrfs_header_level(info->fs_root->node)); |
7c7e82a7 | 1922 | } |
af31f5e5 CM |
1923 | |
1924 | btrfs_set_backup_dev_root(root_backup, info->dev_root->node->start); | |
1925 | btrfs_set_backup_dev_root_gen(root_backup, | |
1926 | btrfs_header_generation(info->dev_root->node)); | |
1927 | btrfs_set_backup_dev_root_level(root_backup, | |
1928 | btrfs_header_level(info->dev_root->node)); | |
1929 | ||
1930 | btrfs_set_backup_csum_root(root_backup, info->csum_root->node->start); | |
1931 | btrfs_set_backup_csum_root_gen(root_backup, | |
1932 | btrfs_header_generation(info->csum_root->node)); | |
1933 | btrfs_set_backup_csum_root_level(root_backup, | |
1934 | btrfs_header_level(info->csum_root->node)); | |
1935 | ||
1936 | btrfs_set_backup_total_bytes(root_backup, | |
1937 | btrfs_super_total_bytes(info->super_copy)); | |
1938 | btrfs_set_backup_bytes_used(root_backup, | |
1939 | btrfs_super_bytes_used(info->super_copy)); | |
1940 | btrfs_set_backup_num_devices(root_backup, | |
1941 | btrfs_super_num_devices(info->super_copy)); | |
1942 | ||
1943 | /* | |
1944 | * if we don't copy this out to the super_copy, it won't get remembered | |
1945 | * for the next commit | |
1946 | */ | |
1947 | memcpy(&info->super_copy->super_roots, | |
1948 | &info->super_for_commit->super_roots, | |
1949 | sizeof(*root_backup) * BTRFS_NUM_BACKUP_ROOTS); | |
1950 | } | |
1951 | ||
1952 | /* | |
1953 | * this copies info out of the root backup array and back into | |
1954 | * the in-memory super block. It is meant to help iterate through | |
1955 | * the array, so you send it the number of backups you've already | |
1956 | * tried and the last backup index you used. | |
1957 | * | |
1958 | * this returns -1 when it has tried all the backups | |
1959 | */ | |
1960 | static noinline int next_root_backup(struct btrfs_fs_info *info, | |
1961 | struct btrfs_super_block *super, | |
1962 | int *num_backups_tried, int *backup_index) | |
1963 | { | |
1964 | struct btrfs_root_backup *root_backup; | |
1965 | int newest = *backup_index; | |
1966 | ||
1967 | if (*num_backups_tried == 0) { | |
1968 | u64 gen = btrfs_super_generation(super); | |
1969 | ||
1970 | newest = find_newest_super_backup(info, gen); | |
1971 | if (newest == -1) | |
1972 | return -1; | |
1973 | ||
1974 | *backup_index = newest; | |
1975 | *num_backups_tried = 1; | |
1976 | } else if (*num_backups_tried == BTRFS_NUM_BACKUP_ROOTS) { | |
1977 | /* we've tried all the backups, all done */ | |
1978 | return -1; | |
1979 | } else { | |
1980 | /* jump to the next oldest backup */ | |
1981 | newest = (*backup_index + BTRFS_NUM_BACKUP_ROOTS - 1) % | |
1982 | BTRFS_NUM_BACKUP_ROOTS; | |
1983 | *backup_index = newest; | |
1984 | *num_backups_tried += 1; | |
1985 | } | |
1986 | root_backup = super->super_roots + newest; | |
1987 | ||
1988 | btrfs_set_super_generation(super, | |
1989 | btrfs_backup_tree_root_gen(root_backup)); | |
1990 | btrfs_set_super_root(super, btrfs_backup_tree_root(root_backup)); | |
1991 | btrfs_set_super_root_level(super, | |
1992 | btrfs_backup_tree_root_level(root_backup)); | |
1993 | btrfs_set_super_bytes_used(super, btrfs_backup_bytes_used(root_backup)); | |
1994 | ||
1995 | /* | |
1996 | * fixme: the total bytes and num_devices need to match or we should | |
1997 | * require a fsck |
1998 | */ | |
1999 | btrfs_set_super_total_bytes(super, btrfs_backup_total_bytes(root_backup)); | |
2000 | btrfs_set_super_num_devices(super, btrfs_backup_num_devices(root_backup)); | |
2001 | return 0; | |
2002 | } | |
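The helpers above treat super_roots[] as a small ring: backup_super_roots() advances forward to the slot after the newest entry, while next_root_backup() walks backwards through progressively older entries until every slot has been tried. A worked sketch of that index arithmetic, assuming (as in this era of the code) BTRFS_NUM_BACKUP_ROOTS is 4; the helper names are illustrative only.

/*
 * Worked example of the circular slot arithmetic used by the
 * backup-root helpers, assuming BTRFS_NUM_BACKUP_ROOTS == 4.
 * With the newest backup in slot 2:
 *   - the next commit writes slot (2 + 1) % 4 == 3,
 *   - a failed mount retries slots 2, 1, 0, 3 and then gives up
 *     (next_root_backup() returns -1 after four attempts).
 */
static int newer_backup_slot(int slot)
{
	return (slot + 1) % BTRFS_NUM_BACKUP_ROOTS;
}

static int older_backup_slot(int slot)
{
	return (slot + BTRFS_NUM_BACKUP_ROOTS - 1) % BTRFS_NUM_BACKUP_ROOTS;
}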
2003 | ||
7abadb64 LB |
2004 | /* helper to cleanup workers */ |
2005 | static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info) | |
2006 | { | |
dc6e3209 | 2007 | btrfs_destroy_workqueue(fs_info->fixup_workers); |
afe3d242 | 2008 | btrfs_destroy_workqueue(fs_info->delalloc_workers); |
5cdc7ad3 | 2009 | btrfs_destroy_workqueue(fs_info->workers); |
fccb5d86 | 2010 | btrfs_destroy_workqueue(fs_info->endio_workers); |
fccb5d86 | 2011 | btrfs_destroy_workqueue(fs_info->endio_raid56_workers); |
8b110e39 | 2012 | btrfs_destroy_workqueue(fs_info->endio_repair_workers); |
d05a33ac | 2013 | btrfs_destroy_workqueue(fs_info->rmw_workers); |
fccb5d86 QW |
2014 | btrfs_destroy_workqueue(fs_info->endio_write_workers); |
2015 | btrfs_destroy_workqueue(fs_info->endio_freespace_worker); | |
a8c93d4e | 2016 | btrfs_destroy_workqueue(fs_info->submit_workers); |
5b3bc44e | 2017 | btrfs_destroy_workqueue(fs_info->delayed_workers); |
e66f0bb1 | 2018 | btrfs_destroy_workqueue(fs_info->caching_workers); |
736cfa15 | 2019 | btrfs_destroy_workqueue(fs_info->readahead_workers); |
a44903ab | 2020 | btrfs_destroy_workqueue(fs_info->flush_workers); |
fc97fab0 | 2021 | btrfs_destroy_workqueue(fs_info->qgroup_rescan_workers); |
a79b7d4b | 2022 | btrfs_destroy_workqueue(fs_info->extent_workers); |
a9b9477d FM |
2023 | /* |
2024 | * Now that all other work queues are destroyed, we can safely destroy | |
2025 | * the queues used for metadata I/O, since tasks from those other work | |
2026 | * queues can do metadata I/O operations. | |
2027 | */ | |
2028 | btrfs_destroy_workqueue(fs_info->endio_meta_workers); | |
2029 | btrfs_destroy_workqueue(fs_info->endio_meta_write_workers); | |
7abadb64 LB |
2030 | } |
2031 | ||
2e9f5954 R |
2032 | static void free_root_extent_buffers(struct btrfs_root *root) |
2033 | { | |
2034 | if (root) { | |
2035 | free_extent_buffer(root->node); | |
2036 | free_extent_buffer(root->commit_root); | |
2037 | root->node = NULL; | |
2038 | root->commit_root = NULL; | |
2039 | } | |
2040 | } | |
2041 | ||
af31f5e5 CM |
2042 | /* helper to cleanup tree roots */ |
2043 | static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root) | |
2044 | { | |
2e9f5954 | 2045 | free_root_extent_buffers(info->tree_root); |
655b09fe | 2046 | |
2e9f5954 R |
2047 | free_root_extent_buffers(info->dev_root); |
2048 | free_root_extent_buffers(info->extent_root); | |
2049 | free_root_extent_buffers(info->csum_root); | |
2050 | free_root_extent_buffers(info->quota_root); | |
2051 | free_root_extent_buffers(info->uuid_root); | |
2052 | if (chunk_root) | |
2053 | free_root_extent_buffers(info->chunk_root); | |
70f6d82e | 2054 | free_root_extent_buffers(info->free_space_root); |
af31f5e5 CM |
2055 | } |
2056 | ||
faa2dbf0 | 2057 | void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info) |
171f6537 JB |
2058 | { |
2059 | int ret; | |
2060 | struct btrfs_root *gang[8]; | |
2061 | int i; | |
2062 | ||
2063 | while (!list_empty(&fs_info->dead_roots)) { | |
2064 | gang[0] = list_entry(fs_info->dead_roots.next, | |
2065 | struct btrfs_root, root_list); | |
2066 | list_del(&gang[0]->root_list); | |
2067 | ||
27cdeb70 | 2068 | if (test_bit(BTRFS_ROOT_IN_RADIX, &gang[0]->state)) { |
cb517eab | 2069 | btrfs_drop_and_free_fs_root(fs_info, gang[0]); |
171f6537 JB |
2070 | } else { |
2071 | free_extent_buffer(gang[0]->node); | |
2072 | free_extent_buffer(gang[0]->commit_root); | |
b0feb9d9 | 2073 | btrfs_put_fs_root(gang[0]); |
171f6537 JB |
2074 | } |
2075 | } | |
2076 | ||
2077 | while (1) { | |
2078 | ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix, | |
2079 | (void **)gang, 0, | |
2080 | ARRAY_SIZE(gang)); | |
2081 | if (!ret) | |
2082 | break; | |
2083 | for (i = 0; i < ret; i++) | |
cb517eab | 2084 | btrfs_drop_and_free_fs_root(fs_info, gang[i]); |
171f6537 | 2085 | } |
1a4319cc LB |
2086 | |
2087 | if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) { | |
2088 | btrfs_free_log_root_tree(NULL, fs_info); | |
2ff7e61e | 2089 | btrfs_destroy_pinned_extent(fs_info, fs_info->pinned_extents); |
1a4319cc | 2090 | } |
171f6537 | 2091 | } |
af31f5e5 | 2092 | |
638aa7ed ES |
2093 | static void btrfs_init_scrub(struct btrfs_fs_info *fs_info) |
2094 | { | |
2095 | mutex_init(&fs_info->scrub_lock); | |
2096 | atomic_set(&fs_info->scrubs_running, 0); | |
2097 | atomic_set(&fs_info->scrub_pause_req, 0); | |
2098 | atomic_set(&fs_info->scrubs_paused, 0); | |
2099 | atomic_set(&fs_info->scrub_cancel_req, 0); | |
2100 | init_waitqueue_head(&fs_info->scrub_pause_wait); | |
2101 | fs_info->scrub_workers_refcnt = 0; | |
2102 | } | |
2103 | ||
779a65a4 ES |
2104 | static void btrfs_init_balance(struct btrfs_fs_info *fs_info) |
2105 | { | |
2106 | spin_lock_init(&fs_info->balance_lock); | |
2107 | mutex_init(&fs_info->balance_mutex); | |
779a65a4 ES |
2108 | atomic_set(&fs_info->balance_pause_req, 0); |
2109 | atomic_set(&fs_info->balance_cancel_req, 0); | |
2110 | fs_info->balance_ctl = NULL; | |
2111 | init_waitqueue_head(&fs_info->balance_wait_q); | |
2112 | } | |
2113 | ||
6bccf3ab | 2114 | static void btrfs_init_btree_inode(struct btrfs_fs_info *fs_info) |
f37938e0 | 2115 | { |
2ff7e61e JM |
2116 | struct inode *inode = fs_info->btree_inode; |
2117 | ||
2118 | inode->i_ino = BTRFS_BTREE_INODE_OBJECTID; | |
2119 | set_nlink(inode, 1); | |
f37938e0 ES |
2120 | /* |
2121 | * we set the i_size on the btree inode to the largest possible offset. |
2122 | * the real end of the address space is determined by all of | |
2123 | * the devices in the system | |
2124 | */ | |
2ff7e61e JM |
2125 | inode->i_size = OFFSET_MAX; |
2126 | inode->i_mapping->a_ops = &btree_aops; | |
f37938e0 | 2127 | |
2ff7e61e | 2128 | RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node); |
c6100a4b | 2129 | extent_io_tree_init(&BTRFS_I(inode)->io_tree, inode); |
2ff7e61e JM |
2130 | BTRFS_I(inode)->io_tree.track_uptodate = 0; |
2131 | extent_map_tree_init(&BTRFS_I(inode)->extent_tree); | |
f37938e0 | 2132 | |
2ff7e61e | 2133 | BTRFS_I(inode)->io_tree.ops = &btree_extent_io_ops; |
f37938e0 | 2134 | |
2ff7e61e JM |
2135 | BTRFS_I(inode)->root = fs_info->tree_root; |
2136 | memset(&BTRFS_I(inode)->location, 0, sizeof(struct btrfs_key)); | |
2137 | set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags); | |
2138 | btrfs_insert_inode_hash(inode); | |
f37938e0 ES |
2139 | } |
2140 | ||
ad618368 ES |
2141 | static void btrfs_init_dev_replace_locks(struct btrfs_fs_info *fs_info) |
2142 | { | |
ad618368 | 2143 | mutex_init(&fs_info->dev_replace.lock_finishing_cancel_unmount); |
129827e3 | 2144 | init_rwsem(&fs_info->dev_replace.rwsem); |
7f8d236a | 2145 | init_waitqueue_head(&fs_info->dev_replace.replace_wait); |
ad618368 ES |
2146 | } |
2147 | ||
f9e92e40 ES |
2148 | static void btrfs_init_qgroup(struct btrfs_fs_info *fs_info) |
2149 | { | |
2150 | spin_lock_init(&fs_info->qgroup_lock); | |
2151 | mutex_init(&fs_info->qgroup_ioctl_lock); | |
2152 | fs_info->qgroup_tree = RB_ROOT; | |
2153 | fs_info->qgroup_op_tree = RB_ROOT; | |
2154 | INIT_LIST_HEAD(&fs_info->dirty_qgroups); | |
2155 | fs_info->qgroup_seq = 1; | |
f9e92e40 | 2156 | fs_info->qgroup_ulist = NULL; |
d2c609b8 | 2157 | fs_info->qgroup_rescan_running = false; |
f9e92e40 ES |
2158 | mutex_init(&fs_info->qgroup_rescan_lock); |
2159 | } | |
2160 | ||
2a458198 ES |
2161 | static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info, |
2162 | struct btrfs_fs_devices *fs_devices) | |
2163 | { | |
f7b885be | 2164 | u32 max_active = fs_info->thread_pool_size; |
6f011058 | 2165 | unsigned int flags = WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND; |
2a458198 ES |
2166 | |
2167 | fs_info->workers = | |
cb001095 JM |
2168 | btrfs_alloc_workqueue(fs_info, "worker", |
2169 | flags | WQ_HIGHPRI, max_active, 16); | |
2a458198 ES |
2170 | |
2171 | fs_info->delalloc_workers = | |
cb001095 JM |
2172 | btrfs_alloc_workqueue(fs_info, "delalloc", |
2173 | flags, max_active, 2); | |
2a458198 ES |
2174 | |
2175 | fs_info->flush_workers = | |
cb001095 JM |
2176 | btrfs_alloc_workqueue(fs_info, "flush_delalloc", |
2177 | flags, max_active, 0); | |
2a458198 ES |
2178 | |
2179 | fs_info->caching_workers = | |
cb001095 | 2180 | btrfs_alloc_workqueue(fs_info, "cache", flags, max_active, 0); |
2a458198 ES |
2181 | |
2182 | /* | |
2183 | * a higher idle thresh on the submit workers makes it much more | |
2184 | * likely that bios will be sent down in a sane order to the |
2185 | * devices | |
2186 | */ | |
2187 | fs_info->submit_workers = | |
cb001095 | 2188 | btrfs_alloc_workqueue(fs_info, "submit", flags, |
2a458198 ES |
2189 | min_t(u64, fs_devices->num_devices, |
2190 | max_active), 64); | |
2191 | ||
2192 | fs_info->fixup_workers = | |
cb001095 | 2193 | btrfs_alloc_workqueue(fs_info, "fixup", flags, 1, 0); |
2a458198 ES |
2194 | |
2195 | /* | |
2196 | * endios are largely parallel and should have a very | |
2197 | * low idle thresh | |
2198 | */ | |
2199 | fs_info->endio_workers = | |
cb001095 | 2200 | btrfs_alloc_workqueue(fs_info, "endio", flags, max_active, 4); |
2a458198 | 2201 | fs_info->endio_meta_workers = |
cb001095 JM |
2202 | btrfs_alloc_workqueue(fs_info, "endio-meta", flags, |
2203 | max_active, 4); | |
2a458198 | 2204 | fs_info->endio_meta_write_workers = |
cb001095 JM |
2205 | btrfs_alloc_workqueue(fs_info, "endio-meta-write", flags, |
2206 | max_active, 2); | |
2a458198 | 2207 | fs_info->endio_raid56_workers = |
cb001095 JM |
2208 | btrfs_alloc_workqueue(fs_info, "endio-raid56", flags, |
2209 | max_active, 4); | |
2a458198 | 2210 | fs_info->endio_repair_workers = |
cb001095 | 2211 | btrfs_alloc_workqueue(fs_info, "endio-repair", flags, 1, 0); |
2a458198 | 2212 | fs_info->rmw_workers = |
cb001095 | 2213 | btrfs_alloc_workqueue(fs_info, "rmw", flags, max_active, 2); |
2a458198 | 2214 | fs_info->endio_write_workers = |
cb001095 JM |
2215 | btrfs_alloc_workqueue(fs_info, "endio-write", flags, |
2216 | max_active, 2); | |
2a458198 | 2217 | fs_info->endio_freespace_worker = |
cb001095 JM |
2218 | btrfs_alloc_workqueue(fs_info, "freespace-write", flags, |
2219 | max_active, 0); | |
2a458198 | 2220 | fs_info->delayed_workers = |
cb001095 JM |
2221 | btrfs_alloc_workqueue(fs_info, "delayed-meta", flags, |
2222 | max_active, 0); | |
2a458198 | 2223 | fs_info->readahead_workers = |
cb001095 JM |
2224 | btrfs_alloc_workqueue(fs_info, "readahead", flags, |
2225 | max_active, 2); | |
2a458198 | 2226 | fs_info->qgroup_rescan_workers = |
cb001095 | 2227 | btrfs_alloc_workqueue(fs_info, "qgroup-rescan", flags, 1, 0); |
2a458198 | 2228 | fs_info->extent_workers = |
cb001095 | 2229 | btrfs_alloc_workqueue(fs_info, "extent-refs", flags, |
2a458198 ES |
2230 | min_t(u64, fs_devices->num_devices, |
2231 | max_active), 8); | |
2232 | ||
2233 | if (!(fs_info->workers && fs_info->delalloc_workers && | |
2234 | fs_info->submit_workers && fs_info->flush_workers && | |
2235 | fs_info->endio_workers && fs_info->endio_meta_workers && | |
2236 | fs_info->endio_meta_write_workers && | |
2237 | fs_info->endio_repair_workers && | |
2238 | fs_info->endio_write_workers && fs_info->endio_raid56_workers && | |
2239 | fs_info->endio_freespace_worker && fs_info->rmw_workers && | |
2240 | fs_info->caching_workers && fs_info->readahead_workers && | |
2241 | fs_info->fixup_workers && fs_info->delayed_workers && | |
2242 | fs_info->extent_workers && | |
2243 | fs_info->qgroup_rescan_workers)) { | |
2244 | return -ENOMEM; | |
2245 | } | |
2246 | ||
2247 | return 0; | |
2248 | } | |
2249 | ||
63443bf5 ES |
2250 | static int btrfs_replay_log(struct btrfs_fs_info *fs_info, |
2251 | struct btrfs_fs_devices *fs_devices) | |
2252 | { | |
2253 | int ret; | |
63443bf5 ES |
2254 | struct btrfs_root *log_tree_root; |
2255 | struct btrfs_super_block *disk_super = fs_info->super_copy; | |
2256 | u64 bytenr = btrfs_super_log_root(disk_super); | |
581c1760 | 2257 | int level = btrfs_super_log_root_level(disk_super); |
63443bf5 ES |
2258 | |
2259 | if (fs_devices->rw_devices == 0) { | |
f14d104d | 2260 | btrfs_warn(fs_info, "log replay required on RO media"); |
63443bf5 ES |
2261 | return -EIO; |
2262 | } | |
2263 | ||
74e4d827 | 2264 | log_tree_root = btrfs_alloc_root(fs_info, GFP_KERNEL); |
63443bf5 ES |
2265 | if (!log_tree_root) |
2266 | return -ENOMEM; | |
2267 | ||
da17066c | 2268 | __setup_root(log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID); |
63443bf5 | 2269 | |
2ff7e61e | 2270 | log_tree_root->node = read_tree_block(fs_info, bytenr, |
581c1760 QW |
2271 | fs_info->generation + 1, |
2272 | level, NULL); | |
64c043de | 2273 | if (IS_ERR(log_tree_root->node)) { |
f14d104d | 2274 | btrfs_warn(fs_info, "failed to read log tree"); |
0eeff236 | 2275 | ret = PTR_ERR(log_tree_root->node); |
64c043de | 2276 | kfree(log_tree_root); |
0eeff236 | 2277 | return ret; |
64c043de | 2278 | } else if (!extent_buffer_uptodate(log_tree_root->node)) { |
f14d104d | 2279 | btrfs_err(fs_info, "failed to read log tree"); |
63443bf5 ES |
2280 | free_extent_buffer(log_tree_root->node); |
2281 | kfree(log_tree_root); | |
2282 | return -EIO; | |
2283 | } | |
2284 | /* returns with log_tree_root freed on success */ | |
2285 | ret = btrfs_recover_log_trees(log_tree_root); | |
2286 | if (ret) { | |
0b246afa JM |
2287 | btrfs_handle_fs_error(fs_info, ret, |
2288 | "Failed to recover log tree"); | |
63443bf5 ES |
2289 | free_extent_buffer(log_tree_root->node); |
2290 | kfree(log_tree_root); | |
2291 | return ret; | |
2292 | } | |
2293 | ||
bc98a42c | 2294 | if (sb_rdonly(fs_info->sb)) { |
6bccf3ab | 2295 | ret = btrfs_commit_super(fs_info); |
63443bf5 ES |
2296 | if (ret) |
2297 | return ret; | |
2298 | } | |
2299 | ||
2300 | return 0; | |
2301 | } | |
2302 | ||
6bccf3ab | 2303 | static int btrfs_read_roots(struct btrfs_fs_info *fs_info) |
4bbcaa64 | 2304 | { |
6bccf3ab | 2305 | struct btrfs_root *tree_root = fs_info->tree_root; |
a4f3d2c4 | 2306 | struct btrfs_root *root; |
4bbcaa64 ES |
2307 | struct btrfs_key location; |
2308 | int ret; | |
2309 | ||
6bccf3ab JM |
2310 | BUG_ON(!fs_info->tree_root); |
2311 | ||
4bbcaa64 ES |
2312 | location.objectid = BTRFS_EXTENT_TREE_OBJECTID; |
2313 | location.type = BTRFS_ROOT_ITEM_KEY; | |
2314 | location.offset = 0; | |
2315 | ||
a4f3d2c4 | 2316 | root = btrfs_read_tree_root(tree_root, &location); |
f50f4353 LB |
2317 | if (IS_ERR(root)) { |
2318 | ret = PTR_ERR(root); | |
2319 | goto out; | |
2320 | } | |
a4f3d2c4 DS |
2321 | set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state); |
2322 | fs_info->extent_root = root; | |
4bbcaa64 ES |
2323 | |
2324 | location.objectid = BTRFS_DEV_TREE_OBJECTID; | |
a4f3d2c4 | 2325 | root = btrfs_read_tree_root(tree_root, &location); |
f50f4353 LB |
2326 | if (IS_ERR(root)) { |
2327 | ret = PTR_ERR(root); | |
2328 | goto out; | |
2329 | } | |
a4f3d2c4 DS |
2330 | set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state); |
2331 | fs_info->dev_root = root; | |
4bbcaa64 ES |
2332 | btrfs_init_devices_late(fs_info); |
2333 | ||
2334 | location.objectid = BTRFS_CSUM_TREE_OBJECTID; | |
a4f3d2c4 | 2335 | root = btrfs_read_tree_root(tree_root, &location); |
f50f4353 LB |
2336 | if (IS_ERR(root)) { |
2337 | ret = PTR_ERR(root); | |
2338 | goto out; | |
2339 | } | |
a4f3d2c4 DS |
2340 | set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state); |
2341 | fs_info->csum_root = root; | |
4bbcaa64 ES |
2342 | |
2343 | location.objectid = BTRFS_QUOTA_TREE_OBJECTID; | |
a4f3d2c4 DS |
2344 | root = btrfs_read_tree_root(tree_root, &location); |
2345 | if (!IS_ERR(root)) { | |
2346 | set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state); | |
afcdd129 | 2347 | set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags); |
a4f3d2c4 | 2348 | fs_info->quota_root = root; |
4bbcaa64 ES |
2349 | } |
2350 | ||
2351 | location.objectid = BTRFS_UUID_TREE_OBJECTID; | |
a4f3d2c4 DS |
2352 | root = btrfs_read_tree_root(tree_root, &location); |
2353 | if (IS_ERR(root)) { | |
2354 | ret = PTR_ERR(root); | |
4bbcaa64 | 2355 | if (ret != -ENOENT) |
f50f4353 | 2356 | goto out; |
4bbcaa64 | 2357 | } else { |
a4f3d2c4 DS |
2358 | set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state); |
2359 | fs_info->uuid_root = root; | |
4bbcaa64 ES |
2360 | } |
2361 | ||
70f6d82e OS |
2362 | if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) { |
2363 | location.objectid = BTRFS_FREE_SPACE_TREE_OBJECTID; | |
2364 | root = btrfs_read_tree_root(tree_root, &location); | |
f50f4353 LB |
2365 | if (IS_ERR(root)) { |
2366 | ret = PTR_ERR(root); | |
2367 | goto out; | |
2368 | } | |
70f6d82e OS |
2369 | set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state); |
2370 | fs_info->free_space_root = root; | |
2371 | } | |
2372 | ||
4bbcaa64 | 2373 | return 0; |
f50f4353 LB |
2374 | out: |
2375 | btrfs_warn(fs_info, "failed to read root (objectid=%llu): %d", | |
2376 | location.objectid, ret); | |
2377 | return ret; | |
4bbcaa64 ES |
2378 | } |
2379 | ||
069ec957 QW |
2380 | /* |
2381 | * Real super block validation | |
2382 | * NOTE: super csum type and incompat features will not be checked here. | |
2383 | * | |
2384 | * @sb: super block to check | |
2385 | * @mirror_num: which superblock copy's bytenr to check: |
2386 | * 0 the primary (1st) sb | |
2387 | * 1, 2 2nd and 3rd backup copy | |
2388 | * -1 skip bytenr check | |
2389 | */ | |
2390 | static int validate_super(struct btrfs_fs_info *fs_info, | |
2391 | struct btrfs_super_block *sb, int mirror_num) | |
21a852b0 | 2392 | { |
21a852b0 QW |
2393 | u64 nodesize = btrfs_super_nodesize(sb); |
2394 | u64 sectorsize = btrfs_super_sectorsize(sb); | |
2395 | int ret = 0; | |
2396 | ||
2397 | if (btrfs_super_magic(sb) != BTRFS_MAGIC) { | |
2398 | btrfs_err(fs_info, "no valid FS found"); | |
2399 | ret = -EINVAL; | |
2400 | } | |
2401 | if (btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP) { | |
2402 | btrfs_err(fs_info, "unrecognized or unsupported super flag: %llu", | |
2403 | btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP); | |
2404 | ret = -EINVAL; | |
2405 | } | |
2406 | if (btrfs_super_root_level(sb) >= BTRFS_MAX_LEVEL) { | |
2407 | btrfs_err(fs_info, "tree_root level too big: %d >= %d", | |
2408 | btrfs_super_root_level(sb), BTRFS_MAX_LEVEL); | |
2409 | ret = -EINVAL; | |
2410 | } | |
2411 | if (btrfs_super_chunk_root_level(sb) >= BTRFS_MAX_LEVEL) { | |
2412 | btrfs_err(fs_info, "chunk_root level too big: %d >= %d", | |
2413 | btrfs_super_chunk_root_level(sb), BTRFS_MAX_LEVEL); | |
2414 | ret = -EINVAL; | |
2415 | } | |
2416 | if (btrfs_super_log_root_level(sb) >= BTRFS_MAX_LEVEL) { | |
2417 | btrfs_err(fs_info, "log_root level too big: %d >= %d", | |
2418 | btrfs_super_log_root_level(sb), BTRFS_MAX_LEVEL); | |
2419 | ret = -EINVAL; | |
2420 | } | |
2421 | ||
2422 | /* | |
2423 | * Check sectorsize and nodesize first; other checks will need them. |
2424 | * Check all possible sectorsizes (4K, 8K, 16K, 32K, 64K) here. |
2425 | */ | |
2426 | if (!is_power_of_2(sectorsize) || sectorsize < 4096 || | |
2427 | sectorsize > BTRFS_MAX_METADATA_BLOCKSIZE) { | |
2428 | btrfs_err(fs_info, "invalid sectorsize %llu", sectorsize); | |
2429 | ret = -EINVAL; | |
2430 | } | |
2431 | /* Only sectorsize == PAGE_SIZE is supported for now */ |
2432 | if (sectorsize != PAGE_SIZE) { | |
2433 | btrfs_err(fs_info, | |
2434 | "sectorsize %llu not supported yet, only support %lu", | |
2435 | sectorsize, PAGE_SIZE); | |
2436 | ret = -EINVAL; | |
2437 | } | |
2438 | if (!is_power_of_2(nodesize) || nodesize < sectorsize || | |
2439 | nodesize > BTRFS_MAX_METADATA_BLOCKSIZE) { | |
2440 | btrfs_err(fs_info, "invalid nodesize %llu", nodesize); | |
2441 | ret = -EINVAL; | |
2442 | } | |
2443 | if (nodesize != le32_to_cpu(sb->__unused_leafsize)) { | |
2444 | btrfs_err(fs_info, "invalid leafsize %u, should be %llu", | |
2445 | le32_to_cpu(sb->__unused_leafsize), nodesize); | |
2446 | ret = -EINVAL; | |
2447 | } | |
2448 | ||
2449 | /* Root alignment check */ | |
2450 | if (!IS_ALIGNED(btrfs_super_root(sb), sectorsize)) { | |
2451 | btrfs_warn(fs_info, "tree_root block unaligned: %llu", | |
2452 | btrfs_super_root(sb)); | |
2453 | ret = -EINVAL; | |
2454 | } | |
2455 | if (!IS_ALIGNED(btrfs_super_chunk_root(sb), sectorsize)) { | |
2456 | btrfs_warn(fs_info, "chunk_root block unaligned: %llu", | |
2457 | btrfs_super_chunk_root(sb)); | |
2458 | ret = -EINVAL; | |
2459 | } | |
2460 | if (!IS_ALIGNED(btrfs_super_log_root(sb), sectorsize)) { | |
2461 | btrfs_warn(fs_info, "log_root block unaligned: %llu", | |
2462 | btrfs_super_log_root(sb)); | |
2463 | ret = -EINVAL; | |
2464 | } | |
2465 | ||
de37aa51 | 2466 | if (memcmp(fs_info->fs_devices->metadata_uuid, sb->dev_item.fsid, |
7239ff4b | 2467 | BTRFS_FSID_SIZE) != 0) { |
21a852b0 | 2468 | btrfs_err(fs_info, |
7239ff4b | 2469 | "dev_item UUID does not match metadata fsid: %pU != %pU", |
de37aa51 | 2470 | fs_info->fs_devices->metadata_uuid, sb->dev_item.fsid); |
21a852b0 QW |
2471 | ret = -EINVAL; |
2472 | } | |
2473 | ||
2474 | /* | |
2475 | * Hint to catch really bogus numbers (bitflips or similar); more exact checks are |
2476 | * done later | |
2477 | */ | |
2478 | if (btrfs_super_bytes_used(sb) < 6 * btrfs_super_nodesize(sb)) { | |
2479 | btrfs_err(fs_info, "bytes_used is too small %llu", | |
2480 | btrfs_super_bytes_used(sb)); | |
2481 | ret = -EINVAL; | |
2482 | } | |
2483 | if (!is_power_of_2(btrfs_super_stripesize(sb))) { | |
2484 | btrfs_err(fs_info, "invalid stripesize %u", | |
2485 | btrfs_super_stripesize(sb)); | |
2486 | ret = -EINVAL; | |
2487 | } | |
2488 | if (btrfs_super_num_devices(sb) > (1UL << 31)) | |
2489 | btrfs_warn(fs_info, "suspicious number of devices: %llu", | |
2490 | btrfs_super_num_devices(sb)); | |
2491 | if (btrfs_super_num_devices(sb) == 0) { | |
2492 | btrfs_err(fs_info, "number of devices is 0"); | |
2493 | ret = -EINVAL; | |
2494 | } | |
2495 | ||
069ec957 QW |
2496 | if (mirror_num >= 0 && |
2497 | btrfs_super_bytenr(sb) != btrfs_sb_offset(mirror_num)) { | |
21a852b0 QW |
2498 | btrfs_err(fs_info, "super offset mismatch %llu != %llu", |
2499 | btrfs_super_bytenr(sb), btrfs_sb_offset(mirror_num)); |
2500 | ret = -EINVAL; | |
2501 | } | |
2502 | ||
2503 | /* | |
2504 | * Obvious sys_chunk_array corruptions: it must hold at least one key |
2505 | * and one chunk | |
2506 | */ | |
2507 | if (btrfs_super_sys_array_size(sb) > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) { | |
2508 | btrfs_err(fs_info, "system chunk array too big %u > %u", | |
2509 | btrfs_super_sys_array_size(sb), | |
2510 | BTRFS_SYSTEM_CHUNK_ARRAY_SIZE); | |
2511 | ret = -EINVAL; | |
2512 | } | |
2513 | if (btrfs_super_sys_array_size(sb) < sizeof(struct btrfs_disk_key) | |
2514 | + sizeof(struct btrfs_chunk)) { | |
2515 | btrfs_err(fs_info, "system chunk array too small %u < %zu", | |
2516 | btrfs_super_sys_array_size(sb), | |
2517 | sizeof(struct btrfs_disk_key) | |
2518 | + sizeof(struct btrfs_chunk)); | |
2519 | ret = -EINVAL; | |
2520 | } | |
2521 | ||
2522 | /* | |
2523 | * The generation is a global counter; we'll trust it more than the others, |
2524 | * but it's still possible that it's the one that's wrong. | |
2525 | */ | |
2526 | if (btrfs_super_generation(sb) < btrfs_super_chunk_root_generation(sb)) | |
2527 | btrfs_warn(fs_info, | |
2528 | "suspicious: generation < chunk_root_generation: %llu < %llu", | |
2529 | btrfs_super_generation(sb), | |
2530 | btrfs_super_chunk_root_generation(sb)); | |
2531 | if (btrfs_super_generation(sb) < btrfs_super_cache_generation(sb) | |
2532 | && btrfs_super_cache_generation(sb) != (u64)-1) | |
2533 | btrfs_warn(fs_info, | |
2534 | "suspicious: generation < cache_generation: %llu < %llu", | |
2535 | btrfs_super_generation(sb), | |
2536 | btrfs_super_cache_generation(sb)); | |
2537 | ||
2538 | return ret; | |
2539 | } | |
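For mirror_num >= 0 the bytenr check above compares the superblock's recorded bytenr against the canonical copy locations returned by btrfs_sb_offset(). The self-contained sketch below mirrors that mapping under the usual assumptions for this era of the code (primary copy at 64KiB, mirror shift of 12, so copies land at 64KiB, 64MiB and 256GiB); it is illustrative, not the kernel helper itself.

/*
 * Self-contained sketch of the expected superblock copy locations that
 * the mirror_num check in validate_super() compares against.  Assumes
 * a 16KiB base shifted by 12 bits per mirror and a 64KiB primary copy.
 */
static unsigned long long expected_sb_offset(int mirror)
{
	unsigned long long start = 16 * 1024ULL;	/* 16KiB base */

	if (mirror)
		return start << (12 * mirror);		/* 64MiB, 256GiB, ... */
	return 64 * 1024ULL;				/* primary copy at 64KiB */
}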
2540 | ||
069ec957 QW |
2541 | /* |
2542 | * Validation of super block at mount time. | |
2543 | * Checks already done early at mount time, such as csum type and incompat |
2544 | * flags, will be skipped. |
2545 | */ | |
2546 | static int btrfs_validate_mount_super(struct btrfs_fs_info *fs_info) | |
2547 | { | |
2548 | return validate_super(fs_info, fs_info->super_copy, 0); | |
2549 | } | |
2550 | ||
75cb857d QW |
2551 | /* |
2552 | * Validation of super block at write time. | |
2553 | * Some checks, such as the bytenr check, will be skipped as their values will be |
2554 | * overwritten soon. | |
2555 | * Extra checks like csum type and incompat flags will be done here. | |
2556 | */ | |
2557 | static int btrfs_validate_write_super(struct btrfs_fs_info *fs_info, | |
2558 | struct btrfs_super_block *sb) | |
2559 | { | |
2560 | int ret; | |
2561 | ||
2562 | ret = validate_super(fs_info, sb, -1); | |
2563 | if (ret < 0) | |
2564 | goto out; | |
2565 | if (btrfs_super_csum_type(sb) != BTRFS_CSUM_TYPE_CRC32) { | |
2566 | ret = -EUCLEAN; | |
2567 | btrfs_err(fs_info, "invalid csum type, has %u want %u", | |
2568 | btrfs_super_csum_type(sb), BTRFS_CSUM_TYPE_CRC32); | |
2569 | goto out; | |
2570 | } | |
2571 | if (btrfs_super_incompat_flags(sb) & ~BTRFS_FEATURE_INCOMPAT_SUPP) { | |
2572 | ret = -EUCLEAN; | |
2573 | btrfs_err(fs_info, | |
2574 | "invalid incompat flags, has 0x%llx valid mask 0x%llx", | |
2575 | btrfs_super_incompat_flags(sb), | |
2576 | (unsigned long long)BTRFS_FEATURE_INCOMPAT_SUPP); | |
2577 | goto out; | |
2578 | } | |
2579 | out: | |
2580 | if (ret < 0) | |
2581 | btrfs_err(fs_info, | |
2582 | "super block corruption detected before writing it to disk"); | |
2583 | return ret; | |
2584 | } | |
2585 | ||
ad2b2c80 AV |
2586 | int open_ctree(struct super_block *sb, |
2587 | struct btrfs_fs_devices *fs_devices, | |
2588 | char *options) | |
2e635a27 | 2589 | { |
db94535d CM |
2590 | u32 sectorsize; |
2591 | u32 nodesize; | |
87ee04eb | 2592 | u32 stripesize; |
84234f3a | 2593 | u64 generation; |
f2b636e8 | 2594 | u64 features; |
3de4586c | 2595 | struct btrfs_key location; |
a061fc8d | 2596 | struct buffer_head *bh; |
4d34b278 | 2597 | struct btrfs_super_block *disk_super; |
815745cf | 2598 | struct btrfs_fs_info *fs_info = btrfs_sb(sb); |
f84a8bd6 | 2599 | struct btrfs_root *tree_root; |
4d34b278 | 2600 | struct btrfs_root *chunk_root; |
eb60ceac | 2601 | int ret; |
e58ca020 | 2602 | int err = -EINVAL; |
af31f5e5 CM |
2603 | int num_backups_tried = 0; |
2604 | int backup_index = 0; | |
6675df31 | 2605 | int clear_free_space_tree = 0; |
581c1760 | 2606 | int level; |
4543df7e | 2607 | |
74e4d827 DS |
2608 | tree_root = fs_info->tree_root = btrfs_alloc_root(fs_info, GFP_KERNEL); |
2609 | chunk_root = fs_info->chunk_root = btrfs_alloc_root(fs_info, GFP_KERNEL); | |
cb517eab | 2610 | if (!tree_root || !chunk_root) { |
39279cc3 CM |
2611 | err = -ENOMEM; |
2612 | goto fail; | |
2613 | } | |
76dda93c YZ |
2614 | |
2615 | ret = init_srcu_struct(&fs_info->subvol_srcu); | |
2616 | if (ret) { | |
2617 | err = ret; | |
2618 | goto fail; | |
2619 | } | |
2620 | ||
908c7f19 | 2621 | ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0, GFP_KERNEL); |
e2d84521 MX |
2622 | if (ret) { |
2623 | err = ret; | |
9e11ceee | 2624 | goto fail_srcu; |
e2d84521 | 2625 | } |
09cbfeaf | 2626 | fs_info->dirty_metadata_batch = PAGE_SIZE * |
e2d84521 MX |
2627 | (1 + ilog2(nr_cpu_ids)); |
2628 | ||
908c7f19 | 2629 | ret = percpu_counter_init(&fs_info->delalloc_bytes, 0, GFP_KERNEL); |
963d678b MX |
2630 | if (ret) { |
2631 | err = ret; | |
2632 | goto fail_dirty_metadata_bytes; | |
2633 | } | |
2634 | ||
7f8d236a DS |
2635 | ret = percpu_counter_init(&fs_info->dev_replace.bio_counter, 0, |
2636 | GFP_KERNEL); | |
c404e0dc MX |
2637 | if (ret) { |
2638 | err = ret; | |
2639 | goto fail_delalloc_bytes; | |
2640 | } | |
2641 | ||
76dda93c | 2642 | INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC); |
f28491e0 | 2643 | INIT_RADIX_TREE(&fs_info->buffer_radix, GFP_ATOMIC); |
8fd17795 | 2644 | INIT_LIST_HEAD(&fs_info->trans_list); |
facda1e7 | 2645 | INIT_LIST_HEAD(&fs_info->dead_roots); |
24bbcf04 | 2646 | INIT_LIST_HEAD(&fs_info->delayed_iputs); |
eb73c1b7 | 2647 | INIT_LIST_HEAD(&fs_info->delalloc_roots); |
11833d66 | 2648 | INIT_LIST_HEAD(&fs_info->caching_block_groups); |
75cb379d JM |
2649 | INIT_LIST_HEAD(&fs_info->pending_raid_kobjs); |
2650 | spin_lock_init(&fs_info->pending_raid_kobjs_lock); | |
eb73c1b7 | 2651 | spin_lock_init(&fs_info->delalloc_root_lock); |
a4abeea4 | 2652 | spin_lock_init(&fs_info->trans_lock); |
76dda93c | 2653 | spin_lock_init(&fs_info->fs_roots_radix_lock); |
24bbcf04 | 2654 | spin_lock_init(&fs_info->delayed_iput_lock); |
4cb5300b | 2655 | spin_lock_init(&fs_info->defrag_inodes_lock); |
f29021b2 | 2656 | spin_lock_init(&fs_info->tree_mod_seq_lock); |
ceda0864 | 2657 | spin_lock_init(&fs_info->super_lock); |
fcebe456 | 2658 | spin_lock_init(&fs_info->qgroup_op_lock); |
f28491e0 | 2659 | spin_lock_init(&fs_info->buffer_lock); |
47ab2a6c | 2660 | spin_lock_init(&fs_info->unused_bgs_lock); |
f29021b2 | 2661 | rwlock_init(&fs_info->tree_mod_log_lock); |
d7c15171 | 2662 | mutex_init(&fs_info->unused_bg_unpin_mutex); |
67c5e7d4 | 2663 | mutex_init(&fs_info->delete_unused_bgs_mutex); |
7585717f | 2664 | mutex_init(&fs_info->reloc_mutex); |
573bfb72 | 2665 | mutex_init(&fs_info->delalloc_root_mutex); |
c2d6cb16 | 2666 | mutex_init(&fs_info->cleaner_delayed_iput_mutex); |
de98ced9 | 2667 | seqlock_init(&fs_info->profiles_lock); |
19c00ddc | 2668 | |
0b86a832 | 2669 | INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots); |
6324fbf3 | 2670 | INIT_LIST_HEAD(&fs_info->space_info); |
f29021b2 | 2671 | INIT_LIST_HEAD(&fs_info->tree_mod_seq_list); |
47ab2a6c | 2672 | INIT_LIST_HEAD(&fs_info->unused_bgs); |
0b86a832 | 2673 | btrfs_mapping_init(&fs_info->mapping_tree); |
66d8f3dd MX |
2674 | btrfs_init_block_rsv(&fs_info->global_block_rsv, |
2675 | BTRFS_BLOCK_RSV_GLOBAL); | |
66d8f3dd MX |
2676 | btrfs_init_block_rsv(&fs_info->trans_block_rsv, BTRFS_BLOCK_RSV_TRANS); |
2677 | btrfs_init_block_rsv(&fs_info->chunk_block_rsv, BTRFS_BLOCK_RSV_CHUNK); | |
2678 | btrfs_init_block_rsv(&fs_info->empty_block_rsv, BTRFS_BLOCK_RSV_EMPTY); | |
2679 | btrfs_init_block_rsv(&fs_info->delayed_block_rsv, | |
2680 | BTRFS_BLOCK_RSV_DELOPS); | |
ba2c4d4e JB |
2681 | btrfs_init_block_rsv(&fs_info->delayed_refs_rsv, |
2682 | BTRFS_BLOCK_RSV_DELREFS); | |
2683 | ||
771ed689 | 2684 | atomic_set(&fs_info->async_delalloc_pages, 0); |
4cb5300b | 2685 | atomic_set(&fs_info->defrag_running, 0); |
fcebe456 | 2686 | atomic_set(&fs_info->qgroup_op_seq, 0); |
2fefd558 | 2687 | atomic_set(&fs_info->reada_works_cnt, 0); |
fc36ed7e | 2688 | atomic64_set(&fs_info->tree_mod_seq, 0); |
e20d96d6 | 2689 | fs_info->sb = sb; |
95ac567a | 2690 | fs_info->max_inline = BTRFS_DEFAULT_MAX_INLINE; |
9ed74f2d | 2691 | fs_info->metadata_ratio = 0; |
4cb5300b | 2692 | fs_info->defrag_inodes = RB_ROOT; |
a5ed45f8 | 2693 | atomic64_set(&fs_info->free_chunk_space, 0); |
f29021b2 | 2694 | fs_info->tree_mod_log = RB_ROOT; |
8b87dc17 | 2695 | fs_info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL; |
f8c269d7 | 2696 | fs_info->avg_delayed_ref_runtime = NSEC_PER_SEC >> 6; /* div by 64 */ |
90519d66 | 2697 | /* readahead state */ |
d0164adc | 2698 | INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_DIRECT_RECLAIM); |
90519d66 | 2699 | spin_lock_init(&fs_info->reada_lock); |
fd708b81 | 2700 | btrfs_init_ref_verify(fs_info); |
c8b97818 | 2701 | |
b34b086c CM |
2702 | fs_info->thread_pool_size = min_t(unsigned long, |
2703 | num_online_cpus() + 2, 8); | |
0afbaf8c | 2704 | |
199c2a9c MX |
2705 | INIT_LIST_HEAD(&fs_info->ordered_roots); |
2706 | spin_lock_init(&fs_info->ordered_root_lock); | |
69fe2d75 JB |
2707 | |
2708 | fs_info->btree_inode = new_inode(sb); | |
2709 | if (!fs_info->btree_inode) { | |
2710 | err = -ENOMEM; | |
2711 | goto fail_bio_counter; | |
2712 | } | |
2713 | mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS); | |
2714 | ||
16cdcec7 | 2715 | fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root), |
74e4d827 | 2716 | GFP_KERNEL); |
16cdcec7 MX |
2717 | if (!fs_info->delayed_root) { |
2718 | err = -ENOMEM; | |
2719 | goto fail_iput; | |
2720 | } | |
2721 | btrfs_init_delayed_root(fs_info->delayed_root); | |
3eaa2885 | 2722 | |
638aa7ed | 2723 | btrfs_init_scrub(fs_info); |
21adbd5c SB |
2724 | #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY |
2725 | fs_info->check_integrity_print_mask = 0; | |
2726 | #endif | |
779a65a4 | 2727 | btrfs_init_balance(fs_info); |
21c7e756 | 2728 | btrfs_init_async_reclaim_work(&fs_info->async_reclaim_work); |
a2de733c | 2729 | |
9f6d2510 DS |
2730 | sb->s_blocksize = BTRFS_BDEV_BLOCKSIZE; |
2731 | sb->s_blocksize_bits = blksize_bits(BTRFS_BDEV_BLOCKSIZE); | |
a061fc8d | 2732 | |
6bccf3ab | 2733 | btrfs_init_btree_inode(fs_info); |
76dda93c | 2734 | |
0f9dd46c | 2735 | spin_lock_init(&fs_info->block_group_cache_lock); |
6bef4d31 | 2736 | fs_info->block_group_cache_tree = RB_ROOT; |
a1897fdd | 2737 | fs_info->first_logical_byte = (u64)-1; |
0f9dd46c | 2738 | |
c6100a4b JB |
2739 | extent_io_tree_init(&fs_info->freed_extents[0], NULL); |
2740 | extent_io_tree_init(&fs_info->freed_extents[1], NULL); | |
11833d66 | 2741 | fs_info->pinned_extents = &fs_info->freed_extents[0]; |
afcdd129 | 2742 | set_bit(BTRFS_FS_BARRIER, &fs_info->flags); |
39279cc3 | 2743 | |
5a3f23d5 | 2744 | mutex_init(&fs_info->ordered_operations_mutex); |
e02119d5 | 2745 | mutex_init(&fs_info->tree_log_mutex); |
925baedd | 2746 | mutex_init(&fs_info->chunk_mutex); |
a74a4b97 CM |
2747 | mutex_init(&fs_info->transaction_kthread_mutex); |
2748 | mutex_init(&fs_info->cleaner_mutex); | |
1bbc621e | 2749 | mutex_init(&fs_info->ro_block_group_mutex); |
9e351cc8 | 2750 | init_rwsem(&fs_info->commit_root_sem); |
c71bf099 | 2751 | init_rwsem(&fs_info->cleanup_work_sem); |
76dda93c | 2752 | init_rwsem(&fs_info->subvol_sem); |
803b2f54 | 2753 | sema_init(&fs_info->uuid_tree_rescan_sem, 1); |
fa9c0d79 | 2754 | |
ad618368 | 2755 | btrfs_init_dev_replace_locks(fs_info); |
f9e92e40 | 2756 | btrfs_init_qgroup(fs_info); |
416ac51d | 2757 | |
fa9c0d79 CM |
2758 | btrfs_init_free_cluster(&fs_info->meta_alloc_cluster); |
2759 | btrfs_init_free_cluster(&fs_info->data_alloc_cluster); | |
2760 | ||
e6dcd2dc | 2761 | init_waitqueue_head(&fs_info->transaction_throttle); |
f9295749 | 2762 | init_waitqueue_head(&fs_info->transaction_wait); |
bb9c12c9 | 2763 | init_waitqueue_head(&fs_info->transaction_blocked_wait); |
4854ddd0 | 2764 | init_waitqueue_head(&fs_info->async_submit_wait); |
3768f368 | 2765 | |
04216820 FM |
2766 | INIT_LIST_HEAD(&fs_info->pinned_chunks); |
2767 | ||
da17066c JM |
2768 | /* Usable values until the real ones are cached from the superblock */ |
2769 | fs_info->nodesize = 4096; | |
2770 | fs_info->sectorsize = 4096; | |
2771 | fs_info->stripesize = 4096; | |
2772 | ||
eede2bf3 OS |
2773 | spin_lock_init(&fs_info->swapfile_pins_lock); |
2774 | fs_info->swapfile_pins = RB_ROOT; | |
2775 | ||
53b381b3 DW |
2776 | ret = btrfs_alloc_stripe_hash_table(fs_info); |
2777 | if (ret) { | |
83c8266a | 2778 | err = ret; |
53b381b3 DW |
2779 | goto fail_alloc; |
2780 | } | |
2781 | ||
da17066c | 2782 | __setup_root(tree_root, fs_info, BTRFS_ROOT_TREE_OBJECTID); |
7eccb903 | 2783 | |
3c4bb26b | 2784 | invalidate_bdev(fs_devices->latest_bdev); |
1104a885 DS |
2785 | |
2786 | /* | |
2787 | * Read super block and check the signature bytes only | |
2788 | */ | |
a512bbf8 | 2789 | bh = btrfs_read_dev_super(fs_devices->latest_bdev); |
92fc03fb AJ |
2790 | if (IS_ERR(bh)) { |
2791 | err = PTR_ERR(bh); | |
16cdcec7 | 2792 | goto fail_alloc; |
20b45077 | 2793 | } |
39279cc3 | 2794 | |
1104a885 DS |
2795 | /* |
2796 | * We want to check superblock checksum, the type is stored inside. | |
2797 | * Pass the whole disk block of size BTRFS_SUPER_INFO_SIZE (4k). | |
2798 | */ | |
ab8d0fc4 | 2799 | if (btrfs_check_super_csum(fs_info, bh->b_data)) { |
05135f59 | 2800 | btrfs_err(fs_info, "superblock checksum mismatch"); |
1104a885 | 2801 | err = -EINVAL; |
b2acdddf | 2802 | brelse(bh); |
1104a885 DS |
2803 | goto fail_alloc; |
2804 | } | |
2805 | ||
2806 | /* | |
2807 | * super_copy is zeroed at allocation time and we never touch the | |
2808 | * following bytes up to INFO_SIZE, the checksum is calculated from | |
2809 | * the whole block of INFO_SIZE | |
2810 | */ | |
6c41761f | 2811 | memcpy(fs_info->super_copy, bh->b_data, sizeof(*fs_info->super_copy)); |
a061fc8d | 2812 | brelse(bh); |
5f39d397 | 2813 | |
fbc6feae NB |
2814 | disk_super = fs_info->super_copy; |
2815 | ||
de37aa51 NB |
2816 | ASSERT(!memcmp(fs_info->fs_devices->fsid, fs_info->super_copy->fsid, |
2817 | BTRFS_FSID_SIZE)); | |
2818 | ||
7239ff4b | 2819 | if (btrfs_fs_incompat(fs_info, METADATA_UUID)) { |
de37aa51 NB |
2820 | ASSERT(!memcmp(fs_info->fs_devices->metadata_uuid, |
2821 | fs_info->super_copy->metadata_uuid, | |
2822 | BTRFS_FSID_SIZE)); | |
7239ff4b | 2823 | } |
0b86a832 | 2824 | |
fbc6feae NB |
2825 | features = btrfs_super_flags(disk_super); |
2826 | if (features & BTRFS_SUPER_FLAG_CHANGING_FSID_V2) { | |
2827 | features &= ~BTRFS_SUPER_FLAG_CHANGING_FSID_V2; | |
2828 | btrfs_set_super_flags(disk_super, features); | |
2829 | btrfs_info(fs_info, | |
2830 | "found metadata UUID change in progress flag, clearing"); | |
2831 | } | |
2832 | ||
2833 | memcpy(fs_info->super_for_commit, fs_info->super_copy, | |
2834 | sizeof(*fs_info->super_for_commit)); | |
de37aa51 | 2835 | |
069ec957 | 2836 | ret = btrfs_validate_mount_super(fs_info); |
1104a885 | 2837 | if (ret) { |
05135f59 | 2838 | btrfs_err(fs_info, "superblock contains fatal errors"); |
1104a885 DS |
2839 | err = -EINVAL; |
2840 | goto fail_alloc; | |
2841 | } | |
2842 | ||
0f7d52f4 | 2843 | if (!btrfs_super_root(disk_super)) |
16cdcec7 | 2844 | goto fail_alloc; |
0f7d52f4 | 2845 | |
acce952b | 2846 | /* check the FS state to see whether the FS is broken */ |
87533c47 MX |
2847 | if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_ERROR) |
2848 | set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state); | |
acce952b | 2849 | |
af31f5e5 CM |
2850 | /* |
2851 | * run through our array of backup supers and set up |
2852 | * our ring pointer to the oldest one | |
2853 | */ | |
2854 | generation = btrfs_super_generation(disk_super); | |
2855 | find_oldest_super_backup(fs_info, generation); | |
2856 | ||
75e7cb7f LB |
2857 | /* |
2858 | * In the long term, we'll store the compression type in the super | |
2859 | * block, and it'll be used for per file compression control. | |
2860 | */ | |
2861 | fs_info->compress_type = BTRFS_COMPRESS_ZLIB; | |
2862 | ||
2ff7e61e | 2863 | ret = btrfs_parse_options(fs_info, options, sb->s_flags); |
2b82032c YZ |
2864 | if (ret) { |
2865 | err = ret; | |
16cdcec7 | 2866 | goto fail_alloc; |
2b82032c | 2867 | } |
dfe25020 | 2868 | |
f2b636e8 JB |
2869 | features = btrfs_super_incompat_flags(disk_super) & |
2870 | ~BTRFS_FEATURE_INCOMPAT_SUPP; | |
2871 | if (features) { | |
05135f59 DS |
2872 | btrfs_err(fs_info, |
2873 | "cannot mount because of unsupported optional features (%llx)", | |
2874 | features); | |
f2b636e8 | 2875 | err = -EINVAL; |
16cdcec7 | 2876 | goto fail_alloc; |
f2b636e8 JB |
2877 | } |
2878 | ||
5d4f98a2 | 2879 | features = btrfs_super_incompat_flags(disk_super); |
a6fa6fae | 2880 | features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF; |
0b246afa | 2881 | if (fs_info->compress_type == BTRFS_COMPRESS_LZO) |
a6fa6fae | 2882 | features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO; |
5c1aab1d NT |
2883 | else if (fs_info->compress_type == BTRFS_COMPRESS_ZSTD) |
2884 | features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_ZSTD; | |
727011e0 | 2885 | |
3173a18f | 2886 | if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA) |
05135f59 | 2887 | btrfs_info(fs_info, "has skinny extents"); |
3173a18f | 2888 | |
727011e0 CM |
2889 | /* |
2890 | * flag our filesystem as having big metadata blocks if | |
2891 | * they are bigger than the page size | |
2892 | */ | |
09cbfeaf | 2893 | if (btrfs_super_nodesize(disk_super) > PAGE_SIZE) { |
727011e0 | 2894 | if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA)) |
05135f59 DS |
2895 | btrfs_info(fs_info, |
2896 | "flagging fs with big metadata feature"); | |
727011e0 CM |
2897 | features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA; |
2898 | } | |
2899 | ||
bc3f116f | 2900 | nodesize = btrfs_super_nodesize(disk_super); |
bc3f116f | 2901 | sectorsize = btrfs_super_sectorsize(disk_super); |
b7f67055 | 2902 | stripesize = sectorsize; |
707e8a07 | 2903 | fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids)); |
963d678b | 2904 | fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids)); |
bc3f116f | 2905 | |
da17066c JM |
2906 | /* Cache block sizes */ |
2907 | fs_info->nodesize = nodesize; | |
2908 | fs_info->sectorsize = sectorsize; | |
2909 | fs_info->stripesize = stripesize; | |
2910 | ||
bc3f116f CM |
2911 | /* |
2912 | * mixed block groups end up with duplicate but slightly offset | |
2913 | * extent buffers for the same range, which leads to corruption. |
2914 | */ | |
2915 | if ((features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) && | |
707e8a07 | 2916 | (sectorsize != nodesize)) { |
05135f59 DS |
2917 | btrfs_err(fs_info, |
2918 | "unequal nodesize/sectorsize (%u != %u) are not allowed for mixed block groups", | |
2919 | nodesize, sectorsize); | |
bc3f116f CM |
2920 | goto fail_alloc; |
2921 | } | |
2922 | ||
ceda0864 MX |
2923 | /* |
2924 | * No need to take the lock because there is no other task that will |
2925 | * update the flag. |
2926 | */ | |
a6fa6fae | 2927 | btrfs_set_super_incompat_flags(disk_super, features); |
5d4f98a2 | 2928 | |
f2b636e8 JB |
2929 | features = btrfs_super_compat_ro_flags(disk_super) & |
2930 | ~BTRFS_FEATURE_COMPAT_RO_SUPP; | |
bc98a42c | 2931 | if (!sb_rdonly(sb) && features) { |
05135f59 DS |
2932 | btrfs_err(fs_info, |
2933 | "cannot mount read-write because of unsupported optional features (%llx)", | |
c1c9ff7c | 2934 | features); |
f2b636e8 | 2935 | err = -EINVAL; |
16cdcec7 | 2936 | goto fail_alloc; |
f2b636e8 | 2937 | } |
61d92c32 | 2938 | |
2a458198 ES |
2939 | ret = btrfs_init_workqueues(fs_info, fs_devices); |
2940 | if (ret) { | |
2941 | err = ret; | |
0dc3b84a JB |
2942 | goto fail_sb_buffer; |
2943 | } | |
4543df7e | 2944 | |
9e11ceee JK |
2945 | sb->s_bdi->congested_fn = btrfs_congested_fn; |
2946 | sb->s_bdi->congested_data = fs_info; | |
2947 | sb->s_bdi->capabilities |= BDI_CAP_CGROUP_WRITEBACK; | |
d4417e22 | 2948 | sb->s_bdi->ra_pages = VM_MAX_READAHEAD * SZ_1K / PAGE_SIZE; |
9e11ceee JK |
2949 | sb->s_bdi->ra_pages *= btrfs_super_num_devices(disk_super); |
2950 | sb->s_bdi->ra_pages = max(sb->s_bdi->ra_pages, SZ_4M / PAGE_SIZE); | |
4575c9cc | 2951 | |
a061fc8d CM |
2952 | sb->s_blocksize = sectorsize; |
2953 | sb->s_blocksize_bits = blksize_bits(sectorsize); | |
de37aa51 | 2954 | memcpy(&sb->s_uuid, fs_info->fs_devices->fsid, BTRFS_FSID_SIZE); |
db94535d | 2955 | |
925baedd | 2956 | mutex_lock(&fs_info->chunk_mutex); |
6bccf3ab | 2957 | ret = btrfs_read_sys_array(fs_info); |
925baedd | 2958 | mutex_unlock(&fs_info->chunk_mutex); |
84eed90f | 2959 | if (ret) { |
05135f59 | 2960 | btrfs_err(fs_info, "failed to read the system array: %d", ret); |
5d4f98a2 | 2961 | goto fail_sb_buffer; |
84eed90f | 2962 | } |
0b86a832 | 2963 | |
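 | /* read the chunk tree root referenced by the super block, then load the chunk tree from it */ |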
84234f3a | 2964 | generation = btrfs_super_chunk_root_generation(disk_super); |
581c1760 | 2965 | level = btrfs_super_chunk_root_level(disk_super); |
0b86a832 | 2966 | |
da17066c | 2967 | __setup_root(chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID); |
0b86a832 | 2968 | |
2ff7e61e | 2969 | chunk_root->node = read_tree_block(fs_info, |
0b86a832 | 2970 | btrfs_super_chunk_root(disk_super), |
581c1760 | 2971 | generation, level, NULL); |
64c043de LB |
2972 | if (IS_ERR(chunk_root->node) || |
2973 | !extent_buffer_uptodate(chunk_root->node)) { | |
05135f59 | 2974 | btrfs_err(fs_info, "failed to read chunk root"); |
e5fffbac | 2975 | if (!IS_ERR(chunk_root->node)) |
2976 | free_extent_buffer(chunk_root->node); | |
95ab1f64 | 2977 | chunk_root->node = NULL; |
af31f5e5 | 2978 | goto fail_tree_roots; |
83121942 | 2979 | } |
5d4f98a2 YZ |
2980 | btrfs_set_root_node(&chunk_root->root_item, chunk_root->node); |
2981 | chunk_root->commit_root = btrfs_root_node(chunk_root); | |
0b86a832 | 2982 | |
e17cade2 | 2983 | read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid, |
b308bc2f | 2984 | btrfs_header_chunk_tree_uuid(chunk_root->node), BTRFS_UUID_SIZE); |
e17cade2 | 2985 | |
5b4aacef | 2986 | ret = btrfs_read_chunk_tree(fs_info); |
2b82032c | 2987 | if (ret) { |
05135f59 | 2988 | btrfs_err(fs_info, "failed to read chunk tree: %d", ret); |
af31f5e5 | 2989 | goto fail_tree_roots; |
2b82032c | 2990 | } |
0b86a832 | 2991 | |
8dabb742 | 2992 | /* |
9b99b115 AJ |
2993 | * Keep the devid that is marked to be the target device for the |
2994 | * device replace procedure | |
8dabb742 | 2995 | */ |
9b99b115 | 2996 | btrfs_free_extra_devids(fs_devices, 0); |
dfe25020 | 2997 | |
a6b0d5c8 | 2998 | if (!fs_devices->latest_bdev) { |
05135f59 | 2999 | btrfs_err(fs_info, "failed to read devices"); |
a6b0d5c8 CM |
3000 | goto fail_tree_roots; |
3001 | } | |
3002 | ||
af31f5e5 | 3003 | retry_root_backup: |
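 | /* read the tree root; on failure this may be retried from an older backup root (see recovery_tree_root) */ |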
84234f3a | 3004 | generation = btrfs_super_generation(disk_super); |
581c1760 | 3005 | level = btrfs_super_root_level(disk_super); |
0b86a832 | 3006 | |
2ff7e61e | 3007 | tree_root->node = read_tree_block(fs_info, |
db94535d | 3008 | btrfs_super_root(disk_super), |
581c1760 | 3009 | generation, level, NULL); |
64c043de LB |
3010 | if (IS_ERR(tree_root->node) || |
3011 | !extent_buffer_uptodate(tree_root->node)) { | |
05135f59 | 3012 | btrfs_warn(fs_info, "failed to read tree root"); |
e5fffbac | 3013 | if (!IS_ERR(tree_root->node)) |
3014 | free_extent_buffer(tree_root->node); | |
95ab1f64 | 3015 | tree_root->node = NULL; |
af31f5e5 | 3016 | goto recovery_tree_root; |
83121942 | 3017 | } |
af31f5e5 | 3018 | |
5d4f98a2 YZ |
3019 | btrfs_set_root_node(&tree_root->root_item, tree_root->node); |
3020 | tree_root->commit_root = btrfs_root_node(tree_root); | |
69e9c6c6 | 3021 | btrfs_set_root_refs(&tree_root->root_item, 1); |
db94535d | 3022 | |
f32e48e9 CR |
3023 | mutex_lock(&tree_root->objectid_mutex); |
3024 | ret = btrfs_find_highest_objectid(tree_root, | |
3025 | &tree_root->highest_objectid); | |
3026 | if (ret) { | |
3027 | mutex_unlock(&tree_root->objectid_mutex); | |
3028 | goto recovery_tree_root; | |
3029 | } | |
3030 | ||
3031 | ASSERT(tree_root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID); | |
3032 | ||
3033 | mutex_unlock(&tree_root->objectid_mutex); | |
3034 | ||
6bccf3ab | 3035 | ret = btrfs_read_roots(fs_info); |
4bbcaa64 | 3036 | if (ret) |
af31f5e5 | 3037 | goto recovery_tree_root; |
f7a81ea4 | 3038 | |
8929ecfa YZ |
3039 | fs_info->generation = generation; |
3040 | fs_info->last_trans_committed = generation; | |
8929ecfa | 3041 | |
cf90d884 QW |
3042 | ret = btrfs_verify_dev_extents(fs_info); |
3043 | if (ret) { | |
3044 | btrfs_err(fs_info, | |
3045 | "failed to verify dev extents against chunks: %d", | |
3046 | ret); | |
3047 | goto fail_block_groups; | |
3048 | } | |
68310a5e ID |
3049 | ret = btrfs_recover_balance(fs_info); |
3050 | if (ret) { | |
05135f59 | 3051 | btrfs_err(fs_info, "failed to recover balance: %d", ret); |
68310a5e ID |
3052 | goto fail_block_groups; |
3053 | } | |
3054 | ||
733f4fbb SB |
3055 | ret = btrfs_init_dev_stats(fs_info); |
3056 | if (ret) { | |
05135f59 | 3057 | btrfs_err(fs_info, "failed to init dev_stats: %d", ret); |
733f4fbb SB |
3058 | goto fail_block_groups; |
3059 | } | |
3060 | ||
8dabb742 SB |
3061 | ret = btrfs_init_dev_replace(fs_info); |
3062 | if (ret) { | |
05135f59 | 3063 | btrfs_err(fs_info, "failed to init dev_replace: %d", ret); |
8dabb742 SB |
3064 | goto fail_block_groups; |
3065 | } | |
3066 | ||
9b99b115 | 3067 | btrfs_free_extra_devids(fs_devices, 1); |
8dabb742 | 3068 | |
b7c35e81 AJ |
3069 | ret = btrfs_sysfs_add_fsid(fs_devices, NULL); |
3070 | if (ret) { | |
05135f59 DS |
3071 | btrfs_err(fs_info, "failed to init sysfs fsid interface: %d", |
3072 | ret); | |
b7c35e81 AJ |
3073 | goto fail_block_groups; |
3074 | } | |
3075 | ||
3076 | ret = btrfs_sysfs_add_device(fs_devices); | |
3077 | if (ret) { | |
05135f59 DS |
3078 | btrfs_err(fs_info, "failed to init sysfs device interface: %d", |
3079 | ret); | |
b7c35e81 AJ |
3080 | goto fail_fsdev_sysfs; |
3081 | } | |
3082 | ||
96f3136e | 3083 | ret = btrfs_sysfs_add_mounted(fs_info); |
c59021f8 | 3084 | if (ret) { |
05135f59 | 3085 | btrfs_err(fs_info, "failed to init sysfs interface: %d", ret); |
b7c35e81 | 3086 | goto fail_fsdev_sysfs; |
c59021f8 | 3087 | } |
3088 | ||
c59021f8 | 3089 | ret = btrfs_init_space_info(fs_info); |
3090 | if (ret) { | |
05135f59 | 3091 | btrfs_err(fs_info, "failed to initialize space info: %d", ret); |
2365dd3c | 3092 | goto fail_sysfs; |
c59021f8 | 3093 | } |
3094 | ||
5b4aacef | 3095 | ret = btrfs_read_block_groups(fs_info); |
1b1d1f66 | 3096 | if (ret) { |
05135f59 | 3097 | btrfs_err(fs_info, "failed to read block groups: %d", ret); |
2365dd3c | 3098 | goto fail_sysfs; |
1b1d1f66 | 3099 | } |
4330e183 | 3100 | |
6528b99d | 3101 | if (!sb_rdonly(sb) && !btrfs_check_rw_degradable(fs_info, NULL)) { |
05135f59 | 3102 | btrfs_warn(fs_info, |
52042d8e | 3103 | "writable mount is not allowed due to too many missing devices"); |
2365dd3c | 3104 | goto fail_sysfs; |
292fd7fc | 3105 | } |
9078a3e1 | 3106 | |
a74a4b97 CM |
3107 | fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root, |
3108 | "btrfs-cleaner"); | |
57506d50 | 3109 | if (IS_ERR(fs_info->cleaner_kthread)) |
2365dd3c | 3110 | goto fail_sysfs; |
a74a4b97 CM |
3111 | |
3112 | fs_info->transaction_kthread = kthread_run(transaction_kthread, | |
3113 | tree_root, | |
3114 | "btrfs-transaction"); | |
57506d50 | 3115 | if (IS_ERR(fs_info->transaction_kthread)) |
3f157a2f | 3116 | goto fail_cleaner; |
a74a4b97 | 3117 | |
583b7231 | 3118 | if (!btrfs_test_opt(fs_info, NOSSD) && |
c289811c | 3119 | !fs_info->fs_devices->rotating) { |
583b7231 | 3120 | btrfs_set_and_info(fs_info, SSD, "enabling ssd optimizations"); |
c289811c CM |
3121 | } |
3122 | ||
572d9ab7 | 3123 | /* |
01327610 | 3124 | * Mount does not set all options immediately, we can do it now and do |
572d9ab7 DS |
3125 | * not have to wait for transaction commit |
3126 | */ | |
3127 | btrfs_apply_pending_changes(fs_info); | |
3818aea2 | 3128 | |
21adbd5c | 3129 | #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY |
0b246afa | 3130 | if (btrfs_test_opt(fs_info, CHECK_INTEGRITY)) { |
2ff7e61e | 3131 | ret = btrfsic_mount(fs_info, fs_devices, |
0b246afa | 3132 | btrfs_test_opt(fs_info, |
21adbd5c SB |
3133 | CHECK_INTEGRITY_INCLUDING_EXTENT_DATA) ? |
3134 | 1 : 0, | |
3135 | fs_info->check_integrity_print_mask); | |
3136 | if (ret) | |
05135f59 DS |
3137 | btrfs_warn(fs_info, |
3138 | "failed to initialize integrity check module: %d", | |
3139 | ret); | |
21adbd5c SB |
3140 | } |
3141 | #endif | |
bcef60f2 AJ |
3142 | ret = btrfs_read_qgroup_config(fs_info); |
3143 | if (ret) | |
3144 | goto fail_trans_kthread; | |
21adbd5c | 3145 | |
fd708b81 JB |
3146 | if (btrfs_build_ref_tree(fs_info)) |
3147 | btrfs_err(fs_info, "couldn't build ref tree"); | |
3148 | ||
96da0919 QW |
3149 | /* do not make disk changes in a broken FS or when nologreplay is given */ |
3150 | if (btrfs_super_log_root(disk_super) != 0 && | |
0b246afa | 3151 | !btrfs_test_opt(fs_info, NOLOGREPLAY)) { |
63443bf5 | 3152 | ret = btrfs_replay_log(fs_info, fs_devices); |
79787eaa | 3153 | if (ret) { |
63443bf5 | 3154 | err = ret; |
28c16cbb | 3155 | goto fail_qgroup; |
79787eaa | 3156 | } |
e02119d5 | 3157 | } |
1a40e23b | 3158 | |
6bccf3ab | 3159 | ret = btrfs_find_orphan_roots(fs_info); |
79787eaa | 3160 | if (ret) |
28c16cbb | 3161 | goto fail_qgroup; |
76dda93c | 3162 | |
bc98a42c | 3163 | if (!sb_rdonly(sb)) { |
d68fc57b | 3164 | ret = btrfs_cleanup_fs_roots(fs_info); |
44c44af2 | 3165 | if (ret) |
28c16cbb | 3166 | goto fail_qgroup; |
90c711ab ZB |
3167 | |
3168 | mutex_lock(&fs_info->cleaner_mutex); | |
5d4f98a2 | 3169 | ret = btrfs_recover_relocation(tree_root); |
90c711ab | 3170 | mutex_unlock(&fs_info->cleaner_mutex); |
d7ce5843 | 3171 | if (ret < 0) { |
05135f59 DS |
3172 | btrfs_warn(fs_info, "failed to recover relocation: %d", |
3173 | ret); | |
d7ce5843 | 3174 | err = -EINVAL; |
bcef60f2 | 3175 | goto fail_qgroup; |
d7ce5843 | 3176 | } |
7c2ca468 | 3177 | } |
1a40e23b | 3178 | |
3de4586c CM |
3179 | location.objectid = BTRFS_FS_TREE_OBJECTID; |
3180 | location.type = BTRFS_ROOT_ITEM_KEY; | |
cb517eab | 3181 | location.offset = 0; |
3de4586c | 3182 | |
3de4586c | 3183 | fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location); |
3140c9a3 DC |
3184 | if (IS_ERR(fs_info->fs_root)) { |
3185 | err = PTR_ERR(fs_info->fs_root); | |
f50f4353 | 3186 | btrfs_warn(fs_info, "failed to read fs tree: %d", err); |
bcef60f2 | 3187 | goto fail_qgroup; |
3140c9a3 | 3188 | } |
c289811c | 3189 | |
bc98a42c | 3190 | if (sb_rdonly(sb)) |
2b6ba629 | 3191 | return 0; |
59641015 | 3192 | |
f8d468a1 OS |
3193 | if (btrfs_test_opt(fs_info, CLEAR_CACHE) && |
3194 | btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) { | |
6675df31 OS |
3195 | clear_free_space_tree = 1; |
3196 | } else if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) && | |
3197 | !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID)) { | |
3198 | btrfs_warn(fs_info, "free space tree is invalid"); | |
3199 | clear_free_space_tree = 1; | |
3200 | } | |
3201 | ||
3202 | if (clear_free_space_tree) { | |
f8d468a1 OS |
3203 | btrfs_info(fs_info, "clearing free space tree"); |
3204 | ret = btrfs_clear_free_space_tree(fs_info); | |
3205 | if (ret) { | |
3206 | btrfs_warn(fs_info, | |
3207 | "failed to clear free space tree: %d", ret); | |
6bccf3ab | 3208 | close_ctree(fs_info); |
f8d468a1 OS |
3209 | return ret; |
3210 | } | |
3211 | } | |
3212 | ||
0b246afa | 3213 | if (btrfs_test_opt(fs_info, FREE_SPACE_TREE) && |
511711af | 3214 | !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) { |
05135f59 | 3215 | btrfs_info(fs_info, "creating free space tree"); |
511711af CM |
3216 | ret = btrfs_create_free_space_tree(fs_info); |
3217 | if (ret) { | |
05135f59 DS |
3218 | btrfs_warn(fs_info, |
3219 | "failed to create free space tree: %d", ret); | |
6bccf3ab | 3220 | close_ctree(fs_info); |
511711af CM |
3221 | return ret; |
3222 | } | |
3223 | } | |
3224 | ||
2b6ba629 ID |
3225 | down_read(&fs_info->cleanup_work_sem); |
3226 | if ((ret = btrfs_orphan_cleanup(fs_info->fs_root)) || | |
3227 | (ret = btrfs_orphan_cleanup(fs_info->tree_root))) { | |
e3acc2a6 | 3228 | up_read(&fs_info->cleanup_work_sem); |
6bccf3ab | 3229 | close_ctree(fs_info); |
2b6ba629 ID |
3230 | return ret; |
3231 | } | |
3232 | up_read(&fs_info->cleanup_work_sem); | |
59641015 | 3233 | |
2b6ba629 ID |
3234 | ret = btrfs_resume_balance_async(fs_info); |
3235 | if (ret) { | |
05135f59 | 3236 | btrfs_warn(fs_info, "failed to resume balance: %d", ret); |
6bccf3ab | 3237 | close_ctree(fs_info); |
2b6ba629 | 3238 | return ret; |
e3acc2a6 JB |
3239 | } |
3240 | ||
8dabb742 SB |
3241 | ret = btrfs_resume_dev_replace_async(fs_info); |
3242 | if (ret) { | |
05135f59 | 3243 | btrfs_warn(fs_info, "failed to resume device replace: %d", ret); |
6bccf3ab | 3244 | close_ctree(fs_info); |
8dabb742 SB |
3245 | return ret; |
3246 | } | |
3247 | ||
b382a324 JS |
3248 | btrfs_qgroup_rescan_resume(fs_info); |
3249 | ||
4bbcaa64 | 3250 | if (!fs_info->uuid_root) { |
05135f59 | 3251 | btrfs_info(fs_info, "creating UUID tree"); |
f7a81ea4 SB |
3252 | ret = btrfs_create_uuid_tree(fs_info); |
3253 | if (ret) { | |
05135f59 DS |
3254 | btrfs_warn(fs_info, |
3255 | "failed to create the UUID tree: %d", ret); | |
6bccf3ab | 3256 | close_ctree(fs_info); |
f7a81ea4 SB |
3257 | return ret; |
3258 | } | |
0b246afa | 3259 | } else if (btrfs_test_opt(fs_info, RESCAN_UUID_TREE) || |
4bbcaa64 ES |
3260 | fs_info->generation != |
3261 | btrfs_super_uuid_tree_generation(disk_super)) { | |
05135f59 | 3262 | btrfs_info(fs_info, "checking UUID tree"); |
70f80175 SB |
3263 | ret = btrfs_check_uuid_tree(fs_info); |
3264 | if (ret) { | |
05135f59 DS |
3265 | btrfs_warn(fs_info, |
3266 | "failed to check the UUID tree: %d", ret); | |
6bccf3ab | 3267 | close_ctree(fs_info); |
70f80175 SB |
3268 | return ret; |
3269 | } | |
3270 | } else { | |
afcdd129 | 3271 | set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags); |
f7a81ea4 | 3272 | } |
afcdd129 | 3273 | set_bit(BTRFS_FS_OPEN, &fs_info->flags); |
47ab2a6c | 3274 | |
8dcddfa0 QW |
3275 | /* |
3276 | * backuproot only affects mount behavior, and if open_ctree succeeded, |
3277 | * there is no need to keep the flag |
3278 | */ | |
3279 | btrfs_clear_opt(fs_info->mount_opt, USEBACKUPROOT); | |
3280 | ||
ad2b2c80 | 3281 | return 0; |
39279cc3 | 3282 | |
bcef60f2 AJ |
3283 | fail_qgroup: |
3284 | btrfs_free_qgroup_config(fs_info); | |
7c2ca468 CM |
3285 | fail_trans_kthread: |
3286 | kthread_stop(fs_info->transaction_kthread); | |
2ff7e61e | 3287 | btrfs_cleanup_transaction(fs_info); |
faa2dbf0 | 3288 | btrfs_free_fs_roots(fs_info); |
3f157a2f | 3289 | fail_cleaner: |
a74a4b97 | 3290 | kthread_stop(fs_info->cleaner_kthread); |
7c2ca468 CM |
3291 | |
3292 | /* | |
3293 | * make sure we're done with the btree inode before we stop our | |
3294 | * kthreads | |
3295 | */ | |
3296 | filemap_write_and_wait(fs_info->btree_inode->i_mapping); | |
7c2ca468 | 3297 | |
2365dd3c | 3298 | fail_sysfs: |
6618a59b | 3299 | btrfs_sysfs_remove_mounted(fs_info); |
2365dd3c | 3300 | |
b7c35e81 AJ |
3301 | fail_fsdev_sysfs: |
3302 | btrfs_sysfs_remove_fsid(fs_info->fs_devices); | |
3303 | ||
1b1d1f66 | 3304 | fail_block_groups: |
54067ae9 | 3305 | btrfs_put_block_group_cache(fs_info); |
af31f5e5 CM |
3306 | |
3307 | fail_tree_roots: | |
3308 | free_root_pointers(fs_info, 1); | |
2b8195bb | 3309 | invalidate_inode_pages2(fs_info->btree_inode->i_mapping); |
af31f5e5 | 3310 | |
39279cc3 | 3311 | fail_sb_buffer: |
7abadb64 | 3312 | btrfs_stop_all_workers(fs_info); |
5cdd7db6 | 3313 | btrfs_free_block_groups(fs_info); |
16cdcec7 | 3314 | fail_alloc: |
4543df7e | 3315 | fail_iput: |
586e46e2 ID |
3316 | btrfs_mapping_tree_free(&fs_info->mapping_tree); |
3317 | ||
4543df7e | 3318 | iput(fs_info->btree_inode); |
c404e0dc | 3319 | fail_bio_counter: |
7f8d236a | 3320 | percpu_counter_destroy(&fs_info->dev_replace.bio_counter); |
963d678b MX |
3321 | fail_delalloc_bytes: |
3322 | percpu_counter_destroy(&fs_info->delalloc_bytes); | |
e2d84521 MX |
3323 | fail_dirty_metadata_bytes: |
3324 | percpu_counter_destroy(&fs_info->dirty_metadata_bytes); | |
76dda93c YZ |
3325 | fail_srcu: |
3326 | cleanup_srcu_struct(&fs_info->subvol_srcu); | |
7e662854 | 3327 | fail: |
53b381b3 | 3328 | btrfs_free_stripe_hash_table(fs_info); |
586e46e2 | 3329 | btrfs_close_devices(fs_info->fs_devices); |
ad2b2c80 | 3330 | return err; |
af31f5e5 CM |
3331 | |
3332 | recovery_tree_root: | |
0b246afa | 3333 | if (!btrfs_test_opt(fs_info, USEBACKUPROOT)) |
af31f5e5 CM |
3334 | goto fail_tree_roots; |
3335 | ||
3336 | free_root_pointers(fs_info, 0); | |
3337 | ||
3338 | /* don't use the log in recovery mode, it won't be valid */ | |
3339 | btrfs_set_super_log_root(disk_super, 0); | |
3340 | ||
3341 | /* we can't trust the free space cache either */ | |
3342 | btrfs_set_opt(fs_info->mount_opt, CLEAR_CACHE); | |
3343 | ||
3344 | ret = next_root_backup(fs_info, fs_info->super_copy, | |
3345 | &num_backups_tried, &backup_index); | |
3346 | if (ret == -1) | |
3347 | goto fail_block_groups; | |
3348 | goto retry_root_backup; | |
eb60ceac | 3349 | } |
663faf9f | 3350 | ALLOW_ERROR_INJECTION(open_ctree, ERRNO); |
eb60ceac | 3351 | |
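 | /* completion callback for super block buffer head writes: on error, warn and bump the per-device write error stat */ |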
f2984462 CM |
3352 | static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate) |
3353 | { | |
f2984462 CM |
3354 | if (uptodate) { |
3355 | set_buffer_uptodate(bh); | |
3356 | } else { | |
442a4f63 SB |
3357 | struct btrfs_device *device = (struct btrfs_device *) |
3358 | bh->b_private; | |
3359 | ||
fb456252 | 3360 | btrfs_warn_rl_in_rcu(device->fs_info, |
b14af3b4 | 3361 | "lost page write due to IO error on %s", |
606686ee | 3362 | rcu_str_deref(device->name)); |
01327610 | 3363 | /* note: we don't call set_buffer_write_io_error() because we have |
1259ab75 CM |
3364 | * our own ways of dealing with the IO errors |
3365 | */ | |
f2984462 | 3366 | clear_buffer_uptodate(bh); |
442a4f63 | 3367 | btrfs_dev_stat_inc_and_print(device, BTRFS_DEV_STAT_WRITE_ERRS); |
f2984462 CM |
3368 | } |
3369 | unlock_buffer(bh); | |
3370 | put_bh(bh); | |
3371 | } | |
3372 | ||
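 | /* read super block copy @copy_num from @bdev and sanity check its bytenr and magic before returning the buffer head */ |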
29c36d72 AJ |
3373 | int btrfs_read_dev_one_super(struct block_device *bdev, int copy_num, |
3374 | struct buffer_head **bh_ret) | |
3375 | { | |
3376 | struct buffer_head *bh; | |
3377 | struct btrfs_super_block *super; | |
3378 | u64 bytenr; | |
3379 | ||
3380 | bytenr = btrfs_sb_offset(copy_num); | |
3381 | if (bytenr + BTRFS_SUPER_INFO_SIZE >= i_size_read(bdev->bd_inode)) | |
3382 | return -EINVAL; | |
3383 | ||
9f6d2510 | 3384 | bh = __bread(bdev, bytenr / BTRFS_BDEV_BLOCKSIZE, BTRFS_SUPER_INFO_SIZE); |
29c36d72 AJ |
3385 | /* |
3386 | * If we fail to read from the underlying devices, as of now | |
3387 | * the best option we have is to mark it EIO. | |
3388 | */ | |
3389 | if (!bh) | |
3390 | return -EIO; | |
3391 | ||
3392 | super = (struct btrfs_super_block *)bh->b_data; | |
3393 | if (btrfs_super_bytenr(super) != bytenr || | |
3394 | btrfs_super_magic(super) != BTRFS_MAGIC) { | |
3395 | brelse(bh); | |
3396 | return -EINVAL; | |
3397 | } | |
3398 | ||
3399 | *bh_ret = bh; | |
3400 | return 0; | |
3401 | } | |
3402 | ||
3403 | ||
a512bbf8 YZ |
3404 | struct buffer_head *btrfs_read_dev_super(struct block_device *bdev) |
3405 | { | |
3406 | struct buffer_head *bh; | |
3407 | struct buffer_head *latest = NULL; | |
3408 | struct btrfs_super_block *super; | |
3409 | int i; | |
3410 | u64 transid = 0; | |
92fc03fb | 3411 | int ret = -EINVAL; |
a512bbf8 YZ |
3412 | |
3413 | /* we would like to check all the supers, but that would make | |
3414 | * a btrfs mount succeed after a mkfs from a different FS. | |
3415 | * So, we need to add a special mount option to scan for | |
3416 | * later supers, using BTRFS_SUPER_MIRROR_MAX instead | |
3417 | */ | |
3418 | for (i = 0; i < 1; i++) { | |
29c36d72 AJ |
3419 | ret = btrfs_read_dev_one_super(bdev, i, &bh); |
3420 | if (ret) | |
a512bbf8 YZ |
3421 | continue; |
3422 | ||
3423 | super = (struct btrfs_super_block *)bh->b_data; | |
a512bbf8 YZ |
3424 | |
3425 | if (!latest || btrfs_super_generation(super) > transid) { | |
3426 | brelse(latest); | |
3427 | latest = bh; | |
3428 | transid = btrfs_super_generation(super); | |
3429 | } else { | |
3430 | brelse(bh); | |
3431 | } | |
3432 | } | |
92fc03fb AJ |
3433 | |
3434 | if (!latest) | |
3435 | return ERR_PTR(ret); | |
3436 | ||
a512bbf8 YZ |
3437 | return latest; |
3438 | } | |
3439 | ||
4eedeb75 | 3440 | /* |
abbb3b8e DS |
3441 | * Write superblock @sb to @device. Do not wait for completion; all the |
3442 | * buffer heads we write are pinned. | |
4eedeb75 | 3443 | * |
abbb3b8e DS |
3444 | * Write @max_mirrors copies of the superblock, where 0 means default that fit |
3445 | * the expected device size at commit time. Note that max_mirrors must be | |
3446 | * same for write and wait phases. | |
4eedeb75 | 3447 | * |
abbb3b8e | 3448 | * Return number of errors when buffer head is not found or submission fails. |
4eedeb75 | 3449 | */ |
a512bbf8 | 3450 | static int write_dev_supers(struct btrfs_device *device, |
abbb3b8e | 3451 | struct btrfs_super_block *sb, int max_mirrors) |
a512bbf8 YZ |
3452 | { |
3453 | struct buffer_head *bh; | |
3454 | int i; | |
3455 | int ret; | |
3456 | int errors = 0; | |
3457 | u32 crc; | |
3458 | u64 bytenr; | |
1b9e619c | 3459 | int op_flags; |
a512bbf8 YZ |
3460 | |
3461 | if (max_mirrors == 0) | |
3462 | max_mirrors = BTRFS_SUPER_MIRROR_MAX; | |
3463 | ||
a512bbf8 YZ |
3464 | for (i = 0; i < max_mirrors; i++) { |
3465 | bytenr = btrfs_sb_offset(i); | |
935e5cc9 MX |
3466 | if (bytenr + BTRFS_SUPER_INFO_SIZE >= |
3467 | device->commit_total_bytes) | |
a512bbf8 YZ |
3468 | break; |
3469 | ||
abbb3b8e | 3470 | btrfs_set_super_bytenr(sb, bytenr); |
4eedeb75 | 3471 | |
abbb3b8e DS |
3472 | crc = ~(u32)0; |
3473 | crc = btrfs_csum_data((const char *)sb + BTRFS_CSUM_SIZE, crc, | |
3474 | BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE); | |
3475 | btrfs_csum_final(crc, sb->csum); | |
4eedeb75 | 3476 | |
abbb3b8e | 3477 | /* One reference for us, and we leave it for the caller */ |
9f6d2510 | 3478 | bh = __getblk(device->bdev, bytenr / BTRFS_BDEV_BLOCKSIZE, |
abbb3b8e DS |
3479 | BTRFS_SUPER_INFO_SIZE); |
3480 | if (!bh) { | |
3481 | btrfs_err(device->fs_info, | |
3482 | "couldn't get super buffer head for bytenr %llu", | |
3483 | bytenr); | |
3484 | errors++; | |
4eedeb75 | 3485 | continue; |
abbb3b8e | 3486 | } |
634554dc | 3487 | |
abbb3b8e | 3488 | memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE); |
a512bbf8 | 3489 | |
abbb3b8e DS |
3490 | /* one reference for submit_bh */ |
3491 | get_bh(bh); | |
4eedeb75 | 3492 | |
abbb3b8e DS |
3493 | set_buffer_uptodate(bh); |
3494 | lock_buffer(bh); | |
3495 | bh->b_end_io = btrfs_end_buffer_write_sync; | |
3496 | bh->b_private = device; | |
a512bbf8 | 3497 | |
387125fc CM |
3498 | /* |
3499 | * we write the first super with FUA; the others we allow |
3500 | * to go down lazily. |
3501 | */ | |
1b9e619c OS |
3502 | op_flags = REQ_SYNC | REQ_META | REQ_PRIO; |
3503 | if (i == 0 && !btrfs_test_opt(device->fs_info, NOBARRIER)) | |
3504 | op_flags |= REQ_FUA; | |
3505 | ret = btrfsic_submit_bh(REQ_OP_WRITE, op_flags, bh); | |
4eedeb75 | 3506 | if (ret) |
a512bbf8 | 3507 | errors++; |
a512bbf8 YZ |
3508 | } |
3509 | return errors < i ? 0 : -1; | |
3510 | } | |
3511 | ||
abbb3b8e DS |
3512 | /* |
3513 | * Wait for write completion of superblocks done by write_dev_supers, | |
3514 | * @max_mirrors same for write and wait phases. | |
3515 | * | |
3516 | * Return number of errors when buffer head is not found or not marked up to | |
3517 | * date. | |
3518 | */ | |
3519 | static int wait_dev_supers(struct btrfs_device *device, int max_mirrors) | |
3520 | { | |
3521 | struct buffer_head *bh; | |
3522 | int i; | |
3523 | int errors = 0; | |
b6a535fa | 3524 | bool primary_failed = false; |
abbb3b8e DS |
3525 | u64 bytenr; |
3526 | ||
3527 | if (max_mirrors == 0) | |
3528 | max_mirrors = BTRFS_SUPER_MIRROR_MAX; | |
3529 | ||
3530 | for (i = 0; i < max_mirrors; i++) { | |
3531 | bytenr = btrfs_sb_offset(i); | |
3532 | if (bytenr + BTRFS_SUPER_INFO_SIZE >= | |
3533 | device->commit_total_bytes) | |
3534 | break; | |
3535 | ||
9f6d2510 DS |
3536 | bh = __find_get_block(device->bdev, |
3537 | bytenr / BTRFS_BDEV_BLOCKSIZE, | |
abbb3b8e DS |
3538 | BTRFS_SUPER_INFO_SIZE); |
3539 | if (!bh) { | |
3540 | errors++; | |
b6a535fa HM |
3541 | if (i == 0) |
3542 | primary_failed = true; | |
abbb3b8e DS |
3543 | continue; |
3544 | } | |
3545 | wait_on_buffer(bh); | |
b6a535fa | 3546 | if (!buffer_uptodate(bh)) { |
abbb3b8e | 3547 | errors++; |
b6a535fa HM |
3548 | if (i == 0) |
3549 | primary_failed = true; | |
3550 | } | |
abbb3b8e DS |
3551 | |
3552 | /* drop our reference */ | |
3553 | brelse(bh); | |
3554 | ||
3555 | /* drop the reference from the writing run */ | |
3556 | brelse(bh); | |
3557 | } | |
3558 | ||
b6a535fa HM |
3559 | /* log error, force error return */ |
3560 | if (primary_failed) { | |
3561 | btrfs_err(device->fs_info, "error writing primary super block to device %llu", | |
3562 | device->devid); | |
3563 | return -1; | |
3564 | } | |
3565 | ||
abbb3b8e DS |
3566 | return errors < i ? 0 : -1; |
3567 | } | |
3568 | ||
387125fc CM |
3569 | /* |
3570 | * endio for write_dev_flush; this will wake anyone waiting |
3571 | * for the barrier when it is done | |
3572 | */ | |
4246a0b6 | 3573 | static void btrfs_end_empty_barrier(struct bio *bio) |
387125fc | 3574 | { |
e0ae9994 | 3575 | complete(bio->bi_private); |
387125fc CM |
3576 | } |
3577 | ||
3578 | /* | |
4fc6441a AJ |
3579 | * Submit a flush request to the device if it supports it. Error handling is |
3580 | * done in the waiting counterpart. | |
387125fc | 3581 | */ |
4fc6441a | 3582 | static void write_dev_flush(struct btrfs_device *device) |
387125fc | 3583 | { |
c2a9c7ab | 3584 | struct request_queue *q = bdev_get_queue(device->bdev); |
e0ae9994 | 3585 | struct bio *bio = device->flush_bio; |
387125fc | 3586 | |
c2a9c7ab | 3587 | if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags)) |
4fc6441a | 3588 | return; |
387125fc | 3589 | |
e0ae9994 | 3590 | bio_reset(bio); |
387125fc | 3591 | bio->bi_end_io = btrfs_end_empty_barrier; |
74d46992 | 3592 | bio_set_dev(bio, device->bdev); |
8d910125 | 3593 | bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH; |
387125fc CM |
3594 | init_completion(&device->flush_wait); |
3595 | bio->bi_private = &device->flush_wait; | |
387125fc | 3596 | |
43a01111 | 3597 | btrfsic_submit_bio(bio); |
1c3063b6 | 3598 | set_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state); |
4fc6441a | 3599 | } |
387125fc | 3600 | |
4fc6441a AJ |
3601 | /* |
3602 | * If the flush bio has been submitted by write_dev_flush, wait for it. | |
3603 | */ | |
8c27cb35 | 3604 | static blk_status_t wait_dev_flush(struct btrfs_device *device) |
4fc6441a | 3605 | { |
4fc6441a | 3606 | struct bio *bio = device->flush_bio; |
387125fc | 3607 | |
1c3063b6 | 3608 | if (!test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state)) |
58efbc9f | 3609 | return BLK_STS_OK; |
387125fc | 3610 | |
1c3063b6 | 3611 | clear_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state); |
2980d574 | 3612 | wait_for_completion_io(&device->flush_wait); |
387125fc | 3613 | |
8c27cb35 | 3614 | return bio->bi_status; |
387125fc | 3615 | } |
387125fc | 3616 | |
d10b82fe | 3617 | static int check_barrier_error(struct btrfs_fs_info *fs_info) |
401b41e5 | 3618 | { |
6528b99d | 3619 | if (!btrfs_check_rw_degradable(fs_info, NULL)) |
401b41e5 | 3620 | return -EIO; |
387125fc CM |
3621 | return 0; |
3622 | } | |
3623 | ||
3624 | /* | |
3625 | * send an empty flush down to each device in parallel, | |
3626 | * then wait for them | |
3627 | */ | |
3628 | static int barrier_all_devices(struct btrfs_fs_info *info) | |
3629 | { | |
3630 | struct list_head *head; | |
3631 | struct btrfs_device *dev; | |
5af3e8cc | 3632 | int errors_wait = 0; |
4e4cbee9 | 3633 | blk_status_t ret; |
387125fc | 3634 | |
1538e6c5 | 3635 | lockdep_assert_held(&info->fs_devices->device_list_mutex); |
387125fc CM |
3636 | /* send down all the barriers */ |
3637 | head = &info->fs_devices->devices; | |
1538e6c5 | 3638 | list_for_each_entry(dev, head, dev_list) { |
e6e674bd | 3639 | if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) |
f88ba6a2 | 3640 | continue; |
cea7c8bf | 3641 | if (!dev->bdev) |
387125fc | 3642 | continue; |
e12c9621 | 3643 | if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) || |
ebbede42 | 3644 | !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) |
387125fc CM |
3645 | continue; |
3646 | ||
4fc6441a | 3647 | write_dev_flush(dev); |
58efbc9f | 3648 | dev->last_flush_error = BLK_STS_OK; |
387125fc CM |
3649 | } |
3650 | ||
3651 | /* wait for all the barriers */ | |
1538e6c5 | 3652 | list_for_each_entry(dev, head, dev_list) { |
e6e674bd | 3653 | if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) |
f88ba6a2 | 3654 | continue; |
387125fc | 3655 | if (!dev->bdev) { |
5af3e8cc | 3656 | errors_wait++; |
387125fc CM |
3657 | continue; |
3658 | } | |
e12c9621 | 3659 | if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) || |
ebbede42 | 3660 | !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) |
387125fc CM |
3661 | continue; |
3662 | ||
4fc6441a | 3663 | ret = wait_dev_flush(dev); |
401b41e5 AJ |
3664 | if (ret) { |
3665 | dev->last_flush_error = ret; | |
66b4993e DS |
3666 | btrfs_dev_stat_inc_and_print(dev, |
3667 | BTRFS_DEV_STAT_FLUSH_ERRS); | |
5af3e8cc | 3668 | errors_wait++; |
401b41e5 AJ |
3669 | } |
3670 | } | |
3671 | ||
cea7c8bf | 3672 | if (errors_wait) { |
401b41e5 AJ |
3673 | /* |
3674 | * At some point we need the status of all disks | |
3675 | * to arrive at the volume status. So error checking | |
3676 | * is being pushed to a separate loop. | |
3677 | */ | |
d10b82fe | 3678 | return check_barrier_error(info); |
387125fc | 3679 | } |
387125fc CM |
3680 | return 0; |
3681 | } | |
3682 | ||
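 | /* return the smallest number of device failures tolerated by any block group profile present in @flags */ |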
943c6e99 ZL |
3683 | int btrfs_get_num_tolerated_disk_barrier_failures(u64 flags) |
3684 | { | |
8789f4fe ZL |
3685 | int raid_type; |
3686 | int min_tolerated = INT_MAX; | |
943c6e99 | 3687 | |
8789f4fe ZL |
3688 | if ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 || |
3689 | (flags & BTRFS_AVAIL_ALLOC_BIT_SINGLE)) | |
3690 | min_tolerated = min(min_tolerated, | |
3691 | btrfs_raid_array[BTRFS_RAID_SINGLE]. | |
3692 | tolerated_failures); | |
943c6e99 | 3693 | |
8789f4fe ZL |
3694 | for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) { |
3695 | if (raid_type == BTRFS_RAID_SINGLE) | |
3696 | continue; | |
41a6e891 | 3697 | if (!(flags & btrfs_raid_array[raid_type].bg_flag)) |
8789f4fe ZL |
3698 | continue; |
3699 | min_tolerated = min(min_tolerated, | |
3700 | btrfs_raid_array[raid_type]. | |
3701 | tolerated_failures); | |
3702 | } | |
943c6e99 | 3703 | |
8789f4fe | 3704 | if (min_tolerated == INT_MAX) { |
ab8d0fc4 | 3705 | pr_warn("BTRFS: unknown raid flag: %llu", flags); |
8789f4fe ZL |
3706 | min_tolerated = 0; |
3707 | } | |
3708 | ||
3709 | return min_tolerated; | |
943c6e99 ZL |
3710 | } |
3711 | ||
eece6a9c | 3712 | int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors) |
f2984462 | 3713 | { |
e5e9a520 | 3714 | struct list_head *head; |
f2984462 | 3715 | struct btrfs_device *dev; |
a061fc8d | 3716 | struct btrfs_super_block *sb; |
f2984462 | 3717 | struct btrfs_dev_item *dev_item; |
f2984462 CM |
3718 | int ret; |
3719 | int do_barriers; | |
a236aed1 CM |
3720 | int max_errors; |
3721 | int total_errors = 0; | |
a061fc8d | 3722 | u64 flags; |
f2984462 | 3723 | |
0b246afa | 3724 | do_barriers = !btrfs_test_opt(fs_info, NOBARRIER); |
fed3b381 LB |
3725 | |
3726 | /* | |
3727 | * max_mirrors == 0 indicates we're from commit_transaction, | |
3728 | * not from fsync where the tree roots in fs_info have not | |
3729 | * been consistent on disk. | |
3730 | */ | |
3731 | if (max_mirrors == 0) | |
3732 | backup_super_roots(fs_info); | |
f2984462 | 3733 | |
0b246afa | 3734 | sb = fs_info->super_for_commit; |
a061fc8d | 3735 | dev_item = &sb->dev_item; |
e5e9a520 | 3736 | |
0b246afa JM |
3737 | mutex_lock(&fs_info->fs_devices->device_list_mutex); |
3738 | head = &fs_info->fs_devices->devices; | |
3739 | max_errors = btrfs_super_num_devices(fs_info->super_copy) - 1; | |
387125fc | 3740 | |
5af3e8cc | 3741 | if (do_barriers) { |
0b246afa | 3742 | ret = barrier_all_devices(fs_info); |
5af3e8cc SB |
3743 | if (ret) { |
3744 | mutex_unlock( | |
0b246afa JM |
3745 | &fs_info->fs_devices->device_list_mutex); |
3746 | btrfs_handle_fs_error(fs_info, ret, | |
3747 | "errors while submitting device barriers."); | |
5af3e8cc SB |
3748 | return ret; |
3749 | } | |
3750 | } | |
387125fc | 3751 | |
1538e6c5 | 3752 | list_for_each_entry(dev, head, dev_list) { |
dfe25020 CM |
3753 | if (!dev->bdev) { |
3754 | total_errors++; | |
3755 | continue; | |
3756 | } | |
e12c9621 | 3757 | if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) || |
ebbede42 | 3758 | !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) |
dfe25020 CM |
3759 | continue; |
3760 | ||
2b82032c | 3761 | btrfs_set_stack_device_generation(dev_item, 0); |
a061fc8d CM |
3762 | btrfs_set_stack_device_type(dev_item, dev->type); |
3763 | btrfs_set_stack_device_id(dev_item, dev->devid); | |
7df69d3e | 3764 | btrfs_set_stack_device_total_bytes(dev_item, |
935e5cc9 | 3765 | dev->commit_total_bytes); |
ce7213c7 MX |
3766 | btrfs_set_stack_device_bytes_used(dev_item, |
3767 | dev->commit_bytes_used); | |
a061fc8d CM |
3768 | btrfs_set_stack_device_io_align(dev_item, dev->io_align); |
3769 | btrfs_set_stack_device_io_width(dev_item, dev->io_width); | |
3770 | btrfs_set_stack_device_sector_size(dev_item, dev->sector_size); | |
3771 | memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE); | |
7239ff4b NB |
3772 | memcpy(dev_item->fsid, dev->fs_devices->metadata_uuid, |
3773 | BTRFS_FSID_SIZE); | |
a512bbf8 | 3774 | |
a061fc8d CM |
3775 | flags = btrfs_super_flags(sb); |
3776 | btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN); | |
3777 | ||
75cb857d QW |
3778 | ret = btrfs_validate_write_super(fs_info, sb); |
3779 | if (ret < 0) { | |
3780 | mutex_unlock(&fs_info->fs_devices->device_list_mutex); | |
3781 | btrfs_handle_fs_error(fs_info, -EUCLEAN, | |
3782 | "unexpected superblock corruption detected"); | |
3783 | return -EUCLEAN; | |
3784 | } | |
3785 | ||
abbb3b8e | 3786 | ret = write_dev_supers(dev, sb, max_mirrors); |
a236aed1 CM |
3787 | if (ret) |
3788 | total_errors++; | |
f2984462 | 3789 | } |
a236aed1 | 3790 | if (total_errors > max_errors) { |
0b246afa JM |
3791 | btrfs_err(fs_info, "%d errors while writing supers", |
3792 | total_errors); | |
3793 | mutex_unlock(&fs_info->fs_devices->device_list_mutex); | |
79787eaa | 3794 | |
9d565ba4 | 3795 | /* FUA is masked off if unsupported and can't be the reason */ |
0b246afa JM |
3796 | btrfs_handle_fs_error(fs_info, -EIO, |
3797 | "%d errors while writing supers", | |
3798 | total_errors); | |
9d565ba4 | 3799 | return -EIO; |
a236aed1 | 3800 | } |
f2984462 | 3801 | |
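 | /* second pass: wait for the super block writes submitted above to complete on each device */ |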
a512bbf8 | 3802 | total_errors = 0; |
1538e6c5 | 3803 | list_for_each_entry(dev, head, dev_list) { |
dfe25020 CM |
3804 | if (!dev->bdev) |
3805 | continue; | |
e12c9621 | 3806 | if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) || |
ebbede42 | 3807 | !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) |
dfe25020 CM |
3808 | continue; |
3809 | ||
abbb3b8e | 3810 | ret = wait_dev_supers(dev, max_mirrors); |
a512bbf8 YZ |
3811 | if (ret) |
3812 | total_errors++; | |
f2984462 | 3813 | } |
0b246afa | 3814 | mutex_unlock(&fs_info->fs_devices->device_list_mutex); |
a236aed1 | 3815 | if (total_errors > max_errors) { |
0b246afa JM |
3816 | btrfs_handle_fs_error(fs_info, -EIO, |
3817 | "%d errors while writing supers", | |
3818 | total_errors); | |
79787eaa | 3819 | return -EIO; |
a236aed1 | 3820 | } |
f2984462 CM |
3821 | return 0; |
3822 | } | |
3823 | ||
cb517eab MX |
3824 | /* Drop a fs root from the radix tree and free it. */ |
3825 | void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info, | |
3826 | struct btrfs_root *root) | |
2619ba1f | 3827 | { |
4df27c4d | 3828 | spin_lock(&fs_info->fs_roots_radix_lock); |
2619ba1f CM |
3829 | radix_tree_delete(&fs_info->fs_roots_radix, |
3830 | (unsigned long)root->root_key.objectid); | |
4df27c4d | 3831 | spin_unlock(&fs_info->fs_roots_radix_lock); |
76dda93c YZ |
3832 | |
3833 | if (btrfs_root_refs(&root->root_item) == 0) | |
3834 | synchronize_srcu(&fs_info->subvol_srcu); | |
3835 | ||
1c1ea4f7 | 3836 | if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) { |
3321719e | 3837 | btrfs_free_log(NULL, root); |
1c1ea4f7 LB |
3838 | if (root->reloc_root) { |
3839 | free_extent_buffer(root->reloc_root->node); | |
3840 | free_extent_buffer(root->reloc_root->commit_root); | |
3841 | btrfs_put_fs_root(root->reloc_root); | |
3842 | root->reloc_root = NULL; | |
3843 | } | |
3844 | } | |
3321719e | 3845 | |
faa2dbf0 JB |
3846 | if (root->free_ino_pinned) |
3847 | __btrfs_remove_free_space_cache(root->free_ino_pinned); | |
3848 | if (root->free_ino_ctl) | |
3849 | __btrfs_remove_free_space_cache(root->free_ino_ctl); | |
84db5ccf | 3850 | btrfs_free_fs_root(root); |
4df27c4d YZ |
3851 | } |
3852 | ||
84db5ccf | 3853 | void btrfs_free_fs_root(struct btrfs_root *root) |
4df27c4d | 3854 | { |
57cdc8db | 3855 | iput(root->ino_cache_inode); |
4df27c4d | 3856 | WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree)); |
0ee5dc67 AV |
3857 | if (root->anon_dev) |
3858 | free_anon_bdev(root->anon_dev); | |
8257b2dc MX |
3859 | if (root->subv_writers) |
3860 | btrfs_free_subvolume_writers(root->subv_writers); | |
4df27c4d YZ |
3861 | free_extent_buffer(root->node); |
3862 | free_extent_buffer(root->commit_root); | |
581bb050 LZ |
3863 | kfree(root->free_ino_ctl); |
3864 | kfree(root->free_ino_pinned); | |
b0feb9d9 | 3865 | btrfs_put_fs_root(root); |
2619ba1f CM |
3866 | } |
3867 | ||
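 | /* look up every fs root still in the radix tree and run orphan cleanup on it */ |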
c146afad | 3868 | int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info) |
cfaa7295 | 3869 | { |
c146afad YZ |
3870 | u64 root_objectid = 0; |
3871 | struct btrfs_root *gang[8]; | |
65d33fd7 QW |
3872 | int i = 0; |
3873 | int err = 0; | |
3874 | unsigned int ret = 0; | |
3875 | int index; | |
e089f05c | 3876 | |
c146afad | 3877 | while (1) { |
65d33fd7 | 3878 | index = srcu_read_lock(&fs_info->subvol_srcu); |
c146afad YZ |
3879 | ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix, |
3880 | (void **)gang, root_objectid, | |
3881 | ARRAY_SIZE(gang)); | |
65d33fd7 QW |
3882 | if (!ret) { |
3883 | srcu_read_unlock(&fs_info->subvol_srcu, index); | |
c146afad | 3884 | break; |
65d33fd7 | 3885 | } |
5d4f98a2 | 3886 | root_objectid = gang[ret - 1]->root_key.objectid + 1; |
65d33fd7 | 3887 | |
c146afad | 3888 | for (i = 0; i < ret; i++) { |
65d33fd7 QW |
3889 | /* Avoid to grab roots in dead_roots */ |
3890 | if (btrfs_root_refs(&gang[i]->root_item) == 0) { | |
3891 | gang[i] = NULL; | |
3892 | continue; | |
3893 | } | |
3894 | /* grab all the search result for later use */ | |
3895 | gang[i] = btrfs_grab_fs_root(gang[i]); | |
3896 | } | |
3897 | srcu_read_unlock(&fs_info->subvol_srcu, index); | |
66b4ffd1 | 3898 | |
65d33fd7 QW |
3899 | for (i = 0; i < ret; i++) { |
3900 | if (!gang[i]) | |
3901 | continue; | |
c146afad | 3902 | root_objectid = gang[i]->root_key.objectid; |
66b4ffd1 JB |
3903 | err = btrfs_orphan_cleanup(gang[i]); |
3904 | if (err) | |
65d33fd7 QW |
3905 | break; |
3906 | btrfs_put_fs_root(gang[i]); | |
c146afad YZ |
3907 | } |
3908 | root_objectid++; | |
3909 | } | |
65d33fd7 QW |
3910 | |
3911 | /* release the uncleaned roots due to error */ | |
3912 | for (; i < ret; i++) { | |
3913 | if (gang[i]) | |
3914 | btrfs_put_fs_root(gang[i]); | |
3915 | } | |
3916 | return err; | |
c146afad | 3917 | } |
a2135011 | 3918 | |
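 | /* run delayed iputs, wait for ongoing cleanup work, then join and commit a transaction */ |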
6bccf3ab | 3919 | int btrfs_commit_super(struct btrfs_fs_info *fs_info) |
c146afad | 3920 | { |
6bccf3ab | 3921 | struct btrfs_root *root = fs_info->tree_root; |
c146afad | 3922 | struct btrfs_trans_handle *trans; |
a74a4b97 | 3923 | |
0b246afa | 3924 | mutex_lock(&fs_info->cleaner_mutex); |
2ff7e61e | 3925 | btrfs_run_delayed_iputs(fs_info); |
0b246afa JM |
3926 | mutex_unlock(&fs_info->cleaner_mutex); |
3927 | wake_up_process(fs_info->cleaner_kthread); | |
c71bf099 YZ |
3928 | |
3929 | /* wait until ongoing cleanup work done */ | |
0b246afa JM |
3930 | down_write(&fs_info->cleanup_work_sem); |
3931 | up_write(&fs_info->cleanup_work_sem); | |
c71bf099 | 3932 | |
7a7eaa40 | 3933 | trans = btrfs_join_transaction(root); |
3612b495 TI |
3934 | if (IS_ERR(trans)) |
3935 | return PTR_ERR(trans); | |
3a45bb20 | 3936 | return btrfs_commit_transaction(trans); |
c146afad YZ |
3937 | } |
3938 | ||
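 | /* tear down the filesystem: stop background work and kthreads, commit if read-write, and free all cached state */ |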
6bccf3ab | 3939 | void close_ctree(struct btrfs_fs_info *fs_info) |
c146afad | 3940 | { |
c146afad YZ |
3941 | int ret; |
3942 | ||
afcdd129 | 3943 | set_bit(BTRFS_FS_CLOSING_START, &fs_info->flags); |
d6fd0ae2 OS |
3944 | /* |
3945 | * We don't want the cleaner to start new transactions, add more delayed | |
3946 | * iputs, etc. while we're closing. We can't use kthread_stop() yet | |
3947 | * because that frees the task_struct, and the transaction kthread might | |
3948 | * still try to wake up the cleaner. | |
3949 | */ | |
3950 | kthread_park(fs_info->cleaner_kthread); | |
c146afad | 3951 | |
7343dd61 | 3952 | /* wait for the qgroup rescan worker to stop */ |
d06f23d6 | 3953 | btrfs_qgroup_wait_for_completion(fs_info, false); |
7343dd61 | 3954 | |
803b2f54 SB |
3955 | /* wait for the uuid_scan task to finish */ |
3956 | down(&fs_info->uuid_tree_rescan_sem); | |
3957 | /* avoid complains from lockdep et al., set sem back to initial state */ | |
3958 | up(&fs_info->uuid_tree_rescan_sem); | |
3959 | ||
837d5b6e | 3960 | /* pause restriper - we want to resume on mount */ |
aa1b8cd4 | 3961 | btrfs_pause_balance(fs_info); |
837d5b6e | 3962 | |
8dabb742 SB |
3963 | btrfs_dev_replace_suspend_for_unmount(fs_info); |
3964 | ||
aa1b8cd4 | 3965 | btrfs_scrub_cancel(fs_info); |
4cb5300b CM |
3966 | |
3967 | /* wait for any defraggers to finish */ | |
3968 | wait_event(fs_info->transaction_wait, | |
3969 | (atomic_read(&fs_info->defrag_running) == 0)); | |
3970 | ||
3971 | /* clear out the rbtree of defraggable inodes */ | |
26176e7c | 3972 | btrfs_cleanup_defrag_inodes(fs_info); |
4cb5300b | 3973 | |
21c7e756 MX |
3974 | cancel_work_sync(&fs_info->async_reclaim_work); |
3975 | ||
bc98a42c | 3976 | if (!sb_rdonly(fs_info->sb)) { |
e44163e1 | 3977 | /* |
d6fd0ae2 OS |
3978 | * The cleaner kthread is stopped, so do one final pass over |
3979 | * unused block groups. | |
e44163e1 | 3980 | */ |
0b246afa | 3981 | btrfs_delete_unused_bgs(fs_info); |
e44163e1 | 3982 | |
6bccf3ab | 3983 | ret = btrfs_commit_super(fs_info); |
acce952b | 3984 | if (ret) |
04892340 | 3985 | btrfs_err(fs_info, "commit super ret %d", ret); |
acce952b | 3986 | } |
3987 | ||
af722733 LB |
3988 | if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state) || |
3989 | test_bit(BTRFS_FS_STATE_TRANS_ABORTED, &fs_info->fs_state)) | |
2ff7e61e | 3990 | btrfs_error_commit_super(fs_info); |
0f7d52f4 | 3991 | |
e3029d9f AV |
3992 | kthread_stop(fs_info->transaction_kthread); |
3993 | kthread_stop(fs_info->cleaner_kthread); | |
8929ecfa | 3994 | |
e187831e | 3995 | ASSERT(list_empty(&fs_info->delayed_iputs)); |
afcdd129 | 3996 | set_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags); |
f25784b3 | 3997 | |
04892340 | 3998 | btrfs_free_qgroup_config(fs_info); |
fe816d0f | 3999 | ASSERT(list_empty(&fs_info->delalloc_roots)); |
bcef60f2 | 4000 | |
963d678b | 4001 | if (percpu_counter_sum(&fs_info->delalloc_bytes)) { |
04892340 | 4002 | btrfs_info(fs_info, "at unmount delalloc count %lld", |
963d678b | 4003 | percpu_counter_sum(&fs_info->delalloc_bytes)); |
b0c68f8b | 4004 | } |
bcc63abb | 4005 | |
6618a59b | 4006 | btrfs_sysfs_remove_mounted(fs_info); |
b7c35e81 | 4007 | btrfs_sysfs_remove_fsid(fs_info->fs_devices); |
5ac1d209 | 4008 | |
faa2dbf0 | 4009 | btrfs_free_fs_roots(fs_info); |
d10c5f31 | 4010 | |
1a4319cc LB |
4011 | btrfs_put_block_group_cache(fs_info); |
4012 | ||
de348ee0 WS |
4013 | /* |
4014 | * we must make sure there is no read request left to |
4015 | * submit after we stop all workers. |
4016 | */ | |
4017 | invalidate_inode_pages2(fs_info->btree_inode->i_mapping); | |
96192499 JB |
4018 | btrfs_stop_all_workers(fs_info); |
4019 | ||
5cdd7db6 FM |
4020 | btrfs_free_block_groups(fs_info); |
4021 | ||
afcdd129 | 4022 | clear_bit(BTRFS_FS_OPEN, &fs_info->flags); |
13e6c37b | 4023 | free_root_pointers(fs_info, 1); |
9ad6b7bc | 4024 | |
13e6c37b | 4025 | iput(fs_info->btree_inode); |
d6bfde87 | 4026 | |
21adbd5c | 4027 | #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY |
0b246afa | 4028 | if (btrfs_test_opt(fs_info, CHECK_INTEGRITY)) |
2ff7e61e | 4029 | btrfsic_unmount(fs_info->fs_devices); |
21adbd5c SB |
4030 | #endif |
4031 | ||
dfe25020 | 4032 | btrfs_close_devices(fs_info->fs_devices); |
0b86a832 | 4033 | btrfs_mapping_tree_free(&fs_info->mapping_tree); |
b248a415 | 4034 | |
e2d84521 | 4035 | percpu_counter_destroy(&fs_info->dirty_metadata_bytes); |
963d678b | 4036 | percpu_counter_destroy(&fs_info->delalloc_bytes); |
7f8d236a | 4037 | percpu_counter_destroy(&fs_info->dev_replace.bio_counter); |
76dda93c | 4038 | cleanup_srcu_struct(&fs_info->subvol_srcu); |
0b86a832 | 4039 | |
53b381b3 | 4040 | btrfs_free_stripe_hash_table(fs_info); |
fd708b81 | 4041 | btrfs_free_ref_cache(fs_info); |
53b381b3 | 4042 | |
04216820 FM |
4043 | while (!list_empty(&fs_info->pinned_chunks)) { |
4044 | struct extent_map *em; | |
4045 | ||
4046 | em = list_first_entry(&fs_info->pinned_chunks, | |
4047 | struct extent_map, list); | |
4048 | list_del_init(&em->list); | |
4049 | free_extent_map(em); | |
4050 | } | |
eb60ceac CM |
4051 | } |
4052 | ||
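 | /* check whether @buf is up to date and its generation matches @parent_transid; may return -EAGAIN in atomic context */ |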
b9fab919 CM |
4053 | int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid, |
4054 | int atomic) | |
5f39d397 | 4055 | { |
1259ab75 | 4056 | int ret; |
727011e0 | 4057 | struct inode *btree_inode = buf->pages[0]->mapping->host; |
1259ab75 | 4058 | |
0b32f4bb | 4059 | ret = extent_buffer_uptodate(buf); |
1259ab75 CM |
4060 | if (!ret) |
4061 | return ret; | |
4062 | ||
4063 | ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf, | |
b9fab919 CM |
4064 | parent_transid, atomic); |
4065 | if (ret == -EAGAIN) | |
4066 | return ret; | |
1259ab75 | 4067 | return !ret; |
5f39d397 CM |
4068 | } |
4069 | ||
5f39d397 CM |
4070 | void btrfs_mark_buffer_dirty(struct extent_buffer *buf) |
4071 | { | |
0b246afa | 4072 | struct btrfs_fs_info *fs_info; |
06ea65a3 | 4073 | struct btrfs_root *root; |
5f39d397 | 4074 | u64 transid = btrfs_header_generation(buf); |
b9473439 | 4075 | int was_dirty; |
b4ce94de | 4076 | |
06ea65a3 JB |
4077 | #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS |
4078 | /* | |
4079 | * This is a fast path so only do this check if we have sanity tests | |
52042d8e | 4080 | * enabled. Normal people shouldn't be using unmapped buffers as dirty |
06ea65a3 JB |
4081 | * outside of the sanity tests. |
4082 | */ | |
b0132a3b | 4083 | if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &buf->bflags))) |
06ea65a3 JB |
4084 | return; |
4085 | #endif | |
4086 | root = BTRFS_I(buf->pages[0]->mapping->host)->root; | |
0b246afa | 4087 | fs_info = root->fs_info; |
b9447ef8 | 4088 | btrfs_assert_tree_locked(buf); |
0b246afa | 4089 | if (transid != fs_info->generation) |
5d163e0e | 4090 | WARN(1, KERN_CRIT "btrfs transid mismatch buffer %llu, found %llu running %llu\n", |
0b246afa | 4091 | buf->start, transid, fs_info->generation); |
0b32f4bb | 4092 | was_dirty = set_extent_buffer_dirty(buf); |
e2d84521 | 4093 | if (!was_dirty) |
104b4e51 NB |
4094 | percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, |
4095 | buf->len, | |
4096 | fs_info->dirty_metadata_batch); | |
1f21ef0a | 4097 | #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY |
69fc6cbb QW |
4098 | /* |
4099 | * btrfs_mark_buffer_dirty() can be called with the item pointer set | |
4100 | * but the item data not yet updated, so only check the item pointers | |
4101 | * here, not the item data. | |
4102 | */ | |
4103 | if (btrfs_header_level(buf) == 0 && | |
2f659546 | 4104 | btrfs_check_leaf_relaxed(fs_info, buf)) { |
a4f78750 | 4105 | btrfs_print_leaf(buf); |
1f21ef0a FM |
4106 | ASSERT(0); |
4107 | } | |
4108 | #endif | |
eb60ceac CM |
4109 | } |
4110 | ||
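/*
 * Illustrative sketch, not part of disk-io.c: a simplified userspace
 * analogue of the "relaxed" leaf check mentioned above.  It validates only
 * the item pointers (each item's data stays inside the block and the items
 * are packed back-to-front without overlap) and never interprets the item
 * payload.  The structures and check_leaf_pointers() helper are
 * hypothetical, not the real btrfs_check_leaf_relaxed().
 */
#include <stdbool.h>
#include <stdio.h>

#define LEAF_SIZE	4096
#define HEADER_SIZE	64

struct item_ptr {
	unsigned int offset;	/* start of item data inside the leaf */
	unsigned int size;	/* length of item data */
};

static bool check_leaf_pointers(const struct item_ptr *items, int nritems)
{
	unsigned int prev_start = LEAF_SIZE;

	for (int i = 0; i < nritems; i++) {
		unsigned int start = items[i].offset;
		unsigned int end = start + items[i].size;

		/* Data must lie between the header and the end of the leaf. */
		if (start < HEADER_SIZE || end > LEAF_SIZE || end < start)
			return false;
		/* Items are packed back-to-front without gaps or overlap. */
		if (end != prev_start)
			return false;
		prev_start = start;
	}
	return true;
}

int main(void)
{
	struct item_ptr items[] = {
		{ .offset = 4000, .size = 96 },		/* ends at 4096 */
		{ .offset = 3900, .size = 100 },	/* ends at 4000 */
	};

	printf("pointers sane: %d\n", check_leaf_pointers(items, 2));
	return 0;
}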
2ff7e61e | 4111 | static void __btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info, |
b53d3f5d | 4112 | int flush_delayed) |
16cdcec7 MX |
4113 | { |
4114 | /* | |
4115 | * It looks as though older kernels can get into trouble with | |
4116 | * this code; they end up stuck in balance_dirty_pages forever. | |
4117 | */ | |
e2d84521 | 4118 | int ret; |
16cdcec7 MX |
4119 | |
4120 | if (current->flags & PF_MEMALLOC) | |
4121 | return; | |
4122 | ||
b53d3f5d | 4123 | if (flush_delayed) |
2ff7e61e | 4124 | btrfs_balance_delayed_items(fs_info); |
16cdcec7 | 4125 | |
d814a491 EL |
4126 | ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes, |
4127 | BTRFS_DIRTY_METADATA_THRESH, | |
4128 | fs_info->dirty_metadata_batch); | |
e2d84521 | 4129 | if (ret > 0) { |
0b246afa | 4130 | balance_dirty_pages_ratelimited(fs_info->btree_inode->i_mapping); |
16cdcec7 | 4131 | } |
16cdcec7 MX |
4132 | } |
4133 | ||
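/*
 * Illustrative sketch, not part of disk-io.c: the function above compares
 * an approximate counter of dirty metadata bytes against a threshold and
 * only throttles the caller when that threshold is exceeded.  A minimal
 * userspace analogue of the pattern follows; dirty_bytes, DIRTY_THRESH and
 * throttle_writer() are hypothetical stand-ins, not btrfs symbols.
 */
#include <stdio.h>

#define DIRTY_THRESH	(32 * 1024 * 1024)	/* 32 MiB, arbitrary */

static long long dirty_bytes;

static void throttle_writer(void)
{
	/* Stand-in for balance_dirty_pages_ratelimited(): in real code this
	 * would kick off and wait for writeback; here we just reset. */
	printf("throttling at %lld dirty bytes\n", dirty_bytes);
	dirty_bytes = 0;
}

static void account_dirty(long long bytes)
{
	dirty_bytes += bytes;
	/* Cheap comparison first; the expensive throttle path only runs
	 * when enough dirty data has piled up to be worth the cost. */
	if (dirty_bytes > DIRTY_THRESH)
		throttle_writer();
}

int main(void)
{
	for (int i = 0; i < 10; i++)
		account_dirty(4 * 1024 * 1024);	/* dirty 4 MiB at a time */
	return 0;
}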
2ff7e61e | 4134 | void btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info) |
35b7e476 | 4135 | { |
2ff7e61e | 4136 | __btrfs_btree_balance_dirty(fs_info, 1); |
b53d3f5d | 4137 | } |
585ad2c3 | 4138 | |
2ff7e61e | 4139 | void btrfs_btree_balance_dirty_nodelay(struct btrfs_fs_info *fs_info) |
b53d3f5d | 4140 | { |
2ff7e61e | 4141 | __btrfs_btree_balance_dirty(fs_info, 0); |
35b7e476 | 4142 | } |
6b80053d | 4143 | |
581c1760 QW |
4144 | int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid, int level, |
4145 | struct btrfs_key *first_key) | |
6b80053d | 4146 | { |
727011e0 | 4147 | struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root; |
2ff7e61e JM |
4148 | struct btrfs_fs_info *fs_info = root->fs_info; |
4149 | ||
581c1760 QW |
4150 | return btree_read_extent_buffer_pages(fs_info, buf, parent_transid, |
4151 | level, first_key); | |
6b80053d | 4152 | } |
0da5468f | 4153 | |
2ff7e61e | 4154 | static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info) |
acce952b | 4155 | { |
fe816d0f NB |
4156 | /* cleanup FS via transaction */ |
4157 | btrfs_cleanup_transaction(fs_info); | |
4158 | ||
0b246afa | 4159 | mutex_lock(&fs_info->cleaner_mutex); |
2ff7e61e | 4160 | btrfs_run_delayed_iputs(fs_info); |
0b246afa | 4161 | mutex_unlock(&fs_info->cleaner_mutex); |
acce952b | 4162 | |
0b246afa JM |
4163 | down_write(&fs_info->cleanup_work_sem); |
4164 | up_write(&fs_info->cleanup_work_sem); | |
acce952b | 4165 | } |
4166 | ||
143bede5 | 4167 | static void btrfs_destroy_ordered_extents(struct btrfs_root *root) |
acce952b | 4168 | { |
acce952b | 4169 | struct btrfs_ordered_extent *ordered; |
acce952b | 4170 | |
199c2a9c | 4171 | spin_lock(&root->ordered_extent_lock); |
779880ef JB |
4172 | /* |
4173 | * This just short circuits the ordered extent completion code, which | |
4174 | * will make sure the ordered extent gets properly cleaned up. | |
4175 | */ | |
199c2a9c | 4176 | list_for_each_entry(ordered, &root->ordered_extents, |
779880ef JB |
4177 | root_extent_list) |
4178 | set_bit(BTRFS_ORDERED_IOERR, &ordered->flags); | |
199c2a9c MX |
4179 | spin_unlock(&root->ordered_extent_lock); |
4180 | } | |
4181 | ||
4182 | static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info) | |
4183 | { | |
4184 | struct btrfs_root *root; | |
4185 | struct list_head splice; | |
4186 | ||
4187 | INIT_LIST_HEAD(&splice); | |
4188 | ||
4189 | spin_lock(&fs_info->ordered_root_lock); | |
4190 | list_splice_init(&fs_info->ordered_roots, &splice); | |
4191 | while (!list_empty(&splice)) { | |
4192 | root = list_first_entry(&splice, struct btrfs_root, | |
4193 | ordered_root); | |
1de2cfde JB |
4194 | list_move_tail(&root->ordered_root, |
4195 | &fs_info->ordered_roots); | |
199c2a9c | 4196 | |
2a85d9ca | 4197 | spin_unlock(&fs_info->ordered_root_lock); |
199c2a9c MX |
4198 | btrfs_destroy_ordered_extents(root); |
4199 | ||
2a85d9ca LB |
4200 | cond_resched(); |
4201 | spin_lock(&fs_info->ordered_root_lock); | |
199c2a9c MX |
4202 | } |
4203 | spin_unlock(&fs_info->ordered_root_lock); | |
acce952b | 4204 | } |
4205 | ||
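/*
 * Illustrative sketch, not part of disk-io.c: the function above drains a
 * shared list by splicing it onto a private list head while the lock is
 * held, then handles one entry at a time with the lock dropped around the
 * per-entry work.  A simplified userspace analogue follows; struct node,
 * shared_head, handle() and drain_all() are hypothetical.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	int id;
	struct node *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *shared_head;

static void handle(struct node *n)
{
	printf("handling node %d\n", n->id);	/* may block in real code */
	free(n);
}

static void drain_all(void)
{
	struct node *splice;

	pthread_mutex_lock(&list_lock);
	splice = shared_head;		/* "splice" the whole list ... */
	shared_head = NULL;		/* ... leaving the shared head empty */
	pthread_mutex_unlock(&list_lock);

	while (splice) {
		struct node *n = splice;

		splice = n->next;
		handle(n);		/* the lock is not held here */
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));

		if (!n)
			return 1;
		n->id = i;
		n->next = shared_head;
		shared_head = n;
	}
	drain_all();
	return 0;
}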
35a3621b | 4206 | static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans, |
2ff7e61e | 4207 | struct btrfs_fs_info *fs_info) |
acce952b | 4208 | { |
4209 | struct rb_node *node; | |
4210 | struct btrfs_delayed_ref_root *delayed_refs; | |
4211 | struct btrfs_delayed_ref_node *ref; | |
4212 | int ret = 0; | |
4213 | ||
4214 | delayed_refs = &trans->delayed_refs; | |
4215 | ||
4216 | spin_lock(&delayed_refs->lock); | |
d7df2c79 | 4217 | if (atomic_read(&delayed_refs->num_entries) == 0) { |
cfece4db | 4218 | spin_unlock(&delayed_refs->lock); |
0b246afa | 4219 | btrfs_info(fs_info, "delayed_refs has NO entry"); |
acce952b | 4220 | return ret; |
4221 | } | |
4222 | ||
5c9d028b | 4223 | while ((node = rb_first_cached(&delayed_refs->href_root)) != NULL) { |
d7df2c79 | 4224 | struct btrfs_delayed_ref_head *head; |
0e0adbcf | 4225 | struct rb_node *n; |
e78417d1 | 4226 | bool pin_bytes = false; |
acce952b | 4227 | |
d7df2c79 JB |
4228 | head = rb_entry(node, struct btrfs_delayed_ref_head, |
4229 | href_node); | |
4230 | if (!mutex_trylock(&head->mutex)) { | |
d278850e | 4231 | refcount_inc(&head->refs); |
d7df2c79 | 4232 | spin_unlock(&delayed_refs->lock); |
eb12db69 | 4233 | |
d7df2c79 | 4234 | mutex_lock(&head->mutex); |
e78417d1 | 4235 | mutex_unlock(&head->mutex); |
d278850e | 4236 | btrfs_put_delayed_ref_head(head); |
d7df2c79 JB |
4237 | spin_lock(&delayed_refs->lock); |
4238 | continue; | |
4239 | } | |
4240 | spin_lock(&head->lock); | |
e3d03965 | 4241 | while ((n = rb_first_cached(&head->ref_tree)) != NULL) { |
0e0adbcf JB |
4242 | ref = rb_entry(n, struct btrfs_delayed_ref_node, |
4243 | ref_node); | |
d7df2c79 | 4244 | ref->in_tree = 0; |
e3d03965 | 4245 | rb_erase_cached(&ref->ref_node, &head->ref_tree); |
0e0adbcf | 4246 | RB_CLEAR_NODE(&ref->ref_node); |
1d57ee94 WX |
4247 | if (!list_empty(&ref->add_list)) |
4248 | list_del(&ref->add_list); | |
d7df2c79 JB |
4249 | atomic_dec(&delayed_refs->num_entries); |
4250 | btrfs_put_delayed_ref(ref); | |
e78417d1 | 4251 | } |
d7df2c79 JB |
4252 | if (head->must_insert_reserved) |
4253 | pin_bytes = true; | |
4254 | btrfs_free_delayed_extent_op(head->extent_op); | |
4255 | delayed_refs->num_heads--; | |
4256 | if (head->processing == 0) | |
4257 | delayed_refs->num_heads_ready--; | |
4258 | atomic_dec(&delayed_refs->num_entries); | |
5c9d028b | 4259 | rb_erase_cached(&head->href_node, &delayed_refs->href_root); |
d278850e | 4260 | RB_CLEAR_NODE(&head->href_node); |
d7df2c79 JB |
4261 | spin_unlock(&head->lock); |
4262 | spin_unlock(&delayed_refs->lock); | |
4263 | mutex_unlock(&head->mutex); | |
acce952b | 4264 | |
d7df2c79 | 4265 | if (pin_bytes) |
d278850e JB |
4266 | btrfs_pin_extent(fs_info, head->bytenr, |
4267 | head->num_bytes, 1); | |
31890da0 | 4268 | btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head); |
d278850e | 4269 | btrfs_put_delayed_ref_head(head); |
acce952b | 4270 | cond_resched(); |
4271 | spin_lock(&delayed_refs->lock); | |
4272 | } | |
4273 | ||
4274 | spin_unlock(&delayed_refs->lock); | |
4275 | ||
4276 | return ret; | |
4277 | } | |
4278 | ||
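/*
 * Illustrative sketch, not part of disk-io.c: the loop above must not sleep
 * on head->mutex while delayed_refs->lock is held, so on a failed trylock it
 * takes a reference on the head, drops the outer lock, waits for the holder
 * by locking and unlocking the head's mutex, drops the reference and then
 * restarts the scan.  The userspace analogue below shows that control flow;
 * struct ref_head and the helper names are hypothetical.
 */
#include <pthread.h>
#include <stdio.h>

struct ref_head {
	pthread_mutex_t mutex;
	int refs;
};

static pthread_mutex_t outer_lock = PTHREAD_MUTEX_INITIALIZER;
static struct ref_head head = {
	.mutex = PTHREAD_MUTEX_INITIALIZER,
	.refs = 1,
};

static int process_one(struct ref_head *h)
{
	pthread_mutex_lock(&outer_lock);
	if (pthread_mutex_trylock(&h->mutex) != 0) {
		/* Someone else holds the head: pin it, drop the outer lock,
		 * wait for the holder by taking its mutex, then retry. */
		h->refs++;
		pthread_mutex_unlock(&outer_lock);

		pthread_mutex_lock(&h->mutex);
		pthread_mutex_unlock(&h->mutex);
		h->refs--;	/* the real code uses a proper refcount put */
		return -1;	/* caller restarts the scan */
	}

	/* Both locks held: safe to tear the head down. */
	printf("destroying head, refs=%d\n", h->refs);
	pthread_mutex_unlock(&h->mutex);
	pthread_mutex_unlock(&outer_lock);
	return 0;
}

int main(void)
{
	while (process_one(&head))
		;
	return 0;
}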
143bede5 | 4279 | static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root) |
acce952b | 4280 | { |
4281 | struct btrfs_inode *btrfs_inode; | |
4282 | struct list_head splice; | |
4283 | ||
4284 | INIT_LIST_HEAD(&splice); | |
4285 | ||
eb73c1b7 MX |
4286 | spin_lock(&root->delalloc_lock); |
4287 | list_splice_init(&root->delalloc_inodes, &splice); | |
acce952b | 4288 | |
4289 | while (!list_empty(&splice)) { | |
fe816d0f | 4290 | struct inode *inode = NULL; |
eb73c1b7 MX |
4291 | btrfs_inode = list_first_entry(&splice, struct btrfs_inode, |
4292 | delalloc_inodes); | |
fe816d0f | 4293 | __btrfs_del_delalloc_inode(root, btrfs_inode); |
eb73c1b7 | 4294 | spin_unlock(&root->delalloc_lock); |
acce952b | 4295 | |
fe816d0f NB |
4296 | /* |
4297 | * Make sure we get a live inode and that it won't disappear | |
4298 | * out from under us in the meantime. | |
4299 | */ | |
4300 | inode = igrab(&btrfs_inode->vfs_inode); | |
4301 | if (inode) { | |
4302 | invalidate_inode_pages2(inode->i_mapping); | |
4303 | iput(inode); | |
4304 | } | |
eb73c1b7 | 4305 | spin_lock(&root->delalloc_lock); |
acce952b | 4306 | } |
eb73c1b7 MX |
4307 | spin_unlock(&root->delalloc_lock); |
4308 | } | |
4309 | ||
4310 | static void btrfs_destroy_all_delalloc_inodes(struct btrfs_fs_info *fs_info) | |
4311 | { | |
4312 | struct btrfs_root *root; | |
4313 | struct list_head splice; | |
4314 | ||
4315 | INIT_LIST_HEAD(&splice); | |
4316 | ||
4317 | spin_lock(&fs_info->delalloc_root_lock); | |
4318 | list_splice_init(&fs_info->delalloc_roots, &splice); | |
4319 | while (!list_empty(&splice)) { | |
4320 | root = list_first_entry(&splice, struct btrfs_root, | |
4321 | delalloc_root); | |
eb73c1b7 MX |
4322 | root = btrfs_grab_fs_root(root); |
4323 | BUG_ON(!root); | |
4324 | spin_unlock(&fs_info->delalloc_root_lock); | |
4325 | ||
4326 | btrfs_destroy_delalloc_inodes(root); | |
4327 | btrfs_put_fs_root(root); | |
4328 | ||
4329 | spin_lock(&fs_info->delalloc_root_lock); | |
4330 | } | |
4331 | spin_unlock(&fs_info->delalloc_root_lock); | |
acce952b | 4332 | } |
4333 | ||
2ff7e61e | 4334 | static int btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info, |
acce952b | 4335 | struct extent_io_tree *dirty_pages, |
4336 | int mark) | |
4337 | { | |
4338 | int ret; | |
acce952b | 4339 | struct extent_buffer *eb; |
4340 | u64 start = 0; | |
4341 | u64 end; | |
acce952b | 4342 | |
4343 | while (1) { | |
4344 | ret = find_first_extent_bit(dirty_pages, start, &start, &end, | |
e6138876 | 4345 | mark, NULL); |
acce952b | 4346 | if (ret) |
4347 | break; | |
4348 | ||
91166212 | 4349 | clear_extent_bits(dirty_pages, start, end, mark); |
acce952b | 4350 | while (start <= end) { |
0b246afa JM |
4351 | eb = find_extent_buffer(fs_info, start); |
4352 | start += fs_info->nodesize; | |
fd8b2b61 | 4353 | if (!eb) |
acce952b | 4354 | continue; |
fd8b2b61 | 4355 | wait_on_extent_buffer_writeback(eb); |
acce952b | 4356 | |
fd8b2b61 JB |
4357 | if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, |
4358 | &eb->bflags)) | |
4359 | clear_extent_buffer_dirty(eb); | |
4360 | free_extent_buffer_stale(eb); | |
acce952b | 4361 | } |
4362 | } | |
4363 | ||
4364 | return ret; | |
4365 | } | |
4366 | ||
2ff7e61e | 4367 | static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info, |
acce952b | 4368 | struct extent_io_tree *pinned_extents) |
4369 | { | |
4370 | struct extent_io_tree *unpin; | |
4371 | u64 start; | |
4372 | u64 end; | |
4373 | int ret; | |
ed0eaa14 | 4374 | bool loop = true; |
acce952b | 4375 | |
4376 | unpin = pinned_extents; | |
ed0eaa14 | 4377 | again: |
acce952b | 4378 | while (1) { |
0e6ec385 FM |
4379 | struct extent_state *cached_state = NULL; |
4380 | ||
fcd5e742 LF |
4381 | /* |
4382 | * btrfs_finish_extent_commit() may pick up the same range as | |
4383 | * ours between find_first_extent_bit and clear_extent_dirty. | |
4384 | * Hence, hold the unused_bg_unpin_mutex to avoid unpinning the | |
4385 | * same extent range twice. | |
4386 | */ | |
4387 | mutex_lock(&fs_info->unused_bg_unpin_mutex); | |
acce952b | 4388 | ret = find_first_extent_bit(unpin, 0, &start, &end, |
0e6ec385 | 4389 | EXTENT_DIRTY, &cached_state); |
fcd5e742 LF |
4390 | if (ret) { |
4391 | mutex_unlock(&fs_info->unused_bg_unpin_mutex); | |
acce952b | 4392 | break; |
fcd5e742 | 4393 | } |
acce952b | 4394 | |
0e6ec385 FM |
4395 | clear_extent_dirty(unpin, start, end, &cached_state); |
4396 | free_extent_state(cached_state); | |
2ff7e61e | 4397 | btrfs_error_unpin_extent_range(fs_info, start, end); |
fcd5e742 | 4398 | mutex_unlock(&fs_info->unused_bg_unpin_mutex); |
acce952b | 4399 | cond_resched(); |
4400 | } | |
4401 | ||
ed0eaa14 | 4402 | if (loop) { |
0b246afa JM |
4403 | if (unpin == &fs_info->freed_extents[0]) |
4404 | unpin = &fs_info->freed_extents[1]; | |
ed0eaa14 | 4405 | else |
0b246afa | 4406 | unpin = &fs_info->freed_extents[0]; |
ed0eaa14 LB |
4407 | loop = false; |
4408 | goto again; | |
4409 | } | |
4410 | ||
acce952b | 4411 | return 0; |
4412 | } | |
4413 | ||
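/*
 * Illustrative sketch, not part of disk-io.c: pinned extents live in two
 * trees that swap roles between transaction commits, so the function above
 * makes exactly two passes, flipping to the other tree once via the "loop"
 * flag and "goto again".  A minimal userspace analogue of that two-pass
 * flow is below; drain() and the trees[] arrays are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

static int trees[2][3] = { { 1, 2, 3 }, { 4, 5, 6 } };

static void drain(int *tree, int n)
{
	for (int i = 0; i < n; i++) {
		if (tree[i]) {
			printf("unpinning %d\n", tree[i]);
			tree[i] = 0;
		}
	}
}

int main(void)
{
	int *unpin = trees[0];
	bool loop = true;

again:
	drain(unpin, 3);

	if (loop) {
		/* Flip to whichever tree has not been processed yet. */
		unpin = (unpin == trees[0]) ? trees[1] : trees[0];
		loop = false;
		goto again;
	}
	return 0;
}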
c79a1751 LB |
4414 | static void btrfs_cleanup_bg_io(struct btrfs_block_group_cache *cache) |
4415 | { | |
4416 | struct inode *inode; | |
4417 | ||
4418 | inode = cache->io_ctl.inode; | |
4419 | if (inode) { | |
4420 | invalidate_inode_pages2(inode->i_mapping); | |
4421 | BTRFS_I(inode)->generation = 0; | |
4422 | cache->io_ctl.inode = NULL; | |
4423 | iput(inode); | |
4424 | } | |
4425 | btrfs_put_block_group(cache); | |
4426 | } | |
4427 | ||
4428 | void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans, | |
2ff7e61e | 4429 | struct btrfs_fs_info *fs_info) |
c79a1751 LB |
4430 | { |
4431 | struct btrfs_block_group_cache *cache; | |
4432 | ||
4433 | spin_lock(&cur_trans->dirty_bgs_lock); | |
4434 | while (!list_empty(&cur_trans->dirty_bgs)) { | |
4435 | cache = list_first_entry(&cur_trans->dirty_bgs, | |
4436 | struct btrfs_block_group_cache, | |
4437 | dirty_list); | |
c79a1751 LB |
4438 | |
4439 | if (!list_empty(&cache->io_list)) { | |
4440 | spin_unlock(&cur_trans->dirty_bgs_lock); | |
4441 | list_del_init(&cache->io_list); | |
4442 | btrfs_cleanup_bg_io(cache); | |
4443 | spin_lock(&cur_trans->dirty_bgs_lock); | |
4444 | } | |
4445 | ||
4446 | list_del_init(&cache->dirty_list); | |
4447 | spin_lock(&cache->lock); | |
4448 | cache->disk_cache_state = BTRFS_DC_ERROR; | |
4449 | spin_unlock(&cache->lock); | |
4450 | ||
4451 | spin_unlock(&cur_trans->dirty_bgs_lock); | |
4452 | btrfs_put_block_group(cache); | |
ba2c4d4e | 4453 | btrfs_delayed_refs_rsv_release(fs_info, 1); |
c79a1751 LB |
4454 | spin_lock(&cur_trans->dirty_bgs_lock); |
4455 | } | |
4456 | spin_unlock(&cur_trans->dirty_bgs_lock); | |
4457 | ||
45ae2c18 NB |
4458 | /* |
4459 | * Refer to the definition of the io_bgs member for details on why it | |
4460 | * is safe to use it without any locking. | |
4461 | */ | |
c79a1751 LB |
4462 | while (!list_empty(&cur_trans->io_bgs)) { |
4463 | cache = list_first_entry(&cur_trans->io_bgs, | |
4464 | struct btrfs_block_group_cache, | |
4465 | io_list); | |
c79a1751 LB |
4466 | |
4467 | list_del_init(&cache->io_list); | |
4468 | spin_lock(&cache->lock); | |
4469 | cache->disk_cache_state = BTRFS_DC_ERROR; | |
4470 | spin_unlock(&cache->lock); | |
4471 | btrfs_cleanup_bg_io(cache); | |
4472 | } | |
4473 | } | |
4474 | ||
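/*
 * Illustrative sketch, not part of disk-io.c: btrfs_cleanup_bg_io() above
 * can block, so the loop drops dirty_bgs_lock around that call and returns
 * to the head of the list on every iteration instead of caching a next
 * pointer that could go stale.  The simplified userspace analogue below
 * shows that shape; struct bg, dirty_head and cleanup_io() are hypothetical.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct bg {
	int id;
	struct bg *next;
};

static pthread_mutex_t dirty_lock = PTHREAD_MUTEX_INITIALIZER;
static struct bg *dirty_head;

static void cleanup_io(struct bg *b)
{
	printf("cleaning up block group %d\n", b->id);	/* may sleep */
	free(b);
}

static void cleanup_dirty_bgs(void)
{
	pthread_mutex_lock(&dirty_lock);
	while (dirty_head) {
		struct bg *b = dirty_head;

		dirty_head = b->next;		/* unlink under the lock */
		pthread_mutex_unlock(&dirty_lock);

		cleanup_io(b);			/* blocking work, unlocked */

		pthread_mutex_lock(&dirty_lock);/* re-check the list head */
	}
	pthread_mutex_unlock(&dirty_lock);
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct bg *b = malloc(sizeof(*b));

		if (!b)
			return 1;
		b->id = i;
		b->next = dirty_head;
		dirty_head = b;
	}
	cleanup_dirty_bgs();
	return 0;
}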
49b25e05 | 4475 | void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans, |
2ff7e61e | 4476 | struct btrfs_fs_info *fs_info) |
49b25e05 | 4477 | { |
2ff7e61e | 4478 | btrfs_cleanup_dirty_bgs(cur_trans, fs_info); |
c79a1751 LB |
4479 | ASSERT(list_empty(&cur_trans->dirty_bgs)); |
4480 | ASSERT(list_empty(&cur_trans->io_bgs)); | |
4481 | ||
2ff7e61e | 4482 | btrfs_destroy_delayed_refs(cur_trans, fs_info); |
49b25e05 | 4483 | |
4a9d8bde | 4484 | cur_trans->state = TRANS_STATE_COMMIT_START; |
0b246afa | 4485 | wake_up(&fs_info->transaction_blocked_wait); |
49b25e05 | 4486 | |
4a9d8bde | 4487 | cur_trans->state = TRANS_STATE_UNBLOCKED; |
0b246afa | 4488 | wake_up(&fs_info->transaction_wait); |
49b25e05 | 4489 | |
ccdf9b30 JM |
4490 | btrfs_destroy_delayed_inodes(fs_info); |
4491 | btrfs_assert_delayed_root_empty(fs_info); | |
49b25e05 | 4492 | |
2ff7e61e | 4493 | btrfs_destroy_marked_extents(fs_info, &cur_trans->dirty_pages, |
49b25e05 | 4494 | EXTENT_DIRTY); |
2ff7e61e | 4495 | btrfs_destroy_pinned_extent(fs_info, |
0b246afa | 4496 | fs_info->pinned_extents); |
49b25e05 | 4497 | |
4a9d8bde MX |
4498 | cur_trans->state = TRANS_STATE_COMPLETED; |
4499 | wake_up(&cur_trans->commit_wait); | |
49b25e05 JM |
4500 | } |
4501 | ||
2ff7e61e | 4502 | static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info) |
acce952b | 4503 | { |
4504 | struct btrfs_transaction *t; | |
acce952b | 4505 | |
0b246afa | 4506 | mutex_lock(&fs_info->transaction_kthread_mutex); |
acce952b | 4507 | |
0b246afa JM |
4508 | spin_lock(&fs_info->trans_lock); |
4509 | while (!list_empty(&fs_info->trans_list)) { | |
4510 | t = list_first_entry(&fs_info->trans_list, | |
724e2315 JB |
4511 | struct btrfs_transaction, list); |
4512 | if (t->state >= TRANS_STATE_COMMIT_START) { | |
9b64f57d | 4513 | refcount_inc(&t->use_count); |
0b246afa | 4514 | spin_unlock(&fs_info->trans_lock); |
2ff7e61e | 4515 | btrfs_wait_for_commit(fs_info, t->transid); |
724e2315 | 4516 | btrfs_put_transaction(t); |
0b246afa | 4517 | spin_lock(&fs_info->trans_lock); |
724e2315 JB |
4518 | continue; |
4519 | } | |
0b246afa | 4520 | if (t == fs_info->running_transaction) { |
724e2315 | 4521 | t->state = TRANS_STATE_COMMIT_DOING; |
0b246afa | 4522 | spin_unlock(&fs_info->trans_lock); |
724e2315 JB |
4523 | /* |
4524 | * We wait for 0 num_writers since we don't hold a trans | |
4525 | * handle open currently for this transaction. | |
4526 | */ | |
4527 | wait_event(t->writer_wait, | |
4528 | atomic_read(&t->num_writers) == 0); | |
4529 | } else { | |
0b246afa | 4530 | spin_unlock(&fs_info->trans_lock); |
724e2315 | 4531 | } |
2ff7e61e | 4532 | btrfs_cleanup_one_transaction(t, fs_info); |
4a9d8bde | 4533 | |
0b246afa JM |
4534 | spin_lock(&fs_info->trans_lock); |
4535 | if (t == fs_info->running_transaction) | |
4536 | fs_info->running_transaction = NULL; | |
acce952b | 4537 | list_del_init(&t->list); |
0b246afa | 4538 | spin_unlock(&fs_info->trans_lock); |
acce952b | 4539 | |
724e2315 | 4540 | btrfs_put_transaction(t); |
2ff7e61e | 4541 | trace_btrfs_transaction_commit(fs_info->tree_root); |
0b246afa | 4542 | spin_lock(&fs_info->trans_lock); |
724e2315 | 4543 | } |
0b246afa JM |
4544 | spin_unlock(&fs_info->trans_lock); |
4545 | btrfs_destroy_all_ordered_extents(fs_info); | |
ccdf9b30 JM |
4546 | btrfs_destroy_delayed_inodes(fs_info); |
4547 | btrfs_assert_delayed_root_empty(fs_info); | |
2ff7e61e | 4548 | btrfs_destroy_pinned_extent(fs_info, fs_info->pinned_extents); |
0b246afa JM |
4549 | btrfs_destroy_all_delalloc_inodes(fs_info); |
4550 | mutex_unlock(&fs_info->transaction_kthread_mutex); | |
acce952b | 4551 | |
4552 | return 0; | |
4553 | } | |
4554 | ||
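/*
 * Illustrative sketch, not part of disk-io.c: the cleanup path above uses
 * wait_event(t->writer_wait, atomic_read(&t->num_writers) == 0) to park
 * until every open transaction handle has been released, re-checking the
 * condition after each wakeup.  A userspace analogue with a condition
 * variable follows; the writer()/num_writers names are hypothetical.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t writer_wait = PTHREAD_COND_INITIALIZER;
static int num_writers = 1;

static void *writer(void *arg)
{
	(void)arg;
	/* Simulate the last transaction handle being released. */
	pthread_mutex_lock(&lock);
	num_writers--;
	pthread_cond_broadcast(&writer_wait);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t tid;

	if (pthread_create(&tid, NULL, writer, NULL) != 0)
		return 1;

	/* Analogue of wait_event(): sleep until the condition holds,
	 * re-checking it after every wakeup. */
	pthread_mutex_lock(&lock);
	while (num_writers != 0)
		pthread_cond_wait(&writer_wait, &lock);
	pthread_mutex_unlock(&lock);

	pthread_join(tid, NULL);
	printf("no writers left, safe to clean up the transaction\n");
	return 0;
}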
e8c9f186 | 4555 | static const struct extent_io_ops btree_extent_io_ops = { |
4d53dddb | 4556 | /* mandatory callbacks */ |
0b86a832 | 4557 | .submit_bio_hook = btree_submit_bio_hook, |
4d53dddb | 4558 | .readpage_end_io_hook = btree_readpage_end_io_hook, |
0da5468f | 4559 | }; |