Commit | Line | Data |
---|---|---|
c1d7c514 | 1 | // SPDX-License-Identifier: GPL-2.0 |
e02119d5 CM |
2 | /* |
3 | * Copyright (C) 2008 Oracle. All rights reserved. | |
e02119d5 CM |
4 | */ |
5 | ||
6 | #include <linux/sched.h> | |
5a0e3ad6 | 7 | #include <linux/slab.h> |
c6adc9cc | 8 | #include <linux/blkdev.h> |
5dc562c5 | 9 | #include <linux/list_sort.h> |
c7f88c4e | 10 | #include <linux/iversion.h> |
602cbe91 | 11 | #include "misc.h" |
9678c543 | 12 | #include "ctree.h" |
995946dd | 13 | #include "tree-log.h" |
e02119d5 CM |
14 | #include "disk-io.h" |
15 | #include "locking.h" | |
16 | #include "print-tree.h" | |
f186373f | 17 | #include "backref.h" |
ebb8765b | 18 | #include "compression.h" |
df2c95f3 | 19 | #include "qgroup.h" |
6787bb9f NB |
20 | #include "block-group.h" |
21 | #include "space-info.h" | |
d3575156 | 22 | #include "zoned.h" |
e02119d5 CM |
23 | |
24 | /* magic values for the inode_only field in btrfs_log_inode: | |
25 | * | |
26 | * LOG_INODE_ALL means to log everything | |
27 | * LOG_INODE_EXISTS means to log just enough to recreate the inode | |
28 | * during log replay | |
29 | */ | |
e13976cf DS |
30 | enum { |
31 | LOG_INODE_ALL, | |
32 | LOG_INODE_EXISTS, | |
33 | LOG_OTHER_INODE, | |
34 | LOG_OTHER_INODE_ALL, | |
35 | }; | |
e02119d5 | 36 | |
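A quick hedged illustration of the two documented modes. The call uses the btrfs_log_inode() helper declared further below in this file; the wrapper function and its variable names are made up for illustration, not a quote of a real call site:

```c
/*
 * Illustration only: log a file completely, and log its parent directory
 * just enough for the directory inode to be recreated during replay.
 */
static int log_file_and_parent_sketch(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_inode *inode,
				      struct btrfs_inode *parent_dir,
				      struct btrfs_log_ctx *ctx)
{
	int ret;

	ret = btrfs_log_inode(trans, root, inode, LOG_INODE_ALL, ctx);
	if (ret)
		return ret;
	return btrfs_log_inode(trans, root, parent_dir, LOG_INODE_EXISTS, ctx);
}
```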
12fcfd22 CM |
37 | /* |
38 | * directory trouble cases | |
39 | * | |
40 | * 1) on rename or unlink, if the inode being unlinked isn't in the fsync | |
41 | * log, we must force a full commit before doing an fsync of the directory | |
42 | * where the unlink was done. | |
43 | * ---> record transid of last unlink/rename per directory | |
44 | * | |
45 | * mkdir foo/some_dir | |
46 | * normal commit | |
47 | * rename foo/some_dir foo2/some_dir | |
48 | * mkdir foo/some_dir | |
49 | * fsync foo/some_dir/some_file | |
50 | * | |
51 | * The fsync above will unlink the original some_dir without recording | |
52 | * it in its new location (foo2). After a crash, some_dir will be gone | |
53 | * unless the fsync of some_file forces a full commit | |
54 | * | |
55 | * 2) we must log any new names for any file or dir that is in the fsync | |
56 | * log. ---> check inode while renaming/linking. | |
57 | * | |
58 | * 2a) we must log any new names for any file or dir during rename | |
59 | * when the directory they are being removed from was logged. | |
60 | * ---> check inode and old parent dir during rename | |
61 | * | |
62 | * 2a is actually the more important variant. With the extra logging | |
63 | * a crash might unlink the old name without recreating the new one | |
64 | * | |
65 | * 3) after a crash, we must go through any directories with a link count | |
66 | * of zero and redo the rm -rf | |
67 | * | |
68 | * mkdir f1/foo | |
69 | * normal commit | |
70 | * rm -rf f1/foo | |
71 | * fsync(f1) | |
72 | * | |
73 | * The directory foo was fully removed from the FS, but fsync was never |
74 | * called on foo, only on its parent dir f1. After a crash the rm -rf must |
75 | * be replayed. This must be able to recurse down the entire | |
76 | * directory tree. The inode link count fixup code takes care of the | |
77 | * ugly details. | |
78 | */ | |
79 | ||
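A minimal sketch of how rule 1 can be tracked, assuming the last_unlink_trans field on struct btrfs_inode; the two helper functions below are illustrative, not the kernel's own:

```c
/* On unlink or rename, remember which transaction touched the directory. */
static void record_unlink_sketch(struct btrfs_inode *dir,
				 struct btrfs_trans_handle *trans)
{
	dir->last_unlink_trans = trans->transid;
}

/*
 * At fsync time: if an unlink/rename happened in the current transaction,
 * the log alone cannot describe it safely, so force a full commit.
 */
static bool needs_full_commit_sketch(const struct btrfs_inode *dir,
				     const struct btrfs_trans_handle *trans)
{
	return dir->last_unlink_trans >= trans->transid;
}
```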
e02119d5 CM |
80 | /* |
81 | * stages for the tree walking. The first | |
82 | * stage (0) is to only pin down the blocks we find. |
83 | * The second stage (1) is to make sure that all the inodes |
84 | * we find in the log are created in the subvolume. | |
85 | * | |
86 | * The last stage is to deal with directories and links and extents | |
87 | * and all the other fun semantics | |
88 | */ | |
e13976cf DS |
89 | enum { |
90 | LOG_WALK_PIN_ONLY, | |
91 | LOG_WALK_REPLAY_INODES, | |
92 | LOG_WALK_REPLAY_DIR_INDEX, | |
93 | LOG_WALK_REPLAY_ALL, | |
94 | }; | |
e02119d5 | 95 | |
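For reference, a hedged mapping of what each replay stage is responsible for; the helper below is only an illustration and is not part of the replay code:

```c
static const char *log_walk_stage_name_sketch(int stage)
{
	switch (stage) {
	case LOG_WALK_PIN_ONLY:
		return "pin down the extent blocks used by the log tree";
	case LOG_WALK_REPLAY_INODES:
		return "create every inode logged in the tree";
	case LOG_WALK_REPLAY_DIR_INDEX:
		return "replay directory index items";
	case LOG_WALK_REPLAY_ALL:
		return "replay all remaining items (links, extents, etc.)";
	default:
		return "unknown stage";
	}
}
```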
12fcfd22 | 96 | static int btrfs_log_inode(struct btrfs_trans_handle *trans, |
a59108a7 | 97 | struct btrfs_root *root, struct btrfs_inode *inode, |
49dae1bc | 98 | int inode_only, |
8407f553 | 99 | struct btrfs_log_ctx *ctx); |
ec051c0f YZ |
100 | static int link_to_fixup_dir(struct btrfs_trans_handle *trans, |
101 | struct btrfs_root *root, | |
102 | struct btrfs_path *path, u64 objectid); | |
12fcfd22 CM |
103 | static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans, |
104 | struct btrfs_root *root, | |
105 | struct btrfs_root *log, | |
106 | struct btrfs_path *path, | |
107 | u64 dirid, int del_all); | |
fa1a0f42 | 108 | static void wait_log_commit(struct btrfs_root *root, int transid); |
e02119d5 CM |
109 | |
110 | /* | |
111 | * tree logging is a special write ahead log used to make sure that | |
112 | * fsyncs and O_SYNCs can happen without doing full tree commits. | |
113 | * | |
114 | * Full tree commits are expensive because they require commonly | |
115 | * modified blocks to be recowed, creating many dirty pages in the | |
116 | * extent tree and a 4x-6x higher write load than ext3. |
117 | * | |
118 | * Instead of doing a tree commit on every fsync, we use the | |
119 | * key ranges and transaction ids to find items for a given file or directory | |
120 | * that have changed in this transaction. Those items are copied into | |
121 | * a special tree (one per subvolume root), that tree is written to disk | |
122 | * and then the fsync is considered complete. | |
123 | * | |
124 | * After a crash, items are copied out of the log-tree back into the | |
125 | * subvolume tree. Any file data extents found are recorded in the extent | |
126 | * allocation tree, and the log-tree freed. | |
127 | * | |
128 | * The log tree is read three times: once to pin down all the extents it is |
129 | * using in ram, once to create all the inodes logged in the tree, |
130 | * and once to do all the other items. |
131 | */ | |
132 | ||
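Putting the pieces above together, a condensed sketch of the fsync fast path. This is a simplification under assumptions: the real call chain goes through other helpers, and the error handling that falls back to a full transaction commit is omitted:

```c
static int fsync_via_log_sketch(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_inode *inode,
				struct btrfs_log_ctx *ctx)
{
	int ret;

	/* Join (or create) this subvolume's log tree. */
	ret = start_log_trans(trans, root, ctx);
	if (ret)
		return ret;	/* e.g. -EAGAIN: caller must do a full commit */

	/* Copy the inode's items changed in this transaction into the log. */
	ret = btrfs_log_inode(trans, root, inode, LOG_INODE_ALL, ctx);

	/* Drop the writer count so the log tree can be synced to disk. */
	btrfs_end_log_trans(root);
	return ret;
}
```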
e02119d5 CM |
133 | /* |
134 | * start a sub transaction and setup the log tree | |
135 | * this increments the log tree writer count to make the people | |
136 | * syncing the tree wait for us to finish | |
137 | */ | |
138 | static int start_log_trans(struct btrfs_trans_handle *trans, | |
8b050d35 MX |
139 | struct btrfs_root *root, |
140 | struct btrfs_log_ctx *ctx) | |
e02119d5 | 141 | { |
0b246afa | 142 | struct btrfs_fs_info *fs_info = root->fs_info; |
47876f7c | 143 | struct btrfs_root *tree_root = fs_info->tree_root; |
fa1a0f42 | 144 | const bool zoned = btrfs_is_zoned(fs_info); |
34eb2a52 | 145 | int ret = 0; |
fa1a0f42 | 146 | bool created = false; |
7237f183 | 147 | |
47876f7c FM |
148 | /* |
149 | * First check if the log root tree was already created. If not, create | |
150 | * it before locking the root's log_mutex, just to keep lockdep happy. | |
151 | */ | |
152 | if (!test_bit(BTRFS_ROOT_HAS_LOG_TREE, &tree_root->state)) { | |
153 | mutex_lock(&tree_root->log_mutex); | |
154 | if (!fs_info->log_root_tree) { | |
155 | ret = btrfs_init_log_root_tree(trans, fs_info); | |
fa1a0f42 | 156 | if (!ret) { |
47876f7c | 157 | set_bit(BTRFS_ROOT_HAS_LOG_TREE, &tree_root->state); |
fa1a0f42 NA |
158 | created = true; |
159 | } | |
47876f7c FM |
160 | } |
161 | mutex_unlock(&tree_root->log_mutex); | |
162 | if (ret) | |
163 | return ret; | |
164 | } | |
165 | ||
7237f183 | 166 | mutex_lock(&root->log_mutex); |
34eb2a52 | 167 | |
fa1a0f42 | 168 | again: |
7237f183 | 169 | if (root->log_root) { |
fa1a0f42 NA |
170 | int index = (root->log_transid + 1) % 2; |
171 | ||
4884b8e8 | 172 | if (btrfs_need_log_full_commit(trans)) { |
50471a38 MX |
173 | ret = -EAGAIN; |
174 | goto out; | |
175 | } | |
34eb2a52 | 176 | |
fa1a0f42 NA |
177 | if (zoned && atomic_read(&root->log_commit[index])) { |
178 | wait_log_commit(root, root->log_transid - 1); | |
179 | goto again; | |
180 | } | |
181 | ||
ff782e0a | 182 | if (!root->log_start_pid) { |
27cdeb70 | 183 | clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state); |
34eb2a52 | 184 | root->log_start_pid = current->pid; |
ff782e0a | 185 | } else if (root->log_start_pid != current->pid) { |
27cdeb70 | 186 | set_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state); |
ff782e0a | 187 | } |
34eb2a52 | 188 | } else { |
fa1a0f42 NA |
189 | /* |
190 | * This means fs_info->log_root_tree was already created | |
191 | * for some other FS trees. Do the full commit not to mix | |
192 | * nodes from multiple log transactions to do sequential | |
193 | * writing. | |
194 | */ | |
195 | if (zoned && !created) { | |
196 | ret = -EAGAIN; | |
197 | goto out; | |
198 | } | |
199 | ||
e02119d5 | 200 | ret = btrfs_add_log_tree(trans, root); |
4a500fd1 | 201 | if (ret) |
e87ac136 | 202 | goto out; |
34eb2a52 | 203 | |
e7a79811 | 204 | set_bit(BTRFS_ROOT_HAS_LOG_TREE, &root->state); |
34eb2a52 Z |
205 | clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state); |
206 | root->log_start_pid = current->pid; | |
e02119d5 | 207 | } |
34eb2a52 | 208 | |
7237f183 | 209 | atomic_inc(&root->log_writers); |
75b463d2 | 210 | if (ctx && !ctx->logging_new_name) { |
34eb2a52 | 211 | int index = root->log_transid % 2; |
8b050d35 | 212 | list_add_tail(&ctx->list, &root->log_ctxs[index]); |
d1433deb | 213 | ctx->log_transid = root->log_transid; |
8b050d35 | 214 | } |
34eb2a52 | 215 | |
e87ac136 | 216 | out: |
7237f183 | 217 | mutex_unlock(&root->log_mutex); |
e87ac136 | 218 | return ret; |
e02119d5 CM |
219 | } |
220 | ||
221 | /* | |
222 | * returns 0 if there was a log transaction running and we were able | |
223 | * to join, or returns -ENOENT if there was no transaction |
224 | * in progress |
225 | */ | |
226 | static int join_running_log_trans(struct btrfs_root *root) | |
227 | { | |
fa1a0f42 | 228 | const bool zoned = btrfs_is_zoned(root->fs_info); |
e02119d5 CM |
229 | int ret = -ENOENT; |
230 | ||
e7a79811 FM |
231 | if (!test_bit(BTRFS_ROOT_HAS_LOG_TREE, &root->state)) |
232 | return ret; | |
233 | ||
7237f183 | 234 | mutex_lock(&root->log_mutex); |
fa1a0f42 | 235 | again: |
e02119d5 | 236 | if (root->log_root) { |
fa1a0f42 NA |
237 | int index = (root->log_transid + 1) % 2; |
238 | ||
e02119d5 | 239 | ret = 0; |
fa1a0f42 NA |
240 | if (zoned && atomic_read(&root->log_commit[index])) { |
241 | wait_log_commit(root, root->log_transid - 1); | |
242 | goto again; | |
243 | } | |
7237f183 | 244 | atomic_inc(&root->log_writers); |
e02119d5 | 245 | } |
7237f183 | 246 | mutex_unlock(&root->log_mutex); |
e02119d5 CM |
247 | return ret; |
248 | } | |
249 | ||
12fcfd22 CM |
250 | /* |
251 | * This either makes the current running log transaction wait | |
252 | * until you call btrfs_end_log_trans() or it makes any future | |
253 | * log transactions wait until you call btrfs_end_log_trans() | |
254 | */ | |
45128b08 | 255 | void btrfs_pin_log_trans(struct btrfs_root *root) |
12fcfd22 | 256 | { |
12fcfd22 | 257 | atomic_inc(&root->log_writers); |
12fcfd22 CM |
258 | } |
259 | ||
e02119d5 CM |
260 | /* |
261 | * indicate we're done making changes to the log tree | |
262 | * and wake up anyone waiting to do a sync | |
263 | */ | |
143bede5 | 264 | void btrfs_end_log_trans(struct btrfs_root *root) |
e02119d5 | 265 | { |
7237f183 | 266 | if (atomic_dec_and_test(&root->log_writers)) { |
093258e6 DS |
267 | /* atomic_dec_and_test implies a barrier */ |
268 | cond_wake_up_nomb(&root->log_writer_wait); | |
7237f183 | 269 | } |
e02119d5 CM |
270 | } |
271 | ||
247462a5 DS |
272 | static int btrfs_write_tree_block(struct extent_buffer *buf) |
273 | { | |
274 | return filemap_fdatawrite_range(buf->pages[0]->mapping, buf->start, | |
275 | buf->start + buf->len - 1); | |
276 | } | |
277 | ||
278 | static void btrfs_wait_tree_block_writeback(struct extent_buffer *buf) | |
279 | { | |
280 | filemap_fdatawait_range(buf->pages[0]->mapping, | |
281 | buf->start, buf->start + buf->len - 1); | |
282 | } | |
e02119d5 CM |
283 | |
284 | /* | |
285 | * the walk control struct is used to pass state down the chain when | |
286 | * processing the log tree. The stage field tells us which part | |
287 | * of the log tree processing we are currently doing. The others | |
288 | * are state fields used for that specific part | |
289 | */ | |
290 | struct walk_control { | |
291 | /* should we free the extent on disk when done? This is used | |
292 | * at transaction commit time while freeing a log tree | |
293 | */ | |
294 | int free; | |
295 | ||
296 | /* should we write out the extent buffer? This is used | |
297 | * while flushing the log tree to disk during a sync | |
298 | */ | |
299 | int write; | |
300 | ||
301 | /* should we wait for the extent buffer io to finish? Also used | |
302 | * while flushing the log tree to disk for a sync | |
303 | */ | |
304 | int wait; | |
305 | ||
306 | /* pin only walk, we record which extents on disk belong to the | |
307 | * log trees | |
308 | */ | |
309 | int pin; | |
310 | ||
311 | /* what stage of the replay code we're currently in */ | |
312 | int stage; | |
313 | ||
f2d72f42 FM |
314 | /* |
315 | * Ignore any items from the inode currently being processed. Needs | |
316 | * to be set every time we find a BTRFS_INODE_ITEM_KEY and we are in | |
317 | * the LOG_WALK_REPLAY_INODES stage. | |
318 | */ | |
319 | bool ignore_cur_inode; | |
320 | ||
e02119d5 CM |
321 | /* the root we are currently replaying */ |
322 | struct btrfs_root *replay_dest; | |
323 | ||
324 | /* the trans handle for the current replay */ | |
325 | struct btrfs_trans_handle *trans; | |
326 | ||
327 | /* the function that gets used to process blocks we find in the | |
328 | * tree. Note the extent_buffer might not be up to date when it is | |
329 | * passed in, and it must be checked or read if you need the data | |
330 | * inside it | |
331 | */ | |
332 | int (*process_func)(struct btrfs_root *log, struct extent_buffer *eb, | |
581c1760 | 333 | struct walk_control *wc, u64 gen, int level); |
e02119d5 CM |
334 | }; |
335 | ||
336 | /* | |
337 | * process_func used to pin down extents, write them or wait on them | |
338 | */ | |
339 | static int process_one_buffer(struct btrfs_root *log, | |
340 | struct extent_buffer *eb, | |
581c1760 | 341 | struct walk_control *wc, u64 gen, int level) |
e02119d5 | 342 | { |
0b246afa | 343 | struct btrfs_fs_info *fs_info = log->fs_info; |
b50c6e25 JB |
344 | int ret = 0; |
345 | ||
8c2a1a30 JB |
346 | /* |
347 | * If this fs is mixed then we need to be able to process the leaves to | |
348 | * pin down any logged extents, so we have to read the block. | |
349 | */ | |
0b246afa | 350 | if (btrfs_fs_incompat(fs_info, MIXED_GROUPS)) { |
581c1760 | 351 | ret = btrfs_read_buffer(eb, gen, level, NULL); |
8c2a1a30 JB |
352 | if (ret) |
353 | return ret; | |
354 | } | |
355 | ||
04018de5 | 356 | if (wc->pin) |
9fce5704 | 357 | ret = btrfs_pin_extent_for_log_replay(wc->trans, eb->start, |
2ff7e61e | 358 | eb->len); |
e02119d5 | 359 | |
b50c6e25 | 360 | if (!ret && btrfs_buffer_uptodate(eb, gen, 0)) { |
8c2a1a30 | 361 | if (wc->pin && btrfs_header_level(eb) == 0) |
bcdc428c | 362 | ret = btrfs_exclude_logged_extents(eb); |
e02119d5 CM |
363 | if (wc->write) |
364 | btrfs_write_tree_block(eb); | |
365 | if (wc->wait) | |
366 | btrfs_wait_tree_block_writeback(eb); | |
367 | } | |
b50c6e25 | 368 | return ret; |
e02119d5 CM |
369 | } |
370 | ||
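Hedged examples of how a walk_control is typically filled in, derived from the field comments below; these initializers are illustrations, not copies of the actual call sites elsewhere in this file:

```c
/* Flushing a log tree to disk for a sync: write buffers and wait on them. */
struct walk_control sync_wc_sketch = {
	.write = 1,
	.wait = 1,
	.process_func = process_one_buffer,
};

/* Freeing a log tree at transaction commit time. */
struct walk_control free_wc_sketch = {
	.free = 1,
	.process_func = process_one_buffer,
};

/* First replay pass: only pin the extents the log tree occupies on disk. */
struct walk_control replay_pin_wc_sketch = {
	.pin = 1,
	.stage = LOG_WALK_PIN_ONLY,
	.process_func = process_one_buffer,
};
```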
371 | /* | |
372 | * Item overwrite used by replay and tree logging. eb, slot and key all refer | |
373 | * to the src data we are copying out. | |
374 | * | |
375 | * root is the tree we are copying into, and path is a scratch | |
376 | * path for use in this function (it should be released on entry and | |
377 | * will be released on exit). | |
378 | * | |
379 | * If the key is already in the destination tree the existing item is | |
380 | * overwritten. If the existing item isn't big enough, it is extended. | |
381 | * If it is too large, it is truncated. | |
382 | * | |
383 | * If the key isn't in the destination yet, a new item is inserted. | |
384 | */ | |
385 | static noinline int overwrite_item(struct btrfs_trans_handle *trans, | |
386 | struct btrfs_root *root, | |
387 | struct btrfs_path *path, | |
388 | struct extent_buffer *eb, int slot, | |
389 | struct btrfs_key *key) | |
390 | { | |
391 | int ret; | |
392 | u32 item_size; | |
393 | u64 saved_i_size = 0; | |
394 | int save_old_i_size = 0; | |
395 | unsigned long src_ptr; | |
396 | unsigned long dst_ptr; | |
397 | int overwrite_root = 0; | |
4bc4bee4 | 398 | bool inode_item = key->type == BTRFS_INODE_ITEM_KEY; |
e02119d5 CM |
399 | |
400 | if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) | |
401 | overwrite_root = 1; | |
402 | ||
403 | item_size = btrfs_item_size_nr(eb, slot); | |
404 | src_ptr = btrfs_item_ptr_offset(eb, slot); | |
405 | ||
406 | /* look for the key in the destination tree */ | |
407 | ret = btrfs_search_slot(NULL, root, key, path, 0, 0); | |
4bc4bee4 JB |
408 | if (ret < 0) |
409 | return ret; | |
410 | ||
e02119d5 CM |
411 | if (ret == 0) { |
412 | char *src_copy; | |
413 | char *dst_copy; | |
414 | u32 dst_size = btrfs_item_size_nr(path->nodes[0], | |
415 | path->slots[0]); | |
416 | if (dst_size != item_size) | |
417 | goto insert; | |
418 | ||
419 | if (item_size == 0) { | |
b3b4aa74 | 420 | btrfs_release_path(path); |
e02119d5 CM |
421 | return 0; |
422 | } | |
423 | dst_copy = kmalloc(item_size, GFP_NOFS); | |
424 | src_copy = kmalloc(item_size, GFP_NOFS); | |
2a29edc6 | 425 | if (!dst_copy || !src_copy) { |
b3b4aa74 | 426 | btrfs_release_path(path); |
2a29edc6 | 427 | kfree(dst_copy); |
428 | kfree(src_copy); | |
429 | return -ENOMEM; | |
430 | } | |
e02119d5 CM |
431 | |
432 | read_extent_buffer(eb, src_copy, src_ptr, item_size); | |
433 | ||
434 | dst_ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]); | |
435 | read_extent_buffer(path->nodes[0], dst_copy, dst_ptr, | |
436 | item_size); | |
437 | ret = memcmp(dst_copy, src_copy, item_size); | |
438 | ||
439 | kfree(dst_copy); | |
440 | kfree(src_copy); | |
441 | /* | |
442 | * they have the same contents, just return, this saves | |
443 | * us from cowing blocks in the destination tree and doing | |
444 | * extra writes that may not have been done by a previous | |
445 | * sync | |
446 | */ | |
447 | if (ret == 0) { | |
b3b4aa74 | 448 | btrfs_release_path(path); |
e02119d5 CM |
449 | return 0; |
450 | } | |
451 | ||
4bc4bee4 JB |
452 | /* |
453 | * We need to load the old nbytes into the inode so when we | |
454 | * replay the extents we've logged we get the right nbytes. | |
455 | */ | |
456 | if (inode_item) { | |
457 | struct btrfs_inode_item *item; | |
458 | u64 nbytes; | |
d555438b | 459 | u32 mode; |
4bc4bee4 JB |
460 | |
461 | item = btrfs_item_ptr(path->nodes[0], path->slots[0], | |
462 | struct btrfs_inode_item); | |
463 | nbytes = btrfs_inode_nbytes(path->nodes[0], item); | |
464 | item = btrfs_item_ptr(eb, slot, | |
465 | struct btrfs_inode_item); | |
466 | btrfs_set_inode_nbytes(eb, item, nbytes); | |
d555438b JB |
467 | |
468 | /* | |
469 | * If this is a directory we need to reset the i_size to | |
470 | * 0 so that we can set it up properly when replaying | |
471 | * the rest of the items in this log. | |
472 | */ | |
473 | mode = btrfs_inode_mode(eb, item); | |
474 | if (S_ISDIR(mode)) | |
475 | btrfs_set_inode_size(eb, item, 0); | |
4bc4bee4 JB |
476 | } |
477 | } else if (inode_item) { | |
478 | struct btrfs_inode_item *item; | |
d555438b | 479 | u32 mode; |
4bc4bee4 JB |
480 | |
481 | /* | |
482 | * New inode, set nbytes to 0 so that the nbytes comes out | |
483 | * properly when we replay the extents. | |
484 | */ | |
485 | item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item); | |
486 | btrfs_set_inode_nbytes(eb, item, 0); | |
d555438b JB |
487 | |
488 | /* | |
489 | * If this is a directory we need to reset the i_size to 0 so | |
490 | * that we can set it up properly when replaying the rest of | |
491 | * the items in this log. | |
492 | */ | |
493 | mode = btrfs_inode_mode(eb, item); | |
494 | if (S_ISDIR(mode)) | |
495 | btrfs_set_inode_size(eb, item, 0); | |
e02119d5 CM |
496 | } |
497 | insert: | |
b3b4aa74 | 498 | btrfs_release_path(path); |
e02119d5 | 499 | /* try to insert the key into the destination tree */ |
df8d116f | 500 | path->skip_release_on_error = 1; |
e02119d5 CM |
501 | ret = btrfs_insert_empty_item(trans, root, path, |
502 | key, item_size); | |
df8d116f | 503 | path->skip_release_on_error = 0; |
e02119d5 CM |
504 | |
505 | /* make sure any existing item is the correct size */ | |
df8d116f | 506 | if (ret == -EEXIST || ret == -EOVERFLOW) { |
e02119d5 CM |
507 | u32 found_size; |
508 | found_size = btrfs_item_size_nr(path->nodes[0], | |
509 | path->slots[0]); | |
143bede5 | 510 | if (found_size > item_size) |
78ac4f9e | 511 | btrfs_truncate_item(path, item_size, 1); |
143bede5 | 512 | else if (found_size < item_size) |
c71dd880 | 513 | btrfs_extend_item(path, item_size - found_size); |
e02119d5 | 514 | } else if (ret) { |
4a500fd1 | 515 | return ret; |
e02119d5 CM |
516 | } |
517 | dst_ptr = btrfs_item_ptr_offset(path->nodes[0], | |
518 | path->slots[0]); | |
519 | ||
520 | /* don't overwrite an existing inode if the generation number | |
521 | * was logged as zero. This is done when the tree logging code | |
522 | * is just logging an inode to make sure it exists after recovery. | |
523 | * | |
524 | * Also, don't overwrite i_size on directories during replay. | |
525 | * log replay inserts and removes directory items based on the | |
526 | * state of the tree found in the subvolume, and i_size is modified | |
527 | * as it goes | |
528 | */ | |
529 | if (key->type == BTRFS_INODE_ITEM_KEY && ret == -EEXIST) { | |
530 | struct btrfs_inode_item *src_item; | |
531 | struct btrfs_inode_item *dst_item; | |
532 | ||
533 | src_item = (struct btrfs_inode_item *)src_ptr; | |
534 | dst_item = (struct btrfs_inode_item *)dst_ptr; | |
535 | ||
1a4bcf47 FM |
536 | if (btrfs_inode_generation(eb, src_item) == 0) { |
537 | struct extent_buffer *dst_eb = path->nodes[0]; | |
2f2ff0ee | 538 | const u64 ino_size = btrfs_inode_size(eb, src_item); |
1a4bcf47 | 539 | |
2f2ff0ee FM |
540 | /* |
541 | * For regular files an ino_size == 0 is used only when | |
542 | * logging that an inode exists, as part of a directory | |
543 | * fsync, and the inode wasn't fsynced before. In this | |
544 | * case don't set the size of the inode in the fs/subvol | |
545 | * tree, otherwise we would be throwing valid data away. | |
546 | */ | |
1a4bcf47 | 547 | if (S_ISREG(btrfs_inode_mode(eb, src_item)) && |
2f2ff0ee | 548 | S_ISREG(btrfs_inode_mode(dst_eb, dst_item)) && |
60d48e2e DS |
549 | ino_size != 0) |
550 | btrfs_set_inode_size(dst_eb, dst_item, ino_size); | |
e02119d5 | 551 | goto no_copy; |
1a4bcf47 | 552 | } |
e02119d5 CM |
553 | |
554 | if (overwrite_root && | |
555 | S_ISDIR(btrfs_inode_mode(eb, src_item)) && | |
556 | S_ISDIR(btrfs_inode_mode(path->nodes[0], dst_item))) { | |
557 | save_old_i_size = 1; | |
558 | saved_i_size = btrfs_inode_size(path->nodes[0], | |
559 | dst_item); | |
560 | } | |
561 | } | |
562 | ||
563 | copy_extent_buffer(path->nodes[0], eb, dst_ptr, | |
564 | src_ptr, item_size); | |
565 | ||
566 | if (save_old_i_size) { | |
567 | struct btrfs_inode_item *dst_item; | |
568 | dst_item = (struct btrfs_inode_item *)dst_ptr; | |
569 | btrfs_set_inode_size(path->nodes[0], dst_item, saved_i_size); | |
570 | } | |
571 | ||
572 | /* make sure the generation is filled in */ | |
573 | if (key->type == BTRFS_INODE_ITEM_KEY) { | |
574 | struct btrfs_inode_item *dst_item; | |
575 | dst_item = (struct btrfs_inode_item *)dst_ptr; | |
576 | if (btrfs_inode_generation(path->nodes[0], dst_item) == 0) { | |
577 | btrfs_set_inode_generation(path->nodes[0], dst_item, | |
578 | trans->transid); | |
579 | } | |
580 | } | |
581 | no_copy: | |
582 | btrfs_mark_buffer_dirty(path->nodes[0]); | |
b3b4aa74 | 583 | btrfs_release_path(path); |
e02119d5 CM |
584 | return 0; |
585 | } | |
586 | ||
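A hedged usage sketch for the helper above: during replay, an item found in a log leaf is simply copied over whatever the subvolume tree currently stores for that key. The wrapper and its arguments are placeholders for the caller's state:

```c
/*
 * Illustration only: replay copies one logged item into the destination
 * subvolume, overwriting whatever is currently stored under that key.
 */
static int replay_copy_item_sketch(struct walk_control *wc,
				   struct btrfs_path *path,
				   struct extent_buffer *log_eb, int slot,
				   struct btrfs_key *key)
{
	return overwrite_item(wc->trans, wc->replay_dest, path, log_eb, slot, key);
}
```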
587 | /* | |
588 | * simple helper to read an inode off the disk from a given root | |
589 | * This can only be called for subvolume roots and not for the log | |
590 | */ | |
591 | static noinline struct inode *read_one_inode(struct btrfs_root *root, | |
592 | u64 objectid) | |
593 | { | |
594 | struct inode *inode; | |
e02119d5 | 595 | |
0202e83f | 596 | inode = btrfs_iget(root->fs_info->sb, objectid, root); |
2e19f1f9 | 597 | if (IS_ERR(inode)) |
5d4f98a2 | 598 | inode = NULL; |
e02119d5 CM |
599 | return inode; |
600 | } | |
601 | ||
602 | /* replays a single extent in 'eb' at 'slot' with 'key' into the | |
603 | * subvolume 'root'. path is released on entry and should be released | |
604 | * on exit. | |
605 | * | |
606 | * extents in the log tree have not been allocated out of the extent | |
607 | * tree yet. So, this completes the allocation, taking a reference | |
608 | * as required if the extent already exists or creating a new extent | |
609 | * if it isn't in the extent allocation tree yet. | |
610 | * | |
611 | * The extent is inserted into the file, dropping any existing extents | |
612 | * from the file that overlap the new one. | |
613 | */ | |
614 | static noinline int replay_one_extent(struct btrfs_trans_handle *trans, | |
615 | struct btrfs_root *root, | |
616 | struct btrfs_path *path, | |
617 | struct extent_buffer *eb, int slot, | |
618 | struct btrfs_key *key) | |
619 | { | |
5893dfb9 | 620 | struct btrfs_drop_extents_args drop_args = { 0 }; |
0b246afa | 621 | struct btrfs_fs_info *fs_info = root->fs_info; |
e02119d5 | 622 | int found_type; |
e02119d5 | 623 | u64 extent_end; |
e02119d5 | 624 | u64 start = key->offset; |
4bc4bee4 | 625 | u64 nbytes = 0; |
e02119d5 CM |
626 | struct btrfs_file_extent_item *item; |
627 | struct inode *inode = NULL; | |
628 | unsigned long size; | |
629 | int ret = 0; | |
630 | ||
631 | item = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item); | |
632 | found_type = btrfs_file_extent_type(eb, item); | |
633 | ||
d899e052 | 634 | if (found_type == BTRFS_FILE_EXTENT_REG || |
4bc4bee4 JB |
635 | found_type == BTRFS_FILE_EXTENT_PREALLOC) { |
636 | nbytes = btrfs_file_extent_num_bytes(eb, item); | |
637 | extent_end = start + nbytes; | |
638 | ||
639 | /* | |
640 | * We don't add to the inodes nbytes if we are prealloc or a | |
641 | * hole. | |
642 | */ | |
643 | if (btrfs_file_extent_disk_bytenr(eb, item) == 0) | |
644 | nbytes = 0; | |
645 | } else if (found_type == BTRFS_FILE_EXTENT_INLINE) { | |
e41ca589 | 646 | size = btrfs_file_extent_ram_bytes(eb, item); |
4bc4bee4 | 647 | nbytes = btrfs_file_extent_ram_bytes(eb, item); |
da17066c | 648 | extent_end = ALIGN(start + size, |
0b246afa | 649 | fs_info->sectorsize); |
e02119d5 CM |
650 | } else { |
651 | ret = 0; | |
652 | goto out; | |
653 | } | |
654 | ||
655 | inode = read_one_inode(root, key->objectid); | |
656 | if (!inode) { | |
657 | ret = -EIO; | |
658 | goto out; | |
659 | } | |
660 | ||
661 | /* | |
662 | * first check to see if we already have this extent in the | |
663 | * file. This must be done before the btrfs_drop_extents run | |
664 | * so we don't try to drop this extent. | |
665 | */ | |
f85b7379 DS |
666 | ret = btrfs_lookup_file_extent(trans, root, path, |
667 | btrfs_ino(BTRFS_I(inode)), start, 0); | |
e02119d5 | 668 | |
d899e052 YZ |
669 | if (ret == 0 && |
670 | (found_type == BTRFS_FILE_EXTENT_REG || | |
671 | found_type == BTRFS_FILE_EXTENT_PREALLOC)) { | |
e02119d5 CM |
672 | struct btrfs_file_extent_item cmp1; |
673 | struct btrfs_file_extent_item cmp2; | |
674 | struct btrfs_file_extent_item *existing; | |
675 | struct extent_buffer *leaf; | |
676 | ||
677 | leaf = path->nodes[0]; | |
678 | existing = btrfs_item_ptr(leaf, path->slots[0], | |
679 | struct btrfs_file_extent_item); | |
680 | ||
681 | read_extent_buffer(eb, &cmp1, (unsigned long)item, | |
682 | sizeof(cmp1)); | |
683 | read_extent_buffer(leaf, &cmp2, (unsigned long)existing, | |
684 | sizeof(cmp2)); | |
685 | ||
686 | /* | |
687 | * we already have a pointer to this exact extent, | |
688 | * we don't have to do anything | |
689 | */ | |
690 | if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) { | |
b3b4aa74 | 691 | btrfs_release_path(path); |
e02119d5 CM |
692 | goto out; |
693 | } | |
694 | } | |
b3b4aa74 | 695 | btrfs_release_path(path); |
e02119d5 CM |
696 | |
697 | /* drop any overlapping extents */ | |
5893dfb9 FM |
698 | drop_args.start = start; |
699 | drop_args.end = extent_end; | |
700 | drop_args.drop_cache = true; | |
701 | ret = btrfs_drop_extents(trans, root, BTRFS_I(inode), &drop_args); | |
3650860b JB |
702 | if (ret) |
703 | goto out; | |
e02119d5 | 704 | |
07d400a6 YZ |
705 | if (found_type == BTRFS_FILE_EXTENT_REG || |
706 | found_type == BTRFS_FILE_EXTENT_PREALLOC) { | |
5d4f98a2 | 707 | u64 offset; |
07d400a6 YZ |
708 | unsigned long dest_offset; |
709 | struct btrfs_key ins; | |
710 | ||
3168021c FM |
711 | if (btrfs_file_extent_disk_bytenr(eb, item) == 0 && |
712 | btrfs_fs_incompat(fs_info, NO_HOLES)) | |
713 | goto update_inode; | |
714 | ||
07d400a6 YZ |
715 | ret = btrfs_insert_empty_item(trans, root, path, key, |
716 | sizeof(*item)); | |
3650860b JB |
717 | if (ret) |
718 | goto out; | |
07d400a6 YZ |
719 | dest_offset = btrfs_item_ptr_offset(path->nodes[0], |
720 | path->slots[0]); | |
721 | copy_extent_buffer(path->nodes[0], eb, dest_offset, | |
722 | (unsigned long)item, sizeof(*item)); | |
723 | ||
724 | ins.objectid = btrfs_file_extent_disk_bytenr(eb, item); | |
725 | ins.offset = btrfs_file_extent_disk_num_bytes(eb, item); | |
726 | ins.type = BTRFS_EXTENT_ITEM_KEY; | |
5d4f98a2 | 727 | offset = key->offset - btrfs_file_extent_offset(eb, item); |
07d400a6 | 728 | |
df2c95f3 QW |
729 | /* |
730 | * Manually record dirty extent, as here we did a shallow | |
731 | * file extent item copy and skip normal backref update, | |
732 | * but modify the extent tree all by ourselves. |
733 | * So we need to manually record a dirty extent for qgroup, |
734 | * as the owner of the file extent changed from the log tree |
735 | * (doesn't affect qgroup) to the fs/file tree (affects qgroup). |
736 | */ | |
a95f3aaf | 737 | ret = btrfs_qgroup_trace_extent(trans, |
df2c95f3 QW |
738 | btrfs_file_extent_disk_bytenr(eb, item), |
739 | btrfs_file_extent_disk_num_bytes(eb, item), | |
740 | GFP_NOFS); | |
741 | if (ret < 0) | |
742 | goto out; | |
743 | ||
07d400a6 | 744 | if (ins.objectid > 0) { |
82fa113f | 745 | struct btrfs_ref ref = { 0 }; |
07d400a6 YZ |
746 | u64 csum_start; |
747 | u64 csum_end; | |
748 | LIST_HEAD(ordered_sums); | |
82fa113f | 749 | |
07d400a6 YZ |
750 | /* |
751 | * is this extent already allocated in the extent | |
752 | * allocation tree? If so, just add a reference | |
753 | */ | |
2ff7e61e | 754 | ret = btrfs_lookup_data_extent(fs_info, ins.objectid, |
07d400a6 | 755 | ins.offset); |
3736127a MPS |
756 | if (ret < 0) { |
757 | goto out; | |
758 | } else if (ret == 0) { | |
82fa113f QW |
759 | btrfs_init_generic_ref(&ref, |
760 | BTRFS_ADD_DELAYED_REF, | |
761 | ins.objectid, ins.offset, 0); | |
762 | btrfs_init_data_ref(&ref, | |
763 | root->root_key.objectid, | |
b06c4bf5 | 764 | key->objectid, offset); |
82fa113f | 765 | ret = btrfs_inc_extent_ref(trans, &ref); |
b50c6e25 JB |
766 | if (ret) |
767 | goto out; | |
07d400a6 YZ |
768 | } else { |
769 | /* | |
770 | * insert the extent pointer in the extent | |
771 | * allocation tree | |
772 | */ | |
5d4f98a2 | 773 | ret = btrfs_alloc_logged_file_extent(trans, |
2ff7e61e | 774 | root->root_key.objectid, |
5d4f98a2 | 775 | key->objectid, offset, &ins); |
b50c6e25 JB |
776 | if (ret) |
777 | goto out; | |
07d400a6 | 778 | } |
b3b4aa74 | 779 | btrfs_release_path(path); |
07d400a6 YZ |
780 | |
781 | if (btrfs_file_extent_compression(eb, item)) { | |
782 | csum_start = ins.objectid; | |
783 | csum_end = csum_start + ins.offset; | |
784 | } else { | |
785 | csum_start = ins.objectid + | |
786 | btrfs_file_extent_offset(eb, item); | |
787 | csum_end = csum_start + | |
788 | btrfs_file_extent_num_bytes(eb, item); | |
789 | } | |
790 | ||
791 | ret = btrfs_lookup_csums_range(root->log_root, | |
792 | csum_start, csum_end - 1, | |
a2de733c | 793 | &ordered_sums, 0); |
3650860b JB |
794 | if (ret) |
795 | goto out; | |
b84b8390 FM |
796 | /* |
797 | * Now delete all existing csums in the csum root that |
798 | * cover our range. We do this because we can have an | |
799 | * extent that is completely referenced by one file | |
800 | * extent item and partially referenced by another | |
801 | * file extent item (like after using the clone or | |
802 | * extent_same ioctls). In this case if we end up doing | |
803 | * the replay of the one that partially references the | |
804 | * extent first, and we do not do the csum deletion | |
805 | * below, we can get 2 csum items in the csum tree that | |
806 | * overlap each other. For example, imagine our log has | |
807 | * the two following file extent items: | |
808 | * | |
809 | * key (257 EXTENT_DATA 409600) | |
810 | * extent data disk byte 12845056 nr 102400 | |
811 | * extent data offset 20480 nr 20480 ram 102400 | |
812 | * | |
813 | * key (257 EXTENT_DATA 819200) | |
814 | * extent data disk byte 12845056 nr 102400 | |
815 | * extent data offset 0 nr 102400 ram 102400 | |
816 | * | |
817 | * Where the second one fully references the 100K extent | |
818 | * that starts at disk byte 12845056, and the log tree | |
819 | * has a single csum item that covers the entire range | |
820 | * of the extent: | |
821 | * | |
822 | * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100 | |
823 | * | |
824 | * After the first file extent item is replayed, the | |
825 | * csum tree gets the following csum item: | |
826 | * | |
827 | * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20 | |
828 | * | |
829 | * Which covers the 20K sub-range starting at offset 20K | |
830 | * of our extent. Now when we replay the second file | |
831 | * extent item, if we do not delete existing csum items | |
832 | * that cover any of its blocks, we end up getting two | |
833 | * csum items in our csum tree that overlap each other: | |
834 | * | |
835 | * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100 | |
836 | * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20 | |
837 | * | |
838 | * Which is a problem, because after this anyone trying | |
839 | * to look up the checksum of any block of our |
840 | * extent starting at an offset of 40K or higher, will | |
841 | * end up looking at the second csum item only, which | |
842 | * does not contain the checksum for any block starting | |
843 | * at offset 40K or higher of our extent. | |
844 | */ | |
07d400a6 YZ |
845 | while (!list_empty(&ordered_sums)) { |
846 | struct btrfs_ordered_sum *sums; | |
847 | sums = list_entry(ordered_sums.next, | |
848 | struct btrfs_ordered_sum, | |
849 | list); | |
b84b8390 | 850 | if (!ret) |
40e046ac FM |
851 | ret = btrfs_del_csums(trans, |
852 | fs_info->csum_root, | |
5b4aacef JM |
853 | sums->bytenr, |
854 | sums->len); | |
3650860b JB |
855 | if (!ret) |
856 | ret = btrfs_csum_file_blocks(trans, | |
0b246afa | 857 | fs_info->csum_root, sums); |
07d400a6 YZ |
858 | list_del(&sums->list); |
859 | kfree(sums); | |
860 | } | |
3650860b JB |
861 | if (ret) |
862 | goto out; | |
07d400a6 | 863 | } else { |
b3b4aa74 | 864 | btrfs_release_path(path); |
07d400a6 YZ |
865 | } |
866 | } else if (found_type == BTRFS_FILE_EXTENT_INLINE) { | |
867 | /* inline extents are easy, we just overwrite them */ | |
868 | ret = overwrite_item(trans, root, path, eb, slot, key); | |
3650860b JB |
869 | if (ret) |
870 | goto out; | |
07d400a6 | 871 | } |
e02119d5 | 872 | |
9ddc959e JB |
873 | ret = btrfs_inode_set_file_extent_range(BTRFS_I(inode), start, |
874 | extent_end - start); | |
875 | if (ret) | |
876 | goto out; | |
877 | ||
3168021c | 878 | update_inode: |
2766ff61 | 879 | btrfs_update_inode_bytes(BTRFS_I(inode), nbytes, drop_args.bytes_found); |
9a56fcd1 | 880 | ret = btrfs_update_inode(trans, root, BTRFS_I(inode)); |
e02119d5 CM |
881 | out: |
882 | if (inode) | |
883 | iput(inode); | |
884 | return ret; | |
885 | } | |
886 | ||
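A condensed outline of replay_one_extent() above for the REG/PREALLOC case, paraphrased from the code for quick reference (illustrative pseudo-steps, not itself compilable):

```c
/*
 *   if the exact file extent item already exists        -> nothing to do
 *   btrfs_drop_extents()          drop anything overlapping [start, extent_end)
 *   btrfs_insert_empty_item()+copy  re-create the file extent item
 *   if the disk extent exists      btrfs_inc_extent_ref()
 *   else                           btrfs_alloc_logged_file_extent()
 *   btrfs_lookup_csums_range()     pull the extent's csums out of the log tree
 *   btrfs_del_csums() + btrfs_csum_file_blocks()  replace overlapping csums
 *   btrfs_update_inode()           fix up nbytes and i_size
 */
```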
887 | /* | |
888 | * when cleaning up conflicts between the directory names in the | |
889 | * subvolume, directory names in the log and directory names in the | |
890 | * inode back references, we may have to unlink inodes from directories. | |
891 | * | |
892 | * This is a helper function to do the unlink of a specific directory | |
893 | * item | |
894 | */ | |
895 | static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans, | |
896 | struct btrfs_root *root, | |
897 | struct btrfs_path *path, | |
207e7d92 | 898 | struct btrfs_inode *dir, |
e02119d5 CM |
899 | struct btrfs_dir_item *di) |
900 | { | |
901 | struct inode *inode; | |
902 | char *name; | |
903 | int name_len; | |
904 | struct extent_buffer *leaf; | |
905 | struct btrfs_key location; | |
906 | int ret; | |
907 | ||
908 | leaf = path->nodes[0]; | |
909 | ||
910 | btrfs_dir_item_key_to_cpu(leaf, di, &location); | |
911 | name_len = btrfs_dir_name_len(leaf, di); | |
912 | name = kmalloc(name_len, GFP_NOFS); | |
2a29edc6 | 913 | if (!name) |
914 | return -ENOMEM; | |
915 | ||
e02119d5 | 916 | read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len); |
b3b4aa74 | 917 | btrfs_release_path(path); |
e02119d5 CM |
918 | |
919 | inode = read_one_inode(root, location.objectid); | |
c00e9493 | 920 | if (!inode) { |
3650860b JB |
921 | ret = -EIO; |
922 | goto out; | |
c00e9493 | 923 | } |
e02119d5 | 924 | |
ec051c0f | 925 | ret = link_to_fixup_dir(trans, root, path, location.objectid); |
3650860b JB |
926 | if (ret) |
927 | goto out; | |
12fcfd22 | 928 | |
207e7d92 NB |
929 | ret = btrfs_unlink_inode(trans, root, dir, BTRFS_I(inode), name, |
930 | name_len); | |
3650860b JB |
931 | if (ret) |
932 | goto out; | |
ada9af21 | 933 | else |
e5c304e6 | 934 | ret = btrfs_run_delayed_items(trans); |
3650860b | 935 | out: |
e02119d5 | 936 | kfree(name); |
e02119d5 CM |
937 | iput(inode); |
938 | return ret; | |
939 | } | |
940 | ||
941 | /* | |
77a5b9e3 FM |
942 | * See if a given name and sequence number found in an inode back reference are |
943 | * already in a directory and correctly point to this inode. | |
944 | * | |
945 | * Returns: < 0 on error, 0 if the directory entry does not exist and 1 if it |
946 | * exists. | |
e02119d5 CM |
947 | */ |
948 | static noinline int inode_in_dir(struct btrfs_root *root, | |
949 | struct btrfs_path *path, | |
950 | u64 dirid, u64 objectid, u64 index, | |
951 | const char *name, int name_len) | |
952 | { | |
953 | struct btrfs_dir_item *di; | |
954 | struct btrfs_key location; | |
77a5b9e3 | 955 | int ret = 0; |
e02119d5 CM |
956 | |
957 | di = btrfs_lookup_dir_index_item(NULL, root, path, dirid, | |
958 | index, name, name_len, 0); | |
77a5b9e3 FM |
959 | if (IS_ERR(di)) { |
960 | if (PTR_ERR(di) != -ENOENT) | |
961 | ret = PTR_ERR(di); | |
962 | goto out; | |
963 | } else if (di) { | |
e02119d5 CM |
964 | btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location); |
965 | if (location.objectid != objectid) | |
966 | goto out; | |
77a5b9e3 | 967 | } else { |
e02119d5 | 968 | goto out; |
77a5b9e3 | 969 | } |
e02119d5 | 970 | |
77a5b9e3 | 971 | btrfs_release_path(path); |
e02119d5 | 972 | di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0); |
77a5b9e3 FM |
973 | if (IS_ERR(di)) { |
974 | ret = PTR_ERR(di); | |
e02119d5 | 975 | goto out; |
77a5b9e3 FM |
976 | } else if (di) { |
977 | btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location); | |
978 | if (location.objectid == objectid) | |
979 | ret = 1; | |
980 | } | |
e02119d5 | 981 | out: |
b3b4aa74 | 982 | btrfs_release_path(path); |
77a5b9e3 | 983 | return ret; |
e02119d5 CM |
984 | } |
985 | ||
986 | /* | |
987 | * helper function to check a log tree for a named back reference in | |
988 | * an inode. This is used to decide if a back reference that is | |
989 | * found in the subvolume conflicts with what we find in the log. | |
990 | * | |
991 | * inode backreferences may have multiple refs in a single item, | |
992 | * during replay we process one reference at a time, and we don't | |
993 | * want to delete valid links to a file from the subvolume if that | |
994 | * link is also in the log. | |
995 | */ | |
996 | static noinline int backref_in_log(struct btrfs_root *log, | |
997 | struct btrfs_key *key, | |
f186373f | 998 | u64 ref_objectid, |
df8d116f | 999 | const char *name, int namelen) |
e02119d5 CM |
1000 | { |
1001 | struct btrfs_path *path; | |
e02119d5 | 1002 | int ret; |
e02119d5 CM |
1003 | |
1004 | path = btrfs_alloc_path(); | |
2a29edc6 | 1005 | if (!path) |
1006 | return -ENOMEM; | |
1007 | ||
e02119d5 | 1008 | ret = btrfs_search_slot(NULL, log, key, path, 0, 0); |
d3316c82 NB |
1009 | if (ret < 0) { |
1010 | goto out; | |
1011 | } else if (ret == 1) { | |
89cbf5f6 | 1012 | ret = 0; |
f186373f MF |
1013 | goto out; |
1014 | } | |
1015 | ||
89cbf5f6 NB |
1016 | if (key->type == BTRFS_INODE_EXTREF_KEY) |
1017 | ret = !!btrfs_find_name_in_ext_backref(path->nodes[0], | |
1018 | path->slots[0], | |
1019 | ref_objectid, | |
1020 | name, namelen); | |
1021 | else | |
1022 | ret = !!btrfs_find_name_in_backref(path->nodes[0], | |
1023 | path->slots[0], | |
1024 | name, namelen); | |
e02119d5 CM |
1025 | out: |
1026 | btrfs_free_path(path); | |
89cbf5f6 | 1027 | return ret; |
e02119d5 CM |
1028 | } |
1029 | ||
5a1d7843 | 1030 | static inline int __add_inode_ref(struct btrfs_trans_handle *trans, |
e02119d5 | 1031 | struct btrfs_root *root, |
e02119d5 | 1032 | struct btrfs_path *path, |
5a1d7843 | 1033 | struct btrfs_root *log_root, |
94c91a1f NB |
1034 | struct btrfs_inode *dir, |
1035 | struct btrfs_inode *inode, | |
f186373f MF |
1036 | u64 inode_objectid, u64 parent_objectid, |
1037 | u64 ref_index, char *name, int namelen, | |
1038 | int *search_done) | |
e02119d5 | 1039 | { |
34f3e4f2 | 1040 | int ret; |
f186373f MF |
1041 | char *victim_name; |
1042 | int victim_name_len; | |
1043 | struct extent_buffer *leaf; | |
5a1d7843 | 1044 | struct btrfs_dir_item *di; |
f186373f MF |
1045 | struct btrfs_key search_key; |
1046 | struct btrfs_inode_extref *extref; | |
c622ae60 | 1047 | |
f186373f MF |
1048 | again: |
1049 | /* Search old style refs */ | |
1050 | search_key.objectid = inode_objectid; | |
1051 | search_key.type = BTRFS_INODE_REF_KEY; | |
1052 | search_key.offset = parent_objectid; | |
1053 | ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0); | |
e02119d5 | 1054 | if (ret == 0) { |
e02119d5 CM |
1055 | struct btrfs_inode_ref *victim_ref; |
1056 | unsigned long ptr; | |
1057 | unsigned long ptr_end; | |
f186373f MF |
1058 | |
1059 | leaf = path->nodes[0]; | |
e02119d5 CM |
1060 | |
1061 | /* are we trying to overwrite a back ref for the root directory | |
1062 | * if so, just jump out, we're done | |
1063 | */ | |
f186373f | 1064 | if (search_key.objectid == search_key.offset) |
5a1d7843 | 1065 | return 1; |
e02119d5 CM |
1066 | |
1067 | /* check all the names in this back reference to see | |
1068 | * if they are in the log. if so, we allow them to stay | |
1069 | * otherwise they must be unlinked as a conflict | |
1070 | */ | |
1071 | ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); | |
1072 | ptr_end = ptr + btrfs_item_size_nr(leaf, path->slots[0]); | |
d397712b | 1073 | while (ptr < ptr_end) { |
e02119d5 CM |
1074 | victim_ref = (struct btrfs_inode_ref *)ptr; |
1075 | victim_name_len = btrfs_inode_ref_name_len(leaf, | |
1076 | victim_ref); | |
1077 | victim_name = kmalloc(victim_name_len, GFP_NOFS); | |
3650860b JB |
1078 | if (!victim_name) |
1079 | return -ENOMEM; | |
e02119d5 CM |
1080 | |
1081 | read_extent_buffer(leaf, victim_name, | |
1082 | (unsigned long)(victim_ref + 1), | |
1083 | victim_name_len); | |
1084 | ||
d3316c82 NB |
1085 | ret = backref_in_log(log_root, &search_key, |
1086 | parent_objectid, victim_name, | |
1087 | victim_name_len); | |
1088 | if (ret < 0) { | |
1089 | kfree(victim_name); | |
1090 | return ret; | |
1091 | } else if (!ret) { | |
94c91a1f | 1092 | inc_nlink(&inode->vfs_inode); |
b3b4aa74 | 1093 | btrfs_release_path(path); |
12fcfd22 | 1094 | |
94c91a1f | 1095 | ret = btrfs_unlink_inode(trans, root, dir, inode, |
4ec5934e | 1096 | victim_name, victim_name_len); |
f186373f | 1097 | kfree(victim_name); |
3650860b JB |
1098 | if (ret) |
1099 | return ret; | |
e5c304e6 | 1100 | ret = btrfs_run_delayed_items(trans); |
ada9af21 FDBM |
1101 | if (ret) |
1102 | return ret; | |
f186373f MF |
1103 | *search_done = 1; |
1104 | goto again; | |
e02119d5 CM |
1105 | } |
1106 | kfree(victim_name); | |
f186373f | 1107 | |
e02119d5 CM |
1108 | ptr = (unsigned long)(victim_ref + 1) + victim_name_len; |
1109 | } | |
e02119d5 | 1110 | |
c622ae60 | 1111 | /* |
1112 | * NOTE: we have searched the root tree and checked the |
bb7ab3b9 | 1113 | * corresponding ref, so it does not need to be checked again. |
c622ae60 | 1114 | */ |
5a1d7843 | 1115 | *search_done = 1; |
e02119d5 | 1116 | } |
b3b4aa74 | 1117 | btrfs_release_path(path); |
e02119d5 | 1118 | |
f186373f MF |
1119 | /* Same search but for extended refs */ |
1120 | extref = btrfs_lookup_inode_extref(NULL, root, path, name, namelen, | |
1121 | inode_objectid, parent_objectid, 0, | |
1122 | 0); | |
1123 | if (!IS_ERR_OR_NULL(extref)) { | |
1124 | u32 item_size; | |
1125 | u32 cur_offset = 0; | |
1126 | unsigned long base; | |
1127 | struct inode *victim_parent; | |
1128 | ||
1129 | leaf = path->nodes[0]; | |
1130 | ||
1131 | item_size = btrfs_item_size_nr(leaf, path->slots[0]); | |
1132 | base = btrfs_item_ptr_offset(leaf, path->slots[0]); | |
1133 | ||
1134 | while (cur_offset < item_size) { | |
dd9ef135 | 1135 | extref = (struct btrfs_inode_extref *)(base + cur_offset); |
f186373f MF |
1136 | |
1137 | victim_name_len = btrfs_inode_extref_name_len(leaf, extref); | |
1138 | ||
1139 | if (btrfs_inode_extref_parent(leaf, extref) != parent_objectid) | |
1140 | goto next; | |
1141 | ||
1142 | victim_name = kmalloc(victim_name_len, GFP_NOFS); | |
3650860b JB |
1143 | if (!victim_name) |
1144 | return -ENOMEM; | |
f186373f MF |
1145 | read_extent_buffer(leaf, victim_name, (unsigned long)&extref->name, |
1146 | victim_name_len); | |
1147 | ||
1148 | search_key.objectid = inode_objectid; | |
1149 | search_key.type = BTRFS_INODE_EXTREF_KEY; | |
1150 | search_key.offset = btrfs_extref_hash(parent_objectid, | |
1151 | victim_name, | |
1152 | victim_name_len); | |
d3316c82 NB |
1153 | ret = backref_in_log(log_root, &search_key, |
1154 | parent_objectid, victim_name, | |
1155 | victim_name_len); | |
1156 | if (ret < 0) { | |
1157 | return ret; | |
1158 | } else if (!ret) { | |
f186373f MF |
1159 | ret = -ENOENT; |
1160 | victim_parent = read_one_inode(root, | |
94c91a1f | 1161 | parent_objectid); |
f186373f | 1162 | if (victim_parent) { |
94c91a1f | 1163 | inc_nlink(&inode->vfs_inode); |
f186373f MF |
1164 | btrfs_release_path(path); |
1165 | ||
1166 | ret = btrfs_unlink_inode(trans, root, | |
4ec5934e | 1167 | BTRFS_I(victim_parent), |
94c91a1f | 1168 | inode, |
4ec5934e NB |
1169 | victim_name, |
1170 | victim_name_len); | |
ada9af21 FDBM |
1171 | if (!ret) |
1172 | ret = btrfs_run_delayed_items( | |
e5c304e6 | 1173 | trans); |
f186373f | 1174 | } |
f186373f MF |
1175 | iput(victim_parent); |
1176 | kfree(victim_name); | |
3650860b JB |
1177 | if (ret) |
1178 | return ret; | |
f186373f MF |
1179 | *search_done = 1; |
1180 | goto again; | |
1181 | } | |
1182 | kfree(victim_name); | |
f186373f MF |
1183 | next: |
1184 | cur_offset += victim_name_len + sizeof(*extref); | |
1185 | } | |
1186 | *search_done = 1; | |
1187 | } | |
1188 | btrfs_release_path(path); | |
1189 | ||
34f3e4f2 | 1190 | /* look for a conflicting sequence number */ |
94c91a1f | 1191 | di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir), |
f186373f | 1192 | ref_index, name, namelen, 0); |
52db7779 FM |
1193 | if (IS_ERR(di)) { |
1194 | if (PTR_ERR(di) != -ENOENT) | |
1195 | return PTR_ERR(di); | |
1196 | } else if (di) { | |
94c91a1f | 1197 | ret = drop_one_dir_item(trans, root, path, dir, di); |
3650860b JB |
1198 | if (ret) |
1199 | return ret; | |
34f3e4f2 | 1200 | } |
1201 | btrfs_release_path(path); | |
1202 | ||
52042d8e | 1203 | /* look for a conflicting name */ |
94c91a1f | 1204 | di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir), |
34f3e4f2 | 1205 | name, namelen, 0); |
52db7779 FM |
1206 | if (IS_ERR(di)) { |
1207 | return PTR_ERR(di); | |
1208 | } else if (di) { | |
94c91a1f | 1209 | ret = drop_one_dir_item(trans, root, path, dir, di); |
3650860b JB |
1210 | if (ret) |
1211 | return ret; | |
34f3e4f2 | 1212 | } |
1213 | btrfs_release_path(path); | |
1214 | ||
5a1d7843 JS |
1215 | return 0; |
1216 | } | |
e02119d5 | 1217 | |
bae15d95 QW |
1218 | static int extref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr, |
1219 | u32 *namelen, char **name, u64 *index, | |
1220 | u64 *parent_objectid) | |
f186373f MF |
1221 | { |
1222 | struct btrfs_inode_extref *extref; | |
1223 | ||
1224 | extref = (struct btrfs_inode_extref *)ref_ptr; | |
1225 | ||
1226 | *namelen = btrfs_inode_extref_name_len(eb, extref); | |
1227 | *name = kmalloc(*namelen, GFP_NOFS); | |
1228 | if (*name == NULL) | |
1229 | return -ENOMEM; | |
1230 | ||
1231 | read_extent_buffer(eb, *name, (unsigned long)&extref->name, | |
1232 | *namelen); | |
1233 | ||
1f250e92 FM |
1234 | if (index) |
1235 | *index = btrfs_inode_extref_index(eb, extref); | |
f186373f MF |
1236 | if (parent_objectid) |
1237 | *parent_objectid = btrfs_inode_extref_parent(eb, extref); | |
1238 | ||
1239 | return 0; | |
1240 | } | |
1241 | ||
bae15d95 QW |
1242 | static int ref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr, |
1243 | u32 *namelen, char **name, u64 *index) | |
f186373f MF |
1244 | { |
1245 | struct btrfs_inode_ref *ref; | |
1246 | ||
1247 | ref = (struct btrfs_inode_ref *)ref_ptr; | |
1248 | ||
1249 | *namelen = btrfs_inode_ref_name_len(eb, ref); | |
1250 | *name = kmalloc(*namelen, GFP_NOFS); | |
1251 | if (*name == NULL) | |
1252 | return -ENOMEM; | |
1253 | ||
1254 | read_extent_buffer(eb, *name, (unsigned long)(ref + 1), *namelen); | |
1255 | ||
1f250e92 FM |
1256 | if (index) |
1257 | *index = btrfs_inode_ref_index(eb, ref); | |
f186373f MF |
1258 | |
1259 | return 0; | |
1260 | } | |
1261 | ||
1f250e92 FM |
1262 | /* |
1263 | * Take an inode reference item from the log tree and iterate all names from the | |
1264 | * inode reference item in the subvolume tree with the same key (if it exists). | |
1265 | * For any name that is not in the inode reference item from the log tree, do a | |
1266 | * proper unlink of that name (that is, remove its entry from the inode | |
1267 | * reference item and both dir index keys). | |
1268 | */ | |
1269 | static int unlink_old_inode_refs(struct btrfs_trans_handle *trans, | |
1270 | struct btrfs_root *root, | |
1271 | struct btrfs_path *path, | |
1272 | struct btrfs_inode *inode, | |
1273 | struct extent_buffer *log_eb, | |
1274 | int log_slot, | |
1275 | struct btrfs_key *key) | |
1276 | { | |
1277 | int ret; | |
1278 | unsigned long ref_ptr; | |
1279 | unsigned long ref_end; | |
1280 | struct extent_buffer *eb; | |
1281 | ||
1282 | again: | |
1283 | btrfs_release_path(path); | |
1284 | ret = btrfs_search_slot(NULL, root, key, path, 0, 0); | |
1285 | if (ret > 0) { | |
1286 | ret = 0; | |
1287 | goto out; | |
1288 | } | |
1289 | if (ret < 0) | |
1290 | goto out; | |
1291 | ||
1292 | eb = path->nodes[0]; | |
1293 | ref_ptr = btrfs_item_ptr_offset(eb, path->slots[0]); | |
1294 | ref_end = ref_ptr + btrfs_item_size_nr(eb, path->slots[0]); | |
1295 | while (ref_ptr < ref_end) { | |
1296 | char *name = NULL; | |
1297 | int namelen; | |
1298 | u64 parent_id; | |
1299 | ||
1300 | if (key->type == BTRFS_INODE_EXTREF_KEY) { | |
1301 | ret = extref_get_fields(eb, ref_ptr, &namelen, &name, | |
1302 | NULL, &parent_id); | |
1303 | } else { | |
1304 | parent_id = key->offset; | |
1305 | ret = ref_get_fields(eb, ref_ptr, &namelen, &name, | |
1306 | NULL); | |
1307 | } | |
1308 | if (ret) | |
1309 | goto out; | |
1310 | ||
1311 | if (key->type == BTRFS_INODE_EXTREF_KEY) | |
6ff49c6a NB |
1312 | ret = !!btrfs_find_name_in_ext_backref(log_eb, log_slot, |
1313 | parent_id, name, | |
1314 | namelen); | |
1f250e92 | 1315 | else |
9bb8407f NB |
1316 | ret = !!btrfs_find_name_in_backref(log_eb, log_slot, |
1317 | name, namelen); | |
1f250e92 FM |
1318 | |
1319 | if (!ret) { | |
1320 | struct inode *dir; | |
1321 | ||
1322 | btrfs_release_path(path); | |
1323 | dir = read_one_inode(root, parent_id); | |
1324 | if (!dir) { | |
1325 | ret = -ENOENT; | |
1326 | kfree(name); | |
1327 | goto out; | |
1328 | } | |
1329 | ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir), | |
1330 | inode, name, namelen); | |
1331 | kfree(name); | |
1332 | iput(dir); | |
1333 | if (ret) | |
1334 | goto out; | |
1335 | goto again; | |
1336 | } | |
1337 | ||
1338 | kfree(name); | |
1339 | ref_ptr += namelen; | |
1340 | if (key->type == BTRFS_INODE_EXTREF_KEY) | |
1341 | ref_ptr += sizeof(struct btrfs_inode_extref); | |
1342 | else | |
1343 | ref_ptr += sizeof(struct btrfs_inode_ref); | |
1344 | } | |
1345 | ret = 0; | |
1346 | out: | |
1347 | btrfs_release_path(path); | |
1348 | return ret; | |
1349 | } | |
1350 | ||
0d836392 FM |
1351 | static int btrfs_inode_ref_exists(struct inode *inode, struct inode *dir, |
1352 | const u8 ref_type, const char *name, | |
1353 | const int namelen) | |
1354 | { | |
1355 | struct btrfs_key key; | |
1356 | struct btrfs_path *path; | |
1357 | const u64 parent_id = btrfs_ino(BTRFS_I(dir)); | |
1358 | int ret; | |
1359 | ||
1360 | path = btrfs_alloc_path(); | |
1361 | if (!path) | |
1362 | return -ENOMEM; | |
1363 | ||
1364 | key.objectid = btrfs_ino(BTRFS_I(inode)); | |
1365 | key.type = ref_type; | |
1366 | if (key.type == BTRFS_INODE_REF_KEY) | |
1367 | key.offset = parent_id; | |
1368 | else | |
1369 | key.offset = btrfs_extref_hash(parent_id, name, namelen); | |
1370 | ||
1371 | ret = btrfs_search_slot(NULL, BTRFS_I(inode)->root, &key, path, 0, 0); | |
1372 | if (ret < 0) | |
1373 | goto out; | |
1374 | if (ret > 0) { | |
1375 | ret = 0; | |
1376 | goto out; | |
1377 | } | |
1378 | if (key.type == BTRFS_INODE_EXTREF_KEY) | |
6ff49c6a NB |
1379 | ret = !!btrfs_find_name_in_ext_backref(path->nodes[0], |
1380 | path->slots[0], parent_id, name, namelen); | |
0d836392 | 1381 | else |
9bb8407f NB |
1382 | ret = !!btrfs_find_name_in_backref(path->nodes[0], path->slots[0], |
1383 | name, namelen); | |
0d836392 FM |
1384 | |
1385 | out: | |
1386 | btrfs_free_path(path); | |
1387 | return ret; | |
1388 | } | |
1389 | ||
6b5fc433 FM |
1390 | static int add_link(struct btrfs_trans_handle *trans, struct btrfs_root *root, |
1391 | struct inode *dir, struct inode *inode, const char *name, | |
1392 | int namelen, u64 ref_index) | |
1393 | { | |
1394 | struct btrfs_dir_item *dir_item; | |
1395 | struct btrfs_key key; | |
1396 | struct btrfs_path *path; | |
1397 | struct inode *other_inode = NULL; | |
1398 | int ret; | |
1399 | ||
1400 | path = btrfs_alloc_path(); | |
1401 | if (!path) | |
1402 | return -ENOMEM; | |
1403 | ||
1404 | dir_item = btrfs_lookup_dir_item(NULL, root, path, | |
1405 | btrfs_ino(BTRFS_I(dir)), | |
1406 | name, namelen, 0); | |
1407 | if (!dir_item) { | |
1408 | btrfs_release_path(path); | |
1409 | goto add_link; | |
1410 | } else if (IS_ERR(dir_item)) { | |
1411 | ret = PTR_ERR(dir_item); | |
1412 | goto out; | |
1413 | } | |
1414 | ||
1415 | /* | |
1416 | * Our inode's dentry collides with the dentry of another inode which is | |
1417 | * in the log but not yet processed since it has a higher inode number. | |
1418 | * So delete that other dentry. | |
1419 | */ | |
1420 | btrfs_dir_item_key_to_cpu(path->nodes[0], dir_item, &key); | |
1421 | btrfs_release_path(path); | |
1422 | other_inode = read_one_inode(root, key.objectid); | |
1423 | if (!other_inode) { | |
1424 | ret = -ENOENT; | |
1425 | goto out; | |
1426 | } | |
1427 | ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir), BTRFS_I(other_inode), | |
1428 | name, namelen); | |
1429 | if (ret) | |
1430 | goto out; | |
1431 | /* | |
1432 | * If we dropped the link count to 0, bump it so that later the iput() | |
1433 | * on the inode will not free it. We will fixup the link count later. | |
1434 | */ | |
1435 | if (other_inode->i_nlink == 0) | |
1436 | inc_nlink(other_inode); | |
1437 | ||
1438 | ret = btrfs_run_delayed_items(trans); | |
1439 | if (ret) | |
1440 | goto out; | |
1441 | add_link: | |
1442 | ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), | |
1443 | name, namelen, 0, ref_index); | |
1444 | out: | |
1445 | iput(other_inode); | |
1446 | btrfs_free_path(path); | |
1447 | ||
1448 | return ret; | |
1449 | } | |
1450 | ||
5a1d7843 JS |
1451 | /* |
1452 | * replay one inode back reference item found in the log tree. | |
1453 | * eb, slot and key refer to the buffer and key found in the log tree. | |
1454 | * root is the destination we are replaying into, and path is for temp | |
1455 | * use by this function. (it should be released on return). | |
1456 | */ | |
1457 | static noinline int add_inode_ref(struct btrfs_trans_handle *trans, | |
1458 | struct btrfs_root *root, | |
1459 | struct btrfs_root *log, | |
1460 | struct btrfs_path *path, | |
1461 | struct extent_buffer *eb, int slot, | |
1462 | struct btrfs_key *key) | |
1463 | { | |
03b2f08b GB |
1464 | struct inode *dir = NULL; |
1465 | struct inode *inode = NULL; | |
5a1d7843 JS |
1466 | unsigned long ref_ptr; |
1467 | unsigned long ref_end; | |
03b2f08b | 1468 | char *name = NULL; |
5a1d7843 JS |
1469 | int namelen; |
1470 | int ret; | |
1471 | int search_done = 0; | |
f186373f MF |
1472 | int log_ref_ver = 0; |
1473 | u64 parent_objectid; | |
1474 | u64 inode_objectid; | |
f46dbe3d | 1475 | u64 ref_index = 0; |
f186373f MF |
1476 | int ref_struct_size; |
1477 | ||
1478 | ref_ptr = btrfs_item_ptr_offset(eb, slot); | |
1479 | ref_end = ref_ptr + btrfs_item_size_nr(eb, slot); | |
1480 | ||
1481 | if (key->type == BTRFS_INODE_EXTREF_KEY) { | |
1482 | struct btrfs_inode_extref *r; | |
1483 | ||
1484 | ref_struct_size = sizeof(struct btrfs_inode_extref); | |
1485 | log_ref_ver = 1; | |
1486 | r = (struct btrfs_inode_extref *)ref_ptr; | |
1487 | parent_objectid = btrfs_inode_extref_parent(eb, r); | |
1488 | } else { | |
1489 | ref_struct_size = sizeof(struct btrfs_inode_ref); | |
1490 | parent_objectid = key->offset; | |
1491 | } | |
1492 | inode_objectid = key->objectid; | |
e02119d5 | 1493 | |
5a1d7843 JS |
1494 | /* |
1495 | * it is possible that we didn't log all the parent directories | |
1496 | * for a given inode. If we don't find the dir, just don't | |
1497 | * copy the back ref in. The link count fixup code will take | |
1498 | * care of the rest | |
1499 | */ | |
f186373f | 1500 | dir = read_one_inode(root, parent_objectid); |
03b2f08b GB |
1501 | if (!dir) { |
1502 | ret = -ENOENT; | |
1503 | goto out; | |
1504 | } | |
5a1d7843 | 1505 | |
f186373f | 1506 | inode = read_one_inode(root, inode_objectid); |
5a1d7843 | 1507 | if (!inode) { |
03b2f08b GB |
1508 | ret = -EIO; |
1509 | goto out; | |
5a1d7843 JS |
1510 | } |
1511 | ||
5a1d7843 | 1512 | while (ref_ptr < ref_end) { |
f186373f | 1513 | if (log_ref_ver) { |
bae15d95 QW |
1514 | ret = extref_get_fields(eb, ref_ptr, &namelen, &name, |
1515 | &ref_index, &parent_objectid); | |
f186373f MF |
1516 | /* |
1517 | * parent object can change from one array | |
1518 | * item to another. | |
1519 | */ | |
1520 | if (!dir) | |
1521 | dir = read_one_inode(root, parent_objectid); | |
03b2f08b GB |
1522 | if (!dir) { |
1523 | ret = -ENOENT; | |
1524 | goto out; | |
1525 | } | |
f186373f | 1526 | } else { |
bae15d95 QW |
1527 | ret = ref_get_fields(eb, ref_ptr, &namelen, &name, |
1528 | &ref_index); | |
f186373f MF |
1529 | } |
1530 | if (ret) | |
03b2f08b | 1531 | goto out; |
5a1d7843 | 1532 | |
77a5b9e3 FM |
1533 | ret = inode_in_dir(root, path, btrfs_ino(BTRFS_I(dir)), |
1534 | btrfs_ino(BTRFS_I(inode)), ref_index, | |
1535 | name, namelen); | |
1536 | if (ret < 0) { | |
1537 | goto out; | |
1538 | } else if (ret == 0) { | |
5a1d7843 JS |
1539 | /* |
1540 | * look for a conflicting back reference in the | |
1541 | * metadata. if we find one we have to unlink that name | |
1542 | * of the file before we add our new link. Later on, we | |
1543 | * overwrite any existing back reference, and we don't | |
1544 | * want to create dangling pointers in the directory. | |
1545 | */ | |
1546 | ||
1547 | if (!search_done) { | |
1548 | ret = __add_inode_ref(trans, root, path, log, | |
94c91a1f | 1549 | BTRFS_I(dir), |
d75eefdf | 1550 | BTRFS_I(inode), |
f186373f MF |
1551 | inode_objectid, |
1552 | parent_objectid, | |
1553 | ref_index, name, namelen, | |
5a1d7843 | 1554 | &search_done); |
03b2f08b GB |
1555 | if (ret) { |
1556 | if (ret == 1) | |
1557 | ret = 0; | |
3650860b JB |
1558 | goto out; |
1559 | } | |
5a1d7843 JS |
1560 | } |
1561 | ||
0d836392 FM |
1562 | /* |
1563 | * If a reference item already exists for this inode | |
1564 | * with the same parent and name, but different index, | |
1565 | * drop it and the corresponding directory index entries | |
1566 | * from the parent before adding the new reference item | |
1567 | * and dir index entries, otherwise we would fail with | |
1568 | * -EEXIST returned from btrfs_add_link() below. | |
1569 | */ | |
1570 | ret = btrfs_inode_ref_exists(inode, dir, key->type, | |
1571 | name, namelen); | |
1572 | if (ret > 0) { | |
1573 | ret = btrfs_unlink_inode(trans, root, | |
1574 | BTRFS_I(dir), | |
1575 | BTRFS_I(inode), | |
1576 | name, namelen); | |
1577 | /* | |
1578 | * If we dropped the link count to 0, bump it so | |
1579 | * that later the iput() on the inode will not | |
1580 | * free it. We will fixup the link count later. | |
1581 | */ | |
1582 | if (!ret && inode->i_nlink == 0) | |
1583 | inc_nlink(inode); | |
1584 | } | |
1585 | if (ret < 0) | |
1586 | goto out; | |
1587 | ||
5a1d7843 | 1588 | /* insert our name */ |
6b5fc433 FM |
1589 | ret = add_link(trans, root, dir, inode, name, namelen, |
1590 | ref_index); | |
3650860b JB |
1591 | if (ret) |
1592 | goto out; | |
5a1d7843 | 1593 | |
f96d4474 JB |
1594 | ret = btrfs_update_inode(trans, root, BTRFS_I(inode)); |
1595 | if (ret) | |
1596 | goto out; | |
5a1d7843 | 1597 | } |
77a5b9e3 | 1598 | /* Else, ret == 1, we already have a perfect match, we're done. */ |
5a1d7843 | 1599 | |
f186373f | 1600 | ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen; |
5a1d7843 | 1601 | kfree(name); |
03b2f08b | 1602 | name = NULL; |
f186373f MF |
1603 | if (log_ref_ver) { |
1604 | iput(dir); | |
1605 | dir = NULL; | |
1606 | } | |
5a1d7843 | 1607 | } |
e02119d5 | 1608 | |
1f250e92 FM |
1609 | /* |
1610 | * Before we overwrite the inode reference item in the subvolume tree | |
1611 | * with the item from the log tree, we must unlink all names from the | |
1612 | * parent directory that are in the subvolume's tree inode reference | |
1613 | * item, otherwise we end up with an inconsistent subvolume tree where | |
1614 | * dir index entries exist for a name but there is no inode reference | |
1615 | * item with the same name. | |
1616 | */ | |
1617 | ret = unlink_old_inode_refs(trans, root, path, BTRFS_I(inode), eb, slot, | |
1618 | key); | |
1619 | if (ret) | |
1620 | goto out; | |
1621 | ||
e02119d5 CM |
1622 | /* finally write the back reference in the inode */ |
1623 | ret = overwrite_item(trans, root, path, eb, slot, key); | |
5a1d7843 | 1624 | out: |
b3b4aa74 | 1625 | btrfs_release_path(path); |
03b2f08b | 1626 | kfree(name); |
e02119d5 CM |
1627 | iput(dir); |
1628 | iput(inode); | |
3650860b | 1629 | return ret; |
e02119d5 CM |
1630 | } |
1631 | ||
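/*
 * Count how many names reference the inode through extended reference
 * items (BTRFS_INODE_EXTREF_KEY).  Returns the number of names found,
 * or a negative errno (other than -ENOENT, which just ends the scan).
 */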
f186373f | 1632 | static int count_inode_extrefs(struct btrfs_root *root, |
36283658 | 1633 | struct btrfs_inode *inode, struct btrfs_path *path) |
f186373f MF |
1634 | { |
1635 | int ret = 0; | |
1636 | int name_len; | |
1637 | unsigned int nlink = 0; | |
1638 | u32 item_size; | |
1639 | u32 cur_offset = 0; | |
36283658 | 1640 | u64 inode_objectid = btrfs_ino(inode); |
f186373f MF |
1641 | u64 offset = 0; |
1642 | unsigned long ptr; | |
1643 | struct btrfs_inode_extref *extref; | |
1644 | struct extent_buffer *leaf; | |
1645 | ||
1646 | while (1) { | |
1647 | ret = btrfs_find_one_extref(root, inode_objectid, offset, path, | |
1648 | &extref, &offset); | |
1649 | if (ret) | |
1650 | break; | |
c71bf099 | 1651 | |
f186373f MF |
1652 | leaf = path->nodes[0]; |
1653 | item_size = btrfs_item_size_nr(leaf, path->slots[0]); | |
1654 | ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); | |
2c2c452b | 1655 | cur_offset = 0; |
f186373f MF |
1656 | |
1657 | while (cur_offset < item_size) { | |
1658 | extref = (struct btrfs_inode_extref *) (ptr + cur_offset); | |
1659 | name_len = btrfs_inode_extref_name_len(leaf, extref); | |
1660 | ||
1661 | nlink++; | |
1662 | ||
1663 | cur_offset += name_len + sizeof(*extref); | |
1664 | } | |
1665 | ||
1666 | offset++; | |
1667 | btrfs_release_path(path); | |
1668 | } | |
1669 | btrfs_release_path(path); | |
1670 | ||
2c2c452b | 1671 | if (ret < 0 && ret != -ENOENT) |
f186373f MF |
1672 | return ret; |
1673 | return nlink; | |
1674 | } | |
1675 | ||
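/*
 * Count how many names reference the inode through regular
 * BTRFS_INODE_REF_KEY items, walking the items from the highest key
 * offset down towards offset 0 and counting every name packed into
 * each item.
 */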
1676 | static int count_inode_refs(struct btrfs_root *root, | |
f329e319 | 1677 | struct btrfs_inode *inode, struct btrfs_path *path) |
e02119d5 | 1678 | { |
e02119d5 CM |
1679 | int ret; |
1680 | struct btrfs_key key; | |
f186373f | 1681 | unsigned int nlink = 0; |
e02119d5 CM |
1682 | unsigned long ptr; |
1683 | unsigned long ptr_end; | |
1684 | int name_len; | |
f329e319 | 1685 | u64 ino = btrfs_ino(inode); |
e02119d5 | 1686 | |
33345d01 | 1687 | key.objectid = ino; |
e02119d5 CM |
1688 | key.type = BTRFS_INODE_REF_KEY; |
1689 | key.offset = (u64)-1; | |
1690 | ||
d397712b | 1691 | while (1) { |
e02119d5 CM |
1692 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); |
1693 | if (ret < 0) | |
1694 | break; | |
1695 | if (ret > 0) { | |
1696 | if (path->slots[0] == 0) | |
1697 | break; | |
1698 | path->slots[0]--; | |
1699 | } | |
e93ae26f | 1700 | process_slot: |
e02119d5 CM |
1701 | btrfs_item_key_to_cpu(path->nodes[0], &key, |
1702 | path->slots[0]); | |
33345d01 | 1703 | if (key.objectid != ino || |
e02119d5 CM |
1704 | key.type != BTRFS_INODE_REF_KEY) |
1705 | break; | |
1706 | ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]); | |
1707 | ptr_end = ptr + btrfs_item_size_nr(path->nodes[0], | |
1708 | path->slots[0]); | |
d397712b | 1709 | while (ptr < ptr_end) { |
e02119d5 CM |
1710 | struct btrfs_inode_ref *ref; |
1711 | ||
1712 | ref = (struct btrfs_inode_ref *)ptr; | |
1713 | name_len = btrfs_inode_ref_name_len(path->nodes[0], | |
1714 | ref); | |
1715 | ptr = (unsigned long)(ref + 1) + name_len; | |
1716 | nlink++; | |
1717 | } | |
1718 | ||
1719 | if (key.offset == 0) | |
1720 | break; | |
e93ae26f FDBM |
1721 | if (path->slots[0] > 0) { |
1722 | path->slots[0]--; | |
1723 | goto process_slot; | |
1724 | } | |
e02119d5 | 1725 | key.offset--; |
b3b4aa74 | 1726 | btrfs_release_path(path); |
e02119d5 | 1727 | } |
b3b4aa74 | 1728 | btrfs_release_path(path); |
f186373f MF |
1729 | |
1730 | return nlink; | |
1731 | } | |
1732 | ||
1733 | /* | |
1734 | * There are a few corner cases where the link count of the file can't |
1735 | * be properly maintained during replay. So, instead of adding | |
1736 | * lots of complexity to the log code, we just scan the backrefs | |
1737 | * for any file that has been through replay. | |
1738 | * | |
1739 | * The scan will update the link count on the inode to reflect the | |
1740 | * number of back refs found. If it goes down to zero, the iput | |
1741 | * will free the inode. | |
1742 | */ | |
1743 | static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans, | |
1744 | struct btrfs_root *root, | |
1745 | struct inode *inode) | |
1746 | { | |
1747 | struct btrfs_path *path; | |
1748 | int ret; | |
1749 | u64 nlink = 0; | |
4a0cc7ca | 1750 | u64 ino = btrfs_ino(BTRFS_I(inode)); |
f186373f MF |
1751 | |
1752 | path = btrfs_alloc_path(); | |
1753 | if (!path) | |
1754 | return -ENOMEM; | |
1755 | ||
f329e319 | 1756 | ret = count_inode_refs(root, BTRFS_I(inode), path); |
f186373f MF |
1757 | if (ret < 0) |
1758 | goto out; | |
1759 | ||
1760 | nlink = ret; | |
1761 | ||
36283658 | 1762 | ret = count_inode_extrefs(root, BTRFS_I(inode), path); |
f186373f MF |
1763 | if (ret < 0) |
1764 | goto out; | |
1765 | ||
1766 | nlink += ret; | |
1767 | ||
1768 | ret = 0; | |
1769 | ||
e02119d5 | 1770 | if (nlink != inode->i_nlink) { |
bfe86848 | 1771 | set_nlink(inode, nlink); |
f96d4474 JB |
1772 | ret = btrfs_update_inode(trans, root, BTRFS_I(inode)); |
1773 | if (ret) | |
1774 | goto out; | |
e02119d5 | 1775 | } |
8d5bf1cb | 1776 | BTRFS_I(inode)->index_cnt = (u64)-1; |
e02119d5 | 1777 | |
c71bf099 YZ |
1778 | if (inode->i_nlink == 0) { |
1779 | if (S_ISDIR(inode->i_mode)) { | |
1780 | ret = replay_dir_deletes(trans, root, NULL, path, | |
33345d01 | 1781 | ino, 1); |
3650860b JB |
1782 | if (ret) |
1783 | goto out; | |
c71bf099 | 1784 | } |
ecdcf3c2 NB |
1785 | ret = btrfs_insert_orphan_item(trans, root, ino); |
1786 | if (ret == -EEXIST) | |
1787 | ret = 0; | |
12fcfd22 | 1788 | } |
12fcfd22 | 1789 | |
f186373f MF |
1790 | out: |
1791 | btrfs_free_path(path); | |
1792 | return ret; | |
e02119d5 CM |
1793 | } |
1794 | ||
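/*
 * Walk the BTRFS_ORPHAN_ITEM_KEY entries recorded under
 * BTRFS_TREE_LOG_FIXUP_OBJECTID and, for every inode recorded there,
 * recompute its link count from the back references now present in the
 * subvolume tree.  Each fixup entry is deleted as it is processed.
 */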
1795 | static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans, | |
1796 | struct btrfs_root *root, | |
1797 | struct btrfs_path *path) | |
1798 | { | |
1799 | int ret; | |
1800 | struct btrfs_key key; | |
1801 | struct inode *inode; | |
1802 | ||
1803 | key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID; | |
1804 | key.type = BTRFS_ORPHAN_ITEM_KEY; | |
1805 | key.offset = (u64)-1; | |
d397712b | 1806 | while (1) { |
e02119d5 CM |
1807 | ret = btrfs_search_slot(trans, root, &key, path, -1, 1); |
1808 | if (ret < 0) | |
1809 | break; | |
1810 | ||
1811 | if (ret == 1) { | |
011b28ac | 1812 | ret = 0; |
e02119d5 CM |
1813 | if (path->slots[0] == 0) |
1814 | break; | |
1815 | path->slots[0]--; | |
1816 | } | |
1817 | ||
1818 | btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); | |
1819 | if (key.objectid != BTRFS_TREE_LOG_FIXUP_OBJECTID || | |
1820 | key.type != BTRFS_ORPHAN_ITEM_KEY) | |
1821 | break; | |
1822 | ||
1823 | ret = btrfs_del_item(trans, root, path); | |
65a246c5 | 1824 | if (ret) |
011b28ac | 1825 | break; |
e02119d5 | 1826 | |
b3b4aa74 | 1827 | btrfs_release_path(path); |
e02119d5 | 1828 | inode = read_one_inode(root, key.offset); |
011b28ac JB |
1829 | if (!inode) { |
1830 | ret = -EIO; | |
1831 | break; | |
1832 | } | |
e02119d5 CM |
1833 | |
1834 | ret = fixup_inode_link_count(trans, root, inode); | |
e02119d5 | 1835 | iput(inode); |
3650860b | 1836 | if (ret) |
011b28ac | 1837 | break; |
e02119d5 | 1838 | |
12fcfd22 CM |
1839 | /* |
1840 | * fixup on a directory may create new entries, | |
1841 | * make sure we always look for the highest possible |
1842 | * offset | |
1843 | */ | |
1844 | key.offset = (u64)-1; | |
e02119d5 | 1845 | } |
b3b4aa74 | 1846 | btrfs_release_path(path); |
65a246c5 | 1847 | return ret; |
e02119d5 CM |
1848 | } |
1849 | ||
1850 | ||
1851 | /* | |
1852 | * record a given inode in the fixup dir so we can check its link | |
1853 | * count when replay is done. The link count is incremented here | |
1854 | * so the inode won't go away until we check it | |
1855 | */ | |
1856 | static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans, | |
1857 | struct btrfs_root *root, | |
1858 | struct btrfs_path *path, | |
1859 | u64 objectid) | |
1860 | { | |
1861 | struct btrfs_key key; | |
1862 | int ret = 0; | |
1863 | struct inode *inode; | |
1864 | ||
1865 | inode = read_one_inode(root, objectid); | |
c00e9493 TI |
1866 | if (!inode) |
1867 | return -EIO; | |
e02119d5 CM |
1868 | |
1869 | key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID; | |
962a298f | 1870 | key.type = BTRFS_ORPHAN_ITEM_KEY; |
e02119d5 CM |
1871 | key.offset = objectid; |
1872 | ||
1873 | ret = btrfs_insert_empty_item(trans, root, path, &key, 0); | |
1874 | ||
b3b4aa74 | 1875 | btrfs_release_path(path); |
e02119d5 | 1876 | if (ret == 0) { |
9bf7a489 JB |
1877 | if (!inode->i_nlink) |
1878 | set_nlink(inode, 1); | |
1879 | else | |
8b558c5f | 1880 | inc_nlink(inode); |
9a56fcd1 | 1881 | ret = btrfs_update_inode(trans, root, BTRFS_I(inode)); |
e02119d5 CM |
1882 | } else if (ret == -EEXIST) { |
1883 | ret = 0; | |
e02119d5 CM |
1884 | } |
1885 | iput(inode); | |
1886 | ||
1887 | return ret; | |
1888 | } | |
1889 | ||
1890 | /* | |
1891 | * when replaying the log for a directory, we only insert names | |
1892 | * for inodes that actually exist. This means an fsync on a directory | |
1893 | * does not implicitly fsync all the new files in it | |
1894 | */ | |
1895 | static noinline int insert_one_name(struct btrfs_trans_handle *trans, | |
1896 | struct btrfs_root *root, | |
e02119d5 | 1897 | u64 dirid, u64 index, |
60d53eb3 | 1898 | char *name, int name_len, |
e02119d5 CM |
1899 | struct btrfs_key *location) |
1900 | { | |
1901 | struct inode *inode; | |
1902 | struct inode *dir; | |
1903 | int ret; | |
1904 | ||
1905 | inode = read_one_inode(root, location->objectid); | |
1906 | if (!inode) | |
1907 | return -ENOENT; | |
1908 | ||
1909 | dir = read_one_inode(root, dirid); | |
1910 | if (!dir) { | |
1911 | iput(inode); | |
1912 | return -EIO; | |
1913 | } | |
d555438b | 1914 | |
db0a669f NB |
1915 | ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name, |
1916 | name_len, 1, index); | |
e02119d5 CM |
1917 | |
1918 | /* FIXME, put inode into FIXUP list */ | |
1919 | ||
1920 | iput(inode); | |
1921 | iput(dir); | |
1922 | return ret; | |
1923 | } | |
1924 | ||
1925 | /* | |
1926 | * take a single entry in a log directory item and replay it into | |
1927 | * the subvolume. | |
1928 | * | |
1929 | * if a conflicting item exists in the subdirectory already, | |
1930 | * the inode it points to is unlinked and put into the link count | |
1931 | * fix up tree. | |
1932 | * | |
1933 | * If a name from the log points to a file or directory that does | |
1934 | * not exist in the FS, it is skipped. fsyncs on directories | |
1935 | * do not force down inodes inside that directory, just changes to the | |
1936 | * names or unlinks in a directory. | |
bb53eda9 FM |
1937 | * |
1938 | * Returns < 0 on error, 0 if the name wasn't replayed (dentry points to a | |
1939 | * non-existing inode) and 1 if the name was replayed. | |
e02119d5 CM |
1940 | */ |
1941 | static noinline int replay_one_name(struct btrfs_trans_handle *trans, | |
1942 | struct btrfs_root *root, | |
1943 | struct btrfs_path *path, | |
1944 | struct extent_buffer *eb, | |
1945 | struct btrfs_dir_item *di, | |
1946 | struct btrfs_key *key) | |
1947 | { | |
1948 | char *name; | |
1949 | int name_len; | |
1950 | struct btrfs_dir_item *dst_di; | |
1951 | struct btrfs_key found_key; | |
1952 | struct btrfs_key log_key; | |
1953 | struct inode *dir; | |
e02119d5 | 1954 | u8 log_type; |
4bef0848 | 1955 | int exists; |
3650860b | 1956 | int ret = 0; |
d555438b | 1957 | bool update_size = (key->type == BTRFS_DIR_INDEX_KEY); |
bb53eda9 | 1958 | bool name_added = false; |
e02119d5 CM |
1959 | |
1960 | dir = read_one_inode(root, key->objectid); | |
c00e9493 TI |
1961 | if (!dir) |
1962 | return -EIO; | |
e02119d5 CM |
1963 | |
1964 | name_len = btrfs_dir_name_len(eb, di); | |
1965 | name = kmalloc(name_len, GFP_NOFS); | |
2bac325e FDBM |
1966 | if (!name) { |
1967 | ret = -ENOMEM; | |
1968 | goto out; | |
1969 | } | |
2a29edc6 | 1970 | |
e02119d5 CM |
1971 | log_type = btrfs_dir_type(eb, di); |
1972 | read_extent_buffer(eb, name, (unsigned long)(di + 1), | |
1973 | name_len); | |
1974 | ||
1975 | btrfs_dir_item_key_to_cpu(eb, di, &log_key); | |
4bef0848 CM |
1976 | exists = btrfs_lookup_inode(trans, root, path, &log_key, 0); |
1977 | if (exists == 0) | |
1978 | exists = 1; | |
1979 | else | |
1980 | exists = 0; | |
b3b4aa74 | 1981 | btrfs_release_path(path); |
4bef0848 | 1982 | |
e02119d5 CM |
1983 | if (key->type == BTRFS_DIR_ITEM_KEY) { |
1984 | dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid, | |
1985 | name, name_len, 1); | |
d397712b | 1986 | } else if (key->type == BTRFS_DIR_INDEX_KEY) { |
e02119d5 CM |
1987 | dst_di = btrfs_lookup_dir_index_item(trans, root, path, |
1988 | key->objectid, | |
1989 | key->offset, name, | |
1990 | name_len, 1); | |
1991 | } else { | |
3650860b JB |
1992 | /* Corruption */ |
1993 | ret = -EINVAL; | |
1994 | goto out; | |
e02119d5 | 1995 | } |
e15ac641 FM |
1996 | |
1997 | if (dst_di == ERR_PTR(-ENOENT)) | |
1998 | dst_di = NULL; | |
1999 | ||
2000 | if (IS_ERR(dst_di)) { | |
2001 | ret = PTR_ERR(dst_di); | |
2002 | goto out; | |
2003 | } else if (!dst_di) { | |
e02119d5 CM |
2004 | /* we need a sequence number to insert, so we only |
2005 | * do inserts for the BTRFS_DIR_INDEX_KEY types | |
2006 | */ | |
2007 | if (key->type != BTRFS_DIR_INDEX_KEY) | |
2008 | goto out; | |
2009 | goto insert; | |
2010 | } | |
2011 | ||
2012 | btrfs_dir_item_key_to_cpu(path->nodes[0], dst_di, &found_key); | |
2013 | /* the existing item matches the logged item */ | |
2014 | if (found_key.objectid == log_key.objectid && | |
2015 | found_key.type == log_key.type && | |
2016 | found_key.offset == log_key.offset && | |
2017 | btrfs_dir_type(path->nodes[0], dst_di) == log_type) { | |
a2cc11db | 2018 | update_size = false; |
e02119d5 CM |
2019 | goto out; |
2020 | } | |
2021 | ||
2022 | /* | |
2023 | * don't drop the conflicting directory entry if the inode | |
2024 | * for the new entry doesn't exist | |
2025 | */ | |
4bef0848 | 2026 | if (!exists) |
e02119d5 CM |
2027 | goto out; |
2028 | ||
207e7d92 | 2029 | ret = drop_one_dir_item(trans, root, path, BTRFS_I(dir), dst_di); |
3650860b JB |
2030 | if (ret) |
2031 | goto out; | |
e02119d5 CM |
2032 | |
2033 | if (key->type == BTRFS_DIR_INDEX_KEY) | |
2034 | goto insert; | |
2035 | out: | |
b3b4aa74 | 2036 | btrfs_release_path(path); |
d555438b | 2037 | if (!ret && update_size) { |
6ef06d27 | 2038 | btrfs_i_size_write(BTRFS_I(dir), dir->i_size + name_len * 2); |
9a56fcd1 | 2039 | ret = btrfs_update_inode(trans, root, BTRFS_I(dir)); |
d555438b | 2040 | } |
e02119d5 CM |
2041 | kfree(name); |
2042 | iput(dir); | |
bb53eda9 FM |
2043 | if (!ret && name_added) |
2044 | ret = 1; | |
3650860b | 2045 | return ret; |
e02119d5 CM |
2046 | |
2047 | insert: | |
725af92a NB |
2048 | /* |
2049 | * Check if the inode reference exists in the log for the given name, | |
2050 | * inode and parent inode | |
2051 | */ | |
2052 | found_key.objectid = log_key.objectid; | |
2053 | found_key.type = BTRFS_INODE_REF_KEY; | |
2054 | found_key.offset = key->objectid; | |
2055 | ret = backref_in_log(root->log_root, &found_key, 0, name, name_len); | |
2056 | if (ret < 0) { | |
2057 | goto out; | |
2058 | } else if (ret) { | |
2059 | /* The dentry will be added later. */ | |
2060 | ret = 0; | |
2061 | update_size = false; | |
2062 | goto out; | |
2063 | } | |
2064 | ||
2065 | found_key.objectid = log_key.objectid; | |
2066 | found_key.type = BTRFS_INODE_EXTREF_KEY; | |
2067 | found_key.offset = key->objectid; | |
2068 | ret = backref_in_log(root->log_root, &found_key, key->objectid, name, | |
2069 | name_len); | |
2070 | if (ret < 0) { | |
2071 | goto out; | |
2072 | } else if (ret) { | |
df8d116f FM |
2073 | /* The dentry will be added later. */ |
2074 | ret = 0; | |
2075 | update_size = false; | |
2076 | goto out; | |
2077 | } | |
b3b4aa74 | 2078 | btrfs_release_path(path); |
60d53eb3 Z |
2079 | ret = insert_one_name(trans, root, key->objectid, key->offset, |
2080 | name, name_len, &log_key); | |
df8d116f | 2081 | if (ret && ret != -ENOENT && ret != -EEXIST) |
3650860b | 2082 | goto out; |
bb53eda9 FM |
2083 | if (!ret) |
2084 | name_added = true; | |
d555438b | 2085 | update_size = false; |
3650860b | 2086 | ret = 0; |
e02119d5 CM |
2087 | goto out; |
2088 | } | |
2089 | ||
2090 | /* | |
2091 | * find all the names in a directory item and reconcile them into | |
2092 | * the subvolume. Only BTRFS_DIR_ITEM_KEY types will have more than | |
2093 | * one name in a directory item, but the same code gets used for | |
2094 | * both directory index types | |
2095 | */ | |
2096 | static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans, | |
2097 | struct btrfs_root *root, | |
2098 | struct btrfs_path *path, | |
2099 | struct extent_buffer *eb, int slot, | |
2100 | struct btrfs_key *key) | |
2101 | { | |
bb53eda9 | 2102 | int ret = 0; |
e02119d5 CM |
2103 | u32 item_size = btrfs_item_size_nr(eb, slot); |
2104 | struct btrfs_dir_item *di; | |
2105 | int name_len; | |
2106 | unsigned long ptr; | |
2107 | unsigned long ptr_end; | |
bb53eda9 | 2108 | struct btrfs_path *fixup_path = NULL; |
e02119d5 CM |
2109 | |
2110 | ptr = btrfs_item_ptr_offset(eb, slot); | |
2111 | ptr_end = ptr + item_size; | |
d397712b | 2112 | while (ptr < ptr_end) { |
e02119d5 CM |
2113 | di = (struct btrfs_dir_item *)ptr; |
2114 | name_len = btrfs_dir_name_len(eb, di); | |
2115 | ret = replay_one_name(trans, root, path, eb, di, key); | |
bb53eda9 FM |
2116 | if (ret < 0) |
2117 | break; | |
e02119d5 CM |
2118 | ptr = (unsigned long)(di + 1); |
2119 | ptr += name_len; | |
bb53eda9 FM |
2120 | |
2121 | /* | |
2122 | * If this entry refers to a non-directory (directories can not | |
2123 | * have a link count > 1) and it was added in the transaction | |
2124 | * that was not committed, make sure we fixup the link count of | |
2125 | * the inode the entry points to. Otherwise something like |
2126 | * the following would result in a directory pointing to an | |
2127 | * inode with a wrong link count that does not account for this dir |
2128 | * entry: | |
2129 | * | |
2130 | * mkdir testdir | |
2131 | * touch testdir/foo | |
2132 | * touch testdir/bar | |
2133 | * sync | |
2134 | * | |
2135 | * ln testdir/bar testdir/bar_link | |
2136 | * ln testdir/foo testdir/foo_link | |
2137 | * xfs_io -c "fsync" testdir/bar | |
2138 | * | |
2139 | * <power failure> | |
2140 | * | |
2141 | * mount fs, log replay happens | |
2142 | * | |
2143 | * File foo would remain with a link count of 1 when it has two | |
2144 | * entries pointing to it in the directory testdir. This would | |
2145 | * make it impossible to ever delete the parent directory as |
2146 | * it would result in stale dentries that can never be deleted. | |
2147 | */ | |
2148 | if (ret == 1 && btrfs_dir_type(eb, di) != BTRFS_FT_DIR) { | |
2149 | struct btrfs_key di_key; | |
2150 | ||
2151 | if (!fixup_path) { | |
2152 | fixup_path = btrfs_alloc_path(); | |
2153 | if (!fixup_path) { | |
2154 | ret = -ENOMEM; | |
2155 | break; | |
2156 | } | |
2157 | } | |
2158 | ||
2159 | btrfs_dir_item_key_to_cpu(eb, di, &di_key); | |
2160 | ret = link_to_fixup_dir(trans, root, fixup_path, | |
2161 | di_key.objectid); | |
2162 | if (ret) | |
2163 | break; | |
2164 | } | |
2165 | ret = 0; | |
e02119d5 | 2166 | } |
bb53eda9 FM |
2167 | btrfs_free_path(fixup_path); |
2168 | return ret; | |
e02119d5 CM |
2169 | } |
2170 | ||
2171 | /* | |
2172 | * directory replay has two parts. There are the standard directory | |
2173 | * items in the log copied from the subvolume, and range items | |
2174 | * created in the log while the subvolume was logged. | |
2175 | * | |
2176 | * The range items tell us which parts of the key space the log | |
2177 | * is authoritative for. During replay, if a key in the subvolume | |
2178 | * directory is in a logged range item, but not actually in the log | |
2179 | * that means it was deleted from the directory before the fsync | |
2180 | * and should be removed. | |
2181 | */ | |
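/*
 * Illustration (hypothetical offsets): if the log holds a dir log item
 * for this dirid with key.offset 10 and btrfs_dir_log_end() of 50, a
 * call with *start_ret == 25 returns 0 and sets *start_ret = 10 and
 * *end_ret = 50.  The log is then authoritative for offsets 10..50, so
 * the caller removes any subvolume entry in that range that has no
 * matching entry in the log.
 */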
2182 | static noinline int find_dir_range(struct btrfs_root *root, | |
2183 | struct btrfs_path *path, | |
2184 | u64 dirid, int key_type, | |
2185 | u64 *start_ret, u64 *end_ret) | |
2186 | { | |
2187 | struct btrfs_key key; | |
2188 | u64 found_end; | |
2189 | struct btrfs_dir_log_item *item; | |
2190 | int ret; | |
2191 | int nritems; | |
2192 | ||
2193 | if (*start_ret == (u64)-1) | |
2194 | return 1; | |
2195 | ||
2196 | key.objectid = dirid; | |
2197 | key.type = key_type; | |
2198 | key.offset = *start_ret; | |
2199 | ||
2200 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | |
2201 | if (ret < 0) | |
2202 | goto out; | |
2203 | if (ret > 0) { | |
2204 | if (path->slots[0] == 0) | |
2205 | goto out; | |
2206 | path->slots[0]--; | |
2207 | } | |
2208 | if (ret != 0) | |
2209 | btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); | |
2210 | ||
2211 | if (key.type != key_type || key.objectid != dirid) { | |
2212 | ret = 1; | |
2213 | goto next; | |
2214 | } | |
2215 | item = btrfs_item_ptr(path->nodes[0], path->slots[0], | |
2216 | struct btrfs_dir_log_item); | |
2217 | found_end = btrfs_dir_log_end(path->nodes[0], item); | |
2218 | ||
2219 | if (*start_ret >= key.offset && *start_ret <= found_end) { | |
2220 | ret = 0; | |
2221 | *start_ret = key.offset; | |
2222 | *end_ret = found_end; | |
2223 | goto out; | |
2224 | } | |
2225 | ret = 1; | |
2226 | next: | |
2227 | /* check the next slot in the tree to see if it is a valid item */ | |
2228 | nritems = btrfs_header_nritems(path->nodes[0]); | |
2a7bf53f | 2229 | path->slots[0]++; |
e02119d5 CM |
2230 | if (path->slots[0] >= nritems) { |
2231 | ret = btrfs_next_leaf(root, path); | |
2232 | if (ret) | |
2233 | goto out; | |
e02119d5 CM |
2234 | } |
2235 | ||
2236 | btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); | |
2237 | ||
2238 | if (key.type != key_type || key.objectid != dirid) { | |
2239 | ret = 1; | |
2240 | goto out; | |
2241 | } | |
2242 | item = btrfs_item_ptr(path->nodes[0], path->slots[0], | |
2243 | struct btrfs_dir_log_item); | |
2244 | found_end = btrfs_dir_log_end(path->nodes[0], item); | |
2245 | *start_ret = key.offset; | |
2246 | *end_ret = found_end; | |
2247 | ret = 0; | |
2248 | out: | |
b3b4aa74 | 2249 | btrfs_release_path(path); |
e02119d5 CM |
2250 | return ret; |
2251 | } | |
2252 | ||
2253 | /* | |
2254 | * this looks for a given directory item in the log. If the directory | |
2255 | * item is not in the log, the item is removed and the inode it points | |
2256 | * to is unlinked | |
2257 | */ | |
2258 | static noinline int check_item_in_log(struct btrfs_trans_handle *trans, | |
2259 | struct btrfs_root *root, | |
2260 | struct btrfs_root *log, | |
2261 | struct btrfs_path *path, | |
2262 | struct btrfs_path *log_path, | |
2263 | struct inode *dir, | |
2264 | struct btrfs_key *dir_key) | |
2265 | { | |
2266 | int ret; | |
2267 | struct extent_buffer *eb; | |
2268 | int slot; | |
2269 | u32 item_size; | |
2270 | struct btrfs_dir_item *di; | |
2271 | struct btrfs_dir_item *log_di; | |
2272 | int name_len; | |
2273 | unsigned long ptr; | |
2274 | unsigned long ptr_end; | |
2275 | char *name; | |
2276 | struct inode *inode; | |
2277 | struct btrfs_key location; | |
2278 | ||
2279 | again: | |
2280 | eb = path->nodes[0]; | |
2281 | slot = path->slots[0]; | |
2282 | item_size = btrfs_item_size_nr(eb, slot); | |
2283 | ptr = btrfs_item_ptr_offset(eb, slot); | |
2284 | ptr_end = ptr + item_size; | |
d397712b | 2285 | while (ptr < ptr_end) { |
e02119d5 CM |
2286 | di = (struct btrfs_dir_item *)ptr; |
2287 | name_len = btrfs_dir_name_len(eb, di); | |
2288 | name = kmalloc(name_len, GFP_NOFS); | |
2289 | if (!name) { | |
2290 | ret = -ENOMEM; | |
2291 | goto out; | |
2292 | } | |
2293 | read_extent_buffer(eb, name, (unsigned long)(di + 1), | |
2294 | name_len); | |
2295 | log_di = NULL; | |
12fcfd22 | 2296 | if (log && dir_key->type == BTRFS_DIR_ITEM_KEY) { |
e02119d5 CM |
2297 | log_di = btrfs_lookup_dir_item(trans, log, log_path, |
2298 | dir_key->objectid, | |
2299 | name, name_len, 0); | |
12fcfd22 | 2300 | } else if (log && dir_key->type == BTRFS_DIR_INDEX_KEY) { |
e02119d5 CM |
2301 | log_di = btrfs_lookup_dir_index_item(trans, log, |
2302 | log_path, | |
2303 | dir_key->objectid, | |
2304 | dir_key->offset, | |
2305 | name, name_len, 0); | |
2306 | } | |
8d9e220c | 2307 | if (!log_di || log_di == ERR_PTR(-ENOENT)) { |
e02119d5 | 2308 | btrfs_dir_item_key_to_cpu(eb, di, &location); |
b3b4aa74 DS |
2309 | btrfs_release_path(path); |
2310 | btrfs_release_path(log_path); | |
e02119d5 | 2311 | inode = read_one_inode(root, location.objectid); |
c00e9493 TI |
2312 | if (!inode) { |
2313 | kfree(name); | |
2314 | return -EIO; | |
2315 | } | |
e02119d5 CM |
2316 | |
2317 | ret = link_to_fixup_dir(trans, root, | |
2318 | path, location.objectid); | |
3650860b JB |
2319 | if (ret) { |
2320 | kfree(name); | |
2321 | iput(inode); | |
2322 | goto out; | |
2323 | } | |
2324 | ||
8b558c5f | 2325 | inc_nlink(inode); |
4ec5934e NB |
2326 | ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir), |
2327 | BTRFS_I(inode), name, name_len); | |
3650860b | 2328 | if (!ret) |
e5c304e6 | 2329 | ret = btrfs_run_delayed_items(trans); |
e02119d5 CM |
2330 | kfree(name); |
2331 | iput(inode); | |
3650860b JB |
2332 | if (ret) |
2333 | goto out; | |
e02119d5 CM |
2334 | |
2335 | /* there might still be more names under this key | |
2336 | * check and repeat if required | |
2337 | */ | |
2338 | ret = btrfs_search_slot(NULL, root, dir_key, path, | |
2339 | 0, 0); | |
2340 | if (ret == 0) | |
2341 | goto again; | |
2342 | ret = 0; | |
2343 | goto out; | |
269d040f FDBM |
2344 | } else if (IS_ERR(log_di)) { |
2345 | kfree(name); | |
2346 | return PTR_ERR(log_di); | |
e02119d5 | 2347 | } |
b3b4aa74 | 2348 | btrfs_release_path(log_path); |
e02119d5 CM |
2349 | kfree(name); |
2350 | ||
2351 | ptr = (unsigned long)(di + 1); | |
2352 | ptr += name_len; | |
2353 | } | |
2354 | ret = 0; | |
2355 | out: | |
b3b4aa74 DS |
2356 | btrfs_release_path(path); |
2357 | btrfs_release_path(log_path); | |
e02119d5 CM |
2358 | return ret; |
2359 | } | |
2360 | ||
4f764e51 FM |
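/*
 * An fsync logs the complete set of xattrs of an inode, so the log is
 * authoritative for them: delete from the subvolume tree any xattr of
 * this inode that has no matching entry in the log tree.
 */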
2361 | static int replay_xattr_deletes(struct btrfs_trans_handle *trans, |
2362 | struct btrfs_root *root, | |
2363 | struct btrfs_root *log, | |
2364 | struct btrfs_path *path, | |
2365 | const u64 ino) | |
2366 | { | |
2367 | struct btrfs_key search_key; | |
2368 | struct btrfs_path *log_path; | |
2369 | int i; | |
2370 | int nritems; | |
2371 | int ret; | |
2372 | ||
2373 | log_path = btrfs_alloc_path(); | |
2374 | if (!log_path) | |
2375 | return -ENOMEM; | |
2376 | ||
2377 | search_key.objectid = ino; | |
2378 | search_key.type = BTRFS_XATTR_ITEM_KEY; | |
2379 | search_key.offset = 0; | |
2380 | again: | |
2381 | ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0); | |
2382 | if (ret < 0) | |
2383 | goto out; | |
2384 | process_leaf: | |
2385 | nritems = btrfs_header_nritems(path->nodes[0]); | |
2386 | for (i = path->slots[0]; i < nritems; i++) { | |
2387 | struct btrfs_key key; | |
2388 | struct btrfs_dir_item *di; | |
2389 | struct btrfs_dir_item *log_di; | |
2390 | u32 total_size; | |
2391 | u32 cur; | |
2392 | ||
2393 | btrfs_item_key_to_cpu(path->nodes[0], &key, i); | |
2394 | if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY) { | |
2395 | ret = 0; | |
2396 | goto out; | |
2397 | } | |
2398 | ||
2399 | di = btrfs_item_ptr(path->nodes[0], i, struct btrfs_dir_item); | |
2400 | total_size = btrfs_item_size_nr(path->nodes[0], i); | |
2401 | cur = 0; | |
2402 | while (cur < total_size) { | |
2403 | u16 name_len = btrfs_dir_name_len(path->nodes[0], di); | |
2404 | u16 data_len = btrfs_dir_data_len(path->nodes[0], di); | |
2405 | u32 this_len = sizeof(*di) + name_len + data_len; | |
2406 | char *name; | |
2407 | ||
2408 | name = kmalloc(name_len, GFP_NOFS); | |
2409 | if (!name) { | |
2410 | ret = -ENOMEM; | |
2411 | goto out; | |
2412 | } | |
2413 | read_extent_buffer(path->nodes[0], name, | |
2414 | (unsigned long)(di + 1), name_len); | |
2415 | ||
2416 | log_di = btrfs_lookup_xattr(NULL, log, log_path, ino, | |
2417 | name, name_len, 0); | |
2418 | btrfs_release_path(log_path); | |
2419 | if (!log_di) { | |
2420 | /* Doesn't exist in log tree, so delete it. */ | |
2421 | btrfs_release_path(path); | |
2422 | di = btrfs_lookup_xattr(trans, root, path, ino, | |
2423 | name, name_len, -1); | |
2424 | kfree(name); | |
2425 | if (IS_ERR(di)) { | |
2426 | ret = PTR_ERR(di); | |
2427 | goto out; | |
2428 | } | |
2429 | ASSERT(di); | |
2430 | ret = btrfs_delete_one_dir_name(trans, root, | |
2431 | path, di); | |
2432 | if (ret) | |
2433 | goto out; | |
2434 | btrfs_release_path(path); | |
2435 | search_key = key; | |
2436 | goto again; | |
2437 | } | |
2438 | kfree(name); | |
2439 | if (IS_ERR(log_di)) { | |
2440 | ret = PTR_ERR(log_di); | |
2441 | goto out; | |
2442 | } | |
2443 | cur += this_len; | |
2444 | di = (struct btrfs_dir_item *)((char *)di + this_len); | |
2445 | } | |
2446 | } | |
2447 | ret = btrfs_next_leaf(root, path); | |
2448 | if (ret > 0) | |
2449 | ret = 0; | |
2450 | else if (ret == 0) | |
2451 | goto process_leaf; | |
2452 | out: | |
2453 | btrfs_free_path(log_path); | |
2454 | btrfs_release_path(path); | |
2455 | return ret; | |
2456 | } | |
2457 | ||
2458 | ||
e02119d5 CM |
2459 | /* |
2460 | * deletion replay happens before we copy any new directory items | |
2461 | * out of the log or out of backreferences from inodes. It | |
2462 | * scans the log to find ranges of keys that the log is authoritative for, |
2463 | * and then scans the directory to find items in those ranges that are | |
2464 | * not present in the log. | |
2465 | * | |
2466 | * Anything we don't find in the log is unlinked and removed from the | |
2467 | * directory. | |
2468 | */ | |
2469 | static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans, | |
2470 | struct btrfs_root *root, | |
2471 | struct btrfs_root *log, | |
2472 | struct btrfs_path *path, | |
12fcfd22 | 2473 | u64 dirid, int del_all) |
e02119d5 CM |
2474 | { |
2475 | u64 range_start; | |
2476 | u64 range_end; | |
2477 | int key_type = BTRFS_DIR_LOG_ITEM_KEY; | |
2478 | int ret = 0; | |
2479 | struct btrfs_key dir_key; | |
2480 | struct btrfs_key found_key; | |
2481 | struct btrfs_path *log_path; | |
2482 | struct inode *dir; | |
2483 | ||
2484 | dir_key.objectid = dirid; | |
2485 | dir_key.type = BTRFS_DIR_ITEM_KEY; | |
2486 | log_path = btrfs_alloc_path(); | |
2487 | if (!log_path) | |
2488 | return -ENOMEM; | |
2489 | ||
2490 | dir = read_one_inode(root, dirid); | |
2491 | /* it isn't an error if the inode isn't there, that can happen | |
2492 | * because we replay the deletes before we copy in the inode item | |
2493 | * from the log | |
2494 | */ | |
2495 | if (!dir) { | |
2496 | btrfs_free_path(log_path); | |
2497 | return 0; | |
2498 | } | |
2499 | again: | |
2500 | range_start = 0; | |
2501 | range_end = 0; | |
d397712b | 2502 | while (1) { |
12fcfd22 CM |
2503 | if (del_all) |
2504 | range_end = (u64)-1; | |
2505 | else { | |
2506 | ret = find_dir_range(log, path, dirid, key_type, | |
2507 | &range_start, &range_end); | |
2508 | if (ret != 0) | |
2509 | break; | |
2510 | } | |
e02119d5 CM |
2511 | |
2512 | dir_key.offset = range_start; | |
d397712b | 2513 | while (1) { |
e02119d5 CM |
2514 | int nritems; |
2515 | ret = btrfs_search_slot(NULL, root, &dir_key, path, | |
2516 | 0, 0); | |
2517 | if (ret < 0) | |
2518 | goto out; | |
2519 | ||
2520 | nritems = btrfs_header_nritems(path->nodes[0]); | |
2521 | if (path->slots[0] >= nritems) { | |
2522 | ret = btrfs_next_leaf(root, path); | |
b98def7c | 2523 | if (ret == 1) |
e02119d5 | 2524 | break; |
b98def7c LB |
2525 | else if (ret < 0) |
2526 | goto out; | |
e02119d5 CM |
2527 | } |
2528 | btrfs_item_key_to_cpu(path->nodes[0], &found_key, | |
2529 | path->slots[0]); | |
2530 | if (found_key.objectid != dirid || | |
2531 | found_key.type != dir_key.type) | |
2532 | goto next_type; | |
2533 | ||
2534 | if (found_key.offset > range_end) | |
2535 | break; | |
2536 | ||
2537 | ret = check_item_in_log(trans, root, log, path, | |
12fcfd22 CM |
2538 | log_path, dir, |
2539 | &found_key); | |
3650860b JB |
2540 | if (ret) |
2541 | goto out; | |
e02119d5 CM |
2542 | if (found_key.offset == (u64)-1) |
2543 | break; | |
2544 | dir_key.offset = found_key.offset + 1; | |
2545 | } | |
b3b4aa74 | 2546 | btrfs_release_path(path); |
e02119d5 CM |
2547 | if (range_end == (u64)-1) |
2548 | break; | |
2549 | range_start = range_end + 1; | |
2550 | } | |
2551 | ||
2552 | next_type: | |
2553 | ret = 0; | |
2554 | if (key_type == BTRFS_DIR_LOG_ITEM_KEY) { | |
2555 | key_type = BTRFS_DIR_LOG_INDEX_KEY; | |
2556 | dir_key.type = BTRFS_DIR_INDEX_KEY; | |
b3b4aa74 | 2557 | btrfs_release_path(path); |
e02119d5 CM |
2558 | goto again; |
2559 | } | |
2560 | out: | |
b3b4aa74 | 2561 | btrfs_release_path(path); |
e02119d5 CM |
2562 | btrfs_free_path(log_path); |
2563 | iput(dir); | |
2564 | return ret; | |
2565 | } | |
2566 | ||
2567 | /* | |
2568 | * the process_func used to replay items from the log tree. This | |
2569 | * gets called in two different stages. The first stage just looks | |
2570 | * for inodes and makes sure they are all copied into the subvolume. | |
2571 | * | |
2572 | * The second stage copies all the other item types from the log into | |
2573 | * the subvolume. The two stage approach is slower, but gets rid of | |
2574 | * lots of complexity around inodes referencing other inodes that exist | |
2575 | * only in the log (references come from either directory items or inode | |
2576 | * back refs). | |
2577 | */ | |
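/*
 * Concretely, in the loop below: LOG_WALK_REPLAY_INODES creates or
 * overwrites the inode items (and drops file extents beyond the logged
 * i_size of regular files), LOG_WALK_REPLAY_DIR_INDEX replays the dir
 * index entries, and LOG_WALK_REPLAY_ALL copies everything else
 * (xattrs, inode refs, file extents and dir items).
 */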
2578 | static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb, | |
581c1760 | 2579 | struct walk_control *wc, u64 gen, int level) |
e02119d5 CM |
2580 | { |
2581 | int nritems; | |
2582 | struct btrfs_path *path; | |
2583 | struct btrfs_root *root = wc->replay_dest; | |
2584 | struct btrfs_key key; | |
e02119d5 CM |
2585 | int i; |
2586 | int ret; | |
2587 | ||
581c1760 | 2588 | ret = btrfs_read_buffer(eb, gen, level, NULL); |
018642a1 TI |
2589 | if (ret) |
2590 | return ret; | |
e02119d5 CM |
2591 | |
2592 | level = btrfs_header_level(eb); | |
2593 | ||
2594 | if (level != 0) | |
2595 | return 0; | |
2596 | ||
2597 | path = btrfs_alloc_path(); | |
1e5063d0 MF |
2598 | if (!path) |
2599 | return -ENOMEM; | |
e02119d5 CM |
2600 | |
2601 | nritems = btrfs_header_nritems(eb); | |
2602 | for (i = 0; i < nritems; i++) { | |
2603 | btrfs_item_key_to_cpu(eb, &key, i); | |
e02119d5 CM |
2604 | |
2605 | /* inode keys are done during the first stage */ | |
2606 | if (key.type == BTRFS_INODE_ITEM_KEY && | |
2607 | wc->stage == LOG_WALK_REPLAY_INODES) { | |
e02119d5 CM |
2608 | struct btrfs_inode_item *inode_item; |
2609 | u32 mode; | |
2610 | ||
2611 | inode_item = btrfs_item_ptr(eb, i, | |
2612 | struct btrfs_inode_item); | |
f2d72f42 FM |
2613 | /* |
2614 | * If we have a tmpfile (O_TMPFILE) that got fsync'ed | |
2615 | * and never got linked before the fsync, skip it, as | |
2616 | * replaying it is pointless since it would be deleted | |
2617 | * later. We skip logging tmpfiles, but it's always | |
2618 | * possible we are replaying a log created with a kernel | |
2619 | * that used to log tmpfiles. | |
2620 | */ | |
2621 | if (btrfs_inode_nlink(eb, inode_item) == 0) { | |
2622 | wc->ignore_cur_inode = true; | |
2623 | continue; | |
2624 | } else { | |
2625 | wc->ignore_cur_inode = false; | |
2626 | } | |
4f764e51 FM |
2627 | ret = replay_xattr_deletes(wc->trans, root, log, |
2628 | path, key.objectid); | |
2629 | if (ret) | |
2630 | break; | |
e02119d5 CM |
2631 | mode = btrfs_inode_mode(eb, inode_item); |
2632 | if (S_ISDIR(mode)) { | |
2633 | ret = replay_dir_deletes(wc->trans, | |
12fcfd22 | 2634 | root, log, path, key.objectid, 0); |
b50c6e25 JB |
2635 | if (ret) |
2636 | break; | |
e02119d5 CM |
2637 | } |
2638 | ret = overwrite_item(wc->trans, root, path, | |
2639 | eb, i, &key); | |
b50c6e25 JB |
2640 | if (ret) |
2641 | break; | |
e02119d5 | 2642 | |
471d557a FM |
2643 | /* |
2644 | * Before replaying extents, truncate the inode to its | |
2645 | * size. We need to do it now and not after log replay | |
2646 | * because before an fsync we can have prealloc extents | |
2647 | * added beyond the inode's i_size. If we did it after, | |
2648 | * through orphan cleanup for example, we would drop | |
2649 | * those prealloc extents just after replaying them. | |
e02119d5 CM |
2650 | */ |
2651 | if (S_ISREG(mode)) { | |
5893dfb9 | 2652 | struct btrfs_drop_extents_args drop_args = { 0 }; |
471d557a FM |
2653 | struct inode *inode; |
2654 | u64 from; | |
2655 | ||
2656 | inode = read_one_inode(root, key.objectid); | |
2657 | if (!inode) { | |
2658 | ret = -EIO; | |
2659 | break; | |
2660 | } | |
2661 | from = ALIGN(i_size_read(inode), | |
2662 | root->fs_info->sectorsize); | |
5893dfb9 FM |
2663 | drop_args.start = from; |
2664 | drop_args.end = (u64)-1; | |
2665 | drop_args.drop_cache = true; | |
2666 | ret = btrfs_drop_extents(wc->trans, root, | |
2667 | BTRFS_I(inode), | |
2668 | &drop_args); | |
471d557a | 2669 | if (!ret) { |
2766ff61 FM |
2670 | inode_sub_bytes(inode, |
2671 | drop_args.bytes_found); | |
f2d72f42 | 2672 | /* Update the inode's nbytes. */ |
471d557a | 2673 | ret = btrfs_update_inode(wc->trans, |
9a56fcd1 | 2674 | root, BTRFS_I(inode)); |
471d557a FM |
2675 | } |
2676 | iput(inode); | |
b50c6e25 JB |
2677 | if (ret) |
2678 | break; | |
e02119d5 | 2679 | } |
c71bf099 | 2680 | |
e02119d5 CM |
2681 | ret = link_to_fixup_dir(wc->trans, root, |
2682 | path, key.objectid); | |
b50c6e25 JB |
2683 | if (ret) |
2684 | break; | |
e02119d5 | 2685 | } |
dd8e7217 | 2686 | |
f2d72f42 FM |
2687 | if (wc->ignore_cur_inode) |
2688 | continue; | |
2689 | ||
dd8e7217 JB |
2690 | if (key.type == BTRFS_DIR_INDEX_KEY && |
2691 | wc->stage == LOG_WALK_REPLAY_DIR_INDEX) { | |
2692 | ret = replay_one_dir_item(wc->trans, root, path, | |
2693 | eb, i, &key); | |
2694 | if (ret) | |
2695 | break; | |
2696 | } | |
2697 | ||
e02119d5 CM |
2698 | if (wc->stage < LOG_WALK_REPLAY_ALL) |
2699 | continue; | |
2700 | ||
2701 | /* these keys are simply copied */ | |
2702 | if (key.type == BTRFS_XATTR_ITEM_KEY) { | |
2703 | ret = overwrite_item(wc->trans, root, path, | |
2704 | eb, i, &key); | |
b50c6e25 JB |
2705 | if (ret) |
2706 | break; | |
2da1c669 LB |
2707 | } else if (key.type == BTRFS_INODE_REF_KEY || |
2708 | key.type == BTRFS_INODE_EXTREF_KEY) { | |
f186373f MF |
2709 | ret = add_inode_ref(wc->trans, root, log, path, |
2710 | eb, i, &key); | |
b50c6e25 JB |
2711 | if (ret && ret != -ENOENT) |
2712 | break; | |
2713 | ret = 0; | |
e02119d5 CM |
2714 | } else if (key.type == BTRFS_EXTENT_DATA_KEY) { |
2715 | ret = replay_one_extent(wc->trans, root, path, | |
2716 | eb, i, &key); | |
b50c6e25 JB |
2717 | if (ret) |
2718 | break; | |
dd8e7217 | 2719 | } else if (key.type == BTRFS_DIR_ITEM_KEY) { |
e02119d5 CM |
2720 | ret = replay_one_dir_item(wc->trans, root, path, |
2721 | eb, i, &key); | |
b50c6e25 JB |
2722 | if (ret) |
2723 | break; | |
e02119d5 CM |
2724 | } |
2725 | } | |
2726 | btrfs_free_path(path); | |
b50c6e25 | 2727 | return ret; |
e02119d5 CM |
2728 | } |
2729 | ||
6787bb9f NB |
2730 | /* |
2731 | * Release the space reservation held by a log tree extent buffer |
2732 | */ | |
2733 | static void unaccount_log_buffer(struct btrfs_fs_info *fs_info, u64 start) | |
2734 | { | |
2735 | struct btrfs_block_group *cache; | |
2736 | ||
2737 | cache = btrfs_lookup_block_group(fs_info, start); | |
2738 | if (!cache) { | |
2739 | btrfs_err(fs_info, "unable to find block group for %llu", start); | |
2740 | return; | |
2741 | } | |
2742 | ||
2743 | spin_lock(&cache->space_info->lock); | |
2744 | spin_lock(&cache->lock); | |
2745 | cache->reserved -= fs_info->nodesize; | |
2746 | cache->space_info->bytes_reserved -= fs_info->nodesize; | |
2747 | spin_unlock(&cache->lock); | |
2748 | spin_unlock(&cache->space_info->lock); | |
2749 | ||
2750 | btrfs_put_block_group(cache); | |
2751 | } | |
2752 | ||
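/*
 * Walk down the log tree from path->nodes[*level], handing every leaf
 * to wc->process_func.  When wc->free is set the visited leaves are
 * also reclaimed: pinned for reuse when running inside a transaction,
 * or just unaccounted from their block group when there is none.
 */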
d397712b | 2753 | static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans, |
e02119d5 CM |
2754 | struct btrfs_root *root, |
2755 | struct btrfs_path *path, int *level, | |
2756 | struct walk_control *wc) | |
2757 | { | |
0b246afa | 2758 | struct btrfs_fs_info *fs_info = root->fs_info; |
e02119d5 CM |
2759 | u64 bytenr; |
2760 | u64 ptr_gen; | |
2761 | struct extent_buffer *next; | |
2762 | struct extent_buffer *cur; | |
e02119d5 CM |
2763 | u32 blocksize; |
2764 | int ret = 0; | |
2765 | ||
d397712b | 2766 | while (*level > 0) { |
581c1760 QW |
2767 | struct btrfs_key first_key; |
2768 | ||
e02119d5 CM |
2769 | cur = path->nodes[*level]; |
2770 | ||
fae7f21c | 2771 | WARN_ON(btrfs_header_level(cur) != *level); |
e02119d5 CM |
2772 | |
2773 | if (path->slots[*level] >= | |
2774 | btrfs_header_nritems(cur)) | |
2775 | break; | |
2776 | ||
2777 | bytenr = btrfs_node_blockptr(cur, path->slots[*level]); | |
2778 | ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]); | |
581c1760 | 2779 | btrfs_node_key_to_cpu(cur, &first_key, path->slots[*level]); |
0b246afa | 2780 | blocksize = fs_info->nodesize; |
e02119d5 | 2781 | |
3fbaf258 JB |
2782 | next = btrfs_find_create_tree_block(fs_info, bytenr, |
2783 | btrfs_header_owner(cur), | |
2784 | *level - 1); | |
c871b0f2 LB |
2785 | if (IS_ERR(next)) |
2786 | return PTR_ERR(next); | |
e02119d5 | 2787 | |
e02119d5 | 2788 | if (*level == 1) { |
581c1760 QW |
2789 | ret = wc->process_func(root, next, wc, ptr_gen, |
2790 | *level - 1); | |
b50c6e25 JB |
2791 | if (ret) { |
2792 | free_extent_buffer(next); | |
1e5063d0 | 2793 | return ret; |
b50c6e25 | 2794 | } |
4a500fd1 | 2795 | |
e02119d5 CM |
2796 | path->slots[*level]++; |
2797 | if (wc->free) { | |
581c1760 QW |
2798 | ret = btrfs_read_buffer(next, ptr_gen, |
2799 | *level - 1, &first_key); | |
018642a1 TI |
2800 | if (ret) { |
2801 | free_extent_buffer(next); | |
2802 | return ret; | |
2803 | } | |
e02119d5 | 2804 | |
681ae509 JB |
2805 | if (trans) { |
2806 | btrfs_tree_lock(next); | |
6a884d7d | 2807 | btrfs_clean_tree_block(next); |
681ae509 JB |
2808 | btrfs_wait_tree_block_writeback(next); |
2809 | btrfs_tree_unlock(next); | |
7bfc1007 | 2810 | ret = btrfs_pin_reserved_extent(trans, |
10e958d5 NB |
2811 | bytenr, blocksize); |
2812 | if (ret) { | |
2813 | free_extent_buffer(next); | |
2814 | return ret; | |
2815 | } | |
d3575156 NA |
2816 | btrfs_redirty_list_add( |
2817 | trans->transaction, next); | |
1846430c LB |
2818 | } else { |
2819 | if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags)) | |
2820 | clear_extent_buffer_dirty(next); | |
10e958d5 | 2821 | unaccount_log_buffer(fs_info, bytenr); |
3650860b | 2822 | } |
e02119d5 CM |
2823 | } |
2824 | free_extent_buffer(next); | |
2825 | continue; | |
2826 | } | |
581c1760 | 2827 | ret = btrfs_read_buffer(next, ptr_gen, *level - 1, &first_key); |
018642a1 TI |
2828 | if (ret) { |
2829 | free_extent_buffer(next); | |
2830 | return ret; | |
2831 | } | |
e02119d5 | 2832 | |
e02119d5 CM |
2833 | if (path->nodes[*level-1]) |
2834 | free_extent_buffer(path->nodes[*level-1]); | |
2835 | path->nodes[*level-1] = next; | |
2836 | *level = btrfs_header_level(next); | |
2837 | path->slots[*level] = 0; | |
2838 | cond_resched(); | |
2839 | } | |
4a500fd1 | 2840 | path->slots[*level] = btrfs_header_nritems(path->nodes[*level]); |
e02119d5 CM |
2841 | |
2842 | cond_resched(); | |
2843 | return 0; | |
2844 | } | |
2845 | ||
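/*
 * Counterpart of walk_down_log_tree: pop back up the tree, running
 * wc->process_func on (and, when wc->free is set, reclaiming) every
 * node whose children have all been visited, until a node with
 * remaining siblings is found or the root level is reached.
 */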
d397712b | 2846 | static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans, |
e02119d5 CM |
2847 | struct btrfs_root *root, |
2848 | struct btrfs_path *path, int *level, | |
2849 | struct walk_control *wc) | |
2850 | { | |
0b246afa | 2851 | struct btrfs_fs_info *fs_info = root->fs_info; |
e02119d5 CM |
2852 | int i; |
2853 | int slot; | |
2854 | int ret; | |
2855 | ||
d397712b | 2856 | for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) { |
e02119d5 | 2857 | slot = path->slots[i]; |
4a500fd1 | 2858 | if (slot + 1 < btrfs_header_nritems(path->nodes[i])) { |
e02119d5 CM |
2859 | path->slots[i]++; |
2860 | *level = i; | |
2861 | WARN_ON(*level == 0); | |
2862 | return 0; | |
2863 | } else { | |
1e5063d0 | 2864 | ret = wc->process_func(root, path->nodes[*level], wc, |
581c1760 QW |
2865 | btrfs_header_generation(path->nodes[*level]), |
2866 | *level); | |
1e5063d0 MF |
2867 | if (ret) |
2868 | return ret; | |
2869 | ||
e02119d5 CM |
2870 | if (wc->free) { |
2871 | struct extent_buffer *next; | |
2872 | ||
2873 | next = path->nodes[*level]; | |
2874 | ||
681ae509 JB |
2875 | if (trans) { |
2876 | btrfs_tree_lock(next); | |
6a884d7d | 2877 | btrfs_clean_tree_block(next); |
681ae509 JB |
2878 | btrfs_wait_tree_block_writeback(next); |
2879 | btrfs_tree_unlock(next); | |
7bfc1007 | 2880 | ret = btrfs_pin_reserved_extent(trans, |
10e958d5 NB |
2881 | path->nodes[*level]->start, |
2882 | path->nodes[*level]->len); | |
2883 | if (ret) | |
2884 | return ret; | |
1846430c LB |
2885 | } else { |
2886 | if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags)) | |
2887 | clear_extent_buffer_dirty(next); | |
e02119d5 | 2888 | |
10e958d5 NB |
2889 | unaccount_log_buffer(fs_info, |
2890 | path->nodes[*level]->start); | |
2891 | } | |
e02119d5 CM |
2892 | } |
2893 | free_extent_buffer(path->nodes[*level]); | |
2894 | path->nodes[*level] = NULL; | |
2895 | *level = i + 1; | |
2896 | } | |
2897 | } | |
2898 | return 1; | |
2899 | } | |
2900 | ||
2901 | /* | |
2902 | * drop the reference count on the tree rooted at 'log'. This traverses |
2903 | * the tree freeing any blocks that have a ref count of zero after being | |
2904 | * decremented. | |
2905 | */ | |
2906 | static int walk_log_tree(struct btrfs_trans_handle *trans, | |
2907 | struct btrfs_root *log, struct walk_control *wc) | |
2908 | { | |
2ff7e61e | 2909 | struct btrfs_fs_info *fs_info = log->fs_info; |
e02119d5 CM |
2910 | int ret = 0; |
2911 | int wret; | |
2912 | int level; | |
2913 | struct btrfs_path *path; | |
e02119d5 CM |
2914 | int orig_level; |
2915 | ||
2916 | path = btrfs_alloc_path(); | |
db5b493a TI |
2917 | if (!path) |
2918 | return -ENOMEM; | |
e02119d5 CM |
2919 | |
2920 | level = btrfs_header_level(log->node); | |
2921 | orig_level = level; | |
2922 | path->nodes[level] = log->node; | |
67439dad | 2923 | atomic_inc(&log->node->refs); |
e02119d5 CM |
2924 | path->slots[level] = 0; |
2925 | ||
d397712b | 2926 | while (1) { |
e02119d5 CM |
2927 | wret = walk_down_log_tree(trans, log, path, &level, wc); |
2928 | if (wret > 0) | |
2929 | break; | |
79787eaa | 2930 | if (wret < 0) { |
e02119d5 | 2931 | ret = wret; |
79787eaa JM |
2932 | goto out; |
2933 | } | |
e02119d5 CM |
2934 | |
2935 | wret = walk_up_log_tree(trans, log, path, &level, wc); | |
2936 | if (wret > 0) | |
2937 | break; | |
79787eaa | 2938 | if (wret < 0) { |
e02119d5 | 2939 | ret = wret; |
79787eaa JM |
2940 | goto out; |
2941 | } | |
e02119d5 CM |
2942 | } |
2943 | ||
2944 | /* was the root node processed? if not, catch it here */ | |
2945 | if (path->nodes[orig_level]) { | |
79787eaa | 2946 | ret = wc->process_func(log, path->nodes[orig_level], wc, |
581c1760 QW |
2947 | btrfs_header_generation(path->nodes[orig_level]), |
2948 | orig_level); | |
79787eaa JM |
2949 | if (ret) |
2950 | goto out; | |
e02119d5 CM |
2951 | if (wc->free) { |
2952 | struct extent_buffer *next; | |
2953 | ||
2954 | next = path->nodes[orig_level]; | |
2955 | ||
681ae509 JB |
2956 | if (trans) { |
2957 | btrfs_tree_lock(next); | |
6a884d7d | 2958 | btrfs_clean_tree_block(next); |
681ae509 JB |
2959 | btrfs_wait_tree_block_writeback(next); |
2960 | btrfs_tree_unlock(next); | |
7bfc1007 | 2961 | ret = btrfs_pin_reserved_extent(trans, |
10e958d5 NB |
2962 | next->start, next->len); |
2963 | if (ret) | |
2964 | goto out; | |
1846430c LB |
2965 | } else { |
2966 | if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags)) | |
2967 | clear_extent_buffer_dirty(next); | |
10e958d5 | 2968 | unaccount_log_buffer(fs_info, next->start); |
681ae509 | 2969 | } |
e02119d5 CM |
2970 | } |
2971 | } | |
2972 | ||
79787eaa | 2973 | out: |
e02119d5 | 2974 | btrfs_free_path(path); |
e02119d5 CM |
2975 | return ret; |
2976 | } | |
2977 | ||
7237f183 YZ |
2978 | /* |
2979 | * helper function to update the item for a given subvolumes log root | |
2980 | * in the tree of log roots | |
2981 | */ | |
2982 | static int update_log_root(struct btrfs_trans_handle *trans, | |
4203e968 JB |
2983 | struct btrfs_root *log, |
2984 | struct btrfs_root_item *root_item) | |
7237f183 | 2985 | { |
0b246afa | 2986 | struct btrfs_fs_info *fs_info = log->fs_info; |
7237f183 YZ |
2987 | int ret; |
2988 | ||
2989 | if (log->log_transid == 1) { | |
2990 | /* insert root item on the first sync */ | |
0b246afa | 2991 | ret = btrfs_insert_root(trans, fs_info->log_root_tree, |
4203e968 | 2992 | &log->root_key, root_item); |
7237f183 | 2993 | } else { |
0b246afa | 2994 | ret = btrfs_update_root(trans, fs_info->log_root_tree, |
4203e968 | 2995 | &log->root_key, root_item); |
7237f183 YZ |
2996 | } |
2997 | return ret; | |
2998 | } | |
2999 | ||
60d53eb3 | 3000 | static void wait_log_commit(struct btrfs_root *root, int transid) |
e02119d5 CM |
3001 | { |
3002 | DEFINE_WAIT(wait); | |
7237f183 | 3003 | int index = transid % 2; |
e02119d5 | 3004 | |
7237f183 YZ |
3005 | /* |
3006 | * we only allow two pending log transactions at a time, | |
3007 | * so we know that if ours is more than 2 older than the | |
3008 | * current transaction, we're done | |
3009 | */ | |
49e83f57 | 3010 | for (;;) { |
7237f183 YZ |
3011 | prepare_to_wait(&root->log_commit_wait[index], |
3012 | &wait, TASK_UNINTERRUPTIBLE); | |
12fcfd22 | 3013 | |
49e83f57 LB |
3014 | if (!(root->log_transid_committed < transid && |
3015 | atomic_read(&root->log_commit[index]))) | |
3016 | break; | |
12fcfd22 | 3017 | |
49e83f57 LB |
3018 | mutex_unlock(&root->log_mutex); |
3019 | schedule(); | |
7237f183 | 3020 | mutex_lock(&root->log_mutex); |
49e83f57 LB |
3021 | } |
3022 | finish_wait(&root->log_commit_wait[index], &wait); | |
7237f183 YZ |
3023 | } |
3024 | ||
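/*
 * Wait until every task currently adding items to this root's log
 * (tracked by root->log_writers) has finished, dropping and retaking
 * log_mutex while sleeping.
 */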
60d53eb3 | 3025 | static void wait_for_writer(struct btrfs_root *root) |
7237f183 YZ |
3026 | { |
3027 | DEFINE_WAIT(wait); | |
8b050d35 | 3028 | |
49e83f57 LB |
3029 | for (;;) { |
3030 | prepare_to_wait(&root->log_writer_wait, &wait, | |
3031 | TASK_UNINTERRUPTIBLE); | |
3032 | if (!atomic_read(&root->log_writers)) | |
3033 | break; | |
3034 | ||
7237f183 | 3035 | mutex_unlock(&root->log_mutex); |
49e83f57 | 3036 | schedule(); |
575849ec | 3037 | mutex_lock(&root->log_mutex); |
7237f183 | 3038 | } |
49e83f57 | 3039 | finish_wait(&root->log_writer_wait, &wait); |
e02119d5 CM |
3040 | } |
3041 | ||
8b050d35 MX |
3042 | static inline void btrfs_remove_log_ctx(struct btrfs_root *root, |
3043 | struct btrfs_log_ctx *ctx) | |
3044 | { | |
3045 | if (!ctx) | |
3046 | return; | |
3047 | ||
3048 | mutex_lock(&root->log_mutex); | |
3049 | list_del_init(&ctx->list); | |
3050 | mutex_unlock(&root->log_mutex); | |
3051 | } | |
3052 | ||
3053 | /* | |
3054 | * Invoked in log mutex context, or the caller must make sure no other task |
3055 | * can access the list. |
3056 | */ | |
3057 | static inline void btrfs_remove_all_log_ctxs(struct btrfs_root *root, | |
3058 | int index, int error) | |
3059 | { | |
3060 | struct btrfs_log_ctx *ctx; | |
570dd450 | 3061 | struct btrfs_log_ctx *safe; |
8b050d35 | 3062 | |
570dd450 CM |
3063 | list_for_each_entry_safe(ctx, safe, &root->log_ctxs[index], list) { |
3064 | list_del_init(&ctx->list); | |
8b050d35 | 3065 | ctx->log_ret = error; |
570dd450 | 3066 | } |
8b050d35 MX |
3067 | } |
3068 | ||
e02119d5 CM |
3069 | /* |
3070 | * btrfs_sync_log sends a given tree log down to the disk and |
3071 | * updates the super blocks to record it. When this call is done, | |
12fcfd22 CM |
3072 | * you know that any inodes previously logged are safely on disk only |
3073 | * if it returns 0. | |
3074 | * | |
3075 | * Any other return value means you need to call btrfs_commit_transaction. | |
3076 | * Some of the edge cases for fsyncing directories that have had unlinks | |
3077 | * or renames done in the past mean that sometimes the only safe | |
3078 | * fsync is to commit the whole FS. When btrfs_sync_log returns -EAGAIN, | |
3079 | * that has happened. | |
e02119d5 CM |
3080 | */ |
3081 | int btrfs_sync_log(struct btrfs_trans_handle *trans, | |
8b050d35 | 3082 | struct btrfs_root *root, struct btrfs_log_ctx *ctx) |
e02119d5 | 3083 | { |
7237f183 YZ |
3084 | int index1; |
3085 | int index2; | |
8cef4e16 | 3086 | int mark; |
e02119d5 | 3087 | int ret; |
0b246afa | 3088 | struct btrfs_fs_info *fs_info = root->fs_info; |
e02119d5 | 3089 | struct btrfs_root *log = root->log_root; |
0b246afa | 3090 | struct btrfs_root *log_root_tree = fs_info->log_root_tree; |
4203e968 | 3091 | struct btrfs_root_item new_root_item; |
bb14a59b | 3092 | int log_transid = 0; |
8b050d35 | 3093 | struct btrfs_log_ctx root_log_ctx; |
c6adc9cc | 3094 | struct blk_plug plug; |
47876f7c FM |
3095 | u64 log_root_start; |
3096 | u64 log_root_level; | |
e02119d5 | 3097 | |
7237f183 | 3098 | mutex_lock(&root->log_mutex); |
d1433deb MX |
3099 | log_transid = ctx->log_transid; |
3100 | if (root->log_transid_committed >= log_transid) { | |
3101 | mutex_unlock(&root->log_mutex); | |
3102 | return ctx->log_ret; | |
3103 | } | |
3104 | ||
3105 | index1 = log_transid % 2; | |
7237f183 | 3106 | if (atomic_read(&root->log_commit[index1])) { |
60d53eb3 | 3107 | wait_log_commit(root, log_transid); |
7237f183 | 3108 | mutex_unlock(&root->log_mutex); |
8b050d35 | 3109 | return ctx->log_ret; |
e02119d5 | 3110 | } |
d1433deb | 3111 | ASSERT(log_transid == root->log_transid); |
7237f183 YZ |
3112 | atomic_set(&root->log_commit[index1], 1); |
3113 | ||
3114 | /* wait for previous tree log sync to complete */ | |
3115 | if (atomic_read(&root->log_commit[(index1 + 1) % 2])) | |
60d53eb3 | 3116 | wait_log_commit(root, log_transid - 1); |
48cab2e0 | 3117 | |
86df7eb9 | 3118 | while (1) { |
2ecb7923 | 3119 | int batch = atomic_read(&root->log_batch); |
cd354ad6 | 3120 | /* when we're on an ssd, just kick the log commit out */ |
0b246afa | 3121 | if (!btrfs_test_opt(fs_info, SSD) && |
27cdeb70 | 3122 | test_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state)) { |
86df7eb9 YZ |
3123 | mutex_unlock(&root->log_mutex); |
3124 | schedule_timeout_uninterruptible(1); | |
3125 | mutex_lock(&root->log_mutex); | |
3126 | } | |
60d53eb3 | 3127 | wait_for_writer(root); |
2ecb7923 | 3128 | if (batch == atomic_read(&root->log_batch)) |
e02119d5 CM |
3129 | break; |
3130 | } | |
e02119d5 | 3131 | |
12fcfd22 | 3132 | /* bail out if we need to do a full commit */ |
4884b8e8 | 3133 | if (btrfs_need_log_full_commit(trans)) { |
12fcfd22 CM |
3134 | ret = -EAGAIN; |
3135 | mutex_unlock(&root->log_mutex); | |
3136 | goto out; | |
3137 | } | |
3138 | ||
8cef4e16 YZ |
3139 | if (log_transid % 2 == 0) |
3140 | mark = EXTENT_DIRTY; | |
3141 | else | |
3142 | mark = EXTENT_NEW; | |
3143 | ||
690587d1 CM |
3144 | /* we start IO on all the marked extents here, but we don't actually |
3145 | * wait for them until later. | |
3146 | */ | |
c6adc9cc | 3147 | blk_start_plug(&plug); |
2ff7e61e | 3148 | ret = btrfs_write_marked_extents(fs_info, &log->dirty_log_pages, mark); |
b528f467 NA |
3149 | /* |
3150 | * -EAGAIN happens when someone, e.g., a concurrent transaction | |
3151 | * commit, writes a dirty extent in this tree-log commit. This | |
3152 | * concurrent write will create a hole writing out the extents, | |
3153 | * and we cannot proceed on a zoned filesystem, requiring | |
3154 | * sequential writing. We could bail out to a full commit |
3155 | * here, but instead we continue, hoping the concurrent write fills |
3156 | * the hole. |
3157 | */ | |
3158 | if (ret == -EAGAIN && btrfs_is_zoned(fs_info)) | |
3159 | ret = 0; | |
79787eaa | 3160 | if (ret) { |
c6adc9cc | 3161 | blk_finish_plug(&plug); |
66642832 | 3162 | btrfs_abort_transaction(trans, ret); |
90787766 | 3163 | btrfs_set_log_full_commit(trans); |
79787eaa JM |
3164 | mutex_unlock(&root->log_mutex); |
3165 | goto out; | |
3166 | } | |
7237f183 | 3167 | |
4203e968 JB |
3168 | /* |
3169 | * We _must_ update under the root->log_mutex in order to make sure we | |
3170 | * have a consistent view of the log root we are trying to commit at | |
3171 | * this moment. | |
3172 | * | |
3173 | * We _must_ copy this into a local copy, because we are not holding the | |
3174 | * log_root_tree->log_mutex yet. This is important because when we | |
3175 | * commit the log_root_tree we must have a consistent view of the | |
3176 | * log_root_tree when we update the super block to point at the | |
3177 | * log_root_tree bytenr. If we update the log_root_tree here we'll race | |
3178 | * with the commit and possibly point at the new block which we may not | |
3179 | * have written out. | |
3180 | */ | |
5d4f98a2 | 3181 | btrfs_set_root_node(&log->root_item, log->node); |
4203e968 | 3182 | memcpy(&new_root_item, &log->root_item, sizeof(new_root_item)); |
7237f183 | 3183 | |
7237f183 YZ |
3184 | root->log_transid++; |
3185 | log->log_transid = root->log_transid; | |
ff782e0a | 3186 | root->log_start_pid = 0; |
7237f183 | 3187 | /* |
8cef4e16 YZ |
3188 | * IO has been started, blocks of the log tree have the WRITTEN flag set |
3189 | * in their headers. New modifications of the log will be written to |
3190 | * new positions. So it's safe to allow log writers to go in. |
7237f183 YZ |
3191 | */ |
3192 | mutex_unlock(&root->log_mutex); | |
3193 | ||
3ddebf27 | 3194 | if (btrfs_is_zoned(fs_info)) { |
e75f9fd1 | 3195 | mutex_lock(&fs_info->tree_root->log_mutex); |
3ddebf27 NA |
3196 | if (!log_root_tree->node) { |
3197 | ret = btrfs_alloc_log_tree_node(trans, log_root_tree); | |
3198 | if (ret) { | |
ea32af47 | 3199 | mutex_unlock(&fs_info->tree_root->log_mutex); |
3ddebf27 NA |
3200 | goto out; |
3201 | } | |
3202 | } | |
e75f9fd1 | 3203 | mutex_unlock(&fs_info->tree_root->log_mutex); |
3ddebf27 NA |
3204 | } |
3205 | ||
e75f9fd1 NA |
3206 | btrfs_init_log_ctx(&root_log_ctx, NULL); |
3207 | ||
3208 | mutex_lock(&log_root_tree->log_mutex); | |
3209 | ||
e3d3b415 FM |
3210 | index2 = log_root_tree->log_transid % 2; |
3211 | list_add_tail(&root_log_ctx.list, &log_root_tree->log_ctxs[index2]); | |
3212 | root_log_ctx.log_transid = log_root_tree->log_transid; | |
3213 | ||
4203e968 JB |
3214 | /* |
3215 | * Now we are safe to update the log_root_tree because we're under the | |
3216 | * log_mutex, and we're a current writer so we're holding the commit | |
3217 | * open until we drop the log_mutex. | |
3218 | */ | |
3219 | ret = update_log_root(trans, log, &new_root_item); | |
4a500fd1 | 3220 | if (ret) { |
d1433deb MX |
3221 | if (!list_empty(&root_log_ctx.list)) |
3222 | list_del_init(&root_log_ctx.list); | |
3223 | ||
c6adc9cc | 3224 | blk_finish_plug(&plug); |
90787766 | 3225 | btrfs_set_log_full_commit(trans); |
995946dd | 3226 | |
79787eaa | 3227 | if (ret != -ENOSPC) { |
66642832 | 3228 | btrfs_abort_transaction(trans, ret); |
79787eaa JM |
3229 | mutex_unlock(&log_root_tree->log_mutex); |
3230 | goto out; | |
3231 | } | |
bf89d38f | 3232 | btrfs_wait_tree_log_extents(log, mark); |
4a500fd1 YZ |
3233 | mutex_unlock(&log_root_tree->log_mutex); |
3234 | ret = -EAGAIN; | |
3235 | goto out; | |
3236 | } | |
3237 | ||
d1433deb | 3238 | if (log_root_tree->log_transid_committed >= root_log_ctx.log_transid) { |
3da5ab56 | 3239 | blk_finish_plug(&plug); |
cbd60aa7 | 3240 | list_del_init(&root_log_ctx.list); |
d1433deb MX |
3241 | mutex_unlock(&log_root_tree->log_mutex); |
3242 | ret = root_log_ctx.log_ret; | |
3243 | goto out; | |
3244 | } | |
8b050d35 | 3245 | |
d1433deb | 3246 | index2 = root_log_ctx.log_transid % 2; |
7237f183 | 3247 | if (atomic_read(&log_root_tree->log_commit[index2])) { |
c6adc9cc | 3248 | blk_finish_plug(&plug); |
bf89d38f | 3249 | ret = btrfs_wait_tree_log_extents(log, mark); |
60d53eb3 | 3250 | wait_log_commit(log_root_tree, |
d1433deb | 3251 | root_log_ctx.log_transid); |
7237f183 | 3252 | mutex_unlock(&log_root_tree->log_mutex); |
5ab5e44a FM |
3253 | if (!ret) |
3254 | ret = root_log_ctx.log_ret; | |
7237f183 YZ |
3255 | goto out; |
3256 | } | |
d1433deb | 3257 | ASSERT(root_log_ctx.log_transid == log_root_tree->log_transid); |
7237f183 YZ |
3258 | atomic_set(&log_root_tree->log_commit[index2], 1); |
3259 | ||
12fcfd22 | 3260 | if (atomic_read(&log_root_tree->log_commit[(index2 + 1) % 2])) { |
60d53eb3 | 3261 | wait_log_commit(log_root_tree, |
d1433deb | 3262 | root_log_ctx.log_transid - 1); |
12fcfd22 CM |
3263 | } |
3264 | ||
12fcfd22 CM |
3265 | /* |
3266 | * now that we've moved on to the tree of log tree roots, | |
3267 | * check the full commit flag again | |
3268 | */ | |
4884b8e8 | 3269 | if (btrfs_need_log_full_commit(trans)) { |
c6adc9cc | 3270 | blk_finish_plug(&plug); |
bf89d38f | 3271 | btrfs_wait_tree_log_extents(log, mark); |
12fcfd22 CM |
3272 | mutex_unlock(&log_root_tree->log_mutex); |
3273 | ret = -EAGAIN; | |
3274 | goto out_wake_log_root; | |
3275 | } | |
7237f183 | 3276 | |
2ff7e61e | 3277 | ret = btrfs_write_marked_extents(fs_info, |
c6adc9cc MX |
3278 | &log_root_tree->dirty_log_pages, |
3279 | EXTENT_DIRTY | EXTENT_NEW); | |
3280 | blk_finish_plug(&plug); | |
b528f467 NA |
3281 | /* |
3282 | * As described above, -EAGAIN indicates a hole in the extents. We | |
3283 | * cannot wait for these writeouts since waiting for them would cause a |
3284 | * deadlock. Bail out to a full commit instead. |
3285 | */ | |
3286 | if (ret == -EAGAIN && btrfs_is_zoned(fs_info)) { | |
3287 | btrfs_set_log_full_commit(trans); | |
3288 | btrfs_wait_tree_log_extents(log, mark); | |
3289 | mutex_unlock(&log_root_tree->log_mutex); | |
3290 | goto out_wake_log_root; | |
3291 | } else if (ret) { | |
90787766 | 3292 | btrfs_set_log_full_commit(trans); |
66642832 | 3293 | btrfs_abort_transaction(trans, ret); |
79787eaa JM |
3294 | mutex_unlock(&log_root_tree->log_mutex); |
3295 | goto out_wake_log_root; | |
3296 | } | |
bf89d38f | 3297 | ret = btrfs_wait_tree_log_extents(log, mark); |
5ab5e44a | 3298 | if (!ret) |
bf89d38f JM |
3299 | ret = btrfs_wait_tree_log_extents(log_root_tree, |
3300 | EXTENT_NEW | EXTENT_DIRTY); | |
5ab5e44a | 3301 | if (ret) { |
90787766 | 3302 | btrfs_set_log_full_commit(trans); |
5ab5e44a FM |
3303 | mutex_unlock(&log_root_tree->log_mutex); |
3304 | goto out_wake_log_root; | |
3305 | } | |
e02119d5 | 3306 | |
47876f7c FM |
3307 | log_root_start = log_root_tree->node->start; |
3308 | log_root_level = btrfs_header_level(log_root_tree->node); | |
7237f183 | 3309 | log_root_tree->log_transid++; |
7237f183 YZ |
3310 | mutex_unlock(&log_root_tree->log_mutex); |
3311 | ||
3312 | /* | |
47876f7c FM |
3313 | * Here we are guaranteed that nobody is going to write the superblock |
3314 | * for the current transaction before us and that we do not write |
3315 | * our superblock before the previous transaction finishes its commit | |
3316 | * and writes its superblock, because: | |
3317 | * | |
3318 | * 1) We are holding a handle on the current transaction, so nobody |
3319 | * can commit it until we release the handle; | |
3320 | * | |
3321 | * 2) Before writing our superblock we acquire the tree_log_mutex, so | |
3322 | * if the previous transaction is still committing, and hasn't yet | |
3323 | * written its superblock, we wait for it to do it, because a | |
3324 | * transaction commit acquires the tree_log_mutex when the commit | |
3325 | * begins and releases it only after writing its superblock. | |
7237f183 | 3326 | */ |
47876f7c | 3327 | mutex_lock(&fs_info->tree_log_mutex); |
165ea85f JB |
3328 | |
3329 | /* | |
3330 | * The previous transaction writeout phase could have failed, and thus | |
3331 | * marked the fs in an error state. We must not commit here, as we | |
3332 | * could have updated our generation in the super_for_commit and | |
3333 | * writing the super here would result in transid mismatches. If there | |
3334 | * is an error here just bail. | |
3335 | */ | |
3336 | if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) { | |
3337 | ret = -EIO; | |
3338 | btrfs_set_log_full_commit(trans); | |
3339 | btrfs_abort_transaction(trans, ret); | |
3340 | mutex_unlock(&fs_info->tree_log_mutex); | |
3341 | goto out_wake_log_root; | |
3342 | } | |
3343 | ||
47876f7c FM |
3344 | btrfs_set_super_log_root(fs_info->super_for_commit, log_root_start); |
3345 | btrfs_set_super_log_root_level(fs_info->super_for_commit, log_root_level); | |
eece6a9c | 3346 | ret = write_all_supers(fs_info, 1); |
47876f7c | 3347 | mutex_unlock(&fs_info->tree_log_mutex); |
5af3e8cc | 3348 | if (ret) { |
90787766 | 3349 | btrfs_set_log_full_commit(trans); |
66642832 | 3350 | btrfs_abort_transaction(trans, ret); |
5af3e8cc SB |
3351 | goto out_wake_log_root; |
3352 | } | |
7237f183 | 3353 | |
e1a6d264 FM |
3354 | /* |
3355 | * We know there can only be one task here, since we have not yet set | |
3356 | * root->log_commit[index1] to 0 and any task attempting to sync the | |
3357 | * log must wait for the previous log transaction to commit if it's | |
3358 | * still in progress or wait for the current log transaction commit if | |
3359 | * someone else already started it. We use <= and not < because the | |
3360 | * first log transaction has an ID of 0. | |
3361 | */ | |
3362 | ASSERT(root->last_log_commit <= log_transid); | |
3363 | root->last_log_commit = log_transid; | |
257c62e1 | 3364 | |
12fcfd22 | 3365 | out_wake_log_root: |
570dd450 | 3366 | mutex_lock(&log_root_tree->log_mutex); |
8b050d35 MX |
3367 | btrfs_remove_all_log_ctxs(log_root_tree, index2, ret); |
3368 | ||
d1433deb | 3369 | log_root_tree->log_transid_committed++; |
7237f183 | 3370 | atomic_set(&log_root_tree->log_commit[index2], 0); |
d1433deb MX |
3371 | mutex_unlock(&log_root_tree->log_mutex); |
3372 | ||
33a9eca7 | 3373 | /* |
093258e6 DS |
3374 | * The barrier before waitqueue_active (in cond_wake_up) is needed so |
3375 | * all the updates above are seen by the woken threads. It might not be | |
3376 | * necessary, but proving that seems to be hard. | |
33a9eca7 | 3377 | */ |
093258e6 | 3378 | cond_wake_up(&log_root_tree->log_commit_wait[index2]); |
e02119d5 | 3379 | out: |
d1433deb | 3380 | mutex_lock(&root->log_mutex); |
570dd450 | 3381 | btrfs_remove_all_log_ctxs(root, index1, ret); |
d1433deb | 3382 | root->log_transid_committed++; |
7237f183 | 3383 | atomic_set(&root->log_commit[index1], 0); |
d1433deb | 3384 | mutex_unlock(&root->log_mutex); |
8b050d35 | 3385 | |
33a9eca7 | 3386 | /* |
093258e6 DS |
3387 | * The barrier before waitqueue_active (in cond_wake_up) is needed so |
3388 | * all the updates above are seen by the woken threads. It might not be | |
3389 | * necessary, but proving that seems to be hard. | |
33a9eca7 | 3390 | */ |
093258e6 | 3391 | cond_wake_up(&root->log_commit_wait[index1]); |
b31eabd8 | 3392 | return ret; |
e02119d5 CM |
3393 | } |
3394 | ||
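As a hedged illustration of the contract documented above btrfs_sync_log() (the wrapper below is not part of the kernel sources and trims all the real fsync bookkeeping), a caller can treat a return of 0 as "the logged inodes are safely on disk" and anything else, notably -EAGAIN, as a signal to fall back to a full transaction commit:

static int example_sync_or_commit(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root,
                                  struct btrfs_log_ctx *ctx)
{
        int ret = btrfs_sync_log(trans, root, ctx);

        if (ret == 0)
                return btrfs_end_transaction(trans);    /* log reached disk */

        /* -EAGAIN (or any other error): only a full commit is known safe. */
        return btrfs_commit_transaction(trans);
}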
4a500fd1 YZ |
3395 | static void free_log_tree(struct btrfs_trans_handle *trans, |
3396 | struct btrfs_root *log) | |
e02119d5 CM |
3397 | { |
3398 | int ret; | |
e02119d5 CM |
3399 | struct walk_control wc = { |
3400 | .free = 1, | |
3401 | .process_func = process_one_buffer | |
3402 | }; | |
3403 | ||
3ddebf27 NA |
3404 | if (log->node) { |
3405 | ret = walk_log_tree(trans, log, &wc); | |
3406 | if (ret) { | |
3407 | if (trans) | |
3408 | btrfs_abort_transaction(trans, ret); | |
3409 | else | |
3410 | btrfs_handle_fs_error(log->fs_info, ret, NULL); | |
3411 | } | |
374b0e2d | 3412 | } |
e02119d5 | 3413 | |
59b0713a FM |
3414 | clear_extent_bits(&log->dirty_log_pages, 0, (u64)-1, |
3415 | EXTENT_DIRTY | EXTENT_NEW | EXTENT_NEED_WAIT); | |
e289f03e | 3416 | extent_io_tree_release(&log->log_csum_range); |
d3575156 NA |
3417 | |
3418 | if (trans && log->node) | |
3419 | btrfs_redirty_list_add(trans->transaction, log->node); | |
00246528 | 3420 | btrfs_put_root(log); |
4a500fd1 YZ |
3421 | } |
3422 | ||
3423 | /* | |
3424 | * free all the extents used by the tree log. This should be called | |
3425 | * at commit time of the full transaction | |
3426 | */ | |
3427 | int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root) | |
3428 | { | |
3429 | if (root->log_root) { | |
3430 | free_log_tree(trans, root->log_root); | |
3431 | root->log_root = NULL; | |
e7a79811 | 3432 | clear_bit(BTRFS_ROOT_HAS_LOG_TREE, &root->state); |
4a500fd1 YZ |
3433 | } |
3434 | return 0; | |
3435 | } | |
3436 | ||
3437 | int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans, | |
3438 | struct btrfs_fs_info *fs_info) | |
3439 | { | |
3440 | if (fs_info->log_root_tree) { | |
3441 | free_log_tree(trans, fs_info->log_root_tree); | |
3442 | fs_info->log_root_tree = NULL; | |
47876f7c | 3443 | clear_bit(BTRFS_ROOT_HAS_LOG_TREE, &fs_info->tree_root->state); |
4a500fd1 | 3444 | } |
e02119d5 CM |
3445 | return 0; |
3446 | } | |
3447 | ||
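A hedged sketch of when the two functions above run (the wrapper below is invented; in the kernel these calls are issued from the transaction commit path, which iterates every root that has a log): each subvolume log is torn down with btrfs_free_log(), and once the subvolume logs are gone the tree of log roots itself is freed with btrfs_free_log_root_tree().

/* Illustrative ordering only; the real commit code walks all dirty roots. */
static void example_free_logs_at_commit(struct btrfs_trans_handle *trans,
                                        struct btrfs_fs_info *fs_info,
                                        struct btrfs_root *root)
{
        btrfs_free_log(trans, root);                    /* per-subvolume log tree */
        btrfs_free_log_root_tree(trans, fs_info);       /* tree of log roots */
}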
803f0f64 | 3448 | /* |
6e8e777d FM |
3449 | * Check if an inode was logged in the current transaction. This may often |
3450 | * return some false positives, because logged_trans is an in-memory only field, |
3451 | * not persisted anywhere. This is meant to be used in contexts where a false | |
3452 | * positive has no functional consequences. | |
803f0f64 FM |
3453 | */ |
3454 | static bool inode_logged(struct btrfs_trans_handle *trans, | |
3455 | struct btrfs_inode *inode) | |
3456 | { | |
3457 | if (inode->logged_trans == trans->transid) | |
3458 | return true; | |
3459 | ||
6e8e777d FM |
3460 | /* |
3461 | * The inode's logged_trans is always 0 when we load it (because it is | |
3462 | * not persisted in the inode item or elsewhere). So if it is 0 and the |
d135a533 FM |
3463 | * inode was last modified in the current transaction, then the inode may |
3464 | * have been logged before in the current transaction, then evicted and |
3465 | * loaded again in the current transaction - or it may never have been |
3466 | * logged in the current transaction at all. Since we cannot be sure, we |
3467 | * have to assume it was, otherwise our callers can leave an inconsistent log. |
6e8e777d FM |
3468 | */ |
3469 | if (inode->logged_trans == 0 && | |
3470 | inode->last_trans == trans->transid && | |
803f0f64 FM |
3471 | !test_bit(BTRFS_FS_LOG_RECOVERING, &trans->fs_info->flags)) |
3472 | return true; | |
3473 | ||
3474 | return false; | |
3475 | } | |
3476 | ||
e02119d5 CM |
3477 | /* |
3478 | * If both a file and directory are logged, and unlinks or renames are | |
3479 | * mixed in, we have a few interesting corners: | |
3480 | * | |
3481 | * create file X in dir Y | |
3482 | * link file X to X.link in dir Y | |
3483 | * fsync file X | |
3484 | * unlink file X but leave X.link | |
3485 | * fsync dir Y | |
3486 | * | |
3487 | * After a crash we would expect only X.link to exist. But file X | |
3488 | * didn't get fsync'd again so the log has back refs for X and X.link. | |
3489 | * | |
3490 | * We solve this by removing directory entries and inode backrefs from the | |
3491 | * log when a file that was logged in the current transaction is | |
3492 | * unlinked. Any later fsync will include the updated log entries, and | |
3493 | * we'll be able to reconstruct the proper directory items from backrefs. | |
3494 | * | |
3495 | * This optimization allows us to avoid relogging the entire inode |
3496 | * or the entire directory. | |
3497 | */ | |
3498 | int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans, | |
3499 | struct btrfs_root *root, | |
3500 | const char *name, int name_len, | |
49f34d1f | 3501 | struct btrfs_inode *dir, u64 index) |
e02119d5 CM |
3502 | { |
3503 | struct btrfs_root *log; | |
3504 | struct btrfs_dir_item *di; | |
3505 | struct btrfs_path *path; | |
3506 | int ret; | |
4a500fd1 | 3507 | int err = 0; |
49f34d1f | 3508 | u64 dir_ino = btrfs_ino(dir); |
e02119d5 | 3509 | |
803f0f64 | 3510 | if (!inode_logged(trans, dir)) |
3a5f1d45 CM |
3511 | return 0; |
3512 | ||
e02119d5 CM |
3513 | ret = join_running_log_trans(root); |
3514 | if (ret) | |
3515 | return 0; | |
3516 | ||
49f34d1f | 3517 | mutex_lock(&dir->log_mutex); |
e02119d5 CM |
3518 | |
3519 | log = root->log_root; | |
3520 | path = btrfs_alloc_path(); | |
a62f44a5 TI |
3521 | if (!path) { |
3522 | err = -ENOMEM; | |
3523 | goto out_unlock; | |
3524 | } | |
2a29edc6 | 3525 | |
33345d01 | 3526 | di = btrfs_lookup_dir_item(trans, log, path, dir_ino, |
e02119d5 | 3527 | name, name_len, -1); |
4a500fd1 YZ |
3528 | if (IS_ERR(di)) { |
3529 | err = PTR_ERR(di); | |
3530 | goto fail; | |
3531 | } | |
3532 | if (di) { | |
e02119d5 | 3533 | ret = btrfs_delete_one_dir_name(trans, log, path, di); |
3650860b JB |
3534 | if (ret) { |
3535 | err = ret; | |
3536 | goto fail; | |
3537 | } | |
e02119d5 | 3538 | } |
b3b4aa74 | 3539 | btrfs_release_path(path); |
33345d01 | 3540 | di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino, |
e02119d5 | 3541 | index, name, name_len, -1); |
4a500fd1 YZ |
3542 | if (IS_ERR(di)) { |
3543 | err = PTR_ERR(di); | |
3544 | goto fail; | |
3545 | } | |
3546 | if (di) { | |
e02119d5 | 3547 | ret = btrfs_delete_one_dir_name(trans, log, path, di); |
3650860b JB |
3548 | if (ret) { |
3549 | err = ret; | |
3550 | goto fail; | |
3551 | } | |
e02119d5 CM |
3552 | } |
3553 | ||
ddffcf6f FM |
3554 | /* |
3555 | * We do not need to update the size field of the directory's inode item | |
3556 | * because on log replay we update the field to reflect all existing | |
3557 | * entries in the directory (see overwrite_item()). | |
e02119d5 | 3558 | */ |
4a500fd1 | 3559 | fail: |
e02119d5 | 3560 | btrfs_free_path(path); |
a62f44a5 | 3561 | out_unlock: |
49f34d1f | 3562 | mutex_unlock(&dir->log_mutex); |
fb2fecba | 3563 | if (err == -ENOSPC) { |
90787766 | 3564 | btrfs_set_log_full_commit(trans); |
fb2fecba JB |
3565 | err = 0; |
3566 | } else if (err < 0 && err != -ENOENT) { | |
3567 | /* ENOENT can be returned if the entry hasn't been fsynced yet */ | |
3568 | btrfs_abort_transaction(trans, err); | |
3569 | } | |
79787eaa | 3570 | |
12fcfd22 | 3571 | btrfs_end_log_trans(root); |
e02119d5 | 3572 | |
411fc6bc | 3573 | return err; |
e02119d5 CM |
3574 | } |
3575 | ||
3576 | /* see comments for btrfs_del_dir_entries_in_log */ | |
3577 | int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans, | |
3578 | struct btrfs_root *root, | |
3579 | const char *name, int name_len, | |
a491abb2 | 3580 | struct btrfs_inode *inode, u64 dirid) |
e02119d5 CM |
3581 | { |
3582 | struct btrfs_root *log; | |
3583 | u64 index; | |
3584 | int ret; | |
3585 | ||
803f0f64 | 3586 | if (!inode_logged(trans, inode)) |
3a5f1d45 CM |
3587 | return 0; |
3588 | ||
e02119d5 CM |
3589 | ret = join_running_log_trans(root); |
3590 | if (ret) | |
3591 | return 0; | |
3592 | log = root->log_root; | |
a491abb2 | 3593 | mutex_lock(&inode->log_mutex); |
e02119d5 | 3594 | |
a491abb2 | 3595 | ret = btrfs_del_inode_ref(trans, log, name, name_len, btrfs_ino(inode), |
e02119d5 | 3596 | dirid, &index); |
a491abb2 | 3597 | mutex_unlock(&inode->log_mutex); |
4a500fd1 | 3598 | if (ret == -ENOSPC) { |
90787766 | 3599 | btrfs_set_log_full_commit(trans); |
4a500fd1 | 3600 | ret = 0; |
79787eaa | 3601 | } else if (ret < 0 && ret != -ENOENT) |
66642832 | 3602 | btrfs_abort_transaction(trans, ret); |
12fcfd22 | 3603 | btrfs_end_log_trans(root); |
e02119d5 | 3604 | |
e02119d5 CM |
3605 | return ret; |
3606 | } | |
3607 | ||
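A hedged sketch of how the two helpers above are typically driven from an unlink or rename path (the function name and surrounding error handling are illustrative, not the actual caller): after the directory entry and inode reference are removed from the subvolume tree, the matching log items are dropped so a later fsync of the directory or the inode replays a consistent view.

/* Illustrative caller with assumed names; real unlink code does more work. */
static int example_unlink_log_update(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root,
                                     struct btrfs_inode *dir,
                                     struct btrfs_inode *inode,
                                     const char *name, int name_len,
                                     u64 index)
{
        int ret;

        ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len,
                                           dir, index);
        if (ret)
                return ret;

        return btrfs_del_inode_ref_in_log(trans, root, name, name_len,
                                          inode, btrfs_ino(dir));
}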
3608 | /* | |
3609 | * creates a range item in the log for 'dirid'. first_offset and | |
3610 | * last_offset tell us which parts of the key space the log should | |
3611 | * be considered authoritative for. | |
3612 | */ | |
3613 | static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans, | |
3614 | struct btrfs_root *log, | |
3615 | struct btrfs_path *path, | |
3616 | int key_type, u64 dirid, | |
3617 | u64 first_offset, u64 last_offset) | |
3618 | { | |
3619 | int ret; | |
3620 | struct btrfs_key key; | |
3621 | struct btrfs_dir_log_item *item; | |
3622 | ||
3623 | key.objectid = dirid; | |
3624 | key.offset = first_offset; | |
3625 | if (key_type == BTRFS_DIR_ITEM_KEY) | |
3626 | key.type = BTRFS_DIR_LOG_ITEM_KEY; | |
3627 | else | |
3628 | key.type = BTRFS_DIR_LOG_INDEX_KEY; | |
3629 | ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*item)); | |
4a500fd1 YZ |
3630 | if (ret) |
3631 | return ret; | |
e02119d5 CM |
3632 | |
3633 | item = btrfs_item_ptr(path->nodes[0], path->slots[0], | |
3634 | struct btrfs_dir_log_item); | |
3635 | btrfs_set_dir_log_end(path->nodes[0], item, last_offset); | |
3636 | btrfs_mark_buffer_dirty(path->nodes[0]); | |
b3b4aa74 | 3637 | btrfs_release_path(path); |
e02119d5 CM |
3638 | return 0; |
3639 | } | |
3640 | ||
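To show how the range item written by insert_dir_log_key() is meant to be read back (the helper below is hypothetical; the actual consumer is the log replay code elsewhere in this file): the item's key offset holds first_offset and its dir_log_end field holds last_offset, so a directory index inside that window is one the log is authoritative for, and its absence from the log means the entry was deleted before the fsync.

/* Hypothetical helper, for illustration only. */
static bool example_index_covered_by_log(struct extent_buffer *leaf,
                                         struct btrfs_dir_log_item *item,
                                         u64 first_offset, u64 index)
{
        return index >= first_offset &&
               index <= btrfs_dir_log_end(leaf, item);
}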
3641 | /* | |
3642 | * log all the items included in the current transaction for a given | |
3643 | * directory. This also creates the range items in the log tree required | |
3644 | * to replay anything deleted before the fsync | |
3645 | */ | |
3646 | static noinline int log_dir_items(struct btrfs_trans_handle *trans, | |
684a5773 | 3647 | struct btrfs_root *root, struct btrfs_inode *inode, |
e02119d5 CM |
3648 | struct btrfs_path *path, |
3649 | struct btrfs_path *dst_path, int key_type, | |
2f2ff0ee | 3650 | struct btrfs_log_ctx *ctx, |
e02119d5 CM |
3651 | u64 min_offset, u64 *last_offset_ret) |
3652 | { | |
3653 | struct btrfs_key min_key; | |
e02119d5 CM |
3654 | struct btrfs_root *log = root->log_root; |
3655 | struct extent_buffer *src; | |
4a500fd1 | 3656 | int err = 0; |
e02119d5 CM |
3657 | int ret; |
3658 | int i; | |
3659 | int nritems; | |
3660 | u64 first_offset = min_offset; | |
3661 | u64 last_offset = (u64)-1; | |
684a5773 | 3662 | u64 ino = btrfs_ino(inode); |
e02119d5 CM |
3663 | |
3664 | log = root->log_root; | |
e02119d5 | 3665 | |
33345d01 | 3666 | min_key.objectid = ino; |
e02119d5 CM |
3667 | min_key.type = key_type; |
3668 | min_key.offset = min_offset; | |
3669 | ||
6174d3cb | 3670 | ret = btrfs_search_forward(root, &min_key, path, trans->transid); |
e02119d5 CM |
3671 | |
3672 | /* | |
3673 | * we didn't find anything from this transaction, see if there | |
3674 | * is anything at all | |
3675 | */ | |
33345d01 LZ |
3676 | if (ret != 0 || min_key.objectid != ino || min_key.type != key_type) { |
3677 | min_key.objectid = ino; | |
e02119d5 CM |
3678 | min_key.type = key_type; |
3679 | min_key.offset = (u64)-1; | |
b3b4aa74 | 3680 | btrfs_release_path(path); |
e02119d5 CM |
3681 | ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0); |
3682 | if (ret < 0) { | |
b3b4aa74 | 3683 | btrfs_release_path(path); |
e02119d5 CM |
3684 | return ret; |
3685 | } | |
33345d01 | 3686 | ret = btrfs_previous_item(root, path, ino, key_type); |
e02119d5 CM |
3687 | |
3688 | /* if ret == 0 there are items for this type, | |
3689 | * create a range to tell us the last key of this type. | |
3690 | * otherwise, there are no items in this directory after | |
3691 | * *min_offset, and we create a range to indicate that. | |
3692 | */ | |
3693 | if (ret == 0) { | |
3694 | struct btrfs_key tmp; | |
3695 | btrfs_item_key_to_cpu(path->nodes[0], &tmp, | |
3696 | path->slots[0]); | |
d397712b | 3697 | if (key_type == tmp.type) |
e02119d5 | 3698 | first_offset = max(min_offset, tmp.offset) + 1; |
e02119d5 CM |
3699 | } |
3700 | goto done; | |
3701 | } | |
3702 | ||
3703 | /* go backward to find any previous key */ | |
33345d01 | 3704 | ret = btrfs_previous_item(root, path, ino, key_type); |
e02119d5 CM |
3705 | if (ret == 0) { |
3706 | struct btrfs_key tmp; | |
3707 | btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]); | |
3708 | if (key_type == tmp.type) { | |
3709 | first_offset = tmp.offset; | |
3710 | ret = overwrite_item(trans, log, dst_path, | |
3711 | path->nodes[0], path->slots[0], | |
3712 | &tmp); | |
4a500fd1 YZ |
3713 | if (ret) { |
3714 | err = ret; | |
3715 | goto done; | |
3716 | } | |
e02119d5 CM |
3717 | } |
3718 | } | |
b3b4aa74 | 3719 | btrfs_release_path(path); |
e02119d5 | 3720 | |
2cc83342 JB |
3721 | /* |
3722 | * Find the first key from this transaction again. See the note for | |
3723 | * log_new_dir_dentries: if we're logging a directory recursively, we |
3724 | * won't be holding its i_mutex, which means we can modify the directory | |
3725 | * while we're logging it. If we remove an entry between our first | |
3726 | * search and this search we'll not find the key again and can just | |
3727 | * bail. | |
3728 | */ | |
bb56f02f | 3729 | search: |
e02119d5 | 3730 | ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0); |
2cc83342 | 3731 | if (ret != 0) |
e02119d5 | 3732 | goto done; |
e02119d5 CM |
3733 | |
3734 | /* | |
3735 | * we have a block from this transaction, log every item in it | |
3736 | * from our directory | |
3737 | */ | |
d397712b | 3738 | while (1) { |
e02119d5 CM |
3739 | struct btrfs_key tmp; |
3740 | src = path->nodes[0]; | |
3741 | nritems = btrfs_header_nritems(src); | |
3742 | for (i = path->slots[0]; i < nritems; i++) { | |
2f2ff0ee FM |
3743 | struct btrfs_dir_item *di; |
3744 | ||
e02119d5 CM |
3745 | btrfs_item_key_to_cpu(src, &min_key, i); |
3746 | ||
33345d01 | 3747 | if (min_key.objectid != ino || min_key.type != key_type) |
e02119d5 | 3748 | goto done; |
bb56f02f FM |
3749 | |
3750 | if (need_resched()) { | |
3751 | btrfs_release_path(path); | |
3752 | cond_resched(); | |
3753 | goto search; | |
3754 | } | |
3755 | ||
e02119d5 CM |
3756 | ret = overwrite_item(trans, log, dst_path, src, i, |
3757 | &min_key); | |
4a500fd1 YZ |
3758 | if (ret) { |
3759 | err = ret; | |
3760 | goto done; | |
3761 | } | |
2f2ff0ee FM |
3762 | |
3763 | /* | |
3764 | * We must make sure that when we log a directory entry, | |
3765 | * the corresponding inode, after log replay, has a | |
3766 | * matching link count. For example: | |
3767 | * | |
3768 | * touch foo | |
3769 | * mkdir mydir | |
3770 | * sync | |
3771 | * ln foo mydir/bar | |
3772 | * xfs_io -c "fsync" mydir | |
3773 | * <crash> | |
3774 | * <mount fs and log replay> | |
3775 | * | |
3776 | * Would result in a fsync log that when replayed, our | |
3777 | * file inode would have a link count of 1, but we get | |
3778 | * two directory entries pointing to the same inode. | |
3779 | * After removing one of the names, it would not be | |
3780 | * possible to remove the other name, which resulted | |
3781 | * always in stale file handle errors, and would not | |
3782 | * be possible to rmdir the parent directory, since | |
3783 | * its i_size could never decrement to the value | |
3784 | * BTRFS_EMPTY_DIR_SIZE, resulting in -ENOTEMPTY errors. | |
3785 | */ | |
3786 | di = btrfs_item_ptr(src, i, struct btrfs_dir_item); | |
3787 | btrfs_dir_item_key_to_cpu(src, di, &tmp); | |
3788 | if (ctx && | |
3789 | (btrfs_dir_transid(src, di) == trans->transid || | |
3790 | btrfs_dir_type(src, di) == BTRFS_FT_DIR) && | |
3791 | tmp.type != BTRFS_ROOT_ITEM_KEY) | |
3792 | ctx->log_new_dentries = true; | |
e02119d5 CM |
3793 | } |
3794 | path->slots[0] = nritems; | |
3795 | ||
3796 | /* | |
3797 | * look ahead to the next item and see if it is also | |
3798 | * from this directory and from this transaction | |
3799 | */ | |
3800 | ret = btrfs_next_leaf(root, path); | |
80c0b421 LB |
3801 | if (ret) { |
3802 | if (ret == 1) | |
3803 | last_offset = (u64)-1; | |
3804 | else | |
3805 | err = ret; | |
e02119d5 CM |
3806 | goto done; |
3807 | } | |
3808 | btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]); | |
33345d01 | 3809 | if (tmp.objectid != ino || tmp.type != key_type) { |
e02119d5 CM |
3810 | last_offset = (u64)-1; |
3811 | goto done; | |
3812 | } | |
3813 | if (btrfs_header_generation(path->nodes[0]) != trans->transid) { | |
3814 | ret = overwrite_item(trans, log, dst_path, | |
3815 | path->nodes[0], path->slots[0], | |
3816 | &tmp); | |
4a500fd1 YZ |
3817 | if (ret) |
3818 | err = ret; | |
3819 | else | |
3820 | last_offset = tmp.offset; | |
e02119d5 CM |
3821 | goto done; |
3822 | } | |
3823 | } | |
3824 | done: | |
b3b4aa74 DS |
3825 | btrfs_release_path(path); |
3826 | btrfs_release_path(dst_path); | |
e02119d5 | 3827 | |
4a500fd1 YZ |
3828 | if (err == 0) { |
3829 | *last_offset_ret = last_offset; | |
3830 | /* | |
3831 | * insert the log range keys to indicate where the log | |
3832 | * is valid | |
3833 | */ | |
3834 | ret = insert_dir_log_key(trans, log, path, key_type, | |
33345d01 | 3835 | ino, first_offset, last_offset); |
4a500fd1 YZ |
3836 | if (ret) |
3837 | err = ret; | |
3838 | } | |
3839 | return err; | |
e02119d5 CM |
3840 | } |
3841 | ||
3842 | /* | |
3843 | * logging directories is very similar to logging inodes. We find all the items |
3844 | * from the current transaction and write them to the log. | |
3845 | * | |
3846 | * The recovery code scans the directory in the subvolume, and if it finds a | |
3847 | * key in the range logged that is not present in the log tree, then it means | |
3848 | * that dir entry was unlinked during the transaction. | |
3849 | * | |
3850 | * In order for that scan to work, we must include one key smaller than | |
3851 | * the smallest key logged by this transaction and one key larger than the largest |
3852 | * key logged by this transaction. | |
3853 | */ | |
3854 | static noinline int log_directory_changes(struct btrfs_trans_handle *trans, | |
dbf39ea4 | 3855 | struct btrfs_root *root, struct btrfs_inode *inode, |
e02119d5 | 3856 | struct btrfs_path *path, |
2f2ff0ee FM |
3857 | struct btrfs_path *dst_path, |
3858 | struct btrfs_log_ctx *ctx) | |
e02119d5 CM |
3859 | { |
3860 | u64 min_key; | |
3861 | u64 max_key; | |
3862 | int ret; | |
3863 | int key_type = BTRFS_DIR_ITEM_KEY; | |
3864 | ||
3865 | again: | |
3866 | min_key = 0; | |
3867 | max_key = 0; | |
d397712b | 3868 | while (1) { |
dbf39ea4 NB |
3869 | ret = log_dir_items(trans, root, inode, path, dst_path, key_type, |
3870 | ctx, min_key, &max_key); | |
4a500fd1 YZ |
3871 | if (ret) |
3872 | return ret; | |
e02119d5 CM |
3873 | if (max_key == (u64)-1) |
3874 | break; | |
3875 | min_key = max_key + 1; | |
3876 | } | |
3877 | ||
3878 | if (key_type == BTRFS_DIR_ITEM_KEY) { | |
3879 | key_type = BTRFS_DIR_INDEX_KEY; | |
3880 | goto again; | |
3881 | } | |
3882 | return 0; | |
3883 | } | |
3884 | ||
3885 | /* | |
3886 | * a helper function to drop items from the log before we relog an | |
3887 | * inode. max_key_type indicates the highest item type to remove. | |
3888 | * This cannot be run for file data extents because it does not | |
3889 | * free the extents they point to. | |
3890 | */ | |
3891 | static int drop_objectid_items(struct btrfs_trans_handle *trans, | |
3892 | struct btrfs_root *log, | |
3893 | struct btrfs_path *path, | |
3894 | u64 objectid, int max_key_type) | |
3895 | { | |
3896 | int ret; | |
3897 | struct btrfs_key key; | |
3898 | struct btrfs_key found_key; | |
18ec90d6 | 3899 | int start_slot; |
e02119d5 CM |
3900 | |
3901 | key.objectid = objectid; | |
3902 | key.type = max_key_type; | |
3903 | key.offset = (u64)-1; | |
3904 | ||
d397712b | 3905 | while (1) { |
e02119d5 | 3906 | ret = btrfs_search_slot(trans, log, &key, path, -1, 1); |
3650860b | 3907 | BUG_ON(ret == 0); /* Logic error */ |
4a500fd1 | 3908 | if (ret < 0) |
e02119d5 CM |
3909 | break; |
3910 | ||
3911 | if (path->slots[0] == 0) | |
3912 | break; | |
3913 | ||
3914 | path->slots[0]--; | |
3915 | btrfs_item_key_to_cpu(path->nodes[0], &found_key, | |
3916 | path->slots[0]); | |
3917 | ||
3918 | if (found_key.objectid != objectid) | |
3919 | break; | |
3920 | ||
18ec90d6 JB |
3921 | found_key.offset = 0; |
3922 | found_key.type = 0; | |
e3b83361 | 3923 | ret = btrfs_bin_search(path->nodes[0], &found_key, &start_slot); |
cbca7d59 FM |
3924 | if (ret < 0) |
3925 | break; | |
18ec90d6 JB |
3926 | |
3927 | ret = btrfs_del_items(trans, log, path, start_slot, | |
3928 | path->slots[0] - start_slot + 1); | |
3929 | /* | |
3930 | * If start slot isn't 0 then we don't need to re-search, we've | |
3931 | * found the last guy with the objectid in this tree. | |
3932 | */ | |
3933 | if (ret || start_slot != 0) | |
65a246c5 | 3934 | break; |
b3b4aa74 | 3935 | btrfs_release_path(path); |
e02119d5 | 3936 | } |
b3b4aa74 | 3937 | btrfs_release_path(path); |
5bdbeb21 JB |
3938 | if (ret > 0) |
3939 | ret = 0; | |
4a500fd1 | 3940 | return ret; |
e02119d5 CM |
3941 | } |
3942 | ||
94edf4ae JB |
3943 | static void fill_inode_item(struct btrfs_trans_handle *trans, |
3944 | struct extent_buffer *leaf, | |
3945 | struct btrfs_inode_item *item, | |
1a4bcf47 FM |
3946 | struct inode *inode, int log_inode_only, |
3947 | u64 logged_isize) | |
94edf4ae | 3948 | { |
0b1c6cca | 3949 | struct btrfs_map_token token; |
77eea05e | 3950 | u64 flags; |
0b1c6cca | 3951 | |
c82f823c | 3952 | btrfs_init_map_token(&token, leaf); |
94edf4ae JB |
3953 | |
3954 | if (log_inode_only) { | |
3955 | /* set the generation to zero so the recovery code |
3956 | * can tell the difference between a logging |
3957 | * just to say 'this inode exists' and a logging |
3958 | * to say 'update this inode with these values' | |
3959 | */ | |
cc4c13d5 DS |
3960 | btrfs_set_token_inode_generation(&token, item, 0); |
3961 | btrfs_set_token_inode_size(&token, item, logged_isize); | |
94edf4ae | 3962 | } else { |
cc4c13d5 DS |
3963 | btrfs_set_token_inode_generation(&token, item, |
3964 | BTRFS_I(inode)->generation); | |
3965 | btrfs_set_token_inode_size(&token, item, inode->i_size); | |
0b1c6cca JB |
3966 | } |
3967 | ||
cc4c13d5 DS |
3968 | btrfs_set_token_inode_uid(&token, item, i_uid_read(inode)); |
3969 | btrfs_set_token_inode_gid(&token, item, i_gid_read(inode)); | |
3970 | btrfs_set_token_inode_mode(&token, item, inode->i_mode); | |
3971 | btrfs_set_token_inode_nlink(&token, item, inode->i_nlink); | |
3972 | ||
3973 | btrfs_set_token_timespec_sec(&token, &item->atime, | |
3974 | inode->i_atime.tv_sec); | |
3975 | btrfs_set_token_timespec_nsec(&token, &item->atime, | |
3976 | inode->i_atime.tv_nsec); | |
3977 | ||
3978 | btrfs_set_token_timespec_sec(&token, &item->mtime, | |
3979 | inode->i_mtime.tv_sec); | |
3980 | btrfs_set_token_timespec_nsec(&token, &item->mtime, | |
3981 | inode->i_mtime.tv_nsec); | |
3982 | ||
3983 | btrfs_set_token_timespec_sec(&token, &item->ctime, | |
3984 | inode->i_ctime.tv_sec); | |
3985 | btrfs_set_token_timespec_nsec(&token, &item->ctime, | |
3986 | inode->i_ctime.tv_nsec); | |
3987 | ||
e593e54e FM |
3988 | /* |
3989 | * We do not need to set the nbytes field, in fact during a fast fsync | |
3990 | * its value may not even be correct, since a fast fsync does not wait | |
3991 | * for ordered extent completion, which is where we update nbytes, it | |
3992 | * only waits for writeback to complete. During log replay as we find | |
3993 | * file extent items and replay them, we adjust the nbytes field of the | |
3994 | * inode item in subvolume tree as needed (see overwrite_item()). | |
3995 | */ | |
cc4c13d5 DS |
3996 | |
3997 | btrfs_set_token_inode_sequence(&token, item, inode_peek_iversion(inode)); | |
3998 | btrfs_set_token_inode_transid(&token, item, trans->transid); | |
3999 | btrfs_set_token_inode_rdev(&token, item, inode->i_rdev); | |
77eea05e BB |
4000 | flags = btrfs_inode_combine_flags(BTRFS_I(inode)->flags, |
4001 | BTRFS_I(inode)->ro_flags); | |
4002 | btrfs_set_token_inode_flags(&token, item, flags); | |
cc4c13d5 | 4003 | btrfs_set_token_inode_block_group(&token, item, 0); |
94edf4ae JB |
4004 | } |
4005 | ||
a95249b3 JB |
4006 | static int log_inode_item(struct btrfs_trans_handle *trans, |
4007 | struct btrfs_root *log, struct btrfs_path *path, | |
2ac691d8 | 4008 | struct btrfs_inode *inode, bool inode_item_dropped) |
a95249b3 JB |
4009 | { |
4010 | struct btrfs_inode_item *inode_item; | |
a95249b3 JB |
4011 | int ret; |
4012 | ||
2ac691d8 FM |
4013 | /* |
4014 | * If we are doing a fast fsync and the inode was logged before in the | |
4015 | * current transaction, then we know the inode was previously logged and | |
4016 | * it exists in the log tree. For performance reasons, in this case use | |
4017 | * btrfs_search_slot() directly with ins_len set to 0 so that we never | |
4018 | * attempt a write lock on the leaf's parent, which adds unnecessary lock | |
4019 | * contention in case there are concurrent fsyncs for other inodes of the | |
4020 | * same subvolume. Using btrfs_insert_empty_item() when the inode item | |
4021 | * already exists can also result in unnecessarily splitting a leaf. | |
4022 | */ | |
4023 | if (!inode_item_dropped && inode->logged_trans == trans->transid) { | |
4024 | ret = btrfs_search_slot(trans, log, &inode->location, path, 0, 1); | |
4025 | ASSERT(ret <= 0); | |
4026 | if (ret > 0) | |
4027 | ret = -ENOENT; | |
4028 | } else { | |
4029 | /* | |
4030 | * This means it is the first fsync in the current transaction, | |
4031 | * so the inode item is not in the log and we need to insert it. | |
4032 | * We can never get -EEXIST because we are only called for a fast | |
4033 | * fsync and in case an inode eviction happens after the inode was | |
4034 | * logged before in the current transaction, when we load again | |
4035 | * the inode, we set BTRFS_INODE_NEEDS_FULL_SYNC on its runtime | |
4036 | * flags and set ->logged_trans to 0. | |
4037 | */ | |
4038 | ret = btrfs_insert_empty_item(trans, log, path, &inode->location, | |
4039 | sizeof(*inode_item)); | |
4040 | ASSERT(ret != -EEXIST); | |
4041 | } | |
4042 | if (ret) | |
a95249b3 JB |
4043 | return ret; |
4044 | inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0], | |
4045 | struct btrfs_inode_item); | |
6d889a3b NB |
4046 | fill_inode_item(trans, path->nodes[0], inode_item, &inode->vfs_inode, |
4047 | 0, 0); | |
a95249b3 JB |
4048 | btrfs_release_path(path); |
4049 | return 0; | |
4050 | } | |
4051 | ||
40e046ac | 4052 | static int log_csums(struct btrfs_trans_handle *trans, |
3ebac17c | 4053 | struct btrfs_inode *inode, |
40e046ac FM |
4054 | struct btrfs_root *log_root, |
4055 | struct btrfs_ordered_sum *sums) | |
4056 | { | |
e289f03e FM |
4057 | const u64 lock_end = sums->bytenr + sums->len - 1; |
4058 | struct extent_state *cached_state = NULL; | |
40e046ac FM |
4059 | int ret; |
4060 | ||
3ebac17c FM |
4061 | /* |
4062 | * If this inode was not used for reflink operations in the current | |
4063 | * transaction with new extents, then do the fast path, no need to | |
4064 | * worry about logging checksum items with overlapping ranges. | |
4065 | */ | |
4066 | if (inode->last_reflink_trans < trans->transid) | |
4067 | return btrfs_csum_file_blocks(trans, log_root, sums); | |
4068 | ||
e289f03e FM |
4069 | /* |
4070 | * Serialize logging for checksums. This is to avoid racing with the | |
4071 | * same checksum being logged by another task that is logging another | |
4072 | * file which happens to refer to the same extent as well. Such races | |
4073 | * can leave checksum items in the log with overlapping ranges. | |
4074 | */ | |
4075 | ret = lock_extent_bits(&log_root->log_csum_range, sums->bytenr, | |
4076 | lock_end, &cached_state); | |
4077 | if (ret) | |
4078 | return ret; | |
40e046ac FM |
4079 | /* |
4080 | * Due to extent cloning, we might have logged a csum item that covers a | |
4081 | * subrange of a cloned extent, and later we can end up logging a csum | |
4082 | * item for a larger subrange of the same extent or the entire range. | |
4083 | * This would leave csum items in the log tree that cover the same range | |
4084 | * and break the searches for checksums in the log tree, resulting in | |
4085 | * some checksums missing in the fs/subvolume tree. So just delete (or | |
4086 | * trim and adjust) any existing csum items in the log for this range. | |
4087 | */ | |
4088 | ret = btrfs_del_csums(trans, log_root, sums->bytenr, sums->len); | |
e289f03e FM |
4089 | if (!ret) |
4090 | ret = btrfs_csum_file_blocks(trans, log_root, sums); | |
40e046ac | 4091 | |
e289f03e FM |
4092 | unlock_extent_cached(&log_root->log_csum_range, sums->bytenr, lock_end, |
4093 | &cached_state); | |
4094 | ||
4095 | return ret; | |
40e046ac FM |
4096 | } |
4097 | ||
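A concrete, made-up set of numbers shows the overlap the slow path above defends against: extent E sits at disk bytenr 1 MiB with 128 KiB of data and is shared by two files through reflink.

/*
 * Illustrative timeline only (offsets are invented):
 *
 *   fsync(A): logs a csum item for [1M, 1M + 64K)     <- subrange of E
 *   fsync(B): logs a csum item for [1M, 1M + 128K)    <- whole of E
 *
 * Serializing on log_csum_range and calling btrfs_del_csums() first leaves a
 * single csum item covering [1M, 1M + 128K) in the log tree, instead of two
 * overlapping items that would confuse csum lookups during log replay.
 */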
31ff1cd2 | 4098 | static noinline int copy_items(struct btrfs_trans_handle *trans, |
44d70e19 | 4099 | struct btrfs_inode *inode, |
31ff1cd2 | 4100 | struct btrfs_path *dst_path, |
0e56315c | 4101 | struct btrfs_path *src_path, |
1a4bcf47 FM |
4102 | int start_slot, int nr, int inode_only, |
4103 | u64 logged_isize) | |
31ff1cd2 | 4104 | { |
3ffbd68c | 4105 | struct btrfs_fs_info *fs_info = trans->fs_info; |
31ff1cd2 CM |
4106 | unsigned long src_offset; |
4107 | unsigned long dst_offset; | |
44d70e19 | 4108 | struct btrfs_root *log = inode->root->log_root; |
31ff1cd2 CM |
4109 | struct btrfs_file_extent_item *extent; |
4110 | struct btrfs_inode_item *inode_item; | |
16e7549f | 4111 | struct extent_buffer *src = src_path->nodes[0]; |
31ff1cd2 CM |
4112 | int ret; |
4113 | struct btrfs_key *ins_keys; | |
4114 | u32 *ins_sizes; | |
4115 | char *ins_data; | |
4116 | int i; | |
d20f7043 | 4117 | struct list_head ordered_sums; |
44d70e19 | 4118 | int skip_csum = inode->flags & BTRFS_INODE_NODATASUM; |
d20f7043 CM |
4119 | |
4120 | INIT_LIST_HEAD(&ordered_sums); | |
31ff1cd2 CM |
4121 | |
4122 | ins_data = kmalloc(nr * sizeof(struct btrfs_key) + | |
4123 | nr * sizeof(u32), GFP_NOFS); | |
2a29edc6 | 4124 | if (!ins_data) |
4125 | return -ENOMEM; | |
4126 | ||
31ff1cd2 CM |
4127 | ins_sizes = (u32 *)ins_data; |
4128 | ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32)); | |
4129 | ||
4130 | for (i = 0; i < nr; i++) { | |
4131 | ins_sizes[i] = btrfs_item_size_nr(src, i + start_slot); | |
4132 | btrfs_item_key_to_cpu(src, ins_keys + i, i + start_slot); | |
4133 | } | |
4134 | ret = btrfs_insert_empty_items(trans, log, dst_path, | |
4135 | ins_keys, ins_sizes, nr); | |
4a500fd1 YZ |
4136 | if (ret) { |
4137 | kfree(ins_data); | |
4138 | return ret; | |
4139 | } | |
31ff1cd2 | 4140 | |
5d4f98a2 | 4141 | for (i = 0; i < nr; i++, dst_path->slots[0]++) { |
31ff1cd2 CM |
4142 | dst_offset = btrfs_item_ptr_offset(dst_path->nodes[0], |
4143 | dst_path->slots[0]); | |
4144 | ||
4145 | src_offset = btrfs_item_ptr_offset(src, start_slot + i); | |
4146 | ||
94edf4ae | 4147 | if (ins_keys[i].type == BTRFS_INODE_ITEM_KEY) { |
31ff1cd2 CM |
4148 | inode_item = btrfs_item_ptr(dst_path->nodes[0], |
4149 | dst_path->slots[0], | |
4150 | struct btrfs_inode_item); | |
94edf4ae | 4151 | fill_inode_item(trans, dst_path->nodes[0], inode_item, |
f85b7379 DS |
4152 | &inode->vfs_inode, |
4153 | inode_only == LOG_INODE_EXISTS, | |
1a4bcf47 | 4154 | logged_isize); |
94edf4ae JB |
4155 | } else { |
4156 | copy_extent_buffer(dst_path->nodes[0], src, dst_offset, | |
4157 | src_offset, ins_sizes[i]); | |
31ff1cd2 | 4158 | } |
94edf4ae | 4159 | |
31ff1cd2 CM |
4160 | /* take a reference on file data extents so that truncates |
4161 | * or deletes of this inode don't have to relog the inode | |
4162 | * again | |
4163 | */ | |
962a298f | 4164 | if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY && |
d2794405 | 4165 | !skip_csum) { |
31ff1cd2 CM |
4166 | int found_type; |
4167 | extent = btrfs_item_ptr(src, start_slot + i, | |
4168 | struct btrfs_file_extent_item); | |
4169 | ||
8e531cdf | 4170 | if (btrfs_file_extent_generation(src, extent) < trans->transid) |
4171 | continue; | |
4172 | ||
31ff1cd2 | 4173 | found_type = btrfs_file_extent_type(src, extent); |
6f1fed77 | 4174 | if (found_type == BTRFS_FILE_EXTENT_REG) { |
5d4f98a2 YZ |
4175 | u64 ds, dl, cs, cl; |
4176 | ds = btrfs_file_extent_disk_bytenr(src, | |
4177 | extent); | |
4178 | /* ds == 0 is a hole */ | |
4179 | if (ds == 0) | |
4180 | continue; | |
4181 | ||
4182 | dl = btrfs_file_extent_disk_num_bytes(src, | |
4183 | extent); | |
4184 | cs = btrfs_file_extent_offset(src, extent); | |
4185 | cl = btrfs_file_extent_num_bytes(src, | |
a419aef8 | 4186 | extent); |
580afd76 CM |
4187 | if (btrfs_file_extent_compression(src, |
4188 | extent)) { | |
4189 | cs = 0; | |
4190 | cl = dl; | |
4191 | } | |
5d4f98a2 YZ |
4192 | |
4193 | ret = btrfs_lookup_csums_range( | |
0b246afa | 4194 | fs_info->csum_root, |
5d4f98a2 | 4195 | ds + cs, ds + cs + cl - 1, |
a2de733c | 4196 | &ordered_sums, 0); |
4f26433e FM |
4197 | if (ret) |
4198 | break; | |
31ff1cd2 CM |
4199 | } |
4200 | } | |
31ff1cd2 CM |
4201 | } |
4202 | ||
4203 | btrfs_mark_buffer_dirty(dst_path->nodes[0]); | |
b3b4aa74 | 4204 | btrfs_release_path(dst_path); |
31ff1cd2 | 4205 | kfree(ins_data); |
d20f7043 CM |
4206 | |
4207 | /* | |
4208 | * we have to do this after the loop above to avoid changing the | |
4209 | * log tree while trying to change the log tree. | |
4210 | */ | |
d397712b | 4211 | while (!list_empty(&ordered_sums)) { |
d20f7043 CM |
4212 | struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next, |
4213 | struct btrfs_ordered_sum, | |
4214 | list); | |
4a500fd1 | 4215 | if (!ret) |
3ebac17c | 4216 | ret = log_csums(trans, inode, log, sums); |
d20f7043 CM |
4217 | list_del(&sums->list); |
4218 | kfree(sums); | |
4219 | } | |
16e7549f | 4220 | |
4a500fd1 | 4221 | return ret; |
31ff1cd2 CM |
4222 | } |
4223 | ||
4f0f586b ST |
4224 | static int extent_cmp(void *priv, const struct list_head *a, |
4225 | const struct list_head *b) | |
5dc562c5 | 4226 | { |
214cc184 | 4227 | const struct extent_map *em1, *em2; |
5dc562c5 JB |
4228 | |
4229 | em1 = list_entry(a, struct extent_map, list); | |
4230 | em2 = list_entry(b, struct extent_map, list); | |
4231 | ||
4232 | if (em1->start < em2->start) | |
4233 | return -1; | |
4234 | else if (em1->start > em2->start) | |
4235 | return 1; | |
4236 | return 0; | |
4237 | } | |
4238 | ||
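A hedged usage note for the comparator above (the wrapper is invented): it is meant for list_sort() over the extent maps queued for logging, ordering them by file offset before each map is logged; <linux/list_sort.h> is already included by this file.

/* Illustrative only: sort queued extent maps by file offset before logging. */
static void example_sort_extents(struct list_head *extents)
{
        list_sort(NULL, extents, extent_cmp);
}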
e7175a69 JB |
4239 | static int log_extent_csums(struct btrfs_trans_handle *trans, |
4240 | struct btrfs_inode *inode, | |
a9ecb653 | 4241 | struct btrfs_root *log_root, |
48778179 FM |
4242 | const struct extent_map *em, |
4243 | struct btrfs_log_ctx *ctx) | |
5dc562c5 | 4244 | { |
48778179 | 4245 | struct btrfs_ordered_extent *ordered; |
2ab28f32 JB |
4246 | u64 csum_offset; |
4247 | u64 csum_len; | |
48778179 FM |
4248 | u64 mod_start = em->mod_start; |
4249 | u64 mod_len = em->mod_len; | |
8407f553 FM |
4250 | LIST_HEAD(ordered_sums); |
4251 | int ret = 0; | |
0aa4a17d | 4252 | |
e7175a69 JB |
4253 | if (inode->flags & BTRFS_INODE_NODATASUM || |
4254 | test_bit(EXTENT_FLAG_PREALLOC, &em->flags) || | |
8407f553 | 4255 | em->block_start == EXTENT_MAP_HOLE) |
70c8a91c | 4256 | return 0; |
5dc562c5 | 4257 | |
48778179 FM |
4258 | list_for_each_entry(ordered, &ctx->ordered_extents, log_list) { |
4259 | const u64 ordered_end = ordered->file_offset + ordered->num_bytes; | |
4260 | const u64 mod_end = mod_start + mod_len; | |
4261 | struct btrfs_ordered_sum *sums; | |
4262 | ||
4263 | if (mod_len == 0) | |
4264 | break; | |
4265 | ||
4266 | if (ordered_end <= mod_start) | |
4267 | continue; | |
4268 | if (mod_end <= ordered->file_offset) | |
4269 | break; | |
4270 | ||
4271 | /* | |
4272 | * We are going to copy all the csums on this ordered extent, so | |
4273 | * go ahead and adjust mod_start and mod_len in case this ordered | |
4274 | * extent has already been logged. | |
4275 | */ | |
4276 | if (ordered->file_offset > mod_start) { | |
4277 | if (ordered_end >= mod_end) | |
4278 | mod_len = ordered->file_offset - mod_start; | |
4279 | /* | |
4280 | * If we have this case | |
4281 | * | |
4282 | * |--------- logged extent ---------| | |
4283 | * |----- ordered extent ----| | |
4284 | * | |
4285 | * Just don't mess with mod_start and mod_len, we'll | |
4286 | * just end up logging more csums than we need and it | |
4287 | * will be ok. | |
4288 | */ | |
4289 | } else { | |
4290 | if (ordered_end < mod_end) { | |
4291 | mod_len = mod_end - ordered_end; | |
4292 | mod_start = ordered_end; | |
4293 | } else { | |
4294 | mod_len = 0; | |
4295 | } | |
4296 | } | |
4297 | ||
4298 | /* | |
4299 | * To keep us from looping for the above case of an ordered | |
4300 | * extent that falls inside of the logged extent. | |
4301 | */ | |
4302 | if (test_and_set_bit(BTRFS_ORDERED_LOGGED_CSUM, &ordered->flags)) | |
4303 | continue; | |
4304 | ||
4305 | list_for_each_entry(sums, &ordered->list, list) { | |
4306 | ret = log_csums(trans, inode, log_root, sums); | |
4307 | if (ret) | |
4308 | return ret; | |
4309 | } | |
4310 | } | |
4311 | ||
4312 | /* We're done, found all csums in the ordered extents. */ | |
4313 | if (mod_len == 0) | |
4314 | return 0; | |
4315 | ||
e7175a69 | 4316 | /* If we're compressed we have to save the entire range of csums. */ |
488111aa FDBM |
4317 | if (em->compress_type) { |
4318 | csum_offset = 0; | |
8407f553 | 4319 | csum_len = max(em->block_len, em->orig_block_len); |
488111aa | 4320 | } else { |
48778179 FM |
4321 | csum_offset = mod_start - em->start; |
4322 | csum_len = mod_len; | |
488111aa | 4323 | } |
2ab28f32 | 4324 | |
70c8a91c | 4325 | /* block start is already adjusted for the file extent offset. */ |
a9ecb653 | 4326 | ret = btrfs_lookup_csums_range(trans->fs_info->csum_root, |
70c8a91c JB |
4327 | em->block_start + csum_offset, |
4328 | em->block_start + csum_offset + | |
4329 | csum_len - 1, &ordered_sums, 0); | |
4330 | if (ret) | |
4331 | return ret; | |
5dc562c5 | 4332 | |
70c8a91c JB |
4333 | while (!list_empty(&ordered_sums)) { |
4334 | struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next, | |
4335 | struct btrfs_ordered_sum, | |
4336 | list); | |
4337 | if (!ret) | |
3ebac17c | 4338 | ret = log_csums(trans, inode, log_root, sums); |
70c8a91c JB |
4339 | list_del(&sums->list); |
4340 | kfree(sums); | |
5dc562c5 JB |
4341 | } |
4342 | ||
70c8a91c | 4343 | return ret; |
5dc562c5 JB |
4344 | } |
4345 | ||
8407f553 | 4346 | static int log_one_extent(struct btrfs_trans_handle *trans, |
9d122629 | 4347 | struct btrfs_inode *inode, struct btrfs_root *root, |
8407f553 FM |
4348 | const struct extent_map *em, |
4349 | struct btrfs_path *path, | |
8407f553 FM |
4350 | struct btrfs_log_ctx *ctx) |
4351 | { | |
5893dfb9 | 4352 | struct btrfs_drop_extents_args drop_args = { 0 }; |
8407f553 FM |
4353 | struct btrfs_root *log = root->log_root; |
4354 | struct btrfs_file_extent_item *fi; | |
4355 | struct extent_buffer *leaf; | |
4356 | struct btrfs_map_token token; | |
4357 | struct btrfs_key key; | |
4358 | u64 extent_offset = em->start - em->orig_start; | |
4359 | u64 block_len; | |
4360 | int ret; | |
8407f553 | 4361 | |
48778179 | 4362 | ret = log_extent_csums(trans, inode, log, em, ctx); |
8407f553 FM |
4363 | if (ret) |
4364 | return ret; | |
4365 | ||
5893dfb9 FM |
4366 | drop_args.path = path; |
4367 | drop_args.start = em->start; | |
4368 | drop_args.end = em->start + em->len; | |
4369 | drop_args.replace_extent = true; | |
4370 | drop_args.extent_item_size = sizeof(*fi); | |
4371 | ret = btrfs_drop_extents(trans, log, inode, &drop_args); | |
8407f553 FM |
4372 | if (ret) |
4373 | return ret; | |
4374 | ||
5893dfb9 | 4375 | if (!drop_args.extent_inserted) { |
9d122629 | 4376 | key.objectid = btrfs_ino(inode); |
8407f553 FM |
4377 | key.type = BTRFS_EXTENT_DATA_KEY; |
4378 | key.offset = em->start; | |
4379 | ||
4380 | ret = btrfs_insert_empty_item(trans, log, path, &key, | |
4381 | sizeof(*fi)); | |
4382 | if (ret) | |
4383 | return ret; | |
4384 | } | |
4385 | leaf = path->nodes[0]; | |
c82f823c | 4386 | btrfs_init_map_token(&token, leaf); |
8407f553 FM |
4387 | fi = btrfs_item_ptr(leaf, path->slots[0], |
4388 | struct btrfs_file_extent_item); | |
4389 | ||
cc4c13d5 | 4390 | btrfs_set_token_file_extent_generation(&token, fi, trans->transid); |
8407f553 | 4391 | if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) |
cc4c13d5 DS |
4392 | btrfs_set_token_file_extent_type(&token, fi, |
4393 | BTRFS_FILE_EXTENT_PREALLOC); | |
8407f553 | 4394 | else |
cc4c13d5 DS |
4395 | btrfs_set_token_file_extent_type(&token, fi, |
4396 | BTRFS_FILE_EXTENT_REG); | |
8407f553 FM |
4397 | |
4398 | block_len = max(em->block_len, em->orig_block_len); | |
4399 | if (em->compress_type != BTRFS_COMPRESS_NONE) { | |
cc4c13d5 DS |
4400 | btrfs_set_token_file_extent_disk_bytenr(&token, fi, |
4401 | em->block_start); | |
4402 | btrfs_set_token_file_extent_disk_num_bytes(&token, fi, block_len); | |
8407f553 | 4403 | } else if (em->block_start < EXTENT_MAP_LAST_BYTE) { |
cc4c13d5 | 4404 | btrfs_set_token_file_extent_disk_bytenr(&token, fi, |
8407f553 | 4405 | em->block_start - |
cc4c13d5 DS |
4406 | extent_offset); |
4407 | btrfs_set_token_file_extent_disk_num_bytes(&token, fi, block_len); | |
8407f553 | 4408 | } else { |
cc4c13d5 DS |
4409 | btrfs_set_token_file_extent_disk_bytenr(&token, fi, 0); |
4410 | btrfs_set_token_file_extent_disk_num_bytes(&token, fi, 0); | |
8407f553 FM |
4411 | } |
4412 | ||
cc4c13d5 DS |
4413 | btrfs_set_token_file_extent_offset(&token, fi, extent_offset); |
4414 | btrfs_set_token_file_extent_num_bytes(&token, fi, em->len); | |
4415 | btrfs_set_token_file_extent_ram_bytes(&token, fi, em->ram_bytes); | |
4416 | btrfs_set_token_file_extent_compression(&token, fi, em->compress_type); | |
4417 | btrfs_set_token_file_extent_encryption(&token, fi, 0); | |
4418 | btrfs_set_token_file_extent_other_encoding(&token, fi, 0); | |
8407f553 FM |
4419 | btrfs_mark_buffer_dirty(leaf); |
4420 | ||
4421 | btrfs_release_path(path); | |
4422 | ||
4423 | return ret; | |
4424 | } | |
4425 | ||
31d11b83 FM |
4426 | /* |
4427 | * Log all prealloc extents beyond the inode's i_size to make sure we do not | |
4428 | * lose them after doing a fast fsync and replaying the log. We scan the | |
4429 | * subvolume's root instead of iterating the inode's extent map tree because | |
4430 | * otherwise we can log incorrect extent items based on extent map conversion. | |
4431 | * That can happen due to the fact that extent maps are merged when they | |
4432 | * are not in the extent map tree's list of modified extents. | |
4433 | */ | |
4434 | static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans, | |
4435 | struct btrfs_inode *inode, | |
4436 | struct btrfs_path *path) | |
4437 | { | |
4438 | struct btrfs_root *root = inode->root; | |
4439 | struct btrfs_key key; | |
4440 | const u64 i_size = i_size_read(&inode->vfs_inode); | |
4441 | const u64 ino = btrfs_ino(inode); | |
4442 | struct btrfs_path *dst_path = NULL; | |
0e56315c | 4443 | bool dropped_extents = false; |
f135cea3 FM |
4444 | u64 truncate_offset = i_size; |
4445 | struct extent_buffer *leaf; | |
4446 | int slot; | |
31d11b83 FM |
4447 | int ins_nr = 0; |
4448 | int start_slot; | |
4449 | int ret; | |
4450 | ||
4451 | if (!(inode->flags & BTRFS_INODE_PREALLOC)) | |
4452 | return 0; | |
4453 | ||
4454 | key.objectid = ino; | |
4455 | key.type = BTRFS_EXTENT_DATA_KEY; | |
4456 | key.offset = i_size; | |
4457 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | |
4458 | if (ret < 0) | |
4459 | goto out; | |
4460 | ||
f135cea3 FM |
4461 | /* |
4462 | * We must check if there is a prealloc extent that starts before the | |
4463 | * i_size and crosses the i_size boundary. This is to ensure later we | |
4464 | * truncate down to the end of that extent and not to the i_size, as | |
4465 | * otherwise we end up losing part of the prealloc extent after a log | |
4466 | * replay and with an implicit hole if there is another prealloc extent | |
4467 | * that starts at an offset beyond i_size. | |
4468 | */ | |
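/*
 * For example (hypothetical offsets): with i_size == 90K and a prealloc extent
 * covering the file range [64K, 128K), the extent crosses i_size, so
 * truncate_offset becomes 128K instead of 90K. Truncating the log at 90K would
 * drop the [90K, 128K) tail of that extent, and after a log replay that range
 * would show up as an implicit hole if some other prealloc extent starts at an
 * offset beyond i_size.
 */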
4469 | ret = btrfs_previous_item(root, path, ino, BTRFS_EXTENT_DATA_KEY); | |
4470 | if (ret < 0) | |
4471 | goto out; | |
4472 | ||
4473 | if (ret == 0) { | |
4474 | struct btrfs_file_extent_item *ei; | |
4475 | ||
4476 | leaf = path->nodes[0]; | |
4477 | slot = path->slots[0]; | |
4478 | ei = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item); | |
4479 | ||
4480 | if (btrfs_file_extent_type(leaf, ei) == | |
4481 | BTRFS_FILE_EXTENT_PREALLOC) { | |
4482 | u64 extent_end; | |
4483 | ||
4484 | btrfs_item_key_to_cpu(leaf, &key, slot); | |
4485 | extent_end = key.offset + | |
4486 | btrfs_file_extent_num_bytes(leaf, ei); | |
4487 | ||
4488 | if (extent_end > i_size) | |
4489 | truncate_offset = extent_end; | |
4490 | } | |
4491 | } else { | |
4492 | ret = 0; | |
4493 | } | |
4494 | ||
31d11b83 | 4495 | while (true) { |
f135cea3 FM |
4496 | leaf = path->nodes[0]; |
4497 | slot = path->slots[0]; | |
31d11b83 FM |
4498 | |
4499 | if (slot >= btrfs_header_nritems(leaf)) { | |
4500 | if (ins_nr > 0) { | |
4501 | ret = copy_items(trans, inode, dst_path, path, | |
0e56315c | 4502 | start_slot, ins_nr, 1, 0); |
31d11b83 FM |
4503 | if (ret < 0) |
4504 | goto out; | |
4505 | ins_nr = 0; | |
4506 | } | |
4507 | ret = btrfs_next_leaf(root, path); | |
4508 | if (ret < 0) | |
4509 | goto out; | |
4510 | if (ret > 0) { | |
4511 | ret = 0; | |
4512 | break; | |
4513 | } | |
4514 | continue; | |
4515 | } | |
4516 | ||
4517 | btrfs_item_key_to_cpu(leaf, &key, slot); | |
4518 | if (key.objectid > ino) | |
4519 | break; | |
4520 | if (WARN_ON_ONCE(key.objectid < ino) || | |
4521 | key.type < BTRFS_EXTENT_DATA_KEY || | |
4522 | key.offset < i_size) { | |
4523 | path->slots[0]++; | |
4524 | continue; | |
4525 | } | |
0e56315c | 4526 | if (!dropped_extents) { |
31d11b83 FM |
4527 | /* |
4528 | * Avoid logging extent items that were logged in past fsync calls, | |
4529 | * which would lead to duplicate keys in the log tree. | |
4530 | */ | |
4531 | do { | |
4532 | ret = btrfs_truncate_inode_items(trans, | |
4533 | root->log_root, | |
50743398 | 4534 | inode, truncate_offset, |
0d7d3165 FM |
4535 | BTRFS_EXTENT_DATA_KEY, |
4536 | NULL); | |
31d11b83 FM |
4537 | } while (ret == -EAGAIN); |
4538 | if (ret) | |
4539 | goto out; | |
0e56315c | 4540 | dropped_extents = true; |
31d11b83 FM |
4541 | } |
4542 | if (ins_nr == 0) | |
4543 | start_slot = slot; | |
4544 | ins_nr++; | |
4545 | path->slots[0]++; | |
4546 | if (!dst_path) { | |
4547 | dst_path = btrfs_alloc_path(); | |
4548 | if (!dst_path) { | |
4549 | ret = -ENOMEM; | |
4550 | goto out; | |
4551 | } | |
4552 | } | |
4553 | } | |
0bc2d3c0 | 4554 | if (ins_nr > 0) |
0e56315c | 4555 | ret = copy_items(trans, inode, dst_path, path, |
31d11b83 | 4556 | start_slot, ins_nr, 1, 0); |
31d11b83 FM |
4557 | out: |
4558 | btrfs_release_path(path); | |
4559 | btrfs_free_path(dst_path); | |
4560 | return ret; | |
4561 | } | |
4562 | ||
5dc562c5 JB |
4563 | static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans, |
4564 | struct btrfs_root *root, | |
9d122629 | 4565 | struct btrfs_inode *inode, |
827463c4 | 4566 | struct btrfs_path *path, |
48778179 | 4567 | struct btrfs_log_ctx *ctx) |
5dc562c5 | 4568 | { |
48778179 FM |
4569 | struct btrfs_ordered_extent *ordered; |
4570 | struct btrfs_ordered_extent *tmp; | |
5dc562c5 JB |
4571 | struct extent_map *em, *n; |
4572 | struct list_head extents; | |
9d122629 | 4573 | struct extent_map_tree *tree = &inode->extent_tree; |
5dc562c5 | 4574 | int ret = 0; |
2ab28f32 | 4575 | int num = 0; |
5dc562c5 JB |
4576 | |
4577 | INIT_LIST_HEAD(&extents); | |
4578 | ||
5dc562c5 | 4579 | write_lock(&tree->lock); |
5dc562c5 JB |
4580 | |
4581 | list_for_each_entry_safe(em, n, &tree->modified_extents, list) { | |
4582 | list_del_init(&em->list); | |
2ab28f32 JB |
4583 | /* |
4584 | * Just an arbitrary number. This can get really CPU intensive | |
4585 | * once we start getting a lot of extents, and once we have that | |
4586 | * many extents we just want to do a transaction commit since it | |
4587 | * will be faster anyway. | |
4588 | */ | |
4589 | if (++num > 32768) { | |
4590 | list_del_init(&tree->modified_extents); | |
4591 | ret = -EFBIG; | |
4592 | goto process; | |
4593 | } | |
4594 | ||
5f96bfb7 | 4595 | if (em->generation < trans->transid) |
5dc562c5 | 4596 | continue; |
8c6c5928 | 4597 | |
31d11b83 FM |
4598 | /* We log prealloc extents beyond eof later. */ |
4599 | if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) && | |
4600 | em->start >= i_size_read(&inode->vfs_inode)) | |
4601 | continue; | |
4602 | ||
ff44c6e3 | 4603 | /* Need a ref to keep it from getting evicted from cache */ |
490b54d6 | 4604 | refcount_inc(&em->refs); |
ff44c6e3 | 4605 | set_bit(EXTENT_FLAG_LOGGING, &em->flags); |
5dc562c5 | 4606 | list_add_tail(&em->list, &extents); |
2ab28f32 | 4607 | num++; |
5dc562c5 JB |
4608 | } |
4609 | ||
4610 | list_sort(NULL, &extents, extent_cmp); | |
2ab28f32 | 4611 | process: |
5dc562c5 JB |
4612 | while (!list_empty(&extents)) { |
4613 | em = list_entry(extents.next, struct extent_map, list); | |
4614 | ||
4615 | list_del_init(&em->list); | |
4616 | ||
4617 | /* | |
4618 | * If we had an error we just need to delete everybody from our | |
4619 | * private list. | |
4620 | */ | |
ff44c6e3 | 4621 | if (ret) { |
201a9038 | 4622 | clear_em_logging(tree, em); |
ff44c6e3 | 4623 | free_extent_map(em); |
5dc562c5 | 4624 | continue; |
ff44c6e3 JB |
4625 | } |
4626 | ||
4627 | write_unlock(&tree->lock); | |
5dc562c5 | 4628 | |
a2120a47 | 4629 | ret = log_one_extent(trans, inode, root, em, path, ctx); |
ff44c6e3 | 4630 | write_lock(&tree->lock); |
201a9038 JB |
4631 | clear_em_logging(tree, em); |
4632 | free_extent_map(em); | |
5dc562c5 | 4633 | } |
ff44c6e3 JB |
4634 | WARN_ON(!list_empty(&extents)); |
4635 | write_unlock(&tree->lock); | |
5dc562c5 | 4636 | |
5dc562c5 | 4637 | btrfs_release_path(path); |
31d11b83 FM |
4638 | if (!ret) |
4639 | ret = btrfs_log_prealloc_extents(trans, inode, path); | |
48778179 FM |
4640 | if (ret) |
4641 | return ret; | |
31d11b83 | 4642 | |
48778179 FM |
4643 | /* |
4644 | * We have logged all extents successfully, now make sure the commit of | |
4645 | * the current transaction waits for the ordered extents to complete | |
4646 | * before it commits and wipes out the log trees, otherwise we would | |
4647 | * lose data if an ordered extent completes after the transaction | |
4648 | * commits and a power failure happens after the transaction commit. | |
4649 | */ | |
4650 | list_for_each_entry_safe(ordered, tmp, &ctx->ordered_extents, log_list) { | |
4651 | list_del_init(&ordered->log_list); | |
4652 | set_bit(BTRFS_ORDERED_LOGGED, &ordered->flags); | |
4653 | ||
4654 | if (!test_bit(BTRFS_ORDERED_COMPLETE, &ordered->flags)) { | |
4655 | spin_lock_irq(&inode->ordered_tree.lock); | |
4656 | if (!test_bit(BTRFS_ORDERED_COMPLETE, &ordered->flags)) { | |
4657 | set_bit(BTRFS_ORDERED_PENDING, &ordered->flags); | |
4658 | atomic_inc(&trans->transaction->pending_ordered); | |
4659 | } | |
4660 | spin_unlock_irq(&inode->ordered_tree.lock); | |
4661 | } | |
4662 | btrfs_put_ordered_extent(ordered); | |
4663 | } | |
4664 | ||
4665 | return 0; | |
5dc562c5 JB |
4666 | } |
4667 | ||
481b01c0 | 4668 | static int logged_inode_size(struct btrfs_root *log, struct btrfs_inode *inode, |
1a4bcf47 FM |
4669 | struct btrfs_path *path, u64 *size_ret) |
4670 | { | |
4671 | struct btrfs_key key; | |
4672 | int ret; | |
4673 | ||
481b01c0 | 4674 | key.objectid = btrfs_ino(inode); |
1a4bcf47 FM |
4675 | key.type = BTRFS_INODE_ITEM_KEY; |
4676 | key.offset = 0; | |
4677 | ||
4678 | ret = btrfs_search_slot(NULL, log, &key, path, 0, 0); | |
4679 | if (ret < 0) { | |
4680 | return ret; | |
4681 | } else if (ret > 0) { | |
2f2ff0ee | 4682 | *size_ret = 0; |
1a4bcf47 FM |
4683 | } else { |
4684 | struct btrfs_inode_item *item; | |
4685 | ||
4686 | item = btrfs_item_ptr(path->nodes[0], path->slots[0], | |
4687 | struct btrfs_inode_item); | |
4688 | *size_ret = btrfs_inode_size(path->nodes[0], item); | |
bf504110 FM |
4689 | /* |
4690 | * If the in-memory inode's i_size is smaller than the inode | |
4691 | * size stored in the btree, return the inode's i_size, so | |
4692 | * that we get a correct inode size after replaying the log | |
4693 | * when before a power failure we had a shrinking truncate | |
4694 | * followed by addition of a new name (rename / new hard link). | |
4695 | * Otherwise return the inode size from the btree, to avoid | |
4696 | * data loss when replaying a log due to previously doing a | |
4697 | * write that expands the inode's size and logging a new name | |
4698 | * immediately after. | |
4699 | */ | |
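/*
 * Example with hypothetical sizes: the log tree has an inode item with an
 * i_size of 8K from a previous fsync, then in the current transaction the
 * file is truncated down to 4K and a new hard link is added, which logs the
 * inode in LOG_INODE_EXISTS mode. Returning the in-memory 4K here prevents
 * log replay from resurrecting the stale 8K size.
 */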
4700 | if (*size_ret > inode->vfs_inode.i_size) | |
4701 | *size_ret = inode->vfs_inode.i_size; | |
1a4bcf47 FM |
4702 | } |
4703 | ||
4704 | btrfs_release_path(path); | |
4705 | return 0; | |
4706 | } | |
4707 | ||
36283bf7 FM |
4708 | /* |
4709 | * At the moment we always log all xattrs. This is to figure out at log replay | |
4710 | * time which xattrs must have their deletion replayed. If a xattr is missing | |
4711 | * in the log tree and exists in the fs/subvol tree, we delete it. This is | |
4712 | * because if a xattr is deleted, the inode is fsynced and a power failure | |
4713 | * happens, then when the log is replayed on the next mount we want | |
4714 | * the xattr to not exist anymore (same behaviour as other filesystems | |
4715 | * with a journal, ext3/4, xfs, f2fs, etc). | |
4716 | */ | |
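/*
 * A possible reproducer for the deletion case (illustrative only):
 *
 * setfattr -n user.foo -v bar /mnt/file
 * sync
 * setfattr -x user.foo /mnt/file
 * xfs_io -c fsync /mnt/file
 * <power fail>
 * mount fs, trigger log replay
 *
 * Because the log no longer contains the user.foo xattr, replay deletes it
 * from the fs/subvol tree and the xattr stays removed.
 */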
4717 | static int btrfs_log_all_xattrs(struct btrfs_trans_handle *trans, | |
4718 | struct btrfs_root *root, | |
1a93c36a | 4719 | struct btrfs_inode *inode, |
36283bf7 FM |
4720 | struct btrfs_path *path, |
4721 | struct btrfs_path *dst_path) | |
4722 | { | |
4723 | int ret; | |
4724 | struct btrfs_key key; | |
1a93c36a | 4725 | const u64 ino = btrfs_ino(inode); |
36283bf7 FM |
4726 | int ins_nr = 0; |
4727 | int start_slot = 0; | |
f2f121ab FM |
4728 | bool found_xattrs = false; |
4729 | ||
4730 | if (test_bit(BTRFS_INODE_NO_XATTRS, &inode->runtime_flags)) | |
4731 | return 0; | |
36283bf7 FM |
4732 | |
4733 | key.objectid = ino; | |
4734 | key.type = BTRFS_XATTR_ITEM_KEY; | |
4735 | key.offset = 0; | |
4736 | ||
4737 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | |
4738 | if (ret < 0) | |
4739 | return ret; | |
4740 | ||
4741 | while (true) { | |
4742 | int slot = path->slots[0]; | |
4743 | struct extent_buffer *leaf = path->nodes[0]; | |
4744 | int nritems = btrfs_header_nritems(leaf); | |
4745 | ||
4746 | if (slot >= nritems) { | |
4747 | if (ins_nr > 0) { | |
1a93c36a | 4748 | ret = copy_items(trans, inode, dst_path, path, |
0e56315c | 4749 | start_slot, ins_nr, 1, 0); |
36283bf7 FM |
4750 | if (ret < 0) |
4751 | return ret; | |
4752 | ins_nr = 0; | |
4753 | } | |
4754 | ret = btrfs_next_leaf(root, path); | |
4755 | if (ret < 0) | |
4756 | return ret; | |
4757 | else if (ret > 0) | |
4758 | break; | |
4759 | continue; | |
4760 | } | |
4761 | ||
4762 | btrfs_item_key_to_cpu(leaf, &key, slot); | |
4763 | if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY) | |
4764 | break; | |
4765 | ||
4766 | if (ins_nr == 0) | |
4767 | start_slot = slot; | |
4768 | ins_nr++; | |
4769 | path->slots[0]++; | |
f2f121ab | 4770 | found_xattrs = true; |
36283bf7 FM |
4771 | cond_resched(); |
4772 | } | |
4773 | if (ins_nr > 0) { | |
1a93c36a | 4774 | ret = copy_items(trans, inode, dst_path, path, |
0e56315c | 4775 | start_slot, ins_nr, 1, 0); |
36283bf7 FM |
4776 | if (ret < 0) |
4777 | return ret; | |
4778 | } | |
4779 | ||
f2f121ab FM |
4780 | if (!found_xattrs) |
4781 | set_bit(BTRFS_INODE_NO_XATTRS, &inode->runtime_flags); | |
4782 | ||
36283bf7 FM |
4783 | return 0; |
4784 | } | |
4785 | ||
a89ca6f2 | 4786 | /* |
0e56315c FM |
4787 | * When using the NO_HOLES feature if we punched a hole that causes the |
4788 | * deletion of entire leafs or all the extent items of the first leaf (the one | |
4789 | * that contains the inode item and references) we may end up not processing | |
4790 | * any extents, because there are no leafs with a generation matching the | |
4791 | * current transaction that have extent items for our inode. So we need to find | |
4792 | * if any holes exist and then log them. We also need to log holes after any | |
4793 | * truncate operation that changes the inode's size. | |
a89ca6f2 | 4794 | */ |
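/*
 * One scenario where this matters (illustrative only):
 *
 * xfs_io -f -c "pwrite -S 0xab 0 256K" /mnt/foo
 * sync
 * xfs_io -c "fpunch 0 256K" -c "fsync" /mnt/foo
 * <power fail>
 *
 * With NO_HOLES the punched range leaves no extent items behind, so a fast
 * fsync may find nothing new to copy; unless the hole is logged explicitly,
 * a log replay could bring back the old 256K extent instead of leaving the
 * range as a hole.
 */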
0e56315c FM |
4795 | static int btrfs_log_holes(struct btrfs_trans_handle *trans, |
4796 | struct btrfs_root *root, | |
4797 | struct btrfs_inode *inode, | |
7af59743 | 4798 | struct btrfs_path *path) |
a89ca6f2 | 4799 | { |
0b246afa | 4800 | struct btrfs_fs_info *fs_info = root->fs_info; |
a89ca6f2 | 4801 | struct btrfs_key key; |
a0308dd7 NB |
4802 | const u64 ino = btrfs_ino(inode); |
4803 | const u64 i_size = i_size_read(&inode->vfs_inode); | |
7af59743 | 4804 | u64 prev_extent_end = 0; |
0e56315c | 4805 | int ret; |
a89ca6f2 | 4806 | |
0e56315c | 4807 | if (!btrfs_fs_incompat(fs_info, NO_HOLES) || i_size == 0) |
a89ca6f2 FM |
4808 | return 0; |
4809 | ||
4810 | key.objectid = ino; | |
4811 | key.type = BTRFS_EXTENT_DATA_KEY; | |
7af59743 | 4812 | key.offset = 0; |
a89ca6f2 FM |
4813 | |
4814 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | |
a89ca6f2 FM |
4815 | if (ret < 0) |
4816 | return ret; | |
4817 | ||
0e56315c | 4818 | while (true) { |
0e56315c | 4819 | struct extent_buffer *leaf = path->nodes[0]; |
a89ca6f2 | 4820 | |
0e56315c FM |
4821 | if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) { |
4822 | ret = btrfs_next_leaf(root, path); | |
4823 | if (ret < 0) | |
4824 | return ret; | |
4825 | if (ret > 0) { | |
4826 | ret = 0; | |
4827 | break; | |
4828 | } | |
4829 | leaf = path->nodes[0]; | |
4830 | } | |
4831 | ||
4832 | btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); | |
4833 | if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY) | |
4834 | break; | |
4835 | ||
4836 | /* We have a hole, log it. */ | |
4837 | if (prev_extent_end < key.offset) { | |
7af59743 | 4838 | const u64 hole_len = key.offset - prev_extent_end; |
0e56315c FM |
4839 | |
4840 | /* | |
4841 | * Release the path to avoid deadlocks with other code | |
4842 | * paths that search the root while holding locks on | |
4843 | * leafs from the log root. | |
4844 | */ | |
4845 | btrfs_release_path(path); | |
4846 | ret = btrfs_insert_file_extent(trans, root->log_root, | |
4847 | ino, prev_extent_end, 0, | |
4848 | 0, hole_len, 0, hole_len, | |
4849 | 0, 0, 0); | |
4850 | if (ret < 0) | |
4851 | return ret; | |
4852 | ||
4853 | /* | |
4854 | * Search for the same key again in the root. Since it's | |
4855 | * an extent item and we are holding the inode lock, the | |
4856 | * key must still exist. If it doesn't, just emit a warning | |
4857 | * and return an error to fall back to a transaction | |
4858 | * commit. | |
4859 | */ | |
4860 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | |
4861 | if (ret < 0) | |
4862 | return ret; | |
4863 | if (WARN_ON(ret > 0)) | |
4864 | return -ENOENT; | |
4865 | leaf = path->nodes[0]; | |
4866 | } | |
a89ca6f2 | 4867 | |
7af59743 | 4868 | prev_extent_end = btrfs_file_extent_end(path); |
0e56315c FM |
4869 | path->slots[0]++; |
4870 | cond_resched(); | |
a89ca6f2 | 4871 | } |
a89ca6f2 | 4872 | |
7af59743 | 4873 | if (prev_extent_end < i_size) { |
0e56315c | 4874 | u64 hole_len; |
a89ca6f2 | 4875 | |
0e56315c | 4876 | btrfs_release_path(path); |
7af59743 | 4877 | hole_len = ALIGN(i_size - prev_extent_end, fs_info->sectorsize); |
0e56315c FM |
4878 | ret = btrfs_insert_file_extent(trans, root->log_root, |
4879 | ino, prev_extent_end, 0, 0, | |
4880 | hole_len, 0, hole_len, | |
4881 | 0, 0, 0); | |
4882 | if (ret < 0) | |
4883 | return ret; | |
4884 | } | |
4885 | ||
4886 | return 0; | |
a89ca6f2 FM |
4887 | } |
4888 | ||
56f23fdb FM |
4889 | /* |
4890 | * When we are logging a new inode X, check if it doesn't have a reference that | |
4891 | * matches the reference from some other inode Y created in a past transaction | |
4892 | * and that was renamed in the current transaction. If we don't do this, then at | |
4893 | * log replay time we can lose inode Y (and all its files if it's a directory): | |
4894 | * | |
4895 | * mkdir /mnt/x | |
4896 | * echo "hello world" > /mnt/x/foobar | |
4897 | * sync | |
4898 | * mv /mnt/x /mnt/y | |
4899 | * mkdir /mnt/x # or touch /mnt/x | |
4900 | * xfs_io -c fsync /mnt/x | |
4901 | * <power fail> | |
4902 | * mount fs, trigger log replay | |
4903 | * | |
4904 | * After the log replay procedure, we would lose the first directory and all its | |
4905 | * files (file foobar). | |
4906 | * For the case where inode Y is not a directory we simply end up losing it: | |
4907 | * | |
4908 | * echo "123" > /mnt/foo | |
4909 | * sync | |
4910 | * mv /mnt/foo /mnt/bar | |
4911 | * echo "abc" > /mnt/foo | |
4912 | * xfs_io -c fsync /mnt/foo | |
4913 | * <power fail> | |
4914 | * | |
4915 | * We also need this for cases where a snapshot entry is replaced by some other | |
4916 | * entry (file or directory) otherwise we end up with an unreplayable log due to | |
4917 | * attempts to delete the snapshot entry (entry of type BTRFS_ROOT_ITEM_KEY) as | |
4918 | * if it were a regular entry: | |
4919 | * | |
4920 | * mkdir /mnt/x | |
4921 | * btrfs subvolume snapshot /mnt /mnt/x/snap | |
4922 | * btrfs subvolume delete /mnt/x/snap | |
4923 | * rmdir /mnt/x | |
4924 | * mkdir /mnt/x | |
4925 | * fsync /mnt/x or fsync some new file inside it | |
4926 | * <power fail> | |
4927 | * | |
4928 | * The snapshot delete, rmdir of x, mkdir of a new x and the fsync all happen in | |
4929 | * the same transaction. | |
4930 | */ | |
4931 | static int btrfs_check_ref_name_override(struct extent_buffer *eb, | |
4932 | const int slot, | |
4933 | const struct btrfs_key *key, | |
4791c8f1 | 4934 | struct btrfs_inode *inode, |
a3baaf0d | 4935 | u64 *other_ino, u64 *other_parent) |
56f23fdb FM |
4936 | { |
4937 | int ret; | |
4938 | struct btrfs_path *search_path; | |
4939 | char *name = NULL; | |
4940 | u32 name_len = 0; | |
4941 | u32 item_size = btrfs_item_size_nr(eb, slot); | |
4942 | u32 cur_offset = 0; | |
4943 | unsigned long ptr = btrfs_item_ptr_offset(eb, slot); | |
4944 | ||
4945 | search_path = btrfs_alloc_path(); | |
4946 | if (!search_path) | |
4947 | return -ENOMEM; | |
4948 | search_path->search_commit_root = 1; | |
4949 | search_path->skip_locking = 1; | |
4950 | ||
4951 | while (cur_offset < item_size) { | |
4952 | u64 parent; | |
4953 | u32 this_name_len; | |
4954 | u32 this_len; | |
4955 | unsigned long name_ptr; | |
4956 | struct btrfs_dir_item *di; | |
4957 | ||
4958 | if (key->type == BTRFS_INODE_REF_KEY) { | |
4959 | struct btrfs_inode_ref *iref; | |
4960 | ||
4961 | iref = (struct btrfs_inode_ref *)(ptr + cur_offset); | |
4962 | parent = key->offset; | |
4963 | this_name_len = btrfs_inode_ref_name_len(eb, iref); | |
4964 | name_ptr = (unsigned long)(iref + 1); | |
4965 | this_len = sizeof(*iref) + this_name_len; | |
4966 | } else { | |
4967 | struct btrfs_inode_extref *extref; | |
4968 | ||
4969 | extref = (struct btrfs_inode_extref *)(ptr + | |
4970 | cur_offset); | |
4971 | parent = btrfs_inode_extref_parent(eb, extref); | |
4972 | this_name_len = btrfs_inode_extref_name_len(eb, extref); | |
4973 | name_ptr = (unsigned long)&extref->name; | |
4974 | this_len = sizeof(*extref) + this_name_len; | |
4975 | } | |
4976 | ||
4977 | if (this_name_len > name_len) { | |
4978 | char *new_name; | |
4979 | ||
4980 | new_name = krealloc(name, this_name_len, GFP_NOFS); | |
4981 | if (!new_name) { | |
4982 | ret = -ENOMEM; | |
4983 | goto out; | |
4984 | } | |
4985 | name_len = this_name_len; | |
4986 | name = new_name; | |
4987 | } | |
4988 | ||
4989 | read_extent_buffer(eb, name, name_ptr, this_name_len); | |
4791c8f1 NB |
4990 | di = btrfs_lookup_dir_item(NULL, inode->root, search_path, |
4991 | parent, name, this_name_len, 0); | |
56f23fdb | 4992 | if (di && !IS_ERR(di)) { |
44f714da FM |
4993 | struct btrfs_key di_key; |
4994 | ||
4995 | btrfs_dir_item_key_to_cpu(search_path->nodes[0], | |
4996 | di, &di_key); | |
4997 | if (di_key.type == BTRFS_INODE_ITEM_KEY) { | |
6b5fc433 FM |
4998 | if (di_key.objectid != key->objectid) { |
4999 | ret = 1; | |
5000 | *other_ino = di_key.objectid; | |
a3baaf0d | 5001 | *other_parent = parent; |
6b5fc433 FM |
5002 | } else { |
5003 | ret = 0; | |
5004 | } | |
44f714da FM |
5005 | } else { |
5006 | ret = -EAGAIN; | |
5007 | } | |
56f23fdb FM |
5008 | goto out; |
5009 | } else if (IS_ERR(di)) { | |
5010 | ret = PTR_ERR(di); | |
5011 | goto out; | |
5012 | } | |
5013 | btrfs_release_path(search_path); | |
5014 | ||
5015 | cur_offset += this_len; | |
5016 | } | |
5017 | ret = 0; | |
5018 | out: | |
5019 | btrfs_free_path(search_path); | |
5020 | kfree(name); | |
5021 | return ret; | |
5022 | } | |
5023 | ||
6b5fc433 FM |
5024 | struct btrfs_ino_list { |
5025 | u64 ino; | |
a3baaf0d | 5026 | u64 parent; |
6b5fc433 FM |
5027 | struct list_head list; |
5028 | }; | |
5029 | ||
5030 | static int log_conflicting_inodes(struct btrfs_trans_handle *trans, | |
5031 | struct btrfs_root *root, | |
5032 | struct btrfs_path *path, | |
5033 | struct btrfs_log_ctx *ctx, | |
a3baaf0d | 5034 | u64 ino, u64 parent) |
6b5fc433 FM |
5035 | { |
5036 | struct btrfs_ino_list *ino_elem; | |
5037 | LIST_HEAD(inode_list); | |
5038 | int ret = 0; | |
5039 | ||
5040 | ino_elem = kmalloc(sizeof(*ino_elem), GFP_NOFS); | |
5041 | if (!ino_elem) | |
5042 | return -ENOMEM; | |
5043 | ino_elem->ino = ino; | |
a3baaf0d | 5044 | ino_elem->parent = parent; |
6b5fc433 FM |
5045 | list_add_tail(&ino_elem->list, &inode_list); |
5046 | ||
5047 | while (!list_empty(&inode_list)) { | |
5048 | struct btrfs_fs_info *fs_info = root->fs_info; | |
5049 | struct btrfs_key key; | |
5050 | struct inode *inode; | |
5051 | ||
5052 | ino_elem = list_first_entry(&inode_list, struct btrfs_ino_list, | |
5053 | list); | |
5054 | ino = ino_elem->ino; | |
a3baaf0d | 5055 | parent = ino_elem->parent; |
6b5fc433 FM |
5056 | list_del(&ino_elem->list); |
5057 | kfree(ino_elem); | |
5058 | if (ret) | |
5059 | continue; | |
5060 | ||
5061 | btrfs_release_path(path); | |
5062 | ||
0202e83f | 5063 | inode = btrfs_iget(fs_info->sb, ino, root); |
6b5fc433 FM |
5064 | /* |
5065 | * If the other inode that had a conflicting dir entry was | |
a3baaf0d FM |
5066 | * deleted in the current transaction, we need to log its parent |
5067 | * directory. | |
6b5fc433 FM |
5068 | */ |
5069 | if (IS_ERR(inode)) { | |
5070 | ret = PTR_ERR(inode); | |
a3baaf0d | 5071 | if (ret == -ENOENT) { |
0202e83f | 5072 | inode = btrfs_iget(fs_info->sb, parent, root); |
a3baaf0d FM |
5073 | if (IS_ERR(inode)) { |
5074 | ret = PTR_ERR(inode); | |
5075 | } else { | |
5076 | ret = btrfs_log_inode(trans, root, | |
5077 | BTRFS_I(inode), | |
5078 | LOG_OTHER_INODE_ALL, | |
48778179 | 5079 | ctx); |
410f954c | 5080 | btrfs_add_delayed_iput(inode); |
a3baaf0d FM |
5081 | } |
5082 | } | |
6b5fc433 FM |
5083 | continue; |
5084 | } | |
b5e4ff9d FM |
5085 | /* |
5086 | * If the inode was already logged skip it - otherwise we can | |
5087 | * hit an infinite loop. Example: | |
5088 | * | |
5089 | * From the commit root (previous transaction) we have the | |
5090 | * following inodes: | |
5091 | * | |
5092 | * inode 257 a directory | |
5093 | * inode 258 with references "zz" and "zz_link" on inode 257 | |
5094 | * inode 259 with reference "a" on inode 257 | |
5095 | * | |
5096 | * And in the current (uncommitted) transaction we have: | |
5097 | * | |
5098 | * inode 257 a directory, unchanged | |
5099 | * inode 258 with references "a" and "a2" on inode 257 | |
5100 | * inode 259 with reference "zz_link" on inode 257 | |
5101 | * inode 261 with reference "zz" on inode 257 | |
5102 | * | |
5103 | * When logging inode 261 the following infinite loop could | |
5104 | * happen if we don't skip already logged inodes: | |
5105 | * | |
5106 | * - we detect inode 258 as a conflicting inode, with inode 261 | |
5107 | * on reference "zz", and log it; | |
5108 | * | |
5109 | * - we detect inode 259 as a conflicting inode, with inode 258 | |
5110 | * on reference "a", and log it; | |
5111 | * | |
5112 | * - we detect inode 258 as a conflicting inode, with inode 259 | |
5113 | * on reference "zz_link", and log it - again! After this we | |
5114 | * repeat the above steps forever. | |
5115 | */ | |
5116 | spin_lock(&BTRFS_I(inode)->lock); | |
5117 | /* | |
5118 | * Check the inode's logged_trans only instead of | |
5119 | * btrfs_inode_in_log(). This is because the last_log_commit of | |
1f295373 FM |
5120 | * the inode is not updated when we only log that it exists (see |
5121 | * btrfs_log_inode()). | |
b5e4ff9d FM |
5122 | */ |
5123 | if (BTRFS_I(inode)->logged_trans == trans->transid) { | |
5124 | spin_unlock(&BTRFS_I(inode)->lock); | |
5125 | btrfs_add_delayed_iput(inode); | |
5126 | continue; | |
5127 | } | |
5128 | spin_unlock(&BTRFS_I(inode)->lock); | |
6b5fc433 FM |
5129 | /* |
5130 | * We are safe logging the other inode without acquiring its | |
5131 | * lock as long as we log with the LOG_INODE_EXISTS mode. We | |
5132 | * are safe against concurrent renames of the other inode as | |
5133 | * well because during a rename we pin the log and update the | |
5134 | * log with the new name before we unpin it. | |
5135 | */ | |
5136 | ret = btrfs_log_inode(trans, root, BTRFS_I(inode), | |
48778179 | 5137 | LOG_OTHER_INODE, ctx); |
6b5fc433 | 5138 | if (ret) { |
410f954c | 5139 | btrfs_add_delayed_iput(inode); |
6b5fc433 FM |
5140 | continue; |
5141 | } | |
5142 | ||
5143 | key.objectid = ino; | |
5144 | key.type = BTRFS_INODE_REF_KEY; | |
5145 | key.offset = 0; | |
5146 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | |
5147 | if (ret < 0) { | |
410f954c | 5148 | btrfs_add_delayed_iput(inode); |
6b5fc433 FM |
5149 | continue; |
5150 | } | |
5151 | ||
5152 | while (true) { | |
5153 | struct extent_buffer *leaf = path->nodes[0]; | |
5154 | int slot = path->slots[0]; | |
5155 | u64 other_ino = 0; | |
a3baaf0d | 5156 | u64 other_parent = 0; |
6b5fc433 FM |
5157 | |
5158 | if (slot >= btrfs_header_nritems(leaf)) { | |
5159 | ret = btrfs_next_leaf(root, path); | |
5160 | if (ret < 0) { | |
5161 | break; | |
5162 | } else if (ret > 0) { | |
5163 | ret = 0; | |
5164 | break; | |
5165 | } | |
5166 | continue; | |
5167 | } | |
5168 | ||
5169 | btrfs_item_key_to_cpu(leaf, &key, slot); | |
5170 | if (key.objectid != ino || | |
5171 | (key.type != BTRFS_INODE_REF_KEY && | |
5172 | key.type != BTRFS_INODE_EXTREF_KEY)) { | |
5173 | ret = 0; | |
5174 | break; | |
5175 | } | |
5176 | ||
5177 | ret = btrfs_check_ref_name_override(leaf, slot, &key, | |
a3baaf0d FM |
5178 | BTRFS_I(inode), &other_ino, |
5179 | &other_parent); | |
6b5fc433 FM |
5180 | if (ret < 0) |
5181 | break; | |
5182 | if (ret > 0) { | |
5183 | ino_elem = kmalloc(sizeof(*ino_elem), GFP_NOFS); | |
5184 | if (!ino_elem) { | |
5185 | ret = -ENOMEM; | |
5186 | break; | |
5187 | } | |
5188 | ino_elem->ino = other_ino; | |
a3baaf0d | 5189 | ino_elem->parent = other_parent; |
6b5fc433 FM |
5190 | list_add_tail(&ino_elem->list, &inode_list); |
5191 | ret = 0; | |
5192 | } | |
5193 | path->slots[0]++; | |
5194 | } | |
410f954c | 5195 | btrfs_add_delayed_iput(inode); |
6b5fc433 FM |
5196 | } |
5197 | ||
5198 | return ret; | |
5199 | } | |
5200 | ||
da447009 FM |
5201 | static int copy_inode_items_to_log(struct btrfs_trans_handle *trans, |
5202 | struct btrfs_inode *inode, | |
5203 | struct btrfs_key *min_key, | |
5204 | const struct btrfs_key *max_key, | |
5205 | struct btrfs_path *path, | |
5206 | struct btrfs_path *dst_path, | |
5207 | const u64 logged_isize, | |
5208 | const bool recursive_logging, | |
5209 | const int inode_only, | |
5210 | struct btrfs_log_ctx *ctx, | |
5211 | bool *need_log_inode_item) | |
5212 | { | |
5213 | struct btrfs_root *root = inode->root; | |
5214 | int ins_start_slot = 0; | |
5215 | int ins_nr = 0; | |
5216 | int ret; | |
5217 | ||
5218 | while (1) { | |
5219 | ret = btrfs_search_forward(root, min_key, path, trans->transid); | |
5220 | if (ret < 0) | |
5221 | return ret; | |
5222 | if (ret > 0) { | |
5223 | ret = 0; | |
5224 | break; | |
5225 | } | |
5226 | again: | |
5227 | /* Note, ins_nr might be > 0 here, cleanup outside the loop */ | |
5228 | if (min_key->objectid != max_key->objectid) | |
5229 | break; | |
5230 | if (min_key->type > max_key->type) | |
5231 | break; | |
5232 | ||
5233 | if (min_key->type == BTRFS_INODE_ITEM_KEY) | |
5234 | *need_log_inode_item = false; | |
5235 | ||
5236 | if ((min_key->type == BTRFS_INODE_REF_KEY || | |
5237 | min_key->type == BTRFS_INODE_EXTREF_KEY) && | |
5238 | inode->generation == trans->transid && | |
5239 | !recursive_logging) { | |
5240 | u64 other_ino = 0; | |
5241 | u64 other_parent = 0; | |
5242 | ||
5243 | ret = btrfs_check_ref_name_override(path->nodes[0], | |
5244 | path->slots[0], min_key, inode, | |
5245 | &other_ino, &other_parent); | |
5246 | if (ret < 0) { | |
5247 | return ret; | |
5248 | } else if (ret > 0 && ctx && | |
5249 | other_ino != btrfs_ino(BTRFS_I(ctx->inode))) { | |
5250 | if (ins_nr > 0) { | |
5251 | ins_nr++; | |
5252 | } else { | |
5253 | ins_nr = 1; | |
5254 | ins_start_slot = path->slots[0]; | |
5255 | } | |
5256 | ret = copy_items(trans, inode, dst_path, path, | |
5257 | ins_start_slot, ins_nr, | |
5258 | inode_only, logged_isize); | |
5259 | if (ret < 0) | |
5260 | return ret; | |
5261 | ins_nr = 0; | |
5262 | ||
5263 | ret = log_conflicting_inodes(trans, root, path, | |
5264 | ctx, other_ino, other_parent); | |
5265 | if (ret) | |
5266 | return ret; | |
5267 | btrfs_release_path(path); | |
5268 | goto next_key; | |
5269 | } | |
5270 | } | |
5271 | ||
5272 | /* Skip xattrs, we log them later with btrfs_log_all_xattrs() */ | |
5273 | if (min_key->type == BTRFS_XATTR_ITEM_KEY) { | |
5274 | if (ins_nr == 0) | |
5275 | goto next_slot; | |
5276 | ret = copy_items(trans, inode, dst_path, path, | |
5277 | ins_start_slot, | |
5278 | ins_nr, inode_only, logged_isize); | |
5279 | if (ret < 0) | |
5280 | return ret; | |
5281 | ins_nr = 0; | |
5282 | goto next_slot; | |
5283 | } | |
5284 | ||
5285 | if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) { | |
5286 | ins_nr++; | |
5287 | goto next_slot; | |
5288 | } else if (!ins_nr) { | |
5289 | ins_start_slot = path->slots[0]; | |
5290 | ins_nr = 1; | |
5291 | goto next_slot; | |
5292 | } | |
5293 | ||
5294 | ret = copy_items(trans, inode, dst_path, path, ins_start_slot, | |
5295 | ins_nr, inode_only, logged_isize); | |
5296 | if (ret < 0) | |
5297 | return ret; | |
5298 | ins_nr = 1; | |
5299 | ins_start_slot = path->slots[0]; | |
5300 | next_slot: | |
5301 | path->slots[0]++; | |
5302 | if (path->slots[0] < btrfs_header_nritems(path->nodes[0])) { | |
5303 | btrfs_item_key_to_cpu(path->nodes[0], min_key, | |
5304 | path->slots[0]); | |
5305 | goto again; | |
5306 | } | |
5307 | if (ins_nr) { | |
5308 | ret = copy_items(trans, inode, dst_path, path, | |
5309 | ins_start_slot, ins_nr, inode_only, | |
5310 | logged_isize); | |
5311 | if (ret < 0) | |
5312 | return ret; | |
5313 | ins_nr = 0; | |
5314 | } | |
5315 | btrfs_release_path(path); | |
5316 | next_key: | |
5317 | if (min_key->offset < (u64)-1) { | |
5318 | min_key->offset++; | |
5319 | } else if (min_key->type < max_key->type) { | |
5320 | min_key->type++; | |
5321 | min_key->offset = 0; | |
5322 | } else { | |
5323 | break; | |
5324 | } | |
5325 | } | |
5326 | if (ins_nr) | |
5327 | ret = copy_items(trans, inode, dst_path, path, ins_start_slot, | |
5328 | ins_nr, inode_only, logged_isize); | |
5329 | ||
5330 | return ret; | |
5331 | } | |
5332 | ||
e02119d5 CM |
5333 | /* log a single inode in the tree log. |
5334 | * At least one parent directory for this inode must exist in the tree | |
5335 | * or be logged already. | |
5336 | * | |
5337 | * Any items from this inode changed by the current transaction are copied | |
5338 | * to the log tree. An extra reference is taken on any extents in this | |
5339 | * file, allowing us to avoid a whole pile of corner cases around logging | |
5340 | * blocks that have been removed from the tree. | |
5341 | * | |
5342 | * See LOG_INODE_ALL and related defines for a description of what inode_only | |
5343 | * does. | |
5344 | * | |
5345 | * This handles both files and directories. | |
5346 | */ | |
12fcfd22 | 5347 | static int btrfs_log_inode(struct btrfs_trans_handle *trans, |
a59108a7 | 5348 | struct btrfs_root *root, struct btrfs_inode *inode, |
49dae1bc | 5349 | int inode_only, |
8407f553 | 5350 | struct btrfs_log_ctx *ctx) |
e02119d5 CM |
5351 | { |
5352 | struct btrfs_path *path; | |
5353 | struct btrfs_path *dst_path; | |
5354 | struct btrfs_key min_key; | |
5355 | struct btrfs_key max_key; | |
5356 | struct btrfs_root *log = root->log_root; | |
4a500fd1 | 5357 | int err = 0; |
8c8648dd | 5358 | int ret = 0; |
5dc562c5 | 5359 | bool fast_search = false; |
a59108a7 NB |
5360 | u64 ino = btrfs_ino(inode); |
5361 | struct extent_map_tree *em_tree = &inode->extent_tree; | |
1a4bcf47 | 5362 | u64 logged_isize = 0; |
e4545de5 | 5363 | bool need_log_inode_item = true; |
9a8fca62 | 5364 | bool xattrs_logged = false; |
a3baaf0d | 5365 | bool recursive_logging = false; |
2ac691d8 | 5366 | bool inode_item_dropped = true; |
e02119d5 | 5367 | |
e02119d5 | 5368 | path = btrfs_alloc_path(); |
5df67083 TI |
5369 | if (!path) |
5370 | return -ENOMEM; | |
e02119d5 | 5371 | dst_path = btrfs_alloc_path(); |
5df67083 TI |
5372 | if (!dst_path) { |
5373 | btrfs_free_path(path); | |
5374 | return -ENOMEM; | |
5375 | } | |
e02119d5 | 5376 | |
33345d01 | 5377 | min_key.objectid = ino; |
e02119d5 CM |
5378 | min_key.type = BTRFS_INODE_ITEM_KEY; |
5379 | min_key.offset = 0; | |
5380 | ||
33345d01 | 5381 | max_key.objectid = ino; |
12fcfd22 | 5382 | |
12fcfd22 | 5383 | |
5dc562c5 | 5384 | /* today the code can only do partial logging of directories */ |
a59108a7 | 5385 | if (S_ISDIR(inode->vfs_inode.i_mode) || |
5269b67e | 5386 | (!test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, |
a59108a7 | 5387 | &inode->runtime_flags) && |
781feef7 | 5388 | inode_only >= LOG_INODE_EXISTS)) |
e02119d5 CM |
5389 | max_key.type = BTRFS_XATTR_ITEM_KEY; |
5390 | else | |
5391 | max_key.type = (u8)-1; | |
5392 | max_key.offset = (u64)-1; | |
5393 | ||
2c2c452b | 5394 | /* |
5aa7d1a7 FM |
5395 | * Only run delayed items if we are a directory. We want to make sure |
5396 | * all directory indexes hit the fs/subvolume tree so we can find them | |
5397 | * and figure out which index ranges have to be logged. | |
5398 | * | |
8c8648dd FM |
5399 | * Otherwise commit the delayed inode only if the full sync flag is set, |
5400 | * as we want to make sure an up to date version is in the subvolume | |
5401 | * tree so copy_inode_items_to_log() / copy_items() can find it and copy | |
5402 | * it to the log tree. For a non full sync, we always log the inode item | |
5403 | * based on the in-memory struct btrfs_inode which is always up to date. | |
2c2c452b | 5404 | */ |
5aa7d1a7 | 5405 | if (S_ISDIR(inode->vfs_inode.i_mode)) |
a59108a7 | 5406 | ret = btrfs_commit_inode_delayed_items(trans, inode); |
8c8648dd | 5407 | else if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags)) |
a59108a7 | 5408 | ret = btrfs_commit_inode_delayed_inode(inode); |
2c2c452b FM |
5409 | |
5410 | if (ret) { | |
5411 | btrfs_free_path(path); | |
5412 | btrfs_free_path(dst_path); | |
5413 | return ret; | |
16cdcec7 MX |
5414 | } |
5415 | ||
a3baaf0d FM |
5416 | if (inode_only == LOG_OTHER_INODE || inode_only == LOG_OTHER_INODE_ALL) { |
5417 | recursive_logging = true; | |
5418 | if (inode_only == LOG_OTHER_INODE) | |
5419 | inode_only = LOG_INODE_EXISTS; | |
5420 | else | |
5421 | inode_only = LOG_INODE_ALL; | |
a59108a7 | 5422 | mutex_lock_nested(&inode->log_mutex, SINGLE_DEPTH_NESTING); |
781feef7 | 5423 | } else { |
a59108a7 | 5424 | mutex_lock(&inode->log_mutex); |
781feef7 | 5425 | } |
e02119d5 | 5426 | |
64d6b281 FM |
5427 | /* |
5428 | * This is for cases where logging a directory could result in losing | |
5429 | * a file after replaying the log. For example, if we move a file from | |
5430 | * directory A to directory B, then fsync directory A, we have no way | |
5431 | * to know the file was moved from A to B, so logging just A would | |
5432 | * result in losing the file after a log replay. | |
5433 | */ | |
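/*
 * A sequence that illustrates the problem (illustrative only):
 *
 * mkdir /mnt/A /mnt/B
 * touch /mnt/A/foo
 * sync
 * mv /mnt/A/foo /mnt/B/foo
 * xfs_io -c fsync /mnt/A
 * <power fail>
 *
 * Replaying only A's log can not tell that foo now lives in B, so we fall
 * back to a full transaction commit below instead of risking losing foo.
 */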
5434 | if (S_ISDIR(inode->vfs_inode.i_mode) && | |
5435 | inode_only == LOG_INODE_ALL && | |
5436 | inode->last_unlink_trans >= trans->transid) { | |
5437 | btrfs_set_log_full_commit(trans); | |
5438 | err = 1; | |
5439 | goto out_unlock; | |
5440 | } | |
5441 | ||
e02119d5 CM |
5442 | /* |
5443 | * a brute force approach to making sure we get the most uptodate | |
5444 | * copies of everything. | |
5445 | */ | |
a59108a7 | 5446 | if (S_ISDIR(inode->vfs_inode.i_mode)) { |
e02119d5 CM |
5447 | int max_key_type = BTRFS_DIR_LOG_INDEX_KEY; |
5448 | ||
ab12313a | 5449 | clear_bit(BTRFS_INODE_COPY_EVERYTHING, &inode->runtime_flags); |
4f764e51 FM |
5450 | if (inode_only == LOG_INODE_EXISTS) |
5451 | max_key_type = BTRFS_XATTR_ITEM_KEY; | |
33345d01 | 5452 | ret = drop_objectid_items(trans, log, path, ino, max_key_type); |
e02119d5 | 5453 | } else { |
1a4bcf47 FM |
5454 | if (inode_only == LOG_INODE_EXISTS) { |
5455 | /* | |
5456 | * Make sure the new inode item we write to the log has | |
5457 | * the same isize as the current one (if it exists). | |
5458 | * This is necessary to prevent data loss after log | |
5459 | * replay, and also to prevent doing a wrong expanding | |
5460 | * truncate - e.g. create file, write 4K into offset | |
5461 | * 0, fsync, write 4K into offset 4096, add hard link, | |
5462 | * fsync some other file (to sync log), power fail - if | |
5463 | * we use the inode's current i_size, after log replay | |
5464 | * we get an 8K file, with the last 4K extent as a hole | |
5465 | * (zeroes), as if an expanding truncate happened, | |
5466 | * instead of getting a file of 4K only. | |
5467 | */ | |
a59108a7 | 5468 | err = logged_inode_size(log, inode, path, &logged_isize); |
1a4bcf47 FM |
5469 | if (err) |
5470 | goto out_unlock; | |
5471 | } | |
a742994a | 5472 | if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, |
a59108a7 | 5473 | &inode->runtime_flags)) { |
a742994a | 5474 | if (inode_only == LOG_INODE_EXISTS) { |
4f764e51 | 5475 | max_key.type = BTRFS_XATTR_ITEM_KEY; |
a742994a FM |
5476 | ret = drop_objectid_items(trans, log, path, ino, |
5477 | max_key.type); | |
5478 | } else { | |
5479 | clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC, | |
a59108a7 | 5480 | &inode->runtime_flags); |
a742994a | 5481 | clear_bit(BTRFS_INODE_COPY_EVERYTHING, |
a59108a7 | 5482 | &inode->runtime_flags); |
28ed1345 CM |
5483 | while(1) { |
5484 | ret = btrfs_truncate_inode_items(trans, | |
0d7d3165 | 5485 | log, inode, 0, 0, NULL); |
28ed1345 CM |
5486 | if (ret != -EAGAIN) |
5487 | break; | |
5488 | } | |
a742994a | 5489 | } |
4f764e51 | 5490 | } else if (test_and_clear_bit(BTRFS_INODE_COPY_EVERYTHING, |
a59108a7 | 5491 | &inode->runtime_flags) || |
6cfab851 | 5492 | inode_only == LOG_INODE_EXISTS) { |
4f764e51 | 5493 | if (inode_only == LOG_INODE_ALL) |
183f37fa | 5494 | fast_search = true; |
4f764e51 | 5495 | max_key.type = BTRFS_XATTR_ITEM_KEY; |
5dc562c5 | 5496 | ret = drop_objectid_items(trans, log, path, ino, |
e9976151 | 5497 | max_key.type); |
a95249b3 JB |
5498 | } else { |
5499 | if (inode_only == LOG_INODE_ALL) | |
5500 | fast_search = true; | |
2ac691d8 | 5501 | inode_item_dropped = false; |
a95249b3 | 5502 | goto log_extents; |
5dc562c5 | 5503 | } |
a95249b3 | 5504 | |
e02119d5 | 5505 | } |
4a500fd1 YZ |
5506 | if (ret) { |
5507 | err = ret; | |
5508 | goto out_unlock; | |
5509 | } | |
e02119d5 | 5510 | |
da447009 FM |
5511 | err = copy_inode_items_to_log(trans, inode, &min_key, &max_key, |
5512 | path, dst_path, logged_isize, | |
7af59743 FM |
5513 | recursive_logging, inode_only, ctx, |
5514 | &need_log_inode_item); | |
da447009 FM |
5515 | if (err) |
5516 | goto out_unlock; | |
5dc562c5 | 5517 | |
36283bf7 FM |
5518 | btrfs_release_path(path); |
5519 | btrfs_release_path(dst_path); | |
a59108a7 | 5520 | err = btrfs_log_all_xattrs(trans, root, inode, path, dst_path); |
36283bf7 FM |
5521 | if (err) |
5522 | goto out_unlock; | |
9a8fca62 | 5523 | xattrs_logged = true; |
a89ca6f2 FM |
5524 | if (max_key.type >= BTRFS_EXTENT_DATA_KEY && !fast_search) { |
5525 | btrfs_release_path(path); | |
5526 | btrfs_release_path(dst_path); | |
7af59743 | 5527 | err = btrfs_log_holes(trans, root, inode, path); |
a89ca6f2 FM |
5528 | if (err) |
5529 | goto out_unlock; | |
5530 | } | |
a95249b3 | 5531 | log_extents: |
f3b15ccd JB |
5532 | btrfs_release_path(path); |
5533 | btrfs_release_path(dst_path); | |
e4545de5 | 5534 | if (need_log_inode_item) { |
2ac691d8 | 5535 | err = log_inode_item(trans, log, dst_path, inode, inode_item_dropped); |
b590b839 FM |
5536 | if (err) |
5537 | goto out_unlock; | |
5538 | /* | |
5539 | * If we are doing a fast fsync and the inode was logged before | |
5540 | * in this transaction, we don't need to log the xattrs because | |
5541 | * they were logged before. If xattrs were added, changed or | |
5542 | * deleted since the last time we logged the inode, then we have | |
5543 | * already logged them because the inode had the runtime flag | |
5544 | * BTRFS_INODE_COPY_EVERYTHING set. | |
5545 | */ | |
5546 | if (!xattrs_logged && inode->logged_trans < trans->transid) { | |
9a8fca62 FM |
5547 | err = btrfs_log_all_xattrs(trans, root, inode, path, |
5548 | dst_path); | |
b590b839 FM |
5549 | if (err) |
5550 | goto out_unlock; | |
9a8fca62 FM |
5551 | btrfs_release_path(path); |
5552 | } | |
e4545de5 | 5553 | } |
5dc562c5 | 5554 | if (fast_search) { |
a59108a7 | 5555 | ret = btrfs_log_changed_extents(trans, root, inode, dst_path, |
48778179 | 5556 | ctx); |
5dc562c5 JB |
5557 | if (ret) { |
5558 | err = ret; | |
5559 | goto out_unlock; | |
5560 | } | |
d006a048 | 5561 | } else if (inode_only == LOG_INODE_ALL) { |
06d3d22b LB |
5562 | struct extent_map *em, *n; |
5563 | ||
49dae1bc | 5564 | write_lock(&em_tree->lock); |
48778179 FM |
5565 | list_for_each_entry_safe(em, n, &em_tree->modified_extents, list) |
5566 | list_del_init(&em->list); | |
49dae1bc | 5567 | write_unlock(&em_tree->lock); |
5dc562c5 JB |
5568 | } |
5569 | ||
a59108a7 NB |
5570 | if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->vfs_inode.i_mode)) { |
5571 | ret = log_directory_changes(trans, root, inode, path, dst_path, | |
5572 | ctx); | |
4a500fd1 YZ |
5573 | if (ret) { |
5574 | err = ret; | |
5575 | goto out_unlock; | |
5576 | } | |
e02119d5 | 5577 | } |
49dae1bc | 5578 | |
d1d832a0 | 5579 | /* |
75b463d2 FM |
5580 | * If we are logging that an ancestor inode exists as part of logging a |
5581 | * new name from a link or rename operation, don't mark the inode as | |
5582 | * logged - otherwise if an explicit fsync is made against an ancestor, | |
5583 | * the fsync considers the inode in the log and doesn't sync the log, | |
5584 | * resulting in the ancestor missing after a power failure unless the | |
5585 | * log was synced as part of an fsync against any other unrelated inode. | |
5586 | * So keep it simple for this case and just don't flag the ancestors as | |
5587 | * logged. | |
d1d832a0 | 5588 | */ |
75b463d2 FM |
5589 | if (!ctx || |
5590 | !(S_ISDIR(inode->vfs_inode.i_mode) && ctx->logging_new_name && | |
5591 | &inode->vfs_inode != ctx->inode)) { | |
5592 | spin_lock(&inode->lock); | |
5593 | inode->logged_trans = trans->transid; | |
5594 | /* | |
9acc8103 FM |
5595 | * Don't update last_log_commit if we logged that an inode exists. |
5596 | * We do this for two reasons: | |
5597 | * | |
5598 | * 1) We might have had buffered writes to this inode that were | |
5599 | * flushed and had their ordered extents completed in this | |
5600 | * transaction, but we did not previously log the inode with | |
5601 | * LOG_INODE_ALL. Later the inode was evicted and after that | |
5602 | * it was loaded again and this LOG_INODE_EXISTS log operation | |
5603 | * happened. We must make sure that if an explicit fsync against | |
5604 | * the inode is performed later, it logs the new extents, an | |
5605 | * updated inode item, etc, and syncs the log. The same logic | |
5606 | * applies to direct IO writes instead of buffered writes. | |
5607 | * | |
5608 | * 2) When we log the inode with LOG_INODE_EXISTS, its inode item | |
5609 | * is logged with an i_size of 0 or whatever value was logged | |
5610 | * before. If later the i_size of the inode is increased by a | |
5611 | * truncate operation, the log is synced through an fsync of | |
5612 | * some other inode and then finally an explicit fsync against | |
5613 | * this inode is made, we must make sure this fsync logs the | |
5614 | * inode with the new i_size, the hole between old i_size and | |
5615 | * the new i_size, and syncs the log. | |
75b463d2 | 5616 | */ |
9acc8103 | 5617 | if (inode_only != LOG_INODE_EXISTS) |
75b463d2 FM |
5618 | inode->last_log_commit = inode->last_sub_trans; |
5619 | spin_unlock(&inode->lock); | |
5620 | } | |
4a500fd1 | 5621 | out_unlock: |
a59108a7 | 5622 | mutex_unlock(&inode->log_mutex); |
e02119d5 CM |
5623 | |
5624 | btrfs_free_path(path); | |
5625 | btrfs_free_path(dst_path); | |
4a500fd1 | 5626 | return err; |
e02119d5 CM |
5627 | } |
5628 | ||
ab12313a FM |
5629 | /* |
5630 | * Check if we need to log an inode. This is used in contexts where while | |
5631 | * logging an inode we need to log another inode (either that it exists or in | |
5632 | * full mode). This is used instead of btrfs_inode_in_log() because the latter | |
5633 | * requires the inode to be in the log and have the log transaction committed, | |
5634 | * while here we do not care if the log transaction was already committed - our | |
5635 | * caller will commit the log later - and we want to avoid logging an inode | |
5636 | * multiple times when multiple tasks have joined the same log transaction. | |
5637 | */ | |
5638 | static bool need_log_inode(struct btrfs_trans_handle *trans, | |
5639 | struct btrfs_inode *inode) | |
5640 | { | |
8be2ba2e FM |
5641 | /* |
5642 | * If a directory was not modified, no dentries added or removed, we can | |
5643 | * and should avoid logging it. | |
5644 | */ | |
5645 | if (S_ISDIR(inode->vfs_inode.i_mode) && inode->last_trans < trans->transid) | |
5646 | return false; | |
5647 | ||
ab12313a FM |
5648 | /* |
5649 | * If this inode does not have new/updated/deleted xattrs since the last | |
5650 | * time it was logged and is flagged as logged in the current transaction, | |
5651 | * we can skip logging it. As for new/deleted names, those are updated in | |
5652 | * the log by link/unlink/rename operations. | |
5653 | * In case the inode was logged and then evicted and reloaded, its | |
5654 | * logged_trans will be 0, in which case we have to fully log it since | |
5655 | * logged_trans is a transient field, not persisted. | |
5656 | */ | |
5657 | if (inode->logged_trans == trans->transid && | |
5658 | !test_bit(BTRFS_INODE_COPY_EVERYTHING, &inode->runtime_flags)) | |
5659 | return false; | |
5660 | ||
5661 | return true; | |
5662 | } | |
5663 | ||
2f2ff0ee FM |
5664 | struct btrfs_dir_list { |
5665 | u64 ino; | |
5666 | struct list_head list; | |
5667 | }; | |
5668 | ||
5669 | /* | |
5670 | * Log the inodes of the new dentries of a directory. See log_dir_items() for | |
5671 | * details about why it is needed. | |
5672 | * This is a recursive operation - if an existing dentry corresponds to a | |
5673 | * directory, that directory's new entries are logged too (same behaviour as | |
5674 | * ext3/4, xfs, f2fs, reiserfs, nilfs2). Note that when logging the inodes | |
5675 | * the dentries point to, we do not lock their i_mutex, otherwise lockdep | |
5676 | * complains about the following circular lock dependency / possible deadlock: | |
5677 | * | |
5678 | * CPU0 CPU1 | |
5679 | * ---- ---- | |
5680 | * lock(&type->i_mutex_dir_key#3/2); | |
5681 | * lock(sb_internal#2); | |
5682 | * lock(&type->i_mutex_dir_key#3/2); | |
5683 | * lock(&sb->s_type->i_mutex_key#14); | |
5684 | * | |
5685 | * Where sb_internal is the lock (a counter that works as a lock) acquired by | |
5686 | * sb_start_intwrite() in btrfs_start_transaction(). | |
5687 | * Not locking i_mutex of the inodes is still safe because: | |
5688 | * | |
5689 | * 1) For regular files we log with a mode of LOG_INODE_EXISTS. It's possible | |
5690 | * that while logging the inode new references (names) are added or removed | |
5691 | * from the inode, leaving the logged inode item with a link count that does | |
5692 | * not match the number of logged inode reference items. This is fine because | |
5693 | * at log replay time we compute the real number of links and correct the | |
5694 | * link count in the inode item (see replay_one_buffer() and | |
5695 | * link_to_fixup_dir()); | |
5696 | * | |
5697 | * 2) For directories we log with a mode of LOG_INODE_ALL. It's possible that | |
5698 | * while logging the inode's items new items with keys BTRFS_DIR_ITEM_KEY and | |
5699 | * BTRFS_DIR_INDEX_KEY are added to fs/subvol tree and the logged inode item | |
5700 | * has a size that doesn't match the sum of the lengths of all the logged | |
5701 | * names. This does not result in a problem because if a dir_item key is | |
5702 | * logged but its matching dir_index key is not logged, at log replay time we | |
5703 | * don't use it to replay the respective name (see replay_one_name()). On the | |
5704 | * other hand if only the dir_index key ends up being logged, the respective | |
5705 | * name is added to the fs/subvol tree with both the dir_item and dir_index | |
5706 | * keys created (see replay_one_name()). | |
5707 | * The directory's inode item with a wrong i_size is not a problem as well, | |
5708 | * since we don't use it at log replay time to set the i_size in the inode | |
5709 | * item of the fs/subvol tree (see overwrite_item()). | |
5710 | */ | |
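/*
 * A scenario that shows why this is needed (illustrative only):
 *
 * mkdir /mnt/A
 * touch /mnt/A/foo
 * xfs_io -c fsync /mnt/A
 * <power fail>
 *
 * The log for directory A contains a dentry for foo, so we must also log
 * foo's inode here, otherwise after a log replay that dentry could end up
 * pointing to an inode that was never replayed.
 */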
5711 | static int log_new_dir_dentries(struct btrfs_trans_handle *trans, | |
5712 | struct btrfs_root *root, | |
51cc0d32 | 5713 | struct btrfs_inode *start_inode, |
2f2ff0ee FM |
5714 | struct btrfs_log_ctx *ctx) |
5715 | { | |
0b246afa | 5716 | struct btrfs_fs_info *fs_info = root->fs_info; |
2f2ff0ee FM |
5717 | struct btrfs_root *log = root->log_root; |
5718 | struct btrfs_path *path; | |
5719 | LIST_HEAD(dir_list); | |
5720 | struct btrfs_dir_list *dir_elem; | |
5721 | int ret = 0; | |
5722 | ||
5723 | path = btrfs_alloc_path(); | |
5724 | if (!path) | |
5725 | return -ENOMEM; | |
5726 | ||
5727 | dir_elem = kmalloc(sizeof(*dir_elem), GFP_NOFS); | |
5728 | if (!dir_elem) { | |
5729 | btrfs_free_path(path); | |
5730 | return -ENOMEM; | |
5731 | } | |
51cc0d32 | 5732 | dir_elem->ino = btrfs_ino(start_inode); |
2f2ff0ee FM |
5733 | list_add_tail(&dir_elem->list, &dir_list); |
5734 | ||
5735 | while (!list_empty(&dir_list)) { | |
5736 | struct extent_buffer *leaf; | |
5737 | struct btrfs_key min_key; | |
5738 | int nritems; | |
5739 | int i; | |
5740 | ||
5741 | dir_elem = list_first_entry(&dir_list, struct btrfs_dir_list, | |
5742 | list); | |
5743 | if (ret) | |
5744 | goto next_dir_inode; | |
5745 | ||
5746 | min_key.objectid = dir_elem->ino; | |
5747 | min_key.type = BTRFS_DIR_ITEM_KEY; | |
5748 | min_key.offset = 0; | |
5749 | again: | |
5750 | btrfs_release_path(path); | |
5751 | ret = btrfs_search_forward(log, &min_key, path, trans->transid); | |
5752 | if (ret < 0) { | |
5753 | goto next_dir_inode; | |
5754 | } else if (ret > 0) { | |
5755 | ret = 0; | |
5756 | goto next_dir_inode; | |
5757 | } | |
5758 | ||
5759 | process_leaf: | |
5760 | leaf = path->nodes[0]; | |
5761 | nritems = btrfs_header_nritems(leaf); | |
5762 | for (i = path->slots[0]; i < nritems; i++) { | |
5763 | struct btrfs_dir_item *di; | |
5764 | struct btrfs_key di_key; | |
5765 | struct inode *di_inode; | |
5766 | struct btrfs_dir_list *new_dir_elem; | |
5767 | int log_mode = LOG_INODE_EXISTS; | |
5768 | int type; | |
5769 | ||
5770 | btrfs_item_key_to_cpu(leaf, &min_key, i); | |
5771 | if (min_key.objectid != dir_elem->ino || | |
5772 | min_key.type != BTRFS_DIR_ITEM_KEY) | |
5773 | goto next_dir_inode; | |
5774 | ||
5775 | di = btrfs_item_ptr(leaf, i, struct btrfs_dir_item); | |
5776 | type = btrfs_dir_type(leaf, di); | |
5777 | if (btrfs_dir_transid(leaf, di) < trans->transid && | |
5778 | type != BTRFS_FT_DIR) | |
5779 | continue; | |
5780 | btrfs_dir_item_key_to_cpu(leaf, di, &di_key); | |
5781 | if (di_key.type == BTRFS_ROOT_ITEM_KEY) | |
5782 | continue; | |
5783 | ||
ec125cfb | 5784 | btrfs_release_path(path); |
0202e83f | 5785 | di_inode = btrfs_iget(fs_info->sb, di_key.objectid, root); |
2f2ff0ee FM |
5786 | if (IS_ERR(di_inode)) { |
5787 | ret = PTR_ERR(di_inode); | |
5788 | goto next_dir_inode; | |
5789 | } | |
5790 | ||
0e44cb3f | 5791 | if (!need_log_inode(trans, BTRFS_I(di_inode))) { |
410f954c | 5792 | btrfs_add_delayed_iput(di_inode); |
ec125cfb | 5793 | break; |
2f2ff0ee FM |
5794 | } |
5795 | ||
5796 | ctx->log_new_dentries = false; | |
3f9749f6 | 5797 | if (type == BTRFS_FT_DIR || type == BTRFS_FT_SYMLINK) |
2f2ff0ee | 5798 | log_mode = LOG_INODE_ALL; |
a59108a7 | 5799 | ret = btrfs_log_inode(trans, root, BTRFS_I(di_inode), |
48778179 | 5800 | log_mode, ctx); |
410f954c | 5801 | btrfs_add_delayed_iput(di_inode); |
2f2ff0ee FM |
5802 | if (ret) |
5803 | goto next_dir_inode; | |
5804 | if (ctx->log_new_dentries) { | |
5805 | new_dir_elem = kmalloc(sizeof(*new_dir_elem), | |
5806 | GFP_NOFS); | |
5807 | if (!new_dir_elem) { | |
5808 | ret = -ENOMEM; | |
5809 | goto next_dir_inode; | |
5810 | } | |
5811 | new_dir_elem->ino = di_key.objectid; | |
5812 | list_add_tail(&new_dir_elem->list, &dir_list); | |
5813 | } | |
5814 | break; | |
5815 | } | |
5816 | if (i == nritems) { | |
5817 | ret = btrfs_next_leaf(log, path); | |
5818 | if (ret < 0) { | |
5819 | goto next_dir_inode; | |
5820 | } else if (ret > 0) { | |
5821 | ret = 0; | |
5822 | goto next_dir_inode; | |
5823 | } | |
5824 | goto process_leaf; | |
5825 | } | |
5826 | if (min_key.offset < (u64)-1) { | |
5827 | min_key.offset++; | |
5828 | goto again; | |
5829 | } | |
5830 | next_dir_inode: | |
5831 | list_del(&dir_elem->list); | |
5832 | kfree(dir_elem); | |
5833 | } | |
5834 | ||
5835 | btrfs_free_path(path); | |
5836 | return ret; | |
5837 | } | |
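The function above is a breadth-first walk: it keeps a queue of directory inode numbers still to process, pops the head, logs every inode referenced by that directory's dir items, and appends any child directory that produced new dentries back onto the queue. The standalone sketch below shows the same queue-driven pattern in isolation; it is plain userspace C with a hypothetical children_of() table standing in for the log tree's dir items, not kernel code.

#include <stdio.h>
#include <stdlib.h>

/* hypothetical stand-in for the children recorded by a directory's dir items */
struct child { unsigned long ino; int is_dir; };

static const struct child *children_of(unsigned long dir_ino, size_t *count)
{
	static const struct child root_children[] = { { 257, 1 }, { 258, 0 } };
	static const struct child sub_children[] = { { 259, 0 }, { 260, 0 } };

	if (dir_ino == 256) { *count = 2; return root_children; }
	if (dir_ino == 257) { *count = 2; return sub_children; }
	*count = 0;
	return NULL;
}

struct dir_elem { unsigned long ino; struct dir_elem *next; };

int main(void)
{
	/* seed the queue with the directory the fsync started from */
	struct dir_elem *head = malloc(sizeof(*head));

	if (!head)
		return 1;
	head->ino = 256;
	head->next = NULL;

	while (head) {
		struct dir_elem *cur = head;
		size_t count;
		const struct child *kids = children_of(cur->ino, &count);

		for (size_t i = 0; i < count; i++) {
			printf("log inode %lu\n", kids[i].ino);
			if (!kids[i].is_dir)
				continue;
			/* a new directory dentry: queue it for a later pass */
			struct dir_elem *elem = malloc(sizeof(*elem));
			struct dir_elem *tail = cur;

			if (!elem)
				return 1;
			elem->ino = kids[i].ino;
			elem->next = NULL;
			while (tail->next)
				tail = tail->next;
			tail->next = elem;
		}
		head = cur->next;
		free(cur);
	}
	return 0;
}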
5838 | ||
18aa0922 | 5839 | static int btrfs_log_all_parents(struct btrfs_trans_handle *trans, |
d0a0b78d | 5840 | struct btrfs_inode *inode, |
18aa0922 FM |
5841 | struct btrfs_log_ctx *ctx) |
5842 | { | |
3ffbd68c | 5843 | struct btrfs_fs_info *fs_info = trans->fs_info; |
18aa0922 FM |
5844 | int ret; |
5845 | struct btrfs_path *path; | |
5846 | struct btrfs_key key; | |
d0a0b78d NB |
5847 | struct btrfs_root *root = inode->root; |
5848 | const u64 ino = btrfs_ino(inode); | |
18aa0922 FM |
5849 | |
5850 | path = btrfs_alloc_path(); | |
5851 | if (!path) | |
5852 | return -ENOMEM; | |
5853 | path->skip_locking = 1; | |
5854 | path->search_commit_root = 1; | |
5855 | ||
5856 | key.objectid = ino; | |
5857 | key.type = BTRFS_INODE_REF_KEY; | |
5858 | key.offset = 0; | |
5859 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | |
5860 | if (ret < 0) | |
5861 | goto out; | |
5862 | ||
5863 | while (true) { | |
5864 | struct extent_buffer *leaf = path->nodes[0]; | |
5865 | int slot = path->slots[0]; | |
5866 | u32 cur_offset = 0; | |
5867 | u32 item_size; | |
5868 | unsigned long ptr; | |
5869 | ||
5870 | if (slot >= btrfs_header_nritems(leaf)) { | |
5871 | ret = btrfs_next_leaf(root, path); | |
5872 | if (ret < 0) | |
5873 | goto out; | |
5874 | else if (ret > 0) | |
5875 | break; | |
5876 | continue; | |
5877 | } | |
5878 | ||
5879 | btrfs_item_key_to_cpu(leaf, &key, slot); | |
5880 | /* BTRFS_INODE_EXTREF_KEY is BTRFS_INODE_REF_KEY + 1 */ | |
5881 | if (key.objectid != ino || key.type > BTRFS_INODE_EXTREF_KEY) | |
5882 | break; | |
5883 | ||
5884 | item_size = btrfs_item_size_nr(leaf, slot); | |
5885 | ptr = btrfs_item_ptr_offset(leaf, slot); | |
5886 | while (cur_offset < item_size) { | |
5887 | struct btrfs_key inode_key; | |
5888 | struct inode *dir_inode; | |
5889 | ||
5890 | inode_key.type = BTRFS_INODE_ITEM_KEY; | |
5891 | inode_key.offset = 0; | |
5892 | ||
5893 | if (key.type == BTRFS_INODE_EXTREF_KEY) { | |
5894 | struct btrfs_inode_extref *extref; | |
5895 | ||
5896 | extref = (struct btrfs_inode_extref *) | |
5897 | (ptr + cur_offset); | |
5898 | inode_key.objectid = btrfs_inode_extref_parent( | |
5899 | leaf, extref); | |
5900 | cur_offset += sizeof(*extref); | |
5901 | cur_offset += btrfs_inode_extref_name_len(leaf, | |
5902 | extref); | |
5903 | } else { | |
5904 | inode_key.objectid = key.offset; | |
5905 | cur_offset = item_size; | |
5906 | } | |
5907 | ||
0202e83f DS |
5908 | dir_inode = btrfs_iget(fs_info->sb, inode_key.objectid, |
5909 | root); | |
0f375eed FM |
5910 | /* |
5911 | * If the parent inode was deleted, return an error to | |
5912 | * fall back to a transaction commit. This is to prevent | |
5913 | * getting an inode that was moved from one parent A to | |
5914 | * a parent B, got its former parent A deleted and then | |
5915 | * it got fsync'ed, from existing at both parents after | |
5916 | * a log replay (and the old parent still existing). | |
5917 | * Example: | |
5918 | * | |
5919 | * mkdir /mnt/A | |
5920 | * mkdir /mnt/B | |
5921 | * touch /mnt/B/bar | |
5922 | * sync | |
5923 | * mv /mnt/B/bar /mnt/A/bar | |
5924 | * mv -T /mnt/A /mnt/B | |
5925 | * fsync /mnt/B/bar | |
5926 | * <power fail> | |
5927 | * | |
5928 | * If we ignore the old parent B which got deleted, | |
5929 | * after a log replay we would have file bar linked | |
5930 | * at both parents and the old parent B would still | |
5931 | * exist. | |
5932 | */ | |
5933 | if (IS_ERR(dir_inode)) { | |
5934 | ret = PTR_ERR(dir_inode); | |
5935 | goto out; | |
5936 | } | |
18aa0922 | 5937 | |
3e6a86a1 FM |
5938 | if (!need_log_inode(trans, BTRFS_I(dir_inode))) { |
5939 | btrfs_add_delayed_iput(dir_inode); | |
5940 | continue; | |
5941 | } | |
5942 | ||
657ed1aa FM |
5943 | if (ctx) |
5944 | ctx->log_new_dentries = false; | |
a59108a7 | 5945 | ret = btrfs_log_inode(trans, root, BTRFS_I(dir_inode), |
48778179 | 5946 | LOG_INODE_ALL, ctx); |
657ed1aa FM |
5947 | if (!ret && ctx && ctx->log_new_dentries) |
5948 | ret = log_new_dir_dentries(trans, root, | |
f85b7379 | 5949 | BTRFS_I(dir_inode), ctx); |
410f954c | 5950 | btrfs_add_delayed_iput(dir_inode); |
18aa0922 FM |
5951 | if (ret) |
5952 | goto out; | |
5953 | } | |
5954 | path->slots[0]++; | |
5955 | } | |
5956 | ret = 0; | |
5957 | out: | |
5958 | btrfs_free_path(path); | |
5959 | return ret; | |
5960 | } | |
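One detail of the loop above that is easy to miss: a single BTRFS_INODE_EXTREF_KEY item can pack several variable-length references back to back, so cur_offset advances by the fixed extref header size plus the name length of each entry, while a plain BTRFS_INODE_REF_KEY item is consumed in one step (the parent is simply key.offset). Below is a small standalone illustration of that packed, variable-length record walk; the record layout is made up for the example and is not the on-disk btrfs_inode_extref format.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* made-up header: parent objectid plus name length, followed by the name bytes */
struct ref_hdr {
	uint64_t parent;
	uint16_t name_len;
};

int main(void)
{
	unsigned char item[64];
	size_t item_size = 0;
	size_t cur = 0;
	const char *names[] = { "foo", "bar2" };
	const uint64_t parents[] = { 256, 300 };

	/* build one "item" containing two packed references */
	for (int i = 0; i < 2; i++) {
		struct ref_hdr hdr = { parents[i], (uint16_t)strlen(names[i]) };

		memcpy(item + item_size, &hdr, sizeof(hdr));
		item_size += sizeof(hdr);
		memcpy(item + item_size, names[i], hdr.name_len);
		item_size += hdr.name_len;
	}

	/* walk it the way the extref loop does: header, then name, repeat */
	while (cur < item_size) {
		struct ref_hdr hdr;

		memcpy(&hdr, item + cur, sizeof(hdr));
		printf("parent dir %llu, name %.*s\n",
		       (unsigned long long)hdr.parent, (int)hdr.name_len,
		       (const char *)(item + cur + sizeof(hdr)));
		cur += sizeof(hdr) + hdr.name_len;
	}
	return 0;
}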
5961 | ||
b8aa330d FM |
5962 | static int log_new_ancestors(struct btrfs_trans_handle *trans, |
5963 | struct btrfs_root *root, | |
5964 | struct btrfs_path *path, | |
5965 | struct btrfs_log_ctx *ctx) | |
5966 | { | |
5967 | struct btrfs_key found_key; | |
5968 | ||
5969 | btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]); | |
5970 | ||
5971 | while (true) { | |
5972 | struct btrfs_fs_info *fs_info = root->fs_info; | |
b8aa330d FM |
5973 | struct extent_buffer *leaf = path->nodes[0]; |
5974 | int slot = path->slots[0]; | |
5975 | struct btrfs_key search_key; | |
5976 | struct inode *inode; | |
0202e83f | 5977 | u64 ino; |
b8aa330d FM |
5978 | int ret = 0; |
5979 | ||
5980 | btrfs_release_path(path); | |
5981 | ||
0202e83f DS |
5982 | ino = found_key.offset; |
5983 | ||
b8aa330d FM |
5984 | search_key.objectid = found_key.offset; |
5985 | search_key.type = BTRFS_INODE_ITEM_KEY; | |
5986 | search_key.offset = 0; | |
0202e83f | 5987 | inode = btrfs_iget(fs_info->sb, ino, root); |
b8aa330d FM |
5988 | if (IS_ERR(inode)) |
5989 | return PTR_ERR(inode); | |
5990 | ||
ab12313a FM |
5991 | if (BTRFS_I(inode)->generation >= trans->transid && |
5992 | need_log_inode(trans, BTRFS_I(inode))) | |
b8aa330d | 5993 | ret = btrfs_log_inode(trans, root, BTRFS_I(inode), |
48778179 | 5994 | LOG_INODE_EXISTS, ctx); |
410f954c | 5995 | btrfs_add_delayed_iput(inode); |
b8aa330d FM |
5996 | if (ret) |
5997 | return ret; | |
5998 | ||
5999 | if (search_key.objectid == BTRFS_FIRST_FREE_OBJECTID) | |
6000 | break; | |
6001 | ||
6002 | search_key.type = BTRFS_INODE_REF_KEY; | |
6003 | ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0); | |
6004 | if (ret < 0) | |
6005 | return ret; | |
6006 | ||
6007 | leaf = path->nodes[0]; | |
6008 | slot = path->slots[0]; | |
6009 | if (slot >= btrfs_header_nritems(leaf)) { | |
6010 | ret = btrfs_next_leaf(root, path); | |
6011 | if (ret < 0) | |
6012 | return ret; | |
6013 | else if (ret > 0) | |
6014 | return -ENOENT; | |
6015 | leaf = path->nodes[0]; | |
6016 | slot = path->slots[0]; | |
6017 | } | |
6018 | ||
6019 | btrfs_item_key_to_cpu(leaf, &found_key, slot); | |
6020 | if (found_key.objectid != search_key.objectid || | |
6021 | found_key.type != BTRFS_INODE_REF_KEY) | |
6022 | return -ENOENT; | |
6023 | } | |
6024 | return 0; | |
6025 | } | |
6026 | ||
6027 | static int log_new_ancestors_fast(struct btrfs_trans_handle *trans, | |
6028 | struct btrfs_inode *inode, | |
6029 | struct dentry *parent, | |
6030 | struct btrfs_log_ctx *ctx) | |
6031 | { | |
6032 | struct btrfs_root *root = inode->root; | |
b8aa330d FM |
6033 | struct dentry *old_parent = NULL; |
6034 | struct super_block *sb = inode->vfs_inode.i_sb; | |
6035 | int ret = 0; | |
6036 | ||
6037 | while (true) { | |
6038 | if (!parent || d_really_is_negative(parent) || | |
6039 | sb != parent->d_sb) | |
6040 | break; | |
6041 | ||
6042 | inode = BTRFS_I(d_inode(parent)); | |
6043 | if (root != inode->root) | |
6044 | break; | |
6045 | ||
ab12313a FM |
6046 | if (inode->generation >= trans->transid && |
6047 | need_log_inode(trans, inode)) { | |
b8aa330d | 6048 | ret = btrfs_log_inode(trans, root, inode, |
48778179 | 6049 | LOG_INODE_EXISTS, ctx); |
b8aa330d FM |
6050 | if (ret) |
6051 | break; | |
6052 | } | |
6053 | if (IS_ROOT(parent)) | |
6054 | break; | |
6055 | ||
6056 | parent = dget_parent(parent); | |
6057 | dput(old_parent); | |
6058 | old_parent = parent; | |
6059 | } | |
6060 | dput(old_parent); | |
6061 | ||
6062 | return ret; | |
6063 | } | |
6064 | ||
6065 | static int log_all_new_ancestors(struct btrfs_trans_handle *trans, | |
6066 | struct btrfs_inode *inode, | |
6067 | struct dentry *parent, | |
6068 | struct btrfs_log_ctx *ctx) | |
6069 | { | |
6070 | struct btrfs_root *root = inode->root; | |
6071 | const u64 ino = btrfs_ino(inode); | |
6072 | struct btrfs_path *path; | |
6073 | struct btrfs_key search_key; | |
6074 | int ret; | |
6075 | ||
6076 | /* | |
6077 | * For a single hard link case, go through a fast path that does not | |
6078 | * need to iterate the fs/subvolume tree. | |
6079 | */ | |
6080 | if (inode->vfs_inode.i_nlink < 2) | |
6081 | return log_new_ancestors_fast(trans, inode, parent, ctx); | |
6082 | ||
6083 | path = btrfs_alloc_path(); | |
6084 | if (!path) | |
6085 | return -ENOMEM; | |
6086 | ||
6087 | search_key.objectid = ino; | |
6088 | search_key.type = BTRFS_INODE_REF_KEY; | |
6089 | search_key.offset = 0; | |
6090 | again: | |
6091 | ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0); | |
6092 | if (ret < 0) | |
6093 | goto out; | |
6094 | if (ret == 0) | |
6095 | path->slots[0]++; | |
6096 | ||
6097 | while (true) { | |
6098 | struct extent_buffer *leaf = path->nodes[0]; | |
6099 | int slot = path->slots[0]; | |
6100 | struct btrfs_key found_key; | |
6101 | ||
6102 | if (slot >= btrfs_header_nritems(leaf)) { | |
6103 | ret = btrfs_next_leaf(root, path); | |
6104 | if (ret < 0) | |
6105 | goto out; | |
6106 | else if (ret > 0) | |
6107 | break; | |
6108 | continue; | |
6109 | } | |
6110 | ||
6111 | btrfs_item_key_to_cpu(leaf, &found_key, slot); | |
6112 | if (found_key.objectid != ino || | |
6113 | found_key.type > BTRFS_INODE_EXTREF_KEY) | |
6114 | break; | |
6115 | ||
6116 | /* | |
6117 | * Don't deal with extended references because they are rare | |
6118 | * cases and too complex to deal with (we would need to keep | |
6119 | * track of which subitem we are processing for each item in | |
6120 | * this loop, etc). So just return some error to fall back to | |
6121 | * a transaction commit. | |
6122 | */ | |
6123 | if (found_key.type == BTRFS_INODE_EXTREF_KEY) { | |
6124 | ret = -EMLINK; | |
6125 | goto out; | |
6126 | } | |
6127 | ||
6128 | /* | |
6129 | * Logging ancestors needs to do more searches on the fs/subvol | |
6130 | * tree, so it releases the path as needed to avoid deadlocks. | |
6131 | * Keep track of the last inode ref key and resume from that key | |
6132 | * after logging all new ancestors for the current hard link. | |
6133 | */ | |
6134 | memcpy(&search_key, &found_key, sizeof(search_key)); | |
6135 | ||
6136 | ret = log_new_ancestors(trans, root, path, ctx); | |
6137 | if (ret) | |
6138 | goto out; | |
6139 | btrfs_release_path(path); | |
6140 | goto again; | |
6141 | } | |
6142 | ret = 0; | |
6143 | out: | |
6144 | btrfs_free_path(path); | |
6145 | return ret; | |
6146 | } | |
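As the comment inside the loop notes, logging the ancestors of one hard link has to release the search path, so the loop remembers the last inode ref key it handled and restarts the search just past it. The tiny standalone sketch below (plain C over a sorted array, purely illustrative) shows that save-and-resume iteration pattern on its own.

#include <stdio.h>

/* pretend these are the sorted key offsets of an inode's INODE_REF items */
static const unsigned long keys[] = { 10, 25, 31, 47 };
static const int nr_keys = 4;

/* "search": return the first key strictly greater than 'after', or -1 if none */
static int search_forward(unsigned long after, unsigned long *found)
{
	for (int i = 0; i < nr_keys; i++) {
		if (keys[i] > after) {
			*found = keys[i];
			return 0;
		}
	}
	return -1;
}

int main(void)
{
	unsigned long resume_from = 0;	/* nothing handled yet */
	unsigned long key;

	while (search_forward(resume_from, &key) == 0) {
		/*
		 * Here the real code drops the path, logs all new ancestors
		 * for this reference, and then comes back to search again.
		 */
		printf("process ref key %lu\n", key);
		resume_from = key;	/* remember where to pick up */
	}
	return 0;
}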
6147 | ||
e02119d5 CM |
6148 | /* |
6149 | * helper function around btrfs_log_inode to make sure newly created | |
6150 | * parent directories also end up in the log. A minimal, inode-and-backref-only | |
6151 | * logging is done for any parent directories that are older than | |
6152 | * the last committed transaction | |
6153 | */ | |
48a3b636 | 6154 | static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans, |
19df27a9 | 6155 | struct btrfs_inode *inode, |
49dae1bc | 6156 | struct dentry *parent, |
41a1eada | 6157 | int inode_only, |
8b050d35 | 6158 | struct btrfs_log_ctx *ctx) |
e02119d5 | 6159 | { |
f882274b | 6160 | struct btrfs_root *root = inode->root; |
0b246afa | 6161 | struct btrfs_fs_info *fs_info = root->fs_info; |
12fcfd22 | 6162 | int ret = 0; |
2f2ff0ee | 6163 | bool log_dentries = false; |
12fcfd22 | 6164 | |
0b246afa | 6165 | if (btrfs_test_opt(fs_info, NOTREELOG)) { |
3a5e1404 SW |
6166 | ret = 1; |
6167 | goto end_no_trans; | |
6168 | } | |
6169 | ||
f882274b | 6170 | if (btrfs_root_refs(&root->root_item) == 0) { |
76dda93c YZ |
6171 | ret = 1; |
6172 | goto end_no_trans; | |
6173 | } | |
6174 | ||
f2d72f42 FM |
6175 | /* |
6176 | * Skip already logged inodes or inodes corresponding to tmpfiles | |
6177 | * (since logging them is pointless, a link count of 0 means they | |
6178 | * will never be accessible). | |
6179 | */ | |
626e9f41 FM |
6180 | if ((btrfs_inode_in_log(inode, trans->transid) && |
6181 | list_empty(&ctx->ordered_extents)) || | |
f2d72f42 | 6182 | inode->vfs_inode.i_nlink == 0) { |
257c62e1 CM |
6183 | ret = BTRFS_NO_LOG_SYNC; |
6184 | goto end_no_trans; | |
6185 | } | |
6186 | ||
8b050d35 | 6187 | ret = start_log_trans(trans, root, ctx); |
4a500fd1 | 6188 | if (ret) |
e87ac136 | 6189 | goto end_no_trans; |
e02119d5 | 6190 | |
48778179 | 6191 | ret = btrfs_log_inode(trans, root, inode, inode_only, ctx); |
4a500fd1 YZ |
6192 | if (ret) |
6193 | goto end_trans; | |
12fcfd22 | 6194 | |
af4176b4 CM |
6195 | /* |
6196 | * for regular files, if its inode is already on disk, we don't | |
6197 | * have to worry about the parents at all. This is because | |
6198 | * we can use the last_unlink_trans field to record renames | |
6199 | * and other fun in this file. | |
6200 | */ | |
19df27a9 | 6201 | if (S_ISREG(inode->vfs_inode.i_mode) && |
47d3db41 FM |
6202 | inode->generation < trans->transid && |
6203 | inode->last_unlink_trans < trans->transid) { | |
4a500fd1 YZ |
6204 | ret = 0; |
6205 | goto end_trans; | |
6206 | } | |
af4176b4 | 6207 | |
19df27a9 | 6208 | if (S_ISDIR(inode->vfs_inode.i_mode) && ctx && ctx->log_new_dentries) |
2f2ff0ee FM |
6209 | log_dentries = true; |
6210 | ||
18aa0922 | 6211 | /* |
01327610 | 6212 | * On unlink we must make sure all our current and old parent directory |
18aa0922 FM |
6213 | * inodes are fully logged. This is to prevent leaving dangling |
6214 | * directory index entries in directories that were our parents but are | |
6215 | * not anymore. Not doing this results in the old parent directory being | |
6216 | * impossible to delete after log replay (rmdir will always fail with | |
6217 | * error -ENOTEMPTY). | |
6218 | * | |
6219 | * Example 1: | |
6220 | * | |
6221 | * mkdir testdir | |
6222 | * touch testdir/foo | |
6223 | * ln testdir/foo testdir/bar | |
6224 | * sync | |
6225 | * unlink testdir/bar | |
6226 | * xfs_io -c fsync testdir/foo | |
6227 | * <power failure> | |
6228 | * mount fs, triggers log replay | |
6229 | * | |
6230 | * If we don't log the parent directory (testdir), after log replay the | |
6231 | * directory still has an entry pointing to the file inode using the bar | |
6232 | * name, but a matching BTRFS_INODE_[REF|EXTREF]_KEY does not exist and | |
6233 | * the file inode has a link count of 1. | |
6234 | * | |
6235 | * Example 2: | |
6236 | * | |
6237 | * mkdir testdir | |
6238 | * touch foo | |
6239 | * ln foo testdir/foo2 | |
6240 | * ln foo testdir/foo3 | |
6241 | * sync | |
6242 | * unlink testdir/foo3 | |
6243 | * xfs_io -c fsync foo | |
6244 | * <power failure> | |
6245 | * mount fs, triggers log replay | |
6246 | * | |
6247 | * Similar to the first example, after log replay the parent directory | |
6248 | * testdir still has an entry pointing to the file inode with the name foo3 | |
6249 | * but the file inode does not have a matching BTRFS_INODE_REF_KEY item | |
6250 | * and has a link count of 2. | |
6251 | */ | |
47d3db41 | 6252 | if (inode->last_unlink_trans >= trans->transid) { |
b8aa330d | 6253 | ret = btrfs_log_all_parents(trans, inode, ctx); |
18aa0922 FM |
6254 | if (ret) |
6255 | goto end_trans; | |
6256 | } | |
6257 | ||
b8aa330d FM |
6258 | ret = log_all_new_ancestors(trans, inode, parent, ctx); |
6259 | if (ret) | |
41bd6067 | 6260 | goto end_trans; |
76dda93c | 6261 | |
2f2ff0ee | 6262 | if (log_dentries) |
b8aa330d | 6263 | ret = log_new_dir_dentries(trans, root, inode, ctx); |
2f2ff0ee FM |
6264 | else |
6265 | ret = 0; | |
4a500fd1 YZ |
6266 | end_trans: |
6267 | if (ret < 0) { | |
90787766 | 6268 | btrfs_set_log_full_commit(trans); |
4a500fd1 YZ |
6269 | ret = 1; |
6270 | } | |
8b050d35 MX |
6271 | |
6272 | if (ret) | |
6273 | btrfs_remove_log_ctx(root, ctx); | |
12fcfd22 CM |
6274 | btrfs_end_log_trans(root); |
6275 | end_no_trans: | |
6276 | return ret; | |
e02119d5 CM |
6277 | } |
6278 | ||
6279 | /* | |
6280 | * it is not safe to log the dentry if the chunk root has added new | |
6281 | * chunks. This returns 0 if the dentry was logged, and 1 otherwise. | |
6282 | * If this returns 1, you must commit the transaction to safely get your | |
6283 | * data on disk. | |
6284 | */ | |
6285 | int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans, | |
e5b84f7a | 6286 | struct dentry *dentry, |
8b050d35 | 6287 | struct btrfs_log_ctx *ctx) |
e02119d5 | 6288 | { |
6a912213 JB |
6289 | struct dentry *parent = dget_parent(dentry); |
6290 | int ret; | |
6291 | ||
f882274b | 6292 | ret = btrfs_log_inode_parent(trans, BTRFS_I(d_inode(dentry)), parent, |
48778179 | 6293 | LOG_INODE_ALL, ctx); |
6a912213 JB |
6294 | dput(parent); |
6295 | ||
6296 | return ret; | |
e02119d5 CM |
6297 | } |
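The return contract spelled out above is easiest to see from the caller's side: 0 means the dentry made it into the log, 1 means only a full transaction commit is safe, and btrfs_log_inode_parent() can also hand back BTRFS_NO_LOG_SYNC when there is nothing to sync. The fragment below is a deliberately simplified, hypothetical caller; the real fsync path does much more (ordered extent handling, locking, and syncing the log tree itself, which is outside this excerpt).

/* hypothetical, heavily simplified fsync-like caller */
static int example_fsync_path(struct btrfs_trans_handle *trans,
			      struct dentry *dentry, struct inode *inode)
{
	struct btrfs_log_ctx ctx;
	int ret;

	btrfs_init_log_ctx(&ctx, inode);

	ret = btrfs_log_dentry_safe(trans, dentry, &ctx);
	if (ret == BTRFS_NO_LOG_SYNC) {
		/* nothing needs to reach the log at all */
		return btrfs_end_transaction(trans);
	}
	if (ret == 0) {
		/*
		 * The dentry is in the log; sync the log tree here (omitted
		 * in this excerpt) and end the transaction.
		 */
		return btrfs_end_transaction(trans);
	}
	/* ret == 1 or an error: only a full transaction commit is safe */
	return btrfs_commit_transaction(trans);
}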
6298 | ||
6299 | /* | |
6300 | * should be called during mount to recover and replay any log trees | |
6301 | * from the FS | |
6302 | */ | |
6303 | int btrfs_recover_log_trees(struct btrfs_root *log_root_tree) | |
6304 | { | |
6305 | int ret; | |
6306 | struct btrfs_path *path; | |
6307 | struct btrfs_trans_handle *trans; | |
6308 | struct btrfs_key key; | |
6309 | struct btrfs_key found_key; | |
e02119d5 CM |
6310 | struct btrfs_root *log; |
6311 | struct btrfs_fs_info *fs_info = log_root_tree->fs_info; | |
6312 | struct walk_control wc = { | |
6313 | .process_func = process_one_buffer, | |
430a6626 | 6314 | .stage = LOG_WALK_PIN_ONLY, |
e02119d5 CM |
6315 | }; |
6316 | ||
e02119d5 | 6317 | path = btrfs_alloc_path(); |
db5b493a TI |
6318 | if (!path) |
6319 | return -ENOMEM; | |
6320 | ||
afcdd129 | 6321 | set_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags); |
e02119d5 | 6322 | |
4a500fd1 | 6323 | trans = btrfs_start_transaction(fs_info->tree_root, 0); |
79787eaa JM |
6324 | if (IS_ERR(trans)) { |
6325 | ret = PTR_ERR(trans); | |
6326 | goto error; | |
6327 | } | |
e02119d5 CM |
6328 | |
6329 | wc.trans = trans; | |
6330 | wc.pin = 1; | |
6331 | ||
db5b493a | 6332 | ret = walk_log_tree(trans, log_root_tree, &wc); |
79787eaa | 6333 | if (ret) { |
5d163e0e JM |
6334 | btrfs_handle_fs_error(fs_info, ret, |
6335 | "Failed to pin buffers while recovering log root tree."); | |
79787eaa JM |
6336 | goto error; |
6337 | } | |
e02119d5 CM |
6338 | |
6339 | again: | |
6340 | key.objectid = BTRFS_TREE_LOG_OBJECTID; | |
6341 | key.offset = (u64)-1; | |
962a298f | 6342 | key.type = BTRFS_ROOT_ITEM_KEY; |
e02119d5 | 6343 | |
d397712b | 6344 | while (1) { |
e02119d5 | 6345 | ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0); |
79787eaa JM |
6346 | |
6347 | if (ret < 0) { | |
34d97007 | 6348 | btrfs_handle_fs_error(fs_info, ret, |
79787eaa JM |
6349 | "Couldn't find tree log root."); |
6350 | goto error; | |
6351 | } | |
e02119d5 CM |
6352 | if (ret > 0) { |
6353 | if (path->slots[0] == 0) | |
6354 | break; | |
6355 | path->slots[0]--; | |
6356 | } | |
6357 | btrfs_item_key_to_cpu(path->nodes[0], &found_key, | |
6358 | path->slots[0]); | |
b3b4aa74 | 6359 | btrfs_release_path(path); |
e02119d5 CM |
6360 | if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID) |
6361 | break; | |
6362 | ||
62a2c73e | 6363 | log = btrfs_read_tree_root(log_root_tree, &found_key); |
79787eaa JM |
6364 | if (IS_ERR(log)) { |
6365 | ret = PTR_ERR(log); | |
34d97007 | 6366 | btrfs_handle_fs_error(fs_info, ret, |
79787eaa JM |
6367 | "Couldn't read tree log root."); |
6368 | goto error; | |
6369 | } | |
e02119d5 | 6370 | |
56e9357a DS |
6371 | wc.replay_dest = btrfs_get_fs_root(fs_info, found_key.offset, |
6372 | true); | |
79787eaa JM |
6373 | if (IS_ERR(wc.replay_dest)) { |
6374 | ret = PTR_ERR(wc.replay_dest); | |
9bc574de JB |
6375 | |
6376 | /* | |
6377 | * We didn't find the subvol, likely because it was | |
6378 | * deleted. This is ok, simply skip this log and go to | |
6379 | * the next one. | |
6380 | * | |
6381 | * We need to exclude the root because we can't have | |
6382 | * other log replays overwriting this log as we'll read | |
6383 | * it back in a few more times. This will keep our | |
6384 | * block from being modified, and we'll just bail for | |
6385 | * each subsequent pass. | |
6386 | */ | |
6387 | if (ret == -ENOENT) | |
9fce5704 | 6388 | ret = btrfs_pin_extent_for_log_replay(trans, |
9bc574de JB |
6389 | log->node->start, |
6390 | log->node->len); | |
00246528 | 6391 | btrfs_put_root(log); |
9bc574de JB |
6392 | |
6393 | if (!ret) | |
6394 | goto next; | |
5d163e0e JM |
6395 | btrfs_handle_fs_error(fs_info, ret, |
6396 | "Couldn't read target root for tree log recovery."); | |
79787eaa JM |
6397 | goto error; |
6398 | } | |
e02119d5 | 6399 | |
07d400a6 | 6400 | wc.replay_dest->log_root = log; |
2002ae11 JB |
6401 | ret = btrfs_record_root_in_trans(trans, wc.replay_dest); |
6402 | if (ret) | |
6403 | /* The loop needs to continue due to the root refs */ | |
6404 | btrfs_handle_fs_error(fs_info, ret, | |
6405 | "failed to record the log root in transaction"); | |
6406 | else | |
6407 | ret = walk_log_tree(trans, log, &wc); | |
e02119d5 | 6408 | |
b50c6e25 | 6409 | if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) { |
e02119d5 CM |
6410 | ret = fixup_inode_link_counts(trans, wc.replay_dest, |
6411 | path); | |
e02119d5 CM |
6412 | } |
6413 | ||
900c9981 LB |
6414 | if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) { |
6415 | struct btrfs_root *root = wc.replay_dest; | |
6416 | ||
6417 | btrfs_release_path(path); | |
6418 | ||
6419 | /* | |
6420 | * We have just replayed everything, and the highest | |
6421 | * objectid of fs roots probably has changed in case | |
6422 | * some inode_items got replayed. | |
6423 | * | |
6424 | * root->objectid_mutex is not acquired as log replay | |
6425 | * could only happen during mount. | |
6426 | */ | |
453e4873 | 6427 | ret = btrfs_init_root_free_objectid(root); |
900c9981 LB |
6428 | } |
6429 | ||
07d400a6 | 6430 | wc.replay_dest->log_root = NULL; |
00246528 | 6431 | btrfs_put_root(wc.replay_dest); |
00246528 | 6432 | btrfs_put_root(log); |
e02119d5 | 6433 | |
b50c6e25 JB |
6434 | if (ret) |
6435 | goto error; | |
9bc574de | 6436 | next: |
e02119d5 CM |
6437 | if (found_key.offset == 0) |
6438 | break; | |
9bc574de | 6439 | key.offset = found_key.offset - 1; |
e02119d5 | 6440 | } |
b3b4aa74 | 6441 | btrfs_release_path(path); |
e02119d5 CM |
6442 | |
6443 | /* step one is to pin it all, step two is to replay just inodes */ | |
6444 | if (wc.pin) { | |
6445 | wc.pin = 0; | |
6446 | wc.process_func = replay_one_buffer; | |
6447 | wc.stage = LOG_WALK_REPLAY_INODES; | |
6448 | goto again; | |
6449 | } | |
6450 | /* step three is to replay everything */ | |
6451 | if (wc.stage < LOG_WALK_REPLAY_ALL) { | |
6452 | wc.stage++; | |
6453 | goto again; | |
6454 | } | |
6455 | ||
6456 | btrfs_free_path(path); | |
6457 | ||
abefa55a | 6458 | /* step 4: commit the transaction, which also unpins the blocks */ |
3a45bb20 | 6459 | ret = btrfs_commit_transaction(trans); |
abefa55a JB |
6460 | if (ret) |
6461 | return ret; | |
6462 | ||
e02119d5 | 6463 | log_root_tree->log_root = NULL; |
afcdd129 | 6464 | clear_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags); |
00246528 | 6465 | btrfs_put_root(log_root_tree); |
79787eaa | 6466 | |
abefa55a | 6467 | return 0; |
79787eaa | 6468 | error: |
b50c6e25 | 6469 | if (wc.trans) |
3a45bb20 | 6470 | btrfs_end_transaction(wc.trans); |
1aeb6b56 | 6471 | clear_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags); |
79787eaa JM |
6472 | btrfs_free_path(path); |
6473 | return ret; | |
e02119d5 | 6474 | } |
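For context, the function above is driven entirely from mount: the caller only hands it the log root tree, and the multi-pass walk (pin everything, replay inodes, keep advancing the stage up to LOG_WALK_REPLAY_ALL, then commit, which also unpins the blocks) happens internally. A minimal, hypothetical mount-time call site could look like this; the real mount code does more setup and error handling.

/* hypothetical fragment of a mount path that has already read the log root tree */
static int example_replay_log_at_mount(struct btrfs_fs_info *fs_info,
				       struct btrfs_root *log_root_tree)
{
	int ret;

	if (!log_root_tree)
		return 0;	/* nothing was logged, nothing to replay */

	ret = btrfs_recover_log_trees(log_root_tree);
	if (ret)
		btrfs_handle_fs_error(fs_info, ret, "log replay failed");
	return ret;
}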
12fcfd22 CM |
6475 | |
6476 | /* | |
6477 | * there are some corner cases where we want to force a full | |
6478 | * commit instead of allowing a directory to be logged. | |
6479 | * | |
6480 | * They revolve around files that were unlinked from the directory, and | |
6481 | * this function updates the parent directory so that a full commit is | |
6482 | * properly done if it is fsync'd later after the unlinks are done. | |
2be63d5c FM |
6483 | * |
6484 | * Must be called before the unlink operations (updates to the subvolume tree, | |
6485 | * inodes, etc) are done. | |
12fcfd22 CM |
6486 | */ |
6487 | void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans, | |
4176bdbf | 6488 | struct btrfs_inode *dir, struct btrfs_inode *inode, |
12fcfd22 CM |
6489 | int for_rename) |
6490 | { | |
af4176b4 CM |
6491 | /* |
6492 | * when we're logging a file, if it hasn't been renamed | |
6493 | * or unlinked, and its inode is fully committed on disk, | |
6494 | * we don't have to worry about walking up the directory chain | |
6495 | * to log its parents. | |
6496 | * | |
6497 | * So, we use the last_unlink_trans field to put this transid | |
6498 | * into the file. When the file is logged we check it and | |
6499 | * don't log the parents if the file is fully on disk. | |
6500 | */ | |
4176bdbf NB |
6501 | mutex_lock(&inode->log_mutex); |
6502 | inode->last_unlink_trans = trans->transid; | |
6503 | mutex_unlock(&inode->log_mutex); | |
af4176b4 | 6504 | |
12fcfd22 CM |
6505 | /* |
6506 | * if this directory was already logged any new | |
6507 | * names for this file/dir will get recorded | |
6508 | */ | |
4176bdbf | 6509 | if (dir->logged_trans == trans->transid) |
12fcfd22 CM |
6510 | return; |
6511 | ||
6512 | /* | |
6513 | * if the inode we're about to unlink was logged, | |
6514 | * the log will be properly updated for any new names | |
6515 | */ | |
4176bdbf | 6516 | if (inode->logged_trans == trans->transid) |
12fcfd22 CM |
6517 | return; |
6518 | ||
6519 | /* | |
6520 | * when renaming files across directories, if the directory | |
6521 | * we're unlinking from gets fsync'd later on, there's | |
6522 | * no way to find the destination directory later and fsync it | |
6523 | * properly. So, we have to be conservative and force commits | |
6524 | * so the new name gets discovered. | |
6525 | */ | |
6526 | if (for_rename) | |
6527 | goto record; | |
6528 | ||
6529 | /* we can safely do the unlink without any special recording */ | |
6530 | return; | |
6531 | ||
6532 | record: | |
4176bdbf NB |
6533 | mutex_lock(&dir->log_mutex); |
6534 | dir->last_unlink_trans = trans->transid; | |
6535 | mutex_unlock(&dir->log_mutex); | |
1ec9a1ae FM |
6536 | } |
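Since btrfs_record_unlink_dir() only stores transids under the inodes' log_mutex, the one thing a caller must get right is ordering: it has to run before any of the subvolume tree updates for the unlink or rename, exactly as the comment above the function says. A hypothetical unlink path might be shaped like this (the actual removal steps are elided).

/* hypothetical, simplified unlink path showing only the required ordering */
static int example_unlink(struct btrfs_trans_handle *trans,
			  struct btrfs_inode *dir, struct btrfs_inode *inode)
{
	/* record first, while the old state is still intact */
	btrfs_record_unlink_dir(trans, dir, inode, 0 /* not part of a rename */);

	/*
	 * ... only now remove the dir item and dir index, drop the inode
	 * ref and update link counts in the subvolume tree (omitted) ...
	 */
	return 0;
}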
6537 | ||
6538 | /* | |
6539 | * Make sure that if someone attempts to fsync the parent directory of a deleted | |
6540 | * snapshot, it ends up triggering a transaction commit. This is to guarantee | |
6541 | * that after replaying the log tree of the parent directory's root we will not | |
6542 | * see the snapshot anymore and at log replay time we will not see any log tree | |
6543 | * corresponding to the deleted snapshot's root, which could lead to replaying | |
6544 | * it after replaying the log tree of the parent directory (which would replay | |
6545 | * the snapshot delete operation). | |
2be63d5c FM |
6546 | * |
6547 | * Must be called before the actual snapshot destroy operation (updates to the | |
6548 | * parent root and tree of tree roots trees, etc) are done. | |
1ec9a1ae FM |
6549 | */ |
6550 | void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans, | |
43663557 | 6551 | struct btrfs_inode *dir) |
1ec9a1ae | 6552 | { |
43663557 NB |
6553 | mutex_lock(&dir->log_mutex); |
6554 | dir->last_unlink_trans = trans->transid; | |
6555 | mutex_unlock(&dir->log_mutex); | |
12fcfd22 CM |
6556 | } |
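The same ordering applies here: a hypothetical snapshot-deletion path would call this helper first, while the parent root and the tree of tree roots still describe the snapshot, and only then do the actual destroy.

/* hypothetical fragment of a snapshot deletion path */
static void example_before_snapshot_destroy(struct btrfs_trans_handle *trans,
					    struct inode *parent_dir)
{
	btrfs_record_snapshot_destroy(trans, BTRFS_I(parent_dir));
	/* ... then drop the snapshot's root ref, dir entries, etc. ... */
}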
6557 | ||
6558 | /* | |
6559 | * Call this after adding a new name for a file and it will properly | |
6560 | * update the log to reflect the new name. | |
12fcfd22 | 6561 | */ |
75b463d2 | 6562 | void btrfs_log_new_name(struct btrfs_trans_handle *trans, |
9ca5fbfb | 6563 | struct btrfs_inode *inode, struct btrfs_inode *old_dir, |
75b463d2 | 6564 | struct dentry *parent) |
12fcfd22 | 6565 | { |
75b463d2 | 6566 | struct btrfs_log_ctx ctx; |
12fcfd22 | 6567 | |
af4176b4 CM |
6568 | /* |
6569 | * this will force the logging code to walk the dentry chain | |
6570 | * up for the file | |
6571 | */ | |
9a6509c4 | 6572 | if (!S_ISDIR(inode->vfs_inode.i_mode)) |
9ca5fbfb | 6573 | inode->last_unlink_trans = trans->transid; |
af4176b4 | 6574 | |
12fcfd22 CM |
6575 | /* |
6576 | * if this inode hasn't been logged and the directory we're renaming it | |
6577 | * from hasn't been logged, we don't need to log it | |
6578 | */ | |
ecc64fab FM |
6579 | if (!inode_logged(trans, inode) && |
6580 | (!old_dir || !inode_logged(trans, old_dir))) | |
75b463d2 | 6581 | return; |
12fcfd22 | 6582 | |
54a40fc3 FM |
6583 | /* |
6584 | * If we are doing a rename (old_dir is not NULL) from a directory that | |
6585 | * was previously logged, make sure the next log attempt on the directory | |
6586 | * is not skipped and logs the inode again. This is because the log may | |
6587 | * not currently be authoritative for a range including the old | |
6588 | * BTRFS_DIR_ITEM_KEY and BTRFS_DIR_INDEX_KEY keys, so we want to make | |
6589 | * sure after a log replay we do not end up with both the new and old | |
6590 | * dentries around (in case the inode is a directory we would have a | |
6591 | * directory with two hard links and 2 inode references for different | |
6592 | * parents). The next log attempt of old_dir will happen at | |
6593 | * btrfs_log_all_parents(), called through btrfs_log_inode_parent() | |
6594 | * below, because we have previously set inode->last_unlink_trans to the | |
6595 | * current transaction ID, either here or at btrfs_record_unlink_dir() in | |
6596 | * case inode is a directory. | |
6597 | */ | |
6598 | if (old_dir) | |
6599 | old_dir->logged_trans = 0; | |
6600 | ||
75b463d2 FM |
6601 | btrfs_init_log_ctx(&ctx, &inode->vfs_inode); |
6602 | ctx.logging_new_name = true; | |
6603 | /* | |
6604 | * We don't care about the return value. If we fail to log the new name | |
6605 | * then we know the next attempt to sync the log will fall back to a full | |
6606 | * transaction commit (due to a call to btrfs_set_log_full_commit()), so | |
6607 | * we don't need to worry about getting a log committed that has an | |
6608 | * inconsistent state after a rename operation. | |
6609 | */ | |
48778179 | 6610 | btrfs_log_inode_parent(trans, inode, parent, LOG_INODE_EXISTS, &ctx); |
12fcfd22 CM |
6611 | } |
6612 |
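A closing note on btrfs_log_new_name(): it is meant to be called after the new name has been inserted in the subvolume tree. For a plain link there is no old directory, so old_dir is NULL; for a rename the old directory is passed so that its next log attempt is not skipped. The fragments below are hypothetical callers illustrating both cases.

/* hypothetical caller after a link(2) has added the new name */
static void example_after_link(struct btrfs_trans_handle *trans,
			       struct inode *inode, struct dentry *dentry)
{
	struct dentry *parent = dget_parent(dentry);

	btrfs_log_new_name(trans, BTRFS_I(inode), NULL /* no old dir */, parent);
	dput(parent);
}

/* hypothetical caller after a rename has moved the name to a new directory */
static void example_after_rename(struct btrfs_trans_handle *trans,
				 struct inode *inode, struct inode *old_dir,
				 struct dentry *new_dentry)
{
	struct dentry *parent = dget_parent(new_dentry);

	btrfs_log_new_name(trans, BTRFS_I(inode), BTRFS_I(old_dir), parent);
	dput(parent);
}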