Git Repo - linux.git/blame - fs/btrfs/ctree.c
Merge branch 'akpm' (patches from Andrew)
c1d7c514 1// SPDX-License-Identifier: GPL-2.0
6cbd5570 2/*
d352ac68 3 * Copyright (C) 2007,2008 Oracle. All rights reserved.
6cbd5570
CM
4 */
5
a6b6e75e 6#include <linux/sched.h>
5a0e3ad6 7#include <linux/slab.h>
bd989ba3 8#include <linux/rbtree.h>
adf02123 9#include <linux/mm.h>
eb60ceac
CM
10#include "ctree.h"
11#include "disk-io.h"
7f5c1516 12#include "transaction.h"
5f39d397 13#include "print-tree.h"
925baedd 14#include "locking.h"
de37aa51 15#include "volumes.h"
f616f5cd 16#include "qgroup.h"
9a8dd150 17
e089f05c
CM
18static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
19 *root, struct btrfs_path *path, int level);
310712b2
OS
20static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root *root,
21 const struct btrfs_key *ins_key, struct btrfs_path *path,
22 int data_size, int extend);
5f39d397 23static int push_node_left(struct btrfs_trans_handle *trans,
2ff7e61e 24 struct extent_buffer *dst,
971a1f66 25 struct extent_buffer *src, int empty);
5f39d397 26static int balance_node_right(struct btrfs_trans_handle *trans,
5f39d397
CM
27 struct extent_buffer *dst_buf,
28 struct extent_buffer *src_buf);
afe5fea7
TI
29static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
30 int level, int slot);
d97e63b6 31
af024ed2
JT
32static const struct btrfs_csums {
33 u16 size;
59a0fcdb
DS
34 const char name[10];
35 const char driver[12];
af024ed2
JT
36} btrfs_csums[] = {
37 [BTRFS_CSUM_TYPE_CRC32] = { .size = 4, .name = "crc32c" },
3951e7f0 38 [BTRFS_CSUM_TYPE_XXHASH] = { .size = 8, .name = "xxhash64" },
3831bf00 39 [BTRFS_CSUM_TYPE_SHA256] = { .size = 32, .name = "sha256" },
352ae07b
DS
40 [BTRFS_CSUM_TYPE_BLAKE2] = { .size = 32, .name = "blake2b",
41 .driver = "blake2b-256" },
af024ed2
JT
42};
43
44int btrfs_super_csum_size(const struct btrfs_super_block *s)
45{
46 u16 t = btrfs_super_csum_type(s);
47 /*
48 * csum type is validated at mount time
49 */
50 return btrfs_csums[t].size;
51}
52
53const char *btrfs_super_csum_name(u16 csum_type)
54{
55 /* csum type is validated at mount time */
56 return btrfs_csums[csum_type].name;
57}
58
b4e967be
DS
59/*
60 * Return driver name if defined, otherwise the name that's also a valid driver
61 * name
62 */
63const char *btrfs_super_csum_driver(u16 csum_type)
64{
65 /* csum type is validated at mount time */
59a0fcdb
DS
66 return btrfs_csums[csum_type].driver[0] ?
67 btrfs_csums[csum_type].driver :
b4e967be
DS
68 btrfs_csums[csum_type].name;
69}
70
604997b4 71size_t __attribute_const__ btrfs_get_num_csums(void)
f7cea56c
DS
72{
73 return ARRAY_SIZE(btrfs_csums);
74}
75
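/*
 * Editor's note: an illustrative, self-contained sketch (not part of
 * ctree.c) of the lookup pattern used by the btrfs_csums[] table above:
 * the checksum type indexes a small table giving digest size, display
 * name and crypto driver name, and btrfs_super_csum_driver() falls back
 * to the name when no explicit driver string is set. The enum, struct,
 * csum_driver() helper and main() harness below are hypothetical
 * userspace stand-ins, not btrfs APIs.
 */
#include <stdio.h>

enum csum_type { CSUM_CRC32 = 0, CSUM_XXHASH, CSUM_SHA256, CSUM_BLAKE2 };

static const struct {
	unsigned short size;
	const char name[10];
	const char driver[12];
} csums[] = {
	[CSUM_CRC32]  = { .size = 4,  .name = "crc32c" },
	[CSUM_XXHASH] = { .size = 8,  .name = "xxhash64" },
	[CSUM_SHA256] = { .size = 32, .name = "sha256" },
	[CSUM_BLAKE2] = { .size = 32, .name = "blake2b", .driver = "blake2b-256" },
};

static const char *csum_driver(enum csum_type t)
{
	/* Same fallback rule as btrfs_super_csum_driver() above. */
	return csums[t].driver[0] ? csums[t].driver : csums[t].name;
}

int main(void)
{
	/* Prints: blake2b uses driver blake2b-256 (32 byte digest) */
	printf("%s uses driver %s (%u byte digest)\n", csums[CSUM_BLAKE2].name,
	       csum_driver(CSUM_BLAKE2), csums[CSUM_BLAKE2].size);
	return 0;
}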
df24a2b9 76struct btrfs_path *btrfs_alloc_path(void)
2c90e5d6 77{
e2c89907 78 return kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
2c90e5d6
CM
79}
80
d352ac68 81/* this also releases the path */
df24a2b9 82void btrfs_free_path(struct btrfs_path *p)
be0e5c09 83{
ff175d57
JJ
84 if (!p)
85 return;
b3b4aa74 86 btrfs_release_path(p);
df24a2b9 87 kmem_cache_free(btrfs_path_cachep, p);
be0e5c09
CM
88}
89
d352ac68
CM
90/*
91 * path release drops references on the extent buffers in the path
92 * and it drops any locks held by this path
93 *
94 * It is safe to call this on paths that hold no locks or extent buffers.
95 */
b3b4aa74 96noinline void btrfs_release_path(struct btrfs_path *p)
eb60ceac
CM
97{
98 int i;
a2135011 99
234b63a0 100 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
3f157a2f 101 p->slots[i] = 0;
eb60ceac 102 if (!p->nodes[i])
925baedd
CM
103 continue;
104 if (p->locks[i]) {
bd681513 105 btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
925baedd
CM
106 p->locks[i] = 0;
107 }
5f39d397 108 free_extent_buffer(p->nodes[i]);
3f157a2f 109 p->nodes[i] = NULL;
eb60ceac
CM
110 }
111}
112
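/*
 * Editor's note: an illustrative sketch (not part of ctree.c) of the usual
 * btrfs_path lifecycle built from the helpers above: allocate the path, run
 * a search, then drop it, which releases the extent buffer references and
 * any locks recorded in the path. The example_lookup() wrapper and the key
 * values are hypothetical; btrfs_alloc_path(), btrfs_search_slot(),
 * btrfs_release_path() and btrfs_free_path() are the real helpers.
 */
static int example_lookup(struct btrfs_root *root, u64 objectid)
{
	struct btrfs_path *path;
	struct btrfs_key key = {
		.objectid = objectid,
		.type = BTRFS_INODE_ITEM_KEY,
		.offset = 0,
	};
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* Read-only search: no transaction, no insert reservation, no COW. */
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);

	/* ... inspect path->nodes[0] / path->slots[0] on success ... */

	/* Frees the path after releasing its buffers and locks. */
	btrfs_free_path(path);
	return ret;
}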
d352ac68
CM
113/*
114 * safely gets a reference on the root node of a tree. A lock
115 * is not taken, so a concurrent writer may put a different node
116 * at the root of the tree. See btrfs_lock_root_node for the
117 * looping required.
118 *
119 * The extent buffer returned by this has a reference taken, so
120 * it won't disappear. It may stop being the root of the tree
121 * at any time because there are no locks held.
122 */
925baedd
CM
123struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
124{
125 struct extent_buffer *eb;
240f62c8 126
3083ee2e
JB
127 while (1) {
128 rcu_read_lock();
129 eb = rcu_dereference(root->node);
130
131 /*
132 * RCU really hurts here, we could free up the root node because
01327610 133 * it was COWed but we may not get the new root node yet so do
3083ee2e
JB
134 * the inc_not_zero dance and if it doesn't work then
135 * synchronize_rcu and try again.
136 */
137 if (atomic_inc_not_zero(&eb->refs)) {
138 rcu_read_unlock();
139 break;
140 }
141 rcu_read_unlock();
142 synchronize_rcu();
143 }
925baedd
CM
144 return eb;
145}
146
92a7cc42
QW
147/*
148 * Cow-only roots (not-shareable trees, everything that is not a subvolume or
149 * reloc root) just get put onto a simple dirty list. The transaction walks
150 * this list to make sure they get properly updated on disk.
d352ac68 151 */
0b86a832
CM
152static void add_root_to_dirty_list(struct btrfs_root *root)
153{
0b246afa
JM
154 struct btrfs_fs_info *fs_info = root->fs_info;
155
e7070be1
JB
156 if (test_bit(BTRFS_ROOT_DIRTY, &root->state) ||
157 !test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state))
158 return;
159
0b246afa 160 spin_lock(&fs_info->trans_lock);
e7070be1
JB
161 if (!test_and_set_bit(BTRFS_ROOT_DIRTY, &root->state)) {
162 /* Want the extent tree to be the last on the list */
4fd786e6 163 if (root->root_key.objectid == BTRFS_EXTENT_TREE_OBJECTID)
e7070be1 164 list_move_tail(&root->dirty_list,
0b246afa 165 &fs_info->dirty_cowonly_roots);
e7070be1
JB
166 else
167 list_move(&root->dirty_list,
0b246afa 168 &fs_info->dirty_cowonly_roots);
0b86a832 169 }
0b246afa 170 spin_unlock(&fs_info->trans_lock);
0b86a832
CM
171}
172
d352ac68
CM
173/*
174 * used by snapshot creation to make a copy of a root for a tree with
175 * a given objectid. The buffer with the new root node is returned in
176 * cow_ret, and this func returns zero on success or a negative error code.
177 */
be20aa9d
CM
178int btrfs_copy_root(struct btrfs_trans_handle *trans,
179 struct btrfs_root *root,
180 struct extent_buffer *buf,
181 struct extent_buffer **cow_ret, u64 new_root_objectid)
182{
0b246afa 183 struct btrfs_fs_info *fs_info = root->fs_info;
be20aa9d 184 struct extent_buffer *cow;
be20aa9d
CM
185 int ret = 0;
186 int level;
5d4f98a2 187 struct btrfs_disk_key disk_key;
be20aa9d 188
92a7cc42 189 WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
0b246afa 190 trans->transid != fs_info->running_transaction->transid);
92a7cc42 191 WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
27cdeb70 192 trans->transid != root->last_trans);
be20aa9d
CM
193
194 level = btrfs_header_level(buf);
5d4f98a2
YZ
195 if (level == 0)
196 btrfs_item_key(buf, &disk_key, 0);
197 else
198 btrfs_node_key(buf, &disk_key, 0);
31840ae1 199
4d75f8a9 200 cow = btrfs_alloc_tree_block(trans, root, 0, new_root_objectid,
cf6f34aa
JB
201 &disk_key, level, buf->start, 0,
202 BTRFS_NESTING_NEW_ROOT);
5d4f98a2 203 if (IS_ERR(cow))
be20aa9d
CM
204 return PTR_ERR(cow);
205
58e8012c 206 copy_extent_buffer_full(cow, buf);
be20aa9d
CM
207 btrfs_set_header_bytenr(cow, cow->start);
208 btrfs_set_header_generation(cow, trans->transid);
5d4f98a2
YZ
209 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
210 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
211 BTRFS_HEADER_FLAG_RELOC);
212 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
213 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
214 else
215 btrfs_set_header_owner(cow, new_root_objectid);
be20aa9d 216
de37aa51 217 write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid);
2b82032c 218
be20aa9d 219 WARN_ON(btrfs_header_generation(buf) > trans->transid);
5d4f98a2 220 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
e339a6b0 221 ret = btrfs_inc_ref(trans, root, cow, 1);
5d4f98a2 222 else
e339a6b0 223 ret = btrfs_inc_ref(trans, root, cow, 0);
867ed321 224 if (ret) {
72c9925f
FM
225 btrfs_tree_unlock(cow);
226 free_extent_buffer(cow);
867ed321 227 btrfs_abort_transaction(trans, ret);
be20aa9d 228 return ret;
867ed321 229 }
be20aa9d
CM
230
231 btrfs_mark_buffer_dirty(cow);
232 *cow_ret = cow;
233 return 0;
234}
235
bd989ba3
JS
236enum mod_log_op {
237 MOD_LOG_KEY_REPLACE,
238 MOD_LOG_KEY_ADD,
239 MOD_LOG_KEY_REMOVE,
240 MOD_LOG_KEY_REMOVE_WHILE_FREEING,
241 MOD_LOG_KEY_REMOVE_WHILE_MOVING,
242 MOD_LOG_MOVE_KEYS,
243 MOD_LOG_ROOT_REPLACE,
244};
245
bd989ba3
JS
246struct tree_mod_root {
247 u64 logical;
248 u8 level;
249};
250
251struct tree_mod_elem {
252 struct rb_node node;
298cfd36 253 u64 logical;
097b8a7c 254 u64 seq;
bd989ba3
JS
255 enum mod_log_op op;
256
257 /* this is used for MOD_LOG_KEY_* and MOD_LOG_MOVE_KEYS operations */
258 int slot;
259
260 /* this is used for MOD_LOG_KEY* and MOD_LOG_ROOT_REPLACE */
261 u64 generation;
262
263 /* those are used for op == MOD_LOG_KEY_{REPLACE,REMOVE} */
264 struct btrfs_disk_key key;
265 u64 blockptr;
266
267 /* this is used for op == MOD_LOG_MOVE_KEYS */
b6dfa35b
DS
268 struct {
269 int dst_slot;
270 int nr_items;
271 } move;
bd989ba3
JS
272
273 /* this is used for op == MOD_LOG_ROOT_REPLACE */
274 struct tree_mod_root old_root;
275};
276
fc36ed7e 277/*
fcebe456 278 * Pull a new tree mod seq number for our operation.
fc36ed7e 279 */
fcebe456 280static inline u64 btrfs_inc_tree_mod_seq(struct btrfs_fs_info *fs_info)
fc36ed7e
JS
281{
282 return atomic64_inc_return(&fs_info->tree_mod_seq);
283}
284
097b8a7c
JS
285/*
286 * This adds a new blocker to the tree mod log's blocker list if the @elem
287 * passed does not already have a sequence number set. So when a caller expects
288 * to record tree modifications, it should ensure that elem->seq is zero
289 * before calling btrfs_get_tree_mod_seq.
290 * Returns a fresh, unused tree log modification sequence number, even if no new
291 * blocker was added.
292 */
293u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
294 struct seq_list *elem)
bd989ba3 295{
b1a09f1e 296 write_lock(&fs_info->tree_mod_log_lock);
097b8a7c 297 if (!elem->seq) {
fcebe456 298 elem->seq = btrfs_inc_tree_mod_seq(fs_info);
097b8a7c
JS
299 list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
300 }
b1a09f1e 301 write_unlock(&fs_info->tree_mod_log_lock);
097b8a7c 302
fcebe456 303 return elem->seq;
bd989ba3
JS
304}
305
306void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
307 struct seq_list *elem)
308{
309 struct rb_root *tm_root;
310 struct rb_node *node;
311 struct rb_node *next;
bd989ba3
JS
312 struct tree_mod_elem *tm;
313 u64 min_seq = (u64)-1;
314 u64 seq_putting = elem->seq;
315
316 if (!seq_putting)
317 return;
318
7227ff4d 319 write_lock(&fs_info->tree_mod_log_lock);
bd989ba3 320 list_del(&elem->list);
097b8a7c 321 elem->seq = 0;
bd989ba3 322
42836cf4
FM
323 if (!list_empty(&fs_info->tree_mod_seq_list)) {
324 struct seq_list *first;
325
326 first = list_first_entry(&fs_info->tree_mod_seq_list,
327 struct seq_list, list);
328 if (seq_putting > first->seq) {
329 /*
330 * Blocker with lower sequence number exists, we
331 * cannot remove anything from the log.
332 */
333 write_unlock(&fs_info->tree_mod_log_lock);
334 return;
bd989ba3 335 }
42836cf4 336 min_seq = first->seq;
bd989ba3 337 }
097b8a7c 338
bd989ba3
JS
339 /*
340 * anything that's lower than the lowest existing (read: blocked)
341 * sequence number can be removed from the tree.
342 */
bd989ba3
JS
343 tm_root = &fs_info->tree_mod_log;
344 for (node = rb_first(tm_root); node; node = next) {
345 next = rb_next(node);
6b4df8b6 346 tm = rb_entry(node, struct tree_mod_elem, node);
6609fee8 347 if (tm->seq >= min_seq)
bd989ba3
JS
348 continue;
349 rb_erase(node, tm_root);
bd989ba3
JS
350 kfree(tm);
351 }
b1a09f1e 352 write_unlock(&fs_info->tree_mod_log_lock);
bd989ba3
JS
353}
354
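/*
 * Editor's note: an illustrative sketch (not part of ctree.c) of the
 * blocker contract documented above: a caller that wants consistent
 * rewinds registers a seq_list element (seq initialized to zero so a
 * fresh number is assigned), uses the returned sequence as its time_seq,
 * and drops the blocker when done so older log entries can be pruned
 * again. The example_tree_mod_seq_hold() wrapper is hypothetical.
 */
static void example_tree_mod_seq_hold(struct btrfs_fs_info *fs_info)
{
	struct seq_list elem = SEQ_LIST_INIT(elem);
	u64 time_seq;

	time_seq = btrfs_get_tree_mod_seq(fs_info, &elem);

	/*
	 * While 'elem' is on tree_mod_seq_list, entries with seq >= time_seq
	 * are kept, so rewinds to 'time_seq' (e.g. btrfs_search_old_slot())
	 * see a stable history.
	 */
	(void)time_seq;

	btrfs_put_tree_mod_seq(fs_info, &elem);
}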
355/*
356 * key order of the log:
298cfd36 357 * node/leaf start address -> sequence
bd989ba3 358 *
298cfd36
CR
359 * The 'start address' is the logical address of the *new* root node
360 * for root replace operations, or the logical address of the affected
361 * block for all other operations.
bd989ba3
JS
362 */
363static noinline int
364__tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
365{
366 struct rb_root *tm_root;
367 struct rb_node **new;
368 struct rb_node *parent = NULL;
369 struct tree_mod_elem *cur;
c8cc6341 370
73e82fe4
DS
371 lockdep_assert_held_write(&fs_info->tree_mod_log_lock);
372
fcebe456 373 tm->seq = btrfs_inc_tree_mod_seq(fs_info);
bd989ba3 374
bd989ba3
JS
375 tm_root = &fs_info->tree_mod_log;
376 new = &tm_root->rb_node;
377 while (*new) {
6b4df8b6 378 cur = rb_entry(*new, struct tree_mod_elem, node);
bd989ba3 379 parent = *new;
298cfd36 380 if (cur->logical < tm->logical)
bd989ba3 381 new = &((*new)->rb_left);
298cfd36 382 else if (cur->logical > tm->logical)
bd989ba3 383 new = &((*new)->rb_right);
097b8a7c 384 else if (cur->seq < tm->seq)
bd989ba3 385 new = &((*new)->rb_left);
097b8a7c 386 else if (cur->seq > tm->seq)
bd989ba3 387 new = &((*new)->rb_right);
5de865ee
FDBM
388 else
389 return -EEXIST;
bd989ba3
JS
390 }
391
392 rb_link_node(&tm->node, parent, new);
393 rb_insert_color(&tm->node, tm_root);
5de865ee 394 return 0;
bd989ba3
JS
395}
396
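/*
 * Editor's note: an illustrative, self-contained sketch (not part of
 * ctree.c) of the ordering __tree_mod_log_insert() above relies on: the
 * rb tree is keyed by the pair (block logical address, sequence number),
 * so all modifications of one block are grouped together and sorted by
 * age, and an exact duplicate pair is rejected with -EEXIST. The struct
 * and comparator below are hypothetical stand-ins for tree_mod_elem.
 */
struct example_mod_key {
	unsigned long long logical;	/* eb->start of the affected block */
	unsigned long long seq;		/* btrfs_inc_tree_mod_seq() value */
};

static int example_mod_key_cmp(const struct example_mod_key *a,
			       const struct example_mod_key *b)
{
	if (a->logical != b->logical)
		return a->logical < b->logical ? -1 : 1;
	if (a->seq != b->seq)
		return a->seq < b->seq ? -1 : 1;
	return 0;	/* duplicate (logical, seq): insertion fails */
}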
097b8a7c
JS
397/*
398 * Determines if logging can be omitted. Returns 1 if it can. Otherwise, it
399 * returns zero with the tree_mod_log_lock acquired. The caller must hold
400 * this until all tree mod log insertions are recorded in the rb tree and then
b1a09f1e 401 * write unlock fs_info::tree_mod_log_lock.
097b8a7c 402 */
e9b7fd4d
JS
403static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info,
404 struct extent_buffer *eb) {
405 smp_mb();
406 if (list_empty(&(fs_info)->tree_mod_seq_list))
407 return 1;
097b8a7c
JS
408 if (eb && btrfs_header_level(eb) == 0)
409 return 1;
5de865ee 410
b1a09f1e 411 write_lock(&fs_info->tree_mod_log_lock);
5de865ee 412 if (list_empty(&(fs_info)->tree_mod_seq_list)) {
b1a09f1e 413 write_unlock(&fs_info->tree_mod_log_lock);
5de865ee
FDBM
414 return 1;
415 }
416
e9b7fd4d
JS
417 return 0;
418}
419
5de865ee
FDBM
420/* Similar to tree_mod_dont_log, but doesn't acquire any locks. */
421static inline int tree_mod_need_log(const struct btrfs_fs_info *fs_info,
422 struct extent_buffer *eb)
423{
424 smp_mb();
425 if (list_empty(&(fs_info)->tree_mod_seq_list))
426 return 0;
427 if (eb && btrfs_header_level(eb) == 0)
428 return 0;
429
430 return 1;
431}
432
433static struct tree_mod_elem *
434alloc_tree_mod_elem(struct extent_buffer *eb, int slot,
435 enum mod_log_op op, gfp_t flags)
bd989ba3 436{
097b8a7c 437 struct tree_mod_elem *tm;
bd989ba3 438
c8cc6341
JB
439 tm = kzalloc(sizeof(*tm), flags);
440 if (!tm)
5de865ee 441 return NULL;
bd989ba3 442
298cfd36 443 tm->logical = eb->start;
bd989ba3
JS
444 if (op != MOD_LOG_KEY_ADD) {
445 btrfs_node_key(eb, &tm->key, slot);
446 tm->blockptr = btrfs_node_blockptr(eb, slot);
447 }
448 tm->op = op;
449 tm->slot = slot;
450 tm->generation = btrfs_node_ptr_generation(eb, slot);
5de865ee 451 RB_CLEAR_NODE(&tm->node);
bd989ba3 452
5de865ee 453 return tm;
097b8a7c
JS
454}
455
e09c2efe
DS
456static noinline int tree_mod_log_insert_key(struct extent_buffer *eb, int slot,
457 enum mod_log_op op, gfp_t flags)
097b8a7c 458{
5de865ee
FDBM
459 struct tree_mod_elem *tm;
460 int ret;
461
e09c2efe 462 if (!tree_mod_need_log(eb->fs_info, eb))
5de865ee
FDBM
463 return 0;
464
465 tm = alloc_tree_mod_elem(eb, slot, op, flags);
466 if (!tm)
467 return -ENOMEM;
468
e09c2efe 469 if (tree_mod_dont_log(eb->fs_info, eb)) {
5de865ee 470 kfree(tm);
097b8a7c 471 return 0;
5de865ee
FDBM
472 }
473
e09c2efe 474 ret = __tree_mod_log_insert(eb->fs_info, tm);
b1a09f1e 475 write_unlock(&eb->fs_info->tree_mod_log_lock);
5de865ee
FDBM
476 if (ret)
477 kfree(tm);
097b8a7c 478
5de865ee 479 return ret;
097b8a7c
JS
480}
481
6074d45f
DS
482static noinline int tree_mod_log_insert_move(struct extent_buffer *eb,
483 int dst_slot, int src_slot, int nr_items)
bd989ba3 484{
5de865ee
FDBM
485 struct tree_mod_elem *tm = NULL;
486 struct tree_mod_elem **tm_list = NULL;
487 int ret = 0;
bd989ba3 488 int i;
5de865ee 489 int locked = 0;
bd989ba3 490
6074d45f 491 if (!tree_mod_need_log(eb->fs_info, eb))
f395694c 492 return 0;
bd989ba3 493
176ef8f5 494 tm_list = kcalloc(nr_items, sizeof(struct tree_mod_elem *), GFP_NOFS);
5de865ee
FDBM
495 if (!tm_list)
496 return -ENOMEM;
497
176ef8f5 498 tm = kzalloc(sizeof(*tm), GFP_NOFS);
5de865ee
FDBM
499 if (!tm) {
500 ret = -ENOMEM;
501 goto free_tms;
502 }
503
298cfd36 504 tm->logical = eb->start;
5de865ee
FDBM
505 tm->slot = src_slot;
506 tm->move.dst_slot = dst_slot;
507 tm->move.nr_items = nr_items;
508 tm->op = MOD_LOG_MOVE_KEYS;
509
510 for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
511 tm_list[i] = alloc_tree_mod_elem(eb, i + dst_slot,
176ef8f5 512 MOD_LOG_KEY_REMOVE_WHILE_MOVING, GFP_NOFS);
5de865ee
FDBM
513 if (!tm_list[i]) {
514 ret = -ENOMEM;
515 goto free_tms;
516 }
517 }
518
6074d45f 519 if (tree_mod_dont_log(eb->fs_info, eb))
5de865ee
FDBM
520 goto free_tms;
521 locked = 1;
522
01763a2e
JS
523 /*
524 * When we overwrite something during the move, we log these removals.
525 * This can only happen when we move towards the beginning of the
526 * buffer, i.e. dst_slot < src_slot.
527 */
bd989ba3 528 for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
6074d45f 529 ret = __tree_mod_log_insert(eb->fs_info, tm_list[i]);
5de865ee
FDBM
530 if (ret)
531 goto free_tms;
bd989ba3
JS
532 }
533
6074d45f 534 ret = __tree_mod_log_insert(eb->fs_info, tm);
5de865ee
FDBM
535 if (ret)
536 goto free_tms;
b1a09f1e 537 write_unlock(&eb->fs_info->tree_mod_log_lock);
5de865ee 538 kfree(tm_list);
f395694c 539
5de865ee
FDBM
540 return 0;
541free_tms:
542 for (i = 0; i < nr_items; i++) {
543 if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
6074d45f 544 rb_erase(&tm_list[i]->node, &eb->fs_info->tree_mod_log);
5de865ee
FDBM
545 kfree(tm_list[i]);
546 }
547 if (locked)
b1a09f1e 548 write_unlock(&eb->fs_info->tree_mod_log_lock);
5de865ee
FDBM
549 kfree(tm_list);
550 kfree(tm);
bd989ba3 551
5de865ee 552 return ret;
bd989ba3
JS
553}
554
5de865ee
FDBM
555static inline int
556__tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
557 struct tree_mod_elem **tm_list,
558 int nritems)
097b8a7c 559{
5de865ee 560 int i, j;
097b8a7c
JS
561 int ret;
562
097b8a7c 563 for (i = nritems - 1; i >= 0; i--) {
5de865ee
FDBM
564 ret = __tree_mod_log_insert(fs_info, tm_list[i]);
565 if (ret) {
566 for (j = nritems - 1; j > i; j--)
567 rb_erase(&tm_list[j]->node,
568 &fs_info->tree_mod_log);
569 return ret;
570 }
097b8a7c 571 }
5de865ee
FDBM
572
573 return 0;
097b8a7c
JS
574}
575
95b757c1
DS
576static noinline int tree_mod_log_insert_root(struct extent_buffer *old_root,
577 struct extent_buffer *new_root, int log_removal)
bd989ba3 578{
95b757c1 579 struct btrfs_fs_info *fs_info = old_root->fs_info;
5de865ee
FDBM
580 struct tree_mod_elem *tm = NULL;
581 struct tree_mod_elem **tm_list = NULL;
582 int nritems = 0;
583 int ret = 0;
584 int i;
bd989ba3 585
5de865ee 586 if (!tree_mod_need_log(fs_info, NULL))
097b8a7c
JS
587 return 0;
588
5de865ee
FDBM
589 if (log_removal && btrfs_header_level(old_root) > 0) {
590 nritems = btrfs_header_nritems(old_root);
31e818fe 591 tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *),
bcc8e07f 592 GFP_NOFS);
5de865ee
FDBM
593 if (!tm_list) {
594 ret = -ENOMEM;
595 goto free_tms;
596 }
597 for (i = 0; i < nritems; i++) {
598 tm_list[i] = alloc_tree_mod_elem(old_root, i,
bcc8e07f 599 MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS);
5de865ee
FDBM
600 if (!tm_list[i]) {
601 ret = -ENOMEM;
602 goto free_tms;
603 }
604 }
605 }
d9abbf1c 606
bcc8e07f 607 tm = kzalloc(sizeof(*tm), GFP_NOFS);
5de865ee
FDBM
608 if (!tm) {
609 ret = -ENOMEM;
610 goto free_tms;
611 }
bd989ba3 612
298cfd36 613 tm->logical = new_root->start;
bd989ba3
JS
614 tm->old_root.logical = old_root->start;
615 tm->old_root.level = btrfs_header_level(old_root);
616 tm->generation = btrfs_header_generation(old_root);
617 tm->op = MOD_LOG_ROOT_REPLACE;
618
5de865ee
FDBM
619 if (tree_mod_dont_log(fs_info, NULL))
620 goto free_tms;
621
622 if (tm_list)
623 ret = __tree_mod_log_free_eb(fs_info, tm_list, nritems);
624 if (!ret)
625 ret = __tree_mod_log_insert(fs_info, tm);
626
b1a09f1e 627 write_unlock(&fs_info->tree_mod_log_lock);
5de865ee
FDBM
628 if (ret)
629 goto free_tms;
630 kfree(tm_list);
631
632 return ret;
633
634free_tms:
635 if (tm_list) {
636 for (i = 0; i < nritems; i++)
637 kfree(tm_list[i]);
638 kfree(tm_list);
639 }
640 kfree(tm);
641
642 return ret;
bd989ba3
JS
643}
644
645static struct tree_mod_elem *
646__tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
647 int smallest)
648{
649 struct rb_root *tm_root;
650 struct rb_node *node;
651 struct tree_mod_elem *cur = NULL;
652 struct tree_mod_elem *found = NULL;
bd989ba3 653
b1a09f1e 654 read_lock(&fs_info->tree_mod_log_lock);
bd989ba3
JS
655 tm_root = &fs_info->tree_mod_log;
656 node = tm_root->rb_node;
657 while (node) {
6b4df8b6 658 cur = rb_entry(node, struct tree_mod_elem, node);
298cfd36 659 if (cur->logical < start) {
bd989ba3 660 node = node->rb_left;
298cfd36 661 } else if (cur->logical > start) {
bd989ba3 662 node = node->rb_right;
097b8a7c 663 } else if (cur->seq < min_seq) {
bd989ba3
JS
664 node = node->rb_left;
665 } else if (!smallest) {
666 /* we want the node with the highest seq */
667 if (found)
097b8a7c 668 BUG_ON(found->seq > cur->seq);
bd989ba3
JS
669 found = cur;
670 node = node->rb_left;
097b8a7c 671 } else if (cur->seq > min_seq) {
bd989ba3
JS
672 /* we want the node with the smallest seq */
673 if (found)
097b8a7c 674 BUG_ON(found->seq < cur->seq);
bd989ba3
JS
675 found = cur;
676 node = node->rb_right;
677 } else {
678 found = cur;
679 break;
680 }
681 }
b1a09f1e 682 read_unlock(&fs_info->tree_mod_log_lock);
bd989ba3
JS
683
684 return found;
685}
686
687/*
688 * this returns the element from the log with the smallest time sequence
689 * value that's in the log (the oldest log item). any element with a time
690 * sequence lower than min_seq will be ignored.
691 */
692static struct tree_mod_elem *
693tree_mod_log_search_oldest(struct btrfs_fs_info *fs_info, u64 start,
694 u64 min_seq)
695{
696 return __tree_mod_log_search(fs_info, start, min_seq, 1);
697}
698
699/*
700 * this returns the element from the log with the largest time sequence
701 * value that's in the log (the most recent log item). any element with
702 * a time sequence lower than min_seq will be ignored.
703 */
704static struct tree_mod_elem *
705tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq)
706{
707 return __tree_mod_log_search(fs_info, start, min_seq, 0);
708}
709
ed874f0d 710static noinline int tree_mod_log_eb_copy(struct extent_buffer *dst,
bd989ba3 711 struct extent_buffer *src, unsigned long dst_offset,
90f8d62e 712 unsigned long src_offset, int nr_items)
bd989ba3 713{
ed874f0d 714 struct btrfs_fs_info *fs_info = dst->fs_info;
5de865ee
FDBM
715 int ret = 0;
716 struct tree_mod_elem **tm_list = NULL;
717 struct tree_mod_elem **tm_list_add, **tm_list_rem;
bd989ba3 718 int i;
5de865ee 719 int locked = 0;
bd989ba3 720
5de865ee
FDBM
721 if (!tree_mod_need_log(fs_info, NULL))
722 return 0;
bd989ba3 723
c8cc6341 724 if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0)
5de865ee
FDBM
725 return 0;
726
31e818fe 727 tm_list = kcalloc(nr_items * 2, sizeof(struct tree_mod_elem *),
5de865ee
FDBM
728 GFP_NOFS);
729 if (!tm_list)
730 return -ENOMEM;
bd989ba3 731
5de865ee
FDBM
732 tm_list_add = tm_list;
733 tm_list_rem = tm_list + nr_items;
bd989ba3 734 for (i = 0; i < nr_items; i++) {
5de865ee
FDBM
735 tm_list_rem[i] = alloc_tree_mod_elem(src, i + src_offset,
736 MOD_LOG_KEY_REMOVE, GFP_NOFS);
737 if (!tm_list_rem[i]) {
738 ret = -ENOMEM;
739 goto free_tms;
740 }
741
742 tm_list_add[i] = alloc_tree_mod_elem(dst, i + dst_offset,
743 MOD_LOG_KEY_ADD, GFP_NOFS);
744 if (!tm_list_add[i]) {
745 ret = -ENOMEM;
746 goto free_tms;
747 }
748 }
749
750 if (tree_mod_dont_log(fs_info, NULL))
751 goto free_tms;
752 locked = 1;
753
754 for (i = 0; i < nr_items; i++) {
755 ret = __tree_mod_log_insert(fs_info, tm_list_rem[i]);
756 if (ret)
757 goto free_tms;
758 ret = __tree_mod_log_insert(fs_info, tm_list_add[i]);
759 if (ret)
760 goto free_tms;
bd989ba3 761 }
5de865ee 762
b1a09f1e 763 write_unlock(&fs_info->tree_mod_log_lock);
5de865ee
FDBM
764 kfree(tm_list);
765
766 return 0;
767
768free_tms:
769 for (i = 0; i < nr_items * 2; i++) {
770 if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
771 rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log);
772 kfree(tm_list[i]);
773 }
774 if (locked)
b1a09f1e 775 write_unlock(&fs_info->tree_mod_log_lock);
5de865ee
FDBM
776 kfree(tm_list);
777
778 return ret;
bd989ba3
JS
779}
780
db7279a2 781static noinline int tree_mod_log_free_eb(struct extent_buffer *eb)
bd989ba3 782{
5de865ee
FDBM
783 struct tree_mod_elem **tm_list = NULL;
784 int nritems = 0;
785 int i;
786 int ret = 0;
787
788 if (btrfs_header_level(eb) == 0)
789 return 0;
790
db7279a2 791 if (!tree_mod_need_log(eb->fs_info, NULL))
5de865ee
FDBM
792 return 0;
793
794 nritems = btrfs_header_nritems(eb);
31e818fe 795 tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *), GFP_NOFS);
5de865ee
FDBM
796 if (!tm_list)
797 return -ENOMEM;
798
799 for (i = 0; i < nritems; i++) {
800 tm_list[i] = alloc_tree_mod_elem(eb, i,
801 MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS);
802 if (!tm_list[i]) {
803 ret = -ENOMEM;
804 goto free_tms;
805 }
806 }
807
db7279a2 808 if (tree_mod_dont_log(eb->fs_info, eb))
5de865ee
FDBM
809 goto free_tms;
810
db7279a2 811 ret = __tree_mod_log_free_eb(eb->fs_info, tm_list, nritems);
b1a09f1e 812 write_unlock(&eb->fs_info->tree_mod_log_lock);
5de865ee
FDBM
813 if (ret)
814 goto free_tms;
815 kfree(tm_list);
816
817 return 0;
818
819free_tms:
820 for (i = 0; i < nritems; i++)
821 kfree(tm_list[i]);
822 kfree(tm_list);
823
824 return ret;
bd989ba3
JS
825}
826
5d4f98a2
YZ
827/*
828 * check if the tree block can be shared by multiple trees
829 */
830int btrfs_block_can_be_shared(struct btrfs_root *root,
831 struct extent_buffer *buf)
832{
833 /*
92a7cc42
QW
834 * Tree blocks not in shareable trees and tree roots are never shared.
835 * If a block was allocated after the last snapshot and the block was
836 * not allocated by tree relocation, we know the block is not shared.
5d4f98a2 837 */
92a7cc42 838 if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
5d4f98a2
YZ
839 buf != root->node && buf != root->commit_root &&
840 (btrfs_header_generation(buf) <=
841 btrfs_root_last_snapshot(&root->root_item) ||
842 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
843 return 1;
a79865c6 844
5d4f98a2
YZ
845 return 0;
846}
847
848static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
849 struct btrfs_root *root,
850 struct extent_buffer *buf,
f0486c68
YZ
851 struct extent_buffer *cow,
852 int *last_ref)
5d4f98a2 853{
0b246afa 854 struct btrfs_fs_info *fs_info = root->fs_info;
5d4f98a2
YZ
855 u64 refs;
856 u64 owner;
857 u64 flags;
858 u64 new_flags = 0;
859 int ret;
860
861 /*
862 * Backrefs update rules:
863 *
864 * Always use full backrefs for extent pointers in tree block
865 * allocated by tree relocation.
866 *
867 * If a shared tree block is no longer referenced by its owner
868 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
869 * use full backrefs for extent pointers in tree block.
870 *
871 * If a tree block is being relocated
872 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
873 * use full backrefs for extent pointers in tree block.
874 * The reason for this is that some operations (such as dropping a tree)
875 * are only allowed for blocks that use full backrefs.
876 */
877
878 if (btrfs_block_can_be_shared(root, buf)) {
2ff7e61e 879 ret = btrfs_lookup_extent_info(trans, fs_info, buf->start,
3173a18f
JB
880 btrfs_header_level(buf), 1,
881 &refs, &flags);
be1a5564
MF
882 if (ret)
883 return ret;
e5df9573
MF
884 if (refs == 0) {
885 ret = -EROFS;
0b246afa 886 btrfs_handle_fs_error(fs_info, ret, NULL);
e5df9573
MF
887 return ret;
888 }
5d4f98a2
YZ
889 } else {
890 refs = 1;
891 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
892 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
893 flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
894 else
895 flags = 0;
896 }
897
898 owner = btrfs_header_owner(buf);
899 BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
900 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
901
902 if (refs > 1) {
903 if ((owner == root->root_key.objectid ||
904 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
905 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
e339a6b0 906 ret = btrfs_inc_ref(trans, root, buf, 1);
692826b2
JM
907 if (ret)
908 return ret;
5d4f98a2
YZ
909
910 if (root->root_key.objectid ==
911 BTRFS_TREE_RELOC_OBJECTID) {
e339a6b0 912 ret = btrfs_dec_ref(trans, root, buf, 0);
692826b2
JM
913 if (ret)
914 return ret;
e339a6b0 915 ret = btrfs_inc_ref(trans, root, cow, 1);
692826b2
JM
916 if (ret)
917 return ret;
5d4f98a2
YZ
918 }
919 new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
920 } else {
921
922 if (root->root_key.objectid ==
923 BTRFS_TREE_RELOC_OBJECTID)
e339a6b0 924 ret = btrfs_inc_ref(trans, root, cow, 1);
5d4f98a2 925 else
e339a6b0 926 ret = btrfs_inc_ref(trans, root, cow, 0);
692826b2
JM
927 if (ret)
928 return ret;
5d4f98a2
YZ
929 }
930 if (new_flags != 0) {
b1c79e09
JB
931 int level = btrfs_header_level(buf);
932
42c9d0b5 933 ret = btrfs_set_disk_extent_flags(trans, buf,
b1c79e09 934 new_flags, level, 0);
be1a5564
MF
935 if (ret)
936 return ret;
5d4f98a2
YZ
937 }
938 } else {
939 if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
940 if (root->root_key.objectid ==
941 BTRFS_TREE_RELOC_OBJECTID)
e339a6b0 942 ret = btrfs_inc_ref(trans, root, cow, 1);
5d4f98a2 943 else
e339a6b0 944 ret = btrfs_inc_ref(trans, root, cow, 0);
692826b2
JM
945 if (ret)
946 return ret;
e339a6b0 947 ret = btrfs_dec_ref(trans, root, buf, 1);
692826b2
JM
948 if (ret)
949 return ret;
5d4f98a2 950 }
6a884d7d 951 btrfs_clean_tree_block(buf);
f0486c68 952 *last_ref = 1;
5d4f98a2
YZ
953 }
954 return 0;
955}
956
a6279470
FM
957static struct extent_buffer *alloc_tree_block_no_bg_flush(
958 struct btrfs_trans_handle *trans,
959 struct btrfs_root *root,
960 u64 parent_start,
961 const struct btrfs_disk_key *disk_key,
962 int level,
963 u64 hint,
9631e4cc
JB
964 u64 empty_size,
965 enum btrfs_lock_nesting nest)
a6279470
FM
966{
967 struct btrfs_fs_info *fs_info = root->fs_info;
968 struct extent_buffer *ret;
969
970 /*
971 * If we are COWing a node/leaf from the extent, chunk, device or free
972 * space trees, make sure that we do not finish block group creation of
973 * pending block groups. We do this to avoid a deadlock.
974 * COWing can result in allocation of a new chunk, and flushing pending
975 * block groups (btrfs_create_pending_block_groups()) can be triggered
976 * when finishing allocation of a new chunk. Creation of a pending block
977 * group modifies the extent, chunk, device and free space trees,
978 * therefore we could deadlock with ourselves since we are holding a
979 * lock on an extent buffer that btrfs_create_pending_block_groups() may
980 * try to COW later.
981 * For similar reasons, we also need to delay flushing pending block
982 * groups when splitting a leaf or node, from one of those trees, since
983 * we are holding a write lock on it and its parent or when inserting a
984 * new root node for one of those trees.
985 */
986 if (root == fs_info->extent_root ||
987 root == fs_info->chunk_root ||
988 root == fs_info->dev_root ||
989 root == fs_info->free_space_root)
990 trans->can_flush_pending_bgs = false;
991
992 ret = btrfs_alloc_tree_block(trans, root, parent_start,
993 root->root_key.objectid, disk_key, level,
9631e4cc 994 hint, empty_size, nest);
a6279470
FM
995 trans->can_flush_pending_bgs = true;
996
997 return ret;
998}
999
d352ac68 1000/*
d397712b
CM
1001 * does the dirty work in cow of a single block. The parent block (if
1002 * supplied) is updated to point to the new cow copy. The new buffer is marked
1003 * dirty and returned locked. If you modify the block it needs to be marked
1004 * dirty again.
d352ac68
CM
1005 *
1006 * search_start -- an allocation hint for the new block
1007 *
d397712b
CM
1008 * empty_size -- a hint that you plan on doing more cow. This is the size in
1009 * bytes the allocator should try to find free next to the block it returns.
1010 * This is just a hint and may be ignored by the allocator.
d352ac68 1011 */
d397712b 1012static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
5f39d397
CM
1013 struct btrfs_root *root,
1014 struct extent_buffer *buf,
1015 struct extent_buffer *parent, int parent_slot,
1016 struct extent_buffer **cow_ret,
9631e4cc
JB
1017 u64 search_start, u64 empty_size,
1018 enum btrfs_lock_nesting nest)
02217ed2 1019{
0b246afa 1020 struct btrfs_fs_info *fs_info = root->fs_info;
5d4f98a2 1021 struct btrfs_disk_key disk_key;
5f39d397 1022 struct extent_buffer *cow;
be1a5564 1023 int level, ret;
f0486c68 1024 int last_ref = 0;
925baedd 1025 int unlock_orig = 0;
0f5053eb 1026 u64 parent_start = 0;
7bb86316 1027
925baedd
CM
1028 if (*cow_ret == buf)
1029 unlock_orig = 1;
1030
b9447ef8 1031 btrfs_assert_tree_locked(buf);
925baedd 1032
92a7cc42 1033 WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
0b246afa 1034 trans->transid != fs_info->running_transaction->transid);
92a7cc42 1035 WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
27cdeb70 1036 trans->transid != root->last_trans);
5f39d397 1037
7bb86316 1038 level = btrfs_header_level(buf);
31840ae1 1039
5d4f98a2
YZ
1040 if (level == 0)
1041 btrfs_item_key(buf, &disk_key, 0);
1042 else
1043 btrfs_node_key(buf, &disk_key, 0);
1044
0f5053eb
GR
1045 if ((root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && parent)
1046 parent_start = parent->start;
5d4f98a2 1047
a6279470 1048 cow = alloc_tree_block_no_bg_flush(trans, root, parent_start, &disk_key,
9631e4cc 1049 level, search_start, empty_size, nest);
54aa1f4d
CM
1050 if (IS_ERR(cow))
1051 return PTR_ERR(cow);
6702ed49 1052
b4ce94de
CM
1053 /* cow is set to blocking by btrfs_init_new_buffer */
1054
58e8012c 1055 copy_extent_buffer_full(cow, buf);
db94535d 1056 btrfs_set_header_bytenr(cow, cow->start);
5f39d397 1057 btrfs_set_header_generation(cow, trans->transid);
5d4f98a2
YZ
1058 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
1059 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
1060 BTRFS_HEADER_FLAG_RELOC);
1061 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
1062 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
1063 else
1064 btrfs_set_header_owner(cow, root->root_key.objectid);
6702ed49 1065
de37aa51 1066 write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid);
2b82032c 1067
be1a5564 1068 ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
b68dc2a9 1069 if (ret) {
572c83ac
JB
1070 btrfs_tree_unlock(cow);
1071 free_extent_buffer(cow);
66642832 1072 btrfs_abort_transaction(trans, ret);
b68dc2a9
MF
1073 return ret;
1074 }
1a40e23b 1075
92a7cc42 1076 if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) {
83d4cfd4 1077 ret = btrfs_reloc_cow_block(trans, root, buf, cow);
93314e3b 1078 if (ret) {
572c83ac
JB
1079 btrfs_tree_unlock(cow);
1080 free_extent_buffer(cow);
66642832 1081 btrfs_abort_transaction(trans, ret);
83d4cfd4 1082 return ret;
93314e3b 1083 }
83d4cfd4 1084 }
3fd0a558 1085
02217ed2 1086 if (buf == root->node) {
925baedd 1087 WARN_ON(parent && parent != buf);
5d4f98a2
YZ
1088 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
1089 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
1090 parent_start = buf->start;
925baedd 1091
67439dad 1092 atomic_inc(&cow->refs);
d9d19a01
DS
1093 ret = tree_mod_log_insert_root(root->node, cow, 1);
1094 BUG_ON(ret < 0);
240f62c8 1095 rcu_assign_pointer(root->node, cow);
925baedd 1096
f0486c68 1097 btrfs_free_tree_block(trans, root, buf, parent_start,
5581a51a 1098 last_ref);
5f39d397 1099 free_extent_buffer(buf);
0b86a832 1100 add_root_to_dirty_list(root);
02217ed2 1101 } else {
5d4f98a2 1102 WARN_ON(trans->transid != btrfs_header_generation(parent));
e09c2efe 1103 tree_mod_log_insert_key(parent, parent_slot,
c8cc6341 1104 MOD_LOG_KEY_REPLACE, GFP_NOFS);
5f39d397 1105 btrfs_set_node_blockptr(parent, parent_slot,
db94535d 1106 cow->start);
74493f7a
CM
1107 btrfs_set_node_ptr_generation(parent, parent_slot,
1108 trans->transid);
d6025579 1109 btrfs_mark_buffer_dirty(parent);
5de865ee 1110 if (last_ref) {
db7279a2 1111 ret = tree_mod_log_free_eb(buf);
5de865ee 1112 if (ret) {
572c83ac
JB
1113 btrfs_tree_unlock(cow);
1114 free_extent_buffer(cow);
66642832 1115 btrfs_abort_transaction(trans, ret);
5de865ee
FDBM
1116 return ret;
1117 }
1118 }
f0486c68 1119 btrfs_free_tree_block(trans, root, buf, parent_start,
5581a51a 1120 last_ref);
02217ed2 1121 }
925baedd
CM
1122 if (unlock_orig)
1123 btrfs_tree_unlock(buf);
3083ee2e 1124 free_extent_buffer_stale(buf);
ccd467d6 1125 btrfs_mark_buffer_dirty(cow);
2c90e5d6 1126 *cow_ret = cow;
02217ed2
CM
1127 return 0;
1128}
1129
5d9e75c4
JS
1130/*
1131 * returns the logical address of the oldest predecessor of the given root.
1132 * entries older than time_seq are ignored.
1133 */
bcd24dab
DS
1134static struct tree_mod_elem *__tree_mod_log_oldest_root(
1135 struct extent_buffer *eb_root, u64 time_seq)
5d9e75c4
JS
1136{
1137 struct tree_mod_elem *tm;
1138 struct tree_mod_elem *found = NULL;
30b0463a 1139 u64 root_logical = eb_root->start;
5d9e75c4
JS
1140 int looped = 0;
1141
1142 if (!time_seq)
35a3621b 1143 return NULL;
5d9e75c4
JS
1144
1145 /*
298cfd36
CR
1146 * the very last operation that's logged for a root is the
1147 * replacement operation (if it is replaced at all). this has
1148 * the logical address of the *new* root, making it the very
1149 * first operation that's logged for this root.
5d9e75c4
JS
1150 */
1151 while (1) {
bcd24dab 1152 tm = tree_mod_log_search_oldest(eb_root->fs_info, root_logical,
5d9e75c4
JS
1153 time_seq);
1154 if (!looped && !tm)
35a3621b 1155 return NULL;
5d9e75c4 1156 /*
28da9fb4
JS
1157 * if there are no tree operations for the oldest root, we simply
1158 * return it. this should only happen if that (old) root is at
1159 * level 0.
5d9e75c4 1160 */
28da9fb4
JS
1161 if (!tm)
1162 break;
5d9e75c4 1163
28da9fb4
JS
1164 /*
1165 * if there's an operation that's not a root replacement, we
1166 * found the oldest version of our root. normally, we'll find a
1167 * MOD_LOG_KEY_REMOVE_WHILE_FREEING operation here.
1168 */
5d9e75c4
JS
1169 if (tm->op != MOD_LOG_ROOT_REPLACE)
1170 break;
1171
1172 found = tm;
1173 root_logical = tm->old_root.logical;
5d9e75c4
JS
1174 looped = 1;
1175 }
1176
a95236d9
JS
1177 /* if there's no old root to return, return what we found instead */
1178 if (!found)
1179 found = tm;
1180
5d9e75c4
JS
1181 return found;
1182}
1183
1184/*
1185 * tm is a pointer to the first operation to rewind within eb. then, all
01327610 1186 * previous operations will be rewound (until we reach something older than
5d9e75c4
JS
1187 * time_seq).
1188 */
1189static void
f1ca7e98
JB
1190__tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
1191 u64 time_seq, struct tree_mod_elem *first_tm)
5d9e75c4
JS
1192{
1193 u32 n;
1194 struct rb_node *next;
1195 struct tree_mod_elem *tm = first_tm;
1196 unsigned long o_dst;
1197 unsigned long o_src;
1198 unsigned long p_size = sizeof(struct btrfs_key_ptr);
1199
1200 n = btrfs_header_nritems(eb);
b1a09f1e 1201 read_lock(&fs_info->tree_mod_log_lock);
097b8a7c 1202 while (tm && tm->seq >= time_seq) {
5d9e75c4
JS
1203 /*
1204 * all the operations are recorded with the operator used for
1205 * the modification. as we're going backwards, we do the
1206 * opposite of each operation here.
1207 */
1208 switch (tm->op) {
1209 case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
1210 BUG_ON(tm->slot < n);
c730ae0c 1211 fallthrough;
95c80bb1 1212 case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
4c3e6969 1213 case MOD_LOG_KEY_REMOVE:
5d9e75c4
JS
1214 btrfs_set_node_key(eb, &tm->key, tm->slot);
1215 btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
1216 btrfs_set_node_ptr_generation(eb, tm->slot,
1217 tm->generation);
4c3e6969 1218 n++;
5d9e75c4
JS
1219 break;
1220 case MOD_LOG_KEY_REPLACE:
1221 BUG_ON(tm->slot >= n);
1222 btrfs_set_node_key(eb, &tm->key, tm->slot);
1223 btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
1224 btrfs_set_node_ptr_generation(eb, tm->slot,
1225 tm->generation);
1226 break;
1227 case MOD_LOG_KEY_ADD:
19956c7e 1228 /* if a move operation is needed it's in the log */
5d9e75c4
JS
1229 n--;
1230 break;
1231 case MOD_LOG_MOVE_KEYS:
c3193108
JS
1232 o_dst = btrfs_node_key_ptr_offset(tm->slot);
1233 o_src = btrfs_node_key_ptr_offset(tm->move.dst_slot);
1234 memmove_extent_buffer(eb, o_dst, o_src,
5d9e75c4
JS
1235 tm->move.nr_items * p_size);
1236 break;
1237 case MOD_LOG_ROOT_REPLACE:
1238 /*
1239 * this operation is special. for roots, this must be
1240 * handled explicitly before rewinding.
1241 * for non-roots, this operation may exist if the node
1242 * was a root: root A -> child B; then A gets empty and
1243 * B is promoted to the new root. in the mod log, we'll
1244 * have a root-replace operation for B, a tree block
1245 * that is no root. we simply ignore that operation.
1246 */
1247 break;
1248 }
1249 next = rb_next(&tm->node);
1250 if (!next)
1251 break;
6b4df8b6 1252 tm = rb_entry(next, struct tree_mod_elem, node);
298cfd36 1253 if (tm->logical != first_tm->logical)
5d9e75c4
JS
1254 break;
1255 }
b1a09f1e 1256 read_unlock(&fs_info->tree_mod_log_lock);
5d9e75c4
JS
1257 btrfs_set_header_nritems(eb, n);
1258}
1259
47fb091f 1260/*
01327610 1261 * Called with eb read locked. If the buffer cannot be rewound, the same buffer
47fb091f
JS
1262 * is returned. If rewind operations happen, a fresh buffer is returned. The
1263 * returned buffer is always read-locked. If the returned buffer is not the
1264 * input buffer, the lock on the input buffer is released and the input buffer
1265 * is freed (its refcount is decremented).
1266 */
5d9e75c4 1267static struct extent_buffer *
9ec72677
JB
1268tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
1269 struct extent_buffer *eb, u64 time_seq)
5d9e75c4
JS
1270{
1271 struct extent_buffer *eb_rewin;
1272 struct tree_mod_elem *tm;
1273
1274 if (!time_seq)
1275 return eb;
1276
1277 if (btrfs_header_level(eb) == 0)
1278 return eb;
1279
1280 tm = tree_mod_log_search(fs_info, eb->start, time_seq);
1281 if (!tm)
1282 return eb;
1283
1284 if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
1285 BUG_ON(tm->slot != 0);
da17066c 1286 eb_rewin = alloc_dummy_extent_buffer(fs_info, eb->start);
db7f3436 1287 if (!eb_rewin) {
ac5887c8 1288 btrfs_tree_read_unlock(eb);
db7f3436
JB
1289 free_extent_buffer(eb);
1290 return NULL;
1291 }
5d9e75c4
JS
1292 btrfs_set_header_bytenr(eb_rewin, eb->start);
1293 btrfs_set_header_backref_rev(eb_rewin,
1294 btrfs_header_backref_rev(eb));
1295 btrfs_set_header_owner(eb_rewin, btrfs_header_owner(eb));
c3193108 1296 btrfs_set_header_level(eb_rewin, btrfs_header_level(eb));
5d9e75c4
JS
1297 } else {
1298 eb_rewin = btrfs_clone_extent_buffer(eb);
db7f3436 1299 if (!eb_rewin) {
ac5887c8 1300 btrfs_tree_read_unlock(eb);
db7f3436
JB
1301 free_extent_buffer(eb);
1302 return NULL;
1303 }
5d9e75c4
JS
1304 }
1305
ac5887c8 1306 btrfs_tree_read_unlock(eb);
5d9e75c4
JS
1307 free_extent_buffer(eb);
1308
d3beaa25
JB
1309 btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb_rewin),
1310 eb_rewin, btrfs_header_level(eb_rewin));
47fb091f 1311 btrfs_tree_read_lock(eb_rewin);
f1ca7e98 1312 __tree_mod_log_rewind(fs_info, eb_rewin, time_seq, tm);
57911b8b 1313 WARN_ON(btrfs_header_nritems(eb_rewin) >
da17066c 1314 BTRFS_NODEPTRS_PER_BLOCK(fs_info));
5d9e75c4
JS
1315
1316 return eb_rewin;
1317}
1318
8ba97a15
JS
1319/*
1320 * get_old_root() rewinds the state of @root's root node to the given @time_seq
1321 * value. If there are no changes, the current root->node is returned. If
1322 * anything changed in between, there's a fresh buffer allocated on which the
1323 * rewind operations are done. In any case, the returned buffer is read locked.
1324 * Returns NULL on error (with no locks held).
1325 */
5d9e75c4
JS
1326static inline struct extent_buffer *
1327get_old_root(struct btrfs_root *root, u64 time_seq)
1328{
0b246afa 1329 struct btrfs_fs_info *fs_info = root->fs_info;
5d9e75c4 1330 struct tree_mod_elem *tm;
30b0463a
JS
1331 struct extent_buffer *eb = NULL;
1332 struct extent_buffer *eb_root;
efad8a85 1333 u64 eb_root_owner = 0;
7bfdcf7f 1334 struct extent_buffer *old;
a95236d9 1335 struct tree_mod_root *old_root = NULL;
4325edd0 1336 u64 old_generation = 0;
a95236d9 1337 u64 logical;
581c1760 1338 int level;
5d9e75c4 1339
30b0463a 1340 eb_root = btrfs_read_lock_root_node(root);
bcd24dab 1341 tm = __tree_mod_log_oldest_root(eb_root, time_seq);
5d9e75c4 1342 if (!tm)
30b0463a 1343 return eb_root;
5d9e75c4 1344
a95236d9
JS
1345 if (tm->op == MOD_LOG_ROOT_REPLACE) {
1346 old_root = &tm->old_root;
1347 old_generation = tm->generation;
1348 logical = old_root->logical;
581c1760 1349 level = old_root->level;
a95236d9 1350 } else {
30b0463a 1351 logical = eb_root->start;
581c1760 1352 level = btrfs_header_level(eb_root);
a95236d9 1353 }
5d9e75c4 1354
0b246afa 1355 tm = tree_mod_log_search(fs_info, logical, time_seq);
834328a8 1356 if (old_root && tm && tm->op != MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
30b0463a
JS
1357 btrfs_tree_read_unlock(eb_root);
1358 free_extent_buffer(eb_root);
1b7ec85e
JB
1359 old = read_tree_block(fs_info, logical, root->root_key.objectid,
1360 0, level, NULL);
64c043de
LB
1361 if (WARN_ON(IS_ERR(old) || !extent_buffer_uptodate(old))) {
1362 if (!IS_ERR(old))
1363 free_extent_buffer(old);
0b246afa
JM
1364 btrfs_warn(fs_info,
1365 "failed to read tree block %llu from get_old_root",
1366 logical);
834328a8 1367 } else {
7bfdcf7f
LB
1368 eb = btrfs_clone_extent_buffer(old);
1369 free_extent_buffer(old);
834328a8
JS
1370 }
1371 } else if (old_root) {
efad8a85 1372 eb_root_owner = btrfs_header_owner(eb_root);
30b0463a
JS
1373 btrfs_tree_read_unlock(eb_root);
1374 free_extent_buffer(eb_root);
0b246afa 1375 eb = alloc_dummy_extent_buffer(fs_info, logical);
834328a8 1376 } else {
30b0463a 1377 eb = btrfs_clone_extent_buffer(eb_root);
ac5887c8 1378 btrfs_tree_read_unlock(eb_root);
30b0463a 1379 free_extent_buffer(eb_root);
834328a8
JS
1380 }
1381
8ba97a15
JS
1382 if (!eb)
1383 return NULL;
a95236d9 1384 if (old_root) {
5d9e75c4
JS
1385 btrfs_set_header_bytenr(eb, eb->start);
1386 btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
efad8a85 1387 btrfs_set_header_owner(eb, eb_root_owner);
a95236d9
JS
1388 btrfs_set_header_level(eb, old_root->level);
1389 btrfs_set_header_generation(eb, old_generation);
5d9e75c4 1390 }
d3beaa25
JB
1391 btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb), eb,
1392 btrfs_header_level(eb));
1393 btrfs_tree_read_lock(eb);
28da9fb4 1394 if (tm)
0b246afa 1395 __tree_mod_log_rewind(fs_info, eb, time_seq, tm);
28da9fb4
JS
1396 else
1397 WARN_ON(btrfs_header_level(eb) != 0);
0b246afa 1398 WARN_ON(btrfs_header_nritems(eb) > BTRFS_NODEPTRS_PER_BLOCK(fs_info));
5d9e75c4
JS
1399
1400 return eb;
1401}
1402
5b6602e7
JS
1403int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq)
1404{
1405 struct tree_mod_elem *tm;
1406 int level;
30b0463a 1407 struct extent_buffer *eb_root = btrfs_root_node(root);
5b6602e7 1408
bcd24dab 1409 tm = __tree_mod_log_oldest_root(eb_root, time_seq);
5b6602e7
JS
1410 if (tm && tm->op == MOD_LOG_ROOT_REPLACE) {
1411 level = tm->old_root.level;
1412 } else {
30b0463a 1413 level = btrfs_header_level(eb_root);
5b6602e7 1414 }
30b0463a 1415 free_extent_buffer(eb_root);
5b6602e7
JS
1416
1417 return level;
1418}
1419
5d4f98a2
YZ
1420static inline int should_cow_block(struct btrfs_trans_handle *trans,
1421 struct btrfs_root *root,
1422 struct extent_buffer *buf)
1423{
f5ee5c9a 1424 if (btrfs_is_testing(root->fs_info))
faa2dbf0 1425 return 0;
fccb84c9 1426
d1980131
DS
1427 /* Ensure we can see the FORCE_COW bit */
1428 smp_mb__before_atomic();
f1ebcc74
LB
1429
1430 /*
1431 * We do not need to cow a block if
1432 * 1) this block is not created or changed in this transaction;
1433 * 2) this block does not belong to TREE_RELOC tree;
1434 * 3) the root is not forced COW.
1435 *
1436 * What is forced COW:
01327610 1437 * when we create a snapshot while committing the transaction,
52042d8e 1438 * after we've finished copying the src root, we must COW the shared
f1ebcc74
LB
1439 * block to ensure metadata consistency.
1440 */
5d4f98a2
YZ
1441 if (btrfs_header_generation(buf) == trans->transid &&
1442 !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
1443 !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
f1ebcc74 1444 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
27cdeb70 1445 !test_bit(BTRFS_ROOT_FORCE_COW, &root->state))
5d4f98a2
YZ
1446 return 0;
1447 return 1;
1448}
1449
d352ac68
CM
1450/*
1451 * cows a single block, see __btrfs_cow_block for the real work.
01327610 1452 * This version of it has extra checks so that a block isn't COWed more than
d352ac68
CM
1453 * once per transaction, as long as it hasn't been written yet
1454 */
d397712b 1455noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
5f39d397
CM
1456 struct btrfs_root *root, struct extent_buffer *buf,
1457 struct extent_buffer *parent, int parent_slot,
9631e4cc
JB
1458 struct extent_buffer **cow_ret,
1459 enum btrfs_lock_nesting nest)
6702ed49 1460{
0b246afa 1461 struct btrfs_fs_info *fs_info = root->fs_info;
6702ed49 1462 u64 search_start;
f510cfec 1463 int ret;
dc17ff8f 1464
83354f07
JB
1465 if (test_bit(BTRFS_ROOT_DELETING, &root->state))
1466 btrfs_err(fs_info,
1467 "COW'ing blocks on a fs root that's being dropped");
1468
0b246afa 1469 if (trans->transaction != fs_info->running_transaction)
31b1a2bd 1470 WARN(1, KERN_CRIT "trans %llu running %llu\n",
c1c9ff7c 1471 trans->transid,
0b246afa 1472 fs_info->running_transaction->transid);
31b1a2bd 1473
0b246afa 1474 if (trans->transid != fs_info->generation)
31b1a2bd 1475 WARN(1, KERN_CRIT "trans %llu running %llu\n",
0b246afa 1476 trans->transid, fs_info->generation);
dc17ff8f 1477
5d4f98a2 1478 if (!should_cow_block(trans, root, buf)) {
64c12921 1479 trans->dirty = true;
6702ed49
CM
1480 *cow_ret = buf;
1481 return 0;
1482 }
c487685d 1483
ee22184b 1484 search_start = buf->start & ~((u64)SZ_1G - 1);
b4ce94de 1485
f616f5cd
QW
1486 /*
1487 * Before CoWing this block for later modification, check if it's
1488 * the subtree root and do the delayed subtree trace if needed.
1489 *
1490 * Also We don't care about the error, as it's handled internally.
1491 */
1492 btrfs_qgroup_trace_subtree_after_cow(trans, root, buf);
f510cfec 1493 ret = __btrfs_cow_block(trans, root, buf, parent,
9631e4cc 1494 parent_slot, cow_ret, search_start, 0, nest);
1abe9b8a 1495
1496 trace_btrfs_cow_block(root, buf, *cow_ret);
1497
f510cfec 1498 return ret;
6702ed49 1499}
f75e2b79 1500ALLOW_ERROR_INJECTION(btrfs_cow_block, ERRNO);
6702ed49 1501
d352ac68
CM
1502/*
1503 * helper function for defrag to decide if two blocks pointed to by a
1504 * node are actually close by
1505 */
6b80053d 1506static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
6702ed49 1507{
6b80053d 1508 if (blocknr < other && other - (blocknr + blocksize) < 32768)
6702ed49 1509 return 1;
6b80053d 1510 if (blocknr > other && blocknr - (other + blocksize) < 32768)
6702ed49
CM
1511 return 1;
1512 return 0;
1513}
1514
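/*
 * Editor's note: an illustrative worked example (not part of ctree.c) of
 * the 32 KiB proximity heuristic in close_blocks() above, assuming a
 * hypothetical 16 KiB nodesize:
 *
 *	close_blocks(0x100000, 0x10a000, 16384)
 *	  -> other - (blocknr + blocksize) = 0x6000 (24 KiB) < 32768 -> 1
 *
 * so the two blocks count as close and defrag leaves the child where it
 * is, while a gap of 32 KiB or more would return 0 and make
 * btrfs_realloc_node() COW the child to a nearby location.
 */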
ce6ef5ab
DS
1515#ifdef __LITTLE_ENDIAN
1516
1517/*
1518 * Compare two keys. On little-endian, the disk order is the same as the CPU order and
1519 * we can avoid the conversion.
1520 */
1521static int comp_keys(const struct btrfs_disk_key *disk_key,
1522 const struct btrfs_key *k2)
1523{
1524 const struct btrfs_key *k1 = (const struct btrfs_key *)disk_key;
1525
1526 return btrfs_comp_cpu_keys(k1, k2);
1527}
1528
1529#else
1530
081e9573
CM
1531/*
1532 * compare two keys in a memcmp fashion
1533 */
310712b2
OS
1534static int comp_keys(const struct btrfs_disk_key *disk,
1535 const struct btrfs_key *k2)
081e9573
CM
1536{
1537 struct btrfs_key k1;
1538
1539 btrfs_disk_key_to_cpu(&k1, disk);
1540
20736aba 1541 return btrfs_comp_cpu_keys(&k1, k2);
081e9573 1542}
ce6ef5ab 1543#endif
081e9573 1544
f3465ca4
JB
1545/*
1546 * same as comp_keys only with two btrfs_key's
1547 */
e1f60a65 1548int __pure btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2)
f3465ca4
JB
1549{
1550 if (k1->objectid > k2->objectid)
1551 return 1;
1552 if (k1->objectid < k2->objectid)
1553 return -1;
1554 if (k1->type > k2->type)
1555 return 1;
1556 if (k1->type < k2->type)
1557 return -1;
1558 if (k1->offset > k2->offset)
1559 return 1;
1560 if (k1->offset < k2->offset)
1561 return -1;
1562 return 0;
1563}
081e9573 1564
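/*
 * Editor's note: an illustrative sketch (not part of ctree.c) of the key
 * ordering implemented by btrfs_comp_cpu_keys() above: objectid is the
 * most significant field, then type, then offset. The example keys and
 * the example_key_order() wrapper are hypothetical.
 */
static void example_key_order(void)
{
	struct btrfs_key a = { .objectid = 256, .type = BTRFS_INODE_ITEM_KEY,
			       .offset = 0 };
	struct btrfs_key b = { .objectid = 256, .type = BTRFS_DIR_ITEM_KEY,
			       .offset = 123 };

	/*
	 * Same objectid, so the type decides: BTRFS_INODE_ITEM_KEY (1) is
	 * smaller than BTRFS_DIR_ITEM_KEY (84), so 'a' sorts before 'b' and
	 * the comparison returns -1.
	 */
	WARN_ON(btrfs_comp_cpu_keys(&a, &b) != -1);
}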
d352ac68
CM
1565/*
1566 * this is used by the defrag code to go through all the
1567 * leaves pointed to by a node and reallocate them so that
1568 * disk order is close to key order
1569 */
6702ed49 1570int btrfs_realloc_node(struct btrfs_trans_handle *trans,
5f39d397 1571 struct btrfs_root *root, struct extent_buffer *parent,
de78b51a 1572 int start_slot, u64 *last_ret,
a6b6e75e 1573 struct btrfs_key *progress)
6702ed49 1574{
0b246afa 1575 struct btrfs_fs_info *fs_info = root->fs_info;
6b80053d 1576 struct extent_buffer *cur;
6702ed49 1577 u64 blocknr;
e9d0b13b
CM
1578 u64 search_start = *last_ret;
1579 u64 last_block = 0;
6702ed49
CM
1580 u64 other;
1581 u32 parent_nritems;
6702ed49
CM
1582 int end_slot;
1583 int i;
1584 int err = 0;
6b80053d 1585 u32 blocksize;
081e9573
CM
1586 int progress_passed = 0;
1587 struct btrfs_disk_key disk_key;
6702ed49 1588
0b246afa
JM
1589 WARN_ON(trans->transaction != fs_info->running_transaction);
1590 WARN_ON(trans->transid != fs_info->generation);
86479a04 1591
6b80053d 1592 parent_nritems = btrfs_header_nritems(parent);
0b246afa 1593 blocksize = fs_info->nodesize;
5dfe2be7 1594 end_slot = parent_nritems - 1;
6702ed49 1595
5dfe2be7 1596 if (parent_nritems <= 1)
6702ed49
CM
1597 return 0;
1598
5dfe2be7 1599 for (i = start_slot; i <= end_slot; i++) {
6702ed49 1600 int close = 1;
a6b6e75e 1601
081e9573
CM
1602 btrfs_node_key(parent, &disk_key, i);
1603 if (!progress_passed && comp_keys(&disk_key, progress) < 0)
1604 continue;
1605
1606 progress_passed = 1;
6b80053d 1607 blocknr = btrfs_node_blockptr(parent, i);
e9d0b13b
CM
1608 if (last_block == 0)
1609 last_block = blocknr;
5708b959 1610
6702ed49 1611 if (i > 0) {
6b80053d
CM
1612 other = btrfs_node_blockptr(parent, i - 1);
1613 close = close_blocks(blocknr, other, blocksize);
6702ed49 1614 }
5dfe2be7 1615 if (!close && i < end_slot) {
6b80053d
CM
1616 other = btrfs_node_blockptr(parent, i + 1);
1617 close = close_blocks(blocknr, other, blocksize);
6702ed49 1618 }
e9d0b13b
CM
1619 if (close) {
1620 last_block = blocknr;
6702ed49 1621 continue;
e9d0b13b 1622 }
6702ed49 1623
206983b7
JB
1624 cur = btrfs_read_node_slot(parent, i);
1625 if (IS_ERR(cur))
1626 return PTR_ERR(cur);
e9d0b13b 1627 if (search_start == 0)
6b80053d 1628 search_start = last_block;
e9d0b13b 1629
e7a84565 1630 btrfs_tree_lock(cur);
6b80053d 1631 err = __btrfs_cow_block(trans, root, cur, parent, i,
e7a84565 1632 &cur, search_start,
6b80053d 1633 min(16 * blocksize,
9631e4cc
JB
1634 (end_slot - i) * blocksize),
1635 BTRFS_NESTING_COW);
252c38f0 1636 if (err) {
e7a84565 1637 btrfs_tree_unlock(cur);
6b80053d 1638 free_extent_buffer(cur);
6702ed49 1639 break;
252c38f0 1640 }
e7a84565
CM
1641 search_start = cur->start;
1642 last_block = cur->start;
f2183bde 1643 *last_ret = search_start;
e7a84565
CM
1644 btrfs_tree_unlock(cur);
1645 free_extent_buffer(cur);
6702ed49
CM
1646 }
1647 return err;
1648}
1649
74123bd7 1650/*
5f39d397
CM
1651 * search for key in the extent_buffer. The items start at offset p,
1652 * and they are item_size apart. There are 'max' items in p.
1653 *
74123bd7
CM
1654 * the slot in the array is returned via slot, and it points to
1655 * the place where you would insert key if it is not found in
1656 * the array.
1657 *
1658 * slot may point to max if the key is bigger than all of the keys
1659 */
e02119d5 1660static noinline int generic_bin_search(struct extent_buffer *eb,
310712b2
OS
1661 unsigned long p, int item_size,
1662 const struct btrfs_key *key,
e02119d5 1663 int max, int *slot)
be0e5c09
CM
1664{
1665 int low = 0;
1666 int high = max;
be0e5c09 1667 int ret;
5cd17f34 1668 const int key_size = sizeof(struct btrfs_disk_key);
be0e5c09 1669
5e24e9af
LB
1670 if (low > high) {
1671 btrfs_err(eb->fs_info,
1672 "%s: low (%d) > high (%d) eb %llu owner %llu level %d",
1673 __func__, low, high, eb->start,
1674 btrfs_header_owner(eb), btrfs_header_level(eb));
1675 return -EINVAL;
1676 }
1677
d397712b 1678 while (low < high) {
5cd17f34
DS
1679 unsigned long oip;
1680 unsigned long offset;
1681 struct btrfs_disk_key *tmp;
1682 struct btrfs_disk_key unaligned;
1683 int mid;
1684
be0e5c09 1685 mid = (low + high) / 2;
5f39d397 1686 offset = p + mid * item_size;
5cd17f34 1687 oip = offset_in_page(offset);
5f39d397 1688
5cd17f34 1689 if (oip + key_size <= PAGE_SIZE) {
884b07d0 1690 const unsigned long idx = get_eb_page_index(offset);
5cd17f34 1691 char *kaddr = page_address(eb->pages[idx]);
5f39d397 1692
884b07d0 1693 oip = get_eb_offset_in_page(eb, offset);
5cd17f34 1694 tmp = (struct btrfs_disk_key *)(kaddr + oip);
5f39d397 1695 } else {
5cd17f34
DS
1696 read_extent_buffer(eb, &unaligned, offset, key_size);
1697 tmp = &unaligned;
5f39d397 1698 }
5cd17f34 1699
be0e5c09
CM
1700 ret = comp_keys(tmp, key);
1701
1702 if (ret < 0)
1703 low = mid + 1;
1704 else if (ret > 0)
1705 high = mid;
1706 else {
1707 *slot = mid;
1708 return 0;
1709 }
1710 }
1711 *slot = low;
1712 return 1;
1713}
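
/*
 * Minimal illustrative sketch (plain array, no extent_buffer paging) of the
 * contract generic_bin_search() implements above: return 0 and set *slot on
 * an exact match, otherwise return 1 with *slot at the position where the
 * key would be inserted, which may be equal to max.
 */
static int example_bin_search(const u64 *keys, int max, u64 key, int *slot)
{
	int low = 0;
	int high = max;

	while (low < high) {
		int mid = (low + high) / 2;

		if (keys[mid] < key)
			low = mid + 1;
		else if (keys[mid] > key)
			high = mid;
		else {
			*slot = mid;
			return 0;
		}
	}
	*slot = low;
	return 1;
}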
1714
97571fd0
CM
1715/*
1716 * simple bin_search frontend that does the right thing for
1717 * leaves vs nodes
1718 */
a74b35ec 1719int btrfs_bin_search(struct extent_buffer *eb, const struct btrfs_key *key,
e3b83361 1720 int *slot)
be0e5c09 1721{
e3b83361 1722 if (btrfs_header_level(eb) == 0)
5f39d397
CM
1723 return generic_bin_search(eb,
1724 offsetof(struct btrfs_leaf, items),
0783fcfc 1725 sizeof(struct btrfs_item),
5f39d397 1726 key, btrfs_header_nritems(eb),
7518a238 1727 slot);
f775738f 1728 else
5f39d397
CM
1729 return generic_bin_search(eb,
1730 offsetof(struct btrfs_node, ptrs),
123abc88 1731 sizeof(struct btrfs_key_ptr),
5f39d397 1732 key, btrfs_header_nritems(eb),
7518a238 1733 slot);
be0e5c09
CM
1734}
1735
f0486c68
YZ
1736static void root_add_used(struct btrfs_root *root, u32 size)
1737{
1738 spin_lock(&root->accounting_lock);
1739 btrfs_set_root_used(&root->root_item,
1740 btrfs_root_used(&root->root_item) + size);
1741 spin_unlock(&root->accounting_lock);
1742}
1743
1744static void root_sub_used(struct btrfs_root *root, u32 size)
1745{
1746 spin_lock(&root->accounting_lock);
1747 btrfs_set_root_used(&root->root_item,
1748 btrfs_root_used(&root->root_item) - size);
1749 spin_unlock(&root->accounting_lock);
1750}
1751
d352ac68
CM
1752/* given a node and slot number, this reads the block it points to. The
1753 * extent buffer is returned with a reference taken (but unlocked).
d352ac68 1754 */
4b231ae4
DS
1755struct extent_buffer *btrfs_read_node_slot(struct extent_buffer *parent,
1756 int slot)
bb803951 1757{
ca7a79ad 1758 int level = btrfs_header_level(parent);
416bc658 1759 struct extent_buffer *eb;
581c1760 1760 struct btrfs_key first_key;
416bc658 1761
fb770ae4
LB
1762 if (slot < 0 || slot >= btrfs_header_nritems(parent))
1763 return ERR_PTR(-ENOENT);
ca7a79ad
CM
1764
1765 BUG_ON(level == 0);
1766
581c1760 1767 btrfs_node_key_to_cpu(parent, &first_key, slot);
d0d20b0f 1768 eb = read_tree_block(parent->fs_info, btrfs_node_blockptr(parent, slot),
1b7ec85e 1769 btrfs_header_owner(parent),
581c1760
QW
1770 btrfs_node_ptr_generation(parent, slot),
1771 level - 1, &first_key);
fb770ae4
LB
1772 if (!IS_ERR(eb) && !extent_buffer_uptodate(eb)) {
1773 free_extent_buffer(eb);
1774 eb = ERR_PTR(-EIO);
416bc658
JB
1775 }
1776
1777 return eb;
bb803951
CM
1778}
1779
d352ac68
CM
1780/*
1781 * node level balancing, used to make sure nodes are in proper order for
1782 * item deletion. We balance from the top down, so we have to make sure
1783 * that a deletion won't leave a node completely empty later on.
1784 */
e02119d5 1785static noinline int balance_level(struct btrfs_trans_handle *trans,
98ed5174
CM
1786 struct btrfs_root *root,
1787 struct btrfs_path *path, int level)
bb803951 1788{
0b246afa 1789 struct btrfs_fs_info *fs_info = root->fs_info;
5f39d397
CM
1790 struct extent_buffer *right = NULL;
1791 struct extent_buffer *mid;
1792 struct extent_buffer *left = NULL;
1793 struct extent_buffer *parent = NULL;
bb803951
CM
1794 int ret = 0;
1795 int wret;
1796 int pslot;
bb803951 1797 int orig_slot = path->slots[level];
79f95c82 1798 u64 orig_ptr;
bb803951 1799
98e6b1eb 1800 ASSERT(level > 0);
bb803951 1801
5f39d397 1802 mid = path->nodes[level];
b4ce94de 1803
ac5887c8 1804 WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK);
7bb86316
CM
1805 WARN_ON(btrfs_header_generation(mid) != trans->transid);
1806
1d4f8a0c 1807 orig_ptr = btrfs_node_blockptr(mid, orig_slot);
79f95c82 1808
a05a9bb1 1809 if (level < BTRFS_MAX_LEVEL - 1) {
5f39d397 1810 parent = path->nodes[level + 1];
a05a9bb1
LZ
1811 pslot = path->slots[level + 1];
1812 }
bb803951 1813
40689478
CM
1814 /*
1815 * deal with the case where there is only one pointer in the root
1816 * by promoting the node below to a root
1817 */
5f39d397
CM
1818 if (!parent) {
1819 struct extent_buffer *child;
bb803951 1820
5f39d397 1821 if (btrfs_header_nritems(mid) != 1)
bb803951
CM
1822 return 0;
1823
1824 /* promote the child to a root */
4b231ae4 1825 child = btrfs_read_node_slot(mid, 0);
fb770ae4
LB
1826 if (IS_ERR(child)) {
1827 ret = PTR_ERR(child);
0b246afa 1828 btrfs_handle_fs_error(fs_info, ret, NULL);
305a26af
MF
1829 goto enospc;
1830 }
1831
925baedd 1832 btrfs_tree_lock(child);
9631e4cc
JB
1833 ret = btrfs_cow_block(trans, root, child, mid, 0, &child,
1834 BTRFS_NESTING_COW);
f0486c68
YZ
1835 if (ret) {
1836 btrfs_tree_unlock(child);
1837 free_extent_buffer(child);
1838 goto enospc;
1839 }
2f375ab9 1840
d9d19a01
DS
1841 ret = tree_mod_log_insert_root(root->node, child, 1);
1842 BUG_ON(ret < 0);
240f62c8 1843 rcu_assign_pointer(root->node, child);
925baedd 1844
0b86a832 1845 add_root_to_dirty_list(root);
925baedd 1846 btrfs_tree_unlock(child);
b4ce94de 1847
925baedd 1848 path->locks[level] = 0;
bb803951 1849 path->nodes[level] = NULL;
6a884d7d 1850 btrfs_clean_tree_block(mid);
925baedd 1851 btrfs_tree_unlock(mid);
bb803951 1852 /* once for the path */
5f39d397 1853 free_extent_buffer(mid);
f0486c68
YZ
1854
1855 root_sub_used(root, mid->len);
5581a51a 1856 btrfs_free_tree_block(trans, root, mid, 0, 1);
bb803951 1857 /* once for the root ptr */
3083ee2e 1858 free_extent_buffer_stale(mid);
f0486c68 1859 return 0;
bb803951 1860 }
5f39d397 1861 if (btrfs_header_nritems(mid) >
0b246afa 1862 BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 4)
bb803951
CM
1863 return 0;
1864
4b231ae4 1865 left = btrfs_read_node_slot(parent, pslot - 1);
fb770ae4
LB
1866 if (IS_ERR(left))
1867 left = NULL;
1868
5f39d397 1869 if (left) {
bf77467a 1870 __btrfs_tree_lock(left, BTRFS_NESTING_LEFT);
5f39d397 1871 wret = btrfs_cow_block(trans, root, left,
9631e4cc 1872 parent, pslot - 1, &left,
bf59a5a2 1873 BTRFS_NESTING_LEFT_COW);
54aa1f4d
CM
1874 if (wret) {
1875 ret = wret;
1876 goto enospc;
1877 }
2cc58cf2 1878 }
fb770ae4 1879
4b231ae4 1880 right = btrfs_read_node_slot(parent, pslot + 1);
fb770ae4
LB
1881 if (IS_ERR(right))
1882 right = NULL;
1883
5f39d397 1884 if (right) {
bf77467a 1885 __btrfs_tree_lock(right, BTRFS_NESTING_RIGHT);
5f39d397 1886 wret = btrfs_cow_block(trans, root, right,
9631e4cc 1887 parent, pslot + 1, &right,
bf59a5a2 1888 BTRFS_NESTING_RIGHT_COW);
2cc58cf2
CM
1889 if (wret) {
1890 ret = wret;
1891 goto enospc;
1892 }
1893 }
1894
1895 /* first, try to make some room in the middle buffer */
5f39d397
CM
1896 if (left) {
1897 orig_slot += btrfs_header_nritems(left);
d30a668f 1898 wret = push_node_left(trans, left, mid, 1);
79f95c82
CM
1899 if (wret < 0)
1900 ret = wret;
bb803951 1901 }
79f95c82
CM
1902
1903 /*
1904 * then try to empty the right most buffer into the middle
1905 */
5f39d397 1906 if (right) {
d30a668f 1907 wret = push_node_left(trans, mid, right, 1);
54aa1f4d 1908 if (wret < 0 && wret != -ENOSPC)
79f95c82 1909 ret = wret;
5f39d397 1910 if (btrfs_header_nritems(right) == 0) {
6a884d7d 1911 btrfs_clean_tree_block(right);
925baedd 1912 btrfs_tree_unlock(right);
afe5fea7 1913 del_ptr(root, path, level + 1, pslot + 1);
f0486c68 1914 root_sub_used(root, right->len);
5581a51a 1915 btrfs_free_tree_block(trans, root, right, 0, 1);
3083ee2e 1916 free_extent_buffer_stale(right);
f0486c68 1917 right = NULL;
bb803951 1918 } else {
5f39d397
CM
1919 struct btrfs_disk_key right_key;
1920 btrfs_node_key(right, &right_key, 0);
0e82bcfe
DS
1921 ret = tree_mod_log_insert_key(parent, pslot + 1,
1922 MOD_LOG_KEY_REPLACE, GFP_NOFS);
1923 BUG_ON(ret < 0);
5f39d397
CM
1924 btrfs_set_node_key(parent, &right_key, pslot + 1);
1925 btrfs_mark_buffer_dirty(parent);
bb803951
CM
1926 }
1927 }
5f39d397 1928 if (btrfs_header_nritems(mid) == 1) {
79f95c82
CM
1929 /*
1930 * we're not allowed to leave a node with one item in the
1931 * tree during a delete. A deletion from lower in the tree
1932 * could try to delete the only pointer in this node.
1933 * So, pull some keys from the left.
1934 * There has to be a left pointer at this point because
1935 * otherwise we would have pulled some pointers from the
1936 * right
1937 */
305a26af
MF
1938 if (!left) {
1939 ret = -EROFS;
0b246afa 1940 btrfs_handle_fs_error(fs_info, ret, NULL);
305a26af
MF
1941 goto enospc;
1942 }
55d32ed8 1943 wret = balance_node_right(trans, mid, left);
54aa1f4d 1944 if (wret < 0) {
79f95c82 1945 ret = wret;
54aa1f4d
CM
1946 goto enospc;
1947 }
bce4eae9 1948 if (wret == 1) {
d30a668f 1949 wret = push_node_left(trans, left, mid, 1);
bce4eae9
CM
1950 if (wret < 0)
1951 ret = wret;
1952 }
79f95c82
CM
1953 BUG_ON(wret == 1);
1954 }
5f39d397 1955 if (btrfs_header_nritems(mid) == 0) {
6a884d7d 1956 btrfs_clean_tree_block(mid);
925baedd 1957 btrfs_tree_unlock(mid);
afe5fea7 1958 del_ptr(root, path, level + 1, pslot);
f0486c68 1959 root_sub_used(root, mid->len);
5581a51a 1960 btrfs_free_tree_block(trans, root, mid, 0, 1);
3083ee2e 1961 free_extent_buffer_stale(mid);
f0486c68 1962 mid = NULL;
79f95c82
CM
1963 } else {
1964 /* update the parent key to reflect our changes */
5f39d397
CM
1965 struct btrfs_disk_key mid_key;
1966 btrfs_node_key(mid, &mid_key, 0);
0e82bcfe
DS
1967 ret = tree_mod_log_insert_key(parent, pslot,
1968 MOD_LOG_KEY_REPLACE, GFP_NOFS);
1969 BUG_ON(ret < 0);
5f39d397
CM
1970 btrfs_set_node_key(parent, &mid_key, pslot);
1971 btrfs_mark_buffer_dirty(parent);
79f95c82 1972 }
bb803951 1973
79f95c82 1974 /* update the path */
5f39d397
CM
1975 if (left) {
1976 if (btrfs_header_nritems(left) > orig_slot) {
67439dad 1977 atomic_inc(&left->refs);
925baedd 1978 /* left was locked after cow */
5f39d397 1979 path->nodes[level] = left;
bb803951
CM
1980 path->slots[level + 1] -= 1;
1981 path->slots[level] = orig_slot;
925baedd
CM
1982 if (mid) {
1983 btrfs_tree_unlock(mid);
5f39d397 1984 free_extent_buffer(mid);
925baedd 1985 }
bb803951 1986 } else {
5f39d397 1987 orig_slot -= btrfs_header_nritems(left);
bb803951
CM
1988 path->slots[level] = orig_slot;
1989 }
1990 }
79f95c82 1991 /* double check we haven't messed things up */
e20d96d6 1992 if (orig_ptr !=
5f39d397 1993 btrfs_node_blockptr(path->nodes[level], path->slots[level]))
79f95c82 1994 BUG();
54aa1f4d 1995enospc:
925baedd
CM
1996 if (right) {
1997 btrfs_tree_unlock(right);
5f39d397 1998 free_extent_buffer(right);
925baedd
CM
1999 }
2000 if (left) {
2001 if (path->nodes[level] != left)
2002 btrfs_tree_unlock(left);
5f39d397 2003 free_extent_buffer(left);
925baedd 2004 }
bb803951
CM
2005 return ret;
2006}
2007
d352ac68
CM
2008/* Node balancing for insertion. Here we only split or push nodes around
2009 * when they are completely full. This is also done top down, so we
2010 * have to be pessimistic.
2011 */
d397712b 2012static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
98ed5174
CM
2013 struct btrfs_root *root,
2014 struct btrfs_path *path, int level)
e66f709b 2015{
0b246afa 2016 struct btrfs_fs_info *fs_info = root->fs_info;
5f39d397
CM
2017 struct extent_buffer *right = NULL;
2018 struct extent_buffer *mid;
2019 struct extent_buffer *left = NULL;
2020 struct extent_buffer *parent = NULL;
e66f709b
CM
2021 int ret = 0;
2022 int wret;
2023 int pslot;
2024 int orig_slot = path->slots[level];
e66f709b
CM
2025
2026 if (level == 0)
2027 return 1;
2028
5f39d397 2029 mid = path->nodes[level];
7bb86316 2030 WARN_ON(btrfs_header_generation(mid) != trans->transid);
e66f709b 2031
a05a9bb1 2032 if (level < BTRFS_MAX_LEVEL - 1) {
5f39d397 2033 parent = path->nodes[level + 1];
a05a9bb1
LZ
2034 pslot = path->slots[level + 1];
2035 }
e66f709b 2036
5f39d397 2037 if (!parent)
e66f709b 2038 return 1;
e66f709b 2039
4b231ae4 2040 left = btrfs_read_node_slot(parent, pslot - 1);
fb770ae4
LB
2041 if (IS_ERR(left))
2042 left = NULL;
e66f709b
CM
2043
2044 /* first, try to make some room in the middle buffer */
5f39d397 2045 if (left) {
e66f709b 2046 u32 left_nr;
925baedd 2047
bf77467a 2048 __btrfs_tree_lock(left, BTRFS_NESTING_LEFT);
b4ce94de 2049
5f39d397 2050 left_nr = btrfs_header_nritems(left);
0b246afa 2051 if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
33ade1f8
CM
2052 wret = 1;
2053 } else {
5f39d397 2054 ret = btrfs_cow_block(trans, root, left, parent,
9631e4cc 2055 pslot - 1, &left,
bf59a5a2 2056 BTRFS_NESTING_LEFT_COW);
54aa1f4d
CM
2057 if (ret)
2058 wret = 1;
2059 else {
d30a668f 2060 wret = push_node_left(trans, left, mid, 0);
54aa1f4d 2061 }
33ade1f8 2062 }
e66f709b
CM
2063 if (wret < 0)
2064 ret = wret;
2065 if (wret == 0) {
5f39d397 2066 struct btrfs_disk_key disk_key;
e66f709b 2067 orig_slot += left_nr;
5f39d397 2068 btrfs_node_key(mid, &disk_key, 0);
0e82bcfe
DS
2069 ret = tree_mod_log_insert_key(parent, pslot,
2070 MOD_LOG_KEY_REPLACE, GFP_NOFS);
2071 BUG_ON(ret < 0);
5f39d397
CM
2072 btrfs_set_node_key(parent, &disk_key, pslot);
2073 btrfs_mark_buffer_dirty(parent);
2074 if (btrfs_header_nritems(left) > orig_slot) {
2075 path->nodes[level] = left;
e66f709b
CM
2076 path->slots[level + 1] -= 1;
2077 path->slots[level] = orig_slot;
925baedd 2078 btrfs_tree_unlock(mid);
5f39d397 2079 free_extent_buffer(mid);
e66f709b
CM
2080 } else {
2081 orig_slot -=
5f39d397 2082 btrfs_header_nritems(left);
e66f709b 2083 path->slots[level] = orig_slot;
925baedd 2084 btrfs_tree_unlock(left);
5f39d397 2085 free_extent_buffer(left);
e66f709b 2086 }
e66f709b
CM
2087 return 0;
2088 }
925baedd 2089 btrfs_tree_unlock(left);
5f39d397 2090 free_extent_buffer(left);
e66f709b 2091 }
4b231ae4 2092 right = btrfs_read_node_slot(parent, pslot + 1);
fb770ae4
LB
2093 if (IS_ERR(right))
2094 right = NULL;
e66f709b
CM
2095
2096 /*
2097 * then try to empty the right most buffer into the middle
2098 */
5f39d397 2099 if (right) {
33ade1f8 2100 u32 right_nr;
b4ce94de 2101
bf77467a 2102 __btrfs_tree_lock(right, BTRFS_NESTING_RIGHT);
b4ce94de 2103
5f39d397 2104 right_nr = btrfs_header_nritems(right);
0b246afa 2105 if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
33ade1f8
CM
2106 wret = 1;
2107 } else {
5f39d397
CM
2108 ret = btrfs_cow_block(trans, root, right,
2109 parent, pslot + 1,
bf59a5a2 2110 &right, BTRFS_NESTING_RIGHT_COW);
54aa1f4d
CM
2111 if (ret)
2112 wret = 1;
2113 else {
55d32ed8 2114 wret = balance_node_right(trans, right, mid);
54aa1f4d 2115 }
33ade1f8 2116 }
e66f709b
CM
2117 if (wret < 0)
2118 ret = wret;
2119 if (wret == 0) {
5f39d397
CM
2120 struct btrfs_disk_key disk_key;
2121
2122 btrfs_node_key(right, &disk_key, 0);
0e82bcfe
DS
2123 ret = tree_mod_log_insert_key(parent, pslot + 1,
2124 MOD_LOG_KEY_REPLACE, GFP_NOFS);
2125 BUG_ON(ret < 0);
5f39d397
CM
2126 btrfs_set_node_key(parent, &disk_key, pslot + 1);
2127 btrfs_mark_buffer_dirty(parent);
2128
2129 if (btrfs_header_nritems(mid) <= orig_slot) {
2130 path->nodes[level] = right;
e66f709b
CM
2131 path->slots[level + 1] += 1;
2132 path->slots[level] = orig_slot -
5f39d397 2133 btrfs_header_nritems(mid);
925baedd 2134 btrfs_tree_unlock(mid);
5f39d397 2135 free_extent_buffer(mid);
e66f709b 2136 } else {
925baedd 2137 btrfs_tree_unlock(right);
5f39d397 2138 free_extent_buffer(right);
e66f709b 2139 }
e66f709b
CM
2140 return 0;
2141 }
925baedd 2142 btrfs_tree_unlock(right);
5f39d397 2143 free_extent_buffer(right);
e66f709b 2144 }
e66f709b
CM
2145 return 1;
2146}
2147
3c69faec 2148/*
d352ac68
CM
2149 * readahead one full node of leaves, finding things that are close
2150 * to the block in 'slot', and triggering readahead on them.
3c69faec 2151 */
2ff7e61e 2152static void reada_for_search(struct btrfs_fs_info *fs_info,
c8c42864
CM
2153 struct btrfs_path *path,
2154 int level, int slot, u64 objectid)
3c69faec 2155{
5f39d397 2156 struct extent_buffer *node;
01f46658 2157 struct btrfs_disk_key disk_key;
3c69faec 2158 u32 nritems;
3c69faec 2159 u64 search;
a7175319 2160 u64 target;
6b80053d 2161 u64 nread = 0;
5f39d397 2162 struct extent_buffer *eb;
6b80053d
CM
2163 u32 nr;
2164 u32 blocksize;
2165 u32 nscan = 0;
db94535d 2166
a6b6e75e 2167 if (level != 1)
6702ed49
CM
2168 return;
2169
2170 if (!path->nodes[level])
3c69faec
CM
2171 return;
2172
5f39d397 2173 node = path->nodes[level];
925baedd 2174
3c69faec 2175 search = btrfs_node_blockptr(node, slot);
0b246afa
JM
2176 blocksize = fs_info->nodesize;
2177 eb = find_extent_buffer(fs_info, search);
5f39d397
CM
2178 if (eb) {
2179 free_extent_buffer(eb);
3c69faec
CM
2180 return;
2181 }
2182
a7175319 2183 target = search;
6b80053d 2184
5f39d397 2185 nritems = btrfs_header_nritems(node);
6b80053d 2186 nr = slot;
25b8b936 2187
d397712b 2188 while (1) {
e4058b54 2189 if (path->reada == READA_BACK) {
6b80053d
CM
2190 if (nr == 0)
2191 break;
2192 nr--;
e4058b54 2193 } else if (path->reada == READA_FORWARD) {
6b80053d
CM
2194 nr++;
2195 if (nr >= nritems)
2196 break;
3c69faec 2197 }
e4058b54 2198 if (path->reada == READA_BACK && objectid) {
01f46658
CM
2199 btrfs_node_key(node, &disk_key, nr);
2200 if (btrfs_disk_key_objectid(&disk_key) != objectid)
2201 break;
2202 }
6b80053d 2203 search = btrfs_node_blockptr(node, nr);
a7175319
CM
2204 if ((search <= target && target - search <= 65536) ||
2205 (search > target && search - target <= 65536)) {
bfb484d9 2206 btrfs_readahead_node_child(node, nr);
6b80053d
CM
2207 nread += blocksize;
2208 }
2209 nscan++;
a7175319 2210 if ((nread > 65536 || nscan > 32))
6b80053d 2211 break;
3c69faec
CM
2212 }
2213}
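
/*
 * Minimal illustrative sketch of the readahead window test used in the loop
 * above: a sibling block is only worth reading ahead when its block number
 * lies within 64K of the block we are actually after, and the scan stops
 * once 64K of readahead has been issued or 32 slots have been examined.
 * The helper name is made up for the example.
 */
static inline bool example_in_reada_window(u64 search, u64 target)
{
	if (search <= target)
		return target - search <= 65536;
	return search - target <= 65536;
}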
925baedd 2214
bfb484d9 2215static noinline void reada_for_balance(struct btrfs_path *path, int level)
b4ce94de 2216{
bfb484d9 2217 struct extent_buffer *parent;
b4ce94de
CM
2218 int slot;
2219 int nritems;
b4ce94de 2220
8c594ea8 2221 parent = path->nodes[level + 1];
b4ce94de 2222 if (!parent)
0b08851f 2223 return;
b4ce94de
CM
2224
2225 nritems = btrfs_header_nritems(parent);
8c594ea8 2226 slot = path->slots[level + 1];
b4ce94de 2227
bfb484d9
JB
2228 if (slot > 0)
2229 btrfs_readahead_node_child(parent, slot - 1);
2230 if (slot + 1 < nritems)
2231 btrfs_readahead_node_child(parent, slot + 1);
b4ce94de
CM
2232}
2233
2234
d352ac68 2235/*
d397712b
CM
2236 * when we walk down the tree, it is usually safe to unlock the higher layers
2237 * in the tree. The exceptions are when our path goes through slot 0, because
2238 * operations on the tree might require changing key pointers higher up in the
2239 * tree.
d352ac68 2240 *
d397712b
CM
2241 * callers might also have set path->keep_locks, which tells this code to keep
2242 * the lock if the path points to the last slot in the block. This is part of
2243 * walking through the tree, and selecting the next slot in the higher block.
d352ac68 2244 *
d397712b
CM
2245 * lowest_unlock sets the lowest level in the tree we're allowed to unlock. so
2246 * if lowest_unlock is 1, level 0 won't be unlocked
d352ac68 2247 */
e02119d5 2248static noinline void unlock_up(struct btrfs_path *path, int level,
f7c79f30
CM
2249 int lowest_unlock, int min_write_lock_level,
2250 int *write_lock_level)
925baedd
CM
2251{
2252 int i;
2253 int skip_level = level;
051e1b9f 2254 int no_skips = 0;
925baedd
CM
2255 struct extent_buffer *t;
2256
2257 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2258 if (!path->nodes[i])
2259 break;
2260 if (!path->locks[i])
2261 break;
051e1b9f 2262 if (!no_skips && path->slots[i] == 0) {
925baedd
CM
2263 skip_level = i + 1;
2264 continue;
2265 }
051e1b9f 2266 if (!no_skips && path->keep_locks) {
925baedd
CM
2267 u32 nritems;
2268 t = path->nodes[i];
2269 nritems = btrfs_header_nritems(t);
051e1b9f 2270 if (nritems < 1 || path->slots[i] >= nritems - 1) {
925baedd
CM
2271 skip_level = i + 1;
2272 continue;
2273 }
2274 }
051e1b9f
CM
2275 if (skip_level < i && i >= lowest_unlock)
2276 no_skips = 1;
2277
925baedd 2278 t = path->nodes[i];
d80bb3f9 2279 if (i >= lowest_unlock && i > skip_level) {
bd681513 2280 btrfs_tree_unlock_rw(t, path->locks[i]);
925baedd 2281 path->locks[i] = 0;
f7c79f30
CM
2282 if (write_lock_level &&
2283 i > min_write_lock_level &&
2284 i <= *write_lock_level) {
2285 *write_lock_level = i - 1;
2286 }
925baedd
CM
2287 }
2288 }
2289}
2290
c8c42864
CM
2291/*
2292 * helper function for btrfs_search_slot. The goal is to find a block
2293 * in cache without setting the path to blocking. If we find the block
2294 * we return zero and the path is unchanged.
2295 *
2296 * If we can't find the block, we set the path blocking and do some
2297 * readahead. -EAGAIN is returned and the search must be repeated.
2298 */
2299static int
d07b8528
LB
2300read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
2301 struct extent_buffer **eb_ret, int level, int slot,
cda79c54 2302 const struct btrfs_key *key)
c8c42864 2303{
0b246afa 2304 struct btrfs_fs_info *fs_info = root->fs_info;
c8c42864
CM
2305 u64 blocknr;
2306 u64 gen;
c8c42864 2307 struct extent_buffer *tmp;
581c1760 2308 struct btrfs_key first_key;
76a05b35 2309 int ret;
581c1760 2310 int parent_level;
c8c42864 2311
213ff4b7
NB
2312 blocknr = btrfs_node_blockptr(*eb_ret, slot);
2313 gen = btrfs_node_ptr_generation(*eb_ret, slot);
2314 parent_level = btrfs_header_level(*eb_ret);
2315 btrfs_node_key_to_cpu(*eb_ret, &first_key, slot);
c8c42864 2316
0b246afa 2317 tmp = find_extent_buffer(fs_info, blocknr);
cb44921a 2318 if (tmp) {
b9fab919 2319 /* first we do an atomic uptodate check */
bdf7c00e 2320 if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
448de471
QW
2321 /*
2322 * Do extra check for first_key, eb can be stale due to
2323 * being cached, read from scrub, or have multiple
2324 * parents (shared tree blocks).
2325 */
e064d5e9 2326 if (btrfs_verify_level_key(tmp,
448de471
QW
2327 parent_level - 1, &first_key, gen)) {
2328 free_extent_buffer(tmp);
2329 return -EUCLEAN;
2330 }
bdf7c00e
JB
2331 *eb_ret = tmp;
2332 return 0;
2333 }
2334
bdf7c00e 2335 /* now we're allowed to do a blocking uptodate check */
581c1760 2336 ret = btrfs_read_buffer(tmp, gen, parent_level - 1, &first_key);
bdf7c00e
JB
2337 if (!ret) {
2338 *eb_ret = tmp;
2339 return 0;
cb44921a 2340 }
bdf7c00e
JB
2341 free_extent_buffer(tmp);
2342 btrfs_release_path(p);
2343 return -EIO;
c8c42864
CM
2344 }
2345
2346 /*
2347 * reduce lock contention at high levels
2348 * of the btree by dropping locks before
76a05b35
CM
2349 * we read. Don't release the lock on the current
2350 * level because we need to walk this node to figure
2351 * out which blocks to read.
c8c42864 2352 */
8c594ea8 2353 btrfs_unlock_up_safe(p, level + 1);
8c594ea8 2354
e4058b54 2355 if (p->reada != READA_NONE)
2ff7e61e 2356 reada_for_search(fs_info, p, level, slot, key->objectid);
c8c42864 2357
76a05b35 2358 ret = -EAGAIN;
1b7ec85e
JB
2359 tmp = read_tree_block(fs_info, blocknr, root->root_key.objectid,
2360 gen, parent_level - 1, &first_key);
64c043de 2361 if (!IS_ERR(tmp)) {
76a05b35
CM
2362 /*
2363 * If the read above didn't mark this buffer up to date,
2364 * it will never end up being up to date. Set ret to EIO now
2365 * and give up so that our caller doesn't loop forever
2366 * on our EAGAINs.
2367 */
e6a1d6fd 2368 if (!extent_buffer_uptodate(tmp))
76a05b35 2369 ret = -EIO;
c8c42864 2370 free_extent_buffer(tmp);
c871b0f2
LB
2371 } else {
2372 ret = PTR_ERR(tmp);
76a05b35 2373 }
02a3307a
LB
2374
2375 btrfs_release_path(p);
76a05b35 2376 return ret;
c8c42864
CM
2377}
2378
2379/*
2380 * helper function for btrfs_search_slot. This does all of the checks
2381 * for node-level blocks and does any balancing required based on
2382 * the ins_len.
2383 *
2384 * If no extra work was required, zero is returned. If we had to
2385 * drop the path, -EAGAIN is returned and btrfs_search_slot must
2386 * start over
2387 */
2388static int
2389setup_nodes_for_search(struct btrfs_trans_handle *trans,
2390 struct btrfs_root *root, struct btrfs_path *p,
bd681513
CM
2391 struct extent_buffer *b, int level, int ins_len,
2392 int *write_lock_level)
c8c42864 2393{
0b246afa 2394 struct btrfs_fs_info *fs_info = root->fs_info;
95b982de 2395 int ret = 0;
0b246afa 2396
c8c42864 2397 if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
0b246afa 2398 BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3) {
c8c42864 2399
bd681513
CM
2400 if (*write_lock_level < level + 1) {
2401 *write_lock_level = level + 1;
2402 btrfs_release_path(p);
95b982de 2403 return -EAGAIN;
bd681513
CM
2404 }
2405
bfb484d9 2406 reada_for_balance(p, level);
95b982de 2407 ret = split_node(trans, root, p, level);
c8c42864 2408
c8c42864
CM
2409 b = p->nodes[level];
2410 } else if (ins_len < 0 && btrfs_header_nritems(b) <
0b246afa 2411 BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 2) {
c8c42864 2412
bd681513
CM
2413 if (*write_lock_level < level + 1) {
2414 *write_lock_level = level + 1;
2415 btrfs_release_path(p);
95b982de 2416 return -EAGAIN;
bd681513
CM
2417 }
2418
bfb484d9 2419 reada_for_balance(p, level);
95b982de
NB
2420 ret = balance_level(trans, root, p, level);
2421 if (ret)
2422 return ret;
c8c42864 2423
c8c42864
CM
2424 b = p->nodes[level];
2425 if (!b) {
b3b4aa74 2426 btrfs_release_path(p);
95b982de 2427 return -EAGAIN;
c8c42864
CM
2428 }
2429 BUG_ON(btrfs_header_nritems(b) == 1);
2430 }
c8c42864
CM
2431 return ret;
2432}
2433
381cf658 2434int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path,
e33d5c3d
KN
2435 u64 iobjectid, u64 ioff, u8 key_type,
2436 struct btrfs_key *found_key)
2437{
2438 int ret;
2439 struct btrfs_key key;
2440 struct extent_buffer *eb;
381cf658
DS
2441
2442 ASSERT(path);
1d4c08e0 2443 ASSERT(found_key);
e33d5c3d
KN
2444
2445 key.type = key_type;
2446 key.objectid = iobjectid;
2447 key.offset = ioff;
2448
2449 ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
1d4c08e0 2450 if (ret < 0)
e33d5c3d
KN
2451 return ret;
2452
2453 eb = path->nodes[0];
2454 if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
2455 ret = btrfs_next_leaf(fs_root, path);
2456 if (ret)
2457 return ret;
2458 eb = path->nodes[0];
2459 }
2460
2461 btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
2462 if (found_key->type != key.type ||
2463 found_key->objectid != key.objectid)
2464 return 1;
2465
2466 return 0;
2467}
2468
1fc28d8e
LB
2469static struct extent_buffer *btrfs_search_slot_get_root(struct btrfs_root *root,
2470 struct btrfs_path *p,
2471 int write_lock_level)
2472{
2473 struct btrfs_fs_info *fs_info = root->fs_info;
2474 struct extent_buffer *b;
2475 int root_lock;
2476 int level = 0;
2477
2478 /* We try very hard to do read locks on the root */
2479 root_lock = BTRFS_READ_LOCK;
2480
2481 if (p->search_commit_root) {
be6821f8
FM
2482 /*
2483 * The commit roots are read only so we always do read locks,
2484 * and we always must hold the commit_root_sem when doing
2485 * searches on them, the only exception is send where we don't
2486 * want to block transaction commits for a long time, so
2487 * we need to clone the commit root in order to avoid races
2488 * with transaction commits that create a snapshot of one of
2489 * the roots used by a send operation.
2490 */
2491 if (p->need_commit_sem) {
1fc28d8e 2492 down_read(&fs_info->commit_root_sem);
be6821f8 2493 b = btrfs_clone_extent_buffer(root->commit_root);
1fc28d8e 2494 up_read(&fs_info->commit_root_sem);
be6821f8
FM
2495 if (!b)
2496 return ERR_PTR(-ENOMEM);
2497
2498 } else {
2499 b = root->commit_root;
67439dad 2500 atomic_inc(&b->refs);
be6821f8
FM
2501 }
2502 level = btrfs_header_level(b);
f9ddfd05
LB
2503 /*
2504 * Ensure that all callers have set skip_locking when
2505 * p->search_commit_root = 1.
2506 */
2507 ASSERT(p->skip_locking == 1);
1fc28d8e
LB
2508
2509 goto out;
2510 }
2511
2512 if (p->skip_locking) {
2513 b = btrfs_root_node(root);
2514 level = btrfs_header_level(b);
2515 goto out;
2516 }
2517
2518 /*
662c653b
LB
2519 * If the level is set to maximum, we can skip trying to get the read
2520 * lock.
1fc28d8e 2521 */
662c653b
LB
2522 if (write_lock_level < BTRFS_MAX_LEVEL) {
2523 /*
2524 * We don't know the level of the root node until we actually
2525 * have it read locked
2526 */
1bb96598 2527 b = btrfs_read_lock_root_node(root);
662c653b
LB
2528 level = btrfs_header_level(b);
2529 if (level > write_lock_level)
2530 goto out;
2531
2532 /* Whoops, must trade for write lock */
2533 btrfs_tree_read_unlock(b);
2534 free_extent_buffer(b);
2535 }
1fc28d8e 2536
1fc28d8e
LB
2537 b = btrfs_lock_root_node(root);
2538 root_lock = BTRFS_WRITE_LOCK;
2539
2540 /* The level might have changed, check again */
2541 level = btrfs_header_level(b);
2542
2543out:
2544 p->nodes[level] = b;
2545 if (!p->skip_locking)
2546 p->locks[level] = root_lock;
2547 /*
2548 * Callers are responsible for dropping b's references.
2549 */
2550 return b;
2551}
2552
2553
74123bd7 2554/*
4271ecea
NB
2555 * btrfs_search_slot - look for a key in a tree and perform necessary
2556 * modifications to preserve tree invariants.
74123bd7 2557 *
4271ecea
NB
2558 * @trans: Handle of transaction, used when modifying the tree
2559 * @p: Holds all btree nodes along the search path
2560 * @root: The root node of the tree
2561 * @key: The key we are looking for
9a664971 2562 * @ins_len: Indicates purpose of search:
2563 * >0 for inserts, it is the size of the item inserted (*)
2564 * <0 for deletions
2565 * 0 for plain searches, not modifying the tree
2566 *
2567 * (*) If size of item inserted doesn't include
2568 * sizeof(struct btrfs_item), then p->search_for_extension must
2569 * be set.
4271ecea
NB
2570 * @cow: boolean indicating whether CoW operations should be performed. Must always be 1
2571 * when modifying the tree.
97571fd0 2572 *
4271ecea
NB
2573 * If @ins_len > 0, nodes and leaves will be split as we walk down the tree.
2574 * If @ins_len < 0, nodes will be merged as we walk down the tree (if possible)
2575 *
2576 * If @key is found, 0 is returned and you can find the item in the leaf level
2577 * of the path (level 0)
2578 *
2579 * If @key isn't found, 1 is returned and the leaf level of the path (level 0)
2580 * points to the slot where it should be inserted
2581 *
2582 * If an error is encountered while searching the tree a negative error number
2583 * is returned
74123bd7 2584 */
310712b2
OS
2585int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2586 const struct btrfs_key *key, struct btrfs_path *p,
2587 int ins_len, int cow)
be0e5c09 2588{
5f39d397 2589 struct extent_buffer *b;
be0e5c09
CM
2590 int slot;
2591 int ret;
33c66f43 2592 int err;
be0e5c09 2593 int level;
925baedd 2594 int lowest_unlock = 1;
bd681513
CM
2595 /* everything at write_lock_level or lower must be write locked */
2596 int write_lock_level = 0;
9f3a7427 2597 u8 lowest_level = 0;
f7c79f30 2598 int min_write_lock_level;
d7396f07 2599 int prev_cmp;
9f3a7427 2600
6702ed49 2601 lowest_level = p->lowest_level;
323ac95b 2602 WARN_ON(lowest_level && ins_len > 0);
22b0ebda 2603 WARN_ON(p->nodes[0] != NULL);
eb653de1 2604 BUG_ON(!cow && ins_len);
25179201 2605
bd681513 2606 if (ins_len < 0) {
925baedd 2607 lowest_unlock = 2;
65b51a00 2608
bd681513
CM
2609 /* when we are removing items, we might have to go up to level
2610 * two as we update tree pointers Make sure we keep write
2611 * for those levels as well
2612 */
2613 write_lock_level = 2;
2614 } else if (ins_len > 0) {
2615 /*
2616 * for inserting items, make sure we have a write lock on
2617 * level 1 so we can update keys
2618 */
2619 write_lock_level = 1;
2620 }
2621
2622 if (!cow)
2623 write_lock_level = -1;
2624
09a2a8f9 2625 if (cow && (p->keep_locks || p->lowest_level))
bd681513
CM
2626 write_lock_level = BTRFS_MAX_LEVEL;
2627
f7c79f30
CM
2628 min_write_lock_level = write_lock_level;
2629
bb803951 2630again:
d7396f07 2631 prev_cmp = -1;
1fc28d8e 2632 b = btrfs_search_slot_get_root(root, p, write_lock_level);
be6821f8
FM
2633 if (IS_ERR(b)) {
2634 ret = PTR_ERR(b);
2635 goto done;
2636 }
925baedd 2637
eb60ceac 2638 while (b) {
f624d976
QW
2639 int dec = 0;
2640
5f39d397 2641 level = btrfs_header_level(b);
65b51a00 2642
02217ed2 2643 if (cow) {
9ea2c7c9
NB
2644 bool last_level = (level == (BTRFS_MAX_LEVEL - 1));
2645
c8c42864
CM
2646 /*
2647 * if we don't really need to cow this block
2648 * then we don't want to set the path blocking,
2649 * so we test it here
2650 */
64c12921
JM
2651 if (!should_cow_block(trans, root, b)) {
2652 trans->dirty = true;
65b51a00 2653 goto cow_done;
64c12921 2654 }
5d4f98a2 2655
bd681513
CM
2656 /*
2657 * must have write locks on this node and the
2658 * parent
2659 */
5124e00e
JB
2660 if (level > write_lock_level ||
2661 (level + 1 > write_lock_level &&
2662 level + 1 < BTRFS_MAX_LEVEL &&
2663 p->nodes[level + 1])) {
bd681513
CM
2664 write_lock_level = level + 1;
2665 btrfs_release_path(p);
2666 goto again;
2667 }
2668
9ea2c7c9
NB
2669 if (last_level)
2670 err = btrfs_cow_block(trans, root, b, NULL, 0,
9631e4cc
JB
2671 &b,
2672 BTRFS_NESTING_COW);
9ea2c7c9
NB
2673 else
2674 err = btrfs_cow_block(trans, root, b,
2675 p->nodes[level + 1],
9631e4cc
JB
2676 p->slots[level + 1], &b,
2677 BTRFS_NESTING_COW);
33c66f43 2678 if (err) {
33c66f43 2679 ret = err;
65b51a00 2680 goto done;
54aa1f4d 2681 }
02217ed2 2682 }
65b51a00 2683cow_done:
eb60ceac 2684 p->nodes[level] = b;
52398340
LB
2685 /*
2686 * Leave path with blocking locks to avoid massive
2687 * lock context switch; this is done on purpose.
2688 */
b4ce94de
CM
2689
2690 /*
2691 * we have a lock on b and as long as we aren't changing
2692 * the tree, there is no way for the items in b to change.
2693 * It is safe to drop the lock on our parent before we
2694 * go through the expensive btree search on b.
2695 *
eb653de1
FDBM
2696 * If we're inserting or deleting (ins_len != 0), then we might
2697 * be changing slot zero, which may require changing the parent.
2698 * So, we can't drop the lock until after we know which slot
2699 * we're operating on.
b4ce94de 2700 */
eb653de1
FDBM
2701 if (!ins_len && !p->keep_locks) {
2702 int u = level + 1;
2703
2704 if (u < BTRFS_MAX_LEVEL && p->locks[u]) {
2705 btrfs_tree_unlock_rw(p->nodes[u], p->locks[u]);
2706 p->locks[u] = 0;
2707 }
2708 }
b4ce94de 2709
995e9a16
NB
2710 /*
2711 * If btrfs_bin_search returns an exact match (prev_cmp == 0)
2712 * we can safely assume the target key will always be in slot 0
2713 * on lower levels due to the invariants BTRFS' btree provides,
2714 * namely that a btrfs_key_ptr entry always points to the
2715 * lowest key in the child node, thus we can skip searching
2716 * lower levels
2717 */
2718 if (prev_cmp == 0) {
2719 slot = 0;
2720 ret = 0;
2721 } else {
2722 ret = btrfs_bin_search(b, key, &slot);
2723 prev_cmp = ret;
2724 if (ret < 0)
2725 goto done;
2726 }
b4ce94de 2727
f624d976 2728 if (level == 0) {
be0e5c09 2729 p->slots[level] = slot;
9a664971 2730 /*
2731 * Item key already exists. In this case, if we are
2732 * allowed to insert the item (for example, in dir_item
2733 * case, item key collision is allowed), it will be
2734 * merged with the original item. Only the item size
2735 * grows, no new btrfs item will be added. If
2736 * search_for_extension is not set, ins_len already
2737 * accounts the size btrfs_item, deduct it here so leaf
2738 * space check will be correct.
2739 */
2740 if (ret == 0 && ins_len > 0 && !p->search_for_extension) {
2741 ASSERT(ins_len >= sizeof(struct btrfs_item));
2742 ins_len -= sizeof(struct btrfs_item);
2743 }
87b29b20 2744 if (ins_len > 0 &&
e902baac 2745 btrfs_leaf_free_space(b) < ins_len) {
bd681513
CM
2746 if (write_lock_level < 1) {
2747 write_lock_level = 1;
2748 btrfs_release_path(p);
2749 goto again;
2750 }
2751
33c66f43
YZ
2752 err = split_leaf(trans, root, key,
2753 p, ins_len, ret == 0);
b4ce94de 2754
33c66f43
YZ
2755 BUG_ON(err > 0);
2756 if (err) {
2757 ret = err;
65b51a00
CM
2758 goto done;
2759 }
5c680ed6 2760 }
459931ec 2761 if (!p->search_for_split)
f7c79f30 2762 unlock_up(p, level, lowest_unlock,
4b6f8e96 2763 min_write_lock_level, NULL);
65b51a00 2764 goto done;
be0e5c09 2765 }
f624d976
QW
2766 if (ret && slot > 0) {
2767 dec = 1;
2768 slot--;
2769 }
2770 p->slots[level] = slot;
2771 err = setup_nodes_for_search(trans, root, p, b, level, ins_len,
2772 &write_lock_level);
2773 if (err == -EAGAIN)
2774 goto again;
2775 if (err) {
2776 ret = err;
2777 goto done;
2778 }
2779 b = p->nodes[level];
2780 slot = p->slots[level];
2781
2782 /*
2783 * Slot 0 is special, if we change the key we have to update
2784 * the parent pointer which means we must have a write lock on
2785 * the parent
2786 */
2787 if (slot == 0 && ins_len && write_lock_level < level + 1) {
2788 write_lock_level = level + 1;
2789 btrfs_release_path(p);
2790 goto again;
2791 }
2792
2793 unlock_up(p, level, lowest_unlock, min_write_lock_level,
2794 &write_lock_level);
2795
2796 if (level == lowest_level) {
2797 if (dec)
2798 p->slots[level]++;
2799 goto done;
2800 }
2801
2802 err = read_block_for_search(root, p, &b, level, slot, key);
2803 if (err == -EAGAIN)
2804 goto again;
2805 if (err) {
2806 ret = err;
2807 goto done;
2808 }
2809
2810 if (!p->skip_locking) {
2811 level = btrfs_header_level(b);
2812 if (level <= write_lock_level) {
ac5887c8 2813 btrfs_tree_lock(b);
f624d976
QW
2814 p->locks[level] = BTRFS_WRITE_LOCK;
2815 } else {
fe596ca3 2816 btrfs_tree_read_lock(b);
f624d976
QW
2817 p->locks[level] = BTRFS_READ_LOCK;
2818 }
2819 p->nodes[level] = b;
2820 }
be0e5c09 2821 }
65b51a00
CM
2822 ret = 1;
2823done:
5f5bc6b1 2824 if (ret < 0 && !p->skip_release_on_error)
b3b4aa74 2825 btrfs_release_path(p);
65b51a00 2826 return ret;
be0e5c09 2827}
f75e2b79 2828ALLOW_ERROR_INJECTION(btrfs_search_slot, ERRNO);
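
/*
 * Minimal illustrative sketch of a read-only btrfs_search_slot() caller,
 * following the contract documented above and the same pattern used by
 * btrfs_find_item(): NULL transaction, ins_len == 0 and cow == 0.  The
 * wrapper name is made up for the example; everything it calls is real.
 */
static int example_lookup_key(struct btrfs_root *root, u64 objectid, u8 type,
			      u64 offset, struct btrfs_key *found_key)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;

	key.objectid = objectid;
	key.type = type;
	key.offset = offset;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret == 0)
		btrfs_item_key_to_cpu(path->nodes[0], found_key,
				      path->slots[0]);

	/* ret == 1 means the key was not found, ret < 0 is an error */
	btrfs_free_path(path);
	return ret;
}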
be0e5c09 2829
5d9e75c4
JS
2830/*
2831 * Like btrfs_search_slot, this looks for a key in the given tree. It uses the
2832 * current state of the tree together with the operations recorded in the tree
2833 * modification log to search for the key in a previous version of this tree, as
2834 * denoted by the time_seq parameter.
2835 *
2836 * Naturally, there is no support for insert, delete or cow operations.
2837 *
2838 * The resulting path and return value will be set up as if we called
2839 * btrfs_search_slot at that point in time with ins_len and cow both set to 0.
2840 */
310712b2 2841int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key,
5d9e75c4
JS
2842 struct btrfs_path *p, u64 time_seq)
2843{
0b246afa 2844 struct btrfs_fs_info *fs_info = root->fs_info;
5d9e75c4
JS
2845 struct extent_buffer *b;
2846 int slot;
2847 int ret;
2848 int err;
2849 int level;
2850 int lowest_unlock = 1;
2851 u8 lowest_level = 0;
2852
2853 lowest_level = p->lowest_level;
2854 WARN_ON(p->nodes[0] != NULL);
2855
2856 if (p->search_commit_root) {
2857 BUG_ON(time_seq);
2858 return btrfs_search_slot(NULL, root, key, p, 0, 0);
2859 }
2860
2861again:
5d9e75c4 2862 b = get_old_root(root, time_seq);
315bed43
NB
2863 if (!b) {
2864 ret = -EIO;
2865 goto done;
2866 }
5d9e75c4 2867 level = btrfs_header_level(b);
5d9e75c4
JS
2868 p->locks[level] = BTRFS_READ_LOCK;
2869
2870 while (b) {
abe9339d
QW
2871 int dec = 0;
2872
5d9e75c4
JS
2873 level = btrfs_header_level(b);
2874 p->nodes[level] = b;
5d9e75c4
JS
2875
2876 /*
2877 * we have a lock on b and as long as we aren't changing
2878 * the tree, there is no way for the items in b to change.
2879 * It is safe to drop the lock on our parent before we
2880 * go through the expensive btree search on b.
2881 */
2882 btrfs_unlock_up_safe(p, level + 1);
2883
995e9a16 2884 ret = btrfs_bin_search(b, key, &slot);
cbca7d59
FM
2885 if (ret < 0)
2886 goto done;
5d9e75c4 2887
abe9339d 2888 if (level == 0) {
5d9e75c4
JS
2889 p->slots[level] = slot;
2890 unlock_up(p, level, lowest_unlock, 0, NULL);
abe9339d
QW
2891 goto done;
2892 }
5d9e75c4 2893
abe9339d
QW
2894 if (ret && slot > 0) {
2895 dec = 1;
2896 slot--;
2897 }
2898 p->slots[level] = slot;
2899 unlock_up(p, level, lowest_unlock, 0, NULL);
5d9e75c4 2900
abe9339d
QW
2901 if (level == lowest_level) {
2902 if (dec)
2903 p->slots[level]++;
2904 goto done;
2905 }
5d9e75c4 2906
abe9339d
QW
2907 err = read_block_for_search(root, p, &b, level, slot, key);
2908 if (err == -EAGAIN)
2909 goto again;
2910 if (err) {
2911 ret = err;
5d9e75c4
JS
2912 goto done;
2913 }
abe9339d
QW
2914
2915 level = btrfs_header_level(b);
ac5887c8 2916 btrfs_tree_read_lock(b);
abe9339d
QW
2917 b = tree_mod_log_rewind(fs_info, p, b, time_seq);
2918 if (!b) {
2919 ret = -ENOMEM;
2920 goto done;
2921 }
2922 p->locks[level] = BTRFS_READ_LOCK;
2923 p->nodes[level] = b;
5d9e75c4
JS
2924 }
2925 ret = 1;
2926done:
5d9e75c4
JS
2927 if (ret < 0)
2928 btrfs_release_path(p);
2929
2930 return ret;
2931}
2932
2f38b3e1
AJ
2933/*
2934 * helper to use instead of search slot if no exact match is needed but
2935 * instead the next or previous item should be returned.
2936 * When find_higher is true, the next higher item is returned, the next lower
2937 * otherwise.
2938 * When return_any and find_higher are both true, and no higher item is found,
2939 * return the next lower instead.
2940 * When return_any is true and find_higher is false, and no lower item is found,
2941 * return the next higher instead.
2942 * It returns 0 if any item is found, 1 if none is found (tree empty), and
2943 * < 0 on error
2944 */
2945int btrfs_search_slot_for_read(struct btrfs_root *root,
310712b2
OS
2946 const struct btrfs_key *key,
2947 struct btrfs_path *p, int find_higher,
2948 int return_any)
2f38b3e1
AJ
2949{
2950 int ret;
2951 struct extent_buffer *leaf;
2952
2953again:
2954 ret = btrfs_search_slot(NULL, root, key, p, 0, 0);
2955 if (ret <= 0)
2956 return ret;
2957 /*
2958 * a return value of 1 means the path is at the position where the
2959 * item should be inserted. Normally this is the next bigger item,
2960 * but in case the previous item is the last in a leaf, path points
2961 * to the first free slot in the previous leaf, i.e. at an invalid
2962 * item.
2963 */
2964 leaf = p->nodes[0];
2965
2966 if (find_higher) {
2967 if (p->slots[0] >= btrfs_header_nritems(leaf)) {
2968 ret = btrfs_next_leaf(root, p);
2969 if (ret <= 0)
2970 return ret;
2971 if (!return_any)
2972 return 1;
2973 /*
2974 * no higher item found, return the next
2975 * lower instead
2976 */
2977 return_any = 0;
2978 find_higher = 0;
2979 btrfs_release_path(p);
2980 goto again;
2981 }
2982 } else {
e6793769
AJ
2983 if (p->slots[0] == 0) {
2984 ret = btrfs_prev_leaf(root, p);
2985 if (ret < 0)
2986 return ret;
2987 if (!ret) {
23c6bf6a
FDBM
2988 leaf = p->nodes[0];
2989 if (p->slots[0] == btrfs_header_nritems(leaf))
2990 p->slots[0]--;
e6793769 2991 return 0;
2f38b3e1 2992 }
e6793769
AJ
2993 if (!return_any)
2994 return 1;
2995 /*
2996 * no lower item found, return the next
2997 * higher instead
2998 */
2999 return_any = 0;
3000 find_higher = 1;
3001 btrfs_release_path(p);
3002 goto again;
3003 } else {
2f38b3e1
AJ
3004 --p->slots[0];
3005 }
3006 }
3007 return 0;
3008}
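
/*
 * Minimal illustrative sketch of a btrfs_search_slot_for_read() caller: with
 * find_higher and return_any both set, the path ends up at the item greater
 * than or equal to @key, or at the closest lower item when nothing higher
 * exists.  The wrapper name is made up for the example.
 */
static int example_find_nearest_item(struct btrfs_root *root,
				     const struct btrfs_key *key,
				     struct btrfs_path *path,
				     struct btrfs_key *found_key)
{
	int ret;

	ret = btrfs_search_slot_for_read(root, key, path, 1, 1);
	if (ret == 0)
		btrfs_item_key_to_cpu(path->nodes[0], found_key,
				      path->slots[0]);
	return ret;
}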
3009
74123bd7
CM
3010/*
3011 * adjust the pointers going up the tree, starting at level
3012 * making sure the right key of each node points to 'key'.
3013 * This is used after shifting pointers to the left, so it stops
3014 * fixing up pointers when a given leaf/node is not in slot 0 of the
3015 * higher levels
aa5d6bed 3016 *
74123bd7 3017 */
b167fa91 3018static void fixup_low_keys(struct btrfs_path *path,
143bede5 3019 struct btrfs_disk_key *key, int level)
be0e5c09
CM
3020{
3021 int i;
5f39d397 3022 struct extent_buffer *t;
0e82bcfe 3023 int ret;
5f39d397 3024
234b63a0 3025 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
be0e5c09 3026 int tslot = path->slots[i];
0e82bcfe 3027
eb60ceac 3028 if (!path->nodes[i])
be0e5c09 3029 break;
5f39d397 3030 t = path->nodes[i];
0e82bcfe
DS
3031 ret = tree_mod_log_insert_key(t, tslot, MOD_LOG_KEY_REPLACE,
3032 GFP_ATOMIC);
3033 BUG_ON(ret < 0);
5f39d397 3034 btrfs_set_node_key(t, key, tslot);
d6025579 3035 btrfs_mark_buffer_dirty(path->nodes[i]);
be0e5c09
CM
3036 if (tslot != 0)
3037 break;
3038 }
3039}
3040
31840ae1
ZY
3041/*
3042 * update item key.
3043 *
3044 * This function isn't completely safe. It's the caller's responsibility
3045 * that the new key won't break the order
3046 */
b7a0365e
DD
3047void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
3048 struct btrfs_path *path,
310712b2 3049 const struct btrfs_key *new_key)
31840ae1
ZY
3050{
3051 struct btrfs_disk_key disk_key;
3052 struct extent_buffer *eb;
3053 int slot;
3054
3055 eb = path->nodes[0];
3056 slot = path->slots[0];
3057 if (slot > 0) {
3058 btrfs_item_key(eb, &disk_key, slot - 1);
7c15d410
QW
3059 if (unlikely(comp_keys(&disk_key, new_key) >= 0)) {
3060 btrfs_crit(fs_info,
3061 "slot %u key (%llu %u %llu) new key (%llu %u %llu)",
3062 slot, btrfs_disk_key_objectid(&disk_key),
3063 btrfs_disk_key_type(&disk_key),
3064 btrfs_disk_key_offset(&disk_key),
3065 new_key->objectid, new_key->type,
3066 new_key->offset);
3067 btrfs_print_leaf(eb);
3068 BUG();
3069 }
31840ae1
ZY
3070 }
3071 if (slot < btrfs_header_nritems(eb) - 1) {
3072 btrfs_item_key(eb, &disk_key, slot + 1);
7c15d410
QW
3073 if (unlikely(comp_keys(&disk_key, new_key) <= 0)) {
3074 btrfs_crit(fs_info,
3075 "slot %u key (%llu %u %llu) new key (%llu %u %llu)",
3076 slot, btrfs_disk_key_objectid(&disk_key),
3077 btrfs_disk_key_type(&disk_key),
3078 btrfs_disk_key_offset(&disk_key),
3079 new_key->objectid, new_key->type,
3080 new_key->offset);
3081 btrfs_print_leaf(eb);
3082 BUG();
3083 }
31840ae1
ZY
3084 }
3085
3086 btrfs_cpu_key_to_disk(&disk_key, new_key);
3087 btrfs_set_item_key(eb, &disk_key, slot);
3088 btrfs_mark_buffer_dirty(eb);
3089 if (slot == 0)
b167fa91 3090 fixup_low_keys(path, &disk_key, 1);
31840ae1
ZY
3091}
3092
d16c702f
QW
3093/*
3094 * Check key order of two sibling extent buffers.
3095 *
3096 * Return true if something is wrong.
3097 * Return false if everything is fine.
3098 *
3099 * Tree-checker only works inside one tree block, thus the following
3100 * corruption can not be detected by tree-checker:
3101 *
3102 * Leaf @left | Leaf @right
3103 * --------------------------------------------------------------
3104 * | 1 | 2 | 3 | 4 | 5 | f6 | | 7 | 8 |
3105 *
3106 * Key f6 in leaf @left itself is valid, but not valid when the next
3107 * key in leaf @right is 7.
3108 * This can only be checked at tree block merge time.
3109 * And since tree checker has ensured all key order in each tree block
3110 * is correct, we only need to bother the last key of @left and the first
3111 * key of @right.
3112 */
3113static bool check_sibling_keys(struct extent_buffer *left,
3114 struct extent_buffer *right)
3115{
3116 struct btrfs_key left_last;
3117 struct btrfs_key right_first;
3118 int level = btrfs_header_level(left);
3119 int nr_left = btrfs_header_nritems(left);
3120 int nr_right = btrfs_header_nritems(right);
3121
3122 /* No key to check in one of the tree blocks */
3123 if (!nr_left || !nr_right)
3124 return false;
3125
3126 if (level) {
3127 btrfs_node_key_to_cpu(left, &left_last, nr_left - 1);
3128 btrfs_node_key_to_cpu(right, &right_first, 0);
3129 } else {
3130 btrfs_item_key_to_cpu(left, &left_last, nr_left - 1);
3131 btrfs_item_key_to_cpu(right, &right_first, 0);
3132 }
3133
3134 if (btrfs_comp_cpu_keys(&left_last, &right_first) >= 0) {
3135 btrfs_crit(left->fs_info,
3136"bad key order, sibling blocks, left last (%llu %u %llu) right first (%llu %u %llu)",
3137 left_last.objectid, left_last.type,
3138 left_last.offset, right_first.objectid,
3139 right_first.type, right_first.offset);
3140 return true;
3141 }
3142 return false;
3143}
3144
74123bd7
CM
3145/*
3146 * try to push data from one node into the next node left in the
79f95c82 3147 * tree.
aa5d6bed
CM
3148 *
3149 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
3150 * error, and > 0 if there was no room in the left hand block.
74123bd7 3151 */
98ed5174 3152static int push_node_left(struct btrfs_trans_handle *trans,
2ff7e61e 3153 struct extent_buffer *dst,
971a1f66 3154 struct extent_buffer *src, int empty)
be0e5c09 3155{
d30a668f 3156 struct btrfs_fs_info *fs_info = trans->fs_info;
be0e5c09 3157 int push_items = 0;
bb803951
CM
3158 int src_nritems;
3159 int dst_nritems;
aa5d6bed 3160 int ret = 0;
be0e5c09 3161
5f39d397
CM
3162 src_nritems = btrfs_header_nritems(src);
3163 dst_nritems = btrfs_header_nritems(dst);
0b246afa 3164 push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems;
7bb86316
CM
3165 WARN_ON(btrfs_header_generation(src) != trans->transid);
3166 WARN_ON(btrfs_header_generation(dst) != trans->transid);
54aa1f4d 3167
bce4eae9 3168 if (!empty && src_nritems <= 8)
971a1f66
CM
3169 return 1;
3170
d397712b 3171 if (push_items <= 0)
be0e5c09
CM
3172 return 1;
3173
bce4eae9 3174 if (empty) {
971a1f66 3175 push_items = min(src_nritems, push_items);
bce4eae9
CM
3176 if (push_items < src_nritems) {
3177 /* leave at least 8 pointers in the node if
3178 * we aren't going to empty it
3179 */
3180 if (src_nritems - push_items < 8) {
3181 if (push_items <= 8)
3182 return 1;
3183 push_items -= 8;
3184 }
3185 }
3186 } else
3187 push_items = min(src_nritems - 8, push_items);
79f95c82 3188
d16c702f
QW
3189 /* dst is the left eb, src is the middle eb */
3190 if (check_sibling_keys(dst, src)) {
3191 ret = -EUCLEAN;
3192 btrfs_abort_transaction(trans, ret);
3193 return ret;
3194 }
ed874f0d 3195 ret = tree_mod_log_eb_copy(dst, src, dst_nritems, 0, push_items);
5de865ee 3196 if (ret) {
66642832 3197 btrfs_abort_transaction(trans, ret);
5de865ee
FDBM
3198 return ret;
3199 }
5f39d397
CM
3200 copy_extent_buffer(dst, src,
3201 btrfs_node_key_ptr_offset(dst_nritems),
3202 btrfs_node_key_ptr_offset(0),
d397712b 3203 push_items * sizeof(struct btrfs_key_ptr));
5f39d397 3204
bb803951 3205 if (push_items < src_nritems) {
57911b8b 3206 /*
bf1d3425
DS
3207 * Don't call tree_mod_log_insert_move here, key removal was
3208 * already fully logged by tree_mod_log_eb_copy above.
57911b8b 3209 */
5f39d397
CM
3210 memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
3211 btrfs_node_key_ptr_offset(push_items),
3212 (src_nritems - push_items) *
3213 sizeof(struct btrfs_key_ptr));
3214 }
3215 btrfs_set_header_nritems(src, src_nritems - push_items);
3216 btrfs_set_header_nritems(dst, dst_nritems + push_items);
3217 btrfs_mark_buffer_dirty(src);
3218 btrfs_mark_buffer_dirty(dst);
31840ae1 3219
79f95c82
CM
3220 return ret;
3221}
3222
3223/*
3224 * try to push data from one node into the next node right in the
3225 * tree.
3226 *
3227 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
3228 * error, and > 0 if there was no room in the right hand block.
3229 *
3230 * this will only push up to 1/2 the contents of the left node over
3231 */
5f39d397 3232static int balance_node_right(struct btrfs_trans_handle *trans,
5f39d397
CM
3233 struct extent_buffer *dst,
3234 struct extent_buffer *src)
79f95c82 3235{
55d32ed8 3236 struct btrfs_fs_info *fs_info = trans->fs_info;
79f95c82
CM
3237 int push_items = 0;
3238 int max_push;
3239 int src_nritems;
3240 int dst_nritems;
3241 int ret = 0;
79f95c82 3242
7bb86316
CM
3243 WARN_ON(btrfs_header_generation(src) != trans->transid);
3244 WARN_ON(btrfs_header_generation(dst) != trans->transid);
3245
5f39d397
CM
3246 src_nritems = btrfs_header_nritems(src);
3247 dst_nritems = btrfs_header_nritems(dst);
0b246afa 3248 push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems;
d397712b 3249 if (push_items <= 0)
79f95c82 3250 return 1;
bce4eae9 3251
d397712b 3252 if (src_nritems < 4)
bce4eae9 3253 return 1;
79f95c82
CM
3254
3255 max_push = src_nritems / 2 + 1;
3256 /* don't try to empty the node */
d397712b 3257 if (max_push >= src_nritems)
79f95c82 3258 return 1;
252c38f0 3259
79f95c82
CM
3260 if (max_push < push_items)
3261 push_items = max_push;
3262
d16c702f
QW
3263 /* dst is the right eb, src is the middle eb */
3264 if (check_sibling_keys(src, dst)) {
3265 ret = -EUCLEAN;
3266 btrfs_abort_transaction(trans, ret);
3267 return ret;
3268 }
bf1d3425
DS
3269 ret = tree_mod_log_insert_move(dst, push_items, 0, dst_nritems);
3270 BUG_ON(ret < 0);
5f39d397
CM
3271 memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
3272 btrfs_node_key_ptr_offset(0),
3273 (dst_nritems) *
3274 sizeof(struct btrfs_key_ptr));
d6025579 3275
ed874f0d
DS
3276 ret = tree_mod_log_eb_copy(dst, src, 0, src_nritems - push_items,
3277 push_items);
5de865ee 3278 if (ret) {
66642832 3279 btrfs_abort_transaction(trans, ret);
5de865ee
FDBM
3280 return ret;
3281 }
5f39d397
CM
3282 copy_extent_buffer(dst, src,
3283 btrfs_node_key_ptr_offset(0),
3284 btrfs_node_key_ptr_offset(src_nritems - push_items),
d397712b 3285 push_items * sizeof(struct btrfs_key_ptr));
79f95c82 3286
5f39d397
CM
3287 btrfs_set_header_nritems(src, src_nritems - push_items);
3288 btrfs_set_header_nritems(dst, dst_nritems + push_items);
79f95c82 3289
5f39d397
CM
3290 btrfs_mark_buffer_dirty(src);
3291 btrfs_mark_buffer_dirty(dst);
31840ae1 3292
aa5d6bed 3293 return ret;
be0e5c09
CM
3294}
3295
97571fd0
CM
3296/*
3297 * helper function to insert a new root level in the tree.
3298 * A new node is allocated, and a single item is inserted to
3299 * point to the existing root
aa5d6bed
CM
3300 *
3301 * returns zero on success or < 0 on failure.
97571fd0 3302 */
d397712b 3303static noinline int insert_new_root(struct btrfs_trans_handle *trans,
5f39d397 3304 struct btrfs_root *root,
fdd99c72 3305 struct btrfs_path *path, int level)
5c680ed6 3306{
0b246afa 3307 struct btrfs_fs_info *fs_info = root->fs_info;
7bb86316 3308 u64 lower_gen;
5f39d397
CM
3309 struct extent_buffer *lower;
3310 struct extent_buffer *c;
925baedd 3311 struct extent_buffer *old;
5f39d397 3312 struct btrfs_disk_key lower_key;
d9d19a01 3313 int ret;
5c680ed6
CM
3314
3315 BUG_ON(path->nodes[level]);
3316 BUG_ON(path->nodes[level-1] != root->node);
3317
7bb86316
CM
3318 lower = path->nodes[level-1];
3319 if (level == 1)
3320 btrfs_item_key(lower, &lower_key, 0);
3321 else
3322 btrfs_node_key(lower, &lower_key, 0);
3323
a6279470 3324 c = alloc_tree_block_no_bg_flush(trans, root, 0, &lower_key, level,
9631e4cc 3325 root->node->start, 0,
cf6f34aa 3326 BTRFS_NESTING_NEW_ROOT);
5f39d397
CM
3327 if (IS_ERR(c))
3328 return PTR_ERR(c);
925baedd 3329
0b246afa 3330 root_add_used(root, fs_info->nodesize);
f0486c68 3331
5f39d397 3332 btrfs_set_header_nritems(c, 1);
5f39d397 3333 btrfs_set_node_key(c, &lower_key, 0);
db94535d 3334 btrfs_set_node_blockptr(c, 0, lower->start);
7bb86316 3335 lower_gen = btrfs_header_generation(lower);
31840ae1 3336 WARN_ON(lower_gen != trans->transid);
7bb86316
CM
3337
3338 btrfs_set_node_ptr_generation(c, 0, lower_gen);
d5719762 3339
5f39d397 3340 btrfs_mark_buffer_dirty(c);
d5719762 3341
925baedd 3342 old = root->node;
d9d19a01
DS
3343 ret = tree_mod_log_insert_root(root->node, c, 0);
3344 BUG_ON(ret < 0);
240f62c8 3345 rcu_assign_pointer(root->node, c);
925baedd
CM
3346
3347 /* the super has an extra ref to root->node */
3348 free_extent_buffer(old);
3349
0b86a832 3350 add_root_to_dirty_list(root);
67439dad 3351 atomic_inc(&c->refs);
5f39d397 3352 path->nodes[level] = c;
ac5887c8 3353 path->locks[level] = BTRFS_WRITE_LOCK;
5c680ed6
CM
3354 path->slots[level] = 0;
3355 return 0;
3356}
3357
74123bd7
CM
3358/*
3359 * worker function to insert a single pointer in a node.
3360 * the node should have enough room for the pointer already
97571fd0 3361 *
74123bd7
CM
3362 * slot and level indicate where you want the key to go, and
3363 * blocknr is the block the key points to.
3364 */
143bede5 3365static void insert_ptr(struct btrfs_trans_handle *trans,
6ad3cf6d 3366 struct btrfs_path *path,
143bede5 3367 struct btrfs_disk_key *key, u64 bytenr,
c3e06965 3368 int slot, int level)
74123bd7 3369{
5f39d397 3370 struct extent_buffer *lower;
74123bd7 3371 int nritems;
f3ea38da 3372 int ret;
5c680ed6
CM
3373
3374 BUG_ON(!path->nodes[level]);
f0486c68 3375 btrfs_assert_tree_locked(path->nodes[level]);
5f39d397
CM
3376 lower = path->nodes[level];
3377 nritems = btrfs_header_nritems(lower);
c293498b 3378 BUG_ON(slot > nritems);
6ad3cf6d 3379 BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(trans->fs_info));
74123bd7 3380 if (slot != nritems) {
bf1d3425
DS
3381 if (level) {
3382 ret = tree_mod_log_insert_move(lower, slot + 1, slot,
a446a979 3383 nritems - slot);
bf1d3425
DS
3384 BUG_ON(ret < 0);
3385 }
5f39d397
CM
3386 memmove_extent_buffer(lower,
3387 btrfs_node_key_ptr_offset(slot + 1),
3388 btrfs_node_key_ptr_offset(slot),
d6025579 3389 (nritems - slot) * sizeof(struct btrfs_key_ptr));
74123bd7 3390 }
c3e06965 3391 if (level) {
e09c2efe
DS
3392 ret = tree_mod_log_insert_key(lower, slot, MOD_LOG_KEY_ADD,
3393 GFP_NOFS);
f3ea38da
JS
3394 BUG_ON(ret < 0);
3395 }
5f39d397 3396 btrfs_set_node_key(lower, key, slot);
db94535d 3397 btrfs_set_node_blockptr(lower, slot, bytenr);
74493f7a
CM
3398 WARN_ON(trans->transid == 0);
3399 btrfs_set_node_ptr_generation(lower, slot, trans->transid);
5f39d397
CM
3400 btrfs_set_header_nritems(lower, nritems + 1);
3401 btrfs_mark_buffer_dirty(lower);
74123bd7
CM
3402}
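/*
 * Illustration (added, not from the original source): inserting at slot 1 of
 * a node that already holds three pointers. The memmove above shifts the key
 * pointers at and after the slot up by one, then the new key/blockptr/
 * generation triple is written into the gap:
 *
 *   before:  [k0][k1][k2]            nritems == 3
 *   after:   [k0][new][k1][k2]       nritems == 4
 */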
3403
97571fd0
CM
3404/*
3405 * split the node at the specified level in path in two.
3406 * The path is corrected to point to the appropriate node after the split
3407 *
3408 * Before splitting this tries to make some room in the node by pushing
3409 * left and right, if either one works, it returns right away.
aa5d6bed
CM
3410 *
3411 * returns 0 on success and < 0 on failure
97571fd0 3412 */
e02119d5
CM
3413static noinline int split_node(struct btrfs_trans_handle *trans,
3414 struct btrfs_root *root,
3415 struct btrfs_path *path, int level)
be0e5c09 3416{
0b246afa 3417 struct btrfs_fs_info *fs_info = root->fs_info;
5f39d397
CM
3418 struct extent_buffer *c;
3419 struct extent_buffer *split;
3420 struct btrfs_disk_key disk_key;
be0e5c09 3421 int mid;
5c680ed6 3422 int ret;
7518a238 3423 u32 c_nritems;
eb60ceac 3424
5f39d397 3425 c = path->nodes[level];
7bb86316 3426 WARN_ON(btrfs_header_generation(c) != trans->transid);
5f39d397 3427 if (c == root->node) {
d9abbf1c 3428 /*
90f8d62e
JS
3429 * trying to split the root, let's make a new one
3430 *
fdd99c72 3431 * tree mod log: We don't log removal of the old root in
90f8d62e
JS
3432 * insert_new_root, because that root buffer will be kept as a
3433 * normal node. We are going to log removal of half of the
3434 * elements below with tree_mod_log_eb_copy. We're holding a
3435 * tree lock on the buffer, which is why we cannot race with
3436 * other tree_mod_log users.
d9abbf1c 3437 */
fdd99c72 3438 ret = insert_new_root(trans, root, path, level + 1);
5c680ed6
CM
3439 if (ret)
3440 return ret;
b3612421 3441 } else {
e66f709b 3442 ret = push_nodes_for_insert(trans, root, path, level);
5f39d397
CM
3443 c = path->nodes[level];
3444 if (!ret && btrfs_header_nritems(c) <
0b246afa 3445 BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3)
e66f709b 3446 return 0;
54aa1f4d
CM
3447 if (ret < 0)
3448 return ret;
be0e5c09 3449 }
e66f709b 3450
5f39d397 3451 c_nritems = btrfs_header_nritems(c);
5d4f98a2
YZ
3452 mid = (c_nritems + 1) / 2;
3453 btrfs_node_key(c, &disk_key, mid);
7bb86316 3454
a6279470 3455 split = alloc_tree_block_no_bg_flush(trans, root, 0, &disk_key, level,
4dff97e6 3456 c->start, 0, BTRFS_NESTING_SPLIT);
5f39d397
CM
3457 if (IS_ERR(split))
3458 return PTR_ERR(split);
3459
0b246afa 3460 root_add_used(root, fs_info->nodesize);
bc877d28 3461 ASSERT(btrfs_header_level(c) == level);
54aa1f4d 3462
ed874f0d 3463 ret = tree_mod_log_eb_copy(split, c, 0, mid, c_nritems - mid);
5de865ee 3464 if (ret) {
66642832 3465 btrfs_abort_transaction(trans, ret);
5de865ee
FDBM
3466 return ret;
3467 }
5f39d397
CM
3468 copy_extent_buffer(split, c,
3469 btrfs_node_key_ptr_offset(0),
3470 btrfs_node_key_ptr_offset(mid),
3471 (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
3472 btrfs_set_header_nritems(split, c_nritems - mid);
3473 btrfs_set_header_nritems(c, mid);
aa5d6bed 3474
5f39d397
CM
3475 btrfs_mark_buffer_dirty(c);
3476 btrfs_mark_buffer_dirty(split);
3477
6ad3cf6d 3478 insert_ptr(trans, path, &disk_key, split->start,
c3e06965 3479 path->slots[level + 1] + 1, level + 1);
aa5d6bed 3480
5de08d7d 3481 if (path->slots[level] >= mid) {
5c680ed6 3482 path->slots[level] -= mid;
925baedd 3483 btrfs_tree_unlock(c);
5f39d397
CM
3484 free_extent_buffer(c);
3485 path->nodes[level] = split;
5c680ed6
CM
3486 path->slots[level + 1] += 1;
3487 } else {
925baedd 3488 btrfs_tree_unlock(split);
5f39d397 3489 free_extent_buffer(split);
be0e5c09 3490 }
d5286a92 3491 return 0;
be0e5c09
CM
3492}
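/*
 * Illustration (added, not from the original source): a node split with
 * c_nritems == 5 picks mid = (5 + 1) / 2 = 3, so pointers [3..4] are copied
 * into 'split' and 'c' keeps pointers [0..2]. A pointer to 'split', keyed by
 * its first key, is then inserted next to 'c' in the parent via insert_ptr().
 */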
3493
74123bd7
CM
3494/*
3495 * how many bytes are required to store the items in a leaf. start
3496 * and nr indicate which items in the leaf to check. This totals up the
3497 * space used both by the item structs and the item data
3498 */
5f39d397 3499static int leaf_space_used(struct extent_buffer *l, int start, int nr)
be0e5c09 3500{
41be1f3b
JB
3501 struct btrfs_item *start_item;
3502 struct btrfs_item *end_item;
be0e5c09 3503 int data_len;
5f39d397 3504 int nritems = btrfs_header_nritems(l);
d4dbff95 3505 int end = min(nritems, start + nr) - 1;
be0e5c09
CM
3506
3507 if (!nr)
3508 return 0;
dd3cc16b
RK
3509 start_item = btrfs_item_nr(start);
3510 end_item = btrfs_item_nr(end);
a31356b9
DS
3511 data_len = btrfs_item_offset(l, start_item) +
3512 btrfs_item_size(l, start_item);
3513 data_len = data_len - btrfs_item_offset(l, end_item);
0783fcfc 3514 data_len += sizeof(struct btrfs_item) * nr;
d4dbff95 3515 WARN_ON(data_len < 0);
be0e5c09
CM
3516 return data_len;
3517}
3518
d4dbff95
CM
3519/*
3520 * The space between the end of the leaf items and
3521 * the start of the leaf data. IOW, how much room
3522 * the leaf has left for both items and data
3523 */
e902baac 3524noinline int btrfs_leaf_free_space(struct extent_buffer *leaf)
d4dbff95 3525{
e902baac 3526 struct btrfs_fs_info *fs_info = leaf->fs_info;
5f39d397
CM
3527 int nritems = btrfs_header_nritems(leaf);
3528 int ret;
0b246afa
JM
3529
3530 ret = BTRFS_LEAF_DATA_SIZE(fs_info) - leaf_space_used(leaf, 0, nritems);
5f39d397 3531 if (ret < 0) {
0b246afa
JM
3532 btrfs_crit(fs_info,
3533 "leaf free space ret %d, leaf data size %lu, used %d nritems %d",
3534 ret,
3535 (unsigned long) BTRFS_LEAF_DATA_SIZE(fs_info),
3536 leaf_space_used(leaf, 0, nritems), nritems);
5f39d397
CM
3537 }
3538 return ret;
d4dbff95
CM
3539}
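/*
 * Hypothetical helper (illustration only, not in the original file): using
 * the accounting above to ask whether 'nr' new items of 'data_size' bytes
 * each would still fit in a leaf. Each item costs its data plus one
 * struct btrfs_item header, mirroring the total_size computation used by
 * the insert paths further down in this file.
 */
static inline bool example_leaf_has_room(struct extent_buffer *leaf,
					 int nr, u32 data_size)
{
	u32 needed = nr * (sizeof(struct btrfs_item) + data_size);

	return btrfs_leaf_free_space(leaf) >= (int)needed;
}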
3540
99d8f83c
CM
3541/*
3542 * min slot controls the lowest index we're willing to push to the
3543 * right. We'll push up to and including min_slot, but no lower
3544 */
f72f0010 3545static noinline int __push_leaf_right(struct btrfs_path *path,
44871b1b
CM
3546 int data_size, int empty,
3547 struct extent_buffer *right,
99d8f83c
CM
3548 int free_space, u32 left_nritems,
3549 u32 min_slot)
00ec4c51 3550{
f72f0010 3551 struct btrfs_fs_info *fs_info = right->fs_info;
5f39d397 3552 struct extent_buffer *left = path->nodes[0];
44871b1b 3553 struct extent_buffer *upper = path->nodes[1];
cfed81a0 3554 struct btrfs_map_token token;
5f39d397 3555 struct btrfs_disk_key disk_key;
00ec4c51 3556 int slot;
34a38218 3557 u32 i;
00ec4c51
CM
3558 int push_space = 0;
3559 int push_items = 0;
0783fcfc 3560 struct btrfs_item *item;
34a38218 3561 u32 nr;
7518a238 3562 u32 right_nritems;
5f39d397 3563 u32 data_end;
db94535d 3564 u32 this_item_size;
00ec4c51 3565
34a38218
CM
3566 if (empty)
3567 nr = 0;
3568 else
99d8f83c 3569 nr = max_t(u32, 1, min_slot);
34a38218 3570
31840ae1 3571 if (path->slots[0] >= left_nritems)
87b29b20 3572 push_space += data_size;
31840ae1 3573
44871b1b 3574 slot = path->slots[1];
34a38218
CM
3575 i = left_nritems - 1;
3576 while (i >= nr) {
dd3cc16b 3577 item = btrfs_item_nr(i);
db94535d 3578
31840ae1
ZY
3579 if (!empty && push_items > 0) {
3580 if (path->slots[0] > i)
3581 break;
3582 if (path->slots[0] == i) {
e902baac
DS
3583 int space = btrfs_leaf_free_space(left);
3584
31840ae1
ZY
3585 if (space + push_space * 2 > free_space)
3586 break;
3587 }
3588 }
3589
00ec4c51 3590 if (path->slots[0] == i)
87b29b20 3591 push_space += data_size;
db94535d 3592
db94535d
CM
3593 this_item_size = btrfs_item_size(left, item);
3594 if (this_item_size + sizeof(*item) + push_space > free_space)
00ec4c51 3595 break;
31840ae1 3596
00ec4c51 3597 push_items++;
db94535d 3598 push_space += this_item_size + sizeof(*item);
34a38218
CM
3599 if (i == 0)
3600 break;
3601 i--;
db94535d 3602 }
5f39d397 3603
925baedd
CM
3604 if (push_items == 0)
3605 goto out_unlock;
5f39d397 3606
6c1500f2 3607 WARN_ON(!empty && push_items == left_nritems);
5f39d397 3608
00ec4c51 3609 /* push left to right */
5f39d397 3610 right_nritems = btrfs_header_nritems(right);
34a38218 3611
5f39d397 3612 push_space = btrfs_item_end_nr(left, left_nritems - push_items);
8f881e8c 3613 push_space -= leaf_data_end(left);
5f39d397 3614
00ec4c51 3615 /* make room in the right data area */
8f881e8c 3616 data_end = leaf_data_end(right);
5f39d397 3617 memmove_extent_buffer(right,
3d9ec8c4
NB
3618 BTRFS_LEAF_DATA_OFFSET + data_end - push_space,
3619 BTRFS_LEAF_DATA_OFFSET + data_end,
0b246afa 3620 BTRFS_LEAF_DATA_SIZE(fs_info) - data_end);
5f39d397 3621
00ec4c51 3622 /* copy from the left data area */
3d9ec8c4 3623 copy_extent_buffer(right, left, BTRFS_LEAF_DATA_OFFSET +
0b246afa 3624 BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
8f881e8c 3625 BTRFS_LEAF_DATA_OFFSET + leaf_data_end(left),
d6025579 3626 push_space);
5f39d397
CM
3627
3628 memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
3629 btrfs_item_nr_offset(0),
3630 right_nritems * sizeof(struct btrfs_item));
3631
00ec4c51 3632 /* copy the items from left to right */
5f39d397
CM
3633 copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
3634 btrfs_item_nr_offset(left_nritems - push_items),
3635 push_items * sizeof(struct btrfs_item));
00ec4c51
CM
3636
3637 /* update the item pointers */
c82f823c 3638 btrfs_init_map_token(&token, right);
7518a238 3639 right_nritems += push_items;
5f39d397 3640 btrfs_set_header_nritems(right, right_nritems);
0b246afa 3641 push_space = BTRFS_LEAF_DATA_SIZE(fs_info);
7518a238 3642 for (i = 0; i < right_nritems; i++) {
dd3cc16b 3643 item = btrfs_item_nr(i);
cc4c13d5
DS
3644 push_space -= btrfs_token_item_size(&token, item);
3645 btrfs_set_token_item_offset(&token, item, push_space);
db94535d
CM
3646 }
3647
7518a238 3648 left_nritems -= push_items;
5f39d397 3649 btrfs_set_header_nritems(left, left_nritems);
00ec4c51 3650
34a38218
CM
3651 if (left_nritems)
3652 btrfs_mark_buffer_dirty(left);
f0486c68 3653 else
6a884d7d 3654 btrfs_clean_tree_block(left);
f0486c68 3655
5f39d397 3656 btrfs_mark_buffer_dirty(right);
a429e513 3657
5f39d397
CM
3658 btrfs_item_key(right, &disk_key, 0);
3659 btrfs_set_node_key(upper, &disk_key, slot + 1);
d6025579 3660 btrfs_mark_buffer_dirty(upper);
02217ed2 3661
00ec4c51 3662 /* then fixup the leaf pointer in the path */
7518a238
CM
3663 if (path->slots[0] >= left_nritems) {
3664 path->slots[0] -= left_nritems;
925baedd 3665 if (btrfs_header_nritems(path->nodes[0]) == 0)
6a884d7d 3666 btrfs_clean_tree_block(path->nodes[0]);
925baedd 3667 btrfs_tree_unlock(path->nodes[0]);
5f39d397
CM
3668 free_extent_buffer(path->nodes[0]);
3669 path->nodes[0] = right;
00ec4c51
CM
3670 path->slots[1] += 1;
3671 } else {
925baedd 3672 btrfs_tree_unlock(right);
5f39d397 3673 free_extent_buffer(right);
00ec4c51
CM
3674 }
3675 return 0;
925baedd
CM
3676
3677out_unlock:
3678 btrfs_tree_unlock(right);
3679 free_extent_buffer(right);
3680 return 1;
00ec4c51 3681}
925baedd 3682
44871b1b
CM
3683/*
3684 * push some data in the path leaf to the right, trying to free up at
3685 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3686 *
3687 * returns 1 if the push failed because the other node didn't have enough
3688 * room, 0 if everything worked out and < 0 if there were major errors.
99d8f83c
CM
3689 *
3690 * this will push starting from min_slot to the end of the leaf. It won't
3691 * push any slot lower than min_slot
44871b1b
CM
3692 */
3693static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
99d8f83c
CM
3694 *root, struct btrfs_path *path,
3695 int min_data_size, int data_size,
3696 int empty, u32 min_slot)
44871b1b
CM
3697{
3698 struct extent_buffer *left = path->nodes[0];
3699 struct extent_buffer *right;
3700 struct extent_buffer *upper;
3701 int slot;
3702 int free_space;
3703 u32 left_nritems;
3704 int ret;
3705
3706 if (!path->nodes[1])
3707 return 1;
3708
3709 slot = path->slots[1];
3710 upper = path->nodes[1];
3711 if (slot >= btrfs_header_nritems(upper) - 1)
3712 return 1;
3713
3714 btrfs_assert_tree_locked(path->nodes[1]);
3715
4b231ae4 3716 right = btrfs_read_node_slot(upper, slot + 1);
fb770ae4
LB
3717 /*
3718 * slot + 1 is not valid or we fail to read the right node,
3719 * no big deal, just return.
3720 */
3721 if (IS_ERR(right))
91ca338d
TI
3722 return 1;
3723
bf77467a 3724 __btrfs_tree_lock(right, BTRFS_NESTING_RIGHT);
44871b1b 3725
e902baac 3726 free_space = btrfs_leaf_free_space(right);
44871b1b
CM
3727 if (free_space < data_size)
3728 goto out_unlock;
3729
3730 /* cow and double check */
3731 ret = btrfs_cow_block(trans, root, right, upper,
bf59a5a2 3732 slot + 1, &right, BTRFS_NESTING_RIGHT_COW);
44871b1b
CM
3733 if (ret)
3734 goto out_unlock;
3735
e902baac 3736 free_space = btrfs_leaf_free_space(right);
44871b1b
CM
3737 if (free_space < data_size)
3738 goto out_unlock;
3739
3740 left_nritems = btrfs_header_nritems(left);
3741 if (left_nritems == 0)
3742 goto out_unlock;
3743
d16c702f
QW
3744 if (check_sibling_keys(left, right)) {
3745 ret = -EUCLEAN;
3746 btrfs_tree_unlock(right);
3747 free_extent_buffer(right);
3748 return ret;
3749 }
2ef1fed2
FDBM
3750 if (path->slots[0] == left_nritems && !empty) {
3751 /* Key greater than all keys in the leaf, right neighbor has
3752 * enough room for it and we're not emptying our leaf to delete
3753 * it, therefore use right neighbor to insert the new item and
52042d8e 3754 * no need to touch/dirty our left leaf. */
2ef1fed2
FDBM
3755 btrfs_tree_unlock(left);
3756 free_extent_buffer(left);
3757 path->nodes[0] = right;
3758 path->slots[0] = 0;
3759 path->slots[1]++;
3760 return 0;
3761 }
3762
f72f0010 3763 return __push_leaf_right(path, min_data_size, empty,
99d8f83c 3764 right, free_space, left_nritems, min_slot);
44871b1b
CM
3765out_unlock:
3766 btrfs_tree_unlock(right);
3767 free_extent_buffer(right);
3768 return 1;
3769}
3770
74123bd7
CM
3771/*
3772 * push some data in the path leaf to the left, trying to free up at
3773 * least data_size bytes. returns zero if the push worked, nonzero otherwise
99d8f83c
CM
3774 *
3775 * max_slot can put a limit on how far into the leaf we'll push items. The
3776 * item at 'max_slot' won't be touched. Use (u32)-1 to make us do all the
3777 * items
74123bd7 3778 */
8087c193 3779static noinline int __push_leaf_left(struct btrfs_path *path, int data_size,
44871b1b 3780 int empty, struct extent_buffer *left,
99d8f83c
CM
3781 int free_space, u32 right_nritems,
3782 u32 max_slot)
be0e5c09 3783{
8087c193 3784 struct btrfs_fs_info *fs_info = left->fs_info;
5f39d397
CM
3785 struct btrfs_disk_key disk_key;
3786 struct extent_buffer *right = path->nodes[0];
be0e5c09 3787 int i;
be0e5c09
CM
3788 int push_space = 0;
3789 int push_items = 0;
0783fcfc 3790 struct btrfs_item *item;
7518a238 3791 u32 old_left_nritems;
34a38218 3792 u32 nr;
aa5d6bed 3793 int ret = 0;
db94535d
CM
3794 u32 this_item_size;
3795 u32 old_left_item_size;
cfed81a0
CM
3796 struct btrfs_map_token token;
3797
34a38218 3798 if (empty)
99d8f83c 3799 nr = min(right_nritems, max_slot);
34a38218 3800 else
99d8f83c 3801 nr = min(right_nritems - 1, max_slot);
34a38218
CM
3802
3803 for (i = 0; i < nr; i++) {
dd3cc16b 3804 item = btrfs_item_nr(i);
db94535d 3805
31840ae1
ZY
3806 if (!empty && push_items > 0) {
3807 if (path->slots[0] < i)
3808 break;
3809 if (path->slots[0] == i) {
e902baac
DS
3810 int space = btrfs_leaf_free_space(right);
3811
31840ae1
ZY
3812 if (space + push_space * 2 > free_space)
3813 break;
3814 }
3815 }
3816
be0e5c09 3817 if (path->slots[0] == i)
87b29b20 3818 push_space += data_size;
db94535d
CM
3819
3820 this_item_size = btrfs_item_size(right, item);
3821 if (this_item_size + sizeof(*item) + push_space > free_space)
be0e5c09 3822 break;
db94535d 3823
be0e5c09 3824 push_items++;
db94535d
CM
3825 push_space += this_item_size + sizeof(*item);
3826 }
3827
be0e5c09 3828 if (push_items == 0) {
925baedd
CM
3829 ret = 1;
3830 goto out;
be0e5c09 3831 }
fae7f21c 3832 WARN_ON(!empty && push_items == btrfs_header_nritems(right));
5f39d397 3833
be0e5c09 3834 /* push data from right to left */
5f39d397
CM
3835 copy_extent_buffer(left, right,
3836 btrfs_item_nr_offset(btrfs_header_nritems(left)),
3837 btrfs_item_nr_offset(0),
3838 push_items * sizeof(struct btrfs_item));
3839
0b246afa 3840 push_space = BTRFS_LEAF_DATA_SIZE(fs_info) -
d397712b 3841 btrfs_item_offset_nr(right, push_items - 1);
5f39d397 3842
3d9ec8c4 3843 copy_extent_buffer(left, right, BTRFS_LEAF_DATA_OFFSET +
8f881e8c 3844 leaf_data_end(left) - push_space,
3d9ec8c4 3845 BTRFS_LEAF_DATA_OFFSET +
5f39d397 3846 btrfs_item_offset_nr(right, push_items - 1),
d6025579 3847 push_space);
5f39d397 3848 old_left_nritems = btrfs_header_nritems(left);
87b29b20 3849 BUG_ON(old_left_nritems <= 0);
eb60ceac 3850
c82f823c 3851 btrfs_init_map_token(&token, left);
db94535d 3852 old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
0783fcfc 3853 for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
5f39d397 3854 u32 ioff;
db94535d 3855
dd3cc16b 3856 item = btrfs_item_nr(i);
db94535d 3857
cc4c13d5
DS
3858 ioff = btrfs_token_item_offset(&token, item);
3859 btrfs_set_token_item_offset(&token, item,
3860 ioff - (BTRFS_LEAF_DATA_SIZE(fs_info) - old_left_item_size));
be0e5c09 3861 }
5f39d397 3862 btrfs_set_header_nritems(left, old_left_nritems + push_items);
be0e5c09
CM
3863
3864 /* fixup right node */
31b1a2bd
JL
3865 if (push_items > right_nritems)
3866 WARN(1, KERN_CRIT "push items %d nr %u\n", push_items,
d397712b 3867 right_nritems);
34a38218
CM
3868
3869 if (push_items < right_nritems) {
3870 push_space = btrfs_item_offset_nr(right, push_items - 1) -
8f881e8c 3871 leaf_data_end(right);
3d9ec8c4 3872 memmove_extent_buffer(right, BTRFS_LEAF_DATA_OFFSET +
0b246afa 3873 BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
3d9ec8c4 3874 BTRFS_LEAF_DATA_OFFSET +
8f881e8c 3875 leaf_data_end(right), push_space);
34a38218
CM
3876
3877 memmove_extent_buffer(right, btrfs_item_nr_offset(0),
5f39d397
CM
3878 btrfs_item_nr_offset(push_items),
3879 (btrfs_header_nritems(right) - push_items) *
3880 sizeof(struct btrfs_item));
34a38218 3881 }
c82f823c
DS
3882
3883 btrfs_init_map_token(&token, right);
eef1c494
Y
3884 right_nritems -= push_items;
3885 btrfs_set_header_nritems(right, right_nritems);
0b246afa 3886 push_space = BTRFS_LEAF_DATA_SIZE(fs_info);
5f39d397 3887 for (i = 0; i < right_nritems; i++) {
dd3cc16b 3888 item = btrfs_item_nr(i);
db94535d 3889
cc4c13d5
DS
3890 push_space = push_space - btrfs_token_item_size(&token, item);
3891 btrfs_set_token_item_offset(&token, item, push_space);
db94535d 3892 }
eb60ceac 3893
5f39d397 3894 btrfs_mark_buffer_dirty(left);
34a38218
CM
3895 if (right_nritems)
3896 btrfs_mark_buffer_dirty(right);
f0486c68 3897 else
6a884d7d 3898 btrfs_clean_tree_block(right);
098f59c2 3899
5f39d397 3900 btrfs_item_key(right, &disk_key, 0);
b167fa91 3901 fixup_low_keys(path, &disk_key, 1);
be0e5c09
CM
3902
3903 /* then fixup the leaf pointer in the path */
3904 if (path->slots[0] < push_items) {
3905 path->slots[0] += old_left_nritems;
925baedd 3906 btrfs_tree_unlock(path->nodes[0]);
5f39d397
CM
3907 free_extent_buffer(path->nodes[0]);
3908 path->nodes[0] = left;
be0e5c09
CM
3909 path->slots[1] -= 1;
3910 } else {
925baedd 3911 btrfs_tree_unlock(left);
5f39d397 3912 free_extent_buffer(left);
be0e5c09
CM
3913 path->slots[0] -= push_items;
3914 }
eb60ceac 3915 BUG_ON(path->slots[0] < 0);
aa5d6bed 3916 return ret;
925baedd
CM
3917out:
3918 btrfs_tree_unlock(left);
3919 free_extent_buffer(left);
3920 return ret;
be0e5c09
CM
3921}
3922
44871b1b
CM
3923/*
3924 * push some data in the path leaf to the left, trying to free up at
3925 * least data_size bytes. returns zero if the push worked, nonzero otherwise
99d8f83c
CM
3926 *
3927 * max_slot can put a limit on how far into the leaf we'll push items. The
3928 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the
3929 * items
44871b1b
CM
3930 */
3931static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
99d8f83c
CM
3932 *root, struct btrfs_path *path, int min_data_size,
3933 int data_size, int empty, u32 max_slot)
44871b1b
CM
3934{
3935 struct extent_buffer *right = path->nodes[0];
3936 struct extent_buffer *left;
3937 int slot;
3938 int free_space;
3939 u32 right_nritems;
3940 int ret = 0;
3941
3942 slot = path->slots[1];
3943 if (slot == 0)
3944 return 1;
3945 if (!path->nodes[1])
3946 return 1;
3947
3948 right_nritems = btrfs_header_nritems(right);
3949 if (right_nritems == 0)
3950 return 1;
3951
3952 btrfs_assert_tree_locked(path->nodes[1]);
3953
4b231ae4 3954 left = btrfs_read_node_slot(path->nodes[1], slot - 1);
fb770ae4
LB
3955 /*
3956 * slot - 1 is not valid or we fail to read the left node,
3957 * no big deal, just return.
3958 */
3959 if (IS_ERR(left))
91ca338d
TI
3960 return 1;
3961
bf77467a 3962 __btrfs_tree_lock(left, BTRFS_NESTING_LEFT);
44871b1b 3963
e902baac 3964 free_space = btrfs_leaf_free_space(left);
44871b1b
CM
3965 if (free_space < data_size) {
3966 ret = 1;
3967 goto out;
3968 }
3969
3970 /* cow and double check */
3971 ret = btrfs_cow_block(trans, root, left,
9631e4cc 3972 path->nodes[1], slot - 1, &left,
bf59a5a2 3973 BTRFS_NESTING_LEFT_COW);
44871b1b
CM
3974 if (ret) {
3975 /* we hit -ENOSPC, but it isn't fatal here */
79787eaa
JM
3976 if (ret == -ENOSPC)
3977 ret = 1;
44871b1b
CM
3978 goto out;
3979 }
3980
e902baac 3981 free_space = btrfs_leaf_free_space(left);
44871b1b
CM
3982 if (free_space < data_size) {
3983 ret = 1;
3984 goto out;
3985 }
3986
d16c702f
QW
3987 if (check_sibling_keys(left, right)) {
3988 ret = -EUCLEAN;
3989 goto out;
3990 }
8087c193 3991 return __push_leaf_left(path, min_data_size,
99d8f83c
CM
3992 empty, left, free_space, right_nritems,
3993 max_slot);
44871b1b
CM
3994out:
3995 btrfs_tree_unlock(left);
3996 free_extent_buffer(left);
3997 return ret;
3998}
3999
4000/*
4001 * split the path's leaf in two, making sure there is at least data_size
4002 * available for the resulting leaf level of the path.
44871b1b 4003 */
143bede5 4004static noinline void copy_for_split(struct btrfs_trans_handle *trans,
143bede5
JM
4005 struct btrfs_path *path,
4006 struct extent_buffer *l,
4007 struct extent_buffer *right,
4008 int slot, int mid, int nritems)
44871b1b 4009{
94f94ad9 4010 struct btrfs_fs_info *fs_info = trans->fs_info;
44871b1b
CM
4011 int data_copy_size;
4012 int rt_data_off;
4013 int i;
44871b1b 4014 struct btrfs_disk_key disk_key;
cfed81a0
CM
4015 struct btrfs_map_token token;
4016
44871b1b
CM
4017 nritems = nritems - mid;
4018 btrfs_set_header_nritems(right, nritems);
8f881e8c 4019 data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(l);
44871b1b
CM
4020
4021 copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
4022 btrfs_item_nr_offset(mid),
4023 nritems * sizeof(struct btrfs_item));
4024
4025 copy_extent_buffer(right, l,
3d9ec8c4
NB
4026 BTRFS_LEAF_DATA_OFFSET + BTRFS_LEAF_DATA_SIZE(fs_info) -
4027 data_copy_size, BTRFS_LEAF_DATA_OFFSET +
8f881e8c 4028 leaf_data_end(l), data_copy_size);
44871b1b 4029
0b246afa 4030 rt_data_off = BTRFS_LEAF_DATA_SIZE(fs_info) - btrfs_item_end_nr(l, mid);
44871b1b 4031
c82f823c 4032 btrfs_init_map_token(&token, right);
44871b1b 4033 for (i = 0; i < nritems; i++) {
dd3cc16b 4034 struct btrfs_item *item = btrfs_item_nr(i);
44871b1b
CM
4035 u32 ioff;
4036
cc4c13d5
DS
4037 ioff = btrfs_token_item_offset(&token, item);
4038 btrfs_set_token_item_offset(&token, item, ioff + rt_data_off);
44871b1b
CM
4039 }
4040
44871b1b 4041 btrfs_set_header_nritems(l, mid);
44871b1b 4042 btrfs_item_key(right, &disk_key, 0);
6ad3cf6d 4043 insert_ptr(trans, path, &disk_key, right->start, path->slots[1] + 1, 1);
44871b1b
CM
4044
4045 btrfs_mark_buffer_dirty(right);
4046 btrfs_mark_buffer_dirty(l);
4047 BUG_ON(path->slots[0] != slot);
4048
44871b1b
CM
4049 if (mid <= slot) {
4050 btrfs_tree_unlock(path->nodes[0]);
4051 free_extent_buffer(path->nodes[0]);
4052 path->nodes[0] = right;
4053 path->slots[0] -= mid;
4054 path->slots[1] += 1;
4055 } else {
4056 btrfs_tree_unlock(right);
4057 free_extent_buffer(right);
4058 }
4059
4060 BUG_ON(path->slots[0] < 0);
44871b1b
CM
4061}
4062
99d8f83c
CM
4063/*
4064 * double splits happen when we need to insert a big item in the middle
4065 * of a leaf. A double split can leave us with 3 mostly empty leaves:
4066 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
4067 * A B C
4068 *
4069 * We avoid this by trying to push the items on either side of our target
4070 * into the adjacent leaves. If all goes well we can avoid the double split
4071 * completely.
4072 */
4073static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
4074 struct btrfs_root *root,
4075 struct btrfs_path *path,
4076 int data_size)
4077{
4078 int ret;
4079 int progress = 0;
4080 int slot;
4081 u32 nritems;
5a4267ca 4082 int space_needed = data_size;
99d8f83c
CM
4083
4084 slot = path->slots[0];
5a4267ca 4085 if (slot < btrfs_header_nritems(path->nodes[0]))
e902baac 4086 space_needed -= btrfs_leaf_free_space(path->nodes[0]);
99d8f83c
CM
4087
4088 /*
4089 * try to push all the items after our slot into the
4090 * right leaf
4091 */
5a4267ca 4092 ret = push_leaf_right(trans, root, path, 1, space_needed, 0, slot);
99d8f83c
CM
4093 if (ret < 0)
4094 return ret;
4095
4096 if (ret == 0)
4097 progress++;
4098
4099 nritems = btrfs_header_nritems(path->nodes[0]);
4100 /*
4101 * our goal is to get our slot at the start or end of a leaf. If
4102 * we've done so we're done
4103 */
4104 if (path->slots[0] == 0 || path->slots[0] == nritems)
4105 return 0;
4106
e902baac 4107 if (btrfs_leaf_free_space(path->nodes[0]) >= data_size)
99d8f83c
CM
4108 return 0;
4109
4110 /* try to push all the items before our slot into the next leaf */
4111 slot = path->slots[0];
263d3995
FM
4112 space_needed = data_size;
4113 if (slot > 0)
e902baac 4114 space_needed -= btrfs_leaf_free_space(path->nodes[0]);
5a4267ca 4115 ret = push_leaf_left(trans, root, path, 1, space_needed, 0, slot);
99d8f83c
CM
4116 if (ret < 0)
4117 return ret;
4118
4119 if (ret == 0)
4120 progress++;
4121
4122 if (progress)
4123 return 0;
4124 return 1;
4125}
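/*
 * Illustration (added, not from the original source): the helper returns 0
 * when the pushes leave the target slot at the start or end of its leaf, or
 * leave enough free space for the new item, and 1 when neither push made any
 * progress; split_leaf() then retries its split decision with
 * tried_avoid_double set.
 */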
4126
74123bd7
CM
4127/*
4128 * split the path's leaf in two, making sure there is at least data_size
4129 * available for the resulting leaf level of the path.
aa5d6bed
CM
4130 *
4131 * returns 0 if all went well and < 0 on failure.
74123bd7 4132 */
e02119d5
CM
4133static noinline int split_leaf(struct btrfs_trans_handle *trans,
4134 struct btrfs_root *root,
310712b2 4135 const struct btrfs_key *ins_key,
e02119d5
CM
4136 struct btrfs_path *path, int data_size,
4137 int extend)
be0e5c09 4138{
5d4f98a2 4139 struct btrfs_disk_key disk_key;
5f39d397 4140 struct extent_buffer *l;
7518a238 4141 u32 nritems;
eb60ceac
CM
4142 int mid;
4143 int slot;
5f39d397 4144 struct extent_buffer *right;
b7a0365e 4145 struct btrfs_fs_info *fs_info = root->fs_info;
d4dbff95 4146 int ret = 0;
aa5d6bed 4147 int wret;
5d4f98a2 4148 int split;
cc0c5538 4149 int num_doubles = 0;
99d8f83c 4150 int tried_avoid_double = 0;
aa5d6bed 4151
a5719521
YZ
4152 l = path->nodes[0];
4153 slot = path->slots[0];
4154 if (extend && data_size + btrfs_item_size_nr(l, slot) +
0b246afa 4155 sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(fs_info))
a5719521
YZ
4156 return -EOVERFLOW;
4157
40689478 4158 /* first try to make some room by pushing left and right */
33157e05 4159 if (data_size && path->nodes[1]) {
5a4267ca
FDBM
4160 int space_needed = data_size;
4161
4162 if (slot < btrfs_header_nritems(l))
e902baac 4163 space_needed -= btrfs_leaf_free_space(l);
5a4267ca
FDBM
4164
4165 wret = push_leaf_right(trans, root, path, space_needed,
4166 space_needed, 0, 0);
d397712b 4167 if (wret < 0)
eaee50e8 4168 return wret;
3685f791 4169 if (wret) {
263d3995
FM
4170 space_needed = data_size;
4171 if (slot > 0)
e902baac 4172 space_needed -= btrfs_leaf_free_space(l);
5a4267ca
FDBM
4173 wret = push_leaf_left(trans, root, path, space_needed,
4174 space_needed, 0, (u32)-1);
3685f791
CM
4175 if (wret < 0)
4176 return wret;
4177 }
4178 l = path->nodes[0];
aa5d6bed 4179
3685f791 4180 /* did the pushes work? */
e902baac 4181 if (btrfs_leaf_free_space(l) >= data_size)
3685f791 4182 return 0;
3326d1b0 4183 }
aa5d6bed 4184
5c680ed6 4185 if (!path->nodes[1]) {
fdd99c72 4186 ret = insert_new_root(trans, root, path, 1);
5c680ed6
CM
4187 if (ret)
4188 return ret;
4189 }
cc0c5538 4190again:
5d4f98a2 4191 split = 1;
cc0c5538 4192 l = path->nodes[0];
eb60ceac 4193 slot = path->slots[0];
5f39d397 4194 nritems = btrfs_header_nritems(l);
d397712b 4195 mid = (nritems + 1) / 2;
54aa1f4d 4196
5d4f98a2
YZ
4197 if (mid <= slot) {
4198 if (nritems == 1 ||
4199 leaf_space_used(l, mid, nritems - mid) + data_size >
0b246afa 4200 BTRFS_LEAF_DATA_SIZE(fs_info)) {
5d4f98a2
YZ
4201 if (slot >= nritems) {
4202 split = 0;
4203 } else {
4204 mid = slot;
4205 if (mid != nritems &&
4206 leaf_space_used(l, mid, nritems - mid) +
0b246afa 4207 data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) {
99d8f83c
CM
4208 if (data_size && !tried_avoid_double)
4209 goto push_for_double;
5d4f98a2
YZ
4210 split = 2;
4211 }
4212 }
4213 }
4214 } else {
4215 if (leaf_space_used(l, 0, mid) + data_size >
0b246afa 4216 BTRFS_LEAF_DATA_SIZE(fs_info)) {
5d4f98a2
YZ
4217 if (!extend && data_size && slot == 0) {
4218 split = 0;
4219 } else if ((extend || !data_size) && slot == 0) {
4220 mid = 1;
4221 } else {
4222 mid = slot;
4223 if (mid != nritems &&
4224 leaf_space_used(l, mid, nritems - mid) +
0b246afa 4225 data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) {
99d8f83c
CM
4226 if (data_size && !tried_avoid_double)
4227 goto push_for_double;
67871254 4228 split = 2;
5d4f98a2
YZ
4229 }
4230 }
4231 }
4232 }
4233
4234 if (split == 0)
4235 btrfs_cpu_key_to_disk(&disk_key, ins_key);
4236 else
4237 btrfs_item_key(l, &disk_key, mid);
4238
ca9d473a
JB
4239 /*
4240 * We have to about BTRFS_NESTING_NEW_ROOT here if we've done a double
4241 * split, because we're only allowed to have MAX_LOCKDEP_SUBCLASSES
4242 * subclasses, which is 8 at the time of this patch, and we've maxed it
4243 * out. In the future we could add a
4244 * BTRFS_NESTING_SPLIT_THE_SPLITTENING if we need to, but for now just
4245 * use BTRFS_NESTING_NEW_ROOT.
4246 */
a6279470 4247 right = alloc_tree_block_no_bg_flush(trans, root, 0, &disk_key, 0,
ca9d473a
JB
4248 l->start, 0, num_doubles ?
4249 BTRFS_NESTING_NEW_ROOT :
4250 BTRFS_NESTING_SPLIT);
f0486c68 4251 if (IS_ERR(right))
5f39d397 4252 return PTR_ERR(right);
f0486c68 4253
0b246afa 4254 root_add_used(root, fs_info->nodesize);
5f39d397 4255
5d4f98a2
YZ
4256 if (split == 0) {
4257 if (mid <= slot) {
4258 btrfs_set_header_nritems(right, 0);
6ad3cf6d 4259 insert_ptr(trans, path, &disk_key,
2ff7e61e 4260 right->start, path->slots[1] + 1, 1);
5d4f98a2
YZ
4261 btrfs_tree_unlock(path->nodes[0]);
4262 free_extent_buffer(path->nodes[0]);
4263 path->nodes[0] = right;
4264 path->slots[0] = 0;
4265 path->slots[1] += 1;
4266 } else {
4267 btrfs_set_header_nritems(right, 0);
6ad3cf6d 4268 insert_ptr(trans, path, &disk_key,
2ff7e61e 4269 right->start, path->slots[1], 1);
5d4f98a2
YZ
4270 btrfs_tree_unlock(path->nodes[0]);
4271 free_extent_buffer(path->nodes[0]);
4272 path->nodes[0] = right;
4273 path->slots[0] = 0;
143bede5 4274 if (path->slots[1] == 0)
b167fa91 4275 fixup_low_keys(path, &disk_key, 1);
d4dbff95 4276 }
196e0249
LB
4277 /*
4278 * We create a new leaf 'right' for the required ins_len and
4279 * we'll do btrfs_mark_buffer_dirty() on this leaf after copying
4280 * the ins_len bytes of new content into 'right'.
4281 */
5d4f98a2 4282 return ret;
d4dbff95 4283 }
74123bd7 4284
94f94ad9 4285 copy_for_split(trans, path, l, right, slot, mid, nritems);
31840ae1 4286
5d4f98a2 4287 if (split == 2) {
cc0c5538
CM
4288 BUG_ON(num_doubles != 0);
4289 num_doubles++;
4290 goto again;
a429e513 4291 }
44871b1b 4292
143bede5 4293 return 0;
99d8f83c
CM
4294
4295push_for_double:
4296 push_for_double_split(trans, root, path, data_size);
4297 tried_avoid_double = 1;
e902baac 4298 if (btrfs_leaf_free_space(path->nodes[0]) >= data_size)
99d8f83c
CM
4299 return 0;
4300 goto again;
be0e5c09
CM
4301}
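/*
 * Illustration (added, not from the original source): meaning of the 'split'
 * value chosen above.
 *
 *   split == 1   regular split: items [mid, nritems) move to the new leaf
 *   split == 0   the new key lands past the end (or, when not extending, at
 *                the very start), so an empty new leaf is linked in and
 *                nothing is copied
 *   split == 2   the target slot sits in the middle of a packed leaf, so a
 *                second pass (num_doubles, "goto again") splits once more
 */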
4302
ad48fd75
YZ
4303static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
4304 struct btrfs_root *root,
4305 struct btrfs_path *path, int ins_len)
459931ec 4306{
ad48fd75 4307 struct btrfs_key key;
459931ec 4308 struct extent_buffer *leaf;
ad48fd75
YZ
4309 struct btrfs_file_extent_item *fi;
4310 u64 extent_len = 0;
4311 u32 item_size;
4312 int ret;
459931ec
CM
4313
4314 leaf = path->nodes[0];
ad48fd75
YZ
4315 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4316
4317 BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
4318 key.type != BTRFS_EXTENT_CSUM_KEY);
4319
e902baac 4320 if (btrfs_leaf_free_space(leaf) >= ins_len)
ad48fd75 4321 return 0;
459931ec
CM
4322
4323 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
ad48fd75
YZ
4324 if (key.type == BTRFS_EXTENT_DATA_KEY) {
4325 fi = btrfs_item_ptr(leaf, path->slots[0],
4326 struct btrfs_file_extent_item);
4327 extent_len = btrfs_file_extent_num_bytes(leaf, fi);
4328 }
b3b4aa74 4329 btrfs_release_path(path);
459931ec 4330
459931ec 4331 path->keep_locks = 1;
ad48fd75
YZ
4332 path->search_for_split = 1;
4333 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
459931ec 4334 path->search_for_split = 0;
a8df6fe6
FM
4335 if (ret > 0)
4336 ret = -EAGAIN;
ad48fd75
YZ
4337 if (ret < 0)
4338 goto err;
459931ec 4339
ad48fd75
YZ
4340 ret = -EAGAIN;
4341 leaf = path->nodes[0];
a8df6fe6
FM
4342 /* if our item isn't there, return now */
4343 if (item_size != btrfs_item_size_nr(leaf, path->slots[0]))
ad48fd75
YZ
4344 goto err;
4345
109f6aef 4346 /* the leaf has changed, it now has room. return now */
e902baac 4347 if (btrfs_leaf_free_space(path->nodes[0]) >= ins_len)
109f6aef
CM
4348 goto err;
4349
ad48fd75
YZ
4350 if (key.type == BTRFS_EXTENT_DATA_KEY) {
4351 fi = btrfs_item_ptr(leaf, path->slots[0],
4352 struct btrfs_file_extent_item);
4353 if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
4354 goto err;
459931ec
CM
4355 }
4356
ad48fd75 4357 ret = split_leaf(trans, root, &key, path, ins_len, 1);
f0486c68
YZ
4358 if (ret)
4359 goto err;
459931ec 4360
ad48fd75 4361 path->keep_locks = 0;
b9473439 4362 btrfs_unlock_up_safe(path, 1);
ad48fd75
YZ
4363 return 0;
4364err:
4365 path->keep_locks = 0;
4366 return ret;
4367}
4368
25263cd7 4369static noinline int split_item(struct btrfs_path *path,
310712b2 4370 const struct btrfs_key *new_key,
ad48fd75
YZ
4371 unsigned long split_offset)
4372{
4373 struct extent_buffer *leaf;
4374 struct btrfs_item *item;
4375 struct btrfs_item *new_item;
4376 int slot;
4377 char *buf;
4378 u32 nritems;
4379 u32 item_size;
4380 u32 orig_offset;
4381 struct btrfs_disk_key disk_key;
4382
b9473439 4383 leaf = path->nodes[0];
e902baac 4384 BUG_ON(btrfs_leaf_free_space(leaf) < sizeof(struct btrfs_item));
b9473439 4385
dd3cc16b 4386 item = btrfs_item_nr(path->slots[0]);
459931ec
CM
4387 orig_offset = btrfs_item_offset(leaf, item);
4388 item_size = btrfs_item_size(leaf, item);
4389
459931ec 4390 buf = kmalloc(item_size, GFP_NOFS);
ad48fd75
YZ
4391 if (!buf)
4392 return -ENOMEM;
4393
459931ec
CM
4394 read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
4395 path->slots[0]), item_size);
459931ec 4396
ad48fd75 4397 slot = path->slots[0] + 1;
459931ec 4398 nritems = btrfs_header_nritems(leaf);
459931ec
CM
4399 if (slot != nritems) {
4400 /* shift the items */
4401 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
ad48fd75
YZ
4402 btrfs_item_nr_offset(slot),
4403 (nritems - slot) * sizeof(struct btrfs_item));
459931ec
CM
4404 }
4405
4406 btrfs_cpu_key_to_disk(&disk_key, new_key);
4407 btrfs_set_item_key(leaf, &disk_key, slot);
4408
dd3cc16b 4409 new_item = btrfs_item_nr(slot);
459931ec
CM
4410
4411 btrfs_set_item_offset(leaf, new_item, orig_offset);
4412 btrfs_set_item_size(leaf, new_item, item_size - split_offset);
4413
4414 btrfs_set_item_offset(leaf, item,
4415 orig_offset + item_size - split_offset);
4416 btrfs_set_item_size(leaf, item, split_offset);
4417
4418 btrfs_set_header_nritems(leaf, nritems + 1);
4419
4420 /* write the data for the start of the original item */
4421 write_extent_buffer(leaf, buf,
4422 btrfs_item_ptr_offset(leaf, path->slots[0]),
4423 split_offset);
4424
4425 /* write the data for the new item */
4426 write_extent_buffer(leaf, buf + split_offset,
4427 btrfs_item_ptr_offset(leaf, slot),
4428 item_size - split_offset);
4429 btrfs_mark_buffer_dirty(leaf);
4430
e902baac 4431 BUG_ON(btrfs_leaf_free_space(leaf) < 0);
459931ec 4432 kfree(buf);
ad48fd75
YZ
4433 return 0;
4434}
4435
4436/*
4437 * This function splits a single item into two items,
4438 * giving 'new_key' to the new item and splitting the
4439 * old one at split_offset (from the start of the item).
4440 *
4441 * The path may be released by this operation. After
4442 * the split, the path is pointing to the old item. The
4443 * new item is going to be in the same node as the old one.
4444 *
4445 * Note, the item being split must be small enough to live alone on
4446 * a tree block with room for one extra struct btrfs_item
4447 *
4448 * This allows us to split the item in place, keeping a lock on the
4449 * leaf the entire time.
4450 */
4451int btrfs_split_item(struct btrfs_trans_handle *trans,
4452 struct btrfs_root *root,
4453 struct btrfs_path *path,
310712b2 4454 const struct btrfs_key *new_key,
ad48fd75
YZ
4455 unsigned long split_offset)
4456{
4457 int ret;
4458 ret = setup_leaf_for_split(trans, root, path,
4459 sizeof(struct btrfs_item));
4460 if (ret)
4461 return ret;
4462
25263cd7 4463 ret = split_item(path, new_key, split_offset);
459931ec
CM
4464 return ret;
4465}
4466
ad48fd75
YZ
4467/*
4468 * This function duplicates an item, giving 'new_key' to the new item.
4469 * It guarantees both items live in the same tree leaf and the new item
4470 * is contiguous with the original item.
4471 *
4472 * This allows us to split a file extent in place, keeping a lock on the
4473 * leaf the entire time.
4474 */
4475int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
4476 struct btrfs_root *root,
4477 struct btrfs_path *path,
310712b2 4478 const struct btrfs_key *new_key)
ad48fd75
YZ
4479{
4480 struct extent_buffer *leaf;
4481 int ret;
4482 u32 item_size;
4483
4484 leaf = path->nodes[0];
4485 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4486 ret = setup_leaf_for_split(trans, root, path,
4487 item_size + sizeof(struct btrfs_item));
4488 if (ret)
4489 return ret;
4490
4491 path->slots[0]++;
fc0d82e1 4492 setup_items_for_insert(root, path, new_key, &item_size, 1);
ad48fd75
YZ
4493 leaf = path->nodes[0];
4494 memcpy_extent_buffer(leaf,
4495 btrfs_item_ptr_offset(leaf, path->slots[0]),
4496 btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
4497 item_size);
4498 return 0;
4499}
4500
d352ac68
CM
4501/*
4502 * make the item pointed to by the path smaller. new_size indicates
4503 * how small to make it, and from_end tells us if we just chop bytes
4504 * off the end of the item or if we shift the item to chop bytes off
4505 * the front.
4506 */
78ac4f9e 4507void btrfs_truncate_item(struct btrfs_path *path, u32 new_size, int from_end)
b18c6685 4508{
b18c6685 4509 int slot;
5f39d397
CM
4510 struct extent_buffer *leaf;
4511 struct btrfs_item *item;
b18c6685
CM
4512 u32 nritems;
4513 unsigned int data_end;
4514 unsigned int old_data_start;
4515 unsigned int old_size;
4516 unsigned int size_diff;
4517 int i;
cfed81a0
CM
4518 struct btrfs_map_token token;
4519
5f39d397 4520 leaf = path->nodes[0];
179e29e4
CM
4521 slot = path->slots[0];
4522
4523 old_size = btrfs_item_size_nr(leaf, slot);
4524 if (old_size == new_size)
143bede5 4525 return;
b18c6685 4526
5f39d397 4527 nritems = btrfs_header_nritems(leaf);
8f881e8c 4528 data_end = leaf_data_end(leaf);
b18c6685 4529
5f39d397 4530 old_data_start = btrfs_item_offset_nr(leaf, slot);
179e29e4 4531
b18c6685
CM
4532 size_diff = old_size - new_size;
4533
4534 BUG_ON(slot < 0);
4535 BUG_ON(slot >= nritems);
4536
4537 /*
4538 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4539 */
4540 /* first correct the data pointers */
c82f823c 4541 btrfs_init_map_token(&token, leaf);
b18c6685 4542 for (i = slot; i < nritems; i++) {
5f39d397 4543 u32 ioff;
dd3cc16b 4544 item = btrfs_item_nr(i);
db94535d 4545
cc4c13d5
DS
4546 ioff = btrfs_token_item_offset(&token, item);
4547 btrfs_set_token_item_offset(&token, item, ioff + size_diff);
b18c6685 4548 }
db94535d 4549
b18c6685 4550 /* shift the data */
179e29e4 4551 if (from_end) {
3d9ec8c4
NB
4552 memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4553 data_end + size_diff, BTRFS_LEAF_DATA_OFFSET +
179e29e4
CM
4554 data_end, old_data_start + new_size - data_end);
4555 } else {
4556 struct btrfs_disk_key disk_key;
4557 u64 offset;
4558
4559 btrfs_item_key(leaf, &disk_key, slot);
4560
4561 if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
4562 unsigned long ptr;
4563 struct btrfs_file_extent_item *fi;
4564
4565 fi = btrfs_item_ptr(leaf, slot,
4566 struct btrfs_file_extent_item);
4567 fi = (struct btrfs_file_extent_item *)(
4568 (unsigned long)fi - size_diff);
4569
4570 if (btrfs_file_extent_type(leaf, fi) ==
4571 BTRFS_FILE_EXTENT_INLINE) {
4572 ptr = btrfs_item_ptr_offset(leaf, slot);
4573 memmove_extent_buffer(leaf, ptr,
d397712b 4574 (unsigned long)fi,
7ec20afb 4575 BTRFS_FILE_EXTENT_INLINE_DATA_START);
179e29e4
CM
4576 }
4577 }
4578
3d9ec8c4
NB
4579 memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4580 data_end + size_diff, BTRFS_LEAF_DATA_OFFSET +
179e29e4
CM
4581 data_end, old_data_start - data_end);
4582
4583 offset = btrfs_disk_key_offset(&disk_key);
4584 btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
4585 btrfs_set_item_key(leaf, &disk_key, slot);
4586 if (slot == 0)
b167fa91 4587 fixup_low_keys(path, &disk_key, 1);
179e29e4 4588 }
5f39d397 4589
dd3cc16b 4590 item = btrfs_item_nr(slot);
5f39d397
CM
4591 btrfs_set_item_size(leaf, item, new_size);
4592 btrfs_mark_buffer_dirty(leaf);
b18c6685 4593
e902baac 4594 if (btrfs_leaf_free_space(leaf) < 0) {
a4f78750 4595 btrfs_print_leaf(leaf);
b18c6685 4596 BUG();
5f39d397 4597 }
b18c6685
CM
4598}
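/*
 * Illustration (added, not from the original source): truncating the item in
 * slot 2 from 100 to 60 bytes with from_end == 1. size_diff is 40, the data
 * of the items at and after slot 2 is shifted 40 bytes towards the end of
 * the data area, their item offsets grow by 40, and the truncated item's
 * size field is set to 60; the keys themselves are left untouched.
 */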
4599
d352ac68 4600/*
8f69dbd2 4601 * make the item pointed to by the path bigger, data_size is the added size.
d352ac68 4602 */
c71dd880 4603void btrfs_extend_item(struct btrfs_path *path, u32 data_size)
6567e837 4604{
6567e837 4605 int slot;
5f39d397
CM
4606 struct extent_buffer *leaf;
4607 struct btrfs_item *item;
6567e837
CM
4608 u32 nritems;
4609 unsigned int data_end;
4610 unsigned int old_data;
4611 unsigned int old_size;
4612 int i;
cfed81a0
CM
4613 struct btrfs_map_token token;
4614
5f39d397 4615 leaf = path->nodes[0];
6567e837 4616
5f39d397 4617 nritems = btrfs_header_nritems(leaf);
8f881e8c 4618 data_end = leaf_data_end(leaf);
6567e837 4619
e902baac 4620 if (btrfs_leaf_free_space(leaf) < data_size) {
a4f78750 4621 btrfs_print_leaf(leaf);
6567e837 4622 BUG();
5f39d397 4623 }
6567e837 4624 slot = path->slots[0];
5f39d397 4625 old_data = btrfs_item_end_nr(leaf, slot);
6567e837
CM
4626
4627 BUG_ON(slot < 0);
3326d1b0 4628 if (slot >= nritems) {
a4f78750 4629 btrfs_print_leaf(leaf);
c71dd880 4630 btrfs_crit(leaf->fs_info, "slot %d too large, nritems %d",
0b246afa 4631 slot, nritems);
290342f6 4632 BUG();
3326d1b0 4633 }
6567e837
CM
4634
4635 /*
4636 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4637 */
4638 /* first correct the data pointers */
c82f823c 4639 btrfs_init_map_token(&token, leaf);
6567e837 4640 for (i = slot; i < nritems; i++) {
5f39d397 4641 u32 ioff;
dd3cc16b 4642 item = btrfs_item_nr(i);
db94535d 4643
cc4c13d5
DS
4644 ioff = btrfs_token_item_offset(&token, item);
4645 btrfs_set_token_item_offset(&token, item, ioff - data_size);
6567e837 4646 }
5f39d397 4647
6567e837 4648 /* shift the data */
3d9ec8c4
NB
4649 memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4650 data_end - data_size, BTRFS_LEAF_DATA_OFFSET +
6567e837 4651 data_end, old_data - data_end);
5f39d397 4652
6567e837 4653 data_end = old_data;
5f39d397 4654 old_size = btrfs_item_size_nr(leaf, slot);
dd3cc16b 4655 item = btrfs_item_nr(slot);
5f39d397
CM
4656 btrfs_set_item_size(leaf, item, old_size + data_size);
4657 btrfs_mark_buffer_dirty(leaf);
6567e837 4658
e902baac 4659 if (btrfs_leaf_free_space(leaf) < 0) {
a4f78750 4660 btrfs_print_leaf(leaf);
6567e837 4661 BUG();
5f39d397 4662 }
6567e837
CM
4663}
4664
da9ffb24
NB
4665/**
4666 * setup_items_for_insert - Helper called before inserting one or more items
4667 * to a leaf. Main purpose is to save stack depth by doing the bulk of the work
4668 * in a function that doesn't call btrfs_search_slot
4669 *
4670 * @root: root we are inserting items to
4671 * @path: points to the leaf/slot where we are going to insert new items
4672 * @cpu_key: array of keys for items to be inserted
4673 * @data_size: size of the body of each item we are going to insert
4674 * @nr: size of @cpu_key/@data_size arrays
74123bd7 4675 */
afe5fea7 4676void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
310712b2 4677 const struct btrfs_key *cpu_key, u32 *data_size,
fc0d82e1 4678 int nr)
be0e5c09 4679{
0b246afa 4680 struct btrfs_fs_info *fs_info = root->fs_info;
5f39d397 4681 struct btrfs_item *item;
9c58309d 4682 int i;
7518a238 4683 u32 nritems;
be0e5c09 4684 unsigned int data_end;
e2fa7227 4685 struct btrfs_disk_key disk_key;
44871b1b
CM
4686 struct extent_buffer *leaf;
4687 int slot;
cfed81a0 4688 struct btrfs_map_token token;
fc0d82e1
NB
4689 u32 total_size;
4690 u32 total_data = 0;
4691
4692 for (i = 0; i < nr; i++)
4693 total_data += data_size[i];
4694 total_size = total_data + (nr * sizeof(struct btrfs_item));
cfed81a0 4695
24cdc847
FM
4696 if (path->slots[0] == 0) {
4697 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
b167fa91 4698 fixup_low_keys(path, &disk_key, 1);
24cdc847
FM
4699 }
4700 btrfs_unlock_up_safe(path, 1);
4701
5f39d397 4702 leaf = path->nodes[0];
44871b1b 4703 slot = path->slots[0];
74123bd7 4704
5f39d397 4705 nritems = btrfs_header_nritems(leaf);
8f881e8c 4706 data_end = leaf_data_end(leaf);
eb60ceac 4707
e902baac 4708 if (btrfs_leaf_free_space(leaf) < total_size) {
a4f78750 4709 btrfs_print_leaf(leaf);
0b246afa 4710 btrfs_crit(fs_info, "not enough freespace need %u have %d",
e902baac 4711 total_size, btrfs_leaf_free_space(leaf));
be0e5c09 4712 BUG();
d4dbff95 4713 }
5f39d397 4714
c82f823c 4715 btrfs_init_map_token(&token, leaf);
be0e5c09 4716 if (slot != nritems) {
5f39d397 4717 unsigned int old_data = btrfs_item_end_nr(leaf, slot);
be0e5c09 4718
5f39d397 4719 if (old_data < data_end) {
a4f78750 4720 btrfs_print_leaf(leaf);
7269ddd2
NB
4721 btrfs_crit(fs_info,
4722 "item at slot %d with data offset %u beyond data end of leaf %u",
5d163e0e 4723 slot, old_data, data_end);
290342f6 4724 BUG();
5f39d397 4725 }
be0e5c09
CM
4726 /*
4727 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4728 */
4729 /* first correct the data pointers */
0783fcfc 4730 for (i = slot; i < nritems; i++) {
5f39d397 4731 u32 ioff;
db94535d 4732
62e85577 4733 item = btrfs_item_nr(i);
cc4c13d5
DS
4734 ioff = btrfs_token_item_offset(&token, item);
4735 btrfs_set_token_item_offset(&token, item,
4736 ioff - total_data);
0783fcfc 4737 }
be0e5c09 4738 /* shift the items */
9c58309d 4739 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
5f39d397 4740 btrfs_item_nr_offset(slot),
d6025579 4741 (nritems - slot) * sizeof(struct btrfs_item));
be0e5c09
CM
4742
4743 /* shift the data */
3d9ec8c4
NB
4744 memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4745 data_end - total_data, BTRFS_LEAF_DATA_OFFSET +
d6025579 4746 data_end, old_data - data_end);
be0e5c09
CM
4747 data_end = old_data;
4748 }
5f39d397 4749
62e2749e 4750 /* setup the item for the new data */
9c58309d
CM
4751 for (i = 0; i < nr; i++) {
4752 btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
4753 btrfs_set_item_key(leaf, &disk_key, slot + i);
dd3cc16b 4754 item = btrfs_item_nr(slot + i);
9c58309d 4755 data_end -= data_size[i];
fc0716c2 4756 btrfs_set_token_item_offset(&token, item, data_end);
cc4c13d5 4757 btrfs_set_token_item_size(&token, item, data_size[i]);
9c58309d 4758 }
44871b1b 4759
9c58309d 4760 btrfs_set_header_nritems(leaf, nritems + nr);
b9473439 4761 btrfs_mark_buffer_dirty(leaf);
aa5d6bed 4762
e902baac 4763 if (btrfs_leaf_free_space(leaf) < 0) {
a4f78750 4764 btrfs_print_leaf(leaf);
be0e5c09 4765 BUG();
5f39d397 4766 }
44871b1b
CM
4767}
4768
4769/*
4770 * Given a key and some data, insert items into the tree.
4771 * This does all the path init required, making room in the tree if needed.
4772 */
4773int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
4774 struct btrfs_root *root,
4775 struct btrfs_path *path,
310712b2 4776 const struct btrfs_key *cpu_key, u32 *data_size,
44871b1b
CM
4777 int nr)
4778{
44871b1b
CM
4779 int ret = 0;
4780 int slot;
4781 int i;
4782 u32 total_size = 0;
4783 u32 total_data = 0;
4784
4785 for (i = 0; i < nr; i++)
4786 total_data += data_size[i];
4787
4788 total_size = total_data + (nr * sizeof(struct btrfs_item));
4789 ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
4790 if (ret == 0)
4791 return -EEXIST;
4792 if (ret < 0)
143bede5 4793 return ret;
44871b1b 4794
44871b1b
CM
4795 slot = path->slots[0];
4796 BUG_ON(slot < 0);
4797
fc0d82e1 4798 setup_items_for_insert(root, path, cpu_key, data_size, nr);
143bede5 4799 return 0;
62e2749e
CM
4800}
4801
4802/*
4803 * Given a key and some data, insert an item into the tree.
4804 * This does all the path init required, making room in the tree if needed.
4805 */
310712b2
OS
4806int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4807 const struct btrfs_key *cpu_key, void *data,
4808 u32 data_size)
62e2749e
CM
4809{
4810 int ret = 0;
2c90e5d6 4811 struct btrfs_path *path;
5f39d397
CM
4812 struct extent_buffer *leaf;
4813 unsigned long ptr;
62e2749e 4814
2c90e5d6 4815 path = btrfs_alloc_path();
db5b493a
TI
4816 if (!path)
4817 return -ENOMEM;
2c90e5d6 4818 ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
62e2749e 4819 if (!ret) {
5f39d397
CM
4820 leaf = path->nodes[0];
4821 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
4822 write_extent_buffer(leaf, data, ptr, data_size);
4823 btrfs_mark_buffer_dirty(leaf);
62e2749e 4824 }
2c90e5d6 4825 btrfs_free_path(path);
aa5d6bed 4826 return ret;
be0e5c09
CM
4827}
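/*
 * Hypothetical usage sketch (illustration only, not in the original file):
 * inserting one small item via btrfs_insert_item() above. The key type and
 * the payload are made up for the example; only the btrfs_insert_item()
 * signature comes from this file.
 */
static int example_insert_item(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       u64 objectid, u64 offset)
{
	struct btrfs_key key;
	u64 payload = 42;	/* arbitrary item body for the example */

	key.objectid = objectid;
	key.type = 0;		/* hypothetical item type */
	key.offset = offset;

	/*
	 * Path allocation, slot search, making room (splitting leaves if
	 * needed) and copying the payload in all happen inside, exactly as
	 * the function body above shows.
	 */
	return btrfs_insert_item(trans, root, &key, &payload, sizeof(payload));
}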
4828
74123bd7 4829/*
5de08d7d 4830 * delete the pointer from a given node.
74123bd7 4831 *
d352ac68
CM
4832 * the tree should have been previously balanced so the deletion does not
4833 * empty a node.
74123bd7 4834 */
afe5fea7
TI
4835static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
4836 int level, int slot)
be0e5c09 4837{
5f39d397 4838 struct extent_buffer *parent = path->nodes[level];
7518a238 4839 u32 nritems;
f3ea38da 4840 int ret;
be0e5c09 4841
5f39d397 4842 nritems = btrfs_header_nritems(parent);
d397712b 4843 if (slot != nritems - 1) {
bf1d3425
DS
4844 if (level) {
4845 ret = tree_mod_log_insert_move(parent, slot, slot + 1,
a446a979 4846 nritems - slot - 1);
bf1d3425
DS
4847 BUG_ON(ret < 0);
4848 }
5f39d397
CM
4849 memmove_extent_buffer(parent,
4850 btrfs_node_key_ptr_offset(slot),
4851 btrfs_node_key_ptr_offset(slot + 1),
d6025579
CM
4852 sizeof(struct btrfs_key_ptr) *
4853 (nritems - slot - 1));
57ba86c0 4854 } else if (level) {
e09c2efe
DS
4855 ret = tree_mod_log_insert_key(parent, slot, MOD_LOG_KEY_REMOVE,
4856 GFP_NOFS);
57ba86c0 4857 BUG_ON(ret < 0);
bb803951 4858 }
f3ea38da 4859
7518a238 4860 nritems--;
5f39d397 4861 btrfs_set_header_nritems(parent, nritems);
7518a238 4862 if (nritems == 0 && parent == root->node) {
5f39d397 4863 BUG_ON(btrfs_header_level(root->node) != 1);
bb803951 4864 /* just turn the root into a leaf and break */
5f39d397 4865 btrfs_set_header_level(root->node, 0);
bb803951 4866 } else if (slot == 0) {
5f39d397
CM
4867 struct btrfs_disk_key disk_key;
4868
4869 btrfs_node_key(parent, &disk_key, 0);
b167fa91 4870 fixup_low_keys(path, &disk_key, level + 1);
be0e5c09 4871 }
d6025579 4872 btrfs_mark_buffer_dirty(parent);
be0e5c09
CM
4873}
4874
323ac95b
CM
4875/*
4876 * a helper function to delete the leaf pointed to by path->slots[1] and
5d4f98a2 4877 * path->nodes[1].
323ac95b
CM
4878 *
4879 * This deletes the pointer in path->nodes[1] and frees the leaf
4880 * block extent. zero is returned if it all worked out, < 0 otherwise.
4881 *
4882 * The path must have already been setup for deleting the leaf, including
4883 * all the proper balancing. path->nodes[1] must be locked.
4884 */
143bede5
JM
4885static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
4886 struct btrfs_root *root,
4887 struct btrfs_path *path,
4888 struct extent_buffer *leaf)
323ac95b 4889{
5d4f98a2 4890 WARN_ON(btrfs_header_generation(leaf) != trans->transid);
afe5fea7 4891 del_ptr(root, path, 1, path->slots[1]);
323ac95b 4892
4d081c41
CM
4893 /*
4894 * btrfs_free_extent is expensive, we want to make sure we
4895 * aren't holding any locks when we call it
4896 */
4897 btrfs_unlock_up_safe(path, 0);
4898
f0486c68
YZ
4899 root_sub_used(root, leaf->len);
4900
67439dad 4901 atomic_inc(&leaf->refs);
5581a51a 4902 btrfs_free_tree_block(trans, root, leaf, 0, 1);
3083ee2e 4903 free_extent_buffer_stale(leaf);
323ac95b 4904}
74123bd7
CM
4905/*
4906 * delete the item at the leaf level in path. If that empties
4907 * the leaf, remove it from the tree
4908 */
85e21bac
CM
4909int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4910 struct btrfs_path *path, int slot, int nr)
be0e5c09 4911{
0b246afa 4912 struct btrfs_fs_info *fs_info = root->fs_info;
5f39d397
CM
4913 struct extent_buffer *leaf;
4914 struct btrfs_item *item;
ce0eac2a
AM
4915 u32 last_off;
4916 u32 dsize = 0;
aa5d6bed
CM
4917 int ret = 0;
4918 int wret;
85e21bac 4919 int i;
7518a238 4920 u32 nritems;
be0e5c09 4921
5f39d397 4922 leaf = path->nodes[0];
85e21bac
CM
4923 last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);
4924
4925 for (i = 0; i < nr; i++)
4926 dsize += btrfs_item_size_nr(leaf, slot + i);
4927
5f39d397 4928 nritems = btrfs_header_nritems(leaf);
be0e5c09 4929
85e21bac 4930 if (slot + nr != nritems) {
8f881e8c 4931 int data_end = leaf_data_end(leaf);
c82f823c 4932 struct btrfs_map_token token;
5f39d397 4933
3d9ec8c4 4934 memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
d6025579 4935 data_end + dsize,
3d9ec8c4 4936 BTRFS_LEAF_DATA_OFFSET + data_end,
85e21bac 4937 last_off - data_end);
5f39d397 4938
c82f823c 4939 btrfs_init_map_token(&token, leaf);
85e21bac 4940 for (i = slot + nr; i < nritems; i++) {
5f39d397 4941 u32 ioff;
db94535d 4942
dd3cc16b 4943 item = btrfs_item_nr(i);
cc4c13d5
DS
4944 ioff = btrfs_token_item_offset(&token, item);
4945 btrfs_set_token_item_offset(&token, item, ioff + dsize);
0783fcfc 4946 }
db94535d 4947
5f39d397 4948 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
85e21bac 4949 btrfs_item_nr_offset(slot + nr),
d6025579 4950 sizeof(struct btrfs_item) *
85e21bac 4951 (nritems - slot - nr));
be0e5c09 4952 }
85e21bac
CM
4953 btrfs_set_header_nritems(leaf, nritems - nr);
4954 nritems -= nr;
5f39d397 4955
74123bd7 4956 /* delete the leaf if we've emptied it */
7518a238 4957 if (nritems == 0) {
5f39d397
CM
4958 if (leaf == root->node) {
4959 btrfs_set_header_level(leaf, 0);
9a8dd150 4960 } else {
6a884d7d 4961 btrfs_clean_tree_block(leaf);
143bede5 4962 btrfs_del_leaf(trans, root, path, leaf);
9a8dd150 4963 }
be0e5c09 4964 } else {
7518a238 4965 int used = leaf_space_used(leaf, 0, nritems);
aa5d6bed 4966 if (slot == 0) {
5f39d397
CM
4967 struct btrfs_disk_key disk_key;
4968
4969 btrfs_item_key(leaf, &disk_key, 0);
b167fa91 4970 fixup_low_keys(path, &disk_key, 1);
aa5d6bed 4971 }
aa5d6bed 4972
74123bd7 4973 /* delete the leaf if it is mostly empty */
0b246afa 4974 if (used < BTRFS_LEAF_DATA_SIZE(fs_info) / 3) {
be0e5c09
CM
4975 /* push_leaf_left fixes the path.
4976 * make sure the path still points to our leaf
4977 * for possible call to del_ptr below
4978 */
4920c9ac 4979 slot = path->slots[1];
67439dad 4980 atomic_inc(&leaf->refs);
5f39d397 4981
99d8f83c
CM
4982 wret = push_leaf_left(trans, root, path, 1, 1,
4983 1, (u32)-1);
54aa1f4d 4984 if (wret < 0 && wret != -ENOSPC)
aa5d6bed 4985 ret = wret;
5f39d397
CM
4986
4987 if (path->nodes[0] == leaf &&
4988 btrfs_header_nritems(leaf)) {
99d8f83c
CM
4989 wret = push_leaf_right(trans, root, path, 1,
4990 1, 1, 0);
54aa1f4d 4991 if (wret < 0 && wret != -ENOSPC)
aa5d6bed
CM
4992 ret = wret;
4993 }
5f39d397
CM
4994
4995 if (btrfs_header_nritems(leaf) == 0) {
323ac95b 4996 path->slots[1] = slot;
143bede5 4997 btrfs_del_leaf(trans, root, path, leaf);
5f39d397 4998 free_extent_buffer(leaf);
143bede5 4999 ret = 0;
5de08d7d 5000 } else {
925baedd
CM
5001 /* if we're still in the path, make sure
5002 * we're dirty. Otherwise, one of the
5003 * push_leaf functions must have already
5004 * dirtied this buffer
5005 */
5006 if (path->nodes[0] == leaf)
5007 btrfs_mark_buffer_dirty(leaf);
5f39d397 5008 free_extent_buffer(leaf);
be0e5c09 5009 }
d5719762 5010 } else {
5f39d397 5011 btrfs_mark_buffer_dirty(leaf);
be0e5c09
CM
5012 }
5013 }
aa5d6bed 5014 return ret;
be0e5c09
CM
5015}
5016
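/*
 * Illustrative sketch, not part of ctree.c: a typical caller of the
 * deletion path above.  btrfs_del_item() is the single-item wrapper
 * around btrfs_del_items(); the helper name and the caller-supplied
 * key/root are placeholders.
 */
static int example_delete_one_item(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   const struct btrfs_key *key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* ins_len = -1 (deletion), cow = 1 so the leaf is writable */
	ret = btrfs_search_slot(trans, root, key, path, -1, 1);
	if (ret > 0)
		ret = -ENOENT;		/* exact key not present */
	else if (ret == 0)
		ret = btrfs_del_item(trans, root, path);

	btrfs_free_path(path);
	return ret;
}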
7bb86316 5017/*
925baedd 5018 * search the tree again to find a leaf with lesser keys
7bb86316
CM
5019 * returns 0 if it found something or 1 if there are no lesser leaves.
5020 * returns < 0 on io errors.
d352ac68
CM
5021 *
5022 * This may release the path, and so you may lose any locks held at the
5023 * time you call it.
7bb86316 5024 */
16e7549f 5025int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
7bb86316 5026{
925baedd
CM
5027 struct btrfs_key key;
5028 struct btrfs_disk_key found_key;
5029 int ret;
7bb86316 5030
925baedd 5031 btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
7bb86316 5032
e8b0d724 5033 if (key.offset > 0) {
925baedd 5034 key.offset--;
e8b0d724 5035 } else if (key.type > 0) {
925baedd 5036 key.type--;
e8b0d724
FDBM
5037 key.offset = (u64)-1;
5038 } else if (key.objectid > 0) {
925baedd 5039 key.objectid--;
e8b0d724
FDBM
5040 key.type = (u8)-1;
5041 key.offset = (u64)-1;
5042 } else {
925baedd 5043 return 1;
e8b0d724 5044 }
7bb86316 5045
b3b4aa74 5046 btrfs_release_path(path);
925baedd
CM
5047 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5048 if (ret < 0)
5049 return ret;
5050 btrfs_item_key(path->nodes[0], &found_key, 0);
5051 ret = comp_keys(&found_key, &key);
337c6f68
FM
5052 /*
5053 * We might have had an item with the previous key in the tree right
5054 * before we released our path. And after we released our path, that
5055 * item might have been pushed to the first slot (0) of the leaf we
5056 * were holding due to a tree balance. Alternatively, an item with the
5057 * previous key can exist as the only element of a leaf (big fat item).
5058 * Therefore account for these 2 cases, so that our callers (like
5059 * btrfs_previous_item) don't miss an existing item with a key matching
5060 * the previous key we computed above.
5061 */
5062 if (ret <= 0)
925baedd
CM
5063 return 0;
5064 return 1;
7bb86316
CM
5065}
5066
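/*
 * Illustrative sketch, not part of ctree.c: stepping back one leaf with
 * btrfs_prev_leaf() and reading the last key of the leaf it lands on.
 * The helper name is a placeholder; the path is assumed to already point
 * at an existing leaf.
 */
static int example_peek_prev_key(struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_key *prev_key)
{
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;

	ret = btrfs_prev_leaf(root, path);
	if (ret != 0)
		return ret;	/* < 0 on error, 1 if no lesser leaf exists */

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);
	if (nritems == 0)
		return 1;

	btrfs_item_key_to_cpu(leaf, prev_key, nritems - 1);
	return 0;
}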
3f157a2f
CM
5067/*
5068 * A helper function to walk down the tree starting at min_key, and looking
de78b51a
ES
5069 * for nodes or leaves that have a minimum transaction id.
5070 * This is used by the btree defrag code, and tree logging
3f157a2f
CM
5071 *
5072 * This does not cow, but it does stuff the starting key it finds back
5073 * into min_key, so you can call btrfs_search_slot with cow=1 on the
5074 * key and get a writable path.
5075 *
3f157a2f
CM
5076 * This honors path->lowest_level to prevent descent past a given level
5077 * of the tree.
5078 *
d352ac68
CM
5079 * min_trans indicates the oldest transaction that you are interested
5080 * in walking through. Any nodes or leaves older than min_trans are
5081 * skipped over (without reading them).
5082 *
3f157a2f
CM
5083 * returns zero if something useful was found, < 0 on error and 1 if there
5084 * was nothing in the tree that matched the search criteria.
5085 */
5086int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
de78b51a 5087 struct btrfs_path *path,
3f157a2f
CM
5088 u64 min_trans)
5089{
5090 struct extent_buffer *cur;
5091 struct btrfs_key found_key;
5092 int slot;
9652480b 5093 int sret;
3f157a2f
CM
5094 u32 nritems;
5095 int level;
5096 int ret = 1;
f98de9b9 5097 int keep_locks = path->keep_locks;
3f157a2f 5098
f98de9b9 5099 path->keep_locks = 1;
3f157a2f 5100again:
bd681513 5101 cur = btrfs_read_lock_root_node(root);
3f157a2f 5102 level = btrfs_header_level(cur);
e02119d5 5103 WARN_ON(path->nodes[level]);
3f157a2f 5104 path->nodes[level] = cur;
bd681513 5105 path->locks[level] = BTRFS_READ_LOCK;
3f157a2f
CM
5106
5107 if (btrfs_header_generation(cur) < min_trans) {
5108 ret = 1;
5109 goto out;
5110 }
d397712b 5111 while (1) {
3f157a2f
CM
5112 nritems = btrfs_header_nritems(cur);
5113 level = btrfs_header_level(cur);
e3b83361 5114 sret = btrfs_bin_search(cur, min_key, &slot);
cbca7d59
FM
5115 if (sret < 0) {
5116 ret = sret;
5117 goto out;
5118 }
3f157a2f 5119
323ac95b
CM
5120 /* at the lowest level, we're done, setup the path and exit */
5121 if (level == path->lowest_level) {
e02119d5
CM
5122 if (slot >= nritems)
5123 goto find_next_key;
3f157a2f
CM
5124 ret = 0;
5125 path->slots[level] = slot;
5126 btrfs_item_key_to_cpu(cur, &found_key, slot);
5127 goto out;
5128 }
9652480b
Y
5129 if (sret && slot > 0)
5130 slot--;
3f157a2f 5131 /*
de78b51a 5132 * check this node pointer against the min_trans parameter.
260db43c 5133 * If it is too old, skip to the next one.
3f157a2f 5134 */
d397712b 5135 while (slot < nritems) {
3f157a2f 5136 u64 gen;
e02119d5 5137
3f157a2f
CM
5138 gen = btrfs_node_ptr_generation(cur, slot);
5139 if (gen < min_trans) {
5140 slot++;
5141 continue;
5142 }
de78b51a 5143 break;
3f157a2f 5144 }
e02119d5 5145find_next_key:
3f157a2f
CM
5146 /*
5147 * we didn't find a candidate key in this node, walk forward
5148 * and find another one
5149 */
5150 if (slot >= nritems) {
e02119d5
CM
5151 path->slots[level] = slot;
5152 sret = btrfs_find_next_key(root, path, min_key, level,
de78b51a 5153 min_trans);
e02119d5 5154 if (sret == 0) {
b3b4aa74 5155 btrfs_release_path(path);
3f157a2f
CM
5156 goto again;
5157 } else {
5158 goto out;
5159 }
5160 }
5161 /* save our key for returning back */
5162 btrfs_node_key_to_cpu(cur, &found_key, slot);
5163 path->slots[level] = slot;
5164 if (level == path->lowest_level) {
5165 ret = 0;
3f157a2f
CM
5166 goto out;
5167 }
4b231ae4 5168 cur = btrfs_read_node_slot(cur, slot);
fb770ae4
LB
5169 if (IS_ERR(cur)) {
5170 ret = PTR_ERR(cur);
5171 goto out;
5172 }
3f157a2f 5173
bd681513 5174 btrfs_tree_read_lock(cur);
b4ce94de 5175
bd681513 5176 path->locks[level - 1] = BTRFS_READ_LOCK;
3f157a2f 5177 path->nodes[level - 1] = cur;
f7c79f30 5178 unlock_up(path, level, 1, 0, NULL);
3f157a2f
CM
5179 }
5180out:
f98de9b9
FM
5181 path->keep_locks = keep_locks;
5182 if (ret == 0) {
5183 btrfs_unlock_up_safe(path, path->lowest_level + 1);
3f157a2f 5184 memcpy(min_key, &found_key, sizeof(found_key));
f98de9b9 5185 }
3f157a2f
CM
5186 return ret;
5187}
5188
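/*
 * Illustrative sketch, not part of ctree.c: walking everything newer than
 * @min_trans with btrfs_search_forward(), in the spirit of the defrag and
 * tree-log users mentioned above.  The helper name and the simple key
 * advancement below are placeholders.
 */
static int example_walk_newer_than(struct btrfs_root *root, u64 min_trans)
{
	struct btrfs_path *path;
	struct btrfs_key min_key = { .objectid = 0, .type = 0, .offset = 0 };
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	while (1) {
		ret = btrfs_search_forward(root, &min_key, path, min_trans);
		if (ret) {
			if (ret == 1)
				ret = 0;	/* nothing newer left */
			break;
		}

		/* min_key now holds the key that was found; process it here */

		btrfs_release_path(path);

		/* advance past the key we just processed */
		if (min_key.offset < (u64)-1) {
			min_key.offset++;
		} else if (min_key.type < (u8)-1) {
			min_key.type++;
			min_key.offset = 0;
		} else if (min_key.objectid < (u64)-1) {
			min_key.objectid++;
			min_key.type = 0;
			min_key.offset = 0;
		} else {
			break;
		}
	}

	btrfs_free_path(path);
	return ret;
}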
5189/*
5190 * this is similar to btrfs_next_leaf, but does not try to preserve
5191 * and fixup the path. It looks for and returns the next key in the
de78b51a 5192 * tree based on the current path and the min_trans parameters.
3f157a2f
CM
5193 *
5194 * 0 is returned if another key is found, < 0 if there are any errors
5195 * and 1 is returned if there are no higher keys in the tree
5196 *
5197 * path->keep_locks should be set to 1 on the search made before
5198 * calling this function.
5199 */
e7a84565 5200int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
de78b51a 5201 struct btrfs_key *key, int level, u64 min_trans)
e7a84565 5202{
e7a84565
CM
5203 int slot;
5204 struct extent_buffer *c;
5205
6a9fb468 5206 WARN_ON(!path->keep_locks && !path->skip_locking);
d397712b 5207 while (level < BTRFS_MAX_LEVEL) {
e7a84565
CM
5208 if (!path->nodes[level])
5209 return 1;
5210
5211 slot = path->slots[level] + 1;
5212 c = path->nodes[level];
3f157a2f 5213next:
e7a84565 5214 if (slot >= btrfs_header_nritems(c)) {
33c66f43
YZ
5215 int ret;
5216 int orig_lowest;
5217 struct btrfs_key cur_key;
5218 if (level + 1 >= BTRFS_MAX_LEVEL ||
5219 !path->nodes[level + 1])
e7a84565 5220 return 1;
33c66f43 5221
6a9fb468 5222 if (path->locks[level + 1] || path->skip_locking) {
33c66f43
YZ
5223 level++;
5224 continue;
5225 }
5226
5227 slot = btrfs_header_nritems(c) - 1;
5228 if (level == 0)
5229 btrfs_item_key_to_cpu(c, &cur_key, slot);
5230 else
5231 btrfs_node_key_to_cpu(c, &cur_key, slot);
5232
5233 orig_lowest = path->lowest_level;
b3b4aa74 5234 btrfs_release_path(path);
33c66f43
YZ
5235 path->lowest_level = level;
5236 ret = btrfs_search_slot(NULL, root, &cur_key, path,
5237 0, 0);
5238 path->lowest_level = orig_lowest;
5239 if (ret < 0)
5240 return ret;
5241
5242 c = path->nodes[level];
5243 slot = path->slots[level];
5244 if (ret == 0)
5245 slot++;
5246 goto next;
e7a84565 5247 }
33c66f43 5248
e7a84565
CM
5249 if (level == 0)
5250 btrfs_item_key_to_cpu(c, key, slot);
3f157a2f 5251 else {
3f157a2f
CM
5252 u64 gen = btrfs_node_ptr_generation(c, slot);
5253
3f157a2f
CM
5254 if (gen < min_trans) {
5255 slot++;
5256 goto next;
5257 }
e7a84565 5258 btrfs_node_key_to_cpu(c, key, slot);
3f157a2f 5259 }
e7a84565
CM
5260 return 0;
5261 }
5262 return 1;
5263}
5264
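/*
 * Illustrative sketch, not part of ctree.c: peeking at the key that comes
 * after the current path position with btrfs_find_next_key().  As the
 * comment above requires, the search that builds the path sets
 * path->keep_locks; a min_trans of 0 accepts any generation.  The helper
 * name is a placeholder.
 */
static int example_peek_next_key(struct btrfs_root *root,
				 const struct btrfs_key *start,
				 struct btrfs_key *next_key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->keep_locks = 1;
	ret = btrfs_search_slot(NULL, root, start, path, 0, 0);
	if (ret < 0)
		goto out;

	/* level 0: look for the key following the leaf slot we are on */
	ret = btrfs_find_next_key(root, path, next_key, 0, 0);
out:
	btrfs_free_path(path);
	return ret;
}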
97571fd0 5265/*
925baedd 5266 * search the tree again to find a leaf with greater keys
0f70abe2
CM
5267 * returns 0 if it found something or 1 if there are no greater leaves.
5268 * returns < 0 on io errors.
97571fd0 5269 */
234b63a0 5270int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
3d7806ec
JS
5271{
5272 return btrfs_next_old_leaf(root, path, 0);
5273}
5274
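/*
 * Illustrative sketch, not part of ctree.c: the common pattern for
 * scanning a tree item by item, calling btrfs_next_leaf() whenever the
 * current leaf is exhausted.  The all-zero start key, the helper name and
 * the empty "process item" step are placeholders.
 */
static int example_scan_tree(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct btrfs_key key = { 0 };
	struct btrfs_key found;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		struct extent_buffer *leaf = path->nodes[0];

		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			if (ret > 0)
				break;		/* no more leaves */
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &found, path->slots[0]);
		/* process the item described by 'found' here */

		path->slots[0]++;
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}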
5275int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
5276 u64 time_seq)
d97e63b6
CM
5277{
5278 int slot;
8e73f275 5279 int level;
5f39d397 5280 struct extent_buffer *c;
8e73f275 5281 struct extent_buffer *next;
925baedd
CM
5282 struct btrfs_key key;
5283 u32 nritems;
5284 int ret;
0e46318d 5285 int i;
925baedd
CM
5286
5287 nritems = btrfs_header_nritems(path->nodes[0]);
d397712b 5288 if (nritems == 0)
925baedd 5289 return 1;
925baedd 5290
8e73f275
CM
5291 btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
5292again:
5293 level = 1;
5294 next = NULL;
b3b4aa74 5295 btrfs_release_path(path);
8e73f275 5296
a2135011 5297 path->keep_locks = 1;
8e73f275 5298
3d7806ec
JS
5299 if (time_seq)
5300 ret = btrfs_search_old_slot(root, &key, path, time_seq);
5301 else
5302 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
925baedd
CM
5303 path->keep_locks = 0;
5304
5305 if (ret < 0)
5306 return ret;
5307
a2135011 5308 nritems = btrfs_header_nritems(path->nodes[0]);
168fd7d2
CM
5309 /*
5310 * by releasing the path above we dropped all our locks. A balance
5311 * could have added more items next to the key that used to be
5312 * at the very end of the block. So, check again here and
5313 * advance the path if there are now more items available.
5314 */
a2135011 5315 if (nritems > 0 && path->slots[0] < nritems - 1) {
e457afec
YZ
5316 if (ret == 0)
5317 path->slots[0]++;
8e73f275 5318 ret = 0;
925baedd
CM
5319 goto done;
5320 }
0b43e04f
LB
5321 /*
5322 * So the above check misses one case:
5323 * - after releasing the path above, someone has removed the item that
5324 * used to be at the very end of the block, and balance between leaves
5325 * gets another one with bigger key.offset to replace it.
5326 *
5327 * This one should be returned as well, or we can get leaf corruption
5328 * later (esp. in __btrfs_drop_extents()).
5329 *
5330 * And a bit more explanation about this check,
5331 * with ret > 0, the key isn't found, the path points to the slot
5332 * where it should be inserted, so the path->slots[0] item must be the
5333 * bigger one.
5334 */
5335 if (nritems > 0 && ret > 0 && path->slots[0] == nritems - 1) {
5336 ret = 0;
5337 goto done;
5338 }
d97e63b6 5339
d397712b 5340 while (level < BTRFS_MAX_LEVEL) {
8e73f275
CM
5341 if (!path->nodes[level]) {
5342 ret = 1;
5343 goto done;
5344 }
5f39d397 5345
d97e63b6
CM
5346 slot = path->slots[level] + 1;
5347 c = path->nodes[level];
5f39d397 5348 if (slot >= btrfs_header_nritems(c)) {
d97e63b6 5349 level++;
8e73f275
CM
5350 if (level == BTRFS_MAX_LEVEL) {
5351 ret = 1;
5352 goto done;
5353 }
d97e63b6
CM
5354 continue;
5355 }
5f39d397 5356
0e46318d
JB
5357
5358 /*
5359 * Our current level is where we're going to start from, and to
5360 * make sure lockdep doesn't complain we need to drop our locks
5361 * and nodes from 0 to our current level.
5362 */
5363 for (i = 0; i < level; i++) {
5364 if (path->locks[level]) {
5365 btrfs_tree_read_unlock(path->nodes[i]);
5366 path->locks[i] = 0;
5367 }
5368 free_extent_buffer(path->nodes[i]);
5369 path->nodes[i] = NULL;
925baedd 5370 }
5f39d397 5371
8e73f275 5372 next = c;
d07b8528 5373 ret = read_block_for_search(root, path, &next, level,
cda79c54 5374 slot, &key);
8e73f275
CM
5375 if (ret == -EAGAIN)
5376 goto again;
5f39d397 5377
76a05b35 5378 if (ret < 0) {
b3b4aa74 5379 btrfs_release_path(path);
76a05b35
CM
5380 goto done;
5381 }
5382
5cd57b2c 5383 if (!path->skip_locking) {
bd681513 5384 ret = btrfs_try_tree_read_lock(next);
d42244a0
JS
5385 if (!ret && time_seq) {
5386 /*
5387 * If we don't get the lock, we may be racing
5388 * with push_leaf_left, holding that lock while
5389 * itself waiting for the leaf we've currently
5390 * locked. To solve this situation, we give up
5391 * on our lock and cycle.
5392 */
cf538830 5393 free_extent_buffer(next);
d42244a0
JS
5394 btrfs_release_path(path);
5395 cond_resched();
5396 goto again;
5397 }
0e46318d
JB
5398 if (!ret)
5399 btrfs_tree_read_lock(next);
5cd57b2c 5400 }
d97e63b6
CM
5401 break;
5402 }
5403 path->slots[level] = slot;
d397712b 5404 while (1) {
d97e63b6 5405 level--;
d97e63b6
CM
5406 path->nodes[level] = next;
5407 path->slots[level] = 0;
a74a4b97 5408 if (!path->skip_locking)
ffeb03cf 5409 path->locks[level] = BTRFS_READ_LOCK;
d97e63b6
CM
5410 if (!level)
5411 break;
b4ce94de 5412
d07b8528 5413 ret = read_block_for_search(root, path, &next, level,
cda79c54 5414 0, &key);
8e73f275
CM
5415 if (ret == -EAGAIN)
5416 goto again;
5417
76a05b35 5418 if (ret < 0) {
b3b4aa74 5419 btrfs_release_path(path);
76a05b35
CM
5420 goto done;
5421 }
5422
ffeb03cf 5423 if (!path->skip_locking)
0e46318d 5424 btrfs_tree_read_lock(next);
d97e63b6 5425 }
8e73f275 5426 ret = 0;
925baedd 5427done:
f7c79f30 5428 unlock_up(path, 0, 1, 0, NULL);
8e73f275
CM
5429
5430 return ret;
d97e63b6 5431}
0b86a832 5432
3f157a2f
CM
5433/*
5434 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
5435 * searching until it gets past min_objectid or finds an item of 'type'
5436 *
5437 * returns 0 if something is found, 1 if nothing was found and < 0 on error
5438 */
0b86a832
CM
5439int btrfs_previous_item(struct btrfs_root *root,
5440 struct btrfs_path *path, u64 min_objectid,
5441 int type)
5442{
5443 struct btrfs_key found_key;
5444 struct extent_buffer *leaf;
e02119d5 5445 u32 nritems;
0b86a832
CM
5446 int ret;
5447
d397712b 5448 while (1) {
0b86a832
CM
5449 if (path->slots[0] == 0) {
5450 ret = btrfs_prev_leaf(root, path);
5451 if (ret != 0)
5452 return ret;
5453 } else {
5454 path->slots[0]--;
5455 }
5456 leaf = path->nodes[0];
e02119d5
CM
5457 nritems = btrfs_header_nritems(leaf);
5458 if (nritems == 0)
5459 return 1;
5460 if (path->slots[0] == nritems)
5461 path->slots[0]--;
5462
0b86a832 5463 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
e02119d5
CM
5464 if (found_key.objectid < min_objectid)
5465 break;
0a4eefbb
YZ
5466 if (found_key.type == type)
5467 return 0;
e02119d5
CM
5468 if (found_key.objectid == min_objectid &&
5469 found_key.type < type)
5470 break;
0b86a832
CM
5471 }
5472 return 1;
5473}
ade2e0b3
WS
5474
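/*
 * Illustrative sketch, not part of ctree.c: positioning a path just past
 * @objectid and then using btrfs_previous_item() to back up to the
 * nearest item of @type.  The helper name and the max type/offset search
 * key are placeholders for whatever the caller actually needs.
 */
static int example_find_prev_of_type(struct btrfs_root *root,
				     struct btrfs_path *path,
				     u64 objectid, int type)
{
	struct btrfs_key key;
	int ret;

	key.objectid = objectid;
	key.type = (u8)-1;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	/* walks backwards across leaves until @type or below @objectid */
	return btrfs_previous_item(root, path, objectid, type);
}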
5475/*
5476 * search in extent tree to find a previous Metadata/Data extent item with
5477 * min objectid.
5478 *
5479 * returns 0 if something is found, 1 if nothing was found and < 0 on error
5480 */
5481int btrfs_previous_extent_item(struct btrfs_root *root,
5482 struct btrfs_path *path, u64 min_objectid)
5483{
5484 struct btrfs_key found_key;
5485 struct extent_buffer *leaf;
5486 u32 nritems;
5487 int ret;
5488
5489 while (1) {
5490 if (path->slots[0] == 0) {
ade2e0b3
WS
5491 ret = btrfs_prev_leaf(root, path);
5492 if (ret != 0)
5493 return ret;
5494 } else {
5495 path->slots[0]--;
5496 }
5497 leaf = path->nodes[0];
5498 nritems = btrfs_header_nritems(leaf);
5499 if (nritems == 0)
5500 return 1;
5501 if (path->slots[0] == nritems)
5502 path->slots[0]--;
5503
5504 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5505 if (found_key.objectid < min_objectid)
5506 break;
5507 if (found_key.type == BTRFS_EXTENT_ITEM_KEY ||
5508 found_key.type == BTRFS_METADATA_ITEM_KEY)
5509 return 0;
5510 if (found_key.objectid == min_objectid &&
5511 found_key.type < BTRFS_EXTENT_ITEM_KEY)
5512 break;
5513 }
5514 return 1;
5515}
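/*
 * Illustrative sketch, not part of ctree.c: backing up to the extent item
 * that covers or precedes @logical in the extent tree using
 * btrfs_previous_extent_item().  'extent_root', 'logical' and the helper
 * name are placeholders supplied for the example.
 */
static int example_find_extent_before(struct btrfs_root *extent_root,
				      struct btrfs_path *path,
				      u64 logical)
{
	struct btrfs_key key;
	int ret;

	key.objectid = logical;
	key.type = BTRFS_METADATA_ITEM_KEY;	/* sorts after EXTENT_ITEM */
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	/* steps back until an EXTENT_ITEM or METADATA_ITEM is found */
	return btrfs_previous_extent_item(extent_root, path, logical);
}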