/*
 * Copyright (C) 2011 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */
#include "dm-btree-internal.h"
#include "dm-space-map.h"
#include "dm-transaction-manager.h"

#include <linux/export.h>
#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "btree"
/*----------------------------------------------------------------
 * Array manipulation
 *--------------------------------------------------------------*/
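/*
 * The __dm_written_to_disk/__dm_bless_for_disk/__dm_unbless_for_disk
 * annotations used below are sparse checks (defined in
 * dm-block-manager.h): under sparse they mark a buffer as holding
 * little-endian, on-disk format data until it has been copied into a
 * block, and they compile away entirely in a normal build.
 */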
static void memcpy_disk(void *dest, const void *src, size_t len)
	__dm_written_to_disk(src)
{
	memcpy(dest, src, len);
	__dm_unbless_for_disk(src);
}
static void array_insert(void *base, size_t elt_size, unsigned nr_elts,
			 unsigned index, void *elt)
	__dm_written_to_disk(elt)
{
	if (index < nr_elts)
		memmove(base + (elt_size * (index + 1)),
			base + (elt_size * index),
			(nr_elts - index) * elt_size);

	memcpy_disk(base + (elt_size * index), elt, elt_size);
}

/*----------------------------------------------------------------*/
/* makes the assumption that no two keys are the same. */
static int bsearch(struct btree_node *n, uint64_t key, int want_hi)
{
	int lo = -1, hi = le32_to_cpu(n->header.nr_entries);

	while (hi - lo > 1) {
		int mid = lo + ((hi - lo) / 2);
		uint64_t mid_key = le64_to_cpu(n->keys[mid]);

		if (mid_key == key)
			return mid;

		if (mid_key < key)
			lo = mid;
		else
			hi = mid;
	}

	return want_hi ? hi : lo;
}
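/*
 * Given the bsearch() above: lower_bound() returns the index of the
 * largest key <= the requested key (or -1 if there is none), and
 * upper_bound() returns the index of the smallest key >= the requested
 * key (or nr_entries if there is none).  Both return the index of an
 * exact match directly.
 */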
int lower_bound(struct btree_node *n, uint64_t key)
{
	return bsearch(n, key, 0);
}

static int upper_bound(struct btree_node *n, uint64_t key)
{
	return bsearch(n, key, 1);
}
void inc_children(struct dm_transaction_manager *tm, struct btree_node *n,
		  struct dm_btree_value_type *vt)
{
	unsigned i;
	uint32_t nr_entries = le32_to_cpu(n->header.nr_entries);

	if (le32_to_cpu(n->header.flags) & INTERNAL_NODE)
		for (i = 0; i < nr_entries; i++)
			dm_tm_inc(tm, value64(n, i));
	else if (vt->inc)
		for (i = 0; i < nr_entries; i++)
			vt->inc(vt->context, value_ptr(n, i));
}
static int insert_at(size_t value_size, struct btree_node *node, unsigned index,
		     uint64_t key, void *value)
	__dm_written_to_disk(value)
{
	uint32_t nr_entries = le32_to_cpu(node->header.nr_entries);
	__le64 key_le = cpu_to_le64(key);

	if (index > nr_entries ||
	    index >= le32_to_cpu(node->header.max_entries)) {
		DMERR("too many entries in btree node for insert");
		__dm_unbless_for_disk(value);
		return -ENOMEM;
	}

	__dm_bless_for_disk(&key_le);

	array_insert(node->keys, sizeof(*node->keys), nr_entries, index, &key_le);
	array_insert(value_base(node), value_size, nr_entries, index, value);
	node->header.nr_entries = cpu_to_le32(nr_entries + 1);

	return 0;
}
/*----------------------------------------------------------------*/

/*
 * We want 3n entries (for some n).  This works more nicely for repeated
 * insert/remove loops than (2n + 1).
 */
static uint32_t calc_max_entries(size_t value_size, size_t block_size)
{
	uint32_t total, n;
	size_t elt_size = sizeof(uint64_t) + value_size; /* key + value */

	block_size -= sizeof(struct node_header);
	total = block_size / elt_size;
	n = total / 3;		/* rounds down */

	return 3 * n;
}
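/*
 * For example, assuming a 4096 byte block, 8 byte values and the 32 byte
 * node_header of the on-disk format: each entry takes 16 bytes, so
 * total = 4064 / 16 = 254 and max_entries = 3 * (254 / 3) = 252.  (The
 * numbers are illustrative; the real block size comes from the block
 * manager.)
 */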
int dm_btree_empty(struct dm_btree_info *info, dm_block_t *root)
{
	int r;
	struct dm_block *b;
	struct btree_node *n;
	size_t block_size;
	uint32_t max_entries;

	r = new_block(info, &b);
	if (r < 0)
		return r;

	block_size = dm_bm_block_size(dm_tm_get_bm(info->tm));
	max_entries = calc_max_entries(info->value_type.size, block_size);

	n = dm_block_data(b);
	memset(n, 0, block_size);
	n->header.flags = cpu_to_le32(LEAF_NODE);
	n->header.nr_entries = cpu_to_le32(0);
	n->header.max_entries = cpu_to_le32(max_entries);
	n->header.value_size = cpu_to_le32(info->value_type.size);

	*root = dm_block_location(b);
	unlock_block(info, b);

	return 0;
}
EXPORT_SYMBOL_GPL(dm_btree_empty);
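/*
 * Sketch of typical use (hypothetical caller; error handling omitted,
 * and 'tm' is assumed to be an existing transaction manager):
 *
 *	struct dm_btree_info info = {
 *		.tm = tm,
 *		.levels = 1,
 *		.value_type = { .size = sizeof(__le64) },
 *	};
 *	dm_block_t root;
 *
 *	r = dm_btree_empty(&info, &root);
 */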
/*----------------------------------------------------------------*/

/*
 * Deletion uses a recursive algorithm.  Since we have limited stack
 * space, we explicitly manage our own stack on the heap.
 */
#define MAX_SPINE_DEPTH 64
struct frame {
	struct dm_block *b;
	struct btree_node *n;
	unsigned level;
	unsigned nr_children;
	unsigned current_child;
};

struct del_stack {
	struct dm_btree_info *info;
	struct dm_transaction_manager *tm;
	int top;
	struct frame spine[MAX_SPINE_DEPTH];
};
static int top_frame(struct del_stack *s, struct frame **f)
{
	if (s->top < 0) {
		DMERR("btree deletion stack empty");
		return -EINVAL;
	}

	*f = s->spine + s->top;

	return 0;
}

static int unprocessed_frames(struct del_stack *s)
{
	return s->top >= 0;
}
static void prefetch_children(struct del_stack *s, struct frame *f)
{
	unsigned i;
	struct dm_block_manager *bm = dm_tm_get_bm(s->tm);

	for (i = 0; i < f->nr_children; i++)
		dm_bm_prefetch(bm, value64(f->n, i));
}
static bool is_internal_level(struct dm_btree_info *info, struct frame *f)
{
	return f->level < (info->levels - 1);
}
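/*
 * Note the distinction between the two tests used below: INTERNAL_NODE
 * in the header flags marks an internal node within a single btree,
 * whereas is_internal_level() is about nested btrees; a leaf at one
 * level of a multi-level (info->levels > 1) btree holds the roots of
 * the btrees of the next level down.
 */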
static int push_frame(struct del_stack *s, dm_block_t b, unsigned level)
{
	int r;
	uint32_t ref_count;

	if (s->top >= MAX_SPINE_DEPTH - 1) {
		DMERR("btree deletion stack out of memory");
		return -ENOMEM;
	}

	r = dm_tm_ref(s->tm, b, &ref_count);
	if (r)
		return r;

	if (ref_count > 1)
		/*
		 * This is a shared node, so we can just decrement its
		 * reference counter and leave the children.
		 */
		dm_tm_dec(s->tm, b);

	else {
		uint32_t flags;
		struct frame *f = s->spine + ++s->top;

		r = dm_tm_read_lock(s->tm, b, &btree_node_validator, &f->b);
		if (r) {
			s->top--;
			return r;
		}

		f->n = dm_block_data(f->b);
		f->level = level;
		f->nr_children = le32_to_cpu(f->n->header.nr_entries);
		f->current_child = 0;

		flags = le32_to_cpu(f->n->header.flags);
		if (flags & INTERNAL_NODE || is_internal_level(s->info, f))
			prefetch_children(s, f);
	}

	return 0;
}
static void pop_frame(struct del_stack *s)
{
	struct frame *f = s->spine + s->top--;

	dm_tm_dec(s->tm, dm_block_location(f->b));
	dm_tm_unlock(s->tm, f->b);
}
static void unlock_all_frames(struct del_stack *s)
{
	struct frame *f;

	while (unprocessed_frames(s)) {
		f = s->spine + s->top--;
		dm_tm_unlock(s->tm, f->b);
	}
}
int dm_btree_del(struct dm_btree_info *info, dm_block_t root)
{
	int r;
	struct del_stack *s;

	s = kmalloc(sizeof(*s), GFP_NOIO);
	if (!s)
		return -ENOMEM;
	s->info = info;
	s->tm = info->tm;
	s->top = -1;

	r = push_frame(s, root, 0);
	if (r)
		goto out;

	while (unprocessed_frames(s)) {
		uint32_t flags;
		struct frame *f;
		dm_block_t b;

		r = top_frame(s, &f);
		if (r)
			goto out;

		if (f->current_child >= f->nr_children) {
			pop_frame(s);
			continue;
		}

		flags = le32_to_cpu(f->n->header.flags);
		if (flags & INTERNAL_NODE) {
			b = value64(f->n, f->current_child);
			f->current_child++;
			r = push_frame(s, b, f->level);
			if (r)
				goto out;

		} else if (is_internal_level(info, f)) {
			b = value64(f->n, f->current_child);
			f->current_child++;
			r = push_frame(s, b, f->level + 1);
			if (r)
				goto out;

		} else {
			if (info->value_type.dec) {
				unsigned i;

				for (i = 0; i < f->nr_children; i++)
					info->value_type.dec(info->value_type.context,
							     value_ptr(f->n, i));
			}
			pop_frame(s);
		}
	}
out:
	if (r) {
		/* cleanup all frames of del_stack */
		unlock_all_frames(s);
	}
	kfree(s);

	return r;
}
EXPORT_SYMBOL_GPL(dm_btree_del);
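/*
 * Because push_frame() only descends into nodes whose reference count
 * is one, deleting a tree that shares subtrees with other transactions
 * just decrements the shared nodes' counters and leaves their children
 * alone.
 */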
/*----------------------------------------------------------------*/

static int btree_lookup_raw(struct ro_spine *s, dm_block_t block, uint64_t key,
			    int (*search_fn)(struct btree_node *, uint64_t),
			    uint64_t *result_key, void *v, size_t value_size)
{
	int i, r;
	uint32_t flags, nr_entries;

	do {
		r = ro_step(s, block);
		if (r < 0)
			return r;

		i = search_fn(ro_node(s), key);

		flags = le32_to_cpu(ro_node(s)->header.flags);
		nr_entries = le32_to_cpu(ro_node(s)->header.nr_entries);
		if (i < 0 || i >= nr_entries)
			return -ENODATA;

		if (flags & INTERNAL_NODE)
			block = value64(ro_node(s), i);

	} while (!(flags & LEAF_NODE));

	*result_key = le64_to_cpu(ro_node(s)->keys[i]);
	memcpy(v, value_ptr(ro_node(s), i), value_size);

	return 0;
}
int dm_btree_lookup(struct dm_btree_info *info, dm_block_t root,
		    uint64_t *keys, void *value_le)
{
	unsigned level, last_level = info->levels - 1;
	int r = -ENODATA;
	uint64_t rkey;
	__le64 internal_value_le;
	struct ro_spine spine;

	init_ro_spine(&spine, info);
	for (level = 0; level < info->levels; level++) {
		size_t size;
		void *value_p;

		if (level == last_level) {
			value_p = value_le;
			size = info->value_type.size;

		} else {
			value_p = &internal_value_le;
			size = sizeof(uint64_t);
		}

		r = btree_lookup_raw(&spine, root, keys[level],
				     lower_bound, &rkey,
				     value_p, size);

		if (!r) {
			if (rkey != keys[level]) {
				exit_ro_spine(&spine);
				return -ENODATA;
			}
		} else {
			exit_ro_spine(&spine);
			return r;
		}

		root = le64_to_cpu(internal_value_le);
	}
	exit_ro_spine(&spine);

	return r;
}
EXPORT_SYMBOL_GPL(dm_btree_lookup);
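/*
 * Lookup sketch (hypothetical two-level tree; the key array carries one
 * key per level):
 *
 *	uint64_t keys[2] = { dev_id, block_nr };
 *	__le64 value_le;
 *
 *	r = dm_btree_lookup(&info, root, keys, &value_le);
 *	if (r == -ENODATA)
 *		... key not present ...
 */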
static int dm_btree_lookup_next_single(struct dm_btree_info *info, dm_block_t root,
				       uint64_t key, uint64_t *rkey, void *value_le)
{
	int r, i;
	uint32_t flags, nr_entries;
	struct dm_block *node;
	struct btree_node *n;

	r = bn_read_lock(info, root, &node);
	if (r)
		return r;

	n = dm_block_data(node);
	flags = le32_to_cpu(n->header.flags);
	nr_entries = le32_to_cpu(n->header.nr_entries);

	if (flags & INTERNAL_NODE) {
		i = lower_bound(n, key);
		if (i < 0 || i >= nr_entries) {
			r = -ENODATA;
			goto out;
		}

		r = dm_btree_lookup_next_single(info, value64(n, i), key, rkey, value_le);
		if (r == -ENODATA && i < (nr_entries - 1)) {
			i++;
			r = dm_btree_lookup_next_single(info, value64(n, i), key, rkey, value_le);
		}

	} else {
		i = upper_bound(n, key);
		if (i < 0 || i >= nr_entries) {
			r = -ENODATA;
			goto out;
		}

		*rkey = le64_to_cpu(n->keys[i]);
		memcpy(value_le, value_ptr(n, i), info->value_type.size);
	}
out:
	dm_tm_unlock(info->tm, node);
	return r;
}
int dm_btree_lookup_next(struct dm_btree_info *info, dm_block_t root,
			 uint64_t *keys, uint64_t *rkey, void *value_le)
{
	unsigned level;
	int r = -ENODATA;
	__le64 internal_value_le;
	struct ro_spine spine;

	init_ro_spine(&spine, info);
	for (level = 0; level < info->levels - 1u; level++) {
		r = btree_lookup_raw(&spine, root, keys[level],
				     lower_bound, rkey,
				     &internal_value_le, sizeof(uint64_t));
		if (r)
			goto out;

		if (*rkey != keys[level]) {
			r = -ENODATA;
			goto out;
		}

		root = le64_to_cpu(internal_value_le);
	}

	r = dm_btree_lookup_next_single(info, root, keys[level], rkey, value_le);
out:
	exit_ro_spine(&spine);
	return r;
}
EXPORT_SYMBOL_GPL(dm_btree_lookup_next);
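/*
 * dm_btree_lookup_next() returns the entry with the lowest key that is
 * greater than or equal to the one given (following upper_bound() above,
 * with the fallback to the next child on -ENODATA), which makes it
 * useful for skipping over unmapped sections of the key space.
 */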
/*
 * Splits a node by creating a sibling node and shifting half the node's
 * contents across.  Assumes there is a parent node, and that it has room
 * for another child.
 *
 * Before: the parent points to a full node A.
 * After: the parent points to a shadow of A (A*), now holding the lower
 * half of A's entries, and to a new sibling B holding the upper half.
 */
static int btree_split_sibling(struct shadow_spine *s, unsigned parent_index,
			       uint64_t key)
{
	int r;
	size_t size;
	unsigned nr_left, nr_right;
	struct dm_block *left, *right, *parent;
	struct btree_node *ln, *rn, *pn;
	__le64 location;

	left = shadow_current(s);

	r = new_block(s->info, &right);
	if (r < 0)
		return r;

	ln = dm_block_data(left);
	rn = dm_block_data(right);

	nr_left = le32_to_cpu(ln->header.nr_entries) / 2;
	nr_right = le32_to_cpu(ln->header.nr_entries) - nr_left;

	ln->header.nr_entries = cpu_to_le32(nr_left);

	rn->header.flags = ln->header.flags;
	rn->header.nr_entries = cpu_to_le32(nr_right);
	rn->header.max_entries = ln->header.max_entries;
	rn->header.value_size = ln->header.value_size;
	memcpy(rn->keys, ln->keys + nr_left, nr_right * sizeof(rn->keys[0]));

	size = le32_to_cpu(ln->header.flags) & INTERNAL_NODE ?
		sizeof(uint64_t) : s->info->value_type.size;
	memcpy(value_ptr(rn, 0), value_ptr(ln, nr_left),
	       size * nr_right);

	/*
	 * Patch up the parent
	 */
	parent = shadow_parent(s);

	pn = dm_block_data(parent);
	location = cpu_to_le64(dm_block_location(left));
	__dm_bless_for_disk(&location);
	memcpy_disk(value_ptr(pn, parent_index),
		    &location, sizeof(__le64));

	location = cpu_to_le64(dm_block_location(right));
	__dm_bless_for_disk(&location);

	r = insert_at(sizeof(__le64), pn, parent_index + 1,
		      le64_to_cpu(rn->keys[0]), &location);
	if (r) {
		unlock_block(s->info, right);
		return r;
	}

	if (key < le64_to_cpu(rn->keys[0])) {
		unlock_block(s->info, right);
		s->nodes[1] = left;
	} else {
		unlock_block(s->info, left);
		s->nodes[1] = right;
	}

	return 0;
}
/*
 * Splits a node by creating two new children beneath the given node.
 *
 * Before: a full node A, with no parent (it is the root).
 * After: A becomes a shadowed internal node with two new children,
 * B and C, which share A's previous entries between them.
 */
static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
{
	int r;
	size_t size;
	unsigned nr_left, nr_right;
	struct dm_block *left, *right, *new_parent;
	struct btree_node *pn, *ln, *rn;
	__le64 val;

	new_parent = shadow_current(s);

	r = new_block(s->info, &left);
	if (r < 0)
		return r;

	r = new_block(s->info, &right);
	if (r < 0) {
		unlock_block(s->info, left);
		return r;
	}

	pn = dm_block_data(new_parent);
	ln = dm_block_data(left);
	rn = dm_block_data(right);

	nr_left = le32_to_cpu(pn->header.nr_entries) / 2;
	nr_right = le32_to_cpu(pn->header.nr_entries) - nr_left;

	ln->header.flags = pn->header.flags;
	ln->header.nr_entries = cpu_to_le32(nr_left);
	ln->header.max_entries = pn->header.max_entries;
	ln->header.value_size = pn->header.value_size;

	rn->header.flags = pn->header.flags;
	rn->header.nr_entries = cpu_to_le32(nr_right);
	rn->header.max_entries = pn->header.max_entries;
	rn->header.value_size = pn->header.value_size;

	memcpy(ln->keys, pn->keys, nr_left * sizeof(pn->keys[0]));
	memcpy(rn->keys, pn->keys + nr_left, nr_right * sizeof(pn->keys[0]));

	size = le32_to_cpu(pn->header.flags) & INTERNAL_NODE ?
		sizeof(__le64) : s->info->value_type.size;
	memcpy(value_ptr(ln, 0), value_ptr(pn, 0), nr_left * size);
	memcpy(value_ptr(rn, 0), value_ptr(pn, nr_left),
	       nr_right * size);

	/* new_parent should just point to l and r now */
	pn->header.flags = cpu_to_le32(INTERNAL_NODE);
	pn->header.nr_entries = cpu_to_le32(2);
	pn->header.max_entries = cpu_to_le32(
		calc_max_entries(sizeof(__le64),
				 dm_bm_block_size(
					 dm_tm_get_bm(s->info->tm))));
	pn->header.value_size = cpu_to_le32(sizeof(__le64));

	val = cpu_to_le64(dm_block_location(left));
	__dm_bless_for_disk(&val);
	pn->keys[0] = ln->keys[0];
	memcpy_disk(value_ptr(pn, 0), &val, sizeof(__le64));

	val = cpu_to_le64(dm_block_location(right));
	__dm_bless_for_disk(&val);
	pn->keys[1] = rn->keys[0];
	memcpy_disk(value_ptr(pn, 1), &val, sizeof(__le64));

	/*
	 * rejig the spine.  This is ugly, since it knows too
	 * much about the spine
	 */
	if (s->nodes[0] != new_parent) {
		unlock_block(s->info, s->nodes[0]);
		s->nodes[0] = new_parent;
	}
	if (key < le64_to_cpu(rn->keys[0])) {
		unlock_block(s->info, right);
		s->nodes[1] = left;
	} else {
		unlock_block(s->info, left);
		s->nodes[1] = right;
	}
	s->count = 2;

	return 0;
}
static int btree_insert_raw(struct shadow_spine *s, dm_block_t root,
			    struct dm_btree_value_type *vt,
			    uint64_t key, unsigned *index)
{
	int r, i = *index, top = 1;
	struct btree_node *node;

	for (;;) {
		r = shadow_step(s, root, vt);
		if (r < 0)
			return r;

		node = dm_block_data(shadow_current(s));

		/*
		 * We have to patch up the parent node, ugly, but I don't
		 * see a way to do this automatically as part of the spine
		 * op.
		 */
		if (shadow_has_parent(s) && i >= 0) { /* FIXME: second clause unnecessary */
			__le64 location = cpu_to_le64(dm_block_location(shadow_current(s)));

			__dm_bless_for_disk(&location);
			memcpy_disk(value_ptr(dm_block_data(shadow_parent(s)), i),
				    &location, sizeof(__le64));
		}

		node = dm_block_data(shadow_current(s));

		if (node->header.nr_entries == node->header.max_entries) {
			if (top)
				r = btree_split_beneath(s, key);
			else
				r = btree_split_sibling(s, i, key);

			if (r < 0)
				return r;
		}

		node = dm_block_data(shadow_current(s));

		i = lower_bound(node, key);

		if (le32_to_cpu(node->header.flags) & LEAF_NODE)
			break;

		if (i < 0) {
			/* change the bounds on the lowest key */
			node->keys[0] = cpu_to_le64(key);
			i = 0;
		}

		root = value64(node, i);
		top = 0;
	}

	if (i < 0 || le64_to_cpu(node->keys[i]) != key)
		i++;

	*index = i;
	return 0;
}
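/*
 * The 'top' flag above distinguishes the first iteration (the root,
 * which has no parent and so must split by pushing two new children
 * beneath itself) from later iterations, where a full node can split
 * into a sibling because its parent is known to have room: any full
 * parent was itself split on the way down.
 */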
static bool need_insert(struct btree_node *node, uint64_t *keys,
			unsigned level, unsigned index)
{
	return ((index >= le32_to_cpu(node->header.nr_entries)) ||
		(le64_to_cpu(node->keys[index]) != keys[level]));
}
static int insert(struct dm_btree_info *info, dm_block_t root,
		  uint64_t *keys, void *value, dm_block_t *new_root,
		  int *inserted)
		  __dm_written_to_disk(value)
{
	int r;
	unsigned level, index = -1, last_level = info->levels - 1;
	dm_block_t block = root;
	struct shadow_spine spine;
	struct btree_node *n;
	struct dm_btree_value_type le64_type;

	init_le64_type(info->tm, &le64_type);
	init_shadow_spine(&spine, info);

	for (level = 0; level < (info->levels - 1); level++) {
		r = btree_insert_raw(&spine, block, &le64_type, keys[level], &index);
		if (r < 0)
			goto bad;

		n = dm_block_data(shadow_current(&spine));

		if (need_insert(n, keys, level, index)) {
			dm_block_t new_tree;
			__le64 new_le;

			r = dm_btree_empty(info, &new_tree);
			if (r < 0)
				goto bad;

			new_le = cpu_to_le64(new_tree);
			__dm_bless_for_disk(&new_le);

			r = insert_at(sizeof(uint64_t), n, index,
				      keys[level], &new_le);
			if (r)
				goto bad;
		}

		if (level < last_level)
			block = value64(n, index);
	}

	r = btree_insert_raw(&spine, block, &info->value_type,
			     keys[level], &index);
	if (r < 0)
		goto bad;

	n = dm_block_data(shadow_current(&spine));

	if (need_insert(n, keys, level, index)) {
		if (inserted)
			*inserted = 1;

		r = insert_at(info->value_type.size, n, index,
			      keys[level], value);
		if (r)
			goto bad_unblessed;
	} else {
		if (inserted)
			*inserted = 0;

		if (info->value_type.dec &&
		    (!info->value_type.equal ||
		     !info->value_type.equal(
			     info->value_type.context,
			     value_ptr(n, index),
			     value))) {
			info->value_type.dec(info->value_type.context,
					     value_ptr(n, index));
		}
		memcpy_disk(value_ptr(n, index),
			    value, info->value_type.size);
	}

	*new_root = shadow_root(&spine);
	exit_shadow_spine(&spine);

	return 0;

bad:
	__dm_unbless_for_disk(value);
bad_unblessed:
	exit_shadow_spine(&spine);
	return r;
}
int dm_btree_insert(struct dm_btree_info *info, dm_block_t root,
		    uint64_t *keys, void *value, dm_block_t *new_root)
		    __dm_written_to_disk(value)
{
	return insert(info, root, keys, value, new_root, NULL);
}
EXPORT_SYMBOL_GPL(dm_btree_insert);

int dm_btree_insert_notify(struct dm_btree_info *info, dm_block_t root,
			   uint64_t *keys, void *value, dm_block_t *new_root,
			   int *inserted)
			   __dm_written_to_disk(value)
{
	return insert(info, root, keys, value, new_root, inserted);
}
EXPORT_SYMBOL_GPL(dm_btree_insert_notify);
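/*
 * The two entry points differ only in the 'inserted' out-parameter:
 * dm_btree_insert_notify() reports whether the key was newly added (1)
 * or an existing value was overwritten (0), which callers may use to
 * keep accurate entry counts.
 */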
/*----------------------------------------------------------------*/

static int find_key(struct ro_spine *s, dm_block_t block, bool find_highest,
		    uint64_t *result_key, dm_block_t *next_block)
{
	int i, r;
	uint32_t flags;

	do {
		r = ro_step(s, block);
		if (r < 0)
			return r;

		flags = le32_to_cpu(ro_node(s)->header.flags);
		i = le32_to_cpu(ro_node(s)->header.nr_entries);
		if (!i)
			return -ENODATA;
		else
			i--;

		if (find_highest)
			*result_key = le64_to_cpu(ro_node(s)->keys[i]);
		else
			*result_key = le64_to_cpu(ro_node(s)->keys[0]);

		if (next_block || flags & INTERNAL_NODE)
			block = value64(ro_node(s), i);

	} while (flags & INTERNAL_NODE);

	if (next_block)
		*next_block = block;
	return 0;
}
static int dm_btree_find_key(struct dm_btree_info *info, dm_block_t root,
			     bool find_highest, uint64_t *result_keys)
{
	int r = 0, count = 0, level;
	struct ro_spine spine;

	init_ro_spine(&spine, info);
	for (level = 0; level < info->levels; level++) {
		r = find_key(&spine, root, find_highest, result_keys + level,
			     level == info->levels - 1 ? NULL : &root);
		if (r == -ENODATA) {
			r = 0;
			break;

		} else if (r)
			break;

		count++;
	}
	exit_ro_spine(&spine);

	return r ? r : count;
}
int dm_btree_find_highest_key(struct dm_btree_info *info, dm_block_t root,
			      uint64_t *result_keys)
{
	return dm_btree_find_key(info, root, true, result_keys);
}
EXPORT_SYMBOL_GPL(dm_btree_find_highest_key);

int dm_btree_find_lowest_key(struct dm_btree_info *info, dm_block_t root,
			     uint64_t *result_keys)
{
	return dm_btree_find_key(info, root, false, result_keys);
}
EXPORT_SYMBOL_GPL(dm_btree_find_lowest_key);
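/*
 * Note that dm_btree_find_key() returns the number of levels for which
 * a key was found (or a negative error), so the wrappers above can
 * return a count smaller than info->levels if an empty subtree is hit.
 */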
/*----------------------------------------------------------------*/

/*
 * FIXME: We shouldn't use a recursive algorithm when we have limited
 * stack space.  Also, this only works for single level trees.
 */
static int walk_node(struct dm_btree_info *info, dm_block_t block,
		     int (*fn)(void *context, uint64_t *keys, void *leaf),
		     void *context)
{
	int r;
	unsigned i, nr;
	struct dm_block *node;
	struct btree_node *n;
	uint64_t keys;

	r = bn_read_lock(info, block, &node);
	if (r)
		return r;

	n = dm_block_data(node);

	nr = le32_to_cpu(n->header.nr_entries);
	for (i = 0; i < nr; i++) {
		if (le32_to_cpu(n->header.flags) & INTERNAL_NODE) {
			r = walk_node(info, value64(n, i), fn, context);
			if (r)
				goto out;
		} else {
			keys = le64_to_cpu(*key_ptr(n, i));
			r = fn(context, &keys, value_ptr(n, i));
			if (r)
				goto out;
		}
	}

out:
	dm_tm_unlock(info->tm, node);
	return r;
}
int dm_btree_walk(struct dm_btree_info *info, dm_block_t root,
		  int (*fn)(void *context, uint64_t *keys, void *leaf),
		  void *context)
{
	BUG_ON(info->levels > 1);
	return walk_node(info, root, fn, context);
}
EXPORT_SYMBOL_GPL(dm_btree_walk);
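/*
 * Sketch of a walk callback (hypothetical; a walk visits every leaf
 * entry in key order and stops early if the callback returns non-zero):
 *
 *	static int count_entries(void *context, uint64_t *keys, void *leaf)
 *	{
 *		unsigned *count = context;
 *
 *		(*count)++;
 *		return 0;
 *	}
 *
 *	unsigned count = 0;
 *	r = dm_btree_walk(info, root, count_entries, &count);
 */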