/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_TYPES_H
#define _BCACHEFS_BTREE_TYPES_H

#include <linux/list.h>
#include <linux/rhashtable.h>

#include "bbpos_types.h"
#include "btree_key_cache_types.h"
#include "buckets_types.h"
#include "darray.h"
#include "errcode.h"
#include "journal_types.h"
#include "replicas_types.h"
#include "six.h"

struct open_bucket;
struct btree_update;
struct btree_trans;

#define MAX_BSETS               3U

struct btree_nr_keys {

        /*
         * Amount of live metadata (i.e. size of node after a compaction) in
         * units of u64s
         */
        u16                     live_u64s;
        u16                     bset_u64s[MAX_BSETS];

        /* live keys only: */
        u16                     packed_keys;
        u16                     unpacked_keys;
};

struct bset_tree {
        /*
         * We construct a binary tree in an array as if the array
         * started at 1, so that things line up on the same cachelines
         * better: see comments in bset.c at cacheline_to_bkey() for
         * details
         */

        /* size of the binary tree and prev array */
        u16                     size;

        /* function of size - precalculated for to_inorder() */
        u16                     extra;

        u16                     data_offset;
        u16                     aux_data_offset;
        u16                     end_offset;
};
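
/*
 * Illustration (a sketch of the indexing scheme only, not the real helpers):
 * with 1-based indexing, the children of tree node j are at 2*j and 2*j + 1,
 * so siblings land next to each other and tend to share a cacheline.
 * Hypothetical helpers to show the arithmetic:
 *
 *        static inline unsigned bt_left_child(unsigned j)  { return 2 * j; }
 *        static inline unsigned bt_right_child(unsigned j) { return 2 * j + 1; }
 *
 * The actual lookup-tree code lives in bset.c (see cacheline_to_bkey()).
 */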

struct btree_write {
        struct journal_entry_pin        journal;
};

struct btree_alloc {
        struct open_buckets     ob;
        __BKEY_PADDED(k, BKEY_BTREE_PTR_VAL_U64s_MAX);
};

struct btree_bkey_cached_common {
        struct six_lock         lock;
        u8                      level;
        u8                      btree_id;
        bool                    cached;
};

struct btree {
        struct btree_bkey_cached_common c;

        struct rhash_head       hash;
        u64                     hash_val;

        unsigned long           flags;
        u16                     written;
        u8                      nsets;
        u8                      nr_key_bits;
        u16                     version_ondisk;

        struct bkey_format      format;

        struct btree_node       *data;
        void                    *aux_data;

        /*
         * Sets of sorted keys - the real btree node - plus a binary search tree
         *
         * set[0] is special; set[0]->tree, set[0]->prev and set[0]->data point
         * to the memory we have allocated for this btree node. Additionally,
         * set[0]->data points to the entire btree node as it exists on disk.
         */
        struct bset_tree        set[MAX_BSETS];

        struct btree_nr_keys    nr;
        u16                     sib_u64s[2];
        u16                     whiteout_u64s;
        u8                      byte_order;
        u8                      unpack_fn_len;

        struct btree_write      writes[2];

        /* Key/pointer for this btree node */
        __BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX);

        /*
         * XXX: add a delete sequence number, so when bch2_btree_node_relock()
         * fails because the lock sequence number has changed - i.e. the
         * contents were modified - we can still relock the node if it's still
         * the one we want, without redoing the traversal
         */

        /*
         * For asynchronous splits/interior node updates:
         * When we do a split, we allocate new child nodes and update the parent
         * node to point to them: we update the parent in memory immediately,
         * but then we must wait until the children have been written out before
         * the update to the parent can be written - this is a list of the
         * btree_updates that are blocking this node from being
         * written:
         */
        struct list_head        write_blocked;

        /*
         * Also for asynchronous splits/interior node updates:
         * If a btree node isn't reachable yet, we don't want to kick off
         * another write - because that write also won't yet be reachable and
         * marking it as completed before it's reachable would be incorrect:
         */
        unsigned long           will_make_reachable;

        struct open_buckets     ob;

        /* lru list */
        struct list_head        list;
};
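
/*
 * Usage sketch: walking the bsets of a node oldest-to-newest (bset.h has a
 * for_each_bset() helper for this; an open-coded equivalent shown here):
 *
 *        struct bset_tree *t;
 *
 *        for (t = b->set; t < b->set + b->nsets; t++)
 *                pr_info("bset %u: %u live u64s\n",
 *                        (unsigned) (t - b->set),
 *                        b->nr.bset_u64s[t - b->set]);
 */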

struct btree_cache {
        struct rhashtable       table;
        bool                    table_init_done;
        /*
         * We never free a struct btree, except on shutdown - we just put it on
         * the btree_cache_freed list and reuse it later. This simplifies the
         * code, and it doesn't cost us much memory as the memory usage is
         * dominated by buffers that hold the actual btree node data and those
         * can be freed - and the number of struct btrees allocated is
         * effectively bounded.
         *
         * btree_cache_freeable is effectively a small cache - we use it because
         * high order page allocations can be rather expensive, and it's quite
         * common to delete and allocate btree nodes in quick succession. It
         * should never grow past ~2-3 nodes in practice.
         */
        struct mutex            lock;
        struct list_head        live;
        struct list_head        freeable;
        struct list_head        freed_pcpu;
        struct list_head        freed_nonpcpu;

        /* Number of elements in live + freeable lists */
        unsigned                used;
        unsigned                reserve;
        unsigned                freed;
        unsigned                not_freed_lock_intent;
        unsigned                not_freed_lock_write;
        unsigned                not_freed_dirty;
        unsigned                not_freed_read_in_flight;
        unsigned                not_freed_write_in_flight;
        unsigned                not_freed_noevict;
        unsigned                not_freed_write_blocked;
        unsigned                not_freed_will_make_reachable;
        unsigned                not_freed_access_bit;
        atomic_t                dirty;
        struct shrinker         *shrink;

        unsigned                used_by_btree[BTREE_ID_NR];

        /*
         * If we need to allocate memory for a new btree node and that
         * allocation fails, we can cannibalize another node in the btree cache
         * to satisfy the allocation - lock to guarantee only one thread does
         * this at a time:
         */
        struct task_struct      *alloc_lock;
        struct closure_waitlist alloc_wait;

        struct bbpos            pinned_nodes_start;
        struct bbpos            pinned_nodes_end;
        u64                     pinned_nodes_leaf_mask;
        u64                     pinned_nodes_interior_mask;
};
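
/*
 * Conceptual sketch (simplified; see the bch2_btree_cache_cannibalize_*()
 * helpers in btree_cache.c for the real thing): taking the cannibalize lock
 * amounts to claiming ->alloc_lock for the current task:
 *
 *        struct task_struct *old = cmpxchg(&bc->alloc_lock, NULL, current);
 *
 *        if (!old || old == current)
 *                ;        // we hold the cannibalize lock
 *        else
 *                ;        // wait on ->alloc_wait and retry
 */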

struct btree_node_iter {
        struct btree_node_iter_set {
                u16     k, end;
        } data[MAX_BSETS];
};

#define BTREE_ITER_FLAGS()                      \
        x(slots)                                \
        x(intent)                               \
        x(prefetch)                             \
        x(is_extents)                           \
        x(not_extents)                          \
        x(cached)                               \
        x(with_key_cache)                       \
        x(with_updates)                         \
        x(with_journal)                         \
        x(snapshot_field)                       \
        x(all_snapshots)                        \
        x(filter_snapshots)                     \
        x(nopreserve)                           \
        x(cached_nofill)                        \
        x(key_cache_fill)                       \

#define STR_HASH_FLAGS()                        \
        x(must_create)                          \
        x(must_replace)

#define BTREE_UPDATE_FLAGS()                    \
        x(internal_snapshot_node)               \
        x(nojournal)                            \
        x(key_cache_reclaim)

/*
 * BTREE_TRIGGER_norun - don't run triggers at all
 *
 * BTREE_TRIGGER_transactional - we're running transactional triggers as part of
 * a transaction commit: triggers may generate new updates
 *
 * BTREE_TRIGGER_atomic - we're running atomic triggers during a transaction
 * commit: we have our journal reservation, we're holding btree node write
 * locks, and we know the transaction is going to commit (returning an error
 * here is a fatal error, causing us to go emergency read-only)
 *
 * BTREE_TRIGGER_gc - we're in gc/fsck: running triggers to recalculate e.g. disk usage
 *
 * BTREE_TRIGGER_insert - @new is entering the btree
 * BTREE_TRIGGER_overwrite - @old is leaving the btree
 *
 * BTREE_TRIGGER_bucket_invalidate - signal from bucket invalidate path to alloc
 * trigger
 */
#define BTREE_TRIGGER_FLAGS()                   \
        x(norun)                                \
        x(transactional)                        \
        x(atomic)                               \
        x(check_repair)                         \
        x(gc)                                   \
        x(insert)                               \
        x(overwrite)                            \
        x(is_root)                              \
        x(bucket_invalidate)

enum {
#define x(n) BTREE_ITER_FLAG_BIT_##n,
        BTREE_ITER_FLAGS()
        STR_HASH_FLAGS()
        BTREE_UPDATE_FLAGS()
        BTREE_TRIGGER_FLAGS()
#undef x
};

/* iter flags must fit in a u16: */
//BUILD_BUG_ON(BTREE_ITER_FLAG_BIT_key_cache_fill > 15);

enum btree_iter_update_trigger_flags {
#define x(n) BTREE_ITER_##n     = 1U << BTREE_ITER_FLAG_BIT_##n,
        BTREE_ITER_FLAGS()
#undef x
#define x(n) STR_HASH_##n       = 1U << BTREE_ITER_FLAG_BIT_##n,
        STR_HASH_FLAGS()
#undef x
#define x(n) BTREE_UPDATE_##n   = 1U << BTREE_ITER_FLAG_BIT_##n,
        BTREE_UPDATE_FLAGS()
#undef x
#define x(n) BTREE_TRIGGER_##n  = 1U << BTREE_ITER_FLAG_BIT_##n,
        BTREE_TRIGGER_FLAGS()
#undef x
};
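
/*
 * Illustration: for the first entry, x(slots), the x-macro machinery above
 * expands to (roughly)
 *
 *        BTREE_ITER_FLAG_BIT_slots = 0,
 *        BTREE_ITER_slots = 1U << BTREE_ITER_FLAG_BIT_slots,
 *
 * so the iter, str_hash, update and trigger flags all get distinct bits in
 * one shared namespace.
 */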

enum btree_path_uptodate {
        BTREE_ITER_UPTODATE             = 0,
        BTREE_ITER_NEED_RELOCK          = 1,
        BTREE_ITER_NEED_TRAVERSE        = 2,
};

#if defined(CONFIG_BCACHEFS_LOCK_TIME_STATS) || defined(CONFIG_BCACHEFS_DEBUG)
#define TRACK_PATH_ALLOCATED
#endif

typedef u16 btree_path_idx_t;

struct btree_path {
        btree_path_idx_t        sorted_idx;
        u8                      ref;
        u8                      intent_ref;

        /* btree_iter_copy starts here: */
        struct bpos             pos;

        enum btree_id           btree_id:5;
        bool                    cached:1;
        bool                    preserve:1;
        enum btree_path_uptodate uptodate:2;
        /*
         * When true, failing to relock this path will cause the transaction to
         * restart:
         */
        bool                    should_be_locked:1;
        unsigned                level:3,
                                locks_want:3;
        u8                      nodes_locked;

        struct btree_path_level {
                struct btree    *b;
                struct btree_node_iter iter;
                u32             lock_seq;
#ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
                u64             lock_taken_time;
#endif
        }                       l[BTREE_MAX_DEPTH];
#ifdef TRACK_PATH_ALLOCATED
        unsigned long           ip_allocated;
#endif
};

static inline struct btree_path_level *path_l(struct btree_path *path)
{
        return path->l + path->level;
}
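
/* Usage: path_l(path)->b is the btree node at the path's current level. */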

static inline unsigned long btree_path_ip_allocated(struct btree_path *path)
{
#ifdef TRACK_PATH_ALLOCATED
        return path->ip_allocated;
#else
        return _THIS_IP_;
#endif
}

/*
 * @pos                 - iterator's current position
 * @level               - current btree depth
 * @locks_want          - btree level below which we start taking intent locks
 * @nodes_locked        - bitmask indicating which nodes in @nodes are locked
 * @nodes_intent_locked - bitmask indicating which locks are intent locks
 *
 * (note: the level and locking state described here is tracked per path, in
 * struct btree_path above)
 */
struct btree_iter {
        struct btree_trans      *trans;
        btree_path_idx_t        path;
        btree_path_idx_t        update_path;
        btree_path_idx_t        key_cache_path;

        enum btree_id           btree_id:8;
        u8                      min_depth;

        /* btree_iter_copy starts here: */
        u16                     flags;

        /* When we're filtering by snapshot, the snapshot ID we're looking for: */
        unsigned                snapshot;

        struct bpos             pos;
        /*
         * Current unpacked key - so that bch2_btree_iter_next()/
         * bch2_btree_iter_next_slot() can correctly advance pos.
         */
        struct bkey             k;

        /* BTREE_ITER_with_journal: */
        size_t                  journal_idx;
#ifdef TRACK_PATH_ALLOCATED
        unsigned long           ip_allocated;
#endif
};

#define BKEY_CACHED_ACCESSED            0
#define BKEY_CACHED_DIRTY               1

struct bkey_cached {
        struct btree_bkey_cached_common c;

        unsigned long           flags;
        unsigned long           btree_trans_barrier_seq;
        u16                     u64s;
        bool                    valid;
        struct bkey_cached_key  key;

        struct rhash_head       hash;
        struct list_head        list;

        struct journal_entry_pin journal;
        u64                     seq;

        struct bkey_i           *k;
};

static inline struct bpos btree_node_pos(struct btree_bkey_cached_common *b)
{
        return !b->cached
                ? container_of(b, struct btree, c)->key.k.p
                : container_of(b, struct bkey_cached, c)->key.pos;
}

struct btree_insert_entry {
        unsigned                flags;
        u8                      bkey_type;
        enum btree_id           btree_id:8;
        u8                      level:4;
        bool                    cached:1;
        bool                    insert_trigger_run:1;
        bool                    overwrite_trigger_run:1;
        bool                    key_cache_already_flushed:1;
        /*
         * @old_k may be a key from the journal; @old_btree_u64s always refers
         * to the size of the key being overwritten in the btree:
         */
        u8                      old_btree_u64s;
        btree_path_idx_t        path;
        struct bkey_i           *k;
        /* key being overwritten: */
        struct bkey             old_k;
        const struct bch_val    *old_v;
        unsigned long           ip_allocated;
};

/* Number of btree paths we preallocate, usually enough */
#define BTREE_ITER_INITIAL              64
/*
 * Limit for btree_trans_too_many_iters(); this is enough that almost all code
 * paths should run inside this limit, and if they don't it usually indicates a
 * bug (leaking/duplicated btree paths).
 *
 * exception: some fsck paths
 *
 * bugs with excessive path usage seem to have been eliminated now, so we
 * might consider eliminating this (and btree_trans_too_many_iters()) at some
 * point.
 */
#define BTREE_ITER_NORMAL_LIMIT         256
/* never exceed limit */
#define BTREE_ITER_MAX                  (1U << 10)

struct btree_trans_commit_hook;
typedef int (btree_trans_commit_hook_fn)(struct btree_trans *, struct btree_trans_commit_hook *);

struct btree_trans_commit_hook {
        btree_trans_commit_hook_fn      *fn;
        struct btree_trans_commit_hook  *next;
};

#define BTREE_TRANS_MEM_MAX     (1U << 16)

#define BTREE_TRANS_MAX_LOCK_HOLD_TIME_NS       10000

struct btree_trans_paths {
        unsigned long           nr_paths;
        struct btree_path       paths[];
};

struct btree_trans {
        struct bch_fs           *c;

        unsigned long           *paths_allocated;
        struct btree_path       *paths;
        btree_path_idx_t        *sorted;
        struct btree_insert_entry *updates;

        void                    *mem;
        unsigned                mem_top;
        unsigned                mem_bytes;

        btree_path_idx_t        nr_sorted;
        btree_path_idx_t        nr_paths;
        btree_path_idx_t        nr_paths_max;
        u8                      fn_idx;
        u8                      nr_updates;
        u8                      lock_must_abort;
        bool                    lock_may_not_fail:1;
        bool                    srcu_held:1;
        bool                    locked:1;
        bool                    write_locked:1;
        bool                    used_mempool:1;
        bool                    in_traverse_all:1;
        bool                    paths_sorted:1;
        bool                    memory_allocation_failure:1;
        bool                    journal_transaction_names:1;
        bool                    journal_replay_not_finished:1;
        bool                    notrace_relock_fail:1;
        enum bch_errcode        restarted:16;
        u32                     restart_count;

        u64                     last_begin_time;
        unsigned long           last_begin_ip;
        unsigned long           last_restarted_ip;
        unsigned long           last_unlock_ip;
        unsigned long           srcu_lock_time;

        const char              *fn;
        struct btree_bkey_cached_common *locking;
        struct six_lock_waiter  locking_wait;
        int                     srcu_idx;

        /* update path: */
        u16                     journal_entries_u64s;
        u16                     journal_entries_size;
        struct jset_entry       *journal_entries;

        struct btree_trans_commit_hook *hooks;
        struct journal_entry_pin *journal_pin;

        struct journal_res      journal_res;
        u64                     *journal_seq;
        struct disk_reservation *disk_res;

        struct bch_fs_usage_base fs_usage_delta;

        unsigned                journal_u64s;
        unsigned                extra_disk_res; /* XXX kill */
        struct replicas_delta_list *fs_usage_deltas;

        /* Entries before this are zeroed out on every bch2_trans_get() call */

        struct list_head        list;
        struct closure          ref;

        unsigned long           _paths_allocated[BITS_TO_LONGS(BTREE_ITER_INITIAL)];
        struct btree_trans_paths trans_paths;
        struct btree_path       _paths[BTREE_ITER_INITIAL];
        btree_path_idx_t        _sorted[BTREE_ITER_INITIAL + 4];
        struct btree_insert_entry _updates[BTREE_ITER_INITIAL];
};

static inline struct btree_path *btree_iter_path(struct btree_trans *trans, struct btree_iter *iter)
{
        return trans->paths + iter->path;
}

static inline struct btree_path *btree_iter_key_cache_path(struct btree_trans *trans, struct btree_iter *iter)
{
        return iter->key_cache_path
                ? trans->paths + iter->key_cache_path
                : NULL;
}

#define BCH_BTREE_WRITE_TYPES()                                         \
        x(initial,              0)                                      \
        x(init_next_bset,       1)                                      \
        x(cache_reclaim,        2)                                      \
        x(journal_reclaim,      3)                                      \
        x(interior,             4)

enum btree_write_type {
#define x(t, n) BTREE_WRITE_##t,
        BCH_BTREE_WRITE_TYPES()
#undef x
        BTREE_WRITE_TYPE_NR,
};

#define BTREE_WRITE_TYPE_MASK   (roundup_pow_of_two(BTREE_WRITE_TYPE_NR) - 1)
#define BTREE_WRITE_TYPE_BITS   ilog2(roundup_pow_of_two(BTREE_WRITE_TYPE_NR))

#define BTREE_FLAGS()                                                   \
        x(read_in_flight)                                               \
        x(read_error)                                                   \
        x(dirty)                                                        \
        x(need_write)                                                   \
        x(write_blocked)                                                \
        x(will_make_reachable)                                          \
        x(noevict)                                                      \
        x(write_idx)                                                    \
        x(accessed)                                                     \
        x(write_in_flight)                                              \
        x(write_in_flight_inner)                                        \
        x(just_written)                                                 \
        x(dying)                                                        \
        x(fake)                                                         \
        x(need_rewrite)                                                 \
        x(never_write)

enum btree_flags {
        /* First bits for btree node write type */
        BTREE_NODE_FLAGS_START = BTREE_WRITE_TYPE_BITS - 1,
#define x(flag) BTREE_NODE_##flag,
        BTREE_FLAGS()
#undef x
};

#define x(flag)                                                         \
static inline bool btree_node_ ## flag(struct btree *b)                 \
{       return test_bit(BTREE_NODE_ ## flag, &b->flags); }              \
                                                                        \
static inline void set_btree_node_ ## flag(struct btree *b)             \
{       set_bit(BTREE_NODE_ ## flag, &b->flags); }                      \
                                                                        \
static inline void clear_btree_node_ ## flag(struct btree *b)           \
{       clear_bit(BTREE_NODE_ ## flag, &b->flags); }

BTREE_FLAGS()
#undef x
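
/*
 * For example, x(read_in_flight) above generates (illustrative expansion):
 *
 *        static inline bool btree_node_read_in_flight(struct btree *b)
 *        {        return test_bit(BTREE_NODE_read_in_flight, &b->flags); }
 *
 * along with the matching set_btree_node_read_in_flight() and
 * clear_btree_node_read_in_flight(). The low BTREE_WRITE_TYPE_BITS bits of
 * b->flags are reserved for the node write type (see BTREE_NODE_FLAGS_START
 * above); the flag bits start immediately after them.
 */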

static inline struct btree_write *btree_current_write(struct btree *b)
{
        return b->writes + btree_node_write_idx(b);
}

static inline struct btree_write *btree_prev_write(struct btree *b)
{
        return b->writes + (btree_node_write_idx(b) ^ 1);
}

static inline struct bset_tree *bset_tree_last(struct btree *b)
{
        EBUG_ON(!b->nsets);
        return b->set + b->nsets - 1;
}

static inline void *
__btree_node_offset_to_ptr(const struct btree *b, u16 offset)
{
        return (void *) ((u64 *) b->data + 1 + offset);
}

static inline u16
__btree_node_ptr_to_offset(const struct btree *b, const void *p)
{
        u16 ret = (u64 *) p - 1 - (u64 *) b->data;

        EBUG_ON(__btree_node_offset_to_ptr(b, ret) != p);
        return ret;
}
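
/*
 * Worked example: offsets here are in units of u64s, measured from one u64
 * past the start of the node, so offset 0 maps to 8 bytes into b->data and
 * the two helpers round-trip:
 *
 *        void *p = __btree_node_offset_to_ptr(b, 0);  // (void *) b->data + 8
 *        BUG_ON(__btree_node_ptr_to_offset(b, p) != 0);
 */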

static inline struct bset *bset(const struct btree *b,
                                const struct bset_tree *t)
{
        return __btree_node_offset_to_ptr(b, t->data_offset);
}

static inline void set_btree_bset_end(struct btree *b, struct bset_tree *t)
{
        t->end_offset =
                __btree_node_ptr_to_offset(b, vstruct_last(bset(b, t)));
}

static inline void set_btree_bset(struct btree *b, struct bset_tree *t,
                                  const struct bset *i)
{
        t->data_offset = __btree_node_ptr_to_offset(b, i);
        set_btree_bset_end(b, t);
}

static inline struct bset *btree_bset_first(struct btree *b)
{
        return bset(b, b->set);
}

static inline struct bset *btree_bset_last(struct btree *b)
{
        return bset(b, bset_tree_last(b));
}

static inline u16
__btree_node_key_to_offset(const struct btree *b, const struct bkey_packed *k)
{
        return __btree_node_ptr_to_offset(b, k);
}

static inline struct bkey_packed *
__btree_node_offset_to_key(const struct btree *b, u16 k)
{
        return __btree_node_offset_to_ptr(b, k);
}

static inline unsigned btree_bkey_first_offset(const struct bset_tree *t)
{
        return t->data_offset + offsetof(struct bset, _data) / sizeof(u64);
}

#define btree_bkey_first(_b, _t)                                        \
({                                                                      \
        EBUG_ON(bset(_b, _t)->start !=                                  \
                __btree_node_offset_to_key(_b, btree_bkey_first_offset(_t)));\
                                                                        \
        bset(_b, _t)->start;                                            \
})

#define btree_bkey_last(_b, _t)                                         \
({                                                                      \
        EBUG_ON(__btree_node_offset_to_key(_b, (_t)->end_offset) !=     \
                vstruct_last(bset(_b, _t)));                            \
                                                                        \
        __btree_node_offset_to_key(_b, (_t)->end_offset);               \
})

static inline unsigned bset_u64s(struct bset_tree *t)
{
        return t->end_offset - t->data_offset -
                sizeof(struct bset) / sizeof(u64);
}
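
/*
 * i.e. the number of u64s of key data in the bset: [data_offset, end_offset)
 * covers the struct bset header plus the keys, so after subtracting the
 * header this should equal the bset's own u64s count for an in-sync
 * bset_tree.
 */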

static inline unsigned bset_dead_u64s(struct btree *b, struct bset_tree *t)
{
        return bset_u64s(t) - b->nr.bset_u64s[t - b->set];
}

static inline unsigned bset_byte_offset(struct btree *b, void *i)
{
        return i - (void *) b->data;
}

enum btree_node_type {
        BKEY_TYPE_btree,
#define x(kwd, val, ...) BKEY_TYPE_##kwd = val + 1,
        BCH_BTREE_IDS()
#undef x
        BKEY_TYPE_NR
};

/* Type of a key in btree @id at level @level: */
static inline enum btree_node_type __btree_node_type(unsigned level, enum btree_id id)
{
        return level ? BKEY_TYPE_btree : (unsigned) id + 1;
}
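
/*
 * e.g. __btree_node_type(0, BTREE_ID_extents) == BKEY_TYPE_extents (the
 * btree id shifted up by one, per the enum above), while any interior node
 * (level > 0) contains BKEY_TYPE_btree keys, i.e. pointers to child nodes.
 */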

/* Type of keys @b contains: */
static inline enum btree_node_type btree_node_type(struct btree *b)
{
        return __btree_node_type(b->c.level, b->c.btree_id);
}

const char *bch2_btree_node_type_str(enum btree_node_type);

#define BTREE_NODE_TYPE_HAS_TRANS_TRIGGERS              \
        (BIT_ULL(BKEY_TYPE_extents)|                    \
         BIT_ULL(BKEY_TYPE_alloc)|                      \
         BIT_ULL(BKEY_TYPE_inodes)|                     \
         BIT_ULL(BKEY_TYPE_stripes)|                    \
         BIT_ULL(BKEY_TYPE_reflink)|                    \
         BIT_ULL(BKEY_TYPE_subvolumes)|                 \
         BIT_ULL(BKEY_TYPE_btree))

#define BTREE_NODE_TYPE_HAS_ATOMIC_TRIGGERS             \
        (BIT_ULL(BKEY_TYPE_alloc)|                      \
         BIT_ULL(BKEY_TYPE_inodes)|                     \
         BIT_ULL(BKEY_TYPE_stripes)|                    \
         BIT_ULL(BKEY_TYPE_snapshots))

#define BTREE_NODE_TYPE_HAS_TRIGGERS                    \
        (BTREE_NODE_TYPE_HAS_TRANS_TRIGGERS|            \
         BTREE_NODE_TYPE_HAS_ATOMIC_TRIGGERS)

static inline bool btree_node_type_needs_gc(enum btree_node_type type)
{
        return BTREE_NODE_TYPE_HAS_TRIGGERS & BIT_ULL(type);
}

static inline bool btree_node_type_is_extents(enum btree_node_type type)
{
        const unsigned mask = 0
#define x(name, nr, flags, ...) |((!!((flags) & BTREE_ID_EXTENTS)) << (nr + 1))
        BCH_BTREE_IDS()
#undef x
        ;

        return (1U << type) & mask;
}
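
/*
 * Note the (nr + 1) shift above: it mirrors the + 1 in __btree_node_type(),
 * so this mask is indexed by btree_node_type rather than btree_id, unlike
 * the btree_id-indexed masks in the helpers below.
 */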

static inline bool btree_id_is_extents(enum btree_id btree)
{
        return btree_node_type_is_extents(__btree_node_type(0, btree));
}

static inline bool btree_type_has_snapshots(enum btree_id id)
{
        const unsigned mask = 0
#define x(name, nr, flags, ...) |((!!((flags) & BTREE_ID_SNAPSHOTS)) << nr)
        BCH_BTREE_IDS()
#undef x
        ;

        return (1U << id) & mask;
}

static inline bool btree_type_has_snapshot_field(enum btree_id id)
{
        const unsigned mask = 0
#define x(name, nr, flags, ...) |((!!((flags) & (BTREE_ID_SNAPSHOT_FIELD|BTREE_ID_SNAPSHOTS))) << nr)
        BCH_BTREE_IDS()
#undef x
        ;

        return (1U << id) & mask;
}

static inline bool btree_type_has_ptrs(enum btree_id id)
{
        const unsigned mask = 0
#define x(name, nr, flags, ...) |((!!((flags) & BTREE_ID_DATA)) << nr)
        BCH_BTREE_IDS()
#undef x
        ;

        return (1U << id) & mask;
}

struct btree_root {
        struct btree            *b;

        /* On disk root - see async splits: */
        __BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX);
        u8                      level;
        u8                      alive;
        s16                     error;
};

enum btree_gc_coalesce_fail_reason {
        BTREE_GC_COALESCE_FAIL_RESERVE_GET,
        BTREE_GC_COALESCE_FAIL_KEYLIST_REALLOC,
        BTREE_GC_COALESCE_FAIL_FORMAT_FITS,
};

enum btree_node_sibling {
        btree_prev_sib,
        btree_next_sib,
};

struct get_locks_fail {
        unsigned        l;
        struct btree    *b;
};

#endif /* _BCACHEFS_BTREE_TYPES_H */