/* fs/bcachefs/backpointers.h (gitweb extraction header removed) */
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _BCACHEFS_BACKPOINTERS_BACKGROUND_H
3 #define _BCACHEFS_BACKPOINTERS_BACKGROUND_H
4
5 #include "btree_cache.h"
6 #include "btree_iter.h"
7 #include "btree_update.h"
8 #include "buckets.h"
9 #include "error.h"
10 #include "super.h"
11
12 static inline u64 swab40(u64 x)
13 {
14         return (((x & 0x00000000ffULL) << 32)|
15                 ((x & 0x000000ff00ULL) << 16)|
16                 ((x & 0x0000ff0000ULL) >>  0)|
17                 ((x & 0x00ff000000ULL) >> 16)|
18                 ((x & 0xff00000000ULL) >> 32));
19 }
20
/*
 * bkey method hooks for KEY_TYPE_backpointer (implemented out of line);
 * wired into the generic bkey machinery via bch2_bkey_ops_backpointer below.
 */
int bch2_backpointer_invalid(struct bch_fs *, struct bkey_s_c k,
			     enum bch_validate_flags, struct printbuf *);
void bch2_backpointer_to_text(struct printbuf *, const struct bch_backpointer *);
void bch2_backpointer_k_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
void bch2_backpointer_swab(struct bkey_s);
26
/* bkey_ops table for KEY_TYPE_backpointer, using the helpers declared above */
#define bch2_bkey_ops_backpointer ((struct bkey_ops) {	\
	.key_invalid	= bch2_backpointer_invalid,	\
	.val_to_text	= bch2_backpointer_k_to_text,	\
	.swab		= bch2_backpointer_swab,	\
	.min_val_size	= 32,				\
})

/*
 * Scale factor between bucket sectors and backpointer btree offsets:
 * bp_pos.offset = (bucket start sector << this shift) + offset within bucket
 * (see bucket_pos_to_bp_noerror()).  NOTE(review): the name suggests this
 * leaves room for backpointers into extents compressed up to 2^10:1 —
 * confirm against backpointers.c.
 */
#define MAX_EXTENT_COMPRESS_RATIO_SHIFT		10
35
36 /*
37  * Convert from pos in backpointer btree to pos of corresponding bucket in alloc
38  * btree:
39  */
40 static inline struct bpos bp_pos_to_bucket(const struct bch_dev *ca, struct bpos bp_pos)
41 {
42         u64 bucket_sector = bp_pos.offset >> MAX_EXTENT_COMPRESS_RATIO_SHIFT;
43
44         return POS(bp_pos.inode, sector_to_bucket(ca, bucket_sector));
45 }
46
47 static inline bool bp_pos_to_bucket_nodev_noerror(struct bch_fs *c, struct bpos bp_pos, struct bpos *bucket)
48 {
49         rcu_read_lock();
50         struct bch_dev *ca = bch2_dev_rcu(c, bp_pos.inode);
51         if (ca)
52                 *bucket = bp_pos_to_bucket(ca, bp_pos);
53         rcu_read_unlock();
54         return ca != NULL;
55 }
56
/*
 * Like bp_pos_to_bucket_nodev_noerror(), but additionally flags a filesystem
 * inconsistency when the device referenced by @bp_pos doesn't exist.
 * Returns true on success (with *bucket filled in), false otherwise.
 */
static inline bool bp_pos_to_bucket_nodev(struct bch_fs *c, struct bpos bp_pos, struct bpos *bucket)
{
	return !bch2_fs_inconsistent_on(!bp_pos_to_bucket_nodev_noerror(c, bp_pos, bucket),
					c, "backpointer for missing device %llu", bp_pos.inode);
}
62
63 static inline struct bpos bucket_pos_to_bp_noerror(const struct bch_dev *ca,
64                                                    struct bpos bucket,
65                                                    u64 bucket_offset)
66 {
67         return POS(bucket.inode,
68                    (bucket_to_sector(ca, bucket.offset) <<
69                     MAX_EXTENT_COMPRESS_RATIO_SHIFT) + bucket_offset);
70 }
71
/*
 * Convert from pos in alloc btree + bucket offset to pos in backpointer btree:
 */
static inline struct bpos bucket_pos_to_bp(const struct bch_dev *ca,
					   struct bpos bucket,
					   u64 bucket_offset)
{
	struct bpos ret = bucket_pos_to_bp_noerror(ca, bucket, bucket_offset);
	/* debug-build check: the conversion must round-trip through bp_pos_to_bucket() */
	EBUG_ON(!bkey_eq(bucket, bp_pos_to_bucket(ca, ret)));
	return ret;
}
83
/* Slow path for bch2_bucket_backpointer_mod(), used when the btree write buffer is disabled. */
int bch2_bucket_backpointer_mod_nowritebuffer(struct btree_trans *, struct bch_dev *,
				struct bpos bucket, struct bch_backpointer, struct bkey_s_c, bool);
86
87 static inline int bch2_bucket_backpointer_mod(struct btree_trans *trans,
88                                 struct bch_dev *ca,
89                                 struct bpos bucket,
90                                 struct bch_backpointer bp,
91                                 struct bkey_s_c orig_k,
92                                 bool insert)
93 {
94         if (unlikely(bch2_backpointers_no_use_write_buffer))
95                 return bch2_bucket_backpointer_mod_nowritebuffer(trans, ca, bucket, bp, orig_k, insert);
96
97         struct bkey_i_backpointer bp_k;
98
99         bkey_backpointer_init(&bp_k.k_i);
100         bp_k.k.p = bucket_pos_to_bp(ca, bucket, bp.bucket_offset);
101         bp_k.v = bp;
102
103         if (!insert) {
104                 bp_k.k.type = KEY_TYPE_deleted;
105                 set_bkey_val_u64s(&bp_k.k, 0);
106         }
107
108         return bch2_trans_update_buffered(trans, BTREE_ID_backpointers, &bp_k.k_i);
109 }
110
111 static inline enum bch_data_type bch2_bkey_ptr_data_type(struct bkey_s_c k,
112                                                          struct extent_ptr_decoded p,
113                                                          const union bch_extent_entry *entry)
114 {
115         switch (k.k->type) {
116         case KEY_TYPE_btree_ptr:
117         case KEY_TYPE_btree_ptr_v2:
118                 return BCH_DATA_btree;
119         case KEY_TYPE_extent:
120         case KEY_TYPE_reflink_v:
121                 return p.has_ec ? BCH_DATA_stripe : BCH_DATA_user;
122         case KEY_TYPE_stripe: {
123                 const struct bch_extent_ptr *ptr = &entry->ptr;
124                 struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);
125
126                 BUG_ON(ptr < s.v->ptrs ||
127                        ptr >= s.v->ptrs + s.v->nr_blocks);
128
129                 return ptr >= s.v->ptrs + s.v->nr_blocks - s.v->nr_redundant
130                         ? BCH_DATA_parity
131                         : BCH_DATA_user;
132         }
133         default:
134                 BUG();
135         }
136 }
137
138 static inline void bch2_extent_ptr_to_bp(struct bch_fs *c, struct bch_dev *ca,
139                            enum btree_id btree_id, unsigned level,
140                            struct bkey_s_c k, struct extent_ptr_decoded p,
141                            const union bch_extent_entry *entry,
142                            struct bpos *bucket_pos, struct bch_backpointer *bp)
143 {
144         enum bch_data_type data_type = bch2_bkey_ptr_data_type(k, p, entry);
145         s64 sectors = level ? btree_sectors(c) : k.k->size;
146         u32 bucket_offset;
147
148         *bucket_pos = PTR_BUCKET_POS_OFFSET(ca, &p.ptr, &bucket_offset);
149         *bp = (struct bch_backpointer) {
150                 .btree_id       = btree_id,
151                 .level          = level,
152                 .data_type      = data_type,
153                 .bucket_offset  = ((u64) bucket_offset << MAX_EXTENT_COMPRESS_RATIO_SHIFT) +
154                         p.crc.offset,
155                 .bucket_len     = ptr_disk_sectors(sectors, p),
156                 .pos            = k.k->p,
157         };
158 }
159
/*
 * Backpointer iteration/lookup helpers (implemented out of line).
 * NOTE(review): semantics inferred from names — bch2_backpointer_get_key()
 * and bch2_backpointer_get_node() presumably look up the extent key or
 * btree node a backpointer refers to; confirm against backpointers.c.
 */
int bch2_get_next_backpointer(struct btree_trans *, struct bch_dev *ca, struct bpos, int,
			      struct bpos *, struct bch_backpointer *, unsigned);
struct bkey_s_c bch2_backpointer_get_key(struct btree_trans *, struct btree_iter *,
					 struct bpos, struct bch_backpointer,
					 unsigned);
struct btree *bch2_backpointer_get_node(struct btree_trans *, struct btree_iter *,
					struct bpos, struct bch_backpointer);

/* fsck passes cross-checking backpointers against extents/btree nodes */
int bch2_check_btree_backpointers(struct bch_fs *);
int bch2_check_extents_to_backpointers(struct bch_fs *);
int bch2_check_backpointers_to_extents(struct bch_fs *);
171
172 #endif /* _BCACHEFS_BACKPOINTERS_BACKGROUND_H */
/* (gitweb page footer removed) */