/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_EC_H
#define _BCACHEFS_EC_H

#include "ec_types.h"
#include "buckets_types.h"
#include "extents_types.h"

enum bch_validate_flags;

int bch2_stripe_invalid(struct bch_fs *, struct bkey_s_c,
			enum bch_validate_flags, struct printbuf *);
void bch2_stripe_to_text(struct printbuf *, struct bch_fs *,
			 struct bkey_s_c);
int bch2_trigger_stripe(struct btree_trans *, enum btree_id, unsigned,
			struct bkey_s_c, struct bkey_s,
			enum btree_iter_update_trigger_flags);

#define bch2_bkey_ops_stripe ((struct bkey_ops) {	\
	.key_invalid	= bch2_stripe_invalid,		\
	.val_to_text	= bch2_stripe_to_text,		\
	.swab		= bch2_ptr_swab,		\
	.trigger	= bch2_trigger_stripe,		\
	.min_val_size	= 8,				\
})

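/*
 * A stripe value is laid out as: the struct bch_stripe header, then nr_blocks
 * extent pointers, then the per-block checksums (stripe_csums_per_device()
 * entries of bch_crc_bytes[csum_type] bytes each), then one __le16 live
 * sector count per block.  The offset helpers below walk this layout.
 */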
static inline unsigned stripe_csums_per_device(const struct bch_stripe *s)
{
	return DIV_ROUND_UP(le16_to_cpu(s->sectors),
			    1 << s->csum_granularity_bits);
}

static inline unsigned stripe_csum_offset(const struct bch_stripe *s,
					  unsigned dev, unsigned csum_idx)
{
	EBUG_ON(s->csum_type >= BCH_CSUM_NR);

	unsigned csum_bytes = bch_crc_bytes[s->csum_type];

	return sizeof(struct bch_stripe) +
		sizeof(struct bch_extent_ptr) * s->nr_blocks +
		(dev * stripe_csums_per_device(s) + csum_idx) * csum_bytes;
}

static inline unsigned stripe_blockcount_offset(const struct bch_stripe *s,
						unsigned idx)
{
	return stripe_csum_offset(s, s->nr_blocks, 0) +
		sizeof(u16) * idx;
}

static inline unsigned stripe_blockcount_get(const struct bch_stripe *s,
					     unsigned idx)
{
	return le16_to_cpup((void *) s + stripe_blockcount_offset(s, idx));
}

static inline void stripe_blockcount_set(struct bch_stripe *s,
					 unsigned idx, unsigned v)
{
	__le16 *p = (void *) s + stripe_blockcount_offset(s, idx);

	*p = cpu_to_le16(v);
}

static inline unsigned stripe_val_u64s(const struct bch_stripe *s)
{
	return DIV_ROUND_UP(stripe_blockcount_offset(s, s->nr_blocks),
			    sizeof(u64));
}

static inline void *stripe_csum(struct bch_stripe *s,
				unsigned block, unsigned csum_idx)
{
	EBUG_ON(block >= s->nr_blocks);
	EBUG_ON(csum_idx >= stripe_csums_per_device(s));

	return (void *) s + stripe_csum_offset(s, block, csum_idx);
}

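/*
 * Stored checksums may be narrower than struct bch_csum: only
 * bch_crc_bytes[csum_type] bytes are kept per entry, so reads go through a
 * zero-initialized struct bch_csum.
 */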
static inline struct bch_csum stripe_csum_get(struct bch_stripe *s,
					      unsigned block, unsigned csum_idx)
{
	struct bch_csum csum = { 0 };

	memcpy(&csum, stripe_csum(s, block, csum_idx), bch_crc_bytes[s->csum_type]);
	return csum;
}

static inline void stripe_csum_set(struct bch_stripe *s,
				   unsigned block, unsigned csum_idx,
				   struct bch_csum csum)
{
	memcpy(stripe_csum(s, block, csum_idx), &csum, bch_crc_bytes[s->csum_type]);
}

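/*
 * A data pointer belongs to a stripe block when it is on the same device and
 * generation and its offset falls within the block's sector range.
 */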
static inline bool __bch2_ptr_matches_stripe(const struct bch_extent_ptr *stripe_ptr,
					     const struct bch_extent_ptr *data_ptr,
					     unsigned sectors)
{
	return data_ptr->dev == stripe_ptr->dev &&
	       data_ptr->gen == stripe_ptr->gen &&
	       data_ptr->offset >= stripe_ptr->offset &&
	       data_ptr->offset < stripe_ptr->offset + sectors;
}

static inline bool bch2_ptr_matches_stripe(const struct bch_stripe *s,
					   struct extent_ptr_decoded p)
{
	unsigned nr_data = s->nr_blocks - s->nr_redundant;

	BUG_ON(!p.has_ec);

	if (p.ec.block >= nr_data)
		return false;

	return __bch2_ptr_matches_stripe(&s->ptrs[p.ec.block], &p.ptr,
					 le16_to_cpu(s->sectors));
}

static inline bool bch2_ptr_matches_stripe_m(const struct gc_stripe *m,
					     struct extent_ptr_decoded p)
{
	unsigned nr_data = m->nr_blocks - m->nr_redundant;

	BUG_ON(!p.has_ec);

	if (p.ec.block >= nr_data)
		return false;

	return __bch2_ptr_matches_stripe(&m->ptrs[p.ec.block], &p.ptr,
					 m->sectors);
}

struct bch_read_bio;

struct ec_stripe_buf {
	/* might not be buffering the entire stripe: */
	unsigned		offset;
	unsigned		size;
	unsigned long		valid[BITS_TO_LONGS(BCH_BKEY_PTRS_MAX)];

	void			*data[BCH_BKEY_PTRS_MAX];

	__BKEY_PADDED(key, 255);
};

struct ec_stripe_head;

enum ec_stripe_ref {
	STRIPE_REF_io,
	STRIPE_REF_stripe,
	STRIPE_REF_NR
};

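/*
 * In-memory state for a stripe that is being created: buckets are allocated
 * and data/parity blocks written out before the stripe key is inserted.
 */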
struct ec_stripe_new {
	struct bch_fs		*c;
	struct ec_stripe_head	*h;
	struct mutex		lock;
	struct list_head	list;

	struct hlist_node	hash;
	u64			idx;

	struct closure		iodone;

	atomic_t		ref[STRIPE_REF_NR];

	int			err;

	u8			nr_data;
	u8			nr_parity;
	bool			allocated;
	bool			pending;
	bool			have_existing_stripe;

	unsigned long		blocks_gotten[BITS_TO_LONGS(BCH_BKEY_PTRS_MAX)];
	unsigned long		blocks_allocated[BITS_TO_LONGS(BCH_BKEY_PTRS_MAX)];
	open_bucket_idx_t	blocks[BCH_BKEY_PTRS_MAX];
	struct disk_reservation	res;

	struct ec_stripe_buf	new_stripe;
	struct ec_stripe_buf	existing_stripe;
};

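/*
 * One stripe head exists per target/algorithm/redundancy/watermark
 * combination; it tracks the devices in use and the stripe currently being
 * filled for that combination.
 */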
struct ec_stripe_head {
	struct list_head	list;
	struct mutex		lock;

	unsigned		target;
	unsigned		algo;
	unsigned		redundancy;
	enum bch_watermark	watermark;

	struct bch_devs_mask	devs;
	unsigned		nr_active_devs;

	unsigned		blocksize;

	struct dev_stripe_state	block_stripe;
	struct dev_stripe_state	parity_stripe;

	struct ec_stripe_new	*s;
};

int bch2_ec_read_extent(struct btree_trans *, struct bch_read_bio *);

void *bch2_writepoint_ec_buf(struct bch_fs *, struct write_point *);

void bch2_ec_bucket_cancel(struct bch_fs *, struct open_bucket *);

int bch2_ec_stripe_new_alloc(struct bch_fs *, struct ec_stripe_head *);

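/*
 * bch2_ec_stripe_head_get() looks up (creating on demand) the stripe head for
 * a given target/algorithm/redundancy/watermark combination; pair it with
 * bch2_ec_stripe_head_put().
 */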
void bch2_ec_stripe_head_put(struct bch_fs *, struct ec_stripe_head *);
struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *,
			unsigned, unsigned, unsigned,
			enum bch_watermark, struct closure *);

void bch2_stripes_heap_update(struct bch_fs *, struct stripe *, size_t);
void bch2_stripes_heap_del(struct bch_fs *, struct stripe *, size_t);
void bch2_stripes_heap_insert(struct bch_fs *, struct stripe *, size_t);

void bch2_do_stripe_deletes(struct bch_fs *);
void bch2_ec_do_stripe_creates(struct bch_fs *);
void bch2_ec_stripe_new_free(struct bch_fs *, struct ec_stripe_new *);

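/*
 * An ec_stripe_new carries one refcount per ec_stripe_ref: dropping the last
 * io ref kicks off stripe creation, dropping the last stripe ref frees it.
 */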
static inline void ec_stripe_new_get(struct ec_stripe_new *s,
				     enum ec_stripe_ref ref)
{
	atomic_inc(&s->ref[ref]);
}

static inline void ec_stripe_new_put(struct bch_fs *c, struct ec_stripe_new *s,
				     enum ec_stripe_ref ref)
{
	BUG_ON(atomic_read(&s->ref[ref]) <= 0);

	if (atomic_dec_and_test(&s->ref[ref]))
		switch (ref) {
		case STRIPE_REF_stripe:
			bch2_ec_stripe_new_free(c, s);
			break;
		case STRIPE_REF_io:
			bch2_ec_do_stripe_creates(c);
			break;
		default:
			BUG();
		}
}

void bch2_ec_stop_dev(struct bch_fs *, struct bch_dev *);
void bch2_fs_ec_stop(struct bch_fs *);
void bch2_fs_ec_flush(struct bch_fs *);

int bch2_stripes_read(struct bch_fs *);

void bch2_stripes_heap_to_text(struct printbuf *, struct bch_fs *);
void bch2_new_stripes_to_text(struct printbuf *, struct bch_fs *);

void bch2_fs_ec_exit(struct bch_fs *);
void bch2_fs_ec_init_early(struct bch_fs *);
int bch2_fs_ec_init(struct bch_fs *);

#endif /* _BCACHEFS_EC_H */