// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
 *
 * Code for managing the extent btree and dynamically updating the writeback
 * dirty sector count.
 */

#include "bcachefs.h"
#include "bkey_methods.h"
#include "btree_cache.h"
#include "btree_gc.h"
#include "btree_io.h"
#include "btree_iter.h"
#include "buckets.h"
#include "checksum.h"
#include "compress.h"
#include "debug.h"
#include "disk_groups.h"
#include "error.h"
#include "extents.h"
#include "inode.h"
#include "journal.h"
#include "replicas.h"
#include "super.h"
#include "super-io.h"
#include "trace.h"
#include "util.h"

static unsigned bch2_crc_field_size_max[] = {
	[BCH_EXTENT_ENTRY_crc32] = CRC32_SIZE_MAX,
	[BCH_EXTENT_ENTRY_crc64] = CRC64_SIZE_MAX,
	[BCH_EXTENT_ENTRY_crc128] = CRC128_SIZE_MAX,
};

static void bch2_extent_crc_pack(union bch_extent_crc *,
				 struct bch_extent_crc_unpacked,
				 enum bch_extent_entry_type);

struct bch_dev_io_failures *bch2_dev_io_failures(struct bch_io_failures *f,
						 unsigned dev)
{
	struct bch_dev_io_failures *i;

	for (i = f->devs; i < f->devs + f->nr; i++)
		if (i->dev == dev)
			return i;

	return NULL;
}

void bch2_mark_io_failure(struct bch_io_failures *failed,
			  struct extent_ptr_decoded *p)
{
	struct bch_dev_io_failures *f = bch2_dev_io_failures(failed, p->ptr.dev);

	if (!f) {
		BUG_ON(failed->nr >= ARRAY_SIZE(failed->devs));

		f = &failed->devs[failed->nr++];
		f->dev = p->ptr.dev;
		f->idx = p->idx;
		f->nr_failed = 1;
		f->nr_retries = 0;
	} else if (p->idx != f->idx) {
		f->idx = p->idx;
		f->nr_failed = 1;
		f->nr_retries = 0;
	} else {
		f->nr_failed++;
	}
}

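/*
 * These failure records steer read retries: bch2_bkey_pick_read_device()
 * keeps retrying the same replica index while nr_failed < nr_retries, then
 * advances past it (see the f->idx handling there).
 */
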
static inline u64 dev_latency(struct bch_fs *c, unsigned dev)
{
	struct bch_dev *ca = bch2_dev_rcu(c, dev);
	return ca ? atomic64_read(&ca->cur_latency[READ]) : S64_MAX;
}

/*
 * returns true if p1 is better than p2:
 */
static inline bool ptr_better(struct bch_fs *c,
			      const struct extent_ptr_decoded p1,
			      const struct extent_ptr_decoded p2)
{
	if (likely(!p1.idx && !p2.idx)) {
		u64 l1 = dev_latency(c, p1.ptr.dev);
		u64 l2 = dev_latency(c, p2.ptr.dev);

		/* Pick at random, biased in favor of the faster device: */

		return bch2_rand_range(l1 + l2) > l1;
	}

	if (bch2_force_reconstruct_read)
		return p1.idx > p2.idx;

	return p1.idx < p2.idx;
}

/*
 * This picks a non-stale pointer to read from, preferring replicas that
 * haven't failed recently: @failed (which may be NULL) records the
 * per-device IO errors seen so far, and is used to skip past replica
 * indexes that have already errored. If nothing better is available, a
 * pointer from a failed device may still be picked.
 */
int bch2_bkey_pick_read_device(struct bch_fs *c, struct bkey_s_c k,
			       struct bch_io_failures *failed,
			       struct extent_ptr_decoded *pick)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	struct bch_dev_io_failures *f;
	int ret = 0;

	if (k.k->type == KEY_TYPE_error)
		return -BCH_ERR_key_type_error;

	rcu_read_lock();
	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		/*
		 * Unwritten extent: no need to actually read, treat it as a
		 * hole and return 0s:
		 */
		if (p.ptr.unwritten) {
			ret = 0;
			break;
		}

		/*
		 * If there are any dirty pointers it's an error if we can't
		 * read:
		 */
		if (!ret && !p.ptr.cached)
			ret = -BCH_ERR_no_device_to_read_from;

		struct bch_dev *ca = bch2_dev_rcu(c, p.ptr.dev);

		if (p.ptr.cached && (!ca || dev_ptr_stale_rcu(ca, &p.ptr)))
			continue;

		f = failed ? bch2_dev_io_failures(failed, p.ptr.dev) : NULL;
		if (f)
			p.idx = f->nr_failed < f->nr_retries
				? f->idx
				: f->idx + 1;

		if (!p.idx && (!ca || !bch2_dev_is_readable(ca)))
			p.idx++;

		if (!p.idx && p.has_ec && bch2_force_reconstruct_read)
			p.idx++;

		if (p.idx > (unsigned) p.has_ec)
			continue;

		if (ret > 0 && !ptr_better(c, p, *pick))
			continue;

		*pick = p;
		ret = 1;
	}
	rcu_read_unlock();

	return ret;
}
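
/*
 * Calling convention sketch (hypothetical caller, not from this file):
 *
 *	struct extent_ptr_decoded pick;
 *	int ret = bch2_bkey_pick_read_device(c, k, failed, &pick);
 *
 * ret > 0: @pick is valid, read from pick.ptr; ret == 0: unwritten extent,
 * treat as a hole full of zeroes; ret < 0: no device to read from.
 */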

/* KEY_TYPE_btree_ptr: */

int bch2_btree_ptr_validate(struct bch_fs *c, struct bkey_s_c k,
			    enum bch_validate_flags flags)
{
	int ret = 0;

	bkey_fsck_err_on(bkey_val_u64s(k.k) > BCH_REPLICAS_MAX,
			 c, btree_ptr_val_too_big,
			 "value too big (%zu > %u)", bkey_val_u64s(k.k), BCH_REPLICAS_MAX);

	ret = bch2_bkey_ptrs_validate(c, k, flags);
fsck_err:
	return ret;
}

void bch2_btree_ptr_to_text(struct printbuf *out, struct bch_fs *c,
			    struct bkey_s_c k)
{
	bch2_bkey_ptrs_to_text(out, c, k);
}

int bch2_btree_ptr_v2_validate(struct bch_fs *c, struct bkey_s_c k,
			       enum bch_validate_flags flags)
{
	struct bkey_s_c_btree_ptr_v2 bp = bkey_s_c_to_btree_ptr_v2(k);
	int ret = 0;

	bkey_fsck_err_on(bkey_val_u64s(k.k) > BKEY_BTREE_PTR_VAL_U64s_MAX,
			 c, btree_ptr_v2_val_too_big,
			 "value too big (%zu > %zu)",
			 bkey_val_u64s(k.k), BKEY_BTREE_PTR_VAL_U64s_MAX);

	bkey_fsck_err_on(bpos_ge(bp.v->min_key, bp.k->p),
			 c, btree_ptr_v2_min_key_bad,
			 "min_key > key");

	if (flags & BCH_VALIDATE_write)
		bkey_fsck_err_on(!bp.v->sectors_written,
				 c, btree_ptr_v2_written_0,
				 "sectors_written == 0");

	ret = bch2_bkey_ptrs_validate(c, k, flags);
fsck_err:
	return ret;
}

void bch2_btree_ptr_v2_to_text(struct printbuf *out, struct bch_fs *c,
			       struct bkey_s_c k)
{
	struct bkey_s_c_btree_ptr_v2 bp = bkey_s_c_to_btree_ptr_v2(k);

	prt_printf(out, "seq %llx written %u min_key %s",
		   le64_to_cpu(bp.v->seq),
		   le16_to_cpu(bp.v->sectors_written),
		   BTREE_PTR_RANGE_UPDATED(bp.v) ? "R " : "");

	bch2_bpos_to_text(out, bp.v->min_key);
	prt_printf(out, " ");
	bch2_bkey_ptrs_to_text(out, c, k);
}

void bch2_btree_ptr_v2_compat(enum btree_id btree_id, unsigned version,
			      unsigned big_endian, int write,
			      struct bkey_s k)
{
	struct bkey_s_btree_ptr_v2 bp = bkey_s_to_btree_ptr_v2(k);

	compat_bpos(0, btree_id, version, big_endian, write, &bp.v->min_key);

	if (version < bcachefs_metadata_version_inode_btree_change &&
	    btree_id_is_extents(btree_id) &&
	    !bkey_eq(bp.v->min_key, POS_MIN))
		bp.v->min_key = write
			? bpos_nosnap_predecessor(bp.v->min_key)
			: bpos_nosnap_successor(bp.v->min_key);
}

/* KEY_TYPE_extent: */

bool bch2_extent_merge(struct bch_fs *c, struct bkey_s l, struct bkey_s_c r)
{
	struct bkey_ptrs l_ptrs = bch2_bkey_ptrs(l);
	struct bkey_ptrs_c r_ptrs = bch2_bkey_ptrs_c(r);
	union bch_extent_entry *en_l;
	const union bch_extent_entry *en_r;
	struct extent_ptr_decoded lp, rp;
	bool use_right_ptr;

	en_l = l_ptrs.start;
	en_r = r_ptrs.start;
	while (en_l < l_ptrs.end && en_r < r_ptrs.end) {
		if (extent_entry_type(en_l) != extent_entry_type(en_r))
			return false;

		en_l = extent_entry_next(en_l);
		en_r = extent_entry_next(en_r);
	}

	if (en_l < l_ptrs.end || en_r < r_ptrs.end)
		return false;

	en_l = l_ptrs.start;
	en_r = r_ptrs.start;
	lp.crc = bch2_extent_crc_unpack(l.k, NULL);
	rp.crc = bch2_extent_crc_unpack(r.k, NULL);

	while (__bkey_ptr_next_decode(l.k, l_ptrs.end, lp, en_l) &&
	       __bkey_ptr_next_decode(r.k, r_ptrs.end, rp, en_r)) {
		if (lp.ptr.offset + lp.crc.offset + lp.crc.live_size !=
		    rp.ptr.offset + rp.crc.offset ||
		    lp.ptr.dev != rp.ptr.dev ||
		    lp.ptr.gen != rp.ptr.gen ||
		    lp.ptr.unwritten != rp.ptr.unwritten ||
		    lp.has_ec != rp.has_ec)
			return false;

		/* Extents may not straddle buckets: */
		rcu_read_lock();
		struct bch_dev *ca = bch2_dev_rcu(c, lp.ptr.dev);
		bool same_bucket = ca && PTR_BUCKET_NR(ca, &lp.ptr) == PTR_BUCKET_NR(ca, &rp.ptr);
		rcu_read_unlock();

		if (!same_bucket)
			return false;

		if (lp.has_ec != rp.has_ec ||
		    (lp.has_ec &&
		     (lp.ec.block != rp.ec.block ||
		      lp.ec.redundancy != rp.ec.redundancy ||
		      lp.ec.idx != rp.ec.idx)))
			return false;

		if (lp.crc.compression_type != rp.crc.compression_type ||
		    lp.crc.nonce != rp.crc.nonce)
			return false;

		if (lp.crc.offset + lp.crc.live_size + rp.crc.live_size <=
		    lp.crc.uncompressed_size) {
			/* can use left extent's crc entry */
		} else if (lp.crc.live_size <= rp.crc.offset) {
			/* can use right extent's crc entry */
		} else {
			/* check if checksums can be merged: */
			if (lp.crc.csum_type != rp.crc.csum_type ||
			    lp.crc.nonce != rp.crc.nonce ||
			    crc_is_compressed(lp.crc) ||
			    !bch2_checksum_mergeable(lp.crc.csum_type))
				return false;

			if (lp.crc.offset + lp.crc.live_size != lp.crc.compressed_size ||
			    rp.crc.offset)
				return false;

			if (lp.crc.csum_type &&
			    lp.crc.uncompressed_size +
			    rp.crc.uncompressed_size > (c->opts.encoded_extent_max >> 9))
				return false;
		}

		en_l = extent_entry_next(en_l);
		en_r = extent_entry_next(en_r);
	}

	en_l = l_ptrs.start;
	en_r = r_ptrs.start;
	while (en_l < l_ptrs.end && en_r < r_ptrs.end) {
		if (extent_entry_is_crc(en_l)) {
			struct bch_extent_crc_unpacked crc_l = bch2_extent_crc_unpack(l.k, entry_to_crc(en_l));
			struct bch_extent_crc_unpacked crc_r = bch2_extent_crc_unpack(r.k, entry_to_crc(en_r));

			if (crc_l.uncompressed_size + crc_r.uncompressed_size >
			    bch2_crc_field_size_max[extent_entry_type(en_l)])
				return false;
		}

		en_l = extent_entry_next(en_l);
		en_r = extent_entry_next(en_r);
	}

	use_right_ptr = false;
	en_l = l_ptrs.start;
	en_r = r_ptrs.start;
	while (en_l < l_ptrs.end) {
		if (extent_entry_type(en_l) == BCH_EXTENT_ENTRY_ptr &&
		    use_right_ptr)
			en_l->ptr = en_r->ptr;

		if (extent_entry_is_crc(en_l)) {
			struct bch_extent_crc_unpacked crc_l =
				bch2_extent_crc_unpack(l.k, entry_to_crc(en_l));
			struct bch_extent_crc_unpacked crc_r =
				bch2_extent_crc_unpack(r.k, entry_to_crc(en_r));

			use_right_ptr = false;

			if (crc_l.offset + crc_l.live_size + crc_r.live_size <=
			    crc_l.uncompressed_size) {
				/* can use left extent's crc entry */
			} else if (crc_l.live_size <= crc_r.offset) {
				/* can use right extent's crc entry */
				crc_r.offset -= crc_l.live_size;
				bch2_extent_crc_pack(entry_to_crc(en_l), crc_r,
						     extent_entry_type(en_l));
				use_right_ptr = true;
			} else {
				crc_l.csum = bch2_checksum_merge(crc_l.csum_type,
								 crc_l.csum,
								 crc_r.csum,
								 crc_r.uncompressed_size << 9);

				crc_l.uncompressed_size += crc_r.uncompressed_size;
				crc_l.compressed_size += crc_r.compressed_size;
				bch2_extent_crc_pack(entry_to_crc(en_l), crc_l,
						     extent_entry_type(en_l));
			}
		}

		en_l = extent_entry_next(en_l);
		en_r = extent_entry_next(en_r);
	}

	bch2_key_resize(l.k, l.k->size + r.k->size);
	return true;
}

/* KEY_TYPE_reservation: */

int bch2_reservation_validate(struct bch_fs *c, struct bkey_s_c k,
			      enum bch_validate_flags flags)
{
	struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);
	int ret = 0;

	bkey_fsck_err_on(!r.v->nr_replicas || r.v->nr_replicas > BCH_REPLICAS_MAX,
			 c, reservation_key_nr_replicas_invalid,
			 "invalid nr_replicas (%u)", r.v->nr_replicas);
fsck_err:
	return ret;
}

void bch2_reservation_to_text(struct printbuf *out, struct bch_fs *c,
			      struct bkey_s_c k)
{
	struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);

	prt_printf(out, "generation %u replicas %u",
		   le32_to_cpu(r.v->generation),
		   r.v->nr_replicas);
}

bool bch2_reservation_merge(struct bch_fs *c, struct bkey_s _l, struct bkey_s_c _r)
{
	struct bkey_s_reservation l = bkey_s_to_reservation(_l);
	struct bkey_s_c_reservation r = bkey_s_c_to_reservation(_r);

	if (l.v->generation != r.v->generation ||
	    l.v->nr_replicas != r.v->nr_replicas)
		return false;

	bch2_key_resize(l.k, l.k->size + r.k->size);
	return true;
}

/* Extent checksum entries: */

/* returns true if not equal */
static inline bool bch2_crc_unpacked_cmp(struct bch_extent_crc_unpacked l,
					 struct bch_extent_crc_unpacked r)
{
	return (l.csum_type != r.csum_type ||
		l.compression_type != r.compression_type ||
		l.compressed_size != r.compressed_size ||
		l.uncompressed_size != r.uncompressed_size ||
		l.offset != r.offset ||
		l.live_size != r.live_size ||
		l.nonce != r.nonce ||
		bch2_crc_cmp(l.csum, r.csum));
}

static inline bool can_narrow_crc(struct bch_extent_crc_unpacked u,
				  struct bch_extent_crc_unpacked n)
{
	return !crc_is_compressed(u) &&
		u.csum_type &&
		u.uncompressed_size > u.live_size &&
		bch2_csum_type_is_encryption(u.csum_type) ==
		bch2_csum_type_is_encryption(n.csum_type);
}

bool bch2_can_narrow_extent_crcs(struct bkey_s_c k,
				 struct bch_extent_crc_unpacked n)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	struct bch_extent_crc_unpacked crc;
	const union bch_extent_entry *i;

	if (!n.csum_type)
		return false;

	bkey_for_each_crc(k.k, ptrs, crc, i)
		if (can_narrow_crc(crc, n))
			return true;

	return false;
}

/*
 * We're writing another replica for this extent, so while we've got the data in
 * memory we'll be computing a new checksum for the currently live data.
 *
 * If there are other replicas we aren't moving, and they are checksummed but
 * not compressed, we can modify them to point to only the data that is
 * currently live (so that readers won't have to bounce) while we've got the
 * checksum we need:
 */
bool bch2_bkey_narrow_crcs(struct bkey_i *k, struct bch_extent_crc_unpacked n)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
	struct bch_extent_crc_unpacked u;
	struct extent_ptr_decoded p;
	union bch_extent_entry *i;
	bool ret = false;

	/* Find a checksum entry that covers only live data: */
	if (!n.csum_type) {
		bkey_for_each_crc(&k->k, ptrs, u, i)
			if (!crc_is_compressed(u) &&
			    u.csum_type &&
			    u.live_size == u.uncompressed_size) {
				n = u;
				goto found;
			}
		return false;
	}
found:
	BUG_ON(crc_is_compressed(n));
	BUG_ON(n.offset);
	BUG_ON(n.live_size != k->k.size);

restart_narrow_pointers:
	ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));

	bkey_for_each_ptr_decode(&k->k, ptrs, p, i)
		if (can_narrow_crc(p.crc, n)) {
			bch2_bkey_drop_ptr_noerror(bkey_i_to_s(k), &i->ptr);
			p.ptr.offset += p.crc.offset;
			p.crc = n;
			bch2_extent_ptr_decoded_append(k, &p);
			ret = true;
			goto restart_narrow_pointers;
		}

	return ret;
}

static void bch2_extent_crc_pack(union bch_extent_crc *dst,
				 struct bch_extent_crc_unpacked src,
				 enum bch_extent_entry_type type)
{
#define set_common_fields(_dst, _src)					\
	_dst.type		= 1 << type;				\
	_dst.csum_type		= _src.csum_type,			\
	_dst.compression_type	= _src.compression_type,		\
	_dst._compressed_size	= _src.compressed_size - 1,		\
	_dst._uncompressed_size	= _src.uncompressed_size - 1,		\
	_dst.offset		= _src.offset

	switch (type) {
	case BCH_EXTENT_ENTRY_crc32:
		set_common_fields(dst->crc32, src);
		dst->crc32.csum = (u32 __force) *((__le32 *) &src.csum.lo);
		break;
	case BCH_EXTENT_ENTRY_crc64:
		set_common_fields(dst->crc64, src);
		dst->crc64.nonce = src.nonce;
		dst->crc64.csum_lo = (u64 __force) src.csum.lo;
		dst->crc64.csum_hi = (u64 __force) *((__le16 *) &src.csum.hi);
		break;
	case BCH_EXTENT_ENTRY_crc128:
		set_common_fields(dst->crc128, src);
		dst->crc128.nonce = src.nonce;
		dst->crc128.csum = src.csum;
		break;
	default:
		BUG();
	}
#undef set_common_fields
}

void bch2_extent_crc_append(struct bkey_i *k,
			    struct bch_extent_crc_unpacked new)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
	union bch_extent_crc *crc = (void *) ptrs.end;
	enum bch_extent_entry_type type;

	if (bch_crc_bytes[new.csum_type] <= 4 &&
	    new.uncompressed_size <= CRC32_SIZE_MAX &&
	    new.nonce <= CRC32_NONCE_MAX)
		type = BCH_EXTENT_ENTRY_crc32;
	else if (bch_crc_bytes[new.csum_type] <= 10 &&
		 new.uncompressed_size <= CRC64_SIZE_MAX &&
		 new.nonce <= CRC64_NONCE_MAX)
		type = BCH_EXTENT_ENTRY_crc64;
	else if (bch_crc_bytes[new.csum_type] <= 16 &&
		 new.uncompressed_size <= CRC128_SIZE_MAX &&
		 new.nonce <= CRC128_NONCE_MAX)
		type = BCH_EXTENT_ENTRY_crc128;
	else
		BUG();

	bch2_extent_crc_pack(crc, new, type);

	k->k.u64s += extent_entry_u64s(ptrs.end);

	EBUG_ON(bkey_val_u64s(&k->k) > BKEY_EXTENT_VAL_U64s_MAX);
}
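
/*
 * The packed crc entry variant is chosen by whatever fits: e.g. a 4-byte
 * checksum on an extent of at most CRC32_SIZE_MAX sectors with a small
 * enough nonce packs into the compact crc32 entry; larger checksums,
 * extents or nonces fall back to crc64 or crc128.
 */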

/* Generic code for keys with pointers: */

unsigned bch2_bkey_nr_ptrs(struct bkey_s_c k)
{
	return bch2_bkey_devs(k).nr;
}

unsigned bch2_bkey_nr_ptrs_allocated(struct bkey_s_c k)
{
	return k.k->type == KEY_TYPE_reservation
		? bkey_s_c_to_reservation(k).v->nr_replicas
		: bch2_bkey_dirty_devs(k).nr;
}

unsigned bch2_bkey_nr_ptrs_fully_allocated(struct bkey_s_c k)
{
	unsigned ret = 0;

	if (k.k->type == KEY_TYPE_reservation) {
		ret = bkey_s_c_to_reservation(k).v->nr_replicas;
	} else {
		struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
		const union bch_extent_entry *entry;
		struct extent_ptr_decoded p;

		bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
			ret += !p.ptr.cached && !crc_is_compressed(p.crc);
	}

	return ret;
}

unsigned bch2_bkey_sectors_compressed(struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	unsigned ret = 0;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
		if (!p.ptr.cached && crc_is_compressed(p.crc))
			ret += p.crc.compressed_size;

	return ret;
}

bool bch2_bkey_is_incompressible(struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct bch_extent_crc_unpacked crc;

	bkey_for_each_crc(k.k, ptrs, crc, entry)
		if (crc.compression_type == BCH_COMPRESSION_TYPE_incompressible)
			return true;
	return false;
}

unsigned bch2_bkey_replicas(struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p = { 0 };
	unsigned replicas = 0;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		if (p.ptr.cached)
			continue;

		if (p.has_ec)
			replicas += p.ec.redundancy;

		replicas++;
	}

	return replicas;
}
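
/*
 * Pointer durability: the number of device failures a given pointer can
 * survive. Cached pointers count for 0, erasure coded pointers count for
 * the stripe redundancy plus one, and anything else counts for the
 * device's configured durability:
 */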

static inline unsigned __extent_ptr_durability(struct bch_dev *ca, struct extent_ptr_decoded *p)
{
	if (p->ptr.cached)
		return 0;

	return p->has_ec
		? p->ec.redundancy + 1
		: ca->mi.durability;
}

unsigned bch2_extent_ptr_desired_durability(struct bch_fs *c, struct extent_ptr_decoded *p)
{
	struct bch_dev *ca = bch2_dev_rcu(c, p->ptr.dev);

	return ca ? __extent_ptr_durability(ca, p) : 0;
}

unsigned bch2_extent_ptr_durability(struct bch_fs *c, struct extent_ptr_decoded *p)
{
	struct bch_dev *ca = bch2_dev_rcu(c, p->ptr.dev);

	if (!ca || ca->mi.state == BCH_MEMBER_STATE_failed)
		return 0;

	return __extent_ptr_durability(ca, p);
}

unsigned bch2_bkey_durability(struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	unsigned durability = 0;

	rcu_read_lock();
	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
		durability += bch2_extent_ptr_durability(c, &p);
	rcu_read_unlock();

	return durability;
}

static unsigned bch2_bkey_durability_safe(struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	unsigned durability = 0;

	rcu_read_lock();
	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
		if (p.ptr.dev < c->sb.nr_devices && c->devs[p.ptr.dev])
			durability += bch2_extent_ptr_durability(c, &p);
	rcu_read_unlock();

	return durability;
}

void bch2_bkey_extent_entry_drop(struct bkey_i *k, union bch_extent_entry *entry)
{
	union bch_extent_entry *end = bkey_val_end(bkey_i_to_s(k));
	union bch_extent_entry *next = extent_entry_next(entry);

	memmove_u64s(entry, next, (u64 *) end - (u64 *) next);
	k->k.u64s -= extent_entry_u64s(entry);
}

void bch2_extent_ptr_decoded_append(struct bkey_i *k,
				    struct extent_ptr_decoded *p)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
	struct bch_extent_crc_unpacked crc =
		bch2_extent_crc_unpack(&k->k, NULL);
	union bch_extent_entry *pos;

	if (!bch2_crc_unpacked_cmp(crc, p->crc)) {
		pos = ptrs.start;
		goto found;
	}

	bkey_for_each_crc(&k->k, ptrs, crc, pos)
		if (!bch2_crc_unpacked_cmp(crc, p->crc)) {
			pos = extent_entry_next(pos);
			goto found;
		}

	bch2_extent_crc_append(k, p->crc);
	pos = bkey_val_end(bkey_i_to_s(k));
found:
	p->ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;
	__extent_entry_insert(k, pos, to_entry(&p->ptr));

	if (p->has_ec) {
		p->ec.type = 1 << BCH_EXTENT_ENTRY_stripe_ptr;
		__extent_entry_insert(k, pos, to_entry(&p->ec));
	}
}

static union bch_extent_entry *extent_entry_prev(struct bkey_ptrs ptrs,
						 union bch_extent_entry *entry)
{
	union bch_extent_entry *i = ptrs.start;

	if (i == entry)
		return NULL;

	while (extent_entry_next(i) != entry)
		i = extent_entry_next(i);
	return i;
}

/*
 * Drops @ptr from @k, along with any checksum or stripe entries that only
 * applied to that pointer:
 */
void bch2_bkey_drop_ptr_noerror(struct bkey_s k, struct bch_extent_ptr *ptr)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
	union bch_extent_entry *entry = to_entry(ptr), *next;
	bool drop_crc = true;

	if (k.k->type == KEY_TYPE_stripe) {
		ptr->dev = BCH_SB_MEMBER_INVALID;
		return;
	}

	EBUG_ON(ptr < &ptrs.start->ptr ||
		ptr >= &ptrs.end->ptr);
	EBUG_ON(ptr->type != 1 << BCH_EXTENT_ENTRY_ptr);

	for (next = extent_entry_next(entry);
	     next != ptrs.end;
	     next = extent_entry_next(next)) {
		if (extent_entry_is_crc(next)) {
			break;
		} else if (extent_entry_is_ptr(next)) {
			drop_crc = false;
			break;
		}
	}

	extent_entry_drop(k, entry);

	while ((entry = extent_entry_prev(ptrs, entry))) {
		if (extent_entry_is_ptr(entry))
			break;

		if ((extent_entry_is_crc(entry) && drop_crc) ||
		    extent_entry_is_stripe_ptr(entry))
			extent_entry_drop(k, entry);
	}
}

void bch2_bkey_drop_ptr(struct bkey_s k, struct bch_extent_ptr *ptr)
{
	if (k.k->type != KEY_TYPE_stripe) {
		struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k.s_c);
		const union bch_extent_entry *entry;
		struct extent_ptr_decoded p;

		bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
			if (p.ptr.dev == ptr->dev && p.has_ec) {
				ptr->dev = BCH_SB_MEMBER_INVALID;
				return;
			}
	}

	bool have_dirty = bch2_bkey_dirty_devs(k.s_c).nr;

	bch2_bkey_drop_ptr_noerror(k, ptr);

	/*
	 * If we deleted all the dirty pointers and there's still cached
	 * pointers, we could set the cached pointers to dirty if they're not
	 * stale - but to do that correctly we'd need to grab an open_bucket
	 * reference so that we don't race with bucket reuse:
	 */
	if (have_dirty &&
	    !bch2_bkey_dirty_devs(k.s_c).nr) {
		k.k->type = KEY_TYPE_error;
		set_bkey_val_u64s(k.k, 0);
	} else if (!bch2_bkey_nr_ptrs(k.s_c)) {
		k.k->type = KEY_TYPE_deleted;
		set_bkey_val_u64s(k.k, 0);
	}
}

void bch2_bkey_drop_device(struct bkey_s k, unsigned dev)
{
	bch2_bkey_drop_ptrs(k, ptr, ptr->dev == dev);
}

void bch2_bkey_drop_device_noerror(struct bkey_s k, unsigned dev)
{
	bch2_bkey_drop_ptrs_noerror(k, ptr, ptr->dev == dev);
}

const struct bch_extent_ptr *bch2_bkey_has_device_c(struct bkey_s_c k, unsigned dev)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);

	bkey_for_each_ptr(ptrs, ptr)
		if (ptr->dev == dev)
			return ptr;

	return NULL;
}

bool bch2_bkey_has_target(struct bch_fs *c, struct bkey_s_c k, unsigned target)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	struct bch_dev *ca;
	bool ret = false;

	rcu_read_lock();
	bkey_for_each_ptr(ptrs, ptr)
		if (bch2_dev_in_target(c, ptr->dev, target) &&
		    (ca = bch2_dev_rcu(c, ptr->dev)) &&
		    (!ptr->cached ||
		     !dev_ptr_stale_rcu(ca, ptr))) {
			ret = true;
			break;
		}
	rcu_read_unlock();

	return ret;
}

bool bch2_bkey_matches_ptr(struct bch_fs *c, struct bkey_s_c k,
			   struct bch_extent_ptr m, u64 offset)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
		if (p.ptr.dev == m.dev &&
		    p.ptr.gen == m.gen &&
		    (s64) p.ptr.offset + p.crc.offset - bkey_start_offset(k.k) ==
		    (s64) m.offset - offset)
			return true;

	return false;
}

/*
 * Returns true if two extents refer to the same data:
 */
bool bch2_extents_match(struct bkey_s_c k1, struct bkey_s_c k2)
{
	if (k1.k->type != k2.k->type)
		return false;

	if (bkey_extent_is_direct_data(k1.k)) {
		struct bkey_ptrs_c ptrs1 = bch2_bkey_ptrs_c(k1);
		struct bkey_ptrs_c ptrs2 = bch2_bkey_ptrs_c(k2);
		const union bch_extent_entry *entry1, *entry2;
		struct extent_ptr_decoded p1, p2;

		if (bkey_extent_is_unwritten(k1) != bkey_extent_is_unwritten(k2))
			return false;

		bkey_for_each_ptr_decode(k1.k, ptrs1, p1, entry1)
			bkey_for_each_ptr_decode(k2.k, ptrs2, p2, entry2)
				if (p1.ptr.dev == p2.ptr.dev &&
				    p1.ptr.gen == p2.ptr.gen &&

				    /*
				     * This checks that the two pointers point
				     * to the same region on disk - adjusting
				     * for the difference in where the extents
				     * start, since one may have been trimmed:
				     */
				    (s64) p1.ptr.offset + p1.crc.offset - bkey_start_offset(k1.k) ==
				    (s64) p2.ptr.offset + p2.crc.offset - bkey_start_offset(k2.k) &&

				    /*
				     * This additionally checks that the
				     * extents overlap on disk, since the
				     * previous check may trigger spuriously
				     * when one extent is immediately partially
				     * overwritten with another extent (so that
				     * on disk they are adjacent) and
				     * compression is in use:
				     */
				    ((p1.ptr.offset >= p2.ptr.offset &&
				      p1.ptr.offset < p2.ptr.offset + p2.crc.compressed_size) ||
				     (p2.ptr.offset >= p1.ptr.offset &&
				      p2.ptr.offset < p1.ptr.offset + p1.crc.compressed_size)))
					return true;

		return false;
	} else {
		/* KEY_TYPE_deleted, etc. */
		return true;
	}
}

struct bch_extent_ptr *
bch2_extent_has_ptr(struct bkey_s_c k1, struct extent_ptr_decoded p1, struct bkey_s k2)
{
	struct bkey_ptrs ptrs2 = bch2_bkey_ptrs(k2);
	union bch_extent_entry *entry2;
	struct extent_ptr_decoded p2;

	bkey_for_each_ptr_decode(k2.k, ptrs2, p2, entry2)
		if (p1.ptr.dev == p2.ptr.dev &&
		    p1.ptr.gen == p2.ptr.gen &&
		    (s64) p1.ptr.offset + p1.crc.offset - bkey_start_offset(k1.k) ==
		    (s64) p2.ptr.offset + p2.crc.offset - bkey_start_offset(k2.k))
			return &entry2->ptr;

	return NULL;
}

void bch2_extent_ptr_set_cached(struct bkey_s k, struct bch_extent_ptr *ptr)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
	union bch_extent_entry *entry;
	union bch_extent_entry *ec = NULL;

	bkey_extent_entry_for_each(ptrs, entry) {
		if (&entry->ptr == ptr) {
			ptr->cached = true;
			if (ec)
				extent_entry_drop(k, ec);
			return;
		}

		if (extent_entry_is_stripe_ptr(entry))
			ec = entry;
		else if (extent_entry_is_ptr(entry))
			ec = NULL;
	}

	BUG();
}

/*
 * bch2_extent_normalize - clean up an extent, dropping stale pointers etc.
 *
 * Returns true if @k should be dropped entirely
 *
 * For existing keys, only called when btree nodes are being rewritten, not when
 * they're merely being compacted/resorted in memory.
 */
bool bch2_extent_normalize(struct bch_fs *c, struct bkey_s k)
{
	struct bch_dev *ca;

	rcu_read_lock();
	bch2_bkey_drop_ptrs(k, ptr,
		ptr->cached &&
		(ca = bch2_dev_rcu(c, ptr->dev)) &&
		dev_ptr_stale_rcu(ca, ptr) > 0);
	rcu_read_unlock();

	return bkey_deleted(k.k);
}

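/*
 * Prints one pointer; the format below is, roughly,
 * "ptr: dev:bucket:offset gen N" plus flags (values here illustrative
 * only): e.g. "ptr: 0:3042:128 gen 7 cached".
 */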
void bch2_extent_ptr_to_text(struct printbuf *out, struct bch_fs *c, const struct bch_extent_ptr *ptr)
{
	out->atomic++;
	rcu_read_lock();
	struct bch_dev *ca = bch2_dev_rcu_noerror(c, ptr->dev);
	if (!ca) {
		prt_printf(out, "ptr: %u:%llu gen %u%s", ptr->dev,
			   (u64) ptr->offset, ptr->gen,
			   ptr->cached ? " cached" : "");
	} else {
		u32 offset;
		u64 b = sector_to_bucket_and_offset(ca, ptr->offset, &offset);

		prt_printf(out, "ptr: %u:%llu:%u gen %u",
			   ptr->dev, b, offset, ptr->gen);
		if (ca->mi.durability != 1)
			prt_printf(out, " d=%u", ca->mi.durability);
		if (ptr->cached)
			prt_str(out, " cached");
		if (ptr->unwritten)
			prt_str(out, " unwritten");
		int stale = dev_ptr_stale_rcu(ca, ptr);
		if (stale > 0)
			prt_printf(out, " stale");
		else if (stale)
			prt_printf(out, " invalid");
	}
	rcu_read_unlock();
	--out->atomic;
}

void bch2_extent_crc_unpacked_to_text(struct printbuf *out, struct bch_extent_crc_unpacked *crc)
{
	prt_printf(out, "crc: c_size %u size %u offset %u nonce %u csum ",
		   crc->compressed_size,
		   crc->uncompressed_size,
		   crc->offset, crc->nonce);
	bch2_prt_csum_type(out, crc->csum_type);
	prt_printf(out, " %0llx:%0llx ", crc->csum.hi, crc->csum.lo);
	prt_str(out, " compress ");
	bch2_prt_compression_type(out, crc->compression_type);
}

void bch2_bkey_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
			    struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	bool first = true;

	if (c)
		prt_printf(out, "durability: %u ", bch2_bkey_durability_safe(c, k));

	bkey_extent_entry_for_each(ptrs, entry) {
		if (!first)
			prt_printf(out, " ");

		switch (__extent_entry_type(entry)) {
		case BCH_EXTENT_ENTRY_ptr:
			bch2_extent_ptr_to_text(out, c, entry_to_ptr(entry));
			break;

		case BCH_EXTENT_ENTRY_crc32:
		case BCH_EXTENT_ENTRY_crc64:
		case BCH_EXTENT_ENTRY_crc128: {
			struct bch_extent_crc_unpacked crc =
				bch2_extent_crc_unpack(k.k, entry_to_crc(entry));

			bch2_extent_crc_unpacked_to_text(out, &crc);
			break;
		}
		case BCH_EXTENT_ENTRY_stripe_ptr: {
			const struct bch_extent_stripe_ptr *ec = &entry->stripe_ptr;

			prt_printf(out, "ec: idx %llu block %u",
				   (u64) ec->idx, ec->block);
			break;
		}
		case BCH_EXTENT_ENTRY_rebalance: {
			const struct bch_extent_rebalance *r = &entry->rebalance;

			prt_str(out, "rebalance: target ");
			if (c)
				bch2_target_to_text(out, c, r->target);
			else
				prt_printf(out, "%u", r->target);
			prt_str(out, " compression ");
			bch2_compression_opt_to_text(out, r->compression);
			break;
		}
		default:
			prt_printf(out, "(invalid extent entry %.16llx)", *((u64 *) entry));
			return;
		}

		first = false;
	}
}

static int extent_ptr_validate(struct bch_fs *c,
			       struct bkey_s_c k,
			       enum bch_validate_flags flags,
			       const struct bch_extent_ptr *ptr,
			       unsigned size_ondisk,
			       bool metadata)
{
	int ret = 0;

	/* bad pointers are repaired by check_fix_ptrs(): */
	rcu_read_lock();
	struct bch_dev *ca = bch2_dev_rcu_noerror(c, ptr->dev);
	if (!ca) {
		rcu_read_unlock();
		return 0;
	}
	u32 bucket_offset;
	u64 bucket = sector_to_bucket_and_offset(ca, ptr->offset, &bucket_offset);
	unsigned first_bucket = ca->mi.first_bucket;
	u64 nbuckets = ca->mi.nbuckets;
	unsigned bucket_size = ca->mi.bucket_size;
	rcu_read_unlock();

	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	bkey_for_each_ptr(ptrs, ptr2)
		bkey_fsck_err_on(ptr != ptr2 && ptr->dev == ptr2->dev,
				 c, ptr_to_duplicate_device,
				 "multiple pointers to same device (%u)", ptr->dev);

	bkey_fsck_err_on(bucket >= nbuckets,
			 c, ptr_after_last_bucket,
			 "pointer past last bucket (%llu > %llu)", bucket, nbuckets);
	bkey_fsck_err_on(bucket < first_bucket,
			 c, ptr_before_first_bucket,
			 "pointer before first bucket (%llu < %u)", bucket, first_bucket);
	bkey_fsck_err_on(bucket_offset + size_ondisk > bucket_size,
			 c, ptr_spans_multiple_buckets,
			 "pointer spans multiple buckets (%u + %u > %u)",
			 bucket_offset, size_ondisk, bucket_size);
fsck_err:
	return ret;
}
1c6fdbd8 1168
d97de0d0
KO
1169int bch2_bkey_ptrs_validate(struct bch_fs *c, struct bkey_s_c k,
1170 enum bch_validate_flags flags)
4de77495
KO
1171{
1172 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1173 const union bch_extent_entry *entry;
1174 struct bch_extent_crc_unpacked crc;
1175 unsigned size_ondisk = k.k->size;
4de77495 1176 unsigned nonce = UINT_MAX;
b5f73fd7 1177 unsigned nr_ptrs = 0;
b65db750
KO
1178 bool have_written = false, have_unwritten = false, have_ec = false, crc_since_last_ptr = false;
1179 int ret = 0;
4de77495 1180
f0ac7df2 1181 if (bkey_is_btree_ptr(k.k))
8244f320 1182 size_ondisk = btree_sectors(c);
4de77495
KO
1183
1184 bkey_extent_entry_for_each(ptrs, entry) {
d97de0d0
KO
1185 bkey_fsck_err_on(__extent_entry_type(entry) >= BCH_EXTENT_ENTRY_MAX,
1186 c, extent_ptrs_invalid_entry,
1187 "invalid extent entry type (got %u, max %u)",
1188 __extent_entry_type(entry), BCH_EXTENT_ENTRY_MAX);
6009b4e5 1189
b65db750 1190 bkey_fsck_err_on(bkey_is_btree_ptr(k.k) &&
d97de0d0
KO
1191 !extent_entry_is_ptr(entry),
1192 c, btree_ptr_has_non_ptr,
b65db750 1193 "has non ptr field");
4de77495
KO
1194
1195 switch (extent_entry_type(entry)) {
1196 case BCH_EXTENT_ENTRY_ptr:
d97de0d0 1197 ret = extent_ptr_validate(c, k, flags, &entry->ptr, size_ondisk, false);
f0ac7df2
KO
1198 if (ret)
1199 return ret;
79203111 1200
d97de0d0
KO
1201 bkey_fsck_err_on(entry->ptr.cached && have_ec,
1202 c, ptr_cached_and_erasure_coded,
b65db750 1203 "cached, erasure coded ptr");
79203111 1204
b65db750
KO
1205 if (!entry->ptr.unwritten)
1206 have_written = true;
1207 else
1208 have_unwritten = true;
c9163bb0 1209
c9163bb0 1210 have_ec = false;
43b0e878 1211 crc_since_last_ptr = false;
b5f73fd7 1212 nr_ptrs++;
6009b4e5
KO
1213 break;
1214 case BCH_EXTENT_ENTRY_crc32:
1215 case BCH_EXTENT_ENTRY_crc64:
1216 case BCH_EXTENT_ENTRY_crc128:
4de77495 1217 crc = bch2_extent_crc_unpack(k.k, entry_to_crc(entry));
6009b4e5 1218
d97de0d0
KO
1219 bkey_fsck_err_on(crc.offset + crc.live_size > crc.uncompressed_size,
1220 c, ptr_crc_uncompressed_size_too_small,
b65db750 1221 "checksum offset + key size > uncompressed size");
d97de0d0
KO
1222 bkey_fsck_err_on(!bch2_checksum_type_valid(c, crc.csum_type),
1223 c, ptr_crc_csum_type_unknown,
b65db750 1224 "invalid checksum type");
d97de0d0
KO
1225 bkey_fsck_err_on(crc.compression_type >= BCH_COMPRESSION_TYPE_NR,
1226 c, ptr_crc_compression_type_unknown,
b65db750 1227 "invalid compression type");
6009b4e5 1228
4de77495
KO
1229 if (bch2_csum_type_is_encryption(crc.csum_type)) {
1230 if (nonce == UINT_MAX)
1231 nonce = crc.offset + crc.nonce;
b65db750 1232 else if (nonce != crc.offset + crc.nonce)
d97de0d0 1233 bkey_fsck_err(c, ptr_crc_nonce_mismatch,
b65db750 1234 "incorrect nonce");
4de77495 1235 }
43b0e878 1236
d97de0d0
KO
1237 bkey_fsck_err_on(crc_since_last_ptr,
1238 c, ptr_crc_redundant,
b65db750 1239 "redundant crc entry");
43b0e878 1240 crc_since_last_ptr = true;
9db2f860 1241
b65db750
KO
1242 bkey_fsck_err_on(crc_is_encoded(crc) &&
1243 (crc.uncompressed_size > c->opts.encoded_extent_max >> 9) &&
d97de0d0
KO
1244 (flags & (BCH_VALIDATE_write|BCH_VALIDATE_commit)),
1245 c, ptr_crc_uncompressed_size_too_big,
b65db750 1246 "too large encoded extent");
9db2f860 1247
b65db750 1248 size_ondisk = crc.compressed_size;
4de77495
KO
1249 break;
1250 case BCH_EXTENT_ENTRY_stripe_ptr:
d97de0d0
KO
1251 bkey_fsck_err_on(have_ec,
1252 c, ptr_stripe_redundant,
b65db750 1253 "redundant stripe entry");
c9163bb0 1254 have_ec = true;
6009b4e5 1255 break;
fb3f57bb 1256 case BCH_EXTENT_ENTRY_rebalance: {
d97de0d0
KO
1257 /*
1258 * this shouldn't be a fsck error, for forward
1259 * compatibility; the rebalance code should just refetch
1260 * the compression opt if it's unknown
1261 */
1262#if 0
fb3f57bb
KO
1263 const struct bch_extent_rebalance *r = &entry->rebalance;
1264
1265 if (!bch2_compression_opt_valid(r->compression)) {
1266 struct bch_compression_opt opt = __bch2_compression_decode(r->compression);
1267 prt_printf(err, "invalid compression opt %u:%u",
1268 opt.type, opt.level);
1269 return -BCH_ERR_invalid_bkey;
1270 }
d97de0d0 1271#endif
2766876d 1272 break;
6009b4e5 1273 }
fb3f57bb 1274 }
1c6fdbd8
KO
1275 }
1276
d97de0d0
KO
1277 bkey_fsck_err_on(!nr_ptrs,
1278 c, extent_ptrs_no_ptrs,
b65db750 1279 "no ptrs");
d97de0d0
KO
1280 bkey_fsck_err_on(nr_ptrs > BCH_BKEY_PTRS_MAX,
1281 c, extent_ptrs_too_many_ptrs,
b65db750 1282 "too many ptrs: %u > %u", nr_ptrs, BCH_BKEY_PTRS_MAX);
d97de0d0
KO
1283 bkey_fsck_err_on(have_written && have_unwritten,
1284 c, extent_ptrs_written_and_unwritten,
b65db750 1285 "extent with unwritten and written ptrs");
d97de0d0
KO
1286 bkey_fsck_err_on(k.k->type != KEY_TYPE_extent && have_unwritten,
1287 c, extent_ptrs_unwritten,
b65db750 1288 "has unwritten ptrs");
d97de0d0
KO
1289 bkey_fsck_err_on(crc_since_last_ptr,
1290 c, extent_ptrs_redundant_crc,
b65db750 1291 "redundant crc entry");
d97de0d0
KO
1292 bkey_fsck_err_on(have_ec,
1293 c, extent_ptrs_redundant_stripe,
b65db750
KO
1294 "redundant stripe entry");
1295fsck_err:
1296 return ret;
4de77495 1297}
6009b4e5 1298
1f49dafc 1299void bch2_ptr_swab(struct bkey_s k)
4de77495 1300{
1f49dafc 1301 struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
4de77495 1302 union bch_extent_entry *entry;
1f49dafc 1303 u64 *d;
6009b4e5 1304
1f49dafc
KO
1305 for (d = (u64 *) ptrs.start;
1306 d != (u64 *) ptrs.end;
1307 d++)
1308 *d = swab64(*d);
6009b4e5 1309
1f49dafc
KO
1310 for (entry = ptrs.start;
1311 entry < ptrs.end;
4de77495
KO
1312 entry = extent_entry_next(entry)) {
1313 switch (extent_entry_type(entry)) {
1314 case BCH_EXTENT_ENTRY_ptr:
1315 break;
1316 case BCH_EXTENT_ENTRY_crc32:
1317 entry->crc32.csum = swab32(entry->crc32.csum);
1318 break;
1319 case BCH_EXTENT_ENTRY_crc64:
1320 entry->crc64.csum_hi = swab16(entry->crc64.csum_hi);
1321 entry->crc64.csum_lo = swab64(entry->crc64.csum_lo);
1322 break;
1323 case BCH_EXTENT_ENTRY_crc128:
1324 entry->crc128.csum.hi = (__force __le64)
1325 swab64((__force u64) entry->crc128.csum.hi);
1326 entry->crc128.csum.lo = (__force __le64)
1327 swab64((__force u64) entry->crc128.csum.lo);
1328 break;
1329 case BCH_EXTENT_ENTRY_stripe_ptr:
1330 break;
2766876d
KO
1331 case BCH_EXTENT_ENTRY_rebalance:
1332 break;
4de77495 1333 }
1c6fdbd8 1334 }
1c6fdbd8
KO
1335}

const struct bch_extent_rebalance *bch2_bkey_rebalance_opts(struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;

	bkey_extent_entry_for_each(ptrs, entry)
		if (__extent_entry_type(entry) == BCH_EXTENT_ENTRY_rebalance)
			return &entry->rebalance;

	return NULL;
}
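
/*
 * bch2_bkey_ptrs_need_rebalance() returns a bitmask of pointers that don't
 * match the given target/compression options: bit i set means the i'th
 * pointer needs rewriting, so e.g. a return value of 0x3 (illustrative)
 * means pointers 0 and 1 both need to be moved or recompressed.
 */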

unsigned bch2_bkey_ptrs_need_rebalance(struct bch_fs *c, struct bkey_s_c k,
				       unsigned target, unsigned compression)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	unsigned rewrite_ptrs = 0;

	if (compression) {
		unsigned compression_type = bch2_compression_opt_to_type(compression);
		const union bch_extent_entry *entry;
		struct extent_ptr_decoded p;
		unsigned i = 0;

		bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
			if (p.crc.compression_type == BCH_COMPRESSION_TYPE_incompressible ||
			    p.ptr.unwritten) {
				rewrite_ptrs = 0;
				goto incompressible;
			}

			if (!p.ptr.cached && p.crc.compression_type != compression_type)
				rewrite_ptrs |= 1U << i;
			i++;
		}
	}
incompressible:
	if (target && bch2_target_accepts_data(c, BCH_DATA_user, target)) {
		unsigned i = 0;

		bkey_for_each_ptr(ptrs, ptr) {
			if (!ptr->cached && !bch2_dev_in_target(c, ptr->dev, target))
				rewrite_ptrs |= 1U << i;
			i++;
		}
	}

	return rewrite_ptrs;
}

bool bch2_bkey_needs_rebalance(struct bch_fs *c, struct bkey_s_c k)
{
	const struct bch_extent_rebalance *r = bch2_bkey_rebalance_opts(k);

	/*
	 * If it's an indirect extent, we don't delete the rebalance entry when
	 * done so that we know what options were applied - check if it still
	 * needs work done:
	 */
	if (r &&
	    k.k->type == KEY_TYPE_reflink_v &&
	    !bch2_bkey_ptrs_need_rebalance(c, k, r->target, r->compression))
		r = NULL;

	return r != NULL;
}

static u64 __bch2_bkey_sectors_need_rebalance(struct bch_fs *c, struct bkey_s_c k,
					      unsigned target, unsigned compression)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	u64 sectors = 0;

	if (compression) {
		unsigned compression_type = bch2_compression_opt_to_type(compression);

		bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
			if (p.crc.compression_type == BCH_COMPRESSION_TYPE_incompressible ||
			    p.ptr.unwritten) {
				sectors = 0;
				goto incompressible;
			}

			if (!p.ptr.cached && p.crc.compression_type != compression_type)
				sectors += p.crc.compressed_size;
		}
	}
incompressible:
	if (target && bch2_target_accepts_data(c, BCH_DATA_user, target)) {
		bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
			if (!p.ptr.cached && !bch2_dev_in_target(c, p.ptr.dev, target))
				sectors += p.crc.compressed_size;
	}

	return sectors;
}

u64 bch2_bkey_sectors_need_rebalance(struct bch_fs *c, struct bkey_s_c k)
{
	const struct bch_extent_rebalance *r = bch2_bkey_rebalance_opts(k);

	return r ? __bch2_bkey_sectors_need_rebalance(c, k, r->target, r->compression) : 0;
}

int bch2_bkey_set_needs_rebalance(struct bch_fs *c, struct bkey_i *_k,
				  struct bch_io_opts *opts)
{
	struct bkey_s k = bkey_i_to_s(_k);
	struct bch_extent_rebalance *r;
	unsigned target = opts->background_target;
	unsigned compression = background_compression(*opts);
	bool needs_rebalance;

	if (!bkey_extent_is_direct_data(k.k))
		return 0;

	/* get existing rebalance entry: */
	r = (struct bch_extent_rebalance *) bch2_bkey_rebalance_opts(k.s_c);
	if (r) {
		if (k.k->type == KEY_TYPE_reflink_v) {
			/*
			 * indirect extents: existing options take precedence,
			 * so that we don't move extents back and forth if
			 * they're referenced by different inodes with different
			 * options:
			 */
			if (r->target)
				target = r->target;
			if (r->compression)
				compression = r->compression;
		}

		r->target = target;
		r->compression = compression;
	}

	needs_rebalance = bch2_bkey_ptrs_need_rebalance(c, k.s_c, target, compression);

	if (needs_rebalance && !r) {
		union bch_extent_entry *new = bkey_val_end(k);

		new->rebalance.type = 1U << BCH_EXTENT_ENTRY_rebalance;
		new->rebalance.compression = compression;
		new->rebalance.target = target;
		new->rebalance.unused = 0;
		k.k->u64s += extent_entry_u64s(new);
	} else if (!needs_rebalance && r && k.k->type != KEY_TYPE_reflink_v) {
		/*
		 * For indirect extents, don't delete the rebalance entry when
		 * we're finished so that we know we specifically moved it or
		 * compressed it to its current location/compression type
		 */
		extent_entry_drop(k, (union bch_extent_entry *) r);
	}

	return 0;
}

/* Generic extent code: */

int bch2_cut_front_s(struct bpos where, struct bkey_s k)
{
	unsigned new_val_u64s = bkey_val_u64s(k.k);
	int val_u64s_delta;
	u64 sub;

	if (bkey_le(where, bkey_start_pos(k.k)))
		return 0;

	EBUG_ON(bkey_gt(where, k.k->p));

	sub = where.offset - bkey_start_offset(k.k);

	k.k->size -= sub;

	if (!k.k->size) {
		k.k->type = KEY_TYPE_deleted;
		new_val_u64s = 0;
	}

	switch (k.k->type) {
	case KEY_TYPE_extent:
	case KEY_TYPE_reflink_v: {
		struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
		union bch_extent_entry *entry;
		bool seen_crc = false;

		bkey_extent_entry_for_each(ptrs, entry) {
			switch (extent_entry_type(entry)) {
			case BCH_EXTENT_ENTRY_ptr:
				if (!seen_crc)
					entry->ptr.offset += sub;
				break;
			case BCH_EXTENT_ENTRY_crc32:
				entry->crc32.offset += sub;
				break;
			case BCH_EXTENT_ENTRY_crc64:
				entry->crc64.offset += sub;
				break;
			case BCH_EXTENT_ENTRY_crc128:
				entry->crc128.offset += sub;
				break;
			case BCH_EXTENT_ENTRY_stripe_ptr:
				break;
			case BCH_EXTENT_ENTRY_rebalance:
				break;
			}

			if (extent_entry_is_crc(entry))
				seen_crc = true;
		}

		break;
	}
	case KEY_TYPE_reflink_p: {
		struct bkey_s_reflink_p p = bkey_s_to_reflink_p(k);

		le64_add_cpu(&p.v->idx, sub);
		break;
	}
	case KEY_TYPE_inline_data:
	case KEY_TYPE_indirect_inline_data: {
		void *p = bkey_inline_data_p(k);
		unsigned bytes = bkey_inline_data_bytes(k.k);

		sub = min_t(u64, sub << 9, bytes);

		memmove(p, p + sub, bytes - sub);

		new_val_u64s -= sub >> 3;
		break;
	}
	}

	val_u64s_delta = bkey_val_u64s(k.k) - new_val_u64s;
	BUG_ON(val_u64s_delta < 0);

	set_bkey_val_u64s(k.k, new_val_u64s);
	memset(bkey_val_end(k), 0, val_u64s_delta * sizeof(u64));
	return -val_u64s_delta;
}
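
/*
 * Worked example for bch2_cut_front_s() (values hypothetical): cutting a
 * 128-sector extent at start + 64 shrinks k->size to 64 and advances the
 * pointer/crc offsets by 64 sectors, so the key still describes the
 * remaining data; the return value is minus the change in value u64s.
 */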

int bch2_cut_back_s(struct bpos where, struct bkey_s k)
{
	unsigned new_val_u64s = bkey_val_u64s(k.k);
	int val_u64s_delta;
	u64 len = 0;

	if (bkey_ge(where, k.k->p))
		return 0;

	EBUG_ON(bkey_lt(where, bkey_start_pos(k.k)));

	len = where.offset - bkey_start_offset(k.k);

	k.k->p.offset = where.offset;
	k.k->size = len;

	if (!len) {
		k.k->type = KEY_TYPE_deleted;
		new_val_u64s = 0;
	}

	switch (k.k->type) {
	case KEY_TYPE_inline_data:
	case KEY_TYPE_indirect_inline_data:
		new_val_u64s = (bkey_inline_data_offset(k.k) +
				min(bkey_inline_data_bytes(k.k), k.k->size << 9)) >> 3;
		break;
	}

	val_u64s_delta = bkey_val_u64s(k.k) - new_val_u64s;
	BUG_ON(val_u64s_delta < 0);

	set_bkey_val_u64s(k.k, new_val_u64s);
	memset(bkey_val_end(k), 0, val_u64s_delta * sizeof(u64));
	return -val_u64s_delta;
}