1 // SPDX-License-Identifier: GPL-2.0
4 #include "btree_key_cache.h"
5 #include "btree_write_buffer.h"
6 #include "bkey_methods.h"
7 #include "btree_update.h"
11 #include "disk_accounting.h"
14 #include "extent_update.h"
19 #include "subvolume.h"
22 #include <linux/random.h>
24 #include <linux/unaligned.h>
/* x-macro generated name tables: inode option names and inode flag names */
26 #define x(name, ...) #name,
27 const char * const bch2_inode_opts[] = {
32 static const char * const bch2_inode_flag_strs[] = {
/* forward declaration; defined near the bottom of this file */
38 static int delete_ancestor_snapshot_inodes(struct btree_trans *, struct bpos);
/* payload size in bytes of the old field encoding, indexed by (shift - 1) */
40 static const u8 byte_table[8] = { 1, 2, 3, 4, 6, 8, 10, 13 };
/*
 * Decode one old-style (pre-varint, v1) variable length inode field into a
 * 128 bit value: out[0] is the high 64 bits, out[1] the low 64 bits.
 * *out_bits is set to the number of significant bits in the decoded value.
 */
42 static int inode_decode_field(const u8 *in, const u8 *end,
43 u64 out[2], unsigned *out_bits)
45 __be64 be[2] = { 0, 0 };
46 unsigned bytes, shift;
56 * position of highest set bit indicates number of bytes:
57 * shift = number of bits to remove in high byte:
59 shift = 8 - __fls(*in); /* 1 <= shift <= 8 */
60 bytes = byte_table[shift - 1];
/* copy encoded bytes right-aligned into the big-endian scratch buffer */
65 p = (u8 *) be + 16 - bytes;
/* clear the length-marker bit in the high byte */
67 *p ^= (1 << 8) >> shift;
69 out[0] = be64_to_cpu(be[0]);
70 out[1] = be64_to_cpu(be[1]);
/* significant bits across both words; high word counts from bit 64 up */
71 *out_bits = out[0] ? 64 + fls64(out[0]) : fls64(out[1]);
/*
 * Pack an unpacked inode into an inode_v3 bkey: fixed fields are stored
 * directly in the value, the remaining BCH_INODE_FIELDS_v3() fields are
 * varint-encoded after them, and trailing all-zero fields are trimmed off.
 */
76 static inline void bch2_inode_pack_inlined(struct bkey_inode_buf *packed,
77 const struct bch_inode_unpacked *inode)
79 struct bkey_i_inode_v3 *k = &packed->inode;
80 u8 *out = k->v.fields;
81 u8 *end = (void *) &packed[1];
82 u8 *last_nonzero_field = out;
83 unsigned nr_fields = 0, last_nonzero_fieldnr = 0;
87 bkey_inode_v3_init(&packed->inode.k_i);
88 packed->inode.k.p.offset = inode->bi_inum;
89 packed->inode.v.bi_journal_seq = cpu_to_le64(inode->bi_journal_seq);
90 packed->inode.v.bi_hash_seed = inode->bi_hash_seed;
91 packed->inode.v.bi_flags = cpu_to_le64(inode->bi_flags);
92 packed->inode.v.bi_sectors = cpu_to_le64(inode->bi_sectors);
93 packed->inode.v.bi_size = cpu_to_le64(inode->bi_size);
94 packed->inode.v.bi_version = cpu_to_le64(inode->bi_version);
95 SET_INODEv3_MODE(&packed->inode.v, inode->bi_mode);
96 SET_INODEv3_FIELDS_START(&packed->inode.v, INODEv3_FIELDS_START_CUR);
/* varint-encode each nonzero field, remembering the last nonzero one: */
99 #define x(_name, _bits) \
102 if (inode->_name) { \
103 ret = bch2_varint_encode_fast(out, inode->_name); \
109 last_nonzero_field = out; \
110 last_nonzero_fieldnr = nr_fields; \
118 BCH_INODE_FIELDS_v3()
/* drop trailing zero fields from the encoded value: */
122 out = last_nonzero_field;
123 nr_fields = last_nonzero_fieldnr;
125 bytes = out - (u8 *) &packed->inode.v;
126 set_bkey_val_bytes(&packed->inode.k, bytes);
127 memset_u64s_tail(&packed->inode.v, 0, bytes);
129 SET_INODEv3_NR_FIELDS(&k->v, nr_fields);
/* debug builds: verify the pack/unpack round trip is lossless */
131 if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) {
132 struct bch_inode_unpacked unpacked;
134 ret = bch2_inode_unpack(bkey_i_to_s_c(&packed->inode.k_i), &unpacked);
136 BUG_ON(unpacked.bi_inum != inode->bi_inum);
137 BUG_ON(unpacked.bi_hash_seed != inode->bi_hash_seed);
138 BUG_ON(unpacked.bi_sectors != inode->bi_sectors);
139 BUG_ON(unpacked.bi_size != inode->bi_size);
140 BUG_ON(unpacked.bi_version != inode->bi_version);
141 BUG_ON(unpacked.bi_mode != inode->bi_mode);
143 #define x(_name, _bits) if (unpacked._name != inode->_name) \
144 panic("unpacked %llu should be %llu", \
145 (u64) unpacked._name, (u64) inode->_name);
146 BCH_INODE_FIELDS_v3()
/* Out-of-line wrapper around bch2_inode_pack_inlined() */
151 void bch2_inode_pack(struct bkey_inode_buf *packed,
152 const struct bch_inode_unpacked *inode)
154 bch2_inode_pack_inlined(packed, inode);
/*
 * Unpack an inode whose fields use the old (pre-varint) encoding,
 * decoded with inode_decode_field().  Fields beyond those present in
 * the key are zeroed.
 */
157 static noinline int bch2_inode_unpack_v1(struct bkey_s_c_inode inode,
158 struct bch_inode_unpacked *unpacked)
160 const u8 *in = inode.v->fields;
161 const u8 *end = bkey_val_end(inode);
163 unsigned fieldnr = 0, field_bits;
166 #define x(_name, _bits) \
167 if (fieldnr++ == INODEv1_NR_FIELDS(inode.v)) { \
/* ran out of encoded fields: zero this and all following members */ \
168 unsigned offset = offsetof(struct bch_inode_unpacked, _name);\
169 memset((void *) unpacked + offset, 0, \
170 sizeof(*unpacked) - offset); \
174 ret = inode_decode_field(in, end, field, &field_bits); \
/* reject values too wide for the destination member */ \
178 if (field_bits > sizeof(unpacked->_name) * 8) \
181 unpacked->_name = field[1]; \
184 BCH_INODE_FIELDS_v2()
187 /* XXX: signal if there were more fields than expected? */
/*
 * Unpack varint-encoded inode fields (v1-with-varints and v2 keys).
 * Each field is a pair of varints; missing trailing fields unpack as zero.
 */
191 static int bch2_inode_unpack_v2(struct bch_inode_unpacked *unpacked,
192 const u8 *in, const u8 *end,
195 unsigned fieldnr = 0;
199 #define x(_name, _bits) \
200 if (fieldnr < nr_fields) { \
201 ret = bch2_varint_decode_fast(in, end, &v[0]); \
207 ret = bch2_varint_decode_fast(in, end, &v[1]); \
218 unpacked->_name = v[0]; \
/* error if the decoded value doesn't fit in the destination member */ \
219 if (v[1] || v[0] != unpacked->_name) \
223 BCH_INODE_FIELDS_v2()
226 /* XXX: signal if there were more fields than expected? */
/*
 * Unpack an inode_v3 key: fixed fields are read directly from the value,
 * the rest are varint pairs like v2 but over BCH_INODE_FIELDS_v3().
 */
230 static int bch2_inode_unpack_v3(struct bkey_s_c k,
231 struct bch_inode_unpacked *unpacked)
233 struct bkey_s_c_inode_v3 inode = bkey_s_c_to_inode_v3(k);
234 const u8 *in = inode.v->fields;
235 const u8 *end = bkey_val_end(inode);
236 unsigned nr_fields = INODEv3_NR_FIELDS(inode.v);
237 unsigned fieldnr = 0;
241 unpacked->bi_inum = inode.k->p.offset;
242 unpacked->bi_journal_seq= le64_to_cpu(inode.v->bi_journal_seq);
243 unpacked->bi_hash_seed = inode.v->bi_hash_seed;
244 unpacked->bi_flags = le64_to_cpu(inode.v->bi_flags);
245 unpacked->bi_sectors = le64_to_cpu(inode.v->bi_sectors);
246 unpacked->bi_size = le64_to_cpu(inode.v->bi_size);
247 unpacked->bi_version = le64_to_cpu(inode.v->bi_version);
248 unpacked->bi_mode = INODEv3_MODE(inode.v);
250 #define x(_name, _bits) \
251 if (fieldnr < nr_fields) { \
252 ret = bch2_varint_decode_fast(in, end, &v[0]); \
258 ret = bch2_varint_decode_fast(in, end, &v[1]); \
269 unpacked->_name = v[0]; \
/* error if the decoded value doesn't fit in the destination member */ \
270 if (v[1] || v[0] != unpacked->_name) \
274 BCH_INODE_FIELDS_v3()
277 /* XXX: signal if there were more fields than expected? */
/*
 * Unpack legacy (v1/v2) inode key types; v3 is handled on the fast path
 * in bch2_inode_unpack().
 */
281 static noinline int bch2_inode_unpack_slowpath(struct bkey_s_c k,
282 struct bch_inode_unpacked *unpacked)
284 memset(unpacked, 0, sizeof(*unpacked));
286 unpacked->bi_snapshot = k.k->p.snapshot;
289 case KEY_TYPE_inode: {
290 struct bkey_s_c_inode inode = bkey_s_c_to_inode(k);
292 unpacked->bi_inum = inode.k->p.offset;
293 unpacked->bi_journal_seq= 0;
294 unpacked->bi_hash_seed = inode.v->bi_hash_seed;
295 unpacked->bi_flags = le32_to_cpu(inode.v->bi_flags);
296 unpacked->bi_mode = le16_to_cpu(inode.v->bi_mode);
/* v1 keys may use either the old or the varint field encoding: */
298 if (INODEv1_NEW_VARINT(inode.v)) {
299 return bch2_inode_unpack_v2(unpacked, inode.v->fields,
301 INODEv1_NR_FIELDS(inode.v));
303 return bch2_inode_unpack_v1(inode, unpacked);
307 case KEY_TYPE_inode_v2: {
308 struct bkey_s_c_inode_v2 inode = bkey_s_c_to_inode_v2(k);
310 unpacked->bi_inum = inode.k->p.offset;
311 unpacked->bi_journal_seq= le64_to_cpu(inode.v->bi_journal_seq);
312 unpacked->bi_hash_seed = inode.v->bi_hash_seed;
313 unpacked->bi_flags = le64_to_cpu(inode.v->bi_flags);
314 unpacked->bi_mode = le16_to_cpu(inode.v->bi_mode);
316 return bch2_inode_unpack_v2(unpacked, inode.v->fields,
318 INODEv2_NR_FIELDS(inode.v));
/*
 * Unpack any inode key type; current (v3) keys take the fast path,
 * legacy types go through the noinline slowpath.
 */
325 int bch2_inode_unpack(struct bkey_s_c k,
326 struct bch_inode_unpacked *unpacked)
328 unpacked->bi_snapshot = k.k->p.snapshot;
330 return likely(k.k->type == KEY_TYPE_inode_v3)
331 ? bch2_inode_unpack_v3(k, unpacked)
332 : bch2_inode_unpack_slowpath(k, unpacked);
/*
 * Look up and unpack an inode by (subvolume, inum): resolve the subvolume
 * to a snapshot ID, fetch the (cached) inode key, and unpack it.  On
 * success the iterator is left positioned on the key; on error it is
 * exited.  Errors are logged unless @warn is false.
 */
335 int __bch2_inode_peek(struct btree_trans *trans,
336 struct btree_iter *iter,
337 struct bch_inode_unpacked *inode,
338 subvol_inum inum, unsigned flags,
342 int ret = __bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot, warn);
346 struct bkey_s_c k = bch2_bkey_get_iter(trans, iter, BTREE_ID_inodes,
347 SPOS(0, inum.inum, snapshot),
348 flags|BTREE_ITER_cached);
353 ret = bkey_is_inode(k.k) ? 0 : -BCH_ERR_ENOENT_inode;
357 ret = bch2_inode_unpack(k, inode);
364 bch_err_msg(trans->c, ret, "looking up inum %llu:%llu:", inum.subvol, inum.inum);
365 bch2_trans_iter_exit(trans, iter);
/*
 * Pack @inode and queue it as a btree update at the iterator's position,
 * preserving the iterator's snapshot ID.
 */
369 int bch2_inode_write_flags(struct btree_trans *trans,
370 struct btree_iter *iter,
371 struct bch_inode_unpacked *inode,
372 enum btree_iter_update_trigger_flags flags)
374 struct bkey_inode_buf *inode_p;
376 inode_p = bch2_trans_kmalloc(trans, sizeof(*inode_p));
378 return PTR_ERR(inode_p);
380 bch2_inode_pack_inlined(inode_p, inode);
381 inode_p->inode.k.p.snapshot = iter->snapshot;
382 return bch2_trans_update(trans, iter, &inode_p->inode.k_i, flags);
/*
 * fsck variant of inode write: packs @inode and inserts it directly at
 * (0, bi_inum, bi_snapshot), bypassing snapshot-node handling.
 */
385 int __bch2_fsck_write_inode(struct btree_trans *trans, struct bch_inode_unpacked *inode)
387 struct bkey_inode_buf *inode_p =
388 bch2_trans_kmalloc(trans, sizeof(*inode_p));
391 return PTR_ERR(inode_p);
393 bch2_inode_pack(inode_p, inode);
394 inode_p->inode.k.p.snapshot = inode->bi_snapshot;
396 return bch2_btree_insert_nonextent(trans, BTREE_ID_inodes,
398 BTREE_UPDATE_internal_snapshot_node);
/* Commit wrapper around __bch2_fsck_write_inode(); logs any error. */
401 int bch2_fsck_write_inode(struct btree_trans *trans, struct bch_inode_unpacked *inode)
403 int ret = commit_do(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
404 __bch2_fsck_write_inode(trans, inode));
405 bch_err_fn(trans->c, ret);
/*
 * Convert any inode key to the current v3 format by unpacking and
 * repacking it into transaction-allocated memory.  Returns the new key
 * or an ERR_PTR.
 */
409 struct bkey_i *bch2_inode_to_v3(struct btree_trans *trans, struct bkey_i *k)
411 struct bch_inode_unpacked u;
412 struct bkey_inode_buf *inode_p;
415 if (!bkey_is_inode(&k->k))
416 return ERR_PTR(-ENOENT);
418 inode_p = bch2_trans_kmalloc(trans, sizeof(*inode_p));
420 return ERR_CAST(inode_p);
422 ret = bch2_inode_unpack(bkey_i_to_s_c(k), &u);
426 bch2_inode_pack(inode_p, &u);
427 return &inode_p->inode.k_i;
/*
 * Validation checks common to all inode key versions: position, unpack,
 * and sanity of the unpacked option/flag fields.
 */
430 static int __bch2_inode_validate(struct bch_fs *c, struct bkey_s_c k,
431 enum bch_validate_flags flags)
433 struct bch_inode_unpacked unpacked;
/* inodes live at (0, inum, snapshot); k.p.inode must be zero */
436 bkey_fsck_err_on(k.k->p.inode,
437 c, inode_pos_inode_nonzero,
438 "nonzero k.p.inode");
440 bkey_fsck_err_on(k.k->p.offset < BLOCKDEV_INODE_MAX,
441 c, inode_pos_blockdev_range,
442 "fs inode in blockdev range");
444 bkey_fsck_err_on(bch2_inode_unpack(k, &unpacked),
445 c, inode_unpack_error,
446 "invalid variable length fields");
/* option fields are stored biased by 1; 0 means "unset" */
448 bkey_fsck_err_on(unpacked.bi_data_checksum >= BCH_CSUM_OPT_NR + 1,
449 c, inode_checksum_type_invalid,
450 "invalid data checksum type (%u >= %u",
451 unpacked.bi_data_checksum, BCH_CSUM_OPT_NR + 1);
453 bkey_fsck_err_on(unpacked.bi_compression &&
454 !bch2_compression_opt_valid(unpacked.bi_compression - 1),
455 c, inode_compression_type_invalid,
456 "invalid compression opt %u", unpacked.bi_compression - 1);
458 bkey_fsck_err_on((unpacked.bi_flags & BCH_INODE_unlinked) &&
459 unpacked.bi_nlink != 0,
460 c, inode_unlinked_but_nlink_nonzero,
461 "flagged as unlinked but bi_nlink != 0");
463 bkey_fsck_err_on(unpacked.bi_subvol && !S_ISDIR(unpacked.bi_mode),
464 c, inode_subvol_root_but_not_dir,
465 "subvolume root but not a directory");
/* v1 inode validation: check the str_hash field, then the common checks. */
470 int bch2_inode_validate(struct bch_fs *c, struct bkey_s_c k,
471 enum bch_validate_flags flags)
473 struct bkey_s_c_inode inode = bkey_s_c_to_inode(k);
476 bkey_fsck_err_on(INODEv1_STR_HASH(inode.v) >= BCH_STR_HASH_NR,
477 c, inode_str_hash_invalid,
478 "invalid str hash type (%llu >= %u)",
479 INODEv1_STR_HASH(inode.v), BCH_STR_HASH_NR);
481 ret = __bch2_inode_validate(c, k, flags);
/* v2 inode validation: check the str_hash field, then the common checks. */
486 int bch2_inode_v2_validate(struct bch_fs *c, struct bkey_s_c k,
487 enum bch_validate_flags flags)
489 struct bkey_s_c_inode_v2 inode = bkey_s_c_to_inode_v2(k);
492 bkey_fsck_err_on(INODEv2_STR_HASH(inode.v) >= BCH_STR_HASH_NR,
493 c, inode_str_hash_invalid,
494 "invalid str hash type (%llu >= %u)",
495 INODEv2_STR_HASH(inode.v), BCH_STR_HASH_NR);
497 ret = __bch2_inode_validate(c, k, flags);
/*
 * v3 inode validation: additionally checks fields_start, which says where
 * the varint fields begin within the value.
 */
502 int bch2_inode_v3_validate(struct bch_fs *c, struct bkey_s_c k,
503 enum bch_validate_flags flags)
505 struct bkey_s_c_inode_v3 inode = bkey_s_c_to_inode_v3(k);
508 bkey_fsck_err_on(INODEv3_FIELDS_START(inode.v) < INODEv3_FIELDS_START_INITIAL ||
509 INODEv3_FIELDS_START(inode.v) > bkey_val_u64s(inode.k),
510 c, inode_v3_fields_start_bad,
511 "invalid fields_start (got %llu, min %u max %zu)",
512 INODEv3_FIELDS_START(inode.v),
513 INODEv3_FIELDS_START_INITIAL,
514 bkey_val_u64s(inode.k));
516 bkey_fsck_err_on(INODEv3_STR_HASH(inode.v) >= BCH_STR_HASH_NR,
517 c, inode_str_hash_invalid,
518 "invalid str hash type (%llu >= %u)",
519 INODEv3_STR_HASH(inode.v), BCH_STR_HASH_NR);
521 ret = __bch2_inode_validate(c, k, flags);
/* Print all unpacked inode fields, indented, one per line. */
526 static void __bch2_inode_unpacked_to_text(struct printbuf *out,
527 struct bch_inode_unpacked *inode)
529 prt_printf(out, "\n");
530 printbuf_indent_add(out, 2);
531 prt_printf(out, "mode=%o\n", inode->bi_mode);
533 prt_str(out, "flags=");
/* only the low 20 flag bits have names; raw value follows in hex */
534 prt_bitflags(out, bch2_inode_flag_strs, inode->bi_flags & ((1U << 20) - 1));
535 prt_printf(out, "(%x)\n", inode->bi_flags);
537 prt_printf(out, "journal_seq=%llu\n", inode->bi_journal_seq);
538 prt_printf(out, "hash_seed=%llx\n", inode->bi_hash_seed);
539 prt_printf(out, "hash_type=");
540 bch2_prt_str_hash_type(out, INODE_STR_HASH(inode));
542 prt_printf(out, "bi_size=%llu\n", inode->bi_size);
543 prt_printf(out, "bi_sectors=%llu\n", inode->bi_sectors);
544 prt_printf(out, "bi_version=%llu\n", inode->bi_version);
546 #define x(_name, _bits) \
547 prt_printf(out, #_name "=%llu\n", (u64) inode->_name);
548 BCH_INODE_FIELDS_v3()
551 bch2_printbuf_strip_trailing_newline(out);
552 printbuf_indent_sub(out, 2);
/* Print "inum: <inum>:<snapshot>" followed by all inode fields. */
555 void bch2_inode_unpacked_to_text(struct printbuf *out, struct bch_inode_unpacked *inode)
557 prt_printf(out, "inum: %llu:%u ", inode->bi_inum, inode->bi_snapshot);
558 __bch2_inode_unpacked_to_text(out, inode);
/* bkey to_text method: unpack and print, or report an unpack error. */
561 void bch2_inode_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
563 struct bch_inode_unpacked inode;
565 if (bch2_inode_unpack(k, &inode)) {
566 prt_printf(out, "(unpack error)");
570 __bch2_inode_unpacked_to_text(out, &inode);
/* Read bi_flags from any inode key version (v1 flags are only 32 bit). */
573 static inline u64 bkey_inode_flags(struct bkey_s_c k)
577 return le32_to_cpu(bkey_s_c_to_inode(k).v->bi_flags);
578 case KEY_TYPE_inode_v2:
579 return le64_to_cpu(bkey_s_c_to_inode_v2(k).v->bi_flags);
580 case KEY_TYPE_inode_v3:
581 return le64_to_cpu(bkey_s_c_to_inode_v3(k).v->bi_flags);
/* Write bi_flags to any inode key version (v1 flags are only 32 bit). */
587 static inline void bkey_inode_flags_set(struct bkey_s k, u64 f)
591 bkey_s_to_inode(k).v->bi_flags = cpu_to_le32(f);
593 case KEY_TYPE_inode_v2:
594 bkey_s_to_inode_v2(k).v->bi_flags = cpu_to_le64(f);
596 case KEY_TYPE_inode_v3:
597 bkey_s_to_inode_v3(k).v->bi_flags = cpu_to_le64(f);
/*
 * True if this inode is unlinked and has no child-snapshot versions that
 * might still reference it.
 *
 * Fix: f was previously masked with BCH_INODE_unlinked before testing
 * BCH_INODE_has_child_snapshot, which made that second test a dead no-op
 * (the bit could never be set in f).  Keep all flag bits in f so the
 * has_child_snapshot check is actually effective.
 */
604 static inline bool bkey_is_unlinked_inode(struct bkey_s_c k)
606 unsigned f = bkey_inode_flags(k);
608 return (f & BCH_INODE_unlinked) && !(f & BCH_INODE_has_child_snapshot);
/*
 * Find the key at the same (inode, offset) position in the nearest
 * ancestor snapshot of pos.snapshot; returns bkey_s_c_null if none,
 * exiting the iterator in that case.
 */
611 static struct bkey_s_c
612 bch2_bkey_get_iter_snapshot_parent(struct btree_trans *trans, struct btree_iter *iter,
613 enum btree_id btree, struct bpos pos,
616 struct bch_fs *c = trans->c;
620 for_each_btree_key_upto_norestart(trans, *iter, btree,
622 SPOS(pos.inode, pos.offset, U32_MAX),
623 flags|BTREE_ITER_all_snapshots, k, ret)
/* keep only keys whose snapshot is an ancestor of pos.snapshot */
624 if (bch2_snapshot_is_ancestor(c, pos.snapshot, k.k->p.snapshot))
627 bch2_trans_iter_exit(trans, iter);
628 return ret ? bkey_s_c_err(ret) : bkey_s_c_null;
/*
 * Like bch2_bkey_get_iter_snapshot_parent() but specialized to the inodes
 * btree; elided lines presumably filter for actual inode keys.
 */
631 static struct bkey_s_c
632 bch2_inode_get_iter_snapshot_parent(struct btree_trans *trans, struct btree_iter *iter,
633 struct bpos pos, unsigned flags)
637 k = bch2_bkey_get_iter_snapshot_parent(trans, iter, BTREE_ID_inodes, pos, flags);
643 bch2_trans_iter_exit(trans, iter);
/*
 * Scan inode keys at the same inum with snapshot IDs below pos.snapshot,
 * looking for one in a descendent (child) snapshot of pos.snapshot.
 */
648 int __bch2_inode_has_child_snapshots(struct btree_trans *trans, struct bpos pos)
650 struct bch_fs *c = trans->c;
651 struct btree_iter iter;
655 for_each_btree_key_upto_norestart(trans, iter,
656 BTREE_ID_inodes, POS(0, pos.offset), bpos_predecessor(pos),
657 BTREE_ITER_all_snapshots|
658 BTREE_ITER_with_updates, k, ret)
659 if (bch2_snapshot_is_ancestor(c, k.k->p.snapshot, pos.snapshot) &&
660 bkey_is_inode(k.k)) {
664 bch2_trans_iter_exit(trans, &iter);
/*
 * Recompute whether @k has versions in child snapshots and flip its
 * has_child_snapshot flag in place if it disagrees with @have_child.
 */
668 static int update_inode_has_children(struct btree_trans *trans,
673 int ret = bch2_inode_has_child_snapshots(trans, k.k->p);
675 return ret < 0 ? ret : 0;
678 u64 f = bkey_inode_flags(k.s_c);
679 if (have_child != !!(f & BCH_INODE_has_child_snapshot))
680 bkey_inode_flags_set(k, f ^ BCH_INODE_has_child_snapshot);
/*
 * Find the inode version in the parent snapshot of @pos and update its
 * has_child_snapshot flag to match @have_child, via bch2_bkey_make_mut().
 */
685 static int update_parent_inode_has_children(struct btree_trans *trans, struct bpos pos,
688 struct btree_iter iter;
689 struct bkey_s_c k = bch2_inode_get_iter_snapshot_parent(trans,
690 &iter, pos, BTREE_ITER_with_updates);
691 int ret = bkey_err(k);
/* when clearing the flag, re-verify there really are no child versions */
698 ret = bch2_inode_has_child_snapshots(trans, k.k->p);
700 ret = ret < 0 ? ret : 0;
705 u64 f = bkey_inode_flags(k);
706 if (have_child != !!(f & BCH_INODE_has_child_snapshot)) {
707 struct bkey_i *update = bch2_bkey_make_mut(trans, &iter, &k,
708 BTREE_UPDATE_internal_snapshot_node);
709 ret = PTR_ERR_OR_ZERO(update);
713 bkey_inode_flags_set(bkey_i_to_s(update), f ^ BCH_INODE_has_child_snapshot);
716 bch2_trans_iter_exit(trans, &iter);
/*
 * Inode trigger: runs on inode btree updates to
 *  - stamp bi_journal_seq at atomic insert time,
 *  - maintain the nr_inodes disk accounting counter,
 *  - maintain the deleted_inodes bitmap btree,
 *  - keep parent snapshots' has_child_snapshot flags in sync.
 */
720 int bch2_trigger_inode(struct btree_trans *trans,
721 enum btree_id btree_id, unsigned level,
724 enum btree_iter_update_trigger_flags flags)
726 struct bch_fs *c = trans->c;
728 if ((flags & BTREE_TRIGGER_atomic) && (flags & BTREE_TRIGGER_insert)) {
729 BUG_ON(!trans->journal_res.seq);
730 bkey_s_to_inode_v3(new).v->bi_journal_seq = cpu_to_le64(trans->journal_res.seq);
/* +1 on create, -1 on delete: */
733 s64 nr = bkey_is_inode(new.k) - bkey_is_inode(old.k);
734 if ((flags & (BTREE_TRIGGER_transactional|BTREE_TRIGGER_gc)) && nr) {
735 struct disk_accounting_pos acc = { .type = BCH_DISK_ACCOUNTING_nr_inodes };
736 int ret = bch2_disk_accounting_mod(trans, &acc, &nr, 1, flags & BTREE_TRIGGER_gc);
741 if (flags & BTREE_TRIGGER_transactional) {
742 int unlinked_delta = (int) bkey_is_unlinked_inode(new.s_c) -
743 (int) bkey_is_unlinked_inode(old);
744 if (unlinked_delta) {
745 int ret = bch2_btree_bit_mod_buffered(trans, BTREE_ID_deleted_inodes,
746 new.k->p, unlinked_delta > 0);
752 * If we're creating or deleting an inode at this snapshot ID,
753 * and there might be an inode in a parent snapshot ID, we might
754 * need to set or clear the has_child_snapshot flag on the
757 int deleted_delta = (int) bkey_is_inode(new.k) -
758 (int) bkey_is_inode(old.k);
760 bch2_snapshot_parent(c, new.k->p.snapshot)) {
761 int ret = update_parent_inode_has_children(trans, new.k->p,
768 * When an inode is first updated in a new snapshot, we may need
769 * to clear has_child_snapshot
771 if (deleted_delta > 0) {
772 int ret = update_inode_has_children(trans, new, false);
/* inode_generation keys also live at (0, inum, snapshot). */
781 int bch2_inode_generation_validate(struct bch_fs *c, struct bkey_s_c k,
782 enum bch_validate_flags flags)
786 bkey_fsck_err_on(k.k->p.inode,
787 c, inode_pos_inode_nonzero,
788 "nonzero k.p.inode");
/* to_text method for inode_generation keys. */
793 void bch2_inode_generation_to_text(struct printbuf *out, struct bch_fs *c,
796 struct bkey_s_c_inode_generation gen = bkey_s_c_to_inode_generation(k);
798 prt_printf(out, "generation: %u", le32_to_cpu(gen.v->bi_generation));
/*
 * Zero a new in-memory inode and set the fields that don't depend on the
 * caller: str_hash type from filesystem options, random hash seed.
 */
801 void bch2_inode_init_early(struct bch_fs *c,
802 struct bch_inode_unpacked *inode_u)
804 enum bch_str_hash_type str_hash =
805 bch2_str_hash_opt_to_type(c, c->opts.str_hash);
807 memset(inode_u, 0, sizeof(*inode_u));
809 SET_INODE_STR_HASH(inode_u, str_hash);
810 get_random_bytes(&inode_u->bi_hash_seed, sizeof(inode_u->bi_hash_seed));
/*
 * Fill in caller-supplied fields of a new inode: ownership, mode, device,
 * timestamps, plus inheritance from @parent (setgid directories and the
 * x-macro'd inheritable option fields).
 */
813 void bch2_inode_init_late(struct bch_inode_unpacked *inode_u, u64 now,
814 uid_t uid, gid_t gid, umode_t mode, dev_t rdev,
815 struct bch_inode_unpacked *parent)
817 inode_u->bi_mode = mode;
818 inode_u->bi_uid = uid;
819 inode_u->bi_gid = gid;
820 inode_u->bi_dev = rdev;
821 inode_u->bi_atime = now;
822 inode_u->bi_mtime = now;
823 inode_u->bi_ctime = now;
824 inode_u->bi_otime = now;
/* setgid directory: inherit group, and propagate setgid (per POSIX) */
826 if (parent && parent->bi_mode & S_ISGID) {
827 inode_u->bi_gid = parent->bi_gid;
829 inode_u->bi_mode |= S_ISGID;
/* inherit per-inode option fields from the parent directory */
833 #define x(_name, ...) inode_u->bi_##_name = parent->bi_##_name;
/* Convenience wrapper: init_early + init_late with the current time. */
839 void bch2_inode_init(struct bch_fs *c, struct bch_inode_unpacked *inode_u,
840 uid_t uid, gid_t gid, umode_t mode, dev_t rdev,
841 struct bch_inode_unpacked *parent)
843 bch2_inode_init_early(c, inode_u);
844 bch2_inode_init_late(inode_u, bch2_current_time(c),
845 uid, gid, mode, rdev, parent);
/*
 * Generation number for a slot: taken from an inode_generation key if one
 * is present there (other key-type cases elided in this listing).
 */
848 static inline u32 bkey_generation(struct bkey_s_c k)
852 case KEY_TYPE_inode_v2:
854 case KEY_TYPE_inode_generation:
855 return le32_to_cpu(bkey_s_c_to_inode_generation(k).v->bi_generation);
862 * This just finds an empty slot:
/*
 * Allocate an unused inode number: search [min, max) starting from a
 * cached per-cpu hint (sharded by cpu when shard_inode_numbers is set),
 * wrapping around to min once before giving up with ENOSPC.
 */
864 int bch2_inode_create(struct btree_trans *trans,
865 struct btree_iter *iter,
866 struct bch_inode_unpacked *inode_u,
867 u32 snapshot, u64 cpu)
869 struct bch_fs *c = trans->c;
871 u64 min, max, start, pos, *hint;
873 unsigned bits = (c->opts.inodes_32bit ? 31 : 63);
875 if (c->opts.shard_inode_numbers) {
/* reserve the high bits of the inum for the shard (cpu) id */
876 bits -= c->inode_shard_bits;
879 max = (cpu << bits) | ~(ULLONG_MAX << bits);
881 min = max_t(u64, min, BLOCKDEV_INODE_MAX);
882 hint = c->unused_inode_hints + cpu;
884 min = BLOCKDEV_INODE_MAX;
885 max = ~(ULLONG_MAX << bits);
886 hint = c->unused_inode_hints;
889 start = READ_ONCE(*hint);
891 if (start >= max || start < min)
895 bch2_trans_iter_init(trans, iter, BTREE_ID_inodes, POS(0, pos),
896 BTREE_ITER_all_snapshots|
/* scan forward for a gap in used inode numbers: */
899 while ((k = bch2_btree_iter_peek(iter)).k &&
900 !(ret = bkey_err(k)) &&
901 bkey_lt(k.k->p, POS(0, max))) {
902 if (pos < iter->pos.offset)
906 * We don't need to iterate over keys in every snapshot once
907 * we've found just one:
909 pos = iter->pos.offset + 1;
910 bch2_btree_iter_set_pos(iter, POS(0, pos));
913 if (!ret && pos < max)
916 if (!ret && start == min)
917 ret = -BCH_ERR_ENOSPC_inode_create;
920 bch2_trans_iter_exit(trans, iter);
924 /* Retry from start */
926 bch2_btree_iter_set_pos(iter, POS(0, pos));
/* found a free slot; reposition at the requested snapshot */
929 bch2_btree_iter_set_pos(iter, SPOS(0, pos, snapshot));
930 k = bch2_btree_iter_peek_slot(iter);
933 bch2_trans_iter_exit(trans, iter);
/* remember where we left off for the next allocation */
937 *hint = k.k->p.offset;
938 inode_u->bi_inum = k.k->p.offset;
939 inode_u->bi_generation = bkey_generation(k);
/*
 * Delete all keys belonging to @inum in btree @id, one commit per key,
 * restarting the transaction (and re-resolving the snapshot) as needed.
 */
943 static int bch2_inode_delete_keys(struct btree_trans *trans,
944 subvol_inum inum, enum btree_id id)
946 struct btree_iter iter;
948 struct bkey_i delete;
949 struct bpos end = POS(inum.inum, U64_MAX);
954 * We're never going to be deleting partial extents, no need to use an
957 bch2_trans_iter_init(trans, &iter, id, POS(inum.inum, 0),
961 bch2_trans_begin(trans);
963 ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
967 bch2_btree_iter_set_snapshot(&iter, snapshot);
969 k = bch2_btree_iter_peek_upto(&iter, end);
/* delete via a whiteout key at the found position: */
977 bkey_init(&delete.k);
978 delete.k.p = iter.pos;
/* for extent btrees, size the whiteout to cover the whole extent */
980 if (iter.flags & BTREE_ITER_is_extents)
981 bch2_key_resize(&delete.k,
982 bpos_min(end, k.k->p).offset -
985 ret = bch2_trans_update(trans, &iter, &delete, 0) ?:
986 bch2_trans_commit(trans, NULL, NULL,
987 BCH_TRANS_COMMIT_no_enospc);
/* transaction restarts just loop; other errors terminate */
989 if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
993 bch2_trans_iter_exit(trans, &iter);
/*
 * Fully remove an inode: delete its extents, xattrs and dirents, then
 * replace the inode key with an inode_generation key (generation + 1),
 * and finally clean up versions in ancestor snapshots.
 */
997 int bch2_inode_rm(struct bch_fs *c, subvol_inum inum)
999 struct btree_trans *trans = bch2_trans_get(c);
1000 struct btree_iter iter = { NULL };
1001 struct bkey_i_inode_generation delete;
1002 struct bch_inode_unpacked inode_u;
1008 * If this was a directory, there shouldn't be any real dirents left -
1009 * but there could be whiteouts (from hash collisions) that we should
1012 * XXX: the dirent could ideally would delete whiteouts when they're no
1015 ret = bch2_inode_delete_keys(trans, inum, BTREE_ID_extents) ?:
1016 bch2_inode_delete_keys(trans, inum, BTREE_ID_xattrs) ?:
1017 bch2_inode_delete_keys(trans, inum, BTREE_ID_dirents);
1021 bch2_trans_begin(trans);
1023 ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
1027 k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes,
1028 SPOS(0, inum.inum, snapshot),
1029 BTREE_ITER_intent|BTREE_ITER_cached);
1034 if (!bkey_is_inode(k.k)) {
1035 bch2_fs_inconsistent(c,
1036 "inode %llu:%u not found when deleting",
1037 inum.inum, snapshot);
1042 bch2_inode_unpack(k, &inode_u);
/* replace the inode with a generation key so the inum can be reused */
1044 bkey_inode_generation_init(&delete.k_i);
1045 delete.k.p = iter.pos;
1046 delete.v.bi_generation = cpu_to_le32(inode_u.bi_generation + 1);
1048 ret = bch2_trans_update(trans, &iter, &delete.k_i, 0) ?:
1049 bch2_trans_commit(trans, NULL, NULL,
1050 BCH_TRANS_COMMIT_no_enospc);
1052 bch2_trans_iter_exit(trans, &iter);
1053 if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
1059 ret = delete_ancestor_snapshot_inodes(trans, SPOS(0, inum.inum, snapshot));
1061 bch2_trans_put(trans);
/* Inode lookup by (subvol, inum) that doesn't log lookup failures. */
1065 int bch2_inode_find_by_inum_nowarn_trans(struct btree_trans *trans,
1067 struct bch_inode_unpacked *inode)
1069 struct btree_iter iter;
1072 ret = bch2_inode_peek_nowarn(trans, &iter, inode, inum, 0);
1074 bch2_trans_iter_exit(trans, &iter);
/* Inode lookup by (subvol, inum); failures are logged via inode_peek. */
1078 int bch2_inode_find_by_inum_trans(struct btree_trans *trans,
1080 struct bch_inode_unpacked *inode)
1082 struct btree_iter iter;
1085 ret = bch2_inode_peek(trans, &iter, inode, inum, 0);
1087 bch2_trans_iter_exit(trans, &iter);
/* Non-transactional convenience wrapper for inode lookup. */
1091 int bch2_inode_find_by_inum(struct bch_fs *c, subvol_inum inum,
1092 struct bch_inode_unpacked *inode)
1094 return bch2_trans_do(c, bch2_inode_find_by_inum_trans(trans, inum, inode));
/*
 * Increment link count: clearing the unlinked flag counts as the first
 * link; errors out if bi_nlink would overflow.
 */
1097 int bch2_inode_nlink_inc(struct bch_inode_unpacked *bi)
1099 if (bi->bi_flags & BCH_INODE_unlinked)
1100 bi->bi_flags &= ~BCH_INODE_unlinked;
1102 if (bi->bi_nlink == U32_MAX)
/*
 * Decrement link count; reaching zero sets BCH_INODE_unlinked.  Flags the
 * transaction inconsistent on impossible states (underflow, unlinked with
 * nonzero nlink).
 */
1111 void bch2_inode_nlink_dec(struct btree_trans *trans, struct bch_inode_unpacked *bi)
1113 if (bi->bi_nlink && (bi->bi_flags & BCH_INODE_unlinked)) {
1114 bch2_trans_inconsistent(trans, "inode %llu unlinked but link count nonzero",
1119 if (bi->bi_flags & BCH_INODE_unlinked) {
1120 bch2_trans_inconsistent(trans, "inode %llu link count underflow", bi->bi_inum);
1127 bi->bi_flags |= BCH_INODE_unlinked;
/*
 * Convert per-inode option fields to a bch_opts struct; inode option
 * values are stored biased by 1 (0 = unset), hence the "- 1".
 */
1130 struct bch_opts bch2_inode_opts_to_opts(struct bch_inode_unpacked *inode)
1132 struct bch_opts ret = { 0 };
1133 #define x(_name, _bits) \
1134 if (inode->bi_##_name) \
1135 opt_set(ret, _name, inode->bi_##_name - 1);
/*
 * Compute effective IO options for an inode (inode value if set, else
 * filesystem default).  NOTE(review): the last line zeroes several opts —
 * presumably gated on a condition in an elided line; confirm upstream.
 */
1141 void bch2_inode_opts_get(struct bch_io_opts *opts, struct bch_fs *c,
1142 struct bch_inode_unpacked *inode)
1144 #define x(_name, _bits) opts->_name = inode_opt_get(c, inode, _name);
1149 opts->compression = opts->background_compression = opts->data_checksum = opts->erasure_code = 0;
/* Look up an inode by (subvol, inum) and return its effective IO opts. */
1152 int bch2_inum_opts_get(struct btree_trans *trans, subvol_inum inum, struct bch_io_opts *opts)
1154 struct bch_inode_unpacked inode;
1155 int ret = lockrestart_do(trans, bch2_inode_find_by_inum_trans(trans, inum, &inode));
1160 bch2_inode_opts_get(opts, trans->c, &inode);
/*
 * Remove a single snapshot version of an inode: delete its extents,
 * dirents and xattrs in that snapshot, then replace the inode key with an
 * inode_generation key.  Returns a nested transaction restart on success
 * so the caller unwinds correctly.
 */
1164 static noinline int __bch2_inode_rm_snapshot(struct btree_trans *trans, u64 inum, u32 snapshot)
1166 struct bch_fs *c = trans->c;
1167 struct btree_iter iter = { NULL };
1168 struct bkey_i_inode_generation delete;
1169 struct bch_inode_unpacked inode_u;
1174 ret = bch2_btree_delete_range_trans(trans, BTREE_ID_extents,
1175 SPOS(inum, 0, snapshot),
1176 SPOS(inum, U64_MAX, snapshot),
1178 bch2_btree_delete_range_trans(trans, BTREE_ID_dirents,
1179 SPOS(inum, 0, snapshot),
1180 SPOS(inum, U64_MAX, snapshot),
1182 bch2_btree_delete_range_trans(trans, BTREE_ID_xattrs,
1183 SPOS(inum, 0, snapshot),
1184 SPOS(inum, U64_MAX, snapshot),
1186 } while (ret == -BCH_ERR_transaction_restart_nested);
1190 bch2_trans_begin(trans);
1192 k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes,
1193 SPOS(0, inum, snapshot), BTREE_ITER_intent);
1198 if (!bkey_is_inode(k.k)) {
1199 bch2_fs_inconsistent(c,
1200 "inode %llu:%u not found when deleting",
1206 bch2_inode_unpack(k, &inode_u);
1208 /* Subvolume root? */
1209 if (inode_u.bi_subvol)
1210 bch_warn(c, "deleting inode %llu marked as unlinked, but also a subvolume root!?", inode_u.bi_inum);
1212 bkey_inode_generation_init(&delete.k_i);
1213 delete.k.p = iter.pos;
1214 delete.v.bi_generation = cpu_to_le32(inode_u.bi_generation + 1);
1216 ret = bch2_trans_update(trans, &iter, &delete.k_i, 0) ?:
1217 bch2_trans_commit(trans, NULL, NULL,
1218 BCH_TRANS_COMMIT_no_enospc);
1220 bch2_trans_iter_exit(trans, &iter);
1221 if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
1224 return ret ?: -BCH_ERR_transaction_restart_nested;
1228 * After deleting an inode, there may be versions in older snapshots that should
1229 * also be deleted - if they're not referenced by sibling snapshots and not open
1230 * in other subvolumes:
1232 static int delete_ancestor_snapshot_inodes(struct btree_trans *trans, struct bpos pos)
1234 struct btree_iter iter;
/* walk up through parent-snapshot versions of this inode: */
1238 ret = lockrestart_do(trans,
1239 bkey_err(k = bch2_inode_get_iter_snapshot_parent(trans, &iter, pos, 0)));
1243 bool unlinked = bkey_is_unlinked_inode(k);
1245 bch2_trans_iter_exit(trans, &iter);
/* skip if the inode (or a descendent version) is still open: */
1250 ret = lockrestart_do(trans, bch2_inode_or_descendents_is_open(trans, pos));
1252 return ret < 0 ? ret : 0;
1254 ret = __bch2_inode_rm_snapshot(trans, pos.offset, pos.snapshot);
/* Remove one snapshot version, then any removable ancestor versions. */
1260 int bch2_inode_rm_snapshot(struct btree_trans *trans, u64 inum, u32 snapshot)
1262 return __bch2_inode_rm_snapshot(trans, inum, snapshot) ?:
1263 delete_ancestor_snapshot_inodes(trans, SPOS(0, inum, snapshot));
/*
 * Decide whether an entry in the deleted_inodes btree really refers to an
 * inode we should delete now; repairs (via fsck_err) stale or inconsistent
 * entries: missing inodes, non-empty directories, inodes that aren't
 * actually unlinked, inodes with child snapshots, and wrong
 * has_child_snapshots flags.
 */
1266 static int may_delete_deleted_inode(struct btree_trans *trans,
1267 struct btree_iter *iter,
1269 bool *need_another_pass)
1271 struct bch_fs *c = trans->c;
1272 struct btree_iter inode_iter;
1274 struct bch_inode_unpacked inode;
1275 struct printbuf buf = PRINTBUF;
1278 k = bch2_bkey_get_iter(trans, &inode_iter, BTREE_ID_inodes, pos, BTREE_ITER_cached);
1283 ret = bkey_is_inode(k.k) ? 0 : -BCH_ERR_ENOENT_inode;
1284 if (fsck_err_on(!bkey_is_inode(k.k),
1285 trans, deleted_inode_missing,
1286 "nonexistent inode %llu:%u in deleted_inodes btree",
1287 pos.offset, pos.snapshot))
1290 ret = bch2_inode_unpack(k, &inode);
1294 if (S_ISDIR(inode.bi_mode)) {
1295 ret = bch2_empty_dir_snapshot(trans, pos.offset, 0, pos.snapshot);
1296 if (fsck_err_on(bch2_err_matches(ret, ENOTEMPTY),
1297 trans, deleted_inode_is_dir,
1298 "non empty directory %llu:%u in deleted_inodes btree",
1299 pos.offset, pos.snapshot))
1305 if (fsck_err_on(!(inode.bi_flags & BCH_INODE_unlinked),
1306 trans, deleted_inode_not_unlinked,
1307 "non-deleted inode %llu:%u in deleted_inodes btree",
1308 pos.offset, pos.snapshot))
1311 if (fsck_err_on(inode.bi_flags & BCH_INODE_has_child_snapshot,
1312 trans, deleted_inode_has_child_snapshots,
1313 "inode with child snapshots %llu:%u in deleted_inodes btree",
1314 pos.offset, pos.snapshot))
/* flag not set: double-check against the actual btree state */
1317 ret = bch2_inode_has_child_snapshots(trans, k.k->p);
1322 if (fsck_err(trans, inode_has_child_snapshots_wrong,
1323 "inode has_child_snapshots flag wrong (should be set)\n%s",
1324 (printbuf_reset(&buf),
1325 bch2_inode_unpacked_to_text(&buf, &inode),
1327 inode.bi_flags |= BCH_INODE_has_child_snapshot;
1328 ret = __bch2_fsck_write_inode(trans, &inode);
1336 if (test_bit(BCH_FS_clean_recovery, &c->flags) &&
1337 !fsck_err(trans, deleted_inode_but_clean,
1338 "filesystem marked as clean but have deleted inode %llu:%u",
1339 pos.offset, pos.snapshot)) {
1347 bch2_trans_iter_exit(trans, &inode_iter);
1348 printbuf_exit(&buf);
/* entry shouldn't be here: remove it from the deleted_inodes btree */
1351 ret = bch2_btree_bit_mod_buffered(trans, BTREE_ID_deleted_inodes, pos, false);
1355 int bch2_delete_dead_inodes(struct bch_fs *c)
1357 struct btree_trans *trans = bch2_trans_get(c);
1358 bool need_another_pass;
1362 * if we ran check_inodes() unlinked inodes will have already been
1363 * cleaned up but the write buffer will be out of sync; therefore we
1364 * alway need a write buffer flush
1366 ret = bch2_btree_write_buffer_flush_sync(trans);
1370 need_another_pass = false;
1373 * Weird transaction restart handling here because on successful delete,
1374 * bch2_inode_rm_snapshot() will return a nested transaction restart,
1375 * but we can't retry because the btree write buffer won't have been
1376 * flushed and we'd spin:
1378 ret = for_each_btree_key_commit(trans, iter, BTREE_ID_deleted_inodes, POS_MIN,
1379 BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k,
1380 NULL, NULL, BCH_TRANS_COMMIT_no_enospc, ({
1381 ret = may_delete_deleted_inode(trans, &iter, k.k->p, &need_another_pass);
1383 bch_verbose(c, "deleting unlinked inode %llu:%u", k.k->p.offset, k.k->p.snapshot);
1385 ret = bch2_inode_rm_snapshot(trans, k.k->p.offset, k.k->p.snapshot);
1387 * We don't want to loop here: a transaction restart
1388 * error here means we handled a transaction restart and
1389 * we're actually done, but if we loop we'll retry the
1390 * same key because the write buffer hasn't been flushed
1393 if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
1402 if (!ret && need_another_pass)
1405 bch2_trans_put(trans);