1 /* SPDX-License-Identifier: GPL-2.0 */
3 #define TRACE_SYSTEM bcachefs
5 #if !defined(_TRACE_BCACHEFS_H) || defined(TRACE_HEADER_MULTI_READ)
7 #include <linux/tracepoint.h>
/* Emit the three per-bpos ring-buffer fields (inode, offset, snapshot). */
#define TRACE_BPOS_entries(name)				\
	__field(u64,	name##_inode	)			\
	__field(u64,	name##_offset	)			\
	__field(u32,	name##_snapshot	)
/* Copy a struct bpos into the fields declared by TRACE_BPOS_entries(). */
#define TRACE_BPOS_assign(dst, src)				\
	__entry->dst##_inode		= (src).inode;		\
	__entry->dst##_offset		= (src).offset;		\
	__entry->dst##_snapshot		= (src).snapshot
19 DECLARE_EVENT_CLASS(bpos,
20 TP_PROTO(const struct bpos *p),
28 TRACE_BPOS_assign(p, *p);
31 TP_printk("%llu:%llu:%u", __entry->p_inode, __entry->p_offset, __entry->p_snapshot)
34 DECLARE_EVENT_CLASS(fs_str,
35 TP_PROTO(struct bch_fs *c, const char *str),
44 __entry->dev = c->dev;
48 TP_printk("%d,%d\n%s", MAJOR(__entry->dev), MINOR(__entry->dev), __get_str(str))
51 DECLARE_EVENT_CLASS(trans_str,
52 TP_PROTO(struct btree_trans *trans, unsigned long caller_ip, const char *str),
53 TP_ARGS(trans, caller_ip, str),
57 __array(char, trans_fn, 32 )
58 __field(unsigned long, caller_ip )
63 __entry->dev = trans->c->dev;
64 strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
65 __entry->caller_ip = caller_ip;
69 TP_printk("%d,%d %s %pS %s",
70 MAJOR(__entry->dev), MINOR(__entry->dev),
71 __entry->trans_fn, (void *) __entry->caller_ip, __get_str(str))
74 DECLARE_EVENT_CLASS(trans_str_nocaller,
75 TP_PROTO(struct btree_trans *trans, const char *str),
80 __array(char, trans_fn, 32 )
85 __entry->dev = trans->c->dev;
86 strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
90 TP_printk("%d,%d %s %s",
91 MAJOR(__entry->dev), MINOR(__entry->dev),
92 __entry->trans_fn, __get_str(str))
95 DECLARE_EVENT_CLASS(btree_node_nofs,
96 TP_PROTO(struct bch_fs *c, struct btree *b),
102 __field(u8, btree_id )
103 TRACE_BPOS_entries(pos)
107 __entry->dev = c->dev;
108 __entry->level = b->c.level;
109 __entry->btree_id = b->c.btree_id;
110 TRACE_BPOS_assign(pos, b->key.k.p);
113 TP_printk("%d,%d %u %s %llu:%llu:%u",
114 MAJOR(__entry->dev), MINOR(__entry->dev),
116 bch2_btree_id_str(__entry->btree_id),
117 __entry->pos_inode, __entry->pos_offset, __entry->pos_snapshot)
120 DECLARE_EVENT_CLASS(btree_node,
121 TP_PROTO(struct btree_trans *trans, struct btree *b),
126 __array(char, trans_fn, 32 )
128 __field(u8, btree_id )
129 TRACE_BPOS_entries(pos)
133 __entry->dev = trans->c->dev;
134 strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
135 __entry->level = b->c.level;
136 __entry->btree_id = b->c.btree_id;
137 TRACE_BPOS_assign(pos, b->key.k.p);
140 TP_printk("%d,%d %s %u %s %llu:%llu:%u",
141 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->trans_fn,
143 bch2_btree_id_str(__entry->btree_id),
144 __entry->pos_inode, __entry->pos_offset, __entry->pos_snapshot)
147 DECLARE_EVENT_CLASS(bch_fs,
148 TP_PROTO(struct bch_fs *c),
156 __entry->dev = c->dev;
159 TP_printk("%d,%d", MAJOR(__entry->dev), MINOR(__entry->dev))
162 DECLARE_EVENT_CLASS(btree_trans,
163 TP_PROTO(struct btree_trans *trans),
168 __array(char, trans_fn, 32 )
172 __entry->dev = trans->c->dev;
173 strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
176 TP_printk("%d,%d %s", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->trans_fn)
179 DECLARE_EVENT_CLASS(bio,
180 TP_PROTO(struct bio *bio),
185 __field(sector_t, sector )
186 __field(unsigned int, nr_sector )
187 __array(char, rwbs, 6 )
191 __entry->dev = bio->bi_bdev ? bio_dev(bio) : 0;
192 __entry->sector = bio->bi_iter.bi_sector;
193 __entry->nr_sector = bio->bi_iter.bi_size >> 9;
194 blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
197 TP_printk("%d,%d %s %llu + %u",
198 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
199 (unsigned long long)__entry->sector, __entry->nr_sector)
202 /* disk_accounting.c */
204 TRACE_EVENT(accounting_mem_insert,
205 TP_PROTO(struct bch_fs *c, const char *acc),
210 __field(unsigned, new_nr )
215 __entry->dev = c->dev;
216 __entry->new_nr = c->accounting.k.nr;
220 TP_printk("%d,%d entries %u added %s",
221 MAJOR(__entry->dev), MINOR(__entry->dev),
227 TRACE_EVENT(bch2_sync_fs,
228 TP_PROTO(struct super_block *sb, int wait),
233 __field( dev_t, dev )
239 __entry->dev = sb->s_dev;
240 __entry->wait = wait;
243 TP_printk("dev %d,%d wait %d",
244 MAJOR(__entry->dev), MINOR(__entry->dev),
249 TRACE_EVENT(bch2_fsync,
250 TP_PROTO(struct file *file, int datasync),
252 TP_ARGS(file, datasync),
255 __field( dev_t, dev )
256 __field( ino_t, ino )
257 __field( ino_t, parent )
258 __field( int, datasync )
262 struct dentry *dentry = file->f_path.dentry;
264 __entry->dev = dentry->d_sb->s_dev;
265 __entry->ino = d_inode(dentry)->i_ino;
266 __entry->parent = d_inode(dentry->d_parent)->i_ino;
267 __entry->datasync = datasync;
270 TP_printk("dev %d,%d ino %lu parent %lu datasync %d ",
271 MAJOR(__entry->dev), MINOR(__entry->dev),
272 (unsigned long) __entry->ino,
273 (unsigned long) __entry->parent, __entry->datasync)
277 TRACE_EVENT(write_super,
278 TP_PROTO(struct bch_fs *c, unsigned long ip),
283 __field(unsigned long, ip )
287 __entry->dev = c->dev;
291 TP_printk("%d,%d for %pS",
292 MAJOR(__entry->dev), MINOR(__entry->dev),
293 (void *) __entry->ip)
298 DEFINE_EVENT(bio, read_promote,
299 TP_PROTO(struct bio *bio),
303 TRACE_EVENT(read_nopromote,
304 TP_PROTO(struct bch_fs *c, int ret),
309 __array(char, ret, 32 )
313 __entry->dev = c->dev;
314 strscpy(__entry->ret, bch2_err_str(ret), sizeof(__entry->ret));
317 TP_printk("%d,%d ret %s",
318 MAJOR(__entry->dev), MINOR(__entry->dev),
322 DEFINE_EVENT(bio, read_bounce,
323 TP_PROTO(struct bio *bio),
327 DEFINE_EVENT(bio, read_split,
328 TP_PROTO(struct bio *bio),
332 DEFINE_EVENT(bio, read_retry,
333 TP_PROTO(struct bio *bio),
337 DEFINE_EVENT(bio, read_reuse_race,
338 TP_PROTO(struct bio *bio),
344 DEFINE_EVENT(bch_fs, journal_full,
345 TP_PROTO(struct bch_fs *c),
349 DEFINE_EVENT(fs_str, journal_entry_full,
350 TP_PROTO(struct bch_fs *c, const char *str),
354 DEFINE_EVENT(fs_str, journal_entry_close,
355 TP_PROTO(struct bch_fs *c, const char *str),
359 DEFINE_EVENT(bio, journal_write,
360 TP_PROTO(struct bio *bio),
364 TRACE_EVENT(journal_reclaim_start,
365 TP_PROTO(struct bch_fs *c, bool direct, bool kicked,
366 u64 min_nr, u64 min_key_cache,
367 u64 btree_cache_dirty, u64 btree_cache_total,
368 u64 btree_key_cache_dirty, u64 btree_key_cache_total),
369 TP_ARGS(c, direct, kicked, min_nr, min_key_cache,
370 btree_cache_dirty, btree_cache_total,
371 btree_key_cache_dirty, btree_key_cache_total),
375 __field(bool, direct )
376 __field(bool, kicked )
377 __field(u64, min_nr )
378 __field(u64, min_key_cache )
379 __field(u64, btree_cache_dirty )
380 __field(u64, btree_cache_total )
381 __field(u64, btree_key_cache_dirty )
382 __field(u64, btree_key_cache_total )
386 __entry->dev = c->dev;
387 __entry->direct = direct;
388 __entry->kicked = kicked;
389 __entry->min_nr = min_nr;
390 __entry->min_key_cache = min_key_cache;
391 __entry->btree_cache_dirty = btree_cache_dirty;
392 __entry->btree_cache_total = btree_cache_total;
393 __entry->btree_key_cache_dirty = btree_key_cache_dirty;
394 __entry->btree_key_cache_total = btree_key_cache_total;
397 TP_printk("%d,%d direct %u kicked %u min %llu key cache %llu btree cache %llu/%llu key cache %llu/%llu",
398 MAJOR(__entry->dev), MINOR(__entry->dev),
402 __entry->min_key_cache,
403 __entry->btree_cache_dirty,
404 __entry->btree_cache_total,
405 __entry->btree_key_cache_dirty,
406 __entry->btree_key_cache_total)
409 TRACE_EVENT(journal_reclaim_finish,
410 TP_PROTO(struct bch_fs *c, u64 nr_flushed),
411 TP_ARGS(c, nr_flushed),
415 __field(u64, nr_flushed )
419 __entry->dev = c->dev;
420 __entry->nr_flushed = nr_flushed;
423 TP_printk("%d,%d flushed %llu",
424 MAJOR(__entry->dev), MINOR(__entry->dev),
430 DEFINE_EVENT(bpos, bkey_pack_pos_fail,
431 TP_PROTO(const struct bpos *p),
437 TRACE_EVENT(btree_cache_scan,
438 TP_PROTO(long nr_to_scan, long can_free, long ret),
439 TP_ARGS(nr_to_scan, can_free, ret),
442 __field(long, nr_to_scan )
443 __field(long, can_free )
448 __entry->nr_to_scan = nr_to_scan;
449 __entry->can_free = can_free;
453 TP_printk("scanned for %li nodes, can free %li, ret %li",
454 __entry->nr_to_scan, __entry->can_free, __entry->ret)
457 DEFINE_EVENT(btree_node_nofs, btree_cache_reap,
458 TP_PROTO(struct bch_fs *c, struct btree *b),
462 DEFINE_EVENT(btree_trans, btree_cache_cannibalize_lock_fail,
463 TP_PROTO(struct btree_trans *trans),
467 DEFINE_EVENT(btree_trans, btree_cache_cannibalize_lock,
468 TP_PROTO(struct btree_trans *trans),
472 DEFINE_EVENT(btree_trans, btree_cache_cannibalize,
473 TP_PROTO(struct btree_trans *trans),
477 DEFINE_EVENT(btree_trans, btree_cache_cannibalize_unlock,
478 TP_PROTO(struct btree_trans *trans),
484 DEFINE_EVENT(btree_node, btree_node_read,
485 TP_PROTO(struct btree_trans *trans, struct btree *b),
489 TRACE_EVENT(btree_node_write,
490 TP_PROTO(struct btree *b, unsigned bytes, unsigned sectors),
491 TP_ARGS(b, bytes, sectors),
494 __field(enum btree_node_type, type)
495 __field(unsigned, bytes )
496 __field(unsigned, sectors )
500 __entry->type = btree_node_type(b);
501 __entry->bytes = bytes;
502 __entry->sectors = sectors;
505 TP_printk("bkey type %u bytes %u sectors %u",
506 __entry->type , __entry->bytes, __entry->sectors)
509 DEFINE_EVENT(btree_node, btree_node_alloc,
510 TP_PROTO(struct btree_trans *trans, struct btree *b),
514 DEFINE_EVENT(btree_node, btree_node_free,
515 TP_PROTO(struct btree_trans *trans, struct btree *b),
519 TRACE_EVENT(btree_reserve_get_fail,
520 TP_PROTO(const char *trans_fn,
521 unsigned long caller_ip,
524 TP_ARGS(trans_fn, caller_ip, required, ret),
527 __array(char, trans_fn, 32 )
528 __field(unsigned long, caller_ip )
529 __field(size_t, required )
530 __array(char, ret, 32 )
534 strscpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
535 __entry->caller_ip = caller_ip;
536 __entry->required = required;
537 strscpy(__entry->ret, bch2_err_str(ret), sizeof(__entry->ret));
540 TP_printk("%s %pS required %zu ret %s",
542 (void *) __entry->caller_ip,
547 DEFINE_EVENT(btree_node, btree_node_compact,
548 TP_PROTO(struct btree_trans *trans, struct btree *b),
552 DEFINE_EVENT(btree_node, btree_node_merge,
553 TP_PROTO(struct btree_trans *trans, struct btree *b),
557 DEFINE_EVENT(btree_node, btree_node_split,
558 TP_PROTO(struct btree_trans *trans, struct btree *b),
562 DEFINE_EVENT(btree_node, btree_node_rewrite,
563 TP_PROTO(struct btree_trans *trans, struct btree *b),
567 DEFINE_EVENT(btree_node, btree_node_set_root,
568 TP_PROTO(struct btree_trans *trans, struct btree *b),
572 TRACE_EVENT(btree_path_relock_fail,
573 TP_PROTO(struct btree_trans *trans,
574 unsigned long caller_ip,
575 struct btree_path *path,
577 TP_ARGS(trans, caller_ip, path, level),
580 __array(char, trans_fn, 32 )
581 __field(unsigned long, caller_ip )
582 __field(u8, btree_id )
584 __field(u8, path_idx)
585 TRACE_BPOS_entries(pos)
586 __array(char, node, 24 )
587 __field(u8, self_read_count )
588 __field(u8, self_intent_count)
589 __field(u8, read_count )
590 __field(u8, intent_count )
591 __field(u32, iter_lock_seq )
592 __field(u32, node_lock_seq )
596 struct btree *b = btree_path_node(path, level);
597 struct six_lock_count c;
599 strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
600 __entry->caller_ip = caller_ip;
601 __entry->btree_id = path->btree_id;
602 __entry->level = level;
603 __entry->path_idx = path - trans->paths;
604 TRACE_BPOS_assign(pos, path->pos);
606 c = bch2_btree_node_lock_counts(trans, NULL, &path->l[level].b->c, level);
607 __entry->self_read_count = c.n[SIX_LOCK_read];
608 __entry->self_intent_count = c.n[SIX_LOCK_intent];
611 strscpy(__entry->node, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node));
613 c = six_lock_counts(&path->l[level].b->c.lock);
614 __entry->read_count = c.n[SIX_LOCK_read];
615 __entry->intent_count = c.n[SIX_LOCK_intent];
616 scnprintf(__entry->node, sizeof(__entry->node), "%px", &b->c);
618 __entry->iter_lock_seq = path->l[level].lock_seq;
619 __entry->node_lock_seq = is_btree_node(path, level)
620 ? six_lock_seq(&path->l[level].b->c.lock)
624 TP_printk("%s %pS\nidx %2u btree %s pos %llu:%llu:%u level %u node %s held %u:%u lock count %u:%u iter seq %u lock seq %u",
626 (void *) __entry->caller_ip,
628 bch2_btree_id_str(__entry->btree_id),
631 __entry->pos_snapshot,
634 __entry->self_read_count,
635 __entry->self_intent_count,
637 __entry->intent_count,
638 __entry->iter_lock_seq,
639 __entry->node_lock_seq)
642 TRACE_EVENT(btree_path_upgrade_fail,
643 TP_PROTO(struct btree_trans *trans,
644 unsigned long caller_ip,
645 struct btree_path *path,
647 TP_ARGS(trans, caller_ip, path, level),
650 __array(char, trans_fn, 32 )
651 __field(unsigned long, caller_ip )
652 __field(u8, btree_id )
654 __field(u8, path_idx)
655 TRACE_BPOS_entries(pos)
657 __field(u8, self_read_count )
658 __field(u8, self_intent_count)
659 __field(u8, read_count )
660 __field(u8, intent_count )
661 __field(u32, iter_lock_seq )
662 __field(u32, node_lock_seq )
666 struct six_lock_count c;
668 strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
669 __entry->caller_ip = caller_ip;
670 __entry->btree_id = path->btree_id;
671 __entry->level = level;
672 __entry->path_idx = path - trans->paths;
673 TRACE_BPOS_assign(pos, path->pos);
674 __entry->locked = btree_node_locked(path, level);
676 c = bch2_btree_node_lock_counts(trans, NULL, &path->l[level].b->c, level),
677 __entry->self_read_count = c.n[SIX_LOCK_read];
678 __entry->self_intent_count = c.n[SIX_LOCK_intent];
679 c = six_lock_counts(&path->l[level].b->c.lock);
680 __entry->read_count = c.n[SIX_LOCK_read];
681 __entry->intent_count = c.n[SIX_LOCK_intent];
682 __entry->iter_lock_seq = path->l[level].lock_seq;
683 __entry->node_lock_seq = is_btree_node(path, level)
684 ? six_lock_seq(&path->l[level].b->c.lock)
688 TP_printk("%s %pS\nidx %2u btree %s pos %llu:%llu:%u level %u locked %u held %u:%u lock count %u:%u iter seq %u lock seq %u",
690 (void *) __entry->caller_ip,
692 bch2_btree_id_str(__entry->btree_id),
695 __entry->pos_snapshot,
698 __entry->self_read_count,
699 __entry->self_intent_count,
701 __entry->intent_count,
702 __entry->iter_lock_seq,
703 __entry->node_lock_seq)
706 /* Garbage collection */
708 DEFINE_EVENT(bch_fs, gc_gens_start,
709 TP_PROTO(struct bch_fs *c),
713 DEFINE_EVENT(bch_fs, gc_gens_end,
714 TP_PROTO(struct bch_fs *c),
720 DEFINE_EVENT(fs_str, bucket_alloc,
721 TP_PROTO(struct bch_fs *c, const char *str),
725 DEFINE_EVENT(fs_str, bucket_alloc_fail,
726 TP_PROTO(struct bch_fs *c, const char *str),
730 DECLARE_EVENT_CLASS(discard_buckets_class,
731 TP_PROTO(struct bch_fs *c, u64 seen, u64 open,
732 u64 need_journal_commit, u64 discarded, const char *err),
733 TP_ARGS(c, seen, open, need_journal_commit, discarded, err),
739 __field(u64, need_journal_commit )
740 __field(u64, discarded )
741 __array(char, err, 16 )
745 __entry->dev = c->dev;
746 __entry->seen = seen;
747 __entry->open = open;
748 __entry->need_journal_commit = need_journal_commit;
749 __entry->discarded = discarded;
750 strscpy(__entry->err, err, sizeof(__entry->err));
753 TP_printk("%d%d seen %llu open %llu need_journal_commit %llu discarded %llu err %s",
754 MAJOR(__entry->dev), MINOR(__entry->dev),
757 __entry->need_journal_commit,
762 DEFINE_EVENT(discard_buckets_class, discard_buckets,
763 TP_PROTO(struct bch_fs *c, u64 seen, u64 open,
764 u64 need_journal_commit, u64 discarded, const char *err),
765 TP_ARGS(c, seen, open, need_journal_commit, discarded, err)
768 DEFINE_EVENT(discard_buckets_class, discard_buckets_fast,
769 TP_PROTO(struct bch_fs *c, u64 seen, u64 open,
770 u64 need_journal_commit, u64 discarded, const char *err),
771 TP_ARGS(c, seen, open, need_journal_commit, discarded, err)
774 TRACE_EVENT(bucket_invalidate,
775 TP_PROTO(struct bch_fs *c, unsigned dev, u64 bucket, u32 sectors),
776 TP_ARGS(c, dev, bucket, sectors),
780 __field(u32, dev_idx )
781 __field(u32, sectors )
782 __field(u64, bucket )
786 __entry->dev = c->dev;
787 __entry->dev_idx = dev;
788 __entry->sectors = sectors;
789 __entry->bucket = bucket;
792 TP_printk("%d:%d invalidated %u:%llu cached sectors %u",
793 MAJOR(__entry->dev), MINOR(__entry->dev),
794 __entry->dev_idx, __entry->bucket,
800 TRACE_EVENT(bucket_evacuate,
801 TP_PROTO(struct bch_fs *c, struct bpos *bucket),
806 __field(u32, dev_idx )
807 __field(u64, bucket )
811 __entry->dev = c->dev;
812 __entry->dev_idx = bucket->inode;
813 __entry->bucket = bucket->offset;
816 TP_printk("%d:%d %u:%llu",
817 MAJOR(__entry->dev), MINOR(__entry->dev),
818 __entry->dev_idx, __entry->bucket)
821 DEFINE_EVENT(fs_str, move_extent,
822 TP_PROTO(struct bch_fs *c, const char *str),
826 DEFINE_EVENT(fs_str, move_extent_read,
827 TP_PROTO(struct bch_fs *c, const char *str),
831 DEFINE_EVENT(fs_str, move_extent_write,
832 TP_PROTO(struct bch_fs *c, const char *str),
836 DEFINE_EVENT(fs_str, move_extent_finish,
837 TP_PROTO(struct bch_fs *c, const char *str),
841 DEFINE_EVENT(fs_str, move_extent_fail,
842 TP_PROTO(struct bch_fs *c, const char *str),
846 DEFINE_EVENT(fs_str, move_extent_start_fail,
847 TP_PROTO(struct bch_fs *c, const char *str),
851 TRACE_EVENT(move_data,
852 TP_PROTO(struct bch_fs *c,
853 struct bch_move_stats *stats),
858 __field(u64, keys_moved )
859 __field(u64, keys_raced )
860 __field(u64, sectors_seen )
861 __field(u64, sectors_moved )
862 __field(u64, sectors_raced )
866 __entry->dev = c->dev;
867 __entry->keys_moved = atomic64_read(&stats->keys_moved);
868 __entry->keys_raced = atomic64_read(&stats->keys_raced);
869 __entry->sectors_seen = atomic64_read(&stats->sectors_seen);
870 __entry->sectors_moved = atomic64_read(&stats->sectors_moved);
871 __entry->sectors_raced = atomic64_read(&stats->sectors_raced);
874 TP_printk("%d,%d keys moved %llu raced %llu"
875 "sectors seen %llu moved %llu raced %llu",
876 MAJOR(__entry->dev), MINOR(__entry->dev),
879 __entry->sectors_seen,
880 __entry->sectors_moved,
881 __entry->sectors_raced)
884 TRACE_EVENT(evacuate_bucket,
885 TP_PROTO(struct bch_fs *c, struct bpos *bucket,
886 unsigned sectors, unsigned bucket_size,
888 TP_ARGS(c, bucket, sectors, bucket_size, ret),
892 __field(u64, member )
893 __field(u64, bucket )
894 __field(u32, sectors )
895 __field(u32, bucket_size )
900 __entry->dev = c->dev;
901 __entry->member = bucket->inode;
902 __entry->bucket = bucket->offset;
903 __entry->sectors = sectors;
904 __entry->bucket_size = bucket_size;
908 TP_printk("%d,%d %llu:%llu sectors %u/%u ret %i",
909 MAJOR(__entry->dev), MINOR(__entry->dev),
910 __entry->member, __entry->bucket,
911 __entry->sectors, __entry->bucket_size,
916 TP_PROTO(struct bch_fs *c,
920 TP_ARGS(c, buckets, sectors_seen, sectors_moved),
924 __field(u64, buckets )
925 __field(u64, sectors_seen )
926 __field(u64, sectors_moved )
930 __entry->dev = c->dev;
931 __entry->buckets = buckets;
932 __entry->sectors_seen = sectors_seen;
933 __entry->sectors_moved = sectors_moved;
936 TP_printk("%d,%d buckets %llu sectors seen %llu moved %llu",
937 MAJOR(__entry->dev), MINOR(__entry->dev),
939 __entry->sectors_seen,
940 __entry->sectors_moved)
943 TRACE_EVENT(copygc_wait,
944 TP_PROTO(struct bch_fs *c,
945 u64 wait_amount, u64 until),
946 TP_ARGS(c, wait_amount, until),
950 __field(u64, wait_amount )
955 __entry->dev = c->dev;
956 __entry->wait_amount = wait_amount;
957 __entry->until = until;
960 TP_printk("%d,%u waiting for %llu sectors until %llu",
961 MAJOR(__entry->dev), MINOR(__entry->dev),
962 __entry->wait_amount, __entry->until)
965 /* btree transactions: */
967 DECLARE_EVENT_CLASS(transaction_event,
968 TP_PROTO(struct btree_trans *trans,
969 unsigned long caller_ip),
970 TP_ARGS(trans, caller_ip),
973 __array(char, trans_fn, 32 )
974 __field(unsigned long, caller_ip )
978 strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
979 __entry->caller_ip = caller_ip;
982 TP_printk("%s %pS", __entry->trans_fn, (void *) __entry->caller_ip)
985 DEFINE_EVENT(transaction_event, transaction_commit,
986 TP_PROTO(struct btree_trans *trans,
987 unsigned long caller_ip),
988 TP_ARGS(trans, caller_ip)
991 DEFINE_EVENT(transaction_event, trans_restart_injected,
992 TP_PROTO(struct btree_trans *trans,
993 unsigned long caller_ip),
994 TP_ARGS(trans, caller_ip)
997 TRACE_EVENT(trans_restart_split_race,
998 TP_PROTO(struct btree_trans *trans,
999 unsigned long caller_ip,
1001 TP_ARGS(trans, caller_ip, b),
1004 __array(char, trans_fn, 32 )
1005 __field(unsigned long, caller_ip )
1007 __field(u16, written )
1008 __field(u16, blocks )
1009 __field(u16, u64s_remaining )
1013 strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
1014 __entry->caller_ip = caller_ip;
1015 __entry->level = b->c.level;
1016 __entry->written = b->written;
1017 __entry->blocks = btree_blocks(trans->c);
1018 __entry->u64s_remaining = bch2_btree_keys_u64s_remaining(b);
1021 TP_printk("%s %pS l=%u written %u/%u u64s remaining %u",
1022 __entry->trans_fn, (void *) __entry->caller_ip,
1024 __entry->written, __entry->blocks,
1025 __entry->u64s_remaining)
1028 TRACE_EVENT(trans_blocked_journal_reclaim,
1029 TP_PROTO(struct btree_trans *trans,
1030 unsigned long caller_ip),
1031 TP_ARGS(trans, caller_ip),
1034 __array(char, trans_fn, 32 )
1035 __field(unsigned long, caller_ip )
1037 __field(unsigned long, key_cache_nr_keys )
1038 __field(unsigned long, key_cache_nr_dirty )
1039 __field(long, must_wait )
1043 strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
1044 __entry->caller_ip = caller_ip;
1045 __entry->key_cache_nr_keys = atomic_long_read(&trans->c->btree_key_cache.nr_keys);
1046 __entry->key_cache_nr_dirty = atomic_long_read(&trans->c->btree_key_cache.nr_dirty);
1047 __entry->must_wait = __bch2_btree_key_cache_must_wait(trans->c);
1050 TP_printk("%s %pS key cache keys %lu dirty %lu must_wait %li",
1051 __entry->trans_fn, (void *) __entry->caller_ip,
1052 __entry->key_cache_nr_keys,
1053 __entry->key_cache_nr_dirty,
1057 TRACE_EVENT(trans_restart_journal_preres_get,
1058 TP_PROTO(struct btree_trans *trans,
1059 unsigned long caller_ip,
1061 TP_ARGS(trans, caller_ip, flags),
1064 __array(char, trans_fn, 32 )
1065 __field(unsigned long, caller_ip )
1066 __field(unsigned, flags )
1070 strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
1071 __entry->caller_ip = caller_ip;
1072 __entry->flags = flags;
1075 TP_printk("%s %pS %x", __entry->trans_fn,
1076 (void *) __entry->caller_ip,
1080 DEFINE_EVENT(transaction_event, trans_restart_fault_inject,
1081 TP_PROTO(struct btree_trans *trans,
1082 unsigned long caller_ip),
1083 TP_ARGS(trans, caller_ip)
1086 DEFINE_EVENT(transaction_event, trans_traverse_all,
1087 TP_PROTO(struct btree_trans *trans,
1088 unsigned long caller_ip),
1089 TP_ARGS(trans, caller_ip)
1092 DEFINE_EVENT(transaction_event, trans_restart_key_cache_raced,
1093 TP_PROTO(struct btree_trans *trans,
1094 unsigned long caller_ip),
1095 TP_ARGS(trans, caller_ip)
1098 DEFINE_EVENT(trans_str, trans_restart_too_many_iters,
1099 TP_PROTO(struct btree_trans *trans,
1100 unsigned long caller_ip,
1102 TP_ARGS(trans, caller_ip, paths)
1105 DECLARE_EVENT_CLASS(transaction_restart_iter,
1106 TP_PROTO(struct btree_trans *trans,
1107 unsigned long caller_ip,
1108 struct btree_path *path),
1109 TP_ARGS(trans, caller_ip, path),
1112 __array(char, trans_fn, 32 )
1113 __field(unsigned long, caller_ip )
1114 __field(u8, btree_id )
1115 TRACE_BPOS_entries(pos)
1119 strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
1120 __entry->caller_ip = caller_ip;
1121 __entry->btree_id = path->btree_id;
1122 TRACE_BPOS_assign(pos, path->pos)
1125 TP_printk("%s %pS btree %s pos %llu:%llu:%u",
1127 (void *) __entry->caller_ip,
1128 bch2_btree_id_str(__entry->btree_id),
1130 __entry->pos_offset,
1131 __entry->pos_snapshot)
1134 DEFINE_EVENT(transaction_restart_iter, trans_restart_btree_node_reused,
1135 TP_PROTO(struct btree_trans *trans,
1136 unsigned long caller_ip,
1137 struct btree_path *path),
1138 TP_ARGS(trans, caller_ip, path)
1141 DEFINE_EVENT(transaction_restart_iter, trans_restart_btree_node_split,
1142 TP_PROTO(struct btree_trans *trans,
1143 unsigned long caller_ip,
1144 struct btree_path *path),
1145 TP_ARGS(trans, caller_ip, path)
1148 TRACE_EVENT(trans_restart_upgrade,
1149 TP_PROTO(struct btree_trans *trans,
1150 unsigned long caller_ip,
1151 struct btree_path *path,
1152 unsigned old_locks_want,
1153 unsigned new_locks_want,
1154 struct get_locks_fail *f),
1155 TP_ARGS(trans, caller_ip, path, old_locks_want, new_locks_want, f),
1158 __array(char, trans_fn, 32 )
1159 __field(unsigned long, caller_ip )
1160 __field(u8, btree_id )
1161 __field(u8, old_locks_want )
1162 __field(u8, new_locks_want )
1164 __field(u32, path_seq )
1165 __field(u32, node_seq )
1166 TRACE_BPOS_entries(pos)
1170 strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
1171 __entry->caller_ip = caller_ip;
1172 __entry->btree_id = path->btree_id;
1173 __entry->old_locks_want = old_locks_want;
1174 __entry->new_locks_want = new_locks_want;
1175 __entry->level = f->l;
1176 __entry->path_seq = path->l[f->l].lock_seq;
1177 __entry->node_seq = IS_ERR_OR_NULL(f->b) ? 0 : f->b->c.lock.seq;
1178 TRACE_BPOS_assign(pos, path->pos)
1181 TP_printk("%s %pS btree %s pos %llu:%llu:%u locks_want %u -> %u level %u path seq %u node seq %u",
1183 (void *) __entry->caller_ip,
1184 bch2_btree_id_str(__entry->btree_id),
1186 __entry->pos_offset,
1187 __entry->pos_snapshot,
1188 __entry->old_locks_want,
1189 __entry->new_locks_want,
1195 DEFINE_EVENT(trans_str, trans_restart_relock,
1196 TP_PROTO(struct btree_trans *trans, unsigned long caller_ip, const char *str),
1197 TP_ARGS(trans, caller_ip, str)
1200 DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_next_node,
1201 TP_PROTO(struct btree_trans *trans,
1202 unsigned long caller_ip,
1203 struct btree_path *path),
1204 TP_ARGS(trans, caller_ip, path)
1207 DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_parent_for_fill,
1208 TP_PROTO(struct btree_trans *trans,
1209 unsigned long caller_ip,
1210 struct btree_path *path),
1211 TP_ARGS(trans, caller_ip, path)
1214 DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_after_fill,
1215 TP_PROTO(struct btree_trans *trans,
1216 unsigned long caller_ip,
1217 struct btree_path *path),
1218 TP_ARGS(trans, caller_ip, path)
1221 DEFINE_EVENT(transaction_event, trans_restart_key_cache_upgrade,
1222 TP_PROTO(struct btree_trans *trans,
1223 unsigned long caller_ip),
1224 TP_ARGS(trans, caller_ip)
1227 DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_key_cache_fill,
1228 TP_PROTO(struct btree_trans *trans,
1229 unsigned long caller_ip,
1230 struct btree_path *path),
1231 TP_ARGS(trans, caller_ip, path)
1234 DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_path,
1235 TP_PROTO(struct btree_trans *trans,
1236 unsigned long caller_ip,
1237 struct btree_path *path),
1238 TP_ARGS(trans, caller_ip, path)
1241 DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_path_intent,
1242 TP_PROTO(struct btree_trans *trans,
1243 unsigned long caller_ip,
1244 struct btree_path *path),
1245 TP_ARGS(trans, caller_ip, path)
1248 DEFINE_EVENT(transaction_restart_iter, trans_restart_traverse,
1249 TP_PROTO(struct btree_trans *trans,
1250 unsigned long caller_ip,
1251 struct btree_path *path),
1252 TP_ARGS(trans, caller_ip, path)
1255 DEFINE_EVENT(transaction_restart_iter, trans_restart_memory_allocation_failure,
1256 TP_PROTO(struct btree_trans *trans,
1257 unsigned long caller_ip,
1258 struct btree_path *path),
1259 TP_ARGS(trans, caller_ip, path)
1262 DEFINE_EVENT(trans_str_nocaller, trans_restart_would_deadlock,
1263 TP_PROTO(struct btree_trans *trans,
1265 TP_ARGS(trans, cycle)
1268 DEFINE_EVENT(transaction_event, trans_restart_would_deadlock_recursion_limit,
1269 TP_PROTO(struct btree_trans *trans,
1270 unsigned long caller_ip),
1271 TP_ARGS(trans, caller_ip)
1274 TRACE_EVENT(trans_restart_would_deadlock_write,
1275 TP_PROTO(struct btree_trans *trans),
1279 __array(char, trans_fn, 32 )
1283 strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
1286 TP_printk("%s", __entry->trans_fn)
1289 TRACE_EVENT(trans_restart_mem_realloced,
1290 TP_PROTO(struct btree_trans *trans,
1291 unsigned long caller_ip,
1292 unsigned long bytes),
1293 TP_ARGS(trans, caller_ip, bytes),
1296 __array(char, trans_fn, 32 )
1297 __field(unsigned long, caller_ip )
1298 __field(unsigned long, bytes )
1302 strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
1303 __entry->caller_ip = caller_ip;
1304 __entry->bytes = bytes;
1307 TP_printk("%s %pS bytes %lu",
1309 (void *) __entry->caller_ip,
1313 TRACE_EVENT(trans_restart_key_cache_key_realloced,
1314 TP_PROTO(struct btree_trans *trans,
1315 unsigned long caller_ip,
1316 struct btree_path *path,
1319 TP_ARGS(trans, caller_ip, path, old_u64s, new_u64s),
1322 __array(char, trans_fn, 32 )
1323 __field(unsigned long, caller_ip )
1324 __field(enum btree_id, btree_id )
1325 TRACE_BPOS_entries(pos)
1326 __field(u32, old_u64s )
1327 __field(u32, new_u64s )
1331 strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
1332 __entry->caller_ip = caller_ip;
1334 __entry->btree_id = path->btree_id;
1335 TRACE_BPOS_assign(pos, path->pos);
1336 __entry->old_u64s = old_u64s;
1337 __entry->new_u64s = new_u64s;
1340 TP_printk("%s %pS btree %s pos %llu:%llu:%u old_u64s %u new_u64s %u",
1342 (void *) __entry->caller_ip,
1343 bch2_btree_id_str(__entry->btree_id),
1345 __entry->pos_offset,
1346 __entry->pos_snapshot,
1351 DEFINE_EVENT(transaction_event, trans_restart_write_buffer_flush,
1352 TP_PROTO(struct btree_trans *trans,
1353 unsigned long caller_ip),
1354 TP_ARGS(trans, caller_ip)
1357 TRACE_EVENT(path_downgrade,
1358 TP_PROTO(struct btree_trans *trans,
1359 unsigned long caller_ip,
1360 struct btree_path *path,
1361 unsigned old_locks_want),
1362 TP_ARGS(trans, caller_ip, path, old_locks_want),
1365 __array(char, trans_fn, 32 )
1366 __field(unsigned long, caller_ip )
1367 __field(unsigned, old_locks_want )
1368 __field(unsigned, new_locks_want )
1369 __field(unsigned, btree )
1370 TRACE_BPOS_entries(pos)
1374 strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
1375 __entry->caller_ip = caller_ip;
1376 __entry->old_locks_want = old_locks_want;
1377 __entry->new_locks_want = path->locks_want;
1378 __entry->btree = path->btree_id;
1379 TRACE_BPOS_assign(pos, path->pos);
1382 TP_printk("%s %pS locks_want %u -> %u %s %llu:%llu:%u",
1384 (void *) __entry->caller_ip,
1385 __entry->old_locks_want,
1386 __entry->new_locks_want,
1387 bch2_btree_id_str(__entry->btree),
1389 __entry->pos_offset,
1390 __entry->pos_snapshot)
1393 TRACE_EVENT(key_cache_fill,
1394 TP_PROTO(struct btree_trans *trans, const char *key),
1395 TP_ARGS(trans, key),
1398 __array(char, trans_fn, 32 )
1403 strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
1407 TP_printk("%s %s", __entry->trans_fn, __get_str(key))
1410 TRACE_EVENT(write_buffer_flush,
1411 TP_PROTO(struct btree_trans *trans, size_t nr, size_t skipped, size_t fast, size_t size),
1412 TP_ARGS(trans, nr, skipped, fast, size),
1415 __field(size_t, nr )
1416 __field(size_t, skipped )
1417 __field(size_t, fast )
1418 __field(size_t, size )
1423 __entry->skipped = skipped;
1424 __entry->fast = fast;
1425 __entry->size = size;
1428 TP_printk("%zu/%zu skipped %zu fast %zu",
1429 __entry->nr, __entry->size, __entry->skipped, __entry->fast)
1432 TRACE_EVENT(write_buffer_flush_sync,
1433 TP_PROTO(struct btree_trans *trans, unsigned long caller_ip),
1434 TP_ARGS(trans, caller_ip),
1437 __array(char, trans_fn, 32 )
1438 __field(unsigned long, caller_ip )
1442 strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
1443 __entry->caller_ip = caller_ip;
1446 TP_printk("%s %pS", __entry->trans_fn, (void *) __entry->caller_ip)
/* Tracepoint: write buffer flush slow path; logs how many entries went slowpath out of the total. */
1449 TRACE_EVENT(write_buffer_flush_slowpath,
1450 TP_PROTO(struct btree_trans *trans, size_t slowpath, size_t total),
1451 TP_ARGS(trans, slowpath, total),
1454 __field(size_t, slowpath )
1455 __field(size_t, total )
1459 __entry->slowpath = slowpath;
1460 __entry->total = total;
1463 TP_printk("%zu/%zu", __entry->slowpath, __entry->total)
/* Tracepoint: a conditional write buffer flush; records trans fn, caller IP, and the key as a string. */
1466 TRACE_EVENT(write_buffer_maybe_flush,
1467 TP_PROTO(struct btree_trans *trans, unsigned long caller_ip, const char *key),
1468 TP_ARGS(trans, caller_ip, key),
1471 __array(char, trans_fn, 32 )
1472 __field(unsigned long, caller_ip )
1477 strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
1481 TP_printk("%s %pS %s", __entry->trans_fn, (void *) __entry->caller_ip, __get_str(key))
/* rebalance_extent: fs_str-class event (logs dev major:minor plus a preformatted string). */
1484 DEFINE_EVENT(fs_str, rebalance_extent,
1485 TP_PROTO(struct bch_fs *c, const char *str),
/* data_update: fs_str-class event (logs dev major:minor plus a preformatted string). */
1489 DEFINE_EVENT(fs_str, data_update,
1490 TP_PROTO(struct bch_fs *c, const char *str),
/* Tracepoint: a bcachefs-private error code downcast to a std errno; both are stored as strings, plus the caller's symbolized IP. */
1494 TRACE_EVENT(error_downcast,
1495 TP_PROTO(int bch_err, int std_err, unsigned long ip),
1496 TP_ARGS(bch_err, std_err, ip),
1499 __array(char, bch_err, 32 )
1500 __array(char, std_err, 32 )
1501 __array(char, ip, 32 )
1505 strscpy(__entry->bch_err, bch2_err_str(bch_err), sizeof(__entry->bch_err));
1506 strscpy(__entry->std_err, bch2_err_str(std_err), sizeof(__entry->std_err));
1507 snprintf(__entry->ip, sizeof(__entry->ip), "%ps", (void *) ip);
1510 TP_printk("%s -> %s %s", __entry->bch_err, __entry->std_err, __entry->ip)
1513 #ifdef CONFIG_BCACHEFS_PATH_TRACEPOINTS
/*
 * Tracepoint: an update queued via a btree path; records the path index,
 * btree, position, overwrite flag, and this update's index out of
 * trans->nr_updates.
 */
1515 TRACE_EVENT(update_by_path,
1516 TP_PROTO(struct btree_trans *trans, struct btree_path *path,
1517 struct btree_insert_entry *i, bool overwrite),
1518 TP_ARGS(trans, path, i, overwrite),
1521 __array(char, trans_fn, 32 )
1522 __field(btree_path_idx_t, path_idx )
1523 __field(u8, btree_id )
1524 TRACE_BPOS_entries(pos)
1525 __field(u8, overwrite )
1526 __field(btree_path_idx_t, update_idx )
1527 __field(btree_path_idx_t, nr_updates )
1531 strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
1532 __entry->path_idx = path - trans->paths;
1533 __entry->btree_id = path->btree_id;
1534 TRACE_BPOS_assign(pos, path->pos);
1535 __entry->overwrite = overwrite;
1536 __entry->update_idx = i - trans->updates;
1537 __entry->nr_updates = trans->nr_updates;
1540 TP_printk("%s path %3u btree %s pos %llu:%llu:%u overwrite %u update %u/%u",
1543 bch2_btree_id_str(__entry->btree_id),
1545 __entry->pos_offset,
1546 __entry->pos_snapshot,
1548 __entry->update_idx,
1549 __entry->nr_updates)
/*
 * Tracepoint: taking a lock on a btree node (or key cache entry); records
 * the node's address (as raw %px), btree, level, and six-lock sequence.
 */
1552 TRACE_EVENT(btree_path_lock,
1553 TP_PROTO(struct btree_trans *trans,
1554 unsigned long caller_ip,
1555 struct btree_bkey_cached_common *b),
1556 TP_ARGS(trans, caller_ip, b),
1559 __array(char, trans_fn, 32 )
1560 __field(unsigned long, caller_ip )
1561 __field(u8, btree_id )
1563 __array(char, node, 24 )
1564 __field(u32, lock_seq )
1568 strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
1569 __entry->caller_ip = caller_ip;
1570 __entry->btree_id = b->btree_id;
1571 __entry->level = b->level;
1573 scnprintf(__entry->node, sizeof(__entry->node), "%px", b);
1574 __entry->lock_seq = six_lock_seq(&b->lock);
1577 TP_printk("%s %pS\nbtree %s level %u node %s lock seq %u",
1579 (void *) __entry->caller_ip,
1580 bch2_btree_id_str(__entry->btree_id),
/* Event class: generic btree path event; snapshots the path index, refcount, btree, and position. */
1586 DECLARE_EVENT_CLASS(btree_path_ev,
1587 TP_PROTO(struct btree_trans *trans, struct btree_path *path),
1588 TP_ARGS(trans, path),
1593 __field(u8, btree_id )
1594 TRACE_BPOS_entries(pos)
1598 __entry->idx = path - trans->paths;
1599 __entry->ref = path->ref;
1600 __entry->btree_id = path->btree_id;
1601 TRACE_BPOS_assign(pos, path->pos);
1604 TP_printk("path %3u ref %u btree %s pos %llu:%llu:%u",
1605 __entry->idx, __entry->ref,
1606 bch2_btree_id_str(__entry->btree_id),
1608 __entry->pos_offset,
1609 __entry->pos_snapshot)
/* btree_path_get_ll: low-level path ref get, btree_path_ev class. */
1612 DEFINE_EVENT(btree_path_ev, btree_path_get_ll,
1613 TP_PROTO(struct btree_trans *trans, struct btree_path *path),
1614 TP_ARGS(trans, path)
/* btree_path_put_ll: low-level path ref put, btree_path_ev class. */
1617 DEFINE_EVENT(btree_path_ev, btree_path_put_ll,
1618 TP_PROTO(struct btree_trans *trans, struct btree_path *path),
1619 TP_ARGS(trans, path)
/* btree_path_should_be_locked: path marked should_be_locked, btree_path_ev class. */
1622 DEFINE_EVENT(btree_path_ev, btree_path_should_be_locked,
1623 TP_PROTO(struct btree_trans *trans, struct btree_path *path),
1624 TP_ARGS(trans, path)
/* Tracepoint: allocation of a new btree path; records its index, locks_want, btree, and position. */
1627 TRACE_EVENT(btree_path_alloc,
1628 TP_PROTO(struct btree_trans *trans, struct btree_path *path),
1629 TP_ARGS(trans, path),
1632 __field(btree_path_idx_t, idx )
1633 __field(u8, locks_want )
1634 __field(u8, btree_id )
1635 TRACE_BPOS_entries(pos)
1639 __entry->idx = path - trans->paths;
1640 __entry->locks_want = path->locks_want;
1641 __entry->btree_id = path->btree_id;
1642 TRACE_BPOS_assign(pos, path->pos);
1645 TP_printk("path %3u btree %s locks_want %u pos %llu:%llu:%u",
1647 bch2_btree_id_str(__entry->btree_id),
1648 __entry->locks_want,
1650 __entry->pos_offset,
1651 __entry->pos_snapshot)
/*
 * Tracepoint: taking a reference on a btree path, possibly moving it;
 * records both the path's current position and the requested new position.
 */
1654 TRACE_EVENT(btree_path_get,
1655 TP_PROTO(struct btree_trans *trans, struct btree_path *path, struct bpos *new_pos),
1656 TP_ARGS(trans, path, new_pos),
1659 __field(btree_path_idx_t, idx )
1661 __field(u8, preserve )
1662 __field(u8, locks_want )
1663 __field(u8, btree_id )
1664 TRACE_BPOS_entries(old_pos)
1665 TRACE_BPOS_entries(new_pos)
1669 __entry->idx = path - trans->paths;
1670 __entry->ref = path->ref;
1671 __entry->preserve = path->preserve;
1672 __entry->locks_want = path->locks_want;
1673 __entry->btree_id = path->btree_id;
1674 TRACE_BPOS_assign(old_pos, path->pos);
1675 TRACE_BPOS_assign(new_pos, *new_pos);
1678 TP_printk(" path %3u ref %u preserve %u btree %s locks_want %u pos %llu:%llu:%u -> %llu:%llu:%u",
1682 bch2_btree_id_str(__entry->btree_id),
1683 __entry->locks_want,
1684 __entry->old_pos_inode,
1685 __entry->old_pos_offset,
1686 __entry->old_pos_snapshot,
1687 __entry->new_pos_inode,
1688 __entry->new_pos_offset,
1689 __entry->new_pos_snapshot)
/* Event class: duplicating a btree path; records the source path's state and the new path's index. */
1692 DECLARE_EVENT_CLASS(btree_path_clone,
1693 TP_PROTO(struct btree_trans *trans, struct btree_path *path, struct btree_path *new),
1694 TP_ARGS(trans, path, new),
1697 __field(btree_path_idx_t, idx )
1698 __field(u8, new_idx )
1699 __field(u8, btree_id )
1701 __field(u8, preserve )
1702 TRACE_BPOS_entries(pos)
1706 __entry->idx = path - trans->paths;
1707 __entry->new_idx = new - trans->paths;
1708 __entry->btree_id = path->btree_id;
1709 __entry->ref = path->ref;
1710 __entry->preserve = path->preserve;
1711 TRACE_BPOS_assign(pos, path->pos);
1714 TP_printk(" path %3u ref %u preserve %u btree %s %llu:%llu:%u -> %u",
1718 bch2_btree_id_str(__entry->btree_id),
1720 __entry->pos_offset,
1721 __entry->pos_snapshot,
/* btree_path_clone: a path was cloned, btree_path_clone class. */
1725 DEFINE_EVENT(btree_path_clone, btree_path_clone,
1726 TP_PROTO(struct btree_trans *trans, struct btree_path *path, struct btree_path *new),
1727 TP_ARGS(trans, path, new)
/* btree_path_save_pos: path position saved into a duplicate, btree_path_clone class. */
1730 DEFINE_EVENT(btree_path_clone, btree_path_save_pos,
1731 TP_PROTO(struct btree_trans *trans, struct btree_path *path, struct btree_path *new),
1732 TP_ARGS(trans, path, new)
/*
 * Event class: btree path traversal; snapshots the path's identity,
 * position, lock state (nodes_locked is 2 bits per level), and the
 * per-level node pointers for levels 0-3 — each formatted either as an
 * error string (when the level's b pointer is an ERR_PTR) or as a raw
 * kernel pointer via %px.
 */
1735 DECLARE_EVENT_CLASS(btree_path_traverse,
1736 TP_PROTO(struct btree_trans *trans,
1737 struct btree_path *path),
1738 TP_ARGS(trans, path),
1741 __array(char, trans_fn, 32 )
1742 __field(btree_path_idx_t, idx )
1744 __field(u8, preserve )
1745 __field(u8, should_be_locked )
1746 __field(u8, btree_id )
1748 TRACE_BPOS_entries(pos)
1749 __field(u8, locks_want )
1750 __field(u8, nodes_locked )
1751 __array(char, node0, 24 )
1752 __array(char, node1, 24 )
1753 __array(char, node2, 24 )
1754 __array(char, node3, 24 )
1758 strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
1760 __entry->idx = path - trans->paths;
1761 __entry->ref = path->ref;
1762 __entry->preserve = path->preserve;
1763 __entry->btree_id = path->btree_id;
1764 __entry->level = path->level;
1765 TRACE_BPOS_assign(pos, path->pos);
1767 __entry->locks_want = path->locks_want;
1768 __entry->nodes_locked = path->nodes_locked;
1769 struct btree *b = path->l[0].b;
1771 strscpy(__entry->node0, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node0));
1773 scnprintf(__entry->node0, sizeof(__entry->node0), "%px", &b->c);
/* Fixed copy-paste: size each copy by its own destination buffer, not node0. */
1776 strscpy(__entry->node1, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node1));
1778 scnprintf(__entry->node1, sizeof(__entry->node1), "%px", &b->c);
1781 strscpy(__entry->node2, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node2));
1783 scnprintf(__entry->node2, sizeof(__entry->node2), "%px", &b->c);
1786 strscpy(__entry->node3, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node3));
1788 scnprintf(__entry->node3, sizeof(__entry->node3), "%px", &b->c);
1791 TP_printk("%s\npath %3u ref %u preserve %u btree %s %llu:%llu:%u level %u locks_want %u\n"
1792 "locks %u %u %u %u node %s %s %s %s",
1797 bch2_btree_id_str(__entry->btree_id),
1799 __entry->pos_offset,
1800 __entry->pos_snapshot,
1802 __entry->locks_want,
1803 (__entry->nodes_locked >> 6) & 3,
1804 (__entry->nodes_locked >> 4) & 3,
1805 (__entry->nodes_locked >> 2) & 3,
1806 (__entry->nodes_locked >> 0) & 3,
/* btree_path_traverse_start: traversal began, btree_path_traverse class. */
1813 DEFINE_EVENT(btree_path_traverse, btree_path_traverse_start,
1814 TP_PROTO(struct btree_trans *trans,
1815 struct btree_path *path),
1816 TP_ARGS(trans, path)
/* btree_path_traverse_end: traversal finished, btree_path_traverse class. */
1819 DEFINE_EVENT(btree_path_traverse, btree_path_traverse_end,
1820 TP_PROTO(struct btree_trans *trans, struct btree_path *path),
1821 TP_ARGS(trans, path)
/*
 * Tracepoint: moving an existing btree path to a new position; records the
 * old and new bpos, the per-level lock state (nodes_locked is 2 bits per
 * level), and the level 0-3 node pointers — each formatted either as an
 * error string (ERR_PTR case) or as a raw kernel pointer via %px.
 */
1824 TRACE_EVENT(btree_path_set_pos,
1825 TP_PROTO(struct btree_trans *trans,
1826 struct btree_path *path,
1827 struct bpos *new_pos),
1828 TP_ARGS(trans, path, new_pos),
1831 __field(btree_path_idx_t, idx )
1833 __field(u8, preserve )
1834 __field(u8, btree_id )
1835 TRACE_BPOS_entries(old_pos)
1836 TRACE_BPOS_entries(new_pos)
1837 __field(u8, locks_want )
1838 __field(u8, nodes_locked )
1839 __array(char, node0, 24 )
1840 __array(char, node1, 24 )
1841 __array(char, node2, 24 )
1842 __array(char, node3, 24 )
1846 __entry->idx = path - trans->paths;
1847 __entry->ref = path->ref;
1848 __entry->preserve = path->preserve;
1849 __entry->btree_id = path->btree_id;
1850 TRACE_BPOS_assign(old_pos, path->pos);
1851 TRACE_BPOS_assign(new_pos, *new_pos);
1853 __entry->nodes_locked = path->nodes_locked;
1854 struct btree *b = path->l[0].b;
1856 strscpy(__entry->node0, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node0));
1858 scnprintf(__entry->node0, sizeof(__entry->node0), "%px", &b->c);
/* Fixed copy-paste: size each copy by its own destination buffer, not node0. */
1861 strscpy(__entry->node1, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node1));
1863 scnprintf(__entry->node1, sizeof(__entry->node1), "%px", &b->c);
1866 strscpy(__entry->node2, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node2));
1868 scnprintf(__entry->node2, sizeof(__entry->node2), "%px", &b->c);
1871 strscpy(__entry->node3, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node3));
1873 scnprintf(__entry->node3, sizeof(__entry->node3), "%px", &b->c);
1876 TP_printk("\npath %3u ref %u preserve %u btree %s %llu:%llu:%u -> %llu:%llu:%u\n"
1877 "locks %u %u %u %u node %s %s %s %s",
1881 bch2_btree_id_str(__entry->btree_id),
1882 __entry->old_pos_inode,
1883 __entry->old_pos_offset,
1884 __entry->old_pos_snapshot,
1885 __entry->new_pos_inode,
1886 __entry->new_pos_offset,
1887 __entry->new_pos_snapshot,
1888 (__entry->nodes_locked >> 6) & 3,
1889 (__entry->nodes_locked >> 4) & 3,
1890 (__entry->nodes_locked >> 2) & 3,
1891 (__entry->nodes_locked >> 0) & 3,
/*
 * Tracepoint: freeing a btree path; records whether it was preserved /
 * should_be_locked, plus the duplicate path's index (-1 if none) and
 * whether that duplicate currently holds a node lock.
 */
1898 TRACE_EVENT(btree_path_free,
1899 TP_PROTO(struct btree_trans *trans, btree_path_idx_t path, struct btree_path *dup),
1900 TP_ARGS(trans, path, dup),
1903 __field(btree_path_idx_t, idx )
1904 __field(u8, preserve )
1905 __field(u8, should_be_locked)
1907 __field(u8, dup_locked )
1911 __entry->idx = path;
1912 __entry->preserve = trans->paths[path].preserve;
1913 __entry->should_be_locked = trans->paths[path].should_be_locked;
1914 __entry->dup = dup ? dup - trans->paths : -1;
1915 __entry->dup_locked = dup ? btree_node_locked(dup, dup->level) : 0;
1918 TP_printk(" path %3u %c %c dup %2i locked %u", __entry->idx,
1919 __entry->preserve ? 'P' : ' ',
1920 __entry->should_be_locked ? 'S' : ' ',
1922 __entry->dup_locked)
/* Tracepoint: a path freed at transaction begin; records only the path index. */
1925 TRACE_EVENT(btree_path_free_trans_begin,
1926 TP_PROTO(btree_path_idx_t path),
1930 __field(btree_path_idx_t, idx )
1934 __entry->idx = path;
1937 TP_printk(" path %3u", __entry->idx)
1940 #else /* CONFIG_BCACHEFS_PATH_TRACEPOINTS */
1941 #ifndef _TRACE_BCACHEFS_H
/* No-op stubs: the path tracepoints above compile away entirely when
 * CONFIG_BCACHEFS_PATH_TRACEPOINTS is not set. */
1943 static inline void trace_update_by_path(struct btree_trans *trans, struct btree_path *path,
1944 struct btree_insert_entry *i, bool overwrite) {}
1945 static inline void trace_btree_path_lock(struct btree_trans *trans, unsigned long caller_ip, struct btree_bkey_cached_common *b) {}
1946 static inline void trace_btree_path_get_ll(struct btree_trans *trans, struct btree_path *path) {}
1947 static inline void trace_btree_path_put_ll(struct btree_trans *trans, struct btree_path *path) {}
1948 static inline void trace_btree_path_should_be_locked(struct btree_trans *trans, struct btree_path *path) {}
1949 static inline void trace_btree_path_alloc(struct btree_trans *trans, struct btree_path *path) {}
1950 static inline void trace_btree_path_get(struct btree_trans *trans, struct btree_path *path, struct bpos *new_pos) {}
1951 static inline void trace_btree_path_clone(struct btree_trans *trans, struct btree_path *path, struct btree_path *new) {}
1952 static inline void trace_btree_path_save_pos(struct btree_trans *trans, struct btree_path *path, struct btree_path *new) {}
1953 static inline void trace_btree_path_traverse_start(struct btree_trans *trans, struct btree_path *path) {}
1954 static inline void trace_btree_path_traverse_end(struct btree_trans *trans, struct btree_path *path) {}
1955 static inline void trace_btree_path_set_pos(struct btree_trans *trans, struct btree_path *path, struct bpos *new_pos) {}
1956 static inline void trace_btree_path_free(struct btree_trans *trans, btree_path_idx_t path, struct btree_path *dup) {}
1957 static inline void trace_btree_path_free_trans_begin(btree_path_idx_t path) {}
1960 #endif /* CONFIG_BCACHEFS_PATH_TRACEPOINTS */
1962 #define _TRACE_BCACHEFS_H
1963 #endif /* _TRACE_BCACHEFS_H */
1965 /* This part must be outside protection */
1966 #undef TRACE_INCLUDE_PATH
1967 #define TRACE_INCLUDE_PATH ../../fs/bcachefs
1969 #undef TRACE_INCLUDE_FILE
1970 #define TRACE_INCLUDE_FILE trace
1972 #include <trace/define_trace.h>