 	pr_info(" %10s %s\n\n", "type", "caller");
 	bad = total = 0;
+	if (use_bpf)
+		bad = bad_hist[BROKEN_CONTENDED];
+
 	while ((st = pop_from_result())) {
-		total++;
+		total += use_bpf ? st->nr_contended : 1;
 		if (st->broken)
 			bad++;
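In BPF mode a single entry already aggregates many contention events, so the reporting loop above weights the total by st->nr_contended instead of counting one per record. A second hunk, in the command's shutdown path, feeds the BPF-side lost counter into the same broken-entry histogram once tracing has stopped: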
 		lock_contention_stop();
 		lock_contention_read(&con);
+
+		/* abuse bad hist stats for lost entries */
+		bad_hist[BROKEN_CONTENDED] = con.lost;
 	} else {
 		err = perf_session__process_events(session);
 		if (err)
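Putting the two hunks together, here is a rough, self-contained sketch of the accounting they implement. The struct and function names are illustrative, not the tool's own; in the real code the entries come from pop_from_result() and the lost count from the BPF skeleton.

#include <stdbool.h>
#include <stdio.h>

struct stat_entry {			/* illustrative stand-in for lock_stat */
	unsigned int nr_contended;	/* events folded into this entry */
	bool broken;			/* entry carries inconsistent data */
};

static void report(const struct stat_entry *e, int n,
		   unsigned long lost, bool use_bpf)
{
	/* lost stack traces are pre-counted as broken entries */
	unsigned long bad = use_bpf ? lost : 0;
	unsigned long total = 0;

	for (int i = 0; i < n; i++) {
		/* one BPF map entry summarizes nr_contended events */
		total += use_bpf ? e[i].nr_contended : 1;
		if (e[i].broken)
			bad++;
	}
	printf("bad: %lu, total: %lu\n", bad, total);
}

int main(void)
{
	struct stat_entry entries[] = {
		{ .nr_contended = 5, .broken = false },
		{ .nr_contended = 2, .broken = true  },
	};

	report(entries, 2, 1, true);	/* prints "bad: 2, total: 7" */
	return 0;
}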
 /* should be same as bpf_skel/lock_contention.bpf.c */
 struct lock_contention_key {
-	u32 stack_id;
+	s32 stack_id;
 };
 struct lock_contention_data {
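The switch from u32 to s32 is the heart of the patch: bpf_get_stackid() returns a stack id on success but a negative errno on failure, so an unsigned field silently turns an error into a plausible-looking id and defeats any `< 0` check. The mirror struct must also stay bit-identical to the BPF-side key (per the comment above) so map lookups hash the same on both sides. A standalone illustration, with a made-up error value:

#include <stdio.h>
#include <linux/types.h>

int main(void)
{
	long ret = -14;		/* say bpf_get_stackid() failed with -EFAULT */
	__u32 old_id = ret;	/* previous field type: the error is lost */
	__s32 new_id = ret;	/* patched field type: the error survives */

	printf("as u32: %u  -> a '< 0' test can never fire\n", old_id);
	printf("as s32: %d  -> the failure is detectable\n", new_id);
	return 0;
}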
 int lock_contention_read(struct lock_contention *con)
 {
 	int fd, stack;
-	u32 prev_key, key;
+	s32 prev_key, key;
 	struct lock_contention_data data;
 	struct lock_stat *st;
 	struct machine *machine = con->machine;
 	fd = bpf_map__fd(skel->maps.lock_stat);
 	stack = bpf_map__fd(skel->maps.stacks);
+	con->lost = skel->bss->lost;
+
 	prev_key = 0;
 	while (!bpf_map_get_next_key(fd, &prev_key, &key)) {
 		struct map *kmap;
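For context, the surrounding loop is the standard libbpf map walk: bpf_map_get_next_key() hands back the key that follows *prev_key and fails with -ENOENT once the map is exhausted, and starting from a key that is not in the map (0, as above) yields the first entry. A trimmed sketch of the pattern; the value struct here is abbreviated with assumed fields, and the per-entry processing is elided:

#include <bpf/bpf.h>
#include <linux/types.h>

struct lock_contention_data {	/* abbreviated; fields assumed for the sketch */
	__u64 total_time;
	__u32 count;
	__u32 flags;
};

static void walk_lock_stat(int fd)
{
	__s32 prev_key = 0, key;
	struct lock_contention_data data;

	while (!bpf_map_get_next_key(fd, &prev_key, &key)) {
		if (!bpf_map_lookup_elem(fd, &key, &data)) {
			/* one aggregated entry, keyed by a stack id */
		}
		prev_key = key;
	}
}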
 #define MAX_ENTRIES 10240
 struct contention_key {
-	__u32 stack_id;
+	__s32 stack_id;
 };
 struct contention_data {
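On the BPF side the map key gets the identical treatment, and so does the record whose fields follow in the next hunk, which carries the stack id (alongside the timestamp, lock address and flags) for an in-flight contention, keeping both programs' view of the id consistently signed: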
 	__u64 timestamp;
 	__u64 lock;
 	__u32 flags;
-	__u32 stack_id;
+	__s32 stack_id;
 };
 /* callstack storage */
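The failure counter itself is an ordinary global in the BPF object. With libbpf skeletons such globals live in the .bss section as shared memory: the program updates them with plain stores and user space reads them directly, no map lookup needed, which is what the con->lost = skel->bss->lost line above relies on. A minimal sketch of the user-space side; the skeleton header and type name are assumptions, not taken from the patch:

/* reading a BPF .bss global through a libbpf skeleton; the names
 * lock_contention.skel.h / struct lock_contention_bpf are assumed */
#include "lock_contention.skel.h"

static unsigned long read_lost(struct lock_contention_bpf *skel)
{
	/* .bss globals are memory-mapped into the process by libbpf,
	 * so this is a plain read, not a bpf() syscall */
	return skel->bss->lost;
}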
 int has_cpu;
 int has_task;
+/* error stat */
+unsigned long lost;
+
 static inline int can_record(void)
 {
 	if (has_cpu) {
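With storage in place, the contention-begin handler checks the helper's return value at the point where the stack is captured: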
 	pelem->flags = (__u32)ctx[1];
 	pelem->stack_id = bpf_get_stackid(ctx, &stacks, BPF_F_FAST_STACK_CMP);
+	if (pelem->stack_id < 0)
+		lost++;
 	return 0;
 }
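Condensed into a standalone program, the BPF-side pattern looks like the sketch below. It is not perf's actual program: the map sizing, section name and license are assumptions, and the real handler also records the timestamp, lock and flags shown earlier and filters through can_record().

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* stack-trace storage; sizes are illustrative */
struct {
	__uint(type, BPF_MAP_TYPE_STACK_TRACE);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, 8 * sizeof(__u64));
	__uint(max_entries, 10240);
} stacks SEC(".maps");

/* error stat, read by user space through the skeleton's .bss */
unsigned long lost;

SEC("tracepoint/lock/contention_begin")
int contention_begin(void *ctx)
{
	__s32 stack_id = bpf_get_stackid(ctx, &stacks,
					 BPF_F_FAST_STACK_CMP);

	/* a negative return means the stack could not be stored */
	if (stack_id < 0)
		lost++;
	return 0;
}

char LICENSE[] SEC("license") = "Dual BSD/GPL";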
 	struct machine *machine;
 	struct hlist_head *result;
 	unsigned long map_nr_entries;
+	unsigned long lost;
 };
 #ifdef HAVE_BPF_SKEL
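End to end, then: the BPF program bumps lost whenever bpf_get_stackid() fails, lock_contention_read() copies skel->bss->lost into the new con->lost field, and the command publishes it as bad_hist[BROKEN_CONTENDED], so lost stack traces surface through the existing broken-entry accounting instead of disappearing silently.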