1 // SPDX-License-Identifier: GPL-2.0
7 #include "util/evlist.h" // for struct evsel_str_handler
8 #include "util/evsel.h"
9 #include "util/symbol.h"
10 #include "util/thread.h"
11 #include "util/header.h"
12 #include "util/target.h"
13 #include "util/callchain.h"
14 #include "util/lock-contention.h"
15 #include "util/bpf_skel/lock_data.h"
17 #include <subcmd/pager.h>
18 #include <subcmd/parse-options.h>
19 #include "util/trace-event.h"
20 #include "util/tracepoint.h"
22 #include "util/debug.h"
23 #include "util/session.h"
24 #include "util/tool.h"
25 #include "util/data.h"
26 #include "util/string2.h"
28 #include "util/util.h"
30 #include <sys/types.h>
31 #include <sys/prctl.h>
32 #include <semaphore.h>
37 #include <linux/list.h>
38 #include <linux/hash.h>
39 #include <linux/kernel.h>
40 #include <linux/zalloc.h>
41 #include <linux/err.h>
42 #include <linux/stringify.h>
44 static struct perf_session *session;
45 static struct target target;
47 /* based on kernel/lockdep.c */
48 #define LOCKHASH_BITS 12
49 #define LOCKHASH_SIZE (1UL << LOCKHASH_BITS)
51 static struct hlist_head lockhash_table[LOCKHASH_SIZE];
53 #define __lockhashfn(key) hash_long((unsigned long)key, LOCKHASH_BITS)
54 #define lockhashentry(key) (lockhash_table + __lockhashfn((key)))
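/*
 * Each lock instance is tracked by a struct lock_stat kept in the
 * lockhash_table bucket returned by lockhashentry(key).
 */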
56 static struct rb_root thread_stats;
58 static bool combine_locks;
59 static bool show_thread_stats;
60 static bool show_lock_addrs;
62 static unsigned long bpf_map_entries = 10240;
63 static int max_stack_depth = CONTENTION_STACK_DEPTH;
64 static int stack_skip = CONTENTION_STACK_SKIP;
65 static int print_nr_entries = INT_MAX / 2;
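/*
 * Defaults for the BPF map size, callstack handling and output length;
 * overridden by --map-nr-entries, --max-stack, --stack-skip and -E/--entries.
 */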
67 static struct lock_filter filters;
69 static enum lock_aggr_mode aggr_mode = LOCK_AGGR_ADDR;
71 static struct thread_stat *thread_stat_find(u32 tid)
74 struct thread_stat *st;
76 node = thread_stats.rb_node;
78 st = container_of(node, struct thread_stat, rb);
81 else if (tid < st->tid)
84 node = node->rb_right;
90 static void thread_stat_insert(struct thread_stat *new)
92 struct rb_node **rb = &thread_stats.rb_node;
93 struct rb_node *parent = NULL;
94 struct thread_stat *p;
97 p = container_of(*rb, struct thread_stat, rb);
100 if (new->tid < p->tid)
101 rb = &(*rb)->rb_left;
102 else if (new->tid > p->tid)
103 rb = &(*rb)->rb_right;
105 BUG_ON("inserting invalid thread_stat\n");
108 rb_link_node(&new->rb, parent, rb);
109 rb_insert_color(&new->rb, &thread_stats);
112 static struct thread_stat *thread_stat_findnew_after_first(u32 tid)
114 struct thread_stat *st;
116 st = thread_stat_find(tid);
120 st = zalloc(sizeof(struct thread_stat));
122 pr_err("memory allocation failed\n");
127 INIT_LIST_HEAD(&st->seq_list);
129 thread_stat_insert(st);
134 static struct thread_stat *thread_stat_findnew_first(u32 tid);
135 static struct thread_stat *(*thread_stat_findnew)(u32 tid) =
136 thread_stat_findnew_first;
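/*
 * The very first lookup inserts the initial rb node directly and then
 * redirects thread_stat_findnew to the regular find-or-create variant.
 */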
138 static struct thread_stat *thread_stat_findnew_first(u32 tid)
140 struct thread_stat *st;
142 st = zalloc(sizeof(struct thread_stat));
144 pr_err("memory allocation failed\n");
148 INIT_LIST_HEAD(&st->seq_list);
150 rb_link_node(&st->rb, NULL, &thread_stats.rb_node);
151 rb_insert_color(&st->rb, &thread_stats);
153 thread_stat_findnew = thread_stat_findnew_after_first;
157 /* build simple key functions that return whether 'one' is bigger than 'two' */
158 #define SINGLE_KEY(member) \
159 static int lock_stat_key_ ## member(struct lock_stat *one, \
160 struct lock_stat *two) \
162 return one->member > two->member; \
165 SINGLE_KEY(nr_acquired)
166 SINGLE_KEY(nr_contended)
167 SINGLE_KEY(avg_wait_time)
168 SINGLE_KEY(wait_time_total)
169 SINGLE_KEY(wait_time_max)
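/*
 * wait_time_min needs its own compare function: ULLONG_MAX means the
 * lock was never contended and must not dominate the sort.
 */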
171 static int lock_stat_key_wait_time_min(struct lock_stat *one,
172 struct lock_stat *two)
174 u64 s1 = one->wait_time_min;
175 u64 s2 = two->wait_time_min;
176 if (s1 == ULLONG_MAX)
178 if (s2 == ULLONG_MAX)
185  * name: the value specified by the user
186  * this should be simpler than the raw member name
187  * e.g. nr_acquired -> acquired, wait_time_total -> wait_total
190 /* header: the string printed on the header line */
192 /* len: the printing width of the field */
194 /* key: a pointer to function to compare two lock stats for sorting */
195 int (*key)(struct lock_stat*, struct lock_stat*);
196 /* print: a pointer to function to print a given lock stat */
197 void (*print)(struct lock_key*, struct lock_stat*);
198 /* list: list entry to link this key into lock_keys */
199 struct list_head list;
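/* print a duration using the largest unit that fits, falling back to raw nanoseconds */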
202 static void lock_stat_key_print_time(unsigned long long nsec, int len)
204 static const struct {
208 { 1e9 * 3600, "h " },
216 for (int i = 0; table[i].unit; i++) {
217 if (nsec < table[i].base)
220 pr_info("%*.2f %s", len - 3, nsec / table[i].base, table[i].unit);
224 pr_info("%*llu %s", len - 3, nsec, "ns");
227 #define PRINT_KEY(member) \
228 static void lock_stat_key_print_ ## member(struct lock_key *key, \
229 struct lock_stat *ls) \
231 pr_info("%*llu", key->len, (unsigned long long)ls->member); \
234 #define PRINT_TIME(member) \
235 static void lock_stat_key_print_ ## member(struct lock_key *key, \
236 struct lock_stat *ls) \
238 lock_stat_key_print_time((unsigned long long)ls->member, key->len); \
241 PRINT_KEY(nr_acquired)
242 PRINT_KEY(nr_contended)
243 PRINT_TIME(avg_wait_time)
244 PRINT_TIME(wait_time_total)
245 PRINT_TIME(wait_time_max)
247 static void lock_stat_key_print_wait_time_min(struct lock_key *key,
248 struct lock_stat *ls)
250 u64 wait_time = ls->wait_time_min;
252 if (wait_time == ULLONG_MAX)
255 lock_stat_key_print_time(wait_time, key->len);
259 static const char *sort_key = "acquired";
261 static int (*compare)(struct lock_stat *, struct lock_stat *);
263 static struct rb_root sorted; /* place to store intermediate data */
264 static struct rb_root result; /* place to store sorted data */
266 static LIST_HEAD(lock_keys);
267 static const char *output_fields;
269 #define DEF_KEY_LOCK(name, header, fn_suffix, len) \
270 { #name, header, len, lock_stat_key_ ## fn_suffix, lock_stat_key_print_ ## fn_suffix, {} }
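/*
 * Tables mapping the user-visible -k/-F names to compare and print
 * callbacks.  select_key() and add_output_field() iterate until an
 * entry with a NULL name is found.
 */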
271 static struct lock_key report_keys[] = {
272 DEF_KEY_LOCK(acquired, "acquired", nr_acquired, 10),
273 DEF_KEY_LOCK(contended, "contended", nr_contended, 10),
274 DEF_KEY_LOCK(avg_wait, "avg wait", avg_wait_time, 12),
275 DEF_KEY_LOCK(wait_total, "total wait", wait_time_total, 12),
276 DEF_KEY_LOCK(wait_max, "max wait", wait_time_max, 12),
277 DEF_KEY_LOCK(wait_min, "min wait", wait_time_min, 12),
279 /* extra, more complicated comparisons should go here */
283 static struct lock_key contention_keys[] = {
284 DEF_KEY_LOCK(contended, "contended", nr_contended, 10),
285 DEF_KEY_LOCK(wait_total, "total wait", wait_time_total, 12),
286 DEF_KEY_LOCK(wait_max, "max wait", wait_time_max, 12),
287 DEF_KEY_LOCK(wait_min, "min wait", wait_time_min, 12),
288 DEF_KEY_LOCK(avg_wait, "avg wait", avg_wait_time, 12),
290 /* extra, more complicated comparisons should go here */
294 static int select_key(bool contention)
297 struct lock_key *keys = report_keys;
300 keys = contention_keys;
302 for (i = 0; keys[i].name; i++) {
303 if (!strcmp(keys[i].name, sort_key)) {
304 compare = keys[i].key;
306 /* selected key should be in the output fields */
307 if (list_empty(&keys[i].list))
308 list_add_tail(&keys[i].list, &lock_keys);
314 pr_err("Unknown compare key: %s\n", sort_key);
318 static int add_output_field(bool contention, char *name)
321 struct lock_key *keys = report_keys;
324 keys = contention_keys;
326 for (i = 0; keys[i].name; i++) {
327 if (strcmp(keys[i].name, name))
330 /* prevent double link */
331 if (list_empty(&keys[i].list))
332 list_add_tail(&keys[i].list, &lock_keys);
337 pr_err("Unknown output field: %s\n", name);
341 static int setup_output_field(bool contention, const char *str)
343 char *tok, *tmp, *orig;
345 struct lock_key *keys = report_keys;
348 keys = contention_keys;
350 /* no output field given: use all of them */
352 for (i = 0; keys[i].name; i++)
353 list_add_tail(&keys[i].list, &lock_keys);
357 for (i = 0; keys[i].name; i++)
358 INIT_LIST_HEAD(&keys[i].list);
360 orig = tmp = strdup(str);
364 while ((tok = strsep(&tmp, ",")) != NULL) {
365 ret = add_output_field(contention, tok);
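/*
 * With -c/--combine-locks, stats of lock instances that share the same
 * class name are merged into a single node of the 'sorted' tree.
 */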
374 static void combine_lock_stats(struct lock_stat *st)
376 struct rb_node **rb = &sorted.rb_node;
377 struct rb_node *parent = NULL;
382 p = container_of(*rb, struct lock_stat, rb);
385 if (st->name && p->name)
386 ret = strcmp(st->name, p->name);
388 ret = !!st->name - !!p->name;
391 p->nr_acquired += st->nr_acquired;
392 p->nr_contended += st->nr_contended;
393 p->wait_time_total += st->wait_time_total;
396 p->avg_wait_time = p->wait_time_total / p->nr_contended;
398 if (p->wait_time_min > st->wait_time_min)
399 p->wait_time_min = st->wait_time_min;
400 if (p->wait_time_max < st->wait_time_max)
401 p->wait_time_max = st->wait_time_max;
403 p->broken |= st->broken;
409 rb = &(*rb)->rb_left;
411 rb = &(*rb)->rb_right;
414 rb_link_node(&st->rb, parent, rb);
415 rb_insert_color(&st->rb, &sorted);
418 static void insert_to_result(struct lock_stat *st,
419 int (*bigger)(struct lock_stat *, struct lock_stat *))
421 struct rb_node **rb = &result.rb_node;
422 struct rb_node *parent = NULL;
425 if (combine_locks && st->combined)
429 p = container_of(*rb, struct lock_stat, rb);
433 rb = &(*rb)->rb_left;
435 rb = &(*rb)->rb_right;
438 rb_link_node(&st->rb, parent, rb);
439 rb_insert_color(&st->rb, &result);
442 /* returns the leftmost element of result, and erases it */
443 static struct lock_stat *pop_from_result(void)
445 struct rb_node *node = result.rb_node;
450 while (node->rb_left)
451 node = node->rb_left;
453 rb_erase(node, &result);
454 return container_of(node, struct lock_stat, rb);
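/*
 * Look up a lock_stat by key in lockhash_table; lock_stat_findnew()
 * also creates a new entry (with wait_time_min preset to ULLONG_MAX)
 * when none exists yet.
 */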
457 static struct lock_stat *lock_stat_find(u64 addr)
459 struct hlist_head *entry = lockhashentry(addr);
460 struct lock_stat *ret;
462 hlist_for_each_entry(ret, entry, hash_entry) {
463 if (ret->addr == addr)
469 static struct lock_stat *lock_stat_findnew(u64 addr, const char *name, int flags)
471 struct hlist_head *entry = lockhashentry(addr);
472 struct lock_stat *ret, *new;
474 hlist_for_each_entry(ret, entry, hash_entry) {
475 if (ret->addr == addr)
479 new = zalloc(sizeof(struct lock_stat));
484 new->name = strdup(name);
491 new->wait_time_min = ULLONG_MAX;
493 hlist_add_head(&new->hash_entry, entry);
497 pr_err("memory allocation failed\n");
501 struct trace_lock_handler {
502 /* used when CONFIG_LOCKDEP is enabled */
503 int (*acquire_event)(struct evsel *evsel,
504 struct perf_sample *sample);
506 /* used when CONFIG_LOCKDEP and CONFIG_LOCK_STAT are enabled */
507 int (*acquired_event)(struct evsel *evsel,
508 struct perf_sample *sample);
510 /* used when CONFIG_LOCKDEP and CONFIG_LOCK_STAT are enabled */
511 int (*contended_event)(struct evsel *evsel,
512 struct perf_sample *sample);
514 /* used when CONFIG_LOCKDEP is enabled */
515 int (*release_event)(struct evsel *evsel,
516 struct perf_sample *sample);
518 /* it's used when CONFIG_LOCKDEP is off */
519 int (*contention_begin_event)(struct evsel *evsel,
520 struct perf_sample *sample);
522 /* it's used when CONFIG_LOCKDEP is off */
523 int (*contention_end_event)(struct evsel *evsel,
524 struct perf_sample *sample);
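/*
 * get_seq() finds or creates the per-thread, per-lock sequence state
 * used to validate the ordering of lock events.
 */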
527 static struct lock_seq_stat *get_seq(struct thread_stat *ts, u64 addr)
529 struct lock_seq_stat *seq;
531 list_for_each_entry(seq, &ts->seq_list, list) {
532 if (seq->addr == addr)
536 seq = zalloc(sizeof(struct lock_seq_stat));
538 pr_err("memory allocation failed\n");
541 seq->state = SEQ_STATE_UNINITIALIZED;
544 list_add(&seq->list, &ts->seq_list);
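/*
 * Events that arrive in an impossible order break the sequence; they
 * are counted per type here and reported by print_bad_events().
 */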
556 static int bad_hist[BROKEN_MAX];
563 static int get_key_by_aggr_mode_simple(u64 *key, u64 addr, u32 tid)
572 case LOCK_AGGR_CALLER:
574 pr_err("Invalid aggregation mode: %d\n", aggr_mode);
580 static u64 callchain_id(struct evsel *evsel, struct perf_sample *sample);
582 static int get_key_by_aggr_mode(u64 *key, u64 addr, struct evsel *evsel,
583 struct perf_sample *sample)
585 if (aggr_mode == LOCK_AGGR_CALLER) {
586 *key = callchain_id(evsel, sample);
589 return get_key_by_aggr_mode_simple(key, addr, sample->tid);
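/*
 * The stat key is the lock address (LOCK_AGGR_ADDR), the tid
 * (LOCK_AGGR_TASK) or a hash of the callchain (LOCK_AGGR_CALLER).
 */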
592 static int report_lock_acquire_event(struct evsel *evsel,
593 struct perf_sample *sample)
595 struct lock_stat *ls;
596 struct thread_stat *ts;
597 struct lock_seq_stat *seq;
598 const char *name = evsel__strval(evsel, sample, "name");
599 u64 addr = evsel__intval(evsel, sample, "lockdep_addr");
600 int flag = evsel__intval(evsel, sample, "flags");
604 ret = get_key_by_aggr_mode_simple(&key, addr, sample->tid);
608 ls = lock_stat_findnew(key, name, 0);
612 ts = thread_stat_findnew(sample->tid);
616 seq = get_seq(ts, addr);
620 switch (seq->state) {
621 case SEQ_STATE_UNINITIALIZED:
622 case SEQ_STATE_RELEASED:
624 seq->state = SEQ_STATE_ACQUIRING;
628 if (flag & READ_LOCK)
630 seq->state = SEQ_STATE_READ_ACQUIRED;
635 case SEQ_STATE_READ_ACQUIRED:
636 if (flag & READ_LOCK) {
644 case SEQ_STATE_ACQUIRED:
645 case SEQ_STATE_ACQUIRING:
646 case SEQ_STATE_CONTENDED:
648 /* broken lock sequence */
651 bad_hist[BROKEN_ACQUIRE]++;
653 list_del_init(&seq->list);
657 BUG_ON("Unknown state of lock sequence found!\n");
662 seq->prev_event_time = sample->time;
667 static int report_lock_acquired_event(struct evsel *evsel,
668 struct perf_sample *sample)
670 struct lock_stat *ls;
671 struct thread_stat *ts;
672 struct lock_seq_stat *seq;
674 const char *name = evsel__strval(evsel, sample, "name");
675 u64 addr = evsel__intval(evsel, sample, "lockdep_addr");
679 ret = get_key_by_aggr_mode_simple(&key, addr, sample->tid);
683 ls = lock_stat_findnew(key, name, 0);
687 ts = thread_stat_findnew(sample->tid);
691 seq = get_seq(ts, addr);
695 switch (seq->state) {
696 case SEQ_STATE_UNINITIALIZED:
697 /* orphan event, do nothing */
699 case SEQ_STATE_ACQUIRING:
701 case SEQ_STATE_CONTENDED:
702 contended_term = sample->time - seq->prev_event_time;
703 ls->wait_time_total += contended_term;
704 if (contended_term < ls->wait_time_min)
705 ls->wait_time_min = contended_term;
706 if (ls->wait_time_max < contended_term)
707 ls->wait_time_max = contended_term;
709 case SEQ_STATE_RELEASED:
710 case SEQ_STATE_ACQUIRED:
711 case SEQ_STATE_READ_ACQUIRED:
712 /* broken lock sequence */
715 bad_hist[BROKEN_ACQUIRED]++;
717 list_del_init(&seq->list);
721 BUG_ON("Unknown state of lock sequence found!\n");
725 seq->state = SEQ_STATE_ACQUIRED;
727 ls->avg_wait_time = ls->nr_contended ? ls->wait_time_total/ls->nr_contended : 0;
728 seq->prev_event_time = sample->time;
733 static int report_lock_contended_event(struct evsel *evsel,
734 struct perf_sample *sample)
736 struct lock_stat *ls;
737 struct thread_stat *ts;
738 struct lock_seq_stat *seq;
739 const char *name = evsel__strval(evsel, sample, "name");
740 u64 addr = evsel__intval(evsel, sample, "lockdep_addr");
744 ret = get_key_by_aggr_mode_simple(&key, addr, sample->tid);
748 ls = lock_stat_findnew(key, name, 0);
752 ts = thread_stat_findnew(sample->tid);
756 seq = get_seq(ts, addr);
760 switch (seq->state) {
761 case SEQ_STATE_UNINITIALIZED:
762 /* orphan event, do nothing */
764 case SEQ_STATE_ACQUIRING:
766 case SEQ_STATE_RELEASED:
767 case SEQ_STATE_ACQUIRED:
768 case SEQ_STATE_READ_ACQUIRED:
769 case SEQ_STATE_CONTENDED:
770 /* broken lock sequence */
773 bad_hist[BROKEN_CONTENDED]++;
775 list_del_init(&seq->list);
779 BUG_ON("Unknown state of lock sequence found!\n");
783 seq->state = SEQ_STATE_CONTENDED;
785 ls->avg_wait_time = ls->wait_time_total/ls->nr_contended;
786 seq->prev_event_time = sample->time;
791 static int report_lock_release_event(struct evsel *evsel,
792 struct perf_sample *sample)
794 struct lock_stat *ls;
795 struct thread_stat *ts;
796 struct lock_seq_stat *seq;
797 const char *name = evsel__strval(evsel, sample, "name");
798 u64 addr = evsel__intval(evsel, sample, "lockdep_addr");
802 ret = get_key_by_aggr_mode_simple(&key, addr, sample->tid);
806 ls = lock_stat_findnew(key, name, 0);
810 ts = thread_stat_findnew(sample->tid);
814 seq = get_seq(ts, addr);
818 switch (seq->state) {
819 case SEQ_STATE_UNINITIALIZED:
821 case SEQ_STATE_ACQUIRED:
823 case SEQ_STATE_READ_ACQUIRED:
825 BUG_ON(seq->read_count < 0);
826 if (seq->read_count) {
831 case SEQ_STATE_ACQUIRING:
832 case SEQ_STATE_CONTENDED:
833 case SEQ_STATE_RELEASED:
834 /* broken lock sequence */
837 bad_hist[BROKEN_RELEASE]++;
841 BUG_ON("Unknown state of lock sequence found!\n");
847 list_del_init(&seq->list);
853 static int get_symbol_name_offset(struct map *map, struct symbol *sym, u64 ip,
858 if (map == NULL || sym == NULL) {
863 offset = map->map_ip(map, ip) - sym->start;
866 return scnprintf(buf, size, "%s+%#lx", sym->name, offset);
868 return strlcpy(buf, sym->name, size);
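/*
 * Resolve the contention callchain and return the first caller that is
 * not a known lock function, formatted as "symbol+offset".
 */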
870 static int lock_contention_caller(struct evsel *evsel, struct perf_sample *sample,
873 struct thread *thread;
874 struct callchain_cursor *cursor = &callchain_cursor;
875 struct machine *machine = &session->machines.host;
880 /* lock names will be replaced with task names later */
881 if (show_thread_stats)
884 thread = machine__findnew_thread(machine, -1, sample->pid);
888 /* use caller function name from the callchain */
889 ret = thread__resolve_callchain(thread, cursor, evsel, sample,
890 NULL, NULL, max_stack_depth);
896 callchain_cursor_commit(cursor);
900 struct callchain_cursor_node *node;
902 node = callchain_cursor_current(cursor);
906 /* skip first few entries - for lock functions */
907 if (++skip <= stack_skip)
911 if (sym && !machine__is_lock_function(machine, node->ip)) {
912 get_symbol_name_offset(node->ms.map, sym, node->ip,
918 callchain_cursor_advance(cursor);
923 static u64 callchain_id(struct evsel *evsel, struct perf_sample *sample)
925 struct callchain_cursor *cursor = &callchain_cursor;
926 struct machine *machine = &session->machines.host;
927 struct thread *thread;
932 thread = machine__findnew_thread(machine, -1, sample->pid);
936 /* use caller function name from the callchain */
937 ret = thread__resolve_callchain(thread, cursor, evsel, sample,
938 NULL, NULL, max_stack_depth);
944 callchain_cursor_commit(cursor);
947 struct callchain_cursor_node *node;
949 node = callchain_cursor_current(cursor);
953 /* skip first few entries - for lock functions */
954 if (++skip <= stack_skip)
957 if (node->ms.sym && machine__is_lock_function(machine, node->ip))
960 hash ^= hash_long((unsigned long)node->ip, 64);
963 callchain_cursor_advance(cursor);
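/* copy up to max_stack raw ips from the sample, skipping PERF_CONTEXT_* markers */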
968 static u64 *get_callstack(struct perf_sample *sample, int max_stack)
974 callstack = calloc(max_stack, sizeof(*callstack));
975 if (callstack == NULL)
978 for (i = 0, c = 0; i < sample->callchain->nr && c < max_stack; i++) {
979 u64 ip = sample->callchain->ips[i];
981 if (ip >= PERF_CONTEXT_MAX)
989 static int report_lock_contention_begin_event(struct evsel *evsel,
990 struct perf_sample *sample)
992 struct lock_stat *ls;
993 struct thread_stat *ts;
994 struct lock_seq_stat *seq;
995 u64 addr = evsel__intval(evsel, sample, "lock_addr");
996 unsigned int flags = evsel__intval(evsel, sample, "flags");
999 static bool kmap_loaded;
1000 struct machine *machine = &session->machines.host;
1004 ret = get_key_by_aggr_mode(&key, addr, evsel, sample);
1009 unsigned long *addrs;
1011 /* make sure it loads the kernel map to find lock symbols */
1012 map__load(machine__kernel_map(machine));
1015 /* convert (kernel) symbols to addresses */
1016 for (i = 0; i < filters.nr_syms; i++) {
1017 sym = machine__find_kernel_symbol_by_name(machine,
1021 pr_warning("ignore unknown symbol: %s\n",
1026 addrs = realloc(filters.addrs,
1027 (filters.nr_addrs + 1) * sizeof(*addrs));
1028 if (addrs == NULL) {
1029 pr_warning("memory allocation failure\n");
1033 addrs[filters.nr_addrs++] = kmap->unmap_ip(kmap, sym->start);
1034 filters.addrs = addrs;
1038 ls = lock_stat_find(key);
1041 const char *name = "";
1043 switch (aggr_mode) {
1044 case LOCK_AGGR_ADDR:
1045 sym = machine__find_kernel_symbol(machine, key, &kmap);
1049 case LOCK_AGGR_CALLER:
1051 if (lock_contention_caller(evsel, sample, buf, sizeof(buf)) < 0)
1054 case LOCK_AGGR_TASK:
1059 ls = lock_stat_findnew(key, name, flags);
1063 if (aggr_mode == LOCK_AGGR_CALLER && verbose > 0) {
1064 ls->callstack = get_callstack(sample, max_stack_depth);
1065 if (ls->callstack == NULL)
1070 if (filters.nr_types) {
1073 for (i = 0; i < filters.nr_types; i++) {
1074 if (flags == filters.types[i]) {
1084 if (filters.nr_addrs) {
1087 for (i = 0; i < filters.nr_addrs; i++) {
1088 if (addr == filters.addrs[i]) {
1098 ts = thread_stat_findnew(sample->tid);
1102 seq = get_seq(ts, addr);
1106 switch (seq->state) {
1107 case SEQ_STATE_UNINITIALIZED:
1108 case SEQ_STATE_ACQUIRED:
1110 case SEQ_STATE_CONTENDED:
1112  * A nested contention begin can occur due to mutex spinning;
1113  * in that case we keep using the original contention begin event
1114  * and ignore the second one.
1117 case SEQ_STATE_ACQUIRING:
1118 case SEQ_STATE_READ_ACQUIRED:
1119 case SEQ_STATE_RELEASED:
1120 /* broken lock sequence */
1123 bad_hist[BROKEN_CONTENDED]++;
1125 list_del_init(&seq->list);
1129 BUG_ON("Unknown state of lock sequence found!\n");
1133 if (seq->state != SEQ_STATE_CONTENDED) {
1134 seq->state = SEQ_STATE_CONTENDED;
1135 seq->prev_event_time = sample->time;
1142 static int report_lock_contention_end_event(struct evsel *evsel,
1143 struct perf_sample *sample)
1145 struct lock_stat *ls;
1146 struct thread_stat *ts;
1147 struct lock_seq_stat *seq;
1149 u64 addr = evsel__intval(evsel, sample, "lock_addr");
1153 ret = get_key_by_aggr_mode(&key, addr, evsel, sample);
1157 ls = lock_stat_find(key);
1161 ts = thread_stat_find(sample->tid);
1165 seq = get_seq(ts, addr);
1169 switch (seq->state) {
1170 case SEQ_STATE_UNINITIALIZED:
1172 case SEQ_STATE_CONTENDED:
1173 contended_term = sample->time - seq->prev_event_time;
1174 ls->wait_time_total += contended_term;
1175 if (contended_term < ls->wait_time_min)
1176 ls->wait_time_min = contended_term;
1177 if (ls->wait_time_max < contended_term)
1178 ls->wait_time_max = contended_term;
1180 case SEQ_STATE_ACQUIRING:
1181 case SEQ_STATE_ACQUIRED:
1182 case SEQ_STATE_READ_ACQUIRED:
1183 case SEQ_STATE_RELEASED:
1184 /* broken lock sequence */
1187 bad_hist[BROKEN_ACQUIRED]++;
1189 list_del_init(&seq->list);
1193 BUG_ON("Unknown state of lock sequence found!\n");
1197 seq->state = SEQ_STATE_ACQUIRED;
1199 ls->avg_wait_time = ls->wait_time_total/ls->nr_acquired;
1204 /* lock oriented handlers */
1205 /* TODO: handlers for CPU oriented, thread oriented */
1206 static struct trace_lock_handler report_lock_ops = {
1207 .acquire_event = report_lock_acquire_event,
1208 .acquired_event = report_lock_acquired_event,
1209 .contended_event = report_lock_contended_event,
1210 .release_event = report_lock_release_event,
1211 .contention_begin_event = report_lock_contention_begin_event,
1212 .contention_end_event = report_lock_contention_end_event,
1215 static struct trace_lock_handler contention_lock_ops = {
1216 .contention_begin_event = report_lock_contention_begin_event,
1217 .contention_end_event = report_lock_contention_end_event,
1221 static struct trace_lock_handler *trace_handler;
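/* each evsel__process_* wrapper simply dispatches to the selected trace_handler */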
1223 static int evsel__process_lock_acquire(struct evsel *evsel, struct perf_sample *sample)
1225 if (trace_handler->acquire_event)
1226 return trace_handler->acquire_event(evsel, sample);
1230 static int evsel__process_lock_acquired(struct evsel *evsel, struct perf_sample *sample)
1232 if (trace_handler->acquired_event)
1233 return trace_handler->acquired_event(evsel, sample);
1237 static int evsel__process_lock_contended(struct evsel *evsel, struct perf_sample *sample)
1239 if (trace_handler->contended_event)
1240 return trace_handler->contended_event(evsel, sample);
1244 static int evsel__process_lock_release(struct evsel *evsel, struct perf_sample *sample)
1246 if (trace_handler->release_event)
1247 return trace_handler->release_event(evsel, sample);
1251 static int evsel__process_contention_begin(struct evsel *evsel, struct perf_sample *sample)
1253 if (trace_handler->contention_begin_event)
1254 return trace_handler->contention_begin_event(evsel, sample);
1258 static int evsel__process_contention_end(struct evsel *evsel, struct perf_sample *sample)
1260 if (trace_handler->contention_end_event)
1261 return trace_handler->contention_end_event(evsel, sample);
1265 static void print_bad_events(int bad, int total)
1267 /* Output for debug, this has to be removed */
1270 const char *name[4] =
1271 { "acquire", "acquired", "contended", "release" };
1273 for (i = 0; i < BROKEN_MAX; i++)
1274 broken += bad_hist[i];
1276 if (quiet || (broken == 0 && verbose <= 0))
1279 pr_info("\n=== output for debug===\n\n");
1280 pr_info("bad: %d, total: %d\n", bad, total);
1281 pr_info("bad rate: %.2f %%\n", (double)bad / (double)total * 100);
1282 pr_info("histogram of events that caused bad sequence\n");
1283 for (i = 0; i < BROKEN_MAX; i++)
1284 pr_info(" %10s: %d\n", name[i], bad_hist[i]);
1287 /* TODO: various ways to print, coloring, nano or milli sec */
1288 static void print_result(void)
1290 struct lock_stat *st;
1291 struct lock_key *key;
1293 int bad, total, printed;
1296 pr_info("%20s ", "Name");
1297 list_for_each_entry(key, &lock_keys, list)
1298 pr_info("%*s ", key->len, key->header);
1302 bad = total = printed = 0;
1303 while ((st = pop_from_result())) {
1307 if (!st->nr_acquired)
1310 bzero(cut_name, 20);
1312 if (strlen(st->name) < 20) {
1313 /* output raw name */
1314 const char *name = st->name;
1316 if (show_thread_stats) {
1319 /* st->addr contains tid of thread */
1320 t = perf_session__findnew(session, st->addr);
1321 name = thread__comm_str(t);
1324 pr_info("%20s ", name);
1326 strncpy(cut_name, st->name, 16);
1330 cut_name[19] = '\0';
1331 /* cut off the name to keep the output aligned */
1332 pr_info("%20s ", cut_name);
1335 list_for_each_entry(key, &lock_keys, list) {
1336 key->print(key, st);
1341 if (++printed >= print_nr_entries)
1345 print_bad_events(bad, total);
1348 static bool info_threads, info_map;
1350 static void dump_threads(void)
1352 struct thread_stat *st;
1353 struct rb_node *node;
1356 pr_info("%10s: comm\n", "Thread ID");
1358 node = rb_first(&thread_stats);
1360 st = container_of(node, struct thread_stat, rb);
1361 t = perf_session__findnew(session, st->tid);
1362 pr_info("%10d: %s\n", st->tid, thread__comm_str(t));
1363 node = rb_next(node);
1368 static int compare_maps(struct lock_stat *a, struct lock_stat *b)
1372 if (a->name && b->name)
1373 ret = strcmp(a->name, b->name);
1375 ret = !!a->name - !!b->name;
1378 return a->addr < b->addr;
1383 static void dump_map(void)
1386 struct lock_stat *st;
1388 pr_info("Address of instance: name of class\n");
1389 for (i = 0; i < LOCKHASH_SIZE; i++) {
1390 hlist_for_each_entry(st, &lockhash_table[i], hash_entry) {
1391 insert_to_result(st, compare_maps);
1395 while ((st = pop_from_result()))
1396 pr_info(" %#llx: %s\n", (unsigned long long)st->addr, st->name);
1399 static int dump_info(void)
1409 pr_err("Unknown type of information\n");
1415 static const struct evsel_str_handler lock_tracepoints[] = {
1416 { "lock:lock_acquire", evsel__process_lock_acquire, }, /* CONFIG_LOCKDEP */
1417 { "lock:lock_acquired", evsel__process_lock_acquired, }, /* CONFIG_LOCKDEP, CONFIG_LOCK_STAT */
1418 { "lock:lock_contended", evsel__process_lock_contended, }, /* CONFIG_LOCKDEP, CONFIG_LOCK_STAT */
1419 { "lock:lock_release", evsel__process_lock_release, }, /* CONFIG_LOCKDEP */
1422 static const struct evsel_str_handler contention_tracepoints[] = {
1423 { "lock:contention_begin", evsel__process_contention_begin, },
1424 { "lock:contention_end", evsel__process_contention_end, },
1427 static int process_event_update(struct perf_tool *tool,
1428 union perf_event *event,
1429 struct evlist **pevlist)
1433 ret = perf_event__process_event_update(tool, event, pevlist);
1437 /* this can return -EEXIST since we call it for each evsel */
1438 perf_session__set_tracepoints_handlers(session, lock_tracepoints);
1439 perf_session__set_tracepoints_handlers(session, contention_tracepoints);
1443 typedef int (*tracepoint_handler)(struct evsel *evsel,
1444 struct perf_sample *sample);
1446 static int process_sample_event(struct perf_tool *tool __maybe_unused,
1447 union perf_event *event,
1448 struct perf_sample *sample,
1449 struct evsel *evsel,
1450 struct machine *machine)
1453 struct thread *thread = machine__findnew_thread(machine, sample->pid,
1456 if (thread == NULL) {
1457 pr_debug("problem processing %d event, skipping it.\n",
1458 event->header.type);
1462 if (evsel->handler != NULL) {
1463 tracepoint_handler f = evsel->handler;
1464 err = f(evsel, sample);
1467 thread__put(thread);
1472 static void combine_result(void)
1475 struct lock_stat *st;
1480 for (i = 0; i < LOCKHASH_SIZE; i++) {
1481 hlist_for_each_entry(st, &lockhash_table[i], hash_entry) {
1482 combine_lock_stats(st);
1487 static void sort_result(void)
1490 struct lock_stat *st;
1492 for (i = 0; i < LOCKHASH_SIZE; i++) {
1493 hlist_for_each_entry(st, &lockhash_table[i], hash_entry) {
1494 insert_to_result(st, compare);
1499 static const struct {
1502 } lock_type_table[] = {
1504 { LCB_F_SPIN, "spinlock" },
1505 { LCB_F_SPIN | LCB_F_READ, "rwlock:R" },
1506 { LCB_F_SPIN | LCB_F_WRITE, "rwlock:W"},
1507 { LCB_F_READ, "rwsem:R" },
1508 { LCB_F_WRITE, "rwsem:W" },
1509 { LCB_F_RT, "rtmutex" },
1510 { LCB_F_RT | LCB_F_READ, "rwlock-rt:R" },
1511 { LCB_F_RT | LCB_F_WRITE, "rwlock-rt:W"},
1512 { LCB_F_PERCPU | LCB_F_READ, "pcpu-sem:R" },
1513 { LCB_F_PERCPU | LCB_F_WRITE, "pcpu-sem:W" },
1514 { LCB_F_MUTEX, "mutex" },
1515 { LCB_F_MUTEX | LCB_F_SPIN, "mutex" },
1516 /* alias for get_type_flag() */
1517 { LCB_F_MUTEX | LCB_F_SPIN, "mutex-spin" },
1520 static const char *get_type_str(unsigned int flags)
1522 for (unsigned int i = 0; i < ARRAY_SIZE(lock_type_table); i++) {
1523 if (lock_type_table[i].flags == flags)
1524 return lock_type_table[i].name;
1529 static unsigned int get_type_flag(const char *str)
1531 for (unsigned int i = 0; i < ARRAY_SIZE(lock_type_table); i++) {
1532 if (!strcmp(lock_type_table[i].name, str))
1533 return lock_type_table[i].flags;
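/* release the type/addr/symbol filters built from the -Y and -L options */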
1538 static void lock_filter_finish(void)
1540 zfree(&filters.types);
1541 filters.nr_types = 0;
1543 zfree(&filters.addrs);
1544 filters.nr_addrs = 0;
1546 for (int i = 0; i < filters.nr_syms; i++)
1547 free(filters.syms[i]);
1549 zfree(&filters.syms);
1550 filters.nr_syms = 0;
1553 static void sort_contention_result(void)
1558 static void print_contention_result(struct lock_contention *con)
1560 struct lock_stat *st;
1561 struct lock_key *key;
1562 int bad, total, printed;
1565 list_for_each_entry(key, &lock_keys, list)
1566 pr_info("%*s ", key->len, key->header);
1568 switch (aggr_mode) {
1569 case LOCK_AGGR_TASK:
1570 pr_info(" %10s %s\n\n", "pid", "comm");
1572 case LOCK_AGGR_CALLER:
1573 pr_info(" %10s %s\n\n", "type", "caller");
1575 case LOCK_AGGR_ADDR:
1576 pr_info(" %16s %s\n\n", "address", "symbol");
1583 bad = total = printed = 0;
1585 bad = bad_hist[BROKEN_CONTENDED];
1587 while ((st = pop_from_result())) {
1591 total += use_bpf ? st->nr_contended : 1;
1595 if (!st->wait_time_total)
1598 list_for_each_entry(key, &lock_keys, list) {
1599 key->print(key, st);
1603 switch (aggr_mode) {
1604 case LOCK_AGGR_CALLER:
1605 pr_info(" %10s %s\n", get_type_str(st->flags), st->name);
1607 case LOCK_AGGR_TASK:
1609 t = perf_session__findnew(session, pid);
1610 pr_info(" %10d %s\n", pid, thread__comm_str(t));
1612 case LOCK_AGGR_ADDR:
1613 pr_info(" %016llx %s\n", (unsigned long long)st->addr,
1620 if (aggr_mode == LOCK_AGGR_CALLER && verbose > 0) {
1626 for (int i = 0; i < max_stack_depth; i++) {
1627 if (!st->callstack || !st->callstack[i])
1630 ip = st->callstack[i];
1631 sym = machine__find_kernel_symbol(con->machine, ip, &kmap);
1632 get_symbol_name_offset(kmap, sym, ip, buf, sizeof(buf));
1633 pr_info("\t\t\t%#lx %s\n", (unsigned long)ip, buf);
1637 if (++printed >= print_nr_entries)
1641 print_bad_events(bad, total);
1646 static int __cmd_report(bool display_info)
1649 struct perf_tool eops = {
1650 .attr = perf_event__process_attr,
1651 .event_update = process_event_update,
1652 .sample = process_sample_event,
1653 .comm = perf_event__process_comm,
1654 .mmap = perf_event__process_mmap,
1655 .namespaces = perf_event__process_namespaces,
1656 .tracing_data = perf_event__process_tracing_data,
1657 .ordered_events = true,
1659 struct perf_data data = {
1661 .mode = PERF_DATA_MODE_READ,
1665 session = perf_session__new(&data, &eops);
1666 if (IS_ERR(session)) {
1667 pr_err("Initializing perf session failed\n");
1668 return PTR_ERR(session);
1671 /* for lock function check */
1672 symbol_conf.sort_by_name = true;
1673 symbol__init(&session->header.env);
1675 if (!data.is_pipe) {
1676 if (!perf_session__has_traces(session, "lock record"))
1679 if (perf_session__set_tracepoints_handlers(session, lock_tracepoints)) {
1680 pr_err("Initializing perf session tracepoint handlers failed\n");
1684 if (perf_session__set_tracepoints_handlers(session, contention_tracepoints)) {
1685 pr_err("Initializing perf session tracepoint handlers failed\n");
1690 if (setup_output_field(false, output_fields))
1693 if (select_key(false))
1696 if (show_thread_stats)
1697 aggr_mode = LOCK_AGGR_TASK;
1699 err = perf_session__process_events(session);
1704 if (display_info) /* used for info subcommand */
1713 perf_session__delete(session);
1717 static void sighandler(int sig __maybe_unused)
1721 static int __cmd_contention(int argc, const char **argv)
1724 struct perf_tool eops = {
1725 .attr = perf_event__process_attr,
1726 .event_update = process_event_update,
1727 .sample = process_sample_event,
1728 .comm = perf_event__process_comm,
1729 .mmap = perf_event__process_mmap,
1730 .tracing_data = perf_event__process_tracing_data,
1731 .ordered_events = true,
1733 struct perf_data data = {
1735 .mode = PERF_DATA_MODE_READ,
1738 struct lock_contention con = {
1740 .result = &lockhash_table[0],
1741 .map_nr_entries = bpf_map_entries,
1742 .max_stack = max_stack_depth,
1743 .stack_skip = stack_skip,
1744 .filters = &filters,
1747 session = perf_session__new(use_bpf ? NULL : &data, &eops);
1748 if (IS_ERR(session)) {
1749 pr_err("Initializing perf session failed\n");
1750 return PTR_ERR(session);
1753 con.machine = &session->machines.host;
1755 con.aggr_mode = aggr_mode = show_thread_stats ? LOCK_AGGR_TASK :
1756 show_lock_addrs ? LOCK_AGGR_ADDR : LOCK_AGGR_CALLER;
1758 /* for lock function check */
1759 symbol_conf.sort_by_name = true;
1760 symbol__init(&session->header.env);
1763 err = target__validate(&target);
1767 target__strerror(&target, err, errbuf, 512);
1768 pr_err("%s\n", errbuf);
1772 signal(SIGINT, sighandler);
1773 signal(SIGCHLD, sighandler);
1774 signal(SIGTERM, sighandler);
1776 con.evlist = evlist__new();
1777 if (con.evlist == NULL) {
1782 err = evlist__create_maps(con.evlist, &target);
1787 err = evlist__prepare_workload(con.evlist, &target,
1793 if (lock_contention_prepare(&con) < 0) {
1794 pr_err("lock contention BPF setup failed\n");
1797 } else if (!data.is_pipe) {
1798 if (!perf_session__has_traces(session, "lock record"))
1801 if (!evlist__find_evsel_by_str(session->evlist,
1802 "lock:contention_begin")) {
1803 pr_err("lock contention evsel not found\n");
1807 if (perf_session__set_tracepoints_handlers(session,
1808 contention_tracepoints)) {
1809 pr_err("Initializing perf session tracepoint handlers failed\n");
1814 if (setup_output_field(true, output_fields))
1817 if (select_key(true))
1821 lock_contention_start();
1823 evlist__start_workload(con.evlist);
1825 /* wait for signal */
1828 lock_contention_stop();
1829 lock_contention_read(&con);
1831 /* abuse bad hist stats for lost entries */
1832 bad_hist[BROKEN_CONTENDED] = con.lost;
1834 err = perf_session__process_events(session);
1841 sort_contention_result();
1842 print_contention_result(&con);
1845 lock_filter_finish();
1846 evlist__delete(con.evlist);
1847 lock_contention_finish();
1848 perf_session__delete(session);
1853 static int __cmd_record(int argc, const char **argv)
1855 const char *record_args[] = {
1856 "record", "-R", "-m", "1024", "-c", "1", "--synth", "task",
1858 const char *callgraph_args[] = {
1859 "--call-graph", "fp," __stringify(CONTENTION_STACK_DEPTH),
1861 unsigned int rec_argc, i, j, ret;
1862 unsigned int nr_tracepoints;
1863 unsigned int nr_callgraph_args = 0;
1864 const char **rec_argv;
1865 bool has_lock_stat = true;
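/*
 * Prefer the lockdep/lock_stat tracepoints; if they are not available,
 * fall back to the contention tracepoints and record callchains.
 */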
1867 for (i = 0; i < ARRAY_SIZE(lock_tracepoints); i++) {
1868 if (!is_valid_tracepoint(lock_tracepoints[i].name)) {
1869 pr_debug("tracepoint %s is not enabled. "
1870 "Are CONFIG_LOCKDEP and CONFIG_LOCK_STAT enabled?\n",
1871 lock_tracepoints[i].name);
1872 has_lock_stat = false;
1880 for (i = 0; i < ARRAY_SIZE(contention_tracepoints); i++) {
1881 if (!is_valid_tracepoint(contention_tracepoints[i].name)) {
1882 pr_err("tracepoint %s is not enabled.\n",
1883 contention_tracepoints[i].name);
1888 nr_callgraph_args = ARRAY_SIZE(callgraph_args);
1891 rec_argc = ARRAY_SIZE(record_args) + nr_callgraph_args + argc - 1;
1894 nr_tracepoints = ARRAY_SIZE(lock_tracepoints);
1896 nr_tracepoints = ARRAY_SIZE(contention_tracepoints);
1898 /* factor of 2 is for -e in front of each tracepoint */
1899 rec_argc += 2 * nr_tracepoints;
1901 rec_argv = calloc(rec_argc + 1, sizeof(char *));
1905 for (i = 0; i < ARRAY_SIZE(record_args); i++)
1906 rec_argv[i] = strdup(record_args[i]);
1908 for (j = 0; j < nr_tracepoints; j++) {
1909 const char *ev_name;
1912 ev_name = strdup(lock_tracepoints[j].name);
1914 ev_name = strdup(contention_tracepoints[j].name);
1919 rec_argv[i++] = "-e";
1920 rec_argv[i++] = ev_name;
1923 for (j = 0; j < nr_callgraph_args; j++, i++)
1924 rec_argv[i] = callgraph_args[j];
1926 for (j = 1; j < (unsigned int)argc; j++, i++)
1927 rec_argv[i] = argv[j];
1929 BUG_ON(i != rec_argc);
1931 ret = cmd_record(i, rec_argv);
1936 static int parse_map_entry(const struct option *opt, const char *str,
1937 int unset __maybe_unused)
1939 unsigned long *len = (unsigned long *)opt->value;
1944 val = strtoul(str, &endptr, 0);
1945 if (*endptr != '\0' || errno != 0) {
1946 pr_err("invalid BPF map length: %s\n", str);
1954 static int parse_max_stack(const struct option *opt, const char *str,
1955 int unset __maybe_unused)
1957 unsigned long *len = (unsigned long *)opt->value;
1962 val = strtol(str, &endptr, 0);
1963 if (*endptr != '\0' || errno != 0) {
1964 pr_err("invalid max stack depth: %s\n", str);
1968 if (val < 0 || val > sysctl__max_stack()) {
1969 pr_err("invalid max stack depth: %ld\n", val);
1977 static bool add_lock_type(unsigned int flags)
1981 tmp = realloc(filters.types, (filters.nr_types + 1) * sizeof(*filters.types));
1985 tmp[filters.nr_types++] = flags;
1986 filters.types = tmp;
1990 static int parse_lock_type(const struct option *opt __maybe_unused, const char *str,
1991 int unset __maybe_unused)
1993 char *s, *tmp, *tok;
2000 for (tok = strtok_r(s, ", ", &tmp); tok; tok = strtok_r(NULL, ", ", &tmp)) {
2001 unsigned int flags = get_type_flag(tok);
2006 if (strchr(tok, ':'))
2009 /* try :R and :W suffixes for rwlock, rwsem, ... */
2010 scnprintf(buf, sizeof(buf), "%s:R", tok);
2011 flags = get_type_flag(buf);
2012 if (flags != UINT_MAX) {
2013 if (!add_lock_type(flags)) {
2019 scnprintf(buf, sizeof(buf), "%s:W", tok);
2020 flags = get_type_flag(buf);
2021 if (flags != UINT_MAX) {
2022 if (!add_lock_type(flags)) {
2030 if (!add_lock_type(flags)) {
2035 if (!strcmp(tok, "mutex")) {
2036 flags = get_type_flag("mutex-spin");
2037 if (flags != UINT_MAX) {
2038 if (!add_lock_type(flags)) {
2050 static bool add_lock_addr(unsigned long addr)
2054 tmp = realloc(filters.addrs, (filters.nr_addrs + 1) * sizeof(*filters.addrs));
2056 pr_err("Memory allocation failure\n");
2060 tmp[filters.nr_addrs++] = addr;
2061 filters.addrs = tmp;
2065 static bool add_lock_sym(char *name)
2068 char *sym = strdup(name);
2071 pr_err("Memory allocation failure\n");
2075 tmp = realloc(filters.syms, (filters.nr_syms + 1) * sizeof(*filters.syms));
2077 pr_err("Memory allocation failure\n");
2082 tmp[filters.nr_syms++] = sym;
2087 static int parse_lock_addr(const struct option *opt __maybe_unused, const char *str,
2088 int unset __maybe_unused)
2090 char *s, *tmp, *tok;
2098 for (tok = strtok_r(s, ", ", &tmp); tok; tok = strtok_r(NULL, ", ", &tmp)) {
2101 addr = strtoul(tok, &end, 16);
2103 if (!add_lock_addr(addr)) {
2111 * At this moment, we don't have kernel symbols. Save the symbols
2112 * in a separate list and resolve them to addresses later.
2114 if (!add_lock_sym(tok)) {
2124 int cmd_lock(int argc, const char **argv)
2126 const struct option lock_options[] = {
2127 OPT_STRING('i', "input", &input_name, "file", "input file name"),
2128 OPT_INCR('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"),
2129 OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, "dump raw trace in ASCII"),
2130 OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
2131 OPT_STRING(0, "vmlinux", &symbol_conf.vmlinux_name,
2132 "file", "vmlinux pathname"),
2133 OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
2134 "file", "kallsyms pathname"),
2135 OPT_BOOLEAN('q', "quiet", &quiet, "Do not show any warnings or messages"),
2139 const struct option info_options[] = {
2140 OPT_BOOLEAN('t', "threads", &info_threads,
2141 "dump thread list in perf.data"),
2142 OPT_BOOLEAN('m', "map", &info_map,
2143 "map of lock instances (address:name table)"),
2144 OPT_PARENT(lock_options)
2147 const struct option report_options[] = {
2148 OPT_STRING('k', "key", &sort_key, "acquired",
2149 "key for sorting (acquired / contended / avg_wait / wait_total / wait_max / wait_min)"),
2150 OPT_STRING('F', "field", &output_fields, NULL,
2151 "output fields (acquired / contended / avg_wait / wait_total / wait_max / wait_min)"),
2153 OPT_BOOLEAN('c', "combine-locks", &combine_locks,
2154 "combine locks in the same class"),
2155 OPT_BOOLEAN('t', "threads", &show_thread_stats,
2156 "show per-thread lock stats"),
2157 OPT_INTEGER('E', "entries", &print_nr_entries, "display this many functions"),
2158 OPT_PARENT(lock_options)
2161 struct option contention_options[] = {
2162 OPT_STRING('k', "key", &sort_key, "wait_total",
2163 "key for sorting (contended / wait_total / wait_max / wait_min / avg_wait)"),
2164 OPT_STRING('F', "field", &output_fields, "contended,wait_total,wait_max,avg_wait",
2165 "output fields (contended / wait_total / wait_max / wait_min / avg_wait)"),
2166 OPT_BOOLEAN('t', "threads", &show_thread_stats,
2167 "show per-thread lock stats"),
2168 OPT_BOOLEAN('b', "use-bpf", &use_bpf, "use BPF program to collect lock contention stats"),
2169 OPT_BOOLEAN('a', "all-cpus", &target.system_wide,
2170 "System-wide collection from all CPUs"),
2171 OPT_STRING('C', "cpu", &target.cpu_list, "cpu",
2172 "List of cpus to monitor"),
2173 OPT_STRING('p', "pid", &target.pid, "pid",
2174 "Trace on existing process id"),
2175 OPT_STRING(0, "tid", &target.tid, "tid",
2176 "Trace on existing thread id (exclusive to --pid)"),
2177 OPT_CALLBACK(0, "map-nr-entries", &bpf_map_entries, "num",
2178 "Max number of BPF map entries", parse_map_entry),
2179 OPT_CALLBACK(0, "max-stack", &max_stack_depth, "num",
2180 "Set the maximum stack depth when collecting lopck contention, "
2181 "Default: " __stringify(CONTENTION_STACK_DEPTH), parse_max_stack),
2182 OPT_INTEGER(0, "stack-skip", &stack_skip,
2183 "Set the number of stack depth to skip when finding a lock caller, "
2184 "Default: " __stringify(CONTENTION_STACK_SKIP)),
2185 OPT_INTEGER('E', "entries", &print_nr_entries, "display this many functions"),
2186 OPT_BOOLEAN('l', "lock-addr", &show_lock_addrs, "show lock stats by address"),
2187 OPT_CALLBACK('Y', "type-filter", NULL, "FLAGS",
2188 "Filter specific type of locks", parse_lock_type),
2189 OPT_CALLBACK('L', "lock-filter", NULL, "ADDRS/NAMES",
2190 "Filter specific address/symbol of locks", parse_lock_addr),
2191 OPT_PARENT(lock_options)
2194 const char * const info_usage[] = {
2195 "perf lock info [<options>]",
2198 const char *const lock_subcommands[] = { "record", "report", "script",
2199 "info", "contention", NULL };
2200 const char *lock_usage[] = {
2204 const char * const report_usage[] = {
2205 "perf lock report [<options>]",
2208 const char * const contention_usage[] = {
2209 "perf lock contention [<options>]",
2215 for (i = 0; i < LOCKHASH_SIZE; i++)
2216 INIT_HLIST_HEAD(lockhash_table + i);
2218 argc = parse_options_subcommand(argc, argv, lock_options, lock_subcommands,
2219 lock_usage, PARSE_OPT_STOP_AT_NON_OPTION);
2221 usage_with_options(lock_usage, lock_options);
2223 if (strlen(argv[0]) > 2 && strstarts("record", argv[0])) {
2224 return __cmd_record(argc, argv);
2225 } else if (strlen(argv[0]) > 2 && strstarts("report", argv[0])) {
2226 trace_handler = &report_lock_ops;
2228 argc = parse_options(argc, argv,
2229 report_options, report_usage, 0);
2231 usage_with_options(report_usage, report_options);
2233 rc = __cmd_report(false);
2234 } else if (!strcmp(argv[0], "script")) {
2235 /* Aliased to 'perf script' */
2236 return cmd_script(argc, argv);
2237 } else if (!strcmp(argv[0], "info")) {
2239 argc = parse_options(argc, argv,
2240 info_options, info_usage, 0);
2242 usage_with_options(info_usage, info_options);
2244 /* recycling report_lock_ops */
2245 trace_handler = &report_lock_ops;
2246 rc = __cmd_report(true);
2247 } else if (strlen(argv[0]) > 2 && strstarts("contention", argv[0])) {
2248 trace_handler = &contention_lock_ops;
2249 sort_key = "wait_total";
2250 output_fields = "contended,wait_total,wait_max,avg_wait";
2252 #ifndef HAVE_BPF_SKEL
2253 set_option_nobuild(contention_options, 'b', "use-bpf",
2254 "no BUILD_BPF_SKEL=1", false);
2257 argc = parse_options(argc, argv, contention_options,
2258 contention_usage, 0);
2261 if (show_thread_stats && show_lock_addrs) {
2262 pr_err("Cannot use thread and addr mode together\n");
2263 parse_options_usage(contention_usage, contention_options,
2265 parse_options_usage(NULL, contention_options,
2270 rc = __cmd_contention(argc, argv);
2272 usage_with_options(lock_usage, lock_options);