// SPDX-License-Identifier: GPL-2.0
#include "util/data.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/header.h"
#include "util/kwork.h"
#include "util/debug.h"
#include "util/session.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/string2.h"
#include "util/callchain.h"
#include "util/evsel_fprintf.h"
#include <subcmd/pager.h>
#include <subcmd/parse-options.h>
#include <traceevent/event-parse.h>
#include <linux/err.h>
#include <linux/time64.h>
#include <linux/zalloc.h>
/*
 * report header elements width
 */
#define PRINT_CPU_WIDTH 4
#define PRINT_COUNT_WIDTH 9
#define PRINT_RUNTIME_WIDTH 10
#define PRINT_LATENCY_WIDTH 10
#define PRINT_TIMESTAMP_WIDTH 17
#define PRINT_KWORK_NAME_WIDTH 30
#define PRINT_DECIMAL_WIDTH 3
#define PRINT_BRACKETPAIR_WIDTH 2
#define PRINT_TIME_UNIT_SEC_WIDTH 2
#define PRINT_TIME_UNIT_MSEC_WIDTH 3
#define PRINT_RUNTIME_HEADER_WIDTH (PRINT_RUNTIME_WIDTH + PRINT_TIME_UNIT_MSEC_WIDTH)
#define PRINT_LATENCY_HEADER_WIDTH (PRINT_LATENCY_WIDTH + PRINT_TIME_UNIT_MSEC_WIDTH)
#define PRINT_TIMEHIST_CPU_WIDTH (PRINT_CPU_WIDTH + PRINT_BRACKETPAIR_WIDTH)
#define PRINT_TIMESTAMP_HEADER_WIDTH (PRINT_TIMESTAMP_WIDTH + PRINT_TIME_UNIT_SEC_WIDTH)
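
/*
 * A sort dimension binds a --sort key name to a comparison callback.
 * The dimensions selected on the command line are chained into a
 * list that work_cmp() walks in order.
 */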
struct sort_dimension {
	int (*cmp)(struct kwork_work *l, struct kwork_work *r);
	struct list_head list;

static int id_cmp(struct kwork_work *l, struct kwork_work *r)

static int count_cmp(struct kwork_work *l, struct kwork_work *r)
	if (l->nr_atoms > r->nr_atoms)
	if (l->nr_atoms < r->nr_atoms)

static int runtime_cmp(struct kwork_work *l, struct kwork_work *r)
	if (l->total_runtime > r->total_runtime)
	if (l->total_runtime < r->total_runtime)

static int max_runtime_cmp(struct kwork_work *l, struct kwork_work *r)
	if (l->max_runtime > r->max_runtime)
	if (l->max_runtime < r->max_runtime)

static int avg_latency_cmp(struct kwork_work *l, struct kwork_work *r)
	avgl = l->total_latency / l->nr_atoms;
	avgr = r->total_latency / r->nr_atoms;

static int max_latency_cmp(struct kwork_work *l, struct kwork_work *r)
	if (l->max_latency > r->max_latency)
	if (l->max_latency < r->max_latency)
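
/*
 * Map a --sort token to its dimension.  Note that "max" is
 * report-dependent: it defaults to comparing max runtime and is
 * re-pointed at max_latency_cmp() for latency reports.
 */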
static int sort_dimension__add(struct perf_kwork *kwork __maybe_unused,
			       const char *tok, struct list_head *list)
	static struct sort_dimension max_sort_dimension = {
		.cmp = max_runtime_cmp,
	static struct sort_dimension id_sort_dimension = {
	static struct sort_dimension runtime_sort_dimension = {
	static struct sort_dimension count_sort_dimension = {
	static struct sort_dimension avg_sort_dimension = {
		.cmp = avg_latency_cmp,
	struct sort_dimension *available_sorts[] = {
		&count_sort_dimension,
		&runtime_sort_dimension,

	if (kwork->report == KWORK_REPORT_LATENCY)
		max_sort_dimension.cmp = max_latency_cmp;

	for (i = 0; i < ARRAY_SIZE(available_sorts); i++) {
		if (!strcmp(available_sorts[i]->name, tok)) {
			list_add_tail(&available_sorts[i]->list, list);
static void setup_sorting(struct perf_kwork *kwork,
			  const struct option *options,
			  const char * const usage_msg[])
	char *tmp, *tok, *str = strdup(kwork->sort_order);

	for (tok = strtok_r(str, ", ", &tmp);
	     tok; tok = strtok_r(NULL, ", ", &tmp)) {
		if (sort_dimension__add(kwork, tok, &kwork->sort_list) < 0)
			usage_with_options_msg(usage_msg, options,
					       "Unknown --sort key: `%s'", tok);

	pr_debug("Sort order: %s\n", kwork->sort_order);
static struct kwork_atom *atom_new(struct perf_kwork *kwork,
				   struct perf_sample *sample)
	struct kwork_atom_page *page;
	struct kwork_atom *atom = NULL;

	list_for_each_entry(page, &kwork->atom_page_list, list) {
		if (!bitmap_full(page->bitmap, NR_ATOM_PER_PAGE)) {
			i = find_first_zero_bit(page->bitmap, NR_ATOM_PER_PAGE);
			BUG_ON(i >= NR_ATOM_PER_PAGE);
			atom = &page->atoms[i];

	page = zalloc(sizeof(*page));
		pr_err("Failed to zalloc kwork atom page\n");
	atom = &page->atoms[0];
	list_add_tail(&page->list, &kwork->atom_page_list);

	__set_bit(i, page->bitmap);
	atom->time = sample->time;
	atom->page_addr = page;
	atom->bit_inpage = i;
static void atom_free(struct kwork_atom *atom)
	if (atom->prev != NULL)
		atom_free(atom->prev);

	__clear_bit(atom->bit_inpage,
		    ((struct kwork_atom_page *)atom->page_addr)->bitmap);

static void atom_del(struct kwork_atom *atom)
	list_del(&atom->list);
static int work_cmp(struct list_head *list,
		    struct kwork_work *l, struct kwork_work *r)
	struct sort_dimension *sort;

	BUG_ON(list_empty(list));

	list_for_each_entry(sort, list, list) {
		ret = sort->cmp(l, r);

static struct kwork_work *work_search(struct rb_root_cached *root,
				      struct kwork_work *key,
				      struct list_head *sort_list)
	struct kwork_work *work;
	struct rb_node *node = root->rb_root.rb_node;

		work = container_of(node, struct kwork_work, node);
		cmp = work_cmp(sort_list, key, work);
			node = node->rb_left;
			node = node->rb_right;
			if (work->name == NULL)
				work->name = key->name;
static void work_insert(struct rb_root_cached *root,
			struct kwork_work *key, struct list_head *sort_list)
	bool leftmost = true;
	struct kwork_work *cur;
	struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL;

		cur = container_of(*new, struct kwork_work, node);
		cmp = work_cmp(sort_list, key, cur);
			new = &((*new)->rb_left);
			new = &((*new)->rb_right);

	rb_link_node(&key->node, parent, new);
	rb_insert_color_cached(&key->node, root, leftmost);
static struct kwork_work *work_new(struct kwork_work *key)
	struct kwork_work *work = zalloc(sizeof(*work));
		pr_err("Failed to zalloc kwork work\n");

	for (i = 0; i < KWORK_TRACE_MAX; i++)
		INIT_LIST_HEAD(&work->atom_list[i]);

	work->cpu = key->cpu;
	work->name = key->name;
	work->class = key->class;

static struct kwork_work *work_findnew(struct rb_root_cached *root,
				       struct kwork_work *key,
				       struct list_head *sort_list)
	struct kwork_work *work = work_search(root, key, sort_list);

	work = work_new(key);
	work_insert(root, work, sort_list);
static void profile_update_timespan(struct perf_kwork *kwork,
				    struct perf_sample *sample)
	if ((kwork->timestart == 0) || (kwork->timestart > sample->time))
		kwork->timestart = sample->time;

	if (kwork->timeend < sample->time)
		kwork->timeend = sample->time;

static bool profile_event_match(struct perf_kwork *kwork,
				struct kwork_work *work,
				struct perf_sample *sample)
	u64 time = sample->time;
	struct perf_time_interval *ptime = &kwork->ptime;

	if ((kwork->cpu_list != NULL) && !test_bit(cpu, kwork->cpu_bitmap))

	if (((ptime->start != 0) && (ptime->start > time)) ||
	    ((ptime->end != 0) && (ptime->end < time)))

	if ((kwork->profile_name != NULL) &&
	    (work->name != NULL) &&
	    (strcmp(work->name, kwork->profile_name) != 0))

	profile_update_timespan(kwork, sample);
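
/*
 * work_push_atom() opens a time span: it allocates an atom stamped
 * with the sample time and queues it on the work's src_type list.
 * If dst_type is valid, the newest atom on that list is unlinked and
 * chained via atom->prev, e.g. so an entry atom can remember the
 * raise atom that preceded it.
 */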
static int work_push_atom(struct perf_kwork *kwork,
			  struct kwork_class *class,
			  enum kwork_trace_type src_type,
			  enum kwork_trace_type dst_type,
			  struct evsel *evsel,
			  struct perf_sample *sample,
			  struct machine *machine,
			  struct kwork_work **ret_work)
	struct kwork_atom *atom, *dst_atom;
	struct kwork_work *work, key;

	BUG_ON(class->work_init == NULL);
	class->work_init(class, &key, evsel, sample, machine);

	atom = atom_new(kwork, sample);

	work = work_findnew(&class->work_root, &key, &kwork->cmp_id);

	if (!profile_event_match(kwork, work, sample))

	if (dst_type < KWORK_TRACE_MAX) {
		dst_atom = list_last_entry_or_null(&work->atom_list[dst_type],
						   struct kwork_atom, list);
		if (dst_atom != NULL) {
			atom->prev = dst_atom;
			list_del(&dst_atom->list);

	if (ret_work != NULL)

	list_add_tail(&atom->list, &work->atom_list[src_type]);
static struct kwork_atom *work_pop_atom(struct perf_kwork *kwork,
					struct kwork_class *class,
					enum kwork_trace_type src_type,
					enum kwork_trace_type dst_type,
					struct evsel *evsel,
					struct perf_sample *sample,
					struct machine *machine,
					struct kwork_work **ret_work)
	struct kwork_atom *atom, *src_atom;
	struct kwork_work *work, key;

	BUG_ON(class->work_init == NULL);
	class->work_init(class, &key, evsel, sample, machine);

	work = work_findnew(&class->work_root, &key, &kwork->cmp_id);
	if (ret_work != NULL)

	if (!profile_event_match(kwork, work, sample))

	atom = list_last_entry_or_null(&work->atom_list[dst_type],
				       struct kwork_atom, list);

	src_atom = atom_new(kwork, sample);
	if (src_atom != NULL)
		list_add_tail(&src_atom->list, &work->atom_list[src_type]);

	if (ret_work != NULL)
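
/*
 * Runtime accounting: a completed entry/exit pair contributes
 * exit_time - entry_time to total_runtime, and the longest single
 * span is remembered together with its start and end timestamps.
 */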
static void report_update_exit_event(struct kwork_work *work,
				     struct kwork_atom *atom,
				     struct perf_sample *sample)
	u64 exit_time = sample->time;
	u64 entry_time = atom->time;

	if ((entry_time != 0) && (exit_time >= entry_time)) {
		delta = exit_time - entry_time;
		if ((delta > work->max_runtime) ||
		    (work->max_runtime == 0)) {
			work->max_runtime = delta;
			work->max_runtime_start = entry_time;
			work->max_runtime_end = exit_time;
		work->total_runtime += delta;
static int report_entry_event(struct perf_kwork *kwork,
			      struct kwork_class *class,
			      struct evsel *evsel,
			      struct perf_sample *sample,
			      struct machine *machine)
	return work_push_atom(kwork, class, KWORK_TRACE_ENTRY,
			      KWORK_TRACE_MAX, evsel, sample,

static int report_exit_event(struct perf_kwork *kwork,
			     struct kwork_class *class,
			     struct evsel *evsel,
			     struct perf_sample *sample,
			     struct machine *machine)
	struct kwork_atom *atom = NULL;
	struct kwork_work *work = NULL;

	atom = work_pop_atom(kwork, class, KWORK_TRACE_EXIT,
			     KWORK_TRACE_ENTRY, evsel, sample,

	report_update_exit_event(work, atom, sample);
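
/*
 * Latency accounting: the delay of one instance is
 * entry_time - raise_time, i.e. how long the work waited between
 * being raised and starting to run.
 */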
static void latency_update_entry_event(struct kwork_work *work,
				       struct kwork_atom *atom,
				       struct perf_sample *sample)
	u64 entry_time = sample->time;
	u64 raise_time = atom->time;

	if ((raise_time != 0) && (entry_time >= raise_time)) {
		delta = entry_time - raise_time;
		if ((delta > work->max_latency) ||
		    (work->max_latency == 0)) {
			work->max_latency = delta;
			work->max_latency_start = raise_time;
			work->max_latency_end = entry_time;
		work->total_latency += delta;
static int latency_raise_event(struct perf_kwork *kwork,
			       struct kwork_class *class,
			       struct evsel *evsel,
			       struct perf_sample *sample,
			       struct machine *machine)
	return work_push_atom(kwork, class, KWORK_TRACE_RAISE,
			      KWORK_TRACE_MAX, evsel, sample,

static int latency_entry_event(struct perf_kwork *kwork,
			       struct kwork_class *class,
			       struct evsel *evsel,
			       struct perf_sample *sample,
			       struct machine *machine)
	struct kwork_atom *atom = NULL;
	struct kwork_work *work = NULL;

	atom = work_pop_atom(kwork, class, KWORK_TRACE_ENTRY,
			     KWORK_TRACE_RAISE, evsel, sample,

	latency_update_entry_event(work, atom, sample);
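
/*
 * Resolve and commit the callchain of an entry sample.  Generic
 * softirq frames such as __do_softirq are skipped so that the
 * printed chain starts at the handler of interest.
 */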
static void timehist_save_callchain(struct perf_kwork *kwork,
				    struct perf_sample *sample,
				    struct evsel *evsel,
				    struct machine *machine)
	struct thread *thread;
	struct callchain_cursor_node *node;
	struct callchain_cursor *cursor = &callchain_cursor;

	if (!kwork->show_callchain || sample->callchain == NULL)

	/* want main thread for process - has maps */
	thread = machine__findnew_thread(machine, sample->pid, sample->pid);
	if (thread == NULL) {
		pr_debug("Failed to get thread for pid %d\n", sample->pid);

	if (thread__resolve_callchain(thread, cursor, evsel, sample,
				      NULL, NULL, kwork->max_stack + 2) != 0) {
		pr_debug("Failed to resolve callchain, skipping\n");

	callchain_cursor_commit(cursor);

		node = callchain_cursor_current(cursor);

		if (!strcmp(sym->name, "__softirqentry_text_start") ||
		    !strcmp(sym->name, "__do_softirq"))

		callchain_cursor_advance(cursor);
static void timehist_print_event(struct perf_kwork *kwork,
				 struct kwork_work *work,
				 struct kwork_atom *atom,
				 struct perf_sample *sample,
				 struct addr_location *al)
	char entrytime[32], exittime[32];
	char kwork_name[PRINT_KWORK_NAME_WIDTH];

	timestamp__scnprintf_usec(atom->time,
				  entrytime, sizeof(entrytime));
	printf(" %*s ", PRINT_TIMESTAMP_WIDTH, entrytime);

	timestamp__scnprintf_usec(sample->time,
				  exittime, sizeof(exittime));
	printf(" %*s ", PRINT_TIMESTAMP_WIDTH, exittime);

	printf(" [%0*d] ", PRINT_CPU_WIDTH, work->cpu);

	if (work->class && work->class->work_name) {
		work->class->work_name(work, kwork_name,
				       PRINT_KWORK_NAME_WIDTH);
		printf(" %-*s ", PRINT_KWORK_NAME_WIDTH, kwork_name);
	} else
		printf(" %-*s ", PRINT_KWORK_NAME_WIDTH, "");

	printf(" %*.*f ",
	       PRINT_RUNTIME_WIDTH, PRINT_DECIMAL_WIDTH,
	       (double)(sample->time - atom->time) / NSEC_PER_MSEC);

	if (atom->prev != NULL)
		printf(" %*.*f ", PRINT_LATENCY_WIDTH, PRINT_DECIMAL_WIDTH,
		       (double)(atom->time - atom->prev->time) / NSEC_PER_MSEC);
	else
		printf(" %*s ", PRINT_LATENCY_WIDTH, " ");

	if (kwork->show_callchain) {
		sample__fprintf_sym(sample, al, 0,
				    EVSEL__PRINT_SYM | EVSEL__PRINT_ONELINE |
				    EVSEL__PRINT_CALLCHAIN_ARROW |
				    EVSEL__PRINT_SKIP_IGNORED,
				    &callchain_cursor, symbol_conf.bt_stop_list,
static int timehist_raise_event(struct perf_kwork *kwork,
				struct kwork_class *class,
				struct evsel *evsel,
				struct perf_sample *sample,
				struct machine *machine)
	return work_push_atom(kwork, class, KWORK_TRACE_RAISE,
			      KWORK_TRACE_MAX, evsel, sample,

static int timehist_entry_event(struct perf_kwork *kwork,
				struct kwork_class *class,
				struct evsel *evsel,
				struct perf_sample *sample,
				struct machine *machine)
	struct kwork_work *work = NULL;

	ret = work_push_atom(kwork, class, KWORK_TRACE_ENTRY,
			     KWORK_TRACE_RAISE, evsel, sample,

	timehist_save_callchain(kwork, sample, evsel, machine);
static int timehist_exit_event(struct perf_kwork *kwork,
			       struct kwork_class *class,
			       struct evsel *evsel,
			       struct perf_sample *sample,
			       struct machine *machine)
	struct kwork_atom *atom = NULL;
	struct kwork_work *work = NULL;
	struct addr_location al;

	if (machine__resolve(machine, &al, sample) < 0) {
		pr_debug("Problem processing event, skipping it\n");

	atom = work_pop_atom(kwork, class, KWORK_TRACE_EXIT,
			     KWORK_TRACE_ENTRY, evsel, sample,

	timehist_print_event(kwork, work, atom, sample, &al);
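
/*
 * Each kwork class (irq, softirq, workqueue) provides its tracepoint
 * handlers plus work_init()/work_name() callbacks, so the generic
 * code above never has to parse class-specific tracepoint fields.
 */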
static struct kwork_class kwork_irq;
static int process_irq_handler_entry_event(struct perf_tool *tool,
					   struct evsel *evsel,
					   struct perf_sample *sample,
					   struct machine *machine)
	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);

	if (kwork->tp_handler->entry_event)
		return kwork->tp_handler->entry_event(kwork, &kwork_irq,
						      evsel, sample, machine);

static int process_irq_handler_exit_event(struct perf_tool *tool,
					  struct evsel *evsel,
					  struct perf_sample *sample,
					  struct machine *machine)
	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);

	if (kwork->tp_handler->exit_event)
		return kwork->tp_handler->exit_event(kwork, &kwork_irq,
						     evsel, sample, machine);
const struct evsel_str_handler irq_tp_handlers[] = {
	{ "irq:irq_handler_entry", process_irq_handler_entry_event, },
	{ "irq:irq_handler_exit", process_irq_handler_exit_event, },

static int irq_class_init(struct kwork_class *class,
			  struct perf_session *session)
	if (perf_session__set_tracepoints_handlers(session, irq_tp_handlers)) {
		pr_err("Failed to set irq tracepoints handlers\n");

	class->work_root = RB_ROOT_CACHED;
static void irq_work_init(struct kwork_class *class,
			  struct kwork_work *work,
			  struct evsel *evsel,
			  struct perf_sample *sample,
			  struct machine *machine __maybe_unused)
	work->cpu = sample->cpu;
	work->id = evsel__intval(evsel, sample, "irq");
	work->name = evsel__strval(evsel, sample, "name");

static void irq_work_name(struct kwork_work *work, char *buf, int len)
	snprintf(buf, len, "%s:%" PRIu64 "", work->name, work->id);

static struct kwork_class kwork_irq = {
	.type = KWORK_CLASS_IRQ,
	.tp_handlers = irq_tp_handlers,
	.class_init = irq_class_init,
	.work_init = irq_work_init,
	.work_name = irq_work_name,
static struct kwork_class kwork_softirq;
static int process_softirq_raise_event(struct perf_tool *tool,
				       struct evsel *evsel,
				       struct perf_sample *sample,
				       struct machine *machine)
	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);

	if (kwork->tp_handler->raise_event)
		return kwork->tp_handler->raise_event(kwork, &kwork_softirq,
						      evsel, sample, machine);

static int process_softirq_entry_event(struct perf_tool *tool,
				       struct evsel *evsel,
				       struct perf_sample *sample,
				       struct machine *machine)
	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);

	if (kwork->tp_handler->entry_event)
		return kwork->tp_handler->entry_event(kwork, &kwork_softirq,
						      evsel, sample, machine);

static int process_softirq_exit_event(struct perf_tool *tool,
				      struct evsel *evsel,
				      struct perf_sample *sample,
				      struct machine *machine)
	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);

	if (kwork->tp_handler->exit_event)
		return kwork->tp_handler->exit_event(kwork, &kwork_softirq,
						     evsel, sample, machine);
const struct evsel_str_handler softirq_tp_handlers[] = {
	{ "irq:softirq_raise", process_softirq_raise_event, },
	{ "irq:softirq_entry", process_softirq_entry_event, },
	{ "irq:softirq_exit", process_softirq_exit_event, },

static int softirq_class_init(struct kwork_class *class,
			      struct perf_session *session)
	if (perf_session__set_tracepoints_handlers(session,
						   softirq_tp_handlers)) {
		pr_err("Failed to set softirq tracepoints handlers\n");

	class->work_root = RB_ROOT_CACHED;
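
/*
 * Translate a softirq vector number into its name (TIMER, RCU, ...)
 * by walking the __print_symbolic() table embedded in the
 * tracepoint's print format, instead of hard-coding the vector list.
 */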
static char *evsel__softirq_name(struct evsel *evsel, u64 num)
	struct tep_print_flag_sym *sym = NULL;
	struct tep_print_arg *args = evsel->tp_format->print_fmt.args;

	if ((args == NULL) || (args->next == NULL))

	/* skip softirq field: "REC->vec" */
	for (sym = args->next->symbol.symbols; sym != NULL; sym = sym->next) {
		if ((eval_flag(sym->value) == (unsigned long long)num) &&
		    (strlen(sym->str) != 0)) {

	name = strdup(sym->str);
		pr_err("Failed to copy symbol name\n");
static void softirq_work_init(struct kwork_class *class,
			      struct kwork_work *work,
			      struct evsel *evsel,
			      struct perf_sample *sample,
			      struct machine *machine __maybe_unused)
	u64 num = evsel__intval(evsel, sample, "vec");

	work->cpu = sample->cpu;
	work->name = evsel__softirq_name(evsel, num);

static void softirq_work_name(struct kwork_work *work, char *buf, int len)
	snprintf(buf, len, "(s)%s:%" PRIu64 "", work->name, work->id);

static struct kwork_class kwork_softirq = {
	.type = KWORK_CLASS_SOFTIRQ,
	.tp_handlers = softirq_tp_handlers,
	.class_init = softirq_class_init,
	.work_init = softirq_work_init,
	.work_name = softirq_work_name,
static struct kwork_class kwork_workqueue;
static int process_workqueue_activate_work_event(struct perf_tool *tool,
						 struct evsel *evsel,
						 struct perf_sample *sample,
						 struct machine *machine)
	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);

	if (kwork->tp_handler->raise_event)
		return kwork->tp_handler->raise_event(kwork, &kwork_workqueue,
						      evsel, sample, machine);

static int process_workqueue_execute_start_event(struct perf_tool *tool,
						 struct evsel *evsel,
						 struct perf_sample *sample,
						 struct machine *machine)
	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);

	if (kwork->tp_handler->entry_event)
		return kwork->tp_handler->entry_event(kwork, &kwork_workqueue,
						      evsel, sample, machine);

static int process_workqueue_execute_end_event(struct perf_tool *tool,
					       struct evsel *evsel,
					       struct perf_sample *sample,
					       struct machine *machine)
	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);

	if (kwork->tp_handler->exit_event)
		return kwork->tp_handler->exit_event(kwork, &kwork_workqueue,
						     evsel, sample, machine);
const struct evsel_str_handler workqueue_tp_handlers[] = {
	{ "workqueue:workqueue_activate_work", process_workqueue_activate_work_event, },
	{ "workqueue:workqueue_execute_start", process_workqueue_execute_start_event, },
	{ "workqueue:workqueue_execute_end", process_workqueue_execute_end_event, },

static int workqueue_class_init(struct kwork_class *class,
				struct perf_session *session)
	if (perf_session__set_tracepoints_handlers(session,
						   workqueue_tp_handlers)) {
		pr_err("Failed to set workqueue tracepoints handlers\n");

	class->work_root = RB_ROOT_CACHED;
static void workqueue_work_init(struct kwork_class *class,
				struct kwork_work *work,
				struct evsel *evsel,
				struct perf_sample *sample,
				struct machine *machine)
	char *modp = NULL;
	unsigned long long function_addr = evsel__intval(evsel,
							 sample, "function");

	work->class = class;
	work->cpu = sample->cpu;
	work->id = evsel__intval(evsel, sample, "work");
	work->name = function_addr == 0 ? NULL :
		machine__resolve_kernel_addr(machine, &function_addr, &modp);

static void workqueue_work_name(struct kwork_work *work, char *buf, int len)
	if (work->name != NULL)
		snprintf(buf, len, "(w)%s", work->name);
	else
		snprintf(buf, len, "(w)0x%" PRIx64, work->id);

static struct kwork_class kwork_workqueue = {
	.name = "workqueue",
	.type = KWORK_CLASS_WORKQUEUE,
	.nr_tracepoints = 3,
	.tp_handlers = workqueue_tp_handlers,
	.class_init = workqueue_class_init,
	.work_init = workqueue_work_init,
	.work_name = workqueue_work_name,

static struct kwork_class *kwork_class_supported_list[KWORK_CLASS_MAX] = {
	[KWORK_CLASS_IRQ] = &kwork_irq,
	[KWORK_CLASS_SOFTIRQ] = &kwork_softirq,
	[KWORK_CLASS_WORKQUEUE] = &kwork_workqueue,
static void print_separator(int len)
	printf(" %.*s\n", len, graph_dotted_line);

static int report_print_work(struct perf_kwork *kwork, struct kwork_work *work)
	char kwork_name[PRINT_KWORK_NAME_WIDTH];
	char max_runtime_start[32], max_runtime_end[32];
	char max_latency_start[32], max_latency_end[32];

	if (work->class && work->class->work_name) {
		work->class->work_name(work, kwork_name,
				       PRINT_KWORK_NAME_WIDTH);
		ret += printf(" %-*s |", PRINT_KWORK_NAME_WIDTH, kwork_name);
	} else
		ret += printf(" %-*s |", PRINT_KWORK_NAME_WIDTH, "");

	ret += printf(" %0*d |", PRINT_CPU_WIDTH, work->cpu);
	if (kwork->report == KWORK_REPORT_RUNTIME) {
		ret += printf(" %*.*f ms |",
			      PRINT_RUNTIME_WIDTH, PRINT_DECIMAL_WIDTH,
			      (double)work->total_runtime / NSEC_PER_MSEC);
	} else if (kwork->report == KWORK_REPORT_LATENCY) { // avg delay
		ret += printf(" %*.*f ms |",
			      PRINT_LATENCY_WIDTH, PRINT_DECIMAL_WIDTH,
			      (double)work->total_latency /
			      work->nr_atoms / NSEC_PER_MSEC);

	ret += printf(" %*" PRIu64 " |", PRINT_COUNT_WIDTH, work->nr_atoms);

	/*
	 * max runtime, max runtime start, max runtime end
	 */
	if (kwork->report == KWORK_REPORT_RUNTIME) {
		timestamp__scnprintf_usec(work->max_runtime_start,
					  max_runtime_start,
					  sizeof(max_runtime_start));
		timestamp__scnprintf_usec(work->max_runtime_end,
					  max_runtime_end,
					  sizeof(max_runtime_end));
		ret += printf(" %*.*f ms | %*s s | %*s s |",
			      PRINT_RUNTIME_WIDTH, PRINT_DECIMAL_WIDTH,
			      (double)work->max_runtime / NSEC_PER_MSEC,
			      PRINT_TIMESTAMP_WIDTH, max_runtime_start,
			      PRINT_TIMESTAMP_WIDTH, max_runtime_end);
	/*
	 * max delay, max delay start, max delay end
	 */
	else if (kwork->report == KWORK_REPORT_LATENCY) {
		timestamp__scnprintf_usec(work->max_latency_start,
					  max_latency_start,
					  sizeof(max_latency_start));
		timestamp__scnprintf_usec(work->max_latency_end,
					  max_latency_end,
					  sizeof(max_latency_end));
		ret += printf(" %*.*f ms | %*s s | %*s s |",
			      PRINT_LATENCY_WIDTH, PRINT_DECIMAL_WIDTH,
			      (double)work->max_latency / NSEC_PER_MSEC,
			      PRINT_TIMESTAMP_WIDTH, max_latency_start,
			      PRINT_TIMESTAMP_WIDTH, max_latency_end);
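
/*
 * The report header below uses the same width macros as the rows, so
 * the columns line up, and its printed length is reused to size the
 * separator lines.
 */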
static int report_print_header(struct perf_kwork *kwork)
	ret = printf(" %-*s | %-*s |",
		     PRINT_KWORK_NAME_WIDTH, "Kwork Name",
		     PRINT_CPU_WIDTH, "Cpu");

	if (kwork->report == KWORK_REPORT_RUNTIME) {
		ret += printf(" %-*s |",
			      PRINT_RUNTIME_HEADER_WIDTH, "Total Runtime");
	} else if (kwork->report == KWORK_REPORT_LATENCY) {
		ret += printf(" %-*s |",
			      PRINT_LATENCY_HEADER_WIDTH, "Avg delay");

	ret += printf(" %-*s |", PRINT_COUNT_WIDTH, "Count");

	if (kwork->report == KWORK_REPORT_RUNTIME) {
		ret += printf(" %-*s | %-*s | %-*s |",
			      PRINT_RUNTIME_HEADER_WIDTH, "Max runtime",
			      PRINT_TIMESTAMP_HEADER_WIDTH, "Max runtime start",
			      PRINT_TIMESTAMP_HEADER_WIDTH, "Max runtime end");
	} else if (kwork->report == KWORK_REPORT_LATENCY) {
		ret += printf(" %-*s | %-*s | %-*s |",
			      PRINT_LATENCY_HEADER_WIDTH, "Max delay",
			      PRINT_TIMESTAMP_HEADER_WIDTH, "Max delay start",
			      PRINT_TIMESTAMP_HEADER_WIDTH, "Max delay end");

	print_separator(ret);
static void timehist_print_header(void)
	printf(" %-*s  %-*s  %-*s  %-*s  %-*s  %-*s\n",
	       PRINT_TIMESTAMP_WIDTH, "Runtime start",
	       PRINT_TIMESTAMP_WIDTH, "Runtime end",
	       PRINT_TIMEHIST_CPU_WIDTH, "Cpu",
	       PRINT_KWORK_NAME_WIDTH, "Kwork name",
	       PRINT_RUNTIME_WIDTH, "Runtime",
	       PRINT_RUNTIME_WIDTH, "Delaytime");

	printf(" %-*s  %-*s  %-*s  %-*s  %-*s  %-*s\n",
	       PRINT_TIMESTAMP_WIDTH, "",
	       PRINT_TIMESTAMP_WIDTH, "",
	       PRINT_TIMEHIST_CPU_WIDTH, "",
	       PRINT_KWORK_NAME_WIDTH, "(TYPE)NAME:NUM",
	       PRINT_RUNTIME_WIDTH, "(msec)",
	       PRINT_RUNTIME_WIDTH, "(msec)");

	printf(" %.*s  %.*s  %.*s  %.*s  %.*s  %.*s\n",
	       PRINT_TIMESTAMP_WIDTH, graph_dotted_line,
	       PRINT_TIMESTAMP_WIDTH, graph_dotted_line,
	       PRINT_TIMEHIST_CPU_WIDTH, graph_dotted_line,
	       PRINT_KWORK_NAME_WIDTH, graph_dotted_line,
	       PRINT_RUNTIME_WIDTH, graph_dotted_line,
	       PRINT_RUNTIME_WIDTH, graph_dotted_line);
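
/*
 * The summary's load figure is all_runtime divided by the profiled
 * time span (timeend - timestart), i.e. the fraction of wall time
 * spent in the traced kernel work.
 */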
static void print_summary(struct perf_kwork *kwork)
	u64 time = kwork->timeend - kwork->timestart;

	printf("  Total count            : %9" PRIu64 "\n", kwork->all_count);
	printf("  Total runtime   (msec) : %9.3f (%.3f%% load average)\n",
	       (double)kwork->all_runtime / NSEC_PER_MSEC,
	       time == 0 ? 0 : (double)kwork->all_runtime / time);
	printf("  Total time span (msec) : %9.3f\n",
	       (double)time / NSEC_PER_MSEC);
static unsigned long long nr_list_entry(struct list_head *head)
	struct list_head *pos;
	unsigned long long n = 0;

	list_for_each(pos, head)
static void print_skipped_events(struct perf_kwork *kwork)
	const char *const kwork_event_str[] = {
		[KWORK_TRACE_RAISE] = "raise",
		[KWORK_TRACE_ENTRY] = "entry",
		[KWORK_TRACE_EXIT] = "exit",

	if ((kwork->nr_skipped_events[KWORK_TRACE_MAX] != 0) &&
	    (kwork->nr_events != 0)) {
		printf(" INFO: %.3f%% skipped events (%" PRIu64 " including ",
		       (double)kwork->nr_skipped_events[KWORK_TRACE_MAX] /
		       (double)kwork->nr_events * 100.0,
		       kwork->nr_skipped_events[KWORK_TRACE_MAX]);

		for (i = 0; i < KWORK_TRACE_MAX; i++) {
			printf("%" PRIu64 " %s%s",
			       kwork->nr_skipped_events[i],
			       kwork_event_str[i],
			       (i == KWORK_TRACE_MAX - 1) ? ")\n" : ", ");

	printf(" INFO: used %lld atom pages\n",
	       nr_list_entry(&kwork->atom_page_list));
static void print_bad_events(struct perf_kwork *kwork)
	if ((kwork->nr_lost_events != 0) && (kwork->nr_events != 0)) {
		printf(" INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n",
		       (double)kwork->nr_lost_events /
		       (double)kwork->nr_events * 100.0,
		       kwork->nr_lost_events, kwork->nr_events,
		       kwork->nr_lost_chunks);
static void work_sort(struct perf_kwork *kwork, struct kwork_class *class)
	struct rb_node *node;
	struct kwork_work *data;
	struct rb_root_cached *root = &class->work_root;

	pr_debug("Sorting %s ...\n", class->name);

	node = rb_first_cached(root);
		rb_erase_cached(node, root);
		data = rb_entry(node, struct kwork_work, node);
		work_insert(&kwork->sorted_work_root,
			    data, &kwork->sort_list);

static void perf_kwork__sort(struct perf_kwork *kwork)
	struct kwork_class *class;

	list_for_each_entry(class, &kwork->class_list, list)
		work_sort(kwork, class);
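
/*
 * Bind the handler ops matching the report type (runtime report,
 * latency, timehist) and validate the CPU list, time span and
 * callchain options before any events are processed.
 */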
static int perf_kwork__check_config(struct perf_kwork *kwork,
				    struct perf_session *session)
	struct evsel *evsel;
	struct kwork_class *class;

	static struct trace_kwork_handler report_ops = {
		.entry_event = report_entry_event,
		.exit_event = report_exit_event,
	static struct trace_kwork_handler latency_ops = {
		.raise_event = latency_raise_event,
		.entry_event = latency_entry_event,
	static struct trace_kwork_handler timehist_ops = {
		.raise_event = timehist_raise_event,
		.entry_event = timehist_entry_event,
		.exit_event = timehist_exit_event,

	switch (kwork->report) {
	case KWORK_REPORT_RUNTIME:
		kwork->tp_handler = &report_ops;
	case KWORK_REPORT_LATENCY:
		kwork->tp_handler = &latency_ops;
	case KWORK_REPORT_TIMEHIST:
		kwork->tp_handler = &timehist_ops;
		pr_debug("Invalid report type %d\n", kwork->report);

	list_for_each_entry(class, &kwork->class_list, list)
		if ((class->class_init != NULL) &&
		    (class->class_init(class, session) != 0))

	if (kwork->cpu_list != NULL) {
		ret = perf_session__cpu_bitmap(session,
			pr_err("Invalid cpu bitmap\n");

	if (kwork->time_str != NULL) {
		ret = perf_time__parse_str(&kwork->ptime, kwork->time_str);
			pr_err("Invalid time span\n");

	list_for_each_entry(evsel, &session->evlist->core.entries, core.node) {
		if (kwork->show_callchain && !evsel__has_callchain(evsel)) {
			pr_debug("Samples do not have callchains\n");
			kwork->show_callchain = 0;
			symbol_conf.use_callchain = 0;
static int perf_kwork__read_events(struct perf_kwork *kwork)
	struct perf_session *session = NULL;

	struct perf_data data = {
		.mode = PERF_DATA_MODE_READ,
		.force = kwork->force,

	session = perf_session__new(&data, &kwork->tool);
	if (IS_ERR(session)) {
		pr_debug("Error creating perf session\n");
		return PTR_ERR(session);

	symbol__init(&session->header.env);

	if (perf_kwork__check_config(kwork, session) != 0)

	if (session->tevent.pevent &&
	    tep_set_function_resolver(session->tevent.pevent,
				      machine__resolve_kernel_addr,
				      &session->machines.host) < 0) {
		pr_err("Failed to set libtraceevent function resolver\n");

	if (kwork->report == KWORK_REPORT_TIMEHIST)
		timehist_print_header();

	ret = perf_session__process_events(session);
		pr_debug("Failed to process events, error %d\n", ret);

	kwork->nr_events = session->evlist->stats.nr_events[0];
	kwork->nr_lost_events = session->evlist->stats.total_lost;
	kwork->nr_lost_chunks = session->evlist->stats.nr_events[PERF_RECORD_LOST];

	perf_session__delete(session);
static void process_skipped_events(struct perf_kwork *kwork,
				   struct kwork_work *work)
	unsigned long long count;

	for (i = 0; i < KWORK_TRACE_MAX; i++) {
		count = nr_list_entry(&work->atom_list[i]);
		kwork->nr_skipped_events[i] += count;
		kwork->nr_skipped_events[KWORK_TRACE_MAX] += count;

struct kwork_work *perf_kwork_add_work(struct perf_kwork *kwork,
				       struct kwork_class *class,
				       struct kwork_work *key)
	struct kwork_work *work = NULL;

	work = work_new(key);

	work_insert(&class->work_root, work, &kwork->cmp_id);
static void sig_handler(int sig)
	/*
	 * Simply capture the termination signal so that
	 * the program can continue after pause() returns.
	 */
	pr_debug("Capture signal %d\n", sig);
static int perf_kwork__report_bpf(struct perf_kwork *kwork)
	signal(SIGINT, sig_handler);
	signal(SIGTERM, sig_handler);

	ret = perf_kwork__trace_prepare_bpf(kwork);

	printf("Starting trace, hit <Ctrl+C> to stop and report\n");

	perf_kwork__trace_start();

	/*
	 * a simple pause, wait here for stop signal
	 */
	pause();

	perf_kwork__trace_finish();

	perf_kwork__report_read_bpf(kwork);

	perf_kwork__report_cleanup_bpf();
static int perf_kwork__report(struct perf_kwork *kwork)
	struct rb_node *next;
	struct kwork_work *work;

		ret = perf_kwork__report_bpf(kwork);
		ret = perf_kwork__read_events(kwork);

	perf_kwork__sort(kwork);

	ret = report_print_header(kwork);
	next = rb_first_cached(&kwork->sorted_work_root);
		work = rb_entry(next, struct kwork_work, node);
		process_skipped_events(kwork, work);

		if (work->nr_atoms != 0) {
			report_print_work(kwork, work);
			if (kwork->summary) {
				kwork->all_runtime += work->total_runtime;
				kwork->all_count += work->nr_atoms;

		next = rb_next(next);

	print_separator(ret);

	if (kwork->summary) {
		print_summary(kwork);
		print_separator(ret);

	print_bad_events(kwork);
	print_skipped_events(kwork);
typedef int (*tracepoint_handler)(struct perf_tool *tool,
				  struct evsel *evsel,
				  struct perf_sample *sample,
				  struct machine *machine);

static int perf_kwork__process_tracepoint_sample(struct perf_tool *tool,
						 union perf_event *event __maybe_unused,
						 struct perf_sample *sample,
						 struct evsel *evsel,
						 struct machine *machine)
	if (evsel->handler != NULL) {
		tracepoint_handler f = evsel->handler;

		err = f(tool, evsel, sample, machine);
static int perf_kwork__timehist(struct perf_kwork *kwork)
	/*
	 * event handlers for timehist option
	 */
	kwork->tool.comm = perf_event__process_comm;
	kwork->tool.exit = perf_event__process_exit;
	kwork->tool.fork = perf_event__process_fork;
	kwork->tool.attr = perf_event__process_attr;
	kwork->tool.tracing_data = perf_event__process_tracing_data;
	kwork->tool.build_id = perf_event__process_build_id;
	kwork->tool.ordered_events = true;
	kwork->tool.ordering_requires_timestamps = true;
	symbol_conf.use_callchain = kwork->show_callchain;

	if (symbol__validate_sym_arguments()) {
		pr_err("Failed to validate sym arguments\n");

	return perf_kwork__read_events(kwork);
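
/*
 * Build the list of classes to profile from -k/--kwork; when no list
 * is given, every supported class is enabled.
 */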
static void setup_event_list(struct perf_kwork *kwork,
			     const struct option *options,
			     const char * const usage_msg[])
	struct kwork_class *class;
	char *tmp, *tok, *str;

	if (kwork->event_list_str == NULL)
		goto null_event_list_str;

	str = strdup(kwork->event_list_str);
	for (tok = strtok_r(str, ", ", &tmp);
	     tok; tok = strtok_r(NULL, ", ", &tmp)) {
		for (i = 0; i < KWORK_CLASS_MAX; i++) {
			class = kwork_class_supported_list[i];
			if (strcmp(tok, class->name) == 0) {
				list_add_tail(&class->list, &kwork->class_list);
		if (i == KWORK_CLASS_MAX) {
			usage_with_options_msg(usage_msg, options,
					       "Unknown --event key: `%s'", tok);

null_event_list_str:
	/*
	 * config all kwork events if not specified
	 */
	if (list_empty(&kwork->class_list)) {
		for (i = 0; i < KWORK_CLASS_MAX; i++) {
			list_add_tail(&kwork_class_supported_list[i]->list,
				      &kwork->class_list);

	pr_debug("Config event list:");
	list_for_each_entry(class, &kwork->class_list, list)
		pr_debug(" %s", class->name);
static int perf_kwork__record(struct perf_kwork *kwork,
			      int argc, const char **argv)
	const char **rec_argv;
	unsigned int rec_argc, i, j;
	struct kwork_class *class;

	const char *const record_args[] = {

	rec_argc = ARRAY_SIZE(record_args) + argc - 1;

	list_for_each_entry(class, &kwork->class_list, list)
		rec_argc += 2 * class->nr_tracepoints;

	rec_argv = calloc(rec_argc + 1, sizeof(char *));
	if (rec_argv == NULL)

	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[i] = strdup(record_args[i]);

	list_for_each_entry(class, &kwork->class_list, list) {
		for (j = 0; j < class->nr_tracepoints; j++) {
			rec_argv[i++] = strdup("-e");
			rec_argv[i++] = strdup(class->tp_handlers[j].name);

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	BUG_ON(i != rec_argc);

	pr_debug("record comm: ");
	for (j = 0; j < rec_argc; j++)
		pr_debug("%s ", rec_argv[j]);

	return cmd_record(i, rec_argv);
int cmd_kwork(int argc, const char **argv)
	static struct perf_kwork kwork = {
		.class_list = LIST_HEAD_INIT(kwork.class_list),
		.tool = {
			.mmap = perf_event__process_mmap,
			.mmap2 = perf_event__process_mmap2,
			.sample = perf_kwork__process_tracepoint_sample,
		},
		.atom_page_list = LIST_HEAD_INIT(kwork.atom_page_list),
		.sort_list = LIST_HEAD_INIT(kwork.sort_list),
		.cmp_id = LIST_HEAD_INIT(kwork.cmp_id),
		.sorted_work_root = RB_ROOT_CACHED,
		.profile_name = NULL,
		.event_list_str = NULL,
		.show_callchain = false,
		.nr_lost_chunks = 0,
		.nr_lost_events = 0,
		.nr_skipped_events = { 0 },
	static const char default_report_sort_order[] = "runtime, max, count";
	static const char default_latency_sort_order[] = "avg, max, count";
	const struct option kwork_options[] = {
		OPT_INCR('v', "verbose", &verbose,
			 "be more verbose (show symbol address, etc)"),
		OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
			    "dump raw trace in ASCII"),
		OPT_STRING('k', "kwork", &kwork.event_list_str, "kwork",
			   "list of kwork to profile (irq, softirq, workqueue, etc)"),
		OPT_BOOLEAN('f', "force", &kwork.force, "don't complain, do it"),
	const struct option report_options[] = {
		OPT_STRING('s', "sort", &kwork.sort_order, "key[,key2...]",
			   "sort by key(s): runtime, max, count"),
		OPT_STRING('C', "cpu", &kwork.cpu_list, "cpu",
			   "list of cpus to profile"),
		OPT_STRING('n', "name", &kwork.profile_name, "name",
			   "event name to profile"),
		OPT_STRING(0, "time", &kwork.time_str, "str",
			   "Time span for analysis (start,stop)"),
		OPT_STRING('i', "input", &input_name, "file",
			   "input file name"),
		OPT_BOOLEAN('S', "with-summary", &kwork.summary,
			    "Show summary with statistics"),
#ifdef HAVE_BPF_SKEL
		OPT_BOOLEAN('b', "use-bpf", &kwork.use_bpf,
			    "Use BPF to measure kwork runtime"),
#endif
		OPT_PARENT(kwork_options)
	const struct option latency_options[] = {
		OPT_STRING('s', "sort", &kwork.sort_order, "key[,key2...]",
			   "sort by key(s): avg, max, count"),
		OPT_STRING('C', "cpu", &kwork.cpu_list, "cpu",
			   "list of cpus to profile"),
		OPT_STRING('n', "name", &kwork.profile_name, "name",
			   "event name to profile"),
		OPT_STRING(0, "time", &kwork.time_str, "str",
			   "Time span for analysis (start,stop)"),
		OPT_STRING('i', "input", &input_name, "file",
			   "input file name"),
#ifdef HAVE_BPF_SKEL
		OPT_BOOLEAN('b', "use-bpf", &kwork.use_bpf,
			    "Use BPF to measure kwork latency"),
#endif
		OPT_PARENT(kwork_options)
	const struct option timehist_options[] = {
		OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
			   "file", "vmlinux pathname"),
		OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
			   "file", "kallsyms pathname"),
		OPT_BOOLEAN('g', "call-graph", &kwork.show_callchain,
			    "Display call chains if present"),
		OPT_UINTEGER(0, "max-stack", &kwork.max_stack,
			     "Maximum number of functions to display backtrace."),
		OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
			   "Look for files with symbols relative to this directory"),
		OPT_STRING(0, "time", &kwork.time_str, "str",
			   "Time span for analysis (start,stop)"),
		OPT_STRING('C', "cpu", &kwork.cpu_list, "cpu",
			   "list of cpus to profile"),
		OPT_STRING('n', "name", &kwork.profile_name, "name",
			   "event name to profile"),
		OPT_STRING('i', "input", &input_name, "file",
			   "input file name"),
		OPT_PARENT(kwork_options)
	const char *kwork_usage[] = {
	const char * const report_usage[] = {
		"perf kwork report [<options>]",
	const char * const latency_usage[] = {
		"perf kwork latency [<options>]",
	const char * const timehist_usage[] = {
		"perf kwork timehist [<options>]",
	const char *const kwork_subcommands[] = {
		"record", "report", "latency", "timehist", NULL

	argc = parse_options_subcommand(argc, argv, kwork_options,
					kwork_subcommands, kwork_usage,
					PARSE_OPT_STOP_AT_NON_OPTION);
		usage_with_options(kwork_usage, kwork_options);

	setup_event_list(&kwork, kwork_options, kwork_usage);
	sort_dimension__add(&kwork, "id", &kwork.cmp_id);
	if (strlen(argv[0]) > 2 && strstarts("record", argv[0]))
		return perf_kwork__record(&kwork, argc, argv);
	else if (strlen(argv[0]) > 2 && strstarts("report", argv[0])) {
		kwork.sort_order = default_report_sort_order;
			argc = parse_options(argc, argv, report_options, report_usage, 0);
				usage_with_options(report_usage, report_options);
		kwork.report = KWORK_REPORT_RUNTIME;
		setup_sorting(&kwork, report_options, report_usage);
		return perf_kwork__report(&kwork);
	} else if (strlen(argv[0]) > 2 && strstarts("latency", argv[0])) {
		kwork.sort_order = default_latency_sort_order;
			argc = parse_options(argc, argv, latency_options, latency_usage, 0);
				usage_with_options(latency_usage, latency_options);
		kwork.report = KWORK_REPORT_LATENCY;
		setup_sorting(&kwork, latency_options, latency_usage);
		return perf_kwork__report(&kwork);
	} else if (strlen(argv[0]) > 2 && strstarts("timehist", argv[0])) {
			argc = parse_options(argc, argv, timehist_options, timehist_usage, 0);
				usage_with_options(timehist_usage, timehist_options);
		kwork.report = KWORK_REPORT_TIMEHIST;
		return perf_kwork__timehist(&kwork);

	usage_with_options(kwork_usage, kwork_options);