tools/perf/tests/code-reading.c
// SPDX-License-Identifier: GPL-2.0
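/*
 * Test that reading object code for sampled addresses through perf's dso
 * data reading (dso__data_read_offset()) returns the same bytes as
 * disassembling the corresponding file with objdump.
 */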
#include <errno.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <inttypes.h>
#include <stdlib.h>
#include <unistd.h>
#include <stdio.h>
#include <string.h>
#include <sys/param.h>
#include <perf/cpumap.h>
#include <perf/evlist.h>
#include <perf/mmap.h>

#include "debug.h"
#include "dso.h"
#include "env.h"
#include "parse-events.h"
#include "evlist.h"
#include "evsel.h"
#include "thread_map.h"
#include "machine.h"
#include "map.h"
#include "symbol.h"
#include "event.h"
#include "record.h"
#include "util/mmap.h"
#include "util/string2.h"
#include "util/synthetic-events.h"
#include "util/util.h"
#include "thread.h"

#include "tests.h"

#include <linux/ctype.h>

#define BUFSZ   1024
#define READLEN 128

struct state {
        u64 done[1024];
        size_t done_cnt;
};

static size_t read_objdump_chunk(const char **line, unsigned char **buf,
                                 size_t *buf_len)
{
        size_t bytes_read = 0;
        unsigned char *chunk_start = *buf;

        /* Read bytes */
        while (*buf_len > 0) {
                char c1, c2;

                /* Get 2 hex digits */
                c1 = *(*line)++;
                if (!isxdigit(c1))
                        break;
                c2 = *(*line)++;
                if (!isxdigit(c2))
                        break;

                /* Store byte and advance buf */
                **buf = (hex(c1) << 4) | hex(c2);
                (*buf)++;
                (*buf_len)--;
                bytes_read++;

                /* End of chunk? */
                if (isspace(**line))
                        break;
        }

        /*
         * objdump will display raw insn as LE if code endian
         * is LE and bytes_per_chunk > 1. In that case reverse
         * the chunk we just read.
         *
         * See disassemble_bytes() in binutils/objdump.c for details of
         * how objdump chooses the display endianness.
         */
        if (bytes_read > 1 && !host_is_bigendian()) {
                unsigned char *chunk_end = chunk_start + bytes_read - 1;
                unsigned char tmp;

                while (chunk_start < chunk_end) {
                        tmp = *chunk_start;
                        *chunk_start = *chunk_end;
                        *chunk_end = tmp;
                        chunk_start++;
                        chunk_end--;
                }
        }

        return bytes_read;
}

static size_t read_objdump_line(const char *line, unsigned char *buf,
                                size_t buf_len)
{
        const char *p;
        size_t ret, bytes_read = 0;

        /* Skip to a colon */
        p = strchr(line, ':');
        if (!p)
                return 0;
        p++;

        /* Skip initial spaces */
        while (*p) {
                if (!isspace(*p))
                        break;
                p++;
        }

        do {
                ret = read_objdump_chunk(&p, &buf, &buf_len);
                bytes_read += ret;
                p++;
        } while (ret > 0);

        /* return number of successfully read bytes */
        return bytes_read;
}

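/*
 * Parse objdump disassembly from 'f', placing the encoded instruction bytes
 * into 'buf' at offsets derived from each line's address relative to
 * 'start_addr'.  On return, *len holds the number of bytes that could not be
 * read.
 */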
static int read_objdump_output(FILE *f, void *buf, size_t *len, u64 start_addr)
{
        char *line = NULL;
        size_t line_len, off_last = 0;
        ssize_t ret;
        int err = 0;
        u64 addr, last_addr = start_addr;

        while (off_last < *len) {
                size_t off, read_bytes, written_bytes;
                unsigned char tmp[BUFSZ];

                ret = getline(&line, &line_len, f);
                if (feof(f))
                        break;
                if (ret < 0) {
                        pr_debug("getline failed\n");
                        err = -1;
                        break;
                }

                /* read objdump data into temporary buffer */
                read_bytes = read_objdump_line(line, tmp, sizeof(tmp));
                if (!read_bytes)
                        continue;

                if (sscanf(line, "%"PRIx64, &addr) != 1)
                        continue;
                if (addr < last_addr) {
                        pr_debug("addr going backwards, read beyond section?\n");
                        break;
                }
                last_addr = addr;

                /* copy it from temporary buffer to 'buf' according
                 * to address on current objdump line */
                off = addr - start_addr;
                if (off >= *len)
                        break;
                written_bytes = MIN(read_bytes, *len - off);
                memcpy(buf + off, tmp, written_bytes);
                off_last = off + written_bytes;
        }

        /* len returns number of bytes that could not be read */
        *len -= off_last;

        free(line);

        return err;
}

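/*
 * Run objdump over 'len' bytes starting at 'addr' in 'filename' and collect
 * the raw instruction bytes into 'buf'.
 */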
static int read_via_objdump(const char *filename, u64 addr, void *buf,
                            size_t len)
{
        char cmd[PATH_MAX * 2];
        const char *fmt;
        FILE *f;
        int ret;

        fmt = "%s -z -d --start-address=0x%"PRIx64" --stop-address=0x%"PRIx64" %s";
        ret = snprintf(cmd, sizeof(cmd), fmt, "objdump", addr, addr + len,
                       filename);
        if (ret <= 0 || (size_t)ret >= sizeof(cmd))
                return -1;

        pr_debug("Objdump command is: %s\n", cmd);

        /* Ignore objdump errors */
        strcat(cmd, " 2>/dev/null");

        f = popen(cmd, "r");
        if (!f) {
                pr_debug("popen failed\n");
                return -1;
        }

        ret = read_objdump_output(f, buf, &len, addr);
        if (len) {
                pr_debug("objdump read too few bytes: %zd\n", len);
                if (!ret)
                        ret = len;
        }

        pclose(f);

        return ret;
}

static void dump_buf(unsigned char *buf, size_t len)
{
        size_t i;

        for (i = 0; i < len; i++) {
                pr_debug("0x%02x ", buf[i]);
                if (i % 16 == 15)
                        pr_debug("\n");
        }
        pr_debug("\n");
}

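/*
 * Read object code for 'addr' both through perf's dso data reading and
 * through objdump, and check that the two reads agree byte for byte.
 */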
static int read_object_code(u64 addr, size_t len, u8 cpumode,
                            struct thread *thread, struct state *state)
{
        struct addr_location al;
        unsigned char buf1[BUFSZ] = {0};
        unsigned char buf2[BUFSZ] = {0};
        size_t ret_len;
        u64 objdump_addr;
        const char *objdump_name;
        char decomp_name[KMOD_DECOMP_LEN];
        bool decomp = false;
        int ret, err = 0;
        struct dso *dso;

        pr_debug("Reading object code for memory address: %#"PRIx64"\n", addr);

        addr_location__init(&al);
        if (!thread__find_map(thread, cpumode, addr, &al) || !map__dso(al.map)) {
                if (cpumode == PERF_RECORD_MISC_HYPERVISOR) {
                        pr_debug("Hypervisor address can not be resolved - skipping\n");
                        goto out;
                }

                pr_debug("thread__find_map failed\n");
                err = -1;
                goto out;
        }
        dso = map__dso(al.map);
        pr_debug("File is: %s\n", dso->long_name);

        if (dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS && !dso__is_kcore(dso)) {
                pr_debug("Unexpected kernel address - skipping\n");
                goto out;
        }

        pr_debug("On file address is: %#"PRIx64"\n", al.addr);

        if (len > BUFSZ)
                len = BUFSZ;

        /* Do not go off the map */
        if (addr + len > map__end(al.map))
                len = map__end(al.map) - addr;

        /* Read the object code using perf */
        ret_len = dso__data_read_offset(dso, maps__machine(thread__maps(thread)),
                                        al.addr, buf1, len);
        if (ret_len != len) {
                pr_debug("dso__data_read_offset failed\n");
                err = -1;
                goto out;
        }

        /*
         * Converting addresses for use by objdump requires more information.
         * map__load() does that.  See map__rip_2objdump() for details.
         */
        if (map__load(al.map)) {
                err = -1;
                goto out;
        }

        /* objdump struggles with kcore - try each map only once */
        if (dso__is_kcore(dso)) {
                size_t d;

                for (d = 0; d < state->done_cnt; d++) {
                        if (state->done[d] == map__start(al.map)) {
                                pr_debug("kcore map tested already");
                                pr_debug(" - skipping\n");
                                goto out;
                        }
                }
                if (state->done_cnt >= ARRAY_SIZE(state->done)) {
                        pr_debug("Too many kcore maps - skipping\n");
                        goto out;
                }
                state->done[state->done_cnt++] = map__start(al.map);
        }

        objdump_name = dso->long_name;
        if (dso__needs_decompress(dso)) {
                if (dso__decompress_kmodule_path(dso, objdump_name,
                                                 decomp_name,
                                                 sizeof(decomp_name)) < 0) {
                        pr_debug("decompression failed\n");
                        err = -1;
                        goto out;
                }

                decomp = true;
                objdump_name = decomp_name;
        }

        /* Read the object code using objdump */
        objdump_addr = map__rip_2objdump(al.map, al.addr);
        ret = read_via_objdump(objdump_name, objdump_addr, buf2, len);

        if (decomp)
                unlink(objdump_name);

        if (ret > 0) {
                /*
                 * The kernel maps are inaccurate - assume objdump is right in
                 * that case.
                 */
                if (cpumode == PERF_RECORD_MISC_KERNEL ||
                    cpumode == PERF_RECORD_MISC_GUEST_KERNEL) {
                        len -= ret;
                        if (len) {
                                pr_debug("Reducing len to %zu\n", len);
                        } else if (dso__is_kcore(dso)) {
                                /*
                                 * objdump cannot handle very large segments
                                 * that may be found in kcore.
                                 */
                                pr_debug("objdump failed for kcore");
                                pr_debug(" - skipping\n");
                        } else {
                                err = -1;
                        }
                        goto out;
                }
        }
        if (ret < 0) {
                pr_debug("read_via_objdump failed\n");
                err = -1;
                goto out;
        }

        /* The results should be identical */
        if (memcmp(buf1, buf2, len)) {
                pr_debug("Bytes read differ from those read by objdump\n");
                pr_debug("buf1 (dso):\n");
                dump_buf(buf1, len);
                pr_debug("buf2 (objdump):\n");
                dump_buf(buf2, len);
                err = -1;
                goto out;
        }
        pr_debug("Bytes read match those read by objdump\n");
out:
        addr_location__exit(&al);
        return err;
}

static int process_sample_event(struct machine *machine,
                                struct evlist *evlist,
                                union perf_event *event, struct state *state)
{
        struct perf_sample sample;
        struct thread *thread;
        int ret;

        if (evlist__parse_sample(evlist, event, &sample)) {
                pr_debug("evlist__parse_sample failed\n");
                return -1;
        }

        thread = machine__findnew_thread(machine, sample.pid, sample.tid);
        if (!thread) {
                pr_debug("machine__findnew_thread failed\n");
                return -1;
        }

        ret = read_object_code(sample.ip, READLEN, sample.cpumode, thread, state);
        thread__put(thread);
        return ret;
}

static int process_event(struct machine *machine, struct evlist *evlist,
                         union perf_event *event, struct state *state)
{
        if (event->header.type == PERF_RECORD_SAMPLE)
                return process_sample_event(machine, evlist, event, state);

        if (event->header.type == PERF_RECORD_THROTTLE ||
            event->header.type == PERF_RECORD_UNTHROTTLE)
                return 0;

        if (event->header.type < PERF_RECORD_MAX) {
                int ret;

                ret = machine__process_event(machine, event, NULL);
                if (ret < 0)
                        pr_debug("machine__process_event failed, event type %u\n",
                                 event->header.type);
                return ret;
        }

        return 0;
}

static int process_events(struct machine *machine, struct evlist *evlist,
                          struct state *state)
{
        union perf_event *event;
        struct mmap *md;
        int i, ret;

        for (i = 0; i < evlist->core.nr_mmaps; i++) {
                md = &evlist->mmap[i];
                if (perf_mmap__read_init(&md->core) < 0)
                        continue;

                while ((event = perf_mmap__read_event(&md->core)) != NULL) {
                        ret = process_event(machine, evlist, event, state);
                        perf_mmap__consume(&md->core);
                        if (ret < 0)
                                return ret;
                }
                perf_mmap__read_done(&md->core);
        }
        return 0;
}

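/*
 * Workload helpers: do some sorting, syscalls and filesystem activity so
 * that samples land in a variety of code.
 */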
static int comp(const void *a, const void *b)
{
        return *(int *)a - *(int *)b;
}

static void do_sort_something(void)
{
        int buf[40960], i;

        for (i = 0; i < (int)ARRAY_SIZE(buf); i++)
                buf[i] = ARRAY_SIZE(buf) - i - 1;

        qsort(buf, ARRAY_SIZE(buf), sizeof(int), comp);

        for (i = 0; i < (int)ARRAY_SIZE(buf); i++) {
                if (buf[i] != i) {
                        pr_debug("qsort failed\n");
                        break;
                }
        }
}

static void sort_something(void)
{
        int i;

        for (i = 0; i < 10; i++)
                do_sort_something();
}

static void syscall_something(void)
{
        int pipefd[2];
        int i;

        for (i = 0; i < 1000; i++) {
                if (pipe(pipefd) < 0) {
                        pr_debug("pipe failed\n");
                        break;
                }
                close(pipefd[1]);
                close(pipefd[0]);
        }
}

static void fs_something(void)
{
        const char *test_file_name = "temp-perf-code-reading-test-file--";
        FILE *f;
        int i;

        for (i = 0; i < 1000; i++) {
                f = fopen(test_file_name, "w+");
                if (f) {
                        fclose(f);
                        unlink(test_file_name);
                }
        }
}

#ifdef __s390x__
#include "header.h" // for get_cpuid()
#endif

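/*
 * Pick the sampling event: cycles by default; on s390x fall back to
 * cpu-clock when the CPU-measurement counter facility is not authorized.
 */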
static const char *do_determine_event(bool excl_kernel)
{
        const char *event = excl_kernel ? "cycles:u" : "cycles";

#ifdef __s390x__
        char cpuid[128], model[16], model_c[16], cpum_cf_v[16];
        unsigned int family;
        int ret, cpum_cf_a;

        if (get_cpuid(cpuid, sizeof(cpuid)))
                goto out_clocks;
        ret = sscanf(cpuid, "%*[^,],%u,%[^,],%[^,],%[^,],%x", &family, model_c,
                     model, cpum_cf_v, &cpum_cf_a);
        if (ret != 5)            /* Not available */
                goto out_clocks;
        if (excl_kernel && (cpum_cf_a & 4))
                return event;
        if (!excl_kernel && (cpum_cf_a & 2))
                return event;

        /* Fall through: missing authorization */
out_clocks:
        event = excl_kernel ? "cpu-clock:u" : "cpu-clock";

#endif
        return event;
}

static void do_something(void)
{
        fs_something();

        sort_something();

        syscall_something();
}

enum {
        TEST_CODE_READING_OK,
        TEST_CODE_READING_NO_VMLINUX,
        TEST_CODE_READING_NO_KCORE,
        TEST_CODE_READING_NO_ACCESS,
        TEST_CODE_READING_NO_KERNEL_OBJ,
};

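/*
 * Sample this process while it runs the workload, then verify object code
 * reading for every sample.  When 'try_kcore' is set, force kallsyms so that
 * kcore is used for the kernel instead of vmlinux.
 */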
static int do_test_code_reading(bool try_kcore)
{
        struct machine *machine;
        struct thread *thread;
        struct record_opts opts = {
                .mmap_pages          = UINT_MAX,
                .user_freq           = UINT_MAX,
                .user_interval       = ULLONG_MAX,
                .freq                = 500,
                .target              = {
                        .uses_mmap   = true,
                },
        };
        struct state state = {
                .done_cnt = 0,
        };
        struct perf_thread_map *threads = NULL;
        struct perf_cpu_map *cpus = NULL;
        struct evlist *evlist = NULL;
        struct evsel *evsel = NULL;
        int err = -1, ret;
        pid_t pid;
        struct map *map;
        bool have_vmlinux, have_kcore, excl_kernel = false;
        struct dso *dso;

        pid = getpid();

        machine = machine__new_host();
        machine->env = &perf_env;

        ret = machine__create_kernel_maps(machine);
        if (ret < 0) {
                pr_debug("machine__create_kernel_maps failed\n");
                goto out_err;
        }

        /* Force the use of kallsyms instead of vmlinux to try kcore */
        if (try_kcore)
                symbol_conf.kallsyms_name = "/proc/kallsyms";

        /* Load kernel map */
        map = machine__kernel_map(machine);
        ret = map__load(map);
        if (ret < 0) {
                pr_debug("map__load failed\n");
                goto out_err;
        }
        dso = map__dso(map);
        have_vmlinux = dso__is_vmlinux(dso);
        have_kcore = dso__is_kcore(dso);

        /* 2nd time through we just try kcore */
        if (try_kcore && !have_kcore)
                return TEST_CODE_READING_NO_KCORE;

        /* No point getting kernel events if there is no kernel object */
        if (!have_vmlinux && !have_kcore)
                excl_kernel = true;

        threads = thread_map__new_by_tid(pid);
        if (!threads) {
                pr_debug("thread_map__new_by_tid failed\n");
                goto out_err;
        }

        ret = perf_event__synthesize_thread_map(NULL, threads,
                                                perf_event__process, machine,
                                                true, false);
        if (ret < 0) {
                pr_debug("perf_event__synthesize_thread_map failed\n");
                goto out_err;
        }

        thread = machine__findnew_thread(machine, pid, pid);
        if (!thread) {
                pr_debug("machine__findnew_thread failed\n");
                goto out_put;
        }

        cpus = perf_cpu_map__new(NULL);
        if (!cpus) {
                pr_debug("perf_cpu_map__new failed\n");
                goto out_put;
        }

        while (1) {
                const char *str;

                evlist = evlist__new();
                if (!evlist) {
                        pr_debug("evlist__new failed\n");
                        goto out_put;
                }

                perf_evlist__set_maps(&evlist->core, cpus, threads);

                str = do_determine_event(excl_kernel);
                pr_debug("Parsing event '%s'\n", str);
                ret = parse_event(evlist, str);
                if (ret < 0) {
                        pr_debug("parse_events failed\n");
                        goto out_put;
                }

                evlist__config(evlist, &opts, NULL);

                evsel = evlist__first(evlist);

                evsel->core.attr.comm = 1;
                evsel->core.attr.disabled = 1;
                evsel->core.attr.enable_on_exec = 0;

                ret = evlist__open(evlist);
                if (ret < 0) {
                        if (!excl_kernel) {
                                excl_kernel = true;
                                /*
                                 * Both cpus and threads are now owned by evlist
                                 * and will be freed by following perf_evlist__set_maps
                                 * call. Getting reference to keep them alive.
                                 */
                                perf_cpu_map__get(cpus);
                                perf_thread_map__get(threads);
                                perf_evlist__set_maps(&evlist->core, NULL, NULL);
                                evlist__delete(evlist);
                                evlist = NULL;
                                continue;
                        }

                        if (verbose > 0) {
                                char errbuf[512];
                                evlist__strerror_open(evlist, errno, errbuf, sizeof(errbuf));
                                pr_debug("perf_evlist__open() failed!\n%s\n", errbuf);
                        }

                        goto out_put;
                }
                break;
        }

        ret = evlist__mmap(evlist, UINT_MAX);
        if (ret < 0) {
                pr_debug("evlist__mmap failed\n");
                goto out_put;
        }

        evlist__enable(evlist);

        do_something();

        evlist__disable(evlist);

        ret = process_events(machine, evlist, &state);
        if (ret < 0)
                goto out_put;

        if (!have_vmlinux && !have_kcore && !try_kcore)
                err = TEST_CODE_READING_NO_KERNEL_OBJ;
        else if (!have_vmlinux && !try_kcore)
                err = TEST_CODE_READING_NO_VMLINUX;
        else if (excl_kernel)
                err = TEST_CODE_READING_NO_ACCESS;
        else
                err = TEST_CODE_READING_OK;
out_put:
        thread__put(thread);
out_err:
        evlist__delete(evlist);
        perf_cpu_map__put(cpus);
        perf_thread_map__put(threads);
        machine__delete(machine);

        return err;
}

static int test__code_reading(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
        int ret;

        ret = do_test_code_reading(false);
        if (!ret)
                ret = do_test_code_reading(true);

        switch (ret) {
        case TEST_CODE_READING_OK:
                return 0;
        case TEST_CODE_READING_NO_VMLINUX:
                pr_debug("no vmlinux\n");
                return 0;
        case TEST_CODE_READING_NO_KCORE:
                pr_debug("no kcore\n");
                return 0;
        case TEST_CODE_READING_NO_ACCESS:
                pr_debug("no access\n");
                return 0;
        case TEST_CODE_READING_NO_KERNEL_OBJ:
                pr_debug("no kernel obj\n");
                return 0;
        default:
                return -1;
        };
}

DEFINE_SUITE("Object code reading", code_reading);