// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include "dso.h"
#include "session.h"
#include "thread.h"
#include "thread-stack.h"
#include "debug.h"
#include "namespaces.h"
#include "comm.h"
#include "map.h"
#include "symbol.h"
#include "unwind.h"
#include "callchain.h"

#include <api/fs/fs.h>

int thread__init_maps(struct thread *thread, struct machine *machine)
{
	pid_t pid = thread__pid(thread);

	if (pid == thread__tid(thread) || pid == -1) {
		thread__set_maps(thread, maps__new(machine));
	} else {
		struct thread *leader = machine__findnew_thread(machine, pid, pid);

		if (leader) {
			thread__set_maps(thread, maps__get(thread__maps(leader)));
			thread__put(leader);
		}
	}

	return thread__maps(thread) ? 0 : -1;
}

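/* Allocate a thread, seed it with a default ":<tid>" comm and take the first reference. */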
struct thread *thread__new(pid_t pid, pid_t tid)
{
	RC_STRUCT(thread) *_thread = zalloc(sizeof(*_thread));
	struct thread *thread;

	if (ADD_RC_CHK(thread, _thread) != NULL) {
		struct comm *comm;
		char comm_str[32];

		thread__set_pid(thread, pid);
		thread__set_tid(thread, tid);
		thread__set_ppid(thread, -1);
		thread__set_cpu(thread, -1);
		thread__set_guest_cpu(thread, -1);
		thread__set_lbr_stitch_enable(thread, false);
		INIT_LIST_HEAD(thread__namespaces_list(thread));
		INIT_LIST_HEAD(thread__comm_list(thread));
		init_rwsem(thread__namespaces_lock(thread));
		init_rwsem(thread__comm_lock(thread));

		snprintf(comm_str, sizeof(comm_str), ":%d", tid);
		comm = comm__new(comm_str, 0, false);
		if (!comm)
			goto err_thread;

		list_add(&comm->list, thread__comm_list(thread));
		refcount_set(thread__refcnt(thread), 1);
		/* Thread holds first ref to nsdata. */
		RC_CHK_ACCESS(thread)->nsinfo = nsinfo__new(pid);
		srccode_state_init(thread__srccode_state(thread));
	}

	return thread;

err_thread:
	thread__delete(thread);
	return NULL;
}

static void (*thread__priv_destructor)(void *priv);

void thread__set_priv_destructor(void (*destructor)(void *priv))
{
	assert(thread__priv_destructor == NULL);

	thread__priv_destructor = destructor;
}

void thread__delete(struct thread *thread)
{
	struct namespaces *namespaces, *tmp_namespaces;
	struct comm *comm, *tmp_comm;

	thread_stack__free(thread);

	if (thread__maps(thread)) {
		maps__put(thread__maps(thread));
		thread__set_maps(thread, NULL);
	}
	down_write(thread__namespaces_lock(thread));
	list_for_each_entry_safe(namespaces, tmp_namespaces,
				 thread__namespaces_list(thread), list) {
		list_del_init(&namespaces->list);
		namespaces__free(namespaces);
	}
	up_write(thread__namespaces_lock(thread));

	down_write(thread__comm_lock(thread));
	list_for_each_entry_safe(comm, tmp_comm, thread__comm_list(thread), list) {
		list_del_init(&comm->list);
		comm__free(comm);
	}
	up_write(thread__comm_lock(thread));

	nsinfo__zput(RC_CHK_ACCESS(thread)->nsinfo);
	srccode_state_free(thread__srccode_state(thread));

	exit_rwsem(thread__namespaces_lock(thread));
	exit_rwsem(thread__comm_lock(thread));
	thread__free_stitch_list(thread);

	if (thread__priv_destructor)
		thread__priv_destructor(thread__priv(thread));

	RC_CHK_FREE(thread);
}

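/*
 * Reference counting: thread__get() takes a new reference, thread__put()
 * drops one and frees the thread via thread__delete() when it hits zero.
 */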
struct thread *thread__get(struct thread *thread)
{
	struct thread *result;

	if (RC_CHK_GET(result, thread))
		refcount_inc(thread__refcnt(thread));

	return result;
}

void thread__put(struct thread *thread)
{
	if (thread && refcount_dec_and_test(thread__refcnt(thread)))
		thread__delete(thread);
	else
		RC_CHK_PUT(thread);
}

static struct namespaces *__thread__namespaces(struct thread *thread)
{
	if (list_empty(thread__namespaces_list(thread)))
		return NULL;

	return list_first_entry(thread__namespaces_list(thread), struct namespaces, list);
}

struct namespaces *thread__namespaces(struct thread *thread)
{
	struct namespaces *ns;

	down_read(thread__namespaces_lock(thread));
	ns = __thread__namespaces(thread);
	up_read(thread__namespaces_lock(thread));

	return ns;
}

static int __thread__set_namespaces(struct thread *thread, u64 timestamp,
				    struct perf_record_namespaces *event)
{
	struct namespaces *new, *curr = __thread__namespaces(thread);

	new = namespaces__new(event);
	if (!new)
		return -ENOMEM;

	list_add(&new->list, thread__namespaces_list(thread));

	if (timestamp && curr) {
		/*
		 * A setns syscall must have changed a few or all of the
		 * namespaces of this thread. Update the end time for the
		 * namespaces previously used.
		 */
		curr = list_next_entry(new, list);
		curr->end_time = timestamp;
	}

	return 0;
}

int thread__set_namespaces(struct thread *thread, u64 timestamp,
			   struct perf_record_namespaces *event)
{
	int ret;

	down_write(thread__namespaces_lock(thread));
	ret = __thread__set_namespaces(thread, timestamp, event);
	up_write(thread__namespaces_lock(thread));
	return ret;
}

struct comm *thread__comm(struct thread *thread)
{
	if (list_empty(thread__comm_list(thread)))
		return NULL;

	return list_first_entry(thread__comm_list(thread), struct comm, list);
}

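/*
 * Pick the comm to attribute to this thread's current executable: the most
 * recent comm created by an exec, or failing that the earliest recorded
 * comm (subject to the synthesized-fork caveat below).
 */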
struct comm *thread__exec_comm(struct thread *thread)
{
	struct comm *comm, *last = NULL, *second_last = NULL;

	list_for_each_entry(comm, thread__comm_list(thread), list) {
		if (comm->exec)
			return comm;
		second_last = last;
		last = comm;
	}

	/*
	 * 'last' with no start time might be the parent's comm of a synthesized
	 * thread (created by processing a synthesized fork event). For a main
	 * thread, that is very probably wrong. Prefer a later comm to avoid
	 * that case.
	 */
	if (second_last && !last->start && thread__pid(thread) == thread__tid(thread))
		return second_last;

	return last;
}

static int ____thread__set_comm(struct thread *thread, const char *str,
				u64 timestamp, bool exec)
{
	struct comm *new, *curr = thread__comm(thread);

	/* Override the default :tid entry */
	if (!thread__comm_set(thread)) {
		int err = comm__override(curr, str, timestamp, exec);
		if (err)
			return err;
	} else {
		new = comm__new(str, timestamp, exec);
		if (!new)
			return -ENOMEM;
		list_add(&new->list, thread__comm_list(thread));

		if (exec)
			unwind__flush_access(thread__maps(thread));
	}

	thread__set_comm_set(thread, true);

	return 0;
}

int __thread__set_comm(struct thread *thread, const char *str, u64 timestamp,
		       bool exec)
{
	int ret;

	down_write(thread__comm_lock(thread));
	ret = ____thread__set_comm(thread, str, timestamp, exec);
	up_write(thread__comm_lock(thread));
	return ret;
}

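/* Read the current comm from /proc/<pid>/task/<tid>/comm and record it. */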
int thread__set_comm_from_proc(struct thread *thread)
{
	char path[64];
	char *comm = NULL;
	size_t sz;
	int err = -1;

	if (!(snprintf(path, sizeof(path), "%d/task/%d/comm",
		       thread__pid(thread), thread__tid(thread)) >= (int)sizeof(path)) &&
	    procfs__read_str(path, &comm, &sz) == 0) {
		comm[sz - 1] = '\0';
		err = thread__set_comm(thread, comm, 0);
	}

	return err;
}

static const char *__thread__comm_str(struct thread *thread)
{
	const struct comm *comm = thread__comm(thread);

	if (!comm)
		return NULL;

	return comm__str(comm);
}

const char *thread__comm_str(struct thread *thread)
{
	const char *str;

	down_read(thread__comm_lock(thread));
	str = __thread__comm_str(thread);
	up_read(thread__comm_lock(thread));

	return str;
}

static int __thread__comm_len(struct thread *thread, const char *comm)
{
	if (!comm)
		return 0;
	thread__set_comm_len(thread, strlen(comm));

	return thread__var_comm_len(thread);
}

/* CHECKME: this should probably return the max comm len from the comm list */
int thread__comm_len(struct thread *thread)
{
	int comm_len = thread__var_comm_len(thread);

	if (!comm_len) {
		const char *comm;

		down_read(thread__comm_lock(thread));
		comm = __thread__comm_str(thread);
		comm_len = __thread__comm_len(thread, comm);
		up_read(thread__comm_lock(thread));
	}

	return comm_len;
}

size_t thread__fprintf(struct thread *thread, FILE *fp)
{
	return fprintf(fp, "Thread %d %s\n", thread__tid(thread), thread__comm_str(thread)) +
	       maps__fprintf(thread__maps(thread), fp);
}

int thread__insert_map(struct thread *thread, struct map *map)
{
	int ret;

	ret = unwind__prepare_access(thread__maps(thread), map, NULL);
	if (ret)
		return ret;

	return maps__fixup_overlap_and_insert(thread__maps(thread), map);
}

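/*
 * Helpers for priming DWARF unwind state: walk every map of the thread and
 * stop as soon as unwind__prepare_access() fails or reports it initialized.
 */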
struct thread__prepare_access_maps_cb_args {
	int err;
	struct maps *maps;
};

static int thread__prepare_access_maps_cb(struct map *map, void *data)
{
	bool initialized = false;
	struct thread__prepare_access_maps_cb_args *args = data;

	args->err = unwind__prepare_access(args->maps, map, &initialized);

	return (args->err || initialized) ? 1 : 0;
}

static int thread__prepare_access(struct thread *thread)
{
	struct thread__prepare_access_maps_cb_args args = {
		.err = 0,
	};

	if (dwarf_callchain_users) {
		args.maps = thread__maps(thread);
		maps__for_each_map(thread__maps(thread), thread__prepare_access_maps_cb, &args);
	}

	return args.err;
}

static int thread__clone_maps(struct thread *thread, struct thread *parent, bool do_maps_clone)
{
	/* This is a new thread; share the map groups of the process. */
	if (thread__pid(thread) == thread__pid(parent))
		return thread__prepare_access(thread);

	if (maps__equal(thread__maps(thread), thread__maps(parent))) {
		pr_debug("broken map groups on thread %d/%d parent %d/%d\n",
			 thread__pid(thread), thread__tid(thread),
			 thread__pid(parent), thread__tid(parent));
		return 0;
	}
	/* But this one is a new process: copy the maps. */
	return do_maps_clone ? maps__copy_from(thread__maps(thread), thread__maps(parent)) : 0;
}

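/* Set up a forked child: inherit the parent's comm, record the ppid and share or copy the parent's maps. */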
int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp, bool do_maps_clone)
{
	if (thread__comm_set(parent)) {
		const char *comm = thread__comm_str(parent);
		int err;
		if (!comm)
			return -ENOMEM;
		err = thread__set_comm(thread, comm, timestamp);
		if (err)
			return err;
	}

	thread__set_ppid(thread, thread__tid(parent));
	return thread__clone_maps(thread, parent, do_maps_clone);
}

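/* Try each cpumode in turn until the address resolves to a map. */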
void thread__find_cpumode_addr_location(struct thread *thread, u64 addr,
					struct addr_location *al)
{
	size_t i;
	const u8 cpumodes[] = {
		PERF_RECORD_MISC_USER,
		PERF_RECORD_MISC_KERNEL,
		PERF_RECORD_MISC_GUEST_USER,
		PERF_RECORD_MISC_GUEST_KERNEL
	};

	for (i = 0; i < ARRAY_SIZE(cpumodes); i++) {
		thread__find_symbol(thread, cpumodes[i], addr, al);
		if (al->map)
			break;
	}
}

struct thread *thread__main_thread(struct machine *machine, struct thread *thread)
{
	if (thread__pid(thread) == thread__tid(thread))
		return thread__get(thread);

	if (thread__pid(thread) == -1)
		return NULL;

	return machine__find_thread(machine, thread__pid(thread), thread__pid(thread));
}

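/*
 * Copy up to @len bytes of the thread's memory at @ip into @buf by reading
 * from the backing DSO; returns the number of bytes read or -1 on error.
 */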
int thread__memcpy(struct thread *thread, struct machine *machine,
		   void *buf, u64 ip, int len, bool *is64bit)
{
	u8 cpumode = PERF_RECORD_MISC_USER;
	struct addr_location al;
	struct dso *dso;
	long offset;

	if (machine__kernel_ip(machine, ip))
		cpumode = PERF_RECORD_MISC_KERNEL;

	addr_location__init(&al);
	if (!thread__find_map(thread, cpumode, ip, &al)) {
		addr_location__exit(&al);
		return -1;
	}

	dso = map__dso(al.map);

	if (!dso || dso__data(dso)->status == DSO_DATA_STATUS_ERROR || map__load(al.map) < 0) {
		addr_location__exit(&al);
		return -1;
	}

	offset = map__map_ip(al.map, ip);
	if (is64bit)
		*is64bit = dso__is_64_bit(dso);

	addr_location__exit(&al);

	return dso__data_read_offset(dso, machine, offset, buf, len);
}

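/*
 * Release the LBR stitching state: both stitch lists and the cached
 * previous-LBR cursor entries.
 */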
void thread__free_stitch_list(struct thread *thread)
{
	struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread);
	struct stitch_list *pos, *tmp;

	if (!lbr_stitch)
		return;

	list_for_each_entry_safe(pos, tmp, &lbr_stitch->lists, node) {
		map_symbol__exit(&pos->cursor.ms);
		list_del_init(&pos->node);
		free(pos);
	}

	list_for_each_entry_safe(pos, tmp, &lbr_stitch->free_lists, node) {
		list_del_init(&pos->node);
		free(pos);
	}

	for (unsigned int i = 0; i < lbr_stitch->prev_lbr_cursor_size; i++)
		map_symbol__exit(&lbr_stitch->prev_lbr_cursor[i].ms);

	zfree(&lbr_stitch->prev_lbr_cursor);
	free(thread__lbr_stitch(thread));
	thread__set_lbr_stitch(thread, NULL);
}