tools/perf/util/thread.c
#include "../perf.h"
#include <errno.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <linux/kernel.h>
#include "session.h"
#include "thread.h"
#include "thread-stack.h"
#include "util.h"
#include "debug.h"
#include "namespaces.h"
#include "comm.h"
#include "unwind.h"

#include <api/fs/fs.h>

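/*
 * Pick up the map groups for a new thread: the thread group leader (or a
 * thread with an unknown pid) gets a fresh map_groups, while any other
 * thread shares its leader's map_groups via a reference.
 */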
int thread__init_map_groups(struct thread *thread, struct machine *machine)
{
	pid_t pid = thread->pid_;

	if (pid == thread->tid || pid == -1) {
		thread->mg = map_groups__new(machine);
	} else {
		struct thread *leader = __machine__findnew_thread(machine, pid, pid);
		if (leader) {
			thread->mg = map_groups__get(leader->mg);
			thread__put(leader);
		}
	}

	return thread->mg ? 0 : -1;
}

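/*
 * Allocate and initialize a thread: empty namespaces and comm lists, a
 * placeholder ":<tid>" comm entry, an initial refcount of one and the
 * first reference to its namespace info.
 */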
struct thread *thread__new(pid_t pid, pid_t tid)
{
	char *comm_str;
	struct comm *comm;
	struct thread *thread = zalloc(sizeof(*thread));

	if (thread != NULL) {
		thread->pid_ = pid;
		thread->tid = tid;
		thread->ppid = -1;
		thread->cpu = -1;
		INIT_LIST_HEAD(&thread->namespaces_list);
		INIT_LIST_HEAD(&thread->comm_list);
		init_rwsem(&thread->namespaces_lock);
		init_rwsem(&thread->comm_lock);

		comm_str = malloc(32);
		if (!comm_str)
			goto err_thread;

		snprintf(comm_str, 32, ":%d", tid);
		comm = comm__new(comm_str, 0, false);
		free(comm_str);
		if (!comm)
			goto err_thread;

		list_add(&comm->list, &thread->comm_list);
		refcount_set(&thread->refcnt, 1);
		RB_CLEAR_NODE(&thread->rb_node);
		/* Thread holds first ref to nsdata. */
		thread->nsinfo = nsinfo__new(pid);
	}

	return thread;

err_thread:
	free(thread);
	return NULL;
}

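/*
 * Tear down a thread once its last reference has been dropped: it must no
 * longer sit on an rb_tree (hence the BUG_ON), then its thread stack, map
 * groups, namespaces and comm lists are released before the struct itself
 * is freed.
 */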
void thread__delete(struct thread *thread)
{
	struct namespaces *namespaces, *tmp_namespaces;
	struct comm *comm, *tmp_comm;

	BUG_ON(!RB_EMPTY_NODE(&thread->rb_node));

	thread_stack__free(thread);

	if (thread->mg) {
		map_groups__put(thread->mg);
		thread->mg = NULL;
	}
	down_write(&thread->namespaces_lock);
	list_for_each_entry_safe(namespaces, tmp_namespaces,
				 &thread->namespaces_list, list) {
		list_del(&namespaces->list);
		namespaces__free(namespaces);
	}
	up_write(&thread->namespaces_lock);

	down_write(&thread->comm_lock);
	list_for_each_entry_safe(comm, tmp_comm, &thread->comm_list, list) {
		list_del(&comm->list);
		comm__free(comm);
	}
	up_write(&thread->comm_lock);

	unwind__finish_access(thread);
	nsinfo__zput(thread->nsinfo);

	exit_rwsem(&thread->namespaces_lock);
	exit_rwsem(&thread->comm_lock);
	free(thread);
}

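/*
 * thread__get()/thread__put() manage the thread's lifetime: get takes a
 * new reference (and tolerates a NULL thread), put drops one and deletes
 * the thread when the count reaches zero.
 */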
struct thread *thread__get(struct thread *thread)
{
	if (thread)
		refcount_inc(&thread->refcnt);
	return thread;
}

void thread__put(struct thread *thread)
{
	if (thread && refcount_dec_and_test(&thread->refcnt)) {
		/*
		 * Remove it from the dead_threads list, as the last
		 * reference is gone.
		 */
		list_del_init(&thread->node);
		thread__delete(thread);
	}
}

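/* Return the most recently added namespaces entry, or NULL if there is none. */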
struct namespaces *thread__namespaces(const struct thread *thread)
{
	if (list_empty(&thread->namespaces_list))
		return NULL;

	return list_first_entry(&thread->namespaces_list, struct namespaces, list);
}

static int __thread__set_namespaces(struct thread *thread, u64 timestamp,
				    struct namespaces_event *event)
{
	struct namespaces *new, *curr = thread__namespaces(thread);

	new = namespaces__new(event);
	if (!new)
		return -ENOMEM;

	list_add(&new->list, &thread->namespaces_list);

	if (timestamp && curr) {
		/*
		 * A setns syscall must have changed some or all of the
		 * namespaces of this thread. Update the end time for the
		 * namespaces previously in use.
		 */
		curr = list_next_entry(new, list);
		curr->end_time = timestamp;
	}

	return 0;
}

int thread__set_namespaces(struct thread *thread, u64 timestamp,
			   struct namespaces_event *event)
{
	int ret;

	down_write(&thread->namespaces_lock);
	ret = __thread__set_namespaces(thread, timestamp, event);
	up_write(&thread->namespaces_lock);
	return ret;
}

struct comm *thread__comm(const struct thread *thread)
{
	if (list_empty(&thread->comm_list))
		return NULL;

	return list_first_entry(&thread->comm_list, struct comm, list);
}

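/*
 * Return the comm that was set at exec time: the first entry flagged as
 * 'exec' when walking from the most recent comm backwards, falling back
 * to the oldest entry if none is flagged.
 */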
struct comm *thread__exec_comm(const struct thread *thread)
{
	struct comm *comm, *last = NULL;

	list_for_each_entry(comm, &thread->comm_list, list) {
		if (comm->exec)
			return comm;
		last = comm;
	}

	return last;
}

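/*
 * Set the thread's comm: the first call overrides the placeholder ":<tid>"
 * entry in place, later calls prepend a new entry so the comm history is
 * kept. A comm set at exec time also flushes the unwind access state.
 * Callers must hold comm_lock for writing.
 */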
static int ____thread__set_comm(struct thread *thread, const char *str,
				u64 timestamp, bool exec)
{
	struct comm *new, *curr = thread__comm(thread);

	/* Override the default :tid entry */
	if (!thread->comm_set) {
		int err = comm__override(curr, str, timestamp, exec);
		if (err)
			return err;
	} else {
		new = comm__new(str, timestamp, exec);
		if (!new)
			return -ENOMEM;
		list_add(&new->list, &thread->comm_list);

		if (exec)
			unwind__flush_access(thread);
	}

	thread->comm_set = true;

	return 0;
}

int __thread__set_comm(struct thread *thread, const char *str, u64 timestamp,
		       bool exec)
{
	int ret;

	down_write(&thread->comm_lock);
	ret = ____thread__set_comm(thread, str, timestamp, exec);
	up_write(&thread->comm_lock);
	return ret;
}

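/*
 * Read the current comm from procfs ("<pid>/task/<tid>/comm") and record
 * it on the thread with a timestamp of zero. Returns -1 if the path does
 * not fit in the buffer or the file cannot be read.
 */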
int thread__set_comm_from_proc(struct thread *thread)
{
	char path[64];
	char *comm = NULL;
	size_t sz;
	int err = -1;

	if (!(snprintf(path, sizeof(path), "%d/task/%d/comm",
		       thread->pid_, thread->tid) >= (int)sizeof(path)) &&
	    procfs__read_str(path, &comm, &sz) == 0) {
		comm[sz - 1] = '\0';
		err = thread__set_comm(thread, comm, 0);
	}

	return err;
}

static const char *__thread__comm_str(const struct thread *thread)
{
	const struct comm *comm = thread__comm(thread);

	if (!comm)
		return NULL;

	return comm__str(comm);
}

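/*
 * Return the current comm string under the comm read lock; the casts just
 * discard the const qualifier so the lock can be taken on a const thread.
 */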
const char *thread__comm_str(const struct thread *thread)
{
	const char *str;

	down_read((struct rw_semaphore *)&thread->comm_lock);
	str = __thread__comm_str(thread);
	up_read((struct rw_semaphore *)&thread->comm_lock);

	return str;
}

/* CHECKME: it should probably return the max comm len from its comm list */
int thread__comm_len(struct thread *thread)
{
	if (!thread->comm_len) {
		const char *comm = thread__comm_str(thread);
		if (!comm)
			return 0;
		thread->comm_len = strlen(comm);
	}

	return thread->comm_len;
}

size_t thread__fprintf(struct thread *thread, FILE *fp)
{
	return fprintf(fp, "Thread %d %s\n", thread->tid, thread__comm_str(thread)) +
	       map_groups__fprintf(thread->mg, fp);
}

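/*
 * Insert a new map into the thread's map groups, first giving the unwinder
 * a chance to prepare for it and fixing up any overlap with existing maps.
 */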
int thread__insert_map(struct thread *thread, struct map *map)
{
	int ret;

	ret = unwind__prepare_access(thread, map, NULL);
	if (ret)
		return ret;

	map_groups__fixup_overlappings(thread->mg, map, stderr);
	map_groups__insert(thread->mg, map);

	return 0;
}

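/*
 * Let the unwinder prepare its per-thread state: walk the thread's maps of
 * each type under the maps lock, stopping a walk once the unwinder reports
 * that it is initialized or returns an error. Only done when callchains
 * are in use.
 */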
static int __thread__prepare_access(struct thread *thread)
{
	bool initialized = false;
	int i, err = 0;

	for (i = 0; i < MAP__NR_TYPES; ++i) {
		struct maps *maps = &thread->mg->maps[i];
		struct map *map;

		down_read(&maps->lock);

		for (map = maps__first(maps); map; map = map__next(map)) {
			err = unwind__prepare_access(thread, map, &initialized);
			if (err || initialized)
				break;
		}

		up_read(&maps->lock);
	}

	return err;
}

static int thread__prepare_access(struct thread *thread)
{
	int err = 0;

	if (symbol_conf.use_callchain)
		err = __thread__prepare_access(thread);

	return err;
}

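/*
 * A new thread in the same process already shares its parent's map groups,
 * so only unwind preparation is needed; a new process instead gets a copy
 * of the parent's maps for every map type.
 */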
static int thread__clone_map_groups(struct thread *thread,
				    struct thread *parent)
{
	int i;

	/* This is a new thread within the same process, map groups are shared. */
	if (thread->pid_ == parent->pid_)
		return thread__prepare_access(thread);

	if (thread->mg == parent->mg) {
		pr_debug("broken map groups on thread %d/%d parent %d/%d\n",
			 thread->pid_, thread->tid, parent->pid_, parent->tid);
		return 0;
	}

	/* But this one is a new process, so copy the maps. */
	for (i = 0; i < MAP__NR_TYPES; ++i)
		if (map_groups__clone(thread, parent->mg, i) < 0)
			return -ENOMEM;

	return 0;
}

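/*
 * Set up a newly forked thread from its parent: inherit the parent's comm
 * (timestamped with the fork event), record the parent as ppid and clone
 * or share the map groups as appropriate.
 */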
int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp)
{
	if (parent->comm_set) {
		const char *comm = thread__comm_str(parent);
		int err;
		if (!comm)
			return -ENOMEM;
		err = thread__set_comm(thread, comm, timestamp);
		if (err)
			return err;
	}

	thread->ppid = parent->tid;
	return thread__clone_map_groups(thread, parent);
}

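/*
 * Resolve an address without knowing its cpumode: try user, kernel, guest
 * user and guest kernel in turn and stop at the first one that yields a
 * map in the addr_location.
 */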
void thread__find_cpumode_addr_location(struct thread *thread,
					enum map_type type, u64 addr,
					struct addr_location *al)
{
	size_t i;
	const u8 cpumodes[] = {
		PERF_RECORD_MISC_USER,
		PERF_RECORD_MISC_KERNEL,
		PERF_RECORD_MISC_GUEST_USER,
		PERF_RECORD_MISC_GUEST_KERNEL
	};

	for (i = 0; i < ARRAY_SIZE(cpumodes); i++) {
		thread__find_addr_location(thread, cpumodes[i], type, addr, al);
		if (al->map)
			break;
	}
}

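/*
 * Return the thread group leader for this thread: the thread itself (with
 * a new reference) if it already is the leader, NULL if the pid is not
 * known, otherwise look the leader up on the machine.
 */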
struct thread *thread__main_thread(struct machine *machine, struct thread *thread)
{
	if (thread->pid_ == thread->tid)
		return thread__get(thread);

	if (thread->pid_ == -1)
		return NULL;

	return machine__find_thread(machine, thread->pid_, thread->pid_);
}