tools/bpf/bpftool/pids.c
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2020 Facebook */
#include <errno.h>
#include <linux/err.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include <bpf/bpf.h>
#include <bpf/hashmap.h>

#include "main.h"
#include "skeleton/pid_iter.h"

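/*
 * When bpftool is built without BPF skeleton support, the pid_iter
 * skeleton is unavailable, so PID/comm tracking is compiled out and the
 * functions below degrade to no-op stubs (build_obj_refs_table reports
 * -ENOTSUP).
 */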
#ifdef BPFTOOL_WITHOUT_SKELETONS

int build_obj_refs_table(struct hashmap **map, enum bpf_obj_type type)
{
	return -ENOTSUP;
}
void delete_obj_refs_table(struct hashmap *map) {}
void emit_obj_refs_plain(struct hashmap *map, __u32 id, const char *prefix) {}
void emit_obj_refs_json(struct hashmap *map, __u32 id, json_writer_t *json_writer) {}

#else /* BPFTOOL_WITHOUT_SKELETONS */

#include "pid_iter.skel.h"

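/*
 * Record that PID e->pid holds a reference to the BPF object with ID
 * e->id. If the ID is already in the map, append the PID to its refs
 * array (skipping duplicates); otherwise allocate a new obj_refs entry
 * and add it to the hashmap.
 */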
static void add_ref(struct hashmap *map, struct pid_iter_entry *e)
{
	struct hashmap_entry *entry;
	struct obj_refs *refs;
	struct obj_ref *ref;
	int err, i;
	void *tmp;

	hashmap__for_each_key_entry(map, entry, e->id) {
		refs = entry->pvalue;

		for (i = 0; i < refs->ref_cnt; i++) {
			if (refs->refs[i].pid == e->pid)
				return;
		}

		tmp = realloc(refs->refs, (refs->ref_cnt + 1) * sizeof(*ref));
		if (!tmp) {
			p_err("failed to re-alloc memory for ID %u, PID %d, COMM %s...",
			      e->id, e->pid, e->comm);
			return;
		}
		refs->refs = tmp;
		ref = &refs->refs[refs->ref_cnt];
		ref->pid = e->pid;
		memcpy(ref->comm, e->comm, sizeof(ref->comm));
		refs->ref_cnt++;

		return;
	}

	/* new ref */
	refs = calloc(1, sizeof(*refs));
	if (!refs) {
		p_err("failed to alloc memory for ID %u, PID %d, COMM %s...",
		      e->id, e->pid, e->comm);
		return;
	}

	refs->refs = malloc(sizeof(*refs->refs));
	if (!refs->refs) {
		free(refs);
		p_err("failed to alloc memory for ID %u, PID %d, COMM %s...",
		      e->id, e->pid, e->comm);
		return;
	}
	ref = &refs->refs[0];
	ref->pid = e->pid;
	memcpy(ref->comm, e->comm, sizeof(ref->comm));
	refs->ref_cnt = 1;
	refs->has_bpf_cookie = e->has_bpf_cookie;
	refs->bpf_cookie = e->bpf_cookie;

	err = hashmap__append(map, e->id, refs);
	if (err)
		p_err("failed to append entry to hashmap for ID %u: %s",
		      e->id, strerror(errno));
}

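/* No-op libbpf print callback used to silence libbpf log output. */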
static int __printf(2, 0)
libbpf_print_none(__maybe_unused enum libbpf_print_level level,
		  __maybe_unused const char *format,
		  __maybe_unused va_list args)
{
	return 0;
}

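/*
 * Build a hashmap keyed by BPF object ID: load and attach the pid_iter
 * BPF skeleton (an iterator filtered by obj_type), then read fixed-size
 * struct pid_iter_entry records from the iterator and fold each one into
 * the map via add_ref(). If the kernel lacks BPF iterator support, the
 * map is simply left empty and 0 is returned.
 */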
int build_obj_refs_table(struct hashmap **map, enum bpf_obj_type type)
{
	struct pid_iter_entry *e;
	char buf[4096 / sizeof(*e) * sizeof(*e)];
	struct pid_iter_bpf *skel;
	int err, ret, fd = -1, i;

	*map = hashmap__new(hash_fn_for_key_as_id, equal_fn_for_key_as_id, NULL);
	if (IS_ERR(*map)) {
		p_err("failed to create hashmap for PID references");
		return -1;
	}
	set_max_rlimit();

	skel = pid_iter_bpf__open();
	if (!skel) {
		p_err("failed to open PID iterator skeleton");
		return -1;
	}

	skel->rodata->obj_type = type;

	if (!verifier_logs) {
		libbpf_print_fn_t default_print;

		/* Unless debug information is on, we don't want the output to
		 * be polluted with libbpf errors if bpf_iter is not supported.
		 */
		default_print = libbpf_set_print(libbpf_print_none);
		err = pid_iter_bpf__load(skel);
		libbpf_set_print(default_print);
	} else {
		err = pid_iter_bpf__load(skel);
	}
	if (err) {
		/* too bad, kernel doesn't support BPF iterators yet */
		err = 0;
		goto out;
	}
	err = pid_iter_bpf__attach(skel);
	if (err) {
		/* if we loaded above successfully, attach has to succeed */
		p_err("failed to attach PID iterator: %d", err);
		goto out;
	}

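	/*
	 * Create an iterator instance from the attached link; reading the
	 * returned FD runs the iterator program and streams its output.
	 */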
	fd = bpf_iter_create(bpf_link__fd(skel->links.iter));
	if (fd < 0) {
		err = -errno;
		p_err("failed to create PID iterator session: %d", err);
		goto out;
	}

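	/*
	 * Drain the iterator: the BPF program emits whole struct
	 * pid_iter_entry records, so any read that is not a multiple of
	 * sizeof(*e) indicates a format mismatch.
	 */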
	while (true) {
		ret = read(fd, buf, sizeof(buf));
		if (ret < 0) {
			if (errno == EAGAIN)
				continue;
			err = -errno;
			p_err("failed to read PID iterator output: %d", err);
			goto out;
		}
		if (ret == 0)
			break;
		if (ret % sizeof(*e)) {
			err = -EINVAL;
			p_err("invalid PID iterator output format");
			goto out;
		}
		ret /= sizeof(*e);

		e = (void *)buf;
		for (i = 0; i < ret; i++, e++) {
			add_ref(*map, e);
		}
	}
	err = 0;
out:
	if (fd >= 0)
		close(fd);
	pid_iter_bpf__destroy(skel);
	return err;
}

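/* Free every obj_refs entry (and its refs array), then the hashmap itself. */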
void delete_obj_refs_table(struct hashmap *map)
{
	struct hashmap_entry *entry;
	size_t bkt;

	if (!map)
		return;

	hashmap__for_each_entry(map, entry, bkt) {
		struct obj_refs *refs = entry->pvalue;

		free(refs->refs);
		free(refs);
	}

	hashmap__free(map);
}

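/*
 * Emit the bpf_cookie (if present) and a "pids" array of {pid, comm}
 * objects for the BPF object identified by "id".
 */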
void emit_obj_refs_json(struct hashmap *map, __u32 id,
			json_writer_t *json_writer)
{
	struct hashmap_entry *entry;

	if (hashmap__empty(map))
		return;

	hashmap__for_each_key_entry(map, entry, id) {
		struct obj_refs *refs = entry->pvalue;
		int i;

		if (refs->ref_cnt == 0)
			break;

		if (refs->has_bpf_cookie)
			jsonw_lluint_field(json_writer, "bpf_cookie", refs->bpf_cookie);

		jsonw_name(json_writer, "pids");
		jsonw_start_array(json_writer);
		for (i = 0; i < refs->ref_cnt; i++) {
			struct obj_ref *ref = &refs->refs[i];

			jsonw_start_object(json_writer);
			jsonw_int_field(json_writer, "pid", ref->pid);
			jsonw_string_field(json_writer, "comm", ref->comm);
			jsonw_end_object(json_writer);
		}
		jsonw_end_array(json_writer);
		break;
	}
}

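/*
 * Plain-text counterpart of emit_obj_refs_json(): print the bpf_cookie
 * (if present) followed by "prefix" and a comma-separated "comm(pid)"
 * list for the BPF object identified by "id".
 */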
void emit_obj_refs_plain(struct hashmap *map, __u32 id, const char *prefix)
{
	struct hashmap_entry *entry;

	if (hashmap__empty(map))
		return;

	hashmap__for_each_key_entry(map, entry, id) {
		struct obj_refs *refs = entry->pvalue;
		int i;

		if (refs->ref_cnt == 0)
			break;

		if (refs->has_bpf_cookie)
			printf("\n\tbpf_cookie %llu", (unsigned long long) refs->bpf_cookie);

		printf("%s", prefix);
		for (i = 0; i < refs->ref_cnt; i++) {
			struct obj_ref *ref = &refs->refs[i];

			printf("%s%s(%d)", i == 0 ? "" : ", ", ref->comm, ref->pid);
		}
		break;
	}
}

#endif
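
/*
 * Usage sketch (illustrative only, not part of the original file): a rough
 * picture of how a bpftool "show" path might consume this API, assuming a
 * populated struct bpf_prog_info "info", the global JSON writer "json_wtr"
 * and json_output flag from main.h, and a file-scope "refs_table" pointer.
 * Names and details here are assumptions; actual callers such as prog.c
 * may differ.
 *
 *	static struct hashmap *refs_table;
 *
 *	// Build the ID -> PIDs table once, before listing objects.
 *	build_obj_refs_table(&refs_table, BPF_OBJ_PROG);
 *
 *	// For each object shown:
 *	if (json_output)
 *		emit_obj_refs_json(refs_table, info.id, json_wtr);
 *	else
 *		emit_obj_refs_plain(refs_table, info.id, "\n\tpids ");
 *
 *	// Once all objects have been printed:
 *	delete_obj_refs_table(refs_table);
 *	refs_table = NULL;
 */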