/*
 * QEMU Plugin Core code
 *
 * This is the core code that deals with injecting instrumentation into
 * the translated guest code.
 *
 * Copyright (C) 2017, Emilio G. Cota <cota@braap.org>
 * Copyright (C) 2019, Linaro
 *
 * License: GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qemu/config-file.h"
#include "qapi/error.h"
#include "qemu/option.h"
#include "qemu/rcu_queue.h"
#include "qemu/xxhash.h"
#include "qemu/rcu.h"
#include "hw/core/cpu.h"
#include "exec/cpu-common.h"

#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "sysemu/sysemu.h"
#include "tcg/tcg.h"
#include "tcg/tcg-op.h"
#include "trace/mem-internal.h" /* mem_info macros */
#include "plugin.h"

/* A single registered callback: its owning plugin, function and user data */
struct qemu_plugin_cb {
    struct qemu_plugin_ctx *ctx;
    union qemu_plugin_cb_sig f;
    void *udata;
    QLIST_ENTRY(qemu_plugin_cb) entry;
};

/* Global plugin state: contexts, per-event callback lists and event mask */
struct qemu_plugin_state plugin;

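/*
 * Look up a plugin context by its ID. Must be called with plugin.lock
 * held; aborts if the ID is unknown, since callers only pass IDs that
 * were handed out at install time.
 */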
struct qemu_plugin_ctx *plugin_id_to_ctx_locked(qemu_plugin_id_t id)
{
    struct qemu_plugin_ctx *ctx;
    qemu_plugin_id_t *id_p;

    id_p = g_hash_table_lookup(plugin.id_ht, &id);
    ctx = container_of(id_p, struct qemu_plugin_ctx, id);
    if (ctx == NULL) {
        error_report("plugin: invalid plugin id %" PRIu64, id);
        abort();
    }
    return ctx;
}

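/*
 * Propagate the global event mask to a vCPU. The vCPU's TB jump cache
 * is cleared so that subsequent lookups do not reuse translations that
 * were instrumented under the old mask.
 */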
static void plugin_cpu_update__async(CPUState *cpu, run_on_cpu_data data)
{
    bitmap_copy(cpu->plugin_mask, &data.host_ulong, QEMU_PLUGIN_EV_MAX);
    cpu_tb_jmp_cache_clear(cpu);
}

static void plugin_cpu_update__locked(gpointer k, gpointer v, gpointer udata)
{
    CPUState *cpu = container_of(k, CPUState, cpu_index);
    run_on_cpu_data mask = RUN_ON_CPU_HOST_ULONG(*plugin.mask);

    if (cpu->created) {
        async_run_on_cpu(cpu, plugin_cpu_update__async, mask);
    } else {
        plugin_cpu_update__async(cpu, mask);
    }
}

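/*
 * Remove a plugin's callback for @ev. If no plugin remains subscribed
 * to the event, clear the event bit in the global mask and push the
 * updated mask out to every vCPU.
 */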
void plugin_unregister_cb__locked(struct qemu_plugin_ctx *ctx,
                                  enum qemu_plugin_event ev)
{
    struct qemu_plugin_cb *cb = ctx->callbacks[ev];

    if (cb == NULL) {
        return;
    }
    QLIST_REMOVE_RCU(cb, entry);
    g_free(cb);
    ctx->callbacks[ev] = NULL;
    if (QLIST_EMPTY_RCU(&plugin.cb_lists[ev])) {
        clear_bit(ev, plugin.mask);
        g_hash_table_foreach(plugin.cpu_ht, plugin_cpu_update__locked, NULL);
    }
}

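/* Dispatch a per-vCPU event that carries no arguments beyond the vCPU index */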
static void plugin_vcpu_cb__simple(CPUState *cpu, enum qemu_plugin_event ev)
{
    struct qemu_plugin_cb *cb, *next;

    switch (ev) {
    case QEMU_PLUGIN_EV_VCPU_INIT:
    case QEMU_PLUGIN_EV_VCPU_EXIT:
    case QEMU_PLUGIN_EV_VCPU_IDLE:
    case QEMU_PLUGIN_EV_VCPU_RESUME:
        /* iterate safely; plugins might uninstall themselves at any time */
        QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
            qemu_plugin_vcpu_simple_cb_t func = cb->f.vcpu_simple;

            func(cb->ctx->id, cpu->cpu_index);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

static void plugin_cb__simple(enum qemu_plugin_event ev)
{
    struct qemu_plugin_cb *cb, *next;

    switch (ev) {
    case QEMU_PLUGIN_EV_FLUSH:
        QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
            qemu_plugin_simple_cb_t func = cb->f.simple;

            func(cb->ctx->id);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

static void plugin_cb__udata(enum qemu_plugin_event ev)
{
    struct qemu_plugin_cb *cb, *next;

    switch (ev) {
    case QEMU_PLUGIN_EV_ATEXIT:
        QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
            qemu_plugin_udata_cb_t func = cb->f.udata;

            func(cb->ctx->id, cb->udata);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

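/*
 * Register (func != NULL) or unregister (func == NULL) a callback for
 * @ev on behalf of plugin @id. An existing registration is updated in
 * place; a brand-new one also sets the event bit in the global mask so
 * that vCPUs start dispatching the event.
 */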
static void
do_plugin_register_cb(qemu_plugin_id_t id, enum qemu_plugin_event ev,
                      void *func, void *udata)
{
    struct qemu_plugin_ctx *ctx;

    qemu_rec_mutex_lock(&plugin.lock);
    ctx = plugin_id_to_ctx_locked(id);
    /* if the plugin is on its way out, ignore this request */
    if (unlikely(ctx->uninstalling)) {
        goto out_unlock;
    }
    if (func) {
        struct qemu_plugin_cb *cb = ctx->callbacks[ev];

        if (cb) {
            cb->f.generic = func;
            cb->udata = udata;
        } else {
            cb = g_new(struct qemu_plugin_cb, 1);
            cb->ctx = ctx;
            cb->f.generic = func;
            cb->udata = udata;
            ctx->callbacks[ev] = cb;
            QLIST_INSERT_HEAD_RCU(&plugin.cb_lists[ev], cb, entry);
            if (!test_bit(ev, plugin.mask)) {
                set_bit(ev, plugin.mask);
                g_hash_table_foreach(plugin.cpu_ht, plugin_cpu_update__locked,
                                     NULL);
            }
        }
    } else {
        plugin_unregister_cb__locked(ctx, ev);
    }
 out_unlock:
    qemu_rec_mutex_unlock(&plugin.lock);
}

void plugin_register_cb(qemu_plugin_id_t id, enum qemu_plugin_event ev,
                        void *func)
{
    do_plugin_register_cb(id, ev, func, NULL);
}

void
plugin_register_cb_udata(qemu_plugin_id_t id, enum qemu_plugin_event ev,
                         void *func, void *udata)
{
    do_plugin_register_cb(id, ev, func, udata);
}

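/*
 * Example usage (a sketch of a hypothetical out-of-tree plugin, not part
 * of this file): a plugin's qemu_plugin_install() entry point registers
 * callbacks against its own id via the public wrappers built on
 * plugin_register_cb()/plugin_register_cb_udata() above, e.g.:
 *
 *   static void vcpu_idle(qemu_plugin_id_t id, unsigned int cpu_index)
 *   {
 *       ... react to cpu_index going idle ...
 *   }
 *
 *   QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
 *                                              const qemu_info_t *info,
 *                                              int argc, char **argv)
 *   {
 *       qemu_plugin_register_vcpu_idle_cb(id, vcpu_idle);
 *       return 0;
 *   }
 */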
void qemu_plugin_vcpu_init_hook(CPUState *cpu)
{
    bool success;

    qemu_rec_mutex_lock(&plugin.lock);
    plugin_cpu_update__locked(&cpu->cpu_index, NULL, NULL);
    success = g_hash_table_insert(plugin.cpu_ht, &cpu->cpu_index,
                                  &cpu->cpu_index);
    g_assert(success);
    qemu_rec_mutex_unlock(&plugin.lock);

    plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_INIT);
}

void qemu_plugin_vcpu_exit_hook(CPUState *cpu)
{
    bool success;

    plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_EXIT);

    qemu_rec_mutex_lock(&plugin.lock);
    success = g_hash_table_remove(plugin.cpu_ht, &cpu->cpu_index);
    g_assert(success);
    qemu_rec_mutex_unlock(&plugin.lock);
}

struct plugin_for_each_args {
    struct qemu_plugin_ctx *ctx;
    qemu_plugin_vcpu_simple_cb_t cb;
};

static void plugin_vcpu_for_each(gpointer k, gpointer v, gpointer udata)
{
    struct plugin_for_each_args *args = udata;
    int cpu_index = *(int *)k;

    args->cb(args->ctx->id, cpu_index);
}

void qemu_plugin_vcpu_for_each(qemu_plugin_id_t id,
                               qemu_plugin_vcpu_simple_cb_t cb)
{
    struct plugin_for_each_args args;

    if (cb == NULL) {
        return;
    }
    qemu_rec_mutex_lock(&plugin.lock);
    args.ctx = plugin_id_to_ctx_locked(id);
    args.cb = cb;
    g_hash_table_foreach(plugin.cpu_ht, plugin_vcpu_for_each, &args);
    qemu_rec_mutex_unlock(&plugin.lock);
}

/* Allocate and return a callback record */
static struct qemu_plugin_dyn_cb *plugin_get_dyn_cb(GArray **arr)
{
    GArray *cbs = *arr;

    if (!cbs) {
        cbs = g_array_sized_new(false, false,
                                sizeof(struct qemu_plugin_dyn_cb), 1);
        *arr = cbs;
    }

    g_array_set_size(cbs, cbs->len + 1);
    return &g_array_index(cbs, struct qemu_plugin_dyn_cb, cbs->len - 1);
}

void plugin_register_inline_op(GArray **arr,
                               enum qemu_plugin_mem_rw rw,
                               enum qemu_plugin_op op, void *ptr,
                               uint64_t imm)
{
    struct qemu_plugin_dyn_cb *dyn_cb;

    dyn_cb = plugin_get_dyn_cb(arr);
    dyn_cb->userp = ptr;
    dyn_cb->type = PLUGIN_CB_INLINE;
    dyn_cb->rw = rw;
    dyn_cb->inline_insn.op = op;
    dyn_cb->inline_insn.imm = imm;
}

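/*
 * Example usage (hypothetical plugin code, assuming the public
 * qemu_plugin_register_vcpu_mem_inline() wrapper): an inline op lets a
 * plugin bump a counter from generated code without calling back into
 * the plugin, e.g. counting memory accesses:
 *
 *   static uint64_t mem_count;
 *   ...
 *   qemu_plugin_register_vcpu_mem_inline(insn, QEMU_PLUGIN_MEM_RW,
 *                                        QEMU_PLUGIN_INLINE_ADD_U64,
 *                                        &mem_count, 1);
 */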
static inline uint32_t cb_to_tcg_flags(enum qemu_plugin_cb_flags flags)
{
    uint32_t ret;

    switch (flags) {
    case QEMU_PLUGIN_CB_RW_REGS:
        ret = 0; /* no TCG flags: the helper may read and write globals */
        break;
    case QEMU_PLUGIN_CB_R_REGS:
        ret = TCG_CALL_NO_WG;
        break;
    case QEMU_PLUGIN_CB_NO_REGS:
    default:
        ret = TCG_CALL_NO_RWG;
    }
    return ret;
}

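/*
 * Append a regular (helper-based) callback with user data to a TB's or
 * instruction's dynamic callback array; the TCG call flags are derived
 * from the plugin's declared register access.
 */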
inline void
plugin_register_dyn_cb__udata(GArray **arr,
                              qemu_plugin_vcpu_udata_cb_t cb,
                              enum qemu_plugin_cb_flags flags, void *udata)
{
    struct qemu_plugin_dyn_cb *dyn_cb = plugin_get_dyn_cb(arr);

    dyn_cb->userp = udata;
    dyn_cb->tcg_flags = cb_to_tcg_flags(flags);
    dyn_cb->f.vcpu_udata = cb;
    dyn_cb->type = PLUGIN_CB_REGULAR;
}

void plugin_register_vcpu_mem_cb(GArray **arr,
                                 void *cb,
                                 enum qemu_plugin_cb_flags flags,
                                 enum qemu_plugin_mem_rw rw,
                                 void *udata)
{
    struct qemu_plugin_dyn_cb *dyn_cb;

    dyn_cb = plugin_get_dyn_cb(arr);
    dyn_cb->userp = udata;
    dyn_cb->tcg_flags = cb_to_tcg_flags(flags);
    dyn_cb->type = PLUGIN_CB_REGULAR;
    dyn_cb->rw = rw;
    dyn_cb->f.generic = cb;
}

void qemu_plugin_tb_trans_cb(CPUState *cpu, struct qemu_plugin_tb *tb)
{
    struct qemu_plugin_cb *cb, *next;
    enum qemu_plugin_event ev = QEMU_PLUGIN_EV_VCPU_TB_TRANS;

    /* no plugin_mask check here; caller should have checked */

    QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
        qemu_plugin_vcpu_tb_trans_cb_t func = cb->f.vcpu_tb_trans;

        func(cb->ctx->id, tb);
    }
}

void
qemu_plugin_vcpu_syscall(CPUState *cpu, int64_t num, uint64_t a1, uint64_t a2,
                         uint64_t a3, uint64_t a4, uint64_t a5,
                         uint64_t a6, uint64_t a7, uint64_t a8)
{
    struct qemu_plugin_cb *cb, *next;
    enum qemu_plugin_event ev = QEMU_PLUGIN_EV_VCPU_SYSCALL;

    if (!test_bit(ev, cpu->plugin_mask)) {
        return;
    }

    QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
        qemu_plugin_vcpu_syscall_cb_t func = cb->f.vcpu_syscall;

        func(cb->ctx->id, cpu->cpu_index, num, a1, a2, a3, a4, a5, a6, a7, a8);
    }
}

void qemu_plugin_vcpu_syscall_ret(CPUState *cpu, int64_t num, int64_t ret)
{
    struct qemu_plugin_cb *cb, *next;
    enum qemu_plugin_event ev = QEMU_PLUGIN_EV_VCPU_SYSCALL_RET;

    if (!test_bit(ev, cpu->plugin_mask)) {
        return;
    }

    QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
        qemu_plugin_vcpu_syscall_ret_cb_t func = cb->f.vcpu_syscall_ret;

        func(cb->ctx->id, cpu->cpu_index, num, ret);
    }
}

void qemu_plugin_vcpu_idle_cb(CPUState *cpu)
{
    plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_IDLE);
}

void qemu_plugin_vcpu_resume_cb(CPUState *cpu)
{
    plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_RESUME);
}

void qemu_plugin_register_vcpu_idle_cb(qemu_plugin_id_t id,
                                       qemu_plugin_vcpu_simple_cb_t cb)
{
    plugin_register_cb(id, QEMU_PLUGIN_EV_VCPU_IDLE, cb);
}

void qemu_plugin_register_vcpu_resume_cb(qemu_plugin_id_t id,
                                         qemu_plugin_vcpu_simple_cb_t cb)
{
    plugin_register_cb(id, QEMU_PLUGIN_EV_VCPU_RESUME, cb);
}

void qemu_plugin_register_flush_cb(qemu_plugin_id_t id,
                                   qemu_plugin_simple_cb_t cb)
{
    plugin_register_cb(id, QEMU_PLUGIN_EV_FLUSH, cb);
}

static bool free_dyn_cb_arr(void *p, uint32_t h, void *userp)
{
    g_array_free((GArray *) p, true);
    return true;
}

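/*
 * Called on a TB cache flush: all translations are gone, so the dynamic
 * callback arrays they referenced can be freed before plugins are told
 * about the flush.
 */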
void qemu_plugin_flush_cb(void)
{
    qht_iter_remove(&plugin.dyn_cb_arr_ht, free_dyn_cb_arr, NULL);
    qht_reset(&plugin.dyn_cb_arr_ht);

    plugin_cb__simple(QEMU_PLUGIN_EV_FLUSH);
}

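/* Interpret an inline op; only 64-bit immediate add is supported so far */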
void exec_inline_op(struct qemu_plugin_dyn_cb *cb)
{
    uint64_t *val = cb->userp;

    switch (cb->inline_insn.op) {
    case QEMU_PLUGIN_INLINE_ADD_U64:
        *val += cb->inline_insn.imm;
        break;
    default:
        g_assert_not_reached();
    }
}

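/*
 * Dispatch a memory access to the callbacks registered for the current
 * instruction. @info encodes the access (see trace/mem-internal.h): the
 * TRACE_MEM_ST bit makes w 2 for a store and 1 for a load, matching the
 * QEMU_PLUGIN_MEM_R/W bits held in cb->rw.
 */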
void qemu_plugin_vcpu_mem_cb(CPUState *cpu, uint64_t vaddr, uint32_t info)
{
    GArray *arr = cpu->plugin_mem_cbs;
    size_t i;

    if (arr == NULL) {
        return;
    }
    for (i = 0; i < arr->len; i++) {
        struct qemu_plugin_dyn_cb *cb =
            &g_array_index(arr, struct qemu_plugin_dyn_cb, i);
        int w = !!(info & TRACE_MEM_ST) + 1;

        if (!(w & cb->rw)) {
            break;
        }
        switch (cb->type) {
        case PLUGIN_CB_REGULAR:
            cb->f.vcpu_mem(cpu->cpu_index, info, vaddr, cb->userp);
            break;
        case PLUGIN_CB_INLINE:
            exec_inline_op(cb);
            break;
        default:
            g_assert_not_reached();
        }
    }
}

void qemu_plugin_atexit_cb(void)
{
    plugin_cb__udata(QEMU_PLUGIN_EV_ATEXIT);
}

void qemu_plugin_register_atexit_cb(qemu_plugin_id_t id,
                                    qemu_plugin_udata_cb_t cb,
                                    void *udata)
{
    plugin_register_cb_udata(id, QEMU_PLUGIN_EV_ATEXIT, cb, udata);
}

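/*
 * Example usage (hypothetical plugin code, continuing the mem_count
 * sketch above): atexit callbacks are the natural place to report
 * accumulated state:
 *
 *   static void at_exit(qemu_plugin_id_t id, void *udata)
 *   {
 *       fprintf(stderr, "mem accesses: %" PRIu64 "\n", mem_count);
 *   }
 *   ...
 *   qemu_plugin_register_atexit_cb(id, at_exit, NULL);
 */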
/*
 * Call this function after longjmp'ing to the main loop. It's possible that the
 * last instruction of a TB might have used helpers, and therefore the
 * "disable" instruction will never execute because it ended up as dead code.
 */
void qemu_plugin_disable_mem_helpers(CPUState *cpu)
{
    cpu->plugin_mem_cbs = NULL;
}

static bool plugin_dyn_cb_arr_cmp(const void *ap, const void *bp)
{
    return ap == bp;
}

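/*
 * Run at process start (constructor attribute): set up the lock, hash
 * tables and per-event callback lists before any plugin can be
 * installed, and hook the atexit dispatcher.
 */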
static void __attribute__((__constructor__)) plugin_init(void)
{
    int i;

    for (i = 0; i < QEMU_PLUGIN_EV_MAX; i++) {
        QLIST_INIT(&plugin.cb_lists[i]);
    }
    qemu_rec_mutex_init(&plugin.lock);
    plugin.id_ht = g_hash_table_new(g_int64_hash, g_int64_equal);
    plugin.cpu_ht = g_hash_table_new(g_int_hash, g_int_equal);
    QTAILQ_INIT(&plugin.ctxs);
    qht_init(&plugin.dyn_cb_arr_ht, plugin_dyn_cb_arr_cmp, 16,
             QHT_MODE_AUTO_RESIZE);
    atexit(qemu_plugin_atexit_cb);
}