/*
 * plugin-gen.c - TCG-related bits of plugin infrastructure
 *
 * Copyright (C) 2018, Emilio G. Cota <[email protected]>
 * License: GNU GPL, version 2 or later.
 *   See the COPYING file in the top-level directory.
 *
 * We support instrumentation at an instruction granularity. That is,
 * if a plugin wants to instrument the memory accesses performed by a
 * particular instruction, it can just do that instead of instrumenting
 * all memory accesses. To do this, we first have to translate a TB, so
 * that plugins can decide what/where to instrument.
 *
 * Injecting the desired instrumentation could be done with a second
 * translation pass that combined the instrumentation requests, but that
 * would be ugly and inefficient since we would decode the guest code twice.
 * Instead, during TB translation we add "empty" instrumentation calls for all
 * possible instrumentation events, and then once we collect the instrumentation
 * requests from plugins, we either "fill in" those empty events or remove them
 * if they have no requests.
 *
 * When "filling in" an event we first copy the empty callback's TCG ops. This
 * might seem unnecessary, but it is done to support an arbitrary number
 * of callbacks per event. Take for example a regular instruction callback.
 * We first generate a callback to an empty helper function. Then, if two
 * plugins register one callback each for this instruction, we make two copies
 * of the TCG ops generated for the empty callback, substituting the function
 * pointer that points to the empty helper function with the plugins' desired
 * callback functions. After that we remove the empty callback's ops.
 *
 * Note that the location in TCGOp.args[] of the pointer to a helper function
 * varies across different guest and host architectures. Instead of duplicating
 * the logic that figures this out, we rely on the fact that the empty
 * callbacks point to empty functions that are unique pointers in the program.
 * Thus, to find the right location we just have to look for a match in
 * TCGOp.args[]. This is the main reason why we first copy an empty callback's
 * TCG ops and then fill them in; regardless of whether we have one or many
 * callbacks for that event, the logic to add all of them is the same.
 *
 * When generating more than one callback per event, we make a small
 * optimization to avoid generating redundant operations. For instance, for the
 * second and all subsequent callbacks of an event, we do not need to reload the
 * CPU's index into a TCG temp, since the first callback did it already.
 */
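/*
 * For example (simplified; the exact ops depend on the host backend), the
 * empty instruction callback emitted by gen_empty_udata_cb() is roughly:
 *
 *     plugin_cb_start  FROM_INSN, CB_UDATA, 0
 *     movi_ptr         udata, $0x0           <- patched with cb->userp
 *     ld_i32           cpu_index, env, $off
 *     call             plugin_vcpu_udata_cb  <- patched with the plugin's fn
 *     plugin_cb_end
 *
 * and injection replaces this template with one patched copy per registered
 * callback (or removes it if there are none).
 */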
#include "qemu/osdep.h"
#include "cpu.h"
#include "tcg/tcg.h"
#include "tcg/tcg-op.h"
#include "trace/mem.h"
#include "exec/exec-all.h"
#include "exec/plugin-gen.h"
#include "exec/translator.h"

#ifdef CONFIG_SOFTMMU
# define CONFIG_SOFTMMU_GATE 1
#else
# define CONFIG_SOFTMMU_GATE 0
#endif

/*
 * plugin_cb_start TCG op args[]:
 * 0: enum plugin_gen_from
 * 1: enum plugin_gen_cb
 * 2: set to 1 for mem callback that is a write, 0 otherwise.
 */

enum plugin_gen_from {
    PLUGIN_GEN_FROM_TB,
    PLUGIN_GEN_FROM_INSN,
    PLUGIN_GEN_FROM_MEM,
    PLUGIN_GEN_AFTER_INSN,
    PLUGIN_GEN_N_FROMS,
};

enum plugin_gen_cb {
    PLUGIN_GEN_CB_UDATA,
    PLUGIN_GEN_CB_INLINE,
    PLUGIN_GEN_CB_MEM,
    PLUGIN_GEN_ENABLE_MEM_HELPER,
    PLUGIN_GEN_DISABLE_MEM_HELPER,
    PLUGIN_GEN_N_CBS,
};

/*
 * These helpers are stubs that get dynamically switched out for direct
 * calls to the plugin if they are subscribed to.
 */
void HELPER(plugin_vcpu_udata_cb)(uint32_t cpu_index, void *udata)
{ }

void HELPER(plugin_vcpu_mem_cb)(unsigned int vcpu_index,
                                qemu_plugin_meminfo_t info, uint64_t vaddr,
                                void *userdata)
{ }

static void do_gen_mem_cb(TCGv vaddr, uint32_t info)
{
    TCGv_i32 cpu_index = tcg_temp_new_i32();
    TCGv_i32 meminfo = tcg_const_i32(info);
    TCGv_i64 vaddr64 = tcg_temp_new_i64();
    TCGv_ptr udata = tcg_const_ptr(NULL);

    tcg_gen_ld_i32(cpu_index, cpu_env,
                   -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
    tcg_gen_extu_tl_i64(vaddr64, vaddr);

    gen_helper_plugin_vcpu_mem_cb(cpu_index, meminfo, vaddr64, udata);

    tcg_temp_free_ptr(udata);
    tcg_temp_free_i64(vaddr64);
    tcg_temp_free_i32(meminfo);
    tcg_temp_free_i32(cpu_index);
}

static void gen_empty_udata_cb(void)
{
    TCGv_i32 cpu_index = tcg_temp_new_i32();
    TCGv_ptr udata = tcg_const_ptr(NULL); /* will be overwritten later */

    tcg_gen_ld_i32(cpu_index, cpu_env,
                   -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
    gen_helper_plugin_vcpu_udata_cb(cpu_index, udata);

    tcg_temp_free_ptr(udata);
    tcg_temp_free_i32(cpu_index);
}

/*
 * For now we only support addi_i64.
 * When we support more ops, we can generate one empty inline cb for each.
 */
static void gen_empty_inline_cb(void)
{
    TCGv_i64 val = tcg_temp_new_i64();
    TCGv_ptr ptr = tcg_const_ptr(NULL); /* overwritten later */

    tcg_gen_ld_i64(val, ptr, 0);
    /* pass an immediate != 0 so that it doesn't get optimized away */
    tcg_gen_addi_i64(val, val, 0xdeadface);
    tcg_gen_st_i64(val, ptr, 0);
    tcg_temp_free_ptr(ptr);
    tcg_temp_free_i64(val);
}

static void gen_empty_mem_cb(TCGv addr, uint32_t info)
{
    do_gen_mem_cb(addr, info);
}

/*
 * Share the same function for enable/disable. When enabling, the NULL
 * pointer will be overwritten later.
 */
static void gen_empty_mem_helper(void)
{
    TCGv_ptr ptr;

    ptr = tcg_const_ptr(NULL);
    tcg_gen_st_ptr(ptr, cpu_env, offsetof(CPUState, plugin_mem_cbs) -
                                 offsetof(ArchCPU, env));
    tcg_temp_free_ptr(ptr);
}

static inline
void gen_plugin_cb_start(enum plugin_gen_from from,
                         enum plugin_gen_cb type, unsigned wr)
{
    TCGOp *op;

    tcg_gen_plugin_cb_start(from, type, wr);
    op = tcg_last_op();
    QSIMPLEQ_INSERT_TAIL(&tcg_ctx->plugin_ops, op, plugin_link);
}

static void gen_wrapped(enum plugin_gen_from from,
                        enum plugin_gen_cb type, void (*func)(void))
{
    gen_plugin_cb_start(from, type, 0);
    func();
    tcg_gen_plugin_cb_end();
}

static inline void plugin_gen_empty_callback(enum plugin_gen_from from)
{
    switch (from) {
    case PLUGIN_GEN_AFTER_INSN:
        gen_wrapped(from, PLUGIN_GEN_DISABLE_MEM_HELPER,
                    gen_empty_mem_helper);
        break;
    case PLUGIN_GEN_FROM_INSN:
        /*
         * Note: plugin_gen_inject() relies on ENABLE_MEM_HELPER being
         * the first callback of an instruction
         */
        gen_wrapped(from, PLUGIN_GEN_ENABLE_MEM_HELPER,
                    gen_empty_mem_helper);
        /* fall through */
    case PLUGIN_GEN_FROM_TB:
        gen_wrapped(from, PLUGIN_GEN_CB_UDATA, gen_empty_udata_cb);
        gen_wrapped(from, PLUGIN_GEN_CB_INLINE, gen_empty_inline_cb);
        break;
    default:
        g_assert_not_reached();
    }
}

union mem_gen_fn {
    void (*mem_fn)(TCGv, uint32_t);
    void (*inline_fn)(void);
};

static void gen_mem_wrapped(enum plugin_gen_cb type,
                            const union mem_gen_fn *f, TCGv addr,
                            uint32_t info, bool is_mem)
{
    int wr = !!(info & TRACE_MEM_ST);

    gen_plugin_cb_start(PLUGIN_GEN_FROM_MEM, type, wr);
    if (is_mem) {
        f->mem_fn(addr, info);
    } else {
        f->inline_fn();
    }
    tcg_gen_plugin_cb_end();
}

void plugin_gen_empty_mem_callback(TCGv addr, uint32_t info)
{
    union mem_gen_fn fn;

    fn.mem_fn = gen_empty_mem_cb;
    gen_mem_wrapped(PLUGIN_GEN_CB_MEM, &fn, addr, info, true);

    fn.inline_fn = gen_empty_inline_cb;
    gen_mem_wrapped(PLUGIN_GEN_CB_INLINE, &fn, 0, info, false);
}

static TCGOp *find_op(TCGOp *op, TCGOpcode opc)
{
    while (op) {
        if (op->opc == opc) {
            return op;
        }
        op = QTAILQ_NEXT(op, link);
    }
    return NULL;
}

static TCGOp *rm_ops_range(TCGOp *begin, TCGOp *end)
{
    TCGOp *ret = QTAILQ_NEXT(end, link);

    QTAILQ_REMOVE_SEVERAL(&tcg_ctx->ops, begin, end, link);
    return ret;
}

/* remove all ops until (and including) plugin_cb_end */
static TCGOp *rm_ops(TCGOp *op)
{
    TCGOp *end_op = find_op(op, INDEX_op_plugin_cb_end);

    tcg_debug_assert(end_op);
    return rm_ops_range(op, end_op);
}

static TCGOp *copy_op_nocheck(TCGOp **begin_op, TCGOp *op)
{
    *begin_op = QTAILQ_NEXT(*begin_op, link);
    tcg_debug_assert(*begin_op);
    op = tcg_op_insert_after(tcg_ctx, op, (*begin_op)->opc);
    memcpy(op->args, (*begin_op)->args, sizeof(op->args));
    return op;
}

static TCGOp *copy_op(TCGOp **begin_op, TCGOp *op, TCGOpcode opc)
{
    op = copy_op_nocheck(begin_op, op);
    tcg_debug_assert((*begin_op)->opc == opc);
    return op;
}

static TCGOp *copy_extu_i32_i64(TCGOp **begin_op, TCGOp *op)
{
    if (TCG_TARGET_REG_BITS == 32) {
        /* mov_i32 */
        op = copy_op(begin_op, op, INDEX_op_mov_i32);
        /* movi_i32 */
        op = copy_op(begin_op, op, INDEX_op_movi_i32);
    } else {
        /* extu_i32_i64 */
        op = copy_op(begin_op, op, INDEX_op_extu_i32_i64);
    }
    return op;
}

static TCGOp *copy_mov_i64(TCGOp **begin_op, TCGOp *op)
{
    if (TCG_TARGET_REG_BITS == 32) {
        /* 2x mov_i32 */
        op = copy_op(begin_op, op, INDEX_op_mov_i32);
        op = copy_op(begin_op, op, INDEX_op_mov_i32);
    } else {
        /* mov_i64 */
        op = copy_op(begin_op, op, INDEX_op_mov_i64);
    }
    return op;
}

static TCGOp *copy_movi_i64(TCGOp **begin_op, TCGOp *op, uint64_t v)
{
    if (TCG_TARGET_REG_BITS == 32) {
        /* 2x movi_i32 */
        op = copy_op(begin_op, op, INDEX_op_movi_i32);
        op->args[1] = v;

        op = copy_op(begin_op, op, INDEX_op_movi_i32);
        op->args[1] = v >> 32;
    } else {
        /* movi_i64 */
        op = copy_op(begin_op, op, INDEX_op_movi_i64);
        op->args[1] = v;
    }
    return op;
}

static TCGOp *copy_const_ptr(TCGOp **begin_op, TCGOp *op, void *ptr)
{
    if (UINTPTR_MAX == UINT32_MAX) {
        /* movi_i32 */
        op = copy_op(begin_op, op, INDEX_op_movi_i32);
        op->args[1] = (uintptr_t)ptr;
    } else {
        /* movi_i64 */
        op = copy_movi_i64(begin_op, op, (uint64_t)(uintptr_t)ptr);
    }
    return op;
}

static TCGOp *copy_const_i64(TCGOp **begin_op, TCGOp *op, uint64_t v)
{
    return copy_movi_i64(begin_op, op, v);
}

static TCGOp *copy_extu_tl_i64(TCGOp **begin_op, TCGOp *op)
{
    if (TARGET_LONG_BITS == 32) {
        /* extu_i32_i64 */
        op = copy_extu_i32_i64(begin_op, op);
    } else {
        /* mov_i64 */
        op = copy_mov_i64(begin_op, op);
    }
    return op;
}

static TCGOp *copy_ld_i64(TCGOp **begin_op, TCGOp *op)
{
    if (TCG_TARGET_REG_BITS == 32) {
        /* 2x ld_i32 */
        op = copy_op(begin_op, op, INDEX_op_ld_i32);
        op = copy_op(begin_op, op, INDEX_op_ld_i32);
    } else {
        /* ld_i64 */
        op = copy_op(begin_op, op, INDEX_op_ld_i64);
    }
    return op;
}

static TCGOp *copy_st_i64(TCGOp **begin_op, TCGOp *op)
{
    if (TCG_TARGET_REG_BITS == 32) {
        /* 2x st_i32 */
        op = copy_op(begin_op, op, INDEX_op_st_i32);
        op = copy_op(begin_op, op, INDEX_op_st_i32);
    } else {
        /* st_i64 */
        op = copy_op(begin_op, op, INDEX_op_st_i64);
    }
    return op;
}

static TCGOp *copy_add_i64(TCGOp **begin_op, TCGOp *op)
{
    if (TCG_TARGET_REG_BITS == 32) {
        /* all 32-bit backends must implement add2_i32 */
        g_assert(TCG_TARGET_HAS_add2_i32);
        op = copy_op(begin_op, op, INDEX_op_add2_i32);
    } else {
        op = copy_op(begin_op, op, INDEX_op_add_i64);
    }
    return op;
}

static TCGOp *copy_st_ptr(TCGOp **begin_op, TCGOp *op)
{
    if (UINTPTR_MAX == UINT32_MAX) {
        /* st_i32 */
        op = copy_op(begin_op, op, INDEX_op_st_i32);
    } else {
        /* st_i64 */
        op = copy_st_i64(begin_op, op);
    }
    return op;
}

static TCGOp *copy_call(TCGOp **begin_op, TCGOp *op, void *empty_func,
                        void *func, unsigned tcg_flags, int *cb_idx)
{
    /* copy all ops until the call */
    do {
        op = copy_op_nocheck(begin_op, op);
    } while (op->opc != INDEX_op_call);

    /* fill in the op call */
    op->param1 = (*begin_op)->param1;
    op->param2 = (*begin_op)->param2;
    tcg_debug_assert(op->life == 0);
    if (*cb_idx == -1) {
        int i;

        /*
         * Instead of working out the position of the callback in args[], just
         * look for @empty_func, since it should be a unique pointer.
         */
        for (i = 0; i < MAX_OPC_PARAM_ARGS; i++) {
            if ((uintptr_t)(*begin_op)->args[i] == (uintptr_t)empty_func) {
                *cb_idx = i;
                break;
            }
        }
        tcg_debug_assert(i < MAX_OPC_PARAM_ARGS);
    }
    op->args[*cb_idx] = (uintptr_t)func;
    op->args[*cb_idx + 1] = tcg_flags;

    return op;
}

static TCGOp *append_udata_cb(const struct qemu_plugin_dyn_cb *cb,
                              TCGOp *begin_op, TCGOp *op, int *cb_idx)
{
    /* const_ptr */
    op = copy_const_ptr(&begin_op, op, cb->userp);

    /* copy the ld_i32, but note that we only have to copy it once */
    begin_op = QTAILQ_NEXT(begin_op, link);
    tcg_debug_assert(begin_op && begin_op->opc == INDEX_op_ld_i32);
    if (*cb_idx == -1) {
        op = tcg_op_insert_after(tcg_ctx, op, INDEX_op_ld_i32);
        memcpy(op->args, begin_op->args, sizeof(op->args));
    }

    /* call */
    op = copy_call(&begin_op, op, HELPER(plugin_vcpu_udata_cb),
                   cb->f.vcpu_udata, cb->tcg_flags, cb_idx);

    return op;
}

static TCGOp *append_inline_cb(const struct qemu_plugin_dyn_cb *cb,
                               TCGOp *begin_op, TCGOp *op,
                               int *unused)
{
    /* const_ptr */
    op = copy_const_ptr(&begin_op, op, cb->userp);

    /* ld_i64 */
    op = copy_ld_i64(&begin_op, op);

    /* const_i64 */
    op = copy_const_i64(&begin_op, op, cb->inline_insn.imm);

    /* add_i64 */
    op = copy_add_i64(&begin_op, op);

    /* st_i64 */
    op = copy_st_i64(&begin_op, op);

    return op;
}

static TCGOp *append_mem_cb(const struct qemu_plugin_dyn_cb *cb,
                            TCGOp *begin_op, TCGOp *op, int *cb_idx)
{
    enum plugin_gen_cb type = begin_op->args[1];

    tcg_debug_assert(type == PLUGIN_GEN_CB_MEM);

    /* const_i32 == movi_i32 ("info", so it remains as is) */
    op = copy_op(&begin_op, op, INDEX_op_movi_i32);

    /* const_ptr */
    op = copy_const_ptr(&begin_op, op, cb->userp);

    /* copy the ld_i32, but note that we only have to copy it once */
    begin_op = QTAILQ_NEXT(begin_op, link);
    tcg_debug_assert(begin_op && begin_op->opc == INDEX_op_ld_i32);
    if (*cb_idx == -1) {
        op = tcg_op_insert_after(tcg_ctx, op, INDEX_op_ld_i32);
        memcpy(op->args, begin_op->args, sizeof(op->args));
    }

    /* extu_tl_i64 */
    op = copy_extu_tl_i64(&begin_op, op);

    if (type == PLUGIN_GEN_CB_MEM) {
        /* call */
        op = copy_call(&begin_op, op, HELPER(plugin_vcpu_mem_cb),
                       cb->f.vcpu_udata, cb->tcg_flags, cb_idx);
    }

    return op;
}

typedef TCGOp *(*inject_fn)(const struct qemu_plugin_dyn_cb *cb,
                            TCGOp *begin_op, TCGOp *op, int *intp);
typedef bool (*op_ok_fn)(const TCGOp *op, const struct qemu_plugin_dyn_cb *cb);

static bool op_ok(const TCGOp *op, const struct qemu_plugin_dyn_cb *cb)
{
    return true;
}

static bool op_rw(const TCGOp *op, const struct qemu_plugin_dyn_cb *cb)
{
    int w;

    w = op->args[2];
    return !!(cb->rw & (w + 1));
}

static inline
void inject_cb_type(const GArray *cbs, TCGOp *begin_op, inject_fn inject,
                    op_ok_fn ok)
{
    TCGOp *end_op;
    TCGOp *op;
    int cb_idx = -1;
    int i;

    if (!cbs || cbs->len == 0) {
        rm_ops(begin_op);
        return;
    }

    end_op = find_op(begin_op, INDEX_op_plugin_cb_end);
    tcg_debug_assert(end_op);

    op = end_op;
    for (i = 0; i < cbs->len; i++) {
        struct qemu_plugin_dyn_cb *cb =
            &g_array_index(cbs, struct qemu_plugin_dyn_cb, i);

        if (!ok(begin_op, cb)) {
            continue;
        }
        op = inject(cb, begin_op, op, &cb_idx);
    }
    rm_ops_range(begin_op, end_op);
}

static void
inject_udata_cb(const GArray *cbs, TCGOp *begin_op)
{
    inject_cb_type(cbs, begin_op, append_udata_cb, op_ok);
}

static void
inject_inline_cb(const GArray *cbs, TCGOp *begin_op, op_ok_fn ok)
{
    inject_cb_type(cbs, begin_op, append_inline_cb, ok);
}

static void
inject_mem_cb(const GArray *cbs, TCGOp *begin_op)
{
    inject_cb_type(cbs, begin_op, append_mem_cb, op_rw);
}

/* we could change the ops in place, but we can reuse more code by copying */
static void inject_mem_helper(TCGOp *begin_op, GArray *arr)
{
    TCGOp *orig_op = begin_op;
    TCGOp *end_op;
    TCGOp *op;

    end_op = find_op(begin_op, INDEX_op_plugin_cb_end);
    tcg_debug_assert(end_op);

    /* const ptr */
    op = copy_const_ptr(&begin_op, end_op, arr);

    /* st_ptr */
    op = copy_st_ptr(&begin_op, op);

    rm_ops_range(orig_op, end_op);
}

/*
 * Tracking memory accesses performed from helpers requires extra work.
 * If an instruction is emulated with helpers, we do two things:
 * (1) copy the CB descriptors, and keep track of them so that they can be
 * freed later on, and (2) point CPUState.plugin_mem_cbs to the descriptors, so
 * that we can read them at run-time (i.e. when the helper executes).
 * This run-time access is performed from qemu_plugin_vcpu_mem_cb.
 *
 * Note that plugin_gen_disable_mem_helpers undoes (2). Since it
 * is possible that the code we generate after the instruction is
 * dead, we also add checks before generating tb_exit etc.
 */
static void inject_mem_enable_helper(struct qemu_plugin_insn *plugin_insn,
                                     TCGOp *begin_op)
{
    GArray *cbs[2];
    GArray *arr;
    size_t n_cbs, i;

    cbs[0] = plugin_insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_REGULAR];
    cbs[1] = plugin_insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_INLINE];

    n_cbs = 0;
    for (i = 0; i < ARRAY_SIZE(cbs); i++) {
        n_cbs += cbs[i]->len;
    }

    plugin_insn->mem_helper = plugin_insn->calls_helpers && n_cbs;
    if (likely(!plugin_insn->mem_helper)) {
        rm_ops(begin_op);
        return;
    }

    arr = g_array_sized_new(false, false,
                            sizeof(struct qemu_plugin_dyn_cb), n_cbs);

    for (i = 0; i < ARRAY_SIZE(cbs); i++) {
        g_array_append_vals(arr, cbs[i]->data, cbs[i]->len);
    }

    qemu_plugin_add_dyn_cb_arr(arr);
    inject_mem_helper(begin_op, arr);
}

static void inject_mem_disable_helper(struct qemu_plugin_insn *plugin_insn,
                                      TCGOp *begin_op)
{
    if (likely(!plugin_insn->mem_helper)) {
        rm_ops(begin_op);
        return;
    }
    inject_mem_helper(begin_op, NULL);
}

/* called before finishing a TB with exit_tb, goto_tb or goto_ptr */
void plugin_gen_disable_mem_helpers(void)
{
    TCGv_ptr ptr;

    if (likely(tcg_ctx->plugin_insn == NULL ||
               !tcg_ctx->plugin_insn->mem_helper)) {
        return;
    }
    ptr = tcg_const_ptr(NULL);
    tcg_gen_st_ptr(ptr, cpu_env, offsetof(CPUState, plugin_mem_cbs) -
                                 offsetof(ArchCPU, env));
    tcg_temp_free_ptr(ptr);
    tcg_ctx->plugin_insn->mem_helper = false;
}

static void plugin_gen_tb_udata(const struct qemu_plugin_tb *ptb,
                                TCGOp *begin_op)
{
    inject_udata_cb(ptb->cbs[PLUGIN_CB_REGULAR], begin_op);
}

static void plugin_gen_tb_inline(const struct qemu_plugin_tb *ptb,
                                 TCGOp *begin_op)
{
    inject_inline_cb(ptb->cbs[PLUGIN_CB_INLINE], begin_op, op_ok);
}

static void plugin_gen_insn_udata(const struct qemu_plugin_tb *ptb,
                                  TCGOp *begin_op, int insn_idx)
{
    struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);

    inject_udata_cb(insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_REGULAR], begin_op);
}

static void plugin_gen_insn_inline(const struct qemu_plugin_tb *ptb,
                                   TCGOp *begin_op, int insn_idx)
{
    struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);
    inject_inline_cb(insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_INLINE],
                     begin_op, op_ok);
}

static void plugin_gen_mem_regular(const struct qemu_plugin_tb *ptb,
                                   TCGOp *begin_op, int insn_idx)
{
    struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);
    inject_mem_cb(insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_REGULAR], begin_op);
}

static void plugin_gen_mem_inline(const struct qemu_plugin_tb *ptb,
                                  TCGOp *begin_op, int insn_idx)
{
    const GArray *cbs;
    struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);

    cbs = insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_INLINE];
    inject_inline_cb(cbs, begin_op, op_rw);
}

static void plugin_gen_enable_mem_helper(const struct qemu_plugin_tb *ptb,
                                         TCGOp *begin_op, int insn_idx)
{
    struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);
    inject_mem_enable_helper(insn, begin_op);
}

static void plugin_gen_disable_mem_helper(const struct qemu_plugin_tb *ptb,
                                          TCGOp *begin_op, int insn_idx)
{
    struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);
    inject_mem_disable_helper(insn, begin_op);
}

static void plugin_inject_cb(const struct qemu_plugin_tb *ptb, TCGOp *begin_op,
                             int insn_idx)
{
    enum plugin_gen_from from = begin_op->args[0];
    enum plugin_gen_cb type = begin_op->args[1];

    switch (from) {
    case PLUGIN_GEN_FROM_TB:
        switch (type) {
        case PLUGIN_GEN_CB_UDATA:
            plugin_gen_tb_udata(ptb, begin_op);
            return;
        case PLUGIN_GEN_CB_INLINE:
            plugin_gen_tb_inline(ptb, begin_op);
            return;
        default:
            g_assert_not_reached();
        }
    case PLUGIN_GEN_FROM_INSN:
        switch (type) {
        case PLUGIN_GEN_CB_UDATA:
            plugin_gen_insn_udata(ptb, begin_op, insn_idx);
            return;
        case PLUGIN_GEN_CB_INLINE:
            plugin_gen_insn_inline(ptb, begin_op, insn_idx);
            return;
        case PLUGIN_GEN_ENABLE_MEM_HELPER:
            plugin_gen_enable_mem_helper(ptb, begin_op, insn_idx);
            return;
        default:
            g_assert_not_reached();
        }
    case PLUGIN_GEN_FROM_MEM:
        switch (type) {
        case PLUGIN_GEN_CB_MEM:
            plugin_gen_mem_regular(ptb, begin_op, insn_idx);
            return;
        case PLUGIN_GEN_CB_INLINE:
            plugin_gen_mem_inline(ptb, begin_op, insn_idx);
            return;
        default:
            g_assert_not_reached();
        }
    case PLUGIN_GEN_AFTER_INSN:
        switch (type) {
        case PLUGIN_GEN_DISABLE_MEM_HELPER:
            plugin_gen_disable_mem_helper(ptb, begin_op, insn_idx);
            return;
        default:
            g_assert_not_reached();
        }
    default:
        g_assert_not_reached();
    }
}

/* #define DEBUG_PLUGIN_GEN_OPS */
static void pr_ops(void)
{
#ifdef DEBUG_PLUGIN_GEN_OPS
    TCGOp *op;
    int i = 0;

    QTAILQ_FOREACH(op, &tcg_ctx->ops, link) {
        const char *name = "";
        const char *type = "";

        if (op->opc == INDEX_op_plugin_cb_start) {
            switch (op->args[0]) {
            case PLUGIN_GEN_FROM_TB:
                name = "tb";
                break;
            case PLUGIN_GEN_FROM_INSN:
                name = "insn";
                break;
            case PLUGIN_GEN_FROM_MEM:
                name = "mem";
                break;
            case PLUGIN_GEN_AFTER_INSN:
                name = "after insn";
                break;
            default:
                break;
            }
            switch (op->args[1]) {
            case PLUGIN_GEN_CB_UDATA:
                type = "udata";
                break;
            case PLUGIN_GEN_CB_INLINE:
                type = "inline";
                break;
            case PLUGIN_GEN_CB_MEM:
                type = "mem";
                break;
            case PLUGIN_GEN_ENABLE_MEM_HELPER:
                type = "enable mem helper";
                break;
            case PLUGIN_GEN_DISABLE_MEM_HELPER:
                type = "disable mem helper";
                break;
            default:
                break;
            }
        }
        printf("op[%2i]: %s %s %s\n", i, tcg_op_defs[op->opc].name, name, type);
        i++;
    }
#endif
}

static void plugin_gen_inject(const struct qemu_plugin_tb *plugin_tb)
{
    TCGOp *op;
    int insn_idx;

    pr_ops();
    insn_idx = -1;
    QSIMPLEQ_FOREACH(op, &tcg_ctx->plugin_ops, plugin_link) {
        enum plugin_gen_from from = op->args[0];
        enum plugin_gen_cb type = op->args[1];

        tcg_debug_assert(op->opc == INDEX_op_plugin_cb_start);
        /* ENABLE_MEM_HELPER is the first callback of an instruction */
        if (from == PLUGIN_GEN_FROM_INSN &&
            type == PLUGIN_GEN_ENABLE_MEM_HELPER) {
            insn_idx++;
        }
        plugin_inject_cb(plugin_tb, op, insn_idx);
    }
    pr_ops();
}

bool plugin_gen_tb_start(CPUState *cpu, const TranslationBlock *tb)
{
    struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb;
    bool ret = false;

    if (test_bit(QEMU_PLUGIN_EV_VCPU_TB_TRANS, cpu->plugin_mask)) {
        ret = true;

        QSIMPLEQ_INIT(&tcg_ctx->plugin_ops);
        ptb->vaddr = tb->pc;
        ptb->vaddr2 = -1;
        get_page_addr_code_hostp(cpu->env_ptr, tb->pc, &ptb->haddr1);
        ptb->haddr2 = NULL;

        plugin_gen_empty_callback(PLUGIN_GEN_FROM_TB);
    }
    return ret;
}

void plugin_gen_insn_start(CPUState *cpu, const DisasContextBase *db)
{
    struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb;
    struct qemu_plugin_insn *pinsn;

    pinsn = qemu_plugin_tb_insn_get(ptb);
    tcg_ctx->plugin_insn = pinsn;
    pinsn->vaddr = db->pc_next;
    plugin_gen_empty_callback(PLUGIN_GEN_FROM_INSN);

    /*
     * Detect page crossing to get the new host address.
     * Note that we skip this when haddr1 == NULL, e.g. when we're
     * fetching instructions from a region not backed by RAM.
     */
    if (likely(ptb->haddr1 != NULL && ptb->vaddr2 == -1) &&
        unlikely((db->pc_next & TARGET_PAGE_MASK) !=
                 (db->pc_first & TARGET_PAGE_MASK))) {
        get_page_addr_code_hostp(cpu->env_ptr, db->pc_next,
                                 &ptb->haddr2);
        ptb->vaddr2 = db->pc_next;
    }
    if (likely(ptb->vaddr2 == -1)) {
        pinsn->haddr = ptb->haddr1 + pinsn->vaddr - ptb->vaddr;
    } else {
        pinsn->haddr = ptb->haddr2 + pinsn->vaddr - ptb->vaddr2;
    }
}

void plugin_gen_insn_end(void)
{
    plugin_gen_empty_callback(PLUGIN_GEN_AFTER_INSN);
}

void plugin_gen_tb_end(CPUState *cpu)
{
    struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb;
    int i;

    /* collect instrumentation requests */
    qemu_plugin_tb_trans_cb(cpu, ptb);

    /* inject the instrumentation at the appropriate places */
    plugin_gen_inject(ptb);

    /* clean up */
    for (i = 0; i < PLUGIN_N_CB_SUBTYPES; i++) {
        if (ptb->cbs[i]) {
            g_array_set_size(ptb->cbs[i], 0);
        }
    }
    ptb->n = 0;
    tcg_ctx->plugin_insn = NULL;
}
932 | } |