]>
Commit | Line | Data |
---|---|---|
1 | /* GDB-specific functions for operating on agent expressions. | |
2 | ||
3 | Copyright (C) 1998, 1999, 2000, 2001, 2003, 2007 | |
4 | Free Software Foundation, Inc. | |
5 | ||
6 | This file is part of GDB. | |
7 | ||
8 | This program is free software; you can redistribute it and/or modify | |
9 | it under the terms of the GNU General Public License as published by | |
10 | the Free Software Foundation; either version 2 of the License, or | |
11 | (at your option) any later version. | |
12 | ||
13 | This program is distributed in the hope that it will be useful, | |
14 | but WITHOUT ANY WARRANTY; without even the implied warranty of | |
15 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
16 | GNU General Public License for more details. | |
17 | ||
18 | You should have received a copy of the GNU General Public License | |
19 | along with this program; if not, write to the Free Software | |
20 | Foundation, Inc., 51 Franklin Street, Fifth Floor, | |
21 | Boston, MA 02110-1301, USA. */ | |
22 | ||
23 | #include "defs.h" | |
24 | #include "symtab.h" | |
25 | #include "symfile.h" | |
26 | #include "gdbtypes.h" | |
27 | #include "value.h" | |
28 | #include "expression.h" | |
29 | #include "command.h" | |
30 | #include "gdbcmd.h" | |
31 | #include "frame.h" | |
32 | #include "target.h" | |
33 | #include "ax.h" | |
34 | #include "ax-gdb.h" | |
35 | #include "gdb_string.h" | |
36 | #include "block.h" | |
37 | #include "regcache.h" | |
38 | ||
39 | /* To make sense of this file, you should read doc/agentexpr.texi. | |
40 | Then look at the types and enums in ax-gdb.h. For the code itself, | |
41 | look at gen_expr, towards the bottom; that's the main function that | |
42 | looks at the GDB expressions and calls everything else to generate | |
43 | code. | |
44 | ||
45 | I'm beginning to wonder whether it wouldn't be nicer to internally | |
46 | generate trees, with types, and then spit out the bytecode in | |
47 | linear form afterwards; we could generate fewer `swap', `ext', and | |
48 | `zero_ext' bytecodes that way; it would make good constant folding | |
49 | easier, too. But at the moment, I think we should be willing to | |
50 | pay for the simplicity of this code with less-than-optimal bytecode | |
51 | strings. | |
52 | ||
53 | Remember, "GBD" stands for "Great Britain, Dammit!" So be careful. */ | |
54 | \f | |
55 | ||
56 | ||
57 | /* Prototypes for local functions. */ | |
58 | ||
59 | /* There's a standard order to the arguments of these functions: | |
60 | union exp_element ** --- pointer into expression | |
61 | struct agent_expr * --- agent expression buffer to generate code into | |
62 | struct axs_value * --- describes value left on top of stack */ | |
63 | ||
64 | static struct value *const_var_ref (struct symbol *var); | |
65 | static struct value *const_expr (union exp_element **pc); | |
66 | static struct value *maybe_const_expr (union exp_element **pc); | |
67 | ||
68 | static void gen_traced_pop (struct agent_expr *, struct axs_value *); | |
69 | ||
70 | static void gen_sign_extend (struct agent_expr *, struct type *); | |
71 | static void gen_extend (struct agent_expr *, struct type *); | |
72 | static void gen_fetch (struct agent_expr *, struct type *); | |
73 | static void gen_left_shift (struct agent_expr *, int); | |
74 | ||
75 | ||
76 | static void gen_frame_args_address (struct agent_expr *); | |
77 | static void gen_frame_locals_address (struct agent_expr *); | |
78 | static void gen_offset (struct agent_expr *ax, int offset); | |
79 | static void gen_sym_offset (struct agent_expr *, struct symbol *); | |
80 | static void gen_var_ref (struct agent_expr *ax, | |
81 | struct axs_value *value, struct symbol *var); | |
82 | ||
83 | ||
84 | static void gen_int_literal (struct agent_expr *ax, | |
85 | struct axs_value *value, | |
86 | LONGEST k, struct type *type); | |
87 | ||
88 | ||
89 | static void require_rvalue (struct agent_expr *ax, struct axs_value *value); | |
90 | static void gen_usual_unary (struct agent_expr *ax, struct axs_value *value); | |
91 | static int type_wider_than (struct type *type1, struct type *type2); | |
92 | static struct type *max_type (struct type *type1, struct type *type2); | |
93 | static void gen_conversion (struct agent_expr *ax, | |
94 | struct type *from, struct type *to); | |
95 | static int is_nontrivial_conversion (struct type *from, struct type *to); | |
96 | static void gen_usual_arithmetic (struct agent_expr *ax, | |
97 | struct axs_value *value1, | |
98 | struct axs_value *value2); | |
99 | static void gen_integral_promotions (struct agent_expr *ax, | |
100 | struct axs_value *value); | |
101 | static void gen_cast (struct agent_expr *ax, | |
102 | struct axs_value *value, struct type *type); | |
103 | static void gen_scale (struct agent_expr *ax, | |
104 | enum agent_op op, struct type *type); | |
105 | static void gen_add (struct agent_expr *ax, | |
106 | struct axs_value *value, | |
107 | struct axs_value *value1, | |
108 | struct axs_value *value2, char *name); | |
109 | static void gen_sub (struct agent_expr *ax, | |
110 | struct axs_value *value, | |
111 | struct axs_value *value1, struct axs_value *value2); | |
112 | static void gen_binop (struct agent_expr *ax, | |
113 | struct axs_value *value, | |
114 | struct axs_value *value1, | |
115 | struct axs_value *value2, | |
116 | enum agent_op op, | |
117 | enum agent_op op_unsigned, int may_carry, char *name); | |
118 | static void gen_logical_not (struct agent_expr *ax, struct axs_value *value); | |
119 | static void gen_complement (struct agent_expr *ax, struct axs_value *value); | |
120 | static void gen_deref (struct agent_expr *, struct axs_value *); | |
121 | static void gen_address_of (struct agent_expr *, struct axs_value *); | |
122 | static int find_field (struct type *type, char *name); | |
123 | static void gen_bitfield_ref (struct agent_expr *ax, | |
124 | struct axs_value *value, | |
125 | struct type *type, int start, int end); | |
126 | static void gen_struct_ref (struct agent_expr *ax, | |
127 | struct axs_value *value, | |
128 | char *field, | |
129 | char *operator_name, char *operand_name); | |
130 | static void gen_repeat (union exp_element **pc, | |
131 | struct agent_expr *ax, struct axs_value *value); | |
132 | static void gen_sizeof (union exp_element **pc, | |
133 | struct agent_expr *ax, struct axs_value *value); | |
134 | static void gen_expr (union exp_element **pc, | |
135 | struct agent_expr *ax, struct axs_value *value); | |
136 | ||
137 | static void agent_command (char *exp, int from_tty); | |
138 | \f | |
139 | ||
140 | /* Detecting constant expressions. */ | |
141 | ||
142 | /* If the variable reference at *PC is a constant, return its value. | |
143 | Otherwise, return zero. | |
144 | ||
145 | Hey, Wally! How can a variable reference be a constant? | |
146 | ||
147 | Well, Beav, this function really handles the OP_VAR_VALUE operator, | |
148 | not specifically variable references. GDB uses OP_VAR_VALUE to | |
149 | refer to any kind of symbolic reference: function names, enum | |
150 | elements, and goto labels are all handled through the OP_VAR_VALUE | |
151 | operator, even though they're constants. It makes sense given the | |
152 | situation. | |
153 | ||
154 | Gee, Wally, don'cha wonder sometimes if data representations that | |
155 | subvert commonly accepted definitions of terms in favor of heavily | |
156 | context-specific interpretations are really just a tool of the | |
157 | programming hegemony to preserve their power and exclude the | |
158 | proletariat? */ | |
159 | ||
160 | static struct value * | |
161 | const_var_ref (struct symbol *var) | |
162 | { | |
163 | struct type *type = SYMBOL_TYPE (var); | |
164 | ||
165 | switch (SYMBOL_CLASS (var)) | |
166 | { | |
167 | case LOC_CONST: | |
168 | return value_from_longest (type, (LONGEST) SYMBOL_VALUE (var)); | |
169 | ||
170 | case LOC_LABEL: | |
171 | return value_from_pointer (type, (CORE_ADDR) SYMBOL_VALUE_ADDRESS (var)); | |
172 | ||
173 | default: | |
174 | return 0; | |
175 | } | |
176 | } | |
177 | ||
178 | ||
179 | /* If the expression starting at *PC has a constant value, return it. | |
180 | Otherwise, return zero. If we return a value, then *PC will be | |
181 | advanced to the end of it. If we return zero, *PC could be | |
182 | anywhere. */ | |
183 | static struct value * | |
184 | const_expr (union exp_element **pc) | |
185 | { | |
186 | enum exp_opcode op = (*pc)->opcode; | |
187 | struct value *v1; | |
188 | ||
189 | switch (op) | |
190 | { | |
191 | case OP_LONG: | |
192 | { | |
193 | struct type *type = (*pc)[1].type; | |
194 | LONGEST k = (*pc)[2].longconst; | |
195 | (*pc) += 4; | |
196 | return value_from_longest (type, k); | |
197 | } | |
198 | ||
199 | case OP_VAR_VALUE: | |
200 | { | |
201 | struct value *v = const_var_ref ((*pc)[2].symbol); | |
202 | (*pc) += 4; | |
203 | return v; | |
204 | } | |
205 | ||
206 | /* We could add more operators in here. */ | |
207 | ||
208 | case UNOP_NEG: | |
209 | (*pc)++; | |
210 | v1 = const_expr (pc); | |
211 | if (v1) | |
212 | return value_neg (v1); | |
213 | else | |
214 | return 0; | |
215 | ||
216 | default: | |
217 | return 0; | |
218 | } | |
219 | } | |
220 | ||
221 | ||
222 | /* Like const_expr, but guarantee also that *PC is undisturbed if the | |
223 | expression is not constant. */ | |
224 | static struct value * | |
225 | maybe_const_expr (union exp_element **pc) | |
226 | { | |
227 | union exp_element *tentative_pc = *pc; | |
228 | struct value *v = const_expr (&tentative_pc); | |
229 | ||
230 | /* If we got a value, then update the real PC. */ | |
231 | if (v) | |
232 | *pc = tentative_pc; | |
233 | ||
234 | return v; | |
235 | } | |
236 | \f | |
237 | ||
238 | /* Generating bytecode from GDB expressions: general assumptions */ | |
239 | ||
240 | /* Here are a few general assumptions made throughout the code; if you | |
241 | want to make a change that contradicts one of these, then you'd | |
242 | better scan things pretty thoroughly. | |
243 | ||
244 | - We assume that all values occupy one stack element. For example, | |
245 | sometimes we'll swap to get at the left argument to a binary | |
246 | operator. If we decide that void values should occupy no stack | |
247 | elements, or that synthetic arrays (whose size is determined at | |
248 | run time, created by the `@' operator) should occupy two stack | |
249 | elements (address and length), then this will cause trouble. | |
250 | ||
251 | - We assume the stack elements are infinitely wide, and that we | |
252 | don't have to worry what happens if the user requests an | |
253 | operation that is wider than the actual interpreter's stack. | |
254 | That is, it's up to the interpreter to handle directly all the | |
255 | integer widths the user has access to. (Woe betide the language | |
256 | with bignums!) | |
257 | ||
258 | - We don't support side effects. Thus, we don't have to worry about | |
259 | GCC's generalized lvalues, function calls, etc. | |
260 | ||
261 | - We don't support floating point. Many places where we switch on | |
262 | some type don't bother to include cases for floating point; there | |
263 | may be even more subtle ways this assumption exists. For | |
264 | example, the arguments to % must be integers. | |
265 | ||
266 | - We assume all subexpressions have a static, unchanging type. If | |
267 | we tried to support convenience variables, this would be a | |
268 | problem. | |
269 | ||
270 | - All values on the stack should always be fully zero- or | |
271 | sign-extended. | |
272 | ||
273 | (I wasn't sure whether to choose this or its opposite --- that | |
274 | only addresses are assumed extended --- but it turns out that | |
275 | neither convention completely eliminates spurious extend | |
276 | operations (if everything is always extended, then you have to | |
277 | extend after add, because it could overflow; if nothing is | |
278 | extended, then you end up producing extends whenever you change | |
279 | sizes), and this is simpler.) */ | |
280 | \f | |
281 | ||
282 | /* Generating bytecode from GDB expressions: the `trace' kludge */ | |
283 | ||
284 | /* The compiler in this file is a general-purpose mechanism for | |
285 | translating GDB expressions into bytecode. One ought to be able to | |
286 | find a million and one uses for it. | |
287 | ||
288 | However, at the moment it is HOPELESSLY BRAIN-DAMAGED for the sake | |
289 | of expediency. Let he who is without sin cast the first stone. | |
290 | ||
291 | For the data tracing facility, we need to insert `trace' bytecodes | |
292 | before each data fetch; this records all the memory that the | |
293 | expression touches in the course of evaluation, so that memory will | |
294 | be available when the user later tries to evaluate the expression | |
295 | in GDB. | |
296 | ||
297 | This should be done (I think) in a post-processing pass, that walks | |
298 | an arbitrary agent expression and inserts `trace' operations at the | |
299 | appropriate points. But it's much faster to just hack them | |
300 | directly into the code. And since we're in a crunch, that's what | |
301 | I've done. | |
302 | ||
303 | Setting the flag trace_kludge to non-zero enables the code that | |
304 | emits the trace bytecodes at the appropriate points. */ | |
305 | static int trace_kludge; | |
306 | ||
307 | /* Trace the lvalue on the stack, if it needs it. In either case, pop | |
308 | the value. Useful on the left side of a comma, and at the end of | |
309 | an expression being used for tracing. */ | |
310 | static void | |
311 | gen_traced_pop (struct agent_expr *ax, struct axs_value *value) | |
312 | { | |
313 | if (trace_kludge) | |
314 | switch (value->kind) | |
315 | { | |
316 | case axs_rvalue: | |
317 | /* We don't trace rvalues, just the lvalues necessary to | |
318 | produce them. So just dispose of this value. */ | |
319 | ax_simple (ax, aop_pop); | |
320 | break; | |
321 | ||
322 | case axs_lvalue_memory: | |
323 | { | |
324 | int length = TYPE_LENGTH (value->type); | |
325 | ||
326 | /* There's no point in trying to use a trace_quick bytecode | |
327 | here, since "trace_quick SIZE pop" is three bytes, whereas | |
328 | "const8 SIZE trace" is also three bytes, does the same | |
329 | thing, and the simplest code which generates that will also | |
330 | work correctly for objects with large sizes. */ | |
331 | ax_const_l (ax, length); | |
332 | ax_simple (ax, aop_trace); | |
333 | } | |
334 | break; | |
335 | ||
336 | case axs_lvalue_register: | |
337 | /* We need to mention the register somewhere in the bytecode, | |
338 | so ax_reqs will pick it up and add it to the mask of | |
339 | registers used. */ | |
340 | ax_reg (ax, value->u.reg); | |
341 | ax_simple (ax, aop_pop); | |
342 | break; | |
343 | } | |
344 | else | |
345 | /* If we're not tracing, just pop the value. */ | |
346 | ax_simple (ax, aop_pop); | |
347 | } | |
348 | \f | |
349 | ||
350 | ||
351 | /* Generating bytecode from GDB expressions: helper functions */ | |
352 | ||
353 | /* Assume that the lower bits of the top of the stack is a value of | |
354 | type TYPE, and the upper bits are zero. Sign-extend if necessary. */ | |
355 | static void | |
356 | gen_sign_extend (struct agent_expr *ax, struct type *type) | |
357 | { | |
358 | /* Do we need to sign-extend this? */ | |
359 | if (!TYPE_UNSIGNED (type)) | |
360 | ax_ext (ax, TYPE_LENGTH (type) * TARGET_CHAR_BIT); | |
361 | } | |
362 | ||
363 | ||
364 | /* Assume the lower bits of the top of the stack hold a value of type | |
365 | TYPE, and the upper bits are garbage. Sign-extend or truncate as | |
366 | needed. */ | |
367 | static void | |
368 | gen_extend (struct agent_expr *ax, struct type *type) | |
369 | { | |
370 | int bits = TYPE_LENGTH (type) * TARGET_CHAR_BIT; | |
371 | /* I just had to. */ | |
372 | ((TYPE_UNSIGNED (type) ? ax_zero_ext : ax_ext) (ax, bits)); | |
373 | } | |
374 | ||
375 | ||
376 | /* Assume that the top of the stack contains a value of type "pointer | |
377 | to TYPE"; generate code to fetch its value. Note that TYPE is the | |
378 | target type, not the pointer type. */ | |
379 | static void | |
380 | gen_fetch (struct agent_expr *ax, struct type *type) | |
381 | { | |
382 | if (trace_kludge) | |
383 | { | |
384 | /* Record the area of memory we're about to fetch. */ | |
385 | ax_trace_quick (ax, TYPE_LENGTH (type)); | |
386 | } | |
387 | ||
388 | switch (TYPE_CODE (type)) | |
389 | { | |
390 | case TYPE_CODE_PTR: | |
391 | case TYPE_CODE_ENUM: | |
392 | case TYPE_CODE_INT: | |
393 | case TYPE_CODE_CHAR: | |
394 | /* It's a scalar value, so we know how to dereference it. How | |
395 | many bytes long is it? */ | |
396 | switch (TYPE_LENGTH (type)) | |
397 | { | |
398 | case 8 / TARGET_CHAR_BIT: | |
399 | ax_simple (ax, aop_ref8); | |
400 | break; | |
401 | case 16 / TARGET_CHAR_BIT: | |
402 | ax_simple (ax, aop_ref16); | |
403 | break; | |
404 | case 32 / TARGET_CHAR_BIT: | |
405 | ax_simple (ax, aop_ref32); | |
406 | break; | |
407 | case 64 / TARGET_CHAR_BIT: | |
408 | ax_simple (ax, aop_ref64); | |
409 | break; | |
410 | ||
411 | /* Either our caller shouldn't have asked us to dereference | |
412 | that pointer (other code's fault), or we're not | |
413 | implementing something we should be (this code's fault). | |
414 | In any case, it's a bug the user shouldn't see. */ | |
415 | default: | |
416 | internal_error (__FILE__, __LINE__, | |
417 | _("gen_fetch: strange size")); | |
418 | } | |
419 | ||
420 | gen_sign_extend (ax, type); | |
421 | break; | |
422 | ||
423 | default: | |
424 | /* Either our caller shouldn't have asked us to dereference that | |
425 | pointer (other code's fault), or we're not implementing | |
426 | something we should be (this code's fault). In any case, | |
427 | it's a bug the user shouldn't see. */ | |
428 | internal_error (__FILE__, __LINE__, | |
429 | _("gen_fetch: bad type code")); | |
430 | } | |
431 | } | |
432 | ||
433 | ||
434 | /* Generate code to left shift the top of the stack by DISTANCE bits, or | |
435 | right shift it by -DISTANCE bits if DISTANCE < 0. This generates | |
436 | unsigned (logical) right shifts. */ | |
437 | static void | |
438 | gen_left_shift (struct agent_expr *ax, int distance) | |
439 | { | |
440 | if (distance > 0) | |
441 | { | |
442 | ax_const_l (ax, distance); | |
443 | ax_simple (ax, aop_lsh); | |
444 | } | |
445 | else if (distance < 0) | |
446 | { | |
447 | ax_const_l (ax, -distance); | |
448 | ax_simple (ax, aop_rsh_unsigned); | |
449 | } | |
450 | } | |
451 | \f | |
452 | ||
453 | ||
454 | /* Generating bytecode from GDB expressions: symbol references */ | |
455 | ||
456 | /* Generate code to push the base address of the argument portion of | |
457 | the top stack frame. */ | |
458 | static void | |
459 | gen_frame_args_address (struct agent_expr *ax) | |
460 | { | |
461 | int frame_reg; | |
462 | LONGEST frame_offset; | |
463 | ||
464 | gdbarch_virtual_frame_pointer (current_gdbarch, | |
465 | ax->scope, &frame_reg, &frame_offset); | |
466 | ax_reg (ax, frame_reg); | |
467 | gen_offset (ax, frame_offset); | |
468 | } | |
469 | ||
470 | ||
471 | /* Generate code to push the base address of the locals portion of the | |
472 | top stack frame. */ | |
473 | static void | |
474 | gen_frame_locals_address (struct agent_expr *ax) | |
475 | { | |
476 | int frame_reg; | |
477 | LONGEST frame_offset; | |
478 | ||
479 | gdbarch_virtual_frame_pointer (current_gdbarch, | |
480 | ax->scope, &frame_reg, &frame_offset); | |
481 | ax_reg (ax, frame_reg); | |
482 | gen_offset (ax, frame_offset); | |
483 | } | |
484 | ||
485 | ||
486 | /* Generate code to add OFFSET to the top of the stack. Try to | |
487 | generate short and readable code. We use this for getting to | |
488 | variables on the stack, and structure members. If we were | |
489 | programming in ML, it would be clearer why these are the same | |
490 | thing. */ | |
491 | static void | |
492 | gen_offset (struct agent_expr *ax, int offset) | |
493 | { | |
494 | /* It would suffice to simply push the offset and add it, but this | |
495 | makes it easier to read positive and negative offsets in the | |
496 | bytecode. */ | |
497 | if (offset > 0) | |
498 | { | |
499 | ax_const_l (ax, offset); | |
500 | ax_simple (ax, aop_add); | |
501 | } | |
502 | else if (offset < 0) | |
503 | { | |
504 | ax_const_l (ax, -offset); | |
505 | ax_simple (ax, aop_sub); | |
506 | } | |
507 | } | |
508 | ||
509 | ||
510 | /* In many cases, a symbol's value is the offset from some other | |
511 | address (stack frame, base register, etc.) Generate code to add | |
512 | VAR's value to the top of the stack. */ | |
513 | static void | |
514 | gen_sym_offset (struct agent_expr *ax, struct symbol *var) | |
515 | { | |
516 | gen_offset (ax, SYMBOL_VALUE (var)); | |
517 | } | |
518 | ||
519 | ||
520 | /* Generate code for a variable reference to AX. The variable is the | |
521 | symbol VAR. Set VALUE to describe the result. */ | |
522 | ||
523 | static void | |
524 | gen_var_ref (struct agent_expr *ax, struct axs_value *value, struct symbol *var) | |
525 | { | |
526 | /* Dereference any typedefs. */ | |
527 | value->type = check_typedef (SYMBOL_TYPE (var)); | |
528 | ||
529 | /* I'm imitating the code in read_var_value. */ | |
530 | switch (SYMBOL_CLASS (var)) | |
531 | { | |
532 | case LOC_CONST: /* A constant, like an enum value. */ | |
533 | ax_const_l (ax, (LONGEST) SYMBOL_VALUE (var)); | |
534 | value->kind = axs_rvalue; | |
535 | break; | |
536 | ||
537 | case LOC_LABEL: /* A goto label, being used as a value. */ | |
538 | ax_const_l (ax, (LONGEST) SYMBOL_VALUE_ADDRESS (var)); | |
539 | value->kind = axs_rvalue; | |
540 | break; | |
541 | ||
542 | case LOC_CONST_BYTES: | |
543 | internal_error (__FILE__, __LINE__, | |
544 | _("gen_var_ref: LOC_CONST_BYTES symbols are not supported")); | |
545 | ||
546 | /* Variable at a fixed location in memory. Easy. */ | |
547 | case LOC_STATIC: | |
548 | /* Push the address of the variable. */ | |
549 | ax_const_l (ax, SYMBOL_VALUE_ADDRESS (var)); | |
550 | value->kind = axs_lvalue_memory; | |
551 | break; | |
552 | ||
553 | case LOC_ARG: /* var lives in argument area of frame */ | |
554 | gen_frame_args_address (ax); | |
555 | gen_sym_offset (ax, var); | |
556 | value->kind = axs_lvalue_memory; | |
557 | break; | |
558 | ||
559 | case LOC_REF_ARG: /* As above, but the frame slot really | |
560 | holds the address of the variable. */ | |
561 | gen_frame_args_address (ax); | |
562 | gen_sym_offset (ax, var); | |
563 | /* Don't assume any particular pointer size. */ | |
564 | gen_fetch (ax, lookup_pointer_type (builtin_type_void)); | |
565 | value->kind = axs_lvalue_memory; | |
566 | break; | |
567 | ||
568 | case LOC_LOCAL: /* var lives in locals area of frame */ | |
569 | case LOC_LOCAL_ARG: | |
570 | gen_frame_locals_address (ax); | |
571 | gen_sym_offset (ax, var); | |
572 | value->kind = axs_lvalue_memory; | |
573 | break; | |
574 | ||
575 | case LOC_BASEREG: /* relative to some base register */ | |
576 | case LOC_BASEREG_ARG: | |
577 | ax_reg (ax, SYMBOL_BASEREG (var)); | |
578 | gen_sym_offset (ax, var); | |
579 | value->kind = axs_lvalue_memory; | |
580 | break; | |
581 | ||
582 | case LOC_TYPEDEF: | |
583 | error (_("Cannot compute value of typedef `%s'."), | |
584 | SYMBOL_PRINT_NAME (var)); | |
585 | break; | |
586 | ||
587 | case LOC_BLOCK: | |
588 | ax_const_l (ax, BLOCK_START (SYMBOL_BLOCK_VALUE (var))); | |
589 | value->kind = axs_rvalue; | |
590 | break; | |
591 | ||
592 | case LOC_REGISTER: | |
593 | case LOC_REGPARM: | |
594 | /* Don't generate any code at all; in the process of treating | |
595 | this as an lvalue or rvalue, the caller will generate the | |
596 | right code. */ | |
597 | value->kind = axs_lvalue_register; | |
598 | value->u.reg = SYMBOL_VALUE (var); | |
599 | break; | |
600 | ||
601 | /* A lot like LOC_REF_ARG, but the pointer lives directly in a | |
602 | register, not on the stack. Simpler than LOC_REGISTER and | |
603 | LOC_REGPARM, because it's just like any other case where the | |
604 | thing has a real address. */ | |
605 | case LOC_REGPARM_ADDR: | |
606 | ax_reg (ax, SYMBOL_VALUE (var)); | |
607 | value->kind = axs_lvalue_memory; | |
608 | break; | |
609 | ||
610 | case LOC_UNRESOLVED: | |
611 | { | |
612 | struct minimal_symbol *msym | |
613 | = lookup_minimal_symbol (DEPRECATED_SYMBOL_NAME (var), NULL, NULL); | |
614 | if (!msym) | |
615 | error (_("Couldn't resolve symbol `%s'."), SYMBOL_PRINT_NAME (var)); | |
616 | ||
617 | /* Push the address of the variable. */ | |
618 | ax_const_l (ax, SYMBOL_VALUE_ADDRESS (msym)); | |
619 | value->kind = axs_lvalue_memory; | |
620 | } | |
621 | break; | |
622 | ||
623 | case LOC_COMPUTED: | |
624 | case LOC_COMPUTED_ARG: | |
625 | /* FIXME: cagney/2004-01-26: It should be possible to | |
626 | unconditionally call the SYMBOL_OPS method when available. | |
627 | Unfortunately DWARF 2 stores the frame-base (instead of the | |
628 | function) location in a function's symbol. Oops! For the | |
629 | moment enable this when/where applicable. */ | |
630 | SYMBOL_OPS (var)->tracepoint_var_ref (var, ax, value); | |
631 | break; | |
632 | ||
633 | case LOC_OPTIMIZED_OUT: | |
634 | error (_("The variable `%s' has been optimized out."), | |
635 | SYMBOL_PRINT_NAME (var)); | |
636 | break; | |
637 | ||
638 | default: | |
639 | error (_("Cannot find value of botched symbol `%s'."), | |
640 | SYMBOL_PRINT_NAME (var)); | |
641 | break; | |
642 | } | |
643 | } | |
644 | \f | |
645 | ||
646 | ||
647 | /* Generating bytecode from GDB expressions: literals */ | |
648 | ||
649 | static void | |
650 | gen_int_literal (struct agent_expr *ax, struct axs_value *value, LONGEST k, | |
651 | struct type *type) | |
652 | { | |
653 | ax_const_l (ax, k); | |
654 | value->kind = axs_rvalue; | |
655 | value->type = type; | |
656 | } | |
657 | \f | |
658 | ||
659 | ||
660 | /* Generating bytecode from GDB expressions: unary conversions, casts */ | |
661 | ||
662 | /* Take what's on the top of the stack (as described by VALUE), and | |
663 | try to make an rvalue out of it. Signal an error if we can't do | |
664 | that. */ | |
665 | static void | |
666 | require_rvalue (struct agent_expr *ax, struct axs_value *value) | |
667 | { | |
668 | switch (value->kind) | |
669 | { | |
670 | case axs_rvalue: | |
671 | /* It's already an rvalue. */ | |
672 | break; | |
673 | ||
674 | case axs_lvalue_memory: | |
675 | /* The top of stack is the address of the object. Dereference. */ | |
676 | gen_fetch (ax, value->type); | |
677 | break; | |
678 | ||
679 | case axs_lvalue_register: | |
680 | /* There's nothing on the stack, but value->u.reg is the | |
681 | register number containing the value. | |
682 | ||
683 | When we add floating-point support, this is going to have to | |
684 | change. What about SPARC register pairs, for example? */ | |
685 | ax_reg (ax, value->u.reg); | |
686 | gen_extend (ax, value->type); | |
687 | break; | |
688 | } | |
689 | ||
690 | value->kind = axs_rvalue; | |
691 | } | |
692 | ||
693 | ||
694 | /* Assume the top of the stack is described by VALUE, and perform the | |
695 | usual unary conversions. This is motivated by ANSI 6.2.2, but of | |
696 | course GDB expressions are not ANSI; they're the mishmash union of | |
697 | a bunch of languages. Rah. | |
698 | ||
699 | NOTE! This function promises to produce an rvalue only when the | |
700 | incoming value is of an appropriate type. In other words, the | |
701 | consumer of the value this function produces may assume the value | |
702 | is an rvalue only after checking its type. | |
703 | ||
704 | The immediate issue is that if the user tries to use a structure or | |
705 | union as an operand of, say, the `+' operator, we don't want to try | |
706 | to convert that structure to an rvalue; require_rvalue will bomb on | |
707 | structs and unions. Rather, we want to simply pass the struct | |
708 | lvalue through unchanged, and let `+' raise an error. */ | |
709 | ||
710 | static void | |
711 | gen_usual_unary (struct agent_expr *ax, struct axs_value *value) | |
712 | { | |
713 | /* We don't have to generate any code for the usual integral | |
714 | conversions, since values are always represented as full-width on | |
715 | the stack. Should we tweak the type? */ | |
716 | ||
717 | /* Some types require special handling. */ | |
718 | switch (TYPE_CODE (value->type)) | |
719 | { | |
720 | /* Functions get converted to a pointer to the function. */ | |
721 | case TYPE_CODE_FUNC: | |
722 | value->type = lookup_pointer_type (value->type); | |
723 | value->kind = axs_rvalue; /* Should always be true, but just in case. */ | |
724 | break; | |
725 | ||
726 | /* Arrays get converted to a pointer to their first element, and | |
727 | are no longer an lvalue. */ | |
728 | case TYPE_CODE_ARRAY: | |
729 | { | |
730 | struct type *elements = TYPE_TARGET_TYPE (value->type); | |
731 | value->type = lookup_pointer_type (elements); | |
732 | value->kind = axs_rvalue; | |
733 | /* We don't need to generate any code; the address of the array | |
734 | is also the address of its first element. */ | |
735 | } | |
736 | break; | |
737 | ||
738 | /* Don't try to convert structures and unions to rvalues. Let the | |
739 | consumer signal an error. */ | |
740 | case TYPE_CODE_STRUCT: | |
741 | case TYPE_CODE_UNION: | |
742 | return; | |
743 | ||
744 | /* If the value is an enum, call it an integer. */ | |
745 | case TYPE_CODE_ENUM: | |
746 | value->type = builtin_type_int; | |
747 | break; | |
748 | } | |
749 | ||
750 | /* If the value is an lvalue, dereference it. */ | |
751 | require_rvalue (ax, value); | |
752 | } | |
753 | ||
754 | ||
755 | /* Return non-zero iff the type TYPE1 is considered "wider" than the | |
756 | type TYPE2, according to the rules described in gen_usual_arithmetic. */ | |
757 | static int | |
758 | type_wider_than (struct type *type1, struct type *type2) | |
759 | { | |
760 | return (TYPE_LENGTH (type1) > TYPE_LENGTH (type2) | |
761 | || (TYPE_LENGTH (type1) == TYPE_LENGTH (type2) | |
762 | && TYPE_UNSIGNED (type1) | |
763 | && !TYPE_UNSIGNED (type2))); | |
764 | } | |
765 | ||
766 | ||
767 | /* Return the "wider" of the two types TYPE1 and TYPE2. */ | |
768 | static struct type * | |
769 | max_type (struct type *type1, struct type *type2) | |
770 | { | |
771 | return type_wider_than (type1, type2) ? type1 : type2; | |
772 | } | |
773 | ||
774 | ||
775 | /* Generate code to convert a scalar value of type FROM to type TO. */ | |
776 | static void | |
777 | gen_conversion (struct agent_expr *ax, struct type *from, struct type *to) | |
778 | { | |
779 | /* Perhaps there is a more graceful way to state these rules. */ | |
780 | ||
781 | /* If we're converting to a narrower type, then we need to clear out | |
782 | the upper bits. */ | |
783 | if (TYPE_LENGTH (to) < TYPE_LENGTH (from)) | |
784 | gen_extend (ax, from); | |
785 | ||
786 | /* If the two values have equal width, but different signednesses, | |
787 | then we need to extend. */ | |
788 | else if (TYPE_LENGTH (to) == TYPE_LENGTH (from)) | |
789 | { | |
790 | if (TYPE_UNSIGNED (from) != TYPE_UNSIGNED (to)) | |
791 | gen_extend (ax, to); | |
792 | } | |
793 | ||
794 | /* If we're converting to a wider type, and becoming unsigned, then | |
795 | we need to zero out any possible sign bits. */ | |
796 | else if (TYPE_LENGTH (to) > TYPE_LENGTH (from)) | |
797 | { | |
798 | if (TYPE_UNSIGNED (to)) | |
799 | gen_extend (ax, to); | |
800 | } | |
801 | } | |
802 | ||
803 | ||
804 | /* Return non-zero iff the type FROM will require any bytecodes to be | |
805 | emitted to be converted to the type TO. */ | |
806 | static int | |
807 | is_nontrivial_conversion (struct type *from, struct type *to) | |
808 | { | |
809 | struct agent_expr *ax = new_agent_expr (0); | |
810 | int nontrivial; | |
811 | ||
812 | /* Actually generate the code, and see if anything came out. At the | |
813 | moment, it would be trivial to replicate the code in | |
814 | gen_conversion here, but in the future, when we're supporting | |
815 | floating point and the like, it may not be. Doing things this | |
816 | way allows this function to be independent of the logic in | |
817 | gen_conversion. */ | |
818 | gen_conversion (ax, from, to); | |
819 | nontrivial = ax->len > 0; | |
820 | free_agent_expr (ax); | |
821 | return nontrivial; | |
822 | } | |
823 | ||
824 | ||
825 | /* Generate code to perform the "usual arithmetic conversions" (ANSI C | |
826 | 6.2.1.5) for the two operands of an arithmetic operator. This | |
827 | effectively finds a "least upper bound" type for the two arguments, | |
828 | and promotes each argument to that type. *VALUE1 and *VALUE2 | |
829 | describe the values as they are passed in, and as they are left. */ | |
830 | static void | |
831 | gen_usual_arithmetic (struct agent_expr *ax, struct axs_value *value1, | |
832 | struct axs_value *value2) | |
833 | { | |
834 | /* Do the usual binary conversions. */ | |
835 | if (TYPE_CODE (value1->type) == TYPE_CODE_INT | |
836 | && TYPE_CODE (value2->type) == TYPE_CODE_INT) | |
837 | { | |
838 | /* The ANSI integral promotions seem to work this way: Order the | |
839 | integer types by size, and then by signedness: an n-bit | |
840 | unsigned type is considered "wider" than an n-bit signed | |
841 | type. Promote to the "wider" of the two types, and always | |
842 | promote at least to int. */ | |
843 | struct type *target = max_type (builtin_type_int, | |
844 | max_type (value1->type, value2->type)); | |
845 | ||
846 | /* Deal with value2, on the top of the stack. */ | |
847 | gen_conversion (ax, value2->type, target); | |
848 | ||
849 | /* Deal with value1, not on the top of the stack. Don't | |
850 | generate the `swap' instructions if we're not actually going | |
851 | to do anything. */ | |
852 | if (is_nontrivial_conversion (value1->type, target)) | |
853 | { | |
854 | ax_simple (ax, aop_swap); | |
855 | gen_conversion (ax, value1->type, target); | |
856 | ax_simple (ax, aop_swap); | |
857 | } | |
858 | ||
859 | value1->type = value2->type = target; | |
860 | } | |
861 | } | |
862 | ||
863 | ||
864 | /* Generate code to perform the integral promotions (ANSI 6.2.1.1) on | |
865 | the value on the top of the stack, as described by VALUE. Assume | |
866 | the value has integral type. */ | |
867 | static void | |
868 | gen_integral_promotions (struct agent_expr *ax, struct axs_value *value) | |
869 | { | |
870 | if (!type_wider_than (value->type, builtin_type_int)) | |
871 | { | |
872 | gen_conversion (ax, value->type, builtin_type_int); | |
873 | value->type = builtin_type_int; | |
874 | } | |
875 | else if (!type_wider_than (value->type, builtin_type_unsigned_int)) | |
876 | { | |
877 | gen_conversion (ax, value->type, builtin_type_unsigned_int); | |
878 | value->type = builtin_type_unsigned_int; | |
879 | } | |
880 | } | |
881 | ||
882 | ||
883 | /* Generate code for a cast to TYPE. */ | |
884 | static void | |
885 | gen_cast (struct agent_expr *ax, struct axs_value *value, struct type *type) | |
886 | { | |
887 | /* GCC does allow casts to yield lvalues, so this should be fixed | |
888 | before merging these changes into the trunk. */ | |
889 | require_rvalue (ax, value); | |
890 | /* Dereference typedefs. */ | |
891 | type = check_typedef (type); | |
892 | ||
893 | switch (TYPE_CODE (type)) | |
894 | { | |
895 | case TYPE_CODE_PTR: | |
896 | /* It's implementation-defined, and I'll bet this is what GCC | |
897 | does. */ | |
898 | break; | |
899 | ||
900 | case TYPE_CODE_ARRAY: | |
901 | case TYPE_CODE_STRUCT: | |
902 | case TYPE_CODE_UNION: | |
903 | case TYPE_CODE_FUNC: | |
904 | error (_("Invalid type cast: intended type must be scalar.")); | |
905 | ||
906 | case TYPE_CODE_ENUM: | |
907 | /* We don't have to worry about the size of the value, because | |
908 | all our integral values are fully sign-extended, and when | |
909 | casting pointers we can do anything we like. Is there any | |
910 | way for us to actually know what GCC actually does with a | |
911 | cast like this? */ | |
912 | value->type = type; | |
913 | break; | |
914 | ||
915 | case TYPE_CODE_INT: | |
916 | gen_conversion (ax, value->type, type); | |
917 | break; | |
918 | ||
919 | case TYPE_CODE_VOID: | |
920 | /* We could pop the value, and rely on everyone else to check | |
921 | the type and notice that this value doesn't occupy a stack | |
922 | slot. But for now, leave the value on the stack, and | |
923 | preserve the "value == stack element" assumption. */ | |
924 | break; | |
925 | ||
926 | default: | |
927 | error (_("Casts to requested type are not yet implemented.")); | |
928 | } | |
929 | ||
930 | value->type = type; | |
931 | } | |
932 | \f | |
933 | ||
934 | ||
935 | /* Generating bytecode from GDB expressions: arithmetic */ | |
936 | ||
937 | /* Scale the integer on the top of the stack by the size of the target | |
938 | of the pointer type TYPE. */ | |
939 | static void | |
940 | gen_scale (struct agent_expr *ax, enum agent_op op, struct type *type) | |
941 | { | |
942 | struct type *element = TYPE_TARGET_TYPE (type); | |
943 | ||
944 | if (TYPE_LENGTH (element) != 1) | |
945 | { | |
946 | ax_const_l (ax, TYPE_LENGTH (element)); | |
947 | ax_simple (ax, op); | |
948 | } | |
949 | } | |
950 | ||
951 | ||
952 | /* Generate code for an addition; non-trivial because we deal with | |
953 | pointer arithmetic. We set VALUE to describe the result value; we | |
954 | assume VALUE1 and VALUE2 describe the two operands, and that | |
955 | they've undergone the usual binary conversions. Used by both | |
956 | BINOP_ADD and BINOP_SUBSCRIPT. NAME is used in error messages. */ | |
957 | static void | |
958 | gen_add (struct agent_expr *ax, struct axs_value *value, | |
959 | struct axs_value *value1, struct axs_value *value2, char *name) | |
960 | { | |
961 | /* Is it INT+PTR? */ | |
962 | if (TYPE_CODE (value1->type) == TYPE_CODE_INT | |
963 | && TYPE_CODE (value2->type) == TYPE_CODE_PTR) | |
964 | { | |
965 | /* Swap the values and proceed normally. */ | |
966 | ax_simple (ax, aop_swap); | |
967 | gen_scale (ax, aop_mul, value2->type); | |
968 | ax_simple (ax, aop_add); | |
969 | gen_extend (ax, value2->type); /* Catch overflow. */ | |
970 | value->type = value2->type; | |
971 | } | |
972 | ||
973 | /* Is it PTR+INT? */ | |
974 | else if (TYPE_CODE (value1->type) == TYPE_CODE_PTR | |
975 | && TYPE_CODE (value2->type) == TYPE_CODE_INT) | |
976 | { | |
977 | gen_scale (ax, aop_mul, value1->type); | |
978 | ax_simple (ax, aop_add); | |
979 | gen_extend (ax, value1->type); /* Catch overflow. */ | |
980 | value->type = value1->type; | |
981 | } | |
982 | ||
983 | /* Must be number + number; the usual binary conversions will have | |
984 | brought them both to the same width. */ | |
985 | else if (TYPE_CODE (value1->type) == TYPE_CODE_INT | |
986 | && TYPE_CODE (value2->type) == TYPE_CODE_INT) | |
987 | { | |
988 | ax_simple (ax, aop_add); | |
989 | gen_extend (ax, value1->type); /* Catch overflow. */ | |
990 | value->type = value1->type; | |
991 | } | |
992 | ||
993 | else | |
994 | error (_("Invalid combination of types in %s."), name); | |
995 | ||
996 | value->kind = axs_rvalue; | |
997 | } | |
998 | ||
999 | ||
1000 | /* Generate code for an addition; non-trivial because we have to deal | |
1001 | with pointer arithmetic. We set VALUE to describe the result | |
1002 | value; we assume VALUE1 and VALUE2 describe the two operands, and | |
1003 | that they've undergone the usual binary conversions. */ | |
1004 | static void | |
1005 | gen_sub (struct agent_expr *ax, struct axs_value *value, | |
1006 | struct axs_value *value1, struct axs_value *value2) | |
1007 | { | |
1008 | if (TYPE_CODE (value1->type) == TYPE_CODE_PTR) | |
1009 | { | |
1010 | /* Is it PTR - INT? */ | |
1011 | if (TYPE_CODE (value2->type) == TYPE_CODE_INT) | |
1012 | { | |
1013 | gen_scale (ax, aop_mul, value1->type); | |
1014 | ax_simple (ax, aop_sub); | |
1015 | gen_extend (ax, value1->type); /* Catch overflow. */ | |
1016 | value->type = value1->type; | |
1017 | } | |
1018 | ||
1019 | /* Is it PTR - PTR? Strictly speaking, the types ought to | |
1020 | match, but this is what the normal GDB expression evaluator | |
1021 | tests for. */ | |
1022 | else if (TYPE_CODE (value2->type) == TYPE_CODE_PTR | |
1023 | && (TYPE_LENGTH (TYPE_TARGET_TYPE (value1->type)) | |
1024 | == TYPE_LENGTH (TYPE_TARGET_TYPE (value2->type)))) | |
1025 | { | |
1026 | ax_simple (ax, aop_sub); | |
1027 | gen_scale (ax, aop_div_unsigned, value1->type); | |
1028 | value->type = builtin_type_long; /* FIXME --- should be ptrdiff_t */ | |
1029 | } | |
1030 | else | |
1031 | error (_("\ | |
1032 | First argument of `-' is a pointer, but second argument is neither\n\ | |
1033 | an integer nor a pointer of the same type.")); | |
1034 | } | |
1035 | ||
1036 | /* Must be number + number. */ | |
1037 | else if (TYPE_CODE (value1->type) == TYPE_CODE_INT | |
1038 | && TYPE_CODE (value2->type) == TYPE_CODE_INT) | |
1039 | { | |
1040 | ax_simple (ax, aop_sub); | |
1041 | gen_extend (ax, value1->type); /* Catch overflow. */ | |
1042 | value->type = value1->type; | |
1043 | } | |
1044 | ||
1045 | else | |
1046 | error (_("Invalid combination of types in subtraction.")); | |
1047 | ||
1048 | value->kind = axs_rvalue; | |
1049 | } | |
1050 | ||
1051 | /* Generate code for a binary operator that doesn't do pointer magic. | |
1052 | We set VALUE to describe the result value; we assume VALUE1 and | |
1053 | VALUE2 describe the two operands, and that they've undergone the | |
1054 | usual binary conversions. MAY_CARRY should be non-zero iff the | |
1055 | result needs to be extended. NAME is the English name of the | |
1056 | operator, used in error messages */ | |
1057 | static void | |
1058 | gen_binop (struct agent_expr *ax, struct axs_value *value, | |
1059 | struct axs_value *value1, struct axs_value *value2, enum agent_op op, | |
1060 | enum agent_op op_unsigned, int may_carry, char *name) | |
1061 | { | |
1062 | /* We only handle INT op INT. */ | |
1063 | if ((TYPE_CODE (value1->type) != TYPE_CODE_INT) | |
1064 | || (TYPE_CODE (value2->type) != TYPE_CODE_INT)) | |
1065 | error (_("Invalid combination of types in %s."), name); | |
1066 | ||
1067 | ax_simple (ax, | |
1068 | TYPE_UNSIGNED (value1->type) ? op_unsigned : op); | |
1069 | if (may_carry) | |
1070 | gen_extend (ax, value1->type); /* catch overflow */ | |
1071 | value->type = value1->type; | |
1072 | value->kind = axs_rvalue; | |
1073 | } | |
1074 | ||
1075 | ||
1076 | static void | |
1077 | gen_logical_not (struct agent_expr *ax, struct axs_value *value) | |
1078 | { | |
1079 | if (TYPE_CODE (value->type) != TYPE_CODE_INT | |
1080 | && TYPE_CODE (value->type) != TYPE_CODE_PTR) | |
1081 | error (_("Invalid type of operand to `!'.")); | |
1082 | ||
1083 | gen_usual_unary (ax, value); | |
1084 | ax_simple (ax, aop_log_not); | |
1085 | value->type = builtin_type_int; | |
1086 | } | |
1087 | ||
1088 | ||
1089 | static void | |
1090 | gen_complement (struct agent_expr *ax, struct axs_value *value) | |
1091 | { | |
1092 | if (TYPE_CODE (value->type) != TYPE_CODE_INT) | |
1093 | error (_("Invalid type of operand to `~'.")); | |
1094 | ||
1095 | gen_usual_unary (ax, value); | |
1096 | gen_integral_promotions (ax, value); | |
1097 | ax_simple (ax, aop_bit_not); | |
1098 | gen_extend (ax, value->type); | |
1099 | } | |
1100 | \f | |
1101 | ||
1102 | ||
1103 | /* Generating bytecode from GDB expressions: * & . -> @ sizeof */ | |
1104 | ||
1105 | /* Dereference the value on the top of the stack. */ | |
1106 | static void | |
1107 | gen_deref (struct agent_expr *ax, struct axs_value *value) | |
1108 | { | |
1109 | /* The caller should check the type, because several operators use | |
1110 | this, and we don't know what error message to generate. */ | |
1111 | if (TYPE_CODE (value->type) != TYPE_CODE_PTR) | |
1112 | internal_error (__FILE__, __LINE__, | |
1113 | _("gen_deref: expected a pointer")); | |
1114 | ||
1115 | /* We've got an rvalue now, which is a pointer. We want to yield an | |
1116 | lvalue, whose address is exactly that pointer. So we don't | |
1117 | actually emit any code; we just change the type from "Pointer to | |
1118 | T" to "T", and mark the value as an lvalue in memory. Leave it | |
1119 | to the consumer to actually dereference it. */ | |
1120 | value->type = check_typedef (TYPE_TARGET_TYPE (value->type)); | |
1121 | value->kind = ((TYPE_CODE (value->type) == TYPE_CODE_FUNC) | |
1122 | ? axs_rvalue : axs_lvalue_memory); | |
1123 | } | |
1124 | ||
1125 | ||
1126 | /* Produce the address of the lvalue on the top of the stack. */ | |
1127 | static void | |
1128 | gen_address_of (struct agent_expr *ax, struct axs_value *value) | |
1129 | { | |
1130 | /* Special case for taking the address of a function. The ANSI | |
1131 | standard describes this as a special case, too, so this | |
1132 | arrangement is not without motivation. */ | |
1133 | if (TYPE_CODE (value->type) == TYPE_CODE_FUNC) | |
1134 | /* The value's already an rvalue on the stack, so we just need to | |
1135 | change the type. */ | |
1136 | value->type = lookup_pointer_type (value->type); | |
1137 | else | |
1138 | switch (value->kind) | |
1139 | { | |
1140 | case axs_rvalue: | |
1141 | error (_("Operand of `&' is an rvalue, which has no address.")); | |
1142 | ||
1143 | case axs_lvalue_register: | |
1144 | error (_("Operand of `&' is in a register, and has no address.")); | |
1145 | ||
1146 | case axs_lvalue_memory: | |
1147 | value->kind = axs_rvalue; | |
1148 | value->type = lookup_pointer_type (value->type); | |
1149 | break; | |
1150 | } | |
1151 | } | |
1152 | ||
1153 | ||
1154 | /* A lot of this stuff will have to change to support C++. But we're | |
1155 | not going to deal with that at the moment. */ | |
1156 | ||
1157 | /* Find the field in the structure type TYPE named NAME, and return | |
1158 | its index in TYPE's field array. */ | |
1159 | static int | |
1160 | find_field (struct type *type, char *name) | |
1161 | { | |
1162 | int i; | |
1163 | ||
1164 | CHECK_TYPEDEF (type); | |
1165 | ||
1166 | /* Make sure this isn't C++. */ | |
1167 | if (TYPE_N_BASECLASSES (type) != 0) | |
1168 | internal_error (__FILE__, __LINE__, | |
1169 | _("find_field: derived classes supported")); | |
1170 | ||
1171 | for (i = 0; i < TYPE_NFIELDS (type); i++) | |
1172 | { | |
1173 | char *this_name = TYPE_FIELD_NAME (type, i); | |
1174 | ||
1175 | if (this_name && strcmp (name, this_name) == 0) | |
1176 | return i; | |
1177 | ||
1178 | if (this_name[0] == '\0') | |
1179 | internal_error (__FILE__, __LINE__, | |
1180 | _("find_field: anonymous unions not supported")); | |
1181 | } | |
1182 | ||
1183 | error (_("Couldn't find member named `%s' in struct/union `%s'"), | |
1184 | name, TYPE_TAG_NAME (type)); | |
1185 | ||
1186 | return 0; | |
1187 | } | |
1188 | ||
1189 | ||
1190 | /* Generate code to push the value of a bitfield of a structure whose | |
1191 | address is on the top of the stack. START and END give the | |
1192 | starting and one-past-ending *bit* numbers of the field within the | |
1193 | structure. */ | |
1194 | static void | |
1195 | gen_bitfield_ref (struct agent_expr *ax, struct axs_value *value, | |
1196 | struct type *type, int start, int end) | |
1197 | { | |
1198 | /* Note that ops[i] fetches 8 << i bits. */ | |
1199 | static enum agent_op ops[] | |
1200 | = | |
1201 | {aop_ref8, aop_ref16, aop_ref32, aop_ref64}; | |
1202 | static int num_ops = (sizeof (ops) / sizeof (ops[0])); | |
1203 | ||
1204 | /* We don't want to touch any byte that the bitfield doesn't | |
1205 | actually occupy; we shouldn't make any accesses we're not | |
1206 | explicitly permitted to. We rely here on the fact that the | |
1207 | bytecode `ref' operators work on unaligned addresses. | |
1208 | ||
1209 | It takes some fancy footwork to get the stack to work the way | |
1210 | we'd like. Say we're retrieving a bitfield that requires three | |
1211 | fetches. Initially, the stack just contains the address: | |
1212 | addr | |
1213 | For the first fetch, we duplicate the address | |
1214 | addr addr | |
1215 | then add the byte offset, do the fetch, and shift and mask as | |
1216 | needed, yielding a fragment of the value, properly aligned for | |
1217 | the final bitwise or: | |
1218 | addr frag1 | |
1219 | then we swap, and repeat the process: | |
1220 | frag1 addr --- address on top | |
1221 | frag1 addr addr --- duplicate it | |
1222 | frag1 addr frag2 --- get second fragment | |
1223 | frag1 frag2 addr --- swap again | |
1224 | frag1 frag2 frag3 --- get third fragment | |
1225 | Notice that, since the third fragment is the last one, we don't | |
1226 | bother duplicating the address this time. Now we have all the | |
1227 | fragments on the stack, and we can simply `or' them together, | |
1228 | yielding the final value of the bitfield. */ | |
1229 | ||
1230 | /* The first and one-after-last bits in the field, but rounded down | |
1231 | and up to byte boundaries. */ | |
1232 | int bound_start = (start / TARGET_CHAR_BIT) * TARGET_CHAR_BIT; | |
1233 | int bound_end = (((end + TARGET_CHAR_BIT - 1) | |
1234 | / TARGET_CHAR_BIT) | |
1235 | * TARGET_CHAR_BIT); | |
1236 | ||
1237 | /* current bit offset within the structure */ | |
1238 | int offset; | |
1239 | ||
1240 | /* The index in ops of the opcode we're considering. */ | |
1241 | int op; | |
1242 | ||
1243 | /* The number of fragments we generated in the process. Probably | |
1244 | equal to the number of `one' bits in bytesize, but who cares? */ | |
1245 | int fragment_count; | |
1246 | ||
1247 | /* Dereference any typedefs. */ | |
1248 | type = check_typedef (type); | |
1249 | ||
1250 | /* Can we fetch the number of bits requested at all? */ | |
1251 | if ((end - start) > ((1 << num_ops) * 8)) | |
1252 | internal_error (__FILE__, __LINE__, | |
1253 | _("gen_bitfield_ref: bitfield too wide")); | |
1254 | ||
1255 | /* Note that we know here that we only need to try each opcode once. | |
1256 | That may not be true on machines with weird byte sizes. */ | |
1257 | offset = bound_start; | |
1258 | fragment_count = 0; | |
1259 | for (op = num_ops - 1; op >= 0; op--) | |
1260 | { | |
1261 | /* number of bits that ops[op] would fetch */ | |
1262 | int op_size = 8 << op; | |
1263 | ||
1264 | /* The stack at this point, from bottom to top, contains zero or | |
1265 | more fragments, then the address. */ | |
1266 | ||
1267 | /* Does this fetch fit within the bitfield? */ | |
1268 | if (offset + op_size <= bound_end) | |
1269 | { | |
1270 | /* Is this the last fragment? */ | |
1271 | int last_frag = (offset + op_size == bound_end); | |
1272 | ||
1273 | if (!last_frag) | |
1274 | ax_simple (ax, aop_dup); /* keep a copy of the address */ | |
1275 | ||
1276 | /* Add the offset. */ | |
1277 | gen_offset (ax, offset / TARGET_CHAR_BIT); | |
1278 | ||
1279 | if (trace_kludge) | |
1280 | { | |
1281 | /* Record the area of memory we're about to fetch. */ | |
1282 | ax_trace_quick (ax, op_size / TARGET_CHAR_BIT); | |
1283 | } | |
1284 | ||
1285 | /* Perform the fetch. */ | |
1286 | ax_simple (ax, ops[op]); | |
1287 | ||
1288 | /* Shift the bits we have to their proper position. | |
1289 | gen_left_shift will generate right shifts when the operand | |
1290 | is negative. | |
1291 | ||
1292 | A big-endian field diagram to ponder: | |
1293 | byte 0 byte 1 byte 2 byte 3 byte 4 byte 5 byte 6 byte 7 | |
1294 | +------++------++------++------++------++------++------++------+ | |
1295 | xxxxAAAAAAAAAAAAAAAAAAAAAAAAAAAABBBBBBBBBBBBBBBBCCCCCxxxxxxxxxxx | |
1296 | ^ ^ ^ ^ | |
1297 | bit number 16 32 48 53 | |
1298 | These are bit numbers as supplied by GDB. Note that the | |
1299 | bit numbers run from right to left once you've fetched the | |
1300 | value! | |
1301 | ||
1302 | A little-endian field diagram to ponder: | |
1303 | byte 7 byte 6 byte 5 byte 4 byte 3 byte 2 byte 1 byte 0 | |
1304 | +------++------++------++------++------++------++------++------+ | |
1305 | xxxxxxxxxxxAAAAABBBBBBBBBBBBBBBBCCCCCCCCCCCCCCCCCCCCCCCCCCCCxxxx | |
1306 | ^ ^ ^ ^ ^ | |
1307 | bit number 48 32 16 4 0 | |
1308 | ||
1309 | In both cases, the most significant end is on the left | |
1310 | (i.e. normal numeric writing order), which means that you | |
1311 | don't go crazy thinking about `left' and `right' shifts. | |
1312 | ||
1313 | We don't have to worry about masking yet: | |
1314 | - If they contain garbage off the least significant end, then we | |
1315 | must be looking at the low end of the field, and the right | |
1316 | shift will wipe them out. | |
1317 | - If they contain garbage off the most significant end, then we | |
1318 | must be looking at the most significant end of the word, and | |
1319 | the sign/zero extension will wipe them out. | |
1320 | - If we're in the interior of the word, then there is no garbage | |
1321 | on either end, because the ref operators zero-extend. */ | |
1322 | if (gdbarch_byte_order (current_gdbarch) == BFD_ENDIAN_BIG) | |
1323 | gen_left_shift (ax, end - (offset + op_size)); | |
1324 | else | |
1325 | gen_left_shift (ax, offset - start); | |
1326 | ||
1327 | if (!last_frag) | |
1328 | /* Bring the copy of the address up to the top. */ | |
1329 | ax_simple (ax, aop_swap); | |
1330 | ||
1331 | offset += op_size; | |
1332 | fragment_count++; | |
1333 | } | |
1334 | } | |
1335 | ||
1336 | /* Generate enough bitwise `or' operations to combine all the | |
1337 | fragments we left on the stack. */ | |
1338 | while (fragment_count-- > 1) | |
1339 | ax_simple (ax, aop_bit_or); | |
1340 | ||
1341 | /* Sign- or zero-extend the value as appropriate. */ | |
1342 | ((TYPE_UNSIGNED (type) ? ax_zero_ext : ax_ext) (ax, end - start)); | |
1343 | ||
1344 | /* This is *not* an lvalue. Ugh. */ | |
1345 | value->kind = axs_rvalue; | |
1346 | value->type = type; | |
1347 | } | |
1348 | ||
1349 | ||
1350 | /* Generate code to reference the member named FIELD of a structure or | |
1351 | union. The top of the stack, as described by VALUE, should have | |
1352 | type (pointer to a)* struct/union. OPERATOR_NAME is the name of | |
1353 | the operator being compiled, and OPERAND_NAME is the kind of thing | |
1354 | it operates on; we use them in error messages. */ | |
1355 | static void | |
1356 | gen_struct_ref (struct agent_expr *ax, struct axs_value *value, char *field, | |
1357 | char *operator_name, char *operand_name) | |
1358 | { | |
1359 | struct type *type; | |
1360 | int i; | |
1361 | ||
1362 | /* Follow pointers until we reach a non-pointer. These aren't the C | |
1363 | semantics, but they're what the normal GDB evaluator does, so we | |
1364 | should at least be consistent. */ | |
1365 | while (TYPE_CODE (value->type) == TYPE_CODE_PTR) | |
1366 | { | |
1367 | gen_usual_unary (ax, value); | |
1368 | gen_deref (ax, value); | |
1369 | } | |
1370 | type = check_typedef (value->type); | |
1371 | ||
1372 | /* This must yield a structure or a union. */ | |
1373 | if (TYPE_CODE (type) != TYPE_CODE_STRUCT | |
1374 | && TYPE_CODE (type) != TYPE_CODE_UNION) | |
1375 | error (_("The left operand of `%s' is not a %s."), | |
1376 | operator_name, operand_name); | |
1377 | ||
1378 | /* And it must be in memory; we don't deal with structure rvalues, | |
1379 | or structures living in registers. */ | |
1380 | if (value->kind != axs_lvalue_memory) | |
1381 | error (_("Structure does not live in memory.")); | |
1382 | ||
1383 | i = find_field (type, field); | |
1384 | ||
1385 | /* Is this a bitfield? */ | |
1386 | if (TYPE_FIELD_PACKED (type, i)) | |
1387 | gen_bitfield_ref (ax, value, TYPE_FIELD_TYPE (type, i), | |
1388 | TYPE_FIELD_BITPOS (type, i), | |
1389 | (TYPE_FIELD_BITPOS (type, i) | |
1390 | + TYPE_FIELD_BITSIZE (type, i))); | |
1391 | else | |
1392 | { | |
1393 | gen_offset (ax, TYPE_FIELD_BITPOS (type, i) / TARGET_CHAR_BIT); | |
1394 | value->kind = axs_lvalue_memory; | |
1395 | value->type = TYPE_FIELD_TYPE (type, i); | |
1396 | } | |
1397 | } | |
1398 | ||
1399 | ||
1400 | /* Generate code for GDB's magical `repeat' operator. | |
1401 | LVALUE @ INT creates an array INT elements long, and whose elements | |
1402 | have the same type as LVALUE, located in memory so that LVALUE is | |
1403 | its first element. For example, argv[0]@argc gives you the array | |
1404 | of command-line arguments. | |
1405 | ||
1406 | Unfortunately, because we have to know the types before we actually | |
1407 | have a value for the expression, we can't implement this perfectly | |
1408 | without changing the type system, having values that occupy two | |
1409 | stack slots, doing weird things with sizeof, etc. So we require | |
1410 | the right operand to be a constant expression. */ | |
1411 | static void | |
1412 | gen_repeat (union exp_element **pc, struct agent_expr *ax, | |
1413 | struct axs_value *value) | |
1414 | { | |
1415 | struct axs_value value1; | |
1416 | /* We don't want to turn this into an rvalue, so no conversions | |
1417 | here. */ | |
1418 | gen_expr (pc, ax, &value1); | |
1419 | if (value1.kind != axs_lvalue_memory) | |
1420 | error (_("Left operand of `@' must be an object in memory.")); | |
1421 | ||
1422 | /* Evaluate the length; it had better be a constant. */ | |
1423 | { | |
1424 | struct value *v = const_expr (pc); | |
1425 | int length; | |
1426 | ||
1427 | if (!v) | |
1428 | error (_("Right operand of `@' must be a constant, in agent expressions.")); | |
1429 | if (TYPE_CODE (value_type (v)) != TYPE_CODE_INT) | |
1430 | error (_("Right operand of `@' must be an integer.")); | |
1431 | length = value_as_long (v); | |
1432 | if (length <= 0) | |
1433 | error (_("Right operand of `@' must be positive.")); | |
1434 | ||
1435 | /* The top of the stack is already the address of the object, so | |
1436 | all we need to do is frob the type of the lvalue. */ | |
1437 | { | |
1438 | /* FIXME-type-allocation: need a way to free this type when we are | |
1439 | done with it. */ | |
1440 | struct type *range | |
1441 | = create_range_type (0, builtin_type_int, 0, length - 1); | |
1442 | struct type *array = create_array_type (0, value1.type, range); | |
1443 | ||
1444 | value->kind = axs_lvalue_memory; | |
1445 | value->type = array; | |
1446 | } | |
1447 | } | |
1448 | } | |
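| /* Concretely (the session text is only illustrative): `buf@32' is | |
|    accepted here because 32 is a constant the translator can see, | |
|    whereas `buf@len' fails with the "must be a constant" error above | |
|    even when `len' holds a sensible value at run time, since the array | |
|    type has to be built before any bytecode executes.  */ | |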
1449 | ||
1450 | ||
1451 | /* Emit code for the `sizeof' operator. | |
1452 | *PC should point at the start of the operand expression; we advance it | |
1453 | to the first instruction after the operand. */ | |
1454 | static void | |
1455 | gen_sizeof (union exp_element **pc, struct agent_expr *ax, | |
1456 | struct axs_value *value) | |
1457 | { | |
1458 | /* We don't care about the value of the operand expression; we only | |
1459 | care about its type. However, in the current arrangement, the | |
1460 | only way to find an expression's type is to generate code for it. | |
1461 | So we generate code for the operand, and then throw it away, | |
1462 | replacing it with code that simply pushes its size. */ | |
1463 | int start = ax->len; | |
1464 | gen_expr (pc, ax, value); | |
1465 | ||
1466 | /* Throw away the code we just generated. */ | |
1467 | ax->len = start; | |
1468 | ||
1469 | ax_const_l (ax, TYPE_LENGTH (value->type)); | |
1470 | value->kind = axs_rvalue; | |
1471 | value->type = builtin_type_int; | |
1472 | } | |
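| /* For example, compiling `sizeof (*ptr)' first generates the code for | |
|    `*ptr' (so that VALUE->type gets filled in), then rewinds AX to | |
|    START, so the only bytecode left is a single constant push of the | |
|    operand type's length.  Any work the operand would have done is | |
|    discarded along with its code.  */ | |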
1473 | \f | |
1474 | ||
1475 | /* Generating bytecode from GDB expressions: general recursive thingy */ | |
1476 | ||
1477 | /* XXX: i18n */ | |
1478 | /* A gen_expr function written by a Gen-X'er guy.  Append code for the | |
1479 | subexpression starting at *PC to AX, and describe the result in *VALUE.  */ | |
1480 | static void | |
1481 | gen_expr (union exp_element **pc, struct agent_expr *ax, | |
1482 | struct axs_value *value) | |
1483 | { | |
1484 | /* Used to hold the descriptions of operand expressions. */ | |
1485 | struct axs_value value1, value2; | |
1486 | enum exp_opcode op = (*pc)[0].opcode; | |
1487 | ||
1488 | /* If we're looking at a constant expression, just push its value. */ | |
1489 | { | |
1490 | struct value *v = maybe_const_expr (pc); | |
1491 | ||
1492 | if (v) | |
1493 | { | |
1494 | ax_const_l (ax, value_as_long (v)); | |
1495 | value->kind = axs_rvalue; | |
1496 | value->type = check_typedef (value_type (v)); | |
1497 | return; | |
1498 | } | |
1499 | } | |
1500 | ||
1501 | /* Otherwise, go ahead and generate code for it. */ | |
1502 | switch (op) | |
1503 | { | |
1504 | /* Binary arithmetic operators. */ | |
1505 | case BINOP_ADD: | |
1506 | case BINOP_SUB: | |
1507 | case BINOP_MUL: | |
1508 | case BINOP_DIV: | |
1509 | case BINOP_REM: | |
1510 | case BINOP_SUBSCRIPT: | |
1511 | case BINOP_BITWISE_AND: | |
1512 | case BINOP_BITWISE_IOR: | |
1513 | case BINOP_BITWISE_XOR: | |
1514 | (*pc)++; | |
1515 | gen_expr (pc, ax, &value1); | |
1516 | gen_usual_unary (ax, &value1); | |
1517 | gen_expr (pc, ax, &value2); | |
1518 | gen_usual_unary (ax, &value2); | |
1519 | gen_usual_arithmetic (ax, &value1, &value2); | |
1520 | switch (op) | |
1521 | { | |
1522 | case BINOP_ADD: | |
1523 | gen_add (ax, value, &value1, &value2, "addition"); | |
1524 | break; | |
1525 | case BINOP_SUB: | |
1526 | gen_sub (ax, value, &value1, &value2); | |
1527 | break; | |
1528 | case BINOP_MUL: | |
1529 | gen_binop (ax, value, &value1, &value2, | |
1530 | aop_mul, aop_mul, 1, "multiplication"); | |
1531 | break; | |
1532 | case BINOP_DIV: | |
1533 | gen_binop (ax, value, &value1, &value2, | |
1534 | aop_div_signed, aop_div_unsigned, 1, "division"); | |
1535 | break; | |
1536 | case BINOP_REM: | |
1537 | gen_binop (ax, value, &value1, &value2, | |
1538 | aop_rem_signed, aop_rem_unsigned, 1, "remainder"); | |
1539 | break; | |
1540 | case BINOP_SUBSCRIPT: | |
1541 | gen_add (ax, value, &value1, &value2, "array subscripting"); | |
1542 | if (TYPE_CODE (value->type) != TYPE_CODE_PTR) | |
1543 | error (_("Invalid combination of types in array subscripting.")); | |
1544 | gen_deref (ax, value); | |
1545 | break; | |
1546 | case BINOP_BITWISE_AND: | |
1547 | gen_binop (ax, value, &value1, &value2, | |
1548 | aop_bit_and, aop_bit_and, 0, "bitwise and"); | |
1549 | break; | |
1550 | ||
1551 | case BINOP_BITWISE_IOR: | |
1552 | gen_binop (ax, value, &value1, &value2, | |
1553 | aop_bit_or, aop_bit_or, 0, "bitwise or"); | |
1554 | break; | |
1555 | ||
1556 | case BINOP_BITWISE_XOR: | |
1557 | gen_binop (ax, value, &value1, &value2, | |
1558 | aop_bit_xor, aop_bit_xor, 0, "bitwise exclusive-or"); | |
1559 | break; | |
1560 | ||
1561 | default: | |
1562 | /* We should only list operators in the outer case statement | |
1563 | that we actually handle in the inner case statement. */ | |
1564 | internal_error (__FILE__, __LINE__, | |
1565 | _("gen_expr: op case sets don't match")); | |
1566 | } | |
1567 | break; | |
1568 | ||
1569 | /* Note that we need to be a little subtle about generating code | |
1570 | for comma. In C, we can do some optimizations here because | |
1571 | we know the left operand is only being evaluated for effect. | |
1572 | However, if the tracing kludge is in effect, then we always | |
1573 | need to evaluate the left hand side fully, so that all the | |
1574 | variables it mentions get traced. */ | |
1575 | case BINOP_COMMA: | |
1576 | (*pc)++; | |
1577 | gen_expr (pc, ax, &value1); | |
1578 | /* Don't just dispose of the left operand. We might be tracing, | |
1579 | in which case we want to emit code to trace it if it's an | |
1580 | lvalue. */ | |
1581 | gen_traced_pop (ax, &value1); | |
1582 | gen_expr (pc, ax, value); | |
1583 | /* It's the consumer's responsibility to trace the right operand. */ | |
1584 | break; | |
1585 | ||
1586 | case OP_LONG: /* some integer constant */ | |
1587 | { | |
1588 | struct type *type = (*pc)[1].type; | |
1589 | LONGEST k = (*pc)[2].longconst; | |
1590 | (*pc) += 4; | |
1591 | gen_int_literal (ax, value, k, type); | |
1592 | } | |
1593 | break; | |
1594 | ||
1595 | case OP_VAR_VALUE: | |
1596 | gen_var_ref (ax, value, (*pc)[2].symbol); | |
1597 | (*pc) += 4; | |
1598 | break; | |
1599 | ||
1600 | case OP_REGISTER: | |
1601 | { | |
1602 | const char *name = &(*pc)[2].string; | |
1603 | int reg; | |
1604 | (*pc) += 4 + BYTES_TO_EXP_ELEM ((*pc)[1].longconst + 1); | |
1605 | reg = frame_map_name_to_regnum (deprecated_safe_get_selected_frame (), | |
1606 | name, strlen (name)); | |
1607 | if (reg == -1) | |
1608 | internal_error (__FILE__, __LINE__, | |
1609 | _("Register $%s not available"), name); | |
1610 | value->kind = axs_lvalue_register; | |
1611 | value->u.reg = reg; | |
1612 | value->type = register_type (current_gdbarch, reg); | |
1613 | } | |
1614 | break; | |
1615 | ||
1616 | case OP_INTERNALVAR: | |
1617 | error (_("GDB agent expressions cannot use convenience variables.")); | |
1618 | ||
1619 | /* Weirdo operator: see comments for gen_repeat for details. */ | |
1620 | case BINOP_REPEAT: | |
1621 | /* Note that gen_repeat handles its own argument evaluation. */ | |
1622 | (*pc)++; | |
1623 | gen_repeat (pc, ax, value); | |
1624 | break; | |
1625 | ||
1626 | case UNOP_CAST: | |
1627 | { | |
1628 | struct type *type = (*pc)[1].type; | |
1629 | (*pc) += 3; | |
1630 | gen_expr (pc, ax, value); | |
1631 | gen_cast (ax, value, type); | |
1632 | } | |
1633 | break; | |
1634 | ||
1635 | case UNOP_MEMVAL: | |
1636 | { | |
1637 | struct type *type = check_typedef ((*pc)[1].type); | |
1638 | (*pc) += 3; | |
1639 | gen_expr (pc, ax, value); | |
1640 | /* I'm not sure I understand UNOP_MEMVAL entirely. I think | |
1641 | it's just a hack for dealing with minsyms; you take some | |
1642 | integer constant, pretend it's the address of an lvalue of | |
1643 | the given type, and dereference it. */ | |
1644 | if (value->kind != axs_rvalue) | |
1645 | /* This would be weird. */ | |
1646 | internal_error (__FILE__, __LINE__, | |
1647 | _("gen_expr: OP_MEMVAL operand isn't an rvalue???")); | |
1648 | value->type = type; | |
1649 | value->kind = axs_lvalue_memory; | |
1650 | } | |
1651 | break; | |
1652 | ||
1653 | case UNOP_PLUS: | |
1654 | (*pc)++; | |
1655 | /* + FOO is equivalent to 0 + FOO, which can be optimized. */ | |
1656 | gen_expr (pc, ax, value); | |
1657 | gen_usual_unary (ax, value); | |
1658 | break; | |
1659 | ||
1660 | case UNOP_NEG: | |
1661 | (*pc)++; | |
1662 | /* -FOO is equivalent to 0 - FOO. */ | |
1663 | gen_int_literal (ax, &value1, (LONGEST) 0, builtin_type_int); | |
1664 | gen_usual_unary (ax, &value1); /* shouldn't do much */ | |
1665 | gen_expr (pc, ax, &value2); | |
1666 | gen_usual_unary (ax, &value2); | |
1667 | gen_usual_arithmetic (ax, &value1, &value2); | |
1668 | gen_sub (ax, value, &value1, &value2); | |
1669 | break; | |
1670 | ||
1671 | case UNOP_LOGICAL_NOT: | |
1672 | (*pc)++; | |
1673 | gen_expr (pc, ax, value); | |
1674 | gen_logical_not (ax, value); | |
1675 | break; | |
1676 | ||
1677 | case UNOP_COMPLEMENT: | |
1678 | (*pc)++; | |
1679 | gen_expr (pc, ax, value); | |
1680 | gen_complement (ax, value); | |
1681 | break; | |
1682 | ||
1683 | case UNOP_IND: | |
1684 | (*pc)++; | |
1685 | gen_expr (pc, ax, value); | |
1686 | gen_usual_unary (ax, value); | |
1687 | if (TYPE_CODE (value->type) != TYPE_CODE_PTR) | |
1688 | error (_("Argument of unary `*' is not a pointer.")); | |
1689 | gen_deref (ax, value); | |
1690 | break; | |
1691 | ||
1692 | case UNOP_ADDR: | |
1693 | (*pc)++; | |
1694 | gen_expr (pc, ax, value); | |
1695 | gen_address_of (ax, value); | |
1696 | break; | |
1697 | ||
1698 | case UNOP_SIZEOF: | |
1699 | (*pc)++; | |
1700 | /* Notice that gen_sizeof handles its own operand, unlike most | |
1701 | of the other unary operator functions. This is because we | |
1702 | have to throw away the code we generate. */ | |
1703 | gen_sizeof (pc, ax, value); | |
1704 | break; | |
1705 | ||
1706 | case STRUCTOP_STRUCT: | |
1707 | case STRUCTOP_PTR: | |
1708 | { | |
1709 | int length = (*pc)[1].longconst; | |
1710 | char *name = &(*pc)[2].string; | |
1711 | ||
1712 | (*pc) += 4 + BYTES_TO_EXP_ELEM (length + 1); | |
1713 | gen_expr (pc, ax, value); | |
1714 | if (op == STRUCTOP_STRUCT) | |
1715 | gen_struct_ref (ax, value, name, ".", "structure or union"); | |
1716 | else if (op == STRUCTOP_PTR) | |
1717 | gen_struct_ref (ax, value, name, "->", | |
1718 | "pointer to a structure or union"); | |
1719 | else | |
1720 | /* If this `if' chain doesn't handle it, then the case list | |
1721 | shouldn't mention it, and we shouldn't be here. */ | |
1722 | internal_error (__FILE__, __LINE__, | |
1723 | _("gen_expr: unhandled struct case")); | |
1724 | } | |
1725 | break; | |
1726 | ||
1727 | case OP_TYPE: | |
1728 | error (_("Attempt to use a type name as an expression.")); | |
1729 | ||
1730 | default: | |
1731 | error (_("Unsupported operator in expression.")); | |
1732 | } | |
1733 | } | |
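| /* A hedged walk-through of the dispatch above for `x + 1' (GDB | |
|    expressions are stored in prefix order): the outer call sees | |
|    BINOP_ADD, recurses for `x' (OP_VAR_VALUE -> gen_var_ref) and applies | |
|    the usual unary conversions, recurses for `1' (pushed as a constant, | |
|    either through the maybe_const_expr check at the top or the OP_LONG | |
|    case), then runs gen_usual_arithmetic on the pair and finishes with | |
|    gen_add.  */ | |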
1734 | \f | |
1735 | ||
1736 | ||
1737 | /* Generating bytecode from GDB expressions: driver */ | |
1738 | ||
1739 | /* Given a GDB expression EXPR, produce a string of agent bytecode | |
1740 | which computes its value. Return the agent expression, and set | |
1741 | *VALUE to describe its type and whether it's an lvalue or rvalue. */ | |
1742 | struct agent_expr * | |
1743 | expr_to_agent (struct expression *expr, struct axs_value *value) | |
1744 | { | |
1745 | struct cleanup *old_chain = 0; | |
1746 | struct agent_expr *ax = new_agent_expr (0); | |
1747 | union exp_element *pc; | |
1748 | ||
1749 | old_chain = make_cleanup_free_agent_expr (ax); | |
1750 | ||
1751 | pc = expr->elts; | |
1752 | trace_kludge = 0; | |
1753 | gen_expr (&pc, ax, value); | |
1754 | ||
1755 | /* We have successfully built the agent expr, so cancel the cleanup | |
1756 | request. If we add more cleanups that we always want done, this | |
1757 | will have to get more complicated. */ | |
1758 | discard_cleanups (old_chain); | |
1759 | return ax; | |
1760 | } | |
1761 | ||
1762 | ||
1763 | #if 0 /* not used */ | |
1764 | /* Given a GDB expression EXPR denoting an lvalue in memory, produce a | |
1765 | string of agent bytecode which will leave its address and size on | |
1766 | the top of the stack. Return the agent expression. | |
1767 | ||
1768 | Not sure this function is useful at all. */ | |
1769 | struct agent_expr * | |
1770 | expr_to_address_and_size (struct expression *expr) | |
1771 | { | |
1772 | struct axs_value value; | |
1773 | struct agent_expr *ax = expr_to_agent (expr, &value); | |
1774 | ||
1775 | /* Complain if the result is not a memory lvalue. */ | |
1776 | if (value.kind != axs_lvalue_memory) | |
1777 | { | |
1778 | free_agent_expr (ax); | |
1779 | error (_("Expression does not denote an object in memory.")); | |
1780 | } | |
1781 | ||
1782 | /* Push the object's size on the stack. */ | |
1783 | ax_const_l (ax, TYPE_LENGTH (value.type)); | |
1784 | ||
1785 | return ax; | |
1786 | } | |
1787 | #endif | |
1788 | ||
1789 | /* Given a GDB expression EXPR, return bytecode to trace its value. | |
1790 | The result will use the `trace' and `trace_quick' bytecodes to | |
1791 | record the value of all memory touched by the expression. The | |
1792 | caller can then use the ax_reqs function to discover which | |
1793 | registers it relies upon. */ | |
1794 | struct agent_expr * | |
1795 | gen_trace_for_expr (CORE_ADDR scope, struct expression *expr) | |
1796 | { | |
1797 | struct cleanup *old_chain = 0; | |
1798 | struct agent_expr *ax = new_agent_expr (scope); | |
1799 | union exp_element *pc; | |
1800 | struct axs_value value; | |
1801 | ||
1802 | old_chain = make_cleanup_free_agent_expr (ax); | |
1803 | ||
1804 | pc = expr->elts; | |
1805 | trace_kludge = 1; | |
1806 | gen_expr (&pc, ax, &value); | |
1807 | ||
1808 | /* Make sure we record the final object, and get rid of it. */ | |
1809 | gen_traced_pop (ax, &value); | |
1810 | ||
1811 | /* Oh, and terminate. */ | |
1812 | ax_simple (ax, aop_end); | |
1813 | ||
1814 | /* We have successfully built the agent expr, so cancel the cleanup | |
1815 | request. If we add more cleanups that we always want done, this | |
1816 | will have to get more complicated. */ | |
1817 | discard_cleanups (old_chain); | |
1818 | return ax; | |
1819 | } | |
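| /* A minimal caller sketch (not part of this file's interface; it | |
|    assumes the ax_reqs/agent_reqs declarations in ax.h of this vintage, | |
|    so double-check the field names against your tree): compile an | |
|    expression for tracing and ask what it needs from the target.  */ | |
| #if 0 /* illustrative only */ | |
| static void | |
| example_trace_expr (CORE_ADDR scope, struct expression *expr) | |
| { | |
|   struct agent_expr *ax = gen_trace_for_expr (scope, expr); | |
|   struct agent_reqs reqs; | |
| | |
|   ax_reqs (ax, &reqs);          /* Which registers/memory does it touch?  */ | |
|   if (reqs.flaw != agent_flaw_none) | |
|     error (_("Malformed agent expression.")); | |
|   ax_print (gdb_stdout, ax);    /* Or ship the bytecode to the target.  */ | |
|   free_agent_expr (ax); | |
| } | |
| #endif | |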
1820 | ||
1821 | static void | |
1822 | agent_command (char *exp, int from_tty) | |
1823 | { | |
1824 | struct cleanup *old_chain = 0; | |
1825 | struct expression *expr; | |
1826 | struct agent_expr *agent; | |
1827 | struct frame_info *fi = get_current_frame (); /* need current scope */ | |
1828 | ||
1829 | /* We don't deal with overlay debugging at the moment. We need to | |
1830 | think more carefully about this. If you copy this code into | |
1831 | another command, change the error message; the user shouldn't | |
1832 | have to know anything about agent expressions. */ | |
1833 | if (overlay_debugging) | |
1834 | error (_("GDB can't do agent expression translation with overlays.")); | |
1835 | ||
1836 | if (exp == 0) | |
1837 | error_no_arg (_("expression to translate")); | |
1838 | ||
1839 | expr = parse_expression (exp); | |
1840 | old_chain = make_cleanup (free_current_contents, &expr); | |
1841 | agent = gen_trace_for_expr (get_frame_pc (fi), expr); | |
1842 | make_cleanup_free_agent_expr (agent); | |
1843 | ax_print (gdb_stdout, agent); | |
1844 | ||
1845 | /* It would be nice to call ax_reqs here to gather some general info | |
1846 | about the expression, and then print out the result. */ | |
1847 | ||
1848 | do_cleanups (old_chain); | |
1849 | dont_repeat (); | |
1850 | } | |
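| /* Usage note (the expression is just an example): from a session | |
|    stopped in some frame, | |
| | |
|        (gdb) maint agent i * i + 1 | |
| | |
|    parses the expression, compiles it with gen_trace_for_expr against | |
|    the current frame's PC, and prints the bytecode with ax_print.  The | |
|    exact listing depends on the expression, the target, and the | |
|    register numbering.  */ | |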
1851 | \f | |
1852 | ||
1853 | /* Initialization code. */ | |
1854 | ||
1855 | void _initialize_ax_gdb (void); | |
1856 | void | |
1857 | _initialize_ax_gdb (void) | |
1858 | { | |
1859 | add_cmd ("agent", class_maintenance, agent_command, | |
1860 | _("Translate an expression into remote agent bytecode."), | |
1861 | &maintenancelist); | |
1862 | } |