/* Low level packing and unpacking of values for GDB, the GNU Debugger.

   Copyright (C) 1986-2021 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "arch-utils.h"
#include "symtab.h"
#include "gdbtypes.h"
#include "value.h"
#include "gdbcore.h"
#include "command.h"
#include "gdbcmd.h"
#include "target.h"
#include "language.h"
#include "demangle.h"
#include "regcache.h"
#include "block.h"
#include "target-float.h"
#include "objfiles.h"
#include "valprint.h"
#include "cli/cli-decode.h"
#include "extension.h"
#include <ctype.h>
#include "tracepoint.h"
#include "cp-abi.h"
#include "user-regs.h"
#include <algorithm>
#include "completer.h"
#include "gdbsupport/selftest.h"
#include "gdbsupport/array-view.h"
#include "cli/cli-style.h"
#include "expop.h"
#include "inferior.h"

/* Definition of a user function.  */
struct internal_function
{
  /* The name of the function.  It is a bit odd to have this in the
     function itself -- the user might use a differently-named
     convenience variable to hold the function.  */
  char *name;

  /* The handler.  */
  internal_function_fn handler;

  /* User data for the handler.  */
  void *cookie;
};

/* Defines an [OFFSET, OFFSET + LENGTH) range.  */

struct range
{
  /* Lowest offset in the range.  */
  LONGEST offset;

  /* Length of the range.  */
  LONGEST length;

  /* Returns true if THIS is strictly less than OTHER, useful for
     searching.  We keep ranges sorted by offset and coalesce
     overlapping and contiguous ranges, so this just compares the
     starting offset.  */

  bool operator< (const range &other) const
  {
    return offset < other.offset;
  }

  /* Returns true if THIS is equal to OTHER.  */
  bool operator== (const range &other) const
  {
    return offset == other.offset && length == other.length;
  }
};

/* Returns true if the ranges defined by [offset1, offset1+len1) and
   [offset2, offset2+len2) overlap.  */

static int
ranges_overlap (LONGEST offset1, LONGEST len1,
                LONGEST offset2, LONGEST len2)
{
  ULONGEST h, l;

  l = std::max (offset1, offset2);
  h = std::min (offset1 + len1, offset2 + len2);
  return (l < h);
}
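
/* For instance, ranges_overlap (0, 4, 4, 4) is false because the
   ranges are half-open ([0, 4) and [4, 8) only touch), while
   ranges_overlap (0, 5, 4, 4) is true since [0, 5) and [4, 8) share
   the element 4.  */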

/* Returns true if RANGES contains any range that overlaps [OFFSET,
   OFFSET+LENGTH).  */

static int
ranges_contain (const std::vector<range> &ranges, LONGEST offset,
                LONGEST length)
{
  range what;

  what.offset = offset;
  what.length = length;

  /* We keep ranges sorted by offset and coalesce overlapping and
     contiguous ranges, so to check if a range list contains a given
     range, we can do a binary search for the position the given range
     would be inserted if we only considered the starting OFFSET of
     ranges.  We call that position I.  Since we also have LENGTH to
     care for (this is a range after all), we need to check if the
     _previous_ range overlaps the I range.  E.g.,

         R
         |---|
       |---|    |---|  |------| ... |--|
       0        1      2            N

       I=1

     In the case above, the binary search would return `I=1', meaning,
     this OFFSET should be inserted at position 1, and the current
     position 1 should be pushed further (and before 2).  But, `0'
     overlaps with R.

     Then we need to check if the I range overlaps the I range itself.
     E.g.,

              R
              |---|
       |---|    |---|  |-------| ... |--|
       0        1      2             N

       I=1
  */


  auto i = std::lower_bound (ranges.begin (), ranges.end (), what);

  if (i > ranges.begin ())
    {
      const struct range &bef = *(i - 1);

      if (ranges_overlap (bef.offset, bef.length, offset, length))
        return 1;
    }

  if (i < ranges.end ())
    {
      const struct range &r = *i;

      if (ranges_overlap (r.offset, r.length, offset, length))
        return 1;
    }

  return 0;
}
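
/* Concrete example of the lookup above: with RANGES holding [0, 4)
   and [8, 12), ranges_contain (ranges, 3, 3) returns 1 because the
   queried range [3, 6) overlaps [0, 4), i.e. the range just before
   position I, whereas ranges_contain (ranges, 5, 2) returns 0 because
   [5, 7) falls entirely in the gap between the two stored ranges.  */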

static struct cmd_list_element *functionlist;

/* Note that the fields in this structure are arranged to save a bit
   of memory.  */

struct value
{
  explicit value (struct type *type_)
    : modifiable (1),
      lazy (1),
      initialized (1),
      stack (0),
      type (type_),
      enclosing_type (type_)
  {
  }

  ~value ()
  {
    if (VALUE_LVAL (this) == lval_computed)
      {
        const struct lval_funcs *funcs = location.computed.funcs;

        if (funcs->free_closure)
          funcs->free_closure (this);
      }
    else if (VALUE_LVAL (this) == lval_xcallable)
      delete location.xm_worker;
  }

  DISABLE_COPY_AND_ASSIGN (value);

  /* Type of value; either not an lval, or one of the various
     different possible kinds of lval.  */
  enum lval_type lval = not_lval;

  /* Is it modifiable?  Only relevant if lval != not_lval.  */
  unsigned int modifiable : 1;

  /* If zero, contents of this value are in the contents field.  If
     nonzero, contents are in inferior.  If the lval field is lval_memory,
     the contents are in inferior memory at location.address plus offset.
     The lval field may also be lval_register.

     WARNING: This field is used by the code which handles watchpoints
     (see breakpoint.c) to decide whether a particular value can be
     watched by hardware watchpoints.  If the lazy flag is set for
     some member of a value chain, it is assumed that this member of
     the chain doesn't need to be watched as part of watching the
     value itself.  This is how GDB avoids watching the entire struct
     or array when the user wants to watch a single struct member or
     array element.  If you ever change the way lazy flag is set and
     reset, be sure to consider this use as well!  */
  unsigned int lazy : 1;

  /* If value is a variable, is it initialized or not.  */
  unsigned int initialized : 1;

  /* If value is from the stack.  If this is set, read_stack will be
     used instead of read_memory to enable extra caching.  */
  unsigned int stack : 1;

  /* Location of value (if lval).  */
  union
  {
    /* If lval == lval_memory, this is the address in the inferior.  */
    CORE_ADDR address;

    /* If lval == lval_register, the value is from a register.  */
    struct
    {
      /* Register number.  */
      int regnum;
      /* Frame ID of "next" frame to which a register value is relative.
         If the register value is found relative to frame F, then the
         frame id of F->next will be stored in next_frame_id.  */
      struct frame_id next_frame_id;
    } reg;

    /* Pointer to internal variable.  */
    struct internalvar *internalvar;

    /* Pointer to xmethod worker.  */
    struct xmethod_worker *xm_worker;

    /* If lval == lval_computed, this is a set of function pointers
       to use to access and describe the value, and a closure pointer
       for them to use.  */
    struct
    {
      /* Functions to call.  */
      const struct lval_funcs *funcs;

      /* Closure for those functions to use.  */
      void *closure;
    } computed;
  } location {};

  /* Describes offset of a value within lval of a structure in target
     addressable memory units.  Note also the member embedded_offset
     below.  */
  LONGEST offset = 0;

  /* Only used for bitfields; number of bits contained in them.  */
  LONGEST bitsize = 0;

  /* Only used for bitfields; position of start of field.  For
     little-endian targets, it is the position of the LSB.  For
     big-endian targets, it is the position of the MSB.  */
  LONGEST bitpos = 0;

  /* The number of references to this value.  When a value is created,
     the value chain holds a reference, so REFERENCE_COUNT is 1.  If
     release_value is called, this value is removed from the chain but
     the caller of release_value now has a reference to this value.
     The caller must arrange for a call to value_free later.  */
  int reference_count = 1;

  /* Only used for bitfields; the containing value.  This allows a
     single read from the target when displaying multiple
     bitfields.  */
  value_ref_ptr parent;

  /* Type of the value.  */
  struct type *type;

  /* If a value represents a C++ object, then the `type' field gives
     the object's compile-time type.  If the object actually belongs
     to some class derived from `type', perhaps with other base
     classes and additional members, then `type' is just a subobject
     of the real thing, and the full object is probably larger than
     `type' would suggest.

     If `type' is a dynamic class (i.e. one with a vtable), then GDB
     can actually determine the object's run-time type by looking at
     the run-time type information in the vtable.  When this
     information is available, we may elect to read in the entire
     object, for several reasons:

     - When printing the value, the user would probably rather see the
       full object, not just the limited portion apparent from the
       compile-time type.

     - If `type' has virtual base classes, then even printing `type'
       alone may require reaching outside the `type' portion of the
       object to wherever the virtual base class has been stored.

     When we store the entire object, `enclosing_type' is the run-time
     type -- the complete object -- and `embedded_offset' is the
     offset of `type' within that larger type, in target addressable memory
     units.  The value_contents() macro takes `embedded_offset' into account,
     so most GDB code continues to see the `type' portion of the value, just
     as the inferior would.

     If `type' is a pointer to an object, then `enclosing_type' is a
     pointer to the object's run-time type, and `pointed_to_offset' is
     the offset in target addressable memory units from the full object
     to the pointed-to object -- that is, the value `embedded_offset' would
     have if we followed the pointer and fetched the complete object.
     (I don't really see the point.  Why not just determine the
     run-time type when you indirect, and avoid the special case?  The
     contents don't matter until you indirect anyway.)

     If we're not doing anything fancy, `enclosing_type' is equal to
     `type', and `embedded_offset' is zero, so everything works
     normally.  */
  struct type *enclosing_type;
  LONGEST embedded_offset = 0;
  LONGEST pointed_to_offset = 0;

  /* Actual contents of the value.  Target byte-order.  NULL or not
     valid if lazy is nonzero.  */
  gdb::unique_xmalloc_ptr<gdb_byte> contents;

  /* Unavailable ranges in CONTENTS.  We mark unavailable ranges,
     rather than available, since the common and default case is for a
     value to be available.  This is filled in at value read time.
     The unavailable ranges are tracked in bits.  Note that a contents
     bit that has been optimized out doesn't really exist in the
     program, so it can't be marked unavailable either.  */
  std::vector<range> unavailable;

  /* Likewise, but for optimized out contents (a chunk of the value of
     a variable that does not actually exist in the program).  If LVAL
     is lval_register, this is a register ($pc, $sp, etc., never a
     program variable) that has not been saved in the frame.  Not
     saved registers and optimized-out program variables values are
     treated pretty much the same, except not-saved registers have a
     different string representation and related error strings.  */
  std::vector<range> optimized_out;
};
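
/* As an example of the `enclosing_type'/`embedded_offset' machinery
   described above: if a variable has declared type `Base' but the
   object in the inferior really belongs to a derived class `Derived',
   then after the full object has been read in, `type' is still
   `Base', `enclosing_type' is `Derived', and `embedded_offset' is the
   offset of the `Base' subobject within `Derived' (often zero).  */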

/* See value.h.  */

struct gdbarch *
get_value_arch (const struct value *value)
{
  return value_type (value)->arch ();
}

int
value_bits_available (const struct value *value, LONGEST offset, LONGEST length)
{
  gdb_assert (!value->lazy);

  return !ranges_contain (value->unavailable, offset, length);
}

int
value_bytes_available (const struct value *value,
                       LONGEST offset, LONGEST length)
{
  return value_bits_available (value,
                               offset * TARGET_CHAR_BIT,
                               length * TARGET_CHAR_BIT);
}

int
value_bits_any_optimized_out (const struct value *value, int bit_offset, int bit_length)
{
  gdb_assert (!value->lazy);

  return ranges_contain (value->optimized_out, bit_offset, bit_length);
}

int
value_entirely_available (struct value *value)
{
  /* We can only tell whether the whole value is available when we try
     to read it.  */
  if (value->lazy)
    value_fetch_lazy (value);

  if (value->unavailable.empty ())
    return 1;
  return 0;
}

/* Returns true if VALUE is entirely covered by RANGES.  If the value
   is lazy, it'll be read now.  Note that RANGES refers to a vector
   inside VALUE itself, so reading the value may update it.  */

static int
value_entirely_covered_by_range_vector (struct value *value,
                                        const std::vector<range> &ranges)
{
  /* We can only tell whether the whole value is optimized out /
     unavailable when we try to read it.  */
  if (value->lazy)
    value_fetch_lazy (value);

  if (ranges.size () == 1)
    {
      const struct range &t = ranges[0];

      if (t.offset == 0
          && t.length == (TARGET_CHAR_BIT
                          * TYPE_LENGTH (value_enclosing_type (value))))
        return 1;
    }

  return 0;
}

int
value_entirely_unavailable (struct value *value)
{
  return value_entirely_covered_by_range_vector (value, value->unavailable);
}

int
value_entirely_optimized_out (struct value *value)
{
  return value_entirely_covered_by_range_vector (value, value->optimized_out);
}

/* Insert into the vector pointed to by VECTORP the bit range starting
   at OFFSET bits, and extending for the next LENGTH bits.  */

static void
insert_into_bit_range_vector (std::vector<range> *vectorp,
                              LONGEST offset, LONGEST length)
{
  range newr;

  /* Insert the range sorted.  If there's overlap or the new range
     would be contiguous with an existing range, merge.  */

  newr.offset = offset;
  newr.length = length;

  /* Do a binary search for the position the given range would be
     inserted if we only considered the starting OFFSET of ranges.
     Call that position I.  Since we also have LENGTH to care for
     (this is a range after all), we need to check if the _previous_
     range overlaps the I range.  E.g., calling R the new range:

       #1 - overlaps with previous

           R
           |-...-|
         |---|     |---|  |------| ... |--|
         0         1      2            N

         I=1

     In the case #1 above, the binary search would return `I=1',
     meaning, this OFFSET should be inserted at position 1, and the
     current position 1 should be pushed further (and become 2).  But,
     note that `0' overlaps with R, so we want to merge them.

     A similar consideration needs to be taken if the new range would
     be contiguous with the previous range:

       #2 - contiguous with previous

             R
             |-...-|
         |--|       |---|  |------| ... |--|
         0          1      2            N

         I=1

     If there's no overlap with the previous range, as in:

       #3 - not overlapping and not contiguous

               R
               |-...-|
         |--|          |---|  |------| ... |--|
         0             1      2            N

         I=1

     or if I is 0:

       #4 - R is the range with lowest offset

           R
           |-...-|
                 |--|       |---|  |------| ... |--|
                 0          1      2            N

         I=0

     ... we just push the new range to I.

     All the 4 cases above need to consider that the new range may
     also overlap several of the ranges that follow, or that R may be
     contiguous with the following range, and merge.  E.g.,

       #5 - overlapping following ranges

           R
           |------------------------|
         |--|       |---|  |------| ... |--|
         0          1      2            N

         I=0

       or:

               R
               |-------|
         |--|       |---|  |------| ... |--|
         0          1      2            N

         I=1

  */

  auto i = std::lower_bound (vectorp->begin (), vectorp->end (), newr);
  if (i > vectorp->begin ())
    {
      struct range &bef = *(i - 1);

      if (ranges_overlap (bef.offset, bef.length, offset, length))
        {
          /* #1 */
          ULONGEST l = std::min (bef.offset, offset);
          ULONGEST h = std::max (bef.offset + bef.length, offset + length);

          bef.offset = l;
          bef.length = h - l;
          i--;
        }
      else if (offset == bef.offset + bef.length)
        {
          /* #2 */
          bef.length += length;
          i--;
        }
      else
        {
          /* #3 */
          i = vectorp->insert (i, newr);
        }
    }
  else
    {
      /* #4 */
      i = vectorp->insert (i, newr);
    }

  /* Check whether the ranges following the one we've just added or
     touched can be folded in (#5 above).  */
  if (i != vectorp->end () && i + 1 < vectorp->end ())
    {
      int removed = 0;
      auto next = i + 1;

      /* Get the range we just touched.  */
      struct range &t = *i;
      removed = 0;

      i = next;
      for (; i < vectorp->end (); i++)
        {
          struct range &r = *i;
          if (r.offset <= t.offset + t.length)
            {
              ULONGEST l, h;

              l = std::min (t.offset, r.offset);
              h = std::max (t.offset + t.length, r.offset + r.length);

              t.offset = l;
              t.length = h - l;

              removed++;
            }
          else
            {
              /* If we couldn't merge this one, we won't be able to
                 merge following ones either, since the ranges are
                 always sorted by OFFSET.  */
              break;
            }
        }

      if (removed != 0)
        vectorp->erase (next, next + removed);
    }
}
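
/* Worked example of the merging above: starting from the vector
   { [0, 2), [6, 8), [12, 14) }, inserting the bit range [1, 11) first
   merges with the previous range [0, 2) (case #1), giving [0, 11);
   the folding loop then absorbs [6, 8) into it (case #5), leaving
   { [0, 11), [12, 14) }.  */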

void
mark_value_bits_unavailable (struct value *value,
                             LONGEST offset, LONGEST length)
{
  insert_into_bit_range_vector (&value->unavailable, offset, length);
}

void
mark_value_bytes_unavailable (struct value *value,
                              LONGEST offset, LONGEST length)
{
  mark_value_bits_unavailable (value,
                               offset * TARGET_CHAR_BIT,
                               length * TARGET_CHAR_BIT);
}

/* Find the first range in RANGES that overlaps the range defined by
   OFFSET and LENGTH, starting at element POS in the RANGES vector.
   Returns the index into RANGES where such overlapping range was
   found, or -1 if none was found.  */

static int
find_first_range_overlap (const std::vector<range> *ranges, int pos,
                          LONGEST offset, LONGEST length)
{
  int i;

  for (i = pos; i < ranges->size (); i++)
    {
      const range &r = (*ranges)[i];
      if (ranges_overlap (r.offset, r.length, offset, length))
        return i;
    }

  return -1;
}

/* Compare LENGTH_BITS of memory at PTR1 + OFFSET1_BITS with the memory at
   PTR2 + OFFSET2_BITS.  Return 0 if the memory is the same, otherwise
   return non-zero.

   It must always be the case that:
     OFFSET1_BITS % TARGET_CHAR_BIT == OFFSET2_BITS % TARGET_CHAR_BIT

   It is assumed that memory can be accessed from:
     PTR + (OFFSET_BITS / TARGET_CHAR_BIT)
   to:
     PTR + ((OFFSET_BITS + LENGTH_BITS + TARGET_CHAR_BIT - 1)
            / TARGET_CHAR_BIT)  */
static int
memcmp_with_bit_offsets (const gdb_byte *ptr1, size_t offset1_bits,
                         const gdb_byte *ptr2, size_t offset2_bits,
                         size_t length_bits)
{
  gdb_assert (offset1_bits % TARGET_CHAR_BIT
              == offset2_bits % TARGET_CHAR_BIT);

  if (offset1_bits % TARGET_CHAR_BIT != 0)
    {
      size_t bits;
      gdb_byte mask, b1, b2;

      /* The offset from the base pointers PTR1 and PTR2 is not a complete
         number of bytes.  A number of bits up to either the next exact
         byte boundary, or LENGTH_BITS (whichever is sooner) will be
         compared.  */
      bits = TARGET_CHAR_BIT - offset1_bits % TARGET_CHAR_BIT;
      gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
      mask = (1 << bits) - 1;

      if (length_bits < bits)
        {
          mask &= ~(gdb_byte) ((1 << (bits - length_bits)) - 1);
          bits = length_bits;
        }

      /* Now load the two bytes and mask off the bits we care about.  */
      b1 = *(ptr1 + offset1_bits / TARGET_CHAR_BIT) & mask;
      b2 = *(ptr2 + offset2_bits / TARGET_CHAR_BIT) & mask;

      if (b1 != b2)
        return 1;

      /* Now update the length and offsets to take account of the bits
         we've just compared.  */
      length_bits -= bits;
      offset1_bits += bits;
      offset2_bits += bits;
    }

  if (length_bits % TARGET_CHAR_BIT != 0)
    {
      size_t bits;
      size_t o1, o2;
      gdb_byte mask, b1, b2;

      /* The length is not an exact number of bytes.  After the previous
         IF block, either the offsets are byte aligned, or the length is
         zero (in which case this code is not reached).  Compare a
         number of bits at the end of the region, starting from an exact
         byte boundary.  */
      bits = length_bits % TARGET_CHAR_BIT;
      o1 = offset1_bits + length_bits - bits;
      o2 = offset2_bits + length_bits - bits;

      gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
      mask = ((1 << bits) - 1) << (TARGET_CHAR_BIT - bits);

      gdb_assert (o1 % TARGET_CHAR_BIT == 0);
      gdb_assert (o2 % TARGET_CHAR_BIT == 0);

      b1 = *(ptr1 + o1 / TARGET_CHAR_BIT) & mask;
      b2 = *(ptr2 + o2 / TARGET_CHAR_BIT) & mask;

      if (b1 != b2)
        return 1;

      length_bits -= bits;
    }

  if (length_bits > 0)
    {
      /* We've now taken care of any stray "bits" at the start, or end of
         the region to compare, the remainder can be covered with a simple
         memcmp.  */
      gdb_assert (offset1_bits % TARGET_CHAR_BIT == 0);
      gdb_assert (offset2_bits % TARGET_CHAR_BIT == 0);
      gdb_assert (length_bits % TARGET_CHAR_BIT == 0);

      return memcmp (ptr1 + offset1_bits / TARGET_CHAR_BIT,
                     ptr2 + offset2_bits / TARGET_CHAR_BIT,
                     length_bits / TARGET_CHAR_BIT);
    }

  /* Length is zero, regions match.  */
  return 0;
}
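
/* For example, a call with OFFSET1_BITS == 3, OFFSET2_BITS == 11 and
   LENGTH_BITS == 10 first compares the 5 bits that complete the
   initial partial byte, then the 5 remaining trailing bits, and would
   only fall through to the plain memcmp if whole bytes were left over
   (none are in this case).  */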

/* Helper struct for find_first_range_overlap_and_match and
   value_contents_bits_eq.  Keep track of which slot of a given ranges
   vector we have last looked at.  */

struct ranges_and_idx
{
  /* The ranges.  */
  const std::vector<range> *ranges;

  /* The range we've last found in RANGES.  Given ranges are sorted,
     we can start the next lookup here.  */
  int idx;
};

/* Helper function for value_contents_bits_eq.  Compare LENGTH bits of
   RP1's ranges starting at OFFSET1 bits with LENGTH bits of RP2's
   ranges starting at OFFSET2 bits.  Return true if the ranges match
   and fill in *L and *H with the overlapping window relative to
   (both) OFFSET1 or OFFSET2.  */

static int
find_first_range_overlap_and_match (struct ranges_and_idx *rp1,
                                    struct ranges_and_idx *rp2,
                                    LONGEST offset1, LONGEST offset2,
                                    LONGEST length, ULONGEST *l, ULONGEST *h)
{
  rp1->idx = find_first_range_overlap (rp1->ranges, rp1->idx,
                                       offset1, length);
  rp2->idx = find_first_range_overlap (rp2->ranges, rp2->idx,
                                       offset2, length);

  if (rp1->idx == -1 && rp2->idx == -1)
    {
      *l = length;
      *h = length;
      return 1;
    }
  else if (rp1->idx == -1 || rp2->idx == -1)
    return 0;
  else
    {
      const range *r1, *r2;
      ULONGEST l1, h1;
      ULONGEST l2, h2;

      r1 = &(*rp1->ranges)[rp1->idx];
      r2 = &(*rp2->ranges)[rp2->idx];

      /* Get the unavailable windows intersected by the incoming
         ranges.  The first and last ranges that overlap the argument
         range may be wider than said incoming arguments ranges.  */
      l1 = std::max (offset1, r1->offset);
      h1 = std::min (offset1 + length, r1->offset + r1->length);

      l2 = std::max (offset2, r2->offset);
      h2 = std::min (offset2 + length, offset2 + r2->length);

      /* Make them relative to the respective start offsets, so we can
         compare them for equality.  */
      l1 -= offset1;
      h1 -= offset1;

      l2 -= offset2;
      h2 -= offset2;

      /* Different ranges, no match.  */
      if (l1 != l2 || h1 != h2)
        return 0;

      *h = h1;
      *l = l1;
      return 1;
    }
}

/* Helper function for value_contents_eq.  The only difference is that
   this function is bit rather than byte based.

   Compare LENGTH bits of VAL1's contents starting at OFFSET1 bits
   with LENGTH bits of VAL2's contents starting at OFFSET2 bits.
   Return true if the available bits match.  */

static bool
value_contents_bits_eq (const struct value *val1, int offset1,
                        const struct value *val2, int offset2,
                        int length)
{
  /* Each array element corresponds to a ranges source (unavailable,
     optimized out).  '1' is for VAL1, '2' for VAL2.  */
  struct ranges_and_idx rp1[2], rp2[2];

  /* See function description in value.h.  */
  gdb_assert (!val1->lazy && !val2->lazy);

  /* We shouldn't be trying to compare past the end of the values.  */
  gdb_assert (offset1 + length
              <= TYPE_LENGTH (val1->enclosing_type) * TARGET_CHAR_BIT);
  gdb_assert (offset2 + length
              <= TYPE_LENGTH (val2->enclosing_type) * TARGET_CHAR_BIT);

  memset (&rp1, 0, sizeof (rp1));
  memset (&rp2, 0, sizeof (rp2));
  rp1[0].ranges = &val1->unavailable;
  rp2[0].ranges = &val2->unavailable;
  rp1[1].ranges = &val1->optimized_out;
  rp2[1].ranges = &val2->optimized_out;

  while (length > 0)
    {
      ULONGEST l = 0, h = 0; /* init for gcc -Wall */
      int i;

      for (i = 0; i < 2; i++)
        {
          ULONGEST l_tmp, h_tmp;

          /* The contents only match if the invalid/unavailable
             contents ranges match as well.  */
          if (!find_first_range_overlap_and_match (&rp1[i], &rp2[i],
                                                   offset1, offset2, length,
                                                   &l_tmp, &h_tmp))
            return false;

          /* We're interested in the lowest/first range found.  */
          if (i == 0 || l_tmp < l)
            {
              l = l_tmp;
              h = h_tmp;
            }
        }

      /* Compare the available/valid contents.  */
      if (memcmp_with_bit_offsets (val1->contents.get (), offset1,
                                   val2->contents.get (), offset2, l) != 0)
        return false;

      length -= h;
      offset1 += h;
      offset2 += h;
    }

  return true;
}

bool
value_contents_eq (const struct value *val1, LONGEST offset1,
                   const struct value *val2, LONGEST offset2,
                   LONGEST length)
{
  return value_contents_bits_eq (val1, offset1 * TARGET_CHAR_BIT,
                                 val2, offset2 * TARGET_CHAR_BIT,
                                 length * TARGET_CHAR_BIT);
}
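
/* Note the semantics that follow from the above: two values compare
   equal over a region as long as their available/valid bits are
   identical and their unavailable/optimized-out sub-ranges line up at
   exactly the same relative positions; contents that are unavailable
   in both values over the same window do not make the comparison
   fail.  */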


/* The value-history records all the values printed by print commands
   during this session.  */

static std::vector<value_ref_ptr> value_history;

/* List of all value objects currently allocated
   (except for those released by calls to release_value)
   This is so they can be freed after each command.  */

static std::vector<value_ref_ptr> all_values;

/* Allocate a lazy value for type TYPE.  Its actual content is
   "lazily" allocated too: the content field of the return value is
   NULL; it will be allocated when it is fetched from the target.  */

struct value *
allocate_value_lazy (struct type *type)
{
  struct value *val;

  /* Call check_typedef on our type to make sure that, if TYPE
     is a TYPE_CODE_TYPEDEF, its length is set to the length
     of the target type instead of zero.  However, we do not
     replace the typedef type by the target type, because we want
     to keep the typedef in order to be able to set the VAL's type
     description correctly.  */
  check_typedef (type);

  val = new struct value (type);

  /* Values start out on the all_values chain.  */
  all_values.emplace_back (val);

  return val;
}

/* The maximum size, in bytes, that GDB will try to allocate for a value.
   The initial value of 64k was not selected for any specific reason, it is
   just a reasonable starting point.  */

static int max_value_size = 65536; /* 64k bytes */

/* It is critical that the MAX_VALUE_SIZE is at least as big as the size of
   LONGEST, otherwise GDB will not be able to parse integer values from the
   CLI; for example if the MAX_VALUE_SIZE could be set to 1 then GDB would
   be unable to parse "set max-value-size 2".

   As we want a consistent GDB experience across hosts with different sizes
   of LONGEST, this arbitrary minimum value was selected, so long as this
   is bigger than LONGEST on all GDB supported hosts we're fine.  */

#define MIN_VALUE_FOR_MAX_VALUE_SIZE 16
gdb_static_assert (sizeof (LONGEST) <= MIN_VALUE_FOR_MAX_VALUE_SIZE);

/* Implement the "set max-value-size" command.  */

static void
set_max_value_size (const char *args, int from_tty,
                    struct cmd_list_element *c)
{
  gdb_assert (max_value_size == -1 || max_value_size >= 0);

  if (max_value_size > -1 && max_value_size < MIN_VALUE_FOR_MAX_VALUE_SIZE)
    {
      max_value_size = MIN_VALUE_FOR_MAX_VALUE_SIZE;
      error (_("max-value-size set too low, increasing to %d bytes"),
             max_value_size);
    }
}

/* Implement the "show max-value-size" command.  */

static void
show_max_value_size (struct ui_file *file, int from_tty,
                     struct cmd_list_element *c, const char *value)
{
  if (max_value_size == -1)
    fprintf_filtered (file, _("Maximum value size is unlimited.\n"));
  else
    fprintf_filtered (file, _("Maximum value size is %d bytes.\n"),
                      max_value_size);
}

/* Called before we attempt to allocate or reallocate a buffer for the
   contents of a value.  TYPE is the type of the value for which we are
   allocating the buffer.  If the buffer is too large (based on the user
   controllable setting) then throw an error.  If this function returns
   then we should attempt to allocate the buffer.  */

static void
check_type_length_before_alloc (const struct type *type)
{
  ULONGEST length = TYPE_LENGTH (type);

  if (max_value_size > -1 && length > max_value_size)
    {
      if (type->name () != NULL)
        error (_("value of type `%s' requires %s bytes, which is more "
                 "than max-value-size"), type->name (), pulongest (length));
      else
        error (_("value requires %s bytes, which is more than "
                 "max-value-size"), pulongest (length));
    }
}
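
/* For example, with the default max_value_size of 65536 bytes,
   allocating a value for a one-megabyte array triggers the error
   above, while a setting of -1 (shown as "unlimited" by the show
   command above) disables the check entirely.  */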
1014 | ||
3e3d7139 JG |
1015 | /* Allocate the contents of VAL if it has not been allocated yet. */ |
1016 | ||
548b762d | 1017 | static void |
3e3d7139 JG |
1018 | allocate_value_contents (struct value *val) |
1019 | { | |
1020 | if (!val->contents) | |
5fdf6324 AB |
1021 | { |
1022 | check_type_length_before_alloc (val->enclosing_type); | |
14c88955 TT |
1023 | val->contents.reset |
1024 | ((gdb_byte *) xzalloc (TYPE_LENGTH (val->enclosing_type))); | |
5fdf6324 | 1025 | } |
3e3d7139 JG |
1026 | } |
1027 | ||
1028 | /* Allocate a value and its contents for type TYPE. */ | |
1029 | ||
1030 | struct value * | |
1031 | allocate_value (struct type *type) | |
1032 | { | |
1033 | struct value *val = allocate_value_lazy (type); | |
a109c7c1 | 1034 | |
3e3d7139 JG |
1035 | allocate_value_contents (val); |
1036 | val->lazy = 0; | |
1037 | return val; | |
1038 | } | |
1039 | ||
c906108c | 1040 | /* Allocate a value that has the correct length |
938f5214 | 1041 | for COUNT repetitions of type TYPE. */ |
c906108c | 1042 | |
f23631e4 | 1043 | struct value * |
fba45db2 | 1044 | allocate_repeat_value (struct type *type, int count) |
c906108c | 1045 | { |
22c12a6c AB |
1046 | /* Despite the fact that we are really creating an array of TYPE here, we |
1047 | use the string lower bound as the array lower bound. This seems to | |
1048 | work fine for now. */ | |
1049 | int low_bound = current_language->string_lower_bound (); | |
c906108c SS |
1050 | /* FIXME-type-allocation: need a way to free this type when we are |
1051 | done with it. */ | |
e3506a9f UW |
1052 | struct type *array_type |
1053 | = lookup_array_range_type (type, low_bound, count + low_bound - 1); | |
a109c7c1 | 1054 | |
e3506a9f | 1055 | return allocate_value (array_type); |
c906108c SS |
1056 | } |
1057 | ||
5f5233d4 PA |
1058 | struct value * |
1059 | allocate_computed_value (struct type *type, | |
dda83cd7 SM |
1060 | const struct lval_funcs *funcs, |
1061 | void *closure) | |
5f5233d4 | 1062 | { |
41e8491f | 1063 | struct value *v = allocate_value_lazy (type); |
a109c7c1 | 1064 | |
5f5233d4 PA |
1065 | VALUE_LVAL (v) = lval_computed; |
1066 | v->location.computed.funcs = funcs; | |
1067 | v->location.computed.closure = closure; | |
5f5233d4 PA |
1068 | |
1069 | return v; | |
1070 | } | |
1071 | ||
a7035dbb JK |
1072 | /* Allocate NOT_LVAL value for type TYPE being OPTIMIZED_OUT. */ |
1073 | ||
1074 | struct value * | |
1075 | allocate_optimized_out_value (struct type *type) | |
1076 | { | |
1077 | struct value *retval = allocate_value_lazy (type); | |
1078 | ||
9a0dc9e3 PA |
1079 | mark_value_bytes_optimized_out (retval, 0, TYPE_LENGTH (type)); |
1080 | set_value_lazy (retval, 0); | |
a7035dbb JK |
1081 | return retval; |
1082 | } | |
1083 | ||
df407dfe AC |
1084 | /* Accessor methods. */ |
1085 | ||
1086 | struct type * | |
0e03807e | 1087 | value_type (const struct value *value) |
df407dfe AC |
1088 | { |
1089 | return value->type; | |
1090 | } | |
04624583 AC |
1091 | void |
1092 | deprecated_set_value_type (struct value *value, struct type *type) | |
1093 | { | |
1094 | value->type = type; | |
1095 | } | |
df407dfe | 1096 | |
6b850546 | 1097 | LONGEST |
0e03807e | 1098 | value_offset (const struct value *value) |
df407dfe AC |
1099 | { |
1100 | return value->offset; | |
1101 | } | |
f5cf64a7 | 1102 | void |
6b850546 | 1103 | set_value_offset (struct value *value, LONGEST offset) |
f5cf64a7 AC |
1104 | { |
1105 | value->offset = offset; | |
1106 | } | |
df407dfe | 1107 | |
6b850546 | 1108 | LONGEST |
0e03807e | 1109 | value_bitpos (const struct value *value) |
df407dfe AC |
1110 | { |
1111 | return value->bitpos; | |
1112 | } | |
9bbda503 | 1113 | void |
6b850546 | 1114 | set_value_bitpos (struct value *value, LONGEST bit) |
9bbda503 AC |
1115 | { |
1116 | value->bitpos = bit; | |
1117 | } | |
df407dfe | 1118 | |
6b850546 | 1119 | LONGEST |
0e03807e | 1120 | value_bitsize (const struct value *value) |
df407dfe AC |
1121 | { |
1122 | return value->bitsize; | |
1123 | } | |
9bbda503 | 1124 | void |
6b850546 | 1125 | set_value_bitsize (struct value *value, LONGEST bit) |
9bbda503 AC |
1126 | { |
1127 | value->bitsize = bit; | |
1128 | } | |
df407dfe | 1129 | |
4ea48cc1 | 1130 | struct value * |
4bf7b526 | 1131 | value_parent (const struct value *value) |
4ea48cc1 | 1132 | { |
2c8331b9 | 1133 | return value->parent.get (); |
4ea48cc1 DJ |
1134 | } |
1135 | ||
53ba8333 JB |
1136 | /* See value.h. */ |
1137 | ||
1138 | void | |
1139 | set_value_parent (struct value *value, struct value *parent) | |
1140 | { | |
bbfa6f00 | 1141 | value->parent = value_ref_ptr::new_reference (parent); |
53ba8333 JB |
1142 | } |
1143 | ||
fc1a4b47 | 1144 | gdb_byte * |
990a07ab AC |
1145 | value_contents_raw (struct value *value) |
1146 | { | |
3ae385af SM |
1147 | struct gdbarch *arch = get_value_arch (value); |
1148 | int unit_size = gdbarch_addressable_memory_unit_size (arch); | |
1149 | ||
3e3d7139 | 1150 | allocate_value_contents (value); |
14c88955 | 1151 | return value->contents.get () + value->embedded_offset * unit_size; |
990a07ab AC |
1152 | } |
1153 | ||
fc1a4b47 | 1154 | gdb_byte * |
990a07ab AC |
1155 | value_contents_all_raw (struct value *value) |
1156 | { | |
3e3d7139 | 1157 | allocate_value_contents (value); |
14c88955 | 1158 | return value->contents.get (); |
990a07ab AC |
1159 | } |
1160 | ||
4754a64e | 1161 | struct type * |
4bf7b526 | 1162 | value_enclosing_type (const struct value *value) |
4754a64e AC |
1163 | { |
1164 | return value->enclosing_type; | |
1165 | } | |
1166 | ||
8264ba82 AG |
1167 | /* Look at value.h for description. */ |
1168 | ||
1169 | struct type * | |
1170 | value_actual_type (struct value *value, int resolve_simple_types, | |
1171 | int *real_type_found) | |
1172 | { | |
1173 | struct value_print_options opts; | |
8264ba82 AG |
1174 | struct type *result; |
1175 | ||
1176 | get_user_print_options (&opts); | |
1177 | ||
1178 | if (real_type_found) | |
1179 | *real_type_found = 0; | |
1180 | result = value_type (value); | |
1181 | if (opts.objectprint) | |
1182 | { | |
5e34c6c3 LM |
1183 | /* If result's target type is TYPE_CODE_STRUCT, proceed to |
1184 | fetch its rtti type. */ | |
78134374 SM |
1185 | if ((result->code () == TYPE_CODE_PTR || TYPE_IS_REFERENCE (result)) |
1186 | && (check_typedef (TYPE_TARGET_TYPE (result))->code () | |
1187 | == TYPE_CODE_STRUCT) | |
ecf2e90c | 1188 | && !value_optimized_out (value)) |
dda83cd7 SM |
1189 | { |
1190 | struct type *real_type; | |
1191 | ||
1192 | real_type = value_rtti_indirect_type (value, NULL, NULL, NULL); | |
1193 | if (real_type) | |
1194 | { | |
1195 | if (real_type_found) | |
1196 | *real_type_found = 1; | |
1197 | result = real_type; | |
1198 | } | |
1199 | } | |
8264ba82 | 1200 | else if (resolve_simple_types) |
dda83cd7 SM |
1201 | { |
1202 | if (real_type_found) | |
1203 | *real_type_found = 1; | |
1204 | result = value_enclosing_type (value); | |
1205 | } | |
8264ba82 AG |
1206 | } |
1207 | ||
1208 | return result; | |
1209 | } | |
1210 | ||
901461f8 PA |
1211 | void |
1212 | error_value_optimized_out (void) | |
1213 | { | |
1214 | error (_("value has been optimized out")); | |
1215 | } | |
1216 | ||
0e03807e | 1217 | static void |
4e07d55f | 1218 | require_not_optimized_out (const struct value *value) |
0e03807e | 1219 | { |
0c7e6dd8 | 1220 | if (!value->optimized_out.empty ()) |
901461f8 PA |
1221 | { |
1222 | if (value->lval == lval_register) | |
1223 | error (_("register has not been saved in frame")); | |
1224 | else | |
1225 | error_value_optimized_out (); | |
1226 | } | |
0e03807e TT |
1227 | } |
1228 | ||
4e07d55f PA |
1229 | static void |
1230 | require_available (const struct value *value) | |
1231 | { | |
0c7e6dd8 | 1232 | if (!value->unavailable.empty ()) |
8af8e3bc | 1233 | throw_error (NOT_AVAILABLE_ERROR, _("value is not available")); |
4e07d55f PA |
1234 | } |
1235 | ||
fc1a4b47 | 1236 | const gdb_byte * |
0e03807e | 1237 | value_contents_for_printing (struct value *value) |
46615f07 AC |
1238 | { |
1239 | if (value->lazy) | |
1240 | value_fetch_lazy (value); | |
14c88955 | 1241 | return value->contents.get (); |
46615f07 AC |
1242 | } |
1243 | ||
de4127a3 PA |
1244 | const gdb_byte * |
1245 | value_contents_for_printing_const (const struct value *value) | |
1246 | { | |
1247 | gdb_assert (!value->lazy); | |
14c88955 | 1248 | return value->contents.get (); |
de4127a3 PA |
1249 | } |
1250 | ||
0e03807e TT |
1251 | const gdb_byte * |
1252 | value_contents_all (struct value *value) | |
1253 | { | |
1254 | const gdb_byte *result = value_contents_for_printing (value); | |
1255 | require_not_optimized_out (value); | |
4e07d55f | 1256 | require_available (value); |
0e03807e TT |
1257 | return result; |
1258 | } | |
1259 | ||
9a0dc9e3 PA |
1260 | /* Copy ranges in SRC_RANGE that overlap [SRC_BIT_OFFSET, |
1261 | SRC_BIT_OFFSET+BIT_LENGTH) ranges into *DST_RANGE, adjusted. */ | |
1262 | ||
1263 | static void | |
0c7e6dd8 TT |
1264 | ranges_copy_adjusted (std::vector<range> *dst_range, int dst_bit_offset, |
1265 | const std::vector<range> &src_range, int src_bit_offset, | |
9a0dc9e3 PA |
1266 | int bit_length) |
1267 | { | |
0c7e6dd8 | 1268 | for (const range &r : src_range) |
9a0dc9e3 PA |
1269 | { |
1270 | ULONGEST h, l; | |
1271 | ||
0c7e6dd8 TT |
1272 | l = std::max (r.offset, (LONGEST) src_bit_offset); |
1273 | h = std::min (r.offset + r.length, | |
325fac50 | 1274 | (LONGEST) src_bit_offset + bit_length); |
9a0dc9e3 PA |
1275 | |
1276 | if (l < h) | |
1277 | insert_into_bit_range_vector (dst_range, | |
1278 | dst_bit_offset + (l - src_bit_offset), | |
1279 | h - l); | |
1280 | } | |
1281 | } | |
1282 | ||
4875ffdb PA |
1283 | /* Copy the ranges metadata in SRC that overlaps [SRC_BIT_OFFSET, |
1284 | SRC_BIT_OFFSET+BIT_LENGTH) into DST, adjusted. */ | |
1285 | ||
1286 | static void | |
1287 | value_ranges_copy_adjusted (struct value *dst, int dst_bit_offset, | |
1288 | const struct value *src, int src_bit_offset, | |
1289 | int bit_length) | |
1290 | { | |
1291 | ranges_copy_adjusted (&dst->unavailable, dst_bit_offset, | |
1292 | src->unavailable, src_bit_offset, | |
1293 | bit_length); | |
1294 | ranges_copy_adjusted (&dst->optimized_out, dst_bit_offset, | |
1295 | src->optimized_out, src_bit_offset, | |
1296 | bit_length); | |
1297 | } | |
1298 | ||
3ae385af | 1299 | /* Copy LENGTH target addressable memory units of SRC value's (all) contents |
29976f3f PA |
1300 | (value_contents_all) starting at SRC_OFFSET, into DST value's (all) |
1301 | contents, starting at DST_OFFSET. If unavailable contents are | |
1302 | being copied from SRC, the corresponding DST contents are marked | |
1303 | unavailable accordingly. Neither DST nor SRC may be lazy | |
1304 | values. | |
1305 | ||
1306 | It is assumed the contents of DST in the [DST_OFFSET, | |
1307 | DST_OFFSET+LENGTH) range are wholly available. */ | |
39d37385 | 1308 | |
f73e424f | 1309 | static void |
6b850546 DT |
1310 | value_contents_copy_raw (struct value *dst, LONGEST dst_offset, |
1311 | struct value *src, LONGEST src_offset, LONGEST length) | |
39d37385 | 1312 | { |
6b850546 | 1313 | LONGEST src_bit_offset, dst_bit_offset, bit_length; |
3ae385af SM |
1314 | struct gdbarch *arch = get_value_arch (src); |
1315 | int unit_size = gdbarch_addressable_memory_unit_size (arch); | |
39d37385 PA |
1316 | |
1317 | /* A lazy DST would make that this copy operation useless, since as | |
1318 | soon as DST's contents were un-lazied (by a later value_contents | |
1319 | call, say), the contents would be overwritten. A lazy SRC would | |
1320 | mean we'd be copying garbage. */ | |
1321 | gdb_assert (!dst->lazy && !src->lazy); | |
1322 | ||
29976f3f PA |
1323 | /* The overwritten DST range gets unavailability ORed in, not |
1324 | replaced. Make sure to remember to implement replacing if it | |
1325 | turns out actually necessary. */ | |
1326 | gdb_assert (value_bytes_available (dst, dst_offset, length)); | |
9a0dc9e3 PA |
1327 | gdb_assert (!value_bits_any_optimized_out (dst, |
1328 | TARGET_CHAR_BIT * dst_offset, | |
1329 | TARGET_CHAR_BIT * length)); | |
29976f3f | 1330 | |
39d37385 | 1331 | /* Copy the data. */ |
3ae385af SM |
1332 | memcpy (value_contents_all_raw (dst) + dst_offset * unit_size, |
1333 | value_contents_all_raw (src) + src_offset * unit_size, | |
1334 | length * unit_size); | |
39d37385 PA |
1335 | |
1336 | /* Copy the meta-data, adjusted. */ | |
3ae385af SM |
1337 | src_bit_offset = src_offset * unit_size * HOST_CHAR_BIT; |
1338 | dst_bit_offset = dst_offset * unit_size * HOST_CHAR_BIT; | |
1339 | bit_length = length * unit_size * HOST_CHAR_BIT; | |
39d37385 | 1340 | |
4875ffdb PA |
1341 | value_ranges_copy_adjusted (dst, dst_bit_offset, |
1342 | src, src_bit_offset, | |
1343 | bit_length); | |
39d37385 PA |
1344 | } |
1345 | ||
29976f3f PA |
1346 | /* Copy LENGTH bytes of SRC value's (all) contents |
1347 | (value_contents_all) starting at SRC_OFFSET byte, into DST value's | |
1348 | (all) contents, starting at DST_OFFSET. If unavailable contents | |
1349 | are being copied from SRC, the corresponding DST contents are | |
1350 | marked unavailable accordingly. DST must not be lazy. If SRC is | |
9a0dc9e3 | 1351 | lazy, it will be fetched now. |
29976f3f PA |
1352 | |
1353 | It is assumed the contents of DST in the [DST_OFFSET, | |
1354 | DST_OFFSET+LENGTH) range are wholly available. */ | |
39d37385 PA |
1355 | |
1356 | void | |
6b850546 DT |
1357 | value_contents_copy (struct value *dst, LONGEST dst_offset, |
1358 | struct value *src, LONGEST src_offset, LONGEST length) | |
39d37385 | 1359 | { |
39d37385 PA |
1360 | if (src->lazy) |
1361 | value_fetch_lazy (src); | |
1362 | ||
1363 | value_contents_copy_raw (dst, dst_offset, src, src_offset, length); | |
1364 | } | |
1365 | ||
d69fe07e | 1366 | int |
4bf7b526 | 1367 | value_lazy (const struct value *value) |
d69fe07e AC |
1368 | { |
1369 | return value->lazy; | |
1370 | } | |
1371 | ||
dfa52d88 AC |
1372 | void |
1373 | set_value_lazy (struct value *value, int val) | |
1374 | { | |
1375 | value->lazy = val; | |
1376 | } | |
1377 | ||
4e5d721f | 1378 | int |
4bf7b526 | 1379 | value_stack (const struct value *value) |
4e5d721f DE |
1380 | { |
1381 | return value->stack; | |
1382 | } | |
1383 | ||
1384 | void | |
1385 | set_value_stack (struct value *value, int val) | |
1386 | { | |
1387 | value->stack = val; | |
1388 | } | |
1389 | ||
fc1a4b47 | 1390 | const gdb_byte * |
0fd88904 AC |
1391 | value_contents (struct value *value) |
1392 | { | |
0e03807e TT |
1393 | const gdb_byte *result = value_contents_writeable (value); |
1394 | require_not_optimized_out (value); | |
4e07d55f | 1395 | require_available (value); |
0e03807e | 1396 | return result; |
0fd88904 AC |
1397 | } |
1398 | ||
fc1a4b47 | 1399 | gdb_byte * |
0fd88904 AC |
1400 | value_contents_writeable (struct value *value) |
1401 | { | |
1402 | if (value->lazy) | |
1403 | value_fetch_lazy (value); | |
fc0c53a0 | 1404 | return value_contents_raw (value); |
0fd88904 AC |
1405 | } |
1406 | ||
feb13ab0 AC |
1407 | int |
1408 | value_optimized_out (struct value *value) | |
1409 | { | |
691a26f5 AB |
1410 | /* We can only know if a value is optimized out once we have tried to |
1411 | fetch it. */ | |
0c7e6dd8 | 1412 | if (value->optimized_out.empty () && value->lazy) |
ecf2e90c | 1413 | { |
a70b8144 | 1414 | try |
ecf2e90c DB |
1415 | { |
1416 | value_fetch_lazy (value); | |
1417 | } | |
230d2906 | 1418 | catch (const gdb_exception_error &ex) |
ecf2e90c | 1419 | { |
6d7aa592 PA |
1420 | switch (ex.error) |
1421 | { | |
1422 | case MEMORY_ERROR: | |
1423 | case OPTIMIZED_OUT_ERROR: | |
1424 | case NOT_AVAILABLE_ERROR: | |
1425 | /* These can normally happen when we try to access an | |
1426 | optimized out or unavailable register, either in a | |
1427 | physical register or spilled to memory. */ | |
1428 | break; | |
1429 | default: | |
1430 | throw; | |
1431 | } | |
ecf2e90c | 1432 | } |
ecf2e90c | 1433 | } |
691a26f5 | 1434 | |
0c7e6dd8 | 1435 | return !value->optimized_out.empty (); |
feb13ab0 AC |
1436 | } |
1437 | ||
/* Mark contents of VALUE as optimized out, starting at OFFSET bytes, and
   the following LENGTH bytes.  */

void
mark_value_bytes_optimized_out (struct value *value, int offset, int length)
{
  mark_value_bits_optimized_out (value,
                                 offset * TARGET_CHAR_BIT,
                                 length * TARGET_CHAR_BIT);
}

/* See value.h.  */

void
mark_value_bits_optimized_out (struct value *value,
                               LONGEST offset, LONGEST length)
{
  insert_into_bit_range_vector (&value->optimized_out, offset, length);
}

int
value_bits_synthetic_pointer (const struct value *value,
                              LONGEST offset, LONGEST length)
{
  if (value->lval != lval_computed
      || !value->location.computed.funcs->check_synthetic_pointer)
    return 0;
  return value->location.computed.funcs->check_synthetic_pointer (value,
                                                                  offset,
                                                                  length);
}

LONGEST
value_embedded_offset (const struct value *value)
{
  return value->embedded_offset;
}

void
set_value_embedded_offset (struct value *value, LONGEST val)
{
  value->embedded_offset = val;
}

LONGEST
value_pointed_to_offset (const struct value *value)
{
  return value->pointed_to_offset;
}

void
set_value_pointed_to_offset (struct value *value, LONGEST val)
{
  value->pointed_to_offset = val;
}

const struct lval_funcs *
value_computed_funcs (const struct value *v)
{
  gdb_assert (value_lval_const (v) == lval_computed);

  return v->location.computed.funcs;
}

void *
value_computed_closure (const struct value *v)
{
  gdb_assert (v->lval == lval_computed);

  return v->location.computed.closure;
}

enum lval_type *
deprecated_value_lval_hack (struct value *value)
{
  return &value->lval;
}

enum lval_type
value_lval_const (const struct value *value)
{
  return value->lval;
}

CORE_ADDR
value_address (const struct value *value)
{
  if (value->lval != lval_memory)
    return 0;
  if (value->parent != NULL)
    return value_address (value->parent.get ()) + value->offset;
  if (NULL != TYPE_DATA_LOCATION (value_type (value)))
    {
      gdb_assert (PROP_CONST == TYPE_DATA_LOCATION_KIND (value_type (value)));
      return TYPE_DATA_LOCATION_ADDR (value_type (value));
    }

  return value->location.address + value->offset;
}

CORE_ADDR
value_raw_address (const struct value *value)
{
  if (value->lval != lval_memory)
    return 0;
  return value->location.address;
}

void
set_value_address (struct value *value, CORE_ADDR addr)
{
  gdb_assert (value->lval == lval_memory);
  value->location.address = addr;
}

struct internalvar **
deprecated_value_internalvar_hack (struct value *value)
{
  return &value->location.internalvar;
}

struct frame_id *
deprecated_value_next_frame_id_hack (struct value *value)
{
  gdb_assert (value->lval == lval_register);
  return &value->location.reg.next_frame_id;
}

int *
deprecated_value_regnum_hack (struct value *value)
{
  gdb_assert (value->lval == lval_register);
  return &value->location.reg.regnum;
}

int
deprecated_value_modifiable (const struct value *value)
{
  return value->modifiable;
}
\f
1579 | /* Return a mark in the value chain. All values allocated after the |
1580 | mark is obtained (except for those released) are subject to being freed | |
1581 | if a subsequent value_free_to_mark is passed the mark. */ | |
f23631e4 | 1582 | struct value * |
fba45db2 | 1583 | value_mark (void) |
c906108c | 1584 | { |
062d818d TT |
1585 | if (all_values.empty ()) |
1586 | return nullptr; | |
1587 | return all_values.back ().get (); | |
c906108c SS |
1588 | } |
1589 | ||
bbfa6f00 | 1590 | /* See value.h. */ |
828d3400 | 1591 | |
bbfa6f00 | 1592 | void |
828d3400 DJ |
1593 | value_incref (struct value *val) |
1594 | { | |
1595 | val->reference_count++; | |
1596 | } | |
1597 | ||
1598 | /* Release a reference to VAL, which was acquired with value_incref. | |
1599 | This function is also called to deallocate values from the value | |
1600 | chain. */ | |
1601 | ||
3e3d7139 | 1602 | void |
22bc8444 | 1603 | value_decref (struct value *val) |
3e3d7139 | 1604 | { |
466ce3ae | 1605 | if (val != nullptr) |
5f5233d4 | 1606 | { |
828d3400 DJ |
1607 | gdb_assert (val->reference_count > 0); |
1608 | val->reference_count--; | |
466ce3ae TT |
1609 | if (val->reference_count == 0) |
1610 | delete val; | |
5f5233d4 | 1611 | } |
3e3d7139 JG |
1612 | } |
1613 | ||
c906108c SS |
1614 | /* Free all values allocated since MARK was obtained by value_mark |
1615 | (except for those released). */ | |
1616 | void | |
4bf7b526 | 1617 | value_free_to_mark (const struct value *mark) |
c906108c | 1618 | { |
062d818d TT |
1619 | auto iter = std::find (all_values.begin (), all_values.end (), mark); |
1620 | if (iter == all_values.end ()) | |
1621 | all_values.clear (); | |
1622 | else | |
1623 | all_values.erase (iter + 1, all_values.end ()); | |
c906108c SS |
1624 | } |
1625 | ||
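/* Usage sketch (not part of the original source; the caller and its
   temporaries are hypothetical): code that creates scratch values and
   wants them reclaimed afterwards typically brackets the work with a
   mark:

     struct value *mark = value_mark ();
     struct value *tmp = access_value_history (1);
     ... inspect TMP ...
     value_free_to_mark (mark);

   Anything still needed past the mark must first be kept alive with
   release_value, which hands back an owning value_ref_ptr.  */
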
/* Remove VAL from the chain all_values
   so it will not be freed automatically.  */

value_ref_ptr
release_value (struct value *val)
{
  if (val == nullptr)
    return value_ref_ptr ();

  std::vector<value_ref_ptr>::reverse_iterator iter;
  for (iter = all_values.rbegin (); iter != all_values.rend (); ++iter)
    {
      if (*iter == val)
        {
          value_ref_ptr result = *iter;
          all_values.erase (iter.base () - 1);
          return result;
        }
    }

  /* We must always return an owned reference.  Normally this happens
     because we transfer the reference from the value chain, but in
     this case the value was not on the chain.  */
  return value_ref_ptr::new_reference (val);
}

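/* Illustrative sketch (hypothetical caller): keeping a value alive
   across a later value_free_to_mark or free_all_values amounts to
   taking an owning reference off the chain:

     value_ref_ptr keep = release_value (val);
     ... the chain may now be pruned; KEEP still owns VAL ...

   Dropping the last value_ref_ptr (or value_decref on a manually
   counted reference) is what finally deletes the value.  */
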
/* See value.h.  */

std::vector<value_ref_ptr>
value_release_to_mark (const struct value *mark)
{
  std::vector<value_ref_ptr> result;

  auto iter = std::find (all_values.begin (), all_values.end (), mark);
  if (iter == all_values.end ())
    std::swap (result, all_values);
  else
    {
      std::move (iter + 1, all_values.end (), std::back_inserter (result));
      all_values.erase (iter + 1, all_values.end ());
    }
  std::reverse (result.begin (), result.end ());
  return result;
}

/* Return a copy of the value ARG.
   It contains the same contents, for the same memory address,
   but it's a different block of storage.  */

struct value *
value_copy (struct value *arg)
{
  struct type *encl_type = value_enclosing_type (arg);
  struct value *val;

  if (value_lazy (arg))
    val = allocate_value_lazy (encl_type);
  else
    val = allocate_value (encl_type);
  val->type = arg->type;
  VALUE_LVAL (val) = VALUE_LVAL (arg);
  val->location = arg->location;
  val->offset = arg->offset;
  val->bitpos = arg->bitpos;
  val->bitsize = arg->bitsize;
  val->lazy = arg->lazy;
  val->embedded_offset = value_embedded_offset (arg);
  val->pointed_to_offset = arg->pointed_to_offset;
  val->modifiable = arg->modifiable;
  if (!value_lazy (val))
    {
      memcpy (value_contents_all_raw (val), value_contents_all_raw (arg),
              TYPE_LENGTH (value_enclosing_type (arg)));
    }
  val->unavailable = arg->unavailable;
  val->optimized_out = arg->optimized_out;
  val->parent = arg->parent;
  if (VALUE_LVAL (val) == lval_computed)
    {
      const struct lval_funcs *funcs = val->location.computed.funcs;

      if (funcs->copy_closure)
        val->location.computed.closure = funcs->copy_closure (val);
    }
  return val;
}
74bcbdf3 | 1713 | |
1714 | /* Return a "const" and/or "volatile" qualified version of the value V. |
1715 | If CNST is true, then the returned value will be qualified with | |
1716 | "const". | |
1717 | If VOLTL is true, then the returned value will be qualified with
1718 | "volatile". */ | |
1719 | ||
1720 | struct value * | |
1721 | make_cv_value (int cnst, int voltl, struct value *v) | |
1722 | { | |
1723 | struct type *val_type = value_type (v); | |
1724 | struct type *enclosing_type = value_enclosing_type (v); | |
1725 | struct value *cv_val = value_copy (v); | |
1726 | ||
1727 | deprecated_set_value_type (cv_val, | |
1728 | make_cv_type (cnst, voltl, val_type, NULL)); | |
1729 | set_value_enclosing_type (cv_val, | |
1730 | make_cv_type (cnst, voltl, enclosing_type, NULL)); | |
1731 | ||
1732 | return cv_val; | |
1733 | } | |
1734 | ||
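/* Illustrative sketch (hypothetical caller): wrapping an existing value V
   so that it prints and type-checks as "const volatile":

     struct value *cv = make_cv_value (1, 1, v);

   Passing 0 for either flag leaves that qualifier off.  The copy carries
   the same contents; only its type and enclosing type gain the
   qualifiers.  */
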
1735 | /* Return a version of ARG that is non-lvalue. */ |
1736 | ||
1737 | struct value * | |
1738 | value_non_lval (struct value *arg) | |
1739 | { | |
1740 | if (VALUE_LVAL (arg) != not_lval) | |
1741 | { | |
1742 | struct type *enc_type = value_enclosing_type (arg); | |
1743 | struct value *val = allocate_value (enc_type); | |
1744 | ||
1745 | memcpy (value_contents_all_raw (val), value_contents_all (arg), | |
1746 | TYPE_LENGTH (enc_type)); | |
1747 | val->type = arg->type; | |
1748 | set_value_embedded_offset (val, value_embedded_offset (arg)); | |
1749 | set_value_pointed_to_offset (val, value_pointed_to_offset (arg)); | |
1750 | return val; | |
1751 | } | |
1752 | return arg; | |
1753 | } | |
1754 | ||
1755 | /* Write contents of V at ADDR and set its lval type to be LVAL_MEMORY. */ |
1756 | ||
1757 | void | |
1758 | value_force_lval (struct value *v, CORE_ADDR addr) | |
1759 | { | |
1760 | gdb_assert (VALUE_LVAL (v) == not_lval); | |
1761 | ||
1762 | write_memory (addr, value_contents_raw (v), TYPE_LENGTH (value_type (v))); | |
1763 | v->lval = lval_memory; | |
1764 | v->location.address = addr; | |
1765 | } | |
1766 | ||
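/* Illustrative sketch (hypothetical caller; GDBARCH and ADDR, assumed to
   be writable inferior memory, are not from the original source): turning
   a freestanding result into an lvalue at a known address:

     struct value *tmp
       = value_from_longest (builtin_type (gdbarch)->builtin_int, 42);
     value_force_lval (tmp, addr);

   After the call TMP reads back from ADDR like any other lval_memory
   value.  */
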
74bcbdf3 | 1767 | void |
0e03807e TT |
1768 | set_value_component_location (struct value *component, |
1769 | const struct value *whole) | |
74bcbdf3 | 1770 | { |
9920b434 BH |
1771 | struct type *type; |
1772 | ||
e81e7f5e SC |
1773 | gdb_assert (whole->lval != lval_xcallable); |
1774 | ||
0e03807e | 1775 | if (whole->lval == lval_internalvar) |
74bcbdf3 PA |
1776 | VALUE_LVAL (component) = lval_internalvar_component; |
1777 | else | |
0e03807e | 1778 | VALUE_LVAL (component) = whole->lval; |
5f5233d4 | 1779 | |
74bcbdf3 | 1780 | component->location = whole->location; |
0e03807e | 1781 | if (whole->lval == lval_computed) |
5f5233d4 | 1782 | { |
c8f2448a | 1783 | const struct lval_funcs *funcs = whole->location.computed.funcs; |
5f5233d4 PA |
1784 | |
1785 | if (funcs->copy_closure) | |
dda83cd7 | 1786 | component->location.computed.closure = funcs->copy_closure (whole); |
5f5233d4 | 1787 | } |
9920b434 | 1788 | |
3c8c6de2 AB |
1789 | /* If the WHOLE value has a dynamically resolved location property then |
1790 | update the address of the COMPONENT. */ | |
9920b434 BH |
1791 | type = value_type (whole); |
1792 | if (NULL != TYPE_DATA_LOCATION (type) | |
1793 | && TYPE_DATA_LOCATION_KIND (type) == PROP_CONST) | |
1794 | set_value_address (component, TYPE_DATA_LOCATION_ADDR (type)); | |
3c8c6de2 AB |
1795 | |
1796 | /* Similarly, if the COMPONENT value has a dynamically resolved location | |
1797 | property then update its address. */ | |
1798 | type = value_type (component); | |
1799 | if (NULL != TYPE_DATA_LOCATION (type) | |
1800 | && TYPE_DATA_LOCATION_KIND (type) == PROP_CONST) | |
1801 | { | |
1802 | /* If the COMPONENT has a dynamic location, and is an | |
1803 | lval_internalvar_component, then we change it to a lval_memory. | |
1804 | ||
1805 | Usually a component of an internalvar is created non-lazy, and has | |
1806 | its content immediately copied from the parent internalvar. | |
1807 | However, for components with a dynamic location, the content of | |
1808 | the component is not contained within the parent, but is instead | |
1809 | accessed indirectly. Further, the component will be created as a | |
1810 | lazy value. | |
1811 | ||
1812 | By changing the type of the component to lval_memory we ensure | |
1813 | that value_fetch_lazy can successfully load the component. | |
1814 | ||
1815 | This solution isn't ideal, but a real fix would require values to | |
1816 | carry around both the parent value contents, and the contents of | |
1817 | any dynamic fields within the parent. This is a substantial | |
1818 | change to how values work in GDB. */ | |
1819 | if (VALUE_LVAL (component) == lval_internalvar_component) | |
1820 | { | |
1821 | gdb_assert (value_lazy (component)); | |
1822 | VALUE_LVAL (component) = lval_memory; | |
1823 | } | |
1824 | else | |
1825 | gdb_assert (VALUE_LVAL (component) == lval_memory); | |
1826 | set_value_address (component, TYPE_DATA_LOCATION_ADDR (type)); | |
1827 | } | |
74bcbdf3 PA |
1828 | } |
1829 | ||
/* Access to the value history.  */

/* Record a new value in the value history.
   Returns the absolute history index of the entry.  */

int
record_latest_value (struct value *val)
{
  /* We don't want this value to have anything to do with the inferior anymore.
     In particular, "set $1 = 50" should not affect the variable from which
     the value was taken, and fast watchpoints should be able to assume that
     a value on the value history never changes.  */
  if (value_lazy (val))
    value_fetch_lazy (val);
  /* We preserve VALUE_LVAL so that the user can find out where it was fetched
     from.  This is a bit dubious, because then *&$1 does not just return $1
     but the current contents of that location.  c'est la vie...  */
  val->modifiable = 0;

  value_history.push_back (release_value (val));

  return value_history.size ();
}

/* Return a copy of the value in the history with sequence number NUM.  */

struct value *
access_value_history (int num)
{
  int absnum = num;

  if (absnum <= 0)
    absnum += value_history.size ();

  if (absnum <= 0)
    {
      if (num == 0)
        error (_("The history is empty."));
      else if (num == 1)
        error (_("There is only one value in the history."));
      else
        error (_("History does not go back to $$%d."), -num);
    }
  if (absnum > value_history.size ())
    error (_("History has not yet reached $%d."), absnum);

  absnum--;

  return value_copy (value_history[absnum].get ());
}

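/* Illustrative sketch (hypothetical caller): the two functions above are
   the C counterparts of the "$N" history syntax.  Recording a value and
   reading it back might look like:

     int idx = record_latest_value (val);        -- VAL becomes $idx
     struct value *again = access_value_history (idx);
     struct value *last = access_value_history (0);   -- same as $

   Non-positive arguments index backwards from the end, mirroring the
   $$N syntax on the command line.  */
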
c906108c | 1881 | static void |
5fed81ff | 1882 | show_values (const char *num_exp, int from_tty) |
c906108c | 1883 | { |
52f0bd74 | 1884 | int i; |
f23631e4 | 1885 | struct value *val; |
c906108c SS |
1886 | static int num = 1; |
1887 | ||
1888 | if (num_exp) | |
1889 | { | |
f132ba9d | 1890 | /* "show values +" should print from the stored position. |
dda83cd7 | 1891 | "show values <exp>" should print around value number <exp>. */ |
c906108c | 1892 | if (num_exp[0] != '+' || num_exp[1] != '\0') |
bb518678 | 1893 | num = parse_and_eval_long (num_exp) - 5; |
c906108c SS |
1894 | } |
1895 | else | |
1896 | { | |
f132ba9d | 1897 | /* "show values" means print the last 10 values. */ |
4d0266a0 | 1898 | num = value_history.size () - 9; |
c906108c SS |
1899 | } |
1900 | ||
1901 | if (num <= 0) | |
1902 | num = 1; | |
1903 | ||
4d0266a0 | 1904 | for (i = num; i < num + 10 && i <= value_history.size (); i++) |
c906108c | 1905 | { |
79a45b7d | 1906 | struct value_print_options opts; |
a109c7c1 | 1907 | |
c906108c | 1908 | val = access_value_history (i); |
a3f17187 | 1909 | printf_filtered (("$%d = "), i); |
79a45b7d TT |
1910 | get_user_print_options (&opts); |
1911 | value_print (val, gdb_stdout, &opts); | |
a3f17187 | 1912 | printf_filtered (("\n")); |
c906108c SS |
1913 | } |
1914 | ||
f132ba9d | 1915 | /* The next "show values +" should start after what we just printed. */ |
c906108c SS |
1916 | num += 10; |
1917 | ||
1918 | /* Hitting just return after this command should do the same thing as | |
f132ba9d TJB |
1919 | "show values +". If num_exp is null, this is unnecessary, since |
1920 | "show values +" is not useful after "show values". */ | |
c906108c | 1921 | if (from_tty && num_exp) |
85c4be7c | 1922 | set_repeat_arguments ("+"); |
c906108c SS |
1923 | } |
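/* For example (interactive usage, illustrative):

     (gdb) show values          -- list the last ten history values
     (gdb) show values 42       -- list ten values around $42
     (gdb) show values +        -- continue from the previous listing

   A bare RET after "show values <exp>" repeats as "show values +",
   per the set_repeat_arguments call above.  */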
1924 | \f | |
52059ffd TT |
1925 | enum internalvar_kind |
1926 | { | |
1927 | /* The internal variable is empty. */ | |
1928 | INTERNALVAR_VOID, | |
1929 | ||
1930 | /* The value of the internal variable is provided directly as | |
1931 | a GDB value object. */ | |
1932 | INTERNALVAR_VALUE, | |
1933 | ||
1934 | /* A fresh value is computed via a call-back routine on every | |
1935 | access to the internal variable. */ | |
1936 | INTERNALVAR_MAKE_VALUE, | |
1937 | ||
1938 | /* The internal variable holds a GDB internal convenience function. */ | |
1939 | INTERNALVAR_FUNCTION, | |
1940 | ||
1941 | /* The variable holds an integer value. */ | |
1942 | INTERNALVAR_INTEGER, | |
1943 | ||
1944 | /* The variable holds a GDB-provided string. */ | |
1945 | INTERNALVAR_STRING, | |
1946 | }; | |
1947 | ||
1948 | union internalvar_data | |
1949 | { | |
1950 | /* A value object used with INTERNALVAR_VALUE. */ | |
1951 | struct value *value; | |
1952 | ||
1953 | /* The call-back routine used with INTERNALVAR_MAKE_VALUE. */ | |
1954 | struct | |
1955 | { | |
1956 | /* The functions to call. */ | |
1957 | const struct internalvar_funcs *functions; | |
1958 | ||
1959 | /* The function's user-data. */ | |
1960 | void *data; | |
1961 | } make_value; | |
1962 | ||
1963 | /* The internal function used with INTERNALVAR_FUNCTION. */ | |
1964 | struct | |
1965 | { | |
1966 | struct internal_function *function; | |
1967 | /* True if this is the canonical name for the function. */ | |
1968 | int canonical; | |
1969 | } fn; | |
1970 | ||
1971 | /* An integer value used with INTERNALVAR_INTEGER. */ | |
1972 | struct | |
1973 | { | |
1974 | /* If type is non-NULL, it will be used as the type to generate | |
1975 | a value for this internal variable. If type is NULL, a default | |
1976 | integer type for the architecture is used. */ | |
1977 | struct type *type; | |
1978 | LONGEST val; | |
1979 | } integer; | |
1980 | ||
1981 | /* A string value used with INTERNALVAR_STRING. */ | |
1982 | char *string; | |
1983 | }; | |
1984 | ||
c906108c SS |
1985 | /* Internal variables. These are variables within the debugger |
1986 | that hold values assigned by debugger commands. | |
1987 | The user refers to them with a '$' prefix | |
1988 | that does not appear in the variable names stored internally. */ | |
1989 | ||
4fa62494 UW |
1990 | struct internalvar |
1991 | { | |
1992 | struct internalvar *next; | |
1993 | char *name; | |
4fa62494 | 1994 | |
78267919 UW |
1995 | /* We support various different kinds of content of an internal variable. |
1996 | enum internalvar_kind specifies the kind, and union internalvar_data | |
1997 | provides the data associated with this particular kind. */ | |
1998 | ||
52059ffd | 1999 | enum internalvar_kind kind; |
4fa62494 | 2000 | |
52059ffd | 2001 | union internalvar_data u; |
4fa62494 UW |
2002 | }; |
2003 | ||
c906108c SS |
2004 | static struct internalvar *internalvars; |
2005 | ||
3e43a32a MS |
2006 | /* If the variable does not already exist create it and give it the |
2007 | value given. If no value is given then the default is zero. */ | |
53e5f3cf | 2008 | static void |
0b39b52e | 2009 | init_if_undefined_command (const char* args, int from_tty) |
53e5f3cf | 2010 | { |
413403fc | 2011 | struct internalvar *intvar = nullptr; |
53e5f3cf AS |
2012 | |
2013 | /* Parse the expression - this is taken from set_command(). */ | |
4d01a485 | 2014 | expression_up expr = parse_expression (args); |
53e5f3cf AS |
2015 | |
2016 | /* Validate the expression. | |
2017 | Was the expression an assignment? | |
2018 | Or even an expression at all? */ | |
3dd93bf8 | 2019 | if (expr->first_opcode () != BINOP_ASSIGN) |
53e5f3cf AS |
2020 | error (_("Init-if-undefined requires an assignment expression.")); |
2021 | ||
1eaebe02 TT |
2022 | /* Extract the variable from the parsed expression. */ |
2023 | expr::assign_operation *assign | |
2024 | = dynamic_cast<expr::assign_operation *> (expr->op.get ()); | |
2025 | if (assign != nullptr) | |
413403fc | 2026 | { |
1eaebe02 TT |
2027 | expr::operation *lhs = assign->get_lhs (); |
2028 | expr::internalvar_operation *ivarop | |
2029 | = dynamic_cast<expr::internalvar_operation *> (lhs); | |
2030 | if (ivarop != nullptr) | |
2031 | intvar = ivarop->get_internalvar (); | |
413403fc TT |
2032 | } |
2033 | ||
2034 | if (intvar == nullptr) | |
3e43a32a MS |
2035 | error (_("The first parameter to init-if-undefined " |
2036 | "should be a GDB variable.")); | |
53e5f3cf AS |
2037 | |
2038 | /* Only evaluate the expression if the lvalue is void. | |
85102364 | 2039 | This may still fail if the expression is invalid. */ |
78267919 | 2040 | if (intvar->kind == INTERNALVAR_VOID) |
4d01a485 | 2041 | evaluate_expression (expr.get ()); |
53e5f3cf AS |
2042 | } |
2043 | ||
2044 | ||
c906108c SS |
2045 | /* Look up an internal variable with name NAME. NAME should not |
2046 | normally include a dollar sign. | |
2047 | ||
2048 | If the specified internal variable does not exist, | |
c4a3d09a | 2049 | the return value is NULL. */ |
c906108c SS |
2050 | |
2051 | struct internalvar * | |
bc3b79fd | 2052 | lookup_only_internalvar (const char *name) |
c906108c | 2053 | { |
52f0bd74 | 2054 | struct internalvar *var; |
c906108c SS |
2055 | |
2056 | for (var = internalvars; var; var = var->next) | |
5cb316ef | 2057 | if (strcmp (var->name, name) == 0) |
c906108c SS |
2058 | return var; |
2059 | ||
c4a3d09a MF |
2060 | return NULL; |
2061 | } | |
2062 | ||
eb3ff9a5 PA |
2063 | /* Complete NAME by comparing it to the names of internal |
2064 | variables. */ | |
d55637df | 2065 | |
eb3ff9a5 PA |
2066 | void |
2067 | complete_internalvar (completion_tracker &tracker, const char *name) | |
d55637df | 2068 | { |
d55637df TT |
2069 | struct internalvar *var; |
2070 | int len; | |
2071 | ||
2072 | len = strlen (name); | |
2073 | ||
2074 | for (var = internalvars; var; var = var->next) | |
2075 | if (strncmp (var->name, name, len) == 0) | |
b02f78f9 | 2076 | tracker.add_completion (make_unique_xstrdup (var->name)); |
d55637df | 2077 | } |
c4a3d09a MF |
2078 | |
2079 | /* Create an internal variable with name NAME and with a void value. | |
2080 | NAME should not normally include a dollar sign. */ | |
2081 | ||
2082 | struct internalvar * | |
bc3b79fd | 2083 | create_internalvar (const char *name) |
c4a3d09a | 2084 | { |
8d749320 | 2085 | struct internalvar *var = XNEW (struct internalvar); |
a109c7c1 | 2086 | |
395f9c91 | 2087 | var->name = xstrdup (name); |
78267919 | 2088 | var->kind = INTERNALVAR_VOID; |
c906108c SS |
2089 | var->next = internalvars; |
2090 | internalvars = var; | |
2091 | return var; | |
2092 | } | |
2093 | ||
4aa995e1 PA |
2094 | /* Create an internal variable with name NAME and register FUN as the |
2095 | function that value_of_internalvar uses to create a value whenever | |
2096 | this variable is referenced. NAME should not normally include a | |
22d2b532 SDJ |
2097 | dollar sign. DATA is passed uninterpreted to FUN when it is |
2098 | called. CLEANUP, if not NULL, is called when the internal variable | |
2099 | is destroyed. It is passed DATA as its only argument. */ | |
4aa995e1 PA |
2100 | |
2101 | struct internalvar * | |
22d2b532 SDJ |
2102 | create_internalvar_type_lazy (const char *name, |
2103 | const struct internalvar_funcs *funcs, | |
2104 | void *data) | |
4aa995e1 | 2105 | { |
4fa62494 | 2106 | struct internalvar *var = create_internalvar (name); |
a109c7c1 | 2107 | |
78267919 | 2108 | var->kind = INTERNALVAR_MAKE_VALUE; |
22d2b532 SDJ |
2109 | var->u.make_value.functions = funcs; |
2110 | var->u.make_value.data = data; | |
4aa995e1 PA |
2111 | return var; |
2112 | } | |
c4a3d09a | 2113 | |
22d2b532 SDJ |
2114 | /* See documentation in value.h. */ |
2115 | ||
2116 | int | |
2117 | compile_internalvar_to_ax (struct internalvar *var, | |
2118 | struct agent_expr *expr, | |
2119 | struct axs_value *value) | |
2120 | { | |
2121 | if (var->kind != INTERNALVAR_MAKE_VALUE | |
2122 | || var->u.make_value.functions->compile_to_ax == NULL) | |
2123 | return 0; | |
2124 | ||
2125 | var->u.make_value.functions->compile_to_ax (var, expr, value, | |
2126 | var->u.make_value.data); | |
2127 | return 1; | |
2128 | } | |
2129 | ||
c4a3d09a MF |
2130 | /* Look up an internal variable with name NAME. NAME should not |
2131 | normally include a dollar sign. | |
2132 | ||
2133 | If the specified internal variable does not exist, | |
2134 | one is created, with a void value. */ | |
2135 | ||
2136 | struct internalvar * | |
bc3b79fd | 2137 | lookup_internalvar (const char *name) |
c4a3d09a MF |
2138 | { |
2139 | struct internalvar *var; | |
2140 | ||
2141 | var = lookup_only_internalvar (name); | |
2142 | if (var) | |
2143 | return var; | |
2144 | ||
2145 | return create_internalvar (name); | |
2146 | } | |
2147 | ||
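/* Illustrative sketch (hypothetical caller; GDBARCH is an assumption of
   the sketch): creating or updating the convenience variable "$answer"
   from C code:

     struct internalvar *var = lookup_internalvar ("answer");
     struct value *val
       = value_from_longest (builtin_type (gdbarch)->builtin_int, 42);
     set_internalvar (var, val);

   lookup_internalvar never fails; it creates a void-valued variable on
   first use, just as referencing "$answer" from the CLI would.  */
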
78267919 UW |
2148 | /* Return current value of internal variable VAR. For variables that |
2149 | are not inherently typed, use a value type appropriate for GDBARCH. */ | |
2150 | ||
f23631e4 | 2151 | struct value * |
78267919 | 2152 | value_of_internalvar (struct gdbarch *gdbarch, struct internalvar *var) |
c906108c | 2153 | { |
f23631e4 | 2154 | struct value *val; |
0914bcdb SS |
2155 | struct trace_state_variable *tsv; |
2156 | ||
2157 | /* If there is a trace state variable of the same name, assume that | |
2158 | is what we really want to see. */ | |
2159 | tsv = find_trace_state_variable (var->name); | |
2160 | if (tsv) | |
2161 | { | |
2162 | tsv->value_known = target_get_trace_state_variable_value (tsv->number, | |
2163 | &(tsv->value)); | |
2164 | if (tsv->value_known) | |
2165 | val = value_from_longest (builtin_type (gdbarch)->builtin_int64, | |
2166 | tsv->value); | |
2167 | else | |
2168 | val = allocate_value (builtin_type (gdbarch)->builtin_void); | |
2169 | return val; | |
2170 | } | |
c906108c | 2171 | |
78267919 | 2172 | switch (var->kind) |
5f5233d4 | 2173 | { |
78267919 UW |
2174 | case INTERNALVAR_VOID: |
2175 | val = allocate_value (builtin_type (gdbarch)->builtin_void); | |
2176 | break; | |
4fa62494 | 2177 | |
78267919 UW |
2178 | case INTERNALVAR_FUNCTION: |
2179 | val = allocate_value (builtin_type (gdbarch)->internal_fn); | |
2180 | break; | |
4fa62494 | 2181 | |
cab0c772 UW |
2182 | case INTERNALVAR_INTEGER: |
2183 | if (!var->u.integer.type) | |
78267919 | 2184 | val = value_from_longest (builtin_type (gdbarch)->builtin_int, |
cab0c772 | 2185 | var->u.integer.val); |
78267919 | 2186 | else |
cab0c772 UW |
2187 | val = value_from_longest (var->u.integer.type, var->u.integer.val); |
2188 | break; | |
2189 | ||
78267919 UW |
2190 | case INTERNALVAR_STRING: |
2191 | val = value_cstring (var->u.string, strlen (var->u.string), | |
2192 | builtin_type (gdbarch)->builtin_char); | |
2193 | break; | |
4fa62494 | 2194 | |
78267919 UW |
2195 | case INTERNALVAR_VALUE: |
2196 | val = value_copy (var->u.value); | |
4aa995e1 PA |
2197 | if (value_lazy (val)) |
2198 | value_fetch_lazy (val); | |
78267919 | 2199 | break; |
4aa995e1 | 2200 | |
78267919 | 2201 | case INTERNALVAR_MAKE_VALUE: |
22d2b532 SDJ |
2202 | val = (*var->u.make_value.functions->make_value) (gdbarch, var, |
2203 | var->u.make_value.data); | |
78267919 UW |
2204 | break; |
2205 | ||
2206 | default: | |
9b20d036 | 2207 | internal_error (__FILE__, __LINE__, _("bad kind")); |
78267919 UW |
2208 | } |
2209 | ||
2210 | /* Change the VALUE_LVAL to lval_internalvar so that future operations | |
2211 | on this value go back to affect the original internal variable. | |
2212 | ||
2213 | Do not do this for INTERNALVAR_MAKE_VALUE variables, as those have | |
30baf67b | 2214 | no underlying modifiable state in the internal variable. |
78267919 UW |
2215 | |
2216 | Likewise, if the variable's value is a computed lvalue, we want | |
2217 | references to it to produce another computed lvalue, where | |
2218 | references and assignments actually operate through the | |
2219 | computed value's functions. | |
2220 | ||
2221 | This means that internal variables with computed values | |
2222 | behave a little differently from other internal variables: | |
2223 | assignments to them don't just replace the previous value | |
2224 | altogether. At the moment, this seems like the behavior we | |
2225 | want. */ | |
2226 | ||
2227 | if (var->kind != INTERNALVAR_MAKE_VALUE | |
2228 | && val->lval != lval_computed) | |
2229 | { | |
2230 | VALUE_LVAL (val) = lval_internalvar; | |
2231 | VALUE_INTERNALVAR (val) = var; | |
5f5233d4 | 2232 | } |
d3c139e9 | 2233 | |
4fa62494 UW |
2234 | return val; |
2235 | } | |
d3c139e9 | 2236 | |
4fa62494 UW |
2237 | int |
2238 | get_internalvar_integer (struct internalvar *var, LONGEST *result) | |
2239 | { | |
3158c6ed | 2240 | if (var->kind == INTERNALVAR_INTEGER) |
4fa62494 | 2241 | { |
cab0c772 UW |
2242 | *result = var->u.integer.val; |
2243 | return 1; | |
3158c6ed | 2244 | } |
d3c139e9 | 2245 | |
3158c6ed PA |
2246 | if (var->kind == INTERNALVAR_VALUE) |
2247 | { | |
2248 | struct type *type = check_typedef (value_type (var->u.value)); | |
2249 | ||
78134374 | 2250 | if (type->code () == TYPE_CODE_INT) |
3158c6ed PA |
2251 | { |
2252 | *result = value_as_long (var->u.value); | |
2253 | return 1; | |
2254 | } | |
4fa62494 | 2255 | } |
3158c6ed PA |
2256 | |
2257 | return 0; | |
4fa62494 | 2258 | } |
d3c139e9 | 2259 | |
4fa62494 UW |
2260 | static int |
2261 | get_internalvar_function (struct internalvar *var, | |
2262 | struct internal_function **result) | |
2263 | { | |
78267919 | 2264 | switch (var->kind) |
d3c139e9 | 2265 | { |
78267919 UW |
2266 | case INTERNALVAR_FUNCTION: |
2267 | *result = var->u.fn.function; | |
4fa62494 | 2268 | return 1; |
d3c139e9 | 2269 | |
4fa62494 UW |
2270 | default: |
2271 | return 0; | |
2272 | } | |
c906108c SS |
2273 | } |
2274 | ||
2275 | void | |
6b850546 DT |
2276 | set_internalvar_component (struct internalvar *var, |
2277 | LONGEST offset, LONGEST bitpos, | |
2278 | LONGEST bitsize, struct value *newval) | |
c906108c | 2279 | { |
4fa62494 | 2280 | gdb_byte *addr; |
3ae385af SM |
2281 | struct gdbarch *arch; |
2282 | int unit_size; | |
c906108c | 2283 | |
78267919 | 2284 | switch (var->kind) |
4fa62494 | 2285 | { |
78267919 UW |
2286 | case INTERNALVAR_VALUE: |
2287 | addr = value_contents_writeable (var->u.value); | |
3ae385af SM |
2288 | arch = get_value_arch (var->u.value); |
2289 | unit_size = gdbarch_addressable_memory_unit_size (arch); | |
4fa62494 UW |
2290 | |
2291 | if (bitsize) | |
50810684 | 2292 | modify_field (value_type (var->u.value), addr + offset, |
4fa62494 UW |
2293 | value_as_long (newval), bitpos, bitsize); |
2294 | else | |
3ae385af | 2295 | memcpy (addr + offset * unit_size, value_contents (newval), |
4fa62494 UW |
2296 | TYPE_LENGTH (value_type (newval))); |
2297 | break; | |
78267919 UW |
2298 | |
2299 | default: | |
2300 | /* We can never get a component of any other kind. */ | |
9b20d036 | 2301 | internal_error (__FILE__, __LINE__, _("set_internalvar_component")); |
4fa62494 | 2302 | } |
c906108c SS |
2303 | } |
2304 | ||
2305 | void | |
f23631e4 | 2306 | set_internalvar (struct internalvar *var, struct value *val) |
c906108c | 2307 | { |
78267919 | 2308 | enum internalvar_kind new_kind; |
4fa62494 | 2309 | union internalvar_data new_data = { 0 }; |
c906108c | 2310 | |
78267919 | 2311 | if (var->kind == INTERNALVAR_FUNCTION && var->u.fn.canonical) |
bc3b79fd TJB |
2312 | error (_("Cannot overwrite convenience function %s"), var->name); |
2313 | ||
4fa62494 | 2314 | /* Prepare new contents. */ |
78134374 | 2315 | switch (check_typedef (value_type (val))->code ()) |
4fa62494 UW |
2316 | { |
2317 | case TYPE_CODE_VOID: | |
78267919 | 2318 | new_kind = INTERNALVAR_VOID; |
4fa62494 UW |
2319 | break; |
2320 | ||
2321 | case TYPE_CODE_INTERNAL_FUNCTION: | |
2322 | gdb_assert (VALUE_LVAL (val) == lval_internalvar); | |
78267919 UW |
2323 | new_kind = INTERNALVAR_FUNCTION; |
2324 | get_internalvar_function (VALUE_INTERNALVAR (val), | |
2325 | &new_data.fn.function); | |
2326 | /* Copies created here are never canonical. */ | |
4fa62494 UW |
2327 | break; |
2328 | ||
4fa62494 | 2329 | default: |
78267919 | 2330 | new_kind = INTERNALVAR_VALUE; |
895dafa6 TT |
2331 | struct value *copy = value_copy (val); |
2332 | copy->modifiable = 1; | |
4fa62494 UW |
2333 | |
2334 | /* Force the value to be fetched from the target now, to avoid problems | |
2335 | later when this internalvar is referenced and the target is gone or | |
2336 | has changed. */ | |
895dafa6 TT |
2337 | if (value_lazy (copy)) |
2338 | value_fetch_lazy (copy); | |
4fa62494 UW |
2339 | |
2340 | /* Release the value from the value chain to prevent it from being | |
2341 | deleted by free_all_values. From here on this function should not | |
2342 | call error () until new_data is installed into the var->u to avoid | |
2343 | leaking memory. */ | |
895dafa6 | 2344 | new_data.value = release_value (copy).release (); |
9920b434 BH |
2345 | |
2346 | /* Internal variables which are created from values with a dynamic
2347 | location don't need the location property of the origin anymore.
2348 | The resolved dynamic location is used before any other address
2349 | when accessing the value.
2350 | If we keep it, we would still refer to the origin value.
2351 | Remove the location property in case it exists. */
7aa91313 | 2352 | value_type (new_data.value)->remove_dyn_prop (DYN_PROP_DATA_LOCATION); |
9920b434 | 2353 | |
4fa62494 UW |
2354 | break; |
2355 | } | |
2356 | ||
2357 | /* Clean up old contents. */ | |
2358 | clear_internalvar (var); | |
2359 | ||
2360 | /* Switch over. */ | |
78267919 | 2361 | var->kind = new_kind; |
4fa62494 | 2362 | var->u = new_data; |
c906108c SS |
2363 | /* End code which must not call error(). */ |
2364 | } | |
2365 | ||
4fa62494 UW |
2366 | void |
2367 | set_internalvar_integer (struct internalvar *var, LONGEST l) | |
2368 | { | |
2369 | /* Clean up old contents. */ | |
2370 | clear_internalvar (var); | |
2371 | ||
cab0c772 UW |
2372 | var->kind = INTERNALVAR_INTEGER; |
2373 | var->u.integer.type = NULL; | |
2374 | var->u.integer.val = l; | |
78267919 UW |
2375 | } |
2376 | ||
2377 | void | |
2378 | set_internalvar_string (struct internalvar *var, const char *string) | |
2379 | { | |
2380 | /* Clean up old contents. */ | |
2381 | clear_internalvar (var); | |
2382 | ||
2383 | var->kind = INTERNALVAR_STRING; | |
2384 | var->u.string = xstrdup (string); | |
4fa62494 UW |
2385 | } |
2386 | ||
2387 | static void | |
2388 | set_internalvar_function (struct internalvar *var, struct internal_function *f) | |
2389 | { | |
2390 | /* Clean up old contents. */ | |
2391 | clear_internalvar (var); | |
2392 | ||
78267919 UW |
2393 | var->kind = INTERNALVAR_FUNCTION; |
2394 | var->u.fn.function = f; | |
2395 | var->u.fn.canonical = 1; | |
2396 | /* Variables installed here are always the canonical version. */ | |
4fa62494 UW |
2397 | } |
2398 | ||
2399 | void | |
2400 | clear_internalvar (struct internalvar *var) | |
2401 | { | |
2402 | /* Clean up old contents. */ | |
78267919 | 2403 | switch (var->kind) |
4fa62494 | 2404 | { |
78267919 | 2405 | case INTERNALVAR_VALUE: |
22bc8444 | 2406 | value_decref (var->u.value); |
78267919 UW |
2407 | break; |
2408 | ||
2409 | case INTERNALVAR_STRING: | |
2410 | xfree (var->u.string); | |
4fa62494 UW |
2411 | break; |
2412 | ||
22d2b532 SDJ |
2413 | case INTERNALVAR_MAKE_VALUE: |
2414 | if (var->u.make_value.functions->destroy != NULL) | |
2415 | var->u.make_value.functions->destroy (var->u.make_value.data); | |
2416 | break; | |
2417 | ||
4fa62494 | 2418 | default: |
4fa62494 UW |
2419 | break; |
2420 | } | |
2421 | ||
78267919 UW |
2422 | /* Reset to void kind. */ |
2423 | var->kind = INTERNALVAR_VOID; | |
4fa62494 UW |
2424 | } |
2425 | ||
baf20f76 | 2426 | const char * |
4bf7b526 | 2427 | internalvar_name (const struct internalvar *var) |
c906108c SS |
2428 | { |
2429 | return var->name; | |
2430 | } | |
2431 | ||
4fa62494 UW |
2432 | static struct internal_function * |
2433 | create_internal_function (const char *name, | |
2434 | internal_function_fn handler, void *cookie) | |
bc3b79fd | 2435 | { |
bc3b79fd | 2436 | struct internal_function *ifn = XNEW (struct internal_function); |
a109c7c1 | 2437 | |
bc3b79fd TJB |
2438 | ifn->name = xstrdup (name); |
2439 | ifn->handler = handler; | |
2440 | ifn->cookie = cookie; | |
4fa62494 | 2441 | return ifn; |
bc3b79fd TJB |
2442 | } |
2443 | ||
91f87213 | 2444 | const char * |
bc3b79fd TJB |
2445 | value_internal_function_name (struct value *val) |
2446 | { | |
4fa62494 UW |
2447 | struct internal_function *ifn; |
2448 | int result; | |
2449 | ||
2450 | gdb_assert (VALUE_LVAL (val) == lval_internalvar); | |
2451 | result = get_internalvar_function (VALUE_INTERNALVAR (val), &ifn); | |
2452 | gdb_assert (result); | |
2453 | ||
bc3b79fd TJB |
2454 | return ifn->name; |
2455 | } | |
2456 | ||
2457 | struct value * | |
d452c4bc UW |
2458 | call_internal_function (struct gdbarch *gdbarch, |
2459 | const struct language_defn *language, | |
2460 | struct value *func, int argc, struct value **argv) | |
bc3b79fd | 2461 | { |
4fa62494 UW |
2462 | struct internal_function *ifn; |
2463 | int result; | |
2464 | ||
2465 | gdb_assert (VALUE_LVAL (func) == lval_internalvar); | |
2466 | result = get_internalvar_function (VALUE_INTERNALVAR (func), &ifn); | |
2467 | gdb_assert (result); | |
2468 | ||
d452c4bc | 2469 | return (*ifn->handler) (gdbarch, language, ifn->cookie, argc, argv); |
bc3b79fd TJB |
2470 | } |
2471 | ||
2472 | /* The 'function' command. This does nothing -- it is just a | |
2473 | placeholder to let "help function NAME" work. This is also used as | |
2474 | the implementation of the sub-command that is created when | |
2475 | registering an internal function. */ | |
2476 | static void | |
981a3fb3 | 2477 | function_command (const char *command, int from_tty) |
bc3b79fd TJB |
2478 | { |
2479 | /* Do nothing. */ | |
2480 | } | |
2481 | ||
1a6d41c6 TT |
2482 | /* Helper function that does the work for add_internal_function. */ |
2483 | ||
2484 | static struct cmd_list_element * | |
2485 | do_add_internal_function (const char *name, const char *doc, | |
2486 | internal_function_fn handler, void *cookie) | |
bc3b79fd | 2487 | { |
4fa62494 | 2488 | struct internal_function *ifn; |
bc3b79fd | 2489 | struct internalvar *var = lookup_internalvar (name); |
4fa62494 UW |
2490 | |
2491 | ifn = create_internal_function (name, handler, cookie); | |
2492 | set_internalvar_function (var, ifn); | |
bc3b79fd | 2493 | |
3ea16160 | 2494 | return add_cmd (name, no_class, function_command, doc, &functionlist); |
1a6d41c6 TT |
2495 | } |
2496 | ||
2497 | /* See value.h. */ | |
2498 | ||
2499 | void | |
2500 | add_internal_function (const char *name, const char *doc, | |
2501 | internal_function_fn handler, void *cookie) | |
2502 | { | |
2503 | do_add_internal_function (name, doc, handler, cookie); | |
2504 | } | |
2505 | ||
2506 | /* See value.h. */ | |
2507 | ||
2508 | void | |
3ea16160 TT |
2509 | add_internal_function (gdb::unique_xmalloc_ptr<char> &&name, |
2510 | gdb::unique_xmalloc_ptr<char> &&doc, | |
1a6d41c6 TT |
2511 | internal_function_fn handler, void *cookie) |
2512 | { | |
2513 | struct cmd_list_element *cmd | |
3ea16160 | 2514 | = do_add_internal_function (name.get (), doc.get (), handler, cookie); |
1a6d41c6 TT |
2515 | doc.release (); |
2516 | cmd->doc_allocated = 1; | |
3ea16160 TT |
2517 | name.release (); |
2518 | cmd->name_allocated = 1; | |
bc3b79fd TJB |
2519 | } |
2520 | ||
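/* Illustrative sketch (hypothetical function and handler; the handler
   signature follows internal_function_fn as invoked by
   call_internal_function above): registering "$_double" so that
   "print $_double (21)" yields 42 might look like:

     static struct value *
     double_handler (struct gdbarch *gdbarch,
                     const struct language_defn *language,
                     void *cookie, int argc, struct value **argv)
     {
       if (argc != 1)
         error (_("$_double takes exactly one argument."));
       return value_from_longest (builtin_type (gdbarch)->builtin_int,
                                  2 * value_as_long (argv[0]));
     }

     ...
     add_internal_function ("_double", _("Double an integer."),
                            double_handler, NULL);
*/
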
ae5a43e0 DJ |
2521 | /* Update VALUE before discarding OBJFILE. COPIED_TYPES is used to |
2522 | prevent cycles / duplicates. */ | |
2523 | ||
4e7a5ef5 | 2524 | void |
ae5a43e0 DJ |
2525 | preserve_one_value (struct value *value, struct objfile *objfile, |
2526 | htab_t copied_types) | |
2527 | { | |
6ac37371 | 2528 | if (value->type->objfile_owner () == objfile) |
ae5a43e0 DJ |
2529 | value->type = copy_type_recursive (objfile, value->type, copied_types); |
2530 | ||
6ac37371 | 2531 | if (value->enclosing_type->objfile_owner () == objfile) |
ae5a43e0 DJ |
2532 | value->enclosing_type = copy_type_recursive (objfile, |
2533 | value->enclosing_type, | |
2534 | copied_types); | |
2535 | } | |
2536 | ||
78267919 UW |
2537 | /* Likewise for internal variable VAR. */ |
2538 | ||
2539 | static void | |
2540 | preserve_one_internalvar (struct internalvar *var, struct objfile *objfile, | |
2541 | htab_t copied_types) | |
2542 | { | |
2543 | switch (var->kind) | |
2544 | { | |
cab0c772 | 2545 | case INTERNALVAR_INTEGER: |
6ac37371 SM |
2546 | if (var->u.integer.type |
2547 | && var->u.integer.type->objfile_owner () == objfile) | |
cab0c772 UW |
2548 | var->u.integer.type |
2549 | = copy_type_recursive (objfile, var->u.integer.type, copied_types); | |
2550 | break; | |
2551 | ||
78267919 UW |
2552 | case INTERNALVAR_VALUE: |
2553 | preserve_one_value (var->u.value, objfile, copied_types); | |
2554 | break; | |
2555 | } | |
2556 | } | |
2557 | ||
ae5a43e0 DJ |
2558 | /* Update the internal variables and value history when OBJFILE is |
2559 | discarded; we must copy the types out of the objfile. New global types | |
2560 | will be created for every convenience variable which currently points to | |
2561 | this objfile's types, and the convenience variables will be adjusted to | |
2562 | use the new global types. */ | |
c906108c SS |
2563 | |
2564 | void | |
ae5a43e0 | 2565 | preserve_values (struct objfile *objfile) |
c906108c | 2566 | { |
52f0bd74 | 2567 | struct internalvar *var; |
c906108c | 2568 | |
ae5a43e0 DJ |
2569 | /* Create the hash table. We allocate on the objfile's obstack, since |
2570 | it is soon to be deleted. */ | |
6108fd18 | 2571 | htab_up copied_types = create_copied_types_hash (objfile); |
ae5a43e0 | 2572 | |
4d0266a0 | 2573 | for (const value_ref_ptr &item : value_history) |
6108fd18 | 2574 | preserve_one_value (item.get (), objfile, copied_types.get ()); |
ae5a43e0 DJ |
2575 | |
2576 | for (var = internalvars; var; var = var->next) | |
6108fd18 | 2577 | preserve_one_internalvar (var, objfile, copied_types.get ()); |
ae5a43e0 | 2578 | |
6108fd18 | 2579 | preserve_ext_lang_values (objfile, copied_types.get ()); |
c906108c SS |
2580 | } |
2581 | ||
2582 | static void | |
ad25e423 | 2583 | show_convenience (const char *ignore, int from_tty) |
c906108c | 2584 | { |
e17c207e | 2585 | struct gdbarch *gdbarch = get_current_arch (); |
52f0bd74 | 2586 | struct internalvar *var; |
c906108c | 2587 | int varseen = 0; |
79a45b7d | 2588 | struct value_print_options opts; |
c906108c | 2589 | |
79a45b7d | 2590 | get_user_print_options (&opts); |
c906108c SS |
2591 | for (var = internalvars; var; var = var->next) |
2592 | { | |
c709acd1 | 2593 | |
c906108c SS |
2594 | if (!varseen) |
2595 | { | |
2596 | varseen = 1; | |
2597 | } | |
a3f17187 | 2598 | printf_filtered (("$%s = "), var->name); |
c709acd1 | 2599 | |
a70b8144 | 2600 | try |
c709acd1 PA |
2601 | { |
2602 | struct value *val; | |
2603 | ||
2604 | val = value_of_internalvar (gdbarch, var); | |
2605 | value_print (val, gdb_stdout, &opts); | |
2606 | } | |
230d2906 | 2607 | catch (const gdb_exception_error &ex) |
492d29ea | 2608 | { |
7f6aba03 TT |
2609 | fprintf_styled (gdb_stdout, metadata_style.style (), |
2610 | _("<error: %s>"), ex.what ()); | |
492d29ea | 2611 | } |
492d29ea | 2612 | |
a3f17187 | 2613 | printf_filtered (("\n")); |
c906108c SS |
2614 | } |
2615 | if (!varseen) | |
f47f77df DE |
2616 | { |
2617 | /* This text does not mention convenience functions on purpose. | |
2618 | The user can't create them except via Python, and if Python support | |
2619 | is installed this message will never be printed ($_streq will | |
2620 | exist). */ | |
2621 | printf_unfiltered (_("No debugger convenience variables now defined.\n" | |
2622 | "Convenience variables have " | |
2623 | "names starting with \"$\";\n" | |
2624 | "use \"set\" as in \"set " | |
2625 | "$foo = 5\" to define them.\n")); | |
2626 | } | |
c906108c SS |
2627 | } |
2628 | \f | |
ba18742c SM |
2629 | |
2630 | /* See value.h. */ | |
e81e7f5e SC |
2631 | |
2632 | struct value * | |
ba18742c | 2633 | value_from_xmethod (xmethod_worker_up &&worker) |
e81e7f5e | 2634 | { |
ba18742c | 2635 | struct value *v; |
e81e7f5e | 2636 | |
ba18742c SM |
2637 | v = allocate_value (builtin_type (target_gdbarch ())->xmethod); |
2638 | v->lval = lval_xcallable; | |
2639 | v->location.xm_worker = worker.release (); | |
2640 | v->modifiable = 0; | |
e81e7f5e | 2641 | |
ba18742c | 2642 | return v; |
e81e7f5e SC |
2643 | } |
2644 | ||
2ce1cdbf DE |
2645 | /* Return the type of the result of TYPE_CODE_XMETHOD value METHOD. */ |
2646 | ||
2647 | struct type * | |
6b1747cd | 2648 | result_type_of_xmethod (struct value *method, gdb::array_view<value *> argv) |
2ce1cdbf | 2649 | { |
78134374 | 2650 | gdb_assert (value_type (method)->code () == TYPE_CODE_XMETHOD |
6b1747cd | 2651 | && method->lval == lval_xcallable && !argv.empty ()); |
2ce1cdbf | 2652 | |
6b1747cd | 2653 | return method->location.xm_worker->get_result_type (argv[0], argv.slice (1)); |
2ce1cdbf DE |
2654 | } |
2655 | ||
e81e7f5e SC |
2656 | /* Call the xmethod corresponding to the TYPE_CODE_XMETHOD value METHOD. */ |
2657 | ||
2658 | struct value * | |
6b1747cd | 2659 | call_xmethod (struct value *method, gdb::array_view<value *> argv) |
e81e7f5e | 2660 | { |
78134374 | 2661 | gdb_assert (value_type (method)->code () == TYPE_CODE_XMETHOD |
6b1747cd | 2662 | && method->lval == lval_xcallable && !argv.empty ()); |
e81e7f5e | 2663 | |
6b1747cd | 2664 | return method->location.xm_worker->invoke (argv[0], argv.slice (1)); |
e81e7f5e SC |
2665 | } |
2666 | \f | |
c906108c SS |
2667 | /* Extract a value as a C number (either long or double). |
2668 | Knows how to convert fixed values to double, or | |
2669 | floating values to long. | |
2670 | Does not deallocate the value. */ | |
2671 | ||
2672 | LONGEST | |
f23631e4 | 2673 | value_as_long (struct value *val) |
c906108c SS |
2674 | { |
2675 | /* This coerces arrays and functions, which is necessary (e.g. | |
2676 | in disassemble_command). It also dereferences references, which | |
2677 | I suspect is the most logical thing to do. */ | |
994b9211 | 2678 | val = coerce_array (val); |
0fd88904 | 2679 | return unpack_long (value_type (val), value_contents (val)); |
c906108c SS |
2680 | } |
2681 | ||
581e13c1 | 2682 | /* Extract a value as a C pointer. Does not deallocate the value. |
4478b372 JB |
2683 | Note that val's type may not actually be a pointer; value_as_long |
2684 | handles all the cases. */ | |
c906108c | 2685 | CORE_ADDR |
f23631e4 | 2686 | value_as_address (struct value *val) |
c906108c | 2687 | { |
8ee511af | 2688 | struct gdbarch *gdbarch = value_type (val)->arch (); |
50810684 | 2689 | |
c906108c SS |
2690 | /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure |
2691 | whether we want this to be true eventually. */ | |
2692 | #if 0 | |
bf6ae464 | 2693 | /* gdbarch_addr_bits_remove is wrong if we are being called for a |
c906108c SS |
2694 | non-address (e.g. argument to "signal", "info break", etc.), or |
2695 | for pointers to char, in which the low bits *are* significant. */ | |
50810684 | 2696 | return gdbarch_addr_bits_remove (gdbarch, value_as_long (val)); |
c906108c | 2697 | #else |
f312f057 JB |
2698 | |
2699 | /* There are several targets (IA-64, PowerPC, and others) which | |
2700 | don't represent pointers to functions as simply the address of | |
2701 | the function's entry point. For example, on the IA-64, a | |
2702 | function pointer points to a two-word descriptor, generated by | |
2703 | the linker, which contains the function's entry point, and the | |
2704 | value the IA-64 "global pointer" register should have --- to | |
2705 | support position-independent code. The linker generates | |
2706 | descriptors only for those functions whose addresses are taken. | |
2707 | ||
2708 | On such targets, it's difficult for GDB to convert an arbitrary | |
2709 | function address into a function pointer; it has to either find | |
2710 | an existing descriptor for that function, or call malloc and | |
2711 | build its own. On some targets, it is impossible for GDB to | |
2712 | build a descriptor at all: the descriptor must contain a jump | |
2713 | instruction; data memory cannot be executed; and code memory | |
2714 | cannot be modified. | |
2715 | ||
2716 | Upon entry to this function, if VAL is a value of type `function' | |
2717 | (that is, TYPE_CODE (VALUE_TYPE (val)) == TYPE_CODE_FUNC), then | |
42ae5230 | 2718 | value_address (val) is the address of the function. This is what |
f312f057 JB |
2719 | you'll get if you evaluate an expression like `main'. The call |
2720 | to COERCE_ARRAY below actually does all the usual unary | |
2721 | conversions, which includes converting values of type `function' | |
2722 | to `pointer to function'. This is the challenging conversion | |
2723 | discussed above. Then, `unpack_long' will convert that pointer | |
2724 | back into an address. | |
2725 | ||
2726 | So, suppose the user types `disassemble foo' on an architecture | |
2727 | with a strange function pointer representation, on which GDB | |
2728 | cannot build its own descriptors, and suppose further that `foo' | |
2729 | has no linker-built descriptor. The address->pointer conversion | |
2730 | will signal an error and prevent the command from running, even | |
2731 | though the next step would have been to convert the pointer | |
2732 | directly back into the same address. | |
2733 | ||
2734 | The following shortcut avoids this whole mess. If VAL is a | |
2735 | function, just return its address directly. */ | |
78134374 SM |
2736 | if (value_type (val)->code () == TYPE_CODE_FUNC |
2737 | || value_type (val)->code () == TYPE_CODE_METHOD) | |
42ae5230 | 2738 | return value_address (val); |
f312f057 | 2739 | |
994b9211 | 2740 | val = coerce_array (val); |
fc0c74b1 AC |
2741 | |
2742 | /* Some architectures (e.g. Harvard), map instruction and data | |
2743 | addresses onto a single large unified address space. For | |
2744 | instance: An architecture may consider a large integer in the | |
2745 | range 0x10000000 .. 0x1000ffff to already represent a data | |
2746 | addresses (hence not need a pointer to address conversion) while | |
2747 | a small integer would still need to be converted integer to | |
2748 | pointer to address. Just assume such architectures handle all | |
2749 | integer conversions in a single function. */ | |
2750 | ||
2751 | /* JimB writes: | |
2752 | ||
2753 | I think INTEGER_TO_ADDRESS is a good idea as proposed --- but we | |
2754 | must admonish GDB hackers to make sure its behavior matches the | |
2755 | compiler's, whenever possible. | |
2756 | ||
2757 | In general, I think GDB should evaluate expressions the same way | |
2758 | the compiler does. When the user copies an expression out of | |
2759 | their source code and hands it to a `print' command, they should | |
2760 | get the same value the compiler would have computed. Any | |
2761 | deviation from this rule can cause major confusion and annoyance, | |
2762 | and needs to be justified carefully. In other words, GDB doesn't | |
2763 | really have the freedom to do these conversions in clever and | |
2764 | useful ways. | |
2765 | ||
2766 | AndrewC pointed out that users aren't complaining about how GDB | |
2767 | casts integers to pointers; they are complaining that they can't | |
2768 | take an address from a disassembly listing and give it to `x/i'. | |
2769 | This is certainly important. | |
2770 | ||
79dd2d24 | 2771 | Adding an architecture method like integer_to_address() certainly |
fc0c74b1 AC |
2772 | makes it possible for GDB to "get it right" in all circumstances |
2773 | --- the target has complete control over how things get done, so | |
2774 | people can Do The Right Thing for their target without breaking | |
2775 | anyone else. The standard doesn't specify how integers get | |
2776 | converted to pointers; usually, the ABI doesn't either, but | |
2777 | ABI-specific code is a more reasonable place to handle it. */ | |
2778 | ||
78134374 | 2779 | if (value_type (val)->code () != TYPE_CODE_PTR |
aa006118 | 2780 | && !TYPE_IS_REFERENCE (value_type (val)) |
50810684 UW |
2781 | && gdbarch_integer_to_address_p (gdbarch)) |
2782 | return gdbarch_integer_to_address (gdbarch, value_type (val), | |
0fd88904 | 2783 | value_contents (val)); |
fc0c74b1 | 2784 | |
0fd88904 | 2785 | return unpack_long (value_type (val), value_contents (val)); |
c906108c SS |
2786 | #endif |
2787 | } | |
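/* Illustrative note (not part of the original source): for a
   pointer-typed VAL the two accessors usually agree, but only
   value_as_address applies the per-architecture pointer-to-address
   conversion:

     CORE_ADDR addr = value_as_address (ptr_val);
     LONGEST raw = value_as_long (ptr_val);

   Code that intends to dereference or disassemble should prefer
   value_as_address; value_as_long is for genuinely integral uses.  */
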
2788 | \f | |
2789 | /* Unpack raw data (copied from the debuggee, target byte order) at VALADDR
2790 | as a long, or as a double, assuming the raw data is described | |
2791 | by type TYPE. Knows how to convert different sizes of values | |
2792 | and can convert between fixed and floating point. We don't assume | |
2793 | any alignment for the raw data. Return value is in host byte order. | |
2794 | ||
2795 | If you want functions and arrays to be coerced to pointers, and | |
2796 | references to be dereferenced, call value_as_long() instead. | |
2797 | ||
2798 | C++: It is assumed that the front-end has taken care of | |
2799 | all matters concerning pointers to members. A pointer | |
2800 | to member which reaches here is considered to be equivalent | |
2801 | to an INT (or some size). After all, it is only an offset. */ | |
2802 | ||
2803 | LONGEST | |
fc1a4b47 | 2804 | unpack_long (struct type *type, const gdb_byte *valaddr) |
c906108c | 2805 | { |
09584414 | 2806 | if (is_fixed_point_type (type)) |
d19937a7 | 2807 | type = type->fixed_point_type_base_type (); |
09584414 | 2808 | |
34877895 | 2809 | enum bfd_endian byte_order = type_byte_order (type); |
78134374 | 2810 | enum type_code code = type->code (); |
52f0bd74 | 2811 | int len = TYPE_LENGTH (type); |
c6d940a9 | 2812 | int nosign = type->is_unsigned (); |
c906108c | 2813 | |
c906108c SS |
2814 | switch (code) |
2815 | { | |
2816 | case TYPE_CODE_TYPEDEF: | |
2817 | return unpack_long (check_typedef (type), valaddr); | |
2818 | case TYPE_CODE_ENUM: | |
4f2aea11 | 2819 | case TYPE_CODE_FLAGS: |
c906108c SS |
2820 | case TYPE_CODE_BOOL: |
2821 | case TYPE_CODE_INT: | |
2822 | case TYPE_CODE_CHAR: | |
2823 | case TYPE_CODE_RANGE: | |
0d5de010 | 2824 | case TYPE_CODE_MEMBERPTR: |
4e962e74 TT |
2825 | { |
2826 | LONGEST result; | |
20a5fcbd TT |
2827 | |
2828 | if (type->bit_size_differs_p ()) | |
2829 | { | |
2830 | unsigned bit_off = type->bit_offset (); | |
2831 | unsigned bit_size = type->bit_size (); | |
2832 | if (bit_size == 0) | |
2833 | { | |
2834 | /* unpack_bits_as_long doesn't handle this case the | |
2835 | way we'd like, so handle it here. */ | |
2836 | result = 0; | |
2837 | } | |
2838 | else | |
2839 | result = unpack_bits_as_long (type, valaddr, bit_off, bit_size); | |
2840 | } | |
4e962e74 | 2841 | else |
20a5fcbd TT |
2842 | { |
2843 | if (nosign) | |
2844 | result = extract_unsigned_integer (valaddr, len, byte_order); | |
2845 | else | |
2846 | result = extract_signed_integer (valaddr, len, byte_order); | |
2847 | } | |
4e962e74 | 2848 | if (code == TYPE_CODE_RANGE) |
599088e3 | 2849 | result += type->bounds ()->bias; |
4e962e74 TT |
2850 | return result; |
2851 | } | |
c906108c SS |
2852 | |
2853 | case TYPE_CODE_FLT: | |
4ef30785 | 2854 | case TYPE_CODE_DECFLOAT: |
50637b26 | 2855 | return target_float_to_longest (valaddr, type); |
4ef30785 | 2856 | |
09584414 JB |
2857 | case TYPE_CODE_FIXED_POINT: |
2858 | { | |
2859 | gdb_mpq vq; | |
c9f0b43f JB |
2860 | vq.read_fixed_point (gdb::make_array_view (valaddr, len), |
2861 | byte_order, nosign, | |
e6fcee3a | 2862 | type->fixed_point_scaling_factor ()); |
09584414 JB |
2863 | |
2864 | gdb_mpz vz; | |
2865 | mpz_tdiv_q (vz.val, mpq_numref (vq.val), mpq_denref (vq.val)); | |
2866 | return vz.as_integer<LONGEST> (); | |
2867 | } | |
2868 | ||
c906108c SS |
2869 | case TYPE_CODE_PTR: |
2870 | case TYPE_CODE_REF: | |
aa006118 | 2871 | case TYPE_CODE_RVALUE_REF: |
c906108c | 2872 | /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure |
dda83cd7 | 2873 | whether we want this to be true eventually. */ |
4478b372 | 2874 | return extract_typed_address (valaddr, type); |
c906108c | 2875 | |
c906108c | 2876 | default: |
8a3fe4f8 | 2877 | error (_("Value can't be converted to integer.")); |
c906108c | 2878 | } |
c906108c SS |
2879 | } |
2880 | ||
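/* Worked illustration of the integer case above (a standalone sketch,
   not GDB code): for a little-endian target, extract_signed_integer /
   extract_unsigned_integer amount to reading LEN raw bytes into a host
   integer and then sign- or zero-extending it.  */

#include <stdint.h>

static int64_t
sketch_unpack_le (const unsigned char *valaddr, int len, int is_unsigned)
{
  uint64_t result = 0;

  /* Least significant byte comes first on a little-endian target.  */
  for (int i = len - 1; i >= 0; i--)
    result = (result << 8) | valaddr[i];

  /* Sign-extend signed values narrower than 64 bits.  */
  if (!is_unsigned && len < 8 && (result & (1ULL << (len * 8 - 1))) != 0)
    result |= ~((1ULL << (len * 8)) - 1);

  return (int64_t) result;
}

/* E.g. the raw bytes {0xfe, 0xff} of a signed 2-byte int unpack to -2.  */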
c906108c SS |
2881 | /* Unpack raw data (copied from debuggee, target byte order) at VALADDR | |
2882 | as a CORE_ADDR, assuming the raw data is described by type TYPE. | |
2883 | We don't assume any alignment for the raw data. Return value is in | |
2884 | host byte order. | |
2885 | ||
2886 | If you want functions and arrays to be coerced to pointers, and | |
1aa20aa8 | 2887 | references to be dereferenced, call value_as_address() instead. |
c906108c SS |
2888 | |
2889 | C++: It is assumed that the front-end has taken care of | |
2890 | all matters concerning pointers to members. A pointer | |
2891 | to member which reaches here is considered to be equivalent | |
2892 | to an INT (of some size). After all, it is only an offset. */ | |
2893 | ||
2894 | CORE_ADDR | |
fc1a4b47 | 2895 | unpack_pointer (struct type *type, const gdb_byte *valaddr) |
c906108c SS |
2896 | { |
2897 | /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure | |
2898 | whether we want this to be true eventually. */ | |
2899 | return unpack_long (type, valaddr); | |
2900 | } | |
4478b372 | 2901 | |
70100014 UW |
2902 | bool |
2903 | is_floating_value (struct value *val) | |
2904 | { | |
2905 | struct type *type = check_typedef (value_type (val)); | |
2906 | ||
2907 | if (is_floating_type (type)) | |
2908 | { | |
2909 | if (!target_float_is_valid (value_contents (val), type)) | |
2910 | error (_("Invalid floating value found in program.")); | |
2911 | return true; | |
2912 | } | |
2913 | ||
2914 | return false; | |
2915 | } | |
2916 | ||
c906108c | 2917 | \f |
1596cb5d | 2918 | /* Get the value of the FIELDNO'th field (which must be static) of |
686d4def | 2919 | TYPE. */ |
c906108c | 2920 | |
f23631e4 | 2921 | struct value * |
fba45db2 | 2922 | value_static_field (struct type *type, int fieldno) |
c906108c | 2923 | { |
948e66d9 DJ |
2924 | struct value *retval; |
2925 | ||
1596cb5d | 2926 | switch (TYPE_FIELD_LOC_KIND (type, fieldno)) |
c906108c | 2927 | { |
1596cb5d | 2928 | case FIELD_LOC_KIND_PHYSADDR: |
940da03e | 2929 | retval = value_at_lazy (type->field (fieldno).type (), |
52e9fde8 | 2930 | TYPE_FIELD_STATIC_PHYSADDR (type, fieldno)); |
1596cb5d DE |
2931 | break; |
2932 | case FIELD_LOC_KIND_PHYSNAME: | |
c906108c | 2933 | { |
ff355380 | 2934 | const char *phys_name = TYPE_FIELD_STATIC_PHYSNAME (type, fieldno); |
581e13c1 | 2935 | /* TYPE_FIELD_NAME (type, fieldno); */ |
d12307c1 | 2936 | struct block_symbol sym = lookup_symbol (phys_name, 0, VAR_DOMAIN, 0); |
94af9270 | 2937 | |
d12307c1 | 2938 | if (sym.symbol == NULL) |
c906108c | 2939 | { |
a109c7c1 | 2940 | /* With some compilers, e.g. HP aCC, static data members are |
581e13c1 | 2941 | reported as non-debuggable symbols. */ |
3b7344d5 TT |
2942 | struct bound_minimal_symbol msym |
2943 | = lookup_minimal_symbol (phys_name, NULL, NULL); | |
940da03e | 2944 | struct type *field_type = type->field (fieldno).type (); |
a109c7c1 | 2945 | |
3b7344d5 | 2946 | if (!msym.minsym) |
c2e0e465 | 2947 | retval = allocate_optimized_out_value (field_type); |
c906108c | 2948 | else |
c2e0e465 | 2949 | retval = value_at_lazy (field_type, BMSYMBOL_VALUE_ADDRESS (msym)); |
c906108c SS |
2950 | } |
2951 | else | |
d12307c1 | 2952 | retval = value_of_variable (sym.symbol, sym.block); |
1596cb5d | 2953 | break; |
c906108c | 2954 | } |
1596cb5d | 2955 | default: |
f3574227 | 2956 | gdb_assert_not_reached ("unexpected field location kind"); |
1596cb5d DE |
2957 | } |
2958 | ||
948e66d9 | 2959 | return retval; |
c906108c SS |
2960 | } |
2961 | ||
4dfea560 DE |
2962 | /* Change the enclosing type of a value object VAL to NEW_ENCL_TYPE. |
2963 | You have to be careful here, since the size of the data area for the value | |
2964 | is set by the length of the enclosing type. So if NEW_ENCL_TYPE is bigger | |
2965 | than the old enclosing type, you have to allocate more space for the | |
2966 | data. */ | |
2b127877 | 2967 | |
4dfea560 DE |
2968 | void |
2969 | set_value_enclosing_type (struct value *val, struct type *new_encl_type) | |
2b127877 | 2970 | { |
5fdf6324 AB |
2971 | if (TYPE_LENGTH (new_encl_type) > TYPE_LENGTH (value_enclosing_type (val))) |
2972 | { | |
2973 | check_type_length_before_alloc (new_encl_type); | |
2974 | val->contents | |
14c88955 TT |
2975 | .reset ((gdb_byte *) xrealloc (val->contents.release (), |
2976 | TYPE_LENGTH (new_encl_type))); | |
5fdf6324 | 2977 | } |
3e3d7139 JG |
2978 | |
2979 | val->enclosing_type = new_encl_type; | |
2b127877 DB |
2980 | } |
2981 | ||
c906108c SS |
2982 | /* Given a value ARG1 (offset by OFFSET bytes) |
2983 | of a struct or union type ARG_TYPE, | |
2984 | extract and return the value of one of its (non-static) fields. | |
581e13c1 | 2985 | FIELDNO says which field. */ |
c906108c | 2986 | |
f23631e4 | 2987 | struct value * |
6b850546 | 2988 | value_primitive_field (struct value *arg1, LONGEST offset, |
aa1ee363 | 2989 | int fieldno, struct type *arg_type) |
c906108c | 2990 | { |
f23631e4 | 2991 | struct value *v; |
52f0bd74 | 2992 | struct type *type; |
3ae385af SM |
2993 | struct gdbarch *arch = get_value_arch (arg1); |
2994 | int unit_size = gdbarch_addressable_memory_unit_size (arch); | |
c906108c | 2995 | |
f168693b | 2996 | arg_type = check_typedef (arg_type); |
940da03e | 2997 | type = arg_type->field (fieldno).type (); |
c54eabfa JK |
2998 | |
2999 | /* Call check_typedef on our type to make sure that, if TYPE | |
3000 | is a TYPE_CODE_TYPEDEF, its length is set to the length | |
3001 | of the target type instead of zero. However, we do not | |
3002 | replace the typedef type by the target type, because we want | |
3003 | to keep the typedef in order to be able to print the type | |
3004 | description correctly. */ | |
3005 | check_typedef (type); | |
c906108c | 3006 | |
691a26f5 | 3007 | if (TYPE_FIELD_BITSIZE (arg_type, fieldno)) |
c906108c | 3008 | { |
22c05d8a JK |
3009 | /* Handle packed fields. |
3010 | ||
3011 | Create a new value for the bitfield, with bitpos and bitsize | |
4ea48cc1 DJ |
3012 | set. If possible, arrange offset and bitpos so that we can |
3013 | do a single aligned read of the size of the containing type. | |
3014 | Otherwise, adjust offset to the byte containing the first | |
3015 | bit. Assume that the address, offset, and embedded offset | |
3016 | are sufficiently aligned. */ | |
22c05d8a | 3017 | |
6b850546 DT |
3018 | LONGEST bitpos = TYPE_FIELD_BITPOS (arg_type, fieldno); |
3019 | LONGEST container_bitsize = TYPE_LENGTH (type) * 8; | |
4ea48cc1 | 3020 | |
9a0dc9e3 PA |
3021 | v = allocate_value_lazy (type); |
3022 | v->bitsize = TYPE_FIELD_BITSIZE (arg_type, fieldno); | |
3023 | if ((bitpos % container_bitsize) + v->bitsize <= container_bitsize | |
3024 | && TYPE_LENGTH (type) <= (int) sizeof (LONGEST)) | |
3025 | v->bitpos = bitpos % container_bitsize; | |
4ea48cc1 | 3026 | else |
9a0dc9e3 PA |
3027 | v->bitpos = bitpos % 8; |
3028 | v->offset = (value_embedded_offset (arg1) | |
3029 | + offset | |
3030 | + (bitpos - v->bitpos) / 8); | |
3031 | set_value_parent (v, arg1); | |
3032 | if (!value_lazy (arg1)) | |
3033 | value_fetch_lazy (v); | |
c906108c SS |
3034 | } |
3035 | else if (fieldno < TYPE_N_BASECLASSES (arg_type)) | |
3036 | { | |
3037 | /* This field is actually a base subobject, so preserve the | |
39d37385 PA |
3038 | entire object's contents for later references to virtual |
3039 | bases, etc. */ | |
6b850546 | 3040 | LONGEST boffset; |
a4e2ee12 DJ |
3041 | |
3042 | /* Lazy register values with offsets are not supported. */ | |
3043 | if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1)) | |
3044 | value_fetch_lazy (arg1); | |
3045 | ||
9a0dc9e3 PA |
3046 | /* We special case virtual inheritance here because this |
3047 | requires access to the contents, which we would rather avoid | |
3048 | for references to ordinary fields of unavailable values. */ | |
3049 | if (BASETYPE_VIA_VIRTUAL (arg_type, fieldno)) | |
3050 | boffset = baseclass_offset (arg_type, fieldno, | |
3051 | value_contents (arg1), | |
3052 | value_embedded_offset (arg1), | |
3053 | value_address (arg1), | |
3054 | arg1); | |
c906108c | 3055 | else |
9a0dc9e3 | 3056 | boffset = TYPE_FIELD_BITPOS (arg_type, fieldno) / 8; |
691a26f5 | 3057 | |
9a0dc9e3 PA |
3058 | if (value_lazy (arg1)) |
3059 | v = allocate_value_lazy (value_enclosing_type (arg1)); | |
3060 | else | |
3061 | { | |
3062 | v = allocate_value (value_enclosing_type (arg1)); | |
3063 | value_contents_copy_raw (v, 0, arg1, 0, | |
3064 | TYPE_LENGTH (value_enclosing_type (arg1))); | |
3e3d7139 | 3065 | } |
9a0dc9e3 PA |
3066 | v->type = type; |
3067 | v->offset = value_offset (arg1); | |
3068 | v->embedded_offset = offset + value_embedded_offset (arg1) + boffset; | |
c906108c | 3069 | } |
9920b434 BH |
3070 | else if (NULL != TYPE_DATA_LOCATION (type)) |
3071 | { | |
3072 | /* Field is a dynamic data member. */ | |
3073 | ||
3074 | gdb_assert (0 == offset); | |
3075 | /* We expect an already resolved data location. */ | |
3076 | gdb_assert (PROP_CONST == TYPE_DATA_LOCATION_KIND (type)); | |
3077 | /* For dynamic data types, defer memory allocation | |
dda83cd7 | 3078 | until we actually access the value. */ |
9920b434 BH |
3079 | v = allocate_value_lazy (type); |
3080 | } | |
c906108c SS |
3081 | else |
3082 | { | |
3083 | /* Plain old data member. */ | |
3ae385af | 3084 | offset += (TYPE_FIELD_BITPOS (arg_type, fieldno) |
dda83cd7 | 3085 | / (HOST_CHAR_BIT * unit_size)); |
a4e2ee12 DJ |
3086 | |
3087 | /* Lazy register values with offsets are not supported. */ | |
3088 | if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1)) | |
3089 | value_fetch_lazy (arg1); | |
3090 | ||
9a0dc9e3 | 3091 | if (value_lazy (arg1)) |
3e3d7139 | 3092 | v = allocate_value_lazy (type); |
c906108c | 3093 | else |
3e3d7139 JG |
3094 | { |
3095 | v = allocate_value (type); | |
39d37385 PA |
3096 | value_contents_copy_raw (v, value_embedded_offset (v), |
3097 | arg1, value_embedded_offset (arg1) + offset, | |
3ae385af | 3098 | type_length_units (type)); |
3e3d7139 | 3099 | } |
df407dfe | 3100 | v->offset = (value_offset (arg1) + offset |
13c3b5f5 | 3101 | + value_embedded_offset (arg1)); |
c906108c | 3102 | } |
74bcbdf3 | 3103 | set_value_component_location (v, arg1); |
c906108c SS |
3104 | return v; |
3105 | } | |
3106 | ||
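/* Worked example of the packed-bitfield arithmetic in
   value_primitive_field above (numbers are illustrative): for a 3-bit
   field at TYPE_FIELD_BITPOS 37 whose underlying type is a 4-byte int,
   container_bitsize is 32; since 37 % 32 + 3 <= 32, the new value gets
   bitpos = 5 and its offset advances by (37 - 5) / 8 = 4 bytes, so the
   containing 4-byte word can be fetched with a single aligned read.
   When the field straddles its container, bitpos is instead reduced
   modulo 8 and the offset advances to the byte holding the first bit.  */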
3107 | /* Given a value ARG1 of a struct or union type, | |
3108 | extract and return the value of one of its (non-static) fields. | |
581e13c1 | 3109 | FIELDNO says which field. */ |
c906108c | 3110 | |
f23631e4 | 3111 | struct value * |
aa1ee363 | 3112 | value_field (struct value *arg1, int fieldno) |
c906108c | 3113 | { |
df407dfe | 3114 | return value_primitive_field (arg1, 0, fieldno, value_type (arg1)); |
c906108c SS |
3115 | } |
3116 | ||
3117 | /* Return a non-virtual function as a value. | |
3118 | F is the list of member functions which contains the desired method. | |
0478d61c FF |
3119 | J is an index into F which provides the desired method. |
3120 | ||
3121 | We only use the symbol for its address, so be happy with either a | |
581e13c1 | 3122 | full symbol or a minimal symbol. */ |
c906108c | 3123 | |
f23631e4 | 3124 | struct value * |
3e43a32a MS |
3125 | value_fn_field (struct value **arg1p, struct fn_field *f, |
3126 | int j, struct type *type, | |
6b850546 | 3127 | LONGEST offset) |
c906108c | 3128 | { |
f23631e4 | 3129 | struct value *v; |
52f0bd74 | 3130 | struct type *ftype = TYPE_FN_FIELD_TYPE (f, j); |
1d06ead6 | 3131 | const char *physname = TYPE_FN_FIELD_PHYSNAME (f, j); |
c906108c | 3132 | struct symbol *sym; |
7c7b6655 | 3133 | struct bound_minimal_symbol msym; |
c906108c | 3134 | |
d12307c1 | 3135 | sym = lookup_symbol (physname, 0, VAR_DOMAIN, 0).symbol; |
5ae326fa | 3136 | if (sym != NULL) |
0478d61c | 3137 | { |
7c7b6655 | 3138 | memset (&msym, 0, sizeof (msym)); |
5ae326fa AC |
3139 | } |
3140 | else | |
3141 | { | |
3142 | gdb_assert (sym == NULL); | |
7c7b6655 TT |
3143 | msym = lookup_bound_minimal_symbol (physname); |
3144 | if (msym.minsym == NULL) | |
5ae326fa | 3145 | return NULL; |
0478d61c FF |
3146 | } |
3147 | ||
c906108c | 3148 | v = allocate_value (ftype); |
1a088441 | 3149 | VALUE_LVAL (v) = lval_memory; |
0478d61c FF |
3150 | if (sym) |
3151 | { | |
2b1ffcfd | 3152 | set_value_address (v, BLOCK_ENTRY_PC (SYMBOL_BLOCK_VALUE (sym))); |
0478d61c FF |
3153 | } |
3154 | else | |
3155 | { | |
bccdca4a UW |
3156 | /* The minimal symbol might point to a function descriptor; |
3157 | resolve it to the actual code address instead. */ | |
7c7b6655 | 3158 | struct objfile *objfile = msym.objfile; |
08feed99 | 3159 | struct gdbarch *gdbarch = objfile->arch (); |
bccdca4a | 3160 | |
42ae5230 TT |
3161 | set_value_address (v, |
3162 | gdbarch_convert_from_func_ptr_addr | |
328d42d8 SM |
3163 | (gdbarch, BMSYMBOL_VALUE_ADDRESS (msym), |
3164 | current_inferior ()->top_target ())); | |
0478d61c | 3165 | } |
c906108c SS |
3166 | |
3167 | if (arg1p) | |
c5aa993b | 3168 | { |
df407dfe | 3169 | if (type != value_type (*arg1p)) |
c5aa993b JM |
3170 | *arg1p = value_ind (value_cast (lookup_pointer_type (type), |
3171 | value_addr (*arg1p))); | |
3172 | ||
070ad9f0 | 3173 | /* Move the `this' pointer according to the offset. |
dda83cd7 | 3174 | VALUE_OFFSET (*arg1p) += offset; */ |
c906108c SS |
3175 | } |
3176 | ||
3177 | return v; | |
3178 | } | |
3179 | ||
c906108c | 3180 | \f |
c906108c | 3181 | |
ef83a141 TT |
3182 | /* See value.h. */ |
3183 | ||
3184 | LONGEST | |
4875ffdb | 3185 | unpack_bits_as_long (struct type *field_type, const gdb_byte *valaddr, |
6b850546 | 3186 | LONGEST bitpos, LONGEST bitsize) |
c906108c | 3187 | { |
34877895 | 3188 | enum bfd_endian byte_order = type_byte_order (field_type); |
c906108c SS |
3189 | ULONGEST val; |
3190 | ULONGEST valmask; | |
c906108c | 3191 | int lsbcount; |
6b850546 DT |
3192 | LONGEST bytes_read; |
3193 | LONGEST read_offset; | |
c906108c | 3194 | |
4a76eae5 DJ |
3195 | /* Read the minimum number of bytes required; there may not be |
3196 | enough bytes to read an entire ULONGEST. */ | |
f168693b | 3197 | field_type = check_typedef (field_type); |
4a76eae5 DJ |
3198 | if (bitsize) |
3199 | bytes_read = ((bitpos % 8) + bitsize + 7) / 8; | |
3200 | else | |
15ce8941 TT |
3201 | { |
3202 | bytes_read = TYPE_LENGTH (field_type); | |
3203 | bitsize = 8 * bytes_read; | |
3204 | } | |
4a76eae5 | 3205 | |
5467c6c8 PA |
3206 | read_offset = bitpos / 8; |
3207 | ||
4875ffdb | 3208 | val = extract_unsigned_integer (valaddr + read_offset, |
4a76eae5 | 3209 | bytes_read, byte_order); |
c906108c | 3210 | |
581e13c1 | 3211 | /* Extract bits. See comment above. */ |
c906108c | 3212 | |
d5a22e77 | 3213 | if (byte_order == BFD_ENDIAN_BIG) |
4a76eae5 | 3214 | lsbcount = (bytes_read * 8 - bitpos % 8 - bitsize); |
c906108c SS |
3215 | else |
3216 | lsbcount = (bitpos % 8); | |
3217 | val >>= lsbcount; | |
3218 | ||
3219 | /* If the field does not entirely fill a LONGEST, then zero the sign bits. | |
581e13c1 | 3220 | If the field is signed, and is negative, then sign extend. */ |
c906108c | 3221 | |
15ce8941 | 3222 | if (bitsize < 8 * (int) sizeof (val)) |
c906108c SS |
3223 | { |
3224 | valmask = (((ULONGEST) 1) << bitsize) - 1; | |
3225 | val &= valmask; | |
c6d940a9 | 3226 | if (!field_type->is_unsigned ()) |
c906108c SS |
3227 | { |
3228 | if (val & (valmask ^ (valmask >> 1))) | |
3229 | { | |
3230 | val |= ~valmask; | |
3231 | } | |
3232 | } | |
3233 | } | |
5467c6c8 | 3234 | |
4875ffdb | 3235 | return val; |
5467c6c8 PA |
3236 | } |
3237 | ||
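/* Standalone sketch of the little-endian path of unpack_bits_as_long
   above (illustrative, not GDB code; assumes 0 < BITSIZE < 64 and
   BITPOS % 8 + BITSIZE <= 64).  */

#include <stdint.h>

static int64_t
sketch_unpack_bits_le (const unsigned char *valaddr,
                       int bitpos, int bitsize, int is_unsigned)
{
  /* Read only the bytes that cover the field, starting at bitpos / 8.  */
  int bytes_read = (bitpos % 8 + bitsize + 7) / 8;
  uint64_t val = 0;

  for (int i = bytes_read - 1; i >= 0; i--)
    val = (val << 8) | valaddr[bitpos / 8 + i];

  val >>= bitpos % 8;                   /* lsbcount */

  uint64_t valmask = (1ULL << bitsize) - 1;
  val &= valmask;
  if (!is_unsigned && (val & (valmask ^ (valmask >> 1))) != 0)
    val |= ~valmask;                    /* sign extend */

  return (int64_t) val;
}

/* E.g. a signed 3-bit field at bit position 10 of the bytes {0x00, 0x1c}
   reads the single byte 0x1c, shifts right by 2, masks to 7, and
   sign-extends to -1.  */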
3238 | /* Unpack a field FIELDNO of the specified TYPE, from the object at | |
3239 | VALADDR + EMBEDDED_OFFSET. VALADDR points to the contents of | |
3240 | VAL, which must not be NULL. See | |
3241 | unpack_bits_as_long for more details. */ | |
3242 | ||
3243 | int | |
3244 | unpack_value_field_as_long (struct type *type, const gdb_byte *valaddr, | |
6b850546 | 3245 | LONGEST embedded_offset, int fieldno, |
5467c6c8 PA |
3246 | const struct value *val, LONGEST *result) |
3247 | { | |
4875ffdb PA |
3248 | int bitpos = TYPE_FIELD_BITPOS (type, fieldno); |
3249 | int bitsize = TYPE_FIELD_BITSIZE (type, fieldno); | |
940da03e | 3250 | struct type *field_type = type->field (fieldno).type (); |
4875ffdb PA |
3251 | int bit_offset; |
3252 | ||
5467c6c8 PA |
3253 | gdb_assert (val != NULL); |
3254 | ||
4875ffdb PA |
3255 | bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos; |
3256 | if (value_bits_any_optimized_out (val, bit_offset, bitsize) | |
3257 | || !value_bits_available (val, bit_offset, bitsize)) | |
3258 | return 0; | |
3259 | ||
3260 | *result = unpack_bits_as_long (field_type, valaddr + embedded_offset, | |
3261 | bitpos, bitsize); | |
3262 | return 1; | |
5467c6c8 PA |
3263 | } |
3264 | ||
3265 | /* Unpack a field FIELDNO of the specified TYPE, from the anonymous | |
4875ffdb | 3266 | object at VALADDR. See unpack_bits_as_long for more details. */ |
5467c6c8 PA |
3267 | |
3268 | LONGEST | |
3269 | unpack_field_as_long (struct type *type, const gdb_byte *valaddr, int fieldno) | |
3270 | { | |
4875ffdb PA |
3271 | int bitpos = TYPE_FIELD_BITPOS (type, fieldno); |
3272 | int bitsize = TYPE_FIELD_BITSIZE (type, fieldno); | |
940da03e | 3273 | struct type *field_type = type->field (fieldno).type (); |
5467c6c8 | 3274 | |
4875ffdb PA |
3275 | return unpack_bits_as_long (field_type, valaddr, bitpos, bitsize); |
3276 | } | |
3277 | ||
3278 | /* Unpack a bitfield of BITSIZE bits found at BITPOS in the object at | |
3279 | VALADDR + EMBEDDED_OFFSET that has the type of DEST_VAL and store | |
3280 | the contents in DEST_VAL, zero or sign extending if the type of | |
3281 | DEST_VAL is wider than BITSIZE. VALADDR points to the contents of | |
3282 | VAL. If the contents of VAL that the bitfield is extracted from | |
3283 | are unavailable/optimized out, DEST_VAL is correspondingly | |
3284 | marked unavailable/optimized out. */ | |
3285 | ||
bb9d5f81 | 3286 | void |
4875ffdb | 3287 | unpack_value_bitfield (struct value *dest_val, |
6b850546 DT |
3288 | LONGEST bitpos, LONGEST bitsize, |
3289 | const gdb_byte *valaddr, LONGEST embedded_offset, | |
4875ffdb PA |
3290 | const struct value *val) |
3291 | { | |
3292 | enum bfd_endian byte_order; | |
3293 | int src_bit_offset; | |
3294 | int dst_bit_offset; | |
4875ffdb PA |
3295 | struct type *field_type = value_type (dest_val); |
3296 | ||
34877895 | 3297 | byte_order = type_byte_order (field_type); |
e5ca03b4 PA |
3298 | |
3299 | /* First, unpack and sign extend the bitfield as if it was wholly | |
3300 | valid. Optimized out/unavailable bits are read as zero, but | |
3301 | that's OK, as they'll end up marked below. If the VAL is | |
3302 | wholly-invalid we may have skipped allocating its contents, | |
3303 | though. See allocate_optimized_out_value. */ | |
3304 | if (valaddr != NULL) | |
3305 | { | |
3306 | LONGEST num; | |
3307 | ||
3308 | num = unpack_bits_as_long (field_type, valaddr + embedded_offset, | |
3309 | bitpos, bitsize); | |
3310 | store_signed_integer (value_contents_raw (dest_val), | |
3311 | TYPE_LENGTH (field_type), byte_order, num); | |
3312 | } | |
4875ffdb PA |
3313 | |
3314 | /* Now copy the optimized out / unavailability ranges to the right | |
3315 | bits. */ | |
3316 | src_bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos; | |
3317 | if (byte_order == BFD_ENDIAN_BIG) | |
3318 | dst_bit_offset = TYPE_LENGTH (field_type) * TARGET_CHAR_BIT - bitsize; | |
3319 | else | |
3320 | dst_bit_offset = 0; | |
3321 | value_ranges_copy_adjusted (dest_val, dst_bit_offset, | |
3322 | val, src_bit_offset, bitsize); | |
5467c6c8 PA |
3323 | } |
3324 | ||
3325 | /* Return a new value with type TYPE, which is FIELDNO field of the | |
3326 | object at VALADDR + EMBEDDED_OFFSET. VALADDR points to the contents | |
3327 | of VAL. If the contents of VAL that the bitfield is extracted | |
4875ffdb PA |
3328 | from are unavailable/optimized out, the new value is |
3329 | correspondingly marked unavailable/optimized out. */ | |
5467c6c8 PA |
3330 | |
3331 | struct value * | |
3332 | value_field_bitfield (struct type *type, int fieldno, | |
3333 | const gdb_byte *valaddr, | |
6b850546 | 3334 | LONGEST embedded_offset, const struct value *val) |
5467c6c8 | 3335 | { |
4875ffdb PA |
3336 | int bitpos = TYPE_FIELD_BITPOS (type, fieldno); |
3337 | int bitsize = TYPE_FIELD_BITSIZE (type, fieldno); | |
940da03e | 3338 | struct value *res_val = allocate_value (type->field (fieldno).type ()); |
5467c6c8 | 3339 | |
4875ffdb PA |
3340 | unpack_value_bitfield (res_val, bitpos, bitsize, |
3341 | valaddr, embedded_offset, val); | |
3342 | ||
3343 | return res_val; | |
4ea48cc1 DJ |
3344 | } |
3345 | ||
c906108c SS |
3346 | /* Modify the value of a bitfield. ADDR points to a block of memory in |
3347 | target byte order; the bitfield starts in the byte pointed to. FIELDVAL | |
3348 | is the desired value of the field, in host byte order. BITPOS and BITSIZE | |
581e13c1 | 3349 | indicate which bits (in target bit order) comprise the bitfield. |
19f220c3 | 3350 | Requires 0 < BITSIZE <= lbits, 0 <= BITPOS % 8 + BITSIZE <= lbits, and |
f4e88c8e | 3351 | 0 <= BITPOS, where lbits is the size of a LONGEST in bits. */ |
c906108c SS |
3352 | |
3353 | void | |
50810684 | 3354 | modify_field (struct type *type, gdb_byte *addr, |
6b850546 | 3355 | LONGEST fieldval, LONGEST bitpos, LONGEST bitsize) |
c906108c | 3356 | { |
34877895 | 3357 | enum bfd_endian byte_order = type_byte_order (type); |
f4e88c8e PH |
3358 | ULONGEST oword; |
3359 | ULONGEST mask = (ULONGEST) -1 >> (8 * sizeof (ULONGEST) - bitsize); | |
6b850546 | 3360 | LONGEST bytesize; |
19f220c3 JK |
3361 | |
3362 | /* Normalize BITPOS. */ | |
3363 | addr += bitpos / 8; | |
3364 | bitpos %= 8; | |
c906108c SS |
3365 | |
3366 | /* If a negative fieldval fits in the field in question, chop | |
3367 | off the sign extension bits. */ | |
f4e88c8e PH |
3368 | if ((~fieldval & ~(mask >> 1)) == 0) |
3369 | fieldval &= mask; | |
c906108c SS |
3370 | |
3371 | /* Warn if value is too big to fit in the field in question. */ | |
f4e88c8e | 3372 | if (0 != (fieldval & ~mask)) |
c906108c SS |
3373 | { |
3374 | /* FIXME: would like to include fieldval in the message, but | |
dda83cd7 | 3375 | we don't have a sprintf_longest. */ |
6b850546 | 3376 | warning (_("Value does not fit in %s bits."), plongest (bitsize)); |
c906108c SS |
3377 | |
3378 | /* Truncate it, otherwise adjoining fields may be corrupted. */ | |
f4e88c8e | 3379 | fieldval &= mask; |
c906108c SS |
3380 | } |
3381 | ||
19f220c3 JK |
3382 | /* Ensure no bytes outside of the modified ones get accessed as it may cause |
3383 | false valgrind reports. */ | |
3384 | ||
3385 | bytesize = (bitpos + bitsize + 7) / 8; | |
3386 | oword = extract_unsigned_integer (addr, bytesize, byte_order); | |
c906108c SS |
3387 | |
3388 | /* Shifting for bit field depends on endianness of the target machine. */ | |
d5a22e77 | 3389 | if (byte_order == BFD_ENDIAN_BIG) |
19f220c3 | 3390 | bitpos = bytesize * 8 - bitpos - bitsize; |
c906108c | 3391 | |
f4e88c8e | 3392 | oword &= ~(mask << bitpos); |
c906108c SS |
3393 | oword |= fieldval << bitpos; |
3394 | ||
19f220c3 | 3395 | store_unsigned_integer (addr, bytesize, byte_order, oword); |
c906108c SS |
3396 | } |
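/* Standalone sketch of the little-endian path of modify_field above
   (illustrative, not GDB code; assumes 0 < BITSIZE and
   BITPOS % 8 + BITSIZE <= 64).  */

#include <stdint.h>

static void
sketch_modify_field_le (unsigned char *addr, uint64_t fieldval,
                        int bitpos, int bitsize)
{
  uint64_t mask = ((uint64_t) -1) >> (64 - bitsize);

  /* Normalize BITPOS to the byte containing the first bit.  */
  addr += bitpos / 8;
  bitpos %= 8;

  /* Truncate FIELDVAL to the field width.  */
  fieldval &= mask;

  /* Read, patch and write back only the bytes holding field bits.  */
  int bytesize = (bitpos + bitsize + 7) / 8;
  uint64_t oword = 0;
  for (int i = bytesize - 1; i >= 0; i--)
    oword = (oword << 8) | addr[i];

  oword &= ~(mask << bitpos);
  oword |= fieldval << bitpos;

  for (int i = 0; i < bytesize; i++)
    addr[i] = (oword >> (8 * i)) & 0xff;
}

/* E.g. storing 5 into a 3-bit field at bit position 10 of two zeroed
   bytes leaves them as {0x00, 0x14}, i.e. 5 << 2 in the second byte.  */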
3397 | \f | |
14d06750 | 3398 | /* Pack NUM into BUF using a target format of TYPE. */ |
c906108c | 3399 | |
14d06750 DJ |
3400 | void |
3401 | pack_long (gdb_byte *buf, struct type *type, LONGEST num) | |
c906108c | 3402 | { |
34877895 | 3403 | enum bfd_endian byte_order = type_byte_order (type); |
6b850546 | 3404 | LONGEST len; |
14d06750 DJ |
3405 | |
3406 | type = check_typedef (type); | |
c906108c SS |
3407 | len = TYPE_LENGTH (type); |
3408 | ||
78134374 | 3409 | switch (type->code ()) |
c906108c | 3410 | { |
4e962e74 | 3411 | case TYPE_CODE_RANGE: |
599088e3 | 3412 | num -= type->bounds ()->bias; |
4e962e74 | 3413 | /* Fall through. */ |
c906108c SS |
3414 | case TYPE_CODE_INT: |
3415 | case TYPE_CODE_CHAR: | |
3416 | case TYPE_CODE_ENUM: | |
4f2aea11 | 3417 | case TYPE_CODE_FLAGS: |
c906108c | 3418 | case TYPE_CODE_BOOL: |
0d5de010 | 3419 | case TYPE_CODE_MEMBERPTR: |
20a5fcbd TT |
3420 | if (type->bit_size_differs_p ()) |
3421 | { | |
3422 | unsigned bit_off = type->bit_offset (); | |
3423 | unsigned bit_size = type->bit_size (); | |
3424 | num &= ((ULONGEST) 1 << bit_size) - 1; | |
3425 | num <<= bit_off; | |
3426 | } | |
e17a4113 | 3427 | store_signed_integer (buf, len, byte_order, num); |
c906108c | 3428 | break; |
c5aa993b | 3429 | |
c906108c | 3430 | case TYPE_CODE_REF: |
aa006118 | 3431 | case TYPE_CODE_RVALUE_REF: |
c906108c | 3432 | case TYPE_CODE_PTR: |
14d06750 | 3433 | store_typed_address (buf, type, (CORE_ADDR) num); |
c906108c | 3434 | break; |
c5aa993b | 3435 | |
50637b26 UW |
3436 | case TYPE_CODE_FLT: |
3437 | case TYPE_CODE_DECFLOAT: | |
3438 | target_float_from_longest (buf, type, num); | |
3439 | break; | |
3440 | ||
c906108c | 3441 | default: |
14d06750 | 3442 | error (_("Unexpected type (%d) encountered for integer constant."), |
78134374 | 3443 | type->code ()); |
c906108c | 3444 | } |
14d06750 DJ |
3445 | } |
3446 | ||
3447 | ||
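/* Note on TYPE_CODE_RANGE above: pack_long and unpack_long are symmetric
   about the bias.  For a range type with bias B, pack_long stores a host
   value N as the raw integer N - B and unpack_long adds B back when
   reading, so a round trip preserves N; e.g. with B = 128, the host
   value 130 is stored as the raw integer 2.  */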
595939de PM |
3448 | /* Pack NUM into BUF using a target format of TYPE. */ |
3449 | ||
70221824 | 3450 | static void |
595939de PM |
3451 | pack_unsigned_long (gdb_byte *buf, struct type *type, ULONGEST num) |
3452 | { | |
6b850546 | 3453 | LONGEST len; |
595939de PM |
3454 | enum bfd_endian byte_order; |
3455 | ||
3456 | type = check_typedef (type); | |
3457 | len = TYPE_LENGTH (type); | |
34877895 | 3458 | byte_order = type_byte_order (type); |
595939de | 3459 | |
78134374 | 3460 | switch (type->code ()) |
595939de PM |
3461 | { |
3462 | case TYPE_CODE_INT: | |
3463 | case TYPE_CODE_CHAR: | |
3464 | case TYPE_CODE_ENUM: | |
3465 | case TYPE_CODE_FLAGS: | |
3466 | case TYPE_CODE_BOOL: | |
3467 | case TYPE_CODE_RANGE: | |
3468 | case TYPE_CODE_MEMBERPTR: | |
20a5fcbd TT |
3469 | if (type->bit_size_differs_p ()) |
3470 | { | |
3471 | unsigned bit_off = type->bit_offset (); | |
3472 | unsigned bit_size = type->bit_size (); | |
3473 | num &= ((ULONGEST) 1 << bit_size) - 1; | |
3474 | num <<= bit_off; | |
3475 | } | |
595939de PM |
3476 | store_unsigned_integer (buf, len, byte_order, num); |
3477 | break; | |
3478 | ||
3479 | case TYPE_CODE_REF: | |
aa006118 | 3480 | case TYPE_CODE_RVALUE_REF: |
595939de PM |
3481 | case TYPE_CODE_PTR: |
3482 | store_typed_address (buf, type, (CORE_ADDR) num); | |
3483 | break; | |
3484 | ||
50637b26 UW |
3485 | case TYPE_CODE_FLT: |
3486 | case TYPE_CODE_DECFLOAT: | |
3487 | target_float_from_ulongest (buf, type, num); | |
3488 | break; | |
3489 | ||
595939de | 3490 | default: |
3e43a32a MS |
3491 | error (_("Unexpected type (%d) encountered " |
3492 | "for unsigned integer constant."), | |
78134374 | 3493 | type->code ()); |
595939de PM |
3494 | } |
3495 | } | |
3496 | ||
3497 | ||
14d06750 DJ |
3498 | /* Convert C numbers into newly allocated values. */ |
3499 | ||
3500 | struct value * | |
3501 | value_from_longest (struct type *type, LONGEST num) | |
3502 | { | |
3503 | struct value *val = allocate_value (type); | |
3504 | ||
3505 | pack_long (value_contents_raw (val), type, num); | |
c906108c SS |
3506 | return val; |
3507 | } | |
3508 | ||
4478b372 | 3509 | |
595939de PM |
3510 | /* Convert C unsigned numbers into newly allocated values. */ |
3511 | ||
3512 | struct value * | |
3513 | value_from_ulongest (struct type *type, ULONGEST num) | |
3514 | { | |
3515 | struct value *val = allocate_value (type); | |
3516 | ||
3517 | pack_unsigned_long (value_contents_raw (val), type, num); | |
3518 | ||
3519 | return val; | |
3520 | } | |
3521 | ||
3522 | ||
4478b372 | 3523 | /* Create a value representing a pointer of type TYPE to the address |
cb417230 | 3524 | ADDR. */ |
80180f79 | 3525 | |
f23631e4 | 3526 | struct value * |
4478b372 JB |
3527 | value_from_pointer (struct type *type, CORE_ADDR addr) |
3528 | { | |
cb417230 | 3529 | struct value *val = allocate_value (type); |
a109c7c1 | 3530 | |
80180f79 | 3531 | store_typed_address (value_contents_raw (val), |
cb417230 | 3532 | check_typedef (type), addr); |
4478b372 JB |
3533 | return val; |
3534 | } | |
3535 | ||
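/* Usage sketch for the constructors above (illustrative; the first call
   pattern also appears later in this file, the second caller is
   hypothetical):

     struct value *i
       = value_from_longest (builtin_type (gdbarch)->builtin_int, 42);
     struct value *p
       = value_from_pointer (lookup_pointer_type (type), addr);

   Each allocates a value of the requested type and stores the host
   number or address in the target's representation.  */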
7584bb30 AB |
3536 | /* Create and return a value object of TYPE containing the value D. The |
3537 | TYPE must be of TYPE_CODE_FLT, and must be large enough to hold D once | |
3538 | it is converted to target format. */ | |
3539 | ||
3540 | struct value * | |
3541 | value_from_host_double (struct type *type, double d) | |
3542 | { | |
3543 | struct value *value = allocate_value (type); | |
78134374 | 3544 | gdb_assert (type->code () == TYPE_CODE_FLT); |
7584bb30 AB |
3545 | target_float_from_host_double (value_contents_raw (value), |
3546 | value_type (value), d); | |
3547 | return value; | |
3548 | } | |
4478b372 | 3549 | |
012370f6 TT |
3550 | /* Create a value of type TYPE whose contents come from VALADDR, if it |
3551 | is non-null, and whose memory address (in the inferior) is | |
3552 | ADDRESS. The type of the created value may differ from the passed | |
3553 | type TYPE. Make sure to retrieve the value's new type after this call. | |
3554 | Note that TYPE is not passed through resolve_dynamic_type; this is | |
3555 | a special API intended for use only by Ada. */ | |
3556 | ||
3557 | struct value * | |
3558 | value_from_contents_and_address_unresolved (struct type *type, | |
3559 | const gdb_byte *valaddr, | |
3560 | CORE_ADDR address) | |
3561 | { | |
3562 | struct value *v; | |
3563 | ||
3564 | if (valaddr == NULL) | |
3565 | v = allocate_value_lazy (type); | |
3566 | else | |
3567 | v = value_from_contents (type, valaddr); | |
012370f6 | 3568 | VALUE_LVAL (v) = lval_memory; |
1a088441 | 3569 | set_value_address (v, address); |
012370f6 TT |
3570 | return v; |
3571 | } | |
3572 | ||
8acb6b92 TT |
3573 | /* Create a value of type TYPE whose contents come from VALADDR, if it |
3574 | is non-null, and whose memory address (in the inferior) is | |
80180f79 SA |
3575 | ADDRESS. The type of the created value may differ from the passed |
3576 | type TYPE. Make sure to retrieve the value's new type after this call. */ | |
8acb6b92 TT |
3577 | |
3578 | struct value * | |
3579 | value_from_contents_and_address (struct type *type, | |
3580 | const gdb_byte *valaddr, | |
3581 | CORE_ADDR address) | |
3582 | { | |
b249d2c2 TT |
3583 | gdb::array_view<const gdb_byte> view; |
3584 | if (valaddr != nullptr) | |
3585 | view = gdb::make_array_view (valaddr, TYPE_LENGTH (type)); | |
3586 | struct type *resolved_type = resolve_dynamic_type (type, view, address); | |
d36430db | 3587 | struct type *resolved_type_no_typedef = check_typedef (resolved_type); |
41e8491f | 3588 | struct value *v; |
a109c7c1 | 3589 | |
8acb6b92 | 3590 | if (valaddr == NULL) |
80180f79 | 3591 | v = allocate_value_lazy (resolved_type); |
8acb6b92 | 3592 | else |
80180f79 | 3593 | v = value_from_contents (resolved_type, valaddr); |
d36430db JB |
3594 | if (TYPE_DATA_LOCATION (resolved_type_no_typedef) != NULL |
3595 | && TYPE_DATA_LOCATION_KIND (resolved_type_no_typedef) == PROP_CONST) | |
3596 | address = TYPE_DATA_LOCATION_ADDR (resolved_type_no_typedef); | |
33d502b4 | 3597 | VALUE_LVAL (v) = lval_memory; |
1a088441 | 3598 | set_value_address (v, address); |
8acb6b92 TT |
3599 | return v; |
3600 | } | |
3601 | ||
8a9b8146 TT |
3602 | /* Create a value of type TYPE holding the contents CONTENTS. |
3603 | The new value is `not_lval'. */ | |
3604 | ||
3605 | struct value * | |
3606 | value_from_contents (struct type *type, const gdb_byte *contents) | |
3607 | { | |
3608 | struct value *result; | |
3609 | ||
3610 | result = allocate_value (type); | |
3611 | memcpy (value_contents_raw (result), contents, TYPE_LENGTH (type)); | |
3612 | return result; | |
3613 | } | |
3614 | ||
3bd0f5ef MS |
3615 | /* Extract a value from the value history. Input will be of the form | |
3616 | $digits or $$digits. See block comment above 'write_dollar_variable' | |
3617 | for details. */ | |
3618 | ||
3619 | struct value * | |
e799154c | 3620 | value_from_history_ref (const char *h, const char **endp) |
3bd0f5ef MS |
3621 | { |
3622 | int index, len; | |
3623 | ||
3624 | if (h[0] == '$') | |
3625 | len = 1; | |
3626 | else | |
3627 | return NULL; | |
3628 | ||
3629 | if (h[1] == '$') | |
3630 | len = 2; | |
3631 | ||
3632 | /* Find length of numeral string. */ | |
3633 | for (; isdigit (h[len]); len++) | |
3634 | ; | |
3635 | ||
3636 | /* Make sure numeral string is not part of an identifier. */ | |
3637 | if (h[len] == '_' || isalpha (h[len])) | |
3638 | return NULL; | |
3639 | ||
3640 | /* Now collect the index value. */ | |
3641 | if (h[1] == '$') | |
3642 | { | |
3643 | if (len == 2) | |
3644 | { | |
3645 | /* For some bizarre reason, "$$" is equivalent to "$$1", | |
3646 | rather than to "$$0" as it ought to be! */ | |
3647 | index = -1; | |
3648 | *endp += len; | |
3649 | } | |
3650 | else | |
e799154c TT |
3651 | { |
3652 | char *local_end; | |
3653 | ||
3654 | index = -strtol (&h[2], &local_end, 10); | |
3655 | *endp = local_end; | |
3656 | } | |
3bd0f5ef MS |
3657 | } |
3658 | else | |
3659 | { | |
3660 | if (len == 1) | |
3661 | { | |
3662 | /* "$" is equivalent to "$0". */ | |
3663 | index = 0; | |
3664 | *endp += len; | |
3665 | } | |
3666 | else | |
e799154c TT |
3667 | { |
3668 | char *local_end; | |
3669 | ||
3670 | index = strtol (&h[1], &local_end, 10); | |
3671 | *endp = local_end; | |
3672 | } | |
3bd0f5ef MS |
3673 | } |
3674 | ||
3675 | return access_value_history (index); | |
3676 | } | |
3677 | ||
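/* Examples of the parse above (illustrative): "$" yields history index 0,
   "$5" yields 5, "$$" yields -1 (treated like "$$1", as noted above), and
   "$$3" yields -3.  "$x" and "$5foo" yield NULL, since the text is an
   identifier rather than a history reference.  */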
3fff9862 YQ |
3678 | /* Get the component value (offset by OFFSET bytes) of a struct or |
3679 | union WHOLE. Component's type is TYPE. */ | |
3680 | ||
3681 | struct value * | |
3682 | value_from_component (struct value *whole, struct type *type, LONGEST offset) | |
3683 | { | |
3684 | struct value *v; | |
3685 | ||
3686 | if (VALUE_LVAL (whole) == lval_memory && value_lazy (whole)) | |
3687 | v = allocate_value_lazy (type); | |
3688 | else | |
3689 | { | |
3690 | v = allocate_value (type); | |
3691 | value_contents_copy (v, value_embedded_offset (v), | |
3692 | whole, value_embedded_offset (whole) + offset, | |
3693 | type_length_units (type)); | |
3694 | } | |
3695 | v->offset = value_offset (whole) + offset + value_embedded_offset (whole); | |
3696 | set_value_component_location (v, whole); | |
3fff9862 YQ |
3697 | |
3698 | return v; | |
3699 | } | |
3700 | ||
a471c594 JK |
3701 | struct value * |
3702 | coerce_ref_if_computed (const struct value *arg) | |
3703 | { | |
3704 | const struct lval_funcs *funcs; | |
3705 | ||
aa006118 | 3706 | if (!TYPE_IS_REFERENCE (check_typedef (value_type (arg)))) |
a471c594 JK |
3707 | return NULL; |
3708 | ||
3709 | if (value_lval_const (arg) != lval_computed) | |
3710 | return NULL; | |
3711 | ||
3712 | funcs = value_computed_funcs (arg); | |
3713 | if (funcs->coerce_ref == NULL) | |
3714 | return NULL; | |
3715 | ||
3716 | return funcs->coerce_ref (arg); | |
3717 | } | |
3718 | ||
dfcee124 AG |
3719 | /* Look at value.h for description. */ |
3720 | ||
3721 | struct value * | |
3722 | readjust_indirect_value_type (struct value *value, struct type *enc_type, | |
4bf7b526 | 3723 | const struct type *original_type, |
e79eb02f AB |
3724 | struct value *original_value, |
3725 | CORE_ADDR original_value_address) | |
dfcee124 | 3726 | { |
e79eb02f AB |
3727 | gdb_assert (original_type->code () == TYPE_CODE_PTR |
3728 | || TYPE_IS_REFERENCE (original_type)); | |
3729 | ||
3730 | struct type *original_target_type = TYPE_TARGET_TYPE (original_type); | |
3731 | gdb::array_view<const gdb_byte> view; | |
3732 | struct type *resolved_original_target_type | |
3733 | = resolve_dynamic_type (original_target_type, view, | |
3734 | original_value_address); | |
3735 | ||
dfcee124 | 3736 | /* Re-adjust type. */ |
e79eb02f | 3737 | deprecated_set_value_type (value, resolved_original_target_type); |
dfcee124 AG |
3738 | |
3739 | /* Add embedding info. */ | |
3740 | set_value_enclosing_type (value, enc_type); | |
3741 | set_value_embedded_offset (value, value_pointed_to_offset (original_value)); | |
3742 | ||
3743 | /* We may be pointing to an object of some derived type. */ | |
3744 | return value_full_object (value, NULL, 0, 0, 0); | |
3745 | } | |
3746 | ||
994b9211 AC |
3747 | struct value * |
3748 | coerce_ref (struct value *arg) | |
3749 | { | |
df407dfe | 3750 | struct type *value_type_arg_tmp = check_typedef (value_type (arg)); |
a471c594 | 3751 | struct value *retval; |
dfcee124 | 3752 | struct type *enc_type; |
a109c7c1 | 3753 | |
a471c594 JK |
3754 | retval = coerce_ref_if_computed (arg); |
3755 | if (retval) | |
3756 | return retval; | |
3757 | ||
aa006118 | 3758 | if (!TYPE_IS_REFERENCE (value_type_arg_tmp)) |
a471c594 JK |
3759 | return arg; |
3760 | ||
dfcee124 AG |
3761 | enc_type = check_typedef (value_enclosing_type (arg)); |
3762 | enc_type = TYPE_TARGET_TYPE (enc_type); | |
3763 | ||
e79eb02f AB |
3764 | CORE_ADDR addr = unpack_pointer (value_type (arg), value_contents (arg)); |
3765 | retval = value_at_lazy (enc_type, addr); | |
9f1f738a | 3766 | enc_type = value_type (retval); |
e79eb02f AB |
3767 | return readjust_indirect_value_type (retval, enc_type, value_type_arg_tmp, |
3768 | arg, addr); | |
994b9211 AC |
3769 | } |
3770 | ||
3771 | struct value * | |
3772 | coerce_array (struct value *arg) | |
3773 | { | |
f3134b88 TT |
3774 | struct type *type; |
3775 | ||
994b9211 | 3776 | arg = coerce_ref (arg); |
f3134b88 TT |
3777 | type = check_typedef (value_type (arg)); |
3778 | ||
78134374 | 3779 | switch (type->code ()) |
f3134b88 TT |
3780 | { |
3781 | case TYPE_CODE_ARRAY: | |
67bd3fd5 | 3782 | if (!type->is_vector () && current_language->c_style_arrays_p ()) |
f3134b88 TT |
3783 | arg = value_coerce_array (arg); |
3784 | break; | |
3785 | case TYPE_CODE_FUNC: | |
3786 | arg = value_coerce_function (arg); | |
3787 | break; | |
3788 | } | |
994b9211 AC |
3789 | return arg; |
3790 | } | |
c906108c | 3791 | \f |
c906108c | 3792 | |
bbfdfe1c DM |
3793 | /* Return the return value convention that will be used for the |
3794 | specified type. */ | |
3795 | ||
3796 | enum return_value_convention | |
3797 | struct_return_convention (struct gdbarch *gdbarch, | |
3798 | struct value *function, struct type *value_type) | |
3799 | { | |
78134374 | 3800 | enum type_code code = value_type->code (); |
bbfdfe1c DM |
3801 | |
3802 | if (code == TYPE_CODE_ERROR) | |
3803 | error (_("Function return type unknown.")); | |
3804 | ||
3805 | /* Probe the architecture for the return-value convention. */ | |
3806 | return gdbarch_return_value (gdbarch, function, value_type, | |
3807 | NULL, NULL, NULL); | |
3808 | } | |
3809 | ||
48436ce6 AC |
3810 | /* Return true if the function returning the specified type is using |
3811 | the convention of returning structures in memory (passing in the | |
82585c72 | 3812 | address as a hidden first parameter). */ |
c906108c SS |
3813 | |
3814 | int | |
d80b854b | 3815 | using_struct_return (struct gdbarch *gdbarch, |
6a3a010b | 3816 | struct value *function, struct type *value_type) |
c906108c | 3817 | { |
78134374 | 3818 | if (value_type->code () == TYPE_CODE_VOID) |
667e784f | 3819 | /* A void return value is never in memory. See also corresponding |
44e5158b | 3820 | code in "print_return_value". */ |
667e784f AC |
3821 | return 0; |
3822 | ||
bbfdfe1c | 3823 | return (struct_return_convention (gdbarch, function, value_type) |
31db7b6c | 3824 | != RETURN_VALUE_REGISTER_CONVENTION); |
c906108c SS |
3825 | } |
3826 | ||
42be36b3 CT |
3827 | /* Set the initialized field in a value struct. */ |
3828 | ||
3829 | void | |
3830 | set_value_initialized (struct value *val, int status) | |
3831 | { | |
3832 | val->initialized = status; | |
3833 | } | |
3834 | ||
3835 | /* Return the initialized field in a value struct. */ | |
3836 | ||
3837 | int | |
4bf7b526 | 3838 | value_initialized (const struct value *val) |
42be36b3 CT |
3839 | { |
3840 | return val->initialized; | |
3841 | } | |
3842 | ||
41c60b4b SM |
3843 | /* Helper for value_fetch_lazy when the value is a bitfield. */ |
3844 | ||
3845 | static void | |
3846 | value_fetch_lazy_bitfield (struct value *val) | |
3847 | { | |
3848 | gdb_assert (value_bitsize (val) != 0); | |
3849 | ||
3850 | /* To read a lazy bitfield, read the entire enclosing value. This | |
3851 | prevents reading the same block of (possibly volatile) memory once | |
3852 | per bitfield. It would be even better to read only the containing | |
3853 | word, but we have no way to record that just specific bits of a | |
3854 | value have been fetched. */ | |
41c60b4b SM |
3855 | struct value *parent = value_parent (val); |
3856 | ||
3857 | if (value_lazy (parent)) | |
3858 | value_fetch_lazy (parent); | |
3859 | ||
3860 | unpack_value_bitfield (val, value_bitpos (val), value_bitsize (val), | |
3861 | value_contents_for_printing (parent), | |
3862 | value_offset (val), parent); | |
3863 | } | |
3864 | ||
3865 | /* Helper for value_fetch_lazy when the value is in memory. */ | |
3866 | ||
3867 | static void | |
3868 | value_fetch_lazy_memory (struct value *val) | |
3869 | { | |
3870 | gdb_assert (VALUE_LVAL (val) == lval_memory); | |
3871 | ||
3872 | CORE_ADDR addr = value_address (val); | |
3873 | struct type *type = check_typedef (value_enclosing_type (val)); | |
3874 | ||
3875 | if (TYPE_LENGTH (type)) | |
3876 | read_value_memory (val, 0, value_stack (val), | |
3877 | addr, value_contents_all_raw (val), | |
3878 | type_length_units (type)); | |
3879 | } | |
3880 | ||
3881 | /* Helper for value_fetch_lazy when the value is in a register. */ | |
3882 | ||
3883 | static void | |
3884 | value_fetch_lazy_register (struct value *val) | |
3885 | { | |
3886 | struct frame_info *next_frame; | |
3887 | int regnum; | |
3888 | struct type *type = check_typedef (value_type (val)); | |
3889 | struct value *new_val = val, *mark = value_mark (); | |
3890 | ||
3891 | /* Offsets are not supported here; lazy register values must | |
3892 | refer to the entire register. */ | |
3893 | gdb_assert (value_offset (val) == 0); | |
3894 | ||
3895 | while (VALUE_LVAL (new_val) == lval_register && value_lazy (new_val)) | |
3896 | { | |
3897 | struct frame_id next_frame_id = VALUE_NEXT_FRAME_ID (new_val); | |
3898 | ||
3899 | next_frame = frame_find_by_id (next_frame_id); | |
3900 | regnum = VALUE_REGNUM (new_val); | |
3901 | ||
3902 | gdb_assert (next_frame != NULL); | |
3903 | ||
3904 | /* Convertible register routines are used for multi-register | |
3905 | values and for interpretation in different types | |
3906 | (e.g. float or int from a double register). Lazy | |
3907 | register values should have the register's natural type, | |
3908 | so they do not apply. */ | |
3909 | gdb_assert (!gdbarch_convert_register_p (get_frame_arch (next_frame), | |
3910 | regnum, type)); | |
3911 | ||
3912 | /* FRAME was obtained, above, via VALUE_NEXT_FRAME_ID. | |
3913 | Since a "->next" operation was performed when setting | |
3914 | this field, we do not need to perform a "next" operation | |
3915 | again when unwinding the register. That's why | |
3916 | frame_unwind_register_value() is called here instead of | |
3917 | get_frame_register_value(). */ | |
3918 | new_val = frame_unwind_register_value (next_frame, regnum); | |
3919 | ||
3920 | /* If we get another lazy lval_register value, it means the | |
3921 | register is found by reading it from NEXT_FRAME's next frame. | |
3922 | frame_unwind_register_value should never return a value with | |
3923 | the frame id pointing to NEXT_FRAME. If it does, it means we | |
3924 | either have two consecutive frames with the same frame id | |
3925 | in the frame chain, or some code is trying to unwind | |
3926 | behind get_prev_frame's back (e.g., a frame unwind | |
3927 | sniffer trying to unwind), bypassing its validations. In | |
3928 | any case, it should always be an internal error to end up | |
3929 | in this situation. */ | |
3930 | if (VALUE_LVAL (new_val) == lval_register | |
3931 | && value_lazy (new_val) | |
3932 | && frame_id_eq (VALUE_NEXT_FRAME_ID (new_val), next_frame_id)) | |
3933 | internal_error (__FILE__, __LINE__, | |
3934 | _("infinite loop while fetching a register")); | |
3935 | } | |
3936 | ||
3937 | /* If it's still lazy (for instance, a saved register on the | |
3938 | stack), fetch it. */ | |
3939 | if (value_lazy (new_val)) | |
3940 | value_fetch_lazy (new_val); | |
3941 | ||
3942 | /* Copy the contents and the unavailability/optimized-out | |
3943 | meta-data from NEW_VAL to VAL. */ | |
3944 | set_value_lazy (val, 0); | |
3945 | value_contents_copy (val, value_embedded_offset (val), | |
3946 | new_val, value_embedded_offset (new_val), | |
3947 | type_length_units (type)); | |
3948 | ||
3949 | if (frame_debug) | |
3950 | { | |
3951 | struct gdbarch *gdbarch; | |
3952 | struct frame_info *frame; | |
3953 | /* VALUE_FRAME_ID is used here, instead of VALUE_NEXT_FRAME_ID, | |
3954 | so that the frame level will be shown correctly. */ | |
3955 | frame = frame_find_by_id (VALUE_FRAME_ID (val)); | |
3956 | regnum = VALUE_REGNUM (val); | |
3957 | gdbarch = get_frame_arch (frame); | |
3958 | ||
3959 | fprintf_unfiltered (gdb_stdlog, | |
3960 | "{ value_fetch_lazy " | |
3961 | "(frame=%d,regnum=%d(%s),...) ", | |
3962 | frame_relative_level (frame), regnum, | |
3963 | user_reg_map_regnum_to_name (gdbarch, regnum)); | |
3964 | ||
3965 | fprintf_unfiltered (gdb_stdlog, "->"); | |
3966 | if (value_optimized_out (new_val)) | |
3967 | { | |
3968 | fprintf_unfiltered (gdb_stdlog, " "); | |
3969 | val_print_optimized_out (new_val, gdb_stdlog); | |
3970 | } | |
3971 | else | |
3972 | { | |
3973 | int i; | |
3974 | const gdb_byte *buf = value_contents (new_val); | |
3975 | ||
3976 | if (VALUE_LVAL (new_val) == lval_register) | |
3977 | fprintf_unfiltered (gdb_stdlog, " register=%d", | |
3978 | VALUE_REGNUM (new_val)); | |
3979 | else if (VALUE_LVAL (new_val) == lval_memory) | |
3980 | fprintf_unfiltered (gdb_stdlog, " address=%s", | |
3981 | paddress (gdbarch, | |
3982 | value_address (new_val))); | |
3983 | else | |
3984 | fprintf_unfiltered (gdb_stdlog, " computed"); | |
3985 | ||
3986 | fprintf_unfiltered (gdb_stdlog, " bytes="); | |
3987 | fprintf_unfiltered (gdb_stdlog, "["); | |
3988 | for (i = 0; i < register_size (gdbarch, regnum); i++) | |
3989 | fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]); | |
3990 | fprintf_unfiltered (gdb_stdlog, "]"); | |
3991 | } | |
3992 | ||
3993 | fprintf_unfiltered (gdb_stdlog, " }\n"); | |
3994 | } | |
3995 | ||
3996 | /* Dispose of the intermediate values. This prevents | |
3997 | watchpoints from trying to watch the saved frame pointer. */ | |
3998 | value_free_to_mark (mark); | |
3999 | } | |
4000 | ||
a844296a SM |
4001 | /* Load the actual content of a lazy value. Fetch the data from the |
4002 | user's process and clear the lazy flag to indicate that the data in | |
4003 | the buffer is valid. | |
a58e2656 AB |
4004 | |
4005 | If the value is zero-length, we avoid calling read_memory, which | |
4006 | would abort. We mark the value as fetched anyway -- all 0 bytes of | |
a844296a | 4007 | it. */ |
a58e2656 | 4008 | |
a844296a | 4009 | void |
a58e2656 AB |
4010 | value_fetch_lazy (struct value *val) |
4011 | { | |
4012 | gdb_assert (value_lazy (val)); | |
4013 | allocate_value_contents (val); | |
9a0dc9e3 PA |
4014 | /* A value is either lazy, or fully fetched. The |
4015 | availability/validity is only established as we try to fetch a | |
4016 | value. */ | |
0c7e6dd8 TT |
4017 | gdb_assert (val->optimized_out.empty ()); |
4018 | gdb_assert (val->unavailable.empty ()); | |
a58e2656 | 4019 | if (value_bitsize (val)) |
41c60b4b | 4020 | value_fetch_lazy_bitfield (val); |
a58e2656 | 4021 | else if (VALUE_LVAL (val) == lval_memory) |
41c60b4b | 4022 | value_fetch_lazy_memory (val); |
a58e2656 | 4023 | else if (VALUE_LVAL (val) == lval_register) |
41c60b4b | 4024 | value_fetch_lazy_register (val); |
a58e2656 AB |
4025 | else if (VALUE_LVAL (val) == lval_computed |
4026 | && value_computed_funcs (val)->read != NULL) | |
4027 | value_computed_funcs (val)->read (val); | |
a58e2656 AB |
4028 | else |
4029 | internal_error (__FILE__, __LINE__, _("Unexpected lazy value type.")); | |
4030 | ||
4031 | set_value_lazy (val, 0); | |
a58e2656 AB |
4032 | } |
4033 | ||
a280dbd1 SDJ |
4034 | /* Implementation of the convenience function $_isvoid. */ |
4035 | ||
4036 | static struct value * | |
4037 | isvoid_internal_fn (struct gdbarch *gdbarch, | |
4038 | const struct language_defn *language, | |
4039 | void *cookie, int argc, struct value **argv) | |
4040 | { | |
4041 | int ret; | |
4042 | ||
4043 | if (argc != 1) | |
6bc305f5 | 4044 | error (_("You must provide one argument for $_isvoid.")); |
a280dbd1 | 4045 | |
78134374 | 4046 | ret = value_type (argv[0])->code () == TYPE_CODE_VOID; |
a280dbd1 SDJ |
4047 | |
4048 | return value_from_longest (builtin_type (gdbarch)->builtin_int, ret); | |
4049 | } | |
4050 | ||
53a008a6 | 4051 | /* Implementation of the convenience function $_creal. Extracts the |
8bdc1658 AB |
4052 | real part from a complex number. */ |
4053 | ||
4054 | static struct value * | |
4055 | creal_internal_fn (struct gdbarch *gdbarch, | |
4056 | const struct language_defn *language, | |
4057 | void *cookie, int argc, struct value **argv) | |
4058 | { | |
4059 | if (argc != 1) | |
4060 | error (_("You must provide one argument for $_creal.")); | |
4061 | ||
4062 | value *cval = argv[0]; | |
4063 | type *ctype = check_typedef (value_type (cval)); | |
78134374 | 4064 | if (ctype->code () != TYPE_CODE_COMPLEX) |
8bdc1658 | 4065 | error (_("expected a complex number")); |
4c99290d | 4066 | return value_real_part (cval); |
8bdc1658 AB |
4067 | } |
4068 | ||
4069 | /* Implementation of the convenience function $_cimag. Extracts the | |
4070 | imaginary part from a complex number. */ | |
4071 | ||
4072 | static struct value * | |
4073 | cimag_internal_fn (struct gdbarch *gdbarch, | |
4074 | const struct language_defn *language, | |
4075 | void *cookie, int argc, | |
4076 | struct value **argv) | |
4077 | { | |
4078 | if (argc != 1) | |
4079 | error (_("You must provide one argument for $_cimag.")); | |
4080 | ||
4081 | value *cval = argv[0]; | |
4082 | type *ctype = check_typedef (value_type (cval)); | |
78134374 | 4083 | if (ctype->code () != TYPE_CODE_COMPLEX) |
8bdc1658 | 4084 | error (_("expected a complex number")); |
4c99290d | 4085 | return value_imaginary_part (cval); |
8bdc1658 AB |
4086 | } |
4087 | ||
d5f4488f SM |
4088 | #if GDB_SELF_TEST |
4089 | namespace selftests | |
4090 | { | |
4091 | ||
4092 | /* Test the ranges_contain function. */ | |
4093 | ||
4094 | static void | |
4095 | test_ranges_contain () | |
4096 | { | |
4097 | std::vector<range> ranges; | |
4098 | range r; | |
4099 | ||
4100 | /* [10, 14] */ | |
4101 | r.offset = 10; | |
4102 | r.length = 5; | |
4103 | ranges.push_back (r); | |
4104 | ||
4105 | /* [20, 24] */ | |
4106 | r.offset = 20; | |
4107 | r.length = 5; | |
4108 | ranges.push_back (r); | |
4109 | ||
4110 | /* [2, 6] */ | |
4111 | SELF_CHECK (!ranges_contain (ranges, 2, 5)); | |
4112 | /* [9, 13] */ | |
4113 | SELF_CHECK (ranges_contain (ranges, 9, 5)); | |
4114 | /* [10, 11] */ | |
4115 | SELF_CHECK (ranges_contain (ranges, 10, 2)); | |
4116 | /* [10, 14] */ | |
4117 | SELF_CHECK (ranges_contain (ranges, 10, 5)); | |
4118 | /* [13, 18] */ | |
4119 | SELF_CHECK (ranges_contain (ranges, 13, 6)); | |
4120 | /* [14, 18] */ | |
4121 | SELF_CHECK (ranges_contain (ranges, 14, 5)); | |
4122 | /* [15, 18] */ | |
4123 | SELF_CHECK (!ranges_contain (ranges, 15, 4)); | |
4124 | /* [16, 19] */ | |
4125 | SELF_CHECK (!ranges_contain (ranges, 16, 4)); | |
4126 | /* [16, 21] */ | |
4127 | SELF_CHECK (ranges_contain (ranges, 16, 6)); | |
4128 | /* [21, 21] */ | |
4129 | SELF_CHECK (ranges_contain (ranges, 21, 1)); | |
4130 | /* [21, 25] */ | |
4131 | SELF_CHECK (ranges_contain (ranges, 21, 5)); | |
4132 | /* [26, 28] */ | |
4133 | SELF_CHECK (!ranges_contain (ranges, 26, 3)); | |
4134 | } | |
4135 | ||
4136 | /* Check that RANGES contains the same ranges as EXPECTED. */ | |
4137 | ||
4138 | static bool | |
4139 | check_ranges_vector (gdb::array_view<const range> ranges, | |
4140 | gdb::array_view<const range> expected) | |
4141 | { | |
4142 | return ranges == expected; | |
4143 | } | |
4144 | ||
4145 | /* Test the insert_into_bit_range_vector function. */ | |
4146 | ||
4147 | static void | |
4148 | test_insert_into_bit_range_vector () | |
4149 | { | |
4150 | std::vector<range> ranges; | |
4151 | ||
4152 | /* [10, 14] */ | |
4153 | { | |
4154 | insert_into_bit_range_vector (&ranges, 10, 5); | |
4155 | static const range expected[] = { | |
4156 | {10, 5} | |
4157 | }; | |
4158 | SELF_CHECK (check_ranges_vector (ranges, expected)); | |
4159 | } | |
4160 | ||
4161 | /* [10, 14] */ | |
4162 | { | |
4163 | insert_into_bit_range_vector (&ranges, 11, 4); | |
4164 | static const range expected = {10, 5}; | |
4165 | SELF_CHECK (check_ranges_vector (ranges, expected)); | |
4166 | } | |
4167 | ||
4168 | /* [10, 14] [20, 24] */ | |
4169 | { | |
4170 | insert_into_bit_range_vector (&ranges, 20, 5); | |
4171 | static const range expected[] = { | |
4172 | {10, 5}, | |
4173 | {20, 5}, | |
4174 | }; | |
4175 | SELF_CHECK (check_ranges_vector (ranges, expected)); | |
4176 | } | |
4177 | ||
4178 | /* [10, 14] [17, 24] */ | |
4179 | { | |
4180 | insert_into_bit_range_vector (&ranges, 17, 5); | |
4181 | static const range expected[] = { | |
4182 | {10, 5}, | |
4183 | {17, 8}, | |
4184 | }; | |
4185 | SELF_CHECK (check_ranges_vector (ranges, expected)); | |
4186 | } | |
4187 | ||
4188 | /* [2, 8] [10, 14] [17, 24] */ | |
4189 | { | |
4190 | insert_into_bit_range_vector (&ranges, 2, 7); | |
4191 | static const range expected[] = { | |
4192 | {2, 7}, | |
4193 | {10, 5}, | |
4194 | {17, 8}, | |
4195 | }; | |
4196 | SELF_CHECK (check_ranges_vector (ranges, expected)); | |
4197 | } | |
4198 | ||
4199 | /* [2, 14] [17, 24] */ | |
4200 | { | |
4201 | insert_into_bit_range_vector (&ranges, 9, 1); | |
4202 | static const range expected[] = { | |
4203 | {2, 13}, | |
4204 | {17, 8}, | |
4205 | }; | |
4206 | SELF_CHECK (check_ranges_vector (ranges, expected)); | |
4207 | } | |
4208 | ||
4209 | /* [2, 14] [17, 24] */ | |
4210 | { | |
4211 | insert_into_bit_range_vector (&ranges, 9, 1); | |
4212 | static const range expected[] = { | |
4213 | {2, 13}, | |
4214 | {17, 8}, | |
4215 | }; | |
4216 | SELF_CHECK (check_ranges_vector (ranges, expected)); | |
4217 | } | |
4218 | ||
4219 | /* [2, 33] */ | |
4220 | { | |
4221 | insert_into_bit_range_vector (&ranges, 4, 30); | |
4222 | static const range expected = {2, 32}; | |
4223 | SELF_CHECK (check_ranges_vector (ranges, expected)); | |
4224 | } | |
4225 | } | |
4226 | ||
4227 | } /* namespace selftests */ | |
4228 | #endif /* GDB_SELF_TEST */ | |
4229 | ||
6c265988 | 4230 | void _initialize_values (); |
c906108c | 4231 | void |
6c265988 | 4232 | _initialize_values () |
c906108c | 4233 | { |
1a966eab | 4234 | add_cmd ("convenience", no_class, show_convenience, _("\ |
f47f77df DE |
4235 | Debugger convenience (\"$foo\") variables and functions.\n\ |
4236 | Convenience variables are created when you assign them values;\n\ | |
4237 | thus, \"set $foo=1\" gives \"$foo\" the value 1. Values may be any type.\n\ | |
1a966eab | 4238 | \n\ |
c906108c SS |
4239 | A few convenience variables are given values automatically:\n\ |
4240 | \"$_\"holds the last address examined with \"x\" or \"info lines\",\n\ | |
f47f77df DE |
4241 | \"$__\" holds the contents of the last address examined with \"x\"." |
4242 | #ifdef HAVE_PYTHON | |
4243 | "\n\n\ | |
4244 | Convenience functions are defined via the Python API." | |
4245 | #endif | |
4246 | ), &showlist); | |
7e20dfcd | 4247 | add_alias_cmd ("conv", "convenience", no_class, 1, &showlist); |
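/* For illustration, a hypothetical debugging session exercising the
   "show convenience" command registered above (the variable name $foo
   is arbitrary):

     (gdb) set $foo = 1
     (gdb) show convenience
     $foo = 1

   The alias registered above lets "show conv" be used as shorthand.  */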
c906108c | 4248 | |
db5f229b | 4249 | add_cmd ("values", no_set_class, show_values, _("\ |
3e43a32a | 4250 | Elements of value history around item number IDX (or last ten)."), |
c906108c | 4251 | &showlist); |
53e5f3cf AS |
4252 | |
4253 | add_com ("init-if-undefined", class_vars, init_if_undefined_command, _("\ | |
4254 | Initialize a convenience variable if necessary.\n\ | |
4255 | init-if-undefined VARIABLE = EXPRESSION\n\ | |
4256 | Set an internal VARIABLE to the result of the EXPRESSION if it does not\n\ | |
4257 | exist or does not contain a value. The EXPRESSION is not evaluated if the\n\ | |
4258 | VARIABLE is already initialized.")); | |
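/* A hypothetical session for "init-if-undefined"; the variable name
   $count is arbitrary:

     (gdb) init-if-undefined $count = 0
     (gdb) init-if-undefined $count = 99
     (gdb) print $count
     $1 = 0

   The second command has no effect because $count already holds a
   value.  */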
bc3b79fd TJB |
4259 | |
4260 | add_prefix_cmd ("function", no_class, function_command, _("\ | |
4261 | Placeholder command for showing help on convenience functions."), | |
2f822da5 | 4262 | &functionlist, 0, &cmdlist); |
a280dbd1 SDJ |
4263 | |
4264 | add_internal_function ("_isvoid", _("\ | |
4265 | Check whether an expression is void.\n\ | |
4266 | Usage: $_isvoid (expression)\n\ | |
4267 | Return 1 if the expression is void, zero otherwise."), | |
4268 | isvoid_internal_fn, NULL); | |
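/* A hypothetical session for $_isvoid; $unset is assumed to be a
   convenience variable that has never been assigned:

     (gdb) print $_isvoid ($unset)
     $1 = 1
     (gdb) print $_isvoid (42)
     $2 = 0
*/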
5fdf6324 | 4269 | |
8bdc1658 AB |
4270 | add_internal_function ("_creal", _("\ |
4271 | Extract the real part of a complex number.\n\ | |
4272 | Usage: $_creal (expression)\n\ | |
4273 | Return the real part of a complex number; the type of the result\n\ | |
4274 | depends on the type of the complex number."), | |
4275 | creal_internal_fn, NULL); | |
4276 | ||
4277 | add_internal_function ("_cimag", _("\ | |
4278 | Extract the imaginary part of a complex number.\n\ | |
4279 | Usage: $_cimag (expression)\n\ | |
4280 | Return the imaginary part of a complex number; the type of the result\n\ | |
4281 | depends on the type of the complex number."), | |
4282 | cimag_internal_fn, NULL); | |
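/* A hypothetical session for $_creal and $_cimag, assuming the program
   being debugged has a complex variable named z:

     (gdb) print $_creal (z)
     (gdb) print $_cimag (z)

   Each result is a scalar whose type matches the component type of z,
   as described in the help strings above.  */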
4283 | ||
5fdf6324 AB |
4284 | add_setshow_zuinteger_unlimited_cmd ("max-value-size", |
4285 | class_support, &max_value_size, _("\ | |
4286 | Set the maximum size of a value gdb will load from the inferior."), _("\ | |
4287 | Show the maximum size of a value gdb will load from the inferior."), _("\ | |
4288 | Use this to control the maximum size, in bytes, of a value that gdb\n\ | |
4289 | will load from the inferior. Setting this value to 'unlimited'\n\ | |
4290 | disables checking.\n\ | |
4291 | Setting this does not invalidate already allocated values; it only\n\ | |
4292 | prevents future values larger than this size from being allocated."), | |
4293 | set_max_value_size, | |
4294 | show_max_value_size, | |
4295 | &setlist, &showlist); | |
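/* Illustrative use of the setting registered above; the size shown is
   arbitrary:

     (gdb) set max-value-size 1048576
     (gdb) set max-value-size unlimited
     (gdb) show max-value-size
*/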
d5f4488f SM |
4296 | #if GDB_SELF_TEST |
4297 | selftests::register_test ("ranges_contain", selftests::test_ranges_contain); | |
4298 | selftests::register_test ("insert_into_bit_range_vector", | |
4299 | selftests::test_insert_into_bit_range_vector); | |
4300 | #endif | |
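/* In a GDB build with self tests enabled, the tests registered above
   can be run by name, e.g.:

     (gdb) maintenance selftest ranges_contain
     (gdb) maintenance selftest insert_into_bit_range_vector
*/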
c906108c | 4301 | } |
9d1447e0 SDJ |
4302 | |
4303 | /* See value.h. */ | |
4304 | ||
4305 | void | |
4306 | finalize_values () | |
4307 | { | |
4308 | all_values.clear (); | |
4309 | } |