else if (value_lazy (val)
/* Be careful not to make a lazy not_lval value. */
|| (VALUE_LVAL (val) != not_lval
- && TYPE_LENGTH (type) > TYPE_LENGTH (value_type (val))))
+ && type->length () > value_type (val)->length ()))
result = allocate_value_lazy (type);
else
{
result = allocate_value (type);
- value_contents_copy (result, 0, val, 0, TYPE_LENGTH (type));
+ value_contents_copy (result, 0, val, 0, type->length ());
}
set_value_component_location (result, val);
set_value_bitsize (result, value_bitsize (val));
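/* Maximum value of integral type T, as a signed quantity. */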
max_of_type (struct type *t)
{
if (t->is_unsigned ())
- return (LONGEST) umax_of_size (TYPE_LENGTH (t));
+ return (LONGEST) umax_of_size (t->length ());
else
- return max_of_size (TYPE_LENGTH (t));
+ return max_of_size (t->length ());
}
/* Minimum value of integral type T, as a signed quantity. */
if (t->is_unsigned ())
return 0;
else
- return min_of_size (TYPE_LENGTH (t));
+ return min_of_size (t->length ());
}
/* The largest value in the domain of TYPE, a discrete type, as an integer. */
return fold_storage.c_str ();
}
-/* The "encoded" form of DECODED, according to GNAT conventions. */
+/* The "encoded" form of DECODED, according to GNAT conventions. If
+ FOLD is true (the default), case-fold any ordinary symbol. Symbols
+ with <...> quoting are not folded in any case. */
std::string
-ada_encode (const char *decoded)
+ada_encode (const char *decoded, bool fold)
{
- if (decoded[0] != '<')
+ if (fold && decoded[0] != '<')
decoded = ada_fold_name (decoded);
return ada_encode_1 (decoded, true);
}
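/* The bounds record of a thin-pointer array is stored immediately
   before the data it describes, so its address is ADDR minus the
   size of the bounds type. */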
return
value_from_longest (lookup_pointer_type (bounds_type),
- addr - TYPE_LENGTH (bounds_type));
+ addr - bounds_type->length ());
}
else if (is_thick_pntr (type))
if (TYPE_FIELD_BITSIZE (type, 1) > 0)
return TYPE_FIELD_BITSIZE (type, 1);
else
- return 8 * TYPE_LENGTH (ada_check_typedef (type->field (1).type ()));
+ return 8 * ada_check_typedef (type->field (1).type ())->length ();
}
/* If TYPE is the type of an array descriptor (fat or thin pointer) or a
if (TYPE_FIELD_BITSIZE (type, 0) > 0)
return TYPE_FIELD_BITSIZE (type, 0);
else
- return TARGET_CHAR_BIT * TYPE_LENGTH (type->field (0).type ());
+ return TARGET_CHAR_BIT * type->field (0).type ()->length ();
}
/* If BOUNDS is an array-bounds structure (or pointer to one), return
if (TYPE_FIELD_BITSIZE (type, 2 * i + which - 2) > 0)
return TYPE_FIELD_BITSIZE (type, 2 * i + which - 2);
else
- return 8 * TYPE_LENGTH (type->field (2 * i + which - 2).type ());
+ return 8 * type->field (2 * i + which - 2).type ()->length ();
}
/* If TYPE is the type of an array-bounds structure, the type of its
int array_bitsize =
(hi - lo + 1) * TYPE_FIELD_BITSIZE (elt_type, 0);
- TYPE_LENGTH (array_type) = (array_bitsize + 7) / 8;
+ array_type->set_length ((array_bitsize + 7) / 8);
}
}
}
|| !get_discrete_bounds (index_type, &low_bound, &high_bound))
low_bound = high_bound = 0;
if (high_bound < low_bound)
- *elt_bits = TYPE_LENGTH (new_type) = 0;
+ {
+ *elt_bits = 0;
+ new_type->set_length (0);
+ }
else
{
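/* Scale the element bit size by the number of elements, then round
   the total up to a whole number of host bytes for the type length. */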
*elt_bits *= (high_bound - low_bound + 1);
- TYPE_LENGTH (new_type) =
- (*elt_bits + HOST_CHAR_BIT - 1) / HOST_CHAR_BIT;
+ new_type->set_length ((*elt_bits + HOST_CHAR_BIT - 1) / HOST_CHAR_BIT);
}
new_type->set_is_fixed_instance (true);
LONGEST elt_bitsize = elt_len * TYPE_FIELD_BITSIZE (elt_type, 0);
TYPE_FIELD_BITSIZE (type, 0) = elt_bitsize;
- TYPE_LENGTH (type) = ((our_len * elt_bitsize + HOST_CHAR_BIT - 1)
- / HOST_CHAR_BIT);
+ type->set_length (((our_len * elt_bitsize + HOST_CHAR_BIT - 1)
+ / HOST_CHAR_BIT));
}
return our_len;
const gdb_byte *valaddr = value_contents_for_printing (arr).data ();
CORE_ADDR address = value_address (arr);
gdb::array_view<const gdb_byte> view
- = gdb::make_array_view (valaddr, TYPE_LENGTH (type));
+ = gdb::make_array_view (valaddr, type->length ());
type = resolve_dynamic_type (type, view, address);
recursively_update_array_bitsize (type);
bit_size += 1;
mod >>= 1;
}
- bit_pos = HOST_CHAR_BIT * TYPE_LENGTH (value_type (arr)) - bit_size;
+ bit_pos = HOST_CHAR_BIT * value_type (arr)->length () - bit_size;
arr = ada_value_primitive_packed_val (arr, NULL,
bit_pos / HOST_CHAR_BIT,
bit_pos % HOST_CHAR_BIT,
is_big_endian, has_negatives (type),
is_scalar);
type = resolve_dynamic_type (type, staging, 0);
- if (TYPE_LENGTH (type) < (bit_size + HOST_CHAR_BIT - 1) / HOST_CHAR_BIT)
+ if (type->length () < (bit_size + HOST_CHAR_BIT - 1) / HOST_CHAR_BIT)
{
/* This happens when the length of the object is dynamic,
and is actually smaller than the space reserved for it.
For instance, in an array of variant records, the bit_size
we're given is the array stride, which is constant and
normally equal to the maximum size of its element.
But, in reality, each element only actually spans a portion
of that stride. */
- bit_size = TYPE_LENGTH (type) * HOST_CHAR_BIT;
+ bit_size = type->length () * HOST_CHAR_BIT;
}
}
if (bit_size == 0)
{
- memset (unpacked, 0, TYPE_LENGTH (type));
+ memset (unpacked, 0, type->length ());
return v;
}
- if (staging.size () == TYPE_LENGTH (type))
+ if (staging.size () == type->length ())
{
/* Small short-cut: If we've unpacked the data into a buffer
of the same size as TYPE's length, then we can reuse that,
}
else
ada_unpack_from_contents (src, bit_offset, bit_size,
- unpacked, TYPE_LENGTH (type),
+ unpacked, type->length (),
is_big_endian, has_negatives (type), is_scalar);
return v;
read_memory (to_addr, buffer, len);
from_size = value_bitsize (fromval);
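/* A bit size of zero means FROMVAL is not a bitfield; in that case
   use the full bit width of its type. */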
if (from_size == 0)
- from_size = TYPE_LENGTH (value_type (fromval)) * TARGET_CHAR_BIT;
+ from_size = value_type (fromval)->length () * TARGET_CHAR_BIT;
const int is_big_endian = type_byte_order (type) == BFD_ENDIAN_BIG;
ULONGEST from_offset = 0;
val = value_copy (toval);
memcpy (value_contents_raw (val).data (),
value_contents (fromval).data (),
- TYPE_LENGTH (type));
+ type->length ());
deprecated_set_value_type (val, type);
return val;
val = value_cast (value_type (component), val);
if (value_bitsize (component) == 0)
- bits = TARGET_CHAR_BIT * TYPE_LENGTH (value_type (component));
+ bits = TARGET_CHAR_BIT * value_type (component)->length ();
else
bits = value_bitsize (component);
if (is_scalar_type (check_typedef (value_type (component))))
src_offset
- = TYPE_LENGTH (value_type (component)) * TARGET_CHAR_BIT - bits;
+ = value_type (component)->length () * TARGET_CHAR_BIT - bits;
else
src_offset = 0;
copy_bitwise ((value_contents_writeable (container).data ()
ULONGEST stride = TYPE_FIELD_BITSIZE (slice_type, 0) / 8;
if (stride == 0)
- stride = TYPE_LENGTH (type0->target_type ());
+ stride = type0->target_type ()->length ();
base = value_as_address (array_ptr) + (*low_pos - *base_low_pos) * stride;
return value_at_lazy (slice_type, base);
return 0;
atype = atype->target_type ();
/* This can only happen if the actual argument is 'null'. */
- if (atype->code () == TYPE_CODE_INT && TYPE_LENGTH (atype) == 0)
+ if (atype->code () == TYPE_CODE_INT && atype->length () == 0)
return 1;
return ada_type_match (ftype->target_type (), atype);
case TYPE_CODE_INT:
}
}
-/* True iff TYPE is discrete (INT, RANGE, ENUM). */
+/* True iff TYPE is discrete, as defined in the Ada Reference Manual.
+ This essentially means one of (INT, RANGE, ENUM) -- but note that
+ "enum" includes character and boolean as well. */
static int
discrete_type_p (struct type *type)
case TYPE_CODE_RANGE:
case TYPE_CODE_ENUM:
case TYPE_CODE_BOOL:
+ case TYPE_CODE_CHAR:
return 1;
default:
return 0;
if (VALUE_LVAL (val) == not_lval
|| VALUE_LVAL (val) == lval_internalvar)
{
- int len = TYPE_LENGTH (ada_check_typedef (value_type (val)));
+ int len = ada_check_typedef (value_type (val))->length ();
const CORE_ADDR addr =
value_as_long (value_allocate_space_in_inferior (len));
static CORE_ADDR
value_pointer (struct value *value, struct type *type)
{
- unsigned len = TYPE_LENGTH (type);
+ unsigned len = type->length ();
gdb_byte *buf = (gdb_byte *) alloca (len);
CORE_ADDR addr;
global symbols are searched. */
struct bound_minimal_symbol
-ada_lookup_simple_minsym (const char *name)
+ada_lookup_simple_minsym (const char *name, struct objfile *objfile)
{
struct bound_minimal_symbol result;
symbol_name_matcher_ftype *match_name
= ada_get_symbol_name_matcher (lookup_name);
- for (objfile *objfile : current_program_space->objfiles ())
- {
- for (minimal_symbol *msymbol : objfile->msymbols ())
- {
- if (match_name (msymbol->linkage_name (), lookup_name, NULL)
- && msymbol->type () != mst_solib_trampoline)
- {
- result.minsym = msymbol;
- result.objfile = objfile;
- break;
- }
- }
- }
+ gdbarch_iterate_over_objfiles_in_search_order
+ (objfile != NULL ? objfile->arch () : target_gdbarch (),
+ [&result, lookup_name, match_name] (struct objfile *obj)
+ {
+ for (minimal_symbol *msymbol : obj->msymbols ())
+ {
+ if (match_name (msymbol->linkage_name (), lookup_name, nullptr)
+ && msymbol->type () != mst_solib_trampoline)
+ {
+ result.minsym = msymbol;
+ result.objfile = obj;
+ return 1;
+ }
+ }
+
+ return 0;
+ }, objfile);
return result;
}
gdb::array_view<const gdb_byte> contents;
if (valaddr != nullptr)
- contents = gdb::make_array_view (valaddr, TYPE_LENGTH (type));
+ contents = gdb::make_array_view (valaddr, type->length ());
struct type *resolved_type = resolve_dynamic_type (type, contents, address);
if (find_struct_field ("_tag", resolved_type, 0, &tag_type, &tag_byte_offset,
NULL, NULL, NULL))
/* Storage_Offset'Last is used to indicate that a dynamic offset to
top is used. In this situation the offset is stored just after
the tag, in the object itself. */
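/* For a signed OFFSET_TYPE that is N bytes wide, this computes
   2**(8 * N - 1) - 1, the largest positive value it can hold. */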
- ULONGEST last = (((ULONGEST) 1) << (8 * TYPE_LENGTH (offset_type) - 1)) - 1;
+ ULONGEST last = (((ULONGEST) 1) << (8 * offset_type->length () - 1)) - 1;
if (offset_to_top == last)
{
struct value *tem = value_addr (tag);
type->set_code (TYPE_CODE_STRUCT);
INIT_NONE_SPECIFIC (type);
type->set_name ("<empty>");
- TYPE_LENGTH (type) = 0;
+ type->set_length (0);
return type;
}
CORE_ADDR address, struct value *dval0,
int keep_dynamic_fields)
{
- struct value *mark = value_mark ();
struct value *dval;
struct type *rtype;
int nfields, bit_len;
int fld_bit_len;
int f;
+ scoped_value_mark mark;
+
/* Compute the number of fields in this record type that are going
to be processed: unless keep_dynamic_fields, only fields whose
position and length are static will be processed. */
an overflow should not happen in practice. So rather than
adding overflow recovery code to this already complex code,
we just assume that it's not going to happen. */
- fld_bit_len =
- TYPE_LENGTH (rtype->field (f).type ()) * TARGET_CHAR_BIT;
+ fld_bit_len = rtype->field (f).type ()->length () * TARGET_CHAR_BIT;
}
else
{
field_type = ada_typedef_target_type (field_type);
fld_bit_len =
- TYPE_LENGTH (ada_check_typedef (field_type)) * TARGET_CHAR_BIT;
+ ada_check_typedef (field_type)->length () * TARGET_CHAR_BIT;
}
}
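/* BIT_LEN tracks the highest bit extent of any field placed so far;
   the record length is that extent rounded up to whole bytes. */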
if (off + fld_bit_len > bit_len)
bit_len = off + fld_bit_len;
off += fld_bit_len;
- TYPE_LENGTH (rtype) =
- align_up (bit_len, TARGET_CHAR_BIT) / TARGET_CHAR_BIT;
+ rtype->set_length (align_up (bit_len, TARGET_CHAR_BIT) / TARGET_CHAR_BIT);
}
/* We handle the variant part, if any, at the end because of certain
rtype->field (variant_field).set_type (branch_type);
rtype->field (variant_field).set_name ("S");
fld_bit_len =
- TYPE_LENGTH (rtype->field (variant_field).type ()) *
- TARGET_CHAR_BIT;
+ rtype->field (variant_field).type ()->length () * TARGET_CHAR_BIT;
if (off + fld_bit_len > bit_len)
bit_len = off + fld_bit_len;
- TYPE_LENGTH (rtype) =
- align_up (bit_len, TARGET_CHAR_BIT) / TARGET_CHAR_BIT;
+
+ rtype->set_length
+ (align_up (bit_len, TARGET_CHAR_BIT) / TARGET_CHAR_BIT);
}
}
probably in the debug info. In that case, we don't round up the size
of the resulting type. If this record is not part of another structure,
the current RTYPE length might be good enough for our purposes. */
- if (TYPE_LENGTH (type) <= 0)
+ if (type->length () <= 0)
{
if (rtype->name ())
warning (_("Invalid type size for `%s' detected: %s."),
- rtype->name (), pulongest (TYPE_LENGTH (type)));
+ rtype->name (), pulongest (type->length ()));
else
warning (_("Invalid type size for <unnamed> detected: %s."),
- pulongest (TYPE_LENGTH (type)));
+ pulongest (type->length ()));
}
else
- {
- TYPE_LENGTH (rtype) = align_up (TYPE_LENGTH (rtype),
- TYPE_LENGTH (type));
- }
+ rtype->set_length (align_up (rtype->length (), type->length ()));
- value_free_to_mark (mark);
return rtype;
}
type->set_name (ada_type_name (type0));
type->set_is_fixed_instance (true);
- TYPE_LENGTH (type) = 0;
+ type->set_length (0);
}
type->field (f).set_type (new_type);
type->field (f).set_name (type0->field (f).name ());
to_record_with_fixed_variant_part (struct type *type, const gdb_byte *valaddr,
CORE_ADDR address, struct value *dval0)
{
- struct value *mark = value_mark ();
struct value *dval;
struct type *rtype;
struct type *branch_type;
if (variant_field == -1)
return type;
+ scoped_value_mark mark;
if (dval0 == NULL)
{
dval = value_from_contents_and_address (type, valaddr, address);
rtype->set_name (ada_type_name (type));
rtype->set_is_fixed_instance (true);
- TYPE_LENGTH (rtype) = TYPE_LENGTH (type);
+ rtype->set_length (type->length ());
branch_type = to_fixed_variant_branch_type
(type->field (variant_field).type (),
rtype->field (variant_field).set_type (branch_type);
rtype->field (variant_field).set_name ("S");
TYPE_FIELD_BITSIZE (rtype, variant_field) = 0;
- TYPE_LENGTH (rtype) += TYPE_LENGTH (branch_type);
+ rtype->set_length (rtype->length () + branch_type->length ());
}
- TYPE_LENGTH (rtype) -= TYPE_LENGTH (type->field (variant_field).type ());
- value_free_to_mark (mark);
+ rtype->set_length (rtype->length ()
+ - type->field (variant_field).type ()->length ());
+
return rtype;
}
type was a regular (non-packed) array type. As a result, the
bitsize of the array elements needs to be set again, and the array
length needs to be recomputed based on that bitsize. */
- int len = TYPE_LENGTH (result) / TYPE_LENGTH (result->target_type ());
+ int len = result->length () / result->target_type ()->length ();
int elt_bitsize = TYPE_FIELD_BITSIZE (type0, 0);
TYPE_FIELD_BITSIZE (result, 0) = TYPE_FIELD_BITSIZE (type0, 0);
- TYPE_LENGTH (result) = len * elt_bitsize / HOST_CHAR_BIT;
- if (TYPE_LENGTH (result) * HOST_CHAR_BIT < len * elt_bitsize)
- TYPE_LENGTH (result)++;
+ result->set_length (len * elt_bitsize / HOST_CHAR_BIT);
+ if (result->length () * HOST_CHAR_BIT < len * elt_bitsize)
+ result->set_length (result->length () + 1);
}
result->set_is_fixed_instance (true);
xvz_name, except.what ());
}
- if (xvz_found && TYPE_LENGTH (fixed_record_type) != size)
+ if (xvz_found && fixed_record_type->length () != size)
{
fixed_record_type = copy_type (fixed_record_type);
- TYPE_LENGTH (fixed_record_type) = size;
+ fixed_record_type->set_length (size);
/* The FIXED_RECORD_TYPE may have been a stub. We have
observed this when the debugging info is STABS, and
gdb_assert (is_integral_type (type->target_type ()));
gdb_assert (value_type (val)->code () == TYPE_CODE_ARRAY);
gdb_assert (is_integral_type (value_type (val)->target_type ()));
- gdb_assert (TYPE_LENGTH (type->target_type ())
- > TYPE_LENGTH (value_type (val)->target_type ()));
+ gdb_assert (type->target_type ()->length ()
+ > value_type (val)->target_type ()->length ());
if (!get_array_bounds (type, &lo, &hi))
error (_("unable to determine array bounds"));
for (i = 0; i < hi - lo + 1; i++)
{
struct value *elt = value_cast (elt_type, value_subscript (val, lo + i));
- int elt_len = TYPE_LENGTH (elt_type);
+ int elt_len = elt_type->length ();
copy (value_contents_all (elt), res_contents.slice (elt_len * i, elt_len));
}
if (is_integral_type (type->target_type ())
&& is_integral_type (type2->target_type ())
- && TYPE_LENGTH (type2->target_type ())
- < TYPE_LENGTH (type->target_type ()))
+ && type2->target_type ()->length () < type->target_type ()->length ())
{
/* Allow implicit promotion of the array elements to
a wider type. */
return ada_promote_array_of_integrals (type, val);
}
- if (TYPE_LENGTH (type2->target_type ())
- != TYPE_LENGTH (type->target_type ()))
+ if (type2->target_type ()->length () != type->target_type ()->length ())
error (_("Incompatible types in assignment"));
deprecated_set_value_type (val, type);
}
val = allocate_value (type1);
store_unsigned_integer (value_contents_raw (val).data (),
- TYPE_LENGTH (value_type (val)),
+ value_type (val)->length (),
type_byte_order (type1), v);
return val;
}
/* FIXME: The following works only for types whose
representations use all bits (no padding or undefined bits)
and do not have user-defined equality. */
- return (TYPE_LENGTH (arg1_type) == TYPE_LENGTH (arg2_type)
+ return (arg1_type->length () == arg2_type->length ()
&& memcmp (value_contents (arg1).data (),
value_contents (arg2).data (),
- TYPE_LENGTH (arg1_type)) == 0);
+ arg1_type->length ()) == 0);
}
return value_equal (arg1, arg2);
}
return value_zero (builtin_type (exp->gdbarch)->builtin_int, not_lval);
else
return value_from_longest (builtin_type (exp->gdbarch)->builtin_int,
- TARGET_CHAR_BIT * TYPE_LENGTH (type));
+ TARGET_CHAR_BIT * type->length ());
}
/* A helper function for UNOP_ABS. */
const std::string &str = std::get<0> (m_storage);
const char *encoding;
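/* Select a character encoding based on the width of the Ada
   character type (1, 2 or 4 bytes, i.e. Character, Wide_Character
   or Wide_Wide_Character). */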
- switch (TYPE_LENGTH (char_type))
+ switch (char_type->length ())
{
case 1:
{
default:
error (_("unexpected character type size %s"),
- pulongest (TYPE_LENGTH (char_type)));
+ pulongest (char_type->length ()));
}
auto_obstack converted;
struct type *stringtype
= lookup_array_range_type (char_type, 1,
obstack_object_size (&converted)
- / TYPE_LENGTH (char_type));
+ / char_type->length ());
struct value *val = allocate_value (stringtype);
memcpy (value_contents_raw (val).data (),
obstack_base (&converted),
/* create_static_range_type alters the resulting type's length
to match the size of the base_type, which is not what we want.
Set it back to the original range type's length. */
- TYPE_LENGTH (type) = TYPE_LENGTH (raw_type);
+ type->set_length (raw_type->length ());
type->set_name (name);
return type;
}
to most users. */
static int
-is_known_support_routine (struct frame_info *frame)
+is_known_support_routine (frame_info_ptr frame)
{
enum language func_lang;
int i;
part of the Ada run-time, starting from FI and moving upward. */
void
-ada_find_printable_frame (struct frame_info *fi)
+ada_find_printable_frame (frame_info_ptr fi)
{
for (; fi != NULL; fi = get_prev_frame (fi))
{
ada_unhandled_exception_name_addr_from_raise (void)
{
int frame_level;
- struct frame_info *fi;
+ frame_info_ptr fi;
struct ada_inferior_data *data = get_ada_inferior_data (current_inferior ());
/* To determine the name of this exception, we need to select
break;
default:
- internal_error (__FILE__, __LINE__, _("unexpected catchpoint type"));
+ internal_error (_("unexpected catchpoint type"));
break;
}
e_msg_val = ada_coerce_to_simple_array (e_msg_val);
gdb_assert (e_msg_val != NULL);
- e_msg_len = TYPE_LENGTH (value_type (e_msg_val));
+ e_msg_len = value_type (e_msg_val)->length ();
/* If the message string is empty, then treat it as if there was
no exception message. */
stop = true;
try
{
- struct value *mark;
-
- mark = value_mark ();
+ scoped_value_mark mark;
stop = value_true (evaluate_expression (ada_loc->excep_cond_expr.get ()));
- value_free_to_mark (mark);
}
catch (const gdb_exception &ex)
{
uiout->text (disposition == disp_del
? "\nTemporary catchpoint " : "\nCatchpoint ");
- uiout->field_signed ("bkptno", number);
+ print_num_locno (bs, uiout);
uiout->text (", ");
/* ada_exception_name_addr relies on the selected frame being the
break;
default:
- internal_error (__FILE__, __LINE__, _("unexpected catchpoint type"));
+ internal_error (_("unexpected catchpoint type"));
break;
}
break;
default:
- internal_error (__FILE__, __LINE__, _("unexpected catchpoint type"));
+ internal_error (_("unexpected catchpoint type"));
break;
}
}
break;
default:
- internal_error (__FILE__, __LINE__, _("unexpected catchpoint type"));
+ internal_error (_("unexpected catchpoint type"));
}
print_recreate_thread (fp);
}
return (data->exception_info->catch_handlers_sym);
break;
default:
- internal_error (__FILE__, __LINE__,
- _("unexpected catchpoint kind (%d)"), ex);
+ internal_error (_("unexpected catchpoint kind (%d)"), ex);
}
}
{
if (preg == NULL || preg->exec (name, 0, NULL, 0) == 0)
{
- struct bound_minimal_symbol msymbol
- = ada_lookup_simple_minsym (name);
+ symbol_name_match_type match_type = name_match_type_from_name (name);
+ lookup_name_info lookup_name (name, match_type);
- if (msymbol.minsym != NULL)
- {
- struct ada_exc_info info
- = {name, msymbol.value_address ()};
+ symbol_name_matcher_ftype *match_name
+ = ada_get_symbol_name_matcher (lookup_name);
- exceptions->push_back (info);
+ /* Iterate over all objfiles irrespective of scope or linker
+ namespaces so we get all exceptions anywhere in the
+ progspace. */
+ for (objfile *objfile : current_program_space->objfiles ())
+ {
+ for (minimal_symbol *msymbol : objfile->msymbols ())
+ {
+ if (match_name (msymbol->linkage_name (), lookup_name,
+ nullptr)
+ && msymbol->type () != mst_solib_trampoline)
+ {
+ ada_exc_info info
+ = {name, msymbol->value_address (objfile)};
+
+ exceptions->push_back (info);
+ }
+ }
}
}
}
static void
ada_add_exceptions_from_frame (compiled_regex *preg,
- struct frame_info *frame,
+ frame_info_ptr frame,
std::vector<ada_exc_info> *exceptions)
{
const struct block *block = get_frame_block (frame, 0);
SEARCH_GLOBAL_BLOCK | SEARCH_STATIC_BLOCK,
VARIABLES_DOMAIN);
+ /* Iterate over all objfiles irrespective of scope or linker namespaces
+ so we get all exceptions anywhere in the progspace. */
for (objfile *objfile : current_program_space->objfiles ())
{
for (compunit_symtab *s : objfile->compunits ())
struct value *read_var_value (struct symbol *var,
const struct block *var_block,
- struct frame_info *frame) const override
+ frame_info_ptr frame) const override
{
/* The only case where default_read_var_value is not sufficient
is when VAR is a renaming... */
/* Create the equivalent of the System.Storage_Elements.Storage_Offset
type. This is a signed integral type whose size is the same as
the size of addresses. */
- unsigned int addr_length = TYPE_LENGTH (system_addr_ptr);
+ unsigned int addr_length = system_addr_ptr->length ();
add (arch_integer_type (gdbarch, addr_length * HOST_CHAR_BIT, 0,
"storage_offset"));