#include "defs.h"
-#include <stdio.h>
-#include <string.h>
#include <ctype.h>
-#include <stdarg.h>
#include "demangle.h"
#include "gdb_regex.h"
#include "frame.h"
#include "block.h"
#include "infcall.h"
#include "dictionary.h"
-#include "exceptions.h"
#include "annotate.h"
#include "valprint.h"
#include "source.h"
static struct value *make_array_descriptor (struct type *, struct value *);
static void ada_add_block_symbols (struct obstack *,
- struct block *, const char *,
+ const struct block *, const char *,
domain_enum, struct objfile *, int);
static int is_nonfunction (struct ada_symbol_info *, int);
static void add_defn_to_vec (struct obstack *, struct symbol *,
- struct block *);
+ const struct block *);
static int num_defns_collected (struct obstack *);
static void ada_language_arch_info (struct gdbarch *,
struct language_arch_info *);
-static void check_size (const struct type *);
-
static struct value *ada_index_struct_field (int, struct value *, int,
struct type *);
static struct type *ada_find_any_type (const char *name);
\f
+/* The result of a symbol lookup to be stored in our symbol cache.
+
+ An entry may also record a failed lookup (SYM and BLOCK both NULL),
+ so that the failure itself is cached. Entries are allocated on the
+ cache's obstack and chained per hash bucket via NEXT. */
+
+struct cache_entry
+{
+ /* The name used to perform the lookup. The string is copied onto
+ the cache's obstack (see cache_symbol). */
+ const char *name;
+ /* The namespace (lookup domain) used during the lookup. */
+ domain_enum namespace;
+ /* The symbol returned by the lookup, or NULL if no matching symbol
+ was found. */
+ struct symbol *sym;
+ /* The block where the symbol was found, or NULL if no matching
+ symbol was found. */
+ const struct block *block;
+ /* A pointer to the next entry with the same hash. */
+ struct cache_entry *next;
+};
+
+/* The Ada symbol cache, used to store the result of Ada-mode symbol
+ lookups in the course of executing the user's commands.
+
+ The cache is implemented using a simple, fixed-sized hash.
+ The size is fixed on the grounds that there are not likely to be
+ all that many symbols looked up during any given session, regardless
+ of the size of the symbol table. If we decide to go to a resizable
+ table, let's just use the stuff from libiberty instead. */
+
+/* Number of hash buckets; a prime, to spread the msymbol_hash values. */
+#define HASH_SIZE 1009
+
+struct ada_symbol_cache
+{
+ /* An obstack used to store the entries in our cache. Freeing this
+ obstack releases every entry and entry name at once. */
+ struct obstack cache_space;
+
+ /* The root of the hash table used to implement our symbol cache.
+ Each slot is the head of a chain of cache_entry linked by NEXT. */
+ struct cache_entry *root[HASH_SIZE];
+};
+
+static void ada_free_symbol_cache (struct ada_symbol_cache *sym_cache);
/* Maximum-sized dynamic type. */
static unsigned int varsize_limit;
+/* Implement the "maintenance set ada" (prefix) command. */
+
static void
maint_set_ada_cmd (char *args, int from_tty)
{
- help_list (maint_set_ada_cmdlist, "maintenance set ada ", -1, gdb_stdout);
+ /* all_commands (rather than a raw -1) makes the intent explicit:
+ list the help for every command in this prefix list. */
+ help_list (maint_set_ada_cmdlist, "maintenance set ada ", all_commands,
+ gdb_stdout);
}
/* Implement the "maintenance show ada" (prefix) command. */
set_inferior_data (inf, ada_inferior_data, NULL);
}
+
+ /* Program-space-specific data. */
+
+/* This module's per-program-space data. */
+struct ada_pspace_data
+{
+ /* The Ada symbol cache, or NULL if it has not been allocated
+ for this program space yet. */
+ struct ada_symbol_cache *sym_cache;
+};
+
+/* Key to our per-program-space data. */
+static const struct program_space_data *ada_pspace_data_handle;
+
+/* Return this module's data for the given program space (PSPACE).
+ If none is found, add a zero'ed one now.
+
+ This function always returns a valid object. */
+
+static struct ada_pspace_data *
+get_ada_pspace_data (struct program_space *pspace)
+{
+ struct ada_pspace_data *data;
+
+ data = program_space_data (pspace, ada_pspace_data_handle);
+ if (data == NULL)
+ {
+ /* First use for this program space: allocate a zero-initialized
+ record and register it under our key, so the same record is
+ returned on subsequent calls. */
+ data = XCNEW (struct ada_pspace_data);
+ set_program_space_data (pspace, ada_pspace_data_handle, data);
+ }
+
+ return data;
+}
+
+/* The cleanup callback for this module's per-program-space data.
+ DATA is the struct ada_pspace_data to be destroyed. */
+
+static void
+ada_pspace_data_cleanup (struct program_space *pspace, void *data)
+{
+ struct ada_pspace_data *pspace_data = data;
+
+ /* The symbol cache is allocated separately from the per-pspace
+ record, so release it first, then the record itself. */
+ if (pspace_data->sym_cache != NULL)
+ ada_free_symbol_cache (pspace_data->sym_cache);
+ xfree (pspace_data);
+}
+
/* Utilities */
/* If TYPE is a TYPE_CODE_TYPEDEF type, return the target type after
static const char *
ada_unqualified_name (const char *decoded_name)
{
- const char *result = strrchr (decoded_name, '.');
+ const char *result;
+
+ /* If the decoded name starts with '<', it means that the encoded
+ name does not follow standard naming conventions, and thus that
+ it is not your typical Ada symbol name. Trying to unqualify it
+ is therefore pointless and possibly erroneous. */
+ if (decoded_name[0] == '<')
+ return decoded_name;
+ result = strrchr (decoded_name, '.');
if (result != NULL)
result++; /* Skip the dot... */
else
/* Make sure that the object size is not unreasonable before
trying to allocate some memory for it. */
- check_size (type);
+ ada_ensure_varsize_limit (type);
if (value_lazy (val)
|| TYPE_LENGTH (type) > TYPE_LENGTH (value_type (val)))
else
{
result = allocate_value (type);
- memcpy (value_contents_raw (result), value_contents (val),
- TYPE_LENGTH (type));
+ value_contents_copy_raw (result, 0, val, 0, TYPE_LENGTH (type));
}
set_value_component_location (result, val);
set_value_bitsize (result, value_bitsize (val));
set_value_bitpos (result, value_bitpos (val));
set_value_address (result, value_address (val));
- set_value_optimized_out (result, value_optimized_out_const (val));
return result;
}
}
i.e. if it would be a bad idea to allocate a value of this type in
GDB. */
-static void
-check_size (const struct type *type)
+void
+ada_ensure_varsize_limit (const struct type *type)
{
if (TYPE_LENGTH (type) > varsize_limit)
error (_("object size is larger than varsize-limit"));
LONGEST
ada_discrete_type_high_bound (struct type *type)
{
+ type = resolve_dynamic_type (type, 0);
switch (TYPE_CODE (type))
{
case TYPE_CODE_RANGE:
LONGEST
ada_discrete_type_low_bound (struct type *type)
{
+ type = resolve_dynamic_type (type, 0);
switch (TYPE_CODE (type))
{
case TYPE_CODE_RANGE:
ada_update_initial_language (enum language lang)
{
if (lookup_minimal_symbol ("adainit", (const char *) NULL,
- (struct objfile *) NULL) != NULL)
+ (struct objfile *) NULL).minsym != NULL)
return language_ada;
return lang;
char *
ada_main_name (void)
{
- struct minimal_symbol *msym;
+ struct bound_minimal_symbol msym;
static char *main_program_name = NULL;
/* For Ada, the name of the main procedure is stored in a specific
in Ada. */
msym = lookup_minimal_symbol (ADA_MAIN_PROGRAM_SYMBOL_NAME, NULL, NULL);
- if (msym != NULL)
+ if (msym.minsym != NULL)
{
CORE_ADDR main_program_name_addr;
int err_code;
- main_program_name_addr = SYMBOL_VALUE_ADDRESS (msym);
+ main_program_name_addr = BMSYMBOL_VALUE_ADDRESS (msym);
if (main_program_name_addr == 0)
error (_("Invalid address for Ada main program name."));
struct value *high = desc_one_bound (descriptor, arity, 1);
arity -= 1;
- create_range_type (range_type, value_type (low),
- longest_to_int (value_as_long (low)),
- longest_to_int (value_as_long (high)));
+ create_static_range_type (range_type, value_type (low),
+ longest_to_int (value_as_long (low)),
+ longest_to_int (value_as_long (high)));
elt_type = create_array_type (array_type, elt_type, range_type);
if (ada_is_unconstrained_packed_array_type (value_type (arr)))
if (arrVal == NULL)
error (_("Bounds unavailable for null array pointer."));
- check_size (TYPE_TARGET_TYPE (value_type (arrVal)));
+ ada_ensure_varsize_limit (TYPE_TARGET_TYPE (value_type (arrVal)));
return value_ind (arrVal);
}
else if (ada_is_constrained_packed_array_type (value_type (arr)))
but with the bit sizes of its elements (and those of any
constituent arrays) recorded in the BITSIZE components of its
TYPE_FIELD_BITSIZE values, and with *ELT_BITS set to its total size
- in bits. */
+ in bits.
+
+ Note that, for arrays whose index type has an XA encoding where
+ a bound references a record discriminant, getting that discriminant,
+ and therefore the actual value of that bound, is not possible
+ because none of the given parameters gives us access to the record.
+ This function assumes that it is OK in the context where it is being
+ used to return an array whose bounds are still dynamic and where
+ the length is arbitrary. */
static struct type *
constrained_packed_array_type (struct type *type, long *elt_bits)
TYPE_FIELD_BITSIZE (new_type, 0) = *elt_bits;
TYPE_NAME (new_type) = ada_type_name (type);
- if (get_discrete_bounds (index_type, &low_bound, &high_bound) < 0)
+ if ((TYPE_CODE (check_typedef (index_type)) == TYPE_CODE_RANGE
+ && is_dynamic_type (check_typedef (index_type)))
+ || get_discrete_bounds (index_type, &low_bound, &high_bound) < 0)
low_bound = high_bound = 0;
if (high_bound < low_bound)
*elt_bits = TYPE_LENGTH (new_type) = 0;
{
struct type *type;
- arr = ada_coerce_ref (arr);
-
- /* If our value is a pointer, then dererence it. Make sure that
- this operation does not cause the target type to be fixed, as
- this would indirectly cause this array to be decoded. The rest
- of the routine assumes that the array hasn't been decoded yet,
- so we use the basic "value_ind" routine to perform the dereferencing,
- as opposed to using "ada_value_ind". */
+ /* If our value is a pointer, then dereference it. Likewise if
+ the value is a reference. Make sure that this operation does not
+ cause the target type to be fixed, as this would indirectly cause
+ this array to be decoded. The rest of the routine assumes that
+ the array hasn't been decoded yet, so we use the basic "coerce_ref"
+ and "value_ind" routines to perform the dereferencing, as opposed
+ to using "ada_coerce_ref" or "ada_value_ind". */
+ arr = coerce_ref (arr);
if (TYPE_CODE (ada_check_typedef (value_type (arr))) == TYPE_CODE_PTR)
arr = value_ind (arr);
else if (VALUE_LVAL (obj) == lval_memory && value_lazy (obj))
{
v = value_at (type, value_address (obj));
+ type = value_type (v);
bytes = (unsigned char *) alloca (len);
read_memory (value_address (v) + offset, bytes, len);
}
return elt;
}
-/* Assuming ARR is a pointer to a standard GDB array of type TYPE, the
- value of the element of *ARR at the ARITY indices given in
- IND. Does not read the entire array into memory. */
+/* Assuming ARR is a pointer to a GDB array, the value of the element
+ of *ARR at the ARITY indices given in IND.
+ Does not read the entire array into memory. */
static struct value *
-ada_value_ptr_subscript (struct value *arr, struct type *type, int arity,
- struct value **ind)
+ada_value_ptr_subscript (struct value *arr, int arity, struct value **ind)
{
int k;
+ struct type *type
+ = check_typedef (value_enclosing_type (ada_value_ind (arr)));
for (k = 0; k < arity; k += 1)
{
CORE_ADDR base = value_as_address (array_ptr)
+ ((low - ada_discrete_type_low_bound (TYPE_INDEX_TYPE (type0)))
* TYPE_LENGTH (TYPE_TARGET_TYPE (type0)));
- struct type *index_type =
- create_range_type (NULL, TYPE_TARGET_TYPE (TYPE_INDEX_TYPE (type0)),
- low, high);
+ struct type *index_type
+ = create_static_range_type (NULL,
+ TYPE_TARGET_TYPE (TYPE_INDEX_TYPE (type0)),
+ low, high);
struct type *slice_type =
create_array_type (NULL, TYPE_TARGET_TYPE (type0), index_type);
ada_value_slice (struct value *array, int low, int high)
{
struct type *type = ada_check_typedef (value_type (array));
- struct type *index_type =
- create_range_type (NULL, TYPE_INDEX_TYPE (type), low, high);
+ struct type *index_type
+ = create_static_range_type (NULL, TYPE_INDEX_TYPE (type), low, high);
struct type *slice_type =
create_array_type (NULL, TYPE_TARGET_TYPE (type), index_type);
static LONGEST
ada_array_bound (struct value *arr, int n, int which)
{
- struct type *arr_type = value_type (arr);
+ struct type *arr_type;
+
+ if (TYPE_CODE (check_typedef (value_type (arr))) == TYPE_CODE_PTR)
+ arr = value_ind (arr);
+ arr_type = value_enclosing_type (arr);
if (ada_is_constrained_packed_array_type (arr_type))
return ada_array_bound (decode_constrained_packed_array (arr), n, which);
static LONGEST
ada_array_length (struct value *arr, int n)
{
- struct type *arr_type = ada_check_typedef (value_type (arr));
+ struct type *arr_type;
+
+ if (TYPE_CODE (check_typedef (value_type (arr))) == TYPE_CODE_PTR)
+ arr = value_ind (arr);
+ arr_type = value_enclosing_type (arr);
if (ada_is_constrained_packed_array_type (arr_type))
return ada_array_length (decode_constrained_packed_array (arr), n);
empty_array (struct type *arr_type, int low)
{
struct type *arr_type0 = ada_check_typedef (arr_type);
- struct type *index_type =
- create_range_type (NULL, TYPE_TARGET_TYPE (TYPE_INDEX_TYPE (arr_type0)),
- low, low - 1);
+ struct type *index_type
+ = create_static_range_type
+ (NULL, TYPE_TARGET_TYPE (TYPE_INDEX_TYPE (arr_type0)), low, low - 1);
struct type *elt_type = ada_array_element_type (arr_type0, 1);
return allocate_value (create_array_type (NULL, elt_type, index_type));
(SYMBOL_CLASS (syms[i].sym) == LOC_CONST
&& SYMBOL_TYPE (syms[i].sym) != NULL
&& TYPE_CODE (SYMBOL_TYPE (syms[i].sym)) == TYPE_CODE_ENUM);
- struct symtab *symtab = SYMBOL_SYMTAB (syms[i].sym);
+ struct symtab *symtab = symbol_symtab (syms[i].sym);
if (SYMBOL_LINE (syms[i].sym) != 0 && symtab != NULL)
printf_unfiltered (_("[%d] %s at %s:%d\n"),
static struct value *
ada_read_renaming_var_value (struct symbol *renaming_sym,
- struct block *block)
+ const struct block *block)
{
const char *sym_name;
struct expression *expr;
return descriptor;
}
\f
-/* Dummy definitions for an experimental caching module that is not
- * used in the public sources. */
+ /* Symbol Cache Module */
+
+/* Performance measurements made as of 2010-01-15 indicate that
+ this cache does bring some noticeable improvements. Depending
+ on the type of entity being printed, the cache can make it as much
+ as an order of magnitude faster than without it.
+
+ The descriptive type DWARF extension has significantly reduced
+ the need for this cache, at least when DWARF is being used. However,
+ even in this case, some expensive name-based symbol searches are still
+ sometimes necessary - to find an XVZ variable, mostly. */
+
+/* Initialize the contents of SYM_CACHE.
+
+ This only sets up a fresh obstack and clears the hash buckets;
+ it does not release memory a previously used SYM_CACHE may own
+ (callers such as ada_clear_symbol_cache free the obstack first). */
+
+static void
+ada_init_symbol_cache (struct ada_symbol_cache *sym_cache)
+{
+ obstack_init (&sym_cache->cache_space);
+ memset (sym_cache->root, '\000', sizeof (sym_cache->root));
+}
+
+/* Free the memory used by SYM_CACHE, including SYM_CACHE itself. */
+
+static void
+ada_free_symbol_cache (struct ada_symbol_cache *sym_cache)
+{
+ /* Releasing the obstack frees every cache entry and every copied
+ entry name in one operation. */
+ obstack_free (&sym_cache->cache_space, NULL);
+ xfree (sym_cache);
+}
+
+/* Return the symbol cache associated to the given program space PSPACE.
+ If not allocated for this PSPACE yet, allocate and initialize one. */
+
+static struct ada_symbol_cache *
+ada_get_symbol_cache (struct program_space *pspace)
+{
+ struct ada_pspace_data *pspace_data = get_ada_pspace_data (pspace);
+ struct ada_symbol_cache *sym_cache = pspace_data->sym_cache;
+
+ if (sym_cache == NULL)
+ {
+ sym_cache = XCNEW (struct ada_symbol_cache);
+ ada_init_symbol_cache (sym_cache);
+ /* Remember the new cache in the per-program-space data, so that
+ subsequent calls reuse it. Without this, every call would
+ allocate a fresh, empty cache, leaking the previous one and
+ making cache lookups always miss (lookup_cached_symbol and
+ cache_symbol would each operate on different caches). */
+ pspace_data->sym_cache = sym_cache;
+ }
+
+ return sym_cache;
+}
+
+/* Clear all entries from the symbol cache of the current
+ program space. */
+
+static void
+ada_clear_symbol_cache (void)
+{
+ struct ada_symbol_cache *sym_cache
+ = ada_get_symbol_cache (current_program_space);
+
+ /* Drop every entry at once by releasing the whole obstack, then
+ re-initialize the cache back to an empty state. */
+ obstack_free (&sym_cache->cache_space, NULL);
+ ada_init_symbol_cache (sym_cache);
+}
+
+/* Search our cache for an entry matching NAME and NAMESPACE.
+ If found, return a pointer to the hash-chain slot holding the
+ matching entry (which lets the caller unlink or inspect it);
+ return NULL otherwise. */
+
+static struct cache_entry **
+find_entry (const char *name, domain_enum namespace)
+{
+ struct ada_symbol_cache *sym_cache
+ = ada_get_symbol_cache (current_program_space);
+ int h = msymbol_hash (name) % HASH_SIZE;
+ struct cache_entry **e;
+
+ /* Walk the bucket's chain; E always points at the link that
+ references the current entry. */
+ for (e = &sym_cache->root[h]; *e != NULL; e = &(*e)->next)
+ {
+ if (namespace == (*e)->namespace && strcmp (name, (*e)->name) == 0)
+ return e;
+ }
+ return NULL;
+}
+
+/* Search the symbol cache for an entry matching NAME and NAMESPACE.
+ Return 1 if found, 0 otherwise.
+
+ If an entry was found and SYM is not NULL, set *SYM to the entry's
+ SYM. Same principle for BLOCK if not NULL.
+
+ Note that a cached failed lookup is also a hit: in that case this
+ function returns 1 with *SYM (and *BLOCK) set to NULL. */
static int
lookup_cached_symbol (const char *name, domain_enum namespace,
- struct symbol **sym, struct block **block)
+ struct symbol **sym, const struct block **block)
{
- return 0;
+ struct cache_entry **e = find_entry (name, namespace);
+
+ if (e == NULL)
+ return 0;
+ if (sym != NULL)
+ *sym = (*e)->sym;
+ if (block != NULL)
+ *block = (*e)->block;
+ return 1;
}
+/* Assuming that (SYM, BLOCK) is the result of the lookup of NAME
+ in domain NAMESPACE, save this result in our symbol cache.
+
+ SYM may be NULL, in which case the failure itself is cached
+ (negative caching). */
+
static void
cache_symbol (const char *name, domain_enum namespace, struct symbol *sym,
 const struct block *block)
{
+ struct ada_symbol_cache *sym_cache
+ = ada_get_symbol_cache (current_program_space);
+ int h;
+ char *copy;
+ struct cache_entry *e;
+
+ /* If the symbol is a local symbol, then do not cache it, as a search
+ for that symbol depends on the context. To determine whether
+ the symbol is local or not, we check the block where we found it
+ against the global and static blocks of its associated symtab. */
+ if (sym
+ && BLOCKVECTOR_BLOCK (SYMTAB_BLOCKVECTOR (symbol_symtab (sym)),
+ GLOBAL_BLOCK) != block
+ && BLOCKVECTOR_BLOCK (SYMTAB_BLOCKVECTOR (symbol_symtab (sym)),
+ STATIC_BLOCK) != block)
+ return;
+
+ /* Prepend the new entry to its hash bucket's chain. */
+ h = msymbol_hash (name) % HASH_SIZE;
+ e = (struct cache_entry *) obstack_alloc (&sym_cache->cache_space,
+ sizeof (*e));
+ e->next = sym_cache->root[h];
+ sym_cache->root[h] = e;
+ /* Copy NAME onto the cache's obstack, so the entry does not depend
+ on the lifetime of the caller's string. */
+ e->name = copy = obstack_alloc (&sym_cache->cache_space, strlen (name) + 1);
+ strcpy (copy, name);
+ e->sym = sym;
+ e->namespace = namespace;
+ e->block = block;
}
\f
/* Symbol Lookup */
static void
add_defn_to_vec (struct obstack *obstackp,
struct symbol *sym,
- struct block *block)
+ const struct block *block)
{
int i;
struct ada_symbol_info *prevDefns = defns_collected (obstackp, 0);
ALL_MSYMBOLS (objfile, msymbol)
{
- if (match_name (SYMBOL_LINKAGE_NAME (msymbol), name, wild_match_p)
+ if (match_name (MSYMBOL_LINKAGE_NAME (msymbol), name, wild_match_p)
&& MSYMBOL_TYPE (msymbol) != mst_solib_trampoline)
{
result.minsym = msymbol;
static void
ada_add_local_symbols (struct obstack *obstackp, const char *name,
- struct block *block, domain_enum domain,
+ const struct block *block, domain_enum domain,
int wild_match_p)
{
int block_depth = 0;
int full_search)
{
struct symbol *sym;
- struct block *block;
+ const struct block *block;
const char *name;
const int wild_match_p = should_use_wild_match (name0);
int cacheIfUnique;
/* Search specified block and its superiors. */
name = name0;
- block = (struct block *) block0; /* FIXME: No cast ought to be
- needed, but adding const will
- have a cascade effect. */
+ block = block0;
/* Special case: If the user specifies a symbol name inside package
Standard, do a non-wild matching of the symbol name without
static void
ada_add_block_symbols (struct obstack *obstackp,
- struct block *block, const char *name,
+ const struct block *block, const char *name,
domain_enum domain, struct objfile *objfile,
int wild)
{
int encoded_p;
VEC(char_ptr) *completions = VEC_alloc (char_ptr, 128);
struct symbol *sym;
- struct symtab *s;
+ struct compunit_symtab *s;
struct minimal_symbol *msymbol;
struct objfile *objfile;
- struct block *b, *surrounding_static_block = 0;
+ const struct block *b, *surrounding_static_block = 0;
int i;
struct block_iterator iter;
struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
ALL_MSYMBOLS (objfile, msymbol)
{
QUIT;
- symbol_completion_add (&completions, SYMBOL_LINKAGE_NAME (msymbol),
+ symbol_completion_add (&completions, MSYMBOL_LINKAGE_NAME (msymbol),
text, text_len, text0, word, wild_match_p,
encoded_p);
}
/* Go through the symtabs and check the externs and statics for
symbols which match. */
- ALL_SYMTABS (objfile, s)
+ ALL_COMPUNITS (objfile, s)
{
QUIT;
- b = BLOCKVECTOR_BLOCK (BLOCKVECTOR (s), GLOBAL_BLOCK);
+ b = BLOCKVECTOR_BLOCK (COMPUNIT_BLOCKVECTOR (s), GLOBAL_BLOCK);
ALL_BLOCK_SYMBOLS (b, iter, sym)
{
symbol_completion_add (&completions, SYMBOL_LINKAGE_NAME (sym),
}
}
- ALL_SYMTABS (objfile, s)
+ ALL_COMPUNITS (objfile, s)
{
QUIT;
- b = BLOCKVECTOR_BLOCK (BLOCKVECTOR (s), STATIC_BLOCK);
+ b = BLOCKVECTOR_BLOCK (COMPUNIT_BLOCKVECTOR (s), STATIC_BLOCK);
/* Don't do this block twice. */
if (b == surrounding_static_block)
continue;
struct value *discrim;
LONGEST discrim_val;
- outer = value_from_contents_and_address (outer_type, outer_valaddr, 0);
+ /* Using plain value_from_contents_and_address here causes problems
+ because we will end up trying to resolve a type that is currently
+ being constructed. */
+ outer = value_from_contents_and_address_unresolved (outer_type,
+ outer_valaddr, 0);
discrim = ada_value_struct_elt (outer, discrim_name, 1);
if (discrim == NULL)
return -1;
initialized, the type size may be completely bogus and
GDB may fail to allocate a value for it. So check the
size first before creating the value. */
- check_size (rtype);
- dval = value_from_contents_and_address (rtype, valaddr, address);
+ ada_ensure_varsize_limit (rtype);
+ /* Using plain value_from_contents_and_address here
+ causes problems because we will end up trying to
+ resolve a type that is currently being
+ constructed. */
+ dval = value_from_contents_and_address_unresolved (rtype,
+ valaddr,
+ address);
+ rtype = value_type (dval);
}
else
dval = dval0;
large (due to an uninitialized variable in the inferior)
that it would cause an overflow when adding it to the
record size. */
- check_size (field_type);
+ ada_ensure_varsize_limit (field_type);
TYPE_FIELD_TYPE (rtype, f) = field_type;
TYPE_FIELD_NAME (rtype, f) = TYPE_FIELD_NAME (type, f);
off = TYPE_FIELD_BITPOS (rtype, variant_field);
if (dval0 == NULL)
- dval = value_from_contents_and_address (rtype, valaddr, address);
+ {
+ /* Using plain value_from_contents_and_address here causes
+ problems because we will end up trying to resolve a type
+ that is currently being constructed. */
+ dval = value_from_contents_and_address_unresolved (rtype, valaddr,
+ address);
+ rtype = value_type (dval);
+ }
else
dval = dval0;
return type;
if (dval0 == NULL)
- dval = value_from_contents_and_address (type, valaddr, address);
+ {
+ dval = value_from_contents_and_address (type, valaddr, address);
+ type = value_type (dval);
+ }
else
dval = dval0;
return TYPE_FIELD_TYPE (var_type, which);
}
+/* Assuming RANGE_TYPE is a TYPE_CODE_RANGE, return nonzero if
+ ENCODING_TYPE, a type following the GNAT conventions for discrete
+ type encodings, only carries redundant information.
+
+ The encoding is considered redundant when ENCODING_TYPE's name
+ carries a "___XDLU_<lo>__<hi>" suffix whose bounds exactly match
+ RANGE_TYPE's static bounds. */
+
+static int
+ada_is_redundant_range_encoding (struct type *range_type,
+ struct type *encoding_type)
+{
+ char *bounds_str;
+ int n;
+ LONGEST lo, hi;
+
+ gdb_assert (TYPE_CODE (range_type) == TYPE_CODE_RANGE);
+
+ if (TYPE_CODE (get_base_type (range_type))
+ != TYPE_CODE (get_base_type (encoding_type)))
+ {
+ /* The compiler probably used a simple base type to describe
+ the range type instead of the range's actual base type,
+ expecting us to get the real base type from the encoding
+ anyway. In this situation, the encoding cannot be ignored
+ as redundant. */
+ return 0;
+ }
+
+ /* A dynamic range has no static bounds to compare against. */
+ if (is_dynamic_type (range_type))
+ return 0;
+
+ if (TYPE_NAME (encoding_type) == NULL)
+ return 0;
+
+ bounds_str = strstr (TYPE_NAME (encoding_type), "___XDLU_");
+ if (bounds_str == NULL)
+ return 0;
+
+ n = 8; /* Skip "___XDLU_". */
+ if (!ada_scan_number (bounds_str, n, &lo, &n))
+ return 0;
+ if (TYPE_LOW_BOUND (range_type) != lo)
+ return 0;
+
+ n += 2; /* Skip the "__" separator between the two bounds. */
+ if (!ada_scan_number (bounds_str, n, &hi, &n))
+ return 0;
+ if (TYPE_HIGH_BOUND (range_type) != hi)
+ return 0;
+
+ return 1;
+}
+
+/* Given the array type ARRAY_TYPE, return nonzero if DESC_TYPE,
+ a type following the GNAT encoding for describing array type
+ indices, only carries redundant information.
+
+ NOTE(review): this assumes DESC_TYPE has one field per dimension
+ of ARRAY_TYPE, so that walking TYPE_TARGET_TYPE one level per
+ field stays within the array's nesting -- confirm with callers
+ (the ___XA parallel-type convention). */
+
+static int
+ada_is_redundant_index_type_desc (struct type *array_type,
+ struct type *desc_type)
+{
+ struct type *this_layer = check_typedef (array_type);
+ int i;
+
+ /* Check each index's encoding against the corresponding dimension,
+ peeling one array layer per iteration. */
+ for (i = 0; i < TYPE_NFIELDS (desc_type); i++)
+ {
+ if (!ada_is_redundant_range_encoding (TYPE_INDEX_TYPE (this_layer),
+ TYPE_FIELD_TYPE (desc_type, i)))
+ return 0;
+ this_layer = check_typedef (TYPE_TARGET_TYPE (this_layer));
+ }
+
+ return 1;
+}
+
/* Assuming that TYPE0 is an array type describing the type of a value
at ADDR, and that DVAL describes a record containing any
discriminants used in TYPE0, returns a type for the value that
index_type_desc = ada_find_parallel_type (type0, "___XA");
ada_fixup_array_indexes_type (index_type_desc);
+ if (index_type_desc != NULL
+ && ada_is_redundant_index_type_desc (type0, index_type_desc))
+ {
+ /* Ignore this ___XA parallel type, as it does not bring any
+ useful information. This allows us to avoid creating fixed
+ versions of the array's index types, which would be identical
+ to the original ones. This, in turn, can also help avoid
+ the creation of fixed versions of the array itself. */
+ index_type_desc = NULL;
+ }
+
if (index_type_desc == NULL)
{
struct type *elt_type0 = ada_check_typedef (TYPE_TARGET_TYPE (type0));
value_from_contents_and_address (fixed_record_type,
valaddr,
address);
+ fixed_record_type = value_type (obj);
if (real_type != NULL)
return to_fixed_record_type
(real_type, NULL,
enum exp_opcode op;
int tem;
int pc;
+ int preeval_pos;
struct value *arg1 = NULL, *arg2 = NULL, *arg3;
struct type *type;
int nargs, oplen;
return (value_from_longest
(value_type (arg1),
value_as_long (arg1) + value_as_long (arg2)));
+ if (TYPE_CODE (value_type (arg2)) == TYPE_CODE_PTR)
+ return (value_from_longest
+ (value_type (arg2),
+ value_as_long (arg1) + value_as_long (arg2)));
if ((ada_is_fixed_point_type (value_type (arg1))
|| ada_is_fixed_point_type (value_type (arg2)))
&& value_type (arg1) != value_type (arg2))
return (value_from_longest
(value_type (arg1),
value_as_long (arg1) - value_as_long (arg2)));
+ if (TYPE_CODE (value_type (arg2)) == TYPE_CODE_PTR)
+ return (value_from_longest
+ (value_type (arg2),
+ value_as_long (arg1) - value_as_long (arg2)));
if ((ada_is_fixed_point_type (value_type (arg1))
|| ada_is_fixed_point_type (value_type (arg2)))
&& value_type (arg1) != value_type (arg2))
*pos += 4;
goto nosideret;
}
- else if (SYMBOL_DOMAIN (exp->elts[pc + 2].symbol) == UNDEF_DOMAIN)
+
+ if (SYMBOL_DOMAIN (exp->elts[pc + 2].symbol) == UNDEF_DOMAIN)
/* Only encountered when an unresolved symbol occurs in a
context other than a function call, in which case, it is
invalid. */
error (_("Unexpected unresolved symbol, %s, during evaluation"),
SYMBOL_PRINT_NAME (exp->elts[pc + 2].symbol));
- else if (noside == EVAL_AVOID_SIDE_EFFECTS)
+
+ if (noside == EVAL_AVOID_SIDE_EFFECTS)
{
type = static_unwrap_type (SYMBOL_TYPE (exp->elts[pc + 2].symbol));
/* Check to see if this is a tagged type. We also need to handle
The latter should be shown as usual (as a pointer), whereas
a reference should mostly be transparent to the user. */
if (ada_is_tagged_type (type, 0)
- || (TYPE_CODE(type) == TYPE_CODE_REF
+ || (TYPE_CODE (type) == TYPE_CODE_REF
&& ada_is_tagged_type (TYPE_TARGET_TYPE (type), 0)))
- {
- /* Tagged types are a little special in the fact that the real
- type is dynamic and can only be determined by inspecting the
- object's tag. This means that we need to get the object's
- value first (EVAL_NORMAL) and then extract the actual object
- type from its tag.
-
- Note that we cannot skip the final step where we extract
- the object type from its tag, because the EVAL_NORMAL phase
- results in dynamic components being resolved into fixed ones.
- This can cause problems when trying to print the type
- description of tagged types whose parent has a dynamic size:
- We use the type name of the "_parent" component in order
- to print the name of the ancestor type in the type description.
- If that component had a dynamic size, the resolution into
- a fixed type would result in the loss of that type name,
- thus preventing us from printing the name of the ancestor
- type in the type description. */
- arg1 = evaluate_subexp (NULL_TYPE, exp, pos, EVAL_NORMAL);
-
- if (TYPE_CODE (type) != TYPE_CODE_REF)
- {
- struct type *actual_type;
-
- actual_type = type_from_tag (ada_value_tag (arg1));
- if (actual_type == NULL)
- /* If, for some reason, we were unable to determine
- the actual type from the tag, then use the static
- approximation that we just computed as a fallback.
- This can happen if the debugging information is
- incomplete, for instance. */
- actual_type = type;
- return value_zero (actual_type, not_lval);
- }
- else
- {
- /* In the case of a ref, ada_coerce_ref takes care
- of determining the actual type. But the evaluation
- should return a ref as it should be valid to ask
- for its address; so rebuild a ref after coerce. */
- arg1 = ada_coerce_ref (arg1);
- return value_ref (arg1);
- }
- }
+ {
+ /* Tagged types are a little special in the fact that the real
+ type is dynamic and can only be determined by inspecting the
+ object's tag. This means that we need to get the object's
+ value first (EVAL_NORMAL) and then extract the actual object
+ type from its tag.
+
+ Note that we cannot skip the final step where we extract
+ the object type from its tag, because the EVAL_NORMAL phase
+ results in dynamic components being resolved into fixed ones.
+ This can cause problems when trying to print the type
+ description of tagged types whose parent has a dynamic size:
+ We use the type name of the "_parent" component in order
+ to print the name of the ancestor type in the type description.
+ If that component had a dynamic size, the resolution into
+ a fixed type would result in the loss of that type name,
+ thus preventing us from printing the name of the ancestor
+ type in the type description. */
+ arg1 = evaluate_subexp (NULL_TYPE, exp, pos, EVAL_NORMAL);
+
+ if (TYPE_CODE (type) != TYPE_CODE_REF)
+ {
+ struct type *actual_type;
+
+ actual_type = type_from_tag (ada_value_tag (arg1));
+ if (actual_type == NULL)
+ /* If, for some reason, we were unable to determine
+ the actual type from the tag, then use the static
+ approximation that we just computed as a fallback.
+ This can happen if the debugging information is
+ incomplete, for instance. */
+ actual_type = type;
+ return value_zero (actual_type, not_lval);
+ }
+ else
+ {
+ /* In the case of a ref, ada_coerce_ref takes care
+ of determining the actual type. But the evaluation
+ should return a ref as it should be valid to ask
+ for its address; so rebuild a ref after coerce. */
+ arg1 = ada_coerce_ref (arg1);
+ return value_ref (arg1);
+ }
+ }
- *pos += 4;
- return value_zero
- (to_static_fixed_type
- (static_unwrap_type (SYMBOL_TYPE (exp->elts[pc + 2].symbol))),
- not_lval);
- }
- else
- {
- arg1 = evaluate_subexp_standard (expect_type, exp, pos, noside);
- return ada_to_fixed_value (arg1);
+ /* Records and unions for which GNAT encodings have been
+ generated need to be statically fixed as well.
+ Otherwise, non-static fixing produces a type where
+ all dynamic properties are removed, which prevents "ptype"
+ from being able to completely describe the type.
+ For instance, a case statement in a variant record would be
+ replaced by the relevant components based on the actual
+ value of the discriminants. */
+ if ((TYPE_CODE (type) == TYPE_CODE_STRUCT
+ && dynamic_template_type (type) != NULL)
+ || (TYPE_CODE (type) == TYPE_CODE_UNION
+ && ada_find_parallel_type (type, "___XVU") != NULL))
+ {
+ *pos += 4;
+ return value_zero (to_static_fixed_type (type), not_lval);
+ }
}
+ arg1 = evaluate_subexp_standard (expect_type, exp, pos, noside);
+ return ada_to_fixed_value (arg1);
+
case OP_FUNCALL:
(*pos) += 2;
(ada_coerce_to_simple_array (argvec[0]),
nargs, argvec + 1));
case TYPE_CODE_PTR: /* Pointer to array */
- type = to_fixed_array_type (TYPE_TARGET_TYPE (type), NULL, 1);
if (noside == EVAL_AVOID_SIDE_EFFECTS)
{
+ type = to_fixed_array_type (TYPE_TARGET_TYPE (type), NULL, 1);
type = ada_array_element_type (type, nargs);
if (type == NULL)
error (_("element type of array unknown"));
return value_zero (ada_aligned_type (type), lval_memory);
}
return
- unwrap_value (ada_value_ptr_subscript (argvec[0], type,
- nargs, argvec + 1));
+ unwrap_value (ada_value_ptr_subscript (argvec[0],
+ nargs, argvec + 1));
default:
error (_("Attempt to index or call something other than an "
if (ada_is_constrained_packed_array_type (value_type (arg1)))
arg1 = ada_coerce_to_simple_array (arg1);
- type = ada_index_type (value_type (arg1), tem,
- ada_attribute_name (op));
- if (type == NULL)
+ if (op == OP_ATR_LENGTH)
type = builtin_type (exp->gdbarch)->builtin_int;
+ else
+ {
+ type = ada_index_type (value_type (arg1), tem,
+ ada_attribute_name (op));
+ if (type == NULL)
+ type = builtin_type (exp->gdbarch)->builtin_int;
+ }
if (noside == EVAL_AVOID_SIDE_EFFECTS)
return allocate_value (type);
if (ada_is_constrained_packed_array_type (type_arg))
type_arg = decode_constrained_packed_array_type (type_arg);
- type = ada_index_type (type_arg, tem, ada_attribute_name (op));
- if (type == NULL)
+ if (op == OP_ATR_LENGTH)
type = builtin_type (exp->gdbarch)->builtin_int;
+ else
+ {
+ type = ada_index_type (type_arg, tem, ada_attribute_name (op));
+ if (type == NULL)
+ type = builtin_type (exp->gdbarch)->builtin_int;
+ }
if (noside == EVAL_AVOID_SIDE_EFFECTS)
return allocate_value (type);
return arg1;
case UNOP_IND:
+ preeval_pos = *pos;
arg1 = evaluate_subexp (NULL_TYPE, exp, pos, noside);
if (noside == EVAL_SKIP)
goto nosideret;
/* In C you can dereference an array to get the 1st elt. */
|| TYPE_CODE (type) == TYPE_CODE_ARRAY)
{
- type = to_static_fixed_type
- (ada_aligned_type
- (ada_check_typedef (TYPE_TARGET_TYPE (type))));
- check_size (type);
+ /* As mentioned in the OP_VAR_VALUE case, the actual type of
+ a tagged object can only be determined by inspecting its
+ tag. This means that we need to fully evaluate the
+ expression in order to get its type. */
+
+ if ((TYPE_CODE (type) == TYPE_CODE_REF
+ || TYPE_CODE (type) == TYPE_CODE_PTR)
+ && ada_is_tagged_type (TYPE_TARGET_TYPE (type), 0))
+ {
+ arg1 = evaluate_subexp (NULL_TYPE, exp, &preeval_pos,
+ EVAL_NORMAL);
+ type = value_type (ada_value_ind (arg1));
+ }
+ else
+ {
+ type = to_static_fixed_type
+ (ada_aligned_type
+ (ada_check_typedef (TYPE_TARGET_TYPE (type))));
+ }
+ ada_ensure_varsize_limit (type);
return value_zero (type, lval_memory);
}
else if (TYPE_CODE (type) == TYPE_CODE_INT)
case STRUCTOP_STRUCT:
tem = longest_to_int (exp->elts[pc + 1].longconst);
(*pos) += 3 + BYTES_TO_EXP_ELEM (tem + 1);
+ preeval_pos = *pos;
arg1 = evaluate_subexp (NULL_TYPE, exp, pos, noside);
if (noside == EVAL_SKIP)
goto nosideret;
type = ada_lookup_struct_elt_type (type1,
&exp->elts[pc + 2].string,
1, 1, NULL);
+
+ /* If the field is not found, check if it exists in the
+ extension of this object's type. This means that we
+ need to fully evaluate the expression. */
+
if (type == NULL)
- /* In this case, we assume that the field COULD exist
- in some extension of the type. Return an object of
- "type" void, which will match any formal
- (see ada_type_match). */
- return value_zero (builtin_type (exp->gdbarch)->builtin_void,
- lval_memory);
+ {
+ arg1 = evaluate_subexp (NULL_TYPE, exp, &preeval_pos,
+ EVAL_NORMAL);
+ arg1 = ada_value_struct_elt (arg1,
+ &exp->elts[pc + 2].string,
+ 0);
+ arg1 = unwrap_value (arg1);
+ type = value_type (ada_to_fixed_value (arg1));
+ }
}
else
type =
if (L < INT_MIN || U > INT_MAX)
return raw_type;
else
- return create_range_type (alloc_type_copy (raw_type), raw_type,
- ada_discrete_type_low_bound (raw_type),
- ada_discrete_type_high_bound (raw_type));
+ return create_static_range_type (alloc_type_copy (raw_type), raw_type,
+ L, U);
}
else
{
}
}
- type = create_range_type (alloc_type_copy (raw_type), base_type, L, U);
+ type = create_static_range_type (alloc_type_copy (raw_type),
+ base_type, L, U);
TYPE_NAME (type) = name;
return type;
}
variants of the runtime, we use a sniffer that will determine
the runtime variant used by the program being debugged. */
-/* Ada's standard exceptions. */
+/* Ada's standard exceptions.
+
+ The Ada 83 standard also defined Numeric_Error. But there were so many
+ situations where it was unclear from the Ada 83 Reference Manual
+ (RM) whether Constraint_Error or Numeric_Error should be raised,
+ that the ARG (Ada Rapporteur Group) eventually issued a Binding
+ Interpretation saying that anytime the RM says that Numeric_Error
+ should be raised, the implementation may raise Constraint_Error.
+ Ada 95 went one step further and pretty much removed Numeric_Error
+ from the list of standard exceptions (it made it a renaming of
+ Constraint_Error, to help preserve compatibility when compiling
+ Ada 83 code). As such, we do not include Numeric_Error in
+ this list of standard exceptions. */
static char *standard_exc[] = {
"constraint_error",
the name of the exception being raised (this name is printed in
the catchpoint message, and is also used when trying to catch
a specific exception). We do not handle this case for now. */
- struct minimal_symbol *msym
+ struct bound_minimal_symbol msym
= lookup_minimal_symbol (einfo->catch_exception_sym, NULL, NULL);
- if (msym && MSYMBOL_TYPE (msym) != mst_solib_trampoline)
+ if (msym.minsym && MSYMBOL_TYPE (msym.minsym) != mst_solib_trampoline)
error (_("Your Ada runtime appears to be missing some debugging "
"information.\nCannot insert Ada exception catchpoint "
"in this configuration."));
re_comp (known_runtime_file_name_patterns[i]);
if (re_exec (lbasename (sal.symtab->filename)))
return 1;
- if (sal.symtab->objfile != NULL
- && re_exec (objfile_name (sal.symtab->objfile)))
+ if (SYMTAB_OBJFILE (sal.symtab) != NULL
+ && re_exec (objfile_name (SYMTAB_OBJFILE (sal.symtab))))
return 1;
}
if (msymbol.minsym != NULL)
{
struct ada_exc_info info
- = {standard_exc[i], SYMBOL_VALUE_ADDRESS (msymbol.minsym)};
+ = {standard_exc[i], BMSYMBOL_VALUE_ADDRESS (msymbol)};
VEC_safe_push (ada_exc_info, *exceptions, &info);
}
ada_add_exceptions_from_frame (regex_t *preg, struct frame_info *frame,
VEC(ada_exc_info) **exceptions)
{
- struct block *block = get_frame_block (frame, 0);
+ const struct block *block = get_frame_block (frame, 0);
while (block != 0)
{
ada_add_global_exceptions (regex_t *preg, VEC(ada_exc_info) **exceptions)
{
struct objfile *objfile;
- struct symtab *s;
+ struct compunit_symtab *s;
expand_symtabs_matching (NULL, ada_exc_search_name_matches,
VARIABLES_DOMAIN, preg);
- ALL_PRIMARY_SYMTABS (objfile, s)
+ ALL_COMPUNITS (objfile, s)
{
- struct blockvector *bv = BLOCKVECTOR (s);
+ const struct blockvector *bv = COMPUNIT_BLOCKVECTOR (s);
int i;
for (i = GLOBAL_BLOCK; i <= STATIC_BLOCK; i++)
}
static int
-parse (void)
+parse (struct parser_state *ps)
{
warnings_issued = 0;
- return ada_parse ();
+ return ada_parse (ps);
}
static const struct exp_descriptor ada_exp_descriptor = {
static struct value *
ada_read_var_value (struct symbol *var, struct frame_info *frame)
{
- struct block *frame_block = NULL;
+ const struct block *frame_block = NULL;
struct symbol *renaming_sym = NULL;
/* The only case where default_read_var_value is not sufficient
ada_get_symbol_name_cmp, /* la_get_symbol_name_cmp */
ada_iterate_over_symbols,
&ada_varobj_ops,
+ NULL,
+ NULL,
LANG_MAGIC
};
{
printf_unfiltered (_(\
"\"set ada\" must be followed by the name of a setting.\n"));
- help_list (set_ada_list, "set ada ", -1, gdb_stdout);
+ help_list (set_ada_list, "set ada ", all_commands, gdb_stdout);
}
/* Implement the "show ada" prefix command. */
ops->print_recreate = print_recreate_catch_assert;
}
+/* This module's 'new_objfile' observer. */
+
+static void
+ada_new_objfile_observer (struct objfile *objfile)
+{
+ ada_clear_symbol_cache ();
+}
+
+/* This module's 'free_objfile' observer. */
+
+static void
+ada_free_objfile_observer (struct objfile *objfile)
+{
+ ada_clear_symbol_cache ();
+}
+
void
_initialize_ada_language (void)
{
(256, htab_hash_string, (int (*)(const void *, const void *)) streq,
NULL, xcalloc, xfree);
- /* Setup per-inferior data. */
+ /* The ada-lang observers. */
+ observer_attach_new_objfile (ada_new_objfile_observer);
+ observer_attach_free_objfile (ada_free_objfile_observer);
observer_attach_inferior_exit (ada_inferior_exit);
+
+ /* Setup various context-specific data. */
ada_inferior_data
= register_inferior_data_with_cleanup (NULL, ada_inferior_data_cleanup);
+ ada_pspace_data_handle
+ = register_program_space_data_with_cleanup (NULL, ada_pspace_data_cleanup);
}