X-Git-Url: http://git.efficios.com/?a=blobdiff_plain;f=gdb%2Fvalue.c;h=cb860509f80547c8a32dc1e5dea68b22a1fb59d4;hb=refs%2Fheads%2Fconcurrent-displaced-stepping-2020-04-01;hp=90423ed00286899fd10741bae65ea87526259382;hpb=3b4b2f160d288b85a1379d24fd0f4de19062f3fd;p=deliverable%2Fbinutils-gdb.git diff --git a/gdb/value.c b/gdb/value.c index 90423ed002..cb860509f8 100644 --- a/gdb/value.c +++ b/gdb/value.c @@ -1,6 +1,6 @@ /* Low level packing and unpacking of values for GDB, the GNU Debugger. - Copyright (C) 1986-2017 Free Software Foundation, Inc. + Copyright (C) 1986-2020 Free Software Foundation, Inc. This file is part of GDB. @@ -28,11 +28,9 @@ #include "target.h" #include "language.h" #include "demangle.h" -#include "doublest.h" -#include "floatformat.h" #include "regcache.h" #include "block.h" -#include "dfp.h" +#include "target-float.h" #include "objfiles.h" #include "valprint.h" #include "cli/cli-decode.h" @@ -43,6 +41,9 @@ #include "user-regs.h" #include #include "completer.h" +#include "gdbsupport/selftest.h" +#include "gdbsupport/array-view.h" +#include "cli/cli-style.h" /* Definition of a user function. */ struct internal_function @@ -68,11 +69,23 @@ struct range /* Length of the range. */ LONGEST length; -}; -typedef struct range range_s; + /* Returns true if THIS is strictly less than OTHER, useful for + searching. We keep ranges sorted by offset and coalesce + overlapping and contiguous ranges, so this just compares the + starting offset. */ + + bool operator< (const range &other) const + { + return offset < other.offset; + } -DEF_VEC_O(range_s); + /* Returns true if THIS is equal to OTHER. */ + bool operator== (const range &other) const + { + return offset == other.offset && length == other.length; + } +}; /* Returns true if the ranges defined by [offset1, offset1+len1) and [offset2, offset2+len2) overlap. */ @@ -88,25 +101,14 @@ ranges_overlap (LONGEST offset1, LONGEST len1, return (l < h); } -/* Returns true if the first argument is strictly less than the - second, useful for VEC_lower_bound. We keep ranges sorted by - offset and coalesce overlapping and contiguous ranges, so this just - compares the starting offset. */ - -static int -range_lessthan (const range_s *r1, const range_s *r2) -{ - return r1->offset < r2->offset; -} - /* Returns true if RANGES contains any range that overlaps [OFFSET, OFFSET+LENGTH). 
*/ static int -ranges_contain (VEC(range_s) *ranges, LONGEST offset, LONGEST length) +ranges_contain (const std::vector &ranges, LONGEST offset, + LONGEST length) { - range_s what; - LONGEST i; + range what; what.offset = offset; what.length = length; @@ -142,21 +144,22 @@ ranges_contain (VEC(range_s) *ranges, LONGEST offset, LONGEST length) I=1 */ - i = VEC_lower_bound (range_s, ranges, &what, range_lessthan); - if (i > 0) + auto i = std::lower_bound (ranges.begin (), ranges.end (), what); + + if (i > ranges.begin ()) { - struct range *bef = VEC_index (range_s, ranges, i - 1); + const struct range &bef = *(i - 1); - if (ranges_overlap (bef->offset, bef->length, offset, length)) + if (ranges_overlap (bef.offset, bef.length, offset, length)) return 1; } - if (i < VEC_length (range_s, ranges)) + if (i < ranges.end ()) { - struct range *r = VEC_index (range_s, ranges, i); + const struct range &r = *i; - if (ranges_overlap (r->offset, r->length, offset, length)) + if (ranges_overlap (r.offset, r.length, offset, length)) return 1; } @@ -170,9 +173,34 @@ static struct cmd_list_element *functionlist; struct value { + explicit value (struct type *type_) + : modifiable (1), + lazy (1), + initialized (1), + stack (0), + type (type_), + enclosing_type (type_) + { + } + + ~value () + { + if (VALUE_LVAL (this) == lval_computed) + { + const struct lval_funcs *funcs = location.computed.funcs; + + if (funcs->free_closure) + funcs->free_closure (this); + } + else if (VALUE_LVAL (this) == lval_xcallable) + delete location.xm_worker; + } + + DISABLE_COPY_AND_ASSIGN (value); + /* Type of value; either not an lval, or one of the various different possible kinds of lval. */ - enum lval_type lval; + enum lval_type lval = not_lval; /* Is it modifiable? Only relevant if lval != not_lval. */ unsigned int modifiable : 1; @@ -200,9 +228,6 @@ struct value used instead of read_memory to enable extra caching. */ unsigned int stack : 1; - /* If the value has been released. */ - unsigned int released : 1; - /* Location of value (if lval). */ union { @@ -237,32 +262,32 @@ struct value /* Closure for those functions to use. */ void *closure; } computed; - } location; + } location {}; /* Describes offset of a value within lval of a structure in target addressable memory units. Note also the member embedded_offset below. */ - LONGEST offset; + LONGEST offset = 0; /* Only used for bitfields; number of bits contained in them. */ - LONGEST bitsize; + LONGEST bitsize = 0; /* Only used for bitfields; position of start of field. For - gdbarch_bits_big_endian=0 targets, it is the position of the LSB. For - gdbarch_bits_big_endian=1 targets, it is the position of the MSB. */ - LONGEST bitpos; + little-endian targets, it is the position of the LSB. For + big-endian targets, it is the position of the MSB. */ + LONGEST bitpos = 0; /* The number of references to this value. When a value is created, the value chain holds a reference, so REFERENCE_COUNT is 1. If release_value is called, this value is removed from the chain but the caller of release_value now has a reference to this value. The caller must arrange for a call to value_free later. */ - int reference_count; + int reference_count = 1; /* Only used for bitfields; the containing value. This allows a single read from the target when displaying multiple bitfields. */ - struct value *parent; + value_ref_ptr parent; /* Type of the value. */ struct type *type; @@ -308,18 +333,12 @@ struct value `type', and `embedded_offset' is zero, so everything works normally. 
*/ struct type *enclosing_type; - LONGEST embedded_offset; - LONGEST pointed_to_offset; - - /* Values are stored in a chain, so that they can be deleted easily - over calls to the inferior. Values assigned to internal - variables, put into the value history or exposed to Python are - taken off this list. */ - struct value *next; + LONGEST embedded_offset = 0; + LONGEST pointed_to_offset = 0; /* Actual contents of the value. Target byte-order. NULL or not valid if lazy is nonzero. */ - gdb_byte *contents; + gdb::unique_xmalloc_ptr contents; /* Unavailable ranges in CONTENTS. We mark unavailable ranges, rather than available, since the common and default case is for a @@ -327,7 +346,7 @@ struct value The unavailable ranges are tracked in bits. Note that a contents bit that has been optimized out doesn't really exist in the program, so it can't be marked unavailable either. */ - VEC(range_s) *unavailable; + std::vector unavailable; /* Likewise, but for optimized out contents (a chunk of the value of a variable that does not actually exist in the program). If LVAL @@ -336,7 +355,7 @@ struct value saved registers and optimized-out program variables values are treated pretty much the same, except not-saved registers have a different string representation and related error strings. */ - VEC(range_s) *optimized_out; + std::vector optimized_out; }; /* See value.h. */ @@ -380,7 +399,7 @@ value_entirely_available (struct value *value) if (value->lazy) value_fetch_lazy (value); - if (VEC_empty (range_s, value->unavailable)) + if (value->unavailable.empty ()) return 1; return 0; } @@ -391,20 +410,20 @@ value_entirely_available (struct value *value) static int value_entirely_covered_by_range_vector (struct value *value, - VEC(range_s) **ranges) + const std::vector &ranges) { /* We can only tell whether the whole value is optimized out / unavailable when we try to read it. */ if (value->lazy) value_fetch_lazy (value); - if (VEC_length (range_s, *ranges) == 1) + if (ranges.size () == 1) { - struct range *t = VEC_index (range_s, *ranges, 0); + const struct range &t = ranges[0]; - if (t->offset == 0 - && t->length == (TARGET_CHAR_BIT - * TYPE_LENGTH (value_enclosing_type (value)))) + if (t.offset == 0 + && t.length == (TARGET_CHAR_BIT + * TYPE_LENGTH (value_enclosing_type (value)))) return 1; } @@ -414,24 +433,23 @@ value_entirely_covered_by_range_vector (struct value *value, int value_entirely_unavailable (struct value *value) { - return value_entirely_covered_by_range_vector (value, &value->unavailable); + return value_entirely_covered_by_range_vector (value, value->unavailable); } int value_entirely_optimized_out (struct value *value) { - return value_entirely_covered_by_range_vector (value, &value->optimized_out); + return value_entirely_covered_by_range_vector (value, value->optimized_out); } /* Insert into the vector pointed to by VECTORP the bit range starting of OFFSET bits, and extending for the next LENGTH bits. */ static void -insert_into_bit_range_vector (VEC(range_s) **vectorp, +insert_into_bit_range_vector (std::vector *vectorp, LONGEST offset, LONGEST length) { - range_s newr; - int i; + range newr; /* Insert the range sorted. If there's overlap or the new range would be contiguous with an existing range, merge. 
*/ @@ -519,76 +537,77 @@ insert_into_bit_range_vector (VEC(range_s) **vectorp, */ - i = VEC_lower_bound (range_s, *vectorp, &newr, range_lessthan); - if (i > 0) + auto i = std::lower_bound (vectorp->begin (), vectorp->end (), newr); + if (i > vectorp->begin ()) { - struct range *bef = VEC_index (range_s, *vectorp, i - 1); + struct range &bef = *(i - 1); - if (ranges_overlap (bef->offset, bef->length, offset, length)) + if (ranges_overlap (bef.offset, bef.length, offset, length)) { /* #1 */ - ULONGEST l = std::min (bef->offset, offset); - ULONGEST h = std::max (bef->offset + bef->length, offset + length); + ULONGEST l = std::min (bef.offset, offset); + ULONGEST h = std::max (bef.offset + bef.length, offset + length); - bef->offset = l; - bef->length = h - l; + bef.offset = l; + bef.length = h - l; i--; } - else if (offset == bef->offset + bef->length) + else if (offset == bef.offset + bef.length) { /* #2 */ - bef->length += length; + bef.length += length; i--; } else { /* #3 */ - VEC_safe_insert (range_s, *vectorp, i, &newr); + i = vectorp->insert (i, newr); } } else { /* #4 */ - VEC_safe_insert (range_s, *vectorp, i, &newr); + i = vectorp->insert (i, newr); } /* Check whether the ranges following the one we've just added or touched can be folded in (#5 above). */ - if (i + 1 < VEC_length (range_s, *vectorp)) + if (i != vectorp->end () && i + 1 < vectorp->end ()) { - struct range *t; - struct range *r; int removed = 0; - int next = i + 1; + auto next = i + 1; /* Get the range we just touched. */ - t = VEC_index (range_s, *vectorp, i); + struct range &t = *i; removed = 0; i = next; - for (; VEC_iterate (range_s, *vectorp, i, r); i++) - if (r->offset <= t->offset + t->length) - { - ULONGEST l, h; + for (; i < vectorp->end (); i++) + { + struct range &r = *i; + if (r.offset <= t.offset + t.length) + { + ULONGEST l, h; - l = std::min (t->offset, r->offset); - h = std::max (t->offset + t->length, r->offset + r->length); + l = std::min (t.offset, r.offset); + h = std::max (t.offset + t.length, r.offset + r.length); - t->offset = l; - t->length = h - l; + t.offset = l; + t.length = h - l; - removed++; - } - else - { - /* If we couldn't merge this one, we won't be able to - merge following ones either, since the ranges are - always sorted by OFFSET. */ - break; - } + removed++; + } + else + { + /* If we couldn't merge this one, we won't be able to + merge following ones either, since the ranges are + always sorted by OFFSET. */ + break; + } + } if (removed != 0) - VEC_block_remove (range_s, *vectorp, next, removed); + vectorp->erase (next, next + removed); } } @@ -614,15 +633,17 @@ mark_value_bytes_unavailable (struct value *value, found, or -1 if none was found. */ static int -find_first_range_overlap (VEC(range_s) *ranges, int pos, +find_first_range_overlap (const std::vector *ranges, int pos, LONGEST offset, LONGEST length) { - range_s *r; int i; - for (i = pos; VEC_iterate (range_s, ranges, i, r); i++) - if (ranges_overlap (r->offset, r->length, offset, length)) - return i; + for (i = pos; i < ranges->size (); i++) + { + const range &r = (*ranges)[i]; + if (ranges_overlap (r.offset, r.length, offset, length)) + return i; + } return -1; } @@ -735,7 +756,7 @@ memcmp_with_bit_offsets (const gdb_byte *ptr1, size_t offset1_bits, struct ranges_and_idx { /* The ranges. */ - VEC(range_s) *ranges; + const std::vector *ranges; /* The range we've last found in RANGES. Given ranges are sorted, we can start the next lookup here. 
*/ @@ -769,12 +790,12 @@ find_first_range_overlap_and_match (struct ranges_and_idx *rp1, return 0; else { - range_s *r1, *r2; + const range *r1, *r2; ULONGEST l1, h1; ULONGEST l2, h2; - r1 = VEC_index (range_s, rp1->ranges, rp1->idx); - r2 = VEC_index (range_s, rp2->ranges, rp2->idx); + r1 = &(*rp1->ranges)[rp1->idx]; + r2 = &(*rp2->ranges)[rp2->idx]; /* Get the unavailable windows intersected by the incoming ranges. The first and last ranges that overlap the argument @@ -810,7 +831,7 @@ find_first_range_overlap_and_match (struct ranges_and_idx *rp1, with LENGTH bits of VAL2's contents starting at OFFSET2 bits. Return true if the available bits match. */ -static int +static bool value_contents_bits_eq (const struct value *val1, int offset1, const struct value *val2, int offset2, int length) @@ -830,10 +851,10 @@ value_contents_bits_eq (const struct value *val1, int offset1, memset (&rp1, 0, sizeof (rp1)); memset (&rp2, 0, sizeof (rp2)); - rp1[0].ranges = val1->unavailable; - rp2[0].ranges = val2->unavailable; - rp1[1].ranges = val1->optimized_out; - rp2[1].ranges = val2->optimized_out; + rp1[0].ranges = &val1->unavailable; + rp2[0].ranges = &val2->unavailable; + rp1[1].ranges = &val1->optimized_out; + rp2[1].ranges = &val2->optimized_out; while (length > 0) { @@ -849,7 +870,7 @@ value_contents_bits_eq (const struct value *val1, int offset1, if (!find_first_range_overlap_and_match (&rp1[i], &rp2[i], offset1, offset2, length, &l_tmp, &h_tmp)) - return 0; + return false; /* We're interested in the lowest/first range found. */ if (i == 0 || l_tmp < l) @@ -860,19 +881,19 @@ value_contents_bits_eq (const struct value *val1, int offset1, } /* Compare the available/valid contents. */ - if (memcmp_with_bit_offsets (val1->contents, offset1, - val2->contents, offset2, l) != 0) - return 0; + if (memcmp_with_bit_offsets (val1->contents.get (), offset1, + val2->contents.get (), offset2, l) != 0) + return false; length -= h; offset1 += h; offset2 += h; } - return 1; + return true; } -int +bool value_contents_eq (const struct value *val1, LONGEST offset1, const struct value *val2, LONGEST offset2, LONGEST length) @@ -882,37 +903,18 @@ value_contents_eq (const struct value *val1, LONGEST offset1, length * TARGET_CHAR_BIT); } -/* Prototypes for local functions. */ -static void show_values (char *, int); - - -/* The value-history records all the values printed - by print commands during this session. Each chunk - records 60 consecutive values. The first chunk on - the chain records the most recent values. - The total number of values is in value_history_count. */ - -#define VALUE_HISTORY_CHUNK 60 - -struct value_history_chunk - { - struct value_history_chunk *next; - struct value *values[VALUE_HISTORY_CHUNK]; - }; +/* The value-history records all the values printed by print commands + during this session. */ -/* Chain of chunks now in use. */ - -static struct value_history_chunk *value_history_chain; - -static int value_history_count; /* Abs number of last entry stored. */ +static std::vector value_history; /* List of all value objects currently allocated (except for those released by calls to release_value) This is so they can be freed after each command. */ -static struct value *all_values; +static std::vector all_values; /* Allocate a lazy value for type TYPE. Its actual content is "lazily" allocated too: the content field of the return value is @@ -931,25 +933,10 @@ allocate_value_lazy (struct type *type) description correctly. 
*/ check_typedef (type); - val = XCNEW (struct value); - val->contents = NULL; - val->next = all_values; - all_values = val; - val->type = type; - val->enclosing_type = type; - VALUE_LVAL (val) = not_lval; - val->location.address = 0; - val->offset = 0; - val->bitpos = 0; - val->bitsize = 0; - val->lazy = 1; - val->embedded_offset = 0; - val->pointed_to_offset = 0; - val->modifiable = 1; - val->initialized = 1; /* Default to initialized. */ + val = new struct value (type); /* Values start out on the all_values chain. */ - val->reference_count = 1; + all_values.emplace_back (val); return val; } @@ -975,7 +962,7 @@ gdb_static_assert (sizeof (LONGEST) <= MIN_VALUE_FOR_MAX_VALUE_SIZE); /* Implement the "set max-value-size" command. */ static void -set_max_value_size (char *args, int from_tty, +set_max_value_size (const char *args, int from_tty, struct cmd_list_element *c) { gdb_assert (max_value_size == -1 || max_value_size >= 0); @@ -1014,9 +1001,9 @@ check_type_length_before_alloc (const struct type *type) if (max_value_size > -1 && length > max_value_size) { - if (TYPE_NAME (type) != NULL) + if (type->name () != NULL) error (_("value of type `%s' requires %u bytes, which is more " - "than max-value-size"), TYPE_NAME (type), length); + "than max-value-size"), type->name (), length); else error (_("value requires %u bytes, which is more than " "max-value-size"), length); @@ -1031,8 +1018,8 @@ allocate_value_contents (struct value *val) if (!val->contents) { check_type_length_before_alloc (val->enclosing_type); - val->contents - = (gdb_byte *) xzalloc (TYPE_LENGTH (val->enclosing_type)); + val->contents.reset + ((gdb_byte *) xzalloc (TYPE_LENGTH (val->enclosing_type))); } } @@ -1091,12 +1078,6 @@ allocate_optimized_out_value (struct type *type) /* Accessor methods. */ -struct value * -value_next (const struct value *value) -{ - return value->next; -} - struct type * value_type (const struct value *value) { @@ -1144,7 +1125,7 @@ set_value_bitsize (struct value *value, LONGEST bit) struct value * value_parent (const struct value *value) { - return value->parent; + return value->parent.get (); } /* See value.h. */ @@ -1152,12 +1133,7 @@ value_parent (const struct value *value) void set_value_parent (struct value *value, struct value *parent) { - struct value *old = value->parent; - - value->parent = parent; - if (parent != NULL) - value_incref (parent); - value_free (old); + value->parent = value_ref_ptr::new_reference (parent); } gdb_byte * @@ -1167,14 +1143,14 @@ value_contents_raw (struct value *value) int unit_size = gdbarch_addressable_memory_unit_size (arch); allocate_value_contents (value); - return value->contents + value->embedded_offset * unit_size; + return value->contents.get () + value->embedded_offset * unit_size; } gdb_byte * value_contents_all_raw (struct value *value) { allocate_value_contents (value); - return value->contents; + return value->contents.get (); } struct type * @@ -1201,9 +1177,9 @@ value_actual_type (struct value *value, int resolve_simple_types, { /* If result's target type is TYPE_CODE_STRUCT, proceed to fetch its rtti type. 
*/ - if ((TYPE_CODE (result) == TYPE_CODE_PTR || TYPE_IS_REFERENCE (result)) - && TYPE_CODE (check_typedef (TYPE_TARGET_TYPE (result))) - == TYPE_CODE_STRUCT + if ((result->code () == TYPE_CODE_PTR || TYPE_IS_REFERENCE (result)) + && (check_typedef (TYPE_TARGET_TYPE (result))->code () + == TYPE_CODE_STRUCT) && !value_optimized_out (value)) { struct type *real_type; @@ -1236,7 +1212,7 @@ error_value_optimized_out (void) static void require_not_optimized_out (const struct value *value) { - if (!VEC_empty (range_s, value->optimized_out)) + if (!value->optimized_out.empty ()) { if (value->lval == lval_register) error (_("register has not been saved in frame")); @@ -1248,7 +1224,7 @@ require_not_optimized_out (const struct value *value) static void require_available (const struct value *value) { - if (!VEC_empty (range_s, value->unavailable)) + if (!value->unavailable.empty ()) throw_error (NOT_AVAILABLE_ERROR, _("value is not available")); } @@ -1257,14 +1233,14 @@ value_contents_for_printing (struct value *value) { if (value->lazy) value_fetch_lazy (value); - return value->contents; + return value->contents.get (); } const gdb_byte * value_contents_for_printing_const (const struct value *value) { gdb_assert (!value->lazy); - return value->contents; + return value->contents.get (); } const gdb_byte * @@ -1280,19 +1256,16 @@ value_contents_all (struct value *value) SRC_BIT_OFFSET+BIT_LENGTH) ranges into *DST_RANGE, adjusted. */ static void -ranges_copy_adjusted (VEC (range_s) **dst_range, int dst_bit_offset, - VEC (range_s) *src_range, int src_bit_offset, +ranges_copy_adjusted (std::vector *dst_range, int dst_bit_offset, + const std::vector &src_range, int src_bit_offset, int bit_length) { - range_s *r; - int i; - - for (i = 0; VEC_iterate (range_s, src_range, i, r); i++) + for (const range &r : src_range) { ULONGEST h, l; - l = std::max (r->offset, (LONGEST) src_bit_offset); - h = std::min (r->offset + r->length, + l = std::max (r.offset, (LONGEST) src_bit_offset); + h = std::min (r.offset + r.length, (LONGEST) src_bit_offset + bit_length); if (l < h) @@ -1431,20 +1404,19 @@ value_optimized_out (struct value *value) { /* We can only know if a value is optimized out once we have tried to fetch it. */ - if (VEC_empty (range_s, value->optimized_out) && value->lazy) + if (value->optimized_out.empty () && value->lazy) { - TRY + try { value_fetch_lazy (value); } - CATCH (ex, RETURN_MASK_ERROR) + catch (const gdb_exception_error &ex) { /* Fall back to checking value->optimized_out. */ } - END_CATCH } - return !VEC_empty (range_s, value->optimized_out); + return !value->optimized_out.empty (); } /* Mark contents of VALUE as optimized out, starting at OFFSET bytes, and @@ -1537,7 +1509,7 @@ value_address (const struct value *value) if (value->lval != lval_memory) return 0; if (value->parent != NULL) - return value_address (value->parent) + value->offset; + return value_address (value->parent.get ()) + value->offset; if (NULL != TYPE_DATA_LOCATION (value_type (value))) { gdb_assert (PROP_CONST == TYPE_DATA_LOCATION_KIND (value_type (value))); @@ -1594,11 +1566,12 @@ deprecated_value_modifiable (const struct value *value) struct value * value_mark (void) { - return all_values; + if (all_values.empty ()) + return nullptr; + return all_values.back ().get (); } -/* Take a reference to VAL. VAL will not be deallocated until all - references are released. */ +/* See value.h. */ void value_incref (struct value *val) @@ -1611,34 +1584,15 @@ value_incref (struct value *val) chain. 
*/ void -value_free (struct value *val) +value_decref (struct value *val) { - if (val) + if (val != nullptr) { gdb_assert (val->reference_count > 0); val->reference_count--; - if (val->reference_count > 0) - return; - - /* If there's an associated parent value, drop our reference to - it. */ - if (val->parent != NULL) - value_free (val->parent); - - if (VALUE_LVAL (val) == lval_computed) - { - const struct lval_funcs *funcs = val->location.computed.funcs; - - if (funcs->free_closure) - funcs->free_closure (val); - } - else if (VALUE_LVAL (val) == lval_xcallable) - free_xmethod_worker (val->location.xm_worker); - - xfree (val->contents); - VEC_free (range_s, val->unavailable); + if (val->reference_count == 0) + delete val; } - xfree (val); } /* Free all values allocated since MARK was obtained by value_mark @@ -1646,113 +1600,56 @@ value_free (struct value *val) void value_free_to_mark (const struct value *mark) { - struct value *val; - struct value *next; - - for (val = all_values; val && val != mark; val = next) - { - next = val->next; - val->released = 1; - value_free (val); - } - all_values = val; -} - -/* Free all the values that have been allocated (except for those released). - Call after each command, successful or not. - In practice this is called before each command, which is sufficient. */ - -void -free_all_values (void) -{ - struct value *val; - struct value *next; - - for (val = all_values; val; val = next) - { - next = val->next; - val->released = 1; - value_free (val); - } - - all_values = 0; -} - -/* Frees all the elements in a chain of values. */ - -void -free_value_chain (struct value *v) -{ - struct value *next; - - for (; v; v = next) - { - next = value_next (v); - value_free (v); - } + auto iter = std::find (all_values.begin (), all_values.end (), mark); + if (iter == all_values.end ()) + all_values.clear (); + else + all_values.erase (iter + 1, all_values.end ()); } /* Remove VAL from the chain all_values so it will not be freed automatically. */ -void +value_ref_ptr release_value (struct value *val) { - struct value *v; - - if (all_values == val) - { - all_values = val->next; - val->next = NULL; - val->released = 1; - return; - } + if (val == nullptr) + return value_ref_ptr (); - for (v = all_values; v; v = v->next) + std::vector::reverse_iterator iter; + for (iter = all_values.rbegin (); iter != all_values.rend (); ++iter) { - if (v->next == val) + if (*iter == val) { - v->next = val->next; - val->next = NULL; - val->released = 1; - break; + value_ref_ptr result = *iter; + all_values.erase (iter.base () - 1); + return result; } } -} - -/* If the value is not already released, release it. - If the value is already released, increment its reference count. - That is, this function ensures that the value is released from the - value chain and that the caller owns a reference to it. */ -void -release_value_or_incref (struct value *val) -{ - if (val->released) - value_incref (val); - else - release_value (val); + /* We must always return an owned reference. Normally this happens + because we transfer the reference from the value chain, but in + this case the value was not on the chain. */ + return value_ref_ptr::new_reference (val); } -/* Release all values up to mark */ -struct value * +/* See value.h. 
*/ + +std::vector value_release_to_mark (const struct value *mark) { - struct value *val; - struct value *next; + std::vector result; - for (val = next = all_values; next; next = next->next) + auto iter = std::find (all_values.begin (), all_values.end (), mark); + if (iter == all_values.end ()) + std::swap (result, all_values); + else { - if (next->next == mark) - { - all_values = next->next; - next->next = NULL; - return val; - } - next->released = 1; + std::move (iter + 1, all_values.end (), std::back_inserter (result)); + all_values.erase (iter + 1, all_values.end ()); } - all_values = 0; - return val; + std::reverse (result.begin (), result.end ()); + return result; } /* Return a copy of the value ARG. @@ -1785,9 +1682,9 @@ value_copy (struct value *arg) TYPE_LENGTH (value_enclosing_type (arg))); } - val->unavailable = VEC_copy (range_s, arg->unavailable); - val->optimized_out = VEC_copy (range_s, arg->optimized_out); - set_value_parent (val, arg->parent); + val->unavailable = arg->unavailable; + val->optimized_out = arg->optimized_out; + val->parent = arg->parent; if (VALUE_LVAL (val) == lval_computed) { const struct lval_funcs *funcs = val->location.computed.funcs; @@ -1889,8 +1786,6 @@ set_value_component_location (struct value *component, int record_latest_value (struct value *val) { - int i; - /* We don't want this value to have anything to do with the inferior anymore. In particular, "set $1 = 50" should not affect the variable from which the value was taken, and fast watchpoints should be able to assume that @@ -1902,29 +1797,9 @@ record_latest_value (struct value *val) but the current contents of that location. c'est la vie... */ val->modifiable = 0; - /* The value may have already been released, in which case we're adding a - new reference for its entry in the history. That is why we call - release_value_or_incref here instead of release_value. */ - release_value_or_incref (val); - - /* Here we treat value_history_count as origin-zero - and applying to the value being stored now. */ + value_history.push_back (release_value (val)); - i = value_history_count % VALUE_HISTORY_CHUNK; - if (i == 0) - { - struct value_history_chunk *newobj = XCNEW (struct value_history_chunk); - - newobj->next = value_history_chain; - value_history_chain = newobj; - } - - value_history_chain->values[i] = val; - - /* Now we regard value_history_count as origin-one - and applying to the value just stored. */ - - return ++value_history_count; + return value_history.size (); } /* Return a copy of the value in the history with sequence number NUM. */ @@ -1932,12 +1807,10 @@ record_latest_value (struct value *val) struct value * access_value_history (int num) { - struct value_history_chunk *chunk; - int i; int absnum = num; if (absnum <= 0) - absnum += value_history_count; + absnum += value_history.size (); if (absnum <= 0) { @@ -1948,24 +1821,16 @@ access_value_history (int num) else error (_("History does not go back to $$%d."), -num); } - if (absnum > value_history_count) + if (absnum > value_history.size ()) error (_("History has not yet reached $%d."), absnum); absnum--; - /* Now absnum is always absolute and origin zero. 
*/ - - chunk = value_history_chain; - for (i = (value_history_count - 1) / VALUE_HISTORY_CHUNK - - absnum / VALUE_HISTORY_CHUNK; - i > 0; i--) - chunk = chunk->next; - - return value_copy (chunk->values[absnum % VALUE_HISTORY_CHUNK]); + return value_copy (value_history[absnum].get ()); } static void -show_values (char *num_exp, int from_tty) +show_values (const char *num_exp, int from_tty) { int i; struct value *val; @@ -1981,13 +1846,13 @@ show_values (char *num_exp, int from_tty) else { /* "show values" means print the last 10 values. */ - num = value_history_count - 9; + num = value_history.size () - 9; } if (num <= 0) num = 1; - for (i = num; i < num + 10 && i <= value_history_count; i++) + for (i = num; i < num + 10 && i <= value_history.size (); i++) { struct value_print_options opts; @@ -2005,10 +1870,7 @@ show_values (char *num_exp, int from_tty) "show values +". If num_exp is null, this is unnecessary, since "show values +" is not useful after "show values". */ if (from_tty && num_exp) - { - num_exp[0] = '+'; - num_exp[1] = '\0'; - } + set_repeat_arguments ("+"); } enum internalvar_kind @@ -2095,7 +1957,7 @@ static struct internalvar *internalvars; /* If the variable does not already exist create it and give it the value given. If no value is given then the default is zero. */ static void -init_if_undefined_command (char* args, int from_tty) +init_if_undefined_command (const char* args, int from_tty) { struct internalvar* intvar; @@ -2116,7 +1978,7 @@ init_if_undefined_command (char* args, int from_tty) intvar = expr->elts[2].internalvar; /* Only evaluate the expression if the lvalue is void. - This may still fail if the expresssion is invalid. */ + This may still fail if the expression is invalid. */ if (intvar->kind == INTERNALVAR_VOID) evaluate_expression (expr.get ()); } @@ -2153,11 +2015,7 @@ complete_internalvar (completion_tracker &tracker, const char *name) for (var = internalvars; var; var = var->next) if (strncmp (var->name, name, len) == 0) - { - gdb::unique_xmalloc_ptr copy (xstrdup (var->name)); - - tracker.add_completion (std::move (copy)); - } + tracker.add_completion (make_unique_xstrdup (var->name)); } /* Create an internal variable with name NAME and with a void value. @@ -2168,7 +2026,7 @@ create_internalvar (const char *name) { struct internalvar *var = XNEW (struct internalvar); - var->name = concat (name, (char *)NULL); + var->name = xstrdup (name); var->kind = INTERNALVAR_VOID; var->next = internalvars; internalvars = var; @@ -2295,7 +2153,7 @@ value_of_internalvar (struct gdbarch *gdbarch, struct internalvar *var) on this value go back to affect the original internal variable. Do not do this for INTERNALVAR_MAKE_VALUE variables, as those have - no underlying modifyable state in the internal variable. + no underlying modifiable state in the internal variable. Likewise, if the variable's value is a computed lvalue, we want references to it to produce another computed lvalue, where @@ -2331,7 +2189,7 @@ get_internalvar_integer (struct internalvar *var, LONGEST *result) { struct type *type = check_typedef (value_type (var->u.value)); - if (TYPE_CODE (type) == TYPE_CODE_INT) + if (type->code () == TYPE_CODE_INT) { *result = value_as_long (var->u.value); return 1; @@ -2396,7 +2254,7 @@ set_internalvar (struct internalvar *var, struct value *val) error (_("Cannot overwrite convenience function %s"), var->name); /* Prepare new contents. 
*/ - switch (TYPE_CODE (check_typedef (value_type (val)))) + switch (check_typedef (value_type (val))->code ()) { case TYPE_CODE_VOID: new_kind = INTERNALVAR_VOID; @@ -2412,20 +2270,20 @@ set_internalvar (struct internalvar *var, struct value *val) default: new_kind = INTERNALVAR_VALUE; - new_data.value = value_copy (val); - new_data.value->modifiable = 1; + struct value *copy = value_copy (val); + copy->modifiable = 1; /* Force the value to be fetched from the target now, to avoid problems later when this internalvar is referenced and the target is gone or has changed. */ - if (value_lazy (new_data.value)) - value_fetch_lazy (new_data.value); + if (value_lazy (copy)) + value_fetch_lazy (copy); /* Release the value from the value chain to prevent it from being deleted by free_all_values. From here on this function should not call error () until new_data is installed into the var->u to avoid leaking memory. */ - release_value (new_data.value); + new_data.value = release_value (copy).release (); /* Internal variables which are created from values with a dynamic location don't need the location property of the origin anymore. @@ -2433,7 +2291,7 @@ set_internalvar (struct internalvar *var, struct value *val) when accessing the value. If we keep it, we would still refer to the origin value. Remove the location property in case it exist. */ - remove_dyn_prop (DYN_PROP_DATA_LOCATION, value_type (new_data.value)); + value_type (new_data.value)->remove_dyn_prop (DYN_PROP_DATA_LOCATION); break; } @@ -2487,7 +2345,7 @@ clear_internalvar (struct internalvar *var) switch (var->kind) { case INTERNALVAR_VALUE: - value_free (var->u.value); + value_decref (var->u.value); break; case INTERNALVAR_STRING: @@ -2558,38 +2416,48 @@ call_internal_function (struct gdbarch *gdbarch, the implementation of the sub-command that is created when registering an internal function. */ static void -function_command (char *command, int from_tty) +function_command (const char *command, int from_tty) { /* Do nothing. */ } -/* Clean up if an internal function's command is destroyed. */ -static void -function_destroyer (struct cmd_list_element *self, void *ignore) +/* Helper function that does the work for add_internal_function. */ + +static struct cmd_list_element * +do_add_internal_function (const char *name, const char *doc, + internal_function_fn handler, void *cookie) { - xfree ((char *) self->name); - xfree ((char *) self->doc); + struct internal_function *ifn; + struct internalvar *var = lookup_internalvar (name); + + ifn = create_internal_function (name, handler, cookie); + set_internalvar_function (var, ifn); + + return add_cmd (name, no_class, function_command, doc, &functionlist); } -/* Add a new internal function. NAME is the name of the function; DOC - is a documentation string describing the function. HANDLER is - called when the function is invoked. COOKIE is an arbitrary - pointer which is passed to HANDLER and is intended for "user - data". */ +/* See value.h. */ + void add_internal_function (const char *name, const char *doc, internal_function_fn handler, void *cookie) { - struct cmd_list_element *cmd; - struct internal_function *ifn; - struct internalvar *var = lookup_internalvar (name); + do_add_internal_function (name, doc, handler, cookie); +} - ifn = create_internal_function (name, handler, cookie); - set_internalvar_function (var, ifn); +/* See value.h. 
*/ - cmd = add_cmd (xstrdup (name), no_class, function_command, (char *) doc, - &functionlist); - cmd->destroyer = function_destroyer; +void +add_internal_function (gdb::unique_xmalloc_ptr &&name, + gdb::unique_xmalloc_ptr &&doc, + internal_function_fn handler, void *cookie) +{ + struct cmd_list_element *cmd + = do_add_internal_function (name.get (), doc.get (), handler, cookie); + doc.release (); + cmd->doc_allocated = 1; + name.release (); + cmd->name_allocated = 1; } /* Update VALUE before discarding OBJFILE. COPIED_TYPES is used to @@ -2638,18 +2506,14 @@ void preserve_values (struct objfile *objfile) { htab_t copied_types; - struct value_history_chunk *cur; struct internalvar *var; - int i; /* Create the hash table. We allocate on the objfile's obstack, since it is soon to be deleted. */ copied_types = create_copied_types_hash (objfile); - for (cur = value_history_chain; cur; cur = cur->next) - for (i = 0; i < VALUE_HISTORY_CHUNK; i++) - if (cur->values[i]) - preserve_one_value (cur->values[i], objfile, copied_types); + for (const value_ref_ptr &item : value_history) + preserve_one_value (item.get (), objfile, copied_types); for (var = internalvars; var; var = var->next) preserve_one_internalvar (var, objfile, copied_types); @@ -2677,18 +2541,18 @@ show_convenience (const char *ignore, int from_tty) } printf_filtered (("$%s = "), var->name); - TRY + try { struct value *val; val = value_of_internalvar (gdbarch, var); value_print (val, gdb_stdout, &opts); } - CATCH (ex, RETURN_MASK_ERROR) + catch (const gdb_exception_error &ex) { - fprintf_filtered (gdb_stdout, _(""), ex.message); + fprintf_styled (gdb_stdout, metadata_style.style (), + _(""), ex.what ()); } - END_CATCH printf_filtered (("\n")); } @@ -2706,47 +2570,42 @@ show_convenience (const char *ignore, int from_tty) } } -/* Return the TYPE_CODE_XMETHOD value corresponding to WORKER. */ + +/* See value.h. */ struct value * -value_of_xmethod (struct xmethod_worker *worker) +value_from_xmethod (xmethod_worker_up &&worker) { - if (worker->value == NULL) - { - struct value *v; + struct value *v; - v = allocate_value (builtin_type (target_gdbarch ())->xmethod); - v->lval = lval_xcallable; - v->location.xm_worker = worker; - v->modifiable = 0; - worker->value = v; - } + v = allocate_value (builtin_type (target_gdbarch ())->xmethod); + v->lval = lval_xcallable; + v->location.xm_worker = worker.release (); + v->modifiable = 0; - return worker->value; + return v; } /* Return the type of the result of TYPE_CODE_XMETHOD value METHOD. */ struct type * -result_type_of_xmethod (struct value *method, int argc, struct value **argv) +result_type_of_xmethod (struct value *method, gdb::array_view argv) { - gdb_assert (TYPE_CODE (value_type (method)) == TYPE_CODE_XMETHOD - && method->lval == lval_xcallable && argc > 0); + gdb_assert (value_type (method)->code () == TYPE_CODE_XMETHOD + && method->lval == lval_xcallable && !argv.empty ()); - return get_xmethod_result_type (method->location.xm_worker, - argv[0], argv + 1, argc - 1); + return method->location.xm_worker->get_result_type (argv[0], argv.slice (1)); } /* Call the xmethod corresponding to the TYPE_CODE_XMETHOD value METHOD. 
*/ struct value * -call_xmethod (struct value *method, int argc, struct value **argv) +call_xmethod (struct value *method, gdb::array_view argv) { - gdb_assert (TYPE_CODE (value_type (method)) == TYPE_CODE_XMETHOD - && method->lval == lval_xcallable && argc > 0); + gdb_assert (value_type (method)->code () == TYPE_CODE_XMETHOD + && method->lval == lval_xcallable && !argv.empty ()); - return invoke_xmethod (method->location.xm_worker, - argv[0], argv + 1, argc - 1); + return method->location.xm_worker->invoke (argv[0], argv.slice (1)); } /* Extract a value as a C number (either long or double). @@ -2764,18 +2623,6 @@ value_as_long (struct value *val) return unpack_long (value_type (val), value_contents (val)); } -DOUBLEST -value_as_double (struct value *val) -{ - DOUBLEST foo; - int inv; - - foo = unpack_double (value_type (val), value_contents (val), &inv); - if (inv) - error (_("Invalid floating value found in program.")); - return foo; -} - /* Extract a value as a C pointer. Does not deallocate the value. Note that val's type may not actually be a pointer; value_as_long handles all the cases. */ @@ -2830,8 +2677,8 @@ value_as_address (struct value *val) The following shortcut avoids this whole mess. If VAL is a function, just return its address directly. */ - if (TYPE_CODE (value_type (val)) == TYPE_CODE_FUNC - || TYPE_CODE (value_type (val)) == TYPE_CODE_METHOD) + if (value_type (val)->code () == TYPE_CODE_FUNC + || value_type (val)->code () == TYPE_CODE_METHOD) return value_address (val); val = coerce_array (val); @@ -2873,7 +2720,7 @@ value_as_address (struct value *val) converted to pointers; usually, the ABI doesn't either, but ABI-specific code is a more reasonable place to handle it. */ - if (TYPE_CODE (value_type (val)) != TYPE_CODE_PTR + if (value_type (val)->code () != TYPE_CODE_PTR && !TYPE_IS_REFERENCE (value_type (val)) && gdbarch_integer_to_address_p (gdbarch)) return gdbarch_integer_to_address (gdbarch, value_type (val), @@ -2900,8 +2747,8 @@ value_as_address (struct value *val) LONGEST unpack_long (struct type *type, const gdb_byte *valaddr) { - enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type)); - enum type_code code = TYPE_CODE (type); + enum bfd_endian byte_order = type_byte_order (type); + enum type_code code = type->code (); int len = TYPE_LENGTH (type); int nosign = TYPE_UNSIGNED (type); @@ -2916,16 +2763,20 @@ unpack_long (struct type *type, const gdb_byte *valaddr) case TYPE_CODE_CHAR: case TYPE_CODE_RANGE: case TYPE_CODE_MEMBERPTR: - if (nosign) - return extract_unsigned_integer (valaddr, len, byte_order); - else - return extract_signed_integer (valaddr, len, byte_order); + { + LONGEST result; + if (nosign) + result = extract_unsigned_integer (valaddr, len, byte_order); + else + result = extract_signed_integer (valaddr, len, byte_order); + if (code == TYPE_CODE_RANGE) + result += TYPE_RANGE_DATA (type)->bias; + return result; + } case TYPE_CODE_FLT: - return (LONGEST) extract_typed_floating (valaddr, type); - case TYPE_CODE_DECFLOAT: - return decimal_to_longest (valaddr, len, byte_order); + return target_float_to_longest (valaddr, type); case TYPE_CODE_PTR: case TYPE_CODE_REF: @@ -2937,67 +2788,6 @@ unpack_long (struct type *type, const gdb_byte *valaddr) default: error (_("Value can't be converted to integer.")); } - return 0; /* Placate lint. */ -} - -/* Return a double value from the specified type and address. - INVP points to an int which is set to 0 for valid value, - 1 for invalid value (bad float format). 
In either case, - the returned double is OK to use. Argument is in target - format, result is in host format. */ - -DOUBLEST -unpack_double (struct type *type, const gdb_byte *valaddr, int *invp) -{ - enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type)); - enum type_code code; - int len; - int nosign; - - *invp = 0; /* Assume valid. */ - type = check_typedef (type); - code = TYPE_CODE (type); - len = TYPE_LENGTH (type); - nosign = TYPE_UNSIGNED (type); - if (code == TYPE_CODE_FLT) - { - /* NOTE: cagney/2002-02-19: There was a test here to see if the - floating-point value was valid (using the macro - INVALID_FLOAT). That test/macro have been removed. - - It turns out that only the VAX defined this macro and then - only in a non-portable way. Fixing the portability problem - wouldn't help since the VAX floating-point code is also badly - bit-rotten. The target needs to add definitions for the - methods gdbarch_float_format and gdbarch_double_format - these - exactly describe the target floating-point format. The - problem here is that the corresponding floatformat_vax_f and - floatformat_vax_d values these methods should be set to are - also not defined either. Oops! - - Hopefully someone will add both the missing floatformat - definitions and the new cases for floatformat_is_valid (). */ - - if (!floatformat_is_valid (floatformat_from_type (type), valaddr)) - { - *invp = 1; - return 0.0; - } - - return extract_typed_floating (valaddr, type); - } - else if (code == TYPE_CODE_DECFLOAT) - return decimal_to_doublest (valaddr, len, byte_order); - else if (nosign) - { - /* Unsigned -- be sure we compensate for signed LONGEST. */ - return (ULONGEST) unpack_long (type, valaddr); - } - else - { - /* Signed -- we are OK with unpack_long. */ - return unpack_long (type, valaddr); - } } /* Unpack raw data (copied from debugee, target byte order) at VALADDR @@ -3021,6 +2811,21 @@ unpack_pointer (struct type *type, const gdb_byte *valaddr) return unpack_long (type, valaddr); } +bool +is_floating_value (struct value *val) +{ + struct type *type = check_typedef (value_type (val)); + + if (is_floating_type (type)) + { + if (!target_float_is_valid (value_contents (val), type)) + error (_("Invalid floating value found in program.")); + return true; + } + + return false; +} + /* Get the value of the FIELDNO'th field (which must be static) of TYPE. */ @@ -3048,14 +2853,12 @@ value_static_field (struct type *type, int fieldno) reported as non-debuggable symbols. 
*/ struct bound_minimal_symbol msym = lookup_minimal_symbol (phys_name, NULL, NULL); + struct type *field_type = TYPE_FIELD_TYPE (type, fieldno); if (!msym.minsym) - return allocate_optimized_out_value (type); + retval = allocate_optimized_out_value (field_type); else - { - retval = value_at_lazy (TYPE_FIELD_TYPE (type, fieldno), - BMSYMBOL_VALUE_ADDRESS (msym)); - } + retval = value_at_lazy (field_type, BMSYMBOL_VALUE_ADDRESS (msym)); } else retval = value_of_variable (sym.symbol, sym.block); @@ -3081,7 +2884,8 @@ set_value_enclosing_type (struct value *val, struct type *new_encl_type) { check_type_length_before_alloc (new_encl_type); val->contents - = (gdb_byte *) xrealloc (val->contents, TYPE_LENGTH (new_encl_type)); + .reset ((gdb_byte *) xrealloc (val->contents.release (), + TYPE_LENGTH (new_encl_type))); } val->enclosing_type = new_encl_type; @@ -3257,18 +3061,18 @@ value_fn_field (struct value **arg1p, struct fn_field *f, VALUE_LVAL (v) = lval_memory; if (sym) { - set_value_address (v, BLOCK_START (SYMBOL_BLOCK_VALUE (sym))); + set_value_address (v, BLOCK_ENTRY_PC (SYMBOL_BLOCK_VALUE (sym))); } else { /* The minimal symbol might point to a function descriptor; resolve it to the actual code address instead. */ struct objfile *objfile = msym.objfile; - struct gdbarch *gdbarch = get_objfile_arch (objfile); + struct gdbarch *gdbarch = objfile->arch (); set_value_address (v, gdbarch_convert_from_func_ptr_addr - (gdbarch, BMSYMBOL_VALUE_ADDRESS (msym), ¤t_target)); + (gdbarch, BMSYMBOL_VALUE_ADDRESS (msym), current_top_target ())); } if (arg1p) @@ -3286,26 +3090,13 @@ value_fn_field (struct value **arg1p, struct fn_field *f, -/* Unpack a bitfield of the specified FIELD_TYPE, from the object at - VALADDR, and store the result in *RESULT. - The bitfield starts at BITPOS bits and contains BITSIZE bits. - - Extracting bits depends on endianness of the machine. Compute the - number of least significant bits to discard. For big endian machines, - we compute the total number of bits in the anonymous object, subtract - off the bit count from the MSB of the object to the MSB of the - bitfield, then the size of the bitfield, which leaves the LSB discard - count. For little endian machines, the discard count is simply the - number of bits from the LSB of the anonymous object to the LSB of the - bitfield. - - If the field is signed, we also do sign extension. */ +/* See value.h. */ -static LONGEST +LONGEST unpack_bits_as_long (struct type *field_type, const gdb_byte *valaddr, LONGEST bitpos, LONGEST bitsize) { - enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (field_type)); + enum bfd_endian byte_order = type_byte_order (field_type); ULONGEST val; ULONGEST valmask; int lsbcount; @@ -3318,7 +3109,10 @@ unpack_bits_as_long (struct type *field_type, const gdb_byte *valaddr, if (bitsize) bytes_read = ((bitpos % 8) + bitsize + 7) / 8; else - bytes_read = TYPE_LENGTH (field_type); + { + bytes_read = TYPE_LENGTH (field_type); + bitsize = 8 * bytes_read; + } read_offset = bitpos / 8; @@ -3327,7 +3121,7 @@ unpack_bits_as_long (struct type *field_type, const gdb_byte *valaddr, /* Extract bits. See comment above. */ - if (gdbarch_bits_big_endian (get_type_arch (field_type))) + if (byte_order == BFD_ENDIAN_BIG) lsbcount = (bytes_read * 8 - bitpos % 8 - bitsize); else lsbcount = (bitpos % 8); @@ -3336,7 +3130,7 @@ unpack_bits_as_long (struct type *field_type, const gdb_byte *valaddr, /* If the field does not entirely fill a LONGEST, then zero the sign bits. 
If the field is signed, and is negative, then sign extend. */ - if ((bitsize > 0) && (bitsize < 8 * (int) sizeof (val))) + if (bitsize < 8 * (int) sizeof (val)) { valmask = (((ULONGEST) 1) << bitsize) - 1; val &= valmask; @@ -3411,7 +3205,7 @@ unpack_value_bitfield (struct value *dest_val, int dst_bit_offset; struct type *field_type = value_type (dest_val); - byte_order = gdbarch_byte_order (get_type_arch (field_type)); + byte_order = type_byte_order (field_type); /* First, unpack and sign extend the bitfield as if it was wholly valid. Optimized out/unavailable bits are read as zero, but @@ -3471,7 +3265,7 @@ void modify_field (struct type *type, gdb_byte *addr, LONGEST fieldval, LONGEST bitpos, LONGEST bitsize) { - enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type)); + enum bfd_endian byte_order = type_byte_order (type); ULONGEST oword; ULONGEST mask = (ULONGEST) -1 >> (8 * sizeof (ULONGEST) - bitsize); LONGEST bytesize; @@ -3503,7 +3297,7 @@ modify_field (struct type *type, gdb_byte *addr, oword = extract_unsigned_integer (addr, bytesize, byte_order); /* Shifting for bit field depends on endianness of the target machine. */ - if (gdbarch_bits_big_endian (get_type_arch (type))) + if (byte_order == BFD_ENDIAN_BIG) bitpos = bytesize * 8 - bitpos - bitsize; oword &= ~(mask << bitpos); @@ -3517,20 +3311,22 @@ modify_field (struct type *type, gdb_byte *addr, void pack_long (gdb_byte *buf, struct type *type, LONGEST num) { - enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type)); + enum bfd_endian byte_order = type_byte_order (type); LONGEST len; type = check_typedef (type); len = TYPE_LENGTH (type); - switch (TYPE_CODE (type)) + switch (type->code ()) { + case TYPE_CODE_RANGE: + num -= TYPE_RANGE_DATA (type)->bias; + /* Fall through. */ case TYPE_CODE_INT: case TYPE_CODE_CHAR: case TYPE_CODE_ENUM: case TYPE_CODE_FLAGS: case TYPE_CODE_BOOL: - case TYPE_CODE_RANGE: case TYPE_CODE_MEMBERPTR: store_signed_integer (buf, len, byte_order, num); break; @@ -3541,9 +3337,14 @@ pack_long (gdb_byte *buf, struct type *type, LONGEST num) store_typed_address (buf, type, (CORE_ADDR) num); break; + case TYPE_CODE_FLT: + case TYPE_CODE_DECFLOAT: + target_float_from_longest (buf, type, num); + break; + default: error (_("Unexpected type (%d) encountered for integer constant."), - TYPE_CODE (type)); + type->code ()); } } @@ -3558,9 +3359,9 @@ pack_unsigned_long (gdb_byte *buf, struct type *type, ULONGEST num) type = check_typedef (type); len = TYPE_LENGTH (type); - byte_order = gdbarch_byte_order (get_type_arch (type)); + byte_order = type_byte_order (type); - switch (TYPE_CODE (type)) + switch (type->code ()) { case TYPE_CODE_INT: case TYPE_CODE_CHAR: @@ -3578,10 +3379,15 @@ pack_unsigned_long (gdb_byte *buf, struct type *type, ULONGEST num) store_typed_address (buf, type, (CORE_ADDR) num); break; + case TYPE_CODE_FLT: + case TYPE_CODE_DECFLOAT: + target_float_from_ulongest (buf, type, num); + break; + default: error (_("Unexpected type (%d) encountered " "for unsigned integer constant."), - TYPE_CODE (type)); + type->code ()); } } @@ -3624,6 +3430,19 @@ value_from_pointer (struct type *type, CORE_ADDR addr) return val; } +/* Create and return a value object of TYPE containing the value D. The + TYPE must be of TYPE_CODE_FLT, and must be large enough to hold D once + it is converted to target format. 
*/ + +struct value * +value_from_host_double (struct type *type, double d) +{ + struct value *value = allocate_value (type); + gdb_assert (type->code () == TYPE_CODE_FLT); + target_float_from_host_double (value_contents_raw (value), + value_type (value), d); + return value; +} /* Create a value of type TYPE whose contents come from VALADDR, if it is non-null, and whose memory address (in the inferior) is @@ -3658,7 +3477,10 @@ value_from_contents_and_address (struct type *type, const gdb_byte *valaddr, CORE_ADDR address) { - struct type *resolved_type = resolve_dynamic_type (type, valaddr, address); + gdb::array_view view; + if (valaddr != nullptr) + view = gdb::make_array_view (valaddr, TYPE_LENGTH (type)); + struct type *resolved_type = resolve_dynamic_type (type, view, address); struct type *resolved_type_no_typedef = check_typedef (resolved_type); struct value *v; @@ -3687,32 +3509,6 @@ value_from_contents (struct type *type, const gdb_byte *contents) return result; } -struct value * -value_from_double (struct type *type, DOUBLEST num) -{ - struct value *val = allocate_value (type); - struct type *base_type = check_typedef (type); - enum type_code code = TYPE_CODE (base_type); - - if (code == TYPE_CODE_FLT) - { - store_typed_floating (value_contents_raw (val), base_type, num); - } - else - error (_("Unexpected type encountered for floating constant.")); - - return val; -} - -struct value * -value_from_decfloat (struct type *type, const gdb_byte *dec) -{ - struct value *val = allocate_value (type); - - memcpy (value_contents_raw (val), dec, TYPE_LENGTH (type)); - return val; -} - /* Extract a value from the history file. Input will be of the form $digits or $$digits. See block comment above 'write_dollar_variable' for details. */ @@ -3868,7 +3664,7 @@ coerce_array (struct value *arg) arg = coerce_ref (arg); type = check_typedef (value_type (arg)); - switch (TYPE_CODE (type)) + switch (type->code ()) { case TYPE_CODE_ARRAY: if (!TYPE_VECTOR (type) && current_language->c_style_arrays) @@ -3889,7 +3685,7 @@ enum return_value_convention struct_return_convention (struct gdbarch *gdbarch, struct value *function, struct type *value_type) { - enum type_code code = TYPE_CODE (value_type); + enum type_code code = value_type->code (); if (code == TYPE_CODE_ERROR) error (_("Function return type unknown.")); @@ -3907,7 +3703,7 @@ int using_struct_return (struct gdbarch *gdbarch, struct value *function, struct type *value_type) { - if (TYPE_CODE (value_type) == TYPE_CODE_VOID) + if (value_type->code () == TYPE_CODE_VOID) /* A void return value is never in memory. See also corresponding code in "print_return_value". */ return 0; @@ -3932,6 +3728,164 @@ value_initialized (const struct value *val) return val->initialized; } +/* Helper for value_fetch_lazy when the value is a bitfield. */ + +static void +value_fetch_lazy_bitfield (struct value *val) +{ + gdb_assert (value_bitsize (val) != 0); + + /* To read a lazy bitfield, read the entire enclosing value. This + prevents reading the same block of (possibly volatile) memory once + per bitfield. It would be even better to read only the containing + word, but we have no way to record that just specific bits of a + value have been fetched. */ + struct value *parent = value_parent (val); + + if (value_lazy (parent)) + value_fetch_lazy (parent); + + unpack_value_bitfield (val, value_bitpos (val), value_bitsize (val), + value_contents_for_printing (parent), + value_offset (val), parent); +} + +/* Helper for value_fetch_lazy when the value is in memory. 
*/ + +static void +value_fetch_lazy_memory (struct value *val) +{ + gdb_assert (VALUE_LVAL (val) == lval_memory); + + CORE_ADDR addr = value_address (val); + struct type *type = check_typedef (value_enclosing_type (val)); + + if (TYPE_LENGTH (type)) + read_value_memory (val, 0, value_stack (val), + addr, value_contents_all_raw (val), + type_length_units (type)); +} + +/* Helper for value_fetch_lazy when the value is in a register. */ + +static void +value_fetch_lazy_register (struct value *val) +{ + struct frame_info *next_frame; + int regnum; + struct type *type = check_typedef (value_type (val)); + struct value *new_val = val, *mark = value_mark (); + + /* Offsets are not supported here; lazy register values must + refer to the entire register. */ + gdb_assert (value_offset (val) == 0); + + while (VALUE_LVAL (new_val) == lval_register && value_lazy (new_val)) + { + struct frame_id next_frame_id = VALUE_NEXT_FRAME_ID (new_val); + + next_frame = frame_find_by_id (next_frame_id); + regnum = VALUE_REGNUM (new_val); + + gdb_assert (next_frame != NULL); + + /* Convertible register routines are used for multi-register + values and for interpretation in different types + (e.g. float or int from a double register). Lazy + register values should have the register's natural type, + so they do not apply. */ + gdb_assert (!gdbarch_convert_register_p (get_frame_arch (next_frame), + regnum, type)); + + /* FRAME was obtained, above, via VALUE_NEXT_FRAME_ID. + Since a "->next" operation was performed when setting + this field, we do not need to perform a "next" operation + again when unwinding the register. That's why + frame_unwind_register_value() is called here instead of + get_frame_register_value(). */ + new_val = frame_unwind_register_value (next_frame, regnum); + + /* If we get another lazy lval_register value, it means the + register is found by reading it from NEXT_FRAME's next frame. + frame_unwind_register_value should never return a value with + the frame id pointing to NEXT_FRAME. If it does, it means we + either have two consecutive frames with the same frame id + in the frame chain, or some code is trying to unwind + behind get_prev_frame's back (e.g., a frame unwind + sniffer trying to unwind), bypassing its validations. In + any case, it should always be an internal error to end up + in this situation. */ + if (VALUE_LVAL (new_val) == lval_register + && value_lazy (new_val) + && frame_id_eq (VALUE_NEXT_FRAME_ID (new_val), next_frame_id)) + internal_error (__FILE__, __LINE__, + _("infinite loop while fetching a register")); + } + + /* If it's still lazy (for instance, a saved register on the + stack), fetch it. */ + if (value_lazy (new_val)) + value_fetch_lazy (new_val); + + /* Copy the contents and the unavailability/optimized-out + meta-data from NEW_VAL to VAL. */ + set_value_lazy (val, 0); + value_contents_copy (val, value_embedded_offset (val), + new_val, value_embedded_offset (new_val), + type_length_units (type)); + + if (frame_debug) + { + struct gdbarch *gdbarch; + struct frame_info *frame; + /* VALUE_FRAME_ID is used here, instead of VALUE_NEXT_FRAME_ID, + so that the frame level will be shown correctly. */ + frame = frame_find_by_id (VALUE_FRAME_ID (val)); + regnum = VALUE_REGNUM (val); + gdbarch = get_frame_arch (frame); + + fprintf_unfiltered (gdb_stdlog, + "{ value_fetch_lazy " + "(frame=%d,regnum=%d(%s),...) 
", + frame_relative_level (frame), regnum, + user_reg_map_regnum_to_name (gdbarch, regnum)); + + fprintf_unfiltered (gdb_stdlog, "->"); + if (value_optimized_out (new_val)) + { + fprintf_unfiltered (gdb_stdlog, " "); + val_print_optimized_out (new_val, gdb_stdlog); + } + else + { + int i; + const gdb_byte *buf = value_contents (new_val); + + if (VALUE_LVAL (new_val) == lval_register) + fprintf_unfiltered (gdb_stdlog, " register=%d", + VALUE_REGNUM (new_val)); + else if (VALUE_LVAL (new_val) == lval_memory) + fprintf_unfiltered (gdb_stdlog, " address=%s", + paddress (gdbarch, + value_address (new_val))); + else + fprintf_unfiltered (gdb_stdlog, " computed"); + + fprintf_unfiltered (gdb_stdlog, " bytes="); + fprintf_unfiltered (gdb_stdlog, "["); + for (i = 0; i < register_size (gdbarch, regnum); i++) + fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]); + fprintf_unfiltered (gdb_stdlog, "]"); + } + + fprintf_unfiltered (gdb_stdlog, " }\n"); + } + + /* Dispose of the intermediate values. This prevents + watchpoints from trying to watch the saved frame pointer. */ + value_free_to_mark (mark); +} + /* Load the actual content of a lazy value. Fetch the data from the user's process and clear the lazy flag to indicate that the data in the buffer is valid. @@ -3948,152 +3902,14 @@ value_fetch_lazy (struct value *val) /* A value is either lazy, or fully fetched. The availability/validity is only established as we try to fetch a value. */ - gdb_assert (VEC_empty (range_s, val->optimized_out)); - gdb_assert (VEC_empty (range_s, val->unavailable)); + gdb_assert (val->optimized_out.empty ()); + gdb_assert (val->unavailable.empty ()); if (value_bitsize (val)) - { - /* To read a lazy bitfield, read the entire enclosing value. This - prevents reading the same block of (possibly volatile) memory once - per bitfield. It would be even better to read only the containing - word, but we have no way to record that just specific bits of a - value have been fetched. */ - struct type *type = check_typedef (value_type (val)); - struct value *parent = value_parent (val); - - if (value_lazy (parent)) - value_fetch_lazy (parent); - - unpack_value_bitfield (val, - value_bitpos (val), value_bitsize (val), - value_contents_for_printing (parent), - value_offset (val), parent); - } + value_fetch_lazy_bitfield (val); else if (VALUE_LVAL (val) == lval_memory) - { - CORE_ADDR addr = value_address (val); - struct type *type = check_typedef (value_enclosing_type (val)); - - if (TYPE_LENGTH (type)) - read_value_memory (val, 0, value_stack (val), - addr, value_contents_all_raw (val), - type_length_units (type)); - } + value_fetch_lazy_memory (val); else if (VALUE_LVAL (val) == lval_register) - { - struct frame_info *next_frame; - int regnum; - struct type *type = check_typedef (value_type (val)); - struct value *new_val = val, *mark = value_mark (); - - /* Offsets are not supported here; lazy register values must - refer to the entire register. */ - gdb_assert (value_offset (val) == 0); - - while (VALUE_LVAL (new_val) == lval_register && value_lazy (new_val)) - { - struct frame_id next_frame_id = VALUE_NEXT_FRAME_ID (new_val); - - next_frame = frame_find_by_id (next_frame_id); - regnum = VALUE_REGNUM (new_val); - - gdb_assert (next_frame != NULL); - - /* Convertible register routines are used for multi-register - values and for interpretation in different types - (e.g. float or int from a double register). Lazy - register values should have the register's natural type, - so they do not apply. 
*/ - gdb_assert (!gdbarch_convert_register_p (get_frame_arch (next_frame), - regnum, type)); - - /* FRAME was obtained, above, via VALUE_NEXT_FRAME_ID. - Since a "->next" operation was performed when setting - this field, we do not need to perform a "next" operation - again when unwinding the register. That's why - frame_unwind_register_value() is called here instead of - get_frame_register_value(). */ - new_val = frame_unwind_register_value (next_frame, regnum); - - /* If we get another lazy lval_register value, it means the - register is found by reading it from NEXT_FRAME's next frame. - frame_unwind_register_value should never return a value with - the frame id pointing to NEXT_FRAME. If it does, it means we - either have two consecutive frames with the same frame id - in the frame chain, or some code is trying to unwind - behind get_prev_frame's back (e.g., a frame unwind - sniffer trying to unwind), bypassing its validations. In - any case, it should always be an internal error to end up - in this situation. */ - if (VALUE_LVAL (new_val) == lval_register - && value_lazy (new_val) - && frame_id_eq (VALUE_NEXT_FRAME_ID (new_val), next_frame_id)) - internal_error (__FILE__, __LINE__, - _("infinite loop while fetching a register")); - } - - /* If it's still lazy (for instance, a saved register on the - stack), fetch it. */ - if (value_lazy (new_val)) - value_fetch_lazy (new_val); - - /* Copy the contents and the unavailability/optimized-out - meta-data from NEW_VAL to VAL. */ - set_value_lazy (val, 0); - value_contents_copy (val, value_embedded_offset (val), - new_val, value_embedded_offset (new_val), - type_length_units (type)); - - if (frame_debug) - { - struct gdbarch *gdbarch; - struct frame_info *frame; - /* VALUE_FRAME_ID is used here, instead of VALUE_NEXT_FRAME_ID, - so that the frame level will be shown correctly. */ - frame = frame_find_by_id (VALUE_FRAME_ID (val)); - regnum = VALUE_REGNUM (val); - gdbarch = get_frame_arch (frame); - - fprintf_unfiltered (gdb_stdlog, - "{ value_fetch_lazy " - "(frame=%d,regnum=%d(%s),...) ", - frame_relative_level (frame), regnum, - user_reg_map_regnum_to_name (gdbarch, regnum)); - - fprintf_unfiltered (gdb_stdlog, "->"); - if (value_optimized_out (new_val)) - { - fprintf_unfiltered (gdb_stdlog, " "); - val_print_optimized_out (new_val, gdb_stdlog); - } - else - { - int i; - const gdb_byte *buf = value_contents (new_val); - - if (VALUE_LVAL (new_val) == lval_register) - fprintf_unfiltered (gdb_stdlog, " register=%d", - VALUE_REGNUM (new_val)); - else if (VALUE_LVAL (new_val) == lval_memory) - fprintf_unfiltered (gdb_stdlog, " address=%s", - paddress (gdbarch, - value_address (new_val))); - else - fprintf_unfiltered (gdb_stdlog, " computed"); - - fprintf_unfiltered (gdb_stdlog, " bytes="); - fprintf_unfiltered (gdb_stdlog, "["); - for (i = 0; i < register_size (gdbarch, regnum); i++) - fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]); - fprintf_unfiltered (gdb_stdlog, "]"); - } - - fprintf_unfiltered (gdb_stdlog, " }\n"); - } - - /* Dispose of the intermediate values. This prevents - watchpoints from trying to watch the saved frame pointer. 
*/ - value_free_to_mark (mark); - } + value_fetch_lazy_register (val); else if (VALUE_LVAL (val) == lval_computed && value_computed_funcs (val)->read != NULL) value_computed_funcs (val)->read (val); @@ -4115,13 +3931,193 @@ isvoid_internal_fn (struct gdbarch *gdbarch, if (argc != 1) error (_("You must provide one argument for $_isvoid.")); - ret = TYPE_CODE (value_type (argv[0])) == TYPE_CODE_VOID; + ret = value_type (argv[0])->code () == TYPE_CODE_VOID; return value_from_longest (builtin_type (gdbarch)->builtin_int, ret); } +/* Implementation of the convenience function $_creal. Extracts the + real part from a complex number. */ + +static struct value * +creal_internal_fn (struct gdbarch *gdbarch, + const struct language_defn *language, + void *cookie, int argc, struct value **argv) +{ + if (argc != 1) + error (_("You must provide one argument for $_creal.")); + + value *cval = argv[0]; + type *ctype = check_typedef (value_type (cval)); + if (ctype->code () != TYPE_CODE_COMPLEX) + error (_("expected a complex number")); + return value_real_part (cval); +} + +/* Implementation of the convenience function $_cimag. Extracts the + imaginary part from a complex number. */ + +static struct value * +cimag_internal_fn (struct gdbarch *gdbarch, + const struct language_defn *language, + void *cookie, int argc, + struct value **argv) +{ + if (argc != 1) + error (_("You must provide one argument for $_cimag.")); + + value *cval = argv[0]; + type *ctype = check_typedef (value_type (cval)); + if (ctype->code () != TYPE_CODE_COMPLEX) + error (_("expected a complex number")); + return value_imaginary_part (cval); +} + +#if GDB_SELF_TEST +namespace selftests +{ + +/* Test the ranges_contain function. */ + +static void +test_ranges_contain () +{ + std::vector ranges; + range r; + + /* [10, 14] */ + r.offset = 10; + r.length = 5; + ranges.push_back (r); + + /* [20, 24] */ + r.offset = 20; + r.length = 5; + ranges.push_back (r); + + /* [2, 6] */ + SELF_CHECK (!ranges_contain (ranges, 2, 5)); + /* [9, 13] */ + SELF_CHECK (ranges_contain (ranges, 9, 5)); + /* [10, 11] */ + SELF_CHECK (ranges_contain (ranges, 10, 2)); + /* [10, 14] */ + SELF_CHECK (ranges_contain (ranges, 10, 5)); + /* [13, 18] */ + SELF_CHECK (ranges_contain (ranges, 13, 6)); + /* [14, 18] */ + SELF_CHECK (ranges_contain (ranges, 14, 5)); + /* [15, 18] */ + SELF_CHECK (!ranges_contain (ranges, 15, 4)); + /* [16, 19] */ + SELF_CHECK (!ranges_contain (ranges, 16, 4)); + /* [16, 21] */ + SELF_CHECK (ranges_contain (ranges, 16, 6)); + /* [21, 21] */ + SELF_CHECK (ranges_contain (ranges, 21, 1)); + /* [21, 25] */ + SELF_CHECK (ranges_contain (ranges, 21, 5)); + /* [26, 28] */ + SELF_CHECK (!ranges_contain (ranges, 26, 3)); +} + +/* Check that RANGES contains the same ranges as EXPECTED. */ + +static bool +check_ranges_vector (gdb::array_view ranges, + gdb::array_view expected) +{ + return ranges == expected; +} + +/* Test the insert_into_bit_range_vector function. 
*/ + +static void +test_insert_into_bit_range_vector () +{ + std::vector ranges; + + /* [10, 14] */ + { + insert_into_bit_range_vector (&ranges, 10, 5); + static const range expected[] = { + {10, 5} + }; + SELF_CHECK (check_ranges_vector (ranges, expected)); + } + + /* [10, 14] */ + { + insert_into_bit_range_vector (&ranges, 11, 4); + static const range expected = {10, 5}; + SELF_CHECK (check_ranges_vector (ranges, expected)); + } + + /* [10, 14] [20, 24] */ + { + insert_into_bit_range_vector (&ranges, 20, 5); + static const range expected[] = { + {10, 5}, + {20, 5}, + }; + SELF_CHECK (check_ranges_vector (ranges, expected)); + } + + /* [10, 14] [17, 24] */ + { + insert_into_bit_range_vector (&ranges, 17, 5); + static const range expected[] = { + {10, 5}, + {17, 8}, + }; + SELF_CHECK (check_ranges_vector (ranges, expected)); + } + + /* [2, 8] [10, 14] [17, 24] */ + { + insert_into_bit_range_vector (&ranges, 2, 7); + static const range expected[] = { + {2, 7}, + {10, 5}, + {17, 8}, + }; + SELF_CHECK (check_ranges_vector (ranges, expected)); + } + + /* [2, 14] [17, 24] */ + { + insert_into_bit_range_vector (&ranges, 9, 1); + static const range expected[] = { + {2, 13}, + {17, 8}, + }; + SELF_CHECK (check_ranges_vector (ranges, expected)); + } + + /* [2, 14] [17, 24] */ + { + insert_into_bit_range_vector (&ranges, 9, 1); + static const range expected[] = { + {2, 13}, + {17, 8}, + }; + SELF_CHECK (check_ranges_vector (ranges, expected)); + } + + /* [2, 33] */ + { + insert_into_bit_range_vector (&ranges, 4, 30); + static const range expected = {2, 32}; + SELF_CHECK (check_ranges_vector (ranges, expected)); + } +} + +} /* namespace selftests */ +#endif /* GDB_SELF_TEST */ + +void _initialize_values (); void -_initialize_values (void) +_initialize_values () { add_cmd ("convenience", no_class, show_convenience, _("\ Debugger convenience (\"$foo\") variables and functions.\n\ @@ -4159,6 +4155,20 @@ Usage: $_isvoid (expression)\n\ Return 1 if the expression is void, zero otherwise."), isvoid_internal_fn, NULL); + add_internal_function ("_creal", _("\ +Extract the real part of a complex number.\n\ +Usage: $_creal (expression)\n\ +Return the real part of a complex number, the type depends on the\n\ +type of a complex number."), + creal_internal_fn, NULL); + + add_internal_function ("_cimag", _("\ +Extract the imaginary part of a complex number.\n\ +Usage: $_cimag (expression)\n\ +Return the imaginary part of a complex number, the type depends on the\n\ +type of a complex number."), + cimag_internal_fn, NULL); + add_setshow_zuinteger_unlimited_cmd ("max-value-size", class_support, &max_value_size, _("\ Set maximum sized value gdb will load from the inferior."), _("\ @@ -4171,4 +4181,17 @@ prevents future values, larger than this size, from being allocated."), set_max_value_size, show_max_value_size, &setlist, &showlist); +#if GDB_SELF_TEST + selftests::register_test ("ranges_contain", selftests::test_ranges_contain); + selftests::register_test ("insert_into_bit_range_vector", + selftests::test_insert_into_bit_range_vector); +#endif +} + +/* See value.h. */ + +void +finalize_values () +{ + all_values.clear (); }
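
A note on the range bookkeeping exercised by the selftests above: the unavailable/optimized-out vectors are kept sorted by offset with overlapping and contiguous entries coalesced, which is why range::operator< compares starting offsets only and why ranges_contain can answer an overlap query with a single std::lower_bound probe plus a glance at the immediately preceding entry. Below is a minimal, self-contained sketch of that idea, using assumed names (demo_range, demo_overlap, demo_contains) and the same [10,14]/[20,24] fixture as test_ranges_contain; it illustrates the technique, it is not the GDB implementation.

/* Standalone illustration of a sorted, coalesced range vector and the
   lower_bound-based overlap query.  All names here are assumed for the
   sketch, not taken from GDB.  */

#include <algorithm>
#include <cassert>
#include <vector>

struct demo_range
{
  long offset;
  long length;

  /* Order by starting offset only; the vector is kept coalesced, so
     this is all std::lower_bound needs.  */
  bool operator< (const demo_range &other) const
  { return offset < other.offset; }
};

/* True if [OFF1, OFF1+LEN1) and [OFF2, OFF2+LEN2) intersect.  */

static bool
demo_overlap (long off1, long len1, long off2, long len2)
{
  return std::max (off1, off2) < std::min (off1 + len1, off2 + len2);
}

/* True if RANGES contains any range overlapping [OFFSET, OFFSET+LENGTH).
   Because the vector is sorted and coalesced, only the lower_bound hit
   and its predecessor can possibly overlap the query.  */

static bool
demo_contains (const std::vector<demo_range> &ranges,
               long offset, long length)
{
  demo_range what { offset, length };
  auto i = std::lower_bound (ranges.begin (), ranges.end (), what);

  if (i > ranges.begin ()
      && demo_overlap ((i - 1)->offset, (i - 1)->length, offset, length))
    return true;
  if (i < ranges.end ()
      && demo_overlap (i->offset, i->length, offset, length))
    return true;
  return false;
}

int
main ()
{
  /* Same fixture as test_ranges_contain: [10, 14] and [20, 24].  */
  std::vector<demo_range> ranges = { {10, 5}, {20, 5} };

  assert (!demo_contains (ranges, 2, 5));   /* [2, 6] misses entirely.  */
  assert (demo_contains (ranges, 9, 5));    /* [9, 13] clips the first range.  */
  assert (demo_contains (ranges, 13, 6));   /* [13, 18] starts inside it.  */
  assert (!demo_contains (ranges, 15, 4));  /* [15, 18] falls in the gap.  */
  assert (demo_contains (ranges, 21, 1));   /* [21, 21] is inside [20, 24].  */
  return 0;
}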
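
On the new convenience functions: $_creal and $_cimag are registered the same way as $_isvoid, via add_internal_function in _initialize_values, and both insist on a TYPE_CODE_COMPLEX argument, raising "expected a complex number" otherwise. In a session they are used inside ordinary expressions, e.g. "print $_creal (z)" and "print $_cimag (z)" for some complex-typed variable z (a hypothetical name here); per the registered help text, the result type follows the component type of the complex argument.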