1 /* Low level packing and unpacking of values for GDB, the GNU Debugger.
3 Copyright (C) 1986-2017 Free Software Foundation, Inc.
5 This file is part of GDB.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21 #include "arch-utils.h"
37 #include "cli/cli-decode.h"
38 #include "extension.h"
40 #include "tracepoint.h"
42 #include "user-regs.h"
44 #include "completer.h"
46 /* Definition of a user function. */
47 struct internal_function
49 /* The name of the function. It is a bit odd to have this in the
50 function itself -- the user might use a differently-named
51 convenience variable to hold the function. */
55 internal_function_fn handler
;
57 /* User data for the handler. */
61 /* Defines an [OFFSET, OFFSET + LENGTH) range. */
65 /* Lowest offset in the range. */
68 /* Length of the range. */
72 typedef struct range range_s
;
76 /* Returns true if the ranges defined by [offset1, offset1+len1) and
77 [offset2, offset2+len2) overlap. */
80 ranges_overlap (LONGEST offset1
, LONGEST len1
,
81 LONGEST offset2
, LONGEST len2
)
85 l
= std::max (offset1
, offset2
);
86 h
= std::min (offset1
+ len1
, offset2
+ len2
);
90 /* Returns true if the first argument is strictly less than the
91 second, useful for VEC_lower_bound. We keep ranges sorted by
92 offset and coalesce overlapping and contiguous ranges, so this just
93 compares the starting offset. */
96 range_lessthan (const range_s
*r1
, const range_s
*r2
)
98 return r1
->offset
< r2
->offset
;
101 /* Returns true if RANGES contains any range that overlaps [OFFSET,
105 ranges_contain (VEC(range_s
) *ranges
, LONGEST offset
, LONGEST length
)
110 what
.offset
= offset
;
111 what
.length
= length
;
113 /* We keep ranges sorted by offset and coalesce overlapping and
114 contiguous ranges, so to check if a range list contains a given
115 range, we can do a binary search for the position the given range
116 would be inserted if we only considered the starting OFFSET of
117 ranges. We call that position I. Since we also have LENGTH to
118 care for (this is a range afterall), we need to check if the
119 _previous_ range overlaps the I range. E.g.,
123 |---| |---| |------| ... |--|
128 In the case above, the binary search would return `I=1', meaning,
129 this OFFSET should be inserted at position 1, and the current
130 position 1 should be pushed further (and before 2). But, `0'
133 Then we need to check if the I range overlaps the I range itself.
138 |---| |---| |-------| ... |--|
144 i
= VEC_lower_bound (range_s
, ranges
, &what
, range_lessthan
);
148 struct range
*bef
= VEC_index (range_s
, ranges
, i
- 1);
150 if (ranges_overlap (bef
->offset
, bef
->length
, offset
, length
))
154 if (i
< VEC_length (range_s
, ranges
))
156 struct range
*r
= VEC_index (range_s
, ranges
, i
);
158 if (ranges_overlap (r
->offset
, r
->length
, offset
, length
))
165 static struct cmd_list_element
*functionlist
;
167 /* Note that the fields in this structure are arranged to save a bit
172 /* Type of value; either not an lval, or one of the various
173 different possible kinds of lval. */
176 /* Is it modifiable? Only relevant if lval != not_lval. */
177 unsigned int modifiable
: 1;
179 /* If zero, contents of this value are in the contents field. If
180 nonzero, contents are in inferior. If the lval field is lval_memory,
181 the contents are in inferior memory at location.address plus offset.
182 The lval field may also be lval_register.
184 WARNING: This field is used by the code which handles watchpoints
185 (see breakpoint.c) to decide whether a particular value can be
186 watched by hardware watchpoints. If the lazy flag is set for
187 some member of a value chain, it is assumed that this member of
188 the chain doesn't need to be watched as part of watching the
189 value itself. This is how GDB avoids watching the entire struct
190 or array when the user wants to watch a single struct member or
191 array element. If you ever change the way lazy flag is set and
192 reset, be sure to consider this use as well! */
193 unsigned int lazy
: 1;
195 /* If value is a variable, is it initialized or not. */
196 unsigned int initialized
: 1;
198 /* If value is from the stack. If this is set, read_stack will be
199 used instead of read_memory to enable extra caching. */
200 unsigned int stack
: 1;
202 /* If the value has been released. */
203 unsigned int released
: 1;
205 /* Location of value (if lval). */
208 /* If lval == lval_memory, this is the address in the inferior */
211 /*If lval == lval_register, the value is from a register. */
214 /* Register number. */
216 /* Frame ID of "next" frame to which a register value is relative.
217 If the register value is found relative to frame F, then the
218 frame id of F->next will be stored in next_frame_id. */
219 struct frame_id next_frame_id
;
222 /* Pointer to internal variable. */
223 struct internalvar
*internalvar
;
225 /* Pointer to xmethod worker. */
226 struct xmethod_worker
*xm_worker
;
228 /* If lval == lval_computed, this is a set of function pointers
229 to use to access and describe the value, and a closure pointer
233 /* Functions to call. */
234 const struct lval_funcs
*funcs
;
236 /* Closure for those functions to use. */
241 /* Describes offset of a value within lval of a structure in target
242 addressable memory units. Note also the member embedded_offset
246 /* Only used for bitfields; number of bits contained in them. */
249 /* Only used for bitfields; position of start of field. For
250 gdbarch_bits_big_endian=0 targets, it is the position of the LSB. For
251 gdbarch_bits_big_endian=1 targets, it is the position of the MSB. */
254 /* The number of references to this value. When a value is created,
255 the value chain holds a reference, so REFERENCE_COUNT is 1. If
256 release_value is called, this value is removed from the chain but
257 the caller of release_value now has a reference to this value.
258 The caller must arrange for a call to value_free later. */
261 /* Only used for bitfields; the containing value. This allows a
262 single read from the target when displaying multiple
264 struct value
*parent
;
266 /* Type of the value. */
269 /* If a value represents a C++ object, then the `type' field gives
270 the object's compile-time type. If the object actually belongs
271 to some class derived from `type', perhaps with other base
272 classes and additional members, then `type' is just a subobject
273 of the real thing, and the full object is probably larger than
274 `type' would suggest.
276 If `type' is a dynamic class (i.e. one with a vtable), then GDB
277 can actually determine the object's run-time type by looking at
278 the run-time type information in the vtable. When this
279 information is available, we may elect to read in the entire
280 object, for several reasons:
282 - When printing the value, the user would probably rather see the
283 full object, not just the limited portion apparent from the
286 - If `type' has virtual base classes, then even printing `type'
287 alone may require reaching outside the `type' portion of the
288 object to wherever the virtual base class has been stored.
290 When we store the entire object, `enclosing_type' is the run-time
291 type -- the complete object -- and `embedded_offset' is the
292 offset of `type' within that larger type, in target addressable memory
293 units. The value_contents() macro takes `embedded_offset' into account,
294 so most GDB code continues to see the `type' portion of the value, just
295 as the inferior would.
297 If `type' is a pointer to an object, then `enclosing_type' is a
298 pointer to the object's run-time type, and `pointed_to_offset' is
299 the offset in target addressable memory units from the full object
300 to the pointed-to object -- that is, the value `embedded_offset' would
301 have if we followed the pointer and fetched the complete object.
302 (I don't really see the point. Why not just determine the
303 run-time type when you indirect, and avoid the special case? The
304 contents don't matter until you indirect anyway.)
306 If we're not doing anything fancy, `enclosing_type' is equal to
307 `type', and `embedded_offset' is zero, so everything works
309 struct type
*enclosing_type
;
310 LONGEST embedded_offset
;
311 LONGEST pointed_to_offset
;
313 /* Values are stored in a chain, so that they can be deleted easily
314 over calls to the inferior. Values assigned to internal
315 variables, put into the value history or exposed to Python are
316 taken off this list. */
319 /* Actual contents of the value. Target byte-order. NULL or not
320 valid if lazy is nonzero. */
323 /* Unavailable ranges in CONTENTS. We mark unavailable ranges,
324 rather than available, since the common and default case is for a
325 value to be available. This is filled in at value read time.
326 The unavailable ranges are tracked in bits. Note that a contents
327 bit that has been optimized out doesn't really exist in the
328 program, so it can't be marked unavailable either. */
329 VEC(range_s
) *unavailable
;
331 /* Likewise, but for optimized out contents (a chunk of the value of
332 a variable that does not actually exist in the program). If LVAL
333 is lval_register, this is a register ($pc, $sp, etc., never a
334 program variable) that has not been saved in the frame. Not
335 saved registers and optimized-out program variables values are
336 treated pretty much the same, except not-saved registers have a
337 different string representation and related error strings. */
338 VEC(range_s
) *optimized_out
;
344 get_value_arch (const struct value
*value
)
346 return get_type_arch (value_type (value
));
350 value_bits_available (const struct value
*value
, LONGEST offset
, LONGEST length
)
352 gdb_assert (!value
->lazy
);
354 return !ranges_contain (value
->unavailable
, offset
, length
);
358 value_bytes_available (const struct value
*value
,
359 LONGEST offset
, LONGEST length
)
361 return value_bits_available (value
,
362 offset
* TARGET_CHAR_BIT
,
363 length
* TARGET_CHAR_BIT
);
367 value_bits_any_optimized_out (const struct value
*value
, int bit_offset
, int bit_length
)
369 gdb_assert (!value
->lazy
);
371 return ranges_contain (value
->optimized_out
, bit_offset
, bit_length
);
375 value_entirely_available (struct value
*value
)
377 /* We can only tell whether the whole value is available when we try
380 value_fetch_lazy (value
);
382 if (VEC_empty (range_s
, value
->unavailable
))
387 /* Returns true if VALUE is entirely covered by RANGES. If the value
388 is lazy, it'll be read now. Note that RANGE is a pointer to
389 pointer because reading the value might change *RANGE. */
392 value_entirely_covered_by_range_vector (struct value
*value
,
393 VEC(range_s
) **ranges
)
395 /* We can only tell whether the whole value is optimized out /
396 unavailable when we try to read it. */
398 value_fetch_lazy (value
);
400 if (VEC_length (range_s
, *ranges
) == 1)
402 struct range
*t
= VEC_index (range_s
, *ranges
, 0);
405 && t
->length
== (TARGET_CHAR_BIT
406 * TYPE_LENGTH (value_enclosing_type (value
))))
414 value_entirely_unavailable (struct value
*value
)
416 return value_entirely_covered_by_range_vector (value
, &value
->unavailable
);
420 value_entirely_optimized_out (struct value
*value
)
422 return value_entirely_covered_by_range_vector (value
, &value
->optimized_out
);
425 /* Insert into the vector pointed to by VECTORP the bit range starting of
426 OFFSET bits, and extending for the next LENGTH bits. */
429 insert_into_bit_range_vector (VEC(range_s
) **vectorp
,
430 LONGEST offset
, LONGEST length
)
435 /* Insert the range sorted. If there's overlap or the new range
436 would be contiguous with an existing range, merge. */
438 newr
.offset
= offset
;
439 newr
.length
= length
;
441 /* Do a binary search for the position the given range would be
442 inserted if we only considered the starting OFFSET of ranges.
443 Call that position I. Since we also have LENGTH to care for
444 (this is a range afterall), we need to check if the _previous_
445 range overlaps the I range. E.g., calling R the new range:
447 #1 - overlaps with previous
451 |---| |---| |------| ... |--|
456 In the case #1 above, the binary search would return `I=1',
457 meaning, this OFFSET should be inserted at position 1, and the
458 current position 1 should be pushed further (and become 2). But,
459 note that `0' overlaps with R, so we want to merge them.
461 A similar consideration needs to be taken if the new range would
462 be contiguous with the previous range:
464 #2 - contiguous with previous
468 |--| |---| |------| ... |--|
473 If there's no overlap with the previous range, as in:
475 #3 - not overlapping and not contiguous
479 |--| |---| |------| ... |--|
486 #4 - R is the range with lowest offset
490 |--| |---| |------| ... |--|
495 ... we just push the new range to I.
497 All the 4 cases above need to consider that the new range may
498 also overlap several of the ranges that follow, or that R may be
499 contiguous with the following range, and merge. E.g.,
501 #5 - overlapping following ranges
504 |------------------------|
505 |--| |---| |------| ... |--|
514 |--| |---| |------| ... |--|
521 i
= VEC_lower_bound (range_s
, *vectorp
, &newr
, range_lessthan
);
524 struct range
*bef
= VEC_index (range_s
, *vectorp
, i
- 1);
526 if (ranges_overlap (bef
->offset
, bef
->length
, offset
, length
))
529 ULONGEST l
= std::min (bef
->offset
, offset
);
530 ULONGEST h
= std::max (bef
->offset
+ bef
->length
, offset
+ length
);
536 else if (offset
== bef
->offset
+ bef
->length
)
539 bef
->length
+= length
;
545 VEC_safe_insert (range_s
, *vectorp
, i
, &newr
);
551 VEC_safe_insert (range_s
, *vectorp
, i
, &newr
);
554 /* Check whether the ranges following the one we've just added or
555 touched can be folded in (#5 above). */
556 if (i
+ 1 < VEC_length (range_s
, *vectorp
))
563 /* Get the range we just touched. */
564 t
= VEC_index (range_s
, *vectorp
, i
);
568 for (; VEC_iterate (range_s
, *vectorp
, i
, r
); i
++)
569 if (r
->offset
<= t
->offset
+ t
->length
)
573 l
= std::min (t
->offset
, r
->offset
);
574 h
= std::max (t
->offset
+ t
->length
, r
->offset
+ r
->length
);
583 /* If we couldn't merge this one, we won't be able to
584 merge following ones either, since the ranges are
585 always sorted by OFFSET. */
590 VEC_block_remove (range_s
, *vectorp
, next
, removed
);
595 mark_value_bits_unavailable (struct value
*value
,
596 LONGEST offset
, LONGEST length
)
598 insert_into_bit_range_vector (&value
->unavailable
, offset
, length
);
602 mark_value_bytes_unavailable (struct value
*value
,
603 LONGEST offset
, LONGEST length
)
605 mark_value_bits_unavailable (value
,
606 offset
* TARGET_CHAR_BIT
,
607 length
* TARGET_CHAR_BIT
);
610 /* Find the first range in RANGES that overlaps the range defined by
611 OFFSET and LENGTH, starting at element POS in the RANGES vector,
612 Returns the index into RANGES where such overlapping range was
613 found, or -1 if none was found. */
616 find_first_range_overlap (VEC(range_s
) *ranges
, int pos
,
617 LONGEST offset
, LONGEST length
)
622 for (i
= pos
; VEC_iterate (range_s
, ranges
, i
, r
); i
++)
623 if (ranges_overlap (r
->offset
, r
->length
, offset
, length
))
629 /* Compare LENGTH_BITS of memory at PTR1 + OFFSET1_BITS with the memory at
630 PTR2 + OFFSET2_BITS. Return 0 if the memory is the same, otherwise
633 It must always be the case that:
634 OFFSET1_BITS % TARGET_CHAR_BIT == OFFSET2_BITS % TARGET_CHAR_BIT
636 It is assumed that memory can be accessed from:
637 PTR + (OFFSET_BITS / TARGET_CHAR_BIT)
639 PTR + ((OFFSET_BITS + LENGTH_BITS + TARGET_CHAR_BIT - 1)
640 / TARGET_CHAR_BIT) */
642 memcmp_with_bit_offsets (const gdb_byte
*ptr1
, size_t offset1_bits
,
643 const gdb_byte
*ptr2
, size_t offset2_bits
,
646 gdb_assert (offset1_bits
% TARGET_CHAR_BIT
647 == offset2_bits
% TARGET_CHAR_BIT
);
649 if (offset1_bits
% TARGET_CHAR_BIT
!= 0)
652 gdb_byte mask
, b1
, b2
;
654 /* The offset from the base pointers PTR1 and PTR2 is not a complete
655 number of bytes. A number of bits up to either the next exact
656 byte boundary, or LENGTH_BITS (which ever is sooner) will be
658 bits
= TARGET_CHAR_BIT
- offset1_bits
% TARGET_CHAR_BIT
;
659 gdb_assert (bits
< sizeof (mask
) * TARGET_CHAR_BIT
);
660 mask
= (1 << bits
) - 1;
662 if (length_bits
< bits
)
664 mask
&= ~(gdb_byte
) ((1 << (bits
- length_bits
)) - 1);
668 /* Now load the two bytes and mask off the bits we care about. */
669 b1
= *(ptr1
+ offset1_bits
/ TARGET_CHAR_BIT
) & mask
;
670 b2
= *(ptr2
+ offset2_bits
/ TARGET_CHAR_BIT
) & mask
;
675 /* Now update the length and offsets to take account of the bits
676 we've just compared. */
678 offset1_bits
+= bits
;
679 offset2_bits
+= bits
;
682 if (length_bits
% TARGET_CHAR_BIT
!= 0)
686 gdb_byte mask
, b1
, b2
;
688 /* The length is not an exact number of bytes. After the previous
689 IF.. block then the offsets are byte aligned, or the
690 length is zero (in which case this code is not reached). Compare
691 a number of bits at the end of the region, starting from an exact
693 bits
= length_bits
% TARGET_CHAR_BIT
;
694 o1
= offset1_bits
+ length_bits
- bits
;
695 o2
= offset2_bits
+ length_bits
- bits
;
697 gdb_assert (bits
< sizeof (mask
) * TARGET_CHAR_BIT
);
698 mask
= ((1 << bits
) - 1) << (TARGET_CHAR_BIT
- bits
);
700 gdb_assert (o1
% TARGET_CHAR_BIT
== 0);
701 gdb_assert (o2
% TARGET_CHAR_BIT
== 0);
703 b1
= *(ptr1
+ o1
/ TARGET_CHAR_BIT
) & mask
;
704 b2
= *(ptr2
+ o2
/ TARGET_CHAR_BIT
) & mask
;
714 /* We've now taken care of any stray "bits" at the start, or end of
715 the region to compare, the remainder can be covered with a simple
717 gdb_assert (offset1_bits
% TARGET_CHAR_BIT
== 0);
718 gdb_assert (offset2_bits
% TARGET_CHAR_BIT
== 0);
719 gdb_assert (length_bits
% TARGET_CHAR_BIT
== 0);
721 return memcmp (ptr1
+ offset1_bits
/ TARGET_CHAR_BIT
,
722 ptr2
+ offset2_bits
/ TARGET_CHAR_BIT
,
723 length_bits
/ TARGET_CHAR_BIT
);
726 /* Length is zero, regions match. */
730 /* Helper struct for find_first_range_overlap_and_match and
731 value_contents_bits_eq. Keep track of which slot of a given ranges
732 vector have we last looked at. */
734 struct ranges_and_idx
737 VEC(range_s
) *ranges
;
739 /* The range we've last found in RANGES. Given ranges are sorted,
740 we can start the next lookup here. */
744 /* Helper function for value_contents_bits_eq. Compare LENGTH bits of
745 RP1's ranges starting at OFFSET1 bits with LENGTH bits of RP2's
746 ranges starting at OFFSET2 bits. Return true if the ranges match
747 and fill in *L and *H with the overlapping window relative to
748 (both) OFFSET1 or OFFSET2. */
751 find_first_range_overlap_and_match (struct ranges_and_idx
*rp1
,
752 struct ranges_and_idx
*rp2
,
753 LONGEST offset1
, LONGEST offset2
,
754 LONGEST length
, ULONGEST
*l
, ULONGEST
*h
)
756 rp1
->idx
= find_first_range_overlap (rp1
->ranges
, rp1
->idx
,
758 rp2
->idx
= find_first_range_overlap (rp2
->ranges
, rp2
->idx
,
761 if (rp1
->idx
== -1 && rp2
->idx
== -1)
767 else if (rp1
->idx
== -1 || rp2
->idx
== -1)
775 r1
= VEC_index (range_s
, rp1
->ranges
, rp1
->idx
);
776 r2
= VEC_index (range_s
, rp2
->ranges
, rp2
->idx
);
778 /* Get the unavailable windows intersected by the incoming
779 ranges. The first and last ranges that overlap the argument
780 range may be wider than said incoming arguments ranges. */
781 l1
= std::max (offset1
, r1
->offset
);
782 h1
= std::min (offset1
+ length
, r1
->offset
+ r1
->length
);
784 l2
= std::max (offset2
, r2
->offset
);
785 h2
= std::min (offset2
+ length
, offset2
+ r2
->length
);
787 /* Make them relative to the respective start offsets, so we can
788 compare them for equality. */
795 /* Different ranges, no match. */
796 if (l1
!= l2
|| h1
!= h2
)
805 /* Helper function for value_contents_eq. The only difference is that
806 this function is bit rather than byte based.
808 Compare LENGTH bits of VAL1's contents starting at OFFSET1 bits
809 with LENGTH bits of VAL2's contents starting at OFFSET2 bits.
810 Return true if the available bits match. */
813 value_contents_bits_eq (const struct value
*val1
, int offset1
,
814 const struct value
*val2
, int offset2
,
817 /* Each array element corresponds to a ranges source (unavailable,
818 optimized out). '1' is for VAL1, '2' for VAL2. */
819 struct ranges_and_idx rp1
[2], rp2
[2];
821 /* See function description in value.h. */
822 gdb_assert (!val1
->lazy
&& !val2
->lazy
);
824 /* We shouldn't be trying to compare past the end of the values. */
825 gdb_assert (offset1
+ length
826 <= TYPE_LENGTH (val1
->enclosing_type
) * TARGET_CHAR_BIT
);
827 gdb_assert (offset2
+ length
828 <= TYPE_LENGTH (val2
->enclosing_type
) * TARGET_CHAR_BIT
);
830 memset (&rp1
, 0, sizeof (rp1
));
831 memset (&rp2
, 0, sizeof (rp2
));
832 rp1
[0].ranges
= val1
->unavailable
;
833 rp2
[0].ranges
= val2
->unavailable
;
834 rp1
[1].ranges
= val1
->optimized_out
;
835 rp2
[1].ranges
= val2
->optimized_out
;
839 ULONGEST l
= 0, h
= 0; /* init for gcc -Wall */
842 for (i
= 0; i
< 2; i
++)
844 ULONGEST l_tmp
, h_tmp
;
846 /* The contents only match equal if the invalid/unavailable
847 contents ranges match as well. */
848 if (!find_first_range_overlap_and_match (&rp1
[i
], &rp2
[i
],
849 offset1
, offset2
, length
,
853 /* We're interested in the lowest/first range found. */
854 if (i
== 0 || l_tmp
< l
)
861 /* Compare the available/valid contents. */
862 if (memcmp_with_bit_offsets (val1
->contents
, offset1
,
863 val2
->contents
, offset2
, l
) != 0)
875 value_contents_eq (const struct value
*val1
, LONGEST offset1
,
876 const struct value
*val2
, LONGEST offset2
,
879 return value_contents_bits_eq (val1
, offset1
* TARGET_CHAR_BIT
,
880 val2
, offset2
* TARGET_CHAR_BIT
,
881 length
* TARGET_CHAR_BIT
);
884 /* Prototypes for local functions. */
886 static void show_values (char *, int);
888 static void show_convenience (char *, int);
891 /* The value-history records all the values printed
892 by print commands during this session. Each chunk
893 records 60 consecutive values. The first chunk on
894 the chain records the most recent values.
895 The total number of values is in value_history_count. */
#define VALUE_HISTORY_CHUNK 60

/* One link of the value history: a fixed-size block of values plus a
   pointer to the next (older) chunk.  */
struct value_history_chunk
{
  struct value_history_chunk *next;
  struct value *values[VALUE_HISTORY_CHUNK];
};
905 /* Chain of chunks now in use. */
907 static struct value_history_chunk
*value_history_chain
;
909 static int value_history_count
; /* Abs number of last entry stored. */
912 /* List of all value objects currently allocated
913 (except for those released by calls to release_value)
914 This is so they can be freed after each command. */
916 static struct value
*all_values
;
918 /* Allocate a lazy value for type TYPE. Its actual content is
919 "lazily" allocated too: the content field of the return value is
920 NULL; it will be allocated when it is fetched from the target. */
923 allocate_value_lazy (struct type
*type
)
927 /* Call check_typedef on our type to make sure that, if TYPE
928 is a TYPE_CODE_TYPEDEF, its length is set to the length
929 of the target type instead of zero. However, we do not
930 replace the typedef type by the target type, because we want
931 to keep the typedef in order to be able to set the VAL's type
932 description correctly. */
933 check_typedef (type
);
935 val
= XCNEW (struct value
);
936 val
->contents
= NULL
;
937 val
->next
= all_values
;
940 val
->enclosing_type
= type
;
941 VALUE_LVAL (val
) = not_lval
;
942 val
->location
.address
= 0;
947 val
->embedded_offset
= 0;
948 val
->pointed_to_offset
= 0;
950 val
->initialized
= 1; /* Default to initialized. */
952 /* Values start out on the all_values chain. */
953 val
->reference_count
= 1;
958 /* The maximum size, in bytes, that GDB will try to allocate for a value.
959 The initial value of 64k was not selected for any specific reason, it is
960 just a reasonable starting point. */
962 static int max_value_size
= 65536; /* 64k bytes */
964 /* It is critical that the MAX_VALUE_SIZE is at least as big as the size of
965 LONGEST, otherwise GDB will not be able to parse integer values from the
966 CLI; for example if the MAX_VALUE_SIZE could be set to 1 then GDB would
967 be unable to parse "set max-value-size 2".
969 As we want a consistent GDB experience across hosts with different sizes
970 of LONGEST, this arbitrary minimum value was selected, so long as this
971 is bigger than LONGEST on all GDB supported hosts we're fine. */
973 #define MIN_VALUE_FOR_MAX_VALUE_SIZE 16
974 gdb_static_assert (sizeof (LONGEST
) <= MIN_VALUE_FOR_MAX_VALUE_SIZE
);
976 /* Implement the "set max-value-size" command. */
979 set_max_value_size (char *args
, int from_tty
,
980 struct cmd_list_element
*c
)
982 gdb_assert (max_value_size
== -1 || max_value_size
>= 0);
984 if (max_value_size
> -1 && max_value_size
< MIN_VALUE_FOR_MAX_VALUE_SIZE
)
986 max_value_size
= MIN_VALUE_FOR_MAX_VALUE_SIZE
;
987 error (_("max-value-size set too low, increasing to %d bytes"),
992 /* Implement the "show max-value-size" command. */
995 show_max_value_size (struct ui_file
*file
, int from_tty
,
996 struct cmd_list_element
*c
, const char *value
)
998 if (max_value_size
== -1)
999 fprintf_filtered (file
, _("Maximum value size is unlimited.\n"));
1001 fprintf_filtered (file
, _("Maximum value size is %d bytes.\n"),
1005 /* Called before we attempt to allocate or reallocate a buffer for the
1006 contents of a value. TYPE is the type of the value for which we are
1007 allocating the buffer. If the buffer is too large (based on the user
1008 controllable setting) then throw an error. If this function returns
1009 then we should attempt to allocate the buffer. */
1012 check_type_length_before_alloc (const struct type
*type
)
1014 unsigned int length
= TYPE_LENGTH (type
);
1016 if (max_value_size
> -1 && length
> max_value_size
)
1018 if (TYPE_NAME (type
) != NULL
)
1019 error (_("value of type `%s' requires %u bytes, which is more "
1020 "than max-value-size"), TYPE_NAME (type
), length
);
1022 error (_("value requires %u bytes, which is more than "
1023 "max-value-size"), length
);
1027 /* Allocate the contents of VAL if it has not been allocated yet. */
1030 allocate_value_contents (struct value
*val
)
1034 check_type_length_before_alloc (val
->enclosing_type
);
1036 = (gdb_byte
*) xzalloc (TYPE_LENGTH (val
->enclosing_type
));
1040 /* Allocate a value and its contents for type TYPE. */
1043 allocate_value (struct type
*type
)
1045 struct value
*val
= allocate_value_lazy (type
);
1047 allocate_value_contents (val
);
1052 /* Allocate a value that has the correct length
1053 for COUNT repetitions of type TYPE. */
1056 allocate_repeat_value (struct type
*type
, int count
)
1058 int low_bound
= current_language
->string_lower_bound
; /* ??? */
1059 /* FIXME-type-allocation: need a way to free this type when we are
1061 struct type
*array_type
1062 = lookup_array_range_type (type
, low_bound
, count
+ low_bound
- 1);
1064 return allocate_value (array_type
);
1068 allocate_computed_value (struct type
*type
,
1069 const struct lval_funcs
*funcs
,
1072 struct value
*v
= allocate_value_lazy (type
);
1074 VALUE_LVAL (v
) = lval_computed
;
1075 v
->location
.computed
.funcs
= funcs
;
1076 v
->location
.computed
.closure
= closure
;
1081 /* Allocate NOT_LVAL value for type TYPE being OPTIMIZED_OUT. */
1084 allocate_optimized_out_value (struct type
*type
)
1086 struct value
*retval
= allocate_value_lazy (type
);
1088 mark_value_bytes_optimized_out (retval
, 0, TYPE_LENGTH (type
));
1089 set_value_lazy (retval
, 0);
1093 /* Accessor methods. */
1096 value_next (const struct value
*value
)
1102 value_type (const struct value
*value
)
1107 deprecated_set_value_type (struct value
*value
, struct type
*type
)
1113 value_offset (const struct value
*value
)
1115 return value
->offset
;
1118 set_value_offset (struct value
*value
, LONGEST offset
)
1120 value
->offset
= offset
;
1124 value_bitpos (const struct value
*value
)
1126 return value
->bitpos
;
1129 set_value_bitpos (struct value
*value
, LONGEST bit
)
1131 value
->bitpos
= bit
;
1135 value_bitsize (const struct value
*value
)
1137 return value
->bitsize
;
1140 set_value_bitsize (struct value
*value
, LONGEST bit
)
1142 value
->bitsize
= bit
;
1146 value_parent (const struct value
*value
)
1148 return value
->parent
;
1154 set_value_parent (struct value
*value
, struct value
*parent
)
1156 struct value
*old
= value
->parent
;
1158 value
->parent
= parent
;
1160 value_incref (parent
);
1165 value_contents_raw (struct value
*value
)
1167 struct gdbarch
*arch
= get_value_arch (value
);
1168 int unit_size
= gdbarch_addressable_memory_unit_size (arch
);
1170 allocate_value_contents (value
);
1171 return value
->contents
+ value
->embedded_offset
* unit_size
;
1175 value_contents_all_raw (struct value
*value
)
1177 allocate_value_contents (value
);
1178 return value
->contents
;
1182 value_enclosing_type (const struct value
*value
)
1184 return value
->enclosing_type
;
1187 /* Look at value.h for description. */
1190 value_actual_type (struct value
*value
, int resolve_simple_types
,
1191 int *real_type_found
)
1193 struct value_print_options opts
;
1194 struct type
*result
;
1196 get_user_print_options (&opts
);
1198 if (real_type_found
)
1199 *real_type_found
= 0;
1200 result
= value_type (value
);
1201 if (opts
.objectprint
)
1203 /* If result's target type is TYPE_CODE_STRUCT, proceed to
1204 fetch its rtti type. */
1205 if ((TYPE_CODE (result
) == TYPE_CODE_PTR
|| TYPE_IS_REFERENCE (result
))
1206 && TYPE_CODE (check_typedef (TYPE_TARGET_TYPE (result
)))
1208 && !value_optimized_out (value
))
1210 struct type
*real_type
;
1212 real_type
= value_rtti_indirect_type (value
, NULL
, NULL
, NULL
);
1215 if (real_type_found
)
1216 *real_type_found
= 1;
1220 else if (resolve_simple_types
)
1222 if (real_type_found
)
1223 *real_type_found
= 1;
1224 result
= value_enclosing_type (value
);
1232 error_value_optimized_out (void)
1234 error (_("value has been optimized out"));
1238 require_not_optimized_out (const struct value
*value
)
1240 if (!VEC_empty (range_s
, value
->optimized_out
))
1242 if (value
->lval
== lval_register
)
1243 error (_("register has not been saved in frame"));
1245 error_value_optimized_out ();
1250 require_available (const struct value
*value
)
1252 if (!VEC_empty (range_s
, value
->unavailable
))
1253 throw_error (NOT_AVAILABLE_ERROR
, _("value is not available"));
1257 value_contents_for_printing (struct value
*value
)
1260 value_fetch_lazy (value
);
1261 return value
->contents
;
1265 value_contents_for_printing_const (const struct value
*value
)
1267 gdb_assert (!value
->lazy
);
1268 return value
->contents
;
1272 value_contents_all (struct value
*value
)
1274 const gdb_byte
*result
= value_contents_for_printing (value
);
1275 require_not_optimized_out (value
);
1276 require_available (value
);
1280 /* Copy ranges in SRC_RANGE that overlap [SRC_BIT_OFFSET,
1281 SRC_BIT_OFFSET+BIT_LENGTH) ranges into *DST_RANGE, adjusted. */
1284 ranges_copy_adjusted (VEC (range_s
) **dst_range
, int dst_bit_offset
,
1285 VEC (range_s
) *src_range
, int src_bit_offset
,
1291 for (i
= 0; VEC_iterate (range_s
, src_range
, i
, r
); i
++)
1295 l
= std::max (r
->offset
, (LONGEST
) src_bit_offset
);
1296 h
= std::min (r
->offset
+ r
->length
,
1297 (LONGEST
) src_bit_offset
+ bit_length
);
1300 insert_into_bit_range_vector (dst_range
,
1301 dst_bit_offset
+ (l
- src_bit_offset
),
1306 /* Copy the ranges metadata in SRC that overlaps [SRC_BIT_OFFSET,
1307 SRC_BIT_OFFSET+BIT_LENGTH) into DST, adjusted. */
1310 value_ranges_copy_adjusted (struct value
*dst
, int dst_bit_offset
,
1311 const struct value
*src
, int src_bit_offset
,
1314 ranges_copy_adjusted (&dst
->unavailable
, dst_bit_offset
,
1315 src
->unavailable
, src_bit_offset
,
1317 ranges_copy_adjusted (&dst
->optimized_out
, dst_bit_offset
,
1318 src
->optimized_out
, src_bit_offset
,
1322 /* Copy LENGTH target addressable memory units of SRC value's (all) contents
1323 (value_contents_all) starting at SRC_OFFSET, into DST value's (all)
1324 contents, starting at DST_OFFSET. If unavailable contents are
1325 being copied from SRC, the corresponding DST contents are marked
1326 unavailable accordingly. Neither DST nor SRC may be lazy
1329 It is assumed the contents of DST in the [DST_OFFSET,
1330 DST_OFFSET+LENGTH) range are wholly available. */
1333 value_contents_copy_raw (struct value
*dst
, LONGEST dst_offset
,
1334 struct value
*src
, LONGEST src_offset
, LONGEST length
)
1336 LONGEST src_bit_offset
, dst_bit_offset
, bit_length
;
1337 struct gdbarch
*arch
= get_value_arch (src
);
1338 int unit_size
= gdbarch_addressable_memory_unit_size (arch
);
1340 /* A lazy DST would make that this copy operation useless, since as
1341 soon as DST's contents were un-lazied (by a later value_contents
1342 call, say), the contents would be overwritten. A lazy SRC would
1343 mean we'd be copying garbage. */
1344 gdb_assert (!dst
->lazy
&& !src
->lazy
);
1346 /* The overwritten DST range gets unavailability ORed in, not
1347 replaced. Make sure to remember to implement replacing if it
1348 turns out actually necessary. */
1349 gdb_assert (value_bytes_available (dst
, dst_offset
, length
));
1350 gdb_assert (!value_bits_any_optimized_out (dst
,
1351 TARGET_CHAR_BIT
* dst_offset
,
1352 TARGET_CHAR_BIT
* length
));
1354 /* Copy the data. */
1355 memcpy (value_contents_all_raw (dst
) + dst_offset
* unit_size
,
1356 value_contents_all_raw (src
) + src_offset
* unit_size
,
1357 length
* unit_size
);
1359 /* Copy the meta-data, adjusted. */
1360 src_bit_offset
= src_offset
* unit_size
* HOST_CHAR_BIT
;
1361 dst_bit_offset
= dst_offset
* unit_size
* HOST_CHAR_BIT
;
1362 bit_length
= length
* unit_size
* HOST_CHAR_BIT
;
1364 value_ranges_copy_adjusted (dst
, dst_bit_offset
,
1365 src
, src_bit_offset
,
1369 /* Copy LENGTH bytes of SRC value's (all) contents
1370 (value_contents_all) starting at SRC_OFFSET byte, into DST value's
1371 (all) contents, starting at DST_OFFSET. If unavailable contents
1372 are being copied from SRC, the corresponding DST contents are
1373 marked unavailable accordingly. DST must not be lazy. If SRC is
1374 lazy, it will be fetched now.
1376 It is assumed the contents of DST in the [DST_OFFSET,
1377 DST_OFFSET+LENGTH) range are wholly available. */
1380 value_contents_copy (struct value
*dst
, LONGEST dst_offset
,
1381 struct value
*src
, LONGEST src_offset
, LONGEST length
)
1384 value_fetch_lazy (src
);
1386 value_contents_copy_raw (dst
, dst_offset
, src
, src_offset
, length
);
1390 value_lazy (const struct value
*value
)
1396 set_value_lazy (struct value
*value
, int val
)
1402 value_stack (const struct value
*value
)
1404 return value
->stack
;
1408 set_value_stack (struct value
*value
, int val
)
1414 value_contents (struct value
*value
)
1416 const gdb_byte
*result
= value_contents_writeable (value
);
1417 require_not_optimized_out (value
);
1418 require_available (value
);
1423 value_contents_writeable (struct value
*value
)
1426 value_fetch_lazy (value
);
1427 return value_contents_raw (value
);
1431 value_optimized_out (struct value
*value
)
1433 /* We can only know if a value is optimized out once we have tried to
1435 if (VEC_empty (range_s
, value
->optimized_out
) && value
->lazy
)
1439 value_fetch_lazy (value
);
1441 CATCH (ex
, RETURN_MASK_ERROR
)
1443 /* Fall back to checking value->optimized_out. */
1448 return !VEC_empty (range_s
, value
->optimized_out
);
1451 /* Mark contents of VALUE as optimized out, starting at OFFSET bytes, and
1452 the following LENGTH bytes. */
1455 mark_value_bytes_optimized_out (struct value
*value
, int offset
, int length
)
1457 mark_value_bits_optimized_out (value
,
1458 offset
* TARGET_CHAR_BIT
,
1459 length
* TARGET_CHAR_BIT
);
1465 mark_value_bits_optimized_out (struct value
*value
,
1466 LONGEST offset
, LONGEST length
)
1468 insert_into_bit_range_vector (&value
->optimized_out
, offset
, length
);
1472 value_bits_synthetic_pointer (const struct value
*value
,
1473 LONGEST offset
, LONGEST length
)
1475 if (value
->lval
!= lval_computed
1476 || !value
->location
.computed
.funcs
->check_synthetic_pointer
)
1478 return value
->location
.computed
.funcs
->check_synthetic_pointer (value
,
1484 value_embedded_offset (const struct value
*value
)
1486 return value
->embedded_offset
;
1490 set_value_embedded_offset (struct value
*value
, LONGEST val
)
1492 value
->embedded_offset
= val
;
1496 value_pointed_to_offset (const struct value
*value
)
1498 return value
->pointed_to_offset
;
1502 set_value_pointed_to_offset (struct value
*value
, LONGEST val
)
1504 value
->pointed_to_offset
= val
;
1507 const struct lval_funcs
*
1508 value_computed_funcs (const struct value
*v
)
1510 gdb_assert (value_lval_const (v
) == lval_computed
);
1512 return v
->location
.computed
.funcs
;
1516 value_computed_closure (const struct value
*v
)
1518 gdb_assert (v
->lval
== lval_computed
);
1520 return v
->location
.computed
.closure
;
1524 deprecated_value_lval_hack (struct value
*value
)
1526 return &value
->lval
;
1530 value_lval_const (const struct value
*value
)
1536 value_address (const struct value
*value
)
1538 if (value
->lval
!= lval_memory
)
1540 if (value
->parent
!= NULL
)
1541 return value_address (value
->parent
) + value
->offset
;
1542 if (NULL
!= TYPE_DATA_LOCATION (value_type (value
)))
1544 gdb_assert (PROP_CONST
== TYPE_DATA_LOCATION_KIND (value_type (value
)));
1545 return TYPE_DATA_LOCATION_ADDR (value_type (value
));
1548 return value
->location
.address
+ value
->offset
;
1552 value_raw_address (const struct value
*value
)
1554 if (value
->lval
!= lval_memory
)
1556 return value
->location
.address
;
1560 set_value_address (struct value
*value
, CORE_ADDR addr
)
1562 gdb_assert (value
->lval
== lval_memory
);
1563 value
->location
.address
= addr
;
1566 struct internalvar
**
1567 deprecated_value_internalvar_hack (struct value
*value
)
1569 return &value
->location
.internalvar
;
1573 deprecated_value_next_frame_id_hack (struct value
*value
)
1575 gdb_assert (value
->lval
== lval_register
);
1576 return &value
->location
.reg
.next_frame_id
;
1580 deprecated_value_regnum_hack (struct value
*value
)
1582 gdb_assert (value
->lval
== lval_register
);
1583 return &value
->location
.reg
.regnum
;
1587 deprecated_value_modifiable (const struct value
*value
)
1589 return value
->modifiable
;
1592 /* Return a mark in the value chain. All values allocated after the
1593 mark is obtained (except for those released) are subject to being freed
1594 if a subsequent value_free_to_mark is passed the mark. */
1601 /* Take a reference to VAL. VAL will not be deallocated until all
1602 references are released. */
1605 value_incref (struct value
*val
)
1607 val
->reference_count
++;
1610 /* Release a reference to VAL, which was acquired with value_incref.
1611 This function is also called to deallocate values from the value
1615 value_free (struct value
*val
)
1619 gdb_assert (val
->reference_count
> 0);
1620 val
->reference_count
--;
1621 if (val
->reference_count
> 0)
1624 /* If there's an associated parent value, drop our reference to
1626 if (val
->parent
!= NULL
)
1627 value_free (val
->parent
);
1629 if (VALUE_LVAL (val
) == lval_computed
)
1631 const struct lval_funcs
*funcs
= val
->location
.computed
.funcs
;
1633 if (funcs
->free_closure
)
1634 funcs
->free_closure (val
);
1636 else if (VALUE_LVAL (val
) == lval_xcallable
)
1637 free_xmethod_worker (val
->location
.xm_worker
);
1639 xfree (val
->contents
);
1640 VEC_free (range_s
, val
->unavailable
);
1645 /* Free all values allocated since MARK was obtained by value_mark
1646 (except for those released). */
1648 value_free_to_mark (const struct value
*mark
)
1653 for (val
= all_values
; val
&& val
!= mark
; val
= next
)
1662 /* Free all the values that have been allocated (except for those released).
1663 Call after each command, successful or not.
1664 In practice this is called before each command, which is sufficient. */
1667 free_all_values (void)
1672 for (val
= all_values
; val
; val
= next
)
1682 /* Frees all the elements in a chain of values. */
1685 free_value_chain (struct value
*v
)
1691 next
= value_next (v
);
1696 /* Remove VAL from the chain all_values
1697 so it will not be freed automatically. */
1700 release_value (struct value
*val
)
1704 if (all_values
== val
)
1706 all_values
= val
->next
;
1712 for (v
= all_values
; v
; v
= v
->next
)
1716 v
->next
= val
->next
;
1724 /* If the value is not already released, release it.
1725 If the value is already released, increment its reference count.
1726 That is, this function ensures that the value is released from the
1727 value chain and that the caller owns a reference to it. */
1730 release_value_or_incref (struct value
*val
)
1735 release_value (val
);
1738 /* Release all values up to mark */
1740 value_release_to_mark (const struct value
*mark
)
1745 for (val
= next
= all_values
; next
; next
= next
->next
)
1747 if (next
->next
== mark
)
1749 all_values
= next
->next
;
1759 /* Return a copy of the value ARG.
1760 It contains the same contents, for same memory address,
1761 but it's a different block of storage. */
1764 value_copy (struct value
*arg
)
1766 struct type
*encl_type
= value_enclosing_type (arg
);
1769 if (value_lazy (arg
))
1770 val
= allocate_value_lazy (encl_type
);
1772 val
= allocate_value (encl_type
);
1773 val
->type
= arg
->type
;
1774 VALUE_LVAL (val
) = VALUE_LVAL (arg
);
1775 val
->location
= arg
->location
;
1776 val
->offset
= arg
->offset
;
1777 val
->bitpos
= arg
->bitpos
;
1778 val
->bitsize
= arg
->bitsize
;
1779 val
->lazy
= arg
->lazy
;
1780 val
->embedded_offset
= value_embedded_offset (arg
);
1781 val
->pointed_to_offset
= arg
->pointed_to_offset
;
1782 val
->modifiable
= arg
->modifiable
;
1783 if (!value_lazy (val
))
1785 memcpy (value_contents_all_raw (val
), value_contents_all_raw (arg
),
1786 TYPE_LENGTH (value_enclosing_type (arg
)));
1789 val
->unavailable
= VEC_copy (range_s
, arg
->unavailable
);
1790 val
->optimized_out
= VEC_copy (range_s
, arg
->optimized_out
);
1791 set_value_parent (val
, arg
->parent
);
1792 if (VALUE_LVAL (val
) == lval_computed
)
1794 const struct lval_funcs
*funcs
= val
->location
.computed
.funcs
;
1796 if (funcs
->copy_closure
)
1797 val
->location
.computed
.closure
= funcs
->copy_closure (val
);
1802 /* Return a "const" and/or "volatile" qualified version of the value V.
1803 If CNST is true, then the returned value will be qualified with
1805 if VOLTL is true, then the returned value will be qualified with
1809 make_cv_value (int cnst
, int voltl
, struct value
*v
)
1811 struct type
*val_type
= value_type (v
);
1812 struct type
*enclosing_type
= value_enclosing_type (v
);
1813 struct value
*cv_val
= value_copy (v
);
1815 deprecated_set_value_type (cv_val
,
1816 make_cv_type (cnst
, voltl
, val_type
, NULL
));
1817 set_value_enclosing_type (cv_val
,
1818 make_cv_type (cnst
, voltl
, enclosing_type
, NULL
));
1823 /* Return a version of ARG that is non-lvalue. */
1826 value_non_lval (struct value
*arg
)
1828 if (VALUE_LVAL (arg
) != not_lval
)
1830 struct type
*enc_type
= value_enclosing_type (arg
);
1831 struct value
*val
= allocate_value (enc_type
);
1833 memcpy (value_contents_all_raw (val
), value_contents_all (arg
),
1834 TYPE_LENGTH (enc_type
));
1835 val
->type
= arg
->type
;
1836 set_value_embedded_offset (val
, value_embedded_offset (arg
));
1837 set_value_pointed_to_offset (val
, value_pointed_to_offset (arg
));
1843 /* Write contents of V at ADDR and set its lval type to be LVAL_MEMORY. */
1846 value_force_lval (struct value
*v
, CORE_ADDR addr
)
1848 gdb_assert (VALUE_LVAL (v
) == not_lval
);
1850 write_memory (addr
, value_contents_raw (v
), TYPE_LENGTH (value_type (v
)));
1851 v
->lval
= lval_memory
;
1852 v
->location
.address
= addr
;
1856 set_value_component_location (struct value
*component
,
1857 const struct value
*whole
)
1861 gdb_assert (whole
->lval
!= lval_xcallable
);
1863 if (whole
->lval
== lval_internalvar
)
1864 VALUE_LVAL (component
) = lval_internalvar_component
;
1866 VALUE_LVAL (component
) = whole
->lval
;
1868 component
->location
= whole
->location
;
1869 if (whole
->lval
== lval_computed
)
1871 const struct lval_funcs
*funcs
= whole
->location
.computed
.funcs
;
1873 if (funcs
->copy_closure
)
1874 component
->location
.computed
.closure
= funcs
->copy_closure (whole
);
1877 /* If type has a dynamic resolved location property
1878 update it's value address. */
1879 type
= value_type (whole
);
1880 if (NULL
!= TYPE_DATA_LOCATION (type
)
1881 && TYPE_DATA_LOCATION_KIND (type
) == PROP_CONST
)
1882 set_value_address (component
, TYPE_DATA_LOCATION_ADDR (type
));
1885 /* Access to the value history. */
1887 /* Record a new value in the value history.
1888 Returns the absolute history index of the entry. */
1891 record_latest_value (struct value
*val
)
1895 /* We don't want this value to have anything to do with the inferior anymore.
1896 In particular, "set $1 = 50" should not affect the variable from which
1897 the value was taken, and fast watchpoints should be able to assume that
1898 a value on the value history never changes. */
1899 if (value_lazy (val
))
1900 value_fetch_lazy (val
);
1901 /* We preserve VALUE_LVAL so that the user can find out where it was fetched
1902 from. This is a bit dubious, because then *&$1 does not just return $1
1903 but the current contents of that location. c'est la vie... */
1904 val
->modifiable
= 0;
1906 /* The value may have already been released, in which case we're adding a
1907 new reference for its entry in the history. That is why we call
1908 release_value_or_incref here instead of release_value. */
1909 release_value_or_incref (val
);
1911 /* Here we treat value_history_count as origin-zero
1912 and applying to the value being stored now. */
1914 i
= value_history_count
% VALUE_HISTORY_CHUNK
;
1917 struct value_history_chunk
*newobj
= XCNEW (struct value_history_chunk
);
1919 newobj
->next
= value_history_chain
;
1920 value_history_chain
= newobj
;
1923 value_history_chain
->values
[i
] = val
;
1925 /* Now we regard value_history_count as origin-one
1926 and applying to the value just stored. */
1928 return ++value_history_count
;
1931 /* Return a copy of the value in the history with sequence number NUM. */
1934 access_value_history (int num
)
1936 struct value_history_chunk
*chunk
;
1941 absnum
+= value_history_count
;
1946 error (_("The history is empty."));
1948 error (_("There is only one value in the history."));
1950 error (_("History does not go back to $$%d."), -num
);
1952 if (absnum
> value_history_count
)
1953 error (_("History has not yet reached $%d."), absnum
);
1957 /* Now absnum is always absolute and origin zero. */
1959 chunk
= value_history_chain
;
1960 for (i
= (value_history_count
- 1) / VALUE_HISTORY_CHUNK
1961 - absnum
/ VALUE_HISTORY_CHUNK
;
1963 chunk
= chunk
->next
;
1965 return value_copy (chunk
->values
[absnum
% VALUE_HISTORY_CHUNK
]);
1969 show_values (char *num_exp
, int from_tty
)
1977 /* "show values +" should print from the stored position.
1978 "show values <exp>" should print around value number <exp>. */
1979 if (num_exp
[0] != '+' || num_exp
[1] != '\0')
1980 num
= parse_and_eval_long (num_exp
) - 5;
1984 /* "show values" means print the last 10 values. */
1985 num
= value_history_count
- 9;
1991 for (i
= num
; i
< num
+ 10 && i
<= value_history_count
; i
++)
1993 struct value_print_options opts
;
1995 val
= access_value_history (i
);
1996 printf_filtered (("$%d = "), i
);
1997 get_user_print_options (&opts
);
1998 value_print (val
, gdb_stdout
, &opts
);
1999 printf_filtered (("\n"));
2002 /* The next "show values +" should start after what we just printed. */
2005 /* Hitting just return after this command should do the same thing as
2006 "show values +". If num_exp is null, this is unnecessary, since
2007 "show values +" is not useful after "show values". */
2008 if (from_tty
&& num_exp
)
2015 enum internalvar_kind
2017 /* The internal variable is empty. */
2020 /* The value of the internal variable is provided directly as
2021 a GDB value object. */
2024 /* A fresh value is computed via a call-back routine on every
2025 access to the internal variable. */
2026 INTERNALVAR_MAKE_VALUE
,
2028 /* The internal variable holds a GDB internal convenience function. */
2029 INTERNALVAR_FUNCTION
,
2031 /* The variable holds an integer value. */
2032 INTERNALVAR_INTEGER
,
2034 /* The variable holds a GDB-provided string. */
2038 union internalvar_data
2040 /* A value object used with INTERNALVAR_VALUE. */
2041 struct value
*value
;
2043 /* The call-back routine used with INTERNALVAR_MAKE_VALUE. */
2046 /* The functions to call. */
2047 const struct internalvar_funcs
*functions
;
2049 /* The function's user-data. */
2053 /* The internal function used with INTERNALVAR_FUNCTION. */
2056 struct internal_function
*function
;
2057 /* True if this is the canonical name for the function. */
2061 /* An integer value used with INTERNALVAR_INTEGER. */
2064 /* If type is non-NULL, it will be used as the type to generate
2065 a value for this internal variable. If type is NULL, a default
2066 integer type for the architecture is used. */
2071 /* A string value used with INTERNALVAR_STRING. */
2075 /* Internal variables. These are variables within the debugger
2076 that hold values assigned by debugger commands.
2077 The user refers to them with a '$' prefix
2078 that does not appear in the variable names stored internally. */
2082 struct internalvar
*next
;
2085 /* We support various different kinds of content of an internal variable.
2086 enum internalvar_kind specifies the kind, and union internalvar_data
2087 provides the data associated with this particular kind. */
2089 enum internalvar_kind kind
;
2091 union internalvar_data u
;
2094 static struct internalvar
*internalvars
;
2096 /* If the variable does not already exist create it and give it the
2097 value given. If no value is given then the default is zero. */
2099 init_if_undefined_command (char* args
, int from_tty
)
2101 struct internalvar
* intvar
;
2103 /* Parse the expression - this is taken from set_command(). */
2104 expression_up expr
= parse_expression (args
);
2106 /* Validate the expression.
2107 Was the expression an assignment?
2108 Or even an expression at all? */
2109 if (expr
->nelts
== 0 || expr
->elts
[0].opcode
!= BINOP_ASSIGN
)
2110 error (_("Init-if-undefined requires an assignment expression."));
2112 /* Extract the variable from the parsed expression.
2113 In the case of an assign the lvalue will be in elts[1] and elts[2]. */
2114 if (expr
->elts
[1].opcode
!= OP_INTERNALVAR
)
2115 error (_("The first parameter to init-if-undefined "
2116 "should be a GDB variable."));
2117 intvar
= expr
->elts
[2].internalvar
;
2119 /* Only evaluate the expression if the lvalue is void.
2120 This may still fail if the expresssion is invalid. */
2121 if (intvar
->kind
== INTERNALVAR_VOID
)
2122 evaluate_expression (expr
.get ());
2126 /* Look up an internal variable with name NAME. NAME should not
2127 normally include a dollar sign.
2129 If the specified internal variable does not exist,
2130 the return value is NULL. */
2132 struct internalvar
*
2133 lookup_only_internalvar (const char *name
)
2135 struct internalvar
*var
;
2137 for (var
= internalvars
; var
; var
= var
->next
)
2138 if (strcmp (var
->name
, name
) == 0)
2144 /* Complete NAME by comparing it to the names of internal
2148 complete_internalvar (completion_tracker
&tracker
, const char *name
)
2150 struct internalvar
*var
;
2153 len
= strlen (name
);
2155 for (var
= internalvars
; var
; var
= var
->next
)
2156 if (strncmp (var
->name
, name
, len
) == 0)
2158 gdb::unique_xmalloc_ptr
<char> copy (xstrdup (var
->name
));
2160 tracker
.add_completion (std::move (copy
));
2164 /* Create an internal variable with name NAME and with a void value.
2165 NAME should not normally include a dollar sign. */
2167 struct internalvar
*
2168 create_internalvar (const char *name
)
2170 struct internalvar
*var
= XNEW (struct internalvar
);
2172 var
->name
= concat (name
, (char *)NULL
);
2173 var
->kind
= INTERNALVAR_VOID
;
2174 var
->next
= internalvars
;
2179 /* Create an internal variable with name NAME and register FUN as the
2180 function that value_of_internalvar uses to create a value whenever
2181 this variable is referenced. NAME should not normally include a
2182 dollar sign. DATA is passed uninterpreted to FUN when it is
2183 called. CLEANUP, if not NULL, is called when the internal variable
2184 is destroyed. It is passed DATA as its only argument. */
2186 struct internalvar
*
2187 create_internalvar_type_lazy (const char *name
,
2188 const struct internalvar_funcs
*funcs
,
2191 struct internalvar
*var
= create_internalvar (name
);
2193 var
->kind
= INTERNALVAR_MAKE_VALUE
;
2194 var
->u
.make_value
.functions
= funcs
;
2195 var
->u
.make_value
.data
= data
;
2199 /* See documentation in value.h. */
2202 compile_internalvar_to_ax (struct internalvar
*var
,
2203 struct agent_expr
*expr
,
2204 struct axs_value
*value
)
2206 if (var
->kind
!= INTERNALVAR_MAKE_VALUE
2207 || var
->u
.make_value
.functions
->compile_to_ax
== NULL
)
2210 var
->u
.make_value
.functions
->compile_to_ax (var
, expr
, value
,
2211 var
->u
.make_value
.data
);
2215 /* Look up an internal variable with name NAME. NAME should not
2216 normally include a dollar sign.
2218 If the specified internal variable does not exist,
2219 one is created, with a void value. */
2221 struct internalvar
*
2222 lookup_internalvar (const char *name
)
2224 struct internalvar
*var
;
2226 var
= lookup_only_internalvar (name
);
2230 return create_internalvar (name
);
2233 /* Return current value of internal variable VAR. For variables that
2234 are not inherently typed, use a value type appropriate for GDBARCH. */
2237 value_of_internalvar (struct gdbarch
*gdbarch
, struct internalvar
*var
)
2240 struct trace_state_variable
*tsv
;
2242 /* If there is a trace state variable of the same name, assume that
2243 is what we really want to see. */
2244 tsv
= find_trace_state_variable (var
->name
);
2247 tsv
->value_known
= target_get_trace_state_variable_value (tsv
->number
,
2249 if (tsv
->value_known
)
2250 val
= value_from_longest (builtin_type (gdbarch
)->builtin_int64
,
2253 val
= allocate_value (builtin_type (gdbarch
)->builtin_void
);
2259 case INTERNALVAR_VOID
:
2260 val
= allocate_value (builtin_type (gdbarch
)->builtin_void
);
2263 case INTERNALVAR_FUNCTION
:
2264 val
= allocate_value (builtin_type (gdbarch
)->internal_fn
);
2267 case INTERNALVAR_INTEGER
:
2268 if (!var
->u
.integer
.type
)
2269 val
= value_from_longest (builtin_type (gdbarch
)->builtin_int
,
2270 var
->u
.integer
.val
);
2272 val
= value_from_longest (var
->u
.integer
.type
, var
->u
.integer
.val
);
2275 case INTERNALVAR_STRING
:
2276 val
= value_cstring (var
->u
.string
, strlen (var
->u
.string
),
2277 builtin_type (gdbarch
)->builtin_char
);
2280 case INTERNALVAR_VALUE
:
2281 val
= value_copy (var
->u
.value
);
2282 if (value_lazy (val
))
2283 value_fetch_lazy (val
);
2286 case INTERNALVAR_MAKE_VALUE
:
2287 val
= (*var
->u
.make_value
.functions
->make_value
) (gdbarch
, var
,
2288 var
->u
.make_value
.data
);
2292 internal_error (__FILE__
, __LINE__
, _("bad kind"));
2295 /* Change the VALUE_LVAL to lval_internalvar so that future operations
2296 on this value go back to affect the original internal variable.
2298 Do not do this for INTERNALVAR_MAKE_VALUE variables, as those have
2299 no underlying modifyable state in the internal variable.
2301 Likewise, if the variable's value is a computed lvalue, we want
2302 references to it to produce another computed lvalue, where
2303 references and assignments actually operate through the
2304 computed value's functions.
2306 This means that internal variables with computed values
2307 behave a little differently from other internal variables:
2308 assignments to them don't just replace the previous value
2309 altogether. At the moment, this seems like the behavior we
2312 if (var
->kind
!= INTERNALVAR_MAKE_VALUE
2313 && val
->lval
!= lval_computed
)
2315 VALUE_LVAL (val
) = lval_internalvar
;
2316 VALUE_INTERNALVAR (val
) = var
;
2323 get_internalvar_integer (struct internalvar
*var
, LONGEST
*result
)
2325 if (var
->kind
== INTERNALVAR_INTEGER
)
2327 *result
= var
->u
.integer
.val
;
2331 if (var
->kind
== INTERNALVAR_VALUE
)
2333 struct type
*type
= check_typedef (value_type (var
->u
.value
));
2335 if (TYPE_CODE (type
) == TYPE_CODE_INT
)
2337 *result
= value_as_long (var
->u
.value
);
2346 get_internalvar_function (struct internalvar
*var
,
2347 struct internal_function
**result
)
2351 case INTERNALVAR_FUNCTION
:
2352 *result
= var
->u
.fn
.function
;
2361 set_internalvar_component (struct internalvar
*var
,
2362 LONGEST offset
, LONGEST bitpos
,
2363 LONGEST bitsize
, struct value
*newval
)
2366 struct gdbarch
*arch
;
2371 case INTERNALVAR_VALUE
:
2372 addr
= value_contents_writeable (var
->u
.value
);
2373 arch
= get_value_arch (var
->u
.value
);
2374 unit_size
= gdbarch_addressable_memory_unit_size (arch
);
2377 modify_field (value_type (var
->u
.value
), addr
+ offset
,
2378 value_as_long (newval
), bitpos
, bitsize
);
2380 memcpy (addr
+ offset
* unit_size
, value_contents (newval
),
2381 TYPE_LENGTH (value_type (newval
)));
2385 /* We can never get a component of any other kind. */
2386 internal_error (__FILE__
, __LINE__
, _("set_internalvar_component"));
2391 set_internalvar (struct internalvar
*var
, struct value
*val
)
2393 enum internalvar_kind new_kind
;
2394 union internalvar_data new_data
= { 0 };
2396 if (var
->kind
== INTERNALVAR_FUNCTION
&& var
->u
.fn
.canonical
)
2397 error (_("Cannot overwrite convenience function %s"), var
->name
);
2399 /* Prepare new contents. */
2400 switch (TYPE_CODE (check_typedef (value_type (val
))))
2402 case TYPE_CODE_VOID
:
2403 new_kind
= INTERNALVAR_VOID
;
2406 case TYPE_CODE_INTERNAL_FUNCTION
:
2407 gdb_assert (VALUE_LVAL (val
) == lval_internalvar
);
2408 new_kind
= INTERNALVAR_FUNCTION
;
2409 get_internalvar_function (VALUE_INTERNALVAR (val
),
2410 &new_data
.fn
.function
);
2411 /* Copies created here are never canonical. */
2415 new_kind
= INTERNALVAR_VALUE
;
2416 new_data
.value
= value_copy (val
);
2417 new_data
.value
->modifiable
= 1;
2419 /* Force the value to be fetched from the target now, to avoid problems
2420 later when this internalvar is referenced and the target is gone or
2422 if (value_lazy (new_data
.value
))
2423 value_fetch_lazy (new_data
.value
);
2425 /* Release the value from the value chain to prevent it from being
2426 deleted by free_all_values. From here on this function should not
2427 call error () until new_data is installed into the var->u to avoid
2429 release_value (new_data
.value
);
2431 /* Internal variables which are created from values with a dynamic
2432 location don't need the location property of the origin anymore.
2433 The resolved dynamic location is used prior then any other address
2434 when accessing the value.
2435 If we keep it, we would still refer to the origin value.
2436 Remove the location property in case it exist. */
2437 remove_dyn_prop (DYN_PROP_DATA_LOCATION
, value_type (new_data
.value
));
2442 /* Clean up old contents. */
2443 clear_internalvar (var
);
2446 var
->kind
= new_kind
;
2448 /* End code which must not call error(). */
2452 set_internalvar_integer (struct internalvar
*var
, LONGEST l
)
2454 /* Clean up old contents. */
2455 clear_internalvar (var
);
2457 var
->kind
= INTERNALVAR_INTEGER
;
2458 var
->u
.integer
.type
= NULL
;
2459 var
->u
.integer
.val
= l
;
2463 set_internalvar_string (struct internalvar
*var
, const char *string
)
2465 /* Clean up old contents. */
2466 clear_internalvar (var
);
2468 var
->kind
= INTERNALVAR_STRING
;
2469 var
->u
.string
= xstrdup (string
);
2473 set_internalvar_function (struct internalvar
*var
, struct internal_function
*f
)
2475 /* Clean up old contents. */
2476 clear_internalvar (var
);
2478 var
->kind
= INTERNALVAR_FUNCTION
;
2479 var
->u
.fn
.function
= f
;
2480 var
->u
.fn
.canonical
= 1;
2481 /* Variables installed here are always the canonical version. */
2485 clear_internalvar (struct internalvar
*var
)
2487 /* Clean up old contents. */
2490 case INTERNALVAR_VALUE
:
2491 value_free (var
->u
.value
);
2494 case INTERNALVAR_STRING
:
2495 xfree (var
->u
.string
);
2498 case INTERNALVAR_MAKE_VALUE
:
2499 if (var
->u
.make_value
.functions
->destroy
!= NULL
)
2500 var
->u
.make_value
.functions
->destroy (var
->u
.make_value
.data
);
2507 /* Reset to void kind. */
2508 var
->kind
= INTERNALVAR_VOID
;
2512 internalvar_name (const struct internalvar
*var
)
2517 static struct internal_function
*
2518 create_internal_function (const char *name
,
2519 internal_function_fn handler
, void *cookie
)
2521 struct internal_function
*ifn
= XNEW (struct internal_function
);
2523 ifn
->name
= xstrdup (name
);
2524 ifn
->handler
= handler
;
2525 ifn
->cookie
= cookie
;
2530 value_internal_function_name (struct value
*val
)
2532 struct internal_function
*ifn
;
2535 gdb_assert (VALUE_LVAL (val
) == lval_internalvar
);
2536 result
= get_internalvar_function (VALUE_INTERNALVAR (val
), &ifn
);
2537 gdb_assert (result
);
2543 call_internal_function (struct gdbarch
*gdbarch
,
2544 const struct language_defn
*language
,
2545 struct value
*func
, int argc
, struct value
**argv
)
2547 struct internal_function
*ifn
;
2550 gdb_assert (VALUE_LVAL (func
) == lval_internalvar
);
2551 result
= get_internalvar_function (VALUE_INTERNALVAR (func
), &ifn
);
2552 gdb_assert (result
);
2554 return (*ifn
->handler
) (gdbarch
, language
, ifn
->cookie
, argc
, argv
);
2557 /* The 'function' command. This does nothing -- it is just a
2558 placeholder to let "help function NAME" work. This is also used as
2559 the implementation of the sub-command that is created when
2560 registering an internal function. */
static void
function_command (char *command, int from_tty)
{
  /* Do nothing.  */
}
2567 /* Clean up if an internal function's command is destroyed. */
2569 function_destroyer (struct cmd_list_element
*self
, void *ignore
)
2571 xfree ((char *) self
->name
);
2572 xfree ((char *) self
->doc
);
2575 /* Add a new internal function. NAME is the name of the function; DOC
2576 is a documentation string describing the function. HANDLER is
2577 called when the function is invoked. COOKIE is an arbitrary
2578 pointer which is passed to HANDLER and is intended for "user
2581 add_internal_function (const char *name
, const char *doc
,
2582 internal_function_fn handler
, void *cookie
)
2584 struct cmd_list_element
*cmd
;
2585 struct internal_function
*ifn
;
2586 struct internalvar
*var
= lookup_internalvar (name
);
2588 ifn
= create_internal_function (name
, handler
, cookie
);
2589 set_internalvar_function (var
, ifn
);
2591 cmd
= add_cmd (xstrdup (name
), no_class
, function_command
, (char *) doc
,
2593 cmd
->destroyer
= function_destroyer
;
/* Update VALUE before discarding OBJFILE.  COPIED_TYPES is used to
   prevent cycles / duplicates.  */

static void
preserve_one_value (struct value *value, struct objfile *objfile,
		    htab_t copied_types)
{
  /* Replace both the value's type and its enclosing type with copies
     that do not reference the objfile being discarded.  */
  if (TYPE_OBJFILE (value->type) == objfile)
    value->type = copy_type_recursive (objfile, value->type, copied_types);

  if (TYPE_OBJFILE (value->enclosing_type) == objfile)
    value->enclosing_type = copy_type_recursive (objfile,
						 value->enclosing_type,
						 copied_types);
}
/* Likewise for internal variable VAR.  */

static void
preserve_one_internalvar (struct internalvar *var, struct objfile *objfile,
			  htab_t copied_types)
{
  switch (var->kind)
    {
    case INTERNALVAR_INTEGER:
      /* Only an integer with an explicit type can reference the
	 objfile; a typeless integer needs no preservation.  */
      if (var->u.integer.type
	  && TYPE_OBJFILE (var->u.integer.type) == objfile)
	var->u.integer.type
	  = copy_type_recursive (objfile, var->u.integer.type, copied_types);
      break;

    case INTERNALVAR_VALUE:
      preserve_one_value (var->u.value, objfile, copied_types);
      break;
    }
}
/* Update the internal variables and value history when OBJFILE is
   discarded; we must copy the types out of the objfile.  New global types
   will be created for every convenience variable which currently points to
   this objfile's types, and the convenience variables will be adjusted to
   use the new global types.  */

void
preserve_values (struct objfile *objfile)
{
  htab_t copied_types;
  struct value_history_chunk *cur;
  struct internalvar *var;
  int i;

  /* Create the hash table.  We allocate on the objfile's obstack, since
     it is soon to be deleted.  */
  copied_types = create_copied_types_hash (objfile);

  /* Walk every slot of every history chunk; slots may be empty.  */
  for (cur = value_history_chain; cur; cur = cur->next)
    for (i = 0; i < VALUE_HISTORY_CHUNK; i++)
      if (cur->values[i])
	preserve_one_value (cur->values[i], objfile, copied_types);

  for (var = internalvars; var; var = var->next)
    preserve_one_internalvar (var, objfile, copied_types);

  /* Give extension languages (Python, Guile) the same chance.  */
  preserve_ext_lang_values (objfile, copied_types);

  htab_delete (copied_types);
}
/* Implementation of "show convenience": print each convenience
   variable and its current value, or a hint when none are defined.  */

static void
show_convenience (char *ignore, int from_tty)
{
  struct gdbarch *gdbarch = get_current_arch ();
  struct internalvar *var;
  int varseen = 0;		/* Set once at least one variable printed.  */
  struct value_print_options opts;

  get_user_print_options (&opts);
  for (var = internalvars; var; var = var->next)
    {
      if (!varseen)
	{
	  varseen = 1;
	}
      printf_filtered (("$%s = "), var->name);

      /* Evaluating the variable may throw (e.g. a Python-backed
	 variable); report the error inline and keep going.  */
      TRY
	{
	  struct value *val;

	  val = value_of_internalvar (gdbarch, var);
	  value_print (val, gdb_stdout, &opts);
	}
      CATCH (ex, RETURN_MASK_ERROR)
	{
	  fprintf_filtered (gdb_stdout, _("<error: %s>"), ex.message);
	}
      END_CATCH

      printf_filtered (("\n"));
    }
  if (!varseen)
    {
      /* This text does not mention convenience functions on purpose.
	 The user can't create them except via Python, and if Python support
	 is installed this message will never be printed ($_streq will
	 exist).  */
      printf_unfiltered (_("No debugger convenience variables now defined.\n"
			   "Convenience variables have "
			   "names starting with \"$\";\n"
			   "use \"set\" as in \"set "
			   "$foo = 5\" to define them.\n"));
    }
}
/* Return the TYPE_CODE_XMETHOD value corresponding to WORKER.  */

struct value *
value_of_xmethod (struct xmethod_worker *worker)
{
  /* Lazily create and cache the value on the worker itself, so
     repeated calls hand back the same value object.  */
  if (worker->value == NULL)
    {
      struct value *v;

      v = allocate_value (builtin_type (target_gdbarch ())->xmethod);
      v->lval = lval_xcallable;
      v->location.xm_worker = worker;
      v->modifiable = 0;
      worker->value = v;
    }

  return worker->value;
}
2729 /* Return the type of the result of TYPE_CODE_XMETHOD value METHOD. */
2732 result_type_of_xmethod (struct value
*method
, int argc
, struct value
**argv
)
2734 gdb_assert (TYPE_CODE (value_type (method
)) == TYPE_CODE_XMETHOD
2735 && method
->lval
== lval_xcallable
&& argc
> 0);
2737 return get_xmethod_result_type (method
->location
.xm_worker
,
2738 argv
[0], argv
+ 1, argc
- 1);
2741 /* Call the xmethod corresponding to the TYPE_CODE_XMETHOD value METHOD. */
2744 call_xmethod (struct value
*method
, int argc
, struct value
**argv
)
2746 gdb_assert (TYPE_CODE (value_type (method
)) == TYPE_CODE_XMETHOD
2747 && method
->lval
== lval_xcallable
&& argc
> 0);
2749 return invoke_xmethod (method
->location
.xm_worker
,
2750 argv
[0], argv
+ 1, argc
- 1);
2753 /* Extract a value as a C number (either long or double).
2754 Knows how to convert fixed values to double, or
2755 floating values to long.
2756 Does not deallocate the value. */
2759 value_as_long (struct value
*val
)
2761 /* This coerces arrays and functions, which is necessary (e.g.
2762 in disassemble_command). It also dereferences references, which
2763 I suspect is the most logical thing to do. */
2764 val
= coerce_array (val
);
2765 return unpack_long (value_type (val
), value_contents (val
));
/* Extract VAL as a host DOUBLEST.  Errors out if the target bytes do
   not form a valid floating-point value.  */

DOUBLEST
value_as_double (struct value *val)
{
  DOUBLEST foo;
  int inv;			/* Set by unpack_double on bad format.  */

  foo = unpack_double (value_type (val), value_contents (val), &inv);
  if (inv)
    error (_("Invalid floating value found in program."));
  return foo;
}
/* Extract a value as a C pointer.  Does not deallocate the value.
   Note that val's type may not actually be a pointer; value_as_long
   handles all the cases.  */

CORE_ADDR
value_as_address (struct value *val)
{
  struct gdbarch *gdbarch = get_type_arch (value_type (val));

  /* Assume a CORE_ADDR can fit in a LONGEST (for now).  Not sure
     whether we want this to be true eventually.  */
#if 0
  /* gdbarch_addr_bits_remove is wrong if we are being called for a
     non-address (e.g. argument to "signal", "info break", etc.), or
     for pointers to char, in which the low bits *are* significant.  */
  return gdbarch_addr_bits_remove (gdbarch, value_as_long (val));
#else

  /* There are several targets (IA-64, PowerPC, and others) which
     don't represent pointers to functions as simply the address of
     the function's entry point.  For example, on the IA-64, a
     function pointer points to a two-word descriptor, generated by
     the linker, which contains the function's entry point, and the
     value the IA-64 "global pointer" register should have --- to
     support position-independent code.  The linker generates
     descriptors only for those functions whose addresses are taken.

     On such targets, it's difficult for GDB to convert an arbitrary
     function address into a function pointer; it has to either find
     an existing descriptor for that function, or call malloc and
     build its own.  On some targets, it is impossible for GDB to
     build a descriptor at all: the descriptor must contain a jump
     instruction; data memory cannot be executed; and code memory
     cannot be modified.

     Upon entry to this function, if VAL is a value of type `function'
     (that is, TYPE_CODE (VALUE_TYPE (val)) == TYPE_CODE_FUNC), then
     value_address (val) is the address of the function.  This is what
     you'll get if you evaluate an expression like `main'.  The call
     to COERCE_ARRAY below actually does all the usual unary
     conversions, which includes converting values of type `function'
     to `pointer to function'.  This is the challenging conversion
     discussed above.  Then, `unpack_long' will convert that pointer
     back into an address.

     So, suppose the user types `disassemble foo' on an architecture
     with a strange function pointer representation, on which GDB
     cannot build its own descriptors, and suppose further that `foo'
     has no linker-built descriptor.  The address->pointer conversion
     will signal an error and prevent the command from running, even
     though the next step would have been to convert the pointer
     directly back into the same address.

     The following shortcut avoids this whole mess.  If VAL is a
     function, just return its address directly.  */
  if (TYPE_CODE (value_type (val)) == TYPE_CODE_FUNC
      || TYPE_CODE (value_type (val)) == TYPE_CODE_METHOD)
    return value_address (val);

  val = coerce_array (val);

  /* Some architectures (e.g. Harvard), map instruction and data
     addresses onto a single large unified address space.  For
     instance: An architecture may consider a large integer in the
     range 0x10000000 .. 0x1000ffff to already represent a data
     addresses (hence not need a pointer to address conversion) while
     a small integer would still need to be converted integer to
     pointer to address.  Just assume such architectures handle all
     integer conversions in a single function.  */

  /* JimB writes:

     I think INTEGER_TO_ADDRESS is a good idea as proposed --- but we
     must admonish GDB hackers to make sure its behavior matches the
     compiler's, whenever possible.

     In general, I think GDB should evaluate expressions the same way
     the compiler does.  When the user copies an expression out of
     their source code and hands it to a `print' command, they should
     get the same value the compiler would have computed.  Any
     deviation from this rule can cause major confusion and annoyance,
     and needs to be justified carefully.  In other words, GDB doesn't
     really have the freedom to do these conversions in clever and
     useful ways.

     AndrewC pointed out that users aren't complaining about how GDB
     casts integers to pointers; they are complaining that they can't
     take an address from a disassembly listing and give it to `x/i'.
     This is certainly important.

     Adding an architecture method like integer_to_address() certainly
     makes it possible for GDB to "get it right" in all circumstances
     --- the target has complete control over how things get done, so
     people can Do The Right Thing for their target without breaking
     anyone else.  The standard doesn't specify how integers get
     converted to pointers; usually, the ABI doesn't either, but
     ABI-specific code is a more reasonable place to handle it.  */

  if (TYPE_CODE (value_type (val)) != TYPE_CODE_PTR
      && !TYPE_IS_REFERENCE (value_type (val))
      && gdbarch_integer_to_address_p (gdbarch))
    return gdbarch_integer_to_address (gdbarch, value_type (val),
				       value_contents (val));

  return unpack_long (value_type (val), value_contents (val));
#endif
}
/* Unpack raw data (copied from debugee, target byte order) at VALADDR
   as a long, or as a double, assuming the raw data is described
   by type TYPE.  Knows how to convert different sizes of values
   and can convert between fixed and floating point.  We don't assume
   any alignment for the raw data.  Return value is in host byte order.

   If you want functions and arrays to be coerced to pointers, and
   references to be dereferenced, call value_as_long() instead.

   C++: It is assumed that the front-end has taken care of
   all matters concerning pointers to members.  A pointer
   to member which reaches here is considered to be equivalent
   to an INT (or some size).  After all, it is only an offset.  */

LONGEST
unpack_long (struct type *type, const gdb_byte *valaddr)
{
  enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
  enum type_code code = TYPE_CODE (type);
  int len = TYPE_LENGTH (type);
  int nosign = TYPE_UNSIGNED (type);

  switch (code)
    {
    case TYPE_CODE_TYPEDEF:
      /* Recurse on the stripped type so LEN reflects the target type.  */
      return unpack_long (check_typedef (type), valaddr);
    case TYPE_CODE_ENUM:
    case TYPE_CODE_FLAGS:
    case TYPE_CODE_BOOL:
    case TYPE_CODE_INT:
    case TYPE_CODE_CHAR:
    case TYPE_CODE_RANGE:
    case TYPE_CODE_MEMBERPTR:
      if (nosign)
	return extract_unsigned_integer (valaddr, len, byte_order);
      else
	return extract_signed_integer (valaddr, len, byte_order);

    case TYPE_CODE_FLT:
      return (LONGEST) extract_typed_floating (valaddr, type);

    case TYPE_CODE_DECFLOAT:
      /* libdecnumber has a function to convert from decimal to integer, but
	 it doesn't work when the decimal number has a fractional part.  */
      return (LONGEST) decimal_to_doublest (valaddr, len, byte_order);

    case TYPE_CODE_PTR:
    case TYPE_CODE_REF:
    case TYPE_CODE_RVALUE_REF:
      /* Assume a CORE_ADDR can fit in a LONGEST (for now).  Not sure
	 whether we want this to be true eventually.  */
      return extract_typed_address (valaddr, type);

    default:
      error (_("Value can't be converted to integer."));
    }
  return 0;			/* Placate lint.  */
}
/* Return a double value from the specified type and address.
   INVP points to an int which is set to 0 for valid value,
   1 for invalid value (bad float format).  In either case,
   the returned double is OK to use.  Argument is in target
   format, result is in host format.  */

DOUBLEST
unpack_double (struct type *type, const gdb_byte *valaddr, int *invp)
{
  enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
  enum type_code code;
  int len;
  int nosign;

  *invp = 0;			/* Assume valid.  */
  type = check_typedef (type);
  code = TYPE_CODE (type);
  len = TYPE_LENGTH (type);
  nosign = TYPE_UNSIGNED (type);
  if (code == TYPE_CODE_FLT)
    {
      /* NOTE: cagney/2002-02-19: There was a test here to see if the
	 floating-point value was valid (using the macro
	 INVALID_FLOAT).  That test/macro have been removed.

	 It turns out that only the VAX defined this macro and then
	 only in a non-portable way.  Fixing the portability problem
	 wouldn't help since the VAX floating-point code is also badly
	 bit-rotten.  The target needs to add definitions for the
	 methods gdbarch_float_format and gdbarch_double_format - these
	 exactly describe the target floating-point format.  The
	 problem here is that the corresponding floatformat_vax_f and
	 floatformat_vax_d values these methods should be set to are
	 also not defined either.  Oops!

	 Hopefully someone will add both the missing floatformat
	 definitions and the new cases for floatformat_is_valid ().  */

      if (!floatformat_is_valid (floatformat_from_type (type), valaddr))
	{
	  *invp = 1;
	  return 0.0;
	}

      return extract_typed_floating (valaddr, type);
    }
  else if (code == TYPE_CODE_DECFLOAT)
    return decimal_to_doublest (valaddr, len, byte_order);
  else if (nosign)
    {
      /* Unsigned -- be sure we compensate for signed LONGEST.  */
      return (ULONGEST) unpack_long (type, valaddr);
    }
  else
    {
      /* Signed -- we are OK with unpack_long.  */
      return unpack_long (type, valaddr);
    }
}
/* Unpack raw data (copied from debugee, target byte order) at VALADDR
   as a CORE_ADDR, assuming the raw data is described by type TYPE.
   We don't assume any alignment for the raw data.  Return value is in
   host byte order.

   If you want functions and arrays to be coerced to pointers, and
   references to be dereferenced, call value_as_address() instead.

   C++: It is assumed that the front-end has taken care of
   all matters concerning pointers to members.  A pointer
   to member which reaches here is considered to be equivalent
   to an INT (or some size).  After all, it is only an offset.  */

CORE_ADDR
unpack_pointer (struct type *type, const gdb_byte *valaddr)
{
  /* Assume a CORE_ADDR can fit in a LONGEST (for now).  Not sure
     whether we want this to be true eventually.  */
  return unpack_long (type, valaddr);
}
/* Get the value of the FIELDNO'th field (which must be static) of
   TYPE.  Returns an optimized-out value when the member cannot be
   located at all.  */

struct value *
value_static_field (struct type *type, int fieldno)
{
  struct value *retval;

  switch (TYPE_FIELD_LOC_KIND (type, fieldno))
    {
    case FIELD_LOC_KIND_PHYSADDR:
      /* The debug info gave us the member's address directly.  */
      retval = value_at_lazy (TYPE_FIELD_TYPE (type, fieldno),
			      TYPE_FIELD_STATIC_PHYSADDR (type, fieldno));
      break;
    case FIELD_LOC_KIND_PHYSNAME:
    {
      const char *phys_name = TYPE_FIELD_STATIC_PHYSNAME (type, fieldno);
      /* TYPE_FIELD_NAME (type, fieldno); */
      struct block_symbol sym = lookup_symbol (phys_name, 0, VAR_DOMAIN, 0);

      if (sym.symbol == NULL)
	{
	  /* With some compilers, e.g. HP aCC, static data members are
	     reported as non-debuggable symbols.  */
	  struct bound_minimal_symbol msym
	    = lookup_minimal_symbol (phys_name, NULL, NULL);

	  if (!msym.minsym)
	    return allocate_optimized_out_value (type);
	  else
	    {
	      retval = value_at_lazy (TYPE_FIELD_TYPE (type, fieldno),
				      BMSYMBOL_VALUE_ADDRESS (msym));
	    }
	}
      else
	retval = value_of_variable (sym.symbol, sym.block);
      break;
    }
    default:
      gdb_assert_not_reached ("unexpected field location kind");
    }

  return retval;
}
/* Change the enclosing type of a value object VAL to NEW_ENCL_TYPE.
   You have to be careful here, since the size of the data area for the value
   is set by the length of the enclosing type.  So if NEW_ENCL_TYPE is bigger
   than the old enclosing type, you have to allocate more space for the
   data.  */

void
set_value_enclosing_type (struct value *val, struct type *new_encl_type)
{
  if (TYPE_LENGTH (new_encl_type) > TYPE_LENGTH (value_enclosing_type (val)))
    {
      /* Guard against pathological lengths before growing the buffer.  */
      check_type_length_before_alloc (new_encl_type);
      val->contents
	= (gdb_byte *) xrealloc (val->contents, TYPE_LENGTH (new_encl_type));
    }

  val->enclosing_type = new_encl_type;
}
/* Given a value ARG1 (offset by OFFSET bytes)
   of a struct or union type ARG_TYPE,
   extract and return the value of one of its (non-static) fields.
   FIELDNO says which field.  */

struct value *
value_primitive_field (struct value *arg1, LONGEST offset,
		       int fieldno, struct type *arg_type)
{
  struct value *v;
  struct type *type;
  struct gdbarch *arch = get_value_arch (arg1);
  int unit_size = gdbarch_addressable_memory_unit_size (arch);

  arg_type = check_typedef (arg_type);
  type = TYPE_FIELD_TYPE (arg_type, fieldno);

  /* Call check_typedef on our type to make sure that, if TYPE
     is a TYPE_CODE_TYPEDEF, its length is set to the length
     of the target type instead of zero.  However, we do not
     replace the typedef type by the target type, because we want
     to keep the typedef in order to be able to print the type
     description correctly.  */
  check_typedef (type);

  if (TYPE_FIELD_BITSIZE (arg_type, fieldno))
    {
      /* Handle packed fields.

	 Create a new value for the bitfield, with bitpos and bitsize
	 set.  If possible, arrange offset and bitpos so that we can
	 do a single aligned read of the size of the containing type.
	 Otherwise, adjust offset to the byte containing the first
	 bit.  Assume that the address, offset, and embedded offset
	 are sufficiently aligned.  */

      LONGEST bitpos = TYPE_FIELD_BITPOS (arg_type, fieldno);
      LONGEST container_bitsize = TYPE_LENGTH (type) * 8;

      v = allocate_value_lazy (type);
      v->bitsize = TYPE_FIELD_BITSIZE (arg_type, fieldno);
      if ((bitpos % container_bitsize) + v->bitsize <= container_bitsize
	  && TYPE_LENGTH (type) <= (int) sizeof (LONGEST))
	v->bitpos = bitpos % container_bitsize;
      else
	v->bitpos = bitpos % 8;
      v->offset = (value_embedded_offset (arg1)
		   + offset
		   + (bitpos - v->bitpos) / 8);
      set_value_parent (v, arg1);
      if (!value_lazy (arg1))
	value_fetch_lazy (v);
    }
  else if (fieldno < TYPE_N_BASECLASSES (arg_type))
    {
      /* This field is actually a base subobject, so preserve the
	 entire object's contents for later references to virtual
	 bases, etc.  */
      LONGEST boffset;

      /* Lazy register values with offsets are not supported.  */
      if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
	value_fetch_lazy (arg1);

      /* We special case virtual inheritance here because this
	 requires access to the contents, which we would rather avoid
	 for references to ordinary fields of unavailable values.  */
      if (BASETYPE_VIA_VIRTUAL (arg_type, fieldno))
	boffset = baseclass_offset (arg_type, fieldno,
				    value_contents (arg1),
				    value_embedded_offset (arg1),
				    value_address (arg1),
				    arg1);
      else
	boffset = TYPE_FIELD_BITPOS (arg_type, fieldno) / 8;

      if (value_lazy (arg1))
	v = allocate_value_lazy (value_enclosing_type (arg1));
      else
	{
	  v = allocate_value (value_enclosing_type (arg1));
	  value_contents_copy_raw (v, 0, arg1, 0,
				   TYPE_LENGTH (value_enclosing_type (arg1)));
	}
      v->type = type;
      v->offset = value_offset (arg1);
      v->embedded_offset = offset + value_embedded_offset (arg1) + boffset;
    }
  else if (NULL != TYPE_DATA_LOCATION (type))
    {
      /* Field is a dynamic data member.  */

      gdb_assert (0 == offset);
      /* We expect an already resolved data location.  */
      gdb_assert (PROP_CONST == TYPE_DATA_LOCATION_KIND (type));
      /* For dynamic data types defer memory allocation
	 until we actual access the value.  */
      v = allocate_value_lazy (type);
    }
  else
    {
      /* Plain old data member */
      offset += (TYPE_FIELD_BITPOS (arg_type, fieldno)
		 / (HOST_CHAR_BIT * unit_size));

      /* Lazy register values with offsets are not supported.  */
      if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
	value_fetch_lazy (arg1);

      if (value_lazy (arg1))
	v = allocate_value_lazy (type);
      else
	{
	  v = allocate_value (type);
	  value_contents_copy_raw (v, value_embedded_offset (v),
				   arg1, value_embedded_offset (arg1) + offset,
				   type_length_units (type));
	}
      v->offset = (value_offset (arg1) + offset
		   + value_embedded_offset (arg1));
    }
  set_value_component_location (v, arg1);
  return v;
}
/* Given a value ARG1 of a struct or union type,
   extract and return the value of one of its (non-static) fields.
   FIELDNO says which field.  */

struct value *
value_field (struct value *arg1, int fieldno)
{
  /* Convenience wrapper: no extra byte offset, field type taken from
     ARG1's own type.  */
  return value_primitive_field (arg1, 0, fieldno, value_type (arg1));
}
/* Return a non-virtual function as a value.
   F is the list of member functions which contains the desired method.
   J is an index into F which provides the desired method.

   We only use the symbol for its address, so be happy with either a
   full symbol or a minimal symbol.  */

struct value *
value_fn_field (struct value **arg1p, struct fn_field *f,
		int j, struct type *type,
		LONGEST offset)
{
  struct value *v;
  struct type *ftype = TYPE_FN_FIELD_TYPE (f, j);
  const char *physname = TYPE_FN_FIELD_PHYSNAME (f, j);
  struct symbol *sym;
  struct bound_minimal_symbol msym;

  sym = lookup_symbol (physname, 0, VAR_DOMAIN, 0).symbol;
  if (sym != NULL)
    {
      memset (&msym, 0, sizeof (msym));
    }
  else
    {
      gdb_assert (sym == NULL);
      msym = lookup_bound_minimal_symbol (physname);
      if (msym.minsym == NULL)
	return NULL;
    }

  v = allocate_value (ftype);
  VALUE_LVAL (v) = lval_memory;
  if (sym)
    {
      set_value_address (v, BLOCK_START (SYMBOL_BLOCK_VALUE (sym)));
    }
  else
    {
      /* The minimal symbol might point to a function descriptor;
	 resolve it to the actual code address instead.  */
      struct objfile *objfile = msym.objfile;
      struct gdbarch *gdbarch = get_objfile_arch (objfile);

      set_value_address (v,
	gdbarch_convert_from_func_ptr_addr
	   (gdbarch, BMSYMBOL_VALUE_ADDRESS (msym), &current_target));
    }

  if (arg1p)
    {
      if (type != value_type (*arg1p))
	*arg1p = value_ind (value_cast (lookup_pointer_type (type),
					value_addr (*arg1p)));

      /* Move the `this' pointer according to the offset.
	 VALUE_OFFSET (*arg1p) += offset; */
    }

  return v;
}
/* Unpack a bitfield of the specified FIELD_TYPE, from the object at
   VALADDR, and store the result in *RESULT.
   The bitfield starts at BITPOS bits and contains BITSIZE bits.

   Extracting bits depends on endianness of the machine.  Compute the
   number of least significant bits to discard.  For big endian machines,
   we compute the total number of bits in the anonymous object, subtract
   off the bit count from the MSB of the object to the MSB of the
   bitfield, then the size of the bitfield, which leaves the LSB discard
   count.  For little endian machines, the discard count is simply the
   number of bits from the LSB of the anonymous object to the LSB of the
   bitfield.

   If the field is signed, we also do sign extension.  */

static LONGEST
unpack_bits_as_long (struct type *field_type, const gdb_byte *valaddr,
		     LONGEST bitpos, LONGEST bitsize)
{
  enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (field_type));
  ULONGEST val;
  ULONGEST valmask;
  int lsbcount;
  LONGEST bytes_read;
  LONGEST read_offset;

  /* Read the minimum number of bytes required; there may not be
     enough bytes to read an entire ULONGEST.  */
  field_type = check_typedef (field_type);
  if (bitsize)
    bytes_read = ((bitpos % 8) + bitsize + 7) / 8;
  else
    bytes_read = TYPE_LENGTH (field_type);

  read_offset = bitpos / 8;

  val = extract_unsigned_integer (valaddr + read_offset,
				  bytes_read, byte_order);

  /* Extract bits.  See comment above.  */

  if (gdbarch_bits_big_endian (get_type_arch (field_type)))
    lsbcount = (bytes_read * 8 - bitpos % 8 - bitsize);
  else
    lsbcount = (bitpos % 8);
  val >>= lsbcount;

  /* If the field does not entirely fill a LONGEST, then zero the sign bits.
     If the field is signed, and is negative, then sign extend.  */

  if ((bitsize > 0) && (bitsize < 8 * (int) sizeof (val)))
    {
      valmask = (((ULONGEST) 1) << bitsize) - 1;
      val &= valmask;
      if (!TYPE_UNSIGNED (field_type))
	{
	  /* Sign bit set: extend by OR-ing in the complement mask.  */
	  if (val & (valmask ^ (valmask >> 1)))
	    {
	      val |= ~valmask;
	    }
	}
    }

  return val;
}
/* Unpack a field FIELDNO of the specified TYPE, from the object at
   VALADDR + EMBEDDED_OFFSET.  VALADDR points to the contents of
   ORIGINAL_VALUE, which must not be NULL.  See
   unpack_value_bits_as_long for more details.  Returns 0 (without
   touching *RESULT) if the needed bits are unavailable or optimized
   out, 1 on success.  */

int
unpack_value_field_as_long (struct type *type, const gdb_byte *valaddr,
			    LONGEST embedded_offset, int fieldno,
			    const struct value *val, LONGEST *result)
{
  int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
  int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
  struct type *field_type = TYPE_FIELD_TYPE (type, fieldno);
  int bit_offset;

  gdb_assert (val != NULL);

  bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos;
  if (value_bits_any_optimized_out (val, bit_offset, bitsize)
      || !value_bits_available (val, bit_offset, bitsize))
    return 0;

  *result = unpack_bits_as_long (field_type, valaddr + embedded_offset,
				 bitpos, bitsize);
  return 1;
}
/* Unpack a field FIELDNO of the specified TYPE, from the anonymous
   object at VALADDR.  See unpack_bits_as_long for more details.  */

LONGEST
unpack_field_as_long (struct type *type, const gdb_byte *valaddr, int fieldno)
{
  int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
  int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
  struct type *field_type = TYPE_FIELD_TYPE (type, fieldno);

  return unpack_bits_as_long (field_type, valaddr, bitpos, bitsize);
}
/* Unpack a bitfield of BITSIZE bits found at BITPOS in the object at
   VALADDR + EMBEDDEDOFFSET that has the type of DEST_VAL and store
   the contents in DEST_VAL, zero or sign extending if the type of
   DEST_VAL is wider than BITSIZE.  VALADDR points to the contents of
   VAL.  If the VAL's contents required to extract the bitfield from
   are unavailable/optimized out, DEST_VAL is correspondingly
   marked unavailable/optimized out.  */

void
unpack_value_bitfield (struct value *dest_val,
		       LONGEST bitpos, LONGEST bitsize,
		       const gdb_byte *valaddr, LONGEST embedded_offset,
		       const struct value *val)
{
  enum bfd_endian byte_order;
  int src_bit_offset;
  int dst_bit_offset;
  struct type *field_type = value_type (dest_val);

  byte_order = gdbarch_byte_order (get_type_arch (field_type));

  /* First, unpack and sign extend the bitfield as if it was wholly
     valid.  Optimized out/unavailable bits are read as zero, but
     that's OK, as they'll end up marked below.  If the VAL is
     wholly-invalid we may have skipped allocating its contents,
     though.  See allocate_optimized_out_value.  */
  if (valaddr != NULL)
    {
      LONGEST num;

      num = unpack_bits_as_long (field_type, valaddr + embedded_offset,
				 bitpos, bitsize);
      store_signed_integer (value_contents_raw (dest_val),
			    TYPE_LENGTH (field_type), byte_order, num);
    }

  /* Now copy the optimized out / unavailability ranges to the right
     bits.  */
  src_bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos;
  if (byte_order == BFD_ENDIAN_BIG)
    dst_bit_offset = TYPE_LENGTH (field_type) * TARGET_CHAR_BIT - bitsize;
  else
    dst_bit_offset = 0;
  value_ranges_copy_adjusted (dest_val, dst_bit_offset,
			      val, src_bit_offset, bitsize);
}
/* Return a new value with type TYPE, which is FIELDNO field of the
   object at VALADDR + EMBEDDEDOFFSET.  VALADDR points to the contents
   of VAL.  If the VAL's contents required to extract the bitfield
   from are unavailable/optimized out, the new value is
   correspondingly marked unavailable/optimized out.  */

struct value *
value_field_bitfield (struct type *type, int fieldno,
		      const gdb_byte *valaddr,
		      LONGEST embedded_offset, const struct value *val)
{
  int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
  int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
  struct value *res_val = allocate_value (TYPE_FIELD_TYPE (type, fieldno));

  unpack_value_bitfield (res_val, bitpos, bitsize,
			 valaddr, embedded_offset, val);

  return res_val;
}
/* Modify the value of a bitfield.  ADDR points to a block of memory in
   target byte order; the bitfield starts in the byte pointed to.  FIELDVAL
   is the desired value of the field, in host byte order.  BITPOS and BITSIZE
   indicate which bits (in target bit order) comprise the bitfield.
   Requires 0 < BITSIZE <= lbits, 0 <= BITPOS % 8 + BITSIZE <= lbits, and
   0 <= BITPOS, where lbits is the size of a LONGEST in bits.  */

void
modify_field (struct type *type, gdb_byte *addr,
	      LONGEST fieldval, LONGEST bitpos, LONGEST bitsize)
{
  enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
  ULONGEST oword;
  ULONGEST mask = (ULONGEST) -1 >> (8 * sizeof (ULONGEST) - bitsize);
  LONGEST bytesize;

  /* Normalize BITPOS.  */
  addr += bitpos / 8;
  bitpos %= 8;

  /* If a negative fieldval fits in the field in question, chop
     off the sign extension bits.  */
  if ((~fieldval & ~(mask >> 1)) == 0)
    fieldval &= mask;

  /* Warn if value is too big to fit in the field in question.  */
  if (0 != (fieldval & ~mask))
    {
      /* FIXME: would like to include fieldval in the message, but
	 we don't have a sprintf_longest.  */
      warning (_("Value does not fit in %s bits."), plongest (bitsize));

      /* Truncate it, otherwise adjoining fields may be corrupted.  */
      fieldval &= mask;
    }

  /* Ensure no bytes outside of the modified ones get accessed as it may cause
     false valgrind reports.  */

  bytesize = (bitpos + bitsize + 7) / 8;
  oword = extract_unsigned_integer (addr, bytesize, byte_order);

  /* Shifting for bit field depends on endianness of the target machine.  */
  if (gdbarch_bits_big_endian (get_type_arch (type)))
    bitpos = bytesize * 8 - bitpos - bitsize;

  oword &= ~(mask << bitpos);
  oword |= fieldval << bitpos;

  store_unsigned_integer (addr, bytesize, byte_order, oword);
}
/* Pack NUM into BUF using a target format of TYPE.  */

void
pack_long (gdb_byte *buf, struct type *type, LONGEST num)
{
  enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
  LONGEST len;

  type = check_typedef (type);
  len = TYPE_LENGTH (type);

  switch (TYPE_CODE (type))
    {
    case TYPE_CODE_INT:
    case TYPE_CODE_CHAR:
    case TYPE_CODE_ENUM:
    case TYPE_CODE_FLAGS:
    case TYPE_CODE_BOOL:
    case TYPE_CODE_RANGE:
    case TYPE_CODE_MEMBERPTR:
      store_signed_integer (buf, len, byte_order, num);
      break;

    case TYPE_CODE_REF:
    case TYPE_CODE_RVALUE_REF:
    case TYPE_CODE_PTR:
      /* Pointers and references are stored as target addresses.  */
      store_typed_address (buf, type, (CORE_ADDR) num);
      break;

    default:
      error (_("Unexpected type (%d) encountered for integer constant."),
	     TYPE_CODE (type));
    }
}
/* Pack NUM into BUF using a target format of TYPE.  */

void
pack_unsigned_long (gdb_byte *buf, struct type *type, ULONGEST num)
{
  LONGEST len;
  enum bfd_endian byte_order;

  type = check_typedef (type);
  len = TYPE_LENGTH (type);
  byte_order = gdbarch_byte_order (get_type_arch (type));

  switch (TYPE_CODE (type))
    {
    case TYPE_CODE_INT:
    case TYPE_CODE_CHAR:
    case TYPE_CODE_ENUM:
    case TYPE_CODE_FLAGS:
    case TYPE_CODE_BOOL:
    case TYPE_CODE_RANGE:
    case TYPE_CODE_MEMBERPTR:
      store_unsigned_integer (buf, len, byte_order, num);
      break;

    case TYPE_CODE_REF:
    case TYPE_CODE_RVALUE_REF:
    case TYPE_CODE_PTR:
      /* Pointers and references are stored as target addresses.  */
      store_typed_address (buf, type, (CORE_ADDR) num);
      break;

    default:
      error (_("Unexpected type (%d) encountered "
	       "for unsigned integer constant."),
	     TYPE_CODE (type));
    }
}
3592 /* Convert C numbers into newly allocated values. */
3595 value_from_longest (struct type
*type
, LONGEST num
)
3597 struct value
*val
= allocate_value (type
);
3599 pack_long (value_contents_raw (val
), type
, num
);
/* Convert C unsigned numbers into newly allocated values.  */

struct value *
value_from_ulongest (struct type *type, ULONGEST num)
{
  struct value *val = allocate_value (type);

  pack_unsigned_long (value_contents_raw (val), type, num);

  return val;
}
/* Create a value representing a pointer of type TYPE to the address
   ADDR.  */

struct value *
value_from_pointer (struct type *type, CORE_ADDR addr)
{
  struct value *val = allocate_value (type);

  /* Store through the stripped type so the address representation
     matches the target pointer format.  */
  store_typed_address (value_contents_raw (val),
		       check_typedef (type), addr);
  return val;
}
/* Create a value of type TYPE whose contents come from VALADDR, if it
   is non-null, and whose memory address (in the inferior) is
   ADDRESS.  The type of the created value may differ from the passed
   type TYPE.  Make sure to retrieve values new type after this call.
   Note that TYPE is not passed through resolve_dynamic_type; this is
   a special API intended for use only by Ada.  */

struct value *
value_from_contents_and_address_unresolved (struct type *type,
					    const gdb_byte *valaddr,
					    CORE_ADDR address)
{
  struct value *v;

  /* NULL contents means "read lazily from ADDRESS when needed".  */
  if (valaddr == NULL)
    v = allocate_value_lazy (type);
  else
    v = value_from_contents (type, valaddr);
  VALUE_LVAL (v) = lval_memory;
  set_value_address (v, address);
  return v;
}
/* Create a value of type TYPE whose contents come from VALADDR, if it
   is non-null, and whose memory address (in the inferior) is
   ADDRESS.  The type of the created value may differ from the passed
   type TYPE (because TYPE is resolved with resolve_dynamic_type
   below).  Make sure to retrieve the value's new type after this
   call.  */

struct value *
value_from_contents_and_address (struct type *type,
				 const gdb_byte *valaddr,
				 CORE_ADDR address)
{
  /* Resolve dynamic type properties (e.g. dynamic array bounds)
     against the actual contents/address before building the value.  */
  struct type *resolved_type = resolve_dynamic_type (type, valaddr, address);
  struct type *resolved_type_no_typedef = check_typedef (resolved_type);
  struct value *v;

  if (valaddr == NULL)
    v = allocate_value_lazy (resolved_type);
  else
    v = value_from_contents (resolved_type, valaddr);
  /* If the resolved type carries a constant DW_AT_data_location, the
     object really lives at that address, not at ADDRESS.  */
  if (TYPE_DATA_LOCATION (resolved_type_no_typedef) != NULL
      && TYPE_DATA_LOCATION_KIND (resolved_type_no_typedef) == PROP_CONST)
    address = TYPE_DATA_LOCATION_ADDR (resolved_type_no_typedef);
  VALUE_LVAL (v) = lval_memory;
  set_value_address (v, address);
  return v;
}
3680 /* Create a value of type TYPE holding the contents CONTENTS.
3681 The new value is `not_lval'. */
3684 value_from_contents (struct type
*type
, const gdb_byte
*contents
)
3686 struct value
*result
;
3688 result
= allocate_value (type
);
3689 memcpy (value_contents_raw (result
), contents
, TYPE_LENGTH (type
));
3694 value_from_double (struct type
*type
, DOUBLEST num
)
3696 struct value
*val
= allocate_value (type
);
3697 struct type
*base_type
= check_typedef (type
);
3698 enum type_code code
= TYPE_CODE (base_type
);
3700 if (code
== TYPE_CODE_FLT
)
3702 store_typed_floating (value_contents_raw (val
), base_type
, num
);
3705 error (_("Unexpected type encountered for floating constant."));
3711 value_from_decfloat (struct type
*type
, const gdb_byte
*dec
)
3713 struct value
*val
= allocate_value (type
);
3715 memcpy (value_contents_raw (val
), dec
, TYPE_LENGTH (type
));
/* Extract a value from the history file.  Input will be of the form
   $digits or $$digits.  See block comment above 'write_dollar_variable'
   for details.  */

struct value *
value_from_history_ref (const char *h, const char **endp)
{
  int index, len;

  if (h[0] == '$')
    len = 1;
  else
    return NULL;

  if (h[1] == '$')
    len = 2;

  /* Find length of numeral string.  */
  for (; isdigit (h[len]); len++)
    ;

  /* Make sure numeral string is not part of an identifier.  */
  if (h[len] == '_' || isalpha (h[len]))
    return NULL;

  /* Now collect the index value.  */
  if (h[1] == '$')
    {
      if (len == 2)
	{
	  /* For some bizarre reason, "$$" is equivalent to "$$1",
	     rather than to "$$0" as it ought to be!  */
	  index = -1;
	  *endp += len;
	}
      else
	{
	  char *local_end;

	  /* "$$N" means N items back in the history, hence the
	     negated index.  */
	  index = -strtol (&h[2], &local_end, 10);
	  *endp = local_end;
	}
    }
  else
    {
      if (len == 1)
	{
	  /* "$" is equivalent to "$0".  */
	  index = 0;
	  *endp += len;
	}
      else
	{
	  char *local_end;

	  index = strtol (&h[1], &local_end, 10);
	  *endp = local_end;
	}
    }

  return access_value_history (index);
}
3782 /* Get the component value (offset by OFFSET bytes) of a struct or
3783 union WHOLE. Component's type is TYPE. */
3786 value_from_component (struct value
*whole
, struct type
*type
, LONGEST offset
)
3790 if (VALUE_LVAL (whole
) == lval_memory
&& value_lazy (whole
))
3791 v
= allocate_value_lazy (type
);
3794 v
= allocate_value (type
);
3795 value_contents_copy (v
, value_embedded_offset (v
),
3796 whole
, value_embedded_offset (whole
) + offset
,
3797 type_length_units (type
));
3799 v
->offset
= value_offset (whole
) + offset
+ value_embedded_offset (whole
);
3800 set_value_component_location (v
, whole
);
3806 coerce_ref_if_computed (const struct value
*arg
)
3808 const struct lval_funcs
*funcs
;
3810 if (!TYPE_IS_REFERENCE (check_typedef (value_type (arg
))))
3813 if (value_lval_const (arg
) != lval_computed
)
3816 funcs
= value_computed_funcs (arg
);
3817 if (funcs
->coerce_ref
== NULL
)
3820 return funcs
->coerce_ref (arg
);
3823 /* Look at value.h for description. */
3826 readjust_indirect_value_type (struct value
*value
, struct type
*enc_type
,
3827 const struct type
*original_type
,
3828 const struct value
*original_value
)
3830 /* Re-adjust type. */
3831 deprecated_set_value_type (value
, TYPE_TARGET_TYPE (original_type
));
3833 /* Add embedding info. */
3834 set_value_enclosing_type (value
, enc_type
);
3835 set_value_embedded_offset (value
, value_pointed_to_offset (original_value
));
3837 /* We may be pointing to an object of some derived type. */
3838 return value_full_object (value
, NULL
, 0, 0, 0);
/* Dereference ARG if it has reference type; otherwise return ARG
   unchanged.  Computed lvalues get first chance via their own
   coerce_ref hook.  */

struct value *
coerce_ref (struct value *arg)
{
  struct type *value_type_arg_tmp = check_typedef (value_type (arg));
  struct value *retval;
  struct type *enc_type;

  /* A computed lvalue may implement its own coercion.  */
  retval = coerce_ref_if_computed (arg);
  if (retval)
    return retval;

  if (!TYPE_IS_REFERENCE (value_type_arg_tmp))
    return arg;

  /* Build a lazy value at the address the reference designates, typed
     as the referenced (target) type.  */
  enc_type = check_typedef (value_enclosing_type (arg));
  enc_type = TYPE_TARGET_TYPE (enc_type);

  retval = value_at_lazy (enc_type,
			  unpack_pointer (value_type (arg),
					  value_contents (arg)));
  enc_type = value_type (retval);
  return readjust_indirect_value_type (retval, enc_type,
				       value_type_arg_tmp, arg);
}
3867 coerce_array (struct value
*arg
)
3871 arg
= coerce_ref (arg
);
3872 type
= check_typedef (value_type (arg
));
3874 switch (TYPE_CODE (type
))
3876 case TYPE_CODE_ARRAY
:
3877 if (!TYPE_VECTOR (type
) && current_language
->c_style_arrays
)
3878 arg
= value_coerce_array (arg
);
3880 case TYPE_CODE_FUNC
:
3881 arg
= value_coerce_function (arg
);
3888 /* Return the return value convention that will be used for the
3891 enum return_value_convention
3892 struct_return_convention (struct gdbarch
*gdbarch
,
3893 struct value
*function
, struct type
*value_type
)
3895 enum type_code code
= TYPE_CODE (value_type
);
3897 if (code
== TYPE_CODE_ERROR
)
3898 error (_("Function return type unknown."));
3900 /* Probe the architecture for the return-value convention. */
3901 return gdbarch_return_value (gdbarch
, function
, value_type
,
3905 /* Return true if the function returning the specified type is using
3906 the convention of returning structures in memory (passing in the
3907 address as a hidden first parameter). */
3910 using_struct_return (struct gdbarch
*gdbarch
,
3911 struct value
*function
, struct type
*value_type
)
3913 if (TYPE_CODE (value_type
) == TYPE_CODE_VOID
)
3914 /* A void return value is never in memory. See also corresponding
3915 code in "print_return_value". */
3918 return (struct_return_convention (gdbarch
, function
, value_type
)
3919 != RETURN_VALUE_REGISTER_CONVENTION
);
3922 /* Set the initialized field in a value struct. */
3925 set_value_initialized (struct value
*val
, int status
)
3927 val
->initialized
= status
;
3930 /* Return the initialized field in a value struct. */
3933 value_initialized (const struct value
*val
)
3935 return val
->initialized
;
/* Load the actual content of a lazy value.  Fetch the data from the
   user's process and clear the lazy flag to indicate that the data in
   the buffer is valid.

   If the value is zero-length, we avoid calling read_memory, which
   would abort.  We mark the value as fetched anyway -- all 0 bytes of
   it.  */

void
value_fetch_lazy (struct value *val)
{
  gdb_assert (value_lazy (val));
  allocate_value_contents (val);
  /* A value is either lazy, or fully fetched.  The
     availability/validity is only established as we try to fetch a
     value.  */
  gdb_assert (VEC_empty (range_s, val->optimized_out));
  gdb_assert (VEC_empty (range_s, val->unavailable));
  if (value_bitsize (val))
    {
      /* To read a lazy bitfield, read the entire enclosing value.  This
	 prevents reading the same block of (possibly volatile) memory once
	 per bitfield.  It would be even better to read only the containing
	 word, but we have no way to record that just specific bits of a
	 value have been fetched.  */
      struct type *type = check_typedef (value_type (val));
      struct value *parent = value_parent (val);

      if (value_lazy (parent))
	value_fetch_lazy (parent);

      unpack_value_bitfield (val,
			     value_bitpos (val), value_bitsize (val),
			     value_contents_for_printing (parent),
			     value_offset (val), parent);
    }
  else if (VALUE_LVAL (val) == lval_memory)
    {
      CORE_ADDR addr = value_address (val);
      struct type *type = check_typedef (value_enclosing_type (val));

      /* Zero-length types read nothing, but the value is still marked
	 as fetched (lazy flag cleared at the bottom).  */
      if (TYPE_LENGTH (type))
	read_value_memory (val, 0, value_stack (val),
			   addr, value_contents_all_raw (val),
			   type_length_units (type));
    }
  else if (VALUE_LVAL (val) == lval_register)
    {
      struct frame_info *next_frame;
      int regnum;
      struct type *type = check_typedef (value_type (val));
      struct value *new_val = val, *mark = value_mark ();

      /* Offsets are not supported here; lazy register values must
	 refer to the entire register.  */
      gdb_assert (value_offset (val) == 0);

      /* Walk down the chain of lazy register values until we reach a
	 value that is actually fetchable.  */
      while (VALUE_LVAL (new_val) == lval_register && value_lazy (new_val))
	{
	  struct frame_id next_frame_id = VALUE_NEXT_FRAME_ID (new_val);

	  next_frame = frame_find_by_id (next_frame_id);
	  regnum = VALUE_REGNUM (new_val);

	  gdb_assert (next_frame != NULL);

	  /* Convertible register routines are used for multi-register
	     values and for interpretation in different types
	     (e.g. float or int from a double register).  Lazy
	     register values should have the register's natural type,
	     so they do not apply.  */
	  gdb_assert (!gdbarch_convert_register_p (get_frame_arch (next_frame),
						   regnum, type));

	  /* FRAME was obtained, above, via VALUE_NEXT_FRAME_ID.
	     Since a "->next" operation was performed when setting
	     this field, we do not need to perform a "next" operation
	     again when unwinding the register.  That's why
	     frame_unwind_register_value() is called here instead of
	     get_frame_register_value().  */
	  new_val = frame_unwind_register_value (next_frame, regnum);

	  /* If we get another lazy lval_register value, it means the
	     register is found by reading it from NEXT_FRAME's next frame.
	     frame_unwind_register_value should never return a value with
	     the frame id pointing to NEXT_FRAME.  If it does, it means we
	     either have two consecutive frames with the same frame id
	     in the frame chain, or some code is trying to unwind
	     behind get_prev_frame's back (e.g., a frame unwind
	     sniffer trying to unwind), bypassing its validations.  In
	     any case, it should always be an internal error to end up
	     in this situation.  */
	  if (VALUE_LVAL (new_val) == lval_register
	      && value_lazy (new_val)
	      && frame_id_eq (VALUE_NEXT_FRAME_ID (new_val), next_frame_id))
	    internal_error (__FILE__, __LINE__,
			    _("infinite loop while fetching a register"));
	}

      /* If it's still lazy (for instance, a saved register on the
	 stack), fetch it.  */
      if (value_lazy (new_val))
	value_fetch_lazy (new_val);

      /* Copy the contents and the unavailability/optimized-out
	 meta-data from NEW_VAL to VAL.  */
      set_value_lazy (val, 0);
      value_contents_copy (val, value_embedded_offset (val),
			   new_val, value_embedded_offset (new_val),
			   type_length_units (type));

      if (frame_debug)
	{
	  struct gdbarch *gdbarch;
	  struct frame_info *frame;
	  /* VALUE_FRAME_ID is used here, instead of VALUE_NEXT_FRAME_ID,
	     so that the frame level will be shown correctly.  */
	  frame = frame_find_by_id (VALUE_FRAME_ID (val));
	  regnum = VALUE_REGNUM (val);
	  gdbarch = get_frame_arch (frame);

	  fprintf_unfiltered (gdb_stdlog,
			      "{ value_fetch_lazy "
			      "(frame=%d,regnum=%d(%s),...) ",
			      frame_relative_level (frame), regnum,
			      user_reg_map_regnum_to_name (gdbarch, regnum));

	  fprintf_unfiltered (gdb_stdlog, "->");
	  if (value_optimized_out (new_val))
	    {
	      fprintf_unfiltered (gdb_stdlog, " ");
	      val_print_optimized_out (new_val, gdb_stdlog);
	    }
	  else
	    {
	      int i;
	      const gdb_byte *buf = value_contents (new_val);

	      if (VALUE_LVAL (new_val) == lval_register)
		fprintf_unfiltered (gdb_stdlog, " register=%d",
				    VALUE_REGNUM (new_val));
	      else if (VALUE_LVAL (new_val) == lval_memory)
		fprintf_unfiltered (gdb_stdlog, " address=%s",
				    paddress (gdbarch,
					      value_address (new_val)));
	      else
		fprintf_unfiltered (gdb_stdlog, " computed");

	      fprintf_unfiltered (gdb_stdlog, " bytes=");
	      fprintf_unfiltered (gdb_stdlog, "[");
	      for (i = 0; i < register_size (gdbarch, regnum); i++)
		fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
	      fprintf_unfiltered (gdb_stdlog, "]");
	    }

	  fprintf_unfiltered (gdb_stdlog, " }\n");
	}

      /* Dispose of the intermediate values.  This prevents
	 watchpoints from trying to watch the saved frame pointer.  */
      value_free_to_mark (mark);
    }
  else if (VALUE_LVAL (val) == lval_computed
	   && value_computed_funcs (val)->read != NULL)
    value_computed_funcs (val)->read (val);
  else
    internal_error (__FILE__, __LINE__, _("Unexpected lazy value type."));

  set_value_lazy (val, 0);
}
4109 /* Implementation of the convenience function $_isvoid. */
4111 static struct value
*
4112 isvoid_internal_fn (struct gdbarch
*gdbarch
,
4113 const struct language_defn
*language
,
4114 void *cookie
, int argc
, struct value
**argv
)
4119 error (_("You must provide one argument for $_isvoid."));
4121 ret
= TYPE_CODE (value_type (argv
[0])) == TYPE_CODE_VOID
;
4123 return value_from_longest (builtin_type (gdbarch
)->builtin_int
, ret
);
/* Module initialization: register the commands, convenience
   functions, and settings provided by this file.  */

void
_initialize_values (void)
{
  /* "show convenience" lists convenience variables and functions.  */
  add_cmd ("convenience", no_class, show_convenience, _("\
Debugger convenience (\"$foo\") variables and functions.\n\
Convenience variables are created when you assign them values;\n\
thus, \"set $foo=1\" gives \"$foo\" the value 1.  Values may be any type.\n\
\n\
A few convenience variables are given values automatically:\n\
\"$_\"holds the last address examined with \"x\" or \"info lines\",\n\
\"$__\" holds the contents of the last address examined with \"x\"."
#ifdef HAVE_PYTHON
"\n\n\
Convenience functions are defined via the Python API."
#endif
	   ), &showlist);
  add_alias_cmd ("conv", "convenience", no_class, 1, &showlist);

  /* "show values" prints the value history.  */
  add_cmd ("values", no_set_class, show_values, _("\
Elements of value history around item number IDX (or last ten)."),
	   &showlist);

  add_com ("init-if-undefined", class_vars, init_if_undefined_command, _("\
Initialize a convenience variable if necessary.\n\
init-if-undefined VARIABLE = EXPRESSION\n\
Set an internal VARIABLE to the result of the EXPRESSION if it does not\n\
exist or does not contain a value.  The EXPRESSION is not evaluated if the\n\
VARIABLE is already initialized."));

  /* "function" is a help-only prefix for convenience functions.  */
  add_prefix_cmd ("function", no_class, function_command, _("\
Placeholder command for showing help on convenience functions."),
		  &functionlist, "function ", 0, &cmdlist);

  add_internal_function ("_isvoid", _("\
Check whether an expression is void.\n\
Usage: $_isvoid (expression)\n\
Return 1 if the expression is void, zero otherwise."),
			 isvoid_internal_fn, NULL);

  /* Guard against accidentally loading huge objects from the
     inferior; 'unlimited' disables the check.  */
  add_setshow_zuinteger_unlimited_cmd ("max-value-size",
				       class_support, &max_value_size, _("\
Set maximum sized value gdb will load from the inferior."), _("\
Show maximum sized value gdb will load from the inferior."), _("\
Use this to control the maximum size, in bytes, of a value that gdb\n\
will load from the inferior.  Setting this value to 'unlimited'\n\
disables checking.\n\
Setting this does not invalidate already allocated values, it only\n\
prevents future values, larger than this size, from being allocated."),
				       set_max_value_size,
				       show_max_value_size,
				       &setlist, &showlist);
}