Remove value::next and value::released
1 /* Low level packing and unpacking of values for GDB, the GNU Debugger.
2
3 Copyright (C) 1986-2018 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21 #include "arch-utils.h"
22 #include "symtab.h"
23 #include "gdbtypes.h"
24 #include "value.h"
25 #include "gdbcore.h"
26 #include "command.h"
27 #include "gdbcmd.h"
28 #include "target.h"
29 #include "language.h"
30 #include "demangle.h"
31 #include "regcache.h"
32 #include "block.h"
33 #include "target-float.h"
34 #include "objfiles.h"
35 #include "valprint.h"
36 #include "cli/cli-decode.h"
37 #include "extension.h"
38 #include <ctype.h>
39 #include "tracepoint.h"
40 #include "cp-abi.h"
41 #include "user-regs.h"
42 #include <algorithm>
43 #include "completer.h"
44
45 /* Definition of a user function. */
46 struct internal_function
47 {
48 /* The name of the function. It is a bit odd to have this in the
49 function itself -- the user might use a differently-named
50 convenience variable to hold the function. */
51 char *name;
52
53 /* The handler. */
54 internal_function_fn handler;
55
56 /* User data for the handler. */
57 void *cookie;
58 };
59
60 /* Defines an [OFFSET, OFFSET + LENGTH) range. */
61
62 struct range
63 {
64 /* Lowest offset in the range. */
65 LONGEST offset;
66
67 /* Length of the range. */
68 LONGEST length;
69 };
70
71 typedef struct range range_s;
72
73 DEF_VEC_O(range_s);
74
75 /* Returns true if the ranges defined by [offset1, offset1+len1) and
76 [offset2, offset2+len2) overlap. */
77
78 static int
79 ranges_overlap (LONGEST offset1, LONGEST len1,
80 LONGEST offset2, LONGEST len2)
81 {
82 ULONGEST h, l;
83
84 l = std::max (offset1, offset2);
85 h = std::min (offset1 + len1, offset2 + len2);
86 return (l < h);
87 }
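/* A worked example of the above (editorial illustration): ranges_overlap (0, 4, 2, 6)
   yields true, since l = 2 and h = 4; ranges_overlap (0, 4, 4, 2) yields false,
   because [0, 4) and [4, 6) share no element, the upper bounds being exclusive.  */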
88
89 /* Returns true if the first argument is strictly less than the
90 second, useful for VEC_lower_bound. We keep ranges sorted by
91 offset and coalesce overlapping and contiguous ranges, so this just
92 compares the starting offset. */
93
94 static int
95 range_lessthan (const range_s *r1, const range_s *r2)
96 {
97 return r1->offset < r2->offset;
98 }
99
100 /* Returns true if RANGES contains any range that overlaps [OFFSET,
101 OFFSET+LENGTH). */
102
103 static int
104 ranges_contain (VEC(range_s) *ranges, LONGEST offset, LONGEST length)
105 {
106 range_s what;
107 LONGEST i;
108
109 what.offset = offset;
110 what.length = length;
111
112 /* We keep ranges sorted by offset and coalesce overlapping and
113 contiguous ranges, so to check if a range list contains a given
114 range, we can do a binary search for the position at which the given range
115 would be inserted if we only considered the starting OFFSET of
116 ranges. We call that position I. Since we also have LENGTH to
117 care for (this is a range after all), we need to check if the
118 _previous_ range overlaps the I range. E.g.,
119
120 R
121 |---|
122 |---| |---| |------| ... |--|
123 0 1 2 N
124
125 I=1
126
127 In the case above, the binary search would return `I=1', meaning,
128 this OFFSET should be inserted at position 1, and the current
129 position 1 should be pushed further (and before 2). But, `0'
130 overlaps with R.
131
132 Then we need to check whether the range at position I itself overlaps R.
133 E.g.,
134
135 R
136 |---|
137 |---| |---| |-------| ... |--|
138 0 1 2 N
139
140 I=1
141 */
142
143 i = VEC_lower_bound (range_s, ranges, &what, range_lessthan);
144
145 if (i > 0)
146 {
147 struct range *bef = VEC_index (range_s, ranges, i - 1);
148
149 if (ranges_overlap (bef->offset, bef->length, offset, length))
150 return 1;
151 }
152
153 if (i < VEC_length (range_s, ranges))
154 {
155 struct range *r = VEC_index (range_s, ranges, i);
156
157 if (ranges_overlap (r->offset, r->length, offset, length))
158 return 1;
159 }
160
161 return 0;
162 }
163
164 static struct cmd_list_element *functionlist;
165
166 /* Note that the fields in this structure are arranged to save a bit
167 of memory. */
168
169 struct value
170 {
171 /* Type of value; either not an lval, or one of the various
172 different possible kinds of lval. */
173 enum lval_type lval;
174
175 /* Is it modifiable? Only relevant if lval != not_lval. */
176 unsigned int modifiable : 1;
177
178 /* If zero, contents of this value are in the contents field. If
179 nonzero, contents are in inferior. If the lval field is lval_memory,
180 the contents are in inferior memory at location.address plus offset.
181 The lval field may also be lval_register.
182
183 WARNING: This field is used by the code which handles watchpoints
184 (see breakpoint.c) to decide whether a particular value can be
185 watched by hardware watchpoints. If the lazy flag is set for
186 some member of a value chain, it is assumed that this member of
187 the chain doesn't need to be watched as part of watching the
188 value itself. This is how GDB avoids watching the entire struct
189 or array when the user wants to watch a single struct member or
190 array element. If you ever change the way lazy flag is set and
191 reset, be sure to consider this use as well! */
192 unsigned int lazy : 1;
193
194 /* If value is a variable, whether it is initialized or not. */
195 unsigned int initialized : 1;
196
197 /* If value is from the stack. If this is set, read_stack will be
198 used instead of read_memory to enable extra caching. */
199 unsigned int stack : 1;
200
201 /* Location of value (if lval). */
202 union
203 {
204 /* If lval == lval_memory, this is the address in the inferior. */
205 CORE_ADDR address;
206
207 /* If lval == lval_register, the value is from a register. */
208 struct
209 {
210 /* Register number. */
211 int regnum;
212 /* Frame ID of "next" frame to which a register value is relative.
213 If the register value is found relative to frame F, then the
214 frame id of F->next will be stored in next_frame_id. */
215 struct frame_id next_frame_id;
216 } reg;
217
218 /* Pointer to internal variable. */
219 struct internalvar *internalvar;
220
221 /* Pointer to xmethod worker. */
222 struct xmethod_worker *xm_worker;
223
224 /* If lval == lval_computed, this is a set of function pointers
225 to use to access and describe the value, and a closure pointer
226 for them to use. */
227 struct
228 {
229 /* Functions to call. */
230 const struct lval_funcs *funcs;
231
232 /* Closure for those functions to use. */
233 void *closure;
234 } computed;
235 } location;
236
237 /* Describes offset of a value within lval of a structure in target
238 addressable memory units. Note also the member embedded_offset
239 below. */
240 LONGEST offset;
241
242 /* Only used for bitfields; number of bits contained in them. */
243 LONGEST bitsize;
244
245 /* Only used for bitfields; position of start of field. For
246 gdbarch_bits_big_endian=0 targets, it is the position of the LSB. For
247 gdbarch_bits_big_endian=1 targets, it is the position of the MSB. */
248 LONGEST bitpos;
249
250 /* The number of references to this value. When a value is created,
251 the value chain holds a reference, so REFERENCE_COUNT is 1. If
252 release_value is called, this value is removed from the chain but
253 the caller of release_value now has a reference to this value.
254 The caller must arrange for a call to value_free later. */
255 int reference_count;
256
257 /* Only used for bitfields; the containing value. This allows a
258 single read from the target when displaying multiple
259 bitfields. */
260 struct value *parent;
261
262 /* Type of the value. */
263 struct type *type;
264
265 /* If a value represents a C++ object, then the `type' field gives
266 the object's compile-time type. If the object actually belongs
267 to some class derived from `type', perhaps with other base
268 classes and additional members, then `type' is just a subobject
269 of the real thing, and the full object is probably larger than
270 `type' would suggest.
271
272 If `type' is a dynamic class (i.e. one with a vtable), then GDB
273 can actually determine the object's run-time type by looking at
274 the run-time type information in the vtable. When this
275 information is available, we may elect to read in the entire
276 object, for several reasons:
277
278 - When printing the value, the user would probably rather see the
279 full object, not just the limited portion apparent from the
280 compile-time type.
281
282 - If `type' has virtual base classes, then even printing `type'
283 alone may require reaching outside the `type' portion of the
284 object to wherever the virtual base class has been stored.
285
286 When we store the entire object, `enclosing_type' is the run-time
287 type -- the complete object -- and `embedded_offset' is the
288 offset of `type' within that larger type, in target addressable memory
289 units. The value_contents() macro takes `embedded_offset' into account,
290 so most GDB code continues to see the `type' portion of the value, just
291 as the inferior would.
292
293 If `type' is a pointer to an object, then `enclosing_type' is a
294 pointer to the object's run-time type, and `pointed_to_offset' is
295 the offset in target addressable memory units from the full object
296 to the pointed-to object -- that is, the value `embedded_offset' would
297 have if we followed the pointer and fetched the complete object.
298 (I don't really see the point. Why not just determine the
299 run-time type when you indirect, and avoid the special case? The
300 contents don't matter until you indirect anyway.)
301
302 If we're not doing anything fancy, `enclosing_type' is equal to
303 `type', and `embedded_offset' is zero, so everything works
304 normally. */
305 struct type *enclosing_type;
306 LONGEST embedded_offset;
307 LONGEST pointed_to_offset;
308
309 /* Actual contents of the value. Target byte-order. NULL or not
310 valid if lazy is nonzero. */
311 gdb_byte *contents;
312
313 /* Unavailable ranges in CONTENTS. We mark unavailable ranges,
314 rather than available, since the common and default case is for a
315 value to be available. This is filled in at value read time.
316 The unavailable ranges are tracked in bits. Note that a contents
317 bit that has been optimized out doesn't really exist in the
318 program, so it can't be marked unavailable either. */
319 VEC(range_s) *unavailable;
320
321 /* Likewise, but for optimized out contents (a chunk of the value of
322 a variable that does not actually exist in the program). If LVAL
323 is lval_register, this is a register ($pc, $sp, etc., never a
324 program variable) that has not been saved in the frame. Not
325 saved registers and optimized-out program variables values are
326 treated pretty much the same, except not-saved registers have a
327 different string representation and related error strings. */
328 VEC(range_s) *optimized_out;
329 };
330
331 /* See value.h. */
332
333 struct gdbarch *
334 get_value_arch (const struct value *value)
335 {
336 return get_type_arch (value_type (value));
337 }
338
339 int
340 value_bits_available (const struct value *value, LONGEST offset, LONGEST length)
341 {
342 gdb_assert (!value->lazy);
343
344 return !ranges_contain (value->unavailable, offset, length);
345 }
346
347 int
348 value_bytes_available (const struct value *value,
349 LONGEST offset, LONGEST length)
350 {
351 return value_bits_available (value,
352 offset * TARGET_CHAR_BIT,
353 length * TARGET_CHAR_BIT);
354 }
355
356 int
357 value_bits_any_optimized_out (const struct value *value, int bit_offset, int bit_length)
358 {
359 gdb_assert (!value->lazy);
360
361 return ranges_contain (value->optimized_out, bit_offset, bit_length);
362 }
363
364 int
365 value_entirely_available (struct value *value)
366 {
367 /* We can only tell whether the whole value is available when we try
368 to read it. */
369 if (value->lazy)
370 value_fetch_lazy (value);
371
372 if (VEC_empty (range_s, value->unavailable))
373 return 1;
374 return 0;
375 }
376
377 /* Returns true if VALUE is entirely covered by RANGES. If the value
378 is lazy, it'll be read now. Note that RANGES is a pointer to a
379 pointer because reading the value might change *RANGES. */
380
381 static int
382 value_entirely_covered_by_range_vector (struct value *value,
383 VEC(range_s) **ranges)
384 {
385 /* We can only tell whether the whole value is optimized out /
386 unavailable when we try to read it. */
387 if (value->lazy)
388 value_fetch_lazy (value);
389
390 if (VEC_length (range_s, *ranges) == 1)
391 {
392 struct range *t = VEC_index (range_s, *ranges, 0);
393
394 if (t->offset == 0
395 && t->length == (TARGET_CHAR_BIT
396 * TYPE_LENGTH (value_enclosing_type (value))))
397 return 1;
398 }
399
400 return 0;
401 }
402
403 int
404 value_entirely_unavailable (struct value *value)
405 {
406 return value_entirely_covered_by_range_vector (value, &value->unavailable);
407 }
408
409 int
410 value_entirely_optimized_out (struct value *value)
411 {
412 return value_entirely_covered_by_range_vector (value, &value->optimized_out);
413 }
414
415 /* Insert into the vector pointed to by VECTORP the bit range starting at
416 OFFSET bits, and extending for the next LENGTH bits. */
417
418 static void
419 insert_into_bit_range_vector (VEC(range_s) **vectorp,
420 LONGEST offset, LONGEST length)
421 {
422 range_s newr;
423 int i;
424
425 /* Insert the range sorted. If there's overlap or the new range
426 would be contiguous with an existing range, merge. */
427
428 newr.offset = offset;
429 newr.length = length;
430
431 /* Do a binary search for the position at which the given range would be
432 inserted if we only considered the starting OFFSET of ranges.
433 Call that position I. Since we also have LENGTH to care for
434 (this is a range after all), we need to check if the _previous_
435 range overlaps the I range. E.g., calling R the new range:
436
437 #1 - overlaps with previous
438
439 R
440 |-...-|
441 |---| |---| |------| ... |--|
442 0 1 2 N
443
444 I=1
445
446 In the case #1 above, the binary search would return `I=1',
447 meaning, this OFFSET should be inserted at position 1, and the
448 current position 1 should be pushed further (and become 2). But,
449 note that `0' overlaps with R, so we want to merge them.
450
451 A similar consideration needs to be taken if the new range would
452 be contiguous with the previous range:
453
454 #2 - contiguous with previous
455
456 R
457 |-...-|
458 |--| |---| |------| ... |--|
459 0 1 2 N
460
461 I=1
462
463 If there's no overlap with the previous range, as in:
464
465 #3 - not overlapping and not contiguous
466
467 R
468 |-...-|
469 |--| |---| |------| ... |--|
470 0 1 2 N
471
472 I=1
473
474 or if I is 0:
475
476 #4 - R is the range with lowest offset
477
478 R
479 |-...-|
480 |--| |---| |------| ... |--|
481 0 1 2 N
482
483 I=0
484
485 ... we just push the new range to I.
486
487 All the 4 cases above need to consider that the new range may
488 also overlap several of the ranges that follow, or that R may be
489 contiguous with the following range, and merge. E.g.,
490
491 #5 - overlapping following ranges
492
493 R
494 |------------------------|
495 |--| |---| |------| ... |--|
496 0 1 2 N
497
498 I=0
499
500 or:
501
502 R
503 |-------|
504 |--| |---| |------| ... |--|
505 0 1 2 N
506
507 I=1
508
509 */
510
511 i = VEC_lower_bound (range_s, *vectorp, &newr, range_lessthan);
512 if (i > 0)
513 {
514 struct range *bef = VEC_index (range_s, *vectorp, i - 1);
515
516 if (ranges_overlap (bef->offset, bef->length, offset, length))
517 {
518 /* #1 */
519 ULONGEST l = std::min (bef->offset, offset);
520 ULONGEST h = std::max (bef->offset + bef->length, offset + length);
521
522 bef->offset = l;
523 bef->length = h - l;
524 i--;
525 }
526 else if (offset == bef->offset + bef->length)
527 {
528 /* #2 */
529 bef->length += length;
530 i--;
531 }
532 else
533 {
534 /* #3 */
535 VEC_safe_insert (range_s, *vectorp, i, &newr);
536 }
537 }
538 else
539 {
540 /* #4 */
541 VEC_safe_insert (range_s, *vectorp, i, &newr);
542 }
543
544 /* Check whether the ranges following the one we've just added or
545 touched can be folded in (#5 above). */
546 if (i + 1 < VEC_length (range_s, *vectorp))
547 {
548 struct range *t;
549 struct range *r;
550 int removed = 0;
551 int next = i + 1;
552
553 /* Get the range we just touched. */
554 t = VEC_index (range_s, *vectorp, i);
555 removed = 0;
556
557 i = next;
558 for (; VEC_iterate (range_s, *vectorp, i, r); i++)
559 if (r->offset <= t->offset + t->length)
560 {
561 ULONGEST l, h;
562
563 l = std::min (t->offset, r->offset);
564 h = std::max (t->offset + t->length, r->offset + r->length);
565
566 t->offset = l;
567 t->length = h - l;
568
569 removed++;
570 }
571 else
572 {
573 /* If we couldn't merge this one, we won't be able to
574 merge following ones either, since the ranges are
575 always sorted by OFFSET. */
576 break;
577 }
578
579 if (removed != 0)
580 VEC_block_remove (range_s, *vectorp, next, removed);
581 }
582 }
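/* Illustrative sketch (an editorial addition, not part of the original file):
   shows how insert_into_bit_range_vector coalesces ranges and how
   ranges_contain queries them.  The function name is hypothetical and the
   function is not called anywhere; it only demonstrates the expected
   behavior of the helpers above.  */

static void
example_bit_range_coalescing (void)
{
  VEC(range_s) *ranges = NULL;

  insert_into_bit_range_vector (&ranges, 0, 4);   /* ranges: [0, 4)           */
  insert_into_bit_range_vector (&ranges, 4, 4);   /* contiguous -> [0, 8)     */
  insert_into_bit_range_vector (&ranges, 16, 4);  /* ranges: [0, 8) [16, 20)  */

  gdb_assert (ranges_contain (ranges, 2, 1));     /* overlaps [0, 8)          */
  gdb_assert (!ranges_contain (ranges, 10, 2));   /* falls in the gap         */

  VEC_free (range_s, ranges);
}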
583
584 void
585 mark_value_bits_unavailable (struct value *value,
586 LONGEST offset, LONGEST length)
587 {
588 insert_into_bit_range_vector (&value->unavailable, offset, length);
589 }
590
591 void
592 mark_value_bytes_unavailable (struct value *value,
593 LONGEST offset, LONGEST length)
594 {
595 mark_value_bits_unavailable (value,
596 offset * TARGET_CHAR_BIT,
597 length * TARGET_CHAR_BIT);
598 }
599
600 /* Find the first range in RANGES that overlaps the range defined by
601 OFFSET and LENGTH, starting at element POS in the RANGES vector.
602 Returns the index into RANGES where such overlapping range was
603 found, or -1 if none was found. */
604
605 static int
606 find_first_range_overlap (VEC(range_s) *ranges, int pos,
607 LONGEST offset, LONGEST length)
608 {
609 range_s *r;
610 int i;
611
612 for (i = pos; VEC_iterate (range_s, ranges, i, r); i++)
613 if (ranges_overlap (r->offset, r->length, offset, length))
614 return i;
615
616 return -1;
617 }
618
619 /* Compare LENGTH_BITS bits of memory at PTR1 + OFFSET1_BITS with the memory at
620 PTR2 + OFFSET2_BITS. Return 0 if the memory is the same, otherwise
621 return non-zero.
622
623 It must always be the case that:
624 OFFSET1_BITS % TARGET_CHAR_BIT == OFFSET2_BITS % TARGET_CHAR_BIT
625
626 It is assumed that memory can be accessed from:
627 PTR + (OFFSET_BITS / TARGET_CHAR_BIT)
628 to:
629 PTR + ((OFFSET_BITS + LENGTH_BITS + TARGET_CHAR_BIT - 1)
630 / TARGET_CHAR_BIT) */
631 static int
632 memcmp_with_bit_offsets (const gdb_byte *ptr1, size_t offset1_bits,
633 const gdb_byte *ptr2, size_t offset2_bits,
634 size_t length_bits)
635 {
636 gdb_assert (offset1_bits % TARGET_CHAR_BIT
637 == offset2_bits % TARGET_CHAR_BIT);
638
639 if (offset1_bits % TARGET_CHAR_BIT != 0)
640 {
641 size_t bits;
642 gdb_byte mask, b1, b2;
643
644 /* The offset from the base pointers PTR1 and PTR2 is not a complete
645 number of bytes. A number of bits up to either the next exact
646 byte boundary, or LENGTH_BITS (whichever is sooner) will be
647 compared. */
648 bits = TARGET_CHAR_BIT - offset1_bits % TARGET_CHAR_BIT;
649 gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
650 mask = (1 << bits) - 1;
651
652 if (length_bits < bits)
653 {
654 mask &= ~(gdb_byte) ((1 << (bits - length_bits)) - 1);
655 bits = length_bits;
656 }
657
658 /* Now load the two bytes and mask off the bits we care about. */
659 b1 = *(ptr1 + offset1_bits / TARGET_CHAR_BIT) & mask;
660 b2 = *(ptr2 + offset2_bits / TARGET_CHAR_BIT) & mask;
661
662 if (b1 != b2)
663 return 1;
664
665 /* Now update the length and offsets to take account of the bits
666 we've just compared. */
667 length_bits -= bits;
668 offset1_bits += bits;
669 offset2_bits += bits;
670 }
671
672 if (length_bits % TARGET_CHAR_BIT != 0)
673 {
674 size_t bits;
675 size_t o1, o2;
676 gdb_byte mask, b1, b2;
677
678 /* The length is not an exact number of bytes. After the previous
679 if block, either the offsets are byte aligned or the
680 length is zero (in which case this code is not reached). Compare
681 a number of bits at the end of the region, starting from an exact
682 byte boundary. */
683 bits = length_bits % TARGET_CHAR_BIT;
684 o1 = offset1_bits + length_bits - bits;
685 o2 = offset2_bits + length_bits - bits;
686
687 gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
688 mask = ((1 << bits) - 1) << (TARGET_CHAR_BIT - bits);
689
690 gdb_assert (o1 % TARGET_CHAR_BIT == 0);
691 gdb_assert (o2 % TARGET_CHAR_BIT == 0);
692
693 b1 = *(ptr1 + o1 / TARGET_CHAR_BIT) & mask;
694 b2 = *(ptr2 + o2 / TARGET_CHAR_BIT) & mask;
695
696 if (b1 != b2)
697 return 1;
698
699 length_bits -= bits;
700 }
701
702 if (length_bits > 0)
703 {
704 /* We've now taken care of any stray "bits" at the start or end of
705 the region to compare; the remainder can be covered with a simple
706 memcmp. */
707 gdb_assert (offset1_bits % TARGET_CHAR_BIT == 0);
708 gdb_assert (offset2_bits % TARGET_CHAR_BIT == 0);
709 gdb_assert (length_bits % TARGET_CHAR_BIT == 0);
710
711 return memcmp (ptr1 + offset1_bits / TARGET_CHAR_BIT,
712 ptr2 + offset2_bits / TARGET_CHAR_BIT,
713 length_bits / TARGET_CHAR_BIT);
714 }
715
716 /* Length is zero, regions match. */
717 return 0;
718 }
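/* Illustrative sketch (an editorial addition, not part of the original file):
   demonstrates that memcmp_with_bit_offsets only examines the LENGTH_BITS
   bits selected by the offsets, so bytes outside that window may differ
   without affecting the result.  The function name and byte values are
   hypothetical.  */

static void
example_memcmp_with_bit_offsets (void)
{
  const gdb_byte buf1[] = { 0x0f, 0xab, 0x00 };
  const gdb_byte buf2[] = { 0xff, 0xab, 0x55 };

  /* With both offsets at 4 and a length of 12 bits, the partial first byte
     masks to 0x0f in both buffers and the following whole byte (0xab)
     matches, so the regions compare equal even though the unmasked bits of
     byte 0 and all of byte 2 differ.  */
  gdb_assert (memcmp_with_bit_offsets (buf1, 4, buf2, 4, 12) == 0);

  /* Extending the length to 20 bits pulls byte 2 into the comparison and
     exposes the difference.  */
  gdb_assert (memcmp_with_bit_offsets (buf1, 4, buf2, 4, 20) != 0);
}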
719
720 /* Helper struct for find_first_range_overlap_and_match and
721 value_contents_bits_eq. Keep track of which slot of a given ranges
722 vector we last looked at. */
723
724 struct ranges_and_idx
725 {
726 /* The ranges. */
727 VEC(range_s) *ranges;
728
729 /* The range we've last found in RANGES. Given ranges are sorted,
730 we can start the next lookup here. */
731 int idx;
732 };
733
734 /* Helper function for value_contents_bits_eq. Compare LENGTH bits of
735 RP1's ranges starting at OFFSET1 bits with LENGTH bits of RP2's
736 ranges starting at OFFSET2 bits. Return true if the ranges match
737 and fill in *L and *H with the overlapping window relative to
738 (both) OFFSET1 or OFFSET2. */
739
740 static int
741 find_first_range_overlap_and_match (struct ranges_and_idx *rp1,
742 struct ranges_and_idx *rp2,
743 LONGEST offset1, LONGEST offset2,
744 LONGEST length, ULONGEST *l, ULONGEST *h)
745 {
746 rp1->idx = find_first_range_overlap (rp1->ranges, rp1->idx,
747 offset1, length);
748 rp2->idx = find_first_range_overlap (rp2->ranges, rp2->idx,
749 offset2, length);
750
751 if (rp1->idx == -1 && rp2->idx == -1)
752 {
753 *l = length;
754 *h = length;
755 return 1;
756 }
757 else if (rp1->idx == -1 || rp2->idx == -1)
758 return 0;
759 else
760 {
761 range_s *r1, *r2;
762 ULONGEST l1, h1;
763 ULONGEST l2, h2;
764
765 r1 = VEC_index (range_s, rp1->ranges, rp1->idx);
766 r2 = VEC_index (range_s, rp2->ranges, rp2->idx);
767
768 /* Get the unavailable windows intersected by the incoming
769 ranges. The first and last ranges that overlap the argument
770 range may be wider than the incoming argument ranges. */
771 l1 = std::max (offset1, r1->offset);
772 h1 = std::min (offset1 + length, r1->offset + r1->length);
773
774 l2 = std::max (offset2, r2->offset);
775 h2 = std::min (offset2 + length, r2->offset + r2->length);
776
777 /* Make them relative to the respective start offsets, so we can
778 compare them for equality. */
779 l1 -= offset1;
780 h1 -= offset1;
781
782 l2 -= offset2;
783 h2 -= offset2;
784
785 /* Different ranges, no match. */
786 if (l1 != l2 || h1 != h2)
787 return 0;
788
789 *h = h1;
790 *l = l1;
791 return 1;
792 }
793 }
794
795 /* Helper function for value_contents_eq. The only difference is that
796 this function is bit rather than byte based.
797
798 Compare LENGTH bits of VAL1's contents starting at OFFSET1 bits
799 with LENGTH bits of VAL2's contents starting at OFFSET2 bits.
800 Return true if the available bits match. */
801
802 static bool
803 value_contents_bits_eq (const struct value *val1, int offset1,
804 const struct value *val2, int offset2,
805 int length)
806 {
807 /* Each array element corresponds to a ranges source (unavailable,
808 optimized out). '1' is for VAL1, '2' for VAL2. */
809 struct ranges_and_idx rp1[2], rp2[2];
810
811 /* See function description in value.h. */
812 gdb_assert (!val1->lazy && !val2->lazy);
813
814 /* We shouldn't be trying to compare past the end of the values. */
815 gdb_assert (offset1 + length
816 <= TYPE_LENGTH (val1->enclosing_type) * TARGET_CHAR_BIT);
817 gdb_assert (offset2 + length
818 <= TYPE_LENGTH (val2->enclosing_type) * TARGET_CHAR_BIT);
819
820 memset (&rp1, 0, sizeof (rp1));
821 memset (&rp2, 0, sizeof (rp2));
822 rp1[0].ranges = val1->unavailable;
823 rp2[0].ranges = val2->unavailable;
824 rp1[1].ranges = val1->optimized_out;
825 rp2[1].ranges = val2->optimized_out;
826
827 while (length > 0)
828 {
829 ULONGEST l = 0, h = 0; /* init for gcc -Wall */
830 int i;
831
832 for (i = 0; i < 2; i++)
833 {
834 ULONGEST l_tmp, h_tmp;
835
836 /* The contents only compare equal if the invalid/unavailable
837 contents ranges match as well. */
838 if (!find_first_range_overlap_and_match (&rp1[i], &rp2[i],
839 offset1, offset2, length,
840 &l_tmp, &h_tmp))
841 return false;
842
843 /* We're interested in the lowest/first range found. */
844 if (i == 0 || l_tmp < l)
845 {
846 l = l_tmp;
847 h = h_tmp;
848 }
849 }
850
851 /* Compare the available/valid contents. */
852 if (memcmp_with_bit_offsets (val1->contents, offset1,
853 val2->contents, offset2, l) != 0)
854 return false;
855
856 length -= h;
857 offset1 += h;
858 offset2 += h;
859 }
860
861 return true;
862 }
863
864 bool
865 value_contents_eq (const struct value *val1, LONGEST offset1,
866 const struct value *val2, LONGEST offset2,
867 LONGEST length)
868 {
869 return value_contents_bits_eq (val1, offset1 * TARGET_CHAR_BIT,
870 val2, offset2 * TARGET_CHAR_BIT,
871 length * TARGET_CHAR_BIT);
872 }
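/* A worked example of the above (editorial illustration): two values of the
   same 4-byte type whose byte 2 is unavailable in both compare equal under
   value_contents_eq as long as bytes 0, 1 and 3 hold identical contents; if
   only one of the two values has byte 2 unavailable, they compare unequal
   regardless of contents, because the unavailable ranges themselves must
   match.  */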
873
874
875 /* The value-history records all the values printed by print commands
876 during this session. */
877
878 static std::vector<value_ref_ptr> value_history;
879
880 \f
881 /* List of all value objects currently allocated
882 (except for those released by calls to release_value).
883 This is so they can be freed after each command. */
884
885 static std::vector<value_ref_ptr> all_values;
886
887 /* Allocate a lazy value for type TYPE. Its actual content is
888 "lazily" allocated too: the content field of the return value is
889 NULL; it will be allocated when it is fetched from the target. */
890
891 struct value *
892 allocate_value_lazy (struct type *type)
893 {
894 struct value *val;
895
896 /* Call check_typedef on our type to make sure that, if TYPE
897 is a TYPE_CODE_TYPEDEF, its length is set to the length
898 of the target type instead of zero. However, we do not
899 replace the typedef type by the target type, because we want
900 to keep the typedef in order to be able to set the VAL's type
901 description correctly. */
902 check_typedef (type);
903
904 val = XCNEW (struct value);
905 val->contents = NULL;
906 val->type = type;
907 val->enclosing_type = type;
908 VALUE_LVAL (val) = not_lval;
909 val->location.address = 0;
910 val->offset = 0;
911 val->bitpos = 0;
912 val->bitsize = 0;
913 val->lazy = 1;
914 val->embedded_offset = 0;
915 val->pointed_to_offset = 0;
916 val->modifiable = 1;
917 val->initialized = 1; /* Default to initialized. */
918
919 /* Values start out on the all_values chain. */
920 val->reference_count = 1;
921 all_values.emplace_back (val);
922
923 return val;
924 }
925
926 /* The maximum size, in bytes, that GDB will try to allocate for a value.
927 The initial value of 64k was not selected for any specific reason; it is
928 just a reasonable starting point. */
929
930 static int max_value_size = 65536; /* 64k bytes */
931
932 /* It is critical that the MAX_VALUE_SIZE is at least as big as the size of
933 LONGEST, otherwise GDB will not be able to parse integer values from the
934 CLI; for example if the MAX_VALUE_SIZE could be set to 1 then GDB would
935 be unable to parse "set max-value-size 2".
936
937 As we want a consistent GDB experience across hosts with different sizes
938 of LONGEST, this arbitrary minimum value was selected; as long as it
939 is bigger than LONGEST on all GDB-supported hosts, we're fine. */
940
941 #define MIN_VALUE_FOR_MAX_VALUE_SIZE 16
942 gdb_static_assert (sizeof (LONGEST) <= MIN_VALUE_FOR_MAX_VALUE_SIZE);
943
944 /* Implement the "set max-value-size" command. */
945
946 static void
947 set_max_value_size (const char *args, int from_tty,
948 struct cmd_list_element *c)
949 {
950 gdb_assert (max_value_size == -1 || max_value_size >= 0);
951
952 if (max_value_size > -1 && max_value_size < MIN_VALUE_FOR_MAX_VALUE_SIZE)
953 {
954 max_value_size = MIN_VALUE_FOR_MAX_VALUE_SIZE;
955 error (_("max-value-size set too low, increasing to %d bytes"),
956 max_value_size);
957 }
958 }
959
960 /* Implement the "show max-value-size" command. */
961
962 static void
963 show_max_value_size (struct ui_file *file, int from_tty,
964 struct cmd_list_element *c, const char *value)
965 {
966 if (max_value_size == -1)
967 fprintf_filtered (file, _("Maximum value size is unlimited.\n"));
968 else
969 fprintf_filtered (file, _("Maximum value size is %d bytes.\n"),
970 max_value_size);
971 }
972
973 /* Called before we attempt to allocate or reallocate a buffer for the
974 contents of a value. TYPE is the type of the value for which we are
975 allocating the buffer. If the buffer is too large (based on the user
976 controllable setting) then throw an error. If this function returns
977 then we should attempt to allocate the buffer. */
978
979 static void
980 check_type_length_before_alloc (const struct type *type)
981 {
982 unsigned int length = TYPE_LENGTH (type);
983
984 if (max_value_size > -1 && length > max_value_size)
985 {
986 if (TYPE_NAME (type) != NULL)
987 error (_("value of type `%s' requires %u bytes, which is more "
988 "than max-value-size"), TYPE_NAME (type), length);
989 else
990 error (_("value requires %u bytes, which is more than "
991 "max-value-size"), length);
992 }
993 }
994
995 /* Allocate the contents of VAL if it has not been allocated yet. */
996
997 static void
998 allocate_value_contents (struct value *val)
999 {
1000 if (!val->contents)
1001 {
1002 check_type_length_before_alloc (val->enclosing_type);
1003 val->contents
1004 = (gdb_byte *) xzalloc (TYPE_LENGTH (val->enclosing_type));
1005 }
1006 }
1007
1008 /* Allocate a value and its contents for type TYPE. */
1009
1010 struct value *
1011 allocate_value (struct type *type)
1012 {
1013 struct value *val = allocate_value_lazy (type);
1014
1015 allocate_value_contents (val);
1016 val->lazy = 0;
1017 return val;
1018 }
1019
1020 /* Allocate a value that has the correct length
1021 for COUNT repetitions of type TYPE. */
1022
1023 struct value *
1024 allocate_repeat_value (struct type *type, int count)
1025 {
1026 int low_bound = current_language->string_lower_bound; /* ??? */
1027 /* FIXME-type-allocation: need a way to free this type when we are
1028 done with it. */
1029 struct type *array_type
1030 = lookup_array_range_type (type, low_bound, count + low_bound - 1);
1031
1032 return allocate_value (array_type);
1033 }
1034
1035 struct value *
1036 allocate_computed_value (struct type *type,
1037 const struct lval_funcs *funcs,
1038 void *closure)
1039 {
1040 struct value *v = allocate_value_lazy (type);
1041
1042 VALUE_LVAL (v) = lval_computed;
1043 v->location.computed.funcs = funcs;
1044 v->location.computed.closure = closure;
1045
1046 return v;
1047 }
1048
1049 /* Allocate NOT_LVAL value for type TYPE being OPTIMIZED_OUT. */
1050
1051 struct value *
1052 allocate_optimized_out_value (struct type *type)
1053 {
1054 struct value *retval = allocate_value_lazy (type);
1055
1056 mark_value_bytes_optimized_out (retval, 0, TYPE_LENGTH (type));
1057 set_value_lazy (retval, 0);
1058 return retval;
1059 }
1060
1061 /* Accessor methods. */
1062
1063 struct type *
1064 value_type (const struct value *value)
1065 {
1066 return value->type;
1067 }
1068 void
1069 deprecated_set_value_type (struct value *value, struct type *type)
1070 {
1071 value->type = type;
1072 }
1073
1074 LONGEST
1075 value_offset (const struct value *value)
1076 {
1077 return value->offset;
1078 }
1079 void
1080 set_value_offset (struct value *value, LONGEST offset)
1081 {
1082 value->offset = offset;
1083 }
1084
1085 LONGEST
1086 value_bitpos (const struct value *value)
1087 {
1088 return value->bitpos;
1089 }
1090 void
1091 set_value_bitpos (struct value *value, LONGEST bit)
1092 {
1093 value->bitpos = bit;
1094 }
1095
1096 LONGEST
1097 value_bitsize (const struct value *value)
1098 {
1099 return value->bitsize;
1100 }
1101 void
1102 set_value_bitsize (struct value *value, LONGEST bit)
1103 {
1104 value->bitsize = bit;
1105 }
1106
1107 struct value *
1108 value_parent (const struct value *value)
1109 {
1110 return value->parent;
1111 }
1112
1113 /* See value.h. */
1114
1115 void
1116 set_value_parent (struct value *value, struct value *parent)
1117 {
1118 struct value *old = value->parent;
1119
1120 value->parent = parent;
1121 if (parent != NULL)
1122 value_incref (parent);
1123 value_decref (old);
1124 }
1125
1126 gdb_byte *
1127 value_contents_raw (struct value *value)
1128 {
1129 struct gdbarch *arch = get_value_arch (value);
1130 int unit_size = gdbarch_addressable_memory_unit_size (arch);
1131
1132 allocate_value_contents (value);
1133 return value->contents + value->embedded_offset * unit_size;
1134 }
1135
1136 gdb_byte *
1137 value_contents_all_raw (struct value *value)
1138 {
1139 allocate_value_contents (value);
1140 return value->contents;
1141 }
1142
1143 struct type *
1144 value_enclosing_type (const struct value *value)
1145 {
1146 return value->enclosing_type;
1147 }
1148
1149 /* Look at value.h for description. */
1150
1151 struct type *
1152 value_actual_type (struct value *value, int resolve_simple_types,
1153 int *real_type_found)
1154 {
1155 struct value_print_options opts;
1156 struct type *result;
1157
1158 get_user_print_options (&opts);
1159
1160 if (real_type_found)
1161 *real_type_found = 0;
1162 result = value_type (value);
1163 if (opts.objectprint)
1164 {
1165 /* If result's target type is TYPE_CODE_STRUCT, proceed to
1166 fetch its rtti type. */
1167 if ((TYPE_CODE (result) == TYPE_CODE_PTR || TYPE_IS_REFERENCE (result))
1168 && TYPE_CODE (check_typedef (TYPE_TARGET_TYPE (result)))
1169 == TYPE_CODE_STRUCT
1170 && !value_optimized_out (value))
1171 {
1172 struct type *real_type;
1173
1174 real_type = value_rtti_indirect_type (value, NULL, NULL, NULL);
1175 if (real_type)
1176 {
1177 if (real_type_found)
1178 *real_type_found = 1;
1179 result = real_type;
1180 }
1181 }
1182 else if (resolve_simple_types)
1183 {
1184 if (real_type_found)
1185 *real_type_found = 1;
1186 result = value_enclosing_type (value);
1187 }
1188 }
1189
1190 return result;
1191 }
1192
1193 void
1194 error_value_optimized_out (void)
1195 {
1196 error (_("value has been optimized out"));
1197 }
1198
1199 static void
1200 require_not_optimized_out (const struct value *value)
1201 {
1202 if (!VEC_empty (range_s, value->optimized_out))
1203 {
1204 if (value->lval == lval_register)
1205 error (_("register has not been saved in frame"));
1206 else
1207 error_value_optimized_out ();
1208 }
1209 }
1210
1211 static void
1212 require_available (const struct value *value)
1213 {
1214 if (!VEC_empty (range_s, value->unavailable))
1215 throw_error (NOT_AVAILABLE_ERROR, _("value is not available"));
1216 }
1217
1218 const gdb_byte *
1219 value_contents_for_printing (struct value *value)
1220 {
1221 if (value->lazy)
1222 value_fetch_lazy (value);
1223 return value->contents;
1224 }
1225
1226 const gdb_byte *
1227 value_contents_for_printing_const (const struct value *value)
1228 {
1229 gdb_assert (!value->lazy);
1230 return value->contents;
1231 }
1232
1233 const gdb_byte *
1234 value_contents_all (struct value *value)
1235 {
1236 const gdb_byte *result = value_contents_for_printing (value);
1237 require_not_optimized_out (value);
1238 require_available (value);
1239 return result;
1240 }
1241
1242 /* Copy ranges in SRC_RANGE that overlap [SRC_BIT_OFFSET,
1243 SRC_BIT_OFFSET+BIT_LENGTH) ranges into *DST_RANGE, adjusted. */
1244
1245 static void
1246 ranges_copy_adjusted (VEC (range_s) **dst_range, int dst_bit_offset,
1247 VEC (range_s) *src_range, int src_bit_offset,
1248 int bit_length)
1249 {
1250 range_s *r;
1251 int i;
1252
1253 for (i = 0; VEC_iterate (range_s, src_range, i, r); i++)
1254 {
1255 ULONGEST h, l;
1256
1257 l = std::max (r->offset, (LONGEST) src_bit_offset);
1258 h = std::min (r->offset + r->length,
1259 (LONGEST) src_bit_offset + bit_length);
1260
1261 if (l < h)
1262 insert_into_bit_range_vector (dst_range,
1263 dst_bit_offset + (l - src_bit_offset),
1264 h - l);
1265 }
1266 }
1267
1268 /* Copy the ranges metadata in SRC that overlaps [SRC_BIT_OFFSET,
1269 SRC_BIT_OFFSET+BIT_LENGTH) into DST, adjusted. */
1270
1271 static void
1272 value_ranges_copy_adjusted (struct value *dst, int dst_bit_offset,
1273 const struct value *src, int src_bit_offset,
1274 int bit_length)
1275 {
1276 ranges_copy_adjusted (&dst->unavailable, dst_bit_offset,
1277 src->unavailable, src_bit_offset,
1278 bit_length);
1279 ranges_copy_adjusted (&dst->optimized_out, dst_bit_offset,
1280 src->optimized_out, src_bit_offset,
1281 bit_length);
1282 }
1283
1284 /* Copy LENGTH target addressable memory units of SRC value's (all) contents
1285 (value_contents_all) starting at SRC_OFFSET, into DST value's (all)
1286 contents, starting at DST_OFFSET. If unavailable contents are
1287 being copied from SRC, the corresponding DST contents are marked
1288 unavailable accordingly. Neither DST nor SRC may be lazy
1289 values.
1290
1291 It is assumed the contents of DST in the [DST_OFFSET,
1292 DST_OFFSET+LENGTH) range are wholly available. */
1293
1294 void
1295 value_contents_copy_raw (struct value *dst, LONGEST dst_offset,
1296 struct value *src, LONGEST src_offset, LONGEST length)
1297 {
1298 LONGEST src_bit_offset, dst_bit_offset, bit_length;
1299 struct gdbarch *arch = get_value_arch (src);
1300 int unit_size = gdbarch_addressable_memory_unit_size (arch);
1301
1302 /* A lazy DST would make this copy operation useless, since as
1303 soon as DST's contents were un-lazied (by a later value_contents
1304 call, say), the contents would be overwritten. A lazy SRC would
1305 mean we'd be copying garbage. */
1306 gdb_assert (!dst->lazy && !src->lazy);
1307
1308 /* The overwritten DST range gets unavailability ORed in, not
1309 replaced. Make sure to remember to implement replacing if it
1310 turns out actually necessary. */
1311 gdb_assert (value_bytes_available (dst, dst_offset, length));
1312 gdb_assert (!value_bits_any_optimized_out (dst,
1313 TARGET_CHAR_BIT * dst_offset,
1314 TARGET_CHAR_BIT * length));
1315
1316 /* Copy the data. */
1317 memcpy (value_contents_all_raw (dst) + dst_offset * unit_size,
1318 value_contents_all_raw (src) + src_offset * unit_size,
1319 length * unit_size);
1320
1321 /* Copy the meta-data, adjusted. */
1322 src_bit_offset = src_offset * unit_size * HOST_CHAR_BIT;
1323 dst_bit_offset = dst_offset * unit_size * HOST_CHAR_BIT;
1324 bit_length = length * unit_size * HOST_CHAR_BIT;
1325
1326 value_ranges_copy_adjusted (dst, dst_bit_offset,
1327 src, src_bit_offset,
1328 bit_length);
1329 }
1330
1331 /* Copy LENGTH bytes of SRC value's (all) contents
1332 (value_contents_all) starting at SRC_OFFSET byte, into DST value's
1333 (all) contents, starting at DST_OFFSET. If unavailable contents
1334 are being copied from SRC, the corresponding DST contents are
1335 marked unavailable accordingly. DST must not be lazy. If SRC is
1336 lazy, it will be fetched now.
1337
1338 It is assumed the contents of DST in the [DST_OFFSET,
1339 DST_OFFSET+LENGTH) range are wholly available. */
1340
1341 void
1342 value_contents_copy (struct value *dst, LONGEST dst_offset,
1343 struct value *src, LONGEST src_offset, LONGEST length)
1344 {
1345 if (src->lazy)
1346 value_fetch_lazy (src);
1347
1348 value_contents_copy_raw (dst, dst_offset, src, src_offset, length);
1349 }
1350
1351 int
1352 value_lazy (const struct value *value)
1353 {
1354 return value->lazy;
1355 }
1356
1357 void
1358 set_value_lazy (struct value *value, int val)
1359 {
1360 value->lazy = val;
1361 }
1362
1363 int
1364 value_stack (const struct value *value)
1365 {
1366 return value->stack;
1367 }
1368
1369 void
1370 set_value_stack (struct value *value, int val)
1371 {
1372 value->stack = val;
1373 }
1374
1375 const gdb_byte *
1376 value_contents (struct value *value)
1377 {
1378 const gdb_byte *result = value_contents_writeable (value);
1379 require_not_optimized_out (value);
1380 require_available (value);
1381 return result;
1382 }
1383
1384 gdb_byte *
1385 value_contents_writeable (struct value *value)
1386 {
1387 if (value->lazy)
1388 value_fetch_lazy (value);
1389 return value_contents_raw (value);
1390 }
1391
1392 int
1393 value_optimized_out (struct value *value)
1394 {
1395 /* We can only know if a value is optimized out once we have tried to
1396 fetch it. */
1397 if (VEC_empty (range_s, value->optimized_out) && value->lazy)
1398 {
1399 TRY
1400 {
1401 value_fetch_lazy (value);
1402 }
1403 CATCH (ex, RETURN_MASK_ERROR)
1404 {
1405 /* Fall back to checking value->optimized_out. */
1406 }
1407 END_CATCH
1408 }
1409
1410 return !VEC_empty (range_s, value->optimized_out);
1411 }
1412
1413 /* Mark contents of VALUE as optimized out, starting at OFFSET bytes, and
1414 extending for the next LENGTH bytes. */
1415
1416 void
1417 mark_value_bytes_optimized_out (struct value *value, int offset, int length)
1418 {
1419 mark_value_bits_optimized_out (value,
1420 offset * TARGET_CHAR_BIT,
1421 length * TARGET_CHAR_BIT);
1422 }
1423
1424 /* See value.h. */
1425
1426 void
1427 mark_value_bits_optimized_out (struct value *value,
1428 LONGEST offset, LONGEST length)
1429 {
1430 insert_into_bit_range_vector (&value->optimized_out, offset, length);
1431 }
1432
1433 int
1434 value_bits_synthetic_pointer (const struct value *value,
1435 LONGEST offset, LONGEST length)
1436 {
1437 if (value->lval != lval_computed
1438 || !value->location.computed.funcs->check_synthetic_pointer)
1439 return 0;
1440 return value->location.computed.funcs->check_synthetic_pointer (value,
1441 offset,
1442 length);
1443 }
1444
1445 LONGEST
1446 value_embedded_offset (const struct value *value)
1447 {
1448 return value->embedded_offset;
1449 }
1450
1451 void
1452 set_value_embedded_offset (struct value *value, LONGEST val)
1453 {
1454 value->embedded_offset = val;
1455 }
1456
1457 LONGEST
1458 value_pointed_to_offset (const struct value *value)
1459 {
1460 return value->pointed_to_offset;
1461 }
1462
1463 void
1464 set_value_pointed_to_offset (struct value *value, LONGEST val)
1465 {
1466 value->pointed_to_offset = val;
1467 }
1468
1469 const struct lval_funcs *
1470 value_computed_funcs (const struct value *v)
1471 {
1472 gdb_assert (value_lval_const (v) == lval_computed);
1473
1474 return v->location.computed.funcs;
1475 }
1476
1477 void *
1478 value_computed_closure (const struct value *v)
1479 {
1480 gdb_assert (v->lval == lval_computed);
1481
1482 return v->location.computed.closure;
1483 }
1484
1485 enum lval_type *
1486 deprecated_value_lval_hack (struct value *value)
1487 {
1488 return &value->lval;
1489 }
1490
1491 enum lval_type
1492 value_lval_const (const struct value *value)
1493 {
1494 return value->lval;
1495 }
1496
1497 CORE_ADDR
1498 value_address (const struct value *value)
1499 {
1500 if (value->lval != lval_memory)
1501 return 0;
1502 if (value->parent != NULL)
1503 return value_address (value->parent) + value->offset;
1504 if (NULL != TYPE_DATA_LOCATION (value_type (value)))
1505 {
1506 gdb_assert (PROP_CONST == TYPE_DATA_LOCATION_KIND (value_type (value)));
1507 return TYPE_DATA_LOCATION_ADDR (value_type (value));
1508 }
1509
1510 return value->location.address + value->offset;
1511 }
1512
1513 CORE_ADDR
1514 value_raw_address (const struct value *value)
1515 {
1516 if (value->lval != lval_memory)
1517 return 0;
1518 return value->location.address;
1519 }
1520
1521 void
1522 set_value_address (struct value *value, CORE_ADDR addr)
1523 {
1524 gdb_assert (value->lval == lval_memory);
1525 value->location.address = addr;
1526 }
1527
1528 struct internalvar **
1529 deprecated_value_internalvar_hack (struct value *value)
1530 {
1531 return &value->location.internalvar;
1532 }
1533
1534 struct frame_id *
1535 deprecated_value_next_frame_id_hack (struct value *value)
1536 {
1537 gdb_assert (value->lval == lval_register);
1538 return &value->location.reg.next_frame_id;
1539 }
1540
1541 int *
1542 deprecated_value_regnum_hack (struct value *value)
1543 {
1544 gdb_assert (value->lval == lval_register);
1545 return &value->location.reg.regnum;
1546 }
1547
1548 int
1549 deprecated_value_modifiable (const struct value *value)
1550 {
1551 return value->modifiable;
1552 }
1553 \f
1554 /* Return a mark in the value chain. All values allocated after the
1555 mark is obtained (except for those released) are subject to being freed
1556 if a subsequent value_free_to_mark is passed the mark. */
1557 struct value *
1558 value_mark (void)
1559 {
1560 if (all_values.empty ())
1561 return nullptr;
1562 return all_values.back ().get ();
1563 }
1564
1565 /* Take a reference to VAL. VAL will not be deallocated until all
1566 references are released. */
1567
1568 struct value *
1569 value_incref (struct value *val)
1570 {
1571 val->reference_count++;
1572 return val;
1573 }
1574
1575 /* Release a reference to VAL, which was acquired with value_incref.
1576 This function is also called to deallocate values from the value
1577 chain. */
1578
1579 void
1580 value_decref (struct value *val)
1581 {
1582 if (val)
1583 {
1584 gdb_assert (val->reference_count > 0);
1585 val->reference_count--;
1586 if (val->reference_count > 0)
1587 return;
1588
1589 /* If there's an associated parent value, drop our reference to
1590 it. */
1591 if (val->parent != NULL)
1592 value_decref (val->parent);
1593
1594 if (VALUE_LVAL (val) == lval_computed)
1595 {
1596 const struct lval_funcs *funcs = val->location.computed.funcs;
1597
1598 if (funcs->free_closure)
1599 funcs->free_closure (val);
1600 }
1601 else if (VALUE_LVAL (val) == lval_xcallable)
1602 delete val->location.xm_worker;
1603
1604 xfree (val->contents);
1605 VEC_free (range_s, val->unavailable);
1606 }
1607 xfree (val);
1608 }
1609
1610 /* Free all values allocated since MARK was obtained by value_mark
1611 (except for those released). */
1612 void
1613 value_free_to_mark (const struct value *mark)
1614 {
1615 auto iter = std::find (all_values.begin (), all_values.end (), mark);
1616 if (iter == all_values.end ())
1617 all_values.clear ();
1618 else
1619 all_values.erase (iter + 1, all_values.end ());
1620 }
1621
1622 /* Remove VAL from the chain all_values
1623 so it will not be freed automatically. */
1624
1625 value_ref_ptr
1626 release_value (struct value *val)
1627 {
1628 struct value *v;
1629
1630 if (val == nullptr)
1631 return value_ref_ptr ();
1632
1633 std::vector<value_ref_ptr>::reverse_iterator iter;
1634 for (iter = all_values.rbegin (); iter != all_values.rend (); ++iter)
1635 {
1636 if (*iter == val)
1637 {
1638 value_ref_ptr result = *iter;
1639 all_values.erase (iter.base () - 1);
1640 return result;
1641 }
1642 }
1643
1644 /* We must always return an owned reference. Normally this happens
1645 because we transfer the reference from the value chain, but in
1646 this case the value was not on the chain. */
1647 return value_ref_ptr (value_incref (val));
1648 }
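/* Illustrative sketch (an editorial addition, not part of the original file):
   the usual way to keep a value alive beyond the current command is to hold
   on to the value_ref_ptr returned by release_value.  The function name is
   hypothetical.  */

static value_ref_ptr
example_keep_value_alive (struct type *type)
{
  struct value *val = allocate_value (type);

  /* Transfer the all_values chain's reference to the returned smart
     pointer; VAL will now survive when the chain is cleared at the end of
     the command.  */
  return release_value (val);
}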
1649
1650 /* See value.h. */
1651
1652 std::vector<value_ref_ptr>
1653 value_release_to_mark (const struct value *mark)
1654 {
1655 std::vector<value_ref_ptr> result;
1656
1657 auto iter = std::find (all_values.begin (), all_values.end (), mark);
1658 if (iter == all_values.end ())
1659 std::swap (result, all_values);
1660 else
1661 {
1662 std::move (iter + 1, all_values.end (), std::back_inserter (result));
1663 all_values.erase (iter + 1, all_values.end ());
1664 }
1665 std::reverse (result.begin (), result.end ());
1666 return result;
1667 }
1668
1669 /* Return a copy of the value ARG.
1670 It contains the same contents, for the same memory address,
1671 but it's a different block of storage. */
1672
1673 struct value *
1674 value_copy (struct value *arg)
1675 {
1676 struct type *encl_type = value_enclosing_type (arg);
1677 struct value *val;
1678
1679 if (value_lazy (arg))
1680 val = allocate_value_lazy (encl_type);
1681 else
1682 val = allocate_value (encl_type);
1683 val->type = arg->type;
1684 VALUE_LVAL (val) = VALUE_LVAL (arg);
1685 val->location = arg->location;
1686 val->offset = arg->offset;
1687 val->bitpos = arg->bitpos;
1688 val->bitsize = arg->bitsize;
1689 val->lazy = arg->lazy;
1690 val->embedded_offset = value_embedded_offset (arg);
1691 val->pointed_to_offset = arg->pointed_to_offset;
1692 val->modifiable = arg->modifiable;
1693 if (!value_lazy (val))
1694 {
1695 memcpy (value_contents_all_raw (val), value_contents_all_raw (arg),
1696 TYPE_LENGTH (value_enclosing_type (arg)));
1697
1698 }
1699 val->unavailable = VEC_copy (range_s, arg->unavailable);
1700 val->optimized_out = VEC_copy (range_s, arg->optimized_out);
1701 set_value_parent (val, arg->parent);
1702 if (VALUE_LVAL (val) == lval_computed)
1703 {
1704 const struct lval_funcs *funcs = val->location.computed.funcs;
1705
1706 if (funcs->copy_closure)
1707 val->location.computed.closure = funcs->copy_closure (val);
1708 }
1709 return val;
1710 }
1711
1712 /* Return a "const" and/or "volatile" qualified version of the value V.
1713 If CNST is true, then the returned value will be qualified with
1714 "const".
1715 If VOLTL is true, then the returned value will be qualified with
1716 "volatile". */
1717
1718 struct value *
1719 make_cv_value (int cnst, int voltl, struct value *v)
1720 {
1721 struct type *val_type = value_type (v);
1722 struct type *enclosing_type = value_enclosing_type (v);
1723 struct value *cv_val = value_copy (v);
1724
1725 deprecated_set_value_type (cv_val,
1726 make_cv_type (cnst, voltl, val_type, NULL));
1727 set_value_enclosing_type (cv_val,
1728 make_cv_type (cnst, voltl, enclosing_type, NULL));
1729
1730 return cv_val;
1731 }
1732
1733 /* Return a version of ARG that is non-lvalue. */
1734
1735 struct value *
1736 value_non_lval (struct value *arg)
1737 {
1738 if (VALUE_LVAL (arg) != not_lval)
1739 {
1740 struct type *enc_type = value_enclosing_type (arg);
1741 struct value *val = allocate_value (enc_type);
1742
1743 memcpy (value_contents_all_raw (val), value_contents_all (arg),
1744 TYPE_LENGTH (enc_type));
1745 val->type = arg->type;
1746 set_value_embedded_offset (val, value_embedded_offset (arg));
1747 set_value_pointed_to_offset (val, value_pointed_to_offset (arg));
1748 return val;
1749 }
1750 return arg;
1751 }
1752
1753 /* Write contents of V at ADDR and set its lval type to be LVAL_MEMORY. */
1754
1755 void
1756 value_force_lval (struct value *v, CORE_ADDR addr)
1757 {
1758 gdb_assert (VALUE_LVAL (v) == not_lval);
1759
1760 write_memory (addr, value_contents_raw (v), TYPE_LENGTH (value_type (v)));
1761 v->lval = lval_memory;
1762 v->location.address = addr;
1763 }
1764
1765 void
1766 set_value_component_location (struct value *component,
1767 const struct value *whole)
1768 {
1769 struct type *type;
1770
1771 gdb_assert (whole->lval != lval_xcallable);
1772
1773 if (whole->lval == lval_internalvar)
1774 VALUE_LVAL (component) = lval_internalvar_component;
1775 else
1776 VALUE_LVAL (component) = whole->lval;
1777
1778 component->location = whole->location;
1779 if (whole->lval == lval_computed)
1780 {
1781 const struct lval_funcs *funcs = whole->location.computed.funcs;
1782
1783 if (funcs->copy_closure)
1784 component->location.computed.closure = funcs->copy_closure (whole);
1785 }
1786
1787 /* If the type has a dynamic resolved location property,
1788 update its value address. */
1789 type = value_type (whole);
1790 if (NULL != TYPE_DATA_LOCATION (type)
1791 && TYPE_DATA_LOCATION_KIND (type) == PROP_CONST)
1792 set_value_address (component, TYPE_DATA_LOCATION_ADDR (type));
1793 }
1794
1795 /* Access to the value history. */
1796
1797 /* Record a new value in the value history.
1798 Returns the absolute history index of the entry. */
1799
1800 int
1801 record_latest_value (struct value *val)
1802 {
1803 int i;
1804
1805 /* We don't want this value to have anything to do with the inferior anymore.
1806 In particular, "set $1 = 50" should not affect the variable from which
1807 the value was taken, and fast watchpoints should be able to assume that
1808 a value on the value history never changes. */
1809 if (value_lazy (val))
1810 value_fetch_lazy (val);
1811 /* We preserve VALUE_LVAL so that the user can find out where it was fetched
1812 from. This is a bit dubious, because then *&$1 does not just return $1
1813 but the current contents of that location. c'est la vie... */
1814 val->modifiable = 0;
1815
1816 value_history.push_back (release_value (val));
1817
1818 return value_history.size ();
1819 }
1820
1821 /* Return a copy of the value in the history with sequence number NUM. */
1822
1823 struct value *
1824 access_value_history (int num)
1825 {
1826 int i;
1827 int absnum = num;
1828
1829 if (absnum <= 0)
1830 absnum += value_history.size ();
1831
1832 if (absnum <= 0)
1833 {
1834 if (num == 0)
1835 error (_("The history is empty."));
1836 else if (num == 1)
1837 error (_("There is only one value in the history."));
1838 else
1839 error (_("History does not go back to $$%d."), -num);
1840 }
1841 if (absnum > value_history.size ())
1842 error (_("History has not yet reached $%d."), absnum);
1843
1844 absnum--;
1845
1846 return value_copy (value_history[absnum].get ());
1847 }
1848
1849 static void
1850 show_values (const char *num_exp, int from_tty)
1851 {
1852 int i;
1853 struct value *val;
1854 static int num = 1;
1855
1856 if (num_exp)
1857 {
1858 /* "show values +" should print from the stored position.
1859 "show values <exp>" should print around value number <exp>. */
1860 if (num_exp[0] != '+' || num_exp[1] != '\0')
1861 num = parse_and_eval_long (num_exp) - 5;
1862 }
1863 else
1864 {
1865 /* "show values" means print the last 10 values. */
1866 num = value_history.size () - 9;
1867 }
1868
1869 if (num <= 0)
1870 num = 1;
1871
1872 for (i = num; i < num + 10 && i <= value_history.size (); i++)
1873 {
1874 struct value_print_options opts;
1875
1876 val = access_value_history (i);
1877 printf_filtered (("$%d = "), i);
1878 get_user_print_options (&opts);
1879 value_print (val, gdb_stdout, &opts);
1880 printf_filtered (("\n"));
1881 }
1882
1883 /* The next "show values +" should start after what we just printed. */
1884 num += 10;
1885
1886 /* Hitting just return after this command should do the same thing as
1887 "show values +". If num_exp is null, this is unnecessary, since
1888 "show values +" is not useful after "show values". */
1889 if (from_tty && num_exp)
1890 set_repeat_arguments ("+");
1891 }
1892 \f
1893 enum internalvar_kind
1894 {
1895 /* The internal variable is empty. */
1896 INTERNALVAR_VOID,
1897
1898 /* The value of the internal variable is provided directly as
1899 a GDB value object. */
1900 INTERNALVAR_VALUE,
1901
1902 /* A fresh value is computed via a call-back routine on every
1903 access to the internal variable. */
1904 INTERNALVAR_MAKE_VALUE,
1905
1906 /* The internal variable holds a GDB internal convenience function. */
1907 INTERNALVAR_FUNCTION,
1908
1909 /* The variable holds an integer value. */
1910 INTERNALVAR_INTEGER,
1911
1912 /* The variable holds a GDB-provided string. */
1913 INTERNALVAR_STRING,
1914 };
1915
1916 union internalvar_data
1917 {
1918 /* A value object used with INTERNALVAR_VALUE. */
1919 struct value *value;
1920
1921 /* The call-back routine used with INTERNALVAR_MAKE_VALUE. */
1922 struct
1923 {
1924 /* The functions to call. */
1925 const struct internalvar_funcs *functions;
1926
1927 /* The function's user-data. */
1928 void *data;
1929 } make_value;
1930
1931 /* The internal function used with INTERNALVAR_FUNCTION. */
1932 struct
1933 {
1934 struct internal_function *function;
1935 /* True if this is the canonical name for the function. */
1936 int canonical;
1937 } fn;
1938
1939 /* An integer value used with INTERNALVAR_INTEGER. */
1940 struct
1941 {
1942 /* If type is non-NULL, it will be used as the type to generate
1943 a value for this internal variable. If type is NULL, a default
1944 integer type for the architecture is used. */
1945 struct type *type;
1946 LONGEST val;
1947 } integer;
1948
1949 /* A string value used with INTERNALVAR_STRING. */
1950 char *string;
1951 };
1952
1953 /* Internal variables. These are variables within the debugger
1954 that hold values assigned by debugger commands.
1955 The user refers to them with a '$' prefix
1956 that does not appear in the variable names stored internally. */
1957
1958 struct internalvar
1959 {
1960 struct internalvar *next;
1961 char *name;
1962
1963 /* We support various different kinds of content of an internal variable.
1964 enum internalvar_kind specifies the kind, and union internalvar_data
1965 provides the data associated with this particular kind. */
1966
1967 enum internalvar_kind kind;
1968
1969 union internalvar_data u;
1970 };
1971
1972 static struct internalvar *internalvars;
1973
1974 /* If the variable does not already exist create it and give it the
1975 value given. If no value is given then the default is zero. */
1976 static void
1977 init_if_undefined_command (const char* args, int from_tty)
1978 {
1979 struct internalvar* intvar;
1980
1981 /* Parse the expression - this is taken from set_command(). */
1982 expression_up expr = parse_expression (args);
1983
1984 /* Validate the expression.
1985 Was the expression an assignment?
1986 Or even an expression at all? */
1987 if (expr->nelts == 0 || expr->elts[0].opcode != BINOP_ASSIGN)
1988 error (_("Init-if-undefined requires an assignment expression."));
1989
1990 /* Extract the variable from the parsed expression.
1991 In the case of an assign the lvalue will be in elts[1] and elts[2]. */
1992 if (expr->elts[1].opcode != OP_INTERNALVAR)
1993 error (_("The first parameter to init-if-undefined "
1994 "should be a GDB variable."));
1995 intvar = expr->elts[2].internalvar;
1996
1997 /* Only evaluate the expression if the lvalue is void.
1998 This may still fail if the expression is invalid. */
1999 if (intvar->kind == INTERNALVAR_VOID)
2000 evaluate_expression (expr.get ());
2001 }
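
/* Example session (illustrative only; $count is a hypothetical
   convenience variable):

     (gdb) init-if-undefined $count = 0
     (gdb) init-if-undefined $count = 99

   The second command is a no-op: $count is no longer INTERNALVAR_VOID,
   so the assignment expression is parsed but never evaluated.  */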
2002
2003
2004 /* Look up an internal variable with name NAME. NAME should not
2005 normally include a dollar sign.
2006
2007 If the specified internal variable does not exist,
2008 the return value is NULL. */
2009
2010 struct internalvar *
2011 lookup_only_internalvar (const char *name)
2012 {
2013 struct internalvar *var;
2014
2015 for (var = internalvars; var; var = var->next)
2016 if (strcmp (var->name, name) == 0)
2017 return var;
2018
2019 return NULL;
2020 }
2021
2022 /* Complete NAME by comparing it to the names of internal
2023 variables. */
2024
2025 void
2026 complete_internalvar (completion_tracker &tracker, const char *name)
2027 {
2028 struct internalvar *var;
2029 int len;
2030
2031 len = strlen (name);
2032
2033 for (var = internalvars; var; var = var->next)
2034 if (strncmp (var->name, name, len) == 0)
2035 {
2036 gdb::unique_xmalloc_ptr<char> copy (xstrdup (var->name));
2037
2038 tracker.add_completion (std::move (copy));
2039 }
2040 }
2041
2042 /* Create an internal variable with name NAME and with a void value.
2043 NAME should not normally include a dollar sign. */
2044
2045 struct internalvar *
2046 create_internalvar (const char *name)
2047 {
2048 struct internalvar *var = XNEW (struct internalvar);
2049
2050 var->name = concat (name, (char *)NULL);
2051 var->kind = INTERNALVAR_VOID;
2052 var->next = internalvars;
2053 internalvars = var;
2054 return var;
2055 }
2056
2057 /* Create an internal variable with name NAME and register FUNCS as the
2058 set of callbacks that value_of_internalvar uses to create a value
2059 whenever this variable is referenced. NAME should not normally include
2060 a dollar sign. DATA is passed uninterpreted to the callbacks.
2061 FUNCS->destroy, if not NULL, is called when the internal variable
2062 is destroyed. It is passed DATA as its only argument. */
2063
2064 struct internalvar *
2065 create_internalvar_type_lazy (const char *name,
2066 const struct internalvar_funcs *funcs,
2067 void *data)
2068 {
2069 struct internalvar *var = create_internalvar (name);
2070
2071 var->kind = INTERNALVAR_MAKE_VALUE;
2072 var->u.make_value.functions = funcs;
2073 var->u.make_value.data = data;
2074 return var;
2075 }
2076
2077 /* See documentation in value.h. */
2078
2079 int
2080 compile_internalvar_to_ax (struct internalvar *var,
2081 struct agent_expr *expr,
2082 struct axs_value *value)
2083 {
2084 if (var->kind != INTERNALVAR_MAKE_VALUE
2085 || var->u.make_value.functions->compile_to_ax == NULL)
2086 return 0;
2087
2088 var->u.make_value.functions->compile_to_ax (var, expr, value,
2089 var->u.make_value.data);
2090 return 1;
2091 }
2092
2093 /* Look up an internal variable with name NAME. NAME should not
2094 normally include a dollar sign.
2095
2096 If the specified internal variable does not exist,
2097 one is created, with a void value. */
2098
2099 struct internalvar *
2100 lookup_internalvar (const char *name)
2101 {
2102 struct internalvar *var;
2103
2104 var = lookup_only_internalvar (name);
2105 if (var)
2106 return var;
2107
2108 return create_internalvar (name);
2109 }
2110
2111 /* Return current value of internal variable VAR. For variables that
2112 are not inherently typed, use a value type appropriate for GDBARCH. */
2113
2114 struct value *
2115 value_of_internalvar (struct gdbarch *gdbarch, struct internalvar *var)
2116 {
2117 struct value *val;
2118 struct trace_state_variable *tsv;
2119
2120 /* If there is a trace state variable of the same name, assume that
2121 is what we really want to see. */
2122 tsv = find_trace_state_variable (var->name);
2123 if (tsv)
2124 {
2125 tsv->value_known = target_get_trace_state_variable_value (tsv->number,
2126 &(tsv->value));
2127 if (tsv->value_known)
2128 val = value_from_longest (builtin_type (gdbarch)->builtin_int64,
2129 tsv->value);
2130 else
2131 val = allocate_value (builtin_type (gdbarch)->builtin_void);
2132 return val;
2133 }
2134
2135 switch (var->kind)
2136 {
2137 case INTERNALVAR_VOID:
2138 val = allocate_value (builtin_type (gdbarch)->builtin_void);
2139 break;
2140
2141 case INTERNALVAR_FUNCTION:
2142 val = allocate_value (builtin_type (gdbarch)->internal_fn);
2143 break;
2144
2145 case INTERNALVAR_INTEGER:
2146 if (!var->u.integer.type)
2147 val = value_from_longest (builtin_type (gdbarch)->builtin_int,
2148 var->u.integer.val);
2149 else
2150 val = value_from_longest (var->u.integer.type, var->u.integer.val);
2151 break;
2152
2153 case INTERNALVAR_STRING:
2154 val = value_cstring (var->u.string, strlen (var->u.string),
2155 builtin_type (gdbarch)->builtin_char);
2156 break;
2157
2158 case INTERNALVAR_VALUE:
2159 val = value_copy (var->u.value);
2160 if (value_lazy (val))
2161 value_fetch_lazy (val);
2162 break;
2163
2164 case INTERNALVAR_MAKE_VALUE:
2165 val = (*var->u.make_value.functions->make_value) (gdbarch, var,
2166 var->u.make_value.data);
2167 break;
2168
2169 default:
2170 internal_error (__FILE__, __LINE__, _("bad kind"));
2171 }
2172
2173 /* Change the VALUE_LVAL to lval_internalvar so that future operations
2174 on this value go back to affect the original internal variable.
2175
2176 Do not do this for INTERNALVAR_MAKE_VALUE variables, as those have
2177 no underlying modifiable state in the internal variable.
2178
2179 Likewise, if the variable's value is a computed lvalue, we want
2180 references to it to produce another computed lvalue, where
2181 references and assignments actually operate through the
2182 computed value's functions.
2183
2184 This means that internal variables with computed values
2185 behave a little differently from other internal variables:
2186 assignments to them don't just replace the previous value
2187 altogether. At the moment, this seems like the behavior we
2188 want. */
2189
2190 if (var->kind != INTERNALVAR_MAKE_VALUE
2191 && val->lval != lval_computed)
2192 {
2193 VALUE_LVAL (val) = lval_internalvar;
2194 VALUE_INTERNALVAR (val) = var;
2195 }
2196
2197 return val;
2198 }
2199
2200 int
2201 get_internalvar_integer (struct internalvar *var, LONGEST *result)
2202 {
2203 if (var->kind == INTERNALVAR_INTEGER)
2204 {
2205 *result = var->u.integer.val;
2206 return 1;
2207 }
2208
2209 if (var->kind == INTERNALVAR_VALUE)
2210 {
2211 struct type *type = check_typedef (value_type (var->u.value));
2212
2213 if (TYPE_CODE (type) == TYPE_CODE_INT)
2214 {
2215 *result = value_as_long (var->u.value);
2216 return 1;
2217 }
2218 }
2219
2220 return 0;
2221 }
2222
2223 static int
2224 get_internalvar_function (struct internalvar *var,
2225 struct internal_function **result)
2226 {
2227 switch (var->kind)
2228 {
2229 case INTERNALVAR_FUNCTION:
2230 *result = var->u.fn.function;
2231 return 1;
2232
2233 default:
2234 return 0;
2235 }
2236 }
2237
2238 void
2239 set_internalvar_component (struct internalvar *var,
2240 LONGEST offset, LONGEST bitpos,
2241 LONGEST bitsize, struct value *newval)
2242 {
2243 gdb_byte *addr;
2244 struct gdbarch *arch;
2245 int unit_size;
2246
2247 switch (var->kind)
2248 {
2249 case INTERNALVAR_VALUE:
2250 addr = value_contents_writeable (var->u.value);
2251 arch = get_value_arch (var->u.value);
2252 unit_size = gdbarch_addressable_memory_unit_size (arch);
2253
2254 if (bitsize)
2255 modify_field (value_type (var->u.value), addr + offset,
2256 value_as_long (newval), bitpos, bitsize);
2257 else
2258 memcpy (addr + offset * unit_size, value_contents (newval),
2259 TYPE_LENGTH (value_type (newval)));
2260 break;
2261
2262 default:
2263 /* We can never get a component of any other kind. */
2264 internal_error (__FILE__, __LINE__, _("set_internalvar_component"));
2265 }
2266 }
2267
2268 void
2269 set_internalvar (struct internalvar *var, struct value *val)
2270 {
2271 enum internalvar_kind new_kind;
2272 union internalvar_data new_data = { 0 };
2273
2274 if (var->kind == INTERNALVAR_FUNCTION && var->u.fn.canonical)
2275 error (_("Cannot overwrite convenience function %s"), var->name);
2276
2277 /* Prepare new contents. */
2278 switch (TYPE_CODE (check_typedef (value_type (val))))
2279 {
2280 case TYPE_CODE_VOID:
2281 new_kind = INTERNALVAR_VOID;
2282 break;
2283
2284 case TYPE_CODE_INTERNAL_FUNCTION:
2285 gdb_assert (VALUE_LVAL (val) == lval_internalvar);
2286 new_kind = INTERNALVAR_FUNCTION;
2287 get_internalvar_function (VALUE_INTERNALVAR (val),
2288 &new_data.fn.function);
2289 /* Copies created here are never canonical. */
2290 break;
2291
2292 default:
2293 new_kind = INTERNALVAR_VALUE;
2294 new_data.value = value_copy (val);
2295 new_data.value->modifiable = 1;
2296
2297 /* Force the value to be fetched from the target now, to avoid problems
2298 later when this internalvar is referenced and the target is gone or
2299 has changed. */
2300 if (value_lazy (new_data.value))
2301 value_fetch_lazy (new_data.value);
2302
2303 /* Release the value from the value chain to prevent it from being
2304 deleted by free_all_values. From here on this function should not
2305 call error () until new_data is installed into var->u, to avoid
2306 leaking memory. */
2307 release_value (new_data.value).release ();
2308
2309 /* Internal variables which are created from values with a dynamic
2310 location don't need the location property of the origin anymore.
2311 The resolved dynamic location is used prior to any other address
2312 when accessing the value.
2313 If we kept it, we would still refer to the origin value.
2314 Remove the location property in case it exists. */
2315 remove_dyn_prop (DYN_PROP_DATA_LOCATION, value_type (new_data.value));
2316
2317 break;
2318 }
2319
2320 /* Clean up old contents. */
2321 clear_internalvar (var);
2322
2323 /* Switch over. */
2324 var->kind = new_kind;
2325 var->u = new_data;
2326 /* End code which must not call error(). */
2327 }
2328
2329 void
2330 set_internalvar_integer (struct internalvar *var, LONGEST l)
2331 {
2332 /* Clean up old contents. */
2333 clear_internalvar (var);
2334
2335 var->kind = INTERNALVAR_INTEGER;
2336 var->u.integer.type = NULL;
2337 var->u.integer.val = l;
2338 }
2339
2340 void
2341 set_internalvar_string (struct internalvar *var, const char *string)
2342 {
2343 /* Clean up old contents. */
2344 clear_internalvar (var);
2345
2346 var->kind = INTERNALVAR_STRING;
2347 var->u.string = xstrdup (string);
2348 }
2349
2350 static void
2351 set_internalvar_function (struct internalvar *var, struct internal_function *f)
2352 {
2353 /* Clean up old contents. */
2354 clear_internalvar (var);
2355
2356 var->kind = INTERNALVAR_FUNCTION;
2357 var->u.fn.function = f;
2358 var->u.fn.canonical = 1;
2359 /* Variables installed here are always the canonical version. */
2360 }
2361
2362 void
2363 clear_internalvar (struct internalvar *var)
2364 {
2365 /* Clean up old contents. */
2366 switch (var->kind)
2367 {
2368 case INTERNALVAR_VALUE:
2369 value_decref (var->u.value);
2370 break;
2371
2372 case INTERNALVAR_STRING:
2373 xfree (var->u.string);
2374 break;
2375
2376 case INTERNALVAR_MAKE_VALUE:
2377 if (var->u.make_value.functions->destroy != NULL)
2378 var->u.make_value.functions->destroy (var->u.make_value.data);
2379 break;
2380
2381 default:
2382 break;
2383 }
2384
2385 /* Reset to void kind. */
2386 var->kind = INTERNALVAR_VOID;
2387 }
2388
2389 char *
2390 internalvar_name (const struct internalvar *var)
2391 {
2392 return var->name;
2393 }
2394
2395 static struct internal_function *
2396 create_internal_function (const char *name,
2397 internal_function_fn handler, void *cookie)
2398 {
2399 struct internal_function *ifn = XNEW (struct internal_function);
2400
2401 ifn->name = xstrdup (name);
2402 ifn->handler = handler;
2403 ifn->cookie = cookie;
2404 return ifn;
2405 }
2406
2407 char *
2408 value_internal_function_name (struct value *val)
2409 {
2410 struct internal_function *ifn;
2411 int result;
2412
2413 gdb_assert (VALUE_LVAL (val) == lval_internalvar);
2414 result = get_internalvar_function (VALUE_INTERNALVAR (val), &ifn);
2415 gdb_assert (result);
2416
2417 return ifn->name;
2418 }
2419
2420 struct value *
2421 call_internal_function (struct gdbarch *gdbarch,
2422 const struct language_defn *language,
2423 struct value *func, int argc, struct value **argv)
2424 {
2425 struct internal_function *ifn;
2426 int result;
2427
2428 gdb_assert (VALUE_LVAL (func) == lval_internalvar);
2429 result = get_internalvar_function (VALUE_INTERNALVAR (func), &ifn);
2430 gdb_assert (result);
2431
2432 return (*ifn->handler) (gdbarch, language, ifn->cookie, argc, argv);
2433 }
2434
2435 /* The 'function' command. This does nothing -- it is just a
2436 placeholder to let "help function NAME" work. This is also used as
2437 the implementation of the sub-command that is created when
2438 registering an internal function. */
2439 static void
2440 function_command (const char *command, int from_tty)
2441 {
2442 /* Do nothing. */
2443 }
2444
2445 /* Clean up if an internal function's command is destroyed. */
2446 static void
2447 function_destroyer (struct cmd_list_element *self, void *ignore)
2448 {
2449 xfree ((char *) self->name);
2450 xfree ((char *) self->doc);
2451 }
2452
2453 /* Add a new internal function. NAME is the name of the function; DOC
2454 is a documentation string describing the function. HANDLER is
2455 called when the function is invoked. COOKIE is an arbitrary
2456 pointer which is passed to HANDLER and is intended for "user
2457 data". */
2458 void
2459 add_internal_function (const char *name, const char *doc,
2460 internal_function_fn handler, void *cookie)
2461 {
2462 struct cmd_list_element *cmd;
2463 struct internal_function *ifn;
2464 struct internalvar *var = lookup_internalvar (name);
2465
2466 ifn = create_internal_function (name, handler, cookie);
2467 set_internalvar_function (var, ifn);
2468
2469 cmd = add_cmd (xstrdup (name), no_class, function_command, (char *) doc,
2470 &functionlist);
2471 cmd->destroyer = function_destroyer;
2472 }
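
/* A minimal registration sketch (hypothetical, for illustration only;
   "answer" and ANSWER_HANDLER are not part of GDB):

     static struct value *
     answer_handler (struct gdbarch *gdbarch,
                     const struct language_defn *language,
                     void *cookie, int argc, struct value **argv)
     {
       return value_from_longest (builtin_type (gdbarch)->builtin_int, 42);
     }

     add_internal_function ("answer", _("Always return 42."),
                            answer_handler, NULL);

   After registration, "print $answer ()" reaches ANSWER_HANDLER via
   call_internal_function, and "help function answer" shows the doc
   string through the placeholder command created above.  */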
2473
2474 /* Update VALUE before discarding OBJFILE. COPIED_TYPES is used to
2475 prevent cycles / duplicates. */
2476
2477 void
2478 preserve_one_value (struct value *value, struct objfile *objfile,
2479 htab_t copied_types)
2480 {
2481 if (TYPE_OBJFILE (value->type) == objfile)
2482 value->type = copy_type_recursive (objfile, value->type, copied_types);
2483
2484 if (TYPE_OBJFILE (value->enclosing_type) == objfile)
2485 value->enclosing_type = copy_type_recursive (objfile,
2486 value->enclosing_type,
2487 copied_types);
2488 }
2489
2490 /* Likewise for internal variable VAR. */
2491
2492 static void
2493 preserve_one_internalvar (struct internalvar *var, struct objfile *objfile,
2494 htab_t copied_types)
2495 {
2496 switch (var->kind)
2497 {
2498 case INTERNALVAR_INTEGER:
2499 if (var->u.integer.type && TYPE_OBJFILE (var->u.integer.type) == objfile)
2500 var->u.integer.type
2501 = copy_type_recursive (objfile, var->u.integer.type, copied_types);
2502 break;
2503
2504 case INTERNALVAR_VALUE:
2505 preserve_one_value (var->u.value, objfile, copied_types);
2506 break;
2507 }
2508 }
2509
2510 /* Update the internal variables and value history when OBJFILE is
2511 discarded; we must copy the types out of the objfile. New global types
2512 will be created for every convenience variable which currently points to
2513 this objfile's types, and the convenience variables will be adjusted to
2514 use the new global types. */
2515
2516 void
2517 preserve_values (struct objfile *objfile)
2518 {
2519 htab_t copied_types;
2520 struct internalvar *var;
2521 int i;
2522
2523 /* Create the hash table. We allocate on the objfile's obstack, since
2524 it is soon to be deleted. */
2525 copied_types = create_copied_types_hash (objfile);
2526
2527 for (const value_ref_ptr &item : value_history)
2528 preserve_one_value (item.get (), objfile, copied_types);
2529
2530 for (var = internalvars; var; var = var->next)
2531 preserve_one_internalvar (var, objfile, copied_types);
2532
2533 preserve_ext_lang_values (objfile, copied_types);
2534
2535 htab_delete (copied_types);
2536 }
2537
2538 static void
2539 show_convenience (const char *ignore, int from_tty)
2540 {
2541 struct gdbarch *gdbarch = get_current_arch ();
2542 struct internalvar *var;
2543 int varseen = 0;
2544 struct value_print_options opts;
2545
2546 get_user_print_options (&opts);
2547 for (var = internalvars; var; var = var->next)
2548 {
2549
2550 if (!varseen)
2551 {
2552 varseen = 1;
2553 }
2554 printf_filtered (("$%s = "), var->name);
2555
2556 TRY
2557 {
2558 struct value *val;
2559
2560 val = value_of_internalvar (gdbarch, var);
2561 value_print (val, gdb_stdout, &opts);
2562 }
2563 CATCH (ex, RETURN_MASK_ERROR)
2564 {
2565 fprintf_filtered (gdb_stdout, _("<error: %s>"), ex.message);
2566 }
2567 END_CATCH
2568
2569 printf_filtered (("\n"));
2570 }
2571 if (!varseen)
2572 {
2573 /* This text does not mention convenience functions on purpose.
2574 The user can't create them except via Python, and if Python support
2575 is installed this message will never be printed ($_streq will
2576 exist). */
2577 printf_unfiltered (_("No debugger convenience variables now defined.\n"
2578 "Convenience variables have "
2579 "names starting with \"$\";\n"
2580 "use \"set\" as in \"set "
2581 "$foo = 5\" to define them.\n"));
2582 }
2583 }
2584 \f
2585
2586 /* See value.h. */
2587
2588 struct value *
2589 value_from_xmethod (xmethod_worker_up &&worker)
2590 {
2591 struct value *v;
2592
2593 v = allocate_value (builtin_type (target_gdbarch ())->xmethod);
2594 v->lval = lval_xcallable;
2595 v->location.xm_worker = worker.release ();
2596 v->modifiable = 0;
2597
2598 return v;
2599 }
2600
2601 /* Return the type of the result of TYPE_CODE_XMETHOD value METHOD. */
2602
2603 struct type *
2604 result_type_of_xmethod (struct value *method, int argc, struct value **argv)
2605 {
2606 gdb_assert (TYPE_CODE (value_type (method)) == TYPE_CODE_XMETHOD
2607 && method->lval == lval_xcallable && argc > 0);
2608
2609 return method->location.xm_worker->get_result_type
2610 (argv[0], argv + 1, argc - 1);
2611 }
2612
2613 /* Call the xmethod corresponding to the TYPE_CODE_XMETHOD value METHOD. */
2614
2615 struct value *
2616 call_xmethod (struct value *method, int argc, struct value **argv)
2617 {
2618 gdb_assert (TYPE_CODE (value_type (method)) == TYPE_CODE_XMETHOD
2619 && method->lval == lval_xcallable && argc > 0);
2620
2621 return method->location.xm_worker->invoke (argv[0], argv + 1, argc - 1);
2622 }
2623 \f
2624 /* Extract a value as a C number (either long or double).
2625 Knows how to convert fixed values to double, or
2626 floating values to long.
2627 Does not deallocate the value. */
2628
2629 LONGEST
2630 value_as_long (struct value *val)
2631 {
2632 /* This coerces arrays and functions, which is necessary (e.g.
2633 in disassemble_command). It also dereferences references, which
2634 I suspect is the most logical thing to do. */
2635 val = coerce_array (val);
2636 return unpack_long (value_type (val), value_contents (val));
2637 }
2638
2639 /* Extract a value as a C pointer. Does not deallocate the value.
2640 Note that val's type may not actually be a pointer; value_as_long
2641 handles all the cases. */
2642 CORE_ADDR
2643 value_as_address (struct value *val)
2644 {
2645 struct gdbarch *gdbarch = get_type_arch (value_type (val));
2646
2647 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
2648 whether we want this to be true eventually. */
2649 #if 0
2650 /* gdbarch_addr_bits_remove is wrong if we are being called for a
2651 non-address (e.g. argument to "signal", "info break", etc.), or
2652 for pointers to char, in which the low bits *are* significant. */
2653 return gdbarch_addr_bits_remove (gdbarch, value_as_long (val));
2654 #else
2655
2656 /* There are several targets (IA-64, PowerPC, and others) which
2657 don't represent pointers to functions as simply the address of
2658 the function's entry point. For example, on the IA-64, a
2659 function pointer points to a two-word descriptor, generated by
2660 the linker, which contains the function's entry point, and the
2661 value the IA-64 "global pointer" register should have --- to
2662 support position-independent code. The linker generates
2663 descriptors only for those functions whose addresses are taken.
2664
2665 On such targets, it's difficult for GDB to convert an arbitrary
2666 function address into a function pointer; it has to either find
2667 an existing descriptor for that function, or call malloc and
2668 build its own. On some targets, it is impossible for GDB to
2669 build a descriptor at all: the descriptor must contain a jump
2670 instruction; data memory cannot be executed; and code memory
2671 cannot be modified.
2672
2673 Upon entry to this function, if VAL is a value of type `function'
2674 (that is, TYPE_CODE (VALUE_TYPE (val)) == TYPE_CODE_FUNC), then
2675 value_address (val) is the address of the function. This is what
2676 you'll get if you evaluate an expression like `main'. The call
2677 to COERCE_ARRAY below actually does all the usual unary
2678 conversions, which includes converting values of type `function'
2679 to `pointer to function'. This is the challenging conversion
2680 discussed above. Then, `unpack_long' will convert that pointer
2681 back into an address.
2682
2683 So, suppose the user types `disassemble foo' on an architecture
2684 with a strange function pointer representation, on which GDB
2685 cannot build its own descriptors, and suppose further that `foo'
2686 has no linker-built descriptor. The address->pointer conversion
2687 will signal an error and prevent the command from running, even
2688 though the next step would have been to convert the pointer
2689 directly back into the same address.
2690
2691 The following shortcut avoids this whole mess. If VAL is a
2692 function, just return its address directly. */
2693 if (TYPE_CODE (value_type (val)) == TYPE_CODE_FUNC
2694 || TYPE_CODE (value_type (val)) == TYPE_CODE_METHOD)
2695 return value_address (val);
2696
2697 val = coerce_array (val);
2698
2699 /* Some architectures (e.g. Harvard) map instruction and data
2700 addresses onto a single large unified address space. For
2701 instance: an architecture may consider a large integer in the
2702 range 0x10000000 .. 0x1000ffff to already represent a data
2703 address (and hence not need a pointer-to-address conversion) while
2704 a small integer would still need to be converted from integer to
2705 pointer to address. Just assume such architectures handle all
2706 integer conversions in a single function. */
2707
2708 /* JimB writes:
2709
2710 I think INTEGER_TO_ADDRESS is a good idea as proposed --- but we
2711 must admonish GDB hackers to make sure its behavior matches the
2712 compiler's, whenever possible.
2713
2714 In general, I think GDB should evaluate expressions the same way
2715 the compiler does. When the user copies an expression out of
2716 their source code and hands it to a `print' command, they should
2717 get the same value the compiler would have computed. Any
2718 deviation from this rule can cause major confusion and annoyance,
2719 and needs to be justified carefully. In other words, GDB doesn't
2720 really have the freedom to do these conversions in clever and
2721 useful ways.
2722
2723 AndrewC pointed out that users aren't complaining about how GDB
2724 casts integers to pointers; they are complaining that they can't
2725 take an address from a disassembly listing and give it to `x/i'.
2726 This is certainly important.
2727
2728 Adding an architecture method like integer_to_address() certainly
2729 makes it possible for GDB to "get it right" in all circumstances
2730 --- the target has complete control over how things get done, so
2731 people can Do The Right Thing for their target without breaking
2732 anyone else. The standard doesn't specify how integers get
2733 converted to pointers; usually, the ABI doesn't either, but
2734 ABI-specific code is a more reasonable place to handle it. */
2735
2736 if (TYPE_CODE (value_type (val)) != TYPE_CODE_PTR
2737 && !TYPE_IS_REFERENCE (value_type (val))
2738 && gdbarch_integer_to_address_p (gdbarch))
2739 return gdbarch_integer_to_address (gdbarch, value_type (val),
2740 value_contents (val));
2741
2742 return unpack_long (value_type (val), value_contents (val));
2743 #endif
2744 }
2745 \f
2746 /* Unpack raw data (copied from the debuggee, target byte order) at VALADDR
2747 as a long, or as a double, assuming the raw data is described
2748 by type TYPE. Knows how to convert different sizes of values
2749 and can convert between fixed and floating point. We don't assume
2750 any alignment for the raw data. Return value is in host byte order.
2751
2752 If you want functions and arrays to be coerced to pointers, and
2753 references to be dereferenced, call value_as_long() instead.
2754
2755 C++: It is assumed that the front-end has taken care of
2756 all matters concerning pointers to members. A pointer
2757 to member which reaches here is considered to be equivalent
2758 to an INT (or some size). After all, it is only an offset. */
2759
2760 LONGEST
2761 unpack_long (struct type *type, const gdb_byte *valaddr)
2762 {
2763 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
2764 enum type_code code = TYPE_CODE (type);
2765 int len = TYPE_LENGTH (type);
2766 int nosign = TYPE_UNSIGNED (type);
2767
2768 switch (code)
2769 {
2770 case TYPE_CODE_TYPEDEF:
2771 return unpack_long (check_typedef (type), valaddr);
2772 case TYPE_CODE_ENUM:
2773 case TYPE_CODE_FLAGS:
2774 case TYPE_CODE_BOOL:
2775 case TYPE_CODE_INT:
2776 case TYPE_CODE_CHAR:
2777 case TYPE_CODE_RANGE:
2778 case TYPE_CODE_MEMBERPTR:
2779 if (nosign)
2780 return extract_unsigned_integer (valaddr, len, byte_order);
2781 else
2782 return extract_signed_integer (valaddr, len, byte_order);
2783
2784 case TYPE_CODE_FLT:
2785 case TYPE_CODE_DECFLOAT:
2786 return target_float_to_longest (valaddr, type);
2787
2788 case TYPE_CODE_PTR:
2789 case TYPE_CODE_REF:
2790 case TYPE_CODE_RVALUE_REF:
2791 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
2792 whether we want this to be true eventually. */
2793 return extract_typed_address (valaddr, type);
2794
2795 default:
2796 error (_("Value can't be converted to integer."));
2797 }
2798 return 0; /* Placate lint. */
2799 }
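
/* Worked example (editor's illustration): for a signed 2-byte
   TYPE_CODE_INT on a big-endian target, VALADDR = { 0xff, 0xfe } is
   handed to extract_signed_integer and yields -2; with TYPE_UNSIGNED
   set, the same bytes yield 65534 instead.  */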
2800
2801 /* Unpack raw data (copied from the debuggee, target byte order) at VALADDR
2802 as a CORE_ADDR, assuming the raw data is described by type TYPE.
2803 We don't assume any alignment for the raw data. Return value is in
2804 host byte order.
2805
2806 If you want functions and arrays to be coerced to pointers, and
2807 references to be dereferenced, call value_as_address() instead.
2808
2809 C++: It is assumed that the front-end has taken care of
2810 all matters concerning pointers to members. A pointer
2811 to member which reaches here is considered to be equivalent
2812 to an INT (or some size). After all, it is only an offset. */
2813
2814 CORE_ADDR
2815 unpack_pointer (struct type *type, const gdb_byte *valaddr)
2816 {
2817 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
2818 whether we want this to be true eventually. */
2819 return unpack_long (type, valaddr);
2820 }
2821
2822 bool
2823 is_floating_value (struct value *val)
2824 {
2825 struct type *type = check_typedef (value_type (val));
2826
2827 if (is_floating_type (type))
2828 {
2829 if (!target_float_is_valid (value_contents (val), type))
2830 error (_("Invalid floating value found in program."));
2831 return true;
2832 }
2833
2834 return false;
2835 }
2836
2837 \f
2838 /* Get the value of the FIELDNO'th field (which must be static) of
2839 TYPE. */
2840
2841 struct value *
2842 value_static_field (struct type *type, int fieldno)
2843 {
2844 struct value *retval;
2845
2846 switch (TYPE_FIELD_LOC_KIND (type, fieldno))
2847 {
2848 case FIELD_LOC_KIND_PHYSADDR:
2849 retval = value_at_lazy (TYPE_FIELD_TYPE (type, fieldno),
2850 TYPE_FIELD_STATIC_PHYSADDR (type, fieldno));
2851 break;
2852 case FIELD_LOC_KIND_PHYSNAME:
2853 {
2854 const char *phys_name = TYPE_FIELD_STATIC_PHYSNAME (type, fieldno);
2855 /* TYPE_FIELD_NAME (type, fieldno); */
2856 struct block_symbol sym = lookup_symbol (phys_name, 0, VAR_DOMAIN, 0);
2857
2858 if (sym.symbol == NULL)
2859 {
2860 /* With some compilers, e.g. HP aCC, static data members are
2861 reported as non-debuggable symbols. */
2862 struct bound_minimal_symbol msym
2863 = lookup_minimal_symbol (phys_name, NULL, NULL);
2864 struct type *field_type = TYPE_FIELD_TYPE (type, fieldno);
2865
2866 if (!msym.minsym)
2867 retval = allocate_optimized_out_value (field_type);
2868 else
2869 retval = value_at_lazy (field_type, BMSYMBOL_VALUE_ADDRESS (msym));
2870 }
2871 else
2872 retval = value_of_variable (sym.symbol, sym.block);
2873 break;
2874 }
2875 default:
2876 gdb_assert_not_reached ("unexpected field location kind");
2877 }
2878
2879 return retval;
2880 }
2881
2882 /* Change the enclosing type of a value object VAL to NEW_ENCL_TYPE.
2883 You have to be careful here, since the size of the data area for the value
2884 is set by the length of the enclosing type. So if NEW_ENCL_TYPE is bigger
2885 than the old enclosing type, you have to allocate more space for the
2886 data. */
2887
2888 void
2889 set_value_enclosing_type (struct value *val, struct type *new_encl_type)
2890 {
2891 if (TYPE_LENGTH (new_encl_type) > TYPE_LENGTH (value_enclosing_type (val)))
2892 {
2893 check_type_length_before_alloc (new_encl_type);
2894 val->contents
2895 = (gdb_byte *) xrealloc (val->contents, TYPE_LENGTH (new_encl_type));
2896 }
2897
2898 val->enclosing_type = new_encl_type;
2899 }
2900
2901 /* Given a value ARG1 (offset by OFFSET bytes)
2902 of a struct or union type ARG_TYPE,
2903 extract and return the value of one of its (non-static) fields.
2904 FIELDNO says which field. */
2905
2906 struct value *
2907 value_primitive_field (struct value *arg1, LONGEST offset,
2908 int fieldno, struct type *arg_type)
2909 {
2910 struct value *v;
2911 struct type *type;
2912 struct gdbarch *arch = get_value_arch (arg1);
2913 int unit_size = gdbarch_addressable_memory_unit_size (arch);
2914
2915 arg_type = check_typedef (arg_type);
2916 type = TYPE_FIELD_TYPE (arg_type, fieldno);
2917
2918 /* Call check_typedef on our type to make sure that, if TYPE
2919 is a TYPE_CODE_TYPEDEF, its length is set to the length
2920 of the target type instead of zero. However, we do not
2921 replace the typedef type by the target type, because we want
2922 to keep the typedef in order to be able to print the type
2923 description correctly. */
2924 check_typedef (type);
2925
2926 if (TYPE_FIELD_BITSIZE (arg_type, fieldno))
2927 {
2928 /* Handle packed fields.
2929
2930 Create a new value for the bitfield, with bitpos and bitsize
2931 set. If possible, arrange offset and bitpos so that we can
2932 do a single aligned read of the size of the containing type.
2933 Otherwise, adjust offset to the byte containing the first
2934 bit. Assume that the address, offset, and embedded offset
2935 are sufficiently aligned. */
2936
2937 LONGEST bitpos = TYPE_FIELD_BITPOS (arg_type, fieldno);
2938 LONGEST container_bitsize = TYPE_LENGTH (type) * 8;
2939
2940 v = allocate_value_lazy (type);
2941 v->bitsize = TYPE_FIELD_BITSIZE (arg_type, fieldno);
2942 if ((bitpos % container_bitsize) + v->bitsize <= container_bitsize
2943 && TYPE_LENGTH (type) <= (int) sizeof (LONGEST))
2944 v->bitpos = bitpos % container_bitsize;
2945 else
2946 v->bitpos = bitpos % 8;
2947 v->offset = (value_embedded_offset (arg1)
2948 + offset
2949 + (bitpos - v->bitpos) / 8);
2950 set_value_parent (v, arg1);
2951 if (!value_lazy (arg1))
2952 value_fetch_lazy (v);
2953 }
2954 else if (fieldno < TYPE_N_BASECLASSES (arg_type))
2955 {
2956 /* This field is actually a base subobject, so preserve the
2957 entire object's contents for later references to virtual
2958 bases, etc. */
2959 LONGEST boffset;
2960
2961 /* Lazy register values with offsets are not supported. */
2962 if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
2963 value_fetch_lazy (arg1);
2964
2965 /* We special case virtual inheritance here because this
2966 requires access to the contents, which we would rather avoid
2967 for references to ordinary fields of unavailable values. */
2968 if (BASETYPE_VIA_VIRTUAL (arg_type, fieldno))
2969 boffset = baseclass_offset (arg_type, fieldno,
2970 value_contents (arg1),
2971 value_embedded_offset (arg1),
2972 value_address (arg1),
2973 arg1);
2974 else
2975 boffset = TYPE_FIELD_BITPOS (arg_type, fieldno) / 8;
2976
2977 if (value_lazy (arg1))
2978 v = allocate_value_lazy (value_enclosing_type (arg1));
2979 else
2980 {
2981 v = allocate_value (value_enclosing_type (arg1));
2982 value_contents_copy_raw (v, 0, arg1, 0,
2983 TYPE_LENGTH (value_enclosing_type (arg1)));
2984 }
2985 v->type = type;
2986 v->offset = value_offset (arg1);
2987 v->embedded_offset = offset + value_embedded_offset (arg1) + boffset;
2988 }
2989 else if (NULL != TYPE_DATA_LOCATION (type))
2990 {
2991 /* Field is a dynamic data member. */
2992
2993 gdb_assert (0 == offset);
2994 /* We expect an already resolved data location. */
2995 gdb_assert (PROP_CONST == TYPE_DATA_LOCATION_KIND (type));
2996 /* For dynamic data types, defer memory allocation
2997 until we actually access the value. */
2998 v = allocate_value_lazy (type);
2999 }
3000 else
3001 {
3002 /* Plain old data member */
3003 offset += (TYPE_FIELD_BITPOS (arg_type, fieldno)
3004 / (HOST_CHAR_BIT * unit_size));
3005
3006 /* Lazy register values with offsets are not supported. */
3007 if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
3008 value_fetch_lazy (arg1);
3009
3010 if (value_lazy (arg1))
3011 v = allocate_value_lazy (type);
3012 else
3013 {
3014 v = allocate_value (type);
3015 value_contents_copy_raw (v, value_embedded_offset (v),
3016 arg1, value_embedded_offset (arg1) + offset,
3017 type_length_units (type));
3018 }
3019 v->offset = (value_offset (arg1) + offset
3020 + value_embedded_offset (arg1));
3021 }
3022 set_value_component_location (v, arg1);
3023 return v;
3024 }
3025
3026 /* Given a value ARG1 of a struct or union type,
3027 extract and return the value of one of its (non-static) fields.
3028 FIELDNO says which field. */
3029
3030 struct value *
3031 value_field (struct value *arg1, int fieldno)
3032 {
3033 return value_primitive_field (arg1, 0, fieldno, value_type (arg1));
3034 }
3035
3036 /* Return a non-virtual function as a value.
3037 F is the list of member functions which contains the desired method.
3038 J is an index into F which provides the desired method.
3039
3040 We only use the symbol for its address, so be happy with either a
3041 full symbol or a minimal symbol. */
3042
3043 struct value *
3044 value_fn_field (struct value **arg1p, struct fn_field *f,
3045 int j, struct type *type,
3046 LONGEST offset)
3047 {
3048 struct value *v;
3049 struct type *ftype = TYPE_FN_FIELD_TYPE (f, j);
3050 const char *physname = TYPE_FN_FIELD_PHYSNAME (f, j);
3051 struct symbol *sym;
3052 struct bound_minimal_symbol msym;
3053
3054 sym = lookup_symbol (physname, 0, VAR_DOMAIN, 0).symbol;
3055 if (sym != NULL)
3056 {
3057 memset (&msym, 0, sizeof (msym));
3058 }
3059 else
3060 {
3061 gdb_assert (sym == NULL);
3062 msym = lookup_bound_minimal_symbol (physname);
3063 if (msym.minsym == NULL)
3064 return NULL;
3065 }
3066
3067 v = allocate_value (ftype);
3068 VALUE_LVAL (v) = lval_memory;
3069 if (sym)
3070 {
3071 set_value_address (v, BLOCK_START (SYMBOL_BLOCK_VALUE (sym)));
3072 }
3073 else
3074 {
3075 /* The minimal symbol might point to a function descriptor;
3076 resolve it to the actual code address instead. */
3077 struct objfile *objfile = msym.objfile;
3078 struct gdbarch *gdbarch = get_objfile_arch (objfile);
3079
3080 set_value_address (v,
3081 gdbarch_convert_from_func_ptr_addr
3082 (gdbarch, BMSYMBOL_VALUE_ADDRESS (msym), &current_target));
3083 }
3084
3085 if (arg1p)
3086 {
3087 if (type != value_type (*arg1p))
3088 *arg1p = value_ind (value_cast (lookup_pointer_type (type),
3089 value_addr (*arg1p)));
3090
3091 /* Move the `this' pointer according to the offset.
3092 VALUE_OFFSET (*arg1p) += offset; */
3093 }
3094
3095 return v;
3096 }
3097
3098 \f
3099
3100 /* Unpack a bitfield of the specified FIELD_TYPE, from the object at
3101 VALADDR, and store the result in *RESULT.
3102 The bitfield starts at BITPOS bits and contains BITSIZE bits; if
3103 BITSIZE is zero, then the length is taken from FIELD_TYPE.
3104
3105 Extracting bits depends on endianness of the machine. Compute the
3106 number of least significant bits to discard. For big endian machines,
3107 we compute the total number of bits in the anonymous object, subtract
3108 off the bit count from the MSB of the object to the MSB of the
3109 bitfield, then the size of the bitfield, which leaves the LSB discard
3110 count. For little endian machines, the discard count is simply the
3111 number of bits from the LSB of the anonymous object to the LSB of the
3112 bitfield.
3113
3114 If the field is signed, we also do sign extension. */
3115
3116 static LONGEST
3117 unpack_bits_as_long (struct type *field_type, const gdb_byte *valaddr,
3118 LONGEST bitpos, LONGEST bitsize)
3119 {
3120 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (field_type));
3121 ULONGEST val;
3122 ULONGEST valmask;
3123 int lsbcount;
3124 LONGEST bytes_read;
3125 LONGEST read_offset;
3126
3127 /* Read the minimum number of bytes required; there may not be
3128 enough bytes to read an entire ULONGEST. */
3129 field_type = check_typedef (field_type);
3130 if (bitsize)
3131 bytes_read = ((bitpos % 8) + bitsize + 7) / 8;
3132 else
3133 {
3134 bytes_read = TYPE_LENGTH (field_type);
3135 bitsize = 8 * bytes_read;
3136 }
3137
3138 read_offset = bitpos / 8;
3139
3140 val = extract_unsigned_integer (valaddr + read_offset,
3141 bytes_read, byte_order);
3142
3143 /* Extract bits. See comment above. */
3144
3145 if (gdbarch_bits_big_endian (get_type_arch (field_type)))
3146 lsbcount = (bytes_read * 8 - bitpos % 8 - bitsize);
3147 else
3148 lsbcount = (bitpos % 8);
3149 val >>= lsbcount;
3150
3151 /* If the field does not entirely fill a LONGEST, then zero the sign bits.
3152 If the field is signed, and is negative, then sign extend. */
3153
3154 if (bitsize < 8 * (int) sizeof (val))
3155 {
3156 valmask = (((ULONGEST) 1) << bitsize) - 1;
3157 val &= valmask;
3158 if (!TYPE_UNSIGNED (field_type))
3159 {
3160 if (val & (valmask ^ (valmask >> 1)))
3161 {
3162 val |= ~valmask;
3163 }
3164 }
3165 }
3166
3167 return val;
3168 }
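
/* Worked example (editor's illustration): little-endian target (so
   bits are not big-endian), VALADDR = { 0xb4 }, BITPOS = 2, BITSIZE = 5.
   bytes_read = ((2 % 8) + 5 + 7) / 8 = 1, so VAL = 0xb4 = 10110100b.
   lsbcount = 2, so VAL >>= 2 gives 101101b; masking with (1 << 5) - 1
   leaves 01101b = 13.  The top bit of the 5-bit field is clear, so no
   sign extension happens even for a signed FIELD_TYPE.  */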
3169
3170 /* Unpack a field FIELDNO of the specified TYPE, from the object at
3171 VALADDR + EMBEDDED_OFFSET. VALADDR points to the contents of
3172 VAL, which must not be NULL. See
3173 unpack_bits_as_long for more details. */
3174
3175 int
3176 unpack_value_field_as_long (struct type *type, const gdb_byte *valaddr,
3177 LONGEST embedded_offset, int fieldno,
3178 const struct value *val, LONGEST *result)
3179 {
3180 int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3181 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3182 struct type *field_type = TYPE_FIELD_TYPE (type, fieldno);
3183 int bit_offset;
3184
3185 gdb_assert (val != NULL);
3186
3187 bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos;
3188 if (value_bits_any_optimized_out (val, bit_offset, bitsize)
3189 || !value_bits_available (val, bit_offset, bitsize))
3190 return 0;
3191
3192 *result = unpack_bits_as_long (field_type, valaddr + embedded_offset,
3193 bitpos, bitsize);
3194 return 1;
3195 }
3196
3197 /* Unpack a field FIELDNO of the specified TYPE, from the anonymous
3198 object at VALADDR. See unpack_bits_as_long for more details. */
3199
3200 LONGEST
3201 unpack_field_as_long (struct type *type, const gdb_byte *valaddr, int fieldno)
3202 {
3203 int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3204 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3205 struct type *field_type = TYPE_FIELD_TYPE (type, fieldno);
3206
3207 return unpack_bits_as_long (field_type, valaddr, bitpos, bitsize);
3208 }
3209
3210 /* Unpack a bitfield of BITSIZE bits found at BITPOS in the object at
3211 VALADDR + EMBEDDEDOFFSET that has the type of DEST_VAL and store
3212 the contents in DEST_VAL, zero or sign extending if the type of
3213 DEST_VAL is wider than BITSIZE. VALADDR points to the contents of
3214 VAL. If the parts of VAL's contents required to extract the
3215 bitfield are unavailable or optimized out, DEST_VAL is
3216 correspondingly marked unavailable/optimized out. */
3217
3218 void
3219 unpack_value_bitfield (struct value *dest_val,
3220 LONGEST bitpos, LONGEST bitsize,
3221 const gdb_byte *valaddr, LONGEST embedded_offset,
3222 const struct value *val)
3223 {
3224 enum bfd_endian byte_order;
3225 int src_bit_offset;
3226 int dst_bit_offset;
3227 struct type *field_type = value_type (dest_val);
3228
3229 byte_order = gdbarch_byte_order (get_type_arch (field_type));
3230
3231 /* First, unpack and sign extend the bitfield as if it was wholly
3232 valid. Optimized out/unavailable bits are read as zero, but
3233 that's OK, as they'll end up marked below. If the VAL is
3234 wholly-invalid we may have skipped allocating its contents,
3235 though. See allocate_optimized_out_value. */
3236 if (valaddr != NULL)
3237 {
3238 LONGEST num;
3239
3240 num = unpack_bits_as_long (field_type, valaddr + embedded_offset,
3241 bitpos, bitsize);
3242 store_signed_integer (value_contents_raw (dest_val),
3243 TYPE_LENGTH (field_type), byte_order, num);
3244 }
3245
3246 /* Now copy the optimized out / unavailability ranges to the right
3247 bits. */
3248 src_bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos;
3249 if (byte_order == BFD_ENDIAN_BIG)
3250 dst_bit_offset = TYPE_LENGTH (field_type) * TARGET_CHAR_BIT - bitsize;
3251 else
3252 dst_bit_offset = 0;
3253 value_ranges_copy_adjusted (dest_val, dst_bit_offset,
3254 val, src_bit_offset, bitsize);
3255 }
3256
3257 /* Return a new value with type TYPE, which is the FIELDNO'th field of
3258 the object at VALADDR + EMBEDDEDOFFSET. VALADDR points to the contents
3259 of VAL. If the parts of VAL's contents required to extract the
3260 bitfield are unavailable or optimized out, the new value is
3261 correspondingly marked unavailable/optimized out. */
3262
3263 struct value *
3264 value_field_bitfield (struct type *type, int fieldno,
3265 const gdb_byte *valaddr,
3266 LONGEST embedded_offset, const struct value *val)
3267 {
3268 int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3269 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3270 struct value *res_val = allocate_value (TYPE_FIELD_TYPE (type, fieldno));
3271
3272 unpack_value_bitfield (res_val, bitpos, bitsize,
3273 valaddr, embedded_offset, val);
3274
3275 return res_val;
3276 }
3277
3278 /* Modify the value of a bitfield. ADDR points to a block of memory in
3279 target byte order; the bitfield starts in the byte pointed to. FIELDVAL
3280 is the desired value of the field, in host byte order. BITPOS and BITSIZE
3281 indicate which bits (in target bit order) comprise the bitfield.
3282 Requires 0 < BITSIZE <= lbits, 0 <= BITPOS % 8 + BITSIZE <= lbits, and
3283 0 <= BITPOS, where lbits is the size of a LONGEST in bits. */
3284
3285 void
3286 modify_field (struct type *type, gdb_byte *addr,
3287 LONGEST fieldval, LONGEST bitpos, LONGEST bitsize)
3288 {
3289 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
3290 ULONGEST oword;
3291 ULONGEST mask = (ULONGEST) -1 >> (8 * sizeof (ULONGEST) - bitsize);
3292 LONGEST bytesize;
3293
3294 /* Normalize BITPOS. */
3295 addr += bitpos / 8;
3296 bitpos %= 8;
3297
3298 /* If a negative fieldval fits in the field in question, chop
3299 off the sign extension bits. */
3300 if ((~fieldval & ~(mask >> 1)) == 0)
3301 fieldval &= mask;
3302
3303 /* Warn if value is too big to fit in the field in question. */
3304 if (0 != (fieldval & ~mask))
3305 {
3306 /* FIXME: would like to include fieldval in the message, but
3307 we don't have a sprintf_longest. */
3308 warning (_("Value does not fit in %s bits."), plongest (bitsize));
3309
3310 /* Truncate it, otherwise adjoining fields may be corrupted. */
3311 fieldval &= mask;
3312 }
3313
3314 /* Ensure no bytes outside of the modified ones get accessed as it may cause
3315 false valgrind reports. */
3316
3317 bytesize = (bitpos + bitsize + 7) / 8;
3318 oword = extract_unsigned_integer (addr, bytesize, byte_order);
3319
3320 /* Shifting for bit field depends on endianness of the target machine. */
3321 if (gdbarch_bits_big_endian (get_type_arch (type)))
3322 bitpos = bytesize * 8 - bitpos - bitsize;
3323
3324 oword &= ~(mask << bitpos);
3325 oword |= fieldval << bitpos;
3326
3327 store_unsigned_integer (addr, bytesize, byte_order, oword);
3328 }
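
/* Worked example (editor's illustration): on a target whose bits are
   not big-endian, writing FIELDVAL = 2 into a 3-bit field at BITPOS = 3
   of a byte holding 0xff proceeds as follows: MASK = 7, BYTESIZE = 1,
   OWORD = 0xff; clearing the field gives 0xff & ~(7 << 3) = 0xc7, and
   inserting the value gives 0xc7 | (2 << 3) = 0xd7, which is stored
   back.  The adjoining bits are preserved.  */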
3329 \f
3330 /* Pack NUM into BUF using a target format of TYPE. */
3331
3332 void
3333 pack_long (gdb_byte *buf, struct type *type, LONGEST num)
3334 {
3335 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
3336 LONGEST len;
3337
3338 type = check_typedef (type);
3339 len = TYPE_LENGTH (type);
3340
3341 switch (TYPE_CODE (type))
3342 {
3343 case TYPE_CODE_INT:
3344 case TYPE_CODE_CHAR:
3345 case TYPE_CODE_ENUM:
3346 case TYPE_CODE_FLAGS:
3347 case TYPE_CODE_BOOL:
3348 case TYPE_CODE_RANGE:
3349 case TYPE_CODE_MEMBERPTR:
3350 store_signed_integer (buf, len, byte_order, num);
3351 break;
3352
3353 case TYPE_CODE_REF:
3354 case TYPE_CODE_RVALUE_REF:
3355 case TYPE_CODE_PTR:
3356 store_typed_address (buf, type, (CORE_ADDR) num);
3357 break;
3358
3359 case TYPE_CODE_FLT:
3360 case TYPE_CODE_DECFLOAT:
3361 target_float_from_longest (buf, type, num);
3362 break;
3363
3364 default:
3365 error (_("Unexpected type (%d) encountered for integer constant."),
3366 TYPE_CODE (type));
3367 }
3368 }
3369
3370
3371 /* Pack NUM into BUF using a target format of TYPE. */
3372
3373 static void
3374 pack_unsigned_long (gdb_byte *buf, struct type *type, ULONGEST num)
3375 {
3376 LONGEST len;
3377 enum bfd_endian byte_order;
3378
3379 type = check_typedef (type);
3380 len = TYPE_LENGTH (type);
3381 byte_order = gdbarch_byte_order (get_type_arch (type));
3382
3383 switch (TYPE_CODE (type))
3384 {
3385 case TYPE_CODE_INT:
3386 case TYPE_CODE_CHAR:
3387 case TYPE_CODE_ENUM:
3388 case TYPE_CODE_FLAGS:
3389 case TYPE_CODE_BOOL:
3390 case TYPE_CODE_RANGE:
3391 case TYPE_CODE_MEMBERPTR:
3392 store_unsigned_integer (buf, len, byte_order, num);
3393 break;
3394
3395 case TYPE_CODE_REF:
3396 case TYPE_CODE_RVALUE_REF:
3397 case TYPE_CODE_PTR:
3398 store_typed_address (buf, type, (CORE_ADDR) num);
3399 break;
3400
3401 case TYPE_CODE_FLT:
3402 case TYPE_CODE_DECFLOAT:
3403 target_float_from_ulongest (buf, type, num);
3404 break;
3405
3406 default:
3407 error (_("Unexpected type (%d) encountered "
3408 "for unsigned integer constant."),
3409 TYPE_CODE (type));
3410 }
3411 }
3412
3413
3414 /* Convert C numbers into newly allocated values. */
3415
3416 struct value *
3417 value_from_longest (struct type *type, LONGEST num)
3418 {
3419 struct value *val = allocate_value (type);
3420
3421 pack_long (value_contents_raw (val), type, num);
3422 return val;
3423 }
3424
3425
3426 /* Convert C unsigned numbers into newly allocated values. */
3427
3428 struct value *
3429 value_from_ulongest (struct type *type, ULONGEST num)
3430 {
3431 struct value *val = allocate_value (type);
3432
3433 pack_unsigned_long (value_contents_raw (val), type, num);
3434
3435 return val;
3436 }
3437
3438
3439 /* Create a value representing a pointer of type TYPE to the address
3440 ADDR. */
3441
3442 struct value *
3443 value_from_pointer (struct type *type, CORE_ADDR addr)
3444 {
3445 struct value *val = allocate_value (type);
3446
3447 store_typed_address (value_contents_raw (val),
3448 check_typedef (type), addr);
3449 return val;
3450 }
3451
3452
3453 /* Create a value of type TYPE whose contents come from VALADDR, if it
3454 is non-null, and whose memory address (in the inferior) is
3455 ADDRESS. The type of the created value may differ from the passed
3456 type TYPE. Make sure to retrieve the value's new type after this call.
3457 Note that TYPE is not passed through resolve_dynamic_type; this is
3458 a special API intended for use only by Ada. */
3459
3460 struct value *
3461 value_from_contents_and_address_unresolved (struct type *type,
3462 const gdb_byte *valaddr,
3463 CORE_ADDR address)
3464 {
3465 struct value *v;
3466
3467 if (valaddr == NULL)
3468 v = allocate_value_lazy (type);
3469 else
3470 v = value_from_contents (type, valaddr);
3471 VALUE_LVAL (v) = lval_memory;
3472 set_value_address (v, address);
3473 return v;
3474 }
3475
3476 /* Create a value of type TYPE whose contents come from VALADDR, if it
3477 is non-null, and whose memory address (in the inferior) is
3478 ADDRESS. The type of the created value may differ from the passed
3479 type TYPE. Make sure to retrieve the value's new type after this call. */
3480
3481 struct value *
3482 value_from_contents_and_address (struct type *type,
3483 const gdb_byte *valaddr,
3484 CORE_ADDR address)
3485 {
3486 struct type *resolved_type = resolve_dynamic_type (type, valaddr, address);
3487 struct type *resolved_type_no_typedef = check_typedef (resolved_type);
3488 struct value *v;
3489
3490 if (valaddr == NULL)
3491 v = allocate_value_lazy (resolved_type);
3492 else
3493 v = value_from_contents (resolved_type, valaddr);
3494 if (TYPE_DATA_LOCATION (resolved_type_no_typedef) != NULL
3495 && TYPE_DATA_LOCATION_KIND (resolved_type_no_typedef) == PROP_CONST)
3496 address = TYPE_DATA_LOCATION_ADDR (resolved_type_no_typedef);
3497 VALUE_LVAL (v) = lval_memory;
3498 set_value_address (v, address);
3499 return v;
3500 }
3501
3502 /* Create a value of type TYPE holding the contents CONTENTS.
3503 The new value is `not_lval'. */
3504
3505 struct value *
3506 value_from_contents (struct type *type, const gdb_byte *contents)
3507 {
3508 struct value *result;
3509
3510 result = allocate_value (type);
3511 memcpy (value_contents_raw (result), contents, TYPE_LENGTH (type));
3512 return result;
3513 }
3514
3515 /* Extract a value from the value history. Input will be of the form
3516 $digits or $$digits. See block comment above 'write_dollar_variable'
3517 for details. */
3518
3519 struct value *
3520 value_from_history_ref (const char *h, const char **endp)
3521 {
3522 int index, len;
3523
3524 if (h[0] == '$')
3525 len = 1;
3526 else
3527 return NULL;
3528
3529 if (h[1] == '$')
3530 len = 2;
3531
3532 /* Find length of numeral string. */
3533 for (; isdigit (h[len]); len++)
3534 ;
3535
3536 /* Make sure numeral string is not part of an identifier. */
3537 if (h[len] == '_' || isalpha (h[len]))
3538 return NULL;
3539
3540 /* Now collect the index value. */
3541 if (h[1] == '$')
3542 {
3543 if (len == 2)
3544 {
3545 /* For some bizarre reason, "$$" is equivalent to "$$1",
3546 rather than to "$$0" as it ought to be! */
3547 index = -1;
3548 *endp += len;
3549 }
3550 else
3551 {
3552 char *local_end;
3553
3554 index = -strtol (&h[2], &local_end, 10);
3555 *endp = local_end;
3556 }
3557 }
3558 else
3559 {
3560 if (len == 1)
3561 {
3562 /* "$" is equivalent to "$0". */
3563 index = 0;
3564 *endp += len;
3565 }
3566 else
3567 {
3568 char *local_end;
3569
3570 index = strtol (&h[1], &local_end, 10);
3571 *endp = local_end;
3572 }
3573 }
3574
3575 return access_value_history (index);
3576 }
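
/* Examples of the accepted forms (illustrative): "$7" yields history
   entry 7, "$$" and "$$2" count backwards from the end (indices -1 and
   -2 passed to access_value_history), "$" alone means index 0 (the last
   value), and "$foo" returns NULL so the caller can treat it as an
   ordinary convenience variable.  */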
3577
3578 /* Get the component value (offset by OFFSET bytes) of a struct or
3579 union WHOLE. Component's type is TYPE. */
3580
3581 struct value *
3582 value_from_component (struct value *whole, struct type *type, LONGEST offset)
3583 {
3584 struct value *v;
3585
3586 if (VALUE_LVAL (whole) == lval_memory && value_lazy (whole))
3587 v = allocate_value_lazy (type);
3588 else
3589 {
3590 v = allocate_value (type);
3591 value_contents_copy (v, value_embedded_offset (v),
3592 whole, value_embedded_offset (whole) + offset,
3593 type_length_units (type));
3594 }
3595 v->offset = value_offset (whole) + offset + value_embedded_offset (whole);
3596 set_value_component_location (v, whole);
3597
3598 return v;
3599 }
3600
3601 struct value *
3602 coerce_ref_if_computed (const struct value *arg)
3603 {
3604 const struct lval_funcs *funcs;
3605
3606 if (!TYPE_IS_REFERENCE (check_typedef (value_type (arg))))
3607 return NULL;
3608
3609 if (value_lval_const (arg) != lval_computed)
3610 return NULL;
3611
3612 funcs = value_computed_funcs (arg);
3613 if (funcs->coerce_ref == NULL)
3614 return NULL;
3615
3616 return funcs->coerce_ref (arg);
3617 }
3618
3619 /* Look at value.h for description. */
3620
3621 struct value *
3622 readjust_indirect_value_type (struct value *value, struct type *enc_type,
3623 const struct type *original_type,
3624 const struct value *original_value)
3625 {
3626 /* Re-adjust type. */
3627 deprecated_set_value_type (value, TYPE_TARGET_TYPE (original_type));
3628
3629 /* Add embedding info. */
3630 set_value_enclosing_type (value, enc_type);
3631 set_value_embedded_offset (value, value_pointed_to_offset (original_value));
3632
3633 /* We may be pointing to an object of some derived type. */
3634 return value_full_object (value, NULL, 0, 0, 0);
3635 }
3636
3637 struct value *
3638 coerce_ref (struct value *arg)
3639 {
3640 struct type *value_type_arg_tmp = check_typedef (value_type (arg));
3641 struct value *retval;
3642 struct type *enc_type;
3643
3644 retval = coerce_ref_if_computed (arg);
3645 if (retval)
3646 return retval;
3647
3648 if (!TYPE_IS_REFERENCE (value_type_arg_tmp))
3649 return arg;
3650
3651 enc_type = check_typedef (value_enclosing_type (arg));
3652 enc_type = TYPE_TARGET_TYPE (enc_type);
3653
3654 retval = value_at_lazy (enc_type,
3655 unpack_pointer (value_type (arg),
3656 value_contents (arg)));
3657 enc_type = value_type (retval);
3658 return readjust_indirect_value_type (retval, enc_type,
3659 value_type_arg_tmp, arg);
3660 }
3661
3662 struct value *
3663 coerce_array (struct value *arg)
3664 {
3665 struct type *type;
3666
3667 arg = coerce_ref (arg);
3668 type = check_typedef (value_type (arg));
3669
3670 switch (TYPE_CODE (type))
3671 {
3672 case TYPE_CODE_ARRAY:
3673 if (!TYPE_VECTOR (type) && current_language->c_style_arrays)
3674 arg = value_coerce_array (arg);
3675 break;
3676 case TYPE_CODE_FUNC:
3677 arg = value_coerce_function (arg);
3678 break;
3679 }
3680 return arg;
3681 }
3682 \f
3683
3684 /* Return the return value convention that will be used for the
3685 specified type. */
3686
3687 enum return_value_convention
3688 struct_return_convention (struct gdbarch *gdbarch,
3689 struct value *function, struct type *value_type)
3690 {
3691 enum type_code code = TYPE_CODE (value_type);
3692
3693 if (code == TYPE_CODE_ERROR)
3694 error (_("Function return type unknown."));
3695
3696 /* Probe the architecture for the return-value convention. */
3697 return gdbarch_return_value (gdbarch, function, value_type,
3698 NULL, NULL, NULL);
3699 }
3700
3701 /* Return true if the function returning the specified type is using
3702 the convention of returning structures in memory (passing in the
3703 address as a hidden first parameter). */
3704
3705 int
3706 using_struct_return (struct gdbarch *gdbarch,
3707 struct value *function, struct type *value_type)
3708 {
3709 if (TYPE_CODE (value_type) == TYPE_CODE_VOID)
3710 /* A void return value is never in memory. See also corresponding
3711 code in "print_return_value". */
3712 return 0;
3713
3714 return (struct_return_convention (gdbarch, function, value_type)
3715 != RETURN_VALUE_REGISTER_CONVENTION);
3716 }
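/* As a concrete (hypothetical) illustration: on many ABIs a function
   such as

       struct big { char buf[64]; };
       struct big make_big (void);

   returns its result in memory through a hidden pointer argument, so
   using_struct_return reports nonzero for it, whereas "int f (void)"
   normally uses RETURN_VALUE_REGISTER_CONVENTION.  The actual answer
   always comes from gdbarch_return_value for the architecture in
   use.  */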
3717
3718 /* Set the initialized field in a value struct. */
3719
3720 void
3721 set_value_initialized (struct value *val, int status)
3722 {
3723 val->initialized = status;
3724 }
3725
3726 /* Return the initialized field in a value struct. */
3727
3728 int
3729 value_initialized (const struct value *val)
3730 {
3731 return val->initialized;
3732 }
3733
3734 /* Load the actual content of a lazy value. Fetch the data from the
3735 user's process and clear the lazy flag to indicate that the data in
3736 the buffer is valid.
3737
3738 If the value is zero-length, we avoid calling read_memory, which
3739 would abort. We mark the value as fetched anyway -- all 0 bytes of
3740 it. */
3741
3742 void
3743 value_fetch_lazy (struct value *val)
3744 {
3745 gdb_assert (value_lazy (val));
3746 allocate_value_contents (val);
3747 /* A value is either lazy, or fully fetched. The
3748 availability/validity is only established as we try to fetch a
3749 value. */
3750 gdb_assert (VEC_empty (range_s, val->optimized_out));
3751 gdb_assert (VEC_empty (range_s, val->unavailable));
3752 if (value_bitsize (val))
3753 {
3754 /* To read a lazy bitfield, read the entire enclosing value. This
3755 prevents reading the same block of (possibly volatile) memory once
3756 per bitfield. It would be even better to read only the containing
3757 word, but we have no way to record that just specific bits of a
3758 value have been fetched. */
3759 struct type *type = check_typedef (value_type (val));
3760 struct value *parent = value_parent (val);
3761
3762 if (value_lazy (parent))
3763 value_fetch_lazy (parent);
3764
3765 unpack_value_bitfield (val,
3766 value_bitpos (val), value_bitsize (val),
3767 value_contents_for_printing (parent),
3768 value_offset (val), parent);
3769 }
3770 else if (VALUE_LVAL (val) == lval_memory)
3771 {
3772 CORE_ADDR addr = value_address (val);
3773 struct type *type = check_typedef (value_enclosing_type (val));
3774
3775 if (TYPE_LENGTH (type))
3776 read_value_memory (val, 0, value_stack (val),
3777 addr, value_contents_all_raw (val),
3778 type_length_units (type));
3779 }
3780 else if (VALUE_LVAL (val) == lval_register)
3781 {
3782 struct frame_info *next_frame;
3783 int regnum;
3784 struct type *type = check_typedef (value_type (val));
3785 struct value *new_val = val, *mark = value_mark ();
3786
3787 /* Offsets are not supported here; lazy register values must
3788 refer to the entire register. */
3789 gdb_assert (value_offset (val) == 0);
3790
3791 while (VALUE_LVAL (new_val) == lval_register && value_lazy (new_val))
3792 {
3793 struct frame_id next_frame_id = VALUE_NEXT_FRAME_ID (new_val);
3794
3795 next_frame = frame_find_by_id (next_frame_id);
3796 regnum = VALUE_REGNUM (new_val);
3797
3798 gdb_assert (next_frame != NULL);
3799
3800 /* Convertible register routines are used for multi-register
3801 values and for interpretation in different types
3802 (e.g. float or int from a double register). Lazy
3803 register values should have the register's natural type,
3804 so they do not apply. */
3805 gdb_assert (!gdbarch_convert_register_p (get_frame_arch (next_frame),
3806 regnum, type));
3807
3808 /* FRAME was obtained, above, via VALUE_NEXT_FRAME_ID.
3809 Since a "->next" operation was performed when setting
3810 this field, we do not need to perform a "next" operation
3811 again when unwinding the register. That's why
3812 frame_unwind_register_value() is called here instead of
3813 get_frame_register_value(). */
3814 new_val = frame_unwind_register_value (next_frame, regnum);
3815
3816 /* If we get another lazy lval_register value, it means the
3817 register is found by reading it from NEXT_FRAME's next frame.
3818 frame_unwind_register_value should never return a value with
3819 the frame id pointing to NEXT_FRAME. If it does, it means we
3820 either have two consecutive frames with the same frame id
3821 in the frame chain, or some code is trying to unwind
3822 behind get_prev_frame's back (e.g., a frame unwind
3823 sniffer trying to unwind), bypassing its validations. In
3824 any case, it should always be an internal error to end up
3825 in this situation. */
3826 if (VALUE_LVAL (new_val) == lval_register
3827 && value_lazy (new_val)
3828 && frame_id_eq (VALUE_NEXT_FRAME_ID (new_val), next_frame_id))
3829 internal_error (__FILE__, __LINE__,
3830 _("infinite loop while fetching a register"));
3831 }
3832
3833 /* If it's still lazy (for instance, a saved register on the
3834 stack), fetch it. */
3835 if (value_lazy (new_val))
3836 value_fetch_lazy (new_val);
3837
3838 /* Copy the contents and the unavailability/optimized-out
3839 meta-data from NEW_VAL to VAL. */
3840 set_value_lazy (val, 0);
3841 value_contents_copy (val, value_embedded_offset (val),
3842 new_val, value_embedded_offset (new_val),
3843 type_length_units (type));
3844
3845 if (frame_debug)
3846 {
3847 struct gdbarch *gdbarch;
3848 struct frame_info *frame;
3849 /* VALUE_FRAME_ID is used here, instead of VALUE_NEXT_FRAME_ID,
3850 so that the frame level will be shown correctly. */
3851 frame = frame_find_by_id (VALUE_FRAME_ID (val));
3852 regnum = VALUE_REGNUM (val);
3853 gdbarch = get_frame_arch (frame);
3854
3855 fprintf_unfiltered (gdb_stdlog,
3856 "{ value_fetch_lazy "
3857 "(frame=%d,regnum=%d(%s),...) ",
3858 frame_relative_level (frame), regnum,
3859 user_reg_map_regnum_to_name (gdbarch, regnum));
3860
3861 fprintf_unfiltered (gdb_stdlog, "->");
3862 if (value_optimized_out (new_val))
3863 {
3864 fprintf_unfiltered (gdb_stdlog, " ");
3865 val_print_optimized_out (new_val, gdb_stdlog);
3866 }
3867 else
3868 {
3869 int i;
3870 const gdb_byte *buf = value_contents (new_val);
3871
3872 if (VALUE_LVAL (new_val) == lval_register)
3873 fprintf_unfiltered (gdb_stdlog, " register=%d",
3874 VALUE_REGNUM (new_val));
3875 else if (VALUE_LVAL (new_val) == lval_memory)
3876 fprintf_unfiltered (gdb_stdlog, " address=%s",
3877 paddress (gdbarch,
3878 value_address (new_val)));
3879 else
3880 fprintf_unfiltered (gdb_stdlog, " computed");
3881
3882 fprintf_unfiltered (gdb_stdlog, " bytes=");
3883 fprintf_unfiltered (gdb_stdlog, "[");
3884 for (i = 0; i < register_size (gdbarch, regnum); i++)
3885 fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
3886 fprintf_unfiltered (gdb_stdlog, "]");
3887 }
3888
3889 fprintf_unfiltered (gdb_stdlog, " }\n");
3890 }
3891
3892 /* Dispose of the intermediate values. This prevents
3893 watchpoints from trying to watch the saved frame pointer. */
3894 value_free_to_mark (mark);
3895 }
3896 else if (VALUE_LVAL (val) == lval_computed
3897 && value_computed_funcs (val)->read != NULL)
3898 value_computed_funcs (val)->read (val);
3899 else
3900 internal_error (__FILE__, __LINE__, _("Unexpected lazy value type."));
3901
3902 set_value_lazy (val, 0);
3903 }
3904
3905 /* Implementation of the convenience function $_isvoid. */
3906
3907 static struct value *
3908 isvoid_internal_fn (struct gdbarch *gdbarch,
3909 const struct language_defn *language,
3910 void *cookie, int argc, struct value **argv)
3911 {
3912 int ret;
3913
3914 if (argc != 1)
3915 error (_("You must provide one argument for $_isvoid."));
3916
3917 ret = TYPE_CODE (value_type (argv[0])) == TYPE_CODE_VOID;
3918
3919 return value_from_longest (builtin_type (gdbarch)->builtin_int, ret);
3920 }
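/* Example usage from the gdb prompt (illustrative; the variable name
   is hypothetical):

       (gdb) print $_isvoid ($unset_convenience_var)
       $1 = 1
       (gdb) print $_isvoid (42)
       $2 = 0

   A never-assigned convenience variable has void type, so the first
   call yields 1.  */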
3921
3922 void
3923 _initialize_values (void)
3924 {
3925 add_cmd ("convenience", no_class, show_convenience, _("\
3926 Debugger convenience (\"$foo\") variables and functions.\n\
3927 Convenience variables are created when you assign them values;\n\
3928 thus, \"set $foo=1\" gives \"$foo\" the value 1. Values may be any type.\n\
3929 \n\
3930 A few convenience variables are given values automatically:\n\
3931 \"$_\"holds the last address examined with \"x\" or \"info lines\",\n\
3932 \"$__\" holds the contents of the last address examined with \"x\"."
3933 #ifdef HAVE_PYTHON
3934 "\n\n\
3935 Convenience functions are defined via the Python API."
3936 #endif
3937 ), &showlist);
3938 add_alias_cmd ("conv", "convenience", no_class, 1, &showlist);
3939
3940 add_cmd ("values", no_set_class, show_values, _("\
3941 Elements of value history around item number IDX (or last ten)."),
3942 &showlist);
3943
3944 add_com ("init-if-undefined", class_vars, init_if_undefined_command, _("\
3945 Initialize a convenience variable if necessary.\n\
3946 init-if-undefined VARIABLE = EXPRESSION\n\
3947 Set an internal VARIABLE to the result of the EXPRESSION if it does not\n\
3948 exist or does not contain a value. The EXPRESSION is not evaluated if the\n\
3949 VARIABLE is already initialized."));
3950
3951 add_prefix_cmd ("function", no_class, function_command, _("\
3952 Placeholder command for showing help on convenience functions."),
3953 &functionlist, "function ", 0, &cmdlist);
3954
3955 add_internal_function ("_isvoid", _("\
3956 Check whether an expression is void.\n\
3957 Usage: $_isvoid (expression)\n\
3958 Return 1 if the expression is void, zero otherwise."),
3959 isvoid_internal_fn, NULL);
3960
3961 add_setshow_zuinteger_unlimited_cmd ("max-value-size",
3962 class_support, &max_value_size, _("\
3963 Set the maximum size of a value gdb will load from the inferior."), _("\
3964 Show the maximum size of a value gdb will load from the inferior."), _("\
3965 Use this to control the maximum size, in bytes, of a value that gdb\n\
3966 will load from the inferior. Setting this value to 'unlimited'\n\
3967 disables checking.\n\
3968 Setting this does not invalidate already allocated values; it only\n\
3969 prevents future values larger than this size from being allocated."),
3970 set_max_value_size,
3971 show_max_value_size,
3972 &setlist, &showlist);
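  /* Typical usage from the gdb prompt (illustrative):

         (gdb) set max-value-size 65536
         (gdb) set max-value-size unlimited
         (gdb) show max-value-size

     The limit is enforced when new value contents are allocated, not
     retroactively.  */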
3973 }