Create subobject value in pretty printer
[deliverable/binutils-gdb.git] / gdb / value.c
1 /* Low level packing and unpacking of values for GDB, the GNU Debugger.
2
3 Copyright (C) 1986-2016 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21 #include "arch-utils.h"
22 #include "symtab.h"
23 #include "gdbtypes.h"
24 #include "value.h"
25 #include "gdbcore.h"
26 #include "command.h"
27 #include "gdbcmd.h"
28 #include "target.h"
29 #include "language.h"
30 #include "demangle.h"
31 #include "doublest.h"
32 #include "regcache.h"
33 #include "block.h"
34 #include "dfp.h"
35 #include "objfiles.h"
36 #include "valprint.h"
37 #include "cli/cli-decode.h"
38 #include "extension.h"
39 #include <ctype.h>
40 #include "tracepoint.h"
41 #include "cp-abi.h"
42 #include "user-regs.h"
43 #include <algorithm>
44
45 /* Prototypes for exported functions. */
46
47 void _initialize_values (void);
48
/* Definition of a user function (a convenience function callable
   from expressions, registered via add_internal_function).  */
struct internal_function
{
  /* The name of the function.  It is a bit odd to have this in the
     function itself -- the user might use a differently-named
     convenience variable to hold the function.  */
  char *name;

  /* The handler.  Called when the function is invoked from an
     expression.  */
  internal_function_fn handler;

  /* User data for the handler.  */
  void *cookie;
};
63
/* Defines an [OFFSET, OFFSET + LENGTH) range.  */

struct range
{
  /* Lowest offset in the range.  */
  LONGEST offset;

  /* Length of the range.  */
  LONGEST length;
};

typedef struct range range_s;

/* Vector of ranges, used for the per-value unavailable/optimized-out
   lists below.  */
DEF_VEC_O(range_s);
78
79 /* Returns true if the ranges defined by [offset1, offset1+len1) and
80 [offset2, offset2+len2) overlap. */
81
82 static int
83 ranges_overlap (LONGEST offset1, LONGEST len1,
84 LONGEST offset2, LONGEST len2)
85 {
86 ULONGEST h, l;
87
88 l = std::max (offset1, offset2);
89 h = std::min (offset1 + len1, offset2 + len2);
90 return (l < h);
91 }
92
93 /* Returns true if the first argument is strictly less than the
94 second, useful for VEC_lower_bound. We keep ranges sorted by
95 offset and coalesce overlapping and contiguous ranges, so this just
96 compares the starting offset. */
97
98 static int
99 range_lessthan (const range_s *r1, const range_s *r2)
100 {
101 return r1->offset < r2->offset;
102 }
103
104 /* Returns true if RANGES contains any range that overlaps [OFFSET,
105 OFFSET+LENGTH). */
106
107 static int
108 ranges_contain (VEC(range_s) *ranges, LONGEST offset, LONGEST length)
109 {
110 range_s what;
111 LONGEST i;
112
113 what.offset = offset;
114 what.length = length;
115
116 /* We keep ranges sorted by offset and coalesce overlapping and
117 contiguous ranges, so to check if a range list contains a given
118 range, we can do a binary search for the position the given range
119 would be inserted if we only considered the starting OFFSET of
120 ranges. We call that position I. Since we also have LENGTH to
121 care for (this is a range afterall), we need to check if the
122 _previous_ range overlaps the I range. E.g.,
123
124 R
125 |---|
126 |---| |---| |------| ... |--|
127 0 1 2 N
128
129 I=1
130
131 In the case above, the binary search would return `I=1', meaning,
132 this OFFSET should be inserted at position 1, and the current
133 position 1 should be pushed further (and before 2). But, `0'
134 overlaps with R.
135
136 Then we need to check if the I range overlaps the I range itself.
137 E.g.,
138
139 R
140 |---|
141 |---| |---| |-------| ... |--|
142 0 1 2 N
143
144 I=1
145 */
146
147 i = VEC_lower_bound (range_s, ranges, &what, range_lessthan);
148
149 if (i > 0)
150 {
151 struct range *bef = VEC_index (range_s, ranges, i - 1);
152
153 if (ranges_overlap (bef->offset, bef->length, offset, length))
154 return 1;
155 }
156
157 if (i < VEC_length (range_s, ranges))
158 {
159 struct range *r = VEC_index (range_s, ranges, i);
160
161 if (ranges_overlap (r->offset, r->length, offset, length))
162 return 1;
163 }
164
165 return 0;
166 }
167
/* Command list the "function" help class hangs off of; internal
   functions are registered onto it (see add_internal_function).  */
static struct cmd_list_element *functionlist;
169
/* Note that the fields in this structure are arranged to save a bit
   of memory.  */

struct value
{
  /* Type of value; either not an lval, or one of the various
     different possible kinds of lval.  */
  enum lval_type lval;

  /* Is it modifiable?  Only relevant if lval != not_lval.  */
  unsigned int modifiable : 1;

  /* If zero, contents of this value are in the contents field.  If
     nonzero, contents are in inferior.  If the lval field is lval_memory,
     the contents are in inferior memory at location.address plus offset.
     The lval field may also be lval_register.

     WARNING: This field is used by the code which handles watchpoints
     (see breakpoint.c) to decide whether a particular value can be
     watched by hardware watchpoints.  If the lazy flag is set for
     some member of a value chain, it is assumed that this member of
     the chain doesn't need to be watched as part of watching the
     value itself.  This is how GDB avoids watching the entire struct
     or array when the user wants to watch a single struct member or
     array element.  If you ever change the way lazy flag is set and
     reset, be sure to consider this use as well!  */
  unsigned int lazy : 1;

  /* If value is a variable, is it initialized or not.  */
  unsigned int initialized : 1;

  /* If value is from the stack.  If this is set, read_stack will be
     used instead of read_memory to enable extra caching.  */
  unsigned int stack : 1;

  /* If the value has been released from the all_values chain (see
     release_value).  */
  unsigned int released : 1;

  /* Register number if the value is from a register.  Set to -1 at
     allocation time (see allocate_value_lazy).  */
  short regnum;

  /* Location of value (if lval).  */
  union
  {
    /* If lval == lval_memory, this is the address in the inferior.
       If lval == lval_register, this is the byte offset into the
       registers structure.  */
    CORE_ADDR address;

    /* Pointer to internal variable.  */
    struct internalvar *internalvar;

    /* Pointer to xmethod worker.  */
    struct xmethod_worker *xm_worker;

    /* If lval == lval_computed, this is a set of function pointers
       to use to access and describe the value, and a closure pointer
       for them to use.  */
    struct
    {
      /* Functions to call.  */
      const struct lval_funcs *funcs;

      /* Closure for those functions to use.  */
      void *closure;
    } computed;
  } location;

  /* Describes offset of a value within lval of a structure in target
     addressable memory units.  If lval == lval_memory, this is an offset to
     the address.  If lval == lval_register, this is a further offset from
     location.address within the registers structure.  Note also the member
     embedded_offset below.  */
  LONGEST offset;

  /* Only used for bitfields; number of bits contained in them.  */
  LONGEST bitsize;

  /* Only used for bitfields; position of start of field.  For
     gdbarch_bits_big_endian=0 targets, it is the position of the LSB.  For
     gdbarch_bits_big_endian=1 targets, it is the position of the MSB.  */
  LONGEST bitpos;

  /* The number of references to this value.  When a value is created,
     the value chain holds a reference, so REFERENCE_COUNT is 1.  If
     release_value is called, this value is removed from the chain but
     the caller of release_value now has a reference to this value.
     The caller must arrange for a call to value_free later.  */
  int reference_count;

  /* Only used for bitfields; the containing value.  This allows a
     single read from the target when displaying multiple
     bitfields.  */
  struct value *parent;

  /* Frame ID of "next" frame to which a register value is relative.  A
     register value is indicated when the lval enum (above) is set to
     lval_register.  So, if the register value is found relative to frame F,
     then the frame id of F->next will be stored in next_frame_id.  */
  struct frame_id next_frame_id;

  /* Type of the value.  */
  struct type *type;

  /* If a value represents a C++ object, then the `type' field gives
     the object's compile-time type.  If the object actually belongs
     to some class derived from `type', perhaps with other base
     classes and additional members, then `type' is just a subobject
     of the real thing, and the full object is probably larger than
     `type' would suggest.

     If `type' is a dynamic class (i.e. one with a vtable), then GDB
     can actually determine the object's run-time type by looking at
     the run-time type information in the vtable.  When this
     information is available, we may elect to read in the entire
     object, for several reasons:

     - When printing the value, the user would probably rather see the
     full object, not just the limited portion apparent from the
     compile-time type.

     - If `type' has virtual base classes, then even printing `type'
     alone may require reaching outside the `type' portion of the
     object to wherever the virtual base class has been stored.

     When we store the entire object, `enclosing_type' is the run-time
     type -- the complete object -- and `embedded_offset' is the
     offset of `type' within that larger type, in target addressable memory
     units.  The value_contents() macro takes `embedded_offset' into account,
     so most GDB code continues to see the `type' portion of the value, just
     as the inferior would.

     If `type' is a pointer to an object, then `enclosing_type' is a
     pointer to the object's run-time type, and `pointed_to_offset' is
     the offset in target addressable memory units from the full object
     to the pointed-to object -- that is, the value `embedded_offset' would
     have if we followed the pointer and fetched the complete object.
     (I don't really see the point.  Why not just determine the
     run-time type when you indirect, and avoid the special case?  The
     contents don't matter until you indirect anyway.)

     If we're not doing anything fancy, `enclosing_type' is equal to
     `type', and `embedded_offset' is zero, so everything works
     normally.  */
  struct type *enclosing_type;
  LONGEST embedded_offset;
  LONGEST pointed_to_offset;

  /* Values are stored in a chain, so that they can be deleted easily
     over calls to the inferior.  Values assigned to internal
     variables, put into the value history or exposed to Python are
     taken off this list.  */
  struct value *next;

  /* Actual contents of the value.  Target byte-order.  NULL or not
     valid if lazy is nonzero.  */
  gdb_byte *contents;

  /* Unavailable ranges in CONTENTS.  We mark unavailable ranges,
     rather than available, since the common and default case is for a
     value to be available.  This is filled in at value read time.
     The unavailable ranges are tracked in bits.  Note that a contents
     bit that has been optimized out doesn't really exist in the
     program, so it can't be marked unavailable either.  */
  VEC(range_s) *unavailable;

  /* Likewise, but for optimized out contents (a chunk of the value of
     a variable that does not actually exist in the program).  If LVAL
     is lval_register, this is a register ($pc, $sp, etc., never a
     program variable) that has not been saved in the frame.  Not
     saved registers and optimized-out program variables values are
     treated pretty much the same, except not-saved registers have a
     different string representation and related error strings.  */
  VEC(range_s) *optimized_out;
};
345
346 /* See value.h. */
347
348 struct gdbarch *
349 get_value_arch (const struct value *value)
350 {
351 return get_type_arch (value_type (value));
352 }
353
354 int
355 value_bits_available (const struct value *value, LONGEST offset, LONGEST length)
356 {
357 gdb_assert (!value->lazy);
358
359 return !ranges_contain (value->unavailable, offset, length);
360 }
361
362 int
363 value_bytes_available (const struct value *value,
364 LONGEST offset, LONGEST length)
365 {
366 return value_bits_available (value,
367 offset * TARGET_CHAR_BIT,
368 length * TARGET_CHAR_BIT);
369 }
370
371 int
372 value_bits_any_optimized_out (const struct value *value, int bit_offset, int bit_length)
373 {
374 gdb_assert (!value->lazy);
375
376 return ranges_contain (value->optimized_out, bit_offset, bit_length);
377 }
378
379 int
380 value_entirely_available (struct value *value)
381 {
382 /* We can only tell whether the whole value is available when we try
383 to read it. */
384 if (value->lazy)
385 value_fetch_lazy (value);
386
387 if (VEC_empty (range_s, value->unavailable))
388 return 1;
389 return 0;
390 }
391
392 /* Returns true if VALUE is entirely covered by RANGES. If the value
393 is lazy, it'll be read now. Note that RANGE is a pointer to
394 pointer because reading the value might change *RANGE. */
395
396 static int
397 value_entirely_covered_by_range_vector (struct value *value,
398 VEC(range_s) **ranges)
399 {
400 /* We can only tell whether the whole value is optimized out /
401 unavailable when we try to read it. */
402 if (value->lazy)
403 value_fetch_lazy (value);
404
405 if (VEC_length (range_s, *ranges) == 1)
406 {
407 struct range *t = VEC_index (range_s, *ranges, 0);
408
409 if (t->offset == 0
410 && t->length == (TARGET_CHAR_BIT
411 * TYPE_LENGTH (value_enclosing_type (value))))
412 return 1;
413 }
414
415 return 0;
416 }
417
/* Return non-zero iff the whole of VALUE's contents is marked
   unavailable; fetches VALUE first if it is lazy.  */

int
value_entirely_unavailable (struct value *value)
{
  return value_entirely_covered_by_range_vector (value, &value->unavailable);
}
423
/* Return non-zero iff the whole of VALUE's contents is marked
   optimized out; fetches VALUE first if it is lazy.  */

int
value_entirely_optimized_out (struct value *value)
{
  return value_entirely_covered_by_range_vector (value, &value->optimized_out);
}
429
430 /* Insert into the vector pointed to by VECTORP the bit range starting of
431 OFFSET bits, and extending for the next LENGTH bits. */
432
433 static void
434 insert_into_bit_range_vector (VEC(range_s) **vectorp,
435 LONGEST offset, LONGEST length)
436 {
437 range_s newr;
438 int i;
439
440 /* Insert the range sorted. If there's overlap or the new range
441 would be contiguous with an existing range, merge. */
442
443 newr.offset = offset;
444 newr.length = length;
445
446 /* Do a binary search for the position the given range would be
447 inserted if we only considered the starting OFFSET of ranges.
448 Call that position I. Since we also have LENGTH to care for
449 (this is a range afterall), we need to check if the _previous_
450 range overlaps the I range. E.g., calling R the new range:
451
452 #1 - overlaps with previous
453
454 R
455 |-...-|
456 |---| |---| |------| ... |--|
457 0 1 2 N
458
459 I=1
460
461 In the case #1 above, the binary search would return `I=1',
462 meaning, this OFFSET should be inserted at position 1, and the
463 current position 1 should be pushed further (and become 2). But,
464 note that `0' overlaps with R, so we want to merge them.
465
466 A similar consideration needs to be taken if the new range would
467 be contiguous with the previous range:
468
469 #2 - contiguous with previous
470
471 R
472 |-...-|
473 |--| |---| |------| ... |--|
474 0 1 2 N
475
476 I=1
477
478 If there's no overlap with the previous range, as in:
479
480 #3 - not overlapping and not contiguous
481
482 R
483 |-...-|
484 |--| |---| |------| ... |--|
485 0 1 2 N
486
487 I=1
488
489 or if I is 0:
490
491 #4 - R is the range with lowest offset
492
493 R
494 |-...-|
495 |--| |---| |------| ... |--|
496 0 1 2 N
497
498 I=0
499
500 ... we just push the new range to I.
501
502 All the 4 cases above need to consider that the new range may
503 also overlap several of the ranges that follow, or that R may be
504 contiguous with the following range, and merge. E.g.,
505
506 #5 - overlapping following ranges
507
508 R
509 |------------------------|
510 |--| |---| |------| ... |--|
511 0 1 2 N
512
513 I=0
514
515 or:
516
517 R
518 |-------|
519 |--| |---| |------| ... |--|
520 0 1 2 N
521
522 I=1
523
524 */
525
526 i = VEC_lower_bound (range_s, *vectorp, &newr, range_lessthan);
527 if (i > 0)
528 {
529 struct range *bef = VEC_index (range_s, *vectorp, i - 1);
530
531 if (ranges_overlap (bef->offset, bef->length, offset, length))
532 {
533 /* #1 */
534 ULONGEST l = std::min (bef->offset, offset);
535 ULONGEST h = std::max (bef->offset + bef->length, offset + length);
536
537 bef->offset = l;
538 bef->length = h - l;
539 i--;
540 }
541 else if (offset == bef->offset + bef->length)
542 {
543 /* #2 */
544 bef->length += length;
545 i--;
546 }
547 else
548 {
549 /* #3 */
550 VEC_safe_insert (range_s, *vectorp, i, &newr);
551 }
552 }
553 else
554 {
555 /* #4 */
556 VEC_safe_insert (range_s, *vectorp, i, &newr);
557 }
558
559 /* Check whether the ranges following the one we've just added or
560 touched can be folded in (#5 above). */
561 if (i + 1 < VEC_length (range_s, *vectorp))
562 {
563 struct range *t;
564 struct range *r;
565 int removed = 0;
566 int next = i + 1;
567
568 /* Get the range we just touched. */
569 t = VEC_index (range_s, *vectorp, i);
570 removed = 0;
571
572 i = next;
573 for (; VEC_iterate (range_s, *vectorp, i, r); i++)
574 if (r->offset <= t->offset + t->length)
575 {
576 ULONGEST l, h;
577
578 l = std::min (t->offset, r->offset);
579 h = std::max (t->offset + t->length, r->offset + r->length);
580
581 t->offset = l;
582 t->length = h - l;
583
584 removed++;
585 }
586 else
587 {
588 /* If we couldn't merge this one, we won't be able to
589 merge following ones either, since the ranges are
590 always sorted by OFFSET. */
591 break;
592 }
593
594 if (removed != 0)
595 VEC_block_remove (range_s, *vectorp, next, removed);
596 }
597 }
598
/* Mark bits [OFFSET, OFFSET+LENGTH) of VALUE's contents as
   unavailable, coalescing with any adjacent or overlapping
   unavailable ranges.  */

void
mark_value_bits_unavailable (struct value *value,
			     LONGEST offset, LONGEST length)
{
  insert_into_bit_range_vector (&value->unavailable, offset, length);
}
605
606 void
607 mark_value_bytes_unavailable (struct value *value,
608 LONGEST offset, LONGEST length)
609 {
610 mark_value_bits_unavailable (value,
611 offset * TARGET_CHAR_BIT,
612 length * TARGET_CHAR_BIT);
613 }
614
615 /* Find the first range in RANGES that overlaps the range defined by
616 OFFSET and LENGTH, starting at element POS in the RANGES vector,
617 Returns the index into RANGES where such overlapping range was
618 found, or -1 if none was found. */
619
620 static int
621 find_first_range_overlap (VEC(range_s) *ranges, int pos,
622 LONGEST offset, LONGEST length)
623 {
624 range_s *r;
625 int i;
626
627 for (i = pos; VEC_iterate (range_s, ranges, i, r); i++)
628 if (ranges_overlap (r->offset, r->length, offset, length))
629 return i;
630
631 return -1;
632 }
633
/* Compare LENGTH_BITS of memory at PTR1 + OFFSET1_BITS with the memory at
   PTR2 + OFFSET2_BITS.  Return 0 if the memory is the same, otherwise
   return non-zero.

   It must always be the case that:
     OFFSET1_BITS % TARGET_CHAR_BIT == OFFSET2_BITS % TARGET_CHAR_BIT

   It is assumed that memory can be accessed from:
     PTR + (OFFSET_BITS / TARGET_CHAR_BIT)
   to:
     PTR + ((OFFSET_BITS + LENGTH_BITS + TARGET_CHAR_BIT - 1)
	    / TARGET_CHAR_BIT)

   The comparison proceeds in three steps: a possible partial byte at
   the start, a possible partial byte at the end, and a plain memcmp
   of the whole bytes in between.  */
static int
memcmp_with_bit_offsets (const gdb_byte *ptr1, size_t offset1_bits,
			 const gdb_byte *ptr2, size_t offset2_bits,
			 size_t length_bits)
{
  gdb_assert (offset1_bits % TARGET_CHAR_BIT
	      == offset2_bits % TARGET_CHAR_BIT);

  if (offset1_bits % TARGET_CHAR_BIT != 0)
    {
      size_t bits;
      gdb_byte mask, b1, b2;

      /* The offset from the base pointers PTR1 and PTR2 is not a complete
	 number of bytes.  A number of bits up to either the next exact
	 byte boundary, or LENGTH_BITS (which ever is sooner) will be
	 compared.  */
      bits = TARGET_CHAR_BIT - offset1_bits % TARGET_CHAR_BIT;
      gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
      /* Select the BITS low-order bits of each byte.  */
      mask = (1 << bits) - 1;

      if (length_bits < bits)
	{
	  /* The whole comparison fits inside this one partial byte;
	     drop the low-order bits beyond LENGTH_BITS from the
	     mask.  */
	  mask &= ~(gdb_byte) ((1 << (bits - length_bits)) - 1);
	  bits = length_bits;
	}

      /* Now load the two bytes and mask off the bits we care about.  */
      b1 = *(ptr1 + offset1_bits / TARGET_CHAR_BIT) & mask;
      b2 = *(ptr2 + offset2_bits / TARGET_CHAR_BIT) & mask;

      if (b1 != b2)
	return 1;

      /* Now update the length and offsets to take account of the bits
	 we've just compared.  */
      length_bits -= bits;
      offset1_bits += bits;
      offset2_bits += bits;
    }

  if (length_bits % TARGET_CHAR_BIT != 0)
    {
      size_t bits;
      size_t o1, o2;
      gdb_byte mask, b1, b2;

      /* The length is not an exact number of bytes.  After the previous
	 IF.. block then the offsets are byte aligned, or the
	 length is zero (in which case this code is not reached).  Compare
	 a number of bits at the end of the region, starting from an exact
	 byte boundary.  */
      bits = length_bits % TARGET_CHAR_BIT;
      o1 = offset1_bits + length_bits - bits;
      o2 = offset2_bits + length_bits - bits;

      gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
      /* Select the BITS high-order bits of each byte.  */
      mask = ((1 << bits) - 1) << (TARGET_CHAR_BIT - bits);

      gdb_assert (o1 % TARGET_CHAR_BIT == 0);
      gdb_assert (o2 % TARGET_CHAR_BIT == 0);

      b1 = *(ptr1 + o1 / TARGET_CHAR_BIT) & mask;
      b2 = *(ptr2 + o2 / TARGET_CHAR_BIT) & mask;

      if (b1 != b2)
	return 1;

      length_bits -= bits;
    }

  if (length_bits > 0)
    {
      /* We've now taken care of any stray "bits" at the start, or end of
	 the region to compare, the remainder can be covered with a simple
	 memcmp.  */
      gdb_assert (offset1_bits % TARGET_CHAR_BIT == 0);
      gdb_assert (offset2_bits % TARGET_CHAR_BIT == 0);
      gdb_assert (length_bits % TARGET_CHAR_BIT == 0);

      return memcmp (ptr1 + offset1_bits / TARGET_CHAR_BIT,
		     ptr2 + offset2_bits / TARGET_CHAR_BIT,
		     length_bits / TARGET_CHAR_BIT);
    }

  /* Length is zero, regions match.  */
  return 0;
}
734
/* Helper struct for find_first_range_overlap_and_match and
   value_contents_bits_eq.  Keep track of which slot of a given ranges
   vector have we last looked at.  */

struct ranges_and_idx
{
  /* The ranges.  */
  VEC(range_s) *ranges;

  /* The range we've last found in RANGES.  Given ranges are sorted,
     we can start the next lookup here.  */
  int idx;
};
748
749 /* Helper function for value_contents_bits_eq. Compare LENGTH bits of
750 RP1's ranges starting at OFFSET1 bits with LENGTH bits of RP2's
751 ranges starting at OFFSET2 bits. Return true if the ranges match
752 and fill in *L and *H with the overlapping window relative to
753 (both) OFFSET1 or OFFSET2. */
754
755 static int
756 find_first_range_overlap_and_match (struct ranges_and_idx *rp1,
757 struct ranges_and_idx *rp2,
758 LONGEST offset1, LONGEST offset2,
759 LONGEST length, ULONGEST *l, ULONGEST *h)
760 {
761 rp1->idx = find_first_range_overlap (rp1->ranges, rp1->idx,
762 offset1, length);
763 rp2->idx = find_first_range_overlap (rp2->ranges, rp2->idx,
764 offset2, length);
765
766 if (rp1->idx == -1 && rp2->idx == -1)
767 {
768 *l = length;
769 *h = length;
770 return 1;
771 }
772 else if (rp1->idx == -1 || rp2->idx == -1)
773 return 0;
774 else
775 {
776 range_s *r1, *r2;
777 ULONGEST l1, h1;
778 ULONGEST l2, h2;
779
780 r1 = VEC_index (range_s, rp1->ranges, rp1->idx);
781 r2 = VEC_index (range_s, rp2->ranges, rp2->idx);
782
783 /* Get the unavailable windows intersected by the incoming
784 ranges. The first and last ranges that overlap the argument
785 range may be wider than said incoming arguments ranges. */
786 l1 = std::max (offset1, r1->offset);
787 h1 = std::min (offset1 + length, r1->offset + r1->length);
788
789 l2 = std::max (offset2, r2->offset);
790 h2 = std::min (offset2 + length, offset2 + r2->length);
791
792 /* Make them relative to the respective start offsets, so we can
793 compare them for equality. */
794 l1 -= offset1;
795 h1 -= offset1;
796
797 l2 -= offset2;
798 h2 -= offset2;
799
800 /* Different ranges, no match. */
801 if (l1 != l2 || h1 != h2)
802 return 0;
803
804 *h = h1;
805 *l = l1;
806 return 1;
807 }
808 }
809
810 /* Helper function for value_contents_eq. The only difference is that
811 this function is bit rather than byte based.
812
813 Compare LENGTH bits of VAL1's contents starting at OFFSET1 bits
814 with LENGTH bits of VAL2's contents starting at OFFSET2 bits.
815 Return true if the available bits match. */
816
817 static int
818 value_contents_bits_eq (const struct value *val1, int offset1,
819 const struct value *val2, int offset2,
820 int length)
821 {
822 /* Each array element corresponds to a ranges source (unavailable,
823 optimized out). '1' is for VAL1, '2' for VAL2. */
824 struct ranges_and_idx rp1[2], rp2[2];
825
826 /* See function description in value.h. */
827 gdb_assert (!val1->lazy && !val2->lazy);
828
829 /* We shouldn't be trying to compare past the end of the values. */
830 gdb_assert (offset1 + length
831 <= TYPE_LENGTH (val1->enclosing_type) * TARGET_CHAR_BIT);
832 gdb_assert (offset2 + length
833 <= TYPE_LENGTH (val2->enclosing_type) * TARGET_CHAR_BIT);
834
835 memset (&rp1, 0, sizeof (rp1));
836 memset (&rp2, 0, sizeof (rp2));
837 rp1[0].ranges = val1->unavailable;
838 rp2[0].ranges = val2->unavailable;
839 rp1[1].ranges = val1->optimized_out;
840 rp2[1].ranges = val2->optimized_out;
841
842 while (length > 0)
843 {
844 ULONGEST l = 0, h = 0; /* init for gcc -Wall */
845 int i;
846
847 for (i = 0; i < 2; i++)
848 {
849 ULONGEST l_tmp, h_tmp;
850
851 /* The contents only match equal if the invalid/unavailable
852 contents ranges match as well. */
853 if (!find_first_range_overlap_and_match (&rp1[i], &rp2[i],
854 offset1, offset2, length,
855 &l_tmp, &h_tmp))
856 return 0;
857
858 /* We're interested in the lowest/first range found. */
859 if (i == 0 || l_tmp < l)
860 {
861 l = l_tmp;
862 h = h_tmp;
863 }
864 }
865
866 /* Compare the available/valid contents. */
867 if (memcmp_with_bit_offsets (val1->contents, offset1,
868 val2->contents, offset2, l) != 0)
869 return 0;
870
871 length -= h;
872 offset1 += h;
873 offset2 += h;
874 }
875
876 return 1;
877 }
878
879 int
880 value_contents_eq (const struct value *val1, LONGEST offset1,
881 const struct value *val2, LONGEST offset2,
882 LONGEST length)
883 {
884 return value_contents_bits_eq (val1, offset1 * TARGET_CHAR_BIT,
885 val2, offset2 * TARGET_CHAR_BIT,
886 length * TARGET_CHAR_BIT);
887 }
888
889 /* Prototypes for local functions. */
890
891 static void show_values (char *, int);
892
893 static void show_convenience (char *, int);
894
895
/* The value-history records all the values printed
   by print commands during this session.  Each chunk
   records 60 consecutive values.  The first chunk on
   the chain records the most recent values.
   The total number of values is in value_history_count.  */

#define VALUE_HISTORY_CHUNK 60

struct value_history_chunk
{
  /* Next (older) chunk on the chain.  */
  struct value_history_chunk *next;

  /* The recorded values.  */
  struct value *values[VALUE_HISTORY_CHUNK];
};

/* Chain of chunks now in use.  */

static struct value_history_chunk *value_history_chain;

static int value_history_count;	/* Abs number of last entry stored.  */
915
916 \f
/* List of all value objects currently allocated
   (except for those released by calls to release_value).
   This is so they can be freed after each command.  */

static struct value *all_values;
922
/* Allocate a lazy value for type TYPE.  Its actual content is
   "lazily" allocated too: the content field of the return value is
   NULL; it will be allocated when it is fetched from the target.  */

struct value *
allocate_value_lazy (struct type *type)
{
  struct value *val;

  /* Call check_typedef on our type to make sure that, if TYPE
     is a TYPE_CODE_TYPEDEF, its length is set to the length
     of the target type instead of zero.  However, we do not
     replace the typedef type by the target type, because we want
     to keep the typedef in order to be able to set the VAL's type
     description correctly.  */
  check_typedef (type);

  val = XCNEW (struct value);
  val->contents = NULL;
  /* Link the new value onto the all_values chain; it stays there
     until freed or explicitly released.  */
  val->next = all_values;
  all_values = val;
  val->type = type;
  val->enclosing_type = type;
  VALUE_LVAL (val) = not_lval;
  val->location.address = 0;
  VALUE_NEXT_FRAME_ID (val) = null_frame_id;
  val->offset = 0;
  val->bitpos = 0;
  val->bitsize = 0;
  VALUE_REGNUM (val) = -1;
  val->lazy = 1;
  val->embedded_offset = 0;
  val->pointed_to_offset = 0;
  val->modifiable = 1;
  val->initialized = 1;  /* Default to initialized.  */

  /* Values start out on the all_values chain.  */
  val->reference_count = 1;

  return val;
}
964
/* The maximum size, in bytes, that GDB will try to allocate for a value.
   The initial value of 64k was not selected for any specific reason, it is
   just a reasonable starting point.  A value of -1 means "unlimited"
   (see set_max_value_size/show_max_value_size below).  */

static int max_value_size = 65536; /* 64k bytes */

/* It is critical that the MAX_VALUE_SIZE is at least as big as the size of
   LONGEST, otherwise GDB will not be able to parse integer values from the
   CLI; for example if the MAX_VALUE_SIZE could be set to 1 then GDB would
   be unable to parse "set max-value-size 2".

   As we want a consistent GDB experience across hosts with different sizes
   of LONGEST, this arbitrary minimum value was selected, so long as this
   is bigger than LONGEST on all GDB supported hosts we're fine.  */

#define MIN_VALUE_FOR_MAX_VALUE_SIZE 16
gdb_static_assert (sizeof (LONGEST) <= MIN_VALUE_FOR_MAX_VALUE_SIZE);
982
983 /* Implement the "set max-value-size" command. */
984
985 static void
986 set_max_value_size (char *args, int from_tty,
987 struct cmd_list_element *c)
988 {
989 gdb_assert (max_value_size == -1 || max_value_size >= 0);
990
991 if (max_value_size > -1 && max_value_size < MIN_VALUE_FOR_MAX_VALUE_SIZE)
992 {
993 max_value_size = MIN_VALUE_FOR_MAX_VALUE_SIZE;
994 error (_("max-value-size set too low, increasing to %d bytes"),
995 max_value_size);
996 }
997 }
998
999 /* Implement the "show max-value-size" command. */
1000
1001 static void
1002 show_max_value_size (struct ui_file *file, int from_tty,
1003 struct cmd_list_element *c, const char *value)
1004 {
1005 if (max_value_size == -1)
1006 fprintf_filtered (file, _("Maximum value size is unlimited.\n"));
1007 else
1008 fprintf_filtered (file, _("Maximum value size is %d bytes.\n"),
1009 max_value_size);
1010 }
1011
1012 /* Called before we attempt to allocate or reallocate a buffer for the
1013 contents of a value. TYPE is the type of the value for which we are
1014 allocating the buffer. If the buffer is too large (based on the user
1015 controllable setting) then throw an error. If this function returns
1016 then we should attempt to allocate the buffer. */
1017
1018 static void
1019 check_type_length_before_alloc (const struct type *type)
1020 {
1021 unsigned int length = TYPE_LENGTH (type);
1022
1023 if (max_value_size > -1 && length > max_value_size)
1024 {
1025 if (TYPE_NAME (type) != NULL)
1026 error (_("value of type `%s' requires %u bytes, which is more "
1027 "than max-value-size"), TYPE_NAME (type), length);
1028 else
1029 error (_("value requires %u bytes, which is more than "
1030 "max-value-size"), length);
1031 }
1032 }
1033
1034 /* Allocate the contents of VAL if it has not been allocated yet. */
1035
1036 static void
1037 allocate_value_contents (struct value *val)
1038 {
1039 if (!val->contents)
1040 {
1041 check_type_length_before_alloc (val->enclosing_type);
1042 val->contents
1043 = (gdb_byte *) xzalloc (TYPE_LENGTH (val->enclosing_type));
1044 }
1045 }
1046
1047 /* Allocate a value and its contents for type TYPE. */
1048
1049 struct value *
1050 allocate_value (struct type *type)
1051 {
1052 struct value *val = allocate_value_lazy (type);
1053
1054 allocate_value_contents (val);
1055 val->lazy = 0;
1056 return val;
1057 }
1058
1059 /* Allocate a value that has the correct length
1060 for COUNT repetitions of type TYPE. */
1061
1062 struct value *
1063 allocate_repeat_value (struct type *type, int count)
1064 {
1065 int low_bound = current_language->string_lower_bound; /* ??? */
1066 /* FIXME-type-allocation: need a way to free this type when we are
1067 done with it. */
1068 struct type *array_type
1069 = lookup_array_range_type (type, low_bound, count + low_bound - 1);
1070
1071 return allocate_value (array_type);
1072 }
1073
1074 struct value *
1075 allocate_computed_value (struct type *type,
1076 const struct lval_funcs *funcs,
1077 void *closure)
1078 {
1079 struct value *v = allocate_value_lazy (type);
1080
1081 VALUE_LVAL (v) = lval_computed;
1082 v->location.computed.funcs = funcs;
1083 v->location.computed.closure = closure;
1084
1085 return v;
1086 }
1087
1088 /* Allocate NOT_LVAL value for type TYPE being OPTIMIZED_OUT. */
1089
1090 struct value *
1091 allocate_optimized_out_value (struct type *type)
1092 {
1093 struct value *retval = allocate_value_lazy (type);
1094
1095 mark_value_bytes_optimized_out (retval, 0, TYPE_LENGTH (type));
1096 set_value_lazy (retval, 0);
1097 return retval;
1098 }
1099
/* Accessor methods.  */

/* Return the value that follows VALUE on the all_values chain.  */

struct value *
value_next (const struct value *value)
{
  return value->next;
}

/* Return VALUE's type.  */

struct type *
value_type (const struct value *value)
{
  return value->type;
}
/* Overwrite VALUE's type in place.  Deprecated; prefer creating a
   new value with the desired type.  */
void
deprecated_set_value_type (struct value *value, struct type *type)
{
  value->type = type;
}

/* Return VALUE's offset within its location (e.g. from the start of
   its parent or its address).  */

LONGEST
value_offset (const struct value *value)
{
  return value->offset;
}
/* Set VALUE's offset.  */
void
set_value_offset (struct value *value, LONGEST offset)
{
  value->offset = offset;
}

/* Return the bit position of VALUE if it is a bitfield.  */

LONGEST
value_bitpos (const struct value *value)
{
  return value->bitpos;
}
/* Set the bit position of VALUE.  */
void
set_value_bitpos (struct value *value, LONGEST bit)
{
  value->bitpos = bit;
}

/* Return the bit size of VALUE if it is a bitfield; 0 otherwise.  */

LONGEST
value_bitsize (const struct value *value)
{
  return value->bitsize;
}
/* Set the bit size of VALUE.  */
void
set_value_bitsize (struct value *value, LONGEST bit)
{
  value->bitsize = bit;
}

/* Return VALUE's parent value (e.g. the struct a bitfield lives in),
   or NULL if it has none.  */

struct value *
value_parent (const struct value *value)
{
  return value->parent;
}
1157
/* See value.h.  */

void
set_value_parent (struct value *value, struct value *parent)
{
  struct value *old = value->parent;

  /* Install and count the new reference before releasing the old one,
     so this is safe even if PARENT is the same value as the previous
     parent.  */
  value->parent = parent;
  if (parent != NULL)
    value_incref (parent);
  value_free (old);
}
1170
/* Return a writable pointer to VALUE's contents buffer, positioned at
   VALUE's embedded offset (i.e. at the sub-object described by
   value_type), allocating the buffer if necessary.  */

gdb_byte *
value_contents_raw (struct value *value)
{
  struct gdbarch *arch = get_value_arch (value);
  /* The embedded offset is counted in addressable memory units, which
     may be wider than one host byte on some architectures.  */
  int unit_size = gdbarch_addressable_memory_unit_size (arch);

  allocate_value_contents (value);
  return value->contents + value->embedded_offset * unit_size;
}

/* Like value_contents_raw, but for the whole enclosing object,
   starting at offset zero.  */

gdb_byte *
value_contents_all_raw (struct value *value)
{
  allocate_value_contents (value);
  return value->contents;
}

/* Return the enclosing type of VALUE (the full object, of which
   value_type may describe only a sub-object).  */

struct type *
value_enclosing_type (const struct value *value)
{
  return value->enclosing_type;
}
1193
/* Look at value.h for description.  */

struct type *
value_actual_type (struct value *value, int resolve_simple_types,
		   int *real_type_found)
{
  struct value_print_options opts;
  struct type *result;

  get_user_print_options (&opts);

  if (real_type_found)
    *real_type_found = 0;
  /* Default: the declared type, used unless "set print object" is on
     and a more-derived type can be found.  */
  result = value_type (value);
  if (opts.objectprint)
    {
      /* If result's target type is TYPE_CODE_STRUCT, proceed to
	 fetch its rtti type.  */
      if ((TYPE_CODE (result) == TYPE_CODE_PTR
	   || TYPE_CODE (result) == TYPE_CODE_REF)
	  && TYPE_CODE (check_typedef (TYPE_TARGET_TYPE (result)))
	     == TYPE_CODE_STRUCT
	  && !value_optimized_out (value))
        {
          struct type *real_type;

          real_type = value_rtti_indirect_type (value, NULL, NULL, NULL);
          if (real_type)
            {
              if (real_type_found)
                *real_type_found = 1;
              result = real_type;
            }
        }
      else if (resolve_simple_types)
        {
	  /* For non-pointer/reference types, the enclosing type is
	     the best "actual" type we can report.  */
          if (real_type_found)
            *real_type_found = 1;
          result = value_enclosing_type (value);
        }
    }

  return result;
}
1238
/* Throw the standard "value has been optimized out" error.  */

void
error_value_optimized_out (void)
{
  error (_("value has been optimized out"));
}

/* Throw an error if any part of VALUE is marked optimized out.  A
   register lvalue gets a more specific message.  */

static void
require_not_optimized_out (const struct value *value)
{
  if (!VEC_empty (range_s, value->optimized_out))
    {
      if (value->lval == lval_register)
	error (_("register has not been saved in frame"));
      else
	error_value_optimized_out ();
    }
}

/* Throw NOT_AVAILABLE_ERROR if any part of VALUE is unavailable.  */

static void
require_available (const struct value *value)
{
  if (!VEC_empty (range_s, value->unavailable))
    throw_error (NOT_AVAILABLE_ERROR, _("value is not available"));
}
1263
/* Return VALUE's whole contents buffer for printing, fetching lazy
   contents first.  Unlike value_contents_all, this does not error on
   optimized-out or unavailable ranges (printers handle those).  */

const gdb_byte *
value_contents_for_printing (struct value *value)
{
  if (value->lazy)
    value_fetch_lazy (value);
  return value->contents;
}

/* Const variant of the above; VALUE must already be non-lazy.  */

const gdb_byte *
value_contents_for_printing_const (const struct value *value)
{
  gdb_assert (!value->lazy);
  return value->contents;
}

/* Return VALUE's whole contents buffer, erroring out if any part is
   optimized out or unavailable.  */

const gdb_byte *
value_contents_all (struct value *value)
{
  const gdb_byte *result = value_contents_for_printing (value);
  require_not_optimized_out (value);
  require_available (value);
  return result;
}
1287
/* Copy ranges in SRC_RANGE that overlap [SRC_BIT_OFFSET,
   SRC_BIT_OFFSET+BIT_LENGTH) ranges into *DST_RANGE, adjusted.  */

static void
ranges_copy_adjusted (VEC (range_s) **dst_range, int dst_bit_offset,
		      VEC (range_s) *src_range, int src_bit_offset,
		      int bit_length)
{
  range_s *r;
  int i;

  for (i = 0; VEC_iterate (range_s, src_range, i, r); i++)
    {
      ULONGEST h, l;

      /* Intersect range R with the [SRC_BIT_OFFSET,
	 SRC_BIT_OFFSET+BIT_LENGTH) window: L is the intersection's
	 start, H is one past its end.  */
      l = std::max (r->offset, (LONGEST) src_bit_offset);
      h = std::min (r->offset + r->length,
		    (LONGEST) src_bit_offset + bit_length);

      /* Only a non-empty intersection is recorded, re-based onto
	 DST_BIT_OFFSET.  */
      if (l < h)
	insert_into_bit_range_vector (dst_range,
				      dst_bit_offset + (l - src_bit_offset),
				      h - l);
    }
}
1313
/* Copy the ranges metadata in SRC that overlaps [SRC_BIT_OFFSET,
   SRC_BIT_OFFSET+BIT_LENGTH) into DST, adjusted.  Both kinds of
   validity metadata are propagated: unavailable and optimized-out
   ranges.  */

static void
value_ranges_copy_adjusted (struct value *dst, int dst_bit_offset,
			    const struct value *src, int src_bit_offset,
			    int bit_length)
{
  ranges_copy_adjusted (&dst->unavailable, dst_bit_offset,
			src->unavailable, src_bit_offset,
			bit_length);
  ranges_copy_adjusted (&dst->optimized_out, dst_bit_offset,
			src->optimized_out, src_bit_offset,
			bit_length);
}
1329
/* Copy LENGTH target addressable memory units of SRC value's (all) contents
   (value_contents_all) starting at SRC_OFFSET, into DST value's (all)
   contents, starting at DST_OFFSET.  If unavailable contents are
   being copied from SRC, the corresponding DST contents are marked
   unavailable accordingly.  Neither DST nor SRC may be lazy
   values.

   It is assumed the contents of DST in the [DST_OFFSET,
   DST_OFFSET+LENGTH) range are wholly available.  */

void
value_contents_copy_raw (struct value *dst, LONGEST dst_offset,
			 struct value *src, LONGEST src_offset, LONGEST length)
{
  LONGEST src_bit_offset, dst_bit_offset, bit_length;
  struct gdbarch *arch = get_value_arch (src);
  /* Offsets and lengths here are in addressable memory units, which
     may span more than one host byte.  */
  int unit_size = gdbarch_addressable_memory_unit_size (arch);

  /* A lazy DST would make that this copy operation useless, since as
     soon as DST's contents were un-lazied (by a later value_contents
     call, say), the contents would be overwritten.  A lazy SRC would
     mean we'd be copying garbage.  */
  gdb_assert (!dst->lazy && !src->lazy);

  /* The overwritten DST range gets unavailability ORed in, not
     replaced.  Make sure to remember to implement replacing if it
     turns out actually necessary.  */
  gdb_assert (value_bytes_available (dst, dst_offset, length));
  gdb_assert (!value_bits_any_optimized_out (dst,
					     TARGET_CHAR_BIT * dst_offset,
					     TARGET_CHAR_BIT * length));

  /* Copy the data.  */
  memcpy (value_contents_all_raw (dst) + dst_offset * unit_size,
	  value_contents_all_raw (src) + src_offset * unit_size,
	  length * unit_size);

  /* Copy the meta-data, adjusted.  Range metadata is tracked in bits,
     so convert the unit-based offsets first.  */
  src_bit_offset = src_offset * unit_size * HOST_CHAR_BIT;
  dst_bit_offset = dst_offset * unit_size * HOST_CHAR_BIT;
  bit_length = length * unit_size * HOST_CHAR_BIT;

  value_ranges_copy_adjusted (dst, dst_bit_offset,
			      src, src_bit_offset,
			      bit_length);
}
1376
1377 /* Copy LENGTH bytes of SRC value's (all) contents
1378 (value_contents_all) starting at SRC_OFFSET byte, into DST value's
1379 (all) contents, starting at DST_OFFSET. If unavailable contents
1380 are being copied from SRC, the corresponding DST contents are
1381 marked unavailable accordingly. DST must not be lazy. If SRC is
1382 lazy, it will be fetched now.
1383
1384 It is assumed the contents of DST in the [DST_OFFSET,
1385 DST_OFFSET+LENGTH) range are wholly available. */
1386
1387 void
1388 value_contents_copy (struct value *dst, LONGEST dst_offset,
1389 struct value *src, LONGEST src_offset, LONGEST length)
1390 {
1391 if (src->lazy)
1392 value_fetch_lazy (src);
1393
1394 value_contents_copy_raw (dst, dst_offset, src, src_offset, length);
1395 }
1396
/* Return non-zero if VALUE's contents have not been fetched yet.  */

int
value_lazy (const struct value *value)
{
  return value->lazy;
}

/* Set VALUE's lazy flag.  */

void
set_value_lazy (struct value *value, int val)
{
  value->lazy = val;
}

/* Return non-zero if VALUE is known to live on the program stack.  */

int
value_stack (const struct value *value)
{
  return value->stack;
}

/* Set VALUE's stack flag.  */

void
set_value_stack (struct value *value, int val)
{
  value->stack = val;
}
1420
/* Return a read-only pointer to VALUE's contents (at its embedded
   offset), fetching lazy contents and erroring out if any part is
   optimized out or unavailable.  */

const gdb_byte *
value_contents (struct value *value)
{
  const gdb_byte *result = value_contents_writeable (value);
  require_not_optimized_out (value);
  require_available (value);
  return result;
}

/* Like value_contents, but writable and without the optimized-out /
   availability checks.  */

gdb_byte *
value_contents_writeable (struct value *value)
{
  if (value->lazy)
    value_fetch_lazy (value);
  return value_contents_raw (value);
}
1437
/* Return non-zero if any part of VALUE is optimized out, fetching the
   value first if needed, since optimized-out ranges are only recorded
   once a fetch has been attempted.  */

int
value_optimized_out (struct value *value)
{
  /* We can only know if a value is optimized out once we have tried to
     fetch it.  */
  if (VEC_empty (range_s, value->optimized_out) && value->lazy)
    {
      TRY
	{
	  value_fetch_lazy (value);
	}
      CATCH (ex, RETURN_MASK_ERROR)
	{
	  /* Fall back to checking value->optimized_out.  */
	}
      END_CATCH
    }

  return !VEC_empty (range_s, value->optimized_out);
}
1458
/* Mark contents of VALUE as optimized out, starting at OFFSET bytes, and
   the following LENGTH bytes.  */

void
mark_value_bytes_optimized_out (struct value *value, int offset, int length)
{
  /* Delegate to the bit-granular version.  */
  mark_value_bits_optimized_out (value,
				 offset * TARGET_CHAR_BIT,
				 length * TARGET_CHAR_BIT);
}

/* See value.h.  */

void
mark_value_bits_optimized_out (struct value *value,
			       LONGEST offset, LONGEST length)
{
  insert_into_bit_range_vector (&value->optimized_out, offset, length);
}

/* Return non-zero if the [OFFSET, OFFSET+LENGTH) bit range of VALUE is
   a synthetic pointer; only computed lvalues whose funcs provide a
   check_synthetic_pointer callback can be.  */

int
value_bits_synthetic_pointer (const struct value *value,
			      LONGEST offset, LONGEST length)
{
  if (value->lval != lval_computed
      || !value->location.computed.funcs->check_synthetic_pointer)
    return 0;
  return value->location.computed.funcs->check_synthetic_pointer (value,
								  offset,
								  length);
}
1490
/* Return the offset of the sub-object described by value_type within
   the enclosing object, in addressable memory units.  */

LONGEST
value_embedded_offset (const struct value *value)
{
  return value->embedded_offset;
}

/* Set VALUE's embedded offset.  */

void
set_value_embedded_offset (struct value *value, LONGEST val)
{
  value->embedded_offset = val;
}

/* For pointer values: offset of the pointed-to sub-object within the
   full object the pointer really points at.  */

LONGEST
value_pointed_to_offset (const struct value *value)
{
  return value->pointed_to_offset;
}

/* Set VALUE's pointed-to offset.  */

void
set_value_pointed_to_offset (struct value *value, LONGEST val)
{
  value->pointed_to_offset = val;
}

/* Return the lval_funcs of computed lvalue V.  V must be
   lval_computed.  */

const struct lval_funcs *
value_computed_funcs (const struct value *v)
{
  gdb_assert (value_lval_const (v) == lval_computed);

  return v->location.computed.funcs;
}

/* Return the closure of computed lvalue V.  V must be
   lval_computed.  */

void *
value_computed_closure (const struct value *v)
{
  gdb_assert (v->lval == lval_computed);

  return v->location.computed.closure;
}

/* Return a mutable pointer to VALUE's lval kind (used by the
   VALUE_LVAL macro).  */

enum lval_type *
deprecated_value_lval_hack (struct value *value)
{
  return &value->lval;
}

/* Const accessor for VALUE's lval kind.  */

enum lval_type
value_lval_const (const struct value *value)
{
  return value->lval;
}
1542
/* Return the (offset-adjusted) address of VALUE.  Internal variables
   and xmethods have no address; a child value's address is derived
   from its parent's.  */

CORE_ADDR
value_address (const struct value *value)
{
  if (value->lval == lval_internalvar
      || value->lval == lval_internalvar_component
      || value->lval == lval_xcallable)
    return 0;
  /* A component (e.g. bitfield) is located relative to its parent.  */
  if (value->parent != NULL)
    return value_address (value->parent) + value->offset;
  /* Types with a constant DW_AT_data_location carry their own
     address.  */
  if (NULL != TYPE_DATA_LOCATION (value_type (value)))
    {
      gdb_assert (PROP_CONST == TYPE_DATA_LOCATION_KIND (value_type (value)));
      return TYPE_DATA_LOCATION_ADDR (value_type (value));
    }

  return value->location.address + value->offset;
}
1560
1561 CORE_ADDR
1562 value_raw_address (const struct value *value)
1563 {
1564 if (value->lval == lval_internalvar
1565 || value->lval == lval_internalvar_component
1566 || value->lval == lval_xcallable)
1567 return 0;
1568 return value->location.address;
1569 }
1570
1571 void
1572 set_value_address (struct value *value, CORE_ADDR addr)
1573 {
1574 gdb_assert (value->lval != lval_internalvar
1575 && value->lval != lval_internalvar_component
1576 && value->lval != lval_xcallable);
1577 value->location.address = addr;
1578 }
1579
/* Mutable access to VALUE's internalvar location (used by macros in
   value.h).  */

struct internalvar **
deprecated_value_internalvar_hack (struct value *value)
{
  return &value->location.internalvar;
}

/* Mutable access to the id of the frame VALUE was fetched relative
   to.  */

struct frame_id *
deprecated_value_next_frame_id_hack (struct value *value)
{
  return &value->next_frame_id;
}

/* Mutable access to the register number for register lvalues.  */

short *
deprecated_value_regnum_hack (struct value *value)
{
  return &value->regnum;
}

/* Return non-zero if VALUE may be modified (history values, for
   instance, are frozen).  */

int
deprecated_value_modifiable (const struct value *value)
{
  return value->modifiable;
}
1603 \f
/* Return a mark in the value chain.  All values allocated after the
   mark is obtained (except for those released) are subject to being freed
   if a subsequent value_free_to_mark is passed the mark.  */
struct value *
value_mark (void)
{
  /* The mark is simply the current head of the all_values chain.  */
  return all_values;
}

/* Take a reference to VAL.  VAL will not be deallocated until all
   references are released.  */

void
value_incref (struct value *val)
{
  val->reference_count++;
}
1621
1622 /* Release a reference to VAL, which was acquired with value_incref.
1623 This function is also called to deallocate values from the value
1624 chain. */
1625
1626 void
1627 value_free (struct value *val)
1628 {
1629 if (val)
1630 {
1631 gdb_assert (val->reference_count > 0);
1632 val->reference_count--;
1633 if (val->reference_count > 0)
1634 return;
1635
1636 /* If there's an associated parent value, drop our reference to
1637 it. */
1638 if (val->parent != NULL)
1639 value_free (val->parent);
1640
1641 if (VALUE_LVAL (val) == lval_computed)
1642 {
1643 const struct lval_funcs *funcs = val->location.computed.funcs;
1644
1645 if (funcs->free_closure)
1646 funcs->free_closure (val);
1647 }
1648 else if (VALUE_LVAL (val) == lval_xcallable)
1649 free_xmethod_worker (val->location.xm_worker);
1650
1651 xfree (val->contents);
1652 VEC_free (range_s, val->unavailable);
1653 }
1654 xfree (val);
1655 }
1656
1657 /* Free all values allocated since MARK was obtained by value_mark
1658 (except for those released). */
1659 void
1660 value_free_to_mark (const struct value *mark)
1661 {
1662 struct value *val;
1663 struct value *next;
1664
1665 for (val = all_values; val && val != mark; val = next)
1666 {
1667 next = val->next;
1668 val->released = 1;
1669 value_free (val);
1670 }
1671 all_values = val;
1672 }
1673
1674 /* Free all the values that have been allocated (except for those released).
1675 Call after each command, successful or not.
1676 In practice this is called before each command, which is sufficient. */
1677
1678 void
1679 free_all_values (void)
1680 {
1681 struct value *val;
1682 struct value *next;
1683
1684 for (val = all_values; val; val = next)
1685 {
1686 next = val->next;
1687 val->released = 1;
1688 value_free (val);
1689 }
1690
1691 all_values = 0;
1692 }
1693
1694 /* Frees all the elements in a chain of values. */
1695
1696 void
1697 free_value_chain (struct value *v)
1698 {
1699 struct value *next;
1700
1701 for (; v; v = next)
1702 {
1703 next = value_next (v);
1704 value_free (v);
1705 }
1706 }
1707
1708 /* Remove VAL from the chain all_values
1709 so it will not be freed automatically. */
1710
1711 void
1712 release_value (struct value *val)
1713 {
1714 struct value *v;
1715
1716 if (all_values == val)
1717 {
1718 all_values = val->next;
1719 val->next = NULL;
1720 val->released = 1;
1721 return;
1722 }
1723
1724 for (v = all_values; v; v = v->next)
1725 {
1726 if (v->next == val)
1727 {
1728 v->next = val->next;
1729 val->next = NULL;
1730 val->released = 1;
1731 break;
1732 }
1733 }
1734 }
1735
1736 /* If the value is not already released, release it.
1737 If the value is already released, increment its reference count.
1738 That is, this function ensures that the value is released from the
1739 value chain and that the caller owns a reference to it. */
1740
1741 void
1742 release_value_or_incref (struct value *val)
1743 {
1744 if (val->released)
1745 value_incref (val);
1746 else
1747 release_value (val);
1748 }
1749
/* Release all values up to mark.  Returns the head of the chain of
   released values, which remain linked together via their next
   pointers.  */
struct value *
value_release_to_mark (const struct value *mark)
{
  struct value *val;
  struct value *next;

  /* VAL keeps the head of the released chain; NEXT walks it, marking
     each value released as it goes.  */
  for (val = next = all_values; next; next = next->next)
    {
      if (next->next == mark)
	{
	  /* Detach the released chain just before MARK.  */
	  all_values = next->next;
	  next->next = NULL;
	  return val;
	}
      next->released = 1;
    }
  /* MARK was not found: everything is released.  */
  all_values = 0;
  return val;
}
1770
/* Return a copy of the value ARG.
   It contains the same contents, for same memory address,
   but it's a different block of storage.  */

struct value *
value_copy (struct value *arg)
{
  struct type *encl_type = value_enclosing_type (arg);
  struct value *val;

  /* A lazy source yields a lazy copy: no contents buffer yet.  */
  if (value_lazy (arg))
    val = allocate_value_lazy (encl_type);
  else
    val = allocate_value (encl_type);
  val->type = arg->type;
  VALUE_LVAL (val) = VALUE_LVAL (arg);
  val->location = arg->location;
  val->offset = arg->offset;
  val->bitpos = arg->bitpos;
  val->bitsize = arg->bitsize;
  VALUE_NEXT_FRAME_ID (val) = VALUE_NEXT_FRAME_ID (arg);
  VALUE_REGNUM (val) = VALUE_REGNUM (arg);
  val->lazy = arg->lazy;
  val->embedded_offset = value_embedded_offset (arg);
  val->pointed_to_offset = arg->pointed_to_offset;
  val->modifiable = arg->modifiable;
  if (!value_lazy (val))
    {
      memcpy (value_contents_all_raw (val), value_contents_all_raw (arg),
	      TYPE_LENGTH (value_enclosing_type (arg)));

    }
  /* Duplicate the validity metadata as well.  */
  val->unavailable = VEC_copy (range_s, arg->unavailable);
  val->optimized_out = VEC_copy (range_s, arg->optimized_out);
  /* set_value_parent takes its own reference on the parent.  */
  set_value_parent (val, arg->parent);
  if (VALUE_LVAL (val) == lval_computed)
    {
      const struct lval_funcs *funcs = val->location.computed.funcs;

      /* The copied location still points at ARG's closure; give the
	 copy its own if the funcs support that.  */
      if (funcs->copy_closure)
        val->location.computed.closure = funcs->copy_closure (val);
    }
  return val;
}
1815
1816 /* Return a "const" and/or "volatile" qualified version of the value V.
1817 If CNST is true, then the returned value will be qualified with
1818 "const".
1819 if VOLTL is true, then the returned value will be qualified with
1820 "volatile". */
1821
1822 struct value *
1823 make_cv_value (int cnst, int voltl, struct value *v)
1824 {
1825 struct type *val_type = value_type (v);
1826 struct type *enclosing_type = value_enclosing_type (v);
1827 struct value *cv_val = value_copy (v);
1828
1829 deprecated_set_value_type (cv_val,
1830 make_cv_type (cnst, voltl, val_type, NULL));
1831 set_value_enclosing_type (cv_val,
1832 make_cv_type (cnst, voltl, enclosing_type, NULL));
1833
1834 return cv_val;
1835 }
1836
1837 /* Return a version of ARG that is non-lvalue. */
1838
1839 struct value *
1840 value_non_lval (struct value *arg)
1841 {
1842 if (VALUE_LVAL (arg) != not_lval)
1843 {
1844 struct type *enc_type = value_enclosing_type (arg);
1845 struct value *val = allocate_value (enc_type);
1846
1847 memcpy (value_contents_all_raw (val), value_contents_all (arg),
1848 TYPE_LENGTH (enc_type));
1849 val->type = arg->type;
1850 set_value_embedded_offset (val, value_embedded_offset (arg));
1851 set_value_pointed_to_offset (val, value_pointed_to_offset (arg));
1852 return val;
1853 }
1854 return arg;
1855 }
1856
1857 /* Write contents of V at ADDR and set its lval type to be LVAL_MEMORY. */
1858
1859 void
1860 value_force_lval (struct value *v, CORE_ADDR addr)
1861 {
1862 gdb_assert (VALUE_LVAL (v) == not_lval);
1863
1864 write_memory (addr, value_contents_raw (v), TYPE_LENGTH (value_type (v)));
1865 v->lval = lval_memory;
1866 v->location.address = addr;
1867 }
1868
/* Set COMPONENT's location to be a component of WHOLE's location:
   same lval kind (with internalvar mapped to internalvar_component),
   same location, with a copied closure for computed lvalues.  */

void
set_value_component_location (struct value *component,
			      const struct value *whole)
{
  struct type *type;

  gdb_assert (whole->lval != lval_xcallable);

  if (whole->lval == lval_internalvar)
    VALUE_LVAL (component) = lval_internalvar_component;
  else
    VALUE_LVAL (component) = whole->lval;

  component->location = whole->location;
  if (whole->lval == lval_computed)
    {
      const struct lval_funcs *funcs = whole->location.computed.funcs;

      /* Give the component its own closure when supported, since the
	 struct copy above only duplicated the pointer.  */
      if (funcs->copy_closure)
        component->location.computed.closure = funcs->copy_closure (whole);
    }

  /* If type has a dynamic resolved location property
     update it's value address.  */
  type = value_type (whole);
  if (NULL != TYPE_DATA_LOCATION (type)
      && TYPE_DATA_LOCATION_KIND (type) == PROP_CONST)
    set_value_address (component, TYPE_DATA_LOCATION_ADDR (type));
}
1898
/* Access to the value history.  */

/* Record a new value in the value history.
   Returns the absolute history index of the entry.  */

int
record_latest_value (struct value *val)
{
  int i;

  /* We don't want this value to have anything to do with the inferior anymore.
     In particular, "set $1 = 50" should not affect the variable from which
     the value was taken, and fast watchpoints should be able to assume that
     a value on the value history never changes.  */
  if (value_lazy (val))
    value_fetch_lazy (val);
  /* We preserve VALUE_LVAL so that the user can find out where it was fetched
     from.  This is a bit dubious, because then *&$1 does not just return $1
     but the current contents of that location.  c'est la vie...  */
  val->modifiable = 0;

  /* The value may have already been released, in which case we're adding a
     new reference for its entry in the history.  That is why we call
     release_value_or_incref here instead of release_value.  */
  release_value_or_incref (val);

  /* Here we treat value_history_count as origin-zero
     and applying to the value being stored now.  */

  i = value_history_count % VALUE_HISTORY_CHUNK;
  if (i == 0)
    {
      /* The current chunk is full: start a new one at the head of the
	 chunk list.  */
      struct value_history_chunk *newobj = XCNEW (struct value_history_chunk);

      newobj->next = value_history_chain;
      value_history_chain = newobj;
    }

  value_history_chain->values[i] = val;

  /* Now we regard value_history_count as origin-one
     and applying to the value just stored.  */

  return ++value_history_count;
}
1944
/* Return a copy of the value in the history with sequence number NUM.
   NUM <= 0 is relative to the most recent entry ($$0, $$1, ...).  */

struct value *
access_value_history (int num)
{
  struct value_history_chunk *chunk;
  int i;
  int absnum = num;

  /* Convert a relative (non-positive) number to an absolute one.  */
  if (absnum <= 0)
    absnum += value_history_count;

  if (absnum <= 0)
    {
      if (num == 0)
	error (_("The history is empty."));
      else if (num == 1)
	error (_("There is only one value in the history."));
      else
	error (_("History does not go back to $$%d."), -num);
    }
  if (absnum > value_history_count)
    error (_("History has not yet reached $%d."), absnum);

  absnum--;

  /* Now absnum is always absolute and origin zero.  */

  /* The chunk list is newest-first, so walk back from the newest
     chunk to the one containing ABSNUM.  */
  chunk = value_history_chain;
  for (i = (value_history_count - 1) / VALUE_HISTORY_CHUNK
	 - absnum / VALUE_HISTORY_CHUNK;
       i > 0; i--)
    chunk = chunk->next;

  return value_copy (chunk->values[absnum % VALUE_HISTORY_CHUNK]);
}
1981
/* Implement the "show values" command: print ten history entries,
   either the last ten, a window centered on a given entry, or the
   next ten after a previous "show values +".  */

static void
show_values (char *num_exp, int from_tty)
{
  int i;
  struct value *val;
  static int num = 1;

  if (num_exp)
    {
      /* "show values +" should print from the stored position.
	 "show values <exp>" should print around value number <exp>.
	 Subtracting 5 centers the ten-entry window on <exp>.  */
      if (num_exp[0] != '+' || num_exp[1] != '\0')
	num = parse_and_eval_long (num_exp) - 5;
    }
  else
    {
      /* "show values" means print the last 10 values.  */
      num = value_history_count - 9;
    }

  if (num <= 0)
    num = 1;

  for (i = num; i < num + 10 && i <= value_history_count; i++)
    {
      struct value_print_options opts;

      val = access_value_history (i);
      printf_filtered (("$%d = "), i);
      get_user_print_options (&opts);
      value_print (val, gdb_stdout, &opts);
      printf_filtered (("\n"));
    }

  /* The next "show values +" should start after what we just printed.  */
  num += 10;

  /* Hitting just return after this command should do the same thing as
     "show values +".  If num_exp is null, this is unnecessary, since
     "show values +" is not useful after "show values".  */
  if (from_tty && num_exp)
    {
      num_exp[0] = '+';
      num_exp[1] = '\0';
    }
}
2028 \f
/* The different kinds of content an internal variable ($foo) can
   hold; see union internalvar_data for the associated payloads.  */
enum internalvar_kind
{
  /* The internal variable is empty.  */
  INTERNALVAR_VOID,

  /* The value of the internal variable is provided directly as
     a GDB value object.  */
  INTERNALVAR_VALUE,

  /* A fresh value is computed via a call-back routine on every
     access to the internal variable.  */
  INTERNALVAR_MAKE_VALUE,

  /* The internal variable holds a GDB internal convenience function.  */
  INTERNALVAR_FUNCTION,

  /* The variable holds an integer value.  */
  INTERNALVAR_INTEGER,

  /* The variable holds a GDB-provided string.  */
  INTERNALVAR_STRING,
};

/* Payload for each internalvar_kind; exactly one member is meaningful
   depending on the variable's kind.  */
union internalvar_data
{
  /* A value object used with INTERNALVAR_VALUE.  */
  struct value *value;

  /* The call-back routine used with INTERNALVAR_MAKE_VALUE.  */
  struct
  {
    /* The functions to call.  */
    const struct internalvar_funcs *functions;

    /* The function's user-data.  */
    void *data;
  } make_value;

  /* The internal function used with INTERNALVAR_FUNCTION.  */
  struct
  {
    struct internal_function *function;
    /* True if this is the canonical name for the function.  */
    int canonical;
  } fn;

  /* An integer value used with INTERNALVAR_INTEGER.  */
  struct
  {
    /* If type is non-NULL, it will be used as the type to generate
       a value for this internal variable.  If type is NULL, a default
       integer type for the architecture is used.  */
    struct type *type;
    LONGEST val;
  } integer;

  /* A string value used with INTERNALVAR_STRING.  */
  char *string;
};
2088
/* Internal variables.  These are variables within the debugger
   that hold values assigned by debugger commands.
   The user refers to them with a '$' prefix
   that does not appear in the variable names stored internally.  */

struct internalvar
{
  /* Next variable in the singly-linked internalvars list.  */
  struct internalvar *next;
  /* Name without the '$' prefix; owned by this structure.  */
  char *name;

  /* We support various different kinds of content of an internal variable.
     enum internalvar_kind specifies the kind, and union internalvar_data
     provides the data associated with this particular kind.  */

  enum internalvar_kind kind;

  union internalvar_data u;
};

/* Head of the list of all internal variables.  */
static struct internalvar *internalvars;
2109
/* If the variable does not already exist create it and give it the
   value given.  If no value is given then the default is zero.  */
static void
init_if_undefined_command (char* args, int from_tty)
{
  struct internalvar* intvar;

  /* Parse the expression - this is taken from set_command().  */
  expression_up expr = parse_expression (args);

  /* Validate the expression.
     Was the expression an assignment?
     Or even an expression at all?  */
  if (expr->nelts == 0 || expr->elts[0].opcode != BINOP_ASSIGN)
    error (_("Init-if-undefined requires an assignment expression."));

  /* Extract the variable from the parsed expression.
     In the case of an assign the lvalue will be in elts[1] and elts[2].  */
  if (expr->elts[1].opcode != OP_INTERNALVAR)
    error (_("The first parameter to init-if-undefined "
	     "should be a GDB variable."));
  intvar = expr->elts[2].internalvar;

  /* Only evaluate the expression if the lvalue is void.
     This may still fail if the expression is invalid.  */
  if (intvar->kind == INTERNALVAR_VOID)
    evaluate_expression (expr.get ());
}
2138
2139
2140 /* Look up an internal variable with name NAME. NAME should not
2141 normally include a dollar sign.
2142
2143 If the specified internal variable does not exist,
2144 the return value is NULL. */
2145
2146 struct internalvar *
2147 lookup_only_internalvar (const char *name)
2148 {
2149 struct internalvar *var;
2150
2151 for (var = internalvars; var; var = var->next)
2152 if (strcmp (var->name, name) == 0)
2153 return var;
2154
2155 return NULL;
2156 }
2157
2158 /* Complete NAME by comparing it to the names of internal variables.
2159 Returns a vector of newly allocated strings, or NULL if no matches
2160 were found. */
2161
2162 VEC (char_ptr) *
2163 complete_internalvar (const char *name)
2164 {
2165 VEC (char_ptr) *result = NULL;
2166 struct internalvar *var;
2167 int len;
2168
2169 len = strlen (name);
2170
2171 for (var = internalvars; var; var = var->next)
2172 if (strncmp (var->name, name, len) == 0)
2173 {
2174 char *r = xstrdup (var->name);
2175
2176 VEC_safe_push (char_ptr, result, r);
2177 }
2178
2179 return result;
2180 }
2181
2182 /* Create an internal variable with name NAME and with a void value.
2183 NAME should not normally include a dollar sign. */
2184
2185 struct internalvar *
2186 create_internalvar (const char *name)
2187 {
2188 struct internalvar *var = XNEW (struct internalvar);
2189
2190 var->name = concat (name, (char *)NULL);
2191 var->kind = INTERNALVAR_VOID;
2192 var->next = internalvars;
2193 internalvars = var;
2194 return var;
2195 }
2196
2197 /* Create an internal variable with name NAME and register FUN as the
2198 function that value_of_internalvar uses to create a value whenever
2199 this variable is referenced. NAME should not normally include a
2200 dollar sign. DATA is passed uninterpreted to FUN when it is
2201 called. CLEANUP, if not NULL, is called when the internal variable
2202 is destroyed. It is passed DATA as its only argument. */
2203
2204 struct internalvar *
2205 create_internalvar_type_lazy (const char *name,
2206 const struct internalvar_funcs *funcs,
2207 void *data)
2208 {
2209 struct internalvar *var = create_internalvar (name);
2210
2211 var->kind = INTERNALVAR_MAKE_VALUE;
2212 var->u.make_value.functions = funcs;
2213 var->u.make_value.data = data;
2214 return var;
2215 }
2216
2217 /* See documentation in value.h. */
2218
2219 int
2220 compile_internalvar_to_ax (struct internalvar *var,
2221 struct agent_expr *expr,
2222 struct axs_value *value)
2223 {
2224 if (var->kind != INTERNALVAR_MAKE_VALUE
2225 || var->u.make_value.functions->compile_to_ax == NULL)
2226 return 0;
2227
2228 var->u.make_value.functions->compile_to_ax (var, expr, value,
2229 var->u.make_value.data);
2230 return 1;
2231 }
2232
2233 /* Look up an internal variable with name NAME. NAME should not
2234 normally include a dollar sign.
2235
2236 If the specified internal variable does not exist,
2237 one is created, with a void value. */
2238
2239 struct internalvar *
2240 lookup_internalvar (const char *name)
2241 {
2242 struct internalvar *var;
2243
2244 var = lookup_only_internalvar (name);
2245 if (var)
2246 return var;
2247
2248 return create_internalvar (name);
2249 }
2250
/* Return current value of internal variable VAR.  For variables that
   are not inherently typed, use a value type appropriate for GDBARCH.  */

struct value *
value_of_internalvar (struct gdbarch *gdbarch, struct internalvar *var)
{
  struct value *val;
  struct trace_state_variable *tsv;

  /* If there is a trace state variable of the same name, assume that
     is what we really want to see.  */
  tsv = find_trace_state_variable (var->name);
  if (tsv)
    {
      tsv->value_known = target_get_trace_state_variable_value (tsv->number,
								&(tsv->value));
      /* An unknown trace state variable reads as void.  */
      if (tsv->value_known)
	val = value_from_longest (builtin_type (gdbarch)->builtin_int64,
				  tsv->value);
      else
	val = allocate_value (builtin_type (gdbarch)->builtin_void);
      return val;
    }

  /* Materialize a struct value according to how the variable's
     contents are stored.  */
  switch (var->kind)
    {
    case INTERNALVAR_VOID:
      val = allocate_value (builtin_type (gdbarch)->builtin_void);
      break;

    case INTERNALVAR_FUNCTION:
      val = allocate_value (builtin_type (gdbarch)->internal_fn);
      break;

    case INTERNALVAR_INTEGER:
      /* Untyped integers are given GDBARCH's plain int type.  */
      if (!var->u.integer.type)
	val = value_from_longest (builtin_type (gdbarch)->builtin_int,
				  var->u.integer.val);
      else
	val = value_from_longest (var->u.integer.type, var->u.integer.val);
      break;

    case INTERNALVAR_STRING:
      val = value_cstring (var->u.string, strlen (var->u.string),
			   builtin_type (gdbarch)->builtin_char);
      break;

    case INTERNALVAR_VALUE:
      /* Hand out a copy, fetched eagerly while the stored value's
	 target context is still valid.  */
      val = value_copy (var->u.value);
      if (value_lazy (val))
	value_fetch_lazy (val);
      break;

    case INTERNALVAR_MAKE_VALUE:
      val = (*var->u.make_value.functions->make_value) (gdbarch, var,
							var->u.make_value.data);
      break;

    default:
      internal_error (__FILE__, __LINE__, _("bad kind"));
    }

  /* Change the VALUE_LVAL to lval_internalvar so that future operations
     on this value go back to affect the original internal variable.

     Do not do this for INTERNALVAR_MAKE_VALUE variables, as those have
     no underlying modifyable state in the internal variable.

     Likewise, if the variable's value is a computed lvalue, we want
     references to it to produce another computed lvalue, where
     references and assignments actually operate through the
     computed value's functions.

     This means that internal variables with computed values
     behave a little differently from other internal variables:
     assignments to them don't just replace the previous value
     altogether.  At the moment, this seems like the behavior we
     want.  */

  if (var->kind != INTERNALVAR_MAKE_VALUE
      && val->lval != lval_computed)
    {
      VALUE_LVAL (val) = lval_internalvar;
      VALUE_INTERNALVAR (val) = var;
    }

  return val;
}
2339
2340 int
2341 get_internalvar_integer (struct internalvar *var, LONGEST *result)
2342 {
2343 if (var->kind == INTERNALVAR_INTEGER)
2344 {
2345 *result = var->u.integer.val;
2346 return 1;
2347 }
2348
2349 if (var->kind == INTERNALVAR_VALUE)
2350 {
2351 struct type *type = check_typedef (value_type (var->u.value));
2352
2353 if (TYPE_CODE (type) == TYPE_CODE_INT)
2354 {
2355 *result = value_as_long (var->u.value);
2356 return 1;
2357 }
2358 }
2359
2360 return 0;
2361 }
2362
2363 static int
2364 get_internalvar_function (struct internalvar *var,
2365 struct internal_function **result)
2366 {
2367 switch (var->kind)
2368 {
2369 case INTERNALVAR_FUNCTION:
2370 *result = var->u.fn.function;
2371 return 1;
2372
2373 default:
2374 return 0;
2375 }
2376 }
2377
/* Write NEWVAL into the component of internal variable VAR described
   by OFFSET / BITPOS / BITSIZE.  Only INTERNALVAR_VALUE variables
   have components.  */

void
set_internalvar_component (struct internalvar *var,
			   LONGEST offset, LONGEST bitpos,
			   LONGEST bitsize, struct value *newval)
{
  gdb_byte *addr;
  struct gdbarch *arch;
  int unit_size;

  switch (var->kind)
    {
    case INTERNALVAR_VALUE:
      addr = value_contents_writeable (var->u.value);
      arch = get_value_arch (var->u.value);
      unit_size = gdbarch_addressable_memory_unit_size (arch);

      if (bitsize)
	/* Bitfield write: splice NEWVAL's low BITSIZE bits in at
	   BITPOS.  */
	modify_field (value_type (var->u.value), addr + offset,
		      value_as_long (newval), bitpos, bitsize);
      else
	/* Whole-field write; OFFSET is scaled by the target's
	   addressable memory unit size.  */
	memcpy (addr + offset * unit_size, value_contents (newval),
		TYPE_LENGTH (value_type (newval)));
      break;

    default:
      /* We can never get a component of any other kind.  */
      internal_error (__FILE__, __LINE__, _("set_internalvar_component"));
    }
}
2407
/* Set internal variable VAR to a copy of value VAL, releasing VAR's
   previous contents.  New contents are fully prepared before the old
   ones are discarded, so an error during preparation leaves VAR
   untouched.  */

void
set_internalvar (struct internalvar *var, struct value *val)
{
  enum internalvar_kind new_kind;
  union internalvar_data new_data = { 0 };

  /* Canonical convenience functions (e.g. those registered via
     add_internal_function) must not be clobbered.  */
  if (var->kind == INTERNALVAR_FUNCTION && var->u.fn.canonical)
    error (_("Cannot overwrite convenience function %s"), var->name);

  /* Prepare new contents.  */
  switch (TYPE_CODE (check_typedef (value_type (val))))
    {
    case TYPE_CODE_VOID:
      new_kind = INTERNALVAR_VOID;
      break;

    case TYPE_CODE_INTERNAL_FUNCTION:
      gdb_assert (VALUE_LVAL (val) == lval_internalvar);
      new_kind = INTERNALVAR_FUNCTION;
      get_internalvar_function (VALUE_INTERNALVAR (val),
				&new_data.fn.function);
      /* Copies created here are never canonical.  */
      break;

    default:
      new_kind = INTERNALVAR_VALUE;
      new_data.value = value_copy (val);
      new_data.value->modifiable = 1;

      /* Force the value to be fetched from the target now, to avoid problems
	 later when this internalvar is referenced and the target is gone or
	 has changed.  */
      if (value_lazy (new_data.value))
	value_fetch_lazy (new_data.value);

      /* Release the value from the value chain to prevent it from being
	 deleted by free_all_values.  From here on this function should not
	 call error () until new_data is installed into the var->u to avoid
	 leaking memory.  */
      release_value (new_data.value);

      /* Internal variables which are created from values with a dynamic
	 location don't need the location property of the origin anymore.
	 The resolved dynamic location is used prior then any other address
	 when accessing the value.
	 If we keep it, we would still refer to the origin value.
	 Remove the location property in case it exist.  */
      remove_dyn_prop (DYN_PROP_DATA_LOCATION, value_type (new_data.value));

      break;
    }

  /* Clean up old contents.  */
  clear_internalvar (var);

  /* Switch over.  */
  var->kind = new_kind;
  var->u = new_data;
  /* End code which must not call error().  */
}
2468
2469 void
2470 set_internalvar_integer (struct internalvar *var, LONGEST l)
2471 {
2472 /* Clean up old contents. */
2473 clear_internalvar (var);
2474
2475 var->kind = INTERNALVAR_INTEGER;
2476 var->u.integer.type = NULL;
2477 var->u.integer.val = l;
2478 }
2479
2480 void
2481 set_internalvar_string (struct internalvar *var, const char *string)
2482 {
2483 /* Clean up old contents. */
2484 clear_internalvar (var);
2485
2486 var->kind = INTERNALVAR_STRING;
2487 var->u.string = xstrdup (string);
2488 }
2489
2490 static void
2491 set_internalvar_function (struct internalvar *var, struct internal_function *f)
2492 {
2493 /* Clean up old contents. */
2494 clear_internalvar (var);
2495
2496 var->kind = INTERNALVAR_FUNCTION;
2497 var->u.fn.function = f;
2498 var->u.fn.canonical = 1;
2499 /* Variables installed here are always the canonical version. */
2500 }
2501
2502 void
2503 clear_internalvar (struct internalvar *var)
2504 {
2505 /* Clean up old contents. */
2506 switch (var->kind)
2507 {
2508 case INTERNALVAR_VALUE:
2509 value_free (var->u.value);
2510 break;
2511
2512 case INTERNALVAR_STRING:
2513 xfree (var->u.string);
2514 break;
2515
2516 case INTERNALVAR_MAKE_VALUE:
2517 if (var->u.make_value.functions->destroy != NULL)
2518 var->u.make_value.functions->destroy (var->u.make_value.data);
2519 break;
2520
2521 default:
2522 break;
2523 }
2524
2525 /* Reset to void kind. */
2526 var->kind = INTERNALVAR_VOID;
2527 }
2528
2529 char *
2530 internalvar_name (const struct internalvar *var)
2531 {
2532 return var->name;
2533 }
2534
2535 static struct internal_function *
2536 create_internal_function (const char *name,
2537 internal_function_fn handler, void *cookie)
2538 {
2539 struct internal_function *ifn = XNEW (struct internal_function);
2540
2541 ifn->name = xstrdup (name);
2542 ifn->handler = handler;
2543 ifn->cookie = cookie;
2544 return ifn;
2545 }
2546
2547 char *
2548 value_internal_function_name (struct value *val)
2549 {
2550 struct internal_function *ifn;
2551 int result;
2552
2553 gdb_assert (VALUE_LVAL (val) == lval_internalvar);
2554 result = get_internalvar_function (VALUE_INTERNALVAR (val), &ifn);
2555 gdb_assert (result);
2556
2557 return ifn->name;
2558 }
2559
2560 struct value *
2561 call_internal_function (struct gdbarch *gdbarch,
2562 const struct language_defn *language,
2563 struct value *func, int argc, struct value **argv)
2564 {
2565 struct internal_function *ifn;
2566 int result;
2567
2568 gdb_assert (VALUE_LVAL (func) == lval_internalvar);
2569 result = get_internalvar_function (VALUE_INTERNALVAR (func), &ifn);
2570 gdb_assert (result);
2571
2572 return (*ifn->handler) (gdbarch, language, ifn->cookie, argc, argv);
2573 }
2574
/* The 'function' command.  This does nothing -- it is just a
   placeholder to let "help function NAME" work.  This is also used as
   the implementation of the sub-command that is created when
   registering an internal function.  */

static void
function_command (char *command, int from_tty)
{
  /* Intentionally empty.  */
}
2584
2585 /* Clean up if an internal function's command is destroyed. */
2586 static void
2587 function_destroyer (struct cmd_list_element *self, void *ignore)
2588 {
2589 xfree ((char *) self->name);
2590 xfree ((char *) self->doc);
2591 }
2592
2593 /* Add a new internal function. NAME is the name of the function; DOC
2594 is a documentation string describing the function. HANDLER is
2595 called when the function is invoked. COOKIE is an arbitrary
2596 pointer which is passed to HANDLER and is intended for "user
2597 data". */
2598 void
2599 add_internal_function (const char *name, const char *doc,
2600 internal_function_fn handler, void *cookie)
2601 {
2602 struct cmd_list_element *cmd;
2603 struct internal_function *ifn;
2604 struct internalvar *var = lookup_internalvar (name);
2605
2606 ifn = create_internal_function (name, handler, cookie);
2607 set_internalvar_function (var, ifn);
2608
2609 cmd = add_cmd (xstrdup (name), no_class, function_command, (char *) doc,
2610 &functionlist);
2611 cmd->destroyer = function_destroyer;
2612 }
2613
/* Update VALUE before discarding OBJFILE.  COPIED_TYPES is used to
   prevent cycles / duplicates.  */

void
preserve_one_value (struct value *value, struct objfile *objfile,
		    htab_t copied_types)
{
  /* Both the value's own type and its enclosing type may be owned by
     OBJFILE; deep-copy whichever ones are, so the value stays usable
     after the objfile is freed.  */
  if (TYPE_OBJFILE (value->type) == objfile)
    value->type = copy_type_recursive (objfile, value->type, copied_types);

  if (TYPE_OBJFILE (value->enclosing_type) == objfile)
    value->enclosing_type = copy_type_recursive (objfile,
						 value->enclosing_type,
						 copied_types);
}
2629
2630 /* Likewise for internal variable VAR. */
2631
2632 static void
2633 preserve_one_internalvar (struct internalvar *var, struct objfile *objfile,
2634 htab_t copied_types)
2635 {
2636 switch (var->kind)
2637 {
2638 case INTERNALVAR_INTEGER:
2639 if (var->u.integer.type && TYPE_OBJFILE (var->u.integer.type) == objfile)
2640 var->u.integer.type
2641 = copy_type_recursive (objfile, var->u.integer.type, copied_types);
2642 break;
2643
2644 case INTERNALVAR_VALUE:
2645 preserve_one_value (var->u.value, objfile, copied_types);
2646 break;
2647 }
2648 }
2649
2650 /* Update the internal variables and value history when OBJFILE is
2651 discarded; we must copy the types out of the objfile. New global types
2652 will be created for every convenience variable which currently points to
2653 this objfile's types, and the convenience variables will be adjusted to
2654 use the new global types. */
2655
2656 void
2657 preserve_values (struct objfile *objfile)
2658 {
2659 htab_t copied_types;
2660 struct value_history_chunk *cur;
2661 struct internalvar *var;
2662 int i;
2663
2664 /* Create the hash table. We allocate on the objfile's obstack, since
2665 it is soon to be deleted. */
2666 copied_types = create_copied_types_hash (objfile);
2667
2668 for (cur = value_history_chain; cur; cur = cur->next)
2669 for (i = 0; i < VALUE_HISTORY_CHUNK; i++)
2670 if (cur->values[i])
2671 preserve_one_value (cur->values[i], objfile, copied_types);
2672
2673 for (var = internalvars; var; var = var->next)
2674 preserve_one_internalvar (var, objfile, copied_types);
2675
2676 preserve_ext_lang_values (objfile, copied_types);
2677
2678 htab_delete (copied_types);
2679 }
2680
/* The "show convenience" command: print each defined convenience
   variable with its current value, or a hint on how to create one if
   none exist yet.  */

static void
show_convenience (char *ignore, int from_tty)
{
  struct gdbarch *gdbarch = get_current_arch ();
  struct internalvar *var;
  int varseen = 0;
  struct value_print_options opts;

  get_user_print_options (&opts);
  for (var = internalvars; var; var = var->next)
    {

      if (!varseen)
	{
	  varseen = 1;
	}
      printf_filtered (("$%s = "), var->name);

      /* Evaluating a variable can throw (e.g. a MAKE_VALUE callback
	 may fail); print the error inline and keep listing the rest.  */
      TRY
	{
	  struct value *val;

	  val = value_of_internalvar (gdbarch, var);
	  value_print (val, gdb_stdout, &opts);
	}
      CATCH (ex, RETURN_MASK_ERROR)
	{
	  fprintf_filtered (gdb_stdout, _("<error: %s>"), ex.message);
	}
      END_CATCH

      printf_filtered (("\n"));
    }
  if (!varseen)
    {
      /* This text does not mention convenience functions on purpose.
	 The user can't create them except via Python, and if Python support
	 is installed this message will never be printed ($_streq will
	 exist).  */
      printf_unfiltered (_("No debugger convenience variables now defined.\n"
			   "Convenience variables have "
			   "names starting with \"$\";\n"
			   "use \"set\" as in \"set "
			   "$foo = 5\" to define them.\n"));
    }
}
2727 \f
2728 /* Return the TYPE_CODE_XMETHOD value corresponding to WORKER. */
2729
2730 struct value *
2731 value_of_xmethod (struct xmethod_worker *worker)
2732 {
2733 if (worker->value == NULL)
2734 {
2735 struct value *v;
2736
2737 v = allocate_value (builtin_type (target_gdbarch ())->xmethod);
2738 v->lval = lval_xcallable;
2739 v->location.xm_worker = worker;
2740 v->modifiable = 0;
2741 worker->value = v;
2742 }
2743
2744 return worker->value;
2745 }
2746
2747 /* Return the type of the result of TYPE_CODE_XMETHOD value METHOD. */
2748
2749 struct type *
2750 result_type_of_xmethod (struct value *method, int argc, struct value **argv)
2751 {
2752 gdb_assert (TYPE_CODE (value_type (method)) == TYPE_CODE_XMETHOD
2753 && method->lval == lval_xcallable && argc > 0);
2754
2755 return get_xmethod_result_type (method->location.xm_worker,
2756 argv[0], argv + 1, argc - 1);
2757 }
2758
2759 /* Call the xmethod corresponding to the TYPE_CODE_XMETHOD value METHOD. */
2760
2761 struct value *
2762 call_xmethod (struct value *method, int argc, struct value **argv)
2763 {
2764 gdb_assert (TYPE_CODE (value_type (method)) == TYPE_CODE_XMETHOD
2765 && method->lval == lval_xcallable && argc > 0);
2766
2767 return invoke_xmethod (method->location.xm_worker,
2768 argv[0], argv + 1, argc - 1);
2769 }
2770 \f
2771 /* Extract a value as a C number (either long or double).
2772 Knows how to convert fixed values to double, or
2773 floating values to long.
2774 Does not deallocate the value. */
2775
2776 LONGEST
2777 value_as_long (struct value *val)
2778 {
2779 /* This coerces arrays and functions, which is necessary (e.g.
2780 in disassemble_command). It also dereferences references, which
2781 I suspect is the most logical thing to do. */
2782 val = coerce_array (val);
2783 return unpack_long (value_type (val), value_contents (val));
2784 }
2785
2786 DOUBLEST
2787 value_as_double (struct value *val)
2788 {
2789 DOUBLEST foo;
2790 int inv;
2791
2792 foo = unpack_double (value_type (val), value_contents (val), &inv);
2793 if (inv)
2794 error (_("Invalid floating value found in program."));
2795 return foo;
2796 }
2797
/* Extract a value as a C pointer.  Does not deallocate the value.
   Note that val's type may not actually be a pointer; value_as_long
   handles all the cases.  */
CORE_ADDR
value_as_address (struct value *val)
{
  struct gdbarch *gdbarch = get_type_arch (value_type (val));

  /* Assume a CORE_ADDR can fit in a LONGEST (for now).  Not sure
     whether we want this to be true eventually.  */
#if 0
  /* gdbarch_addr_bits_remove is wrong if we are being called for a
     non-address (e.g. argument to "signal", "info break", etc.), or
     for pointers to char, in which the low bits *are* significant.  */
  return gdbarch_addr_bits_remove (gdbarch, value_as_long (val));
#else

  /* There are several targets (IA-64, PowerPC, and others) which
     don't represent pointers to functions as simply the address of
     the function's entry point.  For example, on the IA-64, a
     function pointer points to a two-word descriptor, generated by
     the linker, which contains the function's entry point, and the
     value the IA-64 "global pointer" register should have --- to
     support position-independent code.  The linker generates
     descriptors only for those functions whose addresses are taken.

     On such targets, it's difficult for GDB to convert an arbitrary
     function address into a function pointer; it has to either find
     an existing descriptor for that function, or call malloc and
     build its own.  On some targets, it is impossible for GDB to
     build a descriptor at all: the descriptor must contain a jump
     instruction; data memory cannot be executed; and code memory
     cannot be modified.

     Upon entry to this function, if VAL is a value of type `function'
     (that is, TYPE_CODE (VALUE_TYPE (val)) == TYPE_CODE_FUNC), then
     value_address (val) is the address of the function.  This is what
     you'll get if you evaluate an expression like `main'.  The call
     to COERCE_ARRAY below actually does all the usual unary
     conversions, which includes converting values of type `function'
     to `pointer to function'.  This is the challenging conversion
     discussed above.  Then, `unpack_long' will convert that pointer
     back into an address.

     So, suppose the user types `disassemble foo' on an architecture
     with a strange function pointer representation, on which GDB
     cannot build its own descriptors, and suppose further that `foo'
     has no linker-built descriptor.  The address->pointer conversion
     will signal an error and prevent the command from running, even
     though the next step would have been to convert the pointer
     directly back into the same address.

     The following shortcut avoids this whole mess.  If VAL is a
     function, just return its address directly.  */
  if (TYPE_CODE (value_type (val)) == TYPE_CODE_FUNC
      || TYPE_CODE (value_type (val)) == TYPE_CODE_METHOD)
    return value_address (val);

  val = coerce_array (val);

  /* Some architectures (e.g. Harvard), map instruction and data
     addresses onto a single large unified address space.  For
     instance: An architecture may consider a large integer in the
     range 0x10000000 .. 0x1000ffff to already represent a data
     addresses (hence not need a pointer to address conversion) while
     a small integer would still need to be converted integer to
     pointer to address.  Just assume such architectures handle all
     integer conversions in a single function.  */

  /* JimB writes:

     I think INTEGER_TO_ADDRESS is a good idea as proposed --- but we
     must admonish GDB hackers to make sure its behavior matches the
     compiler's, whenever possible.

     In general, I think GDB should evaluate expressions the same way
     the compiler does.  When the user copies an expression out of
     their source code and hands it to a `print' command, they should
     get the same value the compiler would have computed.  Any
     deviation from this rule can cause major confusion and annoyance,
     and needs to be justified carefully.  In other words, GDB doesn't
     really have the freedom to do these conversions in clever and
     useful ways.

     AndrewC pointed out that users aren't complaining about how GDB
     casts integers to pointers; they are complaining that they can't
     take an address from a disassembly listing and give it to `x/i'.
     This is certainly important.

     Adding an architecture method like integer_to_address() certainly
     makes it possible for GDB to "get it right" in all circumstances
     --- the target has complete control over how things get done, so
     people can Do The Right Thing for their target without breaking
     anyone else.  The standard doesn't specify how integers get
     converted to pointers; usually, the ABI doesn't either, but
     ABI-specific code is a more reasonable place to handle it.  */

  if (TYPE_CODE (value_type (val)) != TYPE_CODE_PTR
      && TYPE_CODE (value_type (val)) != TYPE_CODE_REF
      && gdbarch_integer_to_address_p (gdbarch))
    return gdbarch_integer_to_address (gdbarch, value_type (val),
				       value_contents (val));

  /* Fallback: reinterpret the value's raw bytes as an address.  */
  return unpack_long (value_type (val), value_contents (val));
#endif
}
2904 \f
/* Unpack raw data (copied from debugee, target byte order) at VALADDR
   as a long, or as a double, assuming the raw data is described
   by type TYPE.  Knows how to convert different sizes of values
   and can convert between fixed and floating point.  We don't assume
   any alignment for the raw data.  Return value is in host byte order.

   If you want functions and arrays to be coerced to pointers, and
   references to be dereferenced, call value_as_long() instead.

   C++: It is assumed that the front-end has taken care of
   all matters concerning pointers to members.  A pointer
   to member which reaches here is considered to be equivalent
   to an INT (or some size).  After all, it is only an offset.  */

LONGEST
unpack_long (struct type *type, const gdb_byte *valaddr)
{
  enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
  enum type_code code = TYPE_CODE (type);
  int len = TYPE_LENGTH (type);
  int nosign = TYPE_UNSIGNED (type);

  switch (code)
    {
    case TYPE_CODE_TYPEDEF:
      /* Strip one level of typedef and retry.  */
      return unpack_long (check_typedef (type), valaddr);
    case TYPE_CODE_ENUM:
    case TYPE_CODE_FLAGS:
    case TYPE_CODE_BOOL:
    case TYPE_CODE_INT:
    case TYPE_CODE_CHAR:
    case TYPE_CODE_RANGE:
    case TYPE_CODE_MEMBERPTR:
      /* Integer-like: zero- or sign-extend according to TYPE's
	 signedness.  */
      if (nosign)
	return extract_unsigned_integer (valaddr, len, byte_order);
      else
	return extract_signed_integer (valaddr, len, byte_order);

    case TYPE_CODE_FLT:
      /* Truncates toward zero, as a C cast from float would.  */
      return (LONGEST) extract_typed_floating (valaddr, type);

    case TYPE_CODE_DECFLOAT:
      /* libdecnumber has a function to convert from decimal to integer, but
	 it doesn't work when the decimal number has a fractional part.  */
      return (LONGEST) decimal_to_doublest (valaddr, len, byte_order);

    case TYPE_CODE_PTR:
    case TYPE_CODE_REF:
      /* Assume a CORE_ADDR can fit in a LONGEST (for now).  Not sure
	 whether we want this to be true eventually.  */
      return extract_typed_address (valaddr, type);

    default:
      error (_("Value can't be converted to integer."));
    }
  return 0;			/* Placate lint.  */
}
2962
/* Return a double value from the specified type and address.
   INVP points to an int which is set to 0 for valid value,
   1 for invalid value (bad float format).  In either case,
   the returned double is OK to use.  Argument is in target
   format, result is in host format.  */

DOUBLEST
unpack_double (struct type *type, const gdb_byte *valaddr, int *invp)
{
  enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
  enum type_code code;
  int len;
  int nosign;

  *invp = 0;			/* Assume valid.  */
  type = check_typedef (type);
  code = TYPE_CODE (type);
  len = TYPE_LENGTH (type);
  nosign = TYPE_UNSIGNED (type);
  if (code == TYPE_CODE_FLT)
    {
      /* NOTE: cagney/2002-02-19: There was a test here to see if the
	 floating-point value was valid (using the macro
	 INVALID_FLOAT).  That test/macro have been removed.

	 It turns out that only the VAX defined this macro and then
	 only in a non-portable way.  Fixing the portability problem
	 wouldn't help since the VAX floating-point code is also badly
	 bit-rotten.  The target needs to add definitions for the
	 methods gdbarch_float_format and gdbarch_double_format - these
	 exactly describe the target floating-point format.  The
	 problem here is that the corresponding floatformat_vax_f and
	 floatformat_vax_d values these methods should be set to are
	 also not defined either.  Oops!

	 Hopefully someone will add both the missing floatformat
	 definitions and the new cases for floatformat_is_valid ().  */

      if (!floatformat_is_valid (floatformat_from_type (type), valaddr))
	{
	  /* Bad bit pattern for this float format: flag it but still
	     return a usable (zero) result, per the contract above.  */
	  *invp = 1;
	  return 0.0;
	}

      return extract_typed_floating (valaddr, type);
    }
  else if (code == TYPE_CODE_DECFLOAT)
    return decimal_to_doublest (valaddr, len, byte_order);
  else if (nosign)
    {
      /* Unsigned -- be sure we compensate for signed LONGEST.  */
      return (ULONGEST) unpack_long (type, valaddr);
    }
  else
    {
      /* Signed -- we are OK with unpack_long.  */
      return unpack_long (type, valaddr);
    }
}
3022
3023 /* Unpack raw data (copied from debugee, target byte order) at VALADDR
3024 as a CORE_ADDR, assuming the raw data is described by type TYPE.
3025 We don't assume any alignment for the raw data. Return value is in
3026 host byte order.
3027
3028 If you want functions and arrays to be coerced to pointers, and
3029 references to be dereferenced, call value_as_address() instead.
3030
3031 C++: It is assumed that the front-end has taken care of
3032 all matters concerning pointers to members. A pointer
3033 to member which reaches here is considered to be equivalent
3034 to an INT (or some size). After all, it is only an offset. */
3035
3036 CORE_ADDR
3037 unpack_pointer (struct type *type, const gdb_byte *valaddr)
3038 {
3039 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
3040 whether we want this to be true eventually. */
3041 return unpack_long (type, valaddr);
3042 }
3043
3044 \f
/* Get the value of the FIELDNO'th field (which must be static) of
   TYPE.  */

struct value *
value_static_field (struct type *type, int fieldno)
{
  struct value *retval;

  switch (TYPE_FIELD_LOC_KIND (type, fieldno))
    {
    case FIELD_LOC_KIND_PHYSADDR:
      /* The debug info supplied the member's address directly.  */
      retval = value_at_lazy (TYPE_FIELD_TYPE (type, fieldno),
			      TYPE_FIELD_STATIC_PHYSADDR (type, fieldno));
      break;
    case FIELD_LOC_KIND_PHYSNAME:
    {
      /* The debug info supplied only a physical (mangled) name; look
	 the member up by symbol, falling back to minimal symbols.  */
      const char *phys_name = TYPE_FIELD_STATIC_PHYSNAME (type, fieldno);
      /* TYPE_FIELD_NAME (type, fieldno); */
      struct block_symbol sym = lookup_symbol (phys_name, 0, VAR_DOMAIN, 0);

      if (sym.symbol == NULL)
	{
	  /* With some compilers, e.g. HP aCC, static data members are
	     reported as non-debuggable symbols.  */
	  struct bound_minimal_symbol msym
	    = lookup_minimal_symbol (phys_name, NULL, NULL);

	  if (!msym.minsym)
	    /* No symbol at all -- treat the member as optimized out.  */
	    return allocate_optimized_out_value (type);
	  else
	    {
	      retval = value_at_lazy (TYPE_FIELD_TYPE (type, fieldno),
				      BMSYMBOL_VALUE_ADDRESS (msym));
	    }
	}
      else
	retval = value_of_variable (sym.symbol, sym.block);
      break;
    }
    default:
      gdb_assert_not_reached ("unexpected field location kind");
    }

  return retval;
}
3090
3091 /* Change the enclosing type of a value object VAL to NEW_ENCL_TYPE.
3092 You have to be careful here, since the size of the data area for the value
3093 is set by the length of the enclosing type. So if NEW_ENCL_TYPE is bigger
3094 than the old enclosing type, you have to allocate more space for the
3095 data. */
3096
3097 void
3098 set_value_enclosing_type (struct value *val, struct type *new_encl_type)
3099 {
3100 if (TYPE_LENGTH (new_encl_type) > TYPE_LENGTH (value_enclosing_type (val)))
3101 {
3102 check_type_length_before_alloc (new_encl_type);
3103 val->contents
3104 = (gdb_byte *) xrealloc (val->contents, TYPE_LENGTH (new_encl_type));
3105 }
3106
3107 val->enclosing_type = new_encl_type;
3108 }
3109
/* Given a value ARG1 (offset by OFFSET bytes)
   of a struct or union type ARG_TYPE,
   extract and return the value of one of its (non-static) fields.
   FIELDNO says which field.

   There are four distinct cases below: packed bitfields, base-class
   subobjects, dynamic data members, and plain data members.  */

struct value *
value_primitive_field (struct value *arg1, LONGEST offset,
		       int fieldno, struct type *arg_type)
{
  struct value *v;
  struct type *type;
  struct gdbarch *arch = get_value_arch (arg1);
  int unit_size = gdbarch_addressable_memory_unit_size (arch);

  arg_type = check_typedef (arg_type);
  type = TYPE_FIELD_TYPE (arg_type, fieldno);

  /* Call check_typedef on our type to make sure that, if TYPE
     is a TYPE_CODE_TYPEDEF, its length is set to the length
     of the target type instead of zero.  However, we do not
     replace the typedef type by the target type, because we want
     to keep the typedef in order to be able to print the type
     description correctly.  */
  check_typedef (type);

  if (TYPE_FIELD_BITSIZE (arg_type, fieldno))
    {
      /* Handle packed fields.

	 Create a new value for the bitfield, with bitpos and bitsize
	 set.  If possible, arrange offset and bitpos so that we can
	 do a single aligned read of the size of the containing type.
	 Otherwise, adjust offset to the byte containing the first
	 bit.  Assume that the address, offset, and embedded offset
	 are sufficiently aligned.  */

      LONGEST bitpos = TYPE_FIELD_BITPOS (arg_type, fieldno);
      LONGEST container_bitsize = TYPE_LENGTH (type) * 8;

      v = allocate_value_lazy (type);
      v->bitsize = TYPE_FIELD_BITSIZE (arg_type, fieldno);
      /* Prefer a bitpos relative to an aligned container read when the
	 whole container fits in a LONGEST; otherwise fall back to the
	 byte holding the first bit.  */
      if ((bitpos % container_bitsize) + v->bitsize <= container_bitsize
	  && TYPE_LENGTH (type) <= (int) sizeof (LONGEST))
	v->bitpos = bitpos % container_bitsize;
      else
	v->bitpos = bitpos % 8;
      v->offset = (value_embedded_offset (arg1)
		   + offset
		   + (bitpos - v->bitpos) / 8);
      set_value_parent (v, arg1);
      /* If the parent's contents are already available, unpack the
	 bitfield eagerly; otherwise it stays lazy.  */
      if (!value_lazy (arg1))
	value_fetch_lazy (v);
    }
  else if (fieldno < TYPE_N_BASECLASSES (arg_type))
    {
      /* This field is actually a base subobject, so preserve the
	 entire object's contents for later references to virtual
	 bases, etc.  */
      LONGEST boffset;

      /* Lazy register values with offsets are not supported.  */
      if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
	value_fetch_lazy (arg1);

      /* We special case virtual inheritance here because this
	 requires access to the contents, which we would rather avoid
	 for references to ordinary fields of unavailable values.  */
      if (BASETYPE_VIA_VIRTUAL (arg_type, fieldno))
	boffset = baseclass_offset (arg_type, fieldno,
				    value_contents (arg1),
				    value_embedded_offset (arg1),
				    value_address (arg1),
				    arg1);
      else
	boffset = TYPE_FIELD_BITPOS (arg_type, fieldno) / 8;

      if (value_lazy (arg1))
	v = allocate_value_lazy (value_enclosing_type (arg1));
      else
	{
	  v = allocate_value (value_enclosing_type (arg1));
	  value_contents_copy_raw (v, 0, arg1, 0,
				   TYPE_LENGTH (value_enclosing_type (arg1)));
	}
      v->type = type;
      v->offset = value_offset (arg1);
      /* Point the embedded offset at the base subobject within the
	 preserved whole-object contents.  */
      v->embedded_offset = offset + value_embedded_offset (arg1) + boffset;
    }
  else if (NULL != TYPE_DATA_LOCATION (type))
    {
      /* Field is a dynamic data member.  */

      gdb_assert (0 == offset);
      /* We expect an already resolved data location.  */
      gdb_assert (PROP_CONST == TYPE_DATA_LOCATION_KIND (type));
      /* For dynamic data types defer memory allocation
         until we actual access the value.  */
      v = allocate_value_lazy (type);
    }
  else
    {
      /* Plain old data member.  Convert the field's bit position to a
	 byte (addressable-unit) offset within ARG1.  */
      offset += (TYPE_FIELD_BITPOS (arg_type, fieldno)
		 / (HOST_CHAR_BIT * unit_size));

      /* Lazy register values with offsets are not supported.  */
      if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
	value_fetch_lazy (arg1);

      if (value_lazy (arg1))
	v = allocate_value_lazy (type);
      else
	{
	  v = allocate_value (type);
	  value_contents_copy_raw (v, value_embedded_offset (v),
				   arg1, value_embedded_offset (arg1) + offset,
				   type_length_units (type));
	}
      v->offset = (value_offset (arg1) + offset
		   + value_embedded_offset (arg1));
    }
  /* The component inherits ARG1's location, register number and frame
     identity regardless of which branch created it.  */
  set_value_component_location (v, arg1);
  VALUE_REGNUM (v) = VALUE_REGNUM (arg1);
  VALUE_NEXT_FRAME_ID (v) = VALUE_NEXT_FRAME_ID (arg1);
  return v;
}
3236
/* Given a value ARG1 of a struct or union type,
   extract and return the value of one of its (non-static) fields.
   FIELDNO says which field.  */

struct value *
value_field (struct value *arg1, int fieldno)
{
  /* A direct field access is a primitive field access at offset zero,
     using the value's own type.  */
  struct type *arg_type = value_type (arg1);

  return value_primitive_field (arg1, 0, fieldno, arg_type);
}
3246
/* Return a non-virtual function as a value.
   F is the list of member functions which contains the desired method.
   J is an index into F which provides the desired method.

   We only use the symbol for its address, so be happy with either a
   full symbol or a minimal symbol.

   Returns NULL if the method's physname cannot be resolved to either
   kind of symbol.  If ARG1P is non-NULL, *ARG1P may be replaced by a
   re-cast of the `this' value to TYPE.  */

struct value *
value_fn_field (struct value **arg1p, struct fn_field *f,
		int j, struct type *type,
		LONGEST offset)
{
  struct value *v;
  struct type *ftype = TYPE_FN_FIELD_TYPE (f, j);
  const char *physname = TYPE_FN_FIELD_PHYSNAME (f, j);
  struct symbol *sym;
  struct bound_minimal_symbol msym;

  /* Prefer a full symbol; fall back to a minimal symbol.  */
  sym = lookup_symbol (physname, 0, VAR_DOMAIN, 0).symbol;
  if (sym != NULL)
    {
      /* Zero MSYM so the branch below can rely on SYM alone.  */
      memset (&msym, 0, sizeof (msym));
    }
  else
    {
      /* NOTE(review): this assert is trivially true in the else
	 branch; it only documents the invariant.  */
      gdb_assert (sym == NULL);
      msym = lookup_bound_minimal_symbol (physname);
      if (msym.minsym == NULL)
	return NULL;
    }

  v = allocate_value (ftype);
  if (sym)
    {
      set_value_address (v, BLOCK_START (SYMBOL_BLOCK_VALUE (sym)));
    }
  else
    {
      /* The minimal symbol might point to a function descriptor;
	 resolve it to the actual code address instead.  */
      struct objfile *objfile = msym.objfile;
      struct gdbarch *gdbarch = get_objfile_arch (objfile);

      set_value_address (v,
	gdbarch_convert_from_func_ptr_addr
	   (gdbarch, BMSYMBOL_VALUE_ADDRESS (msym), &current_target));
    }

  if (arg1p)
    {
      /* Re-view the `this' value through TYPE if it differs.  */
      if (type != value_type (*arg1p))
	*arg1p = value_ind (value_cast (lookup_pointer_type (type),
					value_addr (*arg1p)));

      /* Move the `this' pointer according to the offset.
	 VALUE_OFFSET (*arg1p) += offset; */
    }

  return v;
}
3307
3308 \f
3309
/* Unpack a bitfield of the specified FIELD_TYPE, from the object at
   VALADDR, and store the result in *RESULT.
   The bitfield starts at BITPOS bits and contains BITSIZE bits.

   Extracting bits depends on endianness of the machine.  Compute the
   number of least significant bits to discard.  For big endian machines,
   we compute the total number of bits in the anonymous object, subtract
   off the bit count from the MSB of the object to the MSB of the
   bitfield, then the size of the bitfield, which leaves the LSB discard
   count.  For little endian machines, the discard count is simply the
   number of bits from the LSB of the anonymous object to the LSB of the
   bitfield.

   If the field is signed, we also do sign extension.  */

static LONGEST
unpack_bits_as_long (struct type *field_type, const gdb_byte *valaddr,
		     LONGEST bitpos, LONGEST bitsize)
{
  enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (field_type));
  ULONGEST val;
  ULONGEST valmask;
  int lsbcount;
  LONGEST bytes_read;
  LONGEST read_offset;

  /* Read the minimum number of bytes required; there may not be
     enough bytes to read an entire ULONGEST.  */
  field_type = check_typedef (field_type);
  if (bitsize)
    bytes_read = ((bitpos % 8) + bitsize + 7) / 8;
  else
    /* BITSIZE == 0 means "not a bitfield": read the full type.  */
    bytes_read = TYPE_LENGTH (field_type);

  read_offset = bitpos / 8;

  val = extract_unsigned_integer (valaddr + read_offset,
				  bytes_read, byte_order);

  /* Extract bits.  See comment above.  */

  if (gdbarch_bits_big_endian (get_type_arch (field_type)))
    lsbcount = (bytes_read * 8 - bitpos % 8 - bitsize);
  else
    lsbcount = (bitpos % 8);
  val >>= lsbcount;

  /* If the field does not entirely fill a LONGEST, then zero the sign bits.
     If the field is signed, and is negative, then sign extend.  */

  if ((bitsize > 0) && (bitsize < 8 * (int) sizeof (val)))
    {
      valmask = (((ULONGEST) 1) << bitsize) - 1;
      val &= valmask;
      if (!TYPE_UNSIGNED (field_type))
	{
	  /* Sign-extend if the field's top bit is set.  */
	  if (val & (valmask ^ (valmask >> 1)))
	    {
	      val |= ~valmask;
	    }
	}
    }

  return val;
}
3375
3376 /* Unpack a field FIELDNO of the specified TYPE, from the object at
3377 VALADDR + EMBEDDED_OFFSET. VALADDR points to the contents of
3378 ORIGINAL_VALUE, which must not be NULL. See
3379 unpack_value_bits_as_long for more details. */
3380
3381 int
3382 unpack_value_field_as_long (struct type *type, const gdb_byte *valaddr,
3383 LONGEST embedded_offset, int fieldno,
3384 const struct value *val, LONGEST *result)
3385 {
3386 int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3387 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3388 struct type *field_type = TYPE_FIELD_TYPE (type, fieldno);
3389 int bit_offset;
3390
3391 gdb_assert (val != NULL);
3392
3393 bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos;
3394 if (value_bits_any_optimized_out (val, bit_offset, bitsize)
3395 || !value_bits_available (val, bit_offset, bitsize))
3396 return 0;
3397
3398 *result = unpack_bits_as_long (field_type, valaddr + embedded_offset,
3399 bitpos, bitsize);
3400 return 1;
3401 }
3402
3403 /* Unpack a field FIELDNO of the specified TYPE, from the anonymous
3404 object at VALADDR. See unpack_bits_as_long for more details. */
3405
3406 LONGEST
3407 unpack_field_as_long (struct type *type, const gdb_byte *valaddr, int fieldno)
3408 {
3409 int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3410 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3411 struct type *field_type = TYPE_FIELD_TYPE (type, fieldno);
3412
3413 return unpack_bits_as_long (field_type, valaddr, bitpos, bitsize);
3414 }
3415
/* Unpack a bitfield of BITSIZE bits found at BITPOS in the object at
   VALADDR + EMBEDDEDOFFSET that has the type of DEST_VAL and store
   the contents in DEST_VAL, zero or sign extending if the type of
   DEST_VAL is wider than BITSIZE.  VALADDR points to the contents of
   VAL.  If the VAL's contents required to extract the bitfield from
   are unavailable/optimized out, DEST_VAL is correspondingly
   marked unavailable/optimized out.  */

void
unpack_value_bitfield (struct value *dest_val,
		       LONGEST bitpos, LONGEST bitsize,
		       const gdb_byte *valaddr, LONGEST embedded_offset,
		       const struct value *val)
{
  enum bfd_endian byte_order;
  int src_bit_offset;
  int dst_bit_offset;
  struct type *field_type = value_type (dest_val);

  byte_order = gdbarch_byte_order (get_type_arch (field_type));

  /* First, unpack and sign extend the bitfield as if it was wholly
     valid.  Optimized out/unavailable bits are read as zero, but
     that's OK, as they'll end up marked below.  If the VAL is
     wholly-invalid we may have skipped allocating its contents,
     though.  See allocate_optimized_out_value.  */
  if (valaddr != NULL)
    {
      LONGEST num;

      num = unpack_bits_as_long (field_type, valaddr + embedded_offset,
				 bitpos, bitsize);
      /* Store the (sign/zero-extended) result across the full width
	 of FIELD_TYPE in DEST_VAL's buffer.  */
      store_signed_integer (value_contents_raw (dest_val),
			    TYPE_LENGTH (field_type), byte_order, num);
    }

  /* Now copy the optimized out / unavailability ranges to the right
     bits.  On big-endian targets the bitfield occupies the high end
     of DEST_VAL's buffer, so the destination bit offset differs.  */
  src_bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos;
  if (byte_order == BFD_ENDIAN_BIG)
    dst_bit_offset = TYPE_LENGTH (field_type) * TARGET_CHAR_BIT - bitsize;
  else
    dst_bit_offset = 0;
  value_ranges_copy_adjusted (dest_val, dst_bit_offset,
			      val, src_bit_offset, bitsize);
}
3462
3463 /* Return a new value with type TYPE, which is FIELDNO field of the
3464 object at VALADDR + EMBEDDEDOFFSET. VALADDR points to the contents
3465 of VAL. If the VAL's contents required to extract the bitfield
3466 from are unavailable/optimized out, the new value is
3467 correspondingly marked unavailable/optimized out. */
3468
3469 struct value *
3470 value_field_bitfield (struct type *type, int fieldno,
3471 const gdb_byte *valaddr,
3472 LONGEST embedded_offset, const struct value *val)
3473 {
3474 int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3475 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3476 struct value *res_val = allocate_value (TYPE_FIELD_TYPE (type, fieldno));
3477
3478 unpack_value_bitfield (res_val, bitpos, bitsize,
3479 valaddr, embedded_offset, val);
3480
3481 return res_val;
3482 }
3483
/* Modify the value of a bitfield.  ADDR points to a block of memory in
   target byte order; the bitfield starts in the byte pointed to.  FIELDVAL
   is the desired value of the field, in host byte order.  BITPOS and BITSIZE
   indicate which bits (in target bit order) comprise the bitfield.
   Requires 0 < BITSIZE <= lbits, 0 <= BITPOS % 8 + BITSIZE <= lbits, and
   0 <= BITPOS, where lbits is the size of a LONGEST in bits.  */

void
modify_field (struct type *type, gdb_byte *addr,
	      LONGEST fieldval, LONGEST bitpos, LONGEST bitsize)
{
  enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
  ULONGEST oword;
  /* Mask of BITSIZE low-order ones (relies on BITSIZE > 0 per the
     contract above; BITSIZE == 0 would shift by the full width).  */
  ULONGEST mask = (ULONGEST) -1 >> (8 * sizeof (ULONGEST) - bitsize);
  LONGEST bytesize;

  /* Normalize BITPOS.  */
  addr += bitpos / 8;
  bitpos %= 8;

  /* If a negative fieldval fits in the field in question, chop
     off the sign extension bits.  */
  if ((~fieldval & ~(mask >> 1)) == 0)
    fieldval &= mask;

  /* Warn if value is too big to fit in the field in question.  */
  if (0 != (fieldval & ~mask))
    {
      /* FIXME: would like to include fieldval in the message, but
	 we don't have a sprintf_longest.  */
      warning (_("Value does not fit in %s bits."), plongest (bitsize));

      /* Truncate it, otherwise adjoining fields may be corrupted.  */
      fieldval &= mask;
    }

  /* Ensure no bytes outside of the modified ones get accessed as it may cause
     false valgrind reports.  */

  bytesize = (bitpos + bitsize + 7) / 8;
  oword = extract_unsigned_integer (addr, bytesize, byte_order);

  /* Shifting for bit field depends on endianness of the target machine.  */
  if (gdbarch_bits_big_endian (get_type_arch (type)))
    bitpos = bytesize * 8 - bitpos - bitsize;

  /* Clear the field's bits in the existing word, then merge in the
     (already-masked) new value.  */
  oword &= ~(mask << bitpos);
  oword |= fieldval << bitpos;

  store_unsigned_integer (addr, bytesize, byte_order, oword);
}
3535 \f
3536 /* Pack NUM into BUF using a target format of TYPE. */
3537
3538 void
3539 pack_long (gdb_byte *buf, struct type *type, LONGEST num)
3540 {
3541 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
3542 LONGEST len;
3543
3544 type = check_typedef (type);
3545 len = TYPE_LENGTH (type);
3546
3547 switch (TYPE_CODE (type))
3548 {
3549 case TYPE_CODE_INT:
3550 case TYPE_CODE_CHAR:
3551 case TYPE_CODE_ENUM:
3552 case TYPE_CODE_FLAGS:
3553 case TYPE_CODE_BOOL:
3554 case TYPE_CODE_RANGE:
3555 case TYPE_CODE_MEMBERPTR:
3556 store_signed_integer (buf, len, byte_order, num);
3557 break;
3558
3559 case TYPE_CODE_REF:
3560 case TYPE_CODE_PTR:
3561 store_typed_address (buf, type, (CORE_ADDR) num);
3562 break;
3563
3564 default:
3565 error (_("Unexpected type (%d) encountered for integer constant."),
3566 TYPE_CODE (type));
3567 }
3568 }
3569
3570
3571 /* Pack NUM into BUF using a target format of TYPE. */
3572
3573 static void
3574 pack_unsigned_long (gdb_byte *buf, struct type *type, ULONGEST num)
3575 {
3576 LONGEST len;
3577 enum bfd_endian byte_order;
3578
3579 type = check_typedef (type);
3580 len = TYPE_LENGTH (type);
3581 byte_order = gdbarch_byte_order (get_type_arch (type));
3582
3583 switch (TYPE_CODE (type))
3584 {
3585 case TYPE_CODE_INT:
3586 case TYPE_CODE_CHAR:
3587 case TYPE_CODE_ENUM:
3588 case TYPE_CODE_FLAGS:
3589 case TYPE_CODE_BOOL:
3590 case TYPE_CODE_RANGE:
3591 case TYPE_CODE_MEMBERPTR:
3592 store_unsigned_integer (buf, len, byte_order, num);
3593 break;
3594
3595 case TYPE_CODE_REF:
3596 case TYPE_CODE_PTR:
3597 store_typed_address (buf, type, (CORE_ADDR) num);
3598 break;
3599
3600 default:
3601 error (_("Unexpected type (%d) encountered "
3602 "for unsigned integer constant."),
3603 TYPE_CODE (type));
3604 }
3605 }
3606
3607
3608 /* Convert C numbers into newly allocated values. */
3609
3610 struct value *
3611 value_from_longest (struct type *type, LONGEST num)
3612 {
3613 struct value *val = allocate_value (type);
3614
3615 pack_long (value_contents_raw (val), type, num);
3616 return val;
3617 }
3618
3619
3620 /* Convert C unsigned numbers into newly allocated values. */
3621
3622 struct value *
3623 value_from_ulongest (struct type *type, ULONGEST num)
3624 {
3625 struct value *val = allocate_value (type);
3626
3627 pack_unsigned_long (value_contents_raw (val), type, num);
3628
3629 return val;
3630 }
3631
3632
3633 /* Create a value representing a pointer of type TYPE to the address
3634 ADDR. */
3635
3636 struct value *
3637 value_from_pointer (struct type *type, CORE_ADDR addr)
3638 {
3639 struct value *val = allocate_value (type);
3640
3641 store_typed_address (value_contents_raw (val),
3642 check_typedef (type), addr);
3643 return val;
3644 }
3645
3646
3647 /* Create a value of type TYPE whose contents come from VALADDR, if it
3648 is non-null, and whose memory address (in the inferior) is
3649 ADDRESS. The type of the created value may differ from the passed
3650 type TYPE. Make sure to retrieve values new type after this call.
3651 Note that TYPE is not passed through resolve_dynamic_type; this is
3652 a special API intended for use only by Ada. */
3653
3654 struct value *
3655 value_from_contents_and_address_unresolved (struct type *type,
3656 const gdb_byte *valaddr,
3657 CORE_ADDR address)
3658 {
3659 struct value *v;
3660
3661 if (valaddr == NULL)
3662 v = allocate_value_lazy (type);
3663 else
3664 v = value_from_contents (type, valaddr);
3665 set_value_address (v, address);
3666 VALUE_LVAL (v) = lval_memory;
3667 return v;
3668 }
3669
3670 /* Create a value of type TYPE whose contents come from VALADDR, if it
3671 is non-null, and whose memory address (in the inferior) is
3672 ADDRESS. The type of the created value may differ from the passed
3673 type TYPE. Make sure to retrieve values new type after this call. */
3674
3675 struct value *
3676 value_from_contents_and_address (struct type *type,
3677 const gdb_byte *valaddr,
3678 CORE_ADDR address)
3679 {
3680 struct type *resolved_type = resolve_dynamic_type (type, valaddr, address);
3681 struct type *resolved_type_no_typedef = check_typedef (resolved_type);
3682 struct value *v;
3683
3684 if (valaddr == NULL)
3685 v = allocate_value_lazy (resolved_type);
3686 else
3687 v = value_from_contents (resolved_type, valaddr);
3688 if (TYPE_DATA_LOCATION (resolved_type_no_typedef) != NULL
3689 && TYPE_DATA_LOCATION_KIND (resolved_type_no_typedef) == PROP_CONST)
3690 address = TYPE_DATA_LOCATION_ADDR (resolved_type_no_typedef);
3691 set_value_address (v, address);
3692 VALUE_LVAL (v) = lval_memory;
3693 return v;
3694 }
3695
3696 /* Create a value of type TYPE holding the contents CONTENTS.
3697 The new value is `not_lval'. */
3698
3699 struct value *
3700 value_from_contents (struct type *type, const gdb_byte *contents)
3701 {
3702 struct value *result;
3703
3704 result = allocate_value (type);
3705 memcpy (value_contents_raw (result), contents, TYPE_LENGTH (type));
3706 return result;
3707 }
3708
3709 struct value *
3710 value_from_double (struct type *type, DOUBLEST num)
3711 {
3712 struct value *val = allocate_value (type);
3713 struct type *base_type = check_typedef (type);
3714 enum type_code code = TYPE_CODE (base_type);
3715
3716 if (code == TYPE_CODE_FLT)
3717 {
3718 store_typed_floating (value_contents_raw (val), base_type, num);
3719 }
3720 else
3721 error (_("Unexpected type encountered for floating constant."));
3722
3723 return val;
3724 }
3725
3726 struct value *
3727 value_from_decfloat (struct type *type, const gdb_byte *dec)
3728 {
3729 struct value *val = allocate_value (type);
3730
3731 memcpy (value_contents_raw (val), dec, TYPE_LENGTH (type));
3732 return val;
3733 }
3734
/* Extract a value from the history file.  Input will be of the form
   $digits or $$digits.  See block comment above 'write_dollar_variable'
   for details.  */

struct value *
value_from_history_ref (const char *h, const char **endp)
{
  int idx;
  int len;

  /* A history reference must begin with '$'; "$$" selects backward
     (relative) numbering.  */
  if (h[0] != '$')
    return NULL;
  len = (h[1] == '$') ? 2 : 1;

  /* Skip past the numeral, if any.  */
  while (isdigit (h[len]))
    len++;

  /* Make sure numeral string is not part of an identifier.  */
  if (h[len] == '_' || isalpha (h[len]))
    return NULL;

  /* Now collect the index value.  */
  if (h[1] == '$')
    {
      if (len == 2)
	{
	  /* For some bizarre reason, "$$" is equivalent to "$$1",
	     rather than to "$$0" as it ought to be!  */
	  idx = -1;
	  *endp += len;
	}
      else
	{
	  char *numeral_end;

	  /* "$$N" refers backward, hence the negation.  */
	  idx = -strtol (&h[2], &numeral_end, 10);
	  *endp = numeral_end;
	}
    }
  else
    {
      if (len == 1)
	{
	  /* "$" is equivalent to "$0".  */
	  idx = 0;
	  *endp += len;
	}
      else
	{
	  char *numeral_end;

	  idx = strtol (&h[1], &numeral_end, 10);
	  *endp = numeral_end;
	}
    }

  return access_value_history (idx);
}
3797
3798 /* Get the component value (offset by OFFSET bytes) of a struct or
3799 union WHOLE. Component's type is TYPE. */
3800
3801 struct value *
3802 value_from_component (struct value *whole, struct type *type, LONGEST offset)
3803 {
3804 struct value *v;
3805
3806 if (VALUE_LVAL (whole) == lval_memory && value_lazy (whole))
3807 v = allocate_value_lazy (type);
3808 else
3809 {
3810 v = allocate_value (type);
3811 value_contents_copy (v, value_embedded_offset (v),
3812 whole, value_embedded_offset (whole) + offset,
3813 type_length_units (type));
3814 }
3815 v->offset = value_offset (whole) + offset + value_embedded_offset (whole);
3816 set_value_component_location (v, whole);
3817 VALUE_REGNUM (v) = VALUE_REGNUM (whole);
3818 VALUE_FRAME_ID (v) = VALUE_FRAME_ID (whole);
3819
3820 return v;
3821 }
3822
3823 struct value *
3824 coerce_ref_if_computed (const struct value *arg)
3825 {
3826 const struct lval_funcs *funcs;
3827
3828 if (TYPE_CODE (check_typedef (value_type (arg))) != TYPE_CODE_REF)
3829 return NULL;
3830
3831 if (value_lval_const (arg) != lval_computed)
3832 return NULL;
3833
3834 funcs = value_computed_funcs (arg);
3835 if (funcs->coerce_ref == NULL)
3836 return NULL;
3837
3838 return funcs->coerce_ref (arg);
3839 }
3840
/* Look at value.h for description.

   Adjusts VALUE (the result of dereferencing ORIGINAL_VALUE of type
   ORIGINAL_TYPE) so its type, enclosing type and embedded offset
   reflect the pointed-to object, then resolves the full (possibly
   derived) runtime object.  */

struct value *
readjust_indirect_value_type (struct value *value, struct type *enc_type,
			      const struct type *original_type,
			      const struct value *original_value)
{
  /* Re-adjust type.  */
  deprecated_set_value_type (value, TYPE_TARGET_TYPE (original_type));

  /* Add embedding info.  */
  set_value_enclosing_type (value, enc_type);
  set_value_embedded_offset (value, value_pointed_to_offset (original_value));

  /* We may be pointing to an object of some derived type.  */
  return value_full_object (value, NULL, 0, 0, 0);
}
3858
/* If ARG is a reference, return a lazy value for the referenced
   object, with type and embedding information readjusted; otherwise
   return ARG unchanged.  Computed lvalues may supply their own
   coercion via coerce_ref_if_computed.  */

struct value *
coerce_ref (struct value *arg)
{
  struct type *value_type_arg_tmp = check_typedef (value_type (arg));
  struct value *retval;
  struct type *enc_type;

  /* Let a computed lvalue's coerce_ref hook take precedence.  */
  retval = coerce_ref_if_computed (arg);
  if (retval)
    return retval;

  /* Non-references pass through untouched.  */
  if (TYPE_CODE (value_type_arg_tmp) != TYPE_CODE_REF)
    return arg;

  /* The enclosing type of the reference is a reference to the
     (possibly more-derived) enclosing type of the referent.  */
  enc_type = check_typedef (value_enclosing_type (arg));
  enc_type = TYPE_TARGET_TYPE (enc_type);

  /* Read the referent lazily at the address stored in the reference.  */
  retval = value_at_lazy (enc_type,
			  unpack_pointer (value_type (arg),
					  value_contents (arg)));
  enc_type = value_type (retval);
  return readjust_indirect_value_type (retval, enc_type,
				       value_type_arg_tmp, arg);
}
3883
3884 struct value *
3885 coerce_array (struct value *arg)
3886 {
3887 struct type *type;
3888
3889 arg = coerce_ref (arg);
3890 type = check_typedef (value_type (arg));
3891
3892 switch (TYPE_CODE (type))
3893 {
3894 case TYPE_CODE_ARRAY:
3895 if (!TYPE_VECTOR (type) && current_language->c_style_arrays)
3896 arg = value_coerce_array (arg);
3897 break;
3898 case TYPE_CODE_FUNC:
3899 arg = value_coerce_function (arg);
3900 break;
3901 }
3902 return arg;
3903 }
3904 \f
3905
3906 /* Return the return value convention that will be used for the
3907 specified type. */
3908
3909 enum return_value_convention
3910 struct_return_convention (struct gdbarch *gdbarch,
3911 struct value *function, struct type *value_type)
3912 {
3913 enum type_code code = TYPE_CODE (value_type);
3914
3915 if (code == TYPE_CODE_ERROR)
3916 error (_("Function return type unknown."));
3917
3918 /* Probe the architecture for the return-value convention. */
3919 return gdbarch_return_value (gdbarch, function, value_type,
3920 NULL, NULL, NULL);
3921 }
3922
3923 /* Return true if the function returning the specified type is using
3924 the convention of returning structures in memory (passing in the
3925 address as a hidden first parameter). */
3926
3927 int
3928 using_struct_return (struct gdbarch *gdbarch,
3929 struct value *function, struct type *value_type)
3930 {
3931 if (TYPE_CODE (value_type) == TYPE_CODE_VOID)
3932 /* A void return value is never in memory. See also corresponding
3933 code in "print_return_value". */
3934 return 0;
3935
3936 return (struct_return_convention (gdbarch, function, value_type)
3937 != RETURN_VALUE_REGISTER_CONVENTION);
3938 }
3939
/* Set the initialized field in a value struct.  STATUS is nonzero for
   an initialized value, zero otherwise.  */

void
set_value_initialized (struct value *val, int status)
{
  val->initialized = status;
}
3947
/* Return the initialized field in a value struct (nonzero when the
   value has been marked initialized).  */

int
value_initialized (const struct value *val)
{
  return val->initialized;
}
3955
/* Load the actual content of a lazy value.  Fetch the data from the
   user's process and clear the lazy flag to indicate that the data in
   the buffer is valid.

   If the value is zero-length, we avoid calling read_memory, which
   would abort.  We mark the value as fetched anyway -- all 0 bytes of
   it.

   Four cases are handled: bitfields (via the parent value), memory
   lvalues, register lvalues (unwinding through frames as needed), and
   computed lvalues with a read callback.  */

void
value_fetch_lazy (struct value *val)
{
  gdb_assert (value_lazy (val));
  allocate_value_contents (val);
  /* A value is either lazy, or fully fetched.  The
     availability/validity is only established as we try to fetch a
     value.  */
  gdb_assert (VEC_empty (range_s, val->optimized_out));
  gdb_assert (VEC_empty (range_s, val->unavailable));
  if (value_bitsize (val))
    {
      /* To read a lazy bitfield, read the entire enclosing value.  This
	 prevents reading the same block of (possibly volatile) memory once
         per bitfield.  It would be even better to read only the containing
         word, but we have no way to record that just specific bits of a
         value have been fetched.  */
      /* NOTE(review): TYPE appears unused in this branch -- confirm.  */
      struct type *type = check_typedef (value_type (val));
      struct value *parent = value_parent (val);

      if (value_lazy (parent))
	value_fetch_lazy (parent);

      unpack_value_bitfield (val,
			     value_bitpos (val), value_bitsize (val),
			     value_contents_for_printing (parent),
			     value_offset (val), parent);
    }
  else if (VALUE_LVAL (val) == lval_memory)
    {
      CORE_ADDR addr = value_address (val);
      struct type *type = check_typedef (value_enclosing_type (val));

      /* Skip the read entirely for zero-length types (see the
	 function comment above).  */
      if (TYPE_LENGTH (type))
	read_value_memory (val, 0, value_stack (val),
			   addr, value_contents_all_raw (val),
			   type_length_units (type));
    }
  else if (VALUE_LVAL (val) == lval_register)
    {
      struct frame_info *next_frame;
      int regnum;
      struct type *type = check_typedef (value_type (val));
      struct value *new_val = val, *mark = value_mark ();

      /* Offsets are not supported here; lazy register values must
	 refer to the entire register.  */
      gdb_assert (value_offset (val) == 0);

      /* Unwind frame after frame until the register's value is no
	 longer a lazy lval_register (i.e. until it is found saved in
	 memory, available directly, or optimized out).  */
      while (VALUE_LVAL (new_val) == lval_register && value_lazy (new_val))
	{
	  struct frame_id next_frame_id = VALUE_NEXT_FRAME_ID (new_val);

	  next_frame = frame_find_by_id (next_frame_id);
	  regnum = VALUE_REGNUM (new_val);

	  gdb_assert (next_frame != NULL);

	  /* Convertible register routines are used for multi-register
	     values and for interpretation in different types
	     (e.g. float or int from a double register).  Lazy
	     register values should have the register's natural type,
	     so they do not apply.  */
	  gdb_assert (!gdbarch_convert_register_p (get_frame_arch (next_frame),
						   regnum, type));

	  /* FRAME was obtained, above, via VALUE_NEXT_FRAME_ID.
	     Since a "->next" operation was performed when setting
	     this field, we do not need to perform a "next" operation
	     again when unwinding the register.  That's why
	     frame_unwind_register_value() is called here instead of
	     get_frame_register_value().  */
	  new_val = frame_unwind_register_value (next_frame, regnum);

	  /* If we get another lazy lval_register value, it means the
	     register is found by reading it from NEXT_FRAME's next frame.
	     frame_unwind_register_value should never return a value with
	     the frame id pointing to NEXT_FRAME.  If it does, it means we
	     either have two consecutive frames with the same frame id
	     in the frame chain, or some code is trying to unwind
	     behind get_prev_frame's back (e.g., a frame unwind
	     sniffer trying to unwind), bypassing its validations.  In
	     any case, it should always be an internal error to end up
	     in this situation.  */
	  if (VALUE_LVAL (new_val) == lval_register
	      && value_lazy (new_val)
	      && frame_id_eq (VALUE_NEXT_FRAME_ID (new_val), next_frame_id))
	    internal_error (__FILE__, __LINE__,
			    _("infinite loop while fetching a register"));
	}

      /* If it's still lazy (for instance, a saved register on the
	 stack), fetch it.  */
      if (value_lazy (new_val))
	value_fetch_lazy (new_val);

      /* Copy the contents and the unavailability/optimized-out
	 meta-data from NEW_VAL to VAL.  */
      set_value_lazy (val, 0);
      value_contents_copy (val, value_embedded_offset (val),
			   new_val, value_embedded_offset (new_val),
			   type_length_units (type));

      if (frame_debug)
	{
	  struct gdbarch *gdbarch;
	  struct frame_info *frame;
	  /* VALUE_FRAME_ID is used here, instead of VALUE_NEXT_FRAME_ID,
	     so that the frame level will be shown correctly.  */
	  frame = frame_find_by_id (VALUE_FRAME_ID (val));
	  regnum = VALUE_REGNUM (val);
	  gdbarch = get_frame_arch (frame);

	  fprintf_unfiltered (gdb_stdlog,
			      "{ value_fetch_lazy "
			      "(frame=%d,regnum=%d(%s),...) ",
			      frame_relative_level (frame), regnum,
			      user_reg_map_regnum_to_name (gdbarch, regnum));

	  fprintf_unfiltered (gdb_stdlog, "->");
	  if (value_optimized_out (new_val))
	    {
	      fprintf_unfiltered (gdb_stdlog, " ");
	      val_print_optimized_out (new_val, gdb_stdlog);
	    }
	  else
	    {
	      int i;
	      const gdb_byte *buf = value_contents (new_val);

	      /* Describe where the register's value was ultimately
		 found, then dump its raw bytes.  */
	      if (VALUE_LVAL (new_val) == lval_register)
		fprintf_unfiltered (gdb_stdlog, " register=%d",
				    VALUE_REGNUM (new_val));
	      else if (VALUE_LVAL (new_val) == lval_memory)
		fprintf_unfiltered (gdb_stdlog, " address=%s",
				    paddress (gdbarch,
					      value_address (new_val)));
	      else
		fprintf_unfiltered (gdb_stdlog, " computed");

	      fprintf_unfiltered (gdb_stdlog, " bytes=");
	      fprintf_unfiltered (gdb_stdlog, "[");
	      for (i = 0; i < register_size (gdbarch, regnum); i++)
		fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
	      fprintf_unfiltered (gdb_stdlog, "]");
	    }

	  fprintf_unfiltered (gdb_stdlog, " }\n");
	}

      /* Dispose of the intermediate values.  This prevents
	 watchpoints from trying to watch the saved frame pointer.  */
      value_free_to_mark (mark);
    }
  else if (VALUE_LVAL (val) == lval_computed
	   && value_computed_funcs (val)->read != NULL)
    value_computed_funcs (val)->read (val);
  else
    internal_error (__FILE__, __LINE__, _("Unexpected lazy value type."));

  set_value_lazy (val, 0);
}
4126
4127 /* Implementation of the convenience function $_isvoid. */
4128
4129 static struct value *
4130 isvoid_internal_fn (struct gdbarch *gdbarch,
4131 const struct language_defn *language,
4132 void *cookie, int argc, struct value **argv)
4133 {
4134 int ret;
4135
4136 if (argc != 1)
4137 error (_("You must provide one argument for $_isvoid."));
4138
4139 ret = TYPE_CODE (value_type (argv[0])) == TYPE_CODE_VOID;
4140
4141 return value_from_longest (builtin_type (gdbarch)->builtin_int, ret);
4142 }
4143
4144 void
4145 _initialize_values (void)
4146 {
4147 add_cmd ("convenience", no_class, show_convenience, _("\
4148 Debugger convenience (\"$foo\") variables and functions.\n\
4149 Convenience variables are created when you assign them values;\n\
4150 thus, \"set $foo=1\" gives \"$foo\" the value 1. Values may be any type.\n\
4151 \n\
4152 A few convenience variables are given values automatically:\n\
4153 \"$_\"holds the last address examined with \"x\" or \"info lines\",\n\
4154 \"$__\" holds the contents of the last address examined with \"x\"."
4155 #ifdef HAVE_PYTHON
4156 "\n\n\
4157 Convenience functions are defined via the Python API."
4158 #endif
4159 ), &showlist);
4160 add_alias_cmd ("conv", "convenience", no_class, 1, &showlist);
4161
4162 add_cmd ("values", no_set_class, show_values, _("\
4163 Elements of value history around item number IDX (or last ten)."),
4164 &showlist);
4165
4166 add_com ("init-if-undefined", class_vars, init_if_undefined_command, _("\
4167 Initialize a convenience variable if necessary.\n\
4168 init-if-undefined VARIABLE = EXPRESSION\n\
4169 Set an internal VARIABLE to the result of the EXPRESSION if it does not\n\
4170 exist or does not contain a value. The EXPRESSION is not evaluated if the\n\
4171 VARIABLE is already initialized."));
4172
4173 add_prefix_cmd ("function", no_class, function_command, _("\
4174 Placeholder command for showing help on convenience functions."),
4175 &functionlist, "function ", 0, &cmdlist);
4176
4177 add_internal_function ("_isvoid", _("\
4178 Check whether an expression is void.\n\
4179 Usage: $_isvoid (expression)\n\
4180 Return 1 if the expression is void, zero otherwise."),
4181 isvoid_internal_fn, NULL);
4182
4183 add_setshow_zuinteger_unlimited_cmd ("max-value-size",
4184 class_support, &max_value_size, _("\
4185 Set maximum sized value gdb will load from the inferior."), _("\
4186 Show maximum sized value gdb will load from the inferior."), _("\
4187 Use this to control the maximum size, in bytes, of a value that gdb\n\
4188 will load from the inferior. Setting this value to 'unlimited'\n\
4189 disables checking.\n\
4190 Setting this does not invalidate already allocated values, it only\n\
4191 prevents future values, larger than this size, from being allocated."),
4192 set_max_value_size,
4193 show_max_value_size,
4194 &setlist, &showlist);
4195 }