/* Extraction metadata (gitweb page header, not part of the original source):
   commit: "Introduce class completion_tracker & rewrite completion<->readline interaction"
   path: deliverable/binutils-gdb.git / gdb / value.c  */
1 /* Low level packing and unpacking of values for GDB, the GNU Debugger.
2
3 Copyright (C) 1986-2017 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21 #include "arch-utils.h"
22 #include "symtab.h"
23 #include "gdbtypes.h"
24 #include "value.h"
25 #include "gdbcore.h"
26 #include "command.h"
27 #include "gdbcmd.h"
28 #include "target.h"
29 #include "language.h"
30 #include "demangle.h"
31 #include "doublest.h"
32 #include "regcache.h"
33 #include "block.h"
34 #include "dfp.h"
35 #include "objfiles.h"
36 #include "valprint.h"
37 #include "cli/cli-decode.h"
38 #include "extension.h"
39 #include <ctype.h>
40 #include "tracepoint.h"
41 #include "cp-abi.h"
42 #include "user-regs.h"
43 #include <algorithm>
44 #include "completer.h"
45
46 /* Prototypes for exported functions. */
47
48 void _initialize_values (void);
49
/* Definition of a user function (a "convenience function" callable
   from expressions).  */
struct internal_function
{
  /* The name of the function.  It is a bit odd to have this in the
     function itself -- the user might use a differently-named
     convenience variable to hold the function.  */
  char *name;

  /* The handler implementing the function's behavior.  */
  internal_function_fn handler;

  /* User data for the handler; passed back verbatim on each call.  */
  void *cookie;
};
64
/* Defines an [OFFSET, OFFSET + LENGTH) range.  */

struct range
{
  /* Lowest offset in the range.  */
  LONGEST offset;

  /* Length of the range.  */
  LONGEST length;
};

/* Alias so the range type can be used with the VEC macros below.  */
typedef struct range range_s;

/* Declare a vector-of-range type (VEC(range_s)); used for the
   unavailable and optimized_out range lists of a value.  */
DEF_VEC_O(range_s);
80 /* Returns true if the ranges defined by [offset1, offset1+len1) and
81 [offset2, offset2+len2) overlap. */
82
83 static int
84 ranges_overlap (LONGEST offset1, LONGEST len1,
85 LONGEST offset2, LONGEST len2)
86 {
87 ULONGEST h, l;
88
89 l = std::max (offset1, offset2);
90 h = std::min (offset1 + len1, offset2 + len2);
91 return (l < h);
92 }
93
94 /* Returns true if the first argument is strictly less than the
95 second, useful for VEC_lower_bound. We keep ranges sorted by
96 offset and coalesce overlapping and contiguous ranges, so this just
97 compares the starting offset. */
98
99 static int
100 range_lessthan (const range_s *r1, const range_s *r2)
101 {
102 return r1->offset < r2->offset;
103 }
104
105 /* Returns true if RANGES contains any range that overlaps [OFFSET,
106 OFFSET+LENGTH). */
107
108 static int
109 ranges_contain (VEC(range_s) *ranges, LONGEST offset, LONGEST length)
110 {
111 range_s what;
112 LONGEST i;
113
114 what.offset = offset;
115 what.length = length;
116
117 /* We keep ranges sorted by offset and coalesce overlapping and
118 contiguous ranges, so to check if a range list contains a given
119 range, we can do a binary search for the position the given range
120 would be inserted if we only considered the starting OFFSET of
121 ranges. We call that position I. Since we also have LENGTH to
122 care for (this is a range afterall), we need to check if the
123 _previous_ range overlaps the I range. E.g.,
124
125 R
126 |---|
127 |---| |---| |------| ... |--|
128 0 1 2 N
129
130 I=1
131
132 In the case above, the binary search would return `I=1', meaning,
133 this OFFSET should be inserted at position 1, and the current
134 position 1 should be pushed further (and before 2). But, `0'
135 overlaps with R.
136
137 Then we need to check if the I range overlaps the I range itself.
138 E.g.,
139
140 R
141 |---|
142 |---| |---| |-------| ... |--|
143 0 1 2 N
144
145 I=1
146 */
147
148 i = VEC_lower_bound (range_s, ranges, &what, range_lessthan);
149
150 if (i > 0)
151 {
152 struct range *bef = VEC_index (range_s, ranges, i - 1);
153
154 if (ranges_overlap (bef->offset, bef->length, offset, length))
155 return 1;
156 }
157
158 if (i < VEC_length (range_s, ranges))
159 {
160 struct range *r = VEC_index (range_s, ranges, i);
161
162 if (ranges_overlap (r->offset, r->length, offset, length))
163 return 1;
164 }
165
166 return 0;
167 }
168
/* Command list for internal-function related commands; presumably
   populated during this file's initialization -- TODO confirm against
   _initialize_values.  */
static struct cmd_list_element *functionlist;
170
/* Note that the fields in this structure are arranged to save a bit
   of memory.  */

struct value
{
  /* Type of value; either not an lval, or one of the various
     different possible kinds of lval.  */
  enum lval_type lval;

  /* Is it modifiable?  Only relevant if lval != not_lval.  */
  unsigned int modifiable : 1;

  /* If zero, contents of this value are in the contents field.  If
     nonzero, contents are in inferior.  If the lval field is lval_memory,
     the contents are in inferior memory at location.address plus offset.
     The lval field may also be lval_register.

     WARNING: This field is used by the code which handles watchpoints
     (see breakpoint.c) to decide whether a particular value can be
     watched by hardware watchpoints.  If the lazy flag is set for
     some member of a value chain, it is assumed that this member of
     the chain doesn't need to be watched as part of watching the
     value itself.  This is how GDB avoids watching the entire struct
     or array when the user wants to watch a single struct member or
     array element.  If you ever change the way lazy flag is set and
     reset, be sure to consider this use as well!  */
  unsigned int lazy : 1;

  /* If value is a variable, is it initialized or not.  */
  unsigned int initialized : 1;

  /* If value is from the stack.  If this is set, read_stack will be
     used instead of read_memory to enable extra caching.  */
  unsigned int stack : 1;

  /* If the value has been released (taken off the all_values chain).  */
  unsigned int released : 1;

  /* Location of value (if lval).  */
  union
  {
    /* If lval == lval_memory, this is the address in the inferior.  */
    CORE_ADDR address;

    /* If lval == lval_register, the value is from a register.  */
    struct
    {
      /* Register number.  */
      int regnum;
      /* Frame ID of "next" frame to which a register value is relative.
	 If the register value is found relative to frame F, then the
	 frame id of F->next will be stored in next_frame_id.  */
      struct frame_id next_frame_id;
    } reg;

    /* Pointer to internal variable.  */
    struct internalvar *internalvar;

    /* Pointer to xmethod worker.  */
    struct xmethod_worker *xm_worker;

    /* If lval == lval_computed, this is a set of function pointers
       to use to access and describe the value, and a closure pointer
       for them to use.  */
    struct
    {
      /* Functions to call.  */
      const struct lval_funcs *funcs;

      /* Closure for those functions to use.  */
      void *closure;
    } computed;
  } location;

  /* Describes offset of a value within lval of a structure in target
     addressable memory units.  Note also the member embedded_offset
     below.  */
  LONGEST offset;

  /* Only used for bitfields; number of bits contained in them.  */
  LONGEST bitsize;

  /* Only used for bitfields; position of start of field.  For
     gdbarch_bits_big_endian=0 targets, it is the position of the LSB.  For
     gdbarch_bits_big_endian=1 targets, it is the position of the MSB.  */
  LONGEST bitpos;

  /* The number of references to this value.  When a value is created,
     the value chain holds a reference, so REFERENCE_COUNT is 1.  If
     release_value is called, this value is removed from the chain but
     the caller of release_value now has a reference to this value.
     The caller must arrange for a call to value_free later.  */
  int reference_count;

  /* Only used for bitfields; the containing value.  This allows a
     single read from the target when displaying multiple
     bitfields.  */
  struct value *parent;

  /* Type of the value.  */
  struct type *type;

  /* If a value represents a C++ object, then the `type' field gives
     the object's compile-time type.  If the object actually belongs
     to some class derived from `type', perhaps with other base
     classes and additional members, then `type' is just a subobject
     of the real thing, and the full object is probably larger than
     `type' would suggest.

     If `type' is a dynamic class (i.e. one with a vtable), then GDB
     can actually determine the object's run-time type by looking at
     the run-time type information in the vtable.  When this
     information is available, we may elect to read in the entire
     object, for several reasons:

     - When printing the value, the user would probably rather see the
     full object, not just the limited portion apparent from the
     compile-time type.

     - If `type' has virtual base classes, then even printing `type'
     alone may require reaching outside the `type' portion of the
     object to wherever the virtual base class has been stored.

     When we store the entire object, `enclosing_type' is the run-time
     type -- the complete object -- and `embedded_offset' is the
     offset of `type' within that larger type, in target addressable memory
     units.  The value_contents() macro takes `embedded_offset' into account,
     so most GDB code continues to see the `type' portion of the value, just
     as the inferior would.

     If `type' is a pointer to an object, then `enclosing_type' is a
     pointer to the object's run-time type, and `pointed_to_offset' is
     the offset in target addressable memory units from the full object
     to the pointed-to object -- that is, the value `embedded_offset' would
     have if we followed the pointer and fetched the complete object.
     (I don't really see the point.  Why not just determine the
     run-time type when you indirect, and avoid the special case?  The
     contents don't matter until you indirect anyway.)

     If we're not doing anything fancy, `enclosing_type' is equal to
     `type', and `embedded_offset' is zero, so everything works
     normally.  */
  struct type *enclosing_type;
  LONGEST embedded_offset;
  LONGEST pointed_to_offset;

  /* Values are stored in a chain, so that they can be deleted easily
     over calls to the inferior.  Values assigned to internal
     variables, put into the value history or exposed to Python are
     taken off this list.  (See all_values and allocate_value_lazy,
     which links new values onto the chain.)  */
  struct value *next;

  /* Actual contents of the value.  Target byte-order.  NULL or not
     valid if lazy is nonzero.  */
  gdb_byte *contents;

  /* Unavailable ranges in CONTENTS.  We mark unavailable ranges,
     rather than available, since the common and default case is for a
     value to be available.  This is filled in at value read time.
     The unavailable ranges are tracked in bits.  Note that a contents
     bit that has been optimized out doesn't really exist in the
     program, so it can't be marked unavailable either.  */
  VEC(range_s) *unavailable;

  /* Likewise, but for optimized out contents (a chunk of the value of
     a variable that does not actually exist in the program).  If LVAL
     is lval_register, this is a register ($pc, $sp, etc., never a
     program variable) that has not been saved in the frame.  Not
     saved registers and optimized-out program variables values are
     treated pretty much the same, except not-saved registers have a
     different string representation and related error strings.  */
  VEC(range_s) *optimized_out;
};
344
345 /* See value.h. */
346
347 struct gdbarch *
348 get_value_arch (const struct value *value)
349 {
350 return get_type_arch (value_type (value));
351 }
352
353 int
354 value_bits_available (const struct value *value, LONGEST offset, LONGEST length)
355 {
356 gdb_assert (!value->lazy);
357
358 return !ranges_contain (value->unavailable, offset, length);
359 }
360
361 int
362 value_bytes_available (const struct value *value,
363 LONGEST offset, LONGEST length)
364 {
365 return value_bits_available (value,
366 offset * TARGET_CHAR_BIT,
367 length * TARGET_CHAR_BIT);
368 }
369
370 int
371 value_bits_any_optimized_out (const struct value *value, int bit_offset, int bit_length)
372 {
373 gdb_assert (!value->lazy);
374
375 return ranges_contain (value->optimized_out, bit_offset, bit_length);
376 }
377
378 int
379 value_entirely_available (struct value *value)
380 {
381 /* We can only tell whether the whole value is available when we try
382 to read it. */
383 if (value->lazy)
384 value_fetch_lazy (value);
385
386 if (VEC_empty (range_s, value->unavailable))
387 return 1;
388 return 0;
389 }
390
391 /* Returns true if VALUE is entirely covered by RANGES. If the value
392 is lazy, it'll be read now. Note that RANGE is a pointer to
393 pointer because reading the value might change *RANGE. */
394
395 static int
396 value_entirely_covered_by_range_vector (struct value *value,
397 VEC(range_s) **ranges)
398 {
399 /* We can only tell whether the whole value is optimized out /
400 unavailable when we try to read it. */
401 if (value->lazy)
402 value_fetch_lazy (value);
403
404 if (VEC_length (range_s, *ranges) == 1)
405 {
406 struct range *t = VEC_index (range_s, *ranges, 0);
407
408 if (t->offset == 0
409 && t->length == (TARGET_CHAR_BIT
410 * TYPE_LENGTH (value_enclosing_type (value))))
411 return 1;
412 }
413
414 return 0;
415 }
416
417 int
418 value_entirely_unavailable (struct value *value)
419 {
420 return value_entirely_covered_by_range_vector (value, &value->unavailable);
421 }
422
423 int
424 value_entirely_optimized_out (struct value *value)
425 {
426 return value_entirely_covered_by_range_vector (value, &value->optimized_out);
427 }
428
429 /* Insert into the vector pointed to by VECTORP the bit range starting of
430 OFFSET bits, and extending for the next LENGTH bits. */
431
432 static void
433 insert_into_bit_range_vector (VEC(range_s) **vectorp,
434 LONGEST offset, LONGEST length)
435 {
436 range_s newr;
437 int i;
438
439 /* Insert the range sorted. If there's overlap or the new range
440 would be contiguous with an existing range, merge. */
441
442 newr.offset = offset;
443 newr.length = length;
444
445 /* Do a binary search for the position the given range would be
446 inserted if we only considered the starting OFFSET of ranges.
447 Call that position I. Since we also have LENGTH to care for
448 (this is a range afterall), we need to check if the _previous_
449 range overlaps the I range. E.g., calling R the new range:
450
451 #1 - overlaps with previous
452
453 R
454 |-...-|
455 |---| |---| |------| ... |--|
456 0 1 2 N
457
458 I=1
459
460 In the case #1 above, the binary search would return `I=1',
461 meaning, this OFFSET should be inserted at position 1, and the
462 current position 1 should be pushed further (and become 2). But,
463 note that `0' overlaps with R, so we want to merge them.
464
465 A similar consideration needs to be taken if the new range would
466 be contiguous with the previous range:
467
468 #2 - contiguous with previous
469
470 R
471 |-...-|
472 |--| |---| |------| ... |--|
473 0 1 2 N
474
475 I=1
476
477 If there's no overlap with the previous range, as in:
478
479 #3 - not overlapping and not contiguous
480
481 R
482 |-...-|
483 |--| |---| |------| ... |--|
484 0 1 2 N
485
486 I=1
487
488 or if I is 0:
489
490 #4 - R is the range with lowest offset
491
492 R
493 |-...-|
494 |--| |---| |------| ... |--|
495 0 1 2 N
496
497 I=0
498
499 ... we just push the new range to I.
500
501 All the 4 cases above need to consider that the new range may
502 also overlap several of the ranges that follow, or that R may be
503 contiguous with the following range, and merge. E.g.,
504
505 #5 - overlapping following ranges
506
507 R
508 |------------------------|
509 |--| |---| |------| ... |--|
510 0 1 2 N
511
512 I=0
513
514 or:
515
516 R
517 |-------|
518 |--| |---| |------| ... |--|
519 0 1 2 N
520
521 I=1
522
523 */
524
525 i = VEC_lower_bound (range_s, *vectorp, &newr, range_lessthan);
526 if (i > 0)
527 {
528 struct range *bef = VEC_index (range_s, *vectorp, i - 1);
529
530 if (ranges_overlap (bef->offset, bef->length, offset, length))
531 {
532 /* #1 */
533 ULONGEST l = std::min (bef->offset, offset);
534 ULONGEST h = std::max (bef->offset + bef->length, offset + length);
535
536 bef->offset = l;
537 bef->length = h - l;
538 i--;
539 }
540 else if (offset == bef->offset + bef->length)
541 {
542 /* #2 */
543 bef->length += length;
544 i--;
545 }
546 else
547 {
548 /* #3 */
549 VEC_safe_insert (range_s, *vectorp, i, &newr);
550 }
551 }
552 else
553 {
554 /* #4 */
555 VEC_safe_insert (range_s, *vectorp, i, &newr);
556 }
557
558 /* Check whether the ranges following the one we've just added or
559 touched can be folded in (#5 above). */
560 if (i + 1 < VEC_length (range_s, *vectorp))
561 {
562 struct range *t;
563 struct range *r;
564 int removed = 0;
565 int next = i + 1;
566
567 /* Get the range we just touched. */
568 t = VEC_index (range_s, *vectorp, i);
569 removed = 0;
570
571 i = next;
572 for (; VEC_iterate (range_s, *vectorp, i, r); i++)
573 if (r->offset <= t->offset + t->length)
574 {
575 ULONGEST l, h;
576
577 l = std::min (t->offset, r->offset);
578 h = std::max (t->offset + t->length, r->offset + r->length);
579
580 t->offset = l;
581 t->length = h - l;
582
583 removed++;
584 }
585 else
586 {
587 /* If we couldn't merge this one, we won't be able to
588 merge following ones either, since the ranges are
589 always sorted by OFFSET. */
590 break;
591 }
592
593 if (removed != 0)
594 VEC_block_remove (range_s, *vectorp, next, removed);
595 }
596 }
597
598 void
599 mark_value_bits_unavailable (struct value *value,
600 LONGEST offset, LONGEST length)
601 {
602 insert_into_bit_range_vector (&value->unavailable, offset, length);
603 }
604
605 void
606 mark_value_bytes_unavailable (struct value *value,
607 LONGEST offset, LONGEST length)
608 {
609 mark_value_bits_unavailable (value,
610 offset * TARGET_CHAR_BIT,
611 length * TARGET_CHAR_BIT);
612 }
613
614 /* Find the first range in RANGES that overlaps the range defined by
615 OFFSET and LENGTH, starting at element POS in the RANGES vector,
616 Returns the index into RANGES where such overlapping range was
617 found, or -1 if none was found. */
618
619 static int
620 find_first_range_overlap (VEC(range_s) *ranges, int pos,
621 LONGEST offset, LONGEST length)
622 {
623 range_s *r;
624 int i;
625
626 for (i = pos; VEC_iterate (range_s, ranges, i, r); i++)
627 if (ranges_overlap (r->offset, r->length, offset, length))
628 return i;
629
630 return -1;
631 }
632
/* Compare LENGTH_BITS of memory at PTR1 + OFFSET1_BITS with the memory at
   PTR2 + OFFSET2_BITS.  Return 0 if the memory is the same, otherwise
   return non-zero.

   It must always be the case that:
     OFFSET1_BITS % TARGET_CHAR_BIT == OFFSET2_BITS % TARGET_CHAR_BIT

   It is assumed that memory can be accessed from:
     PTR + (OFFSET_BITS / TARGET_CHAR_BIT)
   to:
     PTR + ((OFFSET_BITS + LENGTH_BITS + TARGET_CHAR_BIT - 1)
	    / TARGET_CHAR_BIT)  */
static int
memcmp_with_bit_offsets (const gdb_byte *ptr1, size_t offset1_bits,
			 const gdb_byte *ptr2, size_t offset2_bits,
			 size_t length_bits)
{
  /* Both offsets must have the same sub-byte phase, otherwise the
     bitwise comparison below would have to shift.  */
  gdb_assert (offset1_bits % TARGET_CHAR_BIT
	      == offset2_bits % TARGET_CHAR_BIT);

  if (offset1_bits % TARGET_CHAR_BIT != 0)
    {
      size_t bits;
      gdb_byte mask, b1, b2;

      /* The offset from the base pointers PTR1 and PTR2 is not a complete
	 number of bytes.  A number of bits up to either the next exact
	 byte boundary, or LENGTH_BITS (which ever is sooner) will be
	 compared.  */
      bits = TARGET_CHAR_BIT - offset1_bits % TARGET_CHAR_BIT;
      gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
      /* MASK selects the BITS low-order bits of each byte.  */
      mask = (1 << bits) - 1;

      if (length_bits < bits)
	{
	  /* Fewer than BITS bits requested: drop the excess low-order
	     bits from the mask and only compare LENGTH_BITS bits.  */
	  mask &= ~(gdb_byte) ((1 << (bits - length_bits)) - 1);
	  bits = length_bits;
	}

      /* Now load the two bytes and mask off the bits we care about.  */
      b1 = *(ptr1 + offset1_bits / TARGET_CHAR_BIT) & mask;
      b2 = *(ptr2 + offset2_bits / TARGET_CHAR_BIT) & mask;

      if (b1 != b2)
	return 1;

      /* Now update the length and offsets to take account of the bits
	 we've just compared.  */
      length_bits -= bits;
      offset1_bits += bits;
      offset2_bits += bits;
    }

  if (length_bits % TARGET_CHAR_BIT != 0)
    {
      size_t bits;
      size_t o1, o2;
      gdb_byte mask, b1, b2;

      /* The length is not an exact number of bytes.  After the previous
	 IF.. block then the offsets are byte aligned, or the
	 length is zero (in which case this code is not reached).  Compare
	 a number of bits at the end of the region, starting from an exact
	 byte boundary.  */
      bits = length_bits % TARGET_CHAR_BIT;
      o1 = offset1_bits + length_bits - bits;
      o2 = offset2_bits + length_bits - bits;

      gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
      /* MASK selects the BITS high-order bits of the final byte.  */
      mask = ((1 << bits) - 1) << (TARGET_CHAR_BIT - bits);

      gdb_assert (o1 % TARGET_CHAR_BIT == 0);
      gdb_assert (o2 % TARGET_CHAR_BIT == 0);

      b1 = *(ptr1 + o1 / TARGET_CHAR_BIT) & mask;
      b2 = *(ptr2 + o2 / TARGET_CHAR_BIT) & mask;

      if (b1 != b2)
	return 1;

      length_bits -= bits;
    }

  if (length_bits > 0)
    {
      /* We've now taken care of any stray "bits" at the start, or end of
	 the region to compare, the remainder can be covered with a simple
	 memcmp.  */
      gdb_assert (offset1_bits % TARGET_CHAR_BIT == 0);
      gdb_assert (offset2_bits % TARGET_CHAR_BIT == 0);
      gdb_assert (length_bits % TARGET_CHAR_BIT == 0);

      return memcmp (ptr1 + offset1_bits / TARGET_CHAR_BIT,
		     ptr2 + offset2_bits / TARGET_CHAR_BIT,
		     length_bits / TARGET_CHAR_BIT);
    }

  /* Length is zero, regions match.  */
  return 0;
}
733
/* Helper struct for find_first_range_overlap_and_match and
   value_contents_bits_eq.  Keep track of which slot of a given ranges
   vector have we last looked at.  */

struct ranges_and_idx
{
  /* The ranges.  */
  VEC(range_s) *ranges;

  /* The range we've last found in RANGES.  Given ranges are sorted,
     we can start the next lookup here.  */
  int idx;
};
747
748 /* Helper function for value_contents_bits_eq. Compare LENGTH bits of
749 RP1's ranges starting at OFFSET1 bits with LENGTH bits of RP2's
750 ranges starting at OFFSET2 bits. Return true if the ranges match
751 and fill in *L and *H with the overlapping window relative to
752 (both) OFFSET1 or OFFSET2. */
753
754 static int
755 find_first_range_overlap_and_match (struct ranges_and_idx *rp1,
756 struct ranges_and_idx *rp2,
757 LONGEST offset1, LONGEST offset2,
758 LONGEST length, ULONGEST *l, ULONGEST *h)
759 {
760 rp1->idx = find_first_range_overlap (rp1->ranges, rp1->idx,
761 offset1, length);
762 rp2->idx = find_first_range_overlap (rp2->ranges, rp2->idx,
763 offset2, length);
764
765 if (rp1->idx == -1 && rp2->idx == -1)
766 {
767 *l = length;
768 *h = length;
769 return 1;
770 }
771 else if (rp1->idx == -1 || rp2->idx == -1)
772 return 0;
773 else
774 {
775 range_s *r1, *r2;
776 ULONGEST l1, h1;
777 ULONGEST l2, h2;
778
779 r1 = VEC_index (range_s, rp1->ranges, rp1->idx);
780 r2 = VEC_index (range_s, rp2->ranges, rp2->idx);
781
782 /* Get the unavailable windows intersected by the incoming
783 ranges. The first and last ranges that overlap the argument
784 range may be wider than said incoming arguments ranges. */
785 l1 = std::max (offset1, r1->offset);
786 h1 = std::min (offset1 + length, r1->offset + r1->length);
787
788 l2 = std::max (offset2, r2->offset);
789 h2 = std::min (offset2 + length, offset2 + r2->length);
790
791 /* Make them relative to the respective start offsets, so we can
792 compare them for equality. */
793 l1 -= offset1;
794 h1 -= offset1;
795
796 l2 -= offset2;
797 h2 -= offset2;
798
799 /* Different ranges, no match. */
800 if (l1 != l2 || h1 != h2)
801 return 0;
802
803 *h = h1;
804 *l = l1;
805 return 1;
806 }
807 }
808
/* Helper function for value_contents_eq.  The only difference is that
   this function is bit rather than byte based.

   Compare LENGTH bits of VAL1's contents starting at OFFSET1 bits
   with LENGTH bits of VAL2's contents starting at OFFSET2 bits.
   Return true if the available bits match.  */

static int
value_contents_bits_eq (const struct value *val1, int offset1,
			const struct value *val2, int offset2,
			int length)
{
  /* Each array element corresponds to a ranges source (unavailable,
     optimized out).  '1' is for VAL1, '2' for VAL2.  */
  struct ranges_and_idx rp1[2], rp2[2];

  /* See function description in value.h.  */
  gdb_assert (!val1->lazy && !val2->lazy);

  /* We shouldn't be trying to compare past the end of the values.  */
  gdb_assert (offset1 + length
	      <= TYPE_LENGTH (val1->enclosing_type) * TARGET_CHAR_BIT);
  gdb_assert (offset2 + length
	      <= TYPE_LENGTH (val2->enclosing_type) * TARGET_CHAR_BIT);

  memset (&rp1, 0, sizeof (rp1));
  memset (&rp2, 0, sizeof (rp2));
  rp1[0].ranges = val1->unavailable;
  rp2[0].ranges = val2->unavailable;
  rp1[1].ranges = val1->optimized_out;
  rp2[1].ranges = val2->optimized_out;

  /* Walk the window in chunks: for each iteration, compare the valid
     bits before the first invalid range (L bits), then advance past
     the matched invalid window (H bits).  */
  while (length > 0)
    {
      ULONGEST l = 0, h = 0; /* init for gcc -Wall */
      int i;

      for (i = 0; i < 2; i++)
	{
	  ULONGEST l_tmp, h_tmp;

	  /* The contents only match equal if the invalid/unavailable
	     contents ranges match as well.  */
	  if (!find_first_range_overlap_and_match (&rp1[i], &rp2[i],
						   offset1, offset2, length,
						   &l_tmp, &h_tmp))
	    return 0;

	  /* We're interested in the lowest/first range found.  */
	  if (i == 0 || l_tmp < l)
	    {
	      l = l_tmp;
	      h = h_tmp;
	    }
	}

      /* Compare the available/valid contents.  */
      if (memcmp_with_bit_offsets (val1->contents, offset1,
				   val2->contents, offset2, l) != 0)
	return 0;

      length -= h;
      offset1 += h;
      offset2 += h;
    }

  return 1;
}
877
878 int
879 value_contents_eq (const struct value *val1, LONGEST offset1,
880 const struct value *val2, LONGEST offset2,
881 LONGEST length)
882 {
883 return value_contents_bits_eq (val1, offset1 * TARGET_CHAR_BIT,
884 val2, offset2 * TARGET_CHAR_BIT,
885 length * TARGET_CHAR_BIT);
886 }
887
888 /* Prototypes for local functions. */
889
890 static void show_values (char *, int);
891
892 static void show_convenience (char *, int);
893
894
/* The value-history records all the values printed
   by print commands during this session.  Each chunk
   records 60 consecutive values.  The first chunk on
   the chain records the most recent values.
   The total number of values is in value_history_count.  */

#define VALUE_HISTORY_CHUNK 60

struct value_history_chunk
{
  /* Next (older) chunk; the head of the chain holds the newest
     values.  */
  struct value_history_chunk *next;

  /* Storage for VALUE_HISTORY_CHUNK consecutive history values.  */
  struct value *values[VALUE_HISTORY_CHUNK];
};

/* Chain of chunks now in use.  */

static struct value_history_chunk *value_history_chain;

static int value_history_count;	/* Abs number of last entry stored.  */
914
915 \f
/* List of all value objects currently allocated (except for those
   released by calls to release_value).  Kept so they can be freed
   after each command.  */

static struct value *all_values;
921
922 /* Allocate a lazy value for type TYPE. Its actual content is
923 "lazily" allocated too: the content field of the return value is
924 NULL; it will be allocated when it is fetched from the target. */
925
926 struct value *
927 allocate_value_lazy (struct type *type)
928 {
929 struct value *val;
930
931 /* Call check_typedef on our type to make sure that, if TYPE
932 is a TYPE_CODE_TYPEDEF, its length is set to the length
933 of the target type instead of zero. However, we do not
934 replace the typedef type by the target type, because we want
935 to keep the typedef in order to be able to set the VAL's type
936 description correctly. */
937 check_typedef (type);
938
939 val = XCNEW (struct value);
940 val->contents = NULL;
941 val->next = all_values;
942 all_values = val;
943 val->type = type;
944 val->enclosing_type = type;
945 VALUE_LVAL (val) = not_lval;
946 val->location.address = 0;
947 val->offset = 0;
948 val->bitpos = 0;
949 val->bitsize = 0;
950 val->lazy = 1;
951 val->embedded_offset = 0;
952 val->pointed_to_offset = 0;
953 val->modifiable = 1;
954 val->initialized = 1; /* Default to initialized. */
955
956 /* Values start out on the all_values chain. */
957 val->reference_count = 1;
958
959 return val;
960 }
961
/* The maximum size, in bytes, that GDB will try to allocate for a value.
   The initial value of 64k was not selected for any specific reason, it is
   just a reasonable starting point.  A value of -1 means no limit (see
   show_max_value_size).  */

static int max_value_size = 65536; /* 64k bytes */

/* It is critical that the MAX_VALUE_SIZE is at least as big as the size of
   LONGEST, otherwise GDB will not be able to parse integer values from the
   CLI; for example if the MAX_VALUE_SIZE could be set to 1 then GDB would
   be unable to parse "set max-value-size 2".

   As we want a consistent GDB experience across hosts with different sizes
   of LONGEST, this arbitrary minimum value was selected, so long as this
   is bigger than LONGEST on all GDB supported hosts we're fine.  */

#define MIN_VALUE_FOR_MAX_VALUE_SIZE 16
gdb_static_assert (sizeof (LONGEST) <= MIN_VALUE_FOR_MAX_VALUE_SIZE);
979
980 /* Implement the "set max-value-size" command. */
981
982 static void
983 set_max_value_size (char *args, int from_tty,
984 struct cmd_list_element *c)
985 {
986 gdb_assert (max_value_size == -1 || max_value_size >= 0);
987
988 if (max_value_size > -1 && max_value_size < MIN_VALUE_FOR_MAX_VALUE_SIZE)
989 {
990 max_value_size = MIN_VALUE_FOR_MAX_VALUE_SIZE;
991 error (_("max-value-size set too low, increasing to %d bytes"),
992 max_value_size);
993 }
994 }
995
996 /* Implement the "show max-value-size" command. */
997
998 static void
999 show_max_value_size (struct ui_file *file, int from_tty,
1000 struct cmd_list_element *c, const char *value)
1001 {
1002 if (max_value_size == -1)
1003 fprintf_filtered (file, _("Maximum value size is unlimited.\n"));
1004 else
1005 fprintf_filtered (file, _("Maximum value size is %d bytes.\n"),
1006 max_value_size);
1007 }
1008
1009 /* Called before we attempt to allocate or reallocate a buffer for the
1010 contents of a value. TYPE is the type of the value for which we are
1011 allocating the buffer. If the buffer is too large (based on the user
1012 controllable setting) then throw an error. If this function returns
1013 then we should attempt to allocate the buffer. */
1014
1015 static void
1016 check_type_length_before_alloc (const struct type *type)
1017 {
1018 unsigned int length = TYPE_LENGTH (type);
1019
1020 if (max_value_size > -1 && length > max_value_size)
1021 {
1022 if (TYPE_NAME (type) != NULL)
1023 error (_("value of type `%s' requires %u bytes, which is more "
1024 "than max-value-size"), TYPE_NAME (type), length);
1025 else
1026 error (_("value requires %u bytes, which is more than "
1027 "max-value-size"), length);
1028 }
1029 }
1030
1031 /* Allocate the contents of VAL if it has not been allocated yet. */
1032
1033 static void
1034 allocate_value_contents (struct value *val)
1035 {
1036 if (!val->contents)
1037 {
1038 check_type_length_before_alloc (val->enclosing_type);
1039 val->contents
1040 = (gdb_byte *) xzalloc (TYPE_LENGTH (val->enclosing_type));
1041 }
1042 }
1043
/* Allocate a value and its contents for type TYPE.  Unlike
   allocate_value_lazy, the contents buffer exists (zero-filled) on
   return and the value is marked non-lazy.  */

struct value *
allocate_value (struct type *type)
{
  struct value *val = allocate_value_lazy (type);

  allocate_value_contents (val);
  val->lazy = 0;
  return val;
}
1055
/* Allocate a value that has the correct length
   for COUNT repetitions of type TYPE.  The result's type is an array
   of TYPE indexed from the current language's string lower bound.  */

struct value *
allocate_repeat_value (struct type *type, int count)
{
  int low_bound = current_language->string_lower_bound;		/* ??? */
  /* FIXME-type-allocation: need a way to free this type when we are
     done with it.  */
  struct type *array_type
    = lookup_array_range_type (type, low_bound, count + low_bound - 1);

  return allocate_value (array_type);
}
1070
/* Allocate a lazy value of type TYPE whose contents are produced on
   demand via the FUNCS callbacks.  CLOSURE is stored with the value
   and handed back to FUNCS uninterpreted.  */

struct value *
allocate_computed_value (struct type *type,
			 const struct lval_funcs *funcs,
			 void *closure)
{
  struct value *v = allocate_value_lazy (type);

  VALUE_LVAL (v) = lval_computed;
  v->location.computed.funcs = funcs;
  v->location.computed.closure = closure;

  return v;
}
1084
/* Allocate NOT_LVAL value for type TYPE being OPTIMIZED_OUT.  The
   whole extent of the value is marked optimized out, and the value is
   made non-lazy so nothing ever tries to fetch it.  */

struct value *
allocate_optimized_out_value (struct type *type)
{
  struct value *retval = allocate_value_lazy (type);

  mark_value_bytes_optimized_out (retval, 0, TYPE_LENGTH (type));
  set_value_lazy (retval, 0);
  return retval;
}
1096
/* Accessor methods.  */

/* Return the value following VALUE on the value chain.  */

struct value *
value_next (const struct value *value)
{
  return value->next;
}

/* Return the declared (apparent) type of VALUE.  */

struct type *
value_type (const struct value *value)
{
  return value->type;
}
/* Overwrite VALUE's type in place.  Deprecated interface.  */
void
deprecated_set_value_type (struct value *value, struct type *type)
{
  value->type = type;
}

/* Offset of VALUE within its location (see value_address).  */

LONGEST
value_offset (const struct value *value)
{
  return value->offset;
}
void
set_value_offset (struct value *value, LONGEST offset)
{
  value->offset = offset;
}

/* Bit position accessor (used for bitfield values).  */

LONGEST
value_bitpos (const struct value *value)
{
  return value->bitpos;
}
void
set_value_bitpos (struct value *value, LONGEST bit)
{
  value->bitpos = bit;
}

/* Bit size accessor (used for bitfield values).  */

LONGEST
value_bitsize (const struct value *value)
{
  return value->bitsize;
}
void
set_value_bitsize (struct value *value, LONGEST bit)
{
  value->bitsize = bit;
}

/* Parent value, if VALUE is a component of another value.  */

struct value *
value_parent (const struct value *value)
{
  return value->parent;
}
1154
/* See value.h.  */

void
set_value_parent (struct value *value, struct value *parent)
{
  struct value *old = value->parent;

  /* Take a reference to the new parent before dropping the reference
     to the old one, so the swap is safe even if they are the same.  */
  value->parent = parent;
  if (parent != NULL)
    value_incref (parent);
  value_free (old);
}
1167
/* Return a writeable pointer to VALUE's contents, offset to the start
   of the value proper within its enclosing type.  Allocates the
   buffer if necessary; does not fetch lazy contents.  */

gdb_byte *
value_contents_raw (struct value *value)
{
  struct gdbarch *arch = get_value_arch (value);
  int unit_size = gdbarch_addressable_memory_unit_size (arch);

  /* embedded_offset is counted in addressable memory units, hence the
     scaling by the architecture's unit size.  */
  allocate_value_contents (value);
  return value->contents + value->embedded_offset * unit_size;
}

/* Like value_contents_raw, but for the whole enclosing type (no
   embedded-offset adjustment).  */

gdb_byte *
value_contents_all_raw (struct value *value)
{
  allocate_value_contents (value);
  return value->contents;
}
1184
/* Return the enclosing type of VALUE (the full type of the object the
   contents buffer covers, which may be larger than value_type).  */

struct type *
value_enclosing_type (const struct value *value)
{
  return value->enclosing_type;
}
1190
/* Look at value.h for description.  */

struct type *
value_actual_type (struct value *value, int resolve_simple_types,
		   int *real_type_found)
{
  struct value_print_options opts;
  struct type *result;

  get_user_print_options (&opts);

  if (real_type_found)
    *real_type_found = 0;
  result = value_type (value);
  /* Only attempt dynamic-type discovery when the user has "set print
     object on".  */
  if (opts.objectprint)
    {
      /* If result's target type is TYPE_CODE_STRUCT, proceed to
	 fetch its rtti type.  */
      if ((TYPE_CODE (result) == TYPE_CODE_PTR || TYPE_IS_REFERENCE (result))
	  && TYPE_CODE (check_typedef (TYPE_TARGET_TYPE (result)))
	     == TYPE_CODE_STRUCT
	  && !value_optimized_out (value))
	{
	  struct type *real_type;

	  real_type = value_rtti_indirect_type (value, NULL, NULL, NULL);
	  if (real_type)
	    {
	      if (real_type_found)
		*real_type_found = 1;
	      result = real_type;
	    }
	}
      else if (resolve_simple_types)
	{
	  /* Non-pointer/reference case: the enclosing type is the best
	     "actual" type we have.  */
	  if (real_type_found)
	    *real_type_found = 1;
	  result = value_enclosing_type (value);
	}
    }

  return result;
}
1234
/* Throw the generic "optimized out" error.  */

void
error_value_optimized_out (void)
{
  error (_("value has been optimized out"));
}

/* Throw if any part of VALUE is marked optimized out.  Registers get
   a more specific message than other locations.  */

static void
require_not_optimized_out (const struct value *value)
{
  if (!VEC_empty (range_s, value->optimized_out))
    {
      if (value->lval == lval_register)
	error (_("register has not been saved in frame"));
      else
	error_value_optimized_out ();
    }
}

/* Throw NOT_AVAILABLE_ERROR if any part of VALUE is unavailable.  */

static void
require_available (const struct value *value)
{
  if (!VEC_empty (range_s, value->unavailable))
    throw_error (NOT_AVAILABLE_ERROR, _("value is not available"));
}
1259
/* Return VALUE's contents for printing: lazy contents are fetched,
   but optimized-out/unavailable pieces are tolerated (the printing
   code renders them specially).  */

const gdb_byte *
value_contents_for_printing (struct value *value)
{
  if (value->lazy)
    value_fetch_lazy (value);
  return value->contents;
}

/* Const variant; VALUE must already be non-lazy.  */

const gdb_byte *
value_contents_for_printing_const (const struct value *value)
{
  gdb_assert (!value->lazy);
  return value->contents;
}

/* Return VALUE's full contents, throwing if any part is optimized
   out or unavailable.  */

const gdb_byte *
value_contents_all (struct value *value)
{
  const gdb_byte *result = value_contents_for_printing (value);
  require_not_optimized_out (value);
  require_available (value);
  return result;
}
1283
/* Copy ranges in SRC_RANGE that overlap [SRC_BIT_OFFSET,
   SRC_BIT_OFFSET+BIT_LENGTH) ranges into *DST_RANGE, adjusted.  */

static void
ranges_copy_adjusted (VEC (range_s) **dst_range, int dst_bit_offset,
		      VEC (range_s) *src_range, int src_bit_offset,
		      int bit_length)
{
  range_s *r;
  int i;

  for (i = 0; VEC_iterate (range_s, src_range, i, r); i++)
    {
      ULONGEST h, l;

      /* Intersect [r->offset, r->offset+r->length) with the window
	 being copied.  */
      l = std::max (r->offset, (LONGEST) src_bit_offset);
      h = std::min (r->offset + r->length,
		    (LONGEST) src_bit_offset + bit_length);

      /* Copy the intersection, relocated into DST's coordinates.  */
      if (l < h)
	insert_into_bit_range_vector (dst_range,
				      dst_bit_offset + (l - src_bit_offset),
				      h - l);
    }
}
1309
/* Copy the ranges metadata in SRC that overlaps [SRC_BIT_OFFSET,
   SRC_BIT_OFFSET+BIT_LENGTH) into DST, adjusted.  Both the
   unavailable and the optimized-out range vectors are copied.  */

static void
value_ranges_copy_adjusted (struct value *dst, int dst_bit_offset,
			    const struct value *src, int src_bit_offset,
			    int bit_length)
{
  ranges_copy_adjusted (&dst->unavailable, dst_bit_offset,
			src->unavailable, src_bit_offset,
			bit_length);
  ranges_copy_adjusted (&dst->optimized_out, dst_bit_offset,
			src->optimized_out, src_bit_offset,
			bit_length);
}
1325
/* Copy LENGTH target addressable memory units of SRC value's (all) contents
   (value_contents_all) starting at SRC_OFFSET, into DST value's (all)
   contents, starting at DST_OFFSET.  If unavailable contents are
   being copied from SRC, the corresponding DST contents are marked
   unavailable accordingly.  Neither DST nor SRC may be lazy
   values.

   It is assumed the contents of DST in the [DST_OFFSET,
   DST_OFFSET+LENGTH) range are wholly available.  */

void
value_contents_copy_raw (struct value *dst, LONGEST dst_offset,
			 struct value *src, LONGEST src_offset, LONGEST length)
{
  LONGEST src_bit_offset, dst_bit_offset, bit_length;
  struct gdbarch *arch = get_value_arch (src);
  int unit_size = gdbarch_addressable_memory_unit_size (arch);

  /* A lazy DST would make that this copy operation useless, since as
     soon as DST's contents were un-lazied (by a later value_contents
     call, say), the contents would be overwritten.  A lazy SRC would
     mean we'd be copying garbage.  */
  gdb_assert (!dst->lazy && !src->lazy);

  /* The overwritten DST range gets unavailability ORed in, not
     replaced.  Make sure to remember to implement replacing if it
     turns out actually necessary.  */
  gdb_assert (value_bytes_available (dst, dst_offset, length));
  gdb_assert (!value_bits_any_optimized_out (dst,
					     TARGET_CHAR_BIT * dst_offset,
					     TARGET_CHAR_BIT * length));

  /* Copy the data.  */
  memcpy (value_contents_all_raw (dst) + dst_offset * unit_size,
	  value_contents_all_raw (src) + src_offset * unit_size,
	  length * unit_size);

  /* Copy the meta-data, adjusted.  Range vectors are kept in bits,
     so convert the unit-based offsets first.  */
  src_bit_offset = src_offset * unit_size * HOST_CHAR_BIT;
  dst_bit_offset = dst_offset * unit_size * HOST_CHAR_BIT;
  bit_length = length * unit_size * HOST_CHAR_BIT;

  value_ranges_copy_adjusted (dst, dst_bit_offset,
			      src, src_bit_offset,
			      bit_length);
}
1372
/* Copy LENGTH bytes of SRC value's (all) contents
   (value_contents_all) starting at SRC_OFFSET byte, into DST value's
   (all) contents, starting at DST_OFFSET.  If unavailable contents
   are being copied from SRC, the corresponding DST contents are
   marked unavailable accordingly.  DST must not be lazy.  If SRC is
   lazy, it will be fetched now.

   It is assumed the contents of DST in the [DST_OFFSET,
   DST_OFFSET+LENGTH) range are wholly available.  */

void
value_contents_copy (struct value *dst, LONGEST dst_offset,
		     struct value *src, LONGEST src_offset, LONGEST length)
{
  /* Un-lazy SRC so the raw copy below sees real contents.  */
  if (src->lazy)
    value_fetch_lazy (src);

  value_contents_copy_raw (dst, dst_offset, src, src_offset, length);
}
1392
/* Non-zero if VALUE's contents have not been fetched yet.  */

int
value_lazy (const struct value *value)
{
  return value->lazy;
}

void
set_value_lazy (struct value *value, int val)
{
  value->lazy = val;
}

/* Non-zero if VALUE is known to live on the program's stack.  */

int
value_stack (const struct value *value)
{
  return value->stack;
}

void
set_value_stack (struct value *value, int val)
{
  value->stack = val;
}
1416
/* Return VALUE's contents (fetching them if lazy), throwing if any
   part is optimized out or unavailable.  */

const gdb_byte *
value_contents (struct value *value)
{
  const gdb_byte *result = value_contents_writeable (value);
  require_not_optimized_out (value);
  require_available (value);
  return result;
}

/* Writeable variant: fetch lazy contents, then return the raw
   (mutable) buffer with no availability checks.  */

gdb_byte *
value_contents_writeable (struct value *value)
{
  if (value->lazy)
    value_fetch_lazy (value);
  return value_contents_raw (value);
}
1433
/* Return non-zero if any part of VALUE is optimized out.  */

int
value_optimized_out (struct value *value)
{
  /* We can only know if a value is optimized out once we have tried to
     fetch it.  */
  if (VEC_empty (range_s, value->optimized_out) && value->lazy)
    {
      TRY
	{
	  value_fetch_lazy (value);
	}
      CATCH (ex, RETURN_MASK_ERROR)
	{
	  /* Fall back to checking value->optimized_out.  */
	}
      END_CATCH
    }

  return !VEC_empty (range_s, value->optimized_out);
}
1454
/* Mark contents of VALUE as optimized out, starting at OFFSET bytes, and
   the following LENGTH bytes.  Thin byte-oriented wrapper over the
   bit-oriented version below.  */

void
mark_value_bytes_optimized_out (struct value *value, int offset, int length)
{
  mark_value_bits_optimized_out (value,
				 offset * TARGET_CHAR_BIT,
				 length * TARGET_CHAR_BIT);
}

/* See value.h.  */

void
mark_value_bits_optimized_out (struct value *value,
			       LONGEST offset, LONGEST length)
{
  insert_into_bit_range_vector (&value->optimized_out, offset, length);
}
1474
/* Return non-zero if bits [OFFSET, OFFSET+LENGTH) of VALUE form a
   synthetic pointer.  Only computed lvalues can answer this; for
   anything else the answer is 0.  */

int
value_bits_synthetic_pointer (const struct value *value,
			      LONGEST offset, LONGEST length)
{
  if (value->lval != lval_computed
      || !value->location.computed.funcs->check_synthetic_pointer)
    return 0;
  return value->location.computed.funcs->check_synthetic_pointer (value,
								  offset,
								  length);
}
1486
/* Offset of the value proper within its enclosing type's buffer.  */

LONGEST
value_embedded_offset (const struct value *value)
{
  return value->embedded_offset;
}

void
set_value_embedded_offset (struct value *value, LONGEST val)
{
  value->embedded_offset = val;
}

/* Offset applied when this (pointer) value is dereferenced.  */

LONGEST
value_pointed_to_offset (const struct value *value)
{
  return value->pointed_to_offset;
}

void
set_value_pointed_to_offset (struct value *value, LONGEST val)
{
  value->pointed_to_offset = val;
}
1510
/* Return the lval_funcs of a computed lvalue V.  V must be
   lval_computed.  */

const struct lval_funcs *
value_computed_funcs (const struct value *v)
{
  gdb_assert (value_lval_const (v) == lval_computed);

  return v->location.computed.funcs;
}

/* Return the opaque closure stored with a computed lvalue V.  */

void *
value_computed_closure (const struct value *v)
{
  gdb_assert (v->lval == lval_computed);

  return v->location.computed.closure;
}

/* Deprecated: expose the lval field for direct mutation.  */

enum lval_type *
deprecated_value_lval_hack (struct value *value)
{
  return &value->lval;
}

enum lval_type
value_lval_const (const struct value *value)
{
  return value->lval;
}
1538
/* Return the address of VALUE, or 0 if it is not in memory.  A
   component value uses its parent's address plus its own offset; a
   type with a constant data-location property overrides the stored
   location.  */

CORE_ADDR
value_address (const struct value *value)
{
  if (value->lval != lval_memory)
    return 0;
  if (value->parent != NULL)
    return value_address (value->parent) + value->offset;
  if (NULL != TYPE_DATA_LOCATION (value_type (value)))
    {
      gdb_assert (PROP_CONST == TYPE_DATA_LOCATION_KIND (value_type (value)));
      return TYPE_DATA_LOCATION_ADDR (value_type (value));
    }

  return value->location.address + value->offset;
}
1554
/* Like value_address, but without the offset/parent/data-location
   adjustments.  Returns 0 for non-memory values.  */

CORE_ADDR
value_raw_address (const struct value *value)
{
  if (value->lval != lval_memory)
    return 0;
  return value->location.address;
}

/* Set the memory address of VALUE, which must be an lval_memory.  */

void
set_value_address (struct value *value, CORE_ADDR addr)
{
  gdb_assert (value->lval == lval_memory);
  value->location.address = addr;
}
1569
/* Deprecated accessors that expose internal location fields for
   direct mutation by legacy callers.  */

struct internalvar **
deprecated_value_internalvar_hack (struct value *value)
{
  return &value->location.internalvar;
}

struct frame_id *
deprecated_value_next_frame_id_hack (struct value *value)
{
  gdb_assert (value->lval == lval_register);
  return &value->location.reg.next_frame_id;
}

int *
deprecated_value_regnum_hack (struct value *value)
{
  gdb_assert (value->lval == lval_register);
  return &value->location.reg.regnum;
}

/* Non-zero if assignment through this value is allowed (history
   values, for example, are frozen).  */

int
deprecated_value_modifiable (const struct value *value)
{
  return value->modifiable;
}
1595 \f
/* Return a mark in the value chain.  All values allocated after the
   mark is obtained (except for those released) are subject to being freed
   if a subsequent value_free_to_mark is passed the mark.  The mark is
   simply the current head of the chain.  */
struct value *
value_mark (void)
{
  return all_values;
}
1604
/* Take a reference to VAL.  VAL will not be deallocated until all
   references are released.  */

void
value_incref (struct value *val)
{
  val->reference_count++;
}
1613
1614 /* Release a reference to VAL, which was acquired with value_incref.
1615 This function is also called to deallocate values from the value
1616 chain. */
1617
1618 void
1619 value_free (struct value *val)
1620 {
1621 if (val)
1622 {
1623 gdb_assert (val->reference_count > 0);
1624 val->reference_count--;
1625 if (val->reference_count > 0)
1626 return;
1627
1628 /* If there's an associated parent value, drop our reference to
1629 it. */
1630 if (val->parent != NULL)
1631 value_free (val->parent);
1632
1633 if (VALUE_LVAL (val) == lval_computed)
1634 {
1635 const struct lval_funcs *funcs = val->location.computed.funcs;
1636
1637 if (funcs->free_closure)
1638 funcs->free_closure (val);
1639 }
1640 else if (VALUE_LVAL (val) == lval_xcallable)
1641 free_xmethod_worker (val->location.xm_worker);
1642
1643 xfree (val->contents);
1644 VEC_free (range_s, val->unavailable);
1645 }
1646 xfree (val);
1647 }
1648
/* Free all values allocated since MARK was obtained by value_mark
   (except for those released).  Values are marked released before
   being freed so value_free's bookkeeping stays consistent.  */
void
value_free_to_mark (const struct value *mark)
{
  struct value *val;
  struct value *next;

  for (val = all_values; val && val != mark; val = next)
    {
      next = val->next;
      val->released = 1;
      value_free (val);
    }
  /* VAL is now MARK (or NULL); it becomes the new chain head.  */
  all_values = val;
}
1665
/* Free all the values that have been allocated (except for those released).
   Call after each command, successful or not.
   In practice this is called before each command, which is sufficient.  */

void
free_all_values (void)
{
  struct value *val;
  struct value *next;

  for (val = all_values; val; val = next)
    {
      next = val->next;
      val->released = 1;
      value_free (val);
    }

  all_values = 0;
}
1685
1686 /* Frees all the elements in a chain of values. */
1687
1688 void
1689 free_value_chain (struct value *v)
1690 {
1691 struct value *next;
1692
1693 for (; v; v = next)
1694 {
1695 next = value_next (v);
1696 value_free (v);
1697 }
1698 }
1699
1700 /* Remove VAL from the chain all_values
1701 so it will not be freed automatically. */
1702
1703 void
1704 release_value (struct value *val)
1705 {
1706 struct value *v;
1707
1708 if (all_values == val)
1709 {
1710 all_values = val->next;
1711 val->next = NULL;
1712 val->released = 1;
1713 return;
1714 }
1715
1716 for (v = all_values; v; v = v->next)
1717 {
1718 if (v->next == val)
1719 {
1720 v->next = val->next;
1721 val->next = NULL;
1722 val->released = 1;
1723 break;
1724 }
1725 }
1726 }
1727
/* If the value is not already released, release it.
   If the value is already released, increment its reference count.
   That is, this function ensures that the value is released from the
   value chain and that the caller owns a reference to it.  */

void
release_value_or_incref (struct value *val)
{
  if (val->released)
    value_incref (val);
  else
    release_value (val);
}
1741
/* Release all values up to mark.  Returns the head of the released
   sublist (the values allocated since MARK), detached from the
   chain.  NOTE(review): if MARK is the current chain head (nothing
   allocated since the mark) the loop never matches and the entire
   chain is released — callers appear to avoid that case; confirm
   before relying on it.  */
struct value *
value_release_to_mark (const struct value *mark)
{
  struct value *val;
  struct value *next;

  for (val = next = all_values; next; next = next->next)
    {
      if (next->next == mark)
	{
	  /* Detach everything before MARK and hand it to the caller.  */
	  all_values = next->next;
	  next->next = NULL;
	  return val;
	}
      next->released = 1;
    }
  all_values = 0;
  return val;
}
1762
/* Return a copy of the value ARG.
   It contains the same contents, for same memory address,
   but it's a different block of storage.  */

struct value *
value_copy (struct value *arg)
{
  struct type *encl_type = value_enclosing_type (arg);
  struct value *val;

  /* Preserve laziness: copying a lazy value must not force a fetch.  */
  if (value_lazy (arg))
    val = allocate_value_lazy (encl_type);
  else
    val = allocate_value (encl_type);
  val->type = arg->type;
  VALUE_LVAL (val) = VALUE_LVAL (arg);
  val->location = arg->location;
  val->offset = arg->offset;
  val->bitpos = arg->bitpos;
  val->bitsize = arg->bitsize;
  val->lazy = arg->lazy;
  val->embedded_offset = value_embedded_offset (arg);
  val->pointed_to_offset = arg->pointed_to_offset;
  val->modifiable = arg->modifiable;
  if (!value_lazy (val))
    {
      memcpy (value_contents_all_raw (val), value_contents_all_raw (arg),
	      TYPE_LENGTH (value_enclosing_type (arg)));

    }
  /* Deep-copy the availability metadata.  */
  val->unavailable = VEC_copy (range_s, arg->unavailable);
  val->optimized_out = VEC_copy (range_s, arg->optimized_out);
  set_value_parent (val, arg->parent);
  if (VALUE_LVAL (val) == lval_computed)
    {
      const struct lval_funcs *funcs = val->location.computed.funcs;

      /* Give the computed-value implementation a chance to clone its
	 closure rather than sharing it.  */
      if (funcs->copy_closure)
	val->location.computed.closure = funcs->copy_closure (val);
    }
  return val;
}
1805
/* Return a "const" and/or "volatile" qualified version of the value V.
   If CNST is true, then the returned value will be qualified with
   "const".
   if VOLTL is true, then the returned value will be qualified with
   "volatile".  V itself is not modified; a qualified copy is
   returned.  */

struct value *
make_cv_value (int cnst, int voltl, struct value *v)
{
  struct type *val_type = value_type (v);
  struct type *enclosing_type = value_enclosing_type (v);
  struct value *cv_val = value_copy (v);

  /* Qualify both the apparent and the enclosing type so the copy is
     consistent.  */
  deprecated_set_value_type (cv_val,
			     make_cv_type (cnst, voltl, val_type, NULL));
  set_value_enclosing_type (cv_val,
			    make_cv_type (cnst, voltl, enclosing_type, NULL));

  return cv_val;
}
1826
/* Return a version of ARG that is non-lvalue.  If ARG is already
   not_lval it is returned unchanged; otherwise a detached copy of its
   contents is made.  */

struct value *
value_non_lval (struct value *arg)
{
  if (VALUE_LVAL (arg) != not_lval)
    {
      struct type *enc_type = value_enclosing_type (arg);
      struct value *val = allocate_value (enc_type);

      /* value_contents_all fetches ARG if it is lazy and throws if
	 unavailable/optimized out.  */
      memcpy (value_contents_all_raw (val), value_contents_all (arg),
	      TYPE_LENGTH (enc_type));
      val->type = arg->type;
      set_value_embedded_offset (val, value_embedded_offset (arg));
      set_value_pointed_to_offset (val, value_pointed_to_offset (arg));
      return val;
    }
  return arg;
}
1846
/* Write contents of V at ADDR and set its lval type to be LVAL_MEMORY.
   V must currently be a not_lval value.  */

void
value_force_lval (struct value *v, CORE_ADDR addr)
{
  gdb_assert (VALUE_LVAL (v) == not_lval);

  write_memory (addr, value_contents_raw (v), TYPE_LENGTH (value_type (v)));
  v->lval = lval_memory;
  v->location.address = addr;
}
1858
/* Make COMPONENT's location the same as WHOLE's, adjusting for the
   component relationship (internalvar components get their own lval
   kind, computed closures may be cloned).  */

void
set_value_component_location (struct value *component,
			      const struct value *whole)
{
  struct type *type;

  gdb_assert (whole->lval != lval_xcallable);

  if (whole->lval == lval_internalvar)
    VALUE_LVAL (component) = lval_internalvar_component;
  else
    VALUE_LVAL (component) = whole->lval;

  component->location = whole->location;
  if (whole->lval == lval_computed)
    {
      const struct lval_funcs *funcs = whole->location.computed.funcs;

      if (funcs->copy_closure)
	component->location.computed.closure = funcs->copy_closure (whole);
    }

  /* If type has a dynamic resolved location property
     update it's value address.  */
  type = value_type (whole);
  if (NULL != TYPE_DATA_LOCATION (type)
      && TYPE_DATA_LOCATION_KIND (type) == PROP_CONST)
    set_value_address (component, TYPE_DATA_LOCATION_ADDR (type));
}
1888
/* Access to the value history.  */

/* Record a new value in the value history.
   Returns the absolute history index of the entry.  The history takes
   its own reference to VAL, which also becomes unmodifiable.  */

int
record_latest_value (struct value *val)
{
  int i;

  /* We don't want this value to have anything to do with the inferior anymore.
     In particular, "set $1 = 50" should not affect the variable from which
     the value was taken, and fast watchpoints should be able to assume that
     a value on the value history never changes.  */
  if (value_lazy (val))
    value_fetch_lazy (val);
  /* We preserve VALUE_LVAL so that the user can find out where it was fetched
     from.  This is a bit dubious, because then *&$1 does not just return $1
     but the current contents of that location.  c'est la vie...  */
  val->modifiable = 0;

  /* The value may have already been released, in which case we're adding a
     new reference for its entry in the history.  That is why we call
     release_value_or_incref here instead of release_value.  */
  release_value_or_incref (val);

  /* Here we treat value_history_count as origin-zero
     and applying to the value being stored now.  */

  /* History storage grows in fixed-size chunks; start a new chunk when
     the previous one is full.  */
  i = value_history_count % VALUE_HISTORY_CHUNK;
  if (i == 0)
    {
      struct value_history_chunk *newobj = XCNEW (struct value_history_chunk);

      newobj->next = value_history_chain;
      value_history_chain = newobj;
    }

  value_history_chain->values[i] = val;

  /* Now we regard value_history_count as origin-one
     and applying to the value just stored.  */

  return ++value_history_count;
}
1934
/* Return a copy of the value in the history with sequence number NUM.
   NUM <= 0 is relative to the most recent entry ($$0, $$1, ...).  */

struct value *
access_value_history (int num)
{
  struct value_history_chunk *chunk;
  int i;
  int absnum = num;

  /* Convert a relative (non-positive) number to an absolute one.  */
  if (absnum <= 0)
    absnum += value_history_count;

  if (absnum <= 0)
    {
      if (num == 0)
	error (_("The history is empty."));
      else if (num == 1)
	error (_("There is only one value in the history."));
      else
	error (_("History does not go back to $$%d."), -num);
    }
  if (absnum > value_history_count)
    error (_("History has not yet reached $%d."), absnum);

  absnum--;

  /* Now absnum is always absolute and origin zero.  */

  /* The chunk list is newest-first, so skip past the chunks that were
     allocated after the one holding ABSNUM.  */
  chunk = value_history_chain;
  for (i = (value_history_count - 1) / VALUE_HISTORY_CHUNK
	 - absnum / VALUE_HISTORY_CHUNK;
       i > 0; i--)
    chunk = chunk->next;

  return value_copy (chunk->values[absnum % VALUE_HISTORY_CHUNK]);
}
1971
/* Implement the "show values" command: print ten history values,
   either around a given entry, or continuing from the previous
   invocation.  */

static void
show_values (char *num_exp, int from_tty)
{
  int i;
  struct value *val;
  /* Persisted across calls so "show values +" (and a bare repeat)
     continues where the last listing stopped.  */
  static int num = 1;

  if (num_exp)
    {
      /* "show values +" should print from the stored position.
	 "show values <exp>" should print around value number <exp>.
	 Subtracting 5 centers the ten-value window on <exp>.  */
      if (num_exp[0] != '+' || num_exp[1] != '\0')
	num = parse_and_eval_long (num_exp) - 5;
    }
  else
    {
      /* "show values" means print the last 10 values.  */
      num = value_history_count - 9;
    }

  if (num <= 0)
    num = 1;

  for (i = num; i < num + 10 && i <= value_history_count; i++)
    {
      struct value_print_options opts;

      val = access_value_history (i);
      printf_filtered (("$%d = "), i);
      get_user_print_options (&opts);
      value_print (val, gdb_stdout, &opts);
      printf_filtered (("\n"));
    }

  /* The next "show values +" should start after what we just printed.  */
  num += 10;

  /* Hitting just return after this command should do the same thing as
     "show values +".  If num_exp is null, this is unnecessary, since
     "show values +" is not useful after "show values".  */
  if (from_tty && num_exp)
    {
      num_exp[0] = '+';
      num_exp[1] = '\0';
    }
}
2018 \f
/* The different kinds of content an internal variable can hold.  */

enum internalvar_kind
{
  /* The internal variable is empty.  */
  INTERNALVAR_VOID,

  /* The value of the internal variable is provided directly as
     a GDB value object.  */
  INTERNALVAR_VALUE,

  /* A fresh value is computed via a call-back routine on every
     access to the internal variable.  */
  INTERNALVAR_MAKE_VALUE,

  /* The internal variable holds a GDB internal convenience function.  */
  INTERNALVAR_FUNCTION,

  /* The variable holds an integer value.  */
  INTERNALVAR_INTEGER,

  /* The variable holds a GDB-provided string.  */
  INTERNALVAR_STRING,
};

/* Kind-specific payload of an internal variable; which member is
   active is determined by the enclosing internalvar's KIND.  */

union internalvar_data
{
  /* A value object used with INTERNALVAR_VALUE.  */
  struct value *value;

  /* The call-back routine used with INTERNALVAR_MAKE_VALUE.  */
  struct
  {
    /* The functions to call.  */
    const struct internalvar_funcs *functions;

    /* The function's user-data.  */
    void *data;
  } make_value;

  /* The internal function used with INTERNALVAR_FUNCTION.  */
  struct
  {
    struct internal_function *function;
    /* True if this is the canonical name for the function.  */
    int canonical;
  } fn;

  /* An integer value used with INTERNALVAR_INTEGER.  */
  struct
  {
    /* If type is non-NULL, it will be used as the type to generate
       a value for this internal variable.  If type is NULL, a default
       integer type for the architecture is used.  */
    struct type *type;
    LONGEST val;
  } integer;

  /* A string value used with INTERNALVAR_STRING.  */
  char *string;
};

/* Internal variables.  These are variables within the debugger
   that hold values assigned by debugger commands.
   The user refers to them with a '$' prefix
   that does not appear in the variable names stored internally.  */

struct internalvar
{
  struct internalvar *next;
  char *name;

  /* We support various different kinds of content of an internal variable.
     enum internalvar_kind specifies the kind, and union internalvar_data
     provides the data associated with this particular kind.  */

  enum internalvar_kind kind;

  union internalvar_data u;
};

/* Singly-linked list of all internal variables, newest first.  */

static struct internalvar *internalvars;
2099
/* If the variable does not already exist create it and give it the
   value given.  If no value is given then the default is zero.
   Implements the "init-if-undefined" command; ARGS must be an
   assignment expression whose lvalue is an internal variable.  */
static void
init_if_undefined_command (char* args, int from_tty)
{
  struct internalvar* intvar;

  /* Parse the expression - this is taken from set_command().  */
  expression_up expr = parse_expression (args);

  /* Validate the expression.
     Was the expression an assignment?
     Or even an expression at all?  */
  if (expr->nelts == 0 || expr->elts[0].opcode != BINOP_ASSIGN)
    error (_("Init-if-undefined requires an assignment expression."));

  /* Extract the variable from the parsed expression.
     In the case of an assign the lvalue will be in elts[1] and elts[2].  */
  if (expr->elts[1].opcode != OP_INTERNALVAR)
    error (_("The first parameter to init-if-undefined "
	     "should be a GDB variable."));
  intvar = expr->elts[2].internalvar;

  /* Only evaluate the expression if the lvalue is void.
     This may still fail if the expresssion is invalid.  */
  if (intvar->kind == INTERNALVAR_VOID)
    evaluate_expression (expr.get ());
}
2128
2129
2130 /* Look up an internal variable with name NAME. NAME should not
2131 normally include a dollar sign.
2132
2133 If the specified internal variable does not exist,
2134 the return value is NULL. */
2135
2136 struct internalvar *
2137 lookup_only_internalvar (const char *name)
2138 {
2139 struct internalvar *var;
2140
2141 for (var = internalvars; var; var = var->next)
2142 if (strcmp (var->name, name) == 0)
2143 return var;
2144
2145 return NULL;
2146 }
2147
2148 /* Complete NAME by comparing it to the names of internal
2149 variables. */
2150
2151 void
2152 complete_internalvar (completion_tracker &tracker, const char *name)
2153 {
2154 struct internalvar *var;
2155 int len;
2156
2157 len = strlen (name);
2158
2159 for (var = internalvars; var; var = var->next)
2160 if (strncmp (var->name, name, len) == 0)
2161 {
2162 gdb::unique_xmalloc_ptr<char> copy (xstrdup (var->name));
2163
2164 tracker.add_completion (std::move (copy));
2165 }
2166 }
2167
2168 /* Create an internal variable with name NAME and with a void value.
2169 NAME should not normally include a dollar sign. */
2170
2171 struct internalvar *
2172 create_internalvar (const char *name)
2173 {
2174 struct internalvar *var = XNEW (struct internalvar);
2175
2176 var->name = concat (name, (char *)NULL);
2177 var->kind = INTERNALVAR_VOID;
2178 var->next = internalvars;
2179 internalvars = var;
2180 return var;
2181 }
2182
/* Create an internal variable with name NAME and register FUNCS as
   the callbacks that value_of_internalvar uses to create a value
   whenever this variable is referenced.  NAME should not normally
   include a dollar sign.  DATA is passed uninterpreted to the FUNCS
   callbacks when they are called.  (An earlier version of this
   comment described a CLEANUP argument that no longer exists.)  */

struct internalvar *
create_internalvar_type_lazy (const char *name,
			      const struct internalvar_funcs *funcs,
			      void *data)
{
  struct internalvar *var = create_internalvar (name);

  var->kind = INTERNALVAR_MAKE_VALUE;
  var->u.make_value.functions = funcs;
  var->u.make_value.data = data;
  return var;
}
2202
2203 /* See documentation in value.h. */
2204
2205 int
2206 compile_internalvar_to_ax (struct internalvar *var,
2207 struct agent_expr *expr,
2208 struct axs_value *value)
2209 {
2210 if (var->kind != INTERNALVAR_MAKE_VALUE
2211 || var->u.make_value.functions->compile_to_ax == NULL)
2212 return 0;
2213
2214 var->u.make_value.functions->compile_to_ax (var, expr, value,
2215 var->u.make_value.data);
2216 return 1;
2217 }
2218
2219 /* Look up an internal variable with name NAME. NAME should not
2220 normally include a dollar sign.
2221
2222 If the specified internal variable does not exist,
2223 one is created, with a void value. */
2224
2225 struct internalvar *
2226 lookup_internalvar (const char *name)
2227 {
2228 struct internalvar *var;
2229
2230 var = lookup_only_internalvar (name);
2231 if (var)
2232 return var;
2233
2234 return create_internalvar (name);
2235 }
2236
2237 /* Return current value of internal variable VAR. For variables that
2238 are not inherently typed, use a value type appropriate for GDBARCH. */
2239
struct value *
value_of_internalvar (struct gdbarch *gdbarch, struct internalvar *var)
{
  struct value *val;
  struct trace_state_variable *tsv;

  /* If there is a trace state variable of the same name, assume that
     is what we really want to see.  */
  tsv = find_trace_state_variable (var->name);
  if (tsv)
    {
      tsv->value_known = target_get_trace_state_variable_value (tsv->number,
								&(tsv->value));
      /* If the target could not report the value, fall back to a
	 void value rather than erroring out.  */
      if (tsv->value_known)
	val = value_from_longest (builtin_type (gdbarch)->builtin_int64,
				  tsv->value);
      else
	val = allocate_value (builtin_type (gdbarch)->builtin_void);
      return val;
    }

  switch (var->kind)
    {
    case INTERNALVAR_VOID:
      val = allocate_value (builtin_type (gdbarch)->builtin_void);
      break;

    case INTERNALVAR_FUNCTION:
      val = allocate_value (builtin_type (gdbarch)->internal_fn);
      break;

    case INTERNALVAR_INTEGER:
      /* An integer without a recorded type defaults to GDBARCH's
	 plain int.  */
      if (!var->u.integer.type)
	val = value_from_longest (builtin_type (gdbarch)->builtin_int,
				  var->u.integer.val);
      else
	val = value_from_longest (var->u.integer.type, var->u.integer.val);
      break;

    case INTERNALVAR_STRING:
      val = value_cstring (var->u.string, strlen (var->u.string),
			   builtin_type (gdbarch)->builtin_char);
      break;

    case INTERNALVAR_VALUE:
      /* Hand out a copy so that callers can't mutate the stored
	 value behind our back; un-lazy it first.  */
      val = value_copy (var->u.value);
      if (value_lazy (val))
	value_fetch_lazy (val);
      break;

    case INTERNALVAR_MAKE_VALUE:
      /* Lazily computed variable: delegate to the registered
	 make_value callback.  */
      val = (*var->u.make_value.functions->make_value) (gdbarch, var,
							var->u.make_value.data);
      break;

    default:
      internal_error (__FILE__, __LINE__, _("bad kind"));
    }

  /* Change the VALUE_LVAL to lval_internalvar so that future operations
     on this value go back to affect the original internal variable.

     Do not do this for INTERNALVAR_MAKE_VALUE variables, as those have
     no underlying modifyable state in the internal variable.

     Likewise, if the variable's value is a computed lvalue, we want
     references to it to produce another computed lvalue, where
     references and assignments actually operate through the
     computed value's functions.

     This means that internal variables with computed values
     behave a little differently from other internal variables:
     assignments to them don't just replace the previous value
     altogether.  At the moment, this seems like the behavior we
     want.  */

  if (var->kind != INTERNALVAR_MAKE_VALUE
      && val->lval != lval_computed)
    {
      VALUE_LVAL (val) = lval_internalvar;
      VALUE_INTERNALVAR (val) = var;
    }

  return val;
}
2325
2326 int
2327 get_internalvar_integer (struct internalvar *var, LONGEST *result)
2328 {
2329 if (var->kind == INTERNALVAR_INTEGER)
2330 {
2331 *result = var->u.integer.val;
2332 return 1;
2333 }
2334
2335 if (var->kind == INTERNALVAR_VALUE)
2336 {
2337 struct type *type = check_typedef (value_type (var->u.value));
2338
2339 if (TYPE_CODE (type) == TYPE_CODE_INT)
2340 {
2341 *result = value_as_long (var->u.value);
2342 return 1;
2343 }
2344 }
2345
2346 return 0;
2347 }
2348
2349 static int
2350 get_internalvar_function (struct internalvar *var,
2351 struct internal_function **result)
2352 {
2353 switch (var->kind)
2354 {
2355 case INTERNALVAR_FUNCTION:
2356 *result = var->u.fn.function;
2357 return 1;
2358
2359 default:
2360 return 0;
2361 }
2362 }
2363
/* Write NEWVAL into a sub-component of internal variable VAR, which
   must currently hold a value (INTERNALVAR_VALUE).  OFFSET is in
   memory units from the start of the value's contents; if BITSIZE is
   non-zero the destination is a bitfield at BITPOS/BITSIZE,
   otherwise NEWVAL's full contents are copied.  */

void
set_internalvar_component (struct internalvar *var,
			   LONGEST offset, LONGEST bitpos,
			   LONGEST bitsize, struct value *newval)
{
  gdb_byte *addr;
  struct gdbarch *arch;
  int unit_size;

  switch (var->kind)
    {
    case INTERNALVAR_VALUE:
      addr = value_contents_writeable (var->u.value);
      arch = get_value_arch (var->u.value);
      /* Memory-unit size matters for targets whose addressable unit
	 is not one byte.  */
      unit_size = gdbarch_addressable_memory_unit_size (arch);

      if (bitsize)
	modify_field (value_type (var->u.value), addr + offset,
		      value_as_long (newval), bitpos, bitsize);
      else
	memcpy (addr + offset * unit_size, value_contents (newval),
		TYPE_LENGTH (value_type (newval)));
      break;

    default:
      /* We can never get a component of any other kind.  */
      internal_error (__FILE__, __LINE__, _("set_internalvar_component"));
    }
}
2393
/* Set internal variable VAR from VAL.  Void values clear the
   variable; internal-function values copy the (non-canonical)
   function reference; anything else is stored as a private,
   non-lazy copy of VAL.  Errors out if VAR is the canonical holder
   of a convenience function.  */

void
set_internalvar (struct internalvar *var, struct value *val)
{
  enum internalvar_kind new_kind;
  union internalvar_data new_data = { 0 };

  if (var->kind == INTERNALVAR_FUNCTION && var->u.fn.canonical)
    error (_("Cannot overwrite convenience function %s"), var->name);

  /* Prepare new contents.  */
  switch (TYPE_CODE (check_typedef (value_type (val))))
    {
    case TYPE_CODE_VOID:
      new_kind = INTERNALVAR_VOID;
      break;

    case TYPE_CODE_INTERNAL_FUNCTION:
      gdb_assert (VALUE_LVAL (val) == lval_internalvar);
      new_kind = INTERNALVAR_FUNCTION;
      get_internalvar_function (VALUE_INTERNALVAR (val),
				&new_data.fn.function);
      /* Copies created here are never canonical.  */
      break;

    default:
      new_kind = INTERNALVAR_VALUE;
      new_data.value = value_copy (val);
      new_data.value->modifiable = 1;

      /* Force the value to be fetched from the target now, to avoid problems
	 later when this internalvar is referenced and the target is gone or
	 has changed.  */
      if (value_lazy (new_data.value))
	value_fetch_lazy (new_data.value);

      /* Release the value from the value chain to prevent it from being
	 deleted by free_all_values.  From here on this function should not
	 call error () until new_data is installed into the var->u to avoid
	 leaking memory.  */
      release_value (new_data.value);

      /* Internal variables which are created from values with a dynamic
	 location don't need the location property of the origin anymore.
	 The resolved dynamic location is used prior then any other address
	 when accessing the value.
	 If we keep it, we would still refer to the origin value.
	 Remove the location property in case it exist.  */
      remove_dyn_prop (DYN_PROP_DATA_LOCATION, value_type (new_data.value));

      break;
    }

  /* Clean up old contents.  */
  clear_internalvar (var);

  /* Switch over.  */
  var->kind = new_kind;
  var->u = new_data;
  /* End code which must not call error().  */
}
2454
2455 void
2456 set_internalvar_integer (struct internalvar *var, LONGEST l)
2457 {
2458 /* Clean up old contents. */
2459 clear_internalvar (var);
2460
2461 var->kind = INTERNALVAR_INTEGER;
2462 var->u.integer.type = NULL;
2463 var->u.integer.val = l;
2464 }
2465
2466 void
2467 set_internalvar_string (struct internalvar *var, const char *string)
2468 {
2469 /* Clean up old contents. */
2470 clear_internalvar (var);
2471
2472 var->kind = INTERNALVAR_STRING;
2473 var->u.string = xstrdup (string);
2474 }
2475
2476 static void
2477 set_internalvar_function (struct internalvar *var, struct internal_function *f)
2478 {
2479 /* Clean up old contents. */
2480 clear_internalvar (var);
2481
2482 var->kind = INTERNALVAR_FUNCTION;
2483 var->u.fn.function = f;
2484 var->u.fn.canonical = 1;
2485 /* Variables installed here are always the canonical version. */
2486 }
2487
2488 void
2489 clear_internalvar (struct internalvar *var)
2490 {
2491 /* Clean up old contents. */
2492 switch (var->kind)
2493 {
2494 case INTERNALVAR_VALUE:
2495 value_free (var->u.value);
2496 break;
2497
2498 case INTERNALVAR_STRING:
2499 xfree (var->u.string);
2500 break;
2501
2502 case INTERNALVAR_MAKE_VALUE:
2503 if (var->u.make_value.functions->destroy != NULL)
2504 var->u.make_value.functions->destroy (var->u.make_value.data);
2505 break;
2506
2507 default:
2508 break;
2509 }
2510
2511 /* Reset to void kind. */
2512 var->kind = INTERNALVAR_VOID;
2513 }
2514
/* Return the name of internal variable VAR.  The string is owned by
   VAR; the caller must not free it.  */

char *
internalvar_name (const struct internalvar *var)
{
  return var->name;
}
2520
2521 static struct internal_function *
2522 create_internal_function (const char *name,
2523 internal_function_fn handler, void *cookie)
2524 {
2525 struct internal_function *ifn = XNEW (struct internal_function);
2526
2527 ifn->name = xstrdup (name);
2528 ifn->handler = handler;
2529 ifn->cookie = cookie;
2530 return ifn;
2531 }
2532
2533 char *
2534 value_internal_function_name (struct value *val)
2535 {
2536 struct internal_function *ifn;
2537 int result;
2538
2539 gdb_assert (VALUE_LVAL (val) == lval_internalvar);
2540 result = get_internalvar_function (VALUE_INTERNALVAR (val), &ifn);
2541 gdb_assert (result);
2542
2543 return ifn->name;
2544 }
2545
2546 struct value *
2547 call_internal_function (struct gdbarch *gdbarch,
2548 const struct language_defn *language,
2549 struct value *func, int argc, struct value **argv)
2550 {
2551 struct internal_function *ifn;
2552 int result;
2553
2554 gdb_assert (VALUE_LVAL (func) == lval_internalvar);
2555 result = get_internalvar_function (VALUE_INTERNALVAR (func), &ifn);
2556 gdb_assert (result);
2557
2558 return (*ifn->handler) (gdbarch, language, ifn->cookie, argc, argv);
2559 }
2560
2561 /* The 'function' command. This does nothing -- it is just a
2562 placeholder to let "help function NAME" work. This is also used as
2563 the implementation of the sub-command that is created when
2564 registering an internal function. */
static void
function_command (char *command, int from_tty)
{
  /* Do nothing.  COMMAND and FROM_TTY are deliberately ignored.  */
}
2570
2571 /* Clean up if an internal function's command is destroyed. */
static void
function_destroyer (struct cmd_list_element *self, void *ignore)
{
  /* The name was xstrdup'd in add_internal_function; the doc string
     is assumed to be heap-allocated by add_internal_function's
     caller (NOTE(review): confirm against callers).  Cast away const
     to match xfree's signature.  */
  xfree ((char *) self->name);
  xfree ((char *) self->doc);
}
2578
2579 /* Add a new internal function. NAME is the name of the function; DOC
2580 is a documentation string describing the function. HANDLER is
2581 called when the function is invoked. COOKIE is an arbitrary
2582 pointer which is passed to HANDLER and is intended for "user
2583 data". */
void
add_internal_function (const char *name, const char *doc,
		       internal_function_fn handler, void *cookie)
{
  struct cmd_list_element *cmd;
  struct internal_function *ifn;
  struct internalvar *var = lookup_internalvar (name);

  /* Bind the function to the convenience variable of the same
     name.  */
  ifn = create_internal_function (name, handler, cookie);
  set_internalvar_function (var, ifn);

  /* Register a sub-command so "help function NAME" works.  The
     command gets its own copy of NAME; DOC is stored as-is and later
     freed by function_destroyer, so ownership passes to the
     command.  */
  cmd = add_cmd (xstrdup (name), no_class, function_command, (char *) doc,
		 &functionlist);
  cmd->destroyer = function_destroyer;
}
2599
2600 /* Update VALUE before discarding OBJFILE. COPIED_TYPES is used to
2601 prevent cycles / duplicates. */
2602
2603 void
2604 preserve_one_value (struct value *value, struct objfile *objfile,
2605 htab_t copied_types)
2606 {
2607 if (TYPE_OBJFILE (value->type) == objfile)
2608 value->type = copy_type_recursive (objfile, value->type, copied_types);
2609
2610 if (TYPE_OBJFILE (value->enclosing_type) == objfile)
2611 value->enclosing_type = copy_type_recursive (objfile,
2612 value->enclosing_type,
2613 copied_types);
2614 }
2615
2616 /* Likewise for internal variable VAR. */
2617
2618 static void
2619 preserve_one_internalvar (struct internalvar *var, struct objfile *objfile,
2620 htab_t copied_types)
2621 {
2622 switch (var->kind)
2623 {
2624 case INTERNALVAR_INTEGER:
2625 if (var->u.integer.type && TYPE_OBJFILE (var->u.integer.type) == objfile)
2626 var->u.integer.type
2627 = copy_type_recursive (objfile, var->u.integer.type, copied_types);
2628 break;
2629
2630 case INTERNALVAR_VALUE:
2631 preserve_one_value (var->u.value, objfile, copied_types);
2632 break;
2633 }
2634 }
2635
2636 /* Update the internal variables and value history when OBJFILE is
2637 discarded; we must copy the types out of the objfile. New global types
2638 will be created for every convenience variable which currently points to
2639 this objfile's types, and the convenience variables will be adjusted to
2640 use the new global types. */
2641
2642 void
2643 preserve_values (struct objfile *objfile)
2644 {
2645 htab_t copied_types;
2646 struct value_history_chunk *cur;
2647 struct internalvar *var;
2648 int i;
2649
2650 /* Create the hash table. We allocate on the objfile's obstack, since
2651 it is soon to be deleted. */
2652 copied_types = create_copied_types_hash (objfile);
2653
2654 for (cur = value_history_chain; cur; cur = cur->next)
2655 for (i = 0; i < VALUE_HISTORY_CHUNK; i++)
2656 if (cur->values[i])
2657 preserve_one_value (cur->values[i], objfile, copied_types);
2658
2659 for (var = internalvars; var; var = var->next)
2660 preserve_one_internalvar (var, objfile, copied_types);
2661
2662 preserve_ext_lang_values (objfile, copied_types);
2663
2664 htab_delete (copied_types);
2665 }
2666
/* List all convenience variables and their current values; prints a
   help message if none are defined.  Presumably the implementation
   of "show convenience" (registered outside this chunk).  IGNORE and
   FROM_TTY are unused.  */

static void
show_convenience (char *ignore, int from_tty)
{
  struct gdbarch *gdbarch = get_current_arch ();
  struct internalvar *var;
  int varseen = 0;		/* Set once any variable is printed.  */
  struct value_print_options opts;

  get_user_print_options (&opts);
  for (var = internalvars; var; var = var->next)
    {

      if (!varseen)
	{
	  varseen = 1;
	}
      printf_filtered (("$%s = "), var->name);

      /* Printing the value may error (e.g. target gone); show the
	 error inline and keep going with the remaining variables.  */
      TRY
	{
	  struct value *val;

	  val = value_of_internalvar (gdbarch, var);
	  value_print (val, gdb_stdout, &opts);
	}
      CATCH (ex, RETURN_MASK_ERROR)
	{
	  fprintf_filtered (gdb_stdout, _("<error: %s>"), ex.message);
	}
      END_CATCH

      printf_filtered (("\n"));
    }
  if (!varseen)
    {
      /* This text does not mention convenience functions on purpose.
	 The user can't create them except via Python, and if Python support
	 is installed this message will never be printed ($_streq will
	 exist).  */
      printf_unfiltered (_("No debugger convenience variables now defined.\n"
			   "Convenience variables have "
			   "names starting with \"$\";\n"
			   "use \"set\" as in \"set "
			   "$foo = 5\" to define them.\n"));
    }
}
2713 \f
2714 /* Return the TYPE_CODE_XMETHOD value corresponding to WORKER. */
2715
2716 struct value *
2717 value_of_xmethod (struct xmethod_worker *worker)
2718 {
2719 if (worker->value == NULL)
2720 {
2721 struct value *v;
2722
2723 v = allocate_value (builtin_type (target_gdbarch ())->xmethod);
2724 v->lval = lval_xcallable;
2725 v->location.xm_worker = worker;
2726 v->modifiable = 0;
2727 worker->value = v;
2728 }
2729
2730 return worker->value;
2731 }
2732
2733 /* Return the type of the result of TYPE_CODE_XMETHOD value METHOD. */
2734
2735 struct type *
2736 result_type_of_xmethod (struct value *method, int argc, struct value **argv)
2737 {
2738 gdb_assert (TYPE_CODE (value_type (method)) == TYPE_CODE_XMETHOD
2739 && method->lval == lval_xcallable && argc > 0);
2740
2741 return get_xmethod_result_type (method->location.xm_worker,
2742 argv[0], argv + 1, argc - 1);
2743 }
2744
2745 /* Call the xmethod corresponding to the TYPE_CODE_XMETHOD value METHOD. */
2746
2747 struct value *
2748 call_xmethod (struct value *method, int argc, struct value **argv)
2749 {
2750 gdb_assert (TYPE_CODE (value_type (method)) == TYPE_CODE_XMETHOD
2751 && method->lval == lval_xcallable && argc > 0);
2752
2753 return invoke_xmethod (method->location.xm_worker,
2754 argv[0], argv + 1, argc - 1);
2755 }
2756 \f
2757 /* Extract a value as a C number (either long or double).
2758 Knows how to convert fixed values to double, or
2759 floating values to long.
2760 Does not deallocate the value. */
2761
2762 LONGEST
2763 value_as_long (struct value *val)
2764 {
2765 /* This coerces arrays and functions, which is necessary (e.g.
2766 in disassemble_command). It also dereferences references, which
2767 I suspect is the most logical thing to do. */
2768 val = coerce_array (val);
2769 return unpack_long (value_type (val), value_contents (val));
2770 }
2771
2772 DOUBLEST
2773 value_as_double (struct value *val)
2774 {
2775 DOUBLEST foo;
2776 int inv;
2777
2778 foo = unpack_double (value_type (val), value_contents (val), &inv);
2779 if (inv)
2780 error (_("Invalid floating value found in program."));
2781 return foo;
2782 }
2783
2784 /* Extract a value as a C pointer. Does not deallocate the value.
2785 Note that val's type may not actually be a pointer; value_as_long
2786 handles all the cases. */
CORE_ADDR
value_as_address (struct value *val)
{
  struct gdbarch *gdbarch = get_type_arch (value_type (val));

  /* Assume a CORE_ADDR can fit in a LONGEST (for now).  Not sure
     whether we want this to be true eventually.  */
#if 0
  /* gdbarch_addr_bits_remove is wrong if we are being called for a
     non-address (e.g. argument to "signal", "info break", etc.), or
     for pointers to char, in which the low bits *are* significant.  */
  return gdbarch_addr_bits_remove (gdbarch, value_as_long (val));
#else

  /* There are several targets (IA-64, PowerPC, and others) which
     don't represent pointers to functions as simply the address of
     the function's entry point.  For example, on the IA-64, a
     function pointer points to a two-word descriptor, generated by
     the linker, which contains the function's entry point, and the
     value the IA-64 "global pointer" register should have --- to
     support position-independent code.  The linker generates
     descriptors only for those functions whose addresses are taken.

     On such targets, it's difficult for GDB to convert an arbitrary
     function address into a function pointer; it has to either find
     an existing descriptor for that function, or call malloc and
     build its own.  On some targets, it is impossible for GDB to
     build a descriptor at all: the descriptor must contain a jump
     instruction; data memory cannot be executed; and code memory
     cannot be modified.

     Upon entry to this function, if VAL is a value of type `function'
     (that is, TYPE_CODE (VALUE_TYPE (val)) == TYPE_CODE_FUNC), then
     value_address (val) is the address of the function.  This is what
     you'll get if you evaluate an expression like `main'.  The call
     to COERCE_ARRAY below actually does all the usual unary
     conversions, which includes converting values of type `function'
     to `pointer to function'.  This is the challenging conversion
     discussed above.  Then, `unpack_long' will convert that pointer
     back into an address.

     So, suppose the user types `disassemble foo' on an architecture
     with a strange function pointer representation, on which GDB
     cannot build its own descriptors, and suppose further that `foo'
     has no linker-built descriptor.  The address->pointer conversion
     will signal an error and prevent the command from running, even
     though the next step would have been to convert the pointer
     directly back into the same address.

     The following shortcut avoids this whole mess.  If VAL is a
     function, just return its address directly.  */
  if (TYPE_CODE (value_type (val)) == TYPE_CODE_FUNC
      || TYPE_CODE (value_type (val)) == TYPE_CODE_METHOD)
    return value_address (val);

  val = coerce_array (val);

  /* Some architectures (e.g. Harvard), map instruction and data
     addresses onto a single large unified address space.  For
     instance: An architecture may consider a large integer in the
     range 0x10000000 .. 0x1000ffff to already represent a data
     addresses (hence not need a pointer to address conversion) while
     a small integer would still need to be converted integer to
     pointer to address.  Just assume such architectures handle all
     integer conversions in a single function.  */

  /* JimB writes:

     I think INTEGER_TO_ADDRESS is a good idea as proposed --- but we
     must admonish GDB hackers to make sure its behavior matches the
     compiler's, whenever possible.

     In general, I think GDB should evaluate expressions the same way
     the compiler does.  When the user copies an expression out of
     their source code and hands it to a `print' command, they should
     get the same value the compiler would have computed.  Any
     deviation from this rule can cause major confusion and annoyance,
     and needs to be justified carefully.  In other words, GDB doesn't
     really have the freedom to do these conversions in clever and
     useful ways.

     AndrewC pointed out that users aren't complaining about how GDB
     casts integers to pointers; they are complaining that they can't
     take an address from a disassembly listing and give it to `x/i'.
     This is certainly important.

     Adding an architecture method like integer_to_address() certainly
     makes it possible for GDB to "get it right" in all circumstances
     --- the target has complete control over how things get done, so
     people can Do The Right Thing for their target without breaking
     anyone else.  The standard doesn't specify how integers get
     converted to pointers; usually, the ABI doesn't either, but
     ABI-specific code is a more reasonable place to handle it.  */

  /* Non-pointer, non-reference values go through the architecture's
     integer-to-address hook when one is provided.  */
  if (TYPE_CODE (value_type (val)) != TYPE_CODE_PTR
      && !TYPE_IS_REFERENCE (value_type (val))
      && gdbarch_integer_to_address_p (gdbarch))
    return gdbarch_integer_to_address (gdbarch, value_type (val),
				       value_contents (val));

  /* Otherwise just unpack the contents as an integer and treat it as
     an address.  */
  return unpack_long (value_type (val), value_contents (val));
#endif
}
2890 \f
2891 /* Unpack raw data (copied from debugee, target byte order) at VALADDR
2892 as a long, or as a double, assuming the raw data is described
2893 by type TYPE. Knows how to convert different sizes of values
2894 and can convert between fixed and floating point. We don't assume
2895 any alignment for the raw data. Return value is in host byte order.
2896
2897 If you want functions and arrays to be coerced to pointers, and
2898 references to be dereferenced, call value_as_long() instead.
2899
2900 C++: It is assumed that the front-end has taken care of
2901 all matters concerning pointers to members. A pointer
2902 to member which reaches here is considered to be equivalent
2903 to an INT (or some size). After all, it is only an offset. */
2904
LONGEST
unpack_long (struct type *type, const gdb_byte *valaddr)
{
  enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
  enum type_code code = TYPE_CODE (type);
  int len = TYPE_LENGTH (type);
  int nosign = TYPE_UNSIGNED (type);

  switch (code)
    {
    case TYPE_CODE_TYPEDEF:
      /* Resolve the typedef and retry on the underlying type.  */
      return unpack_long (check_typedef (type), valaddr);
    case TYPE_CODE_ENUM:
    case TYPE_CODE_FLAGS:
    case TYPE_CODE_BOOL:
    case TYPE_CODE_INT:
    case TYPE_CODE_CHAR:
    case TYPE_CODE_RANGE:
    case TYPE_CODE_MEMBERPTR:
      /* All the integer-like types: honor the signedness.  */
      if (nosign)
	return extract_unsigned_integer (valaddr, len, byte_order);
      else
	return extract_signed_integer (valaddr, len, byte_order);

    case TYPE_CODE_FLT:
      /* Truncates toward zero per the C cast rules.  */
      return (LONGEST) extract_typed_floating (valaddr, type);

    case TYPE_CODE_DECFLOAT:
      /* libdecnumber has a function to convert from decimal to integer, but
	 it doesn't work when the decimal number has a fractional part.  */
      return (LONGEST) decimal_to_doublest (valaddr, len, byte_order);

    case TYPE_CODE_PTR:
    case TYPE_CODE_REF:
    case TYPE_CODE_RVALUE_REF:
      /* Assume a CORE_ADDR can fit in a LONGEST (for now).  Not sure
	 whether we want this to be true eventually.  */
      return extract_typed_address (valaddr, type);

    default:
      error (_("Value can't be converted to integer."));
    }
  return 0;			/* Placate lint.  */
}
2949
2950 /* Return a double value from the specified type and address.
2951 INVP points to an int which is set to 0 for valid value,
2952 1 for invalid value (bad float format). In either case,
2953 the returned double is OK to use. Argument is in target
2954 format, result is in host format. */
2955
DOUBLEST
unpack_double (struct type *type, const gdb_byte *valaddr, int *invp)
{
  enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
  enum type_code code;
  int len;
  int nosign;

  *invp = 0;			/* Assume valid.  */
  type = check_typedef (type);
  code = TYPE_CODE (type);
  len = TYPE_LENGTH (type);
  nosign = TYPE_UNSIGNED (type);
  if (code == TYPE_CODE_FLT)
    {
      /* NOTE: cagney/2002-02-19: There was a test here to see if the
	 floating-point value was valid (using the macro
	 INVALID_FLOAT).  That test/macro have been removed.

	 It turns out that only the VAX defined this macro and then
	 only in a non-portable way.  Fixing the portability problem
	 wouldn't help since the VAX floating-point code is also badly
	 bit-rotten.  The target needs to add definitions for the
	 methods gdbarch_float_format and gdbarch_double_format - these
	 exactly describe the target floating-point format.  The
	 problem here is that the corresponding floatformat_vax_f and
	 floatformat_vax_d values these methods should be set to are
	 also not defined either.  Oops!

	 Hopefully someone will add both the missing floatformat
	 definitions and the new cases for floatformat_is_valid ().  */

      /* A malformed target float yields *INVP = 1 and 0.0, per the
	 function's contract.  */
      if (!floatformat_is_valid (floatformat_from_type (type), valaddr))
	{
	  *invp = 1;
	  return 0.0;
	}

      return extract_typed_floating (valaddr, type);
    }
  else if (code == TYPE_CODE_DECFLOAT)
    return decimal_to_doublest (valaddr, len, byte_order);
  else if (nosign)
    {
      /* Unsigned -- be sure we compensate for signed LONGEST.  */
      return (ULONGEST) unpack_long (type, valaddr);
    }
  else
    {
      /* Signed -- we are OK with unpack_long.  */
      return unpack_long (type, valaddr);
    }
}
3009
3010 /* Unpack raw data (copied from debugee, target byte order) at VALADDR
3011 as a CORE_ADDR, assuming the raw data is described by type TYPE.
3012 We don't assume any alignment for the raw data. Return value is in
3013 host byte order.
3014
3015 If you want functions and arrays to be coerced to pointers, and
3016 references to be dereferenced, call value_as_address() instead.
3017
3018 C++: It is assumed that the front-end has taken care of
3019 all matters concerning pointers to members. A pointer
3020 to member which reaches here is considered to be equivalent
3021 to an INT (or some size). After all, it is only an offset. */
3022
3023 CORE_ADDR
3024 unpack_pointer (struct type *type, const gdb_byte *valaddr)
3025 {
3026 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
3027 whether we want this to be true eventually. */
3028 return unpack_long (type, valaddr);
3029 }
3030
3031 \f
3032 /* Get the value of the FIELDNO'th field (which must be static) of
3033 TYPE. */
3034
struct value *
value_static_field (struct type *type, int fieldno)
{
  struct value *retval;

  switch (TYPE_FIELD_LOC_KIND (type, fieldno))
    {
    case FIELD_LOC_KIND_PHYSADDR:
      /* The debug info recorded the member's address directly.  */
      retval = value_at_lazy (TYPE_FIELD_TYPE (type, fieldno),
			      TYPE_FIELD_STATIC_PHYSADDR (type, fieldno));
      break;
    case FIELD_LOC_KIND_PHYSNAME:
    {
      /* Only the linkage name is recorded; look the symbol up.  */
      const char *phys_name = TYPE_FIELD_STATIC_PHYSNAME (type, fieldno);
      /* TYPE_FIELD_NAME (type, fieldno); */
      struct block_symbol sym = lookup_symbol (phys_name, 0, VAR_DOMAIN, 0);

      if (sym.symbol == NULL)
	{
	  /* With some compilers, e.g. HP aCC, static data members are
	     reported as non-debuggable symbols.  */
	  struct bound_minimal_symbol msym
	    = lookup_minimal_symbol (phys_name, NULL, NULL);

	  /* No symbol at all: report the member as optimized out.  */
	  if (!msym.minsym)
	    return allocate_optimized_out_value (type);
	  else
	    {
	      retval = value_at_lazy (TYPE_FIELD_TYPE (type, fieldno),
				      BMSYMBOL_VALUE_ADDRESS (msym));
	    }
	}
      else
	retval = value_of_variable (sym.symbol, sym.block);
      break;
    }
    default:
      gdb_assert_not_reached ("unexpected field location kind");
    }

  return retval;
}
3077
3078 /* Change the enclosing type of a value object VAL to NEW_ENCL_TYPE.
3079 You have to be careful here, since the size of the data area for the value
3080 is set by the length of the enclosing type. So if NEW_ENCL_TYPE is bigger
3081 than the old enclosing type, you have to allocate more space for the
3082 data. */
3083
3084 void
3085 set_value_enclosing_type (struct value *val, struct type *new_encl_type)
3086 {
3087 if (TYPE_LENGTH (new_encl_type) > TYPE_LENGTH (value_enclosing_type (val)))
3088 {
3089 check_type_length_before_alloc (new_encl_type);
3090 val->contents
3091 = (gdb_byte *) xrealloc (val->contents, TYPE_LENGTH (new_encl_type));
3092 }
3093
3094 val->enclosing_type = new_encl_type;
3095 }
3096
/* Given a value ARG1 (offset by OFFSET bytes)
   of a struct or union type ARG_TYPE,
   extract and return the value of one of its (non-static) fields.
   FIELDNO says which field.

   The result is a new struct value whose component location refers
   back to ARG1; it is lazy if ARG1 is lazy.  Four cases are handled:
   packed bitfields, base-class subobjects, fields with a constant
   dynamic data location, and plain data members.  */

struct value *
value_primitive_field (struct value *arg1, LONGEST offset,
		       int fieldno, struct type *arg_type)
{
  struct value *v;
  struct type *type;
  struct gdbarch *arch = get_value_arch (arg1);
  int unit_size = gdbarch_addressable_memory_unit_size (arch);

  arg_type = check_typedef (arg_type);
  type = TYPE_FIELD_TYPE (arg_type, fieldno);

  /* Call check_typedef on our type to make sure that, if TYPE
     is a TYPE_CODE_TYPEDEF, its length is set to the length
     of the target type instead of zero.  However, we do not
     replace the typedef type by the target type, because we want
     to keep the typedef in order to be able to print the type
     description correctly.  */
  check_typedef (type);

  if (TYPE_FIELD_BITSIZE (arg_type, fieldno))
    {
      /* Handle packed fields.

	 Create a new value for the bitfield, with bitpos and bitsize
	 set.  If possible, arrange offset and bitpos so that we can
	 do a single aligned read of the size of the containing type.
	 Otherwise, adjust offset to the byte containing the first
	 bit.  Assume that the address, offset, and embedded offset
	 are sufficiently aligned.  */

      LONGEST bitpos = TYPE_FIELD_BITPOS (arg_type, fieldno);
      LONGEST container_bitsize = TYPE_LENGTH (type) * 8;

      v = allocate_value_lazy (type);
      v->bitsize = TYPE_FIELD_BITSIZE (arg_type, fieldno);
      /* Prefer a bitpos relative to a whole containing word when the
	 field fits in one and the word fits in a LONGEST; otherwise
	 fall back to the byte containing the first bit.  */
      if ((bitpos % container_bitsize) + v->bitsize <= container_bitsize
	  && TYPE_LENGTH (type) <= (int) sizeof (LONGEST))
	v->bitpos = bitpos % container_bitsize;
      else
	v->bitpos = bitpos % 8;
      v->offset = (value_embedded_offset (arg1)
		   + offset
		   + (bitpos - v->bitpos) / 8);
      set_value_parent (v, arg1);
      /* If the parent's contents are already fetched, unpack the
	 bitfield now instead of leaving V lazy.  */
      if (!value_lazy (arg1))
	value_fetch_lazy (v);
    }
  else if (fieldno < TYPE_N_BASECLASSES (arg_type))
    {
      /* This field is actually a base subobject, so preserve the
	 entire object's contents for later references to virtual
	 bases, etc.  */
      LONGEST boffset;

      /* Lazy register values with offsets are not supported.  */
      if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
	value_fetch_lazy (arg1);

      /* We special case virtual inheritance here because this
	 requires access to the contents, which we would rather avoid
	 for references to ordinary fields of unavailable values.  */
      if (BASETYPE_VIA_VIRTUAL (arg_type, fieldno))
	boffset = baseclass_offset (arg_type, fieldno,
				    value_contents (arg1),
				    value_embedded_offset (arg1),
				    value_address (arg1),
				    arg1);
      else
	boffset = TYPE_FIELD_BITPOS (arg_type, fieldno) / 8;

      if (value_lazy (arg1))
	v = allocate_value_lazy (value_enclosing_type (arg1));
      else
	{
	  v = allocate_value (value_enclosing_type (arg1));
	  value_contents_copy_raw (v, 0, arg1, 0,
				   TYPE_LENGTH (value_enclosing_type (arg1)));
	}
      v->type = type;
      v->offset = value_offset (arg1);
      v->embedded_offset = offset + value_embedded_offset (arg1) + boffset;
    }
  else if (NULL != TYPE_DATA_LOCATION (type))
    {
      /* Field is a dynamic data member.  */

      gdb_assert (0 == offset);
      /* We expect an already resolved data location.  */
      gdb_assert (PROP_CONST == TYPE_DATA_LOCATION_KIND (type));
      /* For dynamic data types defer memory allocation
	 until we actual access the value.  */
      v = allocate_value_lazy (type);
    }
  else
    {
      /* Plain old data member.  Convert the field's bit position to a
	 byte (addressable-unit) offset within the parent.  */
      offset += (TYPE_FIELD_BITPOS (arg_type, fieldno)
		 / (HOST_CHAR_BIT * unit_size));

      /* Lazy register values with offsets are not supported.  */
      if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
	value_fetch_lazy (arg1);

      if (value_lazy (arg1))
	v = allocate_value_lazy (type);
      else
	{
	  v = allocate_value (type);
	  value_contents_copy_raw (v, value_embedded_offset (v),
				   arg1, value_embedded_offset (arg1) + offset,
				   type_length_units (type));
	}
      v->offset = (value_offset (arg1) + offset
		   + value_embedded_offset (arg1));
    }
  set_value_component_location (v, arg1);
  return v;
}
3221
/* Return field FIELDNO of the struct or union value ARG1, locating
   the field via ARG1's own (declared) type.  Convenience wrapper
   around value_primitive_field with a zero offset.  */

struct value *
value_field (struct value *arg1, int fieldno)
{
  struct type *arg1_type = value_type (arg1);

  return value_primitive_field (arg1, 0, fieldno, arg1_type);
}
3231
3232 /* Return a non-virtual function as a value.
3233 F is the list of member functions which contains the desired method.
3234 J is an index into F which provides the desired method.
3235
3236 We only use the symbol for its address, so be happy with either a
3237 full symbol or a minimal symbol. */
3238
3239 struct value *
3240 value_fn_field (struct value **arg1p, struct fn_field *f,
3241 int j, struct type *type,
3242 LONGEST offset)
3243 {
3244 struct value *v;
3245 struct type *ftype = TYPE_FN_FIELD_TYPE (f, j);
3246 const char *physname = TYPE_FN_FIELD_PHYSNAME (f, j);
3247 struct symbol *sym;
3248 struct bound_minimal_symbol msym;
3249
3250 sym = lookup_symbol (physname, 0, VAR_DOMAIN, 0).symbol;
3251 if (sym != NULL)
3252 {
3253 memset (&msym, 0, sizeof (msym));
3254 }
3255 else
3256 {
3257 gdb_assert (sym == NULL);
3258 msym = lookup_bound_minimal_symbol (physname);
3259 if (msym.minsym == NULL)
3260 return NULL;
3261 }
3262
3263 v = allocate_value (ftype);
3264 VALUE_LVAL (v) = lval_memory;
3265 if (sym)
3266 {
3267 set_value_address (v, BLOCK_START (SYMBOL_BLOCK_VALUE (sym)));
3268 }
3269 else
3270 {
3271 /* The minimal symbol might point to a function descriptor;
3272 resolve it to the actual code address instead. */
3273 struct objfile *objfile = msym.objfile;
3274 struct gdbarch *gdbarch = get_objfile_arch (objfile);
3275
3276 set_value_address (v,
3277 gdbarch_convert_from_func_ptr_addr
3278 (gdbarch, BMSYMBOL_VALUE_ADDRESS (msym), &current_target));
3279 }
3280
3281 if (arg1p)
3282 {
3283 if (type != value_type (*arg1p))
3284 *arg1p = value_ind (value_cast (lookup_pointer_type (type),
3285 value_addr (*arg1p)));
3286
3287 /* Move the `this' pointer according to the offset.
3288 VALUE_OFFSET (*arg1p) += offset; */
3289 }
3290
3291 return v;
3292 }
3293
3294 \f
3295
/* Unpack a bitfield of the specified FIELD_TYPE, from the object at
   VALADDR, and return it as a LONGEST.  The bitfield starts at BITPOS
   bits into the object and contains BITSIZE bits; a BITSIZE of zero
   means the field is not really a bitfield, and FIELD_TYPE's full
   length is read instead.

   Extracting bits depends on endianness of the machine.  Compute the
   number of least significant bits to discard.  For big endian machines,
   we compute the total number of bits in the anonymous object, subtract
   off the bit count from the MSB of the object to the MSB of the
   bitfield, then the size of the bitfield, which leaves the LSB discard
   count.  For little endian machines, the discard count is simply the
   number of bits from the LSB of the anonymous object to the LSB of the
   bitfield.

   If the field is signed, we also do sign extension.  */

static LONGEST
unpack_bits_as_long (struct type *field_type, const gdb_byte *valaddr,
		     LONGEST bitpos, LONGEST bitsize)
{
  enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (field_type));
  ULONGEST val;
  ULONGEST valmask;
  int lsbcount;
  LONGEST bytes_read;
  LONGEST read_offset;

  /* Read the minimum number of bytes required; there may not be
     enough bytes to read an entire ULONGEST.  */
  field_type = check_typedef (field_type);
  if (bitsize)
    bytes_read = ((bitpos % 8) + bitsize + 7) / 8;
  else
    bytes_read = TYPE_LENGTH (field_type);

  read_offset = bitpos / 8;

  val = extract_unsigned_integer (valaddr + read_offset,
				  bytes_read, byte_order);

  /* Extract bits.  See comment above.  */

  if (gdbarch_bits_big_endian (get_type_arch (field_type)))
    lsbcount = (bytes_read * 8 - bitpos % 8 - bitsize);
  else
    lsbcount = (bitpos % 8);
  val >>= lsbcount;

  /* If the field does not entirely fill a LONGEST, then zero the sign bits.
     If the field is signed, and is negative, then sign extend.  */

  if ((bitsize > 0) && (bitsize < 8 * (int) sizeof (val)))
    {
      valmask = (((ULONGEST) 1) << bitsize) - 1;
      val &= valmask;
      if (!TYPE_UNSIGNED (field_type))
	{
	  /* The field's sign bit is set: extend by OR-ing in all
	     the bits above the mask.  */
	  if (val & (valmask ^ (valmask >> 1)))
	    {
	      val |= ~valmask;
	    }
	}
    }

  return val;
}
3361
3362 /* Unpack a field FIELDNO of the specified TYPE, from the object at
3363 VALADDR + EMBEDDED_OFFSET. VALADDR points to the contents of
3364 ORIGINAL_VALUE, which must not be NULL. See
3365 unpack_value_bits_as_long for more details. */
3366
3367 int
3368 unpack_value_field_as_long (struct type *type, const gdb_byte *valaddr,
3369 LONGEST embedded_offset, int fieldno,
3370 const struct value *val, LONGEST *result)
3371 {
3372 int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3373 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3374 struct type *field_type = TYPE_FIELD_TYPE (type, fieldno);
3375 int bit_offset;
3376
3377 gdb_assert (val != NULL);
3378
3379 bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos;
3380 if (value_bits_any_optimized_out (val, bit_offset, bitsize)
3381 || !value_bits_available (val, bit_offset, bitsize))
3382 return 0;
3383
3384 *result = unpack_bits_as_long (field_type, valaddr + embedded_offset,
3385 bitpos, bitsize);
3386 return 1;
3387 }
3388
3389 /* Unpack a field FIELDNO of the specified TYPE, from the anonymous
3390 object at VALADDR. See unpack_bits_as_long for more details. */
3391
3392 LONGEST
3393 unpack_field_as_long (struct type *type, const gdb_byte *valaddr, int fieldno)
3394 {
3395 int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3396 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3397 struct type *field_type = TYPE_FIELD_TYPE (type, fieldno);
3398
3399 return unpack_bits_as_long (field_type, valaddr, bitpos, bitsize);
3400 }
3401
3402 /* Unpack a bitfield of BITSIZE bits found at BITPOS in the object at
3403 VALADDR + EMBEDDEDOFFSET that has the type of DEST_VAL and store
3404 the contents in DEST_VAL, zero or sign extending if the type of
3405 DEST_VAL is wider than BITSIZE. VALADDR points to the contents of
3406 VAL. If the VAL's contents required to extract the bitfield from
3407 are unavailable/optimized out, DEST_VAL is correspondingly
3408 marked unavailable/optimized out. */
3409
3410 void
3411 unpack_value_bitfield (struct value *dest_val,
3412 LONGEST bitpos, LONGEST bitsize,
3413 const gdb_byte *valaddr, LONGEST embedded_offset,
3414 const struct value *val)
3415 {
3416 enum bfd_endian byte_order;
3417 int src_bit_offset;
3418 int dst_bit_offset;
3419 struct type *field_type = value_type (dest_val);
3420
3421 byte_order = gdbarch_byte_order (get_type_arch (field_type));
3422
3423 /* First, unpack and sign extend the bitfield as if it was wholly
3424 valid. Optimized out/unavailable bits are read as zero, but
3425 that's OK, as they'll end up marked below. If the VAL is
3426 wholly-invalid we may have skipped allocating its contents,
3427 though. See allocate_optimized_out_value. */
3428 if (valaddr != NULL)
3429 {
3430 LONGEST num;
3431
3432 num = unpack_bits_as_long (field_type, valaddr + embedded_offset,
3433 bitpos, bitsize);
3434 store_signed_integer (value_contents_raw (dest_val),
3435 TYPE_LENGTH (field_type), byte_order, num);
3436 }
3437
3438 /* Now copy the optimized out / unavailability ranges to the right
3439 bits. */
3440 src_bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos;
3441 if (byte_order == BFD_ENDIAN_BIG)
3442 dst_bit_offset = TYPE_LENGTH (field_type) * TARGET_CHAR_BIT - bitsize;
3443 else
3444 dst_bit_offset = 0;
3445 value_ranges_copy_adjusted (dest_val, dst_bit_offset,
3446 val, src_bit_offset, bitsize);
3447 }
3448
3449 /* Return a new value with type TYPE, which is FIELDNO field of the
3450 object at VALADDR + EMBEDDEDOFFSET. VALADDR points to the contents
3451 of VAL. If the VAL's contents required to extract the bitfield
3452 from are unavailable/optimized out, the new value is
3453 correspondingly marked unavailable/optimized out. */
3454
3455 struct value *
3456 value_field_bitfield (struct type *type, int fieldno,
3457 const gdb_byte *valaddr,
3458 LONGEST embedded_offset, const struct value *val)
3459 {
3460 int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3461 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3462 struct value *res_val = allocate_value (TYPE_FIELD_TYPE (type, fieldno));
3463
3464 unpack_value_bitfield (res_val, bitpos, bitsize,
3465 valaddr, embedded_offset, val);
3466
3467 return res_val;
3468 }
3469
/* Modify the value of a bitfield.  ADDR points to a block of memory in
   target byte order; the bitfield starts in the byte pointed to.  FIELDVAL
   is the desired value of the field, in host byte order.  BITPOS and BITSIZE
   indicate which bits (in target bit order) comprise the bitfield.
   Requires 0 < BITSIZE <= lbits, 0 <= BITPOS % 8 + BITSIZE <= lbits, and
   0 <= BITPOS, where lbits is the size of a LONGEST in bits.

   A FIELDVAL too wide for the field is truncated (with a warning);
   surrounding bits in the enclosing bytes are preserved.  */

void
modify_field (struct type *type, gdb_byte *addr,
	      LONGEST fieldval, LONGEST bitpos, LONGEST bitsize)
{
  enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
  ULONGEST oword;
  /* Mask of BITSIZE low-order ones.  */
  ULONGEST mask = (ULONGEST) -1 >> (8 * sizeof (ULONGEST) - bitsize);
  LONGEST bytesize;

  /* Normalize BITPOS.  */
  addr += bitpos / 8;
  bitpos %= 8;

  /* If a negative fieldval fits in the field in question, chop
     off the sign extension bits.  */
  if ((~fieldval & ~(mask >> 1)) == 0)
    fieldval &= mask;

  /* Warn if value is too big to fit in the field in question.  */
  if (0 != (fieldval & ~mask))
    {
      /* FIXME: would like to include fieldval in the message, but
	 we don't have a sprintf_longest.  */
      warning (_("Value does not fit in %s bits."), plongest (bitsize));

      /* Truncate it, otherwise adjoining fields may be corrupted.  */
      fieldval &= mask;
    }

  /* Ensure no bytes outside of the modified ones get accessed as it may cause
     false valgrind reports.  */

  bytesize = (bitpos + bitsize + 7) / 8;
  oword = extract_unsigned_integer (addr, bytesize, byte_order);

  /* Shifting for bit field depends on endianness of the target machine.  */
  if (gdbarch_bits_big_endian (get_type_arch (type)))
    bitpos = bytesize * 8 - bitpos - bitsize;

  /* Clear the field's old bits, then OR in the new value.  */
  oword &= ~(mask << bitpos);
  oword |= fieldval << bitpos;

  store_unsigned_integer (addr, bytesize, byte_order, oword);
}
3521 \f
3522 /* Pack NUM into BUF using a target format of TYPE. */
3523
3524 void
3525 pack_long (gdb_byte *buf, struct type *type, LONGEST num)
3526 {
3527 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
3528 LONGEST len;
3529
3530 type = check_typedef (type);
3531 len = TYPE_LENGTH (type);
3532
3533 switch (TYPE_CODE (type))
3534 {
3535 case TYPE_CODE_INT:
3536 case TYPE_CODE_CHAR:
3537 case TYPE_CODE_ENUM:
3538 case TYPE_CODE_FLAGS:
3539 case TYPE_CODE_BOOL:
3540 case TYPE_CODE_RANGE:
3541 case TYPE_CODE_MEMBERPTR:
3542 store_signed_integer (buf, len, byte_order, num);
3543 break;
3544
3545 case TYPE_CODE_REF:
3546 case TYPE_CODE_RVALUE_REF:
3547 case TYPE_CODE_PTR:
3548 store_typed_address (buf, type, (CORE_ADDR) num);
3549 break;
3550
3551 default:
3552 error (_("Unexpected type (%d) encountered for integer constant."),
3553 TYPE_CODE (type));
3554 }
3555 }
3556
3557
3558 /* Pack NUM into BUF using a target format of TYPE. */
3559
3560 static void
3561 pack_unsigned_long (gdb_byte *buf, struct type *type, ULONGEST num)
3562 {
3563 LONGEST len;
3564 enum bfd_endian byte_order;
3565
3566 type = check_typedef (type);
3567 len = TYPE_LENGTH (type);
3568 byte_order = gdbarch_byte_order (get_type_arch (type));
3569
3570 switch (TYPE_CODE (type))
3571 {
3572 case TYPE_CODE_INT:
3573 case TYPE_CODE_CHAR:
3574 case TYPE_CODE_ENUM:
3575 case TYPE_CODE_FLAGS:
3576 case TYPE_CODE_BOOL:
3577 case TYPE_CODE_RANGE:
3578 case TYPE_CODE_MEMBERPTR:
3579 store_unsigned_integer (buf, len, byte_order, num);
3580 break;
3581
3582 case TYPE_CODE_REF:
3583 case TYPE_CODE_RVALUE_REF:
3584 case TYPE_CODE_PTR:
3585 store_typed_address (buf, type, (CORE_ADDR) num);
3586 break;
3587
3588 default:
3589 error (_("Unexpected type (%d) encountered "
3590 "for unsigned integer constant."),
3591 TYPE_CODE (type));
3592 }
3593 }
3594
3595
3596 /* Convert C numbers into newly allocated values. */
3597
3598 struct value *
3599 value_from_longest (struct type *type, LONGEST num)
3600 {
3601 struct value *val = allocate_value (type);
3602
3603 pack_long (value_contents_raw (val), type, num);
3604 return val;
3605 }
3606
3607
3608 /* Convert C unsigned numbers into newly allocated values. */
3609
3610 struct value *
3611 value_from_ulongest (struct type *type, ULONGEST num)
3612 {
3613 struct value *val = allocate_value (type);
3614
3615 pack_unsigned_long (value_contents_raw (val), type, num);
3616
3617 return val;
3618 }
3619
3620
3621 /* Create a value representing a pointer of type TYPE to the address
3622 ADDR. */
3623
3624 struct value *
3625 value_from_pointer (struct type *type, CORE_ADDR addr)
3626 {
3627 struct value *val = allocate_value (type);
3628
3629 store_typed_address (value_contents_raw (val),
3630 check_typedef (type), addr);
3631 return val;
3632 }
3633
3634
3635 /* Create a value of type TYPE whose contents come from VALADDR, if it
3636 is non-null, and whose memory address (in the inferior) is
3637 ADDRESS. The type of the created value may differ from the passed
3638 type TYPE. Make sure to retrieve values new type after this call.
3639 Note that TYPE is not passed through resolve_dynamic_type; this is
3640 a special API intended for use only by Ada. */
3641
3642 struct value *
3643 value_from_contents_and_address_unresolved (struct type *type,
3644 const gdb_byte *valaddr,
3645 CORE_ADDR address)
3646 {
3647 struct value *v;
3648
3649 if (valaddr == NULL)
3650 v = allocate_value_lazy (type);
3651 else
3652 v = value_from_contents (type, valaddr);
3653 VALUE_LVAL (v) = lval_memory;
3654 set_value_address (v, address);
3655 return v;
3656 }
3657
3658 /* Create a value of type TYPE whose contents come from VALADDR, if it
3659 is non-null, and whose memory address (in the inferior) is
3660 ADDRESS. The type of the created value may differ from the passed
3661 type TYPE. Make sure to retrieve values new type after this call. */
3662
3663 struct value *
3664 value_from_contents_and_address (struct type *type,
3665 const gdb_byte *valaddr,
3666 CORE_ADDR address)
3667 {
3668 struct type *resolved_type = resolve_dynamic_type (type, valaddr, address);
3669 struct type *resolved_type_no_typedef = check_typedef (resolved_type);
3670 struct value *v;
3671
3672 if (valaddr == NULL)
3673 v = allocate_value_lazy (resolved_type);
3674 else
3675 v = value_from_contents (resolved_type, valaddr);
3676 if (TYPE_DATA_LOCATION (resolved_type_no_typedef) != NULL
3677 && TYPE_DATA_LOCATION_KIND (resolved_type_no_typedef) == PROP_CONST)
3678 address = TYPE_DATA_LOCATION_ADDR (resolved_type_no_typedef);
3679 VALUE_LVAL (v) = lval_memory;
3680 set_value_address (v, address);
3681 return v;
3682 }
3683
3684 /* Create a value of type TYPE holding the contents CONTENTS.
3685 The new value is `not_lval'. */
3686
3687 struct value *
3688 value_from_contents (struct type *type, const gdb_byte *contents)
3689 {
3690 struct value *result;
3691
3692 result = allocate_value (type);
3693 memcpy (value_contents_raw (result), contents, TYPE_LENGTH (type));
3694 return result;
3695 }
3696
3697 struct value *
3698 value_from_double (struct type *type, DOUBLEST num)
3699 {
3700 struct value *val = allocate_value (type);
3701 struct type *base_type = check_typedef (type);
3702 enum type_code code = TYPE_CODE (base_type);
3703
3704 if (code == TYPE_CODE_FLT)
3705 {
3706 store_typed_floating (value_contents_raw (val), base_type, num);
3707 }
3708 else
3709 error (_("Unexpected type encountered for floating constant."));
3710
3711 return val;
3712 }
3713
3714 struct value *
3715 value_from_decfloat (struct type *type, const gdb_byte *dec)
3716 {
3717 struct value *val = allocate_value (type);
3718
3719 memcpy (value_contents_raw (val), dec, TYPE_LENGTH (type));
3720 return val;
3721 }
3722
/* Extract a value from the history list.  Input will be of the form
   $digits or $$digits.  See block comment above 'write_dollar_variable'
   for details.

   Returns NULL (leaving *ENDP untouched) if H is not a history
   reference; otherwise advances *ENDP past the reference and returns
   the history value.  */

struct value *
value_from_history_ref (const char *h, const char **endp)
{
  int index, len;

  if (h[0] == '$')
    len = 1;
  else
    return NULL;

  if (h[1] == '$')
    len = 2;

  /* Find length of numeral string.  Cast through unsigned char:
     passing a plain char that may be negative to isdigit/isalpha is
     undefined behavior (CERT STR37-C).  */
  for (; isdigit ((unsigned char) h[len]); len++)
    ;

  /* Make sure numeral string is not part of an identifier.  */
  if (h[len] == '_' || isalpha ((unsigned char) h[len]))
    return NULL;

  /* Now collect the index value.  */
  if (h[1] == '$')
    {
      if (len == 2)
	{
	  /* For some bizarre reason, "$$" is equivalent to "$$1",
	     rather than to "$$0" as it ought to be!  */
	  index = -1;
	  *endp += len;
	}
      else
	{
	  char *local_end;

	  /* "$$N" counts backwards from the end of the history.  */
	  index = -strtol (&h[2], &local_end, 10);
	  *endp = local_end;
	}
    }
  else
    {
      if (len == 1)
	{
	  /* "$" is equivalent to "$0".  */
	  index = 0;
	  *endp += len;
	}
      else
	{
	  char *local_end;

	  index = strtol (&h[1], &local_end, 10);
	  *endp = local_end;
	}
    }

  return access_value_history (index);
}
3785
3786 /* Get the component value (offset by OFFSET bytes) of a struct or
3787 union WHOLE. Component's type is TYPE. */
3788
3789 struct value *
3790 value_from_component (struct value *whole, struct type *type, LONGEST offset)
3791 {
3792 struct value *v;
3793
3794 if (VALUE_LVAL (whole) == lval_memory && value_lazy (whole))
3795 v = allocate_value_lazy (type);
3796 else
3797 {
3798 v = allocate_value (type);
3799 value_contents_copy (v, value_embedded_offset (v),
3800 whole, value_embedded_offset (whole) + offset,
3801 type_length_units (type));
3802 }
3803 v->offset = value_offset (whole) + offset + value_embedded_offset (whole);
3804 set_value_component_location (v, whole);
3805
3806 return v;
3807 }
3808
3809 struct value *
3810 coerce_ref_if_computed (const struct value *arg)
3811 {
3812 const struct lval_funcs *funcs;
3813
3814 if (!TYPE_IS_REFERENCE (check_typedef (value_type (arg))))
3815 return NULL;
3816
3817 if (value_lval_const (arg) != lval_computed)
3818 return NULL;
3819
3820 funcs = value_computed_funcs (arg);
3821 if (funcs->coerce_ref == NULL)
3822 return NULL;
3823
3824 return funcs->coerce_ref (arg);
3825 }
3826
/* Look at value.h for description.  */

struct value *
readjust_indirect_value_type (struct value *value, struct type *enc_type,
			      const struct type *original_type,
			      const struct value *original_value)
{
  /* Re-adjust type: VALUE now denotes what ORIGINAL_TYPE (a pointer
     or reference type) pointed at.  */
  deprecated_set_value_type (value, TYPE_TARGET_TYPE (original_type));

  /* Add embedding info carried over from the original value.  */
  set_value_enclosing_type (value, enc_type);
  set_value_embedded_offset (value, value_pointed_to_offset (original_value));

  /* We may be pointing to an object of some derived type.  */
  return value_full_object (value, NULL, 0, 0, 0);
}
3844
3845 struct value *
3846 coerce_ref (struct value *arg)
3847 {
3848 struct type *value_type_arg_tmp = check_typedef (value_type (arg));
3849 struct value *retval;
3850 struct type *enc_type;
3851
3852 retval = coerce_ref_if_computed (arg);
3853 if (retval)
3854 return retval;
3855
3856 if (!TYPE_IS_REFERENCE (value_type_arg_tmp))
3857 return arg;
3858
3859 enc_type = check_typedef (value_enclosing_type (arg));
3860 enc_type = TYPE_TARGET_TYPE (enc_type);
3861
3862 retval = value_at_lazy (enc_type,
3863 unpack_pointer (value_type (arg),
3864 value_contents (arg)));
3865 enc_type = value_type (retval);
3866 return readjust_indirect_value_type (retval, enc_type,
3867 value_type_arg_tmp, arg);
3868 }
3869
3870 struct value *
3871 coerce_array (struct value *arg)
3872 {
3873 struct type *type;
3874
3875 arg = coerce_ref (arg);
3876 type = check_typedef (value_type (arg));
3877
3878 switch (TYPE_CODE (type))
3879 {
3880 case TYPE_CODE_ARRAY:
3881 if (!TYPE_VECTOR (type) && current_language->c_style_arrays)
3882 arg = value_coerce_array (arg);
3883 break;
3884 case TYPE_CODE_FUNC:
3885 arg = value_coerce_function (arg);
3886 break;
3887 }
3888 return arg;
3889 }
3890 \f
3891
3892 /* Return the return value convention that will be used for the
3893 specified type. */
3894
3895 enum return_value_convention
3896 struct_return_convention (struct gdbarch *gdbarch,
3897 struct value *function, struct type *value_type)
3898 {
3899 enum type_code code = TYPE_CODE (value_type);
3900
3901 if (code == TYPE_CODE_ERROR)
3902 error (_("Function return type unknown."));
3903
3904 /* Probe the architecture for the return-value convention. */
3905 return gdbarch_return_value (gdbarch, function, value_type,
3906 NULL, NULL, NULL);
3907 }
3908
3909 /* Return true if the function returning the specified type is using
3910 the convention of returning structures in memory (passing in the
3911 address as a hidden first parameter). */
3912
3913 int
3914 using_struct_return (struct gdbarch *gdbarch,
3915 struct value *function, struct type *value_type)
3916 {
3917 if (TYPE_CODE (value_type) == TYPE_CODE_VOID)
3918 /* A void return value is never in memory. See also corresponding
3919 code in "print_return_value". */
3920 return 0;
3921
3922 return (struct_return_convention (gdbarch, function, value_type)
3923 != RETURN_VALUE_REGISTER_CONVENTION);
3924 }
3925
/* Set the initialized field in a value struct.  STATUS is nonzero to
   mark VAL as initialized, zero to mark it uninitialized.  */

void
set_value_initialized (struct value *val, int status)
{
  val->initialized = status;
}
3933
/* Return the initialized field in a value struct: nonzero if VAL has
   been marked initialized.  */

int
value_initialized (const struct value *val)
{
  return val->initialized;
}
3941
3942 /* Load the actual content of a lazy value. Fetch the data from the
3943 user's process and clear the lazy flag to indicate that the data in
3944 the buffer is valid.
3945
3946 If the value is zero-length, we avoid calling read_memory, which
3947 would abort. We mark the value as fetched anyway -- all 0 bytes of
3948 it. */
3949
3950 void
3951 value_fetch_lazy (struct value *val)
3952 {
3953 gdb_assert (value_lazy (val));
3954 allocate_value_contents (val);
3955 /* A value is either lazy, or fully fetched. The
3956 availability/validity is only established as we try to fetch a
3957 value. */
3958 gdb_assert (VEC_empty (range_s, val->optimized_out));
3959 gdb_assert (VEC_empty (range_s, val->unavailable));
3960 if (value_bitsize (val))
3961 {
3962 /* To read a lazy bitfield, read the entire enclosing value. This
3963 prevents reading the same block of (possibly volatile) memory once
3964 per bitfield. It would be even better to read only the containing
3965 word, but we have no way to record that just specific bits of a
3966 value have been fetched. */
3967 struct type *type = check_typedef (value_type (val));
3968 struct value *parent = value_parent (val);
3969
3970 if (value_lazy (parent))
3971 value_fetch_lazy (parent);
3972
3973 unpack_value_bitfield (val,
3974 value_bitpos (val), value_bitsize (val),
3975 value_contents_for_printing (parent),
3976 value_offset (val), parent);
3977 }
3978 else if (VALUE_LVAL (val) == lval_memory)
3979 {
3980 CORE_ADDR addr = value_address (val);
3981 struct type *type = check_typedef (value_enclosing_type (val));
3982
3983 if (TYPE_LENGTH (type))
3984 read_value_memory (val, 0, value_stack (val),
3985 addr, value_contents_all_raw (val),
3986 type_length_units (type));
3987 }
3988 else if (VALUE_LVAL (val) == lval_register)
3989 {
3990 struct frame_info *next_frame;
3991 int regnum;
3992 struct type *type = check_typedef (value_type (val));
3993 struct value *new_val = val, *mark = value_mark ();
3994
3995 /* Offsets are not supported here; lazy register values must
3996 refer to the entire register. */
3997 gdb_assert (value_offset (val) == 0);
3998
3999 while (VALUE_LVAL (new_val) == lval_register && value_lazy (new_val))
4000 {
4001 struct frame_id next_frame_id = VALUE_NEXT_FRAME_ID (new_val);
4002
4003 next_frame = frame_find_by_id (next_frame_id);
4004 regnum = VALUE_REGNUM (new_val);
4005
4006 gdb_assert (next_frame != NULL);
4007
4008 /* Convertible register routines are used for multi-register
4009 values and for interpretation in different types
4010 (e.g. float or int from a double register). Lazy
4011 register values should have the register's natural type,
4012 so they do not apply. */
4013 gdb_assert (!gdbarch_convert_register_p (get_frame_arch (next_frame),
4014 regnum, type));
4015
4016 /* FRAME was obtained, above, via VALUE_NEXT_FRAME_ID.
4017 Since a "->next" operation was performed when setting
4018 this field, we do not need to perform a "next" operation
4019 again when unwinding the register. That's why
4020 frame_unwind_register_value() is called here instead of
4021 get_frame_register_value(). */
4022 new_val = frame_unwind_register_value (next_frame, regnum);
4023
4024 /* If we get another lazy lval_register value, it means the
4025 register is found by reading it from NEXT_FRAME's next frame.
4026 frame_unwind_register_value should never return a value with
4027 the frame id pointing to NEXT_FRAME. If it does, it means we
4028 either have two consecutive frames with the same frame id
4029 in the frame chain, or some code is trying to unwind
4030 behind get_prev_frame's back (e.g., a frame unwind
4031 sniffer trying to unwind), bypassing its validations. In
4032 any case, it should always be an internal error to end up
4033 in this situation. */
4034 if (VALUE_LVAL (new_val) == lval_register
4035 && value_lazy (new_val)
4036 && frame_id_eq (VALUE_NEXT_FRAME_ID (new_val), next_frame_id))
4037 internal_error (__FILE__, __LINE__,
4038 _("infinite loop while fetching a register"));
4039 }
4040
4041 /* If it's still lazy (for instance, a saved register on the
4042 stack), fetch it. */
4043 if (value_lazy (new_val))
4044 value_fetch_lazy (new_val);
4045
4046 /* Copy the contents and the unavailability/optimized-out
4047 meta-data from NEW_VAL to VAL. */
4048 set_value_lazy (val, 0);
4049 value_contents_copy (val, value_embedded_offset (val),
4050 new_val, value_embedded_offset (new_val),
4051 type_length_units (type));
4052
4053 if (frame_debug)
4054 {
4055 struct gdbarch *gdbarch;
4056 struct frame_info *frame;
4057 /* VALUE_FRAME_ID is used here, instead of VALUE_NEXT_FRAME_ID,
4058 so that the frame level will be shown correctly. */
4059 frame = frame_find_by_id (VALUE_FRAME_ID (val));
4060 regnum = VALUE_REGNUM (val);
4061 gdbarch = get_frame_arch (frame);
4062
4063 fprintf_unfiltered (gdb_stdlog,
4064 "{ value_fetch_lazy "
4065 "(frame=%d,regnum=%d(%s),...) ",
4066 frame_relative_level (frame), regnum,
4067 user_reg_map_regnum_to_name (gdbarch, regnum));
4068
4069 fprintf_unfiltered (gdb_stdlog, "->");
4070 if (value_optimized_out (new_val))
4071 {
4072 fprintf_unfiltered (gdb_stdlog, " ");
4073 val_print_optimized_out (new_val, gdb_stdlog);
4074 }
4075 else
4076 {
4077 int i;
4078 const gdb_byte *buf = value_contents (new_val);
4079
4080 if (VALUE_LVAL (new_val) == lval_register)
4081 fprintf_unfiltered (gdb_stdlog, " register=%d",
4082 VALUE_REGNUM (new_val));
4083 else if (VALUE_LVAL (new_val) == lval_memory)
4084 fprintf_unfiltered (gdb_stdlog, " address=%s",
4085 paddress (gdbarch,
4086 value_address (new_val)));
4087 else
4088 fprintf_unfiltered (gdb_stdlog, " computed");
4089
4090 fprintf_unfiltered (gdb_stdlog, " bytes=");
4091 fprintf_unfiltered (gdb_stdlog, "[");
4092 for (i = 0; i < register_size (gdbarch, regnum); i++)
4093 fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
4094 fprintf_unfiltered (gdb_stdlog, "]");
4095 }
4096
4097 fprintf_unfiltered (gdb_stdlog, " }\n");
4098 }
4099
4100 /* Dispose of the intermediate values. This prevents
4101 watchpoints from trying to watch the saved frame pointer. */
4102 value_free_to_mark (mark);
4103 }
4104 else if (VALUE_LVAL (val) == lval_computed
4105 && value_computed_funcs (val)->read != NULL)
4106 value_computed_funcs (val)->read (val);
4107 else
4108 internal_error (__FILE__, __LINE__, _("Unexpected lazy value type."));
4109
4110 set_value_lazy (val, 0);
4111 }
4112
4113 /* Implementation of the convenience function $_isvoid. */
4114
4115 static struct value *
4116 isvoid_internal_fn (struct gdbarch *gdbarch,
4117 const struct language_defn *language,
4118 void *cookie, int argc, struct value **argv)
4119 {
4120 int ret;
4121
4122 if (argc != 1)
4123 error (_("You must provide one argument for $_isvoid."));
4124
4125 ret = TYPE_CODE (value_type (argv[0])) == TYPE_CODE_VOID;
4126
4127 return value_from_longest (builtin_type (gdbarch)->builtin_int, ret);
4128 }
4129
4130 void
4131 _initialize_values (void)
4132 {
4133 add_cmd ("convenience", no_class, show_convenience, _("\
4134 Debugger convenience (\"$foo\") variables and functions.\n\
4135 Convenience variables are created when you assign them values;\n\
4136 thus, \"set $foo=1\" gives \"$foo\" the value 1. Values may be any type.\n\
4137 \n\
4138 A few convenience variables are given values automatically:\n\
4139 \"$_\"holds the last address examined with \"x\" or \"info lines\",\n\
4140 \"$__\" holds the contents of the last address examined with \"x\"."
4141 #ifdef HAVE_PYTHON
4142 "\n\n\
4143 Convenience functions are defined via the Python API."
4144 #endif
4145 ), &showlist);
4146 add_alias_cmd ("conv", "convenience", no_class, 1, &showlist);
4147
4148 add_cmd ("values", no_set_class, show_values, _("\
4149 Elements of value history around item number IDX (or last ten)."),
4150 &showlist);
4151
4152 add_com ("init-if-undefined", class_vars, init_if_undefined_command, _("\
4153 Initialize a convenience variable if necessary.\n\
4154 init-if-undefined VARIABLE = EXPRESSION\n\
4155 Set an internal VARIABLE to the result of the EXPRESSION if it does not\n\
4156 exist or does not contain a value. The EXPRESSION is not evaluated if the\n\
4157 VARIABLE is already initialized."));
4158
4159 add_prefix_cmd ("function", no_class, function_command, _("\
4160 Placeholder command for showing help on convenience functions."),
4161 &functionlist, "function ", 0, &cmdlist);
4162
4163 add_internal_function ("_isvoid", _("\
4164 Check whether an expression is void.\n\
4165 Usage: $_isvoid (expression)\n\
4166 Return 1 if the expression is void, zero otherwise."),
4167 isvoid_internal_fn, NULL);
4168
4169 add_setshow_zuinteger_unlimited_cmd ("max-value-size",
4170 class_support, &max_value_size, _("\
4171 Set maximum sized value gdb will load from the inferior."), _("\
4172 Show maximum sized value gdb will load from the inferior."), _("\
4173 Use this to control the maximum size, in bytes, of a value that gdb\n\
4174 will load from the inferior. Setting this value to 'unlimited'\n\
4175 disables checking.\n\
4176 Setting this does not invalidate already allocated values, it only\n\
4177 prevents future values, larger than this size, from being allocated."),
4178 set_max_value_size,
4179 show_max_value_size,
4180 &setlist, &showlist);
4181 }
This page took 0.121625 seconds and 4 git commands to generate.