Remove unnecessary function prototypes.
[deliverable/binutils-gdb.git] / gdb / value.c
1 /* Low level packing and unpacking of values for GDB, the GNU Debugger.
2
3 Copyright (C) 1986-2017 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21 #include "arch-utils.h"
22 #include "symtab.h"
23 #include "gdbtypes.h"
24 #include "value.h"
25 #include "gdbcore.h"
26 #include "command.h"
27 #include "gdbcmd.h"
28 #include "target.h"
29 #include "language.h"
30 #include "demangle.h"
31 #include "doublest.h"
32 #include "regcache.h"
33 #include "block.h"
34 #include "dfp.h"
35 #include "objfiles.h"
36 #include "valprint.h"
37 #include "cli/cli-decode.h"
38 #include "extension.h"
39 #include <ctype.h>
40 #include "tracepoint.h"
41 #include "cp-abi.h"
42 #include "user-regs.h"
43 #include <algorithm>
44 #include "completer.h"
45
46 /* Definition of a user function. */
struct internal_function
{
  /* The name of the function.  It is a bit odd to have this in the
     function itself -- the user might use a differently-named
     convenience variable to hold the function.  */
  char *name;

  /* The handler.  Called when the function is invoked from an
     expression.  */
  internal_function_fn handler;

  /* User data for the handler.  Passed back verbatim on each call;
     ownership semantics are up to whoever registered the function.  */
  void *cookie;
};
60
/* Defines an [OFFSET, OFFSET + LENGTH) range.  */

struct range
{
  /* Lowest offset in the range.  The unit (bits vs. bytes) is
     determined by the vector the range is stored in.  */
  LONGEST offset;

  /* Length of the range.  The range is half-open: OFFSET + LENGTH is
     one past the last element.  */
  LONGEST length;
};

typedef struct range range_s;

DEF_VEC_O(range_s);
76 /* Returns true if the ranges defined by [offset1, offset1+len1) and
77 [offset2, offset2+len2) overlap. */
78
79 static int
80 ranges_overlap (LONGEST offset1, LONGEST len1,
81 LONGEST offset2, LONGEST len2)
82 {
83 ULONGEST h, l;
84
85 l = std::max (offset1, offset2);
86 h = std::min (offset1 + len1, offset2 + len2);
87 return (l < h);
88 }
89
90 /* Returns true if the first argument is strictly less than the
91 second, useful for VEC_lower_bound. We keep ranges sorted by
92 offset and coalesce overlapping and contiguous ranges, so this just
93 compares the starting offset. */
94
95 static int
96 range_lessthan (const range_s *r1, const range_s *r2)
97 {
98 return r1->offset < r2->offset;
99 }
100
101 /* Returns true if RANGES contains any range that overlaps [OFFSET,
102 OFFSET+LENGTH). */
103
static int
ranges_contain (VEC(range_s) *ranges, LONGEST offset, LONGEST length)
{
  range_s what;
  LONGEST i;

  what.offset = offset;
  what.length = length;

  /* We keep ranges sorted by offset and coalesce overlapping and
     contiguous ranges, so to check if a range list contains a given
     range, we can do a binary search for the position the given range
     would be inserted if we only considered the starting OFFSET of
     ranges.  We call that position I.  Since we also have LENGTH to
     care for (this is a range after all), we need to check if the
     _previous_ range overlaps the I range.  E.g.,

         R
         |---|
       |---|    |---|  |------| ... |--|
       0        1      2            N

       I=1

     In the case above, the binary search would return `I=1', meaning,
     this OFFSET should be inserted at position 1, and the current
     position 1 should be pushed further (and before 2).  But, `0'
     overlaps with R.

     Then we need to check if the I range overlaps the I range itself.
     E.g.,

              R
              |---|
       |---|    |---|  |-------| ... |--|
       0        1      2             N

       I=1
  */

  i = VEC_lower_bound (range_s, ranges, &what, range_lessthan);

  /* Check the range immediately before the insertion point: it may
     start below OFFSET yet still extend into [OFFSET, OFFSET+LENGTH).  */
  if (i > 0)
    {
      struct range *bef = VEC_index (range_s, ranges, i - 1);

      if (ranges_overlap (bef->offset, bef->length, offset, length))
	return 1;
    }

  /* Check the range at the insertion point itself.  */
  if (i < VEC_length (range_s, ranges))
    {
      struct range *r = VEC_index (range_s, ranges, i);

      if (ranges_overlap (r->offset, r->length, offset, length))
	return 1;
    }

  return 0;
}
164
165 static struct cmd_list_element *functionlist;
166
167 /* Note that the fields in this structure are arranged to save a bit
168 of memory. */
169
struct value
{
  /* Type of value; either not an lval, or one of the various
     different possible kinds of lval.  */
  enum lval_type lval;

  /* Is it modifiable?  Only relevant if lval != not_lval.  */
  unsigned int modifiable : 1;

  /* If zero, contents of this value are in the contents field.  If
     nonzero, contents are in inferior.  If the lval field is lval_memory,
     the contents are in inferior memory at location.address plus offset.
     The lval field may also be lval_register.

     WARNING: This field is used by the code which handles watchpoints
     (see breakpoint.c) to decide whether a particular value can be
     watched by hardware watchpoints.  If the lazy flag is set for
     some member of a value chain, it is assumed that this member of
     the chain doesn't need to be watched as part of watching the
     value itself.  This is how GDB avoids watching the entire struct
     or array when the user wants to watch a single struct member or
     array element.  If you ever change the way lazy flag is set and
     reset, be sure to consider this use as well!  */
  unsigned int lazy : 1;

  /* If value is a variable, is it initialized or not.  */
  unsigned int initialized : 1;

  /* If value is from the stack.  If this is set, read_stack will be
     used instead of read_memory to enable extra caching.  */
  unsigned int stack : 1;

  /* If the value has been released (taken off the all_values chain;
     see release_value and the reference_count comment below).  */
  unsigned int released : 1;

  /* Location of value (if lval).  */
  union
  {
    /* If lval == lval_memory, this is the address in the inferior.  */
    CORE_ADDR address;

    /* If lval == lval_register, the value is from a register.  */
    struct
    {
      /* Register number.  */
      int regnum;
      /* Frame ID of "next" frame to which a register value is relative.
	 If the register value is found relative to frame F, then the
	 frame id of F->next will be stored in next_frame_id.  */
      struct frame_id next_frame_id;
    } reg;

    /* Pointer to internal variable.  */
    struct internalvar *internalvar;

    /* Pointer to xmethod worker.  */
    struct xmethod_worker *xm_worker;

    /* If lval == lval_computed, this is a set of function pointers
       to use to access and describe the value, and a closure pointer
       for them to use.  */
    struct
    {
      /* Functions to call.  */
      const struct lval_funcs *funcs;

      /* Closure for those functions to use.  */
      void *closure;
    } computed;
  } location;

  /* Describes offset of a value within lval of a structure in target
     addressable memory units.  Note also the member embedded_offset
     below.  */
  LONGEST offset;

  /* Only used for bitfields; number of bits contained in them.  */
  LONGEST bitsize;

  /* Only used for bitfields; position of start of field.  For
     gdbarch_bits_big_endian=0 targets, it is the position of the LSB.  For
     gdbarch_bits_big_endian=1 targets, it is the position of the MSB.  */
  LONGEST bitpos;

  /* The number of references to this value.  When a value is created,
     the value chain holds a reference, so REFERENCE_COUNT is 1.  If
     release_value is called, this value is removed from the chain but
     the caller of release_value now has a reference to this value.
     The caller must arrange for a call to value_free later.  */
  int reference_count;

  /* Only used for bitfields; the containing value.  This allows a
     single read from the target when displaying multiple
     bitfields.  */
  struct value *parent;

  /* Type of the value.  */
  struct type *type;

  /* If a value represents a C++ object, then the `type' field gives
     the object's compile-time type.  If the object actually belongs
     to some class derived from `type', perhaps with other base
     classes and additional members, then `type' is just a subobject
     of the real thing, and the full object is probably larger than
     `type' would suggest.

     If `type' is a dynamic class (i.e. one with a vtable), then GDB
     can actually determine the object's run-time type by looking at
     the run-time type information in the vtable.  When this
     information is available, we may elect to read in the entire
     object, for several reasons:

     - When printing the value, the user would probably rather see the
     full object, not just the limited portion apparent from the
     compile-time type.

     - If `type' has virtual base classes, then even printing `type'
     alone may require reaching outside the `type' portion of the
     object to wherever the virtual base class has been stored.

     When we store the entire object, `enclosing_type' is the run-time
     type -- the complete object -- and `embedded_offset' is the
     offset of `type' within that larger type, in target addressable memory
     units.  The value_contents() macro takes `embedded_offset' into account,
     so most GDB code continues to see the `type' portion of the value, just
     as the inferior would.

     If `type' is a pointer to an object, then `enclosing_type' is a
     pointer to the object's run-time type, and `pointed_to_offset' is
     the offset in target addressable memory units from the full object
     to the pointed-to object -- that is, the value `embedded_offset' would
     have if we followed the pointer and fetched the complete object.
     (I don't really see the point.  Why not just determine the
     run-time type when you indirect, and avoid the special case?  The
     contents don't matter until you indirect anyway.)

     If we're not doing anything fancy, `enclosing_type' is equal to
     `type', and `embedded_offset' is zero, so everything works
     normally.  */
  struct type *enclosing_type;
  LONGEST embedded_offset;
  LONGEST pointed_to_offset;

  /* Values are stored in a chain, so that they can be deleted easily
     over calls to the inferior.  Values assigned to internal
     variables, put into the value history or exposed to Python are
     taken off this list.  */
  struct value *next;

  /* Actual contents of the value.  Target byte-order.  NULL or not
     valid if lazy is nonzero.  */
  gdb_byte *contents;

  /* Unavailable ranges in CONTENTS.  We mark unavailable ranges,
     rather than available, since the common and default case is for a
     value to be available.  This is filled in at value read time.
     The unavailable ranges are tracked in bits.  Note that a contents
     bit that has been optimized out doesn't really exist in the
     program, so it can't be marked unavailable either.  */
  VEC(range_s) *unavailable;

  /* Likewise, but for optimized out contents (a chunk of the value of
     a variable that does not actually exist in the program).  If LVAL
     is lval_register, this is a register ($pc, $sp, etc., never a
     program variable) that has not been saved in the frame.  Not
     saved registers and optimized-out program variables values are
     treated pretty much the same, except not-saved registers have a
     different string representation and related error strings.  */
  VEC(range_s) *optimized_out;
};
340
341 /* See value.h. */
342
343 struct gdbarch *
344 get_value_arch (const struct value *value)
345 {
346 return get_type_arch (value_type (value));
347 }
348
349 int
350 value_bits_available (const struct value *value, LONGEST offset, LONGEST length)
351 {
352 gdb_assert (!value->lazy);
353
354 return !ranges_contain (value->unavailable, offset, length);
355 }
356
357 int
358 value_bytes_available (const struct value *value,
359 LONGEST offset, LONGEST length)
360 {
361 return value_bits_available (value,
362 offset * TARGET_CHAR_BIT,
363 length * TARGET_CHAR_BIT);
364 }
365
366 int
367 value_bits_any_optimized_out (const struct value *value, int bit_offset, int bit_length)
368 {
369 gdb_assert (!value->lazy);
370
371 return ranges_contain (value->optimized_out, bit_offset, bit_length);
372 }
373
374 int
375 value_entirely_available (struct value *value)
376 {
377 /* We can only tell whether the whole value is available when we try
378 to read it. */
379 if (value->lazy)
380 value_fetch_lazy (value);
381
382 if (VEC_empty (range_s, value->unavailable))
383 return 1;
384 return 0;
385 }
386
387 /* Returns true if VALUE is entirely covered by RANGES. If the value
388 is lazy, it'll be read now. Note that RANGE is a pointer to
389 pointer because reading the value might change *RANGE. */
390
391 static int
392 value_entirely_covered_by_range_vector (struct value *value,
393 VEC(range_s) **ranges)
394 {
395 /* We can only tell whether the whole value is optimized out /
396 unavailable when we try to read it. */
397 if (value->lazy)
398 value_fetch_lazy (value);
399
400 if (VEC_length (range_s, *ranges) == 1)
401 {
402 struct range *t = VEC_index (range_s, *ranges, 0);
403
404 if (t->offset == 0
405 && t->length == (TARGET_CHAR_BIT
406 * TYPE_LENGTH (value_enclosing_type (value))))
407 return 1;
408 }
409
410 return 0;
411 }
412
413 int
414 value_entirely_unavailable (struct value *value)
415 {
416 return value_entirely_covered_by_range_vector (value, &value->unavailable);
417 }
418
419 int
420 value_entirely_optimized_out (struct value *value)
421 {
422 return value_entirely_covered_by_range_vector (value, &value->optimized_out);
423 }
424
425 /* Insert into the vector pointed to by VECTORP the bit range starting of
426 OFFSET bits, and extending for the next LENGTH bits. */
427
static void
insert_into_bit_range_vector (VEC(range_s) **vectorp,
			      LONGEST offset, LONGEST length)
{
  range_s newr;
  int i;

  /* Insert the range sorted.  If there's overlap or the new range
     would be contiguous with an existing range, merge.  */

  newr.offset = offset;
  newr.length = length;

  /* Do a binary search for the position the given range would be
     inserted if we only considered the starting OFFSET of ranges.
     Call that position I.  Since we also have LENGTH to care for
     (this is a range after all), we need to check if the _previous_
     range overlaps the I range.  E.g., calling R the new range:

       #1 - overlaps with previous

	   R
	   |-...-|
	 |---|     |---|  |------| ... |--|
	 0         1      2            N

	 I=1

     In the case #1 above, the binary search would return `I=1',
     meaning, this OFFSET should be inserted at position 1, and the
     current position 1 should be pushed further (and become 2).  But,
     note that `0' overlaps with R, so we want to merge them.

     A similar consideration needs to be taken if the new range would
     be contiguous with the previous range:

       #2 - contiguous with previous

	    R
	    |-...-|
	 |--|       |---|  |------| ... |--|
	 0          1      2            N

	 I=1

     If there's no overlap with the previous range, as in:

       #3 - not overlapping and not contiguous

	       R
	       |-...-|
	  |--|         |---|  |------| ... |--|
	  0            1      2            N

	 I=1

     or if I is 0:

       #4 - R is the range with lowest offset

	  R
	  |-...-|
	        |--|       |---|  |------| ... |--|
	        0          1      2            N

	 I=0

     ... we just push the new range to I.

     All the 4 cases above need to consider that the new range may
     also overlap several of the ranges that follow, or that R may be
     contiguous with the following range, and merge.  E.g.,

       #5 - overlapping following ranges

	  R
	  |------------------------|
	        |--|       |---|  |------| ... |--|
	        0          1      2            N

	 I=0

       or:

	    R
	    |-------|
	 |--|       |---|  |------| ... |--|
	 0          1      2            N

	 I=1

  */

  i = VEC_lower_bound (range_s, *vectorp, &newr, range_lessthan);
  if (i > 0)
    {
      struct range *bef = VEC_index (range_s, *vectorp, i - 1);

      if (ranges_overlap (bef->offset, bef->length, offset, length))
	{
	  /* #1: grow the previous range to the union of both.  */
	  ULONGEST l = std::min (bef->offset, offset);
	  ULONGEST h = std::max (bef->offset + bef->length, offset + length);

	  bef->offset = l;
	  bef->length = h - l;
	  i--;
	}
      else if (offset == bef->offset + bef->length)
	{
	  /* #2: extend the previous range; no gap between them.  */
	  bef->length += length;
	  i--;
	}
      else
	{
	  /* #3: insert as a new, independent range.  */
	  VEC_safe_insert (range_s, *vectorp, i, &newr);
	}
    }
  else
    {
      /* #4: new lowest range, insert at the front.  */
      VEC_safe_insert (range_s, *vectorp, i, &newr);
    }

  /* Check whether the ranges following the one we've just added or
     touched can be folded in (#5 above).  */
  if (i + 1 < VEC_length (range_s, *vectorp))
    {
      struct range *t;
      struct range *r;
      int removed = 0;
      int next = i + 1;

      /* Get the range we just touched.  */
      t = VEC_index (range_s, *vectorp, i);
      removed = 0;

      i = next;
      for (; VEC_iterate (range_s, *vectorp, i, r); i++)
	if (r->offset <= t->offset + t->length)
	  {
	    /* R overlaps or is contiguous with T; absorb it into T
	       and remember to delete it below.  */
	    ULONGEST l, h;

	    l = std::min (t->offset, r->offset);
	    h = std::max (t->offset + t->length, r->offset + r->length);

	    t->offset = l;
	    t->length = h - l;

	    removed++;
	  }
	else
	  {
	    /* If we couldn't merge this one, we won't be able to
	       merge following ones either, since the ranges are
	       always sorted by OFFSET.  */
	    break;
	  }

      /* Delete the ranges that were folded into T, in one batch.  */
      if (removed != 0)
	VEC_block_remove (range_s, *vectorp, next, removed);
    }
}
593
594 void
595 mark_value_bits_unavailable (struct value *value,
596 LONGEST offset, LONGEST length)
597 {
598 insert_into_bit_range_vector (&value->unavailable, offset, length);
599 }
600
601 void
602 mark_value_bytes_unavailable (struct value *value,
603 LONGEST offset, LONGEST length)
604 {
605 mark_value_bits_unavailable (value,
606 offset * TARGET_CHAR_BIT,
607 length * TARGET_CHAR_BIT);
608 }
609
610 /* Find the first range in RANGES that overlaps the range defined by
611 OFFSET and LENGTH, starting at element POS in the RANGES vector,
612 Returns the index into RANGES where such overlapping range was
613 found, or -1 if none was found. */
614
615 static int
616 find_first_range_overlap (VEC(range_s) *ranges, int pos,
617 LONGEST offset, LONGEST length)
618 {
619 range_s *r;
620 int i;
621
622 for (i = pos; VEC_iterate (range_s, ranges, i, r); i++)
623 if (ranges_overlap (r->offset, r->length, offset, length))
624 return i;
625
626 return -1;
627 }
628
629 /* Compare LENGTH_BITS of memory at PTR1 + OFFSET1_BITS with the memory at
630 PTR2 + OFFSET2_BITS. Return 0 if the memory is the same, otherwise
631 return non-zero.
632
633 It must always be the case that:
634 OFFSET1_BITS % TARGET_CHAR_BIT == OFFSET2_BITS % TARGET_CHAR_BIT
635
636 It is assumed that memory can be accessed from:
637 PTR + (OFFSET_BITS / TARGET_CHAR_BIT)
638 to:
639 PTR + ((OFFSET_BITS + LENGTH_BITS + TARGET_CHAR_BIT - 1)
640 / TARGET_CHAR_BIT) */
static int
memcmp_with_bit_offsets (const gdb_byte *ptr1, size_t offset1_bits,
			 const gdb_byte *ptr2, size_t offset2_bits,
			 size_t length_bits)
{
  /* Both offsets must share the same sub-byte phase; otherwise the
     masking below could not compare corresponding bits.  */
  gdb_assert (offset1_bits % TARGET_CHAR_BIT
	      == offset2_bits % TARGET_CHAR_BIT);

  if (offset1_bits % TARGET_CHAR_BIT != 0)
    {
      size_t bits;
      gdb_byte mask, b1, b2;

      /* The offset from the base pointers PTR1 and PTR2 is not a complete
	 number of bytes.  A number of bits up to either the next exact
	 byte boundary, or LENGTH_BITS (which ever is sooner) will be
	 compared.  */
      bits = TARGET_CHAR_BIT - offset1_bits % TARGET_CHAR_BIT;
      gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
      mask = (1 << bits) - 1;

      if (length_bits < bits)
	{
	  /* The whole comparison fits inside this partial byte; trim
	     the mask so trailing bits beyond LENGTH_BITS are ignored.  */
	  mask &= ~(gdb_byte) ((1 << (bits - length_bits)) - 1);
	  bits = length_bits;
	}

      /* Now load the two bytes and mask off the bits we care about.  */
      b1 = *(ptr1 + offset1_bits / TARGET_CHAR_BIT) & mask;
      b2 = *(ptr2 + offset2_bits / TARGET_CHAR_BIT) & mask;

      if (b1 != b2)
	return 1;

      /* Now update the length and offsets to take account of the bits
	 we've just compared.  */
      length_bits -= bits;
      offset1_bits += bits;
      offset2_bits += bits;
    }

  if (length_bits % TARGET_CHAR_BIT != 0)
    {
      size_t bits;
      size_t o1, o2;
      gdb_byte mask, b1, b2;

      /* The length is not an exact number of bytes.  After the previous
	 IF.. block then the offsets are byte aligned, or the
	 length is zero (in which case this code is not reached).  Compare
	 a number of bits at the end of the region, starting from an exact
	 byte boundary.  */
      bits = length_bits % TARGET_CHAR_BIT;
      o1 = offset1_bits + length_bits - bits;
      o2 = offset2_bits + length_bits - bits;

      gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
      /* The trailing bits occupy the high end of the final byte.  */
      mask = ((1 << bits) - 1) << (TARGET_CHAR_BIT - bits);

      gdb_assert (o1 % TARGET_CHAR_BIT == 0);
      gdb_assert (o2 % TARGET_CHAR_BIT == 0);

      b1 = *(ptr1 + o1 / TARGET_CHAR_BIT) & mask;
      b2 = *(ptr2 + o2 / TARGET_CHAR_BIT) & mask;

      if (b1 != b2)
	return 1;

      length_bits -= bits;
    }

  if (length_bits > 0)
    {
      /* We've now taken care of any stray "bits" at the start, or end of
	 the region to compare, the remainder can be covered with a simple
	 memcmp.  */
      gdb_assert (offset1_bits % TARGET_CHAR_BIT == 0);
      gdb_assert (offset2_bits % TARGET_CHAR_BIT == 0);
      gdb_assert (length_bits % TARGET_CHAR_BIT == 0);

      return memcmp (ptr1 + offset1_bits / TARGET_CHAR_BIT,
		     ptr2 + offset2_bits / TARGET_CHAR_BIT,
		     length_bits / TARGET_CHAR_BIT);
    }

  /* Length is zero, regions match.  */
  return 0;
}
729
730 /* Helper struct for find_first_range_overlap_and_match and
731 value_contents_bits_eq. Keep track of which slot of a given ranges
732 vector have we last looked at. */
733
struct ranges_and_idx
{
  /* The ranges.  */
  VEC(range_s) *ranges;

  /* The range we've last found in RANGES.  Given ranges are sorted,
     we can start the next lookup here.  Updated by
     find_first_range_overlap_and_match; -1 means no overlap found.  */
  int idx;
};
743
744 /* Helper function for value_contents_bits_eq. Compare LENGTH bits of
745 RP1's ranges starting at OFFSET1 bits with LENGTH bits of RP2's
746 ranges starting at OFFSET2 bits. Return true if the ranges match
747 and fill in *L and *H with the overlapping window relative to
748 (both) OFFSET1 or OFFSET2. */
749
750 static int
751 find_first_range_overlap_and_match (struct ranges_and_idx *rp1,
752 struct ranges_and_idx *rp2,
753 LONGEST offset1, LONGEST offset2,
754 LONGEST length, ULONGEST *l, ULONGEST *h)
755 {
756 rp1->idx = find_first_range_overlap (rp1->ranges, rp1->idx,
757 offset1, length);
758 rp2->idx = find_first_range_overlap (rp2->ranges, rp2->idx,
759 offset2, length);
760
761 if (rp1->idx == -1 && rp2->idx == -1)
762 {
763 *l = length;
764 *h = length;
765 return 1;
766 }
767 else if (rp1->idx == -1 || rp2->idx == -1)
768 return 0;
769 else
770 {
771 range_s *r1, *r2;
772 ULONGEST l1, h1;
773 ULONGEST l2, h2;
774
775 r1 = VEC_index (range_s, rp1->ranges, rp1->idx);
776 r2 = VEC_index (range_s, rp2->ranges, rp2->idx);
777
778 /* Get the unavailable windows intersected by the incoming
779 ranges. The first and last ranges that overlap the argument
780 range may be wider than said incoming arguments ranges. */
781 l1 = std::max (offset1, r1->offset);
782 h1 = std::min (offset1 + length, r1->offset + r1->length);
783
784 l2 = std::max (offset2, r2->offset);
785 h2 = std::min (offset2 + length, offset2 + r2->length);
786
787 /* Make them relative to the respective start offsets, so we can
788 compare them for equality. */
789 l1 -= offset1;
790 h1 -= offset1;
791
792 l2 -= offset2;
793 h2 -= offset2;
794
795 /* Different ranges, no match. */
796 if (l1 != l2 || h1 != h2)
797 return 0;
798
799 *h = h1;
800 *l = l1;
801 return 1;
802 }
803 }
804
805 /* Helper function for value_contents_eq. The only difference is that
806 this function is bit rather than byte based.
807
808 Compare LENGTH bits of VAL1's contents starting at OFFSET1 bits
809 with LENGTH bits of VAL2's contents starting at OFFSET2 bits.
810 Return true if the available bits match. */
811
static int
value_contents_bits_eq (const struct value *val1, int offset1,
			const struct value *val2, int offset2,
			int length)
{
  /* Each array element corresponds to a ranges source (unavailable,
     optimized out).  '1' is for VAL1, '2' for VAL2.  */
  struct ranges_and_idx rp1[2], rp2[2];

  /* See function description in value.h.  */
  gdb_assert (!val1->lazy && !val2->lazy);

  /* We shouldn't be trying to compare past the end of the values.  */
  gdb_assert (offset1 + length
	      <= TYPE_LENGTH (val1->enclosing_type) * TARGET_CHAR_BIT);
  gdb_assert (offset2 + length
	      <= TYPE_LENGTH (val2->enclosing_type) * TARGET_CHAR_BIT);

  memset (&rp1, 0, sizeof (rp1));
  memset (&rp2, 0, sizeof (rp2));
  rp1[0].ranges = val1->unavailable;
  rp2[0].ranges = val2->unavailable;
  rp1[1].ranges = val1->optimized_out;
  rp2[1].ranges = val2->optimized_out;

  /* Walk the window in chunks: each iteration compares one stretch of
     valid contents, up to the next invalid range boundary.  */
  while (length > 0)
    {
      ULONGEST l = 0, h = 0; /* init for gcc -Wall */
      int i;

      for (i = 0; i < 2; i++)
	{
	  ULONGEST l_tmp, h_tmp;

	  /* The contents only match equal if the invalid/unavailable
	     contents ranges match as well.  */
	  if (!find_first_range_overlap_and_match (&rp1[i], &rp2[i],
						   offset1, offset2, length,
						   &l_tmp, &h_tmp))
	    return 0;

	  /* We're interested in the lowest/first range found.  */
	  if (i == 0 || l_tmp < l)
	    {
	      l = l_tmp;
	      h = h_tmp;
	    }
	}

      /* Compare the available/valid contents.  */
      if (memcmp_with_bit_offsets (val1->contents, offset1,
				   val2->contents, offset2, l) != 0)
	return 0;

      /* Advance past the chunk just handled (valid part plus the
	 matching invalid range, if any).  */
      length -= h;
      offset1 += h;
      offset2 += h;
    }

  return 1;
}
873
874 int
875 value_contents_eq (const struct value *val1, LONGEST offset1,
876 const struct value *val2, LONGEST offset2,
877 LONGEST length)
878 {
879 return value_contents_bits_eq (val1, offset1 * TARGET_CHAR_BIT,
880 val2, offset2 * TARGET_CHAR_BIT,
881 length * TARGET_CHAR_BIT);
882 }
883
884 /* Prototypes for local functions. */
885
886 static void show_values (char *, int);
887
888 static void show_convenience (char *, int);
889
890
891 /* The value-history records all the values printed
892 by print commands during this session. Each chunk
893 records 60 consecutive values. The first chunk on
894 the chain records the most recent values.
895 The total number of values is in value_history_count. */
896
#define VALUE_HISTORY_CHUNK 60

struct value_history_chunk
  {
    /* Next (older) chunk on the chain; the head chunk holds the most
       recent values.  */
    struct value_history_chunk *next;

    /* The recorded values; filled as history entries are appended.  */
    struct value *values[VALUE_HISTORY_CHUNK];
  };
904
905 /* Chain of chunks now in use. */
906
907 static struct value_history_chunk *value_history_chain;
908
909 static int value_history_count; /* Abs number of last entry stored. */
910
911 \f
912 /* List of all value objects currently allocated
913 (except for those released by calls to release_value)
914 This is so they can be freed after each command. */
915
916 static struct value *all_values;
917
918 /* Allocate a lazy value for type TYPE. Its actual content is
919 "lazily" allocated too: the content field of the return value is
920 NULL; it will be allocated when it is fetched from the target. */
921
struct value *
allocate_value_lazy (struct type *type)
{
  struct value *val;

  /* Call check_typedef on our type to make sure that, if TYPE
     is a TYPE_CODE_TYPEDEF, its length is set to the length
     of the target type instead of zero.  However, we do not
     replace the typedef type by the target type, because we want
     to keep the typedef in order to be able to set the VAL's type
     description correctly.  */
  check_typedef (type);

  val = XCNEW (struct value);
  val->contents = NULL;
  /* Link the new value onto the head of the all_values chain.  */
  val->next = all_values;
  all_values = val;
  val->type = type;
  val->enclosing_type = type;
  VALUE_LVAL (val) = not_lval;
  val->location.address = 0;
  val->offset = 0;
  val->bitpos = 0;
  val->bitsize = 0;
  /* Contents are fetched on demand; see allocate_value_contents.  */
  val->lazy = 1;
  val->embedded_offset = 0;
  val->pointed_to_offset = 0;
  val->modifiable = 1;
  val->initialized = 1;  /* Default to initialized.  */

  /* Values start out on the all_values chain.  */
  val->reference_count = 1;

  return val;
}
957
958 /* The maximum size, in bytes, that GDB will try to allocate for a value.
959 The initial value of 64k was not selected for any specific reason, it is
960 just a reasonable starting point. */
961
962 static int max_value_size = 65536; /* 64k bytes */
963
964 /* It is critical that the MAX_VALUE_SIZE is at least as big as the size of
965 LONGEST, otherwise GDB will not be able to parse integer values from the
966 CLI; for example if the MAX_VALUE_SIZE could be set to 1 then GDB would
967 be unable to parse "set max-value-size 2".
968
969 As we want a consistent GDB experience across hosts with different sizes
970 of LONGEST, this arbitrary minimum value was selected, so long as this
971 is bigger than LONGEST on all GDB supported hosts we're fine. */
972
973 #define MIN_VALUE_FOR_MAX_VALUE_SIZE 16
974 gdb_static_assert (sizeof (LONGEST) <= MIN_VALUE_FOR_MAX_VALUE_SIZE);
975
976 /* Implement the "set max-value-size" command. */
977
978 static void
979 set_max_value_size (char *args, int from_tty,
980 struct cmd_list_element *c)
981 {
982 gdb_assert (max_value_size == -1 || max_value_size >= 0);
983
984 if (max_value_size > -1 && max_value_size < MIN_VALUE_FOR_MAX_VALUE_SIZE)
985 {
986 max_value_size = MIN_VALUE_FOR_MAX_VALUE_SIZE;
987 error (_("max-value-size set too low, increasing to %d bytes"),
988 max_value_size);
989 }
990 }
991
992 /* Implement the "show max-value-size" command. */
993
994 static void
995 show_max_value_size (struct ui_file *file, int from_tty,
996 struct cmd_list_element *c, const char *value)
997 {
998 if (max_value_size == -1)
999 fprintf_filtered (file, _("Maximum value size is unlimited.\n"));
1000 else
1001 fprintf_filtered (file, _("Maximum value size is %d bytes.\n"),
1002 max_value_size);
1003 }
1004
1005 /* Called before we attempt to allocate or reallocate a buffer for the
1006 contents of a value. TYPE is the type of the value for which we are
1007 allocating the buffer. If the buffer is too large (based on the user
1008 controllable setting) then throw an error. If this function returns
1009 then we should attempt to allocate the buffer. */
1010
1011 static void
1012 check_type_length_before_alloc (const struct type *type)
1013 {
1014 unsigned int length = TYPE_LENGTH (type);
1015
1016 if (max_value_size > -1 && length > max_value_size)
1017 {
1018 if (TYPE_NAME (type) != NULL)
1019 error (_("value of type `%s' requires %u bytes, which is more "
1020 "than max-value-size"), TYPE_NAME (type), length);
1021 else
1022 error (_("value requires %u bytes, which is more than "
1023 "max-value-size"), length);
1024 }
1025 }
1026
1027 /* Allocate the contents of VAL if it has not been allocated yet. */
1028
1029 static void
1030 allocate_value_contents (struct value *val)
1031 {
1032 if (!val->contents)
1033 {
1034 check_type_length_before_alloc (val->enclosing_type);
1035 val->contents
1036 = (gdb_byte *) xzalloc (TYPE_LENGTH (val->enclosing_type));
1037 }
1038 }
1039
1040 /* Allocate a value and its contents for type TYPE. */
1041
1042 struct value *
1043 allocate_value (struct type *type)
1044 {
1045 struct value *val = allocate_value_lazy (type);
1046
1047 allocate_value_contents (val);
1048 val->lazy = 0;
1049 return val;
1050 }
1051
1052 /* Allocate a value that has the correct length
1053 for COUNT repetitions of type TYPE. */
1054
1055 struct value *
1056 allocate_repeat_value (struct type *type, int count)
1057 {
1058 int low_bound = current_language->string_lower_bound; /* ??? */
1059 /* FIXME-type-allocation: need a way to free this type when we are
1060 done with it. */
1061 struct type *array_type
1062 = lookup_array_range_type (type, low_bound, count + low_bound - 1);
1063
1064 return allocate_value (array_type);
1065 }
1066
/* Allocate a lazy value of type TYPE whose contents are produced on
   demand through the FUNCS callback table.  CLOSURE is stored in the
   value and passed back to the callbacks uninterpreted.  */

struct value *
allocate_computed_value (struct type *type,
                         const struct lval_funcs *funcs,
                         void *closure)
{
  struct value *v = allocate_value_lazy (type);

  VALUE_LVAL (v) = lval_computed;
  v->location.computed.funcs = funcs;
  v->location.computed.closure = closure;

  return v;
}
1080
1081 /* Allocate NOT_LVAL value for type TYPE being OPTIMIZED_OUT. */
1082
1083 struct value *
1084 allocate_optimized_out_value (struct type *type)
1085 {
1086 struct value *retval = allocate_value_lazy (type);
1087
1088 mark_value_bytes_optimized_out (retval, 0, TYPE_LENGTH (type));
1089 set_value_lazy (retval, 0);
1090 return retval;
1091 }
1092
1093 /* Accessor methods. */
1094
/* Return the value chained after VALUE on the list of all allocated
   values.  */

struct value *
value_next (const struct value *value)
{
  return value->next;
}
1100
/* Return the declared type of VALUE.  */

struct type *
value_type (const struct value *value)
{
  return value->type;
}
/* Overwrite VALUE's type in place.  Deprecated: prefer creating a new
   value with the desired type instead of mutating an existing one.  */

void
deprecated_set_value_type (struct value *value, struct type *type)
{
  value->type = type;
}
1111
/* Return the offset recorded for VALUE relative to its location (or
   to its parent value, for components).  */

LONGEST
value_offset (const struct value *value)
{
  return value->offset;
}
/* Set VALUE's location offset to OFFSET.  */

void
set_value_offset (struct value *value, LONGEST offset)
{
  value->offset = offset;
}
1122
/* Return the bit position recorded for VALUE (used for bitfields).  */

LONGEST
value_bitpos (const struct value *value)
{
  return value->bitpos;
}
/* Set VALUE's bit position to BIT.  */

void
set_value_bitpos (struct value *value, LONGEST bit)
{
  value->bitpos = bit;
}
1133
/* Return the bit size recorded for VALUE (nonzero for bitfields).  */

LONGEST
value_bitsize (const struct value *value)
{
  return value->bitsize;
}
/* Set VALUE's bit size to BIT.  */

void
set_value_bitsize (struct value *value, LONGEST bit)
{
  value->bitsize = bit;
}
1144
/* Return the parent value of VALUE, or NULL if it has none.  */

struct value *
value_parent (const struct value *value)
{
  return value->parent;
}
1150
/* See value.h.  */

void
set_value_parent (struct value *value, struct value *parent)
{
  struct value *old = value->parent;

  value->parent = parent;
  /* Take the new reference before dropping the old one; this keeps
     PARENT alive even in the degenerate case where it is the same
     value as OLD.  */
  if (parent != NULL)
    value_incref (parent);
  value_free (old);
}
1163
/* Return a writeable pointer into VALUE's contents buffer at the
   embedded offset (scaled by the architecture's addressable memory
   unit size), allocating the buffer first if necessary.  No
   availability or optimized-out checking is done.  */

gdb_byte *
value_contents_raw (struct value *value)
{
  struct gdbarch *arch = get_value_arch (value);
  int unit_size = gdbarch_addressable_memory_unit_size (arch);

  allocate_value_contents (value);
  return value->contents + value->embedded_offset * unit_size;
}
1173
/* Like value_contents_raw, but for the whole enclosing object: the
   buffer start, with no embedded-offset adjustment.  */

gdb_byte *
value_contents_all_raw (struct value *value)
{
  allocate_value_contents (value);
  return value->contents;
}
1180
/* Return the enclosing type of VALUE (the type of the whole object
   the contents buffer covers, which may be larger than the declared
   type).  */

struct type *
value_enclosing_type (const struct value *value)
{
  return value->enclosing_type;
}
1186
/* Look at value.h for description.  Returns VALUE's declared type,
   unless "set print object on" is in effect, in which case the
   dynamic (RTTI) type is looked up for pointers/references to
   structs, or the enclosing type is substituted for simple types
   when RESOLVE_SIMPLE_TYPES is set.  */

struct type *
value_actual_type (struct value *value, int resolve_simple_types,
		   int *real_type_found)
{
  struct value_print_options opts;
  struct type *result;

  get_user_print_options (&opts);

  /* REAL_TYPE_FOUND is optional; when given, it reports whether the
     returned type differs from the declared type.  */
  if (real_type_found)
    *real_type_found = 0;
  result = value_type (value);
  if (opts.objectprint)
    {
      /* If result's target type is TYPE_CODE_STRUCT, proceed to
	 fetch its rtti type.  */
      if ((TYPE_CODE (result) == TYPE_CODE_PTR || TYPE_IS_REFERENCE (result))
	  && TYPE_CODE (check_typedef (TYPE_TARGET_TYPE (result)))
	     == TYPE_CODE_STRUCT
	  && !value_optimized_out (value))
        {
          struct type *real_type;

          real_type = value_rtti_indirect_type (value, NULL, NULL, NULL);
          if (real_type)
            {
              if (real_type_found)
                *real_type_found = 1;
              result = real_type;
            }
        }
      else if (resolve_simple_types)
        {
          if (real_type_found)
            *real_type_found = 1;
          result = value_enclosing_type (value);
        }
    }

  return result;
}
1230
/* Throw the standard "optimized out" error.  Does not return.  */

void
error_value_optimized_out (void)
{
  error (_("value has been optimized out"));
}
1236
1237 static void
1238 require_not_optimized_out (const struct value *value)
1239 {
1240 if (!VEC_empty (range_s, value->optimized_out))
1241 {
1242 if (value->lval == lval_register)
1243 error (_("register has not been saved in frame"));
1244 else
1245 error_value_optimized_out ();
1246 }
1247 }
1248
/* Throw NOT_AVAILABLE_ERROR if any part of VALUE is unavailable.  */

static void
require_available (const struct value *value)
{
  if (!VEC_empty (range_s, value->unavailable))
    throw_error (NOT_AVAILABLE_ERROR, _("value is not available"));
}
1255
/* Return VALUE's contents buffer, fetching it first if VALUE is still
   lazy.  Unlike value_contents_all, no availability or optimized-out
   errors are raised, so this is suitable for printing code that
   handles those ranges itself.  */

const gdb_byte *
value_contents_for_printing (struct value *value)
{
  if (value->lazy)
    value_fetch_lazy (value);
  return value->contents;
}
1263
/* Const variant of value_contents_for_printing; VALUE must already
   have been fetched (not lazy).  */

const gdb_byte *
value_contents_for_printing_const (const struct value *value)
{
  gdb_assert (!value->lazy);
  return value->contents;
}
1270
/* Return VALUE's whole-object contents, fetching if needed, and
   raising an error if any part is optimized out or unavailable.  */

const gdb_byte *
value_contents_all (struct value *value)
{
  const gdb_byte *result = value_contents_for_printing (value);
  require_not_optimized_out (value);
  require_available (value);
  return result;
}
1279
/* Copy ranges in SRC_RANGE that overlap [SRC_BIT_OFFSET,
   SRC_BIT_OFFSET+BIT_LENGTH) ranges into *DST_RANGE, adjusted.  */

static void
ranges_copy_adjusted (VEC (range_s) **dst_range, int dst_bit_offset,
		      VEC (range_s) *src_range, int src_bit_offset,
		      int bit_length)
{
  range_s *r;
  int i;

  for (i = 0; VEC_iterate (range_s, src_range, i, r); i++)
    {
      ULONGEST h, l;

      /* Intersect this range with the window being copied.  The
	 LONGEST casts keep the comparisons in the signed domain of
	 r->offset.  */
      l = std::max (r->offset, (LONGEST) src_bit_offset);
      h = std::min (r->offset + r->length,
		    (LONGEST) src_bit_offset + bit_length);

      /* Re-base the surviving piece onto DST's coordinates.  */
      if (l < h)
	insert_into_bit_range_vector (dst_range,
				      dst_bit_offset + (l - src_bit_offset),
				      h - l);
    }
}
1305
1306 /* Copy the ranges metadata in SRC that overlaps [SRC_BIT_OFFSET,
1307 SRC_BIT_OFFSET+BIT_LENGTH) into DST, adjusted. */
1308
1309 static void
1310 value_ranges_copy_adjusted (struct value *dst, int dst_bit_offset,
1311 const struct value *src, int src_bit_offset,
1312 int bit_length)
1313 {
1314 ranges_copy_adjusted (&dst->unavailable, dst_bit_offset,
1315 src->unavailable, src_bit_offset,
1316 bit_length);
1317 ranges_copy_adjusted (&dst->optimized_out, dst_bit_offset,
1318 src->optimized_out, src_bit_offset,
1319 bit_length);
1320 }
1321
1322 /* Copy LENGTH target addressable memory units of SRC value's (all) contents
1323 (value_contents_all) starting at SRC_OFFSET, into DST value's (all)
1324 contents, starting at DST_OFFSET. If unavailable contents are
1325 being copied from SRC, the corresponding DST contents are marked
1326 unavailable accordingly. Neither DST nor SRC may be lazy
1327 values.
1328
1329 It is assumed the contents of DST in the [DST_OFFSET,
1330 DST_OFFSET+LENGTH) range are wholly available. */
1331
1332 void
1333 value_contents_copy_raw (struct value *dst, LONGEST dst_offset,
1334 struct value *src, LONGEST src_offset, LONGEST length)
1335 {
1336 LONGEST src_bit_offset, dst_bit_offset, bit_length;
1337 struct gdbarch *arch = get_value_arch (src);
1338 int unit_size = gdbarch_addressable_memory_unit_size (arch);
1339
1340 /* A lazy DST would make that this copy operation useless, since as
1341 soon as DST's contents were un-lazied (by a later value_contents
1342 call, say), the contents would be overwritten. A lazy SRC would
1343 mean we'd be copying garbage. */
1344 gdb_assert (!dst->lazy && !src->lazy);
1345
1346 /* The overwritten DST range gets unavailability ORed in, not
1347 replaced. Make sure to remember to implement replacing if it
1348 turns out actually necessary. */
1349 gdb_assert (value_bytes_available (dst, dst_offset, length));
1350 gdb_assert (!value_bits_any_optimized_out (dst,
1351 TARGET_CHAR_BIT * dst_offset,
1352 TARGET_CHAR_BIT * length));
1353
1354 /* Copy the data. */
1355 memcpy (value_contents_all_raw (dst) + dst_offset * unit_size,
1356 value_contents_all_raw (src) + src_offset * unit_size,
1357 length * unit_size);
1358
1359 /* Copy the meta-data, adjusted. */
1360 src_bit_offset = src_offset * unit_size * HOST_CHAR_BIT;
1361 dst_bit_offset = dst_offset * unit_size * HOST_CHAR_BIT;
1362 bit_length = length * unit_size * HOST_CHAR_BIT;
1363
1364 value_ranges_copy_adjusted (dst, dst_bit_offset,
1365 src, src_bit_offset,
1366 bit_length);
1367 }
1368
/* Copy LENGTH bytes of SRC value's (all) contents
   (value_contents_all) starting at SRC_OFFSET byte, into DST value's
   (all) contents, starting at DST_OFFSET.  If unavailable contents
   are being copied from SRC, the corresponding DST contents are
   marked unavailable accordingly.  DST must not be lazy.  If SRC is
   lazy, it will be fetched now.

   It is assumed the contents of DST in the [DST_OFFSET,
   DST_OFFSET+LENGTH) range are wholly available.  */

void
value_contents_copy (struct value *dst, LONGEST dst_offset,
		     struct value *src, LONGEST src_offset, LONGEST length)
{
  /* Un-lazy SRC so that value_contents_copy_raw's preconditions
     hold.  */
  if (src->lazy)
    value_fetch_lazy (src);

  value_contents_copy_raw (dst, dst_offset, src, src_offset, length);
}
1388
/* Return nonzero if VALUE's contents have not been fetched yet.  */

int
value_lazy (const struct value *value)
{
  return value->lazy;
}
1394
/* Set VALUE's lazy flag to VAL.  */

void
set_value_lazy (struct value *value, int val)
{
  value->lazy = val;
}
1400
/* Return VALUE's stack flag.  */

int
value_stack (const struct value *value)
{
  return value->stack;
}
1406
/* Set VALUE's stack flag to VAL.  */

void
set_value_stack (struct value *value, int val)
{
  value->stack = val;
}
1412
/* Return VALUE's contents (at the embedded offset), fetching if
   needed, and raising an error if any part is optimized out or
   unavailable.  */

const gdb_byte *
value_contents (struct value *value)
{
  const gdb_byte *result = value_contents_writeable (value);
  require_not_optimized_out (value);
  require_available (value);
  return result;
}
1421
/* Like value_contents_raw, but un-lazies VALUE first so the buffer
   holds the fetched data before being handed out for writing.  */

gdb_byte *
value_contents_writeable (struct value *value)
{
  if (value->lazy)
    value_fetch_lazy (value);
  return value_contents_raw (value);
}
1429
/* Return nonzero if any part of VALUE has been optimized out.  May
   fetch a lazy VALUE as a side effect.  */

int
value_optimized_out (struct value *value)
{
  /* We can only know if a value is optimized out once we have tried to
     fetch it.  */
  if (VEC_empty (range_s, value->optimized_out) && value->lazy)
    {
      TRY
	{
	  value_fetch_lazy (value);
	}
      CATCH (ex, RETURN_MASK_ERROR)
	{
	  /* Fall back to checking value->optimized_out.  Errors during
	     the fetch are deliberately swallowed here; the ranges
	     recorded so far are the best answer available.  */
	}
      END_CATCH
    }

  return !VEC_empty (range_s, value->optimized_out);
}
1450
/* Mark contents of VALUE as optimized out, starting at OFFSET bytes, and
   the following LENGTH bytes.  */

void
mark_value_bytes_optimized_out (struct value *value, int offset, int length)
{
  /* The underlying range vector is bit-granular; convert bytes to
     bits.  */
  mark_value_bits_optimized_out (value,
				 offset * TARGET_CHAR_BIT,
				 length * TARGET_CHAR_BIT);
}
1461
/* See value.h.  */

void
mark_value_bits_optimized_out (struct value *value,
			       LONGEST offset, LONGEST length)
{
  insert_into_bit_range_vector (&value->optimized_out, offset, length);
}
1470
1471 int
1472 value_bits_synthetic_pointer (const struct value *value,
1473 LONGEST offset, LONGEST length)
1474 {
1475 if (value->lval != lval_computed
1476 || !value->location.computed.funcs->check_synthetic_pointer)
1477 return 0;
1478 return value->location.computed.funcs->check_synthetic_pointer (value,
1479 offset,
1480 length);
1481 }
1482
/* Return VALUE's embedded offset (offset of the declared type's data
   within the enclosing object's buffer).  */

LONGEST
value_embedded_offset (const struct value *value)
{
  return value->embedded_offset;
}
1488
/* Set VALUE's embedded offset to VAL.  */

void
set_value_embedded_offset (struct value *value, LONGEST val)
{
  value->embedded_offset = val;
}
1494
/* Return VALUE's pointed-to offset.  */

LONGEST
value_pointed_to_offset (const struct value *value)
{
  return value->pointed_to_offset;
}
1500
/* Set VALUE's pointed-to offset to VAL.  */

void
set_value_pointed_to_offset (struct value *value, LONGEST val)
{
  value->pointed_to_offset = val;
}
1506
/* Return the callback table of computed lvalue V.  V must be a
   computed lvalue.  */

const struct lval_funcs *
value_computed_funcs (const struct value *v)
{
  gdb_assert (value_lval_const (v) == lval_computed);

  return v->location.computed.funcs;
}
1514
/* Return the closure of computed lvalue V.  V must be a computed
   lvalue.  */

void *
value_computed_closure (const struct value *v)
{
  gdb_assert (v->lval == lval_computed);

  return v->location.computed.closure;
}
1522
/* Return a mutable pointer to VALUE's lval kind (used by the
   VALUE_LVAL macro).  Deprecated direct-access hack.  */

enum lval_type *
deprecated_value_lval_hack (struct value *value)
{
  return &value->lval;
}
1528
/* Return VALUE's lval kind, usable on a const value.  */

enum lval_type
value_lval_const (const struct value *value)
{
  return value->lval;
}
1534
/* Return the memory address of VALUE, or 0 if VALUE is not an lvalue
   in memory.  */

CORE_ADDR
value_address (const struct value *value)
{
  if (value->lval != lval_memory)
    return 0;
  /* A component value's address is relative to its parent's.  */
  if (value->parent != NULL)
    return value_address (value->parent) + value->offset;
  /* A constant dynamic data-location property overrides the recorded
     location.  */
  if (NULL != TYPE_DATA_LOCATION (value_type (value)))
    {
      gdb_assert (PROP_CONST == TYPE_DATA_LOCATION_KIND (value_type (value)));
      return TYPE_DATA_LOCATION_ADDR (value_type (value));
    }

  return value->location.address + value->offset;
}
1550
/* Return VALUE's recorded address without applying offset or
   data-location adjustments; 0 if VALUE is not in memory.  */

CORE_ADDR
value_raw_address (const struct value *value)
{
  if (value->lval != lval_memory)
    return 0;
  return value->location.address;
}
1558
/* Set the address of VALUE, which must be an lvalue in memory.  */

void
set_value_address (struct value *value, CORE_ADDR addr)
{
  gdb_assert (value->lval == lval_memory);
  value->location.address = addr;
}
1565
/* Return a mutable pointer to VALUE's internalvar location field.
   Deprecated direct-access hack.  */

struct internalvar **
deprecated_value_internalvar_hack (struct value *value)
{
  return &value->location.internalvar;
}
1571
/* Return a mutable pointer to the frame id of register VALUE.
   Deprecated direct-access hack; VALUE must be a register lvalue.  */

struct frame_id *
deprecated_value_next_frame_id_hack (struct value *value)
{
  gdb_assert (value->lval == lval_register);
  return &value->location.reg.next_frame_id;
}
1578
/* Return a mutable pointer to the register number of register VALUE.
   Deprecated direct-access hack; VALUE must be a register lvalue.  */

int *
deprecated_value_regnum_hack (struct value *value)
{
  gdb_assert (value->lval == lval_register);
  return &value->location.reg.regnum;
}
1585
/* Return nonzero if VALUE may be assigned to.  */

int
deprecated_value_modifiable (const struct value *value)
{
  return value->modifiable;
}
1591 \f
/* Return a mark in the value chain.  All values allocated after the
   mark is obtained (except for those released) are subject to being freed
   if a subsequent value_free_to_mark is passed the mark.  */
struct value *
value_mark (void)
{
  /* The head of the chain is the newest value; it serves as the
     mark.  */
  return all_values;
}
1600
/* Take a reference to VAL.  VAL will not be deallocated until all
   references are released.  */

void
value_incref (struct value *val)
{
  val->reference_count++;
}
1609
1610 /* Release a reference to VAL, which was acquired with value_incref.
1611 This function is also called to deallocate values from the value
1612 chain. */
1613
1614 void
1615 value_free (struct value *val)
1616 {
1617 if (val)
1618 {
1619 gdb_assert (val->reference_count > 0);
1620 val->reference_count--;
1621 if (val->reference_count > 0)
1622 return;
1623
1624 /* If there's an associated parent value, drop our reference to
1625 it. */
1626 if (val->parent != NULL)
1627 value_free (val->parent);
1628
1629 if (VALUE_LVAL (val) == lval_computed)
1630 {
1631 const struct lval_funcs *funcs = val->location.computed.funcs;
1632
1633 if (funcs->free_closure)
1634 funcs->free_closure (val);
1635 }
1636 else if (VALUE_LVAL (val) == lval_xcallable)
1637 free_xmethod_worker (val->location.xm_worker);
1638
1639 xfree (val->contents);
1640 VEC_free (range_s, val->unavailable);
1641 }
1642 xfree (val);
1643 }
1644
1645 /* Free all values allocated since MARK was obtained by value_mark
1646 (except for those released). */
1647 void
1648 value_free_to_mark (const struct value *mark)
1649 {
1650 struct value *val;
1651 struct value *next;
1652
1653 for (val = all_values; val && val != mark; val = next)
1654 {
1655 next = val->next;
1656 val->released = 1;
1657 value_free (val);
1658 }
1659 all_values = val;
1660 }
1661
1662 /* Free all the values that have been allocated (except for those released).
1663 Call after each command, successful or not.
1664 In practice this is called before each command, which is sufficient. */
1665
1666 void
1667 free_all_values (void)
1668 {
1669 struct value *val;
1670 struct value *next;
1671
1672 for (val = all_values; val; val = next)
1673 {
1674 next = val->next;
1675 val->released = 1;
1676 value_free (val);
1677 }
1678
1679 all_values = 0;
1680 }
1681
1682 /* Frees all the elements in a chain of values. */
1683
1684 void
1685 free_value_chain (struct value *v)
1686 {
1687 struct value *next;
1688
1689 for (; v; v = next)
1690 {
1691 next = value_next (v);
1692 value_free (v);
1693 }
1694 }
1695
1696 /* Remove VAL from the chain all_values
1697 so it will not be freed automatically. */
1698
1699 void
1700 release_value (struct value *val)
1701 {
1702 struct value *v;
1703
1704 if (all_values == val)
1705 {
1706 all_values = val->next;
1707 val->next = NULL;
1708 val->released = 1;
1709 return;
1710 }
1711
1712 for (v = all_values; v; v = v->next)
1713 {
1714 if (v->next == val)
1715 {
1716 v->next = val->next;
1717 val->next = NULL;
1718 val->released = 1;
1719 break;
1720 }
1721 }
1722 }
1723
/* If the value is not already released, release it.
   If the value is already released, increment its reference count.
   That is, this function ensures that the value is released from the
   value chain and that the caller owns a reference to it.  */

void
release_value_or_incref (struct value *val)
{
  if (val->released)
    value_incref (val);
  else
    release_value (val);
}
1737
/* Release all values up to mark.  Returns the head of the chain of
   released values (still linked through their next fields).
   NOTE(review): if MARK is the current head of all_values (nothing
   allocated since the mark was taken), the loop below releases the
   entire chain rather than nothing — presumably callers never pass
   such a mark; verify before relying on that case.  */
struct value *
value_release_to_mark (const struct value *mark)
{
  struct value *val;
  struct value *next;

  for (val = next = all_values; next; next = next->next)
    {
      /* Stop when the *following* element is the mark, so the chain
	 is cut just before it.  */
      if (next->next == mark)
	{
	  all_values = next->next;
	  next->next = NULL;
	  return val;
	}
      next->released = 1;
    }
  all_values = 0;
  return val;
}
1758
/* Return a copy of the value ARG.
   It contains the same contents, for same memory address,
   but it's a different block of storage.  */

struct value *
value_copy (struct value *arg)
{
  struct type *encl_type = value_enclosing_type (arg);
  struct value *val;

  /* A lazy source yields a lazy copy; contents are only duplicated
     when ARG has actually been fetched.  */
  if (value_lazy (arg))
    val = allocate_value_lazy (encl_type);
  else
    val = allocate_value (encl_type);
  val->type = arg->type;
  VALUE_LVAL (val) = VALUE_LVAL (arg);
  val->location = arg->location;
  val->offset = arg->offset;
  val->bitpos = arg->bitpos;
  val->bitsize = arg->bitsize;
  val->lazy = arg->lazy;
  val->embedded_offset = value_embedded_offset (arg);
  val->pointed_to_offset = arg->pointed_to_offset;
  val->modifiable = arg->modifiable;
  if (!value_lazy (val))
    {
      memcpy (value_contents_all_raw (val), value_contents_all_raw (arg),
	      TYPE_LENGTH (value_enclosing_type (arg)));

    }
  /* The range-metadata vectors are per-value storage; duplicate
     them.  */
  val->unavailable = VEC_copy (range_s, arg->unavailable);
  val->optimized_out = VEC_copy (range_s, arg->optimized_out);
  set_value_parent (val, arg->parent);
  if (VALUE_LVAL (val) == lval_computed)
    {
      const struct lval_funcs *funcs = val->location.computed.funcs;

      /* Let the lval implementation duplicate (or reference-count)
	 its closure.  */
      if (funcs->copy_closure)
	val->location.computed.closure = funcs->copy_closure (val);
    }
  return val;
}
1801
1802 /* Return a "const" and/or "volatile" qualified version of the value V.
1803 If CNST is true, then the returned value will be qualified with
1804 "const".
1805 if VOLTL is true, then the returned value will be qualified with
1806 "volatile". */
1807
1808 struct value *
1809 make_cv_value (int cnst, int voltl, struct value *v)
1810 {
1811 struct type *val_type = value_type (v);
1812 struct type *enclosing_type = value_enclosing_type (v);
1813 struct value *cv_val = value_copy (v);
1814
1815 deprecated_set_value_type (cv_val,
1816 make_cv_type (cnst, voltl, val_type, NULL));
1817 set_value_enclosing_type (cv_val,
1818 make_cv_type (cnst, voltl, enclosing_type, NULL));
1819
1820 return cv_val;
1821 }
1822
/* Return a version of ARG that is non-lvalue.  If ARG is already
   non-lvalue it is returned unchanged; otherwise a fresh not_lval
   copy of its contents is made.  */

struct value *
value_non_lval (struct value *arg)
{
  if (VALUE_LVAL (arg) != not_lval)
    {
      struct type *enc_type = value_enclosing_type (arg);
      /* allocate_value produces a not_lval value by default.  */
      struct value *val = allocate_value (enc_type);

      memcpy (value_contents_all_raw (val), value_contents_all (arg),
	      TYPE_LENGTH (enc_type));
      val->type = arg->type;
      set_value_embedded_offset (val, value_embedded_offset (arg));
      set_value_pointed_to_offset (val, value_pointed_to_offset (arg));
      return val;
    }
  return arg;
}
1842
/* Write contents of V at ADDR and set its lval type to be LVAL_MEMORY.
   V must currently be a non-lvalue.  */

void
value_force_lval (struct value *v, CORE_ADDR addr)
{
  gdb_assert (VALUE_LVAL (v) == not_lval);

  write_memory (addr, value_contents_raw (v), TYPE_LENGTH (value_type (v)));
  v->lval = lval_memory;
  v->location.address = addr;
}
1854
/* Set COMPONENT's location to match WHOLE's, so COMPONENT behaves as
   a sub-object of WHOLE.  lval_internalvar maps to
   lval_internalvar_component; WHOLE must not be an xmethod value.  */

void
set_value_component_location (struct value *component,
			      const struct value *whole)
{
  struct type *type;

  gdb_assert (whole->lval != lval_xcallable);

  if (whole->lval == lval_internalvar)
    VALUE_LVAL (component) = lval_internalvar_component;
  else
    VALUE_LVAL (component) = whole->lval;

  component->location = whole->location;
  if (whole->lval == lval_computed)
    {
      const struct lval_funcs *funcs = whole->location.computed.funcs;

      /* Let the lval implementation duplicate its closure for the
	 component.  */
      if (funcs->copy_closure)
	component->location.computed.closure = funcs->copy_closure (whole);
    }

  /* If type has a dynamic resolved location property
     update its value address.  */
  type = value_type (whole);
  if (NULL != TYPE_DATA_LOCATION (type)
      && TYPE_DATA_LOCATION_KIND (type) == PROP_CONST)
    set_value_address (component, TYPE_DATA_LOCATION_ADDR (type));
}
1884
1885 /* Access to the value history. */
1886
/* Record a new value in the value history.
   Returns the absolute history index of the entry.  */

int
record_latest_value (struct value *val)
{
  int i;

  /* We don't want this value to have anything to do with the inferior anymore.
     In particular, "set $1 = 50" should not affect the variable from which
     the value was taken, and fast watchpoints should be able to assume that
     a value on the value history never changes.  */
  if (value_lazy (val))
    value_fetch_lazy (val);
  /* We preserve VALUE_LVAL so that the user can find out where it was fetched
     from.  This is a bit dubious, because then *&$1 does not just return $1
     but the current contents of that location.  c'est la vie...  */
  val->modifiable = 0;

  /* The value may have already been released, in which case we're adding a
     new reference for its entry in the history.  That is why we call
     release_value_or_incref here instead of release_value.  */
  release_value_or_incref (val);

  /* Here we treat value_history_count as origin-zero
     and applying to the value being stored now.  */

  i = value_history_count % VALUE_HISTORY_CHUNK;
  if (i == 0)
    {
      /* Start a new chunk; it is prepended to the chunk chain.  */
      struct value_history_chunk *newobj = XCNEW (struct value_history_chunk);

      newobj->next = value_history_chain;
      value_history_chain = newobj;
    }

  value_history_chain->values[i] = val;

  /* Now we regard value_history_count as origin-one
     and applying to the value just stored.  */

  return ++value_history_count;
}
1930
/* Return a copy of the value in the history with sequence number NUM.
   NUM <= 0 is relative to the most recent entry (0 is the latest,
   -1 the one before, ...).  */

struct value *
access_value_history (int num)
{
  struct value_history_chunk *chunk;
  int i;
  int absnum = num;

  /* Convert a relative (non-positive) number to an absolute one.  */
  if (absnum <= 0)
    absnum += value_history_count;

  if (absnum <= 0)
    {
      if (num == 0)
	error (_("The history is empty."));
      else if (num == 1)
	error (_("There is only one value in the history."));
      else
	error (_("History does not go back to $$%d."), -num);
    }
  if (absnum > value_history_count)
    error (_("History has not yet reached $%d."), absnum);

  absnum--;

  /* Now absnum is always absolute and origin zero.  */

  /* The chunk chain is newest-first, so skip forward from the newest
     chunk to the one holding entry ABSNUM.  */
  chunk = value_history_chain;
  for (i = (value_history_count - 1) / VALUE_HISTORY_CHUNK
	 - absnum / VALUE_HISTORY_CHUNK;
       i > 0; i--)
    chunk = chunk->next;

  return value_copy (chunk->values[absnum % VALUE_HISTORY_CHUNK]);
}
1967
/* Implement the "show values" command: print ten entries of the value
   history around NUM_EXP (or continue from the last position when
   NUM_EXP is "+" or the command is repeated).  */

static void
show_values (char *num_exp, int from_tty)
{
  int i;
  struct value *val;
  /* Static so that consecutive invocations continue where the
     previous one stopped.  */
  static int num = 1;

  if (num_exp)
    {
      /* "show values +" should print from the stored position.
         "show values <exp>" should print around value number <exp>.  */
      if (num_exp[0] != '+' || num_exp[1] != '\0')
	num = parse_and_eval_long (num_exp) - 5;
    }
  else
    {
      /* "show values" means print the last 10 values.  */
      num = value_history_count - 9;
    }

  if (num <= 0)
    num = 1;

  for (i = num; i < num + 10 && i <= value_history_count; i++)
    {
      struct value_print_options opts;

      val = access_value_history (i);
      printf_filtered (("$%d = "), i);
      get_user_print_options (&opts);
      value_print (val, gdb_stdout, &opts);
      printf_filtered (("\n"));
    }

  /* The next "show values +" should start after what we just printed.  */
  num += 10;

  /* Hitting just return after this command should do the same thing as
     "show values +".  If num_exp is null, this is unnecessary, since
     "show values +" is not useful after "show values".  */
  if (from_tty && num_exp)
    {
      num_exp[0] = '+';
      num_exp[1] = '\0';
    }
}
2014 \f
/* The possible kinds of content held by an internal (convenience)
   variable; selects the active member of union internalvar_data.  */

enum internalvar_kind
{
  /* The internal variable is empty.  */
  INTERNALVAR_VOID,

  /* The value of the internal variable is provided directly as
     a GDB value object.  */
  INTERNALVAR_VALUE,

  /* A fresh value is computed via a call-back routine on every
     access to the internal variable.  */
  INTERNALVAR_MAKE_VALUE,

  /* The internal variable holds a GDB internal convenience function.  */
  INTERNALVAR_FUNCTION,

  /* The variable holds an integer value.  */
  INTERNALVAR_INTEGER,

  /* The variable holds a GDB-provided string.  */
  INTERNALVAR_STRING,
};
2037
/* Per-kind payload of an internal variable; which member is valid is
   determined by the enclosing internalvar's kind field.  */

union internalvar_data
{
  /* A value object used with INTERNALVAR_VALUE.  */
  struct value *value;

  /* The call-back routine used with INTERNALVAR_MAKE_VALUE.  */
  struct
  {
    /* The functions to call.  */
    const struct internalvar_funcs *functions;

    /* The function's user-data.  */
    void *data;
  } make_value;

  /* The internal function used with INTERNALVAR_FUNCTION.  */
  struct
  {
    struct internal_function *function;
    /* True if this is the canonical name for the function.  */
    int canonical;
  } fn;

  /* An integer value used with INTERNALVAR_INTEGER.  */
  struct
  {
    /* If type is non-NULL, it will be used as the type to generate
       a value for this internal variable.  If type is NULL, a default
       integer type for the architecture is used.  */
    struct type *type;
    LONGEST val;
  } integer;

  /* A string value used with INTERNALVAR_STRING.  */
  char *string;
};
2074
/* Internal variables.  These are variables within the debugger
   that hold values assigned by debugger commands.
   The user refers to them with a '$' prefix
   that does not appear in the variable names stored internally.  */

struct internalvar
{
  /* Link to the next variable on the singly-linked internalvars
     chain.  */
  struct internalvar *next;
  /* Name without the '$' prefix; heap-allocated.  */
  char *name;

  /* We support various different kinds of content of an internal variable.
     enum internalvar_kind specifies the kind, and union internalvar_data
     provides the data associated with this particular kind.  */

  enum internalvar_kind kind;

  union internalvar_data u;
};
2093
/* Head of the chain of all defined internal variables (newest first;
   create_internalvar prepends).  */
static struct internalvar *internalvars;
2095
/* Implement the "init-if-undefined" command: if the variable does not
   already exist create it and give it the value given.  If no value
   is given then the default is zero.  ARGS is the raw assignment
   expression text.  */
static void
init_if_undefined_command (char* args, int from_tty)
{
  struct internalvar* intvar;

  /* Parse the expression - this is taken from set_command().  */
  expression_up expr = parse_expression (args);

  /* Validate the expression.
     Was the expression an assignment?
     Or even an expression at all?  */
  if (expr->nelts == 0 || expr->elts[0].opcode != BINOP_ASSIGN)
    error (_("Init-if-undefined requires an assignment expression."));

  /* Extract the variable from the parsed expression.
     In the case of an assign the lvalue will be in elts[1] and elts[2].  */
  if (expr->elts[1].opcode != OP_INTERNALVAR)
    error (_("The first parameter to init-if-undefined "
	     "should be a GDB variable."));
  intvar = expr->elts[2].internalvar;

  /* Only evaluate the expression if the lvalue is void.
     This may still fail if the expression is invalid.  */
  if (intvar->kind == INTERNALVAR_VOID)
    evaluate_expression (expr.get ());
}
2124
2125
2126 /* Look up an internal variable with name NAME. NAME should not
2127 normally include a dollar sign.
2128
2129 If the specified internal variable does not exist,
2130 the return value is NULL. */
2131
2132 struct internalvar *
2133 lookup_only_internalvar (const char *name)
2134 {
2135 struct internalvar *var;
2136
2137 for (var = internalvars; var; var = var->next)
2138 if (strcmp (var->name, name) == 0)
2139 return var;
2140
2141 return NULL;
2142 }
2143
2144 /* Complete NAME by comparing it to the names of internal
2145 variables. */
2146
2147 void
2148 complete_internalvar (completion_tracker &tracker, const char *name)
2149 {
2150 struct internalvar *var;
2151 int len;
2152
2153 len = strlen (name);
2154
2155 for (var = internalvars; var; var = var->next)
2156 if (strncmp (var->name, name, len) == 0)
2157 {
2158 gdb::unique_xmalloc_ptr<char> copy (xstrdup (var->name));
2159
2160 tracker.add_completion (std::move (copy));
2161 }
2162 }
2163
/* Create an internal variable with name NAME and with a void value.
   NAME should not normally include a dollar sign.  The new variable
   is prepended to the internalvars chain.  */

struct internalvar *
create_internalvar (const char *name)
{
  struct internalvar *var = XNEW (struct internalvar);

  var->name = concat (name, (char *)NULL);
  var->kind = INTERNALVAR_VOID;
  var->next = internalvars;
  internalvars = var;
  return var;
}
2178
/* Create an internal variable with name NAME and register FUN as the
   function that value_of_internalvar uses to create a value whenever
   this variable is referenced.  NAME should not normally include a
   dollar sign.  DATA is passed uninterpreted to FUN when it is
   called.  CLEANUP, if not NULL, is called when the internal variable
   is destroyed.  It is passed DATA as its only argument.  */

struct internalvar *
create_internalvar_type_lazy (const char *name,
			      const struct internalvar_funcs *funcs,
			      void *data)
{
  struct internalvar *var = create_internalvar (name);

  var->kind = INTERNALVAR_MAKE_VALUE;
  var->u.make_value.functions = funcs;
  var->u.make_value.data = data;
  return var;
}
2198
2199 /* See documentation in value.h. */
2200
2201 int
2202 compile_internalvar_to_ax (struct internalvar *var,
2203 struct agent_expr *expr,
2204 struct axs_value *value)
2205 {
2206 if (var->kind != INTERNALVAR_MAKE_VALUE
2207 || var->u.make_value.functions->compile_to_ax == NULL)
2208 return 0;
2209
2210 var->u.make_value.functions->compile_to_ax (var, expr, value,
2211 var->u.make_value.data);
2212 return 1;
2213 }
2214
2215 /* Look up an internal variable with name NAME. NAME should not
2216 normally include a dollar sign.
2217
2218 If the specified internal variable does not exist,
2219 one is created, with a void value. */
2220
2221 struct internalvar *
2222 lookup_internalvar (const char *name)
2223 {
2224 struct internalvar *var;
2225
2226 var = lookup_only_internalvar (name);
2227 if (var)
2228 return var;
2229
2230 return create_internalvar (name);
2231 }
2232
/* Return current value of internal variable VAR.  For variables that
   are not inherently typed, use a value type appropriate for GDBARCH.  */

struct value *
value_of_internalvar (struct gdbarch *gdbarch, struct internalvar *var)
{
  struct value *val;
  struct trace_state_variable *tsv;

  /* If there is a trace state variable of the same name, assume that
     is what we really want to see.  */
  tsv = find_trace_state_variable (var->name);
  if (tsv)
    {
      tsv->value_known = target_get_trace_state_variable_value (tsv->number,
								&(tsv->value));
      if (tsv->value_known)
	val = value_from_longest (builtin_type (gdbarch)->builtin_int64,
				  tsv->value);
      else
	val = allocate_value (builtin_type (gdbarch)->builtin_void);
      return val;
    }

  /* Otherwise build a fresh value that reflects VAR's stored
     contents, according to how they are represented.  */
  switch (var->kind)
    {
    case INTERNALVAR_VOID:
      val = allocate_value (builtin_type (gdbarch)->builtin_void);
      break;

    case INTERNALVAR_FUNCTION:
      val = allocate_value (builtin_type (gdbarch)->internal_fn);
      break;

    case INTERNALVAR_INTEGER:
      /* An untyped integer is given GDBARCH's plain "int" type.  */
      if (!var->u.integer.type)
	val = value_from_longest (builtin_type (gdbarch)->builtin_int,
				  var->u.integer.val);
      else
	val = value_from_longest (var->u.integer.type, var->u.integer.val);
      break;

    case INTERNALVAR_STRING:
      val = value_cstring (var->u.string, strlen (var->u.string),
			   builtin_type (gdbarch)->builtin_char);
      break;

    case INTERNALVAR_VALUE:
      /* Copy so the caller cannot mutate the stored value, and make
	 sure the contents are actually fetched.  */
      val = value_copy (var->u.value);
      if (value_lazy (val))
	value_fetch_lazy (val);
      break;

    case INTERNALVAR_MAKE_VALUE:
      val = (*var->u.make_value.functions->make_value) (gdbarch, var,
							var->u.make_value.data);
      break;

    default:
      internal_error (__FILE__, __LINE__, _("bad kind"));
    }

  /* Change the VALUE_LVAL to lval_internalvar so that future operations
     on this value go back to affect the original internal variable.

     Do not do this for INTERNALVAR_MAKE_VALUE variables, as those have
     no underlying modifyable state in the internal variable.

     Likewise, if the variable's value is a computed lvalue, we want
     references to it to produce another computed lvalue, where
     references and assignments actually operate through the
     computed value's functions.

     This means that internal variables with computed values
     behave a little differently from other internal variables:
     assignments to them don't just replace the previous value
     altogether.  At the moment, this seems like the behavior we
     want.  */

  if (var->kind != INTERNALVAR_MAKE_VALUE
      && val->lval != lval_computed)
    {
      VALUE_LVAL (val) = lval_internalvar;
      VALUE_INTERNALVAR (val) = var;
    }

  return val;
}
2321
2322 int
2323 get_internalvar_integer (struct internalvar *var, LONGEST *result)
2324 {
2325 if (var->kind == INTERNALVAR_INTEGER)
2326 {
2327 *result = var->u.integer.val;
2328 return 1;
2329 }
2330
2331 if (var->kind == INTERNALVAR_VALUE)
2332 {
2333 struct type *type = check_typedef (value_type (var->u.value));
2334
2335 if (TYPE_CODE (type) == TYPE_CODE_INT)
2336 {
2337 *result = value_as_long (var->u.value);
2338 return 1;
2339 }
2340 }
2341
2342 return 0;
2343 }
2344
2345 static int
2346 get_internalvar_function (struct internalvar *var,
2347 struct internal_function **result)
2348 {
2349 switch (var->kind)
2350 {
2351 case INTERNALVAR_FUNCTION:
2352 *result = var->u.fn.function;
2353 return 1;
2354
2355 default:
2356 return 0;
2357 }
2358 }
2359
/* Store NEWVAL into the component of internal variable VAR found at
   byte offset OFFSET (for bitfields, at bit position BITPOS with
   width BITSIZE).  Only INTERNALVAR_VALUE variables have components;
   any other kind is an internal error.  */

void
set_internalvar_component (struct internalvar *var,
			   LONGEST offset, LONGEST bitpos,
			   LONGEST bitsize, struct value *newval)
{
  gdb_byte *addr;
  struct gdbarch *arch;
  int unit_size;

  switch (var->kind)
    {
    case INTERNALVAR_VALUE:
      addr = value_contents_writeable (var->u.value);
      arch = get_value_arch (var->u.value);
      unit_size = gdbarch_addressable_memory_unit_size (arch);

      /* NOTE(review): the bitfield branch uses ADDR + OFFSET while
	 the plain-copy branch scales by UNIT_SIZE; confirm this
	 asymmetry is intended on targets whose addressable unit is
	 not one byte.  */
      if (bitsize)
	modify_field (value_type (var->u.value), addr + offset,
		      value_as_long (newval), bitpos, bitsize);
      else
	memcpy (addr + offset * unit_size, value_contents (newval),
		TYPE_LENGTH (value_type (newval)));
      break;

    default:
      /* We can never get a component of any other kind.  */
      internal_error (__FILE__, __LINE__, _("set_internalvar_component"));
    }
}
2389
/* Set internal variable VAR to hold the value VAL.  A canonical
   convenience function cannot be overwritten.  The new contents are
   fully prepared before the old contents are discarded, so VAR is
   never left half-updated if an error is raised.  */

void
set_internalvar (struct internalvar *var, struct value *val)
{
  enum internalvar_kind new_kind;
  union internalvar_data new_data = { 0 };

  if (var->kind == INTERNALVAR_FUNCTION && var->u.fn.canonical)
    error (_("Cannot overwrite convenience function %s"), var->name);

  /* Prepare new contents.  */
  switch (TYPE_CODE (check_typedef (value_type (val))))
    {
    case TYPE_CODE_VOID:
      new_kind = INTERNALVAR_VOID;
      break;

    case TYPE_CODE_INTERNAL_FUNCTION:
      gdb_assert (VALUE_LVAL (val) == lval_internalvar);
      new_kind = INTERNALVAR_FUNCTION;
      get_internalvar_function (VALUE_INTERNALVAR (val),
				&new_data.fn.function);
      /* Copies created here are never canonical.  */
      break;

    default:
      new_kind = INTERNALVAR_VALUE;
      new_data.value = value_copy (val);
      new_data.value->modifiable = 1;

      /* Force the value to be fetched from the target now, to avoid problems
	 later when this internalvar is referenced and the target is gone or
	 has changed.  */
      if (value_lazy (new_data.value))
	value_fetch_lazy (new_data.value);

      /* Release the value from the value chain to prevent it from being
	 deleted by free_all_values.  From here on this function should not
	 call error () until new_data is installed into the var->u to avoid
	 leaking memory.  */
      release_value (new_data.value);

      /* Internal variables which are created from values with a dynamic
	 location don't need the location property of the origin anymore.
	 The resolved dynamic location is used prior then any other address
	 when accessing the value.
	 If we keep it, we would still refer to the origin value.
	 Remove the location property in case it exist.  */
      remove_dyn_prop (DYN_PROP_DATA_LOCATION, value_type (new_data.value));

      break;
    }

  /* Clean up old contents.  */
  clear_internalvar (var);

  /* Switch over.  */
  var->kind = new_kind;
  var->u = new_data;
  /* End code which must not call error().  */
}
2450
2451 void
2452 set_internalvar_integer (struct internalvar *var, LONGEST l)
2453 {
2454 /* Clean up old contents. */
2455 clear_internalvar (var);
2456
2457 var->kind = INTERNALVAR_INTEGER;
2458 var->u.integer.type = NULL;
2459 var->u.integer.val = l;
2460 }
2461
2462 void
2463 set_internalvar_string (struct internalvar *var, const char *string)
2464 {
2465 /* Clean up old contents. */
2466 clear_internalvar (var);
2467
2468 var->kind = INTERNALVAR_STRING;
2469 var->u.string = xstrdup (string);
2470 }
2471
2472 static void
2473 set_internalvar_function (struct internalvar *var, struct internal_function *f)
2474 {
2475 /* Clean up old contents. */
2476 clear_internalvar (var);
2477
2478 var->kind = INTERNALVAR_FUNCTION;
2479 var->u.fn.function = f;
2480 var->u.fn.canonical = 1;
2481 /* Variables installed here are always the canonical version. */
2482 }
2483
2484 void
2485 clear_internalvar (struct internalvar *var)
2486 {
2487 /* Clean up old contents. */
2488 switch (var->kind)
2489 {
2490 case INTERNALVAR_VALUE:
2491 value_free (var->u.value);
2492 break;
2493
2494 case INTERNALVAR_STRING:
2495 xfree (var->u.string);
2496 break;
2497
2498 case INTERNALVAR_MAKE_VALUE:
2499 if (var->u.make_value.functions->destroy != NULL)
2500 var->u.make_value.functions->destroy (var->u.make_value.data);
2501 break;
2502
2503 default:
2504 break;
2505 }
2506
2507 /* Reset to void kind. */
2508 var->kind = INTERNALVAR_VOID;
2509 }
2510
/* Return the name of internal variable VAR, without the leading
   dollar sign.  The string is VAR's internal storage; the caller must
   not free it.  */

char *
internalvar_name (const struct internalvar *var)
{
  return var->name;
}
2516
2517 static struct internal_function *
2518 create_internal_function (const char *name,
2519 internal_function_fn handler, void *cookie)
2520 {
2521 struct internal_function *ifn = XNEW (struct internal_function);
2522
2523 ifn->name = xstrdup (name);
2524 ifn->handler = handler;
2525 ifn->cookie = cookie;
2526 return ifn;
2527 }
2528
2529 char *
2530 value_internal_function_name (struct value *val)
2531 {
2532 struct internal_function *ifn;
2533 int result;
2534
2535 gdb_assert (VALUE_LVAL (val) == lval_internalvar);
2536 result = get_internalvar_function (VALUE_INTERNALVAR (val), &ifn);
2537 gdb_assert (result);
2538
2539 return ifn->name;
2540 }
2541
2542 struct value *
2543 call_internal_function (struct gdbarch *gdbarch,
2544 const struct language_defn *language,
2545 struct value *func, int argc, struct value **argv)
2546 {
2547 struct internal_function *ifn;
2548 int result;
2549
2550 gdb_assert (VALUE_LVAL (func) == lval_internalvar);
2551 result = get_internalvar_function (VALUE_INTERNALVAR (func), &ifn);
2552 gdb_assert (result);
2553
2554 return (*ifn->handler) (gdbarch, language, ifn->cookie, argc, argv);
2555 }
2556
/* The 'function' command.  This does nothing -- it is just a
   placeholder to let "help function NAME" work.  This is also used as
   the implementation of the sub-command that is created when
   registering an internal function.  COMMAND and FROM_TTY are
   ignored.  */
static void
function_command (char *command, int from_tty)
{
  /* Do nothing.  */
}
2566
/* Clean up if an internal function's command is destroyed.  NAME was
   xstrdup'd by add_internal_function; DOC is freed here too, so it is
   presumably heap-allocated by the caller -- TODO confirm at the
   add_internal_function call sites.  */
static void
function_destroyer (struct cmd_list_element *self, void *ignore)
{
  xfree ((char *) self->name);
  xfree ((char *) self->doc);
}
2574
/* Add a new internal function.  NAME is the name of the function; DOC
   is a documentation string describing the function.  HANDLER is
   called when the function is invoked.  COOKIE is an arbitrary
   pointer which is passed to HANDLER and is intended for "user
   data".  */
void
add_internal_function (const char *name, const char *doc,
		       internal_function_fn handler, void *cookie)
{
  struct cmd_list_element *cmd;
  struct internal_function *ifn;
  struct internalvar *var = lookup_internalvar (name);

  /* Bind the function to the convenience variable of the same name.  */
  ifn = create_internal_function (name, handler, cookie);
  set_internalvar_function (var, ifn);

  /* Register a placeholder command so "help function NAME" works.
     NOTE(review): NAME is duplicated here but DOC is not, and
     function_destroyer xfree's both -- so DOC apparently must be
     heap-allocated by the caller; confirm at call sites.  */
  cmd = add_cmd (xstrdup (name), no_class, function_command, (char *) doc,
		 &functionlist);
  cmd->destroyer = function_destroyer;
}
2595
/* Update VALUE before discarding OBJFILE.  COPIED_TYPES is used to
   prevent cycles / duplicates.  */

void
preserve_one_value (struct value *value, struct objfile *objfile,
		    htab_t copied_types)
{
  /* If the value's type is owned by OBJFILE, replace it with a
     recursive copy so it outlives the objfile's removal.  */
  if (TYPE_OBJFILE (value->type) == objfile)
    value->type = copy_type_recursive (objfile, value->type, copied_types);

  /* The enclosing type may differ from TYPE; handle it the same way.  */
  if (TYPE_OBJFILE (value->enclosing_type) == objfile)
    value->enclosing_type = copy_type_recursive (objfile,
						 value->enclosing_type,
						 copied_types);
}
2611
2612 /* Likewise for internal variable VAR. */
2613
2614 static void
2615 preserve_one_internalvar (struct internalvar *var, struct objfile *objfile,
2616 htab_t copied_types)
2617 {
2618 switch (var->kind)
2619 {
2620 case INTERNALVAR_INTEGER:
2621 if (var->u.integer.type && TYPE_OBJFILE (var->u.integer.type) == objfile)
2622 var->u.integer.type
2623 = copy_type_recursive (objfile, var->u.integer.type, copied_types);
2624 break;
2625
2626 case INTERNALVAR_VALUE:
2627 preserve_one_value (var->u.value, objfile, copied_types);
2628 break;
2629 }
2630 }
2631
/* Update the internal variables and value history when OBJFILE is
   discarded; we must copy the types out of the objfile.  New global types
   will be created for every convenience variable which currently points to
   this objfile's types, and the convenience variables will be adjusted to
   use the new global types.  */

void
preserve_values (struct objfile *objfile)
{
  htab_t copied_types;
  struct value_history_chunk *cur;
  struct internalvar *var;
  int i;

  /* Create the hash table.  We allocate on the objfile's obstack, since
     it is soon to be deleted.  */
  copied_types = create_copied_types_hash (objfile);

  /* Walk every slot of every value-history chunk.  */
  for (cur = value_history_chain; cur; cur = cur->next)
    for (i = 0; i < VALUE_HISTORY_CHUNK; i++)
      if (cur->values[i])
	preserve_one_value (cur->values[i], objfile, copied_types);

  for (var = internalvars; var; var = var->next)
    preserve_one_internalvar (var, objfile, copied_types);

  /* Give extension-language values the same treatment.  */
  preserve_ext_lang_values (objfile, copied_types);

  htab_delete (copied_types);
}
2662
2663 static void
2664 show_convenience (char *ignore, int from_tty)
2665 {
2666 struct gdbarch *gdbarch = get_current_arch ();
2667 struct internalvar *var;
2668 int varseen = 0;
2669 struct value_print_options opts;
2670
2671 get_user_print_options (&opts);
2672 for (var = internalvars; var; var = var->next)
2673 {
2674
2675 if (!varseen)
2676 {
2677 varseen = 1;
2678 }
2679 printf_filtered (("$%s = "), var->name);
2680
2681 TRY
2682 {
2683 struct value *val;
2684
2685 val = value_of_internalvar (gdbarch, var);
2686 value_print (val, gdb_stdout, &opts);
2687 }
2688 CATCH (ex, RETURN_MASK_ERROR)
2689 {
2690 fprintf_filtered (gdb_stdout, _("<error: %s>"), ex.message);
2691 }
2692 END_CATCH
2693
2694 printf_filtered (("\n"));
2695 }
2696 if (!varseen)
2697 {
2698 /* This text does not mention convenience functions on purpose.
2699 The user can't create them except via Python, and if Python support
2700 is installed this message will never be printed ($_streq will
2701 exist). */
2702 printf_unfiltered (_("No debugger convenience variables now defined.\n"
2703 "Convenience variables have "
2704 "names starting with \"$\";\n"
2705 "use \"set\" as in \"set "
2706 "$foo = 5\" to define them.\n"));
2707 }
2708 }
2709 \f
2710 /* Return the TYPE_CODE_XMETHOD value corresponding to WORKER. */
2711
2712 struct value *
2713 value_of_xmethod (struct xmethod_worker *worker)
2714 {
2715 if (worker->value == NULL)
2716 {
2717 struct value *v;
2718
2719 v = allocate_value (builtin_type (target_gdbarch ())->xmethod);
2720 v->lval = lval_xcallable;
2721 v->location.xm_worker = worker;
2722 v->modifiable = 0;
2723 worker->value = v;
2724 }
2725
2726 return worker->value;
2727 }
2728
2729 /* Return the type of the result of TYPE_CODE_XMETHOD value METHOD. */
2730
2731 struct type *
2732 result_type_of_xmethod (struct value *method, int argc, struct value **argv)
2733 {
2734 gdb_assert (TYPE_CODE (value_type (method)) == TYPE_CODE_XMETHOD
2735 && method->lval == lval_xcallable && argc > 0);
2736
2737 return get_xmethod_result_type (method->location.xm_worker,
2738 argv[0], argv + 1, argc - 1);
2739 }
2740
2741 /* Call the xmethod corresponding to the TYPE_CODE_XMETHOD value METHOD. */
2742
2743 struct value *
2744 call_xmethod (struct value *method, int argc, struct value **argv)
2745 {
2746 gdb_assert (TYPE_CODE (value_type (method)) == TYPE_CODE_XMETHOD
2747 && method->lval == lval_xcallable && argc > 0);
2748
2749 return invoke_xmethod (method->location.xm_worker,
2750 argv[0], argv + 1, argc - 1);
2751 }
2752 \f
2753 /* Extract a value as a C number (either long or double).
2754 Knows how to convert fixed values to double, or
2755 floating values to long.
2756 Does not deallocate the value. */
2757
2758 LONGEST
2759 value_as_long (struct value *val)
2760 {
2761 /* This coerces arrays and functions, which is necessary (e.g.
2762 in disassemble_command). It also dereferences references, which
2763 I suspect is the most logical thing to do. */
2764 val = coerce_array (val);
2765 return unpack_long (value_type (val), value_contents (val));
2766 }
2767
2768 DOUBLEST
2769 value_as_double (struct value *val)
2770 {
2771 DOUBLEST foo;
2772 int inv;
2773
2774 foo = unpack_double (value_type (val), value_contents (val), &inv);
2775 if (inv)
2776 error (_("Invalid floating value found in program."));
2777 return foo;
2778 }
2779
/* Extract a value as a C pointer.  Does not deallocate the value.
   Note that val's type may not actually be a pointer; value_as_long
   handles all the cases.  */
CORE_ADDR
value_as_address (struct value *val)
{
  struct gdbarch *gdbarch = get_type_arch (value_type (val));

  /* Assume a CORE_ADDR can fit in a LONGEST (for now).  Not sure
     whether we want this to be true eventually.  */
#if 0
  /* gdbarch_addr_bits_remove is wrong if we are being called for a
     non-address (e.g. argument to "signal", "info break", etc.), or
     for pointers to char, in which the low bits *are* significant.  */
  return gdbarch_addr_bits_remove (gdbarch, value_as_long (val));
#else

  /* There are several targets (IA-64, PowerPC, and others) which
     don't represent pointers to functions as simply the address of
     the function's entry point.  For example, on the IA-64, a
     function pointer points to a two-word descriptor, generated by
     the linker, which contains the function's entry point, and the
     value the IA-64 "global pointer" register should have --- to
     support position-independent code.  The linker generates
     descriptors only for those functions whose addresses are taken.

     On such targets, it's difficult for GDB to convert an arbitrary
     function address into a function pointer; it has to either find
     an existing descriptor for that function, or call malloc and
     build its own.  On some targets, it is impossible for GDB to
     build a descriptor at all: the descriptor must contain a jump
     instruction; data memory cannot be executed; and code memory
     cannot be modified.

     Upon entry to this function, if VAL is a value of type `function'
     (that is, TYPE_CODE (VALUE_TYPE (val)) == TYPE_CODE_FUNC), then
     value_address (val) is the address of the function.  This is what
     you'll get if you evaluate an expression like `main'.  The call
     to COERCE_ARRAY below actually does all the usual unary
     conversions, which includes converting values of type `function'
     to `pointer to function'.  This is the challenging conversion
     discussed above.  Then, `unpack_long' will convert that pointer
     back into an address.

     So, suppose the user types `disassemble foo' on an architecture
     with a strange function pointer representation, on which GDB
     cannot build its own descriptors, and suppose further that `foo'
     has no linker-built descriptor.  The address->pointer conversion
     will signal an error and prevent the command from running, even
     though the next step would have been to convert the pointer
     directly back into the same address.

     The following shortcut avoids this whole mess.  If VAL is a
     function, just return its address directly.  */
  if (TYPE_CODE (value_type (val)) == TYPE_CODE_FUNC
      || TYPE_CODE (value_type (val)) == TYPE_CODE_METHOD)
    return value_address (val);

  val = coerce_array (val);

  /* Some architectures (e.g. Harvard), map instruction and data
     addresses onto a single large unified address space.  For
     instance: An architecture may consider a large integer in the
     range 0x10000000 .. 0x1000ffff to already represent a data
     addresses (hence not need a pointer to address conversion) while
     a small integer would still need to be converted integer to
     pointer to address.  Just assume such architectures handle all
     integer conversions in a single function.  */

  /* JimB writes:

     I think INTEGER_TO_ADDRESS is a good idea as proposed --- but we
     must admonish GDB hackers to make sure its behavior matches the
     compiler's, whenever possible.

     In general, I think GDB should evaluate expressions the same way
     the compiler does.  When the user copies an expression out of
     their source code and hands it to a `print' command, they should
     get the same value the compiler would have computed.  Any
     deviation from this rule can cause major confusion and annoyance,
     and needs to be justified carefully.  In other words, GDB doesn't
     really have the freedom to do these conversions in clever and
     useful ways.

     AndrewC pointed out that users aren't complaining about how GDB
     casts integers to pointers; they are complaining that they can't
     take an address from a disassembly listing and give it to `x/i'.
     This is certainly important.

     Adding an architecture method like integer_to_address() certainly
     makes it possible for GDB to "get it right" in all circumstances
     --- the target has complete control over how things get done, so
     people can Do The Right Thing for their target without breaking
     anyone else.  The standard doesn't specify how integers get
     converted to pointers; usually, the ABI doesn't either, but
     ABI-specific code is a more reasonable place to handle it.  */

  /* Non-pointer, non-reference values go through the gdbarch hook if
     one is provided; otherwise fall through to a plain unpack.  */
  if (TYPE_CODE (value_type (val)) != TYPE_CODE_PTR
      && !TYPE_IS_REFERENCE (value_type (val))
      && gdbarch_integer_to_address_p (gdbarch))
    return gdbarch_integer_to_address (gdbarch, value_type (val),
				       value_contents (val));

  return unpack_long (value_type (val), value_contents (val));
#endif
}
2886 \f
/* Unpack raw data (copied from debugee, target byte order) at VALADDR
   as a long, or as a double, assuming the raw data is described
   by type TYPE.  Knows how to convert different sizes of values
   and can convert between fixed and floating point.  We don't assume
   any alignment for the raw data.  Return value is in host byte order.

   If you want functions and arrays to be coerced to pointers, and
   references to be dereferenced, call value_as_long() instead.

   C++: It is assumed that the front-end has taken care of
   all matters concerning pointers to members.  A pointer
   to member which reaches here is considered to be equivalent
   to an INT (or some size).  After all, it is only an offset.  */

LONGEST
unpack_long (struct type *type, const gdb_byte *valaddr)
{
  enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
  enum type_code code = TYPE_CODE (type);
  int len = TYPE_LENGTH (type);
  int nosign = TYPE_UNSIGNED (type);

  switch (code)
    {
    case TYPE_CODE_TYPEDEF:
      /* Recurse on the underlying type.  */
      return unpack_long (check_typedef (type), valaddr);
    case TYPE_CODE_ENUM:
    case TYPE_CODE_FLAGS:
    case TYPE_CODE_BOOL:
    case TYPE_CODE_INT:
    case TYPE_CODE_CHAR:
    case TYPE_CODE_RANGE:
    case TYPE_CODE_MEMBERPTR:
      if (nosign)
	return extract_unsigned_integer (valaddr, len, byte_order);
      else
	return extract_signed_integer (valaddr, len, byte_order);

    case TYPE_CODE_FLT:
      /* The cast truncates toward zero, per C conversion rules.  */
      return (LONGEST) extract_typed_floating (valaddr, type);

    case TYPE_CODE_DECFLOAT:
      /* libdecnumber has a function to convert from decimal to integer, but
	 it doesn't work when the decimal number has a fractional part.  */
      return (LONGEST) decimal_to_doublest (valaddr, len, byte_order);

    case TYPE_CODE_PTR:
    case TYPE_CODE_REF:
    case TYPE_CODE_RVALUE_REF:
      /* Assume a CORE_ADDR can fit in a LONGEST (for now).  Not sure
	 whether we want this to be true eventually.  */
      return extract_typed_address (valaddr, type);

    default:
      error (_("Value can't be converted to integer."));
    }
  return 0;			/* Placate lint.  */
}
2945
/* Return a double value from the specified type and address.
   INVP points to an int which is set to 0 for valid value,
   1 for invalid value (bad float format).  In either case,
   the returned double is OK to use.  Argument is in target
   format, result is in host format.  */

DOUBLEST
unpack_double (struct type *type, const gdb_byte *valaddr, int *invp)
{
  enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
  enum type_code code;
  int len;
  int nosign;

  *invp = 0;			/* Assume valid.  */
  type = check_typedef (type);
  code = TYPE_CODE (type);
  len = TYPE_LENGTH (type);
  nosign = TYPE_UNSIGNED (type);
  if (code == TYPE_CODE_FLT)
    {
      /* NOTE: cagney/2002-02-19: There was a test here to see if the
	 floating-point value was valid (using the macro
	 INVALID_FLOAT).  That test/macro have been removed.

	 It turns out that only the VAX defined this macro and then
	 only in a non-portable way.  Fixing the portability problem
	 wouldn't help since the VAX floating-point code is also badly
	 bit-rotten.  The target needs to add definitions for the
	 methods gdbarch_float_format and gdbarch_double_format - these
	 exactly describe the target floating-point format.  The
	 problem here is that the corresponding floatformat_vax_f and
	 floatformat_vax_d values these methods should be set to are
	 also not defined either.  Oops!

	 Hopefully someone will add both the missing floatformat
	 definitions and the new cases for floatformat_is_valid ().  */

      if (!floatformat_is_valid (floatformat_from_type (type), valaddr))
	{
	  /* Bad float format: flag it, but still return a usable 0.  */
	  *invp = 1;
	  return 0.0;
	}

      return extract_typed_floating (valaddr, type);
    }
  else if (code == TYPE_CODE_DECFLOAT)
    return decimal_to_doublest (valaddr, len, byte_order);
  else if (nosign)
    {
      /* Unsigned -- be sure we compensate for signed LONGEST.  */
      return (ULONGEST) unpack_long (type, valaddr);
    }
  else
    {
      /* Signed -- we are OK with unpack_long.  */
      return unpack_long (type, valaddr);
    }
}
3005
/* Unpack raw data (copied from debugee, target byte order) at VALADDR
   as a CORE_ADDR, assuming the raw data is described by type TYPE.
   We don't assume any alignment for the raw data.  Return value is in
   host byte order.

   If you want functions and arrays to be coerced to pointers, and
   references to be dereferenced, call value_as_address() instead.

   C++: It is assumed that the front-end has taken care of
   all matters concerning pointers to members.  A pointer
   to member which reaches here is considered to be equivalent
   to an INT (or some size).  After all, it is only an offset.  */

CORE_ADDR
unpack_pointer (struct type *type, const gdb_byte *valaddr)
{
  /* Assume a CORE_ADDR can fit in a LONGEST (for now).  Not sure
     whether we want this to be true eventually.  Thin wrapper around
     unpack_long; all the real work happens there.  */
  return unpack_long (type, valaddr);
}
3026
3027 \f
/* Get the value of the FIELDNO'th field (which must be static) of
   TYPE.  Returns an optimized-out value if the field's symbol cannot
   be located at all.  */

struct value *
value_static_field (struct type *type, int fieldno)
{
  struct value *retval;

  switch (TYPE_FIELD_LOC_KIND (type, fieldno))
    {
    case FIELD_LOC_KIND_PHYSADDR:
      /* The field's address is recorded directly in the type.  */
      retval = value_at_lazy (TYPE_FIELD_TYPE (type, fieldno),
			      TYPE_FIELD_STATIC_PHYSADDR (type, fieldno));
      break;
    case FIELD_LOC_KIND_PHYSNAME:
    {
      /* Only the physical (mangled) name is recorded; look the symbol
	 up by name.  */
      const char *phys_name = TYPE_FIELD_STATIC_PHYSNAME (type, fieldno);
      /* TYPE_FIELD_NAME (type, fieldno); */
      struct block_symbol sym = lookup_symbol (phys_name, 0, VAR_DOMAIN, 0);

      if (sym.symbol == NULL)
	{
	  /* With some compilers, e.g. HP aCC, static data members are
	     reported as non-debuggable symbols.  */
	  struct bound_minimal_symbol msym
	    = lookup_minimal_symbol (phys_name, NULL, NULL);

	  if (!msym.minsym)
	    return allocate_optimized_out_value (type);
	  else
	    {
	      retval = value_at_lazy (TYPE_FIELD_TYPE (type, fieldno),
				      BMSYMBOL_VALUE_ADDRESS (msym));
	    }
	}
      else
	retval = value_of_variable (sym.symbol, sym.block);
      break;
    }
    default:
      gdb_assert_not_reached ("unexpected field location kind");
    }

  return retval;
}
3073
3074 /* Change the enclosing type of a value object VAL to NEW_ENCL_TYPE.
3075 You have to be careful here, since the size of the data area for the value
3076 is set by the length of the enclosing type. So if NEW_ENCL_TYPE is bigger
3077 than the old enclosing type, you have to allocate more space for the
3078 data. */
3079
3080 void
3081 set_value_enclosing_type (struct value *val, struct type *new_encl_type)
3082 {
3083 if (TYPE_LENGTH (new_encl_type) > TYPE_LENGTH (value_enclosing_type (val)))
3084 {
3085 check_type_length_before_alloc (new_encl_type);
3086 val->contents
3087 = (gdb_byte *) xrealloc (val->contents, TYPE_LENGTH (new_encl_type));
3088 }
3089
3090 val->enclosing_type = new_encl_type;
3091 }
3092
3093 /* Given a value ARG1 (offset by OFFSET bytes)
3094 of a struct or union type ARG_TYPE,
3095 extract and return the value of one of its (non-static) fields.
3096 FIELDNO says which field. */
3097
struct value *
value_primitive_field (struct value *arg1, LONGEST offset,
		       int fieldno, struct type *arg_type)
{
  struct value *v;
  struct type *type;
  struct gdbarch *arch = get_value_arch (arg1);
  int unit_size = gdbarch_addressable_memory_unit_size (arch);

  arg_type = check_typedef (arg_type);
  type = TYPE_FIELD_TYPE (arg_type, fieldno);

  /* Call check_typedef on our type to make sure that, if TYPE
     is a TYPE_CODE_TYPEDEF, its length is set to the length
     of the target type instead of zero.  However, we do not
     replace the typedef type by the target type, because we want
     to keep the typedef in order to be able to print the type
     description correctly.  */
  check_typedef (type);

  if (TYPE_FIELD_BITSIZE (arg_type, fieldno))
    {
      /* Handle packed fields.

	 Create a new value for the bitfield, with bitpos and bitsize
	 set.  If possible, arrange offset and bitpos so that we can
	 do a single aligned read of the size of the containing type.
	 Otherwise, adjust offset to the byte containing the first
	 bit.  Assume that the address, offset, and embedded offset
	 are sufficiently aligned.  */

      LONGEST bitpos = TYPE_FIELD_BITPOS (arg_type, fieldno);
      LONGEST container_bitsize = TYPE_LENGTH (type) * 8;

      v = allocate_value_lazy (type);
      v->bitsize = TYPE_FIELD_BITSIZE (arg_type, fieldno);
      if ((bitpos % container_bitsize) + v->bitsize <= container_bitsize
	  && TYPE_LENGTH (type) <= (int) sizeof (LONGEST))
	v->bitpos = bitpos % container_bitsize;
      else
	v->bitpos = bitpos % 8;
      /* Point OFFSET at the byte holding the first bit of the
	 (possibly re-aligned) bitfield.  */
      v->offset = (value_embedded_offset (arg1)
		   + offset
		   + (bitpos - v->bitpos) / 8);
      set_value_parent (v, arg1);
      /* If the parent's contents are already fetched, fetch the
	 bitfield now; otherwise leave both lazy.  */
      if (!value_lazy (arg1))
	value_fetch_lazy (v);
    }
  else if (fieldno < TYPE_N_BASECLASSES (arg_type))
    {
      /* This field is actually a base subobject, so preserve the
	 entire object's contents for later references to virtual
	 bases, etc.  */
      LONGEST boffset;

      /* Lazy register values with offsets are not supported.  */
      if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
	value_fetch_lazy (arg1);

      /* We special case virtual inheritance here because this
	 requires access to the contents, which we would rather avoid
	 for references to ordinary fields of unavailable values.  */
      if (BASETYPE_VIA_VIRTUAL (arg_type, fieldno))
	boffset = baseclass_offset (arg_type, fieldno,
				    value_contents (arg1),
				    value_embedded_offset (arg1),
				    value_address (arg1),
				    arg1);
      else
	boffset = TYPE_FIELD_BITPOS (arg_type, fieldno) / 8;

      if (value_lazy (arg1))
	v = allocate_value_lazy (value_enclosing_type (arg1));
      else
	{
	  v = allocate_value (value_enclosing_type (arg1));
	  value_contents_copy_raw (v, 0, arg1, 0,
				   TYPE_LENGTH (value_enclosing_type (arg1)));
	}
      v->type = type;
      v->offset = value_offset (arg1);
      v->embedded_offset = offset + value_embedded_offset (arg1) + boffset;
    }
  else if (NULL != TYPE_DATA_LOCATION (type))
    {
      /* Field is a dynamic data member.  */

      gdb_assert (0 == offset);
      /* We expect an already resolved data location.  */
      gdb_assert (PROP_CONST == TYPE_DATA_LOCATION_KIND (type));
      /* For dynamic data types defer memory allocation
	 until we actual access the value.  */
      v = allocate_value_lazy (type);
    }
  else
    {
      /* Plain old data member */
      offset += (TYPE_FIELD_BITPOS (arg_type, fieldno)
		 / (HOST_CHAR_BIT * unit_size));

      /* Lazy register values with offsets are not supported.  */
      if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
	value_fetch_lazy (arg1);

      if (value_lazy (arg1))
	v = allocate_value_lazy (type);
      else
	{
	  v = allocate_value (type);
	  value_contents_copy_raw (v, value_embedded_offset (v),
				   arg1, value_embedded_offset (arg1) + offset,
				   type_length_units (type));
	}
      v->offset = (value_offset (arg1) + offset
		   + value_embedded_offset (arg1));
    }
  set_value_component_location (v, arg1);
  return v;
}
3217
/* Given a value ARG1 of a struct or union type, extract and return
   the value of its (non-static) field number FIELDNO.  */

struct value *
value_field (struct value *arg1, int fieldno)
{
  struct type *arg_type = value_type (arg1);

  return value_primitive_field (arg1, 0, fieldno, arg_type);
}
3227
3228 /* Return a non-virtual function as a value.
3229 F is the list of member functions which contains the desired method.
3230 J is an index into F which provides the desired method.
3231
3232 We only use the symbol for its address, so be happy with either a
3233 full symbol or a minimal symbol. */
3234
3235 struct value *
3236 value_fn_field (struct value **arg1p, struct fn_field *f,
3237 int j, struct type *type,
3238 LONGEST offset)
3239 {
3240 struct value *v;
3241 struct type *ftype = TYPE_FN_FIELD_TYPE (f, j);
3242 const char *physname = TYPE_FN_FIELD_PHYSNAME (f, j);
3243 struct symbol *sym;
3244 struct bound_minimal_symbol msym;
3245
3246 sym = lookup_symbol (physname, 0, VAR_DOMAIN, 0).symbol;
3247 if (sym != NULL)
3248 {
3249 memset (&msym, 0, sizeof (msym));
3250 }
3251 else
3252 {
3253 gdb_assert (sym == NULL);
3254 msym = lookup_bound_minimal_symbol (physname);
3255 if (msym.minsym == NULL)
3256 return NULL;
3257 }
3258
3259 v = allocate_value (ftype);
3260 VALUE_LVAL (v) = lval_memory;
3261 if (sym)
3262 {
3263 set_value_address (v, BLOCK_START (SYMBOL_BLOCK_VALUE (sym)));
3264 }
3265 else
3266 {
3267 /* The minimal symbol might point to a function descriptor;
3268 resolve it to the actual code address instead. */
3269 struct objfile *objfile = msym.objfile;
3270 struct gdbarch *gdbarch = get_objfile_arch (objfile);
3271
3272 set_value_address (v,
3273 gdbarch_convert_from_func_ptr_addr
3274 (gdbarch, BMSYMBOL_VALUE_ADDRESS (msym), &current_target));
3275 }
3276
3277 if (arg1p)
3278 {
3279 if (type != value_type (*arg1p))
3280 *arg1p = value_ind (value_cast (lookup_pointer_type (type),
3281 value_addr (*arg1p)));
3282
3283 /* Move the `this' pointer according to the offset.
3284 VALUE_OFFSET (*arg1p) += offset; */
3285 }
3286
3287 return v;
3288 }
3289
3290 \f
3291
/* Unpack a bitfield of the specified FIELD_TYPE, from the object at
   VALADDR, and return the result.  The bitfield starts at BITPOS bits
   and contains BITSIZE bits; a BITSIZE of zero means the field is not
   a bitfield, and the whole of FIELD_TYPE is read.

   Extracting bits depends on endianness of the machine.  Compute the
   number of least significant bits to discard.  For big endian machines,
   we compute the total number of bits in the anonymous object, subtract
   off the bit count from the MSB of the object to the MSB of the
   bitfield, then the size of the bitfield, which leaves the LSB discard
   count.  For little endian machines, the discard count is simply the
   number of bits from the LSB of the anonymous object to the LSB of the
   bitfield.

   If the field is signed, we also do sign extension.  */

static LONGEST
unpack_bits_as_long (struct type *field_type, const gdb_byte *valaddr,
		     LONGEST bitpos, LONGEST bitsize)
{
  enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (field_type));
  ULONGEST val;
  ULONGEST valmask;
  int lsbcount;
  LONGEST bytes_read;
  LONGEST read_offset;

  /* Read the minimum number of bytes required; there may not be
     enough bytes to read an entire ULONGEST.  */
  field_type = check_typedef (field_type);
  if (bitsize)
    bytes_read = ((bitpos % 8) + bitsize + 7) / 8;
  else
    bytes_read = TYPE_LENGTH (field_type);

  read_offset = bitpos / 8;

  val = extract_unsigned_integer (valaddr + read_offset,
				  bytes_read, byte_order);

  /* Extract bits.  See comment above.  */

  if (gdbarch_bits_big_endian (get_type_arch (field_type)))
    lsbcount = (bytes_read * 8 - bitpos % 8 - bitsize);
  else
    lsbcount = (bitpos % 8);
  val >>= lsbcount;

  /* If the field does not entirely fill a LONGEST, then zero the sign bits.
     If the field is signed, and is negative, then sign extend.  */

  if ((bitsize > 0) && (bitsize < 8 * (int) sizeof (val)))
    {
      valmask = (((ULONGEST) 1) << bitsize) - 1;
      val &= valmask;
      if (!TYPE_UNSIGNED (field_type))
	{
	  /* VALMASK ^ (VALMASK >> 1) isolates the sign bit of the
	     field; if it is set, extend with ones.  */
	  if (val & (valmask ^ (valmask >> 1)))
	    {
	      val |= ~valmask;
	    }
	}
    }

  return val;
}
3357
3358 /* Unpack a field FIELDNO of the specified TYPE, from the object at
3359 VALADDR + EMBEDDED_OFFSET. VALADDR points to the contents of
3360 ORIGINAL_VALUE, which must not be NULL. See
3361 unpack_value_bits_as_long for more details. */
3362
3363 int
3364 unpack_value_field_as_long (struct type *type, const gdb_byte *valaddr,
3365 LONGEST embedded_offset, int fieldno,
3366 const struct value *val, LONGEST *result)
3367 {
3368 int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3369 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3370 struct type *field_type = TYPE_FIELD_TYPE (type, fieldno);
3371 int bit_offset;
3372
3373 gdb_assert (val != NULL);
3374
3375 bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos;
3376 if (value_bits_any_optimized_out (val, bit_offset, bitsize)
3377 || !value_bits_available (val, bit_offset, bitsize))
3378 return 0;
3379
3380 *result = unpack_bits_as_long (field_type, valaddr + embedded_offset,
3381 bitpos, bitsize);
3382 return 1;
3383 }
3384
3385 /* Unpack a field FIELDNO of the specified TYPE, from the anonymous
3386 object at VALADDR. See unpack_bits_as_long for more details. */
3387
3388 LONGEST
3389 unpack_field_as_long (struct type *type, const gdb_byte *valaddr, int fieldno)
3390 {
3391 int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3392 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3393 struct type *field_type = TYPE_FIELD_TYPE (type, fieldno);
3394
3395 return unpack_bits_as_long (field_type, valaddr, bitpos, bitsize);
3396 }
3397
/* Unpack a bitfield of BITSIZE bits found at BITPOS in the object at
   VALADDR + EMBEDDEDOFFSET that has the type of DEST_VAL and store
   the contents in DEST_VAL, zero or sign extending if the type of
   DEST_VAL is wider than BITSIZE.  VALADDR points to the contents of
   VAL.  If the VAL's contents required to extract the bitfield from
   are unavailable/optimized out, DEST_VAL is correspondingly
   marked unavailable/optimized out.  */

void
unpack_value_bitfield (struct value *dest_val,
		       LONGEST bitpos, LONGEST bitsize,
		       const gdb_byte *valaddr, LONGEST embedded_offset,
		       const struct value *val)
{
  enum bfd_endian byte_order;
  int src_bit_offset;
  int dst_bit_offset;
  struct type *field_type = value_type (dest_val);

  byte_order = gdbarch_byte_order (get_type_arch (field_type));

  /* First, unpack and sign extend the bitfield as if it was wholly
     valid.  Optimized out/unavailable bits are read as zero, but
     that's OK, as they'll end up marked below.  If the VAL is
     wholly-invalid we may have skipped allocating its contents,
     though.  See allocate_optimized_out_value.  */
  if (valaddr != NULL)
    {
      LONGEST num;

      num = unpack_bits_as_long (field_type, valaddr + embedded_offset,
				 bitpos, bitsize);
      store_signed_integer (value_contents_raw (dest_val),
			    TYPE_LENGTH (field_type), byte_order, num);
    }

  /* Now copy the optimized out / unavailability ranges to the right
     bits.  On big-endian targets the interesting bits sit at the
     high end of DEST_VAL's buffer, so shift the destination offset
     accordingly.  */
  src_bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos;
  if (byte_order == BFD_ENDIAN_BIG)
    dst_bit_offset = TYPE_LENGTH (field_type) * TARGET_CHAR_BIT - bitsize;
  else
    dst_bit_offset = 0;
  value_ranges_copy_adjusted (dest_val, dst_bit_offset,
			      val, src_bit_offset, bitsize);
}
3444
3445 /* Return a new value with type TYPE, which is FIELDNO field of the
3446 object at VALADDR + EMBEDDEDOFFSET. VALADDR points to the contents
3447 of VAL. If the VAL's contents required to extract the bitfield
3448 from are unavailable/optimized out, the new value is
3449 correspondingly marked unavailable/optimized out. */
3450
3451 struct value *
3452 value_field_bitfield (struct type *type, int fieldno,
3453 const gdb_byte *valaddr,
3454 LONGEST embedded_offset, const struct value *val)
3455 {
3456 int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3457 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3458 struct value *res_val = allocate_value (TYPE_FIELD_TYPE (type, fieldno));
3459
3460 unpack_value_bitfield (res_val, bitpos, bitsize,
3461 valaddr, embedded_offset, val);
3462
3463 return res_val;
3464 }
3465
/* Modify the value of a bitfield.  ADDR points to a block of memory in
   target byte order; the bitfield starts in the byte pointed to.  FIELDVAL
   is the desired value of the field, in host byte order.  BITPOS and BITSIZE
   indicate which bits (in target bit order) comprise the bitfield.
   Requires 0 < BITSIZE <= lbits, 0 <= BITPOS % 8 + BITSIZE <= lbits, and
   0 <= BITPOS, where lbits is the size of a LONGEST in bits.  */

void
modify_field (struct type *type, gdb_byte *addr,
	      LONGEST fieldval, LONGEST bitpos, LONGEST bitsize)
{
  enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
  ULONGEST oword;
  ULONGEST mask = (ULONGEST) -1 >> (8 * sizeof (ULONGEST) - bitsize);
  LONGEST bytesize;

  /* Normalize BITPOS.  */
  addr += bitpos / 8;
  bitpos %= 8;

  /* If a negative fieldval fits in the field in question, chop
     off the sign extension bits.  */
  if ((~fieldval & ~(mask >> 1)) == 0)
    fieldval &= mask;

  /* Warn if value is too big to fit in the field in question.  */
  if (0 != (fieldval & ~mask))
    {
      /* FIXME: would like to include fieldval in the message, but
	 we don't have a sprintf_longest.  */
      warning (_("Value does not fit in %s bits."), plongest (bitsize));

      /* Truncate it, otherwise adjoining fields may be corrupted.  */
      fieldval &= mask;
    }

  /* Ensure no bytes outside of the modified ones get accessed as it may cause
     false valgrind reports.  */

  bytesize = (bitpos + bitsize + 7) / 8;
  oword = extract_unsigned_integer (addr, bytesize, byte_order);

  /* Shifting for bit field depends on endianness of the target machine.  */
  if (gdbarch_bits_big_endian (get_type_arch (type)))
    bitpos = bytesize * 8 - bitpos - bitsize;

  /* Read-modify-write: clear the field's bits, then OR in the new
     value.  */
  oword &= ~(mask << bitpos);
  oword |= fieldval << bitpos;

  store_unsigned_integer (addr, bytesize, byte_order, oword);
}
3517 \f
3518 /* Pack NUM into BUF using a target format of TYPE. */
3519
3520 void
3521 pack_long (gdb_byte *buf, struct type *type, LONGEST num)
3522 {
3523 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
3524 LONGEST len;
3525
3526 type = check_typedef (type);
3527 len = TYPE_LENGTH (type);
3528
3529 switch (TYPE_CODE (type))
3530 {
3531 case TYPE_CODE_INT:
3532 case TYPE_CODE_CHAR:
3533 case TYPE_CODE_ENUM:
3534 case TYPE_CODE_FLAGS:
3535 case TYPE_CODE_BOOL:
3536 case TYPE_CODE_RANGE:
3537 case TYPE_CODE_MEMBERPTR:
3538 store_signed_integer (buf, len, byte_order, num);
3539 break;
3540
3541 case TYPE_CODE_REF:
3542 case TYPE_CODE_RVALUE_REF:
3543 case TYPE_CODE_PTR:
3544 store_typed_address (buf, type, (CORE_ADDR) num);
3545 break;
3546
3547 default:
3548 error (_("Unexpected type (%d) encountered for integer constant."),
3549 TYPE_CODE (type));
3550 }
3551 }
3552
3553
3554 /* Pack NUM into BUF using a target format of TYPE. */
3555
3556 static void
3557 pack_unsigned_long (gdb_byte *buf, struct type *type, ULONGEST num)
3558 {
3559 LONGEST len;
3560 enum bfd_endian byte_order;
3561
3562 type = check_typedef (type);
3563 len = TYPE_LENGTH (type);
3564 byte_order = gdbarch_byte_order (get_type_arch (type));
3565
3566 switch (TYPE_CODE (type))
3567 {
3568 case TYPE_CODE_INT:
3569 case TYPE_CODE_CHAR:
3570 case TYPE_CODE_ENUM:
3571 case TYPE_CODE_FLAGS:
3572 case TYPE_CODE_BOOL:
3573 case TYPE_CODE_RANGE:
3574 case TYPE_CODE_MEMBERPTR:
3575 store_unsigned_integer (buf, len, byte_order, num);
3576 break;
3577
3578 case TYPE_CODE_REF:
3579 case TYPE_CODE_RVALUE_REF:
3580 case TYPE_CODE_PTR:
3581 store_typed_address (buf, type, (CORE_ADDR) num);
3582 break;
3583
3584 default:
3585 error (_("Unexpected type (%d) encountered "
3586 "for unsigned integer constant."),
3587 TYPE_CODE (type));
3588 }
3589 }
3590
3591
3592 /* Convert C numbers into newly allocated values. */
3593
3594 struct value *
3595 value_from_longest (struct type *type, LONGEST num)
3596 {
3597 struct value *val = allocate_value (type);
3598
3599 pack_long (value_contents_raw (val), type, num);
3600 return val;
3601 }
3602
3603
3604 /* Convert C unsigned numbers into newly allocated values. */
3605
3606 struct value *
3607 value_from_ulongest (struct type *type, ULONGEST num)
3608 {
3609 struct value *val = allocate_value (type);
3610
3611 pack_unsigned_long (value_contents_raw (val), type, num);
3612
3613 return val;
3614 }
3615
3616
3617 /* Create a value representing a pointer of type TYPE to the address
3618 ADDR. */
3619
3620 struct value *
3621 value_from_pointer (struct type *type, CORE_ADDR addr)
3622 {
3623 struct value *val = allocate_value (type);
3624
3625 store_typed_address (value_contents_raw (val),
3626 check_typedef (type), addr);
3627 return val;
3628 }
3629
3630
3631 /* Create a value of type TYPE whose contents come from VALADDR, if it
3632 is non-null, and whose memory address (in the inferior) is
3633 ADDRESS. The type of the created value may differ from the passed
3634 type TYPE. Make sure to retrieve values new type after this call.
3635 Note that TYPE is not passed through resolve_dynamic_type; this is
3636 a special API intended for use only by Ada. */
3637
3638 struct value *
3639 value_from_contents_and_address_unresolved (struct type *type,
3640 const gdb_byte *valaddr,
3641 CORE_ADDR address)
3642 {
3643 struct value *v;
3644
3645 if (valaddr == NULL)
3646 v = allocate_value_lazy (type);
3647 else
3648 v = value_from_contents (type, valaddr);
3649 VALUE_LVAL (v) = lval_memory;
3650 set_value_address (v, address);
3651 return v;
3652 }
3653
3654 /* Create a value of type TYPE whose contents come from VALADDR, if it
3655 is non-null, and whose memory address (in the inferior) is
3656 ADDRESS. The type of the created value may differ from the passed
3657 type TYPE. Make sure to retrieve values new type after this call. */
3658
3659 struct value *
3660 value_from_contents_and_address (struct type *type,
3661 const gdb_byte *valaddr,
3662 CORE_ADDR address)
3663 {
3664 struct type *resolved_type = resolve_dynamic_type (type, valaddr, address);
3665 struct type *resolved_type_no_typedef = check_typedef (resolved_type);
3666 struct value *v;
3667
3668 if (valaddr == NULL)
3669 v = allocate_value_lazy (resolved_type);
3670 else
3671 v = value_from_contents (resolved_type, valaddr);
3672 if (TYPE_DATA_LOCATION (resolved_type_no_typedef) != NULL
3673 && TYPE_DATA_LOCATION_KIND (resolved_type_no_typedef) == PROP_CONST)
3674 address = TYPE_DATA_LOCATION_ADDR (resolved_type_no_typedef);
3675 VALUE_LVAL (v) = lval_memory;
3676 set_value_address (v, address);
3677 return v;
3678 }
3679
3680 /* Create a value of type TYPE holding the contents CONTENTS.
3681 The new value is `not_lval'. */
3682
3683 struct value *
3684 value_from_contents (struct type *type, const gdb_byte *contents)
3685 {
3686 struct value *result;
3687
3688 result = allocate_value (type);
3689 memcpy (value_contents_raw (result), contents, TYPE_LENGTH (type));
3690 return result;
3691 }
3692
3693 struct value *
3694 value_from_double (struct type *type, DOUBLEST num)
3695 {
3696 struct value *val = allocate_value (type);
3697 struct type *base_type = check_typedef (type);
3698 enum type_code code = TYPE_CODE (base_type);
3699
3700 if (code == TYPE_CODE_FLT)
3701 {
3702 store_typed_floating (value_contents_raw (val), base_type, num);
3703 }
3704 else
3705 error (_("Unexpected type encountered for floating constant."));
3706
3707 return val;
3708 }
3709
3710 struct value *
3711 value_from_decfloat (struct type *type, const gdb_byte *dec)
3712 {
3713 struct value *val = allocate_value (type);
3714
3715 memcpy (value_contents_raw (val), dec, TYPE_LENGTH (type));
3716 return val;
3717 }
3718
/* Extract a value from the history file.  Input will be of the form
   $digits or $$digits.  See block comment above 'write_dollar_variable'
   for details.

   H is the input string; *ENDP is advanced past the consumed
   characters on success.  Returns NULL if H is not a history
   reference (or looks like an identifier such as "$foo").  */

struct value *
value_from_history_ref (const char *h, const char **endp)
{
  int index, len;

  if (h[0] == '$')
    len = 1;
  else
    return NULL;

  if (h[1] == '$')
    len = 2;

  /* Find length of numeral string.  Cast to unsigned char: passing a
     possibly-negative plain char to the <ctype.h> functions is
     undefined behavior.  */
  for (; isdigit ((unsigned char) h[len]); len++)
    ;

  /* Make sure numeral string is not part of an identifier.  */
  if (h[len] == '_' || isalpha ((unsigned char) h[len]))
    return NULL;

  /* Now collect the index value.  */
  if (h[1] == '$')
    {
      if (len == 2)
	{
	  /* For some bizarre reason, "$$" is equivalent to "$$1",
	     rather than to "$$0" as it ought to be!  */
	  index = -1;
	  *endp += len;
	}
      else
	{
	  char *local_end;

	  index = -strtol (&h[2], &local_end, 10);
	  *endp = local_end;
	}
    }
  else
    {
      if (len == 1)
	{
	  /* "$" is equivalent to "$0".  */
	  index = 0;
	  *endp += len;
	}
      else
	{
	  char *local_end;

	  index = strtol (&h[1], &local_end, 10);
	  *endp = local_end;
	}
    }

  return access_value_history (index);
}
3781
3782 /* Get the component value (offset by OFFSET bytes) of a struct or
3783 union WHOLE. Component's type is TYPE. */
3784
3785 struct value *
3786 value_from_component (struct value *whole, struct type *type, LONGEST offset)
3787 {
3788 struct value *v;
3789
3790 if (VALUE_LVAL (whole) == lval_memory && value_lazy (whole))
3791 v = allocate_value_lazy (type);
3792 else
3793 {
3794 v = allocate_value (type);
3795 value_contents_copy (v, value_embedded_offset (v),
3796 whole, value_embedded_offset (whole) + offset,
3797 type_length_units (type));
3798 }
3799 v->offset = value_offset (whole) + offset + value_embedded_offset (whole);
3800 set_value_component_location (v, whole);
3801
3802 return v;
3803 }
3804
3805 struct value *
3806 coerce_ref_if_computed (const struct value *arg)
3807 {
3808 const struct lval_funcs *funcs;
3809
3810 if (!TYPE_IS_REFERENCE (check_typedef (value_type (arg))))
3811 return NULL;
3812
3813 if (value_lval_const (arg) != lval_computed)
3814 return NULL;
3815
3816 funcs = value_computed_funcs (arg);
3817 if (funcs->coerce_ref == NULL)
3818 return NULL;
3819
3820 return funcs->coerce_ref (arg);
3821 }
3822
/* Look at value.h for description.  */

struct value *
readjust_indirect_value_type (struct value *value, struct type *enc_type,
			      const struct type *original_type,
			      const struct value *original_value)
{
  /* Re-adjust type: indirection yields the target type of the
     original (reference or pointer) type.  */
  deprecated_set_value_type (value, TYPE_TARGET_TYPE (original_type));

  /* Add embedding info.  */
  set_value_enclosing_type (value, enc_type);
  set_value_embedded_offset (value, value_pointed_to_offset (original_value));

  /* We may be pointing to an object of some derived type.  */
  return value_full_object (value, NULL, 0, 0, 0);
}
3840
3841 struct value *
3842 coerce_ref (struct value *arg)
3843 {
3844 struct type *value_type_arg_tmp = check_typedef (value_type (arg));
3845 struct value *retval;
3846 struct type *enc_type;
3847
3848 retval = coerce_ref_if_computed (arg);
3849 if (retval)
3850 return retval;
3851
3852 if (!TYPE_IS_REFERENCE (value_type_arg_tmp))
3853 return arg;
3854
3855 enc_type = check_typedef (value_enclosing_type (arg));
3856 enc_type = TYPE_TARGET_TYPE (enc_type);
3857
3858 retval = value_at_lazy (enc_type,
3859 unpack_pointer (value_type (arg),
3860 value_contents (arg)));
3861 enc_type = value_type (retval);
3862 return readjust_indirect_value_type (retval, enc_type,
3863 value_type_arg_tmp, arg);
3864 }
3865
3866 struct value *
3867 coerce_array (struct value *arg)
3868 {
3869 struct type *type;
3870
3871 arg = coerce_ref (arg);
3872 type = check_typedef (value_type (arg));
3873
3874 switch (TYPE_CODE (type))
3875 {
3876 case TYPE_CODE_ARRAY:
3877 if (!TYPE_VECTOR (type) && current_language->c_style_arrays)
3878 arg = value_coerce_array (arg);
3879 break;
3880 case TYPE_CODE_FUNC:
3881 arg = value_coerce_function (arg);
3882 break;
3883 }
3884 return arg;
3885 }
3886 \f
3887
3888 /* Return the return value convention that will be used for the
3889 specified type. */
3890
3891 enum return_value_convention
3892 struct_return_convention (struct gdbarch *gdbarch,
3893 struct value *function, struct type *value_type)
3894 {
3895 enum type_code code = TYPE_CODE (value_type);
3896
3897 if (code == TYPE_CODE_ERROR)
3898 error (_("Function return type unknown."));
3899
3900 /* Probe the architecture for the return-value convention. */
3901 return gdbarch_return_value (gdbarch, function, value_type,
3902 NULL, NULL, NULL);
3903 }
3904
3905 /* Return true if the function returning the specified type is using
3906 the convention of returning structures in memory (passing in the
3907 address as a hidden first parameter). */
3908
3909 int
3910 using_struct_return (struct gdbarch *gdbarch,
3911 struct value *function, struct type *value_type)
3912 {
3913 if (TYPE_CODE (value_type) == TYPE_CODE_VOID)
3914 /* A void return value is never in memory. See also corresponding
3915 code in "print_return_value". */
3916 return 0;
3917
3918 return (struct_return_convention (gdbarch, function, value_type)
3919 != RETURN_VALUE_REGISTER_CONVENTION);
3920 }
3921
/* Set the initialized field in a value struct.  STATUS is nonzero if
   VAL is considered initialized.  */

void
set_value_initialized (struct value *val, int status)
{
  val->initialized = status;
}
3929
/* Return the initialized field in a value struct; nonzero means VAL
   is considered initialized.  */

int
value_initialized (const struct value *val)
{
  return val->initialized;
}
3937
3938 /* Load the actual content of a lazy value. Fetch the data from the
3939 user's process and clear the lazy flag to indicate that the data in
3940 the buffer is valid.
3941
3942 If the value is zero-length, we avoid calling read_memory, which
3943 would abort. We mark the value as fetched anyway -- all 0 bytes of
3944 it. */
3945
3946 void
3947 value_fetch_lazy (struct value *val)
3948 {
3949 gdb_assert (value_lazy (val));
3950 allocate_value_contents (val);
3951 /* A value is either lazy, or fully fetched. The
3952 availability/validity is only established as we try to fetch a
3953 value. */
3954 gdb_assert (VEC_empty (range_s, val->optimized_out));
3955 gdb_assert (VEC_empty (range_s, val->unavailable));
3956 if (value_bitsize (val))
3957 {
3958 /* To read a lazy bitfield, read the entire enclosing value. This
3959 prevents reading the same block of (possibly volatile) memory once
3960 per bitfield. It would be even better to read only the containing
3961 word, but we have no way to record that just specific bits of a
3962 value have been fetched. */
3963 struct type *type = check_typedef (value_type (val));
3964 struct value *parent = value_parent (val);
3965
3966 if (value_lazy (parent))
3967 value_fetch_lazy (parent);
3968
3969 unpack_value_bitfield (val,
3970 value_bitpos (val), value_bitsize (val),
3971 value_contents_for_printing (parent),
3972 value_offset (val), parent);
3973 }
3974 else if (VALUE_LVAL (val) == lval_memory)
3975 {
3976 CORE_ADDR addr = value_address (val);
3977 struct type *type = check_typedef (value_enclosing_type (val));
3978
3979 if (TYPE_LENGTH (type))
3980 read_value_memory (val, 0, value_stack (val),
3981 addr, value_contents_all_raw (val),
3982 type_length_units (type));
3983 }
3984 else if (VALUE_LVAL (val) == lval_register)
3985 {
3986 struct frame_info *next_frame;
3987 int regnum;
3988 struct type *type = check_typedef (value_type (val));
3989 struct value *new_val = val, *mark = value_mark ();
3990
3991 /* Offsets are not supported here; lazy register values must
3992 refer to the entire register. */
3993 gdb_assert (value_offset (val) == 0);
3994
3995 while (VALUE_LVAL (new_val) == lval_register && value_lazy (new_val))
3996 {
3997 struct frame_id next_frame_id = VALUE_NEXT_FRAME_ID (new_val);
3998
3999 next_frame = frame_find_by_id (next_frame_id);
4000 regnum = VALUE_REGNUM (new_val);
4001
4002 gdb_assert (next_frame != NULL);
4003
4004 /* Convertible register routines are used for multi-register
4005 values and for interpretation in different types
4006 (e.g. float or int from a double register). Lazy
4007 register values should have the register's natural type,
4008 so they do not apply. */
4009 gdb_assert (!gdbarch_convert_register_p (get_frame_arch (next_frame),
4010 regnum, type));
4011
4012 /* FRAME was obtained, above, via VALUE_NEXT_FRAME_ID.
4013 Since a "->next" operation was performed when setting
4014 this field, we do not need to perform a "next" operation
4015 again when unwinding the register. That's why
4016 frame_unwind_register_value() is called here instead of
4017 get_frame_register_value(). */
4018 new_val = frame_unwind_register_value (next_frame, regnum);
4019
4020 /* If we get another lazy lval_register value, it means the
4021 register is found by reading it from NEXT_FRAME's next frame.
4022 frame_unwind_register_value should never return a value with
4023 the frame id pointing to NEXT_FRAME. If it does, it means we
4024 either have two consecutive frames with the same frame id
4025 in the frame chain, or some code is trying to unwind
4026 behind get_prev_frame's back (e.g., a frame unwind
4027 sniffer trying to unwind), bypassing its validations. In
4028 any case, it should always be an internal error to end up
4029 in this situation. */
4030 if (VALUE_LVAL (new_val) == lval_register
4031 && value_lazy (new_val)
4032 && frame_id_eq (VALUE_NEXT_FRAME_ID (new_val), next_frame_id))
4033 internal_error (__FILE__, __LINE__,
4034 _("infinite loop while fetching a register"));
4035 }
4036
4037 /* If it's still lazy (for instance, a saved register on the
4038 stack), fetch it. */
4039 if (value_lazy (new_val))
4040 value_fetch_lazy (new_val);
4041
4042 /* Copy the contents and the unavailability/optimized-out
4043 meta-data from NEW_VAL to VAL. */
4044 set_value_lazy (val, 0);
4045 value_contents_copy (val, value_embedded_offset (val),
4046 new_val, value_embedded_offset (new_val),
4047 type_length_units (type));
4048
4049 if (frame_debug)
4050 {
4051 struct gdbarch *gdbarch;
4052 struct frame_info *frame;
4053 /* VALUE_FRAME_ID is used here, instead of VALUE_NEXT_FRAME_ID,
4054 so that the frame level will be shown correctly. */
4055 frame = frame_find_by_id (VALUE_FRAME_ID (val));
4056 regnum = VALUE_REGNUM (val);
4057 gdbarch = get_frame_arch (frame);
4058
4059 fprintf_unfiltered (gdb_stdlog,
4060 "{ value_fetch_lazy "
4061 "(frame=%d,regnum=%d(%s),...) ",
4062 frame_relative_level (frame), regnum,
4063 user_reg_map_regnum_to_name (gdbarch, regnum));
4064
4065 fprintf_unfiltered (gdb_stdlog, "->");
4066 if (value_optimized_out (new_val))
4067 {
4068 fprintf_unfiltered (gdb_stdlog, " ");
4069 val_print_optimized_out (new_val, gdb_stdlog);
4070 }
4071 else
4072 {
4073 int i;
4074 const gdb_byte *buf = value_contents (new_val);
4075
4076 if (VALUE_LVAL (new_val) == lval_register)
4077 fprintf_unfiltered (gdb_stdlog, " register=%d",
4078 VALUE_REGNUM (new_val));
4079 else if (VALUE_LVAL (new_val) == lval_memory)
4080 fprintf_unfiltered (gdb_stdlog, " address=%s",
4081 paddress (gdbarch,
4082 value_address (new_val)));
4083 else
4084 fprintf_unfiltered (gdb_stdlog, " computed");
4085
4086 fprintf_unfiltered (gdb_stdlog, " bytes=");
4087 fprintf_unfiltered (gdb_stdlog, "[");
4088 for (i = 0; i < register_size (gdbarch, regnum); i++)
4089 fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
4090 fprintf_unfiltered (gdb_stdlog, "]");
4091 }
4092
4093 fprintf_unfiltered (gdb_stdlog, " }\n");
4094 }
4095
4096 /* Dispose of the intermediate values. This prevents
4097 watchpoints from trying to watch the saved frame pointer. */
4098 value_free_to_mark (mark);
4099 }
4100 else if (VALUE_LVAL (val) == lval_computed
4101 && value_computed_funcs (val)->read != NULL)
4102 value_computed_funcs (val)->read (val);
4103 else
4104 internal_error (__FILE__, __LINE__, _("Unexpected lazy value type."));
4105
4106 set_value_lazy (val, 0);
4107 }
4108
4109 /* Implementation of the convenience function $_isvoid. */
4110
4111 static struct value *
4112 isvoid_internal_fn (struct gdbarch *gdbarch,
4113 const struct language_defn *language,
4114 void *cookie, int argc, struct value **argv)
4115 {
4116 int ret;
4117
4118 if (argc != 1)
4119 error (_("You must provide one argument for $_isvoid."));
4120
4121 ret = TYPE_CODE (value_type (argv[0])) == TYPE_CODE_VOID;
4122
4123 return value_from_longest (builtin_type (gdbarch)->builtin_int, ret);
4124 }
4125
4126 void
4127 _initialize_values (void)
4128 {
4129 add_cmd ("convenience", no_class, show_convenience, _("\
4130 Debugger convenience (\"$foo\") variables and functions.\n\
4131 Convenience variables are created when you assign them values;\n\
4132 thus, \"set $foo=1\" gives \"$foo\" the value 1. Values may be any type.\n\
4133 \n\
4134 A few convenience variables are given values automatically:\n\
4135 \"$_\"holds the last address examined with \"x\" or \"info lines\",\n\
4136 \"$__\" holds the contents of the last address examined with \"x\"."
4137 #ifdef HAVE_PYTHON
4138 "\n\n\
4139 Convenience functions are defined via the Python API."
4140 #endif
4141 ), &showlist);
4142 add_alias_cmd ("conv", "convenience", no_class, 1, &showlist);
4143
4144 add_cmd ("values", no_set_class, show_values, _("\
4145 Elements of value history around item number IDX (or last ten)."),
4146 &showlist);
4147
4148 add_com ("init-if-undefined", class_vars, init_if_undefined_command, _("\
4149 Initialize a convenience variable if necessary.\n\
4150 init-if-undefined VARIABLE = EXPRESSION\n\
4151 Set an internal VARIABLE to the result of the EXPRESSION if it does not\n\
4152 exist or does not contain a value. The EXPRESSION is not evaluated if the\n\
4153 VARIABLE is already initialized."));
4154
4155 add_prefix_cmd ("function", no_class, function_command, _("\
4156 Placeholder command for showing help on convenience functions."),
4157 &functionlist, "function ", 0, &cmdlist);
4158
4159 add_internal_function ("_isvoid", _("\
4160 Check whether an expression is void.\n\
4161 Usage: $_isvoid (expression)\n\
4162 Return 1 if the expression is void, zero otherwise."),
4163 isvoid_internal_fn, NULL);
4164
4165 add_setshow_zuinteger_unlimited_cmd ("max-value-size",
4166 class_support, &max_value_size, _("\
4167 Set maximum sized value gdb will load from the inferior."), _("\
4168 Show maximum sized value gdb will load from the inferior."), _("\
4169 Use this to control the maximum size, in bytes, of a value that gdb\n\
4170 will load from the inferior. Setting this value to 'unlimited'\n\
4171 disables checking.\n\
4172 Setting this does not invalidate already allocated values, it only\n\
4173 prevents future values, larger than this size, from being allocated."),
4174 set_max_value_size,
4175 show_max_value_size,
4176 &setlist, &showlist);
4177 }
This page took 0.109513 seconds and 5 git commands to generate.