1 /* Low level packing and unpacking of values for GDB, the GNU Debugger.
2
3 Copyright (C) 1986-2017 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21 #include "arch-utils.h"
22 #include "symtab.h"
23 #include "gdbtypes.h"
24 #include "value.h"
25 #include "gdbcore.h"
26 #include "command.h"
27 #include "gdbcmd.h"
28 #include "target.h"
29 #include "language.h"
30 #include "demangle.h"
31 #include "doublest.h"
32 #include "floatformat.h"
33 #include "regcache.h"
34 #include "block.h"
35 #include "dfp.h"
36 #include "objfiles.h"
37 #include "valprint.h"
38 #include "cli/cli-decode.h"
39 #include "extension.h"
40 #include <ctype.h>
41 #include "tracepoint.h"
42 #include "cp-abi.h"
43 #include "user-regs.h"
44 #include <algorithm>
45 #include "completer.h"
46
47 /* Definition of a user function. */
48 struct internal_function
49 {
50 /* The name of the function. It is a bit odd to have this in the
51 function itself -- the user might use a differently-named
52 convenience variable to hold the function. */
53 char *name;
54
55 /* The handler. */
56 internal_function_fn handler;
57
58 /* User data for the handler. */
59 void *cookie;
60 };
61
62 /* Defines an [OFFSET, OFFSET + LENGTH) range. */
63
64 struct range
65 {
66 /* Lowest offset in the range. */
67 LONGEST offset;
68
69 /* Length of the range. */
70 LONGEST length;
71 };
72
73 typedef struct range range_s;
74
75 DEF_VEC_O(range_s);
76
77 /* Returns true if the ranges defined by [offset1, offset1+len1) and
78 [offset2, offset2+len2) overlap. */
79
80 static int
81 ranges_overlap (LONGEST offset1, LONGEST len1,
82 LONGEST offset2, LONGEST len2)
83 {
84 ULONGEST h, l;
85
86 l = std::max (offset1, offset2);
87 h = std::min (offset1 + len1, offset2 + len2);
88 return (l < h);
89 }
90
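/* Illustration (editor's sketch, not part of the original value.c): the
   half-open interval overlap test used by ranges_overlap above, restated
   as a tiny standalone C++ snippet.  Two ranges [o1, o1+l1) and
   [o2, o2+l2) overlap iff max (o1, o2) < min (o1 + l1, o2 + l2).

     #include <algorithm>

     static bool
     overlaps (long o1, long l1, long o2, long l2)
     {
       long lo = std::max (o1, o2);
       long hi = std::min (o1 + l1, o2 + l2);
       return lo < hi;   // e.g. [0,4) and [4,8) do not overlap
     }
*/
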
91 /* Returns true if the first argument is strictly less than the
92 second, useful for VEC_lower_bound. We keep ranges sorted by
93 offset and coalesce overlapping and contiguous ranges, so this just
94 compares the starting offset. */
95
96 static int
97 range_lessthan (const range_s *r1, const range_s *r2)
98 {
99 return r1->offset < r2->offset;
100 }
101
102 /* Returns true if RANGES contains any range that overlaps [OFFSET,
103 OFFSET+LENGTH). */
104
105 static int
106 ranges_contain (VEC(range_s) *ranges, LONGEST offset, LONGEST length)
107 {
108 range_s what;
109 LONGEST i;
110
111 what.offset = offset;
112 what.length = length;
113
114 /* We keep ranges sorted by offset and coalesce overlapping and
115 contiguous ranges, so to check if a range list contains a given
116 range, we can do a binary search for the position the given range
117 would be inserted if we only considered the starting OFFSET of
118 ranges. We call that position I. Since we also have LENGTH to
119 care for (this is a range after all), we need to check if the
120 _previous_ range overlaps the I range. E.g.,
121
122 R
123 |---|
124 |---| |---| |------| ... |--|
125 0 1 2 N
126
127 I=1
128
129 In the case above, the binary search would return `I=1', meaning,
130 this OFFSET should be inserted at position 1, and the current
131 position 1 should be pushed further (and end up before 2). But `0'
132 overlaps with R.
133
134 Then we need to check whether the range at position I itself overlaps R.
135 E.g.,
136
137 R
138 |---|
139 |---| |---| |-------| ... |--|
140 0 1 2 N
141
142 I=1
143 */
144
145 i = VEC_lower_bound (range_s, ranges, &what, range_lessthan);
146
147 if (i > 0)
148 {
149 struct range *bef = VEC_index (range_s, ranges, i - 1);
150
151 if (ranges_overlap (bef->offset, bef->length, offset, length))
152 return 1;
153 }
154
155 if (i < VEC_length (range_s, ranges))
156 {
157 struct range *r = VEC_index (range_s, ranges, i);
158
159 if (ranges_overlap (r->offset, r->length, offset, length))
160 return 1;
161 }
162
163 return 0;
164 }
165
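/* Illustration (editor's sketch, not part of the original value.c): the
   same "binary search by offset, then check the previous range and the
   found range" idea as ranges_contain, written against std::vector and
   std::lower_bound instead of GDB's VEC.  The type and function names
   below are invented for the example.

     #include <algorithm>
     #include <vector>

     struct rng { long offset, length; };

     // RANGES must be sorted by offset and already coalesced.
     static bool
     contains (const std::vector<rng> &ranges, long offset, long length)
     {
       auto over = [] (long o1, long l1, long o2, long l2)
         { return std::max (o1, o2) < std::min (o1 + l1, o2 + l2); };

       // First range whose offset is not less than OFFSET.
       auto it = std::lower_bound (ranges.begin (), ranges.end (), offset,
                                   [] (const rng &r, long off)
                                   { return r.offset < off; });

       if (it != ranges.begin ()
           && over (it[-1].offset, it[-1].length, offset, length))
         return true;
       if (it != ranges.end ()
           && over (it->offset, it->length, offset, length))
         return true;
       return false;
     }
*/
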
166 static struct cmd_list_element *functionlist;
167
168 /* Note that the fields in this structure are arranged to save a bit
169 of memory. */
170
171 struct value
172 {
173 /* Type of value; either not an lval, or one of the various
174 different possible kinds of lval. */
175 enum lval_type lval;
176
177 /* Is it modifiable? Only relevant if lval != not_lval. */
178 unsigned int modifiable : 1;
179
180 /* If zero, contents of this value are in the contents field. If
181 nonzero, contents are in inferior. If the lval field is lval_memory,
182 the contents are in inferior memory at location.address plus offset.
183 The lval field may also be lval_register.
184
185 WARNING: This field is used by the code which handles watchpoints
186 (see breakpoint.c) to decide whether a particular value can be
187 watched by hardware watchpoints. If the lazy flag is set for
188 some member of a value chain, it is assumed that this member of
189 the chain doesn't need to be watched as part of watching the
190 value itself. This is how GDB avoids watching the entire struct
191 or array when the user wants to watch a single struct member or
192 array element. If you ever change the way lazy flag is set and
193 reset, be sure to consider this use as well! */
194 unsigned int lazy : 1;
195
196 /* If the value is a variable, whether it has been initialized. */
197 unsigned int initialized : 1;
198
199 /* If value is from the stack. If this is set, read_stack will be
200 used instead of read_memory to enable extra caching. */
201 unsigned int stack : 1;
202
203 /* If the value has been released. */
204 unsigned int released : 1;
205
206 /* Location of value (if lval). */
207 union
208 {
209 /* If lval == lval_memory, this is the address in the inferior. */
210 CORE_ADDR address;
211
212 /* If lval == lval_register, the value is from a register. */
213 struct
214 {
215 /* Register number. */
216 int regnum;
217 /* Frame ID of "next" frame to which a register value is relative.
218 If the register value is found relative to frame F, then the
219 frame id of F->next will be stored in next_frame_id. */
220 struct frame_id next_frame_id;
221 } reg;
222
223 /* Pointer to internal variable. */
224 struct internalvar *internalvar;
225
226 /* Pointer to xmethod worker. */
227 struct xmethod_worker *xm_worker;
228
229 /* If lval == lval_computed, this is a set of function pointers
230 to use to access and describe the value, and a closure pointer
231 for them to use. */
232 struct
233 {
234 /* Functions to call. */
235 const struct lval_funcs *funcs;
236
237 /* Closure for those functions to use. */
238 void *closure;
239 } computed;
240 } location;
241
242 /* Describes offset of a value within lval of a structure in target
243 addressable memory units. Note also the member embedded_offset
244 below. */
245 LONGEST offset;
246
247 /* Only used for bitfields; number of bits contained in them. */
248 LONGEST bitsize;
249
250 /* Only used for bitfields; position of start of field. For
251 gdbarch_bits_big_endian=0 targets, it is the position of the LSB. For
252 gdbarch_bits_big_endian=1 targets, it is the position of the MSB. */
253 LONGEST bitpos;
254
255 /* The number of references to this value. When a value is created,
256 the value chain holds a reference, so REFERENCE_COUNT is 1. If
257 release_value is called, this value is removed from the chain but
258 the caller of release_value now has a reference to this value.
259 The caller must arrange for a call to value_free later. */
260 int reference_count;
261
262 /* Only used for bitfields; the containing value. This allows a
263 single read from the target when displaying multiple
264 bitfields. */
265 struct value *parent;
266
267 /* Type of the value. */
268 struct type *type;
269
270 /* If a value represents a C++ object, then the `type' field gives
271 the object's compile-time type. If the object actually belongs
272 to some class derived from `type', perhaps with other base
273 classes and additional members, then `type' is just a subobject
274 of the real thing, and the full object is probably larger than
275 `type' would suggest.
276
277 If `type' is a dynamic class (i.e. one with a vtable), then GDB
278 can actually determine the object's run-time type by looking at
279 the run-time type information in the vtable. When this
280 information is available, we may elect to read in the entire
281 object, for several reasons:
282
283 - When printing the value, the user would probably rather see the
284 full object, not just the limited portion apparent from the
285 compile-time type.
286
287 - If `type' has virtual base classes, then even printing `type'
288 alone may require reaching outside the `type' portion of the
289 object to wherever the virtual base class has been stored.
290
291 When we store the entire object, `enclosing_type' is the run-time
292 type -- the complete object -- and `embedded_offset' is the
293 offset of `type' within that larger type, in target addressable memory
294 units. The value_contents() macro takes `embedded_offset' into account,
295 so most GDB code continues to see the `type' portion of the value, just
296 as the inferior would.
297
298 If `type' is a pointer to an object, then `enclosing_type' is a
299 pointer to the object's run-time type, and `pointed_to_offset' is
300 the offset in target addressable memory units from the full object
301 to the pointed-to object -- that is, the value `embedded_offset' would
302 have if we followed the pointer and fetched the complete object.
303 (I don't really see the point. Why not just determine the
304 run-time type when you indirect, and avoid the special case? The
305 contents don't matter until you indirect anyway.)
306
307 If we're not doing anything fancy, `enclosing_type' is equal to
308 `type', and `embedded_offset' is zero, so everything works
309 normally. */
310 struct type *enclosing_type;
311 LONGEST embedded_offset;
312 LONGEST pointed_to_offset;
313
314 /* Values are stored in a chain, so that they can be deleted easily
315 over calls to the inferior. Values assigned to internal
316 variables, put into the value history or exposed to Python are
317 taken off this list. */
318 struct value *next;
319
320 /* Actual contents of the value. Target byte-order. NULL or not
321 valid if lazy is nonzero. */
322 gdb_byte *contents;
323
324 /* Unavailable ranges in CONTENTS. We mark unavailable ranges,
325 rather than available, since the common and default case is for a
326 value to be available. This is filled in at value read time.
327 The unavailable ranges are tracked in bits. Note that a contents
328 bit that has been optimized out doesn't really exist in the
329 program, so it can't be marked unavailable either. */
330 VEC(range_s) *unavailable;
331
332 /* Likewise, but for optimized out contents (a chunk of the value of
333 a variable that does not actually exist in the program). If LVAL
334 is lval_register, this is a register ($pc, $sp, etc., never a
335 program variable) that has not been saved in the frame. Not
336 saved registers and optimized-out program variables values are
337 treated pretty much the same, except not-saved registers have a
338 different string representation and related error strings. */
339 VEC(range_s) *optimized_out;
340 };
341
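/* Worked example (editor's note, not part of the original value.c) of the
   type / enclosing_type / embedded_offset relationship described above,
   using a hypothetical C++ hierarchy:

     struct Base { virtual ~Base () {} int b; };
     struct Derived : Base { int d; };

   Suppose a variable is declared as `Base *p' but at run time points at a
   Derived object, and GDB elects to read in the whole Derived object when
   the pointer is dereferenced.  For that value:

     type            == Base      (the compile-time type)
     enclosing_type  == Derived   (the run-time type found via the vtable)
     embedded_offset == offset, in addressable units, of the Base
                        subobject within Derived (typically 0 here, since
                        Base would usually be the primary base)

   value_contents () then returns contents + embedded_offset, so code that
   only knows about `type' still sees a plain Base, while the buffer holds
   the complete Derived object.  */
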
342 /* See value.h. */
343
344 struct gdbarch *
345 get_value_arch (const struct value *value)
346 {
347 return get_type_arch (value_type (value));
348 }
349
350 int
351 value_bits_available (const struct value *value, LONGEST offset, LONGEST length)
352 {
353 gdb_assert (!value->lazy);
354
355 return !ranges_contain (value->unavailable, offset, length);
356 }
357
358 int
359 value_bytes_available (const struct value *value,
360 LONGEST offset, LONGEST length)
361 {
362 return value_bits_available (value,
363 offset * TARGET_CHAR_BIT,
364 length * TARGET_CHAR_BIT);
365 }
366
367 int
368 value_bits_any_optimized_out (const struct value *value, int bit_offset, int bit_length)
369 {
370 gdb_assert (!value->lazy);
371
372 return ranges_contain (value->optimized_out, bit_offset, bit_length);
373 }
374
375 int
376 value_entirely_available (struct value *value)
377 {
378 /* We can only tell whether the whole value is available when we try
379 to read it. */
380 if (value->lazy)
381 value_fetch_lazy (value);
382
383 if (VEC_empty (range_s, value->unavailable))
384 return 1;
385 return 0;
386 }
387
388 /* Returns true if VALUE is entirely covered by RANGES. If the value
389 is lazy, it'll be read now. Note that RANGES is a pointer to
390 pointer because reading the value might change *RANGES. */
391
392 static int
393 value_entirely_covered_by_range_vector (struct value *value,
394 VEC(range_s) **ranges)
395 {
396 /* We can only tell whether the whole value is optimized out /
397 unavailable when we try to read it. */
398 if (value->lazy)
399 value_fetch_lazy (value);
400
401 if (VEC_length (range_s, *ranges) == 1)
402 {
403 struct range *t = VEC_index (range_s, *ranges, 0);
404
405 if (t->offset == 0
406 && t->length == (TARGET_CHAR_BIT
407 * TYPE_LENGTH (value_enclosing_type (value))))
408 return 1;
409 }
410
411 return 0;
412 }
413
414 int
415 value_entirely_unavailable (struct value *value)
416 {
417 return value_entirely_covered_by_range_vector (value, &value->unavailable);
418 }
419
420 int
421 value_entirely_optimized_out (struct value *value)
422 {
423 return value_entirely_covered_by_range_vector (value, &value->optimized_out);
424 }
425
426 /* Insert into the vector pointed to by VECTORP the bit range starting at
427 OFFSET bits, and extending for the next LENGTH bits. */
428
429 static void
430 insert_into_bit_range_vector (VEC(range_s) **vectorp,
431 LONGEST offset, LONGEST length)
432 {
433 range_s newr;
434 int i;
435
436 /* Insert the range sorted. If there's overlap or the new range
437 would be contiguous with an existing range, merge. */
438
439 newr.offset = offset;
440 newr.length = length;
441
442 /* Do a binary search for the position the given range would be
443 inserted if we only considered the starting OFFSET of ranges.
444 Call that position I. Since we also have LENGTH to care for
445 (this is a range after all), we need to check if the _previous_
446 range overlaps the I range. E.g., calling R the new range:
447
448 #1 - overlaps with previous
449
450 R
451 |-...-|
452 |---| |---| |------| ... |--|
453 0 1 2 N
454
455 I=1
456
457 In the case #1 above, the binary search would return `I=1',
458 meaning, this OFFSET should be inserted at position 1, and the
459 current position 1 should be pushed further (and become 2). But,
460 note that `0' overlaps with R, so we want to merge them.
461
462 A similar consideration needs to be taken if the new range would
463 be contiguous with the previous range:
464
465 #2 - contiguous with previous
466
467 R
468 |-...-|
469 |--| |---| |------| ... |--|
470 0 1 2 N
471
472 I=1
473
474 If there's no overlap with the previous range, as in:
475
476 #3 - not overlapping and not contiguous
477
478 R
479 |-...-|
480 |--| |---| |------| ... |--|
481 0 1 2 N
482
483 I=1
484
485 or if I is 0:
486
487 #4 - R is the range with lowest offset
488
489 R
490 |-...-|
491 |--| |---| |------| ... |--|
492 0 1 2 N
493
494 I=0
495
496 ... we just push the new range to I.
497
498 All the 4 cases above need to consider that the new range may
499 also overlap several of the ranges that follow, or that R may be
500 contiguous with the following range, and merge. E.g.,
501
502 #5 - overlapping following ranges
503
504 R
505 |------------------------|
506 |--| |---| |------| ... |--|
507 0 1 2 N
508
509 I=0
510
511 or:
512
513 R
514 |-------|
515 |--| |---| |------| ... |--|
516 0 1 2 N
517
518 I=1
519
520 */
521
522 i = VEC_lower_bound (range_s, *vectorp, &newr, range_lessthan);
523 if (i > 0)
524 {
525 struct range *bef = VEC_index (range_s, *vectorp, i - 1);
526
527 if (ranges_overlap (bef->offset, bef->length, offset, length))
528 {
529 /* #1 */
530 ULONGEST l = std::min (bef->offset, offset);
531 ULONGEST h = std::max (bef->offset + bef->length, offset + length);
532
533 bef->offset = l;
534 bef->length = h - l;
535 i--;
536 }
537 else if (offset == bef->offset + bef->length)
538 {
539 /* #2 */
540 bef->length += length;
541 i--;
542 }
543 else
544 {
545 /* #3 */
546 VEC_safe_insert (range_s, *vectorp, i, &newr);
547 }
548 }
549 else
550 {
551 /* #4 */
552 VEC_safe_insert (range_s, *vectorp, i, &newr);
553 }
554
555 /* Check whether the ranges following the one we've just added or
556 touched can be folded in (#5 above). */
557 if (i + 1 < VEC_length (range_s, *vectorp))
558 {
559 struct range *t;
560 struct range *r;
561 int removed = 0;
562 int next = i + 1;
563
564 /* Get the range we just touched. */
565 t = VEC_index (range_s, *vectorp, i);
566 removed = 0;
567
568 i = next;
569 for (; VEC_iterate (range_s, *vectorp, i, r); i++)
570 if (r->offset <= t->offset + t->length)
571 {
572 ULONGEST l, h;
573
574 l = std::min (t->offset, r->offset);
575 h = std::max (t->offset + t->length, r->offset + r->length);
576
577 t->offset = l;
578 t->length = h - l;
579
580 removed++;
581 }
582 else
583 {
584 /* If we couldn't merge this one, we won't be able to
585 merge following ones either, since the ranges are
586 always sorted by OFFSET. */
587 break;
588 }
589
590 if (removed != 0)
591 VEC_block_remove (range_s, *vectorp, next, removed);
592 }
593 }
594
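/* Worked example (editor's note, not part of the original value.c) of the
   merge cases handled above, using half-open bit ranges.  Start from the
   sorted, coalesced vector { [4,8) [12,16) }:

     insert [8,12)  -> not overlapping but contiguous with [4,8) (case #2),
                       and the widened [4,12) then touches [12,16)
                       (case #5), giving { [4,16) }
     insert [0,2)   -> lowest offset, no overlap (case #4), giving
                       { [0,2) [4,16) }
     insert [1,7)   -> overlaps the previous range [0,2) (case #1), and the
                       merged [0,7) then touches [4,16) (case #5), giving
                       { [0,16) }  */
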
595 void
596 mark_value_bits_unavailable (struct value *value,
597 LONGEST offset, LONGEST length)
598 {
599 insert_into_bit_range_vector (&value->unavailable, offset, length);
600 }
601
602 void
603 mark_value_bytes_unavailable (struct value *value,
604 LONGEST offset, LONGEST length)
605 {
606 mark_value_bits_unavailable (value,
607 offset * TARGET_CHAR_BIT,
608 length * TARGET_CHAR_BIT);
609 }
610
611 /* Find the first range in RANGES that overlaps the range defined by
612 OFFSET and LENGTH, starting at element POS in the RANGES vector.
613 Returns the index into RANGES where such an overlapping range was
614 found, or -1 if none was found. */
615
616 static int
617 find_first_range_overlap (VEC(range_s) *ranges, int pos,
618 LONGEST offset, LONGEST length)
619 {
620 range_s *r;
621 int i;
622
623 for (i = pos; VEC_iterate (range_s, ranges, i, r); i++)
624 if (ranges_overlap (r->offset, r->length, offset, length))
625 return i;
626
627 return -1;
628 }
629
630 /* Compare LENGTH_BITS of memory at PTR1 + OFFSET1_BITS with the memory at
631 PTR2 + OFFSET2_BITS. Return 0 if the memory is the same, otherwise
632 return non-zero.
633
634 It must always be the case that:
635 OFFSET1_BITS % TARGET_CHAR_BIT == OFFSET2_BITS % TARGET_CHAR_BIT
636
637 It is assumed that memory can be accessed from:
638 PTR + (OFFSET_BITS / TARGET_CHAR_BIT)
639 to:
640 PTR + ((OFFSET_BITS + LENGTH_BITS + TARGET_CHAR_BIT - 1)
641 / TARGET_CHAR_BIT) */
642 static int
643 memcmp_with_bit_offsets (const gdb_byte *ptr1, size_t offset1_bits,
644 const gdb_byte *ptr2, size_t offset2_bits,
645 size_t length_bits)
646 {
647 gdb_assert (offset1_bits % TARGET_CHAR_BIT
648 == offset2_bits % TARGET_CHAR_BIT);
649
650 if (offset1_bits % TARGET_CHAR_BIT != 0)
651 {
652 size_t bits;
653 gdb_byte mask, b1, b2;
654
655 /* The offset from the base pointers PTR1 and PTR2 is not a complete
656 number of bytes. A number of bits up to either the next exact
657 byte boundary, or LENGTH_BITS (whichever comes first) will be
658 compared. */
659 bits = TARGET_CHAR_BIT - offset1_bits % TARGET_CHAR_BIT;
660 gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
661 mask = (1 << bits) - 1;
662
663 if (length_bits < bits)
664 {
665 mask &= ~(gdb_byte) ((1 << (bits - length_bits)) - 1);
666 bits = length_bits;
667 }
668
669 /* Now load the two bytes and mask off the bits we care about. */
670 b1 = *(ptr1 + offset1_bits / TARGET_CHAR_BIT) & mask;
671 b2 = *(ptr2 + offset2_bits / TARGET_CHAR_BIT) & mask;
672
673 if (b1 != b2)
674 return 1;
675
676 /* Now update the length and offsets to take account of the bits
677 we've just compared. */
678 length_bits -= bits;
679 offset1_bits += bits;
680 offset2_bits += bits;
681 }
682
683 if (length_bits % TARGET_CHAR_BIT != 0)
684 {
685 size_t bits;
686 size_t o1, o2;
687 gdb_byte mask, b1, b2;
688
689 /* The length is not an exact number of bytes. After the previous
690 IF.. block, either the offsets are byte aligned, or the
691 length is zero (in which case this code is not reached). Compare
692 a number of bits at the end of the region, starting from an exact
693 byte boundary. */
694 bits = length_bits % TARGET_CHAR_BIT;
695 o1 = offset1_bits + length_bits - bits;
696 o2 = offset2_bits + length_bits - bits;
697
698 gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
699 mask = ((1 << bits) - 1) << (TARGET_CHAR_BIT - bits);
700
701 gdb_assert (o1 % TARGET_CHAR_BIT == 0);
702 gdb_assert (o2 % TARGET_CHAR_BIT == 0);
703
704 b1 = *(ptr1 + o1 / TARGET_CHAR_BIT) & mask;
705 b2 = *(ptr2 + o2 / TARGET_CHAR_BIT) & mask;
706
707 if (b1 != b2)
708 return 1;
709
710 length_bits -= bits;
711 }
712
713 if (length_bits > 0)
714 {
715 /* We've now taken care of any stray "bits" at the start, or end of
716 the region to compare, the remainder can be covered with a simple
717 memcmp. */
718 gdb_assert (offset1_bits % TARGET_CHAR_BIT == 0);
719 gdb_assert (offset2_bits % TARGET_CHAR_BIT == 0);
720 gdb_assert (length_bits % TARGET_CHAR_BIT == 0);
721
722 return memcmp (ptr1 + offset1_bits / TARGET_CHAR_BIT,
723 ptr2 + offset2_bits / TARGET_CHAR_BIT,
724 length_bits / TARGET_CHAR_BIT);
725 }
726
727 /* Length is zero, regions match. */
728 return 0;
729 }
730
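/* Worked example (editor's note, not part of the original value.c) of the
   masking arithmetic above, assuming TARGET_CHAR_BIT == 8.  Take
   OFFSET1_BITS = 3 (with OFFSET2_BITS also congruent to 3 mod 8) and
   LENGTH_BITS = 12:

     leading partial byte:  bits = 8 - 3 = 5, mask = (1 << 5) - 1 = 0x1f,
                            so byte PTR[0] is compared under mask 0x1f;
                            afterwards LENGTH_BITS = 7, offsets = 8
     trailing partial byte: bits = 7, o = 8 + 7 - 7 = 8,
                            mask = ((1 << 7) - 1) << (8 - 7) = 0xfe,
                            so byte PTR[1] is compared under mask 0xfe;
                            afterwards LENGTH_BITS = 0
     byte-aligned middle:   nothing left, the final memcmp is skipped.  */
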
731 /* Helper struct for find_first_range_overlap_and_match and
732 value_contents_bits_eq. Keep track of which slot of a given ranges
733 vector we last looked at. */
734
735 struct ranges_and_idx
736 {
737 /* The ranges. */
738 VEC(range_s) *ranges;
739
740 /* The range we've last found in RANGES. Given ranges are sorted,
741 we can start the next lookup here. */
742 int idx;
743 };
744
745 /* Helper function for value_contents_bits_eq. Compare LENGTH bits of
746 RP1's ranges starting at OFFSET1 bits with LENGTH bits of RP2's
747 ranges starting at OFFSET2 bits. Return true if the ranges match
748 and fill in *L and *H with the overlapping window relative to
749 (both) OFFSET1 or OFFSET2. */
750
751 static int
752 find_first_range_overlap_and_match (struct ranges_and_idx *rp1,
753 struct ranges_and_idx *rp2,
754 LONGEST offset1, LONGEST offset2,
755 LONGEST length, ULONGEST *l, ULONGEST *h)
756 {
757 rp1->idx = find_first_range_overlap (rp1->ranges, rp1->idx,
758 offset1, length);
759 rp2->idx = find_first_range_overlap (rp2->ranges, rp2->idx,
760 offset2, length);
761
762 if (rp1->idx == -1 && rp2->idx == -1)
763 {
764 *l = length;
765 *h = length;
766 return 1;
767 }
768 else if (rp1->idx == -1 || rp2->idx == -1)
769 return 0;
770 else
771 {
772 range_s *r1, *r2;
773 ULONGEST l1, h1;
774 ULONGEST l2, h2;
775
776 r1 = VEC_index (range_s, rp1->ranges, rp1->idx);
777 r2 = VEC_index (range_s, rp2->ranges, rp2->idx);
778
779 /* Get the unavailable windows intersected by the incoming
780 ranges. The first and last ranges that overlap the argument
781 range may be wider than the incoming argument ranges. */
782 l1 = std::max (offset1, r1->offset);
783 h1 = std::min (offset1 + length, r1->offset + r1->length);
784
785 l2 = std::max (offset2, r2->offset);
786 h2 = std::min (offset2 + length, r2->offset + r2->length);
787
788 /* Make them relative to the respective start offsets, so we can
789 compare them for equality. */
790 l1 -= offset1;
791 h1 -= offset1;
792
793 l2 -= offset2;
794 h2 -= offset2;
795
796 /* Different ranges, no match. */
797 if (l1 != l2 || h1 != h2)
798 return 0;
799
800 *h = h1;
801 *l = l1;
802 return 1;
803 }
804 }
805
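/* Worked example (editor's note, not part of the original value.c) of the
   window clamping above.  With OFFSET1 = 10, OFFSET2 = 100, LENGTH = 20,
   and the found ranges R1 = [15,25) and R2 = [105,115):

     l1 = max (10, 15)   = 15      h1 = min (30, 25)   = 25
     l2 = max (100, 105) = 105     h2 = min (120, 115) = 115

   Made relative to their respective offsets, both windows become [5,15),
   so the function reports a match with *L = 5 and *H = 15.  */
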
806 /* Helper function for value_contents_eq. The only difference is that
807 this function is bit rather than byte based.
808
809 Compare LENGTH bits of VAL1's contents starting at OFFSET1 bits
810 with LENGTH bits of VAL2's contents starting at OFFSET2 bits.
811 Return true if the available bits match. */
812
813 static int
814 value_contents_bits_eq (const struct value *val1, int offset1,
815 const struct value *val2, int offset2,
816 int length)
817 {
818 /* Each array element corresponds to a ranges source (unavailable,
819 optimized out). '1' is for VAL1, '2' for VAL2. */
820 struct ranges_and_idx rp1[2], rp2[2];
821
822 /* See function description in value.h. */
823 gdb_assert (!val1->lazy && !val2->lazy);
824
825 /* We shouldn't be trying to compare past the end of the values. */
826 gdb_assert (offset1 + length
827 <= TYPE_LENGTH (val1->enclosing_type) * TARGET_CHAR_BIT);
828 gdb_assert (offset2 + length
829 <= TYPE_LENGTH (val2->enclosing_type) * TARGET_CHAR_BIT);
830
831 memset (&rp1, 0, sizeof (rp1));
832 memset (&rp2, 0, sizeof (rp2));
833 rp1[0].ranges = val1->unavailable;
834 rp2[0].ranges = val2->unavailable;
835 rp1[1].ranges = val1->optimized_out;
836 rp2[1].ranges = val2->optimized_out;
837
838 while (length > 0)
839 {
840 ULONGEST l = 0, h = 0; /* init for gcc -Wall */
841 int i;
842
843 for (i = 0; i < 2; i++)
844 {
845 ULONGEST l_tmp, h_tmp;
846
847 /* The contents only compare equal if the invalid/unavailable
848 contents ranges match as well. */
849 if (!find_first_range_overlap_and_match (&rp1[i], &rp2[i],
850 offset1, offset2, length,
851 &l_tmp, &h_tmp))
852 return 0;
853
854 /* We're interested in the lowest/first range found. */
855 if (i == 0 || l_tmp < l)
856 {
857 l = l_tmp;
858 h = h_tmp;
859 }
860 }
861
862 /* Compare the available/valid contents. */
863 if (memcmp_with_bit_offsets (val1->contents, offset1,
864 val2->contents, offset2, l) != 0)
865 return 0;
866
867 length -= h;
868 offset1 += h;
869 offset2 += h;
870 }
871
872 return 1;
873 }
874
875 int
876 value_contents_eq (const struct value *val1, LONGEST offset1,
877 const struct value *val2, LONGEST offset2,
878 LONGEST length)
879 {
880 return value_contents_bits_eq (val1, offset1 * TARGET_CHAR_BIT,
881 val2, offset2 * TARGET_CHAR_BIT,
882 length * TARGET_CHAR_BIT);
883 }
884
885 /* Prototypes for local functions. */
886
887 static void show_values (char *, int);
888
889
890 /* The value-history records all the values printed
891 by print commands during this session. Each chunk
892 records 60 consecutive values. The first chunk on
893 the chain records the most recent values.
894 The total number of values is in value_history_count. */
895
896 #define VALUE_HISTORY_CHUNK 60
897
898 struct value_history_chunk
899 {
900 struct value_history_chunk *next;
901 struct value *values[VALUE_HISTORY_CHUNK];
902 };
903
904 /* Chain of chunks now in use. */
905
906 static struct value_history_chunk *value_history_chain;
907
908 static int value_history_count; /* Abs number of last entry stored. */
909
910 \f
911 /* List of all value objects currently allocated
912 (except for those released by calls to release_value).
913 This is so they can be freed after each command. */
914
915 static struct value *all_values;
916
917 /* Allocate a lazy value for type TYPE. Its actual content is
918 "lazily" allocated too: the content field of the return value is
919 NULL; it will be allocated when it is fetched from the target. */
920
921 struct value *
922 allocate_value_lazy (struct type *type)
923 {
924 struct value *val;
925
926 /* Call check_typedef on our type to make sure that, if TYPE
927 is a TYPE_CODE_TYPEDEF, its length is set to the length
928 of the target type instead of zero. However, we do not
929 replace the typedef type by the target type, because we want
930 to keep the typedef in order to be able to set the VAL's type
931 description correctly. */
932 check_typedef (type);
933
934 val = XCNEW (struct value);
935 val->contents = NULL;
936 val->next = all_values;
937 all_values = val;
938 val->type = type;
939 val->enclosing_type = type;
940 VALUE_LVAL (val) = not_lval;
941 val->location.address = 0;
942 val->offset = 0;
943 val->bitpos = 0;
944 val->bitsize = 0;
945 val->lazy = 1;
946 val->embedded_offset = 0;
947 val->pointed_to_offset = 0;
948 val->modifiable = 1;
949 val->initialized = 1; /* Default to initialized. */
950
951 /* Values start out on the all_values chain. */
952 val->reference_count = 1;
953
954 return val;
955 }
956
957 /* The maximum size, in bytes, that GDB will try to allocate for a value.
958 The initial value of 64k was not selected for any specific reason; it is
959 just a reasonable starting point. */
960
961 static int max_value_size = 65536; /* 64k bytes */
962
963 /* It is critical that the MAX_VALUE_SIZE is at least as big as the size of
964 LONGEST, otherwise GDB will not be able to parse integer values from the
965 CLI; for example if the MAX_VALUE_SIZE could be set to 1 then GDB would
966 be unable to parse "set max-value-size 2".
967
968 As we want a consistent GDB experience across hosts with different sizes
969 of LONGEST, this arbitrary minimum value was selected; so long as it
970 is bigger than LONGEST on all GDB-supported hosts, we're fine. */
971
972 #define MIN_VALUE_FOR_MAX_VALUE_SIZE 16
973 gdb_static_assert (sizeof (LONGEST) <= MIN_VALUE_FOR_MAX_VALUE_SIZE);
974
975 /* Implement the "set max-value-size" command. */
976
977 static void
978 set_max_value_size (char *args, int from_tty,
979 struct cmd_list_element *c)
980 {
981 gdb_assert (max_value_size == -1 || max_value_size >= 0);
982
983 if (max_value_size > -1 && max_value_size < MIN_VALUE_FOR_MAX_VALUE_SIZE)
984 {
985 max_value_size = MIN_VALUE_FOR_MAX_VALUE_SIZE;
986 error (_("max-value-size set too low, increasing to %d bytes"),
987 max_value_size);
988 }
989 }
990
991 /* Implement the "show max-value-size" command. */
992
993 static void
994 show_max_value_size (struct ui_file *file, int from_tty,
995 struct cmd_list_element *c, const char *value)
996 {
997 if (max_value_size == -1)
998 fprintf_filtered (file, _("Maximum value size is unlimited.\n"));
999 else
1000 fprintf_filtered (file, _("Maximum value size is %d bytes.\n"),
1001 max_value_size);
1002 }
1003
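/* Usage note (editor's addition): per the two handlers above, and assuming
   max_value_size still has its initial value of 65536, a session would look
   roughly like this (the exact wording comes from the strings above):

     (gdb) show max-value-size
     Maximum value size is 65536 bytes.
     (gdb) set max-value-size 8
     max-value-size set too low, increasing to 16 bytes

   i.e. a request below MIN_VALUE_FOR_MAX_VALUE_SIZE is bumped up to that
   minimum and reported as an error.  */
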
1004 /* Called before we attempt to allocate or reallocate a buffer for the
1005 contents of a value. TYPE is the type of the value for which we are
1006 allocating the buffer. If the buffer is too large (based on the user
1007 controllable setting) then throw an error. If this function returns
1008 then we should attempt to allocate the buffer. */
1009
1010 static void
1011 check_type_length_before_alloc (const struct type *type)
1012 {
1013 unsigned int length = TYPE_LENGTH (type);
1014
1015 if (max_value_size > -1 && length > max_value_size)
1016 {
1017 if (TYPE_NAME (type) != NULL)
1018 error (_("value of type `%s' requires %u bytes, which is more "
1019 "than max-value-size"), TYPE_NAME (type), length);
1020 else
1021 error (_("value requires %u bytes, which is more than "
1022 "max-value-size"), length);
1023 }
1024 }
1025
1026 /* Allocate the contents of VAL if it has not been allocated yet. */
1027
1028 static void
1029 allocate_value_contents (struct value *val)
1030 {
1031 if (!val->contents)
1032 {
1033 check_type_length_before_alloc (val->enclosing_type);
1034 val->contents
1035 = (gdb_byte *) xzalloc (TYPE_LENGTH (val->enclosing_type));
1036 }
1037 }
1038
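/* Illustration (editor's sketch, not part of the original value.c): the
   allocate-on-first-use pattern behind lazy values, as implemented by
   check_type_length_before_alloc and allocate_value_contents above,
   reduced to a standalone C++ snippet.  The names below are invented.

     #include <cstddef>
     #include <stdexcept>
     #include <vector>

     struct lazy_buf
     {
       std::size_t len;                 // analogous to TYPE_LENGTH
       std::vector<unsigned char> buf;  // stays empty until first access

       unsigned char *contents (std::size_t max_size)
       {
         if (buf.empty ())
           {
             if (len > max_size)     // cf. check_type_length_before_alloc
               throw std::length_error ("value too large");
             buf.assign (len, 0);    // cf. xzalloc in the real code
           }
         return buf.data ();
       }
     };
*/
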
1039 /* Allocate a value and its contents for type TYPE. */
1040
1041 struct value *
1042 allocate_value (struct type *type)
1043 {
1044 struct value *val = allocate_value_lazy (type);
1045
1046 allocate_value_contents (val);
1047 val->lazy = 0;
1048 return val;
1049 }
1050
1051 /* Allocate a value that has the correct length
1052 for COUNT repetitions of type TYPE. */
1053
1054 struct value *
1055 allocate_repeat_value (struct type *type, int count)
1056 {
1057 int low_bound = current_language->string_lower_bound; /* ??? */
1058 /* FIXME-type-allocation: need a way to free this type when we are
1059 done with it. */
1060 struct type *array_type
1061 = lookup_array_range_type (type, low_bound, count + low_bound - 1);
1062
1063 return allocate_value (array_type);
1064 }
1065
1066 struct value *
1067 allocate_computed_value (struct type *type,
1068 const struct lval_funcs *funcs,
1069 void *closure)
1070 {
1071 struct value *v = allocate_value_lazy (type);
1072
1073 VALUE_LVAL (v) = lval_computed;
1074 v->location.computed.funcs = funcs;
1075 v->location.computed.closure = closure;
1076
1077 return v;
1078 }
1079
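/* Illustration (editor's sketch, not part of the original value.c): the
   "table of function pointers plus closure" idea behind lval_computed
   values, as a standalone snippet.  The names below are invented; the
   real callback table is struct lval_funcs from value.h.

     struct computed_funcs
     {
       void (*read) (void *closure, unsigned char *buf, int len);
       void (*free_closure) (void *closure);
     };

     struct computed_loc
     {
       const struct computed_funcs *funcs;
       void *closure;
     };

     static void
     fetch (struct computed_loc *loc, unsigned char *buf, int len)
     {
       loc->funcs->read (loc->closure, buf, len);  // dispatch via the table
     }
*/
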
1080 /* Allocate NOT_LVAL value for type TYPE being OPTIMIZED_OUT. */
1081
1082 struct value *
1083 allocate_optimized_out_value (struct type *type)
1084 {
1085 struct value *retval = allocate_value_lazy (type);
1086
1087 mark_value_bytes_optimized_out (retval, 0, TYPE_LENGTH (type));
1088 set_value_lazy (retval, 0);
1089 return retval;
1090 }
1091
1092 /* Accessor methods. */
1093
1094 struct value *
1095 value_next (const struct value *value)
1096 {
1097 return value->next;
1098 }
1099
1100 struct type *
1101 value_type (const struct value *value)
1102 {
1103 return value->type;
1104 }
1105 void
1106 deprecated_set_value_type (struct value *value, struct type *type)
1107 {
1108 value->type = type;
1109 }
1110
1111 LONGEST
1112 value_offset (const struct value *value)
1113 {
1114 return value->offset;
1115 }
1116 void
1117 set_value_offset (struct value *value, LONGEST offset)
1118 {
1119 value->offset = offset;
1120 }
1121
1122 LONGEST
1123 value_bitpos (const struct value *value)
1124 {
1125 return value->bitpos;
1126 }
1127 void
1128 set_value_bitpos (struct value *value, LONGEST bit)
1129 {
1130 value->bitpos = bit;
1131 }
1132
1133 LONGEST
1134 value_bitsize (const struct value *value)
1135 {
1136 return value->bitsize;
1137 }
1138 void
1139 set_value_bitsize (struct value *value, LONGEST bit)
1140 {
1141 value->bitsize = bit;
1142 }
1143
1144 struct value *
1145 value_parent (const struct value *value)
1146 {
1147 return value->parent;
1148 }
1149
1150 /* See value.h. */
1151
1152 void
1153 set_value_parent (struct value *value, struct value *parent)
1154 {
1155 struct value *old = value->parent;
1156
1157 value->parent = parent;
1158 if (parent != NULL)
1159 value_incref (parent);
1160 value_free (old);
1161 }
1162
1163 gdb_byte *
1164 value_contents_raw (struct value *value)
1165 {
1166 struct gdbarch *arch = get_value_arch (value);
1167 int unit_size = gdbarch_addressable_memory_unit_size (arch);
1168
1169 allocate_value_contents (value);
1170 return value->contents + value->embedded_offset * unit_size;
1171 }
1172
1173 gdb_byte *
1174 value_contents_all_raw (struct value *value)
1175 {
1176 allocate_value_contents (value);
1177 return value->contents;
1178 }
1179
1180 struct type *
1181 value_enclosing_type (const struct value *value)
1182 {
1183 return value->enclosing_type;
1184 }
1185
1186 /* Look at value.h for description. */
1187
1188 struct type *
1189 value_actual_type (struct value *value, int resolve_simple_types,
1190 int *real_type_found)
1191 {
1192 struct value_print_options opts;
1193 struct type *result;
1194
1195 get_user_print_options (&opts);
1196
1197 if (real_type_found)
1198 *real_type_found = 0;
1199 result = value_type (value);
1200 if (opts.objectprint)
1201 {
1202 /* If result's target type is TYPE_CODE_STRUCT, proceed to
1203 fetch its rtti type. */
1204 if ((TYPE_CODE (result) == TYPE_CODE_PTR || TYPE_IS_REFERENCE (result))
1205 && TYPE_CODE (check_typedef (TYPE_TARGET_TYPE (result)))
1206 == TYPE_CODE_STRUCT
1207 && !value_optimized_out (value))
1208 {
1209 struct type *real_type;
1210
1211 real_type = value_rtti_indirect_type (value, NULL, NULL, NULL);
1212 if (real_type)
1213 {
1214 if (real_type_found)
1215 *real_type_found = 1;
1216 result = real_type;
1217 }
1218 }
1219 else if (resolve_simple_types)
1220 {
1221 if (real_type_found)
1222 *real_type_found = 1;
1223 result = value_enclosing_type (value);
1224 }
1225 }
1226
1227 return result;
1228 }
1229
1230 void
1231 error_value_optimized_out (void)
1232 {
1233 error (_("value has been optimized out"));
1234 }
1235
1236 static void
1237 require_not_optimized_out (const struct value *value)
1238 {
1239 if (!VEC_empty (range_s, value->optimized_out))
1240 {
1241 if (value->lval == lval_register)
1242 error (_("register has not been saved in frame"));
1243 else
1244 error_value_optimized_out ();
1245 }
1246 }
1247
1248 static void
1249 require_available (const struct value *value)
1250 {
1251 if (!VEC_empty (range_s, value->unavailable))
1252 throw_error (NOT_AVAILABLE_ERROR, _("value is not available"));
1253 }
1254
1255 const gdb_byte *
1256 value_contents_for_printing (struct value *value)
1257 {
1258 if (value->lazy)
1259 value_fetch_lazy (value);
1260 return value->contents;
1261 }
1262
1263 const gdb_byte *
1264 value_contents_for_printing_const (const struct value *value)
1265 {
1266 gdb_assert (!value->lazy);
1267 return value->contents;
1268 }
1269
1270 const gdb_byte *
1271 value_contents_all (struct value *value)
1272 {
1273 const gdb_byte *result = value_contents_for_printing (value);
1274 require_not_optimized_out (value);
1275 require_available (value);
1276 return result;
1277 }
1278
1279 /* Copy ranges in SRC_RANGE that overlap [SRC_BIT_OFFSET,
1280 SRC_BIT_OFFSET+BIT_LENGTH) into *DST_RANGE, adjusted. */
1281
1282 static void
1283 ranges_copy_adjusted (VEC (range_s) **dst_range, int dst_bit_offset,
1284 VEC (range_s) *src_range, int src_bit_offset,
1285 int bit_length)
1286 {
1287 range_s *r;
1288 int i;
1289
1290 for (i = 0; VEC_iterate (range_s, src_range, i, r); i++)
1291 {
1292 ULONGEST h, l;
1293
1294 l = std::max (r->offset, (LONGEST) src_bit_offset);
1295 h = std::min (r->offset + r->length,
1296 (LONGEST) src_bit_offset + bit_length);
1297
1298 if (l < h)
1299 insert_into_bit_range_vector (dst_range,
1300 dst_bit_offset + (l - src_bit_offset),
1301 h - l);
1302 }
1303 }
1304
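/* Worked example (editor's note, not part of the original value.c) of the
   clamp-and-shift above.  With SRC_BIT_OFFSET = 16, BIT_LENGTH = 32 (so
   the copied window is [16,48) in SRC) and DST_BIT_OFFSET = 0:

     SRC range [8,24)  -> clamped to [16,24), inserted into DST at
                          0 + (16 - 16) = 0, length 8, i.e. [0,8)
     SRC range [40,64) -> clamped to [40,48), inserted into DST at
                          0 + (40 - 16) = 24, length 8, i.e. [24,32)  */
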
1305 /* Copy the ranges metadata in SRC that overlaps [SRC_BIT_OFFSET,
1306 SRC_BIT_OFFSET+BIT_LENGTH) into DST, adjusted. */
1307
1308 static void
1309 value_ranges_copy_adjusted (struct value *dst, int dst_bit_offset,
1310 const struct value *src, int src_bit_offset,
1311 int bit_length)
1312 {
1313 ranges_copy_adjusted (&dst->unavailable, dst_bit_offset,
1314 src->unavailable, src_bit_offset,
1315 bit_length);
1316 ranges_copy_adjusted (&dst->optimized_out, dst_bit_offset,
1317 src->optimized_out, src_bit_offset,
1318 bit_length);
1319 }
1320
1321 /* Copy LENGTH target addressable memory units of SRC value's (all) contents
1322 (value_contents_all) starting at SRC_OFFSET, into DST value's (all)
1323 contents, starting at DST_OFFSET. If unavailable contents are
1324 being copied from SRC, the corresponding DST contents are marked
1325 unavailable accordingly. Neither DST nor SRC may be lazy
1326 values.
1327
1328 It is assumed the contents of DST in the [DST_OFFSET,
1329 DST_OFFSET+LENGTH) range are wholly available. */
1330
1331 void
1332 value_contents_copy_raw (struct value *dst, LONGEST dst_offset,
1333 struct value *src, LONGEST src_offset, LONGEST length)
1334 {
1335 LONGEST src_bit_offset, dst_bit_offset, bit_length;
1336 struct gdbarch *arch = get_value_arch (src);
1337 int unit_size = gdbarch_addressable_memory_unit_size (arch);
1338
1339 /* A lazy DST would make this copy operation useless, since as
1340 soon as DST's contents were un-lazied (by a later value_contents
1341 call, say), the contents would be overwritten. A lazy SRC would
1342 mean we'd be copying garbage. */
1343 gdb_assert (!dst->lazy && !src->lazy);
1344
1345 /* The overwritten DST range gets unavailability ORed in, not
1346 replaced. Make sure to remember to implement replacing if it
1347 turns out actually necessary. */
1348 gdb_assert (value_bytes_available (dst, dst_offset, length));
1349 gdb_assert (!value_bits_any_optimized_out (dst,
1350 TARGET_CHAR_BIT * dst_offset,
1351 TARGET_CHAR_BIT * length));
1352
1353 /* Copy the data. */
1354 memcpy (value_contents_all_raw (dst) + dst_offset * unit_size,
1355 value_contents_all_raw (src) + src_offset * unit_size,
1356 length * unit_size);
1357
1358 /* Copy the meta-data, adjusted. */
1359 src_bit_offset = src_offset * unit_size * HOST_CHAR_BIT;
1360 dst_bit_offset = dst_offset * unit_size * HOST_CHAR_BIT;
1361 bit_length = length * unit_size * HOST_CHAR_BIT;
1362
1363 value_ranges_copy_adjusted (dst, dst_bit_offset,
1364 src, src_bit_offset,
1365 bit_length);
1366 }
1367
1368 /* Copy LENGTH bytes of SRC value's (all) contents
1369 (value_contents_all) starting at SRC_OFFSET byte, into DST value's
1370 (all) contents, starting at DST_OFFSET. If unavailable contents
1371 are being copied from SRC, the corresponding DST contents are
1372 marked unavailable accordingly. DST must not be lazy. If SRC is
1373 lazy, it will be fetched now.
1374
1375 It is assumed the contents of DST in the [DST_OFFSET,
1376 DST_OFFSET+LENGTH) range are wholly available. */
1377
1378 void
1379 value_contents_copy (struct value *dst, LONGEST dst_offset,
1380 struct value *src, LONGEST src_offset, LONGEST length)
1381 {
1382 if (src->lazy)
1383 value_fetch_lazy (src);
1384
1385 value_contents_copy_raw (dst, dst_offset, src, src_offset, length);
1386 }
1387
1388 int
1389 value_lazy (const struct value *value)
1390 {
1391 return value->lazy;
1392 }
1393
1394 void
1395 set_value_lazy (struct value *value, int val)
1396 {
1397 value->lazy = val;
1398 }
1399
1400 int
1401 value_stack (const struct value *value)
1402 {
1403 return value->stack;
1404 }
1405
1406 void
1407 set_value_stack (struct value *value, int val)
1408 {
1409 value->stack = val;
1410 }
1411
1412 const gdb_byte *
1413 value_contents (struct value *value)
1414 {
1415 const gdb_byte *result = value_contents_writeable (value);
1416 require_not_optimized_out (value);
1417 require_available (value);
1418 return result;
1419 }
1420
1421 gdb_byte *
1422 value_contents_writeable (struct value *value)
1423 {
1424 if (value->lazy)
1425 value_fetch_lazy (value);
1426 return value_contents_raw (value);
1427 }
1428
1429 int
1430 value_optimized_out (struct value *value)
1431 {
1432 /* We can only know if a value is optimized out once we have tried to
1433 fetch it. */
1434 if (VEC_empty (range_s, value->optimized_out) && value->lazy)
1435 {
1436 TRY
1437 {
1438 value_fetch_lazy (value);
1439 }
1440 CATCH (ex, RETURN_MASK_ERROR)
1441 {
1442 /* Fall back to checking value->optimized_out. */
1443 }
1444 END_CATCH
1445 }
1446
1447 return !VEC_empty (range_s, value->optimized_out);
1448 }
1449
1450 /* Mark contents of VALUE as optimized out, starting at OFFSET bytes, and
1451 the following LENGTH bytes. */
1452
1453 void
1454 mark_value_bytes_optimized_out (struct value *value, int offset, int length)
1455 {
1456 mark_value_bits_optimized_out (value,
1457 offset * TARGET_CHAR_BIT,
1458 length * TARGET_CHAR_BIT);
1459 }
1460
1461 /* See value.h. */
1462
1463 void
1464 mark_value_bits_optimized_out (struct value *value,
1465 LONGEST offset, LONGEST length)
1466 {
1467 insert_into_bit_range_vector (&value->optimized_out, offset, length);
1468 }
1469
1470 int
1471 value_bits_synthetic_pointer (const struct value *value,
1472 LONGEST offset, LONGEST length)
1473 {
1474 if (value->lval != lval_computed
1475 || !value->location.computed.funcs->check_synthetic_pointer)
1476 return 0;
1477 return value->location.computed.funcs->check_synthetic_pointer (value,
1478 offset,
1479 length);
1480 }
1481
1482 LONGEST
1483 value_embedded_offset (const struct value *value)
1484 {
1485 return value->embedded_offset;
1486 }
1487
1488 void
1489 set_value_embedded_offset (struct value *value, LONGEST val)
1490 {
1491 value->embedded_offset = val;
1492 }
1493
1494 LONGEST
1495 value_pointed_to_offset (const struct value *value)
1496 {
1497 return value->pointed_to_offset;
1498 }
1499
1500 void
1501 set_value_pointed_to_offset (struct value *value, LONGEST val)
1502 {
1503 value->pointed_to_offset = val;
1504 }
1505
1506 const struct lval_funcs *
1507 value_computed_funcs (const struct value *v)
1508 {
1509 gdb_assert (value_lval_const (v) == lval_computed);
1510
1511 return v->location.computed.funcs;
1512 }
1513
1514 void *
1515 value_computed_closure (const struct value *v)
1516 {
1517 gdb_assert (v->lval == lval_computed);
1518
1519 return v->location.computed.closure;
1520 }
1521
1522 enum lval_type *
1523 deprecated_value_lval_hack (struct value *value)
1524 {
1525 return &value->lval;
1526 }
1527
1528 enum lval_type
1529 value_lval_const (const struct value *value)
1530 {
1531 return value->lval;
1532 }
1533
1534 CORE_ADDR
1535 value_address (const struct value *value)
1536 {
1537 if (value->lval != lval_memory)
1538 return 0;
1539 if (value->parent != NULL)
1540 return value_address (value->parent) + value->offset;
1541 if (NULL != TYPE_DATA_LOCATION (value_type (value)))
1542 {
1543 gdb_assert (PROP_CONST == TYPE_DATA_LOCATION_KIND (value_type (value)));
1544 return TYPE_DATA_LOCATION_ADDR (value_type (value));
1545 }
1546
1547 return value->location.address + value->offset;
1548 }
1549
1550 CORE_ADDR
1551 value_raw_address (const struct value *value)
1552 {
1553 if (value->lval != lval_memory)
1554 return 0;
1555 return value->location.address;
1556 }
1557
1558 void
1559 set_value_address (struct value *value, CORE_ADDR addr)
1560 {
1561 gdb_assert (value->lval == lval_memory);
1562 value->location.address = addr;
1563 }
1564
1565 struct internalvar **
1566 deprecated_value_internalvar_hack (struct value *value)
1567 {
1568 return &value->location.internalvar;
1569 }
1570
1571 struct frame_id *
1572 deprecated_value_next_frame_id_hack (struct value *value)
1573 {
1574 gdb_assert (value->lval == lval_register);
1575 return &value->location.reg.next_frame_id;
1576 }
1577
1578 int *
1579 deprecated_value_regnum_hack (struct value *value)
1580 {
1581 gdb_assert (value->lval == lval_register);
1582 return &value->location.reg.regnum;
1583 }
1584
1585 int
1586 deprecated_value_modifiable (const struct value *value)
1587 {
1588 return value->modifiable;
1589 }
1590 \f
1591 /* Return a mark in the value chain. All values allocated after the
1592 mark is obtained (except for those released) are subject to being freed
1593 if a subsequent value_free_to_mark is passed the mark. */
1594 struct value *
1595 value_mark (void)
1596 {
1597 return all_values;
1598 }
1599
1600 /* Take a reference to VAL. VAL will not be deallocated until all
1601 references are released. */
1602
1603 void
1604 value_incref (struct value *val)
1605 {
1606 val->reference_count++;
1607 }
1608
1609 /* Release a reference to VAL, which was acquired with value_incref.
1610 This function is also called to deallocate values from the value
1611 chain. */
1612
1613 void
1614 value_free (struct value *val)
1615 {
1616 if (val)
1617 {
1618 gdb_assert (val->reference_count > 0);
1619 val->reference_count--;
1620 if (val->reference_count > 0)
1621 return;
1622
1623 /* If there's an associated parent value, drop our reference to
1624 it. */
1625 if (val->parent != NULL)
1626 value_free (val->parent);
1627
1628 if (VALUE_LVAL (val) == lval_computed)
1629 {
1630 const struct lval_funcs *funcs = val->location.computed.funcs;
1631
1632 if (funcs->free_closure)
1633 funcs->free_closure (val);
1634 }
1635 else if (VALUE_LVAL (val) == lval_xcallable)
1636 free_xmethod_worker (val->location.xm_worker);
1637
1638 xfree (val->contents);
1639 VEC_free (range_s, val->unavailable);
1640 }
1641 xfree (val);
1642 }
1643
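/* Illustration (editor's sketch, not part of the original value.c): the
   reference-counting protocol used by value_incref / value_free above,
   reduced to a standalone snippet.  The names below are invented.

     #include <stdlib.h>

     struct node
     {
       int refs;            // starts at 1: the value chain's reference
       struct node *parent; // optional, itself reference-counted
     };

     static void
     node_ref (struct node *n)
     {
       n->refs++;
     }

     static void
     node_unref (struct node *n)
     {
       if (n == NULL || --n->refs > 0)
         return;
       if (n->parent != NULL)
         node_unref (n->parent);  // drop our reference to the parent
       free (n);
     }
*/
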
1644 /* Free all values allocated since MARK was obtained by value_mark
1645 (except for those released). */
1646 void
1647 value_free_to_mark (const struct value *mark)
1648 {
1649 struct value *val;
1650 struct value *next;
1651
1652 for (val = all_values; val && val != mark; val = next)
1653 {
1654 next = val->next;
1655 val->released = 1;
1656 value_free (val);
1657 }
1658 all_values = val;
1659 }
1660
1661 /* Free all the values that have been allocated (except for those released).
1662 Call after each command, successful or not.
1663 In practice this is called before each command, which is sufficient. */
1664
1665 void
1666 free_all_values (void)
1667 {
1668 struct value *val;
1669 struct value *next;
1670
1671 for (val = all_values; val; val = next)
1672 {
1673 next = val->next;
1674 val->released = 1;
1675 value_free (val);
1676 }
1677
1678 all_values = 0;
1679 }
1680
1681 /* Frees all the elements in a chain of values. */
1682
1683 void
1684 free_value_chain (struct value *v)
1685 {
1686 struct value *next;
1687
1688 for (; v; v = next)
1689 {
1690 next = value_next (v);
1691 value_free (v);
1692 }
1693 }
1694
1695 /* Remove VAL from the chain all_values
1696 so it will not be freed automatically. */
1697
1698 void
1699 release_value (struct value *val)
1700 {
1701 struct value *v;
1702
1703 if (all_values == val)
1704 {
1705 all_values = val->next;
1706 val->next = NULL;
1707 val->released = 1;
1708 return;
1709 }
1710
1711 for (v = all_values; v; v = v->next)
1712 {
1713 if (v->next == val)
1714 {
1715 v->next = val->next;
1716 val->next = NULL;
1717 val->released = 1;
1718 break;
1719 }
1720 }
1721 }
1722
1723 /* If the value is not already released, release it.
1724 If the value is already released, increment its reference count.
1725 That is, this function ensures that the value is released from the
1726 value chain and that the caller owns a reference to it. */
1727
1728 void
1729 release_value_or_incref (struct value *val)
1730 {
1731 if (val->released)
1732 value_incref (val);
1733 else
1734 release_value (val);
1735 }
1736
1737 /* Release all values up to MARK. */
1738 struct value *
1739 value_release_to_mark (const struct value *mark)
1740 {
1741 struct value *val;
1742 struct value *next;
1743
1744 for (val = next = all_values; next; next = next->next)
1745 {
1746 if (next->next == mark)
1747 {
1748 all_values = next->next;
1749 next->next = NULL;
1750 return val;
1751 }
1752 next->released = 1;
1753 }
1754 all_values = 0;
1755 return val;
1756 }
1757
1758 /* Return a copy of the value ARG.
1759 It contains the same contents, for the same memory address,
1760 but it's a different block of storage. */
1761
1762 struct value *
1763 value_copy (struct value *arg)
1764 {
1765 struct type *encl_type = value_enclosing_type (arg);
1766 struct value *val;
1767
1768 if (value_lazy (arg))
1769 val = allocate_value_lazy (encl_type);
1770 else
1771 val = allocate_value (encl_type);
1772 val->type = arg->type;
1773 VALUE_LVAL (val) = VALUE_LVAL (arg);
1774 val->location = arg->location;
1775 val->offset = arg->offset;
1776 val->bitpos = arg->bitpos;
1777 val->bitsize = arg->bitsize;
1778 val->lazy = arg->lazy;
1779 val->embedded_offset = value_embedded_offset (arg);
1780 val->pointed_to_offset = arg->pointed_to_offset;
1781 val->modifiable = arg->modifiable;
1782 if (!value_lazy (val))
1783 {
1784 memcpy (value_contents_all_raw (val), value_contents_all_raw (arg),
1785 TYPE_LENGTH (value_enclosing_type (arg)));
1786
1787 }
1788 val->unavailable = VEC_copy (range_s, arg->unavailable);
1789 val->optimized_out = VEC_copy (range_s, arg->optimized_out);
1790 set_value_parent (val, arg->parent);
1791 if (VALUE_LVAL (val) == lval_computed)
1792 {
1793 const struct lval_funcs *funcs = val->location.computed.funcs;
1794
1795 if (funcs->copy_closure)
1796 val->location.computed.closure = funcs->copy_closure (val);
1797 }
1798 return val;
1799 }
1800
1801 /* Return a "const" and/or "volatile" qualified version of the value V.
1802 If CNST is true, then the returned value will be qualified with
1803 "const".
1804 If VOLTL is true, then the returned value will be qualified with
1805 "volatile". */
1806
1807 struct value *
1808 make_cv_value (int cnst, int voltl, struct value *v)
1809 {
1810 struct type *val_type = value_type (v);
1811 struct type *enclosing_type = value_enclosing_type (v);
1812 struct value *cv_val = value_copy (v);
1813
1814 deprecated_set_value_type (cv_val,
1815 make_cv_type (cnst, voltl, val_type, NULL));
1816 set_value_enclosing_type (cv_val,
1817 make_cv_type (cnst, voltl, enclosing_type, NULL));
1818
1819 return cv_val;
1820 }
1821
1822 /* Return a version of ARG that is non-lvalue. */
1823
1824 struct value *
1825 value_non_lval (struct value *arg)
1826 {
1827 if (VALUE_LVAL (arg) != not_lval)
1828 {
1829 struct type *enc_type = value_enclosing_type (arg);
1830 struct value *val = allocate_value (enc_type);
1831
1832 memcpy (value_contents_all_raw (val), value_contents_all (arg),
1833 TYPE_LENGTH (enc_type));
1834 val->type = arg->type;
1835 set_value_embedded_offset (val, value_embedded_offset (arg));
1836 set_value_pointed_to_offset (val, value_pointed_to_offset (arg));
1837 return val;
1838 }
1839 return arg;
1840 }
1841
1842 /* Write contents of V at ADDR and set its lval type to be LVAL_MEMORY. */
1843
1844 void
1845 value_force_lval (struct value *v, CORE_ADDR addr)
1846 {
1847 gdb_assert (VALUE_LVAL (v) == not_lval);
1848
1849 write_memory (addr, value_contents_raw (v), TYPE_LENGTH (value_type (v)));
1850 v->lval = lval_memory;
1851 v->location.address = addr;
1852 }
1853
1854 void
1855 set_value_component_location (struct value *component,
1856 const struct value *whole)
1857 {
1858 struct type *type;
1859
1860 gdb_assert (whole->lval != lval_xcallable);
1861
1862 if (whole->lval == lval_internalvar)
1863 VALUE_LVAL (component) = lval_internalvar_component;
1864 else
1865 VALUE_LVAL (component) = whole->lval;
1866
1867 component->location = whole->location;
1868 if (whole->lval == lval_computed)
1869 {
1870 const struct lval_funcs *funcs = whole->location.computed.funcs;
1871
1872 if (funcs->copy_closure)
1873 component->location.computed.closure = funcs->copy_closure (whole);
1874 }
1875
1876 /* If the type has a dynamic resolved location property,
1877 update its value address. */
1878 type = value_type (whole);
1879 if (NULL != TYPE_DATA_LOCATION (type)
1880 && TYPE_DATA_LOCATION_KIND (type) == PROP_CONST)
1881 set_value_address (component, TYPE_DATA_LOCATION_ADDR (type));
1882 }
1883
1884 /* Access to the value history. */
1885
1886 /* Record a new value in the value history.
1887 Returns the absolute history index of the entry. */
1888
1889 int
1890 record_latest_value (struct value *val)
1891 {
1892 int i;
1893
1894 /* We don't want this value to have anything to do with the inferior anymore.
1895 In particular, "set $1 = 50" should not affect the variable from which
1896 the value was taken, and fast watchpoints should be able to assume that
1897 a value on the value history never changes. */
1898 if (value_lazy (val))
1899 value_fetch_lazy (val);
1900 /* We preserve VALUE_LVAL so that the user can find out where it was fetched
1901 from. This is a bit dubious, because then *&$1 does not just return $1
1902 but the current contents of that location. c'est la vie... */
1903 val->modifiable = 0;
1904
1905 /* The value may have already been released, in which case we're adding a
1906 new reference for its entry in the history. That is why we call
1907 release_value_or_incref here instead of release_value. */
1908 release_value_or_incref (val);
1909
1910 /* Here we treat value_history_count as origin-zero
1911 and as applying to the value being stored now. */
1912
1913 i = value_history_count % VALUE_HISTORY_CHUNK;
1914 if (i == 0)
1915 {
1916 struct value_history_chunk *newobj = XCNEW (struct value_history_chunk);
1917
1918 newobj->next = value_history_chain;
1919 value_history_chain = newobj;
1920 }
1921
1922 value_history_chain->values[i] = val;
1923
1924 /* Now we regard value_history_count as origin-one
1925 and as applying to the value just stored. */
1926
1927 return ++value_history_count;
1928 }
1929
1930 /* Return a copy of the value in the history with sequence number NUM. */
1931
1932 struct value *
1933 access_value_history (int num)
1934 {
1935 struct value_history_chunk *chunk;
1936 int i;
1937 int absnum = num;
1938
1939 if (absnum <= 0)
1940 absnum += value_history_count;
1941
1942 if (absnum <= 0)
1943 {
1944 if (num == 0)
1945 error (_("The history is empty."));
1946 else if (num == 1)
1947 error (_("There is only one value in the history."));
1948 else
1949 error (_("History does not go back to $$%d."), -num);
1950 }
1951 if (absnum > value_history_count)
1952 error (_("History has not yet reached $%d."), absnum);
1953
1954 absnum--;
1955
1956 /* Now absnum is always absolute and origin zero. */
1957
1958 chunk = value_history_chain;
1959 for (i = (value_history_count - 1) / VALUE_HISTORY_CHUNK
1960 - absnum / VALUE_HISTORY_CHUNK;
1961 i > 0; i--)
1962 chunk = chunk->next;
1963
1964 return value_copy (chunk->values[absnum % VALUE_HISTORY_CHUNK]);
1965 }
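
/* Illustrative note, not part of the original sources: assuming
   VALUE_HISTORY_CHUNK is 60 and value_history_count is 130, the chunk
   list holds, newest first, values $121..$130, $61..$120 and $1..$60.
   A request for $65 gives absnum 64; the loop above then walks
   (129 / 60) - (64 / 60) == 1 chunk from the head and picks entry
   64 % 60 == 4 of that chunk, which is exactly $65.  */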
1966
1967 static void
1968 show_values (char *num_exp, int from_tty)
1969 {
1970 int i;
1971 struct value *val;
1972 static int num = 1;
1973
1974 if (num_exp)
1975 {
1976 /* "show values +" should print from the stored position.
1977 "show values <exp>" should print around value number <exp>. */
1978 if (num_exp[0] != '+' || num_exp[1] != '\0')
1979 num = parse_and_eval_long (num_exp) - 5;
1980 }
1981 else
1982 {
1983 /* "show values" means print the last 10 values. */
1984 num = value_history_count - 9;
1985 }
1986
1987 if (num <= 0)
1988 num = 1;
1989
1990 for (i = num; i < num + 10 && i <= value_history_count; i++)
1991 {
1992 struct value_print_options opts;
1993
1994 val = access_value_history (i);
1995 printf_filtered (("$%d = "), i);
1996 get_user_print_options (&opts);
1997 value_print (val, gdb_stdout, &opts);
1998 printf_filtered (("\n"));
1999 }
2000
2001 /* The next "show values +" should start after what we just printed. */
2002 num += 10;
2003
2004 /* Hitting just return after this command should do the same thing as
2005 "show values +". If num_exp is null, this is unnecessary, since
2006 "show values +" is not useful after "show values". */
2007 if (from_tty && num_exp)
2008 {
2009 num_exp[0] = '+';
2010 num_exp[1] = '\0';
2011 }
2012 }
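
/* Illustrative note, not part of the original sources: "show values 37"
   sets NUM to 32 and prints $32 through $41, i.e. roughly ten values
   centered on $37.  NUM is then advanced to 42, so a subsequent
   "show values +" (or just hitting return) continues with $42..$51.  */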
2013 \f
2014 enum internalvar_kind
2015 {
2016 /* The internal variable is empty. */
2017 INTERNALVAR_VOID,
2018
2019 /* The value of the internal variable is provided directly as
2020 a GDB value object. */
2021 INTERNALVAR_VALUE,
2022
2023 /* A fresh value is computed via a call-back routine on every
2024 access to the internal variable. */
2025 INTERNALVAR_MAKE_VALUE,
2026
2027 /* The internal variable holds a GDB internal convenience function. */
2028 INTERNALVAR_FUNCTION,
2029
2030 /* The variable holds an integer value. */
2031 INTERNALVAR_INTEGER,
2032
2033 /* The variable holds a GDB-provided string. */
2034 INTERNALVAR_STRING,
2035 };
2036
2037 union internalvar_data
2038 {
2039 /* A value object used with INTERNALVAR_VALUE. */
2040 struct value *value;
2041
2042 /* The call-back routine used with INTERNALVAR_MAKE_VALUE. */
2043 struct
2044 {
2045 /* The functions to call. */
2046 const struct internalvar_funcs *functions;
2047
2048 /* The function's user-data. */
2049 void *data;
2050 } make_value;
2051
2052 /* The internal function used with INTERNALVAR_FUNCTION. */
2053 struct
2054 {
2055 struct internal_function *function;
2056 /* True if this is the canonical name for the function. */
2057 int canonical;
2058 } fn;
2059
2060 /* An integer value used with INTERNALVAR_INTEGER. */
2061 struct
2062 {
2063 /* If type is non-NULL, it will be used as the type to generate
2064 a value for this internal variable. If type is NULL, a default
2065 integer type for the architecture is used. */
2066 struct type *type;
2067 LONGEST val;
2068 } integer;
2069
2070 /* A string value used with INTERNALVAR_STRING. */
2071 char *string;
2072 };
2073
2074 /* Internal variables. These are variables within the debugger
2075 that hold values assigned by debugger commands.
2076 The user refers to them with a '$' prefix
2077 that does not appear in the variable names stored internally. */
2078
2079 struct internalvar
2080 {
2081 struct internalvar *next;
2082 char *name;
2083
2084 /* We support various different kinds of content of an internal variable.
2085 enum internalvar_kind specifies the kind, and union internalvar_data
2086 provides the data associated with this particular kind. */
2087
2088 enum internalvar_kind kind;
2089
2090 union internalvar_data u;
2091 };
2092
2093 static struct internalvar *internalvars;
2094
2095 /* If the variable does not already exist, create it and give it the
2096 value given. If no value is given then the default is zero. */
2097 static void
2098 init_if_undefined_command (char* args, int from_tty)
2099 {
2100 struct internalvar* intvar;
2101
2102 /* Parse the expression - this is taken from set_command(). */
2103 expression_up expr = parse_expression (args);
2104
2105 /* Validate the expression.
2106 Was the expression an assignment?
2107 Or even an expression at all? */
2108 if (expr->nelts == 0 || expr->elts[0].opcode != BINOP_ASSIGN)
2109 error (_("Init-if-undefined requires an assignment expression."));
2110
2111 /* Extract the variable from the parsed expression.
2112 In the case of an assign the lvalue will be in elts[1] and elts[2]. */
2113 if (expr->elts[1].opcode != OP_INTERNALVAR)
2114 error (_("The first parameter to init-if-undefined "
2115 "should be a GDB variable."));
2116 intvar = expr->elts[2].internalvar;
2117
2118 /* Only evaluate the expression if the lvalue is void.
2119 This may still fail if the expression is invalid. */
2120 if (intvar->kind == INTERNALVAR_VOID)
2121 evaluate_expression (expr.get ());
2122 }
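
/* Illustrative note, not part of the original sources: a typical use is

     (gdb) init-if-undefined $count = 0

   which evaluates the assignment only while $count is still void, so
   re-sourcing a command file does not reset an already defined
   counter.  ($count is just an example variable name.)  */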
2123
2124
2125 /* Look up an internal variable with name NAME. NAME should not
2126 normally include a dollar sign.
2127
2128 If the specified internal variable does not exist,
2129 the return value is NULL. */
2130
2131 struct internalvar *
2132 lookup_only_internalvar (const char *name)
2133 {
2134 struct internalvar *var;
2135
2136 for (var = internalvars; var; var = var->next)
2137 if (strcmp (var->name, name) == 0)
2138 return var;
2139
2140 return NULL;
2141 }
2142
2143 /* Complete NAME by comparing it to the names of internal
2144 variables. */
2145
2146 void
2147 complete_internalvar (completion_tracker &tracker, const char *name)
2148 {
2149 struct internalvar *var;
2150 int len;
2151
2152 len = strlen (name);
2153
2154 for (var = internalvars; var; var = var->next)
2155 if (strncmp (var->name, name, len) == 0)
2156 {
2157 gdb::unique_xmalloc_ptr<char> copy (xstrdup (var->name));
2158
2159 tracker.add_completion (std::move (copy));
2160 }
2161 }
2162
2163 /* Create an internal variable with name NAME and with a void value.
2164 NAME should not normally include a dollar sign. */
2165
2166 struct internalvar *
2167 create_internalvar (const char *name)
2168 {
2169 struct internalvar *var = XNEW (struct internalvar);
2170
2171 var->name = concat (name, (char *)NULL);
2172 var->kind = INTERNALVAR_VOID;
2173 var->next = internalvars;
2174 internalvars = var;
2175 return var;
2176 }
2177
2178 /* Create an internal variable with name NAME and register FUN as the
2179 function that value_of_internalvar uses to create a value whenever
2180 this variable is referenced. NAME should not normally include a
2181 dollar sign. DATA is passed uninterpreted to FUN when it is
2182 called. CLEANUP, if not NULL, is called when the internal variable
2183 is destroyed. It is passed DATA as its only argument. */
2184
2185 struct internalvar *
2186 create_internalvar_type_lazy (const char *name,
2187 const struct internalvar_funcs *funcs,
2188 void *data)
2189 {
2190 struct internalvar *var = create_internalvar (name);
2191
2192 var->kind = INTERNALVAR_MAKE_VALUE;
2193 var->u.make_value.functions = funcs;
2194 var->u.make_value.data = data;
2195 return var;
2196 }
2197
2198 /* See documentation in value.h. */
2199
2200 int
2201 compile_internalvar_to_ax (struct internalvar *var,
2202 struct agent_expr *expr,
2203 struct axs_value *value)
2204 {
2205 if (var->kind != INTERNALVAR_MAKE_VALUE
2206 || var->u.make_value.functions->compile_to_ax == NULL)
2207 return 0;
2208
2209 var->u.make_value.functions->compile_to_ax (var, expr, value,
2210 var->u.make_value.data);
2211 return 1;
2212 }
2213
2214 /* Look up an internal variable with name NAME. NAME should not
2215 normally include a dollar sign.
2216
2217 If the specified internal variable does not exist,
2218 one is created, with a void value. */
2219
2220 struct internalvar *
2221 lookup_internalvar (const char *name)
2222 {
2223 struct internalvar *var;
2224
2225 var = lookup_only_internalvar (name);
2226 if (var)
2227 return var;
2228
2229 return create_internalvar (name);
2230 }
2231
2232 /* Return current value of internal variable VAR. For variables that
2233 are not inherently typed, use a value type appropriate for GDBARCH. */
2234
2235 struct value *
2236 value_of_internalvar (struct gdbarch *gdbarch, struct internalvar *var)
2237 {
2238 struct value *val;
2239 struct trace_state_variable *tsv;
2240
2241 /* If there is a trace state variable of the same name, assume that
2242 is what we really want to see. */
2243 tsv = find_trace_state_variable (var->name);
2244 if (tsv)
2245 {
2246 tsv->value_known = target_get_trace_state_variable_value (tsv->number,
2247 &(tsv->value));
2248 if (tsv->value_known)
2249 val = value_from_longest (builtin_type (gdbarch)->builtin_int64,
2250 tsv->value);
2251 else
2252 val = allocate_value (builtin_type (gdbarch)->builtin_void);
2253 return val;
2254 }
2255
2256 switch (var->kind)
2257 {
2258 case INTERNALVAR_VOID:
2259 val = allocate_value (builtin_type (gdbarch)->builtin_void);
2260 break;
2261
2262 case INTERNALVAR_FUNCTION:
2263 val = allocate_value (builtin_type (gdbarch)->internal_fn);
2264 break;
2265
2266 case INTERNALVAR_INTEGER:
2267 if (!var->u.integer.type)
2268 val = value_from_longest (builtin_type (gdbarch)->builtin_int,
2269 var->u.integer.val);
2270 else
2271 val = value_from_longest (var->u.integer.type, var->u.integer.val);
2272 break;
2273
2274 case INTERNALVAR_STRING:
2275 val = value_cstring (var->u.string, strlen (var->u.string),
2276 builtin_type (gdbarch)->builtin_char);
2277 break;
2278
2279 case INTERNALVAR_VALUE:
2280 val = value_copy (var->u.value);
2281 if (value_lazy (val))
2282 value_fetch_lazy (val);
2283 break;
2284
2285 case INTERNALVAR_MAKE_VALUE:
2286 val = (*var->u.make_value.functions->make_value) (gdbarch, var,
2287 var->u.make_value.data);
2288 break;
2289
2290 default:
2291 internal_error (__FILE__, __LINE__, _("bad kind"));
2292 }
2293
2294 /* Change the VALUE_LVAL to lval_internalvar so that future operations
2295 on this value go back to affect the original internal variable.
2296
2297 Do not do this for INTERNALVAR_MAKE_VALUE variables, as those have
2298 no underlying modifiable state in the internal variable.
2299
2300 Likewise, if the variable's value is a computed lvalue, we want
2301 references to it to produce another computed lvalue, where
2302 references and assignments actually operate through the
2303 computed value's functions.
2304
2305 This means that internal variables with computed values
2306 behave a little differently from other internal variables:
2307 assignments to them don't just replace the previous value
2308 altogether. At the moment, this seems like the behavior we
2309 want. */
2310
2311 if (var->kind != INTERNALVAR_MAKE_VALUE
2312 && val->lval != lval_computed)
2313 {
2314 VALUE_LVAL (val) = lval_internalvar;
2315 VALUE_INTERNALVAR (val) = var;
2316 }
2317
2318 return val;
2319 }
2320
2321 int
2322 get_internalvar_integer (struct internalvar *var, LONGEST *result)
2323 {
2324 if (var->kind == INTERNALVAR_INTEGER)
2325 {
2326 *result = var->u.integer.val;
2327 return 1;
2328 }
2329
2330 if (var->kind == INTERNALVAR_VALUE)
2331 {
2332 struct type *type = check_typedef (value_type (var->u.value));
2333
2334 if (TYPE_CODE (type) == TYPE_CODE_INT)
2335 {
2336 *result = value_as_long (var->u.value);
2337 return 1;
2338 }
2339 }
2340
2341 return 0;
2342 }
2343
2344 static int
2345 get_internalvar_function (struct internalvar *var,
2346 struct internal_function **result)
2347 {
2348 switch (var->kind)
2349 {
2350 case INTERNALVAR_FUNCTION:
2351 *result = var->u.fn.function;
2352 return 1;
2353
2354 default:
2355 return 0;
2356 }
2357 }
2358
2359 void
2360 set_internalvar_component (struct internalvar *var,
2361 LONGEST offset, LONGEST bitpos,
2362 LONGEST bitsize, struct value *newval)
2363 {
2364 gdb_byte *addr;
2365 struct gdbarch *arch;
2366 int unit_size;
2367
2368 switch (var->kind)
2369 {
2370 case INTERNALVAR_VALUE:
2371 addr = value_contents_writeable (var->u.value);
2372 arch = get_value_arch (var->u.value);
2373 unit_size = gdbarch_addressable_memory_unit_size (arch);
2374
2375 if (bitsize)
2376 modify_field (value_type (var->u.value), addr + offset,
2377 value_as_long (newval), bitpos, bitsize);
2378 else
2379 memcpy (addr + offset * unit_size, value_contents (newval),
2380 TYPE_LENGTH (value_type (newval)));
2381 break;
2382
2383 default:
2384 /* We can never get a component of any other kind. */
2385 internal_error (__FILE__, __LINE__, _("set_internalvar_component"));
2386 }
2387 }
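
/* Illustrative note, not part of the original sources: this path is
   reached for assignments such as "set $v.field = 3" when the
   convenience variable $v holds a structure value; the store modifies
   the INTERNALVAR_VALUE copy owned by the variable and never writes
   back to the inferior.  */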
2388
2389 void
2390 set_internalvar (struct internalvar *var, struct value *val)
2391 {
2392 enum internalvar_kind new_kind;
2393 union internalvar_data new_data = { 0 };
2394
2395 if (var->kind == INTERNALVAR_FUNCTION && var->u.fn.canonical)
2396 error (_("Cannot overwrite convenience function %s"), var->name);
2397
2398 /* Prepare new contents. */
2399 switch (TYPE_CODE (check_typedef (value_type (val))))
2400 {
2401 case TYPE_CODE_VOID:
2402 new_kind = INTERNALVAR_VOID;
2403 break;
2404
2405 case TYPE_CODE_INTERNAL_FUNCTION:
2406 gdb_assert (VALUE_LVAL (val) == lval_internalvar);
2407 new_kind = INTERNALVAR_FUNCTION;
2408 get_internalvar_function (VALUE_INTERNALVAR (val),
2409 &new_data.fn.function);
2410 /* Copies created here are never canonical. */
2411 break;
2412
2413 default:
2414 new_kind = INTERNALVAR_VALUE;
2415 new_data.value = value_copy (val);
2416 new_data.value->modifiable = 1;
2417
2418 /* Force the value to be fetched from the target now, to avoid problems
2419 later when this internalvar is referenced and the target is gone or
2420 has changed. */
2421 if (value_lazy (new_data.value))
2422 value_fetch_lazy (new_data.value);
2423
2424 /* Release the value from the value chain to prevent it from being
2425 deleted by free_all_values. From here on this function should not
2426 call error () until new_data is installed into the var->u to avoid
2427 leaking memory. */
2428 release_value (new_data.value);
2429
2430 /* Internal variables which are created from values with a dynamic
2431 location don't need the location property of the origin anymore.
2432 The resolved dynamic location is used prior to any other address
2433 when accessing the value.
2434 If we kept it, we would still refer to the origin value.
2435 Remove the location property in case it exists. */
2436 remove_dyn_prop (DYN_PROP_DATA_LOCATION, value_type (new_data.value));
2437
2438 break;
2439 }
2440
2441 /* Clean up old contents. */
2442 clear_internalvar (var);
2443
2444 /* Switch over. */
2445 var->kind = new_kind;
2446 var->u = new_data;
2447 /* End code which must not call error(). */
2448 }
2449
2450 void
2451 set_internalvar_integer (struct internalvar *var, LONGEST l)
2452 {
2453 /* Clean up old contents. */
2454 clear_internalvar (var);
2455
2456 var->kind = INTERNALVAR_INTEGER;
2457 var->u.integer.type = NULL;
2458 var->u.integer.val = l;
2459 }
2460
2461 void
2462 set_internalvar_string (struct internalvar *var, const char *string)
2463 {
2464 /* Clean up old contents. */
2465 clear_internalvar (var);
2466
2467 var->kind = INTERNALVAR_STRING;
2468 var->u.string = xstrdup (string);
2469 }
2470
2471 static void
2472 set_internalvar_function (struct internalvar *var, struct internal_function *f)
2473 {
2474 /* Clean up old contents. */
2475 clear_internalvar (var);
2476
2477 var->kind = INTERNALVAR_FUNCTION;
2478 var->u.fn.function = f;
2479 var->u.fn.canonical = 1;
2480 /* Variables installed here are always the canonical version. */
2481 }
2482
2483 void
2484 clear_internalvar (struct internalvar *var)
2485 {
2486 /* Clean up old contents. */
2487 switch (var->kind)
2488 {
2489 case INTERNALVAR_VALUE:
2490 value_free (var->u.value);
2491 break;
2492
2493 case INTERNALVAR_STRING:
2494 xfree (var->u.string);
2495 break;
2496
2497 case INTERNALVAR_MAKE_VALUE:
2498 if (var->u.make_value.functions->destroy != NULL)
2499 var->u.make_value.functions->destroy (var->u.make_value.data);
2500 break;
2501
2502 default:
2503 break;
2504 }
2505
2506 /* Reset to void kind. */
2507 var->kind = INTERNALVAR_VOID;
2508 }
2509
2510 char *
2511 internalvar_name (const struct internalvar *var)
2512 {
2513 return var->name;
2514 }
2515
2516 static struct internal_function *
2517 create_internal_function (const char *name,
2518 internal_function_fn handler, void *cookie)
2519 {
2520 struct internal_function *ifn = XNEW (struct internal_function);
2521
2522 ifn->name = xstrdup (name);
2523 ifn->handler = handler;
2524 ifn->cookie = cookie;
2525 return ifn;
2526 }
2527
2528 char *
2529 value_internal_function_name (struct value *val)
2530 {
2531 struct internal_function *ifn;
2532 int result;
2533
2534 gdb_assert (VALUE_LVAL (val) == lval_internalvar);
2535 result = get_internalvar_function (VALUE_INTERNALVAR (val), &ifn);
2536 gdb_assert (result);
2537
2538 return ifn->name;
2539 }
2540
2541 struct value *
2542 call_internal_function (struct gdbarch *gdbarch,
2543 const struct language_defn *language,
2544 struct value *func, int argc, struct value **argv)
2545 {
2546 struct internal_function *ifn;
2547 int result;
2548
2549 gdb_assert (VALUE_LVAL (func) == lval_internalvar);
2550 result = get_internalvar_function (VALUE_INTERNALVAR (func), &ifn);
2551 gdb_assert (result);
2552
2553 return (*ifn->handler) (gdbarch, language, ifn->cookie, argc, argv);
2554 }
2555
2556 /* The 'function' command. This does nothing -- it is just a
2557 placeholder to let "help function NAME" work. This is also used as
2558 the implementation of the sub-command that is created when
2559 registering an internal function. */
2560 static void
2561 function_command (const char *command, int from_tty)
2562 {
2563 /* Do nothing. */
2564 }
2565
2566 /* Clean up if an internal function's command is destroyed. */
2567 static void
2568 function_destroyer (struct cmd_list_element *self, void *ignore)
2569 {
2570 xfree ((char *) self->name);
2571 xfree ((char *) self->doc);
2572 }
2573
2574 /* Add a new internal function. NAME is the name of the function; DOC
2575 is a documentation string describing the function. HANDLER is
2576 called when the function is invoked. COOKIE is an arbitrary
2577 pointer which is passed to HANDLER and is intended for "user
2578 data". */
2579 void
2580 add_internal_function (const char *name, const char *doc,
2581 internal_function_fn handler, void *cookie)
2582 {
2583 struct cmd_list_element *cmd;
2584 struct internal_function *ifn;
2585 struct internalvar *var = lookup_internalvar (name);
2586
2587 ifn = create_internal_function (name, handler, cookie);
2588 set_internalvar_function (var, ifn);
2589
2590 cmd = add_cmd (xstrdup (name), no_class, function_command, (char *) doc,
2591 &functionlist);
2592 cmd->destroyer = function_destroyer;
2593 }
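
/* Illustrative sketch, not part of the original sources: a caller
   would register a convenience function roughly as below.  The handler
   name and behavior are purely hypothetical; the handler signature
   mirrors the call made in call_internal_function above.

     static struct value *
     hypothetical_argc_handler (struct gdbarch *gdbarch,
                                const struct language_defn *language,
                                void *cookie, int argc, struct value **argv)
     {
       return value_from_longest (builtin_type (gdbarch)->builtin_int,
                                  argc);
     }

     add_internal_function ("_hypothetical_argc",
                            _("Return the number of arguments."),
                            hypothetical_argc_handler, NULL);

   After registration, "print $_hypothetical_argc (1, 2)" would invoke
   the handler with ARGC == 2.  */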
2594
2595 /* Update VALUE before discarding OBJFILE. COPIED_TYPES is used to
2596 prevent cycles / duplicates. */
2597
2598 void
2599 preserve_one_value (struct value *value, struct objfile *objfile,
2600 htab_t copied_types)
2601 {
2602 if (TYPE_OBJFILE (value->type) == objfile)
2603 value->type = copy_type_recursive (objfile, value->type, copied_types);
2604
2605 if (TYPE_OBJFILE (value->enclosing_type) == objfile)
2606 value->enclosing_type = copy_type_recursive (objfile,
2607 value->enclosing_type,
2608 copied_types);
2609 }
2610
2611 /* Likewise for internal variable VAR. */
2612
2613 static void
2614 preserve_one_internalvar (struct internalvar *var, struct objfile *objfile,
2615 htab_t copied_types)
2616 {
2617 switch (var->kind)
2618 {
2619 case INTERNALVAR_INTEGER:
2620 if (var->u.integer.type && TYPE_OBJFILE (var->u.integer.type) == objfile)
2621 var->u.integer.type
2622 = copy_type_recursive (objfile, var->u.integer.type, copied_types);
2623 break;
2624
2625 case INTERNALVAR_VALUE:
2626 preserve_one_value (var->u.value, objfile, copied_types);
2627 break;
2628 }
2629 }
2630
2631 /* Update the internal variables and value history when OBJFILE is
2632 discarded; we must copy the types out of the objfile. New global types
2633 will be created for every convenience variable which currently points to
2634 this objfile's types, and the convenience variables will be adjusted to
2635 use the new global types. */
2636
2637 void
2638 preserve_values (struct objfile *objfile)
2639 {
2640 htab_t copied_types;
2641 struct value_history_chunk *cur;
2642 struct internalvar *var;
2643 int i;
2644
2645 /* Create the hash table. We allocate on the objfile's obstack, since
2646 it is soon to be deleted. */
2647 copied_types = create_copied_types_hash (objfile);
2648
2649 for (cur = value_history_chain; cur; cur = cur->next)
2650 for (i = 0; i < VALUE_HISTORY_CHUNK; i++)
2651 if (cur->values[i])
2652 preserve_one_value (cur->values[i], objfile, copied_types);
2653
2654 for (var = internalvars; var; var = var->next)
2655 preserve_one_internalvar (var, objfile, copied_types);
2656
2657 preserve_ext_lang_values (objfile, copied_types);
2658
2659 htab_delete (copied_types);
2660 }
2661
2662 static void
2663 show_convenience (const char *ignore, int from_tty)
2664 {
2665 struct gdbarch *gdbarch = get_current_arch ();
2666 struct internalvar *var;
2667 int varseen = 0;
2668 struct value_print_options opts;
2669
2670 get_user_print_options (&opts);
2671 for (var = internalvars; var; var = var->next)
2672 {
2673
2674 if (!varseen)
2675 {
2676 varseen = 1;
2677 }
2678 printf_filtered (("$%s = "), var->name);
2679
2680 TRY
2681 {
2682 struct value *val;
2683
2684 val = value_of_internalvar (gdbarch, var);
2685 value_print (val, gdb_stdout, &opts);
2686 }
2687 CATCH (ex, RETURN_MASK_ERROR)
2688 {
2689 fprintf_filtered (gdb_stdout, _("<error: %s>"), ex.message);
2690 }
2691 END_CATCH
2692
2693 printf_filtered (("\n"));
2694 }
2695 if (!varseen)
2696 {
2697 /* This text does not mention convenience functions on purpose.
2698 The user can't create them except via Python, and if Python support
2699 is installed this message will never be printed ($_streq will
2700 exist). */
2701 printf_unfiltered (_("No debugger convenience variables now defined.\n"
2702 "Convenience variables have "
2703 "names starting with \"$\";\n"
2704 "use \"set\" as in \"set "
2705 "$foo = 5\" to define them.\n"));
2706 }
2707 }
2708 \f
2709 /* Return the TYPE_CODE_XMETHOD value corresponding to WORKER. */
2710
2711 struct value *
2712 value_of_xmethod (struct xmethod_worker *worker)
2713 {
2714 if (worker->value == NULL)
2715 {
2716 struct value *v;
2717
2718 v = allocate_value (builtin_type (target_gdbarch ())->xmethod);
2719 v->lval = lval_xcallable;
2720 v->location.xm_worker = worker;
2721 v->modifiable = 0;
2722 worker->value = v;
2723 }
2724
2725 return worker->value;
2726 }
2727
2728 /* Return the type of the result of TYPE_CODE_XMETHOD value METHOD. */
2729
2730 struct type *
2731 result_type_of_xmethod (struct value *method, int argc, struct value **argv)
2732 {
2733 gdb_assert (TYPE_CODE (value_type (method)) == TYPE_CODE_XMETHOD
2734 && method->lval == lval_xcallable && argc > 0);
2735
2736 return get_xmethod_result_type (method->location.xm_worker,
2737 argv[0], argv + 1, argc - 1);
2738 }
2739
2740 /* Call the xmethod corresponding to the TYPE_CODE_XMETHOD value METHOD. */
2741
2742 struct value *
2743 call_xmethod (struct value *method, int argc, struct value **argv)
2744 {
2745 gdb_assert (TYPE_CODE (value_type (method)) == TYPE_CODE_XMETHOD
2746 && method->lval == lval_xcallable && argc > 0);
2747
2748 return invoke_xmethod (method->location.xm_worker,
2749 argv[0], argv + 1, argc - 1);
2750 }
2751 \f
2752 /* Extract a value as a C number (either long or double).
2753 Knows how to convert fixed values to double, or
2754 floating values to long.
2755 Does not deallocate the value. */
2756
2757 LONGEST
2758 value_as_long (struct value *val)
2759 {
2760 /* This coerces arrays and functions, which is necessary (e.g.
2761 in disassemble_command). It also dereferences references, which
2762 I suspect is the most logical thing to do. */
2763 val = coerce_array (val);
2764 return unpack_long (value_type (val), value_contents (val));
2765 }
2766
2767 DOUBLEST
2768 value_as_double (struct value *val)
2769 {
2770 DOUBLEST foo;
2771 int inv;
2772
2773 foo = unpack_double (value_type (val), value_contents (val), &inv);
2774 if (inv)
2775 error (_("Invalid floating value found in program."));
2776 return foo;
2777 }
2778
2779 /* Extract a value as a C pointer. Does not deallocate the value.
2780 Note that val's type may not actually be a pointer; value_as_long
2781 handles all the cases. */
2782 CORE_ADDR
2783 value_as_address (struct value *val)
2784 {
2785 struct gdbarch *gdbarch = get_type_arch (value_type (val));
2786
2787 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
2788 whether we want this to be true eventually. */
2789 #if 0
2790 /* gdbarch_addr_bits_remove is wrong if we are being called for a
2791 non-address (e.g. argument to "signal", "info break", etc.), or
2792 for pointers to char, in which the low bits *are* significant. */
2793 return gdbarch_addr_bits_remove (gdbarch, value_as_long (val));
2794 #else
2795
2796 /* There are several targets (IA-64, PowerPC, and others) which
2797 don't represent pointers to functions as simply the address of
2798 the function's entry point. For example, on the IA-64, a
2799 function pointer points to a two-word descriptor, generated by
2800 the linker, which contains the function's entry point, and the
2801 value the IA-64 "global pointer" register should have --- to
2802 support position-independent code. The linker generates
2803 descriptors only for those functions whose addresses are taken.
2804
2805 On such targets, it's difficult for GDB to convert an arbitrary
2806 function address into a function pointer; it has to either find
2807 an existing descriptor for that function, or call malloc and
2808 build its own. On some targets, it is impossible for GDB to
2809 build a descriptor at all: the descriptor must contain a jump
2810 instruction; data memory cannot be executed; and code memory
2811 cannot be modified.
2812
2813 Upon entry to this function, if VAL is a value of type `function'
2814 (that is, TYPE_CODE (VALUE_TYPE (val)) == TYPE_CODE_FUNC), then
2815 value_address (val) is the address of the function. This is what
2816 you'll get if you evaluate an expression like `main'. The call
2817 to COERCE_ARRAY below actually does all the usual unary
2818 conversions, which includes converting values of type `function'
2819 to `pointer to function'. This is the challenging conversion
2820 discussed above. Then, `unpack_long' will convert that pointer
2821 back into an address.
2822
2823 So, suppose the user types `disassemble foo' on an architecture
2824 with a strange function pointer representation, on which GDB
2825 cannot build its own descriptors, and suppose further that `foo'
2826 has no linker-built descriptor. The address->pointer conversion
2827 will signal an error and prevent the command from running, even
2828 though the next step would have been to convert the pointer
2829 directly back into the same address.
2830
2831 The following shortcut avoids this whole mess. If VAL is a
2832 function, just return its address directly. */
2833 if (TYPE_CODE (value_type (val)) == TYPE_CODE_FUNC
2834 || TYPE_CODE (value_type (val)) == TYPE_CODE_METHOD)
2835 return value_address (val);
2836
2837 val = coerce_array (val);
2838
2839 /* Some architectures (e.g. Harvard) map instruction and data
2840 addresses onto a single large unified address space. For
2841 instance, an architecture may consider a large integer in the
2842 range 0x10000000 .. 0x1000ffff to already represent a data
2843 address (hence not needing a pointer-to-address conversion) while
2844 a small integer would still need an integer-to-pointer-to-address
2845 conversion. Just assume such architectures handle all
2846 integer conversions in a single function. */
2847
2848 /* JimB writes:
2849
2850 I think INTEGER_TO_ADDRESS is a good idea as proposed --- but we
2851 must admonish GDB hackers to make sure its behavior matches the
2852 compiler's, whenever possible.
2853
2854 In general, I think GDB should evaluate expressions the same way
2855 the compiler does. When the user copies an expression out of
2856 their source code and hands it to a `print' command, they should
2857 get the same value the compiler would have computed. Any
2858 deviation from this rule can cause major confusion and annoyance,
2859 and needs to be justified carefully. In other words, GDB doesn't
2860 really have the freedom to do these conversions in clever and
2861 useful ways.
2862
2863 AndrewC pointed out that users aren't complaining about how GDB
2864 casts integers to pointers; they are complaining that they can't
2865 take an address from a disassembly listing and give it to `x/i'.
2866 This is certainly important.
2867
2868 Adding an architecture method like integer_to_address() certainly
2869 makes it possible for GDB to "get it right" in all circumstances
2870 --- the target has complete control over how things get done, so
2871 people can Do The Right Thing for their target without breaking
2872 anyone else. The standard doesn't specify how integers get
2873 converted to pointers; usually, the ABI doesn't either, but
2874 ABI-specific code is a more reasonable place to handle it. */
2875
2876 if (TYPE_CODE (value_type (val)) != TYPE_CODE_PTR
2877 && !TYPE_IS_REFERENCE (value_type (val))
2878 && gdbarch_integer_to_address_p (gdbarch))
2879 return gdbarch_integer_to_address (gdbarch, value_type (val),
2880 value_contents (val));
2881
2882 return unpack_long (value_type (val), value_contents (val));
2883 #endif
2884 }
2885 \f
2886 /* Unpack raw data (copied from debuggee, target byte order) at VALADDR
2887 as a long, or as a double, assuming the raw data is described
2888 by type TYPE. Knows how to convert different sizes of values
2889 and can convert between fixed and floating point. We don't assume
2890 any alignment for the raw data. Return value is in host byte order.
2891
2892 If you want functions and arrays to be coerced to pointers, and
2893 references to be dereferenced, call value_as_long() instead.
2894
2895 C++: It is assumed that the front-end has taken care of
2896 all matters concerning pointers to members. A pointer
2897 to member which reaches here is considered to be equivalent
2898 to an INT (or some size). After all, it is only an offset. */
2899
2900 LONGEST
2901 unpack_long (struct type *type, const gdb_byte *valaddr)
2902 {
2903 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
2904 enum type_code code = TYPE_CODE (type);
2905 int len = TYPE_LENGTH (type);
2906 int nosign = TYPE_UNSIGNED (type);
2907
2908 switch (code)
2909 {
2910 case TYPE_CODE_TYPEDEF:
2911 return unpack_long (check_typedef (type), valaddr);
2912 case TYPE_CODE_ENUM:
2913 case TYPE_CODE_FLAGS:
2914 case TYPE_CODE_BOOL:
2915 case TYPE_CODE_INT:
2916 case TYPE_CODE_CHAR:
2917 case TYPE_CODE_RANGE:
2918 case TYPE_CODE_MEMBERPTR:
2919 if (nosign)
2920 return extract_unsigned_integer (valaddr, len, byte_order);
2921 else
2922 return extract_signed_integer (valaddr, len, byte_order);
2923
2924 case TYPE_CODE_FLT:
2925 return (LONGEST) extract_typed_floating (valaddr, type);
2926
2927 case TYPE_CODE_DECFLOAT:
2928 return decimal_to_longest (valaddr, len, byte_order);
2929
2930 case TYPE_CODE_PTR:
2931 case TYPE_CODE_REF:
2932 case TYPE_CODE_RVALUE_REF:
2933 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
2934 whether we want this to be true eventually. */
2935 return extract_typed_address (valaddr, type);
2936
2937 default:
2938 error (_("Value can't be converted to integer."));
2939 }
2940 return 0; /* Placate lint. */
2941 }
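
/* Illustrative note, not part of the original sources: for a two-byte
   signed integer type on a big-endian target, the raw bytes
   { 0xff, 0xfe } unpack to -2, while the same bytes with an unsigned
   type unpack to 65534.  Pointer and reference types instead go
   through extract_typed_address above.  */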
2942
2943 /* Return a double value from the specified type and address.
2944 INVP points to an int which is set to 0 for valid value,
2945 1 for invalid value (bad float format). In either case,
2946 the returned double is OK to use. Argument is in target
2947 format, result is in host format. */
2948
2949 DOUBLEST
2950 unpack_double (struct type *type, const gdb_byte *valaddr, int *invp)
2951 {
2952 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
2953 enum type_code code;
2954 int len;
2955 int nosign;
2956
2957 *invp = 0; /* Assume valid. */
2958 type = check_typedef (type);
2959 code = TYPE_CODE (type);
2960 len = TYPE_LENGTH (type);
2961 nosign = TYPE_UNSIGNED (type);
2962 if (code == TYPE_CODE_FLT)
2963 {
2964 /* NOTE: cagney/2002-02-19: There was a test here to see if the
2965 floating-point value was valid (using the macro
2966 INVALID_FLOAT). That test/macro have been removed.
2967
2968 It turns out that only the VAX defined this macro and then
2969 only in a non-portable way. Fixing the portability problem
2970 wouldn't help since the VAX floating-point code is also badly
2971 bit-rotten. The target needs to add definitions for the
2972 methods gdbarch_float_format and gdbarch_double_format - these
2973 exactly describe the target floating-point format. The
2974 problem here is that the corresponding floatformat_vax_f and
2975 floatformat_vax_d values these methods should be set to are
2976 also not defined either. Oops!
2977
2978 Hopefully someone will add both the missing floatformat
2979 definitions and the new cases for floatformat_is_valid (). */
2980
2981 if (!floatformat_is_valid (floatformat_from_type (type), valaddr))
2982 {
2983 *invp = 1;
2984 return 0.0;
2985 }
2986
2987 return extract_typed_floating (valaddr, type);
2988 }
2989 else if (code == TYPE_CODE_DECFLOAT)
2990 return decimal_to_doublest (valaddr, len, byte_order);
2991 else if (nosign)
2992 {
2993 /* Unsigned -- be sure we compensate for signed LONGEST. */
2994 return (ULONGEST) unpack_long (type, valaddr);
2995 }
2996 else
2997 {
2998 /* Signed -- we are OK with unpack_long. */
2999 return unpack_long (type, valaddr);
3000 }
3001 }
3002
3003 /* Unpack raw data (copied from debuggee, target byte order) at VALADDR
3004 as a CORE_ADDR, assuming the raw data is described by type TYPE.
3005 We don't assume any alignment for the raw data. Return value is in
3006 host byte order.
3007
3008 If you want functions and arrays to be coerced to pointers, and
3009 references to be dereferenced, call value_as_address() instead.
3010
3011 C++: It is assumed that the front-end has taken care of
3012 all matters concerning pointers to members. A pointer
3013 to member which reaches here is considered to be equivalent
3014 to an INT (or some size). After all, it is only an offset. */
3015
3016 CORE_ADDR
3017 unpack_pointer (struct type *type, const gdb_byte *valaddr)
3018 {
3019 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
3020 whether we want this to be true eventually. */
3021 return unpack_long (type, valaddr);
3022 }
3023
3024 \f
3025 /* Get the value of the FIELDNO'th field (which must be static) of
3026 TYPE. */
3027
3028 struct value *
3029 value_static_field (struct type *type, int fieldno)
3030 {
3031 struct value *retval;
3032
3033 switch (TYPE_FIELD_LOC_KIND (type, fieldno))
3034 {
3035 case FIELD_LOC_KIND_PHYSADDR:
3036 retval = value_at_lazy (TYPE_FIELD_TYPE (type, fieldno),
3037 TYPE_FIELD_STATIC_PHYSADDR (type, fieldno));
3038 break;
3039 case FIELD_LOC_KIND_PHYSNAME:
3040 {
3041 const char *phys_name = TYPE_FIELD_STATIC_PHYSNAME (type, fieldno);
3042 /* TYPE_FIELD_NAME (type, fieldno); */
3043 struct block_symbol sym = lookup_symbol (phys_name, 0, VAR_DOMAIN, 0);
3044
3045 if (sym.symbol == NULL)
3046 {
3047 /* With some compilers, e.g. HP aCC, static data members are
3048 reported as non-debuggable symbols. */
3049 struct bound_minimal_symbol msym
3050 = lookup_minimal_symbol (phys_name, NULL, NULL);
3051
3052 if (!msym.minsym)
3053 return allocate_optimized_out_value (type);
3054 else
3055 {
3056 retval = value_at_lazy (TYPE_FIELD_TYPE (type, fieldno),
3057 BMSYMBOL_VALUE_ADDRESS (msym));
3058 }
3059 }
3060 else
3061 retval = value_of_variable (sym.symbol, sym.block);
3062 break;
3063 }
3064 default:
3065 gdb_assert_not_reached ("unexpected field location kind");
3066 }
3067
3068 return retval;
3069 }
3070
3071 /* Change the enclosing type of a value object VAL to NEW_ENCL_TYPE.
3072 You have to be careful here, since the size of the data area for the value
3073 is set by the length of the enclosing type. So if NEW_ENCL_TYPE is bigger
3074 than the old enclosing type, you have to allocate more space for the
3075 data. */
3076
3077 void
3078 set_value_enclosing_type (struct value *val, struct type *new_encl_type)
3079 {
3080 if (TYPE_LENGTH (new_encl_type) > TYPE_LENGTH (value_enclosing_type (val)))
3081 {
3082 check_type_length_before_alloc (new_encl_type);
3083 val->contents
3084 = (gdb_byte *) xrealloc (val->contents, TYPE_LENGTH (new_encl_type));
3085 }
3086
3087 val->enclosing_type = new_encl_type;
3088 }
3089
3090 /* Given a value ARG1 (offset by OFFSET bytes)
3091 of a struct or union type ARG_TYPE,
3092 extract and return the value of one of its (non-static) fields.
3093 FIELDNO says which field. */
3094
3095 struct value *
3096 value_primitive_field (struct value *arg1, LONGEST offset,
3097 int fieldno, struct type *arg_type)
3098 {
3099 struct value *v;
3100 struct type *type;
3101 struct gdbarch *arch = get_value_arch (arg1);
3102 int unit_size = gdbarch_addressable_memory_unit_size (arch);
3103
3104 arg_type = check_typedef (arg_type);
3105 type = TYPE_FIELD_TYPE (arg_type, fieldno);
3106
3107 /* Call check_typedef on our type to make sure that, if TYPE
3108 is a TYPE_CODE_TYPEDEF, its length is set to the length
3109 of the target type instead of zero. However, we do not
3110 replace the typedef type by the target type, because we want
3111 to keep the typedef in order to be able to print the type
3112 description correctly. */
3113 check_typedef (type);
3114
3115 if (TYPE_FIELD_BITSIZE (arg_type, fieldno))
3116 {
3117 /* Handle packed fields.
3118
3119 Create a new value for the bitfield, with bitpos and bitsize
3120 set. If possible, arrange offset and bitpos so that we can
3121 do a single aligned read of the size of the containing type.
3122 Otherwise, adjust offset to the byte containing the first
3123 bit. Assume that the address, offset, and embedded offset
3124 are sufficiently aligned. */
3125
3126 LONGEST bitpos = TYPE_FIELD_BITPOS (arg_type, fieldno);
3127 LONGEST container_bitsize = TYPE_LENGTH (type) * 8;
3128
3129 v = allocate_value_lazy (type);
3130 v->bitsize = TYPE_FIELD_BITSIZE (arg_type, fieldno);
3131 if ((bitpos % container_bitsize) + v->bitsize <= container_bitsize
3132 && TYPE_LENGTH (type) <= (int) sizeof (LONGEST))
3133 v->bitpos = bitpos % container_bitsize;
3134 else
3135 v->bitpos = bitpos % 8;
3136 v->offset = (value_embedded_offset (arg1)
3137 + offset
3138 + (bitpos - v->bitpos) / 8);
3139 set_value_parent (v, arg1);
3140 if (!value_lazy (arg1))
3141 value_fetch_lazy (v);
3142 }
3143 else if (fieldno < TYPE_N_BASECLASSES (arg_type))
3144 {
3145 /* This field is actually a base subobject, so preserve the
3146 entire object's contents for later references to virtual
3147 bases, etc. */
3148 LONGEST boffset;
3149
3150 /* Lazy register values with offsets are not supported. */
3151 if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
3152 value_fetch_lazy (arg1);
3153
3154 /* We special case virtual inheritance here because this
3155 requires access to the contents, which we would rather avoid
3156 for references to ordinary fields of unavailable values. */
3157 if (BASETYPE_VIA_VIRTUAL (arg_type, fieldno))
3158 boffset = baseclass_offset (arg_type, fieldno,
3159 value_contents (arg1),
3160 value_embedded_offset (arg1),
3161 value_address (arg1),
3162 arg1);
3163 else
3164 boffset = TYPE_FIELD_BITPOS (arg_type, fieldno) / 8;
3165
3166 if (value_lazy (arg1))
3167 v = allocate_value_lazy (value_enclosing_type (arg1));
3168 else
3169 {
3170 v = allocate_value (value_enclosing_type (arg1));
3171 value_contents_copy_raw (v, 0, arg1, 0,
3172 TYPE_LENGTH (value_enclosing_type (arg1)));
3173 }
3174 v->type = type;
3175 v->offset = value_offset (arg1);
3176 v->embedded_offset = offset + value_embedded_offset (arg1) + boffset;
3177 }
3178 else if (NULL != TYPE_DATA_LOCATION (type))
3179 {
3180 /* Field is a dynamic data member. */
3181
3182 gdb_assert (0 == offset);
3183 /* We expect an already resolved data location. */
3184 gdb_assert (PROP_CONST == TYPE_DATA_LOCATION_KIND (type));
3185 /* For dynamic data types defer memory allocation
3186 until we actually access the value. */
3187 v = allocate_value_lazy (type);
3188 }
3189 else
3190 {
3191 /* Plain old data member */
3192 offset += (TYPE_FIELD_BITPOS (arg_type, fieldno)
3193 / (HOST_CHAR_BIT * unit_size));
3194
3195 /* Lazy register values with offsets are not supported. */
3196 if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
3197 value_fetch_lazy (arg1);
3198
3199 if (value_lazy (arg1))
3200 v = allocate_value_lazy (type);
3201 else
3202 {
3203 v = allocate_value (type);
3204 value_contents_copy_raw (v, value_embedded_offset (v),
3205 arg1, value_embedded_offset (arg1) + offset,
3206 type_length_units (type));
3207 }
3208 v->offset = (value_offset (arg1) + offset
3209 + value_embedded_offset (arg1));
3210 }
3211 set_value_component_location (v, arg1);
3212 return v;
3213 }
3214
3215 /* Given a value ARG1 of a struct or union type,
3216 extract and return the value of one of its (non-static) fields.
3217 FIELDNO says which field. */
3218
3219 struct value *
3220 value_field (struct value *arg1, int fieldno)
3221 {
3222 return value_primitive_field (arg1, 0, fieldno, value_type (arg1));
3223 }
3224
3225 /* Return a non-virtual function as a value.
3226 F is the list of member functions which contains the desired method.
3227 J is an index into F which provides the desired method.
3228
3229 We only use the symbol for its address, so be happy with either a
3230 full symbol or a minimal symbol. */
3231
3232 struct value *
3233 value_fn_field (struct value **arg1p, struct fn_field *f,
3234 int j, struct type *type,
3235 LONGEST offset)
3236 {
3237 struct value *v;
3238 struct type *ftype = TYPE_FN_FIELD_TYPE (f, j);
3239 const char *physname = TYPE_FN_FIELD_PHYSNAME (f, j);
3240 struct symbol *sym;
3241 struct bound_minimal_symbol msym;
3242
3243 sym = lookup_symbol (physname, 0, VAR_DOMAIN, 0).symbol;
3244 if (sym != NULL)
3245 {
3246 memset (&msym, 0, sizeof (msym));
3247 }
3248 else
3249 {
3250 gdb_assert (sym == NULL);
3251 msym = lookup_bound_minimal_symbol (physname);
3252 if (msym.minsym == NULL)
3253 return NULL;
3254 }
3255
3256 v = allocate_value (ftype);
3257 VALUE_LVAL (v) = lval_memory;
3258 if (sym)
3259 {
3260 set_value_address (v, BLOCK_START (SYMBOL_BLOCK_VALUE (sym)));
3261 }
3262 else
3263 {
3264 /* The minimal symbol might point to a function descriptor;
3265 resolve it to the actual code address instead. */
3266 struct objfile *objfile = msym.objfile;
3267 struct gdbarch *gdbarch = get_objfile_arch (objfile);
3268
3269 set_value_address (v,
3270 gdbarch_convert_from_func_ptr_addr
3271 (gdbarch, BMSYMBOL_VALUE_ADDRESS (msym), &current_target));
3272 }
3273
3274 if (arg1p)
3275 {
3276 if (type != value_type (*arg1p))
3277 *arg1p = value_ind (value_cast (lookup_pointer_type (type),
3278 value_addr (*arg1p)));
3279
3280 /* Move the `this' pointer according to the offset.
3281 VALUE_OFFSET (*arg1p) += offset; */
3282 }
3283
3284 return v;
3285 }
3286
3287 \f
3288
3289 /* Unpack a bitfield of the specified FIELD_TYPE, from the object at
3290 VALADDR, and store the result in *RESULT.
3291 The bitfield starts at BITPOS bits and contains BITSIZE bits.
3292
3293 Extracting bits depends on endianness of the machine. Compute the
3294 number of least significant bits to discard. For big endian machines,
3295 we compute the total number of bits in the anonymous object, subtract
3296 off the bit count from the MSB of the object to the MSB of the
3297 bitfield, then the size of the bitfield, which leaves the LSB discard
3298 count. For little endian machines, the discard count is simply the
3299 number of bits from the LSB of the anonymous object to the LSB of the
3300 bitfield.
3301
3302 If the field is signed, we also do sign extension. */
3303
3304 static LONGEST
3305 unpack_bits_as_long (struct type *field_type, const gdb_byte *valaddr,
3306 LONGEST bitpos, LONGEST bitsize)
3307 {
3308 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (field_type));
3309 ULONGEST val;
3310 ULONGEST valmask;
3311 int lsbcount;
3312 LONGEST bytes_read;
3313 LONGEST read_offset;
3314
3315 /* Read the minimum number of bytes required; there may not be
3316 enough bytes to read an entire ULONGEST. */
3317 field_type = check_typedef (field_type);
3318 if (bitsize)
3319 bytes_read = ((bitpos % 8) + bitsize + 7) / 8;
3320 else
3321 bytes_read = TYPE_LENGTH (field_type);
3322
3323 read_offset = bitpos / 8;
3324
3325 val = extract_unsigned_integer (valaddr + read_offset,
3326 bytes_read, byte_order);
3327
3328 /* Extract bits. See comment above. */
3329
3330 if (gdbarch_bits_big_endian (get_type_arch (field_type)))
3331 lsbcount = (bytes_read * 8 - bitpos % 8 - bitsize);
3332 else
3333 lsbcount = (bitpos % 8);
3334 val >>= lsbcount;
3335
3336 /* If the field does not entirely fill a LONGEST, then zero the sign bits.
3337 If the field is signed, and is negative, then sign extend. */
3338
3339 if ((bitsize > 0) && (bitsize < 8 * (int) sizeof (val)))
3340 {
3341 valmask = (((ULONGEST) 1) << bitsize) - 1;
3342 val &= valmask;
3343 if (!TYPE_UNSIGNED (field_type))
3344 {
3345 if (val & (valmask ^ (valmask >> 1)))
3346 {
3347 val |= ~valmask;
3348 }
3349 }
3350 }
3351
3352 return val;
3353 }
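
/* Illustrative worked example, not part of the original sources, for a
   target whose bits are not big endian: a field with BITPOS == 10 and
   BITSIZE == 3 reads ((10 % 8) + 3 + 7) / 8 == 1 byte at offset
   10 / 8 == 1, shifts it right by 10 % 8 == 2 and masks with
   (1 << 3) - 1 == 7.  If the field type is signed and bit 2 of the
   result is set, the value is sign-extended by OR-ing in ~7.  */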
3354
3355 /* Unpack a field FIELDNO of the specified TYPE, from the object at
3356 VALADDR + EMBEDDED_OFFSET. VALADDR points to the contents of
3357 ORIGINAL_VALUE, which must not be NULL. See
3358 unpack_value_bits_as_long for more details. */
3359
3360 int
3361 unpack_value_field_as_long (struct type *type, const gdb_byte *valaddr,
3362 LONGEST embedded_offset, int fieldno,
3363 const struct value *val, LONGEST *result)
3364 {
3365 int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3366 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3367 struct type *field_type = TYPE_FIELD_TYPE (type, fieldno);
3368 int bit_offset;
3369
3370 gdb_assert (val != NULL);
3371
3372 bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos;
3373 if (value_bits_any_optimized_out (val, bit_offset, bitsize)
3374 || !value_bits_available (val, bit_offset, bitsize))
3375 return 0;
3376
3377 *result = unpack_bits_as_long (field_type, valaddr + embedded_offset,
3378 bitpos, bitsize);
3379 return 1;
3380 }
3381
3382 /* Unpack a field FIELDNO of the specified TYPE, from the anonymous
3383 object at VALADDR. See unpack_bits_as_long for more details. */
3384
3385 LONGEST
3386 unpack_field_as_long (struct type *type, const gdb_byte *valaddr, int fieldno)
3387 {
3388 int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3389 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3390 struct type *field_type = TYPE_FIELD_TYPE (type, fieldno);
3391
3392 return unpack_bits_as_long (field_type, valaddr, bitpos, bitsize);
3393 }
3394
3395 /* Unpack a bitfield of BITSIZE bits found at BITPOS in the object at
3396 VALADDR + EMBEDDEDOFFSET that has the type of DEST_VAL and store
3397 the contents in DEST_VAL, zero or sign extending if the type of
3398 DEST_VAL is wider than BITSIZE. VALADDR points to the contents of
3399 VAL. If the VAL's contents required to extract the bitfield from
3400 are unavailable/optimized out, DEST_VAL is correspondingly
3401 marked unavailable/optimized out. */
3402
3403 void
3404 unpack_value_bitfield (struct value *dest_val,
3405 LONGEST bitpos, LONGEST bitsize,
3406 const gdb_byte *valaddr, LONGEST embedded_offset,
3407 const struct value *val)
3408 {
3409 enum bfd_endian byte_order;
3410 int src_bit_offset;
3411 int dst_bit_offset;
3412 struct type *field_type = value_type (dest_val);
3413
3414 byte_order = gdbarch_byte_order (get_type_arch (field_type));
3415
3416 /* First, unpack and sign extend the bitfield as if it were wholly
3417 valid. Optimized out/unavailable bits are read as zero, but
3418 that's OK, as they'll end up marked below. If the VAL is
3419 wholly-invalid we may have skipped allocating its contents,
3420 though. See allocate_optimized_out_value. */
3421 if (valaddr != NULL)
3422 {
3423 LONGEST num;
3424
3425 num = unpack_bits_as_long (field_type, valaddr + embedded_offset,
3426 bitpos, bitsize);
3427 store_signed_integer (value_contents_raw (dest_val),
3428 TYPE_LENGTH (field_type), byte_order, num);
3429 }
3430
3431 /* Now copy the optimized out / unavailability ranges to the right
3432 bits. */
3433 src_bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos;
3434 if (byte_order == BFD_ENDIAN_BIG)
3435 dst_bit_offset = TYPE_LENGTH (field_type) * TARGET_CHAR_BIT - bitsize;
3436 else
3437 dst_bit_offset = 0;
3438 value_ranges_copy_adjusted (dest_val, dst_bit_offset,
3439 val, src_bit_offset, bitsize);
3440 }
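
/* Illustrative note, not part of the original sources: with a one-byte
   field type and BITSIZE == 3, the optimized-out/unavailable ranges
   copied above land at destination bit offset 8 - 3 == 5 on a
   big-endian target and at bit offset 0 on a little-endian one,
   tracking where store_signed_integer left the significant bits of
   the unpacked number.  */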
3441
3442 /* Return a new value with type TYPE, which is FIELDNO field of the
3443 object at VALADDR + EMBEDDEDOFFSET. VALADDR points to the contents
3444 of VAL. If the VAL's contents required to extract the bitfield
3445 from are unavailable/optimized out, the new value is
3446 correspondingly marked unavailable/optimized out. */
3447
3448 struct value *
3449 value_field_bitfield (struct type *type, int fieldno,
3450 const gdb_byte *valaddr,
3451 LONGEST embedded_offset, const struct value *val)
3452 {
3453 int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3454 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3455 struct value *res_val = allocate_value (TYPE_FIELD_TYPE (type, fieldno));
3456
3457 unpack_value_bitfield (res_val, bitpos, bitsize,
3458 valaddr, embedded_offset, val);
3459
3460 return res_val;
3461 }
3462
3463 /* Modify the value of a bitfield. ADDR points to a block of memory in
3464 target byte order; the bitfield starts in the byte pointed to. FIELDVAL
3465 is the desired value of the field, in host byte order. BITPOS and BITSIZE
3466 indicate which bits (in target bit order) comprise the bitfield.
3467 Requires 0 < BITSIZE <= lbits, 0 <= BITPOS % 8 + BITSIZE <= lbits, and
3468 0 <= BITPOS, where lbits is the size of a LONGEST in bits. */
3469
3470 void
3471 modify_field (struct type *type, gdb_byte *addr,
3472 LONGEST fieldval, LONGEST bitpos, LONGEST bitsize)
3473 {
3474 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
3475 ULONGEST oword;
3476 ULONGEST mask = (ULONGEST) -1 >> (8 * sizeof (ULONGEST) - bitsize);
3477 LONGEST bytesize;
3478
3479 /* Normalize BITPOS. */
3480 addr += bitpos / 8;
3481 bitpos %= 8;
3482
3483 /* If a negative fieldval fits in the field in question, chop
3484 off the sign extension bits. */
3485 if ((~fieldval & ~(mask >> 1)) == 0)
3486 fieldval &= mask;
3487
3488 /* Warn if value is too big to fit in the field in question. */
3489 if (0 != (fieldval & ~mask))
3490 {
3491 /* FIXME: would like to include fieldval in the message, but
3492 we don't have a sprintf_longest. */
3493 warning (_("Value does not fit in %s bits."), plongest (bitsize));
3494
3495 /* Truncate it, otherwise adjoining fields may be corrupted. */
3496 fieldval &= mask;
3497 }
3498
3499 /* Ensure no bytes outside of the modified ones get accessed, as that may cause
3500 false valgrind reports. */
3501
3502 bytesize = (bitpos + bitsize + 7) / 8;
3503 oword = extract_unsigned_integer (addr, bytesize, byte_order);
3504
3505 /* Shifting for bit field depends on endianness of the target machine. */
3506 if (gdbarch_bits_big_endian (get_type_arch (type)))
3507 bitpos = bytesize * 8 - bitpos - bitsize;
3508
3509 oword &= ~(mask << bitpos);
3510 oword |= fieldval << bitpos;
3511
3512 store_unsigned_integer (addr, bytesize, byte_order, oword);
3513 }
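
/* Illustrative worked example, not part of the original sources, on a
   target whose bits are not big endian: writing FIELDVAL == 5 into a
   field with BITPOS == 10 and BITSIZE == 3 first advances ADDR by one
   byte and reduces BITPOS to 2; MASK is 7 and BYTESIZE is
   (2 + 3 + 7) / 8 == 1, so a single byte is read, bits 2..4 are
   cleared, 5 << 2 is OR-ed in, and the byte is written back.  */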
3514 \f
3515 /* Pack NUM into BUF using a target format of TYPE. */
3516
3517 void
3518 pack_long (gdb_byte *buf, struct type *type, LONGEST num)
3519 {
3520 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
3521 LONGEST len;
3522
3523 type = check_typedef (type);
3524 len = TYPE_LENGTH (type);
3525
3526 switch (TYPE_CODE (type))
3527 {
3528 case TYPE_CODE_INT:
3529 case TYPE_CODE_CHAR:
3530 case TYPE_CODE_ENUM:
3531 case TYPE_CODE_FLAGS:
3532 case TYPE_CODE_BOOL:
3533 case TYPE_CODE_RANGE:
3534 case TYPE_CODE_MEMBERPTR:
3535 store_signed_integer (buf, len, byte_order, num);
3536 break;
3537
3538 case TYPE_CODE_REF:
3539 case TYPE_CODE_RVALUE_REF:
3540 case TYPE_CODE_PTR:
3541 store_typed_address (buf, type, (CORE_ADDR) num);
3542 break;
3543
3544 default:
3545 error (_("Unexpected type (%d) encountered for integer constant."),
3546 TYPE_CODE (type));
3547 }
3548 }
3549
3550
3551 /* Pack NUM into BUF using a target format of TYPE. */
3552
3553 static void
3554 pack_unsigned_long (gdb_byte *buf, struct type *type, ULONGEST num)
3555 {
3556 LONGEST len;
3557 enum bfd_endian byte_order;
3558
3559 type = check_typedef (type);
3560 len = TYPE_LENGTH (type);
3561 byte_order = gdbarch_byte_order (get_type_arch (type));
3562
3563 switch (TYPE_CODE (type))
3564 {
3565 case TYPE_CODE_INT:
3566 case TYPE_CODE_CHAR:
3567 case TYPE_CODE_ENUM:
3568 case TYPE_CODE_FLAGS:
3569 case TYPE_CODE_BOOL:
3570 case TYPE_CODE_RANGE:
3571 case TYPE_CODE_MEMBERPTR:
3572 store_unsigned_integer (buf, len, byte_order, num);
3573 break;
3574
3575 case TYPE_CODE_REF:
3576 case TYPE_CODE_RVALUE_REF:
3577 case TYPE_CODE_PTR:
3578 store_typed_address (buf, type, (CORE_ADDR) num);
3579 break;
3580
3581 default:
3582 error (_("Unexpected type (%d) encountered "
3583 "for unsigned integer constant."),
3584 TYPE_CODE (type));
3585 }
3586 }
3587
3588
3589 /* Convert C numbers into newly allocated values. */
3590
3591 struct value *
3592 value_from_longest (struct type *type, LONGEST num)
3593 {
3594 struct value *val = allocate_value (type);
3595
3596 pack_long (value_contents_raw (val), type, num);
3597 return val;
3598 }
3599
3600
3601 /* Convert C unsigned numbers into newly allocated values. */
3602
3603 struct value *
3604 value_from_ulongest (struct type *type, ULONGEST num)
3605 {
3606 struct value *val = allocate_value (type);
3607
3608 pack_unsigned_long (value_contents_raw (val), type, num);
3609
3610 return val;
3611 }
3612
3613
3614 /* Create a value representing a pointer of type TYPE to the address
3615 ADDR. */
3616
3617 struct value *
3618 value_from_pointer (struct type *type, CORE_ADDR addr)
3619 {
3620 struct value *val = allocate_value (type);
3621
3622 store_typed_address (value_contents_raw (val),
3623 check_typedef (type), addr);
3624 return val;
3625 }
3626
3627
3628 /* Create a value of type TYPE whose contents come from VALADDR, if it
3629 is non-null, and whose memory address (in the inferior) is
3630 ADDRESS. The type of the created value may differ from the passed
3631 type TYPE. Make sure to retrieve the value's new type after this call.
3632 Note that TYPE is not passed through resolve_dynamic_type; this is
3633 a special API intended for use only by Ada. */
3634
3635 struct value *
3636 value_from_contents_and_address_unresolved (struct type *type,
3637 const gdb_byte *valaddr,
3638 CORE_ADDR address)
3639 {
3640 struct value *v;
3641
3642 if (valaddr == NULL)
3643 v = allocate_value_lazy (type);
3644 else
3645 v = value_from_contents (type, valaddr);
3646 VALUE_LVAL (v) = lval_memory;
3647 set_value_address (v, address);
3648 return v;
3649 }
3650
3651 /* Create a value of type TYPE whose contents come from VALADDR, if it
3652 is non-null, and whose memory address (in the inferior) is
3653 ADDRESS. The type of the created value may differ from the passed
3654 type TYPE. Make sure to retrieve the value's new type after this call. */
3655
3656 struct value *
3657 value_from_contents_and_address (struct type *type,
3658 const gdb_byte *valaddr,
3659 CORE_ADDR address)
3660 {
3661 struct type *resolved_type = resolve_dynamic_type (type, valaddr, address);
3662 struct type *resolved_type_no_typedef = check_typedef (resolved_type);
3663 struct value *v;
3664
3665 if (valaddr == NULL)
3666 v = allocate_value_lazy (resolved_type);
3667 else
3668 v = value_from_contents (resolved_type, valaddr);
3669 if (TYPE_DATA_LOCATION (resolved_type_no_typedef) != NULL
3670 && TYPE_DATA_LOCATION_KIND (resolved_type_no_typedef) == PROP_CONST)
3671 address = TYPE_DATA_LOCATION_ADDR (resolved_type_no_typedef);
3672 VALUE_LVAL (v) = lval_memory;
3673 set_value_address (v, address);
3674 return v;
3675 }
3676
3677 /* Create a value of type TYPE holding the contents CONTENTS.
3678 The new value is `not_lval'. */
3679
3680 struct value *
3681 value_from_contents (struct type *type, const gdb_byte *contents)
3682 {
3683 struct value *result;
3684
3685 result = allocate_value (type);
3686 memcpy (value_contents_raw (result), contents, TYPE_LENGTH (type));
3687 return result;
3688 }
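
/* Illustrative sketch (not part of GDB itself): building a not_lval
   value from bytes assembled in a host buffer.  The buffer must hold
   at least TYPE_LENGTH (TYPE) bytes, already in target byte order.  */

static struct value *
example_value_from_host_bytes (struct gdbarch *gdbarch, struct type *type,
                               ULONGEST num)
{
  gdb_byte buf[sizeof (ULONGEST)];

  gdb_assert (TYPE_LENGTH (type) <= sizeof (buf));
  store_unsigned_integer (buf, TYPE_LENGTH (type),
                          gdbarch_byte_order (gdbarch), num);
  return value_from_contents (type, buf);
}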
3689
3690 struct value *
3691 value_from_double (struct type *type, DOUBLEST num)
3692 {
3693 struct value *val = allocate_value (type);
3694 struct type *base_type = check_typedef (type);
3695 enum type_code code = TYPE_CODE (base_type);
3696
3697 if (code == TYPE_CODE_FLT)
3698 {
3699 store_typed_floating (value_contents_raw (val), base_type, num);
3700 }
3701 else
3702 error (_("Unexpected type encountered for floating constant."));
3703
3704 return val;
3705 }
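
/* Illustrative sketch (not part of GDB itself): a host floating-point
   constant packed into the architecture's builtin "double" type.  */

static struct value *
example_double_constant (struct gdbarch *gdbarch)
{
  return value_from_double (builtin_type (gdbarch)->builtin_double, 2.5);
}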
3706
3707 struct value *
3708 value_from_decfloat (struct type *type, const gdb_byte *dec)
3709 {
3710 struct value *val = allocate_value (type);
3711
3712 memcpy (value_contents_raw (val), dec, TYPE_LENGTH (type));
3713 return val;
3714 }
3715
3716 /* Extract a value from the history file. Input will be of the form
3717 $digits or $$digits. See block comment above 'write_dollar_variable'
3718 for details. */
3719
3720 struct value *
3721 value_from_history_ref (const char *h, const char **endp)
3722 {
3723 int index, len;
3724
3725 if (h[0] == '$')
3726 len = 1;
3727 else
3728 return NULL;
3729
3730 if (h[1] == '$')
3731 len = 2;
3732
3733 /* Find length of numeral string. */
3734 for (; isdigit (h[len]); len++)
3735 ;
3736
3737 /* Make sure numeral string is not part of an identifier. */
3738 if (h[len] == '_' || isalpha (h[len]))
3739 return NULL;
3740
3741 /* Now collect the index value. */
3742 if (h[1] == '$')
3743 {
3744 if (len == 2)
3745 {
3746 /* For some bizarre reason, "$$" is equivalent to "$$1",
3747 rather than to "$$0" as it ought to be! */
3748 index = -1;
3749 *endp += len;
3750 }
3751 else
3752 {
3753 char *local_end;
3754
3755 index = -strtol (&h[2], &local_end, 10);
3756 *endp = local_end;
3757 }
3758 }
3759 else
3760 {
3761 if (len == 1)
3762 {
3763 /* "$" is equivalent to "$0". */
3764 index = 0;
3765 *endp += len;
3766 }
3767 else
3768 {
3769 char *local_end;
3770
3771 index = strtol (&h[1], &local_end, 10);
3772 *endp = local_end;
3773 }
3774 }
3775
3776 return access_value_history (index);
3777 }
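
/* Illustrative sketch (not part of GDB itself): how the history syntax
   maps onto indices, assuming the value history is non-empty so that
   access_value_history succeeds:
     "$"    -> index 0   (the most recent value)
     "$7"   -> index 7   (absolute item number)
     "$$"   -> index -1  (one before the most recent)
     "$$3"  -> index -3  (three before the most recent)  */

static struct value *
example_latest_history_value (void)
{
  const char *ref = "$";
  const char *end = ref;

  /* END is advanced past the reference on return.  */
  return value_from_history_ref (ref, &end);
}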
3778
3779 /* Get the component value (offset by OFFSET bytes) of a struct or
3780 union WHOLE. Component's type is TYPE. */
3781
3782 struct value *
3783 value_from_component (struct value *whole, struct type *type, LONGEST offset)
3784 {
3785 struct value *v;
3786
3787 if (VALUE_LVAL (whole) == lval_memory && value_lazy (whole))
3788 v = allocate_value_lazy (type);
3789 else
3790 {
3791 v = allocate_value (type);
3792 value_contents_copy (v, value_embedded_offset (v),
3793 whole, value_embedded_offset (whole) + offset,
3794 type_length_units (type));
3795 }
3796 v->offset = value_offset (whole) + offset + value_embedded_offset (whole);
3797 set_value_component_location (v, whole);
3798
3799 return v;
3800 }
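
/* Illustrative sketch (not part of GDB itself): extracting field
   number FIELDNO of a struct value WHOLE, assuming an ordinary
   byte-aligned data member (no bitfields, no virtual bases).  */

static struct value *
example_struct_member (struct value *whole, int fieldno)
{
  struct type *type = check_typedef (value_type (whole));
  struct type *field_type = TYPE_FIELD_TYPE (type, fieldno);
  LONGEST byte_offset = TYPE_FIELD_BITPOS (type, fieldno) / 8;

  return value_from_component (whole, field_type, byte_offset);
}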
3801
3802 struct value *
3803 coerce_ref_if_computed (const struct value *arg)
3804 {
3805 const struct lval_funcs *funcs;
3806
3807 if (!TYPE_IS_REFERENCE (check_typedef (value_type (arg))))
3808 return NULL;
3809
3810 if (value_lval_const (arg) != lval_computed)
3811 return NULL;
3812
3813 funcs = value_computed_funcs (arg);
3814 if (funcs->coerce_ref == NULL)
3815 return NULL;
3816
3817 return funcs->coerce_ref (arg);
3818 }
3819
3820 /* Look at value.h for description. */
3821
3822 struct value *
3823 readjust_indirect_value_type (struct value *value, struct type *enc_type,
3824 const struct type *original_type,
3825 const struct value *original_value)
3826 {
3827 /* Re-adjust type. */
3828 deprecated_set_value_type (value, TYPE_TARGET_TYPE (original_type));
3829
3830 /* Add embedding info. */
3831 set_value_enclosing_type (value, enc_type);
3832 set_value_embedded_offset (value, value_pointed_to_offset (original_value));
3833
3834 /* We may be pointing to an object of some derived type. */
3835 return value_full_object (value, NULL, 0, 0, 0);
3836 }
3837
3838 struct value *
3839 coerce_ref (struct value *arg)
3840 {
3841 struct type *value_type_arg_tmp = check_typedef (value_type (arg));
3842 struct value *retval;
3843 struct type *enc_type;
3844
3845 retval = coerce_ref_if_computed (arg);
3846 if (retval)
3847 return retval;
3848
3849 if (!TYPE_IS_REFERENCE (value_type_arg_tmp))
3850 return arg;
3851
3852 enc_type = check_typedef (value_enclosing_type (arg));
3853 enc_type = TYPE_TARGET_TYPE (enc_type);
3854
3855 retval = value_at_lazy (enc_type,
3856 unpack_pointer (value_type (arg),
3857 value_contents (arg)));
3858 enc_type = value_type (retval);
3859 return readjust_indirect_value_type (retval, enc_type,
3860 value_type_arg_tmp, arg);
3861 }
3862
3863 struct value *
3864 coerce_array (struct value *arg)
3865 {
3866 struct type *type;
3867
3868 arg = coerce_ref (arg);
3869 type = check_typedef (value_type (arg));
3870
3871 switch (TYPE_CODE (type))
3872 {
3873 case TYPE_CODE_ARRAY:
3874 if (!TYPE_VECTOR (type) && current_language->c_style_arrays)
3875 arg = value_coerce_array (arg);
3876 break;
3877 case TYPE_CODE_FUNC:
3878 arg = value_coerce_function (arg);
3879 break;
3880 }
3881 return arg;
3882 }
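
/* Illustrative sketch (not part of GDB itself): the usual pattern when
   preparing an argument for a C-style call -- strip references, let
   arrays decay to pointers and functions to function pointers.  Since
   coerce_array already calls coerce_ref, a single call suffices.  */

static struct value *
example_coerce_argument (struct value *arg)
{
  return coerce_array (arg);
}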
3883 \f
3884
3885 /* Return the return value convention that will be used for the
3886 specified type. */
3887
3888 enum return_value_convention
3889 struct_return_convention (struct gdbarch *gdbarch,
3890 struct value *function, struct type *value_type)
3891 {
3892 enum type_code code = TYPE_CODE (value_type);
3893
3894 if (code == TYPE_CODE_ERROR)
3895 error (_("Function return type unknown."));
3896
3897 /* Probe the architecture for the return-value convention. */
3898 return gdbarch_return_value (gdbarch, function, value_type,
3899 NULL, NULL, NULL);
3900 }
3901
3902 /* Return true if the function returning the specified type is using
3903 the convention of returning structures in memory (passing in the
3904 address as a hidden first parameter). */
3905
3906 int
3907 using_struct_return (struct gdbarch *gdbarch,
3908 struct value *function, struct type *value_type)
3909 {
3910 if (TYPE_CODE (value_type) == TYPE_CODE_VOID)
3911 /* A void return value is never in memory. See also corresponding
3912 code in "print_return_value". */
3913 return 0;
3914
3915 return (struct_return_convention (gdbarch, function, value_type)
3916 != RETURN_VALUE_REGISTER_CONVENTION);
3917 }
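
/* Illustrative sketch (not part of GDB itself): code setting up an
   inferior function call would consult using_struct_return to decide
   whether to allocate space for the result in the inferior and pass
   its address as a hidden first argument.  */

static int
example_needs_return_buffer (struct gdbarch *gdbarch,
                             struct value *function, struct type *ret_type)
{
  return using_struct_return (gdbarch, function, ret_type);
}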
3918
3919 /* Set the initialized field in a value struct. */
3920
3921 void
3922 set_value_initialized (struct value *val, int status)
3923 {
3924 val->initialized = status;
3925 }
3926
3927 /* Return the initialized field in a value struct. */
3928
3929 int
3930 value_initialized (const struct value *val)
3931 {
3932 return val->initialized;
3933 }
3934
3935 /* Load the actual content of a lazy value. Fetch the data from the
3936 user's process and clear the lazy flag to indicate that the data in
3937 the buffer is valid.
3938
3939 If the value is zero-length, we avoid calling read_memory, which
3940 would abort. We mark the value as fetched anyway -- all 0 bytes of
3941 it. */
3942
3943 void
3944 value_fetch_lazy (struct value *val)
3945 {
3946 gdb_assert (value_lazy (val));
3947 allocate_value_contents (val);
3948 /* A value is either lazy, or fully fetched. The
3949 availability/validity is only established as we try to fetch a
3950 value. */
3951 gdb_assert (VEC_empty (range_s, val->optimized_out));
3952 gdb_assert (VEC_empty (range_s, val->unavailable));
3953 if (value_bitsize (val))
3954 {
3955 /* To read a lazy bitfield, read the entire enclosing value. This
3956 prevents reading the same block of (possibly volatile) memory once
3957 per bitfield. It would be even better to read only the containing
3958 word, but we have no way to record that just specific bits of a
3959 value have been fetched. */
3960 struct type *type = check_typedef (value_type (val));
3961 struct value *parent = value_parent (val);
3962
3963 if (value_lazy (parent))
3964 value_fetch_lazy (parent);
3965
3966 unpack_value_bitfield (val,
3967 value_bitpos (val), value_bitsize (val),
3968 value_contents_for_printing (parent),
3969 value_offset (val), parent);
3970 }
3971 else if (VALUE_LVAL (val) == lval_memory)
3972 {
3973 CORE_ADDR addr = value_address (val);
3974 struct type *type = check_typedef (value_enclosing_type (val));
3975
3976 if (TYPE_LENGTH (type))
3977 read_value_memory (val, 0, value_stack (val),
3978 addr, value_contents_all_raw (val),
3979 type_length_units (type));
3980 }
3981 else if (VALUE_LVAL (val) == lval_register)
3982 {
3983 struct frame_info *next_frame;
3984 int regnum;
3985 struct type *type = check_typedef (value_type (val));
3986 struct value *new_val = val, *mark = value_mark ();
3987
3988 /* Offsets are not supported here; lazy register values must
3989 refer to the entire register. */
3990 gdb_assert (value_offset (val) == 0);
3991
3992 while (VALUE_LVAL (new_val) == lval_register && value_lazy (new_val))
3993 {
3994 struct frame_id next_frame_id = VALUE_NEXT_FRAME_ID (new_val);
3995
3996 next_frame = frame_find_by_id (next_frame_id);
3997 regnum = VALUE_REGNUM (new_val);
3998
3999 gdb_assert (next_frame != NULL);
4000
4001 /* Convertible register routines are used for multi-register
4002 values and for interpretation in different types
4003 (e.g. float or int from a double register). Lazy
4004 register values should have the register's natural type,
4005 so they do not apply. */
4006 gdb_assert (!gdbarch_convert_register_p (get_frame_arch (next_frame),
4007 regnum, type));
4008
4009 /* NEXT_FRAME was obtained, above, via VALUE_NEXT_FRAME_ID.
4010 Since a "->next" operation was performed when setting
4011 this field, we do not need to perform a "next" operation
4012 again when unwinding the register. That's why
4013 frame_unwind_register_value() is called here instead of
4014 get_frame_register_value(). */
4015 new_val = frame_unwind_register_value (next_frame, regnum);
4016
4017 /* If we get another lazy lval_register value, it means the
4018 register is found by reading it from NEXT_FRAME's next frame.
4019 frame_unwind_register_value should never return a value with
4020 the frame id pointing to NEXT_FRAME. If it does, it means we
4021 either have two consecutive frames with the same frame id
4022 in the frame chain, or some code is trying to unwind
4023 behind get_prev_frame's back (e.g., a frame unwind
4024 sniffer trying to unwind), bypassing its validations. In
4025 any case, it should always be an internal error to end up
4026 in this situation. */
4027 if (VALUE_LVAL (new_val) == lval_register
4028 && value_lazy (new_val)
4029 && frame_id_eq (VALUE_NEXT_FRAME_ID (new_val), next_frame_id))
4030 internal_error (__FILE__, __LINE__,
4031 _("infinite loop while fetching a register"));
4032 }
4033
4034 /* If it's still lazy (for instance, a saved register on the
4035 stack), fetch it. */
4036 if (value_lazy (new_val))
4037 value_fetch_lazy (new_val);
4038
4039 /* Copy the contents and the unavailability/optimized-out
4040 meta-data from NEW_VAL to VAL. */
4041 set_value_lazy (val, 0);
4042 value_contents_copy (val, value_embedded_offset (val),
4043 new_val, value_embedded_offset (new_val),
4044 type_length_units (type));
4045
4046 if (frame_debug)
4047 {
4048 struct gdbarch *gdbarch;
4049 struct frame_info *frame;
4050 /* VALUE_FRAME_ID is used here, instead of VALUE_NEXT_FRAME_ID,
4051 so that the frame level will be shown correctly. */
4052 frame = frame_find_by_id (VALUE_FRAME_ID (val));
4053 regnum = VALUE_REGNUM (val);
4054 gdbarch = get_frame_arch (frame);
4055
4056 fprintf_unfiltered (gdb_stdlog,
4057 "{ value_fetch_lazy "
4058 "(frame=%d,regnum=%d(%s),...) ",
4059 frame_relative_level (frame), regnum,
4060 user_reg_map_regnum_to_name (gdbarch, regnum));
4061
4062 fprintf_unfiltered (gdb_stdlog, "->");
4063 if (value_optimized_out (new_val))
4064 {
4065 fprintf_unfiltered (gdb_stdlog, " ");
4066 val_print_optimized_out (new_val, gdb_stdlog);
4067 }
4068 else
4069 {
4070 int i;
4071 const gdb_byte *buf = value_contents (new_val);
4072
4073 if (VALUE_LVAL (new_val) == lval_register)
4074 fprintf_unfiltered (gdb_stdlog, " register=%d",
4075 VALUE_REGNUM (new_val));
4076 else if (VALUE_LVAL (new_val) == lval_memory)
4077 fprintf_unfiltered (gdb_stdlog, " address=%s",
4078 paddress (gdbarch,
4079 value_address (new_val)));
4080 else
4081 fprintf_unfiltered (gdb_stdlog, " computed");
4082
4083 fprintf_unfiltered (gdb_stdlog, " bytes=");
4084 fprintf_unfiltered (gdb_stdlog, "[");
4085 for (i = 0; i < register_size (gdbarch, regnum); i++)
4086 fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
4087 fprintf_unfiltered (gdb_stdlog, "]");
4088 }
4089
4090 fprintf_unfiltered (gdb_stdlog, " }\n");
4091 }
4092
4093 /* Dispose of the intermediate values. This prevents
4094 watchpoints from trying to watch the saved frame pointer. */
4095 value_free_to_mark (mark);
4096 }
4097 else if (VALUE_LVAL (val) == lval_computed
4098 && value_computed_funcs (val)->read != NULL)
4099 value_computed_funcs (val)->read (val);
4100 else
4101 internal_error (__FILE__, __LINE__, _("Unexpected lazy value type."));
4102
4103 set_value_lazy (val, 0);
4104 }
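
/* Illustrative sketch (not part of GDB itself): the canonical pattern
   for code that pokes at a value's buffer through the *_raw accessors.
   Most consumers should simply use value_contents and friends, which
   fetch lazily on demand.  */

static const gdb_byte *
example_raw_contents (struct value *val)
{
  if (value_lazy (val))
    value_fetch_lazy (val);
  return value_contents_raw (val);
}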
4105
4106 /* Implementation of the convenience function $_isvoid. */
4107
4108 static struct value *
4109 isvoid_internal_fn (struct gdbarch *gdbarch,
4110 const struct language_defn *language,
4111 void *cookie, int argc, struct value **argv)
4112 {
4113 int ret;
4114
4115 if (argc != 1)
4116 error (_("You must provide one argument for $_isvoid."));
4117
4118 ret = TYPE_CODE (value_type (argv[0])) == TYPE_CODE_VOID;
4119
4120 return value_from_longest (builtin_type (gdbarch)->builtin_int, ret);
4121 }
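
/* Illustrative sketch (not part of GDB itself): a hypothetical
   convenience function "$_isint" following the same pattern as
   $_isvoid.  It would be registered from _initialize_values with
   add_internal_function, exactly like the "_isvoid" call below.  */

static struct value *
example_isint_internal_fn (struct gdbarch *gdbarch,
                           const struct language_defn *language,
                           void *cookie, int argc, struct value **argv)
{
  if (argc != 1)
    error (_("You must provide one argument for $_isint."));

  return value_from_longest (builtin_type (gdbarch)->builtin_int,
                             TYPE_CODE (value_type (argv[0]))
                             == TYPE_CODE_INT);
}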
4122
4123 void
4124 _initialize_values (void)
4125 {
4126 add_cmd ("convenience", no_class, show_convenience, _("\
4127 Debugger convenience (\"$foo\") variables and functions.\n\
4128 Convenience variables are created when you assign them values;\n\
4129 thus, \"set $foo=1\" gives \"$foo\" the value 1. Values may be any type.\n\
4130 \n\
4131 A few convenience variables are given values automatically:\n\
4132 \"$_\"holds the last address examined with \"x\" or \"info lines\",\n\
4133 \"$__\" holds the contents of the last address examined with \"x\"."
4134 #ifdef HAVE_PYTHON
4135 "\n\n\
4136 Convenience functions are defined via the Python API."
4137 #endif
4138 ), &showlist);
4139 add_alias_cmd ("conv", "convenience", no_class, 1, &showlist);
4140
4141 add_cmd ("values", no_set_class, show_values, _("\
4142 Elements of value history around item number IDX (or last ten)."),
4143 &showlist);
4144
4145 add_com ("init-if-undefined", class_vars, init_if_undefined_command, _("\
4146 Initialize a convenience variable if necessary.\n\
4147 init-if-undefined VARIABLE = EXPRESSION\n\
4148 Set an internal VARIABLE to the result of the EXPRESSION if it does not\n\
4149 exist or does not contain a value. The EXPRESSION is not evaluated if the\n\
4150 VARIABLE is already initialized."));
4151
4152 add_prefix_cmd ("function", no_class, function_command, _("\
4153 Placeholder command for showing help on convenience functions."),
4154 &functionlist, "function ", 0, &cmdlist);
4155
4156 add_internal_function ("_isvoid", _("\
4157 Check whether an expression is void.\n\
4158 Usage: $_isvoid (expression)\n\
4159 Return 1 if the expression is void, 0 otherwise."),
4160 isvoid_internal_fn, NULL);
4161
4162 add_setshow_zuinteger_unlimited_cmd ("max-value-size",
4163 class_support, &max_value_size, _("\
4164 Set the maximum size of a value gdb will load from the inferior."), _("\
4165 Show the maximum size of a value gdb will load from the inferior."), _("\
4166 Use this to control the maximum size, in bytes, of a value that gdb\n\
4167 will load from the inferior. Setting this value to 'unlimited'\n\
4168 disables checking.\n\
4169 Setting this does not invalidate already allocated values; it only\n\
4170 prevents future values larger than this size from being allocated."),
4171 set_max_value_size,
4172 show_max_value_size,
4173 &setlist, &showlist);
4174 }