Don't print 0x for core_addr_to_string_nz
[deliverable/binutils-gdb.git] / gdb / value.c
1 /* Low level packing and unpacking of values for GDB, the GNU Debugger.
2
3 Copyright (C) 1986-2016 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21 #include "arch-utils.h"
22 #include "symtab.h"
23 #include "gdbtypes.h"
24 #include "value.h"
25 #include "gdbcore.h"
26 #include "command.h"
27 #include "gdbcmd.h"
28 #include "target.h"
29 #include "language.h"
30 #include "demangle.h"
31 #include "doublest.h"
32 #include "regcache.h"
33 #include "block.h"
34 #include "dfp.h"
35 #include "objfiles.h"
36 #include "valprint.h"
37 #include "cli/cli-decode.h"
38 #include "extension.h"
39 #include <ctype.h>
40 #include "tracepoint.h"
41 #include "cp-abi.h"
42 #include "user-regs.h"
43
44 /* Prototypes for exported functions. */
45
46 void _initialize_values (void);
47
/* Definition of a user function (a "convenience function" callable
   from expressions, e.g. $_somefunc()).  */
struct internal_function
{
  /* The name of the function.  It is a bit odd to have this in the
     function itself -- the user might use a differently-named
     convenience variable to hold the function.  */
  char *name;

  /* The handler invoked when the function is called.  */
  internal_function_fn handler;

  /* User data passed through to the handler.  */
  void *cookie;
};
62
/* Defines an [OFFSET, OFFSET + LENGTH) range.  */

struct range
{
  /* Lowest offset in the range.  */
  int offset;

  /* Length of the range.  */
  int length;
};

typedef struct range range_s;

/* Declare the VEC-of-objects operations for range_s vectors.  */
DEF_VEC_O(range_s);
77
/* Returns true if the ranges defined by [offset1, offset1+len1) and
   [offset2, offset2+len2) overlap.  */

static int
ranges_overlap (int offset1, int len1,
		int offset2, int len2)
{
  /* The two half-open intervals intersect exactly when the larger of
     the two low bounds is below the smaller of the two high
     bounds.  */
  unsigned long long low = (offset1 > offset2) ? offset1 : offset2;
  unsigned long long high = ((offset1 + len1 < offset2 + len2)
			     ? offset1 + len1
			     : offset2 + len2);

  return low < high;
}
91
/* Returns true if the first argument is strictly less than the
   second, useful for VEC_lower_bound.  We keep ranges sorted by
   offset and coalesce overlapping and contiguous ranges, so this just
   compares the starting offset.  */

static int
range_lessthan (const range_s *r1, const range_s *r2)
{
  return r1->offset < r2->offset;
}
102
103 /* Returns true if RANGES contains any range that overlaps [OFFSET,
104 OFFSET+LENGTH). */
105
106 static int
107 ranges_contain (VEC(range_s) *ranges, int offset, int length)
108 {
109 range_s what;
110 int i;
111
112 what.offset = offset;
113 what.length = length;
114
115 /* We keep ranges sorted by offset and coalesce overlapping and
116 contiguous ranges, so to check if a range list contains a given
117 range, we can do a binary search for the position the given range
118 would be inserted if we only considered the starting OFFSET of
119 ranges. We call that position I. Since we also have LENGTH to
120 care for (this is a range afterall), we need to check if the
121 _previous_ range overlaps the I range. E.g.,
122
123 R
124 |---|
125 |---| |---| |------| ... |--|
126 0 1 2 N
127
128 I=1
129
130 In the case above, the binary search would return `I=1', meaning,
131 this OFFSET should be inserted at position 1, and the current
132 position 1 should be pushed further (and before 2). But, `0'
133 overlaps with R.
134
135 Then we need to check if the I range overlaps the I range itself.
136 E.g.,
137
138 R
139 |---|
140 |---| |---| |-------| ... |--|
141 0 1 2 N
142
143 I=1
144 */
145
146 i = VEC_lower_bound (range_s, ranges, &what, range_lessthan);
147
148 if (i > 0)
149 {
150 struct range *bef = VEC_index (range_s, ranges, i - 1);
151
152 if (ranges_overlap (bef->offset, bef->length, offset, length))
153 return 1;
154 }
155
156 if (i < VEC_length (range_s, ranges))
157 {
158 struct range *r = VEC_index (range_s, ranges, i);
159
160 if (ranges_overlap (r->offset, r->length, offset, length))
161 return 1;
162 }
163
164 return 0;
165 }
166
/* Command list head for internal ("convenience") functions.
   NOTE(review): the command registration site is not visible in this
   chunk -- presumably _initialize_values; confirm.  */
static struct cmd_list_element *functionlist;
168
/* The main GDB value object.  Note that the fields in this structure
   are arranged to save a bit of memory.  */

struct value
{
  /* Type of value; either not an lval, or one of the various
     different possible kinds of lval.  */
  enum lval_type lval;

  /* Is it modifiable?  Only relevant if lval != not_lval.  */
  unsigned int modifiable : 1;

  /* If zero, contents of this value are in the contents field.  If
     nonzero, contents are in inferior.  If the lval field is lval_memory,
     the contents are in inferior memory at location.address plus offset.
     The lval field may also be lval_register.

     WARNING: This field is used by the code which handles watchpoints
     (see breakpoint.c) to decide whether a particular value can be
     watched by hardware watchpoints.  If the lazy flag is set for
     some member of a value chain, it is assumed that this member of
     the chain doesn't need to be watched as part of watching the
     value itself.  This is how GDB avoids watching the entire struct
     or array when the user wants to watch a single struct member or
     array element.  If you ever change the way lazy flag is set and
     reset, be sure to consider this use as well!  */
  unsigned int lazy : 1;

  /* If value is a variable, is it initialized or not.  */
  unsigned int initialized : 1;

  /* If value is from the stack.  If this is set, read_stack will be
     used instead of read_memory to enable extra caching.  */
  unsigned int stack : 1;

  /* If the value has been released.  */
  unsigned int released : 1;

  /* Register number if the value is from a register.  */
  short regnum;

  /* Location of value (if lval).  */
  union
  {
    /* If lval == lval_memory, this is the address in the inferior.
       If lval == lval_register, this is the byte offset into the
       registers structure.  */
    CORE_ADDR address;

    /* Pointer to internal variable.  */
    struct internalvar *internalvar;

    /* Pointer to xmethod worker.  */
    struct xmethod_worker *xm_worker;

    /* If lval == lval_computed, this is a set of function pointers
       to use to access and describe the value, and a closure pointer
       for them to use.  */
    struct
    {
      /* Functions to call.  */
      const struct lval_funcs *funcs;

      /* Closure for those functions to use.  */
      void *closure;
    } computed;
  } location;

  /* Describes offset of a value within lval of a structure in target
     addressable memory units.  If lval == lval_memory, this is an offset to
     the address.  If lval == lval_register, this is a further offset from
     location.address within the registers structure.  Note also the member
     embedded_offset below.  */
  int offset;

  /* Only used for bitfields; number of bits contained in them.  */
  int bitsize;

  /* Only used for bitfields; position of start of field.  For
     gdbarch_bits_big_endian=0 targets, it is the position of the LSB.  For
     gdbarch_bits_big_endian=1 targets, it is the position of the MSB.  */
  int bitpos;

  /* The number of references to this value.  When a value is created,
     the value chain holds a reference, so REFERENCE_COUNT is 1.  If
     release_value is called, this value is removed from the chain but
     the caller of release_value now has a reference to this value.
     The caller must arrange for a call to value_free later.  */
  int reference_count;

  /* Only used for bitfields; the containing value.  This allows a
     single read from the target when displaying multiple
     bitfields.  */
  struct value *parent;

  /* Frame register value is relative to.  This will be described in
     the lval enum above as "lval_register".  */
  struct frame_id frame_id;

  /* Type of the value.  */
  struct type *type;

  /* If a value represents a C++ object, then the `type' field gives
     the object's compile-time type.  If the object actually belongs
     to some class derived from `type', perhaps with other base
     classes and additional members, then `type' is just a subobject
     of the real thing, and the full object is probably larger than
     `type' would suggest.

     If `type' is a dynamic class (i.e. one with a vtable), then GDB
     can actually determine the object's run-time type by looking at
     the run-time type information in the vtable.  When this
     information is available, we may elect to read in the entire
     object, for several reasons:

     - When printing the value, the user would probably rather see the
     full object, not just the limited portion apparent from the
     compile-time type.

     - If `type' has virtual base classes, then even printing `type'
     alone may require reaching outside the `type' portion of the
     object to wherever the virtual base class has been stored.

     When we store the entire object, `enclosing_type' is the run-time
     type -- the complete object -- and `embedded_offset' is the
     offset of `type' within that larger type, in target addressable memory
     units.  The value_contents() macro takes `embedded_offset' into account,
     so most GDB code continues to see the `type' portion of the value, just
     as the inferior would.

     If `type' is a pointer to an object, then `enclosing_type' is a
     pointer to the object's run-time type, and `pointed_to_offset' is
     the offset in target addressable memory units from the full object
     to the pointed-to object -- that is, the value `embedded_offset' would
     have if we followed the pointer and fetched the complete object.
     (I don't really see the point.  Why not just determine the
     run-time type when you indirect, and avoid the special case?  The
     contents don't matter until you indirect anyway.)

     If we're not doing anything fancy, `enclosing_type' is equal to
     `type', and `embedded_offset' is zero, so everything works
     normally.  */
  struct type *enclosing_type;
  int embedded_offset;
  int pointed_to_offset;

  /* Values are stored in a chain, so that they can be deleted easily
     over calls to the inferior.  Values assigned to internal
     variables, put into the value history or exposed to Python are
     taken off this list.  */
  struct value *next;

  /* Actual contents of the value.  Target byte-order.  NULL or not
     valid if lazy is nonzero.  */
  gdb_byte *contents;

  /* Unavailable ranges in CONTENTS.  We mark unavailable ranges,
     rather than available, since the common and default case is for a
     value to be available.  This is filled in at value read time.
     The unavailable ranges are tracked in bits.  Note that a contents
     bit that has been optimized out doesn't really exist in the
     program, so it can't be marked unavailable either.  */
  VEC(range_s) *unavailable;

  /* Likewise, but for optimized out contents (a chunk of the value of
     a variable that does not actually exist in the program).  If LVAL
     is lval_register, this is a register ($pc, $sp, etc., never a
     program variable) that has not been saved in the frame.  Not
     saved registers and optimized-out program variables values are
     treated pretty much the same, except not-saved registers have a
     different string representation and related error strings.  */
  VEC(range_s) *optimized_out;
};
342
/* See value.h.  */

struct gdbarch *
get_value_arch (const struct value *value)
{
  /* A value's architecture is the architecture of its type.  */
  struct type *type = value_type (value);

  return get_type_arch (type);
}
350
351 int
352 value_bits_available (const struct value *value, int offset, int length)
353 {
354 gdb_assert (!value->lazy);
355
356 return !ranges_contain (value->unavailable, offset, length);
357 }
358
359 int
360 value_bytes_available (const struct value *value, int offset, int length)
361 {
362 return value_bits_available (value,
363 offset * TARGET_CHAR_BIT,
364 length * TARGET_CHAR_BIT);
365 }
366
367 int
368 value_bits_any_optimized_out (const struct value *value, int bit_offset, int bit_length)
369 {
370 gdb_assert (!value->lazy);
371
372 return ranges_contain (value->optimized_out, bit_offset, bit_length);
373 }
374
375 int
376 value_entirely_available (struct value *value)
377 {
378 /* We can only tell whether the whole value is available when we try
379 to read it. */
380 if (value->lazy)
381 value_fetch_lazy (value);
382
383 if (VEC_empty (range_s, value->unavailable))
384 return 1;
385 return 0;
386 }
387
388 /* Returns true if VALUE is entirely covered by RANGES. If the value
389 is lazy, it'll be read now. Note that RANGE is a pointer to
390 pointer because reading the value might change *RANGE. */
391
392 static int
393 value_entirely_covered_by_range_vector (struct value *value,
394 VEC(range_s) **ranges)
395 {
396 /* We can only tell whether the whole value is optimized out /
397 unavailable when we try to read it. */
398 if (value->lazy)
399 value_fetch_lazy (value);
400
401 if (VEC_length (range_s, *ranges) == 1)
402 {
403 struct range *t = VEC_index (range_s, *ranges, 0);
404
405 if (t->offset == 0
406 && t->length == (TARGET_CHAR_BIT
407 * TYPE_LENGTH (value_enclosing_type (value))))
408 return 1;
409 }
410
411 return 0;
412 }
413
/* Return true if all of VALUE's contents are marked unavailable;
   fetches VALUE first if it is lazy.  */
int
value_entirely_unavailable (struct value *value)
{
  return value_entirely_covered_by_range_vector (value, &value->unavailable);
}
419
/* Return true if all of VALUE's contents are marked optimized out;
   fetches VALUE first if it is lazy.  */
int
value_entirely_optimized_out (struct value *value)
{
  return value_entirely_covered_by_range_vector (value, &value->optimized_out);
}
425
426 /* Insert into the vector pointed to by VECTORP the bit range starting of
427 OFFSET bits, and extending for the next LENGTH bits. */
428
429 static void
430 insert_into_bit_range_vector (VEC(range_s) **vectorp, int offset, int length)
431 {
432 range_s newr;
433 int i;
434
435 /* Insert the range sorted. If there's overlap or the new range
436 would be contiguous with an existing range, merge. */
437
438 newr.offset = offset;
439 newr.length = length;
440
441 /* Do a binary search for the position the given range would be
442 inserted if we only considered the starting OFFSET of ranges.
443 Call that position I. Since we also have LENGTH to care for
444 (this is a range afterall), we need to check if the _previous_
445 range overlaps the I range. E.g., calling R the new range:
446
447 #1 - overlaps with previous
448
449 R
450 |-...-|
451 |---| |---| |------| ... |--|
452 0 1 2 N
453
454 I=1
455
456 In the case #1 above, the binary search would return `I=1',
457 meaning, this OFFSET should be inserted at position 1, and the
458 current position 1 should be pushed further (and become 2). But,
459 note that `0' overlaps with R, so we want to merge them.
460
461 A similar consideration needs to be taken if the new range would
462 be contiguous with the previous range:
463
464 #2 - contiguous with previous
465
466 R
467 |-...-|
468 |--| |---| |------| ... |--|
469 0 1 2 N
470
471 I=1
472
473 If there's no overlap with the previous range, as in:
474
475 #3 - not overlapping and not contiguous
476
477 R
478 |-...-|
479 |--| |---| |------| ... |--|
480 0 1 2 N
481
482 I=1
483
484 or if I is 0:
485
486 #4 - R is the range with lowest offset
487
488 R
489 |-...-|
490 |--| |---| |------| ... |--|
491 0 1 2 N
492
493 I=0
494
495 ... we just push the new range to I.
496
497 All the 4 cases above need to consider that the new range may
498 also overlap several of the ranges that follow, or that R may be
499 contiguous with the following range, and merge. E.g.,
500
501 #5 - overlapping following ranges
502
503 R
504 |------------------------|
505 |--| |---| |------| ... |--|
506 0 1 2 N
507
508 I=0
509
510 or:
511
512 R
513 |-------|
514 |--| |---| |------| ... |--|
515 0 1 2 N
516
517 I=1
518
519 */
520
521 i = VEC_lower_bound (range_s, *vectorp, &newr, range_lessthan);
522 if (i > 0)
523 {
524 struct range *bef = VEC_index (range_s, *vectorp, i - 1);
525
526 if (ranges_overlap (bef->offset, bef->length, offset, length))
527 {
528 /* #1 */
529 ULONGEST l = min (bef->offset, offset);
530 ULONGEST h = max (bef->offset + bef->length, offset + length);
531
532 bef->offset = l;
533 bef->length = h - l;
534 i--;
535 }
536 else if (offset == bef->offset + bef->length)
537 {
538 /* #2 */
539 bef->length += length;
540 i--;
541 }
542 else
543 {
544 /* #3 */
545 VEC_safe_insert (range_s, *vectorp, i, &newr);
546 }
547 }
548 else
549 {
550 /* #4 */
551 VEC_safe_insert (range_s, *vectorp, i, &newr);
552 }
553
554 /* Check whether the ranges following the one we've just added or
555 touched can be folded in (#5 above). */
556 if (i + 1 < VEC_length (range_s, *vectorp))
557 {
558 struct range *t;
559 struct range *r;
560 int removed = 0;
561 int next = i + 1;
562
563 /* Get the range we just touched. */
564 t = VEC_index (range_s, *vectorp, i);
565 removed = 0;
566
567 i = next;
568 for (; VEC_iterate (range_s, *vectorp, i, r); i++)
569 if (r->offset <= t->offset + t->length)
570 {
571 ULONGEST l, h;
572
573 l = min (t->offset, r->offset);
574 h = max (t->offset + t->length, r->offset + r->length);
575
576 t->offset = l;
577 t->length = h - l;
578
579 removed++;
580 }
581 else
582 {
583 /* If we couldn't merge this one, we won't be able to
584 merge following ones either, since the ranges are
585 always sorted by OFFSET. */
586 break;
587 }
588
589 if (removed != 0)
590 VEC_block_remove (range_s, *vectorp, next, removed);
591 }
592 }
593
/* Mark bits [OFFSET, OFFSET+LENGTH) of VALUE's contents as
   unavailable.  */
void
mark_value_bits_unavailable (struct value *value, int offset, int length)
{
  insert_into_bit_range_vector (&value->unavailable, offset, length);
}
599
/* Mark bytes [OFFSET, OFFSET+LENGTH) of VALUE's contents as
   unavailable; byte-granularity wrapper around
   mark_value_bits_unavailable.  */
void
mark_value_bytes_unavailable (struct value *value, int offset, int length)
{
  mark_value_bits_unavailable (value,
			       offset * TARGET_CHAR_BIT,
			       length * TARGET_CHAR_BIT);
}
607
608 /* Find the first range in RANGES that overlaps the range defined by
609 OFFSET and LENGTH, starting at element POS in the RANGES vector,
610 Returns the index into RANGES where such overlapping range was
611 found, or -1 if none was found. */
612
613 static int
614 find_first_range_overlap (VEC(range_s) *ranges, int pos,
615 int offset, int length)
616 {
617 range_s *r;
618 int i;
619
620 for (i = pos; VEC_iterate (range_s, ranges, i, r); i++)
621 if (ranges_overlap (r->offset, r->length, offset, length))
622 return i;
623
624 return -1;
625 }
626
/* Compare LENGTH_BITS of memory at PTR1 + OFFSET1_BITS with the memory at
   PTR2 + OFFSET2_BITS.  Return 0 if the memory is the same, otherwise
   return non-zero.

   It must always be the case that:
     OFFSET1_BITS % TARGET_CHAR_BIT == OFFSET2_BITS % TARGET_CHAR_BIT

   It is assumed that memory can be accessed from:
     PTR + (OFFSET_BITS / TARGET_CHAR_BIT)
   to:
     PTR + ((OFFSET_BITS + LENGTH_BITS + TARGET_CHAR_BIT - 1)
	    / TARGET_CHAR_BIT)  */
static int
memcmp_with_bit_offsets (const gdb_byte *ptr1, size_t offset1_bits,
			 const gdb_byte *ptr2, size_t offset2_bits,
			 size_t length_bits)
{
  /* Both offsets must share the same sub-byte phase, so a single mask
     applies to both sides.  */
  gdb_assert (offset1_bits % TARGET_CHAR_BIT
	      == offset2_bits % TARGET_CHAR_BIT);

  if (offset1_bits % TARGET_CHAR_BIT != 0)
    {
      size_t bits;
      gdb_byte mask, b1, b2;

      /* The offset from the base pointers PTR1 and PTR2 is not a complete
	 number of bytes.  A number of bits up to either the next exact
	 byte boundary, or LENGTH_BITS (which ever is sooner) will be
	 compared.  */
      bits = TARGET_CHAR_BIT - offset1_bits % TARGET_CHAR_BIT;
      gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
      mask = (1 << bits) - 1;

      if (length_bits < bits)
	{
	  /* The compared region ends before the byte boundary; drop
	     the low-order bits past the region from the mask.  */
	  mask &= ~(gdb_byte) ((1 << (bits - length_bits)) - 1);
	  bits = length_bits;
	}

      /* Now load the two bytes and mask off the bits we care about.  */
      b1 = *(ptr1 + offset1_bits / TARGET_CHAR_BIT) & mask;
      b2 = *(ptr2 + offset2_bits / TARGET_CHAR_BIT) & mask;

      if (b1 != b2)
	return 1;

      /* Now update the length and offsets to take account of the bits
	 we've just compared.  */
      length_bits -= bits;
      offset1_bits += bits;
      offset2_bits += bits;
    }

  if (length_bits % TARGET_CHAR_BIT != 0)
    {
      size_t bits;
      size_t o1, o2;
      gdb_byte mask, b1, b2;

      /* The length is not an exact number of bytes.  After the previous
	 IF.. block then the offsets are byte aligned, or the
	 length is zero (in which case this code is not reached).  Compare
	 a number of bits at the end of the region, starting from an exact
	 byte boundary.  */
      bits = length_bits % TARGET_CHAR_BIT;
      o1 = offset1_bits + length_bits - bits;
      o2 = offset2_bits + length_bits - bits;

      gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
      mask = ((1 << bits) - 1) << (TARGET_CHAR_BIT - bits);

      gdb_assert (o1 % TARGET_CHAR_BIT == 0);
      gdb_assert (o2 % TARGET_CHAR_BIT == 0);

      b1 = *(ptr1 + o1 / TARGET_CHAR_BIT) & mask;
      b2 = *(ptr2 + o2 / TARGET_CHAR_BIT) & mask;

      if (b1 != b2)
	return 1;

      length_bits -= bits;
    }

  if (length_bits > 0)
    {
      /* We've now taken care of any stray "bits" at the start, or end of
	 the region to compare, the remainder can be covered with a simple
	 memcmp.  */
      gdb_assert (offset1_bits % TARGET_CHAR_BIT == 0);
      gdb_assert (offset2_bits % TARGET_CHAR_BIT == 0);
      gdb_assert (length_bits % TARGET_CHAR_BIT == 0);

      return memcmp (ptr1 + offset1_bits / TARGET_CHAR_BIT,
		     ptr2 + offset2_bits / TARGET_CHAR_BIT,
		     length_bits / TARGET_CHAR_BIT);
    }

  /* Length is zero, regions match.  */
  return 0;
}
727
/* Helper struct for find_first_range_overlap_and_match and
   value_contents_bits_eq.  Keep track of which slot of a given ranges
   vector have we last looked at.  */

struct ranges_and_idx
{
  /* The ranges.  */
  VEC(range_s) *ranges;

  /* The range we've last found in RANGES.  Given ranges are sorted,
     we can start the next lookup here.  */
  int idx;
};
741
742 /* Helper function for value_contents_bits_eq. Compare LENGTH bits of
743 RP1's ranges starting at OFFSET1 bits with LENGTH bits of RP2's
744 ranges starting at OFFSET2 bits. Return true if the ranges match
745 and fill in *L and *H with the overlapping window relative to
746 (both) OFFSET1 or OFFSET2. */
747
748 static int
749 find_first_range_overlap_and_match (struct ranges_and_idx *rp1,
750 struct ranges_and_idx *rp2,
751 int offset1, int offset2,
752 int length, ULONGEST *l, ULONGEST *h)
753 {
754 rp1->idx = find_first_range_overlap (rp1->ranges, rp1->idx,
755 offset1, length);
756 rp2->idx = find_first_range_overlap (rp2->ranges, rp2->idx,
757 offset2, length);
758
759 if (rp1->idx == -1 && rp2->idx == -1)
760 {
761 *l = length;
762 *h = length;
763 return 1;
764 }
765 else if (rp1->idx == -1 || rp2->idx == -1)
766 return 0;
767 else
768 {
769 range_s *r1, *r2;
770 ULONGEST l1, h1;
771 ULONGEST l2, h2;
772
773 r1 = VEC_index (range_s, rp1->ranges, rp1->idx);
774 r2 = VEC_index (range_s, rp2->ranges, rp2->idx);
775
776 /* Get the unavailable windows intersected by the incoming
777 ranges. The first and last ranges that overlap the argument
778 range may be wider than said incoming arguments ranges. */
779 l1 = max (offset1, r1->offset);
780 h1 = min (offset1 + length, r1->offset + r1->length);
781
782 l2 = max (offset2, r2->offset);
783 h2 = min (offset2 + length, offset2 + r2->length);
784
785 /* Make them relative to the respective start offsets, so we can
786 compare them for equality. */
787 l1 -= offset1;
788 h1 -= offset1;
789
790 l2 -= offset2;
791 h2 -= offset2;
792
793 /* Different ranges, no match. */
794 if (l1 != l2 || h1 != h2)
795 return 0;
796
797 *h = h1;
798 *l = l1;
799 return 1;
800 }
801 }
802
/* Helper function for value_contents_eq.  The only difference is that
   this function is bit rather than byte based.

   Compare LENGTH bits of VAL1's contents starting at OFFSET1 bits
   with LENGTH bits of VAL2's contents starting at OFFSET2 bits.
   Return true if the available bits match.  */

static int
value_contents_bits_eq (const struct value *val1, int offset1,
			const struct value *val2, int offset2,
			int length)
{
  /* Each array element corresponds to a ranges source (unavailable,
     optimized out).  '1' is for VAL1, '2' for VAL2.  */
  struct ranges_and_idx rp1[2], rp2[2];

  /* See function description in value.h.  */
  gdb_assert (!val1->lazy && !val2->lazy);

  /* We shouldn't be trying to compare past the end of the values.  */
  gdb_assert (offset1 + length
	      <= TYPE_LENGTH (val1->enclosing_type) * TARGET_CHAR_BIT);
  gdb_assert (offset2 + length
	      <= TYPE_LENGTH (val2->enclosing_type) * TARGET_CHAR_BIT);

  memset (&rp1, 0, sizeof (rp1));
  memset (&rp2, 0, sizeof (rp2));
  rp1[0].ranges = val1->unavailable;
  rp2[0].ranges = val2->unavailable;
  rp1[1].ranges = val1->optimized_out;
  rp2[1].ranges = val2->optimized_out;

  /* Walk the compared window in chunks: each iteration handles the
     valid contents up to the next invalid/unavailable range, then
     skips past it.  */
  while (length > 0)
    {
      ULONGEST l = 0, h = 0; /* init for gcc -Wall */
      int i;

      for (i = 0; i < 2; i++)
	{
	  ULONGEST l_tmp, h_tmp;

	  /* The contents only match equal if the invalid/unavailable
	     contents ranges match as well.  */
	  if (!find_first_range_overlap_and_match (&rp1[i], &rp2[i],
						   offset1, offset2, length,
						   &l_tmp, &h_tmp))
	    return 0;

	  /* We're interested in the lowest/first range found.  */
	  if (i == 0 || l_tmp < l)
	    {
	      l = l_tmp;
	      h = h_tmp;
	    }
	}

      /* Compare the available/valid contents.  */
      if (memcmp_with_bit_offsets (val1->contents, offset1,
				   val2->contents, offset2, l) != 0)
	return 0;

      /* Advance past the range we just handled.  */
      length -= h;
      offset1 += h;
      offset2 += h;
    }

  return 1;
}
871
/* Compare LENGTH bytes of VAL1's contents starting at OFFSET1 with
   LENGTH bytes of VAL2's contents starting at OFFSET2; byte-based
   wrapper around value_contents_bits_eq.  */
int
value_contents_eq (const struct value *val1, int offset1,
		   const struct value *val2, int offset2,
		   int length)
{
  return value_contents_bits_eq (val1, offset1 * TARGET_CHAR_BIT,
				 val2, offset2 * TARGET_CHAR_BIT,
				 length * TARGET_CHAR_BIT);
}
881
882 /* Prototypes for local functions. */
883
884 static void show_values (char *, int);
885
886 static void show_convenience (char *, int);
887
888
/* The value-history records all the values printed
   by print commands during this session.  Each chunk
   records 60 consecutive values.  The first chunk on
   the chain records the most recent values.
   The total number of values is in value_history_count.  */

#define VALUE_HISTORY_CHUNK 60

struct value_history_chunk
{
  /* Older chunk (earlier values) in the chain.  */
  struct value_history_chunk *next;

  /* The recorded values themselves.  */
  struct value *values[VALUE_HISTORY_CHUNK];
};
902
/* Chain of chunks now in use.  */

static struct value_history_chunk *value_history_chain;

static int value_history_count;	/* Abs number of last entry stored.  */

/* List of all value objects currently allocated
   (except for those released by calls to release_value)
   This is so they can be freed after each command.  */

static struct value *all_values;
915
916 /* Allocate a lazy value for type TYPE. Its actual content is
917 "lazily" allocated too: the content field of the return value is
918 NULL; it will be allocated when it is fetched from the target. */
919
920 struct value *
921 allocate_value_lazy (struct type *type)
922 {
923 struct value *val;
924
925 /* Call check_typedef on our type to make sure that, if TYPE
926 is a TYPE_CODE_TYPEDEF, its length is set to the length
927 of the target type instead of zero. However, we do not
928 replace the typedef type by the target type, because we want
929 to keep the typedef in order to be able to set the VAL's type
930 description correctly. */
931 check_typedef (type);
932
933 val = XCNEW (struct value);
934 val->contents = NULL;
935 val->next = all_values;
936 all_values = val;
937 val->type = type;
938 val->enclosing_type = type;
939 VALUE_LVAL (val) = not_lval;
940 val->location.address = 0;
941 VALUE_FRAME_ID (val) = null_frame_id;
942 val->offset = 0;
943 val->bitpos = 0;
944 val->bitsize = 0;
945 VALUE_REGNUM (val) = -1;
946 val->lazy = 1;
947 val->embedded_offset = 0;
948 val->pointed_to_offset = 0;
949 val->modifiable = 1;
950 val->initialized = 1; /* Default to initialized. */
951
952 /* Values start out on the all_values chain. */
953 val->reference_count = 1;
954
955 return val;
956 }
957
/* The maximum size, in bytes, that GDB will try to allocate for a value.
   The initial value of 64k was not selected for any specific reason, it is
   just a reasonable starting point.  A value of -1 means "unlimited"
   (see show_max_value_size and check_type_length_before_alloc).  */

static int max_value_size = 65536; /* 64k bytes */

/* It is critical that the MAX_VALUE_SIZE is at least as big as the size of
   LONGEST, otherwise GDB will not be able to parse integer values from the
   CLI; for example if the MAX_VALUE_SIZE could be set to 1 then GDB would
   be unable to parse "set max-value-size 2".

   As we want a consistent GDB experience across hosts with different sizes
   of LONGEST, this arbitrary minimum value was selected, so long as this
   is bigger than LONGEST on all GDB supported hosts we're fine.  */

#define MIN_VALUE_FOR_MAX_VALUE_SIZE 16
gdb_static_assert (sizeof (LONGEST) <= MIN_VALUE_FOR_MAX_VALUE_SIZE);
975
976 /* Implement the "set max-value-size" command. */
977
978 static void
979 set_max_value_size (char *args, int from_tty,
980 struct cmd_list_element *c)
981 {
982 gdb_assert (max_value_size == -1 || max_value_size >= 0);
983
984 if (max_value_size > -1 && max_value_size < MIN_VALUE_FOR_MAX_VALUE_SIZE)
985 {
986 max_value_size = MIN_VALUE_FOR_MAX_VALUE_SIZE;
987 error (_("max-value-size set too low, increasing to %d bytes"),
988 max_value_size);
989 }
990 }
991
992 /* Implement the "show max-value-size" command. */
993
994 static void
995 show_max_value_size (struct ui_file *file, int from_tty,
996 struct cmd_list_element *c, const char *value)
997 {
998 if (max_value_size == -1)
999 fprintf_filtered (file, _("Maximum value size is unlimited.\n"));
1000 else
1001 fprintf_filtered (file, _("Maximum value size is %d bytes.\n"),
1002 max_value_size);
1003 }
1004
1005 /* Called before we attempt to allocate or reallocate a buffer for the
1006 contents of a value. TYPE is the type of the value for which we are
1007 allocating the buffer. If the buffer is too large (based on the user
1008 controllable setting) then throw an error. If this function returns
1009 then we should attempt to allocate the buffer. */
1010
1011 static void
1012 check_type_length_before_alloc (const struct type *type)
1013 {
1014 unsigned int length = TYPE_LENGTH (type);
1015
1016 if (max_value_size > -1 && length > max_value_size)
1017 {
1018 if (TYPE_NAME (type) != NULL)
1019 error (_("value of type `%s' requires %u bytes, which is more "
1020 "than max-value-size"), TYPE_NAME (type), length);
1021 else
1022 error (_("value requires %u bytes, which is more than "
1023 "max-value-size"), length);
1024 }
1025 }
1026
1027 /* Allocate the contents of VAL if it has not been allocated yet. */
1028
1029 static void
1030 allocate_value_contents (struct value *val)
1031 {
1032 if (!val->contents)
1033 {
1034 check_type_length_before_alloc (val->enclosing_type);
1035 val->contents
1036 = (gdb_byte *) xzalloc (TYPE_LENGTH (val->enclosing_type));
1037 }
1038 }
1039
/* Allocate a value and its contents for type TYPE.  */

struct value *
allocate_value (struct type *type)
{
  struct value *val = allocate_value_lazy (type);

  allocate_value_contents (val);
  /* The contents buffer exists now, so the value is no longer lazy.  */
  val->lazy = 0;
  return val;
}
1051
1052 /* Allocate a value that has the correct length
1053 for COUNT repetitions of type TYPE. */
1054
struct value *
allocate_repeat_value (struct type *type, int count)
{
  /* The array's lower bound follows the current language's string
     lower bound convention.  */
  int low_bound = current_language->string_lower_bound;		/* ??? */
  /* FIXME-type-allocation: need a way to free this type when we are
     done with it.  */
  struct type *array_type
    = lookup_array_range_type (type, low_bound, count + low_bound - 1);

  return allocate_value (array_type);
}
1066
/* Allocate a lazy value of type TYPE whose contents are produced by
   the FUNCS callbacks; CLOSURE is the user data passed to them.  */

struct value *
allocate_computed_value (struct type *type,
			 const struct lval_funcs *funcs,
			 void *closure)
{
  struct value *v = allocate_value_lazy (type);

  VALUE_LVAL (v) = lval_computed;
  v->location.computed.funcs = funcs;
  v->location.computed.closure = closure;

  return v;
}
1080
1081 /* Allocate NOT_LVAL value for type TYPE being OPTIMIZED_OUT. */
1082
struct value *
allocate_optimized_out_value (struct type *type)
{
  struct value *retval = allocate_value_lazy (type);

  /* Mark every byte optimized out and clear the lazy flag so that no
     fetch is ever attempted for this value.  */
  mark_value_bytes_optimized_out (retval, 0, TYPE_LENGTH (type));
  set_value_lazy (retval, 0);
  return retval;
}
1092
1093 /* Accessor methods. */
1094
/* Return the value that follows VALUE on its chain.  */

struct value *
value_next (struct value *value)
{
  return value->next;
}
1100
/* Return the declared type of VALUE.  */

struct type *
value_type (const struct value *value)
{
  return value->type;
}
/* Overwrite VALUE's type in place.  Deprecated mutator.  */
void
deprecated_set_value_type (struct value *value, struct type *type)
{
  value->type = type;
}
1111
/* Return VALUE's offset field.  */
int
value_offset (const struct value *value)
{
  return value->offset;
}
/* Set VALUE's offset field.  */
void
set_value_offset (struct value *value, int offset)
{
  value->offset = offset;
}
1122
/* Return VALUE's bit position field.  */
int
value_bitpos (const struct value *value)
{
  return value->bitpos;
}
/* Set VALUE's bit position field.  */
void
set_value_bitpos (struct value *value, int bit)
{
  value->bitpos = bit;
}
1133
/* Return VALUE's bit size field.  */
int
value_bitsize (const struct value *value)
{
  return value->bitsize;
}
/* Set VALUE's bit size field.  */
void
set_value_bitsize (struct value *value, int bit)
{
  value->bitsize = bit;
}
1144
/* Return VALUE's parent value, or NULL if it has none.  */

struct value *
value_parent (struct value *value)
{
  return value->parent;
}
1150
1151 /* See value.h. */
1152
void
set_value_parent (struct value *value, struct value *parent)
{
  struct value *old = value->parent;

  /* Acquire the new reference before releasing the old one; this
     keeps the operation safe even when PARENT == OLD.  */
  value->parent = parent;
  if (parent != NULL)
    value_incref (parent);
  value_free (old);
}
1163
/* Return a writable pointer to VALUE's contents at its embedded
   offset, allocating the buffer if necessary.  */

gdb_byte *
value_contents_raw (struct value *value)
{
  struct gdbarch *arch = get_value_arch (value);
  int unit_size = gdbarch_addressable_memory_unit_size (arch);

  allocate_value_contents (value);
  /* embedded_offset counts addressable memory units, so scale it to
     bytes when indexing the contents buffer.  */
  return value->contents + value->embedded_offset * unit_size;
}
1173
/* Like value_contents_raw, but without the embedded-offset
   adjustment: the buffer for the whole enclosing object.  */

gdb_byte *
value_contents_all_raw (struct value *value)
{
  allocate_value_contents (value);
  return value->contents;
}

/* Return VALUE's enclosing type.  */

struct type *
value_enclosing_type (struct value *value)
{
  return value->enclosing_type;
}
1186
1187 /* Look at value.h for description. */
1188
struct type *
value_actual_type (struct value *value, int resolve_simple_types,
		   int *real_type_found)
{
  struct value_print_options opts;
  struct type *result;

  get_user_print_options (&opts);

  if (real_type_found)
    *real_type_found = 0;
  /* Default answer: the declared type.  */
  result = value_type (value);
  /* Only attempt dynamic type resolution when the user has enabled
     "set print object".  */
  if (opts.objectprint)
    {
      /* If result's target type is TYPE_CODE_STRUCT, proceed to
	 fetch its rtti type.  */
      if ((TYPE_CODE (result) == TYPE_CODE_PTR
	   || TYPE_CODE (result) == TYPE_CODE_REF)
	  && TYPE_CODE (check_typedef (TYPE_TARGET_TYPE (result)))
	     == TYPE_CODE_STRUCT)
	{
	  struct type *real_type;

	  real_type = value_rtti_indirect_type (value, NULL, NULL, NULL);
	  if (real_type)
	    {
	      if (real_type_found)
		*real_type_found = 1;
	      result = real_type;
	    }
	}
      else if (resolve_simple_types)
	{
	  /* For non-struct-pointer types, the enclosing type is the
	     best available "actual" type.  */
	  if (real_type_found)
	    *real_type_found = 1;
	  result = value_enclosing_type (value);
	}
    }

  return result;
}
1230
/* Throw the generic "optimized out" error.  */

void
error_value_optimized_out (void)
{
  error (_("value has been optimized out"));
}
1236
/* Error out if any part of VALUE has been marked optimized out.  */

static void
require_not_optimized_out (const struct value *value)
{
  if (!VEC_empty (range_s, value->optimized_out))
    {
      /* Registers are typically optimized out because the frame did
	 not save them; give the more specific message then.  */
      if (value->lval == lval_register)
	error (_("register has not been saved in frame"));
      else
	error_value_optimized_out ();
    }
}
1248
/* Throw NOT_AVAILABLE_ERROR if any part of VALUE is unavailable.  */

static void
require_available (const struct value *value)
{
  if (!VEC_empty (range_s, value->unavailable))
    throw_error (NOT_AVAILABLE_ERROR, _("value is not available"));
}
1255
/* Return VALUE's contents, fetching them if lazy.  Unlike
   value_contents_all, does NOT error on unavailable or optimized-out
   ranges -- printing code handles those itself.  */

const gdb_byte *
value_contents_for_printing (struct value *value)
{
  if (value->lazy)
    value_fetch_lazy (value);
  return value->contents;
}
1263
/* Const variant of value_contents_for_printing; the caller must
   guarantee VALUE is not lazy.  */

const gdb_byte *
value_contents_for_printing_const (const struct value *value)
{
  gdb_assert (!value->lazy);
  return value->contents;
}
1270
/* Return VALUE's full contents, erroring out if any part is
   optimized out or unavailable.  */

const gdb_byte *
value_contents_all (struct value *value)
{
  const gdb_byte *result = value_contents_for_printing (value);
  require_not_optimized_out (value);
  require_available (value);
  return result;
}
1279
1280 /* Copy ranges in SRC_RANGE that overlap [SRC_BIT_OFFSET,
1281 SRC_BIT_OFFSET+BIT_LENGTH) ranges into *DST_RANGE, adjusted. */
1282
static void
ranges_copy_adjusted (VEC (range_s) **dst_range, int dst_bit_offset,
		      VEC (range_s) *src_range, int src_bit_offset,
		      int bit_length)
{
  range_s *r;
  int i;

  for (i = 0; VEC_iterate (range_s, src_range, i, r); i++)
    {
      ULONGEST h, l;

      /* Intersect range R with the window [SRC_BIT_OFFSET,
	 SRC_BIT_OFFSET + BIT_LENGTH).  */
      l = max (r->offset, src_bit_offset);
      h = min (r->offset + r->length, src_bit_offset + bit_length);

      /* A non-empty intersection is re-based relative to
	 DST_BIT_OFFSET and recorded in the destination vector.  */
      if (l < h)
	insert_into_bit_range_vector (dst_range,
				      dst_bit_offset + (l - src_bit_offset),
				      h - l);
    }
}
1304
1305 /* Copy the ranges metadata in SRC that overlaps [SRC_BIT_OFFSET,
1306 SRC_BIT_OFFSET+BIT_LENGTH) into DST, adjusted. */
1307
static void
value_ranges_copy_adjusted (struct value *dst, int dst_bit_offset,
			    const struct value *src, int src_bit_offset,
			    int bit_length)
{
  /* Both kinds of range metadata are propagated: unavailable and
     optimized-out bits.  */
  ranges_copy_adjusted (&dst->unavailable, dst_bit_offset,
			src->unavailable, src_bit_offset,
			bit_length);
  ranges_copy_adjusted (&dst->optimized_out, dst_bit_offset,
			src->optimized_out, src_bit_offset,
			bit_length);
}
1320
1321 /* Copy LENGTH target addressable memory units of SRC value's (all) contents
1322 (value_contents_all) starting at SRC_OFFSET, into DST value's (all)
1323 contents, starting at DST_OFFSET. If unavailable contents are
1324 being copied from SRC, the corresponding DST contents are marked
1325 unavailable accordingly. Neither DST nor SRC may be lazy
1326 values.
1327
1328 It is assumed the contents of DST in the [DST_OFFSET,
1329 DST_OFFSET+LENGTH) range are wholly available. */
1330
void
value_contents_copy_raw (struct value *dst, int dst_offset,
			 struct value *src, int src_offset, int length)
{
  range_s *r;
  int src_bit_offset, dst_bit_offset, bit_length;
  struct gdbarch *arch = get_value_arch (src);
  int unit_size = gdbarch_addressable_memory_unit_size (arch);

  /* A lazy DST would make that this copy operation useless, since as
     soon as DST's contents were un-lazied (by a later value_contents
     call, say), the contents would be overwritten.  A lazy SRC would
     mean we'd be copying garbage.  */
  gdb_assert (!dst->lazy && !src->lazy);

  /* The overwritten DST range gets unavailability ORed in, not
     replaced.  Make sure to remember to implement replacing if it
     turns out actually necessary.  */
  gdb_assert (value_bytes_available (dst, dst_offset, length));
  gdb_assert (!value_bits_any_optimized_out (dst,
					     TARGET_CHAR_BIT * dst_offset,
					     TARGET_CHAR_BIT * length));

  /* Copy the data.  */
  memcpy (value_contents_all_raw (dst) + dst_offset * unit_size,
	  value_contents_all_raw (src) + src_offset * unit_size,
	  length * unit_size);

  /* Copy the meta-data, adjusted.  */
  /* NOTE(review): these offsets use HOST_CHAR_BIT while the
     assertions above use TARGET_CHAR_BIT; they agree only when host
     and target char widths match -- confirm this is intended.  */
  src_bit_offset = src_offset * unit_size * HOST_CHAR_BIT;
  dst_bit_offset = dst_offset * unit_size * HOST_CHAR_BIT;
  bit_length = length * unit_size * HOST_CHAR_BIT;

  value_ranges_copy_adjusted (dst, dst_bit_offset,
			      src, src_bit_offset,
			      bit_length);
}
1368
1369 /* Copy LENGTH bytes of SRC value's (all) contents
1370 (value_contents_all) starting at SRC_OFFSET byte, into DST value's
1371 (all) contents, starting at DST_OFFSET. If unavailable contents
1372 are being copied from SRC, the corresponding DST contents are
1373 marked unavailable accordingly. DST must not be lazy. If SRC is
1374 lazy, it will be fetched now.
1375
1376 It is assumed the contents of DST in the [DST_OFFSET,
1377 DST_OFFSET+LENGTH) range are wholly available. */
1378
void
value_contents_copy (struct value *dst, int dst_offset,
		     struct value *src, int src_offset, int length)
{
  /* Un-lazy SRC first; value_contents_copy_raw asserts both values
     are non-lazy.  */
  if (src->lazy)
    value_fetch_lazy (src);

  value_contents_copy_raw (dst, dst_offset, src, src_offset, length);
}
1388
/* Return VALUE's lazy flag.  */
int
value_lazy (struct value *value)
{
  return value->lazy;
}

/* Set VALUE's lazy flag.  */
void
set_value_lazy (struct value *value, int val)
{
  value->lazy = val;
}
1400
/* Return VALUE's stack flag.  */
int
value_stack (struct value *value)
{
  return value->stack;
}

/* Set VALUE's stack flag.  */
void
set_value_stack (struct value *value, int val)
{
  value->stack = val;
}
1412
/* Return VALUE's contents at its embedded offset, erroring out if any
   part is optimized out or unavailable.  */

const gdb_byte *
value_contents (struct value *value)
{
  const gdb_byte *result = value_contents_writeable (value);
  require_not_optimized_out (value);
  require_available (value);
  return result;
}
1421
/* Writable variant of value_contents: fetch if lazy, then hand out
   the raw buffer at the embedded offset.  */

gdb_byte *
value_contents_writeable (struct value *value)
{
  if (value->lazy)
    value_fetch_lazy (value);
  return value_contents_raw (value);
}
1429
int
value_optimized_out (struct value *value)
{
  /* We can only know if a value is optimized out once we have tried to
     fetch it.  */
  /* NOTE(review): presumably value_fetch_lazy records optimized-out
     ranges as a side effect -- verify against its definition.  */
  if (VEC_empty (range_s, value->optimized_out) && value->lazy)
    value_fetch_lazy (value);

  return !VEC_empty (range_s, value->optimized_out);
}
1440
1441 /* Mark contents of VALUE as optimized out, starting at OFFSET bytes, and
1442 the following LENGTH bytes. */
1443
void
mark_value_bytes_optimized_out (struct value *value, int offset, int length)
{
  /* Convert the byte range to bits and delegate.  */
  mark_value_bits_optimized_out (value,
				 offset * TARGET_CHAR_BIT,
				 length * TARGET_CHAR_BIT);
}
1451
1452 /* See value.h. */
1453
void
mark_value_bits_optimized_out (struct value *value, int offset, int length)
{
  /* OFFSET and LENGTH are in bits.  */
  insert_into_bit_range_vector (&value->optimized_out, offset, length);
}
1459
/* Return nonzero if bits [OFFSET, OFFSET+LENGTH) of VALUE form a
   synthetic pointer.  Only computed lvalues whose funcs provide a
   check_synthetic_pointer callback can answer yes.  */

int
value_bits_synthetic_pointer (const struct value *value,
			      int offset, int length)
{
  if (value->lval != lval_computed
      || !value->location.computed.funcs->check_synthetic_pointer)
    return 0;
  return value->location.computed.funcs->check_synthetic_pointer (value,
								  offset,
								  length);
}
1471
/* Return VALUE's embedded offset field.  */
int
value_embedded_offset (struct value *value)
{
  return value->embedded_offset;
}

/* Set VALUE's embedded offset field.  */
void
set_value_embedded_offset (struct value *value, int val)
{
  value->embedded_offset = val;
}
1483
/* Return VALUE's pointed-to offset field.  */
int
value_pointed_to_offset (struct value *value)
{
  return value->pointed_to_offset;
}

/* Set VALUE's pointed-to offset field.  */
void
set_value_pointed_to_offset (struct value *value, int val)
{
  value->pointed_to_offset = val;
}
1495
/* Return the lval_funcs of the computed lvalue V.  V must be a
   computed lvalue.  */

const struct lval_funcs *
value_computed_funcs (const struct value *v)
{
  gdb_assert (value_lval_const (v) == lval_computed);

  return v->location.computed.funcs;
}

/* Return the closure of the computed lvalue V.  V must be a computed
   lvalue.  */

void *
value_computed_closure (const struct value *v)
{
  gdb_assert (v->lval == lval_computed);

  return v->location.computed.closure;
}
1511
/* Deprecated: expose a writable pointer to VALUE's lval kind.  */
enum lval_type *
deprecated_value_lval_hack (struct value *value)
{
  return &value->lval;
}

/* Return VALUE's lval kind (read-only accessor).  */
enum lval_type
value_lval_const (const struct value *value)
{
  return value->lval;
}
1523
/* Return the address of VALUE, applying its offset.  Internal
   variables and xmethod values have no address; return 0 for those.
   A component value derives its address from its parent.  */

CORE_ADDR
value_address (const struct value *value)
{
  if (value->lval == lval_internalvar
      || value->lval == lval_internalvar_component
      || value->lval == lval_xcallable)
    return 0;
  if (value->parent != NULL)
    return value_address (value->parent) + value->offset;
  else
    return value->location.address + value->offset;
}
1536
/* Like value_address, but without applying the offset or following
   the parent chain.  */

CORE_ADDR
value_raw_address (struct value *value)
{
  if (value->lval == lval_internalvar
      || value->lval == lval_internalvar_component
      || value->lval == lval_xcallable)
    return 0;
  return value->location.address;
}
1546
/* Set VALUE's address.  Not meaningful for internal variables or
   xmethod values, hence the assertion.  */

void
set_value_address (struct value *value, CORE_ADDR addr)
{
  gdb_assert (value->lval != lval_internalvar
	      && value->lval != lval_internalvar_component
	      && value->lval != lval_xcallable);
  value->location.address = addr;
}
1555
/* Deprecated: writable pointer to VALUE's internalvar location.  */
struct internalvar **
deprecated_value_internalvar_hack (struct value *value)
{
  return &value->location.internalvar;
}

/* Deprecated: writable pointer to VALUE's frame id.  */
struct frame_id *
deprecated_value_frame_id_hack (struct value *value)
{
  return &value->frame_id;
}

/* Deprecated: writable pointer to VALUE's register number.  */
short *
deprecated_value_regnum_hack (struct value *value)
{
  return &value->regnum;
}

/* Return VALUE's modifiable flag.  */
int
deprecated_value_modifiable (struct value *value)
{
  return value->modifiable;
}
1579 \f
1580 /* Return a mark in the value chain. All values allocated after the
1581 mark is obtained (except for those released) are subject to being freed
1582 if a subsequent value_free_to_mark is passed the mark. */
struct value *
value_mark (void)
{
  /* The current head of the chain itself serves as the mark.  */
  return all_values;
}
1588
1589 /* Take a reference to VAL. VAL will not be deallocated until all
1590 references are released. */
1591
void
value_incref (struct value *val)
{
  val->reference_count++;
}
1597
1598 /* Release a reference to VAL, which was acquired with value_incref.
1599 This function is also called to deallocate values from the value
1600 chain. */
1601
1602 void
1603 value_free (struct value *val)
1604 {
1605 if (val)
1606 {
1607 gdb_assert (val->reference_count > 0);
1608 val->reference_count--;
1609 if (val->reference_count > 0)
1610 return;
1611
1612 /* If there's an associated parent value, drop our reference to
1613 it. */
1614 if (val->parent != NULL)
1615 value_free (val->parent);
1616
1617 if (VALUE_LVAL (val) == lval_computed)
1618 {
1619 const struct lval_funcs *funcs = val->location.computed.funcs;
1620
1621 if (funcs->free_closure)
1622 funcs->free_closure (val);
1623 }
1624 else if (VALUE_LVAL (val) == lval_xcallable)
1625 free_xmethod_worker (val->location.xm_worker);
1626
1627 xfree (val->contents);
1628 VEC_free (range_s, val->unavailable);
1629 }
1630 xfree (val);
1631 }
1632
1633 /* Free all values allocated since MARK was obtained by value_mark
1634 (except for those released). */
void
value_free_to_mark (struct value *mark)
{
  struct value *val;
  struct value *next;

  /* Walk from the chain head down to (but not including) MARK,
     releasing each value.  */
  for (val = all_values; val && val != mark; val = next)
    {
      next = val->next;
      val->released = 1;
      value_free (val);
    }
  /* VAL is now MARK (or NULL); it becomes the new chain head.  */
  all_values = val;
}
1649
1650 /* Free all the values that have been allocated (except for those released).
1651 Call after each command, successful or not.
1652 In practice this is called before each command, which is sufficient. */
1653
void
free_all_values (void)
{
  struct value *val;
  struct value *next;

  /* Release every value still on the chain; released values are not
     on the chain and so are unaffected.  */
  for (val = all_values; val; val = next)
    {
      next = val->next;
      val->released = 1;
      value_free (val);
    }

  all_values = 0;
}
1669
1670 /* Frees all the elements in a chain of values. */
1671
void
free_value_chain (struct value *v)
{
  struct value *next;

  /* Save the link before freeing each node.  */
  for (; v; v = next)
    {
      next = value_next (v);
      value_free (v);
    }
}
1683
1684 /* Remove VAL from the chain all_values
1685 so it will not be freed automatically. */
1686
1687 void
1688 release_value (struct value *val)
1689 {
1690 struct value *v;
1691
1692 if (all_values == val)
1693 {
1694 all_values = val->next;
1695 val->next = NULL;
1696 val->released = 1;
1697 return;
1698 }
1699
1700 for (v = all_values; v; v = v->next)
1701 {
1702 if (v->next == val)
1703 {
1704 v->next = val->next;
1705 val->next = NULL;
1706 val->released = 1;
1707 break;
1708 }
1709 }
1710 }
1711
1712 /* If the value is not already released, release it.
1713 If the value is already released, increment its reference count.
1714 That is, this function ensures that the value is released from the
1715 value chain and that the caller owns a reference to it. */
1716
void
release_value_or_incref (struct value *val)
{
  /* An already-released value is not on the chain, so the caller's
     ownership is expressed by taking an extra reference instead.  */
  if (val->released)
    value_incref (val);
  else
    release_value (val);
}
1725
1726 /* Release all values up to mark */
struct value *
value_release_to_mark (struct value *mark)
{
  struct value *val;
  struct value *next;

  /* Mark each value down to (but not including) MARK as released,
     then detach that prefix from the chain and return its head.  */
  /* NOTE(review): if MARK happens to equal all_values, the loop never
     sees NEXT->next == MARK and releases the entire chain -- confirm
     callers never pass the current head.  */
  for (val = next = all_values; next; next = next->next)
    {
      if (next->next == mark)
	{
	  all_values = next->next;
	  next->next = NULL;
	  return val;
	}
      next->released = 1;
    }
  all_values = 0;
  return val;
}
1746
1747 /* Return a copy of the value ARG.
1748 It contains the same contents, for same memory address,
1749 but it's a different block of storage. */
1750
struct value *
value_copy (struct value *arg)
{
  struct type *encl_type = value_enclosing_type (arg);
  struct value *val;

  /* Preserve laziness: a lazy source yields a lazy copy with no
     contents buffer.  */
  if (value_lazy (arg))
    val = allocate_value_lazy (encl_type);
  else
    val = allocate_value (encl_type);
  val->type = arg->type;
  VALUE_LVAL (val) = VALUE_LVAL (arg);
  val->location = arg->location;
  val->offset = arg->offset;
  val->bitpos = arg->bitpos;
  val->bitsize = arg->bitsize;
  VALUE_FRAME_ID (val) = VALUE_FRAME_ID (arg);
  VALUE_REGNUM (val) = VALUE_REGNUM (arg);
  val->lazy = arg->lazy;
  val->embedded_offset = value_embedded_offset (arg);
  val->pointed_to_offset = arg->pointed_to_offset;
  val->modifiable = arg->modifiable;
  if (!value_lazy (val))
    {
      memcpy (value_contents_all_raw (val), value_contents_all_raw (arg),
	      TYPE_LENGTH (value_enclosing_type (arg)));

    }
  /* Duplicate the range metadata so the copy owns its own vectors.  */
  val->unavailable = VEC_copy (range_s, arg->unavailable);
  val->optimized_out = VEC_copy (range_s, arg->optimized_out);
  /* Takes a new reference to the parent, if any.  */
  set_value_parent (val, arg->parent);
  if (VALUE_LVAL (val) == lval_computed)
    {
      const struct lval_funcs *funcs = val->location.computed.funcs;

      /* Let computed lvalues clone their closure as well.  */
      if (funcs->copy_closure)
	val->location.computed.closure = funcs->copy_closure (val);
    }
  return val;
}
1791
1792 /* Return a "const" and/or "volatile" qualified version of the value V.
1793 If CNST is true, then the returned value will be qualified with
1794 "const".
1795 if VOLTL is true, then the returned value will be qualified with
1796 "volatile". */
1797
struct value *
make_cv_value (int cnst, int voltl, struct value *v)
{
  struct type *val_type = value_type (v);
  struct type *enclosing_type = value_enclosing_type (v);
  struct value *cv_val = value_copy (v);

  /* Qualify both the declared and the enclosing type so the copy is
     consistently const/volatile.  */
  deprecated_set_value_type (cv_val,
			     make_cv_type (cnst, voltl, val_type, NULL));
  set_value_enclosing_type (cv_val,
			    make_cv_type (cnst, voltl, enclosing_type, NULL));

  return cv_val;
}
1812
1813 /* Return a version of ARG that is non-lvalue. */
1814
struct value *
value_non_lval (struct value *arg)
{
  /* An lvalue is detached by copying its full contents into a fresh
     not_lval value; a non-lvalue is returned unchanged.  */
  if (VALUE_LVAL (arg) != not_lval)
    {
      struct type *enc_type = value_enclosing_type (arg);
      struct value *val = allocate_value (enc_type);

      memcpy (value_contents_all_raw (val), value_contents_all (arg),
	      TYPE_LENGTH (enc_type));
      val->type = arg->type;
      set_value_embedded_offset (val, value_embedded_offset (arg));
      set_value_pointed_to_offset (val, value_pointed_to_offset (arg));
      return val;
    }
  return arg;
}
1832
1833 /* Write contents of V at ADDR and set its lval type to be LVAL_MEMORY. */
1834
void
value_force_lval (struct value *v, CORE_ADDR addr)
{
  gdb_assert (VALUE_LVAL (v) == not_lval);

  /* Write the contents to target memory, then rebind V to that
     location.  */
  write_memory (addr, value_contents_raw (v), TYPE_LENGTH (value_type (v)));
  v->lval = lval_memory;
  v->location.address = addr;
}
1844
/* Make COMPONENT's location the same as WHOLE's, adjusting the lval
   kind for internalvar components and cloning computed closures.  */

void
set_value_component_location (struct value *component,
			      const struct value *whole)
{
  gdb_assert (whole->lval != lval_xcallable);

  /* A piece of an internal variable is tracked with its own lval
     kind.  */
  if (whole->lval == lval_internalvar)
    VALUE_LVAL (component) = lval_internalvar_component;
  else
    VALUE_LVAL (component) = whole->lval;

  component->location = whole->location;
  if (whole->lval == lval_computed)
    {
      const struct lval_funcs *funcs = whole->location.computed.funcs;

      /* The component needs its own closure reference/copy.  */
      if (funcs->copy_closure)
	component->location.computed.closure = funcs->copy_closure (whole);
    }
}
1865
1866 \f
1867 /* Access to the value history. */
1868
1869 /* Record a new value in the value history.
1870 Returns the absolute history index of the entry. */
1871
int
record_latest_value (struct value *val)
{
  int i;

  /* We don't want this value to have anything to do with the inferior anymore.
     In particular, "set $1 = 50" should not affect the variable from which
     the value was taken, and fast watchpoints should be able to assume that
     a value on the value history never changes.  */
  if (value_lazy (val))
    value_fetch_lazy (val);
  /* We preserve VALUE_LVAL so that the user can find out where it was fetched
     from.  This is a bit dubious, because then *&$1 does not just return $1
     but the current contents of that location.  c'est la vie...  */
  val->modifiable = 0;

  /* The value may have already been released, in which case we're adding a
     new reference for its entry in the history.  That is why we call
     release_value_or_incref here instead of release_value.  */
  release_value_or_incref (val);

  /* Here we treat value_history_count as origin-zero
     and applying to the value being stored now.  */

  /* Start a fresh chunk whenever the current one is full.  */
  i = value_history_count % VALUE_HISTORY_CHUNK;
  if (i == 0)
    {
      struct value_history_chunk *newobj = XCNEW (struct value_history_chunk);

      newobj->next = value_history_chain;
      value_history_chain = newobj;
    }

  value_history_chain->values[i] = val;

  /* Now we regard value_history_count as origin-one
     and applying to the value just stored.  */

  return ++value_history_count;
}
1912
1913 /* Return a copy of the value in the history with sequence number NUM. */
1914
struct value *
access_value_history (int num)
{
  struct value_history_chunk *chunk;
  int i;
  int absnum = num;

  /* NUM <= 0 is relative to the end of the history ($$n syntax).  */
  if (absnum <= 0)
    absnum += value_history_count;

  if (absnum <= 0)
    {
      if (num == 0)
	error (_("The history is empty."));
      else if (num == 1)
	error (_("There is only one value in the history."));
      else
	error (_("History does not go back to $$%d."), -num);
    }
  if (absnum > value_history_count)
    error (_("History has not yet reached $%d."), absnum);

  absnum--;

  /* Now absnum is always absolute and origin zero.  */

  /* The chunk list is newest-first, so skip forward past the chunks
     that hold more recent entries than the one requested.  */
  chunk = value_history_chain;
  for (i = (value_history_count - 1) / VALUE_HISTORY_CHUNK
	 - absnum / VALUE_HISTORY_CHUNK;
       i > 0; i--)
    chunk = chunk->next;

  return value_copy (chunk->values[absnum % VALUE_HISTORY_CHUNK]);
}
1949
/* Implement the "show values" command: print ten history entries,
   remembering the position so "show values +" continues from there.  */

static void
show_values (char *num_exp, int from_tty)
{
  int i;
  struct value *val;
  static int num = 1;

  if (num_exp)
    {
      /* "show values +" should print from the stored position.
	 "show values <exp>" should print around value number <exp>.  */
      if (num_exp[0] != '+' || num_exp[1] != '\0')
	/* Center the ten-entry window around <exp>.  */
	num = parse_and_eval_long (num_exp) - 5;
    }
  else
    {
      /* "show values" means print the last 10 values.  */
      num = value_history_count - 9;
    }

  if (num <= 0)
    num = 1;

  for (i = num; i < num + 10 && i <= value_history_count; i++)
    {
      struct value_print_options opts;

      val = access_value_history (i);
      printf_filtered (("$%d = "), i);
      get_user_print_options (&opts);
      value_print (val, gdb_stdout, &opts);
      printf_filtered (("\n"));
    }

  /* The next "show values +" should start after what we just printed.  */
  num += 10;

  /* Hitting just return after this command should do the same thing as
     "show values +".  If num_exp is null, this is unnecessary, since
     "show values +" is not useful after "show values".  */
  if (from_tty && num_exp)
    {
      num_exp[0] = '+';
      num_exp[1] = '\0';
    }
}
1996 \f
/* The possible kinds of content an internal variable can hold;
   discriminates union internalvar_data below.  */

enum internalvar_kind
{
  /* The internal variable is empty.  */
  INTERNALVAR_VOID,

  /* The value of the internal variable is provided directly as
     a GDB value object.  */
  INTERNALVAR_VALUE,

  /* A fresh value is computed via a call-back routine on every
     access to the internal variable.  */
  INTERNALVAR_MAKE_VALUE,

  /* The internal variable holds a GDB internal convenience function.  */
  INTERNALVAR_FUNCTION,

  /* The variable holds an integer value.  */
  INTERNALVAR_INTEGER,

  /* The variable holds a GDB-provided string.  */
  INTERNALVAR_STRING,
};
2019
/* Storage for each kind of internal variable content; which member is
   active is determined by the enum internalvar_kind tag.  */

union internalvar_data
{
  /* A value object used with INTERNALVAR_VALUE.  */
  struct value *value;

  /* The call-back routine used with INTERNALVAR_MAKE_VALUE.  */
  struct
  {
    /* The functions to call.  */
    const struct internalvar_funcs *functions;

    /* The function's user-data.  */
    void *data;
  } make_value;

  /* The internal function used with INTERNALVAR_FUNCTION.  */
  struct
  {
    struct internal_function *function;
    /* True if this is the canonical name for the function.  */
    int canonical;
  } fn;

  /* An integer value used with INTERNALVAR_INTEGER.  */
  struct
  {
    /* If type is non-NULL, it will be used as the type to generate
       a value for this internal variable.  If type is NULL, a default
       integer type for the architecture is used.  */
    struct type *type;
    LONGEST val;
  } integer;

  /* A string value used with INTERNALVAR_STRING.  */
  char *string;
};
2056
2057 /* Internal variables. These are variables within the debugger
2058 that hold values assigned by debugger commands.
2059 The user refers to them with a '$' prefix
2060 that does not appear in the variable names stored internally. */
2061
struct internalvar
{
  /* Next variable on the singly-linked list headed by `internalvars'.  */
  struct internalvar *next;
  /* The variable's name, without the leading '$'.  */
  char *name;

  /* We support various different kinds of content of an internal variable.
     enum internalvar_kind specifies the kind, and union internalvar_data
     provides the data associated with this particular kind.  */

  enum internalvar_kind kind;

  union internalvar_data u;
};

/* Head of the list of all internal variables.  */
static struct internalvar *internalvars;
2077
2078 /* If the variable does not already exist create it and give it the
2079 value given. If no value is given then the default is zero. */
2080 static void
2081 init_if_undefined_command (char* args, int from_tty)
2082 {
2083 struct internalvar* intvar;
2084
2085 /* Parse the expression - this is taken from set_command(). */
2086 struct expression *expr = parse_expression (args);
2087 register struct cleanup *old_chain =
2088 make_cleanup (free_current_contents, &expr);
2089
2090 /* Validate the expression.
2091 Was the expression an assignment?
2092 Or even an expression at all? */
2093 if (expr->nelts == 0 || expr->elts[0].opcode != BINOP_ASSIGN)
2094 error (_("Init-if-undefined requires an assignment expression."));
2095
2096 /* Extract the variable from the parsed expression.
2097 In the case of an assign the lvalue will be in elts[1] and elts[2]. */
2098 if (expr->elts[1].opcode != OP_INTERNALVAR)
2099 error (_("The first parameter to init-if-undefined "
2100 "should be a GDB variable."));
2101 intvar = expr->elts[2].internalvar;
2102
2103 /* Only evaluate the expression if the lvalue is void.
2104 This may still fail if the expresssion is invalid. */
2105 if (intvar->kind == INTERNALVAR_VOID)
2106 evaluate_expression (expr);
2107
2108 do_cleanups (old_chain);
2109 }
2110
2111
2112 /* Look up an internal variable with name NAME. NAME should not
2113 normally include a dollar sign.
2114
2115 If the specified internal variable does not exist,
2116 the return value is NULL. */
2117
struct internalvar *
lookup_only_internalvar (const char *name)
{
  struct internalvar *var;

  /* Linear scan of the global internal-variable list.  */
  for (var = internalvars; var; var = var->next)
    if (strcmp (var->name, name) == 0)
      return var;

  return NULL;
}
2129
2130 /* Complete NAME by comparing it to the names of internal variables.
2131 Returns a vector of newly allocated strings, or NULL if no matches
2132 were found. */
2133
2134 VEC (char_ptr) *
2135 complete_internalvar (const char *name)
2136 {
2137 VEC (char_ptr) *result = NULL;
2138 struct internalvar *var;
2139 int len;
2140
2141 len = strlen (name);
2142
2143 for (var = internalvars; var; var = var->next)
2144 if (strncmp (var->name, name, len) == 0)
2145 {
2146 char *r = xstrdup (var->name);
2147
2148 VEC_safe_push (char_ptr, result, r);
2149 }
2150
2151 return result;
2152 }
2153
2154 /* Create an internal variable with name NAME and with a void value.
2155 NAME should not normally include a dollar sign. */
2156
struct internalvar *
create_internalvar (const char *name)
{
  struct internalvar *var = XNEW (struct internalvar);

  /* The name is duplicated; the variable owns its copy.  */
  var->name = concat (name, (char *)NULL);
  var->kind = INTERNALVAR_VOID;
  /* Push onto the front of the global list.  */
  var->next = internalvars;
  internalvars = var;
  return var;
}
2168
2169 /* Create an internal variable with name NAME and register FUN as the
2170 function that value_of_internalvar uses to create a value whenever
2171 this variable is referenced. NAME should not normally include a
2172 dollar sign. DATA is passed uninterpreted to FUN when it is
2173 called. CLEANUP, if not NULL, is called when the internal variable
2174 is destroyed. It is passed DATA as its only argument. */
2175
struct internalvar *
create_internalvar_type_lazy (const char *name,
			      const struct internalvar_funcs *funcs,
			      void *data)
{
  struct internalvar *var = create_internalvar (name);

  /* Upgrade the freshly created void variable to a lazily computed
     one.  */
  var->kind = INTERNALVAR_MAKE_VALUE;
  var->u.make_value.functions = funcs;
  var->u.make_value.data = data;
  return var;
}
2188
2189 /* See documentation in value.h. */
2190
int
compile_internalvar_to_ax (struct internalvar *var,
			   struct agent_expr *expr,
			   struct axs_value *value)
{
  /* Only lazily computed variables with a compile_to_ax callback can
     be compiled to agent expressions.  */
  if (var->kind != INTERNALVAR_MAKE_VALUE
      || var->u.make_value.functions->compile_to_ax == NULL)
    return 0;

  var->u.make_value.functions->compile_to_ax (var, expr, value,
					      var->u.make_value.data);
  return 1;
}
2204
2205 /* Look up an internal variable with name NAME. NAME should not
2206 normally include a dollar sign.
2207
2208 If the specified internal variable does not exist,
2209 one is created, with a void value. */
2210
2211 struct internalvar *
2212 lookup_internalvar (const char *name)
2213 {
2214 struct internalvar *var;
2215
2216 var = lookup_only_internalvar (name);
2217 if (var)
2218 return var;
2219
2220 return create_internalvar (name);
2221 }
2222
/* Return current value of internal variable VAR.  For variables that
   are not inherently typed, use a value type appropriate for GDBARCH.
   The result is a fresh value; for most kinds it is marked as an
   lval_internalvar so that assignments write back to VAR.  */

struct value *
value_of_internalvar (struct gdbarch *gdbarch, struct internalvar *var)
{
  struct value *val;
  struct trace_state_variable *tsv;

  /* If there is a trace state variable of the same name, assume that
     is what we really want to see.  */
  tsv = find_trace_state_variable (var->name);
  if (tsv)
    {
      /* Ask the target for the current value; if it cannot supply
	 one, fall back to a void value.  */
      tsv->value_known = target_get_trace_state_variable_value (tsv->number,
								&(tsv->value));
      if (tsv->value_known)
	val = value_from_longest (builtin_type (gdbarch)->builtin_int64,
				  tsv->value);
      else
	val = allocate_value (builtin_type (gdbarch)->builtin_void);
      return val;
    }

  switch (var->kind)
    {
    case INTERNALVAR_VOID:
      /* Never assigned: reads back as void.  */
      val = allocate_value (builtin_type (gdbarch)->builtin_void);
      break;

    case INTERNALVAR_FUNCTION:
      val = allocate_value (builtin_type (gdbarch)->internal_fn);
      break;

    case INTERNALVAR_INTEGER:
      /* A NULL type means "use GDBARCH's default int type".  */
      if (!var->u.integer.type)
	val = value_from_longest (builtin_type (gdbarch)->builtin_int,
				  var->u.integer.val);
      else
	val = value_from_longest (var->u.integer.type, var->u.integer.val);
      break;

    case INTERNALVAR_STRING:
      val = value_cstring (var->u.string, strlen (var->u.string),
			   builtin_type (gdbarch)->builtin_char);
      break;

    case INTERNALVAR_VALUE:
      /* Hand back a copy so callers cannot mutate the stored value
	 behind VAR's back; force it non-lazy now.  */
      val = value_copy (var->u.value);
      if (value_lazy (val))
	value_fetch_lazy (val);
      break;

    case INTERNALVAR_MAKE_VALUE:
      /* Lazily-computed variable: delegate to the registered hook.  */
      val = (*var->u.make_value.functions->make_value) (gdbarch, var,
							var->u.make_value.data);
      break;

    default:
      internal_error (__FILE__, __LINE__, _("bad kind"));
    }

  /* Change the VALUE_LVAL to lval_internalvar so that future operations
     on this value go back to affect the original internal variable.

     Do not do this for INTERNALVAR_MAKE_VALUE variables, as those have
     no underlying modifyable state in the internal variable.

     Likewise, if the variable's value is a computed lvalue, we want
     references to it to produce another computed lvalue, where
     references and assignments actually operate through the
     computed value's functions.

     This means that internal variables with computed values
     behave a little differently from other internal variables:
     assignments to them don't just replace the previous value
     altogether.  At the moment, this seems like the behavior we
     want.  */

  if (var->kind != INTERNALVAR_MAKE_VALUE
      && val->lval != lval_computed)
    {
      VALUE_LVAL (val) = lval_internalvar;
      VALUE_INTERNALVAR (val) = var;
    }

  return val;
}
2311
2312 int
2313 get_internalvar_integer (struct internalvar *var, LONGEST *result)
2314 {
2315 if (var->kind == INTERNALVAR_INTEGER)
2316 {
2317 *result = var->u.integer.val;
2318 return 1;
2319 }
2320
2321 if (var->kind == INTERNALVAR_VALUE)
2322 {
2323 struct type *type = check_typedef (value_type (var->u.value));
2324
2325 if (TYPE_CODE (type) == TYPE_CODE_INT)
2326 {
2327 *result = value_as_long (var->u.value);
2328 return 1;
2329 }
2330 }
2331
2332 return 0;
2333 }
2334
2335 static int
2336 get_internalvar_function (struct internalvar *var,
2337 struct internal_function **result)
2338 {
2339 switch (var->kind)
2340 {
2341 case INTERNALVAR_FUNCTION:
2342 *result = var->u.fn.function;
2343 return 1;
2344
2345 default:
2346 return 0;
2347 }
2348 }
2349
/* Store NEWVAL into a component of internal variable VAR, located
   OFFSET units into its contents.  If BITSIZE is non-zero, the
   destination is the bitfield described by BITPOS/BITSIZE; otherwise
   the whole of NEWVAL's contents are copied in.  VAR must be of kind
   INTERNALVAR_VALUE -- any other kind is an internal error.  */

void
set_internalvar_component (struct internalvar *var, int offset, int bitpos,
			   int bitsize, struct value *newval)
{
  gdb_byte *addr;
  struct gdbarch *arch;
  int unit_size;

  switch (var->kind)
    {
    case INTERNALVAR_VALUE:
      addr = value_contents_writeable (var->u.value);
      arch = get_value_arch (var->u.value);
      /* Bytes per addressable memory unit on this architecture.  */
      unit_size = gdbarch_addressable_memory_unit_size (arch);

      if (bitsize)
	/* NOTE(review): unlike the memcpy branch below, OFFSET is not
	   scaled by UNIT_SIZE here -- presumably callers pass a byte
	   offset (or zero) for bitfield writes; confirm against
	   callers before changing.  */
	modify_field (value_type (var->u.value), addr + offset,
		      value_as_long (newval), bitpos, bitsize);
      else
	memcpy (addr + offset * unit_size, value_contents (newval),
		TYPE_LENGTH (value_type (newval)));
      break;

    default:
      /* We can never get a component of any other kind.  */
      internal_error (__FILE__, __LINE__, _("set_internalvar_component"));
    }
}
2378
/* Assign the value VAL to internal variable VAR, replacing whatever
   VAR held before.  Errors out (leaving VAR untouched) if VAR is a
   canonical convenience function, which must not be overwritten.  The
   new contents are fully prepared before the old ones are released,
   so a thrown error cannot leave VAR half-updated.  */

void
set_internalvar (struct internalvar *var, struct value *val)
{
  enum internalvar_kind new_kind;
  union internalvar_data new_data = { 0 };

  if (var->kind == INTERNALVAR_FUNCTION && var->u.fn.canonical)
    error (_("Cannot overwrite convenience function %s"), var->name);

  /* Prepare new contents.  */
  switch (TYPE_CODE (check_typedef (value_type (val))))
    {
    case TYPE_CODE_VOID:
      new_kind = INTERNALVAR_VOID;
      break;

    case TYPE_CODE_INTERNAL_FUNCTION:
      gdb_assert (VALUE_LVAL (val) == lval_internalvar);
      new_kind = INTERNALVAR_FUNCTION;
      get_internalvar_function (VALUE_INTERNALVAR (val),
				&new_data.fn.function);
      /* Copies created here are never canonical.  */
      break;

    default:
      new_kind = INTERNALVAR_VALUE;
      new_data.value = value_copy (val);
      new_data.value->modifiable = 1;

      /* Force the value to be fetched from the target now, to avoid problems
	 later when this internalvar is referenced and the target is gone or
	 has changed.  */
      if (value_lazy (new_data.value))
	value_fetch_lazy (new_data.value);

      /* Release the value from the value chain to prevent it from being
	 deleted by free_all_values.  From here on this function should not
	 call error () until new_data is installed into the var->u to avoid
	 leaking memory.  */
      release_value (new_data.value);
      break;
    }

  /* Clean up old contents.  */
  clear_internalvar (var);

  /* Switch over.  */
  var->kind = new_kind;
  var->u = new_data;
  /* End code which must not call error().  */
}
2430
2431 void
2432 set_internalvar_integer (struct internalvar *var, LONGEST l)
2433 {
2434 /* Clean up old contents. */
2435 clear_internalvar (var);
2436
2437 var->kind = INTERNALVAR_INTEGER;
2438 var->u.integer.type = NULL;
2439 var->u.integer.val = l;
2440 }
2441
2442 void
2443 set_internalvar_string (struct internalvar *var, const char *string)
2444 {
2445 /* Clean up old contents. */
2446 clear_internalvar (var);
2447
2448 var->kind = INTERNALVAR_STRING;
2449 var->u.string = xstrdup (string);
2450 }
2451
2452 static void
2453 set_internalvar_function (struct internalvar *var, struct internal_function *f)
2454 {
2455 /* Clean up old contents. */
2456 clear_internalvar (var);
2457
2458 var->kind = INTERNALVAR_FUNCTION;
2459 var->u.fn.function = f;
2460 var->u.fn.canonical = 1;
2461 /* Variables installed here are always the canonical version. */
2462 }
2463
2464 void
2465 clear_internalvar (struct internalvar *var)
2466 {
2467 /* Clean up old contents. */
2468 switch (var->kind)
2469 {
2470 case INTERNALVAR_VALUE:
2471 value_free (var->u.value);
2472 break;
2473
2474 case INTERNALVAR_STRING:
2475 xfree (var->u.string);
2476 break;
2477
2478 case INTERNALVAR_MAKE_VALUE:
2479 if (var->u.make_value.functions->destroy != NULL)
2480 var->u.make_value.functions->destroy (var->u.make_value.data);
2481 break;
2482
2483 default:
2484 break;
2485 }
2486
2487 /* Reset to void kind. */
2488 var->kind = INTERNALVAR_VOID;
2489 }
2490
2491 char *
2492 internalvar_name (struct internalvar *var)
2493 {
2494 return var->name;
2495 }
2496
2497 static struct internal_function *
2498 create_internal_function (const char *name,
2499 internal_function_fn handler, void *cookie)
2500 {
2501 struct internal_function *ifn = XNEW (struct internal_function);
2502
2503 ifn->name = xstrdup (name);
2504 ifn->handler = handler;
2505 ifn->cookie = cookie;
2506 return ifn;
2507 }
2508
2509 char *
2510 value_internal_function_name (struct value *val)
2511 {
2512 struct internal_function *ifn;
2513 int result;
2514
2515 gdb_assert (VALUE_LVAL (val) == lval_internalvar);
2516 result = get_internalvar_function (VALUE_INTERNALVAR (val), &ifn);
2517 gdb_assert (result);
2518
2519 return ifn->name;
2520 }
2521
2522 struct value *
2523 call_internal_function (struct gdbarch *gdbarch,
2524 const struct language_defn *language,
2525 struct value *func, int argc, struct value **argv)
2526 {
2527 struct internal_function *ifn;
2528 int result;
2529
2530 gdb_assert (VALUE_LVAL (func) == lval_internalvar);
2531 result = get_internalvar_function (VALUE_INTERNALVAR (func), &ifn);
2532 gdb_assert (result);
2533
2534 return (*ifn->handler) (gdbarch, language, ifn->cookie, argc, argv);
2535 }
2536
/* The 'function' command.  This does nothing -- it is just a
   placeholder to let "help function NAME" work.  This is also used as
   the implementation of the sub-command that is created when
   registering an internal function.  */

static void
function_command (char *command, int from_tty)
{
  /* Intentionally a no-op; only the command's help text matters.  */
}
2546
2547 /* Clean up if an internal function's command is destroyed. */
2548 static void
2549 function_destroyer (struct cmd_list_element *self, void *ignore)
2550 {
2551 xfree ((char *) self->name);
2552 xfree ((char *) self->doc);
2553 }
2554
2555 /* Add a new internal function. NAME is the name of the function; DOC
2556 is a documentation string describing the function. HANDLER is
2557 called when the function is invoked. COOKIE is an arbitrary
2558 pointer which is passed to HANDLER and is intended for "user
2559 data". */
2560 void
2561 add_internal_function (const char *name, const char *doc,
2562 internal_function_fn handler, void *cookie)
2563 {
2564 struct cmd_list_element *cmd;
2565 struct internal_function *ifn;
2566 struct internalvar *var = lookup_internalvar (name);
2567
2568 ifn = create_internal_function (name, handler, cookie);
2569 set_internalvar_function (var, ifn);
2570
2571 cmd = add_cmd (xstrdup (name), no_class, function_command, (char *) doc,
2572 &functionlist);
2573 cmd->destroyer = function_destroyer;
2574 }
2575
2576 /* Update VALUE before discarding OBJFILE. COPIED_TYPES is used to
2577 prevent cycles / duplicates. */
2578
2579 void
2580 preserve_one_value (struct value *value, struct objfile *objfile,
2581 htab_t copied_types)
2582 {
2583 if (TYPE_OBJFILE (value->type) == objfile)
2584 value->type = copy_type_recursive (objfile, value->type, copied_types);
2585
2586 if (TYPE_OBJFILE (value->enclosing_type) == objfile)
2587 value->enclosing_type = copy_type_recursive (objfile,
2588 value->enclosing_type,
2589 copied_types);
2590 }
2591
2592 /* Likewise for internal variable VAR. */
2593
2594 static void
2595 preserve_one_internalvar (struct internalvar *var, struct objfile *objfile,
2596 htab_t copied_types)
2597 {
2598 switch (var->kind)
2599 {
2600 case INTERNALVAR_INTEGER:
2601 if (var->u.integer.type && TYPE_OBJFILE (var->u.integer.type) == objfile)
2602 var->u.integer.type
2603 = copy_type_recursive (objfile, var->u.integer.type, copied_types);
2604 break;
2605
2606 case INTERNALVAR_VALUE:
2607 preserve_one_value (var->u.value, objfile, copied_types);
2608 break;
2609 }
2610 }
2611
2612 /* Update the internal variables and value history when OBJFILE is
2613 discarded; we must copy the types out of the objfile. New global types
2614 will be created for every convenience variable which currently points to
2615 this objfile's types, and the convenience variables will be adjusted to
2616 use the new global types. */
2617
2618 void
2619 preserve_values (struct objfile *objfile)
2620 {
2621 htab_t copied_types;
2622 struct value_history_chunk *cur;
2623 struct internalvar *var;
2624 int i;
2625
2626 /* Create the hash table. We allocate on the objfile's obstack, since
2627 it is soon to be deleted. */
2628 copied_types = create_copied_types_hash (objfile);
2629
2630 for (cur = value_history_chain; cur; cur = cur->next)
2631 for (i = 0; i < VALUE_HISTORY_CHUNK; i++)
2632 if (cur->values[i])
2633 preserve_one_value (cur->values[i], objfile, copied_types);
2634
2635 for (var = internalvars; var; var = var->next)
2636 preserve_one_internalvar (var, objfile, copied_types);
2637
2638 preserve_ext_lang_values (objfile, copied_types);
2639
2640 htab_delete (copied_types);
2641 }
2642
/* Implementation of "show convenience": print every defined
   convenience variable and its current value.  Errors raised while
   evaluating one variable are printed inline and do not stop the
   listing.  */

static void
show_convenience (char *ignore, int from_tty)
{
  struct gdbarch *gdbarch = get_current_arch ();
  struct internalvar *var;
  int varseen = 0;		/* Set once any variable is printed.  */
  struct value_print_options opts;

  get_user_print_options (&opts);
  for (var = internalvars; var; var = var->next)
    {

      if (!varseen)
	{
	  varseen = 1;
	}
      printf_filtered (("$%s = "), var->name);

      /* Evaluating the variable may throw (e.g. a make-value hook
	 failing); catch the error so the rest still prints.  */
      TRY
	{
	  struct value *val;

	  val = value_of_internalvar (gdbarch, var);
	  value_print (val, gdb_stdout, &opts);
	}
      CATCH (ex, RETURN_MASK_ERROR)
	{
	  fprintf_filtered (gdb_stdout, _("<error: %s>"), ex.message);
	}
      END_CATCH

      printf_filtered (("\n"));
    }
  if (!varseen)
    {
      /* This text does not mention convenience functions on purpose.
	 The user can't create them except via Python, and if Python support
	 is installed this message will never be printed ($_streq will
	 exist).  */
      printf_unfiltered (_("No debugger convenience variables now defined.\n"
			   "Convenience variables have "
			   "names starting with \"$\";\n"
			   "use \"set\" as in \"set "
			   "$foo = 5\" to define them.\n"));
    }
}
2689 \f
2690 /* Return the TYPE_CODE_XMETHOD value corresponding to WORKER. */
2691
2692 struct value *
2693 value_of_xmethod (struct xmethod_worker *worker)
2694 {
2695 if (worker->value == NULL)
2696 {
2697 struct value *v;
2698
2699 v = allocate_value (builtin_type (target_gdbarch ())->xmethod);
2700 v->lval = lval_xcallable;
2701 v->location.xm_worker = worker;
2702 v->modifiable = 0;
2703 worker->value = v;
2704 }
2705
2706 return worker->value;
2707 }
2708
2709 /* Return the type of the result of TYPE_CODE_XMETHOD value METHOD. */
2710
2711 struct type *
2712 result_type_of_xmethod (struct value *method, int argc, struct value **argv)
2713 {
2714 gdb_assert (TYPE_CODE (value_type (method)) == TYPE_CODE_XMETHOD
2715 && method->lval == lval_xcallable && argc > 0);
2716
2717 return get_xmethod_result_type (method->location.xm_worker,
2718 argv[0], argv + 1, argc - 1);
2719 }
2720
2721 /* Call the xmethod corresponding to the TYPE_CODE_XMETHOD value METHOD. */
2722
2723 struct value *
2724 call_xmethod (struct value *method, int argc, struct value **argv)
2725 {
2726 gdb_assert (TYPE_CODE (value_type (method)) == TYPE_CODE_XMETHOD
2727 && method->lval == lval_xcallable && argc > 0);
2728
2729 return invoke_xmethod (method->location.xm_worker,
2730 argv[0], argv + 1, argc - 1);
2731 }
2732 \f
2733 /* Extract a value as a C number (either long or double).
2734 Knows how to convert fixed values to double, or
2735 floating values to long.
2736 Does not deallocate the value. */
2737
2738 LONGEST
2739 value_as_long (struct value *val)
2740 {
2741 /* This coerces arrays and functions, which is necessary (e.g.
2742 in disassemble_command). It also dereferences references, which
2743 I suspect is the most logical thing to do. */
2744 val = coerce_array (val);
2745 return unpack_long (value_type (val), value_contents (val));
2746 }
2747
2748 DOUBLEST
2749 value_as_double (struct value *val)
2750 {
2751 DOUBLEST foo;
2752 int inv;
2753
2754 foo = unpack_double (value_type (val), value_contents (val), &inv);
2755 if (inv)
2756 error (_("Invalid floating value found in program."));
2757 return foo;
2758 }
2759
/* Extract a value as a C pointer.  Does not deallocate the value.
   Note that val's type may not actually be a pointer; value_as_long
   handles all the cases.  */
CORE_ADDR
value_as_address (struct value *val)
{
  struct gdbarch *gdbarch = get_type_arch (value_type (val));

  /* Assume a CORE_ADDR can fit in a LONGEST (for now).  Not sure
     whether we want this to be true eventually.  */
#if 0
  /* gdbarch_addr_bits_remove is wrong if we are being called for a
     non-address (e.g. argument to "signal", "info break", etc.), or
     for pointers to char, in which the low bits *are* significant.  */
  return gdbarch_addr_bits_remove (gdbarch, value_as_long (val));
#else

  /* There are several targets (IA-64, PowerPC, and others) which
     don't represent pointers to functions as simply the address of
     the function's entry point.  For example, on the IA-64, a
     function pointer points to a two-word descriptor, generated by
     the linker, which contains the function's entry point, and the
     value the IA-64 "global pointer" register should have --- to
     support position-independent code.  The linker generates
     descriptors only for those functions whose addresses are taken.

     On such targets, it's difficult for GDB to convert an arbitrary
     function address into a function pointer; it has to either find
     an existing descriptor for that function, or call malloc and
     build its own.  On some targets, it is impossible for GDB to
     build a descriptor at all: the descriptor must contain a jump
     instruction; data memory cannot be executed; and code memory
     cannot be modified.

     Upon entry to this function, if VAL is a value of type `function'
     (that is, TYPE_CODE (VALUE_TYPE (val)) == TYPE_CODE_FUNC), then
     value_address (val) is the address of the function.  This is what
     you'll get if you evaluate an expression like `main'.  The call
     to COERCE_ARRAY below actually does all the usual unary
     conversions, which includes converting values of type `function'
     to `pointer to function'.  This is the challenging conversion
     discussed above.  Then, `unpack_long' will convert that pointer
     back into an address.

     So, suppose the user types `disassemble foo' on an architecture
     with a strange function pointer representation, on which GDB
     cannot build its own descriptors, and suppose further that `foo'
     has no linker-built descriptor.  The address->pointer conversion
     will signal an error and prevent the command from running, even
     though the next step would have been to convert the pointer
     directly back into the same address.

     The following shortcut avoids this whole mess.  If VAL is a
     function, just return its address directly.  */
  if (TYPE_CODE (value_type (val)) == TYPE_CODE_FUNC
      || TYPE_CODE (value_type (val)) == TYPE_CODE_METHOD)
    return value_address (val);

  val = coerce_array (val);

  /* Some architectures (e.g. Harvard), map instruction and data
     addresses onto a single large unified address space.  For
     instance: An architecture may consider a large integer in the
     range 0x10000000 .. 0x1000ffff to already represent a data
     addresses (hence not need a pointer to address conversion) while
     a small integer would still need to be converted integer to
     pointer to address.  Just assume such architectures handle all
     integer conversions in a single function.  */

  /* JimB writes:

     I think INTEGER_TO_ADDRESS is a good idea as proposed --- but we
     must admonish GDB hackers to make sure its behavior matches the
     compiler's, whenever possible.

     In general, I think GDB should evaluate expressions the same way
     the compiler does.  When the user copies an expression out of
     their source code and hands it to a `print' command, they should
     get the same value the compiler would have computed.  Any
     deviation from this rule can cause major confusion and annoyance,
     and needs to be justified carefully.  In other words, GDB doesn't
     really have the freedom to do these conversions in clever and
     useful ways.

     AndrewC pointed out that users aren't complaining about how GDB
     casts integers to pointers; they are complaining that they can't
     take an address from a disassembly listing and give it to `x/i'.
     This is certainly important.

     Adding an architecture method like integer_to_address() certainly
     makes it possible for GDB to "get it right" in all circumstances
     --- the target has complete control over how things get done, so
     people can Do The Right Thing for their target without breaking
     anyone else.  The standard doesn't specify how integers get
     converted to pointers; usually, the ABI doesn't either, but
     ABI-specific code is a more reasonable place to handle it.  */

  /* Non-pointer integer-like values go through the architecture's
     integer_to_address hook when one is provided.  */
  if (TYPE_CODE (value_type (val)) != TYPE_CODE_PTR
      && TYPE_CODE (value_type (val)) != TYPE_CODE_REF
      && gdbarch_integer_to_address_p (gdbarch))
    return gdbarch_integer_to_address (gdbarch, value_type (val),
				       value_contents (val));

  /* Otherwise just extract the (possibly coerced) pointer bytes.  */
  return unpack_long (value_type (val), value_contents (val));
#endif
}
2866 \f
/* Unpack raw data (copied from debugee, target byte order) at VALADDR
   as a long, or as a double, assuming the raw data is described
   by type TYPE.  Knows how to convert different sizes of values
   and can convert between fixed and floating point.  We don't assume
   any alignment for the raw data.  Return value is in host byte order.

   If you want functions and arrays to be coerced to pointers, and
   references to be dereferenced, call value_as_long() instead.

   C++: It is assumed that the front-end has taken care of
   all matters concerning pointers to members.  A pointer
   to member which reaches here is considered to be equivalent
   to an INT (or some size).  After all, it is only an offset.  */

LONGEST
unpack_long (struct type *type, const gdb_byte *valaddr)
{
  enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
  enum type_code code = TYPE_CODE (type);
  int len = TYPE_LENGTH (type);
  int nosign = TYPE_UNSIGNED (type);

  switch (code)
    {
    case TYPE_CODE_TYPEDEF:
      /* Resolve the typedef and retry with the underlying type.  */
      return unpack_long (check_typedef (type), valaddr);
    case TYPE_CODE_ENUM:
    case TYPE_CODE_FLAGS:
    case TYPE_CODE_BOOL:
    case TYPE_CODE_INT:
    case TYPE_CODE_CHAR:
    case TYPE_CODE_RANGE:
    case TYPE_CODE_MEMBERPTR:
      /* Integer-like: honor TYPE's signedness when extracting.  */
      if (nosign)
	return extract_unsigned_integer (valaddr, len, byte_order);
      else
	return extract_signed_integer (valaddr, len, byte_order);

    case TYPE_CODE_FLT:
      /* Truncates toward the integer via the implicit DOUBLEST ->
	 LONGEST conversion at return.  */
      return extract_typed_floating (valaddr, type);

    case TYPE_CODE_DECFLOAT:
      /* libdecnumber has a function to convert from decimal to integer, but
	 it doesn't work when the decimal number has a fractional part.  */
      return decimal_to_doublest (valaddr, len, byte_order);

    case TYPE_CODE_PTR:
    case TYPE_CODE_REF:
      /* Assume a CORE_ADDR can fit in a LONGEST (for now).  Not sure
	 whether we want this to be true eventually.  */
      return extract_typed_address (valaddr, type);

    default:
      error (_("Value can't be converted to integer."));
    }
  return 0;			/* Placate lint.  */
}
2924
/* Return a double value from the specified type and address.
   INVP points to an int which is set to 0 for valid value,
   1 for invalid value (bad float format).  In either case,
   the returned double is OK to use.  Argument is in target
   format, result is in host format.  */

DOUBLEST
unpack_double (struct type *type, const gdb_byte *valaddr, int *invp)
{
  enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
  enum type_code code;
  int len;
  int nosign;

  *invp = 0;			/* Assume valid.  */
  type = check_typedef (type);
  code = TYPE_CODE (type);
  len = TYPE_LENGTH (type);
  nosign = TYPE_UNSIGNED (type);
  if (code == TYPE_CODE_FLT)
    {
      /* NOTE: cagney/2002-02-19: There was a test here to see if the
	 floating-point value was valid (using the macro
	 INVALID_FLOAT).  That test/macro have been removed.

	 It turns out that only the VAX defined this macro and then
	 only in a non-portable way.  Fixing the portability problem
	 wouldn't help since the VAX floating-point code is also badly
	 bit-rotten.  The target needs to add definitions for the
	 methods gdbarch_float_format and gdbarch_double_format - these
	 exactly describe the target floating-point format.  The
	 problem here is that the corresponding floatformat_vax_f and
	 floatformat_vax_d values these methods should be set to are
	 also not defined either.  Oops!

	 Hopefully someone will add both the missing floatformat
	 definitions and the new cases for floatformat_is_valid ().  */

      if (!floatformat_is_valid (floatformat_from_type (type), valaddr))
	{
	  /* Bad bit pattern for this float format: flag it and hand
	     back a usable placeholder.  */
	  *invp = 1;
	  return 0.0;
	}

      return extract_typed_floating (valaddr, type);
    }
  else if (code == TYPE_CODE_DECFLOAT)
    return decimal_to_doublest (valaddr, len, byte_order);
  else if (nosign)
    {
      /* Unsigned -- be sure we compensate for signed LONGEST.  */
      return (ULONGEST) unpack_long (type, valaddr);
    }
  else
    {
      /* Signed -- we are OK with unpack_long.  */
      return unpack_long (type, valaddr);
    }
}
2984
2985 /* Unpack raw data (copied from debugee, target byte order) at VALADDR
2986 as a CORE_ADDR, assuming the raw data is described by type TYPE.
2987 We don't assume any alignment for the raw data. Return value is in
2988 host byte order.
2989
2990 If you want functions and arrays to be coerced to pointers, and
2991 references to be dereferenced, call value_as_address() instead.
2992
2993 C++: It is assumed that the front-end has taken care of
2994 all matters concerning pointers to members. A pointer
2995 to member which reaches here is considered to be equivalent
2996 to an INT (or some size). After all, it is only an offset. */
2997
2998 CORE_ADDR
2999 unpack_pointer (struct type *type, const gdb_byte *valaddr)
3000 {
3001 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
3002 whether we want this to be true eventually. */
3003 return unpack_long (type, valaddr);
3004 }
3005
3006 \f
/* Get the value of the FIELDNO'th field (which must be static) of
   TYPE.  Returns an optimized-out value if the member's symbol cannot
   be found at all.  */

struct value *
value_static_field (struct type *type, int fieldno)
{
  struct value *retval;

  switch (TYPE_FIELD_LOC_KIND (type, fieldno))
    {
    case FIELD_LOC_KIND_PHYSADDR:
      /* The debug info recorded the member's address directly.  */
      retval = value_at_lazy (TYPE_FIELD_TYPE (type, fieldno),
			      TYPE_FIELD_STATIC_PHYSADDR (type, fieldno));
      break;
    case FIELD_LOC_KIND_PHYSNAME:
      {
	/* The debug info recorded a linkage name; resolve it to a
	   symbol (or, failing that, a minimal symbol).  */
	const char *phys_name = TYPE_FIELD_STATIC_PHYSNAME (type, fieldno);
	/* TYPE_FIELD_NAME (type, fieldno); */
	struct block_symbol sym = lookup_symbol (phys_name, 0, VAR_DOMAIN, 0);

	if (sym.symbol == NULL)
	  {
	    /* With some compilers, e.g. HP aCC, static data members are
	       reported as non-debuggable symbols.  */
	    struct bound_minimal_symbol msym
	      = lookup_minimal_symbol (phys_name, NULL, NULL);

	    if (!msym.minsym)
	      return allocate_optimized_out_value (type);
	    else
	      {
		retval = value_at_lazy (TYPE_FIELD_TYPE (type, fieldno),
					BMSYMBOL_VALUE_ADDRESS (msym));
	      }
	  }
	else
	  retval = value_of_variable (sym.symbol, sym.block);
	break;
      }
    default:
      gdb_assert_not_reached ("unexpected field location kind");
    }

  return retval;
}
3052
3053 /* Change the enclosing type of a value object VAL to NEW_ENCL_TYPE.
3054 You have to be careful here, since the size of the data area for the value
3055 is set by the length of the enclosing type. So if NEW_ENCL_TYPE is bigger
3056 than the old enclosing type, you have to allocate more space for the
3057 data. */
3058
3059 void
3060 set_value_enclosing_type (struct value *val, struct type *new_encl_type)
3061 {
3062 if (TYPE_LENGTH (new_encl_type) > TYPE_LENGTH (value_enclosing_type (val)))
3063 {
3064 check_type_length_before_alloc (new_encl_type);
3065 val->contents
3066 = (gdb_byte *) xrealloc (val->contents, TYPE_LENGTH (new_encl_type));
3067 }
3068
3069 val->enclosing_type = new_encl_type;
3070 }
3071
/* Given a value ARG1 (offset by OFFSET bytes)
   of a struct or union type ARG_TYPE,
   extract and return the value of one of its (non-static) fields.
   FIELDNO says which field.  Handles three cases: packed bitfields,
   base-class subobjects, and plain data members.  */

struct value *
value_primitive_field (struct value *arg1, int offset,
		       int fieldno, struct type *arg_type)
{
  struct value *v;
  struct type *type;
  struct gdbarch *arch = get_value_arch (arg1);
  /* Bytes per addressable memory unit for this architecture.  */
  int unit_size = gdbarch_addressable_memory_unit_size (arch);

  arg_type = check_typedef (arg_type);
  type = TYPE_FIELD_TYPE (arg_type, fieldno);

  /* Call check_typedef on our type to make sure that, if TYPE
     is a TYPE_CODE_TYPEDEF, its length is set to the length
     of the target type instead of zero.  However, we do not
     replace the typedef type by the target type, because we want
     to keep the typedef in order to be able to print the type
     description correctly.  */
  check_typedef (type);

  if (TYPE_FIELD_BITSIZE (arg_type, fieldno))
    {
      /* Handle packed fields.

	 Create a new value for the bitfield, with bitpos and bitsize
	 set.  If possible, arrange offset and bitpos so that we can
	 do a single aligned read of the size of the containing type.
	 Otherwise, adjust offset to the byte containing the first
	 bit.  Assume that the address, offset, and embedded offset
	 are sufficiently aligned.  */

      int bitpos = TYPE_FIELD_BITPOS (arg_type, fieldno);
      int container_bitsize = TYPE_LENGTH (type) * 8;

      v = allocate_value_lazy (type);
      v->bitsize = TYPE_FIELD_BITSIZE (arg_type, fieldno);
      if ((bitpos % container_bitsize) + v->bitsize <= container_bitsize
	  && TYPE_LENGTH (type) <= (int) sizeof (LONGEST))
	v->bitpos = bitpos % container_bitsize;
      else
	v->bitpos = bitpos % 8;
      v->offset = (value_embedded_offset (arg1)
		   + offset
		   + (bitpos - v->bitpos) / 8);
      /* The bitfield reads through its parent's storage.  */
      set_value_parent (v, arg1);
      if (!value_lazy (arg1))
	value_fetch_lazy (v);
    }
  else if (fieldno < TYPE_N_BASECLASSES (arg_type))
    {
      /* This field is actually a base subobject, so preserve the
	 entire object's contents for later references to virtual
	 bases, etc.  */
      int boffset;

      /* Lazy register values with offsets are not supported.  */
      if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
	value_fetch_lazy (arg1);

      /* We special case virtual inheritance here because this
	 requires access to the contents, which we would rather avoid
	 for references to ordinary fields of unavailable values.  */
      if (BASETYPE_VIA_VIRTUAL (arg_type, fieldno))
	boffset = baseclass_offset (arg_type, fieldno,
				    value_contents (arg1),
				    value_embedded_offset (arg1),
				    value_address (arg1),
				    arg1);
      else
	boffset = TYPE_FIELD_BITPOS (arg_type, fieldno) / 8;

      if (value_lazy (arg1))
	v = allocate_value_lazy (value_enclosing_type (arg1));
      else
	{
	  v = allocate_value (value_enclosing_type (arg1));
	  value_contents_copy_raw (v, 0, arg1, 0,
				   TYPE_LENGTH (value_enclosing_type (arg1)));
	}
      v->type = type;
      v->offset = value_offset (arg1);
      v->embedded_offset = offset + value_embedded_offset (arg1) + boffset;
    }
  else
    {
      /* Plain old data member */
      offset += (TYPE_FIELD_BITPOS (arg_type, fieldno)
		 / (HOST_CHAR_BIT * unit_size));

      /* Lazy register values with offsets are not supported.  */
      if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
	value_fetch_lazy (arg1);

      if (value_lazy (arg1))
	v = allocate_value_lazy (type);
      else
	{
	  v = allocate_value (type);
	  value_contents_copy_raw (v, value_embedded_offset (v),
				   arg1, value_embedded_offset (arg1) + offset,
				   type_length_units (type));
	}
      v->offset = (value_offset (arg1) + offset
		   + value_embedded_offset (arg1));
    }
  /* The new value lives wherever ARG1 lives (same register/frame).  */
  set_value_component_location (v, arg1);
  VALUE_REGNUM (v) = VALUE_REGNUM (arg1);
  VALUE_FRAME_ID (v) = VALUE_FRAME_ID (arg1);
  return v;
}
3187
/* Given a value ARG1 of a struct or union type,
   extract and return the value of one of its (non-static) fields.
   FIELDNO says which field.  This is a convenience wrapper around
   value_primitive_field with a zero offset and ARG1's own type;
   callers needing a base-class view or an explicit offset should call
   value_primitive_field directly.  */

struct value *
value_field (struct value *arg1, int fieldno)
{
  return value_primitive_field (arg1, 0, fieldno, value_type (arg1));
}
3197
3198 /* Return a non-virtual function as a value.
3199 F is the list of member functions which contains the desired method.
3200 J is an index into F which provides the desired method.
3201
3202 We only use the symbol for its address, so be happy with either a
3203 full symbol or a minimal symbol. */
3204
3205 struct value *
3206 value_fn_field (struct value **arg1p, struct fn_field *f,
3207 int j, struct type *type,
3208 int offset)
3209 {
3210 struct value *v;
3211 struct type *ftype = TYPE_FN_FIELD_TYPE (f, j);
3212 const char *physname = TYPE_FN_FIELD_PHYSNAME (f, j);
3213 struct symbol *sym;
3214 struct bound_minimal_symbol msym;
3215
3216 sym = lookup_symbol (physname, 0, VAR_DOMAIN, 0).symbol;
3217 if (sym != NULL)
3218 {
3219 memset (&msym, 0, sizeof (msym));
3220 }
3221 else
3222 {
3223 gdb_assert (sym == NULL);
3224 msym = lookup_bound_minimal_symbol (physname);
3225 if (msym.minsym == NULL)
3226 return NULL;
3227 }
3228
3229 v = allocate_value (ftype);
3230 if (sym)
3231 {
3232 set_value_address (v, BLOCK_START (SYMBOL_BLOCK_VALUE (sym)));
3233 }
3234 else
3235 {
3236 /* The minimal symbol might point to a function descriptor;
3237 resolve it to the actual code address instead. */
3238 struct objfile *objfile = msym.objfile;
3239 struct gdbarch *gdbarch = get_objfile_arch (objfile);
3240
3241 set_value_address (v,
3242 gdbarch_convert_from_func_ptr_addr
3243 (gdbarch, BMSYMBOL_VALUE_ADDRESS (msym), &current_target));
3244 }
3245
3246 if (arg1p)
3247 {
3248 if (type != value_type (*arg1p))
3249 *arg1p = value_ind (value_cast (lookup_pointer_type (type),
3250 value_addr (*arg1p)));
3251
3252 /* Move the `this' pointer according to the offset.
3253 VALUE_OFFSET (*arg1p) += offset; */
3254 }
3255
3256 return v;
3257 }
3258
3259 \f
3260
3261 /* Unpack a bitfield of the specified FIELD_TYPE, from the object at
3262 VALADDR, and store the result in *RESULT.
3263 The bitfield starts at BITPOS bits and contains BITSIZE bits.
3264
3265 Extracting bits depends on endianness of the machine. Compute the
3266 number of least significant bits to discard. For big endian machines,
3267 we compute the total number of bits in the anonymous object, subtract
3268 off the bit count from the MSB of the object to the MSB of the
3269 bitfield, then the size of the bitfield, which leaves the LSB discard
3270 count. For little endian machines, the discard count is simply the
3271 number of bits from the LSB of the anonymous object to the LSB of the
3272 bitfield.
3273
3274 If the field is signed, we also do sign extension. */
3275
3276 static LONGEST
3277 unpack_bits_as_long (struct type *field_type, const gdb_byte *valaddr,
3278 int bitpos, int bitsize)
3279 {
3280 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (field_type));
3281 ULONGEST val;
3282 ULONGEST valmask;
3283 int lsbcount;
3284 int bytes_read;
3285 int read_offset;
3286
3287 /* Read the minimum number of bytes required; there may not be
3288 enough bytes to read an entire ULONGEST. */
3289 field_type = check_typedef (field_type);
3290 if (bitsize)
3291 bytes_read = ((bitpos % 8) + bitsize + 7) / 8;
3292 else
3293 bytes_read = TYPE_LENGTH (field_type);
3294
3295 read_offset = bitpos / 8;
3296
3297 val = extract_unsigned_integer (valaddr + read_offset,
3298 bytes_read, byte_order);
3299
3300 /* Extract bits. See comment above. */
3301
3302 if (gdbarch_bits_big_endian (get_type_arch (field_type)))
3303 lsbcount = (bytes_read * 8 - bitpos % 8 - bitsize);
3304 else
3305 lsbcount = (bitpos % 8);
3306 val >>= lsbcount;
3307
3308 /* If the field does not entirely fill a LONGEST, then zero the sign bits.
3309 If the field is signed, and is negative, then sign extend. */
3310
3311 if ((bitsize > 0) && (bitsize < 8 * (int) sizeof (val)))
3312 {
3313 valmask = (((ULONGEST) 1) << bitsize) - 1;
3314 val &= valmask;
3315 if (!TYPE_UNSIGNED (field_type))
3316 {
3317 if (val & (valmask ^ (valmask >> 1)))
3318 {
3319 val |= ~valmask;
3320 }
3321 }
3322 }
3323
3324 return val;
3325 }
3326
3327 /* Unpack a field FIELDNO of the specified TYPE, from the object at
3328 VALADDR + EMBEDDED_OFFSET. VALADDR points to the contents of
3329 ORIGINAL_VALUE, which must not be NULL. See
3330 unpack_value_bits_as_long for more details. */
3331
3332 int
3333 unpack_value_field_as_long (struct type *type, const gdb_byte *valaddr,
3334 int embedded_offset, int fieldno,
3335 const struct value *val, LONGEST *result)
3336 {
3337 int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3338 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3339 struct type *field_type = TYPE_FIELD_TYPE (type, fieldno);
3340 int bit_offset;
3341
3342 gdb_assert (val != NULL);
3343
3344 bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos;
3345 if (value_bits_any_optimized_out (val, bit_offset, bitsize)
3346 || !value_bits_available (val, bit_offset, bitsize))
3347 return 0;
3348
3349 *result = unpack_bits_as_long (field_type, valaddr + embedded_offset,
3350 bitpos, bitsize);
3351 return 1;
3352 }
3353
3354 /* Unpack a field FIELDNO of the specified TYPE, from the anonymous
3355 object at VALADDR. See unpack_bits_as_long for more details. */
3356
3357 LONGEST
3358 unpack_field_as_long (struct type *type, const gdb_byte *valaddr, int fieldno)
3359 {
3360 int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3361 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3362 struct type *field_type = TYPE_FIELD_TYPE (type, fieldno);
3363
3364 return unpack_bits_as_long (field_type, valaddr, bitpos, bitsize);
3365 }
3366
3367 /* Unpack a bitfield of BITSIZE bits found at BITPOS in the object at
3368 VALADDR + EMBEDDEDOFFSET that has the type of DEST_VAL and store
3369 the contents in DEST_VAL, zero or sign extending if the type of
3370 DEST_VAL is wider than BITSIZE. VALADDR points to the contents of
3371 VAL. If the VAL's contents required to extract the bitfield from
3372 are unavailable/optimized out, DEST_VAL is correspondingly
3373 marked unavailable/optimized out. */
3374
3375 void
3376 unpack_value_bitfield (struct value *dest_val,
3377 int bitpos, int bitsize,
3378 const gdb_byte *valaddr, int embedded_offset,
3379 const struct value *val)
3380 {
3381 enum bfd_endian byte_order;
3382 int src_bit_offset;
3383 int dst_bit_offset;
3384 LONGEST num;
3385 struct type *field_type = value_type (dest_val);
3386
3387 /* First, unpack and sign extend the bitfield as if it was wholly
3388 available. Invalid/unavailable bits are read as zero, but that's
3389 OK, as they'll end up marked below. */
3390 byte_order = gdbarch_byte_order (get_type_arch (field_type));
3391 num = unpack_bits_as_long (field_type, valaddr + embedded_offset,
3392 bitpos, bitsize);
3393 store_signed_integer (value_contents_raw (dest_val),
3394 TYPE_LENGTH (field_type), byte_order, num);
3395
3396 /* Now copy the optimized out / unavailability ranges to the right
3397 bits. */
3398 src_bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos;
3399 if (byte_order == BFD_ENDIAN_BIG)
3400 dst_bit_offset = TYPE_LENGTH (field_type) * TARGET_CHAR_BIT - bitsize;
3401 else
3402 dst_bit_offset = 0;
3403 value_ranges_copy_adjusted (dest_val, dst_bit_offset,
3404 val, src_bit_offset, bitsize);
3405 }
3406
3407 /* Return a new value with type TYPE, which is FIELDNO field of the
3408 object at VALADDR + EMBEDDEDOFFSET. VALADDR points to the contents
3409 of VAL. If the VAL's contents required to extract the bitfield
3410 from are unavailable/optimized out, the new value is
3411 correspondingly marked unavailable/optimized out. */
3412
3413 struct value *
3414 value_field_bitfield (struct type *type, int fieldno,
3415 const gdb_byte *valaddr,
3416 int embedded_offset, const struct value *val)
3417 {
3418 int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3419 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3420 struct value *res_val = allocate_value (TYPE_FIELD_TYPE (type, fieldno));
3421
3422 unpack_value_bitfield (res_val, bitpos, bitsize,
3423 valaddr, embedded_offset, val);
3424
3425 return res_val;
3426 }
3427
3428 /* Modify the value of a bitfield. ADDR points to a block of memory in
3429 target byte order; the bitfield starts in the byte pointed to. FIELDVAL
3430 is the desired value of the field, in host byte order. BITPOS and BITSIZE
3431 indicate which bits (in target bit order) comprise the bitfield.
3432 Requires 0 < BITSIZE <= lbits, 0 <= BITPOS % 8 + BITSIZE <= lbits, and
3433 0 <= BITPOS, where lbits is the size of a LONGEST in bits. */
3434
3435 void
3436 modify_field (struct type *type, gdb_byte *addr,
3437 LONGEST fieldval, int bitpos, int bitsize)
3438 {
3439 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
3440 ULONGEST oword;
3441 ULONGEST mask = (ULONGEST) -1 >> (8 * sizeof (ULONGEST) - bitsize);
3442 int bytesize;
3443
3444 /* Normalize BITPOS. */
3445 addr += bitpos / 8;
3446 bitpos %= 8;
3447
3448 /* If a negative fieldval fits in the field in question, chop
3449 off the sign extension bits. */
3450 if ((~fieldval & ~(mask >> 1)) == 0)
3451 fieldval &= mask;
3452
3453 /* Warn if value is too big to fit in the field in question. */
3454 if (0 != (fieldval & ~mask))
3455 {
3456 /* FIXME: would like to include fieldval in the message, but
3457 we don't have a sprintf_longest. */
3458 warning (_("Value does not fit in %d bits."), bitsize);
3459
3460 /* Truncate it, otherwise adjoining fields may be corrupted. */
3461 fieldval &= mask;
3462 }
3463
3464 /* Ensure no bytes outside of the modified ones get accessed as it may cause
3465 false valgrind reports. */
3466
3467 bytesize = (bitpos + bitsize + 7) / 8;
3468 oword = extract_unsigned_integer (addr, bytesize, byte_order);
3469
3470 /* Shifting for bit field depends on endianness of the target machine. */
3471 if (gdbarch_bits_big_endian (get_type_arch (type)))
3472 bitpos = bytesize * 8 - bitpos - bitsize;
3473
3474 oword &= ~(mask << bitpos);
3475 oword |= fieldval << bitpos;
3476
3477 store_unsigned_integer (addr, bytesize, byte_order, oword);
3478 }
3479 \f
3480 /* Pack NUM into BUF using a target format of TYPE. */
3481
3482 void
3483 pack_long (gdb_byte *buf, struct type *type, LONGEST num)
3484 {
3485 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
3486 int len;
3487
3488 type = check_typedef (type);
3489 len = TYPE_LENGTH (type);
3490
3491 switch (TYPE_CODE (type))
3492 {
3493 case TYPE_CODE_INT:
3494 case TYPE_CODE_CHAR:
3495 case TYPE_CODE_ENUM:
3496 case TYPE_CODE_FLAGS:
3497 case TYPE_CODE_BOOL:
3498 case TYPE_CODE_RANGE:
3499 case TYPE_CODE_MEMBERPTR:
3500 store_signed_integer (buf, len, byte_order, num);
3501 break;
3502
3503 case TYPE_CODE_REF:
3504 case TYPE_CODE_PTR:
3505 store_typed_address (buf, type, (CORE_ADDR) num);
3506 break;
3507
3508 default:
3509 error (_("Unexpected type (%d) encountered for integer constant."),
3510 TYPE_CODE (type));
3511 }
3512 }
3513
3514
3515 /* Pack NUM into BUF using a target format of TYPE. */
3516
3517 static void
3518 pack_unsigned_long (gdb_byte *buf, struct type *type, ULONGEST num)
3519 {
3520 int len;
3521 enum bfd_endian byte_order;
3522
3523 type = check_typedef (type);
3524 len = TYPE_LENGTH (type);
3525 byte_order = gdbarch_byte_order (get_type_arch (type));
3526
3527 switch (TYPE_CODE (type))
3528 {
3529 case TYPE_CODE_INT:
3530 case TYPE_CODE_CHAR:
3531 case TYPE_CODE_ENUM:
3532 case TYPE_CODE_FLAGS:
3533 case TYPE_CODE_BOOL:
3534 case TYPE_CODE_RANGE:
3535 case TYPE_CODE_MEMBERPTR:
3536 store_unsigned_integer (buf, len, byte_order, num);
3537 break;
3538
3539 case TYPE_CODE_REF:
3540 case TYPE_CODE_PTR:
3541 store_typed_address (buf, type, (CORE_ADDR) num);
3542 break;
3543
3544 default:
3545 error (_("Unexpected type (%d) encountered "
3546 "for unsigned integer constant."),
3547 TYPE_CODE (type));
3548 }
3549 }
3550
3551
3552 /* Convert C numbers into newly allocated values. */
3553
3554 struct value *
3555 value_from_longest (struct type *type, LONGEST num)
3556 {
3557 struct value *val = allocate_value (type);
3558
3559 pack_long (value_contents_raw (val), type, num);
3560 return val;
3561 }
3562
3563
3564 /* Convert C unsigned numbers into newly allocated values. */
3565
3566 struct value *
3567 value_from_ulongest (struct type *type, ULONGEST num)
3568 {
3569 struct value *val = allocate_value (type);
3570
3571 pack_unsigned_long (value_contents_raw (val), type, num);
3572
3573 return val;
3574 }
3575
3576
3577 /* Create a value representing a pointer of type TYPE to the address
3578 ADDR. */
3579
3580 struct value *
3581 value_from_pointer (struct type *type, CORE_ADDR addr)
3582 {
3583 struct value *val = allocate_value (type);
3584
3585 store_typed_address (value_contents_raw (val),
3586 check_typedef (type), addr);
3587 return val;
3588 }
3589
3590
3591 /* Create a value of type TYPE whose contents come from VALADDR, if it
3592 is non-null, and whose memory address (in the inferior) is
3593 ADDRESS. The type of the created value may differ from the passed
3594 type TYPE. Make sure to retrieve values new type after this call.
3595 Note that TYPE is not passed through resolve_dynamic_type; this is
3596 a special API intended for use only by Ada. */
3597
3598 struct value *
3599 value_from_contents_and_address_unresolved (struct type *type,
3600 const gdb_byte *valaddr,
3601 CORE_ADDR address)
3602 {
3603 struct value *v;
3604
3605 if (valaddr == NULL)
3606 v = allocate_value_lazy (type);
3607 else
3608 v = value_from_contents (type, valaddr);
3609 set_value_address (v, address);
3610 VALUE_LVAL (v) = lval_memory;
3611 return v;
3612 }
3613
3614 /* Create a value of type TYPE whose contents come from VALADDR, if it
3615 is non-null, and whose memory address (in the inferior) is
3616 ADDRESS. The type of the created value may differ from the passed
3617 type TYPE. Make sure to retrieve values new type after this call. */
3618
3619 struct value *
3620 value_from_contents_and_address (struct type *type,
3621 const gdb_byte *valaddr,
3622 CORE_ADDR address)
3623 {
3624 struct type *resolved_type = resolve_dynamic_type (type, valaddr, address);
3625 struct type *resolved_type_no_typedef = check_typedef (resolved_type);
3626 struct value *v;
3627
3628 if (valaddr == NULL)
3629 v = allocate_value_lazy (resolved_type);
3630 else
3631 v = value_from_contents (resolved_type, valaddr);
3632 if (TYPE_DATA_LOCATION (resolved_type_no_typedef) != NULL
3633 && TYPE_DATA_LOCATION_KIND (resolved_type_no_typedef) == PROP_CONST)
3634 address = TYPE_DATA_LOCATION_ADDR (resolved_type_no_typedef);
3635 set_value_address (v, address);
3636 VALUE_LVAL (v) = lval_memory;
3637 return v;
3638 }
3639
3640 /* Create a value of type TYPE holding the contents CONTENTS.
3641 The new value is `not_lval'. */
3642
3643 struct value *
3644 value_from_contents (struct type *type, const gdb_byte *contents)
3645 {
3646 struct value *result;
3647
3648 result = allocate_value (type);
3649 memcpy (value_contents_raw (result), contents, TYPE_LENGTH (type));
3650 return result;
3651 }
3652
3653 struct value *
3654 value_from_double (struct type *type, DOUBLEST num)
3655 {
3656 struct value *val = allocate_value (type);
3657 struct type *base_type = check_typedef (type);
3658 enum type_code code = TYPE_CODE (base_type);
3659
3660 if (code == TYPE_CODE_FLT)
3661 {
3662 store_typed_floating (value_contents_raw (val), base_type, num);
3663 }
3664 else
3665 error (_("Unexpected type encountered for floating constant."));
3666
3667 return val;
3668 }
3669
3670 struct value *
3671 value_from_decfloat (struct type *type, const gdb_byte *dec)
3672 {
3673 struct value *val = allocate_value (type);
3674
3675 memcpy (value_contents_raw (val), dec, TYPE_LENGTH (type));
3676 return val;
3677 }
3678
/* Extract a value from the history.  Input will be of the form
   $digits or $$digits.  See block comment above 'write_dollar_variable'
   for details.  Returns NULL if H is not a history reference.  */

struct value *
value_from_history_ref (const char *h, const char **endp)
{
  int negate, len, index;

  if (h[0] != '$')
    return NULL;

  /* "$$N" counts backwards in the history; "$N" is an absolute
     index.  */
  negate = (h[1] == '$');
  len = negate ? 2 : 1;

  /* Find length of numeral string.  */
  while (isdigit (h[len]))
    len++;

  /* Make sure numeral string is not part of an identifier.  */
  if (h[len] == '_' || isalpha (h[len]))
    return NULL;

  /* Now collect the index value.  */
  if (isdigit (h[negate ? 2 : 1]))
    {
      char *numend;

      index = strtol (h + (negate ? 2 : 1), &numend, 10);
      if (negate)
	index = -index;
      *endp = numend;
    }
  else
    {
      /* "$" is equivalent to "$0".  For some bizarre reason, "$$" is
	 equivalent to "$$1", rather than to "$$0" as it ought to be!  */
      index = negate ? -1 : 0;
      *endp += len;
    }

  return access_value_history (index);
}
3741
3742 struct value *
3743 coerce_ref_if_computed (const struct value *arg)
3744 {
3745 const struct lval_funcs *funcs;
3746
3747 if (TYPE_CODE (check_typedef (value_type (arg))) != TYPE_CODE_REF)
3748 return NULL;
3749
3750 if (value_lval_const (arg) != lval_computed)
3751 return NULL;
3752
3753 funcs = value_computed_funcs (arg);
3754 if (funcs->coerce_ref == NULL)
3755 return NULL;
3756
3757 return funcs->coerce_ref (arg);
3758 }
3759
/* Look at value.h for description.  */

struct value *
readjust_indirect_value_type (struct value *value, struct type *enc_type,
			      struct type *original_type,
			      struct value *original_value)
{
  /* Re-adjust type: VALUE takes the target type of ORIGINAL_TYPE
     (i.e. the type ORIGINAL_TYPE points or refers to).  */
  deprecated_set_value_type (value, TYPE_TARGET_TYPE (original_type));

  /* Add embedding info: ENC_TYPE becomes the enclosing type, and the
     embedded offset comes from ORIGINAL_VALUE's pointed-to offset.  */
  set_value_enclosing_type (value, enc_type);
  set_value_embedded_offset (value, value_pointed_to_offset (original_value));

  /* We may be pointing to an object of some derived type.  */
  return value_full_object (value, NULL, 0, 0, 0);
}
3777
3778 struct value *
3779 coerce_ref (struct value *arg)
3780 {
3781 struct type *value_type_arg_tmp = check_typedef (value_type (arg));
3782 struct value *retval;
3783 struct type *enc_type;
3784
3785 retval = coerce_ref_if_computed (arg);
3786 if (retval)
3787 return retval;
3788
3789 if (TYPE_CODE (value_type_arg_tmp) != TYPE_CODE_REF)
3790 return arg;
3791
3792 enc_type = check_typedef (value_enclosing_type (arg));
3793 enc_type = TYPE_TARGET_TYPE (enc_type);
3794
3795 retval = value_at_lazy (enc_type,
3796 unpack_pointer (value_type (arg),
3797 value_contents (arg)));
3798 enc_type = value_type (retval);
3799 return readjust_indirect_value_type (retval, enc_type,
3800 value_type_arg_tmp, arg);
3801 }
3802
3803 struct value *
3804 coerce_array (struct value *arg)
3805 {
3806 struct type *type;
3807
3808 arg = coerce_ref (arg);
3809 type = check_typedef (value_type (arg));
3810
3811 switch (TYPE_CODE (type))
3812 {
3813 case TYPE_CODE_ARRAY:
3814 if (!TYPE_VECTOR (type) && current_language->c_style_arrays)
3815 arg = value_coerce_array (arg);
3816 break;
3817 case TYPE_CODE_FUNC:
3818 arg = value_coerce_function (arg);
3819 break;
3820 }
3821 return arg;
3822 }
3823 \f
3824
3825 /* Return the return value convention that will be used for the
3826 specified type. */
3827
3828 enum return_value_convention
3829 struct_return_convention (struct gdbarch *gdbarch,
3830 struct value *function, struct type *value_type)
3831 {
3832 enum type_code code = TYPE_CODE (value_type);
3833
3834 if (code == TYPE_CODE_ERROR)
3835 error (_("Function return type unknown."));
3836
3837 /* Probe the architecture for the return-value convention. */
3838 return gdbarch_return_value (gdbarch, function, value_type,
3839 NULL, NULL, NULL);
3840 }
3841
3842 /* Return true if the function returning the specified type is using
3843 the convention of returning structures in memory (passing in the
3844 address as a hidden first parameter). */
3845
3846 int
3847 using_struct_return (struct gdbarch *gdbarch,
3848 struct value *function, struct type *value_type)
3849 {
3850 if (TYPE_CODE (value_type) == TYPE_CODE_VOID)
3851 /* A void return value is never in memory. See also corresponding
3852 code in "print_return_value". */
3853 return 0;
3854
3855 return (struct_return_convention (gdbarch, function, value_type)
3856 != RETURN_VALUE_REGISTER_CONVENTION);
3857 }
3858
/* Set the initialized field in a value struct.  STATUS is stored
   as-is into VAL->initialized; see value_initialized for the
   accessor.  */

void
set_value_initialized (struct value *val, int status)
{
  val->initialized = status;
}
3866
/* Return the initialized field in a value struct, as set by
   set_value_initialized.  */

int
value_initialized (struct value *val)
{
  return val->initialized;
}
3874
/* Load the actual content of a lazy value.  Fetch the data from the
   user's process and clear the lazy flag to indicate that the data in
   the buffer is valid.

   If the value is zero-length, we avoid calling read_memory, which
   would abort.  We mark the value as fetched anyway -- all 0 bytes of
   it.

   Dispatches on the value's location kind: bitfield, memory,
   register, or computed; anything else is an internal error.  */

void
value_fetch_lazy (struct value *val)
{
  gdb_assert (value_lazy (val));
  allocate_value_contents (val);
  /* A value is either lazy, or fully fetched.  The
     availability/validity is only established as we try to fetch a
     value.  */
  gdb_assert (VEC_empty (range_s, val->optimized_out));
  gdb_assert (VEC_empty (range_s, val->unavailable));
  if (value_bitsize (val))
    {
      /* To read a lazy bitfield, read the entire enclosing value.  This
	 prevents reading the same block of (possibly volatile) memory once
	 per bitfield.  It would be even better to read only the containing
	 word, but we have no way to record that just specific bits of a
	 value have been fetched.  */
      /* NOTE(review): TYPE below is computed but never used in this
	 branch.  */
      struct type *type = check_typedef (value_type (val));
      struct value *parent = value_parent (val);

      if (value_lazy (parent))
	value_fetch_lazy (parent);

      unpack_value_bitfield (val,
			     value_bitpos (val), value_bitsize (val),
			     value_contents_for_printing (parent),
			     value_offset (val), parent);
    }
  else if (VALUE_LVAL (val) == lval_memory)
    {
      CORE_ADDR addr = value_address (val);
      struct type *type = check_typedef (value_enclosing_type (val));

      /* Zero-length types read nothing but are still marked
	 fetched (see the function comment).  */
      if (TYPE_LENGTH (type))
	read_value_memory (val, 0, value_stack (val),
			   addr, value_contents_all_raw (val),
			   type_length_units (type));
    }
  else if (VALUE_LVAL (val) == lval_register)
    {
      struct frame_info *frame;
      int regnum;
      struct type *type = check_typedef (value_type (val));
      struct value *new_val = val, *mark = value_mark ();

      /* Offsets are not supported here; lazy register values must
	 refer to the entire register.  */
      gdb_assert (value_offset (val) == 0);

      /* Follow the chain of lazy lval_register values down the frame
	 stack until a non-lazy (or non-register) value is found.  */
      while (VALUE_LVAL (new_val) == lval_register && value_lazy (new_val))
	{
	  struct frame_id frame_id = VALUE_FRAME_ID (new_val);

	  frame = frame_find_by_id (frame_id);
	  regnum = VALUE_REGNUM (new_val);

	  gdb_assert (frame != NULL);

	  /* Convertible register routines are used for multi-register
	     values and for interpretation in different types
	     (e.g. float or int from a double register).  Lazy
	     register values should have the register's natural type,
	     so they do not apply.  */
	  gdb_assert (!gdbarch_convert_register_p (get_frame_arch (frame),
						   regnum, type));

	  new_val = get_frame_register_value (frame, regnum);

	  /* If we get another lazy lval_register value, it means the
	     register is found by reading it from the next frame.
	     get_frame_register_value should never return a value with
	     the frame id pointing to FRAME.  If it does, it means we
	     either have two consecutive frames with the same frame id
	     in the frame chain, or some code is trying to unwind
	     behind get_prev_frame's back (e.g., a frame unwind
	     sniffer trying to unwind), bypassing its validations.  In
	     any case, it should always be an internal error to end up
	     in this situation.  */
	  if (VALUE_LVAL (new_val) == lval_register
	      && value_lazy (new_val)
	      && frame_id_eq (VALUE_FRAME_ID (new_val), frame_id))
	    internal_error (__FILE__, __LINE__,
			    _("infinite loop while fetching a register"));
	}

      /* If it's still lazy (for instance, a saved register on the
	 stack), fetch it.  */
      if (value_lazy (new_val))
	value_fetch_lazy (new_val);

      /* Copy the contents and the unavailability/optimized-out
	 meta-data from NEW_VAL to VAL.  */
      set_value_lazy (val, 0);
      value_contents_copy (val, value_embedded_offset (val),
			   new_val, value_embedded_offset (new_val),
			   type_length_units (type));

      if (frame_debug)
	{
	  /* Trace the fetch to gdb_stdlog for "set debug frame".  */
	  struct gdbarch *gdbarch;
	  frame = frame_find_by_id (VALUE_FRAME_ID (val));
	  regnum = VALUE_REGNUM (val);
	  gdbarch = get_frame_arch (frame);

	  fprintf_unfiltered (gdb_stdlog,
			      "{ value_fetch_lazy "
			      "(frame=%d,regnum=%d(%s),...) ",
			      frame_relative_level (frame), regnum,
			      user_reg_map_regnum_to_name (gdbarch, regnum));

	  fprintf_unfiltered (gdb_stdlog, "->");
	  if (value_optimized_out (new_val))
	    {
	      fprintf_unfiltered (gdb_stdlog, " ");
	      val_print_optimized_out (new_val, gdb_stdlog);
	    }
	  else
	    {
	      int i;
	      const gdb_byte *buf = value_contents (new_val);

	      if (VALUE_LVAL (new_val) == lval_register)
		fprintf_unfiltered (gdb_stdlog, " register=%d",
				    VALUE_REGNUM (new_val));
	      else if (VALUE_LVAL (new_val) == lval_memory)
		fprintf_unfiltered (gdb_stdlog, " address=%s",
				    paddress (gdbarch,
					      value_address (new_val)));
	      else
		fprintf_unfiltered (gdb_stdlog, " computed");

	      fprintf_unfiltered (gdb_stdlog, " bytes=");
	      fprintf_unfiltered (gdb_stdlog, "[");
	      for (i = 0; i < register_size (gdbarch, regnum); i++)
		fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
	      fprintf_unfiltered (gdb_stdlog, "]");
	    }

	  fprintf_unfiltered (gdb_stdlog, " }\n");
	}

      /* Dispose of the intermediate values.  This prevents
	 watchpoints from trying to watch the saved frame pointer.  */
      value_free_to_mark (mark);
    }
  else if (VALUE_LVAL (val) == lval_computed
	   && value_computed_funcs (val)->read != NULL)
    value_computed_funcs (val)->read (val);
  else
    internal_error (__FILE__, __LINE__, _("Unexpected lazy value type."));

  set_value_lazy (val, 0);
}
4036
4037 /* Implementation of the convenience function $_isvoid. */
4038
4039 static struct value *
4040 isvoid_internal_fn (struct gdbarch *gdbarch,
4041 const struct language_defn *language,
4042 void *cookie, int argc, struct value **argv)
4043 {
4044 int ret;
4045
4046 if (argc != 1)
4047 error (_("You must provide one argument for $_isvoid."));
4048
4049 ret = TYPE_CODE (value_type (argv[0])) == TYPE_CODE_VOID;
4050
4051 return value_from_longest (builtin_type (gdbarch)->builtin_int, ret);
4052 }
4053
/* Module initialization: register the value-related commands,
   convenience functions, and settings.  */

void
_initialize_values (void)
{
  /* "show convenience" -- list convenience variables/functions.
     NOTE(review): the help text below reads "\"$_\"holds" -- it looks
     like a space is missing before "holds"; left untouched since it is
     a user-visible string.  */
  add_cmd ("convenience", no_class, show_convenience, _("\
Debugger convenience (\"$foo\") variables and functions.\n\
Convenience variables are created when you assign them values;\n\
thus, \"set $foo=1\" gives \"$foo\" the value 1. Values may be any type.\n\
\n\
A few convenience variables are given values automatically:\n\
\"$_\"holds the last address examined with \"x\" or \"info lines\",\n\
\"$__\" holds the contents of the last address examined with \"x\"."
#ifdef HAVE_PYTHON
"\n\n\
Convenience functions are defined via the Python API."
#endif
	   ), &showlist);
  add_alias_cmd ("conv", "convenience", no_class, 1, &showlist);

  /* "show values" -- display the value history.  */
  add_cmd ("values", no_set_class, show_values, _("\
Elements of value history around item number IDX (or last ten)."),
	   &showlist);

  add_com ("init-if-undefined", class_vars, init_if_undefined_command, _("\
Initialize a convenience variable if necessary.\n\
init-if-undefined VARIABLE = EXPRESSION\n\
Set an internal VARIABLE to the result of the EXPRESSION if it does not\n\
exist or does not contain a value. The EXPRESSION is not evaluated if the\n\
VARIABLE is already initialized."));

  /* "function" is a help-only prefix for convenience functions.  */
  add_prefix_cmd ("function", no_class, function_command, _("\
Placeholder command for showing help on convenience functions."),
		  &functionlist, "function ", 0, &cmdlist);

  add_internal_function ("_isvoid", _("\
Check whether an expression is void.\n\
Usage: $_isvoid (expression)\n\
Return 1 if the expression is void, zero otherwise."),
			 isvoid_internal_fn, NULL);

  /* "set/show max-value-size" -- cap on value allocation size.  */
  add_setshow_zuinteger_unlimited_cmd ("max-value-size",
				       class_support, &max_value_size, _("\
Set maximum sized value gdb will load from the inferior."), _("\
Show maximum sized value gdb will load from the inferior."), _("\
Use this to control the maximum size, in bytes, of a value that gdb\n\
will load from the inferior. Setting this value to 'unlimited'\n\
disables checking.\n\
Setting this does not invalidate already allocated values, it only\n\
prevents future values, larger than this size, from being allocated."),
				       set_max_value_size,
				       show_max_value_size,
				       &setlist, &showlist);
}
This page took 0.133658 seconds and 4 git commands to generate.