gdb/value.c
1 /* Low level packing and unpacking of values for GDB, the GNU Debugger.
2
3 Copyright (C) 1986-2016 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21 #include "arch-utils.h"
22 #include "symtab.h"
23 #include "gdbtypes.h"
24 #include "value.h"
25 #include "gdbcore.h"
26 #include "command.h"
27 #include "gdbcmd.h"
28 #include "target.h"
29 #include "language.h"
30 #include "demangle.h"
31 #include "doublest.h"
32 #include "regcache.h"
33 #include "block.h"
34 #include "dfp.h"
35 #include "objfiles.h"
36 #include "valprint.h"
37 #include "cli/cli-decode.h"
38 #include "extension.h"
39 #include <ctype.h>
40 #include "tracepoint.h"
41 #include "cp-abi.h"
42 #include "user-regs.h"
43
44 /* Prototypes for exported functions. */
45
46 void _initialize_values (void);
47
48 /* Definition of a user function. */
49 struct internal_function
50 {
51 /* The name of the function. It is a bit odd to have this in the
52 function itself -- the user might use a differently-named
53 convenience variable to hold the function. */
54 char *name;
55
56 /* The handler. */
57 internal_function_fn handler;
58
59 /* User data for the handler. */
60 void *cookie;
61 };
62
63 /* Defines an [OFFSET, OFFSET + LENGTH) range. */
64
65 struct range
66 {
67 /* Lowest offset in the range. */
68 int offset;
69
70 /* Length of the range. */
71 int length;
72 };
73
74 typedef struct range range_s;
75
76 DEF_VEC_O(range_s);
77
78 /* Returns true if the ranges defined by [offset1, offset1+len1) and
79 [offset2, offset2+len2) overlap. */
80
81 static int
82 ranges_overlap (int offset1, int len1,
83 int offset2, int len2)
84 {
85 ULONGEST h, l;
86
87 l = max (offset1, offset2);
88 h = min (offset1 + len1, offset2 + len2);
89 return (l < h);
90 }
91
92 /* Returns true if the first argument is strictly less than the
93 second, useful for VEC_lower_bound. We keep ranges sorted by
94 offset and coalesce overlapping and contiguous ranges, so this just
95 compares the starting offset. */
96
97 static int
98 range_lessthan (const range_s *r1, const range_s *r2)
99 {
100 return r1->offset < r2->offset;
101 }
102
103 /* Returns true if RANGES contains any range that overlaps [OFFSET,
104 OFFSET+LENGTH). */
105
106 static int
107 ranges_contain (VEC(range_s) *ranges, int offset, int length)
108 {
109 range_s what;
110 int i;
111
112 what.offset = offset;
113 what.length = length;
114
115 /* We keep ranges sorted by offset and coalesce overlapping and
116 contiguous ranges, so to check if a range list contains a given
117 range, we can do a binary search for the position the given range
118 would be inserted if we only considered the starting OFFSET of
119 ranges. We call that position I. Since we also have LENGTH to
120 care for (this is a range after all), we need to check if the
121 _previous_ range overlaps the I range. E.g.,
122
123 R
124 |---|
125 |---| |---| |------| ... |--|
126 0 1 2 N
127
128 I=1
129
130 In the case above, the binary search would return `I=1', meaning,
131 this OFFSET should be inserted at position 1, and the current
132 position 1 should be pushed further (and before 2). But, `0'
133 overlaps with R.
134
135 Then we also need to check whether R overlaps the range at position I itself.
136 E.g.,
137
138 R
139 |---|
140 |---| |---| |-------| ... |--|
141 0 1 2 N
142
143 I=1
144 */
145
146 i = VEC_lower_bound (range_s, ranges, &what, range_lessthan);
147
148 if (i > 0)
149 {
150 struct range *bef = VEC_index (range_s, ranges, i - 1);
151
152 if (ranges_overlap (bef->offset, bef->length, offset, length))
153 return 1;
154 }
155
156 if (i < VEC_length (range_s, ranges))
157 {
158 struct range *r = VEC_index (range_s, ranges, i);
159
160 if (ranges_overlap (r->offset, r->length, offset, length))
161 return 1;
162 }
163
164 return 0;
165 }
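/* Illustrative sketch, not part of the original file: a minimal usage
   example of ranges_contain against a small, already sorted and
   coalesced vector.  The bit offsets and lengths below are invented
   purely for the example.  */

static void
example_ranges_contain_usage (void)
{
  VEC(range_s) *ranges = NULL;
  range_s r1 = { 0, 8 };	/* bits [0, 8) */
  range_s r2 = { 32, 16 };	/* bits [32, 48) */

  VEC_safe_push (range_s, ranges, &r1);
  VEC_safe_push (range_s, ranges, &r2);

  gdb_assert (ranges_contain (ranges, 4, 8));	  /* overlaps [0, 8) */
  gdb_assert (!ranges_contain (ranges, 8, 24));	  /* falls in the gap */
  gdb_assert (ranges_contain (ranges, 40, 100)); /* overlaps [32, 48) */

  VEC_free (range_s, ranges);
}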
166
167 static struct cmd_list_element *functionlist;
168
169 /* Note that the fields in this structure are arranged to save a bit
170 of memory. */
171
172 struct value
173 {
174 /* Type of value; either not an lval, or one of the various
175 different possible kinds of lval. */
176 enum lval_type lval;
177
178 /* Is it modifiable? Only relevant if lval != not_lval. */
179 unsigned int modifiable : 1;
180
181 /* If zero, contents of this value are in the contents field. If
182 nonzero, contents are in inferior. If the lval field is lval_memory,
183 the contents are in inferior memory at location.address plus offset.
184 The lval field may also be lval_register.
185
186 WARNING: This field is used by the code which handles watchpoints
187 (see breakpoint.c) to decide whether a particular value can be
188 watched by hardware watchpoints. If the lazy flag is set for
189 some member of a value chain, it is assumed that this member of
190 the chain doesn't need to be watched as part of watching the
191 value itself. This is how GDB avoids watching the entire struct
192 or array when the user wants to watch a single struct member or
193 array element. If you ever change the way lazy flag is set and
194 reset, be sure to consider this use as well! */
195 unsigned int lazy : 1;
196
197 /* If value is a variable, is it initialized or not. */
198 unsigned int initialized : 1;
199
200 /* If value is from the stack. If this is set, read_stack will be
201 used instead of read_memory to enable extra caching. */
202 unsigned int stack : 1;
203
204 /* If the value has been released. */
205 unsigned int released : 1;
206
207 /* Register number if the value is from a register. */
208 short regnum;
209
210 /* Location of value (if lval). */
211 union
212 {
213 /* If lval == lval_memory, this is the address in the inferior.
214 If lval == lval_register, this is the byte offset into the
215 registers structure. */
216 CORE_ADDR address;
217
218 /* Pointer to internal variable. */
219 struct internalvar *internalvar;
220
221 /* Pointer to xmethod worker. */
222 struct xmethod_worker *xm_worker;
223
224 /* If lval == lval_computed, this is a set of function pointers
225 to use to access and describe the value, and a closure pointer
226 for them to use. */
227 struct
228 {
229 /* Functions to call. */
230 const struct lval_funcs *funcs;
231
232 /* Closure for those functions to use. */
233 void *closure;
234 } computed;
235 } location;
236
237 /* Describes offset of a value within lval of a structure in target
238 addressable memory units. If lval == lval_memory, this is an offset to
239 the address. If lval == lval_register, this is a further offset from
240 location.address within the registers structure. Note also the member
241 embedded_offset below. */
242 int offset;
243
244 /* Only used for bitfields; number of bits contained in them. */
245 int bitsize;
246
247 /* Only used for bitfields; position of start of field. For
248 gdbarch_bits_big_endian=0 targets, it is the position of the LSB. For
249 gdbarch_bits_big_endian=1 targets, it is the position of the MSB. */
250 int bitpos;
251
252 /* The number of references to this value. When a value is created,
253 the value chain holds a reference, so REFERENCE_COUNT is 1. If
254 release_value is called, this value is removed from the chain but
255 the caller of release_value now has a reference to this value.
256 The caller must arrange for a call to value_free later. */
257 int reference_count;
258
259 /* Only used for bitfields; the containing value. This allows a
260 single read from the target when displaying multiple
261 bitfields. */
262 struct value *parent;
263
264 /* Frame register value is relative to. This will be described in
265 the lval enum above as "lval_register". */
266 struct frame_id frame_id;
267
268 /* Type of the value. */
269 struct type *type;
270
271 /* If a value represents a C++ object, then the `type' field gives
272 the object's compile-time type. If the object actually belongs
273 to some class derived from `type', perhaps with other base
274 classes and additional members, then `type' is just a subobject
275 of the real thing, and the full object is probably larger than
276 `type' would suggest.
277
278 If `type' is a dynamic class (i.e. one with a vtable), then GDB
279 can actually determine the object's run-time type by looking at
280 the run-time type information in the vtable. When this
281 information is available, we may elect to read in the entire
282 object, for several reasons:
283
284 - When printing the value, the user would probably rather see the
285 full object, not just the limited portion apparent from the
286 compile-time type.
287
288 - If `type' has virtual base classes, then even printing `type'
289 alone may require reaching outside the `type' portion of the
290 object to wherever the virtual base class has been stored.
291
292 When we store the entire object, `enclosing_type' is the run-time
293 type -- the complete object -- and `embedded_offset' is the
294 offset of `type' within that larger type, in target addressable memory
295 units. The value_contents() macro takes `embedded_offset' into account,
296 so most GDB code continues to see the `type' portion of the value, just
297 as the inferior would.
298
299 If `type' is a pointer to an object, then `enclosing_type' is a
300 pointer to the object's run-time type, and `pointed_to_offset' is
301 the offset in target addressable memory units from the full object
302 to the pointed-to object -- that is, the value `embedded_offset' would
303 have if we followed the pointer and fetched the complete object.
304 (I don't really see the point. Why not just determine the
305 run-time type when you indirect, and avoid the special case? The
306 contents don't matter until you indirect anyway.)
307
308 If we're not doing anything fancy, `enclosing_type' is equal to
309 `type', and `embedded_offset' is zero, so everything works
310 normally. */
311 struct type *enclosing_type;
312 int embedded_offset;
313 int pointed_to_offset;
314
315 /* Values are stored in a chain, so that they can be deleted easily
316 over calls to the inferior. Values assigned to internal
317 variables, put into the value history or exposed to Python are
318 taken off this list. */
319 struct value *next;
320
321 /* Actual contents of the value. Target byte-order. NULL or not
322 valid if lazy is nonzero. */
323 gdb_byte *contents;
324
325 /* Unavailable ranges in CONTENTS. We mark unavailable ranges,
326 rather than available, since the common and default case is for a
327 value to be available. This is filled in at value read time.
328 The unavailable ranges are tracked in bits. Note that a contents
329 bit that has been optimized out doesn't really exist in the
330 program, so it can't be marked unavailable either. */
331 VEC(range_s) *unavailable;
332
333 /* Likewise, but for optimized out contents (a chunk of the value of
334 a variable that does not actually exist in the program). If LVAL
335 is lval_register, this is a register ($pc, $sp, etc., never a
336 program variable) that has not been saved in the frame. Not
337 saved registers and optimized-out program variables values are
338 treated pretty much the same, except not-saved registers have a
339 different string representation and related error strings. */
340 VEC(range_s) *optimized_out;
341 };
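/* Illustrative sketch, not part of the original file: the relation
   between the enclosing object and the embedded `type' portion,
   expressed with the accessors declared in value.h.  V is assumed to
   be any value produced elsewhere.  */

static void
example_embedded_offset_relation (struct value *v)
{
  struct gdbarch *arch = get_value_arch (v);
  int unit_size = gdbarch_addressable_memory_unit_size (arch);

  /* The `type' portion of the contents starts embedded_offset
     addressable units into the buffer that holds the full enclosing
     object, which is exactly what value_contents_raw accounts for.  */
  gdb_assert (value_contents_raw (v)
	      == (value_contents_all_raw (v)
		  + value_embedded_offset (v) * unit_size));
}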
342
343 /* See value.h. */
344
345 struct gdbarch *
346 get_value_arch (const struct value *value)
347 {
348 return get_type_arch (value_type (value));
349 }
350
351 int
352 value_bits_available (const struct value *value, int offset, int length)
353 {
354 gdb_assert (!value->lazy);
355
356 return !ranges_contain (value->unavailable, offset, length);
357 }
358
359 int
360 value_bytes_available (const struct value *value, int offset, int length)
361 {
362 return value_bits_available (value,
363 offset * TARGET_CHAR_BIT,
364 length * TARGET_CHAR_BIT);
365 }
366
367 int
368 value_bits_any_optimized_out (const struct value *value, int bit_offset, int bit_length)
369 {
370 gdb_assert (!value->lazy);
371
372 return ranges_contain (value->optimized_out, bit_offset, bit_length);
373 }
374
375 int
376 value_entirely_available (struct value *value)
377 {
378 /* We can only tell whether the whole value is available when we try
379 to read it. */
380 if (value->lazy)
381 value_fetch_lazy (value);
382
383 if (VEC_empty (range_s, value->unavailable))
384 return 1;
385 return 0;
386 }
387
388 /* Returns true if VALUE is entirely covered by RANGES. If the value
389 is lazy, it'll be read now. Note that RANGES is a pointer to
390 pointer because reading the value might change *RANGES.
391
392 static int
393 value_entirely_covered_by_range_vector (struct value *value,
394 VEC(range_s) **ranges)
395 {
396 /* We can only tell whether the whole value is optimized out /
397 unavailable when we try to read it. */
398 if (value->lazy)
399 value_fetch_lazy (value);
400
401 if (VEC_length (range_s, *ranges) == 1)
402 {
403 struct range *t = VEC_index (range_s, *ranges, 0);
404
405 if (t->offset == 0
406 && t->length == (TARGET_CHAR_BIT
407 * TYPE_LENGTH (value_enclosing_type (value))))
408 return 1;
409 }
410
411 return 0;
412 }
413
414 int
415 value_entirely_unavailable (struct value *value)
416 {
417 return value_entirely_covered_by_range_vector (value, &value->unavailable);
418 }
419
420 int
421 value_entirely_optimized_out (struct value *value)
422 {
423 return value_entirely_covered_by_range_vector (value, &value->optimized_out);
424 }
425
426 /* Insert into the vector pointed to by VECTORP the bit range starting at
427 OFFSET bits and extending for the next LENGTH bits. */
428
429 static void
430 insert_into_bit_range_vector (VEC(range_s) **vectorp, int offset, int length)
431 {
432 range_s newr;
433 int i;
434
435 /* Insert the range sorted. If there's overlap or the new range
436 would be contiguous with an existing range, merge. */
437
438 newr.offset = offset;
439 newr.length = length;
440
441 /* Do a binary search for the position the given range would be
442 inserted if we only considered the starting OFFSET of ranges.
443 Call that position I. Since we also have LENGTH to care for
444 (this is a range after all), we need to check if the _previous_
445 range overlaps the I range. E.g., calling R the new range:
446
447 #1 - overlaps with previous
448
449 R
450 |-...-|
451 |---| |---| |------| ... |--|
452 0 1 2 N
453
454 I=1
455
456 In the case #1 above, the binary search would return `I=1',
457 meaning, this OFFSET should be inserted at position 1, and the
458 current position 1 should be pushed further (and become 2). But,
459 note that `0' overlaps with R, so we want to merge them.
460
461 A similar consideration needs to be taken if the new range would
462 be contiguous with the previous range:
463
464 #2 - contiguous with previous
465
466 R
467 |-...-|
468 |--| |---| |------| ... |--|
469 0 1 2 N
470
471 I=1
472
473 If there's no overlap with the previous range, as in:
474
475 #3 - not overlapping and not contiguous
476
477 R
478 |-...-|
479 |--| |---| |------| ... |--|
480 0 1 2 N
481
482 I=1
483
484 or if I is 0:
485
486 #4 - R is the range with lowest offset
487
488 R
489 |-...-|
490 |--| |---| |------| ... |--|
491 0 1 2 N
492
493 I=0
494
495 ... we just push the new range to I.
496
497 All the 4 cases above need to consider that the new range may
498 also overlap several of the ranges that follow, or that R may be
499 contiguous with the following range, and merge. E.g.,
500
501 #5 - overlapping following ranges
502
503 R
504 |------------------------|
505 |--| |---| |------| ... |--|
506 0 1 2 N
507
508 I=0
509
510 or:
511
512 R
513 |-------|
514 |--| |---| |------| ... |--|
515 0 1 2 N
516
517 I=1
518
519 */
520
521 i = VEC_lower_bound (range_s, *vectorp, &newr, range_lessthan);
522 if (i > 0)
523 {
524 struct range *bef = VEC_index (range_s, *vectorp, i - 1);
525
526 if (ranges_overlap (bef->offset, bef->length, offset, length))
527 {
528 /* #1 */
529 ULONGEST l = min (bef->offset, offset);
530 ULONGEST h = max (bef->offset + bef->length, offset + length);
531
532 bef->offset = l;
533 bef->length = h - l;
534 i--;
535 }
536 else if (offset == bef->offset + bef->length)
537 {
538 /* #2 */
539 bef->length += length;
540 i--;
541 }
542 else
543 {
544 /* #3 */
545 VEC_safe_insert (range_s, *vectorp, i, &newr);
546 }
547 }
548 else
549 {
550 /* #4 */
551 VEC_safe_insert (range_s, *vectorp, i, &newr);
552 }
553
554 /* Check whether the ranges following the one we've just added or
555 touched can be folded in (#5 above). */
556 if (i + 1 < VEC_length (range_s, *vectorp))
557 {
558 struct range *t;
559 struct range *r;
560 int removed = 0;
561 int next = i + 1;
562
563 /* Get the range we just touched. */
564 t = VEC_index (range_s, *vectorp, i);
565 removed = 0;
566
567 i = next;
568 for (; VEC_iterate (range_s, *vectorp, i, r); i++)
569 if (r->offset <= t->offset + t->length)
570 {
571 ULONGEST l, h;
572
573 l = min (t->offset, r->offset);
574 h = max (t->offset + t->length, r->offset + r->length);
575
576 t->offset = l;
577 t->length = h - l;
578
579 removed++;
580 }
581 else
582 {
583 /* If we couldn't merge this one, we won't be able to
584 merge following ones either, since the ranges are
585 always sorted by OFFSET. */
586 break;
587 }
588
589 if (removed != 0)
590 VEC_block_remove (range_s, *vectorp, next, removed);
591 }
592 }
593
594 void
595 mark_value_bits_unavailable (struct value *value, int offset, int length)
596 {
597 insert_into_bit_range_vector (&value->unavailable, offset, length);
598 }
599
600 void
601 mark_value_bytes_unavailable (struct value *value, int offset, int length)
602 {
603 mark_value_bits_unavailable (value,
604 offset * TARGET_CHAR_BIT,
605 length * TARGET_CHAR_BIT);
606 }
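/* Illustrative sketch, not part of the original file: overlapping and
   contiguous insertions coalesce into a single range, as described in
   the big comment above insert_into_bit_range_vector.  The bit offsets
   below are invented for the example.  */

static void
example_bit_range_coalescing (void)
{
  VEC(range_s) *v = NULL;
  struct range *r;

  insert_into_bit_range_vector (&v, 0, 8);	/* [0, 8) */
  insert_into_bit_range_vector (&v, 16, 8);	/* [16, 24) */
  insert_into_bit_range_vector (&v, 4, 12);	/* [4, 16) bridges both */

  gdb_assert (VEC_length (range_s, v) == 1);
  r = VEC_index (range_s, v, 0);
  gdb_assert (r->offset == 0 && r->length == 24);

  VEC_free (range_s, v);
}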
607
608 /* Find the first range in RANGES that overlaps the range defined by
609 OFFSET and LENGTH, starting at element POS in the RANGES vector.
610 Returns the index into RANGES where such an overlapping range was
611 found, or -1 if none was found. */
612
613 static int
614 find_first_range_overlap (VEC(range_s) *ranges, int pos,
615 int offset, int length)
616 {
617 range_s *r;
618 int i;
619
620 for (i = pos; VEC_iterate (range_s, ranges, i, r); i++)
621 if (ranges_overlap (r->offset, r->length, offset, length))
622 return i;
623
624 return -1;
625 }
626
627 /* Compare LENGTH_BITS of memory at PTR1 + OFFSET1_BITS with the memory at
628 PTR2 + OFFSET2_BITS. Return 0 if the memory is the same, otherwise
629 return non-zero.
630
631 It must always be the case that:
632 OFFSET1_BITS % TARGET_CHAR_BIT == OFFSET2_BITS % TARGET_CHAR_BIT
633
634 It is assumed that memory can be accessed from:
635 PTR + (OFFSET_BITS / TARGET_CHAR_BIT)
636 to:
637 PTR + ((OFFSET_BITS + LENGTH_BITS + TARGET_CHAR_BIT - 1)
638 / TARGET_CHAR_BIT) */
639 static int
640 memcmp_with_bit_offsets (const gdb_byte *ptr1, size_t offset1_bits,
641 const gdb_byte *ptr2, size_t offset2_bits,
642 size_t length_bits)
643 {
644 gdb_assert (offset1_bits % TARGET_CHAR_BIT
645 == offset2_bits % TARGET_CHAR_BIT);
646
647 if (offset1_bits % TARGET_CHAR_BIT != 0)
648 {
649 size_t bits;
650 gdb_byte mask, b1, b2;
651
652 /* The offset from the base pointers PTR1 and PTR2 is not a complete
653 number of bytes. A number of bits up to either the next exact
654 byte boundary, or LENGTH_BITS (whichever is sooner) will be
655 compared. */
656 bits = TARGET_CHAR_BIT - offset1_bits % TARGET_CHAR_BIT;
657 gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
658 mask = (1 << bits) - 1;
659
660 if (length_bits < bits)
661 {
662 mask &= ~(gdb_byte) ((1 << (bits - length_bits)) - 1);
663 bits = length_bits;
664 }
665
666 /* Now load the two bytes and mask off the bits we care about. */
667 b1 = *(ptr1 + offset1_bits / TARGET_CHAR_BIT) & mask;
668 b2 = *(ptr2 + offset2_bits / TARGET_CHAR_BIT) & mask;
669
670 if (b1 != b2)
671 return 1;
672
673 /* Now update the length and offsets to take account of the bits
674 we've just compared. */
675 length_bits -= bits;
676 offset1_bits += bits;
677 offset2_bits += bits;
678 }
679
680 if (length_bits % TARGET_CHAR_BIT != 0)
681 {
682 size_t bits;
683 size_t o1, o2;
684 gdb_byte mask, b1, b2;
685
686 /* The length is not an exact number of bytes. After the previous
687 IF block, the offsets are byte aligned, or the
688 length is zero (in which case this code is not reached). Compare
689 a number of bits at the end of the region, starting from an exact
690 byte boundary. */
691 bits = length_bits % TARGET_CHAR_BIT;
692 o1 = offset1_bits + length_bits - bits;
693 o2 = offset2_bits + length_bits - bits;
694
695 gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
696 mask = ((1 << bits) - 1) << (TARGET_CHAR_BIT - bits);
697
698 gdb_assert (o1 % TARGET_CHAR_BIT == 0);
699 gdb_assert (o2 % TARGET_CHAR_BIT == 0);
700
701 b1 = *(ptr1 + o1 / TARGET_CHAR_BIT) & mask;
702 b2 = *(ptr2 + o2 / TARGET_CHAR_BIT) & mask;
703
704 if (b1 != b2)
705 return 1;
706
707 length_bits -= bits;
708 }
709
710 if (length_bits > 0)
711 {
712 /* We've now taken care of any stray "bits" at the start or end of
713 the region to compare; the remainder can be covered with a simple
714 memcmp. */
715 gdb_assert (offset1_bits % TARGET_CHAR_BIT == 0);
716 gdb_assert (offset2_bits % TARGET_CHAR_BIT == 0);
717 gdb_assert (length_bits % TARGET_CHAR_BIT == 0);
718
719 return memcmp (ptr1 + offset1_bits / TARGET_CHAR_BIT,
720 ptr2 + offset2_bits / TARGET_CHAR_BIT,
721 length_bits / TARGET_CHAR_BIT);
722 }
723
724 /* Length is zero, regions match. */
725 return 0;
726 }
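/* Illustrative sketch, not part of the original file: the bit-granular
   compare treats the most significant bits of a byte as coming first,
   so a 12-bit compare of the two invented buffers below never looks at
   the low nibble of the second byte.  Assumes TARGET_CHAR_BIT == 8.  */

static void
example_memcmp_with_bit_offsets (void)
{
  const gdb_byte a[2] = { 0xab, 0xcd };
  const gdb_byte b[2] = { 0xab, 0xc3 };

  /* The first 12 bits agree; the difference sits in the trailing
     (low-order) nibble of byte 1, outside the compared region.  */
  gdb_assert (memcmp_with_bit_offsets (a, 0, b, 0, 12) == 0);

  /* Comparing all 16 bits does see the difference.  */
  gdb_assert (memcmp_with_bit_offsets (a, 0, b, 0, 16) != 0);
}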
727
728 /* Helper struct for find_first_range_overlap_and_match and
729 value_contents_bits_eq. Keep track of which slot of a given ranges
730 vector we last looked at. */
731
732 struct ranges_and_idx
733 {
734 /* The ranges. */
735 VEC(range_s) *ranges;
736
737 /* The range we've last found in RANGES. Given ranges are sorted,
738 we can start the next lookup here. */
739 int idx;
740 };
741
742 /* Helper function for value_contents_bits_eq. Compare LENGTH bits of
743 RP1's ranges starting at OFFSET1 bits with LENGTH bits of RP2's
744 ranges starting at OFFSET2 bits. Return true if the ranges match
745 and fill in *L and *H with the overlapping window relative to
746 (both) OFFSET1 or OFFSET2. */
747
748 static int
749 find_first_range_overlap_and_match (struct ranges_and_idx *rp1,
750 struct ranges_and_idx *rp2,
751 int offset1, int offset2,
752 int length, ULONGEST *l, ULONGEST *h)
753 {
754 rp1->idx = find_first_range_overlap (rp1->ranges, rp1->idx,
755 offset1, length);
756 rp2->idx = find_first_range_overlap (rp2->ranges, rp2->idx,
757 offset2, length);
758
759 if (rp1->idx == -1 && rp2->idx == -1)
760 {
761 *l = length;
762 *h = length;
763 return 1;
764 }
765 else if (rp1->idx == -1 || rp2->idx == -1)
766 return 0;
767 else
768 {
769 range_s *r1, *r2;
770 ULONGEST l1, h1;
771 ULONGEST l2, h2;
772
773 r1 = VEC_index (range_s, rp1->ranges, rp1->idx);
774 r2 = VEC_index (range_s, rp2->ranges, rp2->idx);
775
776 /* Get the unavailable windows intersected by the incoming
777 ranges. The first and last ranges that overlap the argument
778 range may be wider than the incoming argument range. */
779 l1 = max (offset1, r1->offset);
780 h1 = min (offset1 + length, r1->offset + r1->length);
781
782 l2 = max (offset2, r2->offset);
783 h2 = min (offset2 + length, r2->offset + r2->length);
784
785 /* Make them relative to the respective start offsets, so we can
786 compare them for equality. */
787 l1 -= offset1;
788 h1 -= offset1;
789
790 l2 -= offset2;
791 h2 -= offset2;
792
793 /* Different ranges, no match. */
794 if (l1 != l2 || h1 != h2)
795 return 0;
796
797 *h = h1;
798 *l = l1;
799 return 1;
800 }
801 }
802
803 /* Helper function for value_contents_eq. The only difference is that
804 this function is bit rather than byte based.
805
806 Compare LENGTH bits of VAL1's contents starting at OFFSET1 bits
807 with LENGTH bits of VAL2's contents starting at OFFSET2 bits.
808 Return true if the available bits match. */
809
810 static int
811 value_contents_bits_eq (const struct value *val1, int offset1,
812 const struct value *val2, int offset2,
813 int length)
814 {
815 /* Each array element corresponds to a ranges source (unavailable,
816 optimized out). '1' is for VAL1, '2' for VAL2. */
817 struct ranges_and_idx rp1[2], rp2[2];
818
819 /* See function description in value.h. */
820 gdb_assert (!val1->lazy && !val2->lazy);
821
822 /* We shouldn't be trying to compare past the end of the values. */
823 gdb_assert (offset1 + length
824 <= TYPE_LENGTH (val1->enclosing_type) * TARGET_CHAR_BIT);
825 gdb_assert (offset2 + length
826 <= TYPE_LENGTH (val2->enclosing_type) * TARGET_CHAR_BIT);
827
828 memset (&rp1, 0, sizeof (rp1));
829 memset (&rp2, 0, sizeof (rp2));
830 rp1[0].ranges = val1->unavailable;
831 rp2[0].ranges = val2->unavailable;
832 rp1[1].ranges = val1->optimized_out;
833 rp2[1].ranges = val2->optimized_out;
834
835 while (length > 0)
836 {
837 ULONGEST l = 0, h = 0; /* init for gcc -Wall */
838 int i;
839
840 for (i = 0; i < 2; i++)
841 {
842 ULONGEST l_tmp, h_tmp;
843
844 /* The contents only match if the invalid/unavailable
845 contents ranges match as well. */
846 if (!find_first_range_overlap_and_match (&rp1[i], &rp2[i],
847 offset1, offset2, length,
848 &l_tmp, &h_tmp))
849 return 0;
850
851 /* We're interested in the lowest/first range found. */
852 if (i == 0 || l_tmp < l)
853 {
854 l = l_tmp;
855 h = h_tmp;
856 }
857 }
858
859 /* Compare the available/valid contents. */
860 if (memcmp_with_bit_offsets (val1->contents, offset1,
861 val2->contents, offset2, l) != 0)
862 return 0;
863
864 length -= h;
865 offset1 += h;
866 offset2 += h;
867 }
868
869 return 1;
870 }
871
872 int
873 value_contents_eq (const struct value *val1, int offset1,
874 const struct value *val2, int offset2,
875 int length)
876 {
877 return value_contents_bits_eq (val1, offset1 * TARGET_CHAR_BIT,
878 val2, offset2 * TARGET_CHAR_BIT,
879 length * TARGET_CHAR_BIT);
880 }
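/* Illustrative sketch, not part of the original file: value_contents_eq
   compares byte ranges of two values, also requiring that their
   unavailable/optimized-out metadata agree.  TYPE is assumed to be a
   small scalar type obtained elsewhere (it is not defined here).  */

static void
example_value_contents_eq (struct type *type)
{
  struct value *v1 = allocate_value (type);
  struct value *v2 = allocate_value (type);

  /* Freshly allocated contents are zeroed, so the values compare
     equal.  */
  gdb_assert (value_contents_eq (v1, 0, v2, 0, TYPE_LENGTH (type)));

  /* Changing a single byte makes them differ.  */
  value_contents_raw (v1)[0] = 1;
  gdb_assert (!value_contents_eq (v1, 0, v2, 0, TYPE_LENGTH (type)));
}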
881
882 /* Prototypes for local functions. */
883
884 static void show_values (char *, int);
885
886 static void show_convenience (char *, int);
887
888
889 /* The value-history records all the values printed
890 by print commands during this session. Each chunk
891 records 60 consecutive values. The first chunk on
892 the chain records the most recent values.
893 The total number of values is in value_history_count. */
894
895 #define VALUE_HISTORY_CHUNK 60
896
897 struct value_history_chunk
898 {
899 struct value_history_chunk *next;
900 struct value *values[VALUE_HISTORY_CHUNK];
901 };
902
903 /* Chain of chunks now in use. */
904
905 static struct value_history_chunk *value_history_chain;
906
907 static int value_history_count; /* Abs number of last entry stored. */
908
909 \f
910 /* List of all value objects currently allocated
911 (except for those released by calls to release_value).
912 This is so they can be freed after each command. */
913
914 static struct value *all_values;
915
916 /* Allocate a lazy value for type TYPE. Its actual content is
917 "lazily" allocated too: the content field of the return value is
918 NULL; it will be allocated when it is fetched from the target. */
919
920 struct value *
921 allocate_value_lazy (struct type *type)
922 {
923 struct value *val;
924
925 /* Call check_typedef on our type to make sure that, if TYPE
926 is a TYPE_CODE_TYPEDEF, its length is set to the length
927 of the target type instead of zero. However, we do not
928 replace the typedef type by the target type, because we want
929 to keep the typedef in order to be able to set the VAL's type
930 description correctly. */
931 check_typedef (type);
932
933 val = XCNEW (struct value);
934 val->contents = NULL;
935 val->next = all_values;
936 all_values = val;
937 val->type = type;
938 val->enclosing_type = type;
939 VALUE_LVAL (val) = not_lval;
940 val->location.address = 0;
941 VALUE_FRAME_ID (val) = null_frame_id;
942 val->offset = 0;
943 val->bitpos = 0;
944 val->bitsize = 0;
945 VALUE_REGNUM (val) = -1;
946 val->lazy = 1;
947 val->embedded_offset = 0;
948 val->pointed_to_offset = 0;
949 val->modifiable = 1;
950 val->initialized = 1; /* Default to initialized. */
951
952 /* Values start out on the all_values chain. */
953 val->reference_count = 1;
954
955 return val;
956 }
957
958 /* The maximum size, in bytes, that GDB will try to allocate for a value.
959 The initial value of 64k was not selected for any specific reason; it is
960 just a reasonable starting point. */
961
962 static int max_value_size = 65536; /* 64k bytes */
963
964 /* It is critical that the MAX_VALUE_SIZE is at least as big as the size of
965 LONGEST, otherwise GDB will not be able to parse integer values from the
966 CLI; for example if the MAX_VALUE_SIZE could be set to 1 then GDB would
967 be unable to parse "set max-value-size 2".
968
969 As we want a consistent GDB experience across hosts with different sizes
970 of LONGEST, this arbitrary minimum value was selected; as long as it
971 is bigger than LONGEST on all GDB-supported hosts, we're fine. */
972
973 #define MIN_VALUE_FOR_MAX_VALUE_SIZE 16
974 gdb_static_assert (sizeof (LONGEST) <= MIN_VALUE_FOR_MAX_VALUE_SIZE);
975
976 /* Implement the "set max-value-size" command. */
977
978 static void
979 set_max_value_size (char *args, int from_tty,
980 struct cmd_list_element *c)
981 {
982 gdb_assert (max_value_size == -1 || max_value_size >= 0);
983
984 if (max_value_size > -1 && max_value_size < MIN_VALUE_FOR_MAX_VALUE_SIZE)
985 {
986 max_value_size = MIN_VALUE_FOR_MAX_VALUE_SIZE;
987 error (_("max-value-size set too low, increasing to %d bytes"),
988 max_value_size);
989 }
990 }
991
992 /* Implement the "show max-value-size" command. */
993
994 static void
995 show_max_value_size (struct ui_file *file, int from_tty,
996 struct cmd_list_element *c, const char *value)
997 {
998 if (max_value_size == -1)
999 fprintf_filtered (file, _("Maximum value size is unlimited.\n"));
1000 else
1001 fprintf_filtered (file, _("Maximum value size is %d bytes.\n"),
1002 max_value_size);
1003 }
1004
1005 /* Called before we attempt to allocate or reallocate a buffer for the
1006 contents of a value. TYPE is the type of the value for which we are
1007 allocating the buffer. If the buffer is too large (based on the user
1008 controllable setting) then throw an error. If this function returns
1009 then we should attempt to allocate the buffer. */
1010
1011 static void
1012 check_type_length_before_alloc (const struct type *type)
1013 {
1014 unsigned int length = TYPE_LENGTH (type);
1015
1016 if (max_value_size > -1 && length > max_value_size)
1017 {
1018 if (TYPE_NAME (type) != NULL)
1019 error (_("value of type `%s' requires %u bytes, which is more "
1020 "than max-value-size"), TYPE_NAME (type), length);
1021 else
1022 error (_("value requires %u bytes, which is more than "
1023 "max-value-size"), length);
1024 }
1025 }
1026
1027 /* Allocate the contents of VAL if it has not been allocated yet. */
1028
1029 static void
1030 allocate_value_contents (struct value *val)
1031 {
1032 if (!val->contents)
1033 {
1034 check_type_length_before_alloc (val->enclosing_type);
1035 val->contents
1036 = (gdb_byte *) xzalloc (TYPE_LENGTH (val->enclosing_type));
1037 }
1038 }
1039
1040 /* Allocate a value and its contents for type TYPE. */
1041
1042 struct value *
1043 allocate_value (struct type *type)
1044 {
1045 struct value *val = allocate_value_lazy (type);
1046
1047 allocate_value_contents (val);
1048 val->lazy = 0;
1049 return val;
1050 }
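/* Illustrative sketch, not part of the original file: the common
   allocation pattern — reserve storage for TYPE, then fill the raw
   contents.  TYPE and BYTES are assumed to come from elsewhere, with
   BYTES at least TYPE_LENGTH (TYPE) bytes long.  */

static struct value *
example_allocate_and_fill (struct type *type, const gdb_byte *bytes)
{
  struct value *val = allocate_value (type);

  memcpy (value_contents_raw (val), bytes, TYPE_LENGTH (type));
  return val;
}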
1051
1052 /* Allocate a value that has the correct length
1053 for COUNT repetitions of type TYPE. */
1054
1055 struct value *
1056 allocate_repeat_value (struct type *type, int count)
1057 {
1058 int low_bound = current_language->string_lower_bound; /* ??? */
1059 /* FIXME-type-allocation: need a way to free this type when we are
1060 done with it. */
1061 struct type *array_type
1062 = lookup_array_range_type (type, low_bound, count + low_bound - 1);
1063
1064 return allocate_value (array_type);
1065 }
1066
1067 struct value *
1068 allocate_computed_value (struct type *type,
1069 const struct lval_funcs *funcs,
1070 void *closure)
1071 {
1072 struct value *v = allocate_value_lazy (type);
1073
1074 VALUE_LVAL (v) = lval_computed;
1075 v->location.computed.funcs = funcs;
1076 v->location.computed.closure = closure;
1077
1078 return v;
1079 }
1080
1081 /* Allocate NOT_LVAL value for type TYPE being OPTIMIZED_OUT. */
1082
1083 struct value *
1084 allocate_optimized_out_value (struct type *type)
1085 {
1086 struct value *retval = allocate_value_lazy (type);
1087
1088 mark_value_bytes_optimized_out (retval, 0, TYPE_LENGTH (type));
1089 set_value_lazy (retval, 0);
1090 return retval;
1091 }
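/* Illustrative sketch, not part of the original file: a value created
   by allocate_optimized_out_value reports itself as (entirely)
   optimized out, and value_contents on it would error.  TYPE is
   assumed to be obtained elsewhere.  */

static void
example_optimized_out_value (struct type *type)
{
  struct value *val = allocate_optimized_out_value (type);

  gdb_assert (value_optimized_out (val));
  gdb_assert (value_entirely_optimized_out (val));
}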
1092
1093 /* Accessor methods. */
1094
1095 struct value *
1096 value_next (const struct value *value)
1097 {
1098 return value->next;
1099 }
1100
1101 struct type *
1102 value_type (const struct value *value)
1103 {
1104 return value->type;
1105 }
1106 void
1107 deprecated_set_value_type (struct value *value, struct type *type)
1108 {
1109 value->type = type;
1110 }
1111
1112 int
1113 value_offset (const struct value *value)
1114 {
1115 return value->offset;
1116 }
1117 void
1118 set_value_offset (struct value *value, int offset)
1119 {
1120 value->offset = offset;
1121 }
1122
1123 int
1124 value_bitpos (const struct value *value)
1125 {
1126 return value->bitpos;
1127 }
1128 void
1129 set_value_bitpos (struct value *value, int bit)
1130 {
1131 value->bitpos = bit;
1132 }
1133
1134 int
1135 value_bitsize (const struct value *value)
1136 {
1137 return value->bitsize;
1138 }
1139 void
1140 set_value_bitsize (struct value *value, int bit)
1141 {
1142 value->bitsize = bit;
1143 }
1144
1145 struct value *
1146 value_parent (const struct value *value)
1147 {
1148 return value->parent;
1149 }
1150
1151 /* See value.h. */
1152
1153 void
1154 set_value_parent (struct value *value, struct value *parent)
1155 {
1156 struct value *old = value->parent;
1157
1158 value->parent = parent;
1159 if (parent != NULL)
1160 value_incref (parent);
1161 value_free (old);
1162 }
1163
1164 gdb_byte *
1165 value_contents_raw (struct value *value)
1166 {
1167 struct gdbarch *arch = get_value_arch (value);
1168 int unit_size = gdbarch_addressable_memory_unit_size (arch);
1169
1170 allocate_value_contents (value);
1171 return value->contents + value->embedded_offset * unit_size;
1172 }
1173
1174 gdb_byte *
1175 value_contents_all_raw (struct value *value)
1176 {
1177 allocate_value_contents (value);
1178 return value->contents;
1179 }
1180
1181 struct type *
1182 value_enclosing_type (const struct value *value)
1183 {
1184 return value->enclosing_type;
1185 }
1186
1187 /* Look at value.h for description. */
1188
1189 struct type *
1190 value_actual_type (struct value *value, int resolve_simple_types,
1191 int *real_type_found)
1192 {
1193 struct value_print_options opts;
1194 struct type *result;
1195
1196 get_user_print_options (&opts);
1197
1198 if (real_type_found)
1199 *real_type_found = 0;
1200 result = value_type (value);
1201 if (opts.objectprint)
1202 {
1203 /* If result's target type is TYPE_CODE_STRUCT, proceed to
1204 fetch its rtti type. */
1205 if ((TYPE_CODE (result) == TYPE_CODE_PTR
1206 || TYPE_CODE (result) == TYPE_CODE_REF)
1207 && TYPE_CODE (check_typedef (TYPE_TARGET_TYPE (result)))
1208 == TYPE_CODE_STRUCT
1209 && !value_optimized_out (value))
1210 {
1211 struct type *real_type;
1212
1213 real_type = value_rtti_indirect_type (value, NULL, NULL, NULL);
1214 if (real_type)
1215 {
1216 if (real_type_found)
1217 *real_type_found = 1;
1218 result = real_type;
1219 }
1220 }
1221 else if (resolve_simple_types)
1222 {
1223 if (real_type_found)
1224 *real_type_found = 1;
1225 result = value_enclosing_type (value);
1226 }
1227 }
1228
1229 return result;
1230 }
1231
1232 void
1233 error_value_optimized_out (void)
1234 {
1235 error (_("value has been optimized out"));
1236 }
1237
1238 static void
1239 require_not_optimized_out (const struct value *value)
1240 {
1241 if (!VEC_empty (range_s, value->optimized_out))
1242 {
1243 if (value->lval == lval_register)
1244 error (_("register has not been saved in frame"));
1245 else
1246 error_value_optimized_out ();
1247 }
1248 }
1249
1250 static void
1251 require_available (const struct value *value)
1252 {
1253 if (!VEC_empty (range_s, value->unavailable))
1254 throw_error (NOT_AVAILABLE_ERROR, _("value is not available"));
1255 }
1256
1257 const gdb_byte *
1258 value_contents_for_printing (struct value *value)
1259 {
1260 if (value->lazy)
1261 value_fetch_lazy (value);
1262 return value->contents;
1263 }
1264
1265 const gdb_byte *
1266 value_contents_for_printing_const (const struct value *value)
1267 {
1268 gdb_assert (!value->lazy);
1269 return value->contents;
1270 }
1271
1272 const gdb_byte *
1273 value_contents_all (struct value *value)
1274 {
1275 const gdb_byte *result = value_contents_for_printing (value);
1276 require_not_optimized_out (value);
1277 require_available (value);
1278 return result;
1279 }
1280
1281 /* Copy the ranges in SRC_RANGE that overlap [SRC_BIT_OFFSET,
1282 SRC_BIT_OFFSET+BIT_LENGTH) into *DST_RANGE, adjusted. */
1283
1284 static void
1285 ranges_copy_adjusted (VEC (range_s) **dst_range, int dst_bit_offset,
1286 VEC (range_s) *src_range, int src_bit_offset,
1287 int bit_length)
1288 {
1289 range_s *r;
1290 int i;
1291
1292 for (i = 0; VEC_iterate (range_s, src_range, i, r); i++)
1293 {
1294 ULONGEST h, l;
1295
1296 l = max (r->offset, src_bit_offset);
1297 h = min (r->offset + r->length, src_bit_offset + bit_length);
1298
1299 if (l < h)
1300 insert_into_bit_range_vector (dst_range,
1301 dst_bit_offset + (l - src_bit_offset),
1302 h - l);
1303 }
1304 }
1305
1306 /* Copy the ranges metadata in SRC that overlaps [SRC_BIT_OFFSET,
1307 SRC_BIT_OFFSET+BIT_LENGTH) into DST, adjusted. */
1308
1309 static void
1310 value_ranges_copy_adjusted (struct value *dst, int dst_bit_offset,
1311 const struct value *src, int src_bit_offset,
1312 int bit_length)
1313 {
1314 ranges_copy_adjusted (&dst->unavailable, dst_bit_offset,
1315 src->unavailable, src_bit_offset,
1316 bit_length);
1317 ranges_copy_adjusted (&dst->optimized_out, dst_bit_offset,
1318 src->optimized_out, src_bit_offset,
1319 bit_length);
1320 }
1321
1322 /* Copy LENGTH target addressable memory units of SRC value's (all) contents
1323 (value_contents_all) starting at SRC_OFFSET, into DST value's (all)
1324 contents, starting at DST_OFFSET. If unavailable contents are
1325 being copied from SRC, the corresponding DST contents are marked
1326 unavailable accordingly. Neither DST nor SRC may be lazy
1327 values.
1328
1329 It is assumed the contents of DST in the [DST_OFFSET,
1330 DST_OFFSET+LENGTH) range are wholly available. */
1331
1332 void
1333 value_contents_copy_raw (struct value *dst, int dst_offset,
1334 struct value *src, int src_offset, int length)
1335 {
1336 range_s *r;
1337 int src_bit_offset, dst_bit_offset, bit_length;
1338 struct gdbarch *arch = get_value_arch (src);
1339 int unit_size = gdbarch_addressable_memory_unit_size (arch);
1340
1341 /* A lazy DST would make this copy operation useless, since as
1342 soon as DST's contents were un-lazied (by a later value_contents
1343 call, say), the contents would be overwritten. A lazy SRC would
1344 mean we'd be copying garbage. */
1345 gdb_assert (!dst->lazy && !src->lazy);
1346
1347 /* The overwritten DST range gets unavailability ORed in, not
1348 replaced. Remember to implement replacing if it
1349 turns out to be necessary. */
1350 gdb_assert (value_bytes_available (dst, dst_offset, length));
1351 gdb_assert (!value_bits_any_optimized_out (dst,
1352 TARGET_CHAR_BIT * dst_offset,
1353 TARGET_CHAR_BIT * length));
1354
1355 /* Copy the data. */
1356 memcpy (value_contents_all_raw (dst) + dst_offset * unit_size,
1357 value_contents_all_raw (src) + src_offset * unit_size,
1358 length * unit_size);
1359
1360 /* Copy the meta-data, adjusted. */
1361 src_bit_offset = src_offset * unit_size * HOST_CHAR_BIT;
1362 dst_bit_offset = dst_offset * unit_size * HOST_CHAR_BIT;
1363 bit_length = length * unit_size * HOST_CHAR_BIT;
1364
1365 value_ranges_copy_adjusted (dst, dst_bit_offset,
1366 src, src_bit_offset,
1367 bit_length);
1368 }
1369
1370 /* Copy LENGTH bytes of SRC value's (all) contents
1371 (value_contents_all) starting at SRC_OFFSET byte, into DST value's
1372 (all) contents, starting at DST_OFFSET. If unavailable contents
1373 are being copied from SRC, the corresponding DST contents are
1374 marked unavailable accordingly. DST must not be lazy. If SRC is
1375 lazy, it will be fetched now.
1376
1377 It is assumed the contents of DST in the [DST_OFFSET,
1378 DST_OFFSET+LENGTH) range are wholly available. */
1379
1380 void
1381 value_contents_copy (struct value *dst, int dst_offset,
1382 struct value *src, int src_offset, int length)
1383 {
1384 if (src->lazy)
1385 value_fetch_lazy (src);
1386
1387 value_contents_copy_raw (dst, dst_offset, src, src_offset, length);
1388 }
1389
1390 int
1391 value_lazy (const struct value *value)
1392 {
1393 return value->lazy;
1394 }
1395
1396 void
1397 set_value_lazy (struct value *value, int val)
1398 {
1399 value->lazy = val;
1400 }
1401
1402 int
1403 value_stack (const struct value *value)
1404 {
1405 return value->stack;
1406 }
1407
1408 void
1409 set_value_stack (struct value *value, int val)
1410 {
1411 value->stack = val;
1412 }
1413
1414 const gdb_byte *
1415 value_contents (struct value *value)
1416 {
1417 const gdb_byte *result = value_contents_writeable (value);
1418 require_not_optimized_out (value);
1419 require_available (value);
1420 return result;
1421 }
1422
1423 gdb_byte *
1424 value_contents_writeable (struct value *value)
1425 {
1426 if (value->lazy)
1427 value_fetch_lazy (value);
1428 return value_contents_raw (value);
1429 }
1430
1431 int
1432 value_optimized_out (struct value *value)
1433 {
1434 /* We can only know if a value is optimized out once we have tried to
1435 fetch it. */
1436 if (VEC_empty (range_s, value->optimized_out) && value->lazy)
1437 {
1438 TRY
1439 {
1440 value_fetch_lazy (value);
1441 }
1442 CATCH (ex, RETURN_MASK_ERROR)
1443 {
1444 /* Fall back to checking value->optimized_out. */
1445 }
1446 END_CATCH
1447 }
1448
1449 return !VEC_empty (range_s, value->optimized_out);
1450 }
1451
1452 /* Mark contents of VALUE as optimized out, starting at OFFSET bytes, and
1453 the following LENGTH bytes. */
1454
1455 void
1456 mark_value_bytes_optimized_out (struct value *value, int offset, int length)
1457 {
1458 mark_value_bits_optimized_out (value,
1459 offset * TARGET_CHAR_BIT,
1460 length * TARGET_CHAR_BIT);
1461 }
1462
1463 /* See value.h. */
1464
1465 void
1466 mark_value_bits_optimized_out (struct value *value, int offset, int length)
1467 {
1468 insert_into_bit_range_vector (&value->optimized_out, offset, length);
1469 }
1470
1471 int
1472 value_bits_synthetic_pointer (const struct value *value,
1473 int offset, int length)
1474 {
1475 if (value->lval != lval_computed
1476 || !value->location.computed.funcs->check_synthetic_pointer)
1477 return 0;
1478 return value->location.computed.funcs->check_synthetic_pointer (value,
1479 offset,
1480 length);
1481 }
1482
1483 int
1484 value_embedded_offset (const struct value *value)
1485 {
1486 return value->embedded_offset;
1487 }
1488
1489 void
1490 set_value_embedded_offset (struct value *value, int val)
1491 {
1492 value->embedded_offset = val;
1493 }
1494
1495 int
1496 value_pointed_to_offset (const struct value *value)
1497 {
1498 return value->pointed_to_offset;
1499 }
1500
1501 void
1502 set_value_pointed_to_offset (struct value *value, int val)
1503 {
1504 value->pointed_to_offset = val;
1505 }
1506
1507 const struct lval_funcs *
1508 value_computed_funcs (const struct value *v)
1509 {
1510 gdb_assert (value_lval_const (v) == lval_computed);
1511
1512 return v->location.computed.funcs;
1513 }
1514
1515 void *
1516 value_computed_closure (const struct value *v)
1517 {
1518 gdb_assert (v->lval == lval_computed);
1519
1520 return v->location.computed.closure;
1521 }
1522
1523 enum lval_type *
1524 deprecated_value_lval_hack (struct value *value)
1525 {
1526 return &value->lval;
1527 }
1528
1529 enum lval_type
1530 value_lval_const (const struct value *value)
1531 {
1532 return value->lval;
1533 }
1534
1535 CORE_ADDR
1536 value_address (const struct value *value)
1537 {
1538 if (value->lval == lval_internalvar
1539 || value->lval == lval_internalvar_component
1540 || value->lval == lval_xcallable)
1541 return 0;
1542 if (value->parent != NULL)
1543 return value_address (value->parent) + value->offset;
1544 else
1545 return value->location.address + value->offset;
1546 }
1547
1548 CORE_ADDR
1549 value_raw_address (const struct value *value)
1550 {
1551 if (value->lval == lval_internalvar
1552 || value->lval == lval_internalvar_component
1553 || value->lval == lval_xcallable)
1554 return 0;
1555 return value->location.address;
1556 }
1557
1558 void
1559 set_value_address (struct value *value, CORE_ADDR addr)
1560 {
1561 gdb_assert (value->lval != lval_internalvar
1562 && value->lval != lval_internalvar_component
1563 && value->lval != lval_xcallable);
1564 value->location.address = addr;
1565 }
1566
1567 struct internalvar **
1568 deprecated_value_internalvar_hack (struct value *value)
1569 {
1570 return &value->location.internalvar;
1571 }
1572
1573 struct frame_id *
1574 deprecated_value_frame_id_hack (struct value *value)
1575 {
1576 return &value->frame_id;
1577 }
1578
1579 short *
1580 deprecated_value_regnum_hack (struct value *value)
1581 {
1582 return &value->regnum;
1583 }
1584
1585 int
1586 deprecated_value_modifiable (const struct value *value)
1587 {
1588 return value->modifiable;
1589 }
1590 \f
1591 /* Return a mark in the value chain. All values allocated after the
1592 mark is obtained (except for those released) are subject to being freed
1593 if a subsequent value_free_to_mark is passed the mark. */
1594 struct value *
1595 value_mark (void)
1596 {
1597 return all_values;
1598 }
1599
1600 /* Take a reference to VAL. VAL will not be deallocated until all
1601 references are released. */
1602
1603 void
1604 value_incref (struct value *val)
1605 {
1606 val->reference_count++;
1607 }
1608
1609 /* Release a reference to VAL, which was acquired with value_incref.
1610 This function is also called to deallocate values from the value
1611 chain. */
1612
1613 void
1614 value_free (struct value *val)
1615 {
1616 if (val)
1617 {
1618 gdb_assert (val->reference_count > 0);
1619 val->reference_count--;
1620 if (val->reference_count > 0)
1621 return;
1622
1623 /* If there's an associated parent value, drop our reference to
1624 it. */
1625 if (val->parent != NULL)
1626 value_free (val->parent);
1627
1628 if (VALUE_LVAL (val) == lval_computed)
1629 {
1630 const struct lval_funcs *funcs = val->location.computed.funcs;
1631
1632 if (funcs->free_closure)
1633 funcs->free_closure (val);
1634 }
1635 else if (VALUE_LVAL (val) == lval_xcallable)
1636 free_xmethod_worker (val->location.xm_worker);
1637
1638 xfree (val->contents);
1639 VEC_free (range_s, val->unavailable);
1640 }
1641 xfree (val);
1642 }
1643
1644 /* Free all values allocated since MARK was obtained by value_mark
1645 (except for those released). */
1646 void
1647 value_free_to_mark (const struct value *mark)
1648 {
1649 struct value *val;
1650 struct value *next;
1651
1652 for (val = all_values; val && val != mark; val = next)
1653 {
1654 next = val->next;
1655 val->released = 1;
1656 value_free (val);
1657 }
1658 all_values = val;
1659 }
1660
1661 /* Free all the values that have been allocated (except for those released).
1662 Call after each command, successful or not.
1663 In practice this is called before each command, which is sufficient. */
1664
1665 void
1666 free_all_values (void)
1667 {
1668 struct value *val;
1669 struct value *next;
1670
1671 for (val = all_values; val; val = next)
1672 {
1673 next = val->next;
1674 val->released = 1;
1675 value_free (val);
1676 }
1677
1678 all_values = 0;
1679 }
1680
1681 /* Frees all the elements in a chain of values. */
1682
1683 void
1684 free_value_chain (struct value *v)
1685 {
1686 struct value *next;
1687
1688 for (; v; v = next)
1689 {
1690 next = value_next (v);
1691 value_free (v);
1692 }
1693 }
1694
1695 /* Remove VAL from the chain all_values
1696 so it will not be freed automatically. */
1697
1698 void
1699 release_value (struct value *val)
1700 {
1701 struct value *v;
1702
1703 if (all_values == val)
1704 {
1705 all_values = val->next;
1706 val->next = NULL;
1707 val->released = 1;
1708 return;
1709 }
1710
1711 for (v = all_values; v; v = v->next)
1712 {
1713 if (v->next == val)
1714 {
1715 v->next = val->next;
1716 val->next = NULL;
1717 val->released = 1;
1718 break;
1719 }
1720 }
1721 }
1722
1723 /* If the value is not already released, release it.
1724 If the value is already released, increment its reference count.
1725 That is, this function ensures that the value is released from the
1726 value chain and that the caller owns a reference to it. */
1727
1728 void
1729 release_value_or_incref (struct value *val)
1730 {
1731 if (val->released)
1732 value_incref (val);
1733 else
1734 release_value (val);
1735 }
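/* Illustrative sketch, not part of the original file: the typical
   ownership pattern for keeping a value alive across commands — take
   it off the all_values chain (or grab an extra reference if it is
   already off), and drop that reference with value_free when done.  */

static void
example_hold_value (struct value *val)
{
  release_value_or_incref (val);	/* We now own a reference.  */

  /* ... use VAL, possibly across several commands ...  */

  value_free (val);			/* Drop our reference.  */
}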
1736
1737 /* Release all values up to MARK. */
1738 struct value *
1739 value_release_to_mark (const struct value *mark)
1740 {
1741 struct value *val;
1742 struct value *next;
1743
1744 for (val = next = all_values; next; next = next->next)
1745 {
1746 if (next->next == mark)
1747 {
1748 all_values = next->next;
1749 next->next = NULL;
1750 return val;
1751 }
1752 next->released = 1;
1753 }
1754 all_values = 0;
1755 return val;
1756 }
1757
1758 /* Return a copy of the value ARG.
1759 It contains the same contents, for the same memory address,
1760 but it's a different block of storage. */
1761
1762 struct value *
1763 value_copy (struct value *arg)
1764 {
1765 struct type *encl_type = value_enclosing_type (arg);
1766 struct value *val;
1767
1768 if (value_lazy (arg))
1769 val = allocate_value_lazy (encl_type);
1770 else
1771 val = allocate_value (encl_type);
1772 val->type = arg->type;
1773 VALUE_LVAL (val) = VALUE_LVAL (arg);
1774 val->location = arg->location;
1775 val->offset = arg->offset;
1776 val->bitpos = arg->bitpos;
1777 val->bitsize = arg->bitsize;
1778 VALUE_FRAME_ID (val) = VALUE_FRAME_ID (arg);
1779 VALUE_REGNUM (val) = VALUE_REGNUM (arg);
1780 val->lazy = arg->lazy;
1781 val->embedded_offset = value_embedded_offset (arg);
1782 val->pointed_to_offset = arg->pointed_to_offset;
1783 val->modifiable = arg->modifiable;
1784 if (!value_lazy (val))
1785 {
1786 memcpy (value_contents_all_raw (val), value_contents_all_raw (arg),
1787 TYPE_LENGTH (value_enclosing_type (arg)));
1788
1789 }
1790 val->unavailable = VEC_copy (range_s, arg->unavailable);
1791 val->optimized_out = VEC_copy (range_s, arg->optimized_out);
1792 set_value_parent (val, arg->parent);
1793 if (VALUE_LVAL (val) == lval_computed)
1794 {
1795 const struct lval_funcs *funcs = val->location.computed.funcs;
1796
1797 if (funcs->copy_closure)
1798 val->location.computed.closure = funcs->copy_closure (val);
1799 }
1800 return val;
1801 }
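/* Illustrative sketch, not part of the original file: a copy owns its
   own contents buffer, so scribbling on the copy leaves the original
   untouched.  VAL is assumed to be a non-lazy value from elsewhere.  */

static void
example_value_copy_is_independent (struct value *val)
{
  struct value *copy = value_copy (val);

  value_contents_writeable (copy)[0] ^= 0xff;
  gdb_assert (value_contents_raw (copy)[0]
	      != value_contents_raw (val)[0]);
}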
1802
1803 /* Return a "const" and/or "volatile" qualified version of the value V.
1804 If CNST is true, then the returned value will be qualified with
1805 "const".
1806 If VOLTL is true, then the returned value will be qualified with
1807 "volatile". */
1808
1809 struct value *
1810 make_cv_value (int cnst, int voltl, struct value *v)
1811 {
1812 struct type *val_type = value_type (v);
1813 struct type *enclosing_type = value_enclosing_type (v);
1814 struct value *cv_val = value_copy (v);
1815
1816 deprecated_set_value_type (cv_val,
1817 make_cv_type (cnst, voltl, val_type, NULL));
1818 set_value_enclosing_type (cv_val,
1819 make_cv_type (cnst, voltl, enclosing_type, NULL));
1820
1821 return cv_val;
1822 }
1823
1824 /* Return a version of ARG that is non-lvalue. */
1825
1826 struct value *
1827 value_non_lval (struct value *arg)
1828 {
1829 if (VALUE_LVAL (arg) != not_lval)
1830 {
1831 struct type *enc_type = value_enclosing_type (arg);
1832 struct value *val = allocate_value (enc_type);
1833
1834 memcpy (value_contents_all_raw (val), value_contents_all (arg),
1835 TYPE_LENGTH (enc_type));
1836 val->type = arg->type;
1837 set_value_embedded_offset (val, value_embedded_offset (arg));
1838 set_value_pointed_to_offset (val, value_pointed_to_offset (arg));
1839 return val;
1840 }
1841 return arg;
1842 }
1843
1844 /* Write contents of V at ADDR and set its lval type to be LVAL_MEMORY. */
1845
1846 void
1847 value_force_lval (struct value *v, CORE_ADDR addr)
1848 {
1849 gdb_assert (VALUE_LVAL (v) == not_lval);
1850
1851 write_memory (addr, value_contents_raw (v), TYPE_LENGTH (value_type (v)));
1852 v->lval = lval_memory;
1853 v->location.address = addr;
1854 }
1855
1856 void
1857 set_value_component_location (struct value *component,
1858 const struct value *whole)
1859 {
1860 gdb_assert (whole->lval != lval_xcallable);
1861
1862 if (whole->lval == lval_internalvar)
1863 VALUE_LVAL (component) = lval_internalvar_component;
1864 else
1865 VALUE_LVAL (component) = whole->lval;
1866
1867 component->location = whole->location;
1868 if (whole->lval == lval_computed)
1869 {
1870 const struct lval_funcs *funcs = whole->location.computed.funcs;
1871
1872 if (funcs->copy_closure)
1873 component->location.computed.closure = funcs->copy_closure (whole);
1874 }
1875 }
1876
1877 \f
1878 /* Access to the value history. */
1879
1880 /* Record a new value in the value history.
1881 Returns the absolute history index of the entry. */
1882
1883 int
1884 record_latest_value (struct value *val)
1885 {
1886 int i;
1887
1888 /* We don't want this value to have anything to do with the inferior anymore.
1889 In particular, "set $1 = 50" should not affect the variable from which
1890 the value was taken, and fast watchpoints should be able to assume that
1891 a value on the value history never changes. */
1892 if (value_lazy (val))
1893 value_fetch_lazy (val);
1894 /* We preserve VALUE_LVAL so that the user can find out where it was fetched
1895 from. This is a bit dubious, because then *&$1 does not just return $1
1896 but the current contents of that location. c'est la vie... */
1897 val->modifiable = 0;
1898
1899 /* The value may have already been released, in which case we're adding a
1900 new reference for its entry in the history. That is why we call
1901 release_value_or_incref here instead of release_value. */
1902 release_value_or_incref (val);
1903
1904 /* Here we treat value_history_count as origin-zero
1905 and as applying to the value being stored now. */
1906
1907 i = value_history_count % VALUE_HISTORY_CHUNK;
1908 if (i == 0)
1909 {
1910 struct value_history_chunk *newobj = XCNEW (struct value_history_chunk);
1911
1912 newobj->next = value_history_chain;
1913 value_history_chain = newobj;
1914 }
1915
1916 value_history_chain->values[i] = val;
1917
1918 /* Now we regard value_history_count as origin-one
1919 and as applying to the value just stored. */
1920
1921 return ++value_history_count;
1922 }
1923
1924 /* Return a copy of the value in the history with sequence number NUM. */
1925
1926 struct value *
1927 access_value_history (int num)
1928 {
1929 struct value_history_chunk *chunk;
1930 int i;
1931 int absnum = num;
1932
1933 if (absnum <= 0)
1934 absnum += value_history_count;
1935
1936 if (absnum <= 0)
1937 {
1938 if (num == 0)
1939 error (_("The history is empty."));
1940 else if (num == 1)
1941 error (_("There is only one value in the history."));
1942 else
1943 error (_("History does not go back to $$%d."), -num);
1944 }
1945 if (absnum > value_history_count)
1946 error (_("History has not yet reached $%d."), absnum);
1947
1948 absnum--;
1949
1950 /* Now absnum is always absolute and origin zero. */
1951
1952 chunk = value_history_chain;
1953 for (i = (value_history_count - 1) / VALUE_HISTORY_CHUNK
1954 - absnum / VALUE_HISTORY_CHUNK;
1955 i > 0; i--)
1956 chunk = chunk->next;
1957
1958 return value_copy (chunk->values[absnum % VALUE_HISTORY_CHUNK]);
1959 }
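/* Illustrative sketch, not part of the original file: recording a value
   returns its one-origin history number ($1, $2, ...), and
   access_value_history hands back a copy of that entry.  VAL is
   assumed to be a non-lazy value produced elsewhere.  */

static void
example_value_history_round_trip (struct value *val)
{
  int num = record_latest_value (val);
  struct value *again = access_value_history (num);

  gdb_assert (value_contents_eq (val, 0, again, 0,
				 TYPE_LENGTH (value_type (val))));
}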
1960
1961 static void
1962 show_values (char *num_exp, int from_tty)
1963 {
1964 int i;
1965 struct value *val;
1966 static int num = 1;
1967
1968 if (num_exp)
1969 {
1970 /* "show values +" should print from the stored position.
1971 "show values <exp>" should print around value number <exp>. */
1972 if (num_exp[0] != '+' || num_exp[1] != '\0')
1973 num = parse_and_eval_long (num_exp) - 5;
1974 }
1975 else
1976 {
1977 /* "show values" means print the last 10 values. */
1978 num = value_history_count - 9;
1979 }
1980
1981 if (num <= 0)
1982 num = 1;
1983
1984 for (i = num; i < num + 10 && i <= value_history_count; i++)
1985 {
1986 struct value_print_options opts;
1987
1988 val = access_value_history (i);
1989 printf_filtered (("$%d = "), i);
1990 get_user_print_options (&opts);
1991 value_print (val, gdb_stdout, &opts);
1992 printf_filtered (("\n"));
1993 }
1994
1995 /* The next "show values +" should start after what we just printed. */
1996 num += 10;
1997
1998 /* Hitting just return after this command should do the same thing as
1999 "show values +". If num_exp is null, this is unnecessary, since
2000 "show values +" is not useful after "show values". */
2001 if (from_tty && num_exp)
2002 {
2003 num_exp[0] = '+';
2004 num_exp[1] = '\0';
2005 }
2006 }
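/* Editorial usage illustration for the command above (made-up session,
   not from the original sources):

     (gdb) show values       -- prints the last ten entries of the
                                value history.
     (gdb) show values 25    -- prints the ten entries surrounding $25,
                                i.e. $20 .. $29, since NUM is set to
                                25 - 5 above.
     (gdb) show values +     -- prints the next ten entries after the
                                previous batch.  */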
2007 \f
2008 enum internalvar_kind
2009 {
2010 /* The internal variable is empty. */
2011 INTERNALVAR_VOID,
2012
2013 /* The value of the internal variable is provided directly as
2014 a GDB value object. */
2015 INTERNALVAR_VALUE,
2016
2017 /* A fresh value is computed via a call-back routine on every
2018 access to the internal variable. */
2019 INTERNALVAR_MAKE_VALUE,
2020
2021 /* The internal variable holds a GDB internal convenience function. */
2022 INTERNALVAR_FUNCTION,
2023
2024 /* The variable holds an integer value. */
2025 INTERNALVAR_INTEGER,
2026
2027 /* The variable holds a GDB-provided string. */
2028 INTERNALVAR_STRING,
2029 };
2030
2031 union internalvar_data
2032 {
2033 /* A value object used with INTERNALVAR_VALUE. */
2034 struct value *value;
2035
2036 /* The call-back routine used with INTERNALVAR_MAKE_VALUE. */
2037 struct
2038 {
2039 /* The functions to call. */
2040 const struct internalvar_funcs *functions;
2041
2042 /* The function's user-data. */
2043 void *data;
2044 } make_value;
2045
2046 /* The internal function used with INTERNALVAR_FUNCTION. */
2047 struct
2048 {
2049 struct internal_function *function;
2050 /* True if this is the canonical name for the function. */
2051 int canonical;
2052 } fn;
2053
2054 /* An integer value used with INTERNALVAR_INTEGER. */
2055 struct
2056 {
2057 /* If type is non-NULL, it will be used as the type to generate
2058 a value for this internal variable. If type is NULL, a default
2059 integer type for the architecture is used. */
2060 struct type *type;
2061 LONGEST val;
2062 } integer;
2063
2064 /* A string value used with INTERNALVAR_STRING. */
2065 char *string;
2066 };
2067
2068 /* Internal variables. These are variables within the debugger
2069 that hold values assigned by debugger commands.
2070 The user refers to them with a '$' prefix
2071 that does not appear in the variable names stored internally. */
2072
2073 struct internalvar
2074 {
2075 struct internalvar *next;
2076 char *name;
2077
2078 /* We support various different kinds of content of an internal variable.
2079 enum internalvar_kind specifies the kind, and union internalvar_data
2080 provides the data associated with this particular kind. */
2081
2082 enum internalvar_kind kind;
2083
2084 union internalvar_data u;
2085 };
2086
2087 static struct internalvar *internalvars;
2088
2089 /* If the variable does not already exist, create it and give it the
2090 value given. If no value is given, the default is zero. */
2091 static void
2092 init_if_undefined_command (char *args, int from_tty)
2093 {
2094 struct internalvar *intvar;
2095
2096 /* Parse the expression - this is taken from set_command(). */
2097 struct expression *expr = parse_expression (args);
2098 register struct cleanup *old_chain =
2099 make_cleanup (free_current_contents, &expr);
2100
2101 /* Validate the expression.
2102 Was the expression an assignment?
2103 Or even an expression at all? */
2104 if (expr->nelts == 0 || expr->elts[0].opcode != BINOP_ASSIGN)
2105 error (_("Init-if-undefined requires an assignment expression."));
2106
2107 /* Extract the variable from the parsed expression.
2108 In the case of an assign the lvalue will be in elts[1] and elts[2]. */
2109 if (expr->elts[1].opcode != OP_INTERNALVAR)
2110 error (_("The first parameter to init-if-undefined "
2111 "should be a GDB variable."));
2112 intvar = expr->elts[2].internalvar;
2113
2114 /* Only evaluate the expression if the lvalue is void.
2115 This may still fail if the expression is invalid. */
2116 if (intvar->kind == INTERNALVAR_VOID)
2117 evaluate_expression (expr);
2118
2119 do_cleanups (old_chain);
2120 }
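/* Editorial usage illustration (the variable name is made up):

     (gdb) init-if-undefined $breakcount = 0

   assigns 0 to $breakcount only if it is still void, i.e. has never
   been given a value; otherwise the assignment is skipped, so scripts
   can provide defaults without clobbering user settings.  */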
2121
2122
2123 /* Look up an internal variable with name NAME. NAME should not
2124 normally include a dollar sign.
2125
2126 If the specified internal variable does not exist,
2127 the return value is NULL. */
2128
2129 struct internalvar *
2130 lookup_only_internalvar (const char *name)
2131 {
2132 struct internalvar *var;
2133
2134 for (var = internalvars; var; var = var->next)
2135 if (strcmp (var->name, name) == 0)
2136 return var;
2137
2138 return NULL;
2139 }
2140
2141 /* Complete NAME by comparing it to the names of internal variables.
2142 Returns a vector of newly allocated strings, or NULL if no matches
2143 were found. */
2144
2145 VEC (char_ptr) *
2146 complete_internalvar (const char *name)
2147 {
2148 VEC (char_ptr) *result = NULL;
2149 struct internalvar *var;
2150 int len;
2151
2152 len = strlen (name);
2153
2154 for (var = internalvars; var; var = var->next)
2155 if (strncmp (var->name, name, len) == 0)
2156 {
2157 char *r = xstrdup (var->name);
2158
2159 VEC_safe_push (char_ptr, result, r);
2160 }
2161
2162 return result;
2163 }
2164
2165 /* Create an internal variable with name NAME and with a void value.
2166 NAME should not normally include a dollar sign. */
2167
2168 struct internalvar *
2169 create_internalvar (const char *name)
2170 {
2171 struct internalvar *var = XNEW (struct internalvar);
2172
2173 var->name = concat (name, (char *)NULL);
2174 var->kind = INTERNALVAR_VOID;
2175 var->next = internalvars;
2176 internalvars = var;
2177 return var;
2178 }
2179
2180 /* Create an internal variable with name NAME and register FUNCS as the
2181 callbacks that value_of_internalvar uses to create a value whenever
2182 this variable is referenced. NAME should not normally include a
2183 dollar sign. DATA is passed uninterpreted to the FUNCS callbacks when
2184 they are called. The destroy callback in FUNCS, if not NULL, is called
2185 when the internal variable is destroyed; it is passed DATA as its only argument. */
2186
2187 struct internalvar *
2188 create_internalvar_type_lazy (const char *name,
2189 const struct internalvar_funcs *funcs,
2190 void *data)
2191 {
2192 struct internalvar *var = create_internalvar (name);
2193
2194 var->kind = INTERNALVAR_MAKE_VALUE;
2195 var->u.make_value.functions = funcs;
2196 var->u.make_value.data = data;
2197 return var;
2198 }
2199
2200 /* See documentation in value.h. */
2201
2202 int
2203 compile_internalvar_to_ax (struct internalvar *var,
2204 struct agent_expr *expr,
2205 struct axs_value *value)
2206 {
2207 if (var->kind != INTERNALVAR_MAKE_VALUE
2208 || var->u.make_value.functions->compile_to_ax == NULL)
2209 return 0;
2210
2211 var->u.make_value.functions->compile_to_ax (var, expr, value,
2212 var->u.make_value.data);
2213 return 1;
2214 }
2215
2216 /* Look up an internal variable with name NAME. NAME should not
2217 normally include a dollar sign.
2218
2219 If the specified internal variable does not exist,
2220 one is created, with a void value. */
2221
2222 struct internalvar *
2223 lookup_internalvar (const char *name)
2224 {
2225 struct internalvar *var;
2226
2227 var = lookup_only_internalvar (name);
2228 if (var)
2229 return var;
2230
2231 return create_internalvar (name);
2232 }
2233
2234 /* Return current value of internal variable VAR. For variables that
2235 are not inherently typed, use a value type appropriate for GDBARCH. */
2236
2237 struct value *
2238 value_of_internalvar (struct gdbarch *gdbarch, struct internalvar *var)
2239 {
2240 struct value *val;
2241 struct trace_state_variable *tsv;
2242
2243 /* If there is a trace state variable of the same name, assume that
2244 is what we really want to see. */
2245 tsv = find_trace_state_variable (var->name);
2246 if (tsv)
2247 {
2248 tsv->value_known = target_get_trace_state_variable_value (tsv->number,
2249 &(tsv->value));
2250 if (tsv->value_known)
2251 val = value_from_longest (builtin_type (gdbarch)->builtin_int64,
2252 tsv->value);
2253 else
2254 val = allocate_value (builtin_type (gdbarch)->builtin_void);
2255 return val;
2256 }
2257
2258 switch (var->kind)
2259 {
2260 case INTERNALVAR_VOID:
2261 val = allocate_value (builtin_type (gdbarch)->builtin_void);
2262 break;
2263
2264 case INTERNALVAR_FUNCTION:
2265 val = allocate_value (builtin_type (gdbarch)->internal_fn);
2266 break;
2267
2268 case INTERNALVAR_INTEGER:
2269 if (!var->u.integer.type)
2270 val = value_from_longest (builtin_type (gdbarch)->builtin_int,
2271 var->u.integer.val);
2272 else
2273 val = value_from_longest (var->u.integer.type, var->u.integer.val);
2274 break;
2275
2276 case INTERNALVAR_STRING:
2277 val = value_cstring (var->u.string, strlen (var->u.string),
2278 builtin_type (gdbarch)->builtin_char);
2279 break;
2280
2281 case INTERNALVAR_VALUE:
2282 val = value_copy (var->u.value);
2283 if (value_lazy (val))
2284 value_fetch_lazy (val);
2285 break;
2286
2287 case INTERNALVAR_MAKE_VALUE:
2288 val = (*var->u.make_value.functions->make_value) (gdbarch, var,
2289 var->u.make_value.data);
2290 break;
2291
2292 default:
2293 internal_error (__FILE__, __LINE__, _("bad kind"));
2294 }
2295
2296 /* Change the VALUE_LVAL to lval_internalvar so that future operations
2297 on this value go back to affect the original internal variable.
2298
2299 Do not do this for INTERNALVAR_MAKE_VALUE variables, as those have
2300 no underlying modifiable state in the internal variable.
2301
2302 Likewise, if the variable's value is a computed lvalue, we want
2303 references to it to produce another computed lvalue, where
2304 references and assignments actually operate through the
2305 computed value's functions.
2306
2307 This means that internal variables with computed values
2308 behave a little differently from other internal variables:
2309 assignments to them don't just replace the previous value
2310 altogether. At the moment, this seems like the behavior we
2311 want. */
2312
2313 if (var->kind != INTERNALVAR_MAKE_VALUE
2314 && val->lval != lval_computed)
2315 {
2316 VALUE_LVAL (val) = lval_internalvar;
2317 VALUE_INTERNALVAR (val) = var;
2318 }
2319
2320 return val;
2321 }
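/* Editorial note on the function above (the name $foo is made up):
   convenience variables and trace state variables share the "$" name
   space.  If a trace state variable "foo" has been defined with
   "tvariable $foo", then "print $foo" shows the target-reported value
   of that trace state variable (as a 64-bit integer) when it is known,
   or a void value when it is not, in preference to any convenience
   variable of the same name.  */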
2322
2323 int
2324 get_internalvar_integer (struct internalvar *var, LONGEST *result)
2325 {
2326 if (var->kind == INTERNALVAR_INTEGER)
2327 {
2328 *result = var->u.integer.val;
2329 return 1;
2330 }
2331
2332 if (var->kind == INTERNALVAR_VALUE)
2333 {
2334 struct type *type = check_typedef (value_type (var->u.value));
2335
2336 if (TYPE_CODE (type) == TYPE_CODE_INT)
2337 {
2338 *result = value_as_long (var->u.value);
2339 return 1;
2340 }
2341 }
2342
2343 return 0;
2344 }
2345
2346 static int
2347 get_internalvar_function (struct internalvar *var,
2348 struct internal_function **result)
2349 {
2350 switch (var->kind)
2351 {
2352 case INTERNALVAR_FUNCTION:
2353 *result = var->u.fn.function;
2354 return 1;
2355
2356 default:
2357 return 0;
2358 }
2359 }
2360
2361 void
2362 set_internalvar_component (struct internalvar *var, int offset, int bitpos,
2363 int bitsize, struct value *newval)
2364 {
2365 gdb_byte *addr;
2366 struct gdbarch *arch;
2367 int unit_size;
2368
2369 switch (var->kind)
2370 {
2371 case INTERNALVAR_VALUE:
2372 addr = value_contents_writeable (var->u.value);
2373 arch = get_value_arch (var->u.value);
2374 unit_size = gdbarch_addressable_memory_unit_size (arch);
2375
2376 if (bitsize)
2377 modify_field (value_type (var->u.value), addr + offset,
2378 value_as_long (newval), bitpos, bitsize);
2379 else
2380 memcpy (addr + offset * unit_size, value_contents (newval),
2381 TYPE_LENGTH (value_type (newval)));
2382 break;
2383
2384 default:
2385 /* We can never get a component of any other kind. */
2386 internal_error (__FILE__, __LINE__, _("set_internalvar_component"));
2387 }
2388 }
2389
2390 void
2391 set_internalvar (struct internalvar *var, struct value *val)
2392 {
2393 enum internalvar_kind new_kind;
2394 union internalvar_data new_data = { 0 };
2395
2396 if (var->kind == INTERNALVAR_FUNCTION && var->u.fn.canonical)
2397 error (_("Cannot overwrite convenience function %s"), var->name);
2398
2399 /* Prepare new contents. */
2400 switch (TYPE_CODE (check_typedef (value_type (val))))
2401 {
2402 case TYPE_CODE_VOID:
2403 new_kind = INTERNALVAR_VOID;
2404 break;
2405
2406 case TYPE_CODE_INTERNAL_FUNCTION:
2407 gdb_assert (VALUE_LVAL (val) == lval_internalvar);
2408 new_kind = INTERNALVAR_FUNCTION;
2409 get_internalvar_function (VALUE_INTERNALVAR (val),
2410 &new_data.fn.function);
2411 /* Copies created here are never canonical. */
2412 break;
2413
2414 default:
2415 new_kind = INTERNALVAR_VALUE;
2416 new_data.value = value_copy (val);
2417 new_data.value->modifiable = 1;
2418
2419 /* Force the value to be fetched from the target now, to avoid problems
2420 later when this internalvar is referenced and the target is gone or
2421 has changed. */
2422 if (value_lazy (new_data.value))
2423 value_fetch_lazy (new_data.value);
2424
2425 /* Release the value from the value chain to prevent it from being
2426 deleted by free_all_values. From here on this function should not
2427 call error () until new_data is installed into var->u, to avoid
2428 leaking memory. */
2429 release_value (new_data.value);
2430 break;
2431 }
2432
2433 /* Clean up old contents. */
2434 clear_internalvar (var);
2435
2436 /* Switch over. */
2437 var->kind = new_kind;
2438 var->u = new_data;
2439 /* End code which must not call error(). */
2440 }
2441
2442 void
2443 set_internalvar_integer (struct internalvar *var, LONGEST l)
2444 {
2445 /* Clean up old contents. */
2446 clear_internalvar (var);
2447
2448 var->kind = INTERNALVAR_INTEGER;
2449 var->u.integer.type = NULL;
2450 var->u.integer.val = l;
2451 }
2452
2453 void
2454 set_internalvar_string (struct internalvar *var, const char *string)
2455 {
2456 /* Clean up old contents. */
2457 clear_internalvar (var);
2458
2459 var->kind = INTERNALVAR_STRING;
2460 var->u.string = xstrdup (string);
2461 }
2462
2463 static void
2464 set_internalvar_function (struct internalvar *var, struct internal_function *f)
2465 {
2466 /* Clean up old contents. */
2467 clear_internalvar (var);
2468
2469 var->kind = INTERNALVAR_FUNCTION;
2470 var->u.fn.function = f;
2471 var->u.fn.canonical = 1;
2472 /* Variables installed here are always the canonical version. */
2473 }
2474
2475 void
2476 clear_internalvar (struct internalvar *var)
2477 {
2478 /* Clean up old contents. */
2479 switch (var->kind)
2480 {
2481 case INTERNALVAR_VALUE:
2482 value_free (var->u.value);
2483 break;
2484
2485 case INTERNALVAR_STRING:
2486 xfree (var->u.string);
2487 break;
2488
2489 case INTERNALVAR_MAKE_VALUE:
2490 if (var->u.make_value.functions->destroy != NULL)
2491 var->u.make_value.functions->destroy (var->u.make_value.data);
2492 break;
2493
2494 default:
2495 break;
2496 }
2497
2498 /* Reset to void kind. */
2499 var->kind = INTERNALVAR_VOID;
2500 }
2501
2502 char *
2503 internalvar_name (const struct internalvar *var)
2504 {
2505 return var->name;
2506 }
2507
2508 static struct internal_function *
2509 create_internal_function (const char *name,
2510 internal_function_fn handler, void *cookie)
2511 {
2512 struct internal_function *ifn = XNEW (struct internal_function);
2513
2514 ifn->name = xstrdup (name);
2515 ifn->handler = handler;
2516 ifn->cookie = cookie;
2517 return ifn;
2518 }
2519
2520 char *
2521 value_internal_function_name (struct value *val)
2522 {
2523 struct internal_function *ifn;
2524 int result;
2525
2526 gdb_assert (VALUE_LVAL (val) == lval_internalvar);
2527 result = get_internalvar_function (VALUE_INTERNALVAR (val), &ifn);
2528 gdb_assert (result);
2529
2530 return ifn->name;
2531 }
2532
2533 struct value *
2534 call_internal_function (struct gdbarch *gdbarch,
2535 const struct language_defn *language,
2536 struct value *func, int argc, struct value **argv)
2537 {
2538 struct internal_function *ifn;
2539 int result;
2540
2541 gdb_assert (VALUE_LVAL (func) == lval_internalvar);
2542 result = get_internalvar_function (VALUE_INTERNALVAR (func), &ifn);
2543 gdb_assert (result);
2544
2545 return (*ifn->handler) (gdbarch, language, ifn->cookie, argc, argv);
2546 }
2547
2548 /* The 'function' command. This does nothing -- it is just a
2549 placeholder to let "help function NAME" work. This is also used as
2550 the implementation of the sub-command that is created when
2551 registering an internal function. */
2552 static void
2553 function_command (char *command, int from_tty)
2554 {
2555 /* Do nothing. */
2556 }
2557
2558 /* Clean up if an internal function's command is destroyed. */
2559 static void
2560 function_destroyer (struct cmd_list_element *self, void *ignore)
2561 {
2562 xfree ((char *) self->name);
2563 xfree ((char *) self->doc);
2564 }
2565
2566 /* Add a new internal function. NAME is the name of the function; DOC
2567 is a documentation string describing the function. HANDLER is
2568 called when the function is invoked. COOKIE is an arbitrary
2569 pointer which is passed to HANDLER and is intended for "user
2570 data". */
2571 void
2572 add_internal_function (const char *name, const char *doc,
2573 internal_function_fn handler, void *cookie)
2574 {
2575 struct cmd_list_element *cmd;
2576 struct internal_function *ifn;
2577 struct internalvar *var = lookup_internalvar (name);
2578
2579 ifn = create_internal_function (name, handler, cookie);
2580 set_internalvar_function (var, ifn);
2581
2582 cmd = add_cmd (xstrdup (name), no_class, function_command, (char *) doc,
2583 &functionlist);
2584 cmd->destroyer = function_destroyer;
2585 }
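/* Editorial sketch of registering a convenience function through the
   routine above.  The handler and its behavior are hypothetical and
   shown only to illustrate the expected signature:

     static struct value *
     always_one_handler (struct gdbarch *gdbarch,
                         const struct language_defn *language,
                         void *cookie, int argc, struct value **argv)
     {
       return value_from_longest (builtin_type (gdbarch)->builtin_int, 1);
     }

     add_internal_function ("_always_one", _("Return the integer 1."),
                            always_one_handler, NULL);

   Afterwards the user can evaluate "$_always_one ()" and read the
   documentation with "help function _always_one".  */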
2586
2587 /* Update VALUE before discarding OBJFILE. COPIED_TYPES is used to
2588 prevent cycles / duplicates. */
2589
2590 void
2591 preserve_one_value (struct value *value, struct objfile *objfile,
2592 htab_t copied_types)
2593 {
2594 if (TYPE_OBJFILE (value->type) == objfile)
2595 value->type = copy_type_recursive (objfile, value->type, copied_types);
2596
2597 if (TYPE_OBJFILE (value->enclosing_type) == objfile)
2598 value->enclosing_type = copy_type_recursive (objfile,
2599 value->enclosing_type,
2600 copied_types);
2601 }
2602
2603 /* Likewise for internal variable VAR. */
2604
2605 static void
2606 preserve_one_internalvar (struct internalvar *var, struct objfile *objfile,
2607 htab_t copied_types)
2608 {
2609 switch (var->kind)
2610 {
2611 case INTERNALVAR_INTEGER:
2612 if (var->u.integer.type && TYPE_OBJFILE (var->u.integer.type) == objfile)
2613 var->u.integer.type
2614 = copy_type_recursive (objfile, var->u.integer.type, copied_types);
2615 break;
2616
2617 case INTERNALVAR_VALUE:
2618 preserve_one_value (var->u.value, objfile, copied_types);
2619 break;
2620 }
2621 }
2622
2623 /* Update the internal variables and value history when OBJFILE is
2624 discarded; we must copy the types out of the objfile. New global types
2625 will be created for every convenience variable which currently points to
2626 this objfile's types, and the convenience variables will be adjusted to
2627 use the new global types. */
2628
2629 void
2630 preserve_values (struct objfile *objfile)
2631 {
2632 htab_t copied_types;
2633 struct value_history_chunk *cur;
2634 struct internalvar *var;
2635 int i;
2636
2637 /* Create the hash table. We allocate on the objfile's obstack, since
2638 it is soon to be deleted. */
2639 copied_types = create_copied_types_hash (objfile);
2640
2641 for (cur = value_history_chain; cur; cur = cur->next)
2642 for (i = 0; i < VALUE_HISTORY_CHUNK; i++)
2643 if (cur->values[i])
2644 preserve_one_value (cur->values[i], objfile, copied_types);
2645
2646 for (var = internalvars; var; var = var->next)
2647 preserve_one_internalvar (var, objfile, copied_types);
2648
2649 preserve_ext_lang_values (objfile, copied_types);
2650
2651 htab_delete (copied_types);
2652 }
2653
2654 static void
2655 show_convenience (char *ignore, int from_tty)
2656 {
2657 struct gdbarch *gdbarch = get_current_arch ();
2658 struct internalvar *var;
2659 int varseen = 0;
2660 struct value_print_options opts;
2661
2662 get_user_print_options (&opts);
2663 for (var = internalvars; var; var = var->next)
2664 {
2665
2666 if (!varseen)
2667 {
2668 varseen = 1;
2669 }
2670 printf_filtered (("$%s = "), var->name);
2671
2672 TRY
2673 {
2674 struct value *val;
2675
2676 val = value_of_internalvar (gdbarch, var);
2677 value_print (val, gdb_stdout, &opts);
2678 }
2679 CATCH (ex, RETURN_MASK_ERROR)
2680 {
2681 fprintf_filtered (gdb_stdout, _("<error: %s>"), ex.message);
2682 }
2683 END_CATCH
2684
2685 printf_filtered (("\n"));
2686 }
2687 if (!varseen)
2688 {
2689 /* This text does not mention convenience functions on purpose.
2690 The user can't create them except via Python, and if Python support
2691 is installed this message will never be printed ($_streq will
2692 exist). */
2693 printf_unfiltered (_("No debugger convenience variables now defined.\n"
2694 "Convenience variables have "
2695 "names starting with \"$\";\n"
2696 "use \"set\" as in \"set "
2697 "$foo = 5\" to define them.\n"));
2698 }
2699 }
2700 \f
2701 /* Return the TYPE_CODE_XMETHOD value corresponding to WORKER. */
2702
2703 struct value *
2704 value_of_xmethod (struct xmethod_worker *worker)
2705 {
2706 if (worker->value == NULL)
2707 {
2708 struct value *v;
2709
2710 v = allocate_value (builtin_type (target_gdbarch ())->xmethod);
2711 v->lval = lval_xcallable;
2712 v->location.xm_worker = worker;
2713 v->modifiable = 0;
2714 worker->value = v;
2715 }
2716
2717 return worker->value;
2718 }
2719
2720 /* Return the type of the result of TYPE_CODE_XMETHOD value METHOD. */
2721
2722 struct type *
2723 result_type_of_xmethod (struct value *method, int argc, struct value **argv)
2724 {
2725 gdb_assert (TYPE_CODE (value_type (method)) == TYPE_CODE_XMETHOD
2726 && method->lval == lval_xcallable && argc > 0);
2727
2728 return get_xmethod_result_type (method->location.xm_worker,
2729 argv[0], argv + 1, argc - 1);
2730 }
2731
2732 /* Call the xmethod corresponding to the TYPE_CODE_XMETHOD value METHOD. */
2733
2734 struct value *
2735 call_xmethod (struct value *method, int argc, struct value **argv)
2736 {
2737 gdb_assert (TYPE_CODE (value_type (method)) == TYPE_CODE_XMETHOD
2738 && method->lval == lval_xcallable && argc > 0);
2739
2740 return invoke_xmethod (method->location.xm_worker,
2741 argv[0], argv + 1, argc - 1);
2742 }
2743 \f
2744 /* Extract a value as a C number (either long or double).
2745 Knows how to convert fixed values to double, or
2746 floating values to long.
2747 Does not deallocate the value. */
2748
2749 LONGEST
2750 value_as_long (struct value *val)
2751 {
2752 /* This coerces arrays and functions, which is necessary (e.g.
2753 in disassemble_command). It also dereferences references, which
2754 I suspect is the most logical thing to do. */
2755 val = coerce_array (val);
2756 return unpack_long (value_type (val), value_contents (val));
2757 }
2758
2759 DOUBLEST
2760 value_as_double (struct value *val)
2761 {
2762 DOUBLEST foo;
2763 int inv;
2764
2765 foo = unpack_double (value_type (val), value_contents (val), &inv);
2766 if (inv)
2767 error (_("Invalid floating value found in program."));
2768 return foo;
2769 }
2770
2771 /* Extract a value as a C pointer. Does not deallocate the value.
2772 Note that val's type may not actually be a pointer; value_as_long
2773 handles all the cases. */
2774 CORE_ADDR
2775 value_as_address (struct value *val)
2776 {
2777 struct gdbarch *gdbarch = get_type_arch (value_type (val));
2778
2779 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
2780 whether we want this to be true eventually. */
2781 #if 0
2782 /* gdbarch_addr_bits_remove is wrong if we are being called for a
2783 non-address (e.g. argument to "signal", "info break", etc.), or
2784 for pointers to char, in which the low bits *are* significant. */
2785 return gdbarch_addr_bits_remove (gdbarch, value_as_long (val));
2786 #else
2787
2788 /* There are several targets (IA-64, PowerPC, and others) which
2789 don't represent pointers to functions as simply the address of
2790 the function's entry point. For example, on the IA-64, a
2791 function pointer points to a two-word descriptor, generated by
2792 the linker, which contains the function's entry point, and the
2793 value the IA-64 "global pointer" register should have --- to
2794 support position-independent code. The linker generates
2795 descriptors only for those functions whose addresses are taken.
2796
2797 On such targets, it's difficult for GDB to convert an arbitrary
2798 function address into a function pointer; it has to either find
2799 an existing descriptor for that function, or call malloc and
2800 build its own. On some targets, it is impossible for GDB to
2801 build a descriptor at all: the descriptor must contain a jump
2802 instruction; data memory cannot be executed; and code memory
2803 cannot be modified.
2804
2805 Upon entry to this function, if VAL is a value of type `function'
2806 (that is, TYPE_CODE (VALUE_TYPE (val)) == TYPE_CODE_FUNC), then
2807 value_address (val) is the address of the function. This is what
2808 you'll get if you evaluate an expression like `main'. The call
2809 to COERCE_ARRAY below actually does all the usual unary
2810 conversions, which includes converting values of type `function'
2811 to `pointer to function'. This is the challenging conversion
2812 discussed above. Then, `unpack_long' will convert that pointer
2813 back into an address.
2814
2815 So, suppose the user types `disassemble foo' on an architecture
2816 with a strange function pointer representation, on which GDB
2817 cannot build its own descriptors, and suppose further that `foo'
2818 has no linker-built descriptor. The address->pointer conversion
2819 will signal an error and prevent the command from running, even
2820 though the next step would have been to convert the pointer
2821 directly back into the same address.
2822
2823 The following shortcut avoids this whole mess. If VAL is a
2824 function, just return its address directly. */
2825 if (TYPE_CODE (value_type (val)) == TYPE_CODE_FUNC
2826 || TYPE_CODE (value_type (val)) == TYPE_CODE_METHOD)
2827 return value_address (val);
2828
2829 val = coerce_array (val);
2830
2831 /* Some architectures (e.g. Harvard) map instruction and data
2832 addresses onto a single large unified address space. For
2833 instance: an architecture may consider a large integer in the
2834 range 0x10000000 .. 0x1000ffff to already represent a data
2835 address (hence not needing a pointer-to-address conversion) while
2836 a small integer would still need to be converted from integer to
2837 pointer to address. Just assume such architectures handle all
2838 integer conversions in a single function. */
2839
2840 /* JimB writes:
2841
2842 I think INTEGER_TO_ADDRESS is a good idea as proposed --- but we
2843 must admonish GDB hackers to make sure its behavior matches the
2844 compiler's, whenever possible.
2845
2846 In general, I think GDB should evaluate expressions the same way
2847 the compiler does. When the user copies an expression out of
2848 their source code and hands it to a `print' command, they should
2849 get the same value the compiler would have computed. Any
2850 deviation from this rule can cause major confusion and annoyance,
2851 and needs to be justified carefully. In other words, GDB doesn't
2852 really have the freedom to do these conversions in clever and
2853 useful ways.
2854
2855 AndrewC pointed out that users aren't complaining about how GDB
2856 casts integers to pointers; they are complaining that they can't
2857 take an address from a disassembly listing and give it to `x/i'.
2858 This is certainly important.
2859
2860 Adding an architecture method like integer_to_address() certainly
2861 makes it possible for GDB to "get it right" in all circumstances
2862 --- the target has complete control over how things get done, so
2863 people can Do The Right Thing for their target without breaking
2864 anyone else. The standard doesn't specify how integers get
2865 converted to pointers; usually, the ABI doesn't either, but
2866 ABI-specific code is a more reasonable place to handle it. */
2867
2868 if (TYPE_CODE (value_type (val)) != TYPE_CODE_PTR
2869 && TYPE_CODE (value_type (val)) != TYPE_CODE_REF
2870 && gdbarch_integer_to_address_p (gdbarch))
2871 return gdbarch_integer_to_address (gdbarch, value_type (val),
2872 value_contents (val));
2873
2874 return unpack_long (value_type (val), value_contents (val));
2875 #endif
2876 }
2877 \f
2878 /* Unpack raw data (copied from the debuggee, target byte order) at VALADDR
2879 as a long, or as a double, assuming the raw data is described
2880 by type TYPE. Knows how to convert different sizes of values
2881 and can convert between fixed and floating point. We don't assume
2882 any alignment for the raw data. Return value is in host byte order.
2883
2884 If you want functions and arrays to be coerced to pointers, and
2885 references to be dereferenced, call value_as_long() instead.
2886
2887 C++: It is assumed that the front-end has taken care of
2888 all matters concerning pointers to members. A pointer
2889 to member which reaches here is considered to be equivalent
2890 to an INT (or some size). After all, it is only an offset. */
2891
2892 LONGEST
2893 unpack_long (struct type *type, const gdb_byte *valaddr)
2894 {
2895 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
2896 enum type_code code = TYPE_CODE (type);
2897 int len = TYPE_LENGTH (type);
2898 int nosign = TYPE_UNSIGNED (type);
2899
2900 switch (code)
2901 {
2902 case TYPE_CODE_TYPEDEF:
2903 return unpack_long (check_typedef (type), valaddr);
2904 case TYPE_CODE_ENUM:
2905 case TYPE_CODE_FLAGS:
2906 case TYPE_CODE_BOOL:
2907 case TYPE_CODE_INT:
2908 case TYPE_CODE_CHAR:
2909 case TYPE_CODE_RANGE:
2910 case TYPE_CODE_MEMBERPTR:
2911 if (nosign)
2912 return extract_unsigned_integer (valaddr, len, byte_order);
2913 else
2914 return extract_signed_integer (valaddr, len, byte_order);
2915
2916 case TYPE_CODE_FLT:
2917 return extract_typed_floating (valaddr, type);
2918
2919 case TYPE_CODE_DECFLOAT:
2920 /* libdecnumber has a function to convert from decimal to integer, but
2921 it doesn't work when the decimal number has a fractional part. */
2922 return decimal_to_doublest (valaddr, len, byte_order);
2923
2924 case TYPE_CODE_PTR:
2925 case TYPE_CODE_REF:
2926 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
2927 whether we want this to be true eventually. */
2928 return extract_typed_address (valaddr, type);
2929
2930 default:
2931 error (_("Value can't be converted to integer."));
2932 }
2933 return 0; /* Placate lint. */
2934 }
2935
2936 /* Return a double value from the specified type and address.
2937 INVP points to an int which is set to 0 for valid value,
2938 1 for invalid value (bad float format). In either case,
2939 the returned double is OK to use. Argument is in target
2940 format, result is in host format. */
2941
2942 DOUBLEST
2943 unpack_double (struct type *type, const gdb_byte *valaddr, int *invp)
2944 {
2945 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
2946 enum type_code code;
2947 int len;
2948 int nosign;
2949
2950 *invp = 0; /* Assume valid. */
2951 type = check_typedef (type);
2952 code = TYPE_CODE (type);
2953 len = TYPE_LENGTH (type);
2954 nosign = TYPE_UNSIGNED (type);
2955 if (code == TYPE_CODE_FLT)
2956 {
2957 /* NOTE: cagney/2002-02-19: There was a test here to see if the
2958 floating-point value was valid (using the macro
2959 INVALID_FLOAT). That test/macro have been removed.
2960
2961 It turns out that only the VAX defined this macro and then
2962 only in a non-portable way. Fixing the portability problem
2963 wouldn't help since the VAX floating-point code is also badly
2964 bit-rotten. The target needs to add definitions for the
2965 methods gdbarch_float_format and gdbarch_double_format - these
2966 exactly describe the target floating-point format. The
2967 problem here is that the corresponding floatformat_vax_f and
2968 floatformat_vax_d values these methods should be set to are
2969 also not defined either. Oops!
2970
2971 Hopefully someone will add both the missing floatformat
2972 definitions and the new cases for floatformat_is_valid (). */
2973
2974 if (!floatformat_is_valid (floatformat_from_type (type), valaddr))
2975 {
2976 *invp = 1;
2977 return 0.0;
2978 }
2979
2980 return extract_typed_floating (valaddr, type);
2981 }
2982 else if (code == TYPE_CODE_DECFLOAT)
2983 return decimal_to_doublest (valaddr, len, byte_order);
2984 else if (nosign)
2985 {
2986 /* Unsigned -- be sure we compensate for signed LONGEST. */
2987 return (ULONGEST) unpack_long (type, valaddr);
2988 }
2989 else
2990 {
2991 /* Signed -- we are OK with unpack_long. */
2992 return unpack_long (type, valaddr);
2993 }
2994 }
2995
2996 /* Unpack raw data (copied from the debuggee, target byte order) at VALADDR
2997 as a CORE_ADDR, assuming the raw data is described by type TYPE.
2998 We don't assume any alignment for the raw data. Return value is in
2999 host byte order.
3000
3001 If you want functions and arrays to be coerced to pointers, and
3002 references to be dereferenced, call value_as_address() instead.
3003
3004 C++: It is assumed that the front-end has taken care of
3005 all matters concerning pointers to members. A pointer
3006 to member which reaches here is considered to be equivalent
3007 to an INT (or some size). After all, it is only an offset. */
3008
3009 CORE_ADDR
3010 unpack_pointer (struct type *type, const gdb_byte *valaddr)
3011 {
3012 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
3013 whether we want this to be true eventually. */
3014 return unpack_long (type, valaddr);
3015 }
3016
3017 \f
3018 /* Get the value of the FIELDNO'th field (which must be static) of
3019 TYPE. */
3020
3021 struct value *
3022 value_static_field (struct type *type, int fieldno)
3023 {
3024 struct value *retval;
3025
3026 switch (TYPE_FIELD_LOC_KIND (type, fieldno))
3027 {
3028 case FIELD_LOC_KIND_PHYSADDR:
3029 retval = value_at_lazy (TYPE_FIELD_TYPE (type, fieldno),
3030 TYPE_FIELD_STATIC_PHYSADDR (type, fieldno));
3031 break;
3032 case FIELD_LOC_KIND_PHYSNAME:
3033 {
3034 const char *phys_name = TYPE_FIELD_STATIC_PHYSNAME (type, fieldno);
3035 /* TYPE_FIELD_NAME (type, fieldno); */
3036 struct block_symbol sym = lookup_symbol (phys_name, 0, VAR_DOMAIN, 0);
3037
3038 if (sym.symbol == NULL)
3039 {
3040 /* With some compilers, e.g. HP aCC, static data members are
3041 reported as non-debuggable symbols. */
3042 struct bound_minimal_symbol msym
3043 = lookup_minimal_symbol (phys_name, NULL, NULL);
3044
3045 if (!msym.minsym)
3046 return allocate_optimized_out_value (type);
3047 else
3048 {
3049 retval = value_at_lazy (TYPE_FIELD_TYPE (type, fieldno),
3050 BMSYMBOL_VALUE_ADDRESS (msym));
3051 }
3052 }
3053 else
3054 retval = value_of_variable (sym.symbol, sym.block);
3055 break;
3056 }
3057 default:
3058 gdb_assert_not_reached ("unexpected field location kind");
3059 }
3060
3061 return retval;
3062 }
3063
3064 /* Change the enclosing type of a value object VAL to NEW_ENCL_TYPE.
3065 You have to be careful here, since the size of the data area for the value
3066 is set by the length of the enclosing type. So if NEW_ENCL_TYPE is bigger
3067 than the old enclosing type, you have to allocate more space for the
3068 data. */
3069
3070 void
3071 set_value_enclosing_type (struct value *val, struct type *new_encl_type)
3072 {
3073 if (TYPE_LENGTH (new_encl_type) > TYPE_LENGTH (value_enclosing_type (val)))
3074 {
3075 check_type_length_before_alloc (new_encl_type);
3076 val->contents
3077 = (gdb_byte *) xrealloc (val->contents, TYPE_LENGTH (new_encl_type));
3078 }
3079
3080 val->enclosing_type = new_encl_type;
3081 }
3082
3083 /* Given a value ARG1 (offset by OFFSET bytes)
3084 of a struct or union type ARG_TYPE,
3085 extract and return the value of one of its (non-static) fields.
3086 FIELDNO says which field. */
3087
3088 struct value *
3089 value_primitive_field (struct value *arg1, int offset,
3090 int fieldno, struct type *arg_type)
3091 {
3092 struct value *v;
3093 struct type *type;
3094 struct gdbarch *arch = get_value_arch (arg1);
3095 int unit_size = gdbarch_addressable_memory_unit_size (arch);
3096
3097 arg_type = check_typedef (arg_type);
3098 type = TYPE_FIELD_TYPE (arg_type, fieldno);
3099
3100 /* Call check_typedef on our type to make sure that, if TYPE
3101 is a TYPE_CODE_TYPEDEF, its length is set to the length
3102 of the target type instead of zero. However, we do not
3103 replace the typedef type by the target type, because we want
3104 to keep the typedef in order to be able to print the type
3105 description correctly. */
3106 check_typedef (type);
3107
3108 if (TYPE_FIELD_BITSIZE (arg_type, fieldno))
3109 {
3110 /* Handle packed fields.
3111
3112 Create a new value for the bitfield, with bitpos and bitsize
3113 set. If possible, arrange offset and bitpos so that we can
3114 do a single aligned read of the size of the containing type.
3115 Otherwise, adjust offset to the byte containing the first
3116 bit. Assume that the address, offset, and embedded offset
3117 are sufficiently aligned. */
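/* Editorial illustration of the adjustment described above: with a
   32-bit container type, a field at bit position 37 with bitsize 3
   satisfies (37 % 32) + 3 <= 32, so v->bitpos becomes 5 and the byte
   offset advances by (37 - 5) / 8 = 4, allowing one aligned read of
   the container.  A field at bit position 30 with bitsize 4 straddles
   the container boundary, so the fallback sets v->bitpos = 30 % 8 = 6
   and advances the offset by (30 - 6) / 8 = 3 bytes.  */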
3118
3119 int bitpos = TYPE_FIELD_BITPOS (arg_type, fieldno);
3120 int container_bitsize = TYPE_LENGTH (type) * 8;
3121
3122 v = allocate_value_lazy (type);
3123 v->bitsize = TYPE_FIELD_BITSIZE (arg_type, fieldno);
3124 if ((bitpos % container_bitsize) + v->bitsize <= container_bitsize
3125 && TYPE_LENGTH (type) <= (int) sizeof (LONGEST))
3126 v->bitpos = bitpos % container_bitsize;
3127 else
3128 v->bitpos = bitpos % 8;
3129 v->offset = (value_embedded_offset (arg1)
3130 + offset
3131 + (bitpos - v->bitpos) / 8);
3132 set_value_parent (v, arg1);
3133 if (!value_lazy (arg1))
3134 value_fetch_lazy (v);
3135 }
3136 else if (fieldno < TYPE_N_BASECLASSES (arg_type))
3137 {
3138 /* This field is actually a base subobject, so preserve the
3139 entire object's contents for later references to virtual
3140 bases, etc. */
3141 int boffset;
3142
3143 /* Lazy register values with offsets are not supported. */
3144 if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
3145 value_fetch_lazy (arg1);
3146
3147 /* We special case virtual inheritance here because this
3148 requires access to the contents, which we would rather avoid
3149 for references to ordinary fields of unavailable values. */
3150 if (BASETYPE_VIA_VIRTUAL (arg_type, fieldno))
3151 boffset = baseclass_offset (arg_type, fieldno,
3152 value_contents (arg1),
3153 value_embedded_offset (arg1),
3154 value_address (arg1),
3155 arg1);
3156 else
3157 boffset = TYPE_FIELD_BITPOS (arg_type, fieldno) / 8;
3158
3159 if (value_lazy (arg1))
3160 v = allocate_value_lazy (value_enclosing_type (arg1));
3161 else
3162 {
3163 v = allocate_value (value_enclosing_type (arg1));
3164 value_contents_copy_raw (v, 0, arg1, 0,
3165 TYPE_LENGTH (value_enclosing_type (arg1)));
3166 }
3167 v->type = type;
3168 v->offset = value_offset (arg1);
3169 v->embedded_offset = offset + value_embedded_offset (arg1) + boffset;
3170 }
3171 else
3172 {
3173 /* Plain old data member. */
3174 offset += (TYPE_FIELD_BITPOS (arg_type, fieldno)
3175 / (HOST_CHAR_BIT * unit_size));
3176
3177 /* Lazy register values with offsets are not supported. */
3178 if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
3179 value_fetch_lazy (arg1);
3180
3181 if (value_lazy (arg1))
3182 v = allocate_value_lazy (type);
3183 else
3184 {
3185 v = allocate_value (type);
3186 value_contents_copy_raw (v, value_embedded_offset (v),
3187 arg1, value_embedded_offset (arg1) + offset,
3188 type_length_units (type));
3189 }
3190 v->offset = (value_offset (arg1) + offset
3191 + value_embedded_offset (arg1));
3192 }
3193 set_value_component_location (v, arg1);
3194 VALUE_REGNUM (v) = VALUE_REGNUM (arg1);
3195 VALUE_FRAME_ID (v) = VALUE_FRAME_ID (arg1);
3196 return v;
3197 }
3198
3199 /* Given a value ARG1 of a struct or union type,
3200 extract and return the value of one of its (non-static) fields.
3201 FIELDNO says which field. */
3202
3203 struct value *
3204 value_field (struct value *arg1, int fieldno)
3205 {
3206 return value_primitive_field (arg1, 0, fieldno, value_type (arg1));
3207 }
3208
3209 /* Return a non-virtual function as a value.
3210 F is the list of member functions which contains the desired method.
3211 J is an index into F which provides the desired method.
3212
3213 We only use the symbol for its address, so be happy with either a
3214 full symbol or a minimal symbol. */
3215
3216 struct value *
3217 value_fn_field (struct value **arg1p, struct fn_field *f,
3218 int j, struct type *type,
3219 int offset)
3220 {
3221 struct value *v;
3222 struct type *ftype = TYPE_FN_FIELD_TYPE (f, j);
3223 const char *physname = TYPE_FN_FIELD_PHYSNAME (f, j);
3224 struct symbol *sym;
3225 struct bound_minimal_symbol msym;
3226
3227 sym = lookup_symbol (physname, 0, VAR_DOMAIN, 0).symbol;
3228 if (sym != NULL)
3229 {
3230 memset (&msym, 0, sizeof (msym));
3231 }
3232 else
3233 {
3234 gdb_assert (sym == NULL);
3235 msym = lookup_bound_minimal_symbol (physname);
3236 if (msym.minsym == NULL)
3237 return NULL;
3238 }
3239
3240 v = allocate_value (ftype);
3241 if (sym)
3242 {
3243 set_value_address (v, BLOCK_START (SYMBOL_BLOCK_VALUE (sym)));
3244 }
3245 else
3246 {
3247 /* The minimal symbol might point to a function descriptor;
3248 resolve it to the actual code address instead. */
3249 struct objfile *objfile = msym.objfile;
3250 struct gdbarch *gdbarch = get_objfile_arch (objfile);
3251
3252 set_value_address (v,
3253 gdbarch_convert_from_func_ptr_addr
3254 (gdbarch, BMSYMBOL_VALUE_ADDRESS (msym), &current_target));
3255 }
3256
3257 if (arg1p)
3258 {
3259 if (type != value_type (*arg1p))
3260 *arg1p = value_ind (value_cast (lookup_pointer_type (type),
3261 value_addr (*arg1p)));
3262
3263 /* Move the `this' pointer according to the offset.
3264 VALUE_OFFSET (*arg1p) += offset; */
3265 }
3266
3267 return v;
3268 }
3269
3270 \f
3271
3272 /* Unpack a bitfield of the specified FIELD_TYPE, from the object at
3273 VALADDR, and return the result as a LONGEST.
3274 The bitfield starts at BITPOS bits and contains BITSIZE bits.
3275
3276 Extracting bits depends on endianness of the machine. Compute the
3277 number of least significant bits to discard. For big endian machines,
3278 we compute the total number of bits in the anonymous object, subtract
3279 off the bit count from the MSB of the object to the MSB of the
3280 bitfield, then the size of the bitfield, which leaves the LSB discard
3281 count. For little endian machines, the discard count is simply the
3282 number of bits from the LSB of the anonymous object to the LSB of the
3283 bitfield.
3284
3285 If the field is signed, we also do sign extension. */
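/* Editorial worked example for unpack_bits_as_long below: extracting a
   6-bit field at BITPOS 12 reads bytes_read = ((12 % 8) + 6 + 7) / 8
   = 2 bytes starting at byte offset 12 / 8 = 1.  On a little-endian
   target lsbcount is 12 % 8 = 4, so the 16-bit quantity is shifted
   right by 4 and masked with 0x3f; on a bits-big-endian target
   lsbcount is 2 * 8 - 4 - 6 = 6 instead.  If the field type is signed
   and bit 5 of the extracted value is set, the upper bits of the
   result are then filled with ones.  */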
3286
3287 static LONGEST
3288 unpack_bits_as_long (struct type *field_type, const gdb_byte *valaddr,
3289 int bitpos, int bitsize)
3290 {
3291 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (field_type));
3292 ULONGEST val;
3293 ULONGEST valmask;
3294 int lsbcount;
3295 int bytes_read;
3296 int read_offset;
3297
3298 /* Read the minimum number of bytes required; there may not be
3299 enough bytes to read an entire ULONGEST. */
3300 field_type = check_typedef (field_type);
3301 if (bitsize)
3302 bytes_read = ((bitpos % 8) + bitsize + 7) / 8;
3303 else
3304 bytes_read = TYPE_LENGTH (field_type);
3305
3306 read_offset = bitpos / 8;
3307
3308 val = extract_unsigned_integer (valaddr + read_offset,
3309 bytes_read, byte_order);
3310
3311 /* Extract bits. See comment above. */
3312
3313 if (gdbarch_bits_big_endian (get_type_arch (field_type)))
3314 lsbcount = (bytes_read * 8 - bitpos % 8 - bitsize);
3315 else
3316 lsbcount = (bitpos % 8);
3317 val >>= lsbcount;
3318
3319 /* If the field does not entirely fill a LONGEST, then zero the sign bits.
3320 If the field is signed, and is negative, then sign extend. */
3321
3322 if ((bitsize > 0) && (bitsize < 8 * (int) sizeof (val)))
3323 {
3324 valmask = (((ULONGEST) 1) << bitsize) - 1;
3325 val &= valmask;
3326 if (!TYPE_UNSIGNED (field_type))
3327 {
3328 if (val & (valmask ^ (valmask >> 1)))
3329 {
3330 val |= ~valmask;
3331 }
3332 }
3333 }
3334
3335 return val;
3336 }
3337
3338 /* Unpack a field FIELDNO of the specified TYPE, from the object at
3339 VALADDR + EMBEDDED_OFFSET. VALADDR points to the contents of
3340 VAL, which must not be NULL. See
3341 unpack_value_bits_as_long for more details. */
3342
3343 int
3344 unpack_value_field_as_long (struct type *type, const gdb_byte *valaddr,
3345 int embedded_offset, int fieldno,
3346 const struct value *val, LONGEST *result)
3347 {
3348 int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3349 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3350 struct type *field_type = TYPE_FIELD_TYPE (type, fieldno);
3351 int bit_offset;
3352
3353 gdb_assert (val != NULL);
3354
3355 bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos;
3356 if (value_bits_any_optimized_out (val, bit_offset, bitsize)
3357 || !value_bits_available (val, bit_offset, bitsize))
3358 return 0;
3359
3360 *result = unpack_bits_as_long (field_type, valaddr + embedded_offset,
3361 bitpos, bitsize);
3362 return 1;
3363 }
3364
3365 /* Unpack a field FIELDNO of the specified TYPE, from the anonymous
3366 object at VALADDR. See unpack_bits_as_long for more details. */
3367
3368 LONGEST
3369 unpack_field_as_long (struct type *type, const gdb_byte *valaddr, int fieldno)
3370 {
3371 int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3372 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3373 struct type *field_type = TYPE_FIELD_TYPE (type, fieldno);
3374
3375 return unpack_bits_as_long (field_type, valaddr, bitpos, bitsize);
3376 }
3377
3378 /* Unpack a bitfield of BITSIZE bits found at BITPOS in the object at
3379 VALADDR + EMBEDDED_OFFSET that has the type of DEST_VAL and store
3380 the contents in DEST_VAL, zero or sign extending if the type of
3381 DEST_VAL is wider than BITSIZE. VALADDR points to the contents of
3382 VAL. If the VAL's contents required to extract the bitfield from
3383 are unavailable/optimized out, DEST_VAL is correspondingly
3384 marked unavailable/optimized out. */
3385
3386 void
3387 unpack_value_bitfield (struct value *dest_val,
3388 int bitpos, int bitsize,
3389 const gdb_byte *valaddr, int embedded_offset,
3390 const struct value *val)
3391 {
3392 enum bfd_endian byte_order;
3393 int src_bit_offset;
3394 int dst_bit_offset;
3395 LONGEST num;
3396 struct type *field_type = value_type (dest_val);
3397
3398 /* First, unpack and sign extend the bitfield as if it was wholly
3399 available. Invalid/unavailable bits are read as zero, but that's
3400 OK, as they'll end up marked below. */
3401 byte_order = gdbarch_byte_order (get_type_arch (field_type));
3402 num = unpack_bits_as_long (field_type, valaddr + embedded_offset,
3403 bitpos, bitsize);
3404 store_signed_integer (value_contents_raw (dest_val),
3405 TYPE_LENGTH (field_type), byte_order, num);
3406
3407 /* Now copy the optimized out / unavailability ranges to the right
3408 bits. */
3409 src_bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos;
3410 if (byte_order == BFD_ENDIAN_BIG)
3411 dst_bit_offset = TYPE_LENGTH (field_type) * TARGET_CHAR_BIT - bitsize;
3412 else
3413 dst_bit_offset = 0;
3414 value_ranges_copy_adjusted (dest_val, dst_bit_offset,
3415 val, src_bit_offset, bitsize);
3416 }
3417
3418 /* Return a new value with type TYPE, which is FIELDNO field of the
3419 object at VALADDR + EMBEDDED_OFFSET. VALADDR points to the contents
3420 of VAL. If the VAL's contents required to extract the bitfield
3421 from are unavailable/optimized out, the new value is
3422 correspondingly marked unavailable/optimized out. */
3423
3424 struct value *
3425 value_field_bitfield (struct type *type, int fieldno,
3426 const gdb_byte *valaddr,
3427 int embedded_offset, const struct value *val)
3428 {
3429 int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3430 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3431 struct value *res_val = allocate_value (TYPE_FIELD_TYPE (type, fieldno));
3432
3433 unpack_value_bitfield (res_val, bitpos, bitsize,
3434 valaddr, embedded_offset, val);
3435
3436 return res_val;
3437 }
3438
3439 /* Modify the value of a bitfield. ADDR points to a block of memory in
3440 target byte order; the bitfield starts in the byte pointed to. FIELDVAL
3441 is the desired value of the field, in host byte order. BITPOS and BITSIZE
3442 indicate which bits (in target bit order) comprise the bitfield.
3443 Requires 0 < BITSIZE <= lbits, 0 <= BITPOS % 8 + BITSIZE <= lbits, and
3444 0 <= BITPOS, where lbits is the size of a LONGEST in bits. */
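/* Editorial worked example for modify_field below: writing FIELDVAL 5
   into a 3-bit field at BITPOS 9 first normalizes ADDR += 1 and
   BITPOS = 1, with MASK = 0x7.  Since bytesize = (1 + 3 + 7) / 8 = 1,
   a single byte is read, its bits 1..3 are cleared with ~(0x7 << 1)
   and replaced by 5 << 1, and the byte is written back.  On a
   bits-big-endian target the shift position becomes 8 - 1 - 3 = 4
   instead.  */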
3445
3446 void
3447 modify_field (struct type *type, gdb_byte *addr,
3448 LONGEST fieldval, int bitpos, int bitsize)
3449 {
3450 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
3451 ULONGEST oword;
3452 ULONGEST mask = (ULONGEST) -1 >> (8 * sizeof (ULONGEST) - bitsize);
3453 int bytesize;
3454
3455 /* Normalize BITPOS. */
3456 addr += bitpos / 8;
3457 bitpos %= 8;
3458
3459 /* If a negative fieldval fits in the field in question, chop
3460 off the sign extension bits. */
3461 if ((~fieldval & ~(mask >> 1)) == 0)
3462 fieldval &= mask;
3463
3464 /* Warn if value is too big to fit in the field in question. */
3465 if (0 != (fieldval & ~mask))
3466 {
3467 /* FIXME: would like to include fieldval in the message, but
3468 we don't have a sprintf_longest. */
3469 warning (_("Value does not fit in %d bits."), bitsize);
3470
3471 /* Truncate it, otherwise adjoining fields may be corrupted. */
3472 fieldval &= mask;
3473 }
3474
3475 /* Ensure no bytes outside of the modified ones get accessed as it may cause
3476 false valgrind reports. */
3477
3478 bytesize = (bitpos + bitsize + 7) / 8;
3479 oword = extract_unsigned_integer (addr, bytesize, byte_order);
3480
3481 /* Shifting for bit field depends on endianness of the target machine. */
3482 if (gdbarch_bits_big_endian (get_type_arch (type)))
3483 bitpos = bytesize * 8 - bitpos - bitsize;
3484
3485 oword &= ~(mask << bitpos);
3486 oword |= fieldval << bitpos;
3487
3488 store_unsigned_integer (addr, bytesize, byte_order, oword);
3489 }
3490 \f
3491 /* Pack NUM into BUF using a target format of TYPE. */
3492
3493 void
3494 pack_long (gdb_byte *buf, struct type *type, LONGEST num)
3495 {
3496 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
3497 int len;
3498
3499 type = check_typedef (type);
3500 len = TYPE_LENGTH (type);
3501
3502 switch (TYPE_CODE (type))
3503 {
3504 case TYPE_CODE_INT:
3505 case TYPE_CODE_CHAR:
3506 case TYPE_CODE_ENUM:
3507 case TYPE_CODE_FLAGS:
3508 case TYPE_CODE_BOOL:
3509 case TYPE_CODE_RANGE:
3510 case TYPE_CODE_MEMBERPTR:
3511 store_signed_integer (buf, len, byte_order, num);
3512 break;
3513
3514 case TYPE_CODE_REF:
3515 case TYPE_CODE_PTR:
3516 store_typed_address (buf, type, (CORE_ADDR) num);
3517 break;
3518
3519 default:
3520 error (_("Unexpected type (%d) encountered for integer constant."),
3521 TYPE_CODE (type));
3522 }
3523 }
3524
3525
3526 /* Pack NUM into BUF using a target format of TYPE. */
3527
3528 static void
3529 pack_unsigned_long (gdb_byte *buf, struct type *type, ULONGEST num)
3530 {
3531 int len;
3532 enum bfd_endian byte_order;
3533
3534 type = check_typedef (type);
3535 len = TYPE_LENGTH (type);
3536 byte_order = gdbarch_byte_order (get_type_arch (type));
3537
3538 switch (TYPE_CODE (type))
3539 {
3540 case TYPE_CODE_INT:
3541 case TYPE_CODE_CHAR:
3542 case TYPE_CODE_ENUM:
3543 case TYPE_CODE_FLAGS:
3544 case TYPE_CODE_BOOL:
3545 case TYPE_CODE_RANGE:
3546 case TYPE_CODE_MEMBERPTR:
3547 store_unsigned_integer (buf, len, byte_order, num);
3548 break;
3549
3550 case TYPE_CODE_REF:
3551 case TYPE_CODE_PTR:
3552 store_typed_address (buf, type, (CORE_ADDR) num);
3553 break;
3554
3555 default:
3556 error (_("Unexpected type (%d) encountered "
3557 "for unsigned integer constant."),
3558 TYPE_CODE (type));
3559 }
3560 }
3561
3562
3563 /* Convert C numbers into newly allocated values. */
3564
3565 struct value *
3566 value_from_longest (struct type *type, LONGEST num)
3567 {
3568 struct value *val = allocate_value (type);
3569
3570 pack_long (value_contents_raw (val), type, num);
3571 return val;
3572 }
3573
3574
3575 /* Convert C unsigned numbers into newly allocated values. */
3576
3577 struct value *
3578 value_from_ulongest (struct type *type, ULONGEST num)
3579 {
3580 struct value *val = allocate_value (type);
3581
3582 pack_unsigned_long (value_contents_raw (val), type, num);
3583
3584 return val;
3585 }
3586
3587
3588 /* Create a value representing a pointer of type TYPE to the address
3589 ADDR. */
3590
3591 struct value *
3592 value_from_pointer (struct type *type, CORE_ADDR addr)
3593 {
3594 struct value *val = allocate_value (type);
3595
3596 store_typed_address (value_contents_raw (val),
3597 check_typedef (type), addr);
3598 return val;
3599 }
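/* Editorial usage sketch for the constructors above (GDBARCH, TYPE and
   ADDR stand for values the caller already has):

     struct value *forty_two
       = value_from_longest (builtin_type (gdbarch)->builtin_int, 42);
     struct value *ptr
       = value_from_pointer (lookup_pointer_type (type), addr);

   Both results are freshly allocated not_lval values whose contents
   live entirely inside GDB.  */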
3600
3601
3602 /* Create a value of type TYPE whose contents come from VALADDR, if it
3603 is non-null, and whose memory address (in the inferior) is
3604 ADDRESS. The type of the created value may differ from the passed
3605 type TYPE. Make sure to retrieve the value's new type after this call.
3606 Note that TYPE is not passed through resolve_dynamic_type; this is
3607 a special API intended for use only by Ada. */
3608
3609 struct value *
3610 value_from_contents_and_address_unresolved (struct type *type,
3611 const gdb_byte *valaddr,
3612 CORE_ADDR address)
3613 {
3614 struct value *v;
3615
3616 if (valaddr == NULL)
3617 v = allocate_value_lazy (type);
3618 else
3619 v = value_from_contents (type, valaddr);
3620 set_value_address (v, address);
3621 VALUE_LVAL (v) = lval_memory;
3622 return v;
3623 }
3624
3625 /* Create a value of type TYPE whose contents come from VALADDR, if it
3626 is non-null, and whose memory address (in the inferior) is
3627 ADDRESS. The type of the created value may differ from the passed
3628 type TYPE. Make sure to retrieve the value's new type after this call. */
3629
3630 struct value *
3631 value_from_contents_and_address (struct type *type,
3632 const gdb_byte *valaddr,
3633 CORE_ADDR address)
3634 {
3635 struct type *resolved_type = resolve_dynamic_type (type, valaddr, address);
3636 struct type *resolved_type_no_typedef = check_typedef (resolved_type);
3637 struct value *v;
3638
3639 if (valaddr == NULL)
3640 v = allocate_value_lazy (resolved_type);
3641 else
3642 v = value_from_contents (resolved_type, valaddr);
3643 if (TYPE_DATA_LOCATION (resolved_type_no_typedef) != NULL
3644 && TYPE_DATA_LOCATION_KIND (resolved_type_no_typedef) == PROP_CONST)
3645 address = TYPE_DATA_LOCATION_ADDR (resolved_type_no_typedef);
3646 set_value_address (v, address);
3647 VALUE_LVAL (v) = lval_memory;
3648 return v;
3649 }
3650
3651 /* Create a value of type TYPE holding the contents CONTENTS.
3652 The new value is `not_lval'. */
3653
3654 struct value *
3655 value_from_contents (struct type *type, const gdb_byte *contents)
3656 {
3657 struct value *result;
3658
3659 result = allocate_value (type);
3660 memcpy (value_contents_raw (result), contents, TYPE_LENGTH (type));
3661 return result;
3662 }
3663
3664 struct value *
3665 value_from_double (struct type *type, DOUBLEST num)
3666 {
3667 struct value *val = allocate_value (type);
3668 struct type *base_type = check_typedef (type);
3669 enum type_code code = TYPE_CODE (base_type);
3670
3671 if (code == TYPE_CODE_FLT)
3672 {
3673 store_typed_floating (value_contents_raw (val), base_type, num);
3674 }
3675 else
3676 error (_("Unexpected type encountered for floating constant."));
3677
3678 return val;
3679 }
3680
3681 struct value *
3682 value_from_decfloat (struct type *type, const gdb_byte *dec)
3683 {
3684 struct value *val = allocate_value (type);
3685
3686 memcpy (value_contents_raw (val), dec, TYPE_LENGTH (type));
3687 return val;
3688 }
3689
3690 /* Extract a value from the value history. Input will be of the form
3691 $digits or $$digits. See block comment above 'write_dollar_variable'
3692 for details. */
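/* Editorial examples of the accepted forms: "$7" yields history entry
   $7; a bare "$" yields the last entry; "$$3" yields the entry three
   back from the last; and a bare "$$" is treated like "$$1" (one back
   from the last) rather than "$$0".  */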
3693
3694 struct value *
3695 value_from_history_ref (const char *h, const char **endp)
3696 {
3697 int index, len;
3698
3699 if (h[0] == '$')
3700 len = 1;
3701 else
3702 return NULL;
3703
3704 if (h[1] == '$')
3705 len = 2;
3706
3707 /* Find length of numeral string. */
3708 for (; isdigit (h[len]); len++)
3709 ;
3710
3711 /* Make sure numeral string is not part of an identifier. */
3712 if (h[len] == '_' || isalpha (h[len]))
3713 return NULL;
3714
3715 /* Now collect the index value. */
3716 if (h[1] == '$')
3717 {
3718 if (len == 2)
3719 {
3720 /* For some bizarre reason, "$$" is equivalent to "$$1",
3721 rather than to "$$0" as it ought to be! */
3722 index = -1;
3723 *endp += len;
3724 }
3725 else
3726 {
3727 char *local_end;
3728
3729 index = -strtol (&h[2], &local_end, 10);
3730 *endp = local_end;
3731 }
3732 }
3733 else
3734 {
3735 if (len == 1)
3736 {
3737 /* "$" is equivalent to "$0". */
3738 index = 0;
3739 *endp += len;
3740 }
3741 else
3742 {
3743 char *local_end;
3744
3745 index = strtol (&h[1], &local_end, 10);
3746 *endp = local_end;
3747 }
3748 }
3749
3750 return access_value_history (index);
3751 }
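
/* A minimal illustrative sketch (hypothetical caller, not used anywhere
   in this file) of how the history-reference syntax maps onto the value
   history:

     "$"    -> index 0    (the last value)
     "$7"   -> index 7    (history item number 7)
     "$$"   -> index -1   (the next-to-last value)
     "$$3"  -> index -3   (three values back from the last)

   TAIL must initially point at the same text as HIST, because
   value_from_history_ref advances it past the reference it consumed.  */

static struct value *
example_parse_history_ref (const char *hist, const char **tail)
{
  *tail = hist;

  /* A NULL result means HIST does not start with a history reference.  */
  return value_from_history_ref (hist, tail);
}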
3752
3753 struct value *
3754 coerce_ref_if_computed (const struct value *arg)
3755 {
3756 const struct lval_funcs *funcs;
3757
3758 if (TYPE_CODE (check_typedef (value_type (arg))) != TYPE_CODE_REF)
3759 return NULL;
3760
3761 if (value_lval_const (arg) != lval_computed)
3762 return NULL;
3763
3764 funcs = value_computed_funcs (arg);
3765 if (funcs->coerce_ref == NULL)
3766 return NULL;
3767
3768 return funcs->coerce_ref (arg);
3769 }
3770
3771 /* See value.h for a description. */
3772
3773 struct value *
3774 readjust_indirect_value_type (struct value *value, struct type *enc_type,
3775 const struct type *original_type,
3776 const struct value *original_value)
3777 {
3778 /* Re-adjust type. */
3779 deprecated_set_value_type (value, TYPE_TARGET_TYPE (original_type));
3780
3781 /* Add embedding info. */
3782 set_value_enclosing_type (value, enc_type);
3783 set_value_embedded_offset (value, value_pointed_to_offset (original_value));
3784
3785 /* We may be pointing to an object of some derived type. */
3786 return value_full_object (value, NULL, 0, 0, 0);
3787 }
3788
3789 struct value *
3790 coerce_ref (struct value *arg)
3791 {
3792 struct type *value_type_arg_tmp = check_typedef (value_type (arg));
3793 struct value *retval;
3794 struct type *enc_type;
3795
3796 retval = coerce_ref_if_computed (arg);
3797 if (retval)
3798 return retval;
3799
3800 if (TYPE_CODE (value_type_arg_tmp) != TYPE_CODE_REF)
3801 return arg;
3802
3803 enc_type = check_typedef (value_enclosing_type (arg));
3804 enc_type = TYPE_TARGET_TYPE (enc_type);
3805
3806 retval = value_at_lazy (enc_type,
3807 unpack_pointer (value_type (arg),
3808 value_contents (arg)));
3809 enc_type = value_type (retval);
3810 return readjust_indirect_value_type (retval, enc_type,
3811 value_type_arg_tmp, arg);
3812 }
3813
3814 struct value *
3815 coerce_array (struct value *arg)
3816 {
3817 struct type *type;
3818
3819 arg = coerce_ref (arg);
3820 type = check_typedef (value_type (arg));
3821
3822 switch (TYPE_CODE (type))
3823 {
3824 case TYPE_CODE_ARRAY:
3825 if (!TYPE_VECTOR (type) && current_language->c_style_arrays)
3826 arg = value_coerce_array (arg);
3827 break;
3828 case TYPE_CODE_FUNC:
3829 arg = value_coerce_function (arg);
3830 break;
3831 }
3832 return arg;
3833 }
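
/* A minimal illustrative sketch (hypothetical caller, not used anywhere
   in this file): the usual prologue applied to an argument value before
   it is consumed.  References are followed first (via coerce_ref), then
   C-style arrays and functions decay to pointers, mirroring what an
   actual call expression would do.  */

static struct value *
example_prepare_argument (struct value *arg)
{
  /* Afterwards an array argument (in a C-like language) has pointer
     type, and a function argument is a pointer to the function.  */
  return coerce_array (arg);
}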
3834 \f
3835
3836 /* Return the return value convention that will be used for the
3837 specified type. */
3838
3839 enum return_value_convention
3840 struct_return_convention (struct gdbarch *gdbarch,
3841 struct value *function, struct type *value_type)
3842 {
3843 enum type_code code = TYPE_CODE (value_type);
3844
3845 if (code == TYPE_CODE_ERROR)
3846 error (_("Function return type unknown."));
3847
3848 /* Probe the architecture for the return-value convention. */
3849 return gdbarch_return_value (gdbarch, function, value_type,
3850 NULL, NULL, NULL);
3851 }
3852
3853 /* Return true if the function returning the specified type is using
3854 the convention of returning structures in memory (passing in the
3855 address as a hidden first parameter). */
3856
3857 int
3858 using_struct_return (struct gdbarch *gdbarch,
3859 struct value *function, struct type *value_type)
3860 {
3861 if (TYPE_CODE (value_type) == TYPE_CODE_VOID)
3862 /* A void return value is never in memory. See also corresponding
3863 code in "print_return_value". */
3864 return 0;
3865
3866 return (struct_return_convention (gdbarch, function, value_type)
3867 != RETURN_VALUE_REGISTER_CONVENTION);
3868 }
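
/* A minimal illustrative sketch (hypothetical helper, not used anywhere
   in this file): deciding whether a call to FUNCTION needs caller-allocated
   storage for its result.  It assumes FUNCTION's type is a function type
   whose return type is known.  */

static int
example_result_needs_memory (struct gdbarch *gdbarch, struct value *function)
{
  struct type *ftype = check_typedef (value_type (function));
  struct type *ret_type = TYPE_TARGET_TYPE (ftype);

  /* Nonzero when the callee expects a hidden first parameter pointing
     at space for the returned object.  */
  return using_struct_return (gdbarch, function, ret_type);
}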
3869
3870 /* Set the initialized field in a value struct. */
3871
3872 void
3873 set_value_initialized (struct value *val, int status)
3874 {
3875 val->initialized = status;
3876 }
3877
3878 /* Return the initialized field in a value struct. */
3879
3880 int
3881 value_initialized (const struct value *val)
3882 {
3883 return val->initialized;
3884 }
3885
3886 /* Load the actual content of a lazy value. Fetch the data from the
3887 user's process and clear the lazy flag to indicate that the data in
3888 the buffer is valid.
3889
3890 If the value is zero-length, we avoid calling read_memory, which
3891 would abort. We mark the value as fetched anyway -- all 0 bytes of
3892 it. */
3893
3894 void
3895 value_fetch_lazy (struct value *val)
3896 {
3897 gdb_assert (value_lazy (val));
3898 allocate_value_contents (val);
3899 /* A value is either lazy, or fully fetched. The
3900 availability/validity is only established as we try to fetch a
3901 value. */
3902 gdb_assert (VEC_empty (range_s, val->optimized_out));
3903 gdb_assert (VEC_empty (range_s, val->unavailable));
3904 if (value_bitsize (val))
3905 {
3906 /* To read a lazy bitfield, read the entire enclosing value. This
3907 prevents reading the same block of (possibly volatile) memory once
3908 per bitfield. It would be even better to read only the containing
3909 word, but we have no way to record that just specific bits of a
3910 value have been fetched. */
3911 struct type *type = check_typedef (value_type (val));
3912 struct value *parent = value_parent (val);
3913
3914 if (value_lazy (parent))
3915 value_fetch_lazy (parent);
3916
3917 unpack_value_bitfield (val,
3918 value_bitpos (val), value_bitsize (val),
3919 value_contents_for_printing (parent),
3920 value_offset (val), parent);
3921 }
3922 else if (VALUE_LVAL (val) == lval_memory)
3923 {
3924 CORE_ADDR addr = value_address (val);
3925 struct type *type = check_typedef (value_enclosing_type (val));
3926
3927 if (TYPE_LENGTH (type))
3928 read_value_memory (val, 0, value_stack (val),
3929 addr, value_contents_all_raw (val),
3930 type_length_units (type));
3931 }
3932 else if (VALUE_LVAL (val) == lval_register)
3933 {
3934 struct frame_info *frame;
3935 int regnum;
3936 struct type *type = check_typedef (value_type (val));
3937 struct value *new_val = val, *mark = value_mark ();
3938
3939 /* Offsets are not supported here; lazy register values must
3940 refer to the entire register. */
3941 gdb_assert (value_offset (val) == 0);
3942
3943 while (VALUE_LVAL (new_val) == lval_register && value_lazy (new_val))
3944 {
3945 struct frame_id frame_id = VALUE_FRAME_ID (new_val);
3946
3947 frame = frame_find_by_id (frame_id);
3948 regnum = VALUE_REGNUM (new_val);
3949
3950 gdb_assert (frame != NULL);
3951
3952 /* Convertible register routines are used for multi-register
3953 values and for interpretation in different types
3954 (e.g. float or int from a double register). Lazy
3955 register values should have the register's natural type,
3956 so they do not apply. */
3957 gdb_assert (!gdbarch_convert_register_p (get_frame_arch (frame),
3958 regnum, type));
3959
3960 new_val = get_frame_register_value (frame, regnum);
3961
3962 /* If we get another lazy lval_register value, it means the
3963 register is found by reading it from the next frame.
3964 get_frame_register_value should never return a value with
3965 the frame id pointing to FRAME. If it does, it means we
3966 either have two consecutive frames with the same frame id
3967 in the frame chain, or some code is trying to unwind
3968 behind get_prev_frame's back (e.g., a frame unwind
3969 sniffer trying to unwind), bypassing its validations. In
3970 any case, it should always be an internal error to end up
3971 in this situation. */
3972 if (VALUE_LVAL (new_val) == lval_register
3973 && value_lazy (new_val)
3974 && frame_id_eq (VALUE_FRAME_ID (new_val), frame_id))
3975 internal_error (__FILE__, __LINE__,
3976 _("infinite loop while fetching a register"));
3977 }
3978
3979 /* If it's still lazy (for instance, a saved register on the
3980 stack), fetch it. */
3981 if (value_lazy (new_val))
3982 value_fetch_lazy (new_val);
3983
3984 /* Copy the contents and the unavailability/optimized-out
3985 meta-data from NEW_VAL to VAL. */
3986 set_value_lazy (val, 0);
3987 value_contents_copy (val, value_embedded_offset (val),
3988 new_val, value_embedded_offset (new_val),
3989 type_length_units (type));
3990
3991 if (frame_debug)
3992 {
3993 struct gdbarch *gdbarch;
3994 frame = frame_find_by_id (VALUE_FRAME_ID (val));
3995 regnum = VALUE_REGNUM (val);
3996 gdbarch = get_frame_arch (frame);
3997
3998 fprintf_unfiltered (gdb_stdlog,
3999 "{ value_fetch_lazy "
4000 "(frame=%d,regnum=%d(%s),...) ",
4001 frame_relative_level (frame), regnum,
4002 user_reg_map_regnum_to_name (gdbarch, regnum));
4003
4004 fprintf_unfiltered (gdb_stdlog, "->");
4005 if (value_optimized_out (new_val))
4006 {
4007 fprintf_unfiltered (gdb_stdlog, " ");
4008 val_print_optimized_out (new_val, gdb_stdlog);
4009 }
4010 else
4011 {
4012 int i;
4013 const gdb_byte *buf = value_contents (new_val);
4014
4015 if (VALUE_LVAL (new_val) == lval_register)
4016 fprintf_unfiltered (gdb_stdlog, " register=%d",
4017 VALUE_REGNUM (new_val));
4018 else if (VALUE_LVAL (new_val) == lval_memory)
4019 fprintf_unfiltered (gdb_stdlog, " address=%s",
4020 paddress (gdbarch,
4021 value_address (new_val)));
4022 else
4023 fprintf_unfiltered (gdb_stdlog, " computed");
4024
4025 fprintf_unfiltered (gdb_stdlog, " bytes=");
4026 fprintf_unfiltered (gdb_stdlog, "[");
4027 for (i = 0; i < register_size (gdbarch, regnum); i++)
4028 fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
4029 fprintf_unfiltered (gdb_stdlog, "]");
4030 }
4031
4032 fprintf_unfiltered (gdb_stdlog, " }\n");
4033 }
4034
4035 /* Dispose of the intermediate values. This prevents
4036 watchpoints from trying to watch the saved frame pointer. */
4037 value_free_to_mark (mark);
4038 }
4039 else if (VALUE_LVAL (val) == lval_computed
4040 && value_computed_funcs (val)->read != NULL)
4041 value_computed_funcs (val)->read (val);
4042 else
4043 internal_error (__FILE__, __LINE__, _("Unexpected lazy value type."));
4044
4045 set_value_lazy (val, 0);
4046 }
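
/* A minimal illustrative sketch (hypothetical helper, not used anywhere
   in this file) of the common pattern around lazy values.  Note that
   value_contents fetches lazy values itself, so the explicit call to
   value_fetch_lazy is only useful when the caller wants the target
   access -- and any error it may raise -- to happen at this point.  */

static const gdb_byte *
example_contents_fetched (struct value *val)
{
  if (value_lazy (val))
    value_fetch_lazy (val);

  /* VAL is now fully fetched; its buffer contents are valid.  */
  return value_contents (val);
}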
4047
4048 /* Implementation of the convenience function $_isvoid. */
4049
4050 static struct value *
4051 isvoid_internal_fn (struct gdbarch *gdbarch,
4052 const struct language_defn *language,
4053 void *cookie, int argc, struct value **argv)
4054 {
4055 int ret;
4056
4057 if (argc != 1)
4058 error (_("You must provide one argument for $_isvoid."));
4059
4060 ret = TYPE_CODE (value_type (argv[0])) == TYPE_CODE_VOID;
4061
4062 return value_from_longest (builtin_type (gdbarch)->builtin_int, ret);
4063 }
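
/* A minimal illustrative sketch (hypothetical handler, not registered
   anywhere): every convenience function installed with
   add_internal_function has the same shape as isvoid_internal_fn above.
   It receives its arguments pre-evaluated in ARGV and returns a struct
   value; this one would simply report how many arguments it was given.
   Registering it would take a call such as
   add_internal_function ("_example_argc", ..., example_argc_internal_fn,
   NULL) from _initialize_values below.  */

static struct value *
example_argc_internal_fn (struct gdbarch *gdbarch,
                          const struct language_defn *language,
                          void *cookie, int argc, struct value **argv)
{
  return value_from_longest (builtin_type (gdbarch)->builtin_int, argc);
}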
4064
4065 void
4066 _initialize_values (void)
4067 {
4068 add_cmd ("convenience", no_class, show_convenience, _("\
4069 Debugger convenience (\"$foo\") variables and functions.\n\
4070 Convenience variables are created when you assign them values;\n\
4071 thus, \"set $foo=1\" gives \"$foo\" the value 1. Values may be any type.\n\
4072 \n\
4073 A few convenience variables are given values automatically:\n\
4074 \"$_\"holds the last address examined with \"x\" or \"info lines\",\n\
4075 \"$__\" holds the contents of the last address examined with \"x\"."
4076 #ifdef HAVE_PYTHON
4077 "\n\n\
4078 Convenience functions are defined via the Python API."
4079 #endif
4080 ), &showlist);
4081 add_alias_cmd ("conv", "convenience", no_class, 1, &showlist);
4082
4083 add_cmd ("values", no_set_class, show_values, _("\
4084 Elements of value history around item number IDX (or last ten)."),
4085 &showlist);
4086
4087 add_com ("init-if-undefined", class_vars, init_if_undefined_command, _("\
4088 Initialize a convenience variable if necessary.\n\
4089 init-if-undefined VARIABLE = EXPRESSION\n\
4090 Set an internal VARIABLE to the result of the EXPRESSION if it does not\n\
4091 exist or does not contain a value. The EXPRESSION is not evaluated if the\n\
4092 VARIABLE is already initialized."));
4093
4094 add_prefix_cmd ("function", no_class, function_command, _("\
4095 Placeholder command for showing help on convenience functions."),
4096 &functionlist, "function ", 0, &cmdlist);
4097
4098 add_internal_function ("_isvoid", _("\
4099 Check whether an expression is void.\n\
4100 Usage: $_isvoid (expression)\n\
4101 Return 1 if the expression is void, zero otherwise."),
4102 isvoid_internal_fn, NULL);
4103
4104 add_setshow_zuinteger_unlimited_cmd ("max-value-size",
4105 class_support, &max_value_size, _("\
4106 Set the maximum size of a value gdb will load from the inferior."), _("\
4107 Show the maximum size of a value gdb will load from the inferior."), _("\
4108 Use this to control the maximum size, in bytes, of a value that gdb\n\
4109 will load from the inferior. Setting this value to 'unlimited'\n\
4110 disables checking.\n\
4111 Setting this does not invalidate already allocated values; it only\n\
4112 prevents future values, larger than this size, from being allocated."),
4113 set_max_value_size,
4114 show_max_value_size,
4115 &setlist, &showlist);
4116 }