1 /* Low level packing and unpacking of values for GDB, the GNU Debugger.
2
3 Copyright (C) 1986-2015 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21 #include "arch-utils.h"
22 #include "symtab.h"
23 #include "gdbtypes.h"
24 #include "value.h"
25 #include "gdbcore.h"
26 #include "command.h"
27 #include "gdbcmd.h"
28 #include "target.h"
29 #include "language.h"
30 #include "demangle.h"
31 #include "doublest.h"
32 #include "regcache.h"
33 #include "block.h"
34 #include "dfp.h"
35 #include "objfiles.h"
36 #include "valprint.h"
37 #include "cli/cli-decode.h"
38 #include "extension.h"
39 #include <ctype.h>
40 #include "tracepoint.h"
41 #include "cp-abi.h"
42 #include "user-regs.h"
43
44 /* Prototypes for exported functions. */
45
46 void _initialize_values (void);
47
48 /* Definition of a user function. */
49 struct internal_function
50 {
51 /* The name of the function. It is a bit odd to have this in the
52 function itself -- the user might use a differently-named
53 convenience variable to hold the function. */
54 char *name;
55
56 /* The handler. */
57 internal_function_fn handler;
58
59 /* User data for the handler. */
60 void *cookie;
61 };
62
63 /* Defines an [OFFSET, OFFSET + LENGTH) range. */
64
65 struct range
66 {
67 /* Lowest offset in the range. */
68 int offset;
69
70 /* Length of the range. */
71 int length;
72 };
73
74 typedef struct range range_s;
75
76 DEF_VEC_O(range_s);
77
78 /* Returns true if the ranges defined by [offset1, offset1+len1) and
79 [offset2, offset2+len2) overlap. */
80
81 static int
82 ranges_overlap (int offset1, int len1,
83 int offset2, int len2)
84 {
85 ULONGEST h, l;
86
87 l = max (offset1, offset2);
88 h = min (offset1 + len1, offset2 + len2);
89 return (l < h);
90 }
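/* Illustrative sketch, not part of GDB and not built: since the ranges
   are half-open, two ranges that merely touch do not overlap.  The
   function name below is hypothetical.  */
#if 0
static void
example_ranges_overlap (void)
{
  gdb_assert (!ranges_overlap (0, 4, 4, 4));	/* [0,4) and [4,8) only touch.  */
  gdb_assert (ranges_overlap (0, 4, 3, 5));	/* [0,4) and [3,8) share [3,4).  */
}
#endif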
91
92 /* Returns true if the first argument is strictly less than the
93 second, useful for VEC_lower_bound. We keep ranges sorted by
94 offset and coalesce overlapping and contiguous ranges, so this just
95 compares the starting offset. */
96
97 static int
98 range_lessthan (const range_s *r1, const range_s *r2)
99 {
100 return r1->offset < r2->offset;
101 }
102
103 /* Returns true if RANGES contains any range that overlaps [OFFSET,
104 OFFSET+LENGTH). */
105
106 static int
107 ranges_contain (VEC(range_s) *ranges, int offset, int length)
108 {
109 range_s what;
110 int i;
111
112 what.offset = offset;
113 what.length = length;
114
115 /* We keep ranges sorted by offset and coalesce overlapping and
116 contiguous ranges, so to check if a range list contains a given
117 range, we can do a binary search for the position the given range
118 would be inserted if we only considered the starting OFFSET of
119 ranges. We call that position I. Since we also have LENGTH to
120 care for (this is a range after all), we need to check if the
121 _previous_ range overlaps the I range. E.g.,
122
123 R
124 |---|
125 |---| |---| |------| ... |--|
126 0 1 2 N
127
128 I=1
129
130 In the case above, the binary search would return `I=1', meaning,
131 this OFFSET should be inserted at position 1, and the current
132 position 1 should be pushed further (and before 2). But, `0'
133 overlaps with R.
134
135 Then we also need to check whether R overlaps the I range itself.
136 E.g.,
137
138 R
139 |---|
140 |---| |---| |-------| ... |--|
141 0 1 2 N
142
143 I=1
144 */
145
146 i = VEC_lower_bound (range_s, ranges, &what, range_lessthan);
147
148 if (i > 0)
149 {
150 struct range *bef = VEC_index (range_s, ranges, i - 1);
151
152 if (ranges_overlap (bef->offset, bef->length, offset, length))
153 return 1;
154 }
155
156 if (i < VEC_length (range_s, ranges))
157 {
158 struct range *r = VEC_index (range_s, ranges, i);
159
160 if (ranges_overlap (r->offset, r->length, offset, length))
161 return 1;
162 }
163
164 return 0;
165 }
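/* Illustrative sketch, not part of GDB and not built: querying a small
   hand-built range list.  The function name below is hypothetical;
   insert_into_bit_range_vector (defined further down) is the normal way
   to populate such a vector.  */
#if 0
static void
example_ranges_contain (void)
{
  VEC(range_s) *ranges = NULL;
  range_s r;

  /* Build the sorted list { [0,4), [10,14) }.  */
  r.offset = 0;
  r.length = 4;
  VEC_safe_push (range_s, ranges, &r);
  r.offset = 10;
  r.length = 4;
  VEC_safe_push (range_s, ranges, &r);

  gdb_assert (ranges_contain (ranges, 2, 1));	/* Overlaps [0,4).  */
  gdb_assert (!ranges_contain (ranges, 4, 6));	/* Falls in the gap [4,10).  */
  gdb_assert (ranges_contain (ranges, 8, 4));	/* Overlaps [10,14).  */

  VEC_free (range_s, ranges);
}
#endif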
166
167 static struct cmd_list_element *functionlist;
168
169 /* Note that the fields in this structure are arranged to save a bit
170 of memory. */
171
172 struct value
173 {
174 /* Type of value; either not an lval, or one of the various
175 different possible kinds of lval. */
176 enum lval_type lval;
177
178 /* Is it modifiable? Only relevant if lval != not_lval. */
179 unsigned int modifiable : 1;
180
181 /* If zero, contents of this value are in the contents field. If
182 nonzero, contents are in inferior. If the lval field is lval_memory,
183 the contents are in inferior memory at location.address plus offset.
184 The lval field may also be lval_register.
185
186 WARNING: This field is used by the code which handles watchpoints
187 (see breakpoint.c) to decide whether a particular value can be
188 watched by hardware watchpoints. If the lazy flag is set for
189 some member of a value chain, it is assumed that this member of
190 the chain doesn't need to be watched as part of watching the
191 value itself. This is how GDB avoids watching the entire struct
192 or array when the user wants to watch a single struct member or
193 array element. If you ever change the way lazy flag is set and
194 reset, be sure to consider this use as well! */
195 unsigned int lazy : 1;
196
197 /* If value is a variable, is it initialized or not. */
198 unsigned int initialized : 1;
199
200 /* If value is from the stack. If this is set, read_stack will be
201 used instead of read_memory to enable extra caching. */
202 unsigned int stack : 1;
203
204 /* If the value has been released. */
205 unsigned int released : 1;
206
207 /* Register number if the value is from a register. */
208 short regnum;
209
210 /* Location of value (if lval). */
211 union
212 {
213 /* If lval == lval_memory, this is the address in the inferior.
214 If lval == lval_register, this is the byte offset into the
215 registers structure. */
216 CORE_ADDR address;
217
218 /* Pointer to internal variable. */
219 struct internalvar *internalvar;
220
221 /* Pointer to xmethod worker. */
222 struct xmethod_worker *xm_worker;
223
224 /* If lval == lval_computed, this is a set of function pointers
225 to use to access and describe the value, and a closure pointer
226 for them to use. */
227 struct
228 {
229 /* Functions to call. */
230 const struct lval_funcs *funcs;
231
232 /* Closure for those functions to use. */
233 void *closure;
234 } computed;
235 } location;
236
237 /* Describes offset of a value within lval of a structure in target
238 addressable memory units. If lval == lval_memory, this is an offset to
239 the address. If lval == lval_register, this is a further offset from
240 location.address within the registers structure. Note also the member
241 embedded_offset below. */
242 int offset;
243
244 /* Only used for bitfields; number of bits contained in them. */
245 int bitsize;
246
247 /* Only used for bitfields; position of start of field. For
248 gdbarch_bits_big_endian=0 targets, it is the position of the LSB. For
249 gdbarch_bits_big_endian=1 targets, it is the position of the MSB. */
250 int bitpos;
251
252 /* The number of references to this value. When a value is created,
253 the value chain holds a reference, so REFERENCE_COUNT is 1. If
254 release_value is called, this value is removed from the chain but
255 the caller of release_value now has a reference to this value.
256 The caller must arrange for a call to value_free later. */
257 int reference_count;
258
259 /* Only used for bitfields; the containing value. This allows a
260 single read from the target when displaying multiple
261 bitfields. */
262 struct value *parent;
263
264 /* Frame register value is relative to. This will be described in
265 the lval enum above as "lval_register". */
266 struct frame_id frame_id;
267
268 /* Type of the value. */
269 struct type *type;
270
271 /* If a value represents a C++ object, then the `type' field gives
272 the object's compile-time type. If the object actually belongs
273 to some class derived from `type', perhaps with other base
274 classes and additional members, then `type' is just a subobject
275 of the real thing, and the full object is probably larger than
276 `type' would suggest.
277
278 If `type' is a dynamic class (i.e. one with a vtable), then GDB
279 can actually determine the object's run-time type by looking at
280 the run-time type information in the vtable. When this
281 information is available, we may elect to read in the entire
282 object, for several reasons:
283
284 - When printing the value, the user would probably rather see the
285 full object, not just the limited portion apparent from the
286 compile-time type.
287
288 - If `type' has virtual base classes, then even printing `type'
289 alone may require reaching outside the `type' portion of the
290 object to wherever the virtual base class has been stored.
291
292 When we store the entire object, `enclosing_type' is the run-time
293 type -- the complete object -- and `embedded_offset' is the
294 offset of `type' within that larger type, in target addressable memory
295 units. The value_contents() macro takes `embedded_offset' into account,
296 so most GDB code continues to see the `type' portion of the value, just
297 as the inferior would.
298
299 If `type' is a pointer to an object, then `enclosing_type' is a
300 pointer to the object's run-time type, and `pointed_to_offset' is
301 the offset in target addressable memory units from the full object
302 to the pointed-to object -- that is, the value `embedded_offset' would
303 have if we followed the pointer and fetched the complete object.
304 (I don't really see the point. Why not just determine the
305 run-time type when you indirect, and avoid the special case? The
306 contents don't matter until you indirect anyway.)
307
308 If we're not doing anything fancy, `enclosing_type' is equal to
309 `type', and `embedded_offset' is zero, so everything works
310 normally. */
311 struct type *enclosing_type;
312 int embedded_offset;
313 int pointed_to_offset;
314
315 /* Values are stored in a chain, so that they can be deleted easily
316 over calls to the inferior. Values assigned to internal
317 variables, put into the value history or exposed to Python are
318 taken off this list. */
319 struct value *next;
320
321 /* Actual contents of the value. Target byte-order. NULL or not
322 valid if lazy is nonzero. */
323 gdb_byte *contents;
324
325 /* Unavailable ranges in CONTENTS. We mark unavailable ranges,
326 rather than available, since the common and default case is for a
327 value to be available. This is filled in at value read time.
328 The unavailable ranges are tracked in bits. Note that a contents
329 bit that has been optimized out doesn't really exist in the
330 program, so it can't be marked unavailable either. */
331 VEC(range_s) *unavailable;
332
333 /* Likewise, but for optimized out contents (a chunk of the value of
334 a variable that does not actually exist in the program). If LVAL
335 is lval_register, this is a register ($pc, $sp, etc., never a
336 program variable) that has not been saved in the frame. Not-saved
337 registers and optimized-out program variables are treated pretty
338 much the same, except that not-saved registers have a different
339 string representation and related error strings. */
340 VEC(range_s) *optimized_out;
341 };
342
343 /* See value.h. */
344
345 struct gdbarch *
346 get_value_arch (const struct value *value)
347 {
348 return get_type_arch (value_type (value));
349 }
350
351 int
352 value_bits_available (const struct value *value, int offset, int length)
353 {
354 gdb_assert (!value->lazy);
355
356 return !ranges_contain (value->unavailable, offset, length);
357 }
358
359 int
360 value_bytes_available (const struct value *value, int offset, int length)
361 {
362 return value_bits_available (value,
363 offset * TARGET_CHAR_BIT,
364 length * TARGET_CHAR_BIT);
365 }
366
367 int
368 value_bits_any_optimized_out (const struct value *value, int bit_offset, int bit_length)
369 {
370 gdb_assert (!value->lazy);
371
372 return ranges_contain (value->optimized_out, bit_offset, bit_length);
373 }
374
375 int
376 value_entirely_available (struct value *value)
377 {
378 /* We can only tell whether the whole value is available when we try
379 to read it. */
380 if (value->lazy)
381 value_fetch_lazy (value);
382
383 if (VEC_empty (range_s, value->unavailable))
384 return 1;
385 return 0;
386 }
387
388 /* Returns true if VALUE is entirely covered by RANGES. If the value
389 is lazy, it'll be read now. Note that RANGES is a pointer to
390 pointer because reading the value might change *RANGES. */
391
392 static int
393 value_entirely_covered_by_range_vector (struct value *value,
394 VEC(range_s) **ranges)
395 {
396 /* We can only tell whether the whole value is optimized out /
397 unavailable when we try to read it. */
398 if (value->lazy)
399 value_fetch_lazy (value);
400
401 if (VEC_length (range_s, *ranges) == 1)
402 {
403 struct range *t = VEC_index (range_s, *ranges, 0);
404
405 if (t->offset == 0
406 && t->length == (TARGET_CHAR_BIT
407 * TYPE_LENGTH (value_enclosing_type (value))))
408 return 1;
409 }
410
411 return 0;
412 }
413
414 int
415 value_entirely_unavailable (struct value *value)
416 {
417 return value_entirely_covered_by_range_vector (value, &value->unavailable);
418 }
419
420 int
421 value_entirely_optimized_out (struct value *value)
422 {
423 return value_entirely_covered_by_range_vector (value, &value->optimized_out);
424 }
425
426 /* Insert into the vector pointed to by VECTORP the bit range starting at
427 OFFSET bits, and extending for the next LENGTH bits. */
428
429 static void
430 insert_into_bit_range_vector (VEC(range_s) **vectorp, int offset, int length)
431 {
432 range_s newr;
433 int i;
434
435 /* Insert the range sorted. If there's overlap or the new range
436 would be contiguous with an existing range, merge. */
437
438 newr.offset = offset;
439 newr.length = length;
440
441 /* Do a binary search for the position the given range would be
442 inserted if we only considered the starting OFFSET of ranges.
443 Call that position I. Since we also have LENGTH to care for
444 (this is a range after all), we need to check if the _previous_
445 range overlaps the I range. E.g., calling R the new range:
446
447 #1 - overlaps with previous
448
449 R
450 |-...-|
451 |---| |---| |------| ... |--|
452 0 1 2 N
453
454 I=1
455
456 In the case #1 above, the binary search would return `I=1',
457 meaning, this OFFSET should be inserted at position 1, and the
458 current position 1 should be pushed further (and become 2). But,
459 note that `0' overlaps with R, so we want to merge them.
460
461 A similar consideration needs to be taken if the new range would
462 be contiguous with the previous range:
463
464 #2 - contiguous with previous
465
466 R
467 |-...-|
468 |--| |---| |------| ... |--|
469 0 1 2 N
470
471 I=1
472
473 If there's no overlap with the previous range, as in:
474
475 #3 - not overlapping and not contiguous
476
477 R
478 |-...-|
479 |--| |---| |------| ... |--|
480 0 1 2 N
481
482 I=1
483
484 or if I is 0:
485
486 #4 - R is the range with lowest offset
487
488 R
489 |-...-|
490 |--| |---| |------| ... |--|
491 0 1 2 N
492
493 I=0
494
495 ... we just push the new range to I.
496
497 All the 4 cases above need to consider that the new range may
498 also overlap several of the ranges that follow, or that R may be
499 contiguous with the following range, and merge. E.g.,
500
501 #5 - overlapping following ranges
502
503 R
504 |------------------------|
505 |--| |---| |------| ... |--|
506 0 1 2 N
507
508 I=0
509
510 or:
511
512 R
513 |-------|
514 |--| |---| |------| ... |--|
515 0 1 2 N
516
517 I=1
518
519 */
520
521 i = VEC_lower_bound (range_s, *vectorp, &newr, range_lessthan);
522 if (i > 0)
523 {
524 struct range *bef = VEC_index (range_s, *vectorp, i - 1);
525
526 if (ranges_overlap (bef->offset, bef->length, offset, length))
527 {
528 /* #1 */
529 ULONGEST l = min (bef->offset, offset);
530 ULONGEST h = max (bef->offset + bef->length, offset + length);
531
532 bef->offset = l;
533 bef->length = h - l;
534 i--;
535 }
536 else if (offset == bef->offset + bef->length)
537 {
538 /* #2 */
539 bef->length += length;
540 i--;
541 }
542 else
543 {
544 /* #3 */
545 VEC_safe_insert (range_s, *vectorp, i, &newr);
546 }
547 }
548 else
549 {
550 /* #4 */
551 VEC_safe_insert (range_s, *vectorp, i, &newr);
552 }
553
554 /* Check whether the ranges following the one we've just added or
555 touched can be folded in (#5 above). */
556 if (i + 1 < VEC_length (range_s, *vectorp))
557 {
558 struct range *t;
559 struct range *r;
560 int removed = 0;
561 int next = i + 1;
562
563 /* Get the range we just touched. */
564 t = VEC_index (range_s, *vectorp, i);
565 removed = 0;
566
567 i = next;
568 for (; VEC_iterate (range_s, *vectorp, i, r); i++)
569 if (r->offset <= t->offset + t->length)
570 {
571 ULONGEST l, h;
572
573 l = min (t->offset, r->offset);
574 h = max (t->offset + t->length, r->offset + r->length);
575
576 t->offset = l;
577 t->length = h - l;
578
579 removed++;
580 }
581 else
582 {
583 /* If we couldn't merge this one, we won't be able to
584 merge following ones either, since the ranges are
585 always sorted by OFFSET. */
586 break;
587 }
588
589 if (removed != 0)
590 VEC_block_remove (range_s, *vectorp, next, removed);
591 }
592 }
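/* Illustrative sketch, not part of GDB and not built: how the insertion
   cases above coalesce ranges.  The function name is hypothetical.  */
#if 0
static void
example_insert_into_bit_range_vector (void)
{
  VEC(range_s) *v = NULL;

  insert_into_bit_range_vector (&v, 0, 4);	/* { [0,4) }  (#4) */
  insert_into_bit_range_vector (&v, 10, 4);	/* { [0,4), [10,14) }  (#3) */
  insert_into_bit_range_vector (&v, 4, 2);	/* { [0,6), [10,14) }  (#2) */
  insert_into_bit_range_vector (&v, 5, 6);	/* { [0,14) }  (#1 then #5) */

  gdb_assert (VEC_length (range_s, v) == 1);
  gdb_assert (VEC_index (range_s, v, 0)->offset == 0);
  gdb_assert (VEC_index (range_s, v, 0)->length == 14);

  VEC_free (range_s, v);
}
#endif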
593
594 void
595 mark_value_bits_unavailable (struct value *value, int offset, int length)
596 {
597 insert_into_bit_range_vector (&value->unavailable, offset, length);
598 }
599
600 void
601 mark_value_bytes_unavailable (struct value *value, int offset, int length)
602 {
603 mark_value_bits_unavailable (value,
604 offset * TARGET_CHAR_BIT,
605 length * TARGET_CHAR_BIT);
606 }
607
608 /* Find the first range in RANGES that overlaps the range defined by
609 OFFSET and LENGTH, starting at element POS in the RANGES vector,
610 Returns the index into RANGES where such overlapping range was
611 found, or -1 if none was found. */
612
613 static int
614 find_first_range_overlap (VEC(range_s) *ranges, int pos,
615 int offset, int length)
616 {
617 range_s *r;
618 int i;
619
620 for (i = pos; VEC_iterate (range_s, ranges, i, r); i++)
621 if (ranges_overlap (r->offset, r->length, offset, length))
622 return i;
623
624 return -1;
625 }
626
627 /* Compare LENGTH_BITS of memory at PTR1 + OFFSET1_BITS with the memory at
628 PTR2 + OFFSET2_BITS. Return 0 if the memory is the same, otherwise
629 return non-zero.
630
631 It must always be the case that:
632 OFFSET1_BITS % TARGET_CHAR_BIT == OFFSET2_BITS % TARGET_CHAR_BIT
633
634 It is assumed that memory can be accessed from:
635 PTR + (OFFSET_BITS / TARGET_CHAR_BIT)
636 to:
637 PTR + ((OFFSET_BITS + LENGTH_BITS + TARGET_CHAR_BIT - 1)
638 / TARGET_CHAR_BIT) */
639 static int
640 memcmp_with_bit_offsets (const gdb_byte *ptr1, size_t offset1_bits,
641 const gdb_byte *ptr2, size_t offset2_bits,
642 size_t length_bits)
643 {
644 gdb_assert (offset1_bits % TARGET_CHAR_BIT
645 == offset2_bits % TARGET_CHAR_BIT);
646
647 if (offset1_bits % TARGET_CHAR_BIT != 0)
648 {
649 size_t bits;
650 gdb_byte mask, b1, b2;
651
652 /* The offset from the base pointers PTR1 and PTR2 is not a complete
653 number of bytes. A number of bits up to either the next exact
654 byte boundary, or LENGTH_BITS (whichever comes first) will be
655 compared. */
656 bits = TARGET_CHAR_BIT - offset1_bits % TARGET_CHAR_BIT;
657 gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
658 mask = (1 << bits) - 1;
659
660 if (length_bits < bits)
661 {
662 mask &= ~(gdb_byte) ((1 << (bits - length_bits)) - 1);
663 bits = length_bits;
664 }
665
666 /* Now load the two bytes and mask off the bits we care about. */
667 b1 = *(ptr1 + offset1_bits / TARGET_CHAR_BIT) & mask;
668 b2 = *(ptr2 + offset2_bits / TARGET_CHAR_BIT) & mask;
669
670 if (b1 != b2)
671 return 1;
672
673 /* Now update the length and offsets to take account of the bits
674 we've just compared. */
675 length_bits -= bits;
676 offset1_bits += bits;
677 offset2_bits += bits;
678 }
679
680 if (length_bits % TARGET_CHAR_BIT != 0)
681 {
682 size_t bits;
683 size_t o1, o2;
684 gdb_byte mask, b1, b2;
685
686 /* The length is not an exact number of bytes. After the previous
687 IF block, the offsets are byte aligned, or the
688 length is zero (in which case this code is not reached). Compare
689 a number of bits at the end of the region, starting from an exact
690 byte boundary. */
691 bits = length_bits % TARGET_CHAR_BIT;
692 o1 = offset1_bits + length_bits - bits;
693 o2 = offset2_bits + length_bits - bits;
694
695 gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
696 mask = ((1 << bits) - 1) << (TARGET_CHAR_BIT - bits);
697
698 gdb_assert (o1 % TARGET_CHAR_BIT == 0);
699 gdb_assert (o2 % TARGET_CHAR_BIT == 0);
700
701 b1 = *(ptr1 + o1 / TARGET_CHAR_BIT) & mask;
702 b2 = *(ptr2 + o2 / TARGET_CHAR_BIT) & mask;
703
704 if (b1 != b2)
705 return 1;
706
707 length_bits -= bits;
708 }
709
710 if (length_bits > 0)
711 {
712 /* We've now taken care of any stray "bits" at the start or end of
713 the region to compare; the remainder can be covered with a simple
714 memcmp. */
715 gdb_assert (offset1_bits % TARGET_CHAR_BIT == 0);
716 gdb_assert (offset2_bits % TARGET_CHAR_BIT == 0);
717 gdb_assert (length_bits % TARGET_CHAR_BIT == 0);
718
719 return memcmp (ptr1 + offset1_bits / TARGET_CHAR_BIT,
720 ptr2 + offset2_bits / TARGET_CHAR_BIT,
721 length_bits / TARGET_CHAR_BIT);
722 }
723
724 /* Length is zero, regions match. */
725 return 0;
726 }
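/* Illustrative sketch, not part of GDB and not built: bit-granular
   comparison, assuming TARGET_CHAR_BIT == 8.  The tail comparison masks
   the most-significant bits of the final partial byte, so a mismatch in
   lower bits is only seen once LENGTH_BITS reaches it.  The function
   name is hypothetical.  */
#if 0
static void
example_memcmp_with_bit_offsets (void)
{
  const gdb_byte a[] = { 0x12, 0x34 };
  const gdb_byte b[] = { 0x12, 0x3c };	/* Differs from A in bit 0x08 of byte 1.  */

  /* 12 bits: byte 0 plus the top 4 bits of byte 1; the differing bit is
     not covered, so the regions compare equal.  */
  gdb_assert (memcmp_with_bit_offsets (a, 0, b, 0, 12) == 0);

  /* 13 bits: the tail mask now covers the differing bit.  */
  gdb_assert (memcmp_with_bit_offsets (a, 0, b, 0, 13) != 0);

  /* Full 16 bits differ as well.  */
  gdb_assert (memcmp_with_bit_offsets (a, 0, b, 0, 16) != 0);
}
#endif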
727
728 /* Helper struct for find_first_range_overlap_and_match and
729 value_contents_bits_eq. Keep track of which slot of a given ranges
730 vector we have last looked at. */
731
732 struct ranges_and_idx
733 {
734 /* The ranges. */
735 VEC(range_s) *ranges;
736
737 /* The range we've last found in RANGES. Given ranges are sorted,
738 we can start the next lookup here. */
739 int idx;
740 };
741
742 /* Helper function for value_contents_bits_eq. Compare LENGTH bits of
743 RP1's ranges starting at OFFSET1 bits with LENGTH bits of RP2's
744 ranges starting at OFFSET2 bits. Return true if the ranges match
745 and fill in *L and *H with the overlapping window relative to
746 (both) OFFSET1 or OFFSET2. */
747
748 static int
749 find_first_range_overlap_and_match (struct ranges_and_idx *rp1,
750 struct ranges_and_idx *rp2,
751 int offset1, int offset2,
752 int length, ULONGEST *l, ULONGEST *h)
753 {
754 rp1->idx = find_first_range_overlap (rp1->ranges, rp1->idx,
755 offset1, length);
756 rp2->idx = find_first_range_overlap (rp2->ranges, rp2->idx,
757 offset2, length);
758
759 if (rp1->idx == -1 && rp2->idx == -1)
760 {
761 *l = length;
762 *h = length;
763 return 1;
764 }
765 else if (rp1->idx == -1 || rp2->idx == -1)
766 return 0;
767 else
768 {
769 range_s *r1, *r2;
770 ULONGEST l1, h1;
771 ULONGEST l2, h2;
772
773 r1 = VEC_index (range_s, rp1->ranges, rp1->idx);
774 r2 = VEC_index (range_s, rp2->ranges, rp2->idx);
775
776 /* Get the unavailable windows intersected by the incoming
777 ranges. The first and last ranges that overlap the argument
778 range may be wider than the incoming argument ranges. */
779 l1 = max (offset1, r1->offset);
780 h1 = min (offset1 + length, r1->offset + r1->length);
781
782 l2 = max (offset2, r2->offset);
783 h2 = min (offset2 + length, r2->offset + r2->length);
784
785 /* Make them relative to the respective start offsets, so we can
786 compare them for equality. */
787 l1 -= offset1;
788 h1 -= offset1;
789
790 l2 -= offset2;
791 h2 -= offset2;
792
793 /* Different ranges, no match. */
794 if (l1 != l2 || h1 != h2)
795 return 0;
796
797 *h = h1;
798 *l = l1;
799 return 1;
800 }
801 }
802
803 /* Helper function for value_contents_eq. The only difference is that
804 this function is bit rather than byte based.
805
806 Compare LENGTH bits of VAL1's contents starting at OFFSET1 bits
807 with LENGTH bits of VAL2's contents starting at OFFSET2 bits.
808 Return true if the available bits match. */
809
810 static int
811 value_contents_bits_eq (const struct value *val1, int offset1,
812 const struct value *val2, int offset2,
813 int length)
814 {
815 /* Each array element corresponds to a ranges source (unavailable,
816 optimized out). '1' is for VAL1, '2' for VAL2. */
817 struct ranges_and_idx rp1[2], rp2[2];
818
819 /* See function description in value.h. */
820 gdb_assert (!val1->lazy && !val2->lazy);
821
822 /* We shouldn't be trying to compare past the end of the values. */
823 gdb_assert (offset1 + length
824 <= TYPE_LENGTH (val1->enclosing_type) * TARGET_CHAR_BIT);
825 gdb_assert (offset2 + length
826 <= TYPE_LENGTH (val2->enclosing_type) * TARGET_CHAR_BIT);
827
828 memset (&rp1, 0, sizeof (rp1));
829 memset (&rp2, 0, sizeof (rp2));
830 rp1[0].ranges = val1->unavailable;
831 rp2[0].ranges = val2->unavailable;
832 rp1[1].ranges = val1->optimized_out;
833 rp2[1].ranges = val2->optimized_out;
834
835 while (length > 0)
836 {
837 ULONGEST l = 0, h = 0; /* init for gcc -Wall */
838 int i;
839
840 for (i = 0; i < 2; i++)
841 {
842 ULONGEST l_tmp, h_tmp;
843
844 /* The contents only match if the invalid/unavailable
845 contents ranges match as well. */
846 if (!find_first_range_overlap_and_match (&rp1[i], &rp2[i],
847 offset1, offset2, length,
848 &l_tmp, &h_tmp))
849 return 0;
850
851 /* We're interested in the lowest/first range found. */
852 if (i == 0 || l_tmp < l)
853 {
854 l = l_tmp;
855 h = h_tmp;
856 }
857 }
858
859 /* Compare the available/valid contents. */
860 if (memcmp_with_bit_offsets (val1->contents, offset1,
861 val2->contents, offset2, l) != 0)
862 return 0;
863
864 length -= h;
865 offset1 += h;
866 offset2 += h;
867 }
868
869 return 1;
870 }
871
872 int
873 value_contents_eq (const struct value *val1, int offset1,
874 const struct value *val2, int offset2,
875 int length)
876 {
877 return value_contents_bits_eq (val1, offset1 * TARGET_CHAR_BIT,
878 val2, offset2 * TARGET_CHAR_BIT,
879 length * TARGET_CHAR_BIT);
880 }
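/* Illustrative sketch, not part of GDB and not built: two values with
   identical raw bytes only compare equal while their unavailable /
   optimized-out range metadata also matches.  INT_TYPE is assumed to be
   some integer type supplied by the caller; the function name is
   hypothetical.  */
#if 0
static void
example_value_contents_eq (struct type *int_type)
{
  struct value *a = allocate_value (int_type);
  struct value *b = allocate_value (int_type);

  memset (value_contents_writeable (a), 0, TYPE_LENGTH (int_type));
  memset (value_contents_writeable (b), 0, TYPE_LENGTH (int_type));
  gdb_assert (value_contents_eq (a, 0, b, 0, TYPE_LENGTH (int_type)));

  /* Marking a byte of B unavailable makes the range metadata differ,
     so the comparison now fails even though the raw bytes still match.  */
  mark_value_bytes_unavailable (b, 0, 1);
  gdb_assert (!value_contents_eq (a, 0, b, 0, TYPE_LENGTH (int_type)));
}
#endif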
881
882 /* Prototypes for local functions. */
883
884 static void show_values (char *, int);
885
886 static void show_convenience (char *, int);
887
888
889 /* The value-history records all the values printed
890 by print commands during this session. Each chunk
891 records 60 consecutive values. The first chunk on
892 the chain records the most recent values.
893 The total number of values is in value_history_count. */
894
895 #define VALUE_HISTORY_CHUNK 60
896
897 struct value_history_chunk
898 {
899 struct value_history_chunk *next;
900 struct value *values[VALUE_HISTORY_CHUNK];
901 };
902
903 /* Chain of chunks now in use. */
904
905 static struct value_history_chunk *value_history_chain;
906
907 static int value_history_count; /* Abs number of last entry stored. */
908
909 \f
910 /* List of all value objects currently allocated
911 (except for those released by calls to release_value).
912 This is so they can be freed after each command. */
913
914 static struct value *all_values;
915
916 /* Allocate a lazy value for type TYPE. Its actual content is
917 "lazily" allocated too: the content field of the return value is
918 NULL; it will be allocated when it is fetched from the target. */
919
920 struct value *
921 allocate_value_lazy (struct type *type)
922 {
923 struct value *val;
924
925 /* Call check_typedef on our type to make sure that, if TYPE
926 is a TYPE_CODE_TYPEDEF, its length is set to the length
927 of the target type instead of zero. However, we do not
928 replace the typedef type by the target type, because we want
929 to keep the typedef in order to be able to set the VAL's type
930 description correctly. */
931 check_typedef (type);
932
933 val = (struct value *) xzalloc (sizeof (struct value));
934 val->contents = NULL;
935 val->next = all_values;
936 all_values = val;
937 val->type = type;
938 val->enclosing_type = type;
939 VALUE_LVAL (val) = not_lval;
940 val->location.address = 0;
941 VALUE_FRAME_ID (val) = null_frame_id;
942 val->offset = 0;
943 val->bitpos = 0;
944 val->bitsize = 0;
945 VALUE_REGNUM (val) = -1;
946 val->lazy = 1;
947 val->embedded_offset = 0;
948 val->pointed_to_offset = 0;
949 val->modifiable = 1;
950 val->initialized = 1; /* Default to initialized. */
951
952 /* Values start out on the all_values chain. */
953 val->reference_count = 1;
954
955 return val;
956 }
957
958 /* Allocate the contents of VAL if it has not been allocated yet. */
959
960 static void
961 allocate_value_contents (struct value *val)
962 {
963 if (!val->contents)
964 val->contents = (gdb_byte *) xzalloc (TYPE_LENGTH (val->enclosing_type));
965 }
966
967 /* Allocate a value and its contents for type TYPE. */
968
969 struct value *
970 allocate_value (struct type *type)
971 {
972 struct value *val = allocate_value_lazy (type);
973
974 allocate_value_contents (val);
975 val->lazy = 0;
976 return val;
977 }
978
979 /* Allocate a value that has the correct length
980 for COUNT repetitions of type TYPE. */
981
982 struct value *
983 allocate_repeat_value (struct type *type, int count)
984 {
985 int low_bound = current_language->string_lower_bound; /* ??? */
986 /* FIXME-type-allocation: need a way to free this type when we are
987 done with it. */
988 struct type *array_type
989 = lookup_array_range_type (type, low_bound, count + low_bound - 1);
990
991 return allocate_value (array_type);
992 }
993
994 struct value *
995 allocate_computed_value (struct type *type,
996 const struct lval_funcs *funcs,
997 void *closure)
998 {
999 struct value *v = allocate_value_lazy (type);
1000
1001 VALUE_LVAL (v) = lval_computed;
1002 v->location.computed.funcs = funcs;
1003 v->location.computed.closure = closure;
1004
1005 return v;
1006 }
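/* Illustrative sketch, not part of GDB and not built: the minimum
   needed for a computed value is a `read' callback that fills in the
   contents when the lazy value is first fetched.  The names below are
   hypothetical; the remaining lval_funcs callbacks are left NULL.  */
#if 0
static void
example_read_all_zeros (struct value *v)
{
  memset (value_contents_all_raw (v), 0,
	  TYPE_LENGTH (value_enclosing_type (v)));
}

static const struct lval_funcs example_zero_funcs =
{
  example_read_all_zeros,	/* read */
};

/* allocate_computed_value (type, &example_zero_funcs, NULL) then
   returns a lazy lval_computed value whose contents are produced by the
   callback on first access.  */
#endif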
1007
1008 /* Allocate NOT_LVAL value for type TYPE being OPTIMIZED_OUT. */
1009
1010 struct value *
1011 allocate_optimized_out_value (struct type *type)
1012 {
1013 struct value *retval = allocate_value_lazy (type);
1014
1015 mark_value_bytes_optimized_out (retval, 0, TYPE_LENGTH (type));
1016 set_value_lazy (retval, 0);
1017 return retval;
1018 }
1019
1020 /* Accessor methods. */
1021
1022 struct value *
1023 value_next (struct value *value)
1024 {
1025 return value->next;
1026 }
1027
1028 struct type *
1029 value_type (const struct value *value)
1030 {
1031 return value->type;
1032 }
1033 void
1034 deprecated_set_value_type (struct value *value, struct type *type)
1035 {
1036 value->type = type;
1037 }
1038
1039 int
1040 value_offset (const struct value *value)
1041 {
1042 return value->offset;
1043 }
1044 void
1045 set_value_offset (struct value *value, int offset)
1046 {
1047 value->offset = offset;
1048 }
1049
1050 int
1051 value_bitpos (const struct value *value)
1052 {
1053 return value->bitpos;
1054 }
1055 void
1056 set_value_bitpos (struct value *value, int bit)
1057 {
1058 value->bitpos = bit;
1059 }
1060
1061 int
1062 value_bitsize (const struct value *value)
1063 {
1064 return value->bitsize;
1065 }
1066 void
1067 set_value_bitsize (struct value *value, int bit)
1068 {
1069 value->bitsize = bit;
1070 }
1071
1072 struct value *
1073 value_parent (struct value *value)
1074 {
1075 return value->parent;
1076 }
1077
1078 /* See value.h. */
1079
1080 void
1081 set_value_parent (struct value *value, struct value *parent)
1082 {
1083 struct value *old = value->parent;
1084
1085 value->parent = parent;
1086 if (parent != NULL)
1087 value_incref (parent);
1088 value_free (old);
1089 }
1090
1091 gdb_byte *
1092 value_contents_raw (struct value *value)
1093 {
1094 allocate_value_contents (value);
1095 return value->contents + value->embedded_offset;
1096 }
1097
1098 gdb_byte *
1099 value_contents_all_raw (struct value *value)
1100 {
1101 allocate_value_contents (value);
1102 return value->contents;
1103 }
1104
1105 struct type *
1106 value_enclosing_type (struct value *value)
1107 {
1108 return value->enclosing_type;
1109 }
1110
1111 /* Look at value.h for description. */
1112
1113 struct type *
1114 value_actual_type (struct value *value, int resolve_simple_types,
1115 int *real_type_found)
1116 {
1117 struct value_print_options opts;
1118 struct type *result;
1119
1120 get_user_print_options (&opts);
1121
1122 if (real_type_found)
1123 *real_type_found = 0;
1124 result = value_type (value);
1125 if (opts.objectprint)
1126 {
1127 /* If result's target type is TYPE_CODE_STRUCT, proceed to
1128 fetch its rtti type. */
1129 if ((TYPE_CODE (result) == TYPE_CODE_PTR
1130 || TYPE_CODE (result) == TYPE_CODE_REF)
1131 && TYPE_CODE (check_typedef (TYPE_TARGET_TYPE (result)))
1132 == TYPE_CODE_STRUCT)
1133 {
1134 struct type *real_type;
1135
1136 real_type = value_rtti_indirect_type (value, NULL, NULL, NULL);
1137 if (real_type)
1138 {
1139 if (real_type_found)
1140 *real_type_found = 1;
1141 result = real_type;
1142 }
1143 }
1144 else if (resolve_simple_types)
1145 {
1146 if (real_type_found)
1147 *real_type_found = 1;
1148 result = value_enclosing_type (value);
1149 }
1150 }
1151
1152 return result;
1153 }
1154
1155 void
1156 error_value_optimized_out (void)
1157 {
1158 error (_("value has been optimized out"));
1159 }
1160
1161 static void
1162 require_not_optimized_out (const struct value *value)
1163 {
1164 if (!VEC_empty (range_s, value->optimized_out))
1165 {
1166 if (value->lval == lval_register)
1167 error (_("register has not been saved in frame"));
1168 else
1169 error_value_optimized_out ();
1170 }
1171 }
1172
1173 static void
1174 require_available (const struct value *value)
1175 {
1176 if (!VEC_empty (range_s, value->unavailable))
1177 throw_error (NOT_AVAILABLE_ERROR, _("value is not available"));
1178 }
1179
1180 const gdb_byte *
1181 value_contents_for_printing (struct value *value)
1182 {
1183 if (value->lazy)
1184 value_fetch_lazy (value);
1185 return value->contents;
1186 }
1187
1188 const gdb_byte *
1189 value_contents_for_printing_const (const struct value *value)
1190 {
1191 gdb_assert (!value->lazy);
1192 return value->contents;
1193 }
1194
1195 const gdb_byte *
1196 value_contents_all (struct value *value)
1197 {
1198 const gdb_byte *result = value_contents_for_printing (value);
1199 require_not_optimized_out (value);
1200 require_available (value);
1201 return result;
1202 }
1203
1204 /* Copy ranges in SRC_RANGE that overlap [SRC_BIT_OFFSET,
1205 SRC_BIT_OFFSET+BIT_LENGTH) into *DST_RANGE, adjusted. */
1206
1207 static void
1208 ranges_copy_adjusted (VEC (range_s) **dst_range, int dst_bit_offset,
1209 VEC (range_s) *src_range, int src_bit_offset,
1210 int bit_length)
1211 {
1212 range_s *r;
1213 int i;
1214
1215 for (i = 0; VEC_iterate (range_s, src_range, i, r); i++)
1216 {
1217 ULONGEST h, l;
1218
1219 l = max (r->offset, src_bit_offset);
1220 h = min (r->offset + r->length, src_bit_offset + bit_length);
1221
1222 if (l < h)
1223 insert_into_bit_range_vector (dst_range,
1224 dst_bit_offset + (l - src_bit_offset),
1225 h - l);
1226 }
1227 }
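/* Illustrative sketch, not part of GDB and not built: only the part of
   a source range that intersects the copied window is transferred, and
   it is rebased onto the destination offset.  The function name is
   hypothetical.  */
#if 0
static void
example_ranges_copy_adjusted (void)
{
  VEC(range_s) *src = NULL, *dst = NULL;

  /* Source bits [8,16) are unavailable.  Copying source bits [12,28)
     to destination bits starting at 100 marks destination bits
     [100,104), i.e. the intersection [12,16) shifted by 100 - 12.  */
  insert_into_bit_range_vector (&src, 8, 8);
  ranges_copy_adjusted (&dst, 100, src, 12, 16);

  gdb_assert (VEC_length (range_s, dst) == 1);
  gdb_assert (VEC_index (range_s, dst, 0)->offset == 100);
  gdb_assert (VEC_index (range_s, dst, 0)->length == 4);

  VEC_free (range_s, src);
  VEC_free (range_s, dst);
}
#endif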
1228
1229 /* Copy the ranges metadata in SRC that overlaps [SRC_BIT_OFFSET,
1230 SRC_BIT_OFFSET+BIT_LENGTH) into DST, adjusted. */
1231
1232 static void
1233 value_ranges_copy_adjusted (struct value *dst, int dst_bit_offset,
1234 const struct value *src, int src_bit_offset,
1235 int bit_length)
1236 {
1237 ranges_copy_adjusted (&dst->unavailable, dst_bit_offset,
1238 src->unavailable, src_bit_offset,
1239 bit_length);
1240 ranges_copy_adjusted (&dst->optimized_out, dst_bit_offset,
1241 src->optimized_out, src_bit_offset,
1242 bit_length);
1243 }
1244
1245 /* Copy LENGTH bytes of SRC value's (all) contents
1246 (value_contents_all) starting at SRC_OFFSET, into DST value's (all)
1247 contents, starting at DST_OFFSET. If unavailable contents are
1248 being copied from SRC, the corresponding DST contents are marked
1249 unavailable accordingly. Neither DST nor SRC may be lazy
1250 values.
1251
1252 It is assumed the contents of DST in the [DST_OFFSET,
1253 DST_OFFSET+LENGTH) range are wholly available. */
1254
1255 void
1256 value_contents_copy_raw (struct value *dst, int dst_offset,
1257 struct value *src, int src_offset, int length)
1258 {
1259 range_s *r;
1260 int i;
1261 int src_bit_offset, dst_bit_offset, bit_length;
1262
1263 /* A lazy DST would make this copy operation useless, since as
1264 soon as DST's contents were un-lazied (by a later value_contents
1265 call, say), the contents would be overwritten. A lazy SRC would
1266 mean we'd be copying garbage. */
1267 gdb_assert (!dst->lazy && !src->lazy);
1268
1269 /* The overwritten DST range gets unavailability ORed in, not
1270 replaced. Make sure to remember to implement replacing if it
1271 turns out actually necessary. */
1272 gdb_assert (value_bytes_available (dst, dst_offset, length));
1273 gdb_assert (!value_bits_any_optimized_out (dst,
1274 TARGET_CHAR_BIT * dst_offset,
1275 TARGET_CHAR_BIT * length));
1276
1277 /* Copy the data. */
1278 memcpy (value_contents_all_raw (dst) + dst_offset,
1279 value_contents_all_raw (src) + src_offset,
1280 length);
1281
1282 /* Copy the meta-data, adjusted. */
1283 src_bit_offset = src_offset * TARGET_CHAR_BIT;
1284 dst_bit_offset = dst_offset * TARGET_CHAR_BIT;
1285 bit_length = length * TARGET_CHAR_BIT;
1286
1287 value_ranges_copy_adjusted (dst, dst_bit_offset,
1288 src, src_bit_offset,
1289 bit_length);
1290 }
1291
1292 /* Copy LENGTH bytes of SRC value's (all) contents
1293 (value_contents_all) starting at SRC_OFFSET byte, into DST value's
1294 (all) contents, starting at DST_OFFSET. If unavailable contents
1295 are being copied from SRC, the corresponding DST contents are
1296 marked unavailable accordingly. DST must not be lazy. If SRC is
1297 lazy, it will be fetched now.
1298
1299 It is assumed the contents of DST in the [DST_OFFSET,
1300 DST_OFFSET+LENGTH) range are wholly available. */
1301
1302 void
1303 value_contents_copy (struct value *dst, int dst_offset,
1304 struct value *src, int src_offset, int length)
1305 {
1306 if (src->lazy)
1307 value_fetch_lazy (src);
1308
1309 value_contents_copy_raw (dst, dst_offset, src, src_offset, length);
1310 }
1311
1312 int
1313 value_lazy (struct value *value)
1314 {
1315 return value->lazy;
1316 }
1317
1318 void
1319 set_value_lazy (struct value *value, int val)
1320 {
1321 value->lazy = val;
1322 }
1323
1324 int
1325 value_stack (struct value *value)
1326 {
1327 return value->stack;
1328 }
1329
1330 void
1331 set_value_stack (struct value *value, int val)
1332 {
1333 value->stack = val;
1334 }
1335
1336 const gdb_byte *
1337 value_contents (struct value *value)
1338 {
1339 const gdb_byte *result = value_contents_writeable (value);
1340 require_not_optimized_out (value);
1341 require_available (value);
1342 return result;
1343 }
1344
1345 gdb_byte *
1346 value_contents_writeable (struct value *value)
1347 {
1348 if (value->lazy)
1349 value_fetch_lazy (value);
1350 return value_contents_raw (value);
1351 }
1352
1353 int
1354 value_optimized_out (struct value *value)
1355 {
1356 /* We can only know if a value is optimized out once we have tried to
1357 fetch it. */
1358 if (VEC_empty (range_s, value->optimized_out) && value->lazy)
1359 value_fetch_lazy (value);
1360
1361 return !VEC_empty (range_s, value->optimized_out);
1362 }
1363
1364 /* Mark contents of VALUE as optimized out, starting at OFFSET bytes, and
1365 the following LENGTH bytes. */
1366
1367 void
1368 mark_value_bytes_optimized_out (struct value *value, int offset, int length)
1369 {
1370 mark_value_bits_optimized_out (value,
1371 offset * TARGET_CHAR_BIT,
1372 length * TARGET_CHAR_BIT);
1373 }
1374
1375 /* See value.h. */
1376
1377 void
1378 mark_value_bits_optimized_out (struct value *value, int offset, int length)
1379 {
1380 insert_into_bit_range_vector (&value->optimized_out, offset, length);
1381 }
1382
1383 int
1384 value_bits_synthetic_pointer (const struct value *value,
1385 int offset, int length)
1386 {
1387 if (value->lval != lval_computed
1388 || !value->location.computed.funcs->check_synthetic_pointer)
1389 return 0;
1390 return value->location.computed.funcs->check_synthetic_pointer (value,
1391 offset,
1392 length);
1393 }
1394
1395 int
1396 value_embedded_offset (struct value *value)
1397 {
1398 return value->embedded_offset;
1399 }
1400
1401 void
1402 set_value_embedded_offset (struct value *value, int val)
1403 {
1404 value->embedded_offset = val;
1405 }
1406
1407 int
1408 value_pointed_to_offset (struct value *value)
1409 {
1410 return value->pointed_to_offset;
1411 }
1412
1413 void
1414 set_value_pointed_to_offset (struct value *value, int val)
1415 {
1416 value->pointed_to_offset = val;
1417 }
1418
1419 const struct lval_funcs *
1420 value_computed_funcs (const struct value *v)
1421 {
1422 gdb_assert (value_lval_const (v) == lval_computed);
1423
1424 return v->location.computed.funcs;
1425 }
1426
1427 void *
1428 value_computed_closure (const struct value *v)
1429 {
1430 gdb_assert (v->lval == lval_computed);
1431
1432 return v->location.computed.closure;
1433 }
1434
1435 enum lval_type *
1436 deprecated_value_lval_hack (struct value *value)
1437 {
1438 return &value->lval;
1439 }
1440
1441 enum lval_type
1442 value_lval_const (const struct value *value)
1443 {
1444 return value->lval;
1445 }
1446
1447 CORE_ADDR
1448 value_address (const struct value *value)
1449 {
1450 if (value->lval == lval_internalvar
1451 || value->lval == lval_internalvar_component
1452 || value->lval == lval_xcallable)
1453 return 0;
1454 if (value->parent != NULL)
1455 return value_address (value->parent) + value->offset;
1456 else
1457 return value->location.address + value->offset;
1458 }
1459
1460 CORE_ADDR
1461 value_raw_address (struct value *value)
1462 {
1463 if (value->lval == lval_internalvar
1464 || value->lval == lval_internalvar_component
1465 || value->lval == lval_xcallable)
1466 return 0;
1467 return value->location.address;
1468 }
1469
1470 void
1471 set_value_address (struct value *value, CORE_ADDR addr)
1472 {
1473 gdb_assert (value->lval != lval_internalvar
1474 && value->lval != lval_internalvar_component
1475 && value->lval != lval_xcallable);
1476 value->location.address = addr;
1477 }
1478
1479 struct internalvar **
1480 deprecated_value_internalvar_hack (struct value *value)
1481 {
1482 return &value->location.internalvar;
1483 }
1484
1485 struct frame_id *
1486 deprecated_value_frame_id_hack (struct value *value)
1487 {
1488 return &value->frame_id;
1489 }
1490
1491 short *
1492 deprecated_value_regnum_hack (struct value *value)
1493 {
1494 return &value->regnum;
1495 }
1496
1497 int
1498 deprecated_value_modifiable (struct value *value)
1499 {
1500 return value->modifiable;
1501 }
1502 \f
1503 /* Return a mark in the value chain. All values allocated after the
1504 mark is obtained (except for those released) are subject to being freed
1505 if a subsequent value_free_to_mark is passed the mark. */
1506 struct value *
1507 value_mark (void)
1508 {
1509 return all_values;
1510 }
1511
1512 /* Take a reference to VAL. VAL will not be deallocated until all
1513 references are released. */
1514
1515 void
1516 value_incref (struct value *val)
1517 {
1518 val->reference_count++;
1519 }
1520
1521 /* Release a reference to VAL, which was acquired with value_incref.
1522 This function is also called to deallocate values from the value
1523 chain. */
1524
1525 void
1526 value_free (struct value *val)
1527 {
1528 if (val)
1529 {
1530 gdb_assert (val->reference_count > 0);
1531 val->reference_count--;
1532 if (val->reference_count > 0)
1533 return;
1534
1535 /* If there's an associated parent value, drop our reference to
1536 it. */
1537 if (val->parent != NULL)
1538 value_free (val->parent);
1539
1540 if (VALUE_LVAL (val) == lval_computed)
1541 {
1542 const struct lval_funcs *funcs = val->location.computed.funcs;
1543
1544 if (funcs->free_closure)
1545 funcs->free_closure (val);
1546 }
1547 else if (VALUE_LVAL (val) == lval_xcallable)
1548 free_xmethod_worker (val->location.xm_worker);
1549
1550 xfree (val->contents);
1551 VEC_free (range_s, val->unavailable);
1552 }
1553 xfree (val);
1554 }
1555
1556 /* Free all values allocated since MARK was obtained by value_mark
1557 (except for those released). */
1558 void
1559 value_free_to_mark (struct value *mark)
1560 {
1561 struct value *val;
1562 struct value *next;
1563
1564 for (val = all_values; val && val != mark; val = next)
1565 {
1566 next = val->next;
1567 val->released = 1;
1568 value_free (val);
1569 }
1570 all_values = val;
1571 }
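/* Illustrative sketch, not part of GDB and not built: the usual way to
   keep temporaries from accumulating on the value chain is to bracket a
   computation with value_mark / value_free_to_mark.  The function name
   is hypothetical.  */
#if 0
static void
example_value_mark_usage (void)
{
  struct value *mark = value_mark ();

  /* ... allocate and use temporary values here ...  */

  /* Frees every value allocated since MARK, except those explicitly
     released in the meantime.  */
  value_free_to_mark (mark);
}
#endif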
1572
1573 /* Free all the values that have been allocated (except for those released).
1574 Call after each command, successful or not.
1575 In practice this is called before each command, which is sufficient. */
1576
1577 void
1578 free_all_values (void)
1579 {
1580 struct value *val;
1581 struct value *next;
1582
1583 for (val = all_values; val; val = next)
1584 {
1585 next = val->next;
1586 val->released = 1;
1587 value_free (val);
1588 }
1589
1590 all_values = 0;
1591 }
1592
1593 /* Frees all the elements in a chain of values. */
1594
1595 void
1596 free_value_chain (struct value *v)
1597 {
1598 struct value *next;
1599
1600 for (; v; v = next)
1601 {
1602 next = value_next (v);
1603 value_free (v);
1604 }
1605 }
1606
1607 /* Remove VAL from the chain all_values
1608 so it will not be freed automatically. */
1609
1610 void
1611 release_value (struct value *val)
1612 {
1613 struct value *v;
1614
1615 if (all_values == val)
1616 {
1617 all_values = val->next;
1618 val->next = NULL;
1619 val->released = 1;
1620 return;
1621 }
1622
1623 for (v = all_values; v; v = v->next)
1624 {
1625 if (v->next == val)
1626 {
1627 v->next = val->next;
1628 val->next = NULL;
1629 val->released = 1;
1630 break;
1631 }
1632 }
1633 }
1634
1635 /* If the value is not already released, release it.
1636 If the value is already released, increment its reference count.
1637 That is, this function ensures that the value is released from the
1638 value chain and that the caller owns a reference to it. */
1639
1640 void
1641 release_value_or_incref (struct value *val)
1642 {
1643 if (val->released)
1644 value_incref (val);
1645 else
1646 release_value (val);
1647 }
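/* Illustrative sketch, not part of GDB and not built: a caller that
   wants to keep a value beyond the current command takes ownership of a
   reference and later drops it with value_free.  The function name is
   hypothetical.  */
#if 0
static struct value *
example_make_persistent_value (struct type *type)
{
  /* A freshly allocated value sits on the all_values chain with a
     reference count of 1 and would be reclaimed by the next
     free_all_values call.  */
  struct value *val = allocate_value (type);

  /* Take it off the chain (or add a reference if it was already
     released); the caller now owns a reference and must eventually
     call value_free.  */
  release_value_or_incref (val);
  return val;
}
#endif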
1648
1649 /* Release all values up to mark. */
1650 struct value *
1651 value_release_to_mark (struct value *mark)
1652 {
1653 struct value *val;
1654 struct value *next;
1655
1656 for (val = next = all_values; next; next = next->next)
1657 {
1658 if (next->next == mark)
1659 {
1660 all_values = next->next;
1661 next->next = NULL;
1662 return val;
1663 }
1664 next->released = 1;
1665 }
1666 all_values = 0;
1667 return val;
1668 }
1669
1670 /* Return a copy of the value ARG.
1671 It contains the same contents, for the same memory address,
1672 but it's a different block of storage. */
1673
1674 struct value *
1675 value_copy (struct value *arg)
1676 {
1677 struct type *encl_type = value_enclosing_type (arg);
1678 struct value *val;
1679
1680 if (value_lazy (arg))
1681 val = allocate_value_lazy (encl_type);
1682 else
1683 val = allocate_value (encl_type);
1684 val->type = arg->type;
1685 VALUE_LVAL (val) = VALUE_LVAL (arg);
1686 val->location = arg->location;
1687 val->offset = arg->offset;
1688 val->bitpos = arg->bitpos;
1689 val->bitsize = arg->bitsize;
1690 VALUE_FRAME_ID (val) = VALUE_FRAME_ID (arg);
1691 VALUE_REGNUM (val) = VALUE_REGNUM (arg);
1692 val->lazy = arg->lazy;
1693 val->embedded_offset = value_embedded_offset (arg);
1694 val->pointed_to_offset = arg->pointed_to_offset;
1695 val->modifiable = arg->modifiable;
1696 if (!value_lazy (val))
1697 {
1698 memcpy (value_contents_all_raw (val), value_contents_all_raw (arg),
1699 TYPE_LENGTH (value_enclosing_type (arg)));
1700
1701 }
1702 val->unavailable = VEC_copy (range_s, arg->unavailable);
1703 val->optimized_out = VEC_copy (range_s, arg->optimized_out);
1704 set_value_parent (val, arg->parent);
1705 if (VALUE_LVAL (val) == lval_computed)
1706 {
1707 const struct lval_funcs *funcs = val->location.computed.funcs;
1708
1709 if (funcs->copy_closure)
1710 val->location.computed.closure = funcs->copy_closure (val);
1711 }
1712 return val;
1713 }
1714
1715 /* Return a "const" and/or "volatile" qualified version of the value V.
1716 If CNST is true, then the returned value will be qualified with
1717 "const".
1718 If VOLTL is true, then the returned value will be qualified with
1719 "volatile". */
1720
1721 struct value *
1722 make_cv_value (int cnst, int voltl, struct value *v)
1723 {
1724 struct type *val_type = value_type (v);
1725 struct type *enclosing_type = value_enclosing_type (v);
1726 struct value *cv_val = value_copy (v);
1727
1728 deprecated_set_value_type (cv_val,
1729 make_cv_type (cnst, voltl, val_type, NULL));
1730 set_value_enclosing_type (cv_val,
1731 make_cv_type (cnst, voltl, enclosing_type, NULL));
1732
1733 return cv_val;
1734 }
1735
1736 /* Return a version of ARG that is non-lvalue. */
1737
1738 struct value *
1739 value_non_lval (struct value *arg)
1740 {
1741 if (VALUE_LVAL (arg) != not_lval)
1742 {
1743 struct type *enc_type = value_enclosing_type (arg);
1744 struct value *val = allocate_value (enc_type);
1745
1746 memcpy (value_contents_all_raw (val), value_contents_all (arg),
1747 TYPE_LENGTH (enc_type));
1748 val->type = arg->type;
1749 set_value_embedded_offset (val, value_embedded_offset (arg));
1750 set_value_pointed_to_offset (val, value_pointed_to_offset (arg));
1751 return val;
1752 }
1753 return arg;
1754 }
1755
1756 /* Write contents of V at ADDR and set its lval type to be LVAL_MEMORY. */
1757
1758 void
1759 value_force_lval (struct value *v, CORE_ADDR addr)
1760 {
1761 gdb_assert (VALUE_LVAL (v) == not_lval);
1762
1763 write_memory (addr, value_contents_raw (v), TYPE_LENGTH (value_type (v)));
1764 v->lval = lval_memory;
1765 v->location.address = addr;
1766 }
1767
1768 void
1769 set_value_component_location (struct value *component,
1770 const struct value *whole)
1771 {
1772 gdb_assert (whole->lval != lval_xcallable);
1773
1774 if (whole->lval == lval_internalvar)
1775 VALUE_LVAL (component) = lval_internalvar_component;
1776 else
1777 VALUE_LVAL (component) = whole->lval;
1778
1779 component->location = whole->location;
1780 if (whole->lval == lval_computed)
1781 {
1782 const struct lval_funcs *funcs = whole->location.computed.funcs;
1783
1784 if (funcs->copy_closure)
1785 component->location.computed.closure = funcs->copy_closure (whole);
1786 }
1787 }
1788
1789 \f
1790 /* Access to the value history. */
1791
1792 /* Record a new value in the value history.
1793 Returns the absolute history index of the entry. */
1794
1795 int
1796 record_latest_value (struct value *val)
1797 {
1798 int i;
1799
1800 /* We don't want this value to have anything to do with the inferior anymore.
1801 In particular, "set $1 = 50" should not affect the variable from which
1802 the value was taken, and fast watchpoints should be able to assume that
1803 a value on the value history never changes. */
1804 if (value_lazy (val))
1805 value_fetch_lazy (val);
1806 /* We preserve VALUE_LVAL so that the user can find out where it was fetched
1807 from. This is a bit dubious, because then *&$1 does not just return $1
1808 but the current contents of that location. c'est la vie... */
1809 val->modifiable = 0;
1810
1811 /* The value may have already been released, in which case we're adding a
1812 new reference for its entry in the history. That is why we call
1813 release_value_or_incref here instead of release_value. */
1814 release_value_or_incref (val);
1815
1816 /* Here we treat value_history_count as origin-zero
1817 and applying to the value being stored now. */
1818
1819 i = value_history_count % VALUE_HISTORY_CHUNK;
1820 if (i == 0)
1821 {
1822 struct value_history_chunk *newobj
1823 = (struct value_history_chunk *)
1824
1825 xmalloc (sizeof (struct value_history_chunk));
1826 memset (newobj->values, 0, sizeof newobj->values);
1827 newobj->next = value_history_chain;
1828 value_history_chain = newobj;
1829 }
1830
1831 value_history_chain->values[i] = val;
1832
1833 /* Now we regard value_history_count as origin-one
1834 and applying to the value just stored. */
1835
1836 return ++value_history_count;
1837 }
1838
1839 /* Return a copy of the value in the history with sequence number NUM. */
1840
1841 struct value *
1842 access_value_history (int num)
1843 {
1844 struct value_history_chunk *chunk;
1845 int i;
1846 int absnum = num;
1847
1848 if (absnum <= 0)
1849 absnum += value_history_count;
1850
1851 if (absnum <= 0)
1852 {
1853 if (num == 0)
1854 error (_("The history is empty."));
1855 else if (num == 1)
1856 error (_("There is only one value in the history."));
1857 else
1858 error (_("History does not go back to $$%d."), -num);
1859 }
1860 if (absnum > value_history_count)
1861 error (_("History has not yet reached $%d."), absnum);
1862
1863 absnum--;
1864
1865 /* Now absnum is always absolute and origin zero. */
1866
1867 chunk = value_history_chain;
1868 for (i = (value_history_count - 1) / VALUE_HISTORY_CHUNK
1869 - absnum / VALUE_HISTORY_CHUNK;
1870 i > 0; i--)
1871 chunk = chunk->next;
1872
1873 return value_copy (chunk->values[absnum % VALUE_HISTORY_CHUNK]);
1874 }
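/* Illustrative worked example (not GDB code), assuming
   VALUE_HISTORY_CHUNK == 60 and value_history_count == 130, i.e.
   $1..$130 exist and the chain is: [121..180] -> [61..120] -> [1..60],
   newest chunk first.  Looking up $125:

     absnum          = 125 - 1 = 124
     chunks to skip  = (130 - 1) / 60 - 124 / 60 = 2 - 2 = 0
     slot in chunk   = 124 % 60 = 4

   so $125 is slot 4 of the newest chunk (which holds $121..$130 in
   slots 0..9).  Looking up $65 instead skips 2 - 64 / 60 = 1 chunk and
   lands in slot 64 % 60 = 4 of the middle chunk.  */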
1875
1876 static void
1877 show_values (char *num_exp, int from_tty)
1878 {
1879 int i;
1880 struct value *val;
1881 static int num = 1;
1882
1883 if (num_exp)
1884 {
1885 /* "show values +" should print from the stored position.
1886 "show values <exp>" should print around value number <exp>. */
1887 if (num_exp[0] != '+' || num_exp[1] != '\0')
1888 num = parse_and_eval_long (num_exp) - 5;
1889 }
1890 else
1891 {
1892 /* "show values" means print the last 10 values. */
1893 num = value_history_count - 9;
1894 }
1895
1896 if (num <= 0)
1897 num = 1;
1898
1899 for (i = num; i < num + 10 && i <= value_history_count; i++)
1900 {
1901 struct value_print_options opts;
1902
1903 val = access_value_history (i);
1904 printf_filtered (("$%d = "), i);
1905 get_user_print_options (&opts);
1906 value_print (val, gdb_stdout, &opts);
1907 printf_filtered (("\n"));
1908 }
1909
1910 /* The next "show values +" should start after what we just printed. */
1911 num += 10;
1912
1913 /* Hitting just return after this command should do the same thing as
1914 "show values +". If num_exp is null, this is unnecessary, since
1915 "show values +" is not useful after "show values". */
1916 if (from_tty && num_exp)
1917 {
1918 num_exp[0] = '+';
1919 num_exp[1] = '\0';
1920 }
1921 }
1922 \f
1923 enum internalvar_kind
1924 {
1925 /* The internal variable is empty. */
1926 INTERNALVAR_VOID,
1927
1928 /* The value of the internal variable is provided directly as
1929 a GDB value object. */
1930 INTERNALVAR_VALUE,
1931
1932 /* A fresh value is computed via a call-back routine on every
1933 access to the internal variable. */
1934 INTERNALVAR_MAKE_VALUE,
1935
1936 /* The internal variable holds a GDB internal convenience function. */
1937 INTERNALVAR_FUNCTION,
1938
1939 /* The variable holds an integer value. */
1940 INTERNALVAR_INTEGER,
1941
1942 /* The variable holds a GDB-provided string. */
1943 INTERNALVAR_STRING,
1944 };
1945
1946 union internalvar_data
1947 {
1948 /* A value object used with INTERNALVAR_VALUE. */
1949 struct value *value;
1950
1951 /* The call-back routine used with INTERNALVAR_MAKE_VALUE. */
1952 struct
1953 {
1954 /* The functions to call. */
1955 const struct internalvar_funcs *functions;
1956
1957 /* The function's user-data. */
1958 void *data;
1959 } make_value;
1960
1961 /* The internal function used with INTERNALVAR_FUNCTION. */
1962 struct
1963 {
1964 struct internal_function *function;
1965 /* True if this is the canonical name for the function. */
1966 int canonical;
1967 } fn;
1968
1969 /* An integer value used with INTERNALVAR_INTEGER. */
1970 struct
1971 {
1972 /* If type is non-NULL, it will be used as the type to generate
1973 a value for this internal variable. If type is NULL, a default
1974 integer type for the architecture is used. */
1975 struct type *type;
1976 LONGEST val;
1977 } integer;
1978
1979 /* A string value used with INTERNALVAR_STRING. */
1980 char *string;
1981 };
1982
1983 /* Internal variables. These are variables within the debugger
1984 that hold values assigned by debugger commands.
1985 The user refers to them with a '$' prefix
1986 that does not appear in the variable names stored internally. */
1987
1988 struct internalvar
1989 {
1990 struct internalvar *next;
1991 char *name;
1992
1993 /* We support various different kinds of content of an internal variable.
1994 enum internalvar_kind specifies the kind, and union internalvar_data
1995 provides the data associated with this particular kind. */
1996
1997 enum internalvar_kind kind;
1998
1999 union internalvar_data u;
2000 };
2001
2002 static struct internalvar *internalvars;
2003
2004 /* If the variable does not already exist, create it and give it the
2005 value given. If it already has a value, leave it unchanged. */
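/* For example (illustrative session; $perf_count is a made-up name):

     (gdb) init-if-undefined $perf_count = 0

   assigns zero to $perf_count only if that convenience variable has
   not been given a value yet; an already-defined variable is left
   untouched, which lets user-defined commands keep state across
   invocations.  */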
2006 static void
2007 init_if_undefined_command (char *args, int from_tty)
2008 {
2009 struct internalvar *intvar;
2010
2011 /* Parse the expression - this is taken from set_command(). */
2012 struct expression *expr = parse_expression (args);
2013 struct cleanup *old_chain =
2014 make_cleanup (free_current_contents, &expr);
2015
2016 /* Validate the expression.
2017 Was the expression an assignment?
2018 Or even an expression at all? */
2019 if (expr->nelts == 0 || expr->elts[0].opcode != BINOP_ASSIGN)
2020 error (_("Init-if-undefined requires an assignment expression."));
2021
2022 /* Extract the variable from the parsed expression.
2023 In the case of an assign the lvalue will be in elts[1] and elts[2]. */
2024 if (expr->elts[1].opcode != OP_INTERNALVAR)
2025 error (_("The first parameter to init-if-undefined "
2026 "should be a GDB variable."));
2027 intvar = expr->elts[2].internalvar;
2028
2029 /* Only evaluate the expression if the lvalue is void.
2030 This may still fail if the expression is invalid. */
2031 if (intvar->kind == INTERNALVAR_VOID)
2032 evaluate_expression (expr);
2033
2034 do_cleanups (old_chain);
2035 }
2036
2037
2038 /* Look up an internal variable with name NAME. NAME should not
2039 normally include a dollar sign.
2040
2041 If the specified internal variable does not exist,
2042 the return value is NULL. */
2043
2044 struct internalvar *
2045 lookup_only_internalvar (const char *name)
2046 {
2047 struct internalvar *var;
2048
2049 for (var = internalvars; var; var = var->next)
2050 if (strcmp (var->name, name) == 0)
2051 return var;
2052
2053 return NULL;
2054 }
2055
2056 /* Complete NAME by comparing it to the names of internal variables.
2057 Returns a vector of newly allocated strings, or NULL if no matches
2058 were found. */
2059
2060 VEC (char_ptr) *
2061 complete_internalvar (const char *name)
2062 {
2063 VEC (char_ptr) *result = NULL;
2064 struct internalvar *var;
2065 int len;
2066
2067 len = strlen (name);
2068
2069 for (var = internalvars; var; var = var->next)
2070 if (strncmp (var->name, name, len) == 0)
2071 {
2072 char *r = xstrdup (var->name);
2073
2074 VEC_safe_push (char_ptr, result, r);
2075 }
2076
2077 return result;
2078 }
2079
2080 /* Create an internal variable with name NAME and with a void value.
2081 NAME should not normally include a dollar sign. */
2082
2083 struct internalvar *
2084 create_internalvar (const char *name)
2085 {
2086 struct internalvar *var;
2087
2088 var = (struct internalvar *) xmalloc (sizeof (struct internalvar));
2089 var->name = concat (name, (char *)NULL);
2090 var->kind = INTERNALVAR_VOID;
2091 var->next = internalvars;
2092 internalvars = var;
2093 return var;
2094 }
2095
2096 /* Create an internal variable with name NAME and register
2097 FUNCS->make_value as the function that value_of_internalvar uses to
2098 create a value whenever this variable is referenced. NAME should not
2099 normally include a dollar sign. DATA is passed uninterpreted to the
2100 callbacks in FUNCS. FUNCS->destroy, if not NULL, is called when the
2101 internal variable is destroyed; it is passed DATA as its only argument. */
2102
2103 struct internalvar *
2104 create_internalvar_type_lazy (const char *name,
2105 const struct internalvar_funcs *funcs,
2106 void *data)
2107 {
2108 struct internalvar *var = create_internalvar (name);
2109
2110 var->kind = INTERNALVAR_MAKE_VALUE;
2111 var->u.make_value.functions = funcs;
2112 var->u.make_value.data = data;
2113 return var;
2114 }
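/* Illustrative sketch only -- the $_counter variable, its callback and
   its state below are made up, not part of GDB:

     static int counter;

     static struct value *
     counter_make_value (struct gdbarch *gdbarch, struct internalvar *var,
                         void *data)
     {
       int *count = (int *) data;

       return value_from_longest (builtin_type (gdbarch)->builtin_int,
                                  (LONGEST) (*count)++);
     }

   Given a "struct internalvar_funcs counter_funcs" whose make_value
   member is counter_make_value and whose other callbacks are NULL,

     create_internalvar_type_lazy ("_counter", &counter_funcs, &counter);

   makes each reference to $_counter yield successive integers.  */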
2115
2116 /* See documentation in value.h. */
2117
2118 int
2119 compile_internalvar_to_ax (struct internalvar *var,
2120 struct agent_expr *expr,
2121 struct axs_value *value)
2122 {
2123 if (var->kind != INTERNALVAR_MAKE_VALUE
2124 || var->u.make_value.functions->compile_to_ax == NULL)
2125 return 0;
2126
2127 var->u.make_value.functions->compile_to_ax (var, expr, value,
2128 var->u.make_value.data);
2129 return 1;
2130 }
2131
2132 /* Look up an internal variable with name NAME. NAME should not
2133 normally include a dollar sign.
2134
2135 If the specified internal variable does not exist,
2136 one is created, with a void value. */
2137
2138 struct internalvar *
2139 lookup_internalvar (const char *name)
2140 {
2141 struct internalvar *var;
2142
2143 var = lookup_only_internalvar (name);
2144 if (var)
2145 return var;
2146
2147 return create_internalvar (name);
2148 }
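/* For example (illustrative only; $_foo is a made-up name), a caller
   that wants to publish an integer result under a convenience
   variable could do:

     struct internalvar *var = lookup_internalvar ("_foo");

     set_internalvar_integer (var, 42);

   after which "print $_foo" is served by value_of_internalvar below.  */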
2149
2150 /* Return current value of internal variable VAR. For variables that
2151 are not inherently typed, use a value type appropriate for GDBARCH. */
2152
2153 struct value *
2154 value_of_internalvar (struct gdbarch *gdbarch, struct internalvar *var)
2155 {
2156 struct value *val;
2157 struct trace_state_variable *tsv;
2158
2159 /* If there is a trace state variable of the same name, assume that
2160 is what we really want to see. */
2161 tsv = find_trace_state_variable (var->name);
2162 if (tsv)
2163 {
2164 tsv->value_known = target_get_trace_state_variable_value (tsv->number,
2165 &(tsv->value));
2166 if (tsv->value_known)
2167 val = value_from_longest (builtin_type (gdbarch)->builtin_int64,
2168 tsv->value);
2169 else
2170 val = allocate_value (builtin_type (gdbarch)->builtin_void);
2171 return val;
2172 }
2173
2174 switch (var->kind)
2175 {
2176 case INTERNALVAR_VOID:
2177 val = allocate_value (builtin_type (gdbarch)->builtin_void);
2178 break;
2179
2180 case INTERNALVAR_FUNCTION:
2181 val = allocate_value (builtin_type (gdbarch)->internal_fn);
2182 break;
2183
2184 case INTERNALVAR_INTEGER:
2185 if (!var->u.integer.type)
2186 val = value_from_longest (builtin_type (gdbarch)->builtin_int,
2187 var->u.integer.val);
2188 else
2189 val = value_from_longest (var->u.integer.type, var->u.integer.val);
2190 break;
2191
2192 case INTERNALVAR_STRING:
2193 val = value_cstring (var->u.string, strlen (var->u.string),
2194 builtin_type (gdbarch)->builtin_char);
2195 break;
2196
2197 case INTERNALVAR_VALUE:
2198 val = value_copy (var->u.value);
2199 if (value_lazy (val))
2200 value_fetch_lazy (val);
2201 break;
2202
2203 case INTERNALVAR_MAKE_VALUE:
2204 val = (*var->u.make_value.functions->make_value) (gdbarch, var,
2205 var->u.make_value.data);
2206 break;
2207
2208 default:
2209 internal_error (__FILE__, __LINE__, _("bad kind"));
2210 }
2211
2212 /* Change the VALUE_LVAL to lval_internalvar so that future operations
2213 on this value go back to affect the original internal variable.
2214
2215 Do not do this for INTERNALVAR_MAKE_VALUE variables, as those have
2216 no underlying modifiable state in the internal variable.
2217
2218 Likewise, if the variable's value is a computed lvalue, we want
2219 references to it to produce another computed lvalue, where
2220 references and assignments actually operate through the
2221 computed value's functions.
2222
2223 This means that internal variables with computed values
2224 behave a little differently from other internal variables:
2225 assignments to them don't just replace the previous value
2226 altogether. At the moment, this seems like the behavior we
2227 want. */
2228
2229 if (var->kind != INTERNALVAR_MAKE_VALUE
2230 && val->lval != lval_computed)
2231 {
2232 VALUE_LVAL (val) = lval_internalvar;
2233 VALUE_INTERNALVAR (val) = var;
2234 }
2235
2236 return val;
2237 }
2238
2239 int
2240 get_internalvar_integer (struct internalvar *var, LONGEST *result)
2241 {
2242 if (var->kind == INTERNALVAR_INTEGER)
2243 {
2244 *result = var->u.integer.val;
2245 return 1;
2246 }
2247
2248 if (var->kind == INTERNALVAR_VALUE)
2249 {
2250 struct type *type = check_typedef (value_type (var->u.value));
2251
2252 if (TYPE_CODE (type) == TYPE_CODE_INT)
2253 {
2254 *result = value_as_long (var->u.value);
2255 return 1;
2256 }
2257 }
2258
2259 return 0;
2260 }
2261
2262 static int
2263 get_internalvar_function (struct internalvar *var,
2264 struct internal_function **result)
2265 {
2266 switch (var->kind)
2267 {
2268 case INTERNALVAR_FUNCTION:
2269 *result = var->u.fn.function;
2270 return 1;
2271
2272 default:
2273 return 0;
2274 }
2275 }
2276
2277 void
2278 set_internalvar_component (struct internalvar *var, int offset, int bitpos,
2279 int bitsize, struct value *newval)
2280 {
2281 gdb_byte *addr;
2282
2283 switch (var->kind)
2284 {
2285 case INTERNALVAR_VALUE:
2286 addr = value_contents_writeable (var->u.value);
2287
2288 if (bitsize)
2289 modify_field (value_type (var->u.value), addr + offset,
2290 value_as_long (newval), bitpos, bitsize);
2291 else
2292 memcpy (addr + offset, value_contents (newval),
2293 TYPE_LENGTH (value_type (newval)));
2294 break;
2295
2296 default:
2297 /* We can never get a component of any other kind. */
2298 internal_error (__FILE__, __LINE__, _("set_internalvar_component"));
2299 }
2300 }
2301
2302 void
2303 set_internalvar (struct internalvar *var, struct value *val)
2304 {
2305 enum internalvar_kind new_kind;
2306 union internalvar_data new_data = { 0 };
2307
2308 if (var->kind == INTERNALVAR_FUNCTION && var->u.fn.canonical)
2309 error (_("Cannot overwrite convenience function %s"), var->name);
2310
2311 /* Prepare new contents. */
2312 switch (TYPE_CODE (check_typedef (value_type (val))))
2313 {
2314 case TYPE_CODE_VOID:
2315 new_kind = INTERNALVAR_VOID;
2316 break;
2317
2318 case TYPE_CODE_INTERNAL_FUNCTION:
2319 gdb_assert (VALUE_LVAL (val) == lval_internalvar);
2320 new_kind = INTERNALVAR_FUNCTION;
2321 get_internalvar_function (VALUE_INTERNALVAR (val),
2322 &new_data.fn.function);
2323 /* Copies created here are never canonical. */
2324 break;
2325
2326 default:
2327 new_kind = INTERNALVAR_VALUE;
2328 new_data.value = value_copy (val);
2329 new_data.value->modifiable = 1;
2330
2331 /* Force the value to be fetched from the target now, to avoid problems
2332 later when this internalvar is referenced and the target is gone or
2333 has changed. */
2334 if (value_lazy (new_data.value))
2335 value_fetch_lazy (new_data.value);
2336
2337 /* Release the value from the value chain to prevent it from being
2338 deleted by free_all_values. From here on this function should not
2339 call error () until new_data is installed into the var->u to avoid
2340 leaking memory. */
2341 release_value (new_data.value);
2342 break;
2343 }
2344
2345 /* Clean up old contents. */
2346 clear_internalvar (var);
2347
2348 /* Switch over. */
2349 var->kind = new_kind;
2350 var->u = new_data;
2351 /* End code which must not call error(). */
2352 }
2353
2354 void
2355 set_internalvar_integer (struct internalvar *var, LONGEST l)
2356 {
2357 /* Clean up old contents. */
2358 clear_internalvar (var);
2359
2360 var->kind = INTERNALVAR_INTEGER;
2361 var->u.integer.type = NULL;
2362 var->u.integer.val = l;
2363 }
2364
2365 void
2366 set_internalvar_string (struct internalvar *var, const char *string)
2367 {
2368 /* Clean up old contents. */
2369 clear_internalvar (var);
2370
2371 var->kind = INTERNALVAR_STRING;
2372 var->u.string = xstrdup (string);
2373 }
2374
2375 static void
2376 set_internalvar_function (struct internalvar *var, struct internal_function *f)
2377 {
2378 /* Clean up old contents. */
2379 clear_internalvar (var);
2380
2381 var->kind = INTERNALVAR_FUNCTION;
2382 var->u.fn.function = f;
2383 var->u.fn.canonical = 1;
2384 /* Variables installed here are always the canonical version. */
2385 }
2386
2387 void
2388 clear_internalvar (struct internalvar *var)
2389 {
2390 /* Clean up old contents. */
2391 switch (var->kind)
2392 {
2393 case INTERNALVAR_VALUE:
2394 value_free (var->u.value);
2395 break;
2396
2397 case INTERNALVAR_STRING:
2398 xfree (var->u.string);
2399 break;
2400
2401 case INTERNALVAR_MAKE_VALUE:
2402 if (var->u.make_value.functions->destroy != NULL)
2403 var->u.make_value.functions->destroy (var->u.make_value.data);
2404 break;
2405
2406 default:
2407 break;
2408 }
2409
2410 /* Reset to void kind. */
2411 var->kind = INTERNALVAR_VOID;
2412 }
2413
2414 char *
2415 internalvar_name (struct internalvar *var)
2416 {
2417 return var->name;
2418 }
2419
2420 static struct internal_function *
2421 create_internal_function (const char *name,
2422 internal_function_fn handler, void *cookie)
2423 {
2424 struct internal_function *ifn = XNEW (struct internal_function);
2425
2426 ifn->name = xstrdup (name);
2427 ifn->handler = handler;
2428 ifn->cookie = cookie;
2429 return ifn;
2430 }
2431
2432 char *
2433 value_internal_function_name (struct value *val)
2434 {
2435 struct internal_function *ifn;
2436 int result;
2437
2438 gdb_assert (VALUE_LVAL (val) == lval_internalvar);
2439 result = get_internalvar_function (VALUE_INTERNALVAR (val), &ifn);
2440 gdb_assert (result);
2441
2442 return ifn->name;
2443 }
2444
2445 struct value *
2446 call_internal_function (struct gdbarch *gdbarch,
2447 const struct language_defn *language,
2448 struct value *func, int argc, struct value **argv)
2449 {
2450 struct internal_function *ifn;
2451 int result;
2452
2453 gdb_assert (VALUE_LVAL (func) == lval_internalvar);
2454 result = get_internalvar_function (VALUE_INTERNALVAR (func), &ifn);
2455 gdb_assert (result);
2456
2457 return (*ifn->handler) (gdbarch, language, ifn->cookie, argc, argv);
2458 }
2459
2460 /* The 'function' command. This does nothing -- it is just a
2461 placeholder to let "help function NAME" work. This is also used as
2462 the implementation of the sub-command that is created when
2463 registering an internal function. */
2464 static void
2465 function_command (char *command, int from_tty)
2466 {
2467 /* Do nothing. */
2468 }
2469
2470 /* Clean up if an internal function's command is destroyed. */
2471 static void
2472 function_destroyer (struct cmd_list_element *self, void *ignore)
2473 {
2474 xfree ((char *) self->name);
2475 xfree ((char *) self->doc);
2476 }
2477
2478 /* Add a new internal function. NAME is the name of the function; DOC
2479 is a documentation string describing the function. HANDLER is
2480 called when the function is invoked. COOKIE is an arbitrary
2481 pointer which is passed to HANDLER and is intended for "user
2482 data". */
2483 void
2484 add_internal_function (const char *name, const char *doc,
2485 internal_function_fn handler, void *cookie)
2486 {
2487 struct cmd_list_element *cmd;
2488 struct internal_function *ifn;
2489 struct internalvar *var = lookup_internalvar (name);
2490
2491 ifn = create_internal_function (name, handler, cookie);
2492 set_internalvar_function (var, ifn);
2493
2494 cmd = add_cmd (xstrdup (name), no_class, function_command, (char *) doc,
2495 &functionlist);
2496 cmd->destroyer = function_destroyer;
2497 }
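/* Illustrative sketch only -- the $_answer function and its handler
   below are made up, not part of GDB:

     static struct value *
     answer_handler (struct gdbarch *gdbarch,
                     const struct language_defn *language,
                     void *cookie, int argc, struct value **argv)
     {
       return value_from_longest (builtin_type (gdbarch)->builtin_int, 42);
     }

     add_internal_function ("_answer", _("Return 42."),
                            answer_handler, NULL);

   After such a registration, evaluating "$_answer ()" reaches
   answer_handler through call_internal_function above.  */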
2498
2499 /* Update VALUE before discarding OBJFILE. COPIED_TYPES is used to
2500 prevent cycles / duplicates. */
2501
2502 void
2503 preserve_one_value (struct value *value, struct objfile *objfile,
2504 htab_t copied_types)
2505 {
2506 if (TYPE_OBJFILE (value->type) == objfile)
2507 value->type = copy_type_recursive (objfile, value->type, copied_types);
2508
2509 if (TYPE_OBJFILE (value->enclosing_type) == objfile)
2510 value->enclosing_type = copy_type_recursive (objfile,
2511 value->enclosing_type,
2512 copied_types);
2513 }
2514
2515 /* Likewise for internal variable VAR. */
2516
2517 static void
2518 preserve_one_internalvar (struct internalvar *var, struct objfile *objfile,
2519 htab_t copied_types)
2520 {
2521 switch (var->kind)
2522 {
2523 case INTERNALVAR_INTEGER:
2524 if (var->u.integer.type && TYPE_OBJFILE (var->u.integer.type) == objfile)
2525 var->u.integer.type
2526 = copy_type_recursive (objfile, var->u.integer.type, copied_types);
2527 break;
2528
2529 case INTERNALVAR_VALUE:
2530 preserve_one_value (var->u.value, objfile, copied_types);
2531 break;
2532 }
2533 }
2534
2535 /* Update the internal variables and value history when OBJFILE is
2536 discarded; we must copy the types out of the objfile. New global types
2537 will be created for every convenience variable which currently points to
2538 this objfile's types, and the convenience variables will be adjusted to
2539 use the new global types. */
2540
2541 void
2542 preserve_values (struct objfile *objfile)
2543 {
2544 htab_t copied_types;
2545 struct value_history_chunk *cur;
2546 struct internalvar *var;
2547 int i;
2548
2549 /* Create the hash table. We allocate on the objfile's obstack, since
2550 it is soon to be deleted. */
2551 copied_types = create_copied_types_hash (objfile);
2552
2553 for (cur = value_history_chain; cur; cur = cur->next)
2554 for (i = 0; i < VALUE_HISTORY_CHUNK; i++)
2555 if (cur->values[i])
2556 preserve_one_value (cur->values[i], objfile, copied_types);
2557
2558 for (var = internalvars; var; var = var->next)
2559 preserve_one_internalvar (var, objfile, copied_types);
2560
2561 preserve_ext_lang_values (objfile, copied_types);
2562
2563 htab_delete (copied_types);
2564 }
2565
2566 static void
2567 show_convenience (char *ignore, int from_tty)
2568 {
2569 struct gdbarch *gdbarch = get_current_arch ();
2570 struct internalvar *var;
2571 int varseen = 0;
2572 struct value_print_options opts;
2573
2574 get_user_print_options (&opts);
2575 for (var = internalvars; var; var = var->next)
2576 {
2577
2578 if (!varseen)
2579 {
2580 varseen = 1;
2581 }
2582 printf_filtered (("$%s = "), var->name);
2583
2584 TRY
2585 {
2586 struct value *val;
2587
2588 val = value_of_internalvar (gdbarch, var);
2589 value_print (val, gdb_stdout, &opts);
2590 }
2591 CATCH (ex, RETURN_MASK_ERROR)
2592 {
2593 fprintf_filtered (gdb_stdout, _("<error: %s>"), ex.message);
2594 }
2595 END_CATCH
2596
2597 printf_filtered (("\n"));
2598 }
2599 if (!varseen)
2600 {
2601 /* This text does not mention convenience functions on purpose.
2602 The user can't create them except via Python, and if Python support
2603 is installed this message will never be printed ($_streq will
2604 exist). */
2605 printf_unfiltered (_("No debugger convenience variables now defined.\n"
2606 "Convenience variables have "
2607 "names starting with \"$\";\n"
2608 "use \"set\" as in \"set "
2609 "$foo = 5\" to define them.\n"));
2610 }
2611 }
2612 \f
2613 /* Return the TYPE_CODE_XMETHOD value corresponding to WORKER. */
2614
2615 struct value *
2616 value_of_xmethod (struct xmethod_worker *worker)
2617 {
2618 if (worker->value == NULL)
2619 {
2620 struct value *v;
2621
2622 v = allocate_value (builtin_type (target_gdbarch ())->xmethod);
2623 v->lval = lval_xcallable;
2624 v->location.xm_worker = worker;
2625 v->modifiable = 0;
2626 worker->value = v;
2627 }
2628
2629 return worker->value;
2630 }
2631
2632 /* Return the type of the result of TYPE_CODE_XMETHOD value METHOD. */
2633
2634 struct type *
2635 result_type_of_xmethod (struct value *method, int argc, struct value **argv)
2636 {
2637 gdb_assert (TYPE_CODE (value_type (method)) == TYPE_CODE_XMETHOD
2638 && method->lval == lval_xcallable && argc > 0);
2639
2640 return get_xmethod_result_type (method->location.xm_worker,
2641 argv[0], argv + 1, argc - 1);
2642 }
2643
2644 /* Call the xmethod corresponding to the TYPE_CODE_XMETHOD value METHOD. */
2645
2646 struct value *
2647 call_xmethod (struct value *method, int argc, struct value **argv)
2648 {
2649 gdb_assert (TYPE_CODE (value_type (method)) == TYPE_CODE_XMETHOD
2650 && method->lval == lval_xcallable && argc > 0);
2651
2652 return invoke_xmethod (method->location.xm_worker,
2653 argv[0], argv + 1, argc - 1);
2654 }
2655 \f
2656 /* Extract a value as a C number (either long or double).
2657 Knows how to convert fixed values to double, or
2658 floating values to long.
2659 Does not deallocate the value. */
2660
2661 LONGEST
2662 value_as_long (struct value *val)
2663 {
2664 /* This coerces arrays and functions, which is necessary (e.g.
2665 in disassemble_command). It also dereferences references, which
2666 I suspect is the most logical thing to do. */
2667 val = coerce_array (val);
2668 return unpack_long (value_type (val), value_contents (val));
2669 }
2670
2671 DOUBLEST
2672 value_as_double (struct value *val)
2673 {
2674 DOUBLEST foo;
2675 int inv;
2676
2677 foo = unpack_double (value_type (val), value_contents (val), &inv);
2678 if (inv)
2679 error (_("Invalid floating value found in program."));
2680 return foo;
2681 }
2682
2683 /* Extract a value as a C pointer. Does not deallocate the value.
2684 Note that val's type may not actually be a pointer; value_as_long
2685 handles all the cases. */
2686 CORE_ADDR
2687 value_as_address (struct value *val)
2688 {
2689 struct gdbarch *gdbarch = get_type_arch (value_type (val));
2690
2691 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
2692 whether we want this to be true eventually. */
2693 #if 0
2694 /* gdbarch_addr_bits_remove is wrong if we are being called for a
2695 non-address (e.g. argument to "signal", "info break", etc.), or
2696 for pointers to char, in which the low bits *are* significant. */
2697 return gdbarch_addr_bits_remove (gdbarch, value_as_long (val));
2698 #else
2699
2700 /* There are several targets (IA-64, PowerPC, and others) which
2701 don't represent pointers to functions as simply the address of
2702 the function's entry point. For example, on the IA-64, a
2703 function pointer points to a two-word descriptor, generated by
2704 the linker, which contains the function's entry point, and the
2705 value the IA-64 "global pointer" register should have --- to
2706 support position-independent code. The linker generates
2707 descriptors only for those functions whose addresses are taken.
2708
2709 On such targets, it's difficult for GDB to convert an arbitrary
2710 function address into a function pointer; it has to either find
2711 an existing descriptor for that function, or call malloc and
2712 build its own. On some targets, it is impossible for GDB to
2713 build a descriptor at all: the descriptor must contain a jump
2714 instruction; data memory cannot be executed; and code memory
2715 cannot be modified.
2716
2717 Upon entry to this function, if VAL is a value of type `function'
2718 (that is, TYPE_CODE (VALUE_TYPE (val)) == TYPE_CODE_FUNC), then
2719 value_address (val) is the address of the function. This is what
2720 you'll get if you evaluate an expression like `main'. The call
2721 to COERCE_ARRAY below actually does all the usual unary
2722 conversions, which includes converting values of type `function'
2723 to `pointer to function'. This is the challenging conversion
2724 discussed above. Then, `unpack_long' will convert that pointer
2725 back into an address.
2726
2727 So, suppose the user types `disassemble foo' on an architecture
2728 with a strange function pointer representation, on which GDB
2729 cannot build its own descriptors, and suppose further that `foo'
2730 has no linker-built descriptor. The address->pointer conversion
2731 will signal an error and prevent the command from running, even
2732 though the next step would have been to convert the pointer
2733 directly back into the same address.
2734
2735 The following shortcut avoids this whole mess. If VAL is a
2736 function, just return its address directly. */
2737 if (TYPE_CODE (value_type (val)) == TYPE_CODE_FUNC
2738 || TYPE_CODE (value_type (val)) == TYPE_CODE_METHOD)
2739 return value_address (val);
2740
2741 val = coerce_array (val);
2742
2743 /* Some architectures (e.g. Harvard) map instruction and data
2744 addresses onto a single large unified address space. For
2745 instance: an architecture may consider a large integer in the
2746 range 0x10000000 .. 0x1000ffff to already represent a data
2747 address (hence not need a pointer to address conversion) while
2748 a small integer would still need to be converted from integer to
2749 pointer to address. Just assume such architectures handle all
2750 integer conversions in a single function. */
2751
2752 /* JimB writes:
2753
2754 I think INTEGER_TO_ADDRESS is a good idea as proposed --- but we
2755 must admonish GDB hackers to make sure its behavior matches the
2756 compiler's, whenever possible.
2757
2758 In general, I think GDB should evaluate expressions the same way
2759 the compiler does. When the user copies an expression out of
2760 their source code and hands it to a `print' command, they should
2761 get the same value the compiler would have computed. Any
2762 deviation from this rule can cause major confusion and annoyance,
2763 and needs to be justified carefully. In other words, GDB doesn't
2764 really have the freedom to do these conversions in clever and
2765 useful ways.
2766
2767 AndrewC pointed out that users aren't complaining about how GDB
2768 casts integers to pointers; they are complaining that they can't
2769 take an address from a disassembly listing and give it to `x/i'.
2770 This is certainly important.
2771
2772 Adding an architecture method like integer_to_address() certainly
2773 makes it possible for GDB to "get it right" in all circumstances
2774 --- the target has complete control over how things get done, so
2775 people can Do The Right Thing for their target without breaking
2776 anyone else. The standard doesn't specify how integers get
2777 converted to pointers; usually, the ABI doesn't either, but
2778 ABI-specific code is a more reasonable place to handle it. */
2779
2780 if (TYPE_CODE (value_type (val)) != TYPE_CODE_PTR
2781 && TYPE_CODE (value_type (val)) != TYPE_CODE_REF
2782 && gdbarch_integer_to_address_p (gdbarch))
2783 return gdbarch_integer_to_address (gdbarch, value_type (val),
2784 value_contents (val));
2785
2786 return unpack_long (value_type (val), value_contents (val));
2787 #endif
2788 }
2789 \f
2790 /* Unpack raw data (copied from the debuggee, target byte order) at VALADDR
2791 as a long, or as a double, assuming the raw data is described
2792 by type TYPE. Knows how to convert different sizes of values
2793 and can convert between fixed and floating point. We don't assume
2794 any alignment for the raw data. Return value is in host byte order.
2795
2796 If you want functions and arrays to be coerced to pointers, and
2797 references to be dereferenced, call value_as_long() instead.
2798
2799 C++: It is assumed that the front-end has taken care of
2800 all matters concerning pointers to members. A pointer
2801 to member which reaches here is considered to be equivalent
2802 to an INT (of some size). After all, it is only an offset. */
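/* Worked example (assuming an 8-bit-per-byte target): for a 2-byte
   unsigned TYPE_CODE_INT whose contents are the bytes { 0x01, 0x02 },
   unpack_long returns 0x0102 (258) if the type's architecture is big
   endian, and 0x0201 (513) if it is little endian.  */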
2803
2804 LONGEST
2805 unpack_long (struct type *type, const gdb_byte *valaddr)
2806 {
2807 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
2808 enum type_code code = TYPE_CODE (type);
2809 int len = TYPE_LENGTH (type);
2810 int nosign = TYPE_UNSIGNED (type);
2811
2812 switch (code)
2813 {
2814 case TYPE_CODE_TYPEDEF:
2815 return unpack_long (check_typedef (type), valaddr);
2816 case TYPE_CODE_ENUM:
2817 case TYPE_CODE_FLAGS:
2818 case TYPE_CODE_BOOL:
2819 case TYPE_CODE_INT:
2820 case TYPE_CODE_CHAR:
2821 case TYPE_CODE_RANGE:
2822 case TYPE_CODE_MEMBERPTR:
2823 if (nosign)
2824 return extract_unsigned_integer (valaddr, len, byte_order);
2825 else
2826 return extract_signed_integer (valaddr, len, byte_order);
2827
2828 case TYPE_CODE_FLT:
2829 return extract_typed_floating (valaddr, type);
2830
2831 case TYPE_CODE_DECFLOAT:
2832 /* libdecnumber has a function to convert from decimal to integer, but
2833 it doesn't work when the decimal number has a fractional part. */
2834 return decimal_to_doublest (valaddr, len, byte_order);
2835
2836 case TYPE_CODE_PTR:
2837 case TYPE_CODE_REF:
2838 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
2839 whether we want this to be true eventually. */
2840 return extract_typed_address (valaddr, type);
2841
2842 default:
2843 error (_("Value can't be converted to integer."));
2844 }
2845 return 0; /* Placate lint. */
2846 }
2847
2848 /* Return a double value from the specified type and address.
2849 INVP points to an int which is set to 0 for valid value,
2850 1 for invalid value (bad float format). In either case,
2851 the returned double is OK to use. Argument is in target
2852 format, result is in host format. */
2853
2854 DOUBLEST
2855 unpack_double (struct type *type, const gdb_byte *valaddr, int *invp)
2856 {
2857 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
2858 enum type_code code;
2859 int len;
2860 int nosign;
2861
2862 *invp = 0; /* Assume valid. */
2863 type = check_typedef (type);
2864 code = TYPE_CODE (type);
2865 len = TYPE_LENGTH (type);
2866 nosign = TYPE_UNSIGNED (type);
2867 if (code == TYPE_CODE_FLT)
2868 {
2869 /* NOTE: cagney/2002-02-19: There was a test here to see if the
2870 floating-point value was valid (using the macro
2871 INVALID_FLOAT). That test/macro have been removed.
2872
2873 It turns out that only the VAX defined this macro and then
2874 only in a non-portable way. Fixing the portability problem
2875 wouldn't help since the VAX floating-point code is also badly
2876 bit-rotten. The target needs to add definitions for the
2877 methods gdbarch_float_format and gdbarch_double_format - these
2878 exactly describe the target floating-point format. The
2879 problem here is that the corresponding floatformat_vax_f and
2880 floatformat_vax_d values these methods should be set to are
2881 also not defined either. Oops!
2882
2883 Hopefully someone will add both the missing floatformat
2884 definitions and the new cases for floatformat_is_valid (). */
2885
2886 if (!floatformat_is_valid (floatformat_from_type (type), valaddr))
2887 {
2888 *invp = 1;
2889 return 0.0;
2890 }
2891
2892 return extract_typed_floating (valaddr, type);
2893 }
2894 else if (code == TYPE_CODE_DECFLOAT)
2895 return decimal_to_doublest (valaddr, len, byte_order);
2896 else if (nosign)
2897 {
2898 /* Unsigned -- be sure we compensate for signed LONGEST. */
2899 return (ULONGEST) unpack_long (type, valaddr);
2900 }
2901 else
2902 {
2903 /* Signed -- we are OK with unpack_long. */
2904 return unpack_long (type, valaddr);
2905 }
2906 }
2907
2908 /* Unpack raw data (copied from the debuggee, target byte order) at VALADDR
2909 as a CORE_ADDR, assuming the raw data is described by type TYPE.
2910 We don't assume any alignment for the raw data. Return value is in
2911 host byte order.
2912
2913 If you want functions and arrays to be coerced to pointers, and
2914 references to be dereferenced, call value_as_address() instead.
2915
2916 C++: It is assumed that the front-end has taken care of
2917 all matters concerning pointers to members. A pointer
2918 to member which reaches here is considered to be equivalent
2919 to an INT (of some size). After all, it is only an offset. */
2920
2921 CORE_ADDR
2922 unpack_pointer (struct type *type, const gdb_byte *valaddr)
2923 {
2924 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
2925 whether we want this to be true eventually. */
2926 return unpack_long (type, valaddr);
2927 }
2928
2929 \f
2930 /* Get the value of the FIELDNO'th field (which must be static) of
2931 TYPE. */
2932
2933 struct value *
2934 value_static_field (struct type *type, int fieldno)
2935 {
2936 struct value *retval;
2937
2938 switch (TYPE_FIELD_LOC_KIND (type, fieldno))
2939 {
2940 case FIELD_LOC_KIND_PHYSADDR:
2941 retval = value_at_lazy (TYPE_FIELD_TYPE (type, fieldno),
2942 TYPE_FIELD_STATIC_PHYSADDR (type, fieldno));
2943 break;
2944 case FIELD_LOC_KIND_PHYSNAME:
2945 {
2946 const char *phys_name = TYPE_FIELD_STATIC_PHYSNAME (type, fieldno);
2947 /* TYPE_FIELD_NAME (type, fieldno); */
2948 struct symbol *sym = lookup_symbol (phys_name, 0, VAR_DOMAIN, 0);
2949
2950 if (sym == NULL)
2951 {
2952 /* With some compilers, e.g. HP aCC, static data members are
2953 reported as non-debuggable symbols. */
2954 struct bound_minimal_symbol msym
2955 = lookup_minimal_symbol (phys_name, NULL, NULL);
2956
2957 if (!msym.minsym)
2958 return allocate_optimized_out_value (type);
2959 else
2960 {
2961 retval = value_at_lazy (TYPE_FIELD_TYPE (type, fieldno),
2962 BMSYMBOL_VALUE_ADDRESS (msym));
2963 }
2964 }
2965 else
2966 retval = value_of_variable (sym, NULL);
2967 break;
2968 }
2969 default:
2970 gdb_assert_not_reached ("unexpected field location kind");
2971 }
2972
2973 return retval;
2974 }
2975
2976 /* Change the enclosing type of a value object VAL to NEW_ENCL_TYPE.
2977 You have to be careful here, since the size of the data area for the value
2978 is set by the length of the enclosing type. So if NEW_ENCL_TYPE is bigger
2979 than the old enclosing type, you have to allocate more space for the
2980 data. */
2981
2982 void
2983 set_value_enclosing_type (struct value *val, struct type *new_encl_type)
2984 {
2985 if (TYPE_LENGTH (new_encl_type) > TYPE_LENGTH (value_enclosing_type (val)))
2986 val->contents =
2987 (gdb_byte *) xrealloc (val->contents, TYPE_LENGTH (new_encl_type));
2988
2989 val->enclosing_type = new_encl_type;
2990 }
2991
2992 /* Given a value ARG1 (offset by OFFSET bytes)
2993 of a struct or union type ARG_TYPE,
2994 extract and return the value of one of its (non-static) fields.
2995 FIELDNO says which field. */
2996
2997 struct value *
2998 value_primitive_field (struct value *arg1, int offset,
2999 int fieldno, struct type *arg_type)
3000 {
3001 struct value *v;
3002 struct type *type;
3003
3004 arg_type = check_typedef (arg_type);
3005 type = TYPE_FIELD_TYPE (arg_type, fieldno);
3006
3007 /* Call check_typedef on our type to make sure that, if TYPE
3008 is a TYPE_CODE_TYPEDEF, its length is set to the length
3009 of the target type instead of zero. However, we do not
3010 replace the typedef type by the target type, because we want
3011 to keep the typedef in order to be able to print the type
3012 description correctly. */
3013 check_typedef (type);
3014
3015 if (TYPE_FIELD_BITSIZE (arg_type, fieldno))
3016 {
3017 /* Handle packed fields.
3018
3019 Create a new value for the bitfield, with bitpos and bitsize
3020 set. If possible, arrange offset and bitpos so that we can
3021 do a single aligned read of the size of the containing type.
3022 Otherwise, adjust offset to the byte containing the first
3023 bit. Assume that the address, offset, and embedded offset
3024 are sufficiently aligned. */
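/* For instance (illustrative numbers): with a 4-byte container type
   (container_bitsize == 32), a field at bitpos 37 with bitsize 3
   satisfies (37 % 32) + 3 <= 32, so v->bitpos becomes 5 and the
   (bitpos - v->bitpos) / 8 == 4 byte adjustment below folds into
   v->offset, keeping the read aligned on the containing type.  */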
3025
3026 int bitpos = TYPE_FIELD_BITPOS (arg_type, fieldno);
3027 int container_bitsize = TYPE_LENGTH (type) * 8;
3028
3029 v = allocate_value_lazy (type);
3030 v->bitsize = TYPE_FIELD_BITSIZE (arg_type, fieldno);
3031 if ((bitpos % container_bitsize) + v->bitsize <= container_bitsize
3032 && TYPE_LENGTH (type) <= (int) sizeof (LONGEST))
3033 v->bitpos = bitpos % container_bitsize;
3034 else
3035 v->bitpos = bitpos % 8;
3036 v->offset = (value_embedded_offset (arg1)
3037 + offset
3038 + (bitpos - v->bitpos) / 8);
3039 set_value_parent (v, arg1);
3040 if (!value_lazy (arg1))
3041 value_fetch_lazy (v);
3042 }
3043 else if (fieldno < TYPE_N_BASECLASSES (arg_type))
3044 {
3045 /* This field is actually a base subobject, so preserve the
3046 entire object's contents for later references to virtual
3047 bases, etc. */
3048 int boffset;
3049
3050 /* Lazy register values with offsets are not supported. */
3051 if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
3052 value_fetch_lazy (arg1);
3053
3054 /* We special case virtual inheritance here because this
3055 requires access to the contents, which we would rather avoid
3056 for references to ordinary fields of unavailable values. */
3057 if (BASETYPE_VIA_VIRTUAL (arg_type, fieldno))
3058 boffset = baseclass_offset (arg_type, fieldno,
3059 value_contents (arg1),
3060 value_embedded_offset (arg1),
3061 value_address (arg1),
3062 arg1);
3063 else
3064 boffset = TYPE_FIELD_BITPOS (arg_type, fieldno) / 8;
3065
3066 if (value_lazy (arg1))
3067 v = allocate_value_lazy (value_enclosing_type (arg1));
3068 else
3069 {
3070 v = allocate_value (value_enclosing_type (arg1));
3071 value_contents_copy_raw (v, 0, arg1, 0,
3072 TYPE_LENGTH (value_enclosing_type (arg1)));
3073 }
3074 v->type = type;
3075 v->offset = value_offset (arg1);
3076 v->embedded_offset = offset + value_embedded_offset (arg1) + boffset;
3077 }
3078 else
3079 {
3080 /* Plain old data member.  */
3081 offset += TYPE_FIELD_BITPOS (arg_type, fieldno) / 8;
3082
3083 /* Lazy register values with offsets are not supported. */
3084 if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
3085 value_fetch_lazy (arg1);
3086
3087 if (value_lazy (arg1))
3088 v = allocate_value_lazy (type);
3089 else
3090 {
3091 v = allocate_value (type);
3092 value_contents_copy_raw (v, value_embedded_offset (v),
3093 arg1, value_embedded_offset (arg1) + offset,
3094 TYPE_LENGTH (type));
3095 }
3096 v->offset = (value_offset (arg1) + offset
3097 + value_embedded_offset (arg1));
3098 }
3099 set_value_component_location (v, arg1);
3100 VALUE_REGNUM (v) = VALUE_REGNUM (arg1);
3101 VALUE_FRAME_ID (v) = VALUE_FRAME_ID (arg1);
3102 return v;
3103 }
3104
3105 /* Given a value ARG1 of a struct or union type,
3106 extract and return the value of one of its (non-static) fields.
3107 FIELDNO says which field. */
3108
3109 struct value *
3110 value_field (struct value *arg1, int fieldno)
3111 {
3112 return value_primitive_field (arg1, 0, fieldno, value_type (arg1));
3113 }
3114
3115 /* Return a non-virtual function as a value.
3116 F is the list of member functions which contains the desired method.
3117 J is an index into F which provides the desired method.
3118
3119 We only use the symbol for its address, so be happy with either a
3120 full symbol or a minimal symbol. */
3121
3122 struct value *
3123 value_fn_field (struct value **arg1p, struct fn_field *f,
3124 int j, struct type *type,
3125 int offset)
3126 {
3127 struct value *v;
3128 struct type *ftype = TYPE_FN_FIELD_TYPE (f, j);
3129 const char *physname = TYPE_FN_FIELD_PHYSNAME (f, j);
3130 struct symbol *sym;
3131 struct bound_minimal_symbol msym;
3132
3133 sym = lookup_symbol (physname, 0, VAR_DOMAIN, 0);
3134 if (sym != NULL)
3135 {
3136 memset (&msym, 0, sizeof (msym));
3137 }
3138 else
3139 {
3140 gdb_assert (sym == NULL);
3141 msym = lookup_bound_minimal_symbol (physname);
3142 if (msym.minsym == NULL)
3143 return NULL;
3144 }
3145
3146 v = allocate_value (ftype);
3147 if (sym)
3148 {
3149 set_value_address (v, BLOCK_START (SYMBOL_BLOCK_VALUE (sym)));
3150 }
3151 else
3152 {
3153 /* The minimal symbol might point to a function descriptor;
3154 resolve it to the actual code address instead. */
3155 struct objfile *objfile = msym.objfile;
3156 struct gdbarch *gdbarch = get_objfile_arch (objfile);
3157
3158 set_value_address (v,
3159 gdbarch_convert_from_func_ptr_addr
3160 (gdbarch, BMSYMBOL_VALUE_ADDRESS (msym), &current_target));
3161 }
3162
3163 if (arg1p)
3164 {
3165 if (type != value_type (*arg1p))
3166 *arg1p = value_ind (value_cast (lookup_pointer_type (type),
3167 value_addr (*arg1p)));
3168
3169 /* Move the `this' pointer according to the offset.
3170 VALUE_OFFSET (*arg1p) += offset; */
3171 }
3172
3173 return v;
3174 }
3175
3176 \f
3177
3178 /* Unpack a bitfield of the specified FIELD_TYPE, from the object at
3179 VALADDR, and store the result in *RESULT.
3180 The bitfield starts at BITPOS bits and contains BITSIZE bits.
3181
3182 Extracting bits depends on endianness of the machine. Compute the
3183 number of least significant bits to discard. For big endian machines,
3184 we compute the total number of bits in the anonymous object, subtract
3185 off the bit count from the MSB of the object to the MSB of the
3186 bitfield, then the size of the bitfield, which leaves the LSB discard
3187 count. For little endian machines, the discard count is simply the
3188 number of bits from the LSB of the anonymous object to the LSB of the
3189 bitfield.
3190
3191 If the field is signed, we also do sign extension. */
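/* Worked example (assuming 8-bit bytes): for BITPOS == 10 and
   BITSIZE == 5 we read one byte at offset 1; on a little endian
   (bits) machine we then discard BITPOS % 8 == 2 low bits before
   masking down to 5 bits, while on a bits-big-endian machine the
   discard count is instead 8 - 2 - 5 == 1.  Sign extension follows
   if FIELD_TYPE is signed.  */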
3192
3193 static LONGEST
3194 unpack_bits_as_long (struct type *field_type, const gdb_byte *valaddr,
3195 int bitpos, int bitsize)
3196 {
3197 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (field_type));
3198 ULONGEST val;
3199 ULONGEST valmask;
3200 int lsbcount;
3201 int bytes_read;
3202 int read_offset;
3203
3204 /* Read the minimum number of bytes required; there may not be
3205 enough bytes to read an entire ULONGEST. */
3206 field_type = check_typedef (field_type);
3207 if (bitsize)
3208 bytes_read = ((bitpos % 8) + bitsize + 7) / 8;
3209 else
3210 bytes_read = TYPE_LENGTH (field_type);
3211
3212 read_offset = bitpos / 8;
3213
3214 val = extract_unsigned_integer (valaddr + read_offset,
3215 bytes_read, byte_order);
3216
3217 /* Extract bits. See comment above. */
3218
3219 if (gdbarch_bits_big_endian (get_type_arch (field_type)))
3220 lsbcount = (bytes_read * 8 - bitpos % 8 - bitsize);
3221 else
3222 lsbcount = (bitpos % 8);
3223 val >>= lsbcount;
3224
3225 /* If the field does not entirely fill a LONGEST, then zero the sign bits.
3226 If the field is signed, and is negative, then sign extend. */
3227
3228 if ((bitsize > 0) && (bitsize < 8 * (int) sizeof (val)))
3229 {
3230 valmask = (((ULONGEST) 1) << bitsize) - 1;
3231 val &= valmask;
3232 if (!TYPE_UNSIGNED (field_type))
3233 {
3234 if (val & (valmask ^ (valmask >> 1)))
3235 {
3236 val |= ~valmask;
3237 }
3238 }
3239 }
3240
3241 return val;
3242 }
3243
3244 /* Unpack a field FIELDNO of the specified TYPE, from the object at
3245 VALADDR + EMBEDDED_OFFSET. VALADDR points to the contents of
3246 ORIGINAL_VALUE, which must not be NULL. See
3247 unpack_value_bits_as_long for more details. */
3248
3249 int
3250 unpack_value_field_as_long (struct type *type, const gdb_byte *valaddr,
3251 int embedded_offset, int fieldno,
3252 const struct value *val, LONGEST *result)
3253 {
3254 int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3255 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3256 struct type *field_type = TYPE_FIELD_TYPE (type, fieldno);
3257 int bit_offset;
3258
3259 gdb_assert (val != NULL);
3260
3261 bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos;
3262 if (value_bits_any_optimized_out (val, bit_offset, bitsize)
3263 || !value_bits_available (val, bit_offset, bitsize))
3264 return 0;
3265
3266 *result = unpack_bits_as_long (field_type, valaddr + embedded_offset,
3267 bitpos, bitsize);
3268 return 1;
3269 }
3270
3271 /* Unpack a field FIELDNO of the specified TYPE, from the anonymous
3272 object at VALADDR. See unpack_bits_as_long for more details. */
3273
3274 LONGEST
3275 unpack_field_as_long (struct type *type, const gdb_byte *valaddr, int fieldno)
3276 {
3277 int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3278 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3279 struct type *field_type = TYPE_FIELD_TYPE (type, fieldno);
3280
3281 return unpack_bits_as_long (field_type, valaddr, bitpos, bitsize);
3282 }
3283
3284 /* Unpack a bitfield of BITSIZE bits found at BITPOS in the object at
3285 VALADDR + EMBEDDEDOFFSET that has the type of DEST_VAL and store
3286 the contents in DEST_VAL, zero or sign extending if the type of
3287 DEST_VAL is wider than BITSIZE. VALADDR points to the contents of
3288 VAL. If the VAL's contents required to extract the bitfield from
3289 are unavailable/optimized out, DEST_VAL is correspondingly
3290 marked unavailable/optimized out. */
3291
3292 void
3293 unpack_value_bitfield (struct value *dest_val,
3294 int bitpos, int bitsize,
3295 const gdb_byte *valaddr, int embedded_offset,
3296 const struct value *val)
3297 {
3298 enum bfd_endian byte_order;
3299 int src_bit_offset;
3300 int dst_bit_offset;
3301 LONGEST num;
3302 struct type *field_type = value_type (dest_val);
3303
3304 /* First, unpack and sign extend the bitfield as if it was wholly
3305 available. Invalid/unavailable bits are read as zero, but that's
3306 OK, as they'll end up marked below. */
3307 byte_order = gdbarch_byte_order (get_type_arch (field_type));
3308 num = unpack_bits_as_long (field_type, valaddr + embedded_offset,
3309 bitpos, bitsize);
3310 store_signed_integer (value_contents_raw (dest_val),
3311 TYPE_LENGTH (field_type), byte_order, num);
3312
3313 /* Now copy the optimized out / unavailability ranges to the right
3314 bits. */
3315 src_bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos;
3316 if (byte_order == BFD_ENDIAN_BIG)
3317 dst_bit_offset = TYPE_LENGTH (field_type) * TARGET_CHAR_BIT - bitsize;
3318 else
3319 dst_bit_offset = 0;
3320 value_ranges_copy_adjusted (dest_val, dst_bit_offset,
3321 val, src_bit_offset, bitsize);
3322 }
3323
3324 /* Return a new value with type TYPE, which is FIELDNO field of the
3325 object at VALADDR + EMBEDDEDOFFSET. VALADDR points to the contents
3326 of VAL. If the VAL's contents required to extract the bitfield
3327 from are unavailable/optimized out, the new value is
3328 correspondingly marked unavailable/optimized out. */
3329
3330 struct value *
3331 value_field_bitfield (struct type *type, int fieldno,
3332 const gdb_byte *valaddr,
3333 int embedded_offset, const struct value *val)
3334 {
3335 int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3336 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3337 struct value *res_val = allocate_value (TYPE_FIELD_TYPE (type, fieldno));
3338
3339 unpack_value_bitfield (res_val, bitpos, bitsize,
3340 valaddr, embedded_offset, val);
3341
3342 return res_val;
3343 }
3344
3345 /* Modify the value of a bitfield. ADDR points to a block of memory in
3346 target byte order; the bitfield starts in the byte pointed to. FIELDVAL
3347 is the desired value of the field, in host byte order. BITPOS and BITSIZE
3348 indicate which bits (in target bit order) comprise the bitfield.
3349 Requires 0 < BITSIZE <= lbits, 0 <= BITPOS % 8 + BITSIZE <= lbits, and
3350 0 <= BITPOS, where lbits is the size of a LONGEST in bits. */
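/* Worked example (little endian bit numbering, illustrative values):
   for BITPOS == 3, BITSIZE == 4 and FIELDVAL == 5, the mask is 0xf,
   BYTESIZE is (3 + 4 + 7) / 8 == 1, and that single byte is rewritten
   as (oword & ~(0xf << 3)) | (5 << 3), leaving bits outside the field
   untouched.  */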
3351
3352 void
3353 modify_field (struct type *type, gdb_byte *addr,
3354 LONGEST fieldval, int bitpos, int bitsize)
3355 {
3356 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
3357 ULONGEST oword;
3358 ULONGEST mask = (ULONGEST) -1 >> (8 * sizeof (ULONGEST) - bitsize);
3359 int bytesize;
3360
3361 /* Normalize BITPOS. */
3362 addr += bitpos / 8;
3363 bitpos %= 8;
3364
3365 /* If a negative fieldval fits in the field in question, chop
3366 off the sign extension bits. */
3367 if ((~fieldval & ~(mask >> 1)) == 0)
3368 fieldval &= mask;
3369
3370 /* Warn if value is too big to fit in the field in question. */
3371 if (0 != (fieldval & ~mask))
3372 {
3373 /* FIXME: would like to include fieldval in the message, but
3374 we don't have a sprintf_longest. */
3375 warning (_("Value does not fit in %d bits."), bitsize);
3376
3377 /* Truncate it, otherwise adjoining fields may be corrupted. */
3378 fieldval &= mask;
3379 }
3380
3381 /* Ensure no bytes outside of the modified ones get accessed as it may cause
3382 false valgrind reports. */
3383
3384 bytesize = (bitpos + bitsize + 7) / 8;
3385 oword = extract_unsigned_integer (addr, bytesize, byte_order);
3386
3387 /* Shifting for bit field depends on endianness of the target machine. */
3388 if (gdbarch_bits_big_endian (get_type_arch (type)))
3389 bitpos = bytesize * 8 - bitpos - bitsize;
3390
3391 oword &= ~(mask << bitpos);
3392 oword |= fieldval << bitpos;
3393
3394 store_unsigned_integer (addr, bytesize, byte_order, oword);
3395 }
3396 \f
3397 /* Pack NUM into BUF using a target format of TYPE. */
3398
3399 void
3400 pack_long (gdb_byte *buf, struct type *type, LONGEST num)
3401 {
3402 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
3403 int len;
3404
3405 type = check_typedef (type);
3406 len = TYPE_LENGTH (type);
3407
3408 switch (TYPE_CODE (type))
3409 {
3410 case TYPE_CODE_INT:
3411 case TYPE_CODE_CHAR:
3412 case TYPE_CODE_ENUM:
3413 case TYPE_CODE_FLAGS:
3414 case TYPE_CODE_BOOL:
3415 case TYPE_CODE_RANGE:
3416 case TYPE_CODE_MEMBERPTR:
3417 store_signed_integer (buf, len, byte_order, num);
3418 break;
3419
3420 case TYPE_CODE_REF:
3421 case TYPE_CODE_PTR:
3422 store_typed_address (buf, type, (CORE_ADDR) num);
3423 break;
3424
3425 default:
3426 error (_("Unexpected type (%d) encountered for integer constant."),
3427 TYPE_CODE (type));
3428 }
3429 }
3430
3431
3432 /* Pack unsigned NUM into BUF using a target format of TYPE. */
3433
3434 static void
3435 pack_unsigned_long (gdb_byte *buf, struct type *type, ULONGEST num)
3436 {
3437 int len;
3438 enum bfd_endian byte_order;
3439
3440 type = check_typedef (type);
3441 len = TYPE_LENGTH (type);
3442 byte_order = gdbarch_byte_order (get_type_arch (type));
3443
3444 switch (TYPE_CODE (type))
3445 {
3446 case TYPE_CODE_INT:
3447 case TYPE_CODE_CHAR:
3448 case TYPE_CODE_ENUM:
3449 case TYPE_CODE_FLAGS:
3450 case TYPE_CODE_BOOL:
3451 case TYPE_CODE_RANGE:
3452 case TYPE_CODE_MEMBERPTR:
3453 store_unsigned_integer (buf, len, byte_order, num);
3454 break;
3455
3456 case TYPE_CODE_REF:
3457 case TYPE_CODE_PTR:
3458 store_typed_address (buf, type, (CORE_ADDR) num);
3459 break;
3460
3461 default:
3462 error (_("Unexpected type (%d) encountered "
3463 "for unsigned integer constant."),
3464 TYPE_CODE (type));
3465 }
3466 }
3467
3468
3469 /* Convert C numbers into newly allocated values. */
3470
3471 struct value *
3472 value_from_longest (struct type *type, LONGEST num)
3473 {
3474 struct value *val = allocate_value (type);
3475
3476 pack_long (value_contents_raw (val), type, num);
3477 return val;
3478 }
3479
3480
3481 /* Convert C unsigned numbers into newly allocated values. */
3482
3483 struct value *
3484 value_from_ulongest (struct type *type, ULONGEST num)
3485 {
3486 struct value *val = allocate_value (type);
3487
3488 pack_unsigned_long (value_contents_raw (val), type, num);
3489
3490 return val;
3491 }
3492
3493
3494 /* Create a value representing a pointer of type TYPE to the address
3495 ADDR. */
3496
3497 struct value *
3498 value_from_pointer (struct type *type, CORE_ADDR addr)
3499 {
3500 struct value *val = allocate_value (type);
3501
3502 store_typed_address (value_contents_raw (val),
3503 check_typedef (type), addr);
3504 return val;
3505 }
3506
3507
3508 /* Create a value of type TYPE whose contents come from VALADDR, if it
3509 is non-null, and whose memory address (in the inferior) is
3510 ADDRESS. The type of the created value may differ from the passed
3511 type TYPE. Make sure to retrieve the value's new type after this call.
3512 Note that TYPE is not passed through resolve_dynamic_type; this is
3513 a special API intended for use only by Ada. */
3514
3515 struct value *
3516 value_from_contents_and_address_unresolved (struct type *type,
3517 const gdb_byte *valaddr,
3518 CORE_ADDR address)
3519 {
3520 struct value *v;
3521
3522 if (valaddr == NULL)
3523 v = allocate_value_lazy (type);
3524 else
3525 v = value_from_contents (type, valaddr);
3526 set_value_address (v, address);
3527 VALUE_LVAL (v) = lval_memory;
3528 return v;
3529 }
3530
3531 /* Create a value of type TYPE whose contents come from VALADDR, if it
3532 is non-null, and whose memory address (in the inferior) is
3533 ADDRESS. The type of the created value may differ from the passed
3534 type TYPE. Make sure to retrieve the value's new type after this call. */
3535
3536 struct value *
3537 value_from_contents_and_address (struct type *type,
3538 const gdb_byte *valaddr,
3539 CORE_ADDR address)
3540 {
3541 struct type *resolved_type = resolve_dynamic_type (type, valaddr, address);
3542 struct type *resolved_type_no_typedef = check_typedef (resolved_type);
3543 struct value *v;
3544
3545 if (valaddr == NULL)
3546 v = allocate_value_lazy (resolved_type);
3547 else
3548 v = value_from_contents (resolved_type, valaddr);
3549 if (TYPE_DATA_LOCATION (resolved_type_no_typedef) != NULL
3550 && TYPE_DATA_LOCATION_KIND (resolved_type_no_typedef) == PROP_CONST)
3551 address = TYPE_DATA_LOCATION_ADDR (resolved_type_no_typedef);
3552 set_value_address (v, address);
3553 VALUE_LVAL (v) = lval_memory;
3554 return v;
3555 }
3556
3557 /* Create a value of type TYPE holding the contents CONTENTS.
3558 The new value is `not_lval'. */
3559
3560 struct value *
3561 value_from_contents (struct type *type, const gdb_byte *contents)
3562 {
3563 struct value *result;
3564
3565 result = allocate_value (type);
3566 memcpy (value_contents_raw (result), contents, TYPE_LENGTH (type));
3567 return result;
3568 }
3569
3570 struct value *
3571 value_from_double (struct type *type, DOUBLEST num)
3572 {
3573 struct value *val = allocate_value (type);
3574 struct type *base_type = check_typedef (type);
3575 enum type_code code = TYPE_CODE (base_type);
3576
3577 if (code == TYPE_CODE_FLT)
3578 {
3579 store_typed_floating (value_contents_raw (val), base_type, num);
3580 }
3581 else
3582 error (_("Unexpected type encountered for floating constant."));
3583
3584 return val;
3585 }
3586
3587 struct value *
3588 value_from_decfloat (struct type *type, const gdb_byte *dec)
3589 {
3590 struct value *val = allocate_value (type);
3591
3592 memcpy (value_contents_raw (val), dec, TYPE_LENGTH (type));
3593 return val;
3594 }
3595
3596 /* Extract a value from the value history. Input will be of the form
3597 $digits or $$digits. See block comment above 'write_dollar_variable'
3598 for details. */
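/* For instance, "$7" refers to history item 7, while (as the comments
   below note) "$" maps to index 0, "$$" to index -1 and "$$3" to
   index -3; access_value_history treats zero and negative indexes as
   counting back from the most recent value.  */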
3599
3600 struct value *
3601 value_from_history_ref (const char *h, const char **endp)
3602 {
3603 int index, len;
3604
3605 if (h[0] == '$')
3606 len = 1;
3607 else
3608 return NULL;
3609
3610 if (h[1] == '$')
3611 len = 2;
3612
3613 /* Find length of numeral string. */
3614 for (; isdigit (h[len]); len++)
3615 ;
3616
3617 /* Make sure numeral string is not part of an identifier. */
3618 if (h[len] == '_' || isalpha (h[len]))
3619 return NULL;
3620
3621 /* Now collect the index value. */
3622 if (h[1] == '$')
3623 {
3624 if (len == 2)
3625 {
3626 /* For some bizarre reason, "$$" is equivalent to "$$1",
3627 rather than to "$$0" as it ought to be! */
3628 index = -1;
3629 *endp += len;
3630 }
3631 else
3632 {
3633 char *local_end;
3634
3635 index = -strtol (&h[2], &local_end, 10);
3636 *endp = local_end;
3637 }
3638 }
3639 else
3640 {
3641 if (len == 1)
3642 {
3643 /* "$" is equivalent to "$0". */
3644 index = 0;
3645 *endp += len;
3646 }
3647 else
3648 {
3649 char *local_end;
3650
3651 index = strtol (&h[1], &local_end, 10);
3652 *endp = local_end;
3653 }
3654 }
3655
3656 return access_value_history (index);
3657 }
3658
3659 struct value *
3660 coerce_ref_if_computed (const struct value *arg)
3661 {
3662 const struct lval_funcs *funcs;
3663
3664 if (TYPE_CODE (check_typedef (value_type (arg))) != TYPE_CODE_REF)
3665 return NULL;
3666
3667 if (value_lval_const (arg) != lval_computed)
3668 return NULL;
3669
3670 funcs = value_computed_funcs (arg);
3671 if (funcs->coerce_ref == NULL)
3672 return NULL;
3673
3674 return funcs->coerce_ref (arg);
3675 }
3676
3677 /* Look at value.h for description. */
3678
3679 struct value *
3680 readjust_indirect_value_type (struct value *value, struct type *enc_type,
3681 struct type *original_type,
3682 struct value *original_value)
3683 {
3684 /* Re-adjust type. */
3685 deprecated_set_value_type (value, TYPE_TARGET_TYPE (original_type));
3686
3687 /* Add embedding info. */
3688 set_value_enclosing_type (value, enc_type);
3689 set_value_embedded_offset (value, value_pointed_to_offset (original_value));
3690
3691 /* We may be pointing to an object of some derived type. */
3692 return value_full_object (value, NULL, 0, 0, 0);
3693 }
3694
3695 struct value *
3696 coerce_ref (struct value *arg)
3697 {
3698 struct type *value_type_arg_tmp = check_typedef (value_type (arg));
3699 struct value *retval;
3700 struct type *enc_type;
3701
3702 retval = coerce_ref_if_computed (arg);
3703 if (retval)
3704 return retval;
3705
3706 if (TYPE_CODE (value_type_arg_tmp) != TYPE_CODE_REF)
3707 return arg;
3708
3709 enc_type = check_typedef (value_enclosing_type (arg));
3710 enc_type = TYPE_TARGET_TYPE (enc_type);
3711
3712 retval = value_at_lazy (enc_type,
3713 unpack_pointer (value_type (arg),
3714 value_contents (arg)));
3715 enc_type = value_type (retval);
3716 return readjust_indirect_value_type (retval, enc_type,
3717 value_type_arg_tmp, arg);
3718 }
3719
3720 struct value *
3721 coerce_array (struct value *arg)
3722 {
3723 struct type *type;
3724
3725 arg = coerce_ref (arg);
3726 type = check_typedef (value_type (arg));
3727
3728 switch (TYPE_CODE (type))
3729 {
3730 case TYPE_CODE_ARRAY:
3731 if (!TYPE_VECTOR (type) && current_language->c_style_arrays)
3732 arg = value_coerce_array (arg);
3733 break;
3734 case TYPE_CODE_FUNC:
3735 arg = value_coerce_function (arg);
3736 break;
3737 }
3738 return arg;
3739 }
3740 \f
3741
3742 /* Return the return value convention that will be used for the
3743 specified type. */
3744
3745 enum return_value_convention
3746 struct_return_convention (struct gdbarch *gdbarch,
3747 struct value *function, struct type *value_type)
3748 {
3749 enum type_code code = TYPE_CODE (value_type);
3750
3751 if (code == TYPE_CODE_ERROR)
3752 error (_("Function return type unknown."));
3753
3754 /* Probe the architecture for the return-value convention. */
3755 return gdbarch_return_value (gdbarch, function, value_type,
3756 NULL, NULL, NULL);
3757 }
3758
3759 /* Return true if the function returning the specified type is using
3760 the convention of returning structures in memory (passing in the
3761 address as a hidden first parameter). */
3762
3763 int
3764 using_struct_return (struct gdbarch *gdbarch,
3765 struct value *function, struct type *value_type)
3766 {
3767 if (TYPE_CODE (value_type) == TYPE_CODE_VOID)
3768 /* A void return value is never in memory. See also corresponding
3769 code in "print_return_value". */
3770 return 0;
3771
3772 return (struct_return_convention (gdbarch, function, value_type)
3773 != RETURN_VALUE_REGISTER_CONVENTION);
3774 }
3775
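/* A minimal sketch (not compiled) of how a caller of an inferior
   function might consult the predicate above, in the spirit of
   call_function_by_hand: when the ABI returns an aggregate in memory,
   the caller reserves space for the result and passes its address as
   a hidden first argument; otherwise the result comes back in
   registers.  The example_* helper below is hypothetical.  */
#if 0
static void
example_struct_return (struct gdbarch *gdbarch, struct value *callee)
{
  /* For a TYPE_CODE_FUNC value, the target type is the return type.  */
  struct type *ret_type = TYPE_TARGET_TYPE (value_type (callee));

  if (using_struct_return (gdbarch, callee, ret_type))
    {
      /* Allocate space for the result in the inferior and pass its
         address as the hidden first argument.  */
    }
  else
    {
      /* Read the result back with gdbarch_return_value after the
         call returns.  */
    }
}
#endif
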
3776 /* Set the initialized field in a value struct. */
3777
3778 void
3779 set_value_initialized (struct value *val, int status)
3780 {
3781 val->initialized = status;
3782 }
3783
3784 /* Return the initialized field in a value struct. */
3785
3786 int
3787 value_initialized (struct value *val)
3788 {
3789 return val->initialized;
3790 }
3791
3792 /* Load the actual content of a lazy value. Fetch the data from the
3793 user's process and clear the lazy flag to indicate that the data in
3794 the buffer is valid.
3795
3796 If the value is zero-length, we avoid calling read_memory, which
3797 would abort. We mark the value as fetched anyway -- all 0 bytes of
3798 it. */
3799
3800 void
3801 value_fetch_lazy (struct value *val)
3802 {
3803 gdb_assert (value_lazy (val));
3804 allocate_value_contents (val);
3805 /* A value is either lazy, or fully fetched. The
3806 availability/validity is only established as we try to fetch a
3807 value. */
3808 gdb_assert (VEC_empty (range_s, val->optimized_out));
3809 gdb_assert (VEC_empty (range_s, val->unavailable));
3810 if (value_bitsize (val))
3811 {
3812 /* To read a lazy bitfield, read the entire enclosing value. This
3813 avoids reading the same block of (possibly volatile) memory once
3814 per bitfield. It would be even better to read only the containing
3815 word, but we have no way to record that just specific bits of a
3816 value have been fetched. */
3817 struct type *type = check_typedef (value_type (val));
3818 struct value *parent = value_parent (val);
3819
3820 if (value_lazy (parent))
3821 value_fetch_lazy (parent);
3822
3823 unpack_value_bitfield (val,
3824 value_bitpos (val), value_bitsize (val),
3825 value_contents_for_printing (parent),
3826 value_offset (val), parent);
3827 }
3828 else if (VALUE_LVAL (val) == lval_memory)
3829 {
3830 CORE_ADDR addr = value_address (val);
3831 struct type *type = check_typedef (value_enclosing_type (val));
3832
3833 if (TYPE_LENGTH (type))
3834 read_value_memory (val, 0, value_stack (val),
3835 addr, value_contents_all_raw (val),
3836 TYPE_LENGTH (type));
3837 }
3838 else if (VALUE_LVAL (val) == lval_register)
3839 {
3840 struct frame_info *frame;
3841 int regnum;
3842 struct type *type = check_typedef (value_type (val));
3843 struct value *new_val = val, *mark = value_mark ();
3844
3845 /* Offsets are not supported here; lazy register values must
3846 refer to the entire register. */
3847 gdb_assert (value_offset (val) == 0);
3848
3849 while (VALUE_LVAL (new_val) == lval_register && value_lazy (new_val))
3850 {
3851 struct frame_id frame_id = VALUE_FRAME_ID (new_val);
3852
3853 frame = frame_find_by_id (frame_id);
3854 regnum = VALUE_REGNUM (new_val);
3855
3856 gdb_assert (frame != NULL);
3857
3858 /* Convertible register routines are used for multi-register
3859 values and for interpretation in different types
3860 (e.g. float or int from a double register). Lazy
3861 register values should have the register's natural type,
3862 so they do not apply. */
3863 gdb_assert (!gdbarch_convert_register_p (get_frame_arch (frame),
3864 regnum, type));
3865
3866 new_val = get_frame_register_value (frame, regnum);
3867
3868 /* If we get another lazy lval_register value, it means the
3869 register is found by reading it from the next frame.
3870 get_frame_register_value should never return a value with
3871 the frame id pointing to FRAME. If it does, it means we
3872 either have two consecutive frames with the same frame id
3873 in the frame chain, or some code is trying to unwind
3874 behind get_prev_frame's back (e.g., a frame unwind
3875 sniffer trying to unwind), bypassing its validations. In
3876 any case, it should always be an internal error to end up
3877 in this situation. */
3878 if (VALUE_LVAL (new_val) == lval_register
3879 && value_lazy (new_val)
3880 && frame_id_eq (VALUE_FRAME_ID (new_val), frame_id))
3881 internal_error (__FILE__, __LINE__,
3882 _("infinite loop while fetching a register"));
3883 }
3884
3885 /* If it's still lazy (for instance, a saved register on the
3886 stack), fetch it. */
3887 if (value_lazy (new_val))
3888 value_fetch_lazy (new_val);
3889
3890 /* Copy the contents and the unavailability/optimized-out
3891 meta-data from NEW_VAL to VAL. */
3892 set_value_lazy (val, 0);
3893 value_contents_copy (val, value_embedded_offset (val),
3894 new_val, value_embedded_offset (new_val),
3895 TYPE_LENGTH (type));
3896
3897 if (frame_debug)
3898 {
3899 struct gdbarch *gdbarch;
3900 frame = frame_find_by_id (VALUE_FRAME_ID (val));
3901 regnum = VALUE_REGNUM (val);
3902 gdbarch = get_frame_arch (frame);
3903
3904 fprintf_unfiltered (gdb_stdlog,
3905 "{ value_fetch_lazy "
3906 "(frame=%d,regnum=%d(%s),...) ",
3907 frame_relative_level (frame), regnum,
3908 user_reg_map_regnum_to_name (gdbarch, regnum));
3909
3910 fprintf_unfiltered (gdb_stdlog, "->");
3911 if (value_optimized_out (new_val))
3912 {
3913 fprintf_unfiltered (gdb_stdlog, " ");
3914 val_print_optimized_out (new_val, gdb_stdlog);
3915 }
3916 else
3917 {
3918 int i;
3919 const gdb_byte *buf = value_contents (new_val);
3920
3921 if (VALUE_LVAL (new_val) == lval_register)
3922 fprintf_unfiltered (gdb_stdlog, " register=%d",
3923 VALUE_REGNUM (new_val));
3924 else if (VALUE_LVAL (new_val) == lval_memory)
3925 fprintf_unfiltered (gdb_stdlog, " address=%s",
3926 paddress (gdbarch,
3927 value_address (new_val)));
3928 else
3929 fprintf_unfiltered (gdb_stdlog, " computed");
3930
3931 fprintf_unfiltered (gdb_stdlog, " bytes=");
3932 fprintf_unfiltered (gdb_stdlog, "[");
3933 for (i = 0; i < register_size (gdbarch, regnum); i++)
3934 fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
3935 fprintf_unfiltered (gdb_stdlog, "]");
3936 }
3937
3938 fprintf_unfiltered (gdb_stdlog, " }\n");
3939 }
3940
3941 /* Dispose of the intermediate values. This prevents
3942 watchpoints from trying to watch the saved frame pointer. */
3943 value_free_to_mark (mark);
3944 }
3945 else if (VALUE_LVAL (val) == lval_computed
3946 && value_computed_funcs (val)->read != NULL)
3947 value_computed_funcs (val)->read (val);
3948 else
3949 internal_error (__FILE__, __LINE__, _("Unexpected lazy value type."));
3950
3951 set_value_lazy (val, 0);
3952 }
3953
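/* A minimal sketch (not compiled) of the lazy-value life cycle handled
   above: a value created with value_at_lazy records only its location;
   its contents are read from the target the first time they are
   needed, either through value_fetch_lazy directly or through an
   accessor such as value_contents.  The example_* helper below is
   hypothetical.  */
#if 0
static void
example_lazy_fetch (struct type *type, CORE_ADDR addr)
{
  struct value *val = value_at_lazy (type, addr);

  gdb_assert (value_lazy (val));    /* No target access yet.  */

  value_fetch_lazy (val);           /* Fetches the enclosing contents.  */

  gdb_assert (!value_lazy (val));   /* Contents are now in the buffer.  */
}
#endif
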
3954 /* Implementation of the convenience function $_isvoid. */
3955
3956 static struct value *
3957 isvoid_internal_fn (struct gdbarch *gdbarch,
3958 const struct language_defn *language,
3959 void *cookie, int argc, struct value **argv)
3960 {
3961 int ret;
3962
3963 if (argc != 1)
3964 error (_("You must provide one argument for $_isvoid."));
3965
3966 ret = TYPE_CODE (value_type (argv[0])) == TYPE_CODE_VOID;
3967
3968 return value_from_longest (builtin_type (gdbarch)->builtin_int, ret);
3969 }
3970
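/* Illustrative CLI use of the check above (session output is a sketch):

     (gdb) print $_isvoid ((void) 0)
     $1 = 1
     (gdb) print $_isvoid (42)
     $2 = 0   */
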
3971 void
3972 _initialize_values (void)
3973 {
3974 add_cmd ("convenience", no_class, show_convenience, _("\
3975 Debugger convenience (\"$foo\") variables and functions.\n\
3976 Convenience variables are created when you assign them values;\n\
3977 thus, \"set $foo=1\" gives \"$foo\" the value 1. Values may be any type.\n\
3978 \n\
3979 A few convenience variables are given values automatically:\n\
3980 \"$_\" holds the last address examined with \"x\" or \"info lines\",\n\
3981 \"$__\" holds the contents of the last address examined with \"x\"."
3982 #ifdef HAVE_PYTHON
3983 "\n\n\
3984 Convenience functions are defined via the Python API."
3985 #endif
3986 ), &showlist);
3987 add_alias_cmd ("conv", "convenience", no_class, 1, &showlist);
3988
3989 add_cmd ("values", no_set_class, show_values, _("\
3990 Elements of value history around item number IDX (or last ten)."),
3991 &showlist);
3992
3993 add_com ("init-if-undefined", class_vars, init_if_undefined_command, _("\
3994 Initialize a convenience variable if necessary.\n\
3995 init-if-undefined VARIABLE = EXPRESSION\n\
3996 Set an internal VARIABLE to the result of the EXPRESSION if it does not\n\
3997 exist or does not contain a value. The EXPRESSION is not evaluated if the\n\
3998 VARIABLE is already initialized."));
3999
4000 add_prefix_cmd ("function", no_class, function_command, _("\
4001 Placeholder command for showing help on convenience functions."),
4002 &functionlist, "function ", 0, &cmdlist);
4003
4004 add_internal_function ("_isvoid", _("\
4005 Check whether an expression is void.\n\
4006 Usage: $_isvoid (expression)\n\
4007 Return 1 if the expression is void, 0 otherwise."),
4008 isvoid_internal_fn, NULL);
4009 }
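
/* Illustrative CLI use of the commands registered above (session
   output is a sketch):

     (gdb) init-if-undefined $answer = 42
     (gdb) init-if-undefined $answer = 0     <- no effect, already set
     (gdb) show convenience
     $answer = 42   */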