gdb/value.c
1 /* Low level packing and unpacking of values for GDB, the GNU Debugger.
2
3 Copyright (C) 1986-2014 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21 #include "arch-utils.h"
22 #include <string.h>
23 #include "symtab.h"
24 #include "gdbtypes.h"
25 #include "value.h"
26 #include "gdbcore.h"
27 #include "command.h"
28 #include "gdbcmd.h"
29 #include "target.h"
30 #include "language.h"
31 #include "demangle.h"
32 #include "doublest.h"
33 #include "gdb_assert.h"
34 #include "regcache.h"
35 #include "block.h"
36 #include "dfp.h"
37 #include "objfiles.h"
38 #include "valprint.h"
39 #include "cli/cli-decode.h"
40 #include "exceptions.h"
41 #include "extension.h"
42 #include <ctype.h>
43 #include "tracepoint.h"
44 #include "cp-abi.h"
45 #include "user-regs.h"
46
47 /* Prototypes for exported functions. */
48
49 void _initialize_values (void);
50
51 /* Definition of a user function. */
52 struct internal_function
53 {
54 /* The name of the function. It is a bit odd to have this in the
55 function itself -- the user might use a differently-named
56 convenience variable to hold the function. */
57 char *name;
58
59 /* The handler. */
60 internal_function_fn handler;
61
62 /* User data for the handler. */
63 void *cookie;
64 };
65
66 /* Defines an [OFFSET, OFFSET + LENGTH) range. */
67
68 struct range
69 {
70 /* Lowest offset in the range. */
71 int offset;
72
73 /* Length of the range. */
74 int length;
75 };
76
77 typedef struct range range_s;
78
79 DEF_VEC_O(range_s);
80
81 /* Returns true if the ranges defined by [offset1, offset1+len1) and
82 [offset2, offset2+len2) overlap. */
83
84 static int
85 ranges_overlap (int offset1, int len1,
86 int offset2, int len2)
87 {
88 ULONGEST h, l;
89
90 l = max (offset1, offset2);
91 h = min (offset1 + len1, offset2 + len2);
92 return (l < h);
93 }
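/* For illustration of the half-open semantics above:
   ranges_overlap (0, 4, 4, 4) is 0, since [0, 4) and [4, 8) merely
   touch at offset 4, while ranges_overlap (0, 4, 3, 4) is 1, since
   both ranges contain offset 3.  */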
94
95 /* Returns true if the first argument is strictly less than the
96 second, useful for VEC_lower_bound. We keep ranges sorted by
97 offset and coalesce overlapping and contiguous ranges, so this just
98 compares the starting offset. */
99
100 static int
101 range_lessthan (const range_s *r1, const range_s *r2)
102 {
103 return r1->offset < r2->offset;
104 }
105
106 /* Returns true if RANGES contains any range that overlaps [OFFSET,
107 OFFSET+LENGTH). */
108
109 static int
110 ranges_contain (VEC(range_s) *ranges, int offset, int length)
111 {
112 range_s what;
113 int i;
114
115 what.offset = offset;
116 what.length = length;
117
118 /* We keep ranges sorted by offset and coalesce overlapping and
119 contiguous ranges, so to check if a range list contains a given
120 range, we can do a binary search for the position the given range
121 would be inserted if we only considered the starting OFFSET of
122 ranges. We call that position I. Since we also have LENGTH to
123 care for (this is a range after all), we need to check if the
124 _previous_ range overlaps the I range. E.g.,
125
126 R
127 |---|
128 |---| |---| |------| ... |--|
129 0 1 2 N
130
131 I=1
132
133 In the case above, the binary search would return `I=1', meaning,
134 this OFFSET should be inserted at position 1, and the current
135 position 1 should be pushed further (and before 2). But, `0'
136 overlaps with R.
137
138 Then we also need to check whether the range at position I itself overlaps R.
139 E.g.,
140
141 R
142 |---|
143 |---| |---| |-------| ... |--|
144 0 1 2 N
145
146 I=1
147 */
148
149 i = VEC_lower_bound (range_s, ranges, &what, range_lessthan);
150
151 if (i > 0)
152 {
153 struct range *bef = VEC_index (range_s, ranges, i - 1);
154
155 if (ranges_overlap (bef->offset, bef->length, offset, length))
156 return 1;
157 }
158
159 if (i < VEC_length (range_s, ranges))
160 {
161 struct range *r = VEC_index (range_s, ranges, i);
162
163 if (ranges_overlap (r->offset, r->length, offset, length))
164 return 1;
165 }
166
167 return 0;
168 }
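/* For illustration: if RANGES holds [0, 4) and [8, 12), then
   ranges_contain (ranges, 2, 1) and ranges_contain (ranges, 7, 2) both
   return 1 (the latter because [7, 9) overlaps [8, 12)), while
   ranges_contain (ranges, 4, 4) returns 0, since [4, 8) only touches
   its neighbours.  */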
169
170 static struct cmd_list_element *functionlist;
171
172 /* Note that the fields in this structure are arranged to save a bit
173 of memory. */
174
175 struct value
176 {
177 /* Type of value; either not an lval, or one of the various
178 different possible kinds of lval. */
179 enum lval_type lval;
180
181 /* Is it modifiable? Only relevant if lval != not_lval. */
182 unsigned int modifiable : 1;
183
184 /* If zero, contents of this value are in the contents field. If
185 nonzero, contents are in inferior. If the lval field is lval_memory,
186 the contents are in inferior memory at location.address plus offset.
187 The lval field may also be lval_register.
188
189 WARNING: This field is used by the code which handles watchpoints
190 (see breakpoint.c) to decide whether a particular value can be
191 watched by hardware watchpoints. If the lazy flag is set for
192 some member of a value chain, it is assumed that this member of
193 the chain doesn't need to be watched as part of watching the
194 value itself. This is how GDB avoids watching the entire struct
195 or array when the user wants to watch a single struct member or
196 array element. If you ever change the way lazy flag is set and
197 reset, be sure to consider this use as well! */
198 unsigned int lazy : 1;
199
200 /* If nonzero, this is the value of a variable that does not
201 actually exist in the program. If nonzero, and LVAL is
202 lval_register, this is a register ($pc, $sp, etc., never a
203 program variable) that has not been saved in the frame. All
204 optimized-out values are treated pretty much the same, except
205 registers have a different string representation and related
206 error strings. */
207 unsigned int optimized_out : 1;
208
209 /* If value is a variable, is it initialized or not. */
210 unsigned int initialized : 1;
211
212 /* If value is from the stack. If this is set, read_stack will be
213 used instead of read_memory to enable extra caching. */
214 unsigned int stack : 1;
215
216 /* If the value has been released. */
217 unsigned int released : 1;
218
219 /* Register number if the value is from a register. */
220 short regnum;
221
222 /* Location of value (if lval). */
223 union
224 {
225 /* If lval == lval_memory, this is the address in the inferior.
226 If lval == lval_register, this is the byte offset into the
227 registers structure. */
228 CORE_ADDR address;
229
230 /* Pointer to internal variable. */
231 struct internalvar *internalvar;
232
233 /* Pointer to xmethod worker. */
234 struct xmethod_worker *xm_worker;
235
236 /* If lval == lval_computed, this is a set of function pointers
237 to use to access and describe the value, and a closure pointer
238 for them to use. */
239 struct
240 {
241 /* Functions to call. */
242 const struct lval_funcs *funcs;
243
244 /* Closure for those functions to use. */
245 void *closure;
246 } computed;
247 } location;
248
249 /* Describes offset of a value within lval of a structure in bytes.
250 If lval == lval_memory, this is an offset to the address. If
251 lval == lval_register, this is a further offset from
252 location.address within the registers structure. Note also the
253 member embedded_offset below. */
254 int offset;
255
256 /* Only used for bitfields; number of bits contained in them. */
257 int bitsize;
258
259 /* Only used for bitfields; position of start of field. For
260 gdbarch_bits_big_endian=0 targets, it is the position of the LSB. For
261 gdbarch_bits_big_endian=1 targets, it is the position of the MSB. */
262 int bitpos;
263
264 /* The number of references to this value. When a value is created,
265 the value chain holds a reference, so REFERENCE_COUNT is 1. If
266 release_value is called, this value is removed from the chain but
267 the caller of release_value now has a reference to this value.
268 The caller must arrange for a call to value_free later. */
269 int reference_count;
270
271 /* Only used for bitfields; the containing value. This allows a
272 single read from the target when displaying multiple
273 bitfields. */
274 struct value *parent;
275
276 /* Frame register value is relative to. This will be described in
277 the lval enum above as "lval_register". */
278 struct frame_id frame_id;
279
280 /* Type of the value. */
281 struct type *type;
282
283 /* If a value represents a C++ object, then the `type' field gives
284 the object's compile-time type. If the object actually belongs
285 to some class derived from `type', perhaps with other base
286 classes and additional members, then `type' is just a subobject
287 of the real thing, and the full object is probably larger than
288 `type' would suggest.
289
290 If `type' is a dynamic class (i.e. one with a vtable), then GDB
291 can actually determine the object's run-time type by looking at
292 the run-time type information in the vtable. When this
293 information is available, we may elect to read in the entire
294 object, for several reasons:
295
296 - When printing the value, the user would probably rather see the
297 full object, not just the limited portion apparent from the
298 compile-time type.
299
300 - If `type' has virtual base classes, then even printing `type'
301 alone may require reaching outside the `type' portion of the
302 object to wherever the virtual base class has been stored.
303
304 When we store the entire object, `enclosing_type' is the run-time
305 type -- the complete object -- and `embedded_offset' is the
306 offset of `type' within that larger type, in bytes. The
307 value_contents() macro takes `embedded_offset' into account, so
308 most GDB code continues to see the `type' portion of the value,
309 just as the inferior would.
310
311 If `type' is a pointer to an object, then `enclosing_type' is a
312 pointer to the object's run-time type, and `pointed_to_offset' is
313 the offset in bytes from the full object to the pointed-to object
314 -- that is, the value `embedded_offset' would have if we followed
315 the pointer and fetched the complete object. (I don't really see
316 the point. Why not just determine the run-time type when you
317 indirect, and avoid the special case? The contents don't matter
318 until you indirect anyway.)
319
320 If we're not doing anything fancy, `enclosing_type' is equal to
321 `type', and `embedded_offset' is zero, so everything works
322 normally. */
323 struct type *enclosing_type;
324 int embedded_offset;
325 int pointed_to_offset;
326
327 /* Values are stored in a chain, so that they can be deleted easily
328 over calls to the inferior. Values assigned to internal
329 variables, put into the value history or exposed to Python are
330 taken off this list. */
331 struct value *next;
332
333 /* Actual contents of the value. Target byte-order. NULL or not
334 valid if lazy is nonzero. */
335 gdb_byte *contents;
336
337 /* Unavailable ranges in CONTENTS. We mark unavailable ranges,
338 rather than available, since the common and default case is for a
339 value to be available. This is filled in at value read time. The
340 unavailable ranges are tracked in bits. */
341 VEC(range_s) *unavailable;
342 };
343
344 int
345 value_bits_available (const struct value *value, int offset, int length)
346 {
347 gdb_assert (!value->lazy);
348
349 return !ranges_contain (value->unavailable, offset, length);
350 }
351
352 int
353 value_bytes_available (const struct value *value, int offset, int length)
354 {
355 return value_bits_available (value,
356 offset * TARGET_CHAR_BIT,
357 length * TARGET_CHAR_BIT);
358 }
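/* For illustration, assuming TARGET_CHAR_BIT is 8:
   value_bytes_available (val, 2, 4) is equivalent to
   value_bits_available (val, 16, 32).  */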
359
360 int
361 value_entirely_available (struct value *value)
362 {
363 /* We can only tell whether the whole value is available when we try
364 to read it. */
365 if (value->lazy)
366 value_fetch_lazy (value);
367
368 if (VEC_empty (range_s, value->unavailable))
369 return 1;
370 return 0;
371 }
372
373 int
374 value_entirely_unavailable (struct value *value)
375 {
376 /* We can only tell whether the whole value is available when we try
377 to read it. */
378 if (value->lazy)
379 value_fetch_lazy (value);
380
381 if (VEC_length (range_s, value->unavailable) == 1)
382 {
383 struct range *t = VEC_index (range_s, value->unavailable, 0);
384
385 if (t->offset == 0
386 && t->length == (TARGET_CHAR_BIT
387 * TYPE_LENGTH (value_enclosing_type (value))))
388 return 1;
389 }
390
391 return 0;
392 }
393
394 void
395 mark_value_bits_unavailable (struct value *value, int offset, int length)
396 {
397 range_s newr;
398 int i;
399
400 /* Insert the range sorted. If there's overlap or the new range
401 would be contiguous with an existing range, merge. */
402
403 newr.offset = offset;
404 newr.length = length;
405
406 /* Do a binary search for the position the given range would be
407 inserted if we only considered the starting OFFSET of ranges.
408 Call that position I. Since we also have LENGTH to care for
409 (this is a range after all), we need to check if the _previous_
410 range overlaps the I range. E.g., calling R the new range:
411
412 #1 - overlaps with previous
413
414 R
415 |-...-|
416 |---| |---| |------| ... |--|
417 0 1 2 N
418
419 I=1
420
421 In the case #1 above, the binary search would return `I=1',
422 meaning, this OFFSET should be inserted at position 1, and the
423 current position 1 should be pushed further (and become 2). But,
424 note that `0' overlaps with R, so we want to merge them.
425
426 A similar consideration needs to be taken if the new range would
427 be contiguous with the previous range:
428
429 #2 - contiguous with previous
430
431 R
432 |-...-|
433 |--| |---| |------| ... |--|
434 0 1 2 N
435
436 I=1
437
438 If there's no overlap with the previous range, as in:
439
440 #3 - not overlapping and not contiguous
441
442 R
443 |-...-|
444 |--| |---| |------| ... |--|
445 0 1 2 N
446
447 I=1
448
449 or if I is 0:
450
451 #4 - R is the range with lowest offset
452
453 R
454 |-...-|
455 |--| |---| |------| ... |--|
456 0 1 2 N
457
458 I=0
459
460 ... we just push the new range to I.
461
462 All the 4 cases above need to consider that the new range may
463 also overlap several of the ranges that follow, or that R may be
464 contiguous with the following range, and merge. E.g.,
465
466 #5 - overlapping following ranges
467
468 R
469 |------------------------|
470 |--| |---| |------| ... |--|
471 0 1 2 N
472
473 I=0
474
475 or:
476
477 R
478 |-------|
479 |--| |---| |------| ... |--|
480 0 1 2 N
481
482 I=1
483
484 */
485
486 i = VEC_lower_bound (range_s, value->unavailable, &newr, range_lessthan);
487 if (i > 0)
488 {
489 struct range *bef = VEC_index (range_s, value->unavailable, i - 1);
490
491 if (ranges_overlap (bef->offset, bef->length, offset, length))
492 {
493 /* #1 */
494 ULONGEST l = min (bef->offset, offset);
495 ULONGEST h = max (bef->offset + bef->length, offset + length);
496
497 bef->offset = l;
498 bef->length = h - l;
499 i--;
500 }
501 else if (offset == bef->offset + bef->length)
502 {
503 /* #2 */
504 bef->length += length;
505 i--;
506 }
507 else
508 {
509 /* #3 */
510 VEC_safe_insert (range_s, value->unavailable, i, &newr);
511 }
512 }
513 else
514 {
515 /* #4 */
516 VEC_safe_insert (range_s, value->unavailable, i, &newr);
517 }
518
519 /* Check whether the ranges following the one we've just added or
520 touched can be folded in (#5 above). */
521 if (i + 1 < VEC_length (range_s, value->unavailable))
522 {
523 struct range *t;
524 struct range *r;
525 int removed = 0;
526 int next = i + 1;
527
528 /* Get the range we just touched. */
529 t = VEC_index (range_s, value->unavailable, i);
530 removed = 0;
531
532 i = next;
533 for (; VEC_iterate (range_s, value->unavailable, i, r); i++)
534 if (r->offset <= t->offset + t->length)
535 {
536 ULONGEST l, h;
537
538 l = min (t->offset, r->offset);
539 h = max (t->offset + t->length, r->offset + r->length);
540
541 t->offset = l;
542 t->length = h - l;
543
544 removed++;
545 }
546 else
547 {
548 /* If we couldn't merge this one, we won't be able to
549 merge following ones either, since the ranges are
550 always sorted by OFFSET. */
551 break;
552 }
553
554 if (removed != 0)
555 VEC_block_remove (range_s, value->unavailable, next, removed);
556 }
557 }
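/* For illustration, assuming VALUE->unavailable starts out empty:

     mark_value_bits_unavailable (val, 16, 8);   one range, [16, 24)
     mark_value_bits_unavailable (val, 24, 8);   coalesced into [16, 32)
     mark_value_bits_unavailable (val, 0, 64);   collapsed into [0, 64)

   Contiguous and overlapping ranges are always merged, so the vector
   stays sorted and minimal.  */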
558
559 void
560 mark_value_bytes_unavailable (struct value *value, int offset, int length)
561 {
562 mark_value_bits_unavailable (value,
563 offset * TARGET_CHAR_BIT,
564 length * TARGET_CHAR_BIT);
565 }
566
567 /* Find the first range in RANGES that overlaps the range defined by
568 OFFSET and LENGTH, starting at element POS in the RANGES vector,
569 Returns the index into RANGES where such overlapping range was
570 found, or -1 if none was found. */
571
572 static int
573 find_first_range_overlap (VEC(range_s) *ranges, int pos,
574 int offset, int length)
575 {
576 range_s *r;
577 int i;
578
579 for (i = pos; VEC_iterate (range_s, ranges, i, r); i++)
580 if (ranges_overlap (r->offset, r->length, offset, length))
581 return i;
582
583 return -1;
584 }
585
586 /* Compare LENGTH_BITS of memory at PTR1 + OFFSET1_BITS with the memory at
587 PTR2 + OFFSET2_BITS. Return 0 if the memory is the same, otherwise
588 return non-zero.
589
590 It must always be the case that:
591 OFFSET1_BITS % TARGET_CHAR_BIT == OFFSET2_BITS % TARGET_CHAR_BIT
592
593 It is assumed that memory can be accessed from:
594 PTR + (OFFSET_BITS / TARGET_CHAR_BIT)
595 to:
596 PTR + ((OFFSET_BITS + LENGTH_BITS + TARGET_CHAR_BIT - 1)
597 / TARGET_CHAR_BIT) */
598 static int
599 memcmp_with_bit_offsets (const gdb_byte *ptr1, size_t offset1_bits,
600 const gdb_byte *ptr2, size_t offset2_bits,
601 size_t length_bits)
602 {
603 gdb_assert (offset1_bits % TARGET_CHAR_BIT
604 == offset2_bits % TARGET_CHAR_BIT);
605
606 if (offset1_bits % TARGET_CHAR_BIT != 0)
607 {
608 size_t bits;
609 gdb_byte mask, b1, b2;
610
611 /* The offset from the base pointers PTR1 and PTR2 is not a complete
612 number of bytes. A number of bits up to either the next exact
613 byte boundary, or LENGTH_BITS (whichever is sooner) will be
614 compared. */
615 bits = TARGET_CHAR_BIT - offset1_bits % TARGET_CHAR_BIT;
616 gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
617 mask = (1 << bits) - 1;
618
619 if (length_bits < bits)
620 {
621 mask &= ~(gdb_byte) ((1 << (bits - length_bits)) - 1);
622 bits = length_bits;
623 }
624
625 /* Now load the two bytes and mask off the bits we care about. */
626 b1 = *(ptr1 + offset1_bits / TARGET_CHAR_BIT) & mask;
627 b2 = *(ptr2 + offset2_bits / TARGET_CHAR_BIT) & mask;
628
629 if (b1 != b2)
630 return 1;
631
632 /* Now update the length and offsets to take account of the bits
633 we've just compared. */
634 length_bits -= bits;
635 offset1_bits += bits;
636 offset2_bits += bits;
637 }
638
639 if (length_bits % TARGET_CHAR_BIT != 0)
640 {
641 size_t bits;
642 size_t o1, o2;
643 gdb_byte mask, b1, b2;
644
645 /* The length is not an exact number of bytes. After the previous
646 IF block, the offsets are byte aligned, or the
647 length is zero (in which case this code is not reached). Compare
648 a number of bits at the end of the region, starting from an exact
649 byte boundary. */
650 bits = length_bits % TARGET_CHAR_BIT;
651 o1 = offset1_bits + length_bits - bits;
652 o2 = offset2_bits + length_bits - bits;
653
654 gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
655 mask = ((1 << bits) - 1) << (TARGET_CHAR_BIT - bits);
656
657 gdb_assert (o1 % TARGET_CHAR_BIT == 0);
658 gdb_assert (o2 % TARGET_CHAR_BIT == 0);
659
660 b1 = *(ptr1 + o1 / TARGET_CHAR_BIT) & mask;
661 b2 = *(ptr2 + o2 / TARGET_CHAR_BIT) & mask;
662
663 if (b1 != b2)
664 return 1;
665
666 length_bits -= bits;
667 }
668
669 if (length_bits > 0)
670 {
671 /* We've now taken care of any stray "bits" at the start, or end of
672 the region to compare; the remainder can be covered with a simple
673 memcmp. */
674 gdb_assert (offset1_bits % TARGET_CHAR_BIT == 0);
675 gdb_assert (offset2_bits % TARGET_CHAR_BIT == 0);
676 gdb_assert (length_bits % TARGET_CHAR_BIT == 0);
677
678 return memcmp (ptr1 + offset1_bits / TARGET_CHAR_BIT,
679 ptr2 + offset2_bits / TARGET_CHAR_BIT,
680 length_bits / TARGET_CHAR_BIT);
681 }
682
683 /* Length is zero, regions match. */
684 return 0;
685 }
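/* For illustration, assuming TARGET_CHAR_BIT is 8:
   memcmp_with_bit_offsets (buf1, 12, buf2, 4, 9) compares bits
   [12, 21) of BUF1 against bits [4, 13) of BUF2; both offsets leave
   the same remainder (4) modulo 8, as the function requires.  */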
686
687 /* Helper function for value_available_contents_eq. The only difference is
688 that this function is bit rather than byte based.
689
690 Compare LENGTH bits of VAL1's contents starting at OFFSET1 bits with
691 LENGTH bits of VAL2's contents starting at OFFSET2 bits. Return true
692 if the available bits match. */
693
694 static int
695 value_available_contents_bits_eq (const struct value *val1, int offset1,
696 const struct value *val2, int offset2,
697 int length)
698 {
699 int idx1 = 0, idx2 = 0;
700
701 /* See function description in value.h. */
702 gdb_assert (!val1->lazy && !val2->lazy);
703
704 while (length > 0)
705 {
706 range_s *r1, *r2;
707 ULONGEST l1, h1;
708 ULONGEST l2, h2;
709
710 idx1 = find_first_range_overlap (val1->unavailable, idx1,
711 offset1, length);
712 idx2 = find_first_range_overlap (val2->unavailable, idx2,
713 offset2, length);
714
715 /* The usual case is for both values to be completely available. */
716 if (idx1 == -1 && idx2 == -1)
717 return (memcmp_with_bit_offsets (val1->contents, offset1,
718 val2->contents, offset2,
719 length) == 0);
720 /* The contents only compare equal if the available sets match as
721 well. */
722 else if (idx1 == -1 || idx2 == -1)
723 return 0;
724
725 gdb_assert (idx1 != -1 && idx2 != -1);
726
727 r1 = VEC_index (range_s, val1->unavailable, idx1);
728 r2 = VEC_index (range_s, val2->unavailable, idx2);
729
730 /* Get the unavailable windows intersected by the incoming
731 ranges. The first and last ranges that overlap the argument
732 range may be wider than the incoming argument ranges. */
733 l1 = max (offset1, r1->offset);
734 h1 = min (offset1 + length, r1->offset + r1->length);
735
736 l2 = max (offset2, r2->offset);
737 h2 = min (offset2 + length, r2->offset + r2->length);
738
739 /* Make them relative to the respective start offsets, so we can
740 compare them for equality. */
741 l1 -= offset1;
742 h1 -= offset1;
743
744 l2 -= offset2;
745 h2 -= offset2;
746
747 /* Different availability, no match. */
748 if (l1 != l2 || h1 != h2)
749 return 0;
750
751 /* Compare the _available_ contents. */
752 if (memcmp_with_bit_offsets (val1->contents, offset1,
753 val2->contents, offset2, l1) != 0)
754 return 0;
755
756 length -= h1;
757 offset1 += h1;
758 offset2 += h1;
759 }
760
761 return 1;
762 }
763
764 int
765 value_available_contents_eq (const struct value *val1, int offset1,
766 const struct value *val2, int offset2,
767 int length)
768 {
769 return value_available_contents_bits_eq (val1, offset1 * TARGET_CHAR_BIT,
770 val2, offset2 * TARGET_CHAR_BIT,
771 length * TARGET_CHAR_BIT);
772 }
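/* For illustration, assuming TARGET_CHAR_BIT is 8: if VAL1 has bytes
   [2, 4) unavailable and VAL2 has bytes [1, 3) unavailable, then
   value_available_contents_eq (val1, 0, val2, 0, 4) returns 0 even if
   the available bytes happen to match, because the two availability
   masks differ.  */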
773
774 /* Prototypes for local functions. */
775
776 static void show_values (char *, int);
777
778 static void show_convenience (char *, int);
779
780
781 /* The value-history records all the values printed
782 by print commands during this session. Each chunk
783 records 60 consecutive values. The first chunk on
784 the chain records the most recent values.
785 The total number of values is in value_history_count. */
786
787 #define VALUE_HISTORY_CHUNK 60
788
789 struct value_history_chunk
790 {
791 struct value_history_chunk *next;
792 struct value *values[VALUE_HISTORY_CHUNK];
793 };
794
795 /* Chain of chunks now in use. */
796
797 static struct value_history_chunk *value_history_chain;
798
799 static int value_history_count; /* Abs number of last entry stored. */
800
801 \f
802 /* List of all value objects currently allocated
803 (except for those released by calls to release_value)
804 This is so they can be freed after each command. */
805
806 static struct value *all_values;
807
808 /* Allocate a lazy value for type TYPE. Its actual content is
809 "lazily" allocated too: the content field of the return value is
810 NULL; it will be allocated when it is fetched from the target. */
811
812 struct value *
813 allocate_value_lazy (struct type *type)
814 {
815 struct value *val;
816
817 /* Call check_typedef on our type to make sure that, if TYPE
818 is a TYPE_CODE_TYPEDEF, its length is set to the length
819 of the target type instead of zero. However, we do not
820 replace the typedef type by the target type, because we want
821 to keep the typedef in order to be able to set the VAL's type
822 description correctly. */
823 check_typedef (type);
824
825 val = (struct value *) xzalloc (sizeof (struct value));
826 val->contents = NULL;
827 val->next = all_values;
828 all_values = val;
829 val->type = type;
830 val->enclosing_type = type;
831 VALUE_LVAL (val) = not_lval;
832 val->location.address = 0;
833 VALUE_FRAME_ID (val) = null_frame_id;
834 val->offset = 0;
835 val->bitpos = 0;
836 val->bitsize = 0;
837 VALUE_REGNUM (val) = -1;
838 val->lazy = 1;
839 val->optimized_out = 0;
840 val->embedded_offset = 0;
841 val->pointed_to_offset = 0;
842 val->modifiable = 1;
843 val->initialized = 1; /* Default to initialized. */
844
845 /* Values start out on the all_values chain. */
846 val->reference_count = 1;
847
848 return val;
849 }
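/* A typical use of the lazy allocation above is an lval_memory value
   (see e.g. value_at_lazy in valops.c): the value is created now, but
   the contents buffer is only allocated and read from the target when
   somebody looks at it, e.g. via value_contents, which calls
   value_fetch_lazy on a still-lazy value.  */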
850
851 /* Allocate the contents of VAL if it has not been allocated yet. */
852
853 static void
854 allocate_value_contents (struct value *val)
855 {
856 if (!val->contents)
857 val->contents = (gdb_byte *) xzalloc (TYPE_LENGTH (val->enclosing_type));
858 }
859
860 /* Allocate a value and its contents for type TYPE. */
861
862 struct value *
863 allocate_value (struct type *type)
864 {
865 struct value *val = allocate_value_lazy (type);
866
867 allocate_value_contents (val);
868 val->lazy = 0;
869 return val;
870 }
871
872 /* Allocate a value that has the correct length
873 for COUNT repetitions of type TYPE. */
874
875 struct value *
876 allocate_repeat_value (struct type *type, int count)
877 {
878 int low_bound = current_language->string_lower_bound; /* ??? */
879 /* FIXME-type-allocation: need a way to free this type when we are
880 done with it. */
881 struct type *array_type
882 = lookup_array_range_type (type, low_bound, count + low_bound - 1);
883
884 return allocate_value (array_type);
885 }
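/* For illustration: for a 4-byte TYPE and COUNT == 10, the value
   returned above holds a 40-byte contents buffer, typed as an array
   whose lower bound follows the current language (0 for C, giving the
   range [0, 9]).  */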
886
887 struct value *
888 allocate_computed_value (struct type *type,
889 const struct lval_funcs *funcs,
890 void *closure)
891 {
892 struct value *v = allocate_value_lazy (type);
893
894 VALUE_LVAL (v) = lval_computed;
895 v->location.computed.funcs = funcs;
896 v->location.computed.closure = closure;
897
898 return v;
899 }
900
901 /* Allocate NOT_LVAL value for type TYPE being OPTIMIZED_OUT. */
902
903 struct value *
904 allocate_optimized_out_value (struct type *type)
905 {
906 struct value *retval = allocate_value_lazy (type);
907
908 set_value_optimized_out (retval, 1);
909 set_value_lazy (retval, 0);
910 return retval;
911 }
912
913 /* Accessor methods. */
914
915 struct value *
916 value_next (struct value *value)
917 {
918 return value->next;
919 }
920
921 struct type *
922 value_type (const struct value *value)
923 {
924 return value->type;
925 }
926 void
927 deprecated_set_value_type (struct value *value, struct type *type)
928 {
929 value->type = type;
930 }
931
932 int
933 value_offset (const struct value *value)
934 {
935 return value->offset;
936 }
937 void
938 set_value_offset (struct value *value, int offset)
939 {
940 value->offset = offset;
941 }
942
943 int
944 value_bitpos (const struct value *value)
945 {
946 return value->bitpos;
947 }
948 void
949 set_value_bitpos (struct value *value, int bit)
950 {
951 value->bitpos = bit;
952 }
953
954 int
955 value_bitsize (const struct value *value)
956 {
957 return value->bitsize;
958 }
959 void
960 set_value_bitsize (struct value *value, int bit)
961 {
962 value->bitsize = bit;
963 }
964
965 struct value *
966 value_parent (struct value *value)
967 {
968 return value->parent;
969 }
970
971 /* See value.h. */
972
973 void
974 set_value_parent (struct value *value, struct value *parent)
975 {
976 struct value *old = value->parent;
977
978 value->parent = parent;
979 if (parent != NULL)
980 value_incref (parent);
981 value_free (old);
982 }
983
984 gdb_byte *
985 value_contents_raw (struct value *value)
986 {
987 allocate_value_contents (value);
988 return value->contents + value->embedded_offset;
989 }
990
991 gdb_byte *
992 value_contents_all_raw (struct value *value)
993 {
994 allocate_value_contents (value);
995 return value->contents;
996 }
997
998 struct type *
999 value_enclosing_type (struct value *value)
1000 {
1001 return value->enclosing_type;
1002 }
1003
1004 /* Look at value.h for description. */
1005
1006 struct type *
1007 value_actual_type (struct value *value, int resolve_simple_types,
1008 int *real_type_found)
1009 {
1010 struct value_print_options opts;
1011 struct type *result;
1012
1013 get_user_print_options (&opts);
1014
1015 if (real_type_found)
1016 *real_type_found = 0;
1017 result = value_type (value);
1018 if (opts.objectprint)
1019 {
1020 /* If result's target type is TYPE_CODE_STRUCT, proceed to
1021 fetch its rtti type. */
1022 if ((TYPE_CODE (result) == TYPE_CODE_PTR
1023 || TYPE_CODE (result) == TYPE_CODE_REF)
1024 && TYPE_CODE (check_typedef (TYPE_TARGET_TYPE (result)))
1025 == TYPE_CODE_STRUCT)
1026 {
1027 struct type *real_type;
1028
1029 real_type = value_rtti_indirect_type (value, NULL, NULL, NULL);
1030 if (real_type)
1031 {
1032 if (real_type_found)
1033 *real_type_found = 1;
1034 result = real_type;
1035 }
1036 }
1037 else if (resolve_simple_types)
1038 {
1039 if (real_type_found)
1040 *real_type_found = 1;
1041 result = value_enclosing_type (value);
1042 }
1043 }
1044
1045 return result;
1046 }
1047
1048 void
1049 error_value_optimized_out (void)
1050 {
1051 error (_("value has been optimized out"));
1052 }
1053
1054 static void
1055 require_not_optimized_out (const struct value *value)
1056 {
1057 if (value->optimized_out)
1058 {
1059 if (value->lval == lval_register)
1060 error (_("register has not been saved in frame"));
1061 else
1062 error_value_optimized_out ();
1063 }
1064 }
1065
1066 static void
1067 require_available (const struct value *value)
1068 {
1069 if (!VEC_empty (range_s, value->unavailable))
1070 throw_error (NOT_AVAILABLE_ERROR, _("value is not available"));
1071 }
1072
1073 const gdb_byte *
1074 value_contents_for_printing (struct value *value)
1075 {
1076 if (value->lazy)
1077 value_fetch_lazy (value);
1078 return value->contents;
1079 }
1080
1081 const gdb_byte *
1082 value_contents_for_printing_const (const struct value *value)
1083 {
1084 gdb_assert (!value->lazy);
1085 return value->contents;
1086 }
1087
1088 const gdb_byte *
1089 value_contents_all (struct value *value)
1090 {
1091 const gdb_byte *result = value_contents_for_printing (value);
1092 require_not_optimized_out (value);
1093 require_available (value);
1094 return result;
1095 }
1096
1097 /* Copy LENGTH bytes of SRC value's (all) contents
1098 (value_contents_all) starting at SRC_OFFSET, into DST value's (all)
1099 contents, starting at DST_OFFSET. If unavailable contents are
1100 being copied from SRC, the corresponding DST contents are marked
1101 unavailable accordingly. Neither DST nor SRC may be lazy
1102 values.
1103
1104 It is assumed the contents of DST in the [DST_OFFSET,
1105 DST_OFFSET+LENGTH) range are wholly available. */
1106
1107 void
1108 value_contents_copy_raw (struct value *dst, int dst_offset,
1109 struct value *src, int src_offset, int length)
1110 {
1111 range_s *r;
1112 int i;
1113 int src_bit_offset, dst_bit_offset, bit_length;
1114
1115 /* A lazy DST would make this copy operation useless, since as
1116 soon as DST's contents were un-lazied (by a later value_contents
1117 call, say), the contents would be overwritten. A lazy SRC would
1118 mean we'd be copying garbage. */
1119 gdb_assert (!dst->lazy && !src->lazy);
1120
1121 /* The overwritten DST range gets unavailability ORed in, not
1122 replaced. Make sure to remember to implement replacing if it
1123 turns out actually necessary. */
1124 gdb_assert (value_bytes_available (dst, dst_offset, length));
1125
1126 /* Copy the data. */
1127 memcpy (value_contents_all_raw (dst) + dst_offset,
1128 value_contents_all_raw (src) + src_offset,
1129 length);
1130
1131 /* Copy the meta-data, adjusted. */
1132 src_bit_offset = src_offset * TARGET_CHAR_BIT;
1133 dst_bit_offset = dst_offset * TARGET_CHAR_BIT;
1134 bit_length = length * TARGET_CHAR_BIT;
1135 for (i = 0; VEC_iterate (range_s, src->unavailable, i, r); i++)
1136 {
1137 ULONGEST h, l;
1138
1139 l = max (r->offset, src_bit_offset);
1140 h = min (r->offset + r->length, src_bit_offset + bit_length);
1141
1142 if (l < h)
1143 mark_value_bits_unavailable (dst,
1144 dst_bit_offset + (l - src_bit_offset),
1145 h - l);
1146 }
1147 }
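/* For illustration, assuming TARGET_CHAR_BIT is 8: if SRC has bits
   [8, 24) unavailable and we copy bytes [1, 5) of SRC to offset 0 of
   DST, the copied window covers SRC bits [8, 40), so DST bits [0, 16)
   end up marked unavailable.  */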
1148
1149 /* Copy LENGTH bytes of SRC value's (all) contents
1150 (value_contents_all) starting at SRC_OFFSET byte, into DST value's
1151 (all) contents, starting at DST_OFFSET. If unavailable contents
1152 are being copied from SRC, the corresponding DST contents are
1153 marked unavailable accordingly. DST must not be lazy. If SRC is
1154 lazy, it will be fetched now. If SRC is not valid (is optimized
1155 out), an error is thrown.
1156
1157 It is assumed the contents of DST in the [DST_OFFSET,
1158 DST_OFFSET+LENGTH) range are wholly available. */
1159
1160 void
1161 value_contents_copy (struct value *dst, int dst_offset,
1162 struct value *src, int src_offset, int length)
1163 {
1164 require_not_optimized_out (src);
1165
1166 if (src->lazy)
1167 value_fetch_lazy (src);
1168
1169 value_contents_copy_raw (dst, dst_offset, src, src_offset, length);
1170 }
1171
1172 int
1173 value_lazy (struct value *value)
1174 {
1175 return value->lazy;
1176 }
1177
1178 void
1179 set_value_lazy (struct value *value, int val)
1180 {
1181 value->lazy = val;
1182 }
1183
1184 int
1185 value_stack (struct value *value)
1186 {
1187 return value->stack;
1188 }
1189
1190 void
1191 set_value_stack (struct value *value, int val)
1192 {
1193 value->stack = val;
1194 }
1195
1196 const gdb_byte *
1197 value_contents (struct value *value)
1198 {
1199 const gdb_byte *result = value_contents_writeable (value);
1200 require_not_optimized_out (value);
1201 require_available (value);
1202 return result;
1203 }
1204
1205 gdb_byte *
1206 value_contents_writeable (struct value *value)
1207 {
1208 if (value->lazy)
1209 value_fetch_lazy (value);
1210 return value_contents_raw (value);
1211 }
1212
1213 /* Return non-zero if VAL1 and VAL2 have the same contents. Note that
1214 this function is different from value_equal; in C the operator ==
1215 can return 0 even if the two values being compared are equal. */
1216
1217 int
1218 value_contents_equal (struct value *val1, struct value *val2)
1219 {
1220 struct type *type1;
1221 struct type *type2;
1222
1223 type1 = check_typedef (value_type (val1));
1224 type2 = check_typedef (value_type (val2));
1225 if (TYPE_LENGTH (type1) != TYPE_LENGTH (type2))
1226 return 0;
1227
1228 return (memcmp (value_contents (val1), value_contents (val2),
1229 TYPE_LENGTH (type1)) == 0);
1230 }
1231
1232 int
1233 value_optimized_out (struct value *value)
1234 {
1235 /* We can only know if a value is optimized out once we have tried to
1236 fetch it. */
1237 if (!value->optimized_out && value->lazy)
1238 value_fetch_lazy (value);
1239
1240 return value->optimized_out;
1241 }
1242
1243 int
1244 value_optimized_out_const (const struct value *value)
1245 {
1246 return value->optimized_out;
1247 }
1248
1249 void
1250 set_value_optimized_out (struct value *value, int val)
1251 {
1252 value->optimized_out = val;
1253 }
1254
1255 int
1256 value_entirely_optimized_out (const struct value *value)
1257 {
1258 if (!value->optimized_out)
1259 return 0;
1260 if (value->lval != lval_computed
1261 || !value->location.computed.funcs->check_any_valid)
1262 return 1;
1263 return !value->location.computed.funcs->check_any_valid (value);
1264 }
1265
1266 int
1267 value_bits_valid (const struct value *value, int offset, int length)
1268 {
1269 if (!value->optimized_out)
1270 return 1;
1271 if (value->lval != lval_computed
1272 || !value->location.computed.funcs->check_validity)
1273 return 0;
1274 return value->location.computed.funcs->check_validity (value, offset,
1275 length);
1276 }
1277
1278 int
1279 value_bits_synthetic_pointer (const struct value *value,
1280 int offset, int length)
1281 {
1282 if (value->lval != lval_computed
1283 || !value->location.computed.funcs->check_synthetic_pointer)
1284 return 0;
1285 return value->location.computed.funcs->check_synthetic_pointer (value,
1286 offset,
1287 length);
1288 }
1289
1290 int
1291 value_embedded_offset (struct value *value)
1292 {
1293 return value->embedded_offset;
1294 }
1295
1296 void
1297 set_value_embedded_offset (struct value *value, int val)
1298 {
1299 value->embedded_offset = val;
1300 }
1301
1302 int
1303 value_pointed_to_offset (struct value *value)
1304 {
1305 return value->pointed_to_offset;
1306 }
1307
1308 void
1309 set_value_pointed_to_offset (struct value *value, int val)
1310 {
1311 value->pointed_to_offset = val;
1312 }
1313
1314 const struct lval_funcs *
1315 value_computed_funcs (const struct value *v)
1316 {
1317 gdb_assert (value_lval_const (v) == lval_computed);
1318
1319 return v->location.computed.funcs;
1320 }
1321
1322 void *
1323 value_computed_closure (const struct value *v)
1324 {
1325 gdb_assert (v->lval == lval_computed);
1326
1327 return v->location.computed.closure;
1328 }
1329
1330 enum lval_type *
1331 deprecated_value_lval_hack (struct value *value)
1332 {
1333 return &value->lval;
1334 }
1335
1336 enum lval_type
1337 value_lval_const (const struct value *value)
1338 {
1339 return value->lval;
1340 }
1341
1342 CORE_ADDR
1343 value_address (const struct value *value)
1344 {
1345 if (value->lval == lval_internalvar
1346 || value->lval == lval_internalvar_component
1347 || value->lval == lval_xcallable)
1348 return 0;
1349 if (value->parent != NULL)
1350 return value_address (value->parent) + value->offset;
1351 else
1352 return value->location.address + value->offset;
1353 }
1354
1355 CORE_ADDR
1356 value_raw_address (struct value *value)
1357 {
1358 if (value->lval == lval_internalvar
1359 || value->lval == lval_internalvar_component
1360 || value->lval == lval_xcallable)
1361 return 0;
1362 return value->location.address;
1363 }
1364
1365 void
1366 set_value_address (struct value *value, CORE_ADDR addr)
1367 {
1368 gdb_assert (value->lval != lval_internalvar
1369 && value->lval != lval_internalvar_component
1370 && value->lval != lval_xcallable);
1371 value->location.address = addr;
1372 }
1373
1374 struct internalvar **
1375 deprecated_value_internalvar_hack (struct value *value)
1376 {
1377 return &value->location.internalvar;
1378 }
1379
1380 struct frame_id *
1381 deprecated_value_frame_id_hack (struct value *value)
1382 {
1383 return &value->frame_id;
1384 }
1385
1386 short *
1387 deprecated_value_regnum_hack (struct value *value)
1388 {
1389 return &value->regnum;
1390 }
1391
1392 int
1393 deprecated_value_modifiable (struct value *value)
1394 {
1395 return value->modifiable;
1396 }
1397 \f
1398 /* Return a mark in the value chain. All values allocated after the
1399 mark is obtained (except for those released) are subject to being freed
1400 if a subsequent value_free_to_mark is passed the mark. */
1401 struct value *
1402 value_mark (void)
1403 {
1404 return all_values;
1405 }
1406
1407 /* Take a reference to VAL. VAL will not be deallocated until all
1408 references are released. */
1409
1410 void
1411 value_incref (struct value *val)
1412 {
1413 val->reference_count++;
1414 }
1415
1416 /* Release a reference to VAL, which was acquired with value_incref.
1417 This function is also called to deallocate values from the value
1418 chain. */
1419
1420 void
1421 value_free (struct value *val)
1422 {
1423 if (val)
1424 {
1425 gdb_assert (val->reference_count > 0);
1426 val->reference_count--;
1427 if (val->reference_count > 0)
1428 return;
1429
1430 /* If there's an associated parent value, drop our reference to
1431 it. */
1432 if (val->parent != NULL)
1433 value_free (val->parent);
1434
1435 if (VALUE_LVAL (val) == lval_computed)
1436 {
1437 const struct lval_funcs *funcs = val->location.computed.funcs;
1438
1439 if (funcs->free_closure)
1440 funcs->free_closure (val);
1441 }
1442 else if (VALUE_LVAL (val) == lval_xcallable)
1443 free_xmethod_worker (val->location.xm_worker);
1444
1445 xfree (val->contents);
1446 VEC_free (range_s, val->unavailable);
1447 }
1448 xfree (val);
1449 }
1450
1451 /* Free all values allocated since MARK was obtained by value_mark
1452 (except for those released). */
1453 void
1454 value_free_to_mark (struct value *mark)
1455 {
1456 struct value *val;
1457 struct value *next;
1458
1459 for (val = all_values; val && val != mark; val = next)
1460 {
1461 next = val->next;
1462 val->released = 1;
1463 value_free (val);
1464 }
1465 all_values = val;
1466 }
1467
1468 /* Free all the values that have been allocated (except for those released).
1469 Call after each command, successful or not.
1470 In practice this is called before each command, which is sufficient. */
1471
1472 void
1473 free_all_values (void)
1474 {
1475 struct value *val;
1476 struct value *next;
1477
1478 for (val = all_values; val; val = next)
1479 {
1480 next = val->next;
1481 val->released = 1;
1482 value_free (val);
1483 }
1484
1485 all_values = 0;
1486 }
1487
1488 /* Frees all the elements in a chain of values. */
1489
1490 void
1491 free_value_chain (struct value *v)
1492 {
1493 struct value *next;
1494
1495 for (; v; v = next)
1496 {
1497 next = value_next (v);
1498 value_free (v);
1499 }
1500 }
1501
1502 /* Remove VAL from the chain all_values
1503 so it will not be freed automatically. */
1504
1505 void
1506 release_value (struct value *val)
1507 {
1508 struct value *v;
1509
1510 if (all_values == val)
1511 {
1512 all_values = val->next;
1513 val->next = NULL;
1514 val->released = 1;
1515 return;
1516 }
1517
1518 for (v = all_values; v; v = v->next)
1519 {
1520 if (v->next == val)
1521 {
1522 v->next = val->next;
1523 val->next = NULL;
1524 val->released = 1;
1525 break;
1526 }
1527 }
1528 }
1529
1530 /* If the value is not already released, release it.
1531 If the value is already released, increment its reference count.
1532 That is, this function ensures that the value is released from the
1533 value chain and that the caller owns a reference to it. */
1534
1535 void
1536 release_value_or_incref (struct value *val)
1537 {
1538 if (val->released)
1539 value_incref (val);
1540 else
1541 release_value (val);
1542 }
1543
1544 /* Release all values up to mark. */
1545 struct value *
1546 value_release_to_mark (struct value *mark)
1547 {
1548 struct value *val;
1549 struct value *next;
1550
1551 for (val = next = all_values; next; next = next->next)
1552 {
1553 if (next->next == mark)
1554 {
1555 all_values = next->next;
1556 next->next = NULL;
1557 return val;
1558 }
1559 next->released = 1;
1560 }
1561 all_values = 0;
1562 return val;
1563 }
1564
1565 /* Return a copy of the value ARG.
1566 It contains the same contents, for the same memory address,
1567 but it's a different block of storage. */
1568
1569 struct value *
1570 value_copy (struct value *arg)
1571 {
1572 struct type *encl_type = value_enclosing_type (arg);
1573 struct value *val;
1574
1575 if (value_lazy (arg))
1576 val = allocate_value_lazy (encl_type);
1577 else
1578 val = allocate_value (encl_type);
1579 val->type = arg->type;
1580 VALUE_LVAL (val) = VALUE_LVAL (arg);
1581 val->location = arg->location;
1582 val->offset = arg->offset;
1583 val->bitpos = arg->bitpos;
1584 val->bitsize = arg->bitsize;
1585 VALUE_FRAME_ID (val) = VALUE_FRAME_ID (arg);
1586 VALUE_REGNUM (val) = VALUE_REGNUM (arg);
1587 val->lazy = arg->lazy;
1588 val->optimized_out = arg->optimized_out;
1589 val->embedded_offset = value_embedded_offset (arg);
1590 val->pointed_to_offset = arg->pointed_to_offset;
1591 val->modifiable = arg->modifiable;
1592 if (!value_lazy (val))
1593 {
1594 memcpy (value_contents_all_raw (val), value_contents_all_raw (arg),
1595 TYPE_LENGTH (value_enclosing_type (arg)));
1596
1597 }
1598 val->unavailable = VEC_copy (range_s, arg->unavailable);
1599 set_value_parent (val, arg->parent);
1600 if (VALUE_LVAL (val) == lval_computed)
1601 {
1602 const struct lval_funcs *funcs = val->location.computed.funcs;
1603
1604 if (funcs->copy_closure)
1605 val->location.computed.closure = funcs->copy_closure (val);
1606 }
1607 return val;
1608 }
1609
1610 /* Return a version of ARG that is non-lvalue. */
1611
1612 struct value *
1613 value_non_lval (struct value *arg)
1614 {
1615 if (VALUE_LVAL (arg) != not_lval)
1616 {
1617 struct type *enc_type = value_enclosing_type (arg);
1618 struct value *val = allocate_value (enc_type);
1619
1620 memcpy (value_contents_all_raw (val), value_contents_all (arg),
1621 TYPE_LENGTH (enc_type));
1622 val->type = arg->type;
1623 set_value_embedded_offset (val, value_embedded_offset (arg));
1624 set_value_pointed_to_offset (val, value_pointed_to_offset (arg));
1625 return val;
1626 }
1627 return arg;
1628 }
1629
1630 void
1631 set_value_component_location (struct value *component,
1632 const struct value *whole)
1633 {
1634 gdb_assert (whole->lval != lval_xcallable);
1635
1636 if (whole->lval == lval_internalvar)
1637 VALUE_LVAL (component) = lval_internalvar_component;
1638 else
1639 VALUE_LVAL (component) = whole->lval;
1640
1641 component->location = whole->location;
1642 if (whole->lval == lval_computed)
1643 {
1644 const struct lval_funcs *funcs = whole->location.computed.funcs;
1645
1646 if (funcs->copy_closure)
1647 component->location.computed.closure = funcs->copy_closure (whole);
1648 }
1649 }
1650
1651 \f
1652 /* Access to the value history. */
1653
1654 /* Record a new value in the value history.
1655 Returns the absolute history index of the entry. */
1656
1657 int
1658 record_latest_value (struct value *val)
1659 {
1660 int i;
1661
1662 /* We don't want this value to have anything to do with the inferior anymore.
1663 In particular, "set $1 = 50" should not affect the variable from which
1664 the value was taken, and fast watchpoints should be able to assume that
1665 a value on the value history never changes. */
1666 if (value_lazy (val))
1667 value_fetch_lazy (val);
1668 /* We preserve VALUE_LVAL so that the user can find out where it was fetched
1669 from. This is a bit dubious, because then *&$1 does not just return $1
1670 but the current contents of that location. c'est la vie... */
1671 val->modifiable = 0;
1672
1673 /* The value may have already been released, in which case we're adding a
1674 new reference for its entry in the history. That is why we call
1675 release_value_or_incref here instead of release_value. */
1676 release_value_or_incref (val);
1677
1678 /* Here we treat value_history_count as origin-zero,
1679 applying to the value being stored now. */
1680
1681 i = value_history_count % VALUE_HISTORY_CHUNK;
1682 if (i == 0)
1683 {
1684 struct value_history_chunk *new
1685 = (struct value_history_chunk *)
1686
1687 xmalloc (sizeof (struct value_history_chunk));
1688 memset (new->values, 0, sizeof new->values);
1689 new->next = value_history_chain;
1690 value_history_chain = new;
1691 }
1692
1693 value_history_chain->values[i] = val;
1694
1695 /* Now we regard value_history_count as origin-one,
1696 applying to the value just stored. */
1697
1698 return ++value_history_count;
1699 }
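/* For illustration of the chunking above: with VALUE_HISTORY_CHUNK
   being 60, values $1 through $60 live in the first chunk; recording
   $61 allocates a fresh chunk, which becomes the new head of
   value_history_chain (the chain is newest-chunk-first).  */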
1700
1701 /* Return a copy of the value in the history with sequence number NUM. */
1702
1703 struct value *
1704 access_value_history (int num)
1705 {
1706 struct value_history_chunk *chunk;
1707 int i;
1708 int absnum = num;
1709
1710 if (absnum <= 0)
1711 absnum += value_history_count;
1712
1713 if (absnum <= 0)
1714 {
1715 if (num == 0)
1716 error (_("The history is empty."));
1717 else if (num == 1)
1718 error (_("There is only one value in the history."));
1719 else
1720 error (_("History does not go back to $$%d."), -num);
1721 }
1722 if (absnum > value_history_count)
1723 error (_("History has not yet reached $%d."), absnum);
1724
1725 absnum--;
1726
1727 /* Now absnum is always absolute and origin zero. */
1728
1729 chunk = value_history_chain;
1730 for (i = (value_history_count - 1) / VALUE_HISTORY_CHUNK
1731 - absnum / VALUE_HISTORY_CHUNK;
1732 i > 0; i--)
1733 chunk = chunk->next;
1734
1735 return value_copy (chunk->values[absnum % VALUE_HISTORY_CHUNK]);
1736 }
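/* For illustration: with value_history_count == 5,
   access_value_history (5) and access_value_history (0) both return a
   copy of $5 (the latter via the ABSNUM += value_history_count
   adjustment), and access_value_history (-1) returns a copy of $4,
   which is how `$' and `$$N' references are resolved by the
   expression evaluator.  */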
1737
1738 static void
1739 show_values (char *num_exp, int from_tty)
1740 {
1741 int i;
1742 struct value *val;
1743 static int num = 1;
1744
1745 if (num_exp)
1746 {
1747 /* "show values +" should print from the stored position.
1748 "show values <exp>" should print around value number <exp>. */
1749 if (num_exp[0] != '+' || num_exp[1] != '\0')
1750 num = parse_and_eval_long (num_exp) - 5;
1751 }
1752 else
1753 {
1754 /* "show values" means print the last 10 values. */
1755 num = value_history_count - 9;
1756 }
1757
1758 if (num <= 0)
1759 num = 1;
1760
1761 for (i = num; i < num + 10 && i <= value_history_count; i++)
1762 {
1763 struct value_print_options opts;
1764
1765 val = access_value_history (i);
1766 printf_filtered (("$%d = "), i);
1767 get_user_print_options (&opts);
1768 value_print (val, gdb_stdout, &opts);
1769 printf_filtered (("\n"));
1770 }
1771
1772 /* The next "show values +" should start after what we just printed. */
1773 num += 10;
1774
1775 /* Hitting just return after this command should do the same thing as
1776 "show values +". If num_exp is null, this is unnecessary, since
1777 "show values +" is not useful after "show values". */
1778 if (from_tty && num_exp)
1779 {
1780 num_exp[0] = '+';
1781 num_exp[1] = '\0';
1782 }
1783 }
1784 \f
1785 /* Internal variables. These are variables within the debugger
1786 that hold values assigned by debugger commands.
1787 The user refers to them with a '$' prefix
1788 that does not appear in the variable names stored internally. */
1789
1790 struct internalvar
1791 {
1792 struct internalvar *next;
1793 char *name;
1794
1795 /* We support various different kinds of content of an internal variable.
1796 enum internalvar_kind specifies the kind, and union internalvar_data
1797 provides the data associated with this particular kind. */
1798
1799 enum internalvar_kind
1800 {
1801 /* The internal variable is empty. */
1802 INTERNALVAR_VOID,
1803
1804 /* The value of the internal variable is provided directly as
1805 a GDB value object. */
1806 INTERNALVAR_VALUE,
1807
1808 /* A fresh value is computed via a call-back routine on every
1809 access to the internal variable. */
1810 INTERNALVAR_MAKE_VALUE,
1811
1812 /* The internal variable holds a GDB internal convenience function. */
1813 INTERNALVAR_FUNCTION,
1814
1815 /* The variable holds an integer value. */
1816 INTERNALVAR_INTEGER,
1817
1818 /* The variable holds a GDB-provided string. */
1819 INTERNALVAR_STRING,
1820
1821 } kind;
1822
1823 union internalvar_data
1824 {
1825 /* A value object used with INTERNALVAR_VALUE. */
1826 struct value *value;
1827
1828 /* The call-back routine used with INTERNALVAR_MAKE_VALUE. */
1829 struct
1830 {
1831 /* The functions to call. */
1832 const struct internalvar_funcs *functions;
1833
1834 /* The function's user-data. */
1835 void *data;
1836 } make_value;
1837
1838 /* The internal function used with INTERNALVAR_FUNCTION. */
1839 struct
1840 {
1841 struct internal_function *function;
1842 /* True if this is the canonical name for the function. */
1843 int canonical;
1844 } fn;
1845
1846 /* An integer value used with INTERNALVAR_INTEGER. */
1847 struct
1848 {
1849 /* If type is non-NULL, it will be used as the type to generate
1850 a value for this internal variable. If type is NULL, a default
1851 integer type for the architecture is used. */
1852 struct type *type;
1853 LONGEST val;
1854 } integer;
1855
1856 /* A string value used with INTERNALVAR_STRING. */
1857 char *string;
1858 } u;
1859 };
1860
1861 static struct internalvar *internalvars;
1862
1863 /* If the variable does not already exist, create it and give it the
1864 value given. If no value is given, the default is zero. */
1865 static void
1866 init_if_undefined_command (char* args, int from_tty)
1867 {
1868 struct internalvar* intvar;
1869
1870 /* Parse the expression - this is taken from set_command(). */
1871 struct expression *expr = parse_expression (args);
1872 register struct cleanup *old_chain =
1873 make_cleanup (free_current_contents, &expr);
1874
1875 /* Validate the expression.
1876 Was the expression an assignment?
1877 Or even an expression at all? */
1878 if (expr->nelts == 0 || expr->elts[0].opcode != BINOP_ASSIGN)
1879 error (_("Init-if-undefined requires an assignment expression."));
1880
1881 /* Extract the variable from the parsed expression.
1882 In the case of an assign the lvalue will be in elts[1] and elts[2]. */
1883 if (expr->elts[1].opcode != OP_INTERNALVAR)
1884 error (_("The first parameter to init-if-undefined "
1885 "should be a GDB variable."));
1886 intvar = expr->elts[2].internalvar;
1887
1888 /* Only evaluate the expression if the lvalue is void.
1889 This may still fail if the expression is invalid. */
1890 if (intvar->kind == INTERNALVAR_VOID)
1891 evaluate_expression (expr);
1892
1893 do_cleanups (old_chain);
1894 }
1895
1896
1897 /* Look up an internal variable with name NAME. NAME should not
1898 normally include a dollar sign.
1899
1900 If the specified internal variable does not exist,
1901 the return value is NULL. */
1902
1903 struct internalvar *
1904 lookup_only_internalvar (const char *name)
1905 {
1906 struct internalvar *var;
1907
1908 for (var = internalvars; var; var = var->next)
1909 if (strcmp (var->name, name) == 0)
1910 return var;
1911
1912 return NULL;
1913 }
1914
1915 /* Complete NAME by comparing it to the names of internal variables.
1916 Returns a vector of newly allocated strings, or NULL if no matches
1917 were found. */
1918
1919 VEC (char_ptr) *
1920 complete_internalvar (const char *name)
1921 {
1922 VEC (char_ptr) *result = NULL;
1923 struct internalvar *var;
1924 int len;
1925
1926 len = strlen (name);
1927
1928 for (var = internalvars; var; var = var->next)
1929 if (strncmp (var->name, name, len) == 0)
1930 {
1931 char *r = xstrdup (var->name);
1932
1933 VEC_safe_push (char_ptr, result, r);
1934 }
1935
1936 return result;
1937 }
1938
1939 /* Create an internal variable with name NAME and with a void value.
1940 NAME should not normally include a dollar sign. */
1941
1942 struct internalvar *
1943 create_internalvar (const char *name)
1944 {
1945 struct internalvar *var;
1946
1947 var = (struct internalvar *) xmalloc (sizeof (struct internalvar));
1948 var->name = concat (name, (char *)NULL);
1949 var->kind = INTERNALVAR_VOID;
1950 var->next = internalvars;
1951 internalvars = var;
1952 return var;
1953 }
1954
1955 /* Create an internal variable with name NAME and register FUN as the
1956 function that value_of_internalvar uses to create a value whenever
1957 this variable is referenced. NAME should not normally include a
1958 dollar sign. DATA is passed uninterpreted to FUN when it is
1959 called. CLEANUP, if not NULL, is called when the internal variable
1960 is destroyed. It is passed DATA as its only argument. */
1961
1962 struct internalvar *
1963 create_internalvar_type_lazy (const char *name,
1964 const struct internalvar_funcs *funcs,
1965 void *data)
1966 {
1967 struct internalvar *var = create_internalvar (name);
1968
1969 var->kind = INTERNALVAR_MAKE_VALUE;
1970 var->u.make_value.functions = funcs;
1971 var->u.make_value.data = data;
1972 return var;
1973 }
1974
1975 /* See documentation in value.h. */
1976
1977 int
1978 compile_internalvar_to_ax (struct internalvar *var,
1979 struct agent_expr *expr,
1980 struct axs_value *value)
1981 {
1982 if (var->kind != INTERNALVAR_MAKE_VALUE
1983 || var->u.make_value.functions->compile_to_ax == NULL)
1984 return 0;
1985
1986 var->u.make_value.functions->compile_to_ax (var, expr, value,
1987 var->u.make_value.data);
1988 return 1;
1989 }
1990
1991 /* Look up an internal variable with name NAME. NAME should not
1992 normally include a dollar sign.
1993
1994 If the specified internal variable does not exist,
1995 one is created, with a void value. */
1996
1997 struct internalvar *
1998 lookup_internalvar (const char *name)
1999 {
2000 struct internalvar *var;
2001
2002 var = lookup_only_internalvar (name);
2003 if (var)
2004 return var;
2005
2006 return create_internalvar (name);
2007 }
2008
2009 /* Return current value of internal variable VAR. For variables that
2010 are not inherently typed, use a value type appropriate for GDBARCH. */
2011
2012 struct value *
2013 value_of_internalvar (struct gdbarch *gdbarch, struct internalvar *var)
2014 {
2015 struct value *val;
2016 struct trace_state_variable *tsv;
2017
2018 /* If there is a trace state variable of the same name, assume that
2019 is what we really want to see. */
2020 tsv = find_trace_state_variable (var->name);
2021 if (tsv)
2022 {
2023 tsv->value_known = target_get_trace_state_variable_value (tsv->number,
2024 &(tsv->value));
2025 if (tsv->value_known)
2026 val = value_from_longest (builtin_type (gdbarch)->builtin_int64,
2027 tsv->value);
2028 else
2029 val = allocate_value (builtin_type (gdbarch)->builtin_void);
2030 return val;
2031 }
2032
2033 switch (var->kind)
2034 {
2035 case INTERNALVAR_VOID:
2036 val = allocate_value (builtin_type (gdbarch)->builtin_void);
2037 break;
2038
2039 case INTERNALVAR_FUNCTION:
2040 val = allocate_value (builtin_type (gdbarch)->internal_fn);
2041 break;
2042
2043 case INTERNALVAR_INTEGER:
2044 if (!var->u.integer.type)
2045 val = value_from_longest (builtin_type (gdbarch)->builtin_int,
2046 var->u.integer.val);
2047 else
2048 val = value_from_longest (var->u.integer.type, var->u.integer.val);
2049 break;
2050
2051 case INTERNALVAR_STRING:
2052 val = value_cstring (var->u.string, strlen (var->u.string),
2053 builtin_type (gdbarch)->builtin_char);
2054 break;
2055
2056 case INTERNALVAR_VALUE:
2057 val = value_copy (var->u.value);
2058 if (value_lazy (val))
2059 value_fetch_lazy (val);
2060 break;
2061
2062 case INTERNALVAR_MAKE_VALUE:
2063 val = (*var->u.make_value.functions->make_value) (gdbarch, var,
2064 var->u.make_value.data);
2065 break;
2066
2067 default:
2068 internal_error (__FILE__, __LINE__, _("bad kind"));
2069 }
2070
2071 /* Change the VALUE_LVAL to lval_internalvar so that future operations
2072 on this value go back to affect the original internal variable.
2073
2074 Do not do this for INTERNALVAR_MAKE_VALUE variables, as those have
2075 no underlying modifiable state in the internal variable.
2076
2077 Likewise, if the variable's value is a computed lvalue, we want
2078 references to it to produce another computed lvalue, where
2079 references and assignments actually operate through the
2080 computed value's functions.
2081
2082 This means that internal variables with computed values
2083 behave a little differently from other internal variables:
2084 assignments to them don't just replace the previous value
2085 altogether. At the moment, this seems like the behavior we
2086 want. */
2087
2088 if (var->kind != INTERNALVAR_MAKE_VALUE
2089 && val->lval != lval_computed)
2090 {
2091 VALUE_LVAL (val) = lval_internalvar;
2092 VALUE_INTERNALVAR (val) = var;
2093 }
2094
2095 return val;
2096 }
2097
2098 int
2099 get_internalvar_integer (struct internalvar *var, LONGEST *result)
2100 {
2101 if (var->kind == INTERNALVAR_INTEGER)
2102 {
2103 *result = var->u.integer.val;
2104 return 1;
2105 }
2106
2107 if (var->kind == INTERNALVAR_VALUE)
2108 {
2109 struct type *type = check_typedef (value_type (var->u.value));
2110
2111 if (TYPE_CODE (type) == TYPE_CODE_INT)
2112 {
2113 *result = value_as_long (var->u.value);
2114 return 1;
2115 }
2116 }
2117
2118 return 0;
2119 }
2120
2121 static int
2122 get_internalvar_function (struct internalvar *var,
2123 struct internal_function **result)
2124 {
2125 switch (var->kind)
2126 {
2127 case INTERNALVAR_FUNCTION:
2128 *result = var->u.fn.function;
2129 return 1;
2130
2131 default:
2132 return 0;
2133 }
2134 }
2135
2136 void
2137 set_internalvar_component (struct internalvar *var, int offset, int bitpos,
2138 int bitsize, struct value *newval)
2139 {
2140 gdb_byte *addr;
2141
2142 switch (var->kind)
2143 {
2144 case INTERNALVAR_VALUE:
2145 addr = value_contents_writeable (var->u.value);
2146
2147 if (bitsize)
2148 modify_field (value_type (var->u.value), addr + offset,
2149 value_as_long (newval), bitpos, bitsize);
2150 else
2151 memcpy (addr + offset, value_contents (newval),
2152 TYPE_LENGTH (value_type (newval)));
2153 break;
2154
2155 default:
2156 /* We can never get a component of any other kind. */
2157 internal_error (__FILE__, __LINE__, _("set_internalvar_component"));
2158 }
2159 }
2160
2161 void
2162 set_internalvar (struct internalvar *var, struct value *val)
2163 {
2164 enum internalvar_kind new_kind;
2165 union internalvar_data new_data = { 0 };
2166
2167 if (var->kind == INTERNALVAR_FUNCTION && var->u.fn.canonical)
2168 error (_("Cannot overwrite convenience function %s"), var->name);
2169
2170 /* Prepare new contents. */
2171 switch (TYPE_CODE (check_typedef (value_type (val))))
2172 {
2173 case TYPE_CODE_VOID:
2174 new_kind = INTERNALVAR_VOID;
2175 break;
2176
2177 case TYPE_CODE_INTERNAL_FUNCTION:
2178 gdb_assert (VALUE_LVAL (val) == lval_internalvar);
2179 new_kind = INTERNALVAR_FUNCTION;
2180 get_internalvar_function (VALUE_INTERNALVAR (val),
2181 &new_data.fn.function);
2182 /* Copies created here are never canonical. */
2183 break;
2184
2185 default:
2186 new_kind = INTERNALVAR_VALUE;
2187 new_data.value = value_copy (val);
2188 new_data.value->modifiable = 1;
2189
2190 /* Force the value to be fetched from the target now, to avoid problems
2191 later when this internalvar is referenced and the target is gone or
2192 has changed. */
2193 if (value_lazy (new_data.value))
2194 value_fetch_lazy (new_data.value);
2195
2196 /* Release the value from the value chain to prevent it from being
2197 deleted by free_all_values. From here on this function should not
2198 call error () until new_data is installed into var->u, to avoid
2199 leaking memory. */
2200 release_value (new_data.value);
2201 break;
2202 }
2203
2204 /* Clean up old contents. */
2205 clear_internalvar (var);
2206
2207 /* Switch over. */
2208 var->kind = new_kind;
2209 var->u = new_data;
2210 /* End code which must not call error(). */
2211 }
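/* Note (illustrative, not from the original source): a CLI command such
   as "set $foo = 5" eventually reaches set_internalvar above with VAL
   holding the integer 5.  TYPE_CODE_INT falls into the default case, so
   $foo becomes an INTERNALVAR_VALUE holding a modifiable, non-lazy copy
   of the value, released from the value chain so free_all_values cannot
   delete it.  */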
2212
2213 void
2214 set_internalvar_integer (struct internalvar *var, LONGEST l)
2215 {
2216 /* Clean up old contents. */
2217 clear_internalvar (var);
2218
2219 var->kind = INTERNALVAR_INTEGER;
2220 var->u.integer.type = NULL;
2221 var->u.integer.val = l;
2222 }
2223
2224 void
2225 set_internalvar_string (struct internalvar *var, const char *string)
2226 {
2227 /* Clean up old contents. */
2228 clear_internalvar (var);
2229
2230 var->kind = INTERNALVAR_STRING;
2231 var->u.string = xstrdup (string);
2232 }
2233
2234 static void
2235 set_internalvar_function (struct internalvar *var, struct internal_function *f)
2236 {
2237 /* Clean up old contents. */
2238 clear_internalvar (var);
2239
2240 var->kind = INTERNALVAR_FUNCTION;
2241 var->u.fn.function = f;
2242 var->u.fn.canonical = 1;
2243 /* Variables installed here are always the canonical version. */
2244 }
2245
2246 void
2247 clear_internalvar (struct internalvar *var)
2248 {
2249 /* Clean up old contents. */
2250 switch (var->kind)
2251 {
2252 case INTERNALVAR_VALUE:
2253 value_free (var->u.value);
2254 break;
2255
2256 case INTERNALVAR_STRING:
2257 xfree (var->u.string);
2258 break;
2259
2260 case INTERNALVAR_MAKE_VALUE:
2261 if (var->u.make_value.functions->destroy != NULL)
2262 var->u.make_value.functions->destroy (var->u.make_value.data);
2263 break;
2264
2265 default:
2266 break;
2267 }
2268
2269 /* Reset to void kind. */
2270 var->kind = INTERNALVAR_VOID;
2271 }
2272
2273 char *
2274 internalvar_name (struct internalvar *var)
2275 {
2276 return var->name;
2277 }
2278
2279 static struct internal_function *
2280 create_internal_function (const char *name,
2281 internal_function_fn handler, void *cookie)
2282 {
2283 struct internal_function *ifn = XNEW (struct internal_function);
2284
2285 ifn->name = xstrdup (name);
2286 ifn->handler = handler;
2287 ifn->cookie = cookie;
2288 return ifn;
2289 }
2290
2291 char *
2292 value_internal_function_name (struct value *val)
2293 {
2294 struct internal_function *ifn;
2295 int result;
2296
2297 gdb_assert (VALUE_LVAL (val) == lval_internalvar);
2298 result = get_internalvar_function (VALUE_INTERNALVAR (val), &ifn);
2299 gdb_assert (result);
2300
2301 return ifn->name;
2302 }
2303
2304 struct value *
2305 call_internal_function (struct gdbarch *gdbarch,
2306 const struct language_defn *language,
2307 struct value *func, int argc, struct value **argv)
2308 {
2309 struct internal_function *ifn;
2310 int result;
2311
2312 gdb_assert (VALUE_LVAL (func) == lval_internalvar);
2313 result = get_internalvar_function (VALUE_INTERNALVAR (func), &ifn);
2314 gdb_assert (result);
2315
2316 return (*ifn->handler) (gdbarch, language, ifn->cookie, argc, argv);
2317 }
2318
2319 /* The 'function' command. This does nothing -- it is just a
2320 placeholder to let "help function NAME" work. This is also used as
2321 the implementation of the sub-command that is created when
2322 registering an internal function. */
2323 static void
2324 function_command (char *command, int from_tty)
2325 {
2326 /* Do nothing. */
2327 }
2328
2329 /* Clean up if an internal function's command is destroyed. */
2330 static void
2331 function_destroyer (struct cmd_list_element *self, void *ignore)
2332 {
2333 xfree ((char *) self->name);
2334 xfree (self->doc);
2335 }
2336
2337 /* Add a new internal function. NAME is the name of the function; DOC
2338 is a documentation string describing the function. HANDLER is
2339 called when the function is invoked. COOKIE is an arbitrary
2340 pointer which is passed to HANDLER and is intended for "user
2341 data". */
2342 void
2343 add_internal_function (const char *name, const char *doc,
2344 internal_function_fn handler, void *cookie)
2345 {
2346 struct cmd_list_element *cmd;
2347 struct internal_function *ifn;
2348 struct internalvar *var = lookup_internalvar (name);
2349
2350 ifn = create_internal_function (name, handler, cookie);
2351 set_internalvar_function (var, ifn);
2352
2353 cmd = add_cmd (xstrdup (name), no_class, function_command, (char *) doc,
2354 &functionlist);
2355 cmd->destroyer = function_destroyer;
2356 }
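/* Example (illustrative): the $_isvoid convenience function implemented
   at the end of this file (isvoid_internal_fn) is registered through
   this interface; such a registration would look roughly like

       add_internal_function ("_isvoid", _("Check whether an "
                                           "expression is void."),
                              isvoid_internal_fn, NULL);

   which creates both the $_isvoid convenience function and a
   "help function _isvoid" entry via the function_command placeholder
   above.  */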
2357
2358 /* Update VALUE before discarding OBJFILE. COPIED_TYPES is used to
2359 prevent cycles / duplicates. */
2360
2361 void
2362 preserve_one_value (struct value *value, struct objfile *objfile,
2363 htab_t copied_types)
2364 {
2365 if (TYPE_OBJFILE (value->type) == objfile)
2366 value->type = copy_type_recursive (objfile, value->type, copied_types);
2367
2368 if (TYPE_OBJFILE (value->enclosing_type) == objfile)
2369 value->enclosing_type = copy_type_recursive (objfile,
2370 value->enclosing_type,
2371 copied_types);
2372 }
2373
2374 /* Likewise for internal variable VAR. */
2375
2376 static void
2377 preserve_one_internalvar (struct internalvar *var, struct objfile *objfile,
2378 htab_t copied_types)
2379 {
2380 switch (var->kind)
2381 {
2382 case INTERNALVAR_INTEGER:
2383 if (var->u.integer.type && TYPE_OBJFILE (var->u.integer.type) == objfile)
2384 var->u.integer.type
2385 = copy_type_recursive (objfile, var->u.integer.type, copied_types);
2386 break;
2387
2388 case INTERNALVAR_VALUE:
2389 preserve_one_value (var->u.value, objfile, copied_types);
2390 break;
2391 }
2392 }
2393
2394 /* Update the internal variables and value history when OBJFILE is
2395 discarded; we must copy the types out of the objfile. New global types
2396 will be created for every convenience variable which currently points to
2397 this objfile's types, and the convenience variables will be adjusted to
2398 use the new global types. */
2399
2400 void
2401 preserve_values (struct objfile *objfile)
2402 {
2403 htab_t copied_types;
2404 struct value_history_chunk *cur;
2405 struct internalvar *var;
2406 int i;
2407
2408 /* Create the hash table. We allocate on the objfile's obstack, since
2409 it is soon to be deleted. */
2410 copied_types = create_copied_types_hash (objfile);
2411
2412 for (cur = value_history_chain; cur; cur = cur->next)
2413 for (i = 0; i < VALUE_HISTORY_CHUNK; i++)
2414 if (cur->values[i])
2415 preserve_one_value (cur->values[i], objfile, copied_types);
2416
2417 for (var = internalvars; var; var = var->next)
2418 preserve_one_internalvar (var, objfile, copied_types);
2419
2420 preserve_ext_lang_values (objfile, copied_types);
2421
2422 htab_delete (copied_types);
2423 }
2424
2425 static void
2426 show_convenience (char *ignore, int from_tty)
2427 {
2428 struct gdbarch *gdbarch = get_current_arch ();
2429 struct internalvar *var;
2430 int varseen = 0;
2431 struct value_print_options opts;
2432
2433 get_user_print_options (&opts);
2434 for (var = internalvars; var; var = var->next)
2435 {
2436 volatile struct gdb_exception ex;
2437
2438 if (!varseen)
2439 {
2440 varseen = 1;
2441 }
2442 printf_filtered (("$%s = "), var->name);
2443
2444 TRY_CATCH (ex, RETURN_MASK_ERROR)
2445 {
2446 struct value *val;
2447
2448 val = value_of_internalvar (gdbarch, var);
2449 value_print (val, gdb_stdout, &opts);
2450 }
2451 if (ex.reason < 0)
2452 fprintf_filtered (gdb_stdout, _("<error: %s>"), ex.message);
2453 printf_filtered (("\n"));
2454 }
2455 if (!varseen)
2456 {
2457 /* This text does not mention convenience functions on purpose.
2458 The user can't create them except via Python, and if Python support
2459 is installed this message will never be printed ($_streq will
2460 exist). */
2461 printf_unfiltered (_("No debugger convenience variables now defined.\n"
2462 "Convenience variables have "
2463 "names starting with \"$\";\n"
2464 "use \"set\" as in \"set "
2465 "$foo = 5\" to define them.\n"));
2466 }
2467 }
2468 \f
2469 /* Return the TYPE_CODE_XMETHOD value corresponding to WORKER. */
2470
2471 struct value *
2472 value_of_xmethod (struct xmethod_worker *worker)
2473 {
2474 if (worker->value == NULL)
2475 {
2476 struct value *v;
2477
2478 v = allocate_value (builtin_type (target_gdbarch ())->xmethod);
2479 v->lval = lval_xcallable;
2480 v->location.xm_worker = worker;
2481 v->modifiable = 0;
2482 worker->value = v;
2483 }
2484
2485 return worker->value;
2486 }
2487
2488 /* Call the xmethod corresponding to the TYPE_CODE_XMETHOD value METHOD. */
2489
2490 struct value *
2491 call_xmethod (struct value *method, int argc, struct value **argv)
2492 {
2493 gdb_assert (TYPE_CODE (value_type (method)) == TYPE_CODE_XMETHOD
2494 && method->lval == lval_xcallable && argc > 0);
2495
2496 return invoke_xmethod (method->location.xm_worker,
2497 argv[0], argv + 1, argc - 1);
2498 }
2499 \f
2500 /* Extract a value as a C number (either long or double).
2501 Knows how to convert fixed values to double, or
2502 floating values to long.
2503 Does not deallocate the value. */
2504
2505 LONGEST
2506 value_as_long (struct value *val)
2507 {
2508 /* This coerces arrays and functions, which is necessary (e.g.
2509 in disassemble_command). It also dereferences references, which
2510 I suspect is the most logical thing to do. */
2511 val = coerce_array (val);
2512 return unpack_long (value_type (val), value_contents (val));
2513 }
2514
2515 DOUBLEST
2516 value_as_double (struct value *val)
2517 {
2518 DOUBLEST foo;
2519 int inv;
2520
2521 foo = unpack_double (value_type (val), value_contents (val), &inv);
2522 if (inv)
2523 error (_("Invalid floating value found in program."));
2524 return foo;
2525 }
2526
2527 /* Extract a value as a C pointer. Does not deallocate the value.
2528 Note that val's type may not actually be a pointer; value_as_long
2529 handles all the cases. */
2530 CORE_ADDR
2531 value_as_address (struct value *val)
2532 {
2533 struct gdbarch *gdbarch = get_type_arch (value_type (val));
2534
2535 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
2536 whether we want this to be true eventually. */
2537 #if 0
2538 /* gdbarch_addr_bits_remove is wrong if we are being called for a
2539 non-address (e.g. argument to "signal", "info break", etc.), or
2540 for pointers to char, in which the low bits *are* significant. */
2541 return gdbarch_addr_bits_remove (gdbarch, value_as_long (val));
2542 #else
2543
2544 /* There are several targets (IA-64, PowerPC, and others) which
2545 don't represent pointers to functions as simply the address of
2546 the function's entry point. For example, on the IA-64, a
2547 function pointer points to a two-word descriptor, generated by
2548 the linker, which contains the function's entry point, and the
2549 value the IA-64 "global pointer" register should have --- to
2550 support position-independent code. The linker generates
2551 descriptors only for those functions whose addresses are taken.
2552
2553 On such targets, it's difficult for GDB to convert an arbitrary
2554 function address into a function pointer; it has to either find
2555 an existing descriptor for that function, or call malloc and
2556 build its own. On some targets, it is impossible for GDB to
2557 build a descriptor at all: the descriptor must contain a jump
2558 instruction; data memory cannot be executed; and code memory
2559 cannot be modified.
2560
2561 Upon entry to this function, if VAL is a value of type `function'
2562 (that is, TYPE_CODE (VALUE_TYPE (val)) == TYPE_CODE_FUNC), then
2563 value_address (val) is the address of the function. This is what
2564 you'll get if you evaluate an expression like `main'. The call
2565 to COERCE_ARRAY below actually does all the usual unary
2566 conversions, which includes converting values of type `function'
2567 to `pointer to function'. This is the challenging conversion
2568 discussed above. Then, `unpack_long' will convert that pointer
2569 back into an address.
2570
2571 So, suppose the user types `disassemble foo' on an architecture
2572 with a strange function pointer representation, on which GDB
2573 cannot build its own descriptors, and suppose further that `foo'
2574 has no linker-built descriptor. The address->pointer conversion
2575 will signal an error and prevent the command from running, even
2576 though the next step would have been to convert the pointer
2577 directly back into the same address.
2578
2579 The following shortcut avoids this whole mess. If VAL is a
2580 function, just return its address directly. */
2581 if (TYPE_CODE (value_type (val)) == TYPE_CODE_FUNC
2582 || TYPE_CODE (value_type (val)) == TYPE_CODE_METHOD)
2583 return value_address (val);
2584
2585 val = coerce_array (val);
2586
2587 /* Some architectures (e.g. Harvard) map instruction and data
2588 addresses onto a single large unified address space. For
2589 instance: an architecture may consider a large integer in the
2590 range 0x10000000 .. 0x1000ffff to already represent a data
2591 address (and hence need no pointer-to-address conversion) while
2592 a small integer would still need the integer-to-pointer-to-address
2593 conversion. Just assume such architectures handle all
2594 integer conversions in a single function. */
2595
2596 /* JimB writes:
2597
2598 I think INTEGER_TO_ADDRESS is a good idea as proposed --- but we
2599 must admonish GDB hackers to make sure its behavior matches the
2600 compiler's, whenever possible.
2601
2602 In general, I think GDB should evaluate expressions the same way
2603 the compiler does. When the user copies an expression out of
2604 their source code and hands it to a `print' command, they should
2605 get the same value the compiler would have computed. Any
2606 deviation from this rule can cause major confusion and annoyance,
2607 and needs to be justified carefully. In other words, GDB doesn't
2608 really have the freedom to do these conversions in clever and
2609 useful ways.
2610
2611 AndrewC pointed out that users aren't complaining about how GDB
2612 casts integers to pointers; they are complaining that they can't
2613 take an address from a disassembly listing and give it to `x/i'.
2614 This is certainly important.
2615
2616 Adding an architecture method like integer_to_address() certainly
2617 makes it possible for GDB to "get it right" in all circumstances
2618 --- the target has complete control over how things get done, so
2619 people can Do The Right Thing for their target without breaking
2620 anyone else. The standard doesn't specify how integers get
2621 converted to pointers; usually, the ABI doesn't either, but
2622 ABI-specific code is a more reasonable place to handle it. */
2623
2624 if (TYPE_CODE (value_type (val)) != TYPE_CODE_PTR
2625 && TYPE_CODE (value_type (val)) != TYPE_CODE_REF
2626 && gdbarch_integer_to_address_p (gdbarch))
2627 return gdbarch_integer_to_address (gdbarch, value_type (val),
2628 value_contents (val));
2629
2630 return unpack_long (value_type (val), value_contents (val));
2631 #endif
2632 }
2633 \f
2634 /* Unpack raw data (copied from the debuggee, target byte order) at VALADDR
2635 as a long, or as a double, assuming the raw data is described
2636 by type TYPE. Knows how to convert different sizes of values
2637 and can convert between fixed and floating point. We don't assume
2638 any alignment for the raw data. Return value is in host byte order.
2639
2640 If you want functions and arrays to be coerced to pointers, and
2641 references to be dereferenced, call value_as_long() instead.
2642
2643 C++: It is assumed that the front-end has taken care of
2644 all matters concerning pointers to members. A pointer
2645 to member which reaches here is considered to be equivalent
2646 to an INT (of some size). After all, it is only an offset. */
2647
2648 LONGEST
2649 unpack_long (struct type *type, const gdb_byte *valaddr)
2650 {
2651 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
2652 enum type_code code = TYPE_CODE (type);
2653 int len = TYPE_LENGTH (type);
2654 int nosign = TYPE_UNSIGNED (type);
2655
2656 switch (code)
2657 {
2658 case TYPE_CODE_TYPEDEF:
2659 return unpack_long (check_typedef (type), valaddr);
2660 case TYPE_CODE_ENUM:
2661 case TYPE_CODE_FLAGS:
2662 case TYPE_CODE_BOOL:
2663 case TYPE_CODE_INT:
2664 case TYPE_CODE_CHAR:
2665 case TYPE_CODE_RANGE:
2666 case TYPE_CODE_MEMBERPTR:
2667 if (nosign)
2668 return extract_unsigned_integer (valaddr, len, byte_order);
2669 else
2670 return extract_signed_integer (valaddr, len, byte_order);
2671
2672 case TYPE_CODE_FLT:
2673 return extract_typed_floating (valaddr, type);
2674
2675 case TYPE_CODE_DECFLOAT:
2676 /* libdecnumber has a function to convert from decimal to integer, but
2677 it doesn't work when the decimal number has a fractional part. */
2678 return decimal_to_doublest (valaddr, len, byte_order);
2679
2680 case TYPE_CODE_PTR:
2681 case TYPE_CODE_REF:
2682 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
2683 whether we want this to be true eventually. */
2684 return extract_typed_address (valaddr, type);
2685
2686 default:
2687 error (_("Value can't be converted to integer."));
2688 }
2689 return 0; /* Placate lint. */
2690 }
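/* Example for unpack_long above (illustrative): a 2-byte unsigned
   integer type whose raw bytes are { 0x12, 0x34 } unpacks to 0x1234 on
   a big-endian target and to 0x3412 on a little-endian target, as
   determined by extract_unsigned_integer and the byte order of the
   type's architecture.  */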
2691
2692 /* Return a double value from the specified type and address.
2693 INVP points to an int which is set to 0 for valid value,
2694 1 for invalid value (bad float format). In either case,
2695 the returned double is OK to use. Argument is in target
2696 format, result is in host format. */
2697
2698 DOUBLEST
2699 unpack_double (struct type *type, const gdb_byte *valaddr, int *invp)
2700 {
2701 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
2702 enum type_code code;
2703 int len;
2704 int nosign;
2705
2706 *invp = 0; /* Assume valid. */
2707 CHECK_TYPEDEF (type);
2708 code = TYPE_CODE (type);
2709 len = TYPE_LENGTH (type);
2710 nosign = TYPE_UNSIGNED (type);
2711 if (code == TYPE_CODE_FLT)
2712 {
2713 /* NOTE: cagney/2002-02-19: There was a test here to see if the
2714 floating-point value was valid (using the macro
2715 INVALID_FLOAT). That test/macro have been removed.
2716
2717 It turns out that only the VAX defined this macro and then
2718 only in a non-portable way. Fixing the portability problem
2719 wouldn't help since the VAX floating-point code is also badly
2720 bit-rotten. The target needs to add definitions for the
2721 methods gdbarch_float_format and gdbarch_double_format - these
2722 exactly describe the target floating-point format. The
2723 problem here is that the corresponding floatformat_vax_f and
2724 floatformat_vax_d values these methods should be set to are
2725 also not defined. Oops!
2726
2727 Hopefully someone will add both the missing floatformat
2728 definitions and the new cases for floatformat_is_valid (). */
2729
2730 if (!floatformat_is_valid (floatformat_from_type (type), valaddr))
2731 {
2732 *invp = 1;
2733 return 0.0;
2734 }
2735
2736 return extract_typed_floating (valaddr, type);
2737 }
2738 else if (code == TYPE_CODE_DECFLOAT)
2739 return decimal_to_doublest (valaddr, len, byte_order);
2740 else if (nosign)
2741 {
2742 /* Unsigned -- be sure we compensate for signed LONGEST. */
2743 return (ULONGEST) unpack_long (type, valaddr);
2744 }
2745 else
2746 {
2747 /* Signed -- we are OK with unpack_long. */
2748 return unpack_long (type, valaddr);
2749 }
2750 }
2751
2752 /* Unpack raw data (copied from the debuggee, target byte order) at VALADDR
2753 as a CORE_ADDR, assuming the raw data is described by type TYPE.
2754 We don't assume any alignment for the raw data. Return value is in
2755 host byte order.
2756
2757 If you want functions and arrays to be coerced to pointers, and
2758 references to be dereferenced, call value_as_address() instead.
2759
2760 C++: It is assumed that the front-end has taken care of
2761 all matters concerning pointers to members. A pointer
2762 to member which reaches here is considered to be equivalent
2763 to an INT (of some size). After all, it is only an offset. */
2764
2765 CORE_ADDR
2766 unpack_pointer (struct type *type, const gdb_byte *valaddr)
2767 {
2768 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
2769 whether we want this to be true eventually. */
2770 return unpack_long (type, valaddr);
2771 }
2772
2773 \f
2774 /* Get the value of the FIELDNO'th field (which must be static) of
2775 TYPE. */
2776
2777 struct value *
2778 value_static_field (struct type *type, int fieldno)
2779 {
2780 struct value *retval;
2781
2782 switch (TYPE_FIELD_LOC_KIND (type, fieldno))
2783 {
2784 case FIELD_LOC_KIND_PHYSADDR:
2785 retval = value_at_lazy (TYPE_FIELD_TYPE (type, fieldno),
2786 TYPE_FIELD_STATIC_PHYSADDR (type, fieldno));
2787 break;
2788 case FIELD_LOC_KIND_PHYSNAME:
2789 {
2790 const char *phys_name = TYPE_FIELD_STATIC_PHYSNAME (type, fieldno);
2791 /* TYPE_FIELD_NAME (type, fieldno); */
2792 struct symbol *sym = lookup_symbol (phys_name, 0, VAR_DOMAIN, 0);
2793
2794 if (sym == NULL)
2795 {
2796 /* With some compilers, e.g. HP aCC, static data members are
2797 reported as non-debuggable symbols. */
2798 struct bound_minimal_symbol msym
2799 = lookup_minimal_symbol (phys_name, NULL, NULL);
2800
2801 if (!msym.minsym)
2802 return allocate_optimized_out_value (type);
2803 else
2804 {
2805 retval = value_at_lazy (TYPE_FIELD_TYPE (type, fieldno),
2806 BMSYMBOL_VALUE_ADDRESS (msym));
2807 }
2808 }
2809 else
2810 retval = value_of_variable (sym, NULL);
2811 break;
2812 }
2813 default:
2814 gdb_assert_not_reached ("unexpected field location kind");
2815 }
2816
2817 return retval;
2818 }
2819
2820 /* Change the enclosing type of a value object VAL to NEW_ENCL_TYPE.
2821 You have to be careful here, since the size of the data area for the value
2822 is set by the length of the enclosing type. So if NEW_ENCL_TYPE is bigger
2823 than the old enclosing type, you have to allocate more space for the
2824 data. */
2825
2826 void
2827 set_value_enclosing_type (struct value *val, struct type *new_encl_type)
2828 {
2829 if (TYPE_LENGTH (new_encl_type) > TYPE_LENGTH (value_enclosing_type (val)))
2830 val->contents =
2831 (gdb_byte *) xrealloc (val->contents, TYPE_LENGTH (new_encl_type));
2832
2833 val->enclosing_type = new_encl_type;
2834 }
2835
2836 /* Given a value ARG1 (offset by OFFSET bytes)
2837 of a struct or union type ARG_TYPE,
2838 extract and return the value of one of its (non-static) fields.
2839 FIELDNO says which field. */
2840
2841 struct value *
2842 value_primitive_field (struct value *arg1, int offset,
2843 int fieldno, struct type *arg_type)
2844 {
2845 struct value *v;
2846 struct type *type;
2847
2848 CHECK_TYPEDEF (arg_type);
2849 type = TYPE_FIELD_TYPE (arg_type, fieldno);
2850
2851 /* Call check_typedef on our type to make sure that, if TYPE
2852 is a TYPE_CODE_TYPEDEF, its length is set to the length
2853 of the target type instead of zero. However, we do not
2854 replace the typedef type by the target type, because we want
2855 to keep the typedef in order to be able to print the type
2856 description correctly. */
2857 check_typedef (type);
2858
2859 if (TYPE_FIELD_BITSIZE (arg_type, fieldno))
2860 {
2861 /* Handle packed fields.
2862
2863 Create a new value for the bitfield, with bitpos and bitsize
2864 set. If possible, arrange offset and bitpos so that we can
2865 do a single aligned read of the size of the containing type.
2866 Otherwise, adjust offset to the byte containing the first
2867 bit. Assume that the address, offset, and embedded offset
2868 are sufficiently aligned. */
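/* Illustrative example (not from the original source): with a
   32-bit container type, a 3-bit field at BITPOS 37 satisfies
   (37 % 32) + 3 <= 32, so v->bitpos becomes 37 % 32 == 5 and the
   offset computed below advances by (37 - 5) / 8 == 4 bytes,
   allowing a single aligned read of the containing word.  */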
2869
2870 int bitpos = TYPE_FIELD_BITPOS (arg_type, fieldno);
2871 int container_bitsize = TYPE_LENGTH (type) * 8;
2872
2873 if (arg1->optimized_out)
2874 v = allocate_optimized_out_value (type);
2875 else
2876 {
2877 v = allocate_value_lazy (type);
2878 v->bitsize = TYPE_FIELD_BITSIZE (arg_type, fieldno);
2879 if ((bitpos % container_bitsize) + v->bitsize <= container_bitsize
2880 && TYPE_LENGTH (type) <= (int) sizeof (LONGEST))
2881 v->bitpos = bitpos % container_bitsize;
2882 else
2883 v->bitpos = bitpos % 8;
2884 v->offset = (value_embedded_offset (arg1)
2885 + offset
2886 + (bitpos - v->bitpos) / 8);
2887 set_value_parent (v, arg1);
2888 if (!value_lazy (arg1))
2889 value_fetch_lazy (v);
2890 }
2891 }
2892 else if (fieldno < TYPE_N_BASECLASSES (arg_type))
2893 {
2894 /* This field is actually a base subobject, so preserve the
2895 entire object's contents for later references to virtual
2896 bases, etc. */
2897 int boffset;
2898
2899 /* Lazy register values with offsets are not supported. */
2900 if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
2901 value_fetch_lazy (arg1);
2902
2903 /* The optimized_out flag is only set correctly once a lazy value is
2904 loaded; having just loaded some lazy values, we should check the
2905 optimized-out case now. */
2906 if (arg1->optimized_out)
2907 v = allocate_optimized_out_value (type);
2908 else
2909 {
2910 /* We special case virtual inheritance here because this
2911 requires access to the contents, which we would rather avoid
2912 for references to ordinary fields of unavailable values. */
2913 if (BASETYPE_VIA_VIRTUAL (arg_type, fieldno))
2914 boffset = baseclass_offset (arg_type, fieldno,
2915 value_contents (arg1),
2916 value_embedded_offset (arg1),
2917 value_address (arg1),
2918 arg1);
2919 else
2920 boffset = TYPE_FIELD_BITPOS (arg_type, fieldno) / 8;
2921
2922 if (value_lazy (arg1))
2923 v = allocate_value_lazy (value_enclosing_type (arg1));
2924 else
2925 {
2926 v = allocate_value (value_enclosing_type (arg1));
2927 value_contents_copy_raw (v, 0, arg1, 0,
2928 TYPE_LENGTH (value_enclosing_type (arg1)));
2929 }
2930 v->type = type;
2931 v->offset = value_offset (arg1);
2932 v->embedded_offset = offset + value_embedded_offset (arg1) + boffset;
2933 }
2934 }
2935 else
2936 {
2937 /* Plain old data member */
2938 offset += TYPE_FIELD_BITPOS (arg_type, fieldno) / 8;
2939
2940 /* Lazy register values with offsets are not supported. */
2941 if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
2942 value_fetch_lazy (arg1);
2943
2944 /* The optimized_out flag is only set correctly once a lazy value is
2945 loaded; having just loaded some lazy values, we should check for
2946 the optimized-out case now. */
2947 if (arg1->optimized_out)
2948 v = allocate_optimized_out_value (type);
2949 else if (value_lazy (arg1))
2950 v = allocate_value_lazy (type);
2951 else
2952 {
2953 v = allocate_value (type);
2954 value_contents_copy_raw (v, value_embedded_offset (v),
2955 arg1, value_embedded_offset (arg1) + offset,
2956 TYPE_LENGTH (type));
2957 }
2958 v->offset = (value_offset (arg1) + offset
2959 + value_embedded_offset (arg1));
2960 }
2961 set_value_component_location (v, arg1);
2962 VALUE_REGNUM (v) = VALUE_REGNUM (arg1);
2963 VALUE_FRAME_ID (v) = VALUE_FRAME_ID (arg1);
2964 return v;
2965 }
2966
2967 /* Given a value ARG1 of a struct or union type,
2968 extract and return the value of one of its (non-static) fields.
2969 FIELDNO says which field. */
2970
2971 struct value *
2972 value_field (struct value *arg1, int fieldno)
2973 {
2974 return value_primitive_field (arg1, 0, fieldno, value_type (arg1));
2975 }
2976
2977 /* Return a non-virtual function as a value.
2978 F is the list of member functions which contains the desired method.
2979 J is an index into F which provides the desired method.
2980
2981 We only use the symbol for its address, so be happy with either a
2982 full symbol or a minimal symbol. */
2983
2984 struct value *
2985 value_fn_field (struct value **arg1p, struct fn_field *f,
2986 int j, struct type *type,
2987 int offset)
2988 {
2989 struct value *v;
2990 struct type *ftype = TYPE_FN_FIELD_TYPE (f, j);
2991 const char *physname = TYPE_FN_FIELD_PHYSNAME (f, j);
2992 struct symbol *sym;
2993 struct bound_minimal_symbol msym;
2994
2995 sym = lookup_symbol (physname, 0, VAR_DOMAIN, 0);
2996 if (sym != NULL)
2997 {
2998 memset (&msym, 0, sizeof (msym));
2999 }
3000 else
3001 {
3002 gdb_assert (sym == NULL);
3003 msym = lookup_bound_minimal_symbol (physname);
3004 if (msym.minsym == NULL)
3005 return NULL;
3006 }
3007
3008 v = allocate_value (ftype);
3009 if (sym)
3010 {
3011 set_value_address (v, BLOCK_START (SYMBOL_BLOCK_VALUE (sym)));
3012 }
3013 else
3014 {
3015 /* The minimal symbol might point to a function descriptor;
3016 resolve it to the actual code address instead. */
3017 struct objfile *objfile = msym.objfile;
3018 struct gdbarch *gdbarch = get_objfile_arch (objfile);
3019
3020 set_value_address (v,
3021 gdbarch_convert_from_func_ptr_addr
3022 (gdbarch, BMSYMBOL_VALUE_ADDRESS (msym), &current_target));
3023 }
3024
3025 if (arg1p)
3026 {
3027 if (type != value_type (*arg1p))
3028 *arg1p = value_ind (value_cast (lookup_pointer_type (type),
3029 value_addr (*arg1p)));
3030
3031 /* Move the `this' pointer according to the offset.
3032 VALUE_OFFSET (*arg1p) += offset; */
3033 }
3034
3035 return v;
3036 }
3037
3038 \f
3039
3040 /* Helper function for both unpack_value_bits_as_long and
3041 unpack_bits_as_long. See those functions for more details on the
3042 interface; the only difference is that this function accepts either
3043 a NULL or a non-NULL ORIGINAL_VALUE. */
3044
3045 static int
3046 unpack_value_bits_as_long_1 (struct type *field_type, const gdb_byte *valaddr,
3047 int embedded_offset, int bitpos, int bitsize,
3048 const struct value *original_value,
3049 LONGEST *result)
3050 {
3051 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (field_type));
3052 ULONGEST val;
3053 ULONGEST valmask;
3054 int lsbcount;
3055 int bytes_read;
3056 int read_offset;
3057
3058 /* Read the minimum number of bytes required; there may not be
3059 enough bytes to read an entire ULONGEST. */
3060 CHECK_TYPEDEF (field_type);
3061 if (bitsize)
3062 bytes_read = ((bitpos % 8) + bitsize + 7) / 8;
3063 else
3064 bytes_read = TYPE_LENGTH (field_type);
3065
3066 read_offset = bitpos / 8;
3067
3068 if (original_value != NULL
3069 && !value_bits_available (original_value, embedded_offset + bitpos,
3070 bitsize))
3071 return 0;
3072
3073 val = extract_unsigned_integer (valaddr + embedded_offset + read_offset,
3074 bytes_read, byte_order);
3075
3076 /* Extract bits. See comment above. */
3077
3078 if (gdbarch_bits_big_endian (get_type_arch (field_type)))
3079 lsbcount = (bytes_read * 8 - bitpos % 8 - bitsize);
3080 else
3081 lsbcount = (bitpos % 8);
3082 val >>= lsbcount;
3083
3084 /* If the field does not entirely fill a LONGEST, then zero the sign bits.
3085 If the field is signed, and is negative, then sign extend. */
3086
3087 if ((bitsize > 0) && (bitsize < 8 * (int) sizeof (val)))
3088 {
3089 valmask = (((ULONGEST) 1) << bitsize) - 1;
3090 val &= valmask;
3091 if (!TYPE_UNSIGNED (field_type))
3092 {
3093 if (val & (valmask ^ (valmask >> 1)))
3094 {
3095 val |= ~valmask;
3096 }
3097 }
3098 }
3099
3100 *result = val;
3101 return 1;
3102 }
3103
3104 /* Unpack a bitfield of the specified FIELD_TYPE, from the object at
3105 VALADDR + EMBEDDED_OFFSET, and store the result in *RESULT.
3106 VALADDR points to the contents of ORIGINAL_VALUE, which must not be
3107 NULL. The bitfield starts at BITPOS bits and contains BITSIZE
3108 bits.
3109
3110 Returns false if the value contents are unavailable, otherwise
3111 returns true, indicating a valid value has been stored in *RESULT.
3112
3113 Extracting bits depends on endianness of the machine. Compute the
3114 number of least significant bits to discard. For big endian machines,
3115 we compute the total number of bits in the anonymous object, subtract
3116 off the bit count from the MSB of the object to the MSB of the
3117 bitfield, then the size of the bitfield, which leaves the LSB discard
3118 count. For little endian machines, the discard count is simply the
3119 number of bits from the LSB of the anonymous object to the LSB of the
3120 bitfield.
3121
3122 If the field is signed, we also do sign extension. */
3123
3124 int
3125 unpack_value_bits_as_long (struct type *field_type, const gdb_byte *valaddr,
3126 int embedded_offset, int bitpos, int bitsize,
3127 const struct value *original_value,
3128 LONGEST *result)
3129 {
3130 gdb_assert (original_value != NULL);
3131
3132 return unpack_value_bits_as_long_1 (field_type, valaddr, embedded_offset,
3133 bitpos, bitsize, original_value, result);
3134
3135 }
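/* Worked example for the bit extraction described above (illustrative):
   with BITPOS == 9 and BITSIZE == 3, bytes_read is
   ((9 % 8) + 3 + 7) / 8 == 1 and read_offset is 9 / 8 == 1.  On a
   bits-big-endian architecture lsbcount is 1 * 8 - 9 % 8 - 3 == 4,
   while on a bits-little-endian architecture lsbcount is 9 % 8 == 1;
   the extracted word is then shifted right by lsbcount and masked down
   to 3 bits, with sign extension applied for signed FIELD_TYPEs.  */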
3136
3137 /* Unpack a field FIELDNO of the specified TYPE, from the object at
3138 VALADDR + EMBEDDED_OFFSET. VALADDR points to the contents of
3139 ORIGINAL_VALUE. See unpack_value_bits_as_long for more
3140 details. */
3141
3142 static int
3143 unpack_value_field_as_long_1 (struct type *type, const gdb_byte *valaddr,
3144 int embedded_offset, int fieldno,
3145 const struct value *val, LONGEST *result)
3146 {
3147 int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3148 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3149 struct type *field_type = TYPE_FIELD_TYPE (type, fieldno);
3150
3151 return unpack_value_bits_as_long_1 (field_type, valaddr, embedded_offset,
3152 bitpos, bitsize, val,
3153 result);
3154 }
3155
3156 /* Unpack a field FIELDNO of the specified TYPE, from the object at
3157 VALADDR + EMBEDDED_OFFSET. VALADDR points to the contents of
3158 ORIGINAL_VALUE, which must not be NULL. See
3159 unpack_value_bits_as_long for more details. */
3160
3161 int
3162 unpack_value_field_as_long (struct type *type, const gdb_byte *valaddr,
3163 int embedded_offset, int fieldno,
3164 const struct value *val, LONGEST *result)
3165 {
3166 gdb_assert (val != NULL);
3167
3168 return unpack_value_field_as_long_1 (type, valaddr, embedded_offset,
3169 fieldno, val, result);
3170 }
3171
3172 /* Unpack a field FIELDNO of the specified TYPE, from the anonymous
3173 object at VALADDR. See unpack_value_bits_as_long for more details.
3174 This function differs from unpack_value_field_as_long in that it
3175 operates without a struct value object. */
3176
3177 LONGEST
3178 unpack_field_as_long (struct type *type, const gdb_byte *valaddr, int fieldno)
3179 {
3180 LONGEST result;
3181
3182 unpack_value_field_as_long_1 (type, valaddr, 0, fieldno, NULL, &result);
3183 return result;
3184 }
3185
3186 /* Return a new value with type TYPE, which is FIELDNO field of the
3187 object at VALADDR + EMBEDDEDOFFSET. VALADDR points to the contents
3188 of VAL. If the VAL's contents required to extract the bitfield
3189 from are unavailable, the new value is correspondingly marked as
3190 unavailable. */
3191
3192 struct value *
3193 value_field_bitfield (struct type *type, int fieldno,
3194 const gdb_byte *valaddr,
3195 int embedded_offset, const struct value *val)
3196 {
3197 LONGEST l;
3198
3199 if (!unpack_value_field_as_long (type, valaddr, embedded_offset, fieldno,
3200 val, &l))
3201 {
3202 struct type *field_type = TYPE_FIELD_TYPE (type, fieldno);
3203 struct value *retval = allocate_value (field_type);
3204 mark_value_bytes_unavailable (retval, 0, TYPE_LENGTH (field_type));
3205 return retval;
3206 }
3207 else
3208 {
3209 return value_from_longest (TYPE_FIELD_TYPE (type, fieldno), l);
3210 }
3211 }
3212
3213 /* Modify the value of a bitfield. ADDR points to a block of memory in
3214 target byte order; the bitfield starts in the byte pointed to. FIELDVAL
3215 is the desired value of the field, in host byte order. BITPOS and BITSIZE
3216 indicate which bits (in target bit order) comprise the bitfield.
3217 Requires 0 < BITSIZE <= lbits, 0 <= BITPOS % 8 + BITSIZE <= lbits, and
3218 0 <= BITPOS, where lbits is the size of a LONGEST in bits. */
3219
3220 void
3221 modify_field (struct type *type, gdb_byte *addr,
3222 LONGEST fieldval, int bitpos, int bitsize)
3223 {
3224 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
3225 ULONGEST oword;
3226 ULONGEST mask = (ULONGEST) -1 >> (8 * sizeof (ULONGEST) - bitsize);
3227 int bytesize;
3228
3229 /* Normalize BITPOS. */
3230 addr += bitpos / 8;
3231 bitpos %= 8;
3232
3233 /* If a negative fieldval fits in the field in question, chop
3234 off the sign extension bits. */
3235 if ((~fieldval & ~(mask >> 1)) == 0)
3236 fieldval &= mask;
3237
3238 /* Warn if value is too big to fit in the field in question. */
3239 if (0 != (fieldval & ~mask))
3240 {
3241 /* FIXME: would like to include fieldval in the message, but
3242 we don't have a sprintf_longest. */
3243 warning (_("Value does not fit in %d bits."), bitsize);
3244
3245 /* Truncate it, otherwise adjoining fields may be corrupted. */
3246 fieldval &= mask;
3247 }
3248
3249 /* Ensure no bytes outside of the modified ones get accessed as it may cause
3250 false valgrind reports. */
3251
3252 bytesize = (bitpos + bitsize + 7) / 8;
3253 oword = extract_unsigned_integer (addr, bytesize, byte_order);
3254
3255 /* Shifting for bit field depends on endianness of the target machine. */
3256 if (gdbarch_bits_big_endian (get_type_arch (type)))
3257 bitpos = bytesize * 8 - bitpos - bitsize;
3258
3259 oword &= ~(mask << bitpos);
3260 oword |= fieldval << bitpos;
3261
3262 store_unsigned_integer (addr, bytesize, byte_order, oword);
3263 }
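/* Worked example for modify_field above (illustrative): writing a 4-bit
   field at BITPOS 12 first advances ADDR by 12 / 8 == 1 byte and
   reduces BITPOS to 4; MASK is 0xf and bytesize is (4 + 4 + 7) / 8 == 1,
   so a single byte is read back, bits 4..7 of it (or bits 0..3 on a
   bits-big-endian target, where BITPOS is recomputed as 8 - 4 - 4 == 0)
   are replaced with FIELDVAL, and the byte is stored again.  */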
3264 \f
3265 /* Pack NUM into BUF using a target format of TYPE. */
3266
3267 void
3268 pack_long (gdb_byte *buf, struct type *type, LONGEST num)
3269 {
3270 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
3271 int len;
3272
3273 type = check_typedef (type);
3274 len = TYPE_LENGTH (type);
3275
3276 switch (TYPE_CODE (type))
3277 {
3278 case TYPE_CODE_INT:
3279 case TYPE_CODE_CHAR:
3280 case TYPE_CODE_ENUM:
3281 case TYPE_CODE_FLAGS:
3282 case TYPE_CODE_BOOL:
3283 case TYPE_CODE_RANGE:
3284 case TYPE_CODE_MEMBERPTR:
3285 store_signed_integer (buf, len, byte_order, num);
3286 break;
3287
3288 case TYPE_CODE_REF:
3289 case TYPE_CODE_PTR:
3290 store_typed_address (buf, type, (CORE_ADDR) num);
3291 break;
3292
3293 default:
3294 error (_("Unexpected type (%d) encountered for integer constant."),
3295 TYPE_CODE (type));
3296 }
3297 }
3298
3299
3300 /* Pack NUM into BUF using a target format of TYPE. */
3301
3302 static void
3303 pack_unsigned_long (gdb_byte *buf, struct type *type, ULONGEST num)
3304 {
3305 int len;
3306 enum bfd_endian byte_order;
3307
3308 type = check_typedef (type);
3309 len = TYPE_LENGTH (type);
3310 byte_order = gdbarch_byte_order (get_type_arch (type));
3311
3312 switch (TYPE_CODE (type))
3313 {
3314 case TYPE_CODE_INT:
3315 case TYPE_CODE_CHAR:
3316 case TYPE_CODE_ENUM:
3317 case TYPE_CODE_FLAGS:
3318 case TYPE_CODE_BOOL:
3319 case TYPE_CODE_RANGE:
3320 case TYPE_CODE_MEMBERPTR:
3321 store_unsigned_integer (buf, len, byte_order, num);
3322 break;
3323
3324 case TYPE_CODE_REF:
3325 case TYPE_CODE_PTR:
3326 store_typed_address (buf, type, (CORE_ADDR) num);
3327 break;
3328
3329 default:
3330 error (_("Unexpected type (%d) encountered "
3331 "for unsigned integer constant."),
3332 TYPE_CODE (type));
3333 }
3334 }
3335
3336
3337 /* Convert C numbers into newly allocated values. */
3338
3339 struct value *
3340 value_from_longest (struct type *type, LONGEST num)
3341 {
3342 struct value *val = allocate_value (type);
3343
3344 pack_long (value_contents_raw (val), type, num);
3345 return val;
3346 }
3347
3348
3349 /* Convert C unsigned numbers into newly allocated values. */
3350
3351 struct value *
3352 value_from_ulongest (struct type *type, ULONGEST num)
3353 {
3354 struct value *val = allocate_value (type);
3355
3356 pack_unsigned_long (value_contents_raw (val), type, num);
3357
3358 return val;
3359 }
3360
3361
3362 /* Create a value representing a pointer of type TYPE to the address
3363 ADDR. The type of the created value may differ from the passed
3364 type TYPE. Make sure to retrieve the returned value's new type
3365 after this call, e.g. in case of a variable-length array. */
3366
3367 struct value *
3368 value_from_pointer (struct type *type, CORE_ADDR addr)
3369 {
3370 struct type *resolved_type = resolve_dynamic_type (type, addr);
3371 struct value *val = allocate_value (resolved_type);
3372
3373 store_typed_address (value_contents_raw (val),
3374 check_typedef (resolved_type), addr);
3375 return val;
3376 }
3377
3378
3379 /* Create a value of type TYPE whose contents come from VALADDR, if it
3380 is non-null, and whose memory address (in the inferior) is
3381 ADDRESS. The type of the created value may differ from the passed
3382 type TYPE. Make sure to retrieve the value's new type after this call.
3383 Note that TYPE is not passed through resolve_dynamic_type; this is
3384 a special API intended for use only by Ada. */
3385
3386 struct value *
3387 value_from_contents_and_address_unresolved (struct type *type,
3388 const gdb_byte *valaddr,
3389 CORE_ADDR address)
3390 {
3391 struct value *v;
3392
3393 if (valaddr == NULL)
3394 v = allocate_value_lazy (type);
3395 else
3396 v = value_from_contents (type, valaddr);
3397 set_value_address (v, address);
3398 VALUE_LVAL (v) = lval_memory;
3399 return v;
3400 }
3401
3402 /* Create a value of type TYPE whose contents come from VALADDR, if it
3403 is non-null, and whose memory address (in the inferior) is
3404 ADDRESS. The type of the created value may differ from the passed
3405 type TYPE. Make sure to retrieve the value's new type after this call. */
3406
3407 struct value *
3408 value_from_contents_and_address (struct type *type,
3409 const gdb_byte *valaddr,
3410 CORE_ADDR address)
3411 {
3412 struct type *resolved_type = resolve_dynamic_type (type, address);
3413 struct value *v;
3414
3415 if (valaddr == NULL)
3416 v = allocate_value_lazy (resolved_type);
3417 else
3418 v = value_from_contents (resolved_type, valaddr);
3419 set_value_address (v, address);
3420 VALUE_LVAL (v) = lval_memory;
3421 return v;
3422 }
3423
3424 /* Create a value of type TYPE holding the contents CONTENTS.
3425 The new value is `not_lval'. */
3426
3427 struct value *
3428 value_from_contents (struct type *type, const gdb_byte *contents)
3429 {
3430 struct value *result;
3431
3432 result = allocate_value (type);
3433 memcpy (value_contents_raw (result), contents, TYPE_LENGTH (type));
3434 return result;
3435 }
3436
3437 struct value *
3438 value_from_double (struct type *type, DOUBLEST num)
3439 {
3440 struct value *val = allocate_value (type);
3441 struct type *base_type = check_typedef (type);
3442 enum type_code code = TYPE_CODE (base_type);
3443
3444 if (code == TYPE_CODE_FLT)
3445 {
3446 store_typed_floating (value_contents_raw (val), base_type, num);
3447 }
3448 else
3449 error (_("Unexpected type encountered for floating constant."));
3450
3451 return val;
3452 }
3453
3454 struct value *
3455 value_from_decfloat (struct type *type, const gdb_byte *dec)
3456 {
3457 struct value *val = allocate_value (type);
3458
3459 memcpy (value_contents_raw (val), dec, TYPE_LENGTH (type));
3460 return val;
3461 }
3462
3463 /* Extract a value from the history file. Input will be of the form
3464 $digits or $$digits. See block comment above 'write_dollar_variable'
3465 for details. */
3466
3467 struct value *
3468 value_from_history_ref (char *h, char **endp)
3469 {
3470 int index, len;
3471
3472 if (h[0] == '$')
3473 len = 1;
3474 else
3475 return NULL;
3476
3477 if (h[1] == '$')
3478 len = 2;
3479
3480 /* Find length of numeral string. */
3481 for (; isdigit (h[len]); len++)
3482 ;
3483
3484 /* Make sure numeral string is not part of an identifier. */
3485 if (h[len] == '_' || isalpha (h[len]))
3486 return NULL;
3487
3488 /* Now collect the index value. */
3489 if (h[1] == '$')
3490 {
3491 if (len == 2)
3492 {
3493 /* For some bizarre reason, "$$" is equivalent to "$$1",
3494 rather than to "$$0" as it ought to be! */
3495 index = -1;
3496 *endp += len;
3497 }
3498 else
3499 index = -strtol (&h[2], endp, 10);
3500 }
3501 else
3502 {
3503 if (len == 1)
3504 {
3505 /* "$" is equivalent to "$0". */
3506 index = 0;
3507 *endp += len;
3508 }
3509 else
3510 index = strtol (&h[1], endp, 10);
3511 }
3512
3513 return access_value_history (index);
3514 }
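/* Examples for value_from_history_ref above (illustrative): "$7" parses
   to index 7, "$" to index 0, "$$" to index -1 and "$$3" to index -3;
   indices that are zero or negative are resolved by
   access_value_history relative to the most recent history element.  */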
3515
3516 struct value *
3517 coerce_ref_if_computed (const struct value *arg)
3518 {
3519 const struct lval_funcs *funcs;
3520
3521 if (TYPE_CODE (check_typedef (value_type (arg))) != TYPE_CODE_REF)
3522 return NULL;
3523
3524 if (value_lval_const (arg) != lval_computed)
3525 return NULL;
3526
3527 funcs = value_computed_funcs (arg);
3528 if (funcs->coerce_ref == NULL)
3529 return NULL;
3530
3531 return funcs->coerce_ref (arg);
3532 }
3533
3534 /* Look at value.h for description. */
3535
3536 struct value *
3537 readjust_indirect_value_type (struct value *value, struct type *enc_type,
3538 struct type *original_type,
3539 struct value *original_value)
3540 {
3541 /* Re-adjust type. */
3542 deprecated_set_value_type (value, TYPE_TARGET_TYPE (original_type));
3543
3544 /* Add embedding info. */
3545 set_value_enclosing_type (value, enc_type);
3546 set_value_embedded_offset (value, value_pointed_to_offset (original_value));
3547
3548 /* We may be pointing to an object of some derived type. */
3549 return value_full_object (value, NULL, 0, 0, 0);
3550 }
3551
3552 struct value *
3553 coerce_ref (struct value *arg)
3554 {
3555 struct type *value_type_arg_tmp = check_typedef (value_type (arg));
3556 struct value *retval;
3557 struct type *enc_type;
3558
3559 retval = coerce_ref_if_computed (arg);
3560 if (retval)
3561 return retval;
3562
3563 if (TYPE_CODE (value_type_arg_tmp) != TYPE_CODE_REF)
3564 return arg;
3565
3566 enc_type = check_typedef (value_enclosing_type (arg));
3567 enc_type = TYPE_TARGET_TYPE (enc_type);
3568
3569 retval = value_at_lazy (enc_type,
3570 unpack_pointer (value_type (arg),
3571 value_contents (arg)));
3572 enc_type = value_type (retval);
3573 return readjust_indirect_value_type (retval, enc_type,
3574 value_type_arg_tmp, arg);
3575 }
3576
3577 struct value *
3578 coerce_array (struct value *arg)
3579 {
3580 struct type *type;
3581
3582 arg = coerce_ref (arg);
3583 type = check_typedef (value_type (arg));
3584
3585 switch (TYPE_CODE (type))
3586 {
3587 case TYPE_CODE_ARRAY:
3588 if (!TYPE_VECTOR (type) && current_language->c_style_arrays)
3589 arg = value_coerce_array (arg);
3590 break;
3591 case TYPE_CODE_FUNC:
3592 arg = value_coerce_function (arg);
3593 break;
3594 }
3595 return arg;
3596 }
3597 \f
3598
3599 /* Return the return value convention that will be used for the
3600 specified type. */
3601
3602 enum return_value_convention
3603 struct_return_convention (struct gdbarch *gdbarch,
3604 struct value *function, struct type *value_type)
3605 {
3606 enum type_code code = TYPE_CODE (value_type);
3607
3608 if (code == TYPE_CODE_ERROR)
3609 error (_("Function return type unknown."));
3610
3611 /* Probe the architecture for the return-value convention. */
3612 return gdbarch_return_value (gdbarch, function, value_type,
3613 NULL, NULL, NULL);
3614 }
3615
3616 /* Return true if the function returning the specified type is using
3617 the convention of returning structures in memory (passing in the
3618 address as a hidden first parameter). */
3619
3620 int
3621 using_struct_return (struct gdbarch *gdbarch,
3622 struct value *function, struct type *value_type)
3623 {
3624 if (TYPE_CODE (value_type) == TYPE_CODE_VOID)
3625 /* A void return value is never in memory. See also corresponding
3626 code in "print_return_value". */
3627 return 0;
3628
3629 return (struct_return_convention (gdbarch, function, value_type)
3630 != RETURN_VALUE_REGISTER_CONVENTION);
3631 }
3632
3633 /* Set the initialized field in a value struct. */
3634
3635 void
3636 set_value_initialized (struct value *val, int status)
3637 {
3638 val->initialized = status;
3639 }
3640
3641 /* Return the initialized field in a value struct. */
3642
3643 int
3644 value_initialized (struct value *val)
3645 {
3646 return val->initialized;
3647 }
3648
3649 /* Called only from the value_contents and value_contents_all()
3650 macros, if the current data for a variable needs to be loaded into
3651 value_contents(VAL). Fetches the data from the user's process, and
3652 clears the lazy flag to indicate that the data in the buffer is
3653 valid.
3654
3655 If the value is zero-length, we avoid calling read_memory, which
3656 would abort. We mark the value as fetched anyway -- all 0 bytes of
3657 it.
3658
3659 This function returns a value because it is used in the
3660 value_contents macro as part of an expression, where a void would
3661 not work. The value is ignored. */
3662
3663 int
3664 value_fetch_lazy (struct value *val)
3665 {
3666 gdb_assert (value_lazy (val));
3667 allocate_value_contents (val);
3668 if (value_bitsize (val))
3669 {
3670 /* To read a lazy bitfield, read the entire enclosing value. This
3671 prevents reading the same block of (possibly volatile) memory once
3672 per bitfield. It would be even better to read only the containing
3673 word, but we have no way to record that just specific bits of a
3674 value have been fetched. */
3675 struct type *type = check_typedef (value_type (val));
3676 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
3677 struct value *parent = value_parent (val);
3678 LONGEST offset = value_offset (val);
3679 LONGEST num;
3680
3681 if (value_lazy (parent))
3682 value_fetch_lazy (parent);
3683
3684 if (!value_bits_valid (parent,
3685 TARGET_CHAR_BIT * offset + value_bitpos (val),
3686 value_bitsize (val)))
3687 set_value_optimized_out (val, 1);
3688 else if (!unpack_value_bits_as_long (value_type (val),
3689 value_contents_for_printing (parent),
3690 offset,
3691 value_bitpos (val),
3692 value_bitsize (val), parent, &num))
3693 mark_value_bytes_unavailable (val,
3694 value_embedded_offset (val),
3695 TYPE_LENGTH (type));
3696 else
3697 store_signed_integer (value_contents_raw (val), TYPE_LENGTH (type),
3698 byte_order, num);
3699 }
3700 else if (VALUE_LVAL (val) == lval_memory)
3701 {
3702 CORE_ADDR addr = value_address (val);
3703 struct type *type = check_typedef (value_enclosing_type (val));
3704
3705 if (TYPE_LENGTH (type))
3706 read_value_memory (val, 0, value_stack (val),
3707 addr, value_contents_all_raw (val),
3708 TYPE_LENGTH (type));
3709 }
  else if (VALUE_LVAL (val) == lval_register)
    {
      struct frame_info *frame;
      int regnum;
      struct type *type = check_typedef (value_type (val));
      struct value *new_val = val, *mark = value_mark ();

      /* Offsets are not supported here; lazy register values must
         refer to the entire register.  */
      gdb_assert (value_offset (val) == 0);

      while (VALUE_LVAL (new_val) == lval_register && value_lazy (new_val))
        {
          struct frame_id frame_id = VALUE_FRAME_ID (new_val);

          frame = frame_find_by_id (frame_id);
          regnum = VALUE_REGNUM (new_val);

          gdb_assert (frame != NULL);

          /* Convertible register routines are used for multi-register
             values and for interpretation in different types
             (e.g. float or int from a double register).  Lazy
             register values should have the register's natural type,
             so they do not apply.  */
          gdb_assert (!gdbarch_convert_register_p (get_frame_arch (frame),
                                                   regnum, type));

          new_val = get_frame_register_value (frame, regnum);

          /* If we get another lazy lval_register value, it means the
             register is found by reading it from the next frame.
             get_frame_register_value should never return a value with
             the frame id pointing to FRAME.  If it does, it means we
             either have two consecutive frames with the same frame id
             in the frame chain, or some code is trying to unwind
             behind get_prev_frame's back (e.g., a frame unwind
             sniffer trying to unwind), bypassing its validations.  In
             any case, it should always be an internal error to end up
             in this situation.  */
          if (VALUE_LVAL (new_val) == lval_register
              && value_lazy (new_val)
              && frame_id_eq (VALUE_FRAME_ID (new_val), frame_id))
            internal_error (__FILE__, __LINE__,
                            _("infinite loop while fetching a register"));
        }

      /* If it's still lazy (for instance, a saved register on the
         stack), fetch it.  */
      if (value_lazy (new_val))
        value_fetch_lazy (new_val);

      /* If the register was not saved, mark it optimized out.  */
      if (value_optimized_out (new_val))
        set_value_optimized_out (val, 1);
      else
        {
          set_value_lazy (val, 0);
          value_contents_copy (val, value_embedded_offset (val),
                               new_val, value_embedded_offset (new_val),
                               TYPE_LENGTH (type));
        }

      if (frame_debug)
        {
          struct gdbarch *gdbarch;
          frame = frame_find_by_id (VALUE_FRAME_ID (val));
          regnum = VALUE_REGNUM (val);
          gdbarch = get_frame_arch (frame);

          fprintf_unfiltered (gdb_stdlog,
                              "{ value_fetch_lazy "
                              "(frame=%d,regnum=%d(%s),...) ",
                              frame_relative_level (frame), regnum,
                              user_reg_map_regnum_to_name (gdbarch, regnum));

          fprintf_unfiltered (gdb_stdlog, "->");
          if (value_optimized_out (new_val))
            {
              fprintf_unfiltered (gdb_stdlog, " ");
              val_print_optimized_out (new_val, gdb_stdlog);
            }
          else
            {
              int i;
              const gdb_byte *buf = value_contents (new_val);

              if (VALUE_LVAL (new_val) == lval_register)
                fprintf_unfiltered (gdb_stdlog, " register=%d",
                                    VALUE_REGNUM (new_val));
              else if (VALUE_LVAL (new_val) == lval_memory)
                fprintf_unfiltered (gdb_stdlog, " address=%s",
                                    paddress (gdbarch,
                                              value_address (new_val)));
              else
                fprintf_unfiltered (gdb_stdlog, " computed");

              fprintf_unfiltered (gdb_stdlog, " bytes=");
              fprintf_unfiltered (gdb_stdlog, "[");
              for (i = 0; i < register_size (gdbarch, regnum); i++)
                fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
              fprintf_unfiltered (gdb_stdlog, "]");
            }

          fprintf_unfiltered (gdb_stdlog, " }\n");
        }

      /* Dispose of the intermediate values.  This prevents
         watchpoints from trying to watch the saved frame pointer.  */
      value_free_to_mark (mark);
    }
  else if (VALUE_LVAL (val) == lval_computed
           && value_computed_funcs (val)->read != NULL)
    value_computed_funcs (val)->read (val);
  /* Don't call value_optimized_out on VAL here: doing so would recurse
     back into value_fetch_lazy.  Check the optimized_out flag
     directly instead.  */
  else if (val->optimized_out)
    /* Keep it optimized out.  */;
  else
    internal_error (__FILE__, __LINE__, _("Unexpected lazy value type."));

  set_value_lazy (val, 0);
  return 0;
}
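
/* Illustrative sketch, not built as part of this file: the on-demand
   pattern that the comment above describes, and broadly what accessors
   such as value_contents do internally.  The helper name
   example_lazy_read is made up for the example; the calls inside it
   are the real ones used elsewhere in this file.  */
#if 0
static const gdb_byte *
example_lazy_read (struct value *val)
{
  /* Load the bytes from the target the first time they are needed.  */
  if (value_lazy (val))
    value_fetch_lazy (val);

  /* The lazy flag is now clear, so the buffer contents are valid.  */
  return value_contents (val);
}
#endif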

/* Implementation of the convenience function $_isvoid.  */

static struct value *
isvoid_internal_fn (struct gdbarch *gdbarch,
                    const struct language_defn *language,
                    void *cookie, int argc, struct value **argv)
{
  int ret;

  if (argc != 1)
    error (_("You must provide one argument for $_isvoid."));

  ret = TYPE_CODE (value_type (argv[0])) == TYPE_CODE_VOID;

  return value_from_longest (builtin_type (gdbarch)->builtin_int, ret);
}
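
/* For reference, an illustrative example of how $_isvoid behaves once
   it is registered below (value history numbers are illustrative):

     (gdb) print $_isvoid ($unset_convenience_var)
     $1 = 1
     (gdb) print $_isvoid (1 + 2)
     $2 = 0

   An unset convenience variable has void type, so the first call
   returns 1.  */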

void
_initialize_values (void)
{
  add_cmd ("convenience", no_class, show_convenience, _("\
Debugger convenience (\"$foo\") variables and functions.\n\
Convenience variables are created when you assign them values;\n\
thus, \"set $foo=1\" gives \"$foo\" the value 1.  Values may be any type.\n\
\n\
A few convenience variables are given values automatically:\n\
\"$_\" holds the last address examined with \"x\" or \"info lines\",\n\
\"$__\" holds the contents of the last address examined with \"x\"."
#ifdef HAVE_PYTHON
"\n\n\
Convenience functions are defined via the Python API."
#endif
           ), &showlist);
  add_alias_cmd ("conv", "convenience", no_class, 1, &showlist);

  add_cmd ("values", no_set_class, show_values, _("\
Elements of value history around item number IDX (or last ten)."),
           &showlist);

  add_com ("init-if-undefined", class_vars, init_if_undefined_command, _("\
Initialize a convenience variable if necessary.\n\
init-if-undefined VARIABLE = EXPRESSION\n\
Set an internal VARIABLE to the result of the EXPRESSION if it does not\n\
exist or does not contain a value.  The EXPRESSION is not evaluated if the\n\
VARIABLE is already initialized."));

  add_prefix_cmd ("function", no_class, function_command, _("\
Placeholder command for showing help on convenience functions."),
                  &functionlist, "function ", 0, &cmdlist);

  add_internal_function ("_isvoid", _("\
Check whether an expression is void.\n\
Usage: $_isvoid (expression)\n\
Return 1 if the expression is void, zero otherwise."),
                         isvoid_internal_fn, NULL);
}
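
/* Illustrative sketch, not part of this file: defining and registering
   a second convenience function follows the same pattern as $_isvoid
   above.  The names $_sizeofexpr, sizeofexpr_internal_fn and
   register_sizeofexpr are hypothetical; the APIs used
   (add_internal_function, value_from_longest, TYPE_LENGTH) are the
   real ones shown in this file.  */
#if 0
static struct value *
sizeofexpr_internal_fn (struct gdbarch *gdbarch,
                        const struct language_defn *language,
                        void *cookie, int argc, struct value **argv)
{
  if (argc != 1)
    error (_("You must provide one argument for $_sizeofexpr."));

  /* Return the length in bytes of the argument's type as an int
     convenience value.  */
  return value_from_longest (builtin_type (gdbarch)->builtin_int,
                             TYPE_LENGTH (value_type (argv[0])));
}

static void
register_sizeofexpr (void)
{
  /* Registered the same way $_isvoid is registered from
     _initialize_values above.  */
  add_internal_function ("_sizeofexpr", _("\
Return the size in bytes of the argument's type.\n\
Usage: $_sizeofexpr (expression)"),
                         sizeofexpr_internal_fn, NULL);
}
#endif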