1 /* Low level packing and unpacking of values for GDB, the GNU Debugger.
2
3 Copyright (C) 1986-2014 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21 #include "arch-utils.h"
22 #include <string.h>
23 #include "symtab.h"
24 #include "gdbtypes.h"
25 #include "value.h"
26 #include "gdbcore.h"
27 #include "command.h"
28 #include "gdbcmd.h"
29 #include "target.h"
30 #include "language.h"
31 #include "demangle.h"
32 #include "doublest.h"
33 #include "gdb_assert.h"
34 #include "regcache.h"
35 #include "block.h"
36 #include "dfp.h"
37 #include "objfiles.h"
38 #include "valprint.h"
39 #include "cli/cli-decode.h"
40 #include "exceptions.h"
41 #include "extension.h"
42 #include <ctype.h>
43 #include "tracepoint.h"
44 #include "cp-abi.h"
45 #include "user-regs.h"
46
47 /* Prototypes for exported functions. */
48
49 void _initialize_values (void);
50
51 /* Definition of a user function. */
52 struct internal_function
53 {
54 /* The name of the function. It is a bit odd to have this in the
55 function itself -- the user might use a differently-named
56 convenience variable to hold the function. */
57 char *name;
58
59 /* The handler. */
60 internal_function_fn handler;
61
62 /* User data for the handler. */
63 void *cookie;
64 };
65
66 /* Defines an [OFFSET, OFFSET + LENGTH) range. */
67
68 struct range
69 {
70 /* Lowest offset in the range. */
71 int offset;
72
73 /* Length of the range. */
74 int length;
75 };
76
77 typedef struct range range_s;
78
79 DEF_VEC_O(range_s);
80
81 /* Returns true if the ranges defined by [offset1, offset1+len1) and
82 [offset2, offset2+len2) overlap. */
83
84 static int
85 ranges_overlap (int offset1, int len1,
86 int offset2, int len2)
87 {
88 ULONGEST h, l;
89
90 l = max (offset1, offset2);
91 h = min (offset1 + len1, offset2 + len2);
92 return (l < h);
93 }
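/* Illustrative sketch, not from GDB: the overlap test above takes the
   maximum of the two lower bounds and the minimum of the two upper
   bounds; the ranges intersect exactly when that leaves a non-empty
   interval.  A minimal standalone example of the same arithmetic:  */
#if 0
#include <assert.h>

static int
ranges_overlap_demo (int offset1, int len1, int offset2, int len2)
{
  int l = offset1 > offset2 ? offset1 : offset2;          /* max of lows  */
  int h = (offset1 + len1) < (offset2 + len2)
	  ? offset1 + len1 : offset2 + len2;              /* min of highs */

  return l < h;
}

static void
ranges_overlap_examples (void)
{
  assert (ranges_overlap_demo (0, 4, 2, 4));   /* [0,4) and [2,6) overlap.     */
  assert (!ranges_overlap_demo (0, 4, 4, 4));  /* [0,4) and [4,8) only touch.  */
  assert (!ranges_overlap_demo (0, 4, 6, 2));  /* Disjoint.                    */
}
#endif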
94
95 /* Returns true if the first argument is strictly less than the
96 second, useful for VEC_lower_bound. We keep ranges sorted by
97 offset and coalesce overlapping and contiguous ranges, so this just
98 compares the starting offset. */
99
100 static int
101 range_lessthan (const range_s *r1, const range_s *r2)
102 {
103 return r1->offset < r2->offset;
104 }
105
106 /* Returns true if RANGES contains any range that overlaps [OFFSET,
107 OFFSET+LENGTH). */
108
109 static int
110 ranges_contain (VEC(range_s) *ranges, int offset, int length)
111 {
112 range_s what;
113 int i;
114
115 what.offset = offset;
116 what.length = length;
117
118 /* We keep ranges sorted by offset and coalesce overlapping and
119 contiguous ranges, so to check if a range list contains a given
120 range, we can do a binary search for the position the given range
121 would be inserted if we only considered the starting OFFSET of
122 ranges. We call that position I. Since we also have LENGTH to
123 care for (this is a range after all), we need to check if the
124 _previous_ range overlaps the I range. E.g.,
125
126 R
127 |---|
128 |---| |---| |------| ... |--|
129 0 1 2 N
130
131 I=1
132
133 In the case above, the binary search would return `I=1', meaning,
134 this OFFSET should be inserted at position 1, and the current
135 position 1 should be pushed further (and become 2). But, `0'
136 overlaps with R.
137
138 Then we also need to check if R overlaps the range at position I itself.
139 E.g.,
140
141 R
142 |---|
143 |---| |---| |-------| ... |--|
144 0 1 2 N
145
146 I=1
147 */
148
149 i = VEC_lower_bound (range_s, ranges, &what, range_lessthan);
150
151 if (i > 0)
152 {
153 struct range *bef = VEC_index (range_s, ranges, i - 1);
154
155 if (ranges_overlap (bef->offset, bef->length, offset, length))
156 return 1;
157 }
158
159 if (i < VEC_length (range_s, ranges))
160 {
161 struct range *r = VEC_index (range_s, ranges, i);
162
163 if (ranges_overlap (r->offset, r->length, offset, length))
164 return 1;
165 }
166
167 return 0;
168 }
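/* Illustrative sketch, not from GDB: the same lookup written against a
   plain sorted C array instead of a VEC, to make the "lower_bound by
   offset, then check the previous and current ranges" idea concrete.
   The demo_* names are hypothetical; this is an untested illustration.  */
#if 0
struct demo_range { int offset; int length; };

static int
demo_ranges_contain (const struct demo_range *r, int n, int offset, int length)
{
  int lo = 0, hi = n;

  /* Binary search for the first range whose offset is >= OFFSET
     (the insertion point I in the comment above).  */
  while (lo < hi)
    {
      int mid = lo + (hi - lo) / 2;

      if (r[mid].offset < offset)
	lo = mid + 1;
      else
	hi = mid;
    }

  /* The previous range may extend into [OFFSET, OFFSET+LENGTH)...  */
  if (lo > 0 && ranges_overlap (r[lo - 1].offset, r[lo - 1].length,
				offset, length))
    return 1;

  /* ... and so may the range at the insertion point itself.  */
  if (lo < n && ranges_overlap (r[lo].offset, r[lo].length, offset, length))
    return 1;

  return 0;
}
#endif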
169
170 static struct cmd_list_element *functionlist;
171
172 /* Note that the fields in this structure are arranged to save a bit
173 of memory. */
174
175 struct value
176 {
177 /* Type of value; either not an lval, or one of the various
178 different possible kinds of lval. */
179 enum lval_type lval;
180
181 /* Is it modifiable? Only relevant if lval != not_lval. */
182 unsigned int modifiable : 1;
183
184 /* If zero, contents of this value are in the contents field. If
185 nonzero, contents are in inferior. If the lval field is lval_memory,
186 the contents are in inferior memory at location.address plus offset.
187 The lval field may also be lval_register.
188
189 WARNING: This field is used by the code which handles watchpoints
190 (see breakpoint.c) to decide whether a particular value can be
191 watched by hardware watchpoints. If the lazy flag is set for
192 some member of a value chain, it is assumed that this member of
193 the chain doesn't need to be watched as part of watching the
194 value itself. This is how GDB avoids watching the entire struct
195 or array when the user wants to watch a single struct member or
196 array element. If you ever change the way lazy flag is set and
197 reset, be sure to consider this use as well! */
198 unsigned int lazy : 1;
199
200 /* If nonzero, this is the value of a variable that does not
201 actually exist in the program. If nonzero, and LVAL is
202 lval_register, this is a register ($pc, $sp, etc., never a
203 program variable) that has not been saved in the frame. All
204 optimized-out values are treated pretty much the same, except
205 registers have a different string representation and related
206 error strings. */
207 unsigned int optimized_out : 1;
208
209 /* If value is a variable, is it initialized or not. */
210 unsigned int initialized : 1;
211
212 /* If value is from the stack. If this is set, read_stack will be
213 used instead of read_memory to enable extra caching. */
214 unsigned int stack : 1;
215
216 /* If the value has been released. */
217 unsigned int released : 1;
218
219 /* Register number if the value is from a register. */
220 short regnum;
221
222 /* Location of value (if lval). */
223 union
224 {
225 /* If lval == lval_memory, this is the address in the inferior.
226 If lval == lval_register, this is the byte offset into the
227 registers structure. */
228 CORE_ADDR address;
229
230 /* Pointer to internal variable. */
231 struct internalvar *internalvar;
232
233 /* Pointer to xmethod worker. */
234 struct xmethod_worker *xm_worker;
235
236 /* If lval == lval_computed, this is a set of function pointers
237 to use to access and describe the value, and a closure pointer
238 for them to use. */
239 struct
240 {
241 /* Functions to call. */
242 const struct lval_funcs *funcs;
243
244 /* Closure for those functions to use. */
245 void *closure;
246 } computed;
247 } location;
248
249 /* Describes offset of a value within lval of a structure in bytes.
250 If lval == lval_memory, this is an offset to the address. If
251 lval == lval_register, this is a further offset from
252 location.address within the registers structure. Note also the
253 member embedded_offset below. */
254 int offset;
255
256 /* Only used for bitfields; number of bits contained in them. */
257 int bitsize;
258
259 /* Only used for bitfields; position of start of field. For
260 gdbarch_bits_big_endian=0 targets, it is the position of the LSB. For
261 gdbarch_bits_big_endian=1 targets, it is the position of the MSB. */
262 int bitpos;
263
264 /* The number of references to this value. When a value is created,
265 the value chain holds a reference, so REFERENCE_COUNT is 1. If
266 release_value is called, this value is removed from the chain but
267 the caller of release_value now has a reference to this value.
268 The caller must arrange for a call to value_free later. */
269 int reference_count;
270
271 /* Only used for bitfields; the containing value. This allows a
272 single read from the target when displaying multiple
273 bitfields. */
274 struct value *parent;
275
276 /* Frame register value is relative to. This will be described in
277 the lval enum above as "lval_register". */
278 struct frame_id frame_id;
279
280 /* Type of the value. */
281 struct type *type;
282
283 /* If a value represents a C++ object, then the `type' field gives
284 the object's compile-time type. If the object actually belongs
285 to some class derived from `type', perhaps with other base
286 classes and additional members, then `type' is just a subobject
287 of the real thing, and the full object is probably larger than
288 `type' would suggest.
289
290 If `type' is a dynamic class (i.e. one with a vtable), then GDB
291 can actually determine the object's run-time type by looking at
292 the run-time type information in the vtable. When this
293 information is available, we may elect to read in the entire
294 object, for several reasons:
295
296 - When printing the value, the user would probably rather see the
297 full object, not just the limited portion apparent from the
298 compile-time type.
299
300 - If `type' has virtual base classes, then even printing `type'
301 alone may require reaching outside the `type' portion of the
302 object to wherever the virtual base class has been stored.
303
304 When we store the entire object, `enclosing_type' is the run-time
305 type -- the complete object -- and `embedded_offset' is the
306 offset of `type' within that larger type, in bytes. The
307 value_contents() macro takes `embedded_offset' into account, so
308 most GDB code continues to see the `type' portion of the value,
309 just as the inferior would.
310
311 If `type' is a pointer to an object, then `enclosing_type' is a
312 pointer to the object's run-time type, and `pointed_to_offset' is
313 the offset in bytes from the full object to the pointed-to object
314 -- that is, the value `embedded_offset' would have if we followed
315 the pointer and fetched the complete object. (I don't really see
316 the point. Why not just determine the run-time type when you
317 indirect, and avoid the special case? The contents don't matter
318 until you indirect anyway.)
319
320 If we're not doing anything fancy, `enclosing_type' is equal to
321 `type', and `embedded_offset' is zero, so everything works
322 normally. */
323 struct type *enclosing_type;
324 int embedded_offset;
325 int pointed_to_offset;
326
327 /* Values are stored in a chain, so that they can be deleted easily
328 over calls to the inferior. Values assigned to internal
329 variables, put into the value history or exposed to Python are
330 taken off this list. */
331 struct value *next;
332
333 /* Actual contents of the value. Target byte-order. NULL or not
334 valid if lazy is nonzero. */
335 gdb_byte *contents;
336
337 /* Unavailable ranges in CONTENTS. We mark unavailable ranges,
338 rather than available, since the common and default case is for a
339 value to be available. This is filled in at value read time. The
340 unavailable ranges are tracked in bits. */
341 VEC(range_s) *unavailable;
342 };
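/* Illustrative sketch, not from GDB: a plain C analogue of the `type' /
   `enclosing_type' / `embedded_offset' relationship described above.
   If the full run-time object were `struct whole' and the compile-time
   type only covered its `part' member, that would correspond to
   enclosing_type describing `struct whole', type describing
   `struct part', and embedded_offset == offsetof (struct whole, part).
   The types below are hypothetical.  */
#if 0
#include <stddef.h>

struct part  { int a; };
struct whole { long header; struct part part; int trailer; };

/* value_contents () then hands back contents + embedded_offset, i.e.
   the bytes of `part' within the larger buffer holding `whole', much
   like this pointer arithmetic:  */
static struct part *
part_within (unsigned char *whole_buffer)
{
  return (struct part *) (whole_buffer + offsetof (struct whole, part));
}
#endif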
343
344 int
345 value_bits_available (const struct value *value, int offset, int length)
346 {
347 gdb_assert (!value->lazy);
348
349 return !ranges_contain (value->unavailable, offset, length);
350 }
351
352 int
353 value_bytes_available (const struct value *value, int offset, int length)
354 {
355 return value_bits_available (value,
356 offset * TARGET_CHAR_BIT,
357 length * TARGET_CHAR_BIT);
358 }
359
360 int
361 value_entirely_available (struct value *value)
362 {
363 /* We can only tell whether the whole value is available when we try
364 to read it. */
365 if (value->lazy)
366 value_fetch_lazy (value);
367
368 if (VEC_empty (range_s, value->unavailable))
369 return 1;
370 return 0;
371 }
372
373 int
374 value_entirely_unavailable (struct value *value)
375 {
376 /* We can only tell whether the whole value is available when we try
377 to read it. */
378 if (value->lazy)
379 value_fetch_lazy (value);
380
381 if (VEC_length (range_s, value->unavailable) == 1)
382 {
383 struct range *t = VEC_index (range_s, value->unavailable, 0);
384
385 if (t->offset == 0
386 && t->length == (TARGET_CHAR_BIT
387 * TYPE_LENGTH (value_enclosing_type (value))))
388 return 1;
389 }
390
391 return 0;
392 }
393
394 void
395 mark_value_bits_unavailable (struct value *value, int offset, int length)
396 {
397 range_s newr;
398 int i;
399
400 /* Insert the range sorted. If there's overlap or the new range
401 would be contiguous with an existing range, merge. */
402
403 newr.offset = offset;
404 newr.length = length;
405
406 /* Do a binary search for the position the given range would be
407 inserted if we only considered the starting OFFSET of ranges.
408 Call that position I. Since we also have LENGTH to care for
409 (this is a range after all), we need to check if the _previous_
410 range overlaps the I range. E.g., calling R the new range:
411
412 #1 - overlaps with previous
413
414 R
415 |-...-|
416 |---| |---| |------| ... |--|
417 0 1 2 N
418
419 I=1
420
421 In the case #1 above, the binary search would return `I=1',
422 meaning, this OFFSET should be inserted at position 1, and the
423 current position 1 should be pushed further (and become 2). But,
424 note that `0' overlaps with R, so we want to merge them.
425
426 A similar consideration needs to be taken if the new range would
427 be contiguous with the previous range:
428
429 #2 - contiguous with previous
430
431 R
432 |-...-|
433 |--| |---| |------| ... |--|
434 0 1 2 N
435
436 I=1
437
438 If there's no overlap with the previous range, as in:
439
440 #3 - not overlapping and not contiguous
441
442 R
443 |-...-|
444 |--| |---| |------| ... |--|
445 0 1 2 N
446
447 I=1
448
449 or if I is 0:
450
451 #4 - R is the range with lowest offset
452
453 R
454 |-...-|
455 |--| |---| |------| ... |--|
456 0 1 2 N
457
458 I=0
459
460 ... we just push the new range to I.
461
462 All the 4 cases above need to consider that the new range may
463 also overlap several of the ranges that follow, or that R may be
464 contiguous with the following range, and merge. E.g.,
465
466 #5 - overlapping following ranges
467
468 R
469 |------------------------|
470 |--| |---| |------| ... |--|
471 0 1 2 N
472
473 I=0
474
475 or:
476
477 R
478 |-------|
479 |--| |---| |------| ... |--|
480 0 1 2 N
481
482 I=1
483
484 */
485
486 i = VEC_lower_bound (range_s, value->unavailable, &newr, range_lessthan);
487 if (i > 0)
488 {
489 struct range *bef = VEC_index (range_s, value->unavailable, i - 1);
490
491 if (ranges_overlap (bef->offset, bef->length, offset, length))
492 {
493 /* #1 */
494 ULONGEST l = min (bef->offset, offset);
495 ULONGEST h = max (bef->offset + bef->length, offset + length);
496
497 bef->offset = l;
498 bef->length = h - l;
499 i--;
500 }
501 else if (offset == bef->offset + bef->length)
502 {
503 /* #2 */
504 bef->length += length;
505 i--;
506 }
507 else
508 {
509 /* #3 */
510 VEC_safe_insert (range_s, value->unavailable, i, &newr);
511 }
512 }
513 else
514 {
515 /* #4 */
516 VEC_safe_insert (range_s, value->unavailable, i, &newr);
517 }
518
519 /* Check whether the ranges following the one we've just added or
520 touched can be folded in (#5 above). */
521 if (i + 1 < VEC_length (range_s, value->unavailable))
522 {
523 struct range *t;
524 struct range *r;
525 int removed = 0;
526 int next = i + 1;
527
528 /* Get the range we just touched. */
529 t = VEC_index (range_s, value->unavailable, i);
530 removed = 0;
531
532 i = next;
533 for (; VEC_iterate (range_s, value->unavailable, i, r); i++)
534 if (r->offset <= t->offset + t->length)
535 {
536 ULONGEST l, h;
537
538 l = min (t->offset, r->offset);
539 h = max (t->offset + t->length, r->offset + r->length);
540
541 t->offset = l;
542 t->length = h - l;
543
544 removed++;
545 }
546 else
547 {
548 /* If we couldn't merge this one, we won't be able to
549 merge following ones either, since the ranges are
550 always sorted by OFFSET. */
551 break;
552 }
553
554 if (removed != 0)
555 VEC_block_remove (range_s, value->unavailable, next, removed);
556 }
557 }
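/* Illustrative sketch, not from GDB: the insert-and-coalesce logic
   above means that marking touching or overlapping ranges leaves a
   single merged entry behind.  A hedged usage sketch, assuming VAL is
   a non-lazy 16-byte value that starts out fully available:  */
#if 0
  mark_value_bytes_unavailable (val, 0, 4);   /* Bytes [0,4) unavailable.        */
  mark_value_bytes_unavailable (val, 4, 4);   /* Contiguous: merges to [0,8).    */
  mark_value_bytes_unavailable (val, 6, 4);   /* Overlaps: one range, now [0,10). */

  gdb_assert (!value_bytes_available (val, 0, 1));   /* Inside the merged range. */
  gdb_assert (!value_bytes_available (val, 9, 1));
  gdb_assert (value_bytes_available (val, 10, 6));   /* The tail is untouched.   */
#endif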
558
559 void
560 mark_value_bytes_unavailable (struct value *value, int offset, int length)
561 {
562 mark_value_bits_unavailable (value,
563 offset * TARGET_CHAR_BIT,
564 length * TARGET_CHAR_BIT);
565 }
566
567 /* Find the first range in RANGES that overlaps the range defined by
568 OFFSET and LENGTH, starting at element POS in the RANGES vector.
569 Returns the index into RANGES where such an overlapping range was
570 found, or -1 if none was found. */
571
572 static int
573 find_first_range_overlap (VEC(range_s) *ranges, int pos,
574 int offset, int length)
575 {
576 range_s *r;
577 int i;
578
579 for (i = pos; VEC_iterate (range_s, ranges, i, r); i++)
580 if (ranges_overlap (r->offset, r->length, offset, length))
581 return i;
582
583 return -1;
584 }
585
586 /* Compare LENGTH_BITS of memory at PTR1 + OFFSET1_BITS with the memory at
587 PTR2 + OFFSET2_BITS. Return 0 if the memory is the same, otherwise
588 return non-zero.
589
590 It must always be the case that:
591 OFFSET1_BITS % TARGET_CHAR_BIT == OFFSET2_BITS % TARGET_CHAR_BIT
592
593 It is assumed that memory can be accessed from:
594 PTR + (OFFSET_BITS / TARGET_CHAR_BIT)
595 to:
596 PTR + ((OFFSET_BITS + LENGTH_BITS + TARGET_CHAR_BIT - 1)
597 / TARGET_CHAR_BIT) */
598 static int
599 memcmp_with_bit_offsets (const gdb_byte *ptr1, size_t offset1_bits,
600 const gdb_byte *ptr2, size_t offset2_bits,
601 size_t length_bits)
602 {
603 gdb_assert (offset1_bits % TARGET_CHAR_BIT
604 == offset2_bits % TARGET_CHAR_BIT);
605
606 if (offset1_bits % TARGET_CHAR_BIT != 0)
607 {
608 size_t bits;
609 gdb_byte mask, b1, b2;
610
611 /* The offset from the base pointers PTR1 and PTR2 is not a complete
612 number of bytes. A number of bits up to either the next exact
613 byte boundary, or LENGTH_BITS (whichever comes first) will be
614 compared. */
615 bits = TARGET_CHAR_BIT - offset1_bits % TARGET_CHAR_BIT;
616 gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
617 mask = (1 << bits) - 1;
618
619 if (length_bits < bits)
620 {
621 mask &= ~(gdb_byte) ((1 << (bits - length_bits)) - 1);
622 bits = length_bits;
623 }
624
625 /* Now load the two bytes and mask off the bits we care about. */
626 b1 = *(ptr1 + offset1_bits / TARGET_CHAR_BIT) & mask;
627 b2 = *(ptr2 + offset2_bits / TARGET_CHAR_BIT) & mask;
628
629 if (b1 != b2)
630 return 1;
631
632 /* Now update the length and offsets to take account of the bits
633 we've just compared. */
634 length_bits -= bits;
635 offset1_bits += bits;
636 offset2_bits += bits;
637 }
638
639 if (length_bits % TARGET_CHAR_BIT != 0)
640 {
641 size_t bits;
642 size_t o1, o2;
643 gdb_byte mask, b1, b2;
644
645 /* The length is not an exact number of bytes. After the previous
646 IF block, the offsets are byte aligned, or the
647 length is zero (in which case this code is not reached). Compare
648 a number of bits at the end of the region, starting from an exact
649 byte boundary. */
650 bits = length_bits % TARGET_CHAR_BIT;
651 o1 = offset1_bits + length_bits - bits;
652 o2 = offset2_bits + length_bits - bits;
653
654 gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
655 mask = ((1 << bits) - 1) << (TARGET_CHAR_BIT - bits);
656
657 gdb_assert (o1 % TARGET_CHAR_BIT == 0);
658 gdb_assert (o2 % TARGET_CHAR_BIT == 0);
659
660 b1 = *(ptr1 + o1 / TARGET_CHAR_BIT) & mask;
661 b2 = *(ptr2 + o2 / TARGET_CHAR_BIT) & mask;
662
663 if (b1 != b2)
664 return 1;
665
666 length_bits -= bits;
667 }
668
669 if (length_bits > 0)
670 {
671 /* We've now taken care of any stray "bits" at the start, or end of
672 the region to compare, the remainder can be covered with a simple
673 memcmp. */
674 gdb_assert (offset1_bits % TARGET_CHAR_BIT == 0);
675 gdb_assert (offset2_bits % TARGET_CHAR_BIT == 0);
676 gdb_assert (length_bits % TARGET_CHAR_BIT == 0);
677
678 return memcmp (ptr1 + offset1_bits / TARGET_CHAR_BIT,
679 ptr2 + offset2_bits / TARGET_CHAR_BIT,
680 length_bits / TARGET_CHAR_BIT);
681 }
682
683 /* Length is zero, regions match. */
684 return 0;
685 }
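/* Illustrative sketch, not from GDB: a worked example of the head and
   tail masks computed above, for TARGET_CHAR_BIT == 8.  With
   offset1_bits % 8 == 3 there are 5 bits left in the first byte, so
   the head mask is (1 << 5) - 1 == 0x1f; if only length_bits == 2 of
   them are wanted, the low 3 bits are cleared, leaving 0x18.  A
   trailing run of 3 bits uses a mask anchored at the top of the byte,
   ((1 << 3) - 1) << 5 == 0xe0.  Standalone check of the arithmetic:  */
#if 0
#include <assert.h>

static void
bit_mask_examples (void)
{
  unsigned bits, length_bits, mask;

  /* Head of the region: partial first byte.  */
  bits = 8 - 3;                          /* 5 bits up to the byte boundary.  */
  mask = (1u << bits) - 1;
  assert (mask == 0x1f);

  length_bits = 2;                       /* Compare only 2 of those 5 bits.  */
  mask &= ~((1u << (bits - length_bits)) - 1);
  assert (mask == 0x18);

  /* Tail of the region: 3 stray bits after the last whole byte.  */
  bits = 3;
  mask = ((1u << bits) - 1) << (8 - bits);
  assert (mask == 0xe0);
}
#endif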
686
687 /* Helper function for value_available_contents_eq. The only difference is
688 that this function is bit rather than byte based.
689
690 Compare LENGTH bits of VAL1's contents starting at OFFSET1 bits with
691 LENGTH bits of VAL2's contents starting at OFFSET2 bits. Return true
692 if the available bits match. */
693
694 static int
695 value_available_contents_bits_eq (const struct value *val1, int offset1,
696 const struct value *val2, int offset2,
697 int length)
698 {
699 int idx1 = 0, idx2 = 0;
700
701 /* See function description in value.h. */
702 gdb_assert (!val1->lazy && !val2->lazy);
703
704 while (length > 0)
705 {
706 range_s *r1, *r2;
707 ULONGEST l1, h1;
708 ULONGEST l2, h2;
709
710 idx1 = find_first_range_overlap (val1->unavailable, idx1,
711 offset1, length);
712 idx2 = find_first_range_overlap (val2->unavailable, idx2,
713 offset2, length);
714
715 /* The usual case is for both values to be completely available. */
716 if (idx1 == -1 && idx2 == -1)
717 return (memcmp_with_bit_offsets (val1->contents, offset1,
718 val2->contents, offset2,
719 length) == 0);
720 /* The contents can only be equal if the available sets match as
721 well. */
722 else if (idx1 == -1 || idx2 == -1)
723 return 0;
724
725 gdb_assert (idx1 != -1 && idx2 != -1);
726
727 r1 = VEC_index (range_s, val1->unavailable, idx1);
728 r2 = VEC_index (range_s, val2->unavailable, idx2);
729
730 /* Get the unavailable windows intersected by the incoming
731 ranges. The first and last ranges that overlap the argument
732 range may be wider than the incoming argument ranges. */
733 l1 = max (offset1, r1->offset);
734 h1 = min (offset1 + length, r1->offset + r1->length);
735
736 l2 = max (offset2, r2->offset);
737 h2 = min (offset2 + length, r2->offset + r2->length);
738
739 /* Make them relative to the respective start offsets, so we can
740 compare them for equality. */
741 l1 -= offset1;
742 h1 -= offset1;
743
744 l2 -= offset2;
745 h2 -= offset2;
746
747 /* Different availability, no match. */
748 if (l1 != l2 || h1 != h2)
749 return 0;
750
751 /* Compare the _available_ contents. */
752 if (memcmp_with_bit_offsets (val1->contents, offset1,
753 val2->contents, offset2, l1) != 0)
754 return 0;
755
756 length -= h1;
757 offset1 += h1;
758 offset2 += h1;
759 }
760
761 return 1;
762 }
763
764 int
765 value_available_contents_eq (const struct value *val1, int offset1,
766 const struct value *val2, int offset2,
767 int length)
768 {
769 return value_available_contents_bits_eq (val1, offset1 * TARGET_CHAR_BIT,
770 val2, offset2 * TARGET_CHAR_BIT,
771 length * TARGET_CHAR_BIT);
772 }
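/* Illustrative sketch, not from GDB: a hedged usage note.  Two values
   compare equal here only if their available bytes match AND the
   unavailable sub-ranges line up at the same relative positions;
   differing availability alone makes the comparison fail.  Assuming
   VAL1 and VAL2 are non-lazy values of at least LEN bytes:  */
#if 0
  if (value_available_contents_eq (val1, 0, val2, 0, len))
    {
      /* Same bytes wherever both are available, and the same
	 "holes" in the same places.  */
    }
#endif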
773
774 /* Prototypes for local functions. */
775
776 static void show_values (char *, int);
777
778 static void show_convenience (char *, int);
779
780
781 /* The value-history records all the values printed
782 by print commands during this session. Each chunk
783 records 60 consecutive values. The first chunk on
784 the chain records the most recent values.
785 The total number of values is in value_history_count. */
786
787 #define VALUE_HISTORY_CHUNK 60
788
789 struct value_history_chunk
790 {
791 struct value_history_chunk *next;
792 struct value *values[VALUE_HISTORY_CHUNK];
793 };
794
795 /* Chain of chunks now in use. */
796
797 static struct value_history_chunk *value_history_chain;
798
799 static int value_history_count; /* Abs number of last entry stored. */
800
801 \f
802 /* List of all value objects currently allocated
803 (except for those released by calls to release_value)
804 This is so they can be freed after each command. */
805
806 static struct value *all_values;
807
808 /* Allocate a lazy value for type TYPE. Its actual content is
809 "lazily" allocated too: the content field of the return value is
810 NULL; it will be allocated when it is fetched from the target. */
811
812 struct value *
813 allocate_value_lazy (struct type *type)
814 {
815 struct value *val;
816
817 /* Call check_typedef on our type to make sure that, if TYPE
818 is a TYPE_CODE_TYPEDEF, its length is set to the length
819 of the target type instead of zero. However, we do not
820 replace the typedef type by the target type, because we want
821 to keep the typedef in order to be able to set the VAL's type
822 description correctly. */
823 check_typedef (type);
824
825 val = (struct value *) xzalloc (sizeof (struct value));
826 val->contents = NULL;
827 val->next = all_values;
828 all_values = val;
829 val->type = type;
830 val->enclosing_type = type;
831 VALUE_LVAL (val) = not_lval;
832 val->location.address = 0;
833 VALUE_FRAME_ID (val) = null_frame_id;
834 val->offset = 0;
835 val->bitpos = 0;
836 val->bitsize = 0;
837 VALUE_REGNUM (val) = -1;
838 val->lazy = 1;
839 val->optimized_out = 0;
840 val->embedded_offset = 0;
841 val->pointed_to_offset = 0;
842 val->modifiable = 1;
843 val->initialized = 1; /* Default to initialized. */
844
845 /* Values start out on the all_values chain. */
846 val->reference_count = 1;
847
848 return val;
849 }
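/* Illustrative sketch, not from GDB: a hedged usage example.  A lazy
   value carries no contents buffer until something needs its bytes;
   TYPE below stands for any already-looked-up struct type *.  */
#if 0
  struct value *v = allocate_value_lazy (type);

  gdb_assert (value_lazy (v));
  /* The contents buffer is only created on demand, e.g. by
     value_contents_raw (), and is filled in from the target by
     value_fetch_lazy () when the value is first read.  */
#endif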
850
851 /* Allocate the contents of VAL if it has not been allocated yet. */
852
853 static void
854 allocate_value_contents (struct value *val)
855 {
856 if (!val->contents)
857 val->contents = (gdb_byte *) xzalloc (TYPE_LENGTH (val->enclosing_type));
858 }
859
860 /* Allocate a value and its contents for type TYPE. */
861
862 struct value *
863 allocate_value (struct type *type)
864 {
865 struct value *val = allocate_value_lazy (type);
866
867 allocate_value_contents (val);
868 val->lazy = 0;
869 return val;
870 }
871
872 /* Allocate a value that has the correct length
873 for COUNT repetitions of type TYPE. */
874
875 struct value *
876 allocate_repeat_value (struct type *type, int count)
877 {
878 int low_bound = current_language->string_lower_bound; /* ??? */
879 /* FIXME-type-allocation: need a way to free this type when we are
880 done with it. */
881 struct type *array_type
882 = lookup_array_range_type (type, low_bound, count + low_bound - 1);
883
884 return allocate_value (array_type);
885 }
886
887 struct value *
888 allocate_computed_value (struct type *type,
889 const struct lval_funcs *funcs,
890 void *closure)
891 {
892 struct value *v = allocate_value_lazy (type);
893
894 VALUE_LVAL (v) = lval_computed;
895 v->location.computed.funcs = funcs;
896 v->location.computed.closure = closure;
897
898 return v;
899 }
900
901 /* Allocate NOT_LVAL value for type TYPE being OPTIMIZED_OUT. */
902
903 struct value *
904 allocate_optimized_out_value (struct type *type)
905 {
906 struct value *retval = allocate_value_lazy (type);
907
908 set_value_optimized_out (retval, 1);
909 set_value_lazy (retval, 0);
910 return retval;
911 }
912
913 /* Accessor methods. */
914
915 struct value *
916 value_next (struct value *value)
917 {
918 return value->next;
919 }
920
921 struct type *
922 value_type (const struct value *value)
923 {
924 return value->type;
925 }
926 void
927 deprecated_set_value_type (struct value *value, struct type *type)
928 {
929 value->type = type;
930 }
931
932 int
933 value_offset (const struct value *value)
934 {
935 return value->offset;
936 }
937 void
938 set_value_offset (struct value *value, int offset)
939 {
940 value->offset = offset;
941 }
942
943 int
944 value_bitpos (const struct value *value)
945 {
946 return value->bitpos;
947 }
948 void
949 set_value_bitpos (struct value *value, int bit)
950 {
951 value->bitpos = bit;
952 }
953
954 int
955 value_bitsize (const struct value *value)
956 {
957 return value->bitsize;
958 }
959 void
960 set_value_bitsize (struct value *value, int bit)
961 {
962 value->bitsize = bit;
963 }
964
965 struct value *
966 value_parent (struct value *value)
967 {
968 return value->parent;
969 }
970
971 /* See value.h. */
972
973 void
974 set_value_parent (struct value *value, struct value *parent)
975 {
976 struct value *old = value->parent;
977
978 value->parent = parent;
979 if (parent != NULL)
980 value_incref (parent);
981 value_free (old);
982 }
983
984 gdb_byte *
985 value_contents_raw (struct value *value)
986 {
987 allocate_value_contents (value);
988 return value->contents + value->embedded_offset;
989 }
990
991 gdb_byte *
992 value_contents_all_raw (struct value *value)
993 {
994 allocate_value_contents (value);
995 return value->contents;
996 }
997
998 struct type *
999 value_enclosing_type (struct value *value)
1000 {
1001 return value->enclosing_type;
1002 }
1003
1004 /* Look at value.h for description. */
1005
1006 struct type *
1007 value_actual_type (struct value *value, int resolve_simple_types,
1008 int *real_type_found)
1009 {
1010 struct value_print_options opts;
1011 struct type *result;
1012
1013 get_user_print_options (&opts);
1014
1015 if (real_type_found)
1016 *real_type_found = 0;
1017 result = value_type (value);
1018 if (opts.objectprint)
1019 {
1020 /* If result's target type is TYPE_CODE_STRUCT, proceed to
1021 fetch its rtti type. */
1022 if ((TYPE_CODE (result) == TYPE_CODE_PTR
1023 || TYPE_CODE (result) == TYPE_CODE_REF)
1024 && TYPE_CODE (check_typedef (TYPE_TARGET_TYPE (result)))
1025 == TYPE_CODE_STRUCT)
1026 {
1027 struct type *real_type;
1028
1029 real_type = value_rtti_indirect_type (value, NULL, NULL, NULL);
1030 if (real_type)
1031 {
1032 if (real_type_found)
1033 *real_type_found = 1;
1034 result = real_type;
1035 }
1036 }
1037 else if (resolve_simple_types)
1038 {
1039 if (real_type_found)
1040 *real_type_found = 1;
1041 result = value_enclosing_type (value);
1042 }
1043 }
1044
1045 return result;
1046 }
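/* Illustrative sketch, not from GDB: a hedged usage example of the
   accessor above.  When "set print object" is on and VAL is a pointer
   or reference to a polymorphic struct, the run-time (RTTI) type is
   returned and *REAL_TYPE_FOUND is set.  */
#if 0
  int real_type_found;
  struct type *t = value_actual_type (val, 1 /* resolve_simple_types */,
				      &real_type_found);
#endif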
1047
1048 void
1049 error_value_optimized_out (void)
1050 {
1051 error (_("value has been optimized out"));
1052 }
1053
1054 static void
1055 require_not_optimized_out (const struct value *value)
1056 {
1057 if (value->optimized_out)
1058 {
1059 if (value->lval == lval_register)
1060 error (_("register has not been saved in frame"));
1061 else
1062 error_value_optimized_out ();
1063 }
1064 }
1065
1066 static void
1067 require_available (const struct value *value)
1068 {
1069 if (!VEC_empty (range_s, value->unavailable))
1070 throw_error (NOT_AVAILABLE_ERROR, _("value is not available"));
1071 }
1072
1073 const gdb_byte *
1074 value_contents_for_printing (struct value *value)
1075 {
1076 if (value->lazy)
1077 value_fetch_lazy (value);
1078 return value->contents;
1079 }
1080
1081 const gdb_byte *
1082 value_contents_for_printing_const (const struct value *value)
1083 {
1084 gdb_assert (!value->lazy);
1085 return value->contents;
1086 }
1087
1088 const gdb_byte *
1089 value_contents_all (struct value *value)
1090 {
1091 const gdb_byte *result = value_contents_for_printing (value);
1092 require_not_optimized_out (value);
1093 require_available (value);
1094 return result;
1095 }
1096
1097 /* Copy LENGTH bytes of SRC value's (all) contents
1098 (value_contents_all) starting at SRC_OFFSET, into DST value's (all)
1099 contents, starting at DST_OFFSET. If unavailable contents are
1100 being copied from SRC, the corresponding DST contents are marked
1101 unavailable accordingly. Neither DST nor SRC may be lazy
1102 values.
1103
1104 It is assumed the contents of DST in the [DST_OFFSET,
1105 DST_OFFSET+LENGTH) range are wholly available. */
1106
1107 void
1108 value_contents_copy_raw (struct value *dst, int dst_offset,
1109 struct value *src, int src_offset, int length)
1110 {
1111 range_s *r;
1112 int i;
1113 int src_bit_offset, dst_bit_offset, bit_length;
1114
1115 /* A lazy DST would make this copy operation useless, since as
1116 soon as DST's contents were un-lazied (by a later value_contents
1117 call, say), the contents would be overwritten. A lazy SRC would
1118 mean we'd be copying garbage. */
1119 gdb_assert (!dst->lazy && !src->lazy);
1120
1121 /* The overwritten DST range gets unavailability ORed in, not
1122 replaced. Make sure to remember to implement replacing if it
1123 turns out actually necessary. */
1124 gdb_assert (value_bytes_available (dst, dst_offset, length));
1125
1126 /* Copy the data. */
1127 memcpy (value_contents_all_raw (dst) + dst_offset,
1128 value_contents_all_raw (src) + src_offset,
1129 length);
1130
1131 /* Copy the meta-data, adjusted. */
1132 src_bit_offset = src_offset * TARGET_CHAR_BIT;
1133 dst_bit_offset = dst_offset * TARGET_CHAR_BIT;
1134 bit_length = length * TARGET_CHAR_BIT;
1135 for (i = 0; VEC_iterate (range_s, src->unavailable, i, r); i++)
1136 {
1137 ULONGEST h, l;
1138
1139 l = max (r->offset, src_bit_offset);
1140 h = min (r->offset + r->length, src_bit_offset + bit_length);
1141
1142 if (l < h)
1143 mark_value_bits_unavailable (dst,
1144 dst_bit_offset + (l - src_bit_offset),
1145 h - l);
1146 }
1147 }
1148
1149 /* Copy LENGTH bytes of SRC value's (all) contents
1150 (value_contents_all) starting at SRC_OFFSET byte, into DST value's
1151 (all) contents, starting at DST_OFFSET. If unavailable contents
1152 are being copied from SRC, the corresponding DST contents are
1153 marked unavailable accordingly. DST must not be lazy. If SRC is
1154 lazy, it will be fetched now. If SRC is not valid (is optimized
1155 out), an error is thrown.
1156
1157 It is assumed the contents of DST in the [DST_OFFSET,
1158 DST_OFFSET+LENGTH) range are wholly available. */
1159
1160 void
1161 value_contents_copy (struct value *dst, int dst_offset,
1162 struct value *src, int src_offset, int length)
1163 {
1164 require_not_optimized_out (src);
1165
1166 if (src->lazy)
1167 value_fetch_lazy (src);
1168
1169 value_contents_copy_raw (dst, dst_offset, src, src_offset, length);
1170 }
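/* Illustrative sketch, not from GDB: a hedged usage example.  Copy LEN
   bytes from SRC (fetching it first if it is lazy) into the middle of
   DST; any bytes that were unavailable in SRC become unavailable in
   DST at the corresponding positions.  DST must not be lazy and the
   destination range must currently be available.  */
#if 0
  value_contents_copy (dst, dst_offset, src, src_offset, len);
#endif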
1171
1172 int
1173 value_lazy (struct value *value)
1174 {
1175 return value->lazy;
1176 }
1177
1178 void
1179 set_value_lazy (struct value *value, int val)
1180 {
1181 value->lazy = val;
1182 }
1183
1184 int
1185 value_stack (struct value *value)
1186 {
1187 return value->stack;
1188 }
1189
1190 void
1191 set_value_stack (struct value *value, int val)
1192 {
1193 value->stack = val;
1194 }
1195
1196 const gdb_byte *
1197 value_contents (struct value *value)
1198 {
1199 const gdb_byte *result = value_contents_writeable (value);
1200 require_not_optimized_out (value);
1201 require_available (value);
1202 return result;
1203 }
1204
1205 gdb_byte *
1206 value_contents_writeable (struct value *value)
1207 {
1208 if (value->lazy)
1209 value_fetch_lazy (value);
1210 return value_contents_raw (value);
1211 }
1212
1213 int
1214 value_optimized_out (struct value *value)
1215 {
1216 /* We can only know if a value is optimized out once we have tried to
1217 fetch it. */
1218 if (!value->optimized_out && value->lazy)
1219 value_fetch_lazy (value);
1220
1221 return value->optimized_out;
1222 }
1223
1224 int
1225 value_optimized_out_const (const struct value *value)
1226 {
1227 return value->optimized_out;
1228 }
1229
1230 void
1231 set_value_optimized_out (struct value *value, int val)
1232 {
1233 value->optimized_out = val;
1234 }
1235
1236 int
1237 value_entirely_optimized_out (const struct value *value)
1238 {
1239 if (!value->optimized_out)
1240 return 0;
1241 if (value->lval != lval_computed
1242 || !value->location.computed.funcs->check_any_valid)
1243 return 1;
1244 return !value->location.computed.funcs->check_any_valid (value);
1245 }
1246
1247 int
1248 value_bits_valid (const struct value *value, int offset, int length)
1249 {
1250 if (!value->optimized_out)
1251 return 1;
1252 if (value->lval != lval_computed
1253 || !value->location.computed.funcs->check_validity)
1254 return 0;
1255 return value->location.computed.funcs->check_validity (value, offset,
1256 length);
1257 }
1258
1259 int
1260 value_bits_synthetic_pointer (const struct value *value,
1261 int offset, int length)
1262 {
1263 if (value->lval != lval_computed
1264 || !value->location.computed.funcs->check_synthetic_pointer)
1265 return 0;
1266 return value->location.computed.funcs->check_synthetic_pointer (value,
1267 offset,
1268 length);
1269 }
1270
1271 int
1272 value_embedded_offset (struct value *value)
1273 {
1274 return value->embedded_offset;
1275 }
1276
1277 void
1278 set_value_embedded_offset (struct value *value, int val)
1279 {
1280 value->embedded_offset = val;
1281 }
1282
1283 int
1284 value_pointed_to_offset (struct value *value)
1285 {
1286 return value->pointed_to_offset;
1287 }
1288
1289 void
1290 set_value_pointed_to_offset (struct value *value, int val)
1291 {
1292 value->pointed_to_offset = val;
1293 }
1294
1295 const struct lval_funcs *
1296 value_computed_funcs (const struct value *v)
1297 {
1298 gdb_assert (value_lval_const (v) == lval_computed);
1299
1300 return v->location.computed.funcs;
1301 }
1302
1303 void *
1304 value_computed_closure (const struct value *v)
1305 {
1306 gdb_assert (v->lval == lval_computed);
1307
1308 return v->location.computed.closure;
1309 }
1310
1311 enum lval_type *
1312 deprecated_value_lval_hack (struct value *value)
1313 {
1314 return &value->lval;
1315 }
1316
1317 enum lval_type
1318 value_lval_const (const struct value *value)
1319 {
1320 return value->lval;
1321 }
1322
1323 CORE_ADDR
1324 value_address (const struct value *value)
1325 {
1326 if (value->lval == lval_internalvar
1327 || value->lval == lval_internalvar_component
1328 || value->lval == lval_xcallable)
1329 return 0;
1330 if (value->parent != NULL)
1331 return value_address (value->parent) + value->offset;
1332 else
1333 return value->location.address + value->offset;
1334 }
1335
1336 CORE_ADDR
1337 value_raw_address (struct value *value)
1338 {
1339 if (value->lval == lval_internalvar
1340 || value->lval == lval_internalvar_component
1341 || value->lval == lval_xcallable)
1342 return 0;
1343 return value->location.address;
1344 }
1345
1346 void
1347 set_value_address (struct value *value, CORE_ADDR addr)
1348 {
1349 gdb_assert (value->lval != lval_internalvar
1350 && value->lval != lval_internalvar_component
1351 && value->lval != lval_xcallable);
1352 value->location.address = addr;
1353 }
1354
1355 struct internalvar **
1356 deprecated_value_internalvar_hack (struct value *value)
1357 {
1358 return &value->location.internalvar;
1359 }
1360
1361 struct frame_id *
1362 deprecated_value_frame_id_hack (struct value *value)
1363 {
1364 return &value->frame_id;
1365 }
1366
1367 short *
1368 deprecated_value_regnum_hack (struct value *value)
1369 {
1370 return &value->regnum;
1371 }
1372
1373 int
1374 deprecated_value_modifiable (struct value *value)
1375 {
1376 return value->modifiable;
1377 }
1378 \f
1379 /* Return a mark in the value chain. All values allocated after the
1380 mark is obtained (except for those released) are subject to being freed
1381 if a subsequent value_free_to_mark is passed the mark. */
1382 struct value *
1383 value_mark (void)
1384 {
1385 return all_values;
1386 }
1387
1388 /* Take a reference to VAL. VAL will not be deallocated until all
1389 references are released. */
1390
1391 void
1392 value_incref (struct value *val)
1393 {
1394 val->reference_count++;
1395 }
1396
1397 /* Release a reference to VAL, which was acquired with value_incref.
1398 This function is also called to deallocate values from the value
1399 chain. */
1400
1401 void
1402 value_free (struct value *val)
1403 {
1404 if (val)
1405 {
1406 gdb_assert (val->reference_count > 0);
1407 val->reference_count--;
1408 if (val->reference_count > 0)
1409 return;
1410
1411 /* If there's an associated parent value, drop our reference to
1412 it. */
1413 if (val->parent != NULL)
1414 value_free (val->parent);
1415
1416 if (VALUE_LVAL (val) == lval_computed)
1417 {
1418 const struct lval_funcs *funcs = val->location.computed.funcs;
1419
1420 if (funcs->free_closure)
1421 funcs->free_closure (val);
1422 }
1423 else if (VALUE_LVAL (val) == lval_xcallable)
1424 free_xmethod_worker (val->location.xm_worker);
1425
1426 xfree (val->contents);
1427 VEC_free (range_s, val->unavailable);
1428 }
1429 xfree (val);
1430 }
1431
1432 /* Free all values allocated since MARK was obtained by value_mark
1433 (except for those released). */
1434 void
1435 value_free_to_mark (struct value *mark)
1436 {
1437 struct value *val;
1438 struct value *next;
1439
1440 for (val = all_values; val && val != mark; val = next)
1441 {
1442 next = val->next;
1443 val->released = 1;
1444 value_free (val);
1445 }
1446 all_values = val;
1447 }
1448
1449 /* Free all the values that have been allocated (except for those released).
1450 Call after each command, successful or not.
1451 In practice this is called before each command, which is sufficient. */
1452
1453 void
1454 free_all_values (void)
1455 {
1456 struct value *val;
1457 struct value *next;
1458
1459 for (val = all_values; val; val = next)
1460 {
1461 next = val->next;
1462 val->released = 1;
1463 value_free (val);
1464 }
1465
1466 all_values = 0;
1467 }
1468
1469 /* Frees all the elements in a chain of values. */
1470
1471 void
1472 free_value_chain (struct value *v)
1473 {
1474 struct value *next;
1475
1476 for (; v; v = next)
1477 {
1478 next = value_next (v);
1479 value_free (v);
1480 }
1481 }
1482
1483 /* Remove VAL from the chain all_values
1484 so it will not be freed automatically. */
1485
1486 void
1487 release_value (struct value *val)
1488 {
1489 struct value *v;
1490
1491 if (all_values == val)
1492 {
1493 all_values = val->next;
1494 val->next = NULL;
1495 val->released = 1;
1496 return;
1497 }
1498
1499 for (v = all_values; v; v = v->next)
1500 {
1501 if (v->next == val)
1502 {
1503 v->next = val->next;
1504 val->next = NULL;
1505 val->released = 1;
1506 break;
1507 }
1508 }
1509 }
1510
1511 /* If the value is not already released, release it.
1512 If the value is already released, increment its reference count.
1513 That is, this function ensures that the value is released from the
1514 value chain and that the caller owns a reference to it. */
1515
1516 void
1517 release_value_or_incref (struct value *val)
1518 {
1519 if (val->released)
1520 value_incref (val);
1521 else
1522 release_value (val);
1523 }
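/* Illustrative sketch, not from GDB: a hedged outline of the intended
   ownership pattern.  A freshly allocated value sits on the all_values
   chain with one reference; taking it off the chain (or adding a
   reference if it is already off) gives the caller ownership, which
   must eventually be dropped with value_free.  TYPE is a placeholder.  */
#if 0
  struct value *v = allocate_value (type);   /* On the chain, refcount 1.     */

  release_value_or_incref (v);               /* Caller now owns a reference.  */
  /* ... use V across commands without free_all_values reclaiming it ...  */
  value_free (v);                            /* Drop the caller's reference.  */
#endif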
1524
1525 /* Release all values up to mark */
1526 struct value *
1527 value_release_to_mark (struct value *mark)
1528 {
1529 struct value *val;
1530 struct value *next;
1531
1532 for (val = next = all_values; next; next = next->next)
1533 {
1534 if (next->next == mark)
1535 {
1536 all_values = next->next;
1537 next->next = NULL;
1538 return val;
1539 }
1540 next->released = 1;
1541 }
1542 all_values = 0;
1543 return val;
1544 }
1545
1546 /* Return a copy of the value ARG.
1547 It contains the same contents, for same memory address,
1548 but it's a different block of storage. */
1549
1550 struct value *
1551 value_copy (struct value *arg)
1552 {
1553 struct type *encl_type = value_enclosing_type (arg);
1554 struct value *val;
1555
1556 if (value_lazy (arg))
1557 val = allocate_value_lazy (encl_type);
1558 else
1559 val = allocate_value (encl_type);
1560 val->type = arg->type;
1561 VALUE_LVAL (val) = VALUE_LVAL (arg);
1562 val->location = arg->location;
1563 val->offset = arg->offset;
1564 val->bitpos = arg->bitpos;
1565 val->bitsize = arg->bitsize;
1566 VALUE_FRAME_ID (val) = VALUE_FRAME_ID (arg);
1567 VALUE_REGNUM (val) = VALUE_REGNUM (arg);
1568 val->lazy = arg->lazy;
1569 val->optimized_out = arg->optimized_out;
1570 val->embedded_offset = value_embedded_offset (arg);
1571 val->pointed_to_offset = arg->pointed_to_offset;
1572 val->modifiable = arg->modifiable;
1573 if (!value_lazy (val))
1574 {
1575 memcpy (value_contents_all_raw (val), value_contents_all_raw (arg),
1576 TYPE_LENGTH (value_enclosing_type (arg)));
1577
1578 }
1579 val->unavailable = VEC_copy (range_s, arg->unavailable);
1580 set_value_parent (val, arg->parent);
1581 if (VALUE_LVAL (val) == lval_computed)
1582 {
1583 const struct lval_funcs *funcs = val->location.computed.funcs;
1584
1585 if (funcs->copy_closure)
1586 val->location.computed.closure = funcs->copy_closure (val);
1587 }
1588 return val;
1589 }
1590
1591 /* Return a version of ARG that is non-lvalue. */
1592
1593 struct value *
1594 value_non_lval (struct value *arg)
1595 {
1596 if (VALUE_LVAL (arg) != not_lval)
1597 {
1598 struct type *enc_type = value_enclosing_type (arg);
1599 struct value *val = allocate_value (enc_type);
1600
1601 memcpy (value_contents_all_raw (val), value_contents_all (arg),
1602 TYPE_LENGTH (enc_type));
1603 val->type = arg->type;
1604 set_value_embedded_offset (val, value_embedded_offset (arg));
1605 set_value_pointed_to_offset (val, value_pointed_to_offset (arg));
1606 return val;
1607 }
1608 return arg;
1609 }
1610
1611 void
1612 set_value_component_location (struct value *component,
1613 const struct value *whole)
1614 {
1615 gdb_assert (whole->lval != lval_xcallable);
1616
1617 if (whole->lval == lval_internalvar)
1618 VALUE_LVAL (component) = lval_internalvar_component;
1619 else
1620 VALUE_LVAL (component) = whole->lval;
1621
1622 component->location = whole->location;
1623 if (whole->lval == lval_computed)
1624 {
1625 const struct lval_funcs *funcs = whole->location.computed.funcs;
1626
1627 if (funcs->copy_closure)
1628 component->location.computed.closure = funcs->copy_closure (whole);
1629 }
1630 }
1631
1632 \f
1633 /* Access to the value history. */
1634
1635 /* Record a new value in the value history.
1636 Returns the absolute history index of the entry. */
1637
1638 int
1639 record_latest_value (struct value *val)
1640 {
1641 int i;
1642
1643 /* We don't want this value to have anything to do with the inferior anymore.
1644 In particular, "set $1 = 50" should not affect the variable from which
1645 the value was taken, and fast watchpoints should be able to assume that
1646 a value on the value history never changes. */
1647 if (value_lazy (val))
1648 value_fetch_lazy (val);
1649 /* We preserve VALUE_LVAL so that the user can find out where it was fetched
1650 from. This is a bit dubious, because then *&$1 does not just return $1
1651 but the current contents of that location. c'est la vie... */
1652 val->modifiable = 0;
1653
1654 /* The value may have already been released, in which case we're adding a
1655 new reference for its entry in the history. That is why we call
1656 release_value_or_incref here instead of release_value. */
1657 release_value_or_incref (val);
1658
1659 /* Here we treat value_history_count as origin-zero,
1660 applying to the value being stored now. */
1661
1662 i = value_history_count % VALUE_HISTORY_CHUNK;
1663 if (i == 0)
1664 {
1665 struct value_history_chunk *new
1666 = (struct value_history_chunk *)
1667
1668 xmalloc (sizeof (struct value_history_chunk));
1669 memset (new->values, 0, sizeof new->values);
1670 new->next = value_history_chain;
1671 value_history_chain = new;
1672 }
1673
1674 value_history_chain->values[i] = val;
1675
1676 /* Now we regard value_history_count as origin-one,
1677 applying to the value just stored. */
1678
1679 return ++value_history_count;
1680 }
1681
1682 /* Return a copy of the value in the history with sequence number NUM. */
1683
1684 struct value *
1685 access_value_history (int num)
1686 {
1687 struct value_history_chunk *chunk;
1688 int i;
1689 int absnum = num;
1690
1691 if (absnum <= 0)
1692 absnum += value_history_count;
1693
1694 if (absnum <= 0)
1695 {
1696 if (num == 0)
1697 error (_("The history is empty."));
1698 else if (num == 1)
1699 error (_("There is only one value in the history."));
1700 else
1701 error (_("History does not go back to $$%d."), -num);
1702 }
1703 if (absnum > value_history_count)
1704 error (_("History has not yet reached $%d."), absnum);
1705
1706 absnum--;
1707
1708 /* Now absnum is always absolute and origin zero. */
1709
1710 chunk = value_history_chain;
1711 for (i = (value_history_count - 1) / VALUE_HISTORY_CHUNK
1712 - absnum / VALUE_HISTORY_CHUNK;
1713 i > 0; i--)
1714 chunk = chunk->next;
1715
1716 return value_copy (chunk->values[absnum % VALUE_HISTORY_CHUNK]);
1717 }
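/* Illustrative sketch, not from GDB: the chunk walk above in concrete
   numbers.  With VALUE_HISTORY_CHUNK == 60 and, say,
   value_history_count == 125, the newest chunk (the chain head) holds
   $121..$125.  Fetching $37 gives absnum == 36 after the decrement, so
   the loop steps (125 - 1) / 60 - 36 / 60 == 2 - 0 == 2 chunks back
   from the head and then indexes values[36 % 60] == values[36].
   Standalone check of that arithmetic:  */
#if 0
#include <assert.h>

static void
history_index_example (void)
{
  int value_history_count = 125, absnum = 37;

  absnum--;
  assert ((value_history_count - 1) / 60 - absnum / 60 == 2);
  assert (absnum % 60 == 36);
}
#endif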
1718
1719 static void
1720 show_values (char *num_exp, int from_tty)
1721 {
1722 int i;
1723 struct value *val;
1724 static int num = 1;
1725
1726 if (num_exp)
1727 {
1728 /* "show values +" should print from the stored position.
1729 "show values <exp>" should print around value number <exp>. */
1730 if (num_exp[0] != '+' || num_exp[1] != '\0')
1731 num = parse_and_eval_long (num_exp) - 5;
1732 }
1733 else
1734 {
1735 /* "show values" means print the last 10 values. */
1736 num = value_history_count - 9;
1737 }
1738
1739 if (num <= 0)
1740 num = 1;
1741
1742 for (i = num; i < num + 10 && i <= value_history_count; i++)
1743 {
1744 struct value_print_options opts;
1745
1746 val = access_value_history (i);
1747 printf_filtered (("$%d = "), i);
1748 get_user_print_options (&opts);
1749 value_print (val, gdb_stdout, &opts);
1750 printf_filtered (("\n"));
1751 }
1752
1753 /* The next "show values +" should start after what we just printed. */
1754 num += 10;
1755
1756 /* Hitting just return after this command should do the same thing as
1757 "show values +". If num_exp is null, this is unnecessary, since
1758 "show values +" is not useful after "show values". */
1759 if (from_tty && num_exp)
1760 {
1761 num_exp[0] = '+';
1762 num_exp[1] = '\0';
1763 }
1764 }
1765 \f
1766 /* Internal variables. These are variables within the debugger
1767 that hold values assigned by debugger commands.
1768 The user refers to them with a '$' prefix
1769 that does not appear in the variable names stored internally. */
1770
1771 struct internalvar
1772 {
1773 struct internalvar *next;
1774 char *name;
1775
1776 /* We support various different kinds of content of an internal variable.
1777 enum internalvar_kind specifies the kind, and union internalvar_data
1778 provides the data associated with this particular kind. */
1779
1780 enum internalvar_kind
1781 {
1782 /* The internal variable is empty. */
1783 INTERNALVAR_VOID,
1784
1785 /* The value of the internal variable is provided directly as
1786 a GDB value object. */
1787 INTERNALVAR_VALUE,
1788
1789 /* A fresh value is computed via a call-back routine on every
1790 access to the internal variable. */
1791 INTERNALVAR_MAKE_VALUE,
1792
1793 /* The internal variable holds a GDB internal convenience function. */
1794 INTERNALVAR_FUNCTION,
1795
1796 /* The variable holds an integer value. */
1797 INTERNALVAR_INTEGER,
1798
1799 /* The variable holds a GDB-provided string. */
1800 INTERNALVAR_STRING,
1801
1802 } kind;
1803
1804 union internalvar_data
1805 {
1806 /* A value object used with INTERNALVAR_VALUE. */
1807 struct value *value;
1808
1809 /* The call-back routine used with INTERNALVAR_MAKE_VALUE. */
1810 struct
1811 {
1812 /* The functions to call. */
1813 const struct internalvar_funcs *functions;
1814
1815 /* The function's user-data. */
1816 void *data;
1817 } make_value;
1818
1819 /* The internal function used with INTERNALVAR_FUNCTION. */
1820 struct
1821 {
1822 struct internal_function *function;
1823 /* True if this is the canonical name for the function. */
1824 int canonical;
1825 } fn;
1826
1827 /* An integer value used with INTERNALVAR_INTEGER. */
1828 struct
1829 {
1830 /* If type is non-NULL, it will be used as the type to generate
1831 a value for this internal variable. If type is NULL, a default
1832 integer type for the architecture is used. */
1833 struct type *type;
1834 LONGEST val;
1835 } integer;
1836
1837 /* A string value used with INTERNALVAR_STRING. */
1838 char *string;
1839 } u;
1840 };
1841
1842 static struct internalvar *internalvars;
1843
1844 /* If the variable does not already exist, create it and give it the
1845 value given. If no value is given, the default is zero. */
1846 static void
1847 init_if_undefined_command (char* args, int from_tty)
1848 {
1849 struct internalvar* intvar;
1850
1851 /* Parse the expression - this is taken from set_command(). */
1852 struct expression *expr = parse_expression (args);
1853 register struct cleanup *old_chain =
1854 make_cleanup (free_current_contents, &expr);
1855
1856 /* Validate the expression.
1857 Was the expression an assignment?
1858 Or even an expression at all? */
1859 if (expr->nelts == 0 || expr->elts[0].opcode != BINOP_ASSIGN)
1860 error (_("Init-if-undefined requires an assignment expression."));
1861
1862 /* Extract the variable from the parsed expression.
1863 In the case of an assign the lvalue will be in elts[1] and elts[2]. */
1864 if (expr->elts[1].opcode != OP_INTERNALVAR)
1865 error (_("The first parameter to init-if-undefined "
1866 "should be a GDB variable."));
1867 intvar = expr->elts[2].internalvar;
1868
1869 /* Only evaluate the expression if the lvalue is void.
1870 This may still fail if the expression is invalid. */
1871 if (intvar->kind == INTERNALVAR_VOID)
1872 evaluate_expression (expr);
1873
1874 do_cleanups (old_chain);
1875 }
1876
1877
1878 /* Look up an internal variable with name NAME. NAME should not
1879 normally include a dollar sign.
1880
1881 If the specified internal variable does not exist,
1882 the return value is NULL. */
1883
1884 struct internalvar *
1885 lookup_only_internalvar (const char *name)
1886 {
1887 struct internalvar *var;
1888
1889 for (var = internalvars; var; var = var->next)
1890 if (strcmp (var->name, name) == 0)
1891 return var;
1892
1893 return NULL;
1894 }
1895
1896 /* Complete NAME by comparing it to the names of internal variables.
1897 Returns a vector of newly allocated strings, or NULL if no matches
1898 were found. */
1899
1900 VEC (char_ptr) *
1901 complete_internalvar (const char *name)
1902 {
1903 VEC (char_ptr) *result = NULL;
1904 struct internalvar *var;
1905 int len;
1906
1907 len = strlen (name);
1908
1909 for (var = internalvars; var; var = var->next)
1910 if (strncmp (var->name, name, len) == 0)
1911 {
1912 char *r = xstrdup (var->name);
1913
1914 VEC_safe_push (char_ptr, result, r);
1915 }
1916
1917 return result;
1918 }
1919
1920 /* Create an internal variable with name NAME and with a void value.
1921 NAME should not normally include a dollar sign. */
1922
1923 struct internalvar *
1924 create_internalvar (const char *name)
1925 {
1926 struct internalvar *var;
1927
1928 var = (struct internalvar *) xmalloc (sizeof (struct internalvar));
1929 var->name = concat (name, (char *)NULL);
1930 var->kind = INTERNALVAR_VOID;
1931 var->next = internalvars;
1932 internalvars = var;
1933 return var;
1934 }
1935
1936 /* Create an internal variable with name NAME and register FUNCS as
1937 the set of functions that value_of_internalvar uses to create a
1938 value whenever this variable is referenced. NAME should not
1939 normally include a dollar sign. DATA is passed uninterpreted to
1940 those functions when they are called, and to the clean-up callback
1941 in FUNCS (if any) when the internal variable is destroyed. */
1942
1943 struct internalvar *
1944 create_internalvar_type_lazy (const char *name,
1945 const struct internalvar_funcs *funcs,
1946 void *data)
1947 {
1948 struct internalvar *var = create_internalvar (name);
1949
1950 var->kind = INTERNALVAR_MAKE_VALUE;
1951 var->u.make_value.functions = funcs;
1952 var->u.make_value.data = data;
1953 return var;
1954 }
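/* Illustrative sketch, not from GDB: a hedged example of registering a
   lazily-computed convenience variable.  The callback shape follows
   the make_value invocation in value_of_internalvar below; only the
   make_value and compile_to_ax members of internalvar_funcs are
   visible in this file, so the initializer sets just what is shown
   here.  The names hello_make_value, hello_funcs and "_hello" are
   hypothetical.  */
#if 0
static struct value *
hello_make_value (struct gdbarch *gdbarch, struct internalvar *var,
		  void *data)
{
  /* Recomputed on every reference to the variable.  */
  return value_from_longest (builtin_type (gdbarch)->builtin_int, 42);
}

static const struct internalvar_funcs hello_funcs =
{
  .make_value = hello_make_value,
  /* Remaining callbacks (e.g. compile_to_ax) left NULL.  */
};

/* Typically done from some _initialize_* function:
   create_internalvar_type_lazy ("_hello", &hello_funcs, NULL);  */
#endif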
1955
1956 /* See documentation in value.h. */
1957
1958 int
1959 compile_internalvar_to_ax (struct internalvar *var,
1960 struct agent_expr *expr,
1961 struct axs_value *value)
1962 {
1963 if (var->kind != INTERNALVAR_MAKE_VALUE
1964 || var->u.make_value.functions->compile_to_ax == NULL)
1965 return 0;
1966
1967 var->u.make_value.functions->compile_to_ax (var, expr, value,
1968 var->u.make_value.data);
1969 return 1;
1970 }
1971
1972 /* Look up an internal variable with name NAME. NAME should not
1973 normally include a dollar sign.
1974
1975 If the specified internal variable does not exist,
1976 one is created, with a void value. */
1977
1978 struct internalvar *
1979 lookup_internalvar (const char *name)
1980 {
1981 struct internalvar *var;
1982
1983 var = lookup_only_internalvar (name);
1984 if (var)
1985 return var;
1986
1987 return create_internalvar (name);
1988 }
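/* Illustrative sketch, not from GDB: a hedged usage example.  Looking
   up "foo" creates the variable (void-valued) if needed; reading it
   through value_of_internalvar yields a value whose lval, for most
   kinds, points back at the variable so that assignments to the result
   update it.  get_current_arch comes from arch-utils.h, included
   above.  */
#if 0
  struct internalvar *var = lookup_internalvar ("foo");   /* No '$' prefix.  */
  struct value *v = value_of_internalvar (get_current_arch (), var);
#endif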
1989
1990 /* Return current value of internal variable VAR. For variables that
1991 are not inherently typed, use a value type appropriate for GDBARCH. */
1992
1993 struct value *
1994 value_of_internalvar (struct gdbarch *gdbarch, struct internalvar *var)
1995 {
1996 struct value *val;
1997 struct trace_state_variable *tsv;
1998
1999 /* If there is a trace state variable of the same name, assume that
2000 is what we really want to see. */
2001 tsv = find_trace_state_variable (var->name);
2002 if (tsv)
2003 {
2004 tsv->value_known = target_get_trace_state_variable_value (tsv->number,
2005 &(tsv->value));
2006 if (tsv->value_known)
2007 val = value_from_longest (builtin_type (gdbarch)->builtin_int64,
2008 tsv->value);
2009 else
2010 val = allocate_value (builtin_type (gdbarch)->builtin_void);
2011 return val;
2012 }
2013
2014 switch (var->kind)
2015 {
2016 case INTERNALVAR_VOID:
2017 val = allocate_value (builtin_type (gdbarch)->builtin_void);
2018 break;
2019
2020 case INTERNALVAR_FUNCTION:
2021 val = allocate_value (builtin_type (gdbarch)->internal_fn);
2022 break;
2023
2024 case INTERNALVAR_INTEGER:
2025 if (!var->u.integer.type)
2026 val = value_from_longest (builtin_type (gdbarch)->builtin_int,
2027 var->u.integer.val);
2028 else
2029 val = value_from_longest (var->u.integer.type, var->u.integer.val);
2030 break;
2031
2032 case INTERNALVAR_STRING:
2033 val = value_cstring (var->u.string, strlen (var->u.string),
2034 builtin_type (gdbarch)->builtin_char);
2035 break;
2036
2037 case INTERNALVAR_VALUE:
2038 val = value_copy (var->u.value);
2039 if (value_lazy (val))
2040 value_fetch_lazy (val);
2041 break;
2042
2043 case INTERNALVAR_MAKE_VALUE:
2044 val = (*var->u.make_value.functions->make_value) (gdbarch, var,
2045 var->u.make_value.data);
2046 break;
2047
2048 default:
2049 internal_error (__FILE__, __LINE__, _("bad kind"));
2050 }
2051
2052 /* Change the VALUE_LVAL to lval_internalvar so that future operations
2053 on this value go back to affect the original internal variable.
2054
2055 Do not do this for INTERNALVAR_MAKE_VALUE variables, as those have
2056 no underlying modifiable state in the internal variable.
2057
2058 Likewise, if the variable's value is a computed lvalue, we want
2059 references to it to produce another computed lvalue, where
2060 references and assignments actually operate through the
2061 computed value's functions.
2062
2063 This means that internal variables with computed values
2064 behave a little differently from other internal variables:
2065 assignments to them don't just replace the previous value
2066 altogether. At the moment, this seems like the behavior we
2067 want. */
2068
2069 if (var->kind != INTERNALVAR_MAKE_VALUE
2070 && val->lval != lval_computed)
2071 {
2072 VALUE_LVAL (val) = lval_internalvar;
2073 VALUE_INTERNALVAR (val) = var;
2074 }
2075
2076 return val;
2077 }
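
/* For example, after "set $v = some_struct" the value returned above is
   an lval_internalvar, so a later assignment such as "set $v.field = 1"
   can be applied through set_internalvar_component and modifies the
   stored value in place; a variable created with
   create_internalvar_type_lazy instead yields a fresh value from its
   make_value callback on every reference.  */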
2078
2079 int
2080 get_internalvar_integer (struct internalvar *var, LONGEST *result)
2081 {
2082 if (var->kind == INTERNALVAR_INTEGER)
2083 {
2084 *result = var->u.integer.val;
2085 return 1;
2086 }
2087
2088 if (var->kind == INTERNALVAR_VALUE)
2089 {
2090 struct type *type = check_typedef (value_type (var->u.value));
2091
2092 if (TYPE_CODE (type) == TYPE_CODE_INT)
2093 {
2094 *result = value_as_long (var->u.value);
2095 return 1;
2096 }
2097 }
2098
2099 return 0;
2100 }
2101
2102 static int
2103 get_internalvar_function (struct internalvar *var,
2104 struct internal_function **result)
2105 {
2106 switch (var->kind)
2107 {
2108 case INTERNALVAR_FUNCTION:
2109 *result = var->u.fn.function;
2110 return 1;
2111
2112 default:
2113 return 0;
2114 }
2115 }
2116
2117 void
2118 set_internalvar_component (struct internalvar *var, int offset, int bitpos,
2119 int bitsize, struct value *newval)
2120 {
2121 gdb_byte *addr;
2122
2123 switch (var->kind)
2124 {
2125 case INTERNALVAR_VALUE:
2126 addr = value_contents_writeable (var->u.value);
2127
2128 if (bitsize)
2129 modify_field (value_type (var->u.value), addr + offset,
2130 value_as_long (newval), bitpos, bitsize);
2131 else
2132 memcpy (addr + offset, value_contents (newval),
2133 TYPE_LENGTH (value_type (newval)));
2134 break;
2135
2136 default:
2137 /* We can never get a component of any other kind. */
2138 internal_error (__FILE__, __LINE__, _("set_internalvar_component"));
2139 }
2140 }
2141
2142 void
2143 set_internalvar (struct internalvar *var, struct value *val)
2144 {
2145 enum internalvar_kind new_kind;
2146 union internalvar_data new_data = { 0 };
2147
2148 if (var->kind == INTERNALVAR_FUNCTION && var->u.fn.canonical)
2149 error (_("Cannot overwrite convenience function %s"), var->name);
2150
2151 /* Prepare new contents. */
2152 switch (TYPE_CODE (check_typedef (value_type (val))))
2153 {
2154 case TYPE_CODE_VOID:
2155 new_kind = INTERNALVAR_VOID;
2156 break;
2157
2158 case TYPE_CODE_INTERNAL_FUNCTION:
2159 gdb_assert (VALUE_LVAL (val) == lval_internalvar);
2160 new_kind = INTERNALVAR_FUNCTION;
2161 get_internalvar_function (VALUE_INTERNALVAR (val),
2162 &new_data.fn.function);
2163 /* Copies created here are never canonical. */
2164 break;
2165
2166 default:
2167 new_kind = INTERNALVAR_VALUE;
2168 new_data.value = value_copy (val);
2169 new_data.value->modifiable = 1;
2170
2171 /* Force the value to be fetched from the target now, to avoid problems
2172 later when this internalvar is referenced and the target is gone or
2173 has changed. */
2174 if (value_lazy (new_data.value))
2175 value_fetch_lazy (new_data.value);
2176
2177 /* Release the value from the value chain to prevent it from being
2178 deleted by free_all_values. From here on this function should not
2179 call error () until new_data is installed into the var->u to avoid
2180 leaking memory. */
2181 release_value (new_data.value);
2182 break;
2183 }
2184
2185 /* Clean up old contents. */
2186 clear_internalvar (var);
2187
2188 /* Switch over. */
2189 var->kind = new_kind;
2190 var->u = new_data;
2191 /* End code which must not call error(). */
2192 }
2193
2194 void
2195 set_internalvar_integer (struct internalvar *var, LONGEST l)
2196 {
2197 /* Clean up old contents. */
2198 clear_internalvar (var);
2199
2200 var->kind = INTERNALVAR_INTEGER;
2201 var->u.integer.type = NULL;
2202 var->u.integer.val = l;
2203 }
2204
2205 void
2206 set_internalvar_string (struct internalvar *var, const char *string)
2207 {
2208 /* Clean up old contents. */
2209 clear_internalvar (var);
2210
2211 var->kind = INTERNALVAR_STRING;
2212 var->u.string = xstrdup (string);
2213 }
2214
2215 static void
2216 set_internalvar_function (struct internalvar *var, struct internal_function *f)
2217 {
2218 /* Clean up old contents. */
2219 clear_internalvar (var);
2220
2221 var->kind = INTERNALVAR_FUNCTION;
2222 var->u.fn.function = f;
2223 var->u.fn.canonical = 1;
2224 /* Variables installed here are always the canonical version. */
2225 }
2226
2227 void
2228 clear_internalvar (struct internalvar *var)
2229 {
2230 /* Clean up old contents. */
2231 switch (var->kind)
2232 {
2233 case INTERNALVAR_VALUE:
2234 value_free (var->u.value);
2235 break;
2236
2237 case INTERNALVAR_STRING:
2238 xfree (var->u.string);
2239 break;
2240
2241 case INTERNALVAR_MAKE_VALUE:
2242 if (var->u.make_value.functions->destroy != NULL)
2243 var->u.make_value.functions->destroy (var->u.make_value.data);
2244 break;
2245
2246 default:
2247 break;
2248 }
2249
2250 /* Reset to void kind. */
2251 var->kind = INTERNALVAR_VOID;
2252 }
2253
2254 char *
2255 internalvar_name (struct internalvar *var)
2256 {
2257 return var->name;
2258 }
2259
2260 static struct internal_function *
2261 create_internal_function (const char *name,
2262 internal_function_fn handler, void *cookie)
2263 {
2264 struct internal_function *ifn = XNEW (struct internal_function);
2265
2266 ifn->name = xstrdup (name);
2267 ifn->handler = handler;
2268 ifn->cookie = cookie;
2269 return ifn;
2270 }
2271
2272 char *
2273 value_internal_function_name (struct value *val)
2274 {
2275 struct internal_function *ifn;
2276 int result;
2277
2278 gdb_assert (VALUE_LVAL (val) == lval_internalvar);
2279 result = get_internalvar_function (VALUE_INTERNALVAR (val), &ifn);
2280 gdb_assert (result);
2281
2282 return ifn->name;
2283 }
2284
2285 struct value *
2286 call_internal_function (struct gdbarch *gdbarch,
2287 const struct language_defn *language,
2288 struct value *func, int argc, struct value **argv)
2289 {
2290 struct internal_function *ifn;
2291 int result;
2292
2293 gdb_assert (VALUE_LVAL (func) == lval_internalvar);
2294 result = get_internalvar_function (VALUE_INTERNALVAR (func), &ifn);
2295 gdb_assert (result);
2296
2297 return (*ifn->handler) (gdbarch, language, ifn->cookie, argc, argv);
2298 }
2299
2300 /* The 'function' command. This does nothing -- it is just a
2301 placeholder to let "help function NAME" work. This is also used as
2302 the implementation of the sub-command that is created when
2303 registering an internal function. */
2304 static void
2305 function_command (char *command, int from_tty)
2306 {
2307 /* Do nothing. */
2308 }
2309
2310 /* Clean up if an internal function's command is destroyed. */
2311 static void
2312 function_destroyer (struct cmd_list_element *self, void *ignore)
2313 {
2314 xfree ((char *) self->name);
2315 xfree (self->doc);
2316 }
2317
2318 /* Add a new internal function. NAME is the name of the function; DOC
2319 is a documentation string describing the function. HANDLER is
2320 called when the function is invoked. COOKIE is an arbitrary
2321 pointer which is passed to HANDLER and is intended for "user
2322 data". */
2323 void
2324 add_internal_function (const char *name, const char *doc,
2325 internal_function_fn handler, void *cookie)
2326 {
2327 struct cmd_list_element *cmd;
2328 struct internal_function *ifn;
2329 struct internalvar *var = lookup_internalvar (name);
2330
2331 ifn = create_internal_function (name, handler, cookie);
2332 set_internalvar_function (var, ifn);
2333
2334 cmd = add_cmd (xstrdup (name), no_class, function_command, (char *) doc,
2335 &functionlist);
2336 cmd->destroyer = function_destroyer;
2337 }
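
/* For illustration only: a hypothetical convenience function
   "$_double" registered through add_internal_function.  The handler
   signature mirrors isvoid_internal_fn near the end of this file; the
   name double_internal_fn and the "$_double" function itself are made
   up.  */
#if 0
static struct value *
double_internal_fn (struct gdbarch *gdbarch,
                    const struct language_defn *language,
                    void *cookie, int argc, struct value **argv)
{
  if (argc != 1)
    error (_("You must provide one argument for $_double."));

  /* Return twice the argument as a plain int value.  */
  return value_from_longest (builtin_type (gdbarch)->builtin_int,
                             2 * value_as_long (argv[0]));
}

/* Typically called from an _initialize_* function:
   add_internal_function ("_double", _("Return twice the argument."),
                          double_internal_fn, NULL);  */
#endif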
2338
2339 /* Update VALUE before discarding OBJFILE. COPIED_TYPES is used to
2340 prevent cycles / duplicates. */
2341
2342 void
2343 preserve_one_value (struct value *value, struct objfile *objfile,
2344 htab_t copied_types)
2345 {
2346 if (TYPE_OBJFILE (value->type) == objfile)
2347 value->type = copy_type_recursive (objfile, value->type, copied_types);
2348
2349 if (TYPE_OBJFILE (value->enclosing_type) == objfile)
2350 value->enclosing_type = copy_type_recursive (objfile,
2351 value->enclosing_type,
2352 copied_types);
2353 }
2354
2355 /* Likewise for internal variable VAR. */
2356
2357 static void
2358 preserve_one_internalvar (struct internalvar *var, struct objfile *objfile,
2359 htab_t copied_types)
2360 {
2361 switch (var->kind)
2362 {
2363 case INTERNALVAR_INTEGER:
2364 if (var->u.integer.type && TYPE_OBJFILE (var->u.integer.type) == objfile)
2365 var->u.integer.type
2366 = copy_type_recursive (objfile, var->u.integer.type, copied_types);
2367 break;
2368
2369 case INTERNALVAR_VALUE:
2370 preserve_one_value (var->u.value, objfile, copied_types);
2371 break;
2372 }
2373 }
2374
2375 /* Update the internal variables and value history when OBJFILE is
2376 discarded; we must copy the types out of the objfile. New global types
2377 will be created for every convenience variable which currently points to
2378 this objfile's types, and the convenience variables will be adjusted to
2379 use the new global types. */
2380
2381 void
2382 preserve_values (struct objfile *objfile)
2383 {
2384 htab_t copied_types;
2385 struct value_history_chunk *cur;
2386 struct internalvar *var;
2387 int i;
2388
2389 /* Create the hash table. We allocate on the objfile's obstack, since
2390 it is soon to be deleted. */
2391 copied_types = create_copied_types_hash (objfile);
2392
2393 for (cur = value_history_chain; cur; cur = cur->next)
2394 for (i = 0; i < VALUE_HISTORY_CHUNK; i++)
2395 if (cur->values[i])
2396 preserve_one_value (cur->values[i], objfile, copied_types);
2397
2398 for (var = internalvars; var; var = var->next)
2399 preserve_one_internalvar (var, objfile, copied_types);
2400
2401 preserve_ext_lang_values (objfile, copied_types);
2402
2403 htab_delete (copied_types);
2404 }
2405
2406 static void
2407 show_convenience (char *ignore, int from_tty)
2408 {
2409 struct gdbarch *gdbarch = get_current_arch ();
2410 struct internalvar *var;
2411 int varseen = 0;
2412 struct value_print_options opts;
2413
2414 get_user_print_options (&opts);
2415 for (var = internalvars; var; var = var->next)
2416 {
2417 volatile struct gdb_exception ex;
2418
2419 if (!varseen)
2420 {
2421 varseen = 1;
2422 }
2423 printf_filtered (("$%s = "), var->name);
2424
2425 TRY_CATCH (ex, RETURN_MASK_ERROR)
2426 {
2427 struct value *val;
2428
2429 val = value_of_internalvar (gdbarch, var);
2430 value_print (val, gdb_stdout, &opts);
2431 }
2432 if (ex.reason < 0)
2433 fprintf_filtered (gdb_stdout, _("<error: %s>"), ex.message);
2434 printf_filtered (("\n"));
2435 }
2436 if (!varseen)
2437 {
2438 /* This text does not mention convenience functions on purpose.
2439 The user can't create them except via Python, and if Python support
2440 is installed this message will never be printed ($_streq will
2441 exist). */
2442 printf_unfiltered (_("No debugger convenience variables now defined.\n"
2443 "Convenience variables have "
2444 "names starting with \"$\";\n"
2445 "use \"set\" as in \"set "
2446 "$foo = 5\" to define them.\n"));
2447 }
2448 }
2449 \f
2450 /* Return the TYPE_CODE_XMETHOD value corresponding to WORKER. */
2451
2452 struct value *
2453 value_of_xmethod (struct xmethod_worker *worker)
2454 {
2455 if (worker->value == NULL)
2456 {
2457 struct value *v;
2458
2459 v = allocate_value (builtin_type (target_gdbarch ())->xmethod);
2460 v->lval = lval_xcallable;
2461 v->location.xm_worker = worker;
2462 v->modifiable = 0;
2463 worker->value = v;
2464 }
2465
2466 return worker->value;
2467 }
2468
2469 /* Call the xmethod corresponding to the TYPE_CODE_XMETHOD value METHOD. */
2470
2471 struct value *
2472 call_xmethod (struct value *method, int argc, struct value **argv)
2473 {
2474 gdb_assert (TYPE_CODE (value_type (method)) == TYPE_CODE_XMETHOD
2475 && method->lval == lval_xcallable && argc > 0);
2476
2477 return invoke_xmethod (method->location.xm_worker,
2478 argv[0], argv + 1, argc - 1);
2479 }
2480 \f
2481 /* Extract a value as a C number (either long or double).
2482 Knows how to convert fixed values to double, or
2483 floating values to long.
2484 Does not deallocate the value. */
2485
2486 LONGEST
2487 value_as_long (struct value *val)
2488 {
2489 /* This coerces arrays and functions, which is necessary (e.g.
2490 in disassemble_command). It also dereferences references, which
2491 I suspect is the most logical thing to do. */
2492 val = coerce_array (val);
2493 return unpack_long (value_type (val), value_contents (val));
2494 }
2495
2496 DOUBLEST
2497 value_as_double (struct value *val)
2498 {
2499 DOUBLEST foo;
2500 int inv;
2501
2502 foo = unpack_double (value_type (val), value_contents (val), &inv);
2503 if (inv)
2504 error (_("Invalid floating value found in program."));
2505 return foo;
2506 }
2507
2508 /* Extract a value as a C pointer. Does not deallocate the value.
2509 Note that val's type may not actually be a pointer; value_as_long
2510 handles all the cases. */
2511 CORE_ADDR
2512 value_as_address (struct value *val)
2513 {
2514 struct gdbarch *gdbarch = get_type_arch (value_type (val));
2515
2516 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
2517 whether we want this to be true eventually. */
2518 #if 0
2519 /* gdbarch_addr_bits_remove is wrong if we are being called for a
2520 non-address (e.g. argument to "signal", "info break", etc.), or
2521 for pointers to char, in which the low bits *are* significant. */
2522 return gdbarch_addr_bits_remove (gdbarch, value_as_long (val));
2523 #else
2524
2525 /* There are several targets (IA-64, PowerPC, and others) which
2526 don't represent pointers to functions as simply the address of
2527 the function's entry point. For example, on the IA-64, a
2528 function pointer points to a two-word descriptor, generated by
2529 the linker, which contains the function's entry point, and the
2530 value the IA-64 "global pointer" register should have --- to
2531 support position-independent code. The linker generates
2532 descriptors only for those functions whose addresses are taken.
2533
2534 On such targets, it's difficult for GDB to convert an arbitrary
2535 function address into a function pointer; it has to either find
2536 an existing descriptor for that function, or call malloc and
2537 build its own. On some targets, it is impossible for GDB to
2538 build a descriptor at all: the descriptor must contain a jump
2539 instruction; data memory cannot be executed; and code memory
2540 cannot be modified.
2541
2542 Upon entry to this function, if VAL is a value of type `function'
2543 (that is, TYPE_CODE (VALUE_TYPE (val)) == TYPE_CODE_FUNC), then
2544 value_address (val) is the address of the function. This is what
2545 you'll get if you evaluate an expression like `main'. The call
2546 to COERCE_ARRAY below actually does all the usual unary
2547 conversions, which includes converting values of type `function'
2548 to `pointer to function'. This is the challenging conversion
2549 discussed above. Then, `unpack_long' will convert that pointer
2550 back into an address.
2551
2552 So, suppose the user types `disassemble foo' on an architecture
2553 with a strange function pointer representation, on which GDB
2554 cannot build its own descriptors, and suppose further that `foo'
2555 has no linker-built descriptor. The address->pointer conversion
2556 will signal an error and prevent the command from running, even
2557 though the next step would have been to convert the pointer
2558 directly back into the same address.
2559
2560 The following shortcut avoids this whole mess. If VAL is a
2561 function, just return its address directly. */
2562 if (TYPE_CODE (value_type (val)) == TYPE_CODE_FUNC
2563 || TYPE_CODE (value_type (val)) == TYPE_CODE_METHOD)
2564 return value_address (val);
2565
2566 val = coerce_array (val);
2567
2568 /* Some architectures (e.g. Harvard) map instruction and data
2569 addresses onto a single large unified address space. For
2570 instance, an architecture may consider a large integer in the
2571 range 0x10000000 .. 0x1000ffff to already represent a data
2572 address (hence need no pointer-to-address conversion) while a
2573 small integer would still need to be converted from integer to
2574 pointer to address. Just assume such architectures handle all
2575 integer conversions in a single function. */
2576
2577 /* JimB writes:
2578
2579 I think INTEGER_TO_ADDRESS is a good idea as proposed --- but we
2580 must admonish GDB hackers to make sure its behavior matches the
2581 compiler's, whenever possible.
2582
2583 In general, I think GDB should evaluate expressions the same way
2584 the compiler does. When the user copies an expression out of
2585 their source code and hands it to a `print' command, they should
2586 get the same value the compiler would have computed. Any
2587 deviation from this rule can cause major confusion and annoyance,
2588 and needs to be justified carefully. In other words, GDB doesn't
2589 really have the freedom to do these conversions in clever and
2590 useful ways.
2591
2592 AndrewC pointed out that users aren't complaining about how GDB
2593 casts integers to pointers; they are complaining that they can't
2594 take an address from a disassembly listing and give it to `x/i'.
2595 This is certainly important.
2596
2597 Adding an architecture method like integer_to_address() certainly
2598 makes it possible for GDB to "get it right" in all circumstances
2599 --- the target has complete control over how things get done, so
2600 people can Do The Right Thing for their target without breaking
2601 anyone else. The standard doesn't specify how integers get
2602 converted to pointers; usually, the ABI doesn't either, but
2603 ABI-specific code is a more reasonable place to handle it. */
2604
2605 if (TYPE_CODE (value_type (val)) != TYPE_CODE_PTR
2606 && TYPE_CODE (value_type (val)) != TYPE_CODE_REF
2607 && gdbarch_integer_to_address_p (gdbarch))
2608 return gdbarch_integer_to_address (gdbarch, value_type (val),
2609 value_contents (val));
2610
2611 return unpack_long (value_type (val), value_contents (val));
2612 #endif
2613 }
2614 \f
2615 /* Unpack raw data (copied from the debuggee, target byte order) at VALADDR
2616 as a long, or as a double, assuming the raw data is described
2617 by type TYPE. Knows how to convert different sizes of values
2618 and can convert between fixed and floating point. We don't assume
2619 any alignment for the raw data. Return value is in host byte order.
2620
2621 If you want functions and arrays to be coerced to pointers, and
2622 references to be dereferenced, call value_as_long() instead.
2623
2624 C++: It is assumed that the front-end has taken care of
2625 all matters concerning pointers to members. A pointer
2626 to member which reaches here is considered to be equivalent
2627 to an INT (or some size). After all, it is only an offset. */
2628
2629 LONGEST
2630 unpack_long (struct type *type, const gdb_byte *valaddr)
2631 {
2632 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
2633 enum type_code code = TYPE_CODE (type);
2634 int len = TYPE_LENGTH (type);
2635 int nosign = TYPE_UNSIGNED (type);
2636
2637 switch (code)
2638 {
2639 case TYPE_CODE_TYPEDEF:
2640 return unpack_long (check_typedef (type), valaddr);
2641 case TYPE_CODE_ENUM:
2642 case TYPE_CODE_FLAGS:
2643 case TYPE_CODE_BOOL:
2644 case TYPE_CODE_INT:
2645 case TYPE_CODE_CHAR:
2646 case TYPE_CODE_RANGE:
2647 case TYPE_CODE_MEMBERPTR:
2648 if (nosign)
2649 return extract_unsigned_integer (valaddr, len, byte_order);
2650 else
2651 return extract_signed_integer (valaddr, len, byte_order);
2652
2653 case TYPE_CODE_FLT:
2654 return extract_typed_floating (valaddr, type);
2655
2656 case TYPE_CODE_DECFLOAT:
2657 /* libdecnumber has a function to convert from decimal to integer, but
2658 it doesn't work when the decimal number has a fractional part. */
2659 return decimal_to_doublest (valaddr, len, byte_order);
2660
2661 case TYPE_CODE_PTR:
2662 case TYPE_CODE_REF:
2663 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
2664 whether we want this to be true eventually. */
2665 return extract_typed_address (valaddr, type);
2666
2667 default:
2668 error (_("Value can't be converted to integer."));
2669 }
2670 return 0; /* Placate lint. */
2671 }
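
/* Worked example (for illustration; a 4-byte int is assumed): the raw
   bytes { 0x78, 0x56, 0x34, 0x12 } unpack to the host value 0x12345678
   on a little-endian target, and to 0x78563412 on a big-endian
   target.  */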
2672
2673 /* Return a double value from the specified type and address.
2674 INVP points to an int which is set to 0 for valid value,
2675 1 for invalid value (bad float format). In either case,
2676 the returned double is OK to use. Argument is in target
2677 format, result is in host format. */
2678
2679 DOUBLEST
2680 unpack_double (struct type *type, const gdb_byte *valaddr, int *invp)
2681 {
2682 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
2683 enum type_code code;
2684 int len;
2685 int nosign;
2686
2687 *invp = 0; /* Assume valid. */
2688 CHECK_TYPEDEF (type);
2689 code = TYPE_CODE (type);
2690 len = TYPE_LENGTH (type);
2691 nosign = TYPE_UNSIGNED (type);
2692 if (code == TYPE_CODE_FLT)
2693 {
2694 /* NOTE: cagney/2002-02-19: There was a test here to see if the
2695 floating-point value was valid (using the macro
2696 INVALID_FLOAT). That test/macro have been removed.
2697
2698 It turns out that only the VAX defined this macro and then
2699 only in a non-portable way. Fixing the portability problem
2700 wouldn't help since the VAX floating-point code is also badly
2701 bit-rotten. The target needs to add definitions for the
2702 methods gdbarch_float_format and gdbarch_double_format - these
2703 exactly describe the target floating-point format. The
2704 problem here is that the corresponding floatformat_vax_f and
2705 floatformat_vax_d values these methods should be set to are
2706 not defined either. Oops!
2707
2708 Hopefully someone will add both the missing floatformat
2709 definitions and the new cases for floatformat_is_valid (). */
2710
2711 if (!floatformat_is_valid (floatformat_from_type (type), valaddr))
2712 {
2713 *invp = 1;
2714 return 0.0;
2715 }
2716
2717 return extract_typed_floating (valaddr, type);
2718 }
2719 else if (code == TYPE_CODE_DECFLOAT)
2720 return decimal_to_doublest (valaddr, len, byte_order);
2721 else if (nosign)
2722 {
2723 /* Unsigned -- be sure we compensate for signed LONGEST. */
2724 return (ULONGEST) unpack_long (type, valaddr);
2725 }
2726 else
2727 {
2728 /* Signed -- we are OK with unpack_long. */
2729 return unpack_long (type, valaddr);
2730 }
2731 }
2732
2733 /* Unpack raw data (copied from the debuggee, target byte order) at VALADDR
2734 as a CORE_ADDR, assuming the raw data is described by type TYPE.
2735 We don't assume any alignment for the raw data. Return value is in
2736 host byte order.
2737
2738 If you want functions and arrays to be coerced to pointers, and
2739 references to be dereferenced, call value_as_address() instead.
2740
2741 C++: It is assumed that the front-end has taken care of
2742 all matters concerning pointers to members. A pointer
2743 to member which reaches here is considered to be equivalent
2744 to an INT (or some size). After all, it is only an offset. */
2745
2746 CORE_ADDR
2747 unpack_pointer (struct type *type, const gdb_byte *valaddr)
2748 {
2749 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
2750 whether we want this to be true eventually. */
2751 return unpack_long (type, valaddr);
2752 }
2753
2754 \f
2755 /* Get the value of the FIELDNO'th field (which must be static) of
2756 TYPE. */
2757
2758 struct value *
2759 value_static_field (struct type *type, int fieldno)
2760 {
2761 struct value *retval;
2762
2763 switch (TYPE_FIELD_LOC_KIND (type, fieldno))
2764 {
2765 case FIELD_LOC_KIND_PHYSADDR:
2766 retval = value_at_lazy (TYPE_FIELD_TYPE (type, fieldno),
2767 TYPE_FIELD_STATIC_PHYSADDR (type, fieldno));
2768 break;
2769 case FIELD_LOC_KIND_PHYSNAME:
2770 {
2771 const char *phys_name = TYPE_FIELD_STATIC_PHYSNAME (type, fieldno);
2772 /* TYPE_FIELD_NAME (type, fieldno); */
2773 struct symbol *sym = lookup_symbol (phys_name, 0, VAR_DOMAIN, 0);
2774
2775 if (sym == NULL)
2776 {
2777 /* With some compilers, e.g. HP aCC, static data members are
2778 reported as non-debuggable symbols. */
2779 struct bound_minimal_symbol msym
2780 = lookup_minimal_symbol (phys_name, NULL, NULL);
2781
2782 if (!msym.minsym)
2783 return allocate_optimized_out_value (type);
2784 else
2785 {
2786 retval = value_at_lazy (TYPE_FIELD_TYPE (type, fieldno),
2787 BMSYMBOL_VALUE_ADDRESS (msym));
2788 }
2789 }
2790 else
2791 retval = value_of_variable (sym, NULL);
2792 break;
2793 }
2794 default:
2795 gdb_assert_not_reached ("unexpected field location kind");
2796 }
2797
2798 return retval;
2799 }
2800
2801 /* Change the enclosing type of a value object VAL to NEW_ENCL_TYPE.
2802 You have to be careful here, since the size of the data area for the value
2803 is set by the length of the enclosing type. So if NEW_ENCL_TYPE is bigger
2804 than the old enclosing type, you have to allocate more space for the
2805 data. */
2806
2807 void
2808 set_value_enclosing_type (struct value *val, struct type *new_encl_type)
2809 {
2810 if (TYPE_LENGTH (new_encl_type) > TYPE_LENGTH (value_enclosing_type (val)))
2811 val->contents =
2812 (gdb_byte *) xrealloc (val->contents, TYPE_LENGTH (new_encl_type));
2813
2814 val->enclosing_type = new_encl_type;
2815 }
2816
2817 /* Given a value ARG1 (offset by OFFSET bytes)
2818 of a struct or union type ARG_TYPE,
2819 extract and return the value of one of its (non-static) fields.
2820 FIELDNO says which field. */
2821
2822 struct value *
2823 value_primitive_field (struct value *arg1, int offset,
2824 int fieldno, struct type *arg_type)
2825 {
2826 struct value *v;
2827 struct type *type;
2828
2829 CHECK_TYPEDEF (arg_type);
2830 type = TYPE_FIELD_TYPE (arg_type, fieldno);
2831
2832 /* Call check_typedef on our type to make sure that, if TYPE
2833 is a TYPE_CODE_TYPEDEF, its length is set to the length
2834 of the target type instead of zero. However, we do not
2835 replace the typedef type by the target type, because we want
2836 to keep the typedef in order to be able to print the type
2837 description correctly. */
2838 check_typedef (type);
2839
2840 if (TYPE_FIELD_BITSIZE (arg_type, fieldno))
2841 {
2842 /* Handle packed fields.
2843
2844 Create a new value for the bitfield, with bitpos and bitsize
2845 set. If possible, arrange offset and bitpos so that we can
2846 do a single aligned read of the size of the containing type.
2847 Otherwise, adjust offset to the byte containing the first
2848 bit. Assume that the address, offset, and embedded offset
2849 are sufficiently aligned. */
2850
2851 int bitpos = TYPE_FIELD_BITPOS (arg_type, fieldno);
2852 int container_bitsize = TYPE_LENGTH (type) * 8;
2853
2854 if (arg1->optimized_out)
2855 v = allocate_optimized_out_value (type);
2856 else
2857 {
2858 v = allocate_value_lazy (type);
2859 v->bitsize = TYPE_FIELD_BITSIZE (arg_type, fieldno);
2860 if ((bitpos % container_bitsize) + v->bitsize <= container_bitsize
2861 && TYPE_LENGTH (type) <= (int) sizeof (LONGEST))
2862 v->bitpos = bitpos % container_bitsize;
2863 else
2864 v->bitpos = bitpos % 8;
2865 v->offset = (value_embedded_offset (arg1)
2866 + offset
2867 + (bitpos - v->bitpos) / 8);
2868 set_value_parent (v, arg1);
2869 if (!value_lazy (arg1))
2870 value_fetch_lazy (v);
2871 }
2872 }
2873 else if (fieldno < TYPE_N_BASECLASSES (arg_type))
2874 {
2875 /* This field is actually a base subobject, so preserve the
2876 entire object's contents for later references to virtual
2877 bases, etc. */
2878 int boffset;
2879
2880 /* Lazy register values with offsets are not supported. */
2881 if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
2882 value_fetch_lazy (arg1);
2883
2884 /* The optimized_out flag is only set correctly once a lazy value is
2885 loaded; having just loaded some lazy values, we should check the
2886 optimized-out case now. */
2887 if (arg1->optimized_out)
2888 v = allocate_optimized_out_value (type);
2889 else
2890 {
2891 /* We special case virtual inheritance here because this
2892 requires access to the contents, which we would rather avoid
2893 for references to ordinary fields of unavailable values. */
2894 if (BASETYPE_VIA_VIRTUAL (arg_type, fieldno))
2895 boffset = baseclass_offset (arg_type, fieldno,
2896 value_contents (arg1),
2897 value_embedded_offset (arg1),
2898 value_address (arg1),
2899 arg1);
2900 else
2901 boffset = TYPE_FIELD_BITPOS (arg_type, fieldno) / 8;
2902
2903 if (value_lazy (arg1))
2904 v = allocate_value_lazy (value_enclosing_type (arg1));
2905 else
2906 {
2907 v = allocate_value (value_enclosing_type (arg1));
2908 value_contents_copy_raw (v, 0, arg1, 0,
2909 TYPE_LENGTH (value_enclosing_type (arg1)));
2910 }
2911 v->type = type;
2912 v->offset = value_offset (arg1);
2913 v->embedded_offset = offset + value_embedded_offset (arg1) + boffset;
2914 }
2915 }
2916 else
2917 {
2918 /* Plain old data member */
2919 offset += TYPE_FIELD_BITPOS (arg_type, fieldno) / 8;
2920
2921 /* Lazy register values with offsets are not supported. */
2922 if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
2923 value_fetch_lazy (arg1);
2924
2925 /* The optimized_out flag is only set correctly once a lazy value is
2926 loaded; having just loaded some lazy values, we should check for
2927 the optimized-out case now. */
2928 if (arg1->optimized_out)
2929 v = allocate_optimized_out_value (type);
2930 else if (value_lazy (arg1))
2931 v = allocate_value_lazy (type);
2932 else
2933 {
2934 v = allocate_value (type);
2935 value_contents_copy_raw (v, value_embedded_offset (v),
2936 arg1, value_embedded_offset (arg1) + offset,
2937 TYPE_LENGTH (type));
2938 }
2939 v->offset = (value_offset (arg1) + offset
2940 + value_embedded_offset (arg1));
2941 }
2942 set_value_component_location (v, arg1);
2943 VALUE_REGNUM (v) = VALUE_REGNUM (arg1);
2944 VALUE_FRAME_ID (v) = VALUE_FRAME_ID (arg1);
2945 return v;
2946 }
2947
2948 /* Given a value ARG1 of a struct or union type,
2949 extract and return the value of one of its (non-static) fields.
2950 FIELDNO says which field. */
2951
2952 struct value *
2953 value_field (struct value *arg1, int fieldno)
2954 {
2955 return value_primitive_field (arg1, 0, fieldno, value_type (arg1));
2956 }
2957
2958 /* Return a non-virtual function as a value.
2959 F is the list of member functions which contains the desired method.
2960 J is an index into F which provides the desired method.
2961
2962 We only use the symbol for its address, so be happy with either a
2963 full symbol or a minimal symbol. */
2964
2965 struct value *
2966 value_fn_field (struct value **arg1p, struct fn_field *f,
2967 int j, struct type *type,
2968 int offset)
2969 {
2970 struct value *v;
2971 struct type *ftype = TYPE_FN_FIELD_TYPE (f, j);
2972 const char *physname = TYPE_FN_FIELD_PHYSNAME (f, j);
2973 struct symbol *sym;
2974 struct bound_minimal_symbol msym;
2975
2976 sym = lookup_symbol (physname, 0, VAR_DOMAIN, 0);
2977 if (sym != NULL)
2978 {
2979 memset (&msym, 0, sizeof (msym));
2980 }
2981 else
2982 {
2983 gdb_assert (sym == NULL);
2984 msym = lookup_bound_minimal_symbol (physname);
2985 if (msym.minsym == NULL)
2986 return NULL;
2987 }
2988
2989 v = allocate_value (ftype);
2990 if (sym)
2991 {
2992 set_value_address (v, BLOCK_START (SYMBOL_BLOCK_VALUE (sym)));
2993 }
2994 else
2995 {
2996 /* The minimal symbol might point to a function descriptor;
2997 resolve it to the actual code address instead. */
2998 struct objfile *objfile = msym.objfile;
2999 struct gdbarch *gdbarch = get_objfile_arch (objfile);
3000
3001 set_value_address (v,
3002 gdbarch_convert_from_func_ptr_addr
3003 (gdbarch, BMSYMBOL_VALUE_ADDRESS (msym), &current_target));
3004 }
3005
3006 if (arg1p)
3007 {
3008 if (type != value_type (*arg1p))
3009 *arg1p = value_ind (value_cast (lookup_pointer_type (type),
3010 value_addr (*arg1p)));
3011
3012 /* Move the `this' pointer according to the offset.
3013 VALUE_OFFSET (*arg1p) += offset; */
3014 }
3015
3016 return v;
3017 }
3018
3019 \f
3020
3021 /* Helper function for both unpack_value_bits_as_long and
3022 unpack_bits_as_long. See those functions for more details on the
3023 interface; the only difference is that this function accepts either
3024 a NULL or a non-NULL ORIGINAL_VALUE. */
3025
3026 static int
3027 unpack_value_bits_as_long_1 (struct type *field_type, const gdb_byte *valaddr,
3028 int embedded_offset, int bitpos, int bitsize,
3029 const struct value *original_value,
3030 LONGEST *result)
3031 {
3032 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (field_type));
3033 ULONGEST val;
3034 ULONGEST valmask;
3035 int lsbcount;
3036 int bytes_read;
3037 int read_offset;
3038
3039 /* Read the minimum number of bytes required; there may not be
3040 enough bytes to read an entire ULONGEST. */
3041 CHECK_TYPEDEF (field_type);
3042 if (bitsize)
3043 bytes_read = ((bitpos % 8) + bitsize + 7) / 8;
3044 else
3045 bytes_read = TYPE_LENGTH (field_type);
3046
3047 read_offset = bitpos / 8;
3048
3049 if (original_value != NULL
3050 && !value_bits_available (original_value, embedded_offset + bitpos,
3051 bitsize))
3052 return 0;
3053
3054 val = extract_unsigned_integer (valaddr + embedded_offset + read_offset,
3055 bytes_read, byte_order);
3056
3057 /* Extract bits. See the comment before unpack_value_bits_as_long, below. */
3058
3059 if (gdbarch_bits_big_endian (get_type_arch (field_type)))
3060 lsbcount = (bytes_read * 8 - bitpos % 8 - bitsize);
3061 else
3062 lsbcount = (bitpos % 8);
3063 val >>= lsbcount;
3064
3065 /* If the field does not entirely fill a LONGEST, then zero the sign bits.
3066 If the field is signed, and is negative, then sign extend. */
3067
3068 if ((bitsize > 0) && (bitsize < 8 * (int) sizeof (val)))
3069 {
3070 valmask = (((ULONGEST) 1) << bitsize) - 1;
3071 val &= valmask;
3072 if (!TYPE_UNSIGNED (field_type))
3073 {
3074 if (val & (valmask ^ (valmask >> 1)))
3075 {
3076 val |= ~valmask;
3077 }
3078 }
3079 }
3080
3081 *result = val;
3082 return 1;
3083 }
3084
3085 /* Unpack a bitfield of the specified FIELD_TYPE, from the object at
3086 VALADDR + EMBEDDED_OFFSET, and store the result in *RESULT.
3087 VALADDR points to the contents of ORIGINAL_VALUE, which must not be
3088 NULL. The bitfield starts at BITPOS bits and contains BITSIZE
3089 bits.
3090
3091 Returns false if the value contents are unavailable, otherwise
3092 returns true, indicating a valid value has been stored in *RESULT.
3093
3094 Extracting bits depends on endianness of the machine. Compute the
3095 number of least significant bits to discard. For big endian machines,
3096 we compute the total number of bits in the anonymous object, subtract
3097 off the bit count from the MSB of the object to the MSB of the
3098 bitfield, then the size of the bitfield, which leaves the LSB discard
3099 count. For little endian machines, the discard count is simply the
3100 number of bits from the LSB of the anonymous object to the LSB of the
3101 bitfield.
3102
3103 If the field is signed, we also do sign extension. */
3104
3105 int
3106 unpack_value_bits_as_long (struct type *field_type, const gdb_byte *valaddr,
3107 int embedded_offset, int bitpos, int bitsize,
3108 const struct value *original_value,
3109 LONGEST *result)
3110 {
3111 gdb_assert (original_value != NULL);
3112
3113 return unpack_value_bits_as_long_1 (field_type, valaddr, embedded_offset,
3114 bitpos, bitsize, original_value, result);
3115
3116 }
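
/* Worked example for unpack_value_bits_as_long above (little-endian
   byte and bit order, unsigned field): with BITPOS = 10 and
   BITSIZE = 5, a single byte is read at offset BITPOS / 8 = 1, shifted
   right by BITPOS % 8 = 2, and masked with (1 << 5) - 1 = 0x1f.  If
   that byte is 0xb4 (binary 1011 0100), the extracted field is 0x0d
   (binary 01101).  */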
3117
3118 /* Unpack a field FIELDNO of the specified TYPE, from the object at
3119 VALADDR + EMBEDDED_OFFSET. VALADDR points to the contents of
3120 ORIGINAL_VALUE. See unpack_value_bits_as_long for more
3121 details. */
3122
3123 static int
3124 unpack_value_field_as_long_1 (struct type *type, const gdb_byte *valaddr,
3125 int embedded_offset, int fieldno,
3126 const struct value *val, LONGEST *result)
3127 {
3128 int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3129 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3130 struct type *field_type = TYPE_FIELD_TYPE (type, fieldno);
3131
3132 return unpack_value_bits_as_long_1 (field_type, valaddr, embedded_offset,
3133 bitpos, bitsize, val,
3134 result);
3135 }
3136
3137 /* Unpack a field FIELDNO of the specified TYPE, from the object at
3138 VALADDR + EMBEDDED_OFFSET. VALADDR points to the contents of
3139 ORIGINAL_VALUE, which must not be NULL. See
3140 unpack_value_bits_as_long for more details. */
3141
3142 int
3143 unpack_value_field_as_long (struct type *type, const gdb_byte *valaddr,
3144 int embedded_offset, int fieldno,
3145 const struct value *val, LONGEST *result)
3146 {
3147 gdb_assert (val != NULL);
3148
3149 return unpack_value_field_as_long_1 (type, valaddr, embedded_offset,
3150 fieldno, val, result);
3151 }
3152
3153 /* Unpack a field FIELDNO of the specified TYPE, from the anonymous
3154 object at VALADDR. See unpack_value_bits_as_long for more details.
3155 This function differs from unpack_value_field_as_long in that it
3156 operates without a struct value object. */
3157
3158 LONGEST
3159 unpack_field_as_long (struct type *type, const gdb_byte *valaddr, int fieldno)
3160 {
3161 LONGEST result;
3162
3163 unpack_value_field_as_long_1 (type, valaddr, 0, fieldno, NULL, &result);
3164 return result;
3165 }
3166
3167 /* Return a new value holding field FIELDNO of the object of type TYPE
3168 at VALADDR + EMBEDDED_OFFSET; the new value has the field's type.
3169 VALADDR points to the contents of VAL. If the contents of VAL
3170 required to extract the bitfield are unavailable, the new value is
3171 correspondingly marked as unavailable. */
3172
3173 struct value *
3174 value_field_bitfield (struct type *type, int fieldno,
3175 const gdb_byte *valaddr,
3176 int embedded_offset, const struct value *val)
3177 {
3178 LONGEST l;
3179
3180 if (!unpack_value_field_as_long (type, valaddr, embedded_offset, fieldno,
3181 val, &l))
3182 {
3183 struct type *field_type = TYPE_FIELD_TYPE (type, fieldno);
3184 struct value *retval = allocate_value (field_type);
3185 mark_value_bytes_unavailable (retval, 0, TYPE_LENGTH (field_type));
3186 return retval;
3187 }
3188 else
3189 {
3190 return value_from_longest (TYPE_FIELD_TYPE (type, fieldno), l);
3191 }
3192 }
3193
3194 /* Modify the value of a bitfield. ADDR points to a block of memory in
3195 target byte order; the bitfield starts in the byte pointed to. FIELDVAL
3196 is the desired value of the field, in host byte order. BITPOS and BITSIZE
3197 indicate which bits (in target bit order) comprise the bitfield.
3198 Requires 0 < BITSIZE <= lbits, 0 <= BITPOS % 8 + BITSIZE <= lbits, and
3199 0 <= BITPOS, where lbits is the size of a LONGEST in bits. */
3200
3201 void
3202 modify_field (struct type *type, gdb_byte *addr,
3203 LONGEST fieldval, int bitpos, int bitsize)
3204 {
3205 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
3206 ULONGEST oword;
3207 ULONGEST mask = (ULONGEST) -1 >> (8 * sizeof (ULONGEST) - bitsize);
3208 int bytesize;
3209
3210 /* Normalize BITPOS. */
3211 addr += bitpos / 8;
3212 bitpos %= 8;
3213
3214 /* If a negative fieldval fits in the field in question, chop
3215 off the sign extension bits. */
3216 if ((~fieldval & ~(mask >> 1)) == 0)
3217 fieldval &= mask;
3218
3219 /* Warn if value is too big to fit in the field in question. */
3220 if (0 != (fieldval & ~mask))
3221 {
3222 /* FIXME: would like to include fieldval in the message, but
3223 we don't have a sprintf_longest. */
3224 warning (_("Value does not fit in %d bits."), bitsize);
3225
3226 /* Truncate it, otherwise adjoining fields may be corrupted. */
3227 fieldval &= mask;
3228 }
3229
3230 /* Ensure no bytes outside of the modified ones get accessed as it may cause
3231 false valgrind reports. */
3232
3233 bytesize = (bitpos + bitsize + 7) / 8;
3234 oword = extract_unsigned_integer (addr, bytesize, byte_order);
3235
3236 /* Shifting for bit field depends on endianness of the target machine. */
3237 if (gdbarch_bits_big_endian (get_type_arch (type)))
3238 bitpos = bytesize * 8 - bitpos - bitsize;
3239
3240 oword &= ~(mask << bitpos);
3241 oword |= fieldval << bitpos;
3242
3243 store_unsigned_integer (addr, bytesize, byte_order, oword);
3244 }
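
/* Worked example (little-endian bit order): with BITPOS = 5,
   BITSIZE = 3 and FIELDVAL = 5, the mask is 0x7, BYTESIZE is 1, and
   the addressed byte becomes (old_byte & ~(0x7 << 5)) | (0x5 << 5);
   an original byte of 0xff therefore becomes 0xbf.  */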
3245 \f
3246 /* Pack NUM into BUF using a target format of TYPE. */
3247
3248 void
3249 pack_long (gdb_byte *buf, struct type *type, LONGEST num)
3250 {
3251 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
3252 int len;
3253
3254 type = check_typedef (type);
3255 len = TYPE_LENGTH (type);
3256
3257 switch (TYPE_CODE (type))
3258 {
3259 case TYPE_CODE_INT:
3260 case TYPE_CODE_CHAR:
3261 case TYPE_CODE_ENUM:
3262 case TYPE_CODE_FLAGS:
3263 case TYPE_CODE_BOOL:
3264 case TYPE_CODE_RANGE:
3265 case TYPE_CODE_MEMBERPTR:
3266 store_signed_integer (buf, len, byte_order, num);
3267 break;
3268
3269 case TYPE_CODE_REF:
3270 case TYPE_CODE_PTR:
3271 store_typed_address (buf, type, (CORE_ADDR) num);
3272 break;
3273
3274 default:
3275 error (_("Unexpected type (%d) encountered for integer constant."),
3276 TYPE_CODE (type));
3277 }
3278 }
3279
3280
3281 /* Pack unsigned NUM into BUF using a target format of TYPE. */
3282
3283 static void
3284 pack_unsigned_long (gdb_byte *buf, struct type *type, ULONGEST num)
3285 {
3286 int len;
3287 enum bfd_endian byte_order;
3288
3289 type = check_typedef (type);
3290 len = TYPE_LENGTH (type);
3291 byte_order = gdbarch_byte_order (get_type_arch (type));
3292
3293 switch (TYPE_CODE (type))
3294 {
3295 case TYPE_CODE_INT:
3296 case TYPE_CODE_CHAR:
3297 case TYPE_CODE_ENUM:
3298 case TYPE_CODE_FLAGS:
3299 case TYPE_CODE_BOOL:
3300 case TYPE_CODE_RANGE:
3301 case TYPE_CODE_MEMBERPTR:
3302 store_unsigned_integer (buf, len, byte_order, num);
3303 break;
3304
3305 case TYPE_CODE_REF:
3306 case TYPE_CODE_PTR:
3307 store_typed_address (buf, type, (CORE_ADDR) num);
3308 break;
3309
3310 default:
3311 error (_("Unexpected type (%d) encountered "
3312 "for unsigned integer constant."),
3313 TYPE_CODE (type));
3314 }
3315 }
3316
3317
3318 /* Convert C numbers into newly allocated values. */
3319
3320 struct value *
3321 value_from_longest (struct type *type, LONGEST num)
3322 {
3323 struct value *val = allocate_value (type);
3324
3325 pack_long (value_contents_raw (val), type, num);
3326 return val;
3327 }
3328
3329
3330 /* Convert C unsigned numbers into newly allocated values. */
3331
3332 struct value *
3333 value_from_ulongest (struct type *type, ULONGEST num)
3334 {
3335 struct value *val = allocate_value (type);
3336
3337 pack_unsigned_long (value_contents_raw (val), type, num);
3338
3339 return val;
3340 }
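
/* For illustration only: a minimal round trip through
   value_from_longest and value_as_long; the helper name is made up and
   GDBARCH is assumed to be any valid architecture.  */
#if 0
static void
example_longest_round_trip (struct gdbarch *gdbarch)
{
  struct value *val
    = value_from_longest (builtin_type (gdbarch)->builtin_int, -7);

  /* pack_long stored -7 in target format; value_as_long unpacks it
     back into host byte order.  */
  gdb_assert (value_as_long (val) == -7);
}
#endif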
3341
3342
3343 /* Create a value representing a pointer of type TYPE to the address
3344 ADDR. The type of the created value may differ from the passed
3345 type TYPE. Make sure to retrieve the returned value's new type
3346 after this call, e.g. in case of a variable length array. */
3347
3348 struct value *
3349 value_from_pointer (struct type *type, CORE_ADDR addr)
3350 {
3351 struct type *resolved_type = resolve_dynamic_type (type, addr);
3352 struct value *val = allocate_value (resolved_type);
3353
3354 store_typed_address (value_contents_raw (val),
3355 check_typedef (resolved_type), addr);
3356 return val;
3357 }
3358
3359
3360 /* Create a value of type TYPE whose contents come from VALADDR, if it
3361 is non-null, and whose memory address (in the inferior) is
3362 ADDRESS. The type of the created value may differ from the passed
3363 type TYPE. Make sure to retrieve the value's new type after this call.
3364 Note that TYPE is not passed through resolve_dynamic_type; this is
3365 a special API intended for use only by Ada. */
3366
3367 struct value *
3368 value_from_contents_and_address_unresolved (struct type *type,
3369 const gdb_byte *valaddr,
3370 CORE_ADDR address)
3371 {
3372 struct value *v;
3373
3374 if (valaddr == NULL)
3375 v = allocate_value_lazy (type);
3376 else
3377 v = value_from_contents (type, valaddr);
3378 set_value_address (v, address);
3379 VALUE_LVAL (v) = lval_memory;
3380 return v;
3381 }
3382
3383 /* Create a value of type TYPE whose contents come from VALADDR, if it
3384 is non-null, and whose memory address (in the inferior) is
3385 ADDRESS. The type of the created value may differ from the passed
3386 type TYPE. Make sure to retrieve the value's new type after this call. */
3387
3388 struct value *
3389 value_from_contents_and_address (struct type *type,
3390 const gdb_byte *valaddr,
3391 CORE_ADDR address)
3392 {
3393 struct type *resolved_type = resolve_dynamic_type (type, address);
3394 struct value *v;
3395
3396 if (valaddr == NULL)
3397 v = allocate_value_lazy (resolved_type);
3398 else
3399 v = value_from_contents (resolved_type, valaddr);
3400 set_value_address (v, address);
3401 VALUE_LVAL (v) = lval_memory;
3402 return v;
3403 }
3404
3405 /* Create a value of type TYPE holding the contents CONTENTS.
3406 The new value is `not_lval'. */
3407
3408 struct value *
3409 value_from_contents (struct type *type, const gdb_byte *contents)
3410 {
3411 struct value *result;
3412
3413 result = allocate_value (type);
3414 memcpy (value_contents_raw (result), contents, TYPE_LENGTH (type));
3415 return result;
3416 }
3417
3418 struct value *
3419 value_from_double (struct type *type, DOUBLEST num)
3420 {
3421 struct value *val = allocate_value (type);
3422 struct type *base_type = check_typedef (type);
3423 enum type_code code = TYPE_CODE (base_type);
3424
3425 if (code == TYPE_CODE_FLT)
3426 {
3427 store_typed_floating (value_contents_raw (val), base_type, num);
3428 }
3429 else
3430 error (_("Unexpected type encountered for floating constant."));
3431
3432 return val;
3433 }
3434
3435 struct value *
3436 value_from_decfloat (struct type *type, const gdb_byte *dec)
3437 {
3438 struct value *val = allocate_value (type);
3439
3440 memcpy (value_contents_raw (val), dec, TYPE_LENGTH (type));
3441 return val;
3442 }
3443
3444 /* Extract a value from the value history. Input will be of the form
3445 $digits or $$digits. See block comment above 'write_dollar_variable'
3446 for details. */
3447
3448 struct value *
3449 value_from_history_ref (char *h, char **endp)
3450 {
3451 int index, len;
3452
3453 if (h[0] == '$')
3454 len = 1;
3455 else
3456 return NULL;
3457
3458 if (h[1] == '$')
3459 len = 2;
3460
3461 /* Find length of numeral string. */
3462 for (; isdigit (h[len]); len++)
3463 ;
3464
3465 /* Make sure numeral string is not part of an identifier. */
3466 if (h[len] == '_' || isalpha (h[len]))
3467 return NULL;
3468
3469 /* Now collect the index value. */
3470 if (h[1] == '$')
3471 {
3472 if (len == 2)
3473 {
3474 /* For some bizarre reason, "$$" is equivalent to "$$1",
3475 rather than to "$$0" as it ought to be! */
3476 index = -1;
3477 *endp += len;
3478 }
3479 else
3480 index = -strtol (&h[2], endp, 10);
3481 }
3482 else
3483 {
3484 if (len == 1)
3485 {
3486 /* "$" is equivalent to "$0". */
3487 index = 0;
3488 *endp += len;
3489 }
3490 else
3491 index = strtol (&h[1], endp, 10);
3492 }
3493
3494 return access_value_history (index);
3495 }
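
/* Worked examples of the accepted forms: "$" yields the last value in
   the history (index 0), "$3" the value numbered 3, "$$" the
   next-to-last value (index -1), and "$$2" the value two entries back
   from the last (index -2).  Anything else, e.g. "$foo", returns NULL
   so the caller can treat it as an ordinary identifier.  */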
3496
3497 struct value *
3498 coerce_ref_if_computed (const struct value *arg)
3499 {
3500 const struct lval_funcs *funcs;
3501
3502 if (TYPE_CODE (check_typedef (value_type (arg))) != TYPE_CODE_REF)
3503 return NULL;
3504
3505 if (value_lval_const (arg) != lval_computed)
3506 return NULL;
3507
3508 funcs = value_computed_funcs (arg);
3509 if (funcs->coerce_ref == NULL)
3510 return NULL;
3511
3512 return funcs->coerce_ref (arg);
3513 }
3514
3515 /* Look at value.h for description. */
3516
3517 struct value *
3518 readjust_indirect_value_type (struct value *value, struct type *enc_type,
3519 struct type *original_type,
3520 struct value *original_value)
3521 {
3522 /* Re-adjust type. */
3523 deprecated_set_value_type (value, TYPE_TARGET_TYPE (original_type));
3524
3525 /* Add embedding info. */
3526 set_value_enclosing_type (value, enc_type);
3527 set_value_embedded_offset (value, value_pointed_to_offset (original_value));
3528
3529 /* We may be pointing to an object of some derived type. */
3530 return value_full_object (value, NULL, 0, 0, 0);
3531 }
3532
3533 struct value *
3534 coerce_ref (struct value *arg)
3535 {
3536 struct type *value_type_arg_tmp = check_typedef (value_type (arg));
3537 struct value *retval;
3538 struct type *enc_type;
3539
3540 retval = coerce_ref_if_computed (arg);
3541 if (retval)
3542 return retval;
3543
3544 if (TYPE_CODE (value_type_arg_tmp) != TYPE_CODE_REF)
3545 return arg;
3546
3547 enc_type = check_typedef (value_enclosing_type (arg));
3548 enc_type = TYPE_TARGET_TYPE (enc_type);
3549
3550 retval = value_at_lazy (enc_type,
3551 unpack_pointer (value_type (arg),
3552 value_contents (arg)));
3553 enc_type = value_type (retval);
3554 return readjust_indirect_value_type (retval, enc_type,
3555 value_type_arg_tmp, arg);
3556 }
3557
3558 struct value *
3559 coerce_array (struct value *arg)
3560 {
3561 struct type *type;
3562
3563 arg = coerce_ref (arg);
3564 type = check_typedef (value_type (arg));
3565
3566 switch (TYPE_CODE (type))
3567 {
3568 case TYPE_CODE_ARRAY:
3569 if (!TYPE_VECTOR (type) && current_language->c_style_arrays)
3570 arg = value_coerce_array (arg);
3571 break;
3572 case TYPE_CODE_FUNC:
3573 arg = value_coerce_function (arg);
3574 break;
3575 }
3576 return arg;
3577 }
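
/* For example, a value of type "char buf[16]" is coerced to a "char *"
   pointing at BUF's first element (when the current language uses
   C-style arrays), and a function is coerced to a pointer to that
   function; vector types are deliberately left alone.  */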
3578 \f
3579
3580 /* Return the return value convention that will be used for the
3581 specified type. */
3582
3583 enum return_value_convention
3584 struct_return_convention (struct gdbarch *gdbarch,
3585 struct value *function, struct type *value_type)
3586 {
3587 enum type_code code = TYPE_CODE (value_type);
3588
3589 if (code == TYPE_CODE_ERROR)
3590 error (_("Function return type unknown."));
3591
3592 /* Probe the architecture for the return-value convention. */
3593 return gdbarch_return_value (gdbarch, function, value_type,
3594 NULL, NULL, NULL);
3595 }
3596
3597 /* Return true if the function returning the specified type is using
3598 the convention of returning structures in memory (passing in the
3599 address as a hidden first parameter). */
3600
3601 int
3602 using_struct_return (struct gdbarch *gdbarch,
3603 struct value *function, struct type *value_type)
3604 {
3605 if (TYPE_CODE (value_type) == TYPE_CODE_VOID)
3606 /* A void return value is never in memory. See also corresponding
3607 code in "print_return_value". */
3608 return 0;
3609
3610 return (struct_return_convention (gdbarch, function, value_type)
3611 != RETURN_VALUE_REGISTER_CONVENTION);
3612 }
3613
3614 /* Set the initialized field in a value struct. */
3615
3616 void
3617 set_value_initialized (struct value *val, int status)
3618 {
3619 val->initialized = status;
3620 }
3621
3622 /* Return the initialized field in a value struct. */
3623
3624 int
3625 value_initialized (struct value *val)
3626 {
3627 return val->initialized;
3628 }
3629
3630 /* Called only from the value_contents and value_contents_all()
3631 macros, if the current data for a variable needs to be loaded into
3632 value_contents(VAL). Fetches the data from the user's process, and
3633 clears the lazy flag to indicate that the data in the buffer is
3634 valid.
3635
3636 If the value is zero-length, we avoid calling read_memory, which
3637 would abort. We mark the value as fetched anyway -- all 0 bytes of
3638 it.
3639
3640 This function returns a value because it is used in the
3641 value_contents macro as part of an expression, where a void would
3642 not work. The value is ignored. */
3643
3644 int
3645 value_fetch_lazy (struct value *val)
3646 {
3647 gdb_assert (value_lazy (val));
3648 allocate_value_contents (val);
3649 if (value_bitsize (val))
3650 {
3651 /* To read a lazy bitfield, read the entire enclosing value. This
3652 prevents reading the same block of (possibly volatile) memory once
3653 per bitfield. It would be even better to read only the containing
3654 word, but we have no way to record that just specific bits of a
3655 value have been fetched. */
3656 struct type *type = check_typedef (value_type (val));
3657 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
3658 struct value *parent = value_parent (val);
3659 LONGEST offset = value_offset (val);
3660 LONGEST num;
3661
3662 if (value_lazy (parent))
3663 value_fetch_lazy (parent);
3664
3665 if (!value_bits_valid (parent,
3666 TARGET_CHAR_BIT * offset + value_bitpos (val),
3667 value_bitsize (val)))
3668 set_value_optimized_out (val, 1);
3669 else if (!unpack_value_bits_as_long (value_type (val),
3670 value_contents_for_printing (parent),
3671 offset,
3672 value_bitpos (val),
3673 value_bitsize (val), parent, &num))
3674 mark_value_bytes_unavailable (val,
3675 value_embedded_offset (val),
3676 TYPE_LENGTH (type));
3677 else
3678 store_signed_integer (value_contents_raw (val), TYPE_LENGTH (type),
3679 byte_order, num);
3680 }
3681 else if (VALUE_LVAL (val) == lval_memory)
3682 {
3683 CORE_ADDR addr = value_address (val);
3684 struct type *type = check_typedef (value_enclosing_type (val));
3685
3686 if (TYPE_LENGTH (type))
3687 read_value_memory (val, 0, value_stack (val),
3688 addr, value_contents_all_raw (val),
3689 TYPE_LENGTH (type));
3690 }
3691 else if (VALUE_LVAL (val) == lval_register)
3692 {
3693 struct frame_info *frame;
3694 int regnum;
3695 struct type *type = check_typedef (value_type (val));
3696 struct value *new_val = val, *mark = value_mark ();
3697
3698 /* Offsets are not supported here; lazy register values must
3699 refer to the entire register. */
3700 gdb_assert (value_offset (val) == 0);
3701
3702 while (VALUE_LVAL (new_val) == lval_register && value_lazy (new_val))
3703 {
3704 struct frame_id frame_id = VALUE_FRAME_ID (new_val);
3705
3706 frame = frame_find_by_id (frame_id);
3707 regnum = VALUE_REGNUM (new_val);
3708
3709 gdb_assert (frame != NULL);
3710
3711 /* Convertible register routines are used for multi-register
3712 values and for interpretation in different types
3713 (e.g. float or int from a double register). Lazy
3714 register values should have the register's natural type,
3715 so they do not apply. */
3716 gdb_assert (!gdbarch_convert_register_p (get_frame_arch (frame),
3717 regnum, type));
3718
3719 new_val = get_frame_register_value (frame, regnum);
3720
3721 /* If we get another lazy lval_register value, it means the
3722 register is found by reading it from the next frame.
3723 get_frame_register_value should never return a value with
3724 the frame id pointing to FRAME. If it does, it means we
3725 either have two consecutive frames with the same frame id
3726 in the frame chain, or some code is trying to unwind
3727 behind get_prev_frame's back (e.g., a frame unwind
3728 sniffer trying to unwind), bypassing its validations. In
3729 any case, it should always be an internal error to end up
3730 in this situation. */
3731 if (VALUE_LVAL (new_val) == lval_register
3732 && value_lazy (new_val)
3733 && frame_id_eq (VALUE_FRAME_ID (new_val), frame_id))
3734 internal_error (__FILE__, __LINE__,
3735 _("infinite loop while fetching a register"));
3736 }
3737
3738 /* If it's still lazy (for instance, a saved register on the
3739 stack), fetch it. */
3740 if (value_lazy (new_val))
3741 value_fetch_lazy (new_val);
3742
3743 /* If the register was not saved, mark it optimized out. */
3744 if (value_optimized_out (new_val))
3745 set_value_optimized_out (val, 1);
3746 else
3747 {
3748 set_value_lazy (val, 0);
3749 value_contents_copy (val, value_embedded_offset (val),
3750 new_val, value_embedded_offset (new_val),
3751 TYPE_LENGTH (type));
3752 }
3753
3754 if (frame_debug)
3755 {
3756 struct gdbarch *gdbarch;
3757 frame = frame_find_by_id (VALUE_FRAME_ID (val));
3758 regnum = VALUE_REGNUM (val);
3759 gdbarch = get_frame_arch (frame);
3760
3761 fprintf_unfiltered (gdb_stdlog,
3762 "{ value_fetch_lazy "
3763 "(frame=%d,regnum=%d(%s),...) ",
3764 frame_relative_level (frame), regnum,
3765 user_reg_map_regnum_to_name (gdbarch, regnum));
3766
3767 fprintf_unfiltered (gdb_stdlog, "->");
3768 if (value_optimized_out (new_val))
3769 {
3770 fprintf_unfiltered (gdb_stdlog, " ");
3771 val_print_optimized_out (new_val, gdb_stdlog);
3772 }
3773 else
3774 {
3775 int i;
3776 const gdb_byte *buf = value_contents (new_val);
3777
3778 if (VALUE_LVAL (new_val) == lval_register)
3779 fprintf_unfiltered (gdb_stdlog, " register=%d",
3780 VALUE_REGNUM (new_val));
3781 else if (VALUE_LVAL (new_val) == lval_memory)
3782 fprintf_unfiltered (gdb_stdlog, " address=%s",
3783 paddress (gdbarch,
3784 value_address (new_val)));
3785 else
3786 fprintf_unfiltered (gdb_stdlog, " computed");
3787
3788 fprintf_unfiltered (gdb_stdlog, " bytes=");
3789 fprintf_unfiltered (gdb_stdlog, "[");
3790 for (i = 0; i < register_size (gdbarch, regnum); i++)
3791 fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
3792 fprintf_unfiltered (gdb_stdlog, "]");
3793 }
3794
3795 fprintf_unfiltered (gdb_stdlog, " }\n");
3796 }
3797
3798 /* Dispose of the intermediate values. This prevents
3799 watchpoints from trying to watch the saved frame pointer. */
3800 value_free_to_mark (mark);
3801 }
3802 else if (VALUE_LVAL (val) == lval_computed
3803 && value_computed_funcs (val)->read != NULL)
3804 value_computed_funcs (val)->read (val);
3805 /* Don't call value_optimized_out on VAL; doing so would result in a
3806 recursive call back to value_fetch_lazy.  Instead, check the
3807 optimized_out flag directly. */
3808 else if (val->optimized_out)
3809 /* Keep it optimized out. */;
3810 else
3811 internal_error (__FILE__, __LINE__, _("Unexpected lazy value type."));
3812
3813 set_value_lazy (val, 0);
3814 return 0;
3815 }
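
/* Usage sketch (illustrative, not part of the original file): callers
   normally do not invoke value_fetch_lazy directly.  Accessors such as
   value_contents fetch a lazy value on demand, e.g.:

       struct value *v = value_at_lazy (type, addr);
       const gdb_byte *bytes = value_contents (v);

   Here value_at_lazy allocates a lazy lval_memory value without touching
   the target; the value_contents call notices the lazy flag and ends up
   in value_fetch_lazy above before returning the buffer.  */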
3816
3817 /* Implementation of the convenience function $_isvoid. */
3818
3819 static struct value *
3820 isvoid_internal_fn (struct gdbarch *gdbarch,
3821 const struct language_defn *language,
3822 void *cookie, int argc, struct value **argv)
3823 {
3824 int ret;
3825
3826 if (argc != 1)
3827 error (_("You must provide one argument for $_isvoid."));
3828
3829 ret = TYPE_CODE (value_type (argv[0])) == TYPE_CODE_VOID;
3830
3831 return value_from_longest (builtin_type (gdbarch)->builtin_int, ret);
3832 }
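
/* Example of the resulting convenience function at the CLI (illustrative;
   assumes $unset has never been assigned and therefore has type void):

       (gdb) print $_isvoid ($unset)
       $1 = 1
       (gdb) print $_isvoid (42)
       $2 = 0
*/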
3833
3834 void
3835 _initialize_values (void)
3836 {
3837 add_cmd ("convenience", no_class, show_convenience, _("\
3838 Debugger convenience (\"$foo\") variables and functions.\n\
3839 Convenience variables are created when you assign them values;\n\
3840 thus, \"set $foo=1\" gives \"$foo\" the value 1. Values may be any type.\n\
3841 \n\
3842 A few convenience variables are given values automatically:\n\
3843 \"$_\"holds the last address examined with \"x\" or \"info lines\",\n\
3844 \"$__\" holds the contents of the last address examined with \"x\"."
3845 #ifdef HAVE_PYTHON
3846 "\n\n\
3847 Convenience functions are defined via the Python API."
3848 #endif
3849 ), &showlist);
3850 add_alias_cmd ("conv", "convenience", no_class, 1, &showlist);
3851
3852 add_cmd ("values", no_set_class, show_values, _("\
3853 Elements of value history around item number IDX (or last ten)."),
3854 &showlist);
3855
3856 add_com ("init-if-undefined", class_vars, init_if_undefined_command, _("\
3857 Initialize a convenience variable if necessary.\n\
3858 init-if-undefined VARIABLE = EXPRESSION\n\
3859 Set an internal VARIABLE to the result of the EXPRESSION if it does not\n\
3860 exist or does not contain a value. The EXPRESSION is not evaluated if the\n\
3861 VARIABLE is already initialized."));
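
  /* For example (illustrative; the variable name is hypothetical):

         (gdb) init-if-undefined $perfcount = 0

     assigns 0 only if $perfcount does not already hold a value; a second
     invocation leaves the existing value untouched.  */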
3862
3863 add_prefix_cmd ("function", no_class, function_command, _("\
3864 Placeholder command for showing help on convenience functions."),
3865 &functionlist, "function ", 0, &cmdlist);
3866
3867 add_internal_function ("_isvoid", _("\
3868 Check whether an expression is void.\n\
3869 Usage: $_isvoid (expression)\n\
3870 Return 1 if the expression is void, zero otherwise."),
3871 isvoid_internal_fn, NULL);
3872 }
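
/* Illustrative sketch, not part of the original file: registering another
   convenience function follows the same pattern as $_isvoid above.  The
   name "_example" and the handler example_internal_fn are hypothetical.  */

#if 0
static struct value *
example_internal_fn (struct gdbarch *gdbarch,
		     const struct language_defn *language,
		     void *cookie, int argc, struct value **argv)
{
  /* Return the number of arguments the caller passed, as an int.  */
  return value_from_longest (builtin_type (gdbarch)->builtin_int, argc);
}

/* In _initialize_values:

     add_internal_function ("_example", _("Return the argument count."),
                            example_internal_fn, NULL);  */
#endif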