[Linux] Optimize PID -> struct lwp_info lookup
[deliverable/binutils-gdb.git] / gdb / value.c
1 /* Low level packing and unpacking of values for GDB, the GNU Debugger.
2
3 Copyright (C) 1986-2016 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21 #include "arch-utils.h"
22 #include "symtab.h"
23 #include "gdbtypes.h"
24 #include "value.h"
25 #include "gdbcore.h"
26 #include "command.h"
27 #include "gdbcmd.h"
28 #include "target.h"
29 #include "language.h"
30 #include "demangle.h"
31 #include "doublest.h"
32 #include "regcache.h"
33 #include "block.h"
34 #include "dfp.h"
35 #include "objfiles.h"
36 #include "valprint.h"
37 #include "cli/cli-decode.h"
38 #include "extension.h"
39 #include <ctype.h>
40 #include "tracepoint.h"
41 #include "cp-abi.h"
42 #include "user-regs.h"
43
44 /* Prototypes for exported functions. */
45
46 void _initialize_values (void);
47
/* Definition of a user function.  */
struct internal_function
{
  /* The name of the function.  It is a bit odd to have this in the
     function itself -- the user might use a differently-named
     convenience variable to hold the function.  */
  char *name;

  /* The handler.  Called when the function is invoked from an
     expression; see internal_function_fn for the signature.  */
  internal_function_fn handler;

  /* User data for the handler; passed back to it verbatim.  */
  void *cookie;
};
62
/* Defines an [OFFSET, OFFSET + LENGTH) range.  */

struct range
{
  /* Lowest offset in the range.  */
  int offset;

  /* Length of the range.  */
  int length;
};

typedef struct range range_s;

/* Declare the VEC(range_s) vector type used for the unavailable /
   optimized-out range lists below.  */
DEF_VEC_O(range_s);
77
78 /* Returns true if the ranges defined by [offset1, offset1+len1) and
79 [offset2, offset2+len2) overlap. */
80
81 static int
82 ranges_overlap (int offset1, int len1,
83 int offset2, int len2)
84 {
85 ULONGEST h, l;
86
87 l = max (offset1, offset2);
88 h = min (offset1 + len1, offset2 + len2);
89 return (l < h);
90 }
91
/* Returns true if the first argument is strictly less than the
   second, useful for VEC_lower_bound.  We keep ranges sorted by
   offset and coalesce overlapping and contiguous ranges, so this just
   compares the starting offset; LENGTH never participates in the
   ordering.  */

static int
range_lessthan (const range_s *r1, const range_s *r2)
{
  return r1->offset < r2->offset;
}
102
/* Returns true if RANGES contains any range that overlaps [OFFSET,
   OFFSET+LENGTH).  */

static int
ranges_contain (VEC(range_s) *ranges, int offset, int length)
{
  range_s what;
  int i;

  what.offset = offset;
  what.length = length;

  /* We keep ranges sorted by offset and coalesce overlapping and
     contiguous ranges, so to check if a range list contains a given
     range, we can do a binary search for the position the given range
     would be inserted if we only considered the starting OFFSET of
     ranges.  We call that position I.  Since we also have LENGTH to
     care for (this is a range afterall), we need to check if the
     _previous_ range overlaps the I range.  E.g.,

	   R
	   |---|
	 |---|    |---|  |------| ... |--|
	 0        1      2            N

	 I=1

     In the case above, the binary search would return `I=1', meaning,
     this OFFSET should be inserted at position 1, and the current
     position 1 should be pushed further (and before 2).  But, `0'
     overlaps with R.

     Then we also need to check whether R overlaps the range at
     position I itself.  E.g.,

	    R
	    |---|
	 |---|    |---|  |-------| ... |--|
	 0        1      2             N

	 I=1
  */

  i = VEC_lower_bound (range_s, ranges, &what, range_lessthan);

  if (i > 0)
    {
      struct range *bef = VEC_index (range_s, ranges, i - 1);

      /* The range just before the insertion point may extend into
	 [OFFSET, OFFSET+LENGTH).  */
      if (ranges_overlap (bef->offset, bef->length, offset, length))
	return 1;
    }

  if (i < VEC_length (range_s, ranges))
    {
      struct range *r = VEC_index (range_s, ranges, i);

      /* The range at the insertion point itself may also overlap.  */
      if (ranges_overlap (r->offset, r->length, offset, length))
	return 1;
    }

  return 0;
}
166
167 static struct cmd_list_element *functionlist;
168
/* Note that the fields in this structure are arranged to save a bit
   of memory.  */

struct value
{
  /* Type of value; either not an lval, or one of the various
     different possible kinds of lval.  */
  enum lval_type lval;

  /* Is it modifiable?  Only relevant if lval != not_lval.  */
  unsigned int modifiable : 1;

  /* If zero, contents of this value are in the contents field.  If
     nonzero, contents are in inferior.  If the lval field is lval_memory,
     the contents are in inferior memory at location.address plus offset.
     The lval field may also be lval_register.

     WARNING: This field is used by the code which handles watchpoints
     (see breakpoint.c) to decide whether a particular value can be
     watched by hardware watchpoints.  If the lazy flag is set for
     some member of a value chain, it is assumed that this member of
     the chain doesn't need to be watched as part of watching the
     value itself.  This is how GDB avoids watching the entire struct
     or array when the user wants to watch a single struct member or
     array element.  If you ever change the way lazy flag is set and
     reset, be sure to consider this use as well!  */
  unsigned int lazy : 1;

  /* If value is a variable, is it initialized or not.  */
  unsigned int initialized : 1;

  /* If value is from the stack.  If this is set, read_stack will be
     used instead of read_memory to enable extra caching.  */
  unsigned int stack : 1;

  /* If the value has been released.  */
  unsigned int released : 1;

  /* Register number if the value is from a register.  Set to -1 for
     values that do not come from a register (see
     allocate_value_lazy).  */
  short regnum;

  /* Location of value (if lval).  */
  union
  {
    /* If lval == lval_memory, this is the address in the inferior.
       If lval == lval_register, this is the byte offset into the
       registers structure.  */
    CORE_ADDR address;

    /* Pointer to internal variable.  */
    struct internalvar *internalvar;

    /* Pointer to xmethod worker.  */
    struct xmethod_worker *xm_worker;

    /* If lval == lval_computed, this is a set of function pointers
       to use to access and describe the value, and a closure pointer
       for them to use.  */
    struct
    {
      /* Functions to call.  */
      const struct lval_funcs *funcs;

      /* Closure for those functions to use.  */
      void *closure;
    } computed;
  } location;

  /* Describes offset of a value within lval of a structure in target
     addressable memory units.  If lval == lval_memory, this is an offset to
     the address.  If lval == lval_register, this is a further offset from
     location.address within the registers structure.  Note also the member
     embedded_offset below.  */
  int offset;

  /* Only used for bitfields; number of bits contained in them.  */
  int bitsize;

  /* Only used for bitfields; position of start of field.  For
     gdbarch_bits_big_endian=0 targets, it is the position of the LSB.  For
     gdbarch_bits_big_endian=1 targets, it is the position of the MSB.  */
  int bitpos;

  /* The number of references to this value.  When a value is created,
     the value chain holds a reference, so REFERENCE_COUNT is 1.  If
     release_value is called, this value is removed from the chain but
     the caller of release_value now has a reference to this value.
     The caller must arrange for a call to value_free later.  */
  int reference_count;

  /* Only used for bitfields; the containing value.  This allows a
     single read from the target when displaying multiple
     bitfields.  */
  struct value *parent;

  /* Frame register value is relative to.  This will be described in
     the lval enum above as "lval_register".  */
  struct frame_id frame_id;

  /* Type of the value.  */
  struct type *type;

  /* If a value represents a C++ object, then the `type' field gives
     the object's compile-time type.  If the object actually belongs
     to some class derived from `type', perhaps with other base
     classes and additional members, then `type' is just a subobject
     of the real thing, and the full object is probably larger than
     `type' would suggest.

     If `type' is a dynamic class (i.e. one with a vtable), then GDB
     can actually determine the object's run-time type by looking at
     the run-time type information in the vtable.  When this
     information is available, we may elect to read in the entire
     object, for several reasons:

     - When printing the value, the user would probably rather see the
       full object, not just the limited portion apparent from the
       compile-time type.

     - If `type' has virtual base classes, then even printing `type'
       alone may require reaching outside the `type' portion of the
       object to wherever the virtual base class has been stored.

     When we store the entire object, `enclosing_type' is the run-time
     type -- the complete object -- and `embedded_offset' is the
     offset of `type' within that larger type, in target addressable memory
     units.  The value_contents() macro takes `embedded_offset' into account,
     so most GDB code continues to see the `type' portion of the value, just
     as the inferior would.

     If `type' is a pointer to an object, then `enclosing_type' is a
     pointer to the object's run-time type, and `pointed_to_offset' is
     the offset in target addressable memory units from the full object
     to the pointed-to object -- that is, the value `embedded_offset' would
     have if we followed the pointer and fetched the complete object.
     (I don't really see the point.  Why not just determine the
     run-time type when you indirect, and avoid the special case?  The
     contents don't matter until you indirect anyway.)

     If we're not doing anything fancy, `enclosing_type' is equal to
     `type', and `embedded_offset' is zero, so everything works
     normally.  */
  struct type *enclosing_type;
  int embedded_offset;
  int pointed_to_offset;

  /* Values are stored in a chain, so that they can be deleted easily
     over calls to the inferior.  Values assigned to internal
     variables, put into the value history or exposed to Python are
     taken off this list.  */
  struct value *next;

  /* Actual contents of the value.  Target byte-order.  NULL or not
     valid if lazy is nonzero.  */
  gdb_byte *contents;

  /* Unavailable ranges in CONTENTS.  We mark unavailable ranges,
     rather than available, since the common and default case is for a
     value to be available.  This is filled in at value read time.
     The unavailable ranges are tracked in bits.  Note that a contents
     bit that has been optimized out doesn't really exist in the
     program, so it can't be marked unavailable either.  */
  VEC(range_s) *unavailable;

  /* Likewise, but for optimized out contents (a chunk of the value of
     a variable that does not actually exist in the program).  If LVAL
     is lval_register, this is a register ($pc, $sp, etc., never a
     program variable) that has not been saved in the frame.  Not
     saved registers and optimized-out program variables values are
     treated pretty much the same, except not-saved registers have a
     different string representation and related error strings.  */
  VEC(range_s) *optimized_out;
};
342
/* See value.h.  */

struct gdbarch *
get_value_arch (const struct value *value)
{
  /* The architecture is a property of the value's type.  */
  struct type *type = value_type (value);

  return get_type_arch (type);
}
350
351 int
352 value_bits_available (const struct value *value, int offset, int length)
353 {
354 gdb_assert (!value->lazy);
355
356 return !ranges_contain (value->unavailable, offset, length);
357 }
358
359 int
360 value_bytes_available (const struct value *value, int offset, int length)
361 {
362 return value_bits_available (value,
363 offset * TARGET_CHAR_BIT,
364 length * TARGET_CHAR_BIT);
365 }
366
367 int
368 value_bits_any_optimized_out (const struct value *value, int bit_offset, int bit_length)
369 {
370 gdb_assert (!value->lazy);
371
372 return ranges_contain (value->optimized_out, bit_offset, bit_length);
373 }
374
375 int
376 value_entirely_available (struct value *value)
377 {
378 /* We can only tell whether the whole value is available when we try
379 to read it. */
380 if (value->lazy)
381 value_fetch_lazy (value);
382
383 if (VEC_empty (range_s, value->unavailable))
384 return 1;
385 return 0;
386 }
387
388 /* Returns true if VALUE is entirely covered by RANGES. If the value
389 is lazy, it'll be read now. Note that RANGE is a pointer to
390 pointer because reading the value might change *RANGE. */
391
392 static int
393 value_entirely_covered_by_range_vector (struct value *value,
394 VEC(range_s) **ranges)
395 {
396 /* We can only tell whether the whole value is optimized out /
397 unavailable when we try to read it. */
398 if (value->lazy)
399 value_fetch_lazy (value);
400
401 if (VEC_length (range_s, *ranges) == 1)
402 {
403 struct range *t = VEC_index (range_s, *ranges, 0);
404
405 if (t->offset == 0
406 && t->length == (TARGET_CHAR_BIT
407 * TYPE_LENGTH (value_enclosing_type (value))))
408 return 1;
409 }
410
411 return 0;
412 }
413
/* Return non-zero if every bit of VALUE's contents is marked
   unavailable; fetches the value if it is still lazy.  */

int
value_entirely_unavailable (struct value *value)
{
  return value_entirely_covered_by_range_vector (value, &value->unavailable);
}
419
/* Return non-zero if every bit of VALUE's contents is marked
   optimized out; fetches the value if it is still lazy.  */

int
value_entirely_optimized_out (struct value *value)
{
  return value_entirely_covered_by_range_vector (value, &value->optimized_out);
}
425
426 /* Insert into the vector pointed to by VECTORP the bit range starting of
427 OFFSET bits, and extending for the next LENGTH bits. */
428
429 static void
430 insert_into_bit_range_vector (VEC(range_s) **vectorp, int offset, int length)
431 {
432 range_s newr;
433 int i;
434
435 /* Insert the range sorted. If there's overlap or the new range
436 would be contiguous with an existing range, merge. */
437
438 newr.offset = offset;
439 newr.length = length;
440
441 /* Do a binary search for the position the given range would be
442 inserted if we only considered the starting OFFSET of ranges.
443 Call that position I. Since we also have LENGTH to care for
444 (this is a range afterall), we need to check if the _previous_
445 range overlaps the I range. E.g., calling R the new range:
446
447 #1 - overlaps with previous
448
449 R
450 |-...-|
451 |---| |---| |------| ... |--|
452 0 1 2 N
453
454 I=1
455
456 In the case #1 above, the binary search would return `I=1',
457 meaning, this OFFSET should be inserted at position 1, and the
458 current position 1 should be pushed further (and become 2). But,
459 note that `0' overlaps with R, so we want to merge them.
460
461 A similar consideration needs to be taken if the new range would
462 be contiguous with the previous range:
463
464 #2 - contiguous with previous
465
466 R
467 |-...-|
468 |--| |---| |------| ... |--|
469 0 1 2 N
470
471 I=1
472
473 If there's no overlap with the previous range, as in:
474
475 #3 - not overlapping and not contiguous
476
477 R
478 |-...-|
479 |--| |---| |------| ... |--|
480 0 1 2 N
481
482 I=1
483
484 or if I is 0:
485
486 #4 - R is the range with lowest offset
487
488 R
489 |-...-|
490 |--| |---| |------| ... |--|
491 0 1 2 N
492
493 I=0
494
495 ... we just push the new range to I.
496
497 All the 4 cases above need to consider that the new range may
498 also overlap several of the ranges that follow, or that R may be
499 contiguous with the following range, and merge. E.g.,
500
501 #5 - overlapping following ranges
502
503 R
504 |------------------------|
505 |--| |---| |------| ... |--|
506 0 1 2 N
507
508 I=0
509
510 or:
511
512 R
513 |-------|
514 |--| |---| |------| ... |--|
515 0 1 2 N
516
517 I=1
518
519 */
520
521 i = VEC_lower_bound (range_s, *vectorp, &newr, range_lessthan);
522 if (i > 0)
523 {
524 struct range *bef = VEC_index (range_s, *vectorp, i - 1);
525
526 if (ranges_overlap (bef->offset, bef->length, offset, length))
527 {
528 /* #1 */
529 ULONGEST l = min (bef->offset, offset);
530 ULONGEST h = max (bef->offset + bef->length, offset + length);
531
532 bef->offset = l;
533 bef->length = h - l;
534 i--;
535 }
536 else if (offset == bef->offset + bef->length)
537 {
538 /* #2 */
539 bef->length += length;
540 i--;
541 }
542 else
543 {
544 /* #3 */
545 VEC_safe_insert (range_s, *vectorp, i, &newr);
546 }
547 }
548 else
549 {
550 /* #4 */
551 VEC_safe_insert (range_s, *vectorp, i, &newr);
552 }
553
554 /* Check whether the ranges following the one we've just added or
555 touched can be folded in (#5 above). */
556 if (i + 1 < VEC_length (range_s, *vectorp))
557 {
558 struct range *t;
559 struct range *r;
560 int removed = 0;
561 int next = i + 1;
562
563 /* Get the range we just touched. */
564 t = VEC_index (range_s, *vectorp, i);
565 removed = 0;
566
567 i = next;
568 for (; VEC_iterate (range_s, *vectorp, i, r); i++)
569 if (r->offset <= t->offset + t->length)
570 {
571 ULONGEST l, h;
572
573 l = min (t->offset, r->offset);
574 h = max (t->offset + t->length, r->offset + r->length);
575
576 t->offset = l;
577 t->length = h - l;
578
579 removed++;
580 }
581 else
582 {
583 /* If we couldn't merge this one, we won't be able to
584 merge following ones either, since the ranges are
585 always sorted by OFFSET. */
586 break;
587 }
588
589 if (removed != 0)
590 VEC_block_remove (range_s, *vectorp, next, removed);
591 }
592 }
593
/* Mark VALUE's content bits [OFFSET, OFFSET+LENGTH) as
   unavailable.  */

void
mark_value_bits_unavailable (struct value *value, int offset, int length)
{
  insert_into_bit_range_vector (&value->unavailable, offset, length);
}
599
/* Byte-granularity wrapper around mark_value_bits_unavailable.  */

void
mark_value_bytes_unavailable (struct value *value, int offset, int length)
{
  mark_value_bits_unavailable (value,
			       offset * TARGET_CHAR_BIT,
			       length * TARGET_CHAR_BIT);
}
607
608 /* Find the first range in RANGES that overlaps the range defined by
609 OFFSET and LENGTH, starting at element POS in the RANGES vector,
610 Returns the index into RANGES where such overlapping range was
611 found, or -1 if none was found. */
612
613 static int
614 find_first_range_overlap (VEC(range_s) *ranges, int pos,
615 int offset, int length)
616 {
617 range_s *r;
618 int i;
619
620 for (i = pos; VEC_iterate (range_s, ranges, i, r); i++)
621 if (ranges_overlap (r->offset, r->length, offset, length))
622 return i;
623
624 return -1;
625 }
626
/* Compare LENGTH_BITS of memory at PTR1 + OFFSET1_BITS with the memory at
   PTR2 + OFFSET2_BITS.  Return 0 if the memory is the same, otherwise
   return non-zero.

   It must always be the case that:
     OFFSET1_BITS % TARGET_CHAR_BIT == OFFSET2_BITS % TARGET_CHAR_BIT

   It is assumed that memory can be accessed from:
     PTR + (OFFSET_BITS / TARGET_CHAR_BIT)
   to:
     PTR + ((OFFSET_BITS + LENGTH_BITS + TARGET_CHAR_BIT - 1)
	    / TARGET_CHAR_BIT)  */
static int
memcmp_with_bit_offsets (const gdb_byte *ptr1, size_t offset1_bits,
			 const gdb_byte *ptr2, size_t offset2_bits,
			 size_t length_bits)
{
  gdb_assert (offset1_bits % TARGET_CHAR_BIT
	      == offset2_bits % TARGET_CHAR_BIT);

  if (offset1_bits % TARGET_CHAR_BIT != 0)
    {
      size_t bits;
      gdb_byte mask, b1, b2;

      /* The offset from the base pointers PTR1 and PTR2 is not a complete
	 number of bytes.  A number of bits up to either the next exact
	 byte boundary, or LENGTH_BITS (which ever is sooner) will be
	 compared.  */
      bits = TARGET_CHAR_BIT - offset1_bits % TARGET_CHAR_BIT;
      gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
      /* MASK selects the low-order BITS bits of a byte.  */
      mask = (1 << bits) - 1;

      if (length_bits < bits)
	{
	  /* Fewer bits requested than remain in this byte; narrow the
	     mask accordingly.  */
	  mask &= ~(gdb_byte) ((1 << (bits - length_bits)) - 1);
	  bits = length_bits;
	}

      /* Now load the two bytes and mask off the bits we care about.  */
      b1 = *(ptr1 + offset1_bits / TARGET_CHAR_BIT) & mask;
      b2 = *(ptr2 + offset2_bits / TARGET_CHAR_BIT) & mask;

      if (b1 != b2)
	return 1;

      /* Now update the length and offsets to take account of the bits
	 we've just compared.  */
      length_bits -= bits;
      offset1_bits += bits;
      offset2_bits += bits;
    }

  if (length_bits % TARGET_CHAR_BIT != 0)
    {
      size_t bits;
      size_t o1, o2;
      gdb_byte mask, b1, b2;

      /* The length is not an exact number of bytes.  After the previous
	 IF.. block then the offsets are byte aligned, or the
	 length is zero (in which case this code is not reached).  Compare
	 a number of bits at the end of the region, starting from an exact
	 byte boundary.  */
      bits = length_bits % TARGET_CHAR_BIT;
      o1 = offset1_bits + length_bits - bits;
      o2 = offset2_bits + length_bits - bits;

      gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
      /* MASK selects the high-order BITS bits of a byte.  */
      mask = ((1 << bits) - 1) << (TARGET_CHAR_BIT - bits);

      gdb_assert (o1 % TARGET_CHAR_BIT == 0);
      gdb_assert (o2 % TARGET_CHAR_BIT == 0);

      b1 = *(ptr1 + o1 / TARGET_CHAR_BIT) & mask;
      b2 = *(ptr2 + o2 / TARGET_CHAR_BIT) & mask;

      if (b1 != b2)
	return 1;

      length_bits -= bits;
    }

  if (length_bits > 0)
    {
      /* We've now taken care of any stray "bits" at the start, or end of
	 the region to compare, the remainder can be covered with a simple
	 memcmp.  */
      gdb_assert (offset1_bits % TARGET_CHAR_BIT == 0);
      gdb_assert (offset2_bits % TARGET_CHAR_BIT == 0);
      gdb_assert (length_bits % TARGET_CHAR_BIT == 0);

      return memcmp (ptr1 + offset1_bits / TARGET_CHAR_BIT,
		     ptr2 + offset2_bits / TARGET_CHAR_BIT,
		     length_bits / TARGET_CHAR_BIT);
    }

  /* Length is zero, regions match.  */
  return 0;
}
727
/* Helper struct for find_first_range_overlap_and_match and
   value_contents_bits_eq.  Keeps track of which slot of a given
   ranges vector we last looked at.  */

struct ranges_and_idx
{
  /* The ranges.  */
  VEC(range_s) *ranges;

  /* The range we've last found in RANGES.  Given ranges are sorted,
     we can start the next lookup here.  */
  int idx;
};
741
742 /* Helper function for value_contents_bits_eq. Compare LENGTH bits of
743 RP1's ranges starting at OFFSET1 bits with LENGTH bits of RP2's
744 ranges starting at OFFSET2 bits. Return true if the ranges match
745 and fill in *L and *H with the overlapping window relative to
746 (both) OFFSET1 or OFFSET2. */
747
748 static int
749 find_first_range_overlap_and_match (struct ranges_and_idx *rp1,
750 struct ranges_and_idx *rp2,
751 int offset1, int offset2,
752 int length, ULONGEST *l, ULONGEST *h)
753 {
754 rp1->idx = find_first_range_overlap (rp1->ranges, rp1->idx,
755 offset1, length);
756 rp2->idx = find_first_range_overlap (rp2->ranges, rp2->idx,
757 offset2, length);
758
759 if (rp1->idx == -1 && rp2->idx == -1)
760 {
761 *l = length;
762 *h = length;
763 return 1;
764 }
765 else if (rp1->idx == -1 || rp2->idx == -1)
766 return 0;
767 else
768 {
769 range_s *r1, *r2;
770 ULONGEST l1, h1;
771 ULONGEST l2, h2;
772
773 r1 = VEC_index (range_s, rp1->ranges, rp1->idx);
774 r2 = VEC_index (range_s, rp2->ranges, rp2->idx);
775
776 /* Get the unavailable windows intersected by the incoming
777 ranges. The first and last ranges that overlap the argument
778 range may be wider than said incoming arguments ranges. */
779 l1 = max (offset1, r1->offset);
780 h1 = min (offset1 + length, r1->offset + r1->length);
781
782 l2 = max (offset2, r2->offset);
783 h2 = min (offset2 + length, offset2 + r2->length);
784
785 /* Make them relative to the respective start offsets, so we can
786 compare them for equality. */
787 l1 -= offset1;
788 h1 -= offset1;
789
790 l2 -= offset2;
791 h2 -= offset2;
792
793 /* Different ranges, no match. */
794 if (l1 != l2 || h1 != h2)
795 return 0;
796
797 *h = h1;
798 *l = l1;
799 return 1;
800 }
801 }
802
/* Helper function for value_contents_eq.  The only difference is that
   this function is bit rather than byte based.

   Compare LENGTH bits of VAL1's contents starting at OFFSET1 bits
   with LENGTH bits of VAL2's contents starting at OFFSET2 bits.
   Return true if the available bits match.  */

static int
value_contents_bits_eq (const struct value *val1, int offset1,
			const struct value *val2, int offset2,
			int length)
{
  /* Each array element corresponds to a ranges source (unavailable,
     optimized out).  '1' is for VAL1, '2' for VAL2.  */
  struct ranges_and_idx rp1[2], rp2[2];

  /* See function description in value.h.  */
  gdb_assert (!val1->lazy && !val2->lazy);

  /* We shouldn't be trying to compare past the end of the values.  */
  gdb_assert (offset1 + length
	      <= TYPE_LENGTH (val1->enclosing_type) * TARGET_CHAR_BIT);
  gdb_assert (offset2 + length
	      <= TYPE_LENGTH (val2->enclosing_type) * TARGET_CHAR_BIT);

  memset (&rp1, 0, sizeof (rp1));
  memset (&rp2, 0, sizeof (rp2));
  rp1[0].ranges = val1->unavailable;
  rp2[0].ranges = val2->unavailable;
  rp1[1].ranges = val1->optimized_out;
  rp2[1].ranges = val2->optimized_out;

  while (length > 0)
    {
      ULONGEST l = 0, h = 0; /* init for gcc -Wall */
      int i;

      for (i = 0; i < 2; i++)
	{
	  ULONGEST l_tmp, h_tmp;

	  /* The contents only compare equal if the invalid/unavailable
	     contents ranges match as well.  */
	  if (!find_first_range_overlap_and_match (&rp1[i], &rp2[i],
						   offset1, offset2, length,
						   &l_tmp, &h_tmp))
	    return 0;

	  /* We're interested in the lowest/first range found.  */
	  if (i == 0 || l_tmp < l)
	    {
	      l = l_tmp;
	      h = h_tmp;
	    }
	}

      /* Compare the available/valid contents.  */
      if (memcmp_with_bit_offsets (val1->contents, offset1,
				   val2->contents, offset2, l) != 0)
	return 0;

      /* Advance past the window just handled and keep going.  */
      length -= h;
      offset1 += h;
      offset2 += h;
    }

  return 1;
}
871
/* Compare LENGTH bytes of VAL1's contents starting at OFFSET1 with
   LENGTH bytes of VAL2's contents starting at OFFSET2; a byte-based
   wrapper around value_contents_bits_eq.  */

int
value_contents_eq (const struct value *val1, int offset1,
		   const struct value *val2, int offset2,
		   int length)
{
  return value_contents_bits_eq (val1, offset1 * TARGET_CHAR_BIT,
				 val2, offset2 * TARGET_CHAR_BIT,
				 length * TARGET_CHAR_BIT);
}
881
882 /* Prototypes for local functions. */
883
884 static void show_values (char *, int);
885
886 static void show_convenience (char *, int);
887
888
/* The value-history records all the values printed
   by print commands during this session.  Each chunk
   records 60 consecutive values.  The first chunk on
   the chain records the most recent values.
   The total number of values is in value_history_count.  */

#define VALUE_HISTORY_CHUNK 60

struct value_history_chunk
{
  /* Next (older) chunk on the chain.  */
  struct value_history_chunk *next;

  /* The stored values.  */
  struct value *values[VALUE_HISTORY_CHUNK];
};

/* Chain of chunks now in use.  */

static struct value_history_chunk *value_history_chain;

static int value_history_count;	/* Abs number of last entry stored.  */
908
909 \f
/* List of all value objects currently allocated
   (except for those released by calls to release_value).
   This is so they can be freed after each command.  */

static struct value *all_values;
915
/* Allocate a lazy value for type TYPE.  Its actual content is
   "lazily" allocated too: the content field of the return value is
   NULL; it will be allocated when it is fetched from the target.  */

struct value *
allocate_value_lazy (struct type *type)
{
  struct value *val;

  /* Call check_typedef on our type to make sure that, if TYPE
     is a TYPE_CODE_TYPEDEF, its length is set to the length
     of the target type instead of zero.  However, we do not
     replace the typedef type by the target type, because we want
     to keep the typedef in order to be able to set the VAL's type
     description correctly.  */
  check_typedef (type);

  /* XCNEW returns zeroed storage, so fields not explicitly set below
     start out as 0/NULL.  */
  val = XCNEW (struct value);
  val->contents = NULL;
  /* Link the new value onto the head of the all_values chain.  */
  val->next = all_values;
  all_values = val;
  val->type = type;
  val->enclosing_type = type;
  VALUE_LVAL (val) = not_lval;
  val->location.address = 0;
  VALUE_FRAME_ID (val) = null_frame_id;
  val->offset = 0;
  val->bitpos = 0;
  val->bitsize = 0;
  VALUE_REGNUM (val) = -1;
  val->lazy = 1;
  val->embedded_offset = 0;
  val->pointed_to_offset = 0;
  val->modifiable = 1;
  val->initialized = 1;  /* Default to initialized.  */

  /* Values start out on the all_values chain.  */
  val->reference_count = 1;

  return val;
}
957
/* The maximum size, in bytes, that GDB will try to allocate for a value.
   The initial value of 64k was not selected for any specific reason, it is
   just a reasonable starting point.  A value of -1 means "unlimited"
   (see set_max_value_size / check_type_length_before_alloc).  */

static int max_value_size = 65536; /* 64k bytes */

/* It is critical that the MAX_VALUE_SIZE is at least as big as the size of
   LONGEST, otherwise GDB will not be able to parse integer values from the
   CLI; for example if the MAX_VALUE_SIZE could be set to 1 then GDB would
   be unable to parse "set max-value-size 2".

   As we want a consistent GDB experience across hosts with different sizes
   of LONGEST, this arbitrary minimum value was selected, so long as this
   is bigger than LONGEST on all GDB supported hosts we're fine.  */

#define MIN_VALUE_FOR_MAX_VALUE_SIZE 16
gdb_static_assert (sizeof (LONGEST) <= MIN_VALUE_FOR_MAX_VALUE_SIZE);
975
/* Implement the "set max-value-size" command.  */

static void
set_max_value_size (char *args, int from_tty,
		    struct cmd_list_element *c)
{
  gdb_assert (max_value_size == -1 || max_value_size >= 0);

  if (max_value_size > -1 && max_value_size < MIN_VALUE_FOR_MAX_VALUE_SIZE)
    {
      /* Clamp the setting to the minimum before reporting the error,
	 so the variable is left in a usable state even though the
	 user's request was rejected.  */
      max_value_size = MIN_VALUE_FOR_MAX_VALUE_SIZE;
      error (_("max-value-size set too low, increasing to %d bytes"),
	     max_value_size);
    }
}
991
992 /* Implement the "show max-value-size" command. */
993
994 static void
995 show_max_value_size (struct ui_file *file, int from_tty,
996 struct cmd_list_element *c, const char *value)
997 {
998 if (max_value_size == -1)
999 fprintf_filtered (file, _("Maximum value size is unlimited.\n"));
1000 else
1001 fprintf_filtered (file, _("Maximum value size is %d bytes.\n"),
1002 max_value_size);
1003 }
1004
1005 /* Called before we attempt to allocate or reallocate a buffer for the
1006 contents of a value. TYPE is the type of the value for which we are
1007 allocating the buffer. If the buffer is too large (based on the user
1008 controllable setting) then throw an error. If this function returns
1009 then we should attempt to allocate the buffer. */
1010
1011 static void
1012 check_type_length_before_alloc (const struct type *type)
1013 {
1014 unsigned int length = TYPE_LENGTH (type);
1015
1016 if (max_value_size > -1 && length > max_value_size)
1017 {
1018 if (TYPE_NAME (type) != NULL)
1019 error (_("value of type `%s' requires %u bytes, which is more "
1020 "than max-value-size"), TYPE_NAME (type), length);
1021 else
1022 error (_("value requires %u bytes, which is more than "
1023 "max-value-size"), length);
1024 }
1025 }
1026
/* Allocate the contents of VAL if it has not been allocated yet.  */

static void
allocate_value_contents (struct value *val)
{
  if (!val->contents)
    {
      /* Enforce the "max-value-size" setting before allocating; this
	 throws if the enclosing type is too large.  */
      check_type_length_before_alloc (val->enclosing_type);
      val->contents
	= (gdb_byte *) xzalloc (TYPE_LENGTH (val->enclosing_type));
    }
}
1039
1040 /* Allocate a value and its contents for type TYPE. */
1041
1042 struct value *
1043 allocate_value (struct type *type)
1044 {
1045 struct value *val = allocate_value_lazy (type);
1046
1047 allocate_value_contents (val);
1048 val->lazy = 0;
1049 return val;
1050 }
1051
1052 /* Allocate a value that has the correct length
1053 for COUNT repetitions of type TYPE. */
1054
1055 struct value *
1056 allocate_repeat_value (struct type *type, int count)
1057 {
1058 int low_bound = current_language->string_lower_bound; /* ??? */
1059 /* FIXME-type-allocation: need a way to free this type when we are
1060 done with it. */
1061 struct type *array_type
1062 = lookup_array_range_type (type, low_bound, count + low_bound - 1);
1063
1064 return allocate_value (array_type);
1065 }
1066
1067 struct value *
1068 allocate_computed_value (struct type *type,
1069 const struct lval_funcs *funcs,
1070 void *closure)
1071 {
1072 struct value *v = allocate_value_lazy (type);
1073
1074 VALUE_LVAL (v) = lval_computed;
1075 v->location.computed.funcs = funcs;
1076 v->location.computed.closure = closure;
1077
1078 return v;
1079 }
1080
/* Allocate NOT_LVAL value for type TYPE being OPTIMIZED_OUT.  */

struct value *
allocate_optimized_out_value (struct type *type)
{
  struct value *v = allocate_value_lazy (type);

  /* Flag every byte of the value as optimized out, then clear the lazy
     flag so nothing ever tries to fetch its contents.  */
  mark_value_bytes_optimized_out (v, 0, TYPE_LENGTH (type));
  set_value_lazy (v, 0);
  return v;
}
1092
/* Accessor methods.  */

/* Return the value chained after VALUE on the all_values list.  */

struct value *
value_next (const struct value *value)
{
  return value->next;
}

/* Return VALUE's type.  */

struct type *
value_type (const struct value *value)
{
  return value->type;
}
/* Overwrite VALUE's type.  Deprecated: prefer creating a new value
   with the desired type instead of mutating in place.  */
void
deprecated_set_value_type (struct value *value, struct type *type)
{
  value->type = type;
}

/* Return VALUE's offset from its location.  */

int
value_offset (const struct value *value)
{
  return value->offset;
}
/* Set VALUE's offset from its location to OFFSET.  */
void
set_value_offset (struct value *value, int offset)
{
  value->offset = offset;
}

/* Return the bit position of bitfield VALUE within its parent.  */

int
value_bitpos (const struct value *value)
{
  return value->bitpos;
}
/* Set the bit position of bitfield VALUE to BIT.  */
void
set_value_bitpos (struct value *value, int bit)
{
  value->bitpos = bit;
}

/* Return the size in bits of bitfield VALUE (zero when VALUE is not
   a bitfield).  */

int
value_bitsize (const struct value *value)
{
  return value->bitsize;
}
/* Set the size in bits of bitfield VALUE to BIT.  */
void
set_value_bitsize (struct value *value, int bit)
{
  value->bitsize = bit;
}

/* Return the value that VALUE is a component of, or NULL.  */

struct value *
value_parent (const struct value *value)
{
  return value->parent;
}
1150
/* See value.h.  */

void
set_value_parent (struct value *value, struct value *parent)
{
  struct value *old = value->parent;

  value->parent = parent;
  /* Take the new reference before dropping the old one, so this is
     safe even when PARENT == OLD.  */
  if (parent != NULL)
    value_incref (parent);
  value_free (old);
}
1163
/* Return a writeable pointer to VALUE's contents, positioned at the
   start of VALUE's own type within the enclosing type's buffer (the
   embedded offset, scaled by the target's memory unit size).
   Allocates the contents buffer if needed.  */

gdb_byte *
value_contents_raw (struct value *value)
{
  struct gdbarch *arch = get_value_arch (value);
  int unit_size = gdbarch_addressable_memory_unit_size (arch);

  allocate_value_contents (value);
  return value->contents + value->embedded_offset * unit_size;
}

/* Return a writeable pointer to the whole buffer for VALUE's
   enclosing type.  Allocates the contents buffer if needed.  */

gdb_byte *
value_contents_all_raw (struct value *value)
{
  allocate_value_contents (value);
  return value->contents;
}

/* Return the enclosing (complete object) type of VALUE.  */

struct type *
value_enclosing_type (const struct value *value)
{
  return value->enclosing_type;
}
1186
/* Look at value.h for description.  */

struct type *
value_actual_type (struct value *value, int resolve_simple_types,
		   int *real_type_found)
{
  struct value_print_options opts;
  struct type *result;

  get_user_print_options (&opts);

  if (real_type_found)
    *real_type_found = 0;
  /* Default to the declared type when no dynamic type is found.  */
  result = value_type (value);
  /* Dynamic type resolution only happens under "set print object on".  */
  if (opts.objectprint)
    {
      /* If result's target type is TYPE_CODE_STRUCT, proceed to
	 fetch its rtti type.  */
      if ((TYPE_CODE (result) == TYPE_CODE_PTR
	   || TYPE_CODE (result) == TYPE_CODE_REF)
	  && TYPE_CODE (check_typedef (TYPE_TARGET_TYPE (result)))
	     == TYPE_CODE_STRUCT
	  && !value_optimized_out (value))
	{
	  struct type *real_type;

	  real_type = value_rtti_indirect_type (value, NULL, NULL, NULL);
	  if (real_type)
	    {
	      if (real_type_found)
		*real_type_found = 1;
	      result = real_type;
	    }
	}
      else if (resolve_simple_types)
	{
	  /* For non-pointer/reference values, the enclosing type is
	     the best "actual" type available.  */
	  if (real_type_found)
	    *real_type_found = 1;
	  result = value_enclosing_type (value);
	}
    }

  return result;
}
1231
/* Throw the standard "optimized out" error.  */

void
error_value_optimized_out (void)
{
  error (_("value has been optimized out"));
}

/* Error out if any part of VALUE is optimized out.  Registers get a
   more specific message than other lvals.  */

static void
require_not_optimized_out (const struct value *value)
{
  if (!VEC_empty (range_s, value->optimized_out))
    {
      if (value->lval == lval_register)
	error (_("register has not been saved in frame"));
      else
	error_value_optimized_out ();
    }
}

/* Throw NOT_AVAILABLE_ERROR if any part of VALUE is unavailable.  */

static void
require_available (const struct value *value)
{
  if (!VEC_empty (range_s, value->unavailable))
    throw_error (NOT_AVAILABLE_ERROR, _("value is not available"));
}
1256
/* Return VALUE's full contents buffer, fetching it first if VALUE is
   lazy.  Unlike value_contents_all, this does not error out on
   optimized-out or unavailable ranges, so it suits printing code that
   handles those ranges itself.  */

const gdb_byte *
value_contents_for_printing (struct value *value)
{
  if (value->lazy)
    value_fetch_lazy (value);
  return value->contents;
}

/* Const variant of the above; VALUE must already be non-lazy.  */

const gdb_byte *
value_contents_for_printing_const (const struct value *value)
{
  gdb_assert (!value->lazy);
  return value->contents;
}

/* Return VALUE's full contents, erroring out if any part is
   optimized out or unavailable.  */

const gdb_byte *
value_contents_all (struct value *value)
{
  const gdb_byte *result = value_contents_for_printing (value);
  require_not_optimized_out (value);
  require_available (value);
  return result;
}
1280
/* Copy ranges in SRC_RANGE that overlap [SRC_BIT_OFFSET,
   SRC_BIT_OFFSET+BIT_LENGTH) ranges into *DST_RANGE, adjusted.  */

static void
ranges_copy_adjusted (VEC (range_s) **dst_range, int dst_bit_offset,
		      VEC (range_s) *src_range, int src_bit_offset,
		      int bit_length)
{
  range_s *r;
  int i;

  for (i = 0; VEC_iterate (range_s, src_range, i, r); i++)
    {
      ULONGEST h, l;

      /* Intersect [r->offset, r->offset + r->length) with the source
	 window.  */
      l = max (r->offset, src_bit_offset);
      h = min (r->offset + r->length, src_bit_offset + bit_length);

      /* If the intersection is non-empty, re-base it onto the
	 destination window and record it.  */
      if (l < h)
	insert_into_bit_range_vector (dst_range,
				      dst_bit_offset + (l - src_bit_offset),
				      h - l);
    }
}
1305
/* Copy the ranges metadata in SRC that overlaps [SRC_BIT_OFFSET,
   SRC_BIT_OFFSET+BIT_LENGTH) into DST, adjusted.  Both the
   unavailable and the optimized-out range vectors are copied.  */

static void
value_ranges_copy_adjusted (struct value *dst, int dst_bit_offset,
			    const struct value *src, int src_bit_offset,
			    int bit_length)
{
  ranges_copy_adjusted (&dst->unavailable, dst_bit_offset,
			src->unavailable, src_bit_offset,
			bit_length);
  ranges_copy_adjusted (&dst->optimized_out, dst_bit_offset,
			src->optimized_out, src_bit_offset,
			bit_length);
}
1321
/* Copy LENGTH target addressable memory units of SRC value's (all) contents
   (value_contents_all) starting at SRC_OFFSET, into DST value's (all)
   contents, starting at DST_OFFSET.  If unavailable contents are
   being copied from SRC, the corresponding DST contents are marked
   unavailable accordingly.  Neither DST nor SRC may be lazy
   values.

   It is assumed the contents of DST in the [DST_OFFSET,
   DST_OFFSET+LENGTH) range are wholly available.  */

void
value_contents_copy_raw (struct value *dst, int dst_offset,
			 struct value *src, int src_offset, int length)
{
  int src_bit_offset, dst_bit_offset, bit_length;
  struct gdbarch *arch = get_value_arch (src);
  int unit_size = gdbarch_addressable_memory_unit_size (arch);

  /* A lazy DST would make that this copy operation useless, since as
     soon as DST's contents were un-lazied (by a later value_contents
     call, say), the contents would be overwritten.  A lazy SRC would
     mean we'd be copying garbage.  */
  gdb_assert (!dst->lazy && !src->lazy);

  /* The overwritten DST range gets unavailability ORed in, not
     replaced.  Make sure to remember to implement replacing if it
     turns out actually necessary.  */
  gdb_assert (value_bytes_available (dst, dst_offset, length));
  gdb_assert (!value_bits_any_optimized_out (dst,
					     TARGET_CHAR_BIT * dst_offset,
					     TARGET_CHAR_BIT * length));

  /* Copy the data.  */
  memcpy (value_contents_all_raw (dst) + dst_offset * unit_size,
	  value_contents_all_raw (src) + src_offset * unit_size,
	  length * unit_size);

  /* Copy the meta-data, adjusted.  NOTE(review): the assertions above
     scale offsets by TARGET_CHAR_BIT while the computations below use
     HOST_CHAR_BIT; these agree only when host and target bytes have
     the same width -- confirm whether the mix is intentional.  */
  src_bit_offset = src_offset * unit_size * HOST_CHAR_BIT;
  dst_bit_offset = dst_offset * unit_size * HOST_CHAR_BIT;
  bit_length = length * unit_size * HOST_CHAR_BIT;

  value_ranges_copy_adjusted (dst, dst_bit_offset,
			      src, src_bit_offset,
			      bit_length);
}
1368
/* Copy LENGTH bytes of SRC value's (all) contents
   (value_contents_all) starting at SRC_OFFSET byte, into DST value's
   (all) contents, starting at DST_OFFSET.  If unavailable contents
   are being copied from SRC, the corresponding DST contents are
   marked unavailable accordingly.  DST must not be lazy.  If SRC is
   lazy, it will be fetched now.

   It is assumed the contents of DST in the [DST_OFFSET,
   DST_OFFSET+LENGTH) range are wholly available.  */

void
value_contents_copy (struct value *dst, int dst_offset,
		     struct value *src, int src_offset, int length)
{
  /* Un-lazy SRC first; value_contents_copy_raw requires both values
     to be non-lazy.  */
  if (value_lazy (src))
    value_fetch_lazy (src);

  value_contents_copy_raw (dst, dst_offset, src, src_offset, length);
}
1388
/* Return non-zero if VALUE's contents have not been fetched yet.  */

int
value_lazy (const struct value *value)
{
  return value->lazy;
}

/* Set VALUE's lazy flag to VAL.  */

void
set_value_lazy (struct value *value, int val)
{
  value->lazy = val;
}

/* Return VALUE's stack flag.  */

int
value_stack (const struct value *value)
{
  return value->stack;
}

/* Set VALUE's stack flag to VAL.  */

void
set_value_stack (struct value *value, int val)
{
  value->stack = val;
}
1412
/* Return VALUE's contents (at the embedded offset of its own type),
   erroring out if any part is optimized out or unavailable.  Fetches
   lazy values.  */

const gdb_byte *
value_contents (struct value *value)
{
  const gdb_byte *result = value_contents_writeable (value);
  require_not_optimized_out (value);
  require_available (value);
  return result;
}

/* Writeable variant of value_contents: fetches lazy values but does
   not check for optimized-out or unavailable ranges.  */

gdb_byte *
value_contents_writeable (struct value *value)
{
  if (value->lazy)
    value_fetch_lazy (value);
  return value_contents_raw (value);
}
1429
/* Return non-zero if any part of VALUE has been optimized out.  */

int
value_optimized_out (struct value *value)
{
  /* We can only know if a value is optimized out once we have tried to
     fetch it.  */
  if (VEC_empty (range_s, value->optimized_out) && value->lazy)
    {
      TRY
	{
	  value_fetch_lazy (value);
	}
      CATCH (ex, RETURN_MASK_ERROR)
	{
	  /* Fall back to checking value->optimized_out.  */
	}
      END_CATCH
    }

  return !VEC_empty (range_s, value->optimized_out);
}
1450
/* Mark contents of VALUE as optimized out, starting at OFFSET bytes, and
   the following LENGTH bytes.  */

void
mark_value_bytes_optimized_out (struct value *value, int offset, int length)
{
  /* Byte quantities are converted to bits and delegated to the
     bit-granular version.  */
  mark_value_bits_optimized_out (value,
				 offset * TARGET_CHAR_BIT,
				 length * TARGET_CHAR_BIT);
}

/* See value.h.  */

void
mark_value_bits_optimized_out (struct value *value, int offset, int length)
{
  insert_into_bit_range_vector (&value->optimized_out, offset, length);
}

/* Return non-zero if bits [OFFSET, OFFSET+LENGTH) of VALUE form a
   synthetic pointer, as reported by the computed-value callbacks.
   Values that are not lval_computed, or whose callbacks do not
   implement the check, never are.  */

int
value_bits_synthetic_pointer (const struct value *value,
			      int offset, int length)
{
  if (value->lval != lval_computed
      || !value->location.computed.funcs->check_synthetic_pointer)
    return 0;
  return value->location.computed.funcs->check_synthetic_pointer (value,
								  offset,
								  length);
}
1481
/* Return the offset of VALUE's own type within the enclosing type's
   contents buffer (see value_contents_raw, which applies it).  */

int
value_embedded_offset (const struct value *value)
{
  return value->embedded_offset;
}

/* Set VALUE's embedded offset to VAL.  */

void
set_value_embedded_offset (struct value *value, int val)
{
  value->embedded_offset = val;
}

/* Accessors for the pointed_to_offset field; see value.h for its
   contract.  */

int
value_pointed_to_offset (const struct value *value)
{
  return value->pointed_to_offset;
}

void
set_value_pointed_to_offset (struct value *value, int val)
{
  value->pointed_to_offset = val;
}
1505
/* Return the lval_funcs callbacks of computed value V.  V must be an
   lval_computed value.  */

const struct lval_funcs *
value_computed_funcs (const struct value *v)
{
  gdb_assert (value_lval_const (v) == lval_computed);

  return v->location.computed.funcs;
}
1513
1514 void *
1515 value_computed_closure (const struct value *v)
1516 {
1517 gdb_assert (v->lval == lval_computed);
1518
1519 return v->location.computed.closure;
1520 }
1521
/* Return a mutable pointer to VALUE's lval kind.  Deprecated; read
   via VALUE_LVAL and write via the dedicated setters instead.  */

enum lval_type *
deprecated_value_lval_hack (struct value *value)
{
  return &value->lval;
}

/* Read-only accessor for VALUE's lval kind, usable on const values.  */

enum lval_type
value_lval_const (const struct value *value)
{
  return value->lval;
}
1533
/* Return the address VALUE refers to, taking the value's offset, its
   parent (for component values) and any constant data-location type
   property into account.  Values not backed by the inferior
   (internal variables, xmethods) yield 0.  */

CORE_ADDR
value_address (const struct value *value)
{
  if (value->lval == lval_internalvar
      || value->lval == lval_internalvar_component
      || value->lval == lval_xcallable)
    return 0;
  /* A component's address is relative to its parent's.  */
  if (value->parent != NULL)
    return value_address (value->parent) + value->offset;
  if (NULL != TYPE_DATA_LOCATION (value_type (value)))
    {
      /* Only constant data locations can be turned into an address
	 here.  */
      gdb_assert (PROP_CONST == TYPE_DATA_LOCATION_KIND (value_type (value)));
      return TYPE_DATA_LOCATION_ADDR (value_type (value));
    }

  return value->location.address + value->offset;
}
1551
/* Like value_address, but without applying the offset, parent, or
   data-location adjustments.  */

CORE_ADDR
value_raw_address (const struct value *value)
{
  if (value->lval == lval_internalvar
      || value->lval == lval_internalvar_component
      || value->lval == lval_xcallable)
    return 0;
  return value->location.address;
}

/* Set VALUE's backing address to ADDR.  Only meaningful for values
   whose location is an inferior address.  */

void
set_value_address (struct value *value, CORE_ADDR addr)
{
  gdb_assert (value->lval != lval_internalvar
	      && value->lval != lval_internalvar_component
	      && value->lval != lval_xcallable);
  value->location.address = addr;
}
1570
/* Deprecated direct-access hacks: these expose mutable pointers into
   a value's internals for legacy callers.  New code should use the
   proper accessors instead.  */

struct internalvar **
deprecated_value_internalvar_hack (struct value *value)
{
  return &value->location.internalvar;
}

struct frame_id *
deprecated_value_frame_id_hack (struct value *value)
{
  return &value->frame_id;
}

short *
deprecated_value_regnum_hack (struct value *value)
{
  return &value->regnum;
}

/* Return non-zero if VALUE may be modified (e.g. by "set").  */

int
deprecated_value_modifiable (const struct value *value)
{
  return value->modifiable;
}
1594 \f
/* Return a mark in the value chain.  All values allocated after the
   mark is obtained (except for those released) are subject to being freed
   if a subsequent value_free_to_mark is passed the mark.  */
struct value *
value_mark (void)
{
  /* The chain head is the newest value, so it serves as the mark.  */
  return all_values;
}

/* Take a reference to VAL.  VAL will not be deallocated until all
   references are released.  */

void
value_incref (struct value *val)
{
  val->reference_count++;
}
1612
1613 /* Release a reference to VAL, which was acquired with value_incref.
1614 This function is also called to deallocate values from the value
1615 chain. */
1616
1617 void
1618 value_free (struct value *val)
1619 {
1620 if (val)
1621 {
1622 gdb_assert (val->reference_count > 0);
1623 val->reference_count--;
1624 if (val->reference_count > 0)
1625 return;
1626
1627 /* If there's an associated parent value, drop our reference to
1628 it. */
1629 if (val->parent != NULL)
1630 value_free (val->parent);
1631
1632 if (VALUE_LVAL (val) == lval_computed)
1633 {
1634 const struct lval_funcs *funcs = val->location.computed.funcs;
1635
1636 if (funcs->free_closure)
1637 funcs->free_closure (val);
1638 }
1639 else if (VALUE_LVAL (val) == lval_xcallable)
1640 free_xmethod_worker (val->location.xm_worker);
1641
1642 xfree (val->contents);
1643 VEC_free (range_s, val->unavailable);
1644 }
1645 xfree (val);
1646 }
1647
1648 /* Free all values allocated since MARK was obtained by value_mark
1649 (except for those released). */
1650 void
1651 value_free_to_mark (const struct value *mark)
1652 {
1653 struct value *val;
1654 struct value *next;
1655
1656 for (val = all_values; val && val != mark; val = next)
1657 {
1658 next = val->next;
1659 val->released = 1;
1660 value_free (val);
1661 }
1662 all_values = val;
1663 }
1664
1665 /* Free all the values that have been allocated (except for those released).
1666 Call after each command, successful or not.
1667 In practice this is called before each command, which is sufficient. */
1668
1669 void
1670 free_all_values (void)
1671 {
1672 struct value *val;
1673 struct value *next;
1674
1675 for (val = all_values; val; val = next)
1676 {
1677 next = val->next;
1678 val->released = 1;
1679 value_free (val);
1680 }
1681
1682 all_values = 0;
1683 }
1684
1685 /* Frees all the elements in a chain of values. */
1686
1687 void
1688 free_value_chain (struct value *v)
1689 {
1690 struct value *next;
1691
1692 for (; v; v = next)
1693 {
1694 next = value_next (v);
1695 value_free (v);
1696 }
1697 }
1698
1699 /* Remove VAL from the chain all_values
1700 so it will not be freed automatically. */
1701
1702 void
1703 release_value (struct value *val)
1704 {
1705 struct value *v;
1706
1707 if (all_values == val)
1708 {
1709 all_values = val->next;
1710 val->next = NULL;
1711 val->released = 1;
1712 return;
1713 }
1714
1715 for (v = all_values; v; v = v->next)
1716 {
1717 if (v->next == val)
1718 {
1719 v->next = val->next;
1720 val->next = NULL;
1721 val->released = 1;
1722 break;
1723 }
1724 }
1725 }
1726
/* If the value is not already released, release it.
   If the value is already released, increment its reference count.
   That is, this function ensures that the value is released from the
   value chain and that the caller owns a reference to it.  */

void
release_value_or_incref (struct value *val)
{
  if (val->released)
    value_incref (val);
  else
    /* The chain's former reference becomes the caller's.  */
    release_value (val);
}
1740
/* Release all values up to mark.  Returns the (old) head of the
   released sub-chain.  */
struct value *
value_release_to_mark (const struct value *mark)
{
  struct value *val;
  struct value *next;

  /* Flag each value up to (but not including) MARK as released, then
     cut the chain just before MARK.  */
  for (val = next = all_values; next; next = next->next)
    {
      if (next->next == mark)
	{
	  all_values = next->next;
	  next->next = NULL;
	  /* NOTE(review): this last value of the released sub-chain is
	     returned without its released flag set, and when MARK is
	     the current chain head the loop never matches at all --
	     confirm callers never depend on either case.  */
	  return val;
	}
      next->released = 1;
    }
  all_values = 0;
  return val;
}
1761
/* Return a copy of the value ARG.
   It contains the same contents, for same memory address,
   but it's a different block of storage.  */

struct value *
value_copy (struct value *arg)
{
  struct type *encl_type = value_enclosing_type (arg);
  struct value *val;

  /* Preserve laziness: a lazy source yields a lazy copy with no
     contents buffer allocated.  */
  if (value_lazy (arg))
    val = allocate_value_lazy (encl_type);
  else
    val = allocate_value (encl_type);
  val->type = arg->type;
  VALUE_LVAL (val) = VALUE_LVAL (arg);
  val->location = arg->location;
  val->offset = arg->offset;
  val->bitpos = arg->bitpos;
  val->bitsize = arg->bitsize;
  VALUE_FRAME_ID (val) = VALUE_FRAME_ID (arg);
  VALUE_REGNUM (val) = VALUE_REGNUM (arg);
  val->lazy = arg->lazy;
  val->embedded_offset = value_embedded_offset (arg);
  val->pointed_to_offset = arg->pointed_to_offset;
  val->modifiable = arg->modifiable;
  if (!value_lazy (val))
    {
      memcpy (value_contents_all_raw (val), value_contents_all_raw (arg),
	      TYPE_LENGTH (value_enclosing_type (arg)));

    }
  /* Duplicate the unavailable/optimized-out metadata as well.  */
  val->unavailable = VEC_copy (range_s, arg->unavailable);
  val->optimized_out = VEC_copy (range_s, arg->optimized_out);
  /* set_value_parent takes its own reference to the parent.  */
  set_value_parent (val, arg->parent);
  if (VALUE_LVAL (val) == lval_computed)
    {
      const struct lval_funcs *funcs = val->location.computed.funcs;

      /* Give computed values a chance to duplicate their closure.  */
      if (funcs->copy_closure)
	val->location.computed.closure = funcs->copy_closure (val);
    }
  return val;
}
1806
1807 /* Return a "const" and/or "volatile" qualified version of the value V.
1808 If CNST is true, then the returned value will be qualified with
1809 "const".
1810 if VOLTL is true, then the returned value will be qualified with
1811 "volatile". */
1812
1813 struct value *
1814 make_cv_value (int cnst, int voltl, struct value *v)
1815 {
1816 struct type *val_type = value_type (v);
1817 struct type *enclosing_type = value_enclosing_type (v);
1818 struct value *cv_val = value_copy (v);
1819
1820 deprecated_set_value_type (cv_val,
1821 make_cv_type (cnst, voltl, val_type, NULL));
1822 set_value_enclosing_type (cv_val,
1823 make_cv_type (cnst, voltl, enclosing_type, NULL));
1824
1825 return cv_val;
1826 }
1827
1828 /* Return a version of ARG that is non-lvalue. */
1829
1830 struct value *
1831 value_non_lval (struct value *arg)
1832 {
1833 if (VALUE_LVAL (arg) != not_lval)
1834 {
1835 struct type *enc_type = value_enclosing_type (arg);
1836 struct value *val = allocate_value (enc_type);
1837
1838 memcpy (value_contents_all_raw (val), value_contents_all (arg),
1839 TYPE_LENGTH (enc_type));
1840 val->type = arg->type;
1841 set_value_embedded_offset (val, value_embedded_offset (arg));
1842 set_value_pointed_to_offset (val, value_pointed_to_offset (arg));
1843 return val;
1844 }
1845 return arg;
1846 }
1847
/* Write contents of V at ADDR and set its lval type to be LVAL_MEMORY.  */

void
value_force_lval (struct value *v, CORE_ADDR addr)
{
  gdb_assert (VALUE_LVAL (v) == not_lval);

  write_memory (addr, value_contents_raw (v), TYPE_LENGTH (value_type (v)));
  /* From now on the value is backed by target memory at ADDR.  */
  v->lval = lval_memory;
  v->location.address = addr;
}
1859
/* Make COMPONENT's location the same as WHOLE's, accounting for
   COMPONENT being a sub-object of WHOLE.  */

void
set_value_component_location (struct value *component,
			      const struct value *whole)
{
  struct type *type;

  gdb_assert (whole->lval != lval_xcallable);

  /* Components of internal variables get their own lval kind.  */
  if (whole->lval == lval_internalvar)
    VALUE_LVAL (component) = lval_internalvar_component;
  else
    VALUE_LVAL (component) = whole->lval;

  component->location = whole->location;
  if (whole->lval == lval_computed)
    {
      const struct lval_funcs *funcs = whole->location.computed.funcs;

      /* Computed values may need to duplicate their closure for the
	 component.  */
      if (funcs->copy_closure)
	component->location.computed.closure = funcs->copy_closure (whole);
    }

  /* If type has a dynamic resolved location property
     update it's value address.  */
  type = value_type (whole);
  if (NULL != TYPE_DATA_LOCATION (type)
      && TYPE_DATA_LOCATION_KIND (type) == PROP_CONST)
    set_value_address (component, TYPE_DATA_LOCATION_ADDR (type));
}
1889
/* Access to the value history.  */

/* Record a new value in the value history.
   Returns the absolute history index of the entry.  */

int
record_latest_value (struct value *val)
{
  int i;

  /* We don't want this value to have anything to do with the inferior anymore.
     In particular, "set $1 = 50" should not affect the variable from which
     the value was taken, and fast watchpoints should be able to assume that
     a value on the value history never changes.  */
  if (value_lazy (val))
    value_fetch_lazy (val);
  /* We preserve VALUE_LVAL so that the user can find out where it was fetched
     from.  This is a bit dubious, because then *&$1 does not just return $1
     but the current contents of that location.  c'est la vie...  */
  val->modifiable = 0;

  /* The value may have already been released, in which case we're adding a
     new reference for its entry in the history.  That is why we call
     release_value_or_incref here instead of release_value.  */
  release_value_or_incref (val);

  /* Here we treat value_history_count as origin-zero
     and applying to the value being stored now.  */

  i = value_history_count % VALUE_HISTORY_CHUNK;
  if (i == 0)
    {
      /* This chunk is full; start a new one.  Chunks are linked
	 newest-first.  */
      struct value_history_chunk *newobj = XCNEW (struct value_history_chunk);

      newobj->next = value_history_chain;
      value_history_chain = newobj;
    }

  value_history_chain->values[i] = val;

  /* Now we regard value_history_count as origin-one
     and applying to the value just stored.  */

  return ++value_history_count;
}
1935
/* Return a copy of the value in the history with sequence number NUM.  */

struct value *
access_value_history (int num)
{
  struct value_history_chunk *chunk;
  int i;
  int absnum = num;

  /* NUM <= 0 is relative to the most recent entry ($$0, $$1, ...).  */
  if (absnum <= 0)
    absnum += value_history_count;

  if (absnum <= 0)
    {
      if (num == 0)
	error (_("The history is empty."));
      else if (num == 1)
	error (_("There is only one value in the history."));
      else
	error (_("History does not go back to $$%d."), -num);
    }
  if (absnum > value_history_count)
    error (_("History has not yet reached $%d."), absnum);

  absnum--;

  /* Now absnum is always absolute and origin zero.  */

  /* The chunk list is newest-first, so walk back by the difference in
     chunk indices.  */
  chunk = value_history_chain;
  for (i = (value_history_count - 1) / VALUE_HISTORY_CHUNK
	 - absnum / VALUE_HISTORY_CHUNK;
       i > 0; i--)
    chunk = chunk->next;

  return value_copy (chunk->values[absnum % VALUE_HISTORY_CHUNK]);
}
1972
/* Implement the "show values" command: print up to ten entries of the
   value history around a given position.  */

static void
show_values (char *num_exp, int from_tty)
{
  int i;
  struct value *val;
  /* Position to resume from on "show values +"; persists across
     invocations.  */
  static int num = 1;

  if (num_exp)
    {
      /* "show values +" should print from the stored position.
	 "show values <exp>" should print around value number <exp>.  */
      if (num_exp[0] != '+' || num_exp[1] != '\0')
	/* Start five entries before <exp>, so <exp> lands roughly in
	   the middle of the ten-entry window.  */
	num = parse_and_eval_long (num_exp) - 5;
    }
  else
    {
      /* "show values" means print the last 10 values.  */
      num = value_history_count - 9;
    }

  if (num <= 0)
    num = 1;

  for (i = num; i < num + 10 && i <= value_history_count; i++)
    {
      struct value_print_options opts;

      val = access_value_history (i);
      printf_filtered (("$%d = "), i);
      get_user_print_options (&opts);
      value_print (val, gdb_stdout, &opts);
      printf_filtered (("\n"));
    }

  /* The next "show values +" should start after what we just printed.  */
  num += 10;

  /* Hitting just return after this command should do the same thing as
     "show values +".  If num_exp is null, this is unnecessary, since
     "show values +" is not useful after "show values".  */
  if (from_tty && num_exp)
    {
      num_exp[0] = '+';
      num_exp[1] = '\0';
    }
}
2019 \f
/* The kinds of content an internal variable can hold.  */

enum internalvar_kind
{
  /* The internal variable is empty.  */
  INTERNALVAR_VOID,

  /* The value of the internal variable is provided directly as
     a GDB value object.  */
  INTERNALVAR_VALUE,

  /* A fresh value is computed via a call-back routine on every
     access to the internal variable.  */
  INTERNALVAR_MAKE_VALUE,

  /* The internal variable holds a GDB internal convenience function.  */
  INTERNALVAR_FUNCTION,

  /* The variable holds an integer value.  */
  INTERNALVAR_INTEGER,

  /* The variable holds a GDB-provided string.  */
  INTERNALVAR_STRING,
};
2042
/* Storage for the content of an internal variable.  Which member is
   active is determined by the kind field of struct internalvar.  */

union internalvar_data
{
  /* A value object used with INTERNALVAR_VALUE.  */
  struct value *value;

  /* The call-back routine used with INTERNALVAR_MAKE_VALUE.  */
  struct
  {
    /* The functions to call.  */
    const struct internalvar_funcs *functions;

    /* The function's user-data.  */
    void *data;
  } make_value;

  /* The internal function used with INTERNALVAR_FUNCTION.  */
  struct
  {
    struct internal_function *function;
    /* True if this is the canonical name for the function.  */
    int canonical;
  } fn;

  /* An integer value used with INTERNALVAR_INTEGER.  */
  struct
  {
    /* If type is non-NULL, it will be used as the type to generate
       a value for this internal variable.  If type is NULL, a default
       integer type for the architecture is used.  */
    struct type *type;
    LONGEST val;
  } integer;

  /* A string value used with INTERNALVAR_STRING.  */
  char *string;
};
2079
/* Internal variables.  These are variables within the debugger
   that hold values assigned by debugger commands.
   The user refers to them with a '$' prefix
   that does not appear in the variable names stored internally.  */

struct internalvar
{
  /* Next variable on the singly-linked list of all internal
     variables.  */
  struct internalvar *next;
  /* The variable's name, without the '$' prefix.  */
  char *name;

  /* We support various different kinds of content of an internal variable.
     enum internalvar_kind specifies the kind, and union internalvar_data
     provides the data associated with this particular kind.  */

  enum internalvar_kind kind;

  union internalvar_data u;
};

/* Head of the list of all internal variables.  */
static struct internalvar *internalvars;
2100
2101 /* If the variable does not already exist create it and give it the
2102 value given. If no value is given then the default is zero. */
2103 static void
2104 init_if_undefined_command (char* args, int from_tty)
2105 {
2106 struct internalvar* intvar;
2107
2108 /* Parse the expression - this is taken from set_command(). */
2109 struct expression *expr = parse_expression (args);
2110 register struct cleanup *old_chain =
2111 make_cleanup (free_current_contents, &expr);
2112
2113 /* Validate the expression.
2114 Was the expression an assignment?
2115 Or even an expression at all? */
2116 if (expr->nelts == 0 || expr->elts[0].opcode != BINOP_ASSIGN)
2117 error (_("Init-if-undefined requires an assignment expression."));
2118
2119 /* Extract the variable from the parsed expression.
2120 In the case of an assign the lvalue will be in elts[1] and elts[2]. */
2121 if (expr->elts[1].opcode != OP_INTERNALVAR)
2122 error (_("The first parameter to init-if-undefined "
2123 "should be a GDB variable."));
2124 intvar = expr->elts[2].internalvar;
2125
2126 /* Only evaluate the expression if the lvalue is void.
2127 This may still fail if the expresssion is invalid. */
2128 if (intvar->kind == INTERNALVAR_VOID)
2129 evaluate_expression (expr);
2130
2131 do_cleanups (old_chain);
2132 }
2133
2134
2135 /* Look up an internal variable with name NAME. NAME should not
2136 normally include a dollar sign.
2137
2138 If the specified internal variable does not exist,
2139 the return value is NULL. */
2140
2141 struct internalvar *
2142 lookup_only_internalvar (const char *name)
2143 {
2144 struct internalvar *var;
2145
2146 for (var = internalvars; var; var = var->next)
2147 if (strcmp (var->name, name) == 0)
2148 return var;
2149
2150 return NULL;
2151 }
2152
2153 /* Complete NAME by comparing it to the names of internal variables.
2154 Returns a vector of newly allocated strings, or NULL if no matches
2155 were found. */
2156
2157 VEC (char_ptr) *
2158 complete_internalvar (const char *name)
2159 {
2160 VEC (char_ptr) *result = NULL;
2161 struct internalvar *var;
2162 int len;
2163
2164 len = strlen (name);
2165
2166 for (var = internalvars; var; var = var->next)
2167 if (strncmp (var->name, name, len) == 0)
2168 {
2169 char *r = xstrdup (var->name);
2170
2171 VEC_safe_push (char_ptr, result, r);
2172 }
2173
2174 return result;
2175 }
2176
2177 /* Create an internal variable with name NAME and with a void value.
2178 NAME should not normally include a dollar sign. */
2179
2180 struct internalvar *
2181 create_internalvar (const char *name)
2182 {
2183 struct internalvar *var = XNEW (struct internalvar);
2184
2185 var->name = concat (name, (char *)NULL);
2186 var->kind = INTERNALVAR_VOID;
2187 var->next = internalvars;
2188 internalvars = var;
2189 return var;
2190 }
2191
/* Create an internal variable with name NAME and register FUNCS as
   the callbacks that value_of_internalvar uses to create a value
   whenever this variable is referenced.  NAME should not normally
   include a dollar sign.  DATA is passed uninterpreted to the
   callbacks.
   NOTE(review): the previous comment referred to FUN and CLEANUP
   parameters that do not exist in this signature; presumably any
   destruction hook now lives inside FUNCS -- confirm against
   internalvar_funcs in value.h.  */

struct internalvar *
create_internalvar_type_lazy (const char *name,
			      const struct internalvar_funcs *funcs,
			      void *data)
{
  struct internalvar *var = create_internalvar (name);

  var->kind = INTERNALVAR_MAKE_VALUE;
  var->u.make_value.functions = funcs;
  var->u.make_value.data = data;
  return var;
}
2211
2212 /* See documentation in value.h. */
2213
2214 int
2215 compile_internalvar_to_ax (struct internalvar *var,
2216 struct agent_expr *expr,
2217 struct axs_value *value)
2218 {
2219 if (var->kind != INTERNALVAR_MAKE_VALUE
2220 || var->u.make_value.functions->compile_to_ax == NULL)
2221 return 0;
2222
2223 var->u.make_value.functions->compile_to_ax (var, expr, value,
2224 var->u.make_value.data);
2225 return 1;
2226 }
2227
2228 /* Look up an internal variable with name NAME. NAME should not
2229 normally include a dollar sign.
2230
2231 If the specified internal variable does not exist,
2232 one is created, with a void value. */
2233
2234 struct internalvar *
2235 lookup_internalvar (const char *name)
2236 {
2237 struct internalvar *var;
2238
2239 var = lookup_only_internalvar (name);
2240 if (var)
2241 return var;
2242
2243 return create_internalvar (name);
2244 }
2245
2246 /* Return current value of internal variable VAR. For variables that
2247 are not inherently typed, use a value type appropriate for GDBARCH. */
2248
2249 struct value *
2250 value_of_internalvar (struct gdbarch *gdbarch, struct internalvar *var)
2251 {
2252 struct value *val;
2253 struct trace_state_variable *tsv;
2254
2255 /* If there is a trace state variable of the same name, assume that
2256 is what we really want to see. */
2257 tsv = find_trace_state_variable (var->name);
2258 if (tsv)
2259 {
2260 tsv->value_known = target_get_trace_state_variable_value (tsv->number,
2261 &(tsv->value));
2262 if (tsv->value_known)
2263 val = value_from_longest (builtin_type (gdbarch)->builtin_int64,
2264 tsv->value);
2265 else
2266 val = allocate_value (builtin_type (gdbarch)->builtin_void);
2267 return val;
2268 }
2269
2270 switch (var->kind)
2271 {
2272 case INTERNALVAR_VOID:
2273 val = allocate_value (builtin_type (gdbarch)->builtin_void);
2274 break;
2275
2276 case INTERNALVAR_FUNCTION:
2277 val = allocate_value (builtin_type (gdbarch)->internal_fn);
2278 break;
2279
2280 case INTERNALVAR_INTEGER:
2281 if (!var->u.integer.type)
2282 val = value_from_longest (builtin_type (gdbarch)->builtin_int,
2283 var->u.integer.val);
2284 else
2285 val = value_from_longest (var->u.integer.type, var->u.integer.val);
2286 break;
2287
2288 case INTERNALVAR_STRING:
2289 val = value_cstring (var->u.string, strlen (var->u.string),
2290 builtin_type (gdbarch)->builtin_char);
2291 break;
2292
2293 case INTERNALVAR_VALUE:
2294 val = value_copy (var->u.value);
2295 if (value_lazy (val))
2296 value_fetch_lazy (val);
2297 break;
2298
2299 case INTERNALVAR_MAKE_VALUE:
2300 val = (*var->u.make_value.functions->make_value) (gdbarch, var,
2301 var->u.make_value.data);
2302 break;
2303
2304 default:
2305 internal_error (__FILE__, __LINE__, _("bad kind"));
2306 }
2307
2308 /* Change the VALUE_LVAL to lval_internalvar so that future operations
2309 on this value go back to affect the original internal variable.
2310
2311 Do not do this for INTERNALVAR_MAKE_VALUE variables, as those have
2312 no underlying modifyable state in the internal variable.
2313
2314 Likewise, if the variable's value is a computed lvalue, we want
2315 references to it to produce another computed lvalue, where
2316 references and assignments actually operate through the
2317 computed value's functions.
2318
2319 This means that internal variables with computed values
2320 behave a little differently from other internal variables:
2321 assignments to them don't just replace the previous value
2322 altogether. At the moment, this seems like the behavior we
2323 want. */
2324
2325 if (var->kind != INTERNALVAR_MAKE_VALUE
2326 && val->lval != lval_computed)
2327 {
2328 VALUE_LVAL (val) = lval_internalvar;
2329 VALUE_INTERNALVAR (val) = var;
2330 }
2331
2332 return val;
2333 }
2334
2335 int
2336 get_internalvar_integer (struct internalvar *var, LONGEST *result)
2337 {
2338 if (var->kind == INTERNALVAR_INTEGER)
2339 {
2340 *result = var->u.integer.val;
2341 return 1;
2342 }
2343
2344 if (var->kind == INTERNALVAR_VALUE)
2345 {
2346 struct type *type = check_typedef (value_type (var->u.value));
2347
2348 if (TYPE_CODE (type) == TYPE_CODE_INT)
2349 {
2350 *result = value_as_long (var->u.value);
2351 return 1;
2352 }
2353 }
2354
2355 return 0;
2356 }
2357
2358 static int
2359 get_internalvar_function (struct internalvar *var,
2360 struct internal_function **result)
2361 {
2362 switch (var->kind)
2363 {
2364 case INTERNALVAR_FUNCTION:
2365 *result = var->u.fn.function;
2366 return 1;
2367
2368 default:
2369 return 0;
2370 }
2371 }
2372
/* Modify part of the contents of internal variable VAR, which must
   currently hold a value (INTERNALVAR_VALUE).  OFFSET locates the
   component within the value's contents; if BITSIZE is non-zero, the
   component is a bitfield at BITPOS/BITSIZE, otherwise NEWVAL's whole
   contents are copied in.  */

void
set_internalvar_component (struct internalvar *var, int offset, int bitpos,
			   int bitsize, struct value *newval)
{
  gdb_byte *addr;
  struct gdbarch *arch;
  int unit_size;

  switch (var->kind)
    {
    case INTERNALVAR_VALUE:
      addr = value_contents_writeable (var->u.value);
      arch = get_value_arch (var->u.value);
      unit_size = gdbarch_addressable_memory_unit_size (arch);

      /* NOTE(review): the bitfield path uses ADDR + OFFSET while the
	 memcpy path scales with UNIT_SIZE (ADDR + OFFSET * UNIT_SIZE).
	 On targets where the addressable unit is one byte the two are
	 identical; confirm whether the bitfield path should also scale
	 on unusual-unit-size targets.  */
      if (bitsize)
	modify_field (value_type (var->u.value), addr + offset,
		      value_as_long (newval), bitpos, bitsize);
      else
	memcpy (addr + offset * unit_size, value_contents (newval),
		TYPE_LENGTH (value_type (newval)));
      break;

    default:
      /* We can never get a component of any other kind.  */
      internal_error (__FILE__, __LINE__, _("set_internalvar_component"));
    }
}
2401
/* Set internal variable VAR to a copy of value VAL.  The variable's
   kind is chosen from VAL's type: void values make the variable void,
   internal-function values copy the function reference, and anything
   else stores a released (non-chained) copy of VAL.  Errors out if
   VAR is a canonical convenience function, which must not be
   overwritten.  */

void
set_internalvar (struct internalvar *var, struct value *val)
{
  enum internalvar_kind new_kind;
  union internalvar_data new_data = { 0 };

  if (var->kind == INTERNALVAR_FUNCTION && var->u.fn.canonical)
    error (_("Cannot overwrite convenience function %s"), var->name);

  /* Prepare new contents.  The old contents are left untouched until
     the new ones are fully built, so an error here leaves VAR in its
     previous state.  */
  switch (TYPE_CODE (check_typedef (value_type (val))))
    {
    case TYPE_CODE_VOID:
      new_kind = INTERNALVAR_VOID;
      break;

    case TYPE_CODE_INTERNAL_FUNCTION:
      gdb_assert (VALUE_LVAL (val) == lval_internalvar);
      new_kind = INTERNALVAR_FUNCTION;
      get_internalvar_function (VALUE_INTERNALVAR (val),
				&new_data.fn.function);
      /* Copies created here are never canonical.  */
      break;

    default:
      new_kind = INTERNALVAR_VALUE;
      new_data.value = value_copy (val);
      new_data.value->modifiable = 1;

      /* Force the value to be fetched from the target now, to avoid problems
	 later when this internalvar is referenced and the target is gone or
	 has changed.  */
      if (value_lazy (new_data.value))
	value_fetch_lazy (new_data.value);

      /* Release the value from the value chain to prevent it from being
	 deleted by free_all_values.  From here on this function should not
	 call error () until new_data is installed into the var->u to avoid
	 leaking memory.  */
      release_value (new_data.value);

      /* Internal variables which are created from values with a dynamic
	 location don't need the location property of the origin anymore.
	 The resolved dynamic location is used prior then any other address
	 when accessing the value.
	 If we keep it, we would still refer to the origin value.
	 Remove the location property in case it exist.  */
      remove_dyn_prop (DYN_PROP_DATA_LOCATION, value_type (new_data.value));

      break;
    }

  /* Clean up old contents.  */
  clear_internalvar (var);

  /* Switch over.  */
  var->kind = new_kind;
  var->u = new_data;
  /* End code which must not call error().  */
}
2462
2463 void
2464 set_internalvar_integer (struct internalvar *var, LONGEST l)
2465 {
2466 /* Clean up old contents. */
2467 clear_internalvar (var);
2468
2469 var->kind = INTERNALVAR_INTEGER;
2470 var->u.integer.type = NULL;
2471 var->u.integer.val = l;
2472 }
2473
2474 void
2475 set_internalvar_string (struct internalvar *var, const char *string)
2476 {
2477 /* Clean up old contents. */
2478 clear_internalvar (var);
2479
2480 var->kind = INTERNALVAR_STRING;
2481 var->u.string = xstrdup (string);
2482 }
2483
2484 static void
2485 set_internalvar_function (struct internalvar *var, struct internal_function *f)
2486 {
2487 /* Clean up old contents. */
2488 clear_internalvar (var);
2489
2490 var->kind = INTERNALVAR_FUNCTION;
2491 var->u.fn.function = f;
2492 var->u.fn.canonical = 1;
2493 /* Variables installed here are always the canonical version. */
2494 }
2495
2496 void
2497 clear_internalvar (struct internalvar *var)
2498 {
2499 /* Clean up old contents. */
2500 switch (var->kind)
2501 {
2502 case INTERNALVAR_VALUE:
2503 value_free (var->u.value);
2504 break;
2505
2506 case INTERNALVAR_STRING:
2507 xfree (var->u.string);
2508 break;
2509
2510 case INTERNALVAR_MAKE_VALUE:
2511 if (var->u.make_value.functions->destroy != NULL)
2512 var->u.make_value.functions->destroy (var->u.make_value.data);
2513 break;
2514
2515 default:
2516 break;
2517 }
2518
2519 /* Reset to void kind. */
2520 var->kind = INTERNALVAR_VOID;
2521 }
2522
2523 char *
2524 internalvar_name (const struct internalvar *var)
2525 {
2526 return var->name;
2527 }
2528
2529 static struct internal_function *
2530 create_internal_function (const char *name,
2531 internal_function_fn handler, void *cookie)
2532 {
2533 struct internal_function *ifn = XNEW (struct internal_function);
2534
2535 ifn->name = xstrdup (name);
2536 ifn->handler = handler;
2537 ifn->cookie = cookie;
2538 return ifn;
2539 }
2540
2541 char *
2542 value_internal_function_name (struct value *val)
2543 {
2544 struct internal_function *ifn;
2545 int result;
2546
2547 gdb_assert (VALUE_LVAL (val) == lval_internalvar);
2548 result = get_internalvar_function (VALUE_INTERNALVAR (val), &ifn);
2549 gdb_assert (result);
2550
2551 return ifn->name;
2552 }
2553
2554 struct value *
2555 call_internal_function (struct gdbarch *gdbarch,
2556 const struct language_defn *language,
2557 struct value *func, int argc, struct value **argv)
2558 {
2559 struct internal_function *ifn;
2560 int result;
2561
2562 gdb_assert (VALUE_LVAL (func) == lval_internalvar);
2563 result = get_internalvar_function (VALUE_INTERNALVAR (func), &ifn);
2564 gdb_assert (result);
2565
2566 return (*ifn->handler) (gdbarch, language, ifn->cookie, argc, argv);
2567 }
2568
/* The 'function' command.  This does nothing -- it is just a
   placeholder to let "help function NAME" work.  This is also used as
   the implementation of the sub-command that is created when
   registering an internal function.  */

static void
function_command (char *command, int from_tty)
{
  /* Intentionally empty: only the command's help text matters.  */
}
2578
2579 /* Clean up if an internal function's command is destroyed. */
2580 static void
2581 function_destroyer (struct cmd_list_element *self, void *ignore)
2582 {
2583 xfree ((char *) self->name);
2584 xfree ((char *) self->doc);
2585 }
2586
2587 /* Add a new internal function. NAME is the name of the function; DOC
2588 is a documentation string describing the function. HANDLER is
2589 called when the function is invoked. COOKIE is an arbitrary
2590 pointer which is passed to HANDLER and is intended for "user
2591 data". */
2592 void
2593 add_internal_function (const char *name, const char *doc,
2594 internal_function_fn handler, void *cookie)
2595 {
2596 struct cmd_list_element *cmd;
2597 struct internal_function *ifn;
2598 struct internalvar *var = lookup_internalvar (name);
2599
2600 ifn = create_internal_function (name, handler, cookie);
2601 set_internalvar_function (var, ifn);
2602
2603 cmd = add_cmd (xstrdup (name), no_class, function_command, (char *) doc,
2604 &functionlist);
2605 cmd->destroyer = function_destroyer;
2606 }
2607
2608 /* Update VALUE before discarding OBJFILE. COPIED_TYPES is used to
2609 prevent cycles / duplicates. */
2610
2611 void
2612 preserve_one_value (struct value *value, struct objfile *objfile,
2613 htab_t copied_types)
2614 {
2615 if (TYPE_OBJFILE (value->type) == objfile)
2616 value->type = copy_type_recursive (objfile, value->type, copied_types);
2617
2618 if (TYPE_OBJFILE (value->enclosing_type) == objfile)
2619 value->enclosing_type = copy_type_recursive (objfile,
2620 value->enclosing_type,
2621 copied_types);
2622 }
2623
2624 /* Likewise for internal variable VAR. */
2625
2626 static void
2627 preserve_one_internalvar (struct internalvar *var, struct objfile *objfile,
2628 htab_t copied_types)
2629 {
2630 switch (var->kind)
2631 {
2632 case INTERNALVAR_INTEGER:
2633 if (var->u.integer.type && TYPE_OBJFILE (var->u.integer.type) == objfile)
2634 var->u.integer.type
2635 = copy_type_recursive (objfile, var->u.integer.type, copied_types);
2636 break;
2637
2638 case INTERNALVAR_VALUE:
2639 preserve_one_value (var->u.value, objfile, copied_types);
2640 break;
2641 }
2642 }
2643
2644 /* Update the internal variables and value history when OBJFILE is
2645 discarded; we must copy the types out of the objfile. New global types
2646 will be created for every convenience variable which currently points to
2647 this objfile's types, and the convenience variables will be adjusted to
2648 use the new global types. */
2649
2650 void
2651 preserve_values (struct objfile *objfile)
2652 {
2653 htab_t copied_types;
2654 struct value_history_chunk *cur;
2655 struct internalvar *var;
2656 int i;
2657
2658 /* Create the hash table. We allocate on the objfile's obstack, since
2659 it is soon to be deleted. */
2660 copied_types = create_copied_types_hash (objfile);
2661
2662 for (cur = value_history_chain; cur; cur = cur->next)
2663 for (i = 0; i < VALUE_HISTORY_CHUNK; i++)
2664 if (cur->values[i])
2665 preserve_one_value (cur->values[i], objfile, copied_types);
2666
2667 for (var = internalvars; var; var = var->next)
2668 preserve_one_internalvar (var, objfile, copied_types);
2669
2670 preserve_ext_lang_values (objfile, copied_types);
2671
2672 htab_delete (copied_types);
2673 }
2674
2675 static void
2676 show_convenience (char *ignore, int from_tty)
2677 {
2678 struct gdbarch *gdbarch = get_current_arch ();
2679 struct internalvar *var;
2680 int varseen = 0;
2681 struct value_print_options opts;
2682
2683 get_user_print_options (&opts);
2684 for (var = internalvars; var; var = var->next)
2685 {
2686
2687 if (!varseen)
2688 {
2689 varseen = 1;
2690 }
2691 printf_filtered (("$%s = "), var->name);
2692
2693 TRY
2694 {
2695 struct value *val;
2696
2697 val = value_of_internalvar (gdbarch, var);
2698 value_print (val, gdb_stdout, &opts);
2699 }
2700 CATCH (ex, RETURN_MASK_ERROR)
2701 {
2702 fprintf_filtered (gdb_stdout, _("<error: %s>"), ex.message);
2703 }
2704 END_CATCH
2705
2706 printf_filtered (("\n"));
2707 }
2708 if (!varseen)
2709 {
2710 /* This text does not mention convenience functions on purpose.
2711 The user can't create them except via Python, and if Python support
2712 is installed this message will never be printed ($_streq will
2713 exist). */
2714 printf_unfiltered (_("No debugger convenience variables now defined.\n"
2715 "Convenience variables have "
2716 "names starting with \"$\";\n"
2717 "use \"set\" as in \"set "
2718 "$foo = 5\" to define them.\n"));
2719 }
2720 }
2721 \f
2722 /* Return the TYPE_CODE_XMETHOD value corresponding to WORKER. */
2723
2724 struct value *
2725 value_of_xmethod (struct xmethod_worker *worker)
2726 {
2727 if (worker->value == NULL)
2728 {
2729 struct value *v;
2730
2731 v = allocate_value (builtin_type (target_gdbarch ())->xmethod);
2732 v->lval = lval_xcallable;
2733 v->location.xm_worker = worker;
2734 v->modifiable = 0;
2735 worker->value = v;
2736 }
2737
2738 return worker->value;
2739 }
2740
2741 /* Return the type of the result of TYPE_CODE_XMETHOD value METHOD. */
2742
2743 struct type *
2744 result_type_of_xmethod (struct value *method, int argc, struct value **argv)
2745 {
2746 gdb_assert (TYPE_CODE (value_type (method)) == TYPE_CODE_XMETHOD
2747 && method->lval == lval_xcallable && argc > 0);
2748
2749 return get_xmethod_result_type (method->location.xm_worker,
2750 argv[0], argv + 1, argc - 1);
2751 }
2752
2753 /* Call the xmethod corresponding to the TYPE_CODE_XMETHOD value METHOD. */
2754
2755 struct value *
2756 call_xmethod (struct value *method, int argc, struct value **argv)
2757 {
2758 gdb_assert (TYPE_CODE (value_type (method)) == TYPE_CODE_XMETHOD
2759 && method->lval == lval_xcallable && argc > 0);
2760
2761 return invoke_xmethod (method->location.xm_worker,
2762 argv[0], argv + 1, argc - 1);
2763 }
2764 \f
2765 /* Extract a value as a C number (either long or double).
2766 Knows how to convert fixed values to double, or
2767 floating values to long.
2768 Does not deallocate the value. */
2769
2770 LONGEST
2771 value_as_long (struct value *val)
2772 {
2773 /* This coerces arrays and functions, which is necessary (e.g.
2774 in disassemble_command). It also dereferences references, which
2775 I suspect is the most logical thing to do. */
2776 val = coerce_array (val);
2777 return unpack_long (value_type (val), value_contents (val));
2778 }
2779
2780 DOUBLEST
2781 value_as_double (struct value *val)
2782 {
2783 DOUBLEST foo;
2784 int inv;
2785
2786 foo = unpack_double (value_type (val), value_contents (val), &inv);
2787 if (inv)
2788 error (_("Invalid floating value found in program."));
2789 return foo;
2790 }
2791
/* Extract a value as a C pointer.  Does not deallocate the value.
   Note that val's type may not actually be a pointer; value_as_long
   handles all the cases.  */
CORE_ADDR
value_as_address (struct value *val)
{
  struct gdbarch *gdbarch = get_type_arch (value_type (val));

  /* Assume a CORE_ADDR can fit in a LONGEST (for now).  Not sure
     whether we want this to be true eventually.  */
#if 0
  /* Deliberately disabled alternative implementation; kept for the
     record.  gdbarch_addr_bits_remove is wrong if we are being called
     for a non-address (e.g. argument to "signal", "info break", etc.),
     or for pointers to char, in which the low bits *are*
     significant.  */
  return gdbarch_addr_bits_remove (gdbarch, value_as_long (val));
#else

  /* There are several targets (IA-64, PowerPC, and others) which
     don't represent pointers to functions as simply the address of
     the function's entry point.  For example, on the IA-64, a
     function pointer points to a two-word descriptor, generated by
     the linker, which contains the function's entry point, and the
     value the IA-64 "global pointer" register should have --- to
     support position-independent code.  The linker generates
     descriptors only for those functions whose addresses are taken.

     On such targets, it's difficult for GDB to convert an arbitrary
     function address into a function pointer; it has to either find
     an existing descriptor for that function, or call malloc and
     build its own.  On some targets, it is impossible for GDB to
     build a descriptor at all: the descriptor must contain a jump
     instruction; data memory cannot be executed; and code memory
     cannot be modified.

     Upon entry to this function, if VAL is a value of type `function'
     (that is, TYPE_CODE (VALUE_TYPE (val)) == TYPE_CODE_FUNC), then
     value_address (val) is the address of the function.  This is what
     you'll get if you evaluate an expression like `main'.  The call
     to COERCE_ARRAY below actually does all the usual unary
     conversions, which includes converting values of type `function'
     to `pointer to function'.  This is the challenging conversion
     discussed above.  Then, `unpack_long' will convert that pointer
     back into an address.

     So, suppose the user types `disassemble foo' on an architecture
     with a strange function pointer representation, on which GDB
     cannot build its own descriptors, and suppose further that `foo'
     has no linker-built descriptor.  The address->pointer conversion
     will signal an error and prevent the command from running, even
     though the next step would have been to convert the pointer
     directly back into the same address.

     The following shortcut avoids this whole mess.  If VAL is a
     function, just return its address directly.  */
  if (TYPE_CODE (value_type (val)) == TYPE_CODE_FUNC
      || TYPE_CODE (value_type (val)) == TYPE_CODE_METHOD)
    return value_address (val);

  val = coerce_array (val);

  /* Some architectures (e.g. Harvard), map instruction and data
     addresses onto a single large unified address space.  For
     instance: An architecture may consider a large integer in the
     range 0x10000000 .. 0x1000ffff to already represent a data
     addresses (hence not need a pointer to address conversion) while
     a small integer would still need to be converted integer to
     pointer to address.  Just assume such architectures handle all
     integer conversions in a single function.  */

  /* JimB writes:

     I think INTEGER_TO_ADDRESS is a good idea as proposed --- but we
     must admonish GDB hackers to make sure its behavior matches the
     compiler's, whenever possible.

     In general, I think GDB should evaluate expressions the same way
     the compiler does.  When the user copies an expression out of
     their source code and hands it to a `print' command, they should
     get the same value the compiler would have computed.  Any
     deviation from this rule can cause major confusion and annoyance,
     and needs to be justified carefully.  In other words, GDB doesn't
     really have the freedom to do these conversions in clever and
     useful ways.

     AndrewC pointed out that users aren't complaining about how GDB
     casts integers to pointers; they are complaining that they can't
     take an address from a disassembly listing and give it to `x/i'.
     This is certainly important.

     Adding an architecture method like integer_to_address() certainly
     makes it possible for GDB to "get it right" in all circumstances
     --- the target has complete control over how things get done, so
     people can Do The Right Thing for their target without breaking
     anyone else.  The standard doesn't specify how integers get
     converted to pointers; usually, the ABI doesn't either, but
     ABI-specific code is a more reasonable place to handle it.  */

  /* Non-pointer, non-reference values go through the target's
     integer-to-address hook when one is provided.  */
  if (TYPE_CODE (value_type (val)) != TYPE_CODE_PTR
      && TYPE_CODE (value_type (val)) != TYPE_CODE_REF
      && gdbarch_integer_to_address_p (gdbarch))
    return gdbarch_integer_to_address (gdbarch, value_type (val),
				       value_contents (val));

  return unpack_long (value_type (val), value_contents (val));
#endif
}
2898 \f
2899 /* Unpack raw data (copied from debugee, target byte order) at VALADDR
2900 as a long, or as a double, assuming the raw data is described
2901 by type TYPE. Knows how to convert different sizes of values
2902 and can convert between fixed and floating point. We don't assume
2903 any alignment for the raw data. Return value is in host byte order.
2904
2905 If you want functions and arrays to be coerced to pointers, and
2906 references to be dereferenced, call value_as_long() instead.
2907
2908 C++: It is assumed that the front-end has taken care of
2909 all matters concerning pointers to members. A pointer
2910 to member which reaches here is considered to be equivalent
2911 to an INT (or some size). After all, it is only an offset. */
2912
2913 LONGEST
2914 unpack_long (struct type *type, const gdb_byte *valaddr)
2915 {
2916 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
2917 enum type_code code = TYPE_CODE (type);
2918 int len = TYPE_LENGTH (type);
2919 int nosign = TYPE_UNSIGNED (type);
2920
2921 switch (code)
2922 {
2923 case TYPE_CODE_TYPEDEF:
2924 return unpack_long (check_typedef (type), valaddr);
2925 case TYPE_CODE_ENUM:
2926 case TYPE_CODE_FLAGS:
2927 case TYPE_CODE_BOOL:
2928 case TYPE_CODE_INT:
2929 case TYPE_CODE_CHAR:
2930 case TYPE_CODE_RANGE:
2931 case TYPE_CODE_MEMBERPTR:
2932 if (nosign)
2933 return extract_unsigned_integer (valaddr, len, byte_order);
2934 else
2935 return extract_signed_integer (valaddr, len, byte_order);
2936
2937 case TYPE_CODE_FLT:
2938 return (LONGEST) extract_typed_floating (valaddr, type);
2939
2940 case TYPE_CODE_DECFLOAT:
2941 /* libdecnumber has a function to convert from decimal to integer, but
2942 it doesn't work when the decimal number has a fractional part. */
2943 return (LONGEST) decimal_to_doublest (valaddr, len, byte_order);
2944
2945 case TYPE_CODE_PTR:
2946 case TYPE_CODE_REF:
2947 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
2948 whether we want this to be true eventually. */
2949 return extract_typed_address (valaddr, type);
2950
2951 default:
2952 error (_("Value can't be converted to integer."));
2953 }
2954 return 0; /* Placate lint. */
2955 }
2956
2957 /* Return a double value from the specified type and address.
2958 INVP points to an int which is set to 0 for valid value,
2959 1 for invalid value (bad float format). In either case,
2960 the returned double is OK to use. Argument is in target
2961 format, result is in host format. */
2962
2963 DOUBLEST
2964 unpack_double (struct type *type, const gdb_byte *valaddr, int *invp)
2965 {
2966 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
2967 enum type_code code;
2968 int len;
2969 int nosign;
2970
2971 *invp = 0; /* Assume valid. */
2972 type = check_typedef (type);
2973 code = TYPE_CODE (type);
2974 len = TYPE_LENGTH (type);
2975 nosign = TYPE_UNSIGNED (type);
2976 if (code == TYPE_CODE_FLT)
2977 {
2978 /* NOTE: cagney/2002-02-19: There was a test here to see if the
2979 floating-point value was valid (using the macro
2980 INVALID_FLOAT). That test/macro have been removed.
2981
2982 It turns out that only the VAX defined this macro and then
2983 only in a non-portable way. Fixing the portability problem
2984 wouldn't help since the VAX floating-point code is also badly
2985 bit-rotten. The target needs to add definitions for the
2986 methods gdbarch_float_format and gdbarch_double_format - these
2987 exactly describe the target floating-point format. The
2988 problem here is that the corresponding floatformat_vax_f and
2989 floatformat_vax_d values these methods should be set to are
2990 also not defined either. Oops!
2991
2992 Hopefully someone will add both the missing floatformat
2993 definitions and the new cases for floatformat_is_valid (). */
2994
2995 if (!floatformat_is_valid (floatformat_from_type (type), valaddr))
2996 {
2997 *invp = 1;
2998 return 0.0;
2999 }
3000
3001 return extract_typed_floating (valaddr, type);
3002 }
3003 else if (code == TYPE_CODE_DECFLOAT)
3004 return decimal_to_doublest (valaddr, len, byte_order);
3005 else if (nosign)
3006 {
3007 /* Unsigned -- be sure we compensate for signed LONGEST. */
3008 return (ULONGEST) unpack_long (type, valaddr);
3009 }
3010 else
3011 {
3012 /* Signed -- we are OK with unpack_long. */
3013 return unpack_long (type, valaddr);
3014 }
3015 }
3016
3017 /* Unpack raw data (copied from debugee, target byte order) at VALADDR
3018 as a CORE_ADDR, assuming the raw data is described by type TYPE.
3019 We don't assume any alignment for the raw data. Return value is in
3020 host byte order.
3021
3022 If you want functions and arrays to be coerced to pointers, and
3023 references to be dereferenced, call value_as_address() instead.
3024
3025 C++: It is assumed that the front-end has taken care of
3026 all matters concerning pointers to members. A pointer
3027 to member which reaches here is considered to be equivalent
3028 to an INT (or some size). After all, it is only an offset. */
3029
3030 CORE_ADDR
3031 unpack_pointer (struct type *type, const gdb_byte *valaddr)
3032 {
3033 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
3034 whether we want this to be true eventually. */
3035 return unpack_long (type, valaddr);
3036 }
3037
3038 \f
3039 /* Get the value of the FIELDNO'th field (which must be static) of
3040 TYPE. */
3041
3042 struct value *
3043 value_static_field (struct type *type, int fieldno)
3044 {
3045 struct value *retval;
3046
3047 switch (TYPE_FIELD_LOC_KIND (type, fieldno))
3048 {
3049 case FIELD_LOC_KIND_PHYSADDR:
3050 retval = value_at_lazy (TYPE_FIELD_TYPE (type, fieldno),
3051 TYPE_FIELD_STATIC_PHYSADDR (type, fieldno));
3052 break;
3053 case FIELD_LOC_KIND_PHYSNAME:
3054 {
3055 const char *phys_name = TYPE_FIELD_STATIC_PHYSNAME (type, fieldno);
3056 /* TYPE_FIELD_NAME (type, fieldno); */
3057 struct block_symbol sym = lookup_symbol (phys_name, 0, VAR_DOMAIN, 0);
3058
3059 if (sym.symbol == NULL)
3060 {
3061 /* With some compilers, e.g. HP aCC, static data members are
3062 reported as non-debuggable symbols. */
3063 struct bound_minimal_symbol msym
3064 = lookup_minimal_symbol (phys_name, NULL, NULL);
3065
3066 if (!msym.minsym)
3067 return allocate_optimized_out_value (type);
3068 else
3069 {
3070 retval = value_at_lazy (TYPE_FIELD_TYPE (type, fieldno),
3071 BMSYMBOL_VALUE_ADDRESS (msym));
3072 }
3073 }
3074 else
3075 retval = value_of_variable (sym.symbol, sym.block);
3076 break;
3077 }
3078 default:
3079 gdb_assert_not_reached ("unexpected field location kind");
3080 }
3081
3082 return retval;
3083 }
3084
3085 /* Change the enclosing type of a value object VAL to NEW_ENCL_TYPE.
3086 You have to be careful here, since the size of the data area for the value
3087 is set by the length of the enclosing type. So if NEW_ENCL_TYPE is bigger
3088 than the old enclosing type, you have to allocate more space for the
3089 data. */
3090
3091 void
3092 set_value_enclosing_type (struct value *val, struct type *new_encl_type)
3093 {
3094 if (TYPE_LENGTH (new_encl_type) > TYPE_LENGTH (value_enclosing_type (val)))
3095 {
3096 check_type_length_before_alloc (new_encl_type);
3097 val->contents
3098 = (gdb_byte *) xrealloc (val->contents, TYPE_LENGTH (new_encl_type));
3099 }
3100
3101 val->enclosing_type = new_encl_type;
3102 }
3103
/* Given a value ARG1 (offset by OFFSET bytes)
   of a struct or union type ARG_TYPE,
   extract and return the value of one of its (non-static) fields.
   FIELDNO says which field.

   The result shares ARG1's location; the register/frame identity of
   ARG1 is propagated to the result at the end.  */

struct value *
value_primitive_field (struct value *arg1, int offset,
		       int fieldno, struct type *arg_type)
{
  struct value *v;
  struct type *type;
  struct gdbarch *arch = get_value_arch (arg1);
  int unit_size = gdbarch_addressable_memory_unit_size (arch);

  arg_type = check_typedef (arg_type);
  type = TYPE_FIELD_TYPE (arg_type, fieldno);

  /* Call check_typedef on our type to make sure that, if TYPE
     is a TYPE_CODE_TYPEDEF, its length is set to the length
     of the target type instead of zero.  However, we do not
     replace the typedef type by the target type, because we want
     to keep the typedef in order to be able to print the type
     description correctly.  */
  check_typedef (type);

  if (TYPE_FIELD_BITSIZE (arg_type, fieldno))
    {
      /* Handle packed fields.

	 Create a new value for the bitfield, with bitpos and bitsize
	 set.  If possible, arrange offset and bitpos so that we can
	 do a single aligned read of the size of the containing type.
	 Otherwise, adjust offset to the byte containing the first
	 bit.  Assume that the address, offset, and embedded offset
	 are sufficiently aligned.  */

      int bitpos = TYPE_FIELD_BITPOS (arg_type, fieldno);
      int container_bitsize = TYPE_LENGTH (type) * 8;

      v = allocate_value_lazy (type);
      v->bitsize = TYPE_FIELD_BITSIZE (arg_type, fieldno);
      /* Prefer keeping the whole bitfield inside one container-sized,
	 LONGEST-representable unit; otherwise fall back to the byte
	 that holds the first bit.  */
      if ((bitpos % container_bitsize) + v->bitsize <= container_bitsize
	  && TYPE_LENGTH (type) <= (int) sizeof (LONGEST))
	v->bitpos = bitpos % container_bitsize;
      else
	v->bitpos = bitpos % 8;
      v->offset = (value_embedded_offset (arg1)
		   + offset
		   + (bitpos - v->bitpos) / 8);
      /* Bitfields are read through their parent value.  */
      set_value_parent (v, arg1);
      if (!value_lazy (arg1))
	value_fetch_lazy (v);
    }
  else if (fieldno < TYPE_N_BASECLASSES (arg_type))
    {
      /* This field is actually a base subobject, so preserve the
	 entire object's contents for later references to virtual
	 bases, etc.  */
      int boffset;

      /* Lazy register values with offsets are not supported.  */
      if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
	value_fetch_lazy (arg1);

      /* We special case virtual inheritance here because this
	 requires access to the contents, which we would rather avoid
	 for references to ordinary fields of unavailable values.  */
      if (BASETYPE_VIA_VIRTUAL (arg_type, fieldno))
	boffset = baseclass_offset (arg_type, fieldno,
				    value_contents (arg1),
				    value_embedded_offset (arg1),
				    value_address (arg1),
				    arg1);
      else
	boffset = TYPE_FIELD_BITPOS (arg_type, fieldno) / 8;

      if (value_lazy (arg1))
	v = allocate_value_lazy (value_enclosing_type (arg1));
      else
	{
	  v = allocate_value (value_enclosing_type (arg1));
	  value_contents_copy_raw (v, 0, arg1, 0,
				   TYPE_LENGTH (value_enclosing_type (arg1)));
	}
      v->type = type;
      v->offset = value_offset (arg1);
      v->embedded_offset = offset + value_embedded_offset (arg1) + boffset;
    }
  else if (NULL != TYPE_DATA_LOCATION (type))
    {
      /* Field is a dynamic data member.  */

      gdb_assert (0 == offset);
      /* We expect an already resolved data location.  */
      gdb_assert (PROP_CONST == TYPE_DATA_LOCATION_KIND (type));
      /* For dynamic data types defer memory allocation
         until we actual access the value.  */
      v = allocate_value_lazy (type);
    }
  else
    {
      /* Plain old data member */
      offset += (TYPE_FIELD_BITPOS (arg_type, fieldno)
	         / (HOST_CHAR_BIT * unit_size));

      /* Lazy register values with offsets are not supported.  */
      if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
	value_fetch_lazy (arg1);

      if (value_lazy (arg1))
	v = allocate_value_lazy (type);
      else
	{
	  v = allocate_value (type);
	  value_contents_copy_raw (v, value_embedded_offset (v),
				   arg1, value_embedded_offset (arg1) + offset,
				   type_length_units (type));
	}
      v->offset = (value_offset (arg1) + offset
		   + value_embedded_offset (arg1));
    }
  /* The field lives wherever ARG1 lives.  */
  set_value_component_location (v, arg1);
  VALUE_REGNUM (v) = VALUE_REGNUM (arg1);
  VALUE_FRAME_ID (v) = VALUE_FRAME_ID (arg1);
  return v;
}
3230
/* Given a value ARG1 of a struct or union type,
   extract and return the value of one of its (non-static) fields.
   FIELDNO says which field.  */

struct value *
value_field (struct value *arg1, int fieldno)
{
  struct type *struct_type = value_type (arg1);

  /* Delegate to the general field extractor, with no extra byte
     offset into the object.  */
  return value_primitive_field (arg1, 0, fieldno, struct_type);
}
3240
/* Return a non-virtual function as a value.
   F is the list of member functions which contains the desired method.
   J is an index into F which provides the desired method.

   We only use the symbol for its address, so be happy with either a
   full symbol or a minimal symbol.  Returns NULL if neither kind of
   symbol can be found for the method's physical name.  */

struct value *
value_fn_field (struct value **arg1p, struct fn_field *f,
		int j, struct type *type,
		int offset)
{
  struct value *v;
  struct type *ftype = TYPE_FN_FIELD_TYPE (f, j);
  const char *physname = TYPE_FN_FIELD_PHYSNAME (f, j);
  struct symbol *sym;
  struct bound_minimal_symbol msym;

  /* Prefer a full symbol; fall back to a minimal symbol only if the
     full lookup fails.  */
  sym = lookup_symbol (physname, 0, VAR_DOMAIN, 0).symbol;
  if (sym != NULL)
    {
      /* MSYM is unused in this case; clear it so the later SYM/MSYM
	 branches see a well-defined value.  */
      memset (&msym, 0, sizeof (msym));
    }
  else
    {
      gdb_assert (sym == NULL);
      msym = lookup_bound_minimal_symbol (physname);
      if (msym.minsym == NULL)
	return NULL;
    }

  v = allocate_value (ftype);
  if (sym)
    {
      /* Use the start address of the symbol's block as the function's
	 address.  */
      set_value_address (v, BLOCK_START (SYMBOL_BLOCK_VALUE (sym)));
    }
  else
    {
      /* The minimal symbol might point to a function descriptor;
	 resolve it to the actual code address instead.  */
      struct objfile *objfile = msym.objfile;
      struct gdbarch *gdbarch = get_objfile_arch (objfile);

      set_value_address (v,
	gdbarch_convert_from_func_ptr_addr
	   (gdbarch, BMSYMBOL_VALUE_ADDRESS (msym), &current_target));
    }

  if (arg1p)
    {
      /* If necessary, re-view the `this' object as the type that
	 declares the method.  */
      if (type != value_type (*arg1p))
	*arg1p = value_ind (value_cast (lookup_pointer_type (type),
					value_addr (*arg1p)));

      /* Move the `this' pointer according to the offset.
	 VALUE_OFFSET (*arg1p) += offset;  */
    }

  return v;
}
3301
3302 \f
3303
/* Unpack a bitfield of the specified FIELD_TYPE, from the object at
   VALADDR, and return the result as a LONGEST.
   The bitfield starts at BITPOS bits and contains BITSIZE bits; a
   BITSIZE of zero means "not a bitfield" and the whole FIELD_TYPE
   length is read instead.

   Extracting bits depends on endianness of the machine.  Compute the
   number of least significant bits to discard.  For big endian machines,
   we compute the total number of bits in the anonymous object, subtract
   off the bit count from the MSB of the object to the MSB of the
   bitfield, then the size of the bitfield, which leaves the LSB discard
   count.  For little endian machines, the discard count is simply the
   number of bits from the LSB of the anonymous object to the LSB of the
   bitfield.

   If the field is signed, we also do sign extension.  */

static LONGEST
unpack_bits_as_long (struct type *field_type, const gdb_byte *valaddr,
		     int bitpos, int bitsize)
{
  enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (field_type));
  ULONGEST val;
  ULONGEST valmask;
  int lsbcount;
  int bytes_read;
  int read_offset;

  /* Read the minimum number of bytes required; there may not be
     enough bytes to read an entire ULONGEST.  */
  field_type = check_typedef (field_type);
  if (bitsize)
    bytes_read = ((bitpos % 8) + bitsize + 7) / 8;
  else
    bytes_read = TYPE_LENGTH (field_type);

  read_offset = bitpos / 8;

  val = extract_unsigned_integer (valaddr + read_offset,
				  bytes_read, byte_order);

  /* Extract bits.  See comment above.  */

  if (gdbarch_bits_big_endian (get_type_arch (field_type)))
    lsbcount = (bytes_read * 8 - bitpos % 8 - bitsize);
  else
    lsbcount = (bitpos % 8);
  val >>= lsbcount;

  /* If the field does not entirely fill a LONGEST, then zero the sign bits.
     If the field is signed, and is negative, then sign extend.  */

  if ((bitsize > 0) && (bitsize < 8 * (int) sizeof (val)))
    {
      valmask = (((ULONGEST) 1) << bitsize) - 1;
      val &= valmask;
      if (!TYPE_UNSIGNED (field_type))
	{
	  /* Test the sign bit of the field; if set, OR in the
	     extension bits above it.  */
	  if (val & (valmask ^ (valmask >> 1)))
	    {
	      val |= ~valmask;
	    }
	}
    }

  return val;
}
3369
3370 /* Unpack a field FIELDNO of the specified TYPE, from the object at
3371 VALADDR + EMBEDDED_OFFSET. VALADDR points to the contents of
3372 ORIGINAL_VALUE, which must not be NULL. See
3373 unpack_value_bits_as_long for more details. */
3374
3375 int
3376 unpack_value_field_as_long (struct type *type, const gdb_byte *valaddr,
3377 int embedded_offset, int fieldno,
3378 const struct value *val, LONGEST *result)
3379 {
3380 int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3381 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3382 struct type *field_type = TYPE_FIELD_TYPE (type, fieldno);
3383 int bit_offset;
3384
3385 gdb_assert (val != NULL);
3386
3387 bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos;
3388 if (value_bits_any_optimized_out (val, bit_offset, bitsize)
3389 || !value_bits_available (val, bit_offset, bitsize))
3390 return 0;
3391
3392 *result = unpack_bits_as_long (field_type, valaddr + embedded_offset,
3393 bitpos, bitsize);
3394 return 1;
3395 }
3396
3397 /* Unpack a field FIELDNO of the specified TYPE, from the anonymous
3398 object at VALADDR. See unpack_bits_as_long for more details. */
3399
3400 LONGEST
3401 unpack_field_as_long (struct type *type, const gdb_byte *valaddr, int fieldno)
3402 {
3403 int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3404 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3405 struct type *field_type = TYPE_FIELD_TYPE (type, fieldno);
3406
3407 return unpack_bits_as_long (field_type, valaddr, bitpos, bitsize);
3408 }
3409
3410 /* Unpack a bitfield of BITSIZE bits found at BITPOS in the object at
3411 VALADDR + EMBEDDEDOFFSET that has the type of DEST_VAL and store
3412 the contents in DEST_VAL, zero or sign extending if the type of
3413 DEST_VAL is wider than BITSIZE. VALADDR points to the contents of
3414 VAL. If the VAL's contents required to extract the bitfield from
3415 are unavailable/optimized out, DEST_VAL is correspondingly
3416 marked unavailable/optimized out. */
3417
3418 void
3419 unpack_value_bitfield (struct value *dest_val,
3420 int bitpos, int bitsize,
3421 const gdb_byte *valaddr, int embedded_offset,
3422 const struct value *val)
3423 {
3424 enum bfd_endian byte_order;
3425 int src_bit_offset;
3426 int dst_bit_offset;
3427 LONGEST num;
3428 struct type *field_type = value_type (dest_val);
3429
3430 /* First, unpack and sign extend the bitfield as if it was wholly
3431 available. Invalid/unavailable bits are read as zero, but that's
3432 OK, as they'll end up marked below. */
3433 byte_order = gdbarch_byte_order (get_type_arch (field_type));
3434 num = unpack_bits_as_long (field_type, valaddr + embedded_offset,
3435 bitpos, bitsize);
3436 store_signed_integer (value_contents_raw (dest_val),
3437 TYPE_LENGTH (field_type), byte_order, num);
3438
3439 /* Now copy the optimized out / unavailability ranges to the right
3440 bits. */
3441 src_bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos;
3442 if (byte_order == BFD_ENDIAN_BIG)
3443 dst_bit_offset = TYPE_LENGTH (field_type) * TARGET_CHAR_BIT - bitsize;
3444 else
3445 dst_bit_offset = 0;
3446 value_ranges_copy_adjusted (dest_val, dst_bit_offset,
3447 val, src_bit_offset, bitsize);
3448 }
3449
3450 /* Return a new value with type TYPE, which is FIELDNO field of the
3451 object at VALADDR + EMBEDDEDOFFSET. VALADDR points to the contents
3452 of VAL. If the VAL's contents required to extract the bitfield
3453 from are unavailable/optimized out, the new value is
3454 correspondingly marked unavailable/optimized out. */
3455
3456 struct value *
3457 value_field_bitfield (struct type *type, int fieldno,
3458 const gdb_byte *valaddr,
3459 int embedded_offset, const struct value *val)
3460 {
3461 int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3462 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3463 struct value *res_val = allocate_value (TYPE_FIELD_TYPE (type, fieldno));
3464
3465 unpack_value_bitfield (res_val, bitpos, bitsize,
3466 valaddr, embedded_offset, val);
3467
3468 return res_val;
3469 }
3470
/* Modify the value of a bitfield.  ADDR points to a block of memory in
   target byte order; the bitfield starts in the byte pointed to.  FIELDVAL
   is the desired value of the field, in host byte order.  BITPOS and BITSIZE
   indicate which bits (in target bit order) comprise the bitfield.
   Requires 0 < BITSIZE <= lbits, 0 <= BITPOS % 8 + BITSIZE <= lbits, and
   0 <= BITPOS, where lbits is the size of a LONGEST in bits.  */

void
modify_field (struct type *type, gdb_byte *addr,
	      LONGEST fieldval, int bitpos, int bitsize)
{
  enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
  ULONGEST oword;
  /* Mask with the low BITSIZE bits set.  Relies on the precondition
     0 < BITSIZE <= bits-in-ULONGEST, otherwise the shift would be
     undefined.  */
  ULONGEST mask = (ULONGEST) -1 >> (8 * sizeof (ULONGEST) - bitsize);
  int bytesize;

  /* Normalize BITPOS.  */
  addr += bitpos / 8;
  bitpos %= 8;

  /* If a negative fieldval fits in the field in question, chop
     off the sign extension bits.  */
  if ((~fieldval & ~(mask >> 1)) == 0)
    fieldval &= mask;

  /* Warn if value is too big to fit in the field in question.  */
  if (0 != (fieldval & ~mask))
    {
      /* FIXME: would like to include fieldval in the message, but
         we don't have a sprintf_longest.  */
      warning (_("Value does not fit in %d bits."), bitsize);

      /* Truncate it, otherwise adjoining fields may be corrupted.  */
      fieldval &= mask;
    }

  /* Ensure no bytes outside of the modified ones get accessed as it may cause
     false valgrind reports.  */

  bytesize = (bitpos + bitsize + 7) / 8;
  oword = extract_unsigned_integer (addr, bytesize, byte_order);

  /* Shifting for bit field depends on endianness of the target machine.  */
  if (gdbarch_bits_big_endian (get_type_arch (type)))
    bitpos = bytesize * 8 - bitpos - bitsize;

  /* Clear the field's old bits, then OR in the new value.  */
  oword &= ~(mask << bitpos);
  oword |= fieldval << bitpos;

  store_unsigned_integer (addr, bytesize, byte_order, oword);
}
3522 \f
3523 /* Pack NUM into BUF using a target format of TYPE. */
3524
3525 void
3526 pack_long (gdb_byte *buf, struct type *type, LONGEST num)
3527 {
3528 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
3529 int len;
3530
3531 type = check_typedef (type);
3532 len = TYPE_LENGTH (type);
3533
3534 switch (TYPE_CODE (type))
3535 {
3536 case TYPE_CODE_INT:
3537 case TYPE_CODE_CHAR:
3538 case TYPE_CODE_ENUM:
3539 case TYPE_CODE_FLAGS:
3540 case TYPE_CODE_BOOL:
3541 case TYPE_CODE_RANGE:
3542 case TYPE_CODE_MEMBERPTR:
3543 store_signed_integer (buf, len, byte_order, num);
3544 break;
3545
3546 case TYPE_CODE_REF:
3547 case TYPE_CODE_PTR:
3548 store_typed_address (buf, type, (CORE_ADDR) num);
3549 break;
3550
3551 default:
3552 error (_("Unexpected type (%d) encountered for integer constant."),
3553 TYPE_CODE (type));
3554 }
3555 }
3556
3557
3558 /* Pack NUM into BUF using a target format of TYPE. */
3559
3560 static void
3561 pack_unsigned_long (gdb_byte *buf, struct type *type, ULONGEST num)
3562 {
3563 int len;
3564 enum bfd_endian byte_order;
3565
3566 type = check_typedef (type);
3567 len = TYPE_LENGTH (type);
3568 byte_order = gdbarch_byte_order (get_type_arch (type));
3569
3570 switch (TYPE_CODE (type))
3571 {
3572 case TYPE_CODE_INT:
3573 case TYPE_CODE_CHAR:
3574 case TYPE_CODE_ENUM:
3575 case TYPE_CODE_FLAGS:
3576 case TYPE_CODE_BOOL:
3577 case TYPE_CODE_RANGE:
3578 case TYPE_CODE_MEMBERPTR:
3579 store_unsigned_integer (buf, len, byte_order, num);
3580 break;
3581
3582 case TYPE_CODE_REF:
3583 case TYPE_CODE_PTR:
3584 store_typed_address (buf, type, (CORE_ADDR) num);
3585 break;
3586
3587 default:
3588 error (_("Unexpected type (%d) encountered "
3589 "for unsigned integer constant."),
3590 TYPE_CODE (type));
3591 }
3592 }
3593
3594
3595 /* Convert C numbers into newly allocated values. */
3596
3597 struct value *
3598 value_from_longest (struct type *type, LONGEST num)
3599 {
3600 struct value *val = allocate_value (type);
3601
3602 pack_long (value_contents_raw (val), type, num);
3603 return val;
3604 }
3605
3606
3607 /* Convert C unsigned numbers into newly allocated values. */
3608
3609 struct value *
3610 value_from_ulongest (struct type *type, ULONGEST num)
3611 {
3612 struct value *val = allocate_value (type);
3613
3614 pack_unsigned_long (value_contents_raw (val), type, num);
3615
3616 return val;
3617 }
3618
3619
3620 /* Create a value representing a pointer of type TYPE to the address
3621 ADDR. */
3622
3623 struct value *
3624 value_from_pointer (struct type *type, CORE_ADDR addr)
3625 {
3626 struct value *val = allocate_value (type);
3627
3628 store_typed_address (value_contents_raw (val),
3629 check_typedef (type), addr);
3630 return val;
3631 }
3632
3633
3634 /* Create a value of type TYPE whose contents come from VALADDR, if it
3635 is non-null, and whose memory address (in the inferior) is
3636 ADDRESS. The type of the created value may differ from the passed
3637 type TYPE. Make sure to retrieve values new type after this call.
3638 Note that TYPE is not passed through resolve_dynamic_type; this is
3639 a special API intended for use only by Ada. */
3640
3641 struct value *
3642 value_from_contents_and_address_unresolved (struct type *type,
3643 const gdb_byte *valaddr,
3644 CORE_ADDR address)
3645 {
3646 struct value *v;
3647
3648 if (valaddr == NULL)
3649 v = allocate_value_lazy (type);
3650 else
3651 v = value_from_contents (type, valaddr);
3652 set_value_address (v, address);
3653 VALUE_LVAL (v) = lval_memory;
3654 return v;
3655 }
3656
3657 /* Create a value of type TYPE whose contents come from VALADDR, if it
3658 is non-null, and whose memory address (in the inferior) is
3659 ADDRESS. The type of the created value may differ from the passed
3660 type TYPE. Make sure to retrieve values new type after this call. */
3661
3662 struct value *
3663 value_from_contents_and_address (struct type *type,
3664 const gdb_byte *valaddr,
3665 CORE_ADDR address)
3666 {
3667 struct type *resolved_type = resolve_dynamic_type (type, valaddr, address);
3668 struct type *resolved_type_no_typedef = check_typedef (resolved_type);
3669 struct value *v;
3670
3671 if (valaddr == NULL)
3672 v = allocate_value_lazy (resolved_type);
3673 else
3674 v = value_from_contents (resolved_type, valaddr);
3675 if (TYPE_DATA_LOCATION (resolved_type_no_typedef) != NULL
3676 && TYPE_DATA_LOCATION_KIND (resolved_type_no_typedef) == PROP_CONST)
3677 address = TYPE_DATA_LOCATION_ADDR (resolved_type_no_typedef);
3678 set_value_address (v, address);
3679 VALUE_LVAL (v) = lval_memory;
3680 return v;
3681 }
3682
3683 /* Create a value of type TYPE holding the contents CONTENTS.
3684 The new value is `not_lval'. */
3685
3686 struct value *
3687 value_from_contents (struct type *type, const gdb_byte *contents)
3688 {
3689 struct value *result;
3690
3691 result = allocate_value (type);
3692 memcpy (value_contents_raw (result), contents, TYPE_LENGTH (type));
3693 return result;
3694 }
3695
3696 struct value *
3697 value_from_double (struct type *type, DOUBLEST num)
3698 {
3699 struct value *val = allocate_value (type);
3700 struct type *base_type = check_typedef (type);
3701 enum type_code code = TYPE_CODE (base_type);
3702
3703 if (code == TYPE_CODE_FLT)
3704 {
3705 store_typed_floating (value_contents_raw (val), base_type, num);
3706 }
3707 else
3708 error (_("Unexpected type encountered for floating constant."));
3709
3710 return val;
3711 }
3712
3713 struct value *
3714 value_from_decfloat (struct type *type, const gdb_byte *dec)
3715 {
3716 struct value *val = allocate_value (type);
3717
3718 memcpy (value_contents_raw (val), dec, TYPE_LENGTH (type));
3719 return val;
3720 }
3721
/* Extract a value from the history file.  Input will be of the form
   $digits or $$digits.  See block comment above 'write_dollar_variable'
   for details.  Returns NULL when H is not a history reference (does
   not start with '$', or the numeral runs into an identifier).  On
   success, *ENDP is advanced past the consumed text.  */

struct value *
value_from_history_ref (const char *h, const char **endp)
{
  int index, len;

  /* LEN is the length of the "$" or "$$" prefix.  */
  if (h[0] == '$')
    len = 1;
  else
    return NULL;

  if (h[1] == '$')
    len = 2;

  /* Find length of numeral string.  */
  for (; isdigit (h[len]); len++)
    ;

  /* Make sure numeral string is not part of an identifier.  */
  if (h[len] == '_' || isalpha (h[len]))
    return NULL;

  /* Now collect the index value.  "$$N" counts backwards from the
     end of the history; "$N" is an absolute index.  */
  if (h[1] == '$')
    {
      if (len == 2)
	{
	  /* For some bizarre reason, "$$" is equivalent to "$$1",
	     rather than to "$$0" as it ought to be!  */
	  index = -1;
	  *endp += len;
	}
      else
	{
	  char *local_end;

	  index = -strtol (&h[2], &local_end, 10);
	  *endp = local_end;
	}
    }
  else
    {
      if (len == 1)
	{
	  /* "$" is equivalent to "$0".  */
	  index = 0;
	  *endp += len;
	}
      else
	{
	  char *local_end;

	  index = strtol (&h[1], &local_end, 10);
	  *endp = local_end;
	}
    }

  return access_value_history (index);
}
3784
3785 struct value *
3786 coerce_ref_if_computed (const struct value *arg)
3787 {
3788 const struct lval_funcs *funcs;
3789
3790 if (TYPE_CODE (check_typedef (value_type (arg))) != TYPE_CODE_REF)
3791 return NULL;
3792
3793 if (value_lval_const (arg) != lval_computed)
3794 return NULL;
3795
3796 funcs = value_computed_funcs (arg);
3797 if (funcs->coerce_ref == NULL)
3798 return NULL;
3799
3800 return funcs->coerce_ref (arg);
3801 }
3802
/* Look at value.h for description.  Re-types VALUE (the result of
   dereferencing ORIGINAL_VALUE of ORIGINAL_TYPE) and copies over the
   embedding information before resolving the full run-time object.  */

struct value *
readjust_indirect_value_type (struct value *value, struct type *enc_type,
			      const struct type *original_type,
			      const struct value *original_value)
{
  /* Re-adjust type: the dereferenced value has the pointer/reference
     target type.  */
  deprecated_set_value_type (value, TYPE_TARGET_TYPE (original_type));

  /* Add embedding info.  */
  set_value_enclosing_type (value, enc_type);
  set_value_embedded_offset (value, value_pointed_to_offset (original_value));

  /* We may be pointing to an object of some derived type.  */
  return value_full_object (value, NULL, 0, 0, 0);
}
3820
3821 struct value *
3822 coerce_ref (struct value *arg)
3823 {
3824 struct type *value_type_arg_tmp = check_typedef (value_type (arg));
3825 struct value *retval;
3826 struct type *enc_type;
3827
3828 retval = coerce_ref_if_computed (arg);
3829 if (retval)
3830 return retval;
3831
3832 if (TYPE_CODE (value_type_arg_tmp) != TYPE_CODE_REF)
3833 return arg;
3834
3835 enc_type = check_typedef (value_enclosing_type (arg));
3836 enc_type = TYPE_TARGET_TYPE (enc_type);
3837
3838 retval = value_at_lazy (enc_type,
3839 unpack_pointer (value_type (arg),
3840 value_contents (arg)));
3841 enc_type = value_type (retval);
3842 return readjust_indirect_value_type (retval, enc_type,
3843 value_type_arg_tmp, arg);
3844 }
3845
3846 struct value *
3847 coerce_array (struct value *arg)
3848 {
3849 struct type *type;
3850
3851 arg = coerce_ref (arg);
3852 type = check_typedef (value_type (arg));
3853
3854 switch (TYPE_CODE (type))
3855 {
3856 case TYPE_CODE_ARRAY:
3857 if (!TYPE_VECTOR (type) && current_language->c_style_arrays)
3858 arg = value_coerce_array (arg);
3859 break;
3860 case TYPE_CODE_FUNC:
3861 arg = value_coerce_function (arg);
3862 break;
3863 }
3864 return arg;
3865 }
3866 \f
3867
3868 /* Return the return value convention that will be used for the
3869 specified type. */
3870
3871 enum return_value_convention
3872 struct_return_convention (struct gdbarch *gdbarch,
3873 struct value *function, struct type *value_type)
3874 {
3875 enum type_code code = TYPE_CODE (value_type);
3876
3877 if (code == TYPE_CODE_ERROR)
3878 error (_("Function return type unknown."));
3879
3880 /* Probe the architecture for the return-value convention. */
3881 return gdbarch_return_value (gdbarch, function, value_type,
3882 NULL, NULL, NULL);
3883 }
3884
3885 /* Return true if the function returning the specified type is using
3886 the convention of returning structures in memory (passing in the
3887 address as a hidden first parameter). */
3888
3889 int
3890 using_struct_return (struct gdbarch *gdbarch,
3891 struct value *function, struct type *value_type)
3892 {
3893 if (TYPE_CODE (value_type) == TYPE_CODE_VOID)
3894 /* A void return value is never in memory. See also corresponding
3895 code in "print_return_value". */
3896 return 0;
3897
3898 return (struct_return_convention (gdbarch, function, value_type)
3899 != RETURN_VALUE_REGISTER_CONVENTION);
3900 }
3901
/* Set the initialized field in a value struct.  STATUS is nonzero if
   VAL's contents are considered initialized.  */

void
set_value_initialized (struct value *val, int status)
{
  val->initialized = status;
}
3909
/* Return the initialized field in a value struct: nonzero if VAL's
   contents are considered initialized.  */

int
value_initialized (const struct value *val)
{
  return val->initialized;
}
3917
/* Load the actual content of a lazy value.  Fetch the data from the
   user's process and clear the lazy flag to indicate that the data in
   the buffer is valid.

   If the value is zero-length, we avoid calling read_memory, which
   would abort.  We mark the value as fetched anyway -- all 0 bytes of
   it.

   Dispatches on the value's kind: bitfields are read through their
   parent, memory values through read_value_memory, register values by
   walking the frame chain, computed values through their read
   callback.  */

void
value_fetch_lazy (struct value *val)
{
  gdb_assert (value_lazy (val));
  allocate_value_contents (val);
  /* A value is either lazy, or fully fetched.  The
     availability/validity is only established as we try to fetch a
     value.  */
  gdb_assert (VEC_empty (range_s, val->optimized_out));
  gdb_assert (VEC_empty (range_s, val->unavailable));
  if (value_bitsize (val))
    {
      /* To read a lazy bitfield, read the entire enclosing value.  This
	 prevents reading the same block of (possibly volatile) memory once
	 per bitfield.  It would be even better to read only the containing
	 word, but we have no way to record that just specific bits of a
	 value have been fetched.  */
      struct type *type = check_typedef (value_type (val));
      struct value *parent = value_parent (val);

      if (value_lazy (parent))
	value_fetch_lazy (parent);

      unpack_value_bitfield (val,
			     value_bitpos (val), value_bitsize (val),
			     value_contents_for_printing (parent),
			     value_offset (val), parent);
    }
  else if (VALUE_LVAL (val) == lval_memory)
    {
      CORE_ADDR addr = value_address (val);
      struct type *type = check_typedef (value_enclosing_type (val));

      /* Skip the read entirely for zero-length types (see the
	 function comment).  */
      if (TYPE_LENGTH (type))
	read_value_memory (val, 0, value_stack (val),
			   addr, value_contents_all_raw (val),
			   type_length_units (type));
    }
  else if (VALUE_LVAL (val) == lval_register)
    {
      struct frame_info *frame;
      int regnum;
      struct type *type = check_typedef (value_type (val));
      struct value *new_val = val, *mark = value_mark ();

      /* Offsets are not supported here; lazy register values must
	 refer to the entire register.  */
      gdb_assert (value_offset (val) == 0);

      /* Follow the chain of lazy register values down the frame
	 stack until a non-lazy (or non-register) value is found.  */
      while (VALUE_LVAL (new_val) == lval_register && value_lazy (new_val))
	{
	  struct frame_id frame_id = VALUE_FRAME_ID (new_val);

	  frame = frame_find_by_id (frame_id);
	  regnum = VALUE_REGNUM (new_val);

	  gdb_assert (frame != NULL);

	  /* Convertible register routines are used for multi-register
	     values and for interpretation in different types
	     (e.g. float or int from a double register).  Lazy
	     register values should have the register's natural type,
	     so they do not apply.  */
	  gdb_assert (!gdbarch_convert_register_p (get_frame_arch (frame),
						   regnum, type));

	  new_val = get_frame_register_value (frame, regnum);

	  /* If we get another lazy lval_register value, it means the
	     register is found by reading it from the next frame.
	     get_frame_register_value should never return a value with
	     the frame id pointing to FRAME.  If it does, it means we
	     either have two consecutive frames with the same frame id
	     in the frame chain, or some code is trying to unwind
	     behind get_prev_frame's back (e.g., a frame unwind
	     sniffer trying to unwind), bypassing its validations.  In
	     any case, it should always be an internal error to end up
	     in this situation.  */
	  if (VALUE_LVAL (new_val) == lval_register
	      && value_lazy (new_val)
	      && frame_id_eq (VALUE_FRAME_ID (new_val), frame_id))
	    internal_error (__FILE__, __LINE__,
			    _("infinite loop while fetching a register"));
	}

      /* If it's still lazy (for instance, a saved register on the
	 stack), fetch it.  */
      if (value_lazy (new_val))
	value_fetch_lazy (new_val);

      /* Copy the contents and the unavailability/optimized-out
	 meta-data from NEW_VAL to VAL.  */
      set_value_lazy (val, 0);
      value_contents_copy (val, value_embedded_offset (val),
			   new_val, value_embedded_offset (new_val),
			   type_length_units (type));

      if (frame_debug)
	{
	  /* Trace the fetched register contents to gdb_stdlog.  */
	  struct gdbarch *gdbarch;
	  frame = frame_find_by_id (VALUE_FRAME_ID (val));
	  regnum = VALUE_REGNUM (val);
	  gdbarch = get_frame_arch (frame);

	  fprintf_unfiltered (gdb_stdlog,
			      "{ value_fetch_lazy "
			      "(frame=%d,regnum=%d(%s),...) ",
			      frame_relative_level (frame), regnum,
			      user_reg_map_regnum_to_name (gdbarch, regnum));

	  fprintf_unfiltered (gdb_stdlog, "->");
	  if (value_optimized_out (new_val))
	    {
	      fprintf_unfiltered (gdb_stdlog, " ");
	      val_print_optimized_out (new_val, gdb_stdlog);
	    }
	  else
	    {
	      int i;
	      const gdb_byte *buf = value_contents (new_val);

	      if (VALUE_LVAL (new_val) == lval_register)
		fprintf_unfiltered (gdb_stdlog, " register=%d",
				    VALUE_REGNUM (new_val));
	      else if (VALUE_LVAL (new_val) == lval_memory)
		fprintf_unfiltered (gdb_stdlog, " address=%s",
				    paddress (gdbarch,
					      value_address (new_val)));
	      else
		fprintf_unfiltered (gdb_stdlog, " computed");

	      fprintf_unfiltered (gdb_stdlog, " bytes=");
	      fprintf_unfiltered (gdb_stdlog, "[");
	      for (i = 0; i < register_size (gdbarch, regnum); i++)
		fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
	      fprintf_unfiltered (gdb_stdlog, "]");
	    }

	  fprintf_unfiltered (gdb_stdlog, " }\n");
	}

      /* Dispose of the intermediate values.  This prevents
	 watchpoints from trying to watch the saved frame pointer.  */
      value_free_to_mark (mark);
    }
  else if (VALUE_LVAL (val) == lval_computed
	   && value_computed_funcs (val)->read != NULL)
    value_computed_funcs (val)->read (val);
  else
    internal_error (__FILE__, __LINE__, _("Unexpected lazy value type."));

  set_value_lazy (val, 0);
}
4079
4080 /* Implementation of the convenience function $_isvoid. */
4081
4082 static struct value *
4083 isvoid_internal_fn (struct gdbarch *gdbarch,
4084 const struct language_defn *language,
4085 void *cookie, int argc, struct value **argv)
4086 {
4087 int ret;
4088
4089 if (argc != 1)
4090 error (_("You must provide one argument for $_isvoid."));
4091
4092 ret = TYPE_CODE (value_type (argv[0])) == TYPE_CODE_VOID;
4093
4094 return value_from_longest (builtin_type (gdbarch)->builtin_int, ret);
4095 }
4096
4097 void
4098 _initialize_values (void)
4099 {
4100 add_cmd ("convenience", no_class, show_convenience, _("\
4101 Debugger convenience (\"$foo\") variables and functions.\n\
4102 Convenience variables are created when you assign them values;\n\
4103 thus, \"set $foo=1\" gives \"$foo\" the value 1. Values may be any type.\n\
4104 \n\
4105 A few convenience variables are given values automatically:\n\
4106 \"$_\"holds the last address examined with \"x\" or \"info lines\",\n\
4107 \"$__\" holds the contents of the last address examined with \"x\"."
4108 #ifdef HAVE_PYTHON
4109 "\n\n\
4110 Convenience functions are defined via the Python API."
4111 #endif
4112 ), &showlist);
4113 add_alias_cmd ("conv", "convenience", no_class, 1, &showlist);
4114
4115 add_cmd ("values", no_set_class, show_values, _("\
4116 Elements of value history around item number IDX (or last ten)."),
4117 &showlist);
4118
4119 add_com ("init-if-undefined", class_vars, init_if_undefined_command, _("\
4120 Initialize a convenience variable if necessary.\n\
4121 init-if-undefined VARIABLE = EXPRESSION\n\
4122 Set an internal VARIABLE to the result of the EXPRESSION if it does not\n\
4123 exist or does not contain a value. The EXPRESSION is not evaluated if the\n\
4124 VARIABLE is already initialized."));
4125
4126 add_prefix_cmd ("function", no_class, function_command, _("\
4127 Placeholder command for showing help on convenience functions."),
4128 &functionlist, "function ", 0, &cmdlist);
4129
4130 add_internal_function ("_isvoid", _("\
4131 Check whether an expression is void.\n\
4132 Usage: $_isvoid (expression)\n\
4133 Return 1 if the expression is void, zero otherwise."),
4134 isvoid_internal_fn, NULL);
4135
4136 add_setshow_zuinteger_unlimited_cmd ("max-value-size",
4137 class_support, &max_value_size, _("\
4138 Set maximum sized value gdb will load from the inferior."), _("\
4139 Show maximum sized value gdb will load from the inferior."), _("\
4140 Use this to control the maximum size, in bytes, of a value that gdb\n\
4141 will load from the inferior. Setting this value to 'unlimited'\n\
4142 disables checking.\n\
4143 Setting this does not invalidate already allocated values, it only\n\
4144 prevents future values, larger than this size, from being allocated."),
4145 set_max_value_size,
4146 show_max_value_size,
4147 &setlist, &showlist);
4148 }
This page took 0.11565 seconds and 4 git commands to generate.