1 /* Low level packing and unpacking of values for GDB, the GNU Debugger.
2
3 Copyright (C) 1986, 1987, 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1995,
4 1996, 1997, 1998, 1999, 2000, 2002, 2003, 2004, 2005, 2006, 2007, 2008,
5 2009, 2010, 2011 Free Software Foundation, Inc.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "arch-utils.h"
24 #include "gdb_string.h"
25 #include "symtab.h"
26 #include "gdbtypes.h"
27 #include "value.h"
28 #include "gdbcore.h"
29 #include "command.h"
30 #include "gdbcmd.h"
31 #include "target.h"
32 #include "language.h"
33 #include "demangle.h"
34 #include "doublest.h"
35 #include "gdb_assert.h"
36 #include "regcache.h"
37 #include "block.h"
38 #include "dfp.h"
39 #include "objfiles.h"
40 #include "valprint.h"
41 #include "cli/cli-decode.h"
42 #include "exceptions.h"
43 #include "python/python.h"
44
45 #include "tracepoint.h"
46
47 /* Prototypes for exported functions. */
48
49 void _initialize_values (void);
50
51 /* Definition of a user function. */
52 struct internal_function
53 {
54 /* The name of the function. It is a bit odd to have this in the
55 function itself -- the user might use a differently-named
56 convenience variable to hold the function. */
57 char *name;
58
59 /* The handler. */
60 internal_function_fn handler;
61
62 /* User data for the handler. */
63 void *cookie;
64 };
65
66 /* Defines an [OFFSET, OFFSET + LENGTH) range. */
67
68 struct range
69 {
70 /* Lowest offset in the range. */
71 int offset;
72
73 /* Length of the range. */
74 int length;
75 };
76
77 typedef struct range range_s;
78
79 DEF_VEC_O(range_s);
80
81 /* Returns true if the ranges defined by [offset1, offset1+len1) and
82 [offset2, offset2+len2) overlap. */
83
84 static int
85 ranges_overlap (int offset1, int len1,
86 int offset2, int len2)
87 {
88 ULONGEST h, l;
89
90 l = max (offset1, offset2);
91 h = min (offset1 + len1, offset2 + len2);
92 return (l < h);
93 }
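/* For illustration (a sketch added for exposition, using only the
   definition above): ranges_overlap treats its arguments as half-open
   [OFFSET, OFFSET + LENGTH) intervals, so

     ranges_overlap (10, 5, 14, 6)   => 1   [10,15) and [14,20) share byte 14
     ranges_overlap (10, 5, 15, 5)   => 0   [10,15) and [15,20) merely touch
     ranges_overlap (0, 4, 1, 1)     => 1   [1,2) lies inside [0,4)  */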
94
95 /* Returns true if the first argument is strictly less than the
96 second, useful for VEC_lower_bound. We keep ranges sorted by
97 offset and coalesce overlapping and contiguous ranges, so this just
98 compares the starting offset. */
99
100 static int
101 range_lessthan (const range_s *r1, const range_s *r2)
102 {
103 return r1->offset < r2->offset;
104 }
105
106 /* Returns true if RANGES contains any range that overlaps [OFFSET,
107 OFFSET+LENGTH). */
108
109 static int
110 ranges_contain (VEC(range_s) *ranges, int offset, int length)
111 {
112 range_s what;
113 int i;
114
115 what.offset = offset;
116 what.length = length;
117
118 /* We keep ranges sorted by offset and coalesce overlapping and
119 contiguous ranges, so to check if a range list contains a given
120 range, we can do a binary search for the position the given range
121 would be inserted if we only considered the starting OFFSET of
122 ranges. We call that position I. Since we also have LENGTH to
123      care for (this is a range after all), we need to check if the
124 _previous_ range overlaps the I range. E.g.,
125
126 R
127 |---|
128 |---| |---| |------| ... |--|
129 0 1 2 N
130
131 I=1
132
133 In the case above, the binary search would return `I=1', meaning,
134 this OFFSET should be inserted at position 1, and the current
135 position 1 should be pushed further (and before 2). But, `0'
136 overlaps with R.
137
138      Then we need to check whether the range at position I itself overlaps R.
139 E.g.,
140
141 R
142 |---|
143 |---| |---| |-------| ... |--|
144 0 1 2 N
145
146 I=1
147 */
148
149 i = VEC_lower_bound (range_s, ranges, &what, range_lessthan);
150
151 if (i > 0)
152 {
153 struct range *bef = VEC_index (range_s, ranges, i - 1);
154
155 if (ranges_overlap (bef->offset, bef->length, offset, length))
156 return 1;
157 }
158
159 if (i < VEC_length (range_s, ranges))
160 {
161 struct range *r = VEC_index (range_s, ranges, i);
162
163 if (ranges_overlap (r->offset, r->length, offset, length))
164 return 1;
165 }
166
167 return 0;
168 }
169
170 static struct cmd_list_element *functionlist;
171
172 struct value
173 {
174 /* Type of value; either not an lval, or one of the various
175 different possible kinds of lval. */
176 enum lval_type lval;
177
178 /* Is it modifiable? Only relevant if lval != not_lval. */
179 int modifiable;
180
181 /* Location of value (if lval). */
182 union
183 {
184 /* If lval == lval_memory, this is the address in the inferior.
185 If lval == lval_register, this is the byte offset into the
186 registers structure. */
187 CORE_ADDR address;
188
189 /* Pointer to internal variable. */
190 struct internalvar *internalvar;
191
192 /* If lval == lval_computed, this is a set of function pointers
193 to use to access and describe the value, and a closure pointer
194 for them to use. */
195 struct
196 {
197 struct lval_funcs *funcs; /* Functions to call. */
198 void *closure; /* Closure for those functions to use. */
199 } computed;
200 } location;
201
202   /* Describes the offset, in bytes, of a value within the lval of a structure.
203 If lval == lval_memory, this is an offset to the address. If
204 lval == lval_register, this is a further offset from
205 location.address within the registers structure. Note also the
206 member embedded_offset below. */
207 int offset;
208
209 /* Only used for bitfields; number of bits contained in them. */
210 int bitsize;
211
212 /* Only used for bitfields; position of start of field. For
213 gdbarch_bits_big_endian=0 targets, it is the position of the LSB. For
214 gdbarch_bits_big_endian=1 targets, it is the position of the MSB. */
215 int bitpos;
216
217 /* Only used for bitfields; the containing value. This allows a
218 single read from the target when displaying multiple
219 bitfields. */
220 struct value *parent;
221
222 /* Frame register value is relative to. This will be described in
223 the lval enum above as "lval_register". */
224 struct frame_id frame_id;
225
226 /* Type of the value. */
227 struct type *type;
228
229 /* If a value represents a C++ object, then the `type' field gives
230 the object's compile-time type. If the object actually belongs
231 to some class derived from `type', perhaps with other base
232 classes and additional members, then `type' is just a subobject
233 of the real thing, and the full object is probably larger than
234 `type' would suggest.
235
236 If `type' is a dynamic class (i.e. one with a vtable), then GDB
237 can actually determine the object's run-time type by looking at
238 the run-time type information in the vtable. When this
239 information is available, we may elect to read in the entire
240 object, for several reasons:
241
242 - When printing the value, the user would probably rather see the
243 full object, not just the limited portion apparent from the
244 compile-time type.
245
246 - If `type' has virtual base classes, then even printing `type'
247 alone may require reaching outside the `type' portion of the
248 object to wherever the virtual base class has been stored.
249
250 When we store the entire object, `enclosing_type' is the run-time
251 type -- the complete object -- and `embedded_offset' is the
252 offset of `type' within that larger type, in bytes. The
253 value_contents() macro takes `embedded_offset' into account, so
254 most GDB code continues to see the `type' portion of the value,
255 just as the inferior would.
256
257 If `type' is a pointer to an object, then `enclosing_type' is a
258 pointer to the object's run-time type, and `pointed_to_offset' is
259 the offset in bytes from the full object to the pointed-to object
260 -- that is, the value `embedded_offset' would have if we followed
261 the pointer and fetched the complete object. (I don't really see
262 the point. Why not just determine the run-time type when you
263 indirect, and avoid the special case? The contents don't matter
264 until you indirect anyway.)
265
266 If we're not doing anything fancy, `enclosing_type' is equal to
267 `type', and `embedded_offset' is zero, so everything works
268 normally. */
269 struct type *enclosing_type;
270 int embedded_offset;
271 int pointed_to_offset;
272
273 /* Values are stored in a chain, so that they can be deleted easily
274 over calls to the inferior. Values assigned to internal
275 variables, put into the value history or exposed to Python are
276 taken off this list. */
277 struct value *next;
278
279 /* Register number if the value is from a register. */
280 short regnum;
281
282 /* If zero, contents of this value are in the contents field. If
283 nonzero, contents are in inferior. If the lval field is lval_memory,
284 the contents are in inferior memory at location.address plus offset.
285 The lval field may also be lval_register.
286
287 WARNING: This field is used by the code which handles watchpoints
288 (see breakpoint.c) to decide whether a particular value can be
289 watched by hardware watchpoints. If the lazy flag is set for
290 some member of a value chain, it is assumed that this member of
291 the chain doesn't need to be watched as part of watching the
292 value itself. This is how GDB avoids watching the entire struct
293 or array when the user wants to watch a single struct member or
294 array element. If you ever change the way lazy flag is set and
295 reset, be sure to consider this use as well! */
296 char lazy;
297
298 /* If nonzero, this is the value of a variable which does not
299 actually exist in the program. */
300 char optimized_out;
301
302 /* If value is a variable, is it initialized or not. */
303 int initialized;
304
305 /* If value is from the stack. If this is set, read_stack will be
306 used instead of read_memory to enable extra caching. */
307 int stack;
308
309 /* Actual contents of the value. Target byte-order. NULL or not
310 valid if lazy is nonzero. */
311 gdb_byte *contents;
312
313 /* Unavailable ranges in CONTENTS. We mark unavailable ranges,
314 rather than available, since the common and default case is for a
315 value to be available. This is filled in at value read time. */
316 VEC(range_s) *unavailable;
317
318 /* The number of references to this value. When a value is created,
319 the value chain holds a reference, so REFERENCE_COUNT is 1. If
320 release_value is called, this value is removed from the chain but
321 the caller of release_value now has a reference to this value.
322 The caller must arrange for a call to value_free later. */
323 int reference_count;
324 };
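/* An illustrative sketch of how `type', `enclosing_type' and
   `embedded_offset' relate (hypothetical layout, not taken from any
   particular program): if a value with compile-time type `Base' is
   discovered to be the `Base' subobject located 8 bytes into a
   `Derived' object, and GDB reads in the whole object, then

     value_type (val)            -> Base
     value_enclosing_type (val)  -> Derived
     value_embedded_offset (val) -> 8

   so value_contents (val) yields just the `Base' portion while
   value_contents_all (val) yields the complete `Derived' object.  */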
325
326 int
327 value_bytes_available (const struct value *value, int offset, int length)
328 {
329 gdb_assert (!value->lazy);
330
331 return !ranges_contain (value->unavailable, offset, length);
332 }
333
334 int
335 value_entirely_available (struct value *value)
336 {
337 /* We can only tell whether the whole value is available when we try
338 to read it. */
339 if (value->lazy)
340 value_fetch_lazy (value);
341
342 if (VEC_empty (range_s, value->unavailable))
343 return 1;
344 return 0;
345 }
346
347 void
348 mark_value_bytes_unavailable (struct value *value, int offset, int length)
349 {
350 range_s newr;
351 int i;
352
353 /* Insert the range sorted. If there's overlap or the new range
354 would be contiguous with an existing range, merge. */
355
356 newr.offset = offset;
357 newr.length = length;
358
359 /* Do a binary search for the position the given range would be
360 inserted if we only considered the starting OFFSET of ranges.
361 Call that position I. Since we also have LENGTH to care for
362      (this is a range after all), we need to check if the _previous_
363 range overlaps the I range. E.g., calling R the new range:
364
365 #1 - overlaps with previous
366
367 R
368 |-...-|
369 |---| |---| |------| ... |--|
370 0 1 2 N
371
372 I=1
373
374 In the case #1 above, the binary search would return `I=1',
375 meaning, this OFFSET should be inserted at position 1, and the
376 current position 1 should be pushed further (and become 2). But,
377 note that `0' overlaps with R, so we want to merge them.
378
379 A similar consideration needs to be taken if the new range would
380 be contiguous with the previous range:
381
382 #2 - contiguous with previous
383
384 R
385 |-...-|
386 |--| |---| |------| ... |--|
387 0 1 2 N
388
389 I=1
390
391 If there's no overlap with the previous range, as in:
392
393 #3 - not overlapping and not contiguous
394
395 R
396 |-...-|
397 |--| |---| |------| ... |--|
398 0 1 2 N
399
400 I=1
401
402 or if I is 0:
403
404 #4 - R is the range with lowest offset
405
406 R
407 |-...-|
408 |--| |---| |------| ... |--|
409 0 1 2 N
410
411 I=0
412
413 ... we just push the new range to I.
414
415 All the 4 cases above need to consider that the new range may
416 also overlap several of the ranges that follow, or that R may be
417 contiguous with the following range, and merge. E.g.,
418
419 #5 - overlapping following ranges
420
421 R
422 |------------------------|
423 |--| |---| |------| ... |--|
424 0 1 2 N
425
426 I=0
427
428 or:
429
430 R
431 |-------|
432 |--| |---| |------| ... |--|
433 0 1 2 N
434
435 I=1
436
437 */
438
439 i = VEC_lower_bound (range_s, value->unavailable, &newr, range_lessthan);
440 if (i > 0)
441 {
442 struct range *bef = VEC_index (range_s, value->unavailable, i - 1);
443
444 if (ranges_overlap (bef->offset, bef->length, offset, length))
445 {
446 /* #1 */
447 ULONGEST l = min (bef->offset, offset);
448 ULONGEST h = max (bef->offset + bef->length, offset + length);
449
450 bef->offset = l;
451 bef->length = h - l;
452 i--;
453 }
454 else if (offset == bef->offset + bef->length)
455 {
456 /* #2 */
457 bef->length += length;
458 i--;
459 }
460 else
461 {
462 /* #3 */
463 VEC_safe_insert (range_s, value->unavailable, i, &newr);
464 }
465 }
466 else
467 {
468 /* #4 */
469 VEC_safe_insert (range_s, value->unavailable, i, &newr);
470 }
471
472 /* Check whether the ranges following the one we've just added or
473 touched can be folded in (#5 above). */
474 if (i + 1 < VEC_length (range_s, value->unavailable))
475 {
476 struct range *t;
477 struct range *r;
478 int removed = 0;
479 int next = i + 1;
480
481 /* Get the range we just touched. */
482 t = VEC_index (range_s, value->unavailable, i);
483 removed = 0;
484
485 i = next;
486 for (; VEC_iterate (range_s, value->unavailable, i, r); i++)
487 if (r->offset <= t->offset + t->length)
488 {
489 ULONGEST l, h;
490
491 l = min (t->offset, r->offset);
492 h = max (t->offset + t->length, r->offset + r->length);
493
494 t->offset = l;
495 t->length = h - l;
496
497 removed++;
498 }
499 else
500 {
501 /* If we couldn't merge this one, we won't be able to
502 merge following ones either, since the ranges are
503 always sorted by OFFSET. */
504 break;
505 }
506
507 if (removed != 0)
508 VEC_block_remove (range_s, value->unavailable, next, removed);
509 }
510 }
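/* A minimal sketch of the insert-and-merge behavior above (assuming a
   non-lazy value VAL whose bytes start out fully available):

     mark_value_bytes_unavailable (val, 4, 4);    ranges: [4,8)
     mark_value_bytes_unavailable (val, 12, 4);   ranges: [4,8) [12,16)
     mark_value_bytes_unavailable (val, 8, 4);    contiguous/overlapping,
                                                  ranges merge to: [4,16)

     value_bytes_available (val, 0, 4);           => 1
     value_bytes_available (val, 6, 2);           => 0  */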
511
512 /* Find the first range in RANGES that overlaps the range defined by
513    OFFSET and LENGTH, starting at element POS in the RANGES vector.
514    Returns the index into RANGES where such an overlapping range was
515 found, or -1 if none was found. */
516
517 static int
518 find_first_range_overlap (VEC(range_s) *ranges, int pos,
519 int offset, int length)
520 {
521 range_s *r;
522 int i;
523
524 for (i = pos; VEC_iterate (range_s, ranges, i, r); i++)
525 if (ranges_overlap (r->offset, r->length, offset, length))
526 return i;
527
528 return -1;
529 }
530
531 int
532 value_available_contents_eq (const struct value *val1, int offset1,
533 const struct value *val2, int offset2,
534 int length)
535 {
536 int idx1 = 0, idx2 = 0;
537
538 /* This routine is used by printing routines, where we should
539 already have read the value. Note that we only know whether a
540 value chunk is available if we've tried to read it. */
541 gdb_assert (!val1->lazy && !val2->lazy);
542
543 while (length > 0)
544 {
545 range_s *r1, *r2;
546 ULONGEST l1, h1;
547 ULONGEST l2, h2;
548
549 idx1 = find_first_range_overlap (val1->unavailable, idx1,
550 offset1, length);
551 idx2 = find_first_range_overlap (val2->unavailable, idx2,
552 offset2, length);
553
554 /* The usual case is for both values to be completely available. */
555 if (idx1 == -1 && idx2 == -1)
556 return (memcmp (val1->contents + offset1,
557 val2->contents + offset2,
558 length) == 0);
559       /* The contents only compare equal if the available set matches as
560 well. */
561 else if (idx1 == -1 || idx2 == -1)
562 return 0;
563
564 gdb_assert (idx1 != -1 && idx2 != -1);
565
566 r1 = VEC_index (range_s, val1->unavailable, idx1);
567 r2 = VEC_index (range_s, val2->unavailable, idx2);
568
569 /* Get the unavailable windows intersected by the incoming
570 ranges. The first and last ranges that overlap the argument
571         range may be wider than the incoming argument ranges.  */
572 l1 = max (offset1, r1->offset);
573 h1 = min (offset1 + length, r1->offset + r1->length);
574
575 l2 = max (offset2, r2->offset);
576 h2 = min (offset2 + length, r2->offset + r2->length);
577
578 /* Make them relative to the respective start offsets, so we can
579 compare them for equality. */
580 l1 -= offset1;
581 h1 -= offset1;
582
583 l2 -= offset2;
584 h2 -= offset2;
585
586 /* Different availability, no match. */
587 if (l1 != l2 || h1 != h2)
588 return 0;
589
590 /* Compare the _available_ contents. */
591 if (memcmp (val1->contents + offset1,
592 val2->contents + offset2,
593 l1) != 0)
594 return 0;
595
596 length -= h1;
597 offset1 += h1;
598 offset2 += h1;
599 }
600
601 return 1;
602 }
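/* Sketch of the comparison rule implemented above: the available bytes
   must match byte-for-byte, and the unavailable sub-ranges must line
   up exactly.  E.g., if VAL1 and VAL2 are both unavailable over the
   same relative window [4,8), only the bytes outside that window are
   memcmp'd; if VAL2 were instead unavailable over [4,9), the function
   returns 0 without comparing contents, because the availability
   patterns differ.  */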
603
604 /* Prototypes for local functions. */
605
606 static void show_values (char *, int);
607
608 static void show_convenience (char *, int);
609
610
611 /* The value-history records all the values printed
612 by print commands during this session. Each chunk
613 records 60 consecutive values. The first chunk on
614 the chain records the most recent values.
615 The total number of values is in value_history_count. */
616
617 #define VALUE_HISTORY_CHUNK 60
618
619 struct value_history_chunk
620 {
621 struct value_history_chunk *next;
622 struct value *values[VALUE_HISTORY_CHUNK];
623 };
624
625 /* Chain of chunks now in use. */
626
627 static struct value_history_chunk *value_history_chain;
628
629 static int value_history_count; /* Abs number of last entry stored. */
630
631 \f
632 /* List of all value objects currently allocated
633 (except for those released by calls to release_value)
634 This is so they can be freed after each command. */
635
636 static struct value *all_values;
637
638 /* Allocate a lazy value for type TYPE. Its actual content is
639 "lazily" allocated too: the content field of the return value is
640 NULL; it will be allocated when it is fetched from the target. */
641
642 struct value *
643 allocate_value_lazy (struct type *type)
644 {
645 struct value *val;
646
647 /* Call check_typedef on our type to make sure that, if TYPE
648 is a TYPE_CODE_TYPEDEF, its length is set to the length
649 of the target type instead of zero. However, we do not
650 replace the typedef type by the target type, because we want
651 to keep the typedef in order to be able to set the VAL's type
652 description correctly. */
653 check_typedef (type);
654
655 val = (struct value *) xzalloc (sizeof (struct value));
656 val->contents = NULL;
657 val->next = all_values;
658 all_values = val;
659 val->type = type;
660 val->enclosing_type = type;
661 VALUE_LVAL (val) = not_lval;
662 val->location.address = 0;
663 VALUE_FRAME_ID (val) = null_frame_id;
664 val->offset = 0;
665 val->bitpos = 0;
666 val->bitsize = 0;
667 VALUE_REGNUM (val) = -1;
668 val->lazy = 1;
669 val->optimized_out = 0;
670 val->embedded_offset = 0;
671 val->pointed_to_offset = 0;
672 val->modifiable = 1;
673 val->initialized = 1; /* Default to initialized. */
674
675 /* Values start out on the all_values chain. */
676 val->reference_count = 1;
677
678 return val;
679 }
680
681 /* Allocate the contents of VAL if it has not been allocated yet. */
682
683 void
684 allocate_value_contents (struct value *val)
685 {
686 if (!val->contents)
687 val->contents = (gdb_byte *) xzalloc (TYPE_LENGTH (val->enclosing_type));
688 }
689
690 /* Allocate a value and its contents for type TYPE. */
691
692 struct value *
693 allocate_value (struct type *type)
694 {
695 struct value *val = allocate_value_lazy (type);
696
697 allocate_value_contents (val);
698 val->lazy = 0;
699 return val;
700 }
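/* Sketch contrasting the two allocators (the gdbarch would come from
   e.g. get_current_arch (), as elsewhere in this file):

     struct type *int_type = builtin_type (gdbarch)->builtin_int;
     struct value *v1 = allocate_value (int_type);       contents allocated,
                                                         zeroed, lazy == 0
     struct value *v2 = allocate_value_lazy (int_type);  contents == NULL,
                                                         lazy == 1          */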
701
702 /* Allocate a value that has the correct length
703 for COUNT repetitions of type TYPE. */
704
705 struct value *
706 allocate_repeat_value (struct type *type, int count)
707 {
708 int low_bound = current_language->string_lower_bound; /* ??? */
709 /* FIXME-type-allocation: need a way to free this type when we are
710 done with it. */
711 struct type *array_type
712 = lookup_array_range_type (type, low_bound, count + low_bound - 1);
713
714 return allocate_value (array_type);
715 }
716
717 struct value *
718 allocate_computed_value (struct type *type,
719 struct lval_funcs *funcs,
720 void *closure)
721 {
722 struct value *v = allocate_value_lazy (type);
723
724 VALUE_LVAL (v) = lval_computed;
725 v->location.computed.funcs = funcs;
726 v->location.computed.closure = closure;
727
728 return v;
729 }
730
731 /* Accessor methods. */
732
733 struct value *
734 value_next (struct value *value)
735 {
736 return value->next;
737 }
738
739 struct type *
740 value_type (const struct value *value)
741 {
742 return value->type;
743 }
744 void
745 deprecated_set_value_type (struct value *value, struct type *type)
746 {
747 value->type = type;
748 }
749
750 int
751 value_offset (const struct value *value)
752 {
753 return value->offset;
754 }
755 void
756 set_value_offset (struct value *value, int offset)
757 {
758 value->offset = offset;
759 }
760
761 int
762 value_bitpos (const struct value *value)
763 {
764 return value->bitpos;
765 }
766 void
767 set_value_bitpos (struct value *value, int bit)
768 {
769 value->bitpos = bit;
770 }
771
772 int
773 value_bitsize (const struct value *value)
774 {
775 return value->bitsize;
776 }
777 void
778 set_value_bitsize (struct value *value, int bit)
779 {
780 value->bitsize = bit;
781 }
782
783 struct value *
784 value_parent (struct value *value)
785 {
786 return value->parent;
787 }
788
789 gdb_byte *
790 value_contents_raw (struct value *value)
791 {
792 allocate_value_contents (value);
793 return value->contents + value->embedded_offset;
794 }
795
796 gdb_byte *
797 value_contents_all_raw (struct value *value)
798 {
799 allocate_value_contents (value);
800 return value->contents;
801 }
802
803 struct type *
804 value_enclosing_type (struct value *value)
805 {
806 return value->enclosing_type;
807 }
808
809 static void
810 require_not_optimized_out (const struct value *value)
811 {
812 if (value->optimized_out)
813 error (_("value has been optimized out"));
814 }
815
816 static void
817 require_available (const struct value *value)
818 {
819 if (!VEC_empty (range_s, value->unavailable))
820 throw_error (NOT_AVAILABLE_ERROR, _("value is not available"));
821 }
822
823 const gdb_byte *
824 value_contents_for_printing (struct value *value)
825 {
826 if (value->lazy)
827 value_fetch_lazy (value);
828 return value->contents;
829 }
830
831 const gdb_byte *
832 value_contents_for_printing_const (const struct value *value)
833 {
834 gdb_assert (!value->lazy);
835 return value->contents;
836 }
837
838 const gdb_byte *
839 value_contents_all (struct value *value)
840 {
841 const gdb_byte *result = value_contents_for_printing (value);
842 require_not_optimized_out (value);
843 require_available (value);
844 return result;
845 }
846
847 /* Copy LENGTH bytes of SRC value's (all) contents
848 (value_contents_all) starting at SRC_OFFSET, into DST value's (all)
849 contents, starting at DST_OFFSET. If unavailable contents are
850 being copied from SRC, the corresponding DST contents are marked
851 unavailable accordingly. Neither DST nor SRC may be lazy
852 values.
853
854 It is assumed the contents of DST in the [DST_OFFSET,
855 DST_OFFSET+LENGTH) range are wholly available. */
856
857 void
858 value_contents_copy_raw (struct value *dst, int dst_offset,
859 struct value *src, int src_offset, int length)
860 {
861 range_s *r;
862 int i;
863
864   /* A lazy DST would make this copy operation useless, since as
865 soon as DST's contents were un-lazied (by a later value_contents
866 call, say), the contents would be overwritten. A lazy SRC would
867 mean we'd be copying garbage. */
868 gdb_assert (!dst->lazy && !src->lazy);
869
870 /* The overwritten DST range gets unavailability ORed in, not
871 replaced. Make sure to remember to implement replacing if it
872 turns out actually necessary. */
873 gdb_assert (value_bytes_available (dst, dst_offset, length));
874
875 /* Copy the data. */
876 memcpy (value_contents_all_raw (dst) + dst_offset,
877 value_contents_all_raw (src) + src_offset,
878 length);
879
880 /* Copy the meta-data, adjusted. */
881 for (i = 0; VEC_iterate (range_s, src->unavailable, i, r); i++)
882 {
883 ULONGEST h, l;
884
885 l = max (r->offset, src_offset);
886 h = min (r->offset + r->length, src_offset + length);
887
888 if (l < h)
889 mark_value_bytes_unavailable (dst,
890 dst_offset + (l - src_offset),
891 h - l);
892 }
893 }
894
895 /* Copy LENGTH bytes of SRC value's (all) contents
896 (value_contents_all) starting at SRC_OFFSET byte, into DST value's
897 (all) contents, starting at DST_OFFSET. If unavailable contents
898 are being copied from SRC, the corresponding DST contents are
899 marked unavailable accordingly. DST must not be lazy. If SRC is
900 lazy, it will be fetched now. If SRC is not valid (is optimized
901 out), an error is thrown.
902
903 It is assumed the contents of DST in the [DST_OFFSET,
904 DST_OFFSET+LENGTH) range are wholly available. */
905
906 void
907 value_contents_copy (struct value *dst, int dst_offset,
908 struct value *src, int src_offset, int length)
909 {
910 require_not_optimized_out (src);
911
912 if (src->lazy)
913 value_fetch_lazy (src);
914
915 value_contents_copy_raw (dst, dst_offset, src, src_offset, length);
916 }
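/* Sketch of copying with unavailability propagation (DST assumed
   non-lazy and wholly available over the destination range): if SRC's
   bytes [4,8) were previously marked unavailable, then after

     value_contents_copy (dst, 16, src, 0, 16);

   the 16 bytes have been copied and DST's bytes [20,24) are marked
   unavailable in turn.  */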
917
918 int
919 value_lazy (struct value *value)
920 {
921 return value->lazy;
922 }
923
924 void
925 set_value_lazy (struct value *value, int val)
926 {
927 value->lazy = val;
928 }
929
930 int
931 value_stack (struct value *value)
932 {
933 return value->stack;
934 }
935
936 void
937 set_value_stack (struct value *value, int val)
938 {
939 value->stack = val;
940 }
941
942 const gdb_byte *
943 value_contents (struct value *value)
944 {
945 const gdb_byte *result = value_contents_writeable (value);
946 require_not_optimized_out (value);
947 require_available (value);
948 return result;
949 }
950
951 gdb_byte *
952 value_contents_writeable (struct value *value)
953 {
954 if (value->lazy)
955 value_fetch_lazy (value);
956 return value_contents_raw (value);
957 }
958
959 /* Return non-zero if VAL1 and VAL2 have the same contents. Note that
960 this function is different from value_equal; in C the operator ==
961 can return 0 even if the two values being compared are equal. */
962
963 int
964 value_contents_equal (struct value *val1, struct value *val2)
965 {
966 struct type *type1;
967 struct type *type2;
968 int len;
969
970 type1 = check_typedef (value_type (val1));
971 type2 = check_typedef (value_type (val2));
972 len = TYPE_LENGTH (type1);
973 if (len != TYPE_LENGTH (type2))
974 return 0;
975
976 return (memcmp (value_contents (val1), value_contents (val2), len) == 0);
977 }
978
979 int
980 value_optimized_out (struct value *value)
981 {
982 return value->optimized_out;
983 }
984
985 void
986 set_value_optimized_out (struct value *value, int val)
987 {
988 value->optimized_out = val;
989 }
990
991 int
992 value_entirely_optimized_out (const struct value *value)
993 {
994 if (!value->optimized_out)
995 return 0;
996 if (value->lval != lval_computed
997 || !value->location.computed.funcs->check_any_valid)
998 return 1;
999 return !value->location.computed.funcs->check_any_valid (value);
1000 }
1001
1002 int
1003 value_bits_valid (const struct value *value, int offset, int length)
1004 {
1005 if (!value->optimized_out)
1006 return 1;
1007 if (value->lval != lval_computed
1008 || !value->location.computed.funcs->check_validity)
1009 return 0;
1010 return value->location.computed.funcs->check_validity (value, offset,
1011 length);
1012 }
1013
1014 int
1015 value_bits_synthetic_pointer (const struct value *value,
1016 int offset, int length)
1017 {
1018 if (value->lval != lval_computed
1019 || !value->location.computed.funcs->check_synthetic_pointer)
1020 return 0;
1021 return value->location.computed.funcs->check_synthetic_pointer (value,
1022 offset,
1023 length);
1024 }
1025
1026 int
1027 value_embedded_offset (struct value *value)
1028 {
1029 return value->embedded_offset;
1030 }
1031
1032 void
1033 set_value_embedded_offset (struct value *value, int val)
1034 {
1035 value->embedded_offset = val;
1036 }
1037
1038 int
1039 value_pointed_to_offset (struct value *value)
1040 {
1041 return value->pointed_to_offset;
1042 }
1043
1044 void
1045 set_value_pointed_to_offset (struct value *value, int val)
1046 {
1047 value->pointed_to_offset = val;
1048 }
1049
1050 struct lval_funcs *
1051 value_computed_funcs (struct value *v)
1052 {
1053 gdb_assert (VALUE_LVAL (v) == lval_computed);
1054
1055 return v->location.computed.funcs;
1056 }
1057
1058 void *
1059 value_computed_closure (const struct value *v)
1060 {
1061 gdb_assert (v->lval == lval_computed);
1062
1063 return v->location.computed.closure;
1064 }
1065
1066 enum lval_type *
1067 deprecated_value_lval_hack (struct value *value)
1068 {
1069 return &value->lval;
1070 }
1071
1072 CORE_ADDR
1073 value_address (const struct value *value)
1074 {
1075 if (value->lval == lval_internalvar
1076 || value->lval == lval_internalvar_component)
1077 return 0;
1078 return value->location.address + value->offset;
1079 }
1080
1081 CORE_ADDR
1082 value_raw_address (struct value *value)
1083 {
1084 if (value->lval == lval_internalvar
1085 || value->lval == lval_internalvar_component)
1086 return 0;
1087 return value->location.address;
1088 }
1089
1090 void
1091 set_value_address (struct value *value, CORE_ADDR addr)
1092 {
1093 gdb_assert (value->lval != lval_internalvar
1094 && value->lval != lval_internalvar_component);
1095 value->location.address = addr;
1096 }
1097
1098 struct internalvar **
1099 deprecated_value_internalvar_hack (struct value *value)
1100 {
1101 return &value->location.internalvar;
1102 }
1103
1104 struct frame_id *
1105 deprecated_value_frame_id_hack (struct value *value)
1106 {
1107 return &value->frame_id;
1108 }
1109
1110 short *
1111 deprecated_value_regnum_hack (struct value *value)
1112 {
1113 return &value->regnum;
1114 }
1115
1116 int
1117 deprecated_value_modifiable (struct value *value)
1118 {
1119 return value->modifiable;
1120 }
1121 void
1122 deprecated_set_value_modifiable (struct value *value, int modifiable)
1123 {
1124 value->modifiable = modifiable;
1125 }
1126 \f
1127 /* Return a mark in the value chain. All values allocated after the
1128 mark is obtained (except for those released) are subject to being freed
1129 if a subsequent value_free_to_mark is passed the mark. */
1130 struct value *
1131 value_mark (void)
1132 {
1133 return all_values;
1134 }
1135
1136 /* Take a reference to VAL. VAL will not be deallocated until all
1137 references are released. */
1138
1139 void
1140 value_incref (struct value *val)
1141 {
1142 val->reference_count++;
1143 }
1144
1145 /* Release a reference to VAL, which was acquired with value_incref.
1146 This function is also called to deallocate values from the value
1147 chain. */
1148
1149 void
1150 value_free (struct value *val)
1151 {
1152 if (val)
1153 {
1154 gdb_assert (val->reference_count > 0);
1155 val->reference_count--;
1156 if (val->reference_count > 0)
1157 return;
1158
1159 /* If there's an associated parent value, drop our reference to
1160 it. */
1161 if (val->parent != NULL)
1162 value_free (val->parent);
1163
1164 if (VALUE_LVAL (val) == lval_computed)
1165 {
1166 struct lval_funcs *funcs = val->location.computed.funcs;
1167
1168 if (funcs->free_closure)
1169 funcs->free_closure (val);
1170 }
1171
1172 xfree (val->contents);
1173 VEC_free (range_s, val->unavailable);
1174 }
1175 xfree (val);
1176 }
1177
1178 /* Free all values allocated since MARK was obtained by value_mark
1179 (except for those released). */
1180 void
1181 value_free_to_mark (struct value *mark)
1182 {
1183 struct value *val;
1184 struct value *next;
1185
1186 for (val = all_values; val && val != mark; val = next)
1187 {
1188 next = val->next;
1189 value_free (val);
1190 }
1191 all_values = val;
1192 }
1193
1194 /* Free all the values that have been allocated (except for those released).
1195 Call after each command, successful or not.
1196 In practice this is called before each command, which is sufficient. */
1197
1198 void
1199 free_all_values (void)
1200 {
1201 struct value *val;
1202 struct value *next;
1203
1204 for (val = all_values; val; val = next)
1205 {
1206 next = val->next;
1207 value_free (val);
1208 }
1209
1210 all_values = 0;
1211 }
1212
1213 /* Frees all the elements in a chain of values. */
1214
1215 void
1216 free_value_chain (struct value *v)
1217 {
1218 struct value *next;
1219
1220 for (; v; v = next)
1221 {
1222 next = value_next (v);
1223 value_free (v);
1224 }
1225 }
1226
1227 /* Remove VAL from the chain all_values
1228 so it will not be freed automatically. */
1229
1230 void
1231 release_value (struct value *val)
1232 {
1233 struct value *v;
1234
1235 if (all_values == val)
1236 {
1237 all_values = val->next;
1238 val->next = NULL;
1239 return;
1240 }
1241
1242 for (v = all_values; v; v = v->next)
1243 {
1244 if (v->next == val)
1245 {
1246 v->next = val->next;
1247 val->next = NULL;
1248 break;
1249 }
1250 }
1251 }
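/* Sketch of the reference-counting protocol described in struct value
   above: a freshly allocated value sits on the all_values chain with
   one reference; a caller that wants to keep it past free_all_values
   releases it from the chain and later drops the reference itself.

     struct value *val = allocate_value (type);   on the chain, refcount 1
     release_value (val);                         off the chain, caller owns it
     ...
     value_free (val);                            drops the last reference  */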
1252
1253 /* Release all values up to MARK.  */
1254 struct value *
1255 value_release_to_mark (struct value *mark)
1256 {
1257 struct value *val;
1258 struct value *next;
1259
1260 for (val = next = all_values; next; next = next->next)
1261 if (next->next == mark)
1262 {
1263 all_values = next->next;
1264 next->next = NULL;
1265 return val;
1266 }
1267 all_values = 0;
1268 return val;
1269 }
1270
1271 /* Return a copy of the value ARG.
1272    It contains the same contents, for the same memory address,
1273 but it's a different block of storage. */
1274
1275 struct value *
1276 value_copy (struct value *arg)
1277 {
1278 struct type *encl_type = value_enclosing_type (arg);
1279 struct value *val;
1280
1281 if (value_lazy (arg))
1282 val = allocate_value_lazy (encl_type);
1283 else
1284 val = allocate_value (encl_type);
1285 val->type = arg->type;
1286 VALUE_LVAL (val) = VALUE_LVAL (arg);
1287 val->location = arg->location;
1288 val->offset = arg->offset;
1289 val->bitpos = arg->bitpos;
1290 val->bitsize = arg->bitsize;
1291 VALUE_FRAME_ID (val) = VALUE_FRAME_ID (arg);
1292 VALUE_REGNUM (val) = VALUE_REGNUM (arg);
1293 val->lazy = arg->lazy;
1294 val->optimized_out = arg->optimized_out;
1295 val->embedded_offset = value_embedded_offset (arg);
1296 val->pointed_to_offset = arg->pointed_to_offset;
1297 val->modifiable = arg->modifiable;
1298 if (!value_lazy (val))
1299 {
1300 memcpy (value_contents_all_raw (val), value_contents_all_raw (arg),
1301 TYPE_LENGTH (value_enclosing_type (arg)));
1302
1303 }
1304 val->unavailable = VEC_copy (range_s, arg->unavailable);
1305 val->parent = arg->parent;
1306 if (val->parent)
1307 value_incref (val->parent);
1308 if (VALUE_LVAL (val) == lval_computed)
1309 {
1310 struct lval_funcs *funcs = val->location.computed.funcs;
1311
1312 if (funcs->copy_closure)
1313 val->location.computed.closure = funcs->copy_closure (val);
1314 }
1315 return val;
1316 }
1317
1318 /* Return a version of ARG that is non-lvalue. */
1319
1320 struct value *
1321 value_non_lval (struct value *arg)
1322 {
1323 if (VALUE_LVAL (arg) != not_lval)
1324 {
1325 struct type *enc_type = value_enclosing_type (arg);
1326 struct value *val = allocate_value (enc_type);
1327
1328 memcpy (value_contents_all_raw (val), value_contents_all (arg),
1329 TYPE_LENGTH (enc_type));
1330 val->type = arg->type;
1331 set_value_embedded_offset (val, value_embedded_offset (arg));
1332 set_value_pointed_to_offset (val, value_pointed_to_offset (arg));
1333 return val;
1334 }
1335 return arg;
1336 }
1337
1338 void
1339 set_value_component_location (struct value *component,
1340 const struct value *whole)
1341 {
1342 if (whole->lval == lval_internalvar)
1343 VALUE_LVAL (component) = lval_internalvar_component;
1344 else
1345 VALUE_LVAL (component) = whole->lval;
1346
1347 component->location = whole->location;
1348 if (whole->lval == lval_computed)
1349 {
1350 struct lval_funcs *funcs = whole->location.computed.funcs;
1351
1352 if (funcs->copy_closure)
1353 component->location.computed.closure = funcs->copy_closure (whole);
1354 }
1355 }
1356
1357 \f
1358 /* Access to the value history. */
1359
1360 /* Record a new value in the value history.
1361 Returns the absolute history index of the entry.
1362    The returned index is the value history number under which the
1363    new item can be retrieved with access_value_history.  */
1364
1365 int
1366 record_latest_value (struct value *val)
1367 {
1368 int i;
1369
1370 /* We don't want this value to have anything to do with the inferior anymore.
1371 In particular, "set $1 = 50" should not affect the variable from which
1372 the value was taken, and fast watchpoints should be able to assume that
1373 a value on the value history never changes. */
1374 if (value_lazy (val))
1375 value_fetch_lazy (val);
1376 /* We preserve VALUE_LVAL so that the user can find out where it was fetched
1377 from. This is a bit dubious, because then *&$1 does not just return $1
1378 but the current contents of that location. c'est la vie... */
1379 val->modifiable = 0;
1380 release_value (val);
1381
1382 /* Here we treat value_history_count as origin-zero
1383 and applying to the value being stored now. */
1384
1385 i = value_history_count % VALUE_HISTORY_CHUNK;
1386 if (i == 0)
1387 {
1388 struct value_history_chunk *new
1389 = (struct value_history_chunk *)
1390
1391 xmalloc (sizeof (struct value_history_chunk));
1392 memset (new->values, 0, sizeof new->values);
1393 new->next = value_history_chain;
1394 value_history_chain = new;
1395 }
1396
1397 value_history_chain->values[i] = val;
1398
1399 /* Now we regard value_history_count as origin-one
1400 and applying to the value just stored. */
1401
1402 return ++value_history_count;
1403 }
1404
1405 /* Return a copy of the value in the history with sequence number NUM. */
1406
1407 struct value *
1408 access_value_history (int num)
1409 {
1410 struct value_history_chunk *chunk;
1411 int i;
1412 int absnum = num;
1413
1414 if (absnum <= 0)
1415 absnum += value_history_count;
1416
1417 if (absnum <= 0)
1418 {
1419 if (num == 0)
1420 error (_("The history is empty."));
1421 else if (num == 1)
1422 error (_("There is only one value in the history."));
1423 else
1424 error (_("History does not go back to $$%d."), -num);
1425 }
1426 if (absnum > value_history_count)
1427 error (_("History has not yet reached $%d."), absnum);
1428
1429 absnum--;
1430
1431 /* Now absnum is always absolute and origin zero. */
1432
1433 chunk = value_history_chain;
1434 for (i = (value_history_count - 1) / VALUE_HISTORY_CHUNK
1435 - absnum / VALUE_HISTORY_CHUNK;
1436 i > 0; i--)
1437 chunk = chunk->next;
1438
1439 return value_copy (chunk->values[absnum % VALUE_HISTORY_CHUNK]);
1440 }
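/* Sketch tying the two routines above together:

     int num = record_latest_value (val);            becomes $<num>
     struct value *again = access_value_history (num);

     access_value_history (0);    copy of the most recent value ($)
     access_value_history (-1);   copy of the one before it ($$)   */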
1441
1442 static void
1443 show_values (char *num_exp, int from_tty)
1444 {
1445 int i;
1446 struct value *val;
1447 static int num = 1;
1448
1449 if (num_exp)
1450 {
1451 /* "show values +" should print from the stored position.
1452 "show values <exp>" should print around value number <exp>. */
1453 if (num_exp[0] != '+' || num_exp[1] != '\0')
1454 num = parse_and_eval_long (num_exp) - 5;
1455 }
1456 else
1457 {
1458 /* "show values" means print the last 10 values. */
1459 num = value_history_count - 9;
1460 }
1461
1462 if (num <= 0)
1463 num = 1;
1464
1465 for (i = num; i < num + 10 && i <= value_history_count; i++)
1466 {
1467 struct value_print_options opts;
1468
1469 val = access_value_history (i);
1470 printf_filtered (("$%d = "), i);
1471 get_user_print_options (&opts);
1472 value_print (val, gdb_stdout, &opts);
1473 printf_filtered (("\n"));
1474 }
1475
1476 /* The next "show values +" should start after what we just printed. */
1477 num += 10;
1478
1479 /* Hitting just return after this command should do the same thing as
1480 "show values +". If num_exp is null, this is unnecessary, since
1481 "show values +" is not useful after "show values". */
1482 if (from_tty && num_exp)
1483 {
1484 num_exp[0] = '+';
1485 num_exp[1] = '\0';
1486 }
1487 }
1488 \f
1489 /* Internal variables. These are variables within the debugger
1490 that hold values assigned by debugger commands.
1491 The user refers to them with a '$' prefix
1492 that does not appear in the variable names stored internally. */
1493
1494 struct internalvar
1495 {
1496 struct internalvar *next;
1497 char *name;
1498
1499 /* We support various different kinds of content of an internal variable.
1500 enum internalvar_kind specifies the kind, and union internalvar_data
1501 provides the data associated with this particular kind. */
1502
1503 enum internalvar_kind
1504 {
1505 /* The internal variable is empty. */
1506 INTERNALVAR_VOID,
1507
1508 /* The value of the internal variable is provided directly as
1509 a GDB value object. */
1510 INTERNALVAR_VALUE,
1511
1512 /* A fresh value is computed via a call-back routine on every
1513 access to the internal variable. */
1514 INTERNALVAR_MAKE_VALUE,
1515
1516 /* The internal variable holds a GDB internal convenience function. */
1517 INTERNALVAR_FUNCTION,
1518
1519 /* The variable holds an integer value. */
1520 INTERNALVAR_INTEGER,
1521
1522 /* The variable holds a GDB-provided string. */
1523 INTERNALVAR_STRING,
1524
1525 } kind;
1526
1527 union internalvar_data
1528 {
1529 /* A value object used with INTERNALVAR_VALUE. */
1530 struct value *value;
1531
1532 /* The call-back routine used with INTERNALVAR_MAKE_VALUE. */
1533 internalvar_make_value make_value;
1534
1535 /* The internal function used with INTERNALVAR_FUNCTION. */
1536 struct
1537 {
1538 struct internal_function *function;
1539 /* True if this is the canonical name for the function. */
1540 int canonical;
1541 } fn;
1542
1543 /* An integer value used with INTERNALVAR_INTEGER. */
1544 struct
1545 {
1546 /* If type is non-NULL, it will be used as the type to generate
1547 a value for this internal variable. If type is NULL, a default
1548 integer type for the architecture is used. */
1549 struct type *type;
1550 LONGEST val;
1551 } integer;
1552
1553 /* A string value used with INTERNALVAR_STRING. */
1554 char *string;
1555 } u;
1556 };
1557
1558 static struct internalvar *internalvars;
1559
1560 /* If the variable does not already exist, create it and give it the
1561 value given. If no value is given then the default is zero. */
1562 static void
1563 init_if_undefined_command (char* args, int from_tty)
1564 {
1565 struct internalvar* intvar;
1566
1567 /* Parse the expression - this is taken from set_command(). */
1568 struct expression *expr = parse_expression (args);
1569 register struct cleanup *old_chain =
1570 make_cleanup (free_current_contents, &expr);
1571
1572 /* Validate the expression.
1573 Was the expression an assignment?
1574 Or even an expression at all? */
1575 if (expr->nelts == 0 || expr->elts[0].opcode != BINOP_ASSIGN)
1576 error (_("Init-if-undefined requires an assignment expression."));
1577
1578 /* Extract the variable from the parsed expression.
1579 In the case of an assign the lvalue will be in elts[1] and elts[2]. */
1580 if (expr->elts[1].opcode != OP_INTERNALVAR)
1581 error (_("The first parameter to init-if-undefined "
1582 "should be a GDB variable."));
1583 intvar = expr->elts[2].internalvar;
1584
1585 /* Only evaluate the expression if the lvalue is void.
1586      This may still fail if the expression is invalid.  */
1587 if (intvar->kind == INTERNALVAR_VOID)
1588 evaluate_expression (expr);
1589
1590 do_cleanups (old_chain);
1591 }
1592
1593
1594 /* Look up an internal variable with name NAME. NAME should not
1595 normally include a dollar sign.
1596
1597 If the specified internal variable does not exist,
1598 the return value is NULL. */
1599
1600 struct internalvar *
1601 lookup_only_internalvar (const char *name)
1602 {
1603 struct internalvar *var;
1604
1605 for (var = internalvars; var; var = var->next)
1606 if (strcmp (var->name, name) == 0)
1607 return var;
1608
1609 return NULL;
1610 }
1611
1612
1613 /* Create an internal variable with name NAME and with a void value.
1614 NAME should not normally include a dollar sign. */
1615
1616 struct internalvar *
1617 create_internalvar (const char *name)
1618 {
1619 struct internalvar *var;
1620
1621 var = (struct internalvar *) xmalloc (sizeof (struct internalvar));
1622 var->name = concat (name, (char *)NULL);
1623 var->kind = INTERNALVAR_VOID;
1624 var->next = internalvars;
1625 internalvars = var;
1626 return var;
1627 }
1628
1629 /* Create an internal variable with name NAME and register FUN as the
1630 function that value_of_internalvar uses to create a value whenever
1631 this variable is referenced. NAME should not normally include a
1632 dollar sign. */
1633
1634 struct internalvar *
1635 create_internalvar_type_lazy (char *name, internalvar_make_value fun)
1636 {
1637 struct internalvar *var = create_internalvar (name);
1638
1639 var->kind = INTERNALVAR_MAKE_VALUE;
1640 var->u.make_value = fun;
1641 return var;
1642 }
1643
1644 /* Look up an internal variable with name NAME. NAME should not
1645 normally include a dollar sign.
1646
1647 If the specified internal variable does not exist,
1648 one is created, with a void value. */
1649
1650 struct internalvar *
1651 lookup_internalvar (const char *name)
1652 {
1653 struct internalvar *var;
1654
1655 var = lookup_only_internalvar (name);
1656 if (var)
1657 return var;
1658
1659 return create_internalvar (name);
1660 }
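/* Sketch of driving convenience variables from C code (using the
   lookup above together with the setters and value_of_internalvar
   defined further down; note the name carries no dollar sign):

     struct internalvar *var = lookup_internalvar ("foo");    i.e. $foo
     set_internalvar_integer (var, 42);
     struct value *val
       = value_of_internalvar (get_current_arch (), var);     */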
1661
1662 /* Return current value of internal variable VAR. For variables that
1663 are not inherently typed, use a value type appropriate for GDBARCH. */
1664
1665 struct value *
1666 value_of_internalvar (struct gdbarch *gdbarch, struct internalvar *var)
1667 {
1668 struct value *val;
1669 struct trace_state_variable *tsv;
1670
1671 /* If there is a trace state variable of the same name, assume that
1672 is what we really want to see. */
1673 tsv = find_trace_state_variable (var->name);
1674 if (tsv)
1675 {
1676 tsv->value_known = target_get_trace_state_variable_value (tsv->number,
1677 &(tsv->value));
1678 if (tsv->value_known)
1679 val = value_from_longest (builtin_type (gdbarch)->builtin_int64,
1680 tsv->value);
1681 else
1682 val = allocate_value (builtin_type (gdbarch)->builtin_void);
1683 return val;
1684 }
1685
1686 switch (var->kind)
1687 {
1688 case INTERNALVAR_VOID:
1689 val = allocate_value (builtin_type (gdbarch)->builtin_void);
1690 break;
1691
1692 case INTERNALVAR_FUNCTION:
1693 val = allocate_value (builtin_type (gdbarch)->internal_fn);
1694 break;
1695
1696 case INTERNALVAR_INTEGER:
1697 if (!var->u.integer.type)
1698 val = value_from_longest (builtin_type (gdbarch)->builtin_int,
1699 var->u.integer.val);
1700 else
1701 val = value_from_longest (var->u.integer.type, var->u.integer.val);
1702 break;
1703
1704 case INTERNALVAR_STRING:
1705 val = value_cstring (var->u.string, strlen (var->u.string),
1706 builtin_type (gdbarch)->builtin_char);
1707 break;
1708
1709 case INTERNALVAR_VALUE:
1710 val = value_copy (var->u.value);
1711 if (value_lazy (val))
1712 value_fetch_lazy (val);
1713 break;
1714
1715 case INTERNALVAR_MAKE_VALUE:
1716 val = (*var->u.make_value) (gdbarch, var);
1717 break;
1718
1719 default:
1720 internal_error (__FILE__, __LINE__, _("bad kind"));
1721 }
1722
1723 /* Change the VALUE_LVAL to lval_internalvar so that future operations
1724 on this value go back to affect the original internal variable.
1725
1726 Do not do this for INTERNALVAR_MAKE_VALUE variables, as those have
1727      no underlying modifiable state in the internal variable.
1728
1729 Likewise, if the variable's value is a computed lvalue, we want
1730 references to it to produce another computed lvalue, where
1731 references and assignments actually operate through the
1732 computed value's functions.
1733
1734 This means that internal variables with computed values
1735 behave a little differently from other internal variables:
1736 assignments to them don't just replace the previous value
1737 altogether. At the moment, this seems like the behavior we
1738 want. */
1739
1740 if (var->kind != INTERNALVAR_MAKE_VALUE
1741 && val->lval != lval_computed)
1742 {
1743 VALUE_LVAL (val) = lval_internalvar;
1744 VALUE_INTERNALVAR (val) = var;
1745 }
1746
1747 return val;
1748 }
1749
1750 int
1751 get_internalvar_integer (struct internalvar *var, LONGEST *result)
1752 {
1753 if (var->kind == INTERNALVAR_INTEGER)
1754 {
1755 *result = var->u.integer.val;
1756 return 1;
1757 }
1758
1759 if (var->kind == INTERNALVAR_VALUE)
1760 {
1761 struct type *type = check_typedef (value_type (var->u.value));
1762
1763 if (TYPE_CODE (type) == TYPE_CODE_INT)
1764 {
1765 *result = value_as_long (var->u.value);
1766 return 1;
1767 }
1768 }
1769
1770 return 0;
1771 }
1772
1773 static int
1774 get_internalvar_function (struct internalvar *var,
1775 struct internal_function **result)
1776 {
1777 switch (var->kind)
1778 {
1779 case INTERNALVAR_FUNCTION:
1780 *result = var->u.fn.function;
1781 return 1;
1782
1783 default:
1784 return 0;
1785 }
1786 }
1787
1788 void
1789 set_internalvar_component (struct internalvar *var, int offset, int bitpos,
1790 int bitsize, struct value *newval)
1791 {
1792 gdb_byte *addr;
1793
1794 switch (var->kind)
1795 {
1796 case INTERNALVAR_VALUE:
1797 addr = value_contents_writeable (var->u.value);
1798
1799 if (bitsize)
1800 modify_field (value_type (var->u.value), addr + offset,
1801 value_as_long (newval), bitpos, bitsize);
1802 else
1803 memcpy (addr + offset, value_contents (newval),
1804 TYPE_LENGTH (value_type (newval)));
1805 break;
1806
1807 default:
1808 /* We can never get a component of any other kind. */
1809 internal_error (__FILE__, __LINE__, _("set_internalvar_component"));
1810 }
1811 }
1812
1813 void
1814 set_internalvar (struct internalvar *var, struct value *val)
1815 {
1816 enum internalvar_kind new_kind;
1817 union internalvar_data new_data = { 0 };
1818
1819 if (var->kind == INTERNALVAR_FUNCTION && var->u.fn.canonical)
1820 error (_("Cannot overwrite convenience function %s"), var->name);
1821
1822 /* Prepare new contents. */
1823 switch (TYPE_CODE (check_typedef (value_type (val))))
1824 {
1825 case TYPE_CODE_VOID:
1826 new_kind = INTERNALVAR_VOID;
1827 break;
1828
1829 case TYPE_CODE_INTERNAL_FUNCTION:
1830 gdb_assert (VALUE_LVAL (val) == lval_internalvar);
1831 new_kind = INTERNALVAR_FUNCTION;
1832 get_internalvar_function (VALUE_INTERNALVAR (val),
1833 &new_data.fn.function);
1834 /* Copies created here are never canonical. */
1835 break;
1836
1837 default:
1838 new_kind = INTERNALVAR_VALUE;
1839 new_data.value = value_copy (val);
1840 new_data.value->modifiable = 1;
1841
1842 /* Force the value to be fetched from the target now, to avoid problems
1843 later when this internalvar is referenced and the target is gone or
1844 has changed. */
1845 if (value_lazy (new_data.value))
1846 value_fetch_lazy (new_data.value);
1847
1848 /* Release the value from the value chain to prevent it from being
1849 deleted by free_all_values. From here on this function should not
1850 call error () until new_data is installed into the var->u to avoid
1851 leaking memory. */
1852 release_value (new_data.value);
1853 break;
1854 }
1855
1856 /* Clean up old contents. */
1857 clear_internalvar (var);
1858
1859 /* Switch over. */
1860 var->kind = new_kind;
1861 var->u = new_data;
1862 /* End code which must not call error(). */
1863 }
1864
1865 void
1866 set_internalvar_integer (struct internalvar *var, LONGEST l)
1867 {
1868 /* Clean up old contents. */
1869 clear_internalvar (var);
1870
1871 var->kind = INTERNALVAR_INTEGER;
1872 var->u.integer.type = NULL;
1873 var->u.integer.val = l;
1874 }
1875
1876 void
1877 set_internalvar_string (struct internalvar *var, const char *string)
1878 {
1879 /* Clean up old contents. */
1880 clear_internalvar (var);
1881
1882 var->kind = INTERNALVAR_STRING;
1883 var->u.string = xstrdup (string);
1884 }
1885
1886 static void
1887 set_internalvar_function (struct internalvar *var, struct internal_function *f)
1888 {
1889 /* Clean up old contents. */
1890 clear_internalvar (var);
1891
1892 var->kind = INTERNALVAR_FUNCTION;
1893 var->u.fn.function = f;
1894 var->u.fn.canonical = 1;
1895 /* Variables installed here are always the canonical version. */
1896 }
1897
1898 void
1899 clear_internalvar (struct internalvar *var)
1900 {
1901 /* Clean up old contents. */
1902 switch (var->kind)
1903 {
1904 case INTERNALVAR_VALUE:
1905 value_free (var->u.value);
1906 break;
1907
1908 case INTERNALVAR_STRING:
1909 xfree (var->u.string);
1910 break;
1911
1912 default:
1913 break;
1914 }
1915
1916 /* Reset to void kind. */
1917 var->kind = INTERNALVAR_VOID;
1918 }
1919
1920 char *
1921 internalvar_name (struct internalvar *var)
1922 {
1923 return var->name;
1924 }
1925
1926 static struct internal_function *
1927 create_internal_function (const char *name,
1928 internal_function_fn handler, void *cookie)
1929 {
1930 struct internal_function *ifn = XNEW (struct internal_function);
1931
1932 ifn->name = xstrdup (name);
1933 ifn->handler = handler;
1934 ifn->cookie = cookie;
1935 return ifn;
1936 }
1937
1938 char *
1939 value_internal_function_name (struct value *val)
1940 {
1941 struct internal_function *ifn;
1942 int result;
1943
1944 gdb_assert (VALUE_LVAL (val) == lval_internalvar);
1945 result = get_internalvar_function (VALUE_INTERNALVAR (val), &ifn);
1946 gdb_assert (result);
1947
1948 return ifn->name;
1949 }
1950
1951 struct value *
1952 call_internal_function (struct gdbarch *gdbarch,
1953 const struct language_defn *language,
1954 struct value *func, int argc, struct value **argv)
1955 {
1956 struct internal_function *ifn;
1957 int result;
1958
1959 gdb_assert (VALUE_LVAL (func) == lval_internalvar);
1960 result = get_internalvar_function (VALUE_INTERNALVAR (func), &ifn);
1961 gdb_assert (result);
1962
1963 return (*ifn->handler) (gdbarch, language, ifn->cookie, argc, argv);
1964 }
1965
1966 /* The 'function' command. This does nothing -- it is just a
1967 placeholder to let "help function NAME" work. This is also used as
1968 the implementation of the sub-command that is created when
1969 registering an internal function. */
1970 static void
1971 function_command (char *command, int from_tty)
1972 {
1973 /* Do nothing. */
1974 }
1975
1976 /* Clean up if an internal function's command is destroyed. */
1977 static void
1978 function_destroyer (struct cmd_list_element *self, void *ignore)
1979 {
1980 xfree (self->name);
1981 xfree (self->doc);
1982 }
1983
1984 /* Add a new internal function. NAME is the name of the function; DOC
1985 is a documentation string describing the function. HANDLER is
1986 called when the function is invoked. COOKIE is an arbitrary
1987 pointer which is passed to HANDLER and is intended for "user
1988 data". */
1989 void
1990 add_internal_function (const char *name, const char *doc,
1991 internal_function_fn handler, void *cookie)
1992 {
1993 struct cmd_list_element *cmd;
1994 struct internal_function *ifn;
1995 struct internalvar *var = lookup_internalvar (name);
1996
1997 ifn = create_internal_function (name, handler, cookie);
1998 set_internalvar_function (var, ifn);
1999
2000 cmd = add_cmd (xstrdup (name), no_class, function_command, (char *) doc,
2001 &functionlist);
2002 cmd->destroyer = function_destroyer;
2003 }
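/* Sketch of registering a convenience function with a hypothetical
   handler (the handler's signature mirrors the call made in
   call_internal_function above):

     static struct value *
     nargs_handler (struct gdbarch *gdbarch,
                    const struct language_defn *language,
                    void *cookie, int argc, struct value **argv)
     {
       return value_from_longest (builtin_type (gdbarch)->builtin_int,
                                  argc);
     }

     ...
     add_internal_function ("_nargs", _("Return the argument count."),
                            nargs_handler, NULL);

   after which the user can evaluate `$_nargs (1, 2, 3)'.  */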
2004
2005 /* Update VALUE before discarding OBJFILE. COPIED_TYPES is used to
2006 prevent cycles / duplicates. */
2007
2008 void
2009 preserve_one_value (struct value *value, struct objfile *objfile,
2010 htab_t copied_types)
2011 {
2012 if (TYPE_OBJFILE (value->type) == objfile)
2013 value->type = copy_type_recursive (objfile, value->type, copied_types);
2014
2015 if (TYPE_OBJFILE (value->enclosing_type) == objfile)
2016 value->enclosing_type = copy_type_recursive (objfile,
2017 value->enclosing_type,
2018 copied_types);
2019 }
2020
2021 /* Likewise for internal variable VAR. */
2022
2023 static void
2024 preserve_one_internalvar (struct internalvar *var, struct objfile *objfile,
2025 htab_t copied_types)
2026 {
2027 switch (var->kind)
2028 {
2029 case INTERNALVAR_INTEGER:
2030 if (var->u.integer.type && TYPE_OBJFILE (var->u.integer.type) == objfile)
2031 var->u.integer.type
2032 = copy_type_recursive (objfile, var->u.integer.type, copied_types);
2033 break;
2034
2035 case INTERNALVAR_VALUE:
2036 preserve_one_value (var->u.value, objfile, copied_types);
2037 break;
2038 }
2039 }
2040
2041 /* Update the internal variables and value history when OBJFILE is
2042 discarded; we must copy the types out of the objfile. New global types
2043 will be created for every convenience variable which currently points to
2044 this objfile's types, and the convenience variables will be adjusted to
2045 use the new global types. */
2046
2047 void
2048 preserve_values (struct objfile *objfile)
2049 {
2050 htab_t copied_types;
2051 struct value_history_chunk *cur;
2052 struct internalvar *var;
2053 int i;
2054
2055 /* Create the hash table. We allocate on the objfile's obstack, since
2056 it is soon to be deleted. */
2057 copied_types = create_copied_types_hash (objfile);
2058
2059 for (cur = value_history_chain; cur; cur = cur->next)
2060 for (i = 0; i < VALUE_HISTORY_CHUNK; i++)
2061 if (cur->values[i])
2062 preserve_one_value (cur->values[i], objfile, copied_types);
2063
2064 for (var = internalvars; var; var = var->next)
2065 preserve_one_internalvar (var, objfile, copied_types);
2066
2067 preserve_python_values (objfile, copied_types);
2068
2069 htab_delete (copied_types);
2070 }
2071
2072 static void
2073 show_convenience (char *ignore, int from_tty)
2074 {
2075 struct gdbarch *gdbarch = get_current_arch ();
2076 struct internalvar *var;
2077 int varseen = 0;
2078 struct value_print_options opts;
2079
2080 get_user_print_options (&opts);
2081 for (var = internalvars; var; var = var->next)
2082 {
2083 if (!varseen)
2084 {
2085 varseen = 1;
2086 }
2087 printf_filtered (("$%s = "), var->name);
2088 value_print (value_of_internalvar (gdbarch, var), gdb_stdout,
2089 &opts);
2090 printf_filtered (("\n"));
2091 }
2092 if (!varseen)
2093 printf_unfiltered (_("No debugger convenience variables now defined.\n"
2094 "Convenience variables have "
2095 "names starting with \"$\";\n"
2096 "use \"set\" as in \"set "
2097 "$foo = 5\" to define them.\n"));
2098 }
2099 \f
2100 /* Extract a value as a C number (either long or double).
2101 Knows how to convert fixed values to double, or
2102 floating values to long.
2103 Does not deallocate the value. */
2104
2105 LONGEST
2106 value_as_long (struct value *val)
2107 {
2108 /* This coerces arrays and functions, which is necessary (e.g.
2109 in disassemble_command). It also dereferences references, which
2110 I suspect is the most logical thing to do. */
2111 val = coerce_array (val);
2112 return unpack_long (value_type (val), value_contents (val));
2113 }
2114
2115 DOUBLEST
2116 value_as_double (struct value *val)
2117 {
2118 DOUBLEST foo;
2119 int inv;
2120
2121 foo = unpack_double (value_type (val), value_contents (val), &inv);
2122 if (inv)
2123 error (_("Invalid floating value found in program."));
2124 return foo;
2125 }
2126
2127 /* Extract a value as a C pointer. Does not deallocate the value.
2128 Note that val's type may not actually be a pointer; value_as_long
2129 handles all the cases. */
2130 CORE_ADDR
2131 value_as_address (struct value *val)
2132 {
2133 struct gdbarch *gdbarch = get_type_arch (value_type (val));
2134
2135 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
2136 whether we want this to be true eventually. */
2137 #if 0
2138 /* gdbarch_addr_bits_remove is wrong if we are being called for a
2139 non-address (e.g. argument to "signal", "info break", etc.), or
2140 for pointers to char, in which the low bits *are* significant. */
2141 return gdbarch_addr_bits_remove (gdbarch, value_as_long (val));
2142 #else
2143
2144 /* There are several targets (IA-64, PowerPC, and others) which
2145 don't represent pointers to functions as simply the address of
2146 the function's entry point. For example, on the IA-64, a
2147 function pointer points to a two-word descriptor, generated by
2148 the linker, which contains the function's entry point, and the
2149 value the IA-64 "global pointer" register should have --- to
2150 support position-independent code. The linker generates
2151 descriptors only for those functions whose addresses are taken.
2152
2153 On such targets, it's difficult for GDB to convert an arbitrary
2154 function address into a function pointer; it has to either find
2155 an existing descriptor for that function, or call malloc and
2156 build its own. On some targets, it is impossible for GDB to
2157 build a descriptor at all: the descriptor must contain a jump
2158 instruction; data memory cannot be executed; and code memory
2159 cannot be modified.
2160
2161 Upon entry to this function, if VAL is a value of type `function'
2162 (that is, TYPE_CODE (value_type (val)) == TYPE_CODE_FUNC), then
2163 value_address (val) is the address of the function. This is what
2164 you'll get if you evaluate an expression like `main'. The call
2165 to COERCE_ARRAY below actually does all the usual unary
2166 conversions, which includes converting values of type `function'
2167 to `pointer to function'. This is the challenging conversion
2168 discussed above. Then, `unpack_long' will convert that pointer
2169 back into an address.
2170
2171 So, suppose the user types `disassemble foo' on an architecture
2172 with a strange function pointer representation, on which GDB
2173 cannot build its own descriptors, and suppose further that `foo'
2174 has no linker-built descriptor. The address->pointer conversion
2175 will signal an error and prevent the command from running, even
2176 though the next step would have been to convert the pointer
2177 directly back into the same address.
2178
2179 The following shortcut avoids this whole mess. If VAL is a
2180 function, just return its address directly. */
2181 if (TYPE_CODE (value_type (val)) == TYPE_CODE_FUNC
2182 || TYPE_CODE (value_type (val)) == TYPE_CODE_METHOD)
2183 return value_address (val);
2184
2185 val = coerce_array (val);
2186
2187 /* Some architectures (e.g. Harvard) map instruction and data
2188 addresses onto a single large unified address space. For
2189 instance: an architecture may consider a large integer in the
2190 range 0x10000000 .. 0x1000ffff to already represent a data
2191 address (hence no pointer-to-address conversion is needed) while
2192 a small integer would still need to be converted from integer to
2193 pointer to address. Just assume such architectures handle all
2194 integer conversions in a single function. */
2195
2196 /* JimB writes:
2197
2198 I think INTEGER_TO_ADDRESS is a good idea as proposed --- but we
2199 must admonish GDB hackers to make sure its behavior matches the
2200 compiler's, whenever possible.
2201
2202 In general, I think GDB should evaluate expressions the same way
2203 the compiler does. When the user copies an expression out of
2204 their source code and hands it to a `print' command, they should
2205 get the same value the compiler would have computed. Any
2206 deviation from this rule can cause major confusion and annoyance,
2207 and needs to be justified carefully. In other words, GDB doesn't
2208 really have the freedom to do these conversions in clever and
2209 useful ways.
2210
2211 AndrewC pointed out that users aren't complaining about how GDB
2212 casts integers to pointers; they are complaining that they can't
2213 take an address from a disassembly listing and give it to `x/i'.
2214 This is certainly important.
2215
2216 Adding an architecture method like integer_to_address() certainly
2217 makes it possible for GDB to "get it right" in all circumstances
2218 --- the target has complete control over how things get done, so
2219 people can Do The Right Thing for their target without breaking
2220 anyone else. The standard doesn't specify how integers get
2221 converted to pointers; usually, the ABI doesn't either, but
2222 ABI-specific code is a more reasonable place to handle it. */
2223
2224 if (TYPE_CODE (value_type (val)) != TYPE_CODE_PTR
2225 && TYPE_CODE (value_type (val)) != TYPE_CODE_REF
2226 && gdbarch_integer_to_address_p (gdbarch))
2227 return gdbarch_integer_to_address (gdbarch, value_type (val),
2228 value_contents (val));
2229
2230 return unpack_long (value_type (val), value_contents (val));
2231 #endif
2232 }
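
/* Illustrative sketch, not part of GDB: how a command might turn a
   user expression into a target address.  parse_and_eval and paddress
   are existing GDB interfaces; the expression string and the variable
   names are made up for the example.  */
#if 0
{
  struct value *val = parse_and_eval ("main");
  /* For a TYPE_CODE_FUNC value this takes the shortcut above and
     returns the function's entry address directly.  */
  CORE_ADDR addr = value_as_address (val);

  printf_filtered ("%s\n",
		   paddress (get_type_arch (value_type (val)), addr));
}
#endif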
2233 \f
2234 /* Unpack raw data (copied from the debuggee, target byte order) at VALADDR
2235 as a long, or as a double, assuming the raw data is described
2236 by type TYPE. Knows how to convert different sizes of values
2237 and can convert between fixed and floating point. We don't assume
2238 any alignment for the raw data. Return value is in host byte order.
2239
2240 If you want functions and arrays to be coerced to pointers, and
2241 references to be dereferenced, call value_as_long() instead.
2242
2243 C++: It is assumed that the front-end has taken care of
2244 all matters concerning pointers to members. A pointer
2245 to member which reaches here is considered to be equivalent
2246 to an INT (of some size). After all, it is only an offset. */
2247
2248 LONGEST
2249 unpack_long (struct type *type, const gdb_byte *valaddr)
2250 {
2251 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
2252 enum type_code code = TYPE_CODE (type);
2253 int len = TYPE_LENGTH (type);
2254 int nosign = TYPE_UNSIGNED (type);
2255
2256 switch (code)
2257 {
2258 case TYPE_CODE_TYPEDEF:
2259 return unpack_long (check_typedef (type), valaddr);
2260 case TYPE_CODE_ENUM:
2261 case TYPE_CODE_FLAGS:
2262 case TYPE_CODE_BOOL:
2263 case TYPE_CODE_INT:
2264 case TYPE_CODE_CHAR:
2265 case TYPE_CODE_RANGE:
2266 case TYPE_CODE_MEMBERPTR:
2267 if (nosign)
2268 return extract_unsigned_integer (valaddr, len, byte_order);
2269 else
2270 return extract_signed_integer (valaddr, len, byte_order);
2271
2272 case TYPE_CODE_FLT:
2273 return extract_typed_floating (valaddr, type);
2274
2275 case TYPE_CODE_DECFLOAT:
2276 /* libdecnumber has a function to convert from decimal to integer, but
2277 it doesn't work when the decimal number has a fractional part. */
2278 return decimal_to_doublest (valaddr, len, byte_order);
2279
2280 case TYPE_CODE_PTR:
2281 case TYPE_CODE_REF:
2282 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
2283 whether we want this to be true eventually. */
2284 return extract_typed_address (valaddr, type);
2285
2286 default:
2287 error (_("Value can't be converted to integer."));
2288 }
2289 return 0; /* Placate lint. */
2290 }
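
/* Worked example, illustrative only: for a 2-byte signed integer type
   on a big-endian target, the raw bytes { 0xff, 0xfe } unpack to -2,
   while the same bytes for an unsigned type unpack to 65534.  Pointer
   and reference types instead go through extract_typed_address.  */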
2291
2292 /* Return a double value from the specified type and address.
2293 INVP points to an int which is set to 0 for valid value,
2294 1 for invalid value (bad float format). In either case,
2295 the returned double is OK to use. Argument is in target
2296 format, result is in host format. */
2297
2298 DOUBLEST
2299 unpack_double (struct type *type, const gdb_byte *valaddr, int *invp)
2300 {
2301 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
2302 enum type_code code;
2303 int len;
2304 int nosign;
2305
2306 *invp = 0; /* Assume valid. */
2307 CHECK_TYPEDEF (type);
2308 code = TYPE_CODE (type);
2309 len = TYPE_LENGTH (type);
2310 nosign = TYPE_UNSIGNED (type);
2311 if (code == TYPE_CODE_FLT)
2312 {
2313 /* NOTE: cagney/2002-02-19: There was a test here to see if the
2314 floating-point value was valid (using the macro
2315 INVALID_FLOAT). That test/macro have been removed.
2316
2317 It turns out that only the VAX defined this macro and then
2318 only in a non-portable way. Fixing the portability problem
2319 wouldn't help since the VAX floating-point code is also badly
2320 bit-rotten. The target needs to add definitions for the
2321 methods gdbarch_float_format and gdbarch_double_format - these
2322 exactly describe the target floating-point format. The
2323 problem here is that the corresponding floatformat_vax_f and
2324 floatformat_vax_d values these methods should be set to are
2325 also not defined either. Oops!
2326
2327 Hopefully someone will add both the missing floatformat
2328 definitions and the new cases for floatformat_is_valid (). */
2329
2330 if (!floatformat_is_valid (floatformat_from_type (type), valaddr))
2331 {
2332 *invp = 1;
2333 return 0.0;
2334 }
2335
2336 return extract_typed_floating (valaddr, type);
2337 }
2338 else if (code == TYPE_CODE_DECFLOAT)
2339 return decimal_to_doublest (valaddr, len, byte_order);
2340 else if (nosign)
2341 {
2342 /* Unsigned -- be sure we compensate for signed LONGEST. */
2343 return (ULONGEST) unpack_long (type, valaddr);
2344 }
2345 else
2346 {
2347 /* Signed -- we are OK with unpack_long. */
2348 return unpack_long (type, valaddr);
2349 }
2350 }
2351
2352 /* Unpack raw data (copied from the debuggee, target byte order) at VALADDR
2353 as a CORE_ADDR, assuming the raw data is described by type TYPE.
2354 We don't assume any alignment for the raw data. Return value is in
2355 host byte order.
2356
2357 If you want functions and arrays to be coerced to pointers, and
2358 references to be dereferenced, call value_as_address() instead.
2359
2360 C++: It is assumed that the front-end has taken care of
2361 all matters concerning pointers to members. A pointer
2362 to member which reaches here is considered to be equivalent
2363 to an INT (of some size). After all, it is only an offset. */
2364
2365 CORE_ADDR
2366 unpack_pointer (struct type *type, const gdb_byte *valaddr)
2367 {
2368 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
2369 whether we want this to be true eventually. */
2370 return unpack_long (type, valaddr);
2371 }
2372
2373 \f
2374 /* Get the value of the FIELDNO'th field (which must be static) of
2375 TYPE. Return NULL if the field doesn't exist or has been
2376 optimized out. */
2377
2378 struct value *
2379 value_static_field (struct type *type, int fieldno)
2380 {
2381 struct value *retval;
2382
2383 switch (TYPE_FIELD_LOC_KIND (type, fieldno))
2384 {
2385 case FIELD_LOC_KIND_PHYSADDR:
2386 retval = value_at_lazy (TYPE_FIELD_TYPE (type, fieldno),
2387 TYPE_FIELD_STATIC_PHYSADDR (type, fieldno));
2388 break;
2389 case FIELD_LOC_KIND_PHYSNAME:
2390 {
2391 char *phys_name = TYPE_FIELD_STATIC_PHYSNAME (type, fieldno);
2392 /* TYPE_FIELD_NAME (type, fieldno); */
2393 struct symbol *sym = lookup_symbol (phys_name, 0, VAR_DOMAIN, 0);
2394
2395 if (sym == NULL)
2396 {
2397 /* With some compilers, e.g. HP aCC, static data members are
2398 reported as non-debuggable symbols. */
2399 struct minimal_symbol *msym = lookup_minimal_symbol (phys_name,
2400 NULL, NULL);
2401
2402 if (!msym)
2403 return NULL;
2404 else
2405 {
2406 retval = value_at_lazy (TYPE_FIELD_TYPE (type, fieldno),
2407 SYMBOL_VALUE_ADDRESS (msym));
2408 }
2409 }
2410 else
2411 retval = value_of_variable (sym, NULL);
2412 break;
2413 }
2414 default:
2415 gdb_assert_not_reached ("unexpected field location kind");
2416 }
2417
2418 return retval;
2419 }
2420
2421 /* Change the enclosing type of a value object VAL to NEW_ENCL_TYPE.
2422 You have to be careful here, since the size of the data area for the value
2423 is set by the length of the enclosing type. So if NEW_ENCL_TYPE is bigger
2424 than the old enclosing type, you have to allocate more space for the
2425 data. */
2426
2427 void
2428 set_value_enclosing_type (struct value *val, struct type *new_encl_type)
2429 {
2430 if (TYPE_LENGTH (new_encl_type) > TYPE_LENGTH (value_enclosing_type (val)))
2431 val->contents =
2432 (gdb_byte *) xrealloc (val->contents, TYPE_LENGTH (new_encl_type));
2433
2434 val->enclosing_type = new_encl_type;
2435 }
2436
2437 /* Given a value ARG1 (offset by OFFSET bytes)
2438 of a struct or union type ARG_TYPE,
2439 extract and return the value of one of its (non-static) fields.
2440 FIELDNO says which field. */
2441
2442 struct value *
2443 value_primitive_field (struct value *arg1, int offset,
2444 int fieldno, struct type *arg_type)
2445 {
2446 struct value *v;
2447 struct type *type;
2448
2449 CHECK_TYPEDEF (arg_type);
2450 type = TYPE_FIELD_TYPE (arg_type, fieldno);
2451
2452 /* Call check_typedef on our type to make sure that, if TYPE
2453 is a TYPE_CODE_TYPEDEF, its length is set to the length
2454 of the target type instead of zero. However, we do not
2455 replace the typedef type by the target type, because we want
2456 to keep the typedef in order to be able to print the type
2457 description correctly. */
2458 check_typedef (type);
2459
2460 /* Handle packed fields. */
2461
2462 if (TYPE_FIELD_BITSIZE (arg_type, fieldno))
2463 {
2464 /* Create a new value for the bitfield, with bitpos and bitsize
2465 set. If possible, arrange offset and bitpos so that we can
2466 do a single aligned read of the size of the containing type.
2467 Otherwise, adjust offset to the byte containing the first
2468 bit. Assume that the address, offset, and embedded offset
2469 are sufficiently aligned. */
2470 int bitpos = TYPE_FIELD_BITPOS (arg_type, fieldno);
2471 int container_bitsize = TYPE_LENGTH (type) * 8;
2472
2473 v = allocate_value_lazy (type);
2474 v->bitsize = TYPE_FIELD_BITSIZE (arg_type, fieldno);
2475 if ((bitpos % container_bitsize) + v->bitsize <= container_bitsize
2476 && TYPE_LENGTH (type) <= (int) sizeof (LONGEST))
2477 v->bitpos = bitpos % container_bitsize;
2478 else
2479 v->bitpos = bitpos % 8;
2480 v->offset = (value_embedded_offset (arg1)
2481 + offset
2482 + (bitpos - v->bitpos) / 8);
2483 v->parent = arg1;
2484 value_incref (v->parent);
2485 if (!value_lazy (arg1))
2486 value_fetch_lazy (v);
2487 }
2488 else if (fieldno < TYPE_N_BASECLASSES (arg_type))
2489 {
2490 /* This field is actually a base subobject, so preserve the
2491 entire object's contents for later references to virtual
2492 bases, etc. */
2493
2494 /* Lazy register values with offsets are not supported. */
2495 if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
2496 value_fetch_lazy (arg1);
2497
2498 if (value_lazy (arg1))
2499 v = allocate_value_lazy (value_enclosing_type (arg1));
2500 else
2501 {
2502 v = allocate_value (value_enclosing_type (arg1));
2503 value_contents_copy_raw (v, 0, arg1, 0,
2504 TYPE_LENGTH (value_enclosing_type (arg1)));
2505 }
2506 v->type = type;
2507 v->offset = value_offset (arg1);
2508 v->embedded_offset = (offset + value_embedded_offset (arg1)
2509 + TYPE_FIELD_BITPOS (arg_type, fieldno) / 8);
2510 }
2511 else
2512 {
2513 /* Plain old data member. */
2514 offset += TYPE_FIELD_BITPOS (arg_type, fieldno) / 8;
2515
2516 /* Lazy register values with offsets are not supported. */
2517 if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
2518 value_fetch_lazy (arg1);
2519
2520 if (value_lazy (arg1))
2521 v = allocate_value_lazy (type);
2522 else
2523 {
2524 v = allocate_value (type);
2525 value_contents_copy_raw (v, value_embedded_offset (v),
2526 arg1, value_embedded_offset (arg1) + offset,
2527 TYPE_LENGTH (type));
2528 }
2529 v->offset = (value_offset (arg1) + offset
2530 + value_embedded_offset (arg1));
2531 }
2532 set_value_component_location (v, arg1);
2533 VALUE_REGNUM (v) = VALUE_REGNUM (arg1);
2534 VALUE_FRAME_ID (v) = VALUE_FRAME_ID (arg1);
2535 return v;
2536 }
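
/* Worked example, illustrative only, for the packed-field case above:
   with a bitfield of bitsize 3 at bitpos 70 whose containing type is a
   4-byte int (container_bitsize == 32), 70 % 32 + 3 <= 32 holds, so
   v->bitpos becomes 70 % 32 == 6 and the value's offset is advanced by
   (70 - 6) / 8 == 8 bytes, allowing a single aligned read of the
   containing int.  */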
2537
2538 /* Given a value ARG1 of a struct or union type,
2539 extract and return the value of one of its (non-static) fields.
2540 FIELDNO says which field. */
2541
2542 struct value *
2543 value_field (struct value *arg1, int fieldno)
2544 {
2545 return value_primitive_field (arg1, 0, fieldno, value_type (arg1));
2546 }
2547
2548 /* Return a non-virtual function as a value.
2549 F is the list of member functions which contains the desired method.
2550 J is an index into F which provides the desired method.
2551
2552 We only use the symbol for its address, so be happy with either a
2553 full symbol or a minimal symbol. */
2554
2555 struct value *
2556 value_fn_field (struct value **arg1p, struct fn_field *f,
2557 int j, struct type *type,
2558 int offset)
2559 {
2560 struct value *v;
2561 struct type *ftype = TYPE_FN_FIELD_TYPE (f, j);
2562 char *physname = TYPE_FN_FIELD_PHYSNAME (f, j);
2563 struct symbol *sym;
2564 struct minimal_symbol *msym;
2565
2566 sym = lookup_symbol (physname, 0, VAR_DOMAIN, 0);
2567 if (sym != NULL)
2568 {
2569 msym = NULL;
2570 }
2571 else
2572 {
2573 gdb_assert (sym == NULL);
2574 msym = lookup_minimal_symbol (physname, NULL, NULL);
2575 if (msym == NULL)
2576 return NULL;
2577 }
2578
2579 v = allocate_value (ftype);
2580 if (sym)
2581 {
2582 set_value_address (v, BLOCK_START (SYMBOL_BLOCK_VALUE (sym)));
2583 }
2584 else
2585 {
2586 /* The minimal symbol might point to a function descriptor;
2587 resolve it to the actual code address instead. */
2588 struct objfile *objfile = msymbol_objfile (msym);
2589 struct gdbarch *gdbarch = get_objfile_arch (objfile);
2590
2591 set_value_address (v,
2592 gdbarch_convert_from_func_ptr_addr
2593 (gdbarch, SYMBOL_VALUE_ADDRESS (msym), &current_target));
2594 }
2595
2596 if (arg1p)
2597 {
2598 if (type != value_type (*arg1p))
2599 *arg1p = value_ind (value_cast (lookup_pointer_type (type),
2600 value_addr (*arg1p)));
2601
2602 /* Move the `this' pointer according to the offset.
2603 VALUE_OFFSET (*arg1p) += offset; */
2604 }
2605
2606 return v;
2607 }
2608
2609 \f
2610
2611 /* Helper function for both unpack_value_bits_as_long and
2612 unpack_bits_as_long. See those functions for more details on the
2613 interface; the only difference is that this function accepts either
2614 a NULL or a non-NULL ORIGINAL_VALUE. */
2615
2616 static int
2617 unpack_value_bits_as_long_1 (struct type *field_type, const gdb_byte *valaddr,
2618 int embedded_offset, int bitpos, int bitsize,
2619 const struct value *original_value,
2620 LONGEST *result)
2621 {
2622 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (field_type));
2623 ULONGEST val;
2624 ULONGEST valmask;
2625 int lsbcount;
2626 int bytes_read;
2627 int read_offset;
2628
2629 /* Read the minimum number of bytes required; there may not be
2630 enough bytes to read an entire ULONGEST. */
2631 CHECK_TYPEDEF (field_type);
2632 if (bitsize)
2633 bytes_read = ((bitpos % 8) + bitsize + 7) / 8;
2634 else
2635 bytes_read = TYPE_LENGTH (field_type);
2636
2637 read_offset = bitpos / 8;
2638
2639 if (original_value != NULL
2640 && !value_bytes_available (original_value, embedded_offset + read_offset,
2641 bytes_read))
2642 return 0;
2643
2644 val = extract_unsigned_integer (valaddr + embedded_offset + read_offset,
2645 bytes_read, byte_order);
2646
2647 /* Extract bits. See the comment on unpack_value_bits_as_long below. */
2648
2649 if (gdbarch_bits_big_endian (get_type_arch (field_type)))
2650 lsbcount = (bytes_read * 8 - bitpos % 8 - bitsize);
2651 else
2652 lsbcount = (bitpos % 8);
2653 val >>= lsbcount;
2654
2655 /* If the field does not entirely fill a LONGEST, then zero the sign bits.
2656 If the field is signed, and is negative, then sign extend. */
2657
2658 if ((bitsize > 0) && (bitsize < 8 * (int) sizeof (val)))
2659 {
2660 valmask = (((ULONGEST) 1) << bitsize) - 1;
2661 val &= valmask;
2662 if (!TYPE_UNSIGNED (field_type))
2663 {
2664 if (val & (valmask ^ (valmask >> 1)))
2665 {
2666 val |= ~valmask;
2667 }
2668 }
2669 }
2670
2671 *result = val;
2672 return 1;
2673 }
2674
2675 /* Unpack a bitfield of the specified FIELD_TYPE, from the object at
2676 VALADDR + EMBEDDED_OFFSET, and store the result in *RESULT.
2677 VALADDR points to the contents of ORIGINAL_VALUE, which must not be
2678 NULL. The bitfield starts at BITPOS bits and contains BITSIZE
2679 bits.
2680
2681 Returns false if the value contents are unavailable, otherwise
2682 returns true, indicating a valid value has been stored in *RESULT.
2683
2684 Extracting bits depends on endianness of the machine. Compute the
2685 number of least significant bits to discard. For big endian machines,
2686 we compute the total number of bits in the anonymous object, subtract
2687 off the bit count from the MSB of the object to the MSB of the
2688 bitfield, then the size of the bitfield, which leaves the LSB discard
2689 count. For little endian machines, the discard count is simply the
2690 number of bits from the LSB of the anonymous object to the LSB of the
2691 bitfield.
2692
2693 If the field is signed, we also do sign extension. */
2694
2695 int
2696 unpack_value_bits_as_long (struct type *field_type, const gdb_byte *valaddr,
2697 int embedded_offset, int bitpos, int bitsize,
2698 const struct value *original_value,
2699 LONGEST *result)
2700 {
2701 gdb_assert (original_value != NULL);
2702
2703 return unpack_value_bits_as_long_1 (field_type, valaddr, embedded_offset,
2704 bitpos, bitsize, original_value, result);
2705
2706 }
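
/* Worked example, illustrative only: for BITPOS == 9 and BITSIZE == 3
   the helper above reads ((9 % 8) + 3 + 7) / 8 == 1 byte starting at
   offset 9 / 8 == 1.  On a little-endian target lsbcount is 9 % 8 == 1;
   on a big-endian target it is 1 * 8 - 1 - 3 == 4.  After the shift,
   the low bits are masked with valmask == 0x7 and, for a signed field
   whose top bit is set, sign-extended by ORing in ~valmask.  */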
2707
2708 /* Unpack a field FIELDNO of the specified TYPE, from the object at
2709 VALADDR + EMBEDDED_OFFSET. VALADDR points to the contents of
2710 ORIGINAL_VALUE. See unpack_value_bits_as_long for more
2711 details. */
2712
2713 static int
2714 unpack_value_field_as_long_1 (struct type *type, const gdb_byte *valaddr,
2715 int embedded_offset, int fieldno,
2716 const struct value *val, LONGEST *result)
2717 {
2718 int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
2719 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
2720 struct type *field_type = TYPE_FIELD_TYPE (type, fieldno);
2721
2722 return unpack_value_bits_as_long_1 (field_type, valaddr, embedded_offset,
2723 bitpos, bitsize, val,
2724 result);
2725 }
2726
2727 /* Unpack a field FIELDNO of the specified TYPE, from the object at
2728 VALADDR + EMBEDDED_OFFSET. VALADDR points to the contents of
2729 ORIGINAL_VALUE, which must not be NULL. See
2730 unpack_value_bits_as_long for more details. */
2731
2732 int
2733 unpack_value_field_as_long (struct type *type, const gdb_byte *valaddr,
2734 int embedded_offset, int fieldno,
2735 const struct value *val, LONGEST *result)
2736 {
2737 gdb_assert (val != NULL);
2738
2739 return unpack_value_field_as_long_1 (type, valaddr, embedded_offset,
2740 fieldno, val, result);
2741 }
2742
2743 /* Unpack a field FIELDNO of the specified TYPE, from the anonymous
2744 object at VALADDR. See unpack_value_bits_as_long for more details.
2745 This function differs from unpack_value_field_as_long in that it
2746 operates without a struct value object. */
2747
2748 LONGEST
2749 unpack_field_as_long (struct type *type, const gdb_byte *valaddr, int fieldno)
2750 {
2751 LONGEST result;
2752
2753 unpack_value_field_as_long_1 (type, valaddr, 0, fieldno, NULL, &result);
2754 return result;
2755 }
2756
2757 /* Return a new value with type TYPE, which is the FIELDNO'th field of
2758 the object at VALADDR + EMBEDDED_OFFSET. VALADDR points to the
2759 contents of VAL. If the contents of VAL required to extract the
2760 bitfield are unavailable, the new value is correspondingly marked as
2761 unavailable. */
2762
2763 struct value *
2764 value_field_bitfield (struct type *type, int fieldno,
2765 const gdb_byte *valaddr,
2766 int embedded_offset, const struct value *val)
2767 {
2768 LONGEST l;
2769
2770 if (!unpack_value_field_as_long (type, valaddr, embedded_offset, fieldno,
2771 val, &l))
2772 {
2773 struct type *field_type = TYPE_FIELD_TYPE (type, fieldno);
2774 struct value *retval = allocate_value (field_type);
2775 mark_value_bytes_unavailable (retval, 0, TYPE_LENGTH (field_type));
2776 return retval;
2777 }
2778 else
2779 {
2780 return value_from_longest (TYPE_FIELD_TYPE (type, fieldno), l);
2781 }
2782 }
2783
2784 /* Modify the value of a bitfield. ADDR points to a block of memory in
2785 target byte order; the bitfield starts in the byte pointed to. FIELDVAL
2786 is the desired value of the field, in host byte order. BITPOS and BITSIZE
2787 indicate which bits (in target bit order) comprise the bitfield.
2788 Requires 0 < BITSIZE <= lbits, 0 <= BITPOS % 8 + BITSIZE <= lbits, and
2789 0 <= BITPOS, where lbits is the size of a LONGEST in bits. */
2790
2791 void
2792 modify_field (struct type *type, gdb_byte *addr,
2793 LONGEST fieldval, int bitpos, int bitsize)
2794 {
2795 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
2796 ULONGEST oword;
2797 ULONGEST mask = (ULONGEST) -1 >> (8 * sizeof (ULONGEST) - bitsize);
2798 int bytesize;
2799
2800 /* Normalize BITPOS. */
2801 addr += bitpos / 8;
2802 bitpos %= 8;
2803
2804 /* If a negative fieldval fits in the field in question, chop
2805 off the sign extension bits. */
2806 if ((~fieldval & ~(mask >> 1)) == 0)
2807 fieldval &= mask;
2808
2809 /* Warn if value is too big to fit in the field in question. */
2810 if (0 != (fieldval & ~mask))
2811 {
2812 /* FIXME: would like to include fieldval in the message, but
2813 we don't have a sprintf_longest. */
2814 warning (_("Value does not fit in %d bits."), bitsize);
2815
2816 /* Truncate it, otherwise adjoining fields may be corrupted. */
2817 fieldval &= mask;
2818 }
2819
2820 /* Ensure no bytes outside of the modified ones get accessed, as that
2821 may cause false valgrind reports. */
2822
2823 bytesize = (bitpos + bitsize + 7) / 8;
2824 oword = extract_unsigned_integer (addr, bytesize, byte_order);
2825
2826 /* Shifting for bit field depends on endianness of the target machine. */
2827 if (gdbarch_bits_big_endian (get_type_arch (type)))
2828 bitpos = bytesize * 8 - bitpos - bitsize;
2829
2830 oword &= ~(mask << bitpos);
2831 oword |= fieldval << bitpos;
2832
2833 store_unsigned_integer (addr, bytesize, byte_order, oword);
2834 }
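
/* Worked example, illustrative only: modify_field (type, addr, -1, 9, 3)
   normalizes to addr + 1 with BITPOS == 1, reduces the negative
   FIELDVAL to the 3-bit pattern 0x7 (MASK == 0x7), reads back
   bytesize == 1 byte, clears the three target bits (starting at bit 1
   on a little-endian target, bit 4 after the big-endian adjustment),
   ORs in the new pattern, and stores the byte back.  */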
2835 \f
2836 /* Pack NUM into BUF using a target format of TYPE. */
2837
2838 void
2839 pack_long (gdb_byte *buf, struct type *type, LONGEST num)
2840 {
2841 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
2842 int len;
2843
2844 type = check_typedef (type);
2845 len = TYPE_LENGTH (type);
2846
2847 switch (TYPE_CODE (type))
2848 {
2849 case TYPE_CODE_INT:
2850 case TYPE_CODE_CHAR:
2851 case TYPE_CODE_ENUM:
2852 case TYPE_CODE_FLAGS:
2853 case TYPE_CODE_BOOL:
2854 case TYPE_CODE_RANGE:
2855 case TYPE_CODE_MEMBERPTR:
2856 store_signed_integer (buf, len, byte_order, num);
2857 break;
2858
2859 case TYPE_CODE_REF:
2860 case TYPE_CODE_PTR:
2861 store_typed_address (buf, type, (CORE_ADDR) num);
2862 break;
2863
2864 default:
2865 error (_("Unexpected type (%d) encountered for integer constant."),
2866 TYPE_CODE (type));
2867 }
2868 }
2869
2870
2871 /* Pack unsigned NUM into BUF using a target format of TYPE. */
2872
2873 void
2874 pack_unsigned_long (gdb_byte *buf, struct type *type, ULONGEST num)
2875 {
2876 int len;
2877 enum bfd_endian byte_order;
2878
2879 type = check_typedef (type);
2880 len = TYPE_LENGTH (type);
2881 byte_order = gdbarch_byte_order (get_type_arch (type));
2882
2883 switch (TYPE_CODE (type))
2884 {
2885 case TYPE_CODE_INT:
2886 case TYPE_CODE_CHAR:
2887 case TYPE_CODE_ENUM:
2888 case TYPE_CODE_FLAGS:
2889 case TYPE_CODE_BOOL:
2890 case TYPE_CODE_RANGE:
2891 case TYPE_CODE_MEMBERPTR:
2892 store_unsigned_integer (buf, len, byte_order, num);
2893 break;
2894
2895 case TYPE_CODE_REF:
2896 case TYPE_CODE_PTR:
2897 store_typed_address (buf, type, (CORE_ADDR) num);
2898 break;
2899
2900 default:
2901 error (_("Unexpected type (%d) encountered "
2902 "for unsigned integer constant."),
2903 TYPE_CODE (type));
2904 }
2905 }
2906
2907
2908 /* Convert C numbers into newly allocated values. */
2909
2910 struct value *
2911 value_from_longest (struct type *type, LONGEST num)
2912 {
2913 struct value *val = allocate_value (type);
2914
2915 pack_long (value_contents_raw (val), type, num);
2916 return val;
2917 }
2918
2919
2920 /* Convert C unsigned numbers into newly allocated values. */
2921
2922 struct value *
2923 value_from_ulongest (struct type *type, ULONGEST num)
2924 {
2925 struct value *val = allocate_value (type);
2926
2927 pack_unsigned_long (value_contents_raw (val), type, num);
2928
2929 return val;
2930 }
2931
2932
2933 /* Create a value representing a pointer of type TYPE to the address
2934 ADDR. */
2935 struct value *
2936 value_from_pointer (struct type *type, CORE_ADDR addr)
2937 {
2938 struct value *val = allocate_value (type);
2939
2940 store_typed_address (value_contents_raw (val), check_typedef (type), addr);
2941 return val;
2942 }
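
/* Illustrative sketch, not part of GDB: building a value for a known
   (made-up) target address as a char pointer.  lookup_pointer_type,
   builtin_type and get_current_arch are existing GDB interfaces.  */
#if 0
{
  struct gdbarch *gdbarch = get_current_arch ();
  struct type *char_ptr
    = lookup_pointer_type (builtin_type (gdbarch)->builtin_char);
  struct value *ptr = value_from_pointer (char_ptr, 0x1000);
}
#endif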
2943
2944
2945 /* Create a value of type TYPE whose contents come from VALADDR, if it
2946 is non-null, and whose memory address (in the inferior) is
2947 ADDRESS. */
2948
2949 struct value *
2950 value_from_contents_and_address (struct type *type,
2951 const gdb_byte *valaddr,
2952 CORE_ADDR address)
2953 {
2954 struct value *v;
2955
2956 if (valaddr == NULL)
2957 v = allocate_value_lazy (type);
2958 else
2959 {
2960 v = allocate_value (type);
2961 memcpy (value_contents_raw (v), valaddr, TYPE_LENGTH (type));
2962 }
2963 set_value_address (v, address);
2964 VALUE_LVAL (v) = lval_memory;
2965 return v;
2966 }
2967
2968 struct value *
2969 value_from_double (struct type *type, DOUBLEST num)
2970 {
2971 struct value *val = allocate_value (type);
2972 struct type *base_type = check_typedef (type);
2973 enum type_code code = TYPE_CODE (base_type);
2974
2975 if (code == TYPE_CODE_FLT)
2976 {
2977 store_typed_floating (value_contents_raw (val), base_type, num);
2978 }
2979 else
2980 error (_("Unexpected type encountered for floating constant."));
2981
2982 return val;
2983 }
2984
2985 struct value *
2986 value_from_decfloat (struct type *type, const gdb_byte *dec)
2987 {
2988 struct value *val = allocate_value (type);
2989
2990 memcpy (value_contents_raw (val), dec, TYPE_LENGTH (type));
2991 return val;
2992 }
2993
2994 struct value *
2995 coerce_ref (struct value *arg)
2996 {
2997 struct type *value_type_arg_tmp = check_typedef (value_type (arg));
2998
2999 if (TYPE_CODE (value_type_arg_tmp) == TYPE_CODE_REF)
3000 arg = value_at_lazy (TYPE_TARGET_TYPE (value_type_arg_tmp),
3001 unpack_pointer (value_type (arg),
3002 value_contents (arg)));
3003 return arg;
3004 }
3005
3006 struct value *
3007 coerce_array (struct value *arg)
3008 {
3009 struct type *type;
3010
3011 arg = coerce_ref (arg);
3012 type = check_typedef (value_type (arg));
3013
3014 switch (TYPE_CODE (type))
3015 {
3016 case TYPE_CODE_ARRAY:
3017 if (!TYPE_VECTOR (type) && current_language->c_style_arrays)
3018 arg = value_coerce_array (arg);
3019 break;
3020 case TYPE_CODE_FUNC:
3021 arg = value_coerce_function (arg);
3022 break;
3023 }
3024 return arg;
3025 }
3026 \f
3027
3028 /* Return true if the function returning the specified type is using
3029 the convention of returning structures in memory (passing in the
3030 address as a hidden first parameter). */
3031
3032 int
3033 using_struct_return (struct gdbarch *gdbarch,
3034 struct type *func_type, struct type *value_type)
3035 {
3036 enum type_code code = TYPE_CODE (value_type);
3037
3038 if (code == TYPE_CODE_ERROR)
3039 error (_("Function return type unknown."));
3040
3041 if (code == TYPE_CODE_VOID)
3042 /* A void return value is never in memory. See also corresponding
3043 code in "print_return_value". */
3044 return 0;
3045
3046 /* Probe the architecture for the return-value convention. */
3047 return (gdbarch_return_value (gdbarch, func_type, value_type,
3048 NULL, NULL, NULL)
3049 != RETURN_VALUE_REGISTER_CONVENTION);
3050 }
3051
3052 /* Set the initialized field in a value struct. */
3053
3054 void
3055 set_value_initialized (struct value *val, int status)
3056 {
3057 val->initialized = status;
3058 }
3059
3060 /* Return the initialized field in a value struct. */
3061
3062 int
3063 value_initialized (struct value *val)
3064 {
3065 return val->initialized;
3066 }
3067
3068 void
3069 _initialize_values (void)
3070 {
3071 add_cmd ("convenience", no_class, show_convenience, _("\
3072 Debugger convenience (\"$foo\") variables.\n\
3073 These variables are created when you assign them values;\n\
3074 thus, \"print $foo=1\" gives \"$foo\" the value 1. Values may be any type.\n\
3075 \n\
3076 A few convenience variables are given values automatically:\n\
3077 \"$_\"holds the last address examined with \"x\" or \"info lines\",\n\
3078 \"$__\" holds the contents of the last address examined with \"x\"."),
3079 &showlist);
3080
3081 add_cmd ("values", no_set_class, show_values, _("\
3082 Elements of value history around item number IDX (or last ten)."),
3083 &showlist);
3084
3085 add_com ("init-if-undefined", class_vars, init_if_undefined_command, _("\
3086 Initialize a convenience variable if necessary.\n\
3087 init-if-undefined VARIABLE = EXPRESSION\n\
3088 Set an internal VARIABLE to the result of the EXPRESSION if it does not\n\
3089 exist or does not contain a value. The EXPRESSION is not evaluated if the\n\
3090 VARIABLE is already initialized."));
3091
3092 add_prefix_cmd ("function", no_class, function_command, _("\
3093 Placeholder command for showing help on convenience functions."),
3094 &functionlist, "function ", 0, &cmdlist);
3095 }