Commit | Line | Data |
---|---|---|
c906108c | 1 | /* Low level packing and unpacking of values for GDB, the GNU Debugger. |
1bac305b | 2 | |
32d0add0 | 3 | Copyright (C) 1986-2015 Free Software Foundation, Inc. |
c906108c | 4 | |
c5aa993b | 5 | This file is part of GDB. |
c906108c | 6 | |
c5aa993b JM |
7 | This program is free software; you can redistribute it and/or modify |
8 | it under the terms of the GNU General Public License as published by | |
a9762ec7 | 9 | the Free Software Foundation; either version 3 of the License, or |
c5aa993b | 10 | (at your option) any later version. |
c906108c | 11 | |
c5aa993b JM |
12 | This program is distributed in the hope that it will be useful, |
13 | but WITHOUT ANY WARRANTY; without even the implied warranty of | |
14 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
15 | GNU General Public License for more details. | |
c906108c | 16 | |
c5aa993b | 17 | You should have received a copy of the GNU General Public License |
a9762ec7 | 18 | along with this program. If not, see <http://www.gnu.org/licenses/>. */ |
c906108c SS |
19 | |
20 | #include "defs.h" | |
e17c207e | 21 | #include "arch-utils.h" |
c906108c SS |
22 | #include "symtab.h" |
23 | #include "gdbtypes.h" | |
24 | #include "value.h" | |
25 | #include "gdbcore.h" | |
c906108c SS |
26 | #include "command.h" |
27 | #include "gdbcmd.h" | |
28 | #include "target.h" | |
29 | #include "language.h" | |
c906108c | 30 | #include "demangle.h" |
d16aafd8 | 31 | #include "doublest.h" |
36160dc4 | 32 | #include "regcache.h" |
fe898f56 | 33 | #include "block.h" |
27bc4d80 | 34 | #include "dfp.h" |
bccdca4a | 35 | #include "objfiles.h" |
79a45b7d | 36 | #include "valprint.h" |
bc3b79fd | 37 | #include "cli/cli-decode.h" |
6dddc817 | 38 | #include "extension.h" |
3bd0f5ef | 39 | #include <ctype.h> |
0914bcdb | 40 | #include "tracepoint.h" |
be335936 | 41 | #include "cp-abi.h" |
a58e2656 | 42 | #include "user-regs.h" |
0914bcdb | 43 | |
581e13c1 | 44 | /* Prototypes for exported functions. */ |
c906108c | 45 | |
a14ed312 | 46 | void _initialize_values (void); |
c906108c | 47 | |
bc3b79fd TJB |
48 | /* Definition of a user function. */ |
49 | struct internal_function | |
50 | { | |
51 | /* The name of the function. It is a bit odd to have this in the | |
52 | function itself -- the user might use a differently-named | |
53 | convenience variable to hold the function. */ | |
54 | char *name; | |
55 | ||
56 | /* The handler. */ | |
57 | internal_function_fn handler; | |
58 | ||
59 | /* User data for the handler. */ | |
60 | void *cookie; | |
61 | }; | |
62 | ||
4e07d55f PA |
63 | /* Defines an [OFFSET, OFFSET + LENGTH) range. */ |
64 | ||
65 | struct range | |
66 | { | |
67 | /* Lowest offset in the range. */ | |
68 | int offset; | |
69 | ||
70 | /* Length of the range. */ | |
71 | int length; | |
72 | }; | |
73 | ||
74 | typedef struct range range_s; | |
75 | ||
76 | DEF_VEC_O(range_s); | |
77 | ||
78 | /* Returns true if the ranges defined by [offset1, offset1+len1) and | |
79 | [offset2, offset2+len2) overlap. */ | |
80 | ||
81 | static int | |
82 | ranges_overlap (int offset1, int len1, | |
83 | int offset2, int len2) | |
84 | { | |
85 | ULONGEST h, l; | |
86 | ||
87 | l = max (offset1, offset2); | |
88 | h = min (offset1 + len1, offset2 + len2); | |
89 | return (l < h); | |
90 | } | |
91 | ||
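/* A minimal usage sketch (illustrative only, not from the original
   file) of the half-open [offset, offset + length) convention used by
   ranges_overlap: ranges that merely touch do not overlap.  */

static void
ranges_overlap_usage_sketch (void)
{
  gdb_assert (!ranges_overlap (0, 4, 4, 4)); /* [0,4) and [4,8) only touch.  */
  gdb_assert (ranges_overlap (0, 4, 3, 2));  /* [0,4) and [3,5) share [3,4).  */
}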
92 | /* Returns true if the first argument is strictly less than the | |
93 | second, useful for VEC_lower_bound. We keep ranges sorted by | |
94 | offset and coalesce overlapping and contiguous ranges, so this just | |
95 | compares the starting offset. */ | |
96 | ||
97 | static int | |
98 | range_lessthan (const range_s *r1, const range_s *r2) | |
99 | { | |
100 | return r1->offset < r2->offset; | |
101 | } | |
102 | ||
103 | /* Returns true if RANGES contains any range that overlaps [OFFSET, | |
104 | OFFSET+LENGTH). */ | |
105 | ||
106 | static int | |
107 | ranges_contain (VEC(range_s) *ranges, int offset, int length) | |
108 | { | |
109 | range_s what; | |
110 | int i; | |
111 | ||
112 | what.offset = offset; | |
113 | what.length = length; | |
114 | ||
115 | /* We keep ranges sorted by offset and coalesce overlapping and | |
116 | contiguous ranges, so to check if a range list contains a given | |
117 | range, we can do a binary search for the position the given range | |
118 | would be inserted if we only considered the starting OFFSET of | |
119 | ranges. We call that position I. Since we also have LENGTH to | |
120 | care for (this is a range after all), we need to check if the | |
121 | _previous_ range overlaps the I range. E.g., | |
122 | ||
123 | R | |
124 | |---| | |
125 | |---| |---| |------| ... |--| | |
126 | 0 1 2 N | |
127 | ||
128 | I=1 | |
129 | ||
130 | In the case above, the binary search would return `I=1', meaning, | |
131 | this OFFSET should be inserted at position 1, and the current | |
132 | position 1 should be pushed further (and before 2). But, `0' | |
133 | overlaps with R. | |
134 | ||
135 | Then we also need to check whether the range at position I itself | |
136 | overlaps R. E.g., | |
137 | ||
138 | R | |
139 | |---| | |
140 | |---| |---| |-------| ... |--| | |
141 | 0 1 2 N | |
142 | ||
143 | I=1 | |
144 | */ | |
145 | ||
146 | i = VEC_lower_bound (range_s, ranges, &what, range_lessthan); | |
147 | ||
148 | if (i > 0) | |
149 | { | |
150 | struct range *bef = VEC_index (range_s, ranges, i - 1); | |
151 | ||
152 | if (ranges_overlap (bef->offset, bef->length, offset, length)) | |
153 | return 1; | |
154 | } | |
155 | ||
156 | if (i < VEC_length (range_s, ranges)) | |
157 | { | |
158 | struct range *r = VEC_index (range_s, ranges, i); | |
159 | ||
160 | if (ranges_overlap (r->offset, r->length, offset, length)) | |
161 | return 1; | |
162 | } | |
163 | ||
164 | return 0; | |
165 | } | |
166 | ||
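/* Sketch (illustrative only) of how ranges_contain is meant to be
   used, assuming a vector built with the VEC API above and the
   hypothetical range [10, 14).  */

static void
ranges_contain_usage_sketch (void)
{
  VEC(range_s) *ranges = NULL;
  range_s r = { 10, 4 };	/* [10, 14) */

  VEC_safe_push (range_s, ranges, &r);
  gdb_assert (ranges_contain (ranges, 12, 1));	/* Falls inside [10, 14).  */
  gdb_assert (!ranges_contain (ranges, 14, 2));	/* Starts where [10, 14) ends.  */
  VEC_free (range_s, ranges);
}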
bc3b79fd TJB |
167 | static struct cmd_list_element *functionlist; |
168 | ||
87784a47 TT |
169 | /* Note that the fields in this structure are arranged to save a bit |
170 | of memory. */ | |
171 | ||
91294c83 AC |
172 | struct value |
173 | { | |
174 | /* Type of value; either not an lval, or one of the various | |
175 | different possible kinds of lval. */ | |
176 | enum lval_type lval; | |
177 | ||
178 | /* Is it modifiable? Only relevant if lval != not_lval. */ | |
87784a47 TT |
179 | unsigned int modifiable : 1; |
180 | ||
181 | /* If zero, contents of this value are in the contents field. If | |
182 | nonzero, contents are in inferior. If the lval field is lval_memory, | |
183 | the contents are in inferior memory at location.address plus offset. | |
184 | The lval field may also be lval_register. | |
185 | ||
186 | WARNING: This field is used by the code which handles watchpoints | |
187 | (see breakpoint.c) to decide whether a particular value can be | |
188 | watched by hardware watchpoints. If the lazy flag is set for | |
189 | some member of a value chain, it is assumed that this member of | |
190 | the chain doesn't need to be watched as part of watching the | |
191 | value itself. This is how GDB avoids watching the entire struct | |
192 | or array when the user wants to watch a single struct member or | |
193 | array element. If you ever change the way lazy flag is set and | |
194 | reset, be sure to consider this use as well! */ | |
195 | unsigned int lazy : 1; | |
196 | ||
87784a47 TT |
197 | /* If value is a variable, whether it has been initialized or not. */ |
198 | unsigned int initialized : 1; | |
199 | ||
200 | /* If value is from the stack. If this is set, read_stack will be | |
201 | used instead of read_memory to enable extra caching. */ | |
202 | unsigned int stack : 1; | |
91294c83 | 203 | |
e848a8a5 TT |
204 | /* If the value has been released. */ |
205 | unsigned int released : 1; | |
206 | ||
98b1cfdc TT |
207 | /* Register number if the value is from a register. */ |
208 | short regnum; | |
209 | ||
91294c83 AC |
210 | /* Location of value (if lval). */ |
211 | union | |
212 | { | |
213 | /* If lval == lval_memory, this is the address in the inferior. | |
214 | If lval == lval_register, this is the byte offset into the | |
215 | registers structure. */ | |
216 | CORE_ADDR address; | |
217 | ||
218 | /* Pointer to internal variable. */ | |
219 | struct internalvar *internalvar; | |
5f5233d4 | 220 | |
e81e7f5e SC |
221 | /* Pointer to xmethod worker. */ |
222 | struct xmethod_worker *xm_worker; | |
223 | ||
5f5233d4 PA |
224 | /* If lval == lval_computed, this is a set of function pointers |
225 | to use to access and describe the value, and a closure pointer | |
226 | for them to use. */ | |
227 | struct | |
228 | { | |
c8f2448a JK |
229 | /* Functions to call. */ |
230 | const struct lval_funcs *funcs; | |
231 | ||
232 | /* Closure for those functions to use. */ | |
233 | void *closure; | |
5f5233d4 | 234 | } computed; |
91294c83 AC |
235 | } location; |
236 | ||
3723fda8 SM |
237 | /* Describes offset of a value within lval of a structure in target |
238 | addressable memory units. If lval == lval_memory, this is an offset to | |
239 | the address. If lval == lval_register, this is a further offset from | |
240 | location.address within the registers structure. Note also the member | |
241 | embedded_offset below. */ | |
91294c83 AC |
242 | int offset; |
243 | ||
244 | /* Only used for bitfields; number of bits contained in them. */ | |
245 | int bitsize; | |
246 | ||
247 | /* Only used for bitfields; position of start of field. For | |
32c9a795 | 248 | gdbarch_bits_big_endian=0 targets, it is the position of the LSB. For |
581e13c1 | 249 | gdbarch_bits_big_endian=1 targets, it is the position of the MSB. */ |
91294c83 AC |
250 | int bitpos; |
251 | ||
87784a47 TT |
252 | /* The number of references to this value. When a value is created, |
253 | the value chain holds a reference, so REFERENCE_COUNT is 1. If | |
254 | release_value is called, this value is removed from the chain but | |
255 | the caller of release_value now has a reference to this value. | |
256 | The caller must arrange for a call to value_free later. */ | |
257 | int reference_count; | |
258 | ||
4ea48cc1 DJ |
259 | /* Only used for bitfields; the containing value. This allows a |
260 | single read from the target when displaying multiple | |
261 | bitfields. */ | |
262 | struct value *parent; | |
263 | ||
91294c83 AC |
264 | /* Frame register value is relative to. This will be described in |
265 | the lval enum above as "lval_register". */ | |
266 | struct frame_id frame_id; | |
267 | ||
268 | /* Type of the value. */ | |
269 | struct type *type; | |
270 | ||
271 | /* If a value represents a C++ object, then the `type' field gives | |
272 | the object's compile-time type. If the object actually belongs | |
273 | to some class derived from `type', perhaps with other base | |
274 | classes and additional members, then `type' is just a subobject | |
275 | of the real thing, and the full object is probably larger than | |
276 | `type' would suggest. | |
277 | ||
278 | If `type' is a dynamic class (i.e. one with a vtable), then GDB | |
279 | can actually determine the object's run-time type by looking at | |
280 | the run-time type information in the vtable. When this | |
281 | information is available, we may elect to read in the entire | |
282 | object, for several reasons: | |
283 | ||
284 | - When printing the value, the user would probably rather see the | |
285 | full object, not just the limited portion apparent from the | |
286 | compile-time type. | |
287 | ||
288 | - If `type' has virtual base classes, then even printing `type' | |
289 | alone may require reaching outside the `type' portion of the | |
290 | object to wherever the virtual base class has been stored. | |
291 | ||
292 | When we store the entire object, `enclosing_type' is the run-time | |
293 | type -- the complete object -- and `embedded_offset' is the | |
3723fda8 SM |
294 | offset of `type' within that larger type, in target addressable memory |
295 | units. The value_contents() macro takes `embedded_offset' into account, | |
296 | so most GDB code continues to see the `type' portion of the value, just | |
297 | as the inferior would. | |
91294c83 AC |
298 | |
299 | If `type' is a pointer to an object, then `enclosing_type' is a | |
300 | pointer to the object's run-time type, and `pointed_to_offset' is | |
3723fda8 SM |
301 | the offset in target addressable memory units from the full object |
302 | to the pointed-to object -- that is, the value `embedded_offset' would | |
303 | have if we followed the pointer and fetched the complete object. | |
304 | (I don't really see the point. Why not just determine the | |
305 | run-time type when you indirect, and avoid the special case? The | |
306 | contents don't matter until you indirect anyway.) | |
91294c83 AC |
307 | |
308 | If we're not doing anything fancy, `enclosing_type' is equal to | |
309 | `type', and `embedded_offset' is zero, so everything works | |
310 | normally. */ | |
311 | struct type *enclosing_type; | |
312 | int embedded_offset; | |
313 | int pointed_to_offset; | |
314 | ||
315 | /* Values are stored in a chain, so that they can be deleted easily | |
316 | over calls to the inferior. Values assigned to internal | |
a08702d6 TJB |
317 | variables, put into the value history or exposed to Python are |
318 | taken off this list. */ | |
91294c83 AC |
319 | struct value *next; |
320 | ||
3e3d7139 JG |
321 | /* Actual contents of the value. Target byte-order. NULL or not |
322 | valid if lazy is nonzero. */ | |
323 | gdb_byte *contents; | |
828d3400 | 324 | |
4e07d55f PA |
325 | /* Unavailable ranges in CONTENTS. We mark unavailable ranges, |
326 | rather than available, since the common and default case is for a | |
9a0dc9e3 PA |
327 | value to be available. This is filled in at value read time. |
328 | The unavailable ranges are tracked in bits. Note that a contents | |
329 | bit that has been optimized out doesn't really exist in the | |
330 | program, so it can't be marked unavailable either. */ | |
4e07d55f | 331 | VEC(range_s) *unavailable; |
9a0dc9e3 PA |
332 | |
333 | /* Likewise, but for optimized out contents (a chunk of the value of | |
334 | a variable that does not actually exist in the program). If LVAL | |
335 | is lval_register, this is a register ($pc, $sp, etc., never a | |
336 | program variable) that has not been saved in the frame. Not | |
337 | saved registers and optimized-out program variables values are | |
338 | treated pretty much the same, except not-saved registers have a | |
339 | different string representation and related error strings. */ | |
340 | VEC(range_s) *optimized_out; | |
91294c83 AC |
341 | }; |
342 | ||
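/* Illustrative sketch (an assumption, not part of the original
   interface) of the enclosing_type/embedded_offset relationship
   described above: the `type' subobject starts EMBEDDED_OFFSET
   addressable units into the buffer holding the complete
   ENCLOSING_TYPE object.  value_contents_raw below does the same,
   additionally scaling by the target's addressable memory unit
   size.  */

static const gdb_byte *
type_portion_sketch (struct value *v)
{
  return value_contents_all_raw (v) + value_embedded_offset (v);
}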
e512cdbd SM |
343 | /* See value.h. */ |
344 | ||
345 | struct gdbarch * | |
346 | get_value_arch (const struct value *value) | |
347 | { | |
348 | return get_type_arch (value_type (value)); | |
349 | } | |
350 | ||
4e07d55f | 351 | int |
bdf22206 | 352 | value_bits_available (const struct value *value, int offset, int length) |
4e07d55f PA |
353 | { |
354 | gdb_assert (!value->lazy); | |
355 | ||
356 | return !ranges_contain (value->unavailable, offset, length); | |
357 | } | |
358 | ||
bdf22206 AB |
359 | int |
360 | value_bytes_available (const struct value *value, int offset, int length) | |
361 | { | |
362 | return value_bits_available (value, | |
363 | offset * TARGET_CHAR_BIT, | |
364 | length * TARGET_CHAR_BIT); | |
365 | } | |
366 | ||
9a0dc9e3 PA |
367 | int |
368 | value_bits_any_optimized_out (const struct value *value, int bit_offset, int bit_length) | |
369 | { | |
370 | gdb_assert (!value->lazy); | |
371 | ||
372 | return ranges_contain (value->optimized_out, bit_offset, bit_length); | |
373 | } | |
374 | ||
ec0a52e1 PA |
375 | int |
376 | value_entirely_available (struct value *value) | |
377 | { | |
378 | /* We can only tell whether the whole value is available when we try | |
379 | to read it. */ | |
380 | if (value->lazy) | |
381 | value_fetch_lazy (value); | |
382 | ||
383 | if (VEC_empty (range_s, value->unavailable)) | |
384 | return 1; | |
385 | return 0; | |
386 | } | |
387 | ||
9a0dc9e3 PA |
388 | /* Returns true if VALUE is entirely covered by RANGES. If the value |
389 | is lazy, it'll be read now. Note that RANGES is a pointer to |
390 | a pointer because reading the value might change *RANGES. */ |
391 | ||
392 | static int | |
393 | value_entirely_covered_by_range_vector (struct value *value, | |
394 | VEC(range_s) **ranges) | |
6211c335 | 395 | { |
9a0dc9e3 PA |
396 | /* We can only tell whether the whole value is optimized out / |
397 | unavailable when we try to read it. */ | |
6211c335 YQ |
398 | if (value->lazy) |
399 | value_fetch_lazy (value); | |
400 | ||
9a0dc9e3 | 401 | if (VEC_length (range_s, *ranges) == 1) |
6211c335 | 402 | { |
9a0dc9e3 | 403 | struct range *t = VEC_index (range_s, *ranges, 0); |
6211c335 YQ |
404 | |
405 | if (t->offset == 0 | |
64c46ce4 JB |
406 | && t->length == (TARGET_CHAR_BIT |
407 | * TYPE_LENGTH (value_enclosing_type (value)))) | |
6211c335 YQ |
408 | return 1; |
409 | } | |
410 | ||
411 | return 0; | |
412 | } | |
413 | ||
9a0dc9e3 PA |
414 | int |
415 | value_entirely_unavailable (struct value *value) | |
416 | { | |
417 | return value_entirely_covered_by_range_vector (value, &value->unavailable); | |
418 | } | |
419 | ||
420 | int | |
421 | value_entirely_optimized_out (struct value *value) | |
422 | { | |
423 | return value_entirely_covered_by_range_vector (value, &value->optimized_out); | |
424 | } | |
425 | ||
426 | /* Insert into the vector pointed to by VECTORP the bit range starting at |
427 | OFFSET bits, and extending for the next LENGTH bits. */ | |
428 | ||
429 | static void | |
430 | insert_into_bit_range_vector (VEC(range_s) **vectorp, int offset, int length) | |
4e07d55f PA |
431 | { |
432 | range_s newr; | |
433 | int i; | |
434 | ||
435 | /* Insert the range sorted. If there's overlap or the new range | |
436 | would be contiguous with an existing range, merge. */ | |
437 | ||
438 | newr.offset = offset; | |
439 | newr.length = length; | |
440 | ||
441 | /* Do a binary search for the position the given range would be | |
442 | inserted if we only considered the starting OFFSET of ranges. | |
443 | Call that position I. Since we also have LENGTH to care for | |
444 | (this is a range after all), we need to check if the _previous_ |
445 | range overlaps the I range. E.g., calling R the new range: | |
446 | ||
447 | #1 - overlaps with previous | |
448 | ||
449 | R | |
450 | |-...-| | |
451 | |---| |---| |------| ... |--| | |
452 | 0 1 2 N | |
453 | ||
454 | I=1 | |
455 | ||
456 | In the case #1 above, the binary search would return `I=1', | |
457 | meaning, this OFFSET should be inserted at position 1, and the | |
458 | current position 1 should be pushed further (and become 2). But, | |
459 | note that `0' overlaps with R, so we want to merge them. | |
460 | ||
461 | A similar consideration needs to be taken if the new range would | |
462 | be contiguous with the previous range: | |
463 | ||
464 | #2 - contiguous with previous | |
465 | ||
466 | R | |
467 | |-...-| | |
468 | |--| |---| |------| ... |--| | |
469 | 0 1 2 N | |
470 | ||
471 | I=1 | |
472 | ||
473 | If there's no overlap with the previous range, as in: | |
474 | ||
475 | #3 - not overlapping and not contiguous | |
476 | ||
477 | R | |
478 | |-...-| | |
479 | |--| |---| |------| ... |--| | |
480 | 0 1 2 N | |
481 | ||
482 | I=1 | |
483 | ||
484 | or if I is 0: | |
485 | ||
486 | #4 - R is the range with lowest offset | |
487 | ||
488 | R | |
489 | |-...-| | |
490 | |--| |---| |------| ... |--| | |
491 | 0 1 2 N | |
492 | ||
493 | I=0 | |
494 | ||
495 | ... we just push the new range to I. | |
496 | ||
497 | All the 4 cases above need to consider that the new range may | |
498 | also overlap several of the ranges that follow, or that R may be | |
499 | contiguous with the following range, and merge. E.g., | |
500 | ||
501 | #5 - overlapping following ranges | |
502 | ||
503 | R | |
504 | |------------------------| | |
505 | |--| |---| |------| ... |--| | |
506 | 0 1 2 N | |
507 | ||
508 | I=0 | |
509 | ||
510 | or: | |
511 | ||
512 | R | |
513 | |-------| | |
514 | |--| |---| |------| ... |--| | |
515 | 0 1 2 N | |
516 | ||
517 | I=1 | |
518 | ||
519 | */ | |
520 | ||
9a0dc9e3 | 521 | i = VEC_lower_bound (range_s, *vectorp, &newr, range_lessthan); |
4e07d55f PA |
522 | if (i > 0) |
523 | { | |
9a0dc9e3 | 524 | struct range *bef = VEC_index (range_s, *vectorp, i - 1); |
4e07d55f PA |
525 | |
526 | if (ranges_overlap (bef->offset, bef->length, offset, length)) | |
527 | { | |
528 | /* #1 */ | |
529 | ULONGEST l = min (bef->offset, offset); | |
530 | ULONGEST h = max (bef->offset + bef->length, offset + length); | |
531 | ||
532 | bef->offset = l; | |
533 | bef->length = h - l; | |
534 | i--; | |
535 | } | |
536 | else if (offset == bef->offset + bef->length) | |
537 | { | |
538 | /* #2 */ | |
539 | bef->length += length; | |
540 | i--; | |
541 | } | |
542 | else | |
543 | { | |
544 | /* #3 */ | |
9a0dc9e3 | 545 | VEC_safe_insert (range_s, *vectorp, i, &newr); |
4e07d55f PA |
546 | } |
547 | } | |
548 | else | |
549 | { | |
550 | /* #4 */ | |
9a0dc9e3 | 551 | VEC_safe_insert (range_s, *vectorp, i, &newr); |
4e07d55f PA |
552 | } |
553 | ||
554 | /* Check whether the ranges following the one we've just added or | |
555 | touched can be folded in (#5 above). */ | |
9a0dc9e3 | 556 | if (i + 1 < VEC_length (range_s, *vectorp)) |
4e07d55f PA |
557 | { |
558 | struct range *t; | |
559 | struct range *r; | |
560 | int removed = 0; | |
561 | int next = i + 1; | |
562 | ||
563 | /* Get the range we just touched. */ | |
9a0dc9e3 | 564 | t = VEC_index (range_s, *vectorp, i); |
4e07d55f PA |
565 | removed = 0; |
566 | ||
567 | i = next; | |
9a0dc9e3 | 568 | for (; VEC_iterate (range_s, *vectorp, i, r); i++) |
4e07d55f PA |
569 | if (r->offset <= t->offset + t->length) |
570 | { | |
571 | ULONGEST l, h; | |
572 | ||
573 | l = min (t->offset, r->offset); | |
574 | h = max (t->offset + t->length, r->offset + r->length); | |
575 | ||
576 | t->offset = l; | |
577 | t->length = h - l; | |
578 | ||
579 | removed++; | |
580 | } | |
581 | else | |
582 | { | |
583 | /* If we couldn't merge this one, we won't be able to | |
584 | merge following ones either, since the ranges are | |
585 | always sorted by OFFSET. */ | |
586 | break; | |
587 | } | |
588 | ||
589 | if (removed != 0) | |
9a0dc9e3 | 590 | VEC_block_remove (range_s, *vectorp, next, removed); |
4e07d55f PA |
591 | } |
592 | } | |
593 | ||
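/* Coalescing sketch (illustrative only) for
   insert_into_bit_range_vector, following the merge rules documented
   above: [0, 4) and [8, 12) are bridged by a later [3, 9) insertion,
   leaving a single [0, 12) range.  */

static void
insert_ranges_coalescing_sketch (void)
{
  VEC(range_s) *v = NULL;

  insert_into_bit_range_vector (&v, 0, 4);	/* [0, 4)  */
  insert_into_bit_range_vector (&v, 8, 4);	/* [8, 12) */
  insert_into_bit_range_vector (&v, 3, 6);	/* [3, 9) overlaps both and
						   bridges them.  */
  gdb_assert (VEC_length (range_s, v) == 1);
  gdb_assert (VEC_index (range_s, v, 0)->offset == 0);
  gdb_assert (VEC_index (range_s, v, 0)->length == 12);
  VEC_free (range_s, v);
}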
9a0dc9e3 PA |
594 | void |
595 | mark_value_bits_unavailable (struct value *value, int offset, int length) | |
596 | { | |
597 | insert_into_bit_range_vector (&value->unavailable, offset, length); | |
598 | } | |
599 | ||
bdf22206 AB |
600 | void |
601 | mark_value_bytes_unavailable (struct value *value, int offset, int length) | |
602 | { | |
603 | mark_value_bits_unavailable (value, | |
604 | offset * TARGET_CHAR_BIT, | |
605 | length * TARGET_CHAR_BIT); | |
606 | } | |
607 | ||
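/* Sketch (illustrative only): marking part of a value unavailable and
   querying it.  INT_TYPE is a hypothetical 4-byte integer type; a
   freshly allocated value is non-lazy, so the availability queries
   below are legal.  */

static void
unavailable_marking_sketch (struct type *int_type)
{
  struct value *v = allocate_value (int_type);

  mark_value_bytes_unavailable (v, 2, 2);	/* Bytes [2, 4).  */
  gdb_assert (value_bytes_available (v, 0, 2));	/* [0, 2) untouched.  */
  gdb_assert (!value_bytes_available (v, 1, 2));	/* Overlaps [2, 4).  */
  gdb_assert (!value_entirely_unavailable (v));
}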
c8c1c22f PA |
608 | /* Find the first range in RANGES that overlaps the range defined by |
609 | OFFSET and LENGTH, starting at element POS in the RANGES vector. |
610 | Returns the index into RANGES where such an overlapping range was |
611 | found, or -1 if none was found. */ |
612 | ||
613 | static int | |
614 | find_first_range_overlap (VEC(range_s) *ranges, int pos, | |
615 | int offset, int length) | |
616 | { | |
617 | range_s *r; | |
618 | int i; | |
619 | ||
620 | for (i = pos; VEC_iterate (range_s, ranges, i, r); i++) | |
621 | if (ranges_overlap (r->offset, r->length, offset, length)) | |
622 | return i; | |
623 | ||
624 | return -1; | |
625 | } | |
626 | ||
bdf22206 AB |
627 | /* Compare LENGTH_BITS of memory at PTR1 + OFFSET1_BITS with the memory at |
628 | PTR2 + OFFSET2_BITS. Return 0 if the memory is the same, otherwise | |
629 | return non-zero. | |
630 | ||
631 | It must always be the case that: | |
632 | OFFSET1_BITS % TARGET_CHAR_BIT == OFFSET2_BITS % TARGET_CHAR_BIT | |
633 | ||
634 | It is assumed that memory can be accessed from: | |
635 | PTR + (OFFSET_BITS / TARGET_CHAR_BIT) | |
636 | to: | |
637 | PTR + ((OFFSET_BITS + LENGTH_BITS + TARGET_CHAR_BIT - 1) | |
638 | / TARGET_CHAR_BIT) */ | |
639 | static int | |
640 | memcmp_with_bit_offsets (const gdb_byte *ptr1, size_t offset1_bits, | |
641 | const gdb_byte *ptr2, size_t offset2_bits, | |
642 | size_t length_bits) | |
643 | { | |
644 | gdb_assert (offset1_bits % TARGET_CHAR_BIT | |
645 | == offset2_bits % TARGET_CHAR_BIT); | |
646 | ||
647 | if (offset1_bits % TARGET_CHAR_BIT != 0) | |
648 | { | |
649 | size_t bits; | |
650 | gdb_byte mask, b1, b2; | |
651 | ||
652 | /* The offset from the base pointers PTR1 and PTR2 is not a complete | |
653 | number of bytes. A number of bits up to either the next exact | |
654 | byte boundary, or LENGTH_BITS (whichever is sooner) will be |
655 | compared. */ | |
656 | bits = TARGET_CHAR_BIT - offset1_bits % TARGET_CHAR_BIT; | |
657 | gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT); | |
658 | mask = (1 << bits) - 1; | |
659 | ||
660 | if (length_bits < bits) | |
661 | { | |
662 | mask &= ~(gdb_byte) ((1 << (bits - length_bits)) - 1); | |
663 | bits = length_bits; | |
664 | } | |
665 | ||
666 | /* Now load the two bytes and mask off the bits we care about. */ | |
667 | b1 = *(ptr1 + offset1_bits / TARGET_CHAR_BIT) & mask; | |
668 | b2 = *(ptr2 + offset2_bits / TARGET_CHAR_BIT) & mask; | |
669 | ||
670 | if (b1 != b2) | |
671 | return 1; | |
672 | ||
673 | /* Now update the length and offsets to take account of the bits | |
674 | we've just compared. */ | |
675 | length_bits -= bits; | |
676 | offset1_bits += bits; | |
677 | offset2_bits += bits; | |
678 | } | |
679 | ||
680 | if (length_bits % TARGET_CHAR_BIT != 0) | |
681 | { | |
682 | size_t bits; | |
683 | size_t o1, o2; | |
684 | gdb_byte mask, b1, b2; | |
685 | ||
686 | /* The length is not an exact number of bytes. After the previous | |
687 | IF block, either the offsets are byte aligned, or the |
688 | length is zero (in which case this code is not reached). Compare | |
689 | a number of bits at the end of the region, starting from an exact | |
690 | byte boundary. */ | |
691 | bits = length_bits % TARGET_CHAR_BIT; | |
692 | o1 = offset1_bits + length_bits - bits; | |
693 | o2 = offset2_bits + length_bits - bits; | |
694 | ||
695 | gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT); | |
696 | mask = ((1 << bits) - 1) << (TARGET_CHAR_BIT - bits); | |
697 | ||
698 | gdb_assert (o1 % TARGET_CHAR_BIT == 0); | |
699 | gdb_assert (o2 % TARGET_CHAR_BIT == 0); | |
700 | ||
701 | b1 = *(ptr1 + o1 / TARGET_CHAR_BIT) & mask; | |
702 | b2 = *(ptr2 + o2 / TARGET_CHAR_BIT) & mask; | |
703 | ||
704 | if (b1 != b2) | |
705 | return 1; | |
706 | ||
707 | length_bits -= bits; | |
708 | } | |
709 | ||
710 | if (length_bits > 0) | |
711 | { | |
712 | /* We've now taken care of any stray "bits" at the start or end of |
713 | the region to compare; the remainder can be covered with a simple |
714 | memcmp. */ | |
715 | gdb_assert (offset1_bits % TARGET_CHAR_BIT == 0); | |
716 | gdb_assert (offset2_bits % TARGET_CHAR_BIT == 0); | |
717 | gdb_assert (length_bits % TARGET_CHAR_BIT == 0); | |
718 | ||
719 | return memcmp (ptr1 + offset1_bits / TARGET_CHAR_BIT, | |
720 | ptr2 + offset2_bits / TARGET_CHAR_BIT, | |
721 | length_bits / TARGET_CHAR_BIT); | |
722 | } | |
723 | ||
724 | /* Length is zero, regions match. */ | |
725 | return 0; | |
726 | } | |
727 | ||
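/* Worked sketch (illustrative only) for memcmp_with_bit_offsets,
   assuming TARGET_CHAR_BIT is 8.  Comparing 4 bits at bit offset 2
   within each byte leaves the two most significant bits out of the
   window, and that is the only place where A and B differ.  */

static void
memcmp_with_bit_offsets_sketch (void)
{
  gdb_byte a = 0x2c;	/* 0010 1100 */
  gdb_byte b = 0xec;	/* 1110 1100 */

  gdb_assert (memcmp_with_bit_offsets (&a, 2, &b, 2, 4) == 0);
}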
9a0dc9e3 PA |
728 | /* Helper struct for find_first_range_overlap_and_match and |
729 | value_contents_bits_eq. Keeps track of which slot of a given ranges |
730 | vector we last looked at. */ |
bdf22206 | 731 | |
9a0dc9e3 PA |
732 | struct ranges_and_idx |
733 | { | |
734 | /* The ranges. */ | |
735 | VEC(range_s) *ranges; | |
736 | ||
737 | /* The range we've last found in RANGES. Given ranges are sorted, | |
738 | we can start the next lookup here. */ | |
739 | int idx; | |
740 | }; | |
741 | ||
742 | /* Helper function for value_contents_bits_eq. Compare LENGTH bits of | |
743 | RP1's ranges starting at OFFSET1 bits with LENGTH bits of RP2's | |
744 | ranges starting at OFFSET2 bits. Return true if the ranges match | |
745 | and fill in *L and *H with the overlapping window relative to | |
746 | (both) OFFSET1 or OFFSET2. */ | |
bdf22206 AB |
747 | |
748 | static int | |
9a0dc9e3 PA |
749 | find_first_range_overlap_and_match (struct ranges_and_idx *rp1, |
750 | struct ranges_and_idx *rp2, | |
751 | int offset1, int offset2, | |
752 | int length, ULONGEST *l, ULONGEST *h) | |
c8c1c22f | 753 | { |
9a0dc9e3 PA |
754 | rp1->idx = find_first_range_overlap (rp1->ranges, rp1->idx, |
755 | offset1, length); | |
756 | rp2->idx = find_first_range_overlap (rp2->ranges, rp2->idx, | |
757 | offset2, length); | |
c8c1c22f | 758 | |
9a0dc9e3 PA |
759 | if (rp1->idx == -1 && rp2->idx == -1) |
760 | { | |
761 | *l = length; | |
762 | *h = length; | |
763 | return 1; | |
764 | } | |
765 | else if (rp1->idx == -1 || rp2->idx == -1) | |
766 | return 0; | |
767 | else | |
c8c1c22f PA |
768 | { |
769 | range_s *r1, *r2; | |
770 | ULONGEST l1, h1; | |
771 | ULONGEST l2, h2; | |
772 | ||
9a0dc9e3 PA |
773 | r1 = VEC_index (range_s, rp1->ranges, rp1->idx); |
774 | r2 = VEC_index (range_s, rp2->ranges, rp2->idx); | |
c8c1c22f PA |
775 | |
776 | /* Get the unavailable windows intersected by the incoming | |
777 | ranges. The first and last ranges that overlap the argument | |
778 | range may be wider than the incoming argument ranges. */ |
779 | l1 = max (offset1, r1->offset); | |
780 | h1 = min (offset1 + length, r1->offset + r1->length); | |
781 | ||
782 | l2 = max (offset2, r2->offset); | |
9a0dc9e3 | 783 | h2 = min (offset2 + length, offset2 + r2->length); |
c8c1c22f PA |
784 | |
785 | /* Make them relative to the respective start offsets, so we can | |
786 | compare them for equality. */ | |
787 | l1 -= offset1; | |
788 | h1 -= offset1; | |
789 | ||
790 | l2 -= offset2; | |
791 | h2 -= offset2; | |
792 | ||
9a0dc9e3 | 793 | /* Different ranges, no match. */ |
c8c1c22f PA |
794 | if (l1 != l2 || h1 != h2) |
795 | return 0; | |
796 | ||
9a0dc9e3 PA |
797 | *h = h1; |
798 | *l = l1; | |
799 | return 1; | |
800 | } | |
801 | } | |
802 | ||
803 | /* Helper function for value_contents_eq. The only difference is that | |
804 | this function is bit rather than byte based. | |
805 | ||
806 | Compare LENGTH bits of VAL1's contents starting at OFFSET1 bits | |
807 | with LENGTH bits of VAL2's contents starting at OFFSET2 bits. | |
808 | Return true if the available bits match. */ | |
809 | ||
810 | static int | |
811 | value_contents_bits_eq (const struct value *val1, int offset1, | |
812 | const struct value *val2, int offset2, | |
813 | int length) | |
814 | { | |
815 | /* Each array element corresponds to a ranges source (unavailable, | |
816 | optimized out). '1' is for VAL1, '2' for VAL2. */ | |
817 | struct ranges_and_idx rp1[2], rp2[2]; | |
818 | ||
819 | /* See function description in value.h. */ | |
820 | gdb_assert (!val1->lazy && !val2->lazy); | |
821 | ||
822 | /* We shouldn't be trying to compare past the end of the values. */ | |
823 | gdb_assert (offset1 + length | |
824 | <= TYPE_LENGTH (val1->enclosing_type) * TARGET_CHAR_BIT); | |
825 | gdb_assert (offset2 + length | |
826 | <= TYPE_LENGTH (val2->enclosing_type) * TARGET_CHAR_BIT); | |
827 | ||
828 | memset (&rp1, 0, sizeof (rp1)); | |
829 | memset (&rp2, 0, sizeof (rp2)); | |
830 | rp1[0].ranges = val1->unavailable; | |
831 | rp2[0].ranges = val2->unavailable; | |
832 | rp1[1].ranges = val1->optimized_out; | |
833 | rp2[1].ranges = val2->optimized_out; | |
834 | ||
835 | while (length > 0) | |
836 | { | |
000339af | 837 | ULONGEST l = 0, h = 0; /* init for gcc -Wall */ |
9a0dc9e3 PA |
838 | int i; |
839 | ||
840 | for (i = 0; i < 2; i++) | |
841 | { | |
842 | ULONGEST l_tmp, h_tmp; | |
843 | ||
844 | /* The contents only match if the invalid/unavailable |
845 | contents ranges match as well. */ | |
846 | if (!find_first_range_overlap_and_match (&rp1[i], &rp2[i], | |
847 | offset1, offset2, length, | |
848 | &l_tmp, &h_tmp)) | |
849 | return 0; | |
850 | ||
851 | /* We're interested in the lowest/first range found. */ | |
852 | if (i == 0 || l_tmp < l) | |
853 | { | |
854 | l = l_tmp; | |
855 | h = h_tmp; | |
856 | } | |
857 | } | |
858 | ||
859 | /* Compare the available/valid contents. */ | |
bdf22206 | 860 | if (memcmp_with_bit_offsets (val1->contents, offset1, |
9a0dc9e3 | 861 | val2->contents, offset2, l) != 0) |
c8c1c22f PA |
862 | return 0; |
863 | ||
9a0dc9e3 PA |
864 | length -= h; |
865 | offset1 += h; | |
866 | offset2 += h; | |
c8c1c22f PA |
867 | } |
868 | ||
869 | return 1; | |
870 | } | |
871 | ||
bdf22206 | 872 | int |
9a0dc9e3 PA |
873 | value_contents_eq (const struct value *val1, int offset1, |
874 | const struct value *val2, int offset2, | |
875 | int length) | |
bdf22206 | 876 | { |
9a0dc9e3 PA |
877 | return value_contents_bits_eq (val1, offset1 * TARGET_CHAR_BIT, |
878 | val2, offset2 * TARGET_CHAR_BIT, | |
879 | length * TARGET_CHAR_BIT); | |
bdf22206 AB |
880 | } |
881 | ||
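/* Usage sketch (illustrative only) for value_contents_eq.  INT_TYPE
   is a hypothetical 4-byte integer type; freshly allocated values
   have zeroed contents, so the comparison outcome here is driven by
   whether the unavailable ranges agree.  */

static void
value_contents_eq_sketch (struct type *int_type)
{
  struct value *a = allocate_value (int_type);
  struct value *b = allocate_value (int_type);

  mark_value_bytes_unavailable (a, 0, 1);
  gdb_assert (!value_contents_eq (a, 0, b, 0, 4));

  mark_value_bytes_unavailable (b, 0, 1);
  gdb_assert (value_contents_eq (a, 0, b, 0, 4));
}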
581e13c1 | 882 | /* Prototypes for local functions. */ |
c906108c | 883 | |
a14ed312 | 884 | static void show_values (char *, int); |
c906108c | 885 | |
a14ed312 | 886 | static void show_convenience (char *, int); |
c906108c | 887 | |
c906108c SS |
888 | |
889 | /* The value-history records all the values printed | |
890 | by print commands during this session. Each chunk | |
891 | records 60 consecutive values. The first chunk on | |
892 | the chain records the most recent values. | |
893 | The total number of values is in value_history_count. */ | |
894 | ||
895 | #define VALUE_HISTORY_CHUNK 60 | |
896 | ||
897 | struct value_history_chunk | |
c5aa993b JM |
898 | { |
899 | struct value_history_chunk *next; | |
f23631e4 | 900 | struct value *values[VALUE_HISTORY_CHUNK]; |
c5aa993b | 901 | }; |
c906108c SS |
902 | |
903 | /* Chain of chunks now in use. */ | |
904 | ||
905 | static struct value_history_chunk *value_history_chain; | |
906 | ||
581e13c1 | 907 | static int value_history_count; /* Abs number of last entry stored. */ |
bc3b79fd | 908 | |
c906108c SS |
909 | \f |
910 | /* List of all value objects currently allocated | |
911 | (except for those released by calls to release_value) | |
912 | This is so they can be freed after each command. */ | |
913 | ||
f23631e4 | 914 | static struct value *all_values; |
c906108c | 915 | |
3e3d7139 JG |
916 | /* Allocate a lazy value for type TYPE. Its actual content is |
917 | "lazily" allocated too: the content field of the return value is | |
918 | NULL; it will be allocated when it is fetched from the target. */ | |
c906108c | 919 | |
f23631e4 | 920 | struct value * |
3e3d7139 | 921 | allocate_value_lazy (struct type *type) |
c906108c | 922 | { |
f23631e4 | 923 | struct value *val; |
c54eabfa JK |
924 | |
925 | /* Call check_typedef on our type to make sure that, if TYPE | |
926 | is a TYPE_CODE_TYPEDEF, its length is set to the length | |
927 | of the target type instead of zero. However, we do not | |
928 | replace the typedef type by the target type, because we want | |
929 | to keep the typedef in order to be able to set the VAL's type | |
930 | description correctly. */ | |
931 | check_typedef (type); | |
c906108c | 932 | |
3e3d7139 JG |
933 | val = (struct value *) xzalloc (sizeof (struct value)); |
934 | val->contents = NULL; | |
df407dfe | 935 | val->next = all_values; |
c906108c | 936 | all_values = val; |
df407dfe | 937 | val->type = type; |
4754a64e | 938 | val->enclosing_type = type; |
c906108c | 939 | VALUE_LVAL (val) = not_lval; |
42ae5230 | 940 | val->location.address = 0; |
1df6926e | 941 | VALUE_FRAME_ID (val) = null_frame_id; |
df407dfe AC |
942 | val->offset = 0; |
943 | val->bitpos = 0; | |
944 | val->bitsize = 0; | |
9ee8fc9d | 945 | VALUE_REGNUM (val) = -1; |
3e3d7139 | 946 | val->lazy = 1; |
13c3b5f5 | 947 | val->embedded_offset = 0; |
b44d461b | 948 | val->pointed_to_offset = 0; |
c906108c | 949 | val->modifiable = 1; |
42be36b3 | 950 | val->initialized = 1; /* Default to initialized. */ |
828d3400 DJ |
951 | |
952 | /* Values start out on the all_values chain. */ | |
953 | val->reference_count = 1; | |
954 | ||
c906108c SS |
955 | return val; |
956 | } | |
957 | ||
3e3d7139 JG |
958 | /* Allocate the contents of VAL if it has not been allocated yet. */ |
959 | ||
548b762d | 960 | static void |
3e3d7139 JG |
961 | allocate_value_contents (struct value *val) |
962 | { | |
963 | if (!val->contents) | |
964 | val->contents = (gdb_byte *) xzalloc (TYPE_LENGTH (val->enclosing_type)); | |
965 | } | |
966 | ||
967 | /* Allocate a value and its contents for type TYPE. */ | |
968 | ||
969 | struct value * | |
970 | allocate_value (struct type *type) | |
971 | { | |
972 | struct value *val = allocate_value_lazy (type); | |
a109c7c1 | 973 | |
3e3d7139 JG |
974 | allocate_value_contents (val); |
975 | val->lazy = 0; | |
976 | return val; | |
977 | } | |
978 | ||
c906108c | 979 | /* Allocate a value that has the correct length |
938f5214 | 980 | for COUNT repetitions of type TYPE. */ |
c906108c | 981 | |
f23631e4 | 982 | struct value * |
fba45db2 | 983 | allocate_repeat_value (struct type *type, int count) |
c906108c | 984 | { |
c5aa993b | 985 | int low_bound = current_language->string_lower_bound; /* ??? */ |
c906108c SS |
986 | /* FIXME-type-allocation: need a way to free this type when we are |
987 | done with it. */ | |
e3506a9f UW |
988 | struct type *array_type |
989 | = lookup_array_range_type (type, low_bound, count + low_bound - 1); | |
a109c7c1 | 990 | |
e3506a9f | 991 | return allocate_value (array_type); |
c906108c SS |
992 | } |
993 | ||
5f5233d4 PA |
994 | struct value * |
995 | allocate_computed_value (struct type *type, | |
c8f2448a | 996 | const struct lval_funcs *funcs, |
5f5233d4 PA |
997 | void *closure) |
998 | { | |
41e8491f | 999 | struct value *v = allocate_value_lazy (type); |
a109c7c1 | 1000 | |
5f5233d4 PA |
1001 | VALUE_LVAL (v) = lval_computed; |
1002 | v->location.computed.funcs = funcs; | |
1003 | v->location.computed.closure = closure; | |
5f5233d4 PA |
1004 | |
1005 | return v; | |
1006 | } | |
1007 | ||
a7035dbb JK |
1008 | /* Allocate NOT_LVAL value for type TYPE being OPTIMIZED_OUT. */ |
1009 | ||
1010 | struct value * | |
1011 | allocate_optimized_out_value (struct type *type) | |
1012 | { | |
1013 | struct value *retval = allocate_value_lazy (type); | |
1014 | ||
9a0dc9e3 PA |
1015 | mark_value_bytes_optimized_out (retval, 0, TYPE_LENGTH (type)); |
1016 | set_value_lazy (retval, 0); | |
a7035dbb JK |
1017 | return retval; |
1018 | } | |
1019 | ||
df407dfe AC |
1020 | /* Accessor methods. */ |
1021 | ||
17cf0ecd AC |
1022 | struct value * |
1023 | value_next (struct value *value) | |
1024 | { | |
1025 | return value->next; | |
1026 | } | |
1027 | ||
df407dfe | 1028 | struct type * |
0e03807e | 1029 | value_type (const struct value *value) |
df407dfe AC |
1030 | { |
1031 | return value->type; | |
1032 | } | |
04624583 AC |
1033 | void |
1034 | deprecated_set_value_type (struct value *value, struct type *type) | |
1035 | { | |
1036 | value->type = type; | |
1037 | } | |
df407dfe AC |
1038 | |
1039 | int | |
0e03807e | 1040 | value_offset (const struct value *value) |
df407dfe AC |
1041 | { |
1042 | return value->offset; | |
1043 | } | |
f5cf64a7 AC |
1044 | void |
1045 | set_value_offset (struct value *value, int offset) | |
1046 | { | |
1047 | value->offset = offset; | |
1048 | } | |
df407dfe AC |
1049 | |
1050 | int | |
0e03807e | 1051 | value_bitpos (const struct value *value) |
df407dfe AC |
1052 | { |
1053 | return value->bitpos; | |
1054 | } | |
9bbda503 AC |
1055 | void |
1056 | set_value_bitpos (struct value *value, int bit) | |
1057 | { | |
1058 | value->bitpos = bit; | |
1059 | } | |
df407dfe AC |
1060 | |
1061 | int | |
0e03807e | 1062 | value_bitsize (const struct value *value) |
df407dfe AC |
1063 | { |
1064 | return value->bitsize; | |
1065 | } | |
9bbda503 AC |
1066 | void |
1067 | set_value_bitsize (struct value *value, int bit) | |
1068 | { | |
1069 | value->bitsize = bit; | |
1070 | } | |
df407dfe | 1071 | |
4ea48cc1 DJ |
1072 | struct value * |
1073 | value_parent (struct value *value) | |
1074 | { | |
1075 | return value->parent; | |
1076 | } | |
1077 | ||
53ba8333 JB |
1078 | /* See value.h. */ |
1079 | ||
1080 | void | |
1081 | set_value_parent (struct value *value, struct value *parent) | |
1082 | { | |
40501e00 TT |
1083 | struct value *old = value->parent; |
1084 | ||
53ba8333 | 1085 | value->parent = parent; |
40501e00 TT |
1086 | if (parent != NULL) |
1087 | value_incref (parent); | |
1088 | value_free (old); | |
53ba8333 JB |
1089 | } |
1090 | ||
fc1a4b47 | 1091 | gdb_byte * |
990a07ab AC |
1092 | value_contents_raw (struct value *value) |
1093 | { | |
3ae385af SM |
1094 | struct gdbarch *arch = get_value_arch (value); |
1095 | int unit_size = gdbarch_addressable_memory_unit_size (arch); | |
1096 | ||
3e3d7139 | 1097 | allocate_value_contents (value); |
3ae385af | 1098 | return value->contents + value->embedded_offset * unit_size; |
990a07ab AC |
1099 | } |
1100 | ||
fc1a4b47 | 1101 | gdb_byte * |
990a07ab AC |
1102 | value_contents_all_raw (struct value *value) |
1103 | { | |
3e3d7139 JG |
1104 | allocate_value_contents (value); |
1105 | return value->contents; | |
990a07ab AC |
1106 | } |
1107 | ||
4754a64e AC |
1108 | struct type * |
1109 | value_enclosing_type (struct value *value) | |
1110 | { | |
1111 | return value->enclosing_type; | |
1112 | } | |
1113 | ||
8264ba82 AG |
1114 | /* Look at value.h for description. */ |
1115 | ||
1116 | struct type * | |
1117 | value_actual_type (struct value *value, int resolve_simple_types, | |
1118 | int *real_type_found) | |
1119 | { | |
1120 | struct value_print_options opts; | |
8264ba82 AG |
1121 | struct type *result; |
1122 | ||
1123 | get_user_print_options (&opts); | |
1124 | ||
1125 | if (real_type_found) | |
1126 | *real_type_found = 0; | |
1127 | result = value_type (value); | |
1128 | if (opts.objectprint) | |
1129 | { | |
5e34c6c3 LM |
1130 | /* If result's target type is TYPE_CODE_STRUCT, proceed to |
1131 | fetch its rtti type. */ | |
1132 | if ((TYPE_CODE (result) == TYPE_CODE_PTR | |
8264ba82 | 1133 | || TYPE_CODE (result) == TYPE_CODE_REF) |
5e34c6c3 LM |
1134 | && TYPE_CODE (check_typedef (TYPE_TARGET_TYPE (result))) |
1135 | == TYPE_CODE_STRUCT) | |
8264ba82 AG |
1136 | { |
1137 | struct type *real_type; | |
1138 | ||
1139 | real_type = value_rtti_indirect_type (value, NULL, NULL, NULL); | |
1140 | if (real_type) | |
1141 | { | |
1142 | if (real_type_found) | |
1143 | *real_type_found = 1; | |
1144 | result = real_type; | |
1145 | } | |
1146 | } | |
1147 | else if (resolve_simple_types) | |
1148 | { | |
1149 | if (real_type_found) | |
1150 | *real_type_found = 1; | |
1151 | result = value_enclosing_type (value); | |
1152 | } | |
1153 | } | |
1154 | ||
1155 | return result; | |
1156 | } | |
1157 | ||
901461f8 PA |
1158 | void |
1159 | error_value_optimized_out (void) | |
1160 | { | |
1161 | error (_("value has been optimized out")); | |
1162 | } | |
1163 | ||
0e03807e | 1164 | static void |
4e07d55f | 1165 | require_not_optimized_out (const struct value *value) |
0e03807e | 1166 | { |
9a0dc9e3 | 1167 | if (!VEC_empty (range_s, value->optimized_out)) |
901461f8 PA |
1168 | { |
1169 | if (value->lval == lval_register) | |
1170 | error (_("register has not been saved in frame")); | |
1171 | else | |
1172 | error_value_optimized_out (); | |
1173 | } | |
0e03807e TT |
1174 | } |
1175 | ||
4e07d55f PA |
1176 | static void |
1177 | require_available (const struct value *value) | |
1178 | { | |
1179 | if (!VEC_empty (range_s, value->unavailable)) | |
8af8e3bc | 1180 | throw_error (NOT_AVAILABLE_ERROR, _("value is not available")); |
4e07d55f PA |
1181 | } |
1182 | ||
fc1a4b47 | 1183 | const gdb_byte * |
0e03807e | 1184 | value_contents_for_printing (struct value *value) |
46615f07 AC |
1185 | { |
1186 | if (value->lazy) | |
1187 | value_fetch_lazy (value); | |
3e3d7139 | 1188 | return value->contents; |
46615f07 AC |
1189 | } |
1190 | ||
de4127a3 PA |
1191 | const gdb_byte * |
1192 | value_contents_for_printing_const (const struct value *value) | |
1193 | { | |
1194 | gdb_assert (!value->lazy); | |
1195 | return value->contents; | |
1196 | } | |
1197 | ||
0e03807e TT |
1198 | const gdb_byte * |
1199 | value_contents_all (struct value *value) | |
1200 | { | |
1201 | const gdb_byte *result = value_contents_for_printing (value); | |
1202 | require_not_optimized_out (value); | |
4e07d55f | 1203 | require_available (value); |
0e03807e TT |
1204 | return result; |
1205 | } | |
1206 | ||
9a0dc9e3 PA |
1207 | /* Copy ranges in SRC_RANGE that overlap [SRC_BIT_OFFSET, |
1208 | SRC_BIT_OFFSET+BIT_LENGTH) ranges into *DST_RANGE, adjusted. */ | |
1209 | ||
1210 | static void | |
1211 | ranges_copy_adjusted (VEC (range_s) **dst_range, int dst_bit_offset, | |
1212 | VEC (range_s) *src_range, int src_bit_offset, | |
1213 | int bit_length) | |
1214 | { | |
1215 | range_s *r; | |
1216 | int i; | |
1217 | ||
1218 | for (i = 0; VEC_iterate (range_s, src_range, i, r); i++) | |
1219 | { | |
1220 | ULONGEST h, l; | |
1221 | ||
1222 | l = max (r->offset, src_bit_offset); | |
1223 | h = min (r->offset + r->length, src_bit_offset + bit_length); | |
1224 | ||
1225 | if (l < h) | |
1226 | insert_into_bit_range_vector (dst_range, | |
1227 | dst_bit_offset + (l - src_bit_offset), | |
1228 | h - l); | |
1229 | } | |
1230 | } | |
1231 | ||
4875ffdb PA |
1232 | /* Copy the ranges metadata in SRC that overlaps [SRC_BIT_OFFSET, |
1233 | SRC_BIT_OFFSET+BIT_LENGTH) into DST, adjusted. */ | |
1234 | ||
1235 | static void | |
1236 | value_ranges_copy_adjusted (struct value *dst, int dst_bit_offset, | |
1237 | const struct value *src, int src_bit_offset, | |
1238 | int bit_length) | |
1239 | { | |
1240 | ranges_copy_adjusted (&dst->unavailable, dst_bit_offset, | |
1241 | src->unavailable, src_bit_offset, | |
1242 | bit_length); | |
1243 | ranges_copy_adjusted (&dst->optimized_out, dst_bit_offset, | |
1244 | src->optimized_out, src_bit_offset, | |
1245 | bit_length); | |
1246 | } | |
1247 | ||
3ae385af | 1248 | /* Copy LENGTH target addressable memory units of SRC value's (all) contents |
29976f3f PA |
1249 | (value_contents_all) starting at SRC_OFFSET, into DST value's (all) |
1250 | contents, starting at DST_OFFSET. If unavailable contents are | |
1251 | being copied from SRC, the corresponding DST contents are marked | |
1252 | unavailable accordingly. Neither DST nor SRC may be lazy | |
1253 | values. | |
1254 | ||
1255 | It is assumed the contents of DST in the [DST_OFFSET, | |
1256 | DST_OFFSET+LENGTH) range are wholly available. */ | |
39d37385 PA |
1257 | |
1258 | void | |
1259 | value_contents_copy_raw (struct value *dst, int dst_offset, | |
1260 | struct value *src, int src_offset, int length) | |
1261 | { | |
1262 | range_s *r; | |
bdf22206 | 1263 | int src_bit_offset, dst_bit_offset, bit_length; |
3ae385af SM |
1264 | struct gdbarch *arch = get_value_arch (src); |
1265 | int unit_size = gdbarch_addressable_memory_unit_size (arch); | |
39d37385 PA |
1266 | |
1267 | /* A lazy DST would make that this copy operation useless, since as | |
1268 | soon as DST's contents were un-lazied (by a later value_contents | |
1269 | call, say), the contents would be overwritten. A lazy SRC would | |
1270 | mean we'd be copying garbage. */ | |
1271 | gdb_assert (!dst->lazy && !src->lazy); | |
1272 | ||
29976f3f PA |
1273 | /* The overwritten DST range gets unavailability ORed in, not |
1274 | replaced. Make sure to remember to implement replacing if it | |
1275 | turns out actually necessary. */ | |
1276 | gdb_assert (value_bytes_available (dst, dst_offset, length)); | |
9a0dc9e3 PA |
1277 | gdb_assert (!value_bits_any_optimized_out (dst, |
1278 | TARGET_CHAR_BIT * dst_offset, | |
1279 | TARGET_CHAR_BIT * length)); | |
29976f3f | 1280 | |
39d37385 | 1281 | /* Copy the data. */ |
3ae385af SM |
1282 | memcpy (value_contents_all_raw (dst) + dst_offset * unit_size, |
1283 | value_contents_all_raw (src) + src_offset * unit_size, | |
1284 | length * unit_size); | |
39d37385 PA |
1285 | |
1286 | /* Copy the meta-data, adjusted. */ | |
3ae385af SM |
1287 | src_bit_offset = src_offset * unit_size * HOST_CHAR_BIT; |
1288 | dst_bit_offset = dst_offset * unit_size * HOST_CHAR_BIT; | |
1289 | bit_length = length * unit_size * HOST_CHAR_BIT; | |
39d37385 | 1290 | |
4875ffdb PA |
1291 | value_ranges_copy_adjusted (dst, dst_bit_offset, |
1292 | src, src_bit_offset, | |
1293 | bit_length); | |
39d37385 PA |
1294 | } |
1295 | ||
29976f3f PA |
1296 | /* Copy LENGTH bytes of SRC value's (all) contents |
1297 | (value_contents_all) starting at SRC_OFFSET byte, into DST value's | |
1298 | (all) contents, starting at DST_OFFSET. If unavailable contents | |
1299 | are being copied from SRC, the corresponding DST contents are | |
1300 | marked unavailable accordingly. DST must not be lazy. If SRC is | |
9a0dc9e3 | 1301 | lazy, it will be fetched now. |
29976f3f PA |
1302 | |
1303 | It is assumed the contents of DST in the [DST_OFFSET, | |
1304 | DST_OFFSET+LENGTH) range are wholly available. */ | |
39d37385 PA |
1305 | |
1306 | void | |
1307 | value_contents_copy (struct value *dst, int dst_offset, | |
1308 | struct value *src, int src_offset, int length) | |
1309 | { | |
39d37385 PA |
1310 | if (src->lazy) |
1311 | value_fetch_lazy (src); | |
1312 | ||
1313 | value_contents_copy_raw (dst, dst_offset, src, src_offset, length); | |
1314 | } | |
1315 | ||
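/* Sketch (illustrative only): value_contents_copy also copies the
   unavailability metadata, as described above.  INT_TYPE is a
   hypothetical 4-byte integer type.  */

static void
contents_copy_sketch (struct type *int_type)
{
  struct value *src = allocate_value (int_type);
  struct value *dst = allocate_value (int_type);

  mark_value_bytes_unavailable (src, 1, 1);
  value_contents_copy (dst, 0, src, 0, 4);

  gdb_assert (value_bytes_available (dst, 0, 1));
  gdb_assert (!value_bytes_available (dst, 1, 1));	/* Propagated.  */
}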
d69fe07e AC |
1316 | int |
1317 | value_lazy (struct value *value) | |
1318 | { | |
1319 | return value->lazy; | |
1320 | } | |
1321 | ||
dfa52d88 AC |
1322 | void |
1323 | set_value_lazy (struct value *value, int val) | |
1324 | { | |
1325 | value->lazy = val; | |
1326 | } | |
1327 | ||
4e5d721f DE |
1328 | int |
1329 | value_stack (struct value *value) | |
1330 | { | |
1331 | return value->stack; | |
1332 | } | |
1333 | ||
1334 | void | |
1335 | set_value_stack (struct value *value, int val) | |
1336 | { | |
1337 | value->stack = val; | |
1338 | } | |
1339 | ||
fc1a4b47 | 1340 | const gdb_byte * |
0fd88904 AC |
1341 | value_contents (struct value *value) |
1342 | { | |
0e03807e TT |
1343 | const gdb_byte *result = value_contents_writeable (value); |
1344 | require_not_optimized_out (value); | |
4e07d55f | 1345 | require_available (value); |
0e03807e | 1346 | return result; |
0fd88904 AC |
1347 | } |
1348 | ||
fc1a4b47 | 1349 | gdb_byte * |
0fd88904 AC |
1350 | value_contents_writeable (struct value *value) |
1351 | { | |
1352 | if (value->lazy) | |
1353 | value_fetch_lazy (value); | |
fc0c53a0 | 1354 | return value_contents_raw (value); |
0fd88904 AC |
1355 | } |
1356 | ||
feb13ab0 AC |
1357 | int |
1358 | value_optimized_out (struct value *value) | |
1359 | { | |
691a26f5 AB |
1360 | /* We can only know if a value is optimized out once we have tried to |
1361 | fetch it. */ | |
9a0dc9e3 | 1362 | if (VEC_empty (range_s, value->optimized_out) && value->lazy) |
691a26f5 AB |
1363 | value_fetch_lazy (value); |
1364 | ||
9a0dc9e3 | 1365 | return !VEC_empty (range_s, value->optimized_out); |
feb13ab0 AC |
1366 | } |
1367 | ||
9a0dc9e3 PA |
1368 | /* Mark contents of VALUE as optimized out, starting at OFFSET bytes, and |
1369 | the following LENGTH bytes. */ | |
eca07816 | 1370 | |
feb13ab0 | 1371 | void |
9a0dc9e3 | 1372 | mark_value_bytes_optimized_out (struct value *value, int offset, int length) |
feb13ab0 | 1373 | { |
9a0dc9e3 PA |
1374 | mark_value_bits_optimized_out (value, |
1375 | offset * TARGET_CHAR_BIT, | |
1376 | length * TARGET_CHAR_BIT); | |
feb13ab0 | 1377 | } |
13c3b5f5 | 1378 | |
9a0dc9e3 | 1379 | /* See value.h. */ |
0e03807e | 1380 | |
9a0dc9e3 PA |
1381 | void |
1382 | mark_value_bits_optimized_out (struct value *value, int offset, int length) | |
0e03807e | 1383 | { |
9a0dc9e3 | 1384 | insert_into_bit_range_vector (&value->optimized_out, offset, length); |
0e03807e TT |
1385 | } |
1386 | ||
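/* Sketch (illustrative only): marking part of a value optimized out
   and querying it through the accessors above.  INT_TYPE is a
   hypothetical 4-byte integer type.  */

static void
optimized_out_marking_sketch (struct type *int_type)
{
  struct value *v = allocate_value (int_type);

  mark_value_bytes_optimized_out (v, 0, 2);	/* Bytes [0, 2).  */
  gdb_assert (value_optimized_out (v));
  gdb_assert (!value_entirely_optimized_out (v));
  gdb_assert (value_bits_any_optimized_out (v, 0, TARGET_CHAR_BIT));
}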
8cf6f0b1 TT |
1387 | int |
1388 | value_bits_synthetic_pointer (const struct value *value, | |
1389 | int offset, int length) | |
1390 | { | |
e7303042 | 1391 | if (value->lval != lval_computed |
8cf6f0b1 TT |
1392 | || !value->location.computed.funcs->check_synthetic_pointer) |
1393 | return 0; | |
1394 | return value->location.computed.funcs->check_synthetic_pointer (value, | |
1395 | offset, | |
1396 | length); | |
1397 | } | |
1398 | ||
13c3b5f5 AC |
1399 | int |
1400 | value_embedded_offset (struct value *value) | |
1401 | { | |
1402 | return value->embedded_offset; | |
1403 | } | |
1404 | ||
1405 | void | |
1406 | set_value_embedded_offset (struct value *value, int val) | |
1407 | { | |
1408 | value->embedded_offset = val; | |
1409 | } | |
b44d461b AC |
1410 | |
1411 | int | |
1412 | value_pointed_to_offset (struct value *value) | |
1413 | { | |
1414 | return value->pointed_to_offset; | |
1415 | } | |
1416 | ||
1417 | void | |
1418 | set_value_pointed_to_offset (struct value *value, int val) | |
1419 | { | |
1420 | value->pointed_to_offset = val; | |
1421 | } | |
13bb5560 | 1422 | |
c8f2448a | 1423 | const struct lval_funcs * |
a471c594 | 1424 | value_computed_funcs (const struct value *v) |
5f5233d4 | 1425 | { |
a471c594 | 1426 | gdb_assert (value_lval_const (v) == lval_computed); |
5f5233d4 PA |
1427 | |
1428 | return v->location.computed.funcs; | |
1429 | } | |
1430 | ||
1431 | void * | |
0e03807e | 1432 | value_computed_closure (const struct value *v) |
5f5233d4 | 1433 | { |
0e03807e | 1434 | gdb_assert (v->lval == lval_computed); |
5f5233d4 PA |
1435 | |
1436 | return v->location.computed.closure; | |
1437 | } | |
1438 | ||
13bb5560 AC |
1439 | enum lval_type * |
1440 | deprecated_value_lval_hack (struct value *value) | |
1441 | { | |
1442 | return &value->lval; | |
1443 | } | |
1444 | ||
a471c594 JK |
1445 | enum lval_type |
1446 | value_lval_const (const struct value *value) | |
1447 | { | |
1448 | return value->lval; | |
1449 | } | |
1450 | ||
42ae5230 | 1451 | CORE_ADDR |
de4127a3 | 1452 | value_address (const struct value *value) |
42ae5230 TT |
1453 | { |
1454 | if (value->lval == lval_internalvar | |
e81e7f5e SC |
1455 | || value->lval == lval_internalvar_component |
1456 | || value->lval == lval_xcallable) | |
42ae5230 | 1457 | return 0; |
53ba8333 JB |
1458 | if (value->parent != NULL) |
1459 | return value_address (value->parent) + value->offset; | |
1460 | else | |
1461 | return value->location.address + value->offset; | |
42ae5230 TT |
1462 | } |
1463 | ||
1464 | CORE_ADDR | |
1465 | value_raw_address (struct value *value) | |
1466 | { | |
1467 | if (value->lval == lval_internalvar | |
e81e7f5e SC |
1468 | || value->lval == lval_internalvar_component |
1469 | || value->lval == lval_xcallable) | |
42ae5230 TT |
1470 | return 0; |
1471 | return value->location.address; | |
1472 | } | |
1473 | ||
1474 | void | |
1475 | set_value_address (struct value *value, CORE_ADDR addr) | |
13bb5560 | 1476 | { |
42ae5230 | 1477 | gdb_assert (value->lval != lval_internalvar |
e81e7f5e SC |
1478 | && value->lval != lval_internalvar_component |
1479 | && value->lval != lval_xcallable); | |
42ae5230 | 1480 | value->location.address = addr; |
13bb5560 AC |
1481 | } |
1482 | ||
1483 | struct internalvar ** | |
1484 | deprecated_value_internalvar_hack (struct value *value) | |
1485 | { | |
1486 | return &value->location.internalvar; | |
1487 | } | |
1488 | ||
1489 | struct frame_id * | |
1490 | deprecated_value_frame_id_hack (struct value *value) | |
1491 | { | |
1492 | return &value->frame_id; | |
1493 | } | |
1494 | ||
1495 | short * | |
1496 | deprecated_value_regnum_hack (struct value *value) | |
1497 | { | |
1498 | return &value->regnum; | |
1499 | } | |
88e3b34b AC |
1500 | |
1501 | int | |
1502 | deprecated_value_modifiable (struct value *value) | |
1503 | { | |
1504 | return value->modifiable; | |
1505 | } | |
990a07ab | 1506 | \f |
c906108c SS |
1507 | /* Return a mark in the value chain. All values allocated after the |
1508 | mark is obtained (except for those released) are subject to being freed | |
1509 | if a subsequent value_free_to_mark is passed the mark. */ | |
f23631e4 | 1510 | struct value * |
fba45db2 | 1511 | value_mark (void) |
c906108c SS |
1512 | { |
1513 | return all_values; | |
1514 | } | |
1515 | ||
828d3400 DJ |
1516 | /* Take a reference to VAL. VAL will not be deallocated until all |
1517 | references are released. */ | |
1518 | ||
1519 | void | |
1520 | value_incref (struct value *val) | |
1521 | { | |
1522 | val->reference_count++; | |
1523 | } | |
1524 | ||
1525 | /* Release a reference to VAL, which was acquired with value_incref. | |
1526 | This function is also called to deallocate values from the value | |
1527 | chain. */ | |
1528 | ||
3e3d7139 JG |
1529 | void |
1530 | value_free (struct value *val) | |
1531 | { | |
1532 | if (val) | |
5f5233d4 | 1533 | { |
828d3400 DJ |
1534 | gdb_assert (val->reference_count > 0); |
1535 | val->reference_count--; | |
1536 | if (val->reference_count > 0) | |
1537 | return; | |
1538 | ||
4ea48cc1 DJ |
1539 | /* If there's an associated parent value, drop our reference to |
1540 | it. */ | |
1541 | if (val->parent != NULL) | |
1542 | value_free (val->parent); | |
1543 | ||
5f5233d4 PA |
1544 | if (VALUE_LVAL (val) == lval_computed) |
1545 | { | |
c8f2448a | 1546 | const struct lval_funcs *funcs = val->location.computed.funcs; |
5f5233d4 PA |
1547 | |
1548 | if (funcs->free_closure) | |
1549 | funcs->free_closure (val); | |
1550 | } | |
e81e7f5e SC |
1551 | else if (VALUE_LVAL (val) == lval_xcallable) |
1552 | free_xmethod_worker (val->location.xm_worker); | |
5f5233d4 PA |
1553 | |
1554 | xfree (val->contents); | |
4e07d55f | 1555 | VEC_free (range_s, val->unavailable); |
5f5233d4 | 1556 | } |
3e3d7139 JG |
1557 | xfree (val); |
1558 | } | |
1559 | ||
c906108c SS |
1560 | /* Free all values allocated since MARK was obtained by value_mark |
1561 | (except for those released). */ | |
1562 | void | |
f23631e4 | 1563 | value_free_to_mark (struct value *mark) |
c906108c | 1564 | { |
f23631e4 AC |
1565 | struct value *val; |
1566 | struct value *next; | |
c906108c SS |
1567 | |
1568 | for (val = all_values; val && val != mark; val = next) | |
1569 | { | |
df407dfe | 1570 | next = val->next; |
e848a8a5 | 1571 | val->released = 1; |
c906108c SS |
1572 | value_free (val); |
1573 | } | |
1574 | all_values = val; | |
1575 | } | |
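/* Illustrative sketch, not part of value.c: the usual pairing of
   value_mark and value_free_to_mark.  Temporaries allocated while
   computing RESULT are reclaimed in one step; RESULT itself survives
   because it is released from the chain first.  compute_something is
   a hypothetical stand-in for any code that allocates values.  */

extern struct value *compute_something (void);	/* Hypothetical.  */

struct value *
compute_and_clean_up (void)
{
  struct value *mark = value_mark ();	/* Remember the chain head.  */
  struct value *result = compute_something ();

  release_value (result);		/* Keep RESULT off the chain...  */
  value_free_to_mark (mark);		/* ...and free everything else.  */
  return result;
}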
1576 | ||
1577 | /* Free all the values that have been allocated (except for those released). | |
725e88af DE |
1578 | Call after each command, successful or not. |
1579 | In practice this is called before each command, which is sufficient. */ | |
c906108c SS |
1580 | |
1581 | void | |
fba45db2 | 1582 | free_all_values (void) |
c906108c | 1583 | { |
f23631e4 AC |
1584 | struct value *val; |
1585 | struct value *next; | |
c906108c SS |
1586 | |
1587 | for (val = all_values; val; val = next) | |
1588 | { | |
df407dfe | 1589 | next = val->next; |
e848a8a5 | 1590 | val->released = 1; |
c906108c SS |
1591 | value_free (val); |
1592 | } | |
1593 | ||
1594 | all_values = 0; | |
1595 | } | |
1596 | ||
0cf6dd15 TJB |
1597 | /* Frees all the elements in a chain of values. */ |
1598 | ||
1599 | void | |
1600 | free_value_chain (struct value *v) | |
1601 | { | |
1602 | struct value *next; | |
1603 | ||
1604 | for (; v; v = next) | |
1605 | { | |
1606 | next = value_next (v); | |
1607 | value_free (v); | |
1608 | } | |
1609 | } | |
1610 | ||
c906108c SS |
1611 | /* Remove VAL from the chain all_values |
1612 | so it will not be freed automatically. */ | |
1613 | ||
1614 | void | |
f23631e4 | 1615 | release_value (struct value *val) |
c906108c | 1616 | { |
f23631e4 | 1617 | struct value *v; |
c906108c SS |
1618 | |
1619 | if (all_values == val) | |
1620 | { | |
1621 | all_values = val->next; | |
06a64a0b | 1622 | val->next = NULL; |
e848a8a5 | 1623 | val->released = 1; |
c906108c SS |
1624 | return; |
1625 | } | |
1626 | ||
1627 | for (v = all_values; v; v = v->next) | |
1628 | { | |
1629 | if (v->next == val) | |
1630 | { | |
1631 | v->next = val->next; | |
06a64a0b | 1632 | val->next = NULL; |
e848a8a5 | 1633 | val->released = 1; |
c906108c SS |
1634 | break; |
1635 | } | |
1636 | } | |
1637 | } | |
1638 | ||
e848a8a5 TT |
1639 | /* If the value is not already released, release it. |
1640 | If the value is already released, increment its reference count. | |
1641 | That is, this function ensures that the value is released from the | |
1642 | value chain and that the caller owns a reference to it. */ | |
1643 | ||
1644 | void | |
1645 | release_value_or_incref (struct value *val) | |
1646 | { | |
1647 | if (val->released) | |
1648 | value_incref (val); | |
1649 | else | |
1650 | release_value (val); | |
1651 | } | |
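/* Illustrative sketch, not part of value.c: keeping a value alive
   beyond the current command.  A freshly allocated value sits on the
   value chain holding a single reference; release_value_or_incref
   makes the caller an owner whether or not the value was already
   released, and the matching value_free later drops that ownership.
   The cache below is hypothetical.  */

static struct value *cached_value;

void
cache_value (struct value *val)
{
  release_value_or_incref (val);	/* Take ownership of one reference.  */
  cached_value = val;
}

void
uncache_value (void)
{
  value_free (cached_value);		/* Drop our reference.  */
  cached_value = NULL;
}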
1652 | ||
c906108c | 1653 | /* Release all values up to MARK.  */ |
f23631e4 AC |
1654 | struct value * |
1655 | value_release_to_mark (struct value *mark) | |
c906108c | 1656 | { |
f23631e4 AC |
1657 | struct value *val; |
1658 | struct value *next; | |
c906108c | 1659 | |
df407dfe | 1660 | for (val = next = all_values; next; next = next->next) |
e848a8a5 TT |
1661 | { |
1662 | if (next->next == mark) | |
1663 | { | |
1664 | all_values = next->next; | |
1665 | next->next = NULL; | |
1666 | return val; | |
1667 | } | |
1668 | next->released = 1; | |
1669 | } | |
c906108c SS |
1670 | all_values = 0; |
1671 | return val; | |
1672 | } | |
1673 | ||
1674 | /* Return a copy of the value ARG. | |
1675 | It contains the same contents, for the same memory address, |
1676 | but it's a different block of storage. */ | |
1677 | ||
f23631e4 AC |
1678 | struct value * |
1679 | value_copy (struct value *arg) | |
c906108c | 1680 | { |
4754a64e | 1681 | struct type *encl_type = value_enclosing_type (arg); |
3e3d7139 JG |
1682 | struct value *val; |
1683 | ||
1684 | if (value_lazy (arg)) | |
1685 | val = allocate_value_lazy (encl_type); | |
1686 | else | |
1687 | val = allocate_value (encl_type); | |
df407dfe | 1688 | val->type = arg->type; |
c906108c | 1689 | VALUE_LVAL (val) = VALUE_LVAL (arg); |
6f7c8fc2 | 1690 | val->location = arg->location; |
df407dfe AC |
1691 | val->offset = arg->offset; |
1692 | val->bitpos = arg->bitpos; | |
1693 | val->bitsize = arg->bitsize; | |
1df6926e | 1694 | VALUE_FRAME_ID (val) = VALUE_FRAME_ID (arg); |
9ee8fc9d | 1695 | VALUE_REGNUM (val) = VALUE_REGNUM (arg); |
d69fe07e | 1696 | val->lazy = arg->lazy; |
13c3b5f5 | 1697 | val->embedded_offset = value_embedded_offset (arg); |
b44d461b | 1698 | val->pointed_to_offset = arg->pointed_to_offset; |
c906108c | 1699 | val->modifiable = arg->modifiable; |
d69fe07e | 1700 | if (!value_lazy (val)) |
c906108c | 1701 | { |
990a07ab | 1702 | memcpy (value_contents_all_raw (val), value_contents_all_raw (arg), |
4754a64e | 1703 | TYPE_LENGTH (value_enclosing_type (arg))); |
c906108c SS |
1704 | |
1705 | } | |
4e07d55f | 1706 | val->unavailable = VEC_copy (range_s, arg->unavailable); |
9a0dc9e3 | 1707 | val->optimized_out = VEC_copy (range_s, arg->optimized_out); |
40501e00 | 1708 | set_value_parent (val, arg->parent); |
5f5233d4 PA |
1709 | if (VALUE_LVAL (val) == lval_computed) |
1710 | { | |
c8f2448a | 1711 | const struct lval_funcs *funcs = val->location.computed.funcs; |
5f5233d4 PA |
1712 | |
1713 | if (funcs->copy_closure) | |
1714 | val->location.computed.closure = funcs->copy_closure (val); | |
1715 | } | |
c906108c SS |
1716 | return val; |
1717 | } | |
74bcbdf3 | 1718 | |
4c082a81 SC |
1719 | /* Return a "const" and/or "volatile" qualified version of the value V. |
1720 | If CNST is true, then the returned value will be qualified with | |
1721 | "const". | |
1722 | If VOLTL is true, then the returned value will be qualified with |
1723 | "volatile". */ | |
1724 | ||
1725 | struct value * | |
1726 | make_cv_value (int cnst, int voltl, struct value *v) | |
1727 | { | |
1728 | struct type *val_type = value_type (v); | |
1729 | struct type *enclosing_type = value_enclosing_type (v); | |
1730 | struct value *cv_val = value_copy (v); | |
1731 | ||
1732 | deprecated_set_value_type (cv_val, | |
1733 | make_cv_type (cnst, voltl, val_type, NULL)); | |
1734 | set_value_enclosing_type (cv_val, | |
1735 | make_cv_type (cnst, voltl, enclosing_type, NULL)); | |
1736 | ||
1737 | return cv_val; | |
1738 | } | |
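/* Illustrative sketch, not part of value.c: make_cv_value in use.
   The returned value shares contents and location with the copy made
   above; only its type and enclosing type gain the qualifiers.  */

struct value *
as_const_view (struct value *v)
{
  return make_cv_value (1 /* const */, 0 /* volatile */, v);
}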
1739 | ||
c37f7098 KW |
1740 | /* Return a version of ARG that is non-lvalue. */ |
1741 | ||
1742 | struct value * | |
1743 | value_non_lval (struct value *arg) | |
1744 | { | |
1745 | if (VALUE_LVAL (arg) != not_lval) | |
1746 | { | |
1747 | struct type *enc_type = value_enclosing_type (arg); | |
1748 | struct value *val = allocate_value (enc_type); | |
1749 | ||
1750 | memcpy (value_contents_all_raw (val), value_contents_all (arg), | |
1751 | TYPE_LENGTH (enc_type)); | |
1752 | val->type = arg->type; | |
1753 | set_value_embedded_offset (val, value_embedded_offset (arg)); | |
1754 | set_value_pointed_to_offset (val, value_pointed_to_offset (arg)); | |
1755 | return val; | |
1756 | } | |
1757 | return arg; | |
1758 | } | |
1759 | ||
6c659fc2 SC |
1760 | /* Write contents of V at ADDR and set its lval type to be LVAL_MEMORY. */ |
1761 | ||
1762 | void | |
1763 | value_force_lval (struct value *v, CORE_ADDR addr) | |
1764 | { | |
1765 | gdb_assert (VALUE_LVAL (v) == not_lval); | |
1766 | ||
1767 | write_memory (addr, value_contents_raw (v), TYPE_LENGTH (value_type (v))); | |
1768 | v->lval = lval_memory; | |
1769 | v->location.address = addr; | |
1770 | } | |
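/* Illustrative sketch, not part of value.c: materializing a computed
   value in the inferior.  ADDR is assumed to point at writable target
   memory of at least TYPE_LENGTH bytes.  A value built from a constant
   is not_lval, so it satisfies the assertion in value_force_lval; from
   then on it reads and writes through target memory like any other
   lval_memory value.  */

void
poke_int_at (struct gdbarch *gdbarch, CORE_ADDR addr, LONGEST l)
{
  struct value *v
    = value_from_longest (builtin_type (gdbarch)->builtin_int, l);

  value_force_lval (v, addr);
}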
1771 | ||
74bcbdf3 | 1772 | void |
0e03807e TT |
1773 | set_value_component_location (struct value *component, |
1774 | const struct value *whole) | |
74bcbdf3 | 1775 | { |
e81e7f5e SC |
1776 | gdb_assert (whole->lval != lval_xcallable); |
1777 | ||
0e03807e | 1778 | if (whole->lval == lval_internalvar) |
74bcbdf3 PA |
1779 | VALUE_LVAL (component) = lval_internalvar_component; |
1780 | else | |
0e03807e | 1781 | VALUE_LVAL (component) = whole->lval; |
5f5233d4 | 1782 | |
74bcbdf3 | 1783 | component->location = whole->location; |
0e03807e | 1784 | if (whole->lval == lval_computed) |
5f5233d4 | 1785 | { |
c8f2448a | 1786 | const struct lval_funcs *funcs = whole->location.computed.funcs; |
5f5233d4 PA |
1787 | |
1788 | if (funcs->copy_closure) | |
1789 | component->location.computed.closure = funcs->copy_closure (whole); | |
1790 | } | |
74bcbdf3 PA |
1791 | } |
1792 | ||
c906108c SS |
1793 | \f |
1794 | /* Access to the value history. */ | |
1795 | ||
1796 | /* Record a new value in the value history. | |
eddf0bae | 1797 | Returns the absolute history index of the entry. */ |
c906108c SS |
1798 | |
1799 | int | |
f23631e4 | 1800 | record_latest_value (struct value *val) |
c906108c SS |
1801 | { |
1802 | int i; | |
1803 | ||
1804 | /* We don't want this value to have anything to do with the inferior anymore. | |
1805 | In particular, "set $1 = 50" should not affect the variable from which | |
1806 | the value was taken, and fast watchpoints should be able to assume that | |
1807 | a value on the value history never changes. */ | |
d69fe07e | 1808 | if (value_lazy (val)) |
c906108c SS |
1809 | value_fetch_lazy (val); |
1810 | /* We preserve VALUE_LVAL so that the user can find out where it was fetched | |
1811 | from. This is a bit dubious, because then *&$1 does not just return $1 | |
1812 | but the current contents of that location. c'est la vie... */ | |
1813 | val->modifiable = 0; | |
350e1a76 DE |
1814 | |
1815 | /* The value may have already been released, in which case we're adding a | |
1816 | new reference for its entry in the history. That is why we call | |
1817 | release_value_or_incref here instead of release_value. */ | |
1818 | release_value_or_incref (val); | |
c906108c SS |
1819 | |
1820 | /* Here we treat value_history_count as origin-zero, |
1821 | applying to the value being stored now.  */ |
1822 | ||
1823 | i = value_history_count % VALUE_HISTORY_CHUNK; | |
1824 | if (i == 0) | |
1825 | { | |
fe978cb0 | 1826 | struct value_history_chunk *newobj |
a109c7c1 MS |
1827 | = (struct value_history_chunk *) |
1828 | ||
c5aa993b | 1829 | xmalloc (sizeof (struct value_history_chunk)); |
fe978cb0 PA |
1830 | memset (newobj->values, 0, sizeof newobj->values); |
1831 | newobj->next = value_history_chain; | |
1832 | value_history_chain = newobj; | |
c906108c SS |
1833 | } |
1834 | ||
1835 | value_history_chain->values[i] = val; | |
1836 | ||
1837 | /* Now we regard value_history_count as origin-one, |
1838 | applying to the value just stored.  */ |
1839 | ||
1840 | return ++value_history_count; | |
1841 | } | |
1842 | ||
1843 | /* Return a copy of the value in the history with sequence number NUM. */ | |
1844 | ||
f23631e4 | 1845 | struct value * |
fba45db2 | 1846 | access_value_history (int num) |
c906108c | 1847 | { |
f23631e4 | 1848 | struct value_history_chunk *chunk; |
52f0bd74 AC |
1849 | int i; |
1850 | int absnum = num; | |
c906108c SS |
1851 | |
1852 | if (absnum <= 0) | |
1853 | absnum += value_history_count; | |
1854 | ||
1855 | if (absnum <= 0) | |
1856 | { | |
1857 | if (num == 0) | |
8a3fe4f8 | 1858 | error (_("The history is empty.")); |
c906108c | 1859 | else if (num == 1) |
8a3fe4f8 | 1860 | error (_("There is only one value in the history.")); |
c906108c | 1861 | else |
8a3fe4f8 | 1862 | error (_("History does not go back to $$%d."), -num); |
c906108c SS |
1863 | } |
1864 | if (absnum > value_history_count) | |
8a3fe4f8 | 1865 | error (_("History has not yet reached $%d."), absnum); |
c906108c SS |
1866 | |
1867 | absnum--; | |
1868 | ||
1869 | /* Now absnum is always absolute and origin zero. */ | |
1870 | ||
1871 | chunk = value_history_chain; | |
3e43a32a MS |
1872 | for (i = (value_history_count - 1) / VALUE_HISTORY_CHUNK |
1873 | - absnum / VALUE_HISTORY_CHUNK; | |
c906108c SS |
1874 | i > 0; i--) |
1875 | chunk = chunk->next; | |
1876 | ||
1877 | return value_copy (chunk->values[absnum % VALUE_HISTORY_CHUNK]); | |
1878 | } | |
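/* Illustrative sketch, not part of value.c: recording a value and
   reading it back.  record_latest_value returns the 1-based history
   number (the N in $N); access_value_history returns a fresh copy, so
   the caller may modify it without disturbing the recorded entry.  */

void
remember_value (struct value *val)
{
  int histnum = record_latest_value (val);
  struct value *copy = access_value_history (histnum);

  /* COPY and the stored $N entry are distinct value objects with
     identical contents.  */
  (void) copy;
}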
1879 | ||
c906108c | 1880 | static void |
fba45db2 | 1881 | show_values (char *num_exp, int from_tty) |
c906108c | 1882 | { |
52f0bd74 | 1883 | int i; |
f23631e4 | 1884 | struct value *val; |
c906108c SS |
1885 | static int num = 1; |
1886 | ||
1887 | if (num_exp) | |
1888 | { | |
f132ba9d TJB |
1889 | /* "show values +" should print from the stored position. |
1890 | "show values <exp>" should print around value number <exp>. */ | |
c906108c | 1891 | if (num_exp[0] != '+' || num_exp[1] != '\0') |
bb518678 | 1892 | num = parse_and_eval_long (num_exp) - 5; |
c906108c SS |
1893 | } |
1894 | else | |
1895 | { | |
f132ba9d | 1896 | /* "show values" means print the last 10 values. */ |
c906108c SS |
1897 | num = value_history_count - 9; |
1898 | } | |
1899 | ||
1900 | if (num <= 0) | |
1901 | num = 1; | |
1902 | ||
1903 | for (i = num; i < num + 10 && i <= value_history_count; i++) | |
1904 | { | |
79a45b7d | 1905 | struct value_print_options opts; |
a109c7c1 | 1906 | |
c906108c | 1907 | val = access_value_history (i); |
a3f17187 | 1908 | printf_filtered (("$%d = "), i); |
79a45b7d TT |
1909 | get_user_print_options (&opts); |
1910 | value_print (val, gdb_stdout, &opts); | |
a3f17187 | 1911 | printf_filtered (("\n")); |
c906108c SS |
1912 | } |
1913 | ||
f132ba9d | 1914 | /* The next "show values +" should start after what we just printed. */ |
c906108c SS |
1915 | num += 10; |
1916 | ||
1917 | /* Hitting just return after this command should do the same thing as | |
f132ba9d TJB |
1918 | "show values +". If num_exp is null, this is unnecessary, since |
1919 | "show values +" is not useful after "show values". */ | |
c906108c SS |
1920 | if (from_tty && num_exp) |
1921 | { | |
1922 | num_exp[0] = '+'; | |
1923 | num_exp[1] = '\0'; | |
1924 | } | |
1925 | } | |
1926 | \f | |
52059ffd TT |
1927 | enum internalvar_kind |
1928 | { | |
1929 | /* The internal variable is empty. */ | |
1930 | INTERNALVAR_VOID, | |
1931 | ||
1932 | /* The value of the internal variable is provided directly as | |
1933 | a GDB value object. */ | |
1934 | INTERNALVAR_VALUE, | |
1935 | ||
1936 | /* A fresh value is computed via a call-back routine on every | |
1937 | access to the internal variable. */ | |
1938 | INTERNALVAR_MAKE_VALUE, | |
1939 | ||
1940 | /* The internal variable holds a GDB internal convenience function. */ | |
1941 | INTERNALVAR_FUNCTION, | |
1942 | ||
1943 | /* The variable holds an integer value. */ | |
1944 | INTERNALVAR_INTEGER, | |
1945 | ||
1946 | /* The variable holds a GDB-provided string. */ | |
1947 | INTERNALVAR_STRING, | |
1948 | }; | |
1949 | ||
1950 | union internalvar_data | |
1951 | { | |
1952 | /* A value object used with INTERNALVAR_VALUE. */ | |
1953 | struct value *value; | |
1954 | ||
1955 | /* The call-back routine used with INTERNALVAR_MAKE_VALUE. */ | |
1956 | struct | |
1957 | { | |
1958 | /* The functions to call. */ | |
1959 | const struct internalvar_funcs *functions; | |
1960 | ||
1961 | /* The function's user-data. */ | |
1962 | void *data; | |
1963 | } make_value; | |
1964 | ||
1965 | /* The internal function used with INTERNALVAR_FUNCTION. */ | |
1966 | struct | |
1967 | { | |
1968 | struct internal_function *function; | |
1969 | /* True if this is the canonical name for the function. */ | |
1970 | int canonical; | |
1971 | } fn; | |
1972 | ||
1973 | /* An integer value used with INTERNALVAR_INTEGER. */ | |
1974 | struct | |
1975 | { | |
1976 | /* If type is non-NULL, it will be used as the type to generate | |
1977 | a value for this internal variable. If type is NULL, a default | |
1978 | integer type for the architecture is used. */ | |
1979 | struct type *type; | |
1980 | LONGEST val; | |
1981 | } integer; | |
1982 | ||
1983 | /* A string value used with INTERNALVAR_STRING. */ | |
1984 | char *string; | |
1985 | }; | |
1986 | ||
c906108c SS |
1987 | /* Internal variables. These are variables within the debugger |
1988 | that hold values assigned by debugger commands. | |
1989 | The user refers to them with a '$' prefix | |
1990 | that does not appear in the variable names stored internally. */ | |
1991 | ||
4fa62494 UW |
1992 | struct internalvar |
1993 | { | |
1994 | struct internalvar *next; | |
1995 | char *name; | |
4fa62494 | 1996 | |
78267919 UW |
1997 | /* We support various different kinds of content of an internal variable. |
1998 | enum internalvar_kind specifies the kind, and union internalvar_data | |
1999 | provides the data associated with this particular kind. */ | |
2000 | ||
52059ffd | 2001 | enum internalvar_kind kind; |
4fa62494 | 2002 | |
52059ffd | 2003 | union internalvar_data u; |
4fa62494 UW |
2004 | }; |
2005 | ||
c906108c SS |
2006 | static struct internalvar *internalvars; |
2007 | ||
3e43a32a MS |
2008 | /* If the variable does not already exist, create it and give it the |
2009 | value given. If no value is given then the default is zero. */ | |
53e5f3cf AS |
2010 | static void |
2011 | init_if_undefined_command (char* args, int from_tty) | |
2012 | { | |
2013 | struct internalvar* intvar; | |
2014 | ||
2015 | /* Parse the expression - this is taken from set_command(). */ | |
2016 | struct expression *expr = parse_expression (args); | |
2017 | struct cleanup *old_chain = |
2018 | make_cleanup (free_current_contents, &expr); | |
2019 | ||
2020 | /* Validate the expression. | |
2021 | Was the expression an assignment? | |
2022 | Or even an expression at all? */ | |
2023 | if (expr->nelts == 0 || expr->elts[0].opcode != BINOP_ASSIGN) | |
2024 | error (_("Init-if-undefined requires an assignment expression.")); | |
2025 | ||
2026 | /* Extract the variable from the parsed expression. | |
2027 | In the case of an assign the lvalue will be in elts[1] and elts[2]. */ | |
2028 | if (expr->elts[1].opcode != OP_INTERNALVAR) | |
3e43a32a MS |
2029 | error (_("The first parameter to init-if-undefined " |
2030 | "should be a GDB variable.")); | |
53e5f3cf AS |
2031 | intvar = expr->elts[2].internalvar; |
2032 | ||
2033 | /* Only evaluate the expression if the lvalue is void. | |
2034 | This may still fail if the expression is invalid. */ |
78267919 | 2035 | if (intvar->kind == INTERNALVAR_VOID) |
53e5f3cf AS |
2036 | evaluate_expression (expr); |
2037 | ||
2038 | do_cleanups (old_chain); | |
2039 | } | |
2040 | ||
2041 | ||
c906108c SS |
2042 | /* Look up an internal variable with name NAME. NAME should not |
2043 | normally include a dollar sign. | |
2044 | ||
2045 | If the specified internal variable does not exist, | |
c4a3d09a | 2046 | the return value is NULL. */ |
c906108c SS |
2047 | |
2048 | struct internalvar * | |
bc3b79fd | 2049 | lookup_only_internalvar (const char *name) |
c906108c | 2050 | { |
52f0bd74 | 2051 | struct internalvar *var; |
c906108c SS |
2052 | |
2053 | for (var = internalvars; var; var = var->next) | |
5cb316ef | 2054 | if (strcmp (var->name, name) == 0) |
c906108c SS |
2055 | return var; |
2056 | ||
c4a3d09a MF |
2057 | return NULL; |
2058 | } | |
2059 | ||
d55637df TT |
2060 | /* Complete NAME by comparing it to the names of internal variables. |
2061 | Returns a vector of newly allocated strings, or NULL if no matches | |
2062 | were found. */ | |
2063 | ||
2064 | VEC (char_ptr) * | |
2065 | complete_internalvar (const char *name) | |
2066 | { | |
2067 | VEC (char_ptr) *result = NULL; | |
2068 | struct internalvar *var; | |
2069 | int len; | |
2070 | ||
2071 | len = strlen (name); | |
2072 | ||
2073 | for (var = internalvars; var; var = var->next) | |
2074 | if (strncmp (var->name, name, len) == 0) | |
2075 | { | |
2076 | char *r = xstrdup (var->name); | |
2077 | ||
2078 | VEC_safe_push (char_ptr, result, r); | |
2079 | } | |
2080 | ||
2081 | return result; | |
2082 | } | |
c4a3d09a MF |
2083 | |
2084 | /* Create an internal variable with name NAME and with a void value. | |
2085 | NAME should not normally include a dollar sign. */ | |
2086 | ||
2087 | struct internalvar * | |
bc3b79fd | 2088 | create_internalvar (const char *name) |
c4a3d09a MF |
2089 | { |
2090 | struct internalvar *var; | |
a109c7c1 | 2091 | |
c906108c | 2092 | var = (struct internalvar *) xmalloc (sizeof (struct internalvar)); |
1754f103 | 2093 | var->name = concat (name, (char *)NULL); |
78267919 | 2094 | var->kind = INTERNALVAR_VOID; |
c906108c SS |
2095 | var->next = internalvars; |
2096 | internalvars = var; | |
2097 | return var; | |
2098 | } | |
2099 | ||
4aa995e1 PA |
2100 | /* Create an internal variable with name NAME and register |
2101 | FUNCS->make_value as the callback that value_of_internalvar uses |
2102 | to create a value whenever this variable is referenced. NAME |
22d2b532 SDJ |
2103 | should not normally include a dollar sign. DATA is passed |
2104 | uninterpreted to the callbacks. FUNCS->destroy, if not NULL, is |
2105 | called when the variable is destroyed and is passed DATA. */ |
4aa995e1 PA |
2106 | |
2107 | struct internalvar * | |
22d2b532 SDJ |
2108 | create_internalvar_type_lazy (const char *name, |
2109 | const struct internalvar_funcs *funcs, | |
2110 | void *data) | |
4aa995e1 | 2111 | { |
4fa62494 | 2112 | struct internalvar *var = create_internalvar (name); |
a109c7c1 | 2113 | |
78267919 | 2114 | var->kind = INTERNALVAR_MAKE_VALUE; |
22d2b532 SDJ |
2115 | var->u.make_value.functions = funcs; |
2116 | var->u.make_value.data = data; | |
4aa995e1 PA |
2117 | return var; |
2118 | } | |
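/* Illustrative sketch, not part of value.c: a lazily computed
   convenience variable.  Every reference to $_fortytwo calls
   make_fortytwo_value; compile_to_ax and destroy are left NULL, so the
   variable cannot be used in agent expressions and needs no cleanup.
   The names are hypothetical, and the member order of struct
   internalvar_funcs (make_value, compile_to_ax, destroy) is assumed
   from its uses in this file.  */

static struct value *
make_fortytwo_value (struct gdbarch *gdbarch, struct internalvar *var,
		     void *data)
{
  return value_from_longest (builtin_type (gdbarch)->builtin_int, 42);
}

static const struct internalvar_funcs fortytwo_funcs =
{
  make_fortytwo_value,
  NULL,				/* compile_to_ax */
  NULL				/* destroy */
};

/* Typically called from an _initialize_* routine.  */

void
register_fortytwo_variable (void)
{
  create_internalvar_type_lazy ("_fortytwo", &fortytwo_funcs, NULL);
}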
c4a3d09a | 2119 | |
22d2b532 SDJ |
2120 | /* See documentation in value.h. */ |
2121 | ||
2122 | int | |
2123 | compile_internalvar_to_ax (struct internalvar *var, | |
2124 | struct agent_expr *expr, | |
2125 | struct axs_value *value) | |
2126 | { | |
2127 | if (var->kind != INTERNALVAR_MAKE_VALUE | |
2128 | || var->u.make_value.functions->compile_to_ax == NULL) | |
2129 | return 0; | |
2130 | ||
2131 | var->u.make_value.functions->compile_to_ax (var, expr, value, | |
2132 | var->u.make_value.data); | |
2133 | return 1; | |
2134 | } | |
2135 | ||
c4a3d09a MF |
2136 | /* Look up an internal variable with name NAME. NAME should not |
2137 | normally include a dollar sign. | |
2138 | ||
2139 | If the specified internal variable does not exist, | |
2140 | one is created, with a void value. */ | |
2141 | ||
2142 | struct internalvar * | |
bc3b79fd | 2143 | lookup_internalvar (const char *name) |
c4a3d09a MF |
2144 | { |
2145 | struct internalvar *var; | |
2146 | ||
2147 | var = lookup_only_internalvar (name); | |
2148 | if (var) | |
2149 | return var; | |
2150 | ||
2151 | return create_internalvar (name); | |
2152 | } | |
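/* Illustrative sketch, not part of value.c: a round trip through the
   internal-variable accessors.  lookup_internalvar creates "$answer"
   with a void value on first use; set_internalvar_integer (defined
   below) gives it an integer, get_internalvar_integer reads it back
   without building a struct value, and value_of_internalvar produces
   a value suitable for printing.  */

void
exercise_answer_variable (struct gdbarch *gdbarch)
{
  struct internalvar *var = lookup_internalvar ("answer");
  LONGEST back;

  set_internalvar_integer (var, 42);
  if (get_internalvar_integer (var, &back))
    gdb_assert (back == 42);

  (void) value_of_internalvar (gdbarch, var);
}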
2153 | ||
78267919 UW |
2154 | /* Return current value of internal variable VAR. For variables that |
2155 | are not inherently typed, use a value type appropriate for GDBARCH. */ | |
2156 | ||
f23631e4 | 2157 | struct value * |
78267919 | 2158 | value_of_internalvar (struct gdbarch *gdbarch, struct internalvar *var) |
c906108c | 2159 | { |
f23631e4 | 2160 | struct value *val; |
0914bcdb SS |
2161 | struct trace_state_variable *tsv; |
2162 | ||
2163 | /* If there is a trace state variable of the same name, assume that | |
2164 | is what we really want to see. */ | |
2165 | tsv = find_trace_state_variable (var->name); | |
2166 | if (tsv) | |
2167 | { | |
2168 | tsv->value_known = target_get_trace_state_variable_value (tsv->number, | |
2169 | &(tsv->value)); | |
2170 | if (tsv->value_known) | |
2171 | val = value_from_longest (builtin_type (gdbarch)->builtin_int64, | |
2172 | tsv->value); | |
2173 | else | |
2174 | val = allocate_value (builtin_type (gdbarch)->builtin_void); | |
2175 | return val; | |
2176 | } | |
c906108c | 2177 | |
78267919 | 2178 | switch (var->kind) |
5f5233d4 | 2179 | { |
78267919 UW |
2180 | case INTERNALVAR_VOID: |
2181 | val = allocate_value (builtin_type (gdbarch)->builtin_void); | |
2182 | break; | |
4fa62494 | 2183 | |
78267919 UW |
2184 | case INTERNALVAR_FUNCTION: |
2185 | val = allocate_value (builtin_type (gdbarch)->internal_fn); | |
2186 | break; | |
4fa62494 | 2187 | |
cab0c772 UW |
2188 | case INTERNALVAR_INTEGER: |
2189 | if (!var->u.integer.type) | |
78267919 | 2190 | val = value_from_longest (builtin_type (gdbarch)->builtin_int, |
cab0c772 | 2191 | var->u.integer.val); |
78267919 | 2192 | else |
cab0c772 UW |
2193 | val = value_from_longest (var->u.integer.type, var->u.integer.val); |
2194 | break; | |
2195 | ||
78267919 UW |
2196 | case INTERNALVAR_STRING: |
2197 | val = value_cstring (var->u.string, strlen (var->u.string), | |
2198 | builtin_type (gdbarch)->builtin_char); | |
2199 | break; | |
4fa62494 | 2200 | |
78267919 UW |
2201 | case INTERNALVAR_VALUE: |
2202 | val = value_copy (var->u.value); | |
4aa995e1 PA |
2203 | if (value_lazy (val)) |
2204 | value_fetch_lazy (val); | |
78267919 | 2205 | break; |
4aa995e1 | 2206 | |
78267919 | 2207 | case INTERNALVAR_MAKE_VALUE: |
22d2b532 SDJ |
2208 | val = (*var->u.make_value.functions->make_value) (gdbarch, var, |
2209 | var->u.make_value.data); | |
78267919 UW |
2210 | break; |
2211 | ||
2212 | default: | |
9b20d036 | 2213 | internal_error (__FILE__, __LINE__, _("bad kind")); |
78267919 UW |
2214 | } |
2215 | ||
2216 | /* Change the VALUE_LVAL to lval_internalvar so that future operations | |
2217 | on this value go back to affect the original internal variable. | |
2218 | ||
2219 | Do not do this for INTERNALVAR_MAKE_VALUE variables, as those have | |
2220 | no underlying modifiable state in the internal variable. |
2221 | ||
2222 | Likewise, if the variable's value is a computed lvalue, we want | |
2223 | references to it to produce another computed lvalue, where | |
2224 | references and assignments actually operate through the | |
2225 | computed value's functions. | |
2226 | ||
2227 | This means that internal variables with computed values | |
2228 | behave a little differently from other internal variables: | |
2229 | assignments to them don't just replace the previous value | |
2230 | altogether. At the moment, this seems like the behavior we | |
2231 | want. */ | |
2232 | ||
2233 | if (var->kind != INTERNALVAR_MAKE_VALUE | |
2234 | && val->lval != lval_computed) | |
2235 | { | |
2236 | VALUE_LVAL (val) = lval_internalvar; | |
2237 | VALUE_INTERNALVAR (val) = var; | |
5f5233d4 | 2238 | } |
d3c139e9 | 2239 | |
4fa62494 UW |
2240 | return val; |
2241 | } | |
d3c139e9 | 2242 | |
4fa62494 UW |
2243 | int |
2244 | get_internalvar_integer (struct internalvar *var, LONGEST *result) | |
2245 | { | |
3158c6ed | 2246 | if (var->kind == INTERNALVAR_INTEGER) |
4fa62494 | 2247 | { |
cab0c772 UW |
2248 | *result = var->u.integer.val; |
2249 | return 1; | |
3158c6ed | 2250 | } |
d3c139e9 | 2251 | |
3158c6ed PA |
2252 | if (var->kind == INTERNALVAR_VALUE) |
2253 | { | |
2254 | struct type *type = check_typedef (value_type (var->u.value)); | |
2255 | ||
2256 | if (TYPE_CODE (type) == TYPE_CODE_INT) | |
2257 | { | |
2258 | *result = value_as_long (var->u.value); | |
2259 | return 1; | |
2260 | } | |
4fa62494 | 2261 | } |
3158c6ed PA |
2262 | |
2263 | return 0; | |
4fa62494 | 2264 | } |
d3c139e9 | 2265 | |
4fa62494 UW |
2266 | static int |
2267 | get_internalvar_function (struct internalvar *var, | |
2268 | struct internal_function **result) | |
2269 | { | |
78267919 | 2270 | switch (var->kind) |
d3c139e9 | 2271 | { |
78267919 UW |
2272 | case INTERNALVAR_FUNCTION: |
2273 | *result = var->u.fn.function; | |
4fa62494 | 2274 | return 1; |
d3c139e9 | 2275 | |
4fa62494 UW |
2276 | default: |
2277 | return 0; | |
2278 | } | |
c906108c SS |
2279 | } |
2280 | ||
2281 | void | |
fba45db2 | 2282 | set_internalvar_component (struct internalvar *var, int offset, int bitpos, |
f23631e4 | 2283 | int bitsize, struct value *newval) |
c906108c | 2284 | { |
4fa62494 | 2285 | gdb_byte *addr; |
3ae385af SM |
2286 | struct gdbarch *arch; |
2287 | int unit_size; | |
c906108c | 2288 | |
78267919 | 2289 | switch (var->kind) |
4fa62494 | 2290 | { |
78267919 UW |
2291 | case INTERNALVAR_VALUE: |
2292 | addr = value_contents_writeable (var->u.value); | |
3ae385af SM |
2293 | arch = get_value_arch (var->u.value); |
2294 | unit_size = gdbarch_addressable_memory_unit_size (arch); | |
4fa62494 UW |
2295 | |
2296 | if (bitsize) | |
50810684 | 2297 | modify_field (value_type (var->u.value), addr + offset, |
4fa62494 UW |
2298 | value_as_long (newval), bitpos, bitsize); |
2299 | else | |
3ae385af | 2300 | memcpy (addr + offset * unit_size, value_contents (newval), |
4fa62494 UW |
2301 | TYPE_LENGTH (value_type (newval))); |
2302 | break; | |
78267919 UW |
2303 | |
2304 | default: | |
2305 | /* We can never get a component of any other kind. */ | |
9b20d036 | 2306 | internal_error (__FILE__, __LINE__, _("set_internalvar_component")); |
4fa62494 | 2307 | } |
c906108c SS |
2308 | } |
2309 | ||
2310 | void | |
f23631e4 | 2311 | set_internalvar (struct internalvar *var, struct value *val) |
c906108c | 2312 | { |
78267919 | 2313 | enum internalvar_kind new_kind; |
4fa62494 | 2314 | union internalvar_data new_data = { 0 }; |
c906108c | 2315 | |
78267919 | 2316 | if (var->kind == INTERNALVAR_FUNCTION && var->u.fn.canonical) |
bc3b79fd TJB |
2317 | error (_("Cannot overwrite convenience function %s"), var->name); |
2318 | ||
4fa62494 | 2319 | /* Prepare new contents. */ |
78267919 | 2320 | switch (TYPE_CODE (check_typedef (value_type (val)))) |
4fa62494 UW |
2321 | { |
2322 | case TYPE_CODE_VOID: | |
78267919 | 2323 | new_kind = INTERNALVAR_VOID; |
4fa62494 UW |
2324 | break; |
2325 | ||
2326 | case TYPE_CODE_INTERNAL_FUNCTION: | |
2327 | gdb_assert (VALUE_LVAL (val) == lval_internalvar); | |
78267919 UW |
2328 | new_kind = INTERNALVAR_FUNCTION; |
2329 | get_internalvar_function (VALUE_INTERNALVAR (val), | |
2330 | &new_data.fn.function); | |
2331 | /* Copies created here are never canonical. */ | |
4fa62494 UW |
2332 | break; |
2333 | ||
4fa62494 | 2334 | default: |
78267919 UW |
2335 | new_kind = INTERNALVAR_VALUE; |
2336 | new_data.value = value_copy (val); | |
2337 | new_data.value->modifiable = 1; | |
4fa62494 UW |
2338 | |
2339 | /* Force the value to be fetched from the target now, to avoid problems | |
2340 | later when this internalvar is referenced and the target is gone or | |
2341 | has changed. */ | |
78267919 UW |
2342 | if (value_lazy (new_data.value)) |
2343 | value_fetch_lazy (new_data.value); | |
4fa62494 UW |
2344 | |
2345 | /* Release the value from the value chain to prevent it from being | |
2346 | deleted by free_all_values. From here on this function should not | |
2347 | call error () until new_data is installed into the var->u to avoid | |
2348 | leaking memory. */ | |
78267919 | 2349 | release_value (new_data.value); |
4fa62494 UW |
2350 | break; |
2351 | } | |
2352 | ||
2353 | /* Clean up old contents. */ | |
2354 | clear_internalvar (var); | |
2355 | ||
2356 | /* Switch over. */ | |
78267919 | 2357 | var->kind = new_kind; |
4fa62494 | 2358 | var->u = new_data; |
c906108c SS |
2359 | /* End code which must not call error(). */ |
2360 | } | |
2361 | ||
4fa62494 UW |
2362 | void |
2363 | set_internalvar_integer (struct internalvar *var, LONGEST l) | |
2364 | { | |
2365 | /* Clean up old contents. */ | |
2366 | clear_internalvar (var); | |
2367 | ||
cab0c772 UW |
2368 | var->kind = INTERNALVAR_INTEGER; |
2369 | var->u.integer.type = NULL; | |
2370 | var->u.integer.val = l; | |
78267919 UW |
2371 | } |
2372 | ||
2373 | void | |
2374 | set_internalvar_string (struct internalvar *var, const char *string) | |
2375 | { | |
2376 | /* Clean up old contents. */ | |
2377 | clear_internalvar (var); | |
2378 | ||
2379 | var->kind = INTERNALVAR_STRING; | |
2380 | var->u.string = xstrdup (string); | |
4fa62494 UW |
2381 | } |
2382 | ||
2383 | static void | |
2384 | set_internalvar_function (struct internalvar *var, struct internal_function *f) | |
2385 | { | |
2386 | /* Clean up old contents. */ | |
2387 | clear_internalvar (var); | |
2388 | ||
78267919 UW |
2389 | var->kind = INTERNALVAR_FUNCTION; |
2390 | var->u.fn.function = f; | |
2391 | var->u.fn.canonical = 1; | |
2392 | /* Variables installed here are always the canonical version. */ | |
4fa62494 UW |
2393 | } |
2394 | ||
2395 | void | |
2396 | clear_internalvar (struct internalvar *var) | |
2397 | { | |
2398 | /* Clean up old contents. */ | |
78267919 | 2399 | switch (var->kind) |
4fa62494 | 2400 | { |
78267919 UW |
2401 | case INTERNALVAR_VALUE: |
2402 | value_free (var->u.value); | |
2403 | break; | |
2404 | ||
2405 | case INTERNALVAR_STRING: | |
2406 | xfree (var->u.string); | |
4fa62494 UW |
2407 | break; |
2408 | ||
22d2b532 SDJ |
2409 | case INTERNALVAR_MAKE_VALUE: |
2410 | if (var->u.make_value.functions->destroy != NULL) | |
2411 | var->u.make_value.functions->destroy (var->u.make_value.data); | |
2412 | break; | |
2413 | ||
4fa62494 | 2414 | default: |
4fa62494 UW |
2415 | break; |
2416 | } | |
2417 | ||
78267919 UW |
2418 | /* Reset to void kind. */ |
2419 | var->kind = INTERNALVAR_VOID; | |
4fa62494 UW |
2420 | } |
2421 | ||
c906108c | 2422 | char * |
fba45db2 | 2423 | internalvar_name (struct internalvar *var) |
c906108c SS |
2424 | { |
2425 | return var->name; | |
2426 | } | |
2427 | ||
4fa62494 UW |
2428 | static struct internal_function * |
2429 | create_internal_function (const char *name, | |
2430 | internal_function_fn handler, void *cookie) | |
bc3b79fd | 2431 | { |
bc3b79fd | 2432 | struct internal_function *ifn = XNEW (struct internal_function); |
a109c7c1 | 2433 | |
bc3b79fd TJB |
2434 | ifn->name = xstrdup (name); |
2435 | ifn->handler = handler; | |
2436 | ifn->cookie = cookie; | |
4fa62494 | 2437 | return ifn; |
bc3b79fd TJB |
2438 | } |
2439 | ||
2440 | char * | |
2441 | value_internal_function_name (struct value *val) | |
2442 | { | |
4fa62494 UW |
2443 | struct internal_function *ifn; |
2444 | int result; | |
2445 | ||
2446 | gdb_assert (VALUE_LVAL (val) == lval_internalvar); | |
2447 | result = get_internalvar_function (VALUE_INTERNALVAR (val), &ifn); | |
2448 | gdb_assert (result); | |
2449 | ||
bc3b79fd TJB |
2450 | return ifn->name; |
2451 | } | |
2452 | ||
2453 | struct value * | |
d452c4bc UW |
2454 | call_internal_function (struct gdbarch *gdbarch, |
2455 | const struct language_defn *language, | |
2456 | struct value *func, int argc, struct value **argv) | |
bc3b79fd | 2457 | { |
4fa62494 UW |
2458 | struct internal_function *ifn; |
2459 | int result; | |
2460 | ||
2461 | gdb_assert (VALUE_LVAL (func) == lval_internalvar); | |
2462 | result = get_internalvar_function (VALUE_INTERNALVAR (func), &ifn); | |
2463 | gdb_assert (result); | |
2464 | ||
d452c4bc | 2465 | return (*ifn->handler) (gdbarch, language, ifn->cookie, argc, argv); |
bc3b79fd TJB |
2466 | } |
2467 | ||
2468 | /* The 'function' command. This does nothing -- it is just a | |
2469 | placeholder to let "help function NAME" work. This is also used as | |
2470 | the implementation of the sub-command that is created when | |
2471 | registering an internal function. */ | |
2472 | static void | |
2473 | function_command (char *command, int from_tty) | |
2474 | { | |
2475 | /* Do nothing. */ | |
2476 | } | |
2477 | ||
2478 | /* Clean up if an internal function's command is destroyed. */ | |
2479 | static void | |
2480 | function_destroyer (struct cmd_list_element *self, void *ignore) | |
2481 | { | |
6f937416 | 2482 | xfree ((char *) self->name); |
1947513d | 2483 | xfree ((char *) self->doc); |
bc3b79fd TJB |
2484 | } |
2485 | ||
2486 | /* Add a new internal function. NAME is the name of the function; DOC | |
2487 | is a documentation string describing the function. HANDLER is | |
2488 | called when the function is invoked. COOKIE is an arbitrary | |
2489 | pointer which is passed to HANDLER and is intended for "user | |
2490 | data". */ | |
2491 | void | |
2492 | add_internal_function (const char *name, const char *doc, | |
2493 | internal_function_fn handler, void *cookie) | |
2494 | { | |
2495 | struct cmd_list_element *cmd; | |
4fa62494 | 2496 | struct internal_function *ifn; |
bc3b79fd | 2497 | struct internalvar *var = lookup_internalvar (name); |
4fa62494 UW |
2498 | |
2499 | ifn = create_internal_function (name, handler, cookie); | |
2500 | set_internalvar_function (var, ifn); | |
bc3b79fd TJB |
2501 | |
2502 | cmd = add_cmd (xstrdup (name), no_class, function_command, (char *) doc, | |
2503 | &functionlist); | |
2504 | cmd->destroyer = function_destroyer; | |
2505 | } | |
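/* Illustrative sketch, not part of value.c: registering a convenience
   function.  After this runs, "print $_argcount (a, b, c)" calls the
   handler with the three evaluated arguments and prints 3.  The names
   are hypothetical; the handler signature mirrors the call made in
   call_internal_function above.  */

static struct value *
argcount_handler (struct gdbarch *gdbarch,
		  const struct language_defn *language,
		  void *cookie, int argc, struct value **argv)
{
  return value_from_longest (builtin_type (gdbarch)->builtin_int, argc);
}

void
register_argcount_function (void)
{
  add_internal_function ("_argcount",
			 _("Return the number of arguments passed."),
			 argcount_handler, NULL);
}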
2506 | ||
ae5a43e0 DJ |
2507 | /* Update VALUE before discarding OBJFILE. COPIED_TYPES is used to |
2508 | prevent cycles / duplicates. */ | |
2509 | ||
4e7a5ef5 | 2510 | void |
ae5a43e0 DJ |
2511 | preserve_one_value (struct value *value, struct objfile *objfile, |
2512 | htab_t copied_types) | |
2513 | { | |
2514 | if (TYPE_OBJFILE (value->type) == objfile) | |
2515 | value->type = copy_type_recursive (objfile, value->type, copied_types); | |
2516 | ||
2517 | if (TYPE_OBJFILE (value->enclosing_type) == objfile) | |
2518 | value->enclosing_type = copy_type_recursive (objfile, | |
2519 | value->enclosing_type, | |
2520 | copied_types); | |
2521 | } | |
2522 | ||
78267919 UW |
2523 | /* Likewise for internal variable VAR. */ |
2524 | ||
2525 | static void | |
2526 | preserve_one_internalvar (struct internalvar *var, struct objfile *objfile, | |
2527 | htab_t copied_types) | |
2528 | { | |
2529 | switch (var->kind) | |
2530 | { | |
cab0c772 UW |
2531 | case INTERNALVAR_INTEGER: |
2532 | if (var->u.integer.type && TYPE_OBJFILE (var->u.integer.type) == objfile) | |
2533 | var->u.integer.type | |
2534 | = copy_type_recursive (objfile, var->u.integer.type, copied_types); | |
2535 | break; | |
2536 | ||
78267919 UW |
2537 | case INTERNALVAR_VALUE: |
2538 | preserve_one_value (var->u.value, objfile, copied_types); | |
2539 | break; | |
2540 | } | |
2541 | } | |
2542 | ||
ae5a43e0 DJ |
2543 | /* Update the internal variables and value history when OBJFILE is |
2544 | discarded; we must copy the types out of the objfile. New global types | |
2545 | will be created for every convenience variable which currently points to | |
2546 | this objfile's types, and the convenience variables will be adjusted to | |
2547 | use the new global types. */ | |
c906108c SS |
2548 | |
2549 | void | |
ae5a43e0 | 2550 | preserve_values (struct objfile *objfile) |
c906108c | 2551 | { |
ae5a43e0 DJ |
2552 | htab_t copied_types; |
2553 | struct value_history_chunk *cur; | |
52f0bd74 | 2554 | struct internalvar *var; |
ae5a43e0 | 2555 | int i; |
c906108c | 2556 | |
ae5a43e0 DJ |
2557 | /* Create the hash table. We allocate on the objfile's obstack, since |
2558 | it is soon to be deleted. */ | |
2559 | copied_types = create_copied_types_hash (objfile); | |
2560 | ||
2561 | for (cur = value_history_chain; cur; cur = cur->next) | |
2562 | for (i = 0; i < VALUE_HISTORY_CHUNK; i++) | |
2563 | if (cur->values[i]) | |
2564 | preserve_one_value (cur->values[i], objfile, copied_types); | |
2565 | ||
2566 | for (var = internalvars; var; var = var->next) | |
78267919 | 2567 | preserve_one_internalvar (var, objfile, copied_types); |
ae5a43e0 | 2568 | |
6dddc817 | 2569 | preserve_ext_lang_values (objfile, copied_types); |
a08702d6 | 2570 | |
ae5a43e0 | 2571 | htab_delete (copied_types); |
c906108c SS |
2572 | } |
2573 | ||
2574 | static void | |
fba45db2 | 2575 | show_convenience (char *ignore, int from_tty) |
c906108c | 2576 | { |
e17c207e | 2577 | struct gdbarch *gdbarch = get_current_arch (); |
52f0bd74 | 2578 | struct internalvar *var; |
c906108c | 2579 | int varseen = 0; |
79a45b7d | 2580 | struct value_print_options opts; |
c906108c | 2581 | |
79a45b7d | 2582 | get_user_print_options (&opts); |
c906108c SS |
2583 | for (var = internalvars; var; var = var->next) |
2584 | { | |
c709acd1 | 2585 | |
c906108c SS |
2586 | if (!varseen) |
2587 | { | |
2588 | varseen = 1; | |
2589 | } | |
a3f17187 | 2590 | printf_filtered (("$%s = "), var->name); |
c709acd1 | 2591 | |
492d29ea | 2592 | TRY |
c709acd1 PA |
2593 | { |
2594 | struct value *val; | |
2595 | ||
2596 | val = value_of_internalvar (gdbarch, var); | |
2597 | value_print (val, gdb_stdout, &opts); | |
2598 | } | |
492d29ea PA |
2599 | CATCH (ex, RETURN_MASK_ERROR) |
2600 | { | |
2601 | fprintf_filtered (gdb_stdout, _("<error: %s>"), ex.message); | |
2602 | } | |
2603 | END_CATCH | |
2604 | ||
a3f17187 | 2605 | printf_filtered (("\n")); |
c906108c SS |
2606 | } |
2607 | if (!varseen) | |
f47f77df DE |
2608 | { |
2609 | /* This text does not mention convenience functions on purpose. | |
2610 | The user can't create them except via Python, and if Python support | |
2611 | is installed this message will never be printed ($_streq will | |
2612 | exist). */ | |
2613 | printf_unfiltered (_("No debugger convenience variables now defined.\n" | |
2614 | "Convenience variables have " | |
2615 | "names starting with \"$\";\n" | |
2616 | "use \"set\" as in \"set " | |
2617 | "$foo = 5\" to define them.\n")); | |
2618 | } | |
c906108c SS |
2619 | } |
2620 | \f | |
e81e7f5e SC |
2621 | /* Return the TYPE_CODE_XMETHOD value corresponding to WORKER. */ |
2622 | ||
2623 | struct value * | |
2624 | value_of_xmethod (struct xmethod_worker *worker) | |
2625 | { | |
2626 | if (worker->value == NULL) | |
2627 | { | |
2628 | struct value *v; | |
2629 | ||
2630 | v = allocate_value (builtin_type (target_gdbarch ())->xmethod); | |
2631 | v->lval = lval_xcallable; | |
2632 | v->location.xm_worker = worker; | |
2633 | v->modifiable = 0; | |
2634 | worker->value = v; | |
2635 | } | |
2636 | ||
2637 | return worker->value; | |
2638 | } | |
2639 | ||
2ce1cdbf DE |
2640 | /* Return the type of the result of TYPE_CODE_XMETHOD value METHOD. */ |
2641 | ||
2642 | struct type * | |
2643 | result_type_of_xmethod (struct value *method, int argc, struct value **argv) | |
2644 | { | |
2645 | gdb_assert (TYPE_CODE (value_type (method)) == TYPE_CODE_XMETHOD | |
2646 | && method->lval == lval_xcallable && argc > 0); | |
2647 | ||
2648 | return get_xmethod_result_type (method->location.xm_worker, | |
2649 | argv[0], argv + 1, argc - 1); | |
2650 | } | |
2651 | ||
e81e7f5e SC |
2652 | /* Call the xmethod corresponding to the TYPE_CODE_XMETHOD value METHOD. */ |
2653 | ||
2654 | struct value * | |
2655 | call_xmethod (struct value *method, int argc, struct value **argv) | |
2656 | { | |
2657 | gdb_assert (TYPE_CODE (value_type (method)) == TYPE_CODE_XMETHOD | |
2658 | && method->lval == lval_xcallable && argc > 0); | |
2659 | ||
2660 | return invoke_xmethod (method->location.xm_worker, | |
2661 | argv[0], argv + 1, argc - 1); | |
2662 | } | |
2663 | \f | |
c906108c SS |
2664 | /* Extract a value as a C number (either long or double). |
2665 | Knows how to convert fixed values to double, or | |
2666 | floating values to long. | |
2667 | Does not deallocate the value. */ | |
2668 | ||
2669 | LONGEST | |
f23631e4 | 2670 | value_as_long (struct value *val) |
c906108c SS |
2671 | { |
2672 | /* This coerces arrays and functions, which is necessary (e.g. | |
2673 | in disassemble_command). It also dereferences references, which | |
2674 | I suspect is the most logical thing to do. */ | |
994b9211 | 2675 | val = coerce_array (val); |
0fd88904 | 2676 | return unpack_long (value_type (val), value_contents (val)); |
c906108c SS |
2677 | } |
2678 | ||
2679 | DOUBLEST | |
f23631e4 | 2680 | value_as_double (struct value *val) |
c906108c SS |
2681 | { |
2682 | DOUBLEST foo; | |
2683 | int inv; | |
c5aa993b | 2684 | |
0fd88904 | 2685 | foo = unpack_double (value_type (val), value_contents (val), &inv); |
c906108c | 2686 | if (inv) |
8a3fe4f8 | 2687 | error (_("Invalid floating value found in program.")); |
c906108c SS |
2688 | return foo; |
2689 | } | |
4ef30785 | 2690 | |
581e13c1 | 2691 | /* Extract a value as a C pointer. Does not deallocate the value. |
4478b372 JB |
2692 | Note that val's type may not actually be a pointer; value_as_long |
2693 | handles all the cases. */ | |
c906108c | 2694 | CORE_ADDR |
f23631e4 | 2695 | value_as_address (struct value *val) |
c906108c | 2696 | { |
50810684 UW |
2697 | struct gdbarch *gdbarch = get_type_arch (value_type (val)); |
2698 | ||
c906108c SS |
2699 | /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure |
2700 | whether we want this to be true eventually. */ | |
2701 | #if 0 | |
bf6ae464 | 2702 | /* gdbarch_addr_bits_remove is wrong if we are being called for a |
c906108c SS |
2703 | non-address (e.g. argument to "signal", "info break", etc.), or |
2704 | for pointers to char, in which the low bits *are* significant. */ | |
50810684 | 2705 | return gdbarch_addr_bits_remove (gdbarch, value_as_long (val)); |
c906108c | 2706 | #else |
f312f057 JB |
2707 | |
2708 | /* There are several targets (IA-64, PowerPC, and others) which | |
2709 | don't represent pointers to functions as simply the address of | |
2710 | the function's entry point. For example, on the IA-64, a | |
2711 | function pointer points to a two-word descriptor, generated by | |
2712 | the linker, which contains the function's entry point, and the | |
2713 | value the IA-64 "global pointer" register should have --- to | |
2714 | support position-independent code. The linker generates | |
2715 | descriptors only for those functions whose addresses are taken. | |
2716 | ||
2717 | On such targets, it's difficult for GDB to convert an arbitrary | |
2718 | function address into a function pointer; it has to either find | |
2719 | an existing descriptor for that function, or call malloc and | |
2720 | build its own. On some targets, it is impossible for GDB to | |
2721 | build a descriptor at all: the descriptor must contain a jump | |
2722 | instruction; data memory cannot be executed; and code memory | |
2723 | cannot be modified. | |
2724 | ||
2725 | Upon entry to this function, if VAL is a value of type `function' | |
2726 | (that is, TYPE_CODE (VALUE_TYPE (val)) == TYPE_CODE_FUNC), then | |
42ae5230 | 2727 | value_address (val) is the address of the function. This is what |
f312f057 JB |
2728 | you'll get if you evaluate an expression like `main'. The call |
2729 | to COERCE_ARRAY below actually does all the usual unary | |
2730 | conversions, which includes converting values of type `function' | |
2731 | to `pointer to function'. This is the challenging conversion | |
2732 | discussed above. Then, `unpack_long' will convert that pointer | |
2733 | back into an address. | |
2734 | ||
2735 | So, suppose the user types `disassemble foo' on an architecture | |
2736 | with a strange function pointer representation, on which GDB | |
2737 | cannot build its own descriptors, and suppose further that `foo' | |
2738 | has no linker-built descriptor. The address->pointer conversion | |
2739 | will signal an error and prevent the command from running, even | |
2740 | though the next step would have been to convert the pointer | |
2741 | directly back into the same address. | |
2742 | ||
2743 | The following shortcut avoids this whole mess. If VAL is a | |
2744 | function, just return its address directly. */ | |
df407dfe AC |
2745 | if (TYPE_CODE (value_type (val)) == TYPE_CODE_FUNC |
2746 | || TYPE_CODE (value_type (val)) == TYPE_CODE_METHOD) | |
42ae5230 | 2747 | return value_address (val); |
f312f057 | 2748 | |
994b9211 | 2749 | val = coerce_array (val); |
fc0c74b1 AC |
2750 | |
2751 | /* Some architectures (e.g. Harvard), map instruction and data | |
2752 | addresses onto a single large unified address space. For | |
2753 | instance: An architecture may consider a large integer in the | |
2754 | range 0x10000000 .. 0x1000ffff to already represent a data | |
2755 | addresses (hence not need a pointer to address conversion) while | |
2756 | a small integer would still need to be converted integer to | |
2757 | pointer to address. Just assume such architectures handle all | |
2758 | integer conversions in a single function. */ | |
2759 | ||
2760 | /* JimB writes: | |
2761 | ||
2762 | I think INTEGER_TO_ADDRESS is a good idea as proposed --- but we | |
2763 | must admonish GDB hackers to make sure its behavior matches the | |
2764 | compiler's, whenever possible. | |
2765 | ||
2766 | In general, I think GDB should evaluate expressions the same way | |
2767 | the compiler does. When the user copies an expression out of | |
2768 | their source code and hands it to a `print' command, they should | |
2769 | get the same value the compiler would have computed. Any | |
2770 | deviation from this rule can cause major confusion and annoyance, | |
2771 | and needs to be justified carefully. In other words, GDB doesn't | |
2772 | really have the freedom to do these conversions in clever and | |
2773 | useful ways. | |
2774 | ||
2775 | AndrewC pointed out that users aren't complaining about how GDB | |
2776 | casts integers to pointers; they are complaining that they can't | |
2777 | take an address from a disassembly listing and give it to `x/i'. | |
2778 | This is certainly important. | |
2779 | ||
79dd2d24 | 2780 | Adding an architecture method like integer_to_address() certainly |
fc0c74b1 AC |
2781 | makes it possible for GDB to "get it right" in all circumstances |
2782 | --- the target has complete control over how things get done, so | |
2783 | people can Do The Right Thing for their target without breaking | |
2784 | anyone else. The standard doesn't specify how integers get | |
2785 | converted to pointers; usually, the ABI doesn't either, but | |
2786 | ABI-specific code is a more reasonable place to handle it. */ | |
2787 | ||
df407dfe AC |
2788 | if (TYPE_CODE (value_type (val)) != TYPE_CODE_PTR |
2789 | && TYPE_CODE (value_type (val)) != TYPE_CODE_REF | |
50810684 UW |
2790 | && gdbarch_integer_to_address_p (gdbarch)) |
2791 | return gdbarch_integer_to_address (gdbarch, value_type (val), | |
0fd88904 | 2792 | value_contents (val)); |
fc0c74b1 | 2793 | |
0fd88904 | 2794 | return unpack_long (value_type (val), value_contents (val)); |
c906108c SS |
2795 | #endif |
2796 | } | |
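/* Illustrative sketch, not part of value.c: the typical consumer of
   value_as_address.  A command that accepts "anything convertible to
   an address" evaluates the user's expression and lets the coercions
   described above do the rest.  parse_and_eval is GDB's usual
   string-to-value evaluator.  */

CORE_ADDR
address_from_expression (const char *exp)
{
  struct value *val = parse_and_eval (exp);

  return value_as_address (val);
}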
2797 | \f | |
2798 | /* Unpack raw data (copied from debuggee, target byte order) at VALADDR |
2799 | as a long, or as a double, assuming the raw data is described | |
2800 | by type TYPE. Knows how to convert different sizes of values | |
2801 | and can convert between fixed and floating point. We don't assume | |
2802 | any alignment for the raw data. Return value is in host byte order. | |
2803 | ||
2804 | If you want functions and arrays to be coerced to pointers, and | |
2805 | references to be dereferenced, call value_as_long() instead. | |
2806 | ||
2807 | C++: It is assumed that the front-end has taken care of | |
2808 | all matters concerning pointers to members. A pointer | |
2809 | to member which reaches here is considered to be equivalent | |
2810 | to an INT (or some size). After all, it is only an offset. */ | |
2811 | ||
2812 | LONGEST | |
fc1a4b47 | 2813 | unpack_long (struct type *type, const gdb_byte *valaddr) |
c906108c | 2814 | { |
e17a4113 | 2815 | enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type)); |
52f0bd74 AC |
2816 | enum type_code code = TYPE_CODE (type); |
2817 | int len = TYPE_LENGTH (type); | |
2818 | int nosign = TYPE_UNSIGNED (type); | |
c906108c | 2819 | |
c906108c SS |
2820 | switch (code) |
2821 | { | |
2822 | case TYPE_CODE_TYPEDEF: | |
2823 | return unpack_long (check_typedef (type), valaddr); | |
2824 | case TYPE_CODE_ENUM: | |
4f2aea11 | 2825 | case TYPE_CODE_FLAGS: |
c906108c SS |
2826 | case TYPE_CODE_BOOL: |
2827 | case TYPE_CODE_INT: | |
2828 | case TYPE_CODE_CHAR: | |
2829 | case TYPE_CODE_RANGE: | |
0d5de010 | 2830 | case TYPE_CODE_MEMBERPTR: |
c906108c | 2831 | if (nosign) |
e17a4113 | 2832 | return extract_unsigned_integer (valaddr, len, byte_order); |
c906108c | 2833 | else |
e17a4113 | 2834 | return extract_signed_integer (valaddr, len, byte_order); |
c906108c SS |
2835 | |
2836 | case TYPE_CODE_FLT: | |
96d2f608 | 2837 | return extract_typed_floating (valaddr, type); |
c906108c | 2838 | |
4ef30785 TJB |
2839 | case TYPE_CODE_DECFLOAT: |
2840 | /* libdecnumber has a function to convert from decimal to integer, but | |
2841 | it doesn't work when the decimal number has a fractional part. */ | |
e17a4113 | 2842 | return decimal_to_doublest (valaddr, len, byte_order); |
4ef30785 | 2843 | |
c906108c SS |
2844 | case TYPE_CODE_PTR: |
2845 | case TYPE_CODE_REF: | |
2846 | /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure | |
c5aa993b | 2847 | whether we want this to be true eventually. */ |
4478b372 | 2848 | return extract_typed_address (valaddr, type); |
c906108c | 2849 | |
c906108c | 2850 | default: |
8a3fe4f8 | 2851 | error (_("Value can't be converted to integer.")); |
c906108c | 2852 | } |
c5aa993b | 2853 | return 0; /* Placate lint. */ |
c906108c SS |
2854 | } |
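/* Illustrative sketch, not part of value.c: unpack_long operates on
   raw target-order bytes, so it can be applied directly to a buffer
   read from the inferior without building a struct value.  TYPE is
   assumed to be one of the scalar codes handled above and no larger
   than the local buffer.  */

LONGEST
read_scalar_from_target (struct type *type, CORE_ADDR addr)
{
  gdb_byte buf[32];

  gdb_assert (TYPE_LENGTH (type) <= sizeof (buf));
  read_memory (addr, buf, TYPE_LENGTH (type));
  return unpack_long (type, buf);
}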
2855 | ||
2856 | /* Return a double value from the specified type and address. | |
2857 | INVP points to an int which is set to 0 for valid value, | |
2858 | 1 for invalid value (bad float format). In either case, | |
2859 | the returned double is OK to use. Argument is in target | |
2860 | format, result is in host format. */ | |
2861 | ||
2862 | DOUBLEST | |
fc1a4b47 | 2863 | unpack_double (struct type *type, const gdb_byte *valaddr, int *invp) |
c906108c | 2864 | { |
e17a4113 | 2865 | enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type)); |
c906108c SS |
2866 | enum type_code code; |
2867 | int len; | |
2868 | int nosign; | |
2869 | ||
581e13c1 | 2870 | *invp = 0; /* Assume valid. */ |
f168693b | 2871 | type = check_typedef (type); |
c906108c SS |
2872 | code = TYPE_CODE (type); |
2873 | len = TYPE_LENGTH (type); | |
2874 | nosign = TYPE_UNSIGNED (type); | |
2875 | if (code == TYPE_CODE_FLT) | |
2876 | { | |
75bc7ddf AC |
2877 | /* NOTE: cagney/2002-02-19: There was a test here to see if the |
2878 | floating-point value was valid (using the macro | |
2879 | INVALID_FLOAT). That test and macro have been removed. |
2880 | ||
2881 | It turns out that only the VAX defined this macro and then | |
2882 | only in a non-portable way. Fixing the portability problem | |
2883 | wouldn't help since the VAX floating-point code is also badly | |
2884 | bit-rotten. The target needs to add definitions for the | |
ea06eb3d | 2885 | methods gdbarch_float_format and gdbarch_double_format - these |
75bc7ddf AC |
2886 | exactly describe the target floating-point format. The |
2887 | problem here is that the corresponding floatformat_vax_f and | |
2888 | floatformat_vax_d values these methods should be set to are | |
2889 | also not defined either. Oops! | |
2890 | ||
2891 | Hopefully someone will add both the missing floatformat | |
ac79b88b DJ |
2892 | definitions and the new cases for floatformat_is_valid (). */ |
2893 | ||
2894 | if (!floatformat_is_valid (floatformat_from_type (type), valaddr)) | |
2895 | { | |
2896 | *invp = 1; | |
2897 | return 0.0; | |
2898 | } | |
2899 | ||
96d2f608 | 2900 | return extract_typed_floating (valaddr, type); |
c906108c | 2901 | } |
4ef30785 | 2902 | else if (code == TYPE_CODE_DECFLOAT) |
e17a4113 | 2903 | return decimal_to_doublest (valaddr, len, byte_order); |
c906108c SS |
2904 | else if (nosign) |
2905 | { | |
2906 | /* Unsigned -- be sure we compensate for signed LONGEST. */ | |
c906108c | 2907 | return (ULONGEST) unpack_long (type, valaddr); |
c906108c SS |
2908 | } |
2909 | else | |
2910 | { | |
2911 | /* Signed -- we are OK with unpack_long. */ | |
2912 | return unpack_long (type, valaddr); | |
2913 | } | |
2914 | } | |
2915 | ||
2916 | /* Unpack raw data (copied from debuggee, target byte order) at VALADDR |
2917 | as a CORE_ADDR, assuming the raw data is described by type TYPE. | |
2918 | We don't assume any alignment for the raw data. Return value is in | |
2919 | host byte order. | |
2920 | ||
2921 | If you want functions and arrays to be coerced to pointers, and | |
1aa20aa8 | 2922 | references to be dereferenced, call value_as_address() instead. |
c906108c SS |
2923 | |
2924 | C++: It is assumed that the front-end has taken care of | |
2925 | all matters concerning pointers to members. A pointer | |
2926 | to member which reaches here is considered to be equivalent | |
2927 | to an INT (of some size). After all, it is only an offset. */ |
2928 | ||
2929 | CORE_ADDR | |
fc1a4b47 | 2930 | unpack_pointer (struct type *type, const gdb_byte *valaddr) |
c906108c SS |
2931 | { |
2932 | /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure | |
2933 | whether we want this to be true eventually. */ | |
2934 | return unpack_long (type, valaddr); | |
2935 | } | |
4478b372 | 2936 | |
c906108c | 2937 | \f |
1596cb5d | 2938 | /* Get the value of the FIELDNO'th field (which must be static) of |
686d4def | 2939 | TYPE. */ |
c906108c | 2940 | |
f23631e4 | 2941 | struct value * |
fba45db2 | 2942 | value_static_field (struct type *type, int fieldno) |
c906108c | 2943 | { |
948e66d9 DJ |
2944 | struct value *retval; |
2945 | ||
1596cb5d | 2946 | switch (TYPE_FIELD_LOC_KIND (type, fieldno)) |
c906108c | 2947 | { |
1596cb5d | 2948 | case FIELD_LOC_KIND_PHYSADDR: |
52e9fde8 SS |
2949 | retval = value_at_lazy (TYPE_FIELD_TYPE (type, fieldno), |
2950 | TYPE_FIELD_STATIC_PHYSADDR (type, fieldno)); | |
1596cb5d DE |
2951 | break; |
2952 | case FIELD_LOC_KIND_PHYSNAME: | |
c906108c | 2953 | { |
ff355380 | 2954 | const char *phys_name = TYPE_FIELD_STATIC_PHYSNAME (type, fieldno); |
581e13c1 | 2955 | /* TYPE_FIELD_NAME (type, fieldno); */ |
d12307c1 | 2956 | struct block_symbol sym = lookup_symbol (phys_name, 0, VAR_DOMAIN, 0); |
94af9270 | 2957 | |
d12307c1 | 2958 | if (sym.symbol == NULL) |
c906108c | 2959 | { |
a109c7c1 | 2960 | /* With some compilers, e.g. HP aCC, static data members are |
581e13c1 | 2961 | reported as non-debuggable symbols. */ |
3b7344d5 TT |
2962 | struct bound_minimal_symbol msym |
2963 | = lookup_minimal_symbol (phys_name, NULL, NULL); | |
a109c7c1 | 2964 | |
3b7344d5 | 2965 | if (!msym.minsym) |
686d4def | 2966 | return allocate_optimized_out_value (type); |
c906108c | 2967 | else |
c5aa993b | 2968 | { |
52e9fde8 | 2969 | retval = value_at_lazy (TYPE_FIELD_TYPE (type, fieldno), |
77e371c0 | 2970 | BMSYMBOL_VALUE_ADDRESS (msym)); |
c906108c SS |
2971 | } |
2972 | } | |
2973 | else | |
d12307c1 | 2974 | retval = value_of_variable (sym.symbol, sym.block); |
1596cb5d | 2975 | break; |
c906108c | 2976 | } |
1596cb5d | 2977 | default: |
f3574227 | 2978 | gdb_assert_not_reached ("unexpected field location kind"); |
1596cb5d DE |
2979 | } |
2980 | ||
948e66d9 | 2981 | return retval; |
c906108c SS |
2982 | } |
2983 | ||
4dfea560 DE |
2984 | /* Change the enclosing type of a value object VAL to NEW_ENCL_TYPE. |
2985 | You have to be careful here, since the size of the data area for the value | |
2986 | is set by the length of the enclosing type. So if NEW_ENCL_TYPE is bigger | |
2987 | than the old enclosing type, you have to allocate more space for the | |
2988 | data. */ | |
2b127877 | 2989 | |
4dfea560 DE |
2990 | void |
2991 | set_value_enclosing_type (struct value *val, struct type *new_encl_type) | |
2b127877 | 2992 | { |
3e3d7139 JG |
2993 | if (TYPE_LENGTH (new_encl_type) > TYPE_LENGTH (value_enclosing_type (val))) |
2994 | val->contents = | |
2995 | (gdb_byte *) xrealloc (val->contents, TYPE_LENGTH (new_encl_type)); | |
2996 | ||
2997 | val->enclosing_type = new_encl_type; | |
2b127877 DB |
2998 | } |
2999 | ||
c906108c SS |
3000 | /* Given a value ARG1 (offset by OFFSET bytes) |
3001 | of a struct or union type ARG_TYPE, | |
3002 | extract and return the value of one of its (non-static) fields. | |
581e13c1 | 3003 | FIELDNO says which field. */ |
c906108c | 3004 | |
f23631e4 AC |
3005 | struct value * |
3006 | value_primitive_field (struct value *arg1, int offset, | |
aa1ee363 | 3007 | int fieldno, struct type *arg_type) |
c906108c | 3008 | { |
f23631e4 | 3009 | struct value *v; |
52f0bd74 | 3010 | struct type *type; |
3ae385af SM |
3011 | struct gdbarch *arch = get_value_arch (arg1); |
3012 | int unit_size = gdbarch_addressable_memory_unit_size (arch); | |
c906108c | 3013 | |
f168693b | 3014 | arg_type = check_typedef (arg_type); |
c906108c | 3015 | type = TYPE_FIELD_TYPE (arg_type, fieldno); |
c54eabfa JK |
3016 | |
3017 | /* Call check_typedef on our type to make sure that, if TYPE | |
3018 | is a TYPE_CODE_TYPEDEF, its length is set to the length | |
3019 | of the target type instead of zero. However, we do not | |
3020 | replace the typedef type by the target type, because we want | |
3021 | to keep the typedef in order to be able to print the type | |
3022 | description correctly. */ | |
3023 | check_typedef (type); | |
c906108c | 3024 | |
691a26f5 | 3025 | if (TYPE_FIELD_BITSIZE (arg_type, fieldno)) |
c906108c | 3026 | { |
22c05d8a JK |
3027 | /* Handle packed fields. |
3028 | ||
3029 | Create a new value for the bitfield, with bitpos and bitsize | |
4ea48cc1 DJ |
3030 | set. If possible, arrange offset and bitpos so that we can |
3031 | do a single aligned read of the size of the containing type. | |
3032 | Otherwise, adjust offset to the byte containing the first | |
3033 | bit. Assume that the address, offset, and embedded offset | |
3034 | are sufficiently aligned. */ | |
22c05d8a | 3035 | |
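/* Worked example (illustrative numbers only): for a field with
   bitpos == 35 and bitsize == 4 in a 32-bit container type,
   (35 % 32) + 4 fits within the container, so v->bitpos becomes 3
   and the offset below advances by (35 - 3) / 8 == 4 bytes,
   permitting a single aligned read.  For bitpos == 30 and
   bitsize == 6 the field straddles the container, so v->bitpos
   falls back to 30 % 8 == 6 and the offset advances by
   (30 - 6) / 8 == 3 bytes.  */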
4ea48cc1 DJ |
3036 | int bitpos = TYPE_FIELD_BITPOS (arg_type, fieldno); |
3037 | int container_bitsize = TYPE_LENGTH (type) * 8; | |
3038 | ||
9a0dc9e3 PA |
3039 | v = allocate_value_lazy (type); |
3040 | v->bitsize = TYPE_FIELD_BITSIZE (arg_type, fieldno); | |
3041 | if ((bitpos % container_bitsize) + v->bitsize <= container_bitsize | |
3042 | && TYPE_LENGTH (type) <= (int) sizeof (LONGEST)) | |
3043 | v->bitpos = bitpos % container_bitsize; | |
4ea48cc1 | 3044 | else |
9a0dc9e3 PA |
3045 | v->bitpos = bitpos % 8; |
3046 | v->offset = (value_embedded_offset (arg1) | |
3047 | + offset | |
3048 | + (bitpos - v->bitpos) / 8); | |
3049 | set_value_parent (v, arg1); | |
3050 | if (!value_lazy (arg1)) | |
3051 | value_fetch_lazy (v); | |
c906108c SS |
3052 | } |
3053 | else if (fieldno < TYPE_N_BASECLASSES (arg_type)) | |
3054 | { | |
3055 | /* This field is actually a base subobject, so preserve the | |
39d37385 PA |
3056 | entire object's contents for later references to virtual |
3057 | bases, etc. */ | |
be335936 | 3058 | int boffset; |
a4e2ee12 DJ |
3059 | |
3060 | /* Lazy register values with offsets are not supported. */ | |
3061 | if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1)) | |
3062 | value_fetch_lazy (arg1); | |
3063 | ||
9a0dc9e3 PA |
3064 | /* We special case virtual inheritance here because this |
3065 | requires access to the contents, which we would rather avoid | |
3066 | for references to ordinary fields of unavailable values. */ | |
3067 | if (BASETYPE_VIA_VIRTUAL (arg_type, fieldno)) | |
3068 | boffset = baseclass_offset (arg_type, fieldno, | |
3069 | value_contents (arg1), | |
3070 | value_embedded_offset (arg1), | |
3071 | value_address (arg1), | |
3072 | arg1); | |
c906108c | 3073 | else |
9a0dc9e3 | 3074 | boffset = TYPE_FIELD_BITPOS (arg_type, fieldno) / 8; |
691a26f5 | 3075 | |
9a0dc9e3 PA |
3076 | if (value_lazy (arg1)) |
3077 | v = allocate_value_lazy (value_enclosing_type (arg1)); | |
3078 | else | |
3079 | { | |
3080 | v = allocate_value (value_enclosing_type (arg1)); | |
3081 | value_contents_copy_raw (v, 0, arg1, 0, | |
3082 | TYPE_LENGTH (value_enclosing_type (arg1))); | |
3e3d7139 | 3083 | } |
9a0dc9e3 PA |
3084 | v->type = type; |
3085 | v->offset = value_offset (arg1); | |
3086 | v->embedded_offset = offset + value_embedded_offset (arg1) + boffset; | |
c906108c SS |
3087 | } |
3088 | else | |
3089 | { | |
3090 | /* Plain old data member.  */ |
3ae385af SM |
3091 | offset += (TYPE_FIELD_BITPOS (arg_type, fieldno) |
3092 | / (HOST_CHAR_BIT * unit_size)); | |
a4e2ee12 DJ |
3093 | |
3094 | /* Lazy register values with offsets are not supported. */ | |
3095 | if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1)) | |
3096 | value_fetch_lazy (arg1); | |
3097 | ||
9a0dc9e3 | 3098 | if (value_lazy (arg1)) |
3e3d7139 | 3099 | v = allocate_value_lazy (type); |
c906108c | 3100 | else |
3e3d7139 JG |
3101 | { |
3102 | v = allocate_value (type); | |
39d37385 PA |
3103 | value_contents_copy_raw (v, value_embedded_offset (v), |
3104 | arg1, value_embedded_offset (arg1) + offset, | |
3ae385af | 3105 | type_length_units (type)); |
3e3d7139 | 3106 | } |
df407dfe | 3107 | v->offset = (value_offset (arg1) + offset |
13c3b5f5 | 3108 | + value_embedded_offset (arg1)); |
c906108c | 3109 | } |
74bcbdf3 | 3110 | set_value_component_location (v, arg1); |
9ee8fc9d | 3111 | VALUE_REGNUM (v) = VALUE_REGNUM (arg1); |
0c16dd26 | 3112 | VALUE_FRAME_ID (v) = VALUE_FRAME_ID (arg1); |
c906108c SS |
3113 | return v; |
3114 | } | |
3115 | ||
3116 | /* Given a value ARG1 of a struct or union type, | |
3117 | extract and return the value of one of its (non-static) fields. | |
581e13c1 | 3118 | FIELDNO says which field. */ |
c906108c | 3119 | |
f23631e4 | 3120 | struct value * |
aa1ee363 | 3121 | value_field (struct value *arg1, int fieldno) |
c906108c | 3122 | { |
df407dfe | 3123 | return value_primitive_field (arg1, 0, fieldno, value_type (arg1)); |
c906108c SS |
3124 | } |
3125 | ||
3126 | /* Return a non-virtual function as a value. | |
3127 | F is the list of member functions which contains the desired method. | |
0478d61c FF |
3128 | J is an index into F which provides the desired method. |
3129 | ||
3130 | We only use the symbol for its address, so be happy with either a | |
581e13c1 | 3131 | full symbol or a minimal symbol. */ |
c906108c | 3132 | |
f23631e4 | 3133 | struct value * |
3e43a32a MS |
3134 | value_fn_field (struct value **arg1p, struct fn_field *f, |
3135 | int j, struct type *type, | |
fba45db2 | 3136 | int offset) |
c906108c | 3137 | { |
f23631e4 | 3138 | struct value *v; |
52f0bd74 | 3139 | struct type *ftype = TYPE_FN_FIELD_TYPE (f, j); |
1d06ead6 | 3140 | const char *physname = TYPE_FN_FIELD_PHYSNAME (f, j); |
c906108c | 3141 | struct symbol *sym; |
7c7b6655 | 3142 | struct bound_minimal_symbol msym; |
c906108c | 3143 | |
d12307c1 | 3144 | sym = lookup_symbol (physname, 0, VAR_DOMAIN, 0).symbol; |
5ae326fa | 3145 | if (sym != NULL) |
0478d61c | 3146 | { |
7c7b6655 | 3147 | memset (&msym, 0, sizeof (msym)); |
5ae326fa AC |
3148 | } |
3149 | else | |
3150 | { | |
3151 | gdb_assert (sym == NULL); | |
7c7b6655 TT |
3152 | msym = lookup_bound_minimal_symbol (physname); |
3153 | if (msym.minsym == NULL) | |
5ae326fa | 3154 | return NULL; |
0478d61c FF |
3155 | } |
3156 | ||
c906108c | 3157 | v = allocate_value (ftype); |
0478d61c FF |
3158 | if (sym) |
3159 | { | |
42ae5230 | 3160 | set_value_address (v, BLOCK_START (SYMBOL_BLOCK_VALUE (sym))); |
0478d61c FF |
3161 | } |
3162 | else | |
3163 | { | |
bccdca4a UW |
3164 | /* The minimal symbol might point to a function descriptor; |
3165 | resolve it to the actual code address instead. */ | |
7c7b6655 | 3166 | struct objfile *objfile = msym.objfile; |
bccdca4a UW |
3167 | struct gdbarch *gdbarch = get_objfile_arch (objfile); |
3168 | ||
42ae5230 TT |
3169 | set_value_address (v, |
3170 | gdbarch_convert_from_func_ptr_addr | |
77e371c0 | 3171 | (gdbarch, BMSYMBOL_VALUE_ADDRESS (msym), ¤t_target)); |
0478d61c | 3172 | } |
c906108c SS |
3173 | |
3174 | if (arg1p) | |
c5aa993b | 3175 | { |
df407dfe | 3176 | if (type != value_type (*arg1p)) |
c5aa993b JM |
3177 | *arg1p = value_ind (value_cast (lookup_pointer_type (type), |
3178 | value_addr (*arg1p))); | |
3179 | ||
070ad9f0 | 3180 | /* Move the `this' pointer according to the offset. |
581e13c1 | 3181 | VALUE_OFFSET (*arg1p) += offset; */ |
c906108c SS |
3182 | } |
3183 | ||
3184 | return v; | |
3185 | } | |
3186 | ||
c906108c | 3187 | \f |
c906108c | 3188 | |
4875ffdb PA |
3189 | /* Unpack a bitfield of the specified FIELD_TYPE, from the object at |
3190 | VALADDR, and return the result as a LONGEST. |
3191 | The bitfield starts at BITPOS bits and contains BITSIZE bits. | |
c906108c | 3192 | |
4875ffdb PA |
3193 | Extracting bits depends on endianness of the machine. Compute the |
3194 | number of least significant bits to discard. For big endian machines, | |
3195 | we compute the total number of bits in the anonymous object, subtract | |
3196 | off the bit count from the MSB of the object to the MSB of the | |
3197 | bitfield, then the size of the bitfield, which leaves the LSB discard | |
3198 | count. For little endian machines, the discard count is simply the | |
3199 | number of bits from the LSB of the anonymous object to the LSB of the | |
3200 | bitfield. | |
3201 | ||
3202 | If the field is signed, we also do sign extension. */ | |
3203 | ||
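/* Worked example (illustrative numbers, independent of any real ABI):
   with bitpos % 8 == 3 and bitsize == 7, bytes_read is
   (3 + 7 + 7) / 8 == 2.  With little-endian bit numbering
   lsbcount == 3; with big-endian bit numbering
   lsbcount == 2 * 8 - 3 - 7 == 6.  After shifting right by lsbcount
   the field is masked with (1 << 7) - 1 == 0x7f, and a signed field
   with bit 0x40 set is sign-extended by ORing in ~0x7f.  */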
3204 | static LONGEST | |
3205 | unpack_bits_as_long (struct type *field_type, const gdb_byte *valaddr, | |
3206 | int bitpos, int bitsize) | |
c906108c | 3207 | { |
4ea48cc1 | 3208 | enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (field_type)); |
c906108c SS |
3209 | ULONGEST val; |
3210 | ULONGEST valmask; | |
c906108c | 3211 | int lsbcount; |
4a76eae5 | 3212 | int bytes_read; |
5467c6c8 | 3213 | int read_offset; |
c906108c | 3214 | |
4a76eae5 DJ |
3215 | /* Read the minimum number of bytes required; there may not be |
3216 | enough bytes to read an entire ULONGEST. */ | |
f168693b | 3217 | field_type = check_typedef (field_type); |
4a76eae5 DJ |
3218 | if (bitsize) |
3219 | bytes_read = ((bitpos % 8) + bitsize + 7) / 8; | |
3220 | else | |
3221 | bytes_read = TYPE_LENGTH (field_type); | |
3222 | ||
5467c6c8 PA |
3223 | read_offset = bitpos / 8; |
3224 | ||
4875ffdb | 3225 | val = extract_unsigned_integer (valaddr + read_offset, |
4a76eae5 | 3226 | bytes_read, byte_order); |
c906108c | 3227 | |
581e13c1 | 3228 | /* Extract bits. See comment above. */ |
c906108c | 3229 | |
4ea48cc1 | 3230 | if (gdbarch_bits_big_endian (get_type_arch (field_type))) |
4a76eae5 | 3231 | lsbcount = (bytes_read * 8 - bitpos % 8 - bitsize); |
c906108c SS |
3232 | else |
3233 | lsbcount = (bitpos % 8); | |
3234 | val >>= lsbcount; | |
3235 | ||
3236 | /* If the field does not entirely fill a LONGEST, then zero the sign bits. | |
581e13c1 | 3237 | If the field is signed, and is negative, then sign extend. */ |
c906108c SS |
3238 | |
3239 | if ((bitsize > 0) && (bitsize < 8 * (int) sizeof (val))) | |
3240 | { | |
3241 | valmask = (((ULONGEST) 1) << bitsize) - 1; | |
3242 | val &= valmask; | |
3243 | if (!TYPE_UNSIGNED (field_type)) | |
3244 | { | |
3245 | if (val & (valmask ^ (valmask >> 1))) | |
3246 | { | |
3247 | val |= ~valmask; | |
3248 | } | |
3249 | } | |
3250 | } | |
5467c6c8 | 3251 | |
4875ffdb | 3252 | return val; |
5467c6c8 PA |
3253 | } |
3254 | ||
3255 | /* Unpack a field FIELDNO of the specified TYPE, from the object at | |
3256 | VALADDR + EMBEDDED_OFFSET. VALADDR points to the contents of | |
3257 | VAL, which must not be NULL. See unpack_bits_as_long |
3258 | for more details. */ |
3259 | ||
3260 | int | |
3261 | unpack_value_field_as_long (struct type *type, const gdb_byte *valaddr, | |
3262 | int embedded_offset, int fieldno, | |
3263 | const struct value *val, LONGEST *result) | |
3264 | { | |
4875ffdb PA |
3265 | int bitpos = TYPE_FIELD_BITPOS (type, fieldno); |
3266 | int bitsize = TYPE_FIELD_BITSIZE (type, fieldno); | |
3267 | struct type *field_type = TYPE_FIELD_TYPE (type, fieldno); | |
3268 | int bit_offset; | |
3269 | ||
5467c6c8 PA |
3270 | gdb_assert (val != NULL); |
3271 | ||
4875ffdb PA |
3272 | bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos; |
3273 | if (value_bits_any_optimized_out (val, bit_offset, bitsize) | |
3274 | || !value_bits_available (val, bit_offset, bitsize)) | |
3275 | return 0; | |
3276 | ||
3277 | *result = unpack_bits_as_long (field_type, valaddr + embedded_offset, | |
3278 | bitpos, bitsize); | |
3279 | return 1; | |
5467c6c8 PA |
3280 | } |
3281 | ||
3282 | /* Unpack a field FIELDNO of the specified TYPE, from the anonymous | |
4875ffdb | 3283 | object at VALADDR. See unpack_bits_as_long for more details. */ |
5467c6c8 PA |
3284 | |
3285 | LONGEST | |
3286 | unpack_field_as_long (struct type *type, const gdb_byte *valaddr, int fieldno) | |
3287 | { | |
4875ffdb PA |
3288 | int bitpos = TYPE_FIELD_BITPOS (type, fieldno); |
3289 | int bitsize = TYPE_FIELD_BITSIZE (type, fieldno); | |
3290 | struct type *field_type = TYPE_FIELD_TYPE (type, fieldno); | |
5467c6c8 | 3291 | |
4875ffdb PA |
3292 | return unpack_bits_as_long (field_type, valaddr, bitpos, bitsize); |
3293 | } | |
3294 | ||
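/* Usage sketch (hypothetical caller): reading a bitfield member
   straight from a value's raw bytes could look like

     LONGEST raw = unpack_field_as_long (struct_type,
                                         value_contents (val), fieldno);

   This variant does no availability tracking;
   unpack_value_field_as_long is the one to call when VAL may be
   partially unavailable or optimized out.  */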
3295 | /* Unpack a bitfield of BITSIZE bits found at BITPOS in the object at | |
3296 | VALADDR + EMBEDDED_OFFSET that has the type of DEST_VAL and store |
3297 | the contents in DEST_VAL, zero or sign extending if the type of | |
3298 | DEST_VAL is wider than BITSIZE. VALADDR points to the contents of | |
3299 | VAL. If the contents of VAL required to extract the bitfield |
3300 | are unavailable/optimized out, DEST_VAL is correspondingly | |
3301 | marked unavailable/optimized out. */ | |
3302 | ||
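/* Illustration (hypothetical sizes): when a 3-bit field is stored
   into a DEST_VAL whose type is 4 bytes wide, the meaningful bits
   land at dst_bit_offset == 4 * 8 - 3 == 29 on a big-endian target
   (the low-order bits sit at the tail of the buffer) and at
   dst_bit_offset == 0 on a little-endian target.  */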
bb9d5f81 | 3303 | void |
4875ffdb PA |
3304 | unpack_value_bitfield (struct value *dest_val, |
3305 | int bitpos, int bitsize, | |
3306 | const gdb_byte *valaddr, int embedded_offset, | |
3307 | const struct value *val) | |
3308 | { | |
3309 | enum bfd_endian byte_order; | |
3310 | int src_bit_offset; | |
3311 | int dst_bit_offset; | |
3312 | LONGEST num; | |
3313 | struct type *field_type = value_type (dest_val); | |
3314 | ||
3315 | /* First, unpack and sign extend the bitfield as if it was wholly | |
3316 | available. Invalid/unavailable bits are read as zero, but that's | |
3317 | OK, as they'll end up marked below. */ | |
3318 | byte_order = gdbarch_byte_order (get_type_arch (field_type)); | |
3319 | num = unpack_bits_as_long (field_type, valaddr + embedded_offset, | |
3320 | bitpos, bitsize); | |
3321 | store_signed_integer (value_contents_raw (dest_val), | |
3322 | TYPE_LENGTH (field_type), byte_order, num); | |
3323 | ||
3324 | /* Now copy the optimized out / unavailability ranges to the right | |
3325 | bits. */ | |
3326 | src_bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos; | |
3327 | if (byte_order == BFD_ENDIAN_BIG) | |
3328 | dst_bit_offset = TYPE_LENGTH (field_type) * TARGET_CHAR_BIT - bitsize; | |
3329 | else | |
3330 | dst_bit_offset = 0; | |
3331 | value_ranges_copy_adjusted (dest_val, dst_bit_offset, | |
3332 | val, src_bit_offset, bitsize); | |
5467c6c8 PA |
3333 | } |
3334 | ||
3335 | /* Return a new value with type TYPE, which is FIELDNO field of the | |
3336 | object at VALADDR + EMBEDDED_OFFSET. VALADDR points to the contents |
3337 | of VAL. If the contents of VAL required to extract the bitfield |
4875ffdb PA |
3338 | are unavailable/optimized out, the new value is |
3339 | correspondingly marked unavailable/optimized out. */ | |
5467c6c8 PA |
3340 | |
3341 | struct value * | |
3342 | value_field_bitfield (struct type *type, int fieldno, | |
3343 | const gdb_byte *valaddr, | |
3344 | int embedded_offset, const struct value *val) | |
3345 | { | |
4875ffdb PA |
3346 | int bitpos = TYPE_FIELD_BITPOS (type, fieldno); |
3347 | int bitsize = TYPE_FIELD_BITSIZE (type, fieldno); | |
3348 | struct value *res_val = allocate_value (TYPE_FIELD_TYPE (type, fieldno)); | |
5467c6c8 | 3349 | |
4875ffdb PA |
3350 | unpack_value_bitfield (res_val, bitpos, bitsize, |
3351 | valaddr, embedded_offset, val); | |
3352 | ||
3353 | return res_val; | |
4ea48cc1 DJ |
3354 | } |
3355 | ||
c906108c SS |
3356 | /* Modify the value of a bitfield. ADDR points to a block of memory in |
3357 | target byte order; the bitfield starts in the byte pointed to. FIELDVAL | |
3358 | is the desired value of the field, in host byte order. BITPOS and BITSIZE | |
581e13c1 | 3359 | indicate which bits (in target bit order) comprise the bitfield. |
19f220c3 | 3360 | Requires 0 < BITSIZE <= lbits, 0 <= BITPOS % 8 + BITSIZE <= lbits, and |
f4e88c8e | 3361 | 0 <= BITPOS, where lbits is the size of a LONGEST in bits. */ |
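/* Worked example (illustrative numbers only): modifying a 3-bit
   field at BITPOS == 11 with FIELDVAL == 5 normalizes to addr += 1
   and bitpos == 3, with mask == 0x7 and
   bytesize == (3 + 3 + 7) / 8 == 1.  With little-endian bit
   numbering the stored byte becomes
   (oword & ~(0x7 << 3)) | (5 << 3); with big-endian bit numbering
   bitpos is first remapped to 8 - 3 - 3 == 2.  */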
c906108c SS |
3362 | |
3363 | void | |
50810684 UW |
3364 | modify_field (struct type *type, gdb_byte *addr, |
3365 | LONGEST fieldval, int bitpos, int bitsize) | |
c906108c | 3366 | { |
e17a4113 | 3367 | enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type)); |
f4e88c8e PH |
3368 | ULONGEST oword; |
3369 | ULONGEST mask = (ULONGEST) -1 >> (8 * sizeof (ULONGEST) - bitsize); | |
19f220c3 JK |
3370 | int bytesize; |
3371 | ||
3372 | /* Normalize BITPOS. */ | |
3373 | addr += bitpos / 8; | |
3374 | bitpos %= 8; | |
c906108c SS |
3375 | |
3376 | /* If a negative fieldval fits in the field in question, chop | |
3377 | off the sign extension bits. */ | |
f4e88c8e PH |
3378 | if ((~fieldval & ~(mask >> 1)) == 0) |
3379 | fieldval &= mask; | |
c906108c SS |
3380 | |
3381 | /* Warn if value is too big to fit in the field in question. */ | |
f4e88c8e | 3382 | if (0 != (fieldval & ~mask)) |
c906108c SS |
3383 | { |
3384 | /* FIXME: would like to include fieldval in the message, but | |
c5aa993b | 3385 | we don't have a sprintf_longest. */ |
8a3fe4f8 | 3386 | warning (_("Value does not fit in %d bits."), bitsize); |
c906108c SS |
3387 | |
3388 | /* Truncate it, otherwise adjoining fields may be corrupted. */ | |
f4e88c8e | 3389 | fieldval &= mask; |
c906108c SS |
3390 | } |
3391 | ||
19f220c3 JK |
3392 | /* Ensure no bytes outside of the modified ones get accessed as it may cause |
3393 | false valgrind reports. */ | |
3394 | ||
3395 | bytesize = (bitpos + bitsize + 7) / 8; | |
3396 | oword = extract_unsigned_integer (addr, bytesize, byte_order); | |
c906108c SS |
3397 | |
3398 | /* Shifting for bit field depends on endianness of the target machine. */ | |
50810684 | 3399 | if (gdbarch_bits_big_endian (get_type_arch (type))) |
19f220c3 | 3400 | bitpos = bytesize * 8 - bitpos - bitsize; |
c906108c | 3401 | |
f4e88c8e | 3402 | oword &= ~(mask << bitpos); |
c906108c SS |
3403 | oword |= fieldval << bitpos; |
3404 | ||
19f220c3 | 3405 | store_unsigned_integer (addr, bytesize, byte_order, oword); |
c906108c SS |
3406 | } |
3407 | \f | |
14d06750 | 3408 | /* Pack NUM into BUF using a target format of TYPE. */ |
c906108c | 3409 | |
14d06750 DJ |
3410 | void |
3411 | pack_long (gdb_byte *buf, struct type *type, LONGEST num) | |
c906108c | 3412 | { |
e17a4113 | 3413 | enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type)); |
52f0bd74 | 3414 | int len; |
14d06750 DJ |
3415 | |
3416 | type = check_typedef (type); | |
c906108c SS |
3417 | len = TYPE_LENGTH (type); |
3418 | ||
14d06750 | 3419 | switch (TYPE_CODE (type)) |
c906108c | 3420 | { |
c906108c SS |
3421 | case TYPE_CODE_INT: |
3422 | case TYPE_CODE_CHAR: | |
3423 | case TYPE_CODE_ENUM: | |
4f2aea11 | 3424 | case TYPE_CODE_FLAGS: |
c906108c SS |
3425 | case TYPE_CODE_BOOL: |
3426 | case TYPE_CODE_RANGE: | |
0d5de010 | 3427 | case TYPE_CODE_MEMBERPTR: |
e17a4113 | 3428 | store_signed_integer (buf, len, byte_order, num); |
c906108c | 3429 | break; |
c5aa993b | 3430 | |
c906108c SS |
3431 | case TYPE_CODE_REF: |
3432 | case TYPE_CODE_PTR: | |
14d06750 | 3433 | store_typed_address (buf, type, (CORE_ADDR) num); |
c906108c | 3434 | break; |
c5aa993b | 3435 | |
c906108c | 3436 | default: |
14d06750 DJ |
3437 | error (_("Unexpected type (%d) encountered for integer constant."), |
3438 | TYPE_CODE (type)); | |
c906108c | 3439 | } |
14d06750 DJ |
3440 | } |
3441 | ||
3442 | ||
595939de PM |
3443 | /* Pack unsigned NUM into BUF using a target format of TYPE. */ |
3444 | ||
70221824 | 3445 | static void |
595939de PM |
3446 | pack_unsigned_long (gdb_byte *buf, struct type *type, ULONGEST num) |
3447 | { | |
3448 | int len; | |
3449 | enum bfd_endian byte_order; | |
3450 | ||
3451 | type = check_typedef (type); | |
3452 | len = TYPE_LENGTH (type); | |
3453 | byte_order = gdbarch_byte_order (get_type_arch (type)); | |
3454 | ||
3455 | switch (TYPE_CODE (type)) | |
3456 | { | |
3457 | case TYPE_CODE_INT: | |
3458 | case TYPE_CODE_CHAR: | |
3459 | case TYPE_CODE_ENUM: | |
3460 | case TYPE_CODE_FLAGS: | |
3461 | case TYPE_CODE_BOOL: | |
3462 | case TYPE_CODE_RANGE: | |
3463 | case TYPE_CODE_MEMBERPTR: | |
3464 | store_unsigned_integer (buf, len, byte_order, num); | |
3465 | break; | |
3466 | ||
3467 | case TYPE_CODE_REF: | |
3468 | case TYPE_CODE_PTR: | |
3469 | store_typed_address (buf, type, (CORE_ADDR) num); | |
3470 | break; | |
3471 | ||
3472 | default: | |
3e43a32a MS |
3473 | error (_("Unexpected type (%d) encountered " |
3474 | "for unsigned integer constant."), | |
595939de PM |
3475 | TYPE_CODE (type)); |
3476 | } | |
3477 | } | |
3478 | ||
3479 | ||
14d06750 DJ |
3480 | /* Convert C numbers into newly allocated values. */ |
3481 | ||
3482 | struct value * | |
3483 | value_from_longest (struct type *type, LONGEST num) | |
3484 | { | |
3485 | struct value *val = allocate_value (type); | |
3486 | ||
3487 | pack_long (value_contents_raw (val), type, num); | |
c906108c SS |
3488 | return val; |
3489 | } | |
3490 | ||
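/* A typical caller (sketch, assuming a valid GDBARCH is in scope):

     struct value *forty_two
       = value_from_longest (builtin_type (gdbarch)->builtin_int, 42);

   The contents are laid out by pack_long in target byte order.  */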
4478b372 | 3491 | |
595939de PM |
3492 | /* Convert C unsigned numbers into newly allocated values. */ |
3493 | ||
3494 | struct value * | |
3495 | value_from_ulongest (struct type *type, ULONGEST num) | |
3496 | { | |
3497 | struct value *val = allocate_value (type); | |
3498 | ||
3499 | pack_unsigned_long (value_contents_raw (val), type, num); | |
3500 | ||
3501 | return val; | |
3502 | } | |
3503 | ||
3504 | ||
4478b372 | 3505 | /* Create a value representing a pointer of type TYPE to the address |
cb417230 | 3506 | ADDR. */ |
80180f79 | 3507 | |
f23631e4 | 3508 | struct value * |
4478b372 JB |
3509 | value_from_pointer (struct type *type, CORE_ADDR addr) |
3510 | { | |
cb417230 | 3511 | struct value *val = allocate_value (type); |
a109c7c1 | 3512 | |
80180f79 | 3513 | store_typed_address (value_contents_raw (val), |
cb417230 | 3514 | check_typedef (type), addr); |
4478b372 JB |
3515 | return val; |
3516 | } | |
3517 | ||
3518 | ||
012370f6 TT |
3519 | /* Create a value of type TYPE whose contents come from VALADDR, if it |
3520 | is non-null, and whose memory address (in the inferior) is | |
3521 | ADDRESS. The type of the created value may differ from the passed | |
3522 | type TYPE. Make sure to retrieve the value's new type after this call. |
3523 | Note that TYPE is not passed through resolve_dynamic_type; this is | |
3524 | a special API intended for use only by Ada. */ | |
3525 | ||
3526 | struct value * | |
3527 | value_from_contents_and_address_unresolved (struct type *type, | |
3528 | const gdb_byte *valaddr, | |
3529 | CORE_ADDR address) | |
3530 | { | |
3531 | struct value *v; | |
3532 | ||
3533 | if (valaddr == NULL) | |
3534 | v = allocate_value_lazy (type); | |
3535 | else | |
3536 | v = value_from_contents (type, valaddr); | |
3537 | set_value_address (v, address); | |
3538 | VALUE_LVAL (v) = lval_memory; | |
3539 | return v; | |
3540 | } | |
3541 | ||
8acb6b92 TT |
3542 | /* Create a value of type TYPE whose contents come from VALADDR, if it |
3543 | is non-null, and whose memory address (in the inferior) is | |
80180f79 SA |
3544 | ADDRESS. The type of the created value may differ from the passed |
3545 | type TYPE. Make sure to retrieve the value's new type after this call. */ |
8acb6b92 TT |
3546 | |
3547 | struct value * | |
3548 | value_from_contents_and_address (struct type *type, | |
3549 | const gdb_byte *valaddr, | |
3550 | CORE_ADDR address) | |
3551 | { | |
c3345124 | 3552 | struct type *resolved_type = resolve_dynamic_type (type, valaddr, address); |
d36430db | 3553 | struct type *resolved_type_no_typedef = check_typedef (resolved_type); |
41e8491f | 3554 | struct value *v; |
a109c7c1 | 3555 | |
8acb6b92 | 3556 | if (valaddr == NULL) |
80180f79 | 3557 | v = allocate_value_lazy (resolved_type); |
8acb6b92 | 3558 | else |
80180f79 | 3559 | v = value_from_contents (resolved_type, valaddr); |
d36430db JB |
3560 | if (TYPE_DATA_LOCATION (resolved_type_no_typedef) != NULL |
3561 | && TYPE_DATA_LOCATION_KIND (resolved_type_no_typedef) == PROP_CONST) | |
3562 | address = TYPE_DATA_LOCATION_ADDR (resolved_type_no_typedef); | |
42ae5230 | 3563 | set_value_address (v, address); |
33d502b4 | 3564 | VALUE_LVAL (v) = lval_memory; |
8acb6b92 TT |
3565 | return v; |
3566 | } | |
3567 | ||
8a9b8146 TT |
3568 | /* Create a value of type TYPE holding the contents CONTENTS. |
3569 | The new value is `not_lval'. */ | |
3570 | ||
3571 | struct value * | |
3572 | value_from_contents (struct type *type, const gdb_byte *contents) | |
3573 | { | |
3574 | struct value *result; | |
3575 | ||
3576 | result = allocate_value (type); | |
3577 | memcpy (value_contents_raw (result), contents, TYPE_LENGTH (type)); | |
3578 | return result; | |
3579 | } | |
3580 | ||
f23631e4 | 3581 | struct value * |
fba45db2 | 3582 | value_from_double (struct type *type, DOUBLEST num) |
c906108c | 3583 | { |
f23631e4 | 3584 | struct value *val = allocate_value (type); |
c906108c | 3585 | struct type *base_type = check_typedef (type); |
52f0bd74 | 3586 | enum type_code code = TYPE_CODE (base_type); |
c906108c SS |
3587 | |
3588 | if (code == TYPE_CODE_FLT) | |
3589 | { | |
990a07ab | 3590 | store_typed_floating (value_contents_raw (val), base_type, num); |
c906108c SS |
3591 | } |
3592 | else | |
8a3fe4f8 | 3593 | error (_("Unexpected type encountered for floating constant.")); |
c906108c SS |
3594 | |
3595 | return val; | |
3596 | } | |
994b9211 | 3597 | |
27bc4d80 | 3598 | struct value * |
4ef30785 | 3599 | value_from_decfloat (struct type *type, const gdb_byte *dec) |
27bc4d80 TJB |
3600 | { |
3601 | struct value *val = allocate_value (type); | |
27bc4d80 | 3602 | |
4ef30785 | 3603 | memcpy (value_contents_raw (val), dec, TYPE_LENGTH (type)); |
27bc4d80 TJB |
3604 | return val; |
3605 | } | |
3606 | ||
3bd0f5ef MS |
3607 | /* Extract a value from the value history. Input will be of the form |
3608 | $digits or $$digits. See block comment above 'write_dollar_variable' | |
3609 | for details. */ | |
3610 | ||
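/* For illustration, the indices produced by the code below:
   "$" yields 0, "$7" yields 7, "$$" yields -1 (the value before the
   last), and "$$3" yields -3; inputs such as "$pc" or "$7foo" make
   this routine return NULL instead.  */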
3611 | struct value * | |
e799154c | 3612 | value_from_history_ref (const char *h, const char **endp) |
3bd0f5ef MS |
3613 | { |
3614 | int index, len; | |
3615 | ||
3616 | if (h[0] == '$') | |
3617 | len = 1; | |
3618 | else | |
3619 | return NULL; | |
3620 | ||
3621 | if (h[1] == '$') | |
3622 | len = 2; | |
3623 | ||
3624 | /* Find length of numeral string. */ | |
3625 | for (; isdigit (h[len]); len++) | |
3626 | ; | |
3627 | ||
3628 | /* Make sure numeral string is not part of an identifier. */ | |
3629 | if (h[len] == '_' || isalpha (h[len])) | |
3630 | return NULL; | |
3631 | ||
3632 | /* Now collect the index value. */ | |
3633 | if (h[1] == '$') | |
3634 | { | |
3635 | if (len == 2) | |
3636 | { | |
3637 | /* For some bizarre reason, "$$" is equivalent to "$$1", | |
3638 | rather than to "$$0" as it ought to be! */ | |
3639 | index = -1; | |
3640 | *endp += len; | |
3641 | } | |
3642 | else | |
e799154c TT |
3643 | { |
3644 | char *local_end; | |
3645 | ||
3646 | index = -strtol (&h[2], &local_end, 10); | |
3647 | *endp = local_end; | |
3648 | } | |
3bd0f5ef MS |
3649 | } |
3650 | else | |
3651 | { | |
3652 | if (len == 1) | |
3653 | { | |
3654 | /* "$" is equivalent to "$0". */ | |
3655 | index = 0; | |
3656 | *endp += len; | |
3657 | } | |
3658 | else | |
e799154c TT |
3659 | { |
3660 | char *local_end; | |
3661 | ||
3662 | index = strtol (&h[1], &local_end, 10); | |
3663 | *endp = local_end; | |
3664 | } | |
3bd0f5ef MS |
3665 | } |
3666 | ||
3667 | return access_value_history (index); | |
3668 | } | |
3669 | ||
a471c594 JK |
3670 | struct value * |
3671 | coerce_ref_if_computed (const struct value *arg) | |
3672 | { | |
3673 | const struct lval_funcs *funcs; | |
3674 | ||
3675 | if (TYPE_CODE (check_typedef (value_type (arg))) != TYPE_CODE_REF) | |
3676 | return NULL; | |
3677 | ||
3678 | if (value_lval_const (arg) != lval_computed) | |
3679 | return NULL; | |
3680 | ||
3681 | funcs = value_computed_funcs (arg); | |
3682 | if (funcs->coerce_ref == NULL) | |
3683 | return NULL; | |
3684 | ||
3685 | return funcs->coerce_ref (arg); | |
3686 | } | |
3687 | ||
dfcee124 AG |
3688 | /* Look at value.h for description. */ |
3689 | ||
3690 | struct value * | |
3691 | readjust_indirect_value_type (struct value *value, struct type *enc_type, | |
3692 | struct type *original_type, | |
3693 | struct value *original_value) | |
3694 | { | |
3695 | /* Re-adjust type. */ | |
3696 | deprecated_set_value_type (value, TYPE_TARGET_TYPE (original_type)); | |
3697 | ||
3698 | /* Add embedding info. */ | |
3699 | set_value_enclosing_type (value, enc_type); | |
3700 | set_value_embedded_offset (value, value_pointed_to_offset (original_value)); | |
3701 | ||
3702 | /* We may be pointing to an object of some derived type. */ | |
3703 | return value_full_object (value, NULL, 0, 0, 0); | |
3704 | } | |
3705 | ||
994b9211 AC |
3706 | struct value * |
3707 | coerce_ref (struct value *arg) | |
3708 | { | |
df407dfe | 3709 | struct type *value_type_arg_tmp = check_typedef (value_type (arg)); |
a471c594 | 3710 | struct value *retval; |
dfcee124 | 3711 | struct type *enc_type; |
a109c7c1 | 3712 | |
a471c594 JK |
3713 | retval = coerce_ref_if_computed (arg); |
3714 | if (retval) | |
3715 | return retval; | |
3716 | ||
3717 | if (TYPE_CODE (value_type_arg_tmp) != TYPE_CODE_REF) | |
3718 | return arg; | |
3719 | ||
dfcee124 AG |
3720 | enc_type = check_typedef (value_enclosing_type (arg)); |
3721 | enc_type = TYPE_TARGET_TYPE (enc_type); | |
3722 | ||
3723 | retval = value_at_lazy (enc_type, | |
3724 | unpack_pointer (value_type (arg), | |
3725 | value_contents (arg))); | |
9f1f738a | 3726 | enc_type = value_type (retval); |
dfcee124 AG |
3727 | return readjust_indirect_value_type (retval, enc_type, |
3728 | value_type_arg_tmp, arg); | |
994b9211 AC |
3729 | } |
3730 | ||
3731 | struct value * | |
3732 | coerce_array (struct value *arg) | |
3733 | { | |
f3134b88 TT |
3734 | struct type *type; |
3735 | ||
994b9211 | 3736 | arg = coerce_ref (arg); |
f3134b88 TT |
3737 | type = check_typedef (value_type (arg)); |
3738 | ||
3739 | switch (TYPE_CODE (type)) | |
3740 | { | |
3741 | case TYPE_CODE_ARRAY: | |
7346b668 | 3742 | if (!TYPE_VECTOR (type) && current_language->c_style_arrays) |
f3134b88 TT |
3743 | arg = value_coerce_array (arg); |
3744 | break; | |
3745 | case TYPE_CODE_FUNC: | |
3746 | arg = value_coerce_function (arg); | |
3747 | break; | |
3748 | } | |
994b9211 AC |
3749 | return arg; |
3750 | } | |
c906108c | 3751 | \f |
c906108c | 3752 | |
bbfdfe1c DM |
3753 | /* Return the return value convention that will be used for the |
3754 | specified type. */ | |
3755 | ||
3756 | enum return_value_convention | |
3757 | struct_return_convention (struct gdbarch *gdbarch, | |
3758 | struct value *function, struct type *value_type) | |
3759 | { | |
3760 | enum type_code code = TYPE_CODE (value_type); | |
3761 | ||
3762 | if (code == TYPE_CODE_ERROR) | |
3763 | error (_("Function return type unknown.")); | |
3764 | ||
3765 | /* Probe the architecture for the return-value convention. */ | |
3766 | return gdbarch_return_value (gdbarch, function, value_type, | |
3767 | NULL, NULL, NULL); | |
3768 | } | |
3769 | ||
48436ce6 AC |
3770 | /* Return true if the function returning the specified type is using |
3771 | the convention of returning structures in memory (passing in the | |
82585c72 | 3772 | address as a hidden first parameter). */ |
c906108c SS |
3773 | |
3774 | int | |
d80b854b | 3775 | using_struct_return (struct gdbarch *gdbarch, |
6a3a010b | 3776 | struct value *function, struct type *value_type) |
c906108c | 3777 | { |
bbfdfe1c | 3778 | if (TYPE_CODE (value_type) == TYPE_CODE_VOID) |
667e784f | 3779 | /* A void return value is never in memory. See also corresponding |
44e5158b | 3780 | code in "print_return_value". */ |
667e784f AC |
3781 | return 0; |
3782 | ||
bbfdfe1c | 3783 | return (struct_return_convention (gdbarch, function, value_type) |
31db7b6c | 3784 | != RETURN_VALUE_REGISTER_CONVENTION); |
c906108c SS |
3785 | } |
3786 | ||
42be36b3 CT |
3787 | /* Set the initialized field in a value struct. */ |
3788 | ||
3789 | void | |
3790 | set_value_initialized (struct value *val, int status) | |
3791 | { | |
3792 | val->initialized = status; | |
3793 | } | |
3794 | ||
3795 | /* Return the initialized field in a value struct. */ | |
3796 | ||
3797 | int | |
3798 | value_initialized (struct value *val) | |
3799 | { | |
3800 | return val->initialized; | |
3801 | } | |
3802 | ||
a844296a SM |
3803 | /* Load the actual content of a lazy value. Fetch the data from the |
3804 | user's process and clear the lazy flag to indicate that the data in | |
3805 | the buffer is valid. | |
a58e2656 AB |
3806 | |
3807 | If the value is zero-length, we avoid calling read_memory, which | |
3808 | would abort. We mark the value as fetched anyway -- all 0 bytes of | |
a844296a | 3809 | it. */ |
a58e2656 | 3810 | |
a844296a | 3811 | void |
a58e2656 AB |
3812 | value_fetch_lazy (struct value *val) |
3813 | { | |
3814 | gdb_assert (value_lazy (val)); | |
3815 | allocate_value_contents (val); | |
9a0dc9e3 PA |
3816 | /* A value is either lazy, or fully fetched. The |
3817 | availability/validity is only established as we try to fetch a | |
3818 | value. */ | |
3819 | gdb_assert (VEC_empty (range_s, val->optimized_out)); | |
3820 | gdb_assert (VEC_empty (range_s, val->unavailable)); | |
a58e2656 AB |
3821 | if (value_bitsize (val)) |
3822 | { | |
3823 | /* To read a lazy bitfield, read the entire enclosing value. This | |
3824 | prevents reading the same block of (possibly volatile) memory once | |
3825 | per bitfield. It would be even better to read only the containing | |
3826 | word, but we have no way to record that just specific bits of a | |
3827 | value have been fetched. */ | |
3828 | struct type *type = check_typedef (value_type (val)); | |
a58e2656 | 3829 | struct value *parent = value_parent (val); |
a58e2656 | 3830 | |
b0c54aa5 AB |
3831 | if (value_lazy (parent)) |
3832 | value_fetch_lazy (parent); | |
3833 | ||
4875ffdb PA |
3834 | unpack_value_bitfield (val, |
3835 | value_bitpos (val), value_bitsize (val), | |
3836 | value_contents_for_printing (parent), | |
3837 | value_offset (val), parent); | |
a58e2656 AB |
3838 | } |
3839 | else if (VALUE_LVAL (val) == lval_memory) | |
3840 | { | |
3841 | CORE_ADDR addr = value_address (val); | |
3842 | struct type *type = check_typedef (value_enclosing_type (val)); | |
3843 | ||
3844 | if (TYPE_LENGTH (type)) | |
3845 | read_value_memory (val, 0, value_stack (val), | |
3846 | addr, value_contents_all_raw (val), | |
3ae385af | 3847 | type_length_units (type)); |
a58e2656 AB |
3848 | } |
3849 | else if (VALUE_LVAL (val) == lval_register) | |
3850 | { | |
3851 | struct frame_info *frame; | |
3852 | int regnum; | |
3853 | struct type *type = check_typedef (value_type (val)); | |
3854 | struct value *new_val = val, *mark = value_mark (); | |
3855 | ||
3856 | /* Offsets are not supported here; lazy register values must | |
3857 | refer to the entire register. */ | |
3858 | gdb_assert (value_offset (val) == 0); | |
3859 | ||
3860 | while (VALUE_LVAL (new_val) == lval_register && value_lazy (new_val)) | |
3861 | { | |
6eeee81c TT |
3862 | struct frame_id frame_id = VALUE_FRAME_ID (new_val); |
3863 | ||
3864 | frame = frame_find_by_id (frame_id); | |
a58e2656 AB |
3865 | regnum = VALUE_REGNUM (new_val); |
3866 | ||
3867 | gdb_assert (frame != NULL); | |
3868 | ||
3869 | /* Convertible register routines are used for multi-register | |
3870 | values and for interpretation in different types | |
3871 | (e.g. float or int from a double register). Lazy | |
3872 | register values should have the register's natural type, | |
3873 | so they do not apply. */ | |
3874 | gdb_assert (!gdbarch_convert_register_p (get_frame_arch (frame), | |
3875 | regnum, type)); | |
3876 | ||
3877 | new_val = get_frame_register_value (frame, regnum); | |
6eeee81c TT |
3878 | |
3879 | /* If we get another lazy lval_register value, it means the | |
3880 | register is found by reading it from the next frame. | |
3881 | get_frame_register_value should never return a value with | |
3882 | the frame id pointing to FRAME. If it does, it means we | |
3883 | either have two consecutive frames with the same frame id | |
3884 | in the frame chain, or some code is trying to unwind | |
3885 | behind get_prev_frame's back (e.g., a frame unwind | |
3886 | sniffer trying to unwind), bypassing its validations. In | |
3887 | any case, it should always be an internal error to end up | |
3888 | in this situation. */ | |
3889 | if (VALUE_LVAL (new_val) == lval_register | |
3890 | && value_lazy (new_val) | |
3891 | && frame_id_eq (VALUE_FRAME_ID (new_val), frame_id)) | |
3892 | internal_error (__FILE__, __LINE__, | |
3893 | _("infinite loop while fetching a register")); | |
a58e2656 AB |
3894 | } |
3895 | ||
3896 | /* If it's still lazy (for instance, a saved register on the | |
3897 | stack), fetch it. */ | |
3898 | if (value_lazy (new_val)) | |
3899 | value_fetch_lazy (new_val); | |
3900 | ||
9a0dc9e3 PA |
3901 | /* Copy the contents and the unavailability/optimized-out |
3902 | meta-data from NEW_VAL to VAL. */ | |
3903 | set_value_lazy (val, 0); | |
3904 | value_contents_copy (val, value_embedded_offset (val), | |
3905 | new_val, value_embedded_offset (new_val), | |
3ae385af | 3906 | type_length_units (type)); |
a58e2656 AB |
3907 | |
3908 | if (frame_debug) | |
3909 | { | |
3910 | struct gdbarch *gdbarch; | |
3911 | frame = frame_find_by_id (VALUE_FRAME_ID (val)); | |
3912 | regnum = VALUE_REGNUM (val); | |
3913 | gdbarch = get_frame_arch (frame); | |
3914 | ||
3915 | fprintf_unfiltered (gdb_stdlog, | |
3916 | "{ value_fetch_lazy " | |
3917 | "(frame=%d,regnum=%d(%s),...) ", | |
3918 | frame_relative_level (frame), regnum, | |
3919 | user_reg_map_regnum_to_name (gdbarch, regnum)); | |
3920 | ||
3921 | fprintf_unfiltered (gdb_stdlog, "->"); | |
3922 | if (value_optimized_out (new_val)) | |
f6c01fc5 AB |
3923 | { |
3924 | fprintf_unfiltered (gdb_stdlog, " "); | |
3925 | val_print_optimized_out (new_val, gdb_stdlog); | |
3926 | } | |
a58e2656 AB |
3927 | else |
3928 | { | |
3929 | int i; | |
3930 | const gdb_byte *buf = value_contents (new_val); | |
3931 | ||
3932 | if (VALUE_LVAL (new_val) == lval_register) | |
3933 | fprintf_unfiltered (gdb_stdlog, " register=%d", | |
3934 | VALUE_REGNUM (new_val)); | |
3935 | else if (VALUE_LVAL (new_val) == lval_memory) | |
3936 | fprintf_unfiltered (gdb_stdlog, " address=%s", | |
3937 | paddress (gdbarch, | |
3938 | value_address (new_val))); | |
3939 | else | |
3940 | fprintf_unfiltered (gdb_stdlog, " computed"); | |
3941 | ||
3942 | fprintf_unfiltered (gdb_stdlog, " bytes="); | |
3943 | fprintf_unfiltered (gdb_stdlog, "["); | |
3944 | for (i = 0; i < register_size (gdbarch, regnum); i++) | |
3945 | fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]); | |
3946 | fprintf_unfiltered (gdb_stdlog, "]"); | |
3947 | } | |
3948 | ||
3949 | fprintf_unfiltered (gdb_stdlog, " }\n"); | |
3950 | } | |
3951 | ||
3952 | /* Dispose of the intermediate values. This prevents | |
3953 | watchpoints from trying to watch the saved frame pointer. */ | |
3954 | value_free_to_mark (mark); | |
3955 | } | |
3956 | else if (VALUE_LVAL (val) == lval_computed | |
3957 | && value_computed_funcs (val)->read != NULL) | |
3958 | value_computed_funcs (val)->read (val); | |
a58e2656 AB |
3959 | else |
3960 | internal_error (__FILE__, __LINE__, _("Unexpected lazy value type.")); | |
3961 | ||
3962 | set_value_lazy (val, 0); | |
a58e2656 AB |
3963 | } |
3964 | ||
a280dbd1 SDJ |
3965 | /* Implementation of the convenience function $_isvoid. */ |
3966 | ||
3967 | static struct value * | |
3968 | isvoid_internal_fn (struct gdbarch *gdbarch, | |
3969 | const struct language_defn *language, | |
3970 | void *cookie, int argc, struct value **argv) | |
3971 | { | |
3972 | int ret; | |
3973 | ||
3974 | if (argc != 1) | |
6bc305f5 | 3975 | error (_("You must provide one argument for $_isvoid.")); |
a280dbd1 SDJ |
3976 | |
3977 | ret = TYPE_CODE (value_type (argv[0])) == TYPE_CODE_VOID; | |
3978 | ||
3979 | return value_from_longest (builtin_type (gdbarch)->builtin_int, ret); | |
3980 | } | |
3981 | ||
c906108c | 3982 | void |
fba45db2 | 3983 | _initialize_values (void) |
c906108c | 3984 | { |
1a966eab | 3985 | add_cmd ("convenience", no_class, show_convenience, _("\ |
f47f77df DE |
3986 | Debugger convenience (\"$foo\") variables and functions.\n\ |
3987 | Convenience variables are created when you assign them values;\n\ | |
3988 | thus, \"set $foo=1\" gives \"$foo\" the value 1. Values may be any type.\n\ | |
1a966eab | 3989 | \n\ |
c906108c SS |
3990 | A few convenience variables are given values automatically:\n\ |
3991 | \"$_\"holds the last address examined with \"x\" or \"info lines\",\n\ | |
f47f77df DE |
3992 | \"$__\" holds the contents of the last address examined with \"x\"." |
3993 | #ifdef HAVE_PYTHON | |
3994 | "\n\n\ | |
3995 | Convenience functions are defined via the Python API." | |
3996 | #endif | |
3997 | ), &showlist); | |
7e20dfcd | 3998 | add_alias_cmd ("conv", "convenience", no_class, 1, &showlist); |
c906108c | 3999 | |
db5f229b | 4000 | add_cmd ("values", no_set_class, show_values, _("\ |
3e43a32a | 4001 | Elements of value history around item number IDX (or last ten)."), |
c906108c | 4002 | &showlist); |
53e5f3cf AS |
4003 | |
4004 | add_com ("init-if-undefined", class_vars, init_if_undefined_command, _("\ | |
4005 | Initialize a convenience variable if necessary.\n\ | |
4006 | init-if-undefined VARIABLE = EXPRESSION\n\ | |
4007 | Set an internal VARIABLE to the result of the EXPRESSION if it does not\n\ | |
4008 | exist or does not contain a value. The EXPRESSION is not evaluated if the\n\ | |
4009 | VARIABLE is already initialized.")); | |
bc3b79fd TJB |
4010 | |
4011 | add_prefix_cmd ("function", no_class, function_command, _("\ | |
4012 | Placeholder command for showing help on convenience functions."), | |
4013 | &functionlist, "function ", 0, &cmdlist); | |
a280dbd1 SDJ |
4014 | |
4015 | add_internal_function ("_isvoid", _("\ | |
4016 | Check whether an expression is void.\n\ | |
4017 | Usage: $_isvoid (expression)\n\ | |
4018 | Return 1 if the expression is void, zero otherwise."), | |
4019 | isvoid_internal_fn, NULL); | |
c906108c | 4020 | } |