Commit | Line | Data |
---|---|---|
852483bc MK |
1 | /* DWARF 2 Expression Evaluator. |
2 | ||
32d0add0 | 3 | Copyright (C) 2001-2015 Free Software Foundation, Inc. |
852483bc | 4 | |
4c2df51b DJ |
5 | Contributed by Daniel Berlin (dan@dberlin.org) |
6 | ||
7 | This file is part of GDB. | |
8 | ||
9 | This program is free software; you can redistribute it and/or modify | |
10 | it under the terms of the GNU General Public License as published by | |
a9762ec7 | 11 | the Free Software Foundation; either version 3 of the License, or |
4c2df51b DJ |
12 | (at your option) any later version. |
13 | ||
14 | This program is distributed in the hope that it will be useful, | |
15 | but WITHOUT ANY WARRANTY; without even the implied warranty of | |
16 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
17 | GNU General Public License for more details. | |
18 | ||
19 | You should have received a copy of the GNU General Public License | |
a9762ec7 | 20 | along with this program. If not, see <http://www.gnu.org/licenses/>. */ |
4c2df51b DJ |
21 | |
22 | #include "defs.h" | |
23 | #include "symtab.h" | |
24 | #include "gdbtypes.h" | |
25 | #include "value.h" | |
26 | #include "gdbcore.h" | |
fa8f86ff | 27 | #include "dwarf2.h" |
4c2df51b DJ |
28 | #include "dwarf2expr.h" |
29 | ||
30 | /* Local prototypes. */ | |
31 | ||
32 | static void execute_stack_op (struct dwarf_expr_context *, | |
0d45f56e | 33 | const gdb_byte *, const gdb_byte *); |
4c2df51b | 34 | |
8a9b8146 TT |
/* Cookie for gdbarch data.  Used to look up the per-architecture
   dwarf_gdbarch_types instance via gdbarch_data.  */

static struct gdbarch_data *dwarf_arch_cookie;

/* This holds gdbarch-specific types used by the DWARF expression
   evaluator.  See comments in execute_stack_op.  */

struct dwarf_gdbarch_types
{
  /* Lazily-created integer types for DWARF addresses; indexed by
     address size (0 = 2 bytes, 1 = 4 bytes, 2 = 8 bytes).  See
     dwarf_expr_address_type.  */
  struct type *dw_types[3];
};
46 | ||
/* Allocate and fill in dwarf_gdbarch_types for an arch.  Registered
   as the gdbarch_data init function; the returned object is allocated
   on GDBARCH's obstack and zero-initialized.  */

static void *
dwarf_gdbarch_types_init (struct gdbarch *gdbarch)
{
  struct dwarf_gdbarch_types *types
    = GDBARCH_OBSTACK_ZALLOC (gdbarch, struct dwarf_gdbarch_types);

  /* The types themselves are lazily initialized.  */

  return types;
}
59 | ||
60 | /* Return the type used for DWARF operations where the type is | |
61 | unspecified in the DWARF spec. Only certain sizes are | |
62 | supported. */ | |
63 | ||
64 | static struct type * | |
65 | dwarf_expr_address_type (struct dwarf_expr_context *ctx) | |
66 | { | |
9a3c8263 SM |
67 | struct dwarf_gdbarch_types *types |
68 | = (struct dwarf_gdbarch_types *) gdbarch_data (ctx->gdbarch, | |
69 | dwarf_arch_cookie); | |
8a9b8146 TT |
70 | int ndx; |
71 | ||
72 | if (ctx->addr_size == 2) | |
73 | ndx = 0; | |
74 | else if (ctx->addr_size == 4) | |
75 | ndx = 1; | |
76 | else if (ctx->addr_size == 8) | |
77 | ndx = 2; | |
78 | else | |
79 | error (_("Unsupported address size in DWARF expressions: %d bits"), | |
80 | 8 * ctx->addr_size); | |
81 | ||
82 | if (types->dw_types[ndx] == NULL) | |
83 | types->dw_types[ndx] | |
84 | = arch_integer_type (ctx->gdbarch, | |
85 | 8 * ctx->addr_size, | |
86 | 0, "<signed DWARF address type>"); | |
87 | ||
88 | return types->dw_types[ndx]; | |
89 | } | |
90 | ||
4c2df51b DJ |
91 | /* Create a new context for the expression evaluator. */ |
92 | ||
93 | struct dwarf_expr_context * | |
e4adbba9 | 94 | new_dwarf_expr_context (void) |
4c2df51b DJ |
95 | { |
96 | struct dwarf_expr_context *retval; | |
9a619af0 | 97 | |
8d749320 | 98 | retval = XCNEW (struct dwarf_expr_context); |
18ec9831 KB |
99 | retval->stack_len = 0; |
100 | retval->stack_allocated = 10; | |
8d749320 | 101 | retval->stack = XNEWVEC (struct dwarf_stack_value, retval->stack_allocated); |
87808bd6 JB |
102 | retval->num_pieces = 0; |
103 | retval->pieces = 0; | |
1e3a102a | 104 | retval->max_recursion_depth = 0x100; |
4c2df51b DJ |
105 | return retval; |
106 | } | |
107 | ||
/* Release the memory allocated to CTX: the value stack, the piece
   list, and the context object itself.  */

void
free_dwarf_expr_context (struct dwarf_expr_context *ctx)
{
  xfree (ctx->stack);
  xfree (ctx->pieces);
  xfree (ctx);
}
117 | ||
4a227398 TT |
/* Helper for make_cleanup_free_dwarf_expr_context.  ARG is the
   dwarf_expr_context to free, passed as void * by the cleanup
   machinery.  */

static void
free_dwarf_expr_context_cleanup (void *arg)
{
  free_dwarf_expr_context ((struct dwarf_expr_context *) arg);
}
125 | ||
/* Return a cleanup that calls free_dwarf_expr_context on CTX when
   run.  */

struct cleanup *
make_cleanup_free_dwarf_expr_context (struct dwarf_expr_context *ctx)
{
  return make_cleanup (free_dwarf_expr_context_cleanup, ctx);
}
133 | ||
4c2df51b DJ |
134 | /* Expand the memory allocated to CTX's stack to contain at least |
135 | NEED more elements than are currently used. */ | |
136 | ||
137 | static void | |
138 | dwarf_expr_grow_stack (struct dwarf_expr_context *ctx, size_t need) | |
139 | { | |
140 | if (ctx->stack_len + need > ctx->stack_allocated) | |
141 | { | |
18ec9831 | 142 | size_t newlen = ctx->stack_len + need + 10; |
9a619af0 | 143 | |
224c3ddb | 144 | ctx->stack = XRESIZEVEC (struct dwarf_stack_value, ctx->stack, newlen); |
18ec9831 | 145 | ctx->stack_allocated = newlen; |
4c2df51b DJ |
146 | } |
147 | } | |
148 | ||
149 | /* Push VALUE onto CTX's stack. */ | |
150 | ||
8a9b8146 TT |
151 | static void |
152 | dwarf_expr_push (struct dwarf_expr_context *ctx, struct value *value, | |
44353522 | 153 | int in_stack_memory) |
4c2df51b | 154 | { |
44353522 DE |
155 | struct dwarf_stack_value *v; |
156 | ||
4c2df51b | 157 | dwarf_expr_grow_stack (ctx, 1); |
44353522 DE |
158 | v = &ctx->stack[ctx->stack_len++]; |
159 | v->value = value; | |
160 | v->in_stack_memory = in_stack_memory; | |
4c2df51b DJ |
161 | } |
162 | ||
8a9b8146 | 163 | /* Push VALUE onto CTX's stack. */ |
4c2df51b DJ |
164 | |
165 | void | |
8a9b8146 TT |
166 | dwarf_expr_push_address (struct dwarf_expr_context *ctx, CORE_ADDR value, |
167 | int in_stack_memory) | |
168 | { | |
169 | dwarf_expr_push (ctx, | |
170 | value_from_ulongest (dwarf_expr_address_type (ctx), value), | |
171 | in_stack_memory); | |
172 | } | |
173 | ||
/* Pop the top item off of CTX's stack.  The popped value is
   discarded; callers that need it fetch it first with
   dwarf_expr_fetch.  Errors on underflow.  */

static void
dwarf_expr_pop (struct dwarf_expr_context *ctx)
{
  if (ctx->stack_len <= 0)
    error (_("dwarf expression stack underflow"));
  ctx->stack_len--;
}
183 | ||
184 | /* Retrieve the N'th item on CTX's stack. */ | |
185 | ||
8a9b8146 | 186 | struct value * |
4c2df51b DJ |
187 | dwarf_expr_fetch (struct dwarf_expr_context *ctx, int n) |
188 | { | |
ef0fdf07 | 189 | if (ctx->stack_len <= n) |
3e43a32a MS |
190 | error (_("Asked for position %d of stack, " |
191 | "stack only has %d elements on it."), | |
4c2df51b | 192 | n, ctx->stack_len); |
44353522 | 193 | return ctx->stack[ctx->stack_len - (1 + n)].value; |
8a9b8146 TT |
194 | } |
195 | ||
196 | /* Require that TYPE be an integral type; throw an exception if not. */ | |
44353522 | 197 | |
8a9b8146 TT |
198 | static void |
199 | dwarf_require_integral (struct type *type) | |
200 | { | |
201 | if (TYPE_CODE (type) != TYPE_CODE_INT | |
202 | && TYPE_CODE (type) != TYPE_CODE_CHAR | |
203 | && TYPE_CODE (type) != TYPE_CODE_BOOL) | |
204 | error (_("integral type expected in DWARF expression")); | |
205 | } | |
206 | ||
207 | /* Return the unsigned form of TYPE. TYPE is necessarily an integral | |
208 | type. */ | |
209 | ||
210 | static struct type * | |
211 | get_unsigned_type (struct gdbarch *gdbarch, struct type *type) | |
212 | { | |
213 | switch (TYPE_LENGTH (type)) | |
214 | { | |
215 | case 1: | |
216 | return builtin_type (gdbarch)->builtin_uint8; | |
217 | case 2: | |
218 | return builtin_type (gdbarch)->builtin_uint16; | |
219 | case 4: | |
220 | return builtin_type (gdbarch)->builtin_uint32; | |
221 | case 8: | |
222 | return builtin_type (gdbarch)->builtin_uint64; | |
223 | default: | |
224 | error (_("no unsigned variant found for type, while evaluating " | |
225 | "DWARF expression")); | |
226 | } | |
44353522 DE |
227 | } |
228 | ||
8ddd9a20 TT |
229 | /* Return the signed form of TYPE. TYPE is necessarily an integral |
230 | type. */ | |
231 | ||
232 | static struct type * | |
233 | get_signed_type (struct gdbarch *gdbarch, struct type *type) | |
234 | { | |
235 | switch (TYPE_LENGTH (type)) | |
236 | { | |
237 | case 1: | |
238 | return builtin_type (gdbarch)->builtin_int8; | |
239 | case 2: | |
240 | return builtin_type (gdbarch)->builtin_int16; | |
241 | case 4: | |
242 | return builtin_type (gdbarch)->builtin_int32; | |
243 | case 8: | |
244 | return builtin_type (gdbarch)->builtin_int64; | |
245 | default: | |
246 | error (_("no signed variant found for type, while evaluating " | |
247 | "DWARF expression")); | |
248 | } | |
249 | } | |
250 | ||
f2c7657e UW |
/* Retrieve the N'th item on CTX's stack, converted to an address.
   The item must have an integral type; it is first extracted as an
   unsigned integer in the arch's byte order, then converted through
   gdbarch_integer_to_address where the architecture defines one.  */

CORE_ADDR
dwarf_expr_fetch_address (struct dwarf_expr_context *ctx, int n)
{
  struct value *result_val = dwarf_expr_fetch (ctx, n);
  enum bfd_endian byte_order = gdbarch_byte_order (ctx->gdbarch);
  ULONGEST result;

  dwarf_require_integral (value_type (result_val));
  result = extract_unsigned_integer (value_contents (result_val),
				     TYPE_LENGTH (value_type (result_val)),
				     byte_order);

  /* For most architectures, calling extract_unsigned_integer() alone
     is sufficient for extracting an address.  However, some
     architectures (e.g. MIPS) use signed addresses and using
     extract_unsigned_integer() will not produce a correct
     result.  Make sure we invoke gdbarch_integer_to_address()
     for those architectures which require it.  */
  if (gdbarch_integer_to_address_p (ctx->gdbarch))
    {
      /* Re-serialize the value at the context's address size so the
	 conversion hook sees a correctly-sized buffer.  */
      gdb_byte *buf = (gdb_byte *) alloca (ctx->addr_size);
      struct type *int_type = get_unsigned_type (ctx->gdbarch,
						 value_type (result_val));

      store_unsigned_integer (buf, ctx->addr_size, byte_order, result);
      return gdbarch_integer_to_address (ctx->gdbarch, int_type, buf);
    }

  return (CORE_ADDR) result;
}
283 | ||
44353522 DE |
284 | /* Retrieve the in_stack_memory flag of the N'th item on CTX's stack. */ |
285 | ||
286 | int | |
287 | dwarf_expr_fetch_in_stack_memory (struct dwarf_expr_context *ctx, int n) | |
288 | { | |
289 | if (ctx->stack_len <= n) | |
3e43a32a MS |
290 | error (_("Asked for position %d of stack, " |
291 | "stack only has %d elements on it."), | |
44353522 DE |
292 | n, ctx->stack_len); |
293 | return ctx->stack[ctx->stack_len - (1 + n)].in_stack_memory; | |
4c2df51b DJ |
294 | } |
295 | ||
cb826367 TT |
/* Return true (non-zero) if the expression stack of CTX is empty.  */

static int
dwarf_expr_stack_empty_p (struct dwarf_expr_context *ctx)
{
  return ctx->stack_len == 0;
}
303 | ||
87808bd6 JB |
/* Add a new piece to CTX's piece list.  SIZE is the piece's size in
   the units implied by the surrounding DW_OP_piece/DW_OP_bit_piece
   operator; OFFSET is the piece's offset within the source value.
   The piece's location is taken from CTX->location, and its payload
   (literal data, memory address, register number, implicit-pointer
   target, or stack value) is taken from the context / current top of
   stack accordingly.  */
static void
add_piece (struct dwarf_expr_context *ctx, ULONGEST size, ULONGEST offset)
{
  struct dwarf_expr_piece *p;

  ctx->num_pieces++;

  /* Grow the piece array by one; pieces are appended in order.  */
  ctx->pieces
    = XRESIZEVEC (struct dwarf_expr_piece, ctx->pieces, ctx->num_pieces);

  p = &ctx->pieces[ctx->num_pieces - 1];
  p->location = ctx->location;
  p->size = size;
  p->offset = offset;

  if (p->location == DWARF_VALUE_LITERAL)
    {
      /* The literal bytes were recorded by DW_OP_implicit_value.  */
      p->v.literal.data = ctx->data;
      p->v.literal.length = ctx->len;
    }
  else if (dwarf_expr_stack_empty_p (ctx))
    {
      /* An empty stack with a non-literal location means this piece
	 has no location at all.  */
      p->location = DWARF_VALUE_OPTIMIZED_OUT;
      /* Also reset the context's location, for our callers.  This is
	 a somewhat strange approach, but this lets us avoid setting
	 the location to DWARF_VALUE_MEMORY in all the individual
	 cases in the evaluator.  */
      ctx->location = DWARF_VALUE_OPTIMIZED_OUT;
    }
  else if (p->location == DWARF_VALUE_MEMORY)
    {
      p->v.mem.addr = dwarf_expr_fetch_address (ctx, 0);
      p->v.mem.in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, 0);
    }
  else if (p->location == DWARF_VALUE_IMPLICIT_POINTER)
    {
      /* CTX->len was set by DW_OP_GNU_implicit_pointer to the
	 section offset of the referred-to DIE.  */
      p->v.ptr.die.sect_off = ctx->len;
      p->v.ptr.offset = value_as_long (dwarf_expr_fetch (ctx, 0));
    }
  else if (p->location == DWARF_VALUE_REGISTER)
    p->v.regno = value_as_long (dwarf_expr_fetch (ctx, 0));
  else
    {
      /* DWARF_VALUE_STACK: keep the value itself.  */
      p->v.value = dwarf_expr_fetch (ctx, 0);
    }
}
351 | ||
4c2df51b DJ |
/* Evaluate the expression at ADDR (LEN bytes long) using the context
   CTX.  The result is left on CTX's stack / piece list; errors are
   reported via error().  */

void
dwarf_expr_eval (struct dwarf_expr_context *ctx, const gdb_byte *addr,
		 size_t len)
{
  int old_recursion_depth = ctx->recursion_depth;

  execute_stack_op (ctx, addr, addr + len);

  /* CTX RECURSION_DEPTH becomes invalid if an exception was thrown here.  */

  gdb_assert (ctx->recursion_depth == old_recursion_depth);
}
367 | ||
f664829e | 368 | /* Helper to read a uleb128 value or throw an error. */ |
4c2df51b | 369 | |
0d45f56e | 370 | const gdb_byte * |
f664829e | 371 | safe_read_uleb128 (const gdb_byte *buf, const gdb_byte *buf_end, |
9fccedf7 | 372 | uint64_t *r) |
4c2df51b | 373 | { |
f664829e DE |
374 | buf = gdb_read_uleb128 (buf, buf_end, r); |
375 | if (buf == NULL) | |
376 | error (_("DWARF expression error: ran off end of buffer reading uleb128 value")); | |
4c2df51b DJ |
377 | return buf; |
378 | } | |
379 | ||
f664829e | 380 | /* Helper to read a sleb128 value or throw an error. */ |
4c2df51b | 381 | |
0d45f56e | 382 | const gdb_byte * |
f664829e | 383 | safe_read_sleb128 (const gdb_byte *buf, const gdb_byte *buf_end, |
9fccedf7 | 384 | int64_t *r) |
4c2df51b | 385 | { |
f664829e DE |
386 | buf = gdb_read_sleb128 (buf, buf_end, r); |
387 | if (buf == NULL) | |
388 | error (_("DWARF expression error: ran off end of buffer reading sleb128 value")); | |
389 | return buf; | |
390 | } | |
4c2df51b | 391 | |
f664829e DE |
392 | const gdb_byte * |
393 | safe_skip_leb128 (const gdb_byte *buf, const gdb_byte *buf_end) | |
394 | { | |
395 | buf = gdb_skip_leb128 (buf, buf_end); | |
396 | if (buf == NULL) | |
397 | error (_("DWARF expression error: ran off end of buffer reading leb128 value")); | |
4c2df51b DJ |
398 | return buf; |
399 | } | |
4c2df51b | 400 | \f |
cec03d70 TT |

/* Check that the current operator is either at the end of an
   expression, or that it is followed by a composition operator
   (DW_OP_piece or DW_OP_bit_piece).  OP_NAME names the operator
   being checked, for use in the error message.  */

void
dwarf_expr_require_composition (const gdb_byte *op_ptr, const gdb_byte *op_end,
				const char *op_name)
{
  /* It seems like DW_OP_GNU_uninit should be handled here.  However,
     it doesn't seem to make sense for DW_OP_*_value, and it was not
     checked at the other place that this function is called.  */
  if (op_ptr != op_end && *op_ptr != DW_OP_piece && *op_ptr != DW_OP_bit_piece)
    error (_("DWARF-2 expression error: `%s' operations must be "
	     "used either alone or in conjunction with DW_OP_piece "
	     "or DW_OP_bit_piece."),
	   op_name);
}
418 | ||
8a9b8146 TT |
/* Return true iff the types T1 and T2 are "the same".  This only does
   checks that might reasonably be needed to compare DWARF base
   types: type code, signedness and length.  */

static int
base_types_equal_p (struct type *t1, struct type *t2)
{
  return (TYPE_CODE (t1) == TYPE_CODE (t2)
	  && TYPE_UNSIGNED (t1) == TYPE_UNSIGNED (t2)
	  && TYPE_LENGTH (t1) == TYPE_LENGTH (t2));
}
432 | ||
/* A convenience function to call get_base_type on CTX and return the
   result.  DIE is the DIE whose type we need.  SIZE is non-zero if
   this function should verify that the resulting type has the correct
   size.  If the context provides no get_base_type callback, any type
   will do and builtin_int is returned.  */

static struct type *
dwarf_get_base_type (struct dwarf_expr_context *ctx, cu_offset die, int size)
{
  struct type *result;

  if (ctx->funcs->get_base_type)
    {
      result = ctx->funcs->get_base_type (ctx, die);
      if (result == NULL)
	error (_("Could not find type for DW_OP_GNU_const_type"));
      if (size != 0 && TYPE_LENGTH (result) != size)
	error (_("DW_OP_GNU_const_type has different sizes for type and data"));
    }
  else
    /* Anything will do.  */
    result = builtin_type (ctx->gdbarch)->builtin_int;

  return result;
}
457 | ||
8e3b41a9 JK |
/* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_reg* return the
   DWARF register number.  Otherwise return -1.  Recognizes a lone
   DW_OP_reg0..DW_OP_reg31, or DW_OP_regx / DW_OP_GNU_regval_type
   followed by their operands, as long as nothing else follows.  */

int
dwarf_block_to_dwarf_reg (const gdb_byte *buf, const gdb_byte *buf_end)
{
  uint64_t dwarf_reg;

  if (buf_end <= buf)
    return -1;
  if (*buf >= DW_OP_reg0 && *buf <= DW_OP_reg31)
    {
      /* A short-form register op must be the only byte.  */
      if (buf_end - buf != 1)
	return -1;
      return *buf - DW_OP_reg0;
    }

  if (*buf == DW_OP_GNU_regval_type)
    {
      /* Operands: register number (uleb128) and type DIE offset
	 (leb128, skipped).  */
      buf++;
      buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
      if (buf == NULL)
	return -1;
      buf = gdb_skip_leb128 (buf, buf_end);
      if (buf == NULL)
	return -1;
    }
  else if (*buf == DW_OP_regx)
    {
      /* Operand: register number (uleb128).  */
      buf++;
      buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
      if (buf == NULL)
	return -1;
    }
  else
    return -1;
  /* Reject trailing bytes and register numbers that do not fit in
     an int.  */
  if (buf != buf_end || (int) dwarf_reg != dwarf_reg)
    return -1;
  return dwarf_reg;
}
498 | ||
a471c594 JK |
/* If <BUF..BUF_END] contains DW_FORM_block* with just DW_OP_breg*(0) and
   DW_OP_deref* return the DWARF register number.  Otherwise return -1.
   DEREF_SIZE_RETURN contains -1 for DW_OP_deref; otherwise it contains the
   size from DW_OP_deref_size.  */

int
dwarf_block_to_dwarf_reg_deref (const gdb_byte *buf, const gdb_byte *buf_end,
				CORE_ADDR *deref_size_return)
{
  uint64_t dwarf_reg;
  int64_t offset;

  if (buf_end <= buf)
    return -1;

  if (*buf >= DW_OP_breg0 && *buf <= DW_OP_breg31)
    {
      dwarf_reg = *buf - DW_OP_breg0;
      buf++;
      /* The deref op must still follow.  */
      if (buf >= buf_end)
	return -1;
    }
  else if (*buf == DW_OP_bregx)
    {
      buf++;
      buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
      if (buf == NULL)
	return -1;
      /* Register number must fit in an int.  */
      if ((int) dwarf_reg != dwarf_reg)
	return -1;
    }
  else
    return -1;

  /* The breg offset must be exactly zero.  */
  buf = gdb_read_sleb128 (buf, buf_end, &offset);
  if (buf == NULL)
    return -1;
  if (offset != 0)
    return -1;

  if (*buf == DW_OP_deref)
    {
      buf++;
      *deref_size_return = -1;
    }
  else if (*buf == DW_OP_deref_size)
    {
      buf++;
      if (buf >= buf_end)
	return -1;
      *deref_size_return = *buf++;
    }
  else
    return -1;

  /* Nothing may follow the deref.  */
  if (buf != buf_end)
    return -1;

  return dwarf_reg;
}
559 | ||
e18b2753 JK |
/* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_fbreg(X) fill
   in FB_OFFSET_RETURN with the X offset and return 1.  Otherwise return 0.  */

int
dwarf_block_to_fb_offset (const gdb_byte *buf, const gdb_byte *buf_end,
			  CORE_ADDR *fb_offset_return)
{
  int64_t fb_offset;

  if (buf_end <= buf)
    return 0;

  if (*buf != DW_OP_fbreg)
    return 0;
  buf++;

  buf = gdb_read_sleb128 (buf, buf_end, &fb_offset);
  if (buf == NULL)
    return 0;
  *fb_offset_return = fb_offset;
  /* Reject trailing bytes and offsets that do not round-trip through
     CORE_ADDR.  */
  if (buf != buf_end || fb_offset != (LONGEST) *fb_offset_return)
    return 0;

  return 1;
}
585 | ||
/* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_bregSP(X) fill
   in SP_OFFSET_RETURN with the X offset and return 1.  Otherwise return 0.
   The matched SP register number depends on GDBARCH.  */

int
dwarf_block_to_sp_offset (struct gdbarch *gdbarch, const gdb_byte *buf,
			  const gdb_byte *buf_end, CORE_ADDR *sp_offset_return)
{
  uint64_t dwarf_reg;
  int64_t sp_offset;

  if (buf_end <= buf)
    return 0;
  if (*buf >= DW_OP_breg0 && *buf <= DW_OP_breg31)
    {
      dwarf_reg = *buf - DW_OP_breg0;
      buf++;
    }
  else
    {
      if (*buf != DW_OP_bregx)
	return 0;
      buf++;
      buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
      if (buf == NULL)
	return 0;
    }

  /* Only accept the register the arch designates as its stack
     pointer.  */
  if (gdbarch_dwarf2_reg_to_regnum (gdbarch, dwarf_reg)
      != gdbarch_sp_regnum (gdbarch))
    return 0;

  buf = gdb_read_sleb128 (buf, buf_end, &sp_offset);
  if (buf == NULL)
    return 0;
  *sp_offset_return = sp_offset;
  /* Reject trailing bytes and offsets that do not round-trip through
     CORE_ADDR.  */
  if (buf != buf_end || sp_offset != (LONGEST) *sp_offset_return)
    return 0;

  return 1;
}
627 | ||
4c2df51b DJ |
628 | /* The engine for the expression evaluator. Using the context in CTX, |
629 | evaluate the expression between OP_PTR and OP_END. */ | |
630 | ||
631 | static void | |
852483bc | 632 | execute_stack_op (struct dwarf_expr_context *ctx, |
0d45f56e | 633 | const gdb_byte *op_ptr, const gdb_byte *op_end) |
4c2df51b | 634 | { |
e17a4113 | 635 | enum bfd_endian byte_order = gdbarch_byte_order (ctx->gdbarch); |
8a9b8146 TT |
636 | /* Old-style "untyped" DWARF values need special treatment in a |
637 | couple of places, specifically DW_OP_mod and DW_OP_shr. We need | |
638 | a special type for these values so we can distinguish them from | |
639 | values that have an explicit type, because explicitly-typed | |
640 | values do not need special treatment. This special type must be | |
641 | different (in the `==' sense) from any base type coming from the | |
642 | CU. */ | |
643 | struct type *address_type = dwarf_expr_address_type (ctx); | |
9a619af0 | 644 | |
cec03d70 | 645 | ctx->location = DWARF_VALUE_MEMORY; |
42be36b3 | 646 | ctx->initialized = 1; /* Default is initialized. */ |
18ec9831 | 647 | |
1e3a102a JK |
648 | if (ctx->recursion_depth > ctx->max_recursion_depth) |
649 | error (_("DWARF-2 expression error: Loop detected (%d)."), | |
650 | ctx->recursion_depth); | |
651 | ctx->recursion_depth++; | |
652 | ||
4c2df51b DJ |
653 | while (op_ptr < op_end) |
654 | { | |
aead7601 | 655 | enum dwarf_location_atom op = (enum dwarf_location_atom) *op_ptr++; |
f2c7657e | 656 | ULONGEST result; |
44353522 DE |
657 | /* Assume the value is not in stack memory. |
658 | Code that knows otherwise sets this to 1. | |
659 | Some arithmetic on stack addresses can probably be assumed to still | |
660 | be a stack address, but we skip this complication for now. | |
661 | This is just an optimization, so it's always ok to punt | |
662 | and leave this as 0. */ | |
663 | int in_stack_memory = 0; | |
9fccedf7 DE |
664 | uint64_t uoffset, reg; |
665 | int64_t offset; | |
8a9b8146 | 666 | struct value *result_val = NULL; |
4c2df51b | 667 | |
e0e9434c TT |
668 | /* The DWARF expression might have a bug causing an infinite |
669 | loop. In that case, quitting is the only way out. */ | |
670 | QUIT; | |
671 | ||
4c2df51b DJ |
672 | switch (op) |
673 | { | |
674 | case DW_OP_lit0: | |
675 | case DW_OP_lit1: | |
676 | case DW_OP_lit2: | |
677 | case DW_OP_lit3: | |
678 | case DW_OP_lit4: | |
679 | case DW_OP_lit5: | |
680 | case DW_OP_lit6: | |
681 | case DW_OP_lit7: | |
682 | case DW_OP_lit8: | |
683 | case DW_OP_lit9: | |
684 | case DW_OP_lit10: | |
685 | case DW_OP_lit11: | |
686 | case DW_OP_lit12: | |
687 | case DW_OP_lit13: | |
688 | case DW_OP_lit14: | |
689 | case DW_OP_lit15: | |
690 | case DW_OP_lit16: | |
691 | case DW_OP_lit17: | |
692 | case DW_OP_lit18: | |
693 | case DW_OP_lit19: | |
694 | case DW_OP_lit20: | |
695 | case DW_OP_lit21: | |
696 | case DW_OP_lit22: | |
697 | case DW_OP_lit23: | |
698 | case DW_OP_lit24: | |
699 | case DW_OP_lit25: | |
700 | case DW_OP_lit26: | |
701 | case DW_OP_lit27: | |
702 | case DW_OP_lit28: | |
703 | case DW_OP_lit29: | |
704 | case DW_OP_lit30: | |
705 | case DW_OP_lit31: | |
706 | result = op - DW_OP_lit0; | |
8a9b8146 | 707 | result_val = value_from_ulongest (address_type, result); |
4c2df51b DJ |
708 | break; |
709 | ||
710 | case DW_OP_addr: | |
f2c7657e UW |
711 | result = extract_unsigned_integer (op_ptr, |
712 | ctx->addr_size, byte_order); | |
ae0d2f24 | 713 | op_ptr += ctx->addr_size; |
ac56253d TT |
714 | /* Some versions of GCC emit DW_OP_addr before |
715 | DW_OP_GNU_push_tls_address. In this case the value is an | |
716 | index, not an address. We don't support things like | |
717 | branching between the address and the TLS op. */ | |
718 | if (op_ptr >= op_end || *op_ptr != DW_OP_GNU_push_tls_address) | |
719 | result += ctx->offset; | |
8a9b8146 | 720 | result_val = value_from_ulongest (address_type, result); |
4c2df51b DJ |
721 | break; |
722 | ||
3019eac3 | 723 | case DW_OP_GNU_addr_index: |
49f6c839 DE |
724 | op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset); |
725 | result = (ctx->funcs->get_addr_index) (ctx->baton, uoffset); | |
726 | result += ctx->offset; | |
727 | result_val = value_from_ulongest (address_type, result); | |
728 | break; | |
729 | case DW_OP_GNU_const_index: | |
f664829e | 730 | op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset); |
3019eac3 DE |
731 | result = (ctx->funcs->get_addr_index) (ctx->baton, uoffset); |
732 | result_val = value_from_ulongest (address_type, result); | |
733 | break; | |
734 | ||
4c2df51b | 735 | case DW_OP_const1u: |
e17a4113 | 736 | result = extract_unsigned_integer (op_ptr, 1, byte_order); |
8a9b8146 | 737 | result_val = value_from_ulongest (address_type, result); |
4c2df51b DJ |
738 | op_ptr += 1; |
739 | break; | |
740 | case DW_OP_const1s: | |
e17a4113 | 741 | result = extract_signed_integer (op_ptr, 1, byte_order); |
8a9b8146 | 742 | result_val = value_from_ulongest (address_type, result); |
4c2df51b DJ |
743 | op_ptr += 1; |
744 | break; | |
745 | case DW_OP_const2u: | |
e17a4113 | 746 | result = extract_unsigned_integer (op_ptr, 2, byte_order); |
8a9b8146 | 747 | result_val = value_from_ulongest (address_type, result); |
4c2df51b DJ |
748 | op_ptr += 2; |
749 | break; | |
750 | case DW_OP_const2s: | |
e17a4113 | 751 | result = extract_signed_integer (op_ptr, 2, byte_order); |
8a9b8146 | 752 | result_val = value_from_ulongest (address_type, result); |
4c2df51b DJ |
753 | op_ptr += 2; |
754 | break; | |
755 | case DW_OP_const4u: | |
e17a4113 | 756 | result = extract_unsigned_integer (op_ptr, 4, byte_order); |
8a9b8146 | 757 | result_val = value_from_ulongest (address_type, result); |
4c2df51b DJ |
758 | op_ptr += 4; |
759 | break; | |
760 | case DW_OP_const4s: | |
e17a4113 | 761 | result = extract_signed_integer (op_ptr, 4, byte_order); |
8a9b8146 | 762 | result_val = value_from_ulongest (address_type, result); |
4c2df51b DJ |
763 | op_ptr += 4; |
764 | break; | |
765 | case DW_OP_const8u: | |
e17a4113 | 766 | result = extract_unsigned_integer (op_ptr, 8, byte_order); |
8a9b8146 | 767 | result_val = value_from_ulongest (address_type, result); |
4c2df51b DJ |
768 | op_ptr += 8; |
769 | break; | |
770 | case DW_OP_const8s: | |
e17a4113 | 771 | result = extract_signed_integer (op_ptr, 8, byte_order); |
8a9b8146 | 772 | result_val = value_from_ulongest (address_type, result); |
4c2df51b DJ |
773 | op_ptr += 8; |
774 | break; | |
775 | case DW_OP_constu: | |
f664829e | 776 | op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset); |
4c2df51b | 777 | result = uoffset; |
8a9b8146 | 778 | result_val = value_from_ulongest (address_type, result); |
4c2df51b DJ |
779 | break; |
780 | case DW_OP_consts: | |
f664829e | 781 | op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset); |
4c2df51b | 782 | result = offset; |
8a9b8146 | 783 | result_val = value_from_ulongest (address_type, result); |
4c2df51b DJ |
784 | break; |
785 | ||
786 | /* The DW_OP_reg operations are required to occur alone in | |
787 | location expressions. */ | |
788 | case DW_OP_reg0: | |
789 | case DW_OP_reg1: | |
790 | case DW_OP_reg2: | |
791 | case DW_OP_reg3: | |
792 | case DW_OP_reg4: | |
793 | case DW_OP_reg5: | |
794 | case DW_OP_reg6: | |
795 | case DW_OP_reg7: | |
796 | case DW_OP_reg8: | |
797 | case DW_OP_reg9: | |
798 | case DW_OP_reg10: | |
799 | case DW_OP_reg11: | |
800 | case DW_OP_reg12: | |
801 | case DW_OP_reg13: | |
802 | case DW_OP_reg14: | |
803 | case DW_OP_reg15: | |
804 | case DW_OP_reg16: | |
805 | case DW_OP_reg17: | |
806 | case DW_OP_reg18: | |
807 | case DW_OP_reg19: | |
808 | case DW_OP_reg20: | |
809 | case DW_OP_reg21: | |
810 | case DW_OP_reg22: | |
811 | case DW_OP_reg23: | |
812 | case DW_OP_reg24: | |
813 | case DW_OP_reg25: | |
814 | case DW_OP_reg26: | |
815 | case DW_OP_reg27: | |
816 | case DW_OP_reg28: | |
817 | case DW_OP_reg29: | |
818 | case DW_OP_reg30: | |
819 | case DW_OP_reg31: | |
42be36b3 CT |
820 | if (op_ptr != op_end |
821 | && *op_ptr != DW_OP_piece | |
d3b1e874 | 822 | && *op_ptr != DW_OP_bit_piece |
42be36b3 | 823 | && *op_ptr != DW_OP_GNU_uninit) |
8a3fe4f8 | 824 | error (_("DWARF-2 expression error: DW_OP_reg operations must be " |
64b9b334 | 825 | "used either alone or in conjunction with DW_OP_piece " |
d3b1e874 | 826 | "or DW_OP_bit_piece.")); |
4c2df51b | 827 | |
61fbb938 | 828 | result = op - DW_OP_reg0; |
8a9b8146 | 829 | result_val = value_from_ulongest (address_type, result); |
cec03d70 | 830 | ctx->location = DWARF_VALUE_REGISTER; |
4c2df51b DJ |
831 | break; |
832 | ||
833 | case DW_OP_regx: | |
f664829e | 834 | op_ptr = safe_read_uleb128 (op_ptr, op_end, ®); |
3cf03773 | 835 | dwarf_expr_require_composition (op_ptr, op_end, "DW_OP_regx"); |
4c2df51b | 836 | |
61fbb938 | 837 | result = reg; |
8a9b8146 | 838 | result_val = value_from_ulongest (address_type, result); |
cec03d70 | 839 | ctx->location = DWARF_VALUE_REGISTER; |
4c2df51b DJ |
840 | break; |
841 | ||
cec03d70 TT |
842 | case DW_OP_implicit_value: |
843 | { | |
9fccedf7 | 844 | uint64_t len; |
9a619af0 | 845 | |
f664829e | 846 | op_ptr = safe_read_uleb128 (op_ptr, op_end, &len); |
cec03d70 TT |
847 | if (op_ptr + len > op_end) |
848 | error (_("DW_OP_implicit_value: too few bytes available.")); | |
849 | ctx->len = len; | |
850 | ctx->data = op_ptr; | |
851 | ctx->location = DWARF_VALUE_LITERAL; | |
852 | op_ptr += len; | |
3cf03773 TT |
853 | dwarf_expr_require_composition (op_ptr, op_end, |
854 | "DW_OP_implicit_value"); | |
cec03d70 TT |
855 | } |
856 | goto no_push; | |
857 | ||
858 | case DW_OP_stack_value: | |
859 | ctx->location = DWARF_VALUE_STACK; | |
3cf03773 | 860 | dwarf_expr_require_composition (op_ptr, op_end, "DW_OP_stack_value"); |
cec03d70 TT |
861 | goto no_push; |
862 | ||
8cf6f0b1 TT |
863 | case DW_OP_GNU_implicit_pointer: |
864 | { | |
9fccedf7 | 865 | int64_t len; |
8cf6f0b1 | 866 | |
181cebd4 JK |
867 | if (ctx->ref_addr_size == -1) |
868 | error (_("DWARF-2 expression error: DW_OP_GNU_implicit_pointer " | |
869 | "is not allowed in frame context")); | |
870 | ||
8b9737bf | 871 | /* The referred-to DIE of sect_offset kind. */ |
181cebd4 | 872 | ctx->len = extract_unsigned_integer (op_ptr, ctx->ref_addr_size, |
8cf6f0b1 | 873 | byte_order); |
181cebd4 | 874 | op_ptr += ctx->ref_addr_size; |
8cf6f0b1 TT |
875 | |
876 | /* The byte offset into the data. */ | |
f664829e | 877 | op_ptr = safe_read_sleb128 (op_ptr, op_end, &len); |
8cf6f0b1 | 878 | result = (ULONGEST) len; |
8a9b8146 | 879 | result_val = value_from_ulongest (address_type, result); |
8cf6f0b1 TT |
880 | |
881 | ctx->location = DWARF_VALUE_IMPLICIT_POINTER; | |
882 | dwarf_expr_require_composition (op_ptr, op_end, | |
883 | "DW_OP_GNU_implicit_pointer"); | |
884 | } | |
885 | break; | |
886 | ||
4c2df51b DJ |
887 | case DW_OP_breg0: |
888 | case DW_OP_breg1: | |
889 | case DW_OP_breg2: | |
890 | case DW_OP_breg3: | |
891 | case DW_OP_breg4: | |
892 | case DW_OP_breg5: | |
893 | case DW_OP_breg6: | |
894 | case DW_OP_breg7: | |
895 | case DW_OP_breg8: | |
896 | case DW_OP_breg9: | |
897 | case DW_OP_breg10: | |
898 | case DW_OP_breg11: | |
899 | case DW_OP_breg12: | |
900 | case DW_OP_breg13: | |
901 | case DW_OP_breg14: | |
902 | case DW_OP_breg15: | |
903 | case DW_OP_breg16: | |
904 | case DW_OP_breg17: | |
905 | case DW_OP_breg18: | |
906 | case DW_OP_breg19: | |
907 | case DW_OP_breg20: | |
908 | case DW_OP_breg21: | |
909 | case DW_OP_breg22: | |
910 | case DW_OP_breg23: | |
911 | case DW_OP_breg24: | |
912 | case DW_OP_breg25: | |
913 | case DW_OP_breg26: | |
914 | case DW_OP_breg27: | |
915 | case DW_OP_breg28: | |
916 | case DW_OP_breg29: | |
917 | case DW_OP_breg30: | |
918 | case DW_OP_breg31: | |
919 | { | |
f664829e | 920 | op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset); |
b1370418 JB |
921 | result = (ctx->funcs->read_addr_from_reg) (ctx->baton, |
922 | op - DW_OP_breg0); | |
4c2df51b | 923 | result += offset; |
8a9b8146 | 924 | result_val = value_from_ulongest (address_type, result); |
4c2df51b DJ |
925 | } |
926 | break; | |
927 | case DW_OP_bregx: | |
928 | { | |
f664829e DE |
929 | op_ptr = safe_read_uleb128 (op_ptr, op_end, ®); |
930 | op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset); | |
b1370418 | 931 | result = (ctx->funcs->read_addr_from_reg) (ctx->baton, reg); |
4c2df51b | 932 | result += offset; |
8a9b8146 | 933 | result_val = value_from_ulongest (address_type, result); |
4c2df51b DJ |
934 | } |
935 | break; | |
936 | case DW_OP_fbreg: | |
937 | { | |
0d45f56e | 938 | const gdb_byte *datastart; |
4c2df51b DJ |
939 | size_t datalen; |
940 | unsigned int before_stack_len; | |
941 | ||
f664829e | 942 | op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset); |
4c2df51b DJ |
943 | /* Rather than create a whole new context, we simply |
944 | record the stack length before execution, then reset it | |
945 | afterwards, effectively erasing whatever the recursive | |
946 | call put there. */ | |
947 | before_stack_len = ctx->stack_len; | |
da62e633 AC |
948 | /* FIXME: cagney/2003-03-26: This code should be using |
949 | get_frame_base_address(), and then implement a dwarf2 | |
950 | specific this_base method. */ | |
9e8b7a03 | 951 | (ctx->funcs->get_frame_base) (ctx->baton, &datastart, &datalen); |
4c2df51b | 952 | dwarf_expr_eval (ctx, datastart, datalen); |
f2c7657e UW |
953 | if (ctx->location == DWARF_VALUE_MEMORY) |
954 | result = dwarf_expr_fetch_address (ctx, 0); | |
955 | else if (ctx->location == DWARF_VALUE_REGISTER) | |
b1370418 JB |
956 | result = (ctx->funcs->read_addr_from_reg) |
957 | (ctx->baton, | |
958 | value_as_long (dwarf_expr_fetch (ctx, 0))); | |
f2c7657e | 959 | else |
3e43a32a MS |
960 | error (_("Not implemented: computing frame " |
961 | "base using explicit value operator")); | |
4c2df51b | 962 | result = result + offset; |
8a9b8146 | 963 | result_val = value_from_ulongest (address_type, result); |
44353522 | 964 | in_stack_memory = 1; |
4c2df51b | 965 | ctx->stack_len = before_stack_len; |
cec03d70 | 966 | ctx->location = DWARF_VALUE_MEMORY; |
4c2df51b DJ |
967 | } |
968 | break; | |
44353522 | 969 | |
4c2df51b | 970 | case DW_OP_dup: |
8a9b8146 | 971 | result_val = dwarf_expr_fetch (ctx, 0); |
44353522 | 972 | in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, 0); |
4c2df51b DJ |
973 | break; |
974 | ||
975 | case DW_OP_drop: | |
976 | dwarf_expr_pop (ctx); | |
977 | goto no_push; | |
978 | ||
979 | case DW_OP_pick: | |
980 | offset = *op_ptr++; | |
8a9b8146 | 981 | result_val = dwarf_expr_fetch (ctx, offset); |
44353522 | 982 | in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, offset); |
4c2df51b | 983 | break; |
9f3fe11c TG |
984 | |
985 | case DW_OP_swap: | |
986 | { | |
44353522 | 987 | struct dwarf_stack_value t1, t2; |
9f3fe11c TG |
988 | |
989 | if (ctx->stack_len < 2) | |
3e43a32a | 990 | error (_("Not enough elements for " |
0963b4bd | 991 | "DW_OP_swap. Need 2, have %d."), |
9f3fe11c TG |
992 | ctx->stack_len); |
993 | t1 = ctx->stack[ctx->stack_len - 1]; | |
994 | t2 = ctx->stack[ctx->stack_len - 2]; | |
995 | ctx->stack[ctx->stack_len - 1] = t2; | |
996 | ctx->stack[ctx->stack_len - 2] = t1; | |
997 | goto no_push; | |
998 | } | |
4c2df51b DJ |
999 | |
1000 | case DW_OP_over: | |
8a9b8146 | 1001 | result_val = dwarf_expr_fetch (ctx, 1); |
44353522 | 1002 | in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, 1); |
4c2df51b DJ |
1003 | break; |
1004 | ||
1005 | case DW_OP_rot: | |
1006 | { | |
44353522 | 1007 | struct dwarf_stack_value t1, t2, t3; |
4c2df51b DJ |
1008 | |
1009 | if (ctx->stack_len < 3) | |
0963b4bd MS |
1010 | error (_("Not enough elements for " |
1011 | "DW_OP_rot. Need 3, have %d."), | |
4c2df51b DJ |
1012 | ctx->stack_len); |
1013 | t1 = ctx->stack[ctx->stack_len - 1]; | |
1014 | t2 = ctx->stack[ctx->stack_len - 2]; | |
1015 | t3 = ctx->stack[ctx->stack_len - 3]; | |
1016 | ctx->stack[ctx->stack_len - 1] = t2; | |
1017 | ctx->stack[ctx->stack_len - 2] = t3; | |
1018 | ctx->stack[ctx->stack_len - 3] = t1; | |
1019 | goto no_push; | |
1020 | } | |
1021 | ||
1022 | case DW_OP_deref: | |
1023 | case DW_OP_deref_size: | |
8a9b8146 | 1024 | case DW_OP_GNU_deref_type: |
f2c7657e UW |
1025 | { |
1026 | int addr_size = (op == DW_OP_deref ? ctx->addr_size : *op_ptr++); | |
224c3ddb | 1027 | gdb_byte *buf = (gdb_byte *) alloca (addr_size); |
f2c7657e | 1028 | CORE_ADDR addr = dwarf_expr_fetch_address (ctx, 0); |
8a9b8146 TT |
1029 | struct type *type; |
1030 | ||
f2c7657e UW |
1031 | dwarf_expr_pop (ctx); |
1032 | ||
8a9b8146 TT |
1033 | if (op == DW_OP_GNU_deref_type) |
1034 | { | |
b64f50a1 | 1035 | cu_offset type_die; |
8a9b8146 | 1036 | |
f664829e | 1037 | op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset); |
b64f50a1 | 1038 | type_die.cu_off = uoffset; |
8a9b8146 TT |
1039 | type = dwarf_get_base_type (ctx, type_die, 0); |
1040 | } | |
1041 | else | |
1042 | type = address_type; | |
1043 | ||
9e8b7a03 | 1044 | (ctx->funcs->read_mem) (ctx->baton, buf, addr, addr_size); |
325663dc JB |
1045 | |
1046 | /* If the size of the object read from memory is different | |
1047 | from the type length, we need to zero-extend it. */ | |
1048 | if (TYPE_LENGTH (type) != addr_size) | |
1049 | { | |
1050 | ULONGEST result = | |
1051 | extract_unsigned_integer (buf, addr_size, byte_order); | |
1052 | ||
224c3ddb | 1053 | buf = (gdb_byte *) alloca (TYPE_LENGTH (type)); |
325663dc JB |
1054 | store_unsigned_integer (buf, TYPE_LENGTH (type), |
1055 | byte_order, result); | |
1056 | } | |
1057 | ||
8a9b8146 | 1058 | result_val = value_from_contents_and_address (type, buf, addr); |
f2c7657e UW |
1059 | break; |
1060 | } | |
1061 | ||
4c2df51b DJ |
1062 | case DW_OP_abs: |
1063 | case DW_OP_neg: | |
1064 | case DW_OP_not: | |
1065 | case DW_OP_plus_uconst: | |
8a9b8146 TT |
1066 | { |
1067 | /* Unary operations. */ | |
1068 | result_val = dwarf_expr_fetch (ctx, 0); | |
1069 | dwarf_expr_pop (ctx); | |
4c2df51b | 1070 | |
8a9b8146 TT |
1071 | switch (op) |
1072 | { | |
1073 | case DW_OP_abs: | |
1074 | if (value_less (result_val, | |
1075 | value_zero (value_type (result_val), not_lval))) | |
1076 | result_val = value_neg (result_val); | |
1077 | break; | |
1078 | case DW_OP_neg: | |
1079 | result_val = value_neg (result_val); | |
1080 | break; | |
1081 | case DW_OP_not: | |
1082 | dwarf_require_integral (value_type (result_val)); | |
1083 | result_val = value_complement (result_val); | |
1084 | break; | |
1085 | case DW_OP_plus_uconst: | |
1086 | dwarf_require_integral (value_type (result_val)); | |
1087 | result = value_as_long (result_val); | |
f664829e | 1088 | op_ptr = safe_read_uleb128 (op_ptr, op_end, ®); |
8a9b8146 TT |
1089 | result += reg; |
1090 | result_val = value_from_ulongest (address_type, result); | |
1091 | break; | |
1092 | } | |
1093 | } | |
4c2df51b DJ |
1094 | break; |
1095 | ||
1096 | case DW_OP_and: | |
1097 | case DW_OP_div: | |
1098 | case DW_OP_minus: | |
1099 | case DW_OP_mod: | |
1100 | case DW_OP_mul: | |
1101 | case DW_OP_or: | |
1102 | case DW_OP_plus: | |
1103 | case DW_OP_shl: | |
1104 | case DW_OP_shr: | |
1105 | case DW_OP_shra: | |
1106 | case DW_OP_xor: | |
1107 | case DW_OP_le: | |
1108 | case DW_OP_ge: | |
1109 | case DW_OP_eq: | |
1110 | case DW_OP_lt: | |
1111 | case DW_OP_gt: | |
1112 | case DW_OP_ne: | |
1113 | { | |
f2c7657e | 1114 | /* Binary operations. */ |
8a9b8146 | 1115 | struct value *first, *second; |
4c2df51b DJ |
1116 | |
1117 | second = dwarf_expr_fetch (ctx, 0); | |
1118 | dwarf_expr_pop (ctx); | |
1119 | ||
b263358a | 1120 | first = dwarf_expr_fetch (ctx, 0); |
4c2df51b DJ |
1121 | dwarf_expr_pop (ctx); |
1122 | ||
8a9b8146 TT |
1123 | if (! base_types_equal_p (value_type (first), value_type (second))) |
1124 | error (_("Incompatible types on DWARF stack")); | |
1125 | ||
4c2df51b DJ |
1126 | switch (op) |
1127 | { | |
1128 | case DW_OP_and: | |
8a9b8146 TT |
1129 | dwarf_require_integral (value_type (first)); |
1130 | dwarf_require_integral (value_type (second)); | |
1131 | result_val = value_binop (first, second, BINOP_BITWISE_AND); | |
4c2df51b DJ |
1132 | break; |
1133 | case DW_OP_div: | |
8a9b8146 | 1134 | result_val = value_binop (first, second, BINOP_DIV); |
99c87dab | 1135 | break; |
4c2df51b | 1136 | case DW_OP_minus: |
8a9b8146 | 1137 | result_val = value_binop (first, second, BINOP_SUB); |
4c2df51b DJ |
1138 | break; |
1139 | case DW_OP_mod: | |
8a9b8146 TT |
1140 | { |
1141 | int cast_back = 0; | |
1142 | struct type *orig_type = value_type (first); | |
1143 | ||
1144 | /* We have to special-case "old-style" untyped values | |
1145 | -- these must have mod computed using unsigned | |
1146 | math. */ | |
1147 | if (orig_type == address_type) | |
1148 | { | |
1149 | struct type *utype | |
1150 | = get_unsigned_type (ctx->gdbarch, orig_type); | |
1151 | ||
1152 | cast_back = 1; | |
1153 | first = value_cast (utype, first); | |
1154 | second = value_cast (utype, second); | |
1155 | } | |
1156 | /* Note that value_binop doesn't handle float or | |
1157 | decimal float here. This seems unimportant. */ | |
1158 | result_val = value_binop (first, second, BINOP_MOD); | |
1159 | if (cast_back) | |
1160 | result_val = value_cast (orig_type, result_val); | |
1161 | } | |
4c2df51b DJ |
1162 | break; |
1163 | case DW_OP_mul: | |
8a9b8146 | 1164 | result_val = value_binop (first, second, BINOP_MUL); |
4c2df51b DJ |
1165 | break; |
1166 | case DW_OP_or: | |
8a9b8146 TT |
1167 | dwarf_require_integral (value_type (first)); |
1168 | dwarf_require_integral (value_type (second)); | |
1169 | result_val = value_binop (first, second, BINOP_BITWISE_IOR); | |
4c2df51b DJ |
1170 | break; |
1171 | case DW_OP_plus: | |
8a9b8146 | 1172 | result_val = value_binop (first, second, BINOP_ADD); |
4c2df51b DJ |
1173 | break; |
1174 | case DW_OP_shl: | |
8a9b8146 TT |
1175 | dwarf_require_integral (value_type (first)); |
1176 | dwarf_require_integral (value_type (second)); | |
1177 | result_val = value_binop (first, second, BINOP_LSH); | |
4c2df51b DJ |
1178 | break; |
1179 | case DW_OP_shr: | |
8a9b8146 TT |
1180 | dwarf_require_integral (value_type (first)); |
1181 | dwarf_require_integral (value_type (second)); | |
b087e0ed | 1182 | if (!TYPE_UNSIGNED (value_type (first))) |
8a9b8146 TT |
1183 | { |
1184 | struct type *utype | |
1185 | = get_unsigned_type (ctx->gdbarch, value_type (first)); | |
1186 | ||
1187 | first = value_cast (utype, first); | |
1188 | } | |
1189 | ||
1190 | result_val = value_binop (first, second, BINOP_RSH); | |
1191 | /* Make sure we wind up with the same type we started | |
1192 | with. */ | |
1193 | if (value_type (result_val) != value_type (second)) | |
1194 | result_val = value_cast (value_type (second), result_val); | |
99c87dab | 1195 | break; |
4c2df51b | 1196 | case DW_OP_shra: |
8a9b8146 TT |
1197 | dwarf_require_integral (value_type (first)); |
1198 | dwarf_require_integral (value_type (second)); | |
8ddd9a20 TT |
1199 | if (TYPE_UNSIGNED (value_type (first))) |
1200 | { | |
1201 | struct type *stype | |
1202 | = get_signed_type (ctx->gdbarch, value_type (first)); | |
1203 | ||
1204 | first = value_cast (stype, first); | |
1205 | } | |
1206 | ||
8a9b8146 | 1207 | result_val = value_binop (first, second, BINOP_RSH); |
8ddd9a20 TT |
1208 | /* Make sure we wind up with the same type we started |
1209 | with. */ | |
1210 | if (value_type (result_val) != value_type (second)) | |
1211 | result_val = value_cast (value_type (second), result_val); | |
4c2df51b DJ |
1212 | break; |
1213 | case DW_OP_xor: | |
8a9b8146 TT |
1214 | dwarf_require_integral (value_type (first)); |
1215 | dwarf_require_integral (value_type (second)); | |
1216 | result_val = value_binop (first, second, BINOP_BITWISE_XOR); | |
4c2df51b DJ |
1217 | break; |
1218 | case DW_OP_le: | |
8a9b8146 TT |
1219 | /* A <= B is !(B < A). */ |
1220 | result = ! value_less (second, first); | |
1221 | result_val = value_from_ulongest (address_type, result); | |
4c2df51b DJ |
1222 | break; |
1223 | case DW_OP_ge: | |
8a9b8146 TT |
1224 | /* A >= B is !(A < B). */ |
1225 | result = ! value_less (first, second); | |
1226 | result_val = value_from_ulongest (address_type, result); | |
4c2df51b DJ |
1227 | break; |
1228 | case DW_OP_eq: | |
8a9b8146 TT |
1229 | result = value_equal (first, second); |
1230 | result_val = value_from_ulongest (address_type, result); | |
4c2df51b DJ |
1231 | break; |
1232 | case DW_OP_lt: | |
8a9b8146 TT |
1233 | result = value_less (first, second); |
1234 | result_val = value_from_ulongest (address_type, result); | |
4c2df51b DJ |
1235 | break; |
1236 | case DW_OP_gt: | |
8a9b8146 TT |
1237 | /* A > B is B < A. */ |
1238 | result = value_less (second, first); | |
1239 | result_val = value_from_ulongest (address_type, result); | |
4c2df51b DJ |
1240 | break; |
1241 | case DW_OP_ne: | |
8a9b8146 TT |
1242 | result = ! value_equal (first, second); |
1243 | result_val = value_from_ulongest (address_type, result); | |
4c2df51b DJ |
1244 | break; |
1245 | default: | |
1246 | internal_error (__FILE__, __LINE__, | |
e2e0b3e5 | 1247 | _("Can't be reached.")); |
4c2df51b | 1248 | } |
4c2df51b DJ |
1249 | } |
1250 | break; | |
1251 | ||
e7802207 | 1252 | case DW_OP_call_frame_cfa: |
9e8b7a03 | 1253 | result = (ctx->funcs->get_frame_cfa) (ctx->baton); |
8a9b8146 | 1254 | result_val = value_from_ulongest (address_type, result); |
44353522 | 1255 | in_stack_memory = 1; |
e7802207 TT |
1256 | break; |
1257 | ||
4c2df51b | 1258 | case DW_OP_GNU_push_tls_address: |
c3228f12 EZ |
1259 | /* Variable is at a constant offset in the thread-local |
1260 | storage block into the objfile for the current thread and | |
0963b4bd | 1261 | the dynamic linker module containing this expression. Here |
c3228f12 EZ |
1262 | we return returns the offset from that base. The top of the |
1263 | stack has the offset from the beginning of the thread | |
1264 | control block at which the variable is located. Nothing | |
1265 | should follow this operator, so the top of stack would be | |
1266 | returned. */ | |
8a9b8146 | 1267 | result = value_as_long (dwarf_expr_fetch (ctx, 0)); |
4c2df51b | 1268 | dwarf_expr_pop (ctx); |
9e8b7a03 | 1269 | result = (ctx->funcs->get_tls_address) (ctx->baton, result); |
8a9b8146 | 1270 | result_val = value_from_ulongest (address_type, result); |
4c2df51b DJ |
1271 | break; |
1272 | ||
1273 | case DW_OP_skip: | |
e17a4113 | 1274 | offset = extract_signed_integer (op_ptr, 2, byte_order); |
4c2df51b DJ |
1275 | op_ptr += 2; |
1276 | op_ptr += offset; | |
1277 | goto no_push; | |
1278 | ||
1279 | case DW_OP_bra: | |
8a9b8146 TT |
1280 | { |
1281 | struct value *val; | |
1282 | ||
1283 | offset = extract_signed_integer (op_ptr, 2, byte_order); | |
1284 | op_ptr += 2; | |
1285 | val = dwarf_expr_fetch (ctx, 0); | |
1286 | dwarf_require_integral (value_type (val)); | |
1287 | if (value_as_long (val) != 0) | |
1288 | op_ptr += offset; | |
1289 | dwarf_expr_pop (ctx); | |
1290 | } | |
4c2df51b DJ |
1291 | goto no_push; |
1292 | ||
1293 | case DW_OP_nop: | |
1294 | goto no_push; | |
1295 | ||
87808bd6 JB |
1296 | case DW_OP_piece: |
1297 | { | |
9fccedf7 | 1298 | uint64_t size; |
87808bd6 JB |
1299 | |
1300 | /* Record the piece. */ | |
f664829e | 1301 | op_ptr = safe_read_uleb128 (op_ptr, op_end, &size); |
d3b1e874 | 1302 | add_piece (ctx, 8 * size, 0); |
87808bd6 | 1303 | |
cec03d70 TT |
1304 | /* Pop off the address/regnum, and reset the location |
1305 | type. */ | |
cb826367 TT |
1306 | if (ctx->location != DWARF_VALUE_LITERAL |
1307 | && ctx->location != DWARF_VALUE_OPTIMIZED_OUT) | |
cec03d70 TT |
1308 | dwarf_expr_pop (ctx); |
1309 | ctx->location = DWARF_VALUE_MEMORY; | |
87808bd6 JB |
1310 | } |
1311 | goto no_push; | |
1312 | ||
d3b1e874 TT |
1313 | case DW_OP_bit_piece: |
1314 | { | |
9fccedf7 | 1315 | uint64_t size, offset; |
d3b1e874 TT |
1316 | |
1317 | /* Record the piece. */ | |
f664829e DE |
1318 | op_ptr = safe_read_uleb128 (op_ptr, op_end, &size); |
1319 | op_ptr = safe_read_uleb128 (op_ptr, op_end, &offset); | |
d3b1e874 TT |
1320 | add_piece (ctx, size, offset); |
1321 | ||
1322 | /* Pop off the address/regnum, and reset the location | |
1323 | type. */ | |
1324 | if (ctx->location != DWARF_VALUE_LITERAL | |
1325 | && ctx->location != DWARF_VALUE_OPTIMIZED_OUT) | |
1326 | dwarf_expr_pop (ctx); | |
1327 | ctx->location = DWARF_VALUE_MEMORY; | |
1328 | } | |
1329 | goto no_push; | |
1330 | ||
42be36b3 CT |
1331 | case DW_OP_GNU_uninit: |
1332 | if (op_ptr != op_end) | |
9c482037 | 1333 | error (_("DWARF-2 expression error: DW_OP_GNU_uninit must always " |
42be36b3 CT |
1334 | "be the very last op.")); |
1335 | ||
1336 | ctx->initialized = 0; | |
1337 | goto no_push; | |
1338 | ||
5c631832 | 1339 | case DW_OP_call2: |
b64f50a1 JK |
1340 | { |
1341 | cu_offset offset; | |
1342 | ||
1343 | offset.cu_off = extract_unsigned_integer (op_ptr, 2, byte_order); | |
1344 | op_ptr += 2; | |
1345 | ctx->funcs->dwarf_call (ctx, offset); | |
1346 | } | |
5c631832 JK |
1347 | goto no_push; |
1348 | ||
1349 | case DW_OP_call4: | |
b64f50a1 JK |
1350 | { |
1351 | cu_offset offset; | |
1352 | ||
1353 | offset.cu_off = extract_unsigned_integer (op_ptr, 4, byte_order); | |
1354 | op_ptr += 4; | |
1355 | ctx->funcs->dwarf_call (ctx, offset); | |
1356 | } | |
5c631832 | 1357 | goto no_push; |
dd90784c JK |
1358 | |
1359 | case DW_OP_GNU_entry_value: | |
8e3b41a9 | 1360 | { |
9fccedf7 | 1361 | uint64_t len; |
8e3b41a9 | 1362 | CORE_ADDR deref_size; |
24c5c679 | 1363 | union call_site_parameter_u kind_u; |
8e3b41a9 | 1364 | |
f664829e | 1365 | op_ptr = safe_read_uleb128 (op_ptr, op_end, &len); |
8e3b41a9 JK |
1366 | if (op_ptr + len > op_end) |
1367 | error (_("DW_OP_GNU_entry_value: too few bytes available.")); | |
1368 | ||
24c5c679 JK |
1369 | kind_u.dwarf_reg = dwarf_block_to_dwarf_reg (op_ptr, op_ptr + len); |
1370 | if (kind_u.dwarf_reg != -1) | |
8e3b41a9 JK |
1371 | { |
1372 | op_ptr += len; | |
24c5c679 JK |
1373 | ctx->funcs->push_dwarf_reg_entry_value (ctx, |
1374 | CALL_SITE_PARAMETER_DWARF_REG, | |
1375 | kind_u, | |
a471c594 JK |
1376 | -1 /* deref_size */); |
1377 | goto no_push; | |
1378 | } | |
1379 | ||
24c5c679 JK |
1380 | kind_u.dwarf_reg = dwarf_block_to_dwarf_reg_deref (op_ptr, |
1381 | op_ptr + len, | |
1382 | &deref_size); | |
1383 | if (kind_u.dwarf_reg != -1) | |
a471c594 JK |
1384 | { |
1385 | if (deref_size == -1) | |
1386 | deref_size = ctx->addr_size; | |
1387 | op_ptr += len; | |
24c5c679 JK |
1388 | ctx->funcs->push_dwarf_reg_entry_value (ctx, |
1389 | CALL_SITE_PARAMETER_DWARF_REG, | |
1390 | kind_u, deref_size); | |
8e3b41a9 JK |
1391 | goto no_push; |
1392 | } | |
1393 | ||
1394 | error (_("DWARF-2 expression error: DW_OP_GNU_entry_value is " | |
a471c594 JK |
1395 | "supported only for single DW_OP_reg* " |
1396 | "or for DW_OP_breg*(0)+DW_OP_deref*")); | |
8e3b41a9 | 1397 | } |
5c631832 | 1398 | |
1788b2d3 JK |
1399 | case DW_OP_GNU_parameter_ref: |
1400 | { | |
1401 | union call_site_parameter_u kind_u; | |
1402 | ||
1403 | kind_u.param_offset.cu_off = extract_unsigned_integer (op_ptr, 4, | |
1404 | byte_order); | |
1405 | op_ptr += 4; | |
1406 | ctx->funcs->push_dwarf_reg_entry_value (ctx, | |
1407 | CALL_SITE_PARAMETER_PARAM_OFFSET, | |
1408 | kind_u, | |
1409 | -1 /* deref_size */); | |
1410 | } | |
1411 | goto no_push; | |
1412 | ||
8a9b8146 TT |
1413 | case DW_OP_GNU_const_type: |
1414 | { | |
b64f50a1 | 1415 | cu_offset type_die; |
8a9b8146 TT |
1416 | int n; |
1417 | const gdb_byte *data; | |
1418 | struct type *type; | |
1419 | ||
f664829e | 1420 | op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset); |
b64f50a1 | 1421 | type_die.cu_off = uoffset; |
8a9b8146 TT |
1422 | n = *op_ptr++; |
1423 | data = op_ptr; | |
1424 | op_ptr += n; | |
1425 | ||
1426 | type = dwarf_get_base_type (ctx, type_die, n); | |
1427 | result_val = value_from_contents (type, data); | |
1428 | } | |
1429 | break; | |
1430 | ||
1431 | case DW_OP_GNU_regval_type: | |
1432 | { | |
b64f50a1 | 1433 | cu_offset type_die; |
8a9b8146 TT |
1434 | struct type *type; |
1435 | ||
f664829e DE |
1436 | op_ptr = safe_read_uleb128 (op_ptr, op_end, ®); |
1437 | op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset); | |
b64f50a1 | 1438 | type_die.cu_off = uoffset; |
8a9b8146 TT |
1439 | |
1440 | type = dwarf_get_base_type (ctx, type_die, 0); | |
0acf8b65 | 1441 | result_val = ctx->funcs->get_reg_value (ctx->baton, type, reg); |
8a9b8146 TT |
1442 | } |
1443 | break; | |
1444 | ||
1445 | case DW_OP_GNU_convert: | |
1446 | case DW_OP_GNU_reinterpret: | |
1447 | { | |
b64f50a1 | 1448 | cu_offset type_die; |
8a9b8146 TT |
1449 | struct type *type; |
1450 | ||
f664829e | 1451 | op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset); |
b64f50a1 | 1452 | type_die.cu_off = uoffset; |
8a9b8146 | 1453 | |
b64f50a1 | 1454 | if (type_die.cu_off == 0) |
c38c4bc5 TT |
1455 | type = address_type; |
1456 | else | |
1457 | type = dwarf_get_base_type (ctx, type_die, 0); | |
8a9b8146 TT |
1458 | |
1459 | result_val = dwarf_expr_fetch (ctx, 0); | |
1460 | dwarf_expr_pop (ctx); | |
1461 | ||
1462 | if (op == DW_OP_GNU_convert) | |
1463 | result_val = value_cast (type, result_val); | |
1464 | else if (type == value_type (result_val)) | |
1465 | { | |
1466 | /* Nothing. */ | |
1467 | } | |
1468 | else if (TYPE_LENGTH (type) | |
1469 | != TYPE_LENGTH (value_type (result_val))) | |
1470 | error (_("DW_OP_GNU_reinterpret has wrong size")); | |
1471 | else | |
1472 | result_val | |
1473 | = value_from_contents (type, | |
1474 | value_contents_all (result_val)); | |
1475 | } | |
1476 | break; | |
1477 | ||
08412b07 JB |
1478 | case DW_OP_push_object_address: |
1479 | /* Return the address of the object we are currently observing. */ | |
1480 | result = (ctx->funcs->get_object_address) (ctx->baton); | |
1481 | result_val = value_from_ulongest (address_type, result); | |
1482 | break; | |
1483 | ||
4c2df51b | 1484 | default: |
8a3fe4f8 | 1485 | error (_("Unhandled dwarf expression opcode 0x%x"), op); |
4c2df51b DJ |
1486 | } |
1487 | ||
1488 | /* Most things push a result value. */ | |
8a9b8146 TT |
1489 | gdb_assert (result_val != NULL); |
1490 | dwarf_expr_push (ctx, result_val, in_stack_memory); | |
82ae4854 | 1491 | no_push: |
b27cf2b3 | 1492 | ; |
4c2df51b | 1493 | } |
1e3a102a | 1494 | |
8cf6f0b1 TT |
1495 | /* To simplify our main caller, if the result is an implicit |
1496 | pointer, then make a pieced value. This is ok because we can't | |
1497 | have implicit pointers in contexts where pieces are invalid. */ | |
1498 | if (ctx->location == DWARF_VALUE_IMPLICIT_POINTER) | |
1499 | add_piece (ctx, 8 * ctx->addr_size, 0); | |
1500 | ||
dd90784c | 1501 | abort_expression: |
1e3a102a JK |
1502 | ctx->recursion_depth--; |
1503 | gdb_assert (ctx->recursion_depth >= 0); | |
8a9b8146 TT |
1504 | } |
1505 | ||
523f3620 JK |
1506 | /* Stub dwarf_expr_context_funcs.get_frame_base implementation. */ |
1507 | ||
1508 | void | |
1509 | ctx_no_get_frame_base (void *baton, const gdb_byte **start, size_t *length) | |
1510 | { | |
1511 | error (_("%s is invalid in this context"), "DW_OP_fbreg"); | |
1512 | } | |
1513 | ||
1514 | /* Stub dwarf_expr_context_funcs.get_frame_cfa implementation. */ | |
1515 | ||
1516 | CORE_ADDR | |
1517 | ctx_no_get_frame_cfa (void *baton) | |
1518 | { | |
1519 | error (_("%s is invalid in this context"), "DW_OP_call_frame_cfa"); | |
1520 | } | |
1521 | ||
1522 | /* Stub dwarf_expr_context_funcs.get_frame_pc implementation. */ | |
1523 | ||
1524 | CORE_ADDR | |
1525 | ctx_no_get_frame_pc (void *baton) | |
1526 | { | |
1527 | error (_("%s is invalid in this context"), "DW_OP_GNU_implicit_pointer"); | |
1528 | } | |
1529 | ||
1530 | /* Stub dwarf_expr_context_funcs.get_tls_address implementation. */ | |
1531 | ||
1532 | CORE_ADDR | |
1533 | ctx_no_get_tls_address (void *baton, CORE_ADDR offset) | |
1534 | { | |
1535 | error (_("%s is invalid in this context"), "DW_OP_GNU_push_tls_address"); | |
1536 | } | |
1537 | ||
1538 | /* Stub dwarf_expr_context_funcs.dwarf_call implementation. */ | |
1539 | ||
1540 | void | |
b64f50a1 | 1541 | ctx_no_dwarf_call (struct dwarf_expr_context *ctx, cu_offset die_offset) |
523f3620 JK |
1542 | { |
1543 | error (_("%s is invalid in this context"), "DW_OP_call*"); | |
1544 | } | |
1545 | ||
1546 | /* Stub dwarf_expr_context_funcs.get_base_type implementation. */ | |
1547 | ||
1548 | struct type * | |
b64f50a1 | 1549 | ctx_no_get_base_type (struct dwarf_expr_context *ctx, cu_offset die) |
523f3620 JK |
1550 | { |
1551 | error (_("Support for typed DWARF is not supported in this context")); | |
1552 | } | |
1553 | ||
8e3b41a9 JK |
1554 | /* Stub dwarf_expr_context_funcs.push_dwarf_block_entry_value |
1555 | implementation. */ | |
1556 | ||
1557 | void | |
1558 | ctx_no_push_dwarf_reg_entry_value (struct dwarf_expr_context *ctx, | |
24c5c679 JK |
1559 | enum call_site_parameter_kind kind, |
1560 | union call_site_parameter_u kind_u, | |
a471c594 | 1561 | int deref_size) |
8e3b41a9 JK |
1562 | { |
1563 | internal_error (__FILE__, __LINE__, | |
1564 | _("Support for DW_OP_GNU_entry_value is unimplemented")); | |
1565 | } | |
1566 | ||
3019eac3 DE |
1567 | /* Stub dwarf_expr_context_funcs.get_addr_index implementation. */ |
1568 | ||
1569 | CORE_ADDR | |
1570 | ctx_no_get_addr_index (void *baton, unsigned int index) | |
1571 | { | |
1572 | error (_("%s is invalid in this context"), "DW_OP_GNU_addr_index"); | |
1573 | } | |
1574 | ||
70221824 PA |
1575 | /* Provide a prototype to silence -Wmissing-prototypes. */ |
1576 | extern initialize_file_ftype _initialize_dwarf2expr; | |
1577 | ||
8a9b8146 TT |
1578 | void |
1579 | _initialize_dwarf2expr (void) | |
1580 | { | |
1581 | dwarf_arch_cookie | |
1582 | = gdbarch_data_register_post_init (dwarf_gdbarch_types_init); | |
4c2df51b | 1583 | } |