/* DWARF 2 Expression Evaluator.

   Copyright (C) 2001-2003, 2005, 2007-2012 Free Software Foundation,
   Inc.

   Contributed by Daniel Berlin (dan@dberlin.org)

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
23 | #include "defs.h" | |
24 | #include "symtab.h" | |
25 | #include "gdbtypes.h" | |
26 | #include "value.h" | |
27 | #include "gdbcore.h" | |
fa8f86ff | 28 | #include "dwarf2.h" |
4c2df51b | 29 | #include "dwarf2expr.h" |
1e3a102a | 30 | #include "gdb_assert.h" |
4c2df51b DJ |
31 | |
32 | /* Local prototypes. */ | |
33 | ||
34 | static void execute_stack_op (struct dwarf_expr_context *, | |
0d45f56e | 35 | const gdb_byte *, const gdb_byte *); |
4c2df51b | 36 | |
/* Cookie for gdbarch data.  Used by dwarf_expr_address_type to look
   up the per-architecture type cache below.  */

static struct gdbarch_data *dwarf_arch_cookie;

/* This holds gdbarch-specific types used by the DWARF expression
   evaluator.  See comments in execute_stack_op.  dw_types[0..2]
   cache the signed address types for 2-, 4- and 8-byte address
   sizes respectively; entries are created lazily by
   dwarf_expr_address_type.  */

struct dwarf_gdbarch_types
{
  struct type *dw_types[3];
};
48 | ||
49 | /* Allocate and fill in dwarf_gdbarch_types for an arch. */ | |
50 | ||
51 | static void * | |
52 | dwarf_gdbarch_types_init (struct gdbarch *gdbarch) | |
53 | { | |
54 | struct dwarf_gdbarch_types *types | |
55 | = GDBARCH_OBSTACK_ZALLOC (gdbarch, struct dwarf_gdbarch_types); | |
56 | ||
57 | /* The types themselves are lazily initialized. */ | |
58 | ||
59 | return types; | |
60 | } | |
61 | ||
62 | /* Return the type used for DWARF operations where the type is | |
63 | unspecified in the DWARF spec. Only certain sizes are | |
64 | supported. */ | |
65 | ||
66 | static struct type * | |
67 | dwarf_expr_address_type (struct dwarf_expr_context *ctx) | |
68 | { | |
69 | struct dwarf_gdbarch_types *types = gdbarch_data (ctx->gdbarch, | |
70 | dwarf_arch_cookie); | |
71 | int ndx; | |
72 | ||
73 | if (ctx->addr_size == 2) | |
74 | ndx = 0; | |
75 | else if (ctx->addr_size == 4) | |
76 | ndx = 1; | |
77 | else if (ctx->addr_size == 8) | |
78 | ndx = 2; | |
79 | else | |
80 | error (_("Unsupported address size in DWARF expressions: %d bits"), | |
81 | 8 * ctx->addr_size); | |
82 | ||
83 | if (types->dw_types[ndx] == NULL) | |
84 | types->dw_types[ndx] | |
85 | = arch_integer_type (ctx->gdbarch, | |
86 | 8 * ctx->addr_size, | |
87 | 0, "<signed DWARF address type>"); | |
88 | ||
89 | return types->dw_types[ndx]; | |
90 | } | |
91 | ||
4c2df51b DJ |
92 | /* Create a new context for the expression evaluator. */ |
93 | ||
94 | struct dwarf_expr_context * | |
e4adbba9 | 95 | new_dwarf_expr_context (void) |
4c2df51b DJ |
96 | { |
97 | struct dwarf_expr_context *retval; | |
9a619af0 | 98 | |
4c2df51b | 99 | retval = xcalloc (1, sizeof (struct dwarf_expr_context)); |
18ec9831 KB |
100 | retval->stack_len = 0; |
101 | retval->stack_allocated = 10; | |
b966cb8a TT |
102 | retval->stack = xmalloc (retval->stack_allocated |
103 | * sizeof (struct dwarf_stack_value)); | |
87808bd6 JB |
104 | retval->num_pieces = 0; |
105 | retval->pieces = 0; | |
1e3a102a | 106 | retval->max_recursion_depth = 0x100; |
4c2df51b DJ |
107 | return retval; |
108 | } | |
109 | ||
110 | /* Release the memory allocated to CTX. */ | |
111 | ||
112 | void | |
113 | free_dwarf_expr_context (struct dwarf_expr_context *ctx) | |
114 | { | |
115 | xfree (ctx->stack); | |
87808bd6 | 116 | xfree (ctx->pieces); |
4c2df51b DJ |
117 | xfree (ctx); |
118 | } | |
119 | ||
/* Helper for make_cleanup_free_dwarf_expr_context: a
   cleanup-compatible (void *) wrapper around
   free_dwarf_expr_context.  ARG is the dwarf_expr_context to
   free.  */

static void
free_dwarf_expr_context_cleanup (void *arg)
{
  free_dwarf_expr_context (arg);
}
127 | ||
/* Return a cleanup that calls free_dwarf_expr_context on CTX when
   run.  Ownership of CTX transfers to the cleanup chain.  */

struct cleanup *
make_cleanup_free_dwarf_expr_context (struct dwarf_expr_context *ctx)
{
  return make_cleanup (free_dwarf_expr_context_cleanup, ctx);
}
135 | ||
4c2df51b DJ |
136 | /* Expand the memory allocated to CTX's stack to contain at least |
137 | NEED more elements than are currently used. */ | |
138 | ||
139 | static void | |
140 | dwarf_expr_grow_stack (struct dwarf_expr_context *ctx, size_t need) | |
141 | { | |
142 | if (ctx->stack_len + need > ctx->stack_allocated) | |
143 | { | |
18ec9831 | 144 | size_t newlen = ctx->stack_len + need + 10; |
9a619af0 | 145 | |
4c2df51b | 146 | ctx->stack = xrealloc (ctx->stack, |
44353522 | 147 | newlen * sizeof (struct dwarf_stack_value)); |
18ec9831 | 148 | ctx->stack_allocated = newlen; |
4c2df51b DJ |
149 | } |
150 | } | |
151 | ||
152 | /* Push VALUE onto CTX's stack. */ | |
153 | ||
8a9b8146 TT |
154 | static void |
155 | dwarf_expr_push (struct dwarf_expr_context *ctx, struct value *value, | |
44353522 | 156 | int in_stack_memory) |
4c2df51b | 157 | { |
44353522 DE |
158 | struct dwarf_stack_value *v; |
159 | ||
4c2df51b | 160 | dwarf_expr_grow_stack (ctx, 1); |
44353522 DE |
161 | v = &ctx->stack[ctx->stack_len++]; |
162 | v->value = value; | |
163 | v->in_stack_memory = in_stack_memory; | |
4c2df51b DJ |
164 | } |
165 | ||
8a9b8146 | 166 | /* Push VALUE onto CTX's stack. */ |
4c2df51b DJ |
167 | |
168 | void | |
8a9b8146 TT |
169 | dwarf_expr_push_address (struct dwarf_expr_context *ctx, CORE_ADDR value, |
170 | int in_stack_memory) | |
171 | { | |
172 | dwarf_expr_push (ctx, | |
173 | value_from_ulongest (dwarf_expr_address_type (ctx), value), | |
174 | in_stack_memory); | |
175 | } | |
176 | ||
177 | /* Pop the top item off of CTX's stack. */ | |
178 | ||
179 | static void | |
4c2df51b DJ |
180 | dwarf_expr_pop (struct dwarf_expr_context *ctx) |
181 | { | |
182 | if (ctx->stack_len <= 0) | |
8a3fe4f8 | 183 | error (_("dwarf expression stack underflow")); |
4c2df51b DJ |
184 | ctx->stack_len--; |
185 | } | |
186 | ||
187 | /* Retrieve the N'th item on CTX's stack. */ | |
188 | ||
8a9b8146 | 189 | struct value * |
4c2df51b DJ |
190 | dwarf_expr_fetch (struct dwarf_expr_context *ctx, int n) |
191 | { | |
ef0fdf07 | 192 | if (ctx->stack_len <= n) |
3e43a32a MS |
193 | error (_("Asked for position %d of stack, " |
194 | "stack only has %d elements on it."), | |
4c2df51b | 195 | n, ctx->stack_len); |
44353522 | 196 | return ctx->stack[ctx->stack_len - (1 + n)].value; |
8a9b8146 TT |
197 | } |
198 | ||
199 | /* Require that TYPE be an integral type; throw an exception if not. */ | |
44353522 | 200 | |
8a9b8146 TT |
201 | static void |
202 | dwarf_require_integral (struct type *type) | |
203 | { | |
204 | if (TYPE_CODE (type) != TYPE_CODE_INT | |
205 | && TYPE_CODE (type) != TYPE_CODE_CHAR | |
206 | && TYPE_CODE (type) != TYPE_CODE_BOOL) | |
207 | error (_("integral type expected in DWARF expression")); | |
208 | } | |
209 | ||
210 | /* Return the unsigned form of TYPE. TYPE is necessarily an integral | |
211 | type. */ | |
212 | ||
213 | static struct type * | |
214 | get_unsigned_type (struct gdbarch *gdbarch, struct type *type) | |
215 | { | |
216 | switch (TYPE_LENGTH (type)) | |
217 | { | |
218 | case 1: | |
219 | return builtin_type (gdbarch)->builtin_uint8; | |
220 | case 2: | |
221 | return builtin_type (gdbarch)->builtin_uint16; | |
222 | case 4: | |
223 | return builtin_type (gdbarch)->builtin_uint32; | |
224 | case 8: | |
225 | return builtin_type (gdbarch)->builtin_uint64; | |
226 | default: | |
227 | error (_("no unsigned variant found for type, while evaluating " | |
228 | "DWARF expression")); | |
229 | } | |
44353522 DE |
230 | } |
231 | ||
8ddd9a20 TT |
232 | /* Return the signed form of TYPE. TYPE is necessarily an integral |
233 | type. */ | |
234 | ||
235 | static struct type * | |
236 | get_signed_type (struct gdbarch *gdbarch, struct type *type) | |
237 | { | |
238 | switch (TYPE_LENGTH (type)) | |
239 | { | |
240 | case 1: | |
241 | return builtin_type (gdbarch)->builtin_int8; | |
242 | case 2: | |
243 | return builtin_type (gdbarch)->builtin_int16; | |
244 | case 4: | |
245 | return builtin_type (gdbarch)->builtin_int32; | |
246 | case 8: | |
247 | return builtin_type (gdbarch)->builtin_int64; | |
248 | default: | |
249 | error (_("no signed variant found for type, while evaluating " | |
250 | "DWARF expression")); | |
251 | } | |
252 | } | |
253 | ||
/* Retrieve the N'th item on CTX's stack, converted to an address.
   The item must have an integral type; otherwise an error is
   thrown.  */

CORE_ADDR
dwarf_expr_fetch_address (struct dwarf_expr_context *ctx, int n)
{
  struct value *result_val = dwarf_expr_fetch (ctx, n);
  enum bfd_endian byte_order = gdbarch_byte_order (ctx->gdbarch);
  ULONGEST result;

  dwarf_require_integral (value_type (result_val));
  result = extract_unsigned_integer (value_contents (result_val),
				     TYPE_LENGTH (value_type (result_val)),
				     byte_order);

  /* For most architectures, calling extract_unsigned_integer() alone
     is sufficient for extracting an address.  However, some
     architectures (e.g. MIPS) use signed addresses and using
     extract_unsigned_integer() will not produce a correct
     result.  Make sure we invoke gdbarch_integer_to_address()
     for those architectures which require it.  */
  if (gdbarch_integer_to_address_p (ctx->gdbarch))
    {
      /* Re-serialize the value at the context's address size so the
	 gdbarch hook sees the bytes it expects.  */
      gdb_byte *buf = alloca (ctx->addr_size);
      struct type *int_type = get_unsigned_type (ctx->gdbarch,
						 value_type (result_val));

      store_unsigned_integer (buf, ctx->addr_size, byte_order, result);
      return gdbarch_integer_to_address (ctx->gdbarch, int_type, buf);
    }

  return (CORE_ADDR) result;
}
286 | ||
44353522 DE |
287 | /* Retrieve the in_stack_memory flag of the N'th item on CTX's stack. */ |
288 | ||
289 | int | |
290 | dwarf_expr_fetch_in_stack_memory (struct dwarf_expr_context *ctx, int n) | |
291 | { | |
292 | if (ctx->stack_len <= n) | |
3e43a32a MS |
293 | error (_("Asked for position %d of stack, " |
294 | "stack only has %d elements on it."), | |
44353522 DE |
295 | n, ctx->stack_len); |
296 | return ctx->stack[ctx->stack_len - (1 + n)].in_stack_memory; | |
4c2df51b DJ |
297 | } |
298 | ||
cb826367 TT |
299 | /* Return true if the expression stack is empty. */ |
300 | ||
301 | static int | |
302 | dwarf_expr_stack_empty_p (struct dwarf_expr_context *ctx) | |
303 | { | |
304 | return ctx->stack_len == 0; | |
305 | } | |
306 | ||
/* Add a new piece to CTX's piece list.  SIZE and OFFSET come from the
   DW_OP_piece / DW_OP_bit_piece operator being processed; how the
   piece's contents are recorded depends on CTX->location at the time
   of the call.  */
static void
add_piece (struct dwarf_expr_context *ctx, ULONGEST size, ULONGEST offset)
{
  struct dwarf_expr_piece *p;

  ctx->num_pieces++;

  /* Grow the piece array by one element; piece lists are short, so
     one xrealloc per piece is acceptable.  */
  ctx->pieces = xrealloc (ctx->pieces,
			  (ctx->num_pieces
			   * sizeof (struct dwarf_expr_piece)));

  p = &ctx->pieces[ctx->num_pieces - 1];
  p->location = ctx->location;
  p->size = size;
  p->offset = offset;

  if (p->location == DWARF_VALUE_LITERAL)
    {
      /* Literal pieces carry a pointer into the expression's data,
	 stashed in CTX by the DW_OP_implicit_value handler.  */
      p->v.literal.data = ctx->data;
      p->v.literal.length = ctx->len;
    }
  else if (dwarf_expr_stack_empty_p (ctx))
    {
      /* An empty stack means there is nothing to describe for this
	 piece.  */
      p->location = DWARF_VALUE_OPTIMIZED_OUT;
      /* Also reset the context's location, for our callers.  This is
	 a somewhat strange approach, but this lets us avoid setting
	 the location to DWARF_VALUE_MEMORY in all the individual
	 cases in the evaluator.  */
      ctx->location = DWARF_VALUE_OPTIMIZED_OUT;
    }
  else if (p->location == DWARF_VALUE_MEMORY)
    {
      /* Memory pieces take their address (and stack-memory flag) from
	 the top of the stack.  */
      p->v.mem.addr = dwarf_expr_fetch_address (ctx, 0);
      p->v.mem.in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, 0);
    }
  else if (p->location == DWARF_VALUE_IMPLICIT_POINTER)
    {
      /* CTX->len was stashed with the referenced DIE's offset by the
	 DW_OP_GNU_implicit_pointer handler.  */
      p->v.ptr.die.cu_off = ctx->len;
      p->v.ptr.offset = value_as_long (dwarf_expr_fetch (ctx, 0));
    }
  else if (p->location == DWARF_VALUE_REGISTER)
    p->v.regno = value_as_long (dwarf_expr_fetch (ctx, 0));
  else
    {
      /* Remaining case is presumably DWARF_VALUE_STACK: the piece's
	 value is the top of the stack — NOTE(review): confirm no
	 other location kinds can reach here.  */
      p->v.value = dwarf_expr_fetch (ctx, 0);
    }
}
355 | ||
4c2df51b DJ |
356 | /* Evaluate the expression at ADDR (LEN bytes long) using the context |
357 | CTX. */ | |
358 | ||
359 | void | |
0d45f56e TT |
360 | dwarf_expr_eval (struct dwarf_expr_context *ctx, const gdb_byte *addr, |
361 | size_t len) | |
4c2df51b | 362 | { |
1e3a102a JK |
363 | int old_recursion_depth = ctx->recursion_depth; |
364 | ||
4c2df51b | 365 | execute_stack_op (ctx, addr, addr + len); |
1e3a102a JK |
366 | |
367 | /* CTX RECURSION_DEPTH becomes invalid if an exception was thrown here. */ | |
368 | ||
369 | gdb_assert (ctx->recursion_depth == old_recursion_depth); | |
4c2df51b DJ |
370 | } |
371 | ||
372 | /* Decode the unsigned LEB128 constant at BUF into the variable pointed to | |
373 | by R, and return the new value of BUF. Verify that it doesn't extend | |
8e3b41a9 | 374 | past BUF_END. R can be NULL, the constant is then only skipped. */ |
4c2df51b | 375 | |
0d45f56e TT |
376 | const gdb_byte * |
377 | read_uleb128 (const gdb_byte *buf, const gdb_byte *buf_end, ULONGEST * r) | |
4c2df51b DJ |
378 | { |
379 | unsigned shift = 0; | |
380 | ULONGEST result = 0; | |
852483bc | 381 | gdb_byte byte; |
4c2df51b DJ |
382 | |
383 | while (1) | |
384 | { | |
385 | if (buf >= buf_end) | |
8a3fe4f8 | 386 | error (_("read_uleb128: Corrupted DWARF expression.")); |
4c2df51b DJ |
387 | |
388 | byte = *buf++; | |
9930639c | 389 | result |= ((ULONGEST) (byte & 0x7f)) << shift; |
4c2df51b DJ |
390 | if ((byte & 0x80) == 0) |
391 | break; | |
392 | shift += 7; | |
393 | } | |
8e3b41a9 JK |
394 | if (r) |
395 | *r = result; | |
4c2df51b DJ |
396 | return buf; |
397 | } | |
398 | ||
399 | /* Decode the signed LEB128 constant at BUF into the variable pointed to | |
400 | by R, and return the new value of BUF. Verify that it doesn't extend | |
8e3b41a9 | 401 | past BUF_END. R can be NULL, the constant is then only skipped. */ |
4c2df51b | 402 | |
0d45f56e TT |
403 | const gdb_byte * |
404 | read_sleb128 (const gdb_byte *buf, const gdb_byte *buf_end, LONGEST * r) | |
4c2df51b DJ |
405 | { |
406 | unsigned shift = 0; | |
407 | LONGEST result = 0; | |
852483bc | 408 | gdb_byte byte; |
4c2df51b DJ |
409 | |
410 | while (1) | |
411 | { | |
412 | if (buf >= buf_end) | |
8a3fe4f8 | 413 | error (_("read_sleb128: Corrupted DWARF expression.")); |
4c2df51b DJ |
414 | |
415 | byte = *buf++; | |
9930639c | 416 | result |= ((ULONGEST) (byte & 0x7f)) << shift; |
4c2df51b DJ |
417 | shift += 7; |
418 | if ((byte & 0x80) == 0) | |
419 | break; | |
420 | } | |
421 | if (shift < (sizeof (*r) * 8) && (byte & 0x40) != 0) | |
04ad99e6 | 422 | result |= -(((LONGEST) 1) << shift); |
4c2df51b | 423 | |
8e3b41a9 JK |
424 | if (r) |
425 | *r = result; | |
4c2df51b DJ |
426 | return buf; |
427 | } | |
4c2df51b | 428 | \f |
cec03d70 TT |
429 | |
430 | /* Check that the current operator is either at the end of an | |
431 | expression, or that it is followed by a composition operator. */ | |
432 | ||
3cf03773 TT |
433 | void |
434 | dwarf_expr_require_composition (const gdb_byte *op_ptr, const gdb_byte *op_end, | |
435 | const char *op_name) | |
cec03d70 TT |
436 | { |
437 | /* It seems like DW_OP_GNU_uninit should be handled here. However, | |
438 | it doesn't seem to make sense for DW_OP_*_value, and it was not | |
439 | checked at the other place that this function is called. */ | |
440 | if (op_ptr != op_end && *op_ptr != DW_OP_piece && *op_ptr != DW_OP_bit_piece) | |
441 | error (_("DWARF-2 expression error: `%s' operations must be " | |
64b9b334 | 442 | "used either alone or in conjunction with DW_OP_piece " |
cec03d70 TT |
443 | "or DW_OP_bit_piece."), |
444 | op_name); | |
445 | } | |
446 | ||
/* Return true iff the types T1 and T2 are "the same".  This only does
   checks that might reasonably be needed to compare DWARF base
   types.  */

static int
base_types_equal_p (struct type *t1, struct type *t2)
{
  return (TYPE_CODE (t1) == TYPE_CODE (t2)
	  && TYPE_UNSIGNED (t1) == TYPE_UNSIGNED (t2)
	  && TYPE_LENGTH (t1) == TYPE_LENGTH (t2));
}
460 | ||
/* A convenience function to call get_base_type on CTX and return the
   result.  DIE is the DIE whose type we need.  SIZE is non-zero if
   this function should verify that the resulting type has the correct
   size.  If CTX provides no get_base_type callback, a generic int
   type is returned instead.  */

static struct type *
dwarf_get_base_type (struct dwarf_expr_context *ctx, cu_offset die, int size)
{
  struct type *result;

  if (ctx->funcs->get_base_type)
    {
      result = ctx->funcs->get_base_type (ctx, die);
      if (result == NULL)
	error (_("Could not find type for DW_OP_GNU_const_type"));
      if (size != 0 && TYPE_LENGTH (result) != size)
	error (_("DW_OP_GNU_const_type has different sizes for type and data"));
    }
  else
    /* Anything will do.  */
    result = builtin_type (ctx->gdbarch)->builtin_int;

  return result;
}
485 | ||
/* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_reg* return the
   DWARF register number.  Otherwise return -1.  Recognized forms are a
   lone DW_OP_reg0..DW_OP_reg31, DW_OP_regx with a ULEB operand, and
   DW_OP_GNU_regval_type (whose type operand is skipped); the block
   must contain nothing else.  */

int
dwarf_block_to_dwarf_reg (const gdb_byte *buf, const gdb_byte *buf_end)
{
  ULONGEST dwarf_reg;

  if (buf_end <= buf)
    return -1;
  if (*buf >= DW_OP_reg0 && *buf <= DW_OP_reg31)
    {
      /* Single-byte form: exactly one opcode allowed.  */
      if (buf_end - buf != 1)
	return -1;
      return *buf - DW_OP_reg0;
    }

  if (*buf == DW_OP_GNU_regval_type)
    {
      buf++;
      buf = read_uleb128 (buf, buf_end, &dwarf_reg);
      /* Skip the type DIE offset operand.  */
      buf = read_uleb128 (buf, buf_end, NULL);
    }
  else if (*buf == DW_OP_regx)
    {
      buf++;
      buf = read_uleb128 (buf, buf_end, &dwarf_reg);
    }
  else
    return -1;
  /* Reject trailing bytes and register numbers that don't fit in an
     int.  */
  if (buf != buf_end || (int) dwarf_reg != dwarf_reg)
    return -1;
  return dwarf_reg;
}
520 | ||
/* If <BUF..BUF_END] contains DW_FORM_block* with just DW_OP_breg*(0) and
   DW_OP_deref* return the DWARF register number.  Otherwise return -1.
   DEREF_SIZE_RETURN contains -1 for DW_OP_deref; otherwise it contains the
   size from DW_OP_deref_size.  */

int
dwarf_block_to_dwarf_reg_deref (const gdb_byte *buf, const gdb_byte *buf_end,
				CORE_ADDR *deref_size_return)
{
  ULONGEST dwarf_reg;
  LONGEST offset;

  if (buf_end <= buf)
    return -1;
  /* First operator: DW_OP_breg0..31 or DW_OP_bregx.  */
  if (*buf >= DW_OP_breg0 && *buf <= DW_OP_breg31)
    {
      dwarf_reg = *buf - DW_OP_breg0;
      buf++;
    }
  else if (*buf == DW_OP_bregx)
    {
      buf++;
      buf = read_uleb128 (buf, buf_end, &dwarf_reg);
      /* Register number must fit in an int.  */
      if ((int) dwarf_reg != dwarf_reg)
	return -1;
    }
  else
    return -1;

  /* The breg offset must be exactly zero for this pattern.  */
  buf = read_sleb128 (buf, buf_end, &offset);
  if (offset != 0)
    return -1;

  if (buf >= buf_end)
    return -1;

  /* Second operator: DW_OP_deref or DW_OP_deref_size.  */
  if (*buf == DW_OP_deref)
    {
      buf++;
      *deref_size_return = -1;
    }
  else if (*buf == DW_OP_deref_size)
    {
      buf++;
      if (buf >= buf_end)
	return -1;
      *deref_size_return = *buf++;
    }
  else
    return -1;

  /* No trailing bytes allowed.  */
  if (buf != buf_end)
    return -1;

  return dwarf_reg;
}
577 | ||
/* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_fbreg(X) fill
   in FB_OFFSET_RETURN with the X offset and return 1.  Otherwise return 0.  */

int
dwarf_block_to_fb_offset (const gdb_byte *buf, const gdb_byte *buf_end,
			  CORE_ADDR *fb_offset_return)
{
  LONGEST fb_offset;

  if (buf_end <= buf)
    return 0;

  if (*buf != DW_OP_fbreg)
    return 0;
  buf++;

  buf = read_sleb128 (buf, buf_end, &fb_offset);
  *fb_offset_return = fb_offset;
  /* Reject trailing bytes and offsets that don't survive the
     LONGEST -> CORE_ADDR round-trip.  */
  if (buf != buf_end || fb_offset != (LONGEST) *fb_offset_return)
    return 0;

  return 1;
}
601 | ||
/* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_bregSP(X) fill
   in SP_OFFSET_RETURN with the X offset and return 1.  Otherwise return 0.
   The matched SP register number depends on GDBARCH.  */

int
dwarf_block_to_sp_offset (struct gdbarch *gdbarch, const gdb_byte *buf,
			  const gdb_byte *buf_end, CORE_ADDR *sp_offset_return)
{
  ULONGEST dwarf_reg;
  LONGEST sp_offset;

  if (buf_end <= buf)
    return 0;
  /* Accept either the compact DW_OP_breg0..31 form or DW_OP_bregx.  */
  if (*buf >= DW_OP_breg0 && *buf <= DW_OP_breg31)
    {
      dwarf_reg = *buf - DW_OP_breg0;
      buf++;
    }
  else
    {
      if (*buf != DW_OP_bregx)
	return 0;
      buf++;
      buf = read_uleb128 (buf, buf_end, &dwarf_reg);
    }

  /* The base register must map to this architecture's SP.  */
  if (gdbarch_dwarf2_reg_to_regnum (gdbarch, dwarf_reg)
      != gdbarch_sp_regnum (gdbarch))
    return 0;

  buf = read_sleb128 (buf, buf_end, &sp_offset);
  *sp_offset_return = sp_offset;
  /* Reject trailing bytes and offsets that don't survive the
     LONGEST -> CORE_ADDR round-trip.  */
  if (buf != buf_end || sp_offset != (LONGEST) *sp_offset_return)
    return 0;

  return 1;
}
639 | ||
4c2df51b DJ |
640 | /* The engine for the expression evaluator. Using the context in CTX, |
641 | evaluate the expression between OP_PTR and OP_END. */ | |
642 | ||
643 | static void | |
852483bc | 644 | execute_stack_op (struct dwarf_expr_context *ctx, |
0d45f56e | 645 | const gdb_byte *op_ptr, const gdb_byte *op_end) |
4c2df51b | 646 | { |
e17a4113 | 647 | enum bfd_endian byte_order = gdbarch_byte_order (ctx->gdbarch); |
8a9b8146 TT |
648 | /* Old-style "untyped" DWARF values need special treatment in a |
649 | couple of places, specifically DW_OP_mod and DW_OP_shr. We need | |
650 | a special type for these values so we can distinguish them from | |
651 | values that have an explicit type, because explicitly-typed | |
652 | values do not need special treatment. This special type must be | |
653 | different (in the `==' sense) from any base type coming from the | |
654 | CU. */ | |
655 | struct type *address_type = dwarf_expr_address_type (ctx); | |
9a619af0 | 656 | |
cec03d70 | 657 | ctx->location = DWARF_VALUE_MEMORY; |
42be36b3 | 658 | ctx->initialized = 1; /* Default is initialized. */ |
18ec9831 | 659 | |
1e3a102a JK |
660 | if (ctx->recursion_depth > ctx->max_recursion_depth) |
661 | error (_("DWARF-2 expression error: Loop detected (%d)."), | |
662 | ctx->recursion_depth); | |
663 | ctx->recursion_depth++; | |
664 | ||
4c2df51b DJ |
665 | while (op_ptr < op_end) |
666 | { | |
667 | enum dwarf_location_atom op = *op_ptr++; | |
f2c7657e | 668 | ULONGEST result; |
44353522 DE |
669 | /* Assume the value is not in stack memory. |
670 | Code that knows otherwise sets this to 1. | |
671 | Some arithmetic on stack addresses can probably be assumed to still | |
672 | be a stack address, but we skip this complication for now. | |
673 | This is just an optimization, so it's always ok to punt | |
674 | and leave this as 0. */ | |
675 | int in_stack_memory = 0; | |
4c2df51b DJ |
676 | ULONGEST uoffset, reg; |
677 | LONGEST offset; | |
8a9b8146 | 678 | struct value *result_val = NULL; |
4c2df51b | 679 | |
e0e9434c TT |
680 | /* The DWARF expression might have a bug causing an infinite |
681 | loop. In that case, quitting is the only way out. */ | |
682 | QUIT; | |
683 | ||
4c2df51b DJ |
684 | switch (op) |
685 | { | |
686 | case DW_OP_lit0: | |
687 | case DW_OP_lit1: | |
688 | case DW_OP_lit2: | |
689 | case DW_OP_lit3: | |
690 | case DW_OP_lit4: | |
691 | case DW_OP_lit5: | |
692 | case DW_OP_lit6: | |
693 | case DW_OP_lit7: | |
694 | case DW_OP_lit8: | |
695 | case DW_OP_lit9: | |
696 | case DW_OP_lit10: | |
697 | case DW_OP_lit11: | |
698 | case DW_OP_lit12: | |
699 | case DW_OP_lit13: | |
700 | case DW_OP_lit14: | |
701 | case DW_OP_lit15: | |
702 | case DW_OP_lit16: | |
703 | case DW_OP_lit17: | |
704 | case DW_OP_lit18: | |
705 | case DW_OP_lit19: | |
706 | case DW_OP_lit20: | |
707 | case DW_OP_lit21: | |
708 | case DW_OP_lit22: | |
709 | case DW_OP_lit23: | |
710 | case DW_OP_lit24: | |
711 | case DW_OP_lit25: | |
712 | case DW_OP_lit26: | |
713 | case DW_OP_lit27: | |
714 | case DW_OP_lit28: | |
715 | case DW_OP_lit29: | |
716 | case DW_OP_lit30: | |
717 | case DW_OP_lit31: | |
718 | result = op - DW_OP_lit0; | |
8a9b8146 | 719 | result_val = value_from_ulongest (address_type, result); |
4c2df51b DJ |
720 | break; |
721 | ||
722 | case DW_OP_addr: | |
f2c7657e UW |
723 | result = extract_unsigned_integer (op_ptr, |
724 | ctx->addr_size, byte_order); | |
ae0d2f24 | 725 | op_ptr += ctx->addr_size; |
ac56253d TT |
726 | /* Some versions of GCC emit DW_OP_addr before |
727 | DW_OP_GNU_push_tls_address. In this case the value is an | |
728 | index, not an address. We don't support things like | |
729 | branching between the address and the TLS op. */ | |
730 | if (op_ptr >= op_end || *op_ptr != DW_OP_GNU_push_tls_address) | |
731 | result += ctx->offset; | |
8a9b8146 | 732 | result_val = value_from_ulongest (address_type, result); |
4c2df51b DJ |
733 | break; |
734 | ||
3019eac3 DE |
735 | case DW_OP_GNU_addr_index: |
736 | op_ptr = read_uleb128 (op_ptr, op_end, &uoffset); | |
737 | result = (ctx->funcs->get_addr_index) (ctx->baton, uoffset); | |
738 | result_val = value_from_ulongest (address_type, result); | |
739 | break; | |
740 | ||
4c2df51b | 741 | case DW_OP_const1u: |
e17a4113 | 742 | result = extract_unsigned_integer (op_ptr, 1, byte_order); |
8a9b8146 | 743 | result_val = value_from_ulongest (address_type, result); |
4c2df51b DJ |
744 | op_ptr += 1; |
745 | break; | |
746 | case DW_OP_const1s: | |
e17a4113 | 747 | result = extract_signed_integer (op_ptr, 1, byte_order); |
8a9b8146 | 748 | result_val = value_from_ulongest (address_type, result); |
4c2df51b DJ |
749 | op_ptr += 1; |
750 | break; | |
751 | case DW_OP_const2u: | |
e17a4113 | 752 | result = extract_unsigned_integer (op_ptr, 2, byte_order); |
8a9b8146 | 753 | result_val = value_from_ulongest (address_type, result); |
4c2df51b DJ |
754 | op_ptr += 2; |
755 | break; | |
756 | case DW_OP_const2s: | |
e17a4113 | 757 | result = extract_signed_integer (op_ptr, 2, byte_order); |
8a9b8146 | 758 | result_val = value_from_ulongest (address_type, result); |
4c2df51b DJ |
759 | op_ptr += 2; |
760 | break; | |
761 | case DW_OP_const4u: | |
e17a4113 | 762 | result = extract_unsigned_integer (op_ptr, 4, byte_order); |
8a9b8146 | 763 | result_val = value_from_ulongest (address_type, result); |
4c2df51b DJ |
764 | op_ptr += 4; |
765 | break; | |
766 | case DW_OP_const4s: | |
e17a4113 | 767 | result = extract_signed_integer (op_ptr, 4, byte_order); |
8a9b8146 | 768 | result_val = value_from_ulongest (address_type, result); |
4c2df51b DJ |
769 | op_ptr += 4; |
770 | break; | |
771 | case DW_OP_const8u: | |
e17a4113 | 772 | result = extract_unsigned_integer (op_ptr, 8, byte_order); |
8a9b8146 | 773 | result_val = value_from_ulongest (address_type, result); |
4c2df51b DJ |
774 | op_ptr += 8; |
775 | break; | |
776 | case DW_OP_const8s: | |
e17a4113 | 777 | result = extract_signed_integer (op_ptr, 8, byte_order); |
8a9b8146 | 778 | result_val = value_from_ulongest (address_type, result); |
4c2df51b DJ |
779 | op_ptr += 8; |
780 | break; | |
781 | case DW_OP_constu: | |
782 | op_ptr = read_uleb128 (op_ptr, op_end, &uoffset); | |
783 | result = uoffset; | |
8a9b8146 | 784 | result_val = value_from_ulongest (address_type, result); |
4c2df51b DJ |
785 | break; |
786 | case DW_OP_consts: | |
787 | op_ptr = read_sleb128 (op_ptr, op_end, &offset); | |
788 | result = offset; | |
8a9b8146 | 789 | result_val = value_from_ulongest (address_type, result); |
4c2df51b DJ |
790 | break; |
791 | ||
792 | /* The DW_OP_reg operations are required to occur alone in | |
793 | location expressions. */ | |
794 | case DW_OP_reg0: | |
795 | case DW_OP_reg1: | |
796 | case DW_OP_reg2: | |
797 | case DW_OP_reg3: | |
798 | case DW_OP_reg4: | |
799 | case DW_OP_reg5: | |
800 | case DW_OP_reg6: | |
801 | case DW_OP_reg7: | |
802 | case DW_OP_reg8: | |
803 | case DW_OP_reg9: | |
804 | case DW_OP_reg10: | |
805 | case DW_OP_reg11: | |
806 | case DW_OP_reg12: | |
807 | case DW_OP_reg13: | |
808 | case DW_OP_reg14: | |
809 | case DW_OP_reg15: | |
810 | case DW_OP_reg16: | |
811 | case DW_OP_reg17: | |
812 | case DW_OP_reg18: | |
813 | case DW_OP_reg19: | |
814 | case DW_OP_reg20: | |
815 | case DW_OP_reg21: | |
816 | case DW_OP_reg22: | |
817 | case DW_OP_reg23: | |
818 | case DW_OP_reg24: | |
819 | case DW_OP_reg25: | |
820 | case DW_OP_reg26: | |
821 | case DW_OP_reg27: | |
822 | case DW_OP_reg28: | |
823 | case DW_OP_reg29: | |
824 | case DW_OP_reg30: | |
825 | case DW_OP_reg31: | |
42be36b3 CT |
826 | if (op_ptr != op_end |
827 | && *op_ptr != DW_OP_piece | |
d3b1e874 | 828 | && *op_ptr != DW_OP_bit_piece |
42be36b3 | 829 | && *op_ptr != DW_OP_GNU_uninit) |
8a3fe4f8 | 830 | error (_("DWARF-2 expression error: DW_OP_reg operations must be " |
64b9b334 | 831 | "used either alone or in conjunction with DW_OP_piece " |
d3b1e874 | 832 | "or DW_OP_bit_piece.")); |
4c2df51b | 833 | |
61fbb938 | 834 | result = op - DW_OP_reg0; |
8a9b8146 | 835 | result_val = value_from_ulongest (address_type, result); |
cec03d70 | 836 | ctx->location = DWARF_VALUE_REGISTER; |
4c2df51b DJ |
837 | break; |
838 | ||
839 | case DW_OP_regx: | |
840 | op_ptr = read_uleb128 (op_ptr, op_end, ®); | |
3cf03773 | 841 | dwarf_expr_require_composition (op_ptr, op_end, "DW_OP_regx"); |
4c2df51b | 842 | |
61fbb938 | 843 | result = reg; |
8a9b8146 | 844 | result_val = value_from_ulongest (address_type, result); |
cec03d70 | 845 | ctx->location = DWARF_VALUE_REGISTER; |
4c2df51b DJ |
846 | break; |
847 | ||
cec03d70 TT |
848 | case DW_OP_implicit_value: |
849 | { | |
850 | ULONGEST len; | |
9a619af0 | 851 | |
cec03d70 TT |
852 | op_ptr = read_uleb128 (op_ptr, op_end, &len); |
853 | if (op_ptr + len > op_end) | |
854 | error (_("DW_OP_implicit_value: too few bytes available.")); | |
855 | ctx->len = len; | |
856 | ctx->data = op_ptr; | |
857 | ctx->location = DWARF_VALUE_LITERAL; | |
858 | op_ptr += len; | |
3cf03773 TT |
859 | dwarf_expr_require_composition (op_ptr, op_end, |
860 | "DW_OP_implicit_value"); | |
cec03d70 TT |
861 | } |
862 | goto no_push; | |
863 | ||
864 | case DW_OP_stack_value: | |
865 | ctx->location = DWARF_VALUE_STACK; | |
3cf03773 | 866 | dwarf_expr_require_composition (op_ptr, op_end, "DW_OP_stack_value"); |
cec03d70 TT |
867 | goto no_push; |
868 | ||
8cf6f0b1 TT |
869 | case DW_OP_GNU_implicit_pointer: |
870 | { | |
871 | ULONGEST die; | |
872 | LONGEST len; | |
873 | ||
181cebd4 JK |
874 | if (ctx->ref_addr_size == -1) |
875 | error (_("DWARF-2 expression error: DW_OP_GNU_implicit_pointer " | |
876 | "is not allowed in frame context")); | |
877 | ||
b64f50a1 | 878 | /* The referred-to DIE of cu_offset kind. */ |
181cebd4 | 879 | ctx->len = extract_unsigned_integer (op_ptr, ctx->ref_addr_size, |
8cf6f0b1 | 880 | byte_order); |
181cebd4 | 881 | op_ptr += ctx->ref_addr_size; |
8cf6f0b1 TT |
882 | |
883 | /* The byte offset into the data. */ | |
884 | op_ptr = read_sleb128 (op_ptr, op_end, &len); | |
885 | result = (ULONGEST) len; | |
8a9b8146 | 886 | result_val = value_from_ulongest (address_type, result); |
8cf6f0b1 TT |
887 | |
888 | ctx->location = DWARF_VALUE_IMPLICIT_POINTER; | |
889 | dwarf_expr_require_composition (op_ptr, op_end, | |
890 | "DW_OP_GNU_implicit_pointer"); | |
891 | } | |
892 | break; | |
893 | ||
4c2df51b DJ |
894 | case DW_OP_breg0: |
895 | case DW_OP_breg1: | |
896 | case DW_OP_breg2: | |
897 | case DW_OP_breg3: | |
898 | case DW_OP_breg4: | |
899 | case DW_OP_breg5: | |
900 | case DW_OP_breg6: | |
901 | case DW_OP_breg7: | |
902 | case DW_OP_breg8: | |
903 | case DW_OP_breg9: | |
904 | case DW_OP_breg10: | |
905 | case DW_OP_breg11: | |
906 | case DW_OP_breg12: | |
907 | case DW_OP_breg13: | |
908 | case DW_OP_breg14: | |
909 | case DW_OP_breg15: | |
910 | case DW_OP_breg16: | |
911 | case DW_OP_breg17: | |
912 | case DW_OP_breg18: | |
913 | case DW_OP_breg19: | |
914 | case DW_OP_breg20: | |
915 | case DW_OP_breg21: | |
916 | case DW_OP_breg22: | |
917 | case DW_OP_breg23: | |
918 | case DW_OP_breg24: | |
919 | case DW_OP_breg25: | |
920 | case DW_OP_breg26: | |
921 | case DW_OP_breg27: | |
922 | case DW_OP_breg28: | |
923 | case DW_OP_breg29: | |
924 | case DW_OP_breg30: | |
925 | case DW_OP_breg31: | |
926 | { | |
927 | op_ptr = read_sleb128 (op_ptr, op_end, &offset); | |
9e8b7a03 | 928 | result = (ctx->funcs->read_reg) (ctx->baton, op - DW_OP_breg0); |
4c2df51b | 929 | result += offset; |
8a9b8146 | 930 | result_val = value_from_ulongest (address_type, result); |
4c2df51b DJ |
931 | } |
932 | break; | |
933 | case DW_OP_bregx: | |
934 | { | |
935 | op_ptr = read_uleb128 (op_ptr, op_end, ®); | |
936 | op_ptr = read_sleb128 (op_ptr, op_end, &offset); | |
9e8b7a03 | 937 | result = (ctx->funcs->read_reg) (ctx->baton, reg); |
4c2df51b | 938 | result += offset; |
8a9b8146 | 939 | result_val = value_from_ulongest (address_type, result); |
4c2df51b DJ |
940 | } |
941 | break; | |
942 | case DW_OP_fbreg: | |
943 | { | |
0d45f56e | 944 | const gdb_byte *datastart; |
4c2df51b DJ |
945 | size_t datalen; |
946 | unsigned int before_stack_len; | |
947 | ||
948 | op_ptr = read_sleb128 (op_ptr, op_end, &offset); | |
949 | /* Rather than create a whole new context, we simply | |
950 | record the stack length before execution, then reset it | |
951 | afterwards, effectively erasing whatever the recursive | |
952 | call put there. */ | |
953 | before_stack_len = ctx->stack_len; | |
da62e633 AC |
954 | /* FIXME: cagney/2003-03-26: This code should be using |
955 | get_frame_base_address(), and then implement a dwarf2 | |
956 | specific this_base method. */ | |
9e8b7a03 | 957 | (ctx->funcs->get_frame_base) (ctx->baton, &datastart, &datalen); |
4c2df51b | 958 | dwarf_expr_eval (ctx, datastart, datalen); |
f2c7657e UW |
959 | if (ctx->location == DWARF_VALUE_MEMORY) |
960 | result = dwarf_expr_fetch_address (ctx, 0); | |
961 | else if (ctx->location == DWARF_VALUE_REGISTER) | |
9e8b7a03 JK |
962 | result = (ctx->funcs->read_reg) (ctx->baton, |
963 | value_as_long (dwarf_expr_fetch (ctx, 0))); | |
f2c7657e | 964 | else |
3e43a32a MS |
965 | error (_("Not implemented: computing frame " |
966 | "base using explicit value operator")); | |
4c2df51b | 967 | result = result + offset; |
8a9b8146 | 968 | result_val = value_from_ulongest (address_type, result); |
44353522 | 969 | in_stack_memory = 1; |
4c2df51b | 970 | ctx->stack_len = before_stack_len; |
cec03d70 | 971 | ctx->location = DWARF_VALUE_MEMORY; |
4c2df51b DJ |
972 | } |
973 | break; | |
44353522 | 974 | |
4c2df51b | 975 | case DW_OP_dup: |
8a9b8146 | 976 | result_val = dwarf_expr_fetch (ctx, 0); |
44353522 | 977 | in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, 0); |
4c2df51b DJ |
978 | break; |
979 | ||
980 | case DW_OP_drop: | |
981 | dwarf_expr_pop (ctx); | |
982 | goto no_push; | |
983 | ||
984 | case DW_OP_pick: | |
985 | offset = *op_ptr++; | |
8a9b8146 | 986 | result_val = dwarf_expr_fetch (ctx, offset); |
44353522 | 987 | in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, offset); |
4c2df51b | 988 | break; |
9f3fe11c TG |
989 | |
990 | case DW_OP_swap: | |
991 | { | |
44353522 | 992 | struct dwarf_stack_value t1, t2; |
9f3fe11c TG |
993 | |
994 | if (ctx->stack_len < 2) | |
3e43a32a | 995 | error (_("Not enough elements for " |
0963b4bd | 996 | "DW_OP_swap. Need 2, have %d."), |
9f3fe11c TG |
997 | ctx->stack_len); |
998 | t1 = ctx->stack[ctx->stack_len - 1]; | |
999 | t2 = ctx->stack[ctx->stack_len - 2]; | |
1000 | ctx->stack[ctx->stack_len - 1] = t2; | |
1001 | ctx->stack[ctx->stack_len - 2] = t1; | |
1002 | goto no_push; | |
1003 | } | |
4c2df51b DJ |
1004 | |
1005 | case DW_OP_over: | |
8a9b8146 | 1006 | result_val = dwarf_expr_fetch (ctx, 1); |
44353522 | 1007 | in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, 1); |
4c2df51b DJ |
1008 | break; |
1009 | ||
1010 | case DW_OP_rot: | |
1011 | { | |
44353522 | 1012 | struct dwarf_stack_value t1, t2, t3; |
4c2df51b DJ |
1013 | |
1014 | if (ctx->stack_len < 3) | |
0963b4bd MS |
1015 | error (_("Not enough elements for " |
1016 | "DW_OP_rot. Need 3, have %d."), | |
4c2df51b DJ |
1017 | ctx->stack_len); |
1018 | t1 = ctx->stack[ctx->stack_len - 1]; | |
1019 | t2 = ctx->stack[ctx->stack_len - 2]; | |
1020 | t3 = ctx->stack[ctx->stack_len - 3]; | |
1021 | ctx->stack[ctx->stack_len - 1] = t2; | |
1022 | ctx->stack[ctx->stack_len - 2] = t3; | |
1023 | ctx->stack[ctx->stack_len - 3] = t1; | |
1024 | goto no_push; | |
1025 | } | |
1026 | ||
1027 | case DW_OP_deref: | |
1028 | case DW_OP_deref_size: | |
8a9b8146 | 1029 | case DW_OP_GNU_deref_type: |
f2c7657e UW |
1030 | { |
1031 | int addr_size = (op == DW_OP_deref ? ctx->addr_size : *op_ptr++); | |
1032 | gdb_byte *buf = alloca (addr_size); | |
1033 | CORE_ADDR addr = dwarf_expr_fetch_address (ctx, 0); | |
8a9b8146 TT |
1034 | struct type *type; |
1035 | ||
f2c7657e UW |
1036 | dwarf_expr_pop (ctx); |
1037 | ||
8a9b8146 TT |
1038 | if (op == DW_OP_GNU_deref_type) |
1039 | { | |
b64f50a1 | 1040 | cu_offset type_die; |
8a9b8146 | 1041 | |
b64f50a1 JK |
1042 | op_ptr = read_uleb128 (op_ptr, op_end, &uoffset); |
1043 | type_die.cu_off = uoffset; | |
8a9b8146 TT |
1044 | type = dwarf_get_base_type (ctx, type_die, 0); |
1045 | } | |
1046 | else | |
1047 | type = address_type; | |
1048 | ||
9e8b7a03 | 1049 | (ctx->funcs->read_mem) (ctx->baton, buf, addr, addr_size); |
325663dc JB |
1050 | |
1051 | /* If the size of the object read from memory is different | |
1052 | from the type length, we need to zero-extend it. */ | |
1053 | if (TYPE_LENGTH (type) != addr_size) | |
1054 | { | |
1055 | ULONGEST result = | |
1056 | extract_unsigned_integer (buf, addr_size, byte_order); | |
1057 | ||
1058 | buf = alloca (TYPE_LENGTH (type)); | |
1059 | store_unsigned_integer (buf, TYPE_LENGTH (type), | |
1060 | byte_order, result); | |
1061 | } | |
1062 | ||
8a9b8146 | 1063 | result_val = value_from_contents_and_address (type, buf, addr); |
f2c7657e UW |
1064 | break; |
1065 | } | |
1066 | ||
4c2df51b DJ |
1067 | case DW_OP_abs: |
1068 | case DW_OP_neg: | |
1069 | case DW_OP_not: | |
1070 | case DW_OP_plus_uconst: | |
8a9b8146 TT |
1071 | { |
1072 | /* Unary operations. */ | |
1073 | result_val = dwarf_expr_fetch (ctx, 0); | |
1074 | dwarf_expr_pop (ctx); | |
4c2df51b | 1075 | |
8a9b8146 TT |
1076 | switch (op) |
1077 | { | |
1078 | case DW_OP_abs: | |
1079 | if (value_less (result_val, | |
1080 | value_zero (value_type (result_val), not_lval))) | |
1081 | result_val = value_neg (result_val); | |
1082 | break; | |
1083 | case DW_OP_neg: | |
1084 | result_val = value_neg (result_val); | |
1085 | break; | |
1086 | case DW_OP_not: | |
1087 | dwarf_require_integral (value_type (result_val)); | |
1088 | result_val = value_complement (result_val); | |
1089 | break; | |
1090 | case DW_OP_plus_uconst: | |
1091 | dwarf_require_integral (value_type (result_val)); | |
1092 | result = value_as_long (result_val); | |
1093 | op_ptr = read_uleb128 (op_ptr, op_end, ®); | |
1094 | result += reg; | |
1095 | result_val = value_from_ulongest (address_type, result); | |
1096 | break; | |
1097 | } | |
1098 | } | |
4c2df51b DJ |
1099 | break; |
1100 | ||
1101 | case DW_OP_and: | |
1102 | case DW_OP_div: | |
1103 | case DW_OP_minus: | |
1104 | case DW_OP_mod: | |
1105 | case DW_OP_mul: | |
1106 | case DW_OP_or: | |
1107 | case DW_OP_plus: | |
1108 | case DW_OP_shl: | |
1109 | case DW_OP_shr: | |
1110 | case DW_OP_shra: | |
1111 | case DW_OP_xor: | |
1112 | case DW_OP_le: | |
1113 | case DW_OP_ge: | |
1114 | case DW_OP_eq: | |
1115 | case DW_OP_lt: | |
1116 | case DW_OP_gt: | |
1117 | case DW_OP_ne: | |
1118 | { | |
f2c7657e | 1119 | /* Binary operations. */ |
8a9b8146 | 1120 | struct value *first, *second; |
4c2df51b DJ |
1121 | |
1122 | second = dwarf_expr_fetch (ctx, 0); | |
1123 | dwarf_expr_pop (ctx); | |
1124 | ||
b263358a | 1125 | first = dwarf_expr_fetch (ctx, 0); |
4c2df51b DJ |
1126 | dwarf_expr_pop (ctx); |
1127 | ||
8a9b8146 TT |
1128 | if (! base_types_equal_p (value_type (first), value_type (second))) |
1129 | error (_("Incompatible types on DWARF stack")); | |
1130 | ||
4c2df51b DJ |
1131 | switch (op) |
1132 | { | |
1133 | case DW_OP_and: | |
8a9b8146 TT |
1134 | dwarf_require_integral (value_type (first)); |
1135 | dwarf_require_integral (value_type (second)); | |
1136 | result_val = value_binop (first, second, BINOP_BITWISE_AND); | |
4c2df51b DJ |
1137 | break; |
1138 | case DW_OP_div: | |
8a9b8146 | 1139 | result_val = value_binop (first, second, BINOP_DIV); |
99c87dab | 1140 | break; |
4c2df51b | 1141 | case DW_OP_minus: |
8a9b8146 | 1142 | result_val = value_binop (first, second, BINOP_SUB); |
4c2df51b DJ |
1143 | break; |
1144 | case DW_OP_mod: | |
8a9b8146 TT |
1145 | { |
1146 | int cast_back = 0; | |
1147 | struct type *orig_type = value_type (first); | |
1148 | ||
1149 | /* We have to special-case "old-style" untyped values | |
1150 | -- these must have mod computed using unsigned | |
1151 | math. */ | |
1152 | if (orig_type == address_type) | |
1153 | { | |
1154 | struct type *utype | |
1155 | = get_unsigned_type (ctx->gdbarch, orig_type); | |
1156 | ||
1157 | cast_back = 1; | |
1158 | first = value_cast (utype, first); | |
1159 | second = value_cast (utype, second); | |
1160 | } | |
1161 | /* Note that value_binop doesn't handle float or | |
1162 | decimal float here. This seems unimportant. */ | |
1163 | result_val = value_binop (first, second, BINOP_MOD); | |
1164 | if (cast_back) | |
1165 | result_val = value_cast (orig_type, result_val); | |
1166 | } | |
4c2df51b DJ |
1167 | break; |
1168 | case DW_OP_mul: | |
8a9b8146 | 1169 | result_val = value_binop (first, second, BINOP_MUL); |
4c2df51b DJ |
1170 | break; |
1171 | case DW_OP_or: | |
8a9b8146 TT |
1172 | dwarf_require_integral (value_type (first)); |
1173 | dwarf_require_integral (value_type (second)); | |
1174 | result_val = value_binop (first, second, BINOP_BITWISE_IOR); | |
4c2df51b DJ |
1175 | break; |
1176 | case DW_OP_plus: | |
8a9b8146 | 1177 | result_val = value_binop (first, second, BINOP_ADD); |
4c2df51b DJ |
1178 | break; |
1179 | case DW_OP_shl: | |
8a9b8146 TT |
1180 | dwarf_require_integral (value_type (first)); |
1181 | dwarf_require_integral (value_type (second)); | |
1182 | result_val = value_binop (first, second, BINOP_LSH); | |
4c2df51b DJ |
1183 | break; |
1184 | case DW_OP_shr: | |
8a9b8146 TT |
1185 | dwarf_require_integral (value_type (first)); |
1186 | dwarf_require_integral (value_type (second)); | |
b087e0ed | 1187 | if (!TYPE_UNSIGNED (value_type (first))) |
8a9b8146 TT |
1188 | { |
1189 | struct type *utype | |
1190 | = get_unsigned_type (ctx->gdbarch, value_type (first)); | |
1191 | ||
1192 | first = value_cast (utype, first); | |
1193 | } | |
1194 | ||
1195 | result_val = value_binop (first, second, BINOP_RSH); | |
1196 | /* Make sure we wind up with the same type we started | |
1197 | with. */ | |
1198 | if (value_type (result_val) != value_type (second)) | |
1199 | result_val = value_cast (value_type (second), result_val); | |
99c87dab | 1200 | break; |
4c2df51b | 1201 | case DW_OP_shra: |
8a9b8146 TT |
1202 | dwarf_require_integral (value_type (first)); |
1203 | dwarf_require_integral (value_type (second)); | |
8ddd9a20 TT |
1204 | if (TYPE_UNSIGNED (value_type (first))) |
1205 | { | |
1206 | struct type *stype | |
1207 | = get_signed_type (ctx->gdbarch, value_type (first)); | |
1208 | ||
1209 | first = value_cast (stype, first); | |
1210 | } | |
1211 | ||
8a9b8146 | 1212 | result_val = value_binop (first, second, BINOP_RSH); |
8ddd9a20 TT |
1213 | /* Make sure we wind up with the same type we started |
1214 | with. */ | |
1215 | if (value_type (result_val) != value_type (second)) | |
1216 | result_val = value_cast (value_type (second), result_val); | |
4c2df51b DJ |
1217 | break; |
1218 | case DW_OP_xor: | |
8a9b8146 TT |
1219 | dwarf_require_integral (value_type (first)); |
1220 | dwarf_require_integral (value_type (second)); | |
1221 | result_val = value_binop (first, second, BINOP_BITWISE_XOR); | |
4c2df51b DJ |
1222 | break; |
1223 | case DW_OP_le: | |
8a9b8146 TT |
1224 | /* A <= B is !(B < A). */ |
1225 | result = ! value_less (second, first); | |
1226 | result_val = value_from_ulongest (address_type, result); | |
4c2df51b DJ |
1227 | break; |
1228 | case DW_OP_ge: | |
8a9b8146 TT |
1229 | /* A >= B is !(A < B). */ |
1230 | result = ! value_less (first, second); | |
1231 | result_val = value_from_ulongest (address_type, result); | |
4c2df51b DJ |
1232 | break; |
1233 | case DW_OP_eq: | |
8a9b8146 TT |
1234 | result = value_equal (first, second); |
1235 | result_val = value_from_ulongest (address_type, result); | |
4c2df51b DJ |
1236 | break; |
1237 | case DW_OP_lt: | |
8a9b8146 TT |
1238 | result = value_less (first, second); |
1239 | result_val = value_from_ulongest (address_type, result); | |
4c2df51b DJ |
1240 | break; |
1241 | case DW_OP_gt: | |
8a9b8146 TT |
1242 | /* A > B is B < A. */ |
1243 | result = value_less (second, first); | |
1244 | result_val = value_from_ulongest (address_type, result); | |
4c2df51b DJ |
1245 | break; |
1246 | case DW_OP_ne: | |
8a9b8146 TT |
1247 | result = ! value_equal (first, second); |
1248 | result_val = value_from_ulongest (address_type, result); | |
4c2df51b DJ |
1249 | break; |
1250 | default: | |
1251 | internal_error (__FILE__, __LINE__, | |
e2e0b3e5 | 1252 | _("Can't be reached.")); |
4c2df51b | 1253 | } |
4c2df51b DJ |
1254 | } |
1255 | break; | |
1256 | ||
e7802207 | 1257 | case DW_OP_call_frame_cfa: |
9e8b7a03 | 1258 | result = (ctx->funcs->get_frame_cfa) (ctx->baton); |
8a9b8146 | 1259 | result_val = value_from_ulongest (address_type, result); |
44353522 | 1260 | in_stack_memory = 1; |
e7802207 TT |
1261 | break; |
1262 | ||
4c2df51b | 1263 | case DW_OP_GNU_push_tls_address: |
c3228f12 EZ |
1264 | /* Variable is at a constant offset in the thread-local |
1265 | storage block into the objfile for the current thread and | |
0963b4bd | 1266 | the dynamic linker module containing this expression. Here |
c3228f12 EZ |
1267 | we return returns the offset from that base. The top of the |
1268 | stack has the offset from the beginning of the thread | |
1269 | control block at which the variable is located. Nothing | |
1270 | should follow this operator, so the top of stack would be | |
1271 | returned. */ | |
8a9b8146 | 1272 | result = value_as_long (dwarf_expr_fetch (ctx, 0)); |
4c2df51b | 1273 | dwarf_expr_pop (ctx); |
9e8b7a03 | 1274 | result = (ctx->funcs->get_tls_address) (ctx->baton, result); |
8a9b8146 | 1275 | result_val = value_from_ulongest (address_type, result); |
4c2df51b DJ |
1276 | break; |
1277 | ||
1278 | case DW_OP_skip: | |
e17a4113 | 1279 | offset = extract_signed_integer (op_ptr, 2, byte_order); |
4c2df51b DJ |
1280 | op_ptr += 2; |
1281 | op_ptr += offset; | |
1282 | goto no_push; | |
1283 | ||
1284 | case DW_OP_bra: | |
8a9b8146 TT |
1285 | { |
1286 | struct value *val; | |
1287 | ||
1288 | offset = extract_signed_integer (op_ptr, 2, byte_order); | |
1289 | op_ptr += 2; | |
1290 | val = dwarf_expr_fetch (ctx, 0); | |
1291 | dwarf_require_integral (value_type (val)); | |
1292 | if (value_as_long (val) != 0) | |
1293 | op_ptr += offset; | |
1294 | dwarf_expr_pop (ctx); | |
1295 | } | |
4c2df51b DJ |
1296 | goto no_push; |
1297 | ||
1298 | case DW_OP_nop: | |
1299 | goto no_push; | |
1300 | ||
87808bd6 JB |
1301 | case DW_OP_piece: |
1302 | { | |
1303 | ULONGEST size; | |
87808bd6 JB |
1304 | |
1305 | /* Record the piece. */ | |
1306 | op_ptr = read_uleb128 (op_ptr, op_end, &size); | |
d3b1e874 | 1307 | add_piece (ctx, 8 * size, 0); |
87808bd6 | 1308 | |
cec03d70 TT |
1309 | /* Pop off the address/regnum, and reset the location |
1310 | type. */ | |
cb826367 TT |
1311 | if (ctx->location != DWARF_VALUE_LITERAL |
1312 | && ctx->location != DWARF_VALUE_OPTIMIZED_OUT) | |
cec03d70 TT |
1313 | dwarf_expr_pop (ctx); |
1314 | ctx->location = DWARF_VALUE_MEMORY; | |
87808bd6 JB |
1315 | } |
1316 | goto no_push; | |
1317 | ||
d3b1e874 TT |
1318 | case DW_OP_bit_piece: |
1319 | { | |
1320 | ULONGEST size, offset; | |
1321 | ||
1322 | /* Record the piece. */ | |
1323 | op_ptr = read_uleb128 (op_ptr, op_end, &size); | |
1324 | op_ptr = read_uleb128 (op_ptr, op_end, &offset); | |
1325 | add_piece (ctx, size, offset); | |
1326 | ||
1327 | /* Pop off the address/regnum, and reset the location | |
1328 | type. */ | |
1329 | if (ctx->location != DWARF_VALUE_LITERAL | |
1330 | && ctx->location != DWARF_VALUE_OPTIMIZED_OUT) | |
1331 | dwarf_expr_pop (ctx); | |
1332 | ctx->location = DWARF_VALUE_MEMORY; | |
1333 | } | |
1334 | goto no_push; | |
1335 | ||
42be36b3 CT |
1336 | case DW_OP_GNU_uninit: |
1337 | if (op_ptr != op_end) | |
9c482037 | 1338 | error (_("DWARF-2 expression error: DW_OP_GNU_uninit must always " |
42be36b3 CT |
1339 | "be the very last op.")); |
1340 | ||
1341 | ctx->initialized = 0; | |
1342 | goto no_push; | |
1343 | ||
5c631832 | 1344 | case DW_OP_call2: |
b64f50a1 JK |
1345 | { |
1346 | cu_offset offset; | |
1347 | ||
1348 | offset.cu_off = extract_unsigned_integer (op_ptr, 2, byte_order); | |
1349 | op_ptr += 2; | |
1350 | ctx->funcs->dwarf_call (ctx, offset); | |
1351 | } | |
5c631832 JK |
1352 | goto no_push; |
1353 | ||
1354 | case DW_OP_call4: | |
b64f50a1 JK |
1355 | { |
1356 | cu_offset offset; | |
1357 | ||
1358 | offset.cu_off = extract_unsigned_integer (op_ptr, 4, byte_order); | |
1359 | op_ptr += 4; | |
1360 | ctx->funcs->dwarf_call (ctx, offset); | |
1361 | } | |
5c631832 | 1362 | goto no_push; |
dd90784c JK |
1363 | |
1364 | case DW_OP_GNU_entry_value: | |
8e3b41a9 JK |
1365 | { |
1366 | ULONGEST len; | |
1367 | int dwarf_reg; | |
1368 | CORE_ADDR deref_size; | |
1369 | ||
1370 | op_ptr = read_uleb128 (op_ptr, op_end, &len); | |
1371 | if (op_ptr + len > op_end) | |
1372 | error (_("DW_OP_GNU_entry_value: too few bytes available.")); | |
1373 | ||
1374 | dwarf_reg = dwarf_block_to_dwarf_reg (op_ptr, op_ptr + len); | |
1375 | if (dwarf_reg != -1) | |
1376 | { | |
1377 | op_ptr += len; | |
1378 | ctx->funcs->push_dwarf_reg_entry_value (ctx, dwarf_reg, | |
a471c594 JK |
1379 | 0 /* unused */, |
1380 | -1 /* deref_size */); | |
1381 | goto no_push; | |
1382 | } | |
1383 | ||
1384 | dwarf_reg = dwarf_block_to_dwarf_reg_deref (op_ptr, op_ptr + len, | |
1385 | &deref_size); | |
1386 | if (dwarf_reg != -1) | |
1387 | { | |
1388 | if (deref_size == -1) | |
1389 | deref_size = ctx->addr_size; | |
1390 | op_ptr += len; | |
1391 | ctx->funcs->push_dwarf_reg_entry_value (ctx, dwarf_reg, | |
1392 | 0 /* unused */, | |
1393 | deref_size); | |
8e3b41a9 JK |
1394 | goto no_push; |
1395 | } | |
1396 | ||
1397 | error (_("DWARF-2 expression error: DW_OP_GNU_entry_value is " | |
a471c594 JK |
1398 | "supported only for single DW_OP_reg* " |
1399 | "or for DW_OP_breg*(0)+DW_OP_deref*")); | |
8e3b41a9 | 1400 | } |
5c631832 | 1401 | |
8a9b8146 TT |
1402 | case DW_OP_GNU_const_type: |
1403 | { | |
b64f50a1 | 1404 | cu_offset type_die; |
8a9b8146 TT |
1405 | int n; |
1406 | const gdb_byte *data; | |
1407 | struct type *type; | |
1408 | ||
b64f50a1 JK |
1409 | op_ptr = read_uleb128 (op_ptr, op_end, &uoffset); |
1410 | type_die.cu_off = uoffset; | |
8a9b8146 TT |
1411 | n = *op_ptr++; |
1412 | data = op_ptr; | |
1413 | op_ptr += n; | |
1414 | ||
1415 | type = dwarf_get_base_type (ctx, type_die, n); | |
1416 | result_val = value_from_contents (type, data); | |
1417 | } | |
1418 | break; | |
1419 | ||
1420 | case DW_OP_GNU_regval_type: | |
1421 | { | |
b64f50a1 | 1422 | cu_offset type_die; |
8a9b8146 TT |
1423 | struct type *type; |
1424 | ||
1425 | op_ptr = read_uleb128 (op_ptr, op_end, ®); | |
b64f50a1 JK |
1426 | op_ptr = read_uleb128 (op_ptr, op_end, &uoffset); |
1427 | type_die.cu_off = uoffset; | |
8a9b8146 TT |
1428 | |
1429 | type = dwarf_get_base_type (ctx, type_die, 0); | |
9e8b7a03 | 1430 | result = (ctx->funcs->read_reg) (ctx->baton, reg); |
d1b66e6d TT |
1431 | result_val = value_from_ulongest (address_type, result); |
1432 | result_val = value_from_contents (type, | |
1433 | value_contents_all (result_val)); | |
8a9b8146 TT |
1434 | } |
1435 | break; | |
1436 | ||
1437 | case DW_OP_GNU_convert: | |
1438 | case DW_OP_GNU_reinterpret: | |
1439 | { | |
b64f50a1 | 1440 | cu_offset type_die; |
8a9b8146 TT |
1441 | struct type *type; |
1442 | ||
b64f50a1 JK |
1443 | op_ptr = read_uleb128 (op_ptr, op_end, &uoffset); |
1444 | type_die.cu_off = uoffset; | |
8a9b8146 | 1445 | |
b64f50a1 | 1446 | if (type_die.cu_off == 0) |
c38c4bc5 TT |
1447 | type = address_type; |
1448 | else | |
1449 | type = dwarf_get_base_type (ctx, type_die, 0); | |
8a9b8146 TT |
1450 | |
1451 | result_val = dwarf_expr_fetch (ctx, 0); | |
1452 | dwarf_expr_pop (ctx); | |
1453 | ||
1454 | if (op == DW_OP_GNU_convert) | |
1455 | result_val = value_cast (type, result_val); | |
1456 | else if (type == value_type (result_val)) | |
1457 | { | |
1458 | /* Nothing. */ | |
1459 | } | |
1460 | else if (TYPE_LENGTH (type) | |
1461 | != TYPE_LENGTH (value_type (result_val))) | |
1462 | error (_("DW_OP_GNU_reinterpret has wrong size")); | |
1463 | else | |
1464 | result_val | |
1465 | = value_from_contents (type, | |
1466 | value_contents_all (result_val)); | |
1467 | } | |
1468 | break; | |
1469 | ||
4c2df51b | 1470 | default: |
8a3fe4f8 | 1471 | error (_("Unhandled dwarf expression opcode 0x%x"), op); |
4c2df51b DJ |
1472 | } |
1473 | ||
1474 | /* Most things push a result value. */ | |
8a9b8146 TT |
1475 | gdb_assert (result_val != NULL); |
1476 | dwarf_expr_push (ctx, result_val, in_stack_memory); | |
82ae4854 | 1477 | no_push: |
b27cf2b3 | 1478 | ; |
4c2df51b | 1479 | } |
1e3a102a | 1480 | |
8cf6f0b1 TT |
1481 | /* To simplify our main caller, if the result is an implicit |
1482 | pointer, then make a pieced value. This is ok because we can't | |
1483 | have implicit pointers in contexts where pieces are invalid. */ | |
1484 | if (ctx->location == DWARF_VALUE_IMPLICIT_POINTER) | |
1485 | add_piece (ctx, 8 * ctx->addr_size, 0); | |
1486 | ||
dd90784c | 1487 | abort_expression: |
1e3a102a JK |
1488 | ctx->recursion_depth--; |
1489 | gdb_assert (ctx->recursion_depth >= 0); | |
8a9b8146 TT |
1490 | } |
1491 | ||
523f3620 JK |
/* Stub dwarf_expr_context_funcs.get_frame_base implementation.
   Always reports an error; installed by evaluation contexts that do
   not provide a frame base, so that any DW_OP_fbreg in the expression
   is rejected with a clear message.  */

void
ctx_no_get_frame_base (void *baton, const gdb_byte **start, size_t *length)
{
  error (_("%s is invalid in this context"), "DW_OP_fbreg");
}
1499 | ||
/* Stub dwarf_expr_context_funcs.get_frame_cfa implementation.
   Always reports an error; installed by evaluation contexts that do
   not provide a CFA, so that DW_OP_call_frame_cfa is rejected.  */

CORE_ADDR
ctx_no_get_frame_cfa (void *baton)
{
  error (_("%s is invalid in this context"), "DW_OP_call_frame_cfa");
}
1507 | ||
/* Stub dwarf_expr_context_funcs.get_frame_pc implementation.
   Always reports an error.  The message names
   DW_OP_GNU_implicit_pointer, the operator whose evaluation needs the
   frame PC in this evaluator.  */

CORE_ADDR
ctx_no_get_frame_pc (void *baton)
{
  error (_("%s is invalid in this context"), "DW_OP_GNU_implicit_pointer");
}
1515 | ||
/* Stub dwarf_expr_context_funcs.get_tls_address implementation.
   Always reports an error; installed by evaluation contexts that
   cannot translate a TLS offset, so that DW_OP_GNU_push_tls_address
   is rejected.  */

CORE_ADDR
ctx_no_get_tls_address (void *baton, CORE_ADDR offset)
{
  error (_("%s is invalid in this context"), "DW_OP_GNU_push_tls_address");
}
1523 | ||
/* Stub dwarf_expr_context_funcs.dwarf_call implementation.
   Always reports an error; installed by evaluation contexts that
   cannot evaluate a called DIE, so that the DW_OP_call* family of
   operators is rejected.  */

void
ctx_no_dwarf_call (struct dwarf_expr_context *ctx, cu_offset die_offset)
{
  error (_("%s is invalid in this context"), "DW_OP_call*");
}
1531 | ||
1532 | /* Stub dwarf_expr_context_funcs.get_base_type implementation. */ | |
1533 | ||
1534 | struct type * | |
b64f50a1 | 1535 | ctx_no_get_base_type (struct dwarf_expr_context *ctx, cu_offset die) |
523f3620 JK |
1536 | { |
1537 | error (_("Support for typed DWARF is not supported in this context")); | |
1538 | } | |
1539 | ||
/* Stub dwarf_expr_context_funcs.push_dwarf_reg_entry_value
   implementation.  (The callback is push_dwarf_reg_entry_value; an
   earlier comment here misnamed it "push_dwarf_block_entry_value".)
   Unlike the other ctx_no_* stubs, this aborts via internal_error
   rather than error: the "unimplemented" wording indicates callers
   are not expected to reach it without providing real support for
   DW_OP_GNU_entry_value.  */

void
ctx_no_push_dwarf_reg_entry_value (struct dwarf_expr_context *ctx,
				   int dwarf_reg, CORE_ADDR fb_offset,
				   int deref_size)
{
  internal_error (__FILE__, __LINE__,
		  _("Support for DW_OP_GNU_entry_value is unimplemented"));
}
1550 | } | |
1551 | ||
3019eac3 DE |
/* Stub dwarf_expr_context_funcs.get_addr_index implementation.
   Always reports an error; installed by evaluation contexts that
   cannot look up an entry in .debug_addr, so that
   DW_OP_GNU_addr_index is rejected.  */

CORE_ADDR
ctx_no_get_addr_index (void *baton, unsigned int index)
{
  error (_("%s is invalid in this context"), "DW_OP_GNU_addr_index");
}
1559 | ||
70221824 PA |
/* Provide a prototype to silence -Wmissing-prototypes.  */
extern initialize_file_ftype _initialize_dwarf2expr;

/* Module initializer: register the per-gdbarch data slot
   (dwarf_arch_cookie) whose contents are built lazily by
   dwarf_gdbarch_types_init and cache the types used by the DWARF
   expression evaluator.  */

void
_initialize_dwarf2expr (void)
{
  dwarf_arch_cookie
    = gdbarch_data_register_post_init (dwarf_gdbarch_types_init);
}