gdb/
[deliverable/binutils-gdb.git] / gdb / dwarf2expr.c
1 /* DWARF 2 Expression Evaluator.
2
3 Copyright (C) 2001, 2002, 2003, 2005, 2007, 2008, 2009, 2010, 2011
4 Free Software Foundation, Inc.
5
6 Contributed by Daniel Berlin (dan@dberlin.org)
7
8 This file is part of GDB.
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 3 of the License, or
13 (at your option) any later version.
14
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with this program. If not, see <http://www.gnu.org/licenses/>. */
22
23 #include "defs.h"
24 #include "symtab.h"
25 #include "gdbtypes.h"
26 #include "value.h"
27 #include "gdbcore.h"
28 #include "dwarf2.h"
29 #include "dwarf2expr.h"
30 #include "gdb_assert.h"
31
/* Local prototypes.  */

static void execute_stack_op (struct dwarf_expr_context *,
			      const gdb_byte *, const gdb_byte *);

/* Cookie for gdbarch data.  Registered per-architecture data is a
   struct dwarf_gdbarch_types (see dwarf_gdbarch_types_init).  */

static struct gdbarch_data *dwarf_arch_cookie;

/* This holds gdbarch-specific types used by the DWARF expression
   evaluator.  See comments in execute_stack_op.  */

struct dwarf_gdbarch_types
{
  /* Lazily-created "untyped" DWARF address types, indexed by address
     size: slot 0 for 2-byte, 1 for 4-byte, 2 for 8-byte addresses
     (see dwarf_expr_address_type).  */
  struct type *dw_types[3];
};
48
49 /* Allocate and fill in dwarf_gdbarch_types for an arch. */
50
51 static void *
52 dwarf_gdbarch_types_init (struct gdbarch *gdbarch)
53 {
54 struct dwarf_gdbarch_types *types
55 = GDBARCH_OBSTACK_ZALLOC (gdbarch, struct dwarf_gdbarch_types);
56
57 /* The types themselves are lazily initialized. */
58
59 return types;
60 }
61
62 /* Return the type used for DWARF operations where the type is
63 unspecified in the DWARF spec. Only certain sizes are
64 supported. */
65
66 static struct type *
67 dwarf_expr_address_type (struct dwarf_expr_context *ctx)
68 {
69 struct dwarf_gdbarch_types *types = gdbarch_data (ctx->gdbarch,
70 dwarf_arch_cookie);
71 int ndx;
72
73 if (ctx->addr_size == 2)
74 ndx = 0;
75 else if (ctx->addr_size == 4)
76 ndx = 1;
77 else if (ctx->addr_size == 8)
78 ndx = 2;
79 else
80 error (_("Unsupported address size in DWARF expressions: %d bits"),
81 8 * ctx->addr_size);
82
83 if (types->dw_types[ndx] == NULL)
84 types->dw_types[ndx]
85 = arch_integer_type (ctx->gdbarch,
86 8 * ctx->addr_size,
87 0, "<signed DWARF address type>");
88
89 return types->dw_types[ndx];
90 }
91
92 /* Create a new context for the expression evaluator. */
93
94 struct dwarf_expr_context *
95 new_dwarf_expr_context (void)
96 {
97 struct dwarf_expr_context *retval;
98
99 retval = xcalloc (1, sizeof (struct dwarf_expr_context));
100 retval->stack_len = 0;
101 retval->stack_allocated = 10;
102 retval->stack = xmalloc (retval->stack_allocated
103 * sizeof (struct dwarf_stack_value));
104 retval->num_pieces = 0;
105 retval->pieces = 0;
106 retval->max_recursion_depth = 0x100;
107 return retval;
108 }
109
/* Release the memory allocated to CTX, including its stack and piece
   arrays and CTX itself.  */

void
free_dwarf_expr_context (struct dwarf_expr_context *ctx)
{
  xfree (ctx->stack);
  xfree (ctx->pieces);
  xfree (ctx);
}
119
/* Helper for make_cleanup_free_dwarf_expr_context: cleanup callback
   that frees the dwarf_expr_context passed as ARG.  */

static void
free_dwarf_expr_context_cleanup (void *arg)
{
  free_dwarf_expr_context (arg);
}
127
/* Return a cleanup that calls free_dwarf_expr_context on CTX when
   run.  */

struct cleanup *
make_cleanup_free_dwarf_expr_context (struct dwarf_expr_context *ctx)
{
  return make_cleanup (free_dwarf_expr_context_cleanup, ctx);
}
135
136 /* Expand the memory allocated to CTX's stack to contain at least
137 NEED more elements than are currently used. */
138
139 static void
140 dwarf_expr_grow_stack (struct dwarf_expr_context *ctx, size_t need)
141 {
142 if (ctx->stack_len + need > ctx->stack_allocated)
143 {
144 size_t newlen = ctx->stack_len + need + 10;
145
146 ctx->stack = xrealloc (ctx->stack,
147 newlen * sizeof (struct dwarf_stack_value));
148 ctx->stack_allocated = newlen;
149 }
150 }
151
152 /* Push VALUE onto CTX's stack. */
153
154 static void
155 dwarf_expr_push (struct dwarf_expr_context *ctx, struct value *value,
156 int in_stack_memory)
157 {
158 struct dwarf_stack_value *v;
159
160 dwarf_expr_grow_stack (ctx, 1);
161 v = &ctx->stack[ctx->stack_len++];
162 v->value = value;
163 v->in_stack_memory = in_stack_memory;
164 }
165
166 /* Push VALUE onto CTX's stack. */
167
168 void
169 dwarf_expr_push_address (struct dwarf_expr_context *ctx, CORE_ADDR value,
170 int in_stack_memory)
171 {
172 dwarf_expr_push (ctx,
173 value_from_ulongest (dwarf_expr_address_type (ctx), value),
174 in_stack_memory);
175 }
176
177 /* Pop the top item off of CTX's stack. */
178
179 static void
180 dwarf_expr_pop (struct dwarf_expr_context *ctx)
181 {
182 if (ctx->stack_len <= 0)
183 error (_("dwarf expression stack underflow"));
184 ctx->stack_len--;
185 }
186
187 /* Retrieve the N'th item on CTX's stack. */
188
189 struct value *
190 dwarf_expr_fetch (struct dwarf_expr_context *ctx, int n)
191 {
192 if (ctx->stack_len <= n)
193 error (_("Asked for position %d of stack, "
194 "stack only has %d elements on it."),
195 n, ctx->stack_len);
196 return ctx->stack[ctx->stack_len - (1 + n)].value;
197 }
198
199 /* Require that TYPE be an integral type; throw an exception if not. */
200
201 static void
202 dwarf_require_integral (struct type *type)
203 {
204 if (TYPE_CODE (type) != TYPE_CODE_INT
205 && TYPE_CODE (type) != TYPE_CODE_CHAR
206 && TYPE_CODE (type) != TYPE_CODE_BOOL)
207 error (_("integral type expected in DWARF expression"));
208 }
209
210 /* Return the unsigned form of TYPE. TYPE is necessarily an integral
211 type. */
212
213 static struct type *
214 get_unsigned_type (struct gdbarch *gdbarch, struct type *type)
215 {
216 switch (TYPE_LENGTH (type))
217 {
218 case 1:
219 return builtin_type (gdbarch)->builtin_uint8;
220 case 2:
221 return builtin_type (gdbarch)->builtin_uint16;
222 case 4:
223 return builtin_type (gdbarch)->builtin_uint32;
224 case 8:
225 return builtin_type (gdbarch)->builtin_uint64;
226 default:
227 error (_("no unsigned variant found for type, while evaluating "
228 "DWARF expression"));
229 }
230 }
231
232 /* Return the signed form of TYPE. TYPE is necessarily an integral
233 type. */
234
235 static struct type *
236 get_signed_type (struct gdbarch *gdbarch, struct type *type)
237 {
238 switch (TYPE_LENGTH (type))
239 {
240 case 1:
241 return builtin_type (gdbarch)->builtin_int8;
242 case 2:
243 return builtin_type (gdbarch)->builtin_int16;
244 case 4:
245 return builtin_type (gdbarch)->builtin_int32;
246 case 8:
247 return builtin_type (gdbarch)->builtin_int64;
248 default:
249 error (_("no signed variant found for type, while evaluating "
250 "DWARF expression"));
251 }
252 }
253
/* Retrieve the N'th item on CTX's stack, converted to an address.
   The entry must be integral; throws otherwise.  */

CORE_ADDR
dwarf_expr_fetch_address (struct dwarf_expr_context *ctx, int n)
{
  struct value *result_val = dwarf_expr_fetch (ctx, n);
  enum bfd_endian byte_order = gdbarch_byte_order (ctx->gdbarch);
  ULONGEST result;

  dwarf_require_integral (value_type (result_val));
  result = extract_unsigned_integer (value_contents (result_val),
				     TYPE_LENGTH (value_type (result_val)),
				     byte_order);

  /* For most architectures, calling extract_unsigned_integer() alone
     is sufficient for extracting an address.  However, some
     architectures (e.g. MIPS) use signed addresses and using
     extract_unsigned_integer() will not produce a correct
     result.  Make sure we invoke gdbarch_integer_to_address()
     for those architectures which require it.  */
  if (gdbarch_integer_to_address_p (ctx->gdbarch))
    {
      /* Re-serialize the value into target byte order before handing
	 it to the gdbarch hook.  NOTE(review): BUF is sized by
	 ctx->addr_size while INT_TYPE takes the stack value's own
	 size; these are assumed to agree here -- confirm.  */
      gdb_byte *buf = alloca (ctx->addr_size);
      struct type *int_type = get_unsigned_type (ctx->gdbarch,
						 value_type (result_val));

      store_unsigned_integer (buf, ctx->addr_size, byte_order, result);
      return gdbarch_integer_to_address (ctx->gdbarch, int_type, buf);
    }

  return (CORE_ADDR) result;
}
286
287 /* Retrieve the in_stack_memory flag of the N'th item on CTX's stack. */
288
289 int
290 dwarf_expr_fetch_in_stack_memory (struct dwarf_expr_context *ctx, int n)
291 {
292 if (ctx->stack_len <= n)
293 error (_("Asked for position %d of stack, "
294 "stack only has %d elements on it."),
295 n, ctx->stack_len);
296 return ctx->stack[ctx->stack_len - (1 + n)].in_stack_memory;
297 }
298
299 /* Return true if the expression stack is empty. */
300
301 static int
302 dwarf_expr_stack_empty_p (struct dwarf_expr_context *ctx)
303 {
304 return ctx->stack_len == 0;
305 }
306
/* Add a new piece to CTX's piece list.  The piece's location kind is
   taken from CTX->location; SIZE and OFFSET are recorded verbatim.
   Depending on the location kind, the piece also captures the
   current literal data, the top stack entry, or the referenced
   DIE.  */
static void
add_piece (struct dwarf_expr_context *ctx, ULONGEST size, ULONGEST offset)
{
  struct dwarf_expr_piece *p;

  /* Grow the piece array by one and point P at the new slot.  */
  ctx->num_pieces++;

  ctx->pieces = xrealloc (ctx->pieces,
			  (ctx->num_pieces
			   * sizeof (struct dwarf_expr_piece)));

  p = &ctx->pieces[ctx->num_pieces - 1];
  p->location = ctx->location;
  p->size = size;
  p->offset = offset;

  if (p->location == DWARF_VALUE_LITERAL)
    {
      /* Literal pieces carry their bytes directly.  */
      p->v.literal.data = ctx->data;
      p->v.literal.length = ctx->len;
    }
  else if (dwarf_expr_stack_empty_p (ctx))
    {
      /* An empty stack means this piece has no location at all.  */
      p->location = DWARF_VALUE_OPTIMIZED_OUT;
      /* Also reset the context's location, for our callers.  This is
	 a somewhat strange approach, but this lets us avoid setting
	 the location to DWARF_VALUE_MEMORY in all the individual
	 cases in the evaluator.  */
      ctx->location = DWARF_VALUE_OPTIMIZED_OUT;
    }
  else if (p->location == DWARF_VALUE_MEMORY)
    {
      /* The stack top is the piece's address.  */
      p->v.mem.addr = dwarf_expr_fetch_address (ctx, 0);
      p->v.mem.in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, 0);
    }
  else if (p->location == DWARF_VALUE_IMPLICIT_POINTER)
    {
      /* CTX->len holds the referred-to DIE here (stashed by the
	 DW_OP_GNU_implicit_pointer handler); the stack top is the
	 byte offset into the pointed-to object.  */
      p->v.ptr.die = ctx->len;
      p->v.ptr.offset = value_as_long (dwarf_expr_fetch (ctx, 0));
    }
  else if (p->location == DWARF_VALUE_REGISTER)
    /* The stack top is the register number.  */
    p->v.regno = value_as_long (dwarf_expr_fetch (ctx, 0));
  else
    {
      /* Any other kind: the piece's value is the stack top itself.  */
      p->v.value = dwarf_expr_fetch (ctx, 0);
    }
}
355
/* Evaluate the expression at ADDR (LEN bytes long) using the context
   CTX.  The result is left in CTX's stack/location/piece state.  */

void
dwarf_expr_eval (struct dwarf_expr_context *ctx, const gdb_byte *addr,
		 size_t len)
{
  int old_recursion_depth = ctx->recursion_depth;

  execute_stack_op (ctx, addr, addr + len);

  /* CTX RECURSION_DEPTH becomes invalid if an exception was thrown here.  */

  gdb_assert (ctx->recursion_depth == old_recursion_depth);
}
371
372 /* Decode the unsigned LEB128 constant at BUF into the variable pointed to
373 by R, and return the new value of BUF. Verify that it doesn't extend
374 past BUF_END. R can be NULL, the constant is then only skipped. */
375
376 const gdb_byte *
377 read_uleb128 (const gdb_byte *buf, const gdb_byte *buf_end, ULONGEST * r)
378 {
379 unsigned shift = 0;
380 ULONGEST result = 0;
381 gdb_byte byte;
382
383 while (1)
384 {
385 if (buf >= buf_end)
386 error (_("read_uleb128: Corrupted DWARF expression."));
387
388 byte = *buf++;
389 result |= ((ULONGEST) (byte & 0x7f)) << shift;
390 if ((byte & 0x80) == 0)
391 break;
392 shift += 7;
393 }
394 if (r)
395 *r = result;
396 return buf;
397 }
398
399 /* Decode the signed LEB128 constant at BUF into the variable pointed to
400 by R, and return the new value of BUF. Verify that it doesn't extend
401 past BUF_END. R can be NULL, the constant is then only skipped. */
402
403 const gdb_byte *
404 read_sleb128 (const gdb_byte *buf, const gdb_byte *buf_end, LONGEST * r)
405 {
406 unsigned shift = 0;
407 LONGEST result = 0;
408 gdb_byte byte;
409
410 while (1)
411 {
412 if (buf >= buf_end)
413 error (_("read_sleb128: Corrupted DWARF expression."));
414
415 byte = *buf++;
416 result |= ((ULONGEST) (byte & 0x7f)) << shift;
417 shift += 7;
418 if ((byte & 0x80) == 0)
419 break;
420 }
421 if (shift < (sizeof (*r) * 8) && (byte & 0x40) != 0)
422 result |= -(((LONGEST) 1) << shift);
423
424 if (r)
425 *r = result;
426 return buf;
427 }
428 \f
429
430 /* Check that the current operator is either at the end of an
431 expression, or that it is followed by a composition operator. */
432
433 void
434 dwarf_expr_require_composition (const gdb_byte *op_ptr, const gdb_byte *op_end,
435 const char *op_name)
436 {
437 /* It seems like DW_OP_GNU_uninit should be handled here. However,
438 it doesn't seem to make sense for DW_OP_*_value, and it was not
439 checked at the other place that this function is called. */
440 if (op_ptr != op_end && *op_ptr != DW_OP_piece && *op_ptr != DW_OP_bit_piece)
441 error (_("DWARF-2 expression error: `%s' operations must be "
442 "used either alone or in conjunction with DW_OP_piece "
443 "or DW_OP_bit_piece."),
444 op_name);
445 }
446
/* Return true iff the types T1 and T2 are "the same".  This only does
   checks that might reasonably be needed to compare DWARF base
   types: type code, signedness and length.  */

static int
base_types_equal_p (struct type *t1, struct type *t2)
{
  return (TYPE_CODE (t1) == TYPE_CODE (t2)
	  && TYPE_UNSIGNED (t1) == TYPE_UNSIGNED (t2)
	  && TYPE_LENGTH (t1) == TYPE_LENGTH (t2));
}
460
461 /* A convenience function to call get_base_type on CTX and return the
462 result. DIE is the DIE whose type we need. SIZE is non-zero if
463 this function should verify that the resulting type has the correct
464 size. */
465
466 static struct type *
467 dwarf_get_base_type (struct dwarf_expr_context *ctx, ULONGEST die, int size)
468 {
469 struct type *result;
470
471 if (ctx->funcs->get_base_type)
472 {
473 result = ctx->funcs->get_base_type (ctx, die);
474 if (result == NULL)
475 error (_("Could not find type for DW_OP_GNU_const_type"));
476 if (size != 0 && TYPE_LENGTH (result) != size)
477 error (_("DW_OP_GNU_const_type has different sizes for type and data"));
478 }
479 else
480 /* Anything will do. */
481 result = builtin_type (ctx->gdbarch)->builtin_int;
482
483 return result;
484 }
485
/* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_reg* return the
   DWARF register number.  Otherwise return -1.  */

int
dwarf_block_to_dwarf_reg (const gdb_byte *buf, const gdb_byte *buf_end)
{
  ULONGEST dwarf_reg;

  /* An empty block cannot name a register.  */
  if (buf_end <= buf)
    return -1;
  /* DW_OP_reg0..DW_OP_reg31 encode the register number in the opcode
     itself; it must be the block's only operation.  */
  if (*buf >= DW_OP_reg0 && *buf <= DW_OP_reg31)
    {
      if (buf_end - buf != 1)
	return -1;
      return *buf - DW_OP_reg0;
    }

  /* DW_OP_GNU_regval_type: a ULEB register number followed by a
     second ULEB operand, which is read and discarded here.  */
  if (*buf == DW_OP_GNU_regval_type)
    {
      buf++;
      buf = read_uleb128 (buf, buf_end, &dwarf_reg);
      buf = read_uleb128 (buf, buf_end, NULL);
    }
  /* DW_OP_regx: the register number as a ULEB operand.  */
  else if (*buf == DW_OP_regx)
    {
      buf++;
      buf = read_uleb128 (buf, buf_end, &dwarf_reg);
    }
  else
    return -1;
  /* Reject trailing bytes and register numbers that do not round-trip
     through the int return type.  */
  if (buf != buf_end || (int) dwarf_reg != dwarf_reg)
    return -1;
  return dwarf_reg;
}
520
521 /* The engine for the expression evaluator. Using the context in CTX,
522 evaluate the expression between OP_PTR and OP_END. */
523
524 static void
525 execute_stack_op (struct dwarf_expr_context *ctx,
526 const gdb_byte *op_ptr, const gdb_byte *op_end)
527 {
528 enum bfd_endian byte_order = gdbarch_byte_order (ctx->gdbarch);
529 /* Old-style "untyped" DWARF values need special treatment in a
530 couple of places, specifically DW_OP_mod and DW_OP_shr. We need
531 a special type for these values so we can distinguish them from
532 values that have an explicit type, because explicitly-typed
533 values do not need special treatment. This special type must be
534 different (in the `==' sense) from any base type coming from the
535 CU. */
536 struct type *address_type = dwarf_expr_address_type (ctx);
537
538 ctx->location = DWARF_VALUE_MEMORY;
539 ctx->initialized = 1; /* Default is initialized. */
540
541 if (ctx->recursion_depth > ctx->max_recursion_depth)
542 error (_("DWARF-2 expression error: Loop detected (%d)."),
543 ctx->recursion_depth);
544 ctx->recursion_depth++;
545
546 while (op_ptr < op_end)
547 {
548 enum dwarf_location_atom op = *op_ptr++;
549 ULONGEST result;
550 /* Assume the value is not in stack memory.
551 Code that knows otherwise sets this to 1.
552 Some arithmetic on stack addresses can probably be assumed to still
553 be a stack address, but we skip this complication for now.
554 This is just an optimization, so it's always ok to punt
555 and leave this as 0. */
556 int in_stack_memory = 0;
557 ULONGEST uoffset, reg;
558 LONGEST offset;
559 struct value *result_val = NULL;
560
561 /* The DWARF expression might have a bug causing an infinite
562 loop. In that case, quitting is the only way out. */
563 QUIT;
564
565 switch (op)
566 {
567 case DW_OP_lit0:
568 case DW_OP_lit1:
569 case DW_OP_lit2:
570 case DW_OP_lit3:
571 case DW_OP_lit4:
572 case DW_OP_lit5:
573 case DW_OP_lit6:
574 case DW_OP_lit7:
575 case DW_OP_lit8:
576 case DW_OP_lit9:
577 case DW_OP_lit10:
578 case DW_OP_lit11:
579 case DW_OP_lit12:
580 case DW_OP_lit13:
581 case DW_OP_lit14:
582 case DW_OP_lit15:
583 case DW_OP_lit16:
584 case DW_OP_lit17:
585 case DW_OP_lit18:
586 case DW_OP_lit19:
587 case DW_OP_lit20:
588 case DW_OP_lit21:
589 case DW_OP_lit22:
590 case DW_OP_lit23:
591 case DW_OP_lit24:
592 case DW_OP_lit25:
593 case DW_OP_lit26:
594 case DW_OP_lit27:
595 case DW_OP_lit28:
596 case DW_OP_lit29:
597 case DW_OP_lit30:
598 case DW_OP_lit31:
599 result = op - DW_OP_lit0;
600 result_val = value_from_ulongest (address_type, result);
601 break;
602
603 case DW_OP_addr:
604 result = extract_unsigned_integer (op_ptr,
605 ctx->addr_size, byte_order);
606 op_ptr += ctx->addr_size;
607 /* Some versions of GCC emit DW_OP_addr before
608 DW_OP_GNU_push_tls_address. In this case the value is an
609 index, not an address. We don't support things like
610 branching between the address and the TLS op. */
611 if (op_ptr >= op_end || *op_ptr != DW_OP_GNU_push_tls_address)
612 result += ctx->offset;
613 result_val = value_from_ulongest (address_type, result);
614 break;
615
616 case DW_OP_const1u:
617 result = extract_unsigned_integer (op_ptr, 1, byte_order);
618 result_val = value_from_ulongest (address_type, result);
619 op_ptr += 1;
620 break;
621 case DW_OP_const1s:
622 result = extract_signed_integer (op_ptr, 1, byte_order);
623 result_val = value_from_ulongest (address_type, result);
624 op_ptr += 1;
625 break;
626 case DW_OP_const2u:
627 result = extract_unsigned_integer (op_ptr, 2, byte_order);
628 result_val = value_from_ulongest (address_type, result);
629 op_ptr += 2;
630 break;
631 case DW_OP_const2s:
632 result = extract_signed_integer (op_ptr, 2, byte_order);
633 result_val = value_from_ulongest (address_type, result);
634 op_ptr += 2;
635 break;
636 case DW_OP_const4u:
637 result = extract_unsigned_integer (op_ptr, 4, byte_order);
638 result_val = value_from_ulongest (address_type, result);
639 op_ptr += 4;
640 break;
641 case DW_OP_const4s:
642 result = extract_signed_integer (op_ptr, 4, byte_order);
643 result_val = value_from_ulongest (address_type, result);
644 op_ptr += 4;
645 break;
646 case DW_OP_const8u:
647 result = extract_unsigned_integer (op_ptr, 8, byte_order);
648 result_val = value_from_ulongest (address_type, result);
649 op_ptr += 8;
650 break;
651 case DW_OP_const8s:
652 result = extract_signed_integer (op_ptr, 8, byte_order);
653 result_val = value_from_ulongest (address_type, result);
654 op_ptr += 8;
655 break;
656 case DW_OP_constu:
657 op_ptr = read_uleb128 (op_ptr, op_end, &uoffset);
658 result = uoffset;
659 result_val = value_from_ulongest (address_type, result);
660 break;
661 case DW_OP_consts:
662 op_ptr = read_sleb128 (op_ptr, op_end, &offset);
663 result = offset;
664 result_val = value_from_ulongest (address_type, result);
665 break;
666
667 /* The DW_OP_reg operations are required to occur alone in
668 location expressions. */
669 case DW_OP_reg0:
670 case DW_OP_reg1:
671 case DW_OP_reg2:
672 case DW_OP_reg3:
673 case DW_OP_reg4:
674 case DW_OP_reg5:
675 case DW_OP_reg6:
676 case DW_OP_reg7:
677 case DW_OP_reg8:
678 case DW_OP_reg9:
679 case DW_OP_reg10:
680 case DW_OP_reg11:
681 case DW_OP_reg12:
682 case DW_OP_reg13:
683 case DW_OP_reg14:
684 case DW_OP_reg15:
685 case DW_OP_reg16:
686 case DW_OP_reg17:
687 case DW_OP_reg18:
688 case DW_OP_reg19:
689 case DW_OP_reg20:
690 case DW_OP_reg21:
691 case DW_OP_reg22:
692 case DW_OP_reg23:
693 case DW_OP_reg24:
694 case DW_OP_reg25:
695 case DW_OP_reg26:
696 case DW_OP_reg27:
697 case DW_OP_reg28:
698 case DW_OP_reg29:
699 case DW_OP_reg30:
700 case DW_OP_reg31:
701 if (op_ptr != op_end
702 && *op_ptr != DW_OP_piece
703 && *op_ptr != DW_OP_bit_piece
704 && *op_ptr != DW_OP_GNU_uninit)
705 error (_("DWARF-2 expression error: DW_OP_reg operations must be "
706 "used either alone or in conjunction with DW_OP_piece "
707 "or DW_OP_bit_piece."));
708
709 result = op - DW_OP_reg0;
710 result_val = value_from_ulongest (address_type, result);
711 ctx->location = DWARF_VALUE_REGISTER;
712 break;
713
714 case DW_OP_regx:
715 op_ptr = read_uleb128 (op_ptr, op_end, &reg);
716 dwarf_expr_require_composition (op_ptr, op_end, "DW_OP_regx");
717
718 result = reg;
719 result_val = value_from_ulongest (address_type, result);
720 ctx->location = DWARF_VALUE_REGISTER;
721 break;
722
723 case DW_OP_implicit_value:
724 {
725 ULONGEST len;
726
727 op_ptr = read_uleb128 (op_ptr, op_end, &len);
728 if (op_ptr + len > op_end)
729 error (_("DW_OP_implicit_value: too few bytes available."));
730 ctx->len = len;
731 ctx->data = op_ptr;
732 ctx->location = DWARF_VALUE_LITERAL;
733 op_ptr += len;
734 dwarf_expr_require_composition (op_ptr, op_end,
735 "DW_OP_implicit_value");
736 }
737 goto no_push;
738
739 case DW_OP_stack_value:
740 ctx->location = DWARF_VALUE_STACK;
741 dwarf_expr_require_composition (op_ptr, op_end, "DW_OP_stack_value");
742 goto no_push;
743
744 case DW_OP_GNU_implicit_pointer:
745 {
746 ULONGEST die;
747 LONGEST len;
748
749 if (ctx->ref_addr_size == -1)
750 error (_("DWARF-2 expression error: DW_OP_GNU_implicit_pointer "
751 "is not allowed in frame context"));
752
753 /* The referred-to DIE. */
754 ctx->len = extract_unsigned_integer (op_ptr, ctx->ref_addr_size,
755 byte_order);
756 op_ptr += ctx->ref_addr_size;
757
758 /* The byte offset into the data. */
759 op_ptr = read_sleb128 (op_ptr, op_end, &len);
760 result = (ULONGEST) len;
761 result_val = value_from_ulongest (address_type, result);
762
763 ctx->location = DWARF_VALUE_IMPLICIT_POINTER;
764 dwarf_expr_require_composition (op_ptr, op_end,
765 "DW_OP_GNU_implicit_pointer");
766 }
767 break;
768
769 case DW_OP_breg0:
770 case DW_OP_breg1:
771 case DW_OP_breg2:
772 case DW_OP_breg3:
773 case DW_OP_breg4:
774 case DW_OP_breg5:
775 case DW_OP_breg6:
776 case DW_OP_breg7:
777 case DW_OP_breg8:
778 case DW_OP_breg9:
779 case DW_OP_breg10:
780 case DW_OP_breg11:
781 case DW_OP_breg12:
782 case DW_OP_breg13:
783 case DW_OP_breg14:
784 case DW_OP_breg15:
785 case DW_OP_breg16:
786 case DW_OP_breg17:
787 case DW_OP_breg18:
788 case DW_OP_breg19:
789 case DW_OP_breg20:
790 case DW_OP_breg21:
791 case DW_OP_breg22:
792 case DW_OP_breg23:
793 case DW_OP_breg24:
794 case DW_OP_breg25:
795 case DW_OP_breg26:
796 case DW_OP_breg27:
797 case DW_OP_breg28:
798 case DW_OP_breg29:
799 case DW_OP_breg30:
800 case DW_OP_breg31:
801 {
802 op_ptr = read_sleb128 (op_ptr, op_end, &offset);
803 result = (ctx->funcs->read_reg) (ctx->baton, op - DW_OP_breg0);
804 result += offset;
805 result_val = value_from_ulongest (address_type, result);
806 }
807 break;
808 case DW_OP_bregx:
809 {
810 op_ptr = read_uleb128 (op_ptr, op_end, &reg);
811 op_ptr = read_sleb128 (op_ptr, op_end, &offset);
812 result = (ctx->funcs->read_reg) (ctx->baton, reg);
813 result += offset;
814 result_val = value_from_ulongest (address_type, result);
815 }
816 break;
817 case DW_OP_fbreg:
818 {
819 const gdb_byte *datastart;
820 size_t datalen;
821 unsigned int before_stack_len;
822
823 op_ptr = read_sleb128 (op_ptr, op_end, &offset);
824 /* Rather than create a whole new context, we simply
825 record the stack length before execution, then reset it
826 afterwards, effectively erasing whatever the recursive
827 call put there. */
828 before_stack_len = ctx->stack_len;
829 /* FIXME: cagney/2003-03-26: This code should be using
830 get_frame_base_address(), and then implement a dwarf2
831 specific this_base method. */
832 (ctx->funcs->get_frame_base) (ctx->baton, &datastart, &datalen);
833 dwarf_expr_eval (ctx, datastart, datalen);
834 if (ctx->location == DWARF_VALUE_MEMORY)
835 result = dwarf_expr_fetch_address (ctx, 0);
836 else if (ctx->location == DWARF_VALUE_REGISTER)
837 result = (ctx->funcs->read_reg) (ctx->baton,
838 value_as_long (dwarf_expr_fetch (ctx, 0)));
839 else
840 error (_("Not implemented: computing frame "
841 "base using explicit value operator"));
842 result = result + offset;
843 result_val = value_from_ulongest (address_type, result);
844 in_stack_memory = 1;
845 ctx->stack_len = before_stack_len;
846 ctx->location = DWARF_VALUE_MEMORY;
847 }
848 break;
849
850 case DW_OP_dup:
851 result_val = dwarf_expr_fetch (ctx, 0);
852 in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, 0);
853 break;
854
855 case DW_OP_drop:
856 dwarf_expr_pop (ctx);
857 goto no_push;
858
859 case DW_OP_pick:
860 offset = *op_ptr++;
861 result_val = dwarf_expr_fetch (ctx, offset);
862 in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, offset);
863 break;
864
865 case DW_OP_swap:
866 {
867 struct dwarf_stack_value t1, t2;
868
869 if (ctx->stack_len < 2)
870 error (_("Not enough elements for "
871 "DW_OP_swap. Need 2, have %d."),
872 ctx->stack_len);
873 t1 = ctx->stack[ctx->stack_len - 1];
874 t2 = ctx->stack[ctx->stack_len - 2];
875 ctx->stack[ctx->stack_len - 1] = t2;
876 ctx->stack[ctx->stack_len - 2] = t1;
877 goto no_push;
878 }
879
880 case DW_OP_over:
881 result_val = dwarf_expr_fetch (ctx, 1);
882 in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, 1);
883 break;
884
885 case DW_OP_rot:
886 {
887 struct dwarf_stack_value t1, t2, t3;
888
889 if (ctx->stack_len < 3)
890 error (_("Not enough elements for "
891 "DW_OP_rot. Need 3, have %d."),
892 ctx->stack_len);
893 t1 = ctx->stack[ctx->stack_len - 1];
894 t2 = ctx->stack[ctx->stack_len - 2];
895 t3 = ctx->stack[ctx->stack_len - 3];
896 ctx->stack[ctx->stack_len - 1] = t2;
897 ctx->stack[ctx->stack_len - 2] = t3;
898 ctx->stack[ctx->stack_len - 3] = t1;
899 goto no_push;
900 }
901
902 case DW_OP_deref:
903 case DW_OP_deref_size:
904 case DW_OP_GNU_deref_type:
905 {
906 int addr_size = (op == DW_OP_deref ? ctx->addr_size : *op_ptr++);
907 gdb_byte *buf = alloca (addr_size);
908 CORE_ADDR addr = dwarf_expr_fetch_address (ctx, 0);
909 struct type *type;
910
911 dwarf_expr_pop (ctx);
912
913 if (op == DW_OP_GNU_deref_type)
914 {
915 ULONGEST type_die;
916
917 op_ptr = read_uleb128 (op_ptr, op_end, &type_die);
918 type = dwarf_get_base_type (ctx, type_die, 0);
919 }
920 else
921 type = address_type;
922
923 (ctx->funcs->read_mem) (ctx->baton, buf, addr, addr_size);
924
925 /* If the size of the object read from memory is different
926 from the type length, we need to zero-extend it. */
927 if (TYPE_LENGTH (type) != addr_size)
928 {
929 ULONGEST result =
930 extract_unsigned_integer (buf, addr_size, byte_order);
931
932 buf = alloca (TYPE_LENGTH (type));
933 store_unsigned_integer (buf, TYPE_LENGTH (type),
934 byte_order, result);
935 }
936
937 result_val = value_from_contents_and_address (type, buf, addr);
938 break;
939 }
940
941 case DW_OP_abs:
942 case DW_OP_neg:
943 case DW_OP_not:
944 case DW_OP_plus_uconst:
945 {
946 /* Unary operations. */
947 result_val = dwarf_expr_fetch (ctx, 0);
948 dwarf_expr_pop (ctx);
949
950 switch (op)
951 {
952 case DW_OP_abs:
953 if (value_less (result_val,
954 value_zero (value_type (result_val), not_lval)))
955 result_val = value_neg (result_val);
956 break;
957 case DW_OP_neg:
958 result_val = value_neg (result_val);
959 break;
960 case DW_OP_not:
961 dwarf_require_integral (value_type (result_val));
962 result_val = value_complement (result_val);
963 break;
964 case DW_OP_plus_uconst:
965 dwarf_require_integral (value_type (result_val));
966 result = value_as_long (result_val);
967 op_ptr = read_uleb128 (op_ptr, op_end, &reg);
968 result += reg;
969 result_val = value_from_ulongest (address_type, result);
970 break;
971 }
972 }
973 break;
974
975 case DW_OP_and:
976 case DW_OP_div:
977 case DW_OP_minus:
978 case DW_OP_mod:
979 case DW_OP_mul:
980 case DW_OP_or:
981 case DW_OP_plus:
982 case DW_OP_shl:
983 case DW_OP_shr:
984 case DW_OP_shra:
985 case DW_OP_xor:
986 case DW_OP_le:
987 case DW_OP_ge:
988 case DW_OP_eq:
989 case DW_OP_lt:
990 case DW_OP_gt:
991 case DW_OP_ne:
992 {
993 /* Binary operations. */
994 struct value *first, *second;
995
996 second = dwarf_expr_fetch (ctx, 0);
997 dwarf_expr_pop (ctx);
998
999 first = dwarf_expr_fetch (ctx, 0);
1000 dwarf_expr_pop (ctx);
1001
1002 if (! base_types_equal_p (value_type (first), value_type (second)))
1003 error (_("Incompatible types on DWARF stack"));
1004
1005 switch (op)
1006 {
1007 case DW_OP_and:
1008 dwarf_require_integral (value_type (first));
1009 dwarf_require_integral (value_type (second));
1010 result_val = value_binop (first, second, BINOP_BITWISE_AND);
1011 break;
1012 case DW_OP_div:
1013 result_val = value_binop (first, second, BINOP_DIV);
1014 break;
1015 case DW_OP_minus:
1016 result_val = value_binop (first, second, BINOP_SUB);
1017 break;
1018 case DW_OP_mod:
1019 {
1020 int cast_back = 0;
1021 struct type *orig_type = value_type (first);
1022
1023 /* We have to special-case "old-style" untyped values
1024 -- these must have mod computed using unsigned
1025 math. */
1026 if (orig_type == address_type)
1027 {
1028 struct type *utype
1029 = get_unsigned_type (ctx->gdbarch, orig_type);
1030
1031 cast_back = 1;
1032 first = value_cast (utype, first);
1033 second = value_cast (utype, second);
1034 }
1035 /* Note that value_binop doesn't handle float or
1036 decimal float here. This seems unimportant. */
1037 result_val = value_binop (first, second, BINOP_MOD);
1038 if (cast_back)
1039 result_val = value_cast (orig_type, result_val);
1040 }
1041 break;
1042 case DW_OP_mul:
1043 result_val = value_binop (first, second, BINOP_MUL);
1044 break;
1045 case DW_OP_or:
1046 dwarf_require_integral (value_type (first));
1047 dwarf_require_integral (value_type (second));
1048 result_val = value_binop (first, second, BINOP_BITWISE_IOR);
1049 break;
1050 case DW_OP_plus:
1051 result_val = value_binop (first, second, BINOP_ADD);
1052 break;
1053 case DW_OP_shl:
1054 dwarf_require_integral (value_type (first));
1055 dwarf_require_integral (value_type (second));
1056 result_val = value_binop (first, second, BINOP_LSH);
1057 break;
1058 case DW_OP_shr:
1059 dwarf_require_integral (value_type (first));
1060 dwarf_require_integral (value_type (second));
1061 if (!TYPE_UNSIGNED (value_type (first)))
1062 {
1063 struct type *utype
1064 = get_unsigned_type (ctx->gdbarch, value_type (first));
1065
1066 first = value_cast (utype, first);
1067 }
1068
1069 result_val = value_binop (first, second, BINOP_RSH);
1070 /* Make sure we wind up with the same type we started
1071 with. */
1072 if (value_type (result_val) != value_type (second))
1073 result_val = value_cast (value_type (second), result_val);
1074 break;
1075 case DW_OP_shra:
1076 dwarf_require_integral (value_type (first));
1077 dwarf_require_integral (value_type (second));
1078 if (TYPE_UNSIGNED (value_type (first)))
1079 {
1080 struct type *stype
1081 = get_signed_type (ctx->gdbarch, value_type (first));
1082
1083 first = value_cast (stype, first);
1084 }
1085
1086 result_val = value_binop (first, second, BINOP_RSH);
1087 /* Make sure we wind up with the same type we started
1088 with. */
1089 if (value_type (result_val) != value_type (second))
1090 result_val = value_cast (value_type (second), result_val);
1091 break;
1092 case DW_OP_xor:
1093 dwarf_require_integral (value_type (first));
1094 dwarf_require_integral (value_type (second));
1095 result_val = value_binop (first, second, BINOP_BITWISE_XOR);
1096 break;
1097 case DW_OP_le:
1098 /* A <= B is !(B < A). */
1099 result = ! value_less (second, first);
1100 result_val = value_from_ulongest (address_type, result);
1101 break;
1102 case DW_OP_ge:
1103 /* A >= B is !(A < B). */
1104 result = ! value_less (first, second);
1105 result_val = value_from_ulongest (address_type, result);
1106 break;
1107 case DW_OP_eq:
1108 result = value_equal (first, second);
1109 result_val = value_from_ulongest (address_type, result);
1110 break;
1111 case DW_OP_lt:
1112 result = value_less (first, second);
1113 result_val = value_from_ulongest (address_type, result);
1114 break;
1115 case DW_OP_gt:
1116 /* A > B is B < A. */
1117 result = value_less (second, first);
1118 result_val = value_from_ulongest (address_type, result);
1119 break;
1120 case DW_OP_ne:
1121 result = ! value_equal (first, second);
1122 result_val = value_from_ulongest (address_type, result);
1123 break;
1124 default:
1125 internal_error (__FILE__, __LINE__,
1126 _("Can't be reached."));
1127 }
1128 }
1129 break;
1130
1131 case DW_OP_call_frame_cfa:
1132 result = (ctx->funcs->get_frame_cfa) (ctx->baton);
1133 result_val = value_from_ulongest (address_type, result);
1134 in_stack_memory = 1;
1135 break;
1136
1137 case DW_OP_GNU_push_tls_address:
1138 /* Variable is at a constant offset in the thread-local
1139 storage block into the objfile for the current thread and
1140 the dynamic linker module containing this expression. Here
1141 we return returns the offset from that base. The top of the
1142 stack has the offset from the beginning of the thread
1143 control block at which the variable is located. Nothing
1144 should follow this operator, so the top of stack would be
1145 returned. */
1146 result = value_as_long (dwarf_expr_fetch (ctx, 0));
1147 dwarf_expr_pop (ctx);
1148 result = (ctx->funcs->get_tls_address) (ctx->baton, result);
1149 result_val = value_from_ulongest (address_type, result);
1150 break;
1151
1152 case DW_OP_skip:
1153 offset = extract_signed_integer (op_ptr, 2, byte_order);
1154 op_ptr += 2;
1155 op_ptr += offset;
1156 goto no_push;
1157
1158 case DW_OP_bra:
1159 {
1160 struct value *val;
1161
1162 offset = extract_signed_integer (op_ptr, 2, byte_order);
1163 op_ptr += 2;
1164 val = dwarf_expr_fetch (ctx, 0);
1165 dwarf_require_integral (value_type (val));
1166 if (value_as_long (val) != 0)
1167 op_ptr += offset;
1168 dwarf_expr_pop (ctx);
1169 }
1170 goto no_push;
1171
1172 case DW_OP_nop:
1173 goto no_push;
1174
1175 case DW_OP_piece:
1176 {
1177 ULONGEST size;
1178
1179 /* Record the piece. */
1180 op_ptr = read_uleb128 (op_ptr, op_end, &size);
1181 add_piece (ctx, 8 * size, 0);
1182
1183 /* Pop off the address/regnum, and reset the location
1184 type. */
1185 if (ctx->location != DWARF_VALUE_LITERAL
1186 && ctx->location != DWARF_VALUE_OPTIMIZED_OUT)
1187 dwarf_expr_pop (ctx);
1188 ctx->location = DWARF_VALUE_MEMORY;
1189 }
1190 goto no_push;
1191
1192 case DW_OP_bit_piece:
1193 {
1194 ULONGEST size, offset;
1195
1196 /* Record the piece. */
1197 op_ptr = read_uleb128 (op_ptr, op_end, &size);
1198 op_ptr = read_uleb128 (op_ptr, op_end, &offset);
1199 add_piece (ctx, size, offset);
1200
1201 /* Pop off the address/regnum, and reset the location
1202 type. */
1203 if (ctx->location != DWARF_VALUE_LITERAL
1204 && ctx->location != DWARF_VALUE_OPTIMIZED_OUT)
1205 dwarf_expr_pop (ctx);
1206 ctx->location = DWARF_VALUE_MEMORY;
1207 }
1208 goto no_push;
1209
1210 case DW_OP_GNU_uninit:
1211 if (op_ptr != op_end)
1212 error (_("DWARF-2 expression error: DW_OP_GNU_uninit must always "
1213 "be the very last op."));
1214
1215 ctx->initialized = 0;
1216 goto no_push;
1217
1218 case DW_OP_call2:
1219 result = extract_unsigned_integer (op_ptr, 2, byte_order);
1220 op_ptr += 2;
1221 ctx->funcs->dwarf_call (ctx, result);
1222 goto no_push;
1223
1224 case DW_OP_call4:
1225 result = extract_unsigned_integer (op_ptr, 4, byte_order);
1226 op_ptr += 4;
1227 ctx->funcs->dwarf_call (ctx, result);
1228 goto no_push;
1229
1230 case DW_OP_GNU_entry_value:
1231 {
1232 ULONGEST len;
1233 int dwarf_reg;
1234 CORE_ADDR deref_size;
1235
1236 op_ptr = read_uleb128 (op_ptr, op_end, &len);
1237 if (op_ptr + len > op_end)
1238 error (_("DW_OP_GNU_entry_value: too few bytes available."));
1239
1240 dwarf_reg = dwarf_block_to_dwarf_reg (op_ptr, op_ptr + len);
1241 if (dwarf_reg != -1)
1242 {
1243 op_ptr += len;
1244 ctx->funcs->push_dwarf_reg_entry_value (ctx, dwarf_reg,
1245 0 /* unused */);
1246 goto no_push;
1247 }
1248
1249 error (_("DWARF-2 expression error: DW_OP_GNU_entry_value is "
1250 "supported only for single DW_OP_reg*"));
1251 }
1252
1253 case DW_OP_GNU_const_type:
1254 {
1255 ULONGEST type_die;
1256 int n;
1257 const gdb_byte *data;
1258 struct type *type;
1259
1260 op_ptr = read_uleb128 (op_ptr, op_end, &type_die);
1261 n = *op_ptr++;
1262 data = op_ptr;
1263 op_ptr += n;
1264
1265 type = dwarf_get_base_type (ctx, type_die, n);
1266 result_val = value_from_contents (type, data);
1267 }
1268 break;
1269
1270 case DW_OP_GNU_regval_type:
1271 {
1272 ULONGEST type_die;
1273 struct type *type;
1274
1275 op_ptr = read_uleb128 (op_ptr, op_end, &reg);
1276 op_ptr = read_uleb128 (op_ptr, op_end, &type_die);
1277
1278 type = dwarf_get_base_type (ctx, type_die, 0);
1279 result = (ctx->funcs->read_reg) (ctx->baton, reg);
1280 result_val = value_from_ulongest (address_type, result);
1281 result_val = value_from_contents (type,
1282 value_contents_all (result_val));
1283 }
1284 break;
1285
1286 case DW_OP_GNU_convert:
1287 case DW_OP_GNU_reinterpret:
1288 {
1289 ULONGEST type_die;
1290 struct type *type;
1291
1292 op_ptr = read_uleb128 (op_ptr, op_end, &type_die);
1293
1294 if (type_die == 0)
1295 type = address_type;
1296 else
1297 type = dwarf_get_base_type (ctx, type_die, 0);
1298
1299 result_val = dwarf_expr_fetch (ctx, 0);
1300 dwarf_expr_pop (ctx);
1301
1302 if (op == DW_OP_GNU_convert)
1303 result_val = value_cast (type, result_val);
1304 else if (type == value_type (result_val))
1305 {
1306 /* Nothing. */
1307 }
1308 else if (TYPE_LENGTH (type)
1309 != TYPE_LENGTH (value_type (result_val)))
1310 error (_("DW_OP_GNU_reinterpret has wrong size"));
1311 else
1312 result_val
1313 = value_from_contents (type,
1314 value_contents_all (result_val));
1315 }
1316 break;
1317
1318 default:
1319 error (_("Unhandled dwarf expression opcode 0x%x"), op);
1320 }
1321
1322 /* Most things push a result value. */
1323 gdb_assert (result_val != NULL);
1324 dwarf_expr_push (ctx, result_val, in_stack_memory);
1325 no_push:
1326 ;
1327 }
1328
1329 /* To simplify our main caller, if the result is an implicit
1330 pointer, then make a pieced value. This is ok because we can't
1331 have implicit pointers in contexts where pieces are invalid. */
1332 if (ctx->location == DWARF_VALUE_IMPLICIT_POINTER)
1333 add_piece (ctx, 8 * ctx->addr_size, 0);
1334
1335 abort_expression:
1336 ctx->recursion_depth--;
1337 gdb_assert (ctx->recursion_depth >= 0);
1338 }
1339
1340 /* Stub dwarf_expr_context_funcs.read_reg implementation. */
1341
1342 CORE_ADDR
1343 ctx_no_read_reg (void *baton, int regnum)
1344 {
1345 error (_("Registers access is invalid in this context"));
1346 }
1347
1348 /* Stub dwarf_expr_context_funcs.get_frame_base implementation. */
1349
1350 void
1351 ctx_no_get_frame_base (void *baton, const gdb_byte **start, size_t *length)
1352 {
1353 error (_("%s is invalid in this context"), "DW_OP_fbreg");
1354 }
1355
1356 /* Stub dwarf_expr_context_funcs.get_frame_cfa implementation. */
1357
1358 CORE_ADDR
1359 ctx_no_get_frame_cfa (void *baton)
1360 {
1361 error (_("%s is invalid in this context"), "DW_OP_call_frame_cfa");
1362 }
1363
1364 /* Stub dwarf_expr_context_funcs.get_frame_pc implementation. */
1365
1366 CORE_ADDR
1367 ctx_no_get_frame_pc (void *baton)
1368 {
1369 error (_("%s is invalid in this context"), "DW_OP_GNU_implicit_pointer");
1370 }
1371
1372 /* Stub dwarf_expr_context_funcs.get_tls_address implementation. */
1373
1374 CORE_ADDR
1375 ctx_no_get_tls_address (void *baton, CORE_ADDR offset)
1376 {
1377 error (_("%s is invalid in this context"), "DW_OP_GNU_push_tls_address");
1378 }
1379
1380 /* Stub dwarf_expr_context_funcs.dwarf_call implementation. */
1381
1382 void
1383 ctx_no_dwarf_call (struct dwarf_expr_context *ctx, size_t die_offset)
1384 {
1385 error (_("%s is invalid in this context"), "DW_OP_call*");
1386 }
1387
1388 /* Stub dwarf_expr_context_funcs.get_base_type implementation. */
1389
1390 struct type *
1391 ctx_no_get_base_type (struct dwarf_expr_context *ctx, size_t die)
1392 {
1393 error (_("Support for typed DWARF is not supported in this context"));
1394 }
1395
1396 /* Stub dwarf_expr_context_funcs.push_dwarf_block_entry_value
1397 implementation. */
1398
1399 void
1400 ctx_no_push_dwarf_reg_entry_value (struct dwarf_expr_context *ctx,
1401 int dwarf_reg, CORE_ADDR fb_offset)
1402 {
1403 internal_error (__FILE__, __LINE__,
1404 _("Support for DW_OP_GNU_entry_value is unimplemented"));
1405 }
1406
1407 void
1408 _initialize_dwarf2expr (void)
1409 {
1410 dwarf_arch_cookie
1411 = gdbarch_data_register_post_init (dwarf_gdbarch_types_init);
1412 }
This page took 0.061429 seconds and 5 git commands to generate.