/* DWARF 2 Expression Evaluator.

   Copyright (C) 2001-2016 Free Software Foundation, Inc.

   Contributed by Daniel Berlin (dan@dberlin.org)

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "defs.h"
#include "symtab.h"
#include "gdbtypes.h"
#include "value.h"
#include "gdbcore.h"
#include "dwarf2.h"
#include "dwarf2expr.h"
#include "dwarf2loc.h"
/* Local prototypes.  */

static void execute_stack_op (struct dwarf_expr_context *,
                              const gdb_byte *, const gdb_byte *);
/* Cookie for gdbarch data.  */

static struct gdbarch_data *dwarf_arch_cookie;
/* This holds gdbarch-specific types used by the DWARF expression
   evaluator.  See comments in execute_stack_op.  */

struct dwarf_gdbarch_types
{
  struct type *dw_types[3];
};
/* Allocate and fill in dwarf_gdbarch_types for an arch.  */

static void *
dwarf_gdbarch_types_init (struct gdbarch *gdbarch)
{
  struct dwarf_gdbarch_types *types
    = GDBARCH_OBSTACK_ZALLOC (gdbarch, struct dwarf_gdbarch_types);

  /* The types themselves are lazily initialized.  */

  return types;
}
/* Return the type used for DWARF operations where the type is
   unspecified in the DWARF spec.  Only certain sizes are
   supported.  */

static struct type *
dwarf_expr_address_type (struct dwarf_expr_context *ctx)
{
  struct dwarf_gdbarch_types *types
    = (struct dwarf_gdbarch_types *) gdbarch_data (ctx->gdbarch,
                                                   dwarf_arch_cookie);
  int ndx;

  if (ctx->addr_size == 2)
    ndx = 0;
  else if (ctx->addr_size == 4)
    ndx = 1;
  else if (ctx->addr_size == 8)
    ndx = 2;
  else
    error (_("Unsupported address size in DWARF expressions: %d bits"),
           8 * ctx->addr_size);

  if (types->dw_types[ndx] == NULL)
    types->dw_types[ndx]
      = arch_integer_type (ctx->gdbarch,
                           8 * ctx->addr_size,
                           0, "<signed DWARF address type>");

  return types->dw_types[ndx];
}
/* Create a new context for the expression evaluator.  */

struct dwarf_expr_context *
new_dwarf_expr_context (void)
{
  struct dwarf_expr_context *retval;

  retval = XCNEW (struct dwarf_expr_context);
  retval->stack_len = 0;
  retval->stack_allocated = 10;
  retval->stack = XNEWVEC (struct dwarf_stack_value, retval->stack_allocated);
  retval->num_pieces = 0;
  retval->max_recursion_depth = 0x100;
  return retval;
}
/* Release the memory allocated to CTX.  */

void
free_dwarf_expr_context (struct dwarf_expr_context *ctx)
{
  xfree (ctx->stack);
  xfree (ctx->pieces);
  xfree (ctx);
}
/* Helper for make_cleanup_free_dwarf_expr_context.  */

static void
free_dwarf_expr_context_cleanup (void *arg)
{
  free_dwarf_expr_context ((struct dwarf_expr_context *) arg);
}
/* Return a cleanup that calls free_dwarf_expr_context.  */

struct cleanup *
make_cleanup_free_dwarf_expr_context (struct dwarf_expr_context *ctx)
{
  return make_cleanup (free_dwarf_expr_context_cleanup, ctx);
}
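/* Illustrative usage sketch (not part of GDB): the typical lifecycle
   of a context, patterned after the callers in dwarf2loc.c.  The
   helper name sketch_evaluate_block, the callback vector SKETCH_FUNCS
   and the BATON are assumptions made up for this example only.  */
#if 0
static CORE_ADDR
sketch_evaluate_block (struct gdbarch *gdbarch, int addr_size,
                       const struct dwarf_expr_context_funcs *sketch_funcs,
                       void *baton, const gdb_byte *data, size_t size)
{
  struct dwarf_expr_context *ctx = new_dwarf_expr_context ();
  struct cleanup *old_chain = make_cleanup_free_dwarf_expr_context (ctx);
  CORE_ADDR addr = 0;

  /* Fill in the architecture and callbacks before evaluating.  */
  ctx->gdbarch = gdbarch;
  ctx->addr_size = addr_size;
  ctx->funcs = sketch_funcs;
  ctx->baton = baton;

  dwarf_expr_eval (ctx, data, size);
  if (ctx->location == DWARF_VALUE_MEMORY)
    addr = dwarf_expr_fetch_address (ctx, 0);

  do_cleanups (old_chain);
  return addr;
}
#endif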
/* Expand the memory allocated to CTX's stack to contain at least
   NEED more elements than are currently used.  */

static void
dwarf_expr_grow_stack (struct dwarf_expr_context *ctx, size_t need)
{
  if (ctx->stack_len + need > ctx->stack_allocated)
    {
      size_t newlen = ctx->stack_len + need + 10;

      ctx->stack = XRESIZEVEC (struct dwarf_stack_value, ctx->stack, newlen);
      ctx->stack_allocated = newlen;
    }
}
/* Push VALUE onto CTX's stack.  */

static void
dwarf_expr_push (struct dwarf_expr_context *ctx, struct value *value,
                 int in_stack_memory)
{
  struct dwarf_stack_value *v;

  dwarf_expr_grow_stack (ctx, 1);
  v = &ctx->stack[ctx->stack_len++];
  v->value = value;
  v->in_stack_memory = in_stack_memory;
}
/* Push VALUE onto CTX's stack.  */

void
dwarf_expr_push_address (struct dwarf_expr_context *ctx, CORE_ADDR value,
                         int in_stack_memory)
{
  dwarf_expr_push (ctx,
                   value_from_ulongest (dwarf_expr_address_type (ctx), value),
                   in_stack_memory);
}
/* Pop the top item off of CTX's stack.  */

static void
dwarf_expr_pop (struct dwarf_expr_context *ctx)
{
  if (ctx->stack_len <= 0)
    error (_("dwarf expression stack underflow"));
  ctx->stack_len--;
}
/* Retrieve the N'th item on CTX's stack.  */

struct value *
dwarf_expr_fetch (struct dwarf_expr_context *ctx, int n)
{
  if (ctx->stack_len <= n)
    error (_("Asked for position %d of stack, "
             "stack only has %d elements on it."),
           n, ctx->stack_len);
  return ctx->stack[ctx->stack_len - (1 + n)].value;
}
/* Require that TYPE be an integral type; throw an exception if not.  */

static void
dwarf_require_integral (struct type *type)
{
  if (TYPE_CODE (type) != TYPE_CODE_INT
      && TYPE_CODE (type) != TYPE_CODE_CHAR
      && TYPE_CODE (type) != TYPE_CODE_BOOL)
    error (_("integral type expected in DWARF expression"));
}
/* Return the unsigned form of TYPE.  TYPE is necessarily an integral
   type.  */

static struct type *
get_unsigned_type (struct gdbarch *gdbarch, struct type *type)
{
  switch (TYPE_LENGTH (type))
    {
    case 1:
      return builtin_type (gdbarch)->builtin_uint8;
    case 2:
      return builtin_type (gdbarch)->builtin_uint16;
    case 4:
      return builtin_type (gdbarch)->builtin_uint32;
    case 8:
      return builtin_type (gdbarch)->builtin_uint64;
    default:
      error (_("no unsigned variant found for type, while evaluating "
               "DWARF expression"));
    }
}
/* Return the signed form of TYPE.  TYPE is necessarily an integral
   type.  */

static struct type *
get_signed_type (struct gdbarch *gdbarch, struct type *type)
{
  switch (TYPE_LENGTH (type))
    {
    case 1:
      return builtin_type (gdbarch)->builtin_int8;
    case 2:
      return builtin_type (gdbarch)->builtin_int16;
    case 4:
      return builtin_type (gdbarch)->builtin_int32;
    case 8:
      return builtin_type (gdbarch)->builtin_int64;
    default:
      error (_("no signed variant found for type, while evaluating "
               "DWARF expression"));
    }
}
/* Retrieve the N'th item on CTX's stack, converted to an address.  */

CORE_ADDR
dwarf_expr_fetch_address (struct dwarf_expr_context *ctx, int n)
{
  struct value *result_val = dwarf_expr_fetch (ctx, n);
  enum bfd_endian byte_order = gdbarch_byte_order (ctx->gdbarch);
  ULONGEST result;

  dwarf_require_integral (value_type (result_val));
  result = extract_unsigned_integer (value_contents (result_val),
                                     TYPE_LENGTH (value_type (result_val)),
                                     byte_order);

  /* For most architectures, calling extract_unsigned_integer() alone
     is sufficient for extracting an address.  However, some
     architectures (e.g. MIPS) use signed addresses and using
     extract_unsigned_integer() will not produce a correct
     result.  Make sure we invoke gdbarch_integer_to_address()
     for those architectures which require it.  */
  if (gdbarch_integer_to_address_p (ctx->gdbarch))
    {
      gdb_byte *buf = (gdb_byte *) alloca (ctx->addr_size);
      struct type *int_type = get_unsigned_type (ctx->gdbarch,
                                                 value_type (result_val));

      store_unsigned_integer (buf, ctx->addr_size, byte_order, result);
      return gdbarch_integer_to_address (ctx->gdbarch, int_type, buf);
    }

  return (CORE_ADDR) result;
}
/* Retrieve the in_stack_memory flag of the N'th item on CTX's stack.  */

int
dwarf_expr_fetch_in_stack_memory (struct dwarf_expr_context *ctx, int n)
{
  if (ctx->stack_len <= n)
    error (_("Asked for position %d of stack, "
             "stack only has %d elements on it."),
           n, ctx->stack_len);
  return ctx->stack[ctx->stack_len - (1 + n)].in_stack_memory;
}
/* Return true if the expression stack is empty.  */

static int
dwarf_expr_stack_empty_p (struct dwarf_expr_context *ctx)
{
  return ctx->stack_len == 0;
}
/* Add a new piece to CTX's piece list.  */

static void
add_piece (struct dwarf_expr_context *ctx, ULONGEST size, ULONGEST offset)
{
  struct dwarf_expr_piece *p;

  ctx->num_pieces++;

  ctx->pieces
    = XRESIZEVEC (struct dwarf_expr_piece, ctx->pieces, ctx->num_pieces);

  p = &ctx->pieces[ctx->num_pieces - 1];
  p->location = ctx->location;
  p->size = size;
  p->offset = offset;

  if (p->location == DWARF_VALUE_LITERAL)
    {
      p->v.literal.data = ctx->data;
      p->v.literal.length = ctx->len;
    }
  else if (dwarf_expr_stack_empty_p (ctx))
    {
      p->location = DWARF_VALUE_OPTIMIZED_OUT;
      /* Also reset the context's location, for our callers.  This is
         a somewhat strange approach, but this lets us avoid setting
         the location to DWARF_VALUE_MEMORY in all the individual
         cases in the evaluator.  */
      ctx->location = DWARF_VALUE_OPTIMIZED_OUT;
    }
  else if (p->location == DWARF_VALUE_MEMORY)
    {
      p->v.mem.addr = dwarf_expr_fetch_address (ctx, 0);
      p->v.mem.in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, 0);
    }
  else if (p->location == DWARF_VALUE_IMPLICIT_POINTER)
    {
      p->v.ptr.die.sect_off = ctx->len;
      p->v.ptr.offset = value_as_long (dwarf_expr_fetch (ctx, 0));
    }
  else if (p->location == DWARF_VALUE_REGISTER)
    p->v.regno = value_as_long (dwarf_expr_fetch (ctx, 0));
  else
    p->v.value = dwarf_expr_fetch (ctx, 0);
}
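/* Worked example (descriptive only): for a 64-bit value split across
   two registers, the expression DW_OP_reg0, DW_OP_piece 4, DW_OP_reg1,
   DW_OP_piece 4 ends up calling add_piece twice with a 32-bit size
   each time, leaving ctx->num_pieces == 2 and both pieces recorded as
   DWARF_VALUE_REGISTER locations (regno 0 and regno 1).  */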
/* Evaluate the expression at ADDR (LEN bytes long) using the context
   CTX.  */

void
dwarf_expr_eval (struct dwarf_expr_context *ctx, const gdb_byte *addr,
                 size_t len)
{
  int old_recursion_depth = ctx->recursion_depth;

  execute_stack_op (ctx, addr, addr + len);

  /* CTX RECURSION_DEPTH becomes invalid if an exception was thrown here.  */

  gdb_assert (ctx->recursion_depth == old_recursion_depth);
}
/* Helper to read a uleb128 value or throw an error.  */

const gdb_byte *
safe_read_uleb128 (const gdb_byte *buf, const gdb_byte *buf_end,
                   uint64_t *r)
{
  buf = gdb_read_uleb128 (buf, buf_end, r);
  if (buf == NULL)
    error (_("DWARF expression error: ran off end of buffer reading uleb128 value"));
  return buf;
}
/* Helper to read a sleb128 value or throw an error.  */

const gdb_byte *
safe_read_sleb128 (const gdb_byte *buf, const gdb_byte *buf_end,
                   int64_t *r)
{
  buf = gdb_read_sleb128 (buf, buf_end, r);
  if (buf == NULL)
    error (_("DWARF expression error: ran off end of buffer reading sleb128 value"));
  return buf;
}
/* Helper to skip a leb128 value or throw an error.  */

const gdb_byte *
safe_skip_leb128 (const gdb_byte *buf, const gdb_byte *buf_end)
{
  buf = gdb_skip_leb128 (buf, buf_end);
  if (buf == NULL)
    error (_("DWARF expression error: ran off end of buffer reading leb128 value"));
  return buf;
}
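/* Worked example (from the LEB128 encoding described in the DWARF
   standard): the unsigned LEB128 byte sequence 0xe5 0x8e 0x26 decodes
   to 624485 (0x65 | 0x0e << 7 | 0x26 << 14).  A sequence whose final
   byte still has bit 7 set runs off the end of the buffer; that is the
   condition the helpers above turn into an error.  */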
/* Check that the current operator is either at the end of an
   expression, or that it is followed by a composition operator.  */

void
dwarf_expr_require_composition (const gdb_byte *op_ptr, const gdb_byte *op_end,
                                const char *op_name)
{
  /* It seems like DW_OP_GNU_uninit should be handled here.  However,
     it doesn't seem to make sense for DW_OP_*_value, and it was not
     checked at the other place that this function is called.  */
  if (op_ptr != op_end && *op_ptr != DW_OP_piece && *op_ptr != DW_OP_bit_piece)
    error (_("DWARF-2 expression error: `%s' operations must be "
             "used either alone or in conjunction with DW_OP_piece "
             "or DW_OP_bit_piece."),
           op_name);
}
/* Return true iff the types T1 and T2 are "the same".  This only does
   checks that might reasonably be needed to compare DWARF base
   types.  */

static int
base_types_equal_p (struct type *t1, struct type *t2)
{
  if (TYPE_CODE (t1) != TYPE_CODE (t2))
    return 0;
  if (TYPE_UNSIGNED (t1) != TYPE_UNSIGNED (t2))
    return 0;
  return TYPE_LENGTH (t1) == TYPE_LENGTH (t2);
}
/* A convenience function to call get_base_type on CTX and return the
   result.  DIE is the DIE whose type we need.  SIZE is non-zero if
   this function should verify that the resulting type has the correct
   size.  */

static struct type *
dwarf_get_base_type (struct dwarf_expr_context *ctx, cu_offset die, int size)
{
  struct type *result;

  if (ctx->funcs->get_base_type)
    {
      result = ctx->funcs->get_base_type (ctx, die);
      if (result == NULL)
        error (_("Could not find type for DW_OP_GNU_const_type"));
      if (size != 0 && TYPE_LENGTH (result) != size)
        error (_("DW_OP_GNU_const_type has different sizes for type and data"));
    }
  else
    /* Anything will do.  */
    result = builtin_type (ctx->gdbarch)->builtin_int;

  return result;
}
/* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_reg* return the
   DWARF register number.  Otherwise return -1.  */

int
dwarf_block_to_dwarf_reg (const gdb_byte *buf, const gdb_byte *buf_end)
{
  uint64_t dwarf_reg;

  if (buf_end <= buf)
    return -1;
  if (*buf >= DW_OP_reg0 && *buf <= DW_OP_reg31)
    {
      if (buf_end - buf != 1)
        return -1;
      return *buf - DW_OP_reg0;
    }

  if (*buf == DW_OP_GNU_regval_type)
    {
      buf++;
      buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
      if (buf == NULL)
        return -1;
      buf = gdb_skip_leb128 (buf, buf_end);
      if (buf == NULL)
        return -1;
    }
  else if (*buf == DW_OP_regx)
    {
      buf++;
      buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
      if (buf == NULL)
        return -1;
    }
  else
    return -1;
  if (buf != buf_end || (int) dwarf_reg != dwarf_reg)
    return -1;
  return dwarf_reg;
}
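/* Worked example (descriptive only): a one-byte block { DW_OP_reg5 }
   yields 5, and { DW_OP_regx, <uleb128 33> } yields 33.  A block such
   as { DW_OP_reg5, DW_OP_piece, 4 } yields -1, because the register
   operation is not the only thing in the block.  */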
/* If <BUF..BUF_END] contains DW_FORM_block* with just DW_OP_breg*(0) and
   DW_OP_deref* return the DWARF register number.  Otherwise return -1.
   DEREF_SIZE_RETURN contains -1 for DW_OP_deref; otherwise it contains the
   size from DW_OP_deref_size.  */

int
dwarf_block_to_dwarf_reg_deref (const gdb_byte *buf, const gdb_byte *buf_end,
                                CORE_ADDR *deref_size_return)
{
  uint64_t dwarf_reg;
  int64_t offset;

  if (buf_end <= buf)
    return -1;

  if (*buf >= DW_OP_breg0 && *buf <= DW_OP_breg31)
    {
      dwarf_reg = *buf - DW_OP_breg0;
      buf++;
      if (buf >= buf_end)
        return -1;
    }
  else if (*buf == DW_OP_bregx)
    {
      buf++;
      buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
      if (buf == NULL)
        return -1;
      if ((int) dwarf_reg != dwarf_reg)
        return -1;
    }
  else
    return -1;

  buf = gdb_read_sleb128 (buf, buf_end, &offset);
  if (buf == NULL)
    return -1;
  if (offset != 0)
    return -1;

  if (*buf == DW_OP_deref)
    {
      buf++;
      *deref_size_return = -1;
    }
  else if (*buf == DW_OP_deref_size)
    {
      buf++;
      if (buf >= buf_end)
        return -1;
      *deref_size_return = *buf++;
    }
  else
    return -1;

  if (buf != buf_end)
    return -1;

  return dwarf_reg;
}
/* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_fbreg(X) fill
   in FB_OFFSET_RETURN with the X offset and return 1.  Otherwise return 0.  */

int
dwarf_block_to_fb_offset (const gdb_byte *buf, const gdb_byte *buf_end,
                          CORE_ADDR *fb_offset_return)
{
  int64_t fb_offset;

  if (buf_end <= buf)
    return 0;

  if (*buf != DW_OP_fbreg)
    return 0;
  buf++;

  buf = gdb_read_sleb128 (buf, buf_end, &fb_offset);
  if (buf == NULL)
    return 0;
  *fb_offset_return = fb_offset;
  if (buf != buf_end || fb_offset != (LONGEST) *fb_offset_return)
    return 0;

  return 1;
}
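/* Worked example (descriptive only): the block { DW_OP_fbreg,
   <sleb128 -16> } stores -16 in *FB_OFFSET_RETURN and returns 1; a
   block starting with any other opcode returns 0.  */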
/* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_bregSP(X) fill
   in SP_OFFSET_RETURN with the X offset and return 1.  Otherwise return 0.
   The matched SP register number depends on GDBARCH.  */

int
dwarf_block_to_sp_offset (struct gdbarch *gdbarch, const gdb_byte *buf,
                          const gdb_byte *buf_end, CORE_ADDR *sp_offset_return)
{
  uint64_t dwarf_reg;
  int64_t sp_offset;

  if (buf_end <= buf)
    return 0;
  if (*buf >= DW_OP_breg0 && *buf <= DW_OP_breg31)
    {
      dwarf_reg = *buf - DW_OP_breg0;
      buf++;
    }
  else
    {
      if (*buf != DW_OP_bregx)
        return 0;
      buf++;
      buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
      if (buf == NULL)
        return 0;
    }

  if (dwarf_reg_to_regnum (gdbarch, dwarf_reg)
      != gdbarch_sp_regnum (gdbarch))
    return 0;

  buf = gdb_read_sleb128 (buf, buf_end, &sp_offset);
  if (buf == NULL)
    return 0;
  *sp_offset_return = sp_offset;
  if (buf != buf_end || sp_offset != (LONGEST) *sp_offset_return)
    return 0;

  return 1;
}
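/* Worked example (descriptive only): on x86-64, where DWARF register
   7 maps to the stack pointer, the block { DW_OP_breg7, <sleb128 16> }
   stores 16 in *SP_OFFSET_RETURN and returns 1.  The same block
   returns 0 on an architecture whose SP has a different DWARF
   number.  */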
/* The engine for the expression evaluator.  Using the context in CTX,
   evaluate the expression between OP_PTR and OP_END.  */

static void
execute_stack_op (struct dwarf_expr_context *ctx,
                  const gdb_byte *op_ptr, const gdb_byte *op_end)
{
  enum bfd_endian byte_order = gdbarch_byte_order (ctx->gdbarch);
  /* Old-style "untyped" DWARF values need special treatment in a
     couple of places, specifically DW_OP_mod and DW_OP_shr.  We need
     a special type for these values so we can distinguish them from
     values that have an explicit type, because explicitly-typed
     values do not need special treatment.  This special type must be
     different (in the `==' sense) from any base type coming from the
     CU.  */
  struct type *address_type = dwarf_expr_address_type (ctx);

  ctx->location = DWARF_VALUE_MEMORY;
  ctx->initialized = 1;  /* Default is initialized.  */

  if (ctx->recursion_depth > ctx->max_recursion_depth)
    error (_("DWARF-2 expression error: Loop detected (%d)."),
           ctx->recursion_depth);
  ctx->recursion_depth++;

  while (op_ptr < op_end)
    {
      enum dwarf_location_atom op = (enum dwarf_location_atom) *op_ptr++;
      ULONGEST result;
      /* Assume the value is not in stack memory.
         Code that knows otherwise sets this to 1.
         Some arithmetic on stack addresses can probably be assumed to still
         be a stack address, but we skip this complication for now.
         This is just an optimization, so it's always ok to punt
         and leave this as 0.  */
      int in_stack_memory = 0;
      uint64_t uoffset, reg;
      int64_t offset;
      struct value *result_val = NULL;

      /* The DWARF expression might have a bug causing an infinite
         loop.  In that case, quitting is the only way out.  */
      QUIT;

      switch (op)
        {
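        /* Worked example (descriptive only): evaluating the three-byte
           expression DW_OP_lit1, DW_OP_lit2, DW_OP_plus pushes 1,
           pushes 2, then pops both and pushes 3; since the location
           kind is still DWARF_VALUE_MEMORY a caller would read the
           object at address 3.  Appending DW_OP_stack_value marks the
           location as DWARF_VALUE_STACK instead, so 3 itself is the
           variable's value.  */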
        case DW_OP_lit0:
        /* ...  DW_OP_lit1 through DW_OP_lit30 elided here ...  */
        case DW_OP_lit31:
          result = op - DW_OP_lit0;
          result_val = value_from_ulongest (address_type, result);
          break;

        case DW_OP_addr:
          result = extract_unsigned_integer (op_ptr,
                                             ctx->addr_size, byte_order);
          op_ptr += ctx->addr_size;
          /* Some versions of GCC emit DW_OP_addr before
             DW_OP_GNU_push_tls_address.  In this case the value is an
             index, not an address.  We don't support things like
             branching between the address and the TLS op.  */
          if (op_ptr >= op_end || *op_ptr != DW_OP_GNU_push_tls_address)
            result += ctx->offset;
          result_val = value_from_ulongest (address_type, result);
          break;
        case DW_OP_GNU_addr_index:
          op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
          result = (ctx->funcs->get_addr_index) (ctx->baton, uoffset);
          result += ctx->offset;
          result_val = value_from_ulongest (address_type, result);
          break;
        case DW_OP_GNU_const_index:
          op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
          result = (ctx->funcs->get_addr_index) (ctx->baton, uoffset);
          result_val = value_from_ulongest (address_type, result);
          break;
        case DW_OP_const1u:
          result = extract_unsigned_integer (op_ptr, 1, byte_order);
          result_val = value_from_ulongest (address_type, result);
          op_ptr += 1;
          break;
        case DW_OP_const1s:
          result = extract_signed_integer (op_ptr, 1, byte_order);
          result_val = value_from_ulongest (address_type, result);
          op_ptr += 1;
          break;
        case DW_OP_const2u:
          result = extract_unsigned_integer (op_ptr, 2, byte_order);
          result_val = value_from_ulongest (address_type, result);
          op_ptr += 2;
          break;
        case DW_OP_const2s:
          result = extract_signed_integer (op_ptr, 2, byte_order);
          result_val = value_from_ulongest (address_type, result);
          op_ptr += 2;
          break;
        case DW_OP_const4u:
          result = extract_unsigned_integer (op_ptr, 4, byte_order);
          result_val = value_from_ulongest (address_type, result);
          op_ptr += 4;
          break;
        case DW_OP_const4s:
          result = extract_signed_integer (op_ptr, 4, byte_order);
          result_val = value_from_ulongest (address_type, result);
          op_ptr += 4;
          break;
        case DW_OP_const8u:
          result = extract_unsigned_integer (op_ptr, 8, byte_order);
          result_val = value_from_ulongest (address_type, result);
          op_ptr += 8;
          break;
        case DW_OP_const8s:
          result = extract_signed_integer (op_ptr, 8, byte_order);
          result_val = value_from_ulongest (address_type, result);
          op_ptr += 8;
          break;
        case DW_OP_constu:
          op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
          result = uoffset;
          result_val = value_from_ulongest (address_type, result);
          break;
        case DW_OP_consts:
          op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset);
          result = offset;
          result_val = value_from_ulongest (address_type, result);
          break;
        /* The DW_OP_reg operations are required to occur alone in
           location expressions.  */
        case DW_OP_reg0:
        /* ...  DW_OP_reg1 through DW_OP_reg30 elided here ...  */
        case DW_OP_reg31:
          if (op_ptr != op_end
              && *op_ptr != DW_OP_piece
              && *op_ptr != DW_OP_bit_piece
              && *op_ptr != DW_OP_GNU_uninit)
            error (_("DWARF-2 expression error: DW_OP_reg operations must be "
                     "used either alone or in conjunction with DW_OP_piece "
                     "or DW_OP_bit_piece."));

          result = op - DW_OP_reg0;
          result_val = value_from_ulongest (address_type, result);
          ctx->location = DWARF_VALUE_REGISTER;
          break;

        case DW_OP_regx:
          op_ptr = safe_read_uleb128 (op_ptr, op_end, &reg);
          dwarf_expr_require_composition (op_ptr, op_end, "DW_OP_regx");

          result = reg;
          result_val = value_from_ulongest (address_type, result);
          ctx->location = DWARF_VALUE_REGISTER;
          break;
        case DW_OP_implicit_value:
          {
            uint64_t len;

            op_ptr = safe_read_uleb128 (op_ptr, op_end, &len);
            if (op_ptr + len > op_end)
              error (_("DW_OP_implicit_value: too few bytes available."));
            ctx->len = len;
            ctx->data = op_ptr;
            ctx->location = DWARF_VALUE_LITERAL;
            op_ptr += len;
            dwarf_expr_require_composition (op_ptr, op_end,
                                            "DW_OP_implicit_value");
          }
          goto no_push;

        case DW_OP_stack_value:
          ctx->location = DWARF_VALUE_STACK;
          dwarf_expr_require_composition (op_ptr, op_end, "DW_OP_stack_value");
          goto no_push;
        case DW_OP_GNU_implicit_pointer:
          {
            int64_t len;

            if (ctx->ref_addr_size == -1)
              error (_("DWARF-2 expression error: DW_OP_GNU_implicit_pointer "
                       "is not allowed in frame context"));

            /* The referred-to DIE of sect_offset kind.  */
            ctx->len = extract_unsigned_integer (op_ptr, ctx->ref_addr_size,
                                                 byte_order);
            op_ptr += ctx->ref_addr_size;

            /* The byte offset into the data.  */
            op_ptr = safe_read_sleb128 (op_ptr, op_end, &len);
            result = (ULONGEST) len;
            result_val = value_from_ulongest (address_type, result);

            ctx->location = DWARF_VALUE_IMPLICIT_POINTER;
            dwarf_expr_require_composition (op_ptr, op_end,
                                            "DW_OP_GNU_implicit_pointer");
          }
          break;
        case DW_OP_breg0:
        /* ...  DW_OP_breg1 through DW_OP_breg30 elided here ...  */
        case DW_OP_breg31:
          {
            op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset);
            result = (ctx->funcs->read_addr_from_reg) (ctx->baton,
                                                       op - DW_OP_breg0);
            result += offset;
            result_val = value_from_ulongest (address_type, result);
          }
          break;
        case DW_OP_bregx:
          {
            op_ptr = safe_read_uleb128 (op_ptr, op_end, &reg);
            op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset);
            result = (ctx->funcs->read_addr_from_reg) (ctx->baton, reg);
            result += offset;
            result_val = value_from_ulongest (address_type, result);
          }
          break;
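          /* Worked example (descriptive only): DW_OP_breg5 with an
             sleb128 operand of 8 reads the address-sized contents of
             DWARF register 5 through read_addr_from_reg and pushes
             that value plus 8; the location kind stays
             DWARF_VALUE_MEMORY, so the result is treated as an address
             unless DW_OP_stack_value follows.  */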
        case DW_OP_fbreg:
          {
            const gdb_byte *datastart;
            size_t datalen;
            unsigned int before_stack_len;

            op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset);
            /* Rather than create a whole new context, we simply
               record the stack length before execution, then reset it
               afterwards, effectively erasing whatever the recursive
               call put there.  */
            before_stack_len = ctx->stack_len;
            /* FIXME: cagney/2003-03-26: This code should be using
               get_frame_base_address(), and then implement a dwarf2
               specific this_base method.  */
            (ctx->funcs->get_frame_base) (ctx->baton, &datastart, &datalen);
            dwarf_expr_eval (ctx, datastart, datalen);
            if (ctx->location == DWARF_VALUE_MEMORY)
              result = dwarf_expr_fetch_address (ctx, 0);
            else if (ctx->location == DWARF_VALUE_REGISTER)
              result = (ctx->funcs->read_addr_from_reg)
                         (ctx->baton,
                          value_as_long (dwarf_expr_fetch (ctx, 0)));
            else
              error (_("Not implemented: computing frame "
                       "base using explicit value operator"));
            result = result + offset;
            result_val = value_from_ulongest (address_type, result);
            in_stack_memory = 1;
            ctx->stack_len = before_stack_len;
            ctx->location = DWARF_VALUE_MEMORY;
          }
          break;
        case DW_OP_dup:
          result_val = dwarf_expr_fetch (ctx, 0);
          in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, 0);
          break;

        case DW_OP_drop:
          dwarf_expr_pop (ctx);
          goto no_push;

        case DW_OP_pick:
          offset = *op_ptr++;
          result_val = dwarf_expr_fetch (ctx, offset);
          in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, offset);
          break;

        case DW_OP_swap:
          {
            struct dwarf_stack_value t1, t2;

            if (ctx->stack_len < 2)
              error (_("Not enough elements for "
                       "DW_OP_swap.  Need 2, have %d."),
                     ctx->stack_len);
            t1 = ctx->stack[ctx->stack_len - 1];
            t2 = ctx->stack[ctx->stack_len - 2];
            ctx->stack[ctx->stack_len - 1] = t2;
            ctx->stack[ctx->stack_len - 2] = t1;
            goto no_push;
          }

        case DW_OP_over:
          result_val = dwarf_expr_fetch (ctx, 1);
          in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, 1);
          break;

        case DW_OP_rot:
          {
            struct dwarf_stack_value t1, t2, t3;

            if (ctx->stack_len < 3)
              error (_("Not enough elements for "
                       "DW_OP_rot.  Need 3, have %d."),
                     ctx->stack_len);
            t1 = ctx->stack[ctx->stack_len - 1];
            t2 = ctx->stack[ctx->stack_len - 2];
            t3 = ctx->stack[ctx->stack_len - 3];
            ctx->stack[ctx->stack_len - 1] = t2;
            ctx->stack[ctx->stack_len - 2] = t3;
            ctx->stack[ctx->stack_len - 3] = t1;
            goto no_push;
          }
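        /* Worked example (descriptive only): with the stack holding
           a, b, c from bottom to top, DW_OP_swap leaves a, c, b and
           DW_OP_rot leaves c, a, b; DW_OP_pick with operand 0 behaves
           like DW_OP_dup and pushes another copy of c.  */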
        case DW_OP_deref:
        case DW_OP_deref_size:
        case DW_OP_GNU_deref_type:
          {
            int addr_size = (op == DW_OP_deref ? ctx->addr_size : *op_ptr++);
            gdb_byte *buf = (gdb_byte *) alloca (addr_size);
            CORE_ADDR addr = dwarf_expr_fetch_address (ctx, 0);
            struct type *type;

            dwarf_expr_pop (ctx);

            if (op == DW_OP_GNU_deref_type)
              {
                cu_offset type_die;

                op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
                type_die.cu_off = uoffset;
                type = dwarf_get_base_type (ctx, type_die, 0);
              }
            else
              type = address_type;

            (ctx->funcs->read_mem) (ctx->baton, buf, addr, addr_size);

            /* If the size of the object read from memory is different
               from the type length, we need to zero-extend it.  */
            if (TYPE_LENGTH (type) != addr_size)
              {
                ULONGEST result =
                  extract_unsigned_integer (buf, addr_size, byte_order);

                buf = (gdb_byte *) alloca (TYPE_LENGTH (type));
                store_unsigned_integer (buf, TYPE_LENGTH (type),
                                        byte_order, result);
              }

            result_val = value_from_contents_and_address (type, buf, addr);
            break;
          }
        case DW_OP_abs:
        case DW_OP_neg:
        case DW_OP_not:
        case DW_OP_plus_uconst:
          {
            /* Unary operations.  */
            result_val = dwarf_expr_fetch (ctx, 0);
            dwarf_expr_pop (ctx);

            switch (op)
              {
              case DW_OP_abs:
                if (value_less (result_val,
                                value_zero (value_type (result_val), not_lval)))
                  result_val = value_neg (result_val);
                break;
              case DW_OP_neg:
                result_val = value_neg (result_val);
                break;
              case DW_OP_not:
                dwarf_require_integral (value_type (result_val));
                result_val = value_complement (result_val);
                break;
              case DW_OP_plus_uconst:
                dwarf_require_integral (value_type (result_val));
                result = value_as_long (result_val);
                op_ptr = safe_read_uleb128 (op_ptr, op_end, &reg);
                result += reg;
                result_val = value_from_ulongest (address_type, result);
                break;
              }
          }
          break;
        case DW_OP_and:
        case DW_OP_div:
        case DW_OP_minus:
        case DW_OP_mod:
        case DW_OP_mul:
        case DW_OP_or:
        case DW_OP_plus:
        case DW_OP_shl:
        case DW_OP_shr:
        case DW_OP_shra:
        case DW_OP_xor:
        case DW_OP_le:
        case DW_OP_ge:
        case DW_OP_eq:
        case DW_OP_lt:
        case DW_OP_gt:
        case DW_OP_ne:
          {
            /* Binary operations.  */
            struct value *first, *second;

            second = dwarf_expr_fetch (ctx, 0);
            dwarf_expr_pop (ctx);

            first = dwarf_expr_fetch (ctx, 0);
            dwarf_expr_pop (ctx);

            if (! base_types_equal_p (value_type (first), value_type (second)))
              error (_("Incompatible types on DWARF stack"));

            switch (op)
              {
              case DW_OP_and:
                dwarf_require_integral (value_type (first));
                dwarf_require_integral (value_type (second));
                result_val = value_binop (first, second, BINOP_BITWISE_AND);
                break;
              case DW_OP_div:
                result_val = value_binop (first, second, BINOP_DIV);
                break;
              case DW_OP_minus:
                result_val = value_binop (first, second, BINOP_SUB);
                break;
              case DW_OP_mod:
                {
                  int cast_back = 0;
                  struct type *orig_type = value_type (first);

                  /* We have to special-case "old-style" untyped values
                     -- these must have mod computed using unsigned
                     math.  */
                  if (orig_type == address_type)
                    {
                      struct type *utype
                        = get_unsigned_type (ctx->gdbarch, orig_type);

                      cast_back = 1;
                      first = value_cast (utype, first);
                      second = value_cast (utype, second);
                    }
                  /* Note that value_binop doesn't handle float or
                     decimal float here.  This seems unimportant.  */
                  result_val = value_binop (first, second, BINOP_MOD);
                  if (cast_back)
                    result_val = value_cast (orig_type, result_val);
                }
                break;
              case DW_OP_mul:
                result_val = value_binop (first, second, BINOP_MUL);
                break;
              case DW_OP_or:
                dwarf_require_integral (value_type (first));
                dwarf_require_integral (value_type (second));
                result_val = value_binop (first, second, BINOP_BITWISE_IOR);
                break;
              case DW_OP_plus:
                result_val = value_binop (first, second, BINOP_ADD);
                break;
              case DW_OP_shl:
                dwarf_require_integral (value_type (first));
                dwarf_require_integral (value_type (second));
                result_val = value_binop (first, second, BINOP_LSH);
                break;
              case DW_OP_shr:
                dwarf_require_integral (value_type (first));
                dwarf_require_integral (value_type (second));
                if (!TYPE_UNSIGNED (value_type (first)))
                  {
                    struct type *utype
                      = get_unsigned_type (ctx->gdbarch, value_type (first));

                    first = value_cast (utype, first);
                  }

                result_val = value_binop (first, second, BINOP_RSH);
                /* Make sure we wind up with the same type we started
                   with.  */
                if (value_type (result_val) != value_type (second))
                  result_val = value_cast (value_type (second), result_val);
                break;
              case DW_OP_shra:
                dwarf_require_integral (value_type (first));
                dwarf_require_integral (value_type (second));
                if (TYPE_UNSIGNED (value_type (first)))
                  {
                    struct type *stype
                      = get_signed_type (ctx->gdbarch, value_type (first));

                    first = value_cast (stype, first);
                  }

                result_val = value_binop (first, second, BINOP_RSH);
                /* Make sure we wind up with the same type we started
                   with.  */
                if (value_type (result_val) != value_type (second))
                  result_val = value_cast (value_type (second), result_val);
                break;
              case DW_OP_xor:
                dwarf_require_integral (value_type (first));
                dwarf_require_integral (value_type (second));
                result_val = value_binop (first, second, BINOP_BITWISE_XOR);
                break;
              case DW_OP_le:
                /* A <= B is !(B < A).  */
                result = ! value_less (second, first);
                result_val = value_from_ulongest (address_type, result);
                break;
              case DW_OP_ge:
                /* A >= B is !(A < B).  */
                result = ! value_less (first, second);
                result_val = value_from_ulongest (address_type, result);
                break;
              case DW_OP_eq:
                result = value_equal (first, second);
                result_val = value_from_ulongest (address_type, result);
                break;
              case DW_OP_lt:
                result = value_less (first, second);
                result_val = value_from_ulongest (address_type, result);
                break;
              case DW_OP_gt:
                /* A > B is B < A.  */
                result = value_less (second, first);
                result_val = value_from_ulongest (address_type, result);
                break;
              case DW_OP_ne:
                result = ! value_equal (first, second);
                result_val = value_from_ulongest (address_type, result);
                break;
              default:
                internal_error (__FILE__, __LINE__,
                                _("Can't be reached."));
              }
          }
          break;
        case DW_OP_call_frame_cfa:
          result = (ctx->funcs->get_frame_cfa) (ctx->baton);
          result_val = value_from_ulongest (address_type, result);
          in_stack_memory = 1;
          break;

        case DW_OP_GNU_push_tls_address:
          /* Variable is at a constant offset in the thread-local
             storage block into the objfile for the current thread and
             the dynamic linker module containing this expression.  Here
             we return the offset from that base.  The top of the
             stack has the offset from the beginning of the thread
             control block at which the variable is located.  Nothing
             should follow this operator, so the top of stack would be
             returned.  */
          result = value_as_long (dwarf_expr_fetch (ctx, 0));
          dwarf_expr_pop (ctx);
          result = (ctx->funcs->get_tls_address) (ctx->baton, result);
          result_val = value_from_ulongest (address_type, result);
          break;
        case DW_OP_skip:
          offset = extract_signed_integer (op_ptr, 2, byte_order);
          op_ptr += 2;
          op_ptr += offset;
          goto no_push;

        case DW_OP_bra:
          {
            struct value *val;

            offset = extract_signed_integer (op_ptr, 2, byte_order);
            op_ptr += 2;
            val = dwarf_expr_fetch (ctx, 0);
            dwarf_require_integral (value_type (val));
            if (value_as_long (val) != 0)
              op_ptr += offset;
            dwarf_expr_pop (ctx);
          }
          goto no_push;
        case DW_OP_piece:
          {
            uint64_t size;

            /* Record the piece.  */
            op_ptr = safe_read_uleb128 (op_ptr, op_end, &size);
            add_piece (ctx, 8 * size, 0);

            /* Pop off the address/regnum, and reset the location
               type.  */
            if (ctx->location != DWARF_VALUE_LITERAL
                && ctx->location != DWARF_VALUE_OPTIMIZED_OUT)
              dwarf_expr_pop (ctx);
            ctx->location = DWARF_VALUE_MEMORY;
          }
          goto no_push;

        case DW_OP_bit_piece:
          {
            uint64_t size, offset;

            /* Record the piece.  */
            op_ptr = safe_read_uleb128 (op_ptr, op_end, &size);
            op_ptr = safe_read_uleb128 (op_ptr, op_end, &offset);
            add_piece (ctx, size, offset);

            /* Pop off the address/regnum, and reset the location
               type.  */
            if (ctx->location != DWARF_VALUE_LITERAL
                && ctx->location != DWARF_VALUE_OPTIMIZED_OUT)
              dwarf_expr_pop (ctx);
            ctx->location = DWARF_VALUE_MEMORY;
          }
          goto no_push;
        case DW_OP_GNU_uninit:
          if (op_ptr != op_end)
            error (_("DWARF-2 expression error: DW_OP_GNU_uninit must always "
                     "be the very last op."));

          ctx->initialized = 0;
          goto no_push;
        case DW_OP_call2:
          {
            cu_offset offset;

            offset.cu_off = extract_unsigned_integer (op_ptr, 2, byte_order);
            op_ptr += 2;
            ctx->funcs->dwarf_call (ctx, offset);
          }
          goto no_push;

        case DW_OP_call4:
          {
            cu_offset offset;

            offset.cu_off = extract_unsigned_integer (op_ptr, 4, byte_order);
            op_ptr += 4;
            ctx->funcs->dwarf_call (ctx, offset);
          }
          goto no_push;
        case DW_OP_GNU_entry_value:
          {
            uint64_t len;
            CORE_ADDR deref_size;
            union call_site_parameter_u kind_u;

            op_ptr = safe_read_uleb128 (op_ptr, op_end, &len);
            if (op_ptr + len > op_end)
              error (_("DW_OP_GNU_entry_value: too few bytes available."));

            kind_u.dwarf_reg = dwarf_block_to_dwarf_reg (op_ptr, op_ptr + len);
            if (kind_u.dwarf_reg != -1)
              {
                op_ptr += len;
                ctx->funcs->push_dwarf_reg_entry_value (ctx,
                                                  CALL_SITE_PARAMETER_DWARF_REG,
                                                        kind_u,
                                                        -1 /* deref_size */);
                goto no_push;
              }

            kind_u.dwarf_reg = dwarf_block_to_dwarf_reg_deref (op_ptr,
                                                               op_ptr + len,
                                                               &deref_size);
            if (kind_u.dwarf_reg != -1)
              {
                if (deref_size == -1)
                  deref_size = ctx->addr_size;
                op_ptr += len;
                ctx->funcs->push_dwarf_reg_entry_value (ctx,
                                                  CALL_SITE_PARAMETER_DWARF_REG,
                                                        kind_u, deref_size);
                goto no_push;
              }

            error (_("DWARF-2 expression error: DW_OP_GNU_entry_value is "
                     "supported only for single DW_OP_reg* "
                     "or for DW_OP_breg*(0)+DW_OP_deref*"));
          }
        case DW_OP_GNU_parameter_ref:
          {
            union call_site_parameter_u kind_u;

            kind_u.param_offset.cu_off = extract_unsigned_integer (op_ptr, 4,
                                                                   byte_order);
            op_ptr += 4;
            ctx->funcs->push_dwarf_reg_entry_value (ctx,
                                               CALL_SITE_PARAMETER_PARAM_OFFSET,
                                                    kind_u,
                                                    -1 /* deref_size */);
          }
          goto no_push;

        case DW_OP_GNU_const_type:
          {
            cu_offset type_die;
            int n;
            const gdb_byte *data;
            struct type *type;

            op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
            type_die.cu_off = uoffset;

            n = *op_ptr++;
            data = op_ptr;
            op_ptr += n;

            type = dwarf_get_base_type (ctx, type_die, n);
            result_val = value_from_contents (type, data);
          }
          break;

        case DW_OP_GNU_regval_type:
          {
            cu_offset type_die;
            struct type *type;

            op_ptr = safe_read_uleb128 (op_ptr, op_end, &reg);
            op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
            type_die.cu_off = uoffset;

            type = dwarf_get_base_type (ctx, type_die, 0);
            result_val = ctx->funcs->get_reg_value (ctx->baton, type, reg);
          }
          break;

        case DW_OP_GNU_convert:
        case DW_OP_GNU_reinterpret:
          {
            cu_offset type_die;
            struct type *type;

            op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
            type_die.cu_off = uoffset;

            if (type_die.cu_off == 0)
              type = address_type;
            else
              type = dwarf_get_base_type (ctx, type_die, 0);

            result_val = dwarf_expr_fetch (ctx, 0);
            dwarf_expr_pop (ctx);

            if (op == DW_OP_GNU_convert)
              result_val = value_cast (type, result_val);
            else if (type == value_type (result_val))
              {
                /* Nothing.  */
              }
            else if (TYPE_LENGTH (type)
                     != TYPE_LENGTH (value_type (result_val)))
              error (_("DW_OP_GNU_reinterpret has wrong size"));
            else
              result_val
                = value_from_contents (type,
                                       value_contents_all (result_val));
          }
          break;

        case DW_OP_push_object_address:
          /* Return the address of the object we are currently observing.  */
          result = (ctx->funcs->get_object_address) (ctx->baton);
          result_val = value_from_ulongest (address_type, result);
          break;

        default:
          error (_("Unhandled dwarf expression opcode 0x%x"), op);
        }

      /* Most things push a result value.  */
      gdb_assert (result_val != NULL);
      dwarf_expr_push (ctx, result_val, in_stack_memory);
    no_push:
      ;
    }

  /* To simplify our main caller, if the result is an implicit
     pointer, then make a pieced value.  This is ok because we can't
     have implicit pointers in contexts where pieces are invalid.  */
  if (ctx->location == DWARF_VALUE_IMPLICIT_POINTER)
    add_piece (ctx, 8 * ctx->addr_size, 0);

  ctx->recursion_depth--;
  gdb_assert (ctx->recursion_depth >= 0);
}
/* Stub dwarf_expr_context_funcs.get_frame_base implementation.  */

void
ctx_no_get_frame_base (void *baton, const gdb_byte **start, size_t *length)
{
  error (_("%s is invalid in this context"), "DW_OP_fbreg");
}

/* Stub dwarf_expr_context_funcs.get_frame_cfa implementation.  */

CORE_ADDR
ctx_no_get_frame_cfa (void *baton)
{
  error (_("%s is invalid in this context"), "DW_OP_call_frame_cfa");
}

/* Stub dwarf_expr_context_funcs.get_frame_pc implementation.  */

CORE_ADDR
ctx_no_get_frame_pc (void *baton)
{
  error (_("%s is invalid in this context"), "DW_OP_GNU_implicit_pointer");
}

/* Stub dwarf_expr_context_funcs.get_tls_address implementation.  */

CORE_ADDR
ctx_no_get_tls_address (void *baton, CORE_ADDR offset)
{
  error (_("%s is invalid in this context"), "DW_OP_GNU_push_tls_address");
}

/* Stub dwarf_expr_context_funcs.dwarf_call implementation.  */

void
ctx_no_dwarf_call (struct dwarf_expr_context *ctx, cu_offset die_offset)
{
  error (_("%s is invalid in this context"), "DW_OP_call*");
}

/* Stub dwarf_expr_context_funcs.get_base_type implementation.  */

struct type *
ctx_no_get_base_type (struct dwarf_expr_context *ctx, cu_offset die)
{
  error (_("Support for typed DWARF is not supported in this context"));
}

/* Stub dwarf_expr_context_funcs.push_dwarf_reg_entry_value
   implementation.  */

void
ctx_no_push_dwarf_reg_entry_value (struct dwarf_expr_context *ctx,
                                   enum call_site_parameter_kind kind,
                                   union call_site_parameter_u kind_u,
                                   int deref_size)
{
  internal_error (__FILE__, __LINE__,
                  _("Support for DW_OP_GNU_entry_value is unimplemented"));
}

/* Stub dwarf_expr_context_funcs.get_addr_index implementation.  */

CORE_ADDR
ctx_no_get_addr_index (void *baton, unsigned int index)
{
  error (_("%s is invalid in this context"), "DW_OP_GNU_addr_index");
}
/* Provide a prototype to silence -Wmissing-prototypes.  */
extern initialize_file_ftype _initialize_dwarf2expr;

void
_initialize_dwarf2expr (void)
{
  dwarf_arch_cookie
    = gdbarch_data_register_post_init (dwarf_gdbarch_types_init);
}