* dwarf2expr.c (get_signed_type): New function.
[deliverable/binutils-gdb.git] / gdb / dwarf2expr.c
1 /* DWARF 2 Expression Evaluator.
2
3 Copyright (C) 2001, 2002, 2003, 2005, 2007, 2008, 2009, 2010, 2011
4 Free Software Foundation, Inc.
5
6 Contributed by Daniel Berlin (dan@dberlin.org)
7
8 This file is part of GDB.
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 3 of the License, or
13 (at your option) any later version.
14
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with this program. If not, see <http://www.gnu.org/licenses/>. */
22
23 #include "defs.h"
24 #include "symtab.h"
25 #include "gdbtypes.h"
26 #include "value.h"
27 #include "gdbcore.h"
28 #include "dwarf2.h"
29 #include "dwarf2expr.h"
30 #include "gdb_assert.h"
31
32 /* Local prototypes. */
33
34 static void execute_stack_op (struct dwarf_expr_context *,
35 const gdb_byte *, const gdb_byte *);
36
37 /* Cookie for gdbarch data. */
38
39 static struct gdbarch_data *dwarf_arch_cookie;
40
41 /* This holds gdbarch-specific types used by the DWARF expression
42 evaluator. See comments in execute_stack_op. */
43
44 struct dwarf_gdbarch_types
45 {
46 struct type *dw_types[3];
47 };
48
49 /* Allocate and fill in dwarf_gdbarch_types for an arch. */
50
51 static void *
52 dwarf_gdbarch_types_init (struct gdbarch *gdbarch)
53 {
54 struct dwarf_gdbarch_types *types
55 = GDBARCH_OBSTACK_ZALLOC (gdbarch, struct dwarf_gdbarch_types);
56
57 /* The types themselves are lazily initialized. */
58
59 return types;
60 }
61
62 /* Return the type used for DWARF operations where the type is
63 unspecified in the DWARF spec. Only certain sizes are
64 supported. */
65
66 static struct type *
67 dwarf_expr_address_type (struct dwarf_expr_context *ctx)
68 {
69 struct dwarf_gdbarch_types *types = gdbarch_data (ctx->gdbarch,
70 dwarf_arch_cookie);
71 int ndx;
72
73 if (ctx->addr_size == 2)
74 ndx = 0;
75 else if (ctx->addr_size == 4)
76 ndx = 1;
77 else if (ctx->addr_size == 8)
78 ndx = 2;
79 else
80 error (_("Unsupported address size in DWARF expressions: %d bits"),
81 8 * ctx->addr_size);
82
83 if (types->dw_types[ndx] == NULL)
84 types->dw_types[ndx]
85 = arch_integer_type (ctx->gdbarch,
86 8 * ctx->addr_size,
87 0, "<signed DWARF address type>");
88
89 return types->dw_types[ndx];
90 }
91
92 /* Create a new context for the expression evaluator. */
93
94 struct dwarf_expr_context *
95 new_dwarf_expr_context (void)
96 {
97 struct dwarf_expr_context *retval;
98
99 retval = xcalloc (1, sizeof (struct dwarf_expr_context));
100 retval->stack_len = 0;
101 retval->stack_allocated = 10;
102 retval->stack = xmalloc (retval->stack_allocated
103 * sizeof (struct dwarf_stack_value));
104 retval->num_pieces = 0;
105 retval->pieces = 0;
106 retval->max_recursion_depth = 0x100;
107 return retval;
108 }
109
110 /* Release the memory allocated to CTX. */
111
112 void
113 free_dwarf_expr_context (struct dwarf_expr_context *ctx)
114 {
115 xfree (ctx->stack);
116 xfree (ctx->pieces);
117 xfree (ctx);
118 }
119
/* Helper for make_cleanup_free_dwarf_expr_context; ARG is the
   context to release.  */

static void
free_dwarf_expr_context_cleanup (void *arg)
{
  struct dwarf_expr_context *ctx = arg;

  free_dwarf_expr_context (ctx);
}
127
128 /* Return a cleanup that calls free_dwarf_expr_context. */
129
130 struct cleanup *
131 make_cleanup_free_dwarf_expr_context (struct dwarf_expr_context *ctx)
132 {
133 return make_cleanup (free_dwarf_expr_context_cleanup, ctx);
134 }
135
136 /* Expand the memory allocated to CTX's stack to contain at least
137 NEED more elements than are currently used. */
138
139 static void
140 dwarf_expr_grow_stack (struct dwarf_expr_context *ctx, size_t need)
141 {
142 if (ctx->stack_len + need > ctx->stack_allocated)
143 {
144 size_t newlen = ctx->stack_len + need + 10;
145
146 ctx->stack = xrealloc (ctx->stack,
147 newlen * sizeof (struct dwarf_stack_value));
148 ctx->stack_allocated = newlen;
149 }
150 }
151
152 /* Push VALUE onto CTX's stack. */
153
154 static void
155 dwarf_expr_push (struct dwarf_expr_context *ctx, struct value *value,
156 int in_stack_memory)
157 {
158 struct dwarf_stack_value *v;
159
160 dwarf_expr_grow_stack (ctx, 1);
161 v = &ctx->stack[ctx->stack_len++];
162 v->value = value;
163 v->in_stack_memory = in_stack_memory;
164 }
165
166 /* Push VALUE onto CTX's stack. */
167
168 void
169 dwarf_expr_push_address (struct dwarf_expr_context *ctx, CORE_ADDR value,
170 int in_stack_memory)
171 {
172 dwarf_expr_push (ctx,
173 value_from_ulongest (dwarf_expr_address_type (ctx), value),
174 in_stack_memory);
175 }
176
177 /* Pop the top item off of CTX's stack. */
178
179 static void
180 dwarf_expr_pop (struct dwarf_expr_context *ctx)
181 {
182 if (ctx->stack_len <= 0)
183 error (_("dwarf expression stack underflow"));
184 ctx->stack_len--;
185 }
186
187 /* Retrieve the N'th item on CTX's stack. */
188
189 struct value *
190 dwarf_expr_fetch (struct dwarf_expr_context *ctx, int n)
191 {
192 if (ctx->stack_len <= n)
193 error (_("Asked for position %d of stack, "
194 "stack only has %d elements on it."),
195 n, ctx->stack_len);
196 return ctx->stack[ctx->stack_len - (1 + n)].value;
197 }
198
199 /* Require that TYPE be an integral type; throw an exception if not. */
200
201 static void
202 dwarf_require_integral (struct type *type)
203 {
204 if (TYPE_CODE (type) != TYPE_CODE_INT
205 && TYPE_CODE (type) != TYPE_CODE_CHAR
206 && TYPE_CODE (type) != TYPE_CODE_BOOL)
207 error (_("integral type expected in DWARF expression"));
208 }
209
210 /* Return the unsigned form of TYPE. TYPE is necessarily an integral
211 type. */
212
213 static struct type *
214 get_unsigned_type (struct gdbarch *gdbarch, struct type *type)
215 {
216 switch (TYPE_LENGTH (type))
217 {
218 case 1:
219 return builtin_type (gdbarch)->builtin_uint8;
220 case 2:
221 return builtin_type (gdbarch)->builtin_uint16;
222 case 4:
223 return builtin_type (gdbarch)->builtin_uint32;
224 case 8:
225 return builtin_type (gdbarch)->builtin_uint64;
226 default:
227 error (_("no unsigned variant found for type, while evaluating "
228 "DWARF expression"));
229 }
230 }
231
232 /* Return the signed form of TYPE. TYPE is necessarily an integral
233 type. */
234
235 static struct type *
236 get_signed_type (struct gdbarch *gdbarch, struct type *type)
237 {
238 switch (TYPE_LENGTH (type))
239 {
240 case 1:
241 return builtin_type (gdbarch)->builtin_int8;
242 case 2:
243 return builtin_type (gdbarch)->builtin_int16;
244 case 4:
245 return builtin_type (gdbarch)->builtin_int32;
246 case 8:
247 return builtin_type (gdbarch)->builtin_int64;
248 default:
249 error (_("no signed variant found for type, while evaluating "
250 "DWARF expression"));
251 }
252 }
253
254 /* Retrieve the N'th item on CTX's stack, converted to an address. */
255
256 CORE_ADDR
257 dwarf_expr_fetch_address (struct dwarf_expr_context *ctx, int n)
258 {
259 struct value *result_val = dwarf_expr_fetch (ctx, n);
260 enum bfd_endian byte_order = gdbarch_byte_order (ctx->gdbarch);
261 ULONGEST result;
262
263 dwarf_require_integral (value_type (result_val));
264 result = extract_unsigned_integer (value_contents (result_val),
265 TYPE_LENGTH (value_type (result_val)),
266 byte_order);
267
268 /* For most architectures, calling extract_unsigned_integer() alone
269 is sufficient for extracting an address. However, some
270 architectures (e.g. MIPS) use signed addresses and using
271 extract_unsigned_integer() will not produce a correct
272 result. Make sure we invoke gdbarch_integer_to_address()
273 for those architectures which require it. */
274 if (gdbarch_integer_to_address_p (ctx->gdbarch))
275 {
276 gdb_byte *buf = alloca (ctx->addr_size);
277 struct type *int_type = get_unsigned_type (ctx->gdbarch,
278 value_type (result_val));
279
280 store_unsigned_integer (buf, ctx->addr_size, byte_order, result);
281 return gdbarch_integer_to_address (ctx->gdbarch, int_type, buf);
282 }
283
284 return (CORE_ADDR) result;
285 }
286
287 /* Retrieve the in_stack_memory flag of the N'th item on CTX's stack. */
288
289 int
290 dwarf_expr_fetch_in_stack_memory (struct dwarf_expr_context *ctx, int n)
291 {
292 if (ctx->stack_len <= n)
293 error (_("Asked for position %d of stack, "
294 "stack only has %d elements on it."),
295 n, ctx->stack_len);
296 return ctx->stack[ctx->stack_len - (1 + n)].in_stack_memory;
297 }
298
299 /* Return true if the expression stack is empty. */
300
301 static int
302 dwarf_expr_stack_empty_p (struct dwarf_expr_context *ctx)
303 {
304 return ctx->stack_len == 0;
305 }
306
307 /* Add a new piece to CTX's piece list. */
308 static void
309 add_piece (struct dwarf_expr_context *ctx, ULONGEST size, ULONGEST offset)
310 {
311 struct dwarf_expr_piece *p;
312
313 ctx->num_pieces++;
314
315 ctx->pieces = xrealloc (ctx->pieces,
316 (ctx->num_pieces
317 * sizeof (struct dwarf_expr_piece)));
318
319 p = &ctx->pieces[ctx->num_pieces - 1];
320 p->location = ctx->location;
321 p->size = size;
322 p->offset = offset;
323
324 if (p->location == DWARF_VALUE_LITERAL)
325 {
326 p->v.literal.data = ctx->data;
327 p->v.literal.length = ctx->len;
328 }
329 else if (dwarf_expr_stack_empty_p (ctx))
330 {
331 p->location = DWARF_VALUE_OPTIMIZED_OUT;
332 /* Also reset the context's location, for our callers. This is
333 a somewhat strange approach, but this lets us avoid setting
334 the location to DWARF_VALUE_MEMORY in all the individual
335 cases in the evaluator. */
336 ctx->location = DWARF_VALUE_OPTIMIZED_OUT;
337 }
338 else if (p->location == DWARF_VALUE_MEMORY)
339 {
340 p->v.mem.addr = dwarf_expr_fetch_address (ctx, 0);
341 p->v.mem.in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, 0);
342 }
343 else if (p->location == DWARF_VALUE_IMPLICIT_POINTER)
344 {
345 p->v.ptr.die = ctx->len;
346 p->v.ptr.offset = value_as_long (dwarf_expr_fetch (ctx, 0));
347 }
348 else if (p->location == DWARF_VALUE_REGISTER)
349 p->v.regno = value_as_long (dwarf_expr_fetch (ctx, 0));
350 else
351 {
352 p->v.value = dwarf_expr_fetch (ctx, 0);
353 }
354 }
355
356 /* Evaluate the expression at ADDR (LEN bytes long) using the context
357 CTX. */
358
359 void
360 dwarf_expr_eval (struct dwarf_expr_context *ctx, const gdb_byte *addr,
361 size_t len)
362 {
363 int old_recursion_depth = ctx->recursion_depth;
364
365 execute_stack_op (ctx, addr, addr + len);
366
367 /* CTX RECURSION_DEPTH becomes invalid if an exception was thrown here. */
368
369 gdb_assert (ctx->recursion_depth == old_recursion_depth);
370 }
371
372 /* Decode the unsigned LEB128 constant at BUF into the variable pointed to
373 by R, and return the new value of BUF. Verify that it doesn't extend
374 past BUF_END. */
375
376 const gdb_byte *
377 read_uleb128 (const gdb_byte *buf, const gdb_byte *buf_end, ULONGEST * r)
378 {
379 unsigned shift = 0;
380 ULONGEST result = 0;
381 gdb_byte byte;
382
383 while (1)
384 {
385 if (buf >= buf_end)
386 error (_("read_uleb128: Corrupted DWARF expression."));
387
388 byte = *buf++;
389 result |= ((ULONGEST) (byte & 0x7f)) << shift;
390 if ((byte & 0x80) == 0)
391 break;
392 shift += 7;
393 }
394 *r = result;
395 return buf;
396 }
397
398 /* Decode the signed LEB128 constant at BUF into the variable pointed to
399 by R, and return the new value of BUF. Verify that it doesn't extend
400 past BUF_END. */
401
402 const gdb_byte *
403 read_sleb128 (const gdb_byte *buf, const gdb_byte *buf_end, LONGEST * r)
404 {
405 unsigned shift = 0;
406 LONGEST result = 0;
407 gdb_byte byte;
408
409 while (1)
410 {
411 if (buf >= buf_end)
412 error (_("read_sleb128: Corrupted DWARF expression."));
413
414 byte = *buf++;
415 result |= ((ULONGEST) (byte & 0x7f)) << shift;
416 shift += 7;
417 if ((byte & 0x80) == 0)
418 break;
419 }
420 if (shift < (sizeof (*r) * 8) && (byte & 0x40) != 0)
421 result |= -(1 << shift);
422
423 *r = result;
424 return buf;
425 }
426 \f
427
428 /* Check that the current operator is either at the end of an
429 expression, or that it is followed by a composition operator. */
430
431 void
432 dwarf_expr_require_composition (const gdb_byte *op_ptr, const gdb_byte *op_end,
433 const char *op_name)
434 {
435 /* It seems like DW_OP_GNU_uninit should be handled here. However,
436 it doesn't seem to make sense for DW_OP_*_value, and it was not
437 checked at the other place that this function is called. */
438 if (op_ptr != op_end && *op_ptr != DW_OP_piece && *op_ptr != DW_OP_bit_piece)
439 error (_("DWARF-2 expression error: `%s' operations must be "
440 "used either alone or in conjuction with DW_OP_piece "
441 "or DW_OP_bit_piece."),
442 op_name);
443 }
444
/* Return true iff the types T1 and T2 are "the same".  This only does
   checks that might reasonably be needed to compare DWARF base
   types: type code, signedness, and length.  */

static int
base_types_equal_p (struct type *t1, struct type *t2)
{
  return (TYPE_CODE (t1) == TYPE_CODE (t2)
	  && TYPE_UNSIGNED (t1) == TYPE_UNSIGNED (t2)
	  && TYPE_LENGTH (t1) == TYPE_LENGTH (t2));
}
458
459 /* A convenience function to call get_base_type on CTX and return the
460 result. DIE is the DIE whose type we need. SIZE is non-zero if
461 this function should verify that the resulting type has the correct
462 size. */
463
464 static struct type *
465 dwarf_get_base_type (struct dwarf_expr_context *ctx, ULONGEST die, int size)
466 {
467 struct type *result;
468
469 if (ctx->get_base_type)
470 {
471 result = ctx->get_base_type (ctx, die);
472 if (result == NULL)
473 error (_("Could not find type for DW_OP_GNU_const_type"));
474 if (size != 0 && TYPE_LENGTH (result) != size)
475 error (_("DW_OP_GNU_const_type has different sizes for type and data"));
476 }
477 else
478 /* Anything will do. */
479 result = builtin_type (ctx->gdbarch)->builtin_int;
480
481 return result;
482 }
483
484 /* The engine for the expression evaluator. Using the context in CTX,
485 evaluate the expression between OP_PTR and OP_END. */
486
487 static void
488 execute_stack_op (struct dwarf_expr_context *ctx,
489 const gdb_byte *op_ptr, const gdb_byte *op_end)
490 {
491 enum bfd_endian byte_order = gdbarch_byte_order (ctx->gdbarch);
492 /* Old-style "untyped" DWARF values need special treatment in a
493 couple of places, specifically DW_OP_mod and DW_OP_shr. We need
494 a special type for these values so we can distinguish them from
495 values that have an explicit type, because explicitly-typed
496 values do not need special treatment. This special type must be
497 different (in the `==' sense) from any base type coming from the
498 CU. */
499 struct type *address_type = dwarf_expr_address_type (ctx);
500
501 ctx->location = DWARF_VALUE_MEMORY;
502 ctx->initialized = 1; /* Default is initialized. */
503
504 if (ctx->recursion_depth > ctx->max_recursion_depth)
505 error (_("DWARF-2 expression error: Loop detected (%d)."),
506 ctx->recursion_depth);
507 ctx->recursion_depth++;
508
509 while (op_ptr < op_end)
510 {
511 enum dwarf_location_atom op = *op_ptr++;
512 ULONGEST result;
513 /* Assume the value is not in stack memory.
514 Code that knows otherwise sets this to 1.
515 Some arithmetic on stack addresses can probably be assumed to still
516 be a stack address, but we skip this complication for now.
517 This is just an optimization, so it's always ok to punt
518 and leave this as 0. */
519 int in_stack_memory = 0;
520 ULONGEST uoffset, reg;
521 LONGEST offset;
522 struct value *result_val = NULL;
523
524 switch (op)
525 {
526 case DW_OP_lit0:
527 case DW_OP_lit1:
528 case DW_OP_lit2:
529 case DW_OP_lit3:
530 case DW_OP_lit4:
531 case DW_OP_lit5:
532 case DW_OP_lit6:
533 case DW_OP_lit7:
534 case DW_OP_lit8:
535 case DW_OP_lit9:
536 case DW_OP_lit10:
537 case DW_OP_lit11:
538 case DW_OP_lit12:
539 case DW_OP_lit13:
540 case DW_OP_lit14:
541 case DW_OP_lit15:
542 case DW_OP_lit16:
543 case DW_OP_lit17:
544 case DW_OP_lit18:
545 case DW_OP_lit19:
546 case DW_OP_lit20:
547 case DW_OP_lit21:
548 case DW_OP_lit22:
549 case DW_OP_lit23:
550 case DW_OP_lit24:
551 case DW_OP_lit25:
552 case DW_OP_lit26:
553 case DW_OP_lit27:
554 case DW_OP_lit28:
555 case DW_OP_lit29:
556 case DW_OP_lit30:
557 case DW_OP_lit31:
558 result = op - DW_OP_lit0;
559 result_val = value_from_ulongest (address_type, result);
560 break;
561
562 case DW_OP_addr:
563 result = extract_unsigned_integer (op_ptr,
564 ctx->addr_size, byte_order);
565 op_ptr += ctx->addr_size;
566 /* Some versions of GCC emit DW_OP_addr before
567 DW_OP_GNU_push_tls_address. In this case the value is an
568 index, not an address. We don't support things like
569 branching between the address and the TLS op. */
570 if (op_ptr >= op_end || *op_ptr != DW_OP_GNU_push_tls_address)
571 result += ctx->offset;
572 result_val = value_from_ulongest (address_type, result);
573 break;
574
575 case DW_OP_const1u:
576 result = extract_unsigned_integer (op_ptr, 1, byte_order);
577 result_val = value_from_ulongest (address_type, result);
578 op_ptr += 1;
579 break;
580 case DW_OP_const1s:
581 result = extract_signed_integer (op_ptr, 1, byte_order);
582 result_val = value_from_ulongest (address_type, result);
583 op_ptr += 1;
584 break;
585 case DW_OP_const2u:
586 result = extract_unsigned_integer (op_ptr, 2, byte_order);
587 result_val = value_from_ulongest (address_type, result);
588 op_ptr += 2;
589 break;
590 case DW_OP_const2s:
591 result = extract_signed_integer (op_ptr, 2, byte_order);
592 result_val = value_from_ulongest (address_type, result);
593 op_ptr += 2;
594 break;
595 case DW_OP_const4u:
596 result = extract_unsigned_integer (op_ptr, 4, byte_order);
597 result_val = value_from_ulongest (address_type, result);
598 op_ptr += 4;
599 break;
600 case DW_OP_const4s:
601 result = extract_signed_integer (op_ptr, 4, byte_order);
602 result_val = value_from_ulongest (address_type, result);
603 op_ptr += 4;
604 break;
605 case DW_OP_const8u:
606 result = extract_unsigned_integer (op_ptr, 8, byte_order);
607 result_val = value_from_ulongest (address_type, result);
608 op_ptr += 8;
609 break;
610 case DW_OP_const8s:
611 result = extract_signed_integer (op_ptr, 8, byte_order);
612 result_val = value_from_ulongest (address_type, result);
613 op_ptr += 8;
614 break;
615 case DW_OP_constu:
616 op_ptr = read_uleb128 (op_ptr, op_end, &uoffset);
617 result = uoffset;
618 result_val = value_from_ulongest (address_type, result);
619 break;
620 case DW_OP_consts:
621 op_ptr = read_sleb128 (op_ptr, op_end, &offset);
622 result = offset;
623 result_val = value_from_ulongest (address_type, result);
624 break;
625
626 /* The DW_OP_reg operations are required to occur alone in
627 location expressions. */
628 case DW_OP_reg0:
629 case DW_OP_reg1:
630 case DW_OP_reg2:
631 case DW_OP_reg3:
632 case DW_OP_reg4:
633 case DW_OP_reg5:
634 case DW_OP_reg6:
635 case DW_OP_reg7:
636 case DW_OP_reg8:
637 case DW_OP_reg9:
638 case DW_OP_reg10:
639 case DW_OP_reg11:
640 case DW_OP_reg12:
641 case DW_OP_reg13:
642 case DW_OP_reg14:
643 case DW_OP_reg15:
644 case DW_OP_reg16:
645 case DW_OP_reg17:
646 case DW_OP_reg18:
647 case DW_OP_reg19:
648 case DW_OP_reg20:
649 case DW_OP_reg21:
650 case DW_OP_reg22:
651 case DW_OP_reg23:
652 case DW_OP_reg24:
653 case DW_OP_reg25:
654 case DW_OP_reg26:
655 case DW_OP_reg27:
656 case DW_OP_reg28:
657 case DW_OP_reg29:
658 case DW_OP_reg30:
659 case DW_OP_reg31:
660 if (op_ptr != op_end
661 && *op_ptr != DW_OP_piece
662 && *op_ptr != DW_OP_bit_piece
663 && *op_ptr != DW_OP_GNU_uninit)
664 error (_("DWARF-2 expression error: DW_OP_reg operations must be "
665 "used either alone or in conjuction with DW_OP_piece "
666 "or DW_OP_bit_piece."));
667
668 result = op - DW_OP_reg0;
669 result_val = value_from_ulongest (address_type, result);
670 ctx->location = DWARF_VALUE_REGISTER;
671 break;
672
673 case DW_OP_regx:
674 op_ptr = read_uleb128 (op_ptr, op_end, &reg);
675 dwarf_expr_require_composition (op_ptr, op_end, "DW_OP_regx");
676
677 result = reg;
678 result_val = value_from_ulongest (address_type, result);
679 ctx->location = DWARF_VALUE_REGISTER;
680 break;
681
682 case DW_OP_implicit_value:
683 {
684 ULONGEST len;
685
686 op_ptr = read_uleb128 (op_ptr, op_end, &len);
687 if (op_ptr + len > op_end)
688 error (_("DW_OP_implicit_value: too few bytes available."));
689 ctx->len = len;
690 ctx->data = op_ptr;
691 ctx->location = DWARF_VALUE_LITERAL;
692 op_ptr += len;
693 dwarf_expr_require_composition (op_ptr, op_end,
694 "DW_OP_implicit_value");
695 }
696 goto no_push;
697
698 case DW_OP_stack_value:
699 ctx->location = DWARF_VALUE_STACK;
700 dwarf_expr_require_composition (op_ptr, op_end, "DW_OP_stack_value");
701 goto no_push;
702
703 case DW_OP_GNU_implicit_pointer:
704 {
705 ULONGEST die;
706 LONGEST len;
707
708 /* The referred-to DIE. */
709 ctx->len = extract_unsigned_integer (op_ptr, ctx->addr_size,
710 byte_order);
711 op_ptr += ctx->addr_size;
712
713 /* The byte offset into the data. */
714 op_ptr = read_sleb128 (op_ptr, op_end, &len);
715 result = (ULONGEST) len;
716 result_val = value_from_ulongest (address_type, result);
717
718 ctx->location = DWARF_VALUE_IMPLICIT_POINTER;
719 dwarf_expr_require_composition (op_ptr, op_end,
720 "DW_OP_GNU_implicit_pointer");
721 }
722 break;
723
724 case DW_OP_breg0:
725 case DW_OP_breg1:
726 case DW_OP_breg2:
727 case DW_OP_breg3:
728 case DW_OP_breg4:
729 case DW_OP_breg5:
730 case DW_OP_breg6:
731 case DW_OP_breg7:
732 case DW_OP_breg8:
733 case DW_OP_breg9:
734 case DW_OP_breg10:
735 case DW_OP_breg11:
736 case DW_OP_breg12:
737 case DW_OP_breg13:
738 case DW_OP_breg14:
739 case DW_OP_breg15:
740 case DW_OP_breg16:
741 case DW_OP_breg17:
742 case DW_OP_breg18:
743 case DW_OP_breg19:
744 case DW_OP_breg20:
745 case DW_OP_breg21:
746 case DW_OP_breg22:
747 case DW_OP_breg23:
748 case DW_OP_breg24:
749 case DW_OP_breg25:
750 case DW_OP_breg26:
751 case DW_OP_breg27:
752 case DW_OP_breg28:
753 case DW_OP_breg29:
754 case DW_OP_breg30:
755 case DW_OP_breg31:
756 {
757 op_ptr = read_sleb128 (op_ptr, op_end, &offset);
758 result = (ctx->read_reg) (ctx->baton, op - DW_OP_breg0);
759 result += offset;
760 result_val = value_from_ulongest (address_type, result);
761 }
762 break;
763 case DW_OP_bregx:
764 {
765 op_ptr = read_uleb128 (op_ptr, op_end, &reg);
766 op_ptr = read_sleb128 (op_ptr, op_end, &offset);
767 result = (ctx->read_reg) (ctx->baton, reg);
768 result += offset;
769 result_val = value_from_ulongest (address_type, result);
770 }
771 break;
772 case DW_OP_fbreg:
773 {
774 const gdb_byte *datastart;
775 size_t datalen;
776 unsigned int before_stack_len;
777
778 op_ptr = read_sleb128 (op_ptr, op_end, &offset);
779 /* Rather than create a whole new context, we simply
780 record the stack length before execution, then reset it
781 afterwards, effectively erasing whatever the recursive
782 call put there. */
783 before_stack_len = ctx->stack_len;
784 /* FIXME: cagney/2003-03-26: This code should be using
785 get_frame_base_address(), and then implement a dwarf2
786 specific this_base method. */
787 (ctx->get_frame_base) (ctx->baton, &datastart, &datalen);
788 dwarf_expr_eval (ctx, datastart, datalen);
789 if (ctx->location == DWARF_VALUE_MEMORY)
790 result = dwarf_expr_fetch_address (ctx, 0);
791 else if (ctx->location == DWARF_VALUE_REGISTER)
792 result
793 = (ctx->read_reg) (ctx->baton,
794 value_as_long (dwarf_expr_fetch (ctx, 0)));
795 else
796 error (_("Not implemented: computing frame "
797 "base using explicit value operator"));
798 result = result + offset;
799 result_val = value_from_ulongest (address_type, result);
800 in_stack_memory = 1;
801 ctx->stack_len = before_stack_len;
802 ctx->location = DWARF_VALUE_MEMORY;
803 }
804 break;
805
806 case DW_OP_dup:
807 result_val = dwarf_expr_fetch (ctx, 0);
808 in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, 0);
809 break;
810
811 case DW_OP_drop:
812 dwarf_expr_pop (ctx);
813 goto no_push;
814
815 case DW_OP_pick:
816 offset = *op_ptr++;
817 result_val = dwarf_expr_fetch (ctx, offset);
818 in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, offset);
819 break;
820
821 case DW_OP_swap:
822 {
823 struct dwarf_stack_value t1, t2;
824
825 if (ctx->stack_len < 2)
826 error (_("Not enough elements for "
827 "DW_OP_swap. Need 2, have %d."),
828 ctx->stack_len);
829 t1 = ctx->stack[ctx->stack_len - 1];
830 t2 = ctx->stack[ctx->stack_len - 2];
831 ctx->stack[ctx->stack_len - 1] = t2;
832 ctx->stack[ctx->stack_len - 2] = t1;
833 goto no_push;
834 }
835
836 case DW_OP_over:
837 result_val = dwarf_expr_fetch (ctx, 1);
838 in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, 1);
839 break;
840
841 case DW_OP_rot:
842 {
843 struct dwarf_stack_value t1, t2, t3;
844
845 if (ctx->stack_len < 3)
846 error (_("Not enough elements for "
847 "DW_OP_rot. Need 3, have %d."),
848 ctx->stack_len);
849 t1 = ctx->stack[ctx->stack_len - 1];
850 t2 = ctx->stack[ctx->stack_len - 2];
851 t3 = ctx->stack[ctx->stack_len - 3];
852 ctx->stack[ctx->stack_len - 1] = t2;
853 ctx->stack[ctx->stack_len - 2] = t3;
854 ctx->stack[ctx->stack_len - 3] = t1;
855 goto no_push;
856 }
857
858 case DW_OP_deref:
859 case DW_OP_deref_size:
860 case DW_OP_GNU_deref_type:
861 {
862 int addr_size = (op == DW_OP_deref ? ctx->addr_size : *op_ptr++);
863 gdb_byte *buf = alloca (addr_size);
864 CORE_ADDR addr = dwarf_expr_fetch_address (ctx, 0);
865 struct type *type;
866
867 dwarf_expr_pop (ctx);
868
869 if (op == DW_OP_GNU_deref_type)
870 {
871 ULONGEST type_die;
872
873 op_ptr = read_uleb128 (op_ptr, op_end, &type_die);
874 type = dwarf_get_base_type (ctx, type_die, 0);
875 }
876 else
877 type = address_type;
878
879 (ctx->read_mem) (ctx->baton, buf, addr, addr_size);
880 result_val = value_from_contents_and_address (type, buf, addr);
881 break;
882 }
883
884 case DW_OP_abs:
885 case DW_OP_neg:
886 case DW_OP_not:
887 case DW_OP_plus_uconst:
888 {
889 /* Unary operations. */
890 result_val = dwarf_expr_fetch (ctx, 0);
891 dwarf_expr_pop (ctx);
892
893 switch (op)
894 {
895 case DW_OP_abs:
896 if (value_less (result_val,
897 value_zero (value_type (result_val), not_lval)))
898 result_val = value_neg (result_val);
899 break;
900 case DW_OP_neg:
901 result_val = value_neg (result_val);
902 break;
903 case DW_OP_not:
904 dwarf_require_integral (value_type (result_val));
905 result_val = value_complement (result_val);
906 break;
907 case DW_OP_plus_uconst:
908 dwarf_require_integral (value_type (result_val));
909 result = value_as_long (result_val);
910 op_ptr = read_uleb128 (op_ptr, op_end, &reg);
911 result += reg;
912 result_val = value_from_ulongest (address_type, result);
913 break;
914 }
915 }
916 break;
917
918 case DW_OP_and:
919 case DW_OP_div:
920 case DW_OP_minus:
921 case DW_OP_mod:
922 case DW_OP_mul:
923 case DW_OP_or:
924 case DW_OP_plus:
925 case DW_OP_shl:
926 case DW_OP_shr:
927 case DW_OP_shra:
928 case DW_OP_xor:
929 case DW_OP_le:
930 case DW_OP_ge:
931 case DW_OP_eq:
932 case DW_OP_lt:
933 case DW_OP_gt:
934 case DW_OP_ne:
935 {
936 /* Binary operations. */
937 struct value *first, *second;
938
939 second = dwarf_expr_fetch (ctx, 0);
940 dwarf_expr_pop (ctx);
941
942 first = dwarf_expr_fetch (ctx, 0);
943 dwarf_expr_pop (ctx);
944
945 if (! base_types_equal_p (value_type (first), value_type (second)))
946 error (_("Incompatible types on DWARF stack"));
947
948 switch (op)
949 {
950 case DW_OP_and:
951 dwarf_require_integral (value_type (first));
952 dwarf_require_integral (value_type (second));
953 result_val = value_binop (first, second, BINOP_BITWISE_AND);
954 break;
955 case DW_OP_div:
956 result_val = value_binop (first, second, BINOP_DIV);
957 break;
958 case DW_OP_minus:
959 result_val = value_binop (first, second, BINOP_SUB);
960 break;
961 case DW_OP_mod:
962 {
963 int cast_back = 0;
964 struct type *orig_type = value_type (first);
965
966 /* We have to special-case "old-style" untyped values
967 -- these must have mod computed using unsigned
968 math. */
969 if (orig_type == address_type)
970 {
971 struct type *utype
972 = get_unsigned_type (ctx->gdbarch, orig_type);
973
974 cast_back = 1;
975 first = value_cast (utype, first);
976 second = value_cast (utype, second);
977 }
978 /* Note that value_binop doesn't handle float or
979 decimal float here. This seems unimportant. */
980 result_val = value_binop (first, second, BINOP_MOD);
981 if (cast_back)
982 result_val = value_cast (orig_type, result_val);
983 }
984 break;
985 case DW_OP_mul:
986 result_val = value_binop (first, second, BINOP_MUL);
987 break;
988 case DW_OP_or:
989 dwarf_require_integral (value_type (first));
990 dwarf_require_integral (value_type (second));
991 result_val = value_binop (first, second, BINOP_BITWISE_IOR);
992 break;
993 case DW_OP_plus:
994 result_val = value_binop (first, second, BINOP_ADD);
995 break;
996 case DW_OP_shl:
997 dwarf_require_integral (value_type (first));
998 dwarf_require_integral (value_type (second));
999 result_val = value_binop (first, second, BINOP_LSH);
1000 break;
1001 case DW_OP_shr:
1002 dwarf_require_integral (value_type (first));
1003 dwarf_require_integral (value_type (second));
1004 if (!TYPE_UNSIGNED (value_type (first)))
1005 {
1006 struct type *utype
1007 = get_unsigned_type (ctx->gdbarch, value_type (first));
1008
1009 first = value_cast (utype, first);
1010 }
1011
1012 result_val = value_binop (first, second, BINOP_RSH);
1013 /* Make sure we wind up with the same type we started
1014 with. */
1015 if (value_type (result_val) != value_type (second))
1016 result_val = value_cast (value_type (second), result_val);
1017 break;
1018 case DW_OP_shra:
1019 dwarf_require_integral (value_type (first));
1020 dwarf_require_integral (value_type (second));
1021 if (TYPE_UNSIGNED (value_type (first)))
1022 {
1023 struct type *stype
		      = get_signed_type (ctx->gdbarch, value_type (first));

		    first = value_cast (stype, first);
		  }

		result_val = value_binop (first, second, BINOP_RSH);
		/* Make sure we wind up with the same type we started
		   with.  */
		if (value_type (result_val) != value_type (second))
		  result_val = value_cast (value_type (second), result_val);
		break;
	      case DW_OP_xor:
		dwarf_require_integral (value_type (first));
		dwarf_require_integral (value_type (second));
		result_val = value_binop (first, second, BINOP_BITWISE_XOR);
		break;
	      case DW_OP_le:
		/* A <= B is !(B < A).  */
		result = ! value_less (second, first);
		result_val = value_from_ulongest (address_type, result);
		break;
	      case DW_OP_ge:
		/* A >= B is !(A < B).  */
		result = ! value_less (first, second);
		result_val = value_from_ulongest (address_type, result);
		break;
	      case DW_OP_eq:
		result = value_equal (first, second);
		result_val = value_from_ulongest (address_type, result);
		break;
	      case DW_OP_lt:
		result = value_less (first, second);
		result_val = value_from_ulongest (address_type, result);
		break;
	      case DW_OP_gt:
		/* A > B is B < A.  */
		result = value_less (second, first);
		result_val = value_from_ulongest (address_type, result);
		break;
	      case DW_OP_ne:
		result = ! value_equal (first, second);
		result_val = value_from_ulongest (address_type, result);
		break;
	      default:
		/* The outer switch already restricted OP to the binary
		   operators handled above.  */
		internal_error (__FILE__, __LINE__,
				_("Can't be reached."));
	      }
	  }
	  break;

	case DW_OP_call_frame_cfa:
	  result = (ctx->get_frame_cfa) (ctx->baton);
	  result_val = value_from_ulongest (address_type, result);
	  in_stack_memory = 1;
	  break;

	case DW_OP_GNU_push_tls_address:
	  /* Variable is at a constant offset in the thread-local
	     storage block into the objfile for the current thread and
	     the dynamic linker module containing this expression.  Here
	     we return the offset from that base.  The top of the
	     stack has the offset from the beginning of the thread
	     control block at which the variable is located.  Nothing
	     should follow this operator, so the top of stack would be
	     returned.  */
	  result = value_as_long (dwarf_expr_fetch (ctx, 0));
	  dwarf_expr_pop (ctx);
	  result = (ctx->get_tls_address) (ctx->baton, result);
	  result_val = value_from_ulongest (address_type, result);
	  break;

	case DW_OP_skip:
	  /* Unconditional branch: a 2-byte signed offset, relative to
	     the first byte after the operand.  */
	  offset = extract_signed_integer (op_ptr, 2, byte_order);
	  op_ptr += 2;
	  op_ptr += offset;
	  goto no_push;

	case DW_OP_bra:
	  /* Conditional branch: pop the top of stack and take the
	     2-byte signed branch offset iff the popped value is
	     non-zero.  */
	  {
	    struct value *val;

	    offset = extract_signed_integer (op_ptr, 2, byte_order);
	    op_ptr += 2;
	    val = dwarf_expr_fetch (ctx, 0);
	    dwarf_require_integral (value_type (val));
	    if (value_as_long (val) != 0)
	      op_ptr += offset;
	    dwarf_expr_pop (ctx);
	  }
	  goto no_push;

	case DW_OP_nop:
	  goto no_push;

	case DW_OP_piece:
	  {
	    ULONGEST size;

	    /* Record the piece.  The operand is a byte count, but
	       add_piece works in bits.  */
	    op_ptr = read_uleb128 (op_ptr, op_end, &size);
	    add_piece (ctx, 8 * size, 0);

	    /* Pop off the address/regnum, and reset the location
	       type.  */
	    if (ctx->location != DWARF_VALUE_LITERAL
		&& ctx->location != DWARF_VALUE_OPTIMIZED_OUT)
	      dwarf_expr_pop (ctx);
	    ctx->location = DWARF_VALUE_MEMORY;
	  }
	  goto no_push;

	case DW_OP_bit_piece:
	  {
	    ULONGEST size, offset;

	    /* Record the piece.  Both operands are already in bits.  */
	    op_ptr = read_uleb128 (op_ptr, op_end, &size);
	    op_ptr = read_uleb128 (op_ptr, op_end, &offset);
	    add_piece (ctx, size, offset);

	    /* Pop off the address/regnum, and reset the location
	       type.  */
	    if (ctx->location != DWARF_VALUE_LITERAL
		&& ctx->location != DWARF_VALUE_OPTIMIZED_OUT)
	      dwarf_expr_pop (ctx);
	    ctx->location = DWARF_VALUE_MEMORY;
	  }
	  goto no_push;

	case DW_OP_GNU_uninit:
	  if (op_ptr != op_end)
	    error (_("DWARF-2 expression error: DW_OP_GNU_uninit must always "
		   "be the very last op."));

	  ctx->initialized = 0;
	  goto no_push;

	case DW_OP_call2:
	  /* Call a DWARF subexpression; the operand is the 2-byte
	     offset of the DIE to evaluate.  */
	  result = extract_unsigned_integer (op_ptr, 2, byte_order);
	  op_ptr += 2;
	  ctx->dwarf_call (ctx, result);
	  goto no_push;

	case DW_OP_call4:
	  /* As DW_OP_call2, but with a 4-byte DIE offset operand.  */
	  result = extract_unsigned_integer (op_ptr, 4, byte_order);
	  op_ptr += 4;
	  ctx->dwarf_call (ctx, result);
	  goto no_push;

	case DW_OP_GNU_entry_value:
	  /* This operation is not yet supported by GDB.  Treat the
	     value as optimized out and stop evaluating.  */
	  ctx->location = DWARF_VALUE_OPTIMIZED_OUT;
	  ctx->stack_len = 0;
	  ctx->num_pieces = 0;
	  goto abort_expression;

	case DW_OP_GNU_const_type:
	  /* Push a typed constant: a uleb128 type DIE offset, a
	     one-byte length N, then N bytes of constant data.  */
	  {
	    ULONGEST type_die;
	    int n;
	    const gdb_byte *data;
	    struct type *type;

	    op_ptr = read_uleb128 (op_ptr, op_end, &type_die);
	    n = *op_ptr++;
	    data = op_ptr;
	    op_ptr += n;

	    type = dwarf_get_base_type (ctx, type_die, n);
	    result_val = value_from_contents (type, data);
	  }
	  break;

	case DW_OP_GNU_regval_type:
	  /* Push a register's value, interpreted with the type named
	     by the uleb128 type DIE offset operand.  */
	  {
	    ULONGEST type_die;
	    struct type *type;

	    op_ptr = read_uleb128 (op_ptr, op_end, &reg);
	    op_ptr = read_uleb128 (op_ptr, op_end, &type_die);

	    type = dwarf_get_base_type (ctx, type_die, 0);
	    result = (ctx->read_reg) (ctx->baton, reg);
	    result_val = value_from_ulongest (type, result);
	  }
	  break;

	case DW_OP_GNU_convert:
	case DW_OP_GNU_reinterpret:
	  /* Retype the top of stack: convert casts the value to the
	     named type, while reinterpret keeps the bytes unchanged and
	     only relabels the type (the sizes must match).  */
	  {
	    ULONGEST type_die;
	    struct type *type;

	    op_ptr = read_uleb128 (op_ptr, op_end, &type_die);

	    type = dwarf_get_base_type (ctx, type_die, 0);

	    result_val = dwarf_expr_fetch (ctx, 0);
	    dwarf_expr_pop (ctx);

	    if (op == DW_OP_GNU_convert)
	      result_val = value_cast (type, result_val);
	    else if (type == value_type (result_val))
	      {
		/* Nothing.  */
	      }
	    else if (TYPE_LENGTH (type)
		     != TYPE_LENGTH (value_type (result_val)))
	      error (_("DW_OP_GNU_reinterpret has wrong size"));
	    else
	      result_val
		= value_from_contents (type,
				       value_contents_all (result_val));
	  }
	  break;

	default:
	  error (_("Unhandled dwarf expression opcode 0x%x"), op);
	}

      /* Most things push a result value.  */
      gdb_assert (result_val != NULL);
      dwarf_expr_push (ctx, result_val, in_stack_memory);
    no_push:
      ;
    }

  /* To simplify our main caller, if the result is an implicit
     pointer, then make a pieced value.  This is ok because we can't
     have implicit pointers in contexts where pieces are invalid.  */
  if (ctx->location == DWARF_VALUE_IMPLICIT_POINTER)
    add_piece (ctx, 8 * ctx->addr_size, 0);

abort_expression:
  ctx->recursion_depth--;
  gdb_assert (ctx->recursion_depth >= 0);
}
1261
1262 void
1263 _initialize_dwarf2expr (void)
1264 {
1265 dwarf_arch_cookie
1266 = gdbarch_data_register_post_init (dwarf_gdbarch_types_init);
1267 }
This page took 0.071464 seconds and 5 git commands to generate.