/* DWARF 2 Expression Evaluator.

   Copyright (C) 2001, 2002, 2003, 2005, 2007, 2008, 2009, 2010, 2011
   Free Software Foundation, Inc.

   Contributed by Daniel Berlin (dan@dberlin.org)

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "symtab.h"
#include "gdbtypes.h"
#include "value.h"
#include "gdbcore.h"
#include "dwarf2.h"
#include "dwarf2expr.h"
#include "gdb_assert.h"

/* Local prototypes.  */

static void execute_stack_op (struct dwarf_expr_context *,
                              const gdb_byte *, const gdb_byte *);

/* Cookie for gdbarch data.  */

static struct gdbarch_data *dwarf_arch_cookie;

/* This holds gdbarch-specific types used by the DWARF expression
   evaluator.  See comments in execute_stack_op.  */

struct dwarf_gdbarch_types
{
  struct type *dw_types[3];
};

/* Allocate and fill in dwarf_gdbarch_types for an arch.  */

static void *
dwarf_gdbarch_types_init (struct gdbarch *gdbarch)
{
  struct dwarf_gdbarch_types *types
    = GDBARCH_OBSTACK_ZALLOC (gdbarch, struct dwarf_gdbarch_types);

  /* The types themselves are lazily initialized.  */

  return types;
}

/* Return the type used for DWARF operations where the type is
   unspecified in the DWARF spec.  Only certain sizes are
   supported.  */

static struct type *
dwarf_expr_address_type (struct dwarf_expr_context *ctx)
{
  struct dwarf_gdbarch_types *types = gdbarch_data (ctx->gdbarch,
                                                    dwarf_arch_cookie);
  int ndx;

  if (ctx->addr_size == 2)
    ndx = 0;
  else if (ctx->addr_size == 4)
    ndx = 1;
  else if (ctx->addr_size == 8)
    ndx = 2;
  else
    error (_("Unsupported address size in DWARF expressions: %d bits"),
           8 * ctx->addr_size);

  if (types->dw_types[ndx] == NULL)
    types->dw_types[ndx]
      = arch_integer_type (ctx->gdbarch,
                           8 * ctx->addr_size,
                           0, "<signed DWARF address type>");

  return types->dw_types[ndx];
}

/* Create a new context for the expression evaluator.  */

struct dwarf_expr_context *
new_dwarf_expr_context (void)
{
  struct dwarf_expr_context *retval;

  retval = xcalloc (1, sizeof (struct dwarf_expr_context));
  retval->stack_len = 0;
  retval->stack_allocated = 10;
  retval->stack = xmalloc (retval->stack_allocated
                           * sizeof (struct dwarf_stack_value));
  retval->num_pieces = 0;
  retval->pieces = 0;
  retval->max_recursion_depth = 0x100;
  return retval;
}

/* Release the memory allocated to CTX.  */

void
free_dwarf_expr_context (struct dwarf_expr_context *ctx)
{
  xfree (ctx->stack);
  xfree (ctx->pieces);
  xfree (ctx);
}

/* Helper for make_cleanup_free_dwarf_expr_context.  */

static void
free_dwarf_expr_context_cleanup (void *arg)
{
  free_dwarf_expr_context (arg);
}

/* Return a cleanup that calls free_dwarf_expr_context.  */

struct cleanup *
make_cleanup_free_dwarf_expr_context (struct dwarf_expr_context *ctx)
{
  return make_cleanup (free_dwarf_expr_context_cleanup, ctx);
}
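
/* A minimal usage sketch (editorial addition, guarded out; not part of
   the original file): it shows the intended create/evaluate/fetch/free
   lifecycle of a context, using only calls defined in this file and in
   dwarf2expr.h.  The caller is assumed to have a gdbarch and an address
   size in hand; the callback hooks (read_reg, read_mem, get_frame_base,
   ...) are left unset here and would be required for any expression
   that touches registers, memory, or the frame.  */

#if 0
static struct value *
example_eval_const_expr (struct gdbarch *gdbarch, int addr_size,
                         const gdb_byte *expr, size_t len)
{
  struct dwarf_expr_context *ctx = new_dwarf_expr_context ();
  struct cleanup *old_chain = make_cleanup_free_dwarf_expr_context (ctx);
  struct value *result;

  ctx->gdbarch = gdbarch;
  ctx->addr_size = addr_size;

  dwarf_expr_eval (ctx, expr, len);
  result = dwarf_expr_fetch (ctx, 0);   /* Top of the DWARF stack.  */

  do_cleanups (old_chain);              /* Frees CTX.  */
  return result;
}
#endif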

/* Expand the memory allocated to CTX's stack to contain at least
   NEED more elements than are currently used.  */

static void
dwarf_expr_grow_stack (struct dwarf_expr_context *ctx, size_t need)
{
  if (ctx->stack_len + need > ctx->stack_allocated)
    {
      size_t newlen = ctx->stack_len + need + 10;

      ctx->stack = xrealloc (ctx->stack,
                             newlen * sizeof (struct dwarf_stack_value));
      ctx->stack_allocated = newlen;
    }
}

/* Push VALUE onto CTX's stack.  */

static void
dwarf_expr_push (struct dwarf_expr_context *ctx, struct value *value,
                 int in_stack_memory)
{
  struct dwarf_stack_value *v;

  dwarf_expr_grow_stack (ctx, 1);
  v = &ctx->stack[ctx->stack_len++];
  v->value = value;
  v->in_stack_memory = in_stack_memory;
}
/* Push the address VALUE onto CTX's stack.  */

void
dwarf_expr_push_address (struct dwarf_expr_context *ctx, CORE_ADDR value,
                         int in_stack_memory)
{
  dwarf_expr_push (ctx,
                   value_from_ulongest (dwarf_expr_address_type (ctx), value),
                   in_stack_memory);
}

/* Pop the top item off of CTX's stack.  */

static void
dwarf_expr_pop (struct dwarf_expr_context *ctx)
{
  if (ctx->stack_len <= 0)
    error (_("dwarf expression stack underflow"));
  ctx->stack_len--;
}

/* Retrieve the N'th item on CTX's stack.  */

struct value *
dwarf_expr_fetch (struct dwarf_expr_context *ctx, int n)
{
  if (ctx->stack_len <= n)
    error (_("Asked for position %d of stack, "
             "stack only has %d elements on it."),
           n, ctx->stack_len);
  return ctx->stack[ctx->stack_len - (1 + n)].value;
}

/* Require that TYPE be an integral type; throw an exception if not.  */

static void
dwarf_require_integral (struct type *type)
{
  if (TYPE_CODE (type) != TYPE_CODE_INT
      && TYPE_CODE (type) != TYPE_CODE_CHAR
      && TYPE_CODE (type) != TYPE_CODE_BOOL)
    error (_("integral type expected in DWARF expression"));
}

/* Return the unsigned form of TYPE.  TYPE is necessarily an integral
   type.  */

static struct type *
get_unsigned_type (struct gdbarch *gdbarch, struct type *type)
{
  switch (TYPE_LENGTH (type))
    {
    case 1:
      return builtin_type (gdbarch)->builtin_uint8;
    case 2:
      return builtin_type (gdbarch)->builtin_uint16;
    case 4:
      return builtin_type (gdbarch)->builtin_uint32;
    case 8:
      return builtin_type (gdbarch)->builtin_uint64;
    default:
      error (_("no unsigned variant found for type, while evaluating "
               "DWARF expression"));
    }
}

/* Return the signed form of TYPE.  TYPE is necessarily an integral
   type.  */

static struct type *
get_signed_type (struct gdbarch *gdbarch, struct type *type)
{
  switch (TYPE_LENGTH (type))
    {
    case 1:
      return builtin_type (gdbarch)->builtin_int8;
    case 2:
      return builtin_type (gdbarch)->builtin_int16;
    case 4:
      return builtin_type (gdbarch)->builtin_int32;
    case 8:
      return builtin_type (gdbarch)->builtin_int64;
    default:
      error (_("no signed variant found for type, while evaluating "
               "DWARF expression"));
    }
}

/* Retrieve the N'th item on CTX's stack, converted to an address.  */

CORE_ADDR
dwarf_expr_fetch_address (struct dwarf_expr_context *ctx, int n)
{
  struct value *result_val = dwarf_expr_fetch (ctx, n);
  enum bfd_endian byte_order = gdbarch_byte_order (ctx->gdbarch);
  ULONGEST result;

  dwarf_require_integral (value_type (result_val));
  result = extract_unsigned_integer (value_contents (result_val),
                                     TYPE_LENGTH (value_type (result_val)),
                                     byte_order);

  /* For most architectures, calling extract_unsigned_integer() alone
     is sufficient for extracting an address.  However, some
     architectures (e.g. MIPS) use signed addresses and using
     extract_unsigned_integer() will not produce a correct
     result.  Make sure we invoke gdbarch_integer_to_address()
     for those architectures which require it.  */
  if (gdbarch_integer_to_address_p (ctx->gdbarch))
    {
      gdb_byte *buf = alloca (ctx->addr_size);
      struct type *int_type = get_unsigned_type (ctx->gdbarch,
                                                 value_type (result_val));

      store_unsigned_integer (buf, ctx->addr_size, byte_order, result);
      return gdbarch_integer_to_address (ctx->gdbarch, int_type, buf);
    }

  return (CORE_ADDR) result;
}

/* Retrieve the in_stack_memory flag of the N'th item on CTX's stack.  */

int
dwarf_expr_fetch_in_stack_memory (struct dwarf_expr_context *ctx, int n)
{
  if (ctx->stack_len <= n)
    error (_("Asked for position %d of stack, "
             "stack only has %d elements on it."),
           n, ctx->stack_len);
  return ctx->stack[ctx->stack_len - (1 + n)].in_stack_memory;
}

/* Return true if the expression stack is empty.  */

static int
dwarf_expr_stack_empty_p (struct dwarf_expr_context *ctx)
{
  return ctx->stack_len == 0;
}

/* Add a new piece to CTX's piece list.  */

static void
add_piece (struct dwarf_expr_context *ctx, ULONGEST size, ULONGEST offset)
{
  struct dwarf_expr_piece *p;

  ctx->num_pieces++;

  ctx->pieces = xrealloc (ctx->pieces,
                          (ctx->num_pieces
                           * sizeof (struct dwarf_expr_piece)));

  p = &ctx->pieces[ctx->num_pieces - 1];
  p->location = ctx->location;
  p->size = size;
  p->offset = offset;

  if (p->location == DWARF_VALUE_LITERAL)
    {
      p->v.literal.data = ctx->data;
      p->v.literal.length = ctx->len;
    }
  else if (dwarf_expr_stack_empty_p (ctx))
    {
      p->location = DWARF_VALUE_OPTIMIZED_OUT;
      /* Also reset the context's location, for our callers.  This is
         a somewhat strange approach, but this lets us avoid setting
         the location to DWARF_VALUE_MEMORY in all the individual
         cases in the evaluator.  */
      ctx->location = DWARF_VALUE_OPTIMIZED_OUT;
    }
  else if (p->location == DWARF_VALUE_MEMORY)
    {
      p->v.mem.addr = dwarf_expr_fetch_address (ctx, 0);
      p->v.mem.in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, 0);
    }
  else if (p->location == DWARF_VALUE_IMPLICIT_POINTER)
    {
      p->v.ptr.die = ctx->len;
      p->v.ptr.offset = value_as_long (dwarf_expr_fetch (ctx, 0));
    }
  else if (p->location == DWARF_VALUE_REGISTER)
    p->v.regno = value_as_long (dwarf_expr_fetch (ctx, 0));
  else
    {
      p->v.value = dwarf_expr_fetch (ctx, 0);
    }
}
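
/* Illustration (editorial note, not from the original sources): for an
   8-byte value split across two registers, the expression

       DW_OP_reg3 DW_OP_piece 4 DW_OP_reg4 DW_OP_piece 4

   reaches add_piece twice, each time with SIZE of 32 -- the DW_OP_piece
   handler below converts its byte count to bits -- and records two
   DWARF_VALUE_REGISTER pieces with regnos 3 and 4.  */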

/* Evaluate the expression at ADDR (LEN bytes long) using the context
   CTX.  */

void
dwarf_expr_eval (struct dwarf_expr_context *ctx, const gdb_byte *addr,
                 size_t len)
{
  int old_recursion_depth = ctx->recursion_depth;

  execute_stack_op (ctx, addr, addr + len);

  /* CTX RECURSION_DEPTH becomes invalid if an exception was thrown
     here.  */

  gdb_assert (ctx->recursion_depth == old_recursion_depth);
}

/* Decode the unsigned LEB128 constant at BUF into the variable pointed to
   by R, and return the new value of BUF.  Verify that it doesn't extend
   past BUF_END.  */

const gdb_byte *
read_uleb128 (const gdb_byte *buf, const gdb_byte *buf_end, ULONGEST *r)
{
  unsigned shift = 0;
  ULONGEST result = 0;
  gdb_byte byte;

  while (1)
    {
      if (buf >= buf_end)
        error (_("read_uleb128: Corrupted DWARF expression."));

      byte = *buf++;
      result |= ((ULONGEST) (byte & 0x7f)) << shift;
      if ((byte & 0x80) == 0)
        break;
      shift += 7;
    }
  *r = result;
  return buf;
}
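
/* Worked example (editorial, using the classic encoding from the DWARF
   specification): the byte sequence 0xE5 0x8E 0x26 decodes as

       0x65 + (0x0E << 7) + (0x26 << 14) = 624485.

   The low 7 bits of each byte are payload, least-significant group
   first, and the clear high bit of 0x26 terminates the constant.  */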

/* Decode the signed LEB128 constant at BUF into the variable pointed to
   by R, and return the new value of BUF.  Verify that it doesn't extend
   past BUF_END.  */

const gdb_byte *
read_sleb128 (const gdb_byte *buf, const gdb_byte *buf_end, LONGEST *r)
{
  unsigned shift = 0;
  LONGEST result = 0;
  gdb_byte byte;

  while (1)
    {
      if (buf >= buf_end)
        error (_("read_sleb128: Corrupted DWARF expression."));

      byte = *buf++;
      result |= ((ULONGEST) (byte & 0x7f)) << shift;
      shift += 7;
      if ((byte & 0x80) == 0)
        break;
    }
  /* Sign-extend the result.  Shift an unsigned 1 so the shift stays
     well-defined for large SHIFT values; the plain `-(1 << shift)'
     form overflows int once SHIFT reaches 31.  */
  if (shift < (sizeof (*r) * 8) && (byte & 0x40) != 0)
    result |= -((ULONGEST) 1 << shift);

  *r = result;
  return buf;
}
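
/* Worked example (editorial, from the DWARF specification's table of
   signed encodings): the single byte 0x7F decodes to -1 -- its payload
   is 0x7F and bit 6 is set, so the value is sign-extended -- while the
   sequence 0x9B 0xF1 0x59 decodes to -624485.  */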

/* Check that the current operator is either at the end of an
   expression, or that it is followed by a composition operator.  */

void
dwarf_expr_require_composition (const gdb_byte *op_ptr, const gdb_byte *op_end,
                                const char *op_name)
{
  /* It seems like DW_OP_GNU_uninit should be handled here.  However,
     it doesn't seem to make sense for DW_OP_*_value, and it was not
     checked at the other place that this function is called.  */
  if (op_ptr != op_end && *op_ptr != DW_OP_piece && *op_ptr != DW_OP_bit_piece)
    error (_("DWARF-2 expression error: `%s' operations must be "
             "used either alone or in conjunction with DW_OP_piece "
             "or DW_OP_bit_piece."),
           op_name);
}

/* Return true iff the types T1 and T2 are "the same".  This only does
   checks that might reasonably be needed to compare DWARF base
   types.  */

static int
base_types_equal_p (struct type *t1, struct type *t2)
{
  if (TYPE_CODE (t1) != TYPE_CODE (t2))
    return 0;
  if (TYPE_UNSIGNED (t1) != TYPE_UNSIGNED (t2))
    return 0;
  return TYPE_LENGTH (t1) == TYPE_LENGTH (t2);
}

/* A convenience function to call get_base_type on CTX and return the
   result.  DIE is the DIE whose type we need.  SIZE is non-zero if
   this function should verify that the resulting type has the correct
   size.  */

static struct type *
dwarf_get_base_type (struct dwarf_expr_context *ctx, ULONGEST die, int size)
{
  struct type *result;

  if (ctx->get_base_type)
    {
      result = ctx->get_base_type (ctx, die);
      if (result == NULL)
        error (_("Could not find type for DW_OP_GNU_const_type"));
      if (size != 0 && TYPE_LENGTH (result) != size)
        error (_("DW_OP_GNU_const_type has different sizes for type and data"));
    }
  else
    /* Anything will do.  */
    result = builtin_type (ctx->gdbarch)->builtin_int;

  return result;
}

/* The engine for the expression evaluator.  Using the context in CTX,
   evaluate the expression between OP_PTR and OP_END.  */

static void
execute_stack_op (struct dwarf_expr_context *ctx,
                  const gdb_byte *op_ptr, const gdb_byte *op_end)
{
  enum bfd_endian byte_order = gdbarch_byte_order (ctx->gdbarch);
  /* Old-style "untyped" DWARF values need special treatment in a
     couple of places, specifically DW_OP_mod and DW_OP_shr.  We need
     a special type for these values so we can distinguish them from
     values that have an explicit type, because explicitly-typed
     values do not need special treatment.  This special type must be
     different (in the `==' sense) from any base type coming from the
     CU.  */
  struct type *address_type = dwarf_expr_address_type (ctx);

  ctx->location = DWARF_VALUE_MEMORY;
  ctx->initialized = 1;  /* Default is initialized.  */

  if (ctx->recursion_depth > ctx->max_recursion_depth)
    error (_("DWARF-2 expression error: Loop detected (%d)."),
           ctx->recursion_depth);
  ctx->recursion_depth++;

  while (op_ptr < op_end)
    {
      enum dwarf_location_atom op = *op_ptr++;
      ULONGEST result;
      /* Assume the value is not in stack memory.
         Code that knows otherwise sets this to 1.
         Some arithmetic on stack addresses can probably be assumed to still
         be a stack address, but we skip this complication for now.
         This is just an optimization, so it's always ok to punt
         and leave this as 0.  */
      int in_stack_memory = 0;
      ULONGEST uoffset, reg;
      LONGEST offset;
      struct value *result_val = NULL;

      /* The DWARF expression might have a bug causing an infinite
         loop.  In that case, quitting is the only way out.  */
      QUIT;

      switch (op)
        {
        case DW_OP_lit0:
        case DW_OP_lit1:
        case DW_OP_lit2:
        case DW_OP_lit3:
        case DW_OP_lit4:
        case DW_OP_lit5:
        case DW_OP_lit6:
        case DW_OP_lit7:
        case DW_OP_lit8:
        case DW_OP_lit9:
        case DW_OP_lit10:
        case DW_OP_lit11:
        case DW_OP_lit12:
        case DW_OP_lit13:
        case DW_OP_lit14:
        case DW_OP_lit15:
        case DW_OP_lit16:
        case DW_OP_lit17:
        case DW_OP_lit18:
        case DW_OP_lit19:
        case DW_OP_lit20:
        case DW_OP_lit21:
        case DW_OP_lit22:
        case DW_OP_lit23:
        case DW_OP_lit24:
        case DW_OP_lit25:
        case DW_OP_lit26:
        case DW_OP_lit27:
        case DW_OP_lit28:
        case DW_OP_lit29:
        case DW_OP_lit30:
        case DW_OP_lit31:
          result = op - DW_OP_lit0;
          result_val = value_from_ulongest (address_type, result);
          break;

        case DW_OP_addr:
          result = extract_unsigned_integer (op_ptr,
                                             ctx->addr_size, byte_order);
          op_ptr += ctx->addr_size;
          /* Some versions of GCC emit DW_OP_addr before
             DW_OP_GNU_push_tls_address.  In this case the value is an
             index, not an address.  We don't support things like
             branching between the address and the TLS op.  */
          if (op_ptr >= op_end || *op_ptr != DW_OP_GNU_push_tls_address)
            result += ctx->offset;
          result_val = value_from_ulongest (address_type, result);
          break;

        case DW_OP_const1u:
          result = extract_unsigned_integer (op_ptr, 1, byte_order);
          result_val = value_from_ulongest (address_type, result);
          op_ptr += 1;
          break;
        case DW_OP_const1s:
          result = extract_signed_integer (op_ptr, 1, byte_order);
          result_val = value_from_ulongest (address_type, result);
          op_ptr += 1;
          break;
        case DW_OP_const2u:
          result = extract_unsigned_integer (op_ptr, 2, byte_order);
          result_val = value_from_ulongest (address_type, result);
          op_ptr += 2;
          break;
        case DW_OP_const2s:
          result = extract_signed_integer (op_ptr, 2, byte_order);
          result_val = value_from_ulongest (address_type, result);
          op_ptr += 2;
          break;
        case DW_OP_const4u:
          result = extract_unsigned_integer (op_ptr, 4, byte_order);
          result_val = value_from_ulongest (address_type, result);
          op_ptr += 4;
          break;
        case DW_OP_const4s:
          result = extract_signed_integer (op_ptr, 4, byte_order);
          result_val = value_from_ulongest (address_type, result);
          op_ptr += 4;
          break;
        case DW_OP_const8u:
          result = extract_unsigned_integer (op_ptr, 8, byte_order);
          result_val = value_from_ulongest (address_type, result);
          op_ptr += 8;
          break;
        case DW_OP_const8s:
          result = extract_signed_integer (op_ptr, 8, byte_order);
          result_val = value_from_ulongest (address_type, result);
          op_ptr += 8;
          break;
        case DW_OP_constu:
          op_ptr = read_uleb128 (op_ptr, op_end, &uoffset);
          result = uoffset;
          result_val = value_from_ulongest (address_type, result);
          break;
        case DW_OP_consts:
          op_ptr = read_sleb128 (op_ptr, op_end, &offset);
          result = offset;
          result_val = value_from_ulongest (address_type, result);
          break;

        /* The DW_OP_reg operations are required to occur alone in
           location expressions.  */
        case DW_OP_reg0:
        case DW_OP_reg1:
        case DW_OP_reg2:
        case DW_OP_reg3:
        case DW_OP_reg4:
        case DW_OP_reg5:
        case DW_OP_reg6:
        case DW_OP_reg7:
        case DW_OP_reg8:
        case DW_OP_reg9:
        case DW_OP_reg10:
        case DW_OP_reg11:
        case DW_OP_reg12:
        case DW_OP_reg13:
        case DW_OP_reg14:
        case DW_OP_reg15:
        case DW_OP_reg16:
        case DW_OP_reg17:
        case DW_OP_reg18:
        case DW_OP_reg19:
        case DW_OP_reg20:
        case DW_OP_reg21:
        case DW_OP_reg22:
        case DW_OP_reg23:
        case DW_OP_reg24:
        case DW_OP_reg25:
        case DW_OP_reg26:
        case DW_OP_reg27:
        case DW_OP_reg28:
        case DW_OP_reg29:
        case DW_OP_reg30:
        case DW_OP_reg31:
          if (op_ptr != op_end
              && *op_ptr != DW_OP_piece
              && *op_ptr != DW_OP_bit_piece
              && *op_ptr != DW_OP_GNU_uninit)
            error (_("DWARF-2 expression error: DW_OP_reg operations must be "
                     "used either alone or in conjunction with DW_OP_piece "
                     "or DW_OP_bit_piece."));

          result = op - DW_OP_reg0;
          result_val = value_from_ulongest (address_type, result);
          ctx->location = DWARF_VALUE_REGISTER;
          break;

        case DW_OP_regx:
          op_ptr = read_uleb128 (op_ptr, op_end, &reg);
          dwarf_expr_require_composition (op_ptr, op_end, "DW_OP_regx");

          result = reg;
          result_val = value_from_ulongest (address_type, result);
          ctx->location = DWARF_VALUE_REGISTER;
          break;

        case DW_OP_implicit_value:
          {
            ULONGEST len;

            op_ptr = read_uleb128 (op_ptr, op_end, &len);
            if (op_ptr + len > op_end)
              error (_("DW_OP_implicit_value: too few bytes available."));
            ctx->len = len;
            ctx->data = op_ptr;
            ctx->location = DWARF_VALUE_LITERAL;
            op_ptr += len;
            dwarf_expr_require_composition (op_ptr, op_end,
                                            "DW_OP_implicit_value");
          }
          goto no_push;

        case DW_OP_stack_value:
          ctx->location = DWARF_VALUE_STACK;
          dwarf_expr_require_composition (op_ptr, op_end, "DW_OP_stack_value");
          goto no_push;

        case DW_OP_GNU_implicit_pointer:
          {
            LONGEST len;

            /* The referred-to DIE.  */
            ctx->len = extract_unsigned_integer (op_ptr, ctx->addr_size,
                                                 byte_order);
            op_ptr += ctx->addr_size;

            /* The byte offset into the data.  */
            op_ptr = read_sleb128 (op_ptr, op_end, &len);
            result = (ULONGEST) len;
            result_val = value_from_ulongest (address_type, result);

            ctx->location = DWARF_VALUE_IMPLICIT_POINTER;
            dwarf_expr_require_composition (op_ptr, op_end,
                                            "DW_OP_GNU_implicit_pointer");
          }
          break;

        case DW_OP_breg0:
        case DW_OP_breg1:
        case DW_OP_breg2:
        case DW_OP_breg3:
        case DW_OP_breg4:
        case DW_OP_breg5:
        case DW_OP_breg6:
        case DW_OP_breg7:
        case DW_OP_breg8:
        case DW_OP_breg9:
        case DW_OP_breg10:
        case DW_OP_breg11:
        case DW_OP_breg12:
        case DW_OP_breg13:
        case DW_OP_breg14:
        case DW_OP_breg15:
        case DW_OP_breg16:
        case DW_OP_breg17:
        case DW_OP_breg18:
        case DW_OP_breg19:
        case DW_OP_breg20:
        case DW_OP_breg21:
        case DW_OP_breg22:
        case DW_OP_breg23:
        case DW_OP_breg24:
        case DW_OP_breg25:
        case DW_OP_breg26:
        case DW_OP_breg27:
        case DW_OP_breg28:
        case DW_OP_breg29:
        case DW_OP_breg30:
        case DW_OP_breg31:
          {
            op_ptr = read_sleb128 (op_ptr, op_end, &offset);
            result = (ctx->read_reg) (ctx->baton, op - DW_OP_breg0);
            result += offset;
            result_val = value_from_ulongest (address_type, result);
          }
          break;
        case DW_OP_bregx:
          {
            op_ptr = read_uleb128 (op_ptr, op_end, &reg);
            op_ptr = read_sleb128 (op_ptr, op_end, &offset);
            result = (ctx->read_reg) (ctx->baton, reg);
            result += offset;
            result_val = value_from_ulongest (address_type, result);
          }
          break;
        case DW_OP_fbreg:
          {
            const gdb_byte *datastart;
            size_t datalen;
            unsigned int before_stack_len;

            op_ptr = read_sleb128 (op_ptr, op_end, &offset);
            /* Rather than create a whole new context, we simply
               record the stack length before execution, then reset it
               afterwards, effectively erasing whatever the recursive
               call put there.  */
            before_stack_len = ctx->stack_len;
            /* FIXME: cagney/2003-03-26: This code should be using
               get_frame_base_address(), and then implement a dwarf2
               specific this_base method.  */
            (ctx->get_frame_base) (ctx->baton, &datastart, &datalen);
            dwarf_expr_eval (ctx, datastart, datalen);
            if (ctx->location == DWARF_VALUE_MEMORY)
              result = dwarf_expr_fetch_address (ctx, 0);
            else if (ctx->location == DWARF_VALUE_REGISTER)
              result
                = (ctx->read_reg) (ctx->baton,
                                   value_as_long (dwarf_expr_fetch (ctx, 0)));
            else
              error (_("Not implemented: computing frame "
                       "base using explicit value operator"));
            result = result + offset;
            result_val = value_from_ulongest (address_type, result);
            in_stack_memory = 1;
            ctx->stack_len = before_stack_len;
            ctx->location = DWARF_VALUE_MEMORY;
          }
          break;

        case DW_OP_dup:
          result_val = dwarf_expr_fetch (ctx, 0);
          in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, 0);
          break;

        case DW_OP_drop:
          dwarf_expr_pop (ctx);
          goto no_push;

        case DW_OP_pick:
          offset = *op_ptr++;
          result_val = dwarf_expr_fetch (ctx, offset);
          in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, offset);
          break;

        case DW_OP_swap:
          {
            struct dwarf_stack_value t1, t2;

            if (ctx->stack_len < 2)
              error (_("Not enough elements for "
                       "DW_OP_swap.  Need 2, have %d."),
                     ctx->stack_len);
            t1 = ctx->stack[ctx->stack_len - 1];
            t2 = ctx->stack[ctx->stack_len - 2];
            ctx->stack[ctx->stack_len - 1] = t2;
            ctx->stack[ctx->stack_len - 2] = t1;
            goto no_push;
          }

        case DW_OP_over:
          result_val = dwarf_expr_fetch (ctx, 1);
          in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, 1);
          break;

        case DW_OP_rot:
          {
            struct dwarf_stack_value t1, t2, t3;

            if (ctx->stack_len < 3)
              error (_("Not enough elements for "
                       "DW_OP_rot.  Need 3, have %d."),
                     ctx->stack_len);
            t1 = ctx->stack[ctx->stack_len - 1];
            t2 = ctx->stack[ctx->stack_len - 2];
            t3 = ctx->stack[ctx->stack_len - 3];
            ctx->stack[ctx->stack_len - 1] = t2;
            ctx->stack[ctx->stack_len - 2] = t3;
            ctx->stack[ctx->stack_len - 3] = t1;
            goto no_push;
          }

        case DW_OP_deref:
        case DW_OP_deref_size:
        case DW_OP_GNU_deref_type:
          {
            int addr_size = (op == DW_OP_deref ? ctx->addr_size : *op_ptr++);
            gdb_byte *buf = alloca (addr_size);
            CORE_ADDR addr = dwarf_expr_fetch_address (ctx, 0);
            struct type *type;

            dwarf_expr_pop (ctx);

            if (op == DW_OP_GNU_deref_type)
              {
                ULONGEST type_die;

                op_ptr = read_uleb128 (op_ptr, op_end, &type_die);
                type = dwarf_get_base_type (ctx, type_die, 0);
              }
            else
              type = address_type;

            (ctx->read_mem) (ctx->baton, buf, addr, addr_size);

            /* If the size of the object read from memory is different
               from the type length, we need to zero-extend it.  */
            if (TYPE_LENGTH (type) != addr_size)
              {
                ULONGEST result =
                  extract_unsigned_integer (buf, addr_size, byte_order);

                buf = alloca (TYPE_LENGTH (type));
                store_unsigned_integer (buf, TYPE_LENGTH (type),
                                        byte_order, result);
              }

            result_val = value_from_contents_and_address (type, buf, addr);
            break;
          }

        case DW_OP_abs:
        case DW_OP_neg:
        case DW_OP_not:
        case DW_OP_plus_uconst:
          {
            /* Unary operations.  */
            result_val = dwarf_expr_fetch (ctx, 0);
            dwarf_expr_pop (ctx);

            switch (op)
              {
              case DW_OP_abs:
                if (value_less (result_val,
                                value_zero (value_type (result_val),
                                            not_lval)))
                  result_val = value_neg (result_val);
                break;
              case DW_OP_neg:
                result_val = value_neg (result_val);
                break;
              case DW_OP_not:
                dwarf_require_integral (value_type (result_val));
                result_val = value_complement (result_val);
                break;
              case DW_OP_plus_uconst:
                dwarf_require_integral (value_type (result_val));
                result = value_as_long (result_val);
                op_ptr = read_uleb128 (op_ptr, op_end, &reg);
                result += reg;
                result_val = value_from_ulongest (address_type, result);
                break;
              }
          }
          break;

        case DW_OP_and:
        case DW_OP_div:
        case DW_OP_minus:
        case DW_OP_mod:
        case DW_OP_mul:
        case DW_OP_or:
        case DW_OP_plus:
        case DW_OP_shl:
        case DW_OP_shr:
        case DW_OP_shra:
        case DW_OP_xor:
        case DW_OP_le:
        case DW_OP_ge:
        case DW_OP_eq:
        case DW_OP_lt:
        case DW_OP_gt:
        case DW_OP_ne:
          {
            /* Binary operations.  */
            struct value *first, *second;

            second = dwarf_expr_fetch (ctx, 0);
            dwarf_expr_pop (ctx);

            first = dwarf_expr_fetch (ctx, 0);
            dwarf_expr_pop (ctx);

            if (! base_types_equal_p (value_type (first), value_type (second)))
              error (_("Incompatible types on DWARF stack"));

            switch (op)
              {
              case DW_OP_and:
                dwarf_require_integral (value_type (first));
                dwarf_require_integral (value_type (second));
                result_val = value_binop (first, second, BINOP_BITWISE_AND);
                break;
              case DW_OP_div:
                result_val = value_binop (first, second, BINOP_DIV);
                break;
              case DW_OP_minus:
                result_val = value_binop (first, second, BINOP_SUB);
                break;
              case DW_OP_mod:
                {
                  int cast_back = 0;
                  struct type *orig_type = value_type (first);

                  /* We have to special-case "old-style" untyped values
                     -- these must have mod computed using unsigned
                     math.  */
                  if (orig_type == address_type)
                    {
                      struct type *utype
                        = get_unsigned_type (ctx->gdbarch, orig_type);

                      cast_back = 1;
                      first = value_cast (utype, first);
                      second = value_cast (utype, second);
                    }
                  /* Note that value_binop doesn't handle float or
                     decimal float here.  This seems unimportant.  */
                  result_val = value_binop (first, second, BINOP_MOD);
                  if (cast_back)
                    result_val = value_cast (orig_type, result_val);
                }
                break;
              case DW_OP_mul:
                result_val = value_binop (first, second, BINOP_MUL);
                break;
              case DW_OP_or:
                dwarf_require_integral (value_type (first));
                dwarf_require_integral (value_type (second));
                result_val = value_binop (first, second, BINOP_BITWISE_IOR);
                break;
              case DW_OP_plus:
                result_val = value_binop (first, second, BINOP_ADD);
                break;
              case DW_OP_shl:
                dwarf_require_integral (value_type (first));
                dwarf_require_integral (value_type (second));
                result_val = value_binop (first, second, BINOP_LSH);
                break;
              case DW_OP_shr:
                dwarf_require_integral (value_type (first));
                dwarf_require_integral (value_type (second));
                if (!TYPE_UNSIGNED (value_type (first)))
                  {
                    struct type *utype
                      = get_unsigned_type (ctx->gdbarch, value_type (first));

                    first = value_cast (utype, first);
                  }

                result_val = value_binop (first, second, BINOP_RSH);
                /* Make sure we wind up with the same type we started
                   with.  */
                if (value_type (result_val) != value_type (second))
                  result_val = value_cast (value_type (second), result_val);
                break;
              case DW_OP_shra:
                dwarf_require_integral (value_type (first));
                dwarf_require_integral (value_type (second));
                if (TYPE_UNSIGNED (value_type (first)))
                  {
                    struct type *stype
                      = get_signed_type (ctx->gdbarch, value_type (first));

                    first = value_cast (stype, first);
                  }

                result_val = value_binop (first, second, BINOP_RSH);
                /* Make sure we wind up with the same type we started
                   with.  */
                if (value_type (result_val) != value_type (second))
                  result_val = value_cast (value_type (second), result_val);
                break;
              case DW_OP_xor:
                dwarf_require_integral (value_type (first));
                dwarf_require_integral (value_type (second));
                result_val = value_binop (first, second, BINOP_BITWISE_XOR);
                break;
              case DW_OP_le:
                /* A <= B is !(B < A).  */
                result = ! value_less (second, first);
                result_val = value_from_ulongest (address_type, result);
                break;
              case DW_OP_ge:
                /* A >= B is !(A < B).  */
                result = ! value_less (first, second);
                result_val = value_from_ulongest (address_type, result);
                break;
              case DW_OP_eq:
                result = value_equal (first, second);
                result_val = value_from_ulongest (address_type, result);
                break;
              case DW_OP_lt:
                result = value_less (first, second);
                result_val = value_from_ulongest (address_type, result);
                break;
              case DW_OP_gt:
                /* A > B is B < A.  */
                result = value_less (second, first);
                result_val = value_from_ulongest (address_type, result);
                break;
              case DW_OP_ne:
                result = ! value_equal (first, second);
                result_val = value_from_ulongest (address_type, result);
                break;
              default:
                internal_error (__FILE__, __LINE__,
                                _("Can't be reached."));
              }
          }
          break;

        case DW_OP_call_frame_cfa:
          result = (ctx->get_frame_cfa) (ctx->baton);
          result_val = value_from_ulongest (address_type, result);
          in_stack_memory = 1;
          break;

        case DW_OP_GNU_push_tls_address:
          /* Variable is at a constant offset in the thread-local
             storage block into the objfile for the current thread and
             the dynamic linker module containing this expression.  Here
             we return the offset from that base.  The top of the
             stack has the offset from the beginning of the thread
             control block at which the variable is located.  Nothing
             should follow this operator, so the top of stack would be
             returned.  */
          result = value_as_long (dwarf_expr_fetch (ctx, 0));
          dwarf_expr_pop (ctx);
          result = (ctx->get_tls_address) (ctx->baton, result);
          result_val = value_from_ulongest (address_type, result);
          break;

        case DW_OP_skip:
          offset = extract_signed_integer (op_ptr, 2, byte_order);
          op_ptr += 2;
          op_ptr += offset;
          goto no_push;

        case DW_OP_bra:
          {
            struct value *val;

            offset = extract_signed_integer (op_ptr, 2, byte_order);
            op_ptr += 2;
            val = dwarf_expr_fetch (ctx, 0);
            dwarf_require_integral (value_type (val));
            if (value_as_long (val) != 0)
              op_ptr += offset;
            dwarf_expr_pop (ctx);
          }
          goto no_push;
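
          /* Illustration (editorial note, not from the original
             sources): the 2-byte operand of DW_OP_skip and DW_OP_bra
             is a signed offset relative to the opcode that follows it,
             so a counted loop can be sketched as

                 ... DW_OP_dup  DW_OP_bra <forward N>  DW_OP_skip <back M> ...

             DW_OP_bra pops its condition from the stack, DW_OP_skip
             branches unconditionally, and the QUIT check at the top of
             the interpreter loop is what lets the user interrupt an
             expression that never terminates.  */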

        case DW_OP_nop:
          goto no_push;

        case DW_OP_piece:
          {
            ULONGEST size;

            /* Record the piece.  */
            op_ptr = read_uleb128 (op_ptr, op_end, &size);
            add_piece (ctx, 8 * size, 0);

            /* Pop off the address/regnum, and reset the location
               type.  */
            if (ctx->location != DWARF_VALUE_LITERAL
                && ctx->location != DWARF_VALUE_OPTIMIZED_OUT)
              dwarf_expr_pop (ctx);
            ctx->location = DWARF_VALUE_MEMORY;
          }
          goto no_push;

        case DW_OP_bit_piece:
          {
            ULONGEST size, offset;

            /* Record the piece.  */
            op_ptr = read_uleb128 (op_ptr, op_end, &size);
            op_ptr = read_uleb128 (op_ptr, op_end, &offset);
            add_piece (ctx, size, offset);

            /* Pop off the address/regnum, and reset the location
               type.  */
            if (ctx->location != DWARF_VALUE_LITERAL
                && ctx->location != DWARF_VALUE_OPTIMIZED_OUT)
              dwarf_expr_pop (ctx);
            ctx->location = DWARF_VALUE_MEMORY;
          }
          goto no_push;

        case DW_OP_GNU_uninit:
          if (op_ptr != op_end)
            error (_("DWARF-2 expression error: DW_OP_GNU_uninit must always "
                     "be the very last op."));

          ctx->initialized = 0;
          goto no_push;

        case DW_OP_call2:
          result = extract_unsigned_integer (op_ptr, 2, byte_order);
          op_ptr += 2;
          ctx->dwarf_call (ctx, result);
          goto no_push;

        case DW_OP_call4:
          result = extract_unsigned_integer (op_ptr, 4, byte_order);
          op_ptr += 4;
          ctx->dwarf_call (ctx, result);
          goto no_push;

        case DW_OP_GNU_entry_value:
          /* This operation is not yet supported by GDB.  */
          ctx->location = DWARF_VALUE_OPTIMIZED_OUT;
          ctx->stack_len = 0;
          ctx->num_pieces = 0;
          goto abort_expression;

        case DW_OP_GNU_const_type:
          {
            ULONGEST type_die;
            int n;
            const gdb_byte *data;
            struct type *type;

            op_ptr = read_uleb128 (op_ptr, op_end, &type_die);
            n = *op_ptr++;
            data = op_ptr;
            op_ptr += n;

            type = dwarf_get_base_type (ctx, type_die, n);
            result_val = value_from_contents (type, data);
          }
          break;

        case DW_OP_GNU_regval_type:
          {
            ULONGEST type_die;
            struct type *type;

            op_ptr = read_uleb128 (op_ptr, op_end, &reg);
            op_ptr = read_uleb128 (op_ptr, op_end, &type_die);

            type = dwarf_get_base_type (ctx, type_die, 0);
            result = (ctx->read_reg) (ctx->baton, reg);
            result_val = value_from_ulongest (type, result);
          }
          break;

        case DW_OP_GNU_convert:
        case DW_OP_GNU_reinterpret:
          {
            ULONGEST type_die;
            struct type *type;

            op_ptr = read_uleb128 (op_ptr, op_end, &type_die);

            if (type_die == 0)
              type = address_type;
            else
              type = dwarf_get_base_type (ctx, type_die, 0);

            result_val = dwarf_expr_fetch (ctx, 0);
            dwarf_expr_pop (ctx);

            if (op == DW_OP_GNU_convert)
              result_val = value_cast (type, result_val);
            else if (type == value_type (result_val))
              {
                /* Nothing.  */
              }
            else if (TYPE_LENGTH (type)
                     != TYPE_LENGTH (value_type (result_val)))
              error (_("DW_OP_GNU_reinterpret has wrong size"));
            else
              result_val
                = value_from_contents (type,
                                       value_contents_all (result_val));
          }
          break;

        default:
          error (_("Unhandled dwarf expression opcode 0x%x"), op);
        }

      /* Most things push a result value.  */
      gdb_assert (result_val != NULL);
      dwarf_expr_push (ctx, result_val, in_stack_memory);
    no_push:
      ;
    }

  /* To simplify our main caller, if the result is an implicit
     pointer, then make a pieced value.  This is ok because we can't
     have implicit pointers in contexts where pieces are invalid.  */
  if (ctx->location == DWARF_VALUE_IMPLICIT_POINTER)
    add_piece (ctx, 8 * ctx->addr_size, 0);

 abort_expression:
  ctx->recursion_depth--;
  gdb_assert (ctx->recursion_depth >= 0);
}
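
/* A self-checking sketch (editorial addition, guarded out): constant-only
   expressions such as DW_OP_lit5 DW_OP_lit3 DW_OP_plus exercise the
   evaluator without any of the context callbacks, since literals and
   arithmetic never leave the DWARF stack.  */

#if 0
static void
example_check_lit_plus (struct gdbarch *gdbarch)
{
  static const gdb_byte expr[] = { DW_OP_lit5, DW_OP_lit3, DW_OP_plus };
  struct dwarf_expr_context *ctx = new_dwarf_expr_context ();
  struct cleanup *old_chain = make_cleanup_free_dwarf_expr_context (ctx);

  ctx->gdbarch = gdbarch;
  ctx->addr_size = gdbarch_addr_bit (gdbarch) / 8;

  dwarf_expr_eval (ctx, expr, sizeof (expr));
  gdb_assert (value_as_long (dwarf_expr_fetch (ctx, 0)) == 8);

  do_cleanups (old_chain);
}
#endif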

void
_initialize_dwarf2expr (void)
{
  dwarf_arch_cookie
    = gdbarch_data_register_post_init (dwarf_gdbarch_types_init);
}