gdb/dwarf2expr.c
/* DWARF 2 Expression Evaluator.

   Copyright (C) 2001, 2002, 2003, 2005, 2007, 2008, 2009, 2010, 2011
   Free Software Foundation, Inc.

   Contributed by Daniel Berlin (dan@dberlin.org)

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "symtab.h"
#include "gdbtypes.h"
#include "value.h"
#include "gdbcore.h"
#include "dwarf2.h"
#include "dwarf2expr.h"
#include "gdb_assert.h"

/* Local prototypes.  */

static void execute_stack_op (struct dwarf_expr_context *,
                              const gdb_byte *, const gdb_byte *);

/* Cookie for gdbarch data.  */

static struct gdbarch_data *dwarf_arch_cookie;

/* This holds gdbarch-specific types used by the DWARF expression
   evaluator.  See comments in execute_stack_op.  */

struct dwarf_gdbarch_types
{
  struct type *dw_types[3];
};

/* Allocate and fill in dwarf_gdbarch_types for an arch.  */

static void *
dwarf_gdbarch_types_init (struct gdbarch *gdbarch)
{
  struct dwarf_gdbarch_types *types
    = GDBARCH_OBSTACK_ZALLOC (gdbarch, struct dwarf_gdbarch_types);

  /* The types themselves are lazily initialized.  */

  return types;
}

/* Return the type used for DWARF operations where the type is
   unspecified in the DWARF spec.  Only certain sizes are
   supported.  */

static struct type *
dwarf_expr_address_type (struct dwarf_expr_context *ctx)
{
  struct dwarf_gdbarch_types *types = gdbarch_data (ctx->gdbarch,
                                                    dwarf_arch_cookie);
  int ndx;

  if (ctx->addr_size == 2)
    ndx = 0;
  else if (ctx->addr_size == 4)
    ndx = 1;
  else if (ctx->addr_size == 8)
    ndx = 2;
  else
    error (_("Unsupported address size in DWARF expressions: %d bits"),
           8 * ctx->addr_size);

  if (types->dw_types[ndx] == NULL)
    types->dw_types[ndx]
      = arch_integer_type (ctx->gdbarch,
                           8 * ctx->addr_size,
                           0, "<signed DWARF address type>");

  return types->dw_types[ndx];
}

/* Create a new context for the expression evaluator.  */

struct dwarf_expr_context *
new_dwarf_expr_context (void)
{
  struct dwarf_expr_context *retval;

  retval = xcalloc (1, sizeof (struct dwarf_expr_context));
  retval->stack_len = 0;
  retval->stack_allocated = 10;
  retval->stack = xmalloc (retval->stack_allocated
                           * sizeof (struct dwarf_stack_value));
  retval->num_pieces = 0;
  retval->pieces = 0;
  retval->max_recursion_depth = 0x100;
  retval->mark = value_mark ();
  return retval;
}

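/* A minimal sketch of the intended lifecycle of a context.  The
   callback wiring below is hypothetical (my_baton and my_read_reg
   are illustrative names, not part of this file); a real caller
   supplies whichever callbacks the expression being evaluated
   actually needs:

       struct dwarf_expr_context *ctx = new_dwarf_expr_context ();
       struct cleanup *back_to = make_cleanup_free_dwarf_expr_context (ctx);

       ctx->gdbarch = gdbarch;
       ctx->addr_size = addr_size;
       ctx->baton = my_baton;            -- hypothetical baton
       ctx->read_reg = my_read_reg;      -- hypothetical callback

       dwarf_expr_eval (ctx, expr_bytes, expr_len);
       addr = dwarf_expr_fetch_address (ctx, 0);

       do_cleanups (back_to);            -- frees CTX via the cleanup  */
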
/* Release the memory allocated to CTX.  */

void
free_dwarf_expr_context (struct dwarf_expr_context *ctx)
{
  value_free_to_mark (ctx->mark);
  xfree (ctx->stack);
  xfree (ctx->pieces);
  xfree (ctx);
}

/* Helper for make_cleanup_free_dwarf_expr_context.  */

static void
free_dwarf_expr_context_cleanup (void *arg)
{
  free_dwarf_expr_context (arg);
}

/* Return a cleanup that calls free_dwarf_expr_context.  */

struct cleanup *
make_cleanup_free_dwarf_expr_context (struct dwarf_expr_context *ctx)
{
  return make_cleanup (free_dwarf_expr_context_cleanup, ctx);
}

/* Expand the memory allocated to CTX's stack to contain at least
   NEED more elements than are currently used.  */

static void
dwarf_expr_grow_stack (struct dwarf_expr_context *ctx, size_t need)
{
  if (ctx->stack_len + need > ctx->stack_allocated)
    {
      size_t newlen = ctx->stack_len + need + 10;

      ctx->stack = xrealloc (ctx->stack,
                             newlen * sizeof (struct dwarf_stack_value));
      ctx->stack_allocated = newlen;
    }
}

/* Push VALUE onto CTX's stack.  */

static void
dwarf_expr_push (struct dwarf_expr_context *ctx, struct value *value,
                 int in_stack_memory)
{
  struct dwarf_stack_value *v;

  dwarf_expr_grow_stack (ctx, 1);
  v = &ctx->stack[ctx->stack_len++];
  v->value = value;
  v->in_stack_memory = in_stack_memory;
}

/* Wrap the CORE_ADDR VALUE in a value of the untyped DWARF address
   type and push it onto CTX's stack.  */

void
dwarf_expr_push_address (struct dwarf_expr_context *ctx, CORE_ADDR value,
                         int in_stack_memory)
{
  dwarf_expr_push (ctx,
                   value_from_ulongest (dwarf_expr_address_type (ctx), value),
                   in_stack_memory);
}

/* Pop the top item off of CTX's stack.  */

static void
dwarf_expr_pop (struct dwarf_expr_context *ctx)
{
  if (ctx->stack_len <= 0)
    error (_("dwarf expression stack underflow"));
  ctx->stack_len--;
}

/* Retrieve the N'th item on CTX's stack.  */

struct value *
dwarf_expr_fetch (struct dwarf_expr_context *ctx, int n)
{
  if (ctx->stack_len <= n)
    error (_("Asked for position %d of stack, "
             "stack only has %d elements on it."),
           n, ctx->stack_len);
  return ctx->stack[ctx->stack_len - (1 + n)].value;
}

/* Require that TYPE be an integral type; throw an exception if not.  */

static void
dwarf_require_integral (struct type *type)
{
  if (TYPE_CODE (type) != TYPE_CODE_INT
      && TYPE_CODE (type) != TYPE_CODE_CHAR
      && TYPE_CODE (type) != TYPE_CODE_BOOL)
    error (_("integral type expected in DWARF expression"));
}

/* Return the unsigned form of TYPE.  TYPE is necessarily an integral
   type.  */

static struct type *
get_unsigned_type (struct gdbarch *gdbarch, struct type *type)
{
  switch (TYPE_LENGTH (type))
    {
    case 1:
      return builtin_type (gdbarch)->builtin_uint8;
    case 2:
      return builtin_type (gdbarch)->builtin_uint16;
    case 4:
      return builtin_type (gdbarch)->builtin_uint32;
    case 8:
      return builtin_type (gdbarch)->builtin_uint64;
    default:
      error (_("no unsigned variant found for type, while evaluating "
               "DWARF expression"));
    }
}

/* Retrieve the N'th item on CTX's stack, converted to an address.  */

CORE_ADDR
dwarf_expr_fetch_address (struct dwarf_expr_context *ctx, int n)
{
  struct value *result_val = dwarf_expr_fetch (ctx, n);
  enum bfd_endian byte_order = gdbarch_byte_order (ctx->gdbarch);
  ULONGEST result;

  dwarf_require_integral (value_type (result_val));
  result = extract_unsigned_integer (value_contents (result_val),
                                     TYPE_LENGTH (value_type (result_val)),
                                     byte_order);

  /* For most architectures, calling extract_unsigned_integer() alone
     is sufficient for extracting an address.  However, some
     architectures (e.g. MIPS) use signed addresses and using
     extract_unsigned_integer() will not produce a correct
     result.  Make sure we invoke gdbarch_integer_to_address()
     for those architectures which require it.  */
  if (gdbarch_integer_to_address_p (ctx->gdbarch))
    {
      gdb_byte *buf = alloca (ctx->addr_size);
      struct type *int_type = get_unsigned_type (ctx->gdbarch,
                                                 value_type (result_val));

      store_unsigned_integer (buf, ctx->addr_size, byte_order, result);
      return gdbarch_integer_to_address (ctx->gdbarch, int_type, buf);
    }

  return (CORE_ADDR) result;
}
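
/* As a concrete example of the conversion above: on 64-bit MIPS,
   32-bit addresses are kept sign-extended, so the 32-bit value
   0x80000000 must become the address 0xffffffff80000000.
   gdbarch_integer_to_address performs that kind of target-specific
   conversion, which a plain zero-extension would get wrong.  */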

/* Retrieve the in_stack_memory flag of the N'th item on CTX's stack.  */

int
dwarf_expr_fetch_in_stack_memory (struct dwarf_expr_context *ctx, int n)
{
  if (ctx->stack_len <= n)
    error (_("Asked for position %d of stack, "
             "stack only has %d elements on it."),
           n, ctx->stack_len);
  return ctx->stack[ctx->stack_len - (1 + n)].in_stack_memory;
}

/* Return true if the expression stack is empty.  */

static int
dwarf_expr_stack_empty_p (struct dwarf_expr_context *ctx)
{
  return ctx->stack_len == 0;
}

/* Add a new piece to CTX's piece list.  */
static void
add_piece (struct dwarf_expr_context *ctx, ULONGEST size, ULONGEST offset)
{
  struct dwarf_expr_piece *p;

  ctx->num_pieces++;

  ctx->pieces = xrealloc (ctx->pieces,
                          (ctx->num_pieces
                           * sizeof (struct dwarf_expr_piece)));

  p = &ctx->pieces[ctx->num_pieces - 1];
  p->location = ctx->location;
  p->size = size;
  p->offset = offset;

  if (p->location == DWARF_VALUE_LITERAL)
    {
      p->v.literal.data = ctx->data;
      p->v.literal.length = ctx->len;
    }
  else if (dwarf_expr_stack_empty_p (ctx))
    {
      p->location = DWARF_VALUE_OPTIMIZED_OUT;
      /* Also reset the context's location, for our callers.  This is
         a somewhat strange approach, but this lets us avoid setting
         the location to DWARF_VALUE_MEMORY in all the individual
         cases in the evaluator.  */
      ctx->location = DWARF_VALUE_OPTIMIZED_OUT;
    }
  else if (p->location == DWARF_VALUE_MEMORY)
    {
      p->v.mem.addr = dwarf_expr_fetch_address (ctx, 0);
      p->v.mem.in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, 0);
    }
  else if (p->location == DWARF_VALUE_IMPLICIT_POINTER)
    {
      p->v.ptr.die = ctx->len;
      p->v.ptr.offset = value_as_long (dwarf_expr_fetch (ctx, 0));
    }
  else if (p->location == DWARF_VALUE_REGISTER)
    p->v.regno = value_as_long (dwarf_expr_fetch (ctx, 0));
  else
    {
      p->v.value = dwarf_expr_fetch (ctx, 0);
    }
}
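
/* For example, the expression "DW_OP_reg3 DW_OP_piece 4 DW_OP_reg10
   DW_OP_piece 2" (an example taken from the DWARF standard) describes
   a 6-byte value whose first four bytes live in register 3 and whose
   last two bytes live in register 10; evaluating it leaves two
   entries in CTX's piece list.  Note that add_piece takes SIZE in
   bits, so the DW_OP_piece handler below passes 8 * size.  */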

/* Evaluate the expression at ADDR (LEN bytes long) using the context
   CTX.  */

void
dwarf_expr_eval (struct dwarf_expr_context *ctx, const gdb_byte *addr,
                 size_t len)
{
  int old_recursion_depth = ctx->recursion_depth;

  execute_stack_op (ctx, addr, addr + len);

  /* CTX RECURSION_DEPTH becomes invalid if an exception was thrown here.  */

  gdb_assert (ctx->recursion_depth == old_recursion_depth);
}

/* Decode the unsigned LEB128 constant at BUF into the variable pointed to
   by R, and return the new value of BUF.  Verify that it doesn't extend
   past BUF_END.  */

const gdb_byte *
read_uleb128 (const gdb_byte *buf, const gdb_byte *buf_end, ULONGEST * r)
{
  unsigned shift = 0;
  ULONGEST result = 0;
  gdb_byte byte;

  while (1)
    {
      if (buf >= buf_end)
        error (_("read_uleb128: Corrupted DWARF expression."));

      byte = *buf++;
      /* Widen the payload before shifting: SHIFT can reach or exceed
         the width of int, and shifting the int-typed payload that far
         would be undefined behavior.  */
      result |= ((ULONGEST) (byte & 0x7f)) << shift;
      if ((byte & 0x80) == 0)
        break;
      shift += 7;
    }
  *r = result;
  return buf;
}
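
/* For example, the standard ULEB128 test value 624485 is encoded as
   the bytes 0xe5 0x8e 0x26:

       0xe5: payload 0x65, high bit set, so continue;
       0x8e: payload 0x0e, high bit set, so continue;
       0x26: payload 0x26, high bit clear, so stop.

   result = 0x65 | (0x0e << 7) | (0x26 << 14) = 624485.  */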

/* Decode the signed LEB128 constant at BUF into the variable pointed to
   by R, and return the new value of BUF.  Verify that it doesn't extend
   past BUF_END.  */

const gdb_byte *
read_sleb128 (const gdb_byte *buf, const gdb_byte *buf_end, LONGEST * r)
{
  unsigned shift = 0;
  LONGEST result = 0;
  gdb_byte byte;

  while (1)
    {
      if (buf >= buf_end)
        error (_("read_sleb128: Corrupted DWARF expression."));

      byte = *buf++;
      /* As in read_uleb128, widen the payload before shifting.  */
      result |= ((LONGEST) (byte & 0x7f)) << shift;
      shift += 7;
      if ((byte & 0x80) == 0)
        break;
    }
  /* Sign-extend if the encoding's sign bit (bit 6 of the final byte)
     is set and the value doesn't already fill the result.  */
  if (shift < (sizeof (*r) * 8) && (byte & 0x40) != 0)
    result |= -((LONGEST) 1 << shift);

  *r = result;
  return buf;
}
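
/* For example, the standard SLEB128 test value -624485 is encoded as
   the bytes 0x9b 0xf1 0x59.  The payloads 0x1b, 0x71 and 0x59
   accumulate to 0x167b1b with SHIFT ending at 21; since bit 6 of the
   final byte (0x59 & 0x40) is set, the value is sign-extended from
   bit 21, giving 0x167b1b - 0x200000 = -624485.  */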
\f

/* Check that the current operator is either at the end of an
   expression, or that it is followed by a composition operator.  */

void
dwarf_expr_require_composition (const gdb_byte *op_ptr, const gdb_byte *op_end,
                                const char *op_name)
{
  /* It seems like DW_OP_GNU_uninit should be handled here.  However,
     it doesn't seem to make sense for DW_OP_*_value, and it was not
     checked at the other place that this function is called.  */
  if (op_ptr != op_end && *op_ptr != DW_OP_piece && *op_ptr != DW_OP_bit_piece)
    error (_("DWARF-2 expression error: `%s' operations must be "
             "used either alone or in conjunction with DW_OP_piece "
             "or DW_OP_bit_piece."),
           op_name);
}

/* Return true iff the types T1 and T2 are "the same".  This only does
   checks that might reasonably be needed to compare DWARF base
   types.  */

static int
base_types_equal_p (struct type *t1, struct type *t2)
{
  if (TYPE_CODE (t1) != TYPE_CODE (t2))
    return 0;
  if (TYPE_UNSIGNED (t1) != TYPE_UNSIGNED (t2))
    return 0;
  return TYPE_LENGTH (t1) == TYPE_LENGTH (t2);
}

/* A convenience function to call get_base_type on CTX and return the
   result.  DIE is the DIE whose type we need.  SIZE is non-zero if
   this function should verify that the resulting type has the correct
   size.  */

static struct type *
dwarf_get_base_type (struct dwarf_expr_context *ctx, ULONGEST die, int size)
{
  struct type *result;

  if (ctx->get_base_type)
    {
      result = ctx->get_base_type (ctx, die);
      if (size != 0 && TYPE_LENGTH (result) != size)
        error (_("DW_OP_GNU_const_type has different sizes for type and data"));
    }
  else
    /* Anything will do.  */
    result = builtin_type (ctx->gdbarch)->builtin_int;

  return result;
}

/* The engine for the expression evaluator.  Using the context in CTX,
   evaluate the expression between OP_PTR and OP_END.  */

static void
execute_stack_op (struct dwarf_expr_context *ctx,
                  const gdb_byte *op_ptr, const gdb_byte *op_end)
{
  enum bfd_endian byte_order = gdbarch_byte_order (ctx->gdbarch);
  /* Old-style "untyped" DWARF values need special treatment in a
     couple of places, specifically DW_OP_mod and DW_OP_shr.  We need
     a special type for these values so we can distinguish them from
     values that have an explicit type, because explicitly-typed
     values do not need special treatment.  This special type must be
     different (in the `==' sense) from any base type coming from the
     CU.  */
  struct type *address_type = dwarf_expr_address_type (ctx);

  ctx->location = DWARF_VALUE_MEMORY;
  ctx->initialized = 1;  /* Default is initialized.  */

  if (ctx->recursion_depth > ctx->max_recursion_depth)
    error (_("DWARF-2 expression error: Loop detected (%d)."),
           ctx->recursion_depth);
  ctx->recursion_depth++;

  while (op_ptr < op_end)
    {
      enum dwarf_location_atom op = *op_ptr++;
      ULONGEST result;
      /* Assume the value is not in stack memory.
         Code that knows otherwise sets this to 1.
         Some arithmetic on stack addresses can probably be assumed to still
         be a stack address, but we skip this complication for now.
         This is just an optimization, so it's always ok to punt
         and leave this as 0.  */
      int in_stack_memory = 0;
      ULONGEST uoffset, reg;
      LONGEST offset;
      struct value *result_val = NULL;

      switch (op)
        {
        case DW_OP_lit0:
        case DW_OP_lit1:
        case DW_OP_lit2:
        case DW_OP_lit3:
        case DW_OP_lit4:
        case DW_OP_lit5:
        case DW_OP_lit6:
        case DW_OP_lit7:
        case DW_OP_lit8:
        case DW_OP_lit9:
        case DW_OP_lit10:
        case DW_OP_lit11:
        case DW_OP_lit12:
        case DW_OP_lit13:
        case DW_OP_lit14:
        case DW_OP_lit15:
        case DW_OP_lit16:
        case DW_OP_lit17:
        case DW_OP_lit18:
        case DW_OP_lit19:
        case DW_OP_lit20:
        case DW_OP_lit21:
        case DW_OP_lit22:
        case DW_OP_lit23:
        case DW_OP_lit24:
        case DW_OP_lit25:
        case DW_OP_lit26:
        case DW_OP_lit27:
        case DW_OP_lit28:
        case DW_OP_lit29:
        case DW_OP_lit30:
        case DW_OP_lit31:
          result = op - DW_OP_lit0;
          result_val = value_from_ulongest (address_type, result);
          break;

        case DW_OP_addr:
          result = extract_unsigned_integer (op_ptr,
                                             ctx->addr_size, byte_order);
          op_ptr += ctx->addr_size;
          /* Some versions of GCC emit DW_OP_addr before
             DW_OP_GNU_push_tls_address.  In this case the value is an
             index, not an address.  We don't support things like
             branching between the address and the TLS op.  */
          if (op_ptr >= op_end || *op_ptr != DW_OP_GNU_push_tls_address)
            result += ctx->offset;
          result_val = value_from_ulongest (address_type, result);
          break;

        case DW_OP_const1u:
          result = extract_unsigned_integer (op_ptr, 1, byte_order);
          result_val = value_from_ulongest (address_type, result);
          op_ptr += 1;
          break;
        case DW_OP_const1s:
          result = extract_signed_integer (op_ptr, 1, byte_order);
          result_val = value_from_ulongest (address_type, result);
          op_ptr += 1;
          break;
        case DW_OP_const2u:
          result = extract_unsigned_integer (op_ptr, 2, byte_order);
          result_val = value_from_ulongest (address_type, result);
          op_ptr += 2;
          break;
        case DW_OP_const2s:
          result = extract_signed_integer (op_ptr, 2, byte_order);
          result_val = value_from_ulongest (address_type, result);
          op_ptr += 2;
          break;
        case DW_OP_const4u:
          result = extract_unsigned_integer (op_ptr, 4, byte_order);
          result_val = value_from_ulongest (address_type, result);
          op_ptr += 4;
          break;
        case DW_OP_const4s:
          result = extract_signed_integer (op_ptr, 4, byte_order);
          result_val = value_from_ulongest (address_type, result);
          op_ptr += 4;
          break;
        case DW_OP_const8u:
          result = extract_unsigned_integer (op_ptr, 8, byte_order);
          result_val = value_from_ulongest (address_type, result);
          op_ptr += 8;
          break;
        case DW_OP_const8s:
          result = extract_signed_integer (op_ptr, 8, byte_order);
          result_val = value_from_ulongest (address_type, result);
          op_ptr += 8;
          break;
        case DW_OP_constu:
          op_ptr = read_uleb128 (op_ptr, op_end, &uoffset);
          result = uoffset;
          result_val = value_from_ulongest (address_type, result);
          break;
        case DW_OP_consts:
          op_ptr = read_sleb128 (op_ptr, op_end, &offset);
          result = offset;
          result_val = value_from_ulongest (address_type, result);
          break;

        /* The DW_OP_reg operations are required to occur alone in
           location expressions.  */
        case DW_OP_reg0:
        case DW_OP_reg1:
        case DW_OP_reg2:
        case DW_OP_reg3:
        case DW_OP_reg4:
        case DW_OP_reg5:
        case DW_OP_reg6:
        case DW_OP_reg7:
        case DW_OP_reg8:
        case DW_OP_reg9:
        case DW_OP_reg10:
        case DW_OP_reg11:
        case DW_OP_reg12:
        case DW_OP_reg13:
        case DW_OP_reg14:
        case DW_OP_reg15:
        case DW_OP_reg16:
        case DW_OP_reg17:
        case DW_OP_reg18:
        case DW_OP_reg19:
        case DW_OP_reg20:
        case DW_OP_reg21:
        case DW_OP_reg22:
        case DW_OP_reg23:
        case DW_OP_reg24:
        case DW_OP_reg25:
        case DW_OP_reg26:
        case DW_OP_reg27:
        case DW_OP_reg28:
        case DW_OP_reg29:
        case DW_OP_reg30:
        case DW_OP_reg31:
          if (op_ptr != op_end
              && *op_ptr != DW_OP_piece
              && *op_ptr != DW_OP_bit_piece
              && *op_ptr != DW_OP_GNU_uninit)
            error (_("DWARF-2 expression error: DW_OP_reg operations must be "
                     "used either alone or in conjunction with DW_OP_piece "
                     "or DW_OP_bit_piece."));

          result = op - DW_OP_reg0;
          result_val = value_from_ulongest (address_type, result);
          ctx->location = DWARF_VALUE_REGISTER;
          break;

        case DW_OP_regx:
          op_ptr = read_uleb128 (op_ptr, op_end, &reg);
          dwarf_expr_require_composition (op_ptr, op_end, "DW_OP_regx");

          result = reg;
          result_val = value_from_ulongest (address_type, result);
          ctx->location = DWARF_VALUE_REGISTER;
          break;

        case DW_OP_implicit_value:
          {
            ULONGEST len;

            op_ptr = read_uleb128 (op_ptr, op_end, &len);
            if (op_ptr + len > op_end)
              error (_("DW_OP_implicit_value: too few bytes available."));
            ctx->len = len;
            ctx->data = op_ptr;
            ctx->location = DWARF_VALUE_LITERAL;
            op_ptr += len;
            dwarf_expr_require_composition (op_ptr, op_end,
                                            "DW_OP_implicit_value");
          }
          goto no_push;

        case DW_OP_stack_value:
          ctx->location = DWARF_VALUE_STACK;
          dwarf_expr_require_composition (op_ptr, op_end, "DW_OP_stack_value");
          goto no_push;

        case DW_OP_GNU_implicit_pointer:
          {
            ULONGEST die;
            LONGEST len;

            /* The referred-to DIE.  */
            ctx->len = extract_unsigned_integer (op_ptr, ctx->addr_size,
                                                 byte_order);
            op_ptr += ctx->addr_size;

            /* The byte offset into the data.  */
            op_ptr = read_sleb128 (op_ptr, op_end, &len);
            result = (ULONGEST) len;
            result_val = value_from_ulongest (address_type, result);

            ctx->location = DWARF_VALUE_IMPLICIT_POINTER;
            dwarf_expr_require_composition (op_ptr, op_end,
                                            "DW_OP_GNU_implicit_pointer");
          }
          break;

        case DW_OP_breg0:
        case DW_OP_breg1:
        case DW_OP_breg2:
        case DW_OP_breg3:
        case DW_OP_breg4:
        case DW_OP_breg5:
        case DW_OP_breg6:
        case DW_OP_breg7:
        case DW_OP_breg8:
        case DW_OP_breg9:
        case DW_OP_breg10:
        case DW_OP_breg11:
        case DW_OP_breg12:
        case DW_OP_breg13:
        case DW_OP_breg14:
        case DW_OP_breg15:
        case DW_OP_breg16:
        case DW_OP_breg17:
        case DW_OP_breg18:
        case DW_OP_breg19:
        case DW_OP_breg20:
        case DW_OP_breg21:
        case DW_OP_breg22:
        case DW_OP_breg23:
        case DW_OP_breg24:
        case DW_OP_breg25:
        case DW_OP_breg26:
        case DW_OP_breg27:
        case DW_OP_breg28:
        case DW_OP_breg29:
        case DW_OP_breg30:
        case DW_OP_breg31:
          {
            op_ptr = read_sleb128 (op_ptr, op_end, &offset);
            result = (ctx->read_reg) (ctx->baton, op - DW_OP_breg0);
            result += offset;
            result_val = value_from_ulongest (address_type, result);
          }
          break;
        case DW_OP_bregx:
          {
            op_ptr = read_uleb128 (op_ptr, op_end, &reg);
            op_ptr = read_sleb128 (op_ptr, op_end, &offset);
            result = (ctx->read_reg) (ctx->baton, reg);
            result += offset;
            result_val = value_from_ulongest (address_type, result);
          }
          break;
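
        /* For example, if the frame-base expression of the current
           function is DW_OP_call_frame_cfa, then "DW_OP_fbreg -16"
           computes CFA - 16, a typical stack slot for a local
           variable.  */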
        case DW_OP_fbreg:
          {
            const gdb_byte *datastart;
            size_t datalen;
            unsigned int before_stack_len;

            op_ptr = read_sleb128 (op_ptr, op_end, &offset);
            /* Rather than create a whole new context, we simply
               record the stack length before execution, then reset it
               afterwards, effectively erasing whatever the recursive
               call put there.  */
            before_stack_len = ctx->stack_len;
            /* FIXME: cagney/2003-03-26: This code should be using
               get_frame_base_address(), and then implement a dwarf2
               specific this_base method.  */
            (ctx->get_frame_base) (ctx->baton, &datastart, &datalen);
            dwarf_expr_eval (ctx, datastart, datalen);
            if (ctx->location == DWARF_VALUE_MEMORY)
              result = dwarf_expr_fetch_address (ctx, 0);
            else if (ctx->location == DWARF_VALUE_REGISTER)
              result
                = (ctx->read_reg) (ctx->baton,
                                   value_as_long (dwarf_expr_fetch (ctx, 0)));
            else
              error (_("Not implemented: computing frame "
                       "base using explicit value operator"));
            result = result + offset;
            result_val = value_from_ulongest (address_type, result);
            in_stack_memory = 1;
            ctx->stack_len = before_stack_len;
            ctx->location = DWARF_VALUE_MEMORY;
          }
          break;

        case DW_OP_dup:
          result_val = dwarf_expr_fetch (ctx, 0);
          in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, 0);
          break;

        case DW_OP_drop:
          dwarf_expr_pop (ctx);
          goto no_push;

        case DW_OP_pick:
          offset = *op_ptr++;
          result_val = dwarf_expr_fetch (ctx, offset);
          in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, offset);
          break;

        case DW_OP_swap:
          {
            struct dwarf_stack_value t1, t2;

            if (ctx->stack_len < 2)
              error (_("Not enough elements for "
                       "DW_OP_swap.  Need 2, have %d."),
                     ctx->stack_len);
            t1 = ctx->stack[ctx->stack_len - 1];
            t2 = ctx->stack[ctx->stack_len - 2];
            ctx->stack[ctx->stack_len - 1] = t2;
            ctx->stack[ctx->stack_len - 2] = t1;
            goto no_push;
          }

        case DW_OP_over:
          result_val = dwarf_expr_fetch (ctx, 1);
          in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, 1);
          break;

        case DW_OP_rot:
          {
            struct dwarf_stack_value t1, t2, t3;

            if (ctx->stack_len < 3)
              error (_("Not enough elements for "
                       "DW_OP_rot.  Need 3, have %d."),
                     ctx->stack_len);
            t1 = ctx->stack[ctx->stack_len - 1];
            t2 = ctx->stack[ctx->stack_len - 2];
            t3 = ctx->stack[ctx->stack_len - 3];
            ctx->stack[ctx->stack_len - 1] = t2;
            ctx->stack[ctx->stack_len - 2] = t3;
            ctx->stack[ctx->stack_len - 3] = t1;
            goto no_push;
          }

        case DW_OP_deref:
        case DW_OP_deref_size:
        case DW_OP_GNU_deref_type:
          {
            int addr_size = (op == DW_OP_deref ? ctx->addr_size : *op_ptr++);
            gdb_byte *buf = alloca (addr_size);
            CORE_ADDR addr = dwarf_expr_fetch_address (ctx, 0);
            struct type *type;

            dwarf_expr_pop (ctx);

            if (op == DW_OP_GNU_deref_type)
              {
                ULONGEST type_die;

                op_ptr = read_uleb128 (op_ptr, op_end, &type_die);
                type = dwarf_get_base_type (ctx, type_die, 0);
              }
            else
              type = address_type;

            (ctx->read_mem) (ctx->baton, buf, addr, addr_size);
            result_val = value_from_contents_and_address (type, buf, addr);
            break;
          }

        case DW_OP_abs:
        case DW_OP_neg:
        case DW_OP_not:
        case DW_OP_plus_uconst:
          {
            /* Unary operations.  */
            result_val = dwarf_expr_fetch (ctx, 0);
            dwarf_expr_pop (ctx);

            switch (op)
              {
              case DW_OP_abs:
                if (value_less (result_val,
                                value_zero (value_type (result_val), not_lval)))
                  result_val = value_neg (result_val);
                break;
              case DW_OP_neg:
                result_val = value_neg (result_val);
                break;
              case DW_OP_not:
                dwarf_require_integral (value_type (result_val));
                result_val = value_complement (result_val);
                break;
              case DW_OP_plus_uconst:
                dwarf_require_integral (value_type (result_val));
                result = value_as_long (result_val);
                op_ptr = read_uleb128 (op_ptr, op_end, &reg);
                result += reg;
                result_val = value_from_ulongest (address_type, result);
                break;
              }
          }
          break;

        case DW_OP_and:
        case DW_OP_div:
        case DW_OP_minus:
        case DW_OP_mod:
        case DW_OP_mul:
        case DW_OP_or:
        case DW_OP_plus:
        case DW_OP_shl:
        case DW_OP_shr:
        case DW_OP_shra:
        case DW_OP_xor:
        case DW_OP_le:
        case DW_OP_ge:
        case DW_OP_eq:
        case DW_OP_lt:
        case DW_OP_gt:
        case DW_OP_ne:
          {
            /* Binary operations.  */
            struct value *first, *second;

            second = dwarf_expr_fetch (ctx, 0);
            dwarf_expr_pop (ctx);

            first = dwarf_expr_fetch (ctx, 0);
            dwarf_expr_pop (ctx);

            if (! base_types_equal_p (value_type (first), value_type (second)))
              error (_("Incompatible types on DWARF stack"));

            switch (op)
              {
              case DW_OP_and:
                dwarf_require_integral (value_type (first));
                dwarf_require_integral (value_type (second));
                result_val = value_binop (first, second, BINOP_BITWISE_AND);
                break;
              case DW_OP_div:
                result_val = value_binop (first, second, BINOP_DIV);
                break;
              case DW_OP_minus:
                result_val = value_binop (first, second, BINOP_SUB);
                break;
              case DW_OP_mod:
                {
                  int cast_back = 0;
                  struct type *orig_type = value_type (first);

                  /* We have to special-case "old-style" untyped values
                     -- these must have mod computed using unsigned
                     math.  */
                  if (orig_type == address_type)
                    {
                      struct type *utype
                        = get_unsigned_type (ctx->gdbarch, orig_type);

                      cast_back = 1;
                      first = value_cast (utype, first);
                      second = value_cast (utype, second);
                    }
                  /* Note that value_binop doesn't handle float or
                     decimal float here.  This seems unimportant.  */
                  result_val = value_binop (first, second, BINOP_MOD);
                  if (cast_back)
                    result_val = value_cast (orig_type, result_val);
                }
                break;
              case DW_OP_mul:
                result_val = value_binop (first, second, BINOP_MUL);
                break;
              case DW_OP_or:
                dwarf_require_integral (value_type (first));
                dwarf_require_integral (value_type (second));
                result_val = value_binop (first, second, BINOP_BITWISE_IOR);
                break;
              case DW_OP_plus:
                result_val = value_binop (first, second, BINOP_ADD);
                break;
              case DW_OP_shl:
                dwarf_require_integral (value_type (first));
                dwarf_require_integral (value_type (second));
                result_val = value_binop (first, second, BINOP_LSH);
                break;
              case DW_OP_shr:
                dwarf_require_integral (value_type (first));
                dwarf_require_integral (value_type (second));
                if (value_type (first) == address_type)
                  {
                    struct type *utype
                      = get_unsigned_type (ctx->gdbarch, value_type (first));

                    first = value_cast (utype, first);
                  }

                result_val = value_binop (first, second, BINOP_RSH);
                /* Make sure we wind up with the same type we started
                   with.  */
                if (value_type (result_val) != value_type (second))
                  result_val = value_cast (value_type (second), result_val);
                break;
              case DW_OP_shra:
                dwarf_require_integral (value_type (first));
                dwarf_require_integral (value_type (second));
                result_val = value_binop (first, second, BINOP_RSH);
                break;
              case DW_OP_xor:
                dwarf_require_integral (value_type (first));
                dwarf_require_integral (value_type (second));
                result_val = value_binop (first, second, BINOP_BITWISE_XOR);
                break;
              case DW_OP_le:
                /* A <= B is !(B < A).  */
                result = ! value_less (second, first);
                result_val = value_from_ulongest (address_type, result);
                break;
              case DW_OP_ge:
                /* A >= B is !(A < B).  */
                result = ! value_less (first, second);
                result_val = value_from_ulongest (address_type, result);
                break;
              case DW_OP_eq:
                result = value_equal (first, second);
                result_val = value_from_ulongest (address_type, result);
                break;
              case DW_OP_lt:
                result = value_less (first, second);
                result_val = value_from_ulongest (address_type, result);
                break;
              case DW_OP_gt:
                /* A > B is B < A.  */
                result = value_less (second, first);
                result_val = value_from_ulongest (address_type, result);
                break;
              case DW_OP_ne:
                result = ! value_equal (first, second);
                result_val = value_from_ulongest (address_type, result);
                break;
              default:
                internal_error (__FILE__, __LINE__,
                                _("Can't be reached."));
              }
          }
          break;

        case DW_OP_call_frame_cfa:
          result = (ctx->get_frame_cfa) (ctx->baton);
          result_val = value_from_ulongest (address_type, result);
          in_stack_memory = 1;
          break;

        case DW_OP_GNU_push_tls_address:
          /* Variable is at a constant offset in the thread-local
             storage block into the objfile for the current thread and
             the dynamic linker module containing this expression.  Here
             we return the offset from that base.  The top of the
             stack has the offset from the beginning of the thread
             control block at which the variable is located.  Nothing
             should follow this operator, so the top of stack would be
             returned.  */
          result = value_as_long (dwarf_expr_fetch (ctx, 0));
          dwarf_expr_pop (ctx);
          result = (ctx->get_tls_address) (ctx->baton, result);
          result_val = value_from_ulongest (address_type, result);
          break;

        case DW_OP_skip:
          offset = extract_signed_integer (op_ptr, 2, byte_order);
          op_ptr += 2;
          op_ptr += offset;
          goto no_push;

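        /* DW_OP_bra is the conditional branch: pop the top of stack
           and, if it is non-zero, branch by the signed 2-byte offset
           that follows the opcode.  For example, "DW_OP_lit1
           DW_OP_bra <off>" always takes the branch, while "DW_OP_lit0
           DW_OP_bra <off>" always falls through.  */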
        case DW_OP_bra:
          {
            struct value *val;

            offset = extract_signed_integer (op_ptr, 2, byte_order);
            op_ptr += 2;
            val = dwarf_expr_fetch (ctx, 0);
            dwarf_require_integral (value_type (val));
            if (value_as_long (val) != 0)
              op_ptr += offset;
            dwarf_expr_pop (ctx);
          }
          goto no_push;

        case DW_OP_nop:
          goto no_push;

        case DW_OP_piece:
          {
            ULONGEST size;

            /* Record the piece.  */
            op_ptr = read_uleb128 (op_ptr, op_end, &size);
            add_piece (ctx, 8 * size, 0);

            /* Pop off the address/regnum, and reset the location
               type.  */
            if (ctx->location != DWARF_VALUE_LITERAL
                && ctx->location != DWARF_VALUE_OPTIMIZED_OUT)
              dwarf_expr_pop (ctx);
            ctx->location = DWARF_VALUE_MEMORY;
          }
          goto no_push;
        case DW_OP_bit_piece:
          {
            ULONGEST size, offset;

            /* Record the piece.  */
            op_ptr = read_uleb128 (op_ptr, op_end, &size);
            op_ptr = read_uleb128 (op_ptr, op_end, &offset);
            add_piece (ctx, size, offset);

            /* Pop off the address/regnum, and reset the location
               type.  */
            if (ctx->location != DWARF_VALUE_LITERAL
                && ctx->location != DWARF_VALUE_OPTIMIZED_OUT)
              dwarf_expr_pop (ctx);
            ctx->location = DWARF_VALUE_MEMORY;
          }
          goto no_push;

        case DW_OP_GNU_uninit:
          if (op_ptr != op_end)
            error (_("DWARF-2 expression error: DW_OP_GNU_uninit must always "
                     "be the very last op."));

          ctx->initialized = 0;
          goto no_push;

        case DW_OP_call2:
          result = extract_unsigned_integer (op_ptr, 2, byte_order);
          op_ptr += 2;
          ctx->dwarf_call (ctx, result);
          goto no_push;
        case DW_OP_call4:
          result = extract_unsigned_integer (op_ptr, 4, byte_order);
          op_ptr += 4;
          ctx->dwarf_call (ctx, result);
          goto no_push;

        case DW_OP_GNU_entry_value:
          /* This operation is not yet supported by GDB.  */
          ctx->location = DWARF_VALUE_OPTIMIZED_OUT;
          ctx->stack_len = 0;
          ctx->num_pieces = 0;
          goto abort_expression;
        case DW_OP_GNU_const_type:
          {
            ULONGEST type_die;
            int n;
            const gdb_byte *data;
            struct type *type;

            op_ptr = read_uleb128 (op_ptr, op_end, &type_die);
            n = *op_ptr++;
            data = op_ptr;
            op_ptr += n;

            type = dwarf_get_base_type (ctx, type_die, n);
            result_val = value_from_contents (type, data);
          }
          break;

        case DW_OP_GNU_regval_type:
          {
            ULONGEST type_die;
            struct type *type;

            op_ptr = read_uleb128 (op_ptr, op_end, &reg);
            op_ptr = read_uleb128 (op_ptr, op_end, &type_die);

            type = dwarf_get_base_type (ctx, type_die, 0);
            result = (ctx->read_reg) (ctx->baton, reg);
            result_val = value_from_ulongest (type, result);
          }
          break;

        case DW_OP_GNU_convert:
        case DW_OP_GNU_reinterpret:
          {
            ULONGEST type_die;
            struct type *type;

            op_ptr = read_uleb128 (op_ptr, op_end, &type_die);

            type = dwarf_get_base_type (ctx, type_die, 0);

            result_val = dwarf_expr_fetch (ctx, 0);
            dwarf_expr_pop (ctx);

            if (op == DW_OP_GNU_convert)
              result_val = value_cast (type, result_val);
            else if (type == value_type (result_val))
              {
                /* Nothing.  */
              }
            else if (TYPE_LENGTH (type)
                     != TYPE_LENGTH (value_type (result_val)))
              error (_("DW_OP_GNU_reinterpret has wrong size"));
            else
              result_val
                = value_from_contents (type,
                                       value_contents_all (result_val));
          }
          break;

        default:
          error (_("Unhandled dwarf expression opcode 0x%x"), op);
        }

      /* Most things push a result value.  */
      gdb_assert (result_val != NULL);
      dwarf_expr_push (ctx, result_val, in_stack_memory);
    no_push:
      ;
    }

  /* To simplify our main caller, if the result is an implicit
     pointer, then make a pieced value.  This is ok because we can't
     have implicit pointers in contexts where pieces are invalid.  */
  if (ctx->location == DWARF_VALUE_IMPLICIT_POINTER)
    add_piece (ctx, 8 * ctx->addr_size, 0);

 abort_expression:
  ctx->recursion_depth--;
  gdb_assert (ctx->recursion_depth >= 0);
}

void
_initialize_dwarf2expr (void)
{
  dwarf_arch_cookie
    = gdbarch_data_register_post_init (dwarf_gdbarch_types_init);
}