/* Dwarf2 Expression Evaluator
   Copyright 2001, 2002, 2003 Free Software Foundation, Inc.
   Contributed by Daniel Berlin (dan@dberlin.org)

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */

#include "defs.h"
#include "symtab.h"
#include "gdbtypes.h"
#include "value.h"
#include "gdbcore.h"
#include "elf/dwarf2.h"
#include "dwarf2expr.h"

/* Local prototypes.  */

static void execute_stack_op (struct dwarf_expr_context *,
                              unsigned char *, unsigned char *);

/* Create a new context for the expression evaluator.  */

struct dwarf_expr_context *
new_dwarf_expr_context (void)
{
  struct dwarf_expr_context *retval;
  retval = xcalloc (1, sizeof (struct dwarf_expr_context));
  retval->stack_len = 0;
  retval->stack_allocated = 10;
  retval->stack = xmalloc (retval->stack_allocated * sizeof (CORE_ADDR));
  retval->num_pieces = 0;
  retval->pieces = 0;
  return retval;
}

/* Release the memory allocated to CTX.  */

void
free_dwarf_expr_context (struct dwarf_expr_context *ctx)
{
  xfree (ctx->stack);
  xfree (ctx->pieces);
  xfree (ctx);
}

/* Expand the memory allocated to CTX's stack to contain at least
   NEED more elements than are currently used.  */

static void
dwarf_expr_grow_stack (struct dwarf_expr_context *ctx, size_t need)
{
  if (ctx->stack_len + need > ctx->stack_allocated)
    {
      size_t newlen = ctx->stack_len + need + 10;
      ctx->stack = xrealloc (ctx->stack,
                             newlen * sizeof (CORE_ADDR));
      ctx->stack_allocated = newlen;
    }
}

/* Push VALUE onto CTX's stack.  */

void
dwarf_expr_push (struct dwarf_expr_context *ctx, CORE_ADDR value)
{
  dwarf_expr_grow_stack (ctx, 1);
  ctx->stack[ctx->stack_len++] = value;
}

/* Pop the top item off of CTX's stack.  */

void
dwarf_expr_pop (struct dwarf_expr_context *ctx)
{
  if (ctx->stack_len <= 0)
    error (_("dwarf expression stack underflow"));
  ctx->stack_len--;
}

/* Retrieve the N'th item on CTX's stack.  */

CORE_ADDR
dwarf_expr_fetch (struct dwarf_expr_context *ctx, int n)
{
  /* Item N (counting from the top, which is item 0) exists only if
     the stack holds at least N + 1 elements.  */
  if (ctx->stack_len <= n)
    error (_("Asked for position %d of stack, stack only has %d elements on it."),
           n, ctx->stack_len);
  return ctx->stack[ctx->stack_len - (1 + n)];
}
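
/* For example, after pushing the values 1, 2 and 3 in that order,
   dwarf_expr_fetch (ctx, 0) yields 3 and dwarf_expr_fetch (ctx, 2)
   yields 1, while asking for item 3 is an error.  */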

/* Add a new piece to CTX's piece list.  */
static void
add_piece (struct dwarf_expr_context *ctx,
           int in_reg, CORE_ADDR value, ULONGEST size)
{
  struct dwarf_expr_piece *p;

  ctx->num_pieces++;

  if (ctx->pieces)
    ctx->pieces = xrealloc (ctx->pieces,
                            (ctx->num_pieces
                             * sizeof (struct dwarf_expr_piece)));
  else
    ctx->pieces = xmalloc (ctx->num_pieces
                           * sizeof (struct dwarf_expr_piece));

  p = &ctx->pieces[ctx->num_pieces - 1];
  p->in_reg = in_reg;
  p->value = value;
  p->size = size;
}
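
/* Composite locations are built up one DW_OP_piece at a time.  As an
   illustration (the register number and offset are arbitrary), the
   expression

       DW_OP_reg3, DW_OP_piece 4, DW_OP_fbreg -12, DW_OP_piece 4

   describes an eight-byte value split between register 3 and a
   frame-relative address, four bytes each; evaluating it leaves
   num_pieces == 2, the first piece with in_reg set and value 3, the
   second with in_reg clear and value equal to the computed address.  */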

/* Evaluate the expression at ADDR (LEN bytes long) using the context
   CTX.  */

void
dwarf_expr_eval (struct dwarf_expr_context *ctx, unsigned char *addr,
                 size_t len)
{
  execute_stack_op (ctx, addr, addr + len);
}
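
/* A sketch of how this interface is typically driven (the baton and
   callback names below, and expr_bytes/expr_len, are hypothetical; in
   GDB the real callers live in dwarf2loc.c).  The caller allocates a
   context, fills in the callbacks this file invokes (read_reg,
   read_mem, get_frame_base, get_tls_address) together with the baton
   handed back to them, runs the expression, and then reads either the
   value stack or the piece list:

       struct dwarf_expr_context *ctx = new_dwarf_expr_context ();
       ctx->baton = my_baton;
       ctx->read_reg = my_read_reg;
       ctx->read_mem = my_read_mem;
       ctx->get_frame_base = my_get_frame_base;
       ctx->get_tls_address = my_get_tls_address;
       dwarf_expr_eval (ctx, expr_bytes, expr_len);
       CORE_ADDR result = dwarf_expr_fetch (ctx, 0);
       free_dwarf_expr_context (ctx);

   After evaluation, ctx->num_pieces is non-zero for composite
   (DW_OP_piece) locations, ctx->in_reg is set when the result is a
   register number rather than an address, and otherwise the top of
   the stack is the computed address.  */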

/* Decode the unsigned LEB128 constant at BUF into the variable pointed to
   by R, and return the new value of BUF.  Verify that it doesn't extend
   past BUF_END.  */

unsigned char *
read_uleb128 (unsigned char *buf, unsigned char *buf_end, ULONGEST * r)
{
  unsigned shift = 0;
  ULONGEST result = 0;
  unsigned char byte;

  while (1)
    {
      if (buf >= buf_end)
        error (_("read_uleb128: Corrupted DWARF expression."));

      byte = *buf++;
      /* Each byte contributes its low seven bits, least significant
         group first; a clear high bit marks the final byte.  Widen
         before shifting so constants wider than an int survive.  */
      result |= ((ULONGEST) (byte & 0x7f)) << shift;
      if ((byte & 0x80) == 0)
        break;
      shift += 7;
    }
  *r = result;
  return buf;
}
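
/* As a worked example, the byte sequence 0xe5 0x8e 0x26 decodes to
   0x65 | (0x0e << 7) | (0x26 << 14) = 624485; the final byte has its
   high bit clear, which ends the loop above.  */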

/* Decode the signed LEB128 constant at BUF into the variable pointed to
   by R, and return the new value of BUF.  Verify that it doesn't extend
   past BUF_END.  */

unsigned char *
read_sleb128 (unsigned char *buf, unsigned char *buf_end, LONGEST * r)
{
  unsigned shift = 0;
  LONGEST result = 0;
  unsigned char byte;

  while (1)
    {
      if (buf >= buf_end)
        error (_("read_sleb128: Corrupted DWARF expression."));

      byte = *buf++;
      /* Widen before shifting, as in read_uleb128.  */
      result |= ((LONGEST) (byte & 0x7f)) << shift;
      shift += 7;
      if ((byte & 0x80) == 0)
        break;
    }
  /* Sign-extend: if the constant did not fill the result and bit 6 of
     the final byte (the sign bit) is set, the value is negative.  */
  if (shift < (sizeof (*r) * 8) && (byte & 0x40) != 0)
    result |= -(((LONGEST) 1) << shift);

  *r = result;
  return buf;
}
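
/* As worked examples, the single byte 0x7e decodes to -2 (bit 6 of the
   final byte is set, so the high-order bits are filled with ones), and
   the pair 0x80 0x7f decodes to -128 the same way.  */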

/* Read an address from BUF, and verify that it doesn't extend past
   BUF_END.  The address is returned, and *BYTES_READ is set to the
   number of bytes read from BUF.  */

CORE_ADDR
dwarf2_read_address (unsigned char *buf, unsigned char *buf_end, int *bytes_read)
{
  CORE_ADDR result;

  if (buf_end - buf < TARGET_ADDR_BIT / TARGET_CHAR_BIT)
    error (_("dwarf2_read_address: Corrupted DWARF expression."));

  *bytes_read = TARGET_ADDR_BIT / TARGET_CHAR_BIT;
  /* NOTE: cagney/2003-05-22: This extract is assuming that a DWARF 2
     address is always unsigned.  That may or may not be true.  */
  result = extract_unsigned_integer (buf, TARGET_ADDR_BIT / TARGET_CHAR_BIT);
  return result;
}

/* Return the type of an address, for unsigned arithmetic.  */

static struct type *
unsigned_address_type (void)
{
  switch (TARGET_ADDR_BIT / TARGET_CHAR_BIT)
    {
    case 2:
      return builtin_type_uint16;
    case 4:
      return builtin_type_uint32;
    case 8:
      return builtin_type_uint64;
    default:
      internal_error (__FILE__, __LINE__,
                      _("Unsupported address size.\n"));
    }
}

/* Return the type of an address, for signed arithmetic.  */

static struct type *
signed_address_type (void)
{
  switch (TARGET_ADDR_BIT / TARGET_CHAR_BIT)
    {
    case 2:
      return builtin_type_int16;
    case 4:
      return builtin_type_int32;
    case 8:
      return builtin_type_int64;
    default:
      internal_error (__FILE__, __LINE__,
                      _("Unsupported address size.\n"));
    }
}
\f
/* The engine for the expression evaluator.  Using the context in CTX,
   evaluate the expression between OP_PTR and OP_END.  */

static void
execute_stack_op (struct dwarf_expr_context *ctx, unsigned char *op_ptr,
                  unsigned char *op_end)
{
  ctx->in_reg = 0;

  while (op_ptr < op_end)
    {
      enum dwarf_location_atom op = *op_ptr++;
      CORE_ADDR result;
      ULONGEST uoffset, reg;
      LONGEST offset;
      int bytes_read;

      switch (op)
        {
        case DW_OP_lit0:
        case DW_OP_lit1:
        case DW_OP_lit2:
        case DW_OP_lit3:
        case DW_OP_lit4:
        case DW_OP_lit5:
        case DW_OP_lit6:
        case DW_OP_lit7:
        case DW_OP_lit8:
        case DW_OP_lit9:
        case DW_OP_lit10:
        case DW_OP_lit11:
        case DW_OP_lit12:
        case DW_OP_lit13:
        case DW_OP_lit14:
        case DW_OP_lit15:
        case DW_OP_lit16:
        case DW_OP_lit17:
        case DW_OP_lit18:
        case DW_OP_lit19:
        case DW_OP_lit20:
        case DW_OP_lit21:
        case DW_OP_lit22:
        case DW_OP_lit23:
        case DW_OP_lit24:
        case DW_OP_lit25:
        case DW_OP_lit26:
        case DW_OP_lit27:
        case DW_OP_lit28:
        case DW_OP_lit29:
        case DW_OP_lit30:
        case DW_OP_lit31:
          result = op - DW_OP_lit0;
          break;

        case DW_OP_addr:
          result = dwarf2_read_address (op_ptr, op_end, &bytes_read);
          op_ptr += bytes_read;
          break;

        case DW_OP_const1u:
          result = extract_unsigned_integer (op_ptr, 1);
          op_ptr += 1;
          break;
        case DW_OP_const1s:
          result = extract_signed_integer (op_ptr, 1);
          op_ptr += 1;
          break;
        case DW_OP_const2u:
          result = extract_unsigned_integer (op_ptr, 2);
          op_ptr += 2;
          break;
        case DW_OP_const2s:
          result = extract_signed_integer (op_ptr, 2);
          op_ptr += 2;
          break;
        case DW_OP_const4u:
          result = extract_unsigned_integer (op_ptr, 4);
          op_ptr += 4;
          break;
        case DW_OP_const4s:
          result = extract_signed_integer (op_ptr, 4);
          op_ptr += 4;
          break;
        case DW_OP_const8u:
          result = extract_unsigned_integer (op_ptr, 8);
          op_ptr += 8;
          break;
        case DW_OP_const8s:
          result = extract_signed_integer (op_ptr, 8);
          op_ptr += 8;
          break;
        case DW_OP_constu:
          op_ptr = read_uleb128 (op_ptr, op_end, &uoffset);
          result = uoffset;
          break;
        case DW_OP_consts:
          op_ptr = read_sleb128 (op_ptr, op_end, &offset);
          result = offset;
          break;

          /* The DW_OP_reg operations are required to occur alone in
             location expressions.  */
        case DW_OP_reg0:
        case DW_OP_reg1:
        case DW_OP_reg2:
        case DW_OP_reg3:
        case DW_OP_reg4:
        case DW_OP_reg5:
        case DW_OP_reg6:
        case DW_OP_reg7:
        case DW_OP_reg8:
        case DW_OP_reg9:
        case DW_OP_reg10:
        case DW_OP_reg11:
        case DW_OP_reg12:
        case DW_OP_reg13:
        case DW_OP_reg14:
        case DW_OP_reg15:
        case DW_OP_reg16:
        case DW_OP_reg17:
        case DW_OP_reg18:
        case DW_OP_reg19:
        case DW_OP_reg20:
        case DW_OP_reg21:
        case DW_OP_reg22:
        case DW_OP_reg23:
        case DW_OP_reg24:
        case DW_OP_reg25:
        case DW_OP_reg26:
        case DW_OP_reg27:
        case DW_OP_reg28:
        case DW_OP_reg29:
        case DW_OP_reg30:
        case DW_OP_reg31:
          if (op_ptr != op_end && *op_ptr != DW_OP_piece)
            error (_("DWARF-2 expression error: DW_OP_reg operations must be "
                     "used either alone or in conjunction with DW_OP_piece."));

          result = op - DW_OP_reg0;
          ctx->in_reg = 1;

          break;

        case DW_OP_regx:
          op_ptr = read_uleb128 (op_ptr, op_end, &reg);
          if (op_ptr != op_end && *op_ptr != DW_OP_piece)
            error (_("DWARF-2 expression error: DW_OP_reg operations must be "
                     "used either alone or in conjunction with DW_OP_piece."));

          result = reg;
          ctx->in_reg = 1;
          break;

        case DW_OP_breg0:
        case DW_OP_breg1:
        case DW_OP_breg2:
        case DW_OP_breg3:
        case DW_OP_breg4:
        case DW_OP_breg5:
        case DW_OP_breg6:
        case DW_OP_breg7:
        case DW_OP_breg8:
        case DW_OP_breg9:
        case DW_OP_breg10:
        case DW_OP_breg11:
        case DW_OP_breg12:
        case DW_OP_breg13:
        case DW_OP_breg14:
        case DW_OP_breg15:
        case DW_OP_breg16:
        case DW_OP_breg17:
        case DW_OP_breg18:
        case DW_OP_breg19:
        case DW_OP_breg20:
        case DW_OP_breg21:
        case DW_OP_breg22:
        case DW_OP_breg23:
        case DW_OP_breg24:
        case DW_OP_breg25:
        case DW_OP_breg26:
        case DW_OP_breg27:
        case DW_OP_breg28:
        case DW_OP_breg29:
        case DW_OP_breg30:
        case DW_OP_breg31:
          {
            op_ptr = read_sleb128 (op_ptr, op_end, &offset);
            result = (ctx->read_reg) (ctx->baton, op - DW_OP_breg0);
            result += offset;
          }
          break;
        case DW_OP_bregx:
          {
            op_ptr = read_uleb128 (op_ptr, op_end, &reg);
            op_ptr = read_sleb128 (op_ptr, op_end, &offset);
            result = (ctx->read_reg) (ctx->baton, reg);
            result += offset;
          }
          break;
        case DW_OP_fbreg:
          {
            unsigned char *datastart;
            size_t datalen;
            unsigned int before_stack_len;

            op_ptr = read_sleb128 (op_ptr, op_end, &offset);
            /* Rather than create a whole new context, we simply
               record the stack length before execution, then reset it
               afterwards, effectively erasing whatever the recursive
               call put there.  */
            before_stack_len = ctx->stack_len;
            /* FIXME: cagney/2003-03-26: This code should be using
               get_frame_base_address(), and then implement a dwarf2
               specific this_base method.  */
            (ctx->get_frame_base) (ctx->baton, &datastart, &datalen);
            dwarf_expr_eval (ctx, datastart, datalen);
            result = dwarf_expr_fetch (ctx, 0);
            if (ctx->in_reg)
              result = (ctx->read_reg) (ctx->baton, result);
            result = result + offset;
            ctx->stack_len = before_stack_len;
            ctx->in_reg = 0;
          }
          break;
        case DW_OP_dup:
          result = dwarf_expr_fetch (ctx, 0);
          break;

        case DW_OP_drop:
          dwarf_expr_pop (ctx);
          goto no_push;

        case DW_OP_pick:
          offset = *op_ptr++;
          result = dwarf_expr_fetch (ctx, offset);
          break;

        case DW_OP_over:
          result = dwarf_expr_fetch (ctx, 1);
          break;

        case DW_OP_rot:
          {
            CORE_ADDR t1, t2, t3;

            if (ctx->stack_len < 3)
              error (_("Not enough elements for DW_OP_rot. Need 3, have %d."),
                     ctx->stack_len);
            t1 = ctx->stack[ctx->stack_len - 1];
            t2 = ctx->stack[ctx->stack_len - 2];
            t3 = ctx->stack[ctx->stack_len - 3];
            ctx->stack[ctx->stack_len - 1] = t2;
            ctx->stack[ctx->stack_len - 2] = t3;
            ctx->stack[ctx->stack_len - 3] = t1;
            goto no_push;
          }

        case DW_OP_deref:
        case DW_OP_deref_size:
        case DW_OP_abs:
        case DW_OP_neg:
        case DW_OP_not:
        case DW_OP_plus_uconst:
          /* Unary operations.  */
          result = dwarf_expr_fetch (ctx, 0);
          dwarf_expr_pop (ctx);

          switch (op)
            {
            case DW_OP_deref:
              {
                char *buf = alloca (TARGET_ADDR_BIT / TARGET_CHAR_BIT);
                int bytes_read;

                (ctx->read_mem) (ctx->baton, buf, result,
                                 TARGET_ADDR_BIT / TARGET_CHAR_BIT);
                result = dwarf2_read_address (buf,
                                              buf + (TARGET_ADDR_BIT
                                                     / TARGET_CHAR_BIT),
                                              &bytes_read);
              }
              break;

            case DW_OP_deref_size:
              {
                char *buf = alloca (TARGET_ADDR_BIT / TARGET_CHAR_BIT);
                int bytes_read;

                (ctx->read_mem) (ctx->baton, buf, result, *op_ptr++);
                result = dwarf2_read_address (buf,
                                              buf + (TARGET_ADDR_BIT
                                                     / TARGET_CHAR_BIT),
                                              &bytes_read);
              }
              break;

            case DW_OP_abs:
              if ((signed int) result < 0)
                result = -result;
              break;
            case DW_OP_neg:
              result = -result;
              break;
            case DW_OP_not:
              result = ~result;
              break;
            case DW_OP_plus_uconst:
              op_ptr = read_uleb128 (op_ptr, op_end, &reg);
              result += reg;
              break;
            }
          break;

        case DW_OP_and:
        case DW_OP_div:
        case DW_OP_minus:
        case DW_OP_mod:
        case DW_OP_mul:
        case DW_OP_or:
        case DW_OP_plus:
        case DW_OP_shl:
        case DW_OP_shr:
        case DW_OP_shra:
        case DW_OP_xor:
        case DW_OP_le:
        case DW_OP_ge:
        case DW_OP_eq:
        case DW_OP_lt:
        case DW_OP_gt:
        case DW_OP_ne:
          {
            /* Binary operations.  Use the value engine to do computations in
               the right width.  */
            CORE_ADDR first, second;
            enum exp_opcode binop;
            struct value *val1, *val2;

            second = dwarf_expr_fetch (ctx, 0);
            dwarf_expr_pop (ctx);

            first = dwarf_expr_fetch (ctx, 0);
            dwarf_expr_pop (ctx);

            val1 = value_from_longest (unsigned_address_type (), first);
            val2 = value_from_longest (unsigned_address_type (), second);

            switch (op)
              {
              case DW_OP_and:
                binop = BINOP_BITWISE_AND;
                break;
              case DW_OP_div:
                binop = BINOP_DIV;
                break;
              case DW_OP_minus:
                binop = BINOP_SUB;
                break;
              case DW_OP_mod:
                binop = BINOP_MOD;
                break;
              case DW_OP_mul:
                binop = BINOP_MUL;
                break;
              case DW_OP_or:
                binop = BINOP_BITWISE_IOR;
                break;
              case DW_OP_plus:
                binop = BINOP_ADD;
                break;
              case DW_OP_shl:
                binop = BINOP_LSH;
                break;
              case DW_OP_shr:
                binop = BINOP_RSH;
                break;
              case DW_OP_shra:
                binop = BINOP_RSH;
                val1 = value_from_longest (signed_address_type (), first);
                break;
              case DW_OP_xor:
                binop = BINOP_BITWISE_XOR;
                break;
              case DW_OP_le:
                binop = BINOP_LEQ;
                break;
              case DW_OP_ge:
                binop = BINOP_GEQ;
                break;
              case DW_OP_eq:
                binop = BINOP_EQUAL;
                break;
              case DW_OP_lt:
                binop = BINOP_LESS;
                break;
              case DW_OP_gt:
                binop = BINOP_GTR;
                break;
              case DW_OP_ne:
                binop = BINOP_NOTEQUAL;
                break;
              default:
                internal_error (__FILE__, __LINE__,
                                _("Can't be reached."));
              }
            result = value_as_long (value_binop (val1, val2, binop));
          }
          break;

        case DW_OP_GNU_push_tls_address:
          /* The variable is at a constant offset in the thread-local
             storage block for the current thread, in the objfile of the
             dynamic linker module containing this expression.  The top
             of the stack holds that offset from the beginning of the
             thread control block; here we return the address computed
             from it.  Nothing should follow this operator, so the top
             of stack would be returned.  */
          result = dwarf_expr_fetch (ctx, 0);
          dwarf_expr_pop (ctx);
          result = (ctx->get_tls_address) (ctx->baton, result);
          break;

        case DW_OP_skip:
          /* Unconditional branch; the 16-bit signed operand is a byte
             offset from the end of this instruction.  */
          offset = extract_signed_integer (op_ptr, 2);
          op_ptr += 2;
          op_ptr += offset;
          goto no_push;

        case DW_OP_bra:
          /* Conditional branch: branch by the 16-bit signed offset if
             the popped top of stack is non-zero.  */
          offset = extract_signed_integer (op_ptr, 2);
          op_ptr += 2;
          if (dwarf_expr_fetch (ctx, 0) != 0)
            op_ptr += offset;
          dwarf_expr_pop (ctx);
          goto no_push;

        case DW_OP_nop:
          goto no_push;

        case DW_OP_piece:
          {
            ULONGEST size;
            CORE_ADDR addr_or_regnum;

            /* Record the piece.  */
            op_ptr = read_uleb128 (op_ptr, op_end, &size);
            addr_or_regnum = dwarf_expr_fetch (ctx, 0);
            add_piece (ctx, ctx->in_reg, addr_or_regnum, size);

            /* Pop off the address/regnum, and clear the in_reg flag.  */
            dwarf_expr_pop (ctx);
            ctx->in_reg = 0;
          }
          goto no_push;

        default:
          error (_("Unhandled dwarf expression opcode 0x%x"), op);
        }

      /* Most things push a result value.  */
      dwarf_expr_push (ctx, result);
    no_push:;
    }
}
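
/* As a small worked example of the evaluator above, the three-byte
   expression { DW_OP_lit8, DW_OP_lit2, DW_OP_plus } pushes 8, pushes 2,
   then pops both, adds them through the value machinery in the address
   width, and pushes 10; afterwards dwarf_expr_fetch (ctx, 0) yields 10
   with ctx->in_reg clear and no pieces recorded.  */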