/* DWARF 2 Expression Evaluator.

   Copyright (C) 2001, 2002, 2003, 2005, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.

   Contributed by Daniel Berlin (dan@dberlin.org)

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "symtab.h"
#include "gdbtypes.h"
#include "value.h"
#include "gdbcore.h"
#include "dwarf2.h"
#include "dwarf2expr.h"
#include "gdb_assert.h"

/* Local prototypes.  */

static void execute_stack_op (struct dwarf_expr_context *,
                              gdb_byte *, gdb_byte *);
static struct type *unsigned_address_type (struct gdbarch *, int);

/* Create a new context for the expression evaluator.  */

struct dwarf_expr_context *
new_dwarf_expr_context (void)
{
  struct dwarf_expr_context *retval;

  retval = xcalloc (1, sizeof (struct dwarf_expr_context));
  retval->stack_len = 0;
  retval->stack_allocated = 10;
  retval->stack = xmalloc (retval->stack_allocated
                           * sizeof (struct dwarf_stack_value));
  retval->num_pieces = 0;
  retval->pieces = 0;
  retval->max_recursion_depth = 0x100;
  return retval;
}

/* Release the memory allocated to CTX.  */

void
free_dwarf_expr_context (struct dwarf_expr_context *ctx)
{
  xfree (ctx->stack);
  xfree (ctx->pieces);
  xfree (ctx);
}

/* Helper for make_cleanup_free_dwarf_expr_context.  */

static void
free_dwarf_expr_context_cleanup (void *arg)
{
  free_dwarf_expr_context (arg);
}

/* Return a cleanup that calls free_dwarf_expr_context.  */

struct cleanup *
make_cleanup_free_dwarf_expr_context (struct dwarf_expr_context *ctx)
{
  return make_cleanup (free_dwarf_expr_context_cleanup, ctx);
}
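
/* A typical lifecycle, for illustration only; how the callback
   fields and BATON get filled in depends entirely on the caller and
   is not shown here:

       struct dwarf_expr_context *ctx = new_dwarf_expr_context ();
       struct cleanup *old_chain = make_cleanup_free_dwarf_expr_context (ctx);

       ... set ctx->gdbarch, ctx->addr_size, ctx->baton and the
       read_reg / read_mem / get_frame_base / etc. callbacks ...

       dwarf_expr_eval (ctx, data, size);
       result = dwarf_expr_fetch (ctx, 0);
       do_cleanups (old_chain);  */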

/* Expand the memory allocated to CTX's stack to contain at least
   NEED more elements than are currently used.  */

static void
dwarf_expr_grow_stack (struct dwarf_expr_context *ctx, size_t need)
{
  if (ctx->stack_len + need > ctx->stack_allocated)
    {
      size_t newlen = ctx->stack_len + need + 10;

      ctx->stack = xrealloc (ctx->stack,
                             newlen * sizeof (struct dwarf_stack_value));
      ctx->stack_allocated = newlen;
    }
}

/* Push VALUE onto CTX's stack.  */

void
dwarf_expr_push (struct dwarf_expr_context *ctx, CORE_ADDR value,
                 int in_stack_memory)
{
  struct dwarf_stack_value *v;

  dwarf_expr_grow_stack (ctx, 1);
  v = &ctx->stack[ctx->stack_len++];
  v->value = value;
  v->in_stack_memory = in_stack_memory;
}

/* Pop the top item off of CTX's stack.  */

void
dwarf_expr_pop (struct dwarf_expr_context *ctx)
{
  if (ctx->stack_len <= 0)
    error (_("dwarf expression stack underflow"));
  ctx->stack_len--;
}

/* Retrieve the N'th item on CTX's stack.  */

CORE_ADDR
dwarf_expr_fetch (struct dwarf_expr_context *ctx, int n)
{
  if (ctx->stack_len <= n)
    error (_("Asked for position %d of stack, stack only has %d elements on it."),
           n, ctx->stack_len);
  return ctx->stack[ctx->stack_len - (1 + n)].value;
}

/* Retrieve the in_stack_memory flag of the N'th item on CTX's stack.  */

int
dwarf_expr_fetch_in_stack_memory (struct dwarf_expr_context *ctx, int n)
{
  if (ctx->stack_len <= n)
    error (_("Asked for position %d of stack, stack only has %d elements on it."),
           n, ctx->stack_len);
  return ctx->stack[ctx->stack_len - (1 + n)].in_stack_memory;
}

/* Add a new piece to CTX's piece list.  */

static void
add_piece (struct dwarf_expr_context *ctx, ULONGEST size)
{
  struct dwarf_expr_piece *p;

  ctx->num_pieces++;

  if (ctx->pieces)
    ctx->pieces = xrealloc (ctx->pieces,
                            (ctx->num_pieces
                             * sizeof (struct dwarf_expr_piece)));
  else
    ctx->pieces = xmalloc (ctx->num_pieces
                           * sizeof (struct dwarf_expr_piece));

  p = &ctx->pieces[ctx->num_pieces - 1];
  p->location = ctx->location;
  p->size = size;
  if (p->location == DWARF_VALUE_LITERAL)
    {
      p->v.literal.data = ctx->data;
      p->v.literal.length = ctx->len;
    }
  else
    {
      p->v.expr.value = dwarf_expr_fetch (ctx, 0);
      p->v.expr.in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, 0);
    }
}
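
/* For example (illustrative, not from this file): an 8-byte variable
   kept half in a register and half in memory might be described by

       DW_OP_reg3  DW_OP_piece 4  DW_OP_addr <addr>  DW_OP_piece 4

   which would leave two entries in CTX's piece list, the first with
   location DWARF_VALUE_REGISTER and the second with
   DWARF_VALUE_MEMORY, each with size 4.  */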

/* Evaluate the expression at ADDR (LEN bytes long) using the context
   CTX.  */

void
dwarf_expr_eval (struct dwarf_expr_context *ctx, gdb_byte *addr, size_t len)
{
  int old_recursion_depth = ctx->recursion_depth;

  execute_stack_op (ctx, addr, addr + len);

  /* If execute_stack_op threw an exception, CTX's RECURSION_DEPTH
     was left in an invalid state; we never reach this point in that
     case, so the assertion only checks the normal return path.  */
  gdb_assert (ctx->recursion_depth == old_recursion_depth);
}
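
/* For illustration, evaluating the three-byte expression
   { DW_OP_lit4, DW_OP_lit5, DW_OP_plus } pushes 4, pushes 5, then
   pops both operands and pushes their sum, so afterwards
   dwarf_expr_fetch (ctx, 0) returns 9 and CTX's location is
   DWARF_VALUE_MEMORY (the default).  */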

/* Decode the unsigned LEB128 constant at BUF into the variable pointed to
   by R, and return the new value of BUF.  Verify that it doesn't extend
   past BUF_END.  */

gdb_byte *
read_uleb128 (gdb_byte *buf, gdb_byte *buf_end, ULONGEST *r)
{
  unsigned shift = 0;
  ULONGEST result = 0;
  gdb_byte byte;

  while (1)
    {
      if (buf >= buf_end)
        error (_("read_uleb128: Corrupted DWARF expression."));

      byte = *buf++;
      /* Widen before shifting: BYTE is only promoted to int, so for
         SHIFT >= 31 an unwidened shift would overflow.  */
      result |= ((ULONGEST) (byte & 0x7f)) << shift;
      if ((byte & 0x80) == 0)
        break;
      shift += 7;
    }
  *r = result;
  return buf;
}
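
/* Worked example (the one from the DWARF standard): the unsigned
   value 624485 is encoded as the bytes 0xe5 0x8e 0x26.  Reading
   them back:

       0xe5 & 0x7f = 0x65, shifted by 0  ->  0x000065
       0x8e & 0x7f = 0x0e, shifted by 7  ->  0x000765
       0x26 & 0x7f = 0x26, shifted by 14 ->  0x098765 = 624485

   and the clear high bit of 0x26 terminates the loop.  */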

/* Decode the signed LEB128 constant at BUF into the variable pointed to
   by R, and return the new value of BUF.  Verify that it doesn't extend
   past BUF_END.  */

gdb_byte *
read_sleb128 (gdb_byte *buf, gdb_byte *buf_end, LONGEST *r)
{
  unsigned shift = 0;
  LONGEST result = 0;
  gdb_byte byte;

  while (1)
    {
      if (buf >= buf_end)
        error (_("read_sleb128: Corrupted DWARF expression."));

      byte = *buf++;
      /* Widen before shifting, as in read_uleb128.  */
      result |= ((ULONGEST) (byte & 0x7f)) << shift;
      shift += 7;
      if ((byte & 0x80) == 0)
        break;
    }
  /* Sign-extend if the final byte's sign bit (0x40) is set and the
     decoded bits don't already fill *R; widen the 1 before shifting
     for the same reason as above.  */
  if (shift < (sizeof (*r) * 8) && (byte & 0x40) != 0)
    result |= -(((LONGEST) 1) << shift);

  *r = result;
  return buf;
}
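
/* Worked example: the signed value -129 is encoded as 0xff 0x7e.
   Reading it back gives (0x7f << 0) | (0x7e << 7) = 0x3f7f with
   SHIFT = 14; the final byte has bit 0x40 set, so the value is
   sign-extended with -(1 << 14), yielding 0x3f7f - 0x4000 = -129.  */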

/* Read an address of size ADDR_SIZE from BUF, and verify that it
   doesn't extend past BUF_END.  */

CORE_ADDR
dwarf2_read_address (struct gdbarch *gdbarch, gdb_byte *buf,
                     gdb_byte *buf_end, int addr_size)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  if (buf_end - buf < addr_size)
    error (_("dwarf2_read_address: Corrupted DWARF expression."));

  /* For most architectures, calling extract_unsigned_integer() alone
     is sufficient for extracting an address.  However, some
     architectures (e.g. MIPS) use signed addresses and using
     extract_unsigned_integer() will not produce a correct
     result.  Make sure we invoke gdbarch_integer_to_address()
     for those architectures which require it.

     The use of `unsigned_address_type' in the code below refers to
     the type of buf and has no bearing on the signedness of the
     address being returned.  */

  if (gdbarch_integer_to_address_p (gdbarch))
    return gdbarch_integer_to_address
             (gdbarch, unsigned_address_type (gdbarch, addr_size), buf);

  return extract_unsigned_integer (buf, addr_size, byte_order);
}

/* Return the type of an address of size ADDR_SIZE,
   for unsigned arithmetic.  */

static struct type *
unsigned_address_type (struct gdbarch *gdbarch, int addr_size)
{
  switch (addr_size)
    {
    case 2:
      return builtin_type (gdbarch)->builtin_uint16;
    case 4:
      return builtin_type (gdbarch)->builtin_uint32;
    case 8:
      return builtin_type (gdbarch)->builtin_uint64;
    default:
      internal_error (__FILE__, __LINE__,
                      _("Unsupported address size.\n"));
    }
}

/* Return the type of an address of size ADDR_SIZE,
   for signed arithmetic.  */

static struct type *
signed_address_type (struct gdbarch *gdbarch, int addr_size)
{
  switch (addr_size)
    {
    case 2:
      return builtin_type (gdbarch)->builtin_int16;
    case 4:
      return builtin_type (gdbarch)->builtin_int32;
    case 8:
      return builtin_type (gdbarch)->builtin_int64;
    default:
      internal_error (__FILE__, __LINE__,
                      _("Unsupported address size.\n"));
    }
}
\f

/* Check that the current operator is either at the end of an
   expression, or that it is followed by a composition operator.  */

static void
require_composition (gdb_byte *op_ptr, gdb_byte *op_end, const char *op_name)
{
  /* It seems like DW_OP_GNU_uninit should be handled here.  However,
     it doesn't seem to make sense for DW_OP_*_value, and it was not
     checked at the other places this function is called from.  */
  if (op_ptr != op_end && *op_ptr != DW_OP_piece && *op_ptr != DW_OP_bit_piece)
    error (_("DWARF-2 expression error: `%s' operations must be "
             "used either alone or in conjunction with DW_OP_piece "
             "or DW_OP_bit_piece."),
           op_name);
}
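
/* For example, `DW_OP_regx 64' alone is a valid expression, as is
   `DW_OP_regx 64  DW_OP_piece 8' within a composite location
   description, but `DW_OP_regx 64  DW_OP_lit0' is rejected here,
   since a register name cannot serve as an operand of a further
   computation.  */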

/* The engine for the expression evaluator.  Using the context in CTX,
   evaluate the expression between OP_PTR and OP_END.  */

static void
execute_stack_op (struct dwarf_expr_context *ctx,
                  gdb_byte *op_ptr, gdb_byte *op_end)
{
  enum bfd_endian byte_order = gdbarch_byte_order (ctx->gdbarch);

  ctx->location = DWARF_VALUE_MEMORY;
  ctx->initialized = 1;  /* Default is initialized.  */

  if (ctx->recursion_depth > ctx->max_recursion_depth)
    error (_("DWARF-2 expression error: Loop detected (%d)."),
           ctx->recursion_depth);
  ctx->recursion_depth++;

  while (op_ptr < op_end)
    {
      enum dwarf_location_atom op = *op_ptr++;
      CORE_ADDR result;
      /* Assume the value is not in stack memory.
         Code that knows otherwise sets this to 1.
         Some arithmetic on stack addresses can probably be assumed to still
         be a stack address, but we skip this complication for now.
         This is just an optimization, so it's always ok to punt
         and leave this as 0.  */
      int in_stack_memory = 0;
      ULONGEST uoffset, reg;
      LONGEST offset;

      switch (op)
        {
        case DW_OP_lit0:
        case DW_OP_lit1:
        case DW_OP_lit2:
        case DW_OP_lit3:
        case DW_OP_lit4:
        case DW_OP_lit5:
        case DW_OP_lit6:
        case DW_OP_lit7:
        case DW_OP_lit8:
        case DW_OP_lit9:
        case DW_OP_lit10:
        case DW_OP_lit11:
        case DW_OP_lit12:
        case DW_OP_lit13:
        case DW_OP_lit14:
        case DW_OP_lit15:
        case DW_OP_lit16:
        case DW_OP_lit17:
        case DW_OP_lit18:
        case DW_OP_lit19:
        case DW_OP_lit20:
        case DW_OP_lit21:
        case DW_OP_lit22:
        case DW_OP_lit23:
        case DW_OP_lit24:
        case DW_OP_lit25:
        case DW_OP_lit26:
        case DW_OP_lit27:
        case DW_OP_lit28:
        case DW_OP_lit29:
        case DW_OP_lit30:
        case DW_OP_lit31:
          result = op - DW_OP_lit0;
          break;

        case DW_OP_addr:
          result = dwarf2_read_address (ctx->gdbarch,
                                        op_ptr, op_end, ctx->addr_size);
          op_ptr += ctx->addr_size;
          break;

        case DW_OP_const1u:
          result = extract_unsigned_integer (op_ptr, 1, byte_order);
          op_ptr += 1;
          break;
        case DW_OP_const1s:
          result = extract_signed_integer (op_ptr, 1, byte_order);
          op_ptr += 1;
          break;
        case DW_OP_const2u:
          result = extract_unsigned_integer (op_ptr, 2, byte_order);
          op_ptr += 2;
          break;
        case DW_OP_const2s:
          result = extract_signed_integer (op_ptr, 2, byte_order);
          op_ptr += 2;
          break;
        case DW_OP_const4u:
          result = extract_unsigned_integer (op_ptr, 4, byte_order);
          op_ptr += 4;
          break;
        case DW_OP_const4s:
          result = extract_signed_integer (op_ptr, 4, byte_order);
          op_ptr += 4;
          break;
        case DW_OP_const8u:
          result = extract_unsigned_integer (op_ptr, 8, byte_order);
          op_ptr += 8;
          break;
        case DW_OP_const8s:
          result = extract_signed_integer (op_ptr, 8, byte_order);
          op_ptr += 8;
          break;
        case DW_OP_constu:
          op_ptr = read_uleb128 (op_ptr, op_end, &uoffset);
          result = uoffset;
          break;
        case DW_OP_consts:
          op_ptr = read_sleb128 (op_ptr, op_end, &offset);
          result = offset;
          break;

        /* The DW_OP_reg operations are required to occur alone in
           location expressions.  */
        case DW_OP_reg0:
        case DW_OP_reg1:
        case DW_OP_reg2:
        case DW_OP_reg3:
        case DW_OP_reg4:
        case DW_OP_reg5:
        case DW_OP_reg6:
        case DW_OP_reg7:
        case DW_OP_reg8:
        case DW_OP_reg9:
        case DW_OP_reg10:
        case DW_OP_reg11:
        case DW_OP_reg12:
        case DW_OP_reg13:
        case DW_OP_reg14:
        case DW_OP_reg15:
        case DW_OP_reg16:
        case DW_OP_reg17:
        case DW_OP_reg18:
        case DW_OP_reg19:
        case DW_OP_reg20:
        case DW_OP_reg21:
        case DW_OP_reg22:
        case DW_OP_reg23:
        case DW_OP_reg24:
        case DW_OP_reg25:
        case DW_OP_reg26:
        case DW_OP_reg27:
        case DW_OP_reg28:
        case DW_OP_reg29:
        case DW_OP_reg30:
        case DW_OP_reg31:
          if (op_ptr != op_end
              && *op_ptr != DW_OP_piece
              && *op_ptr != DW_OP_GNU_uninit)
            error (_("DWARF-2 expression error: DW_OP_reg operations must be "
                     "used either alone or in conjunction with DW_OP_piece."));

          result = op - DW_OP_reg0;
          ctx->location = DWARF_VALUE_REGISTER;
          break;

        case DW_OP_regx:
          op_ptr = read_uleb128 (op_ptr, op_end, &reg);
          require_composition (op_ptr, op_end, "DW_OP_regx");

          result = reg;
          ctx->location = DWARF_VALUE_REGISTER;
          break;

        case DW_OP_implicit_value:
          {
            ULONGEST len;

            op_ptr = read_uleb128 (op_ptr, op_end, &len);
            if (op_ptr + len > op_end)
              error (_("DW_OP_implicit_value: too few bytes available."));
            ctx->len = len;
            ctx->data = op_ptr;
            ctx->location = DWARF_VALUE_LITERAL;
            op_ptr += len;
            require_composition (op_ptr, op_end, "DW_OP_implicit_value");
          }
          goto no_push;

        case DW_OP_stack_value:
          ctx->location = DWARF_VALUE_STACK;
          require_composition (op_ptr, op_end, "DW_OP_stack_value");
          goto no_push;

        case DW_OP_breg0:
        case DW_OP_breg1:
        case DW_OP_breg2:
        case DW_OP_breg3:
        case DW_OP_breg4:
        case DW_OP_breg5:
        case DW_OP_breg6:
        case DW_OP_breg7:
        case DW_OP_breg8:
        case DW_OP_breg9:
        case DW_OP_breg10:
        case DW_OP_breg11:
        case DW_OP_breg12:
        case DW_OP_breg13:
        case DW_OP_breg14:
        case DW_OP_breg15:
        case DW_OP_breg16:
        case DW_OP_breg17:
        case DW_OP_breg18:
        case DW_OP_breg19:
        case DW_OP_breg20:
        case DW_OP_breg21:
        case DW_OP_breg22:
        case DW_OP_breg23:
        case DW_OP_breg24:
        case DW_OP_breg25:
        case DW_OP_breg26:
        case DW_OP_breg27:
        case DW_OP_breg28:
        case DW_OP_breg29:
        case DW_OP_breg30:
        case DW_OP_breg31:
          {
            op_ptr = read_sleb128 (op_ptr, op_end, &offset);
            result = (ctx->read_reg) (ctx->baton, op - DW_OP_breg0);
            result += offset;
          }
          break;
        case DW_OP_bregx:
          {
            op_ptr = read_uleb128 (op_ptr, op_end, &reg);
            op_ptr = read_sleb128 (op_ptr, op_end, &offset);
            result = (ctx->read_reg) (ctx->baton, reg);
            result += offset;
          }
          break;
        case DW_OP_fbreg:
          {
            gdb_byte *datastart;
            size_t datalen;
            unsigned int before_stack_len;

            op_ptr = read_sleb128 (op_ptr, op_end, &offset);
            /* Rather than create a whole new context, we simply
               record the stack length before execution, then reset it
               afterwards, effectively erasing whatever the recursive
               call put there.  */
            before_stack_len = ctx->stack_len;
            /* FIXME: cagney/2003-03-26: This code should be using
               get_frame_base_address(), and then implement a dwarf2
               specific this_base method.  */
            (ctx->get_frame_base) (ctx->baton, &datastart, &datalen);
            dwarf_expr_eval (ctx, datastart, datalen);
            if (ctx->location == DWARF_VALUE_LITERAL
                || ctx->location == DWARF_VALUE_STACK)
              error (_("Not implemented: computing frame base "
                       "using explicit value operator"));
            result = dwarf_expr_fetch (ctx, 0);
            if (ctx->location == DWARF_VALUE_REGISTER)
              result = (ctx->read_reg) (ctx->baton, result);
            result = result + offset;
            in_stack_memory = 1;
            ctx->stack_len = before_stack_len;
            ctx->location = DWARF_VALUE_MEMORY;
          }
          break;
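
          /* DW_OP_fbreg, for illustration: if DW_AT_frame_base is
             the expression `DW_OP_call_frame_cfa', then
             `DW_OP_fbreg -16' evaluates that sub-expression to get
             the CFA, adds -16, and pushes the result as a
             stack-memory address.  */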

        case DW_OP_dup:
          result = dwarf_expr_fetch (ctx, 0);
          in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, 0);
          break;

        case DW_OP_drop:
          dwarf_expr_pop (ctx);
          goto no_push;

        case DW_OP_pick:
          offset = *op_ptr++;
          result = dwarf_expr_fetch (ctx, offset);
          in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, offset);
          break;

        case DW_OP_swap:
          {
            struct dwarf_stack_value t1, t2;

            if (ctx->stack_len < 2)
              error (_("Not enough elements for DW_OP_swap.  Need 2, have %d."),
                     ctx->stack_len);
            t1 = ctx->stack[ctx->stack_len - 1];
            t2 = ctx->stack[ctx->stack_len - 2];
            ctx->stack[ctx->stack_len - 1] = t2;
            ctx->stack[ctx->stack_len - 2] = t1;
            goto no_push;
          }

        case DW_OP_over:
          result = dwarf_expr_fetch (ctx, 1);
          in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, 1);
          break;

        case DW_OP_rot:
          {
            struct dwarf_stack_value t1, t2, t3;

            if (ctx->stack_len < 3)
              error (_("Not enough elements for DW_OP_rot.  Need 3, have %d."),
                     ctx->stack_len);
            t1 = ctx->stack[ctx->stack_len - 1];
            t2 = ctx->stack[ctx->stack_len - 2];
            t3 = ctx->stack[ctx->stack_len - 3];
            ctx->stack[ctx->stack_len - 1] = t2;
            ctx->stack[ctx->stack_len - 2] = t3;
            ctx->stack[ctx->stack_len - 3] = t1;
            goto no_push;
          }

        case DW_OP_deref:
        case DW_OP_deref_size:
        case DW_OP_abs:
        case DW_OP_neg:
        case DW_OP_not:
        case DW_OP_plus_uconst:
          /* Unary operations.  */
          result = dwarf_expr_fetch (ctx, 0);
          dwarf_expr_pop (ctx);

          switch (op)
            {
            case DW_OP_deref:
              {
                gdb_byte *buf = alloca (ctx->addr_size);

                (ctx->read_mem) (ctx->baton, buf, result, ctx->addr_size);
                result = dwarf2_read_address (ctx->gdbarch,
                                              buf, buf + ctx->addr_size,
                                              ctx->addr_size);
              }
              break;

            case DW_OP_deref_size:
              {
                int addr_size = *op_ptr++;
                gdb_byte *buf = alloca (addr_size);

                (ctx->read_mem) (ctx->baton, buf, result, addr_size);
                result = dwarf2_read_address (ctx->gdbarch,
                                              buf, buf + addr_size,
                                              addr_size);
              }
              break;

            case DW_OP_abs:
              /* Compare as a full-width signed value; a narrower
                 cast would examine only the low bits on 64-bit
                 targets.  */
              if ((LONGEST) result < 0)
                result = -result;
              break;
            case DW_OP_neg:
              result = -result;
              break;
            case DW_OP_not:
              result = ~result;
              break;
            case DW_OP_plus_uconst:
              op_ptr = read_uleb128 (op_ptr, op_end, &reg);
              result += reg;
              break;
            }
          break;

        case DW_OP_and:
        case DW_OP_div:
        case DW_OP_minus:
        case DW_OP_mod:
        case DW_OP_mul:
        case DW_OP_or:
        case DW_OP_plus:
        case DW_OP_shl:
        case DW_OP_shr:
        case DW_OP_shra:
        case DW_OP_xor:
        case DW_OP_le:
        case DW_OP_ge:
        case DW_OP_eq:
        case DW_OP_lt:
        case DW_OP_gt:
        case DW_OP_ne:
          {
            /* Binary operations.  Use the value engine to do computations in
               the right width.  */
            CORE_ADDR first, second;
            enum exp_opcode binop;
            struct value *val1 = NULL, *val2 = NULL;
            struct type *stype, *utype;

            second = dwarf_expr_fetch (ctx, 0);
            dwarf_expr_pop (ctx);

            first = dwarf_expr_fetch (ctx, 0);
            dwarf_expr_pop (ctx);

            utype = unsigned_address_type (ctx->gdbarch, ctx->addr_size);
            stype = signed_address_type (ctx->gdbarch, ctx->addr_size);

            switch (op)
              {
              case DW_OP_and:
                binop = BINOP_BITWISE_AND;
                break;
              case DW_OP_div:
                binop = BINOP_DIV;
                val1 = value_from_longest (stype, first);
                val2 = value_from_longest (stype, second);
                break;
              case DW_OP_minus:
                binop = BINOP_SUB;
                break;
              case DW_OP_mod:
                binop = BINOP_MOD;
                break;
              case DW_OP_mul:
                binop = BINOP_MUL;
                break;
              case DW_OP_or:
                binop = BINOP_BITWISE_IOR;
                break;
              case DW_OP_plus:
                binop = BINOP_ADD;
                break;
              case DW_OP_shl:
                binop = BINOP_LSH;
                break;
              case DW_OP_shr:
                binop = BINOP_RSH;
                break;
              case DW_OP_shra:
                binop = BINOP_RSH;
                val1 = value_from_longest (stype, first);
                break;
              case DW_OP_xor:
                binop = BINOP_BITWISE_XOR;
                break;
              case DW_OP_le:
                binop = BINOP_LEQ;
                val1 = value_from_longest (stype, first);
                val2 = value_from_longest (stype, second);
                break;
              case DW_OP_ge:
                binop = BINOP_GEQ;
                val1 = value_from_longest (stype, first);
                val2 = value_from_longest (stype, second);
                break;
              case DW_OP_eq:
                binop = BINOP_EQUAL;
                val1 = value_from_longest (stype, first);
                val2 = value_from_longest (stype, second);
                break;
              case DW_OP_lt:
                binop = BINOP_LESS;
                val1 = value_from_longest (stype, first);
                val2 = value_from_longest (stype, second);
                break;
              case DW_OP_gt:
                binop = BINOP_GTR;
                val1 = value_from_longest (stype, first);
                val2 = value_from_longest (stype, second);
                break;
              case DW_OP_ne:
                binop = BINOP_NOTEQUAL;
                val1 = value_from_longest (stype, first);
                val2 = value_from_longest (stype, second);
                break;
              default:
                internal_error (__FILE__, __LINE__,
                                _("Can't be reached."));
              }

            /* We use unsigned operands by default.  */
            if (val1 == NULL)
              val1 = value_from_longest (utype, first);
            if (val2 == NULL)
              val2 = value_from_longest (utype, second);

            result = value_as_long (value_binop (val1, val2, binop));
          }
          break;

        case DW_OP_call_frame_cfa:
          result = (ctx->get_frame_cfa) (ctx->baton);
          in_stack_memory = 1;
          break;

        case DW_OP_GNU_push_tls_address:
          /* The variable is at a constant offset within the
             thread-local storage block of the objfile (dynamic
             linker module) containing this expression, for the
             current thread.  The top of the stack holds that offset;
             pop it, map it through the target's TLS machinery to the
             variable's address in the current thread, and push the
             result.  Nothing should follow this operator, so the top
             of the stack will be returned.  */
          result = dwarf_expr_fetch (ctx, 0);
          dwarf_expr_pop (ctx);
          result = (ctx->get_tls_address) (ctx->baton, result);
          break;

        case DW_OP_skip:
          offset = extract_signed_integer (op_ptr, 2, byte_order);
          op_ptr += 2;
          op_ptr += offset;
          goto no_push;

        case DW_OP_bra:
          offset = extract_signed_integer (op_ptr, 2, byte_order);
          op_ptr += 2;
          if (dwarf_expr_fetch (ctx, 0) != 0)
            op_ptr += offset;
          dwarf_expr_pop (ctx);
          goto no_push;

        case DW_OP_nop:
          goto no_push;

        case DW_OP_piece:
          {
            ULONGEST size;

            /* Record the piece.  */
            op_ptr = read_uleb128 (op_ptr, op_end, &size);
            add_piece (ctx, size);

            /* Pop off the address/regnum, and reset the location
               type.  */
            if (ctx->location != DWARF_VALUE_LITERAL)
              dwarf_expr_pop (ctx);
            ctx->location = DWARF_VALUE_MEMORY;
          }
          goto no_push;

        case DW_OP_GNU_uninit:
          if (op_ptr != op_end)
            error (_("DWARF-2 expression error: DW_OP_GNU_uninit must always "
                     "be the very last op."));

          ctx->initialized = 0;
          goto no_push;

        default:
          error (_("Unhandled dwarf expression opcode 0x%x"), op);
        }

      /* Most things push a result value.  */
      dwarf_expr_push (ctx, result, in_stack_memory);
    no_push:;
    }

  ctx->recursion_depth--;
  gdb_assert (ctx->recursion_depth >= 0);
}