* dwarf2expr.c (execute_stack_op, case DW_OP_piece): Delete unused
[deliverable/binutils-gdb.git] / gdb / dwarf2expr.c
1 /* DWARF 2 Expression Evaluator.
2
3 Copyright (C) 2001, 2002, 2003, 2005, 2007, 2008, 2009
4 Free Software Foundation, Inc.
5
6 Contributed by Daniel Berlin (dan@dberlin.org)
7
8 This file is part of GDB.
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 3 of the License, or
13 (at your option) any later version.
14
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with this program. If not, see <http://www.gnu.org/licenses/>. */
22
23 #include "defs.h"
24 #include "symtab.h"
25 #include "gdbtypes.h"
26 #include "value.h"
27 #include "gdbcore.h"
28 #include "dwarf2.h"
29 #include "dwarf2expr.h"
30 #include "gdb_assert.h"
31
32 /* Local prototypes. */
33
34 static void execute_stack_op (struct dwarf_expr_context *,
35 gdb_byte *, gdb_byte *);
36 static struct type *unsigned_address_type (struct gdbarch *, int);
37
38 /* Create a new context for the expression evaluator. */
39
40 struct dwarf_expr_context *
41 new_dwarf_expr_context (void)
42 {
43 struct dwarf_expr_context *retval;
44 retval = xcalloc (1, sizeof (struct dwarf_expr_context));
45 retval->stack_len = 0;
46 retval->stack_allocated = 10;
47 retval->stack = xmalloc (retval->stack_allocated * sizeof (CORE_ADDR));
48 retval->num_pieces = 0;
49 retval->pieces = 0;
50 retval->max_recursion_depth = 0x100;
51 return retval;
52 }
53
54 /* Release the memory allocated to CTX. */
55
56 void
57 free_dwarf_expr_context (struct dwarf_expr_context *ctx)
58 {
59 xfree (ctx->stack);
60 xfree (ctx->pieces);
61 xfree (ctx);
62 }
63
/* Cleanup callback; ARG is the struct dwarf_expr_context to
   destroy.  */

static void
free_dwarf_expr_context_cleanup (void *arg)
{
  struct dwarf_expr_context *ctx = arg;

  free_dwarf_expr_context (ctx);
}
71
72 /* Return a cleanup that calls free_dwarf_expr_context. */
73
74 struct cleanup *
75 make_cleanup_free_dwarf_expr_context (struct dwarf_expr_context *ctx)
76 {
77 return make_cleanup (free_dwarf_expr_context_cleanup, ctx);
78 }
79
80 /* Expand the memory allocated to CTX's stack to contain at least
81 NEED more elements than are currently used. */
82
83 static void
84 dwarf_expr_grow_stack (struct dwarf_expr_context *ctx, size_t need)
85 {
86 if (ctx->stack_len + need > ctx->stack_allocated)
87 {
88 size_t newlen = ctx->stack_len + need + 10;
89 ctx->stack = xrealloc (ctx->stack,
90 newlen * sizeof (CORE_ADDR));
91 ctx->stack_allocated = newlen;
92 }
93 }
94
95 /* Push VALUE onto CTX's stack. */
96
97 void
98 dwarf_expr_push (struct dwarf_expr_context *ctx, CORE_ADDR value)
99 {
100 dwarf_expr_grow_stack (ctx, 1);
101 ctx->stack[ctx->stack_len++] = value;
102 }
103
104 /* Pop the top item off of CTX's stack. */
105
106 void
107 dwarf_expr_pop (struct dwarf_expr_context *ctx)
108 {
109 if (ctx->stack_len <= 0)
110 error (_("dwarf expression stack underflow"));
111 ctx->stack_len--;
112 }
113
114 /* Retrieve the N'th item on CTX's stack. */
115
116 CORE_ADDR
117 dwarf_expr_fetch (struct dwarf_expr_context *ctx, int n)
118 {
119 if (ctx->stack_len <= n)
120 error (_("Asked for position %d of stack, stack only has %d elements on it."),
121 n, ctx->stack_len);
122 return ctx->stack[ctx->stack_len - (1 + n)];
123
124 }
125
126 /* Add a new piece to CTX's piece list. */
127 static void
128 add_piece (struct dwarf_expr_context *ctx, ULONGEST size)
129 {
130 struct dwarf_expr_piece *p;
131
132 ctx->num_pieces++;
133
134 if (ctx->pieces)
135 ctx->pieces = xrealloc (ctx->pieces,
136 (ctx->num_pieces
137 * sizeof (struct dwarf_expr_piece)));
138 else
139 ctx->pieces = xmalloc (ctx->num_pieces
140 * sizeof (struct dwarf_expr_piece));
141
142 p = &ctx->pieces[ctx->num_pieces - 1];
143 p->location = ctx->location;
144 p->size = size;
145 if (p->location == DWARF_VALUE_LITERAL)
146 {
147 p->v.literal.data = ctx->data;
148 p->v.literal.length = ctx->len;
149 }
150 else
151 p->v.value = dwarf_expr_fetch (ctx, 0);
152 }
153
154 /* Evaluate the expression at ADDR (LEN bytes long) using the context
155 CTX. */
156
157 void
158 dwarf_expr_eval (struct dwarf_expr_context *ctx, gdb_byte *addr, size_t len)
159 {
160 int old_recursion_depth = ctx->recursion_depth;
161
162 execute_stack_op (ctx, addr, addr + len);
163
164 /* CTX RECURSION_DEPTH becomes invalid if an exception was thrown here. */
165
166 gdb_assert (ctx->recursion_depth == old_recursion_depth);
167 }
168
169 /* Decode the unsigned LEB128 constant at BUF into the variable pointed to
170 by R, and return the new value of BUF. Verify that it doesn't extend
171 past BUF_END. */
172
173 gdb_byte *
174 read_uleb128 (gdb_byte *buf, gdb_byte *buf_end, ULONGEST * r)
175 {
176 unsigned shift = 0;
177 ULONGEST result = 0;
178 gdb_byte byte;
179
180 while (1)
181 {
182 if (buf >= buf_end)
183 error (_("read_uleb128: Corrupted DWARF expression."));
184
185 byte = *buf++;
186 result |= (byte & 0x7f) << shift;
187 if ((byte & 0x80) == 0)
188 break;
189 shift += 7;
190 }
191 *r = result;
192 return buf;
193 }
194
195 /* Decode the signed LEB128 constant at BUF into the variable pointed to
196 by R, and return the new value of BUF. Verify that it doesn't extend
197 past BUF_END. */
198
199 gdb_byte *
200 read_sleb128 (gdb_byte *buf, gdb_byte *buf_end, LONGEST * r)
201 {
202 unsigned shift = 0;
203 LONGEST result = 0;
204 gdb_byte byte;
205
206 while (1)
207 {
208 if (buf >= buf_end)
209 error (_("read_sleb128: Corrupted DWARF expression."));
210
211 byte = *buf++;
212 result |= (byte & 0x7f) << shift;
213 shift += 7;
214 if ((byte & 0x80) == 0)
215 break;
216 }
217 if (shift < (sizeof (*r) * 8) && (byte & 0x40) != 0)
218 result |= -(1 << shift);
219
220 *r = result;
221 return buf;
222 }
223
224 /* Read an address of size ADDR_SIZE from BUF, and verify that it
225 doesn't extend past BUF_END. */
226
227 CORE_ADDR
228 dwarf2_read_address (struct gdbarch *gdbarch, gdb_byte *buf,
229 gdb_byte *buf_end, int addr_size)
230 {
231 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
232 CORE_ADDR result;
233
234 if (buf_end - buf < addr_size)
235 error (_("dwarf2_read_address: Corrupted DWARF expression."));
236
237 /* For most architectures, calling extract_unsigned_integer() alone
238 is sufficient for extracting an address. However, some
239 architectures (e.g. MIPS) use signed addresses and using
240 extract_unsigned_integer() will not produce a correct
241 result. Make sure we invoke gdbarch_integer_to_address()
242 for those architectures which require it.
243
244 The use of `unsigned_address_type' in the code below refers to
245 the type of buf and has no bearing on the signedness of the
246 address being returned. */
247
248 if (gdbarch_integer_to_address_p (gdbarch))
249 return gdbarch_integer_to_address
250 (gdbarch, unsigned_address_type (gdbarch, addr_size), buf);
251
252 return extract_unsigned_integer (buf, addr_size, byte_order);
253 }
254
255 /* Return the type of an address of size ADDR_SIZE,
256 for unsigned arithmetic. */
257
258 static struct type *
259 unsigned_address_type (struct gdbarch *gdbarch, int addr_size)
260 {
261 switch (addr_size)
262 {
263 case 2:
264 return builtin_type (gdbarch)->builtin_uint16;
265 case 4:
266 return builtin_type (gdbarch)->builtin_uint32;
267 case 8:
268 return builtin_type (gdbarch)->builtin_uint64;
269 default:
270 internal_error (__FILE__, __LINE__,
271 _("Unsupported address size.\n"));
272 }
273 }
274
275 /* Return the type of an address of size ADDR_SIZE,
276 for signed arithmetic. */
277
278 static struct type *
279 signed_address_type (struct gdbarch *gdbarch, int addr_size)
280 {
281 switch (addr_size)
282 {
283 case 2:
284 return builtin_type (gdbarch)->builtin_int16;
285 case 4:
286 return builtin_type (gdbarch)->builtin_int32;
287 case 8:
288 return builtin_type (gdbarch)->builtin_int64;
289 default:
290 internal_error (__FILE__, __LINE__,
291 _("Unsupported address size.\n"));
292 }
293 }
294 \f
295
296 /* Check that the current operator is either at the end of an
297 expression, or that it is followed by a composition operator. */
298
299 static void
300 require_composition (gdb_byte *op_ptr, gdb_byte *op_end, const char *op_name)
301 {
302 /* It seems like DW_OP_GNU_uninit should be handled here. However,
303 it doesn't seem to make sense for DW_OP_*_value, and it was not
304 checked at the other place that this function is called. */
305 if (op_ptr != op_end && *op_ptr != DW_OP_piece && *op_ptr != DW_OP_bit_piece)
306 error (_("DWARF-2 expression error: `%s' operations must be "
307 "used either alone or in conjuction with DW_OP_piece "
308 "or DW_OP_bit_piece."),
309 op_name);
310 }
311
/* The engine for the expression evaluator.  Using the context in CTX,
   evaluate the expression between OP_PTR and OP_END.  On return, the
   result is on CTX's stack (or described by CTX's piece list) and
   CTX->location records how to interpret it (memory address,
   register number, stack value, or literal).  */

static void
execute_stack_op (struct dwarf_expr_context *ctx,
		  gdb_byte *op_ptr, gdb_byte *op_end)
{
  enum bfd_endian byte_order = gdbarch_byte_order (ctx->gdbarch);

  /* Until an operator says otherwise, the result denotes a memory
     address and is considered initialized.  */
  ctx->location = DWARF_VALUE_MEMORY;
  ctx->initialized = 1;		/* Default is initialized.  */

  /* Bound the recursion that DW_OP_fbreg's nested evaluation can
     cause, so a malformed expression cannot loop forever.  */
  if (ctx->recursion_depth > ctx->max_recursion_depth)
    error (_("DWARF-2 expression error: Loop detected (%d)."),
	   ctx->recursion_depth);
  ctx->recursion_depth++;

  while (op_ptr < op_end)
    {
      enum dwarf_location_atom op = *op_ptr++;
      CORE_ADDR result;
      ULONGEST uoffset, reg;
      LONGEST offset;

      switch (op)
	{
	/* Small literal constants 0..31 are encoded directly in the
	   opcode.  */
	case DW_OP_lit0:
	case DW_OP_lit1:
	case DW_OP_lit2:
	case DW_OP_lit3:
	case DW_OP_lit4:
	case DW_OP_lit5:
	case DW_OP_lit6:
	case DW_OP_lit7:
	case DW_OP_lit8:
	case DW_OP_lit9:
	case DW_OP_lit10:
	case DW_OP_lit11:
	case DW_OP_lit12:
	case DW_OP_lit13:
	case DW_OP_lit14:
	case DW_OP_lit15:
	case DW_OP_lit16:
	case DW_OP_lit17:
	case DW_OP_lit18:
	case DW_OP_lit19:
	case DW_OP_lit20:
	case DW_OP_lit21:
	case DW_OP_lit22:
	case DW_OP_lit23:
	case DW_OP_lit24:
	case DW_OP_lit25:
	case DW_OP_lit26:
	case DW_OP_lit27:
	case DW_OP_lit28:
	case DW_OP_lit29:
	case DW_OP_lit30:
	case DW_OP_lit31:
	  result = op - DW_OP_lit0;
	  break;

	/* A machine address of the target's address size.  */
	case DW_OP_addr:
	  result = dwarf2_read_address (ctx->gdbarch,
					op_ptr, op_end, ctx->addr_size);
	  op_ptr += ctx->addr_size;
	  break;

	/* Fixed-size immediate constants.  */
	case DW_OP_const1u:
	  result = extract_unsigned_integer (op_ptr, 1, byte_order);
	  op_ptr += 1;
	  break;
	case DW_OP_const1s:
	  result = extract_signed_integer (op_ptr, 1, byte_order);
	  op_ptr += 1;
	  break;
	case DW_OP_const2u:
	  result = extract_unsigned_integer (op_ptr, 2, byte_order);
	  op_ptr += 2;
	  break;
	case DW_OP_const2s:
	  result = extract_signed_integer (op_ptr, 2, byte_order);
	  op_ptr += 2;
	  break;
	case DW_OP_const4u:
	  result = extract_unsigned_integer (op_ptr, 4, byte_order);
	  op_ptr += 4;
	  break;
	case DW_OP_const4s:
	  result = extract_signed_integer (op_ptr, 4, byte_order);
	  op_ptr += 4;
	  break;
	case DW_OP_const8u:
	  result = extract_unsigned_integer (op_ptr, 8, byte_order);
	  op_ptr += 8;
	  break;
	case DW_OP_const8s:
	  result = extract_signed_integer (op_ptr, 8, byte_order);
	  op_ptr += 8;
	  break;
	/* Variable-length LEB128 constants.  */
	case DW_OP_constu:
	  op_ptr = read_uleb128 (op_ptr, op_end, &uoffset);
	  result = uoffset;
	  break;
	case DW_OP_consts:
	  op_ptr = read_sleb128 (op_ptr, op_end, &offset);
	  result = offset;
	  break;

	/* The DW_OP_reg operations are required to occur alone in
	   location expressions.  */
	case DW_OP_reg0:
	case DW_OP_reg1:
	case DW_OP_reg2:
	case DW_OP_reg3:
	case DW_OP_reg4:
	case DW_OP_reg5:
	case DW_OP_reg6:
	case DW_OP_reg7:
	case DW_OP_reg8:
	case DW_OP_reg9:
	case DW_OP_reg10:
	case DW_OP_reg11:
	case DW_OP_reg12:
	case DW_OP_reg13:
	case DW_OP_reg14:
	case DW_OP_reg15:
	case DW_OP_reg16:
	case DW_OP_reg17:
	case DW_OP_reg18:
	case DW_OP_reg19:
	case DW_OP_reg20:
	case DW_OP_reg21:
	case DW_OP_reg22:
	case DW_OP_reg23:
	case DW_OP_reg24:
	case DW_OP_reg25:
	case DW_OP_reg26:
	case DW_OP_reg27:
	case DW_OP_reg28:
	case DW_OP_reg29:
	case DW_OP_reg30:
	case DW_OP_reg31:
	  if (op_ptr != op_end
	      && *op_ptr != DW_OP_piece
	      && *op_ptr != DW_OP_GNU_uninit)
	    error (_("DWARF-2 expression error: DW_OP_reg operations must be "
		     "used either alone or in conjuction with DW_OP_piece."));

	  /* The "value" pushed is the register number, not its
	     contents; LOCATION tells the consumer to interpret it
	     that way.  */
	  result = op - DW_OP_reg0;
	  ctx->location = DWARF_VALUE_REGISTER;
	  break;

	/* Like DW_OP_regN but the register number is a ULEB128
	   operand.  */
	case DW_OP_regx:
	  op_ptr = read_uleb128 (op_ptr, op_end, &reg);
	  require_composition (op_ptr, op_end, "DW_OP_regx");

	  result = reg;
	  ctx->location = DWARF_VALUE_REGISTER;
	  break;

	/* The value is a block of bytes embedded in the expression
	   itself.  */
	case DW_OP_implicit_value:
	  {
	    ULONGEST len;
	    op_ptr = read_uleb128 (op_ptr, op_end, &len);
	    if (op_ptr + len > op_end)
	      error (_("DW_OP_implicit_value: too few bytes available."));
	    ctx->len = len;
	    ctx->data = op_ptr;
	    ctx->location = DWARF_VALUE_LITERAL;
	    op_ptr += len;
	    require_composition (op_ptr, op_end, "DW_OP_implicit_value");
	  }
	  goto no_push;

	/* The object's value (not its address) is the top of the
	   stack.  */
	case DW_OP_stack_value:
	  ctx->location = DWARF_VALUE_STACK;
	  require_composition (op_ptr, op_end, "DW_OP_stack_value");
	  goto no_push;

	/* Register contents plus a SLEB128 offset.  */
	case DW_OP_breg0:
	case DW_OP_breg1:
	case DW_OP_breg2:
	case DW_OP_breg3:
	case DW_OP_breg4:
	case DW_OP_breg5:
	case DW_OP_breg6:
	case DW_OP_breg7:
	case DW_OP_breg8:
	case DW_OP_breg9:
	case DW_OP_breg10:
	case DW_OP_breg11:
	case DW_OP_breg12:
	case DW_OP_breg13:
	case DW_OP_breg14:
	case DW_OP_breg15:
	case DW_OP_breg16:
	case DW_OP_breg17:
	case DW_OP_breg18:
	case DW_OP_breg19:
	case DW_OP_breg20:
	case DW_OP_breg21:
	case DW_OP_breg22:
	case DW_OP_breg23:
	case DW_OP_breg24:
	case DW_OP_breg25:
	case DW_OP_breg26:
	case DW_OP_breg27:
	case DW_OP_breg28:
	case DW_OP_breg29:
	case DW_OP_breg30:
	case DW_OP_breg31:
	  {
	    op_ptr = read_sleb128 (op_ptr, op_end, &offset);
	    result = (ctx->read_reg) (ctx->baton, op - DW_OP_breg0);
	    result += offset;
	  }
	  break;
	/* As above, with the register number as a ULEB128 operand.  */
	case DW_OP_bregx:
	  {
	    op_ptr = read_uleb128 (op_ptr, op_end, &reg);
	    op_ptr = read_sleb128 (op_ptr, op_end, &offset);
	    result = (ctx->read_reg) (ctx->baton, reg);
	    result += offset;
	  }
	  break;
	/* Frame-base-relative: evaluate the frame base's own DWARF
	   expression (recursively), then add the SLEB128 offset.  */
	case DW_OP_fbreg:
	  {
	    gdb_byte *datastart;
	    size_t datalen;
	    unsigned int before_stack_len;

	    op_ptr = read_sleb128 (op_ptr, op_end, &offset);
	    /* Rather than create a whole new context, we simply
	       record the stack length before execution, then reset it
	       afterwards, effectively erasing whatever the recursive
	       call put there.  */
	    before_stack_len = ctx->stack_len;
	    /* FIXME: cagney/2003-03-26: This code should be using
	       get_frame_base_address(), and then implement a dwarf2
	       specific this_base method.  */
	    (ctx->get_frame_base) (ctx->baton, &datastart, &datalen);
	    dwarf_expr_eval (ctx, datastart, datalen);
	    if (ctx->location == DWARF_VALUE_LITERAL
		|| ctx->location == DWARF_VALUE_STACK)
	      error (_("Not implemented: computing frame base using explicit value operator"));
	    result = dwarf_expr_fetch (ctx, 0);
	    if (ctx->location == DWARF_VALUE_REGISTER)
	      result = (ctx->read_reg) (ctx->baton, result);
	    result = result + offset;
	    ctx->stack_len = before_stack_len;
	    ctx->location = DWARF_VALUE_MEMORY;
	  }
	  break;

	/* Stack-manipulation operators.  */
	case DW_OP_dup:
	  result = dwarf_expr_fetch (ctx, 0);
	  break;

	case DW_OP_drop:
	  dwarf_expr_pop (ctx);
	  goto no_push;

	case DW_OP_pick:
	  /* The 1-byte operand is the depth of the entry to copy.  */
	  offset = *op_ptr++;
	  result = dwarf_expr_fetch (ctx, offset);
	  break;

	case DW_OP_swap:
	  {
	    CORE_ADDR t1, t2;

	    if (ctx->stack_len < 2)
	      error (_("Not enough elements for DW_OP_swap. Need 2, have %d."),
		     ctx->stack_len);
	    t1 = ctx->stack[ctx->stack_len - 1];
	    t2 = ctx->stack[ctx->stack_len - 2];
	    ctx->stack[ctx->stack_len - 1] = t2;
	    ctx->stack[ctx->stack_len - 2] = t1;
	    goto no_push;
	  }

	case DW_OP_over:
	  result = dwarf_expr_fetch (ctx, 1);
	  break;

	case DW_OP_rot:
	  {
	    CORE_ADDR t1, t2, t3;

	    if (ctx->stack_len < 3)
	      error (_("Not enough elements for DW_OP_rot. Need 3, have %d."),
		     ctx->stack_len);
	    t1 = ctx->stack[ctx->stack_len - 1];
	    t2 = ctx->stack[ctx->stack_len - 2];
	    t3 = ctx->stack[ctx->stack_len - 3];
	    ctx->stack[ctx->stack_len - 1] = t2;
	    ctx->stack[ctx->stack_len - 2] = t3;
	    ctx->stack[ctx->stack_len - 3] = t1;
	    goto no_push;
	  }

	case DW_OP_deref:
	case DW_OP_deref_size:
	case DW_OP_abs:
	case DW_OP_neg:
	case DW_OP_not:
	case DW_OP_plus_uconst:
	  /* Unary operations.  Pop the operand first, then push the
	     transformed result via the common path below.  */
	  result = dwarf_expr_fetch (ctx, 0);
	  dwarf_expr_pop (ctx);

	  switch (op)
	    {
	    case DW_OP_deref:
	      {
		gdb_byte *buf = alloca (ctx->addr_size);
		(ctx->read_mem) (ctx->baton, buf, result, ctx->addr_size);
		result = dwarf2_read_address (ctx->gdbarch,
					      buf, buf + ctx->addr_size,
					      ctx->addr_size);
	      }
	      break;

	    case DW_OP_deref_size:
	      {
		/* The 1-byte operand gives the number of bytes to
		   read, which may be smaller than an address.  */
		int addr_size = *op_ptr++;
		gdb_byte *buf = alloca (addr_size);
		(ctx->read_mem) (ctx->baton, buf, result, addr_size);
		result = dwarf2_read_address (ctx->gdbarch,
					      buf, buf + addr_size,
					      addr_size);
	      }
	      break;

	    case DW_OP_abs:
	      /* NOTE(review): the cast truncates RESULT to 32 bits
		 before the sign test; on targets with 64-bit
		 addresses this looks wrong -- confirm whether this
		 should test via LONGEST instead.  */
	      if ((signed int) result < 0)
		result = -result;
	      break;
	    case DW_OP_neg:
	      result = -result;
	      break;
	    case DW_OP_not:
	      result = ~result;
	      break;
	    case DW_OP_plus_uconst:
	      op_ptr = read_uleb128 (op_ptr, op_end, &reg);
	      result += reg;
	      break;
	    }
	  break;

	case DW_OP_and:
	case DW_OP_div:
	case DW_OP_minus:
	case DW_OP_mod:
	case DW_OP_mul:
	case DW_OP_or:
	case DW_OP_plus:
	case DW_OP_shl:
	case DW_OP_shr:
	case DW_OP_shra:
	case DW_OP_xor:
	case DW_OP_le:
	case DW_OP_ge:
	case DW_OP_eq:
	case DW_OP_lt:
	case DW_OP_gt:
	case DW_OP_ne:
	  {
	    /* Binary operations.  Use the value engine to do computations in
	       the right width.  */
	    CORE_ADDR first, second;
	    enum exp_opcode binop;
	    struct value *val1, *val2;
	    struct type *stype, *utype;

	    second = dwarf_expr_fetch (ctx, 0);
	    dwarf_expr_pop (ctx);

	    first = dwarf_expr_fetch (ctx, 0);
	    dwarf_expr_pop (ctx);

	    /* Operands are unsigned by default; DW_OP_shra below
	       overrides val1 with the signed type to get an
	       arithmetic shift.  */
	    utype = unsigned_address_type (ctx->gdbarch, ctx->addr_size);
	    stype = signed_address_type (ctx->gdbarch, ctx->addr_size);
	    val1 = value_from_longest (utype, first);
	    val2 = value_from_longest (utype, second);

	    switch (op)
	      {
	      case DW_OP_and:
		binop = BINOP_BITWISE_AND;
		break;
	      case DW_OP_div:
		binop = BINOP_DIV;
		break;
	      case DW_OP_minus:
		binop = BINOP_SUB;
		break;
	      case DW_OP_mod:
		binop = BINOP_MOD;
		break;
	      case DW_OP_mul:
		binop = BINOP_MUL;
		break;
	      case DW_OP_or:
		binop = BINOP_BITWISE_IOR;
		break;
	      case DW_OP_plus:
		binop = BINOP_ADD;
		break;
	      case DW_OP_shl:
		binop = BINOP_LSH;
		break;
	      case DW_OP_shr:
		binop = BINOP_RSH;
		break;
	      case DW_OP_shra:
		binop = BINOP_RSH;
		val1 = value_from_longest (stype, first);
		break;
	      case DW_OP_xor:
		binop = BINOP_BITWISE_XOR;
		break;
	      case DW_OP_le:
		binop = BINOP_LEQ;
		break;
	      case DW_OP_ge:
		binop = BINOP_GEQ;
		break;
	      case DW_OP_eq:
		binop = BINOP_EQUAL;
		break;
	      case DW_OP_lt:
		binop = BINOP_LESS;
		break;
	      case DW_OP_gt:
		binop = BINOP_GTR;
		break;
	      case DW_OP_ne:
		binop = BINOP_NOTEQUAL;
		break;
	      default:
		internal_error (__FILE__, __LINE__,
				_("Can't be reached."));
	      }
	    result = value_as_long (value_binop (val1, val2, binop));
	  }
	  break;

	case DW_OP_call_frame_cfa:
	  result = (ctx->get_frame_cfa) (ctx->baton);
	  break;

	case DW_OP_GNU_push_tls_address:
	  /* Variable is at a constant offset in the thread-local
	     storage block into the objfile for the current thread and
	     the dynamic linker module containing this expression.  Here
	     we return returns the offset from that base.  The top of the
	     stack has the offset from the beginning of the thread
	     control block at which the variable is located.  Nothing
	     should follow this operator, so the top of stack would be
	     returned.  */
	  result = dwarf_expr_fetch (ctx, 0);
	  dwarf_expr_pop (ctx);
	  result = (ctx->get_tls_address) (ctx->baton, result);
	  break;

	/* Unconditional branch: 2-byte signed offset relative to the
	   next instruction.  */
	case DW_OP_skip:
	  offset = extract_signed_integer (op_ptr, 2, byte_order);
	  op_ptr += 2;
	  op_ptr += offset;
	  goto no_push;

	/* Conditional branch: taken if the popped top-of-stack is
	   nonzero.  */
	case DW_OP_bra:
	  offset = extract_signed_integer (op_ptr, 2, byte_order);
	  op_ptr += 2;
	  if (dwarf_expr_fetch (ctx, 0) != 0)
	    op_ptr += offset;
	  dwarf_expr_pop (ctx);
	  goto no_push;

	case DW_OP_nop:
	  goto no_push;

	case DW_OP_piece:
	  {
	    ULONGEST size;

	    /* Record the piece.  */
	    op_ptr = read_uleb128 (op_ptr, op_end, &size);
	    add_piece (ctx, size);

	    /* Pop off the address/regnum, and reset the location
	       type.  */
	    if (ctx->location != DWARF_VALUE_LITERAL)
	      dwarf_expr_pop (ctx);
	    ctx->location = DWARF_VALUE_MEMORY;
	  }
	  goto no_push;

	case DW_OP_GNU_uninit:
	  if (op_ptr != op_end)
	    error (_("DWARF-2 expression error: DW_OP_GNU_uninit must always "
		     "be the very last op."));

	  ctx->initialized = 0;
	  goto no_push;

	default:
	  error (_("Unhandled dwarf expression opcode 0x%x"), op);
	}

      /* Most things push a result value.  */
      dwarf_expr_push (ctx, result);
    no_push:;
    }

  ctx->recursion_depth--;
  gdb_assert (ctx->recursion_depth >= 0);
}
This page took 0.052955 seconds and 5 git commands to generate.