Regenerate tree using Autoconf 2.64 and Automake 1.11.
[deliverable/binutils-gdb.git] / gdb / dwarf2expr.c
1 /* DWARF 2 Expression Evaluator.
2
3 Copyright (C) 2001, 2002, 2003, 2005, 2007, 2008, 2009
4 Free Software Foundation, Inc.
5
6 Contributed by Daniel Berlin (dan@dberlin.org)
7
8 This file is part of GDB.
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 3 of the License, or
13 (at your option) any later version.
14
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with this program. If not, see <http://www.gnu.org/licenses/>. */
22
23 #include "defs.h"
24 #include "symtab.h"
25 #include "gdbtypes.h"
26 #include "value.h"
27 #include "gdbcore.h"
28 #include "dwarf2.h"
29 #include "dwarf2expr.h"
30 #include "gdb_assert.h"
31
32 /* Local prototypes. */
33
34 static void execute_stack_op (struct dwarf_expr_context *,
35 gdb_byte *, gdb_byte *);
36 static struct type *unsigned_address_type (struct gdbarch *, int);
37
38 /* Create a new context for the expression evaluator. */
39
40 struct dwarf_expr_context *
41 new_dwarf_expr_context (void)
42 {
43 struct dwarf_expr_context *retval;
44 retval = xcalloc (1, sizeof (struct dwarf_expr_context));
45 retval->stack_len = 0;
46 retval->stack_allocated = 10;
47 retval->stack = xmalloc (retval->stack_allocated * sizeof (CORE_ADDR));
48 retval->num_pieces = 0;
49 retval->pieces = 0;
50 retval->max_recursion_depth = 0x100;
51 return retval;
52 }
53
54 /* Release the memory allocated to CTX. */
55
56 void
57 free_dwarf_expr_context (struct dwarf_expr_context *ctx)
58 {
59 xfree (ctx->stack);
60 xfree (ctx->pieces);
61 xfree (ctx);
62 }
63
/* Cleanup callback: ARG is really a struct dwarf_expr_context *;
   forward it to free_dwarf_expr_context.  */

static void
free_dwarf_expr_context_cleanup (void *arg)
{
  struct dwarf_expr_context *ctx = arg;

  free_dwarf_expr_context (ctx);
}
71
72 /* Return a cleanup that calls free_dwarf_expr_context. */
73
74 struct cleanup *
75 make_cleanup_free_dwarf_expr_context (struct dwarf_expr_context *ctx)
76 {
77 return make_cleanup (free_dwarf_expr_context_cleanup, ctx);
78 }
79
80 /* Expand the memory allocated to CTX's stack to contain at least
81 NEED more elements than are currently used. */
82
83 static void
84 dwarf_expr_grow_stack (struct dwarf_expr_context *ctx, size_t need)
85 {
86 if (ctx->stack_len + need > ctx->stack_allocated)
87 {
88 size_t newlen = ctx->stack_len + need + 10;
89 ctx->stack = xrealloc (ctx->stack,
90 newlen * sizeof (CORE_ADDR));
91 ctx->stack_allocated = newlen;
92 }
93 }
94
95 /* Push VALUE onto CTX's stack. */
96
97 void
98 dwarf_expr_push (struct dwarf_expr_context *ctx, CORE_ADDR value)
99 {
100 dwarf_expr_grow_stack (ctx, 1);
101 ctx->stack[ctx->stack_len++] = value;
102 }
103
104 /* Pop the top item off of CTX's stack. */
105
106 void
107 dwarf_expr_pop (struct dwarf_expr_context *ctx)
108 {
109 if (ctx->stack_len <= 0)
110 error (_("dwarf expression stack underflow"));
111 ctx->stack_len--;
112 }
113
114 /* Retrieve the N'th item on CTX's stack. */
115
116 CORE_ADDR
117 dwarf_expr_fetch (struct dwarf_expr_context *ctx, int n)
118 {
119 if (ctx->stack_len <= n)
120 error (_("Asked for position %d of stack, stack only has %d elements on it."),
121 n, ctx->stack_len);
122 return ctx->stack[ctx->stack_len - (1 + n)];
123
124 }
125
126 /* Add a new piece to CTX's piece list. */
127 static void
128 add_piece (struct dwarf_expr_context *ctx,
129 int in_reg, CORE_ADDR value, ULONGEST size)
130 {
131 struct dwarf_expr_piece *p;
132
133 ctx->num_pieces++;
134
135 if (ctx->pieces)
136 ctx->pieces = xrealloc (ctx->pieces,
137 (ctx->num_pieces
138 * sizeof (struct dwarf_expr_piece)));
139 else
140 ctx->pieces = xmalloc (ctx->num_pieces
141 * sizeof (struct dwarf_expr_piece));
142
143 p = &ctx->pieces[ctx->num_pieces - 1];
144 p->in_reg = in_reg;
145 p->value = value;
146 p->size = size;
147 }
148
149 /* Evaluate the expression at ADDR (LEN bytes long) using the context
150 CTX. */
151
152 void
153 dwarf_expr_eval (struct dwarf_expr_context *ctx, gdb_byte *addr, size_t len)
154 {
155 int old_recursion_depth = ctx->recursion_depth;
156
157 execute_stack_op (ctx, addr, addr + len);
158
159 /* CTX RECURSION_DEPTH becomes invalid if an exception was thrown here. */
160
161 gdb_assert (ctx->recursion_depth == old_recursion_depth);
162 }
163
164 /* Decode the unsigned LEB128 constant at BUF into the variable pointed to
165 by R, and return the new value of BUF. Verify that it doesn't extend
166 past BUF_END. */
167
168 gdb_byte *
169 read_uleb128 (gdb_byte *buf, gdb_byte *buf_end, ULONGEST * r)
170 {
171 unsigned shift = 0;
172 ULONGEST result = 0;
173 gdb_byte byte;
174
175 while (1)
176 {
177 if (buf >= buf_end)
178 error (_("read_uleb128: Corrupted DWARF expression."));
179
180 byte = *buf++;
181 result |= (byte & 0x7f) << shift;
182 if ((byte & 0x80) == 0)
183 break;
184 shift += 7;
185 }
186 *r = result;
187 return buf;
188 }
189
190 /* Decode the signed LEB128 constant at BUF into the variable pointed to
191 by R, and return the new value of BUF. Verify that it doesn't extend
192 past BUF_END. */
193
194 gdb_byte *
195 read_sleb128 (gdb_byte *buf, gdb_byte *buf_end, LONGEST * r)
196 {
197 unsigned shift = 0;
198 LONGEST result = 0;
199 gdb_byte byte;
200
201 while (1)
202 {
203 if (buf >= buf_end)
204 error (_("read_sleb128: Corrupted DWARF expression."));
205
206 byte = *buf++;
207 result |= (byte & 0x7f) << shift;
208 shift += 7;
209 if ((byte & 0x80) == 0)
210 break;
211 }
212 if (shift < (sizeof (*r) * 8) && (byte & 0x40) != 0)
213 result |= -(1 << shift);
214
215 *r = result;
216 return buf;
217 }
218
219 /* Read an address of size ADDR_SIZE from BUF, and verify that it
220 doesn't extend past BUF_END. */
221
222 CORE_ADDR
223 dwarf2_read_address (struct gdbarch *gdbarch, gdb_byte *buf,
224 gdb_byte *buf_end, int addr_size)
225 {
226 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
227 CORE_ADDR result;
228
229 if (buf_end - buf < addr_size)
230 error (_("dwarf2_read_address: Corrupted DWARF expression."));
231
232 /* For most architectures, calling extract_unsigned_integer() alone
233 is sufficient for extracting an address. However, some
234 architectures (e.g. MIPS) use signed addresses and using
235 extract_unsigned_integer() will not produce a correct
236 result. Make sure we invoke gdbarch_integer_to_address()
237 for those architectures which require it.
238
239 The use of `unsigned_address_type' in the code below refers to
240 the type of buf and has no bearing on the signedness of the
241 address being returned. */
242
243 if (gdbarch_integer_to_address_p (gdbarch))
244 return gdbarch_integer_to_address
245 (gdbarch, unsigned_address_type (gdbarch, addr_size), buf);
246
247 return extract_unsigned_integer (buf, addr_size, byte_order);
248 }
249
250 /* Return the type of an address of size ADDR_SIZE,
251 for unsigned arithmetic. */
252
253 static struct type *
254 unsigned_address_type (struct gdbarch *gdbarch, int addr_size)
255 {
256 switch (addr_size)
257 {
258 case 2:
259 return builtin_type (gdbarch)->builtin_uint16;
260 case 4:
261 return builtin_type (gdbarch)->builtin_uint32;
262 case 8:
263 return builtin_type (gdbarch)->builtin_uint64;
264 default:
265 internal_error (__FILE__, __LINE__,
266 _("Unsupported address size.\n"));
267 }
268 }
269
270 /* Return the type of an address of size ADDR_SIZE,
271 for signed arithmetic. */
272
273 static struct type *
274 signed_address_type (struct gdbarch *gdbarch, int addr_size)
275 {
276 switch (addr_size)
277 {
278 case 2:
279 return builtin_type (gdbarch)->builtin_int16;
280 case 4:
281 return builtin_type (gdbarch)->builtin_int32;
282 case 8:
283 return builtin_type (gdbarch)->builtin_int64;
284 default:
285 internal_error (__FILE__, __LINE__,
286 _("Unsupported address size.\n"));
287 }
288 }
289 \f
/* The engine for the expression evaluator.  Using the context in CTX,
   evaluate the expression between OP_PTR and OP_END.

   On return CTX describes the result: a value on CTX's stack, a
   register (CTX->in_reg set), or a composite (CTX->pieces).  Errors
   in the expression are reported through error ().  */

static void
execute_stack_op (struct dwarf_expr_context *ctx,
		  gdb_byte *op_ptr, gdb_byte *op_end)
{
  enum bfd_endian byte_order = gdbarch_byte_order (ctx->gdbarch);

  ctx->in_reg = 0;
  ctx->initialized = 1;		/* Default is initialized.  */

  /* Guard against malformed or malicious expressions that recurse
     without bound (e.g. via DW_OP_fbreg's nested evaluation).  */
  if (ctx->recursion_depth > ctx->max_recursion_depth)
    error (_("DWARF-2 expression error: Loop detected (%d)."),
	   ctx->recursion_depth);
  ctx->recursion_depth++;

  while (op_ptr < op_end)
    {
      enum dwarf_location_atom op = *op_ptr++;
      CORE_ADDR result;
      ULONGEST uoffset, reg;
      LONGEST offset;

      switch (op)
	{
	/* Literals 0..31: the value is encoded in the opcode itself.  */
	case DW_OP_lit0:
	case DW_OP_lit1:
	case DW_OP_lit2:
	case DW_OP_lit3:
	case DW_OP_lit4:
	case DW_OP_lit5:
	case DW_OP_lit6:
	case DW_OP_lit7:
	case DW_OP_lit8:
	case DW_OP_lit9:
	case DW_OP_lit10:
	case DW_OP_lit11:
	case DW_OP_lit12:
	case DW_OP_lit13:
	case DW_OP_lit14:
	case DW_OP_lit15:
	case DW_OP_lit16:
	case DW_OP_lit17:
	case DW_OP_lit18:
	case DW_OP_lit19:
	case DW_OP_lit20:
	case DW_OP_lit21:
	case DW_OP_lit22:
	case DW_OP_lit23:
	case DW_OP_lit24:
	case DW_OP_lit25:
	case DW_OP_lit26:
	case DW_OP_lit27:
	case DW_OP_lit28:
	case DW_OP_lit29:
	case DW_OP_lit30:
	case DW_OP_lit31:
	  result = op - DW_OP_lit0;
	  break;

	/* An absolute address operand, CTX->addr_size bytes wide.  */
	case DW_OP_addr:
	  result = dwarf2_read_address (ctx->gdbarch,
					op_ptr, op_end, ctx->addr_size);
	  op_ptr += ctx->addr_size;
	  break;

	/* Fixed-size constants, unsigned and signed, 1 to 8 bytes.  */
	case DW_OP_const1u:
	  result = extract_unsigned_integer (op_ptr, 1, byte_order);
	  op_ptr += 1;
	  break;
	case DW_OP_const1s:
	  result = extract_signed_integer (op_ptr, 1, byte_order);
	  op_ptr += 1;
	  break;
	case DW_OP_const2u:
	  result = extract_unsigned_integer (op_ptr, 2, byte_order);
	  op_ptr += 2;
	  break;
	case DW_OP_const2s:
	  result = extract_signed_integer (op_ptr, 2, byte_order);
	  op_ptr += 2;
	  break;
	case DW_OP_const4u:
	  result = extract_unsigned_integer (op_ptr, 4, byte_order);
	  op_ptr += 4;
	  break;
	case DW_OP_const4s:
	  result = extract_signed_integer (op_ptr, 4, byte_order);
	  op_ptr += 4;
	  break;
	case DW_OP_const8u:
	  result = extract_unsigned_integer (op_ptr, 8, byte_order);
	  op_ptr += 8;
	  break;
	case DW_OP_const8s:
	  result = extract_signed_integer (op_ptr, 8, byte_order);
	  op_ptr += 8;
	  break;
	/* Variable-length LEB128 constants.  */
	case DW_OP_constu:
	  op_ptr = read_uleb128 (op_ptr, op_end, &uoffset);
	  result = uoffset;
	  break;
	case DW_OP_consts:
	  op_ptr = read_sleb128 (op_ptr, op_end, &offset);
	  result = offset;
	  break;

	/* The DW_OP_reg operations are required to occur alone in
	   location expressions.  */
	case DW_OP_reg0:
	case DW_OP_reg1:
	case DW_OP_reg2:
	case DW_OP_reg3:
	case DW_OP_reg4:
	case DW_OP_reg5:
	case DW_OP_reg6:
	case DW_OP_reg7:
	case DW_OP_reg8:
	case DW_OP_reg9:
	case DW_OP_reg10:
	case DW_OP_reg11:
	case DW_OP_reg12:
	case DW_OP_reg13:
	case DW_OP_reg14:
	case DW_OP_reg15:
	case DW_OP_reg16:
	case DW_OP_reg17:
	case DW_OP_reg18:
	case DW_OP_reg19:
	case DW_OP_reg20:
	case DW_OP_reg21:
	case DW_OP_reg22:
	case DW_OP_reg23:
	case DW_OP_reg24:
	case DW_OP_reg25:
	case DW_OP_reg26:
	case DW_OP_reg27:
	case DW_OP_reg28:
	case DW_OP_reg29:
	case DW_OP_reg30:
	case DW_OP_reg31:
	  /* Only DW_OP_piece or DW_OP_GNU_uninit may follow; the
	     "result" is a register number, not a value.  */
	  if (op_ptr != op_end
	      && *op_ptr != DW_OP_piece
	      && *op_ptr != DW_OP_GNU_uninit)
	    error (_("DWARF-2 expression error: DW_OP_reg operations must be "
		     "used either alone or in conjuction with DW_OP_piece."));

	  result = op - DW_OP_reg0;
	  ctx->in_reg = 1;

	  break;

	/* Like DW_OP_regN but the register number is a ULEB128
	   operand.  */
	case DW_OP_regx:
	  op_ptr = read_uleb128 (op_ptr, op_end, &reg);
	  if (op_ptr != op_end && *op_ptr != DW_OP_piece)
	    error (_("DWARF-2 expression error: DW_OP_reg operations must be "
		     "used either alone or in conjuction with DW_OP_piece."));

	  result = reg;
	  ctx->in_reg = 1;
	  break;

	/* Push the contents of register N plus a SLEB128 offset.  */
	case DW_OP_breg0:
	case DW_OP_breg1:
	case DW_OP_breg2:
	case DW_OP_breg3:
	case DW_OP_breg4:
	case DW_OP_breg5:
	case DW_OP_breg6:
	case DW_OP_breg7:
	case DW_OP_breg8:
	case DW_OP_breg9:
	case DW_OP_breg10:
	case DW_OP_breg11:
	case DW_OP_breg12:
	case DW_OP_breg13:
	case DW_OP_breg14:
	case DW_OP_breg15:
	case DW_OP_breg16:
	case DW_OP_breg17:
	case DW_OP_breg18:
	case DW_OP_breg19:
	case DW_OP_breg20:
	case DW_OP_breg21:
	case DW_OP_breg22:
	case DW_OP_breg23:
	case DW_OP_breg24:
	case DW_OP_breg25:
	case DW_OP_breg26:
	case DW_OP_breg27:
	case DW_OP_breg28:
	case DW_OP_breg29:
	case DW_OP_breg30:
	case DW_OP_breg31:
	  {
	    op_ptr = read_sleb128 (op_ptr, op_end, &offset);
	    result = (ctx->read_reg) (ctx->baton, op - DW_OP_breg0);
	    result += offset;
	  }
	  break;
	/* As DW_OP_bregN, with the register number as a ULEB128
	   operand.  */
	case DW_OP_bregx:
	  {
	    op_ptr = read_uleb128 (op_ptr, op_end, &reg);
	    op_ptr = read_sleb128 (op_ptr, op_end, &offset);
	    result = (ctx->read_reg) (ctx->baton, reg);
	    result += offset;
	  }
	  break;
	/* Frame-base-relative: evaluate the frame base's own location
	   expression, then add a SLEB128 offset.  */
	case DW_OP_fbreg:
	  {
	    gdb_byte *datastart;
	    size_t datalen;
	    unsigned int before_stack_len;

	    op_ptr = read_sleb128 (op_ptr, op_end, &offset);
	    /* Rather than create a whole new context, we simply
	       record the stack length before execution, then reset it
	       afterwards, effectively erasing whatever the recursive
	       call put there.  */
	    before_stack_len = ctx->stack_len;
	    /* FIXME: cagney/2003-03-26: This code should be using
	       get_frame_base_address(), and then implement a dwarf2
	       specific this_base method.  */
	    (ctx->get_frame_base) (ctx->baton, &datastart, &datalen);
	    dwarf_expr_eval (ctx, datastart, datalen);
	    result = dwarf_expr_fetch (ctx, 0);
	    /* If the frame-base expression named a register, read its
	       contents to obtain the actual base address.  */
	    if (ctx->in_reg)
	      result = (ctx->read_reg) (ctx->baton, result);
	    result = result + offset;
	    ctx->stack_len = before_stack_len;
	    ctx->in_reg = 0;
	  }
	  break;
	/* Stack-manipulation operators.  */
	case DW_OP_dup:
	  result = dwarf_expr_fetch (ctx, 0);
	  break;

	case DW_OP_drop:
	  dwarf_expr_pop (ctx);
	  goto no_push;

	case DW_OP_pick:
	  /* The 1-byte operand selects the stack entry to duplicate.  */
	  offset = *op_ptr++;
	  result = dwarf_expr_fetch (ctx, offset);
	  break;

	case DW_OP_swap:
	  {
	    CORE_ADDR t1, t2;

	    if (ctx->stack_len < 2)
	      error (_("Not enough elements for DW_OP_swap. Need 2, have %d."),
		     ctx->stack_len);
	    t1 = ctx->stack[ctx->stack_len - 1];
	    t2 = ctx->stack[ctx->stack_len - 2];
	    ctx->stack[ctx->stack_len - 1] = t2;
	    ctx->stack[ctx->stack_len - 2] = t1;
	    goto no_push;
	  }

	case DW_OP_over:
	  result = dwarf_expr_fetch (ctx, 1);
	  break;

	case DW_OP_rot:
	  {
	    CORE_ADDR t1, t2, t3;

	    if (ctx->stack_len < 3)
	      error (_("Not enough elements for DW_OP_rot. Need 3, have %d."),
		     ctx->stack_len);
	    t1 = ctx->stack[ctx->stack_len - 1];
	    t2 = ctx->stack[ctx->stack_len - 2];
	    t3 = ctx->stack[ctx->stack_len - 3];
	    ctx->stack[ctx->stack_len - 1] = t2;
	    ctx->stack[ctx->stack_len - 2] = t3;
	    ctx->stack[ctx->stack_len - 3] = t1;
	    goto no_push;
	  }

	case DW_OP_deref:
	case DW_OP_deref_size:
	case DW_OP_abs:
	case DW_OP_neg:
	case DW_OP_not:
	case DW_OP_plus_uconst:
	  /* Unary operations.  */
	  result = dwarf_expr_fetch (ctx, 0);
	  dwarf_expr_pop (ctx);

	  switch (op)
	    {
	    case DW_OP_deref:
	      {
		/* Read a target-address-sized value from the address
		   on top of the stack.  */
		gdb_byte *buf = alloca (ctx->addr_size);
		(ctx->read_mem) (ctx->baton, buf, result, ctx->addr_size);
		result = dwarf2_read_address (ctx->gdbarch,
					      buf, buf + ctx->addr_size,
					      ctx->addr_size);
	      }
	      break;

	    case DW_OP_deref_size:
	      {
		/* Like DW_OP_deref, but the 1-byte operand gives the
		   size to read.  */
		int addr_size = *op_ptr++;
		gdb_byte *buf = alloca (addr_size);
		(ctx->read_mem) (ctx->baton, buf, result, addr_size);
		result = dwarf2_read_address (ctx->gdbarch,
					      buf, buf + addr_size,
					      addr_size);
	      }
	      break;

	    case DW_OP_abs:
	      /* NOTE(review): the (signed int) cast examines only the
		 low 32 bits of RESULT; for a 64-bit CORE_ADDR this
		 looks like it can misjudge the sign — confirm whether
		 a width-aware signedness test is intended here.  */
	      if ((signed int) result < 0)
		result = -result;
	      break;
	    case DW_OP_neg:
	      result = -result;
	      break;
	    case DW_OP_not:
	      result = ~result;
	      break;
	    case DW_OP_plus_uconst:
	      /* Add a ULEB128 constant to the popped value.  */
	      op_ptr = read_uleb128 (op_ptr, op_end, &reg);
	      result += reg;
	      break;
	    }
	  break;

	case DW_OP_and:
	case DW_OP_div:
	case DW_OP_minus:
	case DW_OP_mod:
	case DW_OP_mul:
	case DW_OP_or:
	case DW_OP_plus:
	case DW_OP_shl:
	case DW_OP_shr:
	case DW_OP_shra:
	case DW_OP_xor:
	case DW_OP_le:
	case DW_OP_ge:
	case DW_OP_eq:
	case DW_OP_lt:
	case DW_OP_gt:
	case DW_OP_ne:
	  {
	    /* Binary operations.  Use the value engine to do computations in
	       the right width.  */
	    CORE_ADDR first, second;
	    enum exp_opcode binop;
	    struct value *val1, *val2;
	    struct type *stype, *utype;

	    /* The top of the stack is the second operand.  */
	    second = dwarf_expr_fetch (ctx, 0);
	    dwarf_expr_pop (ctx);

	    first = dwarf_expr_fetch (ctx, 0);
	    dwarf_expr_pop (ctx);

	    utype = unsigned_address_type (ctx->gdbarch, ctx->addr_size);
	    stype = signed_address_type (ctx->gdbarch, ctx->addr_size);
	    /* NOTE(review): both operands are built with the unsigned
	       type, including for the comparison operators — check
	       against the DWARF spec, which describes comparisons as
	       signed.  */
	    val1 = value_from_longest (utype, first);
	    val2 = value_from_longest (utype, second);

	    switch (op)
	      {
	      case DW_OP_and:
		binop = BINOP_BITWISE_AND;
		break;
	      case DW_OP_div:
		binop = BINOP_DIV;
		break;
	      case DW_OP_minus:
		binop = BINOP_SUB;
		break;
	      case DW_OP_mod:
		binop = BINOP_MOD;
		break;
	      case DW_OP_mul:
		binop = BINOP_MUL;
		break;
	      case DW_OP_or:
		binop = BINOP_BITWISE_IOR;
		break;
	      case DW_OP_plus:
		binop = BINOP_ADD;
		break;
	      case DW_OP_shl:
		binop = BINOP_LSH;
		break;
	      case DW_OP_shr:
		binop = BINOP_RSH;
		break;
	      case DW_OP_shra:
		/* Arithmetic right shift: reinterpret the first
		   operand as signed so the value engine shifts in
		   sign bits.  */
		binop = BINOP_RSH;
		val1 = value_from_longest (stype, first);
		break;
	      case DW_OP_xor:
		binop = BINOP_BITWISE_XOR;
		break;
	      case DW_OP_le:
		binop = BINOP_LEQ;
		break;
	      case DW_OP_ge:
		binop = BINOP_GEQ;
		break;
	      case DW_OP_eq:
		binop = BINOP_EQUAL;
		break;
	      case DW_OP_lt:
		binop = BINOP_LESS;
		break;
	      case DW_OP_gt:
		binop = BINOP_GTR;
		break;
	      case DW_OP_ne:
		binop = BINOP_NOTEQUAL;
		break;
	      default:
		internal_error (__FILE__, __LINE__,
				_("Can't be reached."));
	      }
	    result = value_as_long (value_binop (val1, val2, binop));
	  }
	  break;

	case DW_OP_GNU_push_tls_address:
	  /* Variable is at a constant offset in the thread-local
	     storage block into the objfile for the current thread and
	     the dynamic linker module containing this expression.  The
	     top of the stack holds the offset from the beginning of
	     the thread control block at which the variable is located;
	     we convert it to the thread-local address here.  Nothing
	     should follow this operator, so the top of stack would be
	     returned.  */
	  result = dwarf_expr_fetch (ctx, 0);
	  dwarf_expr_pop (ctx);
	  result = (ctx->get_tls_address) (ctx->baton, result);
	  break;

	/* Unconditional branch by a signed 2-byte offset, relative to
	   the byte following the operand.  */
	case DW_OP_skip:
	  offset = extract_signed_integer (op_ptr, 2, byte_order);
	  op_ptr += 2;
	  op_ptr += offset;
	  goto no_push;

	/* Conditional branch: pop the top of the stack and branch if
	   it is nonzero.  */
	case DW_OP_bra:
	  offset = extract_signed_integer (op_ptr, 2, byte_order);
	  op_ptr += 2;
	  if (dwarf_expr_fetch (ctx, 0) != 0)
	    op_ptr += offset;
	  dwarf_expr_pop (ctx);
	  goto no_push;

	case DW_OP_nop:
	  goto no_push;

	case DW_OP_piece:
	  {
	    ULONGEST size;
	    CORE_ADDR addr_or_regnum;

	    /* Record the piece.  */
	    op_ptr = read_uleb128 (op_ptr, op_end, &size);
	    addr_or_regnum = dwarf_expr_fetch (ctx, 0);
	    add_piece (ctx, ctx->in_reg, addr_or_regnum, size);

	    /* Pop off the address/regnum, and clear the in_reg flag.  */
	    dwarf_expr_pop (ctx);
	    ctx->in_reg = 0;
	  }
	  goto no_push;

	case DW_OP_GNU_uninit:
	  if (op_ptr != op_end)
	    error (_("DWARF-2 expression error: DW_OP_GNU_uninit must always "
		     "be the very last op."));

	  ctx->initialized = 0;
	  goto no_push;

	default:
	  error (_("Unhandled dwarf expression opcode 0x%x"), op);
	}

      /* Most things push a result value.  */
      dwarf_expr_push (ctx, result);
    no_push:;
    }

  ctx->recursion_depth--;
  gdb_assert (ctx->recursion_depth >= 0);
}
This page took 0.050596 seconds and 4 git commands to generate.