Remove unnecessary null_cleanup
[deliverable/binutils-gdb.git] / gdb / dwarf2expr.c
1 /* DWARF 2 Expression Evaluator.
2
3 Copyright (C) 2001-2016 Free Software Foundation, Inc.
4
5 Contributed by Daniel Berlin (dan@dberlin.org)
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "symtab.h"
24 #include "gdbtypes.h"
25 #include "value.h"
26 #include "gdbcore.h"
27 #include "dwarf2.h"
28 #include "dwarf2expr.h"
29 #include "dwarf2loc.h"
30
31 /* Local prototypes. */
32
33 static void execute_stack_op (struct dwarf_expr_context *,
34 const gdb_byte *, const gdb_byte *);
35
36 /* Cookie for gdbarch data. */
37
38 static struct gdbarch_data *dwarf_arch_cookie;
39
/* This holds gdbarch-specific types used by the DWARF expression
   evaluator.  See comments in execute_stack_op.  */

struct dwarf_gdbarch_types
{
  /* Cache of the "untyped" signed integer types used for DWARF stack
     values, one per supported address size (index 0: 16-bit,
     1: 32-bit, 2: 64-bit).  Entries are created lazily by
     dwarf_expr_address_type.  */
  struct type *dw_types[3];
};
47
48 /* Allocate and fill in dwarf_gdbarch_types for an arch. */
49
50 static void *
51 dwarf_gdbarch_types_init (struct gdbarch *gdbarch)
52 {
53 struct dwarf_gdbarch_types *types
54 = GDBARCH_OBSTACK_ZALLOC (gdbarch, struct dwarf_gdbarch_types);
55
56 /* The types themselves are lazily initialized. */
57
58 return types;
59 }
60
61 /* Return the type used for DWARF operations where the type is
62 unspecified in the DWARF spec. Only certain sizes are
63 supported. */
64
65 static struct type *
66 dwarf_expr_address_type (struct dwarf_expr_context *ctx)
67 {
68 struct dwarf_gdbarch_types *types
69 = (struct dwarf_gdbarch_types *) gdbarch_data (ctx->gdbarch,
70 dwarf_arch_cookie);
71 int ndx;
72
73 if (ctx->addr_size == 2)
74 ndx = 0;
75 else if (ctx->addr_size == 4)
76 ndx = 1;
77 else if (ctx->addr_size == 8)
78 ndx = 2;
79 else
80 error (_("Unsupported address size in DWARF expressions: %d bits"),
81 8 * ctx->addr_size);
82
83 if (types->dw_types[ndx] == NULL)
84 types->dw_types[ndx]
85 = arch_integer_type (ctx->gdbarch,
86 8 * ctx->addr_size,
87 0, "<signed DWARF address type>");
88
89 return types->dw_types[ndx];
90 }
91
92 /* Create a new context for the expression evaluator. */
93
94 struct dwarf_expr_context *
95 new_dwarf_expr_context (void)
96 {
97 struct dwarf_expr_context *retval;
98
99 retval = XCNEW (struct dwarf_expr_context);
100 retval->stack_len = 0;
101 retval->stack_allocated = 10;
102 retval->stack = XNEWVEC (struct dwarf_stack_value, retval->stack_allocated);
103 retval->num_pieces = 0;
104 retval->pieces = 0;
105 retval->max_recursion_depth = 0x100;
106 return retval;
107 }
108
/* Release the memory allocated to CTX.  Frees the owned stack and
   piece arrays, then CTX itself; CTX must not be used afterwards.  */

void
free_dwarf_expr_context (struct dwarf_expr_context *ctx)
{
  xfree (ctx->stack);
  xfree (ctx->pieces);
  xfree (ctx);
}
118
/* Helper for make_cleanup_free_dwarf_expr_context: cleanup-callback
   wrapper that forwards its void* argument to
   free_dwarf_expr_context.  */

static void
free_dwarf_expr_context_cleanup (void *arg)
{
  free_dwarf_expr_context ((struct dwarf_expr_context *) arg);
}
126
/* Return a cleanup that calls free_dwarf_expr_context on CTX when the
   cleanup chain is run.  */

struct cleanup *
make_cleanup_free_dwarf_expr_context (struct dwarf_expr_context *ctx)
{
  return make_cleanup (free_dwarf_expr_context_cleanup, ctx);
}
134
135 /* Expand the memory allocated to CTX's stack to contain at least
136 NEED more elements than are currently used. */
137
138 static void
139 dwarf_expr_grow_stack (struct dwarf_expr_context *ctx, size_t need)
140 {
141 if (ctx->stack_len + need > ctx->stack_allocated)
142 {
143 size_t newlen = ctx->stack_len + need + 10;
144
145 ctx->stack = XRESIZEVEC (struct dwarf_stack_value, ctx->stack, newlen);
146 ctx->stack_allocated = newlen;
147 }
148 }
149
150 /* Push VALUE onto CTX's stack. */
151
152 static void
153 dwarf_expr_push (struct dwarf_expr_context *ctx, struct value *value,
154 int in_stack_memory)
155 {
156 struct dwarf_stack_value *v;
157
158 dwarf_expr_grow_stack (ctx, 1);
159 v = &ctx->stack[ctx->stack_len++];
160 v->value = value;
161 v->in_stack_memory = in_stack_memory;
162 }
163
164 /* Push VALUE onto CTX's stack. */
165
166 void
167 dwarf_expr_push_address (struct dwarf_expr_context *ctx, CORE_ADDR value,
168 int in_stack_memory)
169 {
170 dwarf_expr_push (ctx,
171 value_from_ulongest (dwarf_expr_address_type (ctx), value),
172 in_stack_memory);
173 }
174
/* Pop the top item off of CTX's stack.  Throws an error on
   underflow.  */

static void
dwarf_expr_pop (struct dwarf_expr_context *ctx)
{
  if (ctx->stack_len <= 0)
    error (_("dwarf expression stack underflow"));
  ctx->stack_len--;
}
184
/* Retrieve the N'th item on CTX's stack, counted from the top
   (N == 0 is the top of the stack).  The item is not popped.
   Throws an error if the stack has N or fewer elements.  */

struct value *
dwarf_expr_fetch (struct dwarf_expr_context *ctx, int n)
{
  if (ctx->stack_len <= n)
    error (_("Asked for position %d of stack, "
	     "stack only has %d elements on it."),
	   n, ctx->stack_len);
  return ctx->stack[ctx->stack_len - (1 + n)].value;
}
196
197 /* Require that TYPE be an integral type; throw an exception if not. */
198
199 static void
200 dwarf_require_integral (struct type *type)
201 {
202 if (TYPE_CODE (type) != TYPE_CODE_INT
203 && TYPE_CODE (type) != TYPE_CODE_CHAR
204 && TYPE_CODE (type) != TYPE_CODE_BOOL)
205 error (_("integral type expected in DWARF expression"));
206 }
207
/* Return the unsigned form of TYPE.  TYPE is necessarily an integral
   type.  Only 1-, 2-, 4- and 8-byte types are supported; any other
   length throws an error.  */

static struct type *
get_unsigned_type (struct gdbarch *gdbarch, struct type *type)
{
  /* Select the builtin unsigned type of matching width.  */
  switch (TYPE_LENGTH (type))
    {
    case 1:
      return builtin_type (gdbarch)->builtin_uint8;
    case 2:
      return builtin_type (gdbarch)->builtin_uint16;
    case 4:
      return builtin_type (gdbarch)->builtin_uint32;
    case 8:
      return builtin_type (gdbarch)->builtin_uint64;
    default:
      error (_("no unsigned variant found for type, while evaluating "
	       "DWARF expression"));
    }
}
229
/* Return the signed form of TYPE.  TYPE is necessarily an integral
   type.  Only 1-, 2-, 4- and 8-byte types are supported; any other
   length throws an error.  */

static struct type *
get_signed_type (struct gdbarch *gdbarch, struct type *type)
{
  /* Select the builtin signed type of matching width.  */
  switch (TYPE_LENGTH (type))
    {
    case 1:
      return builtin_type (gdbarch)->builtin_int8;
    case 2:
      return builtin_type (gdbarch)->builtin_int16;
    case 4:
      return builtin_type (gdbarch)->builtin_int32;
    case 8:
      return builtin_type (gdbarch)->builtin_int64;
    default:
      error (_("no signed variant found for type, while evaluating "
	       "DWARF expression"));
    }
}
251
/* Retrieve the N'th item on CTX's stack, converted to an address.
   N == 0 is the top of the stack; the item is not popped.  The value
   must be of integral type.  */

CORE_ADDR
dwarf_expr_fetch_address (struct dwarf_expr_context *ctx, int n)
{
  struct value *result_val = dwarf_expr_fetch (ctx, n);
  enum bfd_endian byte_order = gdbarch_byte_order (ctx->gdbarch);
  ULONGEST result;

  dwarf_require_integral (value_type (result_val));
  result = extract_unsigned_integer (value_contents (result_val),
				     TYPE_LENGTH (value_type (result_val)),
				     byte_order);

  /* For most architectures, calling extract_unsigned_integer() alone
     is sufficient for extracting an address.  However, some
     architectures (e.g. MIPS) use signed addresses and using
     extract_unsigned_integer() will not produce a correct
     result.  Make sure we invoke gdbarch_integer_to_address()
     for those architectures which require it.  */
  if (gdbarch_integer_to_address_p (ctx->gdbarch))
    {
      /* Re-serialize the value at the context's address size so the
	 gdbarch hook sees bytes of the expected width.  */
      gdb_byte *buf = (gdb_byte *) alloca (ctx->addr_size);
      struct type *int_type = get_unsigned_type (ctx->gdbarch,
						 value_type (result_val));

      store_unsigned_integer (buf, ctx->addr_size, byte_order, result);
      return gdbarch_integer_to_address (ctx->gdbarch, int_type, buf);
    }

  return (CORE_ADDR) result;
}
284
/* Retrieve the in_stack_memory flag of the N'th item on CTX's stack,
   counted from the top (N == 0 is the top).  Throws an error if the
   stack has N or fewer elements.  */

int
dwarf_expr_fetch_in_stack_memory (struct dwarf_expr_context *ctx, int n)
{
  if (ctx->stack_len <= n)
    error (_("Asked for position %d of stack, "
	     "stack only has %d elements on it."),
	   n, ctx->stack_len);
  return ctx->stack[ctx->stack_len - (1 + n)].in_stack_memory;
}
296
/* Return non-zero if the expression stack of CTX is empty.  */

static int
dwarf_expr_stack_empty_p (struct dwarf_expr_context *ctx)
{
  return ctx->stack_len == 0;
}
304
/* Add a new piece of SIZE (bits or bytes, depending on the piece
   operator) at bit OFFSET to CTX's piece list.  The piece records the
   context's current location kind; the location-specific payload is
   taken either from CTX itself or from the top of the value stack,
   depending on that kind.  */
static void
add_piece (struct dwarf_expr_context *ctx, ULONGEST size, ULONGEST offset)
{
  struct dwarf_expr_piece *p;

  /* Grow the piece array by one and point P at the new entry.  */
  ctx->num_pieces++;

  ctx->pieces
    = XRESIZEVEC (struct dwarf_expr_piece, ctx->pieces, ctx->num_pieces);

  p = &ctx->pieces[ctx->num_pieces - 1];
  p->location = ctx->location;
  p->size = size;
  p->offset = offset;

  if (p->location == DWARF_VALUE_LITERAL)
    {
      /* Literal data was stashed in the context by DW_OP_implicit_value.  */
      p->v.literal.data = ctx->data;
      p->v.literal.length = ctx->len;
    }
  else if (dwarf_expr_stack_empty_p (ctx))
    {
      /* An empty stack at piece time means this piece has no
	 location: it was optimized out.  */
      p->location = DWARF_VALUE_OPTIMIZED_OUT;
      /* Also reset the context's location, for our callers.  This is
	 a somewhat strange approach, but this lets us avoid setting
	 the location to DWARF_VALUE_MEMORY in all the individual
	 cases in the evaluator.  */
      ctx->location = DWARF_VALUE_OPTIMIZED_OUT;
    }
  else if (p->location == DWARF_VALUE_MEMORY)
    {
      p->v.mem.addr = dwarf_expr_fetch_address (ctx, 0);
      p->v.mem.in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, 0);
    }
  else if (p->location == DWARF_VALUE_IMPLICIT_POINTER)
    {
      /* CTX->len was set to the referred-to DIE's section offset by
	 DW_OP_GNU_implicit_pointer.  */
      p->v.ptr.die.sect_off = ctx->len;
      p->v.ptr.offset = value_as_long (dwarf_expr_fetch (ctx, 0));
    }
  else if (p->location == DWARF_VALUE_REGISTER)
    p->v.regno = value_as_long (dwarf_expr_fetch (ctx, 0));
  else
    {
      /* DWARF_VALUE_STACK: the piece's value is the top of stack.  */
      p->v.value = dwarf_expr_fetch (ctx, 0);
    }
}
352
/* Evaluate the expression at ADDR (LEN bytes long) using the context
   CTX.  On return, the result is on CTX's value stack and/or in its
   piece list.  */

void
dwarf_expr_eval (struct dwarf_expr_context *ctx, const gdb_byte *addr,
		 size_t len)
{
  int old_recursion_depth = ctx->recursion_depth;

  execute_stack_op (ctx, addr, addr + len);

  /* CTX RECURSION_DEPTH becomes invalid if an exception was thrown here.  */

  gdb_assert (ctx->recursion_depth == old_recursion_depth);
}
368
369 /* Helper to read a uleb128 value or throw an error. */
370
371 const gdb_byte *
372 safe_read_uleb128 (const gdb_byte *buf, const gdb_byte *buf_end,
373 uint64_t *r)
374 {
375 buf = gdb_read_uleb128 (buf, buf_end, r);
376 if (buf == NULL)
377 error (_("DWARF expression error: ran off end of buffer reading uleb128 value"));
378 return buf;
379 }
380
381 /* Helper to read a sleb128 value or throw an error. */
382
383 const gdb_byte *
384 safe_read_sleb128 (const gdb_byte *buf, const gdb_byte *buf_end,
385 int64_t *r)
386 {
387 buf = gdb_read_sleb128 (buf, buf_end, r);
388 if (buf == NULL)
389 error (_("DWARF expression error: ran off end of buffer reading sleb128 value"));
390 return buf;
391 }
392
393 const gdb_byte *
394 safe_skip_leb128 (const gdb_byte *buf, const gdb_byte *buf_end)
395 {
396 buf = gdb_skip_leb128 (buf, buf_end);
397 if (buf == NULL)
398 error (_("DWARF expression error: ran off end of buffer reading leb128 value"));
399 return buf;
400 }
401 \f
402
403 /* Check that the current operator is either at the end of an
404 expression, or that it is followed by a composition operator or by
405 DW_OP_GNU_uninit (which should terminate the expression). */
406
407 void
408 dwarf_expr_require_composition (const gdb_byte *op_ptr, const gdb_byte *op_end,
409 const char *op_name)
410 {
411 if (op_ptr != op_end && *op_ptr != DW_OP_piece && *op_ptr != DW_OP_bit_piece
412 && *op_ptr != DW_OP_GNU_uninit)
413 error (_("DWARF-2 expression error: `%s' operations must be "
414 "used either alone or in conjunction with DW_OP_piece "
415 "or DW_OP_bit_piece."),
416 op_name);
417 }
418
/* Return true iff the types T1 and T2 are "the same".  This only does
   checks that might reasonably be needed to compare DWARF base
   types.  */

static int
base_types_equal_p (struct type *t1, struct type *t2)
{
  return (TYPE_CODE (t1) == TYPE_CODE (t2)
	  && TYPE_UNSIGNED (t1) == TYPE_UNSIGNED (t2)
	  && TYPE_LENGTH (t1) == TYPE_LENGTH (t2));
}
432
433 /* A convenience function to call get_base_type on CTX and return the
434 result. DIE is the DIE whose type we need. SIZE is non-zero if
435 this function should verify that the resulting type has the correct
436 size. */
437
438 static struct type *
439 dwarf_get_base_type (struct dwarf_expr_context *ctx, cu_offset die, int size)
440 {
441 struct type *result;
442
443 if (ctx->funcs->get_base_type)
444 {
445 result = ctx->funcs->get_base_type (ctx, die);
446 if (result == NULL)
447 error (_("Could not find type for DW_OP_GNU_const_type"));
448 if (size != 0 && TYPE_LENGTH (result) != size)
449 error (_("DW_OP_GNU_const_type has different sizes for type and data"));
450 }
451 else
452 /* Anything will do. */
453 result = builtin_type (ctx->gdbarch)->builtin_int;
454
455 return result;
456 }
457
/* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_reg* return the
   DWARF register number.  Otherwise return -1.  Accepted forms are a
   lone DW_OP_reg0..DW_OP_reg31, a DW_OP_regx with a uleb128 register
   number, or a DW_OP_GNU_regval_type (whose type DIE operand is
   skipped).  */

int
dwarf_block_to_dwarf_reg (const gdb_byte *buf, const gdb_byte *buf_end)
{
  uint64_t dwarf_reg;

  if (buf_end <= buf)
    return -1;
  if (*buf >= DW_OP_reg0 && *buf <= DW_OP_reg31)
    {
      /* The single-byte form must be the whole block.  */
      if (buf_end - buf != 1)
	return -1;
      return *buf - DW_OP_reg0;
    }

  if (*buf == DW_OP_GNU_regval_type)
    {
      buf++;
      buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
      if (buf == NULL)
	return -1;
      /* Skip the type DIE offset operand.  */
      buf = gdb_skip_leb128 (buf, buf_end);
      if (buf == NULL)
	return -1;
    }
  else if (*buf == DW_OP_regx)
    {
      buf++;
      buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
      if (buf == NULL)
	return -1;
    }
  else
    return -1;
  /* The opcode must consume the whole block, and the register number
     must fit in the int return type.  */
  if (buf != buf_end || (int) dwarf_reg != dwarf_reg)
    return -1;
  return dwarf_reg;
}
498
499 /* If <BUF..BUF_END] contains DW_FORM_block* with just DW_OP_breg*(0) and
500 DW_OP_deref* return the DWARF register number. Otherwise return -1.
501 DEREF_SIZE_RETURN contains -1 for DW_OP_deref; otherwise it contains the
502 size from DW_OP_deref_size. */
503
504 int
505 dwarf_block_to_dwarf_reg_deref (const gdb_byte *buf, const gdb_byte *buf_end,
506 CORE_ADDR *deref_size_return)
507 {
508 uint64_t dwarf_reg;
509 int64_t offset;
510
511 if (buf_end <= buf)
512 return -1;
513
514 if (*buf >= DW_OP_breg0 && *buf <= DW_OP_breg31)
515 {
516 dwarf_reg = *buf - DW_OP_breg0;
517 buf++;
518 if (buf >= buf_end)
519 return -1;
520 }
521 else if (*buf == DW_OP_bregx)
522 {
523 buf++;
524 buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
525 if (buf == NULL)
526 return -1;
527 if ((int) dwarf_reg != dwarf_reg)
528 return -1;
529 }
530 else
531 return -1;
532
533 buf = gdb_read_sleb128 (buf, buf_end, &offset);
534 if (buf == NULL)
535 return -1;
536 if (offset != 0)
537 return -1;
538
539 if (*buf == DW_OP_deref)
540 {
541 buf++;
542 *deref_size_return = -1;
543 }
544 else if (*buf == DW_OP_deref_size)
545 {
546 buf++;
547 if (buf >= buf_end)
548 return -1;
549 *deref_size_return = *buf++;
550 }
551 else
552 return -1;
553
554 if (buf != buf_end)
555 return -1;
556
557 return dwarf_reg;
558 }
559
/* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_fbreg(X) fill
   in FB_OFFSET_RETURN with the X offset and return 1.  Otherwise return 0.  */

int
dwarf_block_to_fb_offset (const gdb_byte *buf, const gdb_byte *buf_end,
			  CORE_ADDR *fb_offset_return)
{
  int64_t fb_offset;

  if (buf_end <= buf)
    return 0;

  if (*buf != DW_OP_fbreg)
    return 0;
  buf++;

  buf = gdb_read_sleb128 (buf, buf_end, &fb_offset);
  if (buf == NULL)
    return 0;
  *fb_offset_return = fb_offset;
  /* The opcode must consume the whole block, and the offset must
     survive the round-trip through CORE_ADDR without loss.  */
  if (buf != buf_end || fb_offset != (LONGEST) *fb_offset_return)
    return 0;

  return 1;
}
585
/* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_bregSP(X) fill
   in SP_OFFSET_RETURN with the X offset and return 1.  Otherwise return 0.
   The matched SP register number depends on GDBARCH.  */

int
dwarf_block_to_sp_offset (struct gdbarch *gdbarch, const gdb_byte *buf,
			  const gdb_byte *buf_end, CORE_ADDR *sp_offset_return)
{
  uint64_t dwarf_reg;
  int64_t sp_offset;

  if (buf_end <= buf)
    return 0;
  if (*buf >= DW_OP_breg0 && *buf <= DW_OP_breg31)
    {
      dwarf_reg = *buf - DW_OP_breg0;
      buf++;
    }
  else
    {
      if (*buf != DW_OP_bregx)
	return 0;
      buf++;
      buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
      if (buf == NULL)
	return 0;
    }

  /* Only match if the base register is this arch's stack pointer.  */
  if (dwarf_reg_to_regnum (gdbarch, dwarf_reg)
      != gdbarch_sp_regnum (gdbarch))
    return 0;

  buf = gdb_read_sleb128 (buf, buf_end, &sp_offset);
  if (buf == NULL)
    return 0;
  *sp_offset_return = sp_offset;
  /* The opcode must consume the whole block, and the offset must
     survive the round-trip through CORE_ADDR without loss.  */
  if (buf != buf_end || sp_offset != (LONGEST) *sp_offset_return)
    return 0;

  return 1;
}
627
628 /* The engine for the expression evaluator. Using the context in CTX,
629 evaluate the expression between OP_PTR and OP_END. */
630
631 static void
632 execute_stack_op (struct dwarf_expr_context *ctx,
633 const gdb_byte *op_ptr, const gdb_byte *op_end)
634 {
635 enum bfd_endian byte_order = gdbarch_byte_order (ctx->gdbarch);
636 /* Old-style "untyped" DWARF values need special treatment in a
637 couple of places, specifically DW_OP_mod and DW_OP_shr. We need
638 a special type for these values so we can distinguish them from
639 values that have an explicit type, because explicitly-typed
640 values do not need special treatment. This special type must be
641 different (in the `==' sense) from any base type coming from the
642 CU. */
643 struct type *address_type = dwarf_expr_address_type (ctx);
644
645 ctx->location = DWARF_VALUE_MEMORY;
646 ctx->initialized = 1; /* Default is initialized. */
647
648 if (ctx->recursion_depth > ctx->max_recursion_depth)
649 error (_("DWARF-2 expression error: Loop detected (%d)."),
650 ctx->recursion_depth);
651 ctx->recursion_depth++;
652
653 while (op_ptr < op_end)
654 {
655 enum dwarf_location_atom op = (enum dwarf_location_atom) *op_ptr++;
656 ULONGEST result;
657 /* Assume the value is not in stack memory.
658 Code that knows otherwise sets this to 1.
659 Some arithmetic on stack addresses can probably be assumed to still
660 be a stack address, but we skip this complication for now.
661 This is just an optimization, so it's always ok to punt
662 and leave this as 0. */
663 int in_stack_memory = 0;
664 uint64_t uoffset, reg;
665 int64_t offset;
666 struct value *result_val = NULL;
667
668 /* The DWARF expression might have a bug causing an infinite
669 loop. In that case, quitting is the only way out. */
670 QUIT;
671
672 switch (op)
673 {
674 case DW_OP_lit0:
675 case DW_OP_lit1:
676 case DW_OP_lit2:
677 case DW_OP_lit3:
678 case DW_OP_lit4:
679 case DW_OP_lit5:
680 case DW_OP_lit6:
681 case DW_OP_lit7:
682 case DW_OP_lit8:
683 case DW_OP_lit9:
684 case DW_OP_lit10:
685 case DW_OP_lit11:
686 case DW_OP_lit12:
687 case DW_OP_lit13:
688 case DW_OP_lit14:
689 case DW_OP_lit15:
690 case DW_OP_lit16:
691 case DW_OP_lit17:
692 case DW_OP_lit18:
693 case DW_OP_lit19:
694 case DW_OP_lit20:
695 case DW_OP_lit21:
696 case DW_OP_lit22:
697 case DW_OP_lit23:
698 case DW_OP_lit24:
699 case DW_OP_lit25:
700 case DW_OP_lit26:
701 case DW_OP_lit27:
702 case DW_OP_lit28:
703 case DW_OP_lit29:
704 case DW_OP_lit30:
705 case DW_OP_lit31:
706 result = op - DW_OP_lit0;
707 result_val = value_from_ulongest (address_type, result);
708 break;
709
710 case DW_OP_addr:
711 result = extract_unsigned_integer (op_ptr,
712 ctx->addr_size, byte_order);
713 op_ptr += ctx->addr_size;
714 /* Some versions of GCC emit DW_OP_addr before
715 DW_OP_GNU_push_tls_address. In this case the value is an
716 index, not an address. We don't support things like
717 branching between the address and the TLS op. */
718 if (op_ptr >= op_end || *op_ptr != DW_OP_GNU_push_tls_address)
719 result += ctx->offset;
720 result_val = value_from_ulongest (address_type, result);
721 break;
722
723 case DW_OP_GNU_addr_index:
724 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
725 result = (ctx->funcs->get_addr_index) (ctx->baton, uoffset);
726 result += ctx->offset;
727 result_val = value_from_ulongest (address_type, result);
728 break;
729 case DW_OP_GNU_const_index:
730 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
731 result = (ctx->funcs->get_addr_index) (ctx->baton, uoffset);
732 result_val = value_from_ulongest (address_type, result);
733 break;
734
735 case DW_OP_const1u:
736 result = extract_unsigned_integer (op_ptr, 1, byte_order);
737 result_val = value_from_ulongest (address_type, result);
738 op_ptr += 1;
739 break;
740 case DW_OP_const1s:
741 result = extract_signed_integer (op_ptr, 1, byte_order);
742 result_val = value_from_ulongest (address_type, result);
743 op_ptr += 1;
744 break;
745 case DW_OP_const2u:
746 result = extract_unsigned_integer (op_ptr, 2, byte_order);
747 result_val = value_from_ulongest (address_type, result);
748 op_ptr += 2;
749 break;
750 case DW_OP_const2s:
751 result = extract_signed_integer (op_ptr, 2, byte_order);
752 result_val = value_from_ulongest (address_type, result);
753 op_ptr += 2;
754 break;
755 case DW_OP_const4u:
756 result = extract_unsigned_integer (op_ptr, 4, byte_order);
757 result_val = value_from_ulongest (address_type, result);
758 op_ptr += 4;
759 break;
760 case DW_OP_const4s:
761 result = extract_signed_integer (op_ptr, 4, byte_order);
762 result_val = value_from_ulongest (address_type, result);
763 op_ptr += 4;
764 break;
765 case DW_OP_const8u:
766 result = extract_unsigned_integer (op_ptr, 8, byte_order);
767 result_val = value_from_ulongest (address_type, result);
768 op_ptr += 8;
769 break;
770 case DW_OP_const8s:
771 result = extract_signed_integer (op_ptr, 8, byte_order);
772 result_val = value_from_ulongest (address_type, result);
773 op_ptr += 8;
774 break;
775 case DW_OP_constu:
776 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
777 result = uoffset;
778 result_val = value_from_ulongest (address_type, result);
779 break;
780 case DW_OP_consts:
781 op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset);
782 result = offset;
783 result_val = value_from_ulongest (address_type, result);
784 break;
785
786 /* The DW_OP_reg operations are required to occur alone in
787 location expressions. */
788 case DW_OP_reg0:
789 case DW_OP_reg1:
790 case DW_OP_reg2:
791 case DW_OP_reg3:
792 case DW_OP_reg4:
793 case DW_OP_reg5:
794 case DW_OP_reg6:
795 case DW_OP_reg7:
796 case DW_OP_reg8:
797 case DW_OP_reg9:
798 case DW_OP_reg10:
799 case DW_OP_reg11:
800 case DW_OP_reg12:
801 case DW_OP_reg13:
802 case DW_OP_reg14:
803 case DW_OP_reg15:
804 case DW_OP_reg16:
805 case DW_OP_reg17:
806 case DW_OP_reg18:
807 case DW_OP_reg19:
808 case DW_OP_reg20:
809 case DW_OP_reg21:
810 case DW_OP_reg22:
811 case DW_OP_reg23:
812 case DW_OP_reg24:
813 case DW_OP_reg25:
814 case DW_OP_reg26:
815 case DW_OP_reg27:
816 case DW_OP_reg28:
817 case DW_OP_reg29:
818 case DW_OP_reg30:
819 case DW_OP_reg31:
820 dwarf_expr_require_composition (op_ptr, op_end, "DW_OP_reg");
821
822 result = op - DW_OP_reg0;
823 result_val = value_from_ulongest (address_type, result);
824 ctx->location = DWARF_VALUE_REGISTER;
825 break;
826
827 case DW_OP_regx:
828 op_ptr = safe_read_uleb128 (op_ptr, op_end, &reg);
829 dwarf_expr_require_composition (op_ptr, op_end, "DW_OP_regx");
830
831 result = reg;
832 result_val = value_from_ulongest (address_type, result);
833 ctx->location = DWARF_VALUE_REGISTER;
834 break;
835
836 case DW_OP_implicit_value:
837 {
838 uint64_t len;
839
840 op_ptr = safe_read_uleb128 (op_ptr, op_end, &len);
841 if (op_ptr + len > op_end)
842 error (_("DW_OP_implicit_value: too few bytes available."));
843 ctx->len = len;
844 ctx->data = op_ptr;
845 ctx->location = DWARF_VALUE_LITERAL;
846 op_ptr += len;
847 dwarf_expr_require_composition (op_ptr, op_end,
848 "DW_OP_implicit_value");
849 }
850 goto no_push;
851
852 case DW_OP_stack_value:
853 ctx->location = DWARF_VALUE_STACK;
854 dwarf_expr_require_composition (op_ptr, op_end, "DW_OP_stack_value");
855 goto no_push;
856
857 case DW_OP_GNU_implicit_pointer:
858 {
859 int64_t len;
860
861 if (ctx->ref_addr_size == -1)
862 error (_("DWARF-2 expression error: DW_OP_GNU_implicit_pointer "
863 "is not allowed in frame context"));
864
865 /* The referred-to DIE of sect_offset kind. */
866 ctx->len = extract_unsigned_integer (op_ptr, ctx->ref_addr_size,
867 byte_order);
868 op_ptr += ctx->ref_addr_size;
869
870 /* The byte offset into the data. */
871 op_ptr = safe_read_sleb128 (op_ptr, op_end, &len);
872 result = (ULONGEST) len;
873 result_val = value_from_ulongest (address_type, result);
874
875 ctx->location = DWARF_VALUE_IMPLICIT_POINTER;
876 dwarf_expr_require_composition (op_ptr, op_end,
877 "DW_OP_GNU_implicit_pointer");
878 }
879 break;
880
881 case DW_OP_breg0:
882 case DW_OP_breg1:
883 case DW_OP_breg2:
884 case DW_OP_breg3:
885 case DW_OP_breg4:
886 case DW_OP_breg5:
887 case DW_OP_breg6:
888 case DW_OP_breg7:
889 case DW_OP_breg8:
890 case DW_OP_breg9:
891 case DW_OP_breg10:
892 case DW_OP_breg11:
893 case DW_OP_breg12:
894 case DW_OP_breg13:
895 case DW_OP_breg14:
896 case DW_OP_breg15:
897 case DW_OP_breg16:
898 case DW_OP_breg17:
899 case DW_OP_breg18:
900 case DW_OP_breg19:
901 case DW_OP_breg20:
902 case DW_OP_breg21:
903 case DW_OP_breg22:
904 case DW_OP_breg23:
905 case DW_OP_breg24:
906 case DW_OP_breg25:
907 case DW_OP_breg26:
908 case DW_OP_breg27:
909 case DW_OP_breg28:
910 case DW_OP_breg29:
911 case DW_OP_breg30:
912 case DW_OP_breg31:
913 {
914 op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset);
915 result = (ctx->funcs->read_addr_from_reg) (ctx->baton,
916 op - DW_OP_breg0);
917 result += offset;
918 result_val = value_from_ulongest (address_type, result);
919 }
920 break;
921 case DW_OP_bregx:
922 {
923 op_ptr = safe_read_uleb128 (op_ptr, op_end, &reg);
924 op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset);
925 result = (ctx->funcs->read_addr_from_reg) (ctx->baton, reg);
926 result += offset;
927 result_val = value_from_ulongest (address_type, result);
928 }
929 break;
930 case DW_OP_fbreg:
931 {
932 const gdb_byte *datastart;
933 size_t datalen;
934 unsigned int before_stack_len;
935
936 op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset);
937 /* Rather than create a whole new context, we simply
938 record the stack length before execution, then reset it
939 afterwards, effectively erasing whatever the recursive
940 call put there. */
941 before_stack_len = ctx->stack_len;
942 /* FIXME: cagney/2003-03-26: This code should be using
943 get_frame_base_address(), and then implement a dwarf2
944 specific this_base method. */
945 (ctx->funcs->get_frame_base) (ctx->baton, &datastart, &datalen);
946 dwarf_expr_eval (ctx, datastart, datalen);
947 if (ctx->location == DWARF_VALUE_MEMORY)
948 result = dwarf_expr_fetch_address (ctx, 0);
949 else if (ctx->location == DWARF_VALUE_REGISTER)
950 result = (ctx->funcs->read_addr_from_reg)
951 (ctx->baton,
952 value_as_long (dwarf_expr_fetch (ctx, 0)));
953 else
954 error (_("Not implemented: computing frame "
955 "base using explicit value operator"));
956 result = result + offset;
957 result_val = value_from_ulongest (address_type, result);
958 in_stack_memory = 1;
959 ctx->stack_len = before_stack_len;
960 ctx->location = DWARF_VALUE_MEMORY;
961 }
962 break;
963
964 case DW_OP_dup:
965 result_val = dwarf_expr_fetch (ctx, 0);
966 in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, 0);
967 break;
968
969 case DW_OP_drop:
970 dwarf_expr_pop (ctx);
971 goto no_push;
972
973 case DW_OP_pick:
974 offset = *op_ptr++;
975 result_val = dwarf_expr_fetch (ctx, offset);
976 in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, offset);
977 break;
978
979 case DW_OP_swap:
980 {
981 struct dwarf_stack_value t1, t2;
982
983 if (ctx->stack_len < 2)
984 error (_("Not enough elements for "
985 "DW_OP_swap. Need 2, have %d."),
986 ctx->stack_len);
987 t1 = ctx->stack[ctx->stack_len - 1];
988 t2 = ctx->stack[ctx->stack_len - 2];
989 ctx->stack[ctx->stack_len - 1] = t2;
990 ctx->stack[ctx->stack_len - 2] = t1;
991 goto no_push;
992 }
993
994 case DW_OP_over:
995 result_val = dwarf_expr_fetch (ctx, 1);
996 in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, 1);
997 break;
998
999 case DW_OP_rot:
1000 {
1001 struct dwarf_stack_value t1, t2, t3;
1002
1003 if (ctx->stack_len < 3)
1004 error (_("Not enough elements for "
1005 "DW_OP_rot. Need 3, have %d."),
1006 ctx->stack_len);
1007 t1 = ctx->stack[ctx->stack_len - 1];
1008 t2 = ctx->stack[ctx->stack_len - 2];
1009 t3 = ctx->stack[ctx->stack_len - 3];
1010 ctx->stack[ctx->stack_len - 1] = t2;
1011 ctx->stack[ctx->stack_len - 2] = t3;
1012 ctx->stack[ctx->stack_len - 3] = t1;
1013 goto no_push;
1014 }
1015
1016 case DW_OP_deref:
1017 case DW_OP_deref_size:
1018 case DW_OP_GNU_deref_type:
1019 {
1020 int addr_size = (op == DW_OP_deref ? ctx->addr_size : *op_ptr++);
1021 gdb_byte *buf = (gdb_byte *) alloca (addr_size);
1022 CORE_ADDR addr = dwarf_expr_fetch_address (ctx, 0);
1023 struct type *type;
1024
1025 dwarf_expr_pop (ctx);
1026
1027 if (op == DW_OP_GNU_deref_type)
1028 {
1029 cu_offset type_die;
1030
1031 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
1032 type_die.cu_off = uoffset;
1033 type = dwarf_get_base_type (ctx, type_die, 0);
1034 }
1035 else
1036 type = address_type;
1037
1038 (ctx->funcs->read_mem) (ctx->baton, buf, addr, addr_size);
1039
1040 /* If the size of the object read from memory is different
1041 from the type length, we need to zero-extend it. */
1042 if (TYPE_LENGTH (type) != addr_size)
1043 {
1044 ULONGEST result =
1045 extract_unsigned_integer (buf, addr_size, byte_order);
1046
1047 buf = (gdb_byte *) alloca (TYPE_LENGTH (type));
1048 store_unsigned_integer (buf, TYPE_LENGTH (type),
1049 byte_order, result);
1050 }
1051
1052 result_val = value_from_contents_and_address (type, buf, addr);
1053 break;
1054 }
1055
1056 case DW_OP_abs:
1057 case DW_OP_neg:
1058 case DW_OP_not:
1059 case DW_OP_plus_uconst:
1060 {
1061 /* Unary operations. */
1062 result_val = dwarf_expr_fetch (ctx, 0);
1063 dwarf_expr_pop (ctx);
1064
1065 switch (op)
1066 {
1067 case DW_OP_abs:
1068 if (value_less (result_val,
1069 value_zero (value_type (result_val), not_lval)))
1070 result_val = value_neg (result_val);
1071 break;
1072 case DW_OP_neg:
1073 result_val = value_neg (result_val);
1074 break;
1075 case DW_OP_not:
1076 dwarf_require_integral (value_type (result_val));
1077 result_val = value_complement (result_val);
1078 break;
1079 case DW_OP_plus_uconst:
1080 dwarf_require_integral (value_type (result_val));
1081 result = value_as_long (result_val);
1082 op_ptr = safe_read_uleb128 (op_ptr, op_end, &reg);
1083 result += reg;
1084 result_val = value_from_ulongest (address_type, result);
1085 break;
1086 }
1087 }
1088 break;
1089
1090 case DW_OP_and:
1091 case DW_OP_div:
1092 case DW_OP_minus:
1093 case DW_OP_mod:
1094 case DW_OP_mul:
1095 case DW_OP_or:
1096 case DW_OP_plus:
1097 case DW_OP_shl:
1098 case DW_OP_shr:
1099 case DW_OP_shra:
1100 case DW_OP_xor:
1101 case DW_OP_le:
1102 case DW_OP_ge:
1103 case DW_OP_eq:
1104 case DW_OP_lt:
1105 case DW_OP_gt:
1106 case DW_OP_ne:
1107 {
1108 /* Binary operations. */
1109 struct value *first, *second;
1110
1111 second = dwarf_expr_fetch (ctx, 0);
1112 dwarf_expr_pop (ctx);
1113
1114 first = dwarf_expr_fetch (ctx, 0);
1115 dwarf_expr_pop (ctx);
1116
1117 if (! base_types_equal_p (value_type (first), value_type (second)))
1118 error (_("Incompatible types on DWARF stack"));
1119
1120 switch (op)
1121 {
1122 case DW_OP_and:
1123 dwarf_require_integral (value_type (first));
1124 dwarf_require_integral (value_type (second));
1125 result_val = value_binop (first, second, BINOP_BITWISE_AND);
1126 break;
1127 case DW_OP_div:
1128 result_val = value_binop (first, second, BINOP_DIV);
1129 break;
1130 case DW_OP_minus:
1131 result_val = value_binop (first, second, BINOP_SUB);
1132 break;
1133 case DW_OP_mod:
1134 {
1135 int cast_back = 0;
1136 struct type *orig_type = value_type (first);
1137
1138 /* We have to special-case "old-style" untyped values
1139 -- these must have mod computed using unsigned
1140 math. */
1141 if (orig_type == address_type)
1142 {
1143 struct type *utype
1144 = get_unsigned_type (ctx->gdbarch, orig_type);
1145
1146 cast_back = 1;
1147 first = value_cast (utype, first);
1148 second = value_cast (utype, second);
1149 }
1150 /* Note that value_binop doesn't handle float or
1151 decimal float here. This seems unimportant. */
1152 result_val = value_binop (first, second, BINOP_MOD);
1153 if (cast_back)
1154 result_val = value_cast (orig_type, result_val);
1155 }
1156 break;
1157 case DW_OP_mul:
1158 result_val = value_binop (first, second, BINOP_MUL);
1159 break;
1160 case DW_OP_or:
1161 dwarf_require_integral (value_type (first));
1162 dwarf_require_integral (value_type (second));
1163 result_val = value_binop (first, second, BINOP_BITWISE_IOR);
1164 break;
1165 case DW_OP_plus:
1166 result_val = value_binop (first, second, BINOP_ADD);
1167 break;
1168 case DW_OP_shl:
1169 dwarf_require_integral (value_type (first));
1170 dwarf_require_integral (value_type (second));
1171 result_val = value_binop (first, second, BINOP_LSH);
1172 break;
1173 case DW_OP_shr:
1174 dwarf_require_integral (value_type (first));
1175 dwarf_require_integral (value_type (second));
1176 if (!TYPE_UNSIGNED (value_type (first)))
1177 {
1178 struct type *utype
1179 = get_unsigned_type (ctx->gdbarch, value_type (first));
1180
1181 first = value_cast (utype, first);
1182 }
1183
1184 result_val = value_binop (first, second, BINOP_RSH);
1185 /* Make sure we wind up with the same type we started
1186 with. */
1187 if (value_type (result_val) != value_type (second))
1188 result_val = value_cast (value_type (second), result_val);
1189 break;
1190 case DW_OP_shra:
1191 dwarf_require_integral (value_type (first));
1192 dwarf_require_integral (value_type (second));
1193 if (TYPE_UNSIGNED (value_type (first)))
1194 {
1195 struct type *stype
1196 = get_signed_type (ctx->gdbarch, value_type (first));
1197
1198 first = value_cast (stype, first);
1199 }
1200
1201 result_val = value_binop (first, second, BINOP_RSH);
1202 /* Make sure we wind up with the same type we started
1203 with. */
1204 if (value_type (result_val) != value_type (second))
1205 result_val = value_cast (value_type (second), result_val);
1206 break;
1207 case DW_OP_xor:
1208 dwarf_require_integral (value_type (first));
1209 dwarf_require_integral (value_type (second));
1210 result_val = value_binop (first, second, BINOP_BITWISE_XOR);
1211 break;
1212 case DW_OP_le:
1213 /* A <= B is !(B < A). */
1214 result = ! value_less (second, first);
1215 result_val = value_from_ulongest (address_type, result);
1216 break;
1217 case DW_OP_ge:
1218 /* A >= B is !(A < B). */
1219 result = ! value_less (first, second);
1220 result_val = value_from_ulongest (address_type, result);
1221 break;
1222 case DW_OP_eq:
1223 result = value_equal (first, second);
1224 result_val = value_from_ulongest (address_type, result);
1225 break;
1226 case DW_OP_lt:
1227 result = value_less (first, second);
1228 result_val = value_from_ulongest (address_type, result);
1229 break;
1230 case DW_OP_gt:
1231 /* A > B is B < A. */
1232 result = value_less (second, first);
1233 result_val = value_from_ulongest (address_type, result);
1234 break;
1235 case DW_OP_ne:
1236 result = ! value_equal (first, second);
1237 result_val = value_from_ulongest (address_type, result);
1238 break;
1239 default:
1240 internal_error (__FILE__, __LINE__,
1241 _("Can't be reached."));
1242 }
1243 }
1244 break;
1245
1246 case DW_OP_call_frame_cfa:
1247 result = (ctx->funcs->get_frame_cfa) (ctx->baton);
1248 result_val = value_from_ulongest (address_type, result);
1249 in_stack_memory = 1;
1250 break;
1251
1252 case DW_OP_GNU_push_tls_address:
1253 case DW_OP_form_tls_address:
1254 /* Variable is at a constant offset in the thread-local
1255 storage block into the objfile for the current thread and
1256 the dynamic linker module containing this expression. Here
1257 we return returns the offset from that base. The top of the
1258 stack has the offset from the beginning of the thread
1259 control block at which the variable is located. Nothing
1260 should follow this operator, so the top of stack would be
1261 returned. */
1262 result = value_as_long (dwarf_expr_fetch (ctx, 0));
1263 dwarf_expr_pop (ctx);
1264 result = (ctx->funcs->get_tls_address) (ctx->baton, result);
1265 result_val = value_from_ulongest (address_type, result);
1266 break;
1267
1268 case DW_OP_skip:
1269 offset = extract_signed_integer (op_ptr, 2, byte_order);
1270 op_ptr += 2;
1271 op_ptr += offset;
1272 goto no_push;
1273
1274 case DW_OP_bra:
1275 {
1276 struct value *val;
1277
1278 offset = extract_signed_integer (op_ptr, 2, byte_order);
1279 op_ptr += 2;
1280 val = dwarf_expr_fetch (ctx, 0);
1281 dwarf_require_integral (value_type (val));
1282 if (value_as_long (val) != 0)
1283 op_ptr += offset;
1284 dwarf_expr_pop (ctx);
1285 }
1286 goto no_push;
1287
1288 case DW_OP_nop:
1289 goto no_push;
1290
1291 case DW_OP_piece:
1292 {
1293 uint64_t size;
1294
1295 /* Record the piece. */
1296 op_ptr = safe_read_uleb128 (op_ptr, op_end, &size);
1297 add_piece (ctx, 8 * size, 0);
1298
1299 /* Pop off the address/regnum, and reset the location
1300 type. */
1301 if (ctx->location != DWARF_VALUE_LITERAL
1302 && ctx->location != DWARF_VALUE_OPTIMIZED_OUT)
1303 dwarf_expr_pop (ctx);
1304 ctx->location = DWARF_VALUE_MEMORY;
1305 }
1306 goto no_push;
1307
1308 case DW_OP_bit_piece:
1309 {
1310 uint64_t size, offset;
1311
1312 /* Record the piece. */
1313 op_ptr = safe_read_uleb128 (op_ptr, op_end, &size);
1314 op_ptr = safe_read_uleb128 (op_ptr, op_end, &offset);
1315 add_piece (ctx, size, offset);
1316
1317 /* Pop off the address/regnum, and reset the location
1318 type. */
1319 if (ctx->location != DWARF_VALUE_LITERAL
1320 && ctx->location != DWARF_VALUE_OPTIMIZED_OUT)
1321 dwarf_expr_pop (ctx);
1322 ctx->location = DWARF_VALUE_MEMORY;
1323 }
1324 goto no_push;
1325
1326 case DW_OP_GNU_uninit:
1327 if (op_ptr != op_end)
1328 error (_("DWARF-2 expression error: DW_OP_GNU_uninit must always "
1329 "be the very last op."));
1330
1331 ctx->initialized = 0;
1332 goto no_push;
1333
1334 case DW_OP_call2:
1335 {
1336 cu_offset offset;
1337
1338 offset.cu_off = extract_unsigned_integer (op_ptr, 2, byte_order);
1339 op_ptr += 2;
1340 ctx->funcs->dwarf_call (ctx, offset);
1341 }
1342 goto no_push;
1343
1344 case DW_OP_call4:
1345 {
1346 cu_offset offset;
1347
1348 offset.cu_off = extract_unsigned_integer (op_ptr, 4, byte_order);
1349 op_ptr += 4;
1350 ctx->funcs->dwarf_call (ctx, offset);
1351 }
1352 goto no_push;
1353
1354 case DW_OP_GNU_entry_value:
1355 {
1356 uint64_t len;
1357 CORE_ADDR deref_size;
1358 union call_site_parameter_u kind_u;
1359
1360 op_ptr = safe_read_uleb128 (op_ptr, op_end, &len);
1361 if (op_ptr + len > op_end)
1362 error (_("DW_OP_GNU_entry_value: too few bytes available."));
1363
1364 kind_u.dwarf_reg = dwarf_block_to_dwarf_reg (op_ptr, op_ptr + len);
1365 if (kind_u.dwarf_reg != -1)
1366 {
1367 op_ptr += len;
1368 ctx->funcs->push_dwarf_reg_entry_value (ctx,
1369 CALL_SITE_PARAMETER_DWARF_REG,
1370 kind_u,
1371 -1 /* deref_size */);
1372 goto no_push;
1373 }
1374
1375 kind_u.dwarf_reg = dwarf_block_to_dwarf_reg_deref (op_ptr,
1376 op_ptr + len,
1377 &deref_size);
1378 if (kind_u.dwarf_reg != -1)
1379 {
1380 if (deref_size == -1)
1381 deref_size = ctx->addr_size;
1382 op_ptr += len;
1383 ctx->funcs->push_dwarf_reg_entry_value (ctx,
1384 CALL_SITE_PARAMETER_DWARF_REG,
1385 kind_u, deref_size);
1386 goto no_push;
1387 }
1388
1389 error (_("DWARF-2 expression error: DW_OP_GNU_entry_value is "
1390 "supported only for single DW_OP_reg* "
1391 "or for DW_OP_breg*(0)+DW_OP_deref*"));
1392 }
1393
1394 case DW_OP_GNU_parameter_ref:
1395 {
1396 union call_site_parameter_u kind_u;
1397
1398 kind_u.param_offset.cu_off = extract_unsigned_integer (op_ptr, 4,
1399 byte_order);
1400 op_ptr += 4;
1401 ctx->funcs->push_dwarf_reg_entry_value (ctx,
1402 CALL_SITE_PARAMETER_PARAM_OFFSET,
1403 kind_u,
1404 -1 /* deref_size */);
1405 }
1406 goto no_push;
1407
1408 case DW_OP_GNU_const_type:
1409 {
1410 cu_offset type_die;
1411 int n;
1412 const gdb_byte *data;
1413 struct type *type;
1414
1415 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
1416 type_die.cu_off = uoffset;
1417 n = *op_ptr++;
1418 data = op_ptr;
1419 op_ptr += n;
1420
1421 type = dwarf_get_base_type (ctx, type_die, n);
1422 result_val = value_from_contents (type, data);
1423 }
1424 break;
1425
1426 case DW_OP_GNU_regval_type:
1427 {
1428 cu_offset type_die;
1429 struct type *type;
1430
1431 op_ptr = safe_read_uleb128 (op_ptr, op_end, &reg);
1432 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
1433 type_die.cu_off = uoffset;
1434
1435 type = dwarf_get_base_type (ctx, type_die, 0);
1436 result_val = ctx->funcs->get_reg_value (ctx->baton, type, reg);
1437 }
1438 break;
1439
1440 case DW_OP_GNU_convert:
1441 case DW_OP_GNU_reinterpret:
1442 {
1443 cu_offset type_die;
1444 struct type *type;
1445
1446 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
1447 type_die.cu_off = uoffset;
1448
1449 if (type_die.cu_off == 0)
1450 type = address_type;
1451 else
1452 type = dwarf_get_base_type (ctx, type_die, 0);
1453
1454 result_val = dwarf_expr_fetch (ctx, 0);
1455 dwarf_expr_pop (ctx);
1456
1457 if (op == DW_OP_GNU_convert)
1458 result_val = value_cast (type, result_val);
1459 else if (type == value_type (result_val))
1460 {
1461 /* Nothing. */
1462 }
1463 else if (TYPE_LENGTH (type)
1464 != TYPE_LENGTH (value_type (result_val)))
1465 error (_("DW_OP_GNU_reinterpret has wrong size"));
1466 else
1467 result_val
1468 = value_from_contents (type,
1469 value_contents_all (result_val));
1470 }
1471 break;
1472
1473 case DW_OP_push_object_address:
1474 /* Return the address of the object we are currently observing. */
1475 result = (ctx->funcs->get_object_address) (ctx->baton);
1476 result_val = value_from_ulongest (address_type, result);
1477 break;
1478
1479 default:
1480 error (_("Unhandled dwarf expression opcode 0x%x"), op);
1481 }
1482
1483 /* Most things push a result value. */
1484 gdb_assert (result_val != NULL);
1485 dwarf_expr_push (ctx, result_val, in_stack_memory);
1486 no_push:
1487 ;
1488 }
1489
1490 /* To simplify our main caller, if the result is an implicit
1491 pointer, then make a pieced value. This is ok because we can't
1492 have implicit pointers in contexts where pieces are invalid. */
1493 if (ctx->location == DWARF_VALUE_IMPLICIT_POINTER)
1494 add_piece (ctx, 8 * ctx->addr_size, 0);
1495
1496 abort_expression:
1497 ctx->recursion_depth--;
1498 gdb_assert (ctx->recursion_depth >= 0);
1499 }
1500
/* Stub dwarf_expr_context_funcs.get_frame_base implementation.

   Installed by contexts that cannot supply a frame base; it
   unconditionally reports an error naming DW_OP_fbreg, the operator
   that required the callback.  BATON, START and LENGTH are unused.
   error () throws, so this function never returns normally.  */

void
ctx_no_get_frame_base (void *baton, const gdb_byte **start, size_t *length)
{
  error (_("%s is invalid in this context"), "DW_OP_fbreg");
}
1508
/* Stub dwarf_expr_context_funcs.get_frame_cfa implementation.

   Unconditionally reports an error naming DW_OP_call_frame_cfa; BATON
   is unused.  No return statement is needed even though the function
   is declared to return CORE_ADDR, because error () throws and does
   not return.  */

CORE_ADDR
ctx_no_get_frame_cfa (void *baton)
{
  error (_("%s is invalid in this context"), "DW_OP_call_frame_cfa");
}
1516
/* Stub dwarf_expr_context_funcs.get_frame_pc implementation.

   Unconditionally reports an error naming DW_OP_GNU_implicit_pointer,
   the operator whose evaluation needs the frame PC.  BATON is unused;
   error () throws, so the missing return value is never observed.  */

CORE_ADDR
ctx_no_get_frame_pc (void *baton)
{
  error (_("%s is invalid in this context"), "DW_OP_GNU_implicit_pointer");
}
1524
/* Stub dwarf_expr_context_funcs.get_tls_address implementation.

   Unconditionally reports an error naming DW_OP_form_tls_address.
   NOTE(review): this callback is also reached via the vendor alias
   DW_OP_GNU_push_tls_address (see the evaluator's switch above), but
   the message mentions only the standard spelling.  BATON and OFFSET
   are unused; error () throws, so no value is returned.  */

CORE_ADDR
ctx_no_get_tls_address (void *baton, CORE_ADDR offset)
{
  error (_("%s is invalid in this context"), "DW_OP_form_tls_address");
}
1532
/* Stub dwarf_expr_context_funcs.dwarf_call implementation.

   Unconditionally reports an error covering both call operators
   (DW_OP_call2 and DW_OP_call4, abbreviated "DW_OP_call*" in the
   message).  CTX and DIE_OFFSET are unused; error () throws.  */

void
ctx_no_dwarf_call (struct dwarf_expr_context *ctx, cu_offset die_offset)
{
  error (_("%s is invalid in this context"), "DW_OP_call*");
}
1540
/* Stub dwarf_expr_context_funcs.get_base_type implementation.

   Reached when a typed-DWARF operator (e.g. DW_OP_GNU_const_type,
   DW_OP_GNU_regval_type, DW_OP_GNU_convert) is evaluated in a context
   that cannot look up DIE types.  CTX and DIE are unused; error ()
   throws, so the declared struct type * result is never produced.  */

struct type *
ctx_no_get_base_type (struct dwarf_expr_context *ctx, cu_offset die)
{
  error (_("Support for typed DWARF is not supported in this context"));
}
1548
/* Stub dwarf_expr_context_funcs.push_dwarf_reg_entry_value
   implementation.  (The comment previously said
   "push_dwarf_block_entry_value"; the field is named
   push_dwarf_reg_entry_value.)

   Unlike the other stubs this raises internal_error rather than
   error: reaching it indicates a GDB bug (a context that evaluates
   DW_OP_GNU_entry_value without supplying the callback), not a
   problem with the user's input.  All parameters are unused.  */

void
ctx_no_push_dwarf_reg_entry_value (struct dwarf_expr_context *ctx,
				   enum call_site_parameter_kind kind,
				   union call_site_parameter_u kind_u,
				   int deref_size)
{
  internal_error (__FILE__, __LINE__,
		  _("Support for DW_OP_GNU_entry_value is unimplemented"));
}
1561
/* Stub dwarf_expr_context_funcs.get_addr_index implementation.

   Unconditionally reports an error naming DW_OP_GNU_addr_index.
   BATON and INDEX are unused; error () throws, so no CORE_ADDR is
   ever returned.  */

CORE_ADDR
ctx_no_get_addr_index (void *baton, unsigned int index)
{
  error (_("%s is invalid in this context"), "DW_OP_GNU_addr_index");
}
1569
/* Provide a prototype to silence -Wmissing-prototypes.  */
extern initialize_file_ftype _initialize_dwarf2expr;

/* Module initializer, run once at GDB startup.  Registers the
   per-gdbarch data slot used to lazily cache the DWARF base types
   (see dwarf_gdbarch_types_init at the top of this file); the
   resulting cookie is stored in dwarf_arch_cookie.  */

void
_initialize_dwarf2expr (void)
{
  dwarf_arch_cookie
    = gdbarch_data_register_post_init (dwarf_gdbarch_types_init);
}