/* Scraped-page provenance (not part of the original source):
   commit: [Ada] Better handling of dynamic types in ada_value_primitive_packed_val
   path: [deliverable/binutils-gdb.git] / gdb / dwarf2expr.c  */
1 /* DWARF 2 Expression Evaluator.
2
3 Copyright (C) 2001-2015 Free Software Foundation, Inc.
4
5 Contributed by Daniel Berlin (dan@dberlin.org)
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "symtab.h"
24 #include "gdbtypes.h"
25 #include "value.h"
26 #include "gdbcore.h"
27 #include "dwarf2.h"
28 #include "dwarf2expr.h"
29
30 /* Local prototypes. */
31
32 static void execute_stack_op (struct dwarf_expr_context *,
33 const gdb_byte *, const gdb_byte *);
34
35 /* Cookie for gdbarch data. */
36
37 static struct gdbarch_data *dwarf_arch_cookie;
38
/* This holds gdbarch-specific types used by the DWARF expression
   evaluator.  See comments in execute_stack_op.  */

struct dwarf_gdbarch_types
{
  /* Lazily-created signed integer types for 2-, 4- and 8-byte
     addresses; indexed 0/1/2 as in dwarf_expr_address_type.  */
  struct type *dw_types[3];
};
46
47 /* Allocate and fill in dwarf_gdbarch_types for an arch. */
48
49 static void *
50 dwarf_gdbarch_types_init (struct gdbarch *gdbarch)
51 {
52 struct dwarf_gdbarch_types *types
53 = GDBARCH_OBSTACK_ZALLOC (gdbarch, struct dwarf_gdbarch_types);
54
55 /* The types themselves are lazily initialized. */
56
57 return types;
58 }
59
60 /* Return the type used for DWARF operations where the type is
61 unspecified in the DWARF spec. Only certain sizes are
62 supported. */
63
64 static struct type *
65 dwarf_expr_address_type (struct dwarf_expr_context *ctx)
66 {
67 struct dwarf_gdbarch_types *types
68 = (struct dwarf_gdbarch_types *) gdbarch_data (ctx->gdbarch,
69 dwarf_arch_cookie);
70 int ndx;
71
72 if (ctx->addr_size == 2)
73 ndx = 0;
74 else if (ctx->addr_size == 4)
75 ndx = 1;
76 else if (ctx->addr_size == 8)
77 ndx = 2;
78 else
79 error (_("Unsupported address size in DWARF expressions: %d bits"),
80 8 * ctx->addr_size);
81
82 if (types->dw_types[ndx] == NULL)
83 types->dw_types[ndx]
84 = arch_integer_type (ctx->gdbarch,
85 8 * ctx->addr_size,
86 0, "<signed DWARF address type>");
87
88 return types->dw_types[ndx];
89 }
90
91 /* Create a new context for the expression evaluator. */
92
93 struct dwarf_expr_context *
94 new_dwarf_expr_context (void)
95 {
96 struct dwarf_expr_context *retval;
97
98 retval = XCNEW (struct dwarf_expr_context);
99 retval->stack_len = 0;
100 retval->stack_allocated = 10;
101 retval->stack = XNEWVEC (struct dwarf_stack_value, retval->stack_allocated);
102 retval->num_pieces = 0;
103 retval->pieces = 0;
104 retval->max_recursion_depth = 0x100;
105 return retval;
106 }
107
/* Release the memory allocated to CTX.  Frees the owned stack and
   piece buffers, then the context itself.  */

void
free_dwarf_expr_context (struct dwarf_expr_context *ctx)
{
  xfree (ctx->stack);
  xfree (ctx->pieces);
  xfree (ctx);
}
117
/* Helper for make_cleanup_free_dwarf_expr_context: adapt
   free_dwarf_expr_context to the void* cleanup signature.  */

static void
free_dwarf_expr_context_cleanup (void *arg)
{
  free_dwarf_expr_context ((struct dwarf_expr_context *) arg);
}
125
/* Return a cleanup that calls free_dwarf_expr_context on CTX when
   run.  */

struct cleanup *
make_cleanup_free_dwarf_expr_context (struct dwarf_expr_context *ctx)
{
  return make_cleanup (free_dwarf_expr_context_cleanup, ctx);
}
133
134 /* Expand the memory allocated to CTX's stack to contain at least
135 NEED more elements than are currently used. */
136
137 static void
138 dwarf_expr_grow_stack (struct dwarf_expr_context *ctx, size_t need)
139 {
140 if (ctx->stack_len + need > ctx->stack_allocated)
141 {
142 size_t newlen = ctx->stack_len + need + 10;
143
144 ctx->stack = XRESIZEVEC (struct dwarf_stack_value, ctx->stack, newlen);
145 ctx->stack_allocated = newlen;
146 }
147 }
148
149 /* Push VALUE onto CTX's stack. */
150
151 static void
152 dwarf_expr_push (struct dwarf_expr_context *ctx, struct value *value,
153 int in_stack_memory)
154 {
155 struct dwarf_stack_value *v;
156
157 dwarf_expr_grow_stack (ctx, 1);
158 v = &ctx->stack[ctx->stack_len++];
159 v->value = value;
160 v->in_stack_memory = in_stack_memory;
161 }
162
/* Push the address VALUE onto CTX's stack, wrapped as a value of the
   untyped DWARF address type (see dwarf_expr_address_type).  */

void
dwarf_expr_push_address (struct dwarf_expr_context *ctx, CORE_ADDR value,
			 int in_stack_memory)
{
  dwarf_expr_push (ctx,
		   value_from_ulongest (dwarf_expr_address_type (ctx), value),
		   in_stack_memory);
}
173
/* Pop the top item off of CTX's stack.  The element is simply
   dropped; no per-element storage is freed here.  Throws on
   underflow.  */

static void
dwarf_expr_pop (struct dwarf_expr_context *ctx)
{
  if (ctx->stack_len <= 0)
    error (_("dwarf expression stack underflow"));
  ctx->stack_len--;
}
183
/* Retrieve the N'th item on CTX's stack, where N == 0 is the top of
   the stack.  The element is not popped.  Throws if the stack has
   N or fewer elements.  */

struct value *
dwarf_expr_fetch (struct dwarf_expr_context *ctx, int n)
{
  /* NOTE(review): a negative N would pass this check and index out of
     bounds; callers are presumed to pass N >= 0 -- confirm.  */
  if (ctx->stack_len <= n)
    error (_("Asked for position %d of stack, "
	     "stack only has %d elements on it."),
	   n, ctx->stack_len);
  return ctx->stack[ctx->stack_len - (1 + n)].value;
}
195
196 /* Require that TYPE be an integral type; throw an exception if not. */
197
198 static void
199 dwarf_require_integral (struct type *type)
200 {
201 if (TYPE_CODE (type) != TYPE_CODE_INT
202 && TYPE_CODE (type) != TYPE_CODE_CHAR
203 && TYPE_CODE (type) != TYPE_CODE_BOOL)
204 error (_("integral type expected in DWARF expression"));
205 }
206
/* Return the unsigned form of TYPE.  TYPE is necessarily an integral
   type.  Only 1-, 2-, 4- and 8-byte lengths are supported; any other
   length throws.  */

static struct type *
get_unsigned_type (struct gdbarch *gdbarch, struct type *type)
{
  switch (TYPE_LENGTH (type))
    {
    case 1:
      return builtin_type (gdbarch)->builtin_uint8;
    case 2:
      return builtin_type (gdbarch)->builtin_uint16;
    case 4:
      return builtin_type (gdbarch)->builtin_uint32;
    case 8:
      return builtin_type (gdbarch)->builtin_uint64;
    default:
      error (_("no unsigned variant found for type, while evaluating "
	       "DWARF expression"));
    }
}
228
/* Return the signed form of TYPE.  TYPE is necessarily an integral
   type.  Only 1-, 2-, 4- and 8-byte lengths are supported; any other
   length throws.  Mirrors get_unsigned_type above.  */

static struct type *
get_signed_type (struct gdbarch *gdbarch, struct type *type)
{
  switch (TYPE_LENGTH (type))
    {
    case 1:
      return builtin_type (gdbarch)->builtin_int8;
    case 2:
      return builtin_type (gdbarch)->builtin_int16;
    case 4:
      return builtin_type (gdbarch)->builtin_int32;
    case 8:
      return builtin_type (gdbarch)->builtin_int64;
    default:
      error (_("no signed variant found for type, while evaluating "
	       "DWARF expression"));
    }
}
250
/* Retrieve the N'th item on CTX's stack, converted to an address.
   The element must have an integral type; it is not popped.  */

CORE_ADDR
dwarf_expr_fetch_address (struct dwarf_expr_context *ctx, int n)
{
  struct value *result_val = dwarf_expr_fetch (ctx, n);
  enum bfd_endian byte_order = gdbarch_byte_order (ctx->gdbarch);
  ULONGEST result;

  dwarf_require_integral (value_type (result_val));
  result = extract_unsigned_integer (value_contents (result_val),
				     TYPE_LENGTH (value_type (result_val)),
				     byte_order);

  /* For most architectures, calling extract_unsigned_integer() alone
     is sufficient for extracting an address.  However, some
     architectures (e.g. MIPS) use signed addresses and using
     extract_unsigned_integer() will not produce a correct
     result.  Make sure we invoke gdbarch_integer_to_address()
     for those architectures which require it.  */
  if (gdbarch_integer_to_address_p (ctx->gdbarch))
    {
      /* Re-store the value in a buffer of the context's address size
	 so the arch hook sees exactly addr_size bytes.  */
      gdb_byte *buf = (gdb_byte *) alloca (ctx->addr_size);
      struct type *int_type = get_unsigned_type (ctx->gdbarch,
						 value_type (result_val));

      store_unsigned_integer (buf, ctx->addr_size, byte_order, result);
      return gdbarch_integer_to_address (ctx->gdbarch, int_type, buf);
    }

  return (CORE_ADDR) result;
}
283
/* Retrieve the in_stack_memory flag of the N'th item on CTX's stack,
   where N == 0 is the top.  Throws if the stack has N or fewer
   elements (same bounds check as dwarf_expr_fetch).  */

int
dwarf_expr_fetch_in_stack_memory (struct dwarf_expr_context *ctx, int n)
{
  if (ctx->stack_len <= n)
    error (_("Asked for position %d of stack, "
	     "stack only has %d elements on it."),
	   n, ctx->stack_len);
  return ctx->stack[ctx->stack_len - (1 + n)].in_stack_memory;
}
295
/* Return true if the expression stack is empty.  */

static int
dwarf_expr_stack_empty_p (struct dwarf_expr_context *ctx)
{
  return ctx->stack_len == 0;
}
303
/* Add a new piece to CTX's piece list.  SIZE and OFFSET are recorded
   verbatim; their units depend on the operator being processed
   (presumably bytes for DW_OP_piece and bits for DW_OP_bit_piece --
   confirm at call sites).  The piece's payload is taken from CTX's
   current location and, for most location kinds, from the top of the
   evaluation stack.  */
static void
add_piece (struct dwarf_expr_context *ctx, ULONGEST size, ULONGEST offset)
{
  struct dwarf_expr_piece *p;

  ctx->num_pieces++;

  ctx->pieces
    = XRESIZEVEC (struct dwarf_expr_piece, ctx->pieces, ctx->num_pieces);

  p = &ctx->pieces[ctx->num_pieces - 1];
  p->location = ctx->location;
  p->size = size;
  p->offset = offset;

  if (p->location == DWARF_VALUE_LITERAL)
    {
      /* Literal pieces carry the instruction-stream bytes directly.  */
      p->v.literal.data = ctx->data;
      p->v.literal.length = ctx->len;
    }
  else if (dwarf_expr_stack_empty_p (ctx))
    {
      /* No value on the stack means this piece is unavailable.  */
      p->location = DWARF_VALUE_OPTIMIZED_OUT;
      /* Also reset the context's location, for our callers.  This is
	 a somewhat strange approach, but this lets us avoid setting
	 the location to DWARF_VALUE_MEMORY in all the individual
	 cases in the evaluator.  */
      ctx->location = DWARF_VALUE_OPTIMIZED_OUT;
    }
  else if (p->location == DWARF_VALUE_MEMORY)
    {
      p->v.mem.addr = dwarf_expr_fetch_address (ctx, 0);
      p->v.mem.in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, 0);
    }
  else if (p->location == DWARF_VALUE_IMPLICIT_POINTER)
    {
      /* CTX->len holds the referred-to DIE's section offset here; see
	 the DW_OP_GNU_implicit_pointer case in execute_stack_op.  */
      p->v.ptr.die.sect_off = ctx->len;
      p->v.ptr.offset = value_as_long (dwarf_expr_fetch (ctx, 0));
    }
  else if (p->location == DWARF_VALUE_REGISTER)
    p->v.regno = value_as_long (dwarf_expr_fetch (ctx, 0));
  else
    {
      /* DWARF_VALUE_STACK and similar: keep the computed value.  */
      p->v.value = dwarf_expr_fetch (ctx, 0);
    }
}
351
/* Evaluate the expression at ADDR (LEN bytes long) using the context
   CTX.  On normal return the recursion depth is unchanged; the
   assertion below checks that invariant.  */

void
dwarf_expr_eval (struct dwarf_expr_context *ctx, const gdb_byte *addr,
		 size_t len)
{
  int old_recursion_depth = ctx->recursion_depth;

  execute_stack_op (ctx, addr, addr + len);

  /* CTX RECURSION_DEPTH becomes invalid if an exception was thrown
     here.  */

  gdb_assert (ctx->recursion_depth == old_recursion_depth);
}
367
368 /* Helper to read a uleb128 value or throw an error. */
369
370 const gdb_byte *
371 safe_read_uleb128 (const gdb_byte *buf, const gdb_byte *buf_end,
372 uint64_t *r)
373 {
374 buf = gdb_read_uleb128 (buf, buf_end, r);
375 if (buf == NULL)
376 error (_("DWARF expression error: ran off end of buffer reading uleb128 value"));
377 return buf;
378 }
379
380 /* Helper to read a sleb128 value or throw an error. */
381
382 const gdb_byte *
383 safe_read_sleb128 (const gdb_byte *buf, const gdb_byte *buf_end,
384 int64_t *r)
385 {
386 buf = gdb_read_sleb128 (buf, buf_end, r);
387 if (buf == NULL)
388 error (_("DWARF expression error: ran off end of buffer reading sleb128 value"));
389 return buf;
390 }
391
/* Helper to skip over a leb128 value or throw an error.  Returns the
   pointer just past the skipped bytes.  */

const gdb_byte *
safe_skip_leb128 (const gdb_byte *buf, const gdb_byte *buf_end)
{
  buf = gdb_skip_leb128 (buf, buf_end);
  if (buf == NULL)
    error (_("DWARF expression error: ran off end of buffer reading leb128 value"));
  return buf;
}
400 \f
401
/* Check that the current operator is either at the end of an
   expression, or that it is followed by a composition operator
   (DW_OP_piece or DW_OP_bit_piece).  OP_NAME names the operator for
   the error message.  */

void
dwarf_expr_require_composition (const gdb_byte *op_ptr, const gdb_byte *op_end,
				const char *op_name)
{
  /* It seems like DW_OP_GNU_uninit should be handled here.  However,
     it doesn't seem to make sense for DW_OP_*_value, and it was not
     checked at the other place that this function is called.  */
  if (op_ptr != op_end && *op_ptr != DW_OP_piece && *op_ptr != DW_OP_bit_piece)
    error (_("DWARF-2 expression error: `%s' operations must be "
	     "used either alone or in conjunction with DW_OP_piece "
	     "or DW_OP_bit_piece."),
	   op_name);
}
418
/* Return true iff the types T1 and T2 are "the same".  This only does
   checks that might reasonably be needed to compare DWARF base
   types: type code, signedness, and length.  */

static int
base_types_equal_p (struct type *t1, struct type *t2)
{
  return (TYPE_CODE (t1) == TYPE_CODE (t2)
	  && TYPE_UNSIGNED (t1) == TYPE_UNSIGNED (t2)
	  && TYPE_LENGTH (t1) == TYPE_LENGTH (t2));
}
432
/* A convenience function to call get_base_type on CTX and return the
   result.  DIE is the DIE whose type we need.  SIZE is non-zero if
   this function should verify that the resulting type has the correct
   size.  If the context provides no get_base_type callback, any
   integer type will do and builtin_int is returned.  */

static struct type *
dwarf_get_base_type (struct dwarf_expr_context *ctx, cu_offset die, int size)
{
  struct type *result;

  if (ctx->funcs->get_base_type)
    {
      result = ctx->funcs->get_base_type (ctx, die);
      if (result == NULL)
	error (_("Could not find type for DW_OP_GNU_const_type"));
      /* NOTE(review): these messages mention DW_OP_GNU_const_type,
	 but this helper is also reached from other typed operators
	 (e.g. DW_OP_GNU_deref_type).  */
      if (size != 0 && TYPE_LENGTH (result) != size)
	error (_("DW_OP_GNU_const_type has different sizes for type and data"));
    }
  else
    /* Anything will do.  */
    result = builtin_type (ctx->gdbarch)->builtin_int;

  return result;
}
457
/* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_reg*
   return the DWARF register number.  Otherwise return -1.  Accepted
   forms are a lone DW_OP_reg0..DW_OP_reg31, DW_OP_regx, or
   DW_OP_GNU_regval_type (whose type operand is skipped).  */

int
dwarf_block_to_dwarf_reg (const gdb_byte *buf, const gdb_byte *buf_end)
{
  uint64_t dwarf_reg;

  if (buf_end <= buf)
    return -1;
  if (*buf >= DW_OP_reg0 && *buf <= DW_OP_reg31)
    {
      /* The short form must be the only byte in the block.  */
      if (buf_end - buf != 1)
	return -1;
      return *buf - DW_OP_reg0;
    }

  if (*buf == DW_OP_GNU_regval_type)
    {
      buf++;
      buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
      if (buf == NULL)
	return -1;
      /* Skip the type DIE offset operand.  */
      buf = gdb_skip_leb128 (buf, buf_end);
      if (buf == NULL)
	return -1;
    }
  else if (*buf == DW_OP_regx)
    {
      buf++;
      buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
      if (buf == NULL)
	return -1;
    }
  else
    return -1;
  /* Reject trailing bytes and register numbers that do not fit in
     the int return value.  */
  if (buf != buf_end || (int) dwarf_reg != dwarf_reg)
    return -1;
  return dwarf_reg;
}
498
499 /* If <BUF..BUF_END] contains DW_FORM_block* with just DW_OP_breg*(0) and
500 DW_OP_deref* return the DWARF register number. Otherwise return -1.
501 DEREF_SIZE_RETURN contains -1 for DW_OP_deref; otherwise it contains the
502 size from DW_OP_deref_size. */
503
504 int
505 dwarf_block_to_dwarf_reg_deref (const gdb_byte *buf, const gdb_byte *buf_end,
506 CORE_ADDR *deref_size_return)
507 {
508 uint64_t dwarf_reg;
509 int64_t offset;
510
511 if (buf_end <= buf)
512 return -1;
513
514 if (*buf >= DW_OP_breg0 && *buf <= DW_OP_breg31)
515 {
516 dwarf_reg = *buf - DW_OP_breg0;
517 buf++;
518 if (buf >= buf_end)
519 return -1;
520 }
521 else if (*buf == DW_OP_bregx)
522 {
523 buf++;
524 buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
525 if (buf == NULL)
526 return -1;
527 if ((int) dwarf_reg != dwarf_reg)
528 return -1;
529 }
530 else
531 return -1;
532
533 buf = gdb_read_sleb128 (buf, buf_end, &offset);
534 if (buf == NULL)
535 return -1;
536 if (offset != 0)
537 return -1;
538
539 if (*buf == DW_OP_deref)
540 {
541 buf++;
542 *deref_size_return = -1;
543 }
544 else if (*buf == DW_OP_deref_size)
545 {
546 buf++;
547 if (buf >= buf_end)
548 return -1;
549 *deref_size_return = *buf++;
550 }
551 else
552 return -1;
553
554 if (buf != buf_end)
555 return -1;
556
557 return dwarf_reg;
558 }
559
/* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_fbreg(X) fill
   in FB_OFFSET_RETURN with the X offset and return 1.  Otherwise return 0.  */

int
dwarf_block_to_fb_offset (const gdb_byte *buf, const gdb_byte *buf_end,
			  CORE_ADDR *fb_offset_return)
{
  int64_t fb_offset;

  if (buf_end <= buf)
    return 0;

  if (*buf != DW_OP_fbreg)
    return 0;
  buf++;

  buf = gdb_read_sleb128 (buf, buf_end, &fb_offset);
  if (buf == NULL)
    return 0;
  /* Reject trailing bytes, and use a round-trip through the output
     parameter to detect offsets that do not fit in CORE_ADDR.  */
  *fb_offset_return = fb_offset;
  if (buf != buf_end || fb_offset != (LONGEST) *fb_offset_return)
    return 0;

  return 1;
}
585
/* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_bregSP(X) fill
   in SP_OFFSET_RETURN with the X offset and return 1.  Otherwise return 0.
   The matched SP register number depends on GDBARCH.  */

int
dwarf_block_to_sp_offset (struct gdbarch *gdbarch, const gdb_byte *buf,
			  const gdb_byte *buf_end, CORE_ADDR *sp_offset_return)
{
  uint64_t dwarf_reg;
  int64_t sp_offset;

  if (buf_end <= buf)
    return 0;
  if (*buf >= DW_OP_breg0 && *buf <= DW_OP_breg31)
    {
      dwarf_reg = *buf - DW_OP_breg0;
      buf++;
    }
  else
    {
      if (*buf != DW_OP_bregx)
	return 0;
      buf++;
      buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
      if (buf == NULL)
	return 0;
    }

  /* Only match if the base register maps to this arch's stack
     pointer.  */
  if (gdbarch_dwarf2_reg_to_regnum (gdbarch, dwarf_reg)
      != gdbarch_sp_regnum (gdbarch))
    return 0;

  buf = gdb_read_sleb128 (buf, buf_end, &sp_offset);
  if (buf == NULL)
    return 0;
  /* Reject trailing bytes, and use a round-trip through the output
     parameter to detect offsets that do not fit in CORE_ADDR.  */
  *sp_offset_return = sp_offset;
  if (buf != buf_end || sp_offset != (LONGEST) *sp_offset_return)
    return 0;

  return 1;
}
627
628 /* The engine for the expression evaluator. Using the context in CTX,
629 evaluate the expression between OP_PTR and OP_END. */
630
631 static void
632 execute_stack_op (struct dwarf_expr_context *ctx,
633 const gdb_byte *op_ptr, const gdb_byte *op_end)
634 {
635 enum bfd_endian byte_order = gdbarch_byte_order (ctx->gdbarch);
636 /* Old-style "untyped" DWARF values need special treatment in a
637 couple of places, specifically DW_OP_mod and DW_OP_shr. We need
638 a special type for these values so we can distinguish them from
639 values that have an explicit type, because explicitly-typed
640 values do not need special treatment. This special type must be
641 different (in the `==' sense) from any base type coming from the
642 CU. */
643 struct type *address_type = dwarf_expr_address_type (ctx);
644
645 ctx->location = DWARF_VALUE_MEMORY;
646 ctx->initialized = 1; /* Default is initialized. */
647
648 if (ctx->recursion_depth > ctx->max_recursion_depth)
649 error (_("DWARF-2 expression error: Loop detected (%d)."),
650 ctx->recursion_depth);
651 ctx->recursion_depth++;
652
653 while (op_ptr < op_end)
654 {
655 enum dwarf_location_atom op = (enum dwarf_location_atom) *op_ptr++;
656 ULONGEST result;
657 /* Assume the value is not in stack memory.
658 Code that knows otherwise sets this to 1.
659 Some arithmetic on stack addresses can probably be assumed to still
660 be a stack address, but we skip this complication for now.
661 This is just an optimization, so it's always ok to punt
662 and leave this as 0. */
663 int in_stack_memory = 0;
664 uint64_t uoffset, reg;
665 int64_t offset;
666 struct value *result_val = NULL;
667
668 /* The DWARF expression might have a bug causing an infinite
669 loop. In that case, quitting is the only way out. */
670 QUIT;
671
672 switch (op)
673 {
674 case DW_OP_lit0:
675 case DW_OP_lit1:
676 case DW_OP_lit2:
677 case DW_OP_lit3:
678 case DW_OP_lit4:
679 case DW_OP_lit5:
680 case DW_OP_lit6:
681 case DW_OP_lit7:
682 case DW_OP_lit8:
683 case DW_OP_lit9:
684 case DW_OP_lit10:
685 case DW_OP_lit11:
686 case DW_OP_lit12:
687 case DW_OP_lit13:
688 case DW_OP_lit14:
689 case DW_OP_lit15:
690 case DW_OP_lit16:
691 case DW_OP_lit17:
692 case DW_OP_lit18:
693 case DW_OP_lit19:
694 case DW_OP_lit20:
695 case DW_OP_lit21:
696 case DW_OP_lit22:
697 case DW_OP_lit23:
698 case DW_OP_lit24:
699 case DW_OP_lit25:
700 case DW_OP_lit26:
701 case DW_OP_lit27:
702 case DW_OP_lit28:
703 case DW_OP_lit29:
704 case DW_OP_lit30:
705 case DW_OP_lit31:
706 result = op - DW_OP_lit0;
707 result_val = value_from_ulongest (address_type, result);
708 break;
709
710 case DW_OP_addr:
711 result = extract_unsigned_integer (op_ptr,
712 ctx->addr_size, byte_order);
713 op_ptr += ctx->addr_size;
714 /* Some versions of GCC emit DW_OP_addr before
715 DW_OP_GNU_push_tls_address. In this case the value is an
716 index, not an address. We don't support things like
717 branching between the address and the TLS op. */
718 if (op_ptr >= op_end || *op_ptr != DW_OP_GNU_push_tls_address)
719 result += ctx->offset;
720 result_val = value_from_ulongest (address_type, result);
721 break;
722
723 case DW_OP_GNU_addr_index:
724 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
725 result = (ctx->funcs->get_addr_index) (ctx->baton, uoffset);
726 result += ctx->offset;
727 result_val = value_from_ulongest (address_type, result);
728 break;
729 case DW_OP_GNU_const_index:
730 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
731 result = (ctx->funcs->get_addr_index) (ctx->baton, uoffset);
732 result_val = value_from_ulongest (address_type, result);
733 break;
734
735 case DW_OP_const1u:
736 result = extract_unsigned_integer (op_ptr, 1, byte_order);
737 result_val = value_from_ulongest (address_type, result);
738 op_ptr += 1;
739 break;
740 case DW_OP_const1s:
741 result = extract_signed_integer (op_ptr, 1, byte_order);
742 result_val = value_from_ulongest (address_type, result);
743 op_ptr += 1;
744 break;
745 case DW_OP_const2u:
746 result = extract_unsigned_integer (op_ptr, 2, byte_order);
747 result_val = value_from_ulongest (address_type, result);
748 op_ptr += 2;
749 break;
750 case DW_OP_const2s:
751 result = extract_signed_integer (op_ptr, 2, byte_order);
752 result_val = value_from_ulongest (address_type, result);
753 op_ptr += 2;
754 break;
755 case DW_OP_const4u:
756 result = extract_unsigned_integer (op_ptr, 4, byte_order);
757 result_val = value_from_ulongest (address_type, result);
758 op_ptr += 4;
759 break;
760 case DW_OP_const4s:
761 result = extract_signed_integer (op_ptr, 4, byte_order);
762 result_val = value_from_ulongest (address_type, result);
763 op_ptr += 4;
764 break;
765 case DW_OP_const8u:
766 result = extract_unsigned_integer (op_ptr, 8, byte_order);
767 result_val = value_from_ulongest (address_type, result);
768 op_ptr += 8;
769 break;
770 case DW_OP_const8s:
771 result = extract_signed_integer (op_ptr, 8, byte_order);
772 result_val = value_from_ulongest (address_type, result);
773 op_ptr += 8;
774 break;
775 case DW_OP_constu:
776 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
777 result = uoffset;
778 result_val = value_from_ulongest (address_type, result);
779 break;
780 case DW_OP_consts:
781 op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset);
782 result = offset;
783 result_val = value_from_ulongest (address_type, result);
784 break;
785
786 /* The DW_OP_reg operations are required to occur alone in
787 location expressions. */
788 case DW_OP_reg0:
789 case DW_OP_reg1:
790 case DW_OP_reg2:
791 case DW_OP_reg3:
792 case DW_OP_reg4:
793 case DW_OP_reg5:
794 case DW_OP_reg6:
795 case DW_OP_reg7:
796 case DW_OP_reg8:
797 case DW_OP_reg9:
798 case DW_OP_reg10:
799 case DW_OP_reg11:
800 case DW_OP_reg12:
801 case DW_OP_reg13:
802 case DW_OP_reg14:
803 case DW_OP_reg15:
804 case DW_OP_reg16:
805 case DW_OP_reg17:
806 case DW_OP_reg18:
807 case DW_OP_reg19:
808 case DW_OP_reg20:
809 case DW_OP_reg21:
810 case DW_OP_reg22:
811 case DW_OP_reg23:
812 case DW_OP_reg24:
813 case DW_OP_reg25:
814 case DW_OP_reg26:
815 case DW_OP_reg27:
816 case DW_OP_reg28:
817 case DW_OP_reg29:
818 case DW_OP_reg30:
819 case DW_OP_reg31:
820 if (op_ptr != op_end
821 && *op_ptr != DW_OP_piece
822 && *op_ptr != DW_OP_bit_piece
823 && *op_ptr != DW_OP_GNU_uninit)
824 error (_("DWARF-2 expression error: DW_OP_reg operations must be "
825 "used either alone or in conjunction with DW_OP_piece "
826 "or DW_OP_bit_piece."));
827
828 result = op - DW_OP_reg0;
829 result_val = value_from_ulongest (address_type, result);
830 ctx->location = DWARF_VALUE_REGISTER;
831 break;
832
833 case DW_OP_regx:
834 op_ptr = safe_read_uleb128 (op_ptr, op_end, &reg);
835 dwarf_expr_require_composition (op_ptr, op_end, "DW_OP_regx");
836
837 result = reg;
838 result_val = value_from_ulongest (address_type, result);
839 ctx->location = DWARF_VALUE_REGISTER;
840 break;
841
842 case DW_OP_implicit_value:
843 {
844 uint64_t len;
845
846 op_ptr = safe_read_uleb128 (op_ptr, op_end, &len);
847 if (op_ptr + len > op_end)
848 error (_("DW_OP_implicit_value: too few bytes available."));
849 ctx->len = len;
850 ctx->data = op_ptr;
851 ctx->location = DWARF_VALUE_LITERAL;
852 op_ptr += len;
853 dwarf_expr_require_composition (op_ptr, op_end,
854 "DW_OP_implicit_value");
855 }
856 goto no_push;
857
858 case DW_OP_stack_value:
859 ctx->location = DWARF_VALUE_STACK;
860 dwarf_expr_require_composition (op_ptr, op_end, "DW_OP_stack_value");
861 goto no_push;
862
863 case DW_OP_GNU_implicit_pointer:
864 {
865 int64_t len;
866
867 if (ctx->ref_addr_size == -1)
868 error (_("DWARF-2 expression error: DW_OP_GNU_implicit_pointer "
869 "is not allowed in frame context"));
870
871 /* The referred-to DIE of sect_offset kind. */
872 ctx->len = extract_unsigned_integer (op_ptr, ctx->ref_addr_size,
873 byte_order);
874 op_ptr += ctx->ref_addr_size;
875
876 /* The byte offset into the data. */
877 op_ptr = safe_read_sleb128 (op_ptr, op_end, &len);
878 result = (ULONGEST) len;
879 result_val = value_from_ulongest (address_type, result);
880
881 ctx->location = DWARF_VALUE_IMPLICIT_POINTER;
882 dwarf_expr_require_composition (op_ptr, op_end,
883 "DW_OP_GNU_implicit_pointer");
884 }
885 break;
886
887 case DW_OP_breg0:
888 case DW_OP_breg1:
889 case DW_OP_breg2:
890 case DW_OP_breg3:
891 case DW_OP_breg4:
892 case DW_OP_breg5:
893 case DW_OP_breg6:
894 case DW_OP_breg7:
895 case DW_OP_breg8:
896 case DW_OP_breg9:
897 case DW_OP_breg10:
898 case DW_OP_breg11:
899 case DW_OP_breg12:
900 case DW_OP_breg13:
901 case DW_OP_breg14:
902 case DW_OP_breg15:
903 case DW_OP_breg16:
904 case DW_OP_breg17:
905 case DW_OP_breg18:
906 case DW_OP_breg19:
907 case DW_OP_breg20:
908 case DW_OP_breg21:
909 case DW_OP_breg22:
910 case DW_OP_breg23:
911 case DW_OP_breg24:
912 case DW_OP_breg25:
913 case DW_OP_breg26:
914 case DW_OP_breg27:
915 case DW_OP_breg28:
916 case DW_OP_breg29:
917 case DW_OP_breg30:
918 case DW_OP_breg31:
919 {
920 op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset);
921 result = (ctx->funcs->read_addr_from_reg) (ctx->baton,
922 op - DW_OP_breg0);
923 result += offset;
924 result_val = value_from_ulongest (address_type, result);
925 }
926 break;
927 case DW_OP_bregx:
928 {
929 op_ptr = safe_read_uleb128 (op_ptr, op_end, &reg);
930 op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset);
931 result = (ctx->funcs->read_addr_from_reg) (ctx->baton, reg);
932 result += offset;
933 result_val = value_from_ulongest (address_type, result);
934 }
935 break;
936 case DW_OP_fbreg:
937 {
938 const gdb_byte *datastart;
939 size_t datalen;
940 unsigned int before_stack_len;
941
942 op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset);
943 /* Rather than create a whole new context, we simply
944 record the stack length before execution, then reset it
945 afterwards, effectively erasing whatever the recursive
946 call put there. */
947 before_stack_len = ctx->stack_len;
948 /* FIXME: cagney/2003-03-26: This code should be using
949 get_frame_base_address(), and then implement a dwarf2
950 specific this_base method. */
951 (ctx->funcs->get_frame_base) (ctx->baton, &datastart, &datalen);
952 dwarf_expr_eval (ctx, datastart, datalen);
953 if (ctx->location == DWARF_VALUE_MEMORY)
954 result = dwarf_expr_fetch_address (ctx, 0);
955 else if (ctx->location == DWARF_VALUE_REGISTER)
956 result = (ctx->funcs->read_addr_from_reg)
957 (ctx->baton,
958 value_as_long (dwarf_expr_fetch (ctx, 0)));
959 else
960 error (_("Not implemented: computing frame "
961 "base using explicit value operator"));
962 result = result + offset;
963 result_val = value_from_ulongest (address_type, result);
964 in_stack_memory = 1;
965 ctx->stack_len = before_stack_len;
966 ctx->location = DWARF_VALUE_MEMORY;
967 }
968 break;
969
970 case DW_OP_dup:
971 result_val = dwarf_expr_fetch (ctx, 0);
972 in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, 0);
973 break;
974
975 case DW_OP_drop:
976 dwarf_expr_pop (ctx);
977 goto no_push;
978
979 case DW_OP_pick:
980 offset = *op_ptr++;
981 result_val = dwarf_expr_fetch (ctx, offset);
982 in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, offset);
983 break;
984
985 case DW_OP_swap:
986 {
987 struct dwarf_stack_value t1, t2;
988
989 if (ctx->stack_len < 2)
990 error (_("Not enough elements for "
991 "DW_OP_swap. Need 2, have %d."),
992 ctx->stack_len);
993 t1 = ctx->stack[ctx->stack_len - 1];
994 t2 = ctx->stack[ctx->stack_len - 2];
995 ctx->stack[ctx->stack_len - 1] = t2;
996 ctx->stack[ctx->stack_len - 2] = t1;
997 goto no_push;
998 }
999
1000 case DW_OP_over:
1001 result_val = dwarf_expr_fetch (ctx, 1);
1002 in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, 1);
1003 break;
1004
1005 case DW_OP_rot:
1006 {
1007 struct dwarf_stack_value t1, t2, t3;
1008
1009 if (ctx->stack_len < 3)
1010 error (_("Not enough elements for "
1011 "DW_OP_rot. Need 3, have %d."),
1012 ctx->stack_len);
1013 t1 = ctx->stack[ctx->stack_len - 1];
1014 t2 = ctx->stack[ctx->stack_len - 2];
1015 t3 = ctx->stack[ctx->stack_len - 3];
1016 ctx->stack[ctx->stack_len - 1] = t2;
1017 ctx->stack[ctx->stack_len - 2] = t3;
1018 ctx->stack[ctx->stack_len - 3] = t1;
1019 goto no_push;
1020 }
1021
1022 case DW_OP_deref:
1023 case DW_OP_deref_size:
1024 case DW_OP_GNU_deref_type:
1025 {
1026 int addr_size = (op == DW_OP_deref ? ctx->addr_size : *op_ptr++);
1027 gdb_byte *buf = (gdb_byte *) alloca (addr_size);
1028 CORE_ADDR addr = dwarf_expr_fetch_address (ctx, 0);
1029 struct type *type;
1030
1031 dwarf_expr_pop (ctx);
1032
1033 if (op == DW_OP_GNU_deref_type)
1034 {
1035 cu_offset type_die;
1036
1037 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
1038 type_die.cu_off = uoffset;
1039 type = dwarf_get_base_type (ctx, type_die, 0);
1040 }
1041 else
1042 type = address_type;
1043
1044 (ctx->funcs->read_mem) (ctx->baton, buf, addr, addr_size);
1045
1046 /* If the size of the object read from memory is different
1047 from the type length, we need to zero-extend it. */
1048 if (TYPE_LENGTH (type) != addr_size)
1049 {
1050 ULONGEST result =
1051 extract_unsigned_integer (buf, addr_size, byte_order);
1052
1053 buf = (gdb_byte *) alloca (TYPE_LENGTH (type));
1054 store_unsigned_integer (buf, TYPE_LENGTH (type),
1055 byte_order, result);
1056 }
1057
1058 result_val = value_from_contents_and_address (type, buf, addr);
1059 break;
1060 }
1061
1062 case DW_OP_abs:
1063 case DW_OP_neg:
1064 case DW_OP_not:
1065 case DW_OP_plus_uconst:
1066 {
1067 /* Unary operations. */
1068 result_val = dwarf_expr_fetch (ctx, 0);
1069 dwarf_expr_pop (ctx);
1070
1071 switch (op)
1072 {
1073 case DW_OP_abs:
1074 if (value_less (result_val,
1075 value_zero (value_type (result_val), not_lval)))
1076 result_val = value_neg (result_val);
1077 break;
1078 case DW_OP_neg:
1079 result_val = value_neg (result_val);
1080 break;
1081 case DW_OP_not:
1082 dwarf_require_integral (value_type (result_val));
1083 result_val = value_complement (result_val);
1084 break;
1085 case DW_OP_plus_uconst:
1086 dwarf_require_integral (value_type (result_val));
1087 result = value_as_long (result_val);
1088 op_ptr = safe_read_uleb128 (op_ptr, op_end, &reg);
1089 result += reg;
1090 result_val = value_from_ulongest (address_type, result);
1091 break;
1092 }
1093 }
1094 break;
1095
1096 case DW_OP_and:
1097 case DW_OP_div:
1098 case DW_OP_minus:
1099 case DW_OP_mod:
1100 case DW_OP_mul:
1101 case DW_OP_or:
1102 case DW_OP_plus:
1103 case DW_OP_shl:
1104 case DW_OP_shr:
1105 case DW_OP_shra:
1106 case DW_OP_xor:
1107 case DW_OP_le:
1108 case DW_OP_ge:
1109 case DW_OP_eq:
1110 case DW_OP_lt:
1111 case DW_OP_gt:
1112 case DW_OP_ne:
1113 {
1114 /* Binary operations. */
1115 struct value *first, *second;
1116
1117 second = dwarf_expr_fetch (ctx, 0);
1118 dwarf_expr_pop (ctx);
1119
1120 first = dwarf_expr_fetch (ctx, 0);
1121 dwarf_expr_pop (ctx);
1122
1123 if (! base_types_equal_p (value_type (first), value_type (second)))
1124 error (_("Incompatible types on DWARF stack"));
1125
1126 switch (op)
1127 {
1128 case DW_OP_and:
1129 dwarf_require_integral (value_type (first));
1130 dwarf_require_integral (value_type (second));
1131 result_val = value_binop (first, second, BINOP_BITWISE_AND);
1132 break;
1133 case DW_OP_div:
1134 result_val = value_binop (first, second, BINOP_DIV);
1135 break;
1136 case DW_OP_minus:
1137 result_val = value_binop (first, second, BINOP_SUB);
1138 break;
1139 case DW_OP_mod:
1140 {
1141 int cast_back = 0;
1142 struct type *orig_type = value_type (first);
1143
1144 /* We have to special-case "old-style" untyped values
1145 -- these must have mod computed using unsigned
1146 math. */
1147 if (orig_type == address_type)
1148 {
1149 struct type *utype
1150 = get_unsigned_type (ctx->gdbarch, orig_type);
1151
1152 cast_back = 1;
1153 first = value_cast (utype, first);
1154 second = value_cast (utype, second);
1155 }
1156 /* Note that value_binop doesn't handle float or
1157 decimal float here. This seems unimportant. */
1158 result_val = value_binop (first, second, BINOP_MOD);
1159 if (cast_back)
1160 result_val = value_cast (orig_type, result_val);
1161 }
1162 break;
1163 case DW_OP_mul:
1164 result_val = value_binop (first, second, BINOP_MUL);
1165 break;
1166 case DW_OP_or:
1167 dwarf_require_integral (value_type (first));
1168 dwarf_require_integral (value_type (second));
1169 result_val = value_binop (first, second, BINOP_BITWISE_IOR);
1170 break;
1171 case DW_OP_plus:
1172 result_val = value_binop (first, second, BINOP_ADD);
1173 break;
1174 case DW_OP_shl:
1175 dwarf_require_integral (value_type (first));
1176 dwarf_require_integral (value_type (second));
1177 result_val = value_binop (first, second, BINOP_LSH);
1178 break;
1179 case DW_OP_shr:
1180 dwarf_require_integral (value_type (first));
1181 dwarf_require_integral (value_type (second));
1182 if (!TYPE_UNSIGNED (value_type (first)))
1183 {
1184 struct type *utype
1185 = get_unsigned_type (ctx->gdbarch, value_type (first));
1186
1187 first = value_cast (utype, first);
1188 }
1189
1190 result_val = value_binop (first, second, BINOP_RSH);
1191 /* Make sure we wind up with the same type we started
1192 with. */
1193 if (value_type (result_val) != value_type (second))
1194 result_val = value_cast (value_type (second), result_val);
1195 break;
1196 case DW_OP_shra:
1197 dwarf_require_integral (value_type (first));
1198 dwarf_require_integral (value_type (second));
1199 if (TYPE_UNSIGNED (value_type (first)))
1200 {
1201 struct type *stype
1202 = get_signed_type (ctx->gdbarch, value_type (first));
1203
1204 first = value_cast (stype, first);
1205 }
1206
1207 result_val = value_binop (first, second, BINOP_RSH);
1208 /* Make sure we wind up with the same type we started
1209 with. */
1210 if (value_type (result_val) != value_type (second))
1211 result_val = value_cast (value_type (second), result_val);
1212 break;
1213 case DW_OP_xor:
1214 dwarf_require_integral (value_type (first));
1215 dwarf_require_integral (value_type (second));
1216 result_val = value_binop (first, second, BINOP_BITWISE_XOR);
1217 break;
1218 case DW_OP_le:
1219 /* A <= B is !(B < A). */
1220 result = ! value_less (second, first);
1221 result_val = value_from_ulongest (address_type, result);
1222 break;
1223 case DW_OP_ge:
1224 /* A >= B is !(A < B). */
1225 result = ! value_less (first, second);
1226 result_val = value_from_ulongest (address_type, result);
1227 break;
1228 case DW_OP_eq:
1229 result = value_equal (first, second);
1230 result_val = value_from_ulongest (address_type, result);
1231 break;
1232 case DW_OP_lt:
1233 result = value_less (first, second);
1234 result_val = value_from_ulongest (address_type, result);
1235 break;
1236 case DW_OP_gt:
1237 /* A > B is B < A. */
1238 result = value_less (second, first);
1239 result_val = value_from_ulongest (address_type, result);
1240 break;
1241 case DW_OP_ne:
1242 result = ! value_equal (first, second);
1243 result_val = value_from_ulongest (address_type, result);
1244 break;
1245 default:
1246 internal_error (__FILE__, __LINE__,
1247 _("Can't be reached."));
1248 }
1249 }
1250 break;
1251
1252 case DW_OP_call_frame_cfa:
1253 result = (ctx->funcs->get_frame_cfa) (ctx->baton);
1254 result_val = value_from_ulongest (address_type, result);
1255 in_stack_memory = 1;
1256 break;
1257
1258 case DW_OP_GNU_push_tls_address:
1259 /* Variable is at a constant offset in the thread-local
1260 storage block into the objfile for the current thread and
1261 the dynamic linker module containing this expression. Here
1262 we return returns the offset from that base. The top of the
1263 stack has the offset from the beginning of the thread
1264 control block at which the variable is located. Nothing
1265 should follow this operator, so the top of stack would be
1266 returned. */
1267 result = value_as_long (dwarf_expr_fetch (ctx, 0));
1268 dwarf_expr_pop (ctx);
1269 result = (ctx->funcs->get_tls_address) (ctx->baton, result);
1270 result_val = value_from_ulongest (address_type, result);
1271 break;
1272
1273 case DW_OP_skip:
1274 offset = extract_signed_integer (op_ptr, 2, byte_order);
1275 op_ptr += 2;
1276 op_ptr += offset;
1277 goto no_push;
1278
1279 case DW_OP_bra:
1280 {
1281 struct value *val;
1282
1283 offset = extract_signed_integer (op_ptr, 2, byte_order);
1284 op_ptr += 2;
1285 val = dwarf_expr_fetch (ctx, 0);
1286 dwarf_require_integral (value_type (val));
1287 if (value_as_long (val) != 0)
1288 op_ptr += offset;
1289 dwarf_expr_pop (ctx);
1290 }
1291 goto no_push;
1292
1293 case DW_OP_nop:
1294 goto no_push;
1295
1296 case DW_OP_piece:
1297 {
1298 uint64_t size;
1299
1300 /* Record the piece. */
1301 op_ptr = safe_read_uleb128 (op_ptr, op_end, &size);
1302 add_piece (ctx, 8 * size, 0);
1303
1304 /* Pop off the address/regnum, and reset the location
1305 type. */
1306 if (ctx->location != DWARF_VALUE_LITERAL
1307 && ctx->location != DWARF_VALUE_OPTIMIZED_OUT)
1308 dwarf_expr_pop (ctx);
1309 ctx->location = DWARF_VALUE_MEMORY;
1310 }
1311 goto no_push;
1312
1313 case DW_OP_bit_piece:
1314 {
1315 uint64_t size, offset;
1316
1317 /* Record the piece. */
1318 op_ptr = safe_read_uleb128 (op_ptr, op_end, &size);
1319 op_ptr = safe_read_uleb128 (op_ptr, op_end, &offset);
1320 add_piece (ctx, size, offset);
1321
1322 /* Pop off the address/regnum, and reset the location
1323 type. */
1324 if (ctx->location != DWARF_VALUE_LITERAL
1325 && ctx->location != DWARF_VALUE_OPTIMIZED_OUT)
1326 dwarf_expr_pop (ctx);
1327 ctx->location = DWARF_VALUE_MEMORY;
1328 }
1329 goto no_push;
1330
1331 case DW_OP_GNU_uninit:
1332 if (op_ptr != op_end)
1333 error (_("DWARF-2 expression error: DW_OP_GNU_uninit must always "
1334 "be the very last op."));
1335
1336 ctx->initialized = 0;
1337 goto no_push;
1338
1339 case DW_OP_call2:
1340 {
1341 cu_offset offset;
1342
1343 offset.cu_off = extract_unsigned_integer (op_ptr, 2, byte_order);
1344 op_ptr += 2;
1345 ctx->funcs->dwarf_call (ctx, offset);
1346 }
1347 goto no_push;
1348
1349 case DW_OP_call4:
1350 {
1351 cu_offset offset;
1352
1353 offset.cu_off = extract_unsigned_integer (op_ptr, 4, byte_order);
1354 op_ptr += 4;
1355 ctx->funcs->dwarf_call (ctx, offset);
1356 }
1357 goto no_push;
1358
1359 case DW_OP_GNU_entry_value:
1360 {
1361 uint64_t len;
1362 CORE_ADDR deref_size;
1363 union call_site_parameter_u kind_u;
1364
1365 op_ptr = safe_read_uleb128 (op_ptr, op_end, &len);
1366 if (op_ptr + len > op_end)
1367 error (_("DW_OP_GNU_entry_value: too few bytes available."));
1368
1369 kind_u.dwarf_reg = dwarf_block_to_dwarf_reg (op_ptr, op_ptr + len);
1370 if (kind_u.dwarf_reg != -1)
1371 {
1372 op_ptr += len;
1373 ctx->funcs->push_dwarf_reg_entry_value (ctx,
1374 CALL_SITE_PARAMETER_DWARF_REG,
1375 kind_u,
1376 -1 /* deref_size */);
1377 goto no_push;
1378 }
1379
1380 kind_u.dwarf_reg = dwarf_block_to_dwarf_reg_deref (op_ptr,
1381 op_ptr + len,
1382 &deref_size);
1383 if (kind_u.dwarf_reg != -1)
1384 {
1385 if (deref_size == -1)
1386 deref_size = ctx->addr_size;
1387 op_ptr += len;
1388 ctx->funcs->push_dwarf_reg_entry_value (ctx,
1389 CALL_SITE_PARAMETER_DWARF_REG,
1390 kind_u, deref_size);
1391 goto no_push;
1392 }
1393
1394 error (_("DWARF-2 expression error: DW_OP_GNU_entry_value is "
1395 "supported only for single DW_OP_reg* "
1396 "or for DW_OP_breg*(0)+DW_OP_deref*"));
1397 }
1398
1399 case DW_OP_GNU_parameter_ref:
1400 {
1401 union call_site_parameter_u kind_u;
1402
1403 kind_u.param_offset.cu_off = extract_unsigned_integer (op_ptr, 4,
1404 byte_order);
1405 op_ptr += 4;
1406 ctx->funcs->push_dwarf_reg_entry_value (ctx,
1407 CALL_SITE_PARAMETER_PARAM_OFFSET,
1408 kind_u,
1409 -1 /* deref_size */);
1410 }
1411 goto no_push;
1412
1413 case DW_OP_GNU_const_type:
1414 {
1415 cu_offset type_die;
1416 int n;
1417 const gdb_byte *data;
1418 struct type *type;
1419
1420 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
1421 type_die.cu_off = uoffset;
1422 n = *op_ptr++;
1423 data = op_ptr;
1424 op_ptr += n;
1425
1426 type = dwarf_get_base_type (ctx, type_die, n);
1427 result_val = value_from_contents (type, data);
1428 }
1429 break;
1430
1431 case DW_OP_GNU_regval_type:
1432 {
1433 cu_offset type_die;
1434 struct type *type;
1435
1436 op_ptr = safe_read_uleb128 (op_ptr, op_end, &reg);
1437 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
1438 type_die.cu_off = uoffset;
1439
1440 type = dwarf_get_base_type (ctx, type_die, 0);
1441 result_val = ctx->funcs->get_reg_value (ctx->baton, type, reg);
1442 }
1443 break;
1444
1445 case DW_OP_GNU_convert:
1446 case DW_OP_GNU_reinterpret:
1447 {
1448 cu_offset type_die;
1449 struct type *type;
1450
1451 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
1452 type_die.cu_off = uoffset;
1453
1454 if (type_die.cu_off == 0)
1455 type = address_type;
1456 else
1457 type = dwarf_get_base_type (ctx, type_die, 0);
1458
1459 result_val = dwarf_expr_fetch (ctx, 0);
1460 dwarf_expr_pop (ctx);
1461
1462 if (op == DW_OP_GNU_convert)
1463 result_val = value_cast (type, result_val);
1464 else if (type == value_type (result_val))
1465 {
1466 /* Nothing. */
1467 }
1468 else if (TYPE_LENGTH (type)
1469 != TYPE_LENGTH (value_type (result_val)))
1470 error (_("DW_OP_GNU_reinterpret has wrong size"));
1471 else
1472 result_val
1473 = value_from_contents (type,
1474 value_contents_all (result_val));
1475 }
1476 break;
1477
1478 case DW_OP_push_object_address:
1479 /* Return the address of the object we are currently observing. */
1480 result = (ctx->funcs->get_object_address) (ctx->baton);
1481 result_val = value_from_ulongest (address_type, result);
1482 break;
1483
1484 default:
1485 error (_("Unhandled dwarf expression opcode 0x%x"), op);
1486 }
1487
1488 /* Most things push a result value. */
1489 gdb_assert (result_val != NULL);
1490 dwarf_expr_push (ctx, result_val, in_stack_memory);
1491 no_push:
1492 ;
1493 }
1494
1495 /* To simplify our main caller, if the result is an implicit
1496 pointer, then make a pieced value. This is ok because we can't
1497 have implicit pointers in contexts where pieces are invalid. */
1498 if (ctx->location == DWARF_VALUE_IMPLICIT_POINTER)
1499 add_piece (ctx, 8 * ctx->addr_size, 0);
1500
1501 abort_expression:
1502 ctx->recursion_depth--;
1503 gdb_assert (ctx->recursion_depth >= 0);
1504 }
1505
1506 /* Stub dwarf_expr_context_funcs.get_frame_base implementation. */
1507
1508 void
1509 ctx_no_get_frame_base (void *baton, const gdb_byte **start, size_t *length)
1510 {
1511 error (_("%s is invalid in this context"), "DW_OP_fbreg");
1512 }
1513
1514 /* Stub dwarf_expr_context_funcs.get_frame_cfa implementation. */
1515
1516 CORE_ADDR
1517 ctx_no_get_frame_cfa (void *baton)
1518 {
1519 error (_("%s is invalid in this context"), "DW_OP_call_frame_cfa");
1520 }
1521
1522 /* Stub dwarf_expr_context_funcs.get_frame_pc implementation. */
1523
1524 CORE_ADDR
1525 ctx_no_get_frame_pc (void *baton)
1526 {
1527 error (_("%s is invalid in this context"), "DW_OP_GNU_implicit_pointer");
1528 }
1529
1530 /* Stub dwarf_expr_context_funcs.get_tls_address implementation. */
1531
1532 CORE_ADDR
1533 ctx_no_get_tls_address (void *baton, CORE_ADDR offset)
1534 {
1535 error (_("%s is invalid in this context"), "DW_OP_GNU_push_tls_address");
1536 }
1537
1538 /* Stub dwarf_expr_context_funcs.dwarf_call implementation. */
1539
1540 void
1541 ctx_no_dwarf_call (struct dwarf_expr_context *ctx, cu_offset die_offset)
1542 {
1543 error (_("%s is invalid in this context"), "DW_OP_call*");
1544 }
1545
1546 /* Stub dwarf_expr_context_funcs.get_base_type implementation. */
1547
1548 struct type *
1549 ctx_no_get_base_type (struct dwarf_expr_context *ctx, cu_offset die)
1550 {
1551 error (_("Support for typed DWARF is not supported in this context"));
1552 }
1553
/* Stub dwarf_expr_context_funcs.push_dwarf_reg_entry_value
   implementation.  (The member is push_dwarf_reg_entry_value; an
   earlier comment here named it "push_dwarf_block_entry_value".)

   Unlike the other ctx_no_* stubs this is an internal_error, not a
   user-visible error: reaching it means a caller installed these stub
   callbacks yet still evaluated DW_OP_GNU_entry_value, which is a bug
   in GDB rather than bad user input.  KIND, KIND_U and DEREF_SIZE are
   ignored.  */

void
ctx_no_push_dwarf_reg_entry_value (struct dwarf_expr_context *ctx,
				   enum call_site_parameter_kind kind,
				   union call_site_parameter_u kind_u,
				   int deref_size)
{
  internal_error (__FILE__, __LINE__,
		  _("Support for DW_OP_GNU_entry_value is unimplemented"));
}
1566
1567 /* Stub dwarf_expr_context_funcs.get_addr_index implementation. */
1568
1569 CORE_ADDR
1570 ctx_no_get_addr_index (void *baton, unsigned int index)
1571 {
1572 error (_("%s is invalid in this context"), "DW_OP_GNU_addr_index");
1573 }
1574
1575 /* Provide a prototype to silence -Wmissing-prototypes. */
1576 extern initialize_file_ftype _initialize_dwarf2expr;
1577
1578 void
1579 _initialize_dwarf2expr (void)
1580 {
1581 dwarf_arch_cookie
1582 = gdbarch_data_register_post_init (dwarf_gdbarch_types_init);
1583 }