Add casts for legitimate integer to enum conversions
[deliverable/binutils-gdb.git] / gdb / dwarf2expr.c
1 /* DWARF 2 Expression Evaluator.
2
3 Copyright (C) 2001-2015 Free Software Foundation, Inc.
4
5 Contributed by Daniel Berlin (dan@dberlin.org)
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "symtab.h"
24 #include "gdbtypes.h"
25 #include "value.h"
26 #include "gdbcore.h"
27 #include "dwarf2.h"
28 #include "dwarf2expr.h"
29
30 /* Local prototypes. */
31
32 static void execute_stack_op (struct dwarf_expr_context *,
33 const gdb_byte *, const gdb_byte *);
34
35 /* Cookie for gdbarch data. */
36
37 static struct gdbarch_data *dwarf_arch_cookie;
38
39 /* This holds gdbarch-specific types used by the DWARF expression
40 evaluator. See comments in execute_stack_op. */
41
struct dwarf_gdbarch_types
{
  /* Signed integer types for 2-, 4- and 8-byte addresses, in that
     order.  Slots start NULL and are created lazily by
     dwarf_expr_address_type.  */
  struct type *dw_types[3];
};
46
47 /* Allocate and fill in dwarf_gdbarch_types for an arch. */
48
static void *
dwarf_gdbarch_types_init (struct gdbarch *gdbarch)
{
  /* Zero-allocated on the gdbarch obstack, so every dw_types slot
     starts out NULL.  */
  struct dwarf_gdbarch_types *types
    = GDBARCH_OBSTACK_ZALLOC (gdbarch, struct dwarf_gdbarch_types);

  /* The types themselves are lazily initialized.  */

  return types;
}
59
60 /* Return the type used for DWARF operations where the type is
61 unspecified in the DWARF spec. Only certain sizes are
62 supported. */
63
64 static struct type *
65 dwarf_expr_address_type (struct dwarf_expr_context *ctx)
66 {
67 struct dwarf_gdbarch_types *types = gdbarch_data (ctx->gdbarch,
68 dwarf_arch_cookie);
69 int ndx;
70
71 if (ctx->addr_size == 2)
72 ndx = 0;
73 else if (ctx->addr_size == 4)
74 ndx = 1;
75 else if (ctx->addr_size == 8)
76 ndx = 2;
77 else
78 error (_("Unsupported address size in DWARF expressions: %d bits"),
79 8 * ctx->addr_size);
80
81 if (types->dw_types[ndx] == NULL)
82 types->dw_types[ndx]
83 = arch_integer_type (ctx->gdbarch,
84 8 * ctx->addr_size,
85 0, "<signed DWARF address type>");
86
87 return types->dw_types[ndx];
88 }
89
90 /* Create a new context for the expression evaluator. */
91
92 struct dwarf_expr_context *
93 new_dwarf_expr_context (void)
94 {
95 struct dwarf_expr_context *retval;
96
97 retval = xcalloc (1, sizeof (struct dwarf_expr_context));
98 retval->stack_len = 0;
99 retval->stack_allocated = 10;
100 retval->stack = xmalloc (retval->stack_allocated
101 * sizeof (struct dwarf_stack_value));
102 retval->num_pieces = 0;
103 retval->pieces = 0;
104 retval->max_recursion_depth = 0x100;
105 return retval;
106 }
107
108 /* Release the memory allocated to CTX. */
109
void
free_dwarf_expr_context (struct dwarf_expr_context *ctx)
{
  /* The stack and piece arrays are owned by CTX; release them before
     the context itself.  */
  xfree (ctx->stack);
  xfree (ctx->pieces);
  xfree (ctx);
}
117
118 /* Helper for make_cleanup_free_dwarf_expr_context. */
119
static void
free_dwarf_expr_context_cleanup (void *arg)
{
  /* ARG is the dwarf_expr_context registered by
     make_cleanup_free_dwarf_expr_context.  */
  struct dwarf_expr_context *ctx = arg;

  free_dwarf_expr_context (ctx);
}
125
126 /* Return a cleanup that calls free_dwarf_expr_context. */
127
struct cleanup *
make_cleanup_free_dwarf_expr_context (struct dwarf_expr_context *ctx)
{
  /* When the cleanup runs, CTX and its stack/pieces are freed.  */
  return make_cleanup (free_dwarf_expr_context_cleanup, ctx);
}
133
134 /* Expand the memory allocated to CTX's stack to contain at least
135 NEED more elements than are currently used. */
136
137 static void
138 dwarf_expr_grow_stack (struct dwarf_expr_context *ctx, size_t need)
139 {
140 if (ctx->stack_len + need > ctx->stack_allocated)
141 {
142 size_t newlen = ctx->stack_len + need + 10;
143
144 ctx->stack = xrealloc (ctx->stack,
145 newlen * sizeof (struct dwarf_stack_value));
146 ctx->stack_allocated = newlen;
147 }
148 }
149
150 /* Push VALUE onto CTX's stack. */
151
152 static void
153 dwarf_expr_push (struct dwarf_expr_context *ctx, struct value *value,
154 int in_stack_memory)
155 {
156 struct dwarf_stack_value *v;
157
158 dwarf_expr_grow_stack (ctx, 1);
159 v = &ctx->stack[ctx->stack_len++];
160 v->value = value;
161 v->in_stack_memory = in_stack_memory;
162 }
163
164 /* Push VALUE onto CTX's stack. */
165
166 void
167 dwarf_expr_push_address (struct dwarf_expr_context *ctx, CORE_ADDR value,
168 int in_stack_memory)
169 {
170 dwarf_expr_push (ctx,
171 value_from_ulongest (dwarf_expr_address_type (ctx), value),
172 in_stack_memory);
173 }
174
175 /* Pop the top item off of CTX's stack. */
176
177 static void
178 dwarf_expr_pop (struct dwarf_expr_context *ctx)
179 {
180 if (ctx->stack_len <= 0)
181 error (_("dwarf expression stack underflow"));
182 ctx->stack_len--;
183 }
184
185 /* Retrieve the N'th item on CTX's stack. */
186
187 struct value *
188 dwarf_expr_fetch (struct dwarf_expr_context *ctx, int n)
189 {
190 if (ctx->stack_len <= n)
191 error (_("Asked for position %d of stack, "
192 "stack only has %d elements on it."),
193 n, ctx->stack_len);
194 return ctx->stack[ctx->stack_len - (1 + n)].value;
195 }
196
197 /* Require that TYPE be an integral type; throw an exception if not. */
198
199 static void
200 dwarf_require_integral (struct type *type)
201 {
202 if (TYPE_CODE (type) != TYPE_CODE_INT
203 && TYPE_CODE (type) != TYPE_CODE_CHAR
204 && TYPE_CODE (type) != TYPE_CODE_BOOL)
205 error (_("integral type expected in DWARF expression"));
206 }
207
208 /* Return the unsigned form of TYPE. TYPE is necessarily an integral
209 type. */
210
211 static struct type *
212 get_unsigned_type (struct gdbarch *gdbarch, struct type *type)
213 {
214 switch (TYPE_LENGTH (type))
215 {
216 case 1:
217 return builtin_type (gdbarch)->builtin_uint8;
218 case 2:
219 return builtin_type (gdbarch)->builtin_uint16;
220 case 4:
221 return builtin_type (gdbarch)->builtin_uint32;
222 case 8:
223 return builtin_type (gdbarch)->builtin_uint64;
224 default:
225 error (_("no unsigned variant found for type, while evaluating "
226 "DWARF expression"));
227 }
228 }
229
230 /* Return the signed form of TYPE. TYPE is necessarily an integral
231 type. */
232
233 static struct type *
234 get_signed_type (struct gdbarch *gdbarch, struct type *type)
235 {
236 switch (TYPE_LENGTH (type))
237 {
238 case 1:
239 return builtin_type (gdbarch)->builtin_int8;
240 case 2:
241 return builtin_type (gdbarch)->builtin_int16;
242 case 4:
243 return builtin_type (gdbarch)->builtin_int32;
244 case 8:
245 return builtin_type (gdbarch)->builtin_int64;
246 default:
247 error (_("no signed variant found for type, while evaluating "
248 "DWARF expression"));
249 }
250 }
251
252 /* Retrieve the N'th item on CTX's stack, converted to an address. */
253
CORE_ADDR
dwarf_expr_fetch_address (struct dwarf_expr_context *ctx, int n)
{
  struct value *result_val = dwarf_expr_fetch (ctx, n);
  enum bfd_endian byte_order = gdbarch_byte_order (ctx->gdbarch);
  ULONGEST result;

  /* Only integral stack values can be interpreted as addresses.  */
  dwarf_require_integral (value_type (result_val));
  result = extract_unsigned_integer (value_contents (result_val),
				     TYPE_LENGTH (value_type (result_val)),
				     byte_order);

  /* For most architectures, calling extract_unsigned_integer() alone
     is sufficient for extracting an address.  However, some
     architectures (e.g. MIPS) use signed addresses and using
     extract_unsigned_integer() will not produce a correct
     result.  Make sure we invoke gdbarch_integer_to_address()
     for those architectures which require it.  */
  if (gdbarch_integer_to_address_p (ctx->gdbarch))
    {
      /* Re-serialize the value at the context's address size so the
	 gdbarch hook sees the representation it expects.  */
      gdb_byte *buf = alloca (ctx->addr_size);
      struct type *int_type = get_unsigned_type (ctx->gdbarch,
						 value_type (result_val));

      store_unsigned_integer (buf, ctx->addr_size, byte_order, result);
      return gdbarch_integer_to_address (ctx->gdbarch, int_type, buf);
    }

  return (CORE_ADDR) result;
}
284
285 /* Retrieve the in_stack_memory flag of the N'th item on CTX's stack. */
286
287 int
288 dwarf_expr_fetch_in_stack_memory (struct dwarf_expr_context *ctx, int n)
289 {
290 if (ctx->stack_len <= n)
291 error (_("Asked for position %d of stack, "
292 "stack only has %d elements on it."),
293 n, ctx->stack_len);
294 return ctx->stack[ctx->stack_len - (1 + n)].in_stack_memory;
295 }
296
297 /* Return true if the expression stack is empty. */
298
299 static int
300 dwarf_expr_stack_empty_p (struct dwarf_expr_context *ctx)
301 {
302 return ctx->stack_len == 0;
303 }
304
305 /* Add a new piece to CTX's piece list. */
306 static void
307 add_piece (struct dwarf_expr_context *ctx, ULONGEST size, ULONGEST offset)
308 {
309 struct dwarf_expr_piece *p;
310
311 ctx->num_pieces++;
312
313 ctx->pieces = xrealloc (ctx->pieces,
314 (ctx->num_pieces
315 * sizeof (struct dwarf_expr_piece)));
316
317 p = &ctx->pieces[ctx->num_pieces - 1];
318 p->location = ctx->location;
319 p->size = size;
320 p->offset = offset;
321
322 if (p->location == DWARF_VALUE_LITERAL)
323 {
324 p->v.literal.data = ctx->data;
325 p->v.literal.length = ctx->len;
326 }
327 else if (dwarf_expr_stack_empty_p (ctx))
328 {
329 p->location = DWARF_VALUE_OPTIMIZED_OUT;
330 /* Also reset the context's location, for our callers. This is
331 a somewhat strange approach, but this lets us avoid setting
332 the location to DWARF_VALUE_MEMORY in all the individual
333 cases in the evaluator. */
334 ctx->location = DWARF_VALUE_OPTIMIZED_OUT;
335 }
336 else if (p->location == DWARF_VALUE_MEMORY)
337 {
338 p->v.mem.addr = dwarf_expr_fetch_address (ctx, 0);
339 p->v.mem.in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, 0);
340 }
341 else if (p->location == DWARF_VALUE_IMPLICIT_POINTER)
342 {
343 p->v.ptr.die.sect_off = ctx->len;
344 p->v.ptr.offset = value_as_long (dwarf_expr_fetch (ctx, 0));
345 }
346 else if (p->location == DWARF_VALUE_REGISTER)
347 p->v.regno = value_as_long (dwarf_expr_fetch (ctx, 0));
348 else
349 {
350 p->v.value = dwarf_expr_fetch (ctx, 0);
351 }
352 }
353
354 /* Evaluate the expression at ADDR (LEN bytes long) using the context
355 CTX. */
356
void
dwarf_expr_eval (struct dwarf_expr_context *ctx, const gdb_byte *addr,
		 size_t len)
{
  /* Record the depth so we can verify the evaluator restored it;
     execute_stack_op increments and decrements it around recursion.  */
  int old_recursion_depth = ctx->recursion_depth;

  execute_stack_op (ctx, addr, addr + len);

  /* CTX RECURSION_DEPTH becomes invalid if an exception was thrown here.  */

  gdb_assert (ctx->recursion_depth == old_recursion_depth);
}
369
370 /* Helper to read a uleb128 value or throw an error. */
371
372 const gdb_byte *
373 safe_read_uleb128 (const gdb_byte *buf, const gdb_byte *buf_end,
374 uint64_t *r)
375 {
376 buf = gdb_read_uleb128 (buf, buf_end, r);
377 if (buf == NULL)
378 error (_("DWARF expression error: ran off end of buffer reading uleb128 value"));
379 return buf;
380 }
381
382 /* Helper to read a sleb128 value or throw an error. */
383
384 const gdb_byte *
385 safe_read_sleb128 (const gdb_byte *buf, const gdb_byte *buf_end,
386 int64_t *r)
387 {
388 buf = gdb_read_sleb128 (buf, buf_end, r);
389 if (buf == NULL)
390 error (_("DWARF expression error: ran off end of buffer reading sleb128 value"));
391 return buf;
392 }
393
394 const gdb_byte *
395 safe_skip_leb128 (const gdb_byte *buf, const gdb_byte *buf_end)
396 {
397 buf = gdb_skip_leb128 (buf, buf_end);
398 if (buf == NULL)
399 error (_("DWARF expression error: ran off end of buffer reading leb128 value"));
400 return buf;
401 }
402 \f
403
404 /* Check that the current operator is either at the end of an
405 expression, or that it is followed by a composition operator. */
406
407 void
408 dwarf_expr_require_composition (const gdb_byte *op_ptr, const gdb_byte *op_end,
409 const char *op_name)
410 {
411 /* It seems like DW_OP_GNU_uninit should be handled here. However,
412 it doesn't seem to make sense for DW_OP_*_value, and it was not
413 checked at the other place that this function is called. */
414 if (op_ptr != op_end && *op_ptr != DW_OP_piece && *op_ptr != DW_OP_bit_piece)
415 error (_("DWARF-2 expression error: `%s' operations must be "
416 "used either alone or in conjunction with DW_OP_piece "
417 "or DW_OP_bit_piece."),
418 op_name);
419 }
420
421 /* Return true iff the types T1 and T2 are "the same". This only does
422 checks that might reasonably be needed to compare DWARF base
423 types. */
424
static int
base_types_equal_p (struct type *t1, struct type *t2)
{
  /* Two base types match when their code, signedness and length all
     agree.  */
  return (TYPE_CODE (t1) == TYPE_CODE (t2)
	  && TYPE_UNSIGNED (t1) == TYPE_UNSIGNED (t2)
	  && TYPE_LENGTH (t1) == TYPE_LENGTH (t2));
}
434
435 /* A convenience function to call get_base_type on CTX and return the
436 result. DIE is the DIE whose type we need. SIZE is non-zero if
437 this function should verify that the resulting type has the correct
438 size. */
439
440 static struct type *
441 dwarf_get_base_type (struct dwarf_expr_context *ctx, cu_offset die, int size)
442 {
443 struct type *result;
444
445 if (ctx->funcs->get_base_type)
446 {
447 result = ctx->funcs->get_base_type (ctx, die);
448 if (result == NULL)
449 error (_("Could not find type for DW_OP_GNU_const_type"));
450 if (size != 0 && TYPE_LENGTH (result) != size)
451 error (_("DW_OP_GNU_const_type has different sizes for type and data"));
452 }
453 else
454 /* Anything will do. */
455 result = builtin_type (ctx->gdbarch)->builtin_int;
456
457 return result;
458 }
459
460 /* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_reg* return the
461 DWARF register number. Otherwise return -1. */
462
int
dwarf_block_to_dwarf_reg (const gdb_byte *buf, const gdb_byte *buf_end)
{
  uint64_t dwarf_reg;

  /* An empty block cannot name a register.  */
  if (buf_end <= buf)
    return -1;
  if (*buf >= DW_OP_reg0 && *buf <= DW_OP_reg31)
    {
      /* DW_OP_reg0..31 encode the register in the opcode itself and
	 must be the sole operation in the block.  */
      if (buf_end - buf != 1)
	return -1;
      return *buf - DW_OP_reg0;
    }

  if (*buf == DW_OP_GNU_regval_type)
    {
      /* Operands: uleb128 register number, uleb128 type DIE offset
	 (the type is irrelevant here and merely skipped).  */
      buf++;
      buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
      if (buf == NULL)
	return -1;
      buf = gdb_skip_leb128 (buf, buf_end);
      if (buf == NULL)
	return -1;
    }
  else if (*buf == DW_OP_regx)
    {
      /* Operand: uleb128 register number.  */
      buf++;
      buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
      if (buf == NULL)
	return -1;
    }
  else
    return -1;
  /* Reject trailing bytes and register numbers that do not fit in the
     int return value.  */
  if (buf != buf_end || (int) dwarf_reg != dwarf_reg)
    return -1;
  return dwarf_reg;
}
500
501 /* If <BUF..BUF_END] contains DW_FORM_block* with just DW_OP_breg*(0) and
502 DW_OP_deref* return the DWARF register number. Otherwise return -1.
503 DEREF_SIZE_RETURN contains -1 for DW_OP_deref; otherwise it contains the
504 size from DW_OP_deref_size. */
505
506 int
507 dwarf_block_to_dwarf_reg_deref (const gdb_byte *buf, const gdb_byte *buf_end,
508 CORE_ADDR *deref_size_return)
509 {
510 uint64_t dwarf_reg;
511 int64_t offset;
512
513 if (buf_end <= buf)
514 return -1;
515
516 if (*buf >= DW_OP_breg0 && *buf <= DW_OP_breg31)
517 {
518 dwarf_reg = *buf - DW_OP_breg0;
519 buf++;
520 if (buf >= buf_end)
521 return -1;
522 }
523 else if (*buf == DW_OP_bregx)
524 {
525 buf++;
526 buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
527 if (buf == NULL)
528 return -1;
529 if ((int) dwarf_reg != dwarf_reg)
530 return -1;
531 }
532 else
533 return -1;
534
535 buf = gdb_read_sleb128 (buf, buf_end, &offset);
536 if (buf == NULL)
537 return -1;
538 if (offset != 0)
539 return -1;
540
541 if (*buf == DW_OP_deref)
542 {
543 buf++;
544 *deref_size_return = -1;
545 }
546 else if (*buf == DW_OP_deref_size)
547 {
548 buf++;
549 if (buf >= buf_end)
550 return -1;
551 *deref_size_return = *buf++;
552 }
553 else
554 return -1;
555
556 if (buf != buf_end)
557 return -1;
558
559 return dwarf_reg;
560 }
561
562 /* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_fbreg(X) fill
563 in FB_OFFSET_RETURN with the X offset and return 1. Otherwise return 0. */
564
int
dwarf_block_to_fb_offset (const gdb_byte *buf, const gdb_byte *buf_end,
			  CORE_ADDR *fb_offset_return)
{
  int64_t fb_offset;

  /* An empty block cannot match.  */
  if (buf_end <= buf)
    return 0;

  if (*buf != DW_OP_fbreg)
    return 0;
  buf++;

  /* Operand: sleb128 offset from the frame base.  */
  buf = gdb_read_sleb128 (buf, buf_end, &fb_offset);
  if (buf == NULL)
    return 0;
  *fb_offset_return = fb_offset;
  /* Reject trailing bytes, and offsets that do not survive the
     round-trip through CORE_ADDR.  */
  if (buf != buf_end || fb_offset != (LONGEST) *fb_offset_return)
    return 0;

  return 1;
}
587
588 /* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_bregSP(X) fill
589 in SP_OFFSET_RETURN with the X offset and return 1. Otherwise return 0.
590 The matched SP register number depends on GDBARCH. */
591
int
dwarf_block_to_sp_offset (struct gdbarch *gdbarch, const gdb_byte *buf,
			  const gdb_byte *buf_end, CORE_ADDR *sp_offset_return)
{
  uint64_t dwarf_reg;
  int64_t sp_offset;

  /* An empty block cannot match.  */
  if (buf_end <= buf)
    return 0;
  if (*buf >= DW_OP_breg0 && *buf <= DW_OP_breg31)
    {
      /* DW_OP_breg0..31 encode the register in the opcode.  */
      dwarf_reg = *buf - DW_OP_breg0;
      buf++;
    }
  else
    {
      if (*buf != DW_OP_bregx)
	return 0;
      /* Operand: uleb128 register number.  */
      buf++;
      buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
      if (buf == NULL)
	return 0;
    }

  /* Only the architecture's stack-pointer register qualifies.  */
  if (gdbarch_dwarf2_reg_to_regnum (gdbarch, dwarf_reg)
      != gdbarch_sp_regnum (gdbarch))
    return 0;

  /* Operand: sleb128 offset from SP.  */
  buf = gdb_read_sleb128 (buf, buf_end, &sp_offset);
  if (buf == NULL)
    return 0;
  *sp_offset_return = sp_offset;
  /* Reject trailing bytes, and offsets that do not survive the
     round-trip through CORE_ADDR.  */
  if (buf != buf_end || sp_offset != (LONGEST) *sp_offset_return)
    return 0;

  return 1;
}
629
630 /* The engine for the expression evaluator. Using the context in CTX,
631 evaluate the expression between OP_PTR and OP_END. */
632
633 static void
634 execute_stack_op (struct dwarf_expr_context *ctx,
635 const gdb_byte *op_ptr, const gdb_byte *op_end)
636 {
637 enum bfd_endian byte_order = gdbarch_byte_order (ctx->gdbarch);
638 /* Old-style "untyped" DWARF values need special treatment in a
639 couple of places, specifically DW_OP_mod and DW_OP_shr. We need
640 a special type for these values so we can distinguish them from
641 values that have an explicit type, because explicitly-typed
642 values do not need special treatment. This special type must be
643 different (in the `==' sense) from any base type coming from the
644 CU. */
645 struct type *address_type = dwarf_expr_address_type (ctx);
646
647 ctx->location = DWARF_VALUE_MEMORY;
648 ctx->initialized = 1; /* Default is initialized. */
649
650 if (ctx->recursion_depth > ctx->max_recursion_depth)
651 error (_("DWARF-2 expression error: Loop detected (%d)."),
652 ctx->recursion_depth);
653 ctx->recursion_depth++;
654
655 while (op_ptr < op_end)
656 {
657 enum dwarf_location_atom op = (enum dwarf_location_atom) *op_ptr++;
658 ULONGEST result;
659 /* Assume the value is not in stack memory.
660 Code that knows otherwise sets this to 1.
661 Some arithmetic on stack addresses can probably be assumed to still
662 be a stack address, but we skip this complication for now.
663 This is just an optimization, so it's always ok to punt
664 and leave this as 0. */
665 int in_stack_memory = 0;
666 uint64_t uoffset, reg;
667 int64_t offset;
668 struct value *result_val = NULL;
669
670 /* The DWARF expression might have a bug causing an infinite
671 loop. In that case, quitting is the only way out. */
672 QUIT;
673
674 switch (op)
675 {
676 case DW_OP_lit0:
677 case DW_OP_lit1:
678 case DW_OP_lit2:
679 case DW_OP_lit3:
680 case DW_OP_lit4:
681 case DW_OP_lit5:
682 case DW_OP_lit6:
683 case DW_OP_lit7:
684 case DW_OP_lit8:
685 case DW_OP_lit9:
686 case DW_OP_lit10:
687 case DW_OP_lit11:
688 case DW_OP_lit12:
689 case DW_OP_lit13:
690 case DW_OP_lit14:
691 case DW_OP_lit15:
692 case DW_OP_lit16:
693 case DW_OP_lit17:
694 case DW_OP_lit18:
695 case DW_OP_lit19:
696 case DW_OP_lit20:
697 case DW_OP_lit21:
698 case DW_OP_lit22:
699 case DW_OP_lit23:
700 case DW_OP_lit24:
701 case DW_OP_lit25:
702 case DW_OP_lit26:
703 case DW_OP_lit27:
704 case DW_OP_lit28:
705 case DW_OP_lit29:
706 case DW_OP_lit30:
707 case DW_OP_lit31:
708 result = op - DW_OP_lit0;
709 result_val = value_from_ulongest (address_type, result);
710 break;
711
712 case DW_OP_addr:
713 result = extract_unsigned_integer (op_ptr,
714 ctx->addr_size, byte_order);
715 op_ptr += ctx->addr_size;
716 /* Some versions of GCC emit DW_OP_addr before
717 DW_OP_GNU_push_tls_address. In this case the value is an
718 index, not an address. We don't support things like
719 branching between the address and the TLS op. */
720 if (op_ptr >= op_end || *op_ptr != DW_OP_GNU_push_tls_address)
721 result += ctx->offset;
722 result_val = value_from_ulongest (address_type, result);
723 break;
724
725 case DW_OP_GNU_addr_index:
726 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
727 result = (ctx->funcs->get_addr_index) (ctx->baton, uoffset);
728 result += ctx->offset;
729 result_val = value_from_ulongest (address_type, result);
730 break;
731 case DW_OP_GNU_const_index:
732 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
733 result = (ctx->funcs->get_addr_index) (ctx->baton, uoffset);
734 result_val = value_from_ulongest (address_type, result);
735 break;
736
737 case DW_OP_const1u:
738 result = extract_unsigned_integer (op_ptr, 1, byte_order);
739 result_val = value_from_ulongest (address_type, result);
740 op_ptr += 1;
741 break;
742 case DW_OP_const1s:
743 result = extract_signed_integer (op_ptr, 1, byte_order);
744 result_val = value_from_ulongest (address_type, result);
745 op_ptr += 1;
746 break;
747 case DW_OP_const2u:
748 result = extract_unsigned_integer (op_ptr, 2, byte_order);
749 result_val = value_from_ulongest (address_type, result);
750 op_ptr += 2;
751 break;
752 case DW_OP_const2s:
753 result = extract_signed_integer (op_ptr, 2, byte_order);
754 result_val = value_from_ulongest (address_type, result);
755 op_ptr += 2;
756 break;
757 case DW_OP_const4u:
758 result = extract_unsigned_integer (op_ptr, 4, byte_order);
759 result_val = value_from_ulongest (address_type, result);
760 op_ptr += 4;
761 break;
762 case DW_OP_const4s:
763 result = extract_signed_integer (op_ptr, 4, byte_order);
764 result_val = value_from_ulongest (address_type, result);
765 op_ptr += 4;
766 break;
767 case DW_OP_const8u:
768 result = extract_unsigned_integer (op_ptr, 8, byte_order);
769 result_val = value_from_ulongest (address_type, result);
770 op_ptr += 8;
771 break;
772 case DW_OP_const8s:
773 result = extract_signed_integer (op_ptr, 8, byte_order);
774 result_val = value_from_ulongest (address_type, result);
775 op_ptr += 8;
776 break;
777 case DW_OP_constu:
778 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
779 result = uoffset;
780 result_val = value_from_ulongest (address_type, result);
781 break;
782 case DW_OP_consts:
783 op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset);
784 result = offset;
785 result_val = value_from_ulongest (address_type, result);
786 break;
787
788 /* The DW_OP_reg operations are required to occur alone in
789 location expressions. */
790 case DW_OP_reg0:
791 case DW_OP_reg1:
792 case DW_OP_reg2:
793 case DW_OP_reg3:
794 case DW_OP_reg4:
795 case DW_OP_reg5:
796 case DW_OP_reg6:
797 case DW_OP_reg7:
798 case DW_OP_reg8:
799 case DW_OP_reg9:
800 case DW_OP_reg10:
801 case DW_OP_reg11:
802 case DW_OP_reg12:
803 case DW_OP_reg13:
804 case DW_OP_reg14:
805 case DW_OP_reg15:
806 case DW_OP_reg16:
807 case DW_OP_reg17:
808 case DW_OP_reg18:
809 case DW_OP_reg19:
810 case DW_OP_reg20:
811 case DW_OP_reg21:
812 case DW_OP_reg22:
813 case DW_OP_reg23:
814 case DW_OP_reg24:
815 case DW_OP_reg25:
816 case DW_OP_reg26:
817 case DW_OP_reg27:
818 case DW_OP_reg28:
819 case DW_OP_reg29:
820 case DW_OP_reg30:
821 case DW_OP_reg31:
822 if (op_ptr != op_end
823 && *op_ptr != DW_OP_piece
824 && *op_ptr != DW_OP_bit_piece
825 && *op_ptr != DW_OP_GNU_uninit)
826 error (_("DWARF-2 expression error: DW_OP_reg operations must be "
827 "used either alone or in conjunction with DW_OP_piece "
828 "or DW_OP_bit_piece."));
829
830 result = op - DW_OP_reg0;
831 result_val = value_from_ulongest (address_type, result);
832 ctx->location = DWARF_VALUE_REGISTER;
833 break;
834
835 case DW_OP_regx:
836 op_ptr = safe_read_uleb128 (op_ptr, op_end, &reg);
837 dwarf_expr_require_composition (op_ptr, op_end, "DW_OP_regx");
838
839 result = reg;
840 result_val = value_from_ulongest (address_type, result);
841 ctx->location = DWARF_VALUE_REGISTER;
842 break;
843
844 case DW_OP_implicit_value:
845 {
846 uint64_t len;
847
848 op_ptr = safe_read_uleb128 (op_ptr, op_end, &len);
849 if (op_ptr + len > op_end)
850 error (_("DW_OP_implicit_value: too few bytes available."));
851 ctx->len = len;
852 ctx->data = op_ptr;
853 ctx->location = DWARF_VALUE_LITERAL;
854 op_ptr += len;
855 dwarf_expr_require_composition (op_ptr, op_end,
856 "DW_OP_implicit_value");
857 }
858 goto no_push;
859
860 case DW_OP_stack_value:
861 ctx->location = DWARF_VALUE_STACK;
862 dwarf_expr_require_composition (op_ptr, op_end, "DW_OP_stack_value");
863 goto no_push;
864
865 case DW_OP_GNU_implicit_pointer:
866 {
867 int64_t len;
868
869 if (ctx->ref_addr_size == -1)
870 error (_("DWARF-2 expression error: DW_OP_GNU_implicit_pointer "
871 "is not allowed in frame context"));
872
873 /* The referred-to DIE of sect_offset kind. */
874 ctx->len = extract_unsigned_integer (op_ptr, ctx->ref_addr_size,
875 byte_order);
876 op_ptr += ctx->ref_addr_size;
877
878 /* The byte offset into the data. */
879 op_ptr = safe_read_sleb128 (op_ptr, op_end, &len);
880 result = (ULONGEST) len;
881 result_val = value_from_ulongest (address_type, result);
882
883 ctx->location = DWARF_VALUE_IMPLICIT_POINTER;
884 dwarf_expr_require_composition (op_ptr, op_end,
885 "DW_OP_GNU_implicit_pointer");
886 }
887 break;
888
889 case DW_OP_breg0:
890 case DW_OP_breg1:
891 case DW_OP_breg2:
892 case DW_OP_breg3:
893 case DW_OP_breg4:
894 case DW_OP_breg5:
895 case DW_OP_breg6:
896 case DW_OP_breg7:
897 case DW_OP_breg8:
898 case DW_OP_breg9:
899 case DW_OP_breg10:
900 case DW_OP_breg11:
901 case DW_OP_breg12:
902 case DW_OP_breg13:
903 case DW_OP_breg14:
904 case DW_OP_breg15:
905 case DW_OP_breg16:
906 case DW_OP_breg17:
907 case DW_OP_breg18:
908 case DW_OP_breg19:
909 case DW_OP_breg20:
910 case DW_OP_breg21:
911 case DW_OP_breg22:
912 case DW_OP_breg23:
913 case DW_OP_breg24:
914 case DW_OP_breg25:
915 case DW_OP_breg26:
916 case DW_OP_breg27:
917 case DW_OP_breg28:
918 case DW_OP_breg29:
919 case DW_OP_breg30:
920 case DW_OP_breg31:
921 {
922 op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset);
923 result = (ctx->funcs->read_addr_from_reg) (ctx->baton,
924 op - DW_OP_breg0);
925 result += offset;
926 result_val = value_from_ulongest (address_type, result);
927 }
928 break;
929 case DW_OP_bregx:
930 {
931 op_ptr = safe_read_uleb128 (op_ptr, op_end, &reg);
932 op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset);
933 result = (ctx->funcs->read_addr_from_reg) (ctx->baton, reg);
934 result += offset;
935 result_val = value_from_ulongest (address_type, result);
936 }
937 break;
938 case DW_OP_fbreg:
939 {
940 const gdb_byte *datastart;
941 size_t datalen;
942 unsigned int before_stack_len;
943
944 op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset);
945 /* Rather than create a whole new context, we simply
946 record the stack length before execution, then reset it
947 afterwards, effectively erasing whatever the recursive
948 call put there. */
949 before_stack_len = ctx->stack_len;
950 /* FIXME: cagney/2003-03-26: This code should be using
951 get_frame_base_address(), and then implement a dwarf2
952 specific this_base method. */
953 (ctx->funcs->get_frame_base) (ctx->baton, &datastart, &datalen);
954 dwarf_expr_eval (ctx, datastart, datalen);
955 if (ctx->location == DWARF_VALUE_MEMORY)
956 result = dwarf_expr_fetch_address (ctx, 0);
957 else if (ctx->location == DWARF_VALUE_REGISTER)
958 result = (ctx->funcs->read_addr_from_reg)
959 (ctx->baton,
960 value_as_long (dwarf_expr_fetch (ctx, 0)));
961 else
962 error (_("Not implemented: computing frame "
963 "base using explicit value operator"));
964 result = result + offset;
965 result_val = value_from_ulongest (address_type, result);
966 in_stack_memory = 1;
967 ctx->stack_len = before_stack_len;
968 ctx->location = DWARF_VALUE_MEMORY;
969 }
970 break;
971
972 case DW_OP_dup:
973 result_val = dwarf_expr_fetch (ctx, 0);
974 in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, 0);
975 break;
976
977 case DW_OP_drop:
978 dwarf_expr_pop (ctx);
979 goto no_push;
980
981 case DW_OP_pick:
982 offset = *op_ptr++;
983 result_val = dwarf_expr_fetch (ctx, offset);
984 in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, offset);
985 break;
986
987 case DW_OP_swap:
988 {
989 struct dwarf_stack_value t1, t2;
990
991 if (ctx->stack_len < 2)
992 error (_("Not enough elements for "
993 "DW_OP_swap. Need 2, have %d."),
994 ctx->stack_len);
995 t1 = ctx->stack[ctx->stack_len - 1];
996 t2 = ctx->stack[ctx->stack_len - 2];
997 ctx->stack[ctx->stack_len - 1] = t2;
998 ctx->stack[ctx->stack_len - 2] = t1;
999 goto no_push;
1000 }
1001
1002 case DW_OP_over:
1003 result_val = dwarf_expr_fetch (ctx, 1);
1004 in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, 1);
1005 break;
1006
1007 case DW_OP_rot:
1008 {
1009 struct dwarf_stack_value t1, t2, t3;
1010
1011 if (ctx->stack_len < 3)
1012 error (_("Not enough elements for "
1013 "DW_OP_rot. Need 3, have %d."),
1014 ctx->stack_len);
1015 t1 = ctx->stack[ctx->stack_len - 1];
1016 t2 = ctx->stack[ctx->stack_len - 2];
1017 t3 = ctx->stack[ctx->stack_len - 3];
1018 ctx->stack[ctx->stack_len - 1] = t2;
1019 ctx->stack[ctx->stack_len - 2] = t3;
1020 ctx->stack[ctx->stack_len - 3] = t1;
1021 goto no_push;
1022 }
1023
1024 case DW_OP_deref:
1025 case DW_OP_deref_size:
1026 case DW_OP_GNU_deref_type:
1027 {
1028 int addr_size = (op == DW_OP_deref ? ctx->addr_size : *op_ptr++);
1029 gdb_byte *buf = alloca (addr_size);
1030 CORE_ADDR addr = dwarf_expr_fetch_address (ctx, 0);
1031 struct type *type;
1032
1033 dwarf_expr_pop (ctx);
1034
1035 if (op == DW_OP_GNU_deref_type)
1036 {
1037 cu_offset type_die;
1038
1039 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
1040 type_die.cu_off = uoffset;
1041 type = dwarf_get_base_type (ctx, type_die, 0);
1042 }
1043 else
1044 type = address_type;
1045
1046 (ctx->funcs->read_mem) (ctx->baton, buf, addr, addr_size);
1047
1048 /* If the size of the object read from memory is different
1049 from the type length, we need to zero-extend it. */
1050 if (TYPE_LENGTH (type) != addr_size)
1051 {
1052 ULONGEST result =
1053 extract_unsigned_integer (buf, addr_size, byte_order);
1054
1055 buf = alloca (TYPE_LENGTH (type));
1056 store_unsigned_integer (buf, TYPE_LENGTH (type),
1057 byte_order, result);
1058 }
1059
1060 result_val = value_from_contents_and_address (type, buf, addr);
1061 break;
1062 }
1063
1064 case DW_OP_abs:
1065 case DW_OP_neg:
1066 case DW_OP_not:
1067 case DW_OP_plus_uconst:
1068 {
1069 /* Unary operations. */
1070 result_val = dwarf_expr_fetch (ctx, 0);
1071 dwarf_expr_pop (ctx);
1072
1073 switch (op)
1074 {
1075 case DW_OP_abs:
1076 if (value_less (result_val,
1077 value_zero (value_type (result_val), not_lval)))
1078 result_val = value_neg (result_val);
1079 break;
1080 case DW_OP_neg:
1081 result_val = value_neg (result_val);
1082 break;
1083 case DW_OP_not:
1084 dwarf_require_integral (value_type (result_val));
1085 result_val = value_complement (result_val);
1086 break;
1087 case DW_OP_plus_uconst:
1088 dwarf_require_integral (value_type (result_val));
1089 result = value_as_long (result_val);
1090 op_ptr = safe_read_uleb128 (op_ptr, op_end, &reg);
1091 result += reg;
1092 result_val = value_from_ulongest (address_type, result);
1093 break;
1094 }
1095 }
1096 break;
1097
1098 case DW_OP_and:
1099 case DW_OP_div:
1100 case DW_OP_minus:
1101 case DW_OP_mod:
1102 case DW_OP_mul:
1103 case DW_OP_or:
1104 case DW_OP_plus:
1105 case DW_OP_shl:
1106 case DW_OP_shr:
1107 case DW_OP_shra:
1108 case DW_OP_xor:
1109 case DW_OP_le:
1110 case DW_OP_ge:
1111 case DW_OP_eq:
1112 case DW_OP_lt:
1113 case DW_OP_gt:
1114 case DW_OP_ne:
1115 {
1116 /* Binary operations. */
1117 struct value *first, *second;
1118
1119 second = dwarf_expr_fetch (ctx, 0);
1120 dwarf_expr_pop (ctx);
1121
1122 first = dwarf_expr_fetch (ctx, 0);
1123 dwarf_expr_pop (ctx);
1124
1125 if (! base_types_equal_p (value_type (first), value_type (second)))
1126 error (_("Incompatible types on DWARF stack"));
1127
1128 switch (op)
1129 {
1130 case DW_OP_and:
1131 dwarf_require_integral (value_type (first));
1132 dwarf_require_integral (value_type (second));
1133 result_val = value_binop (first, second, BINOP_BITWISE_AND);
1134 break;
1135 case DW_OP_div:
1136 result_val = value_binop (first, second, BINOP_DIV);
1137 break;
1138 case DW_OP_minus:
1139 result_val = value_binop (first, second, BINOP_SUB);
1140 break;
1141 case DW_OP_mod:
1142 {
1143 int cast_back = 0;
1144 struct type *orig_type = value_type (first);
1145
1146 /* We have to special-case "old-style" untyped values
1147 -- these must have mod computed using unsigned
1148 math. */
1149 if (orig_type == address_type)
1150 {
1151 struct type *utype
1152 = get_unsigned_type (ctx->gdbarch, orig_type);
1153
1154 cast_back = 1;
1155 first = value_cast (utype, first);
1156 second = value_cast (utype, second);
1157 }
1158 /* Note that value_binop doesn't handle float or
1159 decimal float here. This seems unimportant. */
1160 result_val = value_binop (first, second, BINOP_MOD);
1161 if (cast_back)
1162 result_val = value_cast (orig_type, result_val);
1163 }
1164 break;
1165 case DW_OP_mul:
1166 result_val = value_binop (first, second, BINOP_MUL);
1167 break;
1168 case DW_OP_or:
1169 dwarf_require_integral (value_type (first));
1170 dwarf_require_integral (value_type (second));
1171 result_val = value_binop (first, second, BINOP_BITWISE_IOR);
1172 break;
1173 case DW_OP_plus:
1174 result_val = value_binop (first, second, BINOP_ADD);
1175 break;
1176 case DW_OP_shl:
1177 dwarf_require_integral (value_type (first));
1178 dwarf_require_integral (value_type (second));
1179 result_val = value_binop (first, second, BINOP_LSH);
1180 break;
1181 case DW_OP_shr:
1182 dwarf_require_integral (value_type (first));
1183 dwarf_require_integral (value_type (second));
1184 if (!TYPE_UNSIGNED (value_type (first)))
1185 {
1186 struct type *utype
1187 = get_unsigned_type (ctx->gdbarch, value_type (first));
1188
1189 first = value_cast (utype, first);
1190 }
1191
1192 result_val = value_binop (first, second, BINOP_RSH);
1193 /* Make sure we wind up with the same type we started
1194 with. */
1195 if (value_type (result_val) != value_type (second))
1196 result_val = value_cast (value_type (second), result_val);
1197 break;
1198 case DW_OP_shra:
1199 dwarf_require_integral (value_type (first));
1200 dwarf_require_integral (value_type (second));
1201 if (TYPE_UNSIGNED (value_type (first)))
1202 {
1203 struct type *stype
1204 = get_signed_type (ctx->gdbarch, value_type (first));
1205
1206 first = value_cast (stype, first);
1207 }
1208
1209 result_val = value_binop (first, second, BINOP_RSH);
1210 /* Make sure we wind up with the same type we started
1211 with. */
1212 if (value_type (result_val) != value_type (second))
1213 result_val = value_cast (value_type (second), result_val);
1214 break;
1215 case DW_OP_xor:
1216 dwarf_require_integral (value_type (first));
1217 dwarf_require_integral (value_type (second));
1218 result_val = value_binop (first, second, BINOP_BITWISE_XOR);
1219 break;
1220 case DW_OP_le:
1221 /* A <= B is !(B < A). */
1222 result = ! value_less (second, first);
1223 result_val = value_from_ulongest (address_type, result);
1224 break;
1225 case DW_OP_ge:
1226 /* A >= B is !(A < B). */
1227 result = ! value_less (first, second);
1228 result_val = value_from_ulongest (address_type, result);
1229 break;
1230 case DW_OP_eq:
1231 result = value_equal (first, second);
1232 result_val = value_from_ulongest (address_type, result);
1233 break;
1234 case DW_OP_lt:
1235 result = value_less (first, second);
1236 result_val = value_from_ulongest (address_type, result);
1237 break;
1238 case DW_OP_gt:
1239 /* A > B is B < A. */
1240 result = value_less (second, first);
1241 result_val = value_from_ulongest (address_type, result);
1242 break;
1243 case DW_OP_ne:
1244 result = ! value_equal (first, second);
1245 result_val = value_from_ulongest (address_type, result);
1246 break;
1247 default:
1248 internal_error (__FILE__, __LINE__,
1249 _("Can't be reached."));
1250 }
1251 }
1252 break;
1253
1254 case DW_OP_call_frame_cfa:
1255 result = (ctx->funcs->get_frame_cfa) (ctx->baton);
1256 result_val = value_from_ulongest (address_type, result);
1257 in_stack_memory = 1;
1258 break;
1259
1260 case DW_OP_GNU_push_tls_address:
1261 /* Variable is at a constant offset in the thread-local
1262 storage block into the objfile for the current thread and
1263 the dynamic linker module containing this expression. Here
1264 we return returns the offset from that base. The top of the
1265 stack has the offset from the beginning of the thread
1266 control block at which the variable is located. Nothing
1267 should follow this operator, so the top of stack would be
1268 returned. */
1269 result = value_as_long (dwarf_expr_fetch (ctx, 0));
1270 dwarf_expr_pop (ctx);
1271 result = (ctx->funcs->get_tls_address) (ctx->baton, result);
1272 result_val = value_from_ulongest (address_type, result);
1273 break;
1274
1275 case DW_OP_skip:
1276 offset = extract_signed_integer (op_ptr, 2, byte_order);
1277 op_ptr += 2;
1278 op_ptr += offset;
1279 goto no_push;
1280
1281 case DW_OP_bra:
1282 {
1283 struct value *val;
1284
1285 offset = extract_signed_integer (op_ptr, 2, byte_order);
1286 op_ptr += 2;
1287 val = dwarf_expr_fetch (ctx, 0);
1288 dwarf_require_integral (value_type (val));
1289 if (value_as_long (val) != 0)
1290 op_ptr += offset;
1291 dwarf_expr_pop (ctx);
1292 }
1293 goto no_push;
1294
1295 case DW_OP_nop:
1296 goto no_push;
1297
1298 case DW_OP_piece:
1299 {
1300 uint64_t size;
1301
1302 /* Record the piece. */
1303 op_ptr = safe_read_uleb128 (op_ptr, op_end, &size);
1304 add_piece (ctx, 8 * size, 0);
1305
1306 /* Pop off the address/regnum, and reset the location
1307 type. */
1308 if (ctx->location != DWARF_VALUE_LITERAL
1309 && ctx->location != DWARF_VALUE_OPTIMIZED_OUT)
1310 dwarf_expr_pop (ctx);
1311 ctx->location = DWARF_VALUE_MEMORY;
1312 }
1313 goto no_push;
1314
1315 case DW_OP_bit_piece:
1316 {
1317 uint64_t size, offset;
1318
1319 /* Record the piece. */
1320 op_ptr = safe_read_uleb128 (op_ptr, op_end, &size);
1321 op_ptr = safe_read_uleb128 (op_ptr, op_end, &offset);
1322 add_piece (ctx, size, offset);
1323
1324 /* Pop off the address/regnum, and reset the location
1325 type. */
1326 if (ctx->location != DWARF_VALUE_LITERAL
1327 && ctx->location != DWARF_VALUE_OPTIMIZED_OUT)
1328 dwarf_expr_pop (ctx);
1329 ctx->location = DWARF_VALUE_MEMORY;
1330 }
1331 goto no_push;
1332
1333 case DW_OP_GNU_uninit:
1334 if (op_ptr != op_end)
1335 error (_("DWARF-2 expression error: DW_OP_GNU_uninit must always "
1336 "be the very last op."));
1337
1338 ctx->initialized = 0;
1339 goto no_push;
1340
1341 case DW_OP_call2:
1342 {
1343 cu_offset offset;
1344
1345 offset.cu_off = extract_unsigned_integer (op_ptr, 2, byte_order);
1346 op_ptr += 2;
1347 ctx->funcs->dwarf_call (ctx, offset);
1348 }
1349 goto no_push;
1350
1351 case DW_OP_call4:
1352 {
1353 cu_offset offset;
1354
1355 offset.cu_off = extract_unsigned_integer (op_ptr, 4, byte_order);
1356 op_ptr += 4;
1357 ctx->funcs->dwarf_call (ctx, offset);
1358 }
1359 goto no_push;
1360
1361 case DW_OP_GNU_entry_value:
1362 {
1363 uint64_t len;
1364 CORE_ADDR deref_size;
1365 union call_site_parameter_u kind_u;
1366
1367 op_ptr = safe_read_uleb128 (op_ptr, op_end, &len);
1368 if (op_ptr + len > op_end)
1369 error (_("DW_OP_GNU_entry_value: too few bytes available."));
1370
1371 kind_u.dwarf_reg = dwarf_block_to_dwarf_reg (op_ptr, op_ptr + len);
1372 if (kind_u.dwarf_reg != -1)
1373 {
1374 op_ptr += len;
1375 ctx->funcs->push_dwarf_reg_entry_value (ctx,
1376 CALL_SITE_PARAMETER_DWARF_REG,
1377 kind_u,
1378 -1 /* deref_size */);
1379 goto no_push;
1380 }
1381
1382 kind_u.dwarf_reg = dwarf_block_to_dwarf_reg_deref (op_ptr,
1383 op_ptr + len,
1384 &deref_size);
1385 if (kind_u.dwarf_reg != -1)
1386 {
1387 if (deref_size == -1)
1388 deref_size = ctx->addr_size;
1389 op_ptr += len;
1390 ctx->funcs->push_dwarf_reg_entry_value (ctx,
1391 CALL_SITE_PARAMETER_DWARF_REG,
1392 kind_u, deref_size);
1393 goto no_push;
1394 }
1395
1396 error (_("DWARF-2 expression error: DW_OP_GNU_entry_value is "
1397 "supported only for single DW_OP_reg* "
1398 "or for DW_OP_breg*(0)+DW_OP_deref*"));
1399 }
1400
1401 case DW_OP_GNU_parameter_ref:
1402 {
1403 union call_site_parameter_u kind_u;
1404
1405 kind_u.param_offset.cu_off = extract_unsigned_integer (op_ptr, 4,
1406 byte_order);
1407 op_ptr += 4;
1408 ctx->funcs->push_dwarf_reg_entry_value (ctx,
1409 CALL_SITE_PARAMETER_PARAM_OFFSET,
1410 kind_u,
1411 -1 /* deref_size */);
1412 }
1413 goto no_push;
1414
1415 case DW_OP_GNU_const_type:
1416 {
1417 cu_offset type_die;
1418 int n;
1419 const gdb_byte *data;
1420 struct type *type;
1421
1422 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
1423 type_die.cu_off = uoffset;
1424 n = *op_ptr++;
1425 data = op_ptr;
1426 op_ptr += n;
1427
1428 type = dwarf_get_base_type (ctx, type_die, n);
1429 result_val = value_from_contents (type, data);
1430 }
1431 break;
1432
1433 case DW_OP_GNU_regval_type:
1434 {
1435 cu_offset type_die;
1436 struct type *type;
1437
1438 op_ptr = safe_read_uleb128 (op_ptr, op_end, &reg);
1439 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
1440 type_die.cu_off = uoffset;
1441
1442 type = dwarf_get_base_type (ctx, type_die, 0);
1443 result_val = ctx->funcs->get_reg_value (ctx->baton, type, reg);
1444 }
1445 break;
1446
1447 case DW_OP_GNU_convert:
1448 case DW_OP_GNU_reinterpret:
1449 {
1450 cu_offset type_die;
1451 struct type *type;
1452
1453 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
1454 type_die.cu_off = uoffset;
1455
1456 if (type_die.cu_off == 0)
1457 type = address_type;
1458 else
1459 type = dwarf_get_base_type (ctx, type_die, 0);
1460
1461 result_val = dwarf_expr_fetch (ctx, 0);
1462 dwarf_expr_pop (ctx);
1463
1464 if (op == DW_OP_GNU_convert)
1465 result_val = value_cast (type, result_val);
1466 else if (type == value_type (result_val))
1467 {
1468 /* Nothing. */
1469 }
1470 else if (TYPE_LENGTH (type)
1471 != TYPE_LENGTH (value_type (result_val)))
1472 error (_("DW_OP_GNU_reinterpret has wrong size"));
1473 else
1474 result_val
1475 = value_from_contents (type,
1476 value_contents_all (result_val));
1477 }
1478 break;
1479
1480 case DW_OP_push_object_address:
1481 /* Return the address of the object we are currently observing. */
1482 result = (ctx->funcs->get_object_address) (ctx->baton);
1483 result_val = value_from_ulongest (address_type, result);
1484 break;
1485
1486 default:
1487 error (_("Unhandled dwarf expression opcode 0x%x"), op);
1488 }
1489
1490 /* Most things push a result value. */
1491 gdb_assert (result_val != NULL);
1492 dwarf_expr_push (ctx, result_val, in_stack_memory);
1493 no_push:
1494 ;
1495 }
1496
1497 /* To simplify our main caller, if the result is an implicit
1498 pointer, then make a pieced value. This is ok because we can't
1499 have implicit pointers in contexts where pieces are invalid. */
1500 if (ctx->location == DWARF_VALUE_IMPLICIT_POINTER)
1501 add_piece (ctx, 8 * ctx->addr_size, 0);
1502
1503 abort_expression:
1504 ctx->recursion_depth--;
1505 gdb_assert (ctx->recursion_depth >= 0);
1506 }
1507
1508 /* Stub dwarf_expr_context_funcs.get_frame_base implementation. */
1509
1510 void
1511 ctx_no_get_frame_base (void *baton, const gdb_byte **start, size_t *length)
1512 {
1513 error (_("%s is invalid in this context"), "DW_OP_fbreg");
1514 }
1515
1516 /* Stub dwarf_expr_context_funcs.get_frame_cfa implementation. */
1517
1518 CORE_ADDR
1519 ctx_no_get_frame_cfa (void *baton)
1520 {
1521 error (_("%s is invalid in this context"), "DW_OP_call_frame_cfa");
1522 }
1523
1524 /* Stub dwarf_expr_context_funcs.get_frame_pc implementation. */
1525
1526 CORE_ADDR
1527 ctx_no_get_frame_pc (void *baton)
1528 {
1529 error (_("%s is invalid in this context"), "DW_OP_GNU_implicit_pointer");
1530 }
1531
1532 /* Stub dwarf_expr_context_funcs.get_tls_address implementation. */
1533
1534 CORE_ADDR
1535 ctx_no_get_tls_address (void *baton, CORE_ADDR offset)
1536 {
1537 error (_("%s is invalid in this context"), "DW_OP_GNU_push_tls_address");
1538 }
1539
1540 /* Stub dwarf_expr_context_funcs.dwarf_call implementation. */
1541
1542 void
1543 ctx_no_dwarf_call (struct dwarf_expr_context *ctx, cu_offset die_offset)
1544 {
1545 error (_("%s is invalid in this context"), "DW_OP_call*");
1546 }
1547
/* Stub dwarf_expr_context_funcs.get_base_type implementation.
   Installed for expression contexts that cannot look up base-type DIEs,
   so any typed DWARF operator (DW_OP_GNU_const_type, DW_OP_GNU_convert,
   etc.) is rejected with an error.  Never returns.  */

struct type *
ctx_no_get_base_type (struct dwarf_expr_context *ctx, cu_offset die)
{
  error (_("Support for typed DWARF is not supported in this context"));
}
1555
/* Stub dwarf_expr_context_funcs.push_dwarf_block_entry_value
   implementation.  Unlike the other ctx_no_* stubs this uses
   internal_error rather than error: reaching it indicates a GDB bug
   (a context that should have rejected DW_OP_GNU_entry_value earlier),
   not bad user input.  Never returns.  */

void
ctx_no_push_dwarf_reg_entry_value (struct dwarf_expr_context *ctx,
				   enum call_site_parameter_kind kind,
				   union call_site_parameter_u kind_u,
				   int deref_size)
{
  internal_error (__FILE__, __LINE__,
		  _("Support for DW_OP_GNU_entry_value is unimplemented"));
}
1568
1569 /* Stub dwarf_expr_context_funcs.get_addr_index implementation. */
1570
1571 CORE_ADDR
1572 ctx_no_get_addr_index (void *baton, unsigned int index)
1573 {
1574 error (_("%s is invalid in this context"), "DW_OP_GNU_addr_index");
1575 }
1576
/* Provide a prototype to silence -Wmissing-prototypes.  */
extern initialize_file_ftype _initialize_dwarf2expr;

void
_initialize_dwarf2expr (void)
{
  /* Register per-gdbarch storage for the DWARF expression evaluator's
     helper types; the types themselves are built lazily (see
     dwarf_gdbarch_types_init and struct dwarf_gdbarch_types).  */
  dwarf_arch_cookie
    = gdbarch_data_register_post_init (dwarf_gdbarch_types_init);
}