Support 128-bit IEEE floating-point types on Intel and Power
[deliverable/binutils-gdb.git] / gdb / dwarf2expr.c
... / ...
CommitLineData
1/* DWARF 2 Expression Evaluator.
2
3 Copyright (C) 2001-2016 Free Software Foundation, Inc.
4
5 Contributed by Daniel Berlin (dan@dberlin.org)
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22#include "defs.h"
23#include "symtab.h"
24#include "gdbtypes.h"
25#include "value.h"
26#include "gdbcore.h"
27#include "dwarf2.h"
28#include "dwarf2expr.h"
29#include "dwarf2loc.h"
30
31/* Local prototypes. */
32
33static void execute_stack_op (struct dwarf_expr_context *,
34 const gdb_byte *, const gdb_byte *);
35
/* Cookie used to register and look up our per-architecture data
   (struct dwarf_gdbarch_types) via the gdbarch_data mechanism.  */

static struct gdbarch_data *dwarf_arch_cookie;
39
/* This holds gdbarch-specific types used by the DWARF expression
   evaluator.  See comments in execute_stack_op.  */

struct dwarf_gdbarch_types
{
  /* Signed integer types used as the "untyped" DWARF address type,
     indexed by address size: [0] = 2 bytes, [1] = 4 bytes,
     [2] = 8 bytes.  Created lazily by dwarf_expr_address_type.  */
  struct type *dw_types[3];
};
47
48/* Allocate and fill in dwarf_gdbarch_types for an arch. */
49
50static void *
51dwarf_gdbarch_types_init (struct gdbarch *gdbarch)
52{
53 struct dwarf_gdbarch_types *types
54 = GDBARCH_OBSTACK_ZALLOC (gdbarch, struct dwarf_gdbarch_types);
55
56 /* The types themselves are lazily initialized. */
57
58 return types;
59}
60
61/* Return the type used for DWARF operations where the type is
62 unspecified in the DWARF spec. Only certain sizes are
63 supported. */
64
65static struct type *
66dwarf_expr_address_type (struct dwarf_expr_context *ctx)
67{
68 struct dwarf_gdbarch_types *types
69 = (struct dwarf_gdbarch_types *) gdbarch_data (ctx->gdbarch,
70 dwarf_arch_cookie);
71 int ndx;
72
73 if (ctx->addr_size == 2)
74 ndx = 0;
75 else if (ctx->addr_size == 4)
76 ndx = 1;
77 else if (ctx->addr_size == 8)
78 ndx = 2;
79 else
80 error (_("Unsupported address size in DWARF expressions: %d bits"),
81 8 * ctx->addr_size);
82
83 if (types->dw_types[ndx] == NULL)
84 types->dw_types[ndx]
85 = arch_integer_type (ctx->gdbarch,
86 8 * ctx->addr_size,
87 0, "<signed DWARF address type>");
88
89 return types->dw_types[ndx];
90}
91
92/* Create a new context for the expression evaluator. */
93
94struct dwarf_expr_context *
95new_dwarf_expr_context (void)
96{
97 struct dwarf_expr_context *retval;
98
99 retval = XCNEW (struct dwarf_expr_context);
100 retval->stack_len = 0;
101 retval->stack_allocated = 10;
102 retval->stack = XNEWVEC (struct dwarf_stack_value, retval->stack_allocated);
103 retval->num_pieces = 0;
104 retval->pieces = 0;
105 retval->max_recursion_depth = 0x100;
106 return retval;
107}
108
109/* Release the memory allocated to CTX. */
110
111void
112free_dwarf_expr_context (struct dwarf_expr_context *ctx)
113{
114 xfree (ctx->stack);
115 xfree (ctx->pieces);
116 xfree (ctx);
117}
118
/* Cleanup callback wrapper around free_dwarf_expr_context, for use
   with make_cleanup_free_dwarf_expr_context.  */

static void
free_dwarf_expr_context_cleanup (void *arg)
{
  struct dwarf_expr_context *ctx = (struct dwarf_expr_context *) arg;

  free_dwarf_expr_context (ctx);
}
126
/* Return a cleanup that, when run, calls free_dwarf_expr_context on
   CTX.  Ownership of CTX transfers to the cleanup chain.  */

struct cleanup *
make_cleanup_free_dwarf_expr_context (struct dwarf_expr_context *ctx)
{
  return make_cleanup (free_dwarf_expr_context_cleanup, ctx);
}
134
135/* Expand the memory allocated to CTX's stack to contain at least
136 NEED more elements than are currently used. */
137
138static void
139dwarf_expr_grow_stack (struct dwarf_expr_context *ctx, size_t need)
140{
141 if (ctx->stack_len + need > ctx->stack_allocated)
142 {
143 size_t newlen = ctx->stack_len + need + 10;
144
145 ctx->stack = XRESIZEVEC (struct dwarf_stack_value, ctx->stack, newlen);
146 ctx->stack_allocated = newlen;
147 }
148}
149
150/* Push VALUE onto CTX's stack. */
151
152static void
153dwarf_expr_push (struct dwarf_expr_context *ctx, struct value *value,
154 int in_stack_memory)
155{
156 struct dwarf_stack_value *v;
157
158 dwarf_expr_grow_stack (ctx, 1);
159 v = &ctx->stack[ctx->stack_len++];
160 v->value = value;
161 v->in_stack_memory = in_stack_memory;
162}
163
164/* Push VALUE onto CTX's stack. */
165
166void
167dwarf_expr_push_address (struct dwarf_expr_context *ctx, CORE_ADDR value,
168 int in_stack_memory)
169{
170 dwarf_expr_push (ctx,
171 value_from_ulongest (dwarf_expr_address_type (ctx), value),
172 in_stack_memory);
173}
174
175/* Pop the top item off of CTX's stack. */
176
177static void
178dwarf_expr_pop (struct dwarf_expr_context *ctx)
179{
180 if (ctx->stack_len <= 0)
181 error (_("dwarf expression stack underflow"));
182 ctx->stack_len--;
183}
184
185/* Retrieve the N'th item on CTX's stack. */
186
187struct value *
188dwarf_expr_fetch (struct dwarf_expr_context *ctx, int n)
189{
190 if (ctx->stack_len <= n)
191 error (_("Asked for position %d of stack, "
192 "stack only has %d elements on it."),
193 n, ctx->stack_len);
194 return ctx->stack[ctx->stack_len - (1 + n)].value;
195}
196
197/* Require that TYPE be an integral type; throw an exception if not. */
198
199static void
200dwarf_require_integral (struct type *type)
201{
202 if (TYPE_CODE (type) != TYPE_CODE_INT
203 && TYPE_CODE (type) != TYPE_CODE_CHAR
204 && TYPE_CODE (type) != TYPE_CODE_BOOL)
205 error (_("integral type expected in DWARF expression"));
206}
207
208/* Return the unsigned form of TYPE. TYPE is necessarily an integral
209 type. */
210
211static struct type *
212get_unsigned_type (struct gdbarch *gdbarch, struct type *type)
213{
214 switch (TYPE_LENGTH (type))
215 {
216 case 1:
217 return builtin_type (gdbarch)->builtin_uint8;
218 case 2:
219 return builtin_type (gdbarch)->builtin_uint16;
220 case 4:
221 return builtin_type (gdbarch)->builtin_uint32;
222 case 8:
223 return builtin_type (gdbarch)->builtin_uint64;
224 default:
225 error (_("no unsigned variant found for type, while evaluating "
226 "DWARF expression"));
227 }
228}
229
230/* Return the signed form of TYPE. TYPE is necessarily an integral
231 type. */
232
233static struct type *
234get_signed_type (struct gdbarch *gdbarch, struct type *type)
235{
236 switch (TYPE_LENGTH (type))
237 {
238 case 1:
239 return builtin_type (gdbarch)->builtin_int8;
240 case 2:
241 return builtin_type (gdbarch)->builtin_int16;
242 case 4:
243 return builtin_type (gdbarch)->builtin_int32;
244 case 8:
245 return builtin_type (gdbarch)->builtin_int64;
246 default:
247 error (_("no signed variant found for type, while evaluating "
248 "DWARF expression"));
249 }
250}
251
/* Retrieve the N'th item on CTX's stack, converted to an address.
   The item must have an integral type; otherwise an error is
   thrown.  */

CORE_ADDR
dwarf_expr_fetch_address (struct dwarf_expr_context *ctx, int n)
{
  struct value *result_val = dwarf_expr_fetch (ctx, n);
  enum bfd_endian byte_order = gdbarch_byte_order (ctx->gdbarch);
  ULONGEST result;

  /* Only integral values can sensibly be converted to an address.  */
  dwarf_require_integral (value_type (result_val));
  result = extract_unsigned_integer (value_contents (result_val),
				     TYPE_LENGTH (value_type (result_val)),
				     byte_order);

  /* For most architectures, calling extract_unsigned_integer() alone
     is sufficient for extracting an address.  However, some
     architectures (e.g. MIPS) use signed addresses and using
     extract_unsigned_integer() will not produce a correct
     result.  Make sure we invoke gdbarch_integer_to_address()
     for those architectures which require it.  */
  if (gdbarch_integer_to_address_p (ctx->gdbarch))
    {
      gdb_byte *buf = (gdb_byte *) alloca (ctx->addr_size);
      struct type *int_type = get_unsigned_type (ctx->gdbarch,
						 value_type (result_val));

      /* NOTE(review): BUF is sized by CTX->addr_size while INT_TYPE
	 follows the value's own length — presumably these always agree
	 for values used as addresses; confirm for values wider than
	 the address size.  */
      store_unsigned_integer (buf, ctx->addr_size, byte_order, result);
      return gdbarch_integer_to_address (ctx->gdbarch, int_type, buf);
    }

  return (CORE_ADDR) result;
}
284
285/* Retrieve the in_stack_memory flag of the N'th item on CTX's stack. */
286
287int
288dwarf_expr_fetch_in_stack_memory (struct dwarf_expr_context *ctx, int n)
289{
290 if (ctx->stack_len <= n)
291 error (_("Asked for position %d of stack, "
292 "stack only has %d elements on it."),
293 n, ctx->stack_len);
294 return ctx->stack[ctx->stack_len - (1 + n)].in_stack_memory;
295}
296
297/* Return true if the expression stack is empty. */
298
299static int
300dwarf_expr_stack_empty_p (struct dwarf_expr_context *ctx)
301{
302 return ctx->stack_len == 0;
303}
304
/* Add a new piece of SIZE (bytes or bits, depending on the piece
   operator) at bit OFFSET to CTX's piece list.  The piece's location
   kind is taken from CTX->location, and its payload is taken from the
   evaluator's current state (literal buffer, stack top, etc.).  */
static void
add_piece (struct dwarf_expr_context *ctx, ULONGEST size, ULONGEST offset)
{
  struct dwarf_expr_piece *p;

  /* Grow the piece array by one and fill in the new slot.  */
  ctx->num_pieces++;

  ctx->pieces
    = XRESIZEVEC (struct dwarf_expr_piece, ctx->pieces, ctx->num_pieces);

  p = &ctx->pieces[ctx->num_pieces - 1];
  p->location = ctx->location;
  p->size = size;
  p->offset = offset;

  if (p->location == DWARF_VALUE_LITERAL)
    {
      /* DW_OP_implicit_value left its data in CTX->data/CTX->len.  */
      p->v.literal.data = ctx->data;
      p->v.literal.length = ctx->len;
    }
  else if (dwarf_expr_stack_empty_p (ctx))
    {
      /* An empty stack means this piece carries no location at all.  */
      p->location = DWARF_VALUE_OPTIMIZED_OUT;
      /* Also reset the context's location, for our callers.  This is
	 a somewhat strange approach, but this lets us avoid setting
	 the location to DWARF_VALUE_MEMORY in all the individual
	 cases in the evaluator.  */
      ctx->location = DWARF_VALUE_OPTIMIZED_OUT;
    }
  else if (p->location == DWARF_VALUE_MEMORY)
    {
      /* The address (and its stack-memory flag) is on top of the
	 stack.  */
      p->v.mem.addr = dwarf_expr_fetch_address (ctx, 0);
      p->v.mem.in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, 0);
    }
  else if (p->location == DWARF_VALUE_IMPLICIT_POINTER)
    {
      /* DW_OP_GNU_implicit_pointer stored the referenced DIE's section
	 offset in CTX->len; the byte offset is on the stack.  */
      p->v.ptr.die.sect_off = ctx->len;
      p->v.ptr.offset = value_as_long (dwarf_expr_fetch (ctx, 0));
    }
  else if (p->location == DWARF_VALUE_REGISTER)
    /* The DWARF register number is on top of the stack.  */
    p->v.regno = value_as_long (dwarf_expr_fetch (ctx, 0));
  else
    {
      /* DWARF_VALUE_STACK: the piece's value is the stack top itself.  */
      p->v.value = dwarf_expr_fetch (ctx, 0);
    }
}
352
/* Evaluate the expression at ADDR (LEN bytes long) using the context
   CTX.  The result is left on CTX's stack and/or piece list; errors
   are reported by throwing.  */

void
dwarf_expr_eval (struct dwarf_expr_context *ctx, const gdb_byte *addr,
		 size_t len)
{
  int old_recursion_depth = ctx->recursion_depth;

  execute_stack_op (ctx, addr, addr + len);

  /* CTX RECURSION_DEPTH becomes invalid if an exception was thrown here.  */

  gdb_assert (ctx->recursion_depth == old_recursion_depth);
}
368
369/* Helper to read a uleb128 value or throw an error. */
370
371const gdb_byte *
372safe_read_uleb128 (const gdb_byte *buf, const gdb_byte *buf_end,
373 uint64_t *r)
374{
375 buf = gdb_read_uleb128 (buf, buf_end, r);
376 if (buf == NULL)
377 error (_("DWARF expression error: ran off end of buffer reading uleb128 value"));
378 return buf;
379}
380
381/* Helper to read a sleb128 value or throw an error. */
382
383const gdb_byte *
384safe_read_sleb128 (const gdb_byte *buf, const gdb_byte *buf_end,
385 int64_t *r)
386{
387 buf = gdb_read_sleb128 (buf, buf_end, r);
388 if (buf == NULL)
389 error (_("DWARF expression error: ran off end of buffer reading sleb128 value"));
390 return buf;
391}
392
393const gdb_byte *
394safe_skip_leb128 (const gdb_byte *buf, const gdb_byte *buf_end)
395{
396 buf = gdb_skip_leb128 (buf, buf_end);
397 if (buf == NULL)
398 error (_("DWARF expression error: ran off end of buffer reading leb128 value"));
399 return buf;
400}
401\f
402
403/* Check that the current operator is either at the end of an
404 expression, or that it is followed by a composition operator. */
405
406void
407dwarf_expr_require_composition (const gdb_byte *op_ptr, const gdb_byte *op_end,
408 const char *op_name)
409{
410 /* It seems like DW_OP_GNU_uninit should be handled here. However,
411 it doesn't seem to make sense for DW_OP_*_value, and it was not
412 checked at the other place that this function is called. */
413 if (op_ptr != op_end && *op_ptr != DW_OP_piece && *op_ptr != DW_OP_bit_piece)
414 error (_("DWARF-2 expression error: `%s' operations must be "
415 "used either alone or in conjunction with DW_OP_piece "
416 "or DW_OP_bit_piece."),
417 op_name);
418}
419
/* Return true iff the types T1 and T2 are "the same".  This only does
   checks that might reasonably be needed to compare DWARF base
   types: type code, signedness, and length.  */

static int
base_types_equal_p (struct type *t1, struct type *t2)
{
  return (TYPE_CODE (t1) == TYPE_CODE (t2)
	  && TYPE_UNSIGNED (t1) == TYPE_UNSIGNED (t2)
	  && TYPE_LENGTH (t1) == TYPE_LENGTH (t2));
}
433
434/* A convenience function to call get_base_type on CTX and return the
435 result. DIE is the DIE whose type we need. SIZE is non-zero if
436 this function should verify that the resulting type has the correct
437 size. */
438
439static struct type *
440dwarf_get_base_type (struct dwarf_expr_context *ctx, cu_offset die, int size)
441{
442 struct type *result;
443
444 if (ctx->funcs->get_base_type)
445 {
446 result = ctx->funcs->get_base_type (ctx, die);
447 if (result == NULL)
448 error (_("Could not find type for DW_OP_GNU_const_type"));
449 if (size != 0 && TYPE_LENGTH (result) != size)
450 error (_("DW_OP_GNU_const_type has different sizes for type and data"));
451 }
452 else
453 /* Anything will do. */
454 result = builtin_type (ctx->gdbarch)->builtin_int;
455
456 return result;
457}
458
/* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_reg* return the
   DWARF register number.  Otherwise return -1.  */

int
dwarf_block_to_dwarf_reg (const gdb_byte *buf, const gdb_byte *buf_end)
{
  uint64_t dwarf_reg;

  if (buf_end <= buf)
    return -1;
  /* A single-byte DW_OP_reg0..DW_OP_reg31 must be the whole block.  */
  if (*buf >= DW_OP_reg0 && *buf <= DW_OP_reg31)
    {
      if (buf_end - buf != 1)
	return -1;
      return *buf - DW_OP_reg0;
    }

  if (*buf == DW_OP_GNU_regval_type)
    {
      /* Operands: the register number, then a type DIE offset (which
	 is skipped — only the register matters here).  */
      buf++;
      buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
      if (buf == NULL)
	return -1;
      buf = gdb_skip_leb128 (buf, buf_end);
      if (buf == NULL)
	return -1;
    }
  else if (*buf == DW_OP_regx)
    {
      /* Operand: the register number as a uleb128.  */
      buf++;
      buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
      if (buf == NULL)
	return -1;
    }
  else
    return -1;
  /* The operation must consume the whole block, and the register
     number must fit in an int (the return type).  */
  if (buf != buf_end || (int) dwarf_reg != dwarf_reg)
    return -1;
  return dwarf_reg;
}
499
500/* If <BUF..BUF_END] contains DW_FORM_block* with just DW_OP_breg*(0) and
501 DW_OP_deref* return the DWARF register number. Otherwise return -1.
502 DEREF_SIZE_RETURN contains -1 for DW_OP_deref; otherwise it contains the
503 size from DW_OP_deref_size. */
504
505int
506dwarf_block_to_dwarf_reg_deref (const gdb_byte *buf, const gdb_byte *buf_end,
507 CORE_ADDR *deref_size_return)
508{
509 uint64_t dwarf_reg;
510 int64_t offset;
511
512 if (buf_end <= buf)
513 return -1;
514
515 if (*buf >= DW_OP_breg0 && *buf <= DW_OP_breg31)
516 {
517 dwarf_reg = *buf - DW_OP_breg0;
518 buf++;
519 if (buf >= buf_end)
520 return -1;
521 }
522 else if (*buf == DW_OP_bregx)
523 {
524 buf++;
525 buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
526 if (buf == NULL)
527 return -1;
528 if ((int) dwarf_reg != dwarf_reg)
529 return -1;
530 }
531 else
532 return -1;
533
534 buf = gdb_read_sleb128 (buf, buf_end, &offset);
535 if (buf == NULL)
536 return -1;
537 if (offset != 0)
538 return -1;
539
540 if (*buf == DW_OP_deref)
541 {
542 buf++;
543 *deref_size_return = -1;
544 }
545 else if (*buf == DW_OP_deref_size)
546 {
547 buf++;
548 if (buf >= buf_end)
549 return -1;
550 *deref_size_return = *buf++;
551 }
552 else
553 return -1;
554
555 if (buf != buf_end)
556 return -1;
557
558 return dwarf_reg;
559}
560
561/* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_fbreg(X) fill
562 in FB_OFFSET_RETURN with the X offset and return 1. Otherwise return 0. */
563
564int
565dwarf_block_to_fb_offset (const gdb_byte *buf, const gdb_byte *buf_end,
566 CORE_ADDR *fb_offset_return)
567{
568 int64_t fb_offset;
569
570 if (buf_end <= buf)
571 return 0;
572
573 if (*buf != DW_OP_fbreg)
574 return 0;
575 buf++;
576
577 buf = gdb_read_sleb128 (buf, buf_end, &fb_offset);
578 if (buf == NULL)
579 return 0;
580 *fb_offset_return = fb_offset;
581 if (buf != buf_end || fb_offset != (LONGEST) *fb_offset_return)
582 return 0;
583
584 return 1;
585}
586
/* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_bregSP(X) fill
   in SP_OFFSET_RETURN with the X offset and return 1.  Otherwise return 0.
   The matched SP register number depends on GDBARCH.  */

int
dwarf_block_to_sp_offset (struct gdbarch *gdbarch, const gdb_byte *buf,
			  const gdb_byte *buf_end, CORE_ADDR *sp_offset_return)
{
  uint64_t dwarf_reg;
  int64_t sp_offset;

  if (buf_end <= buf)
    return 0;
  /* Accept either the short DW_OP_breg0..31 form or DW_OP_bregx.  */
  if (*buf >= DW_OP_breg0 && *buf <= DW_OP_breg31)
    {
      dwarf_reg = *buf - DW_OP_breg0;
      buf++;
    }
  else
    {
      if (*buf != DW_OP_bregx)
	return 0;
      buf++;
      buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
      if (buf == NULL)
	return 0;
    }

  /* The register must map to this architecture's stack pointer.
     NOTE(review): DWARF_REG is uint64_t but dwarf_reg_to_regnum
     presumably takes an int, so a huge uleb128 value would be
     truncated here — unlike dwarf_block_to_dwarf_reg there is no
     (int) dwarf_reg != dwarf_reg guard; confirm whether that matters
     for hostile input.  */
  if (dwarf_reg_to_regnum (gdbarch, dwarf_reg)
      != gdbarch_sp_regnum (gdbarch))
    return 0;

  buf = gdb_read_sleb128 (buf, buf_end, &sp_offset);
  if (buf == NULL)
    return 0;
  *sp_offset_return = sp_offset;
  /* The offset operand must end the block, and the offset must
     survive the round trip through CORE_ADDR.  */
  if (buf != buf_end || sp_offset != (LONGEST) *sp_offset_return)
    return 0;

  return 1;
}
628
629/* The engine for the expression evaluator. Using the context in CTX,
630 evaluate the expression between OP_PTR and OP_END. */
631
632static void
633execute_stack_op (struct dwarf_expr_context *ctx,
634 const gdb_byte *op_ptr, const gdb_byte *op_end)
635{
636 enum bfd_endian byte_order = gdbarch_byte_order (ctx->gdbarch);
637 /* Old-style "untyped" DWARF values need special treatment in a
638 couple of places, specifically DW_OP_mod and DW_OP_shr. We need
639 a special type for these values so we can distinguish them from
640 values that have an explicit type, because explicitly-typed
641 values do not need special treatment. This special type must be
642 different (in the `==' sense) from any base type coming from the
643 CU. */
644 struct type *address_type = dwarf_expr_address_type (ctx);
645
646 ctx->location = DWARF_VALUE_MEMORY;
647 ctx->initialized = 1; /* Default is initialized. */
648
649 if (ctx->recursion_depth > ctx->max_recursion_depth)
650 error (_("DWARF-2 expression error: Loop detected (%d)."),
651 ctx->recursion_depth);
652 ctx->recursion_depth++;
653
654 while (op_ptr < op_end)
655 {
656 enum dwarf_location_atom op = (enum dwarf_location_atom) *op_ptr++;
657 ULONGEST result;
658 /* Assume the value is not in stack memory.
659 Code that knows otherwise sets this to 1.
660 Some arithmetic on stack addresses can probably be assumed to still
661 be a stack address, but we skip this complication for now.
662 This is just an optimization, so it's always ok to punt
663 and leave this as 0. */
664 int in_stack_memory = 0;
665 uint64_t uoffset, reg;
666 int64_t offset;
667 struct value *result_val = NULL;
668
669 /* The DWARF expression might have a bug causing an infinite
670 loop. In that case, quitting is the only way out. */
671 QUIT;
672
673 switch (op)
674 {
675 case DW_OP_lit0:
676 case DW_OP_lit1:
677 case DW_OP_lit2:
678 case DW_OP_lit3:
679 case DW_OP_lit4:
680 case DW_OP_lit5:
681 case DW_OP_lit6:
682 case DW_OP_lit7:
683 case DW_OP_lit8:
684 case DW_OP_lit9:
685 case DW_OP_lit10:
686 case DW_OP_lit11:
687 case DW_OP_lit12:
688 case DW_OP_lit13:
689 case DW_OP_lit14:
690 case DW_OP_lit15:
691 case DW_OP_lit16:
692 case DW_OP_lit17:
693 case DW_OP_lit18:
694 case DW_OP_lit19:
695 case DW_OP_lit20:
696 case DW_OP_lit21:
697 case DW_OP_lit22:
698 case DW_OP_lit23:
699 case DW_OP_lit24:
700 case DW_OP_lit25:
701 case DW_OP_lit26:
702 case DW_OP_lit27:
703 case DW_OP_lit28:
704 case DW_OP_lit29:
705 case DW_OP_lit30:
706 case DW_OP_lit31:
707 result = op - DW_OP_lit0;
708 result_val = value_from_ulongest (address_type, result);
709 break;
710
711 case DW_OP_addr:
712 result = extract_unsigned_integer (op_ptr,
713 ctx->addr_size, byte_order);
714 op_ptr += ctx->addr_size;
715 /* Some versions of GCC emit DW_OP_addr before
716 DW_OP_GNU_push_tls_address. In this case the value is an
717 index, not an address. We don't support things like
718 branching between the address and the TLS op. */
719 if (op_ptr >= op_end || *op_ptr != DW_OP_GNU_push_tls_address)
720 result += ctx->offset;
721 result_val = value_from_ulongest (address_type, result);
722 break;
723
724 case DW_OP_GNU_addr_index:
725 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
726 result = (ctx->funcs->get_addr_index) (ctx->baton, uoffset);
727 result += ctx->offset;
728 result_val = value_from_ulongest (address_type, result);
729 break;
730 case DW_OP_GNU_const_index:
731 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
732 result = (ctx->funcs->get_addr_index) (ctx->baton, uoffset);
733 result_val = value_from_ulongest (address_type, result);
734 break;
735
736 case DW_OP_const1u:
737 result = extract_unsigned_integer (op_ptr, 1, byte_order);
738 result_val = value_from_ulongest (address_type, result);
739 op_ptr += 1;
740 break;
741 case DW_OP_const1s:
742 result = extract_signed_integer (op_ptr, 1, byte_order);
743 result_val = value_from_ulongest (address_type, result);
744 op_ptr += 1;
745 break;
746 case DW_OP_const2u:
747 result = extract_unsigned_integer (op_ptr, 2, byte_order);
748 result_val = value_from_ulongest (address_type, result);
749 op_ptr += 2;
750 break;
751 case DW_OP_const2s:
752 result = extract_signed_integer (op_ptr, 2, byte_order);
753 result_val = value_from_ulongest (address_type, result);
754 op_ptr += 2;
755 break;
756 case DW_OP_const4u:
757 result = extract_unsigned_integer (op_ptr, 4, byte_order);
758 result_val = value_from_ulongest (address_type, result);
759 op_ptr += 4;
760 break;
761 case DW_OP_const4s:
762 result = extract_signed_integer (op_ptr, 4, byte_order);
763 result_val = value_from_ulongest (address_type, result);
764 op_ptr += 4;
765 break;
766 case DW_OP_const8u:
767 result = extract_unsigned_integer (op_ptr, 8, byte_order);
768 result_val = value_from_ulongest (address_type, result);
769 op_ptr += 8;
770 break;
771 case DW_OP_const8s:
772 result = extract_signed_integer (op_ptr, 8, byte_order);
773 result_val = value_from_ulongest (address_type, result);
774 op_ptr += 8;
775 break;
776 case DW_OP_constu:
777 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
778 result = uoffset;
779 result_val = value_from_ulongest (address_type, result);
780 break;
781 case DW_OP_consts:
782 op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset);
783 result = offset;
784 result_val = value_from_ulongest (address_type, result);
785 break;
786
787 /* The DW_OP_reg operations are required to occur alone in
788 location expressions. */
789 case DW_OP_reg0:
790 case DW_OP_reg1:
791 case DW_OP_reg2:
792 case DW_OP_reg3:
793 case DW_OP_reg4:
794 case DW_OP_reg5:
795 case DW_OP_reg6:
796 case DW_OP_reg7:
797 case DW_OP_reg8:
798 case DW_OP_reg9:
799 case DW_OP_reg10:
800 case DW_OP_reg11:
801 case DW_OP_reg12:
802 case DW_OP_reg13:
803 case DW_OP_reg14:
804 case DW_OP_reg15:
805 case DW_OP_reg16:
806 case DW_OP_reg17:
807 case DW_OP_reg18:
808 case DW_OP_reg19:
809 case DW_OP_reg20:
810 case DW_OP_reg21:
811 case DW_OP_reg22:
812 case DW_OP_reg23:
813 case DW_OP_reg24:
814 case DW_OP_reg25:
815 case DW_OP_reg26:
816 case DW_OP_reg27:
817 case DW_OP_reg28:
818 case DW_OP_reg29:
819 case DW_OP_reg30:
820 case DW_OP_reg31:
821 if (op_ptr != op_end
822 && *op_ptr != DW_OP_piece
823 && *op_ptr != DW_OP_bit_piece
824 && *op_ptr != DW_OP_GNU_uninit)
825 error (_("DWARF-2 expression error: DW_OP_reg operations must be "
826 "used either alone or in conjunction with DW_OP_piece "
827 "or DW_OP_bit_piece."));
828
829 result = op - DW_OP_reg0;
830 result_val = value_from_ulongest (address_type, result);
831 ctx->location = DWARF_VALUE_REGISTER;
832 break;
833
834 case DW_OP_regx:
835 op_ptr = safe_read_uleb128 (op_ptr, op_end, &reg);
836 dwarf_expr_require_composition (op_ptr, op_end, "DW_OP_regx");
837
838 result = reg;
839 result_val = value_from_ulongest (address_type, result);
840 ctx->location = DWARF_VALUE_REGISTER;
841 break;
842
843 case DW_OP_implicit_value:
844 {
845 uint64_t len;
846
847 op_ptr = safe_read_uleb128 (op_ptr, op_end, &len);
848 if (op_ptr + len > op_end)
849 error (_("DW_OP_implicit_value: too few bytes available."));
850 ctx->len = len;
851 ctx->data = op_ptr;
852 ctx->location = DWARF_VALUE_LITERAL;
853 op_ptr += len;
854 dwarf_expr_require_composition (op_ptr, op_end,
855 "DW_OP_implicit_value");
856 }
857 goto no_push;
858
859 case DW_OP_stack_value:
860 ctx->location = DWARF_VALUE_STACK;
861 dwarf_expr_require_composition (op_ptr, op_end, "DW_OP_stack_value");
862 goto no_push;
863
864 case DW_OP_GNU_implicit_pointer:
865 {
866 int64_t len;
867
868 if (ctx->ref_addr_size == -1)
869 error (_("DWARF-2 expression error: DW_OP_GNU_implicit_pointer "
870 "is not allowed in frame context"));
871
872 /* The referred-to DIE of sect_offset kind. */
873 ctx->len = extract_unsigned_integer (op_ptr, ctx->ref_addr_size,
874 byte_order);
875 op_ptr += ctx->ref_addr_size;
876
877 /* The byte offset into the data. */
878 op_ptr = safe_read_sleb128 (op_ptr, op_end, &len);
879 result = (ULONGEST) len;
880 result_val = value_from_ulongest (address_type, result);
881
882 ctx->location = DWARF_VALUE_IMPLICIT_POINTER;
883 dwarf_expr_require_composition (op_ptr, op_end,
884 "DW_OP_GNU_implicit_pointer");
885 }
886 break;
887
888 case DW_OP_breg0:
889 case DW_OP_breg1:
890 case DW_OP_breg2:
891 case DW_OP_breg3:
892 case DW_OP_breg4:
893 case DW_OP_breg5:
894 case DW_OP_breg6:
895 case DW_OP_breg7:
896 case DW_OP_breg8:
897 case DW_OP_breg9:
898 case DW_OP_breg10:
899 case DW_OP_breg11:
900 case DW_OP_breg12:
901 case DW_OP_breg13:
902 case DW_OP_breg14:
903 case DW_OP_breg15:
904 case DW_OP_breg16:
905 case DW_OP_breg17:
906 case DW_OP_breg18:
907 case DW_OP_breg19:
908 case DW_OP_breg20:
909 case DW_OP_breg21:
910 case DW_OP_breg22:
911 case DW_OP_breg23:
912 case DW_OP_breg24:
913 case DW_OP_breg25:
914 case DW_OP_breg26:
915 case DW_OP_breg27:
916 case DW_OP_breg28:
917 case DW_OP_breg29:
918 case DW_OP_breg30:
919 case DW_OP_breg31:
920 {
921 op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset);
922 result = (ctx->funcs->read_addr_from_reg) (ctx->baton,
923 op - DW_OP_breg0);
924 result += offset;
925 result_val = value_from_ulongest (address_type, result);
926 }
927 break;
928 case DW_OP_bregx:
929 {
930 op_ptr = safe_read_uleb128 (op_ptr, op_end, &reg);
931 op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset);
932 result = (ctx->funcs->read_addr_from_reg) (ctx->baton, reg);
933 result += offset;
934 result_val = value_from_ulongest (address_type, result);
935 }
936 break;
937 case DW_OP_fbreg:
938 {
939 const gdb_byte *datastart;
940 size_t datalen;
941 unsigned int before_stack_len;
942
943 op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset);
944 /* Rather than create a whole new context, we simply
945 record the stack length before execution, then reset it
946 afterwards, effectively erasing whatever the recursive
947 call put there. */
948 before_stack_len = ctx->stack_len;
949 /* FIXME: cagney/2003-03-26: This code should be using
950 get_frame_base_address(), and then implement a dwarf2
951 specific this_base method. */
952 (ctx->funcs->get_frame_base) (ctx->baton, &datastart, &datalen);
953 dwarf_expr_eval (ctx, datastart, datalen);
954 if (ctx->location == DWARF_VALUE_MEMORY)
955 result = dwarf_expr_fetch_address (ctx, 0);
956 else if (ctx->location == DWARF_VALUE_REGISTER)
957 result = (ctx->funcs->read_addr_from_reg)
958 (ctx->baton,
959 value_as_long (dwarf_expr_fetch (ctx, 0)));
960 else
961 error (_("Not implemented: computing frame "
962 "base using explicit value operator"));
963 result = result + offset;
964 result_val = value_from_ulongest (address_type, result);
965 in_stack_memory = 1;
966 ctx->stack_len = before_stack_len;
967 ctx->location = DWARF_VALUE_MEMORY;
968 }
969 break;
970
971 case DW_OP_dup:
972 result_val = dwarf_expr_fetch (ctx, 0);
973 in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, 0);
974 break;
975
976 case DW_OP_drop:
977 dwarf_expr_pop (ctx);
978 goto no_push;
979
980 case DW_OP_pick:
981 offset = *op_ptr++;
982 result_val = dwarf_expr_fetch (ctx, offset);
983 in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, offset);
984 break;
985
986 case DW_OP_swap:
987 {
988 struct dwarf_stack_value t1, t2;
989
990 if (ctx->stack_len < 2)
991 error (_("Not enough elements for "
992 "DW_OP_swap. Need 2, have %d."),
993 ctx->stack_len);
994 t1 = ctx->stack[ctx->stack_len - 1];
995 t2 = ctx->stack[ctx->stack_len - 2];
996 ctx->stack[ctx->stack_len - 1] = t2;
997 ctx->stack[ctx->stack_len - 2] = t1;
998 goto no_push;
999 }
1000
1001 case DW_OP_over:
1002 result_val = dwarf_expr_fetch (ctx, 1);
1003 in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, 1);
1004 break;
1005
1006 case DW_OP_rot:
1007 {
1008 struct dwarf_stack_value t1, t2, t3;
1009
1010 if (ctx->stack_len < 3)
1011 error (_("Not enough elements for "
1012 "DW_OP_rot. Need 3, have %d."),
1013 ctx->stack_len);
1014 t1 = ctx->stack[ctx->stack_len - 1];
1015 t2 = ctx->stack[ctx->stack_len - 2];
1016 t3 = ctx->stack[ctx->stack_len - 3];
1017 ctx->stack[ctx->stack_len - 1] = t2;
1018 ctx->stack[ctx->stack_len - 2] = t3;
1019 ctx->stack[ctx->stack_len - 3] = t1;
1020 goto no_push;
1021 }
1022
1023 case DW_OP_deref:
1024 case DW_OP_deref_size:
1025 case DW_OP_GNU_deref_type:
1026 {
1027 int addr_size = (op == DW_OP_deref ? ctx->addr_size : *op_ptr++);
1028 gdb_byte *buf = (gdb_byte *) alloca (addr_size);
1029 CORE_ADDR addr = dwarf_expr_fetch_address (ctx, 0);
1030 struct type *type;
1031
1032 dwarf_expr_pop (ctx);
1033
1034 if (op == DW_OP_GNU_deref_type)
1035 {
1036 cu_offset type_die;
1037
1038 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
1039 type_die.cu_off = uoffset;
1040 type = dwarf_get_base_type (ctx, type_die, 0);
1041 }
1042 else
1043 type = address_type;
1044
1045 (ctx->funcs->read_mem) (ctx->baton, buf, addr, addr_size);
1046
1047 /* If the size of the object read from memory is different
1048 from the type length, we need to zero-extend it. */
1049 if (TYPE_LENGTH (type) != addr_size)
1050 {
1051 ULONGEST result =
1052 extract_unsigned_integer (buf, addr_size, byte_order);
1053
1054 buf = (gdb_byte *) alloca (TYPE_LENGTH (type));
1055 store_unsigned_integer (buf, TYPE_LENGTH (type),
1056 byte_order, result);
1057 }
1058
1059 result_val = value_from_contents_and_address (type, buf, addr);
1060 break;
1061 }
1062
1063 case DW_OP_abs:
1064 case DW_OP_neg:
1065 case DW_OP_not:
1066 case DW_OP_plus_uconst:
1067 {
1068 /* Unary operations. */
1069 result_val = dwarf_expr_fetch (ctx, 0);
1070 dwarf_expr_pop (ctx);
1071
1072 switch (op)
1073 {
1074 case DW_OP_abs:
1075 if (value_less (result_val,
1076 value_zero (value_type (result_val), not_lval)))
1077 result_val = value_neg (result_val);
1078 break;
1079 case DW_OP_neg:
1080 result_val = value_neg (result_val);
1081 break;
1082 case DW_OP_not:
1083 dwarf_require_integral (value_type (result_val));
1084 result_val = value_complement (result_val);
1085 break;
1086 case DW_OP_plus_uconst:
1087 dwarf_require_integral (value_type (result_val));
1088 result = value_as_long (result_val);
1089 op_ptr = safe_read_uleb128 (op_ptr, op_end, &reg);
1090 result += reg;
1091 result_val = value_from_ulongest (address_type, result);
1092 break;
1093 }
1094 }
1095 break;
1096
1097 case DW_OP_and:
1098 case DW_OP_div:
1099 case DW_OP_minus:
1100 case DW_OP_mod:
1101 case DW_OP_mul:
1102 case DW_OP_or:
1103 case DW_OP_plus:
1104 case DW_OP_shl:
1105 case DW_OP_shr:
1106 case DW_OP_shra:
1107 case DW_OP_xor:
1108 case DW_OP_le:
1109 case DW_OP_ge:
1110 case DW_OP_eq:
1111 case DW_OP_lt:
1112 case DW_OP_gt:
1113 case DW_OP_ne:
1114 {
1115 /* Binary operations. */
1116 struct value *first, *second;
1117
1118 second = dwarf_expr_fetch (ctx, 0);
1119 dwarf_expr_pop (ctx);
1120
1121 first = dwarf_expr_fetch (ctx, 0);
1122 dwarf_expr_pop (ctx);
1123
1124 if (! base_types_equal_p (value_type (first), value_type (second)))
1125 error (_("Incompatible types on DWARF stack"));
1126
1127 switch (op)
1128 {
1129 case DW_OP_and:
1130 dwarf_require_integral (value_type (first));
1131 dwarf_require_integral (value_type (second));
1132 result_val = value_binop (first, second, BINOP_BITWISE_AND);
1133 break;
1134 case DW_OP_div:
1135 result_val = value_binop (first, second, BINOP_DIV);
1136 break;
1137 case DW_OP_minus:
1138 result_val = value_binop (first, second, BINOP_SUB);
1139 break;
1140 case DW_OP_mod:
1141 {
1142 int cast_back = 0;
1143 struct type *orig_type = value_type (first);
1144
1145 /* We have to special-case "old-style" untyped values
1146 -- these must have mod computed using unsigned
1147 math. */
1148 if (orig_type == address_type)
1149 {
1150 struct type *utype
1151 = get_unsigned_type (ctx->gdbarch, orig_type);
1152
1153 cast_back = 1;
1154 first = value_cast (utype, first);
1155 second = value_cast (utype, second);
1156 }
1157 /* Note that value_binop doesn't handle float or
1158 decimal float here. This seems unimportant. */
1159 result_val = value_binop (first, second, BINOP_MOD);
1160 if (cast_back)
1161 result_val = value_cast (orig_type, result_val);
1162 }
1163 break;
1164 case DW_OP_mul:
1165 result_val = value_binop (first, second, BINOP_MUL);
1166 break;
1167 case DW_OP_or:
1168 dwarf_require_integral (value_type (first));
1169 dwarf_require_integral (value_type (second));
1170 result_val = value_binop (first, second, BINOP_BITWISE_IOR);
1171 break;
1172 case DW_OP_plus:
1173 result_val = value_binop (first, second, BINOP_ADD);
1174 break;
1175 case DW_OP_shl:
1176 dwarf_require_integral (value_type (first));
1177 dwarf_require_integral (value_type (second));
1178 result_val = value_binop (first, second, BINOP_LSH);
1179 break;
1180 case DW_OP_shr:
1181 dwarf_require_integral (value_type (first));
1182 dwarf_require_integral (value_type (second));
1183 if (!TYPE_UNSIGNED (value_type (first)))
1184 {
1185 struct type *utype
1186 = get_unsigned_type (ctx->gdbarch, value_type (first));
1187
1188 first = value_cast (utype, first);
1189 }
1190
1191 result_val = value_binop (first, second, BINOP_RSH);
1192 /* Make sure we wind up with the same type we started
1193 with. */
1194 if (value_type (result_val) != value_type (second))
1195 result_val = value_cast (value_type (second), result_val);
1196 break;
1197 case DW_OP_shra:
1198 dwarf_require_integral (value_type (first));
1199 dwarf_require_integral (value_type (second));
1200 if (TYPE_UNSIGNED (value_type (first)))
1201 {
1202 struct type *stype
1203 = get_signed_type (ctx->gdbarch, value_type (first));
1204
1205 first = value_cast (stype, first);
1206 }
1207
1208 result_val = value_binop (first, second, BINOP_RSH);
1209 /* Make sure we wind up with the same type we started
1210 with. */
1211 if (value_type (result_val) != value_type (second))
1212 result_val = value_cast (value_type (second), result_val);
1213 break;
1214 case DW_OP_xor:
1215 dwarf_require_integral (value_type (first));
1216 dwarf_require_integral (value_type (second));
1217 result_val = value_binop (first, second, BINOP_BITWISE_XOR);
1218 break;
1219 case DW_OP_le:
1220 /* A <= B is !(B < A). */
1221 result = ! value_less (second, first);
1222 result_val = value_from_ulongest (address_type, result);
1223 break;
1224 case DW_OP_ge:
1225 /* A >= B is !(A < B). */
1226 result = ! value_less (first, second);
1227 result_val = value_from_ulongest (address_type, result);
1228 break;
1229 case DW_OP_eq:
1230 result = value_equal (first, second);
1231 result_val = value_from_ulongest (address_type, result);
1232 break;
1233 case DW_OP_lt:
1234 result = value_less (first, second);
1235 result_val = value_from_ulongest (address_type, result);
1236 break;
1237 case DW_OP_gt:
1238 /* A > B is B < A. */
1239 result = value_less (second, first);
1240 result_val = value_from_ulongest (address_type, result);
1241 break;
1242 case DW_OP_ne:
1243 result = ! value_equal (first, second);
1244 result_val = value_from_ulongest (address_type, result);
1245 break;
1246 default:
1247 internal_error (__FILE__, __LINE__,
1248 _("Can't be reached."));
1249 }
1250 }
1251 break;
1252
1253 case DW_OP_call_frame_cfa:
1254 result = (ctx->funcs->get_frame_cfa) (ctx->baton);
1255 result_val = value_from_ulongest (address_type, result);
1256 in_stack_memory = 1;
1257 break;
1258
1259 case DW_OP_GNU_push_tls_address:
1260 case DW_OP_form_tls_address:
1261 /* Variable is at a constant offset in the thread-local
1262 storage block into the objfile for the current thread and
1263 the dynamic linker module containing this expression. Here
1264 we return returns the offset from that base. The top of the
1265 stack has the offset from the beginning of the thread
1266 control block at which the variable is located. Nothing
1267 should follow this operator, so the top of stack would be
1268 returned. */
1269 result = value_as_long (dwarf_expr_fetch (ctx, 0));
1270 dwarf_expr_pop (ctx);
1271 result = (ctx->funcs->get_tls_address) (ctx->baton, result);
1272 result_val = value_from_ulongest (address_type, result);
1273 break;
1274
1275 case DW_OP_skip:
1276 offset = extract_signed_integer (op_ptr, 2, byte_order);
1277 op_ptr += 2;
1278 op_ptr += offset;
1279 goto no_push;
1280
1281 case DW_OP_bra:
1282 {
1283 struct value *val;
1284
1285 offset = extract_signed_integer (op_ptr, 2, byte_order);
1286 op_ptr += 2;
1287 val = dwarf_expr_fetch (ctx, 0);
1288 dwarf_require_integral (value_type (val));
1289 if (value_as_long (val) != 0)
1290 op_ptr += offset;
1291 dwarf_expr_pop (ctx);
1292 }
1293 goto no_push;
1294
1295 case DW_OP_nop:
1296 goto no_push;
1297
1298 case DW_OP_piece:
1299 {
1300 uint64_t size;
1301
1302 /* Record the piece. */
1303 op_ptr = safe_read_uleb128 (op_ptr, op_end, &size);
1304 add_piece (ctx, 8 * size, 0);
1305
1306 /* Pop off the address/regnum, and reset the location
1307 type. */
1308 if (ctx->location != DWARF_VALUE_LITERAL
1309 && ctx->location != DWARF_VALUE_OPTIMIZED_OUT)
1310 dwarf_expr_pop (ctx);
1311 ctx->location = DWARF_VALUE_MEMORY;
1312 }
1313 goto no_push;
1314
1315 case DW_OP_bit_piece:
1316 {
1317 uint64_t size, offset;
1318
1319 /* Record the piece. */
1320 op_ptr = safe_read_uleb128 (op_ptr, op_end, &size);
1321 op_ptr = safe_read_uleb128 (op_ptr, op_end, &offset);
1322 add_piece (ctx, size, offset);
1323
1324 /* Pop off the address/regnum, and reset the location
1325 type. */
1326 if (ctx->location != DWARF_VALUE_LITERAL
1327 && ctx->location != DWARF_VALUE_OPTIMIZED_OUT)
1328 dwarf_expr_pop (ctx);
1329 ctx->location = DWARF_VALUE_MEMORY;
1330 }
1331 goto no_push;
1332
1333 case DW_OP_GNU_uninit:
1334 if (op_ptr != op_end)
1335 error (_("DWARF-2 expression error: DW_OP_GNU_uninit must always "
1336 "be the very last op."));
1337
1338 ctx->initialized = 0;
1339 goto no_push;
1340
1341 case DW_OP_call2:
1342 {
1343 cu_offset offset;
1344
1345 offset.cu_off = extract_unsigned_integer (op_ptr, 2, byte_order);
1346 op_ptr += 2;
1347 ctx->funcs->dwarf_call (ctx, offset);
1348 }
1349 goto no_push;
1350
1351 case DW_OP_call4:
1352 {
1353 cu_offset offset;
1354
1355 offset.cu_off = extract_unsigned_integer (op_ptr, 4, byte_order);
1356 op_ptr += 4;
1357 ctx->funcs->dwarf_call (ctx, offset);
1358 }
1359 goto no_push;
1360
1361 case DW_OP_GNU_entry_value:
1362 {
1363 uint64_t len;
1364 CORE_ADDR deref_size;
1365 union call_site_parameter_u kind_u;
1366
1367 op_ptr = safe_read_uleb128 (op_ptr, op_end, &len);
1368 if (op_ptr + len > op_end)
1369 error (_("DW_OP_GNU_entry_value: too few bytes available."));
1370
1371 kind_u.dwarf_reg = dwarf_block_to_dwarf_reg (op_ptr, op_ptr + len);
1372 if (kind_u.dwarf_reg != -1)
1373 {
1374 op_ptr += len;
1375 ctx->funcs->push_dwarf_reg_entry_value (ctx,
1376 CALL_SITE_PARAMETER_DWARF_REG,
1377 kind_u,
1378 -1 /* deref_size */);
1379 goto no_push;
1380 }
1381
1382 kind_u.dwarf_reg = dwarf_block_to_dwarf_reg_deref (op_ptr,
1383 op_ptr + len,
1384 &deref_size);
1385 if (kind_u.dwarf_reg != -1)
1386 {
1387 if (deref_size == -1)
1388 deref_size = ctx->addr_size;
1389 op_ptr += len;
1390 ctx->funcs->push_dwarf_reg_entry_value (ctx,
1391 CALL_SITE_PARAMETER_DWARF_REG,
1392 kind_u, deref_size);
1393 goto no_push;
1394 }
1395
1396 error (_("DWARF-2 expression error: DW_OP_GNU_entry_value is "
1397 "supported only for single DW_OP_reg* "
1398 "or for DW_OP_breg*(0)+DW_OP_deref*"));
1399 }
1400
1401 case DW_OP_GNU_parameter_ref:
1402 {
1403 union call_site_parameter_u kind_u;
1404
1405 kind_u.param_offset.cu_off = extract_unsigned_integer (op_ptr, 4,
1406 byte_order);
1407 op_ptr += 4;
1408 ctx->funcs->push_dwarf_reg_entry_value (ctx,
1409 CALL_SITE_PARAMETER_PARAM_OFFSET,
1410 kind_u,
1411 -1 /* deref_size */);
1412 }
1413 goto no_push;
1414
1415 case DW_OP_GNU_const_type:
1416 {
1417 cu_offset type_die;
1418 int n;
1419 const gdb_byte *data;
1420 struct type *type;
1421
1422 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
1423 type_die.cu_off = uoffset;
1424 n = *op_ptr++;
1425 data = op_ptr;
1426 op_ptr += n;
1427
1428 type = dwarf_get_base_type (ctx, type_die, n);
1429 result_val = value_from_contents (type, data);
1430 }
1431 break;
1432
1433 case DW_OP_GNU_regval_type:
1434 {
1435 cu_offset type_die;
1436 struct type *type;
1437
1438 op_ptr = safe_read_uleb128 (op_ptr, op_end, &reg);
1439 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
1440 type_die.cu_off = uoffset;
1441
1442 type = dwarf_get_base_type (ctx, type_die, 0);
1443 result_val = ctx->funcs->get_reg_value (ctx->baton, type, reg);
1444 }
1445 break;
1446
1447 case DW_OP_GNU_convert:
1448 case DW_OP_GNU_reinterpret:
1449 {
1450 cu_offset type_die;
1451 struct type *type;
1452
1453 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
1454 type_die.cu_off = uoffset;
1455
1456 if (type_die.cu_off == 0)
1457 type = address_type;
1458 else
1459 type = dwarf_get_base_type (ctx, type_die, 0);
1460
1461 result_val = dwarf_expr_fetch (ctx, 0);
1462 dwarf_expr_pop (ctx);
1463
1464 if (op == DW_OP_GNU_convert)
1465 result_val = value_cast (type, result_val);
1466 else if (type == value_type (result_val))
1467 {
1468 /* Nothing. */
1469 }
1470 else if (TYPE_LENGTH (type)
1471 != TYPE_LENGTH (value_type (result_val)))
1472 error (_("DW_OP_GNU_reinterpret has wrong size"));
1473 else
1474 result_val
1475 = value_from_contents (type,
1476 value_contents_all (result_val));
1477 }
1478 break;
1479
1480 case DW_OP_push_object_address:
1481 /* Return the address of the object we are currently observing. */
1482 result = (ctx->funcs->get_object_address) (ctx->baton);
1483 result_val = value_from_ulongest (address_type, result);
1484 break;
1485
1486 default:
1487 error (_("Unhandled dwarf expression opcode 0x%x"), op);
1488 }
1489
1490 /* Most things push a result value. */
1491 gdb_assert (result_val != NULL);
1492 dwarf_expr_push (ctx, result_val, in_stack_memory);
1493 no_push:
1494 ;
1495 }
1496
1497 /* To simplify our main caller, if the result is an implicit
1498 pointer, then make a pieced value. This is ok because we can't
1499 have implicit pointers in contexts where pieces are invalid. */
1500 if (ctx->location == DWARF_VALUE_IMPLICIT_POINTER)
1501 add_piece (ctx, 8 * ctx->addr_size, 0);
1502
1503abort_expression:
1504 ctx->recursion_depth--;
1505 gdb_assert (ctx->recursion_depth >= 0);
1506}
1507
/* Stub dwarf_expr_context_funcs.get_frame_base implementation.

   For evaluation contexts that have no frame base (so DW_OP_fbreg is
   meaningless there); evaluating such an opcode raises a user-visible
   error instead of producing a value.  START and LENGTH are the usual
   out-parameters for the frame-base expression; they are never written
   here because the call always errors out.  */

void
ctx_no_get_frame_base (void *baton, const gdb_byte **start, size_t *length)
{
  error (_("%s is invalid in this context"), "DW_OP_fbreg");
}
1515
/* Stub dwarf_expr_context_funcs.get_frame_cfa implementation.

   For evaluation contexts that have no frame CFA; DW_OP_call_frame_cfa
   cannot be evaluated there, so this always raises an error.  The
   CORE_ADDR return type exists only to satisfy the callback signature —
   `error' does not return.  */

CORE_ADDR
ctx_no_get_frame_cfa (void *baton)
{
  error (_("%s is invalid in this context"), "DW_OP_call_frame_cfa");
}
1523
/* Stub dwarf_expr_context_funcs.get_frame_pc implementation.

   For evaluation contexts with no frame PC.  The message names
   DW_OP_GNU_implicit_pointer because that is the opcode whose handling
   needs the frame PC; evaluating it in such a context errors out.
   Never returns (`error' throws).  */

CORE_ADDR
ctx_no_get_frame_pc (void *baton)
{
  error (_("%s is invalid in this context"), "DW_OP_GNU_implicit_pointer");
}
1531
/* Stub dwarf_expr_context_funcs.get_tls_address implementation.

   For evaluation contexts that cannot resolve thread-local storage;
   DW_OP_form_tls_address (and its GNU equivalent, which shares this
   callback) raises an error here.  OFFSET is the TLS block offset that
   would otherwise be translated to an address; it is ignored.  */

CORE_ADDR
ctx_no_get_tls_address (void *baton, CORE_ADDR offset)
{
  error (_("%s is invalid in this context"), "DW_OP_form_tls_address");
}
1539
/* Stub dwarf_expr_context_funcs.dwarf_call implementation.

   For evaluation contexts that cannot evaluate a nested DWARF procedure
   (DW_OP_call2 / DW_OP_call4 / DW_OP_call_ref — hence "DW_OP_call*" in
   the message).  DIE_OFFSET, the CU-relative offset of the called DIE,
   is ignored; the call always errors out.  */

void
ctx_no_dwarf_call (struct dwarf_expr_context *ctx, cu_offset die_offset)
{
  error (_("%s is invalid in this context"), "DW_OP_call*");
}
1547
/* Stub dwarf_expr_context_funcs.get_base_type implementation.

   For evaluation contexts that cannot look up a DW_TAG_base_type DIE,
   i.e. the typed-DWARF extension opcodes (DW_OP_GNU_const_type,
   DW_OP_GNU_regval_type, DW_OP_GNU_deref_type, DW_OP_GNU_convert, ...)
   are unavailable.  DIE is the CU-relative offset of the requested type
   DIE; it is ignored.  Never returns.

   NOTE(review): the message wording "Support ... is not supported" is
   redundant, but it is a translatable user-visible string, so it is
   deliberately left unchanged here.  */

struct type *
ctx_no_get_base_type (struct dwarf_expr_context *ctx, cu_offset die)
{
  error (_("Support for typed DWARF is not supported in this context"));
}
1555
/* Stub dwarf_expr_context_funcs.push_dwarf_block_entry_value
   implementation.

   Unlike the other stubs in this file, this reports an internal error
   (a GDB bug) rather than a user error: DW_OP_GNU_entry_value should
   never be evaluated in a context that installs this stub.  KIND,
   KIND_U and DEREF_SIZE describe the call-site parameter that would be
   pushed; all are ignored.  */

void
ctx_no_push_dwarf_reg_entry_value (struct dwarf_expr_context *ctx,
				   enum call_site_parameter_kind kind,
				   union call_site_parameter_u kind_u,
				   int deref_size)
{
  internal_error (__FILE__, __LINE__,
		  _("Support for DW_OP_GNU_entry_value is unimplemented"));
}
1568
/* Stub dwarf_expr_context_funcs.get_addr_index implementation.

   For evaluation contexts with no .debug_addr index table, so
   DW_OP_GNU_addr_index cannot be resolved.  INDEX, the slot in that
   table, is ignored; the call always errors out.  */

CORE_ADDR
ctx_no_get_addr_index (void *baton, unsigned int index)
{
  error (_("%s is invalid in this context"), "DW_OP_GNU_addr_index");
}
1576
/* Provide a prototype to silence -Wmissing-prototypes. */
extern initialize_file_ftype _initialize_dwarf2expr;

/* Module initializer: register the per-gdbarch data slot used for the
   lazily-created DWARF stack types (see dwarf_gdbarch_types_init and
   the dwarf_arch_cookie declaration near the top of this file).  */

void
_initialize_dwarf2expr (void)
{
  dwarf_arch_cookie
    = gdbarch_data_register_post_init (dwarf_gdbarch_types_init);
}
This page took 0.027375 seconds and 4 git commands to generate.