gdb/dwarf2expr.c (binutils-gdb, as of commit "Remove ARI check for multiple calls to warning or error")
1 /* DWARF 2 Expression Evaluator.
2
3 Copyright (C) 2001-2019 Free Software Foundation, Inc.
4
5 Contributed by Daniel Berlin (dan@dberlin.org)
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "symtab.h"
24 #include "gdbtypes.h"
25 #include "value.h"
26 #include "gdbcore.h"
27 #include "dwarf2.h"
28 #include "dwarf2expr.h"
29 #include "dwarf2loc.h"
30 #include "gdbsupport/underlying.h"
31 #include "gdbarch.h"
32
33 /* Cookie for gdbarch data. */
34
35 static struct gdbarch_data *dwarf_arch_cookie;
36
37 /* This holds gdbarch-specific types used by the DWARF expression
38 evaluator. See comments in execute_stack_op. */
39
40 struct dwarf_gdbarch_types
41 {
42 struct type *dw_types[3];
43 };
44
45 /* Allocate and fill in dwarf_gdbarch_types for an arch. */
46
47 static void *
48 dwarf_gdbarch_types_init (struct gdbarch *gdbarch)
49 {
50 struct dwarf_gdbarch_types *types
51 = GDBARCH_OBSTACK_ZALLOC (gdbarch, struct dwarf_gdbarch_types);
52
53 /* The types themselves are lazily initialized. */
54
55 return types;
56 }
57
58 /* Return the type used for DWARF operations where the type is
59 unspecified in the DWARF spec. Only certain sizes are
60 supported. */
61
62 struct type *
63 dwarf_expr_context::address_type () const
64 {
65 struct dwarf_gdbarch_types *types
66 = (struct dwarf_gdbarch_types *) gdbarch_data (this->gdbarch,
67 dwarf_arch_cookie);
68 int ndx;
69
70 if (this->addr_size == 2)
71 ndx = 0;
72 else if (this->addr_size == 4)
73 ndx = 1;
74 else if (this->addr_size == 8)
75 ndx = 2;
76 else
77 error (_("Unsupported address size in DWARF expressions: %d bits"),
78 8 * this->addr_size);
79
80 if (types->dw_types[ndx] == NULL)
81 types->dw_types[ndx]
82 = arch_integer_type (this->gdbarch,
83 8 * this->addr_size,
84 0, "<signed DWARF address type>");
85
86 return types->dw_types[ndx];
87 }
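/* Illustrative note (not part of the original source): addr_size is in
   bytes, so a compilation unit with 4-byte addresses maps to dw_types[1]
   and lazily creates a 32-bit signed "<signed DWARF address type>";
   2- and 8-byte address sizes use indices 0 and 2.  Any other size is
   rejected by the error above, which reports the width in bits.  */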
88
89 /* Create a new context for the expression evaluator. */
90
91 dwarf_expr_context::dwarf_expr_context ()
92 : gdbarch (NULL),
93 addr_size (0),
94 ref_addr_size (0),
95 offset (0),
96 recursion_depth (0),
97 max_recursion_depth (0x100),
98 location (DWARF_VALUE_MEMORY),
99 len (0),
100 data (NULL),
101 initialized (0)
102 {
103 }
104
105 /* Push VALUE onto the stack. */
106
107 void
108 dwarf_expr_context::push (struct value *value, bool in_stack_memory)
109 {
110 stack.emplace_back (value, in_stack_memory);
111 }
112
113 /* Push VALUE onto the stack. */
114
115 void
116 dwarf_expr_context::push_address (CORE_ADDR value, bool in_stack_memory)
117 {
118 push (value_from_ulongest (address_type (), value), in_stack_memory);
119 }
120
121 /* Pop the top item off of the stack. */
122
123 void
124 dwarf_expr_context::pop ()
125 {
126 if (stack.empty ())
127 error (_("dwarf expression stack underflow"));
128
129 stack.pop_back ();
130 }
131
132 /* Retrieve the N'th item on the stack. */
133
134 struct value *
135 dwarf_expr_context::fetch (int n)
136 {
137 if (stack.size () <= n)
138 error (_("Asked for position %d of stack, "
139 "stack only has %zu elements on it."),
140 n, stack.size ());
141 return stack[stack.size () - (1 + n)].value;
142 }
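/* Illustrative example (not part of the original source): fetch (0)
   returns the most recently pushed value.  If DW_OP_lit1, DW_OP_lit2
   and DW_OP_lit3 have just executed, the stack holds 1, 2, 3 in push
   order, so fetch (0) yields 3, fetch (1) yields 2 and fetch (2)
   yields 1; fetch (3) reports an error because the stack only has
   3 elements.  */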
143
144 /* Require that TYPE be an integral type; throw an exception if not. */
145
146 static void
147 dwarf_require_integral (struct type *type)
148 {
149 if (TYPE_CODE (type) != TYPE_CODE_INT
150 && TYPE_CODE (type) != TYPE_CODE_CHAR
151 && TYPE_CODE (type) != TYPE_CODE_BOOL)
152 error (_("integral type expected in DWARF expression"));
153 }
154
155 /* Return the unsigned form of TYPE. TYPE is necessarily an integral
156 type. */
157
158 static struct type *
159 get_unsigned_type (struct gdbarch *gdbarch, struct type *type)
160 {
161 switch (TYPE_LENGTH (type))
162 {
163 case 1:
164 return builtin_type (gdbarch)->builtin_uint8;
165 case 2:
166 return builtin_type (gdbarch)->builtin_uint16;
167 case 4:
168 return builtin_type (gdbarch)->builtin_uint32;
169 case 8:
170 return builtin_type (gdbarch)->builtin_uint64;
171 default:
172 error (_("no unsigned variant found for type, while evaluating "
173 "DWARF expression"));
174 }
175 }
176
177 /* Return the signed form of TYPE. TYPE is necessarily an integral
178 type. */
179
180 static struct type *
181 get_signed_type (struct gdbarch *gdbarch, struct type *type)
182 {
183 switch (TYPE_LENGTH (type))
184 {
185 case 1:
186 return builtin_type (gdbarch)->builtin_int8;
187 case 2:
188 return builtin_type (gdbarch)->builtin_int16;
189 case 4:
190 return builtin_type (gdbarch)->builtin_int32;
191 case 8:
192 return builtin_type (gdbarch)->builtin_int64;
193 default:
194 error (_("no signed variant found for type, while evaluating "
195 "DWARF expression"));
196 }
197 }
198
199 /* Retrieve the N'th item on the stack, converted to an address. */
200
201 CORE_ADDR
202 dwarf_expr_context::fetch_address (int n)
203 {
204 struct value *result_val = fetch (n);
205 enum bfd_endian byte_order = gdbarch_byte_order (this->gdbarch);
206 ULONGEST result;
207
208 dwarf_require_integral (value_type (result_val));
209 result = extract_unsigned_integer (value_contents (result_val),
210 TYPE_LENGTH (value_type (result_val)),
211 byte_order);
212
213 /* For most architectures, calling extract_unsigned_integer() alone
214 is sufficient for extracting an address. However, some
215 architectures (e.g. MIPS) use signed addresses and using
216 extract_unsigned_integer() will not produce a correct
217 result. Make sure we invoke gdbarch_integer_to_address()
218 for those architectures which require it. */
219 if (gdbarch_integer_to_address_p (this->gdbarch))
220 {
221 gdb_byte *buf = (gdb_byte *) alloca (this->addr_size);
222 struct type *int_type = get_unsigned_type (this->gdbarch,
223 value_type (result_val));
224
225 store_unsigned_integer (buf, this->addr_size, byte_order, result);
226 return gdbarch_integer_to_address (this->gdbarch, int_type, buf);
227 }
228
229 return (CORE_ADDR) result;
230 }
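/* Illustrative example (not part of the original source): on most
   targets the plain unsigned extraction above is the final answer,
   e.g. the 4 bytes 0x00 0x40 0x00 0x10 on a little-endian target
   become 0x10004000.  On an architecture that defines
   gdbarch_integer_to_address (the MIPS case mentioned above), the
   same bits are handed back to the gdbarch so it can, for instance,
   sign-extend a 32-bit value into the canonical address form the
   target expects.  */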
231
232 /* Retrieve the in_stack_memory flag of the N'th item on the stack. */
233
234 bool
235 dwarf_expr_context::fetch_in_stack_memory (int n)
236 {
237 if (stack.size () <= n)
238 error (_("Asked for position %d of stack, "
239 "stack only has %zu elements on it."),
240 n, stack.size ());
241 return stack[stack.size () - (1 + n)].in_stack_memory;
242 }
243
244 /* Return true if the expression stack is empty. */
245
246 bool
247 dwarf_expr_context::stack_empty_p () const
248 {
249 return stack.empty ();
250 }
251
252 /* Add a new piece to the dwarf_expr_context's piece list. */
253 void
254 dwarf_expr_context::add_piece (ULONGEST size, ULONGEST offset)
255 {
256 this->pieces.emplace_back ();
257 dwarf_expr_piece &p = this->pieces.back ();
258
259 p.location = this->location;
260 p.size = size;
261 p.offset = offset;
262
263 if (p.location == DWARF_VALUE_LITERAL)
264 {
265 p.v.literal.data = this->data;
266 p.v.literal.length = this->len;
267 }
268 else if (stack_empty_p ())
269 {
270 p.location = DWARF_VALUE_OPTIMIZED_OUT;
271 /* Also reset the context's location, for our callers. This is
272 a somewhat strange approach, but this lets us avoid setting
273 the location to DWARF_VALUE_MEMORY in all the individual
274 cases in the evaluator. */
275 this->location = DWARF_VALUE_OPTIMIZED_OUT;
276 }
277 else if (p.location == DWARF_VALUE_MEMORY)
278 {
279 p.v.mem.addr = fetch_address (0);
280 p.v.mem.in_stack_memory = fetch_in_stack_memory (0);
281 }
282 else if (p.location == DWARF_VALUE_IMPLICIT_POINTER)
283 {
284 p.v.ptr.die_sect_off = (sect_offset) this->len;
285 p.v.ptr.offset = value_as_long (fetch (0));
286 }
287 else if (p.location == DWARF_VALUE_REGISTER)
288 p.v.regno = value_as_long (fetch (0));
289 else
290 {
291 p.v.value = fetch (0);
292 }
293 }
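/* Worked example (illustrative, not from the original source): the
   expression DW_OP_reg0, DW_OP_piece 4, DW_OP_reg1, DW_OP_piece 4
   describes an 8-byte object split across two registers.  Each
   DW_OP_piece operand is a byte count, so the evaluator calls
   add_piece (32, 0) twice (sizes are stored here in bits), recording
   one DWARF_VALUE_REGISTER piece for register 0 and a second one for
   register 1.  */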
294
295 /* Evaluate the expression at ADDR (LEN bytes long). */
296
297 void
298 dwarf_expr_context::eval (const gdb_byte *addr, size_t len)
299 {
300 int old_recursion_depth = this->recursion_depth;
301
302 execute_stack_op (addr, addr + len);
303
304 /* RECURSION_DEPTH becomes invalid if an exception was thrown here. */
305
306 gdb_assert (this->recursion_depth == old_recursion_depth);
307 }
308
309 /* Helper to read a uleb128 value or throw an error. */
310
311 const gdb_byte *
312 safe_read_uleb128 (const gdb_byte *buf, const gdb_byte *buf_end,
313 uint64_t *r)
314 {
315 buf = gdb_read_uleb128 (buf, buf_end, r);
316 if (buf == NULL)
317 error (_("DWARF expression error: ran off end of buffer reading uleb128 value"));
318 return buf;
319 }
320
321 /* Helper to read a sleb128 value or throw an error. */
322
323 const gdb_byte *
324 safe_read_sleb128 (const gdb_byte *buf, const gdb_byte *buf_end,
325 int64_t *r)
326 {
327 buf = gdb_read_sleb128 (buf, buf_end, r);
328 if (buf == NULL)
329 error (_("DWARF expression error: ran off end of buffer reading sleb128 value"));
330 return buf;
331 }
332
333 const gdb_byte *
334 safe_skip_leb128 (const gdb_byte *buf, const gdb_byte *buf_end)
335 {
336 buf = gdb_skip_leb128 (buf, buf_end);
337 if (buf == NULL)
338 error (_("DWARF expression error: ran off end of buffer reading leb128 value"));
339 return buf;
340 }
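/* Worked encodings (illustrative, not from the original source): the
   unsigned LEB128 byte sequence 0xe5 0x8e 0x26 decodes to 624485, and
   the single signed LEB128 byte 0x7e decodes to -2.  Each helper above
   simply wraps the corresponding gdb_read_uleb128, gdb_read_sleb128 or
   gdb_skip_leb128 routine and turns a truncated buffer into an error
   instead of a NULL return.  */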
341 \f
342
343 /* Check that the current operator is either at the end of an
344 expression, or that it is followed by a composition operator or by
345 DW_OP_GNU_uninit (which should terminate the expression). */
346
347 void
348 dwarf_expr_require_composition (const gdb_byte *op_ptr, const gdb_byte *op_end,
349 const char *op_name)
350 {
351 if (op_ptr != op_end && *op_ptr != DW_OP_piece && *op_ptr != DW_OP_bit_piece
352 && *op_ptr != DW_OP_GNU_uninit)
353 error (_("DWARF-2 expression error: `%s' operations must be "
354 "used either alone or in conjunction with DW_OP_piece "
355 "or DW_OP_bit_piece."),
356 op_name);
357 }
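/* Illustrative example (not part of the original source): DW_OP_reg3
   followed by DW_OP_piece 4 passes this check, as does DW_OP_reg3 at
   the very end of an expression, while DW_OP_reg3 followed by
   DW_OP_lit0 triggers the error, because a register location cannot
   be operated on as a stack value.  */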
358
359 /* Return true iff the types T1 and T2 are "the same". This only does
360 checks that might reasonably be needed to compare DWARF base
361 types. */
362
363 static int
364 base_types_equal_p (struct type *t1, struct type *t2)
365 {
366 if (TYPE_CODE (t1) != TYPE_CODE (t2))
367 return 0;
368 if (TYPE_UNSIGNED (t1) != TYPE_UNSIGNED (t2))
369 return 0;
370 return TYPE_LENGTH (t1) == TYPE_LENGTH (t2);
371 }
372
373 /* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_reg* return the
374 DWARF register number. Otherwise return -1. */
375
376 int
377 dwarf_block_to_dwarf_reg (const gdb_byte *buf, const gdb_byte *buf_end)
378 {
379 uint64_t dwarf_reg;
380
381 if (buf_end <= buf)
382 return -1;
383 if (*buf >= DW_OP_reg0 && *buf <= DW_OP_reg31)
384 {
385 if (buf_end - buf != 1)
386 return -1;
387 return *buf - DW_OP_reg0;
388 }
389
390 if (*buf == DW_OP_regval_type || *buf == DW_OP_GNU_regval_type)
391 {
392 buf++;
393 buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
394 if (buf == NULL)
395 return -1;
396 buf = gdb_skip_leb128 (buf, buf_end);
397 if (buf == NULL)
398 return -1;
399 }
400 else if (*buf == DW_OP_regx)
401 {
402 buf++;
403 buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
404 if (buf == NULL)
405 return -1;
406 }
407 else
408 return -1;
409 if (buf != buf_end || (int) dwarf_reg != dwarf_reg)
410 return -1;
411 return dwarf_reg;
412 }
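/* Illustrative examples (not part of the original source): the
   one-byte block { DW_OP_reg5 } (0x55) returns 5, and the block
   { DW_OP_regx 33 } (0x90 0x21) returns 33.  A block such as
   { DW_OP_breg5 0 }, or one with trailing operators, returns -1,
   since only a lone register-location opcode qualifies.  */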
413
414 /* If <BUF..BUF_END] contains DW_FORM_block* with just DW_OP_breg*(0) and
415 DW_OP_deref* return the DWARF register number. Otherwise return -1.
416 DEREF_SIZE_RETURN contains -1 for DW_OP_deref; otherwise it contains the
417 size from DW_OP_deref_size. */
418
419 int
420 dwarf_block_to_dwarf_reg_deref (const gdb_byte *buf, const gdb_byte *buf_end,
421 CORE_ADDR *deref_size_return)
422 {
423 uint64_t dwarf_reg;
424 int64_t offset;
425
426 if (buf_end <= buf)
427 return -1;
428
429 if (*buf >= DW_OP_breg0 && *buf <= DW_OP_breg31)
430 {
431 dwarf_reg = *buf - DW_OP_breg0;
432 buf++;
433 if (buf >= buf_end)
434 return -1;
435 }
436 else if (*buf == DW_OP_bregx)
437 {
438 buf++;
439 buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
440 if (buf == NULL)
441 return -1;
442 if ((int) dwarf_reg != dwarf_reg)
443 return -1;
444 }
445 else
446 return -1;
447
448 buf = gdb_read_sleb128 (buf, buf_end, &offset);
449 if (buf == NULL)
450 return -1;
451 if (offset != 0)
452 return -1;
453
454 if (*buf == DW_OP_deref)
455 {
456 buf++;
457 *deref_size_return = -1;
458 }
459 else if (*buf == DW_OP_deref_size)
460 {
461 buf++;
462 if (buf >= buf_end)
463 return -1;
464 *deref_size_return = *buf++;
465 }
466 else
467 return -1;
468
469 if (buf != buf_end)
470 return -1;
471
472 return dwarf_reg;
473 }
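/* Illustrative example (not part of the original source): the block
   { DW_OP_breg7 0, DW_OP_deref } (0x77 0x00 0x06) returns 7 with
   *DEREF_SIZE_RETURN set to -1, meaning "dereference using the full
   address size".  Replacing DW_OP_deref with DW_OP_deref_size 4
   instead stores 4 in *DEREF_SIZE_RETURN.  A nonzero register offset
   makes the function return -1.  */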
474
475 /* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_fbreg(X) fill
476 in FB_OFFSET_RETURN with the X offset and return 1. Otherwise return 0. */
477
478 int
479 dwarf_block_to_fb_offset (const gdb_byte *buf, const gdb_byte *buf_end,
480 CORE_ADDR *fb_offset_return)
481 {
482 int64_t fb_offset;
483
484 if (buf_end <= buf)
485 return 0;
486
487 if (*buf != DW_OP_fbreg)
488 return 0;
489 buf++;
490
491 buf = gdb_read_sleb128 (buf, buf_end, &fb_offset);
492 if (buf == NULL)
493 return 0;
494 *fb_offset_return = fb_offset;
495 if (buf != buf_end || fb_offset != (LONGEST) *fb_offset_return)
496 return 0;
497
498 return 1;
499 }
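/* Illustrative example (not part of the original source): the block
   { DW_OP_fbreg -16 } (0x91 0x70) stores -16 in *FB_OFFSET_RETURN and
   returns 1; any extra trailing operators, or an opcode other than
   DW_OP_fbreg, makes the function return 0.  */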
500
501 /* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_bregSP(X) fill
502 in SP_OFFSET_RETURN with the X offset and return 1. Otherwise return 0.
503 The matched SP register number depends on GDBARCH. */
504
505 int
506 dwarf_block_to_sp_offset (struct gdbarch *gdbarch, const gdb_byte *buf,
507 const gdb_byte *buf_end, CORE_ADDR *sp_offset_return)
508 {
509 uint64_t dwarf_reg;
510 int64_t sp_offset;
511
512 if (buf_end <= buf)
513 return 0;
514 if (*buf >= DW_OP_breg0 && *buf <= DW_OP_breg31)
515 {
516 dwarf_reg = *buf - DW_OP_breg0;
517 buf++;
518 }
519 else
520 {
521 if (*buf != DW_OP_bregx)
522 return 0;
523 buf++;
524 buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
525 if (buf == NULL)
526 return 0;
527 }
528
529 if (dwarf_reg_to_regnum (gdbarch, dwarf_reg)
530 != gdbarch_sp_regnum (gdbarch))
531 return 0;
532
533 buf = gdb_read_sleb128 (buf, buf_end, &sp_offset);
534 if (buf == NULL)
535 return 0;
536 *sp_offset_return = sp_offset;
537 if (buf != buf_end || sp_offset != (LONGEST) *sp_offset_return)
538 return 0;
539
540 return 1;
541 }
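/* Illustrative example (not part of the original source): on x86-64,
   where DWARF register 7 maps to the stack pointer, the block
   { DW_OP_breg7 8 } (0x77 0x08) stores 8 in *SP_OFFSET_RETURN and
   returns 1.  The same block matches nothing on a target whose stack
   pointer has a different DWARF number, which is why GDBARCH is
   consulted.  */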
542
543 /* The engine for the expression evaluator. Using the context in this
544 object, evaluate the expression between OP_PTR and OP_END. */
545
546 void
547 dwarf_expr_context::execute_stack_op (const gdb_byte *op_ptr,
548 const gdb_byte *op_end)
549 {
550 enum bfd_endian byte_order = gdbarch_byte_order (this->gdbarch);
551 /* Old-style "untyped" DWARF values need special treatment in a
552 couple of places, specifically DW_OP_mod and DW_OP_shr. We need
553 a special type for these values so we can distinguish them from
554 values that have an explicit type, because explicitly-typed
555 values do not need special treatment. This special type must be
556 different (in the `==' sense) from any base type coming from the
557 CU. */
558 struct type *address_type = this->address_type ();
559
560 this->location = DWARF_VALUE_MEMORY;
561 this->initialized = 1; /* Default is initialized. */
562
563 if (this->recursion_depth > this->max_recursion_depth)
564 error (_("DWARF-2 expression error: Loop detected (%d)."),
565 this->recursion_depth);
566 this->recursion_depth++;
567
568 while (op_ptr < op_end)
569 {
570 enum dwarf_location_atom op = (enum dwarf_location_atom) *op_ptr++;
571 ULONGEST result;
572 /* Assume the value is not in stack memory.
573 Code that knows otherwise sets this to true.
574 Some arithmetic on stack addresses can probably be assumed to still
575 be a stack address, but we skip this complication for now.
576 This is just an optimization, so it's always ok to punt
577 and leave this as false. */
578 bool in_stack_memory = false;
579 uint64_t uoffset, reg;
580 int64_t offset;
581 struct value *result_val = NULL;
582
583 /* The DWARF expression might have a bug causing an infinite
584 loop. In that case, quitting is the only way out. */
585 QUIT;
586
587 switch (op)
588 {
589 case DW_OP_lit0:
590 case DW_OP_lit1:
591 case DW_OP_lit2:
592 case DW_OP_lit3:
593 case DW_OP_lit4:
594 case DW_OP_lit5:
595 case DW_OP_lit6:
596 case DW_OP_lit7:
597 case DW_OP_lit8:
598 case DW_OP_lit9:
599 case DW_OP_lit10:
600 case DW_OP_lit11:
601 case DW_OP_lit12:
602 case DW_OP_lit13:
603 case DW_OP_lit14:
604 case DW_OP_lit15:
605 case DW_OP_lit16:
606 case DW_OP_lit17:
607 case DW_OP_lit18:
608 case DW_OP_lit19:
609 case DW_OP_lit20:
610 case DW_OP_lit21:
611 case DW_OP_lit22:
612 case DW_OP_lit23:
613 case DW_OP_lit24:
614 case DW_OP_lit25:
615 case DW_OP_lit26:
616 case DW_OP_lit27:
617 case DW_OP_lit28:
618 case DW_OP_lit29:
619 case DW_OP_lit30:
620 case DW_OP_lit31:
621 result = op - DW_OP_lit0;
622 result_val = value_from_ulongest (address_type, result);
623 break;
624
625 case DW_OP_addr:
626 result = extract_unsigned_integer (op_ptr,
627 this->addr_size, byte_order);
628 op_ptr += this->addr_size;
629 /* Some versions of GCC emit DW_OP_addr before
630 DW_OP_GNU_push_tls_address. In this case the value is an
631 index, not an address. We don't support things like
632 branching between the address and the TLS op. */
633 if (op_ptr >= op_end || *op_ptr != DW_OP_GNU_push_tls_address)
634 result += this->offset;
635 result_val = value_from_ulongest (address_type, result);
636 break;
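/* Illustrative note (not part of the original source): for a TLS
   variable GCC can emit DW_OP_addr whose operand is an index into the
   thread-local block rather than an address, immediately followed by
   DW_OP_GNU_push_tls_address; that is why the check above skips
   adding this->offset (the relocation applied to real addresses) in
   that one case.  */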
637
638 case DW_OP_addrx:
639 case DW_OP_GNU_addr_index:
640 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
641 result = this->get_addr_index (uoffset);
642 result += this->offset;
643 result_val = value_from_ulongest (address_type, result);
644 break;
645 case DW_OP_GNU_const_index:
646 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
647 result = this->get_addr_index (uoffset);
648 result_val = value_from_ulongest (address_type, result);
649 break;
650
651 case DW_OP_const1u:
652 result = extract_unsigned_integer (op_ptr, 1, byte_order);
653 result_val = value_from_ulongest (address_type, result);
654 op_ptr += 1;
655 break;
656 case DW_OP_const1s:
657 result = extract_signed_integer (op_ptr, 1, byte_order);
658 result_val = value_from_ulongest (address_type, result);
659 op_ptr += 1;
660 break;
661 case DW_OP_const2u:
662 result = extract_unsigned_integer (op_ptr, 2, byte_order);
663 result_val = value_from_ulongest (address_type, result);
664 op_ptr += 2;
665 break;
666 case DW_OP_const2s:
667 result = extract_signed_integer (op_ptr, 2, byte_order);
668 result_val = value_from_ulongest (address_type, result);
669 op_ptr += 2;
670 break;
671 case DW_OP_const4u:
672 result = extract_unsigned_integer (op_ptr, 4, byte_order);
673 result_val = value_from_ulongest (address_type, result);
674 op_ptr += 4;
675 break;
676 case DW_OP_const4s:
677 result = extract_signed_integer (op_ptr, 4, byte_order);
678 result_val = value_from_ulongest (address_type, result);
679 op_ptr += 4;
680 break;
681 case DW_OP_const8u:
682 result = extract_unsigned_integer (op_ptr, 8, byte_order);
683 result_val = value_from_ulongest (address_type, result);
684 op_ptr += 8;
685 break;
686 case DW_OP_const8s:
687 result = extract_signed_integer (op_ptr, 8, byte_order);
688 result_val = value_from_ulongest (address_type, result);
689 op_ptr += 8;
690 break;
691 case DW_OP_constu:
692 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
693 result = uoffset;
694 result_val = value_from_ulongest (address_type, result);
695 break;
696 case DW_OP_consts:
697 op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset);
698 result = offset;
699 result_val = value_from_ulongest (address_type, result);
700 break;
701
702 /* The DW_OP_reg operations are required to occur alone in
703 location expressions. */
704 case DW_OP_reg0:
705 case DW_OP_reg1:
706 case DW_OP_reg2:
707 case DW_OP_reg3:
708 case DW_OP_reg4:
709 case DW_OP_reg5:
710 case DW_OP_reg6:
711 case DW_OP_reg7:
712 case DW_OP_reg8:
713 case DW_OP_reg9:
714 case DW_OP_reg10:
715 case DW_OP_reg11:
716 case DW_OP_reg12:
717 case DW_OP_reg13:
718 case DW_OP_reg14:
719 case DW_OP_reg15:
720 case DW_OP_reg16:
721 case DW_OP_reg17:
722 case DW_OP_reg18:
723 case DW_OP_reg19:
724 case DW_OP_reg20:
725 case DW_OP_reg21:
726 case DW_OP_reg22:
727 case DW_OP_reg23:
728 case DW_OP_reg24:
729 case DW_OP_reg25:
730 case DW_OP_reg26:
731 case DW_OP_reg27:
732 case DW_OP_reg28:
733 case DW_OP_reg29:
734 case DW_OP_reg30:
735 case DW_OP_reg31:
736 dwarf_expr_require_composition (op_ptr, op_end, "DW_OP_reg");
737
738 result = op - DW_OP_reg0;
739 result_val = value_from_ulongest (address_type, result);
740 this->location = DWARF_VALUE_REGISTER;
741 break;
742
743 case DW_OP_regx:
744 op_ptr = safe_read_uleb128 (op_ptr, op_end, &reg);
745 dwarf_expr_require_composition (op_ptr, op_end, "DW_OP_regx");
746
747 result = reg;
748 result_val = value_from_ulongest (address_type, result);
749 this->location = DWARF_VALUE_REGISTER;
750 break;
751
752 case DW_OP_implicit_value:
753 {
754 uint64_t len;
755
756 op_ptr = safe_read_uleb128 (op_ptr, op_end, &len);
757 if (op_ptr + len > op_end)
758 error (_("DW_OP_implicit_value: too few bytes available."));
759 this->len = len;
760 this->data = op_ptr;
761 this->location = DWARF_VALUE_LITERAL;
762 op_ptr += len;
763 dwarf_expr_require_composition (op_ptr, op_end,
764 "DW_OP_implicit_value");
765 }
766 goto no_push;
767
768 case DW_OP_stack_value:
769 this->location = DWARF_VALUE_STACK;
770 dwarf_expr_require_composition (op_ptr, op_end, "DW_OP_stack_value");
771 goto no_push;
772
773 case DW_OP_implicit_pointer:
774 case DW_OP_GNU_implicit_pointer:
775 {
776 int64_t len;
777
778 if (this->ref_addr_size == -1)
779 error (_("DWARF-2 expression error: DW_OP_implicit_pointer "
780 "is not allowed in frame context"));
781
782 /* The referred-to DIE of sect_offset kind. */
783 this->len = extract_unsigned_integer (op_ptr, this->ref_addr_size,
784 byte_order);
785 op_ptr += this->ref_addr_size;
786
787 /* The byte offset into the data. */
788 op_ptr = safe_read_sleb128 (op_ptr, op_end, &len);
789 result = (ULONGEST) len;
790 result_val = value_from_ulongest (address_type, result);
791
792 this->location = DWARF_VALUE_IMPLICIT_POINTER;
793 dwarf_expr_require_composition (op_ptr, op_end,
794 "DW_OP_implicit_pointer");
795 }
796 break;
797
798 case DW_OP_breg0:
799 case DW_OP_breg1:
800 case DW_OP_breg2:
801 case DW_OP_breg3:
802 case DW_OP_breg4:
803 case DW_OP_breg5:
804 case DW_OP_breg6:
805 case DW_OP_breg7:
806 case DW_OP_breg8:
807 case DW_OP_breg9:
808 case DW_OP_breg10:
809 case DW_OP_breg11:
810 case DW_OP_breg12:
811 case DW_OP_breg13:
812 case DW_OP_breg14:
813 case DW_OP_breg15:
814 case DW_OP_breg16:
815 case DW_OP_breg17:
816 case DW_OP_breg18:
817 case DW_OP_breg19:
818 case DW_OP_breg20:
819 case DW_OP_breg21:
820 case DW_OP_breg22:
821 case DW_OP_breg23:
822 case DW_OP_breg24:
823 case DW_OP_breg25:
824 case DW_OP_breg26:
825 case DW_OP_breg27:
826 case DW_OP_breg28:
827 case DW_OP_breg29:
828 case DW_OP_breg30:
829 case DW_OP_breg31:
830 {
831 op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset);
832 result = this->read_addr_from_reg (op - DW_OP_breg0);
833 result += offset;
834 result_val = value_from_ulongest (address_type, result);
835 }
836 break;
837 case DW_OP_bregx:
838 {
839 op_ptr = safe_read_uleb128 (op_ptr, op_end, &reg);
840 op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset);
841 result = this->read_addr_from_reg (reg);
842 result += offset;
843 result_val = value_from_ulongest (address_type, result);
844 }
845 break;
846 case DW_OP_fbreg:
847 {
848 const gdb_byte *datastart;
849 size_t datalen;
850
851 op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset);
852
853 /* Rather than create a whole new context, we simply
854 backup the current stack locally and install a new empty stack,
855 then reset it afterwards, effectively erasing whatever the
856 recursive call put there. */
857 std::vector<dwarf_stack_value> saved_stack = std::move (stack);
858 stack.clear ();
859
860 /* FIXME: cagney/2003-03-26: This code should be using
861 get_frame_base_address(), and then implement a dwarf2
862 specific this_base method. */
863 this->get_frame_base (&datastart, &datalen);
864 eval (datastart, datalen);
865 if (this->location == DWARF_VALUE_MEMORY)
866 result = fetch_address (0);
867 else if (this->location == DWARF_VALUE_REGISTER)
868 result = this->read_addr_from_reg (value_as_long (fetch (0)));
869 else
870 error (_("Not implemented: computing frame "
871 "base using explicit value operator"));
872 result = result + offset;
873 result_val = value_from_ulongest (address_type, result);
874 in_stack_memory = true;
875
876 /* Restore the content of the original stack. */
877 stack = std::move (saved_stack);
878
879 this->location = DWARF_VALUE_MEMORY;
880 }
881 break;
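/* Worked example (illustrative, not from the original source): with
   DW_AT_frame_base given as { DW_OP_call_frame_cfa }, a local whose
   location is { DW_OP_fbreg -24 } evaluates the frame-base block on a
   temporary empty stack, fetches the resulting CFA as an address,
   adds -24, and pushes the sum with in_stack_memory set, since a
   frame-base-relative object lives on the stack.  */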
882
883 case DW_OP_dup:
884 result_val = fetch (0);
885 in_stack_memory = fetch_in_stack_memory (0);
886 break;
887
888 case DW_OP_drop:
889 pop ();
890 goto no_push;
891
892 case DW_OP_pick:
893 offset = *op_ptr++;
894 result_val = fetch (offset);
895 in_stack_memory = fetch_in_stack_memory (offset);
896 break;
897
898 case DW_OP_swap:
899 {
900 if (stack.size () < 2)
901 error (_("Not enough elements for "
902 "DW_OP_swap. Need 2, have %zu."),
903 stack.size ());
904
905 dwarf_stack_value &t1 = stack[stack.size () - 1];
906 dwarf_stack_value &t2 = stack[stack.size () - 2];
907 std::swap (t1, t2);
908 goto no_push;
909 }
910
911 case DW_OP_over:
912 result_val = fetch (1);
913 in_stack_memory = fetch_in_stack_memory (1);
914 break;
915
916 case DW_OP_rot:
917 {
918 if (stack.size () < 3)
919 error (_("Not enough elements for "
920 "DW_OP_rot. Need 3, have %zu."),
921 stack.size ());
922
923 dwarf_stack_value temp = stack[stack.size () - 1];
924 stack[stack.size () - 1] = stack[stack.size () - 2];
925 stack[stack.size () - 2] = stack[stack.size () - 3];
926 stack[stack.size () - 3] = temp;
927 goto no_push;
928 }
929
930 case DW_OP_deref:
931 case DW_OP_deref_size:
932 case DW_OP_deref_type:
933 case DW_OP_GNU_deref_type:
934 {
935 int addr_size = (op == DW_OP_deref ? this->addr_size : *op_ptr++);
936 gdb_byte *buf = (gdb_byte *) alloca (addr_size);
937 CORE_ADDR addr = fetch_address (0);
938 struct type *type;
939
940 pop ();
941
942 if (op == DW_OP_deref_type || op == DW_OP_GNU_deref_type)
943 {
944 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
945 cu_offset type_die_cu_off = (cu_offset) uoffset;
946 type = get_base_type (type_die_cu_off, 0);
947 }
948 else
949 type = address_type;
950
951 this->read_mem (buf, addr, addr_size);
952
953 /* If the size of the object read from memory is different
954 from the type length, we need to zero-extend it. */
955 if (TYPE_LENGTH (type) != addr_size)
956 {
957 ULONGEST datum =
958 extract_unsigned_integer (buf, addr_size, byte_order);
959
960 buf = (gdb_byte *) alloca (TYPE_LENGTH (type));
961 store_unsigned_integer (buf, TYPE_LENGTH (type),
962 byte_order, datum);
963 }
964
965 result_val = value_from_contents_and_address (type, buf, addr);
966 break;
967 }
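/* Illustrative example (not part of the original source): on a target
   with 8-byte addresses, DW_OP_deref_size 2 pops an address, reads 2
   bytes there and zero-extends the result into the 8-byte generic
   type, while plain DW_OP_deref reads the full address size.
   DW_OP_deref_type performs the same sized read but gives the result
   the DWARF base type named by its type operand.  */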
968
969 case DW_OP_abs:
970 case DW_OP_neg:
971 case DW_OP_not:
972 case DW_OP_plus_uconst:
973 {
974 /* Unary operations. */
975 result_val = fetch (0);
976 pop ();
977
978 switch (op)
979 {
980 case DW_OP_abs:
981 if (value_less (result_val,
982 value_zero (value_type (result_val), not_lval)))
983 result_val = value_neg (result_val);
984 break;
985 case DW_OP_neg:
986 result_val = value_neg (result_val);
987 break;
988 case DW_OP_not:
989 dwarf_require_integral (value_type (result_val));
990 result_val = value_complement (result_val);
991 break;
992 case DW_OP_plus_uconst:
993 dwarf_require_integral (value_type (result_val));
994 result = value_as_long (result_val);
995 op_ptr = safe_read_uleb128 (op_ptr, op_end, &reg);
996 result += reg;
997 result_val = value_from_ulongest (address_type, result);
998 break;
999 }
1000 }
1001 break;
1002
1003 case DW_OP_and:
1004 case DW_OP_div:
1005 case DW_OP_minus:
1006 case DW_OP_mod:
1007 case DW_OP_mul:
1008 case DW_OP_or:
1009 case DW_OP_plus:
1010 case DW_OP_shl:
1011 case DW_OP_shr:
1012 case DW_OP_shra:
1013 case DW_OP_xor:
1014 case DW_OP_le:
1015 case DW_OP_ge:
1016 case DW_OP_eq:
1017 case DW_OP_lt:
1018 case DW_OP_gt:
1019 case DW_OP_ne:
1020 {
1021 /* Binary operations. */
1022 struct value *first, *second;
1023
1024 second = fetch (0);
1025 pop ();
1026
1027 first = fetch (0);
1028 pop ();
1029
1030 if (! base_types_equal_p (value_type (first), value_type (second)))
1031 error (_("Incompatible types on DWARF stack"));
1032
1033 switch (op)
1034 {
1035 case DW_OP_and:
1036 dwarf_require_integral (value_type (first));
1037 dwarf_require_integral (value_type (second));
1038 result_val = value_binop (first, second, BINOP_BITWISE_AND);
1039 break;
1040 case DW_OP_div:
1041 result_val = value_binop (first, second, BINOP_DIV);
1042 break;
1043 case DW_OP_minus:
1044 result_val = value_binop (first, second, BINOP_SUB);
1045 break;
1046 case DW_OP_mod:
1047 {
1048 int cast_back = 0;
1049 struct type *orig_type = value_type (first);
1050
1051 /* We have to special-case "old-style" untyped values
1052 -- these must have mod computed using unsigned
1053 math. */
1054 if (orig_type == address_type)
1055 {
1056 struct type *utype
1057 = get_unsigned_type (this->gdbarch, orig_type);
1058
1059 cast_back = 1;
1060 first = value_cast (utype, first);
1061 second = value_cast (utype, second);
1062 }
1063 /* Note that value_binop doesn't handle float or
1064 decimal float here. This seems unimportant. */
1065 result_val = value_binop (first, second, BINOP_MOD);
1066 if (cast_back)
1067 result_val = value_cast (orig_type, result_val);
1068 }
1069 break;
1070 case DW_OP_mul:
1071 result_val = value_binop (first, second, BINOP_MUL);
1072 break;
1073 case DW_OP_or:
1074 dwarf_require_integral (value_type (first));
1075 dwarf_require_integral (value_type (second));
1076 result_val = value_binop (first, second, BINOP_BITWISE_IOR);
1077 break;
1078 case DW_OP_plus:
1079 result_val = value_binop (first, second, BINOP_ADD);
1080 break;
1081 case DW_OP_shl:
1082 dwarf_require_integral (value_type (first));
1083 dwarf_require_integral (value_type (second));
1084 result_val = value_binop (first, second, BINOP_LSH);
1085 break;
1086 case DW_OP_shr:
1087 dwarf_require_integral (value_type (first));
1088 dwarf_require_integral (value_type (second));
1089 if (!TYPE_UNSIGNED (value_type (first)))
1090 {
1091 struct type *utype
1092 = get_unsigned_type (this->gdbarch, value_type (first));
1093
1094 first = value_cast (utype, first);
1095 }
1096
1097 result_val = value_binop (first, second, BINOP_RSH);
1098 /* Make sure we wind up with the same type we started
1099 with. */
1100 if (value_type (result_val) != value_type (second))
1101 result_val = value_cast (value_type (second), result_val);
1102 break;
1103 case DW_OP_shra:
1104 dwarf_require_integral (value_type (first));
1105 dwarf_require_integral (value_type (second));
1106 if (TYPE_UNSIGNED (value_type (first)))
1107 {
1108 struct type *stype
1109 = get_signed_type (this->gdbarch, value_type (first));
1110
1111 first = value_cast (stype, first);
1112 }
1113
1114 result_val = value_binop (first, second, BINOP_RSH);
1115 /* Make sure we wind up with the same type we started
1116 with. */
1117 if (value_type (result_val) != value_type (second))
1118 result_val = value_cast (value_type (second), result_val);
1119 break;
1120 case DW_OP_xor:
1121 dwarf_require_integral (value_type (first));
1122 dwarf_require_integral (value_type (second));
1123 result_val = value_binop (first, second, BINOP_BITWISE_XOR);
1124 break;
1125 case DW_OP_le:
1126 /* A <= B is !(B < A). */
1127 result = ! value_less (second, first);
1128 result_val = value_from_ulongest (address_type, result);
1129 break;
1130 case DW_OP_ge:
1131 /* A >= B is !(A < B). */
1132 result = ! value_less (first, second);
1133 result_val = value_from_ulongest (address_type, result);
1134 break;
1135 case DW_OP_eq:
1136 result = value_equal (first, second);
1137 result_val = value_from_ulongest (address_type, result);
1138 break;
1139 case DW_OP_lt:
1140 result = value_less (first, second);
1141 result_val = value_from_ulongest (address_type, result);
1142 break;
1143 case DW_OP_gt:
1144 /* A > B is B < A. */
1145 result = value_less (second, first);
1146 result_val = value_from_ulongest (address_type, result);
1147 break;
1148 case DW_OP_ne:
1149 result = ! value_equal (first, second);
1150 result_val = value_from_ulongest (address_type, result);
1151 break;
1152 default:
1153 internal_error (__FILE__, __LINE__,
1154 _("Can't be reached."));
1155 }
1156 }
1157 break;
1158
1159 case DW_OP_call_frame_cfa:
1160 result = this->get_frame_cfa ();
1161 result_val = value_from_ulongest (address_type, result);
1162 in_stack_memory = true;
1163 break;
1164
1165 case DW_OP_GNU_push_tls_address:
1166 case DW_OP_form_tls_address:
1167 /* Variable is at a constant offset in the thread-local
1168 storage block into the objfile for the current thread and
1169 the dynamic linker module containing this expression. Here
1170 we return the offset from that base. The top of the
1171 stack has the offset from the beginning of the thread
1172 control block at which the variable is located. Nothing
1173 should follow this operator, so the top of stack would be
1174 returned. */
1175 result = value_as_long (fetch (0));
1176 pop ();
1177 result = this->get_tls_address (result);
1178 result_val = value_from_ulongest (address_type, result);
1179 break;
1180
1181 case DW_OP_skip:
1182 offset = extract_signed_integer (op_ptr, 2, byte_order);
1183 op_ptr += 2;
1184 op_ptr += offset;
1185 goto no_push;
1186
1187 case DW_OP_bra:
1188 {
1189 struct value *val;
1190
1191 offset = extract_signed_integer (op_ptr, 2, byte_order);
1192 op_ptr += 2;
1193 val = fetch (0);
1194 dwarf_require_integral (value_type (val));
1195 if (value_as_long (val) != 0)
1196 op_ptr += offset;
1197 pop ();
1198 }
1199 goto no_push;
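/* Illustrative example (not part of the original source): both branch
   offsets are signed 16-bit values relative to the byte just past the
   operand.  In the sequence DW_OP_lit1, DW_OP_bra <1>, DW_OP_lit2,
   DW_OP_lit3 the popped condition is nonzero, so the one-byte
   DW_OP_lit2 is skipped and only 3 ends up on the stack.  */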
1200
1201 case DW_OP_nop:
1202 goto no_push;
1203
1204 case DW_OP_piece:
1205 {
1206 uint64_t size;
1207
1208 /* Record the piece. */
1209 op_ptr = safe_read_uleb128 (op_ptr, op_end, &size);
1210 add_piece (8 * size, 0);
1211
1212 /* Pop off the address/regnum, and reset the location
1213 type. */
1214 if (this->location != DWARF_VALUE_LITERAL
1215 && this->location != DWARF_VALUE_OPTIMIZED_OUT)
1216 pop ();
1217 this->location = DWARF_VALUE_MEMORY;
1218 }
1219 goto no_push;
1220
1221 case DW_OP_bit_piece:
1222 {
1223 uint64_t size, uleb_offset;
1224
1225 /* Record the piece. */
1226 op_ptr = safe_read_uleb128 (op_ptr, op_end, &size);
1227 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uleb_offset);
1228 add_piece (size, uleb_offset);
1229
1230 /* Pop off the address/regnum, and reset the location
1231 type. */
1232 if (this->location != DWARF_VALUE_LITERAL
1233 && this->location != DWARF_VALUE_OPTIMIZED_OUT)
1234 pop ();
1235 this->location = DWARF_VALUE_MEMORY;
1236 }
1237 goto no_push;
1238
1239 case DW_OP_GNU_uninit:
1240 if (op_ptr != op_end)
1241 error (_("DWARF-2 expression error: DW_OP_GNU_uninit must always "
1242 "be the very last op."));
1243
1244 this->initialized = 0;
1245 goto no_push;
1246
1247 case DW_OP_call2:
1248 {
1249 cu_offset cu_off
1250 = (cu_offset) extract_unsigned_integer (op_ptr, 2, byte_order);
1251 op_ptr += 2;
1252 this->dwarf_call (cu_off);
1253 }
1254 goto no_push;
1255
1256 case DW_OP_call4:
1257 {
1258 cu_offset cu_off
1259 = (cu_offset) extract_unsigned_integer (op_ptr, 4, byte_order);
1260 op_ptr += 4;
1261 this->dwarf_call (cu_off);
1262 }
1263 goto no_push;
1264
1265 case DW_OP_GNU_variable_value:
1266 {
1267 sect_offset sect_off
1268 = (sect_offset) extract_unsigned_integer (op_ptr,
1269 this->ref_addr_size,
1270 byte_order);
1271 op_ptr += this->ref_addr_size;
1272 result_val = this->dwarf_variable_value (sect_off);
1273 }
1274 break;
1275
1276 case DW_OP_entry_value:
1277 case DW_OP_GNU_entry_value:
1278 {
1279 uint64_t len;
1280 CORE_ADDR deref_size;
1281 union call_site_parameter_u kind_u;
1282
1283 op_ptr = safe_read_uleb128 (op_ptr, op_end, &len);
1284 if (op_ptr + len > op_end)
1285 error (_("DW_OP_entry_value: too few bytes available."));
1286
1287 kind_u.dwarf_reg = dwarf_block_to_dwarf_reg (op_ptr, op_ptr + len);
1288 if (kind_u.dwarf_reg != -1)
1289 {
1290 op_ptr += len;
1291 this->push_dwarf_reg_entry_value (CALL_SITE_PARAMETER_DWARF_REG,
1292 kind_u,
1293 -1 /* deref_size */);
1294 goto no_push;
1295 }
1296
1297 kind_u.dwarf_reg = dwarf_block_to_dwarf_reg_deref (op_ptr,
1298 op_ptr + len,
1299 &deref_size);
1300 if (kind_u.dwarf_reg != -1)
1301 {
1302 if (deref_size == -1)
1303 deref_size = this->addr_size;
1304 op_ptr += len;
1305 this->push_dwarf_reg_entry_value (CALL_SITE_PARAMETER_DWARF_REG,
1306 kind_u, deref_size);
1307 goto no_push;
1308 }
1309
1310 error (_("DWARF-2 expression error: DW_OP_entry_value is "
1311 "supported only for single DW_OP_reg* "
1312 "or for DW_OP_breg*(0)+DW_OP_deref*"));
1313 }
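/* Illustrative example (not part of the original source): a typical
   use is DW_OP_entry_value { DW_OP_reg5 } followed by
   DW_OP_stack_value, meaning "the value register 5 held when the
   function was entered".  As the checks above show, only a single
   DW_OP_regN / DW_OP_regx block, or the DW_OP_bregN(0) + DW_OP_deref
   form, is supported here.  */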
1314
1315 case DW_OP_GNU_parameter_ref:
1316 {
1317 union call_site_parameter_u kind_u;
1318
1319 kind_u.param_cu_off
1320 = (cu_offset) extract_unsigned_integer (op_ptr, 4, byte_order);
1321 op_ptr += 4;
1322 this->push_dwarf_reg_entry_value (CALL_SITE_PARAMETER_PARAM_OFFSET,
1323 kind_u,
1324 -1 /* deref_size */);
1325 }
1326 goto no_push;
1327
1328 case DW_OP_const_type:
1329 case DW_OP_GNU_const_type:
1330 {
1331 int n;
1332 const gdb_byte *data;
1333 struct type *type;
1334
1335 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
1336 cu_offset type_die_cu_off = (cu_offset) uoffset;
1337
1338 n = *op_ptr++;
1339 data = op_ptr;
1340 op_ptr += n;
1341
1342 type = get_base_type (type_die_cu_off, n);
1343 result_val = value_from_contents (type, data);
1344 }
1345 break;
1346
1347 case DW_OP_regval_type:
1348 case DW_OP_GNU_regval_type:
1349 {
1350 struct type *type;
1351
1352 op_ptr = safe_read_uleb128 (op_ptr, op_end, &reg);
1353 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
1354 cu_offset type_die_cu_off = (cu_offset) uoffset;
1355
1356 type = get_base_type (type_die_cu_off, 0);
1357 result_val = this->get_reg_value (type, reg);
1358 }
1359 break;
1360
1361 case DW_OP_convert:
1362 case DW_OP_GNU_convert:
1363 case DW_OP_reinterpret:
1364 case DW_OP_GNU_reinterpret:
1365 {
1366 struct type *type;
1367
1368 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
1369 cu_offset type_die_cu_off = (cu_offset) uoffset;
1370
1371 if (to_underlying (type_die_cu_off) == 0)
1372 type = address_type;
1373 else
1374 type = get_base_type (type_die_cu_off, 0);
1375
1376 result_val = fetch (0);
1377 pop ();
1378
1379 if (op == DW_OP_convert || op == DW_OP_GNU_convert)
1380 result_val = value_cast (type, result_val);
1381 else if (type == value_type (result_val))
1382 {
1383 /* Nothing. */
1384 }
1385 else if (TYPE_LENGTH (type)
1386 != TYPE_LENGTH (value_type (result_val)))
1387 error (_("DW_OP_reinterpret has wrong size"));
1388 else
1389 result_val
1390 = value_from_contents (type,
1391 value_contents_all (result_val));
1392 }
1393 break;
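/* Illustrative example (not part of the original source): DW_OP_convert
   with a zero type offset casts the top of the stack back to the
   generic address_type, as handled above.  DW_OP_reinterpret keeps the
   underlying bits and only relabels their type, which is why a length
   mismatch between the old and new type is an error.  */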
1394
1395 case DW_OP_push_object_address:
1396 /* Return the address of the object we are currently observing. */
1397 result = this->get_object_address ();
1398 result_val = value_from_ulongest (address_type, result);
1399 break;
1400
1401 default:
1402 error (_("Unhandled dwarf expression opcode 0x%x"), op);
1403 }
1404
1405 /* Most things push a result value. */
1406 gdb_assert (result_val != NULL);
1407 push (result_val, in_stack_memory);
1408 no_push:
1409 ;
1410 }
1411
1412 /* To simplify our main caller, if the result is an implicit
1413 pointer, then make a pieced value. This is ok because we can't
1414 have implicit pointers in contexts where pieces are invalid. */
1415 if (this->location == DWARF_VALUE_IMPLICIT_POINTER)
1416 add_piece (8 * this->addr_size, 0);
1417
1418 this->recursion_depth--;
1419 gdb_assert (this->recursion_depth >= 0);
1420 }
1421
1422 void
1423 _initialize_dwarf2expr (void)
1424 {
1425 dwarf_arch_cookie
1426 = gdbarch_data_register_post_init (dwarf_gdbarch_types_init);
1427 }
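/* Usage sketch (illustrative, not part of the original source): this
   class is abstract; a user supplies the target-specific callbacks
   invoked above (read_mem, read_addr_from_reg, get_reg_value,
   get_frame_base, get_frame_cfa, get_tls_address, get_base_type,
   dwarf_call, get_addr_index, get_object_address, ...), typically via
   the frame-based subclass in dwarf2loc.c.  A caller then fills in
   gdbarch, addr_size and ref_addr_size, runs eval (data, len) on the
   DWARF block, and inspects either this->location together with
   fetch_address (0) or fetch (0), or the pieces vector for composite
   locations.  */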