gdb/dwarf2expr.c
1 /* DWARF 2 Expression Evaluator.
2
3 Copyright (C) 2001-2019 Free Software Foundation, Inc.
4
5 Contributed by Daniel Berlin (dan@dberlin.org)
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23
24 /* Local non-gdb includes. */
25 #include "common/underlying.h"
26 #include "dwarf2.h"
27 #include "dwarf2expr.h"
28 #include "dwarf2loc.h"
29 #include "gdbcore.h"
30 #include "gdbtypes.h"
31 #include "symtab.h"
32 #include "value.h"
33
34 /* Cookie for gdbarch data. */
35
36 static struct gdbarch_data *dwarf_arch_cookie;
37
38 /* This holds gdbarch-specific types used by the DWARF expression
39 evaluator. See comments in execute_stack_op. */
40
41 struct dwarf_gdbarch_types
42 {
43 struct type *dw_types[3];
44 };
45
46 /* Allocate and fill in dwarf_gdbarch_types for an arch. */
47
48 static void *
49 dwarf_gdbarch_types_init (struct gdbarch *gdbarch)
50 {
51 struct dwarf_gdbarch_types *types
52 = GDBARCH_OBSTACK_ZALLOC (gdbarch, struct dwarf_gdbarch_types);
53
54 /* The types themselves are lazily initialized. */
55
56 return types;
57 }
58
59 /* Return the type used for DWARF operations where the type is
60 unspecified in the DWARF spec. Only certain sizes are
61 supported. */
62
63 struct type *
64 dwarf_expr_context::address_type () const
65 {
66 struct dwarf_gdbarch_types *types
67 = (struct dwarf_gdbarch_types *) gdbarch_data (this->gdbarch,
68 dwarf_arch_cookie);
69 int ndx;
70
71 if (this->addr_size == 2)
72 ndx = 0;
73 else if (this->addr_size == 4)
74 ndx = 1;
75 else if (this->addr_size == 8)
76 ndx = 2;
77 else
78 error (_("Unsupported address size in DWARF expressions: %d bits"),
79 8 * this->addr_size);
80
81 if (types->dw_types[ndx] == NULL)
82 types->dw_types[ndx]
83 = arch_integer_type (this->gdbarch,
84 8 * this->addr_size,
85 0, "<signed DWARF address type>");
86
87 return types->dw_types[ndx];
88 }
89
90 /* Create a new context for the expression evaluator. */
91
92 dwarf_expr_context::dwarf_expr_context ()
93 : gdbarch (NULL),
94 addr_size (0),
95 ref_addr_size (0),
96 offset (0),
97 recursion_depth (0),
98 max_recursion_depth (0x100),
99 location (DWARF_VALUE_MEMORY),
100 len (0),
101 data (NULL),
102 initialized (0)
103 {
104 }
105
106 /* Push VALUE onto the stack. */
107
108 void
109 dwarf_expr_context::push (struct value *value, bool in_stack_memory)
110 {
111 stack.emplace_back (value, in_stack_memory);
112 }
113
114 /* Push VALUE onto the stack. */
115
116 void
117 dwarf_expr_context::push_address (CORE_ADDR value, bool in_stack_memory)
118 {
119 push (value_from_ulongest (address_type (), value), in_stack_memory);
120 }
121
122 /* Pop the top item off of the stack. */
123
124 void
125 dwarf_expr_context::pop ()
126 {
127 if (stack.empty ())
128 error (_("dwarf expression stack underflow"));
129
130 stack.pop_back ();
131 }
132
133 /* Retrieve the N'th item on the stack. */
134
135 struct value *
136 dwarf_expr_context::fetch (int n)
137 {
138 if (stack.size () <= n)
139 error (_("Asked for position %d of stack, "
140 "stack only has %zu elements on it."),
141 n, stack.size ());
142 return stack[stack.size () - (1 + n)].value;
143 }
144
145 /* Require that TYPE be an integral type; throw an exception if not. */
146
147 static void
148 dwarf_require_integral (struct type *type)
149 {
150 if (TYPE_CODE (type) != TYPE_CODE_INT
151 && TYPE_CODE (type) != TYPE_CODE_CHAR
152 && TYPE_CODE (type) != TYPE_CODE_BOOL)
153 error (_("integral type expected in DWARF expression"));
154 }
155
156 /* Return the unsigned form of TYPE. TYPE is necessarily an integral
157 type. */
158
159 static struct type *
160 get_unsigned_type (struct gdbarch *gdbarch, struct type *type)
161 {
162 switch (TYPE_LENGTH (type))
163 {
164 case 1:
165 return builtin_type (gdbarch)->builtin_uint8;
166 case 2:
167 return builtin_type (gdbarch)->builtin_uint16;
168 case 4:
169 return builtin_type (gdbarch)->builtin_uint32;
170 case 8:
171 return builtin_type (gdbarch)->builtin_uint64;
172 default:
173 error (_("no unsigned variant found for type, while evaluating "
174 "DWARF expression"));
175 }
176 }
177
178 /* Return the signed form of TYPE. TYPE is necessarily an integral
179 type. */
180
181 static struct type *
182 get_signed_type (struct gdbarch *gdbarch, struct type *type)
183 {
184 switch (TYPE_LENGTH (type))
185 {
186 case 1:
187 return builtin_type (gdbarch)->builtin_int8;
188 case 2:
189 return builtin_type (gdbarch)->builtin_int16;
190 case 4:
191 return builtin_type (gdbarch)->builtin_int32;
192 case 8:
193 return builtin_type (gdbarch)->builtin_int64;
194 default:
195 error (_("no signed variant found for type, while evaluating "
196 "DWARF expression"));
197 }
198 }
199
200 /* Retrieve the N'th item on the stack, converted to an address. */
201
202 CORE_ADDR
203 dwarf_expr_context::fetch_address (int n)
204 {
205 struct value *result_val = fetch (n);
206 enum bfd_endian byte_order = gdbarch_byte_order (this->gdbarch);
207 ULONGEST result;
208
209 dwarf_require_integral (value_type (result_val));
210 result = extract_unsigned_integer (value_contents (result_val),
211 TYPE_LENGTH (value_type (result_val)),
212 byte_order);
213
214 /* For most architectures, calling extract_unsigned_integer() alone
215 is sufficient for extracting an address. However, some
216 architectures (e.g. MIPS) use signed addresses and using
217 extract_unsigned_integer() will not produce a correct
218 result. Make sure we invoke gdbarch_integer_to_address()
219 for those architectures which require it. */
220 if (gdbarch_integer_to_address_p (this->gdbarch))
221 {
222 gdb_byte *buf = (gdb_byte *) alloca (this->addr_size);
223 struct type *int_type = get_unsigned_type (this->gdbarch,
224 value_type (result_val));
225
226 store_unsigned_integer (buf, this->addr_size, byte_order, result);
227 return gdbarch_integer_to_address (this->gdbarch, int_type, buf);
228 }
229
230 return (CORE_ADDR) result;
231 }
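/* Illustrative note, not part of the original source: on most targets the
   unsigned extraction above is all that is needed. On a target that keeps
   addresses sign-extended -- 64-bit MIPS is the example the comment above
   gives -- gdbarch_integer_to_address lets the architecture map an integer
   such as 0x80001234 to a CORE_ADDR like 0xffffffff80001234; the exact
   mapping is whatever the gdbarch hook decides, so the values here are
   only assumed examples. */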
232
233 /* Retrieve the in_stack_memory flag of the N'th item on the stack. */
234
235 bool
236 dwarf_expr_context::fetch_in_stack_memory (int n)
237 {
238 if (stack.size () <= n)
239 error (_("Asked for position %d of stack, "
240 "stack only has %zu elements on it."),
241 n, stack.size ());
242 return stack[stack.size () - (1 + n)].in_stack_memory;
243 }
244
245 /* Return true if the expression stack is empty. */
246
247 bool
248 dwarf_expr_context::stack_empty_p () const
249 {
250 return stack.empty ();
251 }
252
253 /* Add a new piece to the dwarf_expr_context's piece list. */
254 void
255 dwarf_expr_context::add_piece (ULONGEST size, ULONGEST offset)
256 {
257 this->pieces.emplace_back ();
258 dwarf_expr_piece &p = this->pieces.back ();
259
260 p.location = this->location;
261 p.size = size;
262 p.offset = offset;
263
264 if (p.location == DWARF_VALUE_LITERAL)
265 {
266 p.v.literal.data = this->data;
267 p.v.literal.length = this->len;
268 }
269 else if (stack_empty_p ())
270 {
271 p.location = DWARF_VALUE_OPTIMIZED_OUT;
272 /* Also reset the context's location, for our callers. This is
273 a somewhat strange approach, but this lets us avoid setting
274 the location to DWARF_VALUE_MEMORY in all the individual
275 cases in the evaluator. */
276 this->location = DWARF_VALUE_OPTIMIZED_OUT;
277 }
278 else if (p.location == DWARF_VALUE_MEMORY)
279 {
280 p.v.mem.addr = fetch_address (0);
281 p.v.mem.in_stack_memory = fetch_in_stack_memory (0);
282 }
283 else if (p.location == DWARF_VALUE_IMPLICIT_POINTER)
284 {
285 p.v.ptr.die_sect_off = (sect_offset) this->len;
286 p.v.ptr.offset = value_as_long (fetch (0));
287 }
288 else if (p.location == DWARF_VALUE_REGISTER)
289 p.v.regno = value_as_long (fetch (0));
290 else
291 {
292 p.v.value = fetch (0);
293 }
294 }
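/* Worked example for add_piece above (illustrative, not part of the
   original source): the composite expression

       DW_OP_reg3  DW_OP_piece 4  DW_OP_breg5 8  DW_OP_piece 4

   describes an 8-byte object whose first four bytes live in DWARF
   register 3 and whose last four bytes live in memory at reg5 + 8.
   Each DW_OP_piece ends up here: the first call records a 32-bit
   DWARF_VALUE_REGISTER piece, the second a 32-bit DWARF_VALUE_MEMORY
   piece (the evaluator passes sizes already converted to bits, so
   DW_OP_piece 4 arrives as add_piece (32, 0)). */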
295
296 /* Evaluate the expression at ADDR (LEN bytes long). */
297
298 void
299 dwarf_expr_context::eval (const gdb_byte *addr, size_t len)
300 {
301 int old_recursion_depth = this->recursion_depth;
302
303 execute_stack_op (addr, addr + len);
304
305 /* RECURSION_DEPTH becomes invalid if an exception was thrown here. */
306
307 gdb_assert (this->recursion_depth == old_recursion_depth);
308 }
309
310 /* Helper to read a uleb128 value or throw an error. */
311
312 const gdb_byte *
313 safe_read_uleb128 (const gdb_byte *buf, const gdb_byte *buf_end,
314 uint64_t *r)
315 {
316 buf = gdb_read_uleb128 (buf, buf_end, r);
317 if (buf == NULL)
318 error (_("DWARF expression error: ran off end of buffer reading uleb128 value"));
319 return buf;
320 }
321
322 /* Helper to read a sleb128 value or throw an error. */
323
324 const gdb_byte *
325 safe_read_sleb128 (const gdb_byte *buf, const gdb_byte *buf_end,
326 int64_t *r)
327 {
328 buf = gdb_read_sleb128 (buf, buf_end, r);
329 if (buf == NULL)
330 error (_("DWARF expression error: ran off end of buffer reading sleb128 value"));
331 return buf;
332 }
333
334 const gdb_byte *
335 safe_skip_leb128 (const gdb_byte *buf, const gdb_byte *buf_end)
336 {
337 buf = gdb_skip_leb128 (buf, buf_end);
338 if (buf == NULL)
339 error (_("DWARF expression error: ran off end of buffer reading leb128 value"));
340 return buf;
341 }
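/* Worked example (illustrative, not part of the original source): LEB128
   is a little-endian base-128 encoding in which each byte carries seven
   payload bits and the high bit means "more bytes follow". The unsigned
   sequence 0xe5 0x8e 0x26 therefore decodes as

       0x65 | (0x0e << 7) | (0x26 << 14) = 624485,

   the classic example from the DWARF specification. In the signed form a
   single byte 0x7e decodes to -2, because bit 6 of the last byte is the
   sign bit and is extended. */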
342 \f
343
344 /* Check that the current operator is either at the end of an
345 expression, or that it is followed by a composition operator or by
346 DW_OP_GNU_uninit (which should terminate the expression). */
347
348 void
349 dwarf_expr_require_composition (const gdb_byte *op_ptr, const gdb_byte *op_end,
350 const char *op_name)
351 {
352 if (op_ptr != op_end && *op_ptr != DW_OP_piece && *op_ptr != DW_OP_bit_piece
353 && *op_ptr != DW_OP_GNU_uninit)
354 error (_("DWARF-2 expression error: `%s' operations must be "
355 "used either alone or in conjunction with DW_OP_piece "
356 "or DW_OP_bit_piece."),
357 op_name);
358 }
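/* Illustrative examples, not part of the original source: "DW_OP_reg5" on
   its own is a complete location naming register 5, and
   "DW_OP_reg5 DW_OP_piece 4" is a valid fragment of a composite location.
   A sequence such as "DW_OP_reg5 DW_OP_plus" is rejected by the check
   above, because a register name is not a value later operators can
   compute with. */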
359
360 /* Return true iff the types T1 and T2 are "the same". This only does
361 checks that might reasonably be needed to compare DWARF base
362 types. */
363
364 static int
365 base_types_equal_p (struct type *t1, struct type *t2)
366 {
367 if (TYPE_CODE (t1) != TYPE_CODE (t2))
368 return 0;
369 if (TYPE_UNSIGNED (t1) != TYPE_UNSIGNED (t2))
370 return 0;
371 return TYPE_LENGTH (t1) == TYPE_LENGTH (t2);
372 }
373
374 /* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_reg* return the
375 DWARF register number. Otherwise return -1. */
376
377 int
378 dwarf_block_to_dwarf_reg (const gdb_byte *buf, const gdb_byte *buf_end)
379 {
380 uint64_t dwarf_reg;
381
382 if (buf_end <= buf)
383 return -1;
384 if (*buf >= DW_OP_reg0 && *buf <= DW_OP_reg31)
385 {
386 if (buf_end - buf != 1)
387 return -1;
388 return *buf - DW_OP_reg0;
389 }
390
391 if (*buf == DW_OP_regval_type || *buf == DW_OP_GNU_regval_type)
392 {
393 buf++;
394 buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
395 if (buf == NULL)
396 return -1;
397 buf = gdb_skip_leb128 (buf, buf_end);
398 if (buf == NULL)
399 return -1;
400 }
401 else if (*buf == DW_OP_regx)
402 {
403 buf++;
404 buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
405 if (buf == NULL)
406 return -1;
407 }
408 else
409 return -1;
410 if (buf != buf_end || (int) dwarf_reg != dwarf_reg)
411 return -1;
412 return dwarf_reg;
413 }
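/* Illustrative examples, not part of the original source: the one-byte
   block { DW_OP_reg5 } yields 5 and { DW_OP_regx, ULEB128 (33) } yields
   33, while { DW_OP_breg5, SLEB128 (0) } yields -1 because it denotes the
   register's contents rather than the register itself. */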
414
415 /* If <BUF..BUF_END] contains DW_FORM_block* with just DW_OP_breg*(0) and
416 DW_OP_deref* return the DWARF register number. Otherwise return -1.
417 DEREF_SIZE_RETURN contains -1 for DW_OP_deref; otherwise it contains the
418 size from DW_OP_deref_size. */
419
420 int
421 dwarf_block_to_dwarf_reg_deref (const gdb_byte *buf, const gdb_byte *buf_end,
422 CORE_ADDR *deref_size_return)
423 {
424 uint64_t dwarf_reg;
425 int64_t offset;
426
427 if (buf_end <= buf)
428 return -1;
429
430 if (*buf >= DW_OP_breg0 && *buf <= DW_OP_breg31)
431 {
432 dwarf_reg = *buf - DW_OP_breg0;
433 buf++;
434 if (buf >= buf_end)
435 return -1;
436 }
437 else if (*buf == DW_OP_bregx)
438 {
439 buf++;
440 buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
441 if (buf == NULL)
442 return -1;
443 if ((int) dwarf_reg != dwarf_reg)
444 return -1;
445 }
446 else
447 return -1;
448
449 buf = gdb_read_sleb128 (buf, buf_end, &offset);
450 if (buf == NULL)
451 return -1;
452 if (offset != 0)
453 return -1;
454
455 if (*buf == DW_OP_deref)
456 {
457 buf++;
458 *deref_size_return = -1;
459 }
460 else if (*buf == DW_OP_deref_size)
461 {
462 buf++;
463 if (buf >= buf_end)
464 return -1;
465 *deref_size_return = *buf++;
466 }
467 else
468 return -1;
469
470 if (buf != buf_end)
471 return -1;
472
473 return dwarf_reg;
474 }
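/* Illustrative examples, not part of the original source:
   { DW_OP_breg6, SLEB128 (0), DW_OP_deref } yields 6 and sets
   *DEREF_SIZE_RETURN to -1, while
   { DW_OP_breg6, SLEB128 (0), DW_OP_deref_size, 4 } yields 6 with a deref
   size of 4. Any nonzero register offset makes the function return -1. */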
475
476 /* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_fbreg(X) fill
477 in FB_OFFSET_RETURN with the X offset and return 1. Otherwise return 0. */
478
479 int
480 dwarf_block_to_fb_offset (const gdb_byte *buf, const gdb_byte *buf_end,
481 CORE_ADDR *fb_offset_return)
482 {
483 int64_t fb_offset;
484
485 if (buf_end <= buf)
486 return 0;
487
488 if (*buf != DW_OP_fbreg)
489 return 0;
490 buf++;
491
492 buf = gdb_read_sleb128 (buf, buf_end, &fb_offset);
493 if (buf == NULL)
494 return 0;
495 *fb_offset_return = fb_offset;
496 if (buf != buf_end || fb_offset != (LONGEST) *fb_offset_return)
497 return 0;
498
499 return 1;
500 }
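/* Illustrative example, not part of the original source: the block
   { DW_OP_fbreg, SLEB128 (-16) } -- a variable sixteen bytes below the
   frame base -- stores -16 in *FB_OFFSET_RETURN and returns 1. */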
501
502 /* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_bregSP(X) fill
503 in SP_OFFSET_RETURN with the X offset and return 1. Otherwise return 0.
504 The matched SP register number depends on GDBARCH. */
505
506 int
507 dwarf_block_to_sp_offset (struct gdbarch *gdbarch, const gdb_byte *buf,
508 const gdb_byte *buf_end, CORE_ADDR *sp_offset_return)
509 {
510 uint64_t dwarf_reg;
511 int64_t sp_offset;
512
513 if (buf_end <= buf)
514 return 0;
515 if (*buf >= DW_OP_breg0 && *buf <= DW_OP_breg31)
516 {
517 dwarf_reg = *buf - DW_OP_breg0;
518 buf++;
519 }
520 else
521 {
522 if (*buf != DW_OP_bregx)
523 return 0;
524 buf++;
525 buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
526 if (buf == NULL)
527 return 0;
528 }
529
530 if (dwarf_reg_to_regnum (gdbarch, dwarf_reg)
531 != gdbarch_sp_regnum (gdbarch))
532 return 0;
533
534 buf = gdb_read_sleb128 (buf, buf_end, &sp_offset);
535 if (buf == NULL)
536 return 0;
537 *sp_offset_return = sp_offset;
538 if (buf != buf_end || sp_offset != (LONGEST) *sp_offset_return)
539 return 0;
540
541 return 1;
542 }
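/* Illustrative example, not part of the original source: on x86-64, where
   DWARF register 7 maps to %rsp, the block { DW_OP_breg7, SLEB128 (16) }
   stores 16 in *SP_OFFSET_RETURN and returns 1; on a target whose stack
   pointer has a different DWARF number the same block returns 0. */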
543
544 /* The engine for the expression evaluator. Using the context in this
545 object, evaluate the expression between OP_PTR and OP_END. */
546
547 void
548 dwarf_expr_context::execute_stack_op (const gdb_byte *op_ptr,
549 const gdb_byte *op_end)
550 {
551 enum bfd_endian byte_order = gdbarch_byte_order (this->gdbarch);
552 /* Old-style "untyped" DWARF values need special treatment in a
553 couple of places, specifically DW_OP_mod and DW_OP_shr. We need
554 a special type for these values so we can distinguish them from
555 values that have an explicit type, because explicitly-typed
556 values do not need special treatment. This special type must be
557 different (in the `==' sense) from any base type coming from the
558 CU. */
559 struct type *address_type = this->address_type ();
560
561 this->location = DWARF_VALUE_MEMORY;
562 this->initialized = 1; /* Default is initialized. */
563
564 if (this->recursion_depth > this->max_recursion_depth)
565 error (_("DWARF-2 expression error: Loop detected (%d)."),
566 this->recursion_depth);
567 this->recursion_depth++;
568
569 while (op_ptr < op_end)
570 {
571 enum dwarf_location_atom op = (enum dwarf_location_atom) *op_ptr++;
572 ULONGEST result;
573 /* Assume the value is not in stack memory.
574 Code that knows otherwise sets this to true.
575 Some arithmetic on stack addresses can probably be assumed to still
576 be a stack address, but we skip this complication for now.
577 This is just an optimization, so it's always ok to punt
578 and leave this as false. */
579 bool in_stack_memory = false;
580 uint64_t uoffset, reg;
581 int64_t offset;
582 struct value *result_val = NULL;
583
584 /* The DWARF expression might have a bug causing an infinite
585 loop. In that case, quitting is the only way out. */
586 QUIT;
587
588 switch (op)
589 {
590 case DW_OP_lit0:
591 case DW_OP_lit1:
592 case DW_OP_lit2:
593 case DW_OP_lit3:
594 case DW_OP_lit4:
595 case DW_OP_lit5:
596 case DW_OP_lit6:
597 case DW_OP_lit7:
598 case DW_OP_lit8:
599 case DW_OP_lit9:
600 case DW_OP_lit10:
601 case DW_OP_lit11:
602 case DW_OP_lit12:
603 case DW_OP_lit13:
604 case DW_OP_lit14:
605 case DW_OP_lit15:
606 case DW_OP_lit16:
607 case DW_OP_lit17:
608 case DW_OP_lit18:
609 case DW_OP_lit19:
610 case DW_OP_lit20:
611 case DW_OP_lit21:
612 case DW_OP_lit22:
613 case DW_OP_lit23:
614 case DW_OP_lit24:
615 case DW_OP_lit25:
616 case DW_OP_lit26:
617 case DW_OP_lit27:
618 case DW_OP_lit28:
619 case DW_OP_lit29:
620 case DW_OP_lit30:
621 case DW_OP_lit31:
622 result = op - DW_OP_lit0;
623 result_val = value_from_ulongest (address_type, result);
624 break;
625
626 case DW_OP_addr:
627 result = extract_unsigned_integer (op_ptr,
628 this->addr_size, byte_order);
629 op_ptr += this->addr_size;
630 /* Some versions of GCC emit DW_OP_addr before
631 DW_OP_GNU_push_tls_address. In this case the value is an
632 index, not an address. We don't support things like
633 branching between the address and the TLS op. */
634 if (op_ptr >= op_end || *op_ptr != DW_OP_GNU_push_tls_address)
635 result += this->offset;
636 result_val = value_from_ulongest (address_type, result);
637 break;
638
639 case DW_OP_GNU_addr_index:
640 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
641 result = this->get_addr_index (uoffset);
642 result += this->offset;
643 result_val = value_from_ulongest (address_type, result);
644 break;
645 case DW_OP_GNU_const_index:
646 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
647 result = this->get_addr_index (uoffset);
648 result_val = value_from_ulongest (address_type, result);
649 break;
650
651 case DW_OP_const1u:
652 result = extract_unsigned_integer (op_ptr, 1, byte_order);
653 result_val = value_from_ulongest (address_type, result);
654 op_ptr += 1;
655 break;
656 case DW_OP_const1s:
657 result = extract_signed_integer (op_ptr, 1, byte_order);
658 result_val = value_from_ulongest (address_type, result);
659 op_ptr += 1;
660 break;
661 case DW_OP_const2u:
662 result = extract_unsigned_integer (op_ptr, 2, byte_order);
663 result_val = value_from_ulongest (address_type, result);
664 op_ptr += 2;
665 break;
666 case DW_OP_const2s:
667 result = extract_signed_integer (op_ptr, 2, byte_order);
668 result_val = value_from_ulongest (address_type, result);
669 op_ptr += 2;
670 break;
671 case DW_OP_const4u:
672 result = extract_unsigned_integer (op_ptr, 4, byte_order);
673 result_val = value_from_ulongest (address_type, result);
674 op_ptr += 4;
675 break;
676 case DW_OP_const4s:
677 result = extract_signed_integer (op_ptr, 4, byte_order);
678 result_val = value_from_ulongest (address_type, result);
679 op_ptr += 4;
680 break;
681 case DW_OP_const8u:
682 result = extract_unsigned_integer (op_ptr, 8, byte_order);
683 result_val = value_from_ulongest (address_type, result);
684 op_ptr += 8;
685 break;
686 case DW_OP_const8s:
687 result = extract_signed_integer (op_ptr, 8, byte_order);
688 result_val = value_from_ulongest (address_type, result);
689 op_ptr += 8;
690 break;
691 case DW_OP_constu:
692 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
693 result = uoffset;
694 result_val = value_from_ulongest (address_type, result);
695 break;
696 case DW_OP_consts:
697 op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset);
698 result = offset;
699 result_val = value_from_ulongest (address_type, result);
700 break;
701
702 /* The DW_OP_reg operations are required to occur alone in
703 location expressions. */
704 case DW_OP_reg0:
705 case DW_OP_reg1:
706 case DW_OP_reg2:
707 case DW_OP_reg3:
708 case DW_OP_reg4:
709 case DW_OP_reg5:
710 case DW_OP_reg6:
711 case DW_OP_reg7:
712 case DW_OP_reg8:
713 case DW_OP_reg9:
714 case DW_OP_reg10:
715 case DW_OP_reg11:
716 case DW_OP_reg12:
717 case DW_OP_reg13:
718 case DW_OP_reg14:
719 case DW_OP_reg15:
720 case DW_OP_reg16:
721 case DW_OP_reg17:
722 case DW_OP_reg18:
723 case DW_OP_reg19:
724 case DW_OP_reg20:
725 case DW_OP_reg21:
726 case DW_OP_reg22:
727 case DW_OP_reg23:
728 case DW_OP_reg24:
729 case DW_OP_reg25:
730 case DW_OP_reg26:
731 case DW_OP_reg27:
732 case DW_OP_reg28:
733 case DW_OP_reg29:
734 case DW_OP_reg30:
735 case DW_OP_reg31:
736 dwarf_expr_require_composition (op_ptr, op_end, "DW_OP_reg");
737
738 result = op - DW_OP_reg0;
739 result_val = value_from_ulongest (address_type, result);
740 this->location = DWARF_VALUE_REGISTER;
741 break;
742
743 case DW_OP_regx:
744 op_ptr = safe_read_uleb128 (op_ptr, op_end, &reg);
745 dwarf_expr_require_composition (op_ptr, op_end, "DW_OP_regx");
746
747 result = reg;
748 result_val = value_from_ulongest (address_type, result);
749 this->location = DWARF_VALUE_REGISTER;
750 break;
751
752 case DW_OP_implicit_value:
753 {
754 uint64_t len;
755
756 op_ptr = safe_read_uleb128 (op_ptr, op_end, &len);
757 if (op_ptr + len > op_end)
758 error (_("DW_OP_implicit_value: too few bytes available."));
759 this->len = len;
760 this->data = op_ptr;
761 this->location = DWARF_VALUE_LITERAL;
762 op_ptr += len;
763 dwarf_expr_require_composition (op_ptr, op_end,
764 "DW_OP_implicit_value");
765 }
766 goto no_push;
767
768 case DW_OP_stack_value:
769 this->location = DWARF_VALUE_STACK;
770 dwarf_expr_require_composition (op_ptr, op_end, "DW_OP_stack_value");
771 goto no_push;
772
773 case DW_OP_implicit_pointer:
774 case DW_OP_GNU_implicit_pointer:
775 {
776 int64_t len;
777
778 if (this->ref_addr_size == -1)
779 error (_("DWARF-2 expression error: DW_OP_implicit_pointer "
780 "is not allowed in frame context"));
781
782 /* The referred-to DIE of sect_offset kind. */
783 this->len = extract_unsigned_integer (op_ptr, this->ref_addr_size,
784 byte_order);
785 op_ptr += this->ref_addr_size;
786
787 /* The byte offset into the data. */
788 op_ptr = safe_read_sleb128 (op_ptr, op_end, &len);
789 result = (ULONGEST) len;
790 result_val = value_from_ulongest (address_type, result);
791
792 this->location = DWARF_VALUE_IMPLICIT_POINTER;
793 dwarf_expr_require_composition (op_ptr, op_end,
794 "DW_OP_implicit_pointer");
795 }
796 break;
797
798 case DW_OP_breg0:
799 case DW_OP_breg1:
800 case DW_OP_breg2:
801 case DW_OP_breg3:
802 case DW_OP_breg4:
803 case DW_OP_breg5:
804 case DW_OP_breg6:
805 case DW_OP_breg7:
806 case DW_OP_breg8:
807 case DW_OP_breg9:
808 case DW_OP_breg10:
809 case DW_OP_breg11:
810 case DW_OP_breg12:
811 case DW_OP_breg13:
812 case DW_OP_breg14:
813 case DW_OP_breg15:
814 case DW_OP_breg16:
815 case DW_OP_breg17:
816 case DW_OP_breg18:
817 case DW_OP_breg19:
818 case DW_OP_breg20:
819 case DW_OP_breg21:
820 case DW_OP_breg22:
821 case DW_OP_breg23:
822 case DW_OP_breg24:
823 case DW_OP_breg25:
824 case DW_OP_breg26:
825 case DW_OP_breg27:
826 case DW_OP_breg28:
827 case DW_OP_breg29:
828 case DW_OP_breg30:
829 case DW_OP_breg31:
830 {
831 op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset);
832 result = this->read_addr_from_reg (op - DW_OP_breg0);
833 result += offset;
834 result_val = value_from_ulongest (address_type, result);
835 }
836 break;
837 case DW_OP_bregx:
838 {
839 op_ptr = safe_read_uleb128 (op_ptr, op_end, &reg);
840 op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset);
841 result = this->read_addr_from_reg (reg);
842 result += offset;
843 result_val = value_from_ulongest (address_type, result);
844 }
845 break;
846 case DW_OP_fbreg:
847 {
848 const gdb_byte *datastart;
849 size_t datalen;
850
851 op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset);
852
853 /* Rather than create a whole new context, we simply
854 back up the current stack locally and install a new empty stack,
855 then reset it afterwards, effectively erasing whatever the
856 recursive call put there. */
857 std::vector<dwarf_stack_value> saved_stack = std::move (stack);
858 stack.clear ();
859
860 /* FIXME: cagney/2003-03-26: This code should be using
861 get_frame_base_address(), and then implement a dwarf2
862 specific this_base method. */
863 this->get_frame_base (&datastart, &datalen);
864 eval (datastart, datalen);
865 if (this->location == DWARF_VALUE_MEMORY)
866 result = fetch_address (0);
867 else if (this->location == DWARF_VALUE_REGISTER)
868 result = this->read_addr_from_reg (value_as_long (fetch (0)));
869 else
870 error (_("Not implemented: computing frame "
871 "base using explicit value operator"));
872 result = result + offset;
873 result_val = value_from_ulongest (address_type, result);
874 in_stack_memory = true;
875
876 /* Restore the content of the original stack. */
877 stack = std::move (saved_stack);
878
879 this->location = DWARF_VALUE_MEMORY;
880 }
881 break;
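/* Worked example for the DW_OP_fbreg case above (illustrative, not part
   of the original source): for "DW_OP_fbreg -16" with a frame base of
   DW_OP_call_frame_cfa, the frame-base block is evaluated on a
   temporarily empty stack, the resulting CFA is fetched as an address,
   -16 is added, and the sum is pushed with in_stack_memory set. */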
882
883 case DW_OP_dup:
884 result_val = fetch (0);
885 in_stack_memory = fetch_in_stack_memory (0);
886 break;
887
888 case DW_OP_drop:
889 pop ();
890 goto no_push;
891
892 case DW_OP_pick:
893 offset = *op_ptr++;
894 result_val = fetch (offset);
895 in_stack_memory = fetch_in_stack_memory (offset);
896 break;
897
898 case DW_OP_swap:
899 {
900 if (stack.size () < 2)
901 error (_("Not enough elements for "
902 "DW_OP_swap. Need 2, have %zu."),
903 stack.size ());
904
905 dwarf_stack_value &t1 = stack[stack.size () - 1];
906 dwarf_stack_value &t2 = stack[stack.size () - 2];
907 std::swap (t1, t2);
908 goto no_push;
909 }
910
911 case DW_OP_over:
912 result_val = fetch (1);
913 in_stack_memory = fetch_in_stack_memory (1);
914 break;
915
916 case DW_OP_rot:
917 {
918 if (stack.size () < 3)
919 error (_("Not enough elements for "
920 "DW_OP_rot. Need 3, have %zu."),
921 stack.size ());
922
923 dwarf_stack_value temp = stack[stack.size () - 1];
924 stack[stack.size () - 1] = stack[stack.size () - 2];
925 stack[stack.size () - 2] = stack[stack.size () - 3];
926 stack[stack.size () - 3] = temp;
927 goto no_push;
928 }
929
930 case DW_OP_deref:
931 case DW_OP_deref_size:
932 case DW_OP_deref_type:
933 case DW_OP_GNU_deref_type:
934 {
935 int addr_size = (op == DW_OP_deref ? this->addr_size : *op_ptr++);
936 gdb_byte *buf = (gdb_byte *) alloca (addr_size);
937 CORE_ADDR addr = fetch_address (0);
938 struct type *type;
939
940 pop ();
941
942 if (op == DW_OP_deref_type || op == DW_OP_GNU_deref_type)
943 {
944 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
945 cu_offset type_die_cu_off = (cu_offset) uoffset;
946 type = get_base_type (type_die_cu_off, 0);
947 }
948 else
949 type = address_type;
950
951 this->read_mem (buf, addr, addr_size);
952
953 /* If the size of the object read from memory is different
954 from the type length, we need to zero-extend it. */
955 if (TYPE_LENGTH (type) != addr_size)
956 {
957 ULONGEST datum =
958 extract_unsigned_integer (buf, addr_size, byte_order);
959
960 buf = (gdb_byte *) alloca (TYPE_LENGTH (type));
961 store_unsigned_integer (buf, TYPE_LENGTH (type),
962 byte_order, datum);
963 }
964
965 result_val = value_from_contents_and_address (type, buf, addr);
966 break;
967 }
968
969 case DW_OP_abs:
970 case DW_OP_neg:
971 case DW_OP_not:
972 case DW_OP_plus_uconst:
973 {
974 /* Unary operations. */
975 result_val = fetch (0);
976 pop ();
977
978 switch (op)
979 {
980 case DW_OP_abs:
981 if (value_less (result_val,
982 value_zero (value_type (result_val), not_lval)))
983 result_val = value_neg (result_val);
984 break;
985 case DW_OP_neg:
986 result_val = value_neg (result_val);
987 break;
988 case DW_OP_not:
989 dwarf_require_integral (value_type (result_val));
990 result_val = value_complement (result_val);
991 break;
992 case DW_OP_plus_uconst:
993 dwarf_require_integral (value_type (result_val));
994 result = value_as_long (result_val);
995 op_ptr = safe_read_uleb128 (op_ptr, op_end, &reg);
996 result += reg;
997 result_val = value_from_ulongest (address_type, result);
998 break;
999 }
1000 }
1001 break;
1002
1003 case DW_OP_and:
1004 case DW_OP_div:
1005 case DW_OP_minus:
1006 case DW_OP_mod:
1007 case DW_OP_mul:
1008 case DW_OP_or:
1009 case DW_OP_plus:
1010 case DW_OP_shl:
1011 case DW_OP_shr:
1012 case DW_OP_shra:
1013 case DW_OP_xor:
1014 case DW_OP_le:
1015 case DW_OP_ge:
1016 case DW_OP_eq:
1017 case DW_OP_lt:
1018 case DW_OP_gt:
1019 case DW_OP_ne:
1020 {
1021 /* Binary operations. */
1022 struct value *first, *second;
1023
1024 second = fetch (0);
1025 pop ();
1026
1027 first = fetch (0);
1028 pop ();
1029
1030 if (! base_types_equal_p (value_type (first), value_type (second)))
1031 error (_("Incompatible types on DWARF stack"));
1032
1033 switch (op)
1034 {
1035 case DW_OP_and:
1036 dwarf_require_integral (value_type (first));
1037 dwarf_require_integral (value_type (second));
1038 result_val = value_binop (first, second, BINOP_BITWISE_AND);
1039 break;
1040 case DW_OP_div:
1041 result_val = value_binop (first, second, BINOP_DIV);
1042 break;
1043 case DW_OP_minus:
1044 result_val = value_binop (first, second, BINOP_SUB);
1045 break;
1046 case DW_OP_mod:
1047 {
1048 int cast_back = 0;
1049 struct type *orig_type = value_type (first);
1050
1051 /* We have to special-case "old-style" untyped values
1052 -- these must have mod computed using unsigned
1053 math. */
1054 if (orig_type == address_type)
1055 {
1056 struct type *utype
1057 = get_unsigned_type (this->gdbarch, orig_type);
1058
1059 cast_back = 1;
1060 first = value_cast (utype, first);
1061 second = value_cast (utype, second);
1062 }
1063 /* Note that value_binop doesn't handle float or
1064 decimal float here. This seems unimportant. */
1065 result_val = value_binop (first, second, BINOP_MOD);
1066 if (cast_back)
1067 result_val = value_cast (orig_type, result_val);
1068 }
1069 break;
1070 case DW_OP_mul:
1071 result_val = value_binop (first, second, BINOP_MUL);
1072 break;
1073 case DW_OP_or:
1074 dwarf_require_integral (value_type (first));
1075 dwarf_require_integral (value_type (second));
1076 result_val = value_binop (first, second, BINOP_BITWISE_IOR);
1077 break;
1078 case DW_OP_plus:
1079 result_val = value_binop (first, second, BINOP_ADD);
1080 break;
1081 case DW_OP_shl:
1082 dwarf_require_integral (value_type (first));
1083 dwarf_require_integral (value_type (second));
1084 result_val = value_binop (first, second, BINOP_LSH);
1085 break;
1086 case DW_OP_shr:
1087 dwarf_require_integral (value_type (first));
1088 dwarf_require_integral (value_type (second));
1089 if (!TYPE_UNSIGNED (value_type (first)))
1090 {
1091 struct type *utype
1092 = get_unsigned_type (this->gdbarch, value_type (first));
1093
1094 first = value_cast (utype, first);
1095 }
1096
1097 result_val = value_binop (first, second, BINOP_RSH);
1098 /* Make sure we wind up with the same type we started
1099 with. */
1100 if (value_type (result_val) != value_type (second))
1101 result_val = value_cast (value_type (second), result_val);
1102 break;
1103 case DW_OP_shra:
1104 dwarf_require_integral (value_type (first));
1105 dwarf_require_integral (value_type (second));
1106 if (TYPE_UNSIGNED (value_type (first)))
1107 {
1108 struct type *stype
1109 = get_signed_type (this->gdbarch, value_type (first));
1110
1111 first = value_cast (stype, first);
1112 }
1113
1114 result_val = value_binop (first, second, BINOP_RSH);
1115 /* Make sure we wind up with the same type we started
1116 with. */
1117 if (value_type (result_val) != value_type (second))
1118 result_val = value_cast (value_type (second), result_val);
1119 break;
1120 case DW_OP_xor:
1121 dwarf_require_integral (value_type (first));
1122 dwarf_require_integral (value_type (second));
1123 result_val = value_binop (first, second, BINOP_BITWISE_XOR);
1124 break;
1125 case DW_OP_le:
1126 /* A <= B is !(B < A). */
1127 result = ! value_less (second, first);
1128 result_val = value_from_ulongest (address_type, result);
1129 break;
1130 case DW_OP_ge:
1131 /* A >= B is !(A < B). */
1132 result = ! value_less (first, second);
1133 result_val = value_from_ulongest (address_type, result);
1134 break;
1135 case DW_OP_eq:
1136 result = value_equal (first, second);
1137 result_val = value_from_ulongest (address_type, result);
1138 break;
1139 case DW_OP_lt:
1140 result = value_less (first, second);
1141 result_val = value_from_ulongest (address_type, result);
1142 break;
1143 case DW_OP_gt:
1144 /* A > B is B < A. */
1145 result = value_less (second, first);
1146 result_val = value_from_ulongest (address_type, result);
1147 break;
1148 case DW_OP_ne:
1149 result = ! value_equal (first, second);
1150 result_val = value_from_ulongest (address_type, result);
1151 break;
1152 default:
1153 internal_error (__FILE__, __LINE__,
1154 _("Can't be reached."));
1155 }
1156 }
1157 break;
1158
1159 case DW_OP_call_frame_cfa:
1160 result = this->get_frame_cfa ();
1161 result_val = value_from_ulongest (address_type, result);
1162 in_stack_memory = true;
1163 break;
1164
1165 case DW_OP_GNU_push_tls_address:
1166 case DW_OP_form_tls_address:
1167 /* Variable is at a constant offset in the thread-local
1168 storage block into the objfile for the current thread and
1169 the dynamic linker module containing this expression. Here
1170 we return the offset from that base. The top of the
1171 stack has the offset from the beginning of the thread
1172 control block at which the variable is located. Nothing
1173 should follow this operator, so the top of stack would be
1174 returned. */
1175 result = value_as_long (fetch (0));
1176 pop ();
1177 result = this->get_tls_address (result);
1178 result_val = value_from_ulongest (address_type, result);
1179 break;
1180
1181 case DW_OP_skip:
1182 offset = extract_signed_integer (op_ptr, 2, byte_order);
1183 op_ptr += 2;
1184 op_ptr += offset;
1185 goto no_push;
1186
1187 case DW_OP_bra:
1188 {
1189 struct value *val;
1190
1191 offset = extract_signed_integer (op_ptr, 2, byte_order);
1192 op_ptr += 2;
1193 val = fetch (0);
1194 dwarf_require_integral (value_type (val));
1195 if (value_as_long (val) != 0)
1196 op_ptr += offset;
1197 pop ();
1198 }
1199 goto no_push;
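/* Note on the two branch operators above (illustrative, not part of the
   original source): both take a signed 16-bit operand measured from the
   first byte after that operand, so "DW_OP_bra 3" pops one value and, if
   it is non-zero, skips the next three bytes of the expression, while
   DW_OP_skip branches unconditionally. */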
1200
1201 case DW_OP_nop:
1202 goto no_push;
1203
1204 case DW_OP_piece:
1205 {
1206 uint64_t size;
1207
1208 /* Record the piece. */
1209 op_ptr = safe_read_uleb128 (op_ptr, op_end, &size);
1210 add_piece (8 * size, 0);
1211
1212 /* Pop off the address/regnum, and reset the location
1213 type. */
1214 if (this->location != DWARF_VALUE_LITERAL
1215 && this->location != DWARF_VALUE_OPTIMIZED_OUT)
1216 pop ();
1217 this->location = DWARF_VALUE_MEMORY;
1218 }
1219 goto no_push;
1220
1221 case DW_OP_bit_piece:
1222 {
1223 uint64_t size, uleb_offset;
1224
1225 /* Record the piece. */
1226 op_ptr = safe_read_uleb128 (op_ptr, op_end, &size);
1227 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uleb_offset);
1228 add_piece (size, uleb_offset);
1229
1230 /* Pop off the address/regnum, and reset the location
1231 type. */
1232 if (this->location != DWARF_VALUE_LITERAL
1233 && this->location != DWARF_VALUE_OPTIMIZED_OUT)
1234 pop ();
1235 this->location = DWARF_VALUE_MEMORY;
1236 }
1237 goto no_push;
1238
1239 case DW_OP_GNU_uninit:
1240 if (op_ptr != op_end)
1241 error (_("DWARF-2 expression error: DW_OP_GNU_uninit must always "
1242 "be the very last op."));
1243
1244 this->initialized = 0;
1245 goto no_push;
1246
1247 case DW_OP_call2:
1248 {
1249 cu_offset cu_off
1250 = (cu_offset) extract_unsigned_integer (op_ptr, 2, byte_order);
1251 op_ptr += 2;
1252 this->dwarf_call (cu_off);
1253 }
1254 goto no_push;
1255
1256 case DW_OP_call4:
1257 {
1258 cu_offset cu_off
1259 = (cu_offset) extract_unsigned_integer (op_ptr, 4, byte_order);
1260 op_ptr += 4;
1261 this->dwarf_call (cu_off);
1262 }
1263 goto no_push;
1264
1265 case DW_OP_GNU_variable_value:
1266 {
1267 sect_offset sect_off
1268 = (sect_offset) extract_unsigned_integer (op_ptr,
1269 this->ref_addr_size,
1270 byte_order);
1271 op_ptr += this->ref_addr_size;
1272 result_val = this->dwarf_variable_value (sect_off);
1273 }
1274 break;
1275
1276 case DW_OP_entry_value:
1277 case DW_OP_GNU_entry_value:
1278 {
1279 uint64_t len;
1280 CORE_ADDR deref_size;
1281 union call_site_parameter_u kind_u;
1282
1283 op_ptr = safe_read_uleb128 (op_ptr, op_end, &len);
1284 if (op_ptr + len > op_end)
1285 error (_("DW_OP_entry_value: too few bytes available."));
1286
1287 kind_u.dwarf_reg = dwarf_block_to_dwarf_reg (op_ptr, op_ptr + len);
1288 if (kind_u.dwarf_reg != -1)
1289 {
1290 op_ptr += len;
1291 this->push_dwarf_reg_entry_value (CALL_SITE_PARAMETER_DWARF_REG,
1292 kind_u,
1293 -1 /* deref_size */);
1294 goto no_push;
1295 }
1296
1297 kind_u.dwarf_reg = dwarf_block_to_dwarf_reg_deref (op_ptr,
1298 op_ptr + len,
1299 &deref_size);
1300 if (kind_u.dwarf_reg != -1)
1301 {
1302 if (deref_size == -1)
1303 deref_size = this->addr_size;
1304 op_ptr += len;
1305 this->push_dwarf_reg_entry_value (CALL_SITE_PARAMETER_DWARF_REG,
1306 kind_u, deref_size);
1307 goto no_push;
1308 }
1309
1310 error (_("DWARF-2 expression error: DW_OP_entry_value is "
1311 "supported only for single DW_OP_reg* "
1312 "or for DW_OP_breg*(0)+DW_OP_deref*"));
1313 }
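/* Illustrative example for the DW_OP_entry_value case above, not part of
   the original source: a block such as
   { DW_OP_entry_value, ULEB128 (1), DW_OP_reg5 } asks for the value
   register 5 had when the current function was entered, which GDB
   recovers from call-site information via push_dwarf_reg_entry_value. */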
1314
1315 case DW_OP_GNU_parameter_ref:
1316 {
1317 union call_site_parameter_u kind_u;
1318
1319 kind_u.param_cu_off
1320 = (cu_offset) extract_unsigned_integer (op_ptr, 4, byte_order);
1321 op_ptr += 4;
1322 this->push_dwarf_reg_entry_value (CALL_SITE_PARAMETER_PARAM_OFFSET,
1323 kind_u,
1324 -1 /* deref_size */);
1325 }
1326 goto no_push;
1327
1328 case DW_OP_const_type:
1329 case DW_OP_GNU_const_type:
1330 {
1331 int n;
1332 const gdb_byte *data;
1333 struct type *type;
1334
1335 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
1336 cu_offset type_die_cu_off = (cu_offset) uoffset;
1337
1338 n = *op_ptr++;
1339 data = op_ptr;
1340 op_ptr += n;
1341
1342 type = get_base_type (type_die_cu_off, n);
1343 result_val = value_from_contents (type, data);
1344 }
1345 break;
1346
1347 case DW_OP_regval_type:
1348 case DW_OP_GNU_regval_type:
1349 {
1350 struct type *type;
1351
1352 op_ptr = safe_read_uleb128 (op_ptr, op_end, &reg);
1353 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
1354 cu_offset type_die_cu_off = (cu_offset) uoffset;
1355
1356 type = get_base_type (type_die_cu_off, 0);
1357 result_val = this->get_reg_value (type, reg);
1358 }
1359 break;
1360
1361 case DW_OP_convert:
1362 case DW_OP_GNU_convert:
1363 case DW_OP_reinterpret:
1364 case DW_OP_GNU_reinterpret:
1365 {
1366 struct type *type;
1367
1368 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
1369 cu_offset type_die_cu_off = (cu_offset) uoffset;
1370
1371 if (to_underlying (type_die_cu_off) == 0)
1372 type = address_type;
1373 else
1374 type = get_base_type (type_die_cu_off, 0);
1375
1376 result_val = fetch (0);
1377 pop ();
1378
1379 if (op == DW_OP_convert || op == DW_OP_GNU_convert)
1380 result_val = value_cast (type, result_val);
1381 else if (type == value_type (result_val))
1382 {
1383 /* Nothing. */
1384 }
1385 else if (TYPE_LENGTH (type)
1386 != TYPE_LENGTH (value_type (result_val)))
1387 error (_("DW_OP_reinterpret has wrong size"));
1388 else
1389 result_val
1390 = value_from_contents (type,
1391 value_contents_all (result_val));
1392 }
1393 break;
1394
1395 case DW_OP_push_object_address:
1396 /* Return the address of the object we are currently observing. */
1397 result = this->get_object_address ();
1398 result_val = value_from_ulongest (address_type, result);
1399 break;
1400
1401 default:
1402 error (_("Unhandled dwarf expression opcode 0x%x"), op);
1403 }
1404
1405 /* Most things push a result value. */
1406 gdb_assert (result_val != NULL);
1407 push (result_val, in_stack_memory);
1408 no_push:
1409 ;
1410 }
1411
1412 /* To simplify our main caller, if the result is an implicit
1413 pointer, then make a pieced value. This is ok because we can't
1414 have implicit pointers in contexts where pieces are invalid. */
1415 if (this->location == DWARF_VALUE_IMPLICIT_POINTER)
1416 add_piece (8 * this->addr_size, 0);
1417
1418 this->recursion_depth--;
1419 gdb_assert (this->recursion_depth >= 0);
1420 }
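/* End-to-end example (illustrative, not part of the original source):
   evaluating

       DW_OP_lit5  DW_OP_lit3  DW_OP_plus  DW_OP_stack_value

   pushes 5 and 3, replaces them with their sum 8, and finishes with the
   location set to DWARF_VALUE_STACK: the object's value is the literal 8
   and it has no address. */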
1421
1422 void
1423 _initialize_dwarf2expr (void)
1424 {
1425 dwarf_arch_cookie
1426 = gdbarch_data_register_post_init (dwarf_gdbarch_types_init);
1427 }