gdb/
[deliverable/binutils-gdb.git] / gdb / dwarf2expr.c
CommitLineData
852483bc
MK
1/* DWARF 2 Expression Evaluator.
2
7b6bb8da 3 Copyright (C) 2001, 2002, 2003, 2005, 2007, 2008, 2009, 2010, 2011
9b254dd1 4 Free Software Foundation, Inc.
852483bc 5
4c2df51b
DJ
6 Contributed by Daniel Berlin (dan@dberlin.org)
7
8 This file is part of GDB.
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
a9762ec7 12 the Free Software Foundation; either version 3 of the License, or
4c2df51b
DJ
13 (at your option) any later version.
14
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
a9762ec7 21 along with this program. If not, see <http://www.gnu.org/licenses/>. */
4c2df51b
DJ
22
23#include "defs.h"
24#include "symtab.h"
25#include "gdbtypes.h"
26#include "value.h"
27#include "gdbcore.h"
fa8f86ff 28#include "dwarf2.h"
4c2df51b 29#include "dwarf2expr.h"
1e3a102a 30#include "gdb_assert.h"
4c2df51b
DJ
31
32/* Local prototypes. */
33
34static void execute_stack_op (struct dwarf_expr_context *,
0d45f56e 35 const gdb_byte *, const gdb_byte *);
4c2df51b 36
/* Cookie used to look up our per-architecture data.  */

static struct gdbarch_data *dwarf_arch_cookie;

/* Per-gdbarch cache of types used by the DWARF expression evaluator
   for untyped values.  The three slots correspond to address sizes of
   2, 4 and 8 bytes; see dwarf_expr_address_type and the comments in
   execute_stack_op.  */

struct dwarf_gdbarch_types
{
  struct type *dw_types[3];
};
48
49/* Allocate and fill in dwarf_gdbarch_types for an arch. */
50
51static void *
52dwarf_gdbarch_types_init (struct gdbarch *gdbarch)
53{
54 struct dwarf_gdbarch_types *types
55 = GDBARCH_OBSTACK_ZALLOC (gdbarch, struct dwarf_gdbarch_types);
56
57 /* The types themselves are lazily initialized. */
58
59 return types;
60}
61
62/* Return the type used for DWARF operations where the type is
63 unspecified in the DWARF spec. Only certain sizes are
64 supported. */
65
66static struct type *
67dwarf_expr_address_type (struct dwarf_expr_context *ctx)
68{
69 struct dwarf_gdbarch_types *types = gdbarch_data (ctx->gdbarch,
70 dwarf_arch_cookie);
71 int ndx;
72
73 if (ctx->addr_size == 2)
74 ndx = 0;
75 else if (ctx->addr_size == 4)
76 ndx = 1;
77 else if (ctx->addr_size == 8)
78 ndx = 2;
79 else
80 error (_("Unsupported address size in DWARF expressions: %d bits"),
81 8 * ctx->addr_size);
82
83 if (types->dw_types[ndx] == NULL)
84 types->dw_types[ndx]
85 = arch_integer_type (ctx->gdbarch,
86 8 * ctx->addr_size,
87 0, "<signed DWARF address type>");
88
89 return types->dw_types[ndx];
90}
91
4c2df51b
DJ
92/* Create a new context for the expression evaluator. */
93
94struct dwarf_expr_context *
e4adbba9 95new_dwarf_expr_context (void)
4c2df51b
DJ
96{
97 struct dwarf_expr_context *retval;
9a619af0 98
4c2df51b 99 retval = xcalloc (1, sizeof (struct dwarf_expr_context));
18ec9831
KB
100 retval->stack_len = 0;
101 retval->stack_allocated = 10;
b966cb8a
TT
102 retval->stack = xmalloc (retval->stack_allocated
103 * sizeof (struct dwarf_stack_value));
87808bd6
JB
104 retval->num_pieces = 0;
105 retval->pieces = 0;
1e3a102a 106 retval->max_recursion_depth = 0x100;
4c2df51b
DJ
107 return retval;
108}
109
110/* Release the memory allocated to CTX. */
111
112void
113free_dwarf_expr_context (struct dwarf_expr_context *ctx)
114{
115 xfree (ctx->stack);
87808bd6 116 xfree (ctx->pieces);
4c2df51b
DJ
117 xfree (ctx);
118}
119
4a227398
TT
/* Cleanup callback adapter wrapping free_dwarf_expr_context.  */

static void
free_dwarf_expr_context_cleanup (void *arg)
{
  free_dwarf_expr_context (arg);
}
127
128/* Return a cleanup that calls free_dwarf_expr_context. */
129
130struct cleanup *
131make_cleanup_free_dwarf_expr_context (struct dwarf_expr_context *ctx)
132{
133 return make_cleanup (free_dwarf_expr_context_cleanup, ctx);
134}
135
4c2df51b
DJ
136/* Expand the memory allocated to CTX's stack to contain at least
137 NEED more elements than are currently used. */
138
139static void
140dwarf_expr_grow_stack (struct dwarf_expr_context *ctx, size_t need)
141{
142 if (ctx->stack_len + need > ctx->stack_allocated)
143 {
18ec9831 144 size_t newlen = ctx->stack_len + need + 10;
9a619af0 145
4c2df51b 146 ctx->stack = xrealloc (ctx->stack,
44353522 147 newlen * sizeof (struct dwarf_stack_value));
18ec9831 148 ctx->stack_allocated = newlen;
4c2df51b
DJ
149 }
150}
151
152/* Push VALUE onto CTX's stack. */
153
8a9b8146
TT
154static void
155dwarf_expr_push (struct dwarf_expr_context *ctx, struct value *value,
44353522 156 int in_stack_memory)
4c2df51b 157{
44353522
DE
158 struct dwarf_stack_value *v;
159
4c2df51b 160 dwarf_expr_grow_stack (ctx, 1);
44353522
DE
161 v = &ctx->stack[ctx->stack_len++];
162 v->value = value;
163 v->in_stack_memory = in_stack_memory;
4c2df51b
DJ
164}
165
8a9b8146 166/* Push VALUE onto CTX's stack. */
4c2df51b
DJ
167
168void
8a9b8146
TT
169dwarf_expr_push_address (struct dwarf_expr_context *ctx, CORE_ADDR value,
170 int in_stack_memory)
171{
172 dwarf_expr_push (ctx,
173 value_from_ulongest (dwarf_expr_address_type (ctx), value),
174 in_stack_memory);
175}
176
177/* Pop the top item off of CTX's stack. */
178
179static void
4c2df51b
DJ
180dwarf_expr_pop (struct dwarf_expr_context *ctx)
181{
182 if (ctx->stack_len <= 0)
8a3fe4f8 183 error (_("dwarf expression stack underflow"));
4c2df51b
DJ
184 ctx->stack_len--;
185}
186
187/* Retrieve the N'th item on CTX's stack. */
188
8a9b8146 189struct value *
4c2df51b
DJ
190dwarf_expr_fetch (struct dwarf_expr_context *ctx, int n)
191{
ef0fdf07 192 if (ctx->stack_len <= n)
3e43a32a
MS
193 error (_("Asked for position %d of stack, "
194 "stack only has %d elements on it."),
4c2df51b 195 n, ctx->stack_len);
44353522 196 return ctx->stack[ctx->stack_len - (1 + n)].value;
8a9b8146
TT
197}
198
199/* Require that TYPE be an integral type; throw an exception if not. */
44353522 200
8a9b8146
TT
201static void
202dwarf_require_integral (struct type *type)
203{
204 if (TYPE_CODE (type) != TYPE_CODE_INT
205 && TYPE_CODE (type) != TYPE_CODE_CHAR
206 && TYPE_CODE (type) != TYPE_CODE_BOOL)
207 error (_("integral type expected in DWARF expression"));
208}
209
210/* Return the unsigned form of TYPE. TYPE is necessarily an integral
211 type. */
212
213static struct type *
214get_unsigned_type (struct gdbarch *gdbarch, struct type *type)
215{
216 switch (TYPE_LENGTH (type))
217 {
218 case 1:
219 return builtin_type (gdbarch)->builtin_uint8;
220 case 2:
221 return builtin_type (gdbarch)->builtin_uint16;
222 case 4:
223 return builtin_type (gdbarch)->builtin_uint32;
224 case 8:
225 return builtin_type (gdbarch)->builtin_uint64;
226 default:
227 error (_("no unsigned variant found for type, while evaluating "
228 "DWARF expression"));
229 }
44353522
DE
230}
231
8ddd9a20
TT
232/* Return the signed form of TYPE. TYPE is necessarily an integral
233 type. */
234
235static struct type *
236get_signed_type (struct gdbarch *gdbarch, struct type *type)
237{
238 switch (TYPE_LENGTH (type))
239 {
240 case 1:
241 return builtin_type (gdbarch)->builtin_int8;
242 case 2:
243 return builtin_type (gdbarch)->builtin_int16;
244 case 4:
245 return builtin_type (gdbarch)->builtin_int32;
246 case 8:
247 return builtin_type (gdbarch)->builtin_int64;
248 default:
249 error (_("no signed variant found for type, while evaluating "
250 "DWARF expression"));
251 }
252}
253
f2c7657e
UW
254/* Retrieve the N'th item on CTX's stack, converted to an address. */
255
256CORE_ADDR
257dwarf_expr_fetch_address (struct dwarf_expr_context *ctx, int n)
258{
8a9b8146
TT
259 struct value *result_val = dwarf_expr_fetch (ctx, n);
260 enum bfd_endian byte_order = gdbarch_byte_order (ctx->gdbarch);
261 ULONGEST result;
262
263 dwarf_require_integral (value_type (result_val));
264 result = extract_unsigned_integer (value_contents (result_val),
265 TYPE_LENGTH (value_type (result_val)),
266 byte_order);
f2c7657e
UW
267
268 /* For most architectures, calling extract_unsigned_integer() alone
269 is sufficient for extracting an address. However, some
270 architectures (e.g. MIPS) use signed addresses and using
271 extract_unsigned_integer() will not produce a correct
272 result. Make sure we invoke gdbarch_integer_to_address()
273 for those architectures which require it. */
274 if (gdbarch_integer_to_address_p (ctx->gdbarch))
275 {
f2c7657e 276 gdb_byte *buf = alloca (ctx->addr_size);
8a9b8146
TT
277 struct type *int_type = get_unsigned_type (ctx->gdbarch,
278 value_type (result_val));
f2c7657e
UW
279
280 store_unsigned_integer (buf, ctx->addr_size, byte_order, result);
281 return gdbarch_integer_to_address (ctx->gdbarch, int_type, buf);
282 }
283
284 return (CORE_ADDR) result;
285}
286
44353522
DE
287/* Retrieve the in_stack_memory flag of the N'th item on CTX's stack. */
288
289int
290dwarf_expr_fetch_in_stack_memory (struct dwarf_expr_context *ctx, int n)
291{
292 if (ctx->stack_len <= n)
3e43a32a
MS
293 error (_("Asked for position %d of stack, "
294 "stack only has %d elements on it."),
44353522
DE
295 n, ctx->stack_len);
296 return ctx->stack[ctx->stack_len - (1 + n)].in_stack_memory;
4c2df51b
DJ
297}
298
cb826367
TT
299/* Return true if the expression stack is empty. */
300
301static int
302dwarf_expr_stack_empty_p (struct dwarf_expr_context *ctx)
303{
304 return ctx->stack_len == 0;
305}
306
87808bd6
JB
307/* Add a new piece to CTX's piece list. */
308static void
d3b1e874 309add_piece (struct dwarf_expr_context *ctx, ULONGEST size, ULONGEST offset)
87808bd6
JB
310{
311 struct dwarf_expr_piece *p;
312
313 ctx->num_pieces++;
314
d3b1e874
TT
315 ctx->pieces = xrealloc (ctx->pieces,
316 (ctx->num_pieces
317 * sizeof (struct dwarf_expr_piece)));
87808bd6
JB
318
319 p = &ctx->pieces[ctx->num_pieces - 1];
cec03d70 320 p->location = ctx->location;
87808bd6 321 p->size = size;
d3b1e874
TT
322 p->offset = offset;
323
cec03d70
TT
324 if (p->location == DWARF_VALUE_LITERAL)
325 {
326 p->v.literal.data = ctx->data;
327 p->v.literal.length = ctx->len;
328 }
cb826367
TT
329 else if (dwarf_expr_stack_empty_p (ctx))
330 {
331 p->location = DWARF_VALUE_OPTIMIZED_OUT;
332 /* Also reset the context's location, for our callers. This is
333 a somewhat strange approach, but this lets us avoid setting
334 the location to DWARF_VALUE_MEMORY in all the individual
335 cases in the evaluator. */
336 ctx->location = DWARF_VALUE_OPTIMIZED_OUT;
337 }
f2c7657e
UW
338 else if (p->location == DWARF_VALUE_MEMORY)
339 {
340 p->v.mem.addr = dwarf_expr_fetch_address (ctx, 0);
341 p->v.mem.in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, 0);
342 }
8cf6f0b1
TT
343 else if (p->location == DWARF_VALUE_IMPLICIT_POINTER)
344 {
345 p->v.ptr.die = ctx->len;
8a9b8146 346 p->v.ptr.offset = value_as_long (dwarf_expr_fetch (ctx, 0));
8cf6f0b1 347 }
8a9b8146
TT
348 else if (p->location == DWARF_VALUE_REGISTER)
349 p->v.regno = value_as_long (dwarf_expr_fetch (ctx, 0));
cec03d70 350 else
44353522 351 {
f2c7657e 352 p->v.value = dwarf_expr_fetch (ctx, 0);
44353522 353 }
87808bd6
JB
354}
355
4c2df51b
DJ
356/* Evaluate the expression at ADDR (LEN bytes long) using the context
357 CTX. */
358
359void
0d45f56e
TT
360dwarf_expr_eval (struct dwarf_expr_context *ctx, const gdb_byte *addr,
361 size_t len)
4c2df51b 362{
1e3a102a
JK
363 int old_recursion_depth = ctx->recursion_depth;
364
4c2df51b 365 execute_stack_op (ctx, addr, addr + len);
1e3a102a
JK
366
367 /* CTX RECURSION_DEPTH becomes invalid if an exception was thrown here. */
368
369 gdb_assert (ctx->recursion_depth == old_recursion_depth);
4c2df51b
DJ
370}
371
372/* Decode the unsigned LEB128 constant at BUF into the variable pointed to
373 by R, and return the new value of BUF. Verify that it doesn't extend
8e3b41a9 374 past BUF_END. R can be NULL, the constant is then only skipped. */
4c2df51b 375
0d45f56e
TT
376const gdb_byte *
377read_uleb128 (const gdb_byte *buf, const gdb_byte *buf_end, ULONGEST * r)
4c2df51b
DJ
378{
379 unsigned shift = 0;
380 ULONGEST result = 0;
852483bc 381 gdb_byte byte;
4c2df51b
DJ
382
383 while (1)
384 {
385 if (buf >= buf_end)
8a3fe4f8 386 error (_("read_uleb128: Corrupted DWARF expression."));
4c2df51b
DJ
387
388 byte = *buf++;
9930639c 389 result |= ((ULONGEST) (byte & 0x7f)) << shift;
4c2df51b
DJ
390 if ((byte & 0x80) == 0)
391 break;
392 shift += 7;
393 }
8e3b41a9
JK
394 if (r)
395 *r = result;
4c2df51b
DJ
396 return buf;
397}
398
399/* Decode the signed LEB128 constant at BUF into the variable pointed to
400 by R, and return the new value of BUF. Verify that it doesn't extend
8e3b41a9 401 past BUF_END. R can be NULL, the constant is then only skipped. */
4c2df51b 402
0d45f56e
TT
403const gdb_byte *
404read_sleb128 (const gdb_byte *buf, const gdb_byte *buf_end, LONGEST * r)
4c2df51b
DJ
405{
406 unsigned shift = 0;
407 LONGEST result = 0;
852483bc 408 gdb_byte byte;
4c2df51b
DJ
409
410 while (1)
411 {
412 if (buf >= buf_end)
8a3fe4f8 413 error (_("read_sleb128: Corrupted DWARF expression."));
4c2df51b
DJ
414
415 byte = *buf++;
9930639c 416 result |= ((ULONGEST) (byte & 0x7f)) << shift;
4c2df51b
DJ
417 shift += 7;
418 if ((byte & 0x80) == 0)
419 break;
420 }
421 if (shift < (sizeof (*r) * 8) && (byte & 0x40) != 0)
04ad99e6 422 result |= -(((LONGEST) 1) << shift);
4c2df51b 423
8e3b41a9
JK
424 if (r)
425 *r = result;
4c2df51b
DJ
426 return buf;
427}
4c2df51b 428\f
cec03d70
TT
429
430/* Check that the current operator is either at the end of an
431 expression, or that it is followed by a composition operator. */
432
3cf03773
TT
433void
434dwarf_expr_require_composition (const gdb_byte *op_ptr, const gdb_byte *op_end,
435 const char *op_name)
cec03d70
TT
436{
437 /* It seems like DW_OP_GNU_uninit should be handled here. However,
438 it doesn't seem to make sense for DW_OP_*_value, and it was not
439 checked at the other place that this function is called. */
440 if (op_ptr != op_end && *op_ptr != DW_OP_piece && *op_ptr != DW_OP_bit_piece)
441 error (_("DWARF-2 expression error: `%s' operations must be "
64b9b334 442 "used either alone or in conjunction with DW_OP_piece "
cec03d70
TT
443 "or DW_OP_bit_piece."),
444 op_name);
445}
446
8a9b8146
TT
/* Return true iff the types T1 and T2 are "the same".  This only does
   checks that might reasonably be needed to compare DWARF base types:
   type code, signedness, and length.  */

static int
base_types_equal_p (struct type *t1, struct type *t2)
{
  return (TYPE_CODE (t1) == TYPE_CODE (t2)
	  && TYPE_UNSIGNED (t1) == TYPE_UNSIGNED (t2)
	  && TYPE_LENGTH (t1) == TYPE_LENGTH (t2));
}
460
461/* A convenience function to call get_base_type on CTX and return the
462 result. DIE is the DIE whose type we need. SIZE is non-zero if
463 this function should verify that the resulting type has the correct
464 size. */
465
466static struct type *
467dwarf_get_base_type (struct dwarf_expr_context *ctx, ULONGEST die, int size)
468{
469 struct type *result;
470
9e8b7a03 471 if (ctx->funcs->get_base_type)
8a9b8146 472 {
9e8b7a03 473 result = ctx->funcs->get_base_type (ctx, die);
9ff3b74f
TT
474 if (result == NULL)
475 error (_("Could not find type for DW_OP_GNU_const_type"));
8a9b8146
TT
476 if (size != 0 && TYPE_LENGTH (result) != size)
477 error (_("DW_OP_GNU_const_type has different sizes for type and data"));
478 }
479 else
480 /* Anything will do. */
481 result = builtin_type (ctx->gdbarch)->builtin_int;
482
483 return result;
484}
485
8e3b41a9
JK
486/* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_reg* return the
487 DWARF register number. Otherwise return -1. */
488
489int
490dwarf_block_to_dwarf_reg (const gdb_byte *buf, const gdb_byte *buf_end)
491{
492 ULONGEST dwarf_reg;
493
494 if (buf_end <= buf)
495 return -1;
496 if (*buf >= DW_OP_reg0 && *buf <= DW_OP_reg31)
497 {
498 if (buf_end - buf != 1)
499 return -1;
500 return *buf - DW_OP_reg0;
501 }
502
503 if (*buf == DW_OP_GNU_regval_type)
504 {
505 buf++;
506 buf = read_uleb128 (buf, buf_end, &dwarf_reg);
507 buf = read_uleb128 (buf, buf_end, NULL);
508 }
509 else if (*buf == DW_OP_regx)
510 {
511 buf++;
512 buf = read_uleb128 (buf, buf_end, &dwarf_reg);
513 }
514 else
515 return -1;
516 if (buf != buf_end || (int) dwarf_reg != dwarf_reg)
517 return -1;
518 return dwarf_reg;
519}
520
e18b2753
JK
521/* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_fbreg(X) fill
522 in FB_OFFSET_RETURN with the X offset and return 1. Otherwise return 0. */
523
524int
525dwarf_block_to_fb_offset (const gdb_byte *buf, const gdb_byte *buf_end,
526 CORE_ADDR *fb_offset_return)
527{
528 LONGEST fb_offset;
529
530 if (buf_end <= buf)
531 return 0;
532
533 if (*buf != DW_OP_fbreg)
534 return 0;
535 buf++;
536
537 buf = read_sleb128 (buf, buf_end, &fb_offset);
538 *fb_offset_return = fb_offset;
539 if (buf != buf_end || fb_offset != (LONGEST) *fb_offset_return)
540 return 0;
541
542 return 1;
543}
544
545/* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_bregSP(X) fill
546 in SP_OFFSET_RETURN with the X offset and return 1. Otherwise return 0.
547 The matched SP register number depends on GDBARCH. */
548
549int
550dwarf_block_to_sp_offset (struct gdbarch *gdbarch, const gdb_byte *buf,
551 const gdb_byte *buf_end, CORE_ADDR *sp_offset_return)
552{
553 ULONGEST dwarf_reg;
554 LONGEST sp_offset;
555
556 if (buf_end <= buf)
557 return 0;
558 if (*buf >= DW_OP_breg0 && *buf <= DW_OP_breg31)
559 {
560 dwarf_reg = *buf - DW_OP_breg0;
561 buf++;
562 }
563 else
564 {
565 if (*buf != DW_OP_bregx)
566 return 0;
567 buf++;
568 buf = read_uleb128 (buf, buf_end, &dwarf_reg);
569 }
570
571 if (gdbarch_dwarf2_reg_to_regnum (gdbarch, dwarf_reg)
572 != gdbarch_sp_regnum (gdbarch))
573 return 0;
574
575 buf = read_sleb128 (buf, buf_end, &sp_offset);
576 *sp_offset_return = sp_offset;
577 if (buf != buf_end || sp_offset != (LONGEST) *sp_offset_return)
578 return 0;
579
580 return 1;
581}
582
4c2df51b
DJ
583/* The engine for the expression evaluator. Using the context in CTX,
584 evaluate the expression between OP_PTR and OP_END. */
585
586static void
852483bc 587execute_stack_op (struct dwarf_expr_context *ctx,
0d45f56e 588 const gdb_byte *op_ptr, const gdb_byte *op_end)
4c2df51b 589{
e17a4113 590 enum bfd_endian byte_order = gdbarch_byte_order (ctx->gdbarch);
8a9b8146
TT
591 /* Old-style "untyped" DWARF values need special treatment in a
592 couple of places, specifically DW_OP_mod and DW_OP_shr. We need
593 a special type for these values so we can distinguish them from
594 values that have an explicit type, because explicitly-typed
595 values do not need special treatment. This special type must be
596 different (in the `==' sense) from any base type coming from the
597 CU. */
598 struct type *address_type = dwarf_expr_address_type (ctx);
9a619af0 599
cec03d70 600 ctx->location = DWARF_VALUE_MEMORY;
42be36b3 601 ctx->initialized = 1; /* Default is initialized. */
18ec9831 602
1e3a102a
JK
603 if (ctx->recursion_depth > ctx->max_recursion_depth)
604 error (_("DWARF-2 expression error: Loop detected (%d)."),
605 ctx->recursion_depth);
606 ctx->recursion_depth++;
607
4c2df51b
DJ
608 while (op_ptr < op_end)
609 {
610 enum dwarf_location_atom op = *op_ptr++;
f2c7657e 611 ULONGEST result;
44353522
DE
612 /* Assume the value is not in stack memory.
613 Code that knows otherwise sets this to 1.
614 Some arithmetic on stack addresses can probably be assumed to still
615 be a stack address, but we skip this complication for now.
616 This is just an optimization, so it's always ok to punt
617 and leave this as 0. */
618 int in_stack_memory = 0;
4c2df51b
DJ
619 ULONGEST uoffset, reg;
620 LONGEST offset;
8a9b8146 621 struct value *result_val = NULL;
4c2df51b 622
e0e9434c
TT
623 /* The DWARF expression might have a bug causing an infinite
624 loop. In that case, quitting is the only way out. */
625 QUIT;
626
4c2df51b
DJ
627 switch (op)
628 {
629 case DW_OP_lit0:
630 case DW_OP_lit1:
631 case DW_OP_lit2:
632 case DW_OP_lit3:
633 case DW_OP_lit4:
634 case DW_OP_lit5:
635 case DW_OP_lit6:
636 case DW_OP_lit7:
637 case DW_OP_lit8:
638 case DW_OP_lit9:
639 case DW_OP_lit10:
640 case DW_OP_lit11:
641 case DW_OP_lit12:
642 case DW_OP_lit13:
643 case DW_OP_lit14:
644 case DW_OP_lit15:
645 case DW_OP_lit16:
646 case DW_OP_lit17:
647 case DW_OP_lit18:
648 case DW_OP_lit19:
649 case DW_OP_lit20:
650 case DW_OP_lit21:
651 case DW_OP_lit22:
652 case DW_OP_lit23:
653 case DW_OP_lit24:
654 case DW_OP_lit25:
655 case DW_OP_lit26:
656 case DW_OP_lit27:
657 case DW_OP_lit28:
658 case DW_OP_lit29:
659 case DW_OP_lit30:
660 case DW_OP_lit31:
661 result = op - DW_OP_lit0;
8a9b8146 662 result_val = value_from_ulongest (address_type, result);
4c2df51b
DJ
663 break;
664
665 case DW_OP_addr:
f2c7657e
UW
666 result = extract_unsigned_integer (op_ptr,
667 ctx->addr_size, byte_order);
ae0d2f24 668 op_ptr += ctx->addr_size;
ac56253d
TT
669 /* Some versions of GCC emit DW_OP_addr before
670 DW_OP_GNU_push_tls_address. In this case the value is an
671 index, not an address. We don't support things like
672 branching between the address and the TLS op. */
673 if (op_ptr >= op_end || *op_ptr != DW_OP_GNU_push_tls_address)
674 result += ctx->offset;
8a9b8146 675 result_val = value_from_ulongest (address_type, result);
4c2df51b
DJ
676 break;
677
678 case DW_OP_const1u:
e17a4113 679 result = extract_unsigned_integer (op_ptr, 1, byte_order);
8a9b8146 680 result_val = value_from_ulongest (address_type, result);
4c2df51b
DJ
681 op_ptr += 1;
682 break;
683 case DW_OP_const1s:
e17a4113 684 result = extract_signed_integer (op_ptr, 1, byte_order);
8a9b8146 685 result_val = value_from_ulongest (address_type, result);
4c2df51b
DJ
686 op_ptr += 1;
687 break;
688 case DW_OP_const2u:
e17a4113 689 result = extract_unsigned_integer (op_ptr, 2, byte_order);
8a9b8146 690 result_val = value_from_ulongest (address_type, result);
4c2df51b
DJ
691 op_ptr += 2;
692 break;
693 case DW_OP_const2s:
e17a4113 694 result = extract_signed_integer (op_ptr, 2, byte_order);
8a9b8146 695 result_val = value_from_ulongest (address_type, result);
4c2df51b
DJ
696 op_ptr += 2;
697 break;
698 case DW_OP_const4u:
e17a4113 699 result = extract_unsigned_integer (op_ptr, 4, byte_order);
8a9b8146 700 result_val = value_from_ulongest (address_type, result);
4c2df51b
DJ
701 op_ptr += 4;
702 break;
703 case DW_OP_const4s:
e17a4113 704 result = extract_signed_integer (op_ptr, 4, byte_order);
8a9b8146 705 result_val = value_from_ulongest (address_type, result);
4c2df51b
DJ
706 op_ptr += 4;
707 break;
708 case DW_OP_const8u:
e17a4113 709 result = extract_unsigned_integer (op_ptr, 8, byte_order);
8a9b8146 710 result_val = value_from_ulongest (address_type, result);
4c2df51b
DJ
711 op_ptr += 8;
712 break;
713 case DW_OP_const8s:
e17a4113 714 result = extract_signed_integer (op_ptr, 8, byte_order);
8a9b8146 715 result_val = value_from_ulongest (address_type, result);
4c2df51b
DJ
716 op_ptr += 8;
717 break;
718 case DW_OP_constu:
719 op_ptr = read_uleb128 (op_ptr, op_end, &uoffset);
720 result = uoffset;
8a9b8146 721 result_val = value_from_ulongest (address_type, result);
4c2df51b
DJ
722 break;
723 case DW_OP_consts:
724 op_ptr = read_sleb128 (op_ptr, op_end, &offset);
725 result = offset;
8a9b8146 726 result_val = value_from_ulongest (address_type, result);
4c2df51b
DJ
727 break;
728
729 /* The DW_OP_reg operations are required to occur alone in
730 location expressions. */
731 case DW_OP_reg0:
732 case DW_OP_reg1:
733 case DW_OP_reg2:
734 case DW_OP_reg3:
735 case DW_OP_reg4:
736 case DW_OP_reg5:
737 case DW_OP_reg6:
738 case DW_OP_reg7:
739 case DW_OP_reg8:
740 case DW_OP_reg9:
741 case DW_OP_reg10:
742 case DW_OP_reg11:
743 case DW_OP_reg12:
744 case DW_OP_reg13:
745 case DW_OP_reg14:
746 case DW_OP_reg15:
747 case DW_OP_reg16:
748 case DW_OP_reg17:
749 case DW_OP_reg18:
750 case DW_OP_reg19:
751 case DW_OP_reg20:
752 case DW_OP_reg21:
753 case DW_OP_reg22:
754 case DW_OP_reg23:
755 case DW_OP_reg24:
756 case DW_OP_reg25:
757 case DW_OP_reg26:
758 case DW_OP_reg27:
759 case DW_OP_reg28:
760 case DW_OP_reg29:
761 case DW_OP_reg30:
762 case DW_OP_reg31:
42be36b3
CT
763 if (op_ptr != op_end
764 && *op_ptr != DW_OP_piece
d3b1e874 765 && *op_ptr != DW_OP_bit_piece
42be36b3 766 && *op_ptr != DW_OP_GNU_uninit)
8a3fe4f8 767 error (_("DWARF-2 expression error: DW_OP_reg operations must be "
64b9b334 768 "used either alone or in conjunction with DW_OP_piece "
d3b1e874 769 "or DW_OP_bit_piece."));
4c2df51b 770
61fbb938 771 result = op - DW_OP_reg0;
8a9b8146 772 result_val = value_from_ulongest (address_type, result);
cec03d70 773 ctx->location = DWARF_VALUE_REGISTER;
4c2df51b
DJ
774 break;
775
776 case DW_OP_regx:
777 op_ptr = read_uleb128 (op_ptr, op_end, &reg);
3cf03773 778 dwarf_expr_require_composition (op_ptr, op_end, "DW_OP_regx");
4c2df51b 779
61fbb938 780 result = reg;
8a9b8146 781 result_val = value_from_ulongest (address_type, result);
cec03d70 782 ctx->location = DWARF_VALUE_REGISTER;
4c2df51b
DJ
783 break;
784
cec03d70
TT
785 case DW_OP_implicit_value:
786 {
787 ULONGEST len;
9a619af0 788
cec03d70
TT
789 op_ptr = read_uleb128 (op_ptr, op_end, &len);
790 if (op_ptr + len > op_end)
791 error (_("DW_OP_implicit_value: too few bytes available."));
792 ctx->len = len;
793 ctx->data = op_ptr;
794 ctx->location = DWARF_VALUE_LITERAL;
795 op_ptr += len;
3cf03773
TT
796 dwarf_expr_require_composition (op_ptr, op_end,
797 "DW_OP_implicit_value");
cec03d70
TT
798 }
799 goto no_push;
800
801 case DW_OP_stack_value:
802 ctx->location = DWARF_VALUE_STACK;
3cf03773 803 dwarf_expr_require_composition (op_ptr, op_end, "DW_OP_stack_value");
cec03d70
TT
804 goto no_push;
805
8cf6f0b1
TT
806 case DW_OP_GNU_implicit_pointer:
807 {
808 ULONGEST die;
809 LONGEST len;
810
181cebd4
JK
811 if (ctx->ref_addr_size == -1)
812 error (_("DWARF-2 expression error: DW_OP_GNU_implicit_pointer "
813 "is not allowed in frame context"));
814
8cf6f0b1 815 /* The referred-to DIE. */
181cebd4 816 ctx->len = extract_unsigned_integer (op_ptr, ctx->ref_addr_size,
8cf6f0b1 817 byte_order);
181cebd4 818 op_ptr += ctx->ref_addr_size;
8cf6f0b1
TT
819
820 /* The byte offset into the data. */
821 op_ptr = read_sleb128 (op_ptr, op_end, &len);
822 result = (ULONGEST) len;
8a9b8146 823 result_val = value_from_ulongest (address_type, result);
8cf6f0b1
TT
824
825 ctx->location = DWARF_VALUE_IMPLICIT_POINTER;
826 dwarf_expr_require_composition (op_ptr, op_end,
827 "DW_OP_GNU_implicit_pointer");
828 }
829 break;
830
4c2df51b
DJ
831 case DW_OP_breg0:
832 case DW_OP_breg1:
833 case DW_OP_breg2:
834 case DW_OP_breg3:
835 case DW_OP_breg4:
836 case DW_OP_breg5:
837 case DW_OP_breg6:
838 case DW_OP_breg7:
839 case DW_OP_breg8:
840 case DW_OP_breg9:
841 case DW_OP_breg10:
842 case DW_OP_breg11:
843 case DW_OP_breg12:
844 case DW_OP_breg13:
845 case DW_OP_breg14:
846 case DW_OP_breg15:
847 case DW_OP_breg16:
848 case DW_OP_breg17:
849 case DW_OP_breg18:
850 case DW_OP_breg19:
851 case DW_OP_breg20:
852 case DW_OP_breg21:
853 case DW_OP_breg22:
854 case DW_OP_breg23:
855 case DW_OP_breg24:
856 case DW_OP_breg25:
857 case DW_OP_breg26:
858 case DW_OP_breg27:
859 case DW_OP_breg28:
860 case DW_OP_breg29:
861 case DW_OP_breg30:
862 case DW_OP_breg31:
863 {
864 op_ptr = read_sleb128 (op_ptr, op_end, &offset);
9e8b7a03 865 result = (ctx->funcs->read_reg) (ctx->baton, op - DW_OP_breg0);
4c2df51b 866 result += offset;
8a9b8146 867 result_val = value_from_ulongest (address_type, result);
4c2df51b
DJ
868 }
869 break;
870 case DW_OP_bregx:
871 {
872 op_ptr = read_uleb128 (op_ptr, op_end, &reg);
873 op_ptr = read_sleb128 (op_ptr, op_end, &offset);
9e8b7a03 874 result = (ctx->funcs->read_reg) (ctx->baton, reg);
4c2df51b 875 result += offset;
8a9b8146 876 result_val = value_from_ulongest (address_type, result);
4c2df51b
DJ
877 }
878 break;
879 case DW_OP_fbreg:
880 {
0d45f56e 881 const gdb_byte *datastart;
4c2df51b
DJ
882 size_t datalen;
883 unsigned int before_stack_len;
884
885 op_ptr = read_sleb128 (op_ptr, op_end, &offset);
886 /* Rather than create a whole new context, we simply
887 record the stack length before execution, then reset it
888 afterwards, effectively erasing whatever the recursive
889 call put there. */
890 before_stack_len = ctx->stack_len;
da62e633
AC
891 /* FIXME: cagney/2003-03-26: This code should be using
892 get_frame_base_address(), and then implement a dwarf2
893 specific this_base method. */
9e8b7a03 894 (ctx->funcs->get_frame_base) (ctx->baton, &datastart, &datalen);
4c2df51b 895 dwarf_expr_eval (ctx, datastart, datalen);
f2c7657e
UW
896 if (ctx->location == DWARF_VALUE_MEMORY)
897 result = dwarf_expr_fetch_address (ctx, 0);
898 else if (ctx->location == DWARF_VALUE_REGISTER)
9e8b7a03
JK
899 result = (ctx->funcs->read_reg) (ctx->baton,
900 value_as_long (dwarf_expr_fetch (ctx, 0)));
f2c7657e 901 else
3e43a32a
MS
902 error (_("Not implemented: computing frame "
903 "base using explicit value operator"));
4c2df51b 904 result = result + offset;
8a9b8146 905 result_val = value_from_ulongest (address_type, result);
44353522 906 in_stack_memory = 1;
4c2df51b 907 ctx->stack_len = before_stack_len;
cec03d70 908 ctx->location = DWARF_VALUE_MEMORY;
4c2df51b
DJ
909 }
910 break;
44353522 911
4c2df51b 912 case DW_OP_dup:
8a9b8146 913 result_val = dwarf_expr_fetch (ctx, 0);
44353522 914 in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, 0);
4c2df51b
DJ
915 break;
916
917 case DW_OP_drop:
918 dwarf_expr_pop (ctx);
919 goto no_push;
920
921 case DW_OP_pick:
922 offset = *op_ptr++;
8a9b8146 923 result_val = dwarf_expr_fetch (ctx, offset);
44353522 924 in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, offset);
4c2df51b 925 break;
9f3fe11c
TG
926
927 case DW_OP_swap:
928 {
44353522 929 struct dwarf_stack_value t1, t2;
9f3fe11c
TG
930
931 if (ctx->stack_len < 2)
3e43a32a 932 error (_("Not enough elements for "
0963b4bd 933 "DW_OP_swap. Need 2, have %d."),
9f3fe11c
TG
934 ctx->stack_len);
935 t1 = ctx->stack[ctx->stack_len - 1];
936 t2 = ctx->stack[ctx->stack_len - 2];
937 ctx->stack[ctx->stack_len - 1] = t2;
938 ctx->stack[ctx->stack_len - 2] = t1;
939 goto no_push;
940 }
4c2df51b
DJ
941
942 case DW_OP_over:
8a9b8146 943 result_val = dwarf_expr_fetch (ctx, 1);
44353522 944 in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, 1);
4c2df51b
DJ
945 break;
946
947 case DW_OP_rot:
948 {
44353522 949 struct dwarf_stack_value t1, t2, t3;
4c2df51b
DJ
950
951 if (ctx->stack_len < 3)
0963b4bd
MS
952 error (_("Not enough elements for "
953 "DW_OP_rot. Need 3, have %d."),
4c2df51b
DJ
954 ctx->stack_len);
955 t1 = ctx->stack[ctx->stack_len - 1];
956 t2 = ctx->stack[ctx->stack_len - 2];
957 t3 = ctx->stack[ctx->stack_len - 3];
958 ctx->stack[ctx->stack_len - 1] = t2;
959 ctx->stack[ctx->stack_len - 2] = t3;
960 ctx->stack[ctx->stack_len - 3] = t1;
961 goto no_push;
962 }
963
964 case DW_OP_deref:
965 case DW_OP_deref_size:
8a9b8146 966 case DW_OP_GNU_deref_type:
f2c7657e
UW
967 {
968 int addr_size = (op == DW_OP_deref ? ctx->addr_size : *op_ptr++);
969 gdb_byte *buf = alloca (addr_size);
970 CORE_ADDR addr = dwarf_expr_fetch_address (ctx, 0);
8a9b8146
TT
971 struct type *type;
972
f2c7657e
UW
973 dwarf_expr_pop (ctx);
974
8a9b8146
TT
975 if (op == DW_OP_GNU_deref_type)
976 {
977 ULONGEST type_die;
978
979 op_ptr = read_uleb128 (op_ptr, op_end, &type_die);
980 type = dwarf_get_base_type (ctx, type_die, 0);
981 }
982 else
983 type = address_type;
984
9e8b7a03 985 (ctx->funcs->read_mem) (ctx->baton, buf, addr, addr_size);
325663dc
JB
986
987 /* If the size of the object read from memory is different
988 from the type length, we need to zero-extend it. */
989 if (TYPE_LENGTH (type) != addr_size)
990 {
991 ULONGEST result =
992 extract_unsigned_integer (buf, addr_size, byte_order);
993
994 buf = alloca (TYPE_LENGTH (type));
995 store_unsigned_integer (buf, TYPE_LENGTH (type),
996 byte_order, result);
997 }
998
8a9b8146 999 result_val = value_from_contents_and_address (type, buf, addr);
f2c7657e
UW
1000 break;
1001 }
1002
4c2df51b
DJ
1003 case DW_OP_abs:
1004 case DW_OP_neg:
1005 case DW_OP_not:
1006 case DW_OP_plus_uconst:
8a9b8146
TT
1007 {
1008 /* Unary operations. */
1009 result_val = dwarf_expr_fetch (ctx, 0);
1010 dwarf_expr_pop (ctx);
4c2df51b 1011
8a9b8146
TT
1012 switch (op)
1013 {
1014 case DW_OP_abs:
1015 if (value_less (result_val,
1016 value_zero (value_type (result_val), not_lval)))
1017 result_val = value_neg (result_val);
1018 break;
1019 case DW_OP_neg:
1020 result_val = value_neg (result_val);
1021 break;
1022 case DW_OP_not:
1023 dwarf_require_integral (value_type (result_val));
1024 result_val = value_complement (result_val);
1025 break;
1026 case DW_OP_plus_uconst:
1027 dwarf_require_integral (value_type (result_val));
1028 result = value_as_long (result_val);
1029 op_ptr = read_uleb128 (op_ptr, op_end, &reg);
1030 result += reg;
1031 result_val = value_from_ulongest (address_type, result);
1032 break;
1033 }
1034 }
4c2df51b
DJ
1035 break;
1036
1037 case DW_OP_and:
1038 case DW_OP_div:
1039 case DW_OP_minus:
1040 case DW_OP_mod:
1041 case DW_OP_mul:
1042 case DW_OP_or:
1043 case DW_OP_plus:
1044 case DW_OP_shl:
1045 case DW_OP_shr:
1046 case DW_OP_shra:
1047 case DW_OP_xor:
1048 case DW_OP_le:
1049 case DW_OP_ge:
1050 case DW_OP_eq:
1051 case DW_OP_lt:
1052 case DW_OP_gt:
1053 case DW_OP_ne:
1054 {
f2c7657e 1055 /* Binary operations. */
8a9b8146 1056 struct value *first, *second;
4c2df51b
DJ
1057
1058 second = dwarf_expr_fetch (ctx, 0);
1059 dwarf_expr_pop (ctx);
1060
b263358a 1061 first = dwarf_expr_fetch (ctx, 0);
4c2df51b
DJ
1062 dwarf_expr_pop (ctx);
1063
8a9b8146
TT
1064 if (! base_types_equal_p (value_type (first), value_type (second)))
1065 error (_("Incompatible types on DWARF stack"));
1066
4c2df51b
DJ
1067 switch (op)
1068 {
1069 case DW_OP_and:
8a9b8146
TT
1070 dwarf_require_integral (value_type (first));
1071 dwarf_require_integral (value_type (second));
1072 result_val = value_binop (first, second, BINOP_BITWISE_AND);
4c2df51b
DJ
1073 break;
1074 case DW_OP_div:
8a9b8146 1075 result_val = value_binop (first, second, BINOP_DIV);
99c87dab 1076 break;
4c2df51b 1077 case DW_OP_minus:
8a9b8146 1078 result_val = value_binop (first, second, BINOP_SUB);
4c2df51b
DJ
1079 break;
1080 case DW_OP_mod:
8a9b8146
TT
1081 {
1082 int cast_back = 0;
1083 struct type *orig_type = value_type (first);
1084
1085 /* We have to special-case "old-style" untyped values
1086 -- these must have mod computed using unsigned
1087 math. */
1088 if (orig_type == address_type)
1089 {
1090 struct type *utype
1091 = get_unsigned_type (ctx->gdbarch, orig_type);
1092
1093 cast_back = 1;
1094 first = value_cast (utype, first);
1095 second = value_cast (utype, second);
1096 }
1097 /* Note that value_binop doesn't handle float or
1098 decimal float here. This seems unimportant. */
1099 result_val = value_binop (first, second, BINOP_MOD);
1100 if (cast_back)
1101 result_val = value_cast (orig_type, result_val);
1102 }
4c2df51b
DJ
1103 break;
1104 case DW_OP_mul:
8a9b8146 1105 result_val = value_binop (first, second, BINOP_MUL);
4c2df51b
DJ
1106 break;
1107 case DW_OP_or:
8a9b8146
TT
1108 dwarf_require_integral (value_type (first));
1109 dwarf_require_integral (value_type (second));
1110 result_val = value_binop (first, second, BINOP_BITWISE_IOR);
4c2df51b
DJ
1111 break;
1112 case DW_OP_plus:
8a9b8146 1113 result_val = value_binop (first, second, BINOP_ADD);
4c2df51b
DJ
1114 break;
1115 case DW_OP_shl:
8a9b8146
TT
1116 dwarf_require_integral (value_type (first));
1117 dwarf_require_integral (value_type (second));
1118 result_val = value_binop (first, second, BINOP_LSH);
4c2df51b
DJ
1119 break;
1120 case DW_OP_shr:
8a9b8146
TT
1121 dwarf_require_integral (value_type (first));
1122 dwarf_require_integral (value_type (second));
b087e0ed 1123 if (!TYPE_UNSIGNED (value_type (first)))
8a9b8146
TT
1124 {
1125 struct type *utype
1126 = get_unsigned_type (ctx->gdbarch, value_type (first));
1127
1128 first = value_cast (utype, first);
1129 }
1130
1131 result_val = value_binop (first, second, BINOP_RSH);
1132 /* Make sure we wind up with the same type we started
1133 with. */
1134 if (value_type (result_val) != value_type (second))
1135 result_val = value_cast (value_type (second), result_val);
99c87dab 1136 break;
4c2df51b 1137 case DW_OP_shra:
8a9b8146
TT
1138 dwarf_require_integral (value_type (first));
1139 dwarf_require_integral (value_type (second));
8ddd9a20
TT
1140 if (TYPE_UNSIGNED (value_type (first)))
1141 {
1142 struct type *stype
1143 = get_signed_type (ctx->gdbarch, value_type (first));
1144
1145 first = value_cast (stype, first);
1146 }
1147
8a9b8146 1148 result_val = value_binop (first, second, BINOP_RSH);
8ddd9a20
TT
1149 /* Make sure we wind up with the same type we started
1150 with. */
1151 if (value_type (result_val) != value_type (second))
1152 result_val = value_cast (value_type (second), result_val);
4c2df51b
DJ
1153 break;
1154 case DW_OP_xor:
8a9b8146
TT
1155 dwarf_require_integral (value_type (first));
1156 dwarf_require_integral (value_type (second));
1157 result_val = value_binop (first, second, BINOP_BITWISE_XOR);
4c2df51b
DJ
1158 break;
1159 case DW_OP_le:
8a9b8146
TT
1160 /* A <= B is !(B < A). */
1161 result = ! value_less (second, first);
1162 result_val = value_from_ulongest (address_type, result);
4c2df51b
DJ
1163 break;
1164 case DW_OP_ge:
8a9b8146
TT
1165 /* A >= B is !(A < B). */
1166 result = ! value_less (first, second);
1167 result_val = value_from_ulongest (address_type, result);
4c2df51b
DJ
1168 break;
1169 case DW_OP_eq:
8a9b8146
TT
1170 result = value_equal (first, second);
1171 result_val = value_from_ulongest (address_type, result);
4c2df51b
DJ
1172 break;
1173 case DW_OP_lt:
8a9b8146
TT
1174 result = value_less (first, second);
1175 result_val = value_from_ulongest (address_type, result);
4c2df51b
DJ
1176 break;
1177 case DW_OP_gt:
8a9b8146
TT
1178 /* A > B is B < A. */
1179 result = value_less (second, first);
1180 result_val = value_from_ulongest (address_type, result);
4c2df51b
DJ
1181 break;
1182 case DW_OP_ne:
8a9b8146
TT
1183 result = ! value_equal (first, second);
1184 result_val = value_from_ulongest (address_type, result);
4c2df51b
DJ
1185 break;
1186 default:
1187 internal_error (__FILE__, __LINE__,
e2e0b3e5 1188 _("Can't be reached."));
4c2df51b 1189 }
4c2df51b
DJ
1190 }
1191 break;
1192
e7802207 1193 case DW_OP_call_frame_cfa:
9e8b7a03 1194 result = (ctx->funcs->get_frame_cfa) (ctx->baton);
8a9b8146 1195 result_val = value_from_ulongest (address_type, result);
44353522 1196 in_stack_memory = 1;
e7802207
TT
1197 break;
1198
4c2df51b 1199 case DW_OP_GNU_push_tls_address:
c3228f12
EZ
1200 /* Variable is at a constant offset in the thread-local
1201 storage block into the objfile for the current thread and
0963b4bd 1202 the dynamic linker module containing this expression. Here
c3228f12
EZ
	 we return the offset from that base.  The top of the
1204 stack has the offset from the beginning of the thread
1205 control block at which the variable is located. Nothing
1206 should follow this operator, so the top of stack would be
1207 returned. */
8a9b8146 1208 result = value_as_long (dwarf_expr_fetch (ctx, 0));
4c2df51b 1209 dwarf_expr_pop (ctx);
9e8b7a03 1210 result = (ctx->funcs->get_tls_address) (ctx->baton, result);
8a9b8146 1211 result_val = value_from_ulongest (address_type, result);
4c2df51b
DJ
1212 break;
1213
1214 case DW_OP_skip:
e17a4113 1215 offset = extract_signed_integer (op_ptr, 2, byte_order);
4c2df51b
DJ
1216 op_ptr += 2;
1217 op_ptr += offset;
1218 goto no_push;
1219
1220 case DW_OP_bra:
8a9b8146
TT
1221 {
1222 struct value *val;
1223
1224 offset = extract_signed_integer (op_ptr, 2, byte_order);
1225 op_ptr += 2;
1226 val = dwarf_expr_fetch (ctx, 0);
1227 dwarf_require_integral (value_type (val));
1228 if (value_as_long (val) != 0)
1229 op_ptr += offset;
1230 dwarf_expr_pop (ctx);
1231 }
4c2df51b
DJ
1232 goto no_push;
1233
1234 case DW_OP_nop:
1235 goto no_push;
1236
87808bd6
JB
1237 case DW_OP_piece:
1238 {
1239 ULONGEST size;
87808bd6
JB
1240
1241 /* Record the piece. */
1242 op_ptr = read_uleb128 (op_ptr, op_end, &size);
d3b1e874 1243 add_piece (ctx, 8 * size, 0);
87808bd6 1244
cec03d70
TT
1245 /* Pop off the address/regnum, and reset the location
1246 type. */
cb826367
TT
1247 if (ctx->location != DWARF_VALUE_LITERAL
1248 && ctx->location != DWARF_VALUE_OPTIMIZED_OUT)
cec03d70
TT
1249 dwarf_expr_pop (ctx);
1250 ctx->location = DWARF_VALUE_MEMORY;
87808bd6
JB
1251 }
1252 goto no_push;
1253
d3b1e874
TT
1254 case DW_OP_bit_piece:
1255 {
1256 ULONGEST size, offset;
1257
1258 /* Record the piece. */
1259 op_ptr = read_uleb128 (op_ptr, op_end, &size);
1260 op_ptr = read_uleb128 (op_ptr, op_end, &offset);
1261 add_piece (ctx, size, offset);
1262
1263 /* Pop off the address/regnum, and reset the location
1264 type. */
1265 if (ctx->location != DWARF_VALUE_LITERAL
1266 && ctx->location != DWARF_VALUE_OPTIMIZED_OUT)
1267 dwarf_expr_pop (ctx);
1268 ctx->location = DWARF_VALUE_MEMORY;
1269 }
1270 goto no_push;
1271
42be36b3
CT
1272 case DW_OP_GNU_uninit:
1273 if (op_ptr != op_end)
9c482037 1274 error (_("DWARF-2 expression error: DW_OP_GNU_uninit must always "
42be36b3
CT
1275 "be the very last op."));
1276
1277 ctx->initialized = 0;
1278 goto no_push;
1279
5c631832
JK
1280 case DW_OP_call2:
1281 result = extract_unsigned_integer (op_ptr, 2, byte_order);
1282 op_ptr += 2;
9e8b7a03 1283 ctx->funcs->dwarf_call (ctx, result);
5c631832
JK
1284 goto no_push;
1285
1286 case DW_OP_call4:
1287 result = extract_unsigned_integer (op_ptr, 4, byte_order);
1288 op_ptr += 4;
9e8b7a03 1289 ctx->funcs->dwarf_call (ctx, result);
5c631832 1290 goto no_push;
dd90784c
JK
1291
1292 case DW_OP_GNU_entry_value:
8e3b41a9
JK
1293 {
1294 ULONGEST len;
1295 int dwarf_reg;
1296 CORE_ADDR deref_size;
1297
1298 op_ptr = read_uleb128 (op_ptr, op_end, &len);
1299 if (op_ptr + len > op_end)
1300 error (_("DW_OP_GNU_entry_value: too few bytes available."));
1301
1302 dwarf_reg = dwarf_block_to_dwarf_reg (op_ptr, op_ptr + len);
1303 if (dwarf_reg != -1)
1304 {
1305 op_ptr += len;
1306 ctx->funcs->push_dwarf_reg_entry_value (ctx, dwarf_reg,
1307 0 /* unused */);
1308 goto no_push;
1309 }
1310
1311 error (_("DWARF-2 expression error: DW_OP_GNU_entry_value is "
1312 "supported only for single DW_OP_reg*"));
1313 }
5c631832 1314
8a9b8146
TT
1315 case DW_OP_GNU_const_type:
1316 {
1317 ULONGEST type_die;
1318 int n;
1319 const gdb_byte *data;
1320 struct type *type;
1321
1322 op_ptr = read_uleb128 (op_ptr, op_end, &type_die);
1323 n = *op_ptr++;
1324 data = op_ptr;
1325 op_ptr += n;
1326
1327 type = dwarf_get_base_type (ctx, type_die, n);
1328 result_val = value_from_contents (type, data);
1329 }
1330 break;
1331
1332 case DW_OP_GNU_regval_type:
1333 {
1334 ULONGEST type_die;
1335 struct type *type;
1336
1337 op_ptr = read_uleb128 (op_ptr, op_end, &reg);
1338 op_ptr = read_uleb128 (op_ptr, op_end, &type_die);
1339
1340 type = dwarf_get_base_type (ctx, type_die, 0);
9e8b7a03 1341 result = (ctx->funcs->read_reg) (ctx->baton, reg);
d1b66e6d
TT
1342 result_val = value_from_ulongest (address_type, result);
1343 result_val = value_from_contents (type,
1344 value_contents_all (result_val));
8a9b8146
TT
1345 }
1346 break;
1347
1348 case DW_OP_GNU_convert:
1349 case DW_OP_GNU_reinterpret:
1350 {
1351 ULONGEST type_die;
1352 struct type *type;
1353
1354 op_ptr = read_uleb128 (op_ptr, op_end, &type_die);
1355
c38c4bc5
TT
1356 if (type_die == 0)
1357 type = address_type;
1358 else
1359 type = dwarf_get_base_type (ctx, type_die, 0);
8a9b8146
TT
1360
1361 result_val = dwarf_expr_fetch (ctx, 0);
1362 dwarf_expr_pop (ctx);
1363
1364 if (op == DW_OP_GNU_convert)
1365 result_val = value_cast (type, result_val);
1366 else if (type == value_type (result_val))
1367 {
1368 /* Nothing. */
1369 }
1370 else if (TYPE_LENGTH (type)
1371 != TYPE_LENGTH (value_type (result_val)))
1372 error (_("DW_OP_GNU_reinterpret has wrong size"));
1373 else
1374 result_val
1375 = value_from_contents (type,
1376 value_contents_all (result_val));
1377 }
1378 break;
1379
4c2df51b 1380 default:
8a3fe4f8 1381 error (_("Unhandled dwarf expression opcode 0x%x"), op);
4c2df51b
DJ
1382 }
1383
1384 /* Most things push a result value. */
8a9b8146
TT
1385 gdb_assert (result_val != NULL);
1386 dwarf_expr_push (ctx, result_val, in_stack_memory);
82ae4854 1387 no_push:
b27cf2b3 1388 ;
4c2df51b 1389 }
1e3a102a 1390
8cf6f0b1
TT
1391 /* To simplify our main caller, if the result is an implicit
1392 pointer, then make a pieced value. This is ok because we can't
1393 have implicit pointers in contexts where pieces are invalid. */
1394 if (ctx->location == DWARF_VALUE_IMPLICIT_POINTER)
1395 add_piece (ctx, 8 * ctx->addr_size, 0);
1396
dd90784c 1397abort_expression:
1e3a102a
JK
1398 ctx->recursion_depth--;
1399 gdb_assert (ctx->recursion_depth >= 0);
8a9b8146
TT
1400}
1401
3c6e0cb3
JK
/* Stub dwarf_expr_context_funcs.read_reg implementation.

   Installed in evaluation contexts where register access is not
   available.  Always signals an error; BATON and REGNUM are ignored,
   and no CORE_ADDR value is ever produced on this path.  */

CORE_ADDR
ctx_no_read_reg (void *baton, int regnum)
{
  error (_("Registers access is invalid in this context"));
}
1409
523f3620
JK
/* Stub dwarf_expr_context_funcs.get_frame_base implementation.

   Installed in contexts with no frame-base information; signals an
   error naming DW_OP_fbreg, the operation that requires it.  BATON,
   START and LENGTH are ignored.  */

void
ctx_no_get_frame_base (void *baton, const gdb_byte **start, size_t *length)
{
  error (_("%s is invalid in this context"), "DW_OP_fbreg");
}
1417
/* Stub dwarf_expr_context_funcs.get_frame_cfa implementation.

   Installed in contexts with no CFA available; signals an error naming
   DW_OP_call_frame_cfa, the operation that requires it.  BATON is
   ignored and no value is ever returned on this path.  */

CORE_ADDR
ctx_no_get_frame_cfa (void *baton)
{
  error (_("%s is invalid in this context"), "DW_OP_call_frame_cfa");
}
1425
/* Stub dwarf_expr_context_funcs.get_frame_pc implementation.

   Installed in contexts with no frame PC available; signals an error
   naming DW_OP_GNU_implicit_pointer, the operation that requires it.
   BATON is ignored and no value is ever returned on this path.  */

CORE_ADDR
ctx_no_get_frame_pc (void *baton)
{
  error (_("%s is invalid in this context"), "DW_OP_GNU_implicit_pointer");
}
1433
/* Stub dwarf_expr_context_funcs.get_tls_address implementation.

   Installed in contexts where thread-local storage cannot be resolved;
   signals an error naming DW_OP_GNU_push_tls_address, the operation
   that requires it.  BATON and OFFSET are ignored.  */

CORE_ADDR
ctx_no_get_tls_address (void *baton, CORE_ADDR offset)
{
  error (_("%s is invalid in this context"), "DW_OP_GNU_push_tls_address");
}
1441
/* Stub dwarf_expr_context_funcs.dwarf_call implementation.

   Installed in contexts where DW_OP_call2/DW_OP_call4 subexpression
   calls cannot be performed; signals an error.  CTX and DIE_OFFSET are
   ignored.  */

void
ctx_no_dwarf_call (struct dwarf_expr_context *ctx, size_t die_offset)
{
  error (_("%s is invalid in this context"), "DW_OP_call*");
}
1449
/* Stub dwarf_expr_context_funcs.get_base_type implementation.

   Installed in contexts that cannot look up DWARF base type DIEs
   (needed by the typed-DWARF operations such as DW_OP_GNU_const_type
   and DW_OP_GNU_deref_type); signals an error.  CTX and DIE are
   ignored, and no type is ever returned on this path.  */

struct type *
ctx_no_get_base_type (struct dwarf_expr_context *ctx, size_t die)
{
  error (_("Support for typed DWARF is not supported in this context"));
}
1457
8e3b41a9
JK
/* Stub dwarf_expr_context_funcs.push_dwarf_reg_entry_value
   implementation.  (The field is named push_dwarf_reg_entry_value --
   see its invocation in the DW_OP_GNU_entry_value handler.)

   Installed in contexts that cannot evaluate a register's value at
   function entry; reports an internal error since reaching this stub
   indicates DW_OP_GNU_entry_value support is missing rather than a
   user mistake.  CTX, DWARF_REG and FB_OFFSET are ignored.  */

void
ctx_no_push_dwarf_reg_entry_value (struct dwarf_expr_context *ctx,
				   int dwarf_reg, CORE_ADDR fb_offset)
{
  internal_error (__FILE__, __LINE__,
		  _("Support for DW_OP_GNU_entry_value is unimplemented"));
}
1468
8a9b8146
TT
/* Module initializer: register the per-architecture data slot used to
   lazily create the DWARF evaluator's gdbarch-specific types (see
   dwarf_gdbarch_types_init and the dwarf_arch_cookie declaration at
   the top of this file).  */

void
_initialize_dwarf2expr (void)
{
  dwarf_arch_cookie
    = gdbarch_data_register_post_init (dwarf_gdbarch_types_init);
}
This page took 0.684079 seconds and 4 git commands to generate.