gdb/dwarf2expr.c (binutils-gdb.git)
1 /* DWARF 2 Expression Evaluator.
2
3 Copyright (C) 2001-2016 Free Software Foundation, Inc.
4
5 Contributed by Daniel Berlin (dan@dberlin.org)
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "symtab.h"
24 #include "gdbtypes.h"
25 #include "value.h"
26 #include "gdbcore.h"
27 #include "dwarf2.h"
28 #include "dwarf2expr.h"
29 #include "dwarf2loc.h"
30
31 /* Cookie for gdbarch data. */
32
33 static struct gdbarch_data *dwarf_arch_cookie;
34
35 /* This holds gdbarch-specific types used by the DWARF expression
36 evaluator. See comments in execute_stack_op. */
37
38 struct dwarf_gdbarch_types
39 {
40 struct type *dw_types[3];
41 };
42
43 /* Allocate and fill in dwarf_gdbarch_types for an arch. */
44
45 static void *
46 dwarf_gdbarch_types_init (struct gdbarch *gdbarch)
47 {
48 struct dwarf_gdbarch_types *types
49 = GDBARCH_OBSTACK_ZALLOC (gdbarch, struct dwarf_gdbarch_types);
50
51 /* The types themselves are lazily initialized. */
52
53 return types;
54 }
55
56 /* Return the type used for DWARF operations where the type is
57 unspecified in the DWARF spec. Only certain sizes are
58 supported. */
59
60 struct type *
61 dwarf_expr_context::address_type () const
62 {
63 struct dwarf_gdbarch_types *types
64 = (struct dwarf_gdbarch_types *) gdbarch_data (this->gdbarch,
65 dwarf_arch_cookie);
66 int ndx;
67
68 if (this->addr_size == 2)
69 ndx = 0;
70 else if (this->addr_size == 4)
71 ndx = 1;
72 else if (this->addr_size == 8)
73 ndx = 2;
74 else
75 error (_("Unsupported address size in DWARF expressions: %d bits"),
76 8 * this->addr_size);
77
78 if (types->dw_types[ndx] == NULL)
79 types->dw_types[ndx]
80 = arch_integer_type (this->gdbarch,
81 8 * this->addr_size,
82 0, "<signed DWARF address type>");
83
84 return types->dw_types[ndx];
85 }
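/* For example, with ADDR_SIZE == 4 this returns a 32-bit signed integer
   type, created on first use and cached in dw_types[1] for the
   architecture; any size other than 2, 4 or 8 bytes raises the error
   above.  */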
86
87 /* Create a new context for the expression evaluator. */
88
89 dwarf_expr_context::dwarf_expr_context ()
90 : stack (NULL),
91 stack_len (0),
92 stack_allocated (10),
93 gdbarch (NULL),
94 addr_size (0),
95 ref_addr_size (0),
96 offset (0),
97 recursion_depth (0),
98 max_recursion_depth (0x100),
99 location (DWARF_VALUE_MEMORY),
100 len (0),
101 data (NULL),
102 initialized (0),
103 num_pieces (0),
104 pieces (NULL)
105 {
106 this->stack = XNEWVEC (struct dwarf_stack_value, this->stack_allocated);
107 }
108
109 /* Clean up a dwarf_expr_context. */
110
111 dwarf_expr_context::~dwarf_expr_context ()
112 {
113 xfree (this->stack);
114 xfree (this->pieces);
115 }
116
117 /* Expand the memory allocated for the stack so that it can hold at
118 least NEED more elements than are currently used. */
119
120 void
121 dwarf_expr_context::grow_stack (size_t need)
122 {
123 if (this->stack_len + need > this->stack_allocated)
124 {
125 size_t newlen = this->stack_len + need + 10;
126
127 this->stack = XRESIZEVEC (struct dwarf_stack_value, this->stack, newlen);
128 this->stack_allocated = newlen;
129 }
130 }
131
132 /* Push VALUE onto the stack. */
133
134 void
135 dwarf_expr_context::push (struct value *value, int in_stack_memory)
136 {
137 struct dwarf_stack_value *v;
138
139 grow_stack (1);
140 v = &this->stack[this->stack_len++];
141 v->value = value;
142 v->in_stack_memory = in_stack_memory;
143 }
144
145 /* Push the address VALUE onto the stack. */
146
147 void
148 dwarf_expr_context::push_address (CORE_ADDR value, int in_stack_memory)
149 {
150 push (value_from_ulongest (address_type (), value), in_stack_memory);
151 }
152
153 /* Pop the top item off of the stack. */
154
155 void
156 dwarf_expr_context::pop ()
157 {
158 if (this->stack_len <= 0)
159 error (_("dwarf expression stack underflow"));
160 this->stack_len--;
161 }
162
163 /* Retrieve the N'th item on the stack. */
164
165 struct value *
166 dwarf_expr_context::fetch (int n)
167 {
168 if (this->stack_len <= n)
169 error (_("Asked for position %d of stack, "
170 "stack only has %d elements on it."),
171 n, this->stack_len);
172 return this->stack[this->stack_len - (1 + n)].value;
173 }
174
175 /* Require that TYPE be an integral type; throw an exception if not. */
176
177 static void
178 dwarf_require_integral (struct type *type)
179 {
180 if (TYPE_CODE (type) != TYPE_CODE_INT
181 && TYPE_CODE (type) != TYPE_CODE_CHAR
182 && TYPE_CODE (type) != TYPE_CODE_BOOL)
183 error (_("integral type expected in DWARF expression"));
184 }
185
186 /* Return the unsigned form of TYPE. TYPE is necessarily an integral
187 type. */
188
189 static struct type *
190 get_unsigned_type (struct gdbarch *gdbarch, struct type *type)
191 {
192 switch (TYPE_LENGTH (type))
193 {
194 case 1:
195 return builtin_type (gdbarch)->builtin_uint8;
196 case 2:
197 return builtin_type (gdbarch)->builtin_uint16;
198 case 4:
199 return builtin_type (gdbarch)->builtin_uint32;
200 case 8:
201 return builtin_type (gdbarch)->builtin_uint64;
202 default:
203 error (_("no unsigned variant found for type, while evaluating "
204 "DWARF expression"));
205 }
206 }
207
208 /* Return the signed form of TYPE. TYPE is necessarily an integral
209 type. */
210
211 static struct type *
212 get_signed_type (struct gdbarch *gdbarch, struct type *type)
213 {
214 switch (TYPE_LENGTH (type))
215 {
216 case 1:
217 return builtin_type (gdbarch)->builtin_int8;
218 case 2:
219 return builtin_type (gdbarch)->builtin_int16;
220 case 4:
221 return builtin_type (gdbarch)->builtin_int32;
222 case 8:
223 return builtin_type (gdbarch)->builtin_int64;
224 default:
225 error (_("no signed variant found for type, while evaluating "
226 "DWARF expression"));
227 }
228 }
229
230 /* Retrieve the N'th item on the stack, converted to an address. */
231
232 CORE_ADDR
233 dwarf_expr_context::fetch_address (int n)
234 {
235 struct value *result_val = fetch (n);
236 enum bfd_endian byte_order = gdbarch_byte_order (this->gdbarch);
237 ULONGEST result;
238
239 dwarf_require_integral (value_type (result_val));
240 result = extract_unsigned_integer (value_contents (result_val),
241 TYPE_LENGTH (value_type (result_val)),
242 byte_order);
243
244 /* For most architectures, calling extract_unsigned_integer() alone
245 is sufficient for extracting an address. However, some
246 architectures (e.g. MIPS) use signed addresses and using
247 extract_unsigned_integer() will not produce a correct
248 result. Make sure we invoke gdbarch_integer_to_address()
249 for those architectures which require it. */
250 if (gdbarch_integer_to_address_p (this->gdbarch))
251 {
252 gdb_byte *buf = (gdb_byte *) alloca (this->addr_size);
253 struct type *int_type = get_unsigned_type (this->gdbarch,
254 value_type (result_val));
255
256 store_unsigned_integer (buf, this->addr_size, byte_order, result);
257 return gdbarch_integer_to_address (this->gdbarch, int_type, buf);
258 }
259
260 return (CORE_ADDR) result;
261 }
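/* For example, on MIPS, which installs gdbarch_integer_to_address, a
   4-byte value of 0x80001000 fetched here is not simply zero-extended;
   it is handed to gdbarch_integer_to_address, which on that target
   sign-extends it to the canonical address 0xffffffff80001000.  */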
262
263 /* Retrieve the in_stack_memory flag of the N'th item on the stack. */
264
265 int
266 dwarf_expr_context::fetch_in_stack_memory (int n)
267 {
268 if (this->stack_len <= n)
269 error (_("Asked for position %d of stack, "
270 "stack only has %d elements on it."),
271 n, this->stack_len);
272 return this->stack[this->stack_len - (1 + n)].in_stack_memory;
273 }
274
275 /* Return true if the expression stack is empty. */
276
277 int
278 dwarf_expr_context::stack_empty_p () const
279 {
280 return this->stack_len == 0;
281 }
282
283 /* Add a new piece to the dwarf_expr_context's piece list. */
284 void
285 dwarf_expr_context::add_piece (ULONGEST size, ULONGEST offset)
286 {
287 struct dwarf_expr_piece *p;
288
289 this->num_pieces++;
290
291 this->pieces
292 = XRESIZEVEC (struct dwarf_expr_piece, this->pieces, this->num_pieces);
293
294 p = &this->pieces[this->num_pieces - 1];
295 p->location = this->location;
296 p->size = size;
297 p->offset = offset;
298
299 if (p->location == DWARF_VALUE_LITERAL)
300 {
301 p->v.literal.data = this->data;
302 p->v.literal.length = this->len;
303 }
304 else if (stack_empty_p ())
305 {
306 p->location = DWARF_VALUE_OPTIMIZED_OUT;
307 /* Also reset the context's location, for our callers. This is
308 a somewhat strange approach, but this lets us avoid setting
309 the location to DWARF_VALUE_MEMORY in all the individual
310 cases in the evaluator. */
311 this->location = DWARF_VALUE_OPTIMIZED_OUT;
312 }
313 else if (p->location == DWARF_VALUE_MEMORY)
314 {
315 p->v.mem.addr = fetch_address (0);
316 p->v.mem.in_stack_memory = fetch_in_stack_memory (0);
317 }
318 else if (p->location == DWARF_VALUE_IMPLICIT_POINTER)
319 {
320 p->v.ptr.die.sect_off = this->len;
321 p->v.ptr.offset = value_as_long (fetch (0));
322 }
323 else if (p->location == DWARF_VALUE_REGISTER)
324 p->v.regno = value_as_long (fetch (0));
325 else
326 {
327 p->v.value = fetch (0);
328 }
329 }
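/* For example, evaluating the location expression
     DW_OP_reg3 DW_OP_piece 4 DW_OP_fbreg -8 DW_OP_piece 4
   produces two entries here: a 32-bit piece located in DWARF register 3
   and a 32-bit piece located in memory at frame base - 8.  Note that
   SIZE is always a bit count: DW_OP_piece multiplies its byte operand
   by 8 before calling this function, while DW_OP_bit_piece passes its
   operands through unchanged.  */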
330
331 /* Evaluate the expression at ADDR (LEN bytes long). */
332
333 void
334 dwarf_expr_context::eval (const gdb_byte *addr, size_t len)
335 {
336 int old_recursion_depth = this->recursion_depth;
337
338 execute_stack_op (addr, addr + len);
339
340 /* RECURSION_DEPTH becomes invalid if an exception was thrown here. */
341
342 gdb_assert (this->recursion_depth == old_recursion_depth);
343 }
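/* A minimal usage sketch (hypothetical caller; the concrete subclasses
   that implement virtual hooks such as read_mem and read_addr_from_reg
   live in dwarf2loc.c):

     my_expr_context ctx;            (a hypothetical subclass)
     ctx.gdbarch = get_frame_arch (frame);
     ctx.addr_size = 8;
     ctx.eval (expr_bytes, expr_len);
     if (ctx.location == DWARF_VALUE_MEMORY)
       addr = ctx.fetch_address (0);  */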
344
345 /* Helper to read a uleb128 value or throw an error. */
346
347 const gdb_byte *
348 safe_read_uleb128 (const gdb_byte *buf, const gdb_byte *buf_end,
349 uint64_t *r)
350 {
351 buf = gdb_read_uleb128 (buf, buf_end, r);
352 if (buf == NULL)
353 error (_("DWARF expression error: ran off end of buffer reading uleb128 value"));
354 return buf;
355 }
356
357 /* Helper to read a sleb128 value or throw an error. */
358
359 const gdb_byte *
360 safe_read_sleb128 (const gdb_byte *buf, const gdb_byte *buf_end,
361 int64_t *r)
362 {
363 buf = gdb_read_sleb128 (buf, buf_end, r);
364 if (buf == NULL)
365 error (_("DWARF expression error: ran off end of buffer reading sleb128 value"));
366 return buf;
367 }
368
369 const gdb_byte *
370 safe_skip_leb128 (const gdb_byte *buf, const gdb_byte *buf_end)
371 {
372 buf = gdb_skip_leb128 (buf, buf_end);
373 if (buf == NULL)
374 error (_("DWARF expression error: ran off end of buffer reading leb128 value"));
375 return buf;
376 }
377 \f
378
379 /* Check that the current operator is either at the end of an
380 expression, or that it is followed by a composition operator or by
381 DW_OP_GNU_uninit (which should terminate the expression). */
382
383 void
384 dwarf_expr_require_composition (const gdb_byte *op_ptr, const gdb_byte *op_end,
385 const char *op_name)
386 {
387 if (op_ptr != op_end && *op_ptr != DW_OP_piece && *op_ptr != DW_OP_bit_piece
388 && *op_ptr != DW_OP_GNU_uninit)
389 error (_("DWARF-2 expression error: `%s' operations must be "
390 "used either alone or in conjunction with DW_OP_piece "
391 "or DW_OP_bit_piece."),
392 op_name);
393 }
394
395 /* Return true iff the types T1 and T2 are "the same". This only does
396 checks that might reasonably be needed to compare DWARF base
397 types. */
398
399 static int
400 base_types_equal_p (struct type *t1, struct type *t2)
401 {
402 if (TYPE_CODE (t1) != TYPE_CODE (t2))
403 return 0;
404 if (TYPE_UNSIGNED (t1) != TYPE_UNSIGNED (t2))
405 return 0;
406 return TYPE_LENGTH (t1) == TYPE_LENGTH (t2);
407 }
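/* For example, a CU's own 4-byte unsigned base type and the builtin
   uint32 used by get_unsigned_type above compare as "the same" here
   even though their names differ, because only the type code,
   signedness and length are checked.  */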
408
409 /* If [BUF, BUF_END) contains DW_FORM_block* with a single DW_OP_reg* return the
410 DWARF register number. Otherwise return -1. */
411
412 int
413 dwarf_block_to_dwarf_reg (const gdb_byte *buf, const gdb_byte *buf_end)
414 {
415 uint64_t dwarf_reg;
416
417 if (buf_end <= buf)
418 return -1;
419 if (*buf >= DW_OP_reg0 && *buf <= DW_OP_reg31)
420 {
421 if (buf_end - buf != 1)
422 return -1;
423 return *buf - DW_OP_reg0;
424 }
425
426 if (*buf == DW_OP_GNU_regval_type)
427 {
428 buf++;
429 buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
430 if (buf == NULL)
431 return -1;
432 buf = gdb_skip_leb128 (buf, buf_end);
433 if (buf == NULL)
434 return -1;
435 }
436 else if (*buf == DW_OP_regx)
437 {
438 buf++;
439 buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
440 if (buf == NULL)
441 return -1;
442 }
443 else
444 return -1;
445 if (buf != buf_end || (int) dwarf_reg != dwarf_reg)
446 return -1;
447 return dwarf_reg;
448 }
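/* For example, the one-byte block { DW_OP_reg5 } and the block
   { DW_OP_regx, 5 } both yield 5, as does { DW_OP_GNU_regval_type, 5,
   <type DIE offset> }.  A block such as { DW_OP_reg5, DW_OP_piece, 4 }
   yields -1, since anything beyond the single register operation
   disqualifies the block.  */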
449
450 /* If [BUF, BUF_END) contains DW_FORM_block* with just DW_OP_breg*(0) and
451 DW_OP_deref* return the DWARF register number. Otherwise return -1.
452 DEREF_SIZE_RETURN contains -1 for DW_OP_deref; otherwise it contains the
453 size from DW_OP_deref_size. */
454
455 int
456 dwarf_block_to_dwarf_reg_deref (const gdb_byte *buf, const gdb_byte *buf_end,
457 CORE_ADDR *deref_size_return)
458 {
459 uint64_t dwarf_reg;
460 int64_t offset;
461
462 if (buf_end <= buf)
463 return -1;
464
465 if (*buf >= DW_OP_breg0 && *buf <= DW_OP_breg31)
466 {
467 dwarf_reg = *buf - DW_OP_breg0;
468 buf++;
469 if (buf >= buf_end)
470 return -1;
471 }
472 else if (*buf == DW_OP_bregx)
473 {
474 buf++;
475 buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
476 if (buf == NULL)
477 return -1;
478 if ((int) dwarf_reg != dwarf_reg)
479 return -1;
480 }
481 else
482 return -1;
483
484 buf = gdb_read_sleb128 (buf, buf_end, &offset);
485 if (buf == NULL)
486 return -1;
487 if (offset != 0)
488 return -1;
489
490 if (*buf == DW_OP_deref)
491 {
492 buf++;
493 *deref_size_return = -1;
494 }
495 else if (*buf == DW_OP_deref_size)
496 {
497 buf++;
498 if (buf >= buf_end)
499 return -1;
500 *deref_size_return = *buf++;
501 }
502 else
503 return -1;
504
505 if (buf != buf_end)
506 return -1;
507
508 return dwarf_reg;
509 }
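/* For example, { DW_OP_breg6, 0, DW_OP_deref } yields 6 with
   *DEREF_SIZE_RETURN set to -1, and { DW_OP_breg6, 0, DW_OP_deref_size, 4 }
   yields 6 with *DEREF_SIZE_RETURN set to 4.  A nonzero register offset,
   e.g. { DW_OP_breg6, 8, DW_OP_deref }, yields -1.  */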
510
511 /* If [BUF, BUF_END) contains DW_FORM_block* with a single DW_OP_fbreg(X) fill
512 in FB_OFFSET_RETURN with the X offset and return 1. Otherwise return 0. */
513
514 int
515 dwarf_block_to_fb_offset (const gdb_byte *buf, const gdb_byte *buf_end,
516 CORE_ADDR *fb_offset_return)
517 {
518 int64_t fb_offset;
519
520 if (buf_end <= buf)
521 return 0;
522
523 if (*buf != DW_OP_fbreg)
524 return 0;
525 buf++;
526
527 buf = gdb_read_sleb128 (buf, buf_end, &fb_offset);
528 if (buf == NULL)
529 return 0;
530 *fb_offset_return = fb_offset;
531 if (buf != buf_end || fb_offset != (LONGEST) *fb_offset_return)
532 return 0;
533
534 return 1;
535 }
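/* For example, { DW_OP_fbreg, <sleb128 -16> } stores -16 in
   *FB_OFFSET_RETURN and returns 1; any leading operation other than
   DW_OP_fbreg, or any trailing bytes, make this return 0.  */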
536
537 /* If [BUF, BUF_END) contains DW_FORM_block* with a single DW_OP_bregSP(X) fill
538 in SP_OFFSET_RETURN with the X offset and return 1. Otherwise return 0.
539 The matched SP register number depends on GDBARCH. */
540
541 int
542 dwarf_block_to_sp_offset (struct gdbarch *gdbarch, const gdb_byte *buf,
543 const gdb_byte *buf_end, CORE_ADDR *sp_offset_return)
544 {
545 uint64_t dwarf_reg;
546 int64_t sp_offset;
547
548 if (buf_end <= buf)
549 return 0;
550 if (*buf >= DW_OP_breg0 && *buf <= DW_OP_breg31)
551 {
552 dwarf_reg = *buf - DW_OP_breg0;
553 buf++;
554 }
555 else
556 {
557 if (*buf != DW_OP_bregx)
558 return 0;
559 buf++;
560 buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
561 if (buf == NULL)
562 return 0;
563 }
564
565 if (dwarf_reg_to_regnum (gdbarch, dwarf_reg)
566 != gdbarch_sp_regnum (gdbarch))
567 return 0;
568
569 buf = gdb_read_sleb128 (buf, buf_end, &sp_offset);
570 if (buf == NULL)
571 return 0;
572 *sp_offset_return = sp_offset;
573 if (buf != buf_end || sp_offset != (LONGEST) *sp_offset_return)
574 return 0;
575
576 return 1;
577 }
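/* For example, on x86-64, where DWARF register 7 maps to the stack
   pointer, { DW_OP_breg7, <sleb128 16> } stores 16 in *SP_OFFSET_RETURN
   and returns 1; the same block returns 0 on a target whose stack
   pointer has a different DWARF number.  */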
578
579 /* The engine for the expression evaluator. Using the context in this
580 object, evaluate the expression between OP_PTR and OP_END. */
581
582 void
583 dwarf_expr_context::execute_stack_op (const gdb_byte *op_ptr,
584 const gdb_byte *op_end)
585 {
586 enum bfd_endian byte_order = gdbarch_byte_order (this->gdbarch);
587 /* Old-style "untyped" DWARF values need special treatment in a
588 couple of places, specifically DW_OP_mod and DW_OP_shr. We need
589 a special type for these values so we can distinguish them from
590 values that have an explicit type, because explicitly-typed
591 values do not need special treatment. This special type must be
592 different (in the `==' sense) from any base type coming from the
593 CU. */
594 struct type *address_type = this->address_type ();
595
596 this->location = DWARF_VALUE_MEMORY;
597 this->initialized = 1; /* Default is initialized. */
598
599 if (this->recursion_depth > this->max_recursion_depth)
600 error (_("DWARF-2 expression error: Loop detected (%d)."),
601 this->recursion_depth);
602 this->recursion_depth++;
603
604 while (op_ptr < op_end)
605 {
606 enum dwarf_location_atom op = (enum dwarf_location_atom) *op_ptr++;
607 ULONGEST result;
608 /* Assume the value is not in stack memory.
609 Code that knows otherwise sets this to 1.
610 Some arithmetic on stack addresses can probably be assumed to still
611 be a stack address, but we skip this complication for now.
612 This is just an optimization, so it's always ok to punt
613 and leave this as 0. */
614 int in_stack_memory = 0;
615 uint64_t uoffset, reg;
616 int64_t offset;
617 struct value *result_val = NULL;
618
619 /* The DWARF expression might have a bug causing an infinite
620 loop. In that case, quitting is the only way out. */
621 QUIT;
622
623 switch (op)
624 {
625 case DW_OP_lit0:
626 case DW_OP_lit1:
627 case DW_OP_lit2:
628 case DW_OP_lit3:
629 case DW_OP_lit4:
630 case DW_OP_lit5:
631 case DW_OP_lit6:
632 case DW_OP_lit7:
633 case DW_OP_lit8:
634 case DW_OP_lit9:
635 case DW_OP_lit10:
636 case DW_OP_lit11:
637 case DW_OP_lit12:
638 case DW_OP_lit13:
639 case DW_OP_lit14:
640 case DW_OP_lit15:
641 case DW_OP_lit16:
642 case DW_OP_lit17:
643 case DW_OP_lit18:
644 case DW_OP_lit19:
645 case DW_OP_lit20:
646 case DW_OP_lit21:
647 case DW_OP_lit22:
648 case DW_OP_lit23:
649 case DW_OP_lit24:
650 case DW_OP_lit25:
651 case DW_OP_lit26:
652 case DW_OP_lit27:
653 case DW_OP_lit28:
654 case DW_OP_lit29:
655 case DW_OP_lit30:
656 case DW_OP_lit31:
657 result = op - DW_OP_lit0;
658 result_val = value_from_ulongest (address_type, result);
659 break;
660
661 case DW_OP_addr:
662 result = extract_unsigned_integer (op_ptr,
663 this->addr_size, byte_order);
664 op_ptr += this->addr_size;
665 /* Some versions of GCC emit DW_OP_addr before
666 DW_OP_GNU_push_tls_address. In this case the value is an
667 index, not an address. We don't support things like
668 branching between the address and the TLS op. */
669 if (op_ptr >= op_end || *op_ptr != DW_OP_GNU_push_tls_address)
670 result += this->offset;
671 result_val = value_from_ulongest (address_type, result);
672 break;
673
674 case DW_OP_GNU_addr_index:
675 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
676 result = this->get_addr_index (uoffset);
677 result += this->offset;
678 result_val = value_from_ulongest (address_type, result);
679 break;
680 case DW_OP_GNU_const_index:
681 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
682 result = this->get_addr_index (uoffset);
683 result_val = value_from_ulongest (address_type, result);
684 break;
685
686 case DW_OP_const1u:
687 result = extract_unsigned_integer (op_ptr, 1, byte_order);
688 result_val = value_from_ulongest (address_type, result);
689 op_ptr += 1;
690 break;
691 case DW_OP_const1s:
692 result = extract_signed_integer (op_ptr, 1, byte_order);
693 result_val = value_from_ulongest (address_type, result);
694 op_ptr += 1;
695 break;
696 case DW_OP_const2u:
697 result = extract_unsigned_integer (op_ptr, 2, byte_order);
698 result_val = value_from_ulongest (address_type, result);
699 op_ptr += 2;
700 break;
701 case DW_OP_const2s:
702 result = extract_signed_integer (op_ptr, 2, byte_order);
703 result_val = value_from_ulongest (address_type, result);
704 op_ptr += 2;
705 break;
706 case DW_OP_const4u:
707 result = extract_unsigned_integer (op_ptr, 4, byte_order);
708 result_val = value_from_ulongest (address_type, result);
709 op_ptr += 4;
710 break;
711 case DW_OP_const4s:
712 result = extract_signed_integer (op_ptr, 4, byte_order);
713 result_val = value_from_ulongest (address_type, result);
714 op_ptr += 4;
715 break;
716 case DW_OP_const8u:
717 result = extract_unsigned_integer (op_ptr, 8, byte_order);
718 result_val = value_from_ulongest (address_type, result);
719 op_ptr += 8;
720 break;
721 case DW_OP_const8s:
722 result = extract_signed_integer (op_ptr, 8, byte_order);
723 result_val = value_from_ulongest (address_type, result);
724 op_ptr += 8;
725 break;
726 case DW_OP_constu:
727 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
728 result = uoffset;
729 result_val = value_from_ulongest (address_type, result);
730 break;
731 case DW_OP_consts:
732 op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset);
733 result = offset;
734 result_val = value_from_ulongest (address_type, result);
735 break;
736
737 /* The DW_OP_reg operations are required to occur alone in
738 location expressions. */
739 case DW_OP_reg0:
740 case DW_OP_reg1:
741 case DW_OP_reg2:
742 case DW_OP_reg3:
743 case DW_OP_reg4:
744 case DW_OP_reg5:
745 case DW_OP_reg6:
746 case DW_OP_reg7:
747 case DW_OP_reg8:
748 case DW_OP_reg9:
749 case DW_OP_reg10:
750 case DW_OP_reg11:
751 case DW_OP_reg12:
752 case DW_OP_reg13:
753 case DW_OP_reg14:
754 case DW_OP_reg15:
755 case DW_OP_reg16:
756 case DW_OP_reg17:
757 case DW_OP_reg18:
758 case DW_OP_reg19:
759 case DW_OP_reg20:
760 case DW_OP_reg21:
761 case DW_OP_reg22:
762 case DW_OP_reg23:
763 case DW_OP_reg24:
764 case DW_OP_reg25:
765 case DW_OP_reg26:
766 case DW_OP_reg27:
767 case DW_OP_reg28:
768 case DW_OP_reg29:
769 case DW_OP_reg30:
770 case DW_OP_reg31:
771 dwarf_expr_require_composition (op_ptr, op_end, "DW_OP_reg");
772
773 result = op - DW_OP_reg0;
774 result_val = value_from_ulongest (address_type, result);
775 this->location = DWARF_VALUE_REGISTER;
776 break;
777
778 case DW_OP_regx:
779 op_ptr = safe_read_uleb128 (op_ptr, op_end, &reg);
780 dwarf_expr_require_composition (op_ptr, op_end, "DW_OP_regx");
781
782 result = reg;
783 result_val = value_from_ulongest (address_type, result);
784 this->location = DWARF_VALUE_REGISTER;
785 break;
786
787 case DW_OP_implicit_value:
788 {
789 uint64_t len;
790
791 op_ptr = safe_read_uleb128 (op_ptr, op_end, &len);
792 if (op_ptr + len > op_end)
793 error (_("DW_OP_implicit_value: too few bytes available."));
794 this->len = len;
795 this->data = op_ptr;
796 this->location = DWARF_VALUE_LITERAL;
797 op_ptr += len;
798 dwarf_expr_require_composition (op_ptr, op_end,
799 "DW_OP_implicit_value");
800 }
801 goto no_push;
802
803 case DW_OP_stack_value:
804 this->location = DWARF_VALUE_STACK;
805 dwarf_expr_require_composition (op_ptr, op_end, "DW_OP_stack_value");
806 goto no_push;
807
808 case DW_OP_GNU_implicit_pointer:
809 {
810 int64_t len;
811
812 if (this->ref_addr_size == -1)
813 error (_("DWARF-2 expression error: DW_OP_GNU_implicit_pointer "
814 "is not allowed in frame context"));
815
816 /* The referred-to DIE of sect_offset kind. */
817 this->len = extract_unsigned_integer (op_ptr, this->ref_addr_size,
818 byte_order);
819 op_ptr += this->ref_addr_size;
820
821 /* The byte offset into the data. */
822 op_ptr = safe_read_sleb128 (op_ptr, op_end, &len);
823 result = (ULONGEST) len;
824 result_val = value_from_ulongest (address_type, result);
825
826 this->location = DWARF_VALUE_IMPLICIT_POINTER;
827 dwarf_expr_require_composition (op_ptr, op_end,
828 "DW_OP_GNU_implicit_pointer");
829 }
830 break;
831
832 case DW_OP_breg0:
833 case DW_OP_breg1:
834 case DW_OP_breg2:
835 case DW_OP_breg3:
836 case DW_OP_breg4:
837 case DW_OP_breg5:
838 case DW_OP_breg6:
839 case DW_OP_breg7:
840 case DW_OP_breg8:
841 case DW_OP_breg9:
842 case DW_OP_breg10:
843 case DW_OP_breg11:
844 case DW_OP_breg12:
845 case DW_OP_breg13:
846 case DW_OP_breg14:
847 case DW_OP_breg15:
848 case DW_OP_breg16:
849 case DW_OP_breg17:
850 case DW_OP_breg18:
851 case DW_OP_breg19:
852 case DW_OP_breg20:
853 case DW_OP_breg21:
854 case DW_OP_breg22:
855 case DW_OP_breg23:
856 case DW_OP_breg24:
857 case DW_OP_breg25:
858 case DW_OP_breg26:
859 case DW_OP_breg27:
860 case DW_OP_breg28:
861 case DW_OP_breg29:
862 case DW_OP_breg30:
863 case DW_OP_breg31:
864 {
865 op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset);
866 result = this->read_addr_from_reg (op - DW_OP_breg0);
867 result += offset;
868 result_val = value_from_ulongest (address_type, result);
869 }
870 break;
871 case DW_OP_bregx:
872 {
873 op_ptr = safe_read_uleb128 (op_ptr, op_end, &reg);
874 op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset);
875 result = this->read_addr_from_reg (reg);
876 result += offset;
877 result_val = value_from_ulongest (address_type, result);
878 }
879 break;
880 case DW_OP_fbreg:
881 {
882 const gdb_byte *datastart;
883 size_t datalen;
884 unsigned int before_stack_len;
885
886 op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset);
887 /* Rather than create a whole new context, we simply
888 record the stack length before execution, then reset it
889 afterwards, effectively erasing whatever the recursive
890 call put there. */
891 before_stack_len = this->stack_len;
892 /* FIXME: cagney/2003-03-26: This code should be using
893 get_frame_base_address(), and then implement a dwarf2
894 specific this_base method. */
895 this->get_frame_base (&datastart, &datalen);
896 eval (datastart, datalen);
897 if (this->location == DWARF_VALUE_MEMORY)
898 result = fetch_address (0);
899 else if (this->location == DWARF_VALUE_REGISTER)
900 result = this->read_addr_from_reg (value_as_long (fetch (0)));
901 else
902 error (_("Not implemented: computing frame "
903 "base using explicit value operator"));
904 result = result + offset;
905 result_val = value_from_ulongest (address_type, result);
906 in_stack_memory = 1;
907 this->stack_len = before_stack_len;
908 this->location = DWARF_VALUE_MEMORY;
909 }
910 break;
911
912 case DW_OP_dup:
913 result_val = fetch (0);
914 in_stack_memory = fetch_in_stack_memory (0);
915 break;
916
917 case DW_OP_drop:
918 pop ();
919 goto no_push;
920
921 case DW_OP_pick:
922 offset = *op_ptr++;
923 result_val = fetch (offset);
924 in_stack_memory = fetch_in_stack_memory (offset);
925 break;
926
927 case DW_OP_swap:
928 {
929 struct dwarf_stack_value t1, t2;
930
931 if (this->stack_len < 2)
932 error (_("Not enough elements for "
933 "DW_OP_swap. Need 2, have %d."),
934 this->stack_len);
935 t1 = this->stack[this->stack_len - 1];
936 t2 = this->stack[this->stack_len - 2];
937 this->stack[this->stack_len - 1] = t2;
938 this->stack[this->stack_len - 2] = t1;
939 goto no_push;
940 }
941
942 case DW_OP_over:
943 result_val = fetch (1);
944 in_stack_memory = fetch_in_stack_memory (1);
945 break;
946
947 case DW_OP_rot:
948 {
949 struct dwarf_stack_value t1, t2, t3;
950
951 if (this->stack_len < 3)
952 error (_("Not enough elements for "
953 "DW_OP_rot. Need 3, have %d."),
954 this->stack_len);
955 t1 = this->stack[this->stack_len - 1];
956 t2 = this->stack[this->stack_len - 2];
957 t3 = this->stack[this->stack_len - 3];
958 this->stack[this->stack_len - 1] = t2;
959 this->stack[this->stack_len - 2] = t3;
960 this->stack[this->stack_len - 3] = t1;
961 goto no_push;
962 }
963
964 case DW_OP_deref:
965 case DW_OP_deref_size:
966 case DW_OP_GNU_deref_type:
967 {
968 int addr_size = (op == DW_OP_deref ? this->addr_size : *op_ptr++);
969 gdb_byte *buf = (gdb_byte *) alloca (addr_size);
970 CORE_ADDR addr = fetch_address (0);
971 struct type *type;
972
973 pop ();
974
975 if (op == DW_OP_GNU_deref_type)
976 {
977 cu_offset type_die;
978
979 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
980 type_die.cu_off = uoffset;
981 type = get_base_type (type_die, 0);
982 }
983 else
984 type = address_type;
985
986 this->read_mem (buf, addr, addr_size);
987
988 /* If the size of the object read from memory is different
989 from the type length, we need to zero-extend it. */
990 if (TYPE_LENGTH (type) != addr_size)
991 {
992 ULONGEST result =
993 extract_unsigned_integer (buf, addr_size, byte_order);
994
995 buf = (gdb_byte *) alloca (TYPE_LENGTH (type));
996 store_unsigned_integer (buf, TYPE_LENGTH (type),
997 byte_order, result);
998 }
999
1000 result_val = value_from_contents_and_address (type, buf, addr);
1001 break;
1002 }
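/* For example, DW_OP_deref_size with an operand of 2 on a 64-bit target
   reads two bytes at ADDR and zero-extends them into the 8-byte untyped
   address type; DW_OP_GNU_deref_type instead obtains the type from the
   referenced DIE and interprets the bytes read with that type.  */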
1003
1004 case DW_OP_abs:
1005 case DW_OP_neg:
1006 case DW_OP_not:
1007 case DW_OP_plus_uconst:
1008 {
1009 /* Unary operations. */
1010 result_val = fetch (0);
1011 pop ();
1012
1013 switch (op)
1014 {
1015 case DW_OP_abs:
1016 if (value_less (result_val,
1017 value_zero (value_type (result_val), not_lval)))
1018 result_val = value_neg (result_val);
1019 break;
1020 case DW_OP_neg:
1021 result_val = value_neg (result_val);
1022 break;
1023 case DW_OP_not:
1024 dwarf_require_integral (value_type (result_val));
1025 result_val = value_complement (result_val);
1026 break;
1027 case DW_OP_plus_uconst:
1028 dwarf_require_integral (value_type (result_val));
1029 result = value_as_long (result_val);
1030 op_ptr = safe_read_uleb128 (op_ptr, op_end, &reg);
1031 result += reg;
1032 result_val = value_from_ulongest (address_type, result);
1033 break;
1034 }
1035 }
1036 break;
1037
1038 case DW_OP_and:
1039 case DW_OP_div:
1040 case DW_OP_minus:
1041 case DW_OP_mod:
1042 case DW_OP_mul:
1043 case DW_OP_or:
1044 case DW_OP_plus:
1045 case DW_OP_shl:
1046 case DW_OP_shr:
1047 case DW_OP_shra:
1048 case DW_OP_xor:
1049 case DW_OP_le:
1050 case DW_OP_ge:
1051 case DW_OP_eq:
1052 case DW_OP_lt:
1053 case DW_OP_gt:
1054 case DW_OP_ne:
1055 {
1056 /* Binary operations. */
1057 struct value *first, *second;
1058
1059 second = fetch (0);
1060 pop ();
1061
1062 first = fetch (0);
1063 pop ();
1064
1065 if (! base_types_equal_p (value_type (first), value_type (second)))
1066 error (_("Incompatible types on DWARF stack"));
1067
1068 switch (op)
1069 {
1070 case DW_OP_and:
1071 dwarf_require_integral (value_type (first));
1072 dwarf_require_integral (value_type (second));
1073 result_val = value_binop (first, second, BINOP_BITWISE_AND);
1074 break;
1075 case DW_OP_div:
1076 result_val = value_binop (first, second, BINOP_DIV);
1077 break;
1078 case DW_OP_minus:
1079 result_val = value_binop (first, second, BINOP_SUB);
1080 break;
1081 case DW_OP_mod:
1082 {
1083 int cast_back = 0;
1084 struct type *orig_type = value_type (first);
1085
1086 /* We have to special-case "old-style" untyped values
1087 -- these must have mod computed using unsigned
1088 math. */
1089 if (orig_type == address_type)
1090 {
1091 struct type *utype
1092 = get_unsigned_type (this->gdbarch, orig_type);
1093
1094 cast_back = 1;
1095 first = value_cast (utype, first);
1096 second = value_cast (utype, second);
1097 }
1098 /* Note that value_binop doesn't handle float or
1099 decimal float here. This seems unimportant. */
1100 result_val = value_binop (first, second, BINOP_MOD);
1101 if (cast_back)
1102 result_val = value_cast (orig_type, result_val);
1103 }
1104 break;
1105 case DW_OP_mul:
1106 result_val = value_binop (first, second, BINOP_MUL);
1107 break;
1108 case DW_OP_or:
1109 dwarf_require_integral (value_type (first));
1110 dwarf_require_integral (value_type (second));
1111 result_val = value_binop (first, second, BINOP_BITWISE_IOR);
1112 break;
1113 case DW_OP_plus:
1114 result_val = value_binop (first, second, BINOP_ADD);
1115 break;
1116 case DW_OP_shl:
1117 dwarf_require_integral (value_type (first));
1118 dwarf_require_integral (value_type (second));
1119 result_val = value_binop (first, second, BINOP_LSH);
1120 break;
1121 case DW_OP_shr:
1122 dwarf_require_integral (value_type (first));
1123 dwarf_require_integral (value_type (second));
1124 if (!TYPE_UNSIGNED (value_type (first)))
1125 {
1126 struct type *utype
1127 = get_unsigned_type (this->gdbarch, value_type (first));
1128
1129 first = value_cast (utype, first);
1130 }
1131
1132 result_val = value_binop (first, second, BINOP_RSH);
1133 /* Make sure we wind up with the same type we started
1134 with. */
1135 if (value_type (result_val) != value_type (second))
1136 result_val = value_cast (value_type (second), result_val);
1137 break;
1138 case DW_OP_shra:
1139 dwarf_require_integral (value_type (first));
1140 dwarf_require_integral (value_type (second));
1141 if (TYPE_UNSIGNED (value_type (first)))
1142 {
1143 struct type *stype
1144 = get_signed_type (this->gdbarch, value_type (first));
1145
1146 first = value_cast (stype, first);
1147 }
1148
1149 result_val = value_binop (first, second, BINOP_RSH);
1150 /* Make sure we wind up with the same type we started
1151 with. */
1152 if (value_type (result_val) != value_type (second))
1153 result_val = value_cast (value_type (second), result_val);
1154 break;
1155 case DW_OP_xor:
1156 dwarf_require_integral (value_type (first));
1157 dwarf_require_integral (value_type (second));
1158 result_val = value_binop (first, second, BINOP_BITWISE_XOR);
1159 break;
1160 case DW_OP_le:
1161 /* A <= B is !(B < A). */
1162 result = ! value_less (second, first);
1163 result_val = value_from_ulongest (address_type, result);
1164 break;
1165 case DW_OP_ge:
1166 /* A >= B is !(A < B). */
1167 result = ! value_less (first, second);
1168 result_val = value_from_ulongest (address_type, result);
1169 break;
1170 case DW_OP_eq:
1171 result = value_equal (first, second);
1172 result_val = value_from_ulongest (address_type, result);
1173 break;
1174 case DW_OP_lt:
1175 result = value_less (first, second);
1176 result_val = value_from_ulongest (address_type, result);
1177 break;
1178 case DW_OP_gt:
1179 /* A > B is B < A. */
1180 result = value_less (second, first);
1181 result_val = value_from_ulongest (address_type, result);
1182 break;
1183 case DW_OP_ne:
1184 result = ! value_equal (first, second);
1185 result_val = value_from_ulongest (address_type, result);
1186 break;
1187 default:
1188 internal_error (__FILE__, __LINE__,
1189 _("Can't be reached."));
1190 }
1191 }
1192 break;
1193
1194 case DW_OP_call_frame_cfa:
1195 result = this->get_frame_cfa ();
1196 result_val = value_from_ulongest (address_type, result);
1197 in_stack_memory = 1;
1198 break;
1199
1200 case DW_OP_GNU_push_tls_address:
1201 case DW_OP_form_tls_address:
1202 /* Variable is at a constant offset in the thread-local
1203 storage block into the objfile for the current thread and
1204 the dynamic linker module containing this expression. Here
1205 we return the offset from that base. The top of the
1206 stack has the offset from the beginning of the thread
1207 control block at which the variable is located. Nothing
1208 should follow this operator, so the top of stack would be
1209 returned. */
1210 result = value_as_long (fetch (0));
1211 pop ();
1212 result = this->get_tls_address (result);
1213 result_val = value_from_ulongest (address_type, result);
1214 break;
1215
1216 case DW_OP_skip:
1217 offset = extract_signed_integer (op_ptr, 2, byte_order);
1218 op_ptr += 2;
1219 op_ptr += offset;
1220 goto no_push;
1221
1222 case DW_OP_bra:
1223 {
1224 struct value *val;
1225
1226 offset = extract_signed_integer (op_ptr, 2, byte_order);
1227 op_ptr += 2;
1228 val = fetch (0);
1229 dwarf_require_integral (value_type (val));
1230 if (value_as_long (val) != 0)
1231 op_ptr += offset;
1232 pop ();
1233 }
1234 goto no_push;
1235
1236 case DW_OP_nop:
1237 goto no_push;
1238
1239 case DW_OP_piece:
1240 {
1241 uint64_t size;
1242
1243 /* Record the piece. */
1244 op_ptr = safe_read_uleb128 (op_ptr, op_end, &size);
1245 add_piece (8 * size, 0);
1246
1247 /* Pop off the address/regnum, and reset the location
1248 type. */
1249 if (this->location != DWARF_VALUE_LITERAL
1250 && this->location != DWARF_VALUE_OPTIMIZED_OUT)
1251 pop ();
1252 this->location = DWARF_VALUE_MEMORY;
1253 }
1254 goto no_push;
1255
1256 case DW_OP_bit_piece:
1257 {
1258 uint64_t size, offset;
1259
1260 /* Record the piece. */
1261 op_ptr = safe_read_uleb128 (op_ptr, op_end, &size);
1262 op_ptr = safe_read_uleb128 (op_ptr, op_end, &offset);
1263 add_piece (size, offset);
1264
1265 /* Pop off the address/regnum, and reset the location
1266 type. */
1267 if (this->location != DWARF_VALUE_LITERAL
1268 && this->location != DWARF_VALUE_OPTIMIZED_OUT)
1269 pop ();
1270 this->location = DWARF_VALUE_MEMORY;
1271 }
1272 goto no_push;
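/* For example, DW_OP_reg0 DW_OP_bit_piece 12 4 records a 12-bit piece
   starting at bit offset 4 within register 0; unlike DW_OP_piece above,
   the operands are already bit counts and are passed to add_piece
   unscaled.  */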
1273
1274 case DW_OP_GNU_uninit:
1275 if (op_ptr != op_end)
1276 error (_("DWARF-2 expression error: DW_OP_GNU_uninit must always "
1277 "be the very last op."));
1278
1279 this->initialized = 0;
1280 goto no_push;
1281
1282 case DW_OP_call2:
1283 {
1284 cu_offset offset;
1285
1286 offset.cu_off = extract_unsigned_integer (op_ptr, 2, byte_order);
1287 op_ptr += 2;
1288 this->dwarf_call (offset);
1289 }
1290 goto no_push;
1291
1292 case DW_OP_call4:
1293 {
1294 cu_offset offset;
1295
1296 offset.cu_off = extract_unsigned_integer (op_ptr, 4, byte_order);
1297 op_ptr += 4;
1298 this->dwarf_call (offset);
1299 }
1300 goto no_push;
1301
1302 case DW_OP_GNU_entry_value:
1303 {
1304 uint64_t len;
1305 CORE_ADDR deref_size;
1306 union call_site_parameter_u kind_u;
1307
1308 op_ptr = safe_read_uleb128 (op_ptr, op_end, &len);
1309 if (op_ptr + len > op_end)
1310 error (_("DW_OP_GNU_entry_value: too few bytes available."));
1311
1312 kind_u.dwarf_reg = dwarf_block_to_dwarf_reg (op_ptr, op_ptr + len);
1313 if (kind_u.dwarf_reg != -1)
1314 {
1315 op_ptr += len;
1316 this->push_dwarf_reg_entry_value (CALL_SITE_PARAMETER_DWARF_REG,
1317 kind_u,
1318 -1 /* deref_size */);
1319 goto no_push;
1320 }
1321
1322 kind_u.dwarf_reg = dwarf_block_to_dwarf_reg_deref (op_ptr,
1323 op_ptr + len,
1324 &deref_size);
1325 if (kind_u.dwarf_reg != -1)
1326 {
1327 if (deref_size == -1)
1328 deref_size = this->addr_size;
1329 op_ptr += len;
1330 this->push_dwarf_reg_entry_value (CALL_SITE_PARAMETER_DWARF_REG,
1331 kind_u, deref_size);
1332 goto no_push;
1333 }
1334
1335 error (_("DWARF-2 expression error: DW_OP_GNU_entry_value is "
1336 "supported only for single DW_OP_reg* "
1337 "or for DW_OP_breg*(0)+DW_OP_deref*"));
1338 }
1339
1340 case DW_OP_GNU_parameter_ref:
1341 {
1342 union call_site_parameter_u kind_u;
1343
1344 kind_u.param_offset.cu_off = extract_unsigned_integer (op_ptr, 4,
1345 byte_order);
1346 op_ptr += 4;
1347 this->push_dwarf_reg_entry_value (CALL_SITE_PARAMETER_PARAM_OFFSET,
1348 kind_u,
1349 -1 /* deref_size */);
1350 }
1351 goto no_push;
1352
1353 case DW_OP_GNU_const_type:
1354 {
1355 cu_offset type_die;
1356 int n;
1357 const gdb_byte *data;
1358 struct type *type;
1359
1360 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
1361 type_die.cu_off = uoffset;
1362 n = *op_ptr++;
1363 data = op_ptr;
1364 op_ptr += n;
1365
1366 type = get_base_type (type_die, n);
1367 result_val = value_from_contents (type, data);
1368 }
1369 break;
1370
1371 case DW_OP_GNU_regval_type:
1372 {
1373 cu_offset type_die;
1374 struct type *type;
1375
1376 op_ptr = safe_read_uleb128 (op_ptr, op_end, &reg);
1377 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
1378 type_die.cu_off = uoffset;
1379
1380 type = get_base_type (type_die, 0);
1381 result_val = this->get_reg_value (type, reg);
1382 }
1383 break;
1384
1385 case DW_OP_GNU_convert:
1386 case DW_OP_GNU_reinterpret:
1387 {
1388 cu_offset type_die;
1389 struct type *type;
1390
1391 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
1392 type_die.cu_off = uoffset;
1393
1394 if (type_die.cu_off == 0)
1395 type = address_type;
1396 else
1397 type = get_base_type (type_die, 0);
1398
1399 result_val = fetch (0);
1400 pop ();
1401
1402 if (op == DW_OP_GNU_convert)
1403 result_val = value_cast (type, result_val);
1404 else if (type == value_type (result_val))
1405 {
1406 /* Nothing. */
1407 }
1408 else if (TYPE_LENGTH (type)
1409 != TYPE_LENGTH (value_type (result_val)))
1410 error (_("DW_OP_GNU_reinterpret has wrong size"));
1411 else
1412 result_val
1413 = value_from_contents (type,
1414 value_contents_all (result_val));
1415 }
1416 break;
1417
1418 case DW_OP_push_object_address:
1419 /* Return the address of the object we are currently observing. */
1420 result = this->get_object_address ();
1421 result_val = value_from_ulongest (address_type, result);
1422 break;
1423
1424 default:
1425 error (_("Unhandled dwarf expression opcode 0x%x"), op);
1426 }
1427
1428 /* Most things push a result value. */
1429 gdb_assert (result_val != NULL);
1430 push (result_val, in_stack_memory);
1431 no_push:
1432 ;
1433 }
1434
1435 /* To simplify our main caller, if the result is an implicit
1436 pointer, then make a pieced value. This is ok because we can't
1437 have implicit pointers in contexts where pieces are invalid. */
1438 if (this->location == DWARF_VALUE_IMPLICIT_POINTER)
1439 add_piece (8 * this->addr_size, 0);
1440
1441 abort_expression:
1442 this->recursion_depth--;
1443 gdb_assert (this->recursion_depth >= 0);
1444 }
1445
1446 /* Provide a prototype to silence -Wmissing-prototypes. */
1447 extern initialize_file_ftype _initialize_dwarf2expr;
1448
1449 void
1450 _initialize_dwarf2expr (void)
1451 {
1452 dwarf_arch_cookie
1453 = gdbarch_data_register_post_init (dwarf_gdbarch_types_init);
1454 }