gdb: introduce displaced_debug_printf
[deliverable/binutils-gdb.git] / gdb / arm-tdep.c
1 /* Common target dependent code for GDB on ARM systems.
2
3 Copyright (C) 1988-2020 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21
22 #include <ctype.h> /* XXX for isupper (). */
23
24 #include "frame.h"
25 #include "inferior.h"
26 #include "infrun.h"
27 #include "gdbcmd.h"
28 #include "gdbcore.h"
29 #include "dis-asm.h" /* For register styles. */
30 #include "disasm.h"
31 #include "regcache.h"
32 #include "reggroups.h"
33 #include "target-float.h"
34 #include "value.h"
35 #include "arch-utils.h"
36 #include "osabi.h"
37 #include "frame-unwind.h"
38 #include "frame-base.h"
39 #include "trad-frame.h"
40 #include "objfiles.h"
41 #include "dwarf2/frame.h"
42 #include "gdbtypes.h"
43 #include "prologue-value.h"
44 #include "remote.h"
45 #include "target-descriptions.h"
46 #include "user-regs.h"
47 #include "observable.h"
48 #include "count-one-bits.h"
49
50 #include "arch/arm.h"
51 #include "arch/arm-get-next-pcs.h"
52 #include "arm-tdep.h"
53 #include "gdb/sim-arm.h"
54
55 #include "elf-bfd.h"
56 #include "coff/internal.h"
57 #include "elf/arm.h"
58
59 #include "record.h"
60 #include "record-full.h"
61 #include <algorithm>
62
63 #include "producer.h"
64
65 #if GDB_SELF_TEST
66 #include "gdbsupport/selftest.h"
67 #endif
68
/* True when "set debug arm" is in effect; gates verbose logging in this
   file (e.g. the prologue-scan message in thumb_analyze_prologue).  */
static bool arm_debug;

/* Macros for setting and testing a bit in a minimal symbol that marks
   it as Thumb function.  The MSB of the minimal symbol's "info" field
   is used for this purpose.

   MSYMBOL_SET_SPECIAL	Actually sets the "special" bit.
   MSYMBOL_IS_SPECIAL	Tests the "special" bit in a minimal symbol.  */

#define MSYMBOL_SET_SPECIAL(msym)				\
	MSYMBOL_TARGET_FLAG_1 (msym) = 1

#define MSYMBOL_IS_SPECIAL(msym)				\
	MSYMBOL_TARGET_FLAG_1 (msym)
83
/* One ELF mapping symbol ($a, $d or $t), recording where a region of
   ARM code, data or Thumb code begins within a section.  */
struct arm_mapping_symbol
{
  /* Symbol address, relative to the start of its section.  */
  CORE_ADDR value;

  /* Mapping symbol type character (e.g. 't' marks the start of Thumb
     code — see arm_pc_is_thumb).  */
  char type;

  /* Order by address, so vectors of these can be sorted and
     binary-searched (see arm_find_mapping_symbol).  */
  bool operator< (const arm_mapping_symbol &other) const
  { return this->value < other.value; }
};

typedef std::vector<arm_mapping_symbol> arm_mapping_symbol_vec;
94
/* Per-BFD data holding the mapping symbols of every section.  */
struct arm_per_bfd
{
  /* NUM_SECTIONS is the number of BFD sections; one (initially empty,
     unsorted) vector is allocated per section.  */
  explicit arm_per_bfd (size_t num_sections)
  : section_maps (new arm_mapping_symbol_vec[num_sections]),
    section_maps_sorted (new bool[num_sections] ())
  {}

  DISABLE_COPY_AND_ASSIGN (arm_per_bfd);

  /* Information about mapping symbols ($a, $d, $t) in the objfile.

     The format is an array of vectors of arm_mapping_symbols, there is one
     vector for each section of the objfile (the array is index by BFD section
     index).

     For each section, the vector of arm_mapping_symbol is sorted by
     symbol value (address).  */
  std::unique_ptr<arm_mapping_symbol_vec[]> section_maps;

  /* For each corresponding element of section_maps above, is this vector
     sorted.  Sorting is done lazily, on first lookup
     (see arm_find_mapping_symbol).  */
  std::unique_ptr<bool[]> section_maps_sorted;
};
118
/* Per-bfd data used for mapping symbols.  */
static bfd_key<arm_per_bfd> arm_bfd_data_key;

/* The list of available "set arm ..." and "show arm ..." commands.  */
static struct cmd_list_element *setarmcmdlist = NULL;
static struct cmd_list_element *showarmcmdlist = NULL;

/* The type of floating-point to use.  Keep this in sync with enum
   arm_float_model, and the help string in _initialize_arm_tdep.  */
static const char *const fp_model_strings[] =
{
  "auto",
  "softfpa",
  "fpa",
  "softvfp",
  "vfp",
  NULL
};

/* A variable that can be configured by the user.  */
static enum arm_float_model arm_fp_model = ARM_FLOAT_AUTO;
static const char *current_fp_model = "auto";

/* The ABI to use.  Keep this in sync with arm_abi_kind.  */
static const char *const arm_abi_strings[] =
{
  "auto",
  "APCS",
  "AAPCS",
  NULL
};

/* A variable that can be configured by the user.  */
static enum arm_abi_kind arm_abi_global = ARM_ABI_AUTO;
static const char *arm_abi_string = "auto";

/* The execution mode to assume.  */
static const char *const arm_mode_strings[] =
{
  "auto",
  "arm",
  "thumb",
  NULL
};

/* User-settable ARM/Thumb mode overrides consulted by arm_pc_is_thumb;
   "auto" leaves the decision to symbol and register information.  */
static const char *arm_fallback_mode_string = "auto";
static const char *arm_force_mode_string = "auto";
166
/* The standard register names, and all the valid aliases for them.  Note
   that `fp', `sp' and `pc' are not added in this alias list, because they
   have been added as builtin user registers in
   std-regs.c:_initialize_frame_reg.  */
static const struct
{
  const char *name;
  int regnum;
} arm_register_aliases[] = {
  /* Basic register numbers.  */
  { "r0", 0 },
  { "r1", 1 },
  { "r2", 2 },
  { "r3", 3 },
  { "r4", 4 },
  { "r5", 5 },
  { "r6", 6 },
  { "r7", 7 },
  { "r8", 8 },
  { "r9", 9 },
  { "r10", 10 },
  { "r11", 11 },
  { "r12", 12 },
  { "r13", 13 },
  { "r14", 14 },
  { "r15", 15 },
  /* Synonyms (argument and variable registers).  */
  { "a1", 0 },
  { "a2", 1 },
  { "a3", 2 },
  { "a4", 3 },
  { "v1", 4 },
  { "v2", 5 },
  { "v3", 6 },
  { "v4", 7 },
  { "v5", 8 },
  { "v6", 9 },
  { "v7", 10 },
  { "v8", 11 },
  /* Other platform-specific names for r9.  */
  { "sb", 9 },
  { "tr", 9 },
  /* Special names.  */
  { "ip", 12 },
  { "lr", 14 },
  /* Names used by GCC (not listed in the ARM EABI).  */
  { "sl", 10 },
  /* A special name from the older ATPCS.  */
  { "wr", 7 },
};
217
/* Canonical names of the ARM core and FPA registers, indexed by GDB
   register number.  */
static const char *const arm_register_names[] =
{"r0",  "r1",  "r2",  "r3",	/*  0  1  2  3 */
 "r4",  "r5",  "r6",  "r7",	/*  4  5  6  7 */
 "r8",  "r9",  "r10", "r11",	/*  8  9 10 11 */
 "r12", "sp",  "lr",  "pc",	/* 12 13 14 15 */
 "f0",  "f1",  "f2",  "f3",	/* 16 17 18 19 */
 "f4",  "f5",  "f6",  "f7",	/* 20 21 22 23 */
 "fps", "cpsr" };		/* 24 25       */
226
/* Holds the current set of options to be passed to the disassembler.  */
static char *arm_disassembler_options;

/* Valid register name styles.  */
static const char **valid_disassembly_styles;

/* Disassembly style to use.  Default to "std" register names.  */
static const char *disassembly_style;

/* All possible arm target descriptors.  */
static struct target_desc *tdesc_arm_list[ARM_FP_TYPE_INVALID];
static struct target_desc *tdesc_arm_mprofile_list[ARM_M_TYPE_INVALID];

/* This is used to keep the bfd arch_info in sync with the disassembly
   style.  */
static void set_disassembly_style_sfunc (const char *, int,
					 struct cmd_list_element *);
static void show_disassembly_style_sfunc (struct ui_file *, int,
					  struct cmd_list_element *,
					  const char *);

/* Forward declarations for pseudo-register access helpers.  */
static enum register_status arm_neon_quad_read (struct gdbarch *gdbarch,
						readable_regcache *regcache,
						int regnum, gdb_byte *buf);
static void arm_neon_quad_write (struct gdbarch *gdbarch,
				 struct regcache *regcache,
				 int regnum, const gdb_byte *buf);

static CORE_ADDR
arm_get_next_pcs_syscall_next_pc (struct arm_get_next_pcs *self);


/* get_next_pcs operations.  */
static struct arm_get_next_pcs_ops arm_get_next_pcs_ops = {
  arm_get_next_pcs_read_memory_unsigned_integer,
  arm_get_next_pcs_syscall_next_pc,
  arm_get_next_pcs_addr_bits_remove,
  arm_get_next_pcs_is_thumb,
  NULL,
};
267
/* Cached results of analyzing a function prologue, used by the frame
   unwinders.  */
struct arm_prologue_cache
{
  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  */
  CORE_ADDR prev_sp;

  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */

  int framesize;

  /* The register used to hold the frame pointer for this frame.  */
  int framereg;

  /* Saved register offsets.  */
  struct trad_frame_saved_reg *saved_regs;
};
287
/* Forward declaration: the ARM-mode prologue analyzer defined later in
   this file.  */
static CORE_ADDR arm_analyze_prologue (struct gdbarch *gdbarch,
				       CORE_ADDR prologue_start,
				       CORE_ADDR prologue_end,
				       struct arm_prologue_cache *cache);

/* Architecture version for displaced stepping.  This effects the behaviour of
   certain instructions, and really should not be hard-wired.  */

#define DISPLACED_STEPPING_ARCH_VERSION		5

/* See arm-tdep.h.  */

bool arm_apcs_32 = true;
301
302 /* Return the bit mask in ARM_PS_REGNUM that indicates Thumb mode. */
303
304 int
305 arm_psr_thumb_bit (struct gdbarch *gdbarch)
306 {
307 if (gdbarch_tdep (gdbarch)->is_m)
308 return XPSR_T;
309 else
310 return CPSR_T;
311 }
312
313 /* Determine if the processor is currently executing in Thumb mode. */
314
315 int
316 arm_is_thumb (struct regcache *regcache)
317 {
318 ULONGEST cpsr;
319 ULONGEST t_bit = arm_psr_thumb_bit (regcache->arch ());
320
321 cpsr = regcache_raw_get_unsigned (regcache, ARM_PS_REGNUM);
322
323 return (cpsr & t_bit) != 0;
324 }
325
326 /* Determine if FRAME is executing in Thumb mode. */
327
328 int
329 arm_frame_is_thumb (struct frame_info *frame)
330 {
331 CORE_ADDR cpsr;
332 ULONGEST t_bit = arm_psr_thumb_bit (get_frame_arch (frame));
333
334 /* Every ARM frame unwinder can unwind the T bit of the CPSR, either
335 directly (from a signal frame or dummy frame) or by interpreting
336 the saved LR (from a prologue or DWARF frame). So consult it and
337 trust the unwinders. */
338 cpsr = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
339
340 return (cpsr & t_bit) != 0;
341 }
342
/* Search for the mapping symbol covering MEMADDR.  If one is found,
   return its type.  Otherwise, return 0.  If START is non-NULL,
   set *START to the location of the mapping symbol.  */

static char
arm_find_mapping_symbol (CORE_ADDR memaddr, CORE_ADDR *start)
{
  struct obj_section *sec;

  /* If there are mapping symbols, consult them.  */
  sec = find_pc_section (memaddr);
  if (sec != NULL)
    {
      arm_per_bfd *data = arm_bfd_data_key.get (sec->objfile->obfd);
      if (data != NULL)
	{
	  unsigned int section_idx = sec->the_bfd_section->index;
	  arm_mapping_symbol_vec &map
	    = data->section_maps[section_idx];

	  /* Sort the vector on first use.  */
	  if (!data->section_maps_sorted[section_idx])
	    {
	      std::sort (map.begin (), map.end ());
	      data->section_maps_sorted[section_idx] = true;
	    }

	  /* Mapping symbol values are section-relative, so convert
	     MEMADDR before searching.  */
	  struct arm_mapping_symbol map_key
	    = { memaddr - obj_section_addr (sec), 0 };
	  arm_mapping_symbol_vec::const_iterator it
	    = std::lower_bound (map.begin (), map.end (), map_key);

	  /* std::lower_bound finds the earliest ordered insertion
	     point.  If the symbol at this position starts at this exact
	     address, we use that; otherwise, the preceding
	     mapping symbol covers this address.  */
	  if (it < map.end ())
	    {
	      if (it->value == map_key.value)
		{
		  if (start)
		    *start = it->value + obj_section_addr (sec);
		  return it->type;
		}
	    }

	  if (it > map.begin ())
	    {
	      arm_mapping_symbol_vec::const_iterator prev_it
		= it - 1;

	      if (start)
		*start = prev_it->value + obj_section_addr (sec);
	      return prev_it->type;
	    }
	}
    }

  /* No section or no mapping symbol covers MEMADDR.  */
  return 0;
}
403
404 /* Determine if the program counter specified in MEMADDR is in a Thumb
405 function. This function should be called for addresses unrelated to
406 any executing frame; otherwise, prefer arm_frame_is_thumb. */
407
408 int
409 arm_pc_is_thumb (struct gdbarch *gdbarch, CORE_ADDR memaddr)
410 {
411 struct bound_minimal_symbol sym;
412 char type;
413 arm_displaced_step_closure *dsc
414 = ((arm_displaced_step_closure * )
415 get_displaced_step_closure_by_addr (memaddr));
416
417 /* If checking the mode of displaced instruction in copy area, the mode
418 should be determined by instruction on the original address. */
419 if (dsc)
420 {
421 displaced_debug_printf ("check mode of %.8lx instead of %.8lx",
422 (unsigned long) dsc->insn_addr,
423 (unsigned long) memaddr);
424 memaddr = dsc->insn_addr;
425 }
426
427 /* If bit 0 of the address is set, assume this is a Thumb address. */
428 if (IS_THUMB_ADDR (memaddr))
429 return 1;
430
431 /* If the user wants to override the symbol table, let him. */
432 if (strcmp (arm_force_mode_string, "arm") == 0)
433 return 0;
434 if (strcmp (arm_force_mode_string, "thumb") == 0)
435 return 1;
436
437 /* ARM v6-M and v7-M are always in Thumb mode. */
438 if (gdbarch_tdep (gdbarch)->is_m)
439 return 1;
440
441 /* If there are mapping symbols, consult them. */
442 type = arm_find_mapping_symbol (memaddr, NULL);
443 if (type)
444 return type == 't';
445
446 /* Thumb functions have a "special" bit set in minimal symbols. */
447 sym = lookup_minimal_symbol_by_pc (memaddr);
448 if (sym.minsym)
449 return (MSYMBOL_IS_SPECIAL (sym.minsym));
450
451 /* If the user wants to override the fallback mode, let them. */
452 if (strcmp (arm_fallback_mode_string, "arm") == 0)
453 return 0;
454 if (strcmp (arm_fallback_mode_string, "thumb") == 0)
455 return 1;
456
457 /* If we couldn't find any symbol, but we're talking to a running
458 target, then trust the current value of $cpsr. This lets
459 "display/i $pc" always show the correct mode (though if there is
460 a symbol table we will not reach here, so it still may not be
461 displayed in the mode it will be executed). */
462 if (target_has_registers ())
463 return arm_frame_is_thumb (get_current_frame ());
464
465 /* Otherwise we're out of luck; we assume ARM. */
466 return 0;
467 }
468
469 /* Determine if the address specified equals any of these magic return
470 values, called EXC_RETURN, defined by the ARM v6-M, v7-M and v8-M
471 architectures.
472
473 From ARMv6-M Reference Manual B1.5.8
474 Table B1-5 Exception return behavior
475
476 EXC_RETURN Return To Return Stack
477 0xFFFFFFF1 Handler mode Main
478 0xFFFFFFF9 Thread mode Main
479 0xFFFFFFFD Thread mode Process
480
481 From ARMv7-M Reference Manual B1.5.8
482 Table B1-8 EXC_RETURN definition of exception return behavior, no FP
483
484 EXC_RETURN Return To Return Stack
485 0xFFFFFFF1 Handler mode Main
486 0xFFFFFFF9 Thread mode Main
487 0xFFFFFFFD Thread mode Process
488
489 Table B1-9 EXC_RETURN definition of exception return behavior, with
490 FP
491
492 EXC_RETURN Return To Return Stack Frame Type
493 0xFFFFFFE1 Handler mode Main Extended
494 0xFFFFFFE9 Thread mode Main Extended
495 0xFFFFFFED Thread mode Process Extended
496 0xFFFFFFF1 Handler mode Main Basic
497 0xFFFFFFF9 Thread mode Main Basic
498 0xFFFFFFFD Thread mode Process Basic
499
500 For more details see "B1.5.8 Exception return behavior"
501 in both ARMv6-M and ARMv7-M Architecture Reference Manuals.
502
503 In the ARMv8-M Architecture Technical Reference also adds
504 for implementations without the Security Extension:
505
506 EXC_RETURN Condition
507 0xFFFFFFB0 Return to Handler mode.
508 0xFFFFFFB8 Return to Thread mode using the main stack.
509 0xFFFFFFBC Return to Thread mode using the process stack. */
510
511 static int
512 arm_m_addr_is_magic (CORE_ADDR addr)
513 {
514 switch (addr)
515 {
516 /* Values from ARMv8-M Architecture Technical Reference. */
517 case 0xffffffb0:
518 case 0xffffffb8:
519 case 0xffffffbc:
520 /* Values from Tables in B1.5.8 the EXC_RETURN definitions of
521 the exception return behavior. */
522 case 0xffffffe1:
523 case 0xffffffe9:
524 case 0xffffffed:
525 case 0xfffffff1:
526 case 0xfffffff9:
527 case 0xfffffffd:
528 /* Address is magic. */
529 return 1;
530
531 default:
532 /* Address is not magic. */
533 return 0;
534 }
535 }
536
/* Remove useless bits from addresses in a running program.  */
static CORE_ADDR
arm_addr_bits_remove (struct gdbarch *gdbarch, CORE_ADDR val)
{
  /* On M-profile devices, do not strip the low bit from EXC_RETURN
     (the magic exception return address).  */
  if (gdbarch_tdep (gdbarch)->is_m
      && arm_m_addr_is_magic (val))
    return val;

  if (arm_apcs_32)
    /* 32-bit addressing: only the Thumb bit needs to be cleared.  */
    return UNMAKE_THUMB_ADDR (val);
  else
    /* Older 26-bit addressing: keep only bits 2-25 of the address.  */
    return (val & 0x03fffffc);
}
552
/* Return 1 if PC is the start of a compiler helper function which
   can be safely ignored during prologue skipping.  IS_THUMB is true
   if the function is known to be a Thumb function due to the way it
   is being called.  */
static int
skip_prologue_function (struct gdbarch *gdbarch, CORE_ADDR pc, int is_thumb)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  struct bound_minimal_symbol msym;

  msym = lookup_minimal_symbol_by_pc (pc);
  if (msym.minsym != NULL
      && BMSYMBOL_VALUE_ADDRESS (msym) == pc
      && msym.minsym->linkage_name () != NULL)
    {
      const char *name = msym.minsym->linkage_name ();

      /* The GNU linker's Thumb call stub to foo is named
	 __foo_from_thumb.  Skip the leading "__" so the name checks
	 below still match.  */
      if (strstr (name, "_from_thumb") != NULL)
	name += 2;

      /* On soft-float targets, __truncdfsf2 is called to convert promoted
	 arguments to their argument types in non-prototyped
	 functions.  */
      if (startswith (name, "__truncdfsf2"))
	return 1;
      if (startswith (name, "__aeabi_d2f"))
	return 1;

      /* Internal functions related to thread-local storage.  */
      if (startswith (name, "__tls_get_addr"))
	return 1;
      if (startswith (name, "__aeabi_read_tp"))
	return 1;
    }
  else
    {
      /* If we run against a stripped glibc, we may be unable to identify
	 special functions by name.  Check for one important case,
	 __aeabi_read_tp, by comparing the *code* against the default
	 implementation (this is hand-written ARM assembler in glibc).  */

      if (!is_thumb
	  && read_code_unsigned_integer (pc, 4, byte_order_for_code)
	     == 0xe3e00a0f /* mov r0, #0xffff0fff */
	  && read_code_unsigned_integer (pc + 4, 4, byte_order_for_code)
	     == 0xe240f01f) /* sub pc, r0, #31 */
	return 1;
    }

  return 0;
}
606
/* Extract the immediate from instruction movw/movt of encoding T.  INSN1 is
   the first 16-bit of instruction, and INSN2 is the second 16-bit of
   instruction.  The immediate is assembled from the imm4:i:imm3:imm8
   fields of the two halfwords.  */
#define EXTRACT_MOVW_MOVT_IMM_T(insn1, insn2) \
  ((bits ((insn1), 0, 3) << 12)               \
   | (bits ((insn1), 10, 10) << 11)           \
   | (bits ((insn2), 12, 14) << 8)            \
   | bits ((insn2), 0, 7))

/* Extract the immediate from instruction movw/movt of encoding A.  INSN is
   the 32-bit instruction.  The immediate is assembled from the imm4:imm12
   fields.  */
#define EXTRACT_MOVW_MOVT_IMM_A(insn) \
  ((bits ((insn), 16, 19) << 12)      \
   | bits ((insn), 0, 11))
621
/* Decode immediate value; implements ThumbExpandImmediate pseudo-op.
   IMM is the 12-bit i:imm3:imm8 modified-immediate field; the result
   is the decoded 32-bit value.  */

static unsigned int
thumb_expand_immediate (unsigned int imm)
{
  unsigned int rotation = imm >> 7;

  /* When the top bits select a rotation, the value is an 8-bit
     constant (with its MSB forced to one) rotated right.  */
  if (rotation >= 8)
    return (0x80 | (imm & 0x7f)) << (32 - rotation);

  unsigned int byte = imm & 0xff;

  /* Otherwise the low byte is replicated according to bits 9:8.  */
  if (rotation < 2)
    return byte;				/* 0x000000XY */
  if (rotation < 4)
    return byte | (byte << 16);			/* 0x00XY00XY */
  if (rotation < 6)
    return (byte << 8) | (byte << 24);		/* 0xXY00XY00 */
  return byte | (byte << 8)
    | (byte << 16) | (byte << 24);		/* 0xXYXYXYXY */
}
645
/* Return 1 if the 16-bit Thumb instruction INSN restores SP in
   epilogue, 0 otherwise.  */

static int
thumb_instruction_restores_sp (unsigned short insn)
{
  /* mov sp, r7 */
  if (insn == 0x46bd)
    return 1;

  /* add sp, imm */
  if ((insn & 0xff80) == 0xb000)
    return 1;

  /* pop <registers> */
  if ((insn & 0xfe00) == 0xbc00)
    return 1;

  return 0;
}
656
657 /* Analyze a Thumb prologue, looking for a recognizable stack frame
658 and frame pointer. Scan until we encounter a store that could
659 clobber the stack frame unexpectedly, or an unknown instruction.
660 Return the last address which is definitely safe to skip for an
661 initial breakpoint. */
662
663 static CORE_ADDR
664 thumb_analyze_prologue (struct gdbarch *gdbarch,
665 CORE_ADDR start, CORE_ADDR limit,
666 struct arm_prologue_cache *cache)
667 {
668 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
669 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
670 int i;
671 pv_t regs[16];
672 CORE_ADDR offset;
673 CORE_ADDR unrecognized_pc = 0;
674
675 for (i = 0; i < 16; i++)
676 regs[i] = pv_register (i, 0);
677 pv_area stack (ARM_SP_REGNUM, gdbarch_addr_bit (gdbarch));
678
679 while (start < limit)
680 {
681 unsigned short insn;
682
683 insn = read_code_unsigned_integer (start, 2, byte_order_for_code);
684
685 if ((insn & 0xfe00) == 0xb400) /* push { rlist } */
686 {
687 int regno;
688 int mask;
689
690 if (stack.store_would_trash (regs[ARM_SP_REGNUM]))
691 break;
692
693 /* Bits 0-7 contain a mask for registers R0-R7. Bit 8 says
694 whether to save LR (R14). */
695 mask = (insn & 0xff) | ((insn & 0x100) << 6);
696
697 /* Calculate offsets of saved R0-R7 and LR. */
698 for (regno = ARM_LR_REGNUM; regno >= 0; regno--)
699 if (mask & (1 << regno))
700 {
701 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
702 -4);
703 stack.store (regs[ARM_SP_REGNUM], 4, regs[regno]);
704 }
705 }
706 else if ((insn & 0xff80) == 0xb080) /* sub sp, #imm */
707 {
708 offset = (insn & 0x7f) << 2; /* get scaled offset */
709 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
710 -offset);
711 }
712 else if (thumb_instruction_restores_sp (insn))
713 {
714 /* Don't scan past the epilogue. */
715 break;
716 }
717 else if ((insn & 0xf800) == 0xa800) /* add Rd, sp, #imm */
718 regs[bits (insn, 8, 10)] = pv_add_constant (regs[ARM_SP_REGNUM],
719 (insn & 0xff) << 2);
720 else if ((insn & 0xfe00) == 0x1c00 /* add Rd, Rn, #imm */
721 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM))
722 regs[bits (insn, 0, 2)] = pv_add_constant (regs[bits (insn, 3, 5)],
723 bits (insn, 6, 8));
724 else if ((insn & 0xf800) == 0x3000 /* add Rd, #imm */
725 && pv_is_register (regs[bits (insn, 8, 10)], ARM_SP_REGNUM))
726 regs[bits (insn, 8, 10)] = pv_add_constant (regs[bits (insn, 8, 10)],
727 bits (insn, 0, 7));
728 else if ((insn & 0xfe00) == 0x1800 /* add Rd, Rn, Rm */
729 && pv_is_register (regs[bits (insn, 6, 8)], ARM_SP_REGNUM)
730 && pv_is_constant (regs[bits (insn, 3, 5)]))
731 regs[bits (insn, 0, 2)] = pv_add (regs[bits (insn, 3, 5)],
732 regs[bits (insn, 6, 8)]);
733 else if ((insn & 0xff00) == 0x4400 /* add Rd, Rm */
734 && pv_is_constant (regs[bits (insn, 3, 6)]))
735 {
736 int rd = (bit (insn, 7) << 3) + bits (insn, 0, 2);
737 int rm = bits (insn, 3, 6);
738 regs[rd] = pv_add (regs[rd], regs[rm]);
739 }
740 else if ((insn & 0xff00) == 0x4600) /* mov hi, lo or mov lo, hi */
741 {
742 int dst_reg = (insn & 0x7) + ((insn & 0x80) >> 4);
743 int src_reg = (insn & 0x78) >> 3;
744 regs[dst_reg] = regs[src_reg];
745 }
746 else if ((insn & 0xf800) == 0x9000) /* str rd, [sp, #off] */
747 {
748 /* Handle stores to the stack. Normally pushes are used,
749 but with GCC -mtpcs-frame, there may be other stores
750 in the prologue to create the frame. */
751 int regno = (insn >> 8) & 0x7;
752 pv_t addr;
753
754 offset = (insn & 0xff) << 2;
755 addr = pv_add_constant (regs[ARM_SP_REGNUM], offset);
756
757 if (stack.store_would_trash (addr))
758 break;
759
760 stack.store (addr, 4, regs[regno]);
761 }
762 else if ((insn & 0xf800) == 0x6000) /* str rd, [rn, #off] */
763 {
764 int rd = bits (insn, 0, 2);
765 int rn = bits (insn, 3, 5);
766 pv_t addr;
767
768 offset = bits (insn, 6, 10) << 2;
769 addr = pv_add_constant (regs[rn], offset);
770
771 if (stack.store_would_trash (addr))
772 break;
773
774 stack.store (addr, 4, regs[rd]);
775 }
776 else if (((insn & 0xf800) == 0x7000 /* strb Rd, [Rn, #off] */
777 || (insn & 0xf800) == 0x8000) /* strh Rd, [Rn, #off] */
778 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM))
779 /* Ignore stores of argument registers to the stack. */
780 ;
781 else if ((insn & 0xf800) == 0xc800 /* ldmia Rn!, { registers } */
782 && pv_is_register (regs[bits (insn, 8, 10)], ARM_SP_REGNUM))
783 /* Ignore block loads from the stack, potentially copying
784 parameters from memory. */
785 ;
786 else if ((insn & 0xf800) == 0x9800 /* ldr Rd, [Rn, #immed] */
787 || ((insn & 0xf800) == 0x6800 /* ldr Rd, [sp, #immed] */
788 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM)))
789 /* Similarly ignore single loads from the stack. */
790 ;
791 else if ((insn & 0xffc0) == 0x0000 /* lsls Rd, Rm, #0 */
792 || (insn & 0xffc0) == 0x1c00) /* add Rd, Rn, #0 */
793 /* Skip register copies, i.e. saves to another register
794 instead of the stack. */
795 ;
796 else if ((insn & 0xf800) == 0x2000) /* movs Rd, #imm */
797 /* Recognize constant loads; even with small stacks these are necessary
798 on Thumb. */
799 regs[bits (insn, 8, 10)] = pv_constant (bits (insn, 0, 7));
800 else if ((insn & 0xf800) == 0x4800) /* ldr Rd, [pc, #imm] */
801 {
802 /* Constant pool loads, for the same reason. */
803 unsigned int constant;
804 CORE_ADDR loc;
805
806 loc = start + 4 + bits (insn, 0, 7) * 4;
807 constant = read_memory_unsigned_integer (loc, 4, byte_order);
808 regs[bits (insn, 8, 10)] = pv_constant (constant);
809 }
810 else if (thumb_insn_size (insn) == 4) /* 32-bit Thumb-2 instructions. */
811 {
812 unsigned short inst2;
813
814 inst2 = read_code_unsigned_integer (start + 2, 2,
815 byte_order_for_code);
816
817 if ((insn & 0xf800) == 0xf000 && (inst2 & 0xe800) == 0xe800)
818 {
819 /* BL, BLX. Allow some special function calls when
820 skipping the prologue; GCC generates these before
821 storing arguments to the stack. */
822 CORE_ADDR nextpc;
823 int j1, j2, imm1, imm2;
824
825 imm1 = sbits (insn, 0, 10);
826 imm2 = bits (inst2, 0, 10);
827 j1 = bit (inst2, 13);
828 j2 = bit (inst2, 11);
829
830 offset = ((imm1 << 12) + (imm2 << 1));
831 offset ^= ((!j2) << 22) | ((!j1) << 23);
832
833 nextpc = start + 4 + offset;
834 /* For BLX make sure to clear the low bits. */
835 if (bit (inst2, 12) == 0)
836 nextpc = nextpc & 0xfffffffc;
837
838 if (!skip_prologue_function (gdbarch, nextpc,
839 bit (inst2, 12) != 0))
840 break;
841 }
842
843 else if ((insn & 0xffd0) == 0xe900 /* stmdb Rn{!},
844 { registers } */
845 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
846 {
847 pv_t addr = regs[bits (insn, 0, 3)];
848 int regno;
849
850 if (stack.store_would_trash (addr))
851 break;
852
853 /* Calculate offsets of saved registers. */
854 for (regno = ARM_LR_REGNUM; regno >= 0; regno--)
855 if (inst2 & (1 << regno))
856 {
857 addr = pv_add_constant (addr, -4);
858 stack.store (addr, 4, regs[regno]);
859 }
860
861 if (insn & 0x0020)
862 regs[bits (insn, 0, 3)] = addr;
863 }
864
865 else if ((insn & 0xff50) == 0xe940 /* strd Rt, Rt2,
866 [Rn, #+/-imm]{!} */
867 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
868 {
869 int regno1 = bits (inst2, 12, 15);
870 int regno2 = bits (inst2, 8, 11);
871 pv_t addr = regs[bits (insn, 0, 3)];
872
873 offset = inst2 & 0xff;
874 if (insn & 0x0080)
875 addr = pv_add_constant (addr, offset);
876 else
877 addr = pv_add_constant (addr, -offset);
878
879 if (stack.store_would_trash (addr))
880 break;
881
882 stack.store (addr, 4, regs[regno1]);
883 stack.store (pv_add_constant (addr, 4),
884 4, regs[regno2]);
885
886 if (insn & 0x0020)
887 regs[bits (insn, 0, 3)] = addr;
888 }
889
890 else if ((insn & 0xfff0) == 0xf8c0 /* str Rt,[Rn,+/-#imm]{!} */
891 && (inst2 & 0x0c00) == 0x0c00
892 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
893 {
894 int regno = bits (inst2, 12, 15);
895 pv_t addr = regs[bits (insn, 0, 3)];
896
897 offset = inst2 & 0xff;
898 if (inst2 & 0x0200)
899 addr = pv_add_constant (addr, offset);
900 else
901 addr = pv_add_constant (addr, -offset);
902
903 if (stack.store_would_trash (addr))
904 break;
905
906 stack.store (addr, 4, regs[regno]);
907
908 if (inst2 & 0x0100)
909 regs[bits (insn, 0, 3)] = addr;
910 }
911
912 else if ((insn & 0xfff0) == 0xf8c0 /* str.w Rt,[Rn,#imm] */
913 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
914 {
915 int regno = bits (inst2, 12, 15);
916 pv_t addr;
917
918 offset = inst2 & 0xfff;
919 addr = pv_add_constant (regs[bits (insn, 0, 3)], offset);
920
921 if (stack.store_would_trash (addr))
922 break;
923
924 stack.store (addr, 4, regs[regno]);
925 }
926
927 else if ((insn & 0xffd0) == 0xf880 /* str{bh}.w Rt,[Rn,#imm] */
928 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
929 /* Ignore stores of argument registers to the stack. */
930 ;
931
932 else if ((insn & 0xffd0) == 0xf800 /* str{bh} Rt,[Rn,#+/-imm] */
933 && (inst2 & 0x0d00) == 0x0c00
934 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
935 /* Ignore stores of argument registers to the stack. */
936 ;
937
938 else if ((insn & 0xffd0) == 0xe890 /* ldmia Rn[!],
939 { registers } */
940 && (inst2 & 0x8000) == 0x0000
941 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
942 /* Ignore block loads from the stack, potentially copying
943 parameters from memory. */
944 ;
945
946 else if ((insn & 0xff70) == 0xe950 /* ldrd Rt, Rt2,
947 [Rn, #+/-imm] */
948 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
949 /* Similarly ignore dual loads from the stack. */
950 ;
951
952 else if ((insn & 0xfff0) == 0xf850 /* ldr Rt,[Rn,#+/-imm] */
953 && (inst2 & 0x0d00) == 0x0c00
954 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
955 /* Similarly ignore single loads from the stack. */
956 ;
957
958 else if ((insn & 0xfff0) == 0xf8d0 /* ldr.w Rt,[Rn,#imm] */
959 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
960 /* Similarly ignore single loads from the stack. */
961 ;
962
963 else if ((insn & 0xfbf0) == 0xf100 /* add.w Rd, Rn, #imm */
964 && (inst2 & 0x8000) == 0x0000)
965 {
966 unsigned int imm = ((bits (insn, 10, 10) << 11)
967 | (bits (inst2, 12, 14) << 8)
968 | bits (inst2, 0, 7));
969
970 regs[bits (inst2, 8, 11)]
971 = pv_add_constant (regs[bits (insn, 0, 3)],
972 thumb_expand_immediate (imm));
973 }
974
975 else if ((insn & 0xfbf0) == 0xf200 /* addw Rd, Rn, #imm */
976 && (inst2 & 0x8000) == 0x0000)
977 {
978 unsigned int imm = ((bits (insn, 10, 10) << 11)
979 | (bits (inst2, 12, 14) << 8)
980 | bits (inst2, 0, 7));
981
982 regs[bits (inst2, 8, 11)]
983 = pv_add_constant (regs[bits (insn, 0, 3)], imm);
984 }
985
986 else if ((insn & 0xfbf0) == 0xf1a0 /* sub.w Rd, Rn, #imm */
987 && (inst2 & 0x8000) == 0x0000)
988 {
989 unsigned int imm = ((bits (insn, 10, 10) << 11)
990 | (bits (inst2, 12, 14) << 8)
991 | bits (inst2, 0, 7));
992
993 regs[bits (inst2, 8, 11)]
994 = pv_add_constant (regs[bits (insn, 0, 3)],
995 - (CORE_ADDR) thumb_expand_immediate (imm));
996 }
997
998 else if ((insn & 0xfbf0) == 0xf2a0 /* subw Rd, Rn, #imm */
999 && (inst2 & 0x8000) == 0x0000)
1000 {
1001 unsigned int imm = ((bits (insn, 10, 10) << 11)
1002 | (bits (inst2, 12, 14) << 8)
1003 | bits (inst2, 0, 7));
1004
1005 regs[bits (inst2, 8, 11)]
1006 = pv_add_constant (regs[bits (insn, 0, 3)], - (CORE_ADDR) imm);
1007 }
1008
1009 else if ((insn & 0xfbff) == 0xf04f) /* mov.w Rd, #const */
1010 {
1011 unsigned int imm = ((bits (insn, 10, 10) << 11)
1012 | (bits (inst2, 12, 14) << 8)
1013 | bits (inst2, 0, 7));
1014
1015 regs[bits (inst2, 8, 11)]
1016 = pv_constant (thumb_expand_immediate (imm));
1017 }
1018
1019 else if ((insn & 0xfbf0) == 0xf240) /* movw Rd, #const */
1020 {
1021 unsigned int imm
1022 = EXTRACT_MOVW_MOVT_IMM_T (insn, inst2);
1023
1024 regs[bits (inst2, 8, 11)] = pv_constant (imm);
1025 }
1026
1027 else if (insn == 0xea5f /* mov.w Rd,Rm */
1028 && (inst2 & 0xf0f0) == 0)
1029 {
1030 int dst_reg = (inst2 & 0x0f00) >> 8;
1031 int src_reg = inst2 & 0xf;
1032 regs[dst_reg] = regs[src_reg];
1033 }
1034
1035 else if ((insn & 0xff7f) == 0xf85f) /* ldr.w Rt,<label> */
1036 {
1037 /* Constant pool loads. */
1038 unsigned int constant;
1039 CORE_ADDR loc;
1040
1041 offset = bits (inst2, 0, 11);
1042 if (insn & 0x0080)
1043 loc = start + 4 + offset;
1044 else
1045 loc = start + 4 - offset;
1046
1047 constant = read_memory_unsigned_integer (loc, 4, byte_order);
1048 regs[bits (inst2, 12, 15)] = pv_constant (constant);
1049 }
1050
1051 else if ((insn & 0xff7f) == 0xe95f) /* ldrd Rt,Rt2,<label> */
1052 {
1053 /* Constant pool loads. */
1054 unsigned int constant;
1055 CORE_ADDR loc;
1056
1057 offset = bits (inst2, 0, 7) << 2;
1058 if (insn & 0x0080)
1059 loc = start + 4 + offset;
1060 else
1061 loc = start + 4 - offset;
1062
1063 constant = read_memory_unsigned_integer (loc, 4, byte_order);
1064 regs[bits (inst2, 12, 15)] = pv_constant (constant);
1065
1066 constant = read_memory_unsigned_integer (loc + 4, 4, byte_order);
1067 regs[bits (inst2, 8, 11)] = pv_constant (constant);
1068 }
1069
1070 else if (thumb2_instruction_changes_pc (insn, inst2))
1071 {
1072 /* Don't scan past anything that might change control flow. */
1073 break;
1074 }
1075 else
1076 {
1077 /* The optimizer might shove anything into the prologue,
1078 so we just skip what we don't recognize. */
1079 unrecognized_pc = start;
1080 }
1081
1082 start += 2;
1083 }
1084 else if (thumb_instruction_changes_pc (insn))
1085 {
1086 /* Don't scan past anything that might change control flow. */
1087 break;
1088 }
1089 else
1090 {
1091 /* The optimizer might shove anything into the prologue,
1092 so we just skip what we don't recognize. */
1093 unrecognized_pc = start;
1094 }
1095
1096 start += 2;
1097 }
1098
1099 if (arm_debug)
1100 fprintf_unfiltered (gdb_stdlog, "Prologue scan stopped at %s\n",
1101 paddress (gdbarch, start));
1102
1103 if (unrecognized_pc == 0)
1104 unrecognized_pc = start;
1105
1106 if (cache == NULL)
1107 return unrecognized_pc;
1108
1109 if (pv_is_register (regs[ARM_FP_REGNUM], ARM_SP_REGNUM))
1110 {
1111 /* Frame pointer is fp. Frame size is constant. */
1112 cache->framereg = ARM_FP_REGNUM;
1113 cache->framesize = -regs[ARM_FP_REGNUM].k;
1114 }
1115 else if (pv_is_register (regs[THUMB_FP_REGNUM], ARM_SP_REGNUM))
1116 {
1117 /* Frame pointer is r7. Frame size is constant. */
1118 cache->framereg = THUMB_FP_REGNUM;
1119 cache->framesize = -regs[THUMB_FP_REGNUM].k;
1120 }
1121 else
1122 {
1123 /* Try the stack pointer... this is a bit desperate. */
1124 cache->framereg = ARM_SP_REGNUM;
1125 cache->framesize = -regs[ARM_SP_REGNUM].k;
1126 }
1127
1128 for (i = 0; i < 16; i++)
1129 if (stack.find_reg (gdbarch, i, &offset))
1130 cache->saved_regs[i].addr = offset;
1131
1132 return unrecognized_pc;
1133 }
1134
1135
1136 /* Try to analyze the instructions starting from PC, which load symbol
1137 __stack_chk_guard. Return the address of instruction after loading this
   symbol, set the dest register number to *DESTREG, and set the size of
1139 instructions for loading symbol in OFFSET. Return 0 if instructions are
1140 not recognized. */
1141
static CORE_ADDR
arm_analyze_load_stack_chk_guard(CORE_ADDR pc, struct gdbarch *gdbarch,
				 unsigned int *destreg, int *offset)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  int is_thumb = arm_pc_is_thumb (gdbarch, pc);
  unsigned int low, high, address;

  /* ADDRESS stays 0 (the "not recognized" return value) unless one of
     the known load sequences below matches.  */
  address = 0;
  if (is_thumb)
    {
      unsigned short insn1
	= read_code_unsigned_integer (pc, 2, byte_order_for_code);

      if ((insn1 & 0xf800) == 0x4800)	/* ldr Rd, #immed */
	{
	  *destreg = bits (insn1, 8, 10);
	  *offset = 2;
	  /* PC-relative literal load: the base is the current PC
	     aligned down to a word boundary, plus the Thumb pipeline
	     offset of 4; the value loaded is the symbol's address.  */
	  address = (pc & 0xfffffffc) + 4 + (bits (insn1, 0, 7) << 2);
	  address = read_memory_unsigned_integer (address, 4,
						  byte_order_for_code);
	}
      else if ((insn1 & 0xfbf0) == 0xf240)	/* movw Rd, #const */
	{
	  /* 32-bit Thumb-2 encoding: the immediate is split across the
	     two halfwords of the instruction.  */
	  unsigned short insn2
	    = read_code_unsigned_integer (pc + 2, 2, byte_order_for_code);

	  low = EXTRACT_MOVW_MOVT_IMM_T (insn1, insn2);

	  /* Look for the matching "movt Rd, #const" that supplies the
	     upper halfword of the symbol address.  */
	  insn1
	    = read_code_unsigned_integer (pc + 4, 2, byte_order_for_code);
	  insn2
	    = read_code_unsigned_integer (pc + 6, 2, byte_order_for_code);

	  /* movt Rd, #const */
	  if ((insn1 & 0xfbc0) == 0xf2c0)
	    {
	      high = EXTRACT_MOVW_MOVT_IMM_T (insn1, insn2);
	      *destreg = bits (insn2, 8, 11);
	      *offset = 8;
	      address = (high << 16 | low);
	    }
	}
    }
  else
    {
      unsigned int insn
	= read_code_unsigned_integer (pc, 4, byte_order_for_code);

      if ((insn & 0x0e5f0000) == 0x041f0000)	/* ldr Rd, [PC, #immed] */
	{
	  /* ARM-mode literal load; the pipeline offset is 8.  */
	  address = bits (insn, 0, 11) + pc + 8;
	  address = read_memory_unsigned_integer (address, 4,
						  byte_order_for_code);

	  *destreg = bits (insn, 12, 15);
	  *offset = 4;
	}
      else if ((insn & 0x0ff00000) == 0x03000000)	/* movw Rd, #const */
	{
	  low = EXTRACT_MOVW_MOVT_IMM_A (insn);

	  /* Look for the matching "movt Rd, #const".  */
	  insn
	    = read_code_unsigned_integer (pc + 4, 4, byte_order_for_code);

	  if ((insn & 0x0ff00000) == 0x03400000)	/* movt Rd, #const */
	    {
	      high = EXTRACT_MOVW_MOVT_IMM_A (insn);
	      *destreg = bits (insn, 12, 15);
	      *offset = 8;
	      address = (high << 16 | low);
	    }
	}
    }

  return address;
}
1219
1220 /* Try to skip a sequence of instructions used for stack protector. If PC
1221 points to the first instruction of this sequence, return the address of
1222 first instruction after this sequence, otherwise, return original PC.
1223
1224 On arm, this sequence of instructions is composed of mainly three steps,
1225 Step 1: load symbol __stack_chk_guard,
1226 Step 2: load from address of __stack_chk_guard,
1227 Step 3: store it to somewhere else.
1228
1229 Usually, instructions on step 2 and step 3 are the same on various ARM
1230 architectures. On step 2, it is one instruction 'ldr Rx, [Rn, #0]', and
1231 on step 3, it is also one instruction 'str Rx, [r7, #immd]'. However,
1232 instructions in step 1 vary from different ARM architectures. On ARMv7,
1233 they are,
1234
1235 movw Rn, #:lower16:__stack_chk_guard
1236 movt Rn, #:upper16:__stack_chk_guard
1237
1238 On ARMv5t, it is,
1239
1240 ldr Rn, .Label
1241 ....
   .Label:
1243 .word __stack_chk_guard
1244
1245 Since ldr/str is a very popular instruction, we can't use them as
1246 'fingerprint' or 'signature' of stack protector sequence. Here we choose
1247 sequence {movw/movt, ldr}/ldr/str plus symbol __stack_chk_guard, if not
   stripped, as the 'fingerprint' of a stack protector code sequence.  */
1249
static CORE_ADDR
arm_skip_stack_protector(CORE_ADDR pc, struct gdbarch *gdbarch)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  unsigned int basereg;
  struct bound_minimal_symbol stack_chk_guard;
  int offset;
  int is_thumb = arm_pc_is_thumb (gdbarch, pc);
  CORE_ADDR addr;

  /* Try to parse the instructions in Step 1.  */
  addr = arm_analyze_load_stack_chk_guard (pc, gdbarch,
					   &basereg, &offset);
  if (!addr)
    return pc;

  stack_chk_guard = lookup_minimal_symbol_by_pc (addr);
  /* ADDR must correspond to a symbol whose name is __stack_chk_guard.
     Otherwise, this sequence cannot be for stack protector.
     NOTE: startswith rather than an exact match -- presumably to allow
     decorated variants of the symbol name; confirm if tightening.  */
  if (stack_chk_guard.minsym == NULL
      || !startswith (stack_chk_guard.minsym->linkage_name (), "__stack_chk_guard"))
    return pc;

  if (is_thumb)
    {
      unsigned int destreg;
      unsigned short insn
	= read_code_unsigned_integer (pc + offset, 2, byte_order_for_code);

      /* Step 2: ldr Rd, [Rn, #immed], encoding T1.  */
      if ((insn & 0xf800) != 0x6800)
	return pc;
      /* The load must read through the register that Step 1 set up.  */
      if (bits (insn, 3, 5) != basereg)
	return pc;
      destreg = bits (insn, 0, 2);

      insn = read_code_unsigned_integer (pc + offset + 2, 2,
					 byte_order_for_code);
      /* Step 3: str Rd, [Rn, #immed], encoding T1.  */
      if ((insn & 0xf800) != 0x6000)
	return pc;
      /* The store must write the value just loaded in Step 2.  */
      if (destreg != bits (insn, 0, 2))
	return pc;
    }
  else
    {
      unsigned int destreg;
      unsigned int insn
	= read_code_unsigned_integer (pc + offset, 4, byte_order_for_code);

      /* Step 2: ldr Rd, [Rn, #immed], encoding A1.  */
      if ((insn & 0x0e500000) != 0x04100000)
	return pc;
      /* The load must read through the register that Step 1 set up.  */
      if (bits (insn, 16, 19) != basereg)
	return pc;
      destreg = bits (insn, 12, 15);
      /* Step 3: str Rd, [Rn, #immed], encoding A1.  */
      insn = read_code_unsigned_integer (pc + offset + 4,
					 4, byte_order_for_code);
      if ((insn & 0x0e500000) != 0x04000000)
	return pc;
      /* The store must write the value just loaded in Step 2.  */
      if (bits (insn, 12, 15) != destreg)
	return pc;
    }
  /* The size of total two instructions ldr/str is 4 on Thumb-2, while 8
     on arm.  */
  if (is_thumb)
    return pc + offset + 4;
  else
    return pc + offset + 8;
}
1321
1322 /* Advance the PC across any function entry prologue instructions to
1323 reach some "real" code.
1324
1325 The APCS (ARM Procedure Call Standard) defines the following
1326 prologue:
1327
1328 mov ip, sp
1329 [stmfd sp!, {a1,a2,a3,a4}]
1330 stmfd sp!, {...,fp,ip,lr,pc}
1331 [stfe f7, [sp, #-12]!]
1332 [stfe f6, [sp, #-12]!]
1333 [stfe f5, [sp, #-12]!]
1334 [stfe f4, [sp, #-12]!]
1335 sub fp, ip, #nn @@ nn == 20 or 4 depending on second insn. */
1336
1337 static CORE_ADDR
1338 arm_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
1339 {
1340 CORE_ADDR func_addr, limit_pc;
1341
1342 /* See if we can determine the end of the prologue via the symbol table.
1343 If so, then return either PC, or the PC after the prologue, whichever
1344 is greater. */
1345 if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
1346 {
1347 CORE_ADDR post_prologue_pc
1348 = skip_prologue_using_sal (gdbarch, func_addr);
1349 struct compunit_symtab *cust = find_pc_compunit_symtab (func_addr);
1350
1351 if (post_prologue_pc)
1352 post_prologue_pc
1353 = arm_skip_stack_protector (post_prologue_pc, gdbarch);
1354
1355
1356 /* GCC always emits a line note before the prologue and another
1357 one after, even if the two are at the same address or on the
1358 same line. Take advantage of this so that we do not need to
1359 know every instruction that might appear in the prologue. We
1360 will have producer information for most binaries; if it is
1361 missing (e.g. for -gstabs), assuming the GNU tools. */
1362 if (post_prologue_pc
1363 && (cust == NULL
1364 || COMPUNIT_PRODUCER (cust) == NULL
1365 || startswith (COMPUNIT_PRODUCER (cust), "GNU ")
1366 || producer_is_llvm (COMPUNIT_PRODUCER (cust))))
1367 return post_prologue_pc;
1368
1369 if (post_prologue_pc != 0)
1370 {
1371 CORE_ADDR analyzed_limit;
1372
1373 /* For non-GCC compilers, make sure the entire line is an
1374 acceptable prologue; GDB will round this function's
1375 return value up to the end of the following line so we
1376 can not skip just part of a line (and we do not want to).
1377
1378 RealView does not treat the prologue specially, but does
1379 associate prologue code with the opening brace; so this
1380 lets us skip the first line if we think it is the opening
1381 brace. */
1382 if (arm_pc_is_thumb (gdbarch, func_addr))
1383 analyzed_limit = thumb_analyze_prologue (gdbarch, func_addr,
1384 post_prologue_pc, NULL);
1385 else
1386 analyzed_limit = arm_analyze_prologue (gdbarch, func_addr,
1387 post_prologue_pc, NULL);
1388
1389 if (analyzed_limit != post_prologue_pc)
1390 return func_addr;
1391
1392 return post_prologue_pc;
1393 }
1394 }
1395
1396 /* Can't determine prologue from the symbol table, need to examine
1397 instructions. */
1398
1399 /* Find an upper limit on the function prologue using the debug
1400 information. If the debug information could not be used to provide
1401 that bound, then use an arbitrary large number as the upper bound. */
1402 /* Like arm_scan_prologue, stop no later than pc + 64. */
1403 limit_pc = skip_prologue_using_sal (gdbarch, pc);
1404 if (limit_pc == 0)
1405 limit_pc = pc + 64; /* Magic. */
1406
1407
1408 /* Check if this is Thumb code. */
1409 if (arm_pc_is_thumb (gdbarch, pc))
1410 return thumb_analyze_prologue (gdbarch, pc, limit_pc, NULL);
1411 else
1412 return arm_analyze_prologue (gdbarch, pc, limit_pc, NULL);
1413 }
1414
1415 /* *INDENT-OFF* */
1416 /* Function: thumb_scan_prologue (helper function for arm_scan_prologue)
1417 This function decodes a Thumb function prologue to determine:
1418 1) the size of the stack frame
1419 2) which registers are saved on it
1420 3) the offsets of saved regs
1421 4) the offset from the stack pointer to the frame pointer
1422
1423 A typical Thumb function prologue would create this stack frame
1424 (offsets relative to FP)
1425 old SP -> 24 stack parameters
1426 20 LR
1427 16 R7
1428 R7 -> 0 local variables (16 bytes)
1429 SP -> -12 additional stack space (12 bytes)
1430 The frame size would thus be 36 bytes, and the frame offset would be
1431 12 bytes. The frame register is R7.
1432
1433 The comments for thumb_skip_prolog() describe the algorithm we use
1434 to detect the end of the prolog. */
1435 /* *INDENT-ON* */
1436
1437 static void
1438 thumb_scan_prologue (struct gdbarch *gdbarch, CORE_ADDR prev_pc,
1439 CORE_ADDR block_addr, struct arm_prologue_cache *cache)
1440 {
1441 CORE_ADDR prologue_start;
1442 CORE_ADDR prologue_end;
1443
1444 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
1445 &prologue_end))
1446 {
1447 /* See comment in arm_scan_prologue for an explanation of
1448 this heuristics. */
1449 if (prologue_end > prologue_start + 64)
1450 {
1451 prologue_end = prologue_start + 64;
1452 }
1453 }
1454 else
1455 /* We're in the boondocks: we have no idea where the start of the
1456 function is. */
1457 return;
1458
1459 prologue_end = std::min (prologue_end, prev_pc);
1460
1461 thumb_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
1462 }
1463
1464 /* Return 1 if the ARM instruction INSN restores SP in epilogue, 0
1465 otherwise. */
1466
1467 static int
1468 arm_instruction_restores_sp (unsigned int insn)
1469 {
1470 if (bits (insn, 28, 31) != INST_NV)
1471 {
1472 if ((insn & 0x0df0f000) == 0x0080d000
1473 /* ADD SP (register or immediate). */
1474 || (insn & 0x0df0f000) == 0x0040d000
1475 /* SUB SP (register or immediate). */
1476 || (insn & 0x0ffffff0) == 0x01a0d000
1477 /* MOV SP. */
1478 || (insn & 0x0fff0000) == 0x08bd0000
1479 /* POP (LDMIA). */
1480 || (insn & 0x0fff0000) == 0x049d0000)
1481 /* POP of a single register. */
1482 return 1;
1483 }
1484
1485 return 0;
1486 }
1487
1488 /* Analyze an ARM mode prologue starting at PROLOGUE_START and
1489 continuing no further than PROLOGUE_END. If CACHE is non-NULL,
1490 fill it in. Return the first address not recognized as a prologue
1491 instruction.
1492
1493 We recognize all the instructions typically found in ARM prologues,
1494 plus harmless instructions which can be skipped (either for analysis
1495 purposes, or a more restrictive set that can be skipped when finding
1496 the end of the prologue). */
1497
1498 static CORE_ADDR
1499 arm_analyze_prologue (struct gdbarch *gdbarch,
1500 CORE_ADDR prologue_start, CORE_ADDR prologue_end,
1501 struct arm_prologue_cache *cache)
1502 {
1503 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1504 int regno;
1505 CORE_ADDR offset, current_pc;
1506 pv_t regs[ARM_FPS_REGNUM];
1507 CORE_ADDR unrecognized_pc = 0;
1508
1509 /* Search the prologue looking for instructions that set up the
1510 frame pointer, adjust the stack pointer, and save registers.
1511
1512 Be careful, however, and if it doesn't look like a prologue,
1513 don't try to scan it. If, for instance, a frameless function
1514 begins with stmfd sp!, then we will tell ourselves there is
1515 a frame, which will confuse stack traceback, as well as "finish"
1516 and other operations that rely on a knowledge of the stack
1517 traceback. */
1518
1519 for (regno = 0; regno < ARM_FPS_REGNUM; regno++)
1520 regs[regno] = pv_register (regno, 0);
1521 pv_area stack (ARM_SP_REGNUM, gdbarch_addr_bit (gdbarch));
1522
1523 for (current_pc = prologue_start;
1524 current_pc < prologue_end;
1525 current_pc += 4)
1526 {
1527 unsigned int insn
1528 = read_code_unsigned_integer (current_pc, 4, byte_order_for_code);
1529
1530 if (insn == 0xe1a0c00d) /* mov ip, sp */
1531 {
1532 regs[ARM_IP_REGNUM] = regs[ARM_SP_REGNUM];
1533 continue;
1534 }
1535 else if ((insn & 0xfff00000) == 0xe2800000 /* add Rd, Rn, #n */
1536 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1537 {
1538 unsigned imm = insn & 0xff; /* immediate value */
1539 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1540 int rd = bits (insn, 12, 15);
1541 imm = (imm >> rot) | (imm << (32 - rot));
1542 regs[rd] = pv_add_constant (regs[bits (insn, 16, 19)], imm);
1543 continue;
1544 }
1545 else if ((insn & 0xfff00000) == 0xe2400000 /* sub Rd, Rn, #n */
1546 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1547 {
1548 unsigned imm = insn & 0xff; /* immediate value */
1549 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1550 int rd = bits (insn, 12, 15);
1551 imm = (imm >> rot) | (imm << (32 - rot));
1552 regs[rd] = pv_add_constant (regs[bits (insn, 16, 19)], -imm);
1553 continue;
1554 }
1555 else if ((insn & 0xffff0fff) == 0xe52d0004) /* str Rd,
1556 [sp, #-4]! */
1557 {
1558 if (stack.store_would_trash (regs[ARM_SP_REGNUM]))
1559 break;
1560 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -4);
1561 stack.store (regs[ARM_SP_REGNUM], 4,
1562 regs[bits (insn, 12, 15)]);
1563 continue;
1564 }
1565 else if ((insn & 0xffff0000) == 0xe92d0000)
1566 /* stmfd sp!, {..., fp, ip, lr, pc}
1567 or
1568 stmfd sp!, {a1, a2, a3, a4} */
1569 {
1570 int mask = insn & 0xffff;
1571
1572 if (stack.store_would_trash (regs[ARM_SP_REGNUM]))
1573 break;
1574
1575 /* Calculate offsets of saved registers. */
1576 for (regno = ARM_PC_REGNUM; regno >= 0; regno--)
1577 if (mask & (1 << regno))
1578 {
1579 regs[ARM_SP_REGNUM]
1580 = pv_add_constant (regs[ARM_SP_REGNUM], -4);
1581 stack.store (regs[ARM_SP_REGNUM], 4, regs[regno]);
1582 }
1583 }
1584 else if ((insn & 0xffff0000) == 0xe54b0000 /* strb rx,[r11,#-n] */
1585 || (insn & 0xffff00f0) == 0xe14b00b0 /* strh rx,[r11,#-n] */
1586 || (insn & 0xffffc000) == 0xe50b0000) /* str rx,[r11,#-n] */
1587 {
1588 /* No need to add this to saved_regs -- it's just an arg reg. */
1589 continue;
1590 }
1591 else if ((insn & 0xffff0000) == 0xe5cd0000 /* strb rx,[sp,#n] */
1592 || (insn & 0xffff00f0) == 0xe1cd00b0 /* strh rx,[sp,#n] */
1593 || (insn & 0xffffc000) == 0xe58d0000) /* str rx,[sp,#n] */
1594 {
1595 /* No need to add this to saved_regs -- it's just an arg reg. */
1596 continue;
1597 }
1598 else if ((insn & 0xfff00000) == 0xe8800000 /* stm Rn,
1599 { registers } */
1600 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1601 {
1602 /* No need to add this to saved_regs -- it's just arg regs. */
1603 continue;
1604 }
1605 else if ((insn & 0xfffff000) == 0xe24cb000) /* sub fp, ip #n */
1606 {
1607 unsigned imm = insn & 0xff; /* immediate value */
1608 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1609 imm = (imm >> rot) | (imm << (32 - rot));
1610 regs[ARM_FP_REGNUM] = pv_add_constant (regs[ARM_IP_REGNUM], -imm);
1611 }
1612 else if ((insn & 0xfffff000) == 0xe24dd000) /* sub sp, sp #n */
1613 {
1614 unsigned imm = insn & 0xff; /* immediate value */
1615 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1616 imm = (imm >> rot) | (imm << (32 - rot));
1617 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -imm);
1618 }
1619 else if ((insn & 0xffff7fff) == 0xed6d0103 /* stfe f?,
1620 [sp, -#c]! */
1621 && gdbarch_tdep (gdbarch)->have_fpa_registers)
1622 {
1623 if (stack.store_would_trash (regs[ARM_SP_REGNUM]))
1624 break;
1625
1626 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -12);
1627 regno = ARM_F0_REGNUM + ((insn >> 12) & 0x07);
1628 stack.store (regs[ARM_SP_REGNUM], 12, regs[regno]);
1629 }
1630 else if ((insn & 0xffbf0fff) == 0xec2d0200 /* sfmfd f0, 4,
1631 [sp!] */
1632 && gdbarch_tdep (gdbarch)->have_fpa_registers)
1633 {
1634 int n_saved_fp_regs;
1635 unsigned int fp_start_reg, fp_bound_reg;
1636
1637 if (stack.store_would_trash (regs[ARM_SP_REGNUM]))
1638 break;
1639
1640 if ((insn & 0x800) == 0x800) /* N0 is set */
1641 {
1642 if ((insn & 0x40000) == 0x40000) /* N1 is set */
1643 n_saved_fp_regs = 3;
1644 else
1645 n_saved_fp_regs = 1;
1646 }
1647 else
1648 {
1649 if ((insn & 0x40000) == 0x40000) /* N1 is set */
1650 n_saved_fp_regs = 2;
1651 else
1652 n_saved_fp_regs = 4;
1653 }
1654
1655 fp_start_reg = ARM_F0_REGNUM + ((insn >> 12) & 0x7);
1656 fp_bound_reg = fp_start_reg + n_saved_fp_regs;
1657 for (; fp_start_reg < fp_bound_reg; fp_start_reg++)
1658 {
1659 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -12);
1660 stack.store (regs[ARM_SP_REGNUM], 12,
1661 regs[fp_start_reg++]);
1662 }
1663 }
1664 else if ((insn & 0xff000000) == 0xeb000000 && cache == NULL) /* bl */
1665 {
1666 /* Allow some special function calls when skipping the
1667 prologue; GCC generates these before storing arguments to
1668 the stack. */
1669 CORE_ADDR dest = BranchDest (current_pc, insn);
1670
1671 if (skip_prologue_function (gdbarch, dest, 0))
1672 continue;
1673 else
1674 break;
1675 }
1676 else if ((insn & 0xf0000000) != 0xe0000000)
1677 break; /* Condition not true, exit early. */
1678 else if (arm_instruction_changes_pc (insn))
1679 /* Don't scan past anything that might change control flow. */
1680 break;
1681 else if (arm_instruction_restores_sp (insn))
1682 {
1683 /* Don't scan past the epilogue. */
1684 break;
1685 }
1686 else if ((insn & 0xfe500000) == 0xe8100000 /* ldm */
1687 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1688 /* Ignore block loads from the stack, potentially copying
1689 parameters from memory. */
1690 continue;
1691 else if ((insn & 0xfc500000) == 0xe4100000
1692 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1693 /* Similarly ignore single loads from the stack. */
1694 continue;
1695 else if ((insn & 0xffff0ff0) == 0xe1a00000)
1696 /* MOV Rd, Rm. Skip register copies, i.e. saves to another
1697 register instead of the stack. */
1698 continue;
1699 else
1700 {
1701 /* The optimizer might shove anything into the prologue, if
1702 we build up cache (cache != NULL) from scanning prologue,
1703 we just skip what we don't recognize and scan further to
1704 make cache as complete as possible. However, if we skip
1705 prologue, we'll stop immediately on unrecognized
1706 instruction. */
1707 unrecognized_pc = current_pc;
1708 if (cache != NULL)
1709 continue;
1710 else
1711 break;
1712 }
1713 }
1714
1715 if (unrecognized_pc == 0)
1716 unrecognized_pc = current_pc;
1717
1718 if (cache)
1719 {
1720 int framereg, framesize;
1721
1722 /* The frame size is just the distance from the frame register
1723 to the original stack pointer. */
1724 if (pv_is_register (regs[ARM_FP_REGNUM], ARM_SP_REGNUM))
1725 {
1726 /* Frame pointer is fp. */
1727 framereg = ARM_FP_REGNUM;
1728 framesize = -regs[ARM_FP_REGNUM].k;
1729 }
1730 else
1731 {
1732 /* Try the stack pointer... this is a bit desperate. */
1733 framereg = ARM_SP_REGNUM;
1734 framesize = -regs[ARM_SP_REGNUM].k;
1735 }
1736
1737 cache->framereg = framereg;
1738 cache->framesize = framesize;
1739
1740 for (regno = 0; regno < ARM_FPS_REGNUM; regno++)
1741 if (stack.find_reg (gdbarch, regno, &offset))
1742 cache->saved_regs[regno].addr = offset;
1743 }
1744
1745 if (arm_debug)
1746 fprintf_unfiltered (gdb_stdlog, "Prologue scan stopped at %s\n",
1747 paddress (gdbarch, unrecognized_pc));
1748
1749 return unrecognized_pc;
1750 }
1751
/* Scan the prologue of the function containing the PC of THIS_FRAME
   and fill CACHE with the frame register, frame size and
   saved-register offsets established by that prologue.  Dispatches to
   thumb_scan_prologue for Thumb frames and arm_analyze_prologue
   otherwise; on failure CACHE is left describing a frameless
   function (SP-based, size 0).  */

static void
arm_scan_prologue (struct frame_info *this_frame,
		   struct arm_prologue_cache *cache)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  CORE_ADDR prologue_start, prologue_end;
  CORE_ADDR prev_pc = get_frame_pc (this_frame);
  CORE_ADDR block_addr = get_frame_address_in_block (this_frame);

  /* Assume there is no frame until proven otherwise.  */
  cache->framereg = ARM_SP_REGNUM;
  cache->framesize = 0;

  /* Check for Thumb prologue.  */
  if (arm_frame_is_thumb (this_frame))
    {
      thumb_scan_prologue (gdbarch, prev_pc, block_addr, cache);
      return;
    }

  /* Find the function prologue.  If we can't find the function in
     the symbol table, peek in the stack frame to find the PC.  */
  if (find_pc_partial_function (block_addr, NULL, &prologue_start,
				&prologue_end))
    {
      /* One way to find the end of the prologue (which works well
	 for unoptimized code) is to do the following:

	    struct symtab_and_line sal = find_pc_line (prologue_start, 0);

	    if (sal.line == 0)
	      prologue_end = prev_pc;
	    else if (sal.end < prologue_end)
	      prologue_end = sal.end;

	 This mechanism is very accurate so long as the optimizer
	 doesn't move any instructions from the function body into the
	 prologue.  If this happens, sal.end will be the last
	 instruction in the first hunk of prologue code just before
	 the first instruction that the scheduler has moved from
	 the body to the prologue.

	 In order to make sure that we scan all of the prologue
	 instructions, we use a slightly less accurate mechanism which
	 may scan more than necessary.  To help compensate for this
	 lack of accuracy, the prologue scanning loop below contains
	 several clauses which'll cause the loop to terminate early if
	 an implausible prologue instruction is encountered.

	 The expression

	      prologue_start + 64

	 is a suitable endpoint since it accounts for the largest
	 possible prologue plus up to five instructions inserted by
	 the scheduler.  */

      if (prologue_end > prologue_start + 64)
	{
	  prologue_end = prologue_start + 64;	/* See above.  */
	}
    }
  else
    {
      /* We have no symbol information.  Our only option is to assume this
	 function has a standard stack frame and the normal frame register.
	 Then, we can find the value of our frame pointer on entrance to
	 the callee (or at the present moment if this is the innermost frame).
	 The value stored there should be the address of the stmfd + 8.  */
      CORE_ADDR frame_loc;
      ULONGEST return_value;

      /* AAPCS does not use a frame register, so we can abort here.  */
      if (gdbarch_tdep (gdbarch)->arm_abi == ARM_ABI_AAPCS)
	return;

      frame_loc = get_frame_register_unsigned (this_frame, ARM_FP_REGNUM);
      if (!safe_read_memory_unsigned_integer (frame_loc, 4, byte_order,
					      &return_value))
	return;
      else
	{
	  prologue_start = gdbarch_addr_bits_remove
			     (gdbarch, return_value) - 8;
	  prologue_end = prologue_start + 64;	/* See above.  */
	}
    }

  /* Never scan past the frame's current PC.  */
  if (prev_pc < prologue_end)
    prologue_end = prev_pc;

  arm_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
}
1846
1847 static struct arm_prologue_cache *
1848 arm_make_prologue_cache (struct frame_info *this_frame)
1849 {
1850 int reg;
1851 struct arm_prologue_cache *cache;
1852 CORE_ADDR unwound_fp;
1853
1854 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
1855 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
1856
1857 arm_scan_prologue (this_frame, cache);
1858
1859 unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
1860 if (unwound_fp == 0)
1861 return cache;
1862
1863 cache->prev_sp = unwound_fp + cache->framesize;
1864
1865 /* Calculate actual addresses of saved registers using offsets
1866 determined by arm_scan_prologue. */
1867 for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
1868 if (trad_frame_addr_p (cache->saved_regs, reg))
1869 cache->saved_regs[reg].addr += cache->prev_sp;
1870
1871 return cache;
1872 }
1873
1874 /* Implementation of the stop_reason hook for arm_prologue frames. */
1875
1876 static enum unwind_stop_reason
1877 arm_prologue_unwind_stop_reason (struct frame_info *this_frame,
1878 void **this_cache)
1879 {
1880 struct arm_prologue_cache *cache;
1881 CORE_ADDR pc;
1882
1883 if (*this_cache == NULL)
1884 *this_cache = arm_make_prologue_cache (this_frame);
1885 cache = (struct arm_prologue_cache *) *this_cache;
1886
1887 /* This is meant to halt the backtrace at "_start". */
1888 pc = get_frame_pc (this_frame);
1889 if (pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
1890 return UNWIND_OUTERMOST;
1891
1892 /* If we've hit a wall, stop. */
1893 if (cache->prev_sp == 0)
1894 return UNWIND_OUTERMOST;
1895
1896 return UNWIND_NO_REASON;
1897 }
1898
1899 /* Our frame ID for a normal frame is the current function's starting PC
1900 and the caller's SP when we were called. */
1901
1902 static void
1903 arm_prologue_this_id (struct frame_info *this_frame,
1904 void **this_cache,
1905 struct frame_id *this_id)
1906 {
1907 struct arm_prologue_cache *cache;
1908 struct frame_id id;
1909 CORE_ADDR pc, func;
1910
1911 if (*this_cache == NULL)
1912 *this_cache = arm_make_prologue_cache (this_frame);
1913 cache = (struct arm_prologue_cache *) *this_cache;
1914
1915 /* Use function start address as part of the frame ID. If we cannot
1916 identify the start address (due to missing symbol information),
1917 fall back to just using the current PC. */
1918 pc = get_frame_pc (this_frame);
1919 func = get_frame_func (this_frame);
1920 if (!func)
1921 func = pc;
1922
1923 id = frame_id_build (cache->prev_sp, func);
1924 *this_id = id;
1925 }
1926
1927 static struct value *
1928 arm_prologue_prev_register (struct frame_info *this_frame,
1929 void **this_cache,
1930 int prev_regnum)
1931 {
1932 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1933 struct arm_prologue_cache *cache;
1934
1935 if (*this_cache == NULL)
1936 *this_cache = arm_make_prologue_cache (this_frame);
1937 cache = (struct arm_prologue_cache *) *this_cache;
1938
1939 /* If we are asked to unwind the PC, then we need to return the LR
1940 instead. The prologue may save PC, but it will point into this
1941 frame's prologue, not the next frame's resume location. Also
1942 strip the saved T bit. A valid LR may have the low bit set, but
1943 a valid PC never does. */
1944 if (prev_regnum == ARM_PC_REGNUM)
1945 {
1946 CORE_ADDR lr;
1947
1948 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
1949 return frame_unwind_got_constant (this_frame, prev_regnum,
1950 arm_addr_bits_remove (gdbarch, lr));
1951 }
1952
1953 /* SP is generally not saved to the stack, but this frame is
1954 identified by the next frame's stack pointer at the time of the call.
1955 The value was already reconstructed into PREV_SP. */
1956 if (prev_regnum == ARM_SP_REGNUM)
1957 return frame_unwind_got_constant (this_frame, prev_regnum, cache->prev_sp);
1958
1959 /* The CPSR may have been changed by the call instruction and by the
1960 called function. The only bit we can reconstruct is the T bit,
1961 by checking the low bit of LR as of the call. This is a reliable
1962 indicator of Thumb-ness except for some ARM v4T pre-interworking
1963 Thumb code, which could get away with a clear low bit as long as
1964 the called function did not use bx. Guess that all other
1965 bits are unchanged; the condition flags are presumably lost,
1966 but the processor status is likely valid. */
1967 if (prev_regnum == ARM_PS_REGNUM)
1968 {
1969 CORE_ADDR lr, cpsr;
1970 ULONGEST t_bit = arm_psr_thumb_bit (gdbarch);
1971
1972 cpsr = get_frame_register_unsigned (this_frame, prev_regnum);
1973 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
1974 if (IS_THUMB_ADDR (lr))
1975 cpsr |= t_bit;
1976 else
1977 cpsr &= ~t_bit;
1978 return frame_unwind_got_constant (this_frame, prev_regnum, cpsr);
1979 }
1980
1981 return trad_frame_get_prev_register (this_frame, cache->saved_regs,
1982 prev_regnum);
1983 }
1984
/* Unwinder based on prologue analysis.  The cache is built lazily by
   arm_prologue_this_id / arm_prologue_prev_register.  */
struct frame_unwind arm_prologue_unwind = {
  NORMAL_FRAME,
  arm_prologue_unwind_stop_reason,
  arm_prologue_this_id,
  arm_prologue_prev_register,
  NULL,
  default_frame_sniffer
};
1993
1994 /* Maintain a list of ARM exception table entries per objfile, similar to the
1995 list of mapping symbols. We only cache entries for standard ARM-defined
1996 personality routines; the cache will contain only the frame unwinding
1997 instructions associated with the entry (not the descriptors). */
1998
struct arm_exidx_entry
{
  /* Start of the region covered by this entry, as an offset from the
     start of the containing obj_section.  */
  CORE_ADDR addr;

  /* Normalized unwind instructions, allocated on the objfile obstack;
     NULL when the entry carries no instructions (EXIDX_CANTUNWIND).  */
  gdb_byte *entry;

  /* Order by start address, so std::lower_bound can search the
     per-section vectors.  */
  bool operator< (const arm_exidx_entry &other) const
  {
    return addr < other.addr;
  }
};
2009
struct arm_exidx_data
{
  /* One vector of entries per BFD section, indexed by section index.
     Entries within a vector are pushed in increasing address order
     (see arm_exidx_new_objfile), so binary search is valid.  */
  std::vector<std::vector<arm_exidx_entry>> section_maps;
};
2014
/* Per-BFD key to store exception handling information.  Populated
   lazily by arm_exidx_new_objfile and queried by
   arm_find_exidx_entry.  */
static const struct bfd_key<arm_exidx_data> arm_exidx_data_key;
2017
2018 static struct obj_section *
2019 arm_obj_section_from_vma (struct objfile *objfile, bfd_vma vma)
2020 {
2021 struct obj_section *osect;
2022
2023 ALL_OBJFILE_OSECTIONS (objfile, osect)
2024 if (bfd_section_flags (osect->the_bfd_section) & SEC_ALLOC)
2025 {
2026 bfd_vma start, size;
2027 start = bfd_section_vma (osect->the_bfd_section);
2028 size = bfd_section_size (osect->the_bfd_section);
2029
2030 if (start <= vma && vma < start + size)
2031 return osect;
2032 }
2033
2034 return NULL;
2035 }
2036
2037 /* Parse contents of exception table and exception index sections
2038 of OBJFILE, and fill in the exception table entry cache.
2039
2040 For each entry that refers to a standard ARM-defined personality
2041 routine, extract the frame unwinding instructions (from either
2042 the index or the table section). The unwinding instructions
2043 are normalized by:
2044 - extracting them from the rest of the table data
2045 - converting to host endianness
2046 - appending the implicit 0xb0 ("Finish") code
2047
2048 The extracted and normalized instructions are stored for later
2049 retrieval by the arm_find_exidx_entry routine. */
2050
static void
arm_exidx_new_objfile (struct objfile *objfile)
{
  struct arm_exidx_data *data;
  asection *exidx, *extab;
  bfd_vma exidx_vma = 0, extab_vma = 0;
  LONGEST i;

  /* If we've already touched this file, do nothing.  */
  if (!objfile || arm_exidx_data_key.get (objfile->obfd) != NULL)
    return;

  /* Read contents of exception table and index.  */
  exidx = bfd_get_section_by_name (objfile->obfd, ELF_STRING_ARM_unwind);
  gdb::byte_vector exidx_data;
  if (exidx)
    {
      exidx_vma = bfd_section_vma (exidx);
      exidx_data.resize (bfd_section_size (exidx));

      if (!bfd_get_section_contents (objfile->obfd, exidx,
				     exidx_data.data (), 0,
				     exidx_data.size ()))
	return;
    }

  extab = bfd_get_section_by_name (objfile->obfd, ".ARM.extab");
  gdb::byte_vector extab_data;
  if (extab)
    {
      extab_vma = bfd_section_vma (extab);
      extab_data.resize (bfd_section_size (extab));

      if (!bfd_get_section_contents (objfile->obfd, extab,
				     extab_data.data (), 0,
				     extab_data.size ()))
	return;
    }

  /* Allocate exception table data structure.  */
  data = arm_exidx_data_key.emplace (objfile->obfd);
  data->section_maps.resize (objfile->obfd->section_count);

  /* Fill in exception table.  Each index entry is a pair of 4-byte
     words: a relative function address and either an inline unwind
     entry, a reference into .ARM.extab, or the EXIDX_CANTUNWIND
     marker.  */
  for (i = 0; i < exidx_data.size () / 8; i++)
    {
      struct arm_exidx_entry new_exidx_entry;
      bfd_vma idx = bfd_h_get_32 (objfile->obfd, exidx_data.data () + i * 8);
      bfd_vma val = bfd_h_get_32 (objfile->obfd,
				  exidx_data.data () + i * 8 + 4);
      bfd_vma addr = 0, word = 0;
      int n_bytes = 0, n_words = 0;
      struct obj_section *sec;
      gdb_byte *entry = NULL;

      /* Extract address of start of function.  The XOR/subtract pair
	 sign-extends the 31-bit self-relative offset (prel31).  */
      idx = ((idx & 0x7fffffff) ^ 0x40000000) - 0x40000000;
      idx += exidx_vma + i * 8;

      /* Find section containing function and compute section offset.  */
      sec = arm_obj_section_from_vma (objfile, idx);
      if (sec == NULL)
	continue;
      idx -= bfd_section_vma (sec->the_bfd_section);

      /* Determine address of exception table entry.  */
      if (val == 1)
	{
	  /* EXIDX_CANTUNWIND -- no exception table entry present.  */
	}
      else if ((val & 0xff000000) == 0x80000000)
	{
	  /* Exception table entry embedded in .ARM.exidx
	     -- must be short form.  */
	  word = val;
	  n_bytes = 3;
	}
      else if (!(val & 0x80000000))
	{
	  /* Exception table entry in .ARM.extab.  Again a prel31
	     self-relative offset, this time to the extab entry.  */
	  addr = ((val & 0x7fffffff) ^ 0x40000000) - 0x40000000;
	  addr += exidx_vma + i * 8 + 4;

	  if (addr >= extab_vma && addr + 4 <= extab_vma + extab_data.size ())
	    {
	      word = bfd_h_get_32 (objfile->obfd,
				   extab_data.data () + addr - extab_vma);
	      addr += 4;

	      if ((word & 0xff000000) == 0x80000000)
		{
		  /* Short form.  */
		  n_bytes = 3;
		}
	      else if ((word & 0xff000000) == 0x81000000
		       || (word & 0xff000000) == 0x82000000)
		{
		  /* Long form.  */
		  n_bytes = 2;
		  n_words = ((word >> 16) & 0xff);
		}
	      else if (!(word & 0x80000000))
		{
		  bfd_vma pers;
		  struct obj_section *pers_sec;
		  int gnu_personality = 0;

		  /* Custom personality routine.  Address is prel31,
		     with the Thumb bit stripped.  */
		  pers = ((word & 0x7fffffff) ^ 0x40000000) - 0x40000000;
		  pers = UNMAKE_THUMB_ADDR (pers + addr - 4);

		  /* Check whether we've got one of the variants of the
		     GNU personality routines.  */
		  pers_sec = arm_obj_section_from_vma (objfile, pers);
		  if (pers_sec)
		    {
		      static const char *personality[] =
			{
			  "__gcc_personality_v0",
			  "__gxx_personality_v0",
			  "__gcj_personality_v0",
			  "__gnu_objc_personality_v0",
			  NULL
			};

		      CORE_ADDR pc = pers + obj_section_offset (pers_sec);
		      int k;

		      for (k = 0; personality[k]; k++)
			if (lookup_minimal_symbol_by_pc_name
			      (pc, personality[k], objfile))
			  {
			    gnu_personality = 1;
			    break;
			  }
		    }

		  /* If so, the next word contains a word count in the high
		     byte, followed by the same unwind instructions as the
		     pre-defined forms.  */
		  if (gnu_personality
		      && addr + 4 <= extab_vma + extab_data.size ())
		    {
		      word = bfd_h_get_32 (objfile->obfd,
					   (extab_data.data ()
					    + addr - extab_vma));
		      addr += 4;
		      n_bytes = 3;
		      n_words = ((word >> 24) & 0xff);
		    }
		}
	    }
	}

      /* Sanity check address: all N_WORDS extra words must lie inside
	 the extab section, or we drop the instructions entirely.  */
      if (n_words)
	if (addr < extab_vma
	    || addr + 4 * n_words > extab_vma + extab_data.size ())
	  n_words = n_bytes = 0;

      /* The unwind instructions reside in WORD (only the N_BYTES least
	 significant bytes are valid), followed by N_WORDS words in the
	 extab section starting at ADDR.  */
      if (n_bytes || n_words)
	{
	  gdb_byte *p = entry
	    = (gdb_byte *) obstack_alloc (&objfile->objfile_obstack,
					  n_bytes + n_words * 4 + 1);

	  /* Copy the valid bytes of WORD, most significant first.  */
	  while (n_bytes--)
	    *p++ = (gdb_byte) ((word >> (8 * n_bytes)) & 0xff);

	  /* Then the extra words, each converted to big-endian byte
	     order (i.e. host-independent instruction stream order).  */
	  while (n_words--)
	    {
	      word = bfd_h_get_32 (objfile->obfd,
				   extab_data.data () + addr - extab_vma);
	      addr += 4;

	      *p++ = (gdb_byte) ((word >> 24) & 0xff);
	      *p++ = (gdb_byte) ((word >> 16) & 0xff);
	      *p++ = (gdb_byte) ((word >> 8) & 0xff);
	      *p++ = (gdb_byte) (word & 0xff);
	    }

	  /* Implied "Finish" to terminate the list.  */
	  *p++ = 0xb0;
	}

      /* Push entry onto vector.  They are guaranteed to always
	 appear in order of increasing addresses.  */
      new_exidx_entry.addr = idx;
      new_exidx_entry.entry = entry;
      data->section_maps[sec->the_bfd_section->index].push_back
	(new_exidx_entry);
    }
}
2247
2248 /* Search for the exception table entry covering MEMADDR. If one is found,
2249 return a pointer to its data. Otherwise, return 0. If START is non-NULL,
2250 set *START to the start of the region covered by this entry. */
2251
2252 static gdb_byte *
2253 arm_find_exidx_entry (CORE_ADDR memaddr, CORE_ADDR *start)
2254 {
2255 struct obj_section *sec;
2256
2257 sec = find_pc_section (memaddr);
2258 if (sec != NULL)
2259 {
2260 struct arm_exidx_data *data;
2261 struct arm_exidx_entry map_key = { memaddr - obj_section_addr (sec), 0 };
2262
2263 data = arm_exidx_data_key.get (sec->objfile->obfd);
2264 if (data != NULL)
2265 {
2266 std::vector<arm_exidx_entry> &map
2267 = data->section_maps[sec->the_bfd_section->index];
2268 if (!map.empty ())
2269 {
2270 auto idx = std::lower_bound (map.begin (), map.end (), map_key);
2271
2272 /* std::lower_bound finds the earliest ordered insertion
2273 point. If the following symbol starts at this exact
2274 address, we use that; otherwise, the preceding
2275 exception table entry covers this address. */
2276 if (idx < map.end ())
2277 {
2278 if (idx->addr == map_key.addr)
2279 {
2280 if (start)
2281 *start = idx->addr + obj_section_addr (sec);
2282 return idx->entry;
2283 }
2284 }
2285
2286 if (idx > map.begin ())
2287 {
2288 idx = idx - 1;
2289 if (start)
2290 *start = idx->addr + obj_section_addr (sec);
2291 return idx->entry;
2292 }
2293 }
2294 }
2295 }
2296
2297 return NULL;
2298 }
2299
2300 /* Given the current frame THIS_FRAME, and its associated frame unwinding
2301 instruction list from the ARM exception table entry ENTRY, allocate and
2302 return a prologue cache structure describing how to unwind this frame.
2303
2304 Return NULL if the unwinding instruction list contains a "spare",
2305 "reserved" or "refuse to unwind" instruction as defined in section
2306 "9.3 Frame unwinding instructions" of the "Exception Handling ABI
2307 for the ARM Architecture" document. */
2308
static struct arm_prologue_cache *
arm_exidx_fill_cache (struct frame_info *this_frame, gdb_byte *entry)
{
  /* VSP is the "virtual stack pointer" the unwind instructions
     operate on; VSP_VALID tracks whether it has been (re)loaded from
     the current frame since the last time SP's unwind rule changed.  */
  CORE_ADDR vsp = 0;
  int vsp_valid = 0;

  struct arm_prologue_cache *cache;
  cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);

  for (;;)
    {
      gdb_byte insn;

      /* Whenever we reload SP, we actually have to retrieve its
	 actual value in the current frame.  */
      if (!vsp_valid)
	{
	  if (trad_frame_realreg_p (cache->saved_regs, ARM_SP_REGNUM))
	    {
	      int reg = cache->saved_regs[ARM_SP_REGNUM].realreg;
	      vsp = get_frame_register_unsigned (this_frame, reg);
	    }
	  else
	    {
	      CORE_ADDR addr = cache->saved_regs[ARM_SP_REGNUM].addr;
	      vsp = get_frame_memory_unsigned (this_frame, addr, 4);
	    }

	  vsp_valid = 1;
	}

      /* Decode next unwind instruction.  NOTE: the order of the tests
	 below matters; e.g. 0xc6 must be checked before the 0xc0 mask
	 case that would otherwise match it.  */
      insn = *entry++;

      if ((insn & 0xc0) == 0)
	{
	  /* 00xxxxxx: vsp += (xxxxxx << 2) + 4.  */
	  int offset = insn & 0x3f;
	  vsp += (offset << 2) + 4;
	}
      else if ((insn & 0xc0) == 0x40)
	{
	  /* 01xxxxxx: vsp -= (xxxxxx << 2) + 4.  */
	  int offset = insn & 0x3f;
	  vsp -= (offset << 2) + 4;
	}
      else if ((insn & 0xf0) == 0x80)
	{
	  /* 1000xxxx xxxxxxxx: pop r4..r15 under a 12-bit mask.  */
	  int mask = ((insn & 0xf) << 8) | *entry++;
	  int i;

	  /* The special case of an all-zero mask identifies
	     "Refuse to unwind".  We return NULL to fall back
	     to the prologue analyzer.  */
	  if (mask == 0)
	    return NULL;

	  /* Pop registers r4..r15 under mask.  */
	  for (i = 0; i < 12; i++)
	    if (mask & (1 << i))
	      {
		cache->saved_regs[4 + i].addr = vsp;
		vsp += 4;
	      }

	  /* Special-case popping SP -- we need to reload vsp.  */
	  if (mask & (1 << (ARM_SP_REGNUM - 4)))
	    vsp_valid = 0;
	}
      else if ((insn & 0xf0) == 0x90)
	{
	  /* 1001nnnn: set vsp = r[nnnn].  */
	  int reg = insn & 0xf;

	  /* Reserved cases.  */
	  if (reg == ARM_SP_REGNUM || reg == ARM_PC_REGNUM)
	    return NULL;

	  /* Set SP from another register and mark VSP for reload.  */
	  cache->saved_regs[ARM_SP_REGNUM] = cache->saved_regs[reg];
	  vsp_valid = 0;
	}
      else if ((insn & 0xf0) == 0xa0)
	{
	  /* 1010lnnn: pop r4..r[4+nnn], plus r14 if the l bit is set.  */
	  int count = insn & 0x7;
	  int pop_lr = (insn & 0x8) != 0;
	  int i;

	  /* Pop r4..r[4+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[4 + i].addr = vsp;
	      vsp += 4;
	    }

	  /* If indicated by flag, pop LR as well.  */
	  if (pop_lr)
	    {
	      cache->saved_regs[ARM_LR_REGNUM].addr = vsp;
	      vsp += 4;
	    }
	}
      else if (insn == 0xb0)
	{
	  /* 10110000: "Finish" -- end of the instruction list.  */

	  /* We could only have updated PC by popping into it; if so, it
	     will show up as address.  Otherwise, copy LR into PC.  */
	  if (!trad_frame_addr_p (cache->saved_regs, ARM_PC_REGNUM))
	    cache->saved_regs[ARM_PC_REGNUM]
	      = cache->saved_regs[ARM_LR_REGNUM];

	  /* We're done.  */
	  break;
	}
      else if (insn == 0xb1)
	{
	  /* 10110001 0000xxxx: pop r0..r3 under mask.  */
	  int mask = *entry++;
	  int i;

	  /* All-zero mask and mask >= 16 is "spare".  */
	  if (mask == 0 || mask >= 16)
	    return NULL;

	  /* Pop r0..r3 under mask.  */
	  for (i = 0; i < 4; i++)
	    if (mask & (1 << i))
	      {
		cache->saved_regs[i].addr = vsp;
		vsp += 4;
	      }
	}
      else if (insn == 0xb2)
	{
	  /* 10110010: vsp += 0x204 + (uleb128 << 2).  The operand is a
	     ULEB128-encoded offset in the following bytes.  */
	  ULONGEST offset = 0;
	  unsigned shift = 0;

	  do
	    {
	      offset |= (*entry & 0x7f) << shift;
	      shift += 7;
	    }
	  while (*entry++ & 0x80);

	  vsp += 0x204 + (offset << 2);
	}
      else if (insn == 0xb3)
	{
	  /* 10110011 sssscccc: pop VFP D[ssss]..D[ssss+cccc]
	     (FSTMFDX encoding).  */
	  int start = *entry >> 4;
	  int count = (*entry++) & 0xf;
	  int i;

	  /* Only registers D0..D15 are valid here.  */
	  if (start + count >= 16)
	    return NULL;

	  /* Pop VFP double-precision registers D[start]..D[start+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_D0_REGNUM + start + i].addr = vsp;
	      vsp += 8;
	    }

	  /* Add an extra 4 bytes for FSTMFDX-style stack.  */
	  vsp += 4;
	}
      else if ((insn & 0xf8) == 0xb8)
	{
	  /* 10111nnn: pop VFP D[8]..D[8+nnn] (FSTMFDX encoding).  */
	  int count = insn & 0x7;
	  int i;

	  /* Pop VFP double-precision registers D[8]..D[8+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_D0_REGNUM + 8 + i].addr = vsp;
	      vsp += 8;
	    }

	  /* Add an extra 4 bytes for FSTMFDX-style stack.  */
	  vsp += 4;
	}
      else if (insn == 0xc6)
	{
	  /* 11000110 sssscccc: pop iWMMXt WR[ssss]..WR[ssss+cccc].  */
	  int start = *entry >> 4;
	  int count = (*entry++) & 0xf;
	  int i;

	  /* Only registers WR0..WR15 are valid.  */
	  if (start + count >= 16)
	    return NULL;

	  /* Pop iwmmx registers WR[start]..WR[start+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_WR0_REGNUM + start + i].addr = vsp;
	      vsp += 8;
	    }
	}
      else if (insn == 0xc7)
	{
	  /* 11000111 0000xxxx: pop iWMMXt WCGR0..WCGR3 under mask.  */
	  int mask = *entry++;
	  int i;

	  /* All-zero mask and mask >= 16 is "spare".  */
	  if (mask == 0 || mask >= 16)
	    return NULL;

	  /* Pop iwmmx general-purpose registers WCGR0..WCGR3 under mask.  */
	  for (i = 0; i < 4; i++)
	    if (mask & (1 << i))
	      {
		cache->saved_regs[ARM_WCGR0_REGNUM + i].addr = vsp;
		vsp += 4;
	      }
	}
      else if ((insn & 0xf8) == 0xc0)
	{
	  /* 11000nnn (nnn != 6, 7; handled above): pop iWMMXt
	     WR[10]..WR[10+nnn].  */
	  int count = insn & 0x7;
	  int i;

	  /* Pop iwmmx registers WR[10]..WR[10+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_WR0_REGNUM + 10 + i].addr = vsp;
	      vsp += 8;
	    }
	}
      else if (insn == 0xc8)
	{
	  /* 11001000 sssscccc: pop VFP D[16+ssss]..D[16+ssss+cccc].  */
	  int start = *entry >> 4;
	  int count = (*entry++) & 0xf;
	  int i;

	  /* Only registers D0..D31 are valid.  */
	  if (start + count >= 16)
	    return NULL;

	  /* Pop VFP double-precision registers
	     D[16+start]..D[16+start+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_D0_REGNUM + 16 + start + i].addr = vsp;
	      vsp += 8;
	    }
	}
      else if (insn == 0xc9)
	{
	  /* 11001001 sssscccc: pop VFP D[ssss]..D[ssss+cccc]
	     (FSTMFDD encoding, no trailing pad word).  */
	  int start = *entry >> 4;
	  int count = (*entry++) & 0xf;
	  int i;

	  /* Pop VFP double-precision registers D[start]..D[start+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_D0_REGNUM + start + i].addr = vsp;
	      vsp += 8;
	    }
	}
      else if ((insn & 0xf8) == 0xd0)
	{
	  /* 11010nnn: pop VFP D[8]..D[8+nnn] (FSTMFDD encoding).  */
	  int count = insn & 0x7;
	  int i;

	  /* Pop VFP double-precision registers D[8]..D[8+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_D0_REGNUM + 8 + i].addr = vsp;
	      vsp += 8;
	    }
	}
      else
	{
	  /* Everything else is "spare".  */
	  return NULL;
	}
    }

  /* If we restore SP from a register, assume this was the frame register.
     Otherwise just fall back to SP as frame register.  */
  if (trad_frame_realreg_p (cache->saved_regs, ARM_SP_REGNUM))
    cache->framereg = cache->saved_regs[ARM_SP_REGNUM].realreg;
  else
    cache->framereg = ARM_SP_REGNUM;

  /* Determine offset to previous frame.  */
  cache->framesize
    = vsp - get_frame_register_unsigned (this_frame, cache->framereg);

  /* We already got the previous SP.  */
  cache->prev_sp = vsp;

  return cache;
}
2598
2599 /* Unwinding via ARM exception table entries. Note that the sniffer
2600 already computes a filled-in prologue cache, which is then used
2601 with the same arm_prologue_this_id and arm_prologue_prev_register
2602 routines also used for prologue-parsing based unwinding. */
2603
/* Frame sniffer for the ARM exception table unwinder.  Returns 1 and
   fills *THIS_PROLOGUE_CACHE when an exception table entry covers the
   frame's PC and we decide the entry is trustworthy here; returns 0
   to let other unwinders (e.g. prologue analysis) take over.  */

static int
arm_exidx_unwind_sniffer (const struct frame_unwind *self,
			  struct frame_info *this_frame,
			  void **this_prologue_cache)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  CORE_ADDR addr_in_block, exidx_region, func_start;
  struct arm_prologue_cache *cache;
  gdb_byte *entry;

  /* See if we have an ARM exception table entry covering this address.  */
  addr_in_block = get_frame_address_in_block (this_frame);
  entry = arm_find_exidx_entry (addr_in_block, &exidx_region);
  if (!entry)
    return 0;

  /* The ARM exception table does not describe unwind information
     for arbitrary PC values, but is guaranteed to be correct only
     at call sites.  We have to decide here whether we want to use
     ARM exception table information for this frame, or fall back
     to using prologue parsing.  (Note that if we have DWARF CFI,
     this sniffer isn't even called -- CFI is always preferred.)

     Before we make this decision, however, we check whether we
     actually have *symbol* information for the current frame.
     If not, prologue parsing would not work anyway, so we might
     as well use the exception table and hope for the best.  */
  if (find_pc_partial_function (addr_in_block, NULL, &func_start, NULL))
    {
      int exc_valid = 0;

      /* If the next frame is "normal", we are at a call site in this
	 frame, so exception information is guaranteed to be valid.  */
      if (get_next_frame (this_frame)
	  && get_frame_type (get_next_frame (this_frame)) == NORMAL_FRAME)
	exc_valid = 1;

      /* We also assume exception information is valid if we're currently
	 blocked in a system call.  The system library is supposed to
	 ensure this, so that e.g. pthread cancellation works.
	 Detect this by checking whether the instruction just before
	 PC is an svc, in either Thumb or ARM encoding.  */
      if (arm_frame_is_thumb (this_frame))
	{
	  ULONGEST insn;

	  if (safe_read_memory_unsigned_integer (get_frame_pc (this_frame) - 2,
						 2, byte_order_for_code, &insn)
	      && (insn & 0xff00) == 0xdf00 /* svc */)
	    exc_valid = 1;
	}
      else
	{
	  ULONGEST insn;

	  if (safe_read_memory_unsigned_integer (get_frame_pc (this_frame) - 4,
						 4, byte_order_for_code, &insn)
	      && (insn & 0x0f000000) == 0x0f000000 /* svc */)
	    exc_valid = 1;
	}

      /* Bail out if we don't know that exception information is valid.  */
      if (!exc_valid)
	return 0;

      /* The ARM exception index does not mark the *end* of the region
	 covered by the entry, and some functions will not have any entry.
	 To correctly recognize the end of the covered region, the linker
	 should have inserted dummy records with a CANTUNWIND marker.

	 Unfortunately, current versions of GNU ld do not reliably do
	 this, and thus we may have found an incorrect entry above.
	 As a (temporary) sanity check, we only use the entry if it
	 lies *within* the bounds of the function.  Note that this check
	 might reject perfectly valid entries that just happen to cover
	 multiple functions; therefore this check ought to be removed
	 once the linker is fixed.  */
      if (func_start > exidx_region)
	return 0;
    }

  /* Decode the list of unwinding instructions into a prologue cache.
     Note that this may fail due to e.g. a "refuse to unwind" code.  */
  cache = arm_exidx_fill_cache (this_frame, entry);
  if (!cache)
    return 0;

  *this_prologue_cache = cache;
  return 1;
}
2693
/* Exception-table-based unwinder; the sniffer pre-fills the cache, so
   this_id/prev_register are shared with the prologue unwinder.  */
struct frame_unwind arm_exidx_unwind = {
  NORMAL_FRAME,
  default_frame_unwind_stop_reason,
  arm_prologue_this_id,
  arm_prologue_prev_register,
  NULL,
  arm_exidx_unwind_sniffer
};
2702
2703 static struct arm_prologue_cache *
2704 arm_make_epilogue_frame_cache (struct frame_info *this_frame)
2705 {
2706 struct arm_prologue_cache *cache;
2707 int reg;
2708
2709 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
2710 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
2711
2712 /* Still rely on the offset calculated from prologue. */
2713 arm_scan_prologue (this_frame, cache);
2714
2715 /* Since we are in epilogue, the SP has been restored. */
2716 cache->prev_sp = get_frame_register_unsigned (this_frame, ARM_SP_REGNUM);
2717
2718 /* Calculate actual addresses of saved registers using offsets
2719 determined by arm_scan_prologue. */
2720 for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
2721 if (trad_frame_addr_p (cache->saved_regs, reg))
2722 cache->saved_regs[reg].addr += cache->prev_sp;
2723
2724 return cache;
2725 }
2726
2727 /* Implementation of function hook 'this_id' in
2728 'struct frame_uwnind' for epilogue unwinder. */
2729
2730 static void
2731 arm_epilogue_frame_this_id (struct frame_info *this_frame,
2732 void **this_cache,
2733 struct frame_id *this_id)
2734 {
2735 struct arm_prologue_cache *cache;
2736 CORE_ADDR pc, func;
2737
2738 if (*this_cache == NULL)
2739 *this_cache = arm_make_epilogue_frame_cache (this_frame);
2740 cache = (struct arm_prologue_cache *) *this_cache;
2741
2742 /* Use function start address as part of the frame ID. If we cannot
2743 identify the start address (due to missing symbol information),
2744 fall back to just using the current PC. */
2745 pc = get_frame_pc (this_frame);
2746 func = get_frame_func (this_frame);
2747 if (func == 0)
2748 func = pc;
2749
2750 (*this_id) = frame_id_build (cache->prev_sp, pc);
2751 }
2752
/* Implementation of function hook 'prev_register' in
   'struct frame_unwind' for epilogue unwinder.  Builds the cache
   lazily, then defers to the prologue unwinder's prev_register logic,
   which understands the same arm_prologue_cache layout.  */

static struct value *
arm_epilogue_frame_prev_register (struct frame_info *this_frame,
				  void **this_cache, int regnum)
{
  if (*this_cache == NULL)
    *this_cache = arm_make_epilogue_frame_cache (this_frame);

  return arm_prologue_prev_register (this_frame, this_cache, regnum);
}
2765
2766 static int arm_stack_frame_destroyed_p_1 (struct gdbarch *gdbarch,
2767 CORE_ADDR pc);
2768 static int thumb_stack_frame_destroyed_p (struct gdbarch *gdbarch,
2769 CORE_ADDR pc);
2770
2771 /* Implementation of function hook 'sniffer' in
2772 'struct frame_uwnind' for epilogue unwinder. */
2773
2774 static int
2775 arm_epilogue_frame_sniffer (const struct frame_unwind *self,
2776 struct frame_info *this_frame,
2777 void **this_prologue_cache)
2778 {
2779 if (frame_relative_level (this_frame) == 0)
2780 {
2781 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2782 CORE_ADDR pc = get_frame_pc (this_frame);
2783
2784 if (arm_frame_is_thumb (this_frame))
2785 return thumb_stack_frame_destroyed_p (gdbarch, pc);
2786 else
2787 return arm_stack_frame_destroyed_p_1 (gdbarch, pc);
2788 }
2789 else
2790 return 0;
2791 }
2792
/* Frame unwinder from epilogue.  Shares its cache layout (and thus
   this_id/prev_register behavior) with the prologue unwinder.  */

static const struct frame_unwind arm_epilogue_frame_unwind =
{
  NORMAL_FRAME,
  default_frame_unwind_stop_reason,
  arm_epilogue_frame_this_id,
  arm_epilogue_frame_prev_register,
  NULL,
  arm_epilogue_frame_sniffer,
};
2804
2805 /* Recognize GCC's trampoline for thumb call-indirect. If we are in a
2806 trampoline, return the target PC. Otherwise return 0.
2807
2808 void call0a (char c, short s, int i, long l) {}
2809
2810 int main (void)
2811 {
2812 (*pointer_to_call0a) (c, s, i, l);
2813 }
2814
2815 Instead of calling a stub library function _call_via_xx (xx is
2816 the register name), GCC may inline the trampoline in the object
2817 file as below (register r2 has the address of call0a).
2818
2819 .global main
2820 .type main, %function
2821 ...
2822 bl .L1
2823 ...
2824 .size main, .-main
2825
2826 .L1:
2827 bx r2
2828
2829 The trampoline 'bx r2' doesn't belong to main. */
2830
2831 static CORE_ADDR
2832 arm_skip_bx_reg (struct frame_info *frame, CORE_ADDR pc)
2833 {
2834 /* The heuristics of recognizing such trampoline is that FRAME is
2835 executing in Thumb mode and the instruction on PC is 'bx Rm'. */
2836 if (arm_frame_is_thumb (frame))
2837 {
2838 gdb_byte buf[2];
2839
2840 if (target_read_memory (pc, buf, 2) == 0)
2841 {
2842 struct gdbarch *gdbarch = get_frame_arch (frame);
2843 enum bfd_endian byte_order_for_code
2844 = gdbarch_byte_order_for_code (gdbarch);
2845 uint16_t insn
2846 = extract_unsigned_integer (buf, 2, byte_order_for_code);
2847
2848 if ((insn & 0xff80) == 0x4700) /* bx <Rm> */
2849 {
2850 CORE_ADDR dest
2851 = get_frame_register_unsigned (frame, bits (insn, 3, 6));
2852
2853 /* Clear the LSB so that gdb core sets step-resume
2854 breakpoint at the right address. */
2855 return UNMAKE_THUMB_ADDR (dest);
2856 }
2857 }
2858 }
2859
2860 return 0;
2861 }
2862
/* Build a prologue cache for a stub frame.  No register save
   locations are recorded; the previous SP is taken to be the current
   SP, i.e. the stub is assumed not to have adjusted the stack.  */

static struct arm_prologue_cache *
arm_make_stub_cache (struct frame_info *this_frame)
{
  struct arm_prologue_cache *cache;

  cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);

  cache->prev_sp = get_frame_register_unsigned (this_frame, ARM_SP_REGNUM);

  return cache;
}
2875
/* Our frame ID for a stub frame is the current SP and LR.
   NOTE(review): the code actually builds the ID from prev_sp and the
   frame's PC, not LR -- confirm which the comment should say.  */

static void
arm_stub_this_id (struct frame_info *this_frame,
		  void **this_cache,
		  struct frame_id *this_id)
{
  struct arm_prologue_cache *cache;

  /* Build the cache lazily on first use.  */
  if (*this_cache == NULL)
    *this_cache = arm_make_stub_cache (this_frame);
  cache = (struct arm_prologue_cache *) *this_cache;

  *this_id = frame_id_build (cache->prev_sp, get_frame_pc (this_frame));
}
2891
2892 static int
2893 arm_stub_unwind_sniffer (const struct frame_unwind *self,
2894 struct frame_info *this_frame,
2895 void **this_prologue_cache)
2896 {
2897 CORE_ADDR addr_in_block;
2898 gdb_byte dummy[4];
2899 CORE_ADDR pc, start_addr;
2900 const char *name;
2901
2902 addr_in_block = get_frame_address_in_block (this_frame);
2903 pc = get_frame_pc (this_frame);
2904 if (in_plt_section (addr_in_block)
2905 /* We also use the stub winder if the target memory is unreadable
2906 to avoid having the prologue unwinder trying to read it. */
2907 || target_read_memory (pc, dummy, 4) != 0)
2908 return 1;
2909
2910 if (find_pc_partial_function (pc, &name, &start_addr, NULL) == 0
2911 && arm_skip_bx_reg (this_frame, pc) != 0)
2912 return 1;
2913
2914 return 0;
2915 }
2916
/* Unwinder for stub frames (PLT entries, unreadable code, inlined
   'bx <Rm>' trampolines); shares prev_register with the prologue
   unwinder.  */
struct frame_unwind arm_stub_unwind = {
  NORMAL_FRAME,
  default_frame_unwind_stop_reason,
  arm_stub_this_id,
  arm_prologue_prev_register,
  NULL,
  arm_stub_unwind_sniffer
};
2925
/* Build a prologue cache for a Cortex-M exception frame: record in
   CACHE->saved_regs the stack addresses of the registers the hardware
   pushed on exception entry (xPSR, ReturnAddress, LR, R12, R3-R0, and
   optionally the FP context), and reconstruct the pre-exception SP.
   CACHE is returned.  */

static struct arm_prologue_cache *
arm_m_exception_cache (struct frame_info *this_frame)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  struct arm_prologue_cache *cache;
  CORE_ADDR lr;
  CORE_ADDR sp;
  CORE_ADDR unwound_sp;
  LONGEST xpsr;
  uint32_t exc_return;
  uint32_t process_stack_used;
  uint32_t extended_frame_used;
  uint32_t secure_stack_used;

  cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);

  /* ARMv7-M Architecture Reference "B1.5.6 Exception entry behavior"
     describes which bits in LR that define which stack was used prior
     to the exception and if FPU is used (causing extended stack frame).  */

  lr = get_frame_register_unsigned (this_frame, ARM_LR_REGNUM);
  sp = get_frame_register_unsigned (this_frame, ARM_SP_REGNUM);

  /* Check EXC_RETURN indicator bits (LR[31:28] all ones).  */
  exc_return = (((lr >> 28) & 0xf) == 0xf);

  /* Check EXC_RETURN bit SPSEL if Main or Thread (process) stack used.  */
  process_stack_used = ((lr & (1 << 2)) != 0);
  if (exc_return && process_stack_used)
    {
      /* Thread (process) stack used.
	 Potentially this could be other register defined by target, but PSP
	 can be considered a standard name for the "Process Stack Pointer".
	 To be fully aware of system registers like MSP and PSP, these could
	 be added to a separate XML arm-m-system-profile that is valid for
	 ARMv6-M and ARMv7-M architectures. Also to be able to debug eg a
	 corefile off-line, then these registers must be defined by GDB,
	 and also be included in the corefile regsets.  */

      int psp_regnum = user_reg_map_name_to_regnum (gdbarch, "psp", -1);
      if (psp_regnum == -1)
	{
	  /* Thread (process) stack could not be fetched,
	     give warning and exit.  */

	  warning (_("no PSP thread stack unwinding supported."));

	  /* Terminate any further stack unwinding by refer to self.  */
	  cache->prev_sp = sp;
	  return cache;
	}
      else
	{
	  /* Thread (process) stack used, use PSP as SP.  */
	  unwound_sp = get_frame_register_unsigned (this_frame, psp_regnum);
	}
    }
  else
    {
      /* Main stack used, use MSP as SP.  */
      unwound_sp = sp;
    }

  /* The hardware saves eight 32-bit words, comprising xPSR,
     ReturnAddress, LR (R14), R12, R3, R2, R1, R0.  See details in
     "B1.5.6 Exception entry behavior" in
     "ARMv7-M Architecture Reference Manual".  */
  cache->saved_regs[0].addr = unwound_sp;
  cache->saved_regs[1].addr = unwound_sp + 4;
  cache->saved_regs[2].addr = unwound_sp + 8;
  cache->saved_regs[3].addr = unwound_sp + 12;
  cache->saved_regs[ARM_IP_REGNUM].addr = unwound_sp + 16;
  cache->saved_regs[ARM_LR_REGNUM].addr = unwound_sp + 20;
  cache->saved_regs[ARM_PC_REGNUM].addr = unwound_sp + 24;
  cache->saved_regs[ARM_PS_REGNUM].addr = unwound_sp + 28;

  /* Check EXC_RETURN bit FTYPE if extended stack frame (FPU regs stored)
     type used.  */
  extended_frame_used = ((lr & (1 << 4)) == 0);
  if (exc_return && extended_frame_used)
    {
      int i;
      int fpu_regs_stack_offset;

      /* This code does not take into account the lazy stacking, see "Lazy
	 context save of FP state", in B1.5.7, also ARM AN298, supported
	 by Cortex-M4F architecture.
	 To fully handle this the FPCCR register (Floating-point Context
	 Control Register) needs to be read out and the bits ASPEN and LSPEN
	 could be checked to setup correct lazy stacked FP registers.
	 This register is located at address 0xE000EF34.  */

      /* Extended stack frame type used: S0..S15 at 0x20, FPSCR at
	 0x60, recorded here as 4-byte halves of D0..D7.  */
      fpu_regs_stack_offset = unwound_sp + 0x20;
      for (i = 0; i < 16; i++)
	{
	  cache->saved_regs[ARM_D0_REGNUM + i].addr = fpu_regs_stack_offset;
	  fpu_regs_stack_offset += 4;
	}
      cache->saved_regs[ARM_FPSCR_REGNUM].addr = unwound_sp + 0x60;

      /* Offset 0x64 is reserved.  */
      cache->prev_sp = unwound_sp + 0x68;
    }
  else
    {
      /* Standard stack frame type used.  */
      cache->prev_sp = unwound_sp + 0x20;
    }

  /* Check EXC_RETURN bit S if Secure or Non-secure stack used.  */
  secure_stack_used = ((lr & (1 << 6)) != 0);
  if (exc_return && secure_stack_used)
    {
      /* ARMv8-M Exception and interrupt handling is not considered here.
	 In the ARMv8-M architecture also EXC_RETURN bit S is controlling if
	 the Secure or Non-secure stack was used. To separate Secure and
	 Non-secure stacks, processors that are based on the ARMv8-M
	 architecture support 4 stack pointers: MSP_S, PSP_S, MSP_NS, PSP_NS.
	 In addition, a stack limit feature is provided using stack limit
	 registers (accessible using MSR and MRS instructions) in Privileged
	 level.  */
    }

  /* If bit 9 of the saved xPSR is set, then there is a four-byte
     aligner between the top of the 32-byte stack frame and the
     previous context's stack pointer.  */
  if (safe_read_memory_integer (unwound_sp + 28, 4, byte_order, &xpsr)
      && (xpsr & (1 << 9)) != 0)
    cache->prev_sp += 4;

  return cache;
}
3065
3066 /* Implementation of function hook 'this_id' in
3067 'struct frame_uwnind'. */
3068
3069 static void
3070 arm_m_exception_this_id (struct frame_info *this_frame,
3071 void **this_cache,
3072 struct frame_id *this_id)
3073 {
3074 struct arm_prologue_cache *cache;
3075
3076 if (*this_cache == NULL)
3077 *this_cache = arm_m_exception_cache (this_frame);
3078 cache = (struct arm_prologue_cache *) *this_cache;
3079
3080 /* Our frame ID for a stub frame is the current SP and LR. */
3081 *this_id = frame_id_build (cache->prev_sp,
3082 get_frame_pc (this_frame));
3083 }
3084
3085 /* Implementation of function hook 'prev_register' in
3086 'struct frame_uwnind'. */
3087
3088 static struct value *
3089 arm_m_exception_prev_register (struct frame_info *this_frame,
3090 void **this_cache,
3091 int prev_regnum)
3092 {
3093 struct arm_prologue_cache *cache;
3094
3095 if (*this_cache == NULL)
3096 *this_cache = arm_m_exception_cache (this_frame);
3097 cache = (struct arm_prologue_cache *) *this_cache;
3098
3099 /* The value was already reconstructed into PREV_SP. */
3100 if (prev_regnum == ARM_SP_REGNUM)
3101 return frame_unwind_got_constant (this_frame, prev_regnum,
3102 cache->prev_sp);
3103
3104 return trad_frame_get_prev_register (this_frame, cache->saved_regs,
3105 prev_regnum);
3106 }
3107
3108 /* Implementation of function hook 'sniffer' in
3109 'struct frame_uwnind'. */
3110
3111 static int
3112 arm_m_exception_unwind_sniffer (const struct frame_unwind *self,
3113 struct frame_info *this_frame,
3114 void **this_prologue_cache)
3115 {
3116 CORE_ADDR this_pc = get_frame_pc (this_frame);
3117
3118 /* No need to check is_m; this sniffer is only registered for
3119 M-profile architectures. */
3120
3121 /* Check if exception frame returns to a magic PC value. */
3122 return arm_m_addr_is_magic (this_pc);
3123 }
3124
/* Frame unwinder for M-profile exceptions.  Classified as a signal
   trampoline frame since control arrived here via hardware exception
   entry rather than an ordinary call.  */

struct frame_unwind arm_m_exception_unwind =
{
  SIGTRAMP_FRAME,
  default_frame_unwind_stop_reason,
  arm_m_exception_this_id,
  arm_m_exception_prev_register,
  NULL,
  arm_m_exception_unwind_sniffer
};
3136
3137 static CORE_ADDR
3138 arm_normal_frame_base (struct frame_info *this_frame, void **this_cache)
3139 {
3140 struct arm_prologue_cache *cache;
3141
3142 if (*this_cache == NULL)
3143 *this_cache = arm_make_prologue_cache (this_frame);
3144 cache = (struct arm_prologue_cache *) *this_cache;
3145
3146 return cache->prev_sp - cache->framesize;
3147 }
3148
/* Frame base handlers for normal (prologue-unwound) ARM frames; the
   same address is used for the frame base, locals base and args
   base.  */

struct frame_base arm_normal_base = {
  &arm_prologue_unwind,
  arm_normal_frame_base,
  arm_normal_frame_base,
  arm_normal_frame_base
};
3155
3156 static struct value *
3157 arm_dwarf2_prev_register (struct frame_info *this_frame, void **this_cache,
3158 int regnum)
3159 {
3160 struct gdbarch * gdbarch = get_frame_arch (this_frame);
3161 CORE_ADDR lr, cpsr;
3162 ULONGEST t_bit = arm_psr_thumb_bit (gdbarch);
3163
3164 switch (regnum)
3165 {
3166 case ARM_PC_REGNUM:
3167 /* The PC is normally copied from the return column, which
3168 describes saves of LR. However, that version may have an
3169 extra bit set to indicate Thumb state. The bit is not
3170 part of the PC. */
3171 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
3172 return frame_unwind_got_constant (this_frame, regnum,
3173 arm_addr_bits_remove (gdbarch, lr));
3174
3175 case ARM_PS_REGNUM:
3176 /* Reconstruct the T bit; see arm_prologue_prev_register for details. */
3177 cpsr = get_frame_register_unsigned (this_frame, regnum);
3178 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
3179 if (IS_THUMB_ADDR (lr))
3180 cpsr |= t_bit;
3181 else
3182 cpsr &= ~t_bit;
3183 return frame_unwind_got_constant (this_frame, regnum, cpsr);
3184
3185 default:
3186 internal_error (__FILE__, __LINE__,
3187 _("Unexpected register %d"), regnum);
3188 }
3189 }
3190
3191 static void
3192 arm_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
3193 struct dwarf2_frame_state_reg *reg,
3194 struct frame_info *this_frame)
3195 {
3196 switch (regnum)
3197 {
3198 case ARM_PC_REGNUM:
3199 case ARM_PS_REGNUM:
3200 reg->how = DWARF2_FRAME_REG_FN;
3201 reg->loc.fn = arm_dwarf2_prev_register;
3202 break;
3203 case ARM_SP_REGNUM:
3204 reg->how = DWARF2_FRAME_REG_CFA;
3205 break;
3206 }
3207 }
3208
/* Implement the stack_frame_destroyed_p gdbarch method for Thumb
   code: return non-zero if PC appears to be within the epilogue of
   its function.  */

static int
thumb_stack_frame_destroyed_p (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  unsigned int insn, insn2;
  int found_return = 0, found_stack_adjust = 0;
  CORE_ADDR func_start, func_end;
  CORE_ADDR scan_pc;
  gdb_byte buf[4];

  /* Without function bounds we cannot scan; assume not destroyed.  */
  if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
    return 0;

  /* The epilogue is a sequence of instructions along the following lines:

    - add stack frame size to SP or FP
    - [if frame pointer used] restore SP from FP
    - restore registers from SP [may include PC]
    - a return-type instruction [if PC wasn't already restored]

    In a first pass, we scan forward from the current PC and verify the
    instructions we find as compatible with this sequence, ending in a
    return instruction.

    However, this is not sufficient to distinguish indirect function calls
    within a function from indirect tail calls in the epilogue in some cases.
    Therefore, if we didn't already find any SP-changing instruction during
    forward scan, we add a backward scanning heuristic to ensure we actually
    are in the epilogue.  */

  scan_pc = pc;
  while (scan_pc < func_end && !found_return)
    {
      if (target_read_memory (scan_pc, buf, 2))
	break;

      scan_pc += 2;
      insn = extract_unsigned_integer (buf, 2, byte_order_for_code);

      if ((insn & 0xff80) == 0x4700)  /* bx <Rm> */
	found_return = 1;
      else if (insn == 0x46f7)  /* mov pc, lr */
	found_return = 1;
      else if (thumb_instruction_restores_sp (insn))
	{
	  if ((insn & 0xff00) == 0xbd00)  /* pop <registers, PC> */
	    found_return = 1;
	}
      else if (thumb_insn_size (insn) == 4)  /* 32-bit Thumb-2 instruction */
	{
	  /* Fetch the second halfword of the 32-bit instruction.  */
	  if (target_read_memory (scan_pc, buf, 2))
	    break;

	  scan_pc += 2;
	  insn2 = extract_unsigned_integer (buf, 2, byte_order_for_code);

	  if (insn == 0xe8bd)  /* ldm.w sp!, <registers> */
	    {
	      if (insn2 & 0x8000)  /* <registers> include PC.  */
		found_return = 1;
	    }
	  else if (insn == 0xf85d  /* ldr.w <Rt>, [sp], #4 */
		   && (insn2 & 0x0fff) == 0x0b04)
	    {
	      if ((insn2 & 0xf000) == 0xf000)  /* <Rt> is PC.  */
		found_return = 1;
	    }
	  else if ((insn & 0xffbf) == 0xecbd  /* vldm sp!, <list> */
		   && (insn2 & 0x0e00) == 0x0a00)
	    ;
	  else
	    break;
	}
      else
	break;
    }

  if (!found_return)
    return 0;

  /* Since any instruction in the epilogue sequence, with the possible
     exception of return itself, updates the stack pointer, we need to
     scan backwards for at most one instruction.  Try either a 16-bit or
     a 32-bit instruction.  This is just a heuristic, so we do not worry
     too much about false positives.  */

  if (pc - 4 < func_start)
    return 0;
  if (target_read_memory (pc - 4, buf, 4))
    return 0;

  /* INSN2 alone covers the 16-bit-instruction case (the preceding
     halfword); INSN/INSN2 together cover the 32-bit case.  */
  insn = extract_unsigned_integer (buf, 2, byte_order_for_code);
  insn2 = extract_unsigned_integer (buf + 2, 2, byte_order_for_code);

  if (thumb_instruction_restores_sp (insn2))
    found_stack_adjust = 1;
  else if (insn == 0xe8bd)  /* ldm.w sp!, <registers> */
    found_stack_adjust = 1;
  else if (insn == 0xf85d  /* ldr.w <Rt>, [sp], #4 */
	   && (insn2 & 0x0fff) == 0x0b04)
    found_stack_adjust = 1;
  else if ((insn & 0xffbf) == 0xecbd  /* vldm sp!, <list> */
	   && (insn2 & 0x0e00) == 0x0a00)
    found_stack_adjust = 1;

  return found_stack_adjust;
}
3318
/* Return non-zero if PC appears to be within the epilogue of its
   function, for ARM (32-bit) mode code.  Helper for
   arm_stack_frame_destroyed_p.  */

static int
arm_stack_frame_destroyed_p_1 (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  unsigned int insn;
  int found_return;
  CORE_ADDR func_start, func_end;

  /* Without function bounds we cannot scan; assume not destroyed.  */
  if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
    return 0;

  /* We are in the epilogue if the previous instruction was a stack
     adjustment and the next instruction is a possible return (bx, mov
     pc, or pop).  We could have to scan backwards to find the stack
     adjustment, or forwards to find the return, but this is a decent
     approximation.  First scan forwards.  */

  found_return = 0;
  insn = read_memory_unsigned_integer (pc, 4, byte_order_for_code);
  /* Skip unconditional (NV-space) encodings; they are not plain
     return instructions.  */
  if (bits (insn, 28, 31) != INST_NV)
    {
      if ((insn & 0x0ffffff0) == 0x012fff10)
	/* BX.  */
	found_return = 1;
      else if ((insn & 0x0ffffff0) == 0x01a0f000)
	/* MOV PC.  */
	found_return = 1;
      else if ((insn & 0x0fff0000) == 0x08bd0000
	       && (insn & 0x0000c000) != 0)
	/* POP (LDMIA), including PC or LR.  */
	found_return = 1;
    }

  if (!found_return)
    return 0;

  /* Scan backwards.  This is just a heuristic, so do not worry about
     false positives from mode changes.  */

  /* Need at least one full instruction before PC inside the
     function.  */
  if (pc < func_start + 4)
    return 0;

  insn = read_memory_unsigned_integer (pc - 4, 4, byte_order_for_code);
  if (arm_instruction_restores_sp (insn))
    return 1;

  return 0;
}
3367
3368 /* Implement the stack_frame_destroyed_p gdbarch method. */
3369
3370 static int
3371 arm_stack_frame_destroyed_p (struct gdbarch *gdbarch, CORE_ADDR pc)
3372 {
3373 if (arm_pc_is_thumb (gdbarch, pc))
3374 return thumb_stack_frame_destroyed_p (gdbarch, pc);
3375 else
3376 return arm_stack_frame_destroyed_p_1 (gdbarch, pc);
3377 }
3378
/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

struct stack_item
{
  /* Number of bytes in DATA.  */
  int len;
  /* Next-older item, or NULL at the bottom of the stack.  */
  struct stack_item *prev;
  /* Heap-allocated copy of the argument bytes; owned by this item.  */
  gdb_byte *data;
};
3388
3389 static struct stack_item *
3390 push_stack_item (struct stack_item *prev, const gdb_byte *contents, int len)
3391 {
3392 struct stack_item *si;
3393 si = XNEW (struct stack_item);
3394 si->data = (gdb_byte *) xmalloc (len);
3395 si->len = len;
3396 si->prev = prev;
3397 memcpy (si->data, contents, len);
3398 return si;
3399 }
3400
3401 static struct stack_item *
3402 pop_stack_item (struct stack_item *si)
3403 {
3404 struct stack_item *dead = si;
3405 si = si->prev;
3406 xfree (dead->data);
3407 xfree (dead);
3408 return si;
3409 }
3410
3411 /* Implement the gdbarch type alignment method, overrides the generic
3412 alignment algorithm for anything that is arm specific. */
3413
3414 static ULONGEST
3415 arm_type_align (gdbarch *gdbarch, struct type *t)
3416 {
3417 t = check_typedef (t);
3418 if (t->code () == TYPE_CODE_ARRAY && t->is_vector ())
3419 {
3420 /* Use the natural alignment for vector types (the same for
3421 scalar type), but the maximum alignment is 64-bit. */
3422 if (TYPE_LENGTH (t) > 8)
3423 return 8;
3424 else
3425 return TYPE_LENGTH (t);
3426 }
3427
3428 /* Allow the common code to calculate the alignment. */
3429 return 0;
3430 }
3431
/* Possible base types for a candidate for passing and returning in
   VFP registers.  */

enum arm_vfp_cprc_base_type
{
  VFP_CPRC_UNKNOWN,	/* Not classified yet.  */
  VFP_CPRC_SINGLE,	/* 32-bit single-precision float.  */
  VFP_CPRC_DOUBLE,	/* 64-bit double-precision float.  */
  VFP_CPRC_VEC64,	/* 64-bit containerized vector.  */
  VFP_CPRC_VEC128	/* 128-bit containerized vector.  */
};
3443
3444 /* The length of one element of base type B. */
3445
3446 static unsigned
3447 arm_vfp_cprc_unit_length (enum arm_vfp_cprc_base_type b)
3448 {
3449 switch (b)
3450 {
3451 case VFP_CPRC_SINGLE:
3452 return 4;
3453 case VFP_CPRC_DOUBLE:
3454 return 8;
3455 case VFP_CPRC_VEC64:
3456 return 8;
3457 case VFP_CPRC_VEC128:
3458 return 16;
3459 default:
3460 internal_error (__FILE__, __LINE__, _("Invalid VFP CPRC type: %d."),
3461 (int) b);
3462 }
3463 }
3464
3465 /* The character ('s', 'd' or 'q') for the type of VFP register used
3466 for passing base type B. */
3467
3468 static int
3469 arm_vfp_cprc_reg_char (enum arm_vfp_cprc_base_type b)
3470 {
3471 switch (b)
3472 {
3473 case VFP_CPRC_SINGLE:
3474 return 's';
3475 case VFP_CPRC_DOUBLE:
3476 return 'd';
3477 case VFP_CPRC_VEC64:
3478 return 'd';
3479 case VFP_CPRC_VEC128:
3480 return 'q';
3481 default:
3482 internal_error (__FILE__, __LINE__, _("Invalid VFP CPRC type: %d."),
3483 (int) b);
3484 }
3485 }
3486
/* Determine whether T may be part of a candidate for passing and
   returning in VFP registers, ignoring the limit on the total number
   of components.  If *BASE_TYPE is VFP_CPRC_UNKNOWN, set it to the
   classification of the first valid component found; if it is not
   VFP_CPRC_UNKNOWN, all components must have the same classification
   as *BASE_TYPE.  If it is found that T contains a type not permitted
   for passing and returning in VFP registers, a type differently
   classified from *BASE_TYPE, or two types differently classified
   from each other, return -1, otherwise return the total number of
   base-type elements found (possibly 0 in an empty structure or
   array).  Vector types are not currently supported, matching the
   generic AAPCS support.  */

static int
arm_vfp_cprc_sub_candidate (struct type *t,
			    enum arm_vfp_cprc_base_type *base_type)
{
  t = check_typedef (t);
  switch (t->code ())
    {
    case TYPE_CODE_FLT:
      /* A scalar float is a single element of its own
	 classification.  */
      switch (TYPE_LENGTH (t))
	{
	case 4:
	  if (*base_type == VFP_CPRC_UNKNOWN)
	    *base_type = VFP_CPRC_SINGLE;
	  else if (*base_type != VFP_CPRC_SINGLE)
	    return -1;
	  return 1;

	case 8:
	  if (*base_type == VFP_CPRC_UNKNOWN)
	    *base_type = VFP_CPRC_DOUBLE;
	  else if (*base_type != VFP_CPRC_DOUBLE)
	    return -1;
	  return 1;

	default:
	  return -1;
	}
      break;

    case TYPE_CODE_COMPLEX:
      /* Arguments of complex T where T is one of the types float or
	 double get treated as if they are implemented as:

	 struct complexT
	 {
	   T real;
	   T imag;
	 };

      */
      switch (TYPE_LENGTH (t))
	{
	case 8:
	  if (*base_type == VFP_CPRC_UNKNOWN)
	    *base_type = VFP_CPRC_SINGLE;
	  else if (*base_type != VFP_CPRC_SINGLE)
	    return -1;
	  return 2;

	case 16:
	  if (*base_type == VFP_CPRC_UNKNOWN)
	    *base_type = VFP_CPRC_DOUBLE;
	  else if (*base_type != VFP_CPRC_DOUBLE)
	    return -1;
	  return 2;

	default:
	  return -1;
	}
      break;

    case TYPE_CODE_ARRAY:
      {
	if (t->is_vector ())
	  {
	    /* A 64-bit or 128-bit containerized vector type are VFP
	       CPRCs.  */
	    switch (TYPE_LENGTH (t))
	      {
	      case 8:
		if (*base_type == VFP_CPRC_UNKNOWN)
		  *base_type = VFP_CPRC_VEC64;
		return 1;
	      case 16:
		if (*base_type == VFP_CPRC_UNKNOWN)
		  *base_type = VFP_CPRC_VEC128;
		return 1;
	      default:
		return -1;
	      }
	  }
	else
	  {
	    /* An ordinary array counts as N copies of its element
	       classification.  */
	    int count;
	    unsigned unitlen;

	    count = arm_vfp_cprc_sub_candidate (TYPE_TARGET_TYPE (t),
						base_type);
	    if (count == -1)
	      return -1;
	    if (TYPE_LENGTH (t) == 0)
	      {
		gdb_assert (count == 0);
		return 0;
	      }
	    else if (count == 0)
	      return -1;
	    unitlen = arm_vfp_cprc_unit_length (*base_type);
	    gdb_assert ((TYPE_LENGTH (t) % unitlen) == 0);
	    return TYPE_LENGTH (t) / unitlen;
	  }
      }
      break;

    case TYPE_CODE_STRUCT:
      {
	/* Sum the element counts of all non-static fields; the struct
	   must then be exactly packed with base-type units.  */
	int count = 0;
	unsigned unitlen;
	int i;
	for (i = 0; i < t->num_fields (); i++)
	  {
	    int sub_count = 0;

	    if (!field_is_static (&t->field (i)))
	      sub_count = arm_vfp_cprc_sub_candidate (t->field (i).type (),
						      base_type);
	    if (sub_count == -1)
	      return -1;
	    count += sub_count;
	  }
	if (TYPE_LENGTH (t) == 0)
	  {
	    gdb_assert (count == 0);
	    return 0;
	  }
	else if (count == 0)
	  return -1;
	unitlen = arm_vfp_cprc_unit_length (*base_type);
	if (TYPE_LENGTH (t) != unitlen * count)
	  return -1;
	return count;
      }

    case TYPE_CODE_UNION:
      {
	/* A union counts as its largest member, and must likewise be
	   exactly packed with base-type units.  */
	int count = 0;
	unsigned unitlen;
	int i;
	for (i = 0; i < t->num_fields (); i++)
	  {
	    int sub_count = arm_vfp_cprc_sub_candidate (t->field (i).type (),
							base_type);
	    if (sub_count == -1)
	      return -1;
	    count = (count > sub_count ? count : sub_count);
	  }
	if (TYPE_LENGTH (t) == 0)
	  {
	    gdb_assert (count == 0);
	    return 0;
	  }
	else if (count == 0)
	  return -1;
	unitlen = arm_vfp_cprc_unit_length (*base_type);
	if (TYPE_LENGTH (t) != unitlen * count)
	  return -1;
	return count;
      }

    default:
      break;
    }

  return -1;
}
3665
3666 /* Determine whether T is a VFP co-processor register candidate (CPRC)
3667 if passed to or returned from a non-variadic function with the VFP
3668 ABI in effect. Return 1 if it is, 0 otherwise. If it is, set
3669 *BASE_TYPE to the base type for T and *COUNT to the number of
3670 elements of that base type before returning. */
3671
3672 static int
3673 arm_vfp_call_candidate (struct type *t, enum arm_vfp_cprc_base_type *base_type,
3674 int *count)
3675 {
3676 enum arm_vfp_cprc_base_type b = VFP_CPRC_UNKNOWN;
3677 int c = arm_vfp_cprc_sub_candidate (t, &b);
3678 if (c <= 0 || c > 4)
3679 return 0;
3680 *base_type = b;
3681 *count = c;
3682 return 1;
3683 }
3684
3685 /* Return 1 if the VFP ABI should be used for passing arguments to and
3686 returning values from a function of type FUNC_TYPE, 0
3687 otherwise. */
3688
3689 static int
3690 arm_vfp_abi_for_function (struct gdbarch *gdbarch, struct type *func_type)
3691 {
3692 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3693 /* Variadic functions always use the base ABI. Assume that functions
3694 without debug info are not variadic. */
3695 if (func_type && check_typedef (func_type)->has_varargs ())
3696 return 0;
3697 /* The VFP ABI is only supported as a variant of AAPCS. */
3698 if (tdep->arm_abi != ARM_ABI_AAPCS)
3699 return 0;
3700 return gdbarch_tdep (gdbarch)->fp_model == ARM_FLOAT_VFP;
3701 }
3702
3703 /* We currently only support passing parameters in integer registers, which
3704 conforms with GCC's default model, and VFP argument passing following
3705 the VFP variant of AAPCS. Several other variants exist and
3706 we should probably support some of them based on the selected ABI. */
3707
3708 static CORE_ADDR
3709 arm_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
3710 struct regcache *regcache, CORE_ADDR bp_addr, int nargs,
3711 struct value **args, CORE_ADDR sp,
3712 function_call_return_method return_method,
3713 CORE_ADDR struct_addr)
3714 {
3715 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3716 int argnum;
3717 int argreg;
3718 int nstack;
3719 struct stack_item *si = NULL;
3720 int use_vfp_abi;
3721 struct type *ftype;
3722 unsigned vfp_regs_free = (1 << 16) - 1;
3723
3724 /* Determine the type of this function and whether the VFP ABI
3725 applies. */
3726 ftype = check_typedef (value_type (function));
3727 if (ftype->code () == TYPE_CODE_PTR)
3728 ftype = check_typedef (TYPE_TARGET_TYPE (ftype));
3729 use_vfp_abi = arm_vfp_abi_for_function (gdbarch, ftype);
3730
3731 /* Set the return address. For the ARM, the return breakpoint is
3732 always at BP_ADDR. */
3733 if (arm_pc_is_thumb (gdbarch, bp_addr))
3734 bp_addr |= 1;
3735 regcache_cooked_write_unsigned (regcache, ARM_LR_REGNUM, bp_addr);
3736
3737 /* Walk through the list of args and determine how large a temporary
3738 stack is required. Need to take care here as structs may be
3739 passed on the stack, and we have to push them. */
3740 nstack = 0;
3741
3742 argreg = ARM_A1_REGNUM;
3743 nstack = 0;
3744
3745 /* The struct_return pointer occupies the first parameter
3746 passing register. */
3747 if (return_method == return_method_struct)
3748 {
3749 if (arm_debug)
3750 fprintf_unfiltered (gdb_stdlog, "struct return in %s = %s\n",
3751 gdbarch_register_name (gdbarch, argreg),
3752 paddress (gdbarch, struct_addr));
3753 regcache_cooked_write_unsigned (regcache, argreg, struct_addr);
3754 argreg++;
3755 }
3756
3757 for (argnum = 0; argnum < nargs; argnum++)
3758 {
3759 int len;
3760 struct type *arg_type;
3761 struct type *target_type;
3762 enum type_code typecode;
3763 const bfd_byte *val;
3764 int align;
3765 enum arm_vfp_cprc_base_type vfp_base_type;
3766 int vfp_base_count;
3767 int may_use_core_reg = 1;
3768
3769 arg_type = check_typedef (value_type (args[argnum]));
3770 len = TYPE_LENGTH (arg_type);
3771 target_type = TYPE_TARGET_TYPE (arg_type);
3772 typecode = arg_type->code ();
3773 val = value_contents (args[argnum]);
3774
3775 align = type_align (arg_type);
3776 /* Round alignment up to a whole number of words. */
3777 align = (align + ARM_INT_REGISTER_SIZE - 1)
3778 & ~(ARM_INT_REGISTER_SIZE - 1);
3779 /* Different ABIs have different maximum alignments. */
3780 if (gdbarch_tdep (gdbarch)->arm_abi == ARM_ABI_APCS)
3781 {
3782 /* The APCS ABI only requires word alignment. */
3783 align = ARM_INT_REGISTER_SIZE;
3784 }
3785 else
3786 {
3787 /* The AAPCS requires at most doubleword alignment. */
3788 if (align > ARM_INT_REGISTER_SIZE * 2)
3789 align = ARM_INT_REGISTER_SIZE * 2;
3790 }
3791
3792 if (use_vfp_abi
3793 && arm_vfp_call_candidate (arg_type, &vfp_base_type,
3794 &vfp_base_count))
3795 {
3796 int regno;
3797 int unit_length;
3798 int shift;
3799 unsigned mask;
3800
3801 /* Because this is a CPRC it cannot go in a core register or
3802 cause a core register to be skipped for alignment.
3803 Either it goes in VFP registers and the rest of this loop
3804 iteration is skipped for this argument, or it goes on the
3805 stack (and the stack alignment code is correct for this
3806 case). */
3807 may_use_core_reg = 0;
3808
3809 unit_length = arm_vfp_cprc_unit_length (vfp_base_type);
3810 shift = unit_length / 4;
3811 mask = (1 << (shift * vfp_base_count)) - 1;
3812 for (regno = 0; regno < 16; regno += shift)
3813 if (((vfp_regs_free >> regno) & mask) == mask)
3814 break;
3815
3816 if (regno < 16)
3817 {
3818 int reg_char;
3819 int reg_scaled;
3820 int i;
3821
3822 vfp_regs_free &= ~(mask << regno);
3823 reg_scaled = regno / shift;
3824 reg_char = arm_vfp_cprc_reg_char (vfp_base_type);
3825 for (i = 0; i < vfp_base_count; i++)
3826 {
3827 char name_buf[4];
3828 int regnum;
3829 if (reg_char == 'q')
3830 arm_neon_quad_write (gdbarch, regcache, reg_scaled + i,
3831 val + i * unit_length);
3832 else
3833 {
3834 xsnprintf (name_buf, sizeof (name_buf), "%c%d",
3835 reg_char, reg_scaled + i);
3836 regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
3837 strlen (name_buf));
3838 regcache->cooked_write (regnum, val + i * unit_length);
3839 }
3840 }
3841 continue;
3842 }
3843 else
3844 {
3845 /* This CPRC could not go in VFP registers, so all VFP
3846 registers are now marked as used. */
3847 vfp_regs_free = 0;
3848 }
3849 }
3850
3851 /* Push stack padding for doubleword alignment. */
3852 if (nstack & (align - 1))
3853 {
3854 si = push_stack_item (si, val, ARM_INT_REGISTER_SIZE);
3855 nstack += ARM_INT_REGISTER_SIZE;
3856 }
3857
3858 /* Doubleword aligned quantities must go in even register pairs. */
3859 if (may_use_core_reg
3860 && argreg <= ARM_LAST_ARG_REGNUM
3861 && align > ARM_INT_REGISTER_SIZE
3862 && argreg & 1)
3863 argreg++;
3864
3865 /* If the argument is a pointer to a function, and it is a
3866 Thumb function, create a LOCAL copy of the value and set
3867 the THUMB bit in it. */
3868 if (TYPE_CODE_PTR == typecode
3869 && target_type != NULL
3870 && TYPE_CODE_FUNC == check_typedef (target_type)->code ())
3871 {
3872 CORE_ADDR regval = extract_unsigned_integer (val, len, byte_order);
3873 if (arm_pc_is_thumb (gdbarch, regval))
3874 {
3875 bfd_byte *copy = (bfd_byte *) alloca (len);
3876 store_unsigned_integer (copy, len, byte_order,
3877 MAKE_THUMB_ADDR (regval));
3878 val = copy;
3879 }
3880 }
3881
3882 /* Copy the argument to general registers or the stack in
3883 register-sized pieces. Large arguments are split between
3884 registers and stack. */
3885 while (len > 0)
3886 {
3887 int partial_len = len < ARM_INT_REGISTER_SIZE
3888 ? len : ARM_INT_REGISTER_SIZE;
3889 CORE_ADDR regval
3890 = extract_unsigned_integer (val, partial_len, byte_order);
3891
3892 if (may_use_core_reg && argreg <= ARM_LAST_ARG_REGNUM)
3893 {
3894 /* The argument is being passed in a general purpose
3895 register. */
3896 if (byte_order == BFD_ENDIAN_BIG)
3897 regval <<= (ARM_INT_REGISTER_SIZE - partial_len) * 8;
3898 if (arm_debug)
3899 fprintf_unfiltered (gdb_stdlog, "arg %d in %s = 0x%s\n",
3900 argnum,
3901 gdbarch_register_name
3902 (gdbarch, argreg),
3903 phex (regval, ARM_INT_REGISTER_SIZE));
3904 regcache_cooked_write_unsigned (regcache, argreg, regval);
3905 argreg++;
3906 }
3907 else
3908 {
3909 gdb_byte buf[ARM_INT_REGISTER_SIZE];
3910
3911 memset (buf, 0, sizeof (buf));
3912 store_unsigned_integer (buf, partial_len, byte_order, regval);
3913
3914 /* Push the arguments onto the stack. */
3915 if (arm_debug)
3916 fprintf_unfiltered (gdb_stdlog, "arg %d @ sp + %d\n",
3917 argnum, nstack);
3918 si = push_stack_item (si, buf, ARM_INT_REGISTER_SIZE);
3919 nstack += ARM_INT_REGISTER_SIZE;
3920 }
3921
3922 len -= partial_len;
3923 val += partial_len;
3924 }
3925 }
3926 /* If we have an odd number of words to push, then decrement the stack
3927 by one word now, so first stack argument will be dword aligned. */
3928 if (nstack & 4)
3929 sp -= 4;
3930
3931 while (si)
3932 {
3933 sp -= si->len;
3934 write_memory (sp, si->data, si->len);
3935 si = pop_stack_item (si);
3936 }
3937
3938 /* Finally, update teh SP register. */
3939 regcache_cooked_write_unsigned (regcache, ARM_SP_REGNUM, sp);
3940
3941 return sp;
3942 }
3943
3944
3945 /* Always align the frame to an 8-byte boundary. This is required on
3946 some platforms and harmless on the rest. */
3947
3948 static CORE_ADDR
3949 arm_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
3950 {
3951 /* Align the stack to eight bytes. */
3952 return sp & ~ (CORE_ADDR) 7;
3953 }
3954
3955 static void
3956 print_fpu_flags (struct ui_file *file, int flags)
3957 {
3958 if (flags & (1 << 0))
3959 fputs_filtered ("IVO ", file);
3960 if (flags & (1 << 1))
3961 fputs_filtered ("DVZ ", file);
3962 if (flags & (1 << 2))
3963 fputs_filtered ("OFL ", file);
3964 if (flags & (1 << 3))
3965 fputs_filtered ("UFL ", file);
3966 if (flags & (1 << 4))
3967 fputs_filtered ("INX ", file);
3968 fputc_filtered ('\n', file);
3969 }
3970
3971 /* Print interesting information about the floating point processor
3972 (if present) or emulator. */
3973 static void
3974 arm_print_float_info (struct gdbarch *gdbarch, struct ui_file *file,
3975 struct frame_info *frame, const char *args)
3976 {
3977 unsigned long status = get_frame_register_unsigned (frame, ARM_FPS_REGNUM);
3978 int type;
3979
3980 type = (status >> 24) & 127;
3981 if (status & (1 << 31))
3982 fprintf_filtered (file, _("Hardware FPU type %d\n"), type);
3983 else
3984 fprintf_filtered (file, _("Software FPU type %d\n"), type);
3985 /* i18n: [floating point unit] mask */
3986 fputs_filtered (_("mask: "), file);
3987 print_fpu_flags (file, status >> 16);
3988 /* i18n: [floating point unit] flags */
3989 fputs_filtered (_("flags: "), file);
3990 print_fpu_flags (file, status);
3991 }
3992
3993 /* Construct the ARM extended floating point type. */
3994 static struct type *
3995 arm_ext_type (struct gdbarch *gdbarch)
3996 {
3997 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3998
3999 if (!tdep->arm_ext_type)
4000 tdep->arm_ext_type
4001 = arch_float_type (gdbarch, -1, "builtin_type_arm_ext",
4002 floatformats_arm_ext);
4003
4004 return tdep->arm_ext_type;
4005 }
4006
4007 static struct type *
4008 arm_neon_double_type (struct gdbarch *gdbarch)
4009 {
4010 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
4011
4012 if (tdep->neon_double_type == NULL)
4013 {
4014 struct type *t, *elem;
4015
4016 t = arch_composite_type (gdbarch, "__gdb_builtin_type_neon_d",
4017 TYPE_CODE_UNION);
4018 elem = builtin_type (gdbarch)->builtin_uint8;
4019 append_composite_type_field (t, "u8", init_vector_type (elem, 8));
4020 elem = builtin_type (gdbarch)->builtin_uint16;
4021 append_composite_type_field (t, "u16", init_vector_type (elem, 4));
4022 elem = builtin_type (gdbarch)->builtin_uint32;
4023 append_composite_type_field (t, "u32", init_vector_type (elem, 2));
4024 elem = builtin_type (gdbarch)->builtin_uint64;
4025 append_composite_type_field (t, "u64", elem);
4026 elem = builtin_type (gdbarch)->builtin_float;
4027 append_composite_type_field (t, "f32", init_vector_type (elem, 2));
4028 elem = builtin_type (gdbarch)->builtin_double;
4029 append_composite_type_field (t, "f64", elem);
4030
4031 t->set_is_vector (true);
4032 t->set_name ("neon_d");
4033 tdep->neon_double_type = t;
4034 }
4035
4036 return tdep->neon_double_type;
4037 }
4038
4039 /* FIXME: The vector types are not correctly ordered on big-endian
4040 targets. Just as s0 is the low bits of d0, d0[0] is also the low
4041 bits of d0 - regardless of what unit size is being held in d0. So
4042 the offset of the first uint8 in d0 is 7, but the offset of the
4043 first float is 4. This code works as-is for little-endian
4044 targets. */
4045
4046 static struct type *
4047 arm_neon_quad_type (struct gdbarch *gdbarch)
4048 {
4049 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
4050
4051 if (tdep->neon_quad_type == NULL)
4052 {
4053 struct type *t, *elem;
4054
4055 t = arch_composite_type (gdbarch, "__gdb_builtin_type_neon_q",
4056 TYPE_CODE_UNION);
4057 elem = builtin_type (gdbarch)->builtin_uint8;
4058 append_composite_type_field (t, "u8", init_vector_type (elem, 16));
4059 elem = builtin_type (gdbarch)->builtin_uint16;
4060 append_composite_type_field (t, "u16", init_vector_type (elem, 8));
4061 elem = builtin_type (gdbarch)->builtin_uint32;
4062 append_composite_type_field (t, "u32", init_vector_type (elem, 4));
4063 elem = builtin_type (gdbarch)->builtin_uint64;
4064 append_composite_type_field (t, "u64", init_vector_type (elem, 2));
4065 elem = builtin_type (gdbarch)->builtin_float;
4066 append_composite_type_field (t, "f32", init_vector_type (elem, 4));
4067 elem = builtin_type (gdbarch)->builtin_double;
4068 append_composite_type_field (t, "f64", init_vector_type (elem, 2));
4069
4070 t->set_is_vector (true);
4071 t->set_name ("neon_q");
4072 tdep->neon_quad_type = t;
4073 }
4074
4075 return tdep->neon_quad_type;
4076 }
4077
4078 /* Return the GDB type object for the "standard" data type of data in
4079 register N. */
4080
4081 static struct type *
4082 arm_register_type (struct gdbarch *gdbarch, int regnum)
4083 {
4084 int num_regs = gdbarch_num_regs (gdbarch);
4085
4086 if (gdbarch_tdep (gdbarch)->have_vfp_pseudos
4087 && regnum >= num_regs && regnum < num_regs + 32)
4088 return builtin_type (gdbarch)->builtin_float;
4089
4090 if (gdbarch_tdep (gdbarch)->have_neon_pseudos
4091 && regnum >= num_regs + 32 && regnum < num_regs + 32 + 16)
4092 return arm_neon_quad_type (gdbarch);
4093
4094 /* If the target description has register information, we are only
4095 in this function so that we can override the types of
4096 double-precision registers for NEON. */
4097 if (tdesc_has_registers (gdbarch_target_desc (gdbarch)))
4098 {
4099 struct type *t = tdesc_register_type (gdbarch, regnum);
4100
4101 if (regnum >= ARM_D0_REGNUM && regnum < ARM_D0_REGNUM + 32
4102 && t->code () == TYPE_CODE_FLT
4103 && gdbarch_tdep (gdbarch)->have_neon)
4104 return arm_neon_double_type (gdbarch);
4105 else
4106 return t;
4107 }
4108
4109 if (regnum >= ARM_F0_REGNUM && regnum < ARM_F0_REGNUM + NUM_FREGS)
4110 {
4111 if (!gdbarch_tdep (gdbarch)->have_fpa_registers)
4112 return builtin_type (gdbarch)->builtin_void;
4113
4114 return arm_ext_type (gdbarch);
4115 }
4116 else if (regnum == ARM_SP_REGNUM)
4117 return builtin_type (gdbarch)->builtin_data_ptr;
4118 else if (regnum == ARM_PC_REGNUM)
4119 return builtin_type (gdbarch)->builtin_func_ptr;
4120 else if (regnum >= ARRAY_SIZE (arm_register_names))
4121 /* These registers are only supported on targets which supply
4122 an XML description. */
4123 return builtin_type (gdbarch)->builtin_int0;
4124 else
4125 return builtin_type (gdbarch)->builtin_uint32;
4126 }
4127
4128 /* Map a DWARF register REGNUM onto the appropriate GDB register
4129 number. */
4130
4131 static int
4132 arm_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
4133 {
4134 /* Core integer regs. */
4135 if (reg >= 0 && reg <= 15)
4136 return reg;
4137
4138 /* Legacy FPA encoding. These were once used in a way which
4139 overlapped with VFP register numbering, so their use is
4140 discouraged, but GDB doesn't support the ARM toolchain
4141 which used them for VFP. */
4142 if (reg >= 16 && reg <= 23)
4143 return ARM_F0_REGNUM + reg - 16;
4144
4145 /* New assignments for the FPA registers. */
4146 if (reg >= 96 && reg <= 103)
4147 return ARM_F0_REGNUM + reg - 96;
4148
4149 /* WMMX register assignments. */
4150 if (reg >= 104 && reg <= 111)
4151 return ARM_WCGR0_REGNUM + reg - 104;
4152
4153 if (reg >= 112 && reg <= 127)
4154 return ARM_WR0_REGNUM + reg - 112;
4155
4156 if (reg >= 192 && reg <= 199)
4157 return ARM_WC0_REGNUM + reg - 192;
4158
4159 /* VFP v2 registers. A double precision value is actually
4160 in d1 rather than s2, but the ABI only defines numbering
4161 for the single precision registers. This will "just work"
4162 in GDB for little endian targets (we'll read eight bytes,
4163 starting in s0 and then progressing to s1), but will be
4164 reversed on big endian targets with VFP. This won't
4165 be a problem for the new Neon quad registers; you're supposed
4166 to use DW_OP_piece for those. */
4167 if (reg >= 64 && reg <= 95)
4168 {
4169 char name_buf[4];
4170
4171 xsnprintf (name_buf, sizeof (name_buf), "s%d", reg - 64);
4172 return user_reg_map_name_to_regnum (gdbarch, name_buf,
4173 strlen (name_buf));
4174 }
4175
4176 /* VFP v3 / Neon registers. This range is also used for VFP v2
4177 registers, except that it now describes d0 instead of s0. */
4178 if (reg >= 256 && reg <= 287)
4179 {
4180 char name_buf[4];
4181
4182 xsnprintf (name_buf, sizeof (name_buf), "d%d", reg - 256);
4183 return user_reg_map_name_to_regnum (gdbarch, name_buf,
4184 strlen (name_buf));
4185 }
4186
4187 return -1;
4188 }
4189
4190 /* Map GDB internal REGNUM onto the Arm simulator register numbers. */
4191 static int
4192 arm_register_sim_regno (struct gdbarch *gdbarch, int regnum)
4193 {
4194 int reg = regnum;
4195 gdb_assert (reg >= 0 && reg < gdbarch_num_regs (gdbarch));
4196
4197 if (regnum >= ARM_WR0_REGNUM && regnum <= ARM_WR15_REGNUM)
4198 return regnum - ARM_WR0_REGNUM + SIM_ARM_IWMMXT_COP0R0_REGNUM;
4199
4200 if (regnum >= ARM_WC0_REGNUM && regnum <= ARM_WC7_REGNUM)
4201 return regnum - ARM_WC0_REGNUM + SIM_ARM_IWMMXT_COP1R0_REGNUM;
4202
4203 if (regnum >= ARM_WCGR0_REGNUM && regnum <= ARM_WCGR7_REGNUM)
4204 return regnum - ARM_WCGR0_REGNUM + SIM_ARM_IWMMXT_COP1R8_REGNUM;
4205
4206 if (reg < NUM_GREGS)
4207 return SIM_ARM_R0_REGNUM + reg;
4208 reg -= NUM_GREGS;
4209
4210 if (reg < NUM_FREGS)
4211 return SIM_ARM_FP0_REGNUM + reg;
4212 reg -= NUM_FREGS;
4213
4214 if (reg < NUM_SREGS)
4215 return SIM_ARM_FPS_REGNUM + reg;
4216 reg -= NUM_SREGS;
4217
4218 internal_error (__FILE__, __LINE__, _("Bad REGNUM %d"), regnum);
4219 }
4220
4221 /* Given BUF, which is OLD_LEN bytes ending at ENDADDR, expand
4222 the buffer to be NEW_LEN bytes ending at ENDADDR. Return
4223 NULL if an error occurs. BUF is freed. */
4224
4225 static gdb_byte *
4226 extend_buffer_earlier (gdb_byte *buf, CORE_ADDR endaddr,
4227 int old_len, int new_len)
4228 {
4229 gdb_byte *new_buf;
4230 int bytes_to_read = new_len - old_len;
4231
4232 new_buf = (gdb_byte *) xmalloc (new_len);
4233 memcpy (new_buf + bytes_to_read, buf, old_len);
4234 xfree (buf);
4235 if (target_read_code (endaddr - new_len, new_buf, bytes_to_read) != 0)
4236 {
4237 xfree (new_buf);
4238 return NULL;
4239 }
4240 return new_buf;
4241 }
4242
4243 /* An IT block is at most the 2-byte IT instruction followed by
4244 four 4-byte instructions. The furthest back we must search to
4245 find an IT block that affects the current instruction is thus
4246 2 + 3 * 4 == 14 bytes. */
4247 #define MAX_IT_BLOCK_PREFIX 14
4248
4249 /* Use a quick scan if there are more than this many bytes of
4250 code. */
4251 #define IT_SCAN_THRESHOLD 32
4252
4253 /* Adjust a breakpoint's address to move breakpoints out of IT blocks.
4254 A breakpoint in an IT block may not be hit, depending on the
4255 condition flags. */
4256 static CORE_ADDR
4257 arm_adjust_breakpoint_address (struct gdbarch *gdbarch, CORE_ADDR bpaddr)
4258 {
4259 gdb_byte *buf;
4260 char map_type;
4261 CORE_ADDR boundary, func_start;
4262 int buf_len;
4263 enum bfd_endian order = gdbarch_byte_order_for_code (gdbarch);
4264 int i, any, last_it, last_it_count;
4265
4266 /* If we are using BKPT breakpoints, none of this is necessary. */
4267 if (gdbarch_tdep (gdbarch)->thumb2_breakpoint == NULL)
4268 return bpaddr;
4269
4270 /* ARM mode does not have this problem. */
4271 if (!arm_pc_is_thumb (gdbarch, bpaddr))
4272 return bpaddr;
4273
4274 /* We are setting a breakpoint in Thumb code that could potentially
4275 contain an IT block. The first step is to find how much Thumb
4276 code there is; we do not need to read outside of known Thumb
4277 sequences. */
4278 map_type = arm_find_mapping_symbol (bpaddr, &boundary);
4279 if (map_type == 0)
4280 /* Thumb-2 code must have mapping symbols to have a chance. */
4281 return bpaddr;
4282
4283 bpaddr = gdbarch_addr_bits_remove (gdbarch, bpaddr);
4284
4285 if (find_pc_partial_function (bpaddr, NULL, &func_start, NULL)
4286 && func_start > boundary)
4287 boundary = func_start;
4288
4289 /* Search for a candidate IT instruction. We have to do some fancy
4290 footwork to distinguish a real IT instruction from the second
4291 half of a 32-bit instruction, but there is no need for that if
4292 there's no candidate. */
4293 buf_len = std::min (bpaddr - boundary, (CORE_ADDR) MAX_IT_BLOCK_PREFIX);
4294 if (buf_len == 0)
4295 /* No room for an IT instruction. */
4296 return bpaddr;
4297
4298 buf = (gdb_byte *) xmalloc (buf_len);
4299 if (target_read_code (bpaddr - buf_len, buf, buf_len) != 0)
4300 return bpaddr;
4301 any = 0;
4302 for (i = 0; i < buf_len; i += 2)
4303 {
4304 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
4305 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
4306 {
4307 any = 1;
4308 break;
4309 }
4310 }
4311
4312 if (any == 0)
4313 {
4314 xfree (buf);
4315 return bpaddr;
4316 }
4317
4318 /* OK, the code bytes before this instruction contain at least one
4319 halfword which resembles an IT instruction. We know that it's
4320 Thumb code, but there are still two possibilities. Either the
4321 halfword really is an IT instruction, or it is the second half of
4322 a 32-bit Thumb instruction. The only way we can tell is to
4323 scan forwards from a known instruction boundary. */
4324 if (bpaddr - boundary > IT_SCAN_THRESHOLD)
4325 {
4326 int definite;
4327
4328 /* There's a lot of code before this instruction. Start with an
4329 optimistic search; it's easy to recognize halfwords that can
4330 not be the start of a 32-bit instruction, and use that to
4331 lock on to the instruction boundaries. */
4332 buf = extend_buffer_earlier (buf, bpaddr, buf_len, IT_SCAN_THRESHOLD);
4333 if (buf == NULL)
4334 return bpaddr;
4335 buf_len = IT_SCAN_THRESHOLD;
4336
4337 definite = 0;
4338 for (i = 0; i < buf_len - sizeof (buf) && ! definite; i += 2)
4339 {
4340 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
4341 if (thumb_insn_size (inst1) == 2)
4342 {
4343 definite = 1;
4344 break;
4345 }
4346 }
4347
4348 /* At this point, if DEFINITE, BUF[I] is the first place we
4349 are sure that we know the instruction boundaries, and it is far
4350 enough from BPADDR that we could not miss an IT instruction
4351 affecting BPADDR. If ! DEFINITE, give up - start from a
4352 known boundary. */
4353 if (! definite)
4354 {
4355 buf = extend_buffer_earlier (buf, bpaddr, buf_len,
4356 bpaddr - boundary);
4357 if (buf == NULL)
4358 return bpaddr;
4359 buf_len = bpaddr - boundary;
4360 i = 0;
4361 }
4362 }
4363 else
4364 {
4365 buf = extend_buffer_earlier (buf, bpaddr, buf_len, bpaddr - boundary);
4366 if (buf == NULL)
4367 return bpaddr;
4368 buf_len = bpaddr - boundary;
4369 i = 0;
4370 }
4371
4372 /* Scan forwards. Find the last IT instruction before BPADDR. */
4373 last_it = -1;
4374 last_it_count = 0;
4375 while (i < buf_len)
4376 {
4377 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
4378 last_it_count--;
4379 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
4380 {
4381 last_it = i;
4382 if (inst1 & 0x0001)
4383 last_it_count = 4;
4384 else if (inst1 & 0x0002)
4385 last_it_count = 3;
4386 else if (inst1 & 0x0004)
4387 last_it_count = 2;
4388 else
4389 last_it_count = 1;
4390 }
4391 i += thumb_insn_size (inst1);
4392 }
4393
4394 xfree (buf);
4395
4396 if (last_it == -1)
4397 /* There wasn't really an IT instruction after all. */
4398 return bpaddr;
4399
4400 if (last_it_count < 1)
4401 /* It was too far away. */
4402 return bpaddr;
4403
4404 /* This really is a trouble spot. Move the breakpoint to the IT
4405 instruction. */
4406 return bpaddr - buf_len + last_it;
4407 }
4408
4409 /* ARM displaced stepping support.
4410
4411 Generally ARM displaced stepping works as follows:
4412
4413 1. When an instruction is to be single-stepped, it is first decoded by
4414 arm_process_displaced_insn. Depending on the type of instruction, it is
4415 then copied to a scratch location, possibly in a modified form. The
4416 copy_* set of functions performs such modification, as necessary. A
4417 breakpoint is placed after the modified instruction in the scratch space
4418 to return control to GDB. Note in particular that instructions which
4419 modify the PC will no longer do so after modification.
4420
4421 2. The instruction is single-stepped, by setting the PC to the scratch
4422 location address, and resuming. Control returns to GDB when the
4423 breakpoint is hit.
4424
4425 3. A cleanup function (cleanup_*) is called corresponding to the copy_*
4426 function used for the current instruction. This function's job is to
4427 put the CPU/memory state back to what it would have been if the
4428 instruction had been executed unmodified in its original location. */
4429
4430 /* NOP instruction (mov r0, r0). */
4431 #define ARM_NOP 0xe1a00000
4432 #define THUMB_NOP 0x4600
4433
4434 /* Helper for register reads for displaced stepping. In particular, this
4435 returns the PC as it would be seen by the instruction at its original
4436 location. */
4437
4438 ULONGEST
4439 displaced_read_reg (struct regcache *regs, arm_displaced_step_closure *dsc,
4440 int regno)
4441 {
4442 ULONGEST ret;
4443 CORE_ADDR from = dsc->insn_addr;
4444
4445 if (regno == ARM_PC_REGNUM)
4446 {
4447 /* Compute pipeline offset:
4448 - When executing an ARM instruction, PC reads as the address of the
4449 current instruction plus 8.
4450 - When executing a Thumb instruction, PC reads as the address of the
4451 current instruction plus 4. */
4452
4453 if (!dsc->is_thumb)
4454 from += 8;
4455 else
4456 from += 4;
4457
4458 displaced_debug_printf ("read pc value %.8lx",
4459 (unsigned long) from);
4460 return (ULONGEST) from;
4461 }
4462 else
4463 {
4464 regcache_cooked_read_unsigned (regs, regno, &ret);
4465
4466 displaced_debug_printf ("read r%d value %.8lx",
4467 regno, (unsigned long) ret);
4468
4469 return ret;
4470 }
4471 }
4472
4473 static int
4474 displaced_in_arm_mode (struct regcache *regs)
4475 {
4476 ULONGEST ps;
4477 ULONGEST t_bit = arm_psr_thumb_bit (regs->arch ());
4478
4479 regcache_cooked_read_unsigned (regs, ARM_PS_REGNUM, &ps);
4480
4481 return (ps & t_bit) == 0;
4482 }
4483
4484 /* Write to the PC as from a branch instruction. */
4485
4486 static void
4487 branch_write_pc (struct regcache *regs, arm_displaced_step_closure *dsc,
4488 ULONGEST val)
4489 {
4490 if (!dsc->is_thumb)
4491 /* Note: If bits 0/1 are set, this branch would be unpredictable for
4492 architecture versions < 6. */
4493 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM,
4494 val & ~(ULONGEST) 0x3);
4495 else
4496 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM,
4497 val & ~(ULONGEST) 0x1);
4498 }
4499
4500 /* Write to the PC as from a branch-exchange instruction. */
4501
4502 static void
4503 bx_write_pc (struct regcache *regs, ULONGEST val)
4504 {
4505 ULONGEST ps;
4506 ULONGEST t_bit = arm_psr_thumb_bit (regs->arch ());
4507
4508 regcache_cooked_read_unsigned (regs, ARM_PS_REGNUM, &ps);
4509
4510 if ((val & 1) == 1)
4511 {
4512 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps | t_bit);
4513 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & 0xfffffffe);
4514 }
4515 else if ((val & 2) == 0)
4516 {
4517 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps & ~t_bit);
4518 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val);
4519 }
4520 else
4521 {
4522 /* Unpredictable behaviour. Try to do something sensible (switch to ARM
4523 mode, align dest to 4 bytes). */
4524 warning (_("Single-stepping BX to non-word-aligned ARM instruction."));
4525 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps & ~t_bit);
4526 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & 0xfffffffc);
4527 }
4528 }
4529
4530 /* Write to the PC as if from a load instruction. */
4531
4532 static void
4533 load_write_pc (struct regcache *regs, arm_displaced_step_closure *dsc,
4534 ULONGEST val)
4535 {
4536 if (DISPLACED_STEPPING_ARCH_VERSION >= 5)
4537 bx_write_pc (regs, val);
4538 else
4539 branch_write_pc (regs, dsc, val);
4540 }
4541
4542 /* Write to the PC as if from an ALU instruction. */
4543
4544 static void
4545 alu_write_pc (struct regcache *regs, arm_displaced_step_closure *dsc,
4546 ULONGEST val)
4547 {
4548 if (DISPLACED_STEPPING_ARCH_VERSION >= 7 && !dsc->is_thumb)
4549 bx_write_pc (regs, val);
4550 else
4551 branch_write_pc (regs, dsc, val);
4552 }
4553
4554 /* Helper for writing to registers for displaced stepping. Writing to the PC
4555 has a varying effects depending on the instruction which does the write:
4556 this is controlled by the WRITE_PC argument. */
4557
4558 void
4559 displaced_write_reg (struct regcache *regs, arm_displaced_step_closure *dsc,
4560 int regno, ULONGEST val, enum pc_write_style write_pc)
4561 {
4562 if (regno == ARM_PC_REGNUM)
4563 {
4564 displaced_debug_printf ("writing pc %.8lx", (unsigned long) val);
4565
4566 switch (write_pc)
4567 {
4568 case BRANCH_WRITE_PC:
4569 branch_write_pc (regs, dsc, val);
4570 break;
4571
4572 case BX_WRITE_PC:
4573 bx_write_pc (regs, val);
4574 break;
4575
4576 case LOAD_WRITE_PC:
4577 load_write_pc (regs, dsc, val);
4578 break;
4579
4580 case ALU_WRITE_PC:
4581 alu_write_pc (regs, dsc, val);
4582 break;
4583
4584 case CANNOT_WRITE_PC:
4585 warning (_("Instruction wrote to PC in an unexpected way when "
4586 "single-stepping"));
4587 break;
4588
4589 default:
4590 internal_error (__FILE__, __LINE__,
4591 _("Invalid argument to displaced_write_reg"));
4592 }
4593
4594 dsc->wrote_to_pc = 1;
4595 }
4596 else
4597 {
4598 displaced_debug_printf ("writing r%d value %.8lx",
4599 regno, (unsigned long) val);
4600 regcache_cooked_write_unsigned (regs, regno, val);
4601 }
4602 }
4603
/* Concisely determine whether an instruction INSN references the PC.
   Register fields of interest in INSN should have the corresponding
   fields of BITMASK set to 0b1111.  Returns 1 if any of these fields
   in INSN holds the PC (also 0b1111, r15), else 0.  */

static int
insn_references_pc (uint32_t insn, uint32_t bitmask)
{
  uint32_t lowbit = 1;

  while (bitmask != 0)
    {
      /* Advance LOWBIT to the least-significant set bit of BITMASK.  */
      while (lowbit != 0 && (bitmask & lowbit) == 0)
	lowbit <<= 1;

      if (lowbit == 0)
	break;

      /* The four-bit register field whose LSB is LOWBIT.  */
      uint32_t field = lowbit * 0xf;

      /* An all-ones field means that register operand is r15.  */
      if ((insn & field) == field)
	return 1;

      bitmask &= ~field;
    }

  return 0;
}
4635
4636 /* The simplest copy function. Many instructions have the same effect no
4637 matter what address they are executed at: in those cases, use this. */
4638
4639 static int
4640 arm_copy_unmodified (struct gdbarch *gdbarch, uint32_t insn,
4641 const char *iname, arm_displaced_step_closure *dsc)
4642 {
4643 displaced_debug_printf ("copying insn %.8lx, opcode/class '%s' unmodified",
4644 (unsigned long) insn, iname);
4645
4646 dsc->modinsn[0] = insn;
4647
4648 return 0;
4649 }
4650
4651 static int
4652 thumb_copy_unmodified_32bit (struct gdbarch *gdbarch, uint16_t insn1,
4653 uint16_t insn2, const char *iname,
4654 arm_displaced_step_closure *dsc)
4655 {
4656 displaced_debug_printf ("copying insn %.4x %.4x, opcode/class '%s' "
4657 "unmodified", insn1, insn2, iname);
4658
4659 dsc->modinsn[0] = insn1;
4660 dsc->modinsn[1] = insn2;
4661 dsc->numinsns = 2;
4662
4663 return 0;
4664 }
4665
4666 /* Copy 16-bit Thumb(Thumb and 16-bit Thumb-2) instruction without any
4667 modification. */
4668 static int
4669 thumb_copy_unmodified_16bit (struct gdbarch *gdbarch, uint16_t insn,
4670 const char *iname,
4671 arm_displaced_step_closure *dsc)
4672 {
4673 displaced_debug_printf ("copying insn %.4x, opcode/class '%s' unmodified",
4674 insn, iname);
4675
4676 dsc->modinsn[0] = insn;
4677
4678 return 0;
4679 }
4680
4681 /* Preload instructions with immediate offset. */
4682
4683 static void
4684 cleanup_preload (struct gdbarch *gdbarch,
4685 struct regcache *regs, arm_displaced_step_closure *dsc)
4686 {
4687 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
4688 if (!dsc->u.preload.immed)
4689 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
4690 }
4691
4692 static void
4693 install_preload (struct gdbarch *gdbarch, struct regcache *regs,
4694 arm_displaced_step_closure *dsc, unsigned int rn)
4695 {
4696 ULONGEST rn_val;
4697 /* Preload instructions:
4698
4699 {pli/pld} [rn, #+/-imm]
4700 ->
4701 {pli/pld} [r0, #+/-imm]. */
4702
4703 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
4704 rn_val = displaced_read_reg (regs, dsc, rn);
4705 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
4706 dsc->u.preload.immed = 1;
4707
4708 dsc->cleanup = &cleanup_preload;
4709 }
4710
4711 static int
4712 arm_copy_preload (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
4713 arm_displaced_step_closure *dsc)
4714 {
4715 unsigned int rn = bits (insn, 16, 19);
4716
4717 if (!insn_references_pc (insn, 0x000f0000ul))
4718 return arm_copy_unmodified (gdbarch, insn, "preload", dsc);
4719
4720 displaced_debug_printf ("copying preload insn %.8lx", (unsigned long) insn);
4721
4722 dsc->modinsn[0] = insn & 0xfff0ffff;
4723
4724 install_preload (gdbarch, regs, dsc, rn);
4725
4726 return 0;
4727 }
4728
/* Copy a 32-bit Thumb PLD/PLI whose base is the PC (PLD (literal) /
   PLI (immediate, literal)): materialize the PC value in r0 and the
   signed offset in r1, then execute the preload as {pli/pld} [r0, r1].  */
static int
thumb2_copy_preload (struct gdbarch *gdbarch, uint16_t insn1, uint16_t insn2,
		     struct regcache *regs, arm_displaced_step_closure *dsc)
{
  unsigned int rn = bits (insn1, 0, 3);
  unsigned int u_bit = bit (insn1, 7);	/* 1 = add offset, 0 = subtract.  */
  int imm12 = bits (insn2, 0, 11);
  ULONGEST pc_val;

  /* A non-PC base is position-independent: copy unmodified.  */
  if (rn != ARM_PC_REGNUM)
    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "preload", dsc);

  /* PC is only allowed to use in PLI (immediate,literal) Encoding T3, and
     PLD (literal) Encoding T1.  */
  displaced_debug_printf ("copying pld/pli pc (0x%x) %c imm12 %.4x",
			  (unsigned int) dsc->insn_addr, u_bit ? '+' : '-',
			  imm12);

  /* Fold the U bit into the sign of the offset.  */
  if (!u_bit)
    imm12 = -1 * imm12;

  /* Rewrite instruction {pli/pld} PC imm12 into:
     Prepare: tmp[0] <- r0, tmp[1] <- r1, r0 <- pc, r1 <- imm12

     {pli/pld} [r0, r1]

     Cleanup: r0 <- tmp[0], r1 <- tmp[1].  */

  dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
  dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);

  /* displaced_read_reg on the PC already includes the +4 Thumb
     pipeline offset.  */
  pc_val = displaced_read_reg (regs, dsc, ARM_PC_REGNUM);

  displaced_write_reg (regs, dsc, 0, pc_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 1, imm12, CANNOT_WRITE_PC);
  /* Register-offset form: cleanup_preload must also restore r1.  */
  dsc->u.preload.immed = 0;

  /* {pli/pld} [r0, r1] */
  dsc->modinsn[0] = insn1 & 0xfff0;
  dsc->modinsn[1] = 0xf001;
  dsc->numinsns = 2;

  dsc->cleanup = &cleanup_preload;
  return 0;
}
4774
4775 /* Preload instructions with register offset. */
4776
4777 static void
4778 install_preload_reg(struct gdbarch *gdbarch, struct regcache *regs,
4779 arm_displaced_step_closure *dsc, unsigned int rn,
4780 unsigned int rm)
4781 {
4782 ULONGEST rn_val, rm_val;
4783
4784 /* Preload register-offset instructions:
4785
4786 {pli/pld} [rn, rm {, shift}]
4787 ->
4788 {pli/pld} [r0, r1 {, shift}]. */
4789
4790 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
4791 dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
4792 rn_val = displaced_read_reg (regs, dsc, rn);
4793 rm_val = displaced_read_reg (regs, dsc, rm);
4794 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
4795 displaced_write_reg (regs, dsc, 1, rm_val, CANNOT_WRITE_PC);
4796 dsc->u.preload.immed = 0;
4797
4798 dsc->cleanup = &cleanup_preload;
4799 }
4800
4801 static int
4802 arm_copy_preload_reg (struct gdbarch *gdbarch, uint32_t insn,
4803 struct regcache *regs,
4804 arm_displaced_step_closure *dsc)
4805 {
4806 unsigned int rn = bits (insn, 16, 19);
4807 unsigned int rm = bits (insn, 0, 3);
4808
4809
4810 if (!insn_references_pc (insn, 0x000f000ful))
4811 return arm_copy_unmodified (gdbarch, insn, "preload reg", dsc);
4812
4813 displaced_debug_printf ("copying preload insn %.8lx",
4814 (unsigned long) insn);
4815
4816 dsc->modinsn[0] = (insn & 0xfff0fff0) | 0x1;
4817
4818 install_preload_reg (gdbarch, regs, dsc, rn, rm);
4819 return 0;
4820 }
4821
4822 /* Copy/cleanup coprocessor load and store instructions. */
4823
4824 static void
4825 cleanup_copro_load_store (struct gdbarch *gdbarch,
4826 struct regcache *regs,
4827 arm_displaced_step_closure *dsc)
4828 {
4829 ULONGEST rn_val = displaced_read_reg (regs, dsc, 0);
4830
4831 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
4832
4833 if (dsc->u.ldst.writeback)
4834 displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, LOAD_WRITE_PC);
4835 }
4836
4837 static void
4838 install_copro_load_store (struct gdbarch *gdbarch, struct regcache *regs,
4839 arm_displaced_step_closure *dsc,
4840 int writeback, unsigned int rn)
4841 {
4842 ULONGEST rn_val;
4843
4844 /* Coprocessor load/store instructions:
4845
4846 {stc/stc2} [<Rn>, #+/-imm] (and other immediate addressing modes)
4847 ->
4848 {stc/stc2} [r0, #+/-imm].
4849
4850 ldc/ldc2 are handled identically. */
4851
4852 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
4853 rn_val = displaced_read_reg (regs, dsc, rn);
4854 /* PC should be 4-byte aligned. */
4855 rn_val = rn_val & 0xfffffffc;
4856 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
4857
4858 dsc->u.ldst.writeback = writeback;
4859 dsc->u.ldst.rn = rn;
4860
4861 dsc->cleanup = &cleanup_copro_load_store;
4862 }
4863
4864 static int
4865 arm_copy_copro_load_store (struct gdbarch *gdbarch, uint32_t insn,
4866 struct regcache *regs,
4867 arm_displaced_step_closure *dsc)
4868 {
4869 unsigned int rn = bits (insn, 16, 19);
4870
4871 if (!insn_references_pc (insn, 0x000f0000ul))
4872 return arm_copy_unmodified (gdbarch, insn, "copro load/store", dsc);
4873
4874 displaced_debug_printf ("copying coprocessor load/store insn %.8lx",
4875 (unsigned long) insn);
4876
4877 dsc->modinsn[0] = insn & 0xfff0ffff;
4878
4879 install_copro_load_store (gdbarch, regs, dsc, bit (insn, 25), rn);
4880
4881 return 0;
4882 }
4883
4884 static int
4885 thumb2_copy_copro_load_store (struct gdbarch *gdbarch, uint16_t insn1,
4886 uint16_t insn2, struct regcache *regs,
4887 arm_displaced_step_closure *dsc)
4888 {
4889 unsigned int rn = bits (insn1, 0, 3);
4890
4891 if (rn != ARM_PC_REGNUM)
4892 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
4893 "copro load/store", dsc);
4894
4895 displaced_debug_printf ("copying coprocessor load/store insn %.4x%.4x",
4896 insn1, insn2);
4897
4898 dsc->modinsn[0] = insn1 & 0xfff0;
4899 dsc->modinsn[1] = insn2;
4900 dsc->numinsns = 2;
4901
4902 /* This function is called for copying instruction LDC/LDC2/VLDR, which
4903 doesn't support writeback, so pass 0. */
4904 install_copro_load_store (gdbarch, regs, dsc, 0, rn);
4905
4906 return 0;
4907 }
4908
4909 /* Clean up branch instructions (actually perform the branch, by setting
4910 PC). */
4911
4912 static void
4913 cleanup_branch (struct gdbarch *gdbarch, struct regcache *regs,
4914 arm_displaced_step_closure *dsc)
4915 {
4916 uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
4917 int branch_taken = condition_true (dsc->u.branch.cond, status);
4918 enum pc_write_style write_pc = dsc->u.branch.exchange
4919 ? BX_WRITE_PC : BRANCH_WRITE_PC;
4920
4921 if (!branch_taken)
4922 return;
4923
4924 if (dsc->u.branch.link)
4925 {
4926 /* The value of LR should be the next insn of current one. In order
4927 not to confuse logic handling later insn `bx lr', if current insn mode
4928 is Thumb, the bit 0 of LR value should be set to 1. */
4929 ULONGEST next_insn_addr = dsc->insn_addr + dsc->insn_size;
4930
4931 if (dsc->is_thumb)
4932 next_insn_addr |= 0x1;
4933
4934 displaced_write_reg (regs, dsc, ARM_LR_REGNUM, next_insn_addr,
4935 CANNOT_WRITE_PC);
4936 }
4937
4938 displaced_write_reg (regs, dsc, ARM_PC_REGNUM, dsc->u.branch.dest, write_pc);
4939 }
4940
4941 /* Copy B/BL/BLX instructions with immediate destinations. */
4942
4943 static void
4944 install_b_bl_blx (struct gdbarch *gdbarch, struct regcache *regs,
4945 arm_displaced_step_closure *dsc,
4946 unsigned int cond, int exchange, int link, long offset)
4947 {
4948 /* Implement "BL<cond> <label>" as:
4949
4950 Preparation: cond <- instruction condition
4951 Insn: mov r0, r0 (nop)
4952 Cleanup: if (condition true) { r14 <- pc; pc <- label }.
4953
4954 B<cond> similar, but don't set r14 in cleanup. */
4955
4956 dsc->u.branch.cond = cond;
4957 dsc->u.branch.link = link;
4958 dsc->u.branch.exchange = exchange;
4959
4960 dsc->u.branch.dest = dsc->insn_addr;
4961 if (link && exchange)
4962 /* For BLX, offset is computed from the Align (PC, 4). */
4963 dsc->u.branch.dest = dsc->u.branch.dest & 0xfffffffc;
4964
4965 if (dsc->is_thumb)
4966 dsc->u.branch.dest += 4 + offset;
4967 else
4968 dsc->u.branch.dest += 8 + offset;
4969
4970 dsc->cleanup = &cleanup_branch;
4971 }
/* Copy an ARM B/BL/BLX with a 24-bit immediate: decode the condition
   and offset, replace the insn with a nop, and let cleanup_branch
   perform the branch (setting LR for BL/BLX and switching to Thumb
   for BLX).  */
static int
arm_copy_b_bl_blx (struct gdbarch *gdbarch, uint32_t insn,
		   struct regcache *regs, arm_displaced_step_closure *dsc)
{
  unsigned int cond = bits (insn, 28, 31);
  /* The 0xf "condition" field is the unconditional BLX encoding.  */
  int exchange = (cond == 0xf);
  int link = exchange || bit (insn, 24);
  long offset;

  displaced_debug_printf ("copying %s immediate insn %.8lx",
			  (exchange) ? "blx" : (link) ? "bl" : "b",
			  (unsigned long) insn);
  if (exchange)
    /* For BLX, set bit 0 of the destination.  The cleanup_branch function will
       then arrange the switch into Thumb mode.  */
    offset = (bits (insn, 0, 23) << 2) | (bit (insn, 24) << 1) | 1;
  else
    offset = bits (insn, 0, 23) << 2;

  /* The 24-bit immediate now occupies bits 2-25 of OFFSET, so bit 25
     is its sign bit; extend it through the upper bits.  */
  if (bit (offset, 25))
    offset = offset | ~0x3ffffff;

  dsc->modinsn[0] = ARM_NOP;

  install_b_bl_blx (gdbarch, regs, dsc, cond, exchange, link, offset);
  return 0;
}
4999
/* Copy a 32-bit Thumb B/BL/BLX with immediate destination (B
   encodings T3/T4, BL/BLX immediate).  The branch is replaced by a
   nop; install_b_bl_blx records the decoded target so cleanup_branch
   performs the jump afterwards.  */
static int
thumb2_copy_b_bl_blx (struct gdbarch *gdbarch, uint16_t insn1,
		      uint16_t insn2, struct regcache *regs,
		      arm_displaced_step_closure *dsc)
{
  int link = bit (insn2, 14);
  int exchange = link && !bit (insn2, 12);
  int cond = INST_AL;
  long offset = 0;
  int j1 = bit (insn2, 13);
  int j2 = bit (insn2, 11);
  /* S is the immediate's sign bit, sign-extended by sbits (so 0 or
     -1, which lets "s << 24" also fill the bits above 24).  I1/I2 are
     derived per the branch encodings: I1 = NOT (J1 EOR S),
     I2 = NOT (J2 EOR S).  */
  int s = sbits (insn1, 10, 10);
  int i1 = !(j1 ^ bit (insn1, 10));
  int i2 = !(j2 ^ bit (insn1, 10));

  if (!link && !exchange) /* B */
    {
      /* imm11 from the second halfword, shifted to halfword units.  */
      offset = (bits (insn2, 0, 10) << 1);
      if (bit (insn2, 12)) /* Encoding T4 */
	{
	  offset |= (bits (insn1, 0, 9) << 12)
	    | (i2 << 22)
	    | (i1 << 23)
	    | (s << 24);
	  cond = INST_AL;
	}
      else /* Encoding T3 */
	{
	  /* T3 is the conditional form; the condition lives in
	     bits 6-9 of the first halfword.  */
	  offset |= (bits (insn1, 0, 5) << 12)
	    | (j1 << 18)
	    | (j2 << 19)
	    | (s << 20);
	  cond = bits (insn1, 6, 9);
	}
    }
  else
    {
      /* BL or BLX.  For BLX the target is always word-aligned, so the
	 low halfword bit is not encoded and the immediate is shifted
	 by 2 instead of 1.  */
      offset = (bits (insn1, 0, 9) << 12);
      offset |= ((i2 << 22) | (i1 << 23) | (s << 24));
      offset |= exchange ?
	(bits (insn2, 1, 10) << 2) : (bits (insn2, 0, 10) << 1);
    }

  displaced_debug_printf ("copying %s insn %.4x %.4x with offset %.8lx",
			  link ? (exchange) ? "blx" : "bl" : "b",
			  insn1, insn2, offset);

  dsc->modinsn[0] = THUMB_NOP;

  install_b_bl_blx (gdbarch, regs, dsc, cond, exchange, link, offset);
  return 0;
}
5052
/* Copy 16-bit Thumb B instructions (encoding T1, conditional, and
   encoding T2, unconditional).  The branch is replaced by a nop and
   cleanup_branch performs the (possibly conditional) jump.  */
static int
thumb_copy_b (struct gdbarch *gdbarch, uint16_t insn,
	      arm_displaced_step_closure *dsc)
{
  unsigned int cond = 0;
  int offset = 0;
  unsigned short bit_12_15 = bits (insn, 12, 15);
  CORE_ADDR from = dsc->insn_addr;

  if (bit_12_15 == 0xd) /* Encoding T1 (conditional) */
    {
      /* offset = SignExtend (imm8:0, 32).  Shifting the insn left one
	 place moves imm8 to bits 1-8 with a zero in bit 0, so sbits
	 over bits 0-8 produces the sign-extended halfword offset.  */
      offset = sbits ((insn << 1), 0, 8);
      cond = bits (insn, 8, 11);
    }
  else if (bit_12_15 == 0xe) /* Encoding T2 */
    {
      /* Same trick for the 11-bit immediate:
	 offset = SignExtend (imm11:0, 32).  */
      offset = sbits ((insn << 1), 0, 11);
      cond = INST_AL;
    }
  /* NOTE(review): any other encoding falls through with cond == 0 and
     offset == 0; presumably the decoder only routes B encodings here
     -- confirm against the Thumb dispatch tables.  */

  displaced_debug_printf ("copying b immediate insn %.4x with offset %d",
			  insn, offset);

  dsc->u.branch.cond = cond;
  dsc->u.branch.link = 0;
  dsc->u.branch.exchange = 0;
  /* Destination is relative to the pipelined Thumb PC (insn + 4).  */
  dsc->u.branch.dest = from + 4 + offset;

  dsc->modinsn[0] = THUMB_NOP;

  dsc->cleanup = &cleanup_branch;

  return 0;
}
5089
5090 /* Copy BX/BLX with register-specified destinations. */
5091
5092 static void
5093 install_bx_blx_reg (struct gdbarch *gdbarch, struct regcache *regs,
5094 arm_displaced_step_closure *dsc, int link,
5095 unsigned int cond, unsigned int rm)
5096 {
5097 /* Implement {BX,BLX}<cond> <reg>" as:
5098
5099 Preparation: cond <- instruction condition
5100 Insn: mov r0, r0 (nop)
5101 Cleanup: if (condition true) { r14 <- pc; pc <- dest; }.
5102
5103 Don't set r14 in cleanup for BX. */
5104
5105 dsc->u.branch.dest = displaced_read_reg (regs, dsc, rm);
5106
5107 dsc->u.branch.cond = cond;
5108 dsc->u.branch.link = link;
5109
5110 dsc->u.branch.exchange = 1;
5111
5112 dsc->cleanup = &cleanup_branch;
5113 }
5114
5115 static int
5116 arm_copy_bx_blx_reg (struct gdbarch *gdbarch, uint32_t insn,
5117 struct regcache *regs, arm_displaced_step_closure *dsc)
5118 {
5119 unsigned int cond = bits (insn, 28, 31);
5120 /* BX: x12xxx1x
5121 BLX: x12xxx3x. */
5122 int link = bit (insn, 5);
5123 unsigned int rm = bits (insn, 0, 3);
5124
5125 displaced_debug_printf ("copying insn %.8lx", (unsigned long) insn);
5126
5127 dsc->modinsn[0] = ARM_NOP;
5128
5129 install_bx_blx_reg (gdbarch, regs, dsc, link, cond, rm);
5130 return 0;
5131 }
5132
5133 static int
5134 thumb_copy_bx_blx_reg (struct gdbarch *gdbarch, uint16_t insn,
5135 struct regcache *regs,
5136 arm_displaced_step_closure *dsc)
5137 {
5138 int link = bit (insn, 7);
5139 unsigned int rm = bits (insn, 3, 6);
5140
5141 displaced_debug_printf ("copying insn %.4x", (unsigned short) insn);
5142
5143 dsc->modinsn[0] = THUMB_NOP;
5144
5145 install_bx_blx_reg (gdbarch, regs, dsc, link, INST_AL, rm);
5146
5147 return 0;
5148 }
5149
5150
5151 /* Copy/cleanup arithmetic/logic instruction with immediate RHS. */
5152
5153 static void
5154 cleanup_alu_imm (struct gdbarch *gdbarch,
5155 struct regcache *regs, arm_displaced_step_closure *dsc)
5156 {
5157 ULONGEST rd_val = displaced_read_reg (regs, dsc, 0);
5158 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
5159 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
5160 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
5161 }
5162
/* Copy an ARM data-processing instruction with an immediate operand
   (e.g. "add rd, rn, #imm" or "mov rd, #imm") for displaced stepping.
   If neither rd nor rn is the PC the instruction runs unmodified;
   otherwise rd/rn are redirected through r0/r1 and fixed up afterwards
   by cleanup_alu_imm.  Returns 0 on success.  */
static int
arm_copy_alu_imm (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
		  arm_displaced_step_closure *dsc)
{
  unsigned int rn = bits (insn, 16, 19);
  unsigned int rd = bits (insn, 12, 15);
  unsigned int op = bits (insn, 21, 24);
  int is_mov = (op == 0xd);	/* MOV has no first (rn) operand.  */
  ULONGEST rd_val, rn_val;

  /* 0x000ff000 covers the rn and rd fields.  */
  if (!insn_references_pc (insn, 0x000ff000ul))
    return arm_copy_unmodified (gdbarch, insn, "ALU immediate", dsc);

  displaced_debug_printf ("copying immediate %s insn %.8lx",
			  is_mov ? "move" : "ALU",
			  (unsigned long) insn);

  /* Instruction is of form:

     <op><cond> rd, [rn,] #imm

     Rewrite as:

     Preparation: tmp1, tmp2 <- r0, r1;
		  r0, r1 <- rd, rn
     Insn: <op><cond> r0, r1, #imm
     Cleanup: rd <- r0; r0 <- tmp1; r1 <- tmp2
  */

  dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
  dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
  rn_val = displaced_read_reg (regs, dsc, rn);
  rd_val = displaced_read_reg (regs, dsc, rd);
  displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
  dsc->rd = rd;

  /* Substitute r0 for rd in the copied insn; for non-MOV forms also
     substitute r1 for rn (the 0x10000 sets the rn field to 1).  */
  if (is_mov)
    dsc->modinsn[0] = insn & 0xfff00fff;
  else
    dsc->modinsn[0] = (insn & 0xfff00fff) | 0x10000;

  dsc->cleanup = &cleanup_alu_imm;

  return 0;
}
5209
/* Copy a 32-bit Thumb-2 ALU instruction that may reference the PC for
   displaced stepping.  Only the MOV form (op == 0x2 with rn == 0xf) is
   expected here, as asserted below.  If neither Rm nor Rd is the PC the
   instruction runs unmodified; otherwise the operands are redirected
   through r0/r1 and restored by cleanup_alu_imm.  */
static int
thumb2_copy_alu_imm (struct gdbarch *gdbarch, uint16_t insn1,
		     uint16_t insn2, struct regcache *regs,
		     arm_displaced_step_closure *dsc)
{
  unsigned int op = bits (insn1, 5, 8);
  unsigned int rn, rm, rd;
  ULONGEST rd_val, rn_val;

  rn = bits (insn1, 0, 3);	/* Rn */
  rm = bits (insn2, 0, 3);	/* Rm */
  rd = bits (insn2, 8, 11);	/* Rd */

  /* This routine is only called for instruction MOV.  */
  gdb_assert (op == 0x2 && rn == 0xf);

  if (rm != ARM_PC_REGNUM && rd != ARM_PC_REGNUM)
    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "ALU imm", dsc);

  displaced_debug_printf ("copying reg %s insn %.4x%.4x", "ALU", insn1, insn2);

  /* Instruction is of form:

     <op><cond> rd, [rn,] #imm

     Rewrite as:

     Preparation: tmp1, tmp2 <- r0, r1;
		  r0, r1 <- rd, rn
     Insn: <op><cond> r0, r1, #imm
     Cleanup: rd <- r0; r0 <- tmp1; r1 <- tmp2
  */

  dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
  dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
  rn_val = displaced_read_reg (regs, dsc, rn);
  rd_val = displaced_read_reg (regs, dsc, rd);
  displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
  dsc->rd = rd;

  /* Rewrite Rd to r0 and the low register field to r1 in the second
     halfword; 0xf0f0 preserves the other encoding bits.  */
  dsc->modinsn[0] = insn1;
  dsc->modinsn[1] = ((insn2 & 0xf0f0) | 0x1);
  dsc->numinsns = 2;

  dsc->cleanup = &cleanup_alu_imm;

  return 0;
}
5259
5260 /* Copy/cleanup arithmetic/logic insns with register RHS. */
5261
5262 static void
5263 cleanup_alu_reg (struct gdbarch *gdbarch,
5264 struct regcache *regs, arm_displaced_step_closure *dsc)
5265 {
5266 ULONGEST rd_val;
5267 int i;
5268
5269 rd_val = displaced_read_reg (regs, dsc, 0);
5270
5271 for (i = 0; i < 3; i++)
5272 displaced_write_reg (regs, dsc, i, dsc->tmp[i], CANNOT_WRITE_PC);
5273
5274 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
5275 }
5276
5277 static void
5278 install_alu_reg (struct gdbarch *gdbarch, struct regcache *regs,
5279 arm_displaced_step_closure *dsc,
5280 unsigned int rd, unsigned int rn, unsigned int rm)
5281 {
5282 ULONGEST rd_val, rn_val, rm_val;
5283
5284 /* Instruction is of form:
5285
5286 <op><cond> rd, [rn,] rm [, <shift>]
5287
5288 Rewrite as:
5289
5290 Preparation: tmp1, tmp2, tmp3 <- r0, r1, r2;
5291 r0, r1, r2 <- rd, rn, rm
5292 Insn: <op><cond> r0, [r1,] r2 [, <shift>]
5293 Cleanup: rd <- r0; r0, r1, r2 <- tmp1, tmp2, tmp3
5294 */
5295
5296 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
5297 dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
5298 dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
5299 rd_val = displaced_read_reg (regs, dsc, rd);
5300 rn_val = displaced_read_reg (regs, dsc, rn);
5301 rm_val = displaced_read_reg (regs, dsc, rm);
5302 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
5303 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
5304 displaced_write_reg (regs, dsc, 2, rm_val, CANNOT_WRITE_PC);
5305 dsc->rd = rd;
5306
5307 dsc->cleanup = &cleanup_alu_reg;
5308 }
5309
5310 static int
5311 arm_copy_alu_reg (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
5312 arm_displaced_step_closure *dsc)
5313 {
5314 unsigned int op = bits (insn, 21, 24);
5315 int is_mov = (op == 0xd);
5316
5317 if (!insn_references_pc (insn, 0x000ff00ful))
5318 return arm_copy_unmodified (gdbarch, insn, "ALU reg", dsc);
5319
5320 displaced_debug_printf ("copying reg %s insn %.8lx",
5321 is_mov ? "move" : "ALU", (unsigned long) insn);
5322
5323 if (is_mov)
5324 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x2;
5325 else
5326 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x10002;
5327
5328 install_alu_reg (gdbarch, regs, dsc, bits (insn, 12, 15), bits (insn, 16, 19),
5329 bits (insn, 0, 3));
5330 return 0;
5331 }
5332
5333 static int
5334 thumb_copy_alu_reg (struct gdbarch *gdbarch, uint16_t insn,
5335 struct regcache *regs,
5336 arm_displaced_step_closure *dsc)
5337 {
5338 unsigned rm, rd;
5339
5340 rm = bits (insn, 3, 6);
5341 rd = (bit (insn, 7) << 3) | bits (insn, 0, 2);
5342
5343 if (rd != ARM_PC_REGNUM && rm != ARM_PC_REGNUM)
5344 return thumb_copy_unmodified_16bit (gdbarch, insn, "ALU reg", dsc);
5345
5346 displaced_debug_printf ("copying ALU reg insn %.4x", (unsigned short) insn);
5347
5348 dsc->modinsn[0] = ((insn & 0xff00) | 0x10);
5349
5350 install_alu_reg (gdbarch, regs, dsc, rd, rd, rm);
5351
5352 return 0;
5353 }
5354
5355 /* Cleanup/copy arithmetic/logic insns with shifted register RHS. */
5356
5357 static void
5358 cleanup_alu_shifted_reg (struct gdbarch *gdbarch,
5359 struct regcache *regs,
5360 arm_displaced_step_closure *dsc)
5361 {
5362 ULONGEST rd_val = displaced_read_reg (regs, dsc, 0);
5363 int i;
5364
5365 for (i = 0; i < 4; i++)
5366 displaced_write_reg (regs, dsc, i, dsc->tmp[i], CANNOT_WRITE_PC);
5367
5368 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
5369 }
5370
5371 static void
5372 install_alu_shifted_reg (struct gdbarch *gdbarch, struct regcache *regs,
5373 arm_displaced_step_closure *dsc,
5374 unsigned int rd, unsigned int rn, unsigned int rm,
5375 unsigned rs)
5376 {
5377 int i;
5378 ULONGEST rd_val, rn_val, rm_val, rs_val;
5379
5380 /* Instruction is of form:
5381
5382 <op><cond> rd, [rn,] rm, <shift> rs
5383
5384 Rewrite as:
5385
5386 Preparation: tmp1, tmp2, tmp3, tmp4 <- r0, r1, r2, r3
5387 r0, r1, r2, r3 <- rd, rn, rm, rs
5388 Insn: <op><cond> r0, r1, r2, <shift> r3
5389 Cleanup: tmp5 <- r0
5390 r0, r1, r2, r3 <- tmp1, tmp2, tmp3, tmp4
5391 rd <- tmp5
5392 */
5393
5394 for (i = 0; i < 4; i++)
5395 dsc->tmp[i] = displaced_read_reg (regs, dsc, i);
5396
5397 rd_val = displaced_read_reg (regs, dsc, rd);
5398 rn_val = displaced_read_reg (regs, dsc, rn);
5399 rm_val = displaced_read_reg (regs, dsc, rm);
5400 rs_val = displaced_read_reg (regs, dsc, rs);
5401 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
5402 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
5403 displaced_write_reg (regs, dsc, 2, rm_val, CANNOT_WRITE_PC);
5404 displaced_write_reg (regs, dsc, 3, rs_val, CANNOT_WRITE_PC);
5405 dsc->rd = rd;
5406 dsc->cleanup = &cleanup_alu_shifted_reg;
5407 }
5408
5409 static int
5410 arm_copy_alu_shifted_reg (struct gdbarch *gdbarch, uint32_t insn,
5411 struct regcache *regs,
5412 arm_displaced_step_closure *dsc)
5413 {
5414 unsigned int op = bits (insn, 21, 24);
5415 int is_mov = (op == 0xd);
5416 unsigned int rd, rn, rm, rs;
5417
5418 if (!insn_references_pc (insn, 0x000fff0ful))
5419 return arm_copy_unmodified (gdbarch, insn, "ALU shifted reg", dsc);
5420
5421 displaced_debug_printf ("copying shifted reg %s insn %.8lx",
5422 is_mov ? "move" : "ALU",
5423 (unsigned long) insn);
5424
5425 rn = bits (insn, 16, 19);
5426 rm = bits (insn, 0, 3);
5427 rs = bits (insn, 8, 11);
5428 rd = bits (insn, 12, 15);
5429
5430 if (is_mov)
5431 dsc->modinsn[0] = (insn & 0xfff000f0) | 0x302;
5432 else
5433 dsc->modinsn[0] = (insn & 0xfff000f0) | 0x10302;
5434
5435 install_alu_shifted_reg (gdbarch, regs, dsc, rd, rn, rm, rs);
5436
5437 return 0;
5438 }
5439
5440 /* Clean up load instructions. */
5441
/* Clean up load instructions.  The out-of-line copy left the loaded value
   in r0 (and, for doubleword transfers, its second word in r1) and the
   possibly-updated base address in r2.  Restore the scratch registers,
   emulate writeback, and move the result into the real destination.  */
static void
cleanup_load (struct gdbarch *gdbarch, struct regcache *regs,
	      arm_displaced_step_closure *dsc)
{
  ULONGEST rt_val, rt_val2 = 0, rn_val;

  /* Read the results out of the scratch registers before restoring them.  */
  rt_val = displaced_read_reg (regs, dsc, 0);
  if (dsc->u.ldst.xfersize == 8)
    rt_val2 = displaced_read_reg (regs, dsc, 1);
  rn_val = displaced_read_reg (regs, dsc, 2);

  displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
  if (dsc->u.ldst.xfersize > 4)
    displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 2, dsc->tmp[2], CANNOT_WRITE_PC);
  if (!dsc->u.ldst.immed)
    displaced_write_reg (regs, dsc, 3, dsc->tmp[3], CANNOT_WRITE_PC);

  /* Handle register writeback.  */
  if (dsc->u.ldst.writeback)
    displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, CANNOT_WRITE_PC);
  /* Put result in right place.  A load into the PC is a branch, hence
     LOAD_WRITE_PC.  */
  displaced_write_reg (regs, dsc, dsc->rd, rt_val, LOAD_WRITE_PC);
  if (dsc->u.ldst.xfersize == 8)
    displaced_write_reg (regs, dsc, dsc->rd + 1, rt_val2, LOAD_WRITE_PC);
}
5468
5469 /* Clean up store instructions. */
5470
5471 static void
5472 cleanup_store (struct gdbarch *gdbarch, struct regcache *regs,
5473 arm_displaced_step_closure *dsc)
5474 {
5475 ULONGEST rn_val = displaced_read_reg (regs, dsc, 2);
5476
5477 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
5478 if (dsc->u.ldst.xfersize > 4)
5479 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
5480 displaced_write_reg (regs, dsc, 2, dsc->tmp[2], CANNOT_WRITE_PC);
5481 if (!dsc->u.ldst.immed)
5482 displaced_write_reg (regs, dsc, 3, dsc->tmp[3], CANNOT_WRITE_PC);
5483 if (!dsc->u.ldst.restore_r4)
5484 displaced_write_reg (regs, dsc, 4, dsc->tmp[4], CANNOT_WRITE_PC);
5485
5486 /* Writeback. */
5487 if (dsc->u.ldst.writeback)
5488 displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, CANNOT_WRITE_PC);
5489 }
5490
5491 /* Copy "extra" load/store instructions. These are halfword/doubleword
5492 transfers, which have a different encoding to byte/word transfers. */
5493
/* Copy an ARM "extra" load/store (halfword/doubleword LDRH/STRH/LDRD/STRD
   and friends) for displaced stepping.  UNPRIVILEGED selects the
   unprivileged ("T") variants and only affects the debug message here.
   If no operand register is the PC the instruction runs unmodified;
   otherwise rt (/rt+1), rn and rm are remapped onto r0 (/r1), r2 and r3.  */
static int
arm_copy_extra_ld_st (struct gdbarch *gdbarch, uint32_t insn, int unprivileged,
		      struct regcache *regs, arm_displaced_step_closure *dsc)
{
  unsigned int op1 = bits (insn, 20, 24);
  unsigned int op2 = bits (insn, 5, 6);
  unsigned int rt = bits (insn, 12, 15);
  unsigned int rn = bits (insn, 16, 19);
  unsigned int rm = bits (insn, 0, 3);
  /* Tables indexed by the OPCODE computed below: whether the insn loads,
     and its transfer size in bytes (8 denotes a doubleword transfer
     using the register pair rt, rt+1).  */
  char load[12] = {0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1};
  char bytesize[12] = {2, 2, 2, 2, 8, 1, 8, 1, 8, 2, 8, 2};
  int immed = (op1 & 0x4) != 0;	/* Immediate vs. register offset.  */
  int opcode;
  ULONGEST rt_val, rt_val2 = 0, rn_val, rm_val = 0;

  if (!insn_references_pc (insn, 0x000ff00ful))
    return arm_copy_unmodified (gdbarch, insn, "extra load/store", dsc);

  displaced_debug_printf ("copying %sextra load/store insn %.8lx",
			  unprivileged ? "unprivileged " : "",
			  (unsigned long) insn);

  /* Fold op2 and the relevant op1 bits into a single table index.  */
  opcode = ((op2 << 2) | (op1 & 0x1) | ((op1 & 0x4) >> 1)) - 4;

  if (opcode < 0)
    internal_error (__FILE__, __LINE__,
		    _("copy_extra_ld_st: instruction decode error"));

  /* Save the scratch registers we are about to overwrite.  Note that r4
     is never used here, so restore_r4 is 0 below.  */
  dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
  dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
  dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
  if (!immed)
    dsc->tmp[3] = displaced_read_reg (regs, dsc, 3);

  /* Seed the scratch registers with the operand values.  */
  rt_val = displaced_read_reg (regs, dsc, rt);
  if (bytesize[opcode] == 8)
    rt_val2 = displaced_read_reg (regs, dsc, rt + 1);
  rn_val = displaced_read_reg (regs, dsc, rn);
  if (!immed)
    rm_val = displaced_read_reg (regs, dsc, rm);

  displaced_write_reg (regs, dsc, 0, rt_val, CANNOT_WRITE_PC);
  if (bytesize[opcode] == 8)
    displaced_write_reg (regs, dsc, 1, rt_val2, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 2, rn_val, CANNOT_WRITE_PC);
  if (!immed)
    displaced_write_reg (regs, dsc, 3, rm_val, CANNOT_WRITE_PC);

  dsc->rd = rt;
  dsc->u.ldst.xfersize = bytesize[opcode];
  dsc->u.ldst.rn = rn;
  dsc->u.ldst.immed = immed;
  /* Post-indexed (P bit clear) or W bit set both imply writeback.  */
  dsc->u.ldst.writeback = bit (insn, 24) == 0 || bit (insn, 21) != 0;
  dsc->u.ldst.restore_r4 = 0;

  if (immed)
    /* {ldr,str}<width><cond> rt, [rt2,] [rn, #imm]
       ->
       {ldr,str}<width><cond> r0, [r1,] [r2, #imm].  */
    dsc->modinsn[0] = (insn & 0xfff00fff) | 0x20000;
  else
    /* {ldr,str}<width><cond> rt, [rt2,] [rn, +/-rm]
       ->
       {ldr,str}<width><cond> r0, [r1,] [r2, +/-r3].  */
    dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x20003;

  dsc->cleanup = load[opcode] ? &cleanup_load : &cleanup_store;

  return 0;
}
5564
5565 /* Copy byte/half word/word loads and stores. */
5566
/* Install a displaced-stepping rewrite for a single-register load/store.
   LOAD/IMMED/WRITEBACK/SIZE describe the decoded instruction; RT/RM/RN
   are its register operands (RM only meaningful when !IMMED).  USERMODE
   is accepted for symmetry with the callers but not consulted here.
   This saves the scratch registers r0, r2, r3 (and r4 for stores),
   seeds them with the operand values, and registers the matching
   cleanup routine; the caller supplies dsc->modinsn.  */
static void
install_load_store (struct gdbarch *gdbarch, struct regcache *regs,
		    arm_displaced_step_closure *dsc, int load,
		    int immed, int writeback, int size, int usermode,
		    int rt, int rm, int rn)
{
  ULONGEST rt_val, rn_val, rm_val = 0;

  /* Save the application's values of the scratch registers.  r4 is only
     clobbered (and therefore only saved) on the PC-store path.  */
  dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
  dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
  if (!immed)
    dsc->tmp[3] = displaced_read_reg (regs, dsc, 3);
  if (!load)
    dsc->tmp[4] = displaced_read_reg (regs, dsc, 4);

  rt_val = displaced_read_reg (regs, dsc, rt);
  rn_val = displaced_read_reg (regs, dsc, rn);
  if (!immed)
    rm_val = displaced_read_reg (regs, dsc, rm);

  /* Seed r0 <- rt, r2 <- rn, r3 <- rm for the rewritten instruction.  */
  displaced_write_reg (regs, dsc, 0, rt_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 2, rn_val, CANNOT_WRITE_PC);
  if (!immed)
    displaced_write_reg (regs, dsc, 3, rm_val, CANNOT_WRITE_PC);
  dsc->rd = rt;
  dsc->u.ldst.xfersize = size;
  dsc->u.ldst.rn = rn;
  dsc->u.ldst.immed = immed;
  dsc->u.ldst.writeback = writeback;

  /* To write PC we can do:

     Before this sequence of instructions:
     r0 is the PC value got from displaced_read_reg, so r0 = from + 8;
     r2 is the Rn value got from displaced_read_reg.

     Insn1: push {pc} Write address of STR instruction + offset on stack
     Insn2: pop {r4} Read it back from stack, r4 = addr(Insn1) + offset
     Insn3: sub r4, r4, pc r4 = addr(Insn1) + offset - pc
     = addr(Insn1) + offset - addr(Insn3) - 8
     = offset - 16
     Insn4: add r4, r4, #8 r4 = offset - 8
     Insn5: add r0, r0, r4 r0 = from + 8 + offset - 8
     = from + offset
     Insn6: str r0, [r2, #imm] (or str r0, [r2, r3])

     Otherwise we don't know what value to write for PC, since the offset is
     architecture-dependent (sometimes PC+8, sometimes PC+12). More details
     of this can be found in Section "Saving from r15" in
     http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dui0204g/Cihbjifh.html */

  dsc->cleanup = load ? &cleanup_load : &cleanup_store;
}
5620
5621
/* Copy a Thumb-2 PC-relative (literal) load, "ldr Rt, [pc, #+/-imm12]",
   for displaced stepping.  The PC-relative access is rewritten into a
   register-offset load through r2 (holding the aligned PC) and r3
   (holding imm12); cleanup_load then moves the result into Rt.  SIZE is
   the transfer size in bytes.  */
static int
thumb2_copy_load_literal (struct gdbarch *gdbarch, uint16_t insn1,
			  uint16_t insn2, struct regcache *regs,
			  arm_displaced_step_closure *dsc, int size)
{
  unsigned int u_bit = bit (insn1, 7);	/* 1 = add offset, 0 = subtract.  */
  unsigned int rt = bits (insn2, 12, 15);
  int imm12 = bits (insn2, 0, 11);
  ULONGEST pc_val;

  displaced_debug_printf ("copying ldr pc (0x%x) R%d %c imm12 %.4x",
			  (unsigned int) dsc->insn_addr, rt, u_bit ? '+' : '-',
			  imm12);

  if (!u_bit)
    imm12 = -1 * imm12;

  /* Rewrite instruction LDR Rt imm12 into:

     Prepare: tmp[0] <- r0, tmp[2] <- r2, tmp[3] <- r3, r2 <- pc, r3 <- imm12

     LDR R0, R2, R3,

     Cleanup: rt <- r0, r0 <- tmp[0], r2 <- tmp[2], r3 <- tmp[3].  */


  dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
  dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
  dsc->tmp[3] = displaced_read_reg (regs, dsc, 3);

  pc_val = displaced_read_reg (regs, dsc, ARM_PC_REGNUM);

  /* Word-align the PC base before applying the offset.  */
  pc_val = pc_val & 0xfffffffc;

  displaced_write_reg (regs, dsc, 2, pc_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 3, imm12, CANNOT_WRITE_PC);

  dsc->rd = rt;

  dsc->u.ldst.xfersize = size;
  dsc->u.ldst.immed = 0;
  dsc->u.ldst.writeback = 0;
  dsc->u.ldst.restore_r4 = 0;

  /* LDR R0, R2, R3 */
  dsc->modinsn[0] = 0xf852;
  dsc->modinsn[1] = 0x3;
  dsc->numinsns = 2;

  dsc->cleanup = &cleanup_load;

  return 0;
}
5675
/* Copy a Thumb-2 32-bit load with register or immediate offset for
   displaced stepping.  WRITEBACK and IMMED describe the decoded
   addressing mode.  If neither Rt nor Rn is the PC the instruction runs
   unmodified; otherwise the operands are remapped onto r0/r2/r3 via
   install_load_store and fixed up by cleanup_load.  */
static int
thumb2_copy_load_reg_imm (struct gdbarch *gdbarch, uint16_t insn1,
			  uint16_t insn2, struct regcache *regs,
			  arm_displaced_step_closure *dsc,
			  int writeback, int immed)
{
  unsigned int rt = bits (insn2, 12, 15);
  unsigned int rn = bits (insn1, 0, 3);
  unsigned int rm = bits (insn2, 0, 3);  /* Only valid if !immed.  */
  /* In LDR (register), there is also a register Rm, which is not allowed to
     be PC, so we don't have to check it.  */

  if (rt != ARM_PC_REGNUM && rn != ARM_PC_REGNUM)
    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "load",
					dsc);

  displaced_debug_printf ("copying ldr r%d [r%d] insn %.4x%.4x",
			  rt, rn, insn1, insn2);

  install_load_store (gdbarch, regs, dsc, 1, immed, writeback, 4,
		      0, rt, rm, rn);

  dsc->u.ldst.restore_r4 = 0;

  if (immed)
    /* ldr[b]<cond> rt, [rn, #imm], etc.
       ->
       ldr[b]<cond> r0, [r2, #imm].  */
    {
      dsc->modinsn[0] = (insn1 & 0xfff0) | 0x2;
      dsc->modinsn[1] = insn2 & 0x0fff;
    }
  else
    /* ldr[b]<cond> rt, [rn, rm], etc.
       ->
       ldr[b]<cond> r0, [r2, r3].  */
    {
      dsc->modinsn[0] = (insn1 & 0xfff0) | 0x2;
      dsc->modinsn[1] = (insn2 & 0x0ff0) | 0x3;
    }

  dsc->numinsns = 2;

  return 0;
}
5721
5722
/* Copy an ARM single-register load/store (LDR/STR/LDRB/STRB, including
   the unprivileged "T" forms) for displaced stepping.  LOAD selects load
   vs. store, SIZE is the transfer width in bytes, USERMODE the "t"
   variant.  PC-referencing operands are remapped onto r0/r2/r3; a store
   of the PC additionally computes the architecture-dependent stored-PC
   value through r4 (see the comment in install_load_store).  */
static int
arm_copy_ldr_str_ldrb_strb (struct gdbarch *gdbarch, uint32_t insn,
			    struct regcache *regs,
			    arm_displaced_step_closure *dsc,
			    int load, int size, int usermode)
{
  int immed = !bit (insn, 25);
  int writeback = (bit (insn, 24) == 0 || bit (insn, 21) != 0);
  unsigned int rt = bits (insn, 12, 15);
  unsigned int rn = bits (insn, 16, 19);
  unsigned int rm = bits (insn, 0, 3);  /* Only valid if !immed.  */

  if (!insn_references_pc (insn, 0x000ff00ful))
    return arm_copy_unmodified (gdbarch, insn, "load/store", dsc);

  displaced_debug_printf ("copying %s%s r%d [r%d] insn %.8lx",
			  load ? (size == 1 ? "ldrb" : "ldr")
			       : (size == 1 ? "strb" : "str"),
			  usermode ? "t" : "",
			  rt, rn,
			  (unsigned long) insn);

  install_load_store (gdbarch, regs, dsc, load, immed, writeback, size,
		      usermode, rt, rm, rn);

  if (load || rt != ARM_PC_REGNUM)
    {
      dsc->u.ldst.restore_r4 = 0;

      if (immed)
	/* {ldr,str}[b]<cond> rt, [rn, #imm], etc.
	   ->
	   {ldr,str}[b]<cond> r0, [r2, #imm].  */
	dsc->modinsn[0] = (insn & 0xfff00fff) | 0x20000;
      else
	/* {ldr,str}[b]<cond> rt, [rn, rm], etc.
	   ->
	   {ldr,str}[b]<cond> r0, [r2, r3].  */
	dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x20003;
    }
  else
    {
      /* We need to use r4 as scratch.  Make sure it's restored afterwards.  */
      dsc->u.ldst.restore_r4 = 1;
      dsc->modinsn[0] = 0xe92d8000;  /* push {pc} */
      dsc->modinsn[1] = 0xe8bd0010;  /* pop  {r4} */
      dsc->modinsn[2] = 0xe044400f;  /* sub r4, r4, pc.  */
      dsc->modinsn[3] = 0xe2844008;  /* add r4, r4, #8.  */
      dsc->modinsn[4] = 0xe0800004;  /* add r0, r0, r4.  */

      /* As above.  */
      if (immed)
	dsc->modinsn[5] = (insn & 0xfff00fff) | 0x20000;
      else
	dsc->modinsn[5] = (insn & 0xfff00ff0) | 0x20003;

      dsc->numinsns = 6;
    }

  dsc->cleanup = load ? &cleanup_load : &cleanup_store;

  return 0;
}
5786
5787 /* Cleanup LDM instructions with fully-populated register list. This is an
5788 unfortunate corner case: it's impossible to implement correctly by modifying
5789 the instruction. The issue is as follows: we have an instruction,
5790
5791 ldm rN, {r0-r15}
5792
5793 which we must rewrite to avoid loading PC. A possible solution would be to
5794 do the load in two halves, something like (with suitable cleanup
5795 afterwards):
5796
5797 mov r8, rN
5798 ldm[id][ab] r8!, {r0-r7}
5799 str r7, <temp>
5800 ldm[id][ab] r8, {r7-r14}
5801 <bkpt>
5802
5803 but at present there's no suitable place for <temp>, since the scratch space
5804 is overwritten before the cleanup routine is called. For now, we simply
5805 emulate the instruction. */
5806
static void
cleanup_block_load_all (struct gdbarch *gdbarch, struct regcache *regs,
			arm_displaced_step_closure *dsc)
{
  int inc = dsc->u.block.increment;
  /* "Before" addressing modes bump the base address prior to each
     transfer; the others bump it afterwards.  */
  int bump_before = dsc->u.block.before ? (inc ? 4 : -4) : 0;
  int bump_after = dsc->u.block.before ? 0 : (inc ? 4 : -4);
  uint32_t regmask = dsc->u.block.regmask;
  /* Transfers proceed from the lowest register upwards for incrementing
     modes, and from the highest downwards for decrementing modes.  */
  int regno = inc ? 0 : 15;
  CORE_ADDR xfer_addr = dsc->u.block.xfer_addr;
  int exception_return = dsc->u.block.load && dsc->u.block.user
			 && (regmask & 0x8000) != 0;
  uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
  int do_transfer = condition_true (dsc->u.block.cond, status);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  if (!do_transfer)
    return;

  /* If the instruction is ldm rN, {...pc}^, I don't think there's anything
     sensible we can do here.  Complain loudly.  */
  if (exception_return)
    error (_("Cannot single-step exception return"));

  /* We don't handle any stores here for now.  */
  gdb_assert (dsc->u.block.load != 0);

  displaced_debug_printf ("emulating block transfer: %s %s %s",
			  dsc->u.block.load ? "ldm" : "stm",
			  dsc->u.block.increment ? "inc" : "dec",
			  dsc->u.block.before ? "before" : "after");

  while (regmask)
    {
      uint32_t memword;

      /* Advance to the next register mentioned in the mask.  */
      if (inc)
	while (regno <= ARM_PC_REGNUM && (regmask & (1 << regno)) == 0)
	  regno++;
      else
	while (regno >= 0 && (regmask & (1 << regno)) == 0)
	  regno--;

      xfer_addr += bump_before;

      memword = read_memory_unsigned_integer (xfer_addr, 4, byte_order);
      displaced_write_reg (regs, dsc, regno, memword, LOAD_WRITE_PC);

      xfer_addr += bump_after;

      regmask &= ~(1 << regno);
    }

  /* Emulate base-register writeback with the final transfer address.  */
  if (dsc->u.block.writeback)
    displaced_write_reg (regs, dsc, dsc->u.block.rn, xfer_addr,
			 CANNOT_WRITE_PC);
}
5864
5865 /* Clean up an STM which included the PC in the register list. */
5866
static void
cleanup_block_store_pc (struct gdbarch *gdbarch, struct regcache *regs,
			arm_displaced_step_closure *dsc)
{
  uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
  int store_executed = condition_true (dsc->u.block.cond, status);
  CORE_ADDR pc_stored_at, transferred_regs
    = count_one_bits (dsc->u.block.regmask);
  CORE_ADDR stm_insn_addr;
  uint32_t pc_val;
  long offset;
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  /* If condition code fails, there's nothing else to do.  */
  if (!store_executed)
    return;

  /* Work out where the out-of-line STM stored the PC.  The PC is the
     highest-numbered register in the list, so it went to the highest
     address written, which depends on the addressing mode and the number
     of registers transferred.  */
  if (dsc->u.block.increment)
    {
      pc_stored_at = dsc->u.block.xfer_addr + 4 * transferred_regs;

      if (dsc->u.block.before)
	 pc_stored_at += 4;
    }
  else
    {
      pc_stored_at = dsc->u.block.xfer_addr;

      if (dsc->u.block.before)
	 pc_stored_at -= 4;
    }

  /* The copied STM stored the address of the scratch copy plus an
     architecture-dependent offset; recover that offset by comparing the
     stored value with the scratch-buffer address.  */
  pc_val = read_memory_unsigned_integer (pc_stored_at, 4, byte_order);
  stm_insn_addr = dsc->scratch_base;
  offset = pc_val - stm_insn_addr;

  displaced_debug_printf ("detected PC offset %.8lx for STM instruction",
			  offset);

  /* Rewrite the stored PC to the proper value for the non-displaced original
     instruction.  */
  write_memory_unsigned_integer (pc_stored_at, 4, byte_order,
				 dsc->insn_addr + offset);
}
5911
5912 /* Clean up an LDM which includes the PC in the register list. We clumped all
5913 the registers in the transferred list into a contiguous range r0...rX (to
5914 avoid loading PC directly and losing control of the debugged program), so we
5915 must undo that here. */
5916
static void
cleanup_block_load_pc (struct gdbarch *gdbarch,
		       struct regcache *regs,
		       arm_displaced_step_closure *dsc)
{
  uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
  int load_executed = condition_true (dsc->u.block.cond, status);
  unsigned int mask = dsc->u.block.regmask, write_reg = ARM_PC_REGNUM;
  unsigned int regs_loaded = count_one_bits (mask);
  unsigned int num_to_shuffle = regs_loaded, clobbered;

  /* The method employed here will fail if the register list is fully populated
     (we need to avoid loading PC directly).  */
  gdb_assert (num_to_shuffle < 16);

  if (!load_executed)
    return;

  /* The out-of-line LDM loaded into the contiguous range r0..r(n-1); all
     of those are considered clobbered until shown otherwise.  */
  clobbered = (1 << num_to_shuffle) - 1;

  /* Walk the original register list from r15 downwards: the value for the
     highest remaining listed register sits in the highest remaining
     scratch register, r(num_to_shuffle - 1).  */
  while (num_to_shuffle > 0)
    {
      if ((mask & (1 << write_reg)) != 0)
	{
	  unsigned int read_reg = num_to_shuffle - 1;

	  if (read_reg != write_reg)
	    {
	      ULONGEST rval = displaced_read_reg (regs, dsc, read_reg);
	      displaced_write_reg (regs, dsc, write_reg, rval, LOAD_WRITE_PC);
	      displaced_debug_printf ("LDM: move loaded register r%d to r%d",
				      read_reg, write_reg);
	    }
	  else
	    displaced_debug_printf ("LDM: register r%d already in the right "
				    "place", write_reg);

	  /* This scratch register now holds its final value, so its saved
	     copy must not be restored below.  */
	  clobbered &= ~(1 << write_reg);

	  num_to_shuffle--;
	}

      write_reg--;
    }

  /* Restore any registers we scribbled over.  */
  for (write_reg = 0; clobbered != 0; write_reg++)
    {
      if ((clobbered & (1 << write_reg)) != 0)
	{
	  displaced_write_reg (regs, dsc, write_reg, dsc->tmp[write_reg],
			       CANNOT_WRITE_PC);
	  displaced_debug_printf ("LDM: restored clobbered register r%d",
				  write_reg);
	  clobbered &= ~(1 << write_reg);
	}
    }

  /* Perform register writeback manually (the writeback bit was cleared in
     the copied instruction by arm_copy_block_xfer).  */
  if (dsc->u.block.writeback)
    {
      ULONGEST new_rn_val = dsc->u.block.xfer_addr;

      if (dsc->u.block.increment)
	new_rn_val += regs_loaded * 4;
      else
	new_rn_val -= regs_loaded * 4;

      displaced_write_reg (regs, dsc, dsc->u.block.rn, new_rn_val,
			   CANNOT_WRITE_PC);
    }
}
5989
5990 /* Handle ldm/stm, apart from some tricky cases which are unlikely to occur
5991 in user-level code (in particular exception return, ldm rn, {...pc}^). */
5992
/* Copy an ARM LDM/STM instruction for displaced stepping.  Instructions
   that neither use the PC as base nor include it in the register list run
   unmodified.  An LDM that loads the PC is either emulated entirely (full
   register list) or rewritten to load into r0..rX and shuffled afterwards;
   an STM that stores the PC runs as-is and has the stored value patched
   afterwards.  Returns 0 on success.  */
static int
arm_copy_block_xfer (struct gdbarch *gdbarch, uint32_t insn,
		     struct regcache *regs,
		     arm_displaced_step_closure *dsc)
{
  int load = bit (insn, 20);
  int user = bit (insn, 22);
  int increment = bit (insn, 23);
  int before = bit (insn, 24);
  int writeback = bit (insn, 21);
  int rn = bits (insn, 16, 19);

  /* Block transfers which don't mention PC can be run directly
     out-of-line.  */
  if (rn != ARM_PC_REGNUM && (insn & 0x8000) == 0)
    return arm_copy_unmodified (gdbarch, insn, "ldm/stm", dsc);

  if (rn == ARM_PC_REGNUM)
    {
      warning (_("displaced: Unpredictable LDM or STM with "
		 "base register r15"));
      return arm_copy_unmodified (gdbarch, insn, "unpredictable ldm/stm", dsc);
    }

  displaced_debug_printf ("copying block transfer insn %.8lx",
			  (unsigned long) insn);

  /* Record everything the cleanup routines need to emulate writeback and
     fix up loaded/stored PC values.  */
  dsc->u.block.xfer_addr = displaced_read_reg (regs, dsc, rn);
  dsc->u.block.rn = rn;

  dsc->u.block.load = load;
  dsc->u.block.user = user;
  dsc->u.block.increment = increment;
  dsc->u.block.before = before;
  dsc->u.block.writeback = writeback;
  dsc->u.block.cond = bits (insn, 28, 31);

  dsc->u.block.regmask = insn & 0xffff;

  if (load)
    {
      if ((insn & 0xffff) == 0xffff)
	{
	  /* LDM with a fully-populated register list.  This case is
	     particularly tricky.  Implement for now by fully emulating the
	     instruction (which might not behave perfectly in all cases, but
	     these instructions should be rare enough for that not to matter
	     too much).  */
	  dsc->modinsn[0] = ARM_NOP;

	  dsc->cleanup = &cleanup_block_load_all;
	}
      else
	{
	  /* LDM of a list of registers which includes PC.  Implement by
	     rewriting the list of registers to be transferred into a
	     contiguous chunk r0...rX before doing the transfer, then shuffling
	     registers into the correct places in the cleanup routine.  */
	  unsigned int regmask = insn & 0xffff;
	  unsigned int num_in_list = count_one_bits (regmask), new_regmask;
	  unsigned int i;

	  /* Save r0..r(n-1), which the rewritten LDM will clobber.  */
	  for (i = 0; i < num_in_list; i++)
	    dsc->tmp[i] = displaced_read_reg (regs, dsc, i);

	  /* Writeback makes things complicated.  We need to avoid clobbering
	     the base register with one of the registers in our modified
	     register list, but just using a different register can't work in
	     all cases, e.g.:

	     ldm r14!, {r0-r13,pc}

	     which would need to be rewritten as:

	     ldm rN!, {r0-r14}

	     but that can't work, because there's no free register for N.

	     Solve this by turning off the writeback bit, and emulating
	     writeback manually in the cleanup routine.  */

	  if (writeback)
	    insn &= ~(1 << 21);

	  new_regmask = (1 << num_in_list) - 1;

	  displaced_debug_printf ("LDM r%d%s, {..., pc}: original reg list "
				  "%.4x, modified list %.4x",
				  rn, writeback ? "!" : "",
				  (int) insn & 0xffff, new_regmask);

	  dsc->modinsn[0] = (insn & ~0xffff) | (new_regmask & 0xffff);

	  dsc->cleanup = &cleanup_block_load_pc;
	}
    }
  else
    {
      /* STM of a list of registers which includes PC.  Run the instruction
	 as-is, but out of line: this will store the wrong value for the PC,
	 so we must manually fix up the memory in the cleanup routine.
	 Doing things this way has the advantage that we can auto-detect
	 the offset of the PC write (which is architecture-dependent) in
	 the cleanup routine.  */
      dsc->modinsn[0] = insn;

      dsc->cleanup = &cleanup_block_store_pc;
    }

  return 0;
}
6104
/* Prepare a displaced copy of a Thumb-2 LDM/STM (block transfer)
   instruction encoded by the halfwords INSN1/INSN2.  Transfers not
   involving the PC are copied unmodified.  An LDM that loads the PC is
   rewritten to load into a contiguous r0...rX list instead, with
   cleanup_block_load_pc shuffling values to their real destinations and
   emulating writeback; an STM that stores the PC runs as-is and the
   stored PC value is fixed up by cleanup_block_store_pc.  Returns 0.  */

static int
thumb2_copy_block_xfer (struct gdbarch *gdbarch, uint16_t insn1, uint16_t insn2,
			struct regcache *regs,
			arm_displaced_step_closure *dsc)
{
  int rn = bits (insn1, 0, 3);
  int load = bit (insn1, 4);
  int writeback = bit (insn1, 5);

  /* Block transfers which don't mention PC can be run directly
     out-of-line.  */
  if (rn != ARM_PC_REGNUM && (insn2 & 0x8000) == 0)
    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "ldm/stm", dsc);

  if (rn == ARM_PC_REGNUM)
    {
      /* Architecturally UNPREDICTABLE; copy unmodified and hope.  */
      warning (_("displaced: Unpredictable LDM or STM with "
		 "base register r15"));
      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					  "unpredictable ldm/stm", dsc);
    }

  displaced_debug_printf ("copying block transfer insn %.4x%.4x",
			  insn1, insn2);

  /* Clear bit 13, since it should be always zero.  */
  dsc->u.block.regmask = (insn2 & 0xdfff);
  dsc->u.block.rn = rn;

  dsc->u.block.load = load;
  dsc->u.block.user = 0;
  dsc->u.block.increment = bit (insn1, 7);
  dsc->u.block.before = bit (insn1, 8);
  dsc->u.block.writeback = writeback;
  dsc->u.block.cond = INST_AL;
  dsc->u.block.xfer_addr = displaced_read_reg (regs, dsc, rn);

  if (load)
    {
      if (dsc->u.block.regmask == 0xffff)
	{
	  /* This branch is impossible to reach: bit 13 was cleared
	     above, so the mask can never be all-ones.  */
	  gdb_assert (0);
	}
      else
	{
	  unsigned int regmask = dsc->u.block.regmask;
	  unsigned int num_in_list = count_one_bits (regmask), new_regmask;
	  unsigned int i;

	  /* Save the low registers we are about to clobber with the
	     rewritten r0...rX list.  */
	  for (i = 0; i < num_in_list; i++)
	    dsc->tmp[i] = displaced_read_reg (regs, dsc, i);

	  /* Turn off writeback (bit 5); the cleanup routine emulates
	     it manually.  */
	  if (writeback)
	    insn1 &= ~(1 << 5);

	  new_regmask = (1 << num_in_list) - 1;

	  displaced_debug_printf ("LDM r%d%s, {..., pc}: original reg list "
				  "%.4x, modified list %.4x",
				  rn, writeback ? "!" : "",
				  (int) dsc->u.block.regmask, new_regmask);

	  dsc->modinsn[0] = insn1;
	  dsc->modinsn[1] = (new_regmask & 0xffff);
	  dsc->numinsns = 2;

	  dsc->cleanup = &cleanup_block_load_pc;
	}
    }
  else
    {
      /* STM with PC in the list: run as-is; the wrong stored PC value
	 is corrected in the cleanup routine.  */
      dsc->modinsn[0] = insn1;
      dsc->modinsn[1] = insn2;
      dsc->numinsns = 2;
      dsc->cleanup = &cleanup_block_store_pc;
    }
  return 0;
}
6184
6185 /* Wrapper over read_memory_unsigned_integer for use in arm_get_next_pcs.
6186 This is used to avoid a dependency on BFD's bfd_endian enum. */
6187
6188 ULONGEST
6189 arm_get_next_pcs_read_memory_unsigned_integer (CORE_ADDR memaddr, int len,
6190 int byte_order)
6191 {
6192 return read_memory_unsigned_integer (memaddr, len,
6193 (enum bfd_endian) byte_order);
6194 }
6195
6196 /* Wrapper over gdbarch_addr_bits_remove for use in arm_get_next_pcs. */
6197
6198 CORE_ADDR
6199 arm_get_next_pcs_addr_bits_remove (struct arm_get_next_pcs *self,
6200 CORE_ADDR val)
6201 {
6202 return gdbarch_addr_bits_remove (self->regcache->arch (), val);
6203 }
6204
6205 /* Wrapper over syscall_next_pc for use in get_next_pcs. */
6206
6207 static CORE_ADDR
6208 arm_get_next_pcs_syscall_next_pc (struct arm_get_next_pcs *self)
6209 {
6210 return 0;
6211 }
6212
6213 /* Wrapper over arm_is_thumb for use in arm_get_next_pcs. */
6214
6215 int
6216 arm_get_next_pcs_is_thumb (struct arm_get_next_pcs *self)
6217 {
6218 return arm_is_thumb (self->regcache);
6219 }
6220
6221 /* single_step() is called just before we want to resume the inferior,
6222 if we want to single-step it but there is no hardware or kernel
6223 single-step support. We find the target of the coming instructions
6224 and breakpoint them. */
6225
6226 std::vector<CORE_ADDR>
6227 arm_software_single_step (struct regcache *regcache)
6228 {
6229 struct gdbarch *gdbarch = regcache->arch ();
6230 struct arm_get_next_pcs next_pcs_ctx;
6231
6232 arm_get_next_pcs_ctor (&next_pcs_ctx,
6233 &arm_get_next_pcs_ops,
6234 gdbarch_byte_order (gdbarch),
6235 gdbarch_byte_order_for_code (gdbarch),
6236 0,
6237 regcache);
6238
6239 std::vector<CORE_ADDR> next_pcs = arm_get_next_pcs (&next_pcs_ctx);
6240
6241 for (CORE_ADDR &pc_ref : next_pcs)
6242 pc_ref = gdbarch_addr_bits_remove (gdbarch, pc_ref);
6243
6244 return next_pcs;
6245 }
6246
6247 /* Cleanup/copy SVC (SWI) instructions. These two functions are overridden
6248 for Linux, where some SVC instructions must be treated specially. */
6249
6250 static void
6251 cleanup_svc (struct gdbarch *gdbarch, struct regcache *regs,
6252 arm_displaced_step_closure *dsc)
6253 {
6254 CORE_ADDR resume_addr = dsc->insn_addr + dsc->insn_size;
6255
6256 displaced_debug_printf ("cleanup for svc, resume at %.8lx",
6257 (unsigned long) resume_addr);
6258
6259 displaced_write_reg (regs, dsc, ARM_PC_REGNUM, resume_addr, BRANCH_WRITE_PC);
6260 }
6261
6262
6263 /* Common copy routine for svc instruction. */
6264
6265 static int
6266 install_svc (struct gdbarch *gdbarch, struct regcache *regs,
6267 arm_displaced_step_closure *dsc)
6268 {
6269 /* Preparation: none.
6270 Insn: unmodified svc.
6271 Cleanup: pc <- insn_addr + insn_size. */
6272
6273 /* Pretend we wrote to the PC, so cleanup doesn't set PC to the next
6274 instruction. */
6275 dsc->wrote_to_pc = 1;
6276
6277 /* Allow OS-specific code to override SVC handling. */
6278 if (dsc->u.svc.copy_svc_os)
6279 return dsc->u.svc.copy_svc_os (gdbarch, regs, dsc);
6280 else
6281 {
6282 dsc->cleanup = &cleanup_svc;
6283 return 0;
6284 }
6285 }
6286
6287 static int
6288 arm_copy_svc (struct gdbarch *gdbarch, uint32_t insn,
6289 struct regcache *regs, arm_displaced_step_closure *dsc)
6290 {
6291
6292 displaced_debug_printf ("copying svc insn %.8lx",
6293 (unsigned long) insn);
6294
6295 dsc->modinsn[0] = insn;
6296
6297 return install_svc (gdbarch, regs, dsc);
6298 }
6299
6300 static int
6301 thumb_copy_svc (struct gdbarch *gdbarch, uint16_t insn,
6302 struct regcache *regs, arm_displaced_step_closure *dsc)
6303 {
6304
6305 displaced_debug_printf ("copying svc insn %.4x", insn);
6306
6307 dsc->modinsn[0] = insn;
6308
6309 return install_svc (gdbarch, regs, dsc);
6310 }
6311
6312 /* Copy undefined instructions. */
6313
6314 static int
6315 arm_copy_undef (struct gdbarch *gdbarch, uint32_t insn,
6316 arm_displaced_step_closure *dsc)
6317 {
6318 displaced_debug_printf ("copying undefined insn %.8lx",
6319 (unsigned long) insn);
6320
6321 dsc->modinsn[0] = insn;
6322
6323 return 0;
6324 }
6325
6326 static int
6327 thumb_32bit_copy_undef (struct gdbarch *gdbarch, uint16_t insn1, uint16_t insn2,
6328 arm_displaced_step_closure *dsc)
6329 {
6330
6331 displaced_debug_printf ("copying undefined insn %.4x %.4x",
6332 (unsigned short) insn1, (unsigned short) insn2);
6333
6334 dsc->modinsn[0] = insn1;
6335 dsc->modinsn[1] = insn2;
6336 dsc->numinsns = 2;
6337
6338 return 0;
6339 }
6340
6341 /* Copy unpredictable instructions. */
6342
6343 static int
6344 arm_copy_unpred (struct gdbarch *gdbarch, uint32_t insn,
6345 arm_displaced_step_closure *dsc)
6346 {
6347 displaced_debug_printf ("copying unpredictable insn %.8lx",
6348 (unsigned long) insn);
6349
6350 dsc->modinsn[0] = insn;
6351
6352 return 0;
6353 }
6354
/* The decode_* functions are instruction decoding helpers.  They mostly follow
   the presentation in the ARM ARM.  */

/* Decode the miscellaneous / memory-hint / Advanced-SIMD subspace of
   the ARM unconditional encoding space (bit 27 clear) and dispatch INSN
   to the matching copy routine.  OP1 is bits 20-26, OP2 bits 4-7, RN
   bits 16-19, following the ARM ARM decode tables.  */

static int
arm_decode_misc_memhint_neon (struct gdbarch *gdbarch, uint32_t insn,
			      struct regcache *regs,
			      arm_displaced_step_closure *dsc)
{
  unsigned int op1 = bits (insn, 20, 26), op2 = bits (insn, 4, 7);
  unsigned int rn = bits (insn, 16, 19);

  if (op1 == 0x10 && (op2 & 0x2) == 0x0 && (rn & 0x1) == 0x0)
    return arm_copy_unmodified (gdbarch, insn, "cps", dsc);
  else if (op1 == 0x10 && op2 == 0x0 && (rn & 0x1) == 0x1)
    return arm_copy_unmodified (gdbarch, insn, "setend", dsc);
  else if ((op1 & 0x60) == 0x20)
    return arm_copy_unmodified (gdbarch, insn, "neon dataproc", dsc);
  else if ((op1 & 0x71) == 0x40)
    return arm_copy_unmodified (gdbarch, insn, "neon elt/struct load/store",
				dsc);
  else if ((op1 & 0x77) == 0x41)
    return arm_copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
  else if ((op1 & 0x77) == 0x45)
    return arm_copy_preload (gdbarch, insn, regs, dsc);  /* pli.  */
  else if ((op1 & 0x77) == 0x51)
    {
      if (rn != 0xf)
	return arm_copy_preload (gdbarch, insn, regs, dsc);  /* pld/pldw.  */
      else
	return arm_copy_unpred (gdbarch, insn, dsc);
    }
  else if ((op1 & 0x77) == 0x55)
    return arm_copy_preload (gdbarch, insn, regs, dsc);  /* pld/pldw.  */
  else if (op1 == 0x57)
    switch (op2)
      {
      case 0x1: return arm_copy_unmodified (gdbarch, insn, "clrex", dsc);
      case 0x4: return arm_copy_unmodified (gdbarch, insn, "dsb", dsc);
      case 0x5: return arm_copy_unmodified (gdbarch, insn, "dmb", dsc);
      case 0x6: return arm_copy_unmodified (gdbarch, insn, "isb", dsc);
      default: return arm_copy_unpred (gdbarch, insn, dsc);
      }
  else if ((op1 & 0x63) == 0x43)
    return arm_copy_unpred (gdbarch, insn, dsc);
  else if ((op2 & 0x1) == 0x0)
    switch (op1 & ~0x80)
      {
      case 0x61:
	return arm_copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
      case 0x65:
	return arm_copy_preload_reg (gdbarch, insn, regs, dsc);  /* pli reg.  */
      case 0x71: case 0x75:
	/* pld/pldw reg.  */
	return arm_copy_preload_reg (gdbarch, insn, regs, dsc);
      case 0x63: case 0x67: case 0x73: case 0x77:
	return arm_copy_unpred (gdbarch, insn, dsc);
      default:
	return arm_copy_undef (gdbarch, insn, dsc);
      }
  else
    return arm_copy_undef (gdbarch, insn, dsc);  /* Probably unreachable.  */
}
6417
/* Decode ARM instructions in the unconditional encoding space (condition
   field 0xf) and dispatch INSN to the matching copy routine.  Bit 27
   clear goes to the misc/memhint/neon decoder; otherwise dispatch on
   bits 24-26 and 20 as indicated below.  */

static int
arm_decode_unconditional (struct gdbarch *gdbarch, uint32_t insn,
			  struct regcache *regs,
			  arm_displaced_step_closure *dsc)
{
  if (bit (insn, 27) == 0)
    return arm_decode_misc_memhint_neon (gdbarch, insn, regs, dsc);
  /* Switch on bits: 0bxxxxx321xxx0xxxxxxxxxxxxxxxxxxxx.  */
  else switch (((insn & 0x7000000) >> 23) | ((insn & 0x100000) >> 20))
    {
    case 0x0: case 0x2:
      return arm_copy_unmodified (gdbarch, insn, "srs", dsc);

    case 0x1: case 0x3:
      return arm_copy_unmodified (gdbarch, insn, "rfe", dsc);

    case 0x4: case 0x5: case 0x6: case 0x7:
      return arm_copy_b_bl_blx (gdbarch, insn, regs, dsc);

    case 0x8:
      switch ((insn & 0xe00000) >> 21)
	{
	case 0x1: case 0x3: case 0x4: case 0x5: case 0x6: case 0x7:
	  /* stc/stc2.  */
	  return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);

	case 0x2:
	  return arm_copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);

	default:
	  return arm_copy_undef (gdbarch, insn, dsc);
	}

    case 0x9:
      {
	int rn_f = (bits (insn, 16, 19) == 0xf);
	switch ((insn & 0xe00000) >> 21)
	  {
	  case 0x1: case 0x3:
	    /* ldc/ldc2 imm (undefined for rn == pc).  */
	    return rn_f ? arm_copy_undef (gdbarch, insn, dsc)
			: arm_copy_copro_load_store (gdbarch, insn, regs, dsc);

	  case 0x2:
	    return arm_copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);

	  case 0x4: case 0x5: case 0x6: case 0x7:
	    /* ldc/ldc2 lit (undefined for rn != pc).  */
	    return rn_f ? arm_copy_copro_load_store (gdbarch, insn, regs, dsc)
			: arm_copy_undef (gdbarch, insn, dsc);

	  default:
	    return arm_copy_undef (gdbarch, insn, dsc);
	  }
      }

    case 0xa:
      return arm_copy_unmodified (gdbarch, insn, "stc/stc2", dsc);

    case 0xb:
      if (bits (insn, 16, 19) == 0xf)
	/* ldc/ldc2 lit.  */
	return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
      else
	return arm_copy_undef (gdbarch, insn, dsc);

    case 0xc:
      if (bit (insn, 4))
	return arm_copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
      else
	return arm_copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);

    case 0xd:
      if (bit (insn, 4))
	return arm_copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
      else
	return arm_copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);

    default:
      return arm_copy_undef (gdbarch, insn, dsc);
    }
}
6500
/* Decode miscellaneous instructions in dp/misc encoding space.  OP2 is
   bits 4-6 and OP bits 21-22, per the ARM ARM decode table; branch-like
   instructions (bx, blx register) are copied specially, everything else
   unmodified or as undefined.  */

static int
arm_decode_miscellaneous (struct gdbarch *gdbarch, uint32_t insn,
			  struct regcache *regs,
			  arm_displaced_step_closure *dsc)
{
  unsigned int op2 = bits (insn, 4, 6);
  unsigned int op = bits (insn, 21, 22);

  switch (op2)
    {
    case 0x0:
      return arm_copy_unmodified (gdbarch, insn, "mrs/msr", dsc);

    case 0x1:
      if (op == 0x1)  /* bx.  */
	return arm_copy_bx_blx_reg (gdbarch, insn, regs, dsc);
      else if (op == 0x3)
	return arm_copy_unmodified (gdbarch, insn, "clz", dsc);
      else
	return arm_copy_undef (gdbarch, insn, dsc);

    case 0x2:
      if (op == 0x1)
	/* Not really supported.  */
	return arm_copy_unmodified (gdbarch, insn, "bxj", dsc);
      else
	return arm_copy_undef (gdbarch, insn, dsc);

    case 0x3:
      if (op == 0x1)
	return arm_copy_bx_blx_reg (gdbarch, insn,
				    regs, dsc);  /* blx register.  */
      else
	return arm_copy_undef (gdbarch, insn, dsc);

    case 0x5:
      return arm_copy_unmodified (gdbarch, insn, "saturating add/sub", dsc);

    case 0x7:
      if (op == 0x1)
	return arm_copy_unmodified (gdbarch, insn, "bkpt", dsc);
      else if (op == 0x3)
	/* Not really supported.  */
	return arm_copy_unmodified (gdbarch, insn, "smc", dsc);
      /* Fall through.  */

    default:
      return arm_copy_undef (gdbarch, insn, dsc);
    }
}
6553
/* Decode the ARM data-processing / miscellaneous encoding space and
   dispatch INSN to the matching copy routine.  Bit 25 set means the
   immediate forms (movw/movt/msr-imm/ALU-imm); otherwise dispatch on
   OP1 (bits 20-24) and OP2 (bits 4-7) per the ARM ARM.  */

static int
arm_decode_dp_misc (struct gdbarch *gdbarch, uint32_t insn,
		    struct regcache *regs,
		    arm_displaced_step_closure *dsc)
{
  if (bit (insn, 25))
    switch (bits (insn, 20, 24))
      {
      case 0x10:
	return arm_copy_unmodified (gdbarch, insn, "movw", dsc);

      case 0x14:
	return arm_copy_unmodified (gdbarch, insn, "movt", dsc);

      case 0x12: case 0x16:
	return arm_copy_unmodified (gdbarch, insn, "msr imm", dsc);

      default:
	return arm_copy_alu_imm (gdbarch, insn, regs, dsc);
      }
  else
    {
      uint32_t op1 = bits (insn, 20, 24), op2 = bits (insn, 4, 7);

      if ((op1 & 0x19) != 0x10 && (op2 & 0x1) == 0x0)
	return arm_copy_alu_reg (gdbarch, insn, regs, dsc);
      else if ((op1 & 0x19) != 0x10 && (op2 & 0x9) == 0x1)
	return arm_copy_alu_shifted_reg (gdbarch, insn, regs, dsc);
      else if ((op1 & 0x19) == 0x10 && (op2 & 0x8) == 0x0)
	return arm_decode_miscellaneous (gdbarch, insn, regs, dsc);
      else if ((op1 & 0x19) == 0x10 && (op2 & 0x9) == 0x8)
	return arm_copy_unmodified (gdbarch, insn, "halfword mul/mla", dsc);
      else if ((op1 & 0x10) == 0x00 && op2 == 0x9)
	return arm_copy_unmodified (gdbarch, insn, "mul/mla", dsc);
      else if ((op1 & 0x10) == 0x10 && op2 == 0x9)
	return arm_copy_unmodified (gdbarch, insn, "synch", dsc);
      else if (op2 == 0xb || (op2 & 0xd) == 0xd)
	/* 2nd arg means "unprivileged".  */
	return arm_copy_extra_ld_st (gdbarch, insn, (op1 & 0x12) == 0x02, regs,
				     dsc);
    }

  /* Should be unreachable.  */
  return 1;
}
6599
/* Decode ARM load/store word and unsigned byte instructions and
   dispatch to arm_copy_ldr_str_ldrb_strb with the (load, size,
   user-mode) flags implied by OP1 (bits 20-24), A (bit 25) and
   B (bit 4).  */

static int
arm_decode_ld_st_word_ubyte (struct gdbarch *gdbarch, uint32_t insn,
			     struct regcache *regs,
			     arm_displaced_step_closure *dsc)
{
  int a = bit (insn, 25), b = bit (insn, 4);
  uint32_t op1 = bits (insn, 20, 24);

  /* The trailing flag triples are (load, bytesize, usermode).  */
  if ((!a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02)
      || (a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 4, 0);
  else if ((!a && (op1 & 0x17) == 0x02)
	   || (a && (op1 & 0x17) == 0x02 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 4, 1);
  else if ((!a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03)
	   || (a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 4, 0);
  else if ((!a && (op1 & 0x17) == 0x03)
	   || (a && (op1 & 0x17) == 0x03 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 4, 1);
  else if ((!a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06)
	   || (a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 0);
  else if ((!a && (op1 & 0x17) == 0x06)
	   || (a && (op1 & 0x17) == 0x06 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 1);
  else if ((!a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07)
	   || (a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 0);
  else if ((!a && (op1 & 0x17) == 0x07)
	   || (a && (op1 & 0x17) == 0x07 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 1);

  /* Should be unreachable.  */
  return 1;
}
6636
/* Decode ARM media instructions (parallel add/sub, pack/unpack,
   saturate, bit-field ops, etc.) on bits 20-24 and dispatch each to
   the matching copy routine.  None of these can reference the PC in a
   way that needs rewriting, so everything is either copied unmodified
   or treated as undefined.  */

static int
arm_decode_media (struct gdbarch *gdbarch, uint32_t insn,
		  arm_displaced_step_closure *dsc)
{
  switch (bits (insn, 20, 24))
    {
    case 0x00: case 0x01: case 0x02: case 0x03:
      return arm_copy_unmodified (gdbarch, insn, "parallel add/sub signed", dsc);

    case 0x04: case 0x05: case 0x06: case 0x07:
      return arm_copy_unmodified (gdbarch, insn, "parallel add/sub unsigned", dsc);

    case 0x08: case 0x09: case 0x0a: case 0x0b:
    case 0x0c: case 0x0d: case 0x0e: case 0x0f:
      return arm_copy_unmodified (gdbarch, insn,
				  "decode/pack/unpack/saturate/reverse", dsc);

    case 0x18:
      if (bits (insn, 5, 7) == 0)  /* op2.  */
	{
	  if (bits (insn, 12, 15) == 0xf)
	    return arm_copy_unmodified (gdbarch, insn, "usad8", dsc);
	  else
	    return arm_copy_unmodified (gdbarch, insn, "usada8", dsc);
	}
      else
	return arm_copy_undef (gdbarch, insn, dsc);

    case 0x1a: case 0x1b:
      if (bits (insn, 5, 6) == 0x2)  /* op2[1:0].  */
	return arm_copy_unmodified (gdbarch, insn, "sbfx", dsc);
      else
	return arm_copy_undef (gdbarch, insn, dsc);

    case 0x1c: case 0x1d:
      if (bits (insn, 5, 6) == 0x0)  /* op2[1:0].  */
	{
	  if (bits (insn, 0, 3) == 0xf)
	    return arm_copy_unmodified (gdbarch, insn, "bfc", dsc);
	  else
	    return arm_copy_unmodified (gdbarch, insn, "bfi", dsc);
	}
      else
	return arm_copy_undef (gdbarch, insn, dsc);

    case 0x1e: case 0x1f:
      if (bits (insn, 5, 6) == 0x2)  /* op2[1:0].  */
	return arm_copy_unmodified (gdbarch, insn, "ubfx", dsc);
      else
	return arm_copy_undef (gdbarch, insn, dsc);
    }

  /* Should be unreachable.  */
  return 1;
}
6692
6693 static int
6694 arm_decode_b_bl_ldmstm (struct gdbarch *gdbarch, uint32_t insn,
6695 struct regcache *regs,
6696 arm_displaced_step_closure *dsc)
6697 {
6698 if (bit (insn, 25))
6699 return arm_copy_b_bl_blx (gdbarch, insn, regs, dsc);
6700 else
6701 return arm_copy_block_xfer (gdbarch, insn, regs, dsc);
6702 }
6703
/* Decode ARM extension-register (VFP/Neon) load/store instructions on
   the opcode in bits 20-24 and dispatch to the matching copy routine.
   Only vstr/vldr can be PC-relative and need the coprocessor
   load/store rewriting; the rest are copied unmodified.  */

static int
arm_decode_ext_reg_ld_st (struct gdbarch *gdbarch, uint32_t insn,
			  struct regcache *regs,
			  arm_displaced_step_closure *dsc)
{
  unsigned int opcode = bits (insn, 20, 24);

  switch (opcode)
    {
    case 0x04: case 0x05:  /* VFP/Neon mrrc/mcrr.  */
      return arm_copy_unmodified (gdbarch, insn, "vfp/neon mrrc/mcrr", dsc);

    case 0x08: case 0x0a: case 0x0c: case 0x0e:
    case 0x12: case 0x16:
      return arm_copy_unmodified (gdbarch, insn, "vfp/neon vstm/vpush", dsc);

    case 0x09: case 0x0b: case 0x0d: case 0x0f:
    case 0x13: case 0x17:
      return arm_copy_unmodified (gdbarch, insn, "vfp/neon vldm/vpop", dsc);

    case 0x10: case 0x14: case 0x18: case 0x1c:  /* vstr.  */
    case 0x11: case 0x15: case 0x19: case 0x1d:  /* vldr.  */
      /* Note: no writeback for these instructions.  Bit 25 will always be
	 zero though (via caller), so the following works OK.  */
      return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
    }

  /* Should be unreachable.  */
  return 1;
}
6734
6735 /* Decode shifted register instructions. */
6736
6737 static int
6738 thumb2_decode_dp_shift_reg (struct gdbarch *gdbarch, uint16_t insn1,
6739 uint16_t insn2, struct regcache *regs,
6740 arm_displaced_step_closure *dsc)
6741 {
6742 /* PC is only allowed to be used in instruction MOV. */
6743
6744 unsigned int op = bits (insn1, 5, 8);
6745 unsigned int rn = bits (insn1, 0, 3);
6746
6747 if (op == 0x2 && rn == 0xf) /* MOV */
6748 return thumb2_copy_alu_imm (gdbarch, insn1, insn2, regs, dsc);
6749 else
6750 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
6751 "dp (shift reg)", dsc);
6752 }
6753

/* Decode extension register load/store.  Exactly the same as
   arm_decode_ext_reg_ld_st.  Dispatches on the opcode in bits 4-8 of
   INSN1; only vldr can be PC-relative and needs the coprocessor
   load/store rewriting, the rest are copied unmodified.  */

static int
thumb2_decode_ext_reg_ld_st (struct gdbarch *gdbarch, uint16_t insn1,
			     uint16_t insn2, struct regcache *regs,
			     arm_displaced_step_closure *dsc)
{
  unsigned int opcode = bits (insn1, 4, 8);

  switch (opcode)
    {
    case 0x04: case 0x05:
      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					  "vfp/neon vmov", dsc);

    case 0x08: case 0x0c: /* 01x00 */
    case 0x0a: case 0x0e: /* 01x10 */
    case 0x12: case 0x16: /* 10x10 */
      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					  "vfp/neon vstm/vpush", dsc);

    case 0x09: case 0x0d: /* 01x01 */
    case 0x0b: case 0x0f: /* 01x11 */
    case 0x13: case 0x17: /* 10x11 */
      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					  "vfp/neon vldm/vpop", dsc);

    case 0x10: case 0x14: case 0x18: case 0x1c:  /* vstr.  */
      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					  "vstr", dsc);
    case 0x11: case 0x15: case 0x19: case 0x1d:  /* vldr.  */
      return thumb2_copy_copro_load_store (gdbarch, insn1, insn2, regs, dsc);
    }

  /* Should be unreachable.  */
  return 1;
}
6793
/* Decode the ARM supervisor-call / coprocessor encoding space (SVC,
   coprocessor load/store, register transfers and data processing) and
   dispatch INSN to the matching copy routine.  OP1 is bits 20-25, OP is
   bit 4 and COPROC bits 8-11, following the ARM ARM decode tables;
   coproc 101x selects the VFP/Neon forms.  */

static int
arm_decode_svc_copro (struct gdbarch *gdbarch, uint32_t insn,
		      struct regcache *regs, arm_displaced_step_closure *dsc)
{
  unsigned int op1 = bits (insn, 20, 25);
  int op = bit (insn, 4);
  unsigned int coproc = bits (insn, 8, 11);

  if ((op1 & 0x20) == 0x00 && (op1 & 0x3a) != 0x00 && (coproc & 0xe) == 0xa)
    return arm_decode_ext_reg_ld_st (gdbarch, insn, regs, dsc);
  else if ((op1 & 0x21) == 0x00 && (op1 & 0x3a) != 0x00
	   && (coproc & 0xe) != 0xa)
    /* stc/stc2.  */
    return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
  else if ((op1 & 0x21) == 0x01 && (op1 & 0x3a) != 0x00
	   && (coproc & 0xe) != 0xa)
    /* ldc/ldc2 imm/lit.  */
    return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
  else if ((op1 & 0x3e) == 0x00)
    return arm_copy_undef (gdbarch, insn, dsc);
  else if ((op1 & 0x3e) == 0x04 && (coproc & 0xe) == 0xa)
    return arm_copy_unmodified (gdbarch, insn, "neon 64bit xfer", dsc);
  else if (op1 == 0x04 && (coproc & 0xe) != 0xa)
    return arm_copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);
  else if (op1 == 0x05 && (coproc & 0xe) != 0xa)
    return arm_copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);
  else if ((op1 & 0x30) == 0x20 && !op)
    {
      if ((coproc & 0xe) == 0xa)
	return arm_copy_unmodified (gdbarch, insn, "vfp dataproc", dsc);
      else
	return arm_copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
    }
  else if ((op1 & 0x30) == 0x20 && op)
    return arm_copy_unmodified (gdbarch, insn, "neon 8/16/32 bit xfer", dsc);
  else if ((op1 & 0x31) == 0x20 && op && (coproc & 0xe) != 0xa)
    return arm_copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
  else if ((op1 & 0x31) == 0x21 && op && (coproc & 0xe) != 0xa)
    return arm_copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
  else if ((op1 & 0x30) == 0x30)
    return arm_copy_svc (gdbarch, insn, regs, dsc);
  else
    return arm_copy_undef (gdbarch, insn, dsc);  /* Possibly unreachable.  */
}
6838
/* Decode the Thumb-2 coprocessor / SIMD / supervisor encoding space and
   dispatch to the matching copy routine.  Dispatch is on bit 9, bits
   5-8 and bit 4 of INSN1 plus the coprocessor field (bits 8-11 of
   INSN2); coproc 101x selects VFP/Neon extension-register forms.  */

static int
thumb2_decode_svc_copro (struct gdbarch *gdbarch, uint16_t insn1,
			 uint16_t insn2, struct regcache *regs,
			 arm_displaced_step_closure *dsc)
{
  unsigned int coproc = bits (insn2, 8, 11);
  unsigned int bit_5_8 = bits (insn1, 5, 8);
  unsigned int bit_9 = bit (insn1, 9);
  unsigned int bit_4 = bit (insn1, 4);

  if (bit_9 == 0)
    {
      if (bit_5_8 == 2)
	return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					    "neon 64bit xfer/mrrc/mrrc2/mcrr/mcrr2",
					    dsc);
      else if (bit_5_8 == 0)  /* UNDEFINED.  */
	return thumb_32bit_copy_undef (gdbarch, insn1, insn2, dsc);
      else
	{
	  /* coproc is 101x.  SIMD/VFP, ext registers load/store.  */
	  if ((coproc & 0xe) == 0xa)
	    return thumb2_decode_ext_reg_ld_st (gdbarch, insn1, insn2, regs,
						dsc);
	  else /* coproc is not 101x.  */
	    {
	      if (bit_4 == 0)  /* STC/STC2.  */
		return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						    "stc/stc2", dsc);
	      else /* LDC/LDC2 {literal, immediate}.  */
		return thumb2_copy_copro_load_store (gdbarch, insn1, insn2,
						     regs, dsc);
	    }
	}
    }
  else
    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "coproc", dsc);

  return 0;
}
6879
6880 static void
6881 install_pc_relative (struct gdbarch *gdbarch, struct regcache *regs,
6882 arm_displaced_step_closure *dsc, int rd)
6883 {
6884 /* ADR Rd, #imm
6885
6886 Rewrite as:
6887
6888 Preparation: Rd <- PC
6889 Insn: ADD Rd, #imm
6890 Cleanup: Null.
6891 */
6892
6893 /* Rd <- PC */
6894 int val = displaced_read_reg (regs, dsc, ARM_PC_REGNUM);
6895 displaced_write_reg (regs, dsc, rd, val, CANNOT_WRITE_PC);
6896 }
6897
6898 static int
6899 thumb_copy_pc_relative_16bit (struct gdbarch *gdbarch, struct regcache *regs,
6900 arm_displaced_step_closure *dsc,
6901 int rd, unsigned int imm)
6902 {
6903
6904 /* Encoding T2: ADDS Rd, #imm */
6905 dsc->modinsn[0] = (0x3000 | (rd << 8) | imm);
6906
6907 install_pc_relative (gdbarch, regs, dsc, rd);
6908
6909 return 0;
6910 }
6911
6912 static int
6913 thumb_decode_pc_relative_16bit (struct gdbarch *gdbarch, uint16_t insn,
6914 struct regcache *regs,
6915 arm_displaced_step_closure *dsc)
6916 {
6917 unsigned int rd = bits (insn, 8, 10);
6918 unsigned int imm8 = bits (insn, 0, 7);
6919
6920 displaced_debug_printf ("copying thumb adr r%d, #%d insn %.4x",
6921 rd, imm8, insn);
6922
6923 return thumb_copy_pc_relative_16bit (gdbarch, regs, dsc, rd, imm8);
6924 }
6925
/* Copy a 32-bit Thumb PC-relative ADR (ADR.W) for displaced stepping.
   The preparation step loads the PC value into Rd, and the instruction
   is rewritten as a SUB or ADD of the raw immediate onto Rd.  */

static int
thumb_copy_pc_relative_32bit (struct gdbarch *gdbarch, uint16_t insn1,
			      uint16_t insn2, struct regcache *regs,
			      arm_displaced_step_closure *dsc)
{
  unsigned int rd = bits (insn2, 8, 11);
  /* Since immediate has the same encoding in ADR ADD and SUB, so we simply
     extract raw immediate encoding rather than computing immediate.  When
     generating ADD or SUB instruction, we can simply perform OR operation to
     set immediate into ADD.  */
  unsigned int imm_3_8 = insn2 & 0x70ff;
  unsigned int imm_i = insn1 & 0x0400;  /* Clear all bits except bit 10.  */

  displaced_debug_printf ("copying thumb adr r%d, #%d:%d insn %.4x%.4x",
			  rd, imm_i, imm_3_8, insn1, insn2);

  if (bit (insn1, 7))  /* ADR encoding T2 (subtracting form).  */
    {
      /* Rewrite as SUB Rd, Rd, #imm (SUB immediate, encoding T3).  */
      dsc->modinsn[0] = (0xf1a0 | rd | imm_i);
      dsc->modinsn[1] = ((rd << 8) | imm_3_8);
    }
  else  /* ADR encoding T3 (adding form).  */
    {
      /* Rewrite as ADD Rd, Rd, #imm (ADD immediate, encoding T3).  */
      dsc->modinsn[0] = (0xf100 | rd | imm_i);
      dsc->modinsn[1] = ((rd << 8) | imm_3_8);
    }
  dsc->numinsns = 2;

  install_pc_relative (gdbarch, regs, dsc, rd);

  return 0;
}
6960
/* Copy a 16-bit Thumb PC-relative (literal) load for displaced
   stepping.

   LDR Rd, #imm8

   Rewrite as:

   Preparation: tmp0 <- R0, tmp2 <- R2, tmp3 <- R3, R2 <- PC, R3 <- #imm8;

   Insn: LDR R0, [R2, R3];
   Cleanup: R2 <- tmp2, R3 <- tmp3, Rd <- R0, R0 <- tmp0.  */

static int
thumb_copy_16bit_ldr_literal (struct gdbarch *gdbarch, uint16_t insn1,
			      struct regcache *regs,
			      arm_displaced_step_closure *dsc)
{
  unsigned int rt = bits (insn1, 8, 10);
  unsigned int pc;
  int imm8 = (bits (insn1, 0, 7) << 2);

  displaced_debug_printf ("copying thumb ldr r%d [pc #%d]", rt, imm8);

  /* Save the registers the rewritten sequence clobbers.  */
  dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
  dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
  dsc->tmp[3] = displaced_read_reg (regs, dsc, 3);
  pc = displaced_read_reg (regs, dsc, ARM_PC_REGNUM);
  /* The assembler calculates the required value of the offset from the
     Align(PC,4) value of this instruction to the label.  */
  pc = pc & 0xfffffffc;

  displaced_write_reg (regs, dsc, 2, pc, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 3, imm8, CANNOT_WRITE_PC);

  dsc->rd = rt;
  dsc->u.ldst.xfersize = 4;
  dsc->u.ldst.rn = 0;
  dsc->u.ldst.immed = 0;
  dsc->u.ldst.writeback = 0;
  dsc->u.ldst.restore_r4 = 0;

  dsc->modinsn[0] = 0x58d0;  /* ldr r0, [r2, r3].  */

  dsc->cleanup = &cleanup_load;

  return 0;
}
7005
/* Copy Thumb cbnz/cbz instruction.  The branch condition is evaluated
   here against the current value of Rn, the instruction is replaced by
   a NOP, and cleanup_branch installs the resulting PC.  */

static int
thumb_copy_cbnz_cbz (struct gdbarch *gdbarch, uint16_t insn1,
		     struct regcache *regs,
		     arm_displaced_step_closure *dsc)
{
  int non_zero = bit (insn1, 11);
  /* Branch offset: i:imm5:'0', zero-extended.  */
  unsigned int imm5 = (bit (insn1, 9) << 6) | (bits (insn1, 3, 7) << 1);
  CORE_ADDR from = dsc->insn_addr;
  int rn = bits (insn1, 0, 2);
  int rn_val = displaced_read_reg (regs, dsc, rn);

  dsc->u.branch.cond = (rn_val && non_zero) || (!rn_val && !non_zero);
  /* CBNZ and CBZ do not affect the condition flags.  If condition is true,
     set it INST_AL, so cleanup_branch will know branch is taken, otherwise,
     condition is false, let it be, cleanup_branch will do nothing.  */
  if (dsc->u.branch.cond)
    {
      dsc->u.branch.cond = INST_AL;
      dsc->u.branch.dest = from + 4 + imm5;
    }
  else
    dsc->u.branch.dest = from + 2;

  dsc->u.branch.link = 0;
  dsc->u.branch.exchange = 0;

  displaced_debug_printf ("copying %s [r%d = 0x%x] insn %.4x to %.8lx",
			  non_zero ? "cbnz" : "cbz",
			  rn, rn_val, insn1, dsc->u.branch.dest);

  dsc->modinsn[0] = THUMB_NOP;

  dsc->cleanup = &cleanup_branch;
  return 0;
}
7043
7044 /* Copy Table Branch Byte/Halfword */
7045 static int
7046 thumb2_copy_table_branch (struct gdbarch *gdbarch, uint16_t insn1,
7047 uint16_t insn2, struct regcache *regs,
7048 arm_displaced_step_closure *dsc)
7049 {
7050 ULONGEST rn_val, rm_val;
7051 int is_tbh = bit (insn2, 4);
7052 CORE_ADDR halfwords = 0;
7053 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
7054
7055 rn_val = displaced_read_reg (regs, dsc, bits (insn1, 0, 3));
7056 rm_val = displaced_read_reg (regs, dsc, bits (insn2, 0, 3));
7057
7058 if (is_tbh)
7059 {
7060 gdb_byte buf[2];
7061
7062 target_read_memory (rn_val + 2 * rm_val, buf, 2);
7063 halfwords = extract_unsigned_integer (buf, 2, byte_order);
7064 }
7065 else
7066 {
7067 gdb_byte buf[1];
7068
7069 target_read_memory (rn_val + rm_val, buf, 1);
7070 halfwords = extract_unsigned_integer (buf, 1, byte_order);
7071 }
7072
7073 displaced_debug_printf ("%s base 0x%x offset 0x%x offset 0x%x",
7074 is_tbh ? "tbh" : "tbb",
7075 (unsigned int) rn_val, (unsigned int) rm_val,
7076 (unsigned int) halfwords);
7077
7078 dsc->u.branch.cond = INST_AL;
7079 dsc->u.branch.link = 0;
7080 dsc->u.branch.exchange = 0;
7081 dsc->u.branch.dest = dsc->insn_addr + 4 + 2 * halfwords;
7082
7083 dsc->cleanup = &cleanup_branch;
7084
7085 return 0;
7086 }
7087
7088 static void
7089 cleanup_pop_pc_16bit_all (struct gdbarch *gdbarch, struct regcache *regs,
7090 arm_displaced_step_closure *dsc)
7091 {
7092 /* PC <- r7 */
7093 int val = displaced_read_reg (regs, dsc, 7);
7094 displaced_write_reg (regs, dsc, ARM_PC_REGNUM, val, BX_WRITE_PC);
7095
7096 /* r7 <- r8 */
7097 val = displaced_read_reg (regs, dsc, 8);
7098 displaced_write_reg (regs, dsc, 7, val, CANNOT_WRITE_PC);
7099
7100 /* r8 <- tmp[0] */
7101 displaced_write_reg (regs, dsc, 8, dsc->tmp[0], CANNOT_WRITE_PC);
7102
7103 }
7104
/* Copy a 16-bit Thumb POP instruction whose register list includes PC.
   The PC cannot simply be popped in the scratch space, so the
   instruction is rewritten as described in the comment below and the
   final PC write is performed by the registered cleanup routine.  */
static int
thumb_copy_pop_pc_16bit (struct gdbarch *gdbarch, uint16_t insn1,
			 struct regcache *regs,
			 arm_displaced_step_closure *dsc)
{
  dsc->u.block.regmask = insn1 & 0x00ff;

  /* Rewrite instruction: POP {rX, rY, ...,rZ, PC}
     to :

     (1) register list is full, that is, r0-r7 are used.
     Prepare: tmp[0] <- r8

     POP {r0, r1, ...., r6, r7}; remove PC from reglist
     MOV r8, r7; Move value of r7 to r8;
     POP {r7}; Store PC value into r7.

     Cleanup: PC <- r7, r7 <- r8, r8 <-tmp[0]

     (2) register list is not full, supposing there are N registers in
     register list (except PC, 0 <= N <= 7).
     Prepare: for each i, 0 - N, tmp[i] <- ri.

     POP {r0, r1, ...., rN};

     Cleanup: Set registers in original reglist from r0 - rN.  Restore r0 - rN
     from tmp[] properly.
  */
  displaced_debug_printf ("copying thumb pop {%.8x, pc} insn %.4x",
			  dsc->u.block.regmask, insn1);

  if (dsc->u.block.regmask == 0xff)
    {
      /* Case (1): full list.  Save r8, which the rewritten sequence uses
	 as scratch.  */
      dsc->tmp[0] = displaced_read_reg (regs, dsc, 8);

      dsc->modinsn[0] = (insn1 & 0xfeff); /* POP {r0,r1,...,r6, r7} */
      dsc->modinsn[1] = 0x46b8;  /* MOV r8, r7 */
      dsc->modinsn[2] = 0xbc80;  /* POP {r7} */

      dsc->numinsns = 3;
      dsc->cleanup = &cleanup_pop_pc_16bit_all;
    }
  else
    {
      /* Case (2): partial list.  Pop into r0..rN (the N listed registers
	 plus one extra slot for the PC value) and let
	 cleanup_block_load_pc scatter the popped values to the registers
	 named in the original list.  */
      unsigned int num_in_list = count_one_bits (dsc->u.block.regmask);
      unsigned int i;
      unsigned int new_regmask;

      /* Save r0..rN, which the rewritten POP overwrites.  */
      for (i = 0; i < num_in_list + 1; i++)
	dsc->tmp[i] = displaced_read_reg (regs, dsc, i);

      new_regmask = (1 << (num_in_list + 1)) - 1;

      displaced_debug_printf ("POP {..., pc}: original reg list %.4x, "
			      "modified list %.4x",
			      (int) dsc->u.block.regmask, new_regmask);

      /* Keep PC in the recorded mask so the cleanup knows to write it.  */
      dsc->u.block.regmask |= 0x8000;
      dsc->u.block.writeback = 0;
      dsc->u.block.cond = INST_AL;

      dsc->modinsn[0] = (insn1 & ~0x1ff) | (new_regmask & 0xff);

      dsc->cleanup = &cleanup_block_load_pc;
    }

  return 0;
}
7173
/* Decode a 16-bit Thumb instruction INSN1 and prepare its displaced
   copy in DSC, dispatching on the top opcode bits (15:12, then 11:10)
   to the appropriate copy routine.  Instructions with no PC dependence
   are copied unmodified.  Raises an internal error on decode failure.  */
static void
thumb_process_displaced_16bit_insn (struct gdbarch *gdbarch, uint16_t insn1,
				    struct regcache *regs,
				    arm_displaced_step_closure *dsc)
{
  unsigned short op_bit_12_15 = bits (insn1, 12, 15);
  unsigned short op_bit_10_11 = bits (insn1, 10, 11);
  int err = 0;

  /* 16-bit thumb instructions. */
  switch (op_bit_12_15)
    {
      /* Shift (imme), add, subtract, move and compare. */
    case 0: case 1: case 2: case 3:
      err = thumb_copy_unmodified_16bit (gdbarch, insn1,
					 "shift/add/sub/mov/cmp",
					 dsc);
      break;
    case 4:
      switch (op_bit_10_11)
	{
	case 0: /* Data-processing */
	  err = thumb_copy_unmodified_16bit (gdbarch, insn1,
					     "data-processing",
					     dsc);
	  break;
	case 1: /* Special data instructions and branch and exchange.  */
	  {
	    unsigned short op = bits (insn1, 7, 9);
	    if (op == 6 || op == 7) /* BX or BLX */
	      err = thumb_copy_bx_blx_reg (gdbarch, insn1, regs, dsc);
	    else if (bits (insn1, 6, 7) != 0) /* ADD/MOV/CMP high registers.  */
	      err = thumb_copy_alu_reg (gdbarch, insn1, regs, dsc);
	    else
	      err = thumb_copy_unmodified_16bit (gdbarch, insn1, "special data",
						 dsc);
	  }
	  break;
	default: /* LDR (literal) */
	  err = thumb_copy_16bit_ldr_literal (gdbarch, insn1, regs, dsc);
	}
      break;
    case 5: case 6: case 7: case 8: case 9: /* Load/Store single data item */
      err = thumb_copy_unmodified_16bit (gdbarch, insn1, "ldr/str", dsc);
      break;
    case 10:
      if (op_bit_10_11 < 2) /* Generate PC-relative address */
	err = thumb_decode_pc_relative_16bit (gdbarch, insn1, regs, dsc);
      else /* Generate SP-relative address */
	err = thumb_copy_unmodified_16bit (gdbarch, insn1, "sp-relative", dsc);
      break;
    case 11: /* Misc 16-bit instructions */
      {
	switch (bits (insn1, 8, 11))
	  {
	  case 1: case 3:  case 9: case 11: /* CBNZ, CBZ */
	    err = thumb_copy_cbnz_cbz (gdbarch, insn1, regs, dsc);
	    break;
	  case 12: case 13: /* POP */
	    if (bit (insn1, 8)) /* PC is in register list.  */
	      err = thumb_copy_pop_pc_16bit (gdbarch, insn1, regs, dsc);
	    else
	      err = thumb_copy_unmodified_16bit (gdbarch, insn1, "pop", dsc);
	    break;
	  case 15: /* If-Then, and hints */
	    if (bits (insn1, 0, 3))
	      /* If-Then makes up to four following instructions conditional.
		 IT instruction itself is not conditional, so handle it as a
		 common unmodified instruction.  */
	      err = thumb_copy_unmodified_16bit (gdbarch, insn1, "If-Then",
						 dsc);
	    else
	      err = thumb_copy_unmodified_16bit (gdbarch, insn1, "hints", dsc);
	    break;
	  default:
	    err = thumb_copy_unmodified_16bit (gdbarch, insn1, "misc", dsc);
	  }
      }
      break;
    case 12:
      if (op_bit_10_11 < 2) /* Store multiple registers */
	err = thumb_copy_unmodified_16bit (gdbarch, insn1, "stm", dsc);
      else /* Load multiple registers */
	err = thumb_copy_unmodified_16bit (gdbarch, insn1, "ldm", dsc);
      break;
    case 13: /* Conditional branch and supervisor call */
      if (bits (insn1, 9, 11) != 7) /* conditional branch */
	err = thumb_copy_b (gdbarch, insn1, dsc);
      else
	err = thumb_copy_svc (gdbarch, insn1, regs, dsc);
      break;
    case 14: /* Unconditional branch */
      err = thumb_copy_b (gdbarch, insn1, dsc);
      break;
    default:
      err = 1;
    }

  if (err)
    internal_error (__FILE__, __LINE__,
		    _("thumb_process_displaced_16bit_insn: Instruction decode error"));
}
7276
/* Decode a 32-bit Thumb-2 instruction from the "load byte/halfword/word
   and memory hints" encoding space and dispatch to the matching
   displaced-copy routine.  Literal (PC-relative) loads and preloads need
   rewriting; register/immediate forms are copied unmodified.  Returns
   nonzero on decode failure.  */
static int
decode_thumb_32bit_ld_mem_hints (struct gdbarch *gdbarch,
				 uint16_t insn1, uint16_t insn2,
				 struct regcache *regs,
				 arm_displaced_step_closure *dsc)
{
  int rt = bits (insn2, 12, 15);
  int rn = bits (insn1, 0, 3);
  int op1 = bits (insn1, 7, 8);

  switch (bits (insn1, 5, 6))
    {
    case 0: /* Load byte and memory hints */
      if (rt == 0xf) /* PLD/PLI */
	{
	  if (rn == 0xf)
	    /* PLD literal or Encoding T3 of PLI(immediate, literal).  */
	    return thumb2_copy_preload (gdbarch, insn1, insn2, regs, dsc);
	  else
	    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						"pli/pld", dsc);
	}
      else
	{
	  if (rn == 0xf) /* LDRB/LDRSB (literal) */
	    return thumb2_copy_load_literal (gdbarch, insn1, insn2, regs, dsc,
					     1);
	  else
	    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						"ldrb{reg, immediate}/ldrbt",
						dsc);
	}

      break;
    case 1: /* Load halfword and memory hints.  */
      if (rt == 0xf) /* PLD{W} and Unalloc memory hint.  */
	return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					    "pld/unalloc memhint", dsc);
      else
	{
	  if (rn == 0xf)
	    return thumb2_copy_load_literal (gdbarch, insn1, insn2, regs, dsc,
					     2);
	  else
	    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						"ldrh/ldrht", dsc);
	}
      break;
    case 2: /* Load word */
      {
	int insn2_bit_8_11 = bits (insn2, 8, 11);

	if (rn == 0xf)
	  return thumb2_copy_load_literal (gdbarch, insn1, insn2, regs, dsc, 4);
	else if (op1 == 0x1) /* Encoding T3 */
	  return thumb2_copy_load_reg_imm (gdbarch, insn1, insn2, regs, dsc,
					   0, 1);
	else /* op1 == 0x0 */
	  {
	    if (insn2_bit_8_11 == 0xc || (insn2_bit_8_11 & 0x9) == 0x9)
	      /* LDR (immediate) */
	      return thumb2_copy_load_reg_imm (gdbarch, insn1, insn2, regs,
					       dsc, bit (insn2, 8), 1);
	    else if (insn2_bit_8_11 == 0xe) /* LDRT */
	      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						  "ldrt", dsc);
	    else
	      /* LDR (register) */
	      return thumb2_copy_load_reg_imm (gdbarch, insn1, insn2, regs,
					       dsc, 0, 0);
	  }
	break;
      }
    default:
      return thumb_32bit_copy_undef (gdbarch, insn1, insn2, dsc);
      break;
    }
  return 0;
}
7356
/* Decode a 32-bit (Thumb-2) instruction, given as the two halfwords
   INSN1/INSN2, and prepare its displaced copy in DSC by dispatching on
   bits 12:11 of the first halfword and finer subfields below that.
   Raises an internal error on decode failure.  */
static void
thumb_process_displaced_32bit_insn (struct gdbarch *gdbarch, uint16_t insn1,
				    uint16_t insn2, struct regcache *regs,
				    arm_displaced_step_closure *dsc)
{
  int err = 0;
  unsigned short op = bit (insn2, 15);
  unsigned int op1 = bits (insn1, 11, 12);

  switch (op1)
    {
    case 1:
      {
	switch (bits (insn1, 9, 10))
	  {
	  case 0:
	    if (bit (insn1, 6))
	      {
		/* Load/store {dual, exclusive}, table branch.  */
		if (bits (insn1, 7, 8) == 1 && bits (insn1, 4, 5) == 1
		    && bits (insn2, 5, 7) == 0)
		  err = thumb2_copy_table_branch (gdbarch, insn1, insn2, regs,
						  dsc);
		else
		  /* PC is not allowed to use in load/store {dual, exclusive}
		     instructions.  */
		  err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						     "load/store dual/ex", dsc);
	      }
	    else /* load/store multiple */
	      {
		switch (bits (insn1, 7, 8))
		  {
		  case 0: case 3: /* SRS, RFE */
		    err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						       "srs/rfe", dsc);
		    break;
		  case 1: case 2: /* LDM/STM/PUSH/POP */
		    err = thumb2_copy_block_xfer (gdbarch, insn1, insn2, regs, dsc);
		    break;
		  }
	      }
	    break;

	  case 1:
	    /* Data-processing (shift register).  */
	    err = thumb2_decode_dp_shift_reg (gdbarch, insn1, insn2, regs,
					      dsc);
	    break;
	  default: /* Coprocessor instructions.  */
	    err = thumb2_decode_svc_copro (gdbarch, insn1, insn2, regs, dsc);
	    break;
	  }
	break;
      }
    case 2: /* op1 = 2 */
      if (op) /* Branch and misc control.  */
	{
	  if (bit (insn2, 14)  /* BLX/BL */
	      || bit (insn2, 12) /* Unconditional branch */
	      || (bits (insn1, 7, 9) != 0x7)) /* Conditional branch */
	    err = thumb2_copy_b_bl_blx (gdbarch, insn1, insn2, regs, dsc);
	  else
	    err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					       "misc ctrl", dsc);
	}
      else
	{
	  if (bit (insn1, 9)) /* Data processing (plain binary imm).  */
	    {
	      int dp_op = bits (insn1, 4, 8);
	      int rn = bits (insn1, 0, 3);
	      /* ADR and ADD/SUB with Rn == PC read the PC and must be
		 rewritten; everything else is PC-safe.  */
	      if ((dp_op == 0 || dp_op == 0xa) && rn == 0xf)
		err = thumb_copy_pc_relative_32bit (gdbarch, insn1, insn2,
						    regs, dsc);
	      else
		err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						   "dp/pb", dsc);
	    }
	  else /* Data processing (modified immediate) */
	    err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					       "dp/mi", dsc);
	}
      break;
    case 3: /* op1 = 3 */
      switch (bits (insn1, 9, 10))
	{
	case 0:
	  if (bit (insn1, 4))
	    err = decode_thumb_32bit_ld_mem_hints (gdbarch, insn1, insn2,
						   regs, dsc);
	  else /* NEON Load/Store and Store single data item */
	    err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					       "neon elt/struct load/store",
					       dsc);
	  break;
	case 1: /* op1 = 3, bits (9, 10) == 1 */
	  switch (bits (insn1, 7, 8))
	    {
	    case 0: case 1: /* Data processing (register) */
	      err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						 "dp(reg)", dsc);
	      break;
	    case 2: /* Multiply and absolute difference */
	      err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						 "mul/mua/diff", dsc);
	      break;
	    case 3: /* Long multiply and divide */
	      err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						 "lmul/lmua", dsc);
	      break;
	    }
	  break;
	default: /* Coprocessor instructions */
	  err = thumb2_decode_svc_copro (gdbarch, insn1, insn2, regs, dsc);
	  break;
	}
      break;
    default:
      err = 1;
    }

  if (err)
    internal_error (__FILE__, __LINE__,
		    _("thumb_process_displaced_32bit_insn: Instruction decode error"));

}
7484
7485 static void
7486 thumb_process_displaced_insn (struct gdbarch *gdbarch, CORE_ADDR from,
7487 struct regcache *regs,
7488 arm_displaced_step_closure *dsc)
7489 {
7490 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
7491 uint16_t insn1
7492 = read_memory_unsigned_integer (from, 2, byte_order_for_code);
7493
7494 displaced_debug_printf ("process thumb insn %.4x at %.8lx",
7495 insn1, (unsigned long) from);
7496
7497 dsc->is_thumb = 1;
7498 dsc->insn_size = thumb_insn_size (insn1);
7499 if (thumb_insn_size (insn1) == 4)
7500 {
7501 uint16_t insn2
7502 = read_memory_unsigned_integer (from + 2, 2, byte_order_for_code);
7503 thumb_process_displaced_32bit_insn (gdbarch, insn1, insn2, regs, dsc);
7504 }
7505 else
7506 thumb_process_displaced_16bit_insn (gdbarch, insn1, regs, dsc);
7507 }
7508
/* Prepare a displaced copy of the instruction at FROM, to be executed
   out of line at scratch address TO.  Initializes the common DSC fields,
   then dispatches on the instruction's major opcode to the per-class ARM
   decode routines (Thumb mode is handed off wholesale).  Raises an
   internal error on decode failure.  */
void
arm_process_displaced_insn (struct gdbarch *gdbarch, CORE_ADDR from,
			    CORE_ADDR to, struct regcache *regs,
			    arm_displaced_step_closure *dsc)
{
  int err = 0;
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  uint32_t insn;

  /* Most displaced instructions use a 1-instruction scratch space, so set this
     here and override below if/when necessary.  */
  dsc->numinsns = 1;
  dsc->insn_addr = from;
  dsc->scratch_base = to;
  dsc->cleanup = NULL;
  dsc->wrote_to_pc = 0;

  if (!displaced_in_arm_mode (regs))
    return thumb_process_displaced_insn (gdbarch, from, regs, dsc);

  dsc->is_thumb = 0;
  dsc->insn_size = 4;
  insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
  displaced_debug_printf ("stepping insn %.8lx at %.8lx",
			  (unsigned long) insn, (unsigned long) from);

  /* Condition 0xf selects the unconditional instruction space.  */
  if ((insn & 0xf0000000) == 0xf0000000)
    err = arm_decode_unconditional (gdbarch, insn, regs, dsc);
  /* Otherwise the switch key packs insn bit 4 into bit 0 and insn bits
     27:25 into bits 3:1, covering the major ARM instruction classes.  */
  else switch (((insn & 0x10) >> 4) | ((insn & 0xe000000) >> 24))
    {
    case 0x0: case 0x1: case 0x2: case 0x3:
      err = arm_decode_dp_misc (gdbarch, insn, regs, dsc);
      break;

    case 0x4: case 0x5: case 0x6:
      err = arm_decode_ld_st_word_ubyte (gdbarch, insn, regs, dsc);
      break;

    case 0x7:
      err = arm_decode_media (gdbarch, insn, dsc);
      break;

    case 0x8: case 0x9: case 0xa: case 0xb:
      err = arm_decode_b_bl_ldmstm (gdbarch, insn, regs, dsc);
      break;

    case 0xc: case 0xd: case 0xe: case 0xf:
      err = arm_decode_svc_copro (gdbarch, insn, regs, dsc);
      break;
    }

  if (err)
    internal_error (__FILE__, __LINE__,
		    _("arm_process_displaced_insn: Instruction decode error"));
}
7564
7565 /* Actually set up the scratch space for a displaced instruction. */
7566
7567 void
7568 arm_displaced_init_closure (struct gdbarch *gdbarch, CORE_ADDR from,
7569 CORE_ADDR to, arm_displaced_step_closure *dsc)
7570 {
7571 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
7572 unsigned int i, len, offset;
7573 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
7574 int size = dsc->is_thumb? 2 : 4;
7575 const gdb_byte *bkp_insn;
7576
7577 offset = 0;
7578 /* Poke modified instruction(s). */
7579 for (i = 0; i < dsc->numinsns; i++)
7580 {
7581 if (size == 4)
7582 displaced_debug_printf ("writing insn %.8lx at %.8lx",
7583 dsc->modinsn[i], (unsigned long) to + offset);
7584 else if (size == 2)
7585 displaced_debug_printf ("writing insn %.4x at %.8lx",
7586 (unsigned short) dsc->modinsn[i],
7587 (unsigned long) to + offset);
7588
7589 write_memory_unsigned_integer (to + offset, size,
7590 byte_order_for_code,
7591 dsc->modinsn[i]);
7592 offset += size;
7593 }
7594
7595 /* Choose the correct breakpoint instruction. */
7596 if (dsc->is_thumb)
7597 {
7598 bkp_insn = tdep->thumb_breakpoint;
7599 len = tdep->thumb_breakpoint_size;
7600 }
7601 else
7602 {
7603 bkp_insn = tdep->arm_breakpoint;
7604 len = tdep->arm_breakpoint_size;
7605 }
7606
7607 /* Put breakpoint afterwards. */
7608 write_memory (to + offset, bkp_insn, len);
7609
7610 displaced_debug_printf ("copy %s->%s", paddress (gdbarch, from),
7611 paddress (gdbarch, to));
7612 }
7613
7614 /* Entry point for cleaning things up after a displaced instruction has been
7615 single-stepped. */
7616
7617 void
7618 arm_displaced_step_fixup (struct gdbarch *gdbarch,
7619 struct displaced_step_closure *dsc_,
7620 CORE_ADDR from, CORE_ADDR to,
7621 struct regcache *regs)
7622 {
7623 arm_displaced_step_closure *dsc = (arm_displaced_step_closure *) dsc_;
7624
7625 if (dsc->cleanup)
7626 dsc->cleanup (gdbarch, regs, dsc);
7627
7628 if (!dsc->wrote_to_pc)
7629 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM,
7630 dsc->insn_addr + dsc->insn_size);
7631
7632 }
7633
7634 #include "bfd-in2.h"
7635 #include "libcoff.h"
7636
/* Disassembly wrapper: print the instruction at MEMADDR.  When GDB knows
   the address is Thumb, arrange for the opcodes disassembler to decode
   Thumb by handing it a fake Thumb COFF symbol (the opcodes library
   keys its mode choice off symbol class).  Static state below is
   initialized once and reused across calls.  */
static int
gdb_print_insn_arm (bfd_vma memaddr, disassemble_info *info)
{
  gdb_disassembler *di
    = static_cast<gdb_disassembler *>(info->application_data);
  struct gdbarch *gdbarch = di->arch ();

  if (arm_pc_is_thumb (gdbarch, memaddr))
    {
      static asymbol *asym;
      static combined_entry_type ce;
      static struct coff_symbol_struct csym;
      static struct bfd fake_bfd;
      static bfd_target fake_target;

      if (csym.native == NULL)
	{
	  /* Create a fake symbol vector containing a Thumb symbol.
	     This is solely so that the code in print_insn_little_arm()
	     and print_insn_big_arm() in opcodes/arm-dis.c will detect
	     the presence of a Thumb symbol and switch to decoding
	     Thumb instructions.  */

	  fake_target.flavour = bfd_target_coff_flavour;
	  fake_bfd.xvec = &fake_target;
	  ce.u.syment.n_sclass = C_THUMBEXTFUNC;
	  csym.native = &ce;
	  csym.symbol.the_bfd = &fake_bfd;
	  csym.symbol.name = "fake";
	  asym = (asymbol *) & csym;
	}

      /* Strip the Thumb bit before handing the address on.  */
      memaddr = UNMAKE_THUMB_ADDR (memaddr);
      info->symbols = &asym;
    }
  else
    info->symbols = NULL;

  /* GDB is able to get bfd_mach from the exe_bfd, info->mach is
     accurate, so mark USER_SPECIFIED_MACHINE_TYPE bit.  Otherwise,
     opcodes/arm-dis.c:print_insn reset info->mach, and it will trigger
     the assert on the mismatch of info->mach and
     bfd_get_mach (current_program_space->exec_bfd ()) in
     default_print_insn.  */
  if (current_program_space->exec_bfd () != NULL)
    info->flags |= USER_SPECIFIED_MACHINE_TYPE;

  return default_print_insn (memaddr, info);
}
7686
7687 /* The following define instruction sequences that will cause ARM
7688 cpu's to take an undefined instruction trap. These are used to
7689 signal a breakpoint to GDB.
7690
7691 The newer ARMv4T cpu's are capable of operating in ARM or Thumb
7692 modes. A different instruction is required for each mode. The ARM
7693 cpu's can also be big or little endian. Thus four different
7694 instructions are needed to support all cases.
7695
7696 Note: ARMv4 defines several new instructions that will take the
7697 undefined instruction trap. ARM7TDMI is nominally ARMv4T, but does
7698 not in fact add the new instructions. The new undefined
7699 instructions in ARMv4 are all instructions that had no defined
7700 behaviour in earlier chips. There is no guarantee that they will
7701 raise an exception, but may be treated as NOP's. In practice, it
   may only be safe to rely on instructions matching:
7703
7704 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
7705 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
7706 C C C C 0 1 1 x x x x x x x x x x x x x x x x x x x x 1 x x x x
7707
   Even this may only be true if the condition predicate is true.  The
7709 following use a condition predicate of ALWAYS so it is always TRUE.
7710
7711 There are other ways of forcing a breakpoint. GNU/Linux, RISC iX,
7712 and NetBSD all use a software interrupt rather than an undefined
   instruction to force a trap.  This can be handled by the
7714 abi-specific code during establishment of the gdbarch vector. */
7715
/* Default breakpoint instruction encodings, little- and big-endian.
   The two Thumb variants are intentionally identical: 0xbe,0xbe is
   byte-symmetric, so endianness does not matter for it.  */
#define ARM_LE_BREAKPOINT {0xFE,0xDE,0xFF,0xE7}
#define ARM_BE_BREAKPOINT {0xE7,0xFF,0xDE,0xFE}
#define THUMB_LE_BREAKPOINT {0xbe,0xbe}
#define THUMB_BE_BREAKPOINT {0xbe,0xbe}

static const gdb_byte arm_default_arm_le_breakpoint[] = ARM_LE_BREAKPOINT;
static const gdb_byte arm_default_arm_be_breakpoint[] = ARM_BE_BREAKPOINT;
static const gdb_byte arm_default_thumb_le_breakpoint[] = THUMB_LE_BREAKPOINT;
static const gdb_byte arm_default_thumb_be_breakpoint[] = THUMB_BE_BREAKPOINT;
7725
7726 /* Implement the breakpoint_kind_from_pc gdbarch method. */
7727
7728 static int
7729 arm_breakpoint_kind_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr)
7730 {
7731 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
7732 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
7733
7734 if (arm_pc_is_thumb (gdbarch, *pcptr))
7735 {
7736 *pcptr = UNMAKE_THUMB_ADDR (*pcptr);
7737
7738 /* If we have a separate 32-bit breakpoint instruction for Thumb-2,
7739 check whether we are replacing a 32-bit instruction. */
7740 if (tdep->thumb2_breakpoint != NULL)
7741 {
7742 gdb_byte buf[2];
7743
7744 if (target_read_memory (*pcptr, buf, 2) == 0)
7745 {
7746 unsigned short inst1;
7747
7748 inst1 = extract_unsigned_integer (buf, 2, byte_order_for_code);
7749 if (thumb_insn_size (inst1) == 4)
7750 return ARM_BP_KIND_THUMB2;
7751 }
7752 }
7753
7754 return ARM_BP_KIND_THUMB;
7755 }
7756 else
7757 return ARM_BP_KIND_ARM;
7758
7759 }
7760
7761 /* Implement the sw_breakpoint_from_kind gdbarch method. */
7762
7763 static const gdb_byte *
7764 arm_sw_breakpoint_from_kind (struct gdbarch *gdbarch, int kind, int *size)
7765 {
7766 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
7767
7768 switch (kind)
7769 {
7770 case ARM_BP_KIND_ARM:
7771 *size = tdep->arm_breakpoint_size;
7772 return tdep->arm_breakpoint;
7773 case ARM_BP_KIND_THUMB:
7774 *size = tdep->thumb_breakpoint_size;
7775 return tdep->thumb_breakpoint;
7776 case ARM_BP_KIND_THUMB2:
7777 *size = tdep->thumb2_breakpoint_size;
7778 return tdep->thumb2_breakpoint;
7779 default:
7780 gdb_assert_not_reached ("unexpected arm breakpoint kind");
7781 }
7782 }
7783
7784 /* Implement the breakpoint_kind_from_current_state gdbarch method. */
7785
7786 static int
7787 arm_breakpoint_kind_from_current_state (struct gdbarch *gdbarch,
7788 struct regcache *regcache,
7789 CORE_ADDR *pcptr)
7790 {
7791 gdb_byte buf[4];
7792
7793 /* Check the memory pointed by PC is readable. */
7794 if (target_read_memory (regcache_read_pc (regcache), buf, 4) == 0)
7795 {
7796 struct arm_get_next_pcs next_pcs_ctx;
7797
7798 arm_get_next_pcs_ctor (&next_pcs_ctx,
7799 &arm_get_next_pcs_ops,
7800 gdbarch_byte_order (gdbarch),
7801 gdbarch_byte_order_for_code (gdbarch),
7802 0,
7803 regcache);
7804
7805 std::vector<CORE_ADDR> next_pcs = arm_get_next_pcs (&next_pcs_ctx);
7806
7807 /* If MEMADDR is the next instruction of current pc, do the
7808 software single step computation, and get the thumb mode by
7809 the destination address. */
7810 for (CORE_ADDR pc : next_pcs)
7811 {
7812 if (UNMAKE_THUMB_ADDR (pc) == *pcptr)
7813 {
7814 if (IS_THUMB_ADDR (pc))
7815 {
7816 *pcptr = MAKE_THUMB_ADDR (*pcptr);
7817 return arm_breakpoint_kind_from_pc (gdbarch, pcptr);
7818 }
7819 else
7820 return ARM_BP_KIND_ARM;
7821 }
7822 }
7823 }
7824
7825 return arm_breakpoint_kind_from_pc (gdbarch, pcptr);
7826 }
7827
/* Extract from an array REGBUF containing the (raw) register state a
   function return value of type TYPE, and copy that, in virtual
   format, into VALBUF.  Floating-point values come back in F0 (FPA) or
   r0/r1 (soft-float ABIs); integers and small aggregates come back in
   consecutive core registers starting at r0.  */

static void
arm_extract_return_value (struct type *type, struct regcache *regs,
			  gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = regs->arch ();
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  if (TYPE_CODE_FLT == type->code ())
    {
      switch (gdbarch_tdep (gdbarch)->fp_model)
	{
	case ARM_FLOAT_FPA:
	  {
	    /* The value is in register F0 in internal format.  We need to
	       extract the raw value and then convert it to the desired
	       internal type.  */
	    bfd_byte tmpbuf[ARM_FP_REGISTER_SIZE];

	    regs->cooked_read (ARM_F0_REGNUM, tmpbuf);
	    target_float_convert (tmpbuf, arm_ext_type (gdbarch),
				  valbuf, type);
	  }
	  break;

	case ARM_FLOAT_SOFT_FPA:
	case ARM_FLOAT_SOFT_VFP:
	  /* ARM_FLOAT_VFP can arise if this is a variadic function so
	     not using the VFP ABI code.  */
	case ARM_FLOAT_VFP:
	  /* Value is in r0, plus r1 for doubles.  */
	  regs->cooked_read (ARM_A1_REGNUM, valbuf);
	  if (TYPE_LENGTH (type) > 4)
	    regs->cooked_read (ARM_A1_REGNUM + 1,
			       valbuf + ARM_INT_REGISTER_SIZE);
	  break;

	default:
	  internal_error (__FILE__, __LINE__,
			  _("arm_extract_return_value: "
			    "Floating point model not supported"));
	  break;
	}
    }
  else if (type->code () == TYPE_CODE_INT
	   || type->code () == TYPE_CODE_CHAR
	   || type->code () == TYPE_CODE_BOOL
	   || type->code () == TYPE_CODE_PTR
	   || TYPE_IS_REFERENCE (type)
	   || type->code () == TYPE_CODE_ENUM)
    {
      /* If the type is a plain integer, then the access is
	 straight-forward.  Otherwise we have to play around a bit
	 more.  */
      int len = TYPE_LENGTH (type);
      int regno = ARM_A1_REGNUM;
      ULONGEST tmp;

      while (len > 0)
	{
	  /* By using store_unsigned_integer we avoid having to do
	     anything special for small big-endian values.  */
	  regcache_cooked_read_unsigned (regs, regno++, &tmp);
	  store_unsigned_integer (valbuf,
				  (len > ARM_INT_REGISTER_SIZE
				   ? ARM_INT_REGISTER_SIZE : len),
				  byte_order, tmp);
	  len -= ARM_INT_REGISTER_SIZE;
	  valbuf += ARM_INT_REGISTER_SIZE;
	}
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
	 been stored to word-aligned memory and then loaded into
	 registers with 32-bit load instruction(s).  */
      int len = TYPE_LENGTH (type);
      int regno = ARM_A1_REGNUM;
      bfd_byte tmpbuf[ARM_INT_REGISTER_SIZE];

      while (len > 0)
	{
	  regs->cooked_read (regno++, tmpbuf);
	  memcpy (valbuf, tmpbuf,
		  len > ARM_INT_REGISTER_SIZE ? ARM_INT_REGISTER_SIZE : len);
	  len -= ARM_INT_REGISTER_SIZE;
	  valbuf += ARM_INT_REGISTER_SIZE;
	}
    }
}
7920
7921
/* Will a function return an aggregate type in memory or in a
   register?  Return 0 if an aggregate type can be returned in a
   register, 1 if it must be returned in memory.  AAPCS and the older
   APCS differ: AAPCS returns any aggregate up to one word in a
   register, APCS additionally requires the aggregate to be
   "integer-like" (see below).  */

static int
arm_return_in_memory (struct gdbarch *gdbarch, struct type *type)
{
  enum type_code code;

  type = check_typedef (type);

  /* Simple, non-aggregate types (ie not including vectors and
     complex) are always returned in a register (or registers).  */
  code = type->code ();
  if (TYPE_CODE_STRUCT != code && TYPE_CODE_UNION != code
      && TYPE_CODE_ARRAY != code && TYPE_CODE_COMPLEX != code)
    return 0;

  if (TYPE_CODE_ARRAY == code && type->is_vector ())
    {
      /* Vector values should be returned using ARM registers if they
	 are not over 16 bytes.  */
      return (TYPE_LENGTH (type) > 16);
    }

  if (gdbarch_tdep (gdbarch)->arm_abi != ARM_ABI_APCS)
    {
      /* The AAPCS says all aggregates not larger than a word are returned
	 in a register.  */
      if (TYPE_LENGTH (type) <= ARM_INT_REGISTER_SIZE)
	return 0;

      return 1;
    }
  else
    {
      /* APCS path: nRc is the result (1 = return in memory).  */
      int nRc;

      /* All aggregate types that won't fit in a register must be returned
	 in memory.  */
      if (TYPE_LENGTH (type) > ARM_INT_REGISTER_SIZE)
	return 1;

      /* In the ARM ABI, "integer" like aggregate types are returned in
	 registers.  For an aggregate type to be integer like, its size
	 must be less than or equal to ARM_INT_REGISTER_SIZE and the
	 offset of each addressable subfield must be zero.  Note that bit
	 fields are not addressable, and all addressable subfields of
	 unions always start at offset zero.

	 This function is based on the behaviour of GCC 2.95.1.
	 See: gcc/arm.c: arm_return_in_memory() for details.

	 Note: All versions of GCC before GCC 2.95.2 do not set up the
	 parameters correctly for a function returning the following
	 structure: struct { float f;}; This should be returned in memory,
	 not a register.  Richard Earnshaw sent me a patch, but I do not
	 know of any way to detect if a function like the above has been
	 compiled with the correct calling convention.  */

      /* Assume all other aggregate types can be returned in a register.
	 Run a check for structures, unions and arrays.  */
      nRc = 0;

      if ((TYPE_CODE_STRUCT == code) || (TYPE_CODE_UNION == code))
	{
	  int i;
	  /* Need to check if this struct/union is "integer" like.  For
	     this to be true, its size must be less than or equal to
	     ARM_INT_REGISTER_SIZE and the offset of each addressable
	     subfield must be zero.  Note that bit fields are not
	     addressable, and unions always start at offset zero.  If any
	     of the subfields is a floating point type, the struct/union
	     cannot be an integer type.  */

	  /* For each field in the object, check:
	     1) Is it FP? --> yes, nRc = 1;
	     2) Is it addressable (bitpos != 0) and
	     not packed (bitsize == 0)?
	     --> yes, nRc = 1
	  */

	  for (i = 0; i < type->num_fields (); i++)
	    {
	      enum type_code field_type_code;

	      field_type_code
		= check_typedef (type->field (i).type ())->code ();

	      /* Is it a floating point type field?  */
	      if (field_type_code == TYPE_CODE_FLT)
		{
		  nRc = 1;
		  break;
		}

	      /* If bitpos != 0, then we have to care about it.  */
	      if (TYPE_FIELD_BITPOS (type, i) != 0)
		{
		  /* Bitfields are not addressable.  If the field bitsize is
		     zero, then the field is not packed.  Hence it cannot be
		     a bitfield or any other packed type.  */
		  if (TYPE_FIELD_BITSIZE (type, i) == 0)
		    {
		      nRc = 1;
		      break;
		    }
		}
	    }
	}

      return nRc;
    }
}
8036
/* Write into appropriate registers a function return value of type
   TYPE, given in virtual format.  Mirror image of
   arm_extract_return_value: F0 for FPA floats, r0 (and r1 for doubles)
   for soft-float, consecutive core registers for integers and small
   aggregates.  */

static void
arm_store_return_value (struct type *type, struct regcache *regs,
			const gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = regs->arch ();
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  if (type->code () == TYPE_CODE_FLT)
    {
      gdb_byte buf[ARM_FP_REGISTER_SIZE];

      switch (gdbarch_tdep (gdbarch)->fp_model)
	{
	case ARM_FLOAT_FPA:

	  /* Convert to the FPA extended format before writing F0.  */
	  target_float_convert (valbuf, type, buf, arm_ext_type (gdbarch));
	  regs->cooked_write (ARM_F0_REGNUM, buf);
	  break;

	case ARM_FLOAT_SOFT_FPA:
	case ARM_FLOAT_SOFT_VFP:
	  /* ARM_FLOAT_VFP can arise if this is a variadic function so
	     not using the VFP ABI code.  */
	case ARM_FLOAT_VFP:
	  regs->cooked_write (ARM_A1_REGNUM, valbuf);
	  if (TYPE_LENGTH (type) > 4)
	    regs->cooked_write (ARM_A1_REGNUM + 1,
				valbuf + ARM_INT_REGISTER_SIZE);
	  break;

	default:
	  internal_error (__FILE__, __LINE__,
			  _("arm_store_return_value: Floating "
			    "point model not supported"));
	  break;
	}
    }
  else if (type->code () == TYPE_CODE_INT
	   || type->code () == TYPE_CODE_CHAR
	   || type->code () == TYPE_CODE_BOOL
	   || type->code () == TYPE_CODE_PTR
	   || TYPE_IS_REFERENCE (type)
	   || type->code () == TYPE_CODE_ENUM)
    {
      if (TYPE_LENGTH (type) <= 4)
	{
	  /* Values of one word or less are zero/sign-extended and
	     returned in r0.  */
	  bfd_byte tmpbuf[ARM_INT_REGISTER_SIZE];
	  LONGEST val = unpack_long (type, valbuf);

	  store_signed_integer (tmpbuf, ARM_INT_REGISTER_SIZE, byte_order, val);
	  regs->cooked_write (ARM_A1_REGNUM, tmpbuf);
	}
      else
	{
	  /* Integral values greater than one word are stored in consecutive
	     registers starting with r0.  This will always be a multiple of
	     the register size.  */
	  int len = TYPE_LENGTH (type);
	  int regno = ARM_A1_REGNUM;

	  while (len > 0)
	    {
	      regs->cooked_write (regno++, valbuf);
	      len -= ARM_INT_REGISTER_SIZE;
	      valbuf += ARM_INT_REGISTER_SIZE;
	    }
	}
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
	 been stored to word-aligned memory and then loaded into
	 registers with 32-bit load instruction(s).  */
      int len = TYPE_LENGTH (type);
      int regno = ARM_A1_REGNUM;
      bfd_byte tmpbuf[ARM_INT_REGISTER_SIZE];

      while (len > 0)
	{
	  memcpy (tmpbuf, valbuf,
		  len > ARM_INT_REGISTER_SIZE ? ARM_INT_REGISTER_SIZE : len);
	  regs->cooked_write (regno++, tmpbuf);
	  len -= ARM_INT_REGISTER_SIZE;
	  valbuf += ARM_INT_REGISTER_SIZE;
	}
    }
}
8129
8130
/* Handle function return values.  Implements the gdbarch return_value
   method: decide how a value of VALTYPE is returned from FUNCTION,
   and, when READBUF/WRITEBUF are non-NULL, transfer the value between
   buffer and REGCACHE.  */

static enum return_value_convention
arm_return_value (struct gdbarch *gdbarch, struct value *function,
		  struct type *valtype, struct regcache *regcache,
		  gdb_byte *readbuf, const gdb_byte *writebuf)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  struct type *func_type = function ? value_type (function) : NULL;
  enum arm_vfp_cprc_base_type vfp_base_type;
  int vfp_base_count;

  /* If the function follows the VFP ABI and VALTYPE is a VFP
     co-processor register candidate, the value is passed in
     VFP_BASE_COUNT units of the register class named by REG_CHAR
     (s/d/q), each UNIT_LENGTH bytes long.  */
  if (arm_vfp_abi_for_function (gdbarch, func_type)
      && arm_vfp_call_candidate (valtype, &vfp_base_type, &vfp_base_count))
    {
      int reg_char = arm_vfp_cprc_reg_char (vfp_base_type);
      int unit_length = arm_vfp_cprc_unit_length (vfp_base_type);
      int i;
      for (i = 0; i < vfp_base_count; i++)
	{
	  if (reg_char == 'q')
	    {
	      /* Quad registers are accessed via their two double
		 register halves.  */
	      if (writebuf)
		arm_neon_quad_write (gdbarch, regcache, i,
				     writebuf + i * unit_length);

	      if (readbuf)
		arm_neon_quad_read (gdbarch, regcache, i,
				    readbuf + i * unit_length);
	    }
	  else
	    {
	      char name_buf[4];
	      int regnum;

	      /* Map the register name (e.g. "d1") to its number.  */
	      xsnprintf (name_buf, sizeof (name_buf), "%c%d", reg_char, i);
	      regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
						    strlen (name_buf));
	      if (writebuf)
		regcache->cooked_write (regnum, writebuf + i * unit_length);
	      if (readbuf)
		regcache->cooked_read (regnum, readbuf + i * unit_length);
	    }
	}
      return RETURN_VALUE_REGISTER_CONVENTION;
    }

  /* Aggregates and complex values may instead be returned in memory,
     depending on the struct-return convention and the type layout.  */
  if (valtype->code () == TYPE_CODE_STRUCT
      || valtype->code () == TYPE_CODE_UNION
      || valtype->code () == TYPE_CODE_ARRAY)
    {
      if (tdep->struct_return == pcc_struct_return
	  || arm_return_in_memory (gdbarch, valtype))
	return RETURN_VALUE_STRUCT_CONVENTION;
    }
  else if (valtype->code () == TYPE_CODE_COMPLEX)
    {
      if (arm_return_in_memory (gdbarch, valtype))
	return RETURN_VALUE_STRUCT_CONVENTION;
    }

  /* Everything else travels through the core registers.  */
  if (writebuf)
    arm_store_return_value (valtype, regcache, writebuf);

  if (readbuf)
    arm_extract_return_value (valtype, regcache, readbuf);

  return RETURN_VALUE_REGISTER_CONVENTION;
}
8200
8201
/* Extract the longjmp target PC from the jmp_buf whose address is in
   r0, using the per-OS-ABI slot index (tdep->jb_pc) and slot size
   (tdep->jb_elt_size).  Store the result in *PC and return 1 on
   success; return 0 if the jmp_buf could not be read.  */

static int
arm_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
{
  struct gdbarch *gdbarch = get_frame_arch (frame);
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  CORE_ADDR jb_addr;
  gdb_byte buf[ARM_INT_REGISTER_SIZE];

  /* r0 holds the first argument: the jmp_buf pointer.  */
  jb_addr = get_frame_register_unsigned (frame, ARM_A1_REGNUM);

  if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
			  ARM_INT_REGISTER_SIZE))
    return 0;

  *pc = extract_unsigned_integer (buf, ARM_INT_REGISTER_SIZE, byte_order);
  return 1;
}
8220 /* A call to cmse secure entry function "foo" at "a" is modified by
8221 GNU ld as "b".
8222 a) bl xxxx <foo>
8223
8224 <foo>
8225 xxxx:
8226
8227 b) bl yyyy <__acle_se_foo>
8228
8229 section .gnu.sgstubs:
8230 <foo>
8231 yyyy: sg // secure gateway
8232 b.w xxxx <__acle_se_foo> // original_branch_dest
8233
8234 <__acle_se_foo>
8235 xxxx:
8236
   When control reaches "b", the PC contains "yyyy" (the sg address), which is
   a trampoline and does not exist in the source code.  This function returns
   the target PC "xxxx".  For more details, refer to section 5.4
   (Entry functions) and section 3.4.4 (C level development flow of secure code)
   of the "armv8-m-security-extensions-requirements-on-development-tools-engineering-specification"
   document on www.developer.arm.com.  */
8243
8244 static CORE_ADDR
8245 arm_skip_cmse_entry (CORE_ADDR pc, const char *name, struct objfile *objfile)
8246 {
8247 int target_len = strlen (name) + strlen ("__acle_se_") + 1;
8248 char *target_name = (char *) alloca (target_len);
8249 xsnprintf (target_name, target_len, "%s%s", "__acle_se_", name);
8250
8251 struct bound_minimal_symbol minsym
8252 = lookup_minimal_symbol (target_name, NULL, objfile);
8253
8254 if (minsym.minsym != nullptr)
8255 return BMSYMBOL_VALUE_ADDRESS (minsym);
8256
8257 return 0;
8258 }
8259
8260 /* Return true when SEC points to ".gnu.sgstubs" section. */
8261
8262 static bool
8263 arm_is_sgstubs_section (struct obj_section *sec)
8264 {
8265 return (sec != nullptr
8266 && sec->the_bfd_section != nullptr
8267 && sec->the_bfd_section->name != nullptr
8268 && streq (sec->the_bfd_section->name, ".gnu.sgstubs"));
8269 }
8270
/* Recognize GCC and GNU ld's trampolines.  If we are in a trampoline,
   return the target PC.  Otherwise return 0.  */

CORE_ADDR
arm_skip_stub (struct frame_info *frame, CORE_ADDR pc)
{
  const char *name;
  int namelen;
  CORE_ADDR start_addr;

  /* Find the starting address and name of the function containing the PC.  */
  if (find_pc_partial_function (pc, &name, &start_addr, NULL) == 0)
    {
      /* Trampoline 'bx reg' doesn't belong to any functions.  Do the
	 check here.  */
      start_addr = arm_skip_bx_reg (frame, pc);
      if (start_addr != 0)
	return start_addr;

      return 0;
    }

  /* If PC is in a Thumb call or return stub, return the address of the
     target PC, which is in a register.  The thunk functions are called
     _call_via_xx, where x is the register name.  The possible names
     are r0-r9, sl, fp, ip, sp, and lr.  ARM RealView has similar
     functions, named __ARM_call_via_r[0-7].  */
  if (startswith (name, "_call_via_")
      || startswith (name, "__ARM_call_via_"))
    {
      /* Use the name suffix to determine which register contains the
	 target PC.  Every entry in the table is exactly two characters
	 long, so the suffix starts two characters before the end.  */
      static const char *table[15] =
      {"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
       "r8", "r9", "sl", "fp", "ip", "sp", "lr"
      };
      int regno;
      int offset = strlen (name) - 2;

      for (regno = 0; regno <= 14; regno++)
	if (strcmp (&name[offset], table[regno]) == 0)
	  return get_frame_register_unsigned (frame, regno);
    }

  /* GNU ld generates __foo_from_arm or __foo_from_thumb for
     non-interworking calls to foo.  We could decode the stubs
     to find the target but it's easier to use the symbol table.  */
  namelen = strlen (name);
  if (name[0] == '_' && name[1] == '_'
      && ((namelen > 2 + strlen ("_from_thumb")
	   && startswith (name + namelen - strlen ("_from_thumb"), "_from_thumb"))
	  || (namelen > 2 + strlen ("_from_arm")
	      && startswith (name + namelen - strlen ("_from_arm"), "_from_arm"))))
    {
      char *target_name;
      int target_len = namelen - 2;
      struct bound_minimal_symbol minsym;
      struct objfile *objfile;
      struct obj_section *sec;

      /* Strip the leading "__" and the "_from_{arm,thumb}" suffix to
	 recover the name of the real target function.  */
      if (name[namelen - 1] == 'b')
	target_len -= strlen ("_from_thumb");
      else
	target_len -= strlen ("_from_arm");

      target_name = (char *) alloca (target_len + 1);
      memcpy (target_name, name + 2, target_len);
      target_name[target_len] = '\0';

      /* Restrict the lookup to the objfile containing PC, when known.  */
      sec = find_pc_section (pc);
      objfile = (sec == NULL) ? NULL : sec->objfile;
      minsym = lookup_minimal_symbol (target_name, NULL, objfile);
      if (minsym.minsym != NULL)
	return BMSYMBOL_VALUE_ADDRESS (minsym);
      else
	return 0;
    }

  struct obj_section *section = find_pc_section (pc);

  /* Check whether SECTION points to the ".gnu.sgstubs" section.  */
  if (arm_is_sgstubs_section (section))
    return arm_skip_cmse_entry (pc, name, section->objfile);

  return 0;			/* not a stub */
}
8357
8358 static void
8359 arm_update_current_architecture (void)
8360 {
8361 struct gdbarch_info info;
8362
8363 /* If the current architecture is not ARM, we have nothing to do. */
8364 if (gdbarch_bfd_arch_info (target_gdbarch ())->arch != bfd_arch_arm)
8365 return;
8366
8367 /* Update the architecture. */
8368 gdbarch_info_init (&info);
8369
8370 if (!gdbarch_update_p (info))
8371 internal_error (__FILE__, __LINE__, _("could not update architecture"));
8372 }
8373
8374 static void
8375 set_fp_model_sfunc (const char *args, int from_tty,
8376 struct cmd_list_element *c)
8377 {
8378 int fp_model;
8379
8380 for (fp_model = ARM_FLOAT_AUTO; fp_model != ARM_FLOAT_LAST; fp_model++)
8381 if (strcmp (current_fp_model, fp_model_strings[fp_model]) == 0)
8382 {
8383 arm_fp_model = (enum arm_float_model) fp_model;
8384 break;
8385 }
8386
8387 if (fp_model == ARM_FLOAT_LAST)
8388 internal_error (__FILE__, __LINE__, _("Invalid fp model accepted: %s."),
8389 current_fp_model);
8390
8391 arm_update_current_architecture ();
8392 }
8393
/* Show-hook for the floating point model setting.  When the setting is
   "auto" and the current architecture is ARM, also report which model
   is actually in effect.  */

static void
show_fp_model (struct ui_file *file, int from_tty,
	       struct cmd_list_element *c, const char *value)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch ());

  if (arm_fp_model == ARM_FLOAT_AUTO
      && gdbarch_bfd_arch_info (target_gdbarch ())->arch == bfd_arch_arm)
    fprintf_filtered (file, _("\
The current ARM floating point model is \"auto\" (currently \"%s\").\n"),
		      fp_model_strings[tdep->fp_model]);
  else
    fprintf_filtered (file, _("\
The current ARM floating point model is \"%s\".\n"),
		      fp_model_strings[arm_fp_model]);
}
8410
8411 static void
8412 arm_set_abi (const char *args, int from_tty,
8413 struct cmd_list_element *c)
8414 {
8415 int arm_abi;
8416
8417 for (arm_abi = ARM_ABI_AUTO; arm_abi != ARM_ABI_LAST; arm_abi++)
8418 if (strcmp (arm_abi_string, arm_abi_strings[arm_abi]) == 0)
8419 {
8420 arm_abi_global = (enum arm_abi_kind) arm_abi;
8421 break;
8422 }
8423
8424 if (arm_abi == ARM_ABI_LAST)
8425 internal_error (__FILE__, __LINE__, _("Invalid ABI accepted: %s."),
8426 arm_abi_string);
8427
8428 arm_update_current_architecture ();
8429 }
8430
/* Show-hook for the ARM ABI setting.  When the setting is "auto" and
   the current architecture is ARM, also report which ABI is actually
   in effect.  */

static void
arm_show_abi (struct ui_file *file, int from_tty,
	      struct cmd_list_element *c, const char *value)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch ());

  if (arm_abi_global == ARM_ABI_AUTO
      && gdbarch_bfd_arch_info (target_gdbarch ())->arch == bfd_arch_arm)
    fprintf_filtered (file, _("\
The current ARM ABI is \"auto\" (currently \"%s\").\n"),
		      arm_abi_strings[tdep->arm_abi]);
  else
    fprintf_filtered (file, _("The current ARM ABI is \"%s\".\n"),
		      arm_abi_string);
}
8446
/* Show-hook for the fallback execution mode setting, the mode assumed
   when no symbol information is available for an address.  */

static void
arm_show_fallback_mode (struct ui_file *file, int from_tty,
			struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
		    _("The current execution mode assumed "
		      "(when symbols are unavailable) is \"%s\".\n"),
		    arm_fallback_mode_string);
}
8456
/* Show-hook for the forced execution mode setting, the mode assumed
   regardless of what the symbol information says.  */

static void
arm_show_force_mode (struct ui_file *file, int from_tty,
		     struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
		    _("The current execution mode assumed "
		      "(even when symbols are available) is \"%s\".\n"),
		    arm_force_mode_string);
}
8466
8467 /* If the user changes the register disassembly style used for info
8468 register and other commands, we have to also switch the style used
8469 in opcodes for disassembly output. This function is run in the "set
8470 arm disassembly" command, and does that. */
8471
8472 static void
8473 set_disassembly_style_sfunc (const char *args, int from_tty,
8474 struct cmd_list_element *c)
8475 {
8476 /* Convert the short style name into the long style name (eg, reg-names-*)
8477 before calling the generic set_disassembler_options() function. */
8478 std::string long_name = std::string ("reg-names-") + disassembly_style;
8479 set_disassembler_options (&long_name[0]);
8480 }
8481
/* Show-hook for the disassembly style: recover the short style name by
   scanning the current disassembler options for the last "reg-names-"
   entry, and print it.  Prints an empty name if no such option is
   set.  */

static void
show_disassembly_style_sfunc (struct ui_file *file, int from_tty,
			      struct cmd_list_element *c, const char *value)
{
  struct gdbarch *gdbarch = get_current_arch ();
  char *options = get_disassembler_options (gdbarch);
  const char *style = "";
  int len = 0;
  const char *opt;

  FOR_EACH_DISASSEMBLER_OPTION (opt, options)
    if (CONST_STRNEQ (opt, "reg-names-"))
      {
	/* Remember the style suffix; its length runs to the next
	   comma-separated option.  */
	style = &opt[strlen ("reg-names-")];
	len = strcspn (style, ",");
      }

  fprintf_unfiltered (file, "The disassembly style is \"%.*s\".\n", len, style);
}
8501 \f
8502 /* Return the ARM register name corresponding to register I. */
8503 static const char *
8504 arm_register_name (struct gdbarch *gdbarch, int i)
8505 {
8506 const int num_regs = gdbarch_num_regs (gdbarch);
8507
8508 if (gdbarch_tdep (gdbarch)->have_vfp_pseudos
8509 && i >= num_regs && i < num_regs + 32)
8510 {
8511 static const char *const vfp_pseudo_names[] = {
8512 "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
8513 "s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15",
8514 "s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
8515 "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",
8516 };
8517
8518 return vfp_pseudo_names[i - num_regs];
8519 }
8520
8521 if (gdbarch_tdep (gdbarch)->have_neon_pseudos
8522 && i >= num_regs + 32 && i < num_regs + 32 + 16)
8523 {
8524 static const char *const neon_pseudo_names[] = {
8525 "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7",
8526 "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15",
8527 };
8528
8529 return neon_pseudo_names[i - num_regs - 32];
8530 }
8531
8532 if (i >= ARRAY_SIZE (arm_register_names))
8533 /* These registers are only supported on targets which supply
8534 an XML description. */
8535 return "";
8536
8537 return arm_register_names[i];
8538 }
8539
8540 /* Test whether the coff symbol specific value corresponds to a Thumb
8541 function. */
8542
8543 static int
8544 coff_sym_is_thumb (int val)
8545 {
8546 return (val == C_THUMBEXT
8547 || val == C_THUMBSTAT
8548 || val == C_THUMBEXTFUNC
8549 || val == C_THUMBSTATFUNC
8550 || val == C_THUMBLABEL);
8551 }
8552
8553 /* arm_coff_make_msymbol_special()
8554 arm_elf_make_msymbol_special()
8555
8556 These functions test whether the COFF or ELF symbol corresponds to
8557 an address in thumb code, and set a "special" bit in a minimal
8558 symbol to indicate that it does. */
8559
static void
arm_elf_make_msymbol_special(asymbol *sym, struct minimal_symbol *msym)
{
  elf_symbol_type *elfsym = (elf_symbol_type *) sym;

  /* The ARM ELF backend records the branch type in the symbol's
     st_target_internal field; ST_BRANCH_TO_THUMB identifies Thumb
     code.  */
  if (ARM_GET_SYM_BRANCH_TYPE (elfsym->internal_elf_sym.st_target_internal)
      == ST_BRANCH_TO_THUMB)
    MSYMBOL_SET_SPECIAL (msym);
}
8569
static void
arm_coff_make_msymbol_special(int val, struct minimal_symbol *msym)
{
  /* Mark MSYM as Thumb when the COFF symbol class VAL is one of the
     Thumb-specific classes.  */
  if (coff_sym_is_thumb (val))
    MSYMBOL_SET_SPECIAL (msym);
}
8576
/* Record a mapping symbol ($a, $t or $d) from SYM, grouped per BFD
   section of OBJFILE, for later address-to-state lookups.  Any other
   "$"-prefixed symbol is ignored.  */

static void
arm_record_special_symbol (struct gdbarch *gdbarch, struct objfile *objfile,
			   asymbol *sym)
{
  const char *name = bfd_asymbol_name (sym);
  struct arm_per_bfd *data;
  struct arm_mapping_symbol new_map_sym;

  gdb_assert (name[0] == '$');
  if (name[1] != 'a' && name[1] != 't' && name[1] != 'd')
    return;

  /* Create the per-BFD data on first sight of this object file, with
     one map per section.  */
  data = arm_bfd_data_key.get (objfile->obfd);
  if (data == NULL)
    data = arm_bfd_data_key.emplace (objfile->obfd,
				     objfile->obfd->section_count);
  arm_mapping_symbol_vec &map
    = data->section_maps[bfd_asymbol_section (sym)->index];

  new_map_sym.value = sym->value;
  new_map_sym.type = name[1];

  /* Insert at the end, the vector will be sorted on first use.  */
  map.push_back (new_map_sym);
}
8602
/* Write PC into REGCACHE, keeping the Thumb ("T") bit of the status
   register in sync with the instruction set state of the new PC.  */

static void
arm_write_pc (struct regcache *regcache, CORE_ADDR pc)
{
  struct gdbarch *gdbarch = regcache->arch ();
  regcache_cooked_write_unsigned (regcache, ARM_PC_REGNUM, pc);

  /* If necessary, set the T bit.  */
  if (arm_apcs_32)
    {
      ULONGEST val, t_bit;
      regcache_cooked_read_unsigned (regcache, ARM_PS_REGNUM, &val);
      t_bit = arm_psr_thumb_bit (gdbarch);
      if (arm_pc_is_thumb (gdbarch, pc))
	regcache_cooked_write_unsigned (regcache, ARM_PS_REGNUM,
					val | t_bit);
      else
	regcache_cooked_write_unsigned (regcache, ARM_PS_REGNUM,
					val & ~t_bit);
    }
}
8623
/* Read the contents of a NEON quad register, by reading from two
   double registers.  This is used to implement the quad pseudo
   registers, and for argument passing in case the quad registers are
   missing; vectors are passed in quad registers when using the VFP
   ABI, even if a NEON unit is not present.  REGNUM is the index of
   the quad register, in [0, 15].  Returns REG_VALID on success, or
   the failing status of the first unreadable half.  */

static enum register_status
arm_neon_quad_read (struct gdbarch *gdbarch, readable_regcache *regcache,
		    int regnum, gdb_byte *buf)
{
  char name_buf[4];
  gdb_byte reg_buf[8];
  int offset, double_regnum;
  enum register_status status;

  /* Quad register qN is composed of double registers d(2N) and
     d(2N+1); map "dM" to its register number.  */
  xsnprintf (name_buf, sizeof (name_buf), "d%d", regnum << 1);
  double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
					       strlen (name_buf));

  /* d0 is always the least significant half of q0.  */
  if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
    offset = 8;
  else
    offset = 0;

  status = regcache->raw_read (double_regnum, reg_buf);
  if (status != REG_VALID)
    return status;
  memcpy (buf + offset, reg_buf, 8);

  /* The other half occupies the opposite end of the buffer.  */
  offset = 8 - offset;
  status = regcache->raw_read (double_regnum + 1, reg_buf);
  if (status != REG_VALID)
    return status;
  memcpy (buf + offset, reg_buf, 8);

  return REG_VALID;
}
8663
/* Read pseudo register REGNUM into BUF.  Pseudo registers 0-31
   (relative to the raw registers) are the single-precision views
   s0-s31 of the double registers; 32-47 are the quad views q0-q15
   when NEON pseudos are available.  */

static enum register_status
arm_pseudo_read (struct gdbarch *gdbarch, readable_regcache *regcache,
		 int regnum, gdb_byte *buf)
{
  const int num_regs = gdbarch_num_regs (gdbarch);
  char name_buf[4];
  gdb_byte reg_buf[8];
  int offset, double_regnum;

  /* Work with the pseudo register's index relative to the raw set.  */
  gdb_assert (regnum >= num_regs);
  regnum -= num_regs;

  if (gdbarch_tdep (gdbarch)->have_neon_pseudos && regnum >= 32 && regnum < 48)
    /* Quad-precision register.  */
    return arm_neon_quad_read (gdbarch, regcache, regnum - 32, buf);
  else
    {
      enum register_status status;

      /* Single-precision register.  */
      gdb_assert (regnum < 32);

      /* s0 is always the least significant half of d0.  */
      if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
	offset = (regnum & 1) ? 0 : 4;
      else
	offset = (regnum & 1) ? 4 : 0;

      /* sN lives in double register d(N/2); map its name to a number.  */
      xsnprintf (name_buf, sizeof (name_buf), "d%d", regnum >> 1);
      double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
						   strlen (name_buf));

      status = regcache->raw_read (double_regnum, reg_buf);
      if (status == REG_VALID)
	memcpy (buf, reg_buf + offset, 4);
      return status;
    }
}
8702
/* Store the contents of BUF to a NEON quad register, by writing to
   two double registers.  This is used to implement the quad pseudo
   registers, and for argument passing in case the quad registers are
   missing; vectors are passed in quad registers when using the VFP
   ABI, even if a NEON unit is not present.  REGNUM is the index
   of the quad register, in [0, 15].  */

static void
arm_neon_quad_write (struct gdbarch *gdbarch, struct regcache *regcache,
		     int regnum, const gdb_byte *buf)
{
  char name_buf[4];
  int offset, double_regnum;

  /* Quad register qN is composed of double registers d(2N) and
     d(2N+1); map "dM" to its register number.  */
  xsnprintf (name_buf, sizeof (name_buf), "d%d", regnum << 1);
  double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
					       strlen (name_buf));

  /* d0 is always the least significant half of q0.  */
  if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
    offset = 8;
  else
    offset = 0;

  /* Write each half of BUF into its double register.  */
  regcache->raw_write (double_regnum, buf + offset);
  offset = 8 - offset;
  regcache->raw_write (double_regnum + 1, buf + offset);
}
8731
/* Write BUF into pseudo register REGNUM.  Pseudo registers 0-31
   (relative to the raw registers) are the single-precision views
   s0-s31 of the double registers; 32-47 are the quad views q0-q15
   when NEON pseudos are available.  */

static void
arm_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
		  int regnum, const gdb_byte *buf)
{
  const int num_regs = gdbarch_num_regs (gdbarch);
  char name_buf[4];
  gdb_byte reg_buf[8];
  int offset, double_regnum;

  /* Work with the pseudo register's index relative to the raw set.  */
  gdb_assert (regnum >= num_regs);
  regnum -= num_regs;

  if (gdbarch_tdep (gdbarch)->have_neon_pseudos && regnum >= 32 && regnum < 48)
    /* Quad-precision register.  */
    arm_neon_quad_write (gdbarch, regcache, regnum - 32, buf);
  else
    {
      /* Single-precision register.  */
      gdb_assert (regnum < 32);

      /* s0 is always the least significant half of d0.  */
      if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
	offset = (regnum & 1) ? 0 : 4;
      else
	offset = (regnum & 1) ? 4 : 0;

      /* sN lives in double register d(N/2); map its name to a number.  */
      xsnprintf (name_buf, sizeof (name_buf), "d%d", regnum >> 1);
      double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
						   strlen (name_buf));

      /* Read-modify-write: fetch the containing double register,
	 overwrite the addressed half, and write the whole register
	 back.  NOTE(review): the raw_read status is not checked; if
	 the double register is unavailable, whatever raw_read left in
	 reg_buf is written back for the other half -- confirm this is
	 intended.  */
      regcache->raw_read (double_regnum, reg_buf);
      memcpy (reg_buf + offset, buf, 4);
      regcache->raw_write (double_regnum, reg_buf);
    }
}
8767
8768 static struct value *
8769 value_of_arm_user_reg (struct frame_info *frame, const void *baton)
8770 {
8771 const int *reg_p = (const int *) baton;
8772 return value_of_register (*reg_p, frame);
8773 }
8774 \f
/* OS ABI sniffer for ARM ELF files: when EI_OSABI is ELFOSABI_ARM (the
   value GNU tools use), scan the file's sections for ABI tag notes.
   Returns GDB_OSABI_UNKNOWN otherwise, deferring to the generic ELF
   sniffer.  */

static enum gdb_osabi
arm_elf_osabi_sniffer (bfd *abfd)
{
  unsigned int elfosabi;
  enum gdb_osabi osabi = GDB_OSABI_UNKNOWN;

  elfosabi = elf_elfheader (abfd)->e_ident[EI_OSABI];

  if (elfosabi == ELFOSABI_ARM)
    /* GNU tools use this value.  Check note sections in this case,
       as well.  */
    {
      for (asection *sect : gdb_bfd_sections (abfd))
	generic_elf_osabi_sniff_abi_tag_sections (abfd, sect, &osabi);
    }

  /* Anything else will be handled by the generic ELF sniffer.  */
  return osabi;
}
8794
8795 static int
8796 arm_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
8797 struct reggroup *group)
8798 {
8799 /* FPS register's type is INT, but belongs to float_reggroup. Beside
8800 this, FPS register belongs to save_regroup, restore_reggroup, and
8801 all_reggroup, of course. */
8802 if (regnum == ARM_FPS_REGNUM)
8803 return (group == float_reggroup
8804 || group == save_reggroup
8805 || group == restore_reggroup
8806 || group == all_reggroup);
8807 else
8808 return default_register_reggroup_p (gdbarch, regnum, group);
8809 }
8810
/* For backward-compatibility we allow two 'g' packet lengths with
   the remote protocol depending on whether FPA registers are
   supplied.  M-profile targets do not have FPA registers, but some
   stubs already exist in the wild which use a 'g' packet which
   supplies them albeit with dummy values.  The packet format which
   includes FPA registers should be considered deprecated for
   M-profile targets.  */

static void
arm_register_g_packet_guesses (struct gdbarch *gdbarch)
{
  if (gdbarch_tdep (gdbarch)->is_m)
    {
      const target_desc *tdesc;

      /* If we know from the executable this is an M-profile target,
	 cater for remote targets whose register set layout is the
	 same as the FPA layout.  */
      tdesc = arm_read_mprofile_description (ARM_M_TYPE_WITH_FPA);
      register_remote_g_packet_guess (gdbarch,
				      ARM_CORE_REGS_SIZE + ARM_FP_REGS_SIZE,
				      tdesc);

      /* The regular M-profile layout.  */
      tdesc = arm_read_mprofile_description (ARM_M_TYPE_M_PROFILE);
      register_remote_g_packet_guess (gdbarch, ARM_CORE_REGS_SIZE,
				      tdesc);

      /* M-profile plus M4F VFP.  */
      tdesc = arm_read_mprofile_description (ARM_M_TYPE_VFP_D16);
      register_remote_g_packet_guess (gdbarch,
				      ARM_CORE_REGS_SIZE + ARM_VFP2_REGS_SIZE,
				      tdesc);
    }

  /* Otherwise we don't have a useful guess.  */
}
8848
8849 /* Implement the code_of_frame_writable gdbarch method. */
8850
8851 static int
8852 arm_code_of_frame_writable (struct gdbarch *gdbarch, struct frame_info *frame)
8853 {
8854 if (gdbarch_tdep (gdbarch)->is_m
8855 && get_frame_type (frame) == SIGTRAMP_FRAME)
8856 {
8857 /* M-profile exception frames return to some magic PCs, where
8858 isn't writable at all. */
8859 return 0;
8860 }
8861 else
8862 return 1;
8863 }
8864
8865 /* Implement gdbarch_gnu_triplet_regexp. If the arch name is arm then allow it
8866 to be postfixed by a version (eg armv7hl). */
8867
8868 static const char *
8869 arm_gnu_triplet_regexp (struct gdbarch *gdbarch)
8870 {
8871 if (strcmp (gdbarch_bfd_arch_info (gdbarch)->arch_name, "arm") == 0)
8872 return "arm(v[^- ]*)?";
8873 return gdbarch_bfd_arch_info (gdbarch)->arch_name;
8874 }
8875
8876 /* Initialize the current architecture based on INFO. If possible,
8877 re-use an architecture from ARCHES, which is a list of
8878 architectures already created during this debugging session.
8879
8880 Called e.g. at program startup, when reading a core file, and when
8881 reading a binary file. */
8882
8883 static struct gdbarch *
8884 arm_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
8885 {
8886 struct gdbarch_tdep *tdep;
8887 struct gdbarch *gdbarch;
8888 struct gdbarch_list *best_arch;
8889 enum arm_abi_kind arm_abi = arm_abi_global;
8890 enum arm_float_model fp_model = arm_fp_model;
8891 tdesc_arch_data_up tdesc_data;
8892 int i;
8893 bool is_m = false;
8894 int vfp_register_count = 0;
8895 bool have_vfp_pseudos = false, have_neon_pseudos = false;
8896 bool have_wmmx_registers = false;
8897 bool have_neon = false;
8898 bool have_fpa_registers = true;
8899 const struct target_desc *tdesc = info.target_desc;
8900
8901 /* If we have an object to base this architecture on, try to determine
8902 its ABI. */
8903
8904 if (arm_abi == ARM_ABI_AUTO && info.abfd != NULL)
8905 {
8906 int ei_osabi, e_flags;
8907
8908 switch (bfd_get_flavour (info.abfd))
8909 {
8910 case bfd_target_coff_flavour:
8911 /* Assume it's an old APCS-style ABI. */
8912 /* XXX WinCE? */
8913 arm_abi = ARM_ABI_APCS;
8914 break;
8915
8916 case bfd_target_elf_flavour:
8917 ei_osabi = elf_elfheader (info.abfd)->e_ident[EI_OSABI];
8918 e_flags = elf_elfheader (info.abfd)->e_flags;
8919
8920 if (ei_osabi == ELFOSABI_ARM)
8921 {
8922 /* GNU tools used to use this value, but do not for EABI
8923 objects. There's nowhere to tag an EABI version
8924 anyway, so assume APCS. */
8925 arm_abi = ARM_ABI_APCS;
8926 }
8927 else if (ei_osabi == ELFOSABI_NONE || ei_osabi == ELFOSABI_GNU)
8928 {
8929 int eabi_ver = EF_ARM_EABI_VERSION (e_flags);
8930
8931 switch (eabi_ver)
8932 {
8933 case EF_ARM_EABI_UNKNOWN:
8934 /* Assume GNU tools. */
8935 arm_abi = ARM_ABI_APCS;
8936 break;
8937
8938 case EF_ARM_EABI_VER4:
8939 case EF_ARM_EABI_VER5:
8940 arm_abi = ARM_ABI_AAPCS;
8941 /* EABI binaries default to VFP float ordering.
8942 They may also contain build attributes that can
8943 be used to identify if the VFP argument-passing
8944 ABI is in use. */
8945 if (fp_model == ARM_FLOAT_AUTO)
8946 {
8947 #ifdef HAVE_ELF
8948 switch (bfd_elf_get_obj_attr_int (info.abfd,
8949 OBJ_ATTR_PROC,
8950 Tag_ABI_VFP_args))
8951 {
8952 case AEABI_VFP_args_base:
8953 /* "The user intended FP parameter/result
8954 passing to conform to AAPCS, base
8955 variant". */
8956 fp_model = ARM_FLOAT_SOFT_VFP;
8957 break;
8958 case AEABI_VFP_args_vfp:
8959 /* "The user intended FP parameter/result
8960 passing to conform to AAPCS, VFP
8961 variant". */
8962 fp_model = ARM_FLOAT_VFP;
8963 break;
8964 case AEABI_VFP_args_toolchain:
8965 /* "The user intended FP parameter/result
8966 passing to conform to tool chain-specific
8967 conventions" - we don't know any such
8968 conventions, so leave it as "auto". */
8969 break;
8970 case AEABI_VFP_args_compatible:
8971 /* "Code is compatible with both the base
8972 and VFP variants; the user did not permit
8973 non-variadic functions to pass FP
8974 parameters/results" - leave it as
8975 "auto". */
8976 break;
8977 default:
8978 /* Attribute value not mentioned in the
8979 November 2012 ABI, so leave it as
8980 "auto". */
8981 break;
8982 }
8983 #else
8984 fp_model = ARM_FLOAT_SOFT_VFP;
8985 #endif
8986 }
8987 break;
8988
8989 default:
8990 /* Leave it as "auto". */
8991 warning (_("unknown ARM EABI version 0x%x"), eabi_ver);
8992 break;
8993 }
8994
8995 #ifdef HAVE_ELF
8996 /* Detect M-profile programs. This only works if the
8997 executable file includes build attributes; GCC does
8998 copy them to the executable, but e.g. RealView does
8999 not. */
9000 int attr_arch
9001 = bfd_elf_get_obj_attr_int (info.abfd, OBJ_ATTR_PROC,
9002 Tag_CPU_arch);
9003 int attr_profile
9004 = bfd_elf_get_obj_attr_int (info.abfd, OBJ_ATTR_PROC,
9005 Tag_CPU_arch_profile);
9006
9007 /* GCC specifies the profile for v6-M; RealView only
9008 specifies the profile for architectures starting with
9009 V7 (as opposed to architectures with a tag
9010 numerically greater than TAG_CPU_ARCH_V7). */
9011 if (!tdesc_has_registers (tdesc)
9012 && (attr_arch == TAG_CPU_ARCH_V6_M
9013 || attr_arch == TAG_CPU_ARCH_V6S_M
9014 || attr_profile == 'M'))
9015 is_m = true;
9016 #endif
9017 }
9018
9019 if (fp_model == ARM_FLOAT_AUTO)
9020 {
9021 switch (e_flags & (EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT))
9022 {
9023 case 0:
9024 /* Leave it as "auto". Strictly speaking this case
9025 means FPA, but almost nobody uses that now, and
9026 many toolchains fail to set the appropriate bits
9027 for the floating-point model they use. */
9028 break;
9029 case EF_ARM_SOFT_FLOAT:
9030 fp_model = ARM_FLOAT_SOFT_FPA;
9031 break;
9032 case EF_ARM_VFP_FLOAT:
9033 fp_model = ARM_FLOAT_VFP;
9034 break;
9035 case EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT:
9036 fp_model = ARM_FLOAT_SOFT_VFP;
9037 break;
9038 }
9039 }
9040
9041 if (e_flags & EF_ARM_BE8)
9042 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
9043
9044 break;
9045
9046 default:
9047 /* Leave it as "auto". */
9048 break;
9049 }
9050 }
9051
9052 /* Check any target description for validity. */
9053 if (tdesc_has_registers (tdesc))
9054 {
9055 /* For most registers we require GDB's default names; but also allow
9056 the numeric names for sp / lr / pc, as a convenience. */
9057 static const char *const arm_sp_names[] = { "r13", "sp", NULL };
9058 static const char *const arm_lr_names[] = { "r14", "lr", NULL };
9059 static const char *const arm_pc_names[] = { "r15", "pc", NULL };
9060
9061 const struct tdesc_feature *feature;
9062 int valid_p;
9063
9064 feature = tdesc_find_feature (tdesc,
9065 "org.gnu.gdb.arm.core");
9066 if (feature == NULL)
9067 {
9068 feature = tdesc_find_feature (tdesc,
9069 "org.gnu.gdb.arm.m-profile");
9070 if (feature == NULL)
9071 return NULL;
9072 else
9073 is_m = true;
9074 }
9075
9076 tdesc_data = tdesc_data_alloc ();
9077
9078 valid_p = 1;
9079 for (i = 0; i < ARM_SP_REGNUM; i++)
9080 valid_p &= tdesc_numbered_register (feature, tdesc_data.get (), i,
9081 arm_register_names[i]);
9082 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data.get (),
9083 ARM_SP_REGNUM,
9084 arm_sp_names);
9085 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data.get (),
9086 ARM_LR_REGNUM,
9087 arm_lr_names);
9088 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data.get (),
9089 ARM_PC_REGNUM,
9090 arm_pc_names);
9091 if (is_m)
9092 valid_p &= tdesc_numbered_register (feature, tdesc_data.get (),
9093 ARM_PS_REGNUM, "xpsr");
9094 else
9095 valid_p &= tdesc_numbered_register (feature, tdesc_data.get (),
9096 ARM_PS_REGNUM, "cpsr");
9097
9098 if (!valid_p)
9099 return NULL;
9100
9101 feature = tdesc_find_feature (tdesc,
9102 "org.gnu.gdb.arm.fpa");
9103 if (feature != NULL)
9104 {
9105 valid_p = 1;
9106 for (i = ARM_F0_REGNUM; i <= ARM_FPS_REGNUM; i++)
9107 valid_p &= tdesc_numbered_register (feature, tdesc_data.get (), i,
9108 arm_register_names[i]);
9109 if (!valid_p)
9110 return NULL;
9111 }
9112 else
9113 have_fpa_registers = false;
9114
9115 feature = tdesc_find_feature (tdesc,
9116 "org.gnu.gdb.xscale.iwmmxt");
9117 if (feature != NULL)
9118 {
9119 static const char *const iwmmxt_names[] = {
9120 "wR0", "wR1", "wR2", "wR3", "wR4", "wR5", "wR6", "wR7",
9121 "wR8", "wR9", "wR10", "wR11", "wR12", "wR13", "wR14", "wR15",
9122 "wCID", "wCon", "wCSSF", "wCASF", "", "", "", "",
9123 "wCGR0", "wCGR1", "wCGR2", "wCGR3", "", "", "", "",
9124 };
9125
9126 valid_p = 1;
9127 for (i = ARM_WR0_REGNUM; i <= ARM_WR15_REGNUM; i++)
9128 valid_p
9129 &= tdesc_numbered_register (feature, tdesc_data.get (), i,
9130 iwmmxt_names[i - ARM_WR0_REGNUM]);
9131
9132 /* Check for the control registers, but do not fail if they
9133 are missing. */
9134 for (i = ARM_WC0_REGNUM; i <= ARM_WCASF_REGNUM; i++)
9135 tdesc_numbered_register (feature, tdesc_data.get (), i,
9136 iwmmxt_names[i - ARM_WR0_REGNUM]);
9137
9138 for (i = ARM_WCGR0_REGNUM; i <= ARM_WCGR3_REGNUM; i++)
9139 valid_p
9140 &= tdesc_numbered_register (feature, tdesc_data.get (), i,
9141 iwmmxt_names[i - ARM_WR0_REGNUM]);
9142
9143 if (!valid_p)
9144 return NULL;
9145
9146 have_wmmx_registers = true;
9147 }
9148
9149 /* If we have a VFP unit, check whether the single precision registers
9150 are present. If not, then we will synthesize them as pseudo
9151 registers. */
9152 feature = tdesc_find_feature (tdesc,
9153 "org.gnu.gdb.arm.vfp");
9154 if (feature != NULL)
9155 {
9156 static const char *const vfp_double_names[] = {
9157 "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
9158 "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
9159 "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
9160 "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",
9161 };
9162
9163 /* Require the double precision registers. There must be either
9164 16 or 32. */
9165 valid_p = 1;
9166 for (i = 0; i < 32; i++)
9167 {
9168 valid_p &= tdesc_numbered_register (feature, tdesc_data.get (),
9169 ARM_D0_REGNUM + i,
9170 vfp_double_names[i]);
9171 if (!valid_p)
9172 break;
9173 }
9174 if (!valid_p && i == 16)
9175 valid_p = 1;
9176
9177 /* Also require FPSCR. */
9178 valid_p &= tdesc_numbered_register (feature, tdesc_data.get (),
9179 ARM_FPSCR_REGNUM, "fpscr");
9180 if (!valid_p)
9181 return NULL;
9182
9183 if (tdesc_unnumbered_register (feature, "s0") == 0)
9184 have_vfp_pseudos = true;
9185
9186 vfp_register_count = i;
9187
9188 /* If we have VFP, also check for NEON. The architecture allows
9189 NEON without VFP (integer vector operations only), but GDB
9190 does not support that. */
9191 feature = tdesc_find_feature (tdesc,
9192 "org.gnu.gdb.arm.neon");
9193 if (feature != NULL)
9194 {
9195 /* NEON requires 32 double-precision registers. */
9196 if (i != 32)
9197 return NULL;
9198
9199 /* If there are quad registers defined by the stub, use
9200 their type; otherwise (normally) provide them with
9201 the default type. */
9202 if (tdesc_unnumbered_register (feature, "q0") == 0)
9203 have_neon_pseudos = true;
9204
9205 have_neon = true;
9206 }
9207 }
9208 }
9209
9210 /* If there is already a candidate, use it. */
9211 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
9212 best_arch != NULL;
9213 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
9214 {
9215 if (arm_abi != ARM_ABI_AUTO
9216 && arm_abi != gdbarch_tdep (best_arch->gdbarch)->arm_abi)
9217 continue;
9218
9219 if (fp_model != ARM_FLOAT_AUTO
9220 && fp_model != gdbarch_tdep (best_arch->gdbarch)->fp_model)
9221 continue;
9222
9223 /* There are various other properties in tdep that we do not
9224 need to check here: those derived from a target description,
9225 since gdbarches with a different target description are
9226 automatically disqualified. */
9227
9228 /* Do check is_m, though, since it might come from the binary. */
9229 if (is_m != gdbarch_tdep (best_arch->gdbarch)->is_m)
9230 continue;
9231
9232 /* Found a match. */
9233 break;
9234 }
9235
9236 if (best_arch != NULL)
9237 return best_arch->gdbarch;
9238
9239 tdep = XCNEW (struct gdbarch_tdep);
9240 gdbarch = gdbarch_alloc (&info, tdep);
9241
9242 /* Record additional information about the architecture we are defining.
9243 These are gdbarch discriminators, like the OSABI. */
9244 tdep->arm_abi = arm_abi;
9245 tdep->fp_model = fp_model;
9246 tdep->is_m = is_m;
9247 tdep->have_fpa_registers = have_fpa_registers;
9248 tdep->have_wmmx_registers = have_wmmx_registers;
9249 gdb_assert (vfp_register_count == 0
9250 || vfp_register_count == 16
9251 || vfp_register_count == 32);
9252 tdep->vfp_register_count = vfp_register_count;
9253 tdep->have_vfp_pseudos = have_vfp_pseudos;
9254 tdep->have_neon_pseudos = have_neon_pseudos;
9255 tdep->have_neon = have_neon;
9256
9257 arm_register_g_packet_guesses (gdbarch);
9258
9259 /* Breakpoints. */
9260 switch (info.byte_order_for_code)
9261 {
9262 case BFD_ENDIAN_BIG:
9263 tdep->arm_breakpoint = arm_default_arm_be_breakpoint;
9264 tdep->arm_breakpoint_size = sizeof (arm_default_arm_be_breakpoint);
9265 tdep->thumb_breakpoint = arm_default_thumb_be_breakpoint;
9266 tdep->thumb_breakpoint_size = sizeof (arm_default_thumb_be_breakpoint);
9267
9268 break;
9269
9270 case BFD_ENDIAN_LITTLE:
9271 tdep->arm_breakpoint = arm_default_arm_le_breakpoint;
9272 tdep->arm_breakpoint_size = sizeof (arm_default_arm_le_breakpoint);
9273 tdep->thumb_breakpoint = arm_default_thumb_le_breakpoint;
9274 tdep->thumb_breakpoint_size = sizeof (arm_default_thumb_le_breakpoint);
9275
9276 break;
9277
9278 default:
9279 internal_error (__FILE__, __LINE__,
9280 _("arm_gdbarch_init: bad byte order for float format"));
9281 }
9282
9283 /* On ARM targets char defaults to unsigned. */
9284 set_gdbarch_char_signed (gdbarch, 0);
9285
9286 /* wchar_t is unsigned under the AAPCS. */
9287 if (tdep->arm_abi == ARM_ABI_AAPCS)
9288 set_gdbarch_wchar_signed (gdbarch, 0);
9289 else
9290 set_gdbarch_wchar_signed (gdbarch, 1);
9291
9292 /* Compute type alignment. */
9293 set_gdbarch_type_align (gdbarch, arm_type_align);
9294
9295 /* Note: for displaced stepping, this includes the breakpoint, and one word
9296 of additional scratch space. This setting isn't used for anything beside
9297 displaced stepping at present. */
9298 set_gdbarch_max_insn_length (gdbarch, 4 * ARM_DISPLACED_MODIFIED_INSNS);
9299
9300 /* This should be low enough for everything. */
9301 tdep->lowest_pc = 0x20;
9302 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
9303
9304 /* The default, for both APCS and AAPCS, is to return small
9305 structures in registers. */
9306 tdep->struct_return = reg_struct_return;
9307
9308 set_gdbarch_push_dummy_call (gdbarch, arm_push_dummy_call);
9309 set_gdbarch_frame_align (gdbarch, arm_frame_align);
9310
9311 if (is_m)
9312 set_gdbarch_code_of_frame_writable (gdbarch, arm_code_of_frame_writable);
9313
9314 set_gdbarch_write_pc (gdbarch, arm_write_pc);
9315
9316 frame_base_set_default (gdbarch, &arm_normal_base);
9317
9318 /* Address manipulation. */
9319 set_gdbarch_addr_bits_remove (gdbarch, arm_addr_bits_remove);
9320
9321 /* Advance PC across function entry code. */
9322 set_gdbarch_skip_prologue (gdbarch, arm_skip_prologue);
9323
9324 /* Detect whether PC is at a point where the stack has been destroyed. */
9325 set_gdbarch_stack_frame_destroyed_p (gdbarch, arm_stack_frame_destroyed_p);
9326
9327 /* Skip trampolines. */
9328 set_gdbarch_skip_trampoline_code (gdbarch, arm_skip_stub);
9329
9330 /* The stack grows downward. */
9331 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
9332
9333 /* Breakpoint manipulation. */
9334 set_gdbarch_breakpoint_kind_from_pc (gdbarch, arm_breakpoint_kind_from_pc);
9335 set_gdbarch_sw_breakpoint_from_kind (gdbarch, arm_sw_breakpoint_from_kind);
9336 set_gdbarch_breakpoint_kind_from_current_state (gdbarch,
9337 arm_breakpoint_kind_from_current_state);
9338
9339 /* Information about registers, etc. */
9340 set_gdbarch_sp_regnum (gdbarch, ARM_SP_REGNUM);
9341 set_gdbarch_pc_regnum (gdbarch, ARM_PC_REGNUM);
9342 set_gdbarch_num_regs (gdbarch, ARM_NUM_REGS);
9343 set_gdbarch_register_type (gdbarch, arm_register_type);
9344 set_gdbarch_register_reggroup_p (gdbarch, arm_register_reggroup_p);
9345
9346 /* This "info float" is FPA-specific. Use the generic version if we
9347 do not have FPA. */
9348 if (gdbarch_tdep (gdbarch)->have_fpa_registers)
9349 set_gdbarch_print_float_info (gdbarch, arm_print_float_info);
9350
9351 /* Internal <-> external register number maps. */
9352 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, arm_dwarf_reg_to_regnum);
9353 set_gdbarch_register_sim_regno (gdbarch, arm_register_sim_regno);
9354
9355 set_gdbarch_register_name (gdbarch, arm_register_name);
9356
9357 /* Returning results. */
9358 set_gdbarch_return_value (gdbarch, arm_return_value);
9359
9360 /* Disassembly. */
9361 set_gdbarch_print_insn (gdbarch, gdb_print_insn_arm);
9362
9363 /* Minsymbol frobbing. */
9364 set_gdbarch_elf_make_msymbol_special (gdbarch, arm_elf_make_msymbol_special);
9365 set_gdbarch_coff_make_msymbol_special (gdbarch,
9366 arm_coff_make_msymbol_special);
9367 set_gdbarch_record_special_symbol (gdbarch, arm_record_special_symbol);
9368
9369 /* Thumb-2 IT block support. */
9370 set_gdbarch_adjust_breakpoint_address (gdbarch,
9371 arm_adjust_breakpoint_address);
9372
9373 /* Virtual tables. */
9374 set_gdbarch_vbit_in_delta (gdbarch, 1);
9375
9376 /* Hook in the ABI-specific overrides, if they have been registered. */
9377 gdbarch_init_osabi (info, gdbarch);
9378
9379 dwarf2_frame_set_init_reg (gdbarch, arm_dwarf2_frame_init_reg);
9380
9381 /* Add some default predicates. */
9382 if (is_m)
9383 frame_unwind_append_unwinder (gdbarch, &arm_m_exception_unwind);
9384 frame_unwind_append_unwinder (gdbarch, &arm_stub_unwind);
9385 dwarf2_append_unwinders (gdbarch);
9386 frame_unwind_append_unwinder (gdbarch, &arm_exidx_unwind);
9387 frame_unwind_append_unwinder (gdbarch, &arm_epilogue_frame_unwind);
9388 frame_unwind_append_unwinder (gdbarch, &arm_prologue_unwind);
9389
9390 /* Now we have tuned the configuration, set a few final things,
9391 based on what the OS ABI has told us. */
9392
9393 /* If the ABI is not otherwise marked, assume the old GNU APCS. EABI
9394 binaries are always marked. */
9395 if (tdep->arm_abi == ARM_ABI_AUTO)
9396 tdep->arm_abi = ARM_ABI_APCS;
9397
9398 /* Watchpoints are not steppable. */
9399 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
9400
9401 /* We used to default to FPA for generic ARM, but almost nobody
9402 uses that now, and we now provide a way for the user to force
9403 the model. So default to the most useful variant. */
9404 if (tdep->fp_model == ARM_FLOAT_AUTO)
9405 tdep->fp_model = ARM_FLOAT_SOFT_FPA;
9406
9407 if (tdep->jb_pc >= 0)
9408 set_gdbarch_get_longjmp_target (gdbarch, arm_get_longjmp_target);
9409
9410 /* Floating point sizes and format. */
9411 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
9412 if (tdep->fp_model == ARM_FLOAT_SOFT_FPA || tdep->fp_model == ARM_FLOAT_FPA)
9413 {
9414 set_gdbarch_double_format
9415 (gdbarch, floatformats_ieee_double_littlebyte_bigword);
9416 set_gdbarch_long_double_format
9417 (gdbarch, floatformats_ieee_double_littlebyte_bigword);
9418 }
9419 else
9420 {
9421 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
9422 set_gdbarch_long_double_format (gdbarch, floatformats_ieee_double);
9423 }
9424
9425 if (have_vfp_pseudos)
9426 {
9427 /* NOTE: These are the only pseudo registers used by
9428 the ARM target at the moment. If more are added, a
9429 little more care in numbering will be needed. */
9430
9431 int num_pseudos = 32;
9432 if (have_neon_pseudos)
9433 num_pseudos += 16;
9434 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudos);
9435 set_gdbarch_pseudo_register_read (gdbarch, arm_pseudo_read);
9436 set_gdbarch_pseudo_register_write (gdbarch, arm_pseudo_write);
9437 }
9438
9439 if (tdesc_data != nullptr)
9440 {
9441 set_tdesc_pseudo_register_name (gdbarch, arm_register_name);
9442
9443 tdesc_use_registers (gdbarch, tdesc, std::move (tdesc_data));
9444
9445 /* Override tdesc_register_type to adjust the types of VFP
9446 registers for NEON. */
9447 set_gdbarch_register_type (gdbarch, arm_register_type);
9448 }
9449
9450 /* Add standard register aliases. We add aliases even for those
9451 names which are used by the current architecture - it's simpler,
9452 and does no harm, since nothing ever lists user registers. */
9453 for (i = 0; i < ARRAY_SIZE (arm_register_aliases); i++)
9454 user_reg_add (gdbarch, arm_register_aliases[i].name,
9455 value_of_arm_user_reg, &arm_register_aliases[i].regnum);
9456
9457 set_gdbarch_disassembler_options (gdbarch, &arm_disassembler_options);
9458 set_gdbarch_valid_disassembler_options (gdbarch, disassembler_options_arm ());
9459
9460 set_gdbarch_gnu_triplet_regexp (gdbarch, arm_gnu_triplet_regexp);
9461
9462 return gdbarch;
9463 }
9464
9465 static void
9466 arm_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
9467 {
9468 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
9469
9470 if (tdep == NULL)
9471 return;
9472
9473 fprintf_unfiltered (file, _("arm_dump_tdep: fp_model = %i\n"),
9474 (int) tdep->fp_model);
9475 fprintf_unfiltered (file, _("arm_dump_tdep: have_fpa_registers = %i\n"),
9476 (int) tdep->have_fpa_registers);
9477 fprintf_unfiltered (file, _("arm_dump_tdep: have_wmmx_registers = %i\n"),
9478 (int) tdep->have_wmmx_registers);
9479 fprintf_unfiltered (file, _("arm_dump_tdep: vfp_register_count = %i\n"),
9480 (int) tdep->vfp_register_count);
9481 fprintf_unfiltered (file, _("arm_dump_tdep: have_vfp_pseudos = %i\n"),
9482 (int) tdep->have_vfp_pseudos);
9483 fprintf_unfiltered (file, _("arm_dump_tdep: have_neon_pseudos = %i\n"),
9484 (int) tdep->have_neon_pseudos);
9485 fprintf_unfiltered (file, _("arm_dump_tdep: have_neon = %i\n"),
9486 (int) tdep->have_neon);
9487 fprintf_unfiltered (file, _("arm_dump_tdep: Lowest pc = 0x%lx\n"),
9488 (unsigned long) tdep->lowest_pc);
9489 }
9490
9491 #if GDB_SELF_TEST
9492 namespace selftests
9493 {
9494 static void arm_record_test (void);
9495 }
9496 #endif
9497
9498 void _initialize_arm_tdep ();
9499 void
9500 _initialize_arm_tdep ()
9501 {
9502 long length;
9503 int i, j;
9504 char regdesc[1024], *rdptr = regdesc;
9505 size_t rest = sizeof (regdesc);
9506
9507 gdbarch_register (bfd_arch_arm, arm_gdbarch_init, arm_dump_tdep);
9508
9509 /* Add ourselves to objfile event chain. */
9510 gdb::observers::new_objfile.attach (arm_exidx_new_objfile);
9511
9512 /* Register an ELF OS ABI sniffer for ARM binaries. */
9513 gdbarch_register_osabi_sniffer (bfd_arch_arm,
9514 bfd_target_elf_flavour,
9515 arm_elf_osabi_sniffer);
9516
9517 /* Add root prefix command for all "set arm"/"show arm" commands. */
9518 add_basic_prefix_cmd ("arm", no_class,
9519 _("Various ARM-specific commands."),
9520 &setarmcmdlist, "set arm ", 0, &setlist);
9521
9522 add_show_prefix_cmd ("arm", no_class,
9523 _("Various ARM-specific commands."),
9524 &showarmcmdlist, "show arm ", 0, &showlist);
9525
9526
9527 arm_disassembler_options = xstrdup ("reg-names-std");
9528 const disasm_options_t *disasm_options
9529 = &disassembler_options_arm ()->options;
9530 int num_disassembly_styles = 0;
9531 for (i = 0; disasm_options->name[i] != NULL; i++)
9532 if (CONST_STRNEQ (disasm_options->name[i], "reg-names-"))
9533 num_disassembly_styles++;
9534
9535 /* Initialize the array that will be passed to add_setshow_enum_cmd(). */
9536 valid_disassembly_styles = XNEWVEC (const char *,
9537 num_disassembly_styles + 1);
9538 for (i = j = 0; disasm_options->name[i] != NULL; i++)
9539 if (CONST_STRNEQ (disasm_options->name[i], "reg-names-"))
9540 {
9541 size_t offset = strlen ("reg-names-");
9542 const char *style = disasm_options->name[i];
9543 valid_disassembly_styles[j++] = &style[offset];
9544 length = snprintf (rdptr, rest, "%s - %s\n", &style[offset],
9545 disasm_options->description[i]);
9546 rdptr += length;
9547 rest -= length;
9548 }
9549 /* Mark the end of valid options. */
9550 valid_disassembly_styles[num_disassembly_styles] = NULL;
9551
9552 /* Create the help text. */
9553 std::string helptext = string_printf ("%s%s%s",
9554 _("The valid values are:\n"),
9555 regdesc,
9556 _("The default is \"std\"."));
9557
9558 add_setshow_enum_cmd("disassembler", no_class,
9559 valid_disassembly_styles, &disassembly_style,
9560 _("Set the disassembly style."),
9561 _("Show the disassembly style."),
9562 helptext.c_str (),
9563 set_disassembly_style_sfunc,
9564 show_disassembly_style_sfunc,
9565 &setarmcmdlist, &showarmcmdlist);
9566
9567 add_setshow_boolean_cmd ("apcs32", no_class, &arm_apcs_32,
9568 _("Set usage of ARM 32-bit mode."),
9569 _("Show usage of ARM 32-bit mode."),
9570 _("When off, a 26-bit PC will be used."),
9571 NULL,
9572 NULL, /* FIXME: i18n: Usage of ARM 32-bit
9573 mode is %s. */
9574 &setarmcmdlist, &showarmcmdlist);
9575
9576 /* Add a command to allow the user to force the FPU model. */
9577 add_setshow_enum_cmd ("fpu", no_class, fp_model_strings, &current_fp_model,
9578 _("Set the floating point type."),
9579 _("Show the floating point type."),
9580 _("auto - Determine the FP typefrom the OS-ABI.\n\
9581 softfpa - Software FP, mixed-endian doubles on little-endian ARMs.\n\
9582 fpa - FPA co-processor (GCC compiled).\n\
9583 softvfp - Software FP with pure-endian doubles.\n\
9584 vfp - VFP co-processor."),
9585 set_fp_model_sfunc, show_fp_model,
9586 &setarmcmdlist, &showarmcmdlist);
9587
9588 /* Add a command to allow the user to force the ABI. */
9589 add_setshow_enum_cmd ("abi", class_support, arm_abi_strings, &arm_abi_string,
9590 _("Set the ABI."),
9591 _("Show the ABI."),
9592 NULL, arm_set_abi, arm_show_abi,
9593 &setarmcmdlist, &showarmcmdlist);
9594
9595 /* Add two commands to allow the user to force the assumed
9596 execution mode. */
9597 add_setshow_enum_cmd ("fallback-mode", class_support,
9598 arm_mode_strings, &arm_fallback_mode_string,
9599 _("Set the mode assumed when symbols are unavailable."),
9600 _("Show the mode assumed when symbols are unavailable."),
9601 NULL, NULL, arm_show_fallback_mode,
9602 &setarmcmdlist, &showarmcmdlist);
9603 add_setshow_enum_cmd ("force-mode", class_support,
9604 arm_mode_strings, &arm_force_mode_string,
9605 _("Set the mode assumed even when symbols are available."),
9606 _("Show the mode assumed even when symbols are available."),
9607 NULL, NULL, arm_show_force_mode,
9608 &setarmcmdlist, &showarmcmdlist);
9609
9610 /* Debugging flag. */
9611 add_setshow_boolean_cmd ("arm", class_maintenance, &arm_debug,
9612 _("Set ARM debugging."),
9613 _("Show ARM debugging."),
9614 _("When on, arm-specific debugging is enabled."),
9615 NULL,
9616 NULL, /* FIXME: i18n: "ARM debugging is %s. */
9617 &setdebuglist, &showdebuglist);
9618
9619 #if GDB_SELF_TEST
9620 selftests::register_test ("arm-record", selftests::arm_record_test);
9621 #endif
9622
9623 }
9624
/* ARM-reversible process record data structures.  */

/* Encoding sizes of the three instruction flavors handled by the
   record machinery.  */
#define ARM_INSN_SIZE_BYTES 4
#define THUMB_INSN_SIZE_BYTES 2
#define THUMB2_INSN_SIZE_BYTES 4


/* Position of the bit within a 32-bit ARM instruction
   that defines whether the instruction is a load or store. */
#define INSN_S_L_BIT_NUM 20

/* Allocate REGS (an array of LENGTH register numbers) and fill it from
   RECORD_BUF.  No-op when LENGTH is zero, so REGS stays unassigned in
   that case.  Ownership of the XNEWVEC'd memory passes to the caller.  */
#define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
	do  \
	  { \
	    unsigned int reg_len = LENGTH; \
	    if (reg_len) \
	      { \
		REGS = XNEWVEC (uint32_t, reg_len); \
		memcpy(&REGS[0], &RECORD_BUF[0], sizeof(uint32_t)*LENGTH); \
	      } \
	  } \
	while (0)

/* Allocate MEMS (an array of LENGTH struct arm_mem_r) and fill it from
   RECORD_BUF, which holds LENGTH {len, addr} pairs of uint32_t.  The
   copy through &MEMS->len (the address of the first member, i.e. the
   start of the array) relies on struct arm_mem_r being exactly two
   uint32_t with no padding.  No-op when LENGTH is zero.  */
#define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
	do  \
	  { \
	    unsigned int mem_len = LENGTH; \
	    if (mem_len) \
	      { \
		MEMS =  XNEWVEC (struct arm_mem_r, mem_len);  \
		memcpy(&MEMS->len, &RECORD_BUF[0], \
		       sizeof(struct arm_mem_r) * LENGTH); \
	      } \
	  } \
	while (0)

/* Checks whether insn is already recorded or yet to be decoded. (boolean expression). */
#define INSN_RECORDED(ARM_RECORD) \
	(0 != (ARM_RECORD)->reg_rec_count || 0 != (ARM_RECORD)->mem_rec_count)
9664
/* ARM memory record structure: one memory region an instruction will
   overwrite.  The layout (two uint32_t) matches the {len, addr} pairs
   stored in the record_buf_mem arrays, which MEM_ALLOC copies here
   wholesale.  */
struct arm_mem_r
{
  uint32_t len;    /* Record length.  */
  uint32_t addr;   /* Memory address.  */
};
9671
/* ARM instruction record contains opcode of current insn
   and execution state (before entry to decode_insn()),
   contains list of to-be-modified registers and
   memory blocks (on return from decode_insn()).  */

typedef struct insn_decode_record_t
{
  struct gdbarch *gdbarch;
  struct regcache *regcache;
  CORE_ADDR this_addr;          /* Address of the insn being decoded.  */
  uint32_t arm_insn;            /* Raw insn bits; wide enough for ARM,
				   Thumb and Thumb-2 encodings.  */
  uint32_t cond;                /* Condition code (insn bits 28-31).  */
  uint32_t opcode;              /* Insn opcode.  */
  uint32_t decode;              /* Insn decode bits.  */
  uint32_t mem_rec_count;       /* No of mem records.  */
  uint32_t reg_rec_count;       /* No of reg records.  */
  uint32_t *arm_regs;           /* Registers to be saved for this record;
				   allocated via REG_ALLOC.  */
  struct arm_mem_r *arm_mems;   /* Memory to be saved for this record;
				   allocated via MEM_ALLOC.  */
} insn_decode_record;
9691
9692
/* Checks an ARM mandatory field of LEN bits starting at 1-based bit
   position BIT_NUM in INSN: a "should be one" (SBO) field when the SBO
   flag is non-zero, a "should be zero" (SBZ) field when it is zero.

   Returns 1 if the field has the mandated value, 0 otherwise.  A
   zero-length field trivially succeeds.

   This rewrite fixes the previous implementation, which (a) evaluated
   bits () with a degenerate range before the LEN == 0 early return,
   (b) tested `ones & sbo' against a zero SBO flag on the SBZ path and
   so rejected fields that were correctly all-zero, and (c) accepted an
   all-zero field on the SBO path because the scan loop was never
   entered.  */

static int
sbo_sbz (uint32_t insn, uint32_t bit_num, uint32_t len, uint32_t sbo)
{
  if (!len)
    return 1;

  /* Mask covering the LEN low-order bits; guard the shift for
     LEN >= 32, which would otherwise be undefined behavior.  */
  uint32_t mask = (len >= 32) ? 0xffffffffu : ((1u << len) - 1);
  /* BIT_NUM is 1-based, matching the callers.  */
  uint32_t field = (insn >> (bit_num - 1)) & mask;

  /* SBO: every bit must be one; SBZ: every bit must be zero.  */
  return sbo ? field == mask : field == 0;
}
9716
/* Overall status codes returned by the ARM record routines.  */
enum arm_record_result
{
  ARM_RECORD_SUCCESS = 0,
  ARM_RECORD_FAILURE = 1
};

/* Which miscellaneous store arm_record_strx is recording: STRH writes
   a 2-byte halfword, STRD a pair of consecutive 4-byte words.  */
typedef enum
{
  ARM_RECORD_STRH=1,
  ARM_RECORD_STRD
} arm_record_strx_t;

/* Instruction-set flavor of the insn being recorded (see the
   *_INSN_SIZE_BYTES constants above for the encoding sizes).  */
typedef enum
{
  ARM_RECORD=1,
  THUMB_RECORD,
  THUMB2_RECORD
} record_type_t;
9735
9736
9737 static int
9738 arm_record_strx (insn_decode_record *arm_insn_r, uint32_t *record_buf,
9739 uint32_t *record_buf_mem, arm_record_strx_t str_type)
9740 {
9741
9742 struct regcache *reg_cache = arm_insn_r->regcache;
9743 ULONGEST u_regval[2]= {0};
9744
9745 uint32_t reg_src1 = 0, reg_src2 = 0;
9746 uint32_t immed_high = 0, immed_low = 0,offset_8 = 0, tgt_mem_addr = 0;
9747
9748 arm_insn_r->opcode = bits (arm_insn_r->arm_insn, 21, 24);
9749 arm_insn_r->decode = bits (arm_insn_r->arm_insn, 4, 7);
9750
9751 if (14 == arm_insn_r->opcode || 10 == arm_insn_r->opcode)
9752 {
9753 /* 1) Handle misc store, immediate offset. */
9754 immed_low = bits (arm_insn_r->arm_insn, 0, 3);
9755 immed_high = bits (arm_insn_r->arm_insn, 8, 11);
9756 reg_src1 = bits (arm_insn_r->arm_insn, 16, 19);
9757 regcache_raw_read_unsigned (reg_cache, reg_src1,
9758 &u_regval[0]);
9759 if (ARM_PC_REGNUM == reg_src1)
9760 {
9761 /* If R15 was used as Rn, hence current PC+8. */
9762 u_regval[0] = u_regval[0] + 8;
9763 }
9764 offset_8 = (immed_high << 4) | immed_low;
9765 /* Calculate target store address. */
9766 if (14 == arm_insn_r->opcode)
9767 {
9768 tgt_mem_addr = u_regval[0] + offset_8;
9769 }
9770 else
9771 {
9772 tgt_mem_addr = u_regval[0] - offset_8;
9773 }
9774 if (ARM_RECORD_STRH == str_type)
9775 {
9776 record_buf_mem[0] = 2;
9777 record_buf_mem[1] = tgt_mem_addr;
9778 arm_insn_r->mem_rec_count = 1;
9779 }
9780 else if (ARM_RECORD_STRD == str_type)
9781 {
9782 record_buf_mem[0] = 4;
9783 record_buf_mem[1] = tgt_mem_addr;
9784 record_buf_mem[2] = 4;
9785 record_buf_mem[3] = tgt_mem_addr + 4;
9786 arm_insn_r->mem_rec_count = 2;
9787 }
9788 }
9789 else if (12 == arm_insn_r->opcode || 8 == arm_insn_r->opcode)
9790 {
9791 /* 2) Store, register offset. */
9792 /* Get Rm. */
9793 reg_src1 = bits (arm_insn_r->arm_insn, 0, 3);
9794 /* Get Rn. */
9795 reg_src2 = bits (arm_insn_r->arm_insn, 16, 19);
9796 regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval[0]);
9797 regcache_raw_read_unsigned (reg_cache, reg_src2, &u_regval[1]);
9798 if (15 == reg_src2)
9799 {
9800 /* If R15 was used as Rn, hence current PC+8. */
9801 u_regval[0] = u_regval[0] + 8;
9802 }
9803 /* Calculate target store address, Rn +/- Rm, register offset. */
9804 if (12 == arm_insn_r->opcode)
9805 {
9806 tgt_mem_addr = u_regval[0] + u_regval[1];
9807 }
9808 else
9809 {
9810 tgt_mem_addr = u_regval[1] - u_regval[0];
9811 }
9812 if (ARM_RECORD_STRH == str_type)
9813 {
9814 record_buf_mem[0] = 2;
9815 record_buf_mem[1] = tgt_mem_addr;
9816 arm_insn_r->mem_rec_count = 1;
9817 }
9818 else if (ARM_RECORD_STRD == str_type)
9819 {
9820 record_buf_mem[0] = 4;
9821 record_buf_mem[1] = tgt_mem_addr;
9822 record_buf_mem[2] = 4;
9823 record_buf_mem[3] = tgt_mem_addr + 4;
9824 arm_insn_r->mem_rec_count = 2;
9825 }
9826 }
9827 else if (11 == arm_insn_r->opcode || 15 == arm_insn_r->opcode
9828 || 2 == arm_insn_r->opcode || 6 == arm_insn_r->opcode)
9829 {
9830 /* 3) Store, immediate pre-indexed. */
9831 /* 5) Store, immediate post-indexed. */
9832 immed_low = bits (arm_insn_r->arm_insn, 0, 3);
9833 immed_high = bits (arm_insn_r->arm_insn, 8, 11);
9834 offset_8 = (immed_high << 4) | immed_low;
9835 reg_src1 = bits (arm_insn_r->arm_insn, 16, 19);
9836 regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval[0]);
9837 /* Calculate target store address, Rn +/- Rm, register offset. */
9838 if (15 == arm_insn_r->opcode || 6 == arm_insn_r->opcode)
9839 {
9840 tgt_mem_addr = u_regval[0] + offset_8;
9841 }
9842 else
9843 {
9844 tgt_mem_addr = u_regval[0] - offset_8;
9845 }
9846 if (ARM_RECORD_STRH == str_type)
9847 {
9848 record_buf_mem[0] = 2;
9849 record_buf_mem[1] = tgt_mem_addr;
9850 arm_insn_r->mem_rec_count = 1;
9851 }
9852 else if (ARM_RECORD_STRD == str_type)
9853 {
9854 record_buf_mem[0] = 4;
9855 record_buf_mem[1] = tgt_mem_addr;
9856 record_buf_mem[2] = 4;
9857 record_buf_mem[3] = tgt_mem_addr + 4;
9858 arm_insn_r->mem_rec_count = 2;
9859 }
9860 /* Record Rn also as it changes. */
9861 *(record_buf) = bits (arm_insn_r->arm_insn, 16, 19);
9862 arm_insn_r->reg_rec_count = 1;
9863 }
9864 else if (9 == arm_insn_r->opcode || 13 == arm_insn_r->opcode
9865 || 0 == arm_insn_r->opcode || 4 == arm_insn_r->opcode)
9866 {
9867 /* 4) Store, register pre-indexed. */
9868 /* 6) Store, register post -indexed. */
9869 reg_src1 = bits (arm_insn_r->arm_insn, 0, 3);
9870 reg_src2 = bits (arm_insn_r->arm_insn, 16, 19);
9871 regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval[0]);
9872 regcache_raw_read_unsigned (reg_cache, reg_src2, &u_regval[1]);
9873 /* Calculate target store address, Rn +/- Rm, register offset. */
9874 if (13 == arm_insn_r->opcode || 4 == arm_insn_r->opcode)
9875 {
9876 tgt_mem_addr = u_regval[0] + u_regval[1];
9877 }
9878 else
9879 {
9880 tgt_mem_addr = u_regval[1] - u_regval[0];
9881 }
9882 if (ARM_RECORD_STRH == str_type)
9883 {
9884 record_buf_mem[0] = 2;
9885 record_buf_mem[1] = tgt_mem_addr;
9886 arm_insn_r->mem_rec_count = 1;
9887 }
9888 else if (ARM_RECORD_STRD == str_type)
9889 {
9890 record_buf_mem[0] = 4;
9891 record_buf_mem[1] = tgt_mem_addr;
9892 record_buf_mem[2] = 4;
9893 record_buf_mem[3] = tgt_mem_addr + 4;
9894 arm_insn_r->mem_rec_count = 2;
9895 }
9896 /* Record Rn also as it changes. */
9897 *(record_buf) = bits (arm_insn_r->arm_insn, 16, 19);
9898 arm_insn_r->reg_rec_count = 1;
9899 }
9900 return 0;
9901 }
9902
9903 /* Handling ARM extension space insns. */
9904
9905 static int
9906 arm_record_extension_space (insn_decode_record *arm_insn_r)
9907 {
9908 int ret = 0; /* Return value: -1:record failure ; 0:success */
9909 uint32_t opcode1 = 0, opcode2 = 0, insn_op1 = 0;
9910 uint32_t record_buf[8], record_buf_mem[8];
9911 uint32_t reg_src1 = 0;
9912 struct regcache *reg_cache = arm_insn_r->regcache;
9913 ULONGEST u_regval = 0;
9914
9915 gdb_assert (!INSN_RECORDED(arm_insn_r));
9916 /* Handle unconditional insn extension space. */
9917
9918 opcode1 = bits (arm_insn_r->arm_insn, 20, 27);
9919 opcode2 = bits (arm_insn_r->arm_insn, 4, 7);
9920 if (arm_insn_r->cond)
9921 {
9922 /* PLD has no affect on architectural state, it just affects
9923 the caches. */
9924 if (5 == ((opcode1 & 0xE0) >> 5))
9925 {
9926 /* BLX(1) */
9927 record_buf[0] = ARM_PS_REGNUM;
9928 record_buf[1] = ARM_LR_REGNUM;
9929 arm_insn_r->reg_rec_count = 2;
9930 }
9931 /* STC2, LDC2, MCR2, MRC2, CDP2: <TBD>, co-processor insn. */
9932 }
9933
9934
9935 opcode1 = bits (arm_insn_r->arm_insn, 25, 27);
9936 if (3 == opcode1 && bit (arm_insn_r->arm_insn, 4))
9937 {
9938 ret = -1;
9939 /* Undefined instruction on ARM V5; need to handle if later
9940 versions define it. */
9941 }
9942
9943 opcode1 = bits (arm_insn_r->arm_insn, 24, 27);
9944 opcode2 = bits (arm_insn_r->arm_insn, 4, 7);
9945 insn_op1 = bits (arm_insn_r->arm_insn, 20, 23);
9946
9947 /* Handle arithmetic insn extension space. */
9948 if (!opcode1 && 9 == opcode2 && 1 != arm_insn_r->cond
9949 && !INSN_RECORDED(arm_insn_r))
9950 {
9951 /* Handle MLA(S) and MUL(S). */
9952 if (in_inclusive_range (insn_op1, 0U, 3U))
9953 {
9954 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
9955 record_buf[1] = ARM_PS_REGNUM;
9956 arm_insn_r->reg_rec_count = 2;
9957 }
9958 else if (in_inclusive_range (insn_op1, 4U, 15U))
9959 {
9960 /* Handle SMLAL(S), SMULL(S), UMLAL(S), UMULL(S). */
9961 record_buf[0] = bits (arm_insn_r->arm_insn, 16, 19);
9962 record_buf[1] = bits (arm_insn_r->arm_insn, 12, 15);
9963 record_buf[2] = ARM_PS_REGNUM;
9964 arm_insn_r->reg_rec_count = 3;
9965 }
9966 }
9967
9968 opcode1 = bits (arm_insn_r->arm_insn, 26, 27);
9969 opcode2 = bits (arm_insn_r->arm_insn, 23, 24);
9970 insn_op1 = bits (arm_insn_r->arm_insn, 21, 22);
9971
9972 /* Handle control insn extension space. */
9973
9974 if (!opcode1 && 2 == opcode2 && !bit (arm_insn_r->arm_insn, 20)
9975 && 1 != arm_insn_r->cond && !INSN_RECORDED(arm_insn_r))
9976 {
9977 if (!bit (arm_insn_r->arm_insn,25))
9978 {
9979 if (!bits (arm_insn_r->arm_insn, 4, 7))
9980 {
9981 if ((0 == insn_op1) || (2 == insn_op1))
9982 {
9983 /* MRS. */
9984 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
9985 arm_insn_r->reg_rec_count = 1;
9986 }
9987 else if (1 == insn_op1)
9988 {
9989 /* CSPR is going to be changed. */
9990 record_buf[0] = ARM_PS_REGNUM;
9991 arm_insn_r->reg_rec_count = 1;
9992 }
9993 else if (3 == insn_op1)
9994 {
9995 /* SPSR is going to be changed. */
9996 /* We need to get SPSR value, which is yet to be done. */
9997 return -1;
9998 }
9999 }
10000 else if (1 == bits (arm_insn_r->arm_insn, 4, 7))
10001 {
10002 if (1 == insn_op1)
10003 {
10004 /* BX. */
10005 record_buf[0] = ARM_PS_REGNUM;
10006 arm_insn_r->reg_rec_count = 1;
10007 }
10008 else if (3 == insn_op1)
10009 {
10010 /* CLZ. */
10011 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
10012 arm_insn_r->reg_rec_count = 1;
10013 }
10014 }
10015 else if (3 == bits (arm_insn_r->arm_insn, 4, 7))
10016 {
10017 /* BLX. */
10018 record_buf[0] = ARM_PS_REGNUM;
10019 record_buf[1] = ARM_LR_REGNUM;
10020 arm_insn_r->reg_rec_count = 2;
10021 }
10022 else if (5 == bits (arm_insn_r->arm_insn, 4, 7))
10023 {
10024 /* QADD, QSUB, QDADD, QDSUB */
10025 record_buf[0] = ARM_PS_REGNUM;
10026 record_buf[1] = bits (arm_insn_r->arm_insn, 12, 15);
10027 arm_insn_r->reg_rec_count = 2;
10028 }
10029 else if (7 == bits (arm_insn_r->arm_insn, 4, 7))
10030 {
10031 /* BKPT. */
10032 record_buf[0] = ARM_PS_REGNUM;
10033 record_buf[1] = ARM_LR_REGNUM;
10034 arm_insn_r->reg_rec_count = 2;
10035
10036 /* Save SPSR also;how? */
10037 return -1;
10038 }
10039 else if(8 == bits (arm_insn_r->arm_insn, 4, 7)
10040 || 10 == bits (arm_insn_r->arm_insn, 4, 7)
10041 || 12 == bits (arm_insn_r->arm_insn, 4, 7)
10042 || 14 == bits (arm_insn_r->arm_insn, 4, 7)
10043 )
10044 {
10045 if (0 == insn_op1 || 1 == insn_op1)
10046 {
10047 /* SMLA<x><y>, SMLAW<y>, SMULW<y>. */
10048 /* We dont do optimization for SMULW<y> where we
10049 need only Rd. */
10050 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
10051 record_buf[1] = ARM_PS_REGNUM;
10052 arm_insn_r->reg_rec_count = 2;
10053 }
10054 else if (2 == insn_op1)
10055 {
10056 /* SMLAL<x><y>. */
10057 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
10058 record_buf[1] = bits (arm_insn_r->arm_insn, 16, 19);
10059 arm_insn_r->reg_rec_count = 2;
10060 }
10061 else if (3 == insn_op1)
10062 {
10063 /* SMUL<x><y>. */
10064 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
10065 arm_insn_r->reg_rec_count = 1;
10066 }
10067 }
10068 }
10069 else
10070 {
10071 /* MSR : immediate form. */
10072 if (1 == insn_op1)
10073 {
10074 /* CSPR is going to be changed. */
10075 record_buf[0] = ARM_PS_REGNUM;
10076 arm_insn_r->reg_rec_count = 1;
10077 }
10078 else if (3 == insn_op1)
10079 {
10080 /* SPSR is going to be changed. */
10081 /* we need to get SPSR value, which is yet to be done */
10082 return -1;
10083 }
10084 }
10085 }
10086
10087 opcode1 = bits (arm_insn_r->arm_insn, 25, 27);
10088 opcode2 = bits (arm_insn_r->arm_insn, 20, 24);
10089 insn_op1 = bits (arm_insn_r->arm_insn, 5, 6);
10090
10091 /* Handle load/store insn extension space. */
10092
10093 if (!opcode1 && bit (arm_insn_r->arm_insn, 7)
10094 && bit (arm_insn_r->arm_insn, 4) && 1 != arm_insn_r->cond
10095 && !INSN_RECORDED(arm_insn_r))
10096 {
10097 /* SWP/SWPB. */
10098 if (0 == insn_op1)
10099 {
10100 /* These insn, changes register and memory as well. */
10101 /* SWP or SWPB insn. */
10102 /* Get memory address given by Rn. */
10103 reg_src1 = bits (arm_insn_r->arm_insn, 16, 19);
10104 regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval);
10105 /* SWP insn ?, swaps word. */
10106 if (8 == arm_insn_r->opcode)
10107 {
10108 record_buf_mem[0] = 4;
10109 }
10110 else
10111 {
10112 /* SWPB insn, swaps only byte. */
10113 record_buf_mem[0] = 1;
10114 }
10115 record_buf_mem[1] = u_regval;
10116 arm_insn_r->mem_rec_count = 1;
10117 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
10118 arm_insn_r->reg_rec_count = 1;
10119 }
10120 else if (1 == insn_op1 && !bit (arm_insn_r->arm_insn, 20))
10121 {
10122 /* STRH. */
10123 arm_record_strx(arm_insn_r, &record_buf[0], &record_buf_mem[0],
10124 ARM_RECORD_STRH);
10125 }
10126 else if (2 == insn_op1 && !bit (arm_insn_r->arm_insn, 20))
10127 {
10128 /* LDRD. */
10129 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
10130 record_buf[1] = record_buf[0] + 1;
10131 arm_insn_r->reg_rec_count = 2;
10132 }
10133 else if (3 == insn_op1 && !bit (arm_insn_r->arm_insn, 20))
10134 {
10135 /* STRD. */
10136 arm_record_strx(arm_insn_r, &record_buf[0], &record_buf_mem[0],
10137 ARM_RECORD_STRD);
10138 }
10139 else if (bit (arm_insn_r->arm_insn, 20) && insn_op1 <= 3)
10140 {
10141 /* LDRH, LDRSB, LDRSH. */
10142 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
10143 arm_insn_r->reg_rec_count = 1;
10144 }
10145
10146 }
10147
10148 opcode1 = bits (arm_insn_r->arm_insn, 23, 27);
10149 if (24 == opcode1 && bit (arm_insn_r->arm_insn, 21)
10150 && !INSN_RECORDED(arm_insn_r))
10151 {
10152 ret = -1;
10153 /* Handle coprocessor insn extension space. */
10154 }
10155
10156 /* To be done for ARMv5 and later; as of now we return -1. */
10157 if (-1 == ret)
10158 return ret;
10159
10160 REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf);
10161 MEM_ALLOC (arm_insn_r->arm_mems, arm_insn_r->mem_rec_count, record_buf_mem);
10162
10163 return ret;
10164 }
10165
/* Handling opcode 000 insns.  Record handler for ARM mode data
   processing (register / register-shifted register), miscellaneous
   instructions, multiply and multiply-accumulate, synchronization
   primitives (SWP/SWPB) and the extra load/store group.  Fills
   ARM_INSN_R with the registers and memory ranges the insn will
   modify so that process record can undo it.  Returns 0 on success,
   -1 when the insn cannot be recorded yet.  */

static int
arm_record_data_proc_misc_ld_str (insn_decode_record *arm_insn_r)
{
  struct regcache *reg_cache = arm_insn_r->regcache;
  uint32_t record_buf[8], record_buf_mem[8];
  ULONGEST u_regval[2] = {0};

  uint32_t reg_src1 = 0;
  uint32_t opcode1 = 0;

  /* Cache the main decode fields: opcode <- bits [24:21],
     decode <- bits [7:4], opcode1 <- bits [24:20].  */
  arm_insn_r->opcode = bits (arm_insn_r->arm_insn, 21, 24);
  arm_insn_r->decode = bits (arm_insn_r->arm_insn, 4, 7);
  opcode1 = bits (arm_insn_r->arm_insn, 20, 24);

  if (!((opcode1 & 0x19) == 0x10))
    {
      /* Data-processing (register) and Data-processing (register-shifted
	 register).  */
      /* All of these insns modify the destination register, given by
	 bits [15:12], and may update the condition flags in CPSR.  */
      record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
      record_buf[1] = ARM_PS_REGNUM;
      arm_insn_r->reg_rec_count = 2;
    }
  else if ((arm_insn_r->decode < 8) && ((opcode1 & 0x19) == 0x10))
    {
      /* Miscellaneous instructions.  */

      if (3 == arm_insn_r->decode && 0x12 == opcode1
	  && sbo_sbz (arm_insn_r->arm_insn, 9, 12, 1))
	{
	  /* Handle BLX, branch and link/exchange.  */
	  if (9 == arm_insn_r->opcode)
	    {
	      /* Branch is chosen by setting T bit of CPSR, bit[0] of Rm,
		 and R14 stores the return address.  */
	      record_buf[0] = ARM_PS_REGNUM;
	      record_buf[1] = ARM_LR_REGNUM;
	      arm_insn_r->reg_rec_count = 2;
	    }
	}
      else if (7 == arm_insn_r->decode && 0x12 == opcode1)
	{
	  /* Handle enhanced software breakpoint insn, BKPT.  */
	  /* CPSR is changed to be executed in ARM state, disabling normal
	     interrupts, entering abort mode.  */
	  /* According to high vector configuration PC is set.  */
	  /* If the user hits the breakpoint and types reverse, we need
	     to go back with previous CPSR and Program Counter.  */
	  record_buf[0] = ARM_PS_REGNUM;
	  record_buf[1] = ARM_LR_REGNUM;
	  arm_insn_r->reg_rec_count = 2;

	  /* Save SPSR also; how?  Not supported yet, so bail out.  */
	  return -1;
	}
      else if (1 == arm_insn_r->decode && 0x12 == opcode1
	       && sbo_sbz (arm_insn_r->arm_insn, 9, 12, 1))
	{
	  /* Handle BX, branch and link/exchange.  */
	  /* Branch is chosen by setting T bit of CPSR, bit[0] of Rm.  */
	  record_buf[0] = ARM_PS_REGNUM;
	  arm_insn_r->reg_rec_count = 1;
	}
      else if (1 == arm_insn_r->decode && 0x16 == opcode1
	       && sbo_sbz (arm_insn_r->arm_insn, 9, 4, 1)
	       && sbo_sbz (arm_insn_r->arm_insn, 17, 4, 1))
	{
	  /* Count leading zeros: CLZ.  Destination is bits [15:12].  */
	  record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
	  arm_insn_r->reg_rec_count = 1;
	}
      else if (!bit (arm_insn_r->arm_insn, INSN_S_L_BIT_NUM)
	       && (8 == arm_insn_r->opcode || 10 == arm_insn_r->opcode)
	       && sbo_sbz (arm_insn_r->arm_insn, 17, 4, 1)
	       && sbo_sbz (arm_insn_r->arm_insn, 1, 12, 0))
	{
	  /* Handle MRS insn.  Destination register is bits [15:12].  */
	  record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
	  arm_insn_r->reg_rec_count = 1;
	}
    }
  else if (9 == arm_insn_r->decode && opcode1 < 0x10)
    {
      /* Multiply and multiply-accumulate.  */

      /* Handle multiply instructions.  */
      /* MLA, MUL, SMLAL, SMULL, UMLAL, UMULL.  */
      if (0 == arm_insn_r->opcode || 1 == arm_insn_r->opcode)
	{
	  /* Handle MLA and MUL.  Destination is bits [19:16]; the flags
	     may also be updated.  */
	  record_buf[0] = bits (arm_insn_r->arm_insn, 16, 19);
	  record_buf[1] = ARM_PS_REGNUM;
	  arm_insn_r->reg_rec_count = 2;
	}
      else if (4 <= arm_insn_r->opcode && 7 >= arm_insn_r->opcode)
	{
	  /* Handle SMLAL, SMULL, UMLAL, UMULL.  Both halves of the
	     64-bit result (bits [19:16] and [15:12]) are written, plus
	     the flags.  */
	  record_buf[0] = bits (arm_insn_r->arm_insn, 16, 19);
	  record_buf[1] = bits (arm_insn_r->arm_insn, 12, 15);
	  record_buf[2] = ARM_PS_REGNUM;
	  arm_insn_r->reg_rec_count = 3;
	}
    }
  else if (9 == arm_insn_r->decode && opcode1 > 0x10)
    {
      /* Synchronization primitives.  */

      /* Handling SWP, SWPB.  */
      /* These insns change a register and memory as well.  */
      /* SWP or SWPB insn.  */
      /* Get memory address given by Rn (bits [19:16]).  */
      reg_src1 = bits (arm_insn_r->arm_insn, 16, 19);
      regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval[0]);
      /* SWP insn (opcode 8) swaps a whole word.  */
      if (8 == arm_insn_r->opcode)
	{
	  record_buf_mem[0] = 4;
	}
      else
	{
	  /* SWPB insn, swaps only byte.  */
	  record_buf_mem[0] = 1;
	}
      record_buf_mem[1] = u_regval[0];
      arm_insn_r->mem_rec_count = 1;
      record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
      arm_insn_r->reg_rec_count = 1;
    }
  else if (11 == arm_insn_r->decode || 13 == arm_insn_r->decode
	   || 15 == arm_insn_r->decode)
    {
      if ((opcode1 & 0x12) == 2)
	{
	  /* Extra load/store (unprivileged) -- not supported yet.  */
	  return -1;
	}
      else
	{
	  /* Extra load/store.  Bits [6:5] select the operation kind.  */
	  switch (bits (arm_insn_r->arm_insn, 5, 6))
	    {
	    case 1:
	      if ((opcode1 & 0x05) == 0x0 || (opcode1 & 0x05) == 0x4)
		{
		  /* STRH (register), STRH (immediate) */
		  arm_record_strx (arm_insn_r, &record_buf[0],
				   &record_buf_mem[0], ARM_RECORD_STRH);
		}
	      else if ((opcode1 & 0x05) == 0x1)
		{
		  /* LDRH (register) */
		  record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
		  arm_insn_r->reg_rec_count = 1;

		  if (bit (arm_insn_r->arm_insn, 21))
		    {
		      /* Write back to Rn.  */
		      record_buf[arm_insn_r->reg_rec_count++]
			= bits (arm_insn_r->arm_insn, 16, 19);
		    }
		}
	      else if ((opcode1 & 0x05) == 0x5)
		{
		  /* LDRH (immediate), LDRH (literal) */
		  int rn = bits (arm_insn_r->arm_insn, 16, 19);

		  record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
		  arm_insn_r->reg_rec_count = 1;

		  if (rn != 15)
		    {
		      /* LDRH (immediate): Rn == 15 would be the literal
			 form, which has no writeback.  */
		      if (bit (arm_insn_r->arm_insn, 21))
			{
			  /* Write back to Rn.  */
			  record_buf[arm_insn_r->reg_rec_count++] = rn;
			}
		    }
		}
	      else
		return -1;
	      break;
	    case 2:
	      if ((opcode1 & 0x05) == 0x0)
		{
		  /* LDRD (register): loads an even/odd register pair.  */
		  record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
		  record_buf[1] = record_buf[0] + 1;
		  arm_insn_r->reg_rec_count = 2;

		  if (bit (arm_insn_r->arm_insn, 21))
		    {
		      /* Write back to Rn.  */
		      record_buf[arm_insn_r->reg_rec_count++]
			= bits (arm_insn_r->arm_insn, 16, 19);
		    }
		}
	      else if ((opcode1 & 0x05) == 0x1)
		{
		  /* LDRSB (register) */
		  record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
		  arm_insn_r->reg_rec_count = 1;

		  if (bit (arm_insn_r->arm_insn, 21))
		    {
		      /* Write back to Rn.  */
		      record_buf[arm_insn_r->reg_rec_count++]
			= bits (arm_insn_r->arm_insn, 16, 19);
		    }
		}
	      else if ((opcode1 & 0x05) == 0x4 || (opcode1 & 0x05) == 0x5)
		{
		  /* LDRD (immediate), LDRD (literal), LDRSB (immediate),
		     LDRSB (literal) */
		  int rn = bits (arm_insn_r->arm_insn, 16, 19);

		  record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
		  arm_insn_r->reg_rec_count = 1;

		  if (rn != 15)
		    {
		      /* LDRD (immediate), LDRSB (immediate): the literal
			 forms (Rn == 15) have no writeback.  */
		      if (bit (arm_insn_r->arm_insn, 21))
			{
			  /* Write back to Rn.  */
			  record_buf[arm_insn_r->reg_rec_count++] = rn;
			}
		    }
		}
	      else
		return -1;
	      break;
	    case 3:
	      if ((opcode1 & 0x05) == 0x0)
		{
		  /* STRD (register) */
		  arm_record_strx (arm_insn_r, &record_buf[0],
				   &record_buf_mem[0], ARM_RECORD_STRD);
		}
	      else if ((opcode1 & 0x05) == 0x1)
		{
		  /* LDRSH (register) */
		  record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
		  arm_insn_r->reg_rec_count = 1;

		  if (bit (arm_insn_r->arm_insn, 21))
		    {
		      /* Write back to Rn.  */
		      record_buf[arm_insn_r->reg_rec_count++]
			= bits (arm_insn_r->arm_insn, 16, 19);
		    }
		}
	      else if ((opcode1 & 0x05) == 0x4)
		{
		  /* STRD (immediate) */
		  arm_record_strx (arm_insn_r, &record_buf[0],
				   &record_buf_mem[0], ARM_RECORD_STRD);
		}
	      else if ((opcode1 & 0x05) == 0x5)
		{
		  /* LDRSH (immediate), LDRSH (literal) */
		  record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
		  arm_insn_r->reg_rec_count = 1;

		  if (bit (arm_insn_r->arm_insn, 21))
		    {
		      /* Write back to Rn.  */
		      record_buf[arm_insn_r->reg_rec_count++]
			= bits (arm_insn_r->arm_insn, 16, 19);
		    }
		}
	      else
		return -1;
	      break;
	    default:
	      return -1;
	    }
	}
    }
  else
    {
      return -1;
    }

  REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf);
  MEM_ALLOC (arm_insn_r->arm_mems, arm_insn_r->mem_rec_count, record_buf_mem);
  return 0;
}
10458
10459 /* Handling opcode 001 insns. */
10460
10461 static int
10462 arm_record_data_proc_imm (insn_decode_record *arm_insn_r)
10463 {
10464 uint32_t record_buf[8], record_buf_mem[8];
10465
10466 arm_insn_r->opcode = bits (arm_insn_r->arm_insn, 21, 24);
10467 arm_insn_r->decode = bits (arm_insn_r->arm_insn, 4, 7);
10468
10469 if ((9 == arm_insn_r->opcode || 11 == arm_insn_r->opcode)
10470 && 2 == bits (arm_insn_r->arm_insn, 20, 21)
10471 && sbo_sbz (arm_insn_r->arm_insn, 13, 4, 1)
10472 )
10473 {
10474 /* Handle MSR insn. */
10475 if (9 == arm_insn_r->opcode)
10476 {
10477 /* CSPR is going to be changed. */
10478 record_buf[0] = ARM_PS_REGNUM;
10479 arm_insn_r->reg_rec_count = 1;
10480 }
10481 else
10482 {
10483 /* SPSR is going to be changed. */
10484 }
10485 }
10486 else if (arm_insn_r->opcode <= 15)
10487 {
10488 /* Normal data processing insns. */
10489 /* Out of 11 shifter operands mode, all the insn modifies destination
10490 register, which is specified by 13-16 decode. */
10491 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
10492 record_buf[1] = ARM_PS_REGNUM;
10493 arm_insn_r->reg_rec_count = 2;
10494 }
10495 else
10496 {
10497 return -1;
10498 }
10499
10500 REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf);
10501 MEM_ALLOC (arm_insn_r->arm_mems, arm_insn_r->mem_rec_count, record_buf_mem);
10502 return 0;
10503 }
10504
10505 static int
10506 arm_record_media (insn_decode_record *arm_insn_r)
10507 {
10508 uint32_t record_buf[8];
10509
10510 switch (bits (arm_insn_r->arm_insn, 22, 24))
10511 {
10512 case 0:
10513 /* Parallel addition and subtraction, signed */
10514 case 1:
10515 /* Parallel addition and subtraction, unsigned */
10516 case 2:
10517 case 3:
10518 /* Packing, unpacking, saturation and reversal */
10519 {
10520 int rd = bits (arm_insn_r->arm_insn, 12, 15);
10521
10522 record_buf[arm_insn_r->reg_rec_count++] = rd;
10523 }
10524 break;
10525
10526 case 4:
10527 case 5:
10528 /* Signed multiplies */
10529 {
10530 int rd = bits (arm_insn_r->arm_insn, 16, 19);
10531 unsigned int op1 = bits (arm_insn_r->arm_insn, 20, 22);
10532
10533 record_buf[arm_insn_r->reg_rec_count++] = rd;
10534 if (op1 == 0x0)
10535 record_buf[arm_insn_r->reg_rec_count++] = ARM_PS_REGNUM;
10536 else if (op1 == 0x4)
10537 record_buf[arm_insn_r->reg_rec_count++]
10538 = bits (arm_insn_r->arm_insn, 12, 15);
10539 }
10540 break;
10541
10542 case 6:
10543 {
10544 if (bit (arm_insn_r->arm_insn, 21)
10545 && bits (arm_insn_r->arm_insn, 5, 6) == 0x2)
10546 {
10547 /* SBFX */
10548 record_buf[arm_insn_r->reg_rec_count++]
10549 = bits (arm_insn_r->arm_insn, 12, 15);
10550 }
10551 else if (bits (arm_insn_r->arm_insn, 20, 21) == 0x0
10552 && bits (arm_insn_r->arm_insn, 5, 7) == 0x0)
10553 {
10554 /* USAD8 and USADA8 */
10555 record_buf[arm_insn_r->reg_rec_count++]
10556 = bits (arm_insn_r->arm_insn, 16, 19);
10557 }
10558 }
10559 break;
10560
10561 case 7:
10562 {
10563 if (bits (arm_insn_r->arm_insn, 20, 21) == 0x3
10564 && bits (arm_insn_r->arm_insn, 5, 7) == 0x7)
10565 {
10566 /* Permanently UNDEFINED */
10567 return -1;
10568 }
10569 else
10570 {
10571 /* BFC, BFI and UBFX */
10572 record_buf[arm_insn_r->reg_rec_count++]
10573 = bits (arm_insn_r->arm_insn, 12, 15);
10574 }
10575 }
10576 break;
10577
10578 default:
10579 return -1;
10580 }
10581
10582 REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf);
10583
10584 return 0;
10585 }
10586
10587 /* Handle ARM mode instructions with opcode 010. */
10588
10589 static int
10590 arm_record_ld_st_imm_offset (insn_decode_record *arm_insn_r)
10591 {
10592 struct regcache *reg_cache = arm_insn_r->regcache;
10593
10594 uint32_t reg_base , reg_dest;
10595 uint32_t offset_12, tgt_mem_addr;
10596 uint32_t record_buf[8], record_buf_mem[8];
10597 unsigned char wback;
10598 ULONGEST u_regval;
10599
10600 /* Calculate wback. */
10601 wback = (bit (arm_insn_r->arm_insn, 24) == 0)
10602 || (bit (arm_insn_r->arm_insn, 21) == 1);
10603
10604 arm_insn_r->reg_rec_count = 0;
10605 reg_base = bits (arm_insn_r->arm_insn, 16, 19);
10606
10607 if (bit (arm_insn_r->arm_insn, INSN_S_L_BIT_NUM))
10608 {
10609 /* LDR (immediate), LDR (literal), LDRB (immediate), LDRB (literal), LDRBT
10610 and LDRT. */
10611
10612 reg_dest = bits (arm_insn_r->arm_insn, 12, 15);
10613 record_buf[arm_insn_r->reg_rec_count++] = reg_dest;
10614
10615 /* The LDR instruction is capable of doing branching. If MOV LR, PC
10616 preceeds a LDR instruction having R15 as reg_base, it
10617 emulates a branch and link instruction, and hence we need to save
10618 CPSR and PC as well. */
10619 if (ARM_PC_REGNUM == reg_dest)
10620 record_buf[arm_insn_r->reg_rec_count++] = ARM_PS_REGNUM;
10621
10622 /* If wback is true, also save the base register, which is going to be
10623 written to. */
10624 if (wback)
10625 record_buf[arm_insn_r->reg_rec_count++] = reg_base;
10626 }
10627 else
10628 {
10629 /* STR (immediate), STRB (immediate), STRBT and STRT. */
10630
10631 offset_12 = bits (arm_insn_r->arm_insn, 0, 11);
10632 regcache_raw_read_unsigned (reg_cache, reg_base, &u_regval);
10633
10634 /* Handle bit U. */
10635 if (bit (arm_insn_r->arm_insn, 23))
10636 {
10637 /* U == 1: Add the offset. */
10638 tgt_mem_addr = (uint32_t) u_regval + offset_12;
10639 }
10640 else
10641 {
10642 /* U == 0: subtract the offset. */
10643 tgt_mem_addr = (uint32_t) u_regval - offset_12;
10644 }
10645
10646 /* Bit 22 tells us whether the store instruction writes 1 byte or 4
10647 bytes. */
10648 if (bit (arm_insn_r->arm_insn, 22))
10649 {
10650 /* STRB and STRBT: 1 byte. */
10651 record_buf_mem[0] = 1;
10652 }
10653 else
10654 {
10655 /* STR and STRT: 4 bytes. */
10656 record_buf_mem[0] = 4;
10657 }
10658
10659 /* Handle bit P. */
10660 if (bit (arm_insn_r->arm_insn, 24))
10661 record_buf_mem[1] = tgt_mem_addr;
10662 else
10663 record_buf_mem[1] = (uint32_t) u_regval;
10664
10665 arm_insn_r->mem_rec_count = 1;
10666
10667 /* If wback is true, also save the base register, which is going to be
10668 written to. */
10669 if (wback)
10670 record_buf[arm_insn_r->reg_rec_count++] = reg_base;
10671 }
10672
10673 REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf);
10674 MEM_ALLOC (arm_insn_r->arm_mems, arm_insn_r->mem_rec_count, record_buf_mem);
10675 return 0;
10676 }
10677
10678 /* Handling opcode 011 insns. */
10679
10680 static int
10681 arm_record_ld_st_reg_offset (insn_decode_record *arm_insn_r)
10682 {
10683 struct regcache *reg_cache = arm_insn_r->regcache;
10684
10685 uint32_t shift_imm = 0;
10686 uint32_t reg_src1 = 0, reg_src2 = 0, reg_dest = 0;
10687 uint32_t offset_12 = 0, tgt_mem_addr = 0;
10688 uint32_t record_buf[8], record_buf_mem[8];
10689
10690 LONGEST s_word;
10691 ULONGEST u_regval[2];
10692
10693 if (bit (arm_insn_r->arm_insn, 4))
10694 return arm_record_media (arm_insn_r);
10695
10696 arm_insn_r->opcode = bits (arm_insn_r->arm_insn, 21, 24);
10697 arm_insn_r->decode = bits (arm_insn_r->arm_insn, 4, 7);
10698
10699 /* Handle enhanced store insns and LDRD DSP insn,
10700 order begins according to addressing modes for store insns
10701 STRH insn. */
10702
10703 /* LDR or STR? */
10704 if (bit (arm_insn_r->arm_insn, INSN_S_L_BIT_NUM))
10705 {
10706 reg_dest = bits (arm_insn_r->arm_insn, 12, 15);
10707 /* LDR insn has a capability to do branching, if
10708 MOV LR, PC is preceded by LDR insn having Rn as R15
10709 in that case, it emulates branch and link insn, and hence we
10710 need to save CSPR and PC as well. */
10711 if (15 != reg_dest)
10712 {
10713 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
10714 arm_insn_r->reg_rec_count = 1;
10715 }
10716 else
10717 {
10718 record_buf[0] = reg_dest;
10719 record_buf[1] = ARM_PS_REGNUM;
10720 arm_insn_r->reg_rec_count = 2;
10721 }
10722 }
10723 else
10724 {
10725 if (! bits (arm_insn_r->arm_insn, 4, 11))
10726 {
10727 /* Store insn, register offset and register pre-indexed,
10728 register post-indexed. */
10729 /* Get Rm. */
10730 reg_src1 = bits (arm_insn_r->arm_insn, 0, 3);
10731 /* Get Rn. */
10732 reg_src2 = bits (arm_insn_r->arm_insn, 16, 19);
10733 regcache_raw_read_unsigned (reg_cache, reg_src1
10734 , &u_regval[0]);
10735 regcache_raw_read_unsigned (reg_cache, reg_src2
10736 , &u_regval[1]);
10737 if (15 == reg_src2)
10738 {
10739 /* If R15 was used as Rn, hence current PC+8. */
10740 /* Pre-indexed mode doesnt reach here ; illegal insn. */
10741 u_regval[0] = u_regval[0] + 8;
10742 }
10743 /* Calculate target store address, Rn +/- Rm, register offset. */
10744 /* U == 1. */
10745 if (bit (arm_insn_r->arm_insn, 23))
10746 {
10747 tgt_mem_addr = u_regval[0] + u_regval[1];
10748 }
10749 else
10750 {
10751 tgt_mem_addr = u_regval[1] - u_regval[0];
10752 }
10753
10754 switch (arm_insn_r->opcode)
10755 {
10756 /* STR. */
10757 case 8:
10758 case 12:
10759 /* STR. */
10760 case 9:
10761 case 13:
10762 /* STRT. */
10763 case 1:
10764 case 5:
10765 /* STR. */
10766 case 0:
10767 case 4:
10768 record_buf_mem[0] = 4;
10769 break;
10770
10771 /* STRB. */
10772 case 10:
10773 case 14:
10774 /* STRB. */
10775 case 11:
10776 case 15:
10777 /* STRBT. */
10778 case 3:
10779 case 7:
10780 /* STRB. */
10781 case 2:
10782 case 6:
10783 record_buf_mem[0] = 1;
10784 break;
10785
10786 default:
10787 gdb_assert_not_reached ("no decoding pattern found");
10788 break;
10789 }
10790 record_buf_mem[1] = tgt_mem_addr;
10791 arm_insn_r->mem_rec_count = 1;
10792
10793 if (9 == arm_insn_r->opcode || 11 == arm_insn_r->opcode
10794 || 13 == arm_insn_r->opcode || 15 == arm_insn_r->opcode
10795 || 0 == arm_insn_r->opcode || 2 == arm_insn_r->opcode
10796 || 4 == arm_insn_r->opcode || 6 == arm_insn_r->opcode
10797 || 1 == arm_insn_r->opcode || 3 == arm_insn_r->opcode
10798 || 5 == arm_insn_r->opcode || 7 == arm_insn_r->opcode
10799 )
10800 {
10801 /* Rn is going to be changed in pre-indexed mode and
10802 post-indexed mode as well. */
10803 record_buf[0] = reg_src2;
10804 arm_insn_r->reg_rec_count = 1;
10805 }
10806 }
10807 else
10808 {
10809 /* Store insn, scaled register offset; scaled pre-indexed. */
10810 offset_12 = bits (arm_insn_r->arm_insn, 5, 6);
10811 /* Get Rm. */
10812 reg_src1 = bits (arm_insn_r->arm_insn, 0, 3);
10813 /* Get Rn. */
10814 reg_src2 = bits (arm_insn_r->arm_insn, 16, 19);
10815 /* Get shift_imm. */
10816 shift_imm = bits (arm_insn_r->arm_insn, 7, 11);
10817 regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval[0]);
10818 regcache_raw_read_signed (reg_cache, reg_src1, &s_word);
10819 regcache_raw_read_unsigned (reg_cache, reg_src2, &u_regval[1]);
10820 /* Offset_12 used as shift. */
10821 switch (offset_12)
10822 {
10823 case 0:
10824 /* Offset_12 used as index. */
10825 offset_12 = u_regval[0] << shift_imm;
10826 break;
10827
10828 case 1:
10829 offset_12 = (!shift_imm)?0:u_regval[0] >> shift_imm;
10830 break;
10831
10832 case 2:
10833 if (!shift_imm)
10834 {
10835 if (bit (u_regval[0], 31))
10836 {
10837 offset_12 = 0xFFFFFFFF;
10838 }
10839 else
10840 {
10841 offset_12 = 0;
10842 }
10843 }
10844 else
10845 {
10846 /* This is arithmetic shift. */
10847 offset_12 = s_word >> shift_imm;
10848 }
10849 break;
10850
10851 case 3:
10852 if (!shift_imm)
10853 {
10854 regcache_raw_read_unsigned (reg_cache, ARM_PS_REGNUM,
10855 &u_regval[1]);
10856 /* Get C flag value and shift it by 31. */
10857 offset_12 = (((bit (u_regval[1], 29)) << 31) \
10858 | (u_regval[0]) >> 1);
10859 }
10860 else
10861 {
10862 offset_12 = (u_regval[0] >> shift_imm) \
10863 | (u_regval[0] <<
10864 (sizeof(uint32_t) - shift_imm));
10865 }
10866 break;
10867
10868 default:
10869 gdb_assert_not_reached ("no decoding pattern found");
10870 break;
10871 }
10872
10873 regcache_raw_read_unsigned (reg_cache, reg_src2, &u_regval[1]);
10874 /* bit U set. */
10875 if (bit (arm_insn_r->arm_insn, 23))
10876 {
10877 tgt_mem_addr = u_regval[1] + offset_12;
10878 }
10879 else
10880 {
10881 tgt_mem_addr = u_regval[1] - offset_12;
10882 }
10883
10884 switch (arm_insn_r->opcode)
10885 {
10886 /* STR. */
10887 case 8:
10888 case 12:
10889 /* STR. */
10890 case 9:
10891 case 13:
10892 /* STRT. */
10893 case 1:
10894 case 5:
10895 /* STR. */
10896 case 0:
10897 case 4:
10898 record_buf_mem[0] = 4;
10899 break;
10900
10901 /* STRB. */
10902 case 10:
10903 case 14:
10904 /* STRB. */
10905 case 11:
10906 case 15:
10907 /* STRBT. */
10908 case 3:
10909 case 7:
10910 /* STRB. */
10911 case 2:
10912 case 6:
10913 record_buf_mem[0] = 1;
10914 break;
10915
10916 default:
10917 gdb_assert_not_reached ("no decoding pattern found");
10918 break;
10919 }
10920 record_buf_mem[1] = tgt_mem_addr;
10921 arm_insn_r->mem_rec_count = 1;
10922
10923 if (9 == arm_insn_r->opcode || 11 == arm_insn_r->opcode
10924 || 13 == arm_insn_r->opcode || 15 == arm_insn_r->opcode
10925 || 0 == arm_insn_r->opcode || 2 == arm_insn_r->opcode
10926 || 4 == arm_insn_r->opcode || 6 == arm_insn_r->opcode
10927 || 1 == arm_insn_r->opcode || 3 == arm_insn_r->opcode
10928 || 5 == arm_insn_r->opcode || 7 == arm_insn_r->opcode
10929 )
10930 {
10931 /* Rn is going to be changed in register scaled pre-indexed
10932 mode,and scaled post indexed mode. */
10933 record_buf[0] = reg_src2;
10934 arm_insn_r->reg_rec_count = 1;
10935 }
10936 }
10937 }
10938
10939 REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf);
10940 MEM_ALLOC (arm_insn_r->arm_mems, arm_insn_r->mem_rec_count, record_buf_mem);
10941 return 0;
10942 }
10943
10944 /* Handle ARM mode instructions with opcode 100. */
10945
10946 static int
10947 arm_record_ld_st_multiple (insn_decode_record *arm_insn_r)
10948 {
10949 struct regcache *reg_cache = arm_insn_r->regcache;
10950 uint32_t register_count = 0, register_bits;
10951 uint32_t reg_base, addr_mode;
10952 uint32_t record_buf[24], record_buf_mem[48];
10953 uint32_t wback;
10954 ULONGEST u_regval;
10955
10956 /* Fetch the list of registers. */
10957 register_bits = bits (arm_insn_r->arm_insn, 0, 15);
10958 arm_insn_r->reg_rec_count = 0;
10959
10960 /* Fetch the base register that contains the address we are loading data
10961 to. */
10962 reg_base = bits (arm_insn_r->arm_insn, 16, 19);
10963
10964 /* Calculate wback. */
10965 wback = (bit (arm_insn_r->arm_insn, 21) == 1);
10966
10967 if (bit (arm_insn_r->arm_insn, INSN_S_L_BIT_NUM))
10968 {
10969 /* LDM/LDMIA/LDMFD, LDMDA/LDMFA, LDMDB and LDMIB. */
10970
10971 /* Find out which registers are going to be loaded from memory. */
10972 while (register_bits)
10973 {
10974 if (register_bits & 0x00000001)
10975 record_buf[arm_insn_r->reg_rec_count++] = register_count;
10976 register_bits = register_bits >> 1;
10977 register_count++;
10978 }
10979
10980
10981 /* If wback is true, also save the base register, which is going to be
10982 written to. */
10983 if (wback)
10984 record_buf[arm_insn_r->reg_rec_count++] = reg_base;
10985
10986 /* Save the CPSR register. */
10987 record_buf[arm_insn_r->reg_rec_count++] = ARM_PS_REGNUM;
10988 }
10989 else
10990 {
10991 /* STM (STMIA, STMEA), STMDA (STMED), STMDB (STMFD) and STMIB (STMFA). */
10992
10993 addr_mode = bits (arm_insn_r->arm_insn, 23, 24);
10994
10995 regcache_raw_read_unsigned (reg_cache, reg_base, &u_regval);
10996
10997 /* Find out how many registers are going to be stored to memory. */
10998 while (register_bits)
10999 {
11000 if (register_bits & 0x00000001)
11001 register_count++;
11002 register_bits = register_bits >> 1;
11003 }
11004
11005 switch (addr_mode)
11006 {
11007 /* STMDA (STMED): Decrement after. */
11008 case 0:
11009 record_buf_mem[1] = (uint32_t) u_regval
11010 - register_count * ARM_INT_REGISTER_SIZE + 4;
11011 break;
11012 /* STM (STMIA, STMEA): Increment after. */
11013 case 1:
11014 record_buf_mem[1] = (uint32_t) u_regval;
11015 break;
11016 /* STMDB (STMFD): Decrement before. */
11017 case 2:
11018 record_buf_mem[1] = (uint32_t) u_regval
11019 - register_count * ARM_INT_REGISTER_SIZE;
11020 break;
11021 /* STMIB (STMFA): Increment before. */
11022 case 3:
11023 record_buf_mem[1] = (uint32_t) u_regval + ARM_INT_REGISTER_SIZE;
11024 break;
11025 default:
11026 gdb_assert_not_reached ("no decoding pattern found");
11027 break;
11028 }
11029
11030 record_buf_mem[0] = register_count * ARM_INT_REGISTER_SIZE;
11031 arm_insn_r->mem_rec_count = 1;
11032
11033 /* If wback is true, also save the base register, which is going to be
11034 written to. */
11035 if (wback)
11036 record_buf[arm_insn_r->reg_rec_count++] = reg_base;
11037 }
11038
11039 REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf);
11040 MEM_ALLOC (arm_insn_r->arm_mems, arm_insn_r->mem_rec_count, record_buf_mem);
11041 return 0;
11042 }
11043
11044 /* Handling opcode 101 insns. */
11045
11046 static int
11047 arm_record_b_bl (insn_decode_record *arm_insn_r)
11048 {
11049 uint32_t record_buf[8];
11050
11051 /* Handle B, BL, BLX(1) insns. */
11052 /* B simply branches so we do nothing here. */
11053 /* Note: BLX(1) doesnt fall here but instead it falls into
11054 extension space. */
11055 if (bit (arm_insn_r->arm_insn, 24))
11056 {
11057 record_buf[0] = ARM_LR_REGNUM;
11058 arm_insn_r->reg_rec_count = 1;
11059 }
11060
11061 REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf);
11062
11063 return 0;
11064 }
11065
11066 static int
11067 arm_record_unsupported_insn (insn_decode_record *arm_insn_r)
11068 {
11069 printf_unfiltered (_("Process record does not support instruction "
11070 "0x%0x at address %s.\n"),arm_insn_r->arm_insn,
11071 paddress (arm_insn_r->gdbarch, arm_insn_r->this_addr));
11072
11073 return -1;
11074 }
11075
11076 /* Record handler for vector data transfer instructions. */
11077
11078 static int
11079 arm_record_vdata_transfer_insn (insn_decode_record *arm_insn_r)
11080 {
11081 uint32_t bits_a, bit_c, bit_l, reg_t, reg_v;
11082 uint32_t record_buf[4];
11083
11084 reg_t = bits (arm_insn_r->arm_insn, 12, 15);
11085 reg_v = bits (arm_insn_r->arm_insn, 21, 23);
11086 bits_a = bits (arm_insn_r->arm_insn, 21, 23);
11087 bit_l = bit (arm_insn_r->arm_insn, 20);
11088 bit_c = bit (arm_insn_r->arm_insn, 8);
11089
11090 /* Handle VMOV instruction. */
11091 if (bit_l && bit_c)
11092 {
11093 record_buf[0] = reg_t;
11094 arm_insn_r->reg_rec_count = 1;
11095 }
11096 else if (bit_l && !bit_c)
11097 {
11098 /* Handle VMOV instruction. */
11099 if (bits_a == 0x00)
11100 {
11101 record_buf[0] = reg_t;
11102 arm_insn_r->reg_rec_count = 1;
11103 }
11104 /* Handle VMRS instruction. */
11105 else if (bits_a == 0x07)
11106 {
11107 if (reg_t == 15)
11108 reg_t = ARM_PS_REGNUM;
11109
11110 record_buf[0] = reg_t;
11111 arm_insn_r->reg_rec_count = 1;
11112 }
11113 }
11114 else if (!bit_l && !bit_c)
11115 {
11116 /* Handle VMOV instruction. */
11117 if (bits_a == 0x00)
11118 {
11119 record_buf[0] = ARM_D0_REGNUM + reg_v;
11120
11121 arm_insn_r->reg_rec_count = 1;
11122 }
11123 /* Handle VMSR instruction. */
11124 else if (bits_a == 0x07)
11125 {
11126 record_buf[0] = ARM_FPSCR_REGNUM;
11127 arm_insn_r->reg_rec_count = 1;
11128 }
11129 }
11130 else if (!bit_l && bit_c)
11131 {
11132 /* Handle VMOV instruction. */
11133 if (!(bits_a & 0x04))
11134 {
11135 record_buf[0] = (reg_v | (bit (arm_insn_r->arm_insn, 7) << 4))
11136 + ARM_D0_REGNUM;
11137 arm_insn_r->reg_rec_count = 1;
11138 }
11139 /* Handle VDUP instruction. */
11140 else
11141 {
11142 if (bit (arm_insn_r->arm_insn, 21))
11143 {
11144 reg_v = reg_v | (bit (arm_insn_r->arm_insn, 7) << 4);
11145 record_buf[0] = reg_v + ARM_D0_REGNUM;
11146 record_buf[1] = reg_v + ARM_D0_REGNUM + 1;
11147 arm_insn_r->reg_rec_count = 2;
11148 }
11149 else
11150 {
11151 reg_v = reg_v | (bit (arm_insn_r->arm_insn, 7) << 4);
11152 record_buf[0] = reg_v + ARM_D0_REGNUM;
11153 arm_insn_r->reg_rec_count = 1;
11154 }
11155 }
11156 }
11157
11158 REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf);
11159 return 0;
11160 }
11161
11162 /* Record handler for extension register load/store instructions. */
11163
static int
arm_record_exreg_ld_st_insn (insn_decode_record *arm_insn_r)
{
  uint32_t opcode, single_reg;
  uint8_t op_vldm_vstm;
  uint32_t record_buf[8], record_buf_mem[128];
  ULONGEST u_regval = 0;

  struct regcache *reg_cache = arm_insn_r->regcache;

  opcode = bits (arm_insn_r->arm_insn, 20, 24);
  /* Bit 8 clear selects the single-register (4-byte per transfer)
     forms below; set selects the double (8-byte) forms.  */
  single_reg = !bit (arm_insn_r->arm_insn, 8);
  op_vldm_vstm = opcode & 0x1b;

  /* Handle VMOV instructions.  */
  if ((opcode & 0x1e) == 0x04)
    {
      if (bit (arm_insn_r->arm_insn, 20)) /* to_arm_registers bit 20? */
	{
	  /* Transfer to ARM core registers: both Rt and Rt2 change.  */
	  record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
	  record_buf[1] = bits (arm_insn_r->arm_insn, 16, 19);
	  arm_insn_r->reg_rec_count = 2;
	}
      else
	{
	  uint8_t reg_m = bits (arm_insn_r->arm_insn, 0, 3);
	  uint8_t bit_m = bit (arm_insn_r->arm_insn, 5);

	  if (single_reg)
	    {
	      /* The first S register number m is REG_M:M (M is bit 5),
		 the corresponding D register number is REG_M:M / 2, which
		 is REG_M.  */
	      record_buf[arm_insn_r->reg_rec_count++] = ARM_D0_REGNUM + reg_m;
	      /* The second S register number is REG_M:M + 1, the
		 corresponding D register number is (REG_M:M + 1) / 2.
		 IOW, if bit M is 1, the first and second S registers
		 are mapped to different D registers, otherwise, they are
		 in the same D register.  */
	      if (bit_m)
		{
		  record_buf[arm_insn_r->reg_rec_count++]
		    = ARM_D0_REGNUM + reg_m + 1;
		}
	    }
	  else
	    {
	      /* Double form: bit M is bit 4 of the D register number.  */
	      record_buf[0] = ((bit_m << 4) + reg_m + ARM_D0_REGNUM);
	      arm_insn_r->reg_rec_count = 1;
	    }
	}
    }
  /* Handle VSTM and VPUSH instructions.  */
  else if (op_vldm_vstm == 0x08 || op_vldm_vstm == 0x0a
	   || op_vldm_vstm == 0x12)
    {
      uint32_t start_address, reg_rn, imm_off32, imm_off8, memory_count;
      uint32_t memory_index = 0;

      /* IMM_OFF8 is the number of words transferred (imm8 field).  */
      reg_rn = bits (arm_insn_r->arm_insn, 16, 19);
      regcache_raw_read_unsigned (reg_cache, reg_rn, &u_regval);
      imm_off8 = bits (arm_insn_r->arm_insn, 0, 7);
      imm_off32 = imm_off8 << 2;
      memory_count = imm_off8;

      /* Bit 23 (U) set: store upwards from Rn; clear: downwards
	 (VPUSH/decrement-before), so the lowest address is Rn - imm32.  */
      if (bit (arm_insn_r->arm_insn, 23))
	start_address = u_regval;
      else
	start_address = u_regval - imm_off32;

      /* Bit 21 (W): base register write-back, so Rn changes too.  */
      if (bit (arm_insn_r->arm_insn, 21))
	{
	  record_buf[0] = reg_rn;
	  arm_insn_r->reg_rec_count = 1;
	}

      /* Record the stored range as (length, address) pairs.
	 NOTE(review): in the double-register case this loop still runs
	 MEMORY_COUNT (= imm8, a word count) times while recording 8
	 bytes per iteration, which looks like it records twice the
	 stored range — confirm against the VSTM encoding.  */
      while (memory_count > 0)
	{
	  if (single_reg)
	    {
	      record_buf_mem[memory_index] = 4;
	      record_buf_mem[memory_index + 1] = start_address;
	      start_address = start_address + 4;
	      memory_index = memory_index + 2;
	    }
	  else
	    {
	      record_buf_mem[memory_index] = 4;
	      record_buf_mem[memory_index + 1] = start_address;
	      record_buf_mem[memory_index + 2] = 4;
	      record_buf_mem[memory_index + 3] = start_address + 4;
	      start_address = start_address + 8;
	      memory_index = memory_index + 4;
	    }
	  memory_count--;
	}
      arm_insn_r->mem_rec_count = (memory_index >> 1);
    }
  /* Handle VLDM instructions.  */
  else if (op_vldm_vstm == 0x09 || op_vldm_vstm == 0x0b
	   || op_vldm_vstm == 0x13)
    {
      uint32_t reg_count, reg_vd;
      uint32_t reg_index = 0;
      uint32_t bit_d = bit (arm_insn_r->arm_insn, 22);

      reg_vd = bits (arm_insn_r->arm_insn, 12, 15);
      reg_count = bits (arm_insn_r->arm_insn, 0, 7);

      /* REG_VD is the first D register number.  If the instruction
	 loads memory to S registers (SINGLE_REG is TRUE), the register
	 number is (REG_VD << 1 | bit D), so the corresponding D
	 register number is (REG_VD << 1 | bit D) / 2 = REG_VD.  */
      if (!single_reg)
	reg_vd = reg_vd | (bit_d << 4);

      if (bit (arm_insn_r->arm_insn, 21) /* write back */)
	record_buf[reg_index++] = bits (arm_insn_r->arm_insn, 16, 19);

      /* If the instruction loads memory to D register, REG_COUNT should
	 be divided by 2, according to the ARM Architecture Reference
	 Manual.  If the instruction loads memory to S register, divide by
	 2 as well because two S registers are mapped to D register.  */
      reg_count = reg_count / 2;
      if (single_reg && bit_d)
	{
	  /* Increase the register count if S register list starts from
	     an odd number (bit d is one).  */
	  reg_count++;
	}

      /* Record the destination D registers, highest first.  */
      while (reg_count > 0)
	{
	  record_buf[reg_index++] = ARM_D0_REGNUM + reg_vd + reg_count - 1;
	  reg_count--;
	}
      arm_insn_r->reg_rec_count = reg_index;
    }
  /* VSTR Vector store register.  */
  else if ((opcode & 0x13) == 0x10)
    {
      uint32_t start_address, reg_rn, imm_off32, imm_off8;
      uint32_t memory_index = 0;

      reg_rn = bits (arm_insn_r->arm_insn, 16, 19);
      regcache_raw_read_unsigned (reg_cache, reg_rn, &u_regval);
      imm_off8 = bits (arm_insn_r->arm_insn, 0, 7);
      imm_off32 = imm_off8 << 2;

      /* Bit 23 (U): add or subtract the immediate offset from Rn.  */
      if (bit (arm_insn_r->arm_insn, 23))
	start_address = u_regval + imm_off32;
      else
	start_address = u_regval - imm_off32;

      if (single_reg)
	{
	  /* 4-byte store.  */
	  record_buf_mem[memory_index] = 4;
	  record_buf_mem[memory_index + 1] = start_address;
	  arm_insn_r->mem_rec_count = 1;
	}
      else
	{
	  /* 8-byte store, recorded as two 4-byte entries.  */
	  record_buf_mem[memory_index] = 4;
	  record_buf_mem[memory_index + 1] = start_address;
	  record_buf_mem[memory_index + 2] = 4;
	  record_buf_mem[memory_index + 3] = start_address + 4;
	  arm_insn_r->mem_rec_count = 2;
	}
    }
  /* VLDR Vector load register.  */
  else if ((opcode & 0x13) == 0x11)
    {
      uint32_t reg_vd = bits (arm_insn_r->arm_insn, 12, 15);

      if (!single_reg)
	{
	  /* Bit 22 (D) is bit 4 of the D register number.  */
	  reg_vd = reg_vd | (bit (arm_insn_r->arm_insn, 22) << 4);
	  record_buf[0] = ARM_D0_REGNUM + reg_vd;
	}
      else
	{
	  reg_vd = (reg_vd << 1) | bit (arm_insn_r->arm_insn, 22);
	  /* Record register D rather than pseudo register S.  */
	  record_buf[0] = ARM_D0_REGNUM + reg_vd / 2;
	}
      arm_insn_r->reg_rec_count = 1;
    }

  REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf);
  MEM_ALLOC (arm_insn_r->arm_mems, arm_insn_r->mem_rec_count, record_buf_mem);
  return 0;
}
11356
11357 /* Record handler for arm/thumb mode VFP data processing instructions. */
11358
static int
arm_record_vfp_data_proc_insn (insn_decode_record *arm_insn_r)
{
  uint32_t opc1, opc2, opc3, dp_op_sz, bit_d, reg_vd;
  uint32_t record_buf[4];
  /* Destination classes, chosen by the decode below and applied by the
     switch at the end:
       INSN_T0 - two consecutive D registers are modified;
       INSN_T1 - one D register (bit D becomes bit 4 of the regnum);
       INSN_T2 - destination indexed as (Vd << 1 | D);
       INSN_T3 - only the FPSCR is modified;
       INSN_INV - no pattern matched (asserts in the switch).  */
  enum insn_types {INSN_T0, INSN_T1, INSN_T2, INSN_T3, INSN_INV};
  enum insn_types curr_insn_type = INSN_INV;

  reg_vd = bits (arm_insn_r->arm_insn, 12, 15);
  opc1 = bits (arm_insn_r->arm_insn, 20, 23);
  opc2 = bits (arm_insn_r->arm_insn, 16, 19);
  opc3 = bits (arm_insn_r->arm_insn, 6, 7);
  /* DP_OP_SZ: bit 8, selects double (T1) vs single (T2) destinations in
     most cases below.  */
  dp_op_sz = bit (arm_insn_r->arm_insn, 8);
  bit_d = bit (arm_insn_r->arm_insn, 22);
  /* Mask off the "D" bit.  */
  opc1 = opc1 & ~0x04;

  /* Handle VMLA, VMLS.  */
  if (opc1 == 0x00)
    {
      if (bit (arm_insn_r->arm_insn, 10))
	{
	  if (bit (arm_insn_r->arm_insn, 6))
	    curr_insn_type = INSN_T0;
	  else
	    curr_insn_type = INSN_T1;
	}
      else
	{
	  if (dp_op_sz)
	    curr_insn_type = INSN_T1;
	  else
	    curr_insn_type = INSN_T2;
	}
    }
  /* Handle VNMLA, VNMLS, VNMUL.  */
  else if (opc1 == 0x01)
    {
      if (dp_op_sz)
	curr_insn_type = INSN_T1;
      else
	curr_insn_type = INSN_T2;
    }
  /* Handle VMUL.  */
  else if (opc1 == 0x02 && !(opc3 & 0x01))
    {
      if (bit (arm_insn_r->arm_insn, 10))
	{
	  if (bit (arm_insn_r->arm_insn, 6))
	    curr_insn_type = INSN_T0;
	  else
	    curr_insn_type = INSN_T1;
	}
      else
	{
	  if (dp_op_sz)
	    curr_insn_type = INSN_T1;
	  else
	    curr_insn_type = INSN_T2;
	}
    }
  /* Handle VADD, VSUB.  */
  else if (opc1 == 0x03)
    {
      if (!bit (arm_insn_r->arm_insn, 9))
	{
	  if (bit (arm_insn_r->arm_insn, 6))
	    curr_insn_type = INSN_T0;
	  else
	    curr_insn_type = INSN_T1;
	}
      else
	{
	  if (dp_op_sz)
	    curr_insn_type = INSN_T1;
	  else
	    curr_insn_type = INSN_T2;
	}
    }
  /* Handle VDIV.  */
  else if (opc1 == 0x08)
    {
      if (dp_op_sz)
	curr_insn_type = INSN_T1;
      else
	curr_insn_type = INSN_T2;
    }
  /* Handle all other vfp data processing instructions.  */
  else if (opc1 == 0x0b)
    {
      /* Handle VMOV.  */
      if (!(opc3 & 0x01) || (opc2 == 0x00 && opc3 == 0x01))
	{
	  if (bit (arm_insn_r->arm_insn, 4))
	    {
	      if (bit (arm_insn_r->arm_insn, 6))
		curr_insn_type = INSN_T0;
	      else
		curr_insn_type = INSN_T1;
	    }
	  else
	    {
	      if (dp_op_sz)
		curr_insn_type = INSN_T1;
	      else
		curr_insn_type = INSN_T2;
	    }
	}
      /* Handle VNEG and VABS.  */
      else if ((opc2 == 0x01 && opc3 == 0x01)
	       || (opc2 == 0x00 && opc3 == 0x03))
	{
	  if (!bit (arm_insn_r->arm_insn, 11))
	    {
	      if (bit (arm_insn_r->arm_insn, 6))
		curr_insn_type = INSN_T0;
	      else
		curr_insn_type = INSN_T1;
	    }
	  else
	    {
	      if (dp_op_sz)
		curr_insn_type = INSN_T1;
	      else
		curr_insn_type = INSN_T2;
	    }
	}
      /* Handle VSQRT.  */
      else if (opc2 == 0x01 && opc3 == 0x03)
	{
	  if (dp_op_sz)
	    curr_insn_type = INSN_T1;
	  else
	    curr_insn_type = INSN_T2;
	}
      /* Handle VCVT.  */
      else if (opc2 == 0x07 && opc3 == 0x03)
	{
	  /* NOTE(review): the DP_OP_SZ test is inverted relative to the
	     other cases — presumably because VCVT between double and
	     single precision writes the opposite-precision register;
	     confirm against the ARM ARM.  */
	  if (!dp_op_sz)
	    curr_insn_type = INSN_T1;
	  else
	    curr_insn_type = INSN_T2;
	}
      else if (opc3 & 0x01)
	{
	  /* Handle VCVT.  */
	  if ((opc2 == 0x08) || (opc2 & 0x0e) == 0x0c)
	    {
	      if (!bit (arm_insn_r->arm_insn, 18))
		curr_insn_type = INSN_T2;
	      else
		{
		  if (dp_op_sz)
		    curr_insn_type = INSN_T1;
		  else
		    curr_insn_type = INSN_T2;
		}
	    }
	  /* Handle VCVT.  */
	  else if ((opc2 & 0x0e) == 0x0a || (opc2 & 0x0e) == 0x0e)
	    {
	      if (dp_op_sz)
		curr_insn_type = INSN_T1;
	      else
		curr_insn_type = INSN_T2;
	    }
	  /* Handle VCVTB, VCVTT.  */
	  else if ((opc2 & 0x0e) == 0x02)
	    curr_insn_type = INSN_T2;
	  /* Handle VCMP, VCMPE.  */
	  else if ((opc2 & 0x0e) == 0x04)
	    curr_insn_type = INSN_T3;
	}
    }

  /* Record the destination register(s) according to the class decoded
     above.  */
  switch (curr_insn_type)
    {
    case INSN_T0:
      reg_vd = reg_vd | (bit_d << 4);
      record_buf[0] = reg_vd + ARM_D0_REGNUM;
      record_buf[1] = reg_vd + ARM_D0_REGNUM + 1;
      arm_insn_r->reg_rec_count = 2;
      break;

    case INSN_T1:
      reg_vd = reg_vd | (bit_d << 4);
      record_buf[0] = reg_vd + ARM_D0_REGNUM;
      arm_insn_r->reg_rec_count = 1;
      break;

    case INSN_T2:
      /* NOTE(review): this records the S register index relative to
	 ARM_D0_REGNUM rather than the covering D register — verify
	 against how other single-precision paths record (e.g. VLDR
	 records ARM_D0_REGNUM + s/2).  */
      reg_vd = (reg_vd << 1) | bit_d;
      record_buf[0] = reg_vd + ARM_D0_REGNUM;
      arm_insn_r->reg_rec_count = 1;
      break;

    case INSN_T3:
      record_buf[0] = ARM_FPSCR_REGNUM;
      arm_insn_r->reg_rec_count = 1;
      break;

    default:
      /* Every decoded pattern sets CURR_INSN_TYPE; reaching here means
	 an encoding this decoder does not cover.  */
      gdb_assert_not_reached ("no decoding pattern found");
      break;
    }

  REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf);
  return 0;
}
11568
11569 /* Handling opcode 110 insns. */
11570
11571 static int
11572 arm_record_asimd_vfp_coproc (insn_decode_record *arm_insn_r)
11573 {
11574 uint32_t op1, op1_ebit, coproc;
11575
11576 coproc = bits (arm_insn_r->arm_insn, 8, 11);
11577 op1 = bits (arm_insn_r->arm_insn, 20, 25);
11578 op1_ebit = bit (arm_insn_r->arm_insn, 20);
11579
11580 if ((coproc & 0x0e) == 0x0a)
11581 {
11582 /* Handle extension register ld/st instructions. */
11583 if (!(op1 & 0x20))
11584 return arm_record_exreg_ld_st_insn (arm_insn_r);
11585
11586 /* 64-bit transfers between arm core and extension registers. */
11587 if ((op1 & 0x3e) == 0x04)
11588 return arm_record_exreg_ld_st_insn (arm_insn_r);
11589 }
11590 else
11591 {
11592 /* Handle coprocessor ld/st instructions. */
11593 if (!(op1 & 0x3a))
11594 {
11595 /* Store. */
11596 if (!op1_ebit)
11597 return arm_record_unsupported_insn (arm_insn_r);
11598 else
11599 /* Load. */
11600 return arm_record_unsupported_insn (arm_insn_r);
11601 }
11602
11603 /* Move to coprocessor from two arm core registers. */
11604 if (op1 == 0x4)
11605 return arm_record_unsupported_insn (arm_insn_r);
11606
11607 /* Move to two arm core registers from coprocessor. */
11608 if (op1 == 0x5)
11609 {
11610 uint32_t reg_t[2];
11611
11612 reg_t[0] = bits (arm_insn_r->arm_insn, 12, 15);
11613 reg_t[1] = bits (arm_insn_r->arm_insn, 16, 19);
11614 arm_insn_r->reg_rec_count = 2;
11615
11616 REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, reg_t);
11617 return 0;
11618 }
11619 }
11620 return arm_record_unsupported_insn (arm_insn_r);
11621 }
11622
11623 /* Handling opcode 111 insns. */
11624
11625 static int
11626 arm_record_coproc_data_proc (insn_decode_record *arm_insn_r)
11627 {
11628 uint32_t op, op1_ebit, coproc, bits_24_25;
11629 struct gdbarch_tdep *tdep = gdbarch_tdep (arm_insn_r->gdbarch);
11630 struct regcache *reg_cache = arm_insn_r->regcache;
11631
11632 arm_insn_r->opcode = bits (arm_insn_r->arm_insn, 24, 27);
11633 coproc = bits (arm_insn_r->arm_insn, 8, 11);
11634 op1_ebit = bit (arm_insn_r->arm_insn, 20);
11635 op = bit (arm_insn_r->arm_insn, 4);
11636 bits_24_25 = bits (arm_insn_r->arm_insn, 24, 25);
11637
11638 /* Handle arm SWI/SVC system call instructions. */
11639 if (bits_24_25 == 0x3)
11640 {
11641 if (tdep->arm_syscall_record != NULL)
11642 {
11643 ULONGEST svc_operand, svc_number;
11644
11645 svc_operand = (0x00ffffff & arm_insn_r->arm_insn);
11646
11647 if (svc_operand) /* OABI. */
11648 svc_number = svc_operand - 0x900000;
11649 else /* EABI. */
11650 regcache_raw_read_unsigned (reg_cache, 7, &svc_number);
11651
11652 return tdep->arm_syscall_record (reg_cache, svc_number);
11653 }
11654 else
11655 {
11656 printf_unfiltered (_("no syscall record support\n"));
11657 return -1;
11658 }
11659 }
11660 else if (bits_24_25 == 0x02)
11661 {
11662 if (op)
11663 {
11664 if ((coproc & 0x0e) == 0x0a)
11665 {
11666 /* 8, 16, and 32-bit transfer */
11667 return arm_record_vdata_transfer_insn (arm_insn_r);
11668 }
11669 else
11670 {
11671 if (op1_ebit)
11672 {
11673 /* MRC, MRC2 */
11674 uint32_t record_buf[1];
11675
11676 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
11677 if (record_buf[0] == 15)
11678 record_buf[0] = ARM_PS_REGNUM;
11679
11680 arm_insn_r->reg_rec_count = 1;
11681 REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count,
11682 record_buf);
11683 return 0;
11684 }
11685 else
11686 {
11687 /* MCR, MCR2 */
11688 return -1;
11689 }
11690 }
11691 }
11692 else
11693 {
11694 if ((coproc & 0x0e) == 0x0a)
11695 {
11696 /* VFP data-processing instructions. */
11697 return arm_record_vfp_data_proc_insn (arm_insn_r);
11698 }
11699 else
11700 {
11701 /* CDP, CDP2 */
11702 return -1;
11703 }
11704 }
11705 }
11706 else
11707 {
11708 unsigned int op1 = bits (arm_insn_r->arm_insn, 20, 25);
11709
11710 if (op1 == 5)
11711 {
11712 if ((coproc & 0x0e) != 0x0a)
11713 {
11714 /* MRRC, MRRC2 */
11715 return -1;
11716 }
11717 }
11718 else if (op1 == 4 || op1 == 5)
11719 {
11720 if ((coproc & 0x0e) == 0x0a)
11721 {
11722 /* 64-bit transfers between ARM core and extension */
11723 return -1;
11724 }
11725 else if (op1 == 4)
11726 {
11727 /* MCRR, MCRR2 */
11728 return -1;
11729 }
11730 }
11731 else if (op1 == 0 || op1 == 1)
11732 {
11733 /* UNDEFINED */
11734 return -1;
11735 }
11736 else
11737 {
11738 if ((coproc & 0x0e) == 0x0a)
11739 {
11740 /* Extension register load/store */
11741 }
11742 else
11743 {
11744 /* STC, STC2, LDC, LDC2 */
11745 }
11746 return -1;
11747 }
11748 }
11749
11750 return -1;
11751 }
11752
11753 /* Handling opcode 000 insns. */
11754
11755 static int
11756 thumb_record_shift_add_sub (insn_decode_record *thumb_insn_r)
11757 {
11758 uint32_t record_buf[8];
11759 uint32_t reg_src1 = 0;
11760
11761 reg_src1 = bits (thumb_insn_r->arm_insn, 0, 2);
11762
11763 record_buf[0] = ARM_PS_REGNUM;
11764 record_buf[1] = reg_src1;
11765 thumb_insn_r->reg_rec_count = 2;
11766
11767 REG_ALLOC (thumb_insn_r->arm_regs, thumb_insn_r->reg_rec_count, record_buf);
11768
11769 return 0;
11770 }
11771
11772
11773 /* Handling opcode 001 insns. */
11774
11775 static int
11776 thumb_record_add_sub_cmp_mov (insn_decode_record *thumb_insn_r)
11777 {
11778 uint32_t record_buf[8];
11779 uint32_t reg_src1 = 0;
11780
11781 reg_src1 = bits (thumb_insn_r->arm_insn, 8, 10);
11782
11783 record_buf[0] = ARM_PS_REGNUM;
11784 record_buf[1] = reg_src1;
11785 thumb_insn_r->reg_rec_count = 2;
11786
11787 REG_ALLOC (thumb_insn_r->arm_regs, thumb_insn_r->reg_rec_count, record_buf);
11788
11789 return 0;
11790 }
11791
11792 /* Handling opcode 010 insns. */
11793
11794 static int
11795 thumb_record_ld_st_reg_offset (insn_decode_record *thumb_insn_r)
11796 {
11797 struct regcache *reg_cache = thumb_insn_r->regcache;
11798 uint32_t record_buf[8], record_buf_mem[8];
11799
11800 uint32_t reg_src1 = 0, reg_src2 = 0;
11801 uint32_t opcode1 = 0, opcode2 = 0, opcode3 = 0;
11802
11803 ULONGEST u_regval[2] = {0};
11804
11805 opcode1 = bits (thumb_insn_r->arm_insn, 10, 12);
11806
11807 if (bit (thumb_insn_r->arm_insn, 12))
11808 {
11809 /* Handle load/store register offset. */
11810 uint32_t opB = bits (thumb_insn_r->arm_insn, 9, 11);
11811
11812 if (in_inclusive_range (opB, 4U, 7U))
11813 {
11814 /* LDR(2), LDRB(2) , LDRH(2), LDRSB, LDRSH. */
11815 reg_src1 = bits (thumb_insn_r->arm_insn,0, 2);
11816 record_buf[0] = reg_src1;
11817 thumb_insn_r->reg_rec_count = 1;
11818 }
11819 else if (in_inclusive_range (opB, 0U, 2U))
11820 {
11821 /* STR(2), STRB(2), STRH(2) . */
11822 reg_src1 = bits (thumb_insn_r->arm_insn, 3, 5);
11823 reg_src2 = bits (thumb_insn_r->arm_insn, 6, 8);
11824 regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval[0]);
11825 regcache_raw_read_unsigned (reg_cache, reg_src2, &u_regval[1]);
11826 if (0 == opB)
11827 record_buf_mem[0] = 4; /* STR (2). */
11828 else if (2 == opB)
11829 record_buf_mem[0] = 1; /* STRB (2). */
11830 else if (1 == opB)
11831 record_buf_mem[0] = 2; /* STRH (2). */
11832 record_buf_mem[1] = u_regval[0] + u_regval[1];
11833 thumb_insn_r->mem_rec_count = 1;
11834 }
11835 }
11836 else if (bit (thumb_insn_r->arm_insn, 11))
11837 {
11838 /* Handle load from literal pool. */
11839 /* LDR(3). */
11840 reg_src1 = bits (thumb_insn_r->arm_insn, 8, 10);
11841 record_buf[0] = reg_src1;
11842 thumb_insn_r->reg_rec_count = 1;
11843 }
11844 else if (opcode1)
11845 {
11846 /* Special data instructions and branch and exchange */
11847 opcode2 = bits (thumb_insn_r->arm_insn, 8, 9);
11848 opcode3 = bits (thumb_insn_r->arm_insn, 0, 2);
11849 if ((3 == opcode2) && (!opcode3))
11850 {
11851 /* Branch with exchange. */
11852 record_buf[0] = ARM_PS_REGNUM;
11853 thumb_insn_r->reg_rec_count = 1;
11854 }
11855 else
11856 {
11857 /* Format 8; special data processing insns. */
11858 record_buf[0] = ARM_PS_REGNUM;
11859 record_buf[1] = (bit (thumb_insn_r->arm_insn, 7) << 3
11860 | bits (thumb_insn_r->arm_insn, 0, 2));
11861 thumb_insn_r->reg_rec_count = 2;
11862 }
11863 }
11864 else
11865 {
11866 /* Format 5; data processing insns. */
11867 reg_src1 = bits (thumb_insn_r->arm_insn, 0, 2);
11868 if (bit (thumb_insn_r->arm_insn, 7))
11869 {
11870 reg_src1 = reg_src1 + 8;
11871 }
11872 record_buf[0] = ARM_PS_REGNUM;
11873 record_buf[1] = reg_src1;
11874 thumb_insn_r->reg_rec_count = 2;
11875 }
11876
11877 REG_ALLOC (thumb_insn_r->arm_regs, thumb_insn_r->reg_rec_count, record_buf);
11878 MEM_ALLOC (thumb_insn_r->arm_mems, thumb_insn_r->mem_rec_count,
11879 record_buf_mem);
11880
11881 return 0;
11882 }
11883
11884 /* Handling opcode 001 insns. */
11885
11886 static int
11887 thumb_record_ld_st_imm_offset (insn_decode_record *thumb_insn_r)
11888 {
11889 struct regcache *reg_cache = thumb_insn_r->regcache;
11890 uint32_t record_buf[8], record_buf_mem[8];
11891
11892 uint32_t reg_src1 = 0;
11893 uint32_t opcode = 0, immed_5 = 0;
11894
11895 ULONGEST u_regval = 0;
11896
11897 opcode = bits (thumb_insn_r->arm_insn, 11, 12);
11898
11899 if (opcode)
11900 {
11901 /* LDR(1). */
11902 reg_src1 = bits (thumb_insn_r->arm_insn, 0, 2);
11903 record_buf[0] = reg_src1;
11904 thumb_insn_r->reg_rec_count = 1;
11905 }
11906 else
11907 {
11908 /* STR(1). */
11909 reg_src1 = bits (thumb_insn_r->arm_insn, 3, 5);
11910 immed_5 = bits (thumb_insn_r->arm_insn, 6, 10);
11911 regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval);
11912 record_buf_mem[0] = 4;
11913 record_buf_mem[1] = u_regval + (immed_5 * 4);
11914 thumb_insn_r->mem_rec_count = 1;
11915 }
11916
11917 REG_ALLOC (thumb_insn_r->arm_regs, thumb_insn_r->reg_rec_count, record_buf);
11918 MEM_ALLOC (thumb_insn_r->arm_mems, thumb_insn_r->mem_rec_count,
11919 record_buf_mem);
11920
11921 return 0;
11922 }
11923
11924 /* Handling opcode 100 insns. */
11925
11926 static int
11927 thumb_record_ld_st_stack (insn_decode_record *thumb_insn_r)
11928 {
11929 struct regcache *reg_cache = thumb_insn_r->regcache;
11930 uint32_t record_buf[8], record_buf_mem[8];
11931
11932 uint32_t reg_src1 = 0;
11933 uint32_t opcode = 0, immed_8 = 0, immed_5 = 0;
11934
11935 ULONGEST u_regval = 0;
11936
11937 opcode = bits (thumb_insn_r->arm_insn, 11, 12);
11938
11939 if (3 == opcode)
11940 {
11941 /* LDR(4). */
11942 reg_src1 = bits (thumb_insn_r->arm_insn, 8, 10);
11943 record_buf[0] = reg_src1;
11944 thumb_insn_r->reg_rec_count = 1;
11945 }
11946 else if (1 == opcode)
11947 {
11948 /* LDRH(1). */
11949 reg_src1 = bits (thumb_insn_r->arm_insn, 0, 2);
11950 record_buf[0] = reg_src1;
11951 thumb_insn_r->reg_rec_count = 1;
11952 }
11953 else if (2 == opcode)
11954 {
11955 /* STR(3). */
11956 immed_8 = bits (thumb_insn_r->arm_insn, 0, 7);
11957 regcache_raw_read_unsigned (reg_cache, ARM_SP_REGNUM, &u_regval);
11958 record_buf_mem[0] = 4;
11959 record_buf_mem[1] = u_regval + (immed_8 * 4);
11960 thumb_insn_r->mem_rec_count = 1;
11961 }
11962 else if (0 == opcode)
11963 {
11964 /* STRH(1). */
11965 immed_5 = bits (thumb_insn_r->arm_insn, 6, 10);
11966 reg_src1 = bits (thumb_insn_r->arm_insn, 3, 5);
11967 regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval);
11968 record_buf_mem[0] = 2;
11969 record_buf_mem[1] = u_regval + (immed_5 * 2);
11970 thumb_insn_r->mem_rec_count = 1;
11971 }
11972
11973 REG_ALLOC (thumb_insn_r->arm_regs, thumb_insn_r->reg_rec_count, record_buf);
11974 MEM_ALLOC (thumb_insn_r->arm_mems, thumb_insn_r->mem_rec_count,
11975 record_buf_mem);
11976
11977 return 0;
11978 }
11979
11980 /* Handling opcode 101 insns. */
11981
11982 static int
11983 thumb_record_misc (insn_decode_record *thumb_insn_r)
11984 {
11985 struct regcache *reg_cache = thumb_insn_r->regcache;
11986
11987 uint32_t opcode = 0;
11988 uint32_t register_bits = 0, register_count = 0;
11989 uint32_t index = 0, start_address = 0;
11990 uint32_t record_buf[24], record_buf_mem[48];
11991 uint32_t reg_src1;
11992
11993 ULONGEST u_regval = 0;
11994
11995 opcode = bits (thumb_insn_r->arm_insn, 11, 12);
11996
11997 if (opcode == 0 || opcode == 1)
11998 {
11999 /* ADR and ADD (SP plus immediate) */
12000
12001 reg_src1 = bits (thumb_insn_r->arm_insn, 8, 10);
12002 record_buf[0] = reg_src1;
12003 thumb_insn_r->reg_rec_count = 1;
12004 }
12005 else
12006 {
12007 /* Miscellaneous 16-bit instructions */
12008 uint32_t opcode2 = bits (thumb_insn_r->arm_insn, 8, 11);
12009
12010 switch (opcode2)
12011 {
12012 case 6:
12013 /* SETEND and CPS */
12014 break;
12015 case 0:
12016 /* ADD/SUB (SP plus immediate) */
12017 reg_src1 = bits (thumb_insn_r->arm_insn, 8, 10);
12018 record_buf[0] = ARM_SP_REGNUM;
12019 thumb_insn_r->reg_rec_count = 1;
12020 break;
12021 case 1: /* fall through */
12022 case 3: /* fall through */
12023 case 9: /* fall through */
12024 case 11:
12025 /* CBNZ, CBZ */
12026 break;
12027 case 2:
12028 /* SXTH, SXTB, UXTH, UXTB */
12029 record_buf[0] = bits (thumb_insn_r->arm_insn, 0, 2);
12030 thumb_insn_r->reg_rec_count = 1;
12031 break;
12032 case 4: /* fall through */
12033 case 5:
12034 /* PUSH. */
12035 register_bits = bits (thumb_insn_r->arm_insn, 0, 7);
12036 regcache_raw_read_unsigned (reg_cache, ARM_SP_REGNUM, &u_regval);
12037 while (register_bits)
12038 {
12039 if (register_bits & 0x00000001)
12040 register_count++;
12041 register_bits = register_bits >> 1;
12042 }
12043 start_address = u_regval - \
12044 (4 * (bit (thumb_insn_r->arm_insn, 8) + register_count));
12045 thumb_insn_r->mem_rec_count = register_count;
12046 while (register_count)
12047 {
12048 record_buf_mem[(register_count * 2) - 1] = start_address;
12049 record_buf_mem[(register_count * 2) - 2] = 4;
12050 start_address = start_address + 4;
12051 register_count--;
12052 }
12053 record_buf[0] = ARM_SP_REGNUM;
12054 thumb_insn_r->reg_rec_count = 1;
12055 break;
12056 case 10:
12057 /* REV, REV16, REVSH */
12058 record_buf[0] = bits (thumb_insn_r->arm_insn, 0, 2);
12059 thumb_insn_r->reg_rec_count = 1;
12060 break;
12061 case 12: /* fall through */
12062 case 13:
12063 /* POP. */
12064 register_bits = bits (thumb_insn_r->arm_insn, 0, 7);
12065 while (register_bits)
12066 {
12067 if (register_bits & 0x00000001)
12068 record_buf[index++] = register_count;
12069 register_bits = register_bits >> 1;
12070 register_count++;
12071 }
12072 record_buf[index++] = ARM_PS_REGNUM;
12073 record_buf[index++] = ARM_SP_REGNUM;
12074 thumb_insn_r->reg_rec_count = index;
12075 break;
12076 case 0xe:
12077 /* BKPT insn. */
12078 /* Handle enhanced software breakpoint insn, BKPT. */
12079 /* CPSR is changed to be executed in ARM state, disabling normal
12080 interrupts, entering abort mode. */
12081 /* According to high vector configuration PC is set. */
12082 /* User hits breakpoint and type reverse, in that case, we need to go back with
12083 previous CPSR and Program Counter. */
12084 record_buf[0] = ARM_PS_REGNUM;
12085 record_buf[1] = ARM_LR_REGNUM;
12086 thumb_insn_r->reg_rec_count = 2;
12087 /* We need to save SPSR value, which is not yet done. */
12088 printf_unfiltered (_("Process record does not support instruction "
12089 "0x%0x at address %s.\n"),
12090 thumb_insn_r->arm_insn,
12091 paddress (thumb_insn_r->gdbarch,
12092 thumb_insn_r->this_addr));
12093 return -1;
12094
12095 case 0xf:
12096 /* If-Then, and hints */
12097 break;
12098 default:
12099 return -1;
12100 };
12101 }
12102
12103 REG_ALLOC (thumb_insn_r->arm_regs, thumb_insn_r->reg_rec_count, record_buf);
12104 MEM_ALLOC (thumb_insn_r->arm_mems, thumb_insn_r->mem_rec_count,
12105 record_buf_mem);
12106
12107 return 0;
12108 }
12109
12110 /* Handling opcode 110 insns. */
12111
static int
thumb_record_ldm_stm_swi (insn_decode_record *thumb_insn_r)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (thumb_insn_r->gdbarch);
  struct regcache *reg_cache = thumb_insn_r->regcache;

  uint32_t ret = 0; /* Return value: -1 = record failure, 0 = success.  */
  uint32_t reg_src1 = 0;
  uint32_t opcode1 = 0, opcode2 = 0, register_bits = 0, register_count = 0;
  uint32_t index = 0, start_address = 0;
  uint32_t record_buf[24], record_buf_mem[48];

  ULONGEST u_regval = 0;

  opcode1 = bits (thumb_insn_r->arm_insn, 8, 12);
  opcode2 = bits (thumb_insn_r->arm_insn, 11, 12);

  if (1 == opcode2)
    {

      /* LDMIA: every register in the list (bits 0..7) plus the base
	 register Rn may be written.  */
      register_bits = bits (thumb_insn_r->arm_insn, 0, 7);
      /* Get Rn.  */
      reg_src1 = bits (thumb_insn_r->arm_insn, 8, 10);
      while (register_bits)
	{
	  if (register_bits & 0x00000001)
	    record_buf[index++] = register_count;
	  register_bits = register_bits >> 1;
	  register_count++;
	}
      record_buf[index++] = reg_src1;
      thumb_insn_r->reg_rec_count = index;
    }
  else if (0 == opcode2)
    {
      /* STMIA: record the memory written, one word per listed register,
	 starting at the value of Rn.  */
      register_bits = bits (thumb_insn_r->arm_insn, 0, 7);
      /* Get Rn.  */
      reg_src1 = bits (thumb_insn_r->arm_insn, 8, 10);
      regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval);
      while (register_bits)
	{
	  if (register_bits & 0x00000001)
	    register_count++;
	  register_bits = register_bits >> 1;
	}
      start_address = u_regval;
      thumb_insn_r->mem_rec_count = register_count;
      /* Fill (length, address) pairs from the end of the buffer.  */
      while (register_count)
	{
	  record_buf_mem[(register_count * 2) - 1] = start_address;
	  record_buf_mem[(register_count * 2) - 2] = 4;
	  start_address = start_address + 4;
	  register_count--;
	}
    }
  else if (0x1F == opcode1)
    {
      /* Handle arm syscall insn.  Thumb SVC: the syscall number is read
	 from r7 and handed to the OS-specific recorder.  */
      if (tdep->arm_syscall_record != NULL)
	{
	  regcache_raw_read_unsigned (reg_cache, 7, &u_regval);
	  ret = tdep->arm_syscall_record (reg_cache, u_regval);
	}
      else
	{
	  printf_unfiltered (_("no syscall record support\n"));
	  return -1;
	}
    }

  /* B (1), conditional branch is automatically taken care in process_record,
     as PC is saved there.  */

  REG_ALLOC (thumb_insn_r->arm_regs, thumb_insn_r->reg_rec_count, record_buf);
  MEM_ALLOC (thumb_insn_r->arm_mems, thumb_insn_r->mem_rec_count,
	     record_buf_mem);

  return ret;
}
12193
12194 /* Handling opcode 111 insns. */
12195
12196 static int
12197 thumb_record_branch (insn_decode_record *thumb_insn_r)
12198 {
12199 uint32_t record_buf[8];
12200 uint32_t bits_h = 0;
12201
12202 bits_h = bits (thumb_insn_r->arm_insn, 11, 12);
12203
12204 if (2 == bits_h || 3 == bits_h)
12205 {
12206 /* BL */
12207 record_buf[0] = ARM_LR_REGNUM;
12208 thumb_insn_r->reg_rec_count = 1;
12209 }
12210 else if (1 == bits_h)
12211 {
12212 /* BLX(1). */
12213 record_buf[0] = ARM_PS_REGNUM;
12214 record_buf[1] = ARM_LR_REGNUM;
12215 thumb_insn_r->reg_rec_count = 2;
12216 }
12217
12218 /* B(2) is automatically taken care in process_record, as PC is
12219 saved there. */
12220
12221 REG_ALLOC (thumb_insn_r->arm_regs, thumb_insn_r->reg_rec_count, record_buf);
12222
12223 return 0;
12224 }
12225
12226 /* Handler for thumb2 load/store multiple instructions. */
12227
static int
thumb2_record_ld_st_multiple (insn_decode_record *thumb2_insn_r)
{
  struct regcache *reg_cache = thumb2_insn_r->regcache;

  uint32_t reg_rn, op;
  uint32_t register_bits = 0, register_count = 0;
  uint32_t index = 0, start_address = 0;
  uint32_t record_buf[24], record_buf_mem[48];

  ULONGEST u_regval = 0;

  reg_rn = bits (thumb2_insn_r->arm_insn, 16, 19);
  op = bits (thumb2_insn_r->arm_insn, 23, 24);

  if (0 == op || 3 == op)
    {
      if (bit (thumb2_insn_r->arm_insn, INSN_S_L_BIT_NUM))
	{
	  /* Handle RFE instruction.  */
	  record_buf[0] = ARM_PS_REGNUM;
	  thumb2_insn_r->reg_rec_count = 1;
	}
      else
	{
	  /* Handle SRS instruction after reading banked SP.  */
	  return arm_record_unsupported_insn (thumb2_insn_r);
	}
    }
  else if (1 == op || 2 == op)
    {
      if (bit (thumb2_insn_r->arm_insn, INSN_S_L_BIT_NUM))
	{
	  /* Handle LDM/LDMIA/LDMFD and LDMDB/LDMEA instructions.
	     Every listed register (bits 0..15), the base register and
	     the status register may change.  */
	  register_bits = bits (thumb2_insn_r->arm_insn, 0, 15);
	  while (register_bits)
	    {
	      if (register_bits & 0x00000001)
		record_buf[index++] = register_count;

	      register_count++;
	      register_bits = register_bits >> 1;
	    }
	  record_buf[index++] = reg_rn;
	  record_buf[index++] = ARM_PS_REGNUM;
	  thumb2_insn_r->reg_rec_count = index;
	}
      else
	{
	  /* Handle STM/STMIA/STMEA and STMDB/STMFD.  */
	  register_bits = bits (thumb2_insn_r->arm_insn, 0, 15);
	  regcache_raw_read_unsigned (reg_cache, reg_rn, &u_regval);
	  while (register_bits)
	    {
	      if (register_bits & 0x00000001)
		register_count++;

	      register_bits = register_bits >> 1;
	    }

	  if (1 == op)
	    {
	      /* Start address for STM/STMIA/STMEA (increment after):
		 the base register value itself.  */
	      start_address = u_regval;
	    }
	  else if (2 == op)
	    {
	      /* Start address for STMDB/STMFD (decrement before).  */
	      start_address = u_regval - register_count * 4;
	    }

	  /* Record the stored range as (length, address) pairs, filled
	     from the end of the buffer.  */
	  thumb2_insn_r->mem_rec_count = register_count;
	  while (register_count)
	    {
	      record_buf_mem[register_count * 2 - 1] = start_address;
	      record_buf_mem[register_count * 2 - 2] = 4;
	      start_address = start_address + 4;
	      register_count--;
	    }
	  /* The base register and status register may also change.  */
	  record_buf[0] = reg_rn;
	  record_buf[1] = ARM_PS_REGNUM;
	  thumb2_insn_r->reg_rec_count = 2;
	}
    }

  MEM_ALLOC (thumb2_insn_r->arm_mems, thumb2_insn_r->mem_rec_count,
	     record_buf_mem);
  REG_ALLOC (thumb2_insn_r->arm_regs, thumb2_insn_r->reg_rec_count,
	     record_buf);
  return ARM_RECORD_SUCCESS;
}
12319
/* Handler for thumb2 load/store (dual/exclusive) and table branch
   instructions.  */

static int
thumb2_record_ld_st_dual_ex_tbb (insn_decode_record *thumb2_insn_r)
{
  struct regcache *reg_cache = thumb2_insn_r->regcache;

  uint32_t reg_rd, reg_rn, offset_imm;
  uint32_t reg_dest1, reg_dest2;
  uint32_t address, offset_addr;
  uint32_t record_buf[8], record_buf_mem[8];
  uint32_t op1, op2, op3;

  ULONGEST u_regval[2];

  /* Sub-opcode fields of the dual/exclusive/table-branch space.  */
  op1 = bits (thumb2_insn_r->arm_insn, 23, 24);
  op2 = bits (thumb2_insn_r->arm_insn, 20, 21);
  op3 = bits (thumb2_insn_r->arm_insn, 4, 7);

  if (bit (thumb2_insn_r->arm_insn, INSN_S_L_BIT_NUM))
    {
      /* Load forms.  TBB/TBH (op1 == 1, op2 == 1, op3 == 0 or 1) only
	 change the PC, which process_record always records, so they
	 are excluded here.  */
      if(!(1 == op1 && 1 == op2 && (0 == op3 || 1 == op3)))
	{
	  reg_dest1 = bits (thumb2_insn_r->arm_insn, 12, 15);
	  record_buf[0] = reg_dest1;
	  record_buf[1] = ARM_PS_REGNUM;
	  thumb2_insn_r->reg_rec_count = 2;
	}

      /* Dual-register loads also write a second destination register
	 (bits 8-11).  */
      if (3 == op2 || (op1 & 2) || (1 == op1 && 1 == op2 && 7 == op3))
	{
	  reg_dest2 = bits (thumb2_insn_r->arm_insn, 8, 11);
	  record_buf[2] = reg_dest2;
	  thumb2_insn_r->reg_rec_count = 3;
	}
    }
  else
    {
      /* Store forms: read the base register to compute the addresses
	 that will be overwritten.  */
      reg_rn = bits (thumb2_insn_r->arm_insn, 16, 19);
      regcache_raw_read_unsigned (reg_cache, reg_rn, &u_regval[0]);

      if (0 == op1 && 0 == op2)
	{
	  /* Handle STREX: one word at Rn + imm8*4; Rd (bits 0-3)
	     receives the exclusive-store status.  */
	  offset_imm = bits (thumb2_insn_r->arm_insn, 0, 7);
	  address = u_regval[0] + (offset_imm * 4);
	  record_buf_mem[0] = 4;
	  record_buf_mem[1] = address;
	  thumb2_insn_r->mem_rec_count = 1;
	  reg_rd = bits (thumb2_insn_r->arm_insn, 0, 3);
	  record_buf[0] = reg_rd;
	  thumb2_insn_r->reg_rec_count = 1;
	}
      else if (1 == op1 && 0 == op2)
	{
	  /* STREXB/STREXH/STREXD, selected by op3: status register
	     plus 1, 2, or 4+4 bytes at [Rn].  */
	  reg_rd = bits (thumb2_insn_r->arm_insn, 0, 3);
	  record_buf[0] = reg_rd;
	  thumb2_insn_r->reg_rec_count = 1;
	  address = u_regval[0];
	  record_buf_mem[1] = address;

	  if (4 == op3)
	    {
	      /* Handle STREXB.  */
	      record_buf_mem[0] = 1;
	      thumb2_insn_r->mem_rec_count = 1;
	    }
	  else if (5 == op3)
	    {
	      /* Handle STREXH.  */
	      record_buf_mem[0] = 2 ;
	      thumb2_insn_r->mem_rec_count = 1;
	    }
	  else if (7 == op3)
	    {
	      /* Handle STREXD: two consecutive words.  */
	      address = u_regval[0];
	      record_buf_mem[0] = 4;
	      record_buf_mem[2] = 4;
	      record_buf_mem[3] = address + 4;
	      thumb2_insn_r->mem_rec_count = 2;
	    }
	}
      else
	{
	  /* STRD (immediate): the imm8*4 offset applies only when the
	     P bit (24) is set; the U bit (23) selects add/subtract.  */
	  offset_imm = bits (thumb2_insn_r->arm_insn, 0, 7);

	  if (bit (thumb2_insn_r->arm_insn, 24))
	    {
	      if (bit (thumb2_insn_r->arm_insn, 23))
		offset_addr = u_regval[0] + (offset_imm * 4);
	      else
		offset_addr = u_regval[0] - (offset_imm * 4);

	      address = offset_addr;
	    }
	  else
	    address = u_regval[0];

	  /* Two words are overwritten; also record the (possibly
	     written-back) base register.  */
	  record_buf_mem[0] = 4;
	  record_buf_mem[1] = address;
	  record_buf_mem[2] = 4;
	  record_buf_mem[3] = address + 4;
	  thumb2_insn_r->mem_rec_count = 2;
	  record_buf[0] = reg_rn;
	  thumb2_insn_r->reg_rec_count = 1;
	}
    }

  REG_ALLOC (thumb2_insn_r->arm_regs, thumb2_insn_r->reg_rec_count,
	     record_buf);
  MEM_ALLOC (thumb2_insn_r->arm_mems, thumb2_insn_r->mem_rec_count,
	     record_buf_mem);
  return ARM_RECORD_SUCCESS;
}
12436
12437 /* Handler for thumb2 data processing (shift register and modified immediate)
12438 instructions. */
12439
12440 static int
12441 thumb2_record_data_proc_sreg_mimm (insn_decode_record *thumb2_insn_r)
12442 {
12443 uint32_t reg_rd, op;
12444 uint32_t record_buf[8];
12445
12446 op = bits (thumb2_insn_r->arm_insn, 21, 24);
12447 reg_rd = bits (thumb2_insn_r->arm_insn, 8, 11);
12448
12449 if ((0 == op || 4 == op || 8 == op || 13 == op) && 15 == reg_rd)
12450 {
12451 record_buf[0] = ARM_PS_REGNUM;
12452 thumb2_insn_r->reg_rec_count = 1;
12453 }
12454 else
12455 {
12456 record_buf[0] = reg_rd;
12457 record_buf[1] = ARM_PS_REGNUM;
12458 thumb2_insn_r->reg_rec_count = 2;
12459 }
12460
12461 REG_ALLOC (thumb2_insn_r->arm_regs, thumb2_insn_r->reg_rec_count,
12462 record_buf);
12463 return ARM_RECORD_SUCCESS;
12464 }
12465
12466 /* Generic handler for thumb2 instructions which effect destination and PS
12467 registers. */
12468
12469 static int
12470 thumb2_record_ps_dest_generic (insn_decode_record *thumb2_insn_r)
12471 {
12472 uint32_t reg_rd;
12473 uint32_t record_buf[8];
12474
12475 reg_rd = bits (thumb2_insn_r->arm_insn, 8, 11);
12476
12477 record_buf[0] = reg_rd;
12478 record_buf[1] = ARM_PS_REGNUM;
12479 thumb2_insn_r->reg_rec_count = 2;
12480
12481 REG_ALLOC (thumb2_insn_r->arm_regs, thumb2_insn_r->reg_rec_count,
12482 record_buf);
12483 return ARM_RECORD_SUCCESS;
12484 }
12485
12486 /* Handler for thumb2 branch and miscellaneous control instructions. */
12487
12488 static int
12489 thumb2_record_branch_misc_cntrl (insn_decode_record *thumb2_insn_r)
12490 {
12491 uint32_t op, op1, op2;
12492 uint32_t record_buf[8];
12493
12494 op = bits (thumb2_insn_r->arm_insn, 20, 26);
12495 op1 = bits (thumb2_insn_r->arm_insn, 12, 14);
12496 op2 = bits (thumb2_insn_r->arm_insn, 8, 11);
12497
12498 /* Handle MSR insn. */
12499 if (!(op1 & 0x2) && 0x38 == op)
12500 {
12501 if (!(op2 & 0x3))
12502 {
12503 /* CPSR is going to be changed. */
12504 record_buf[0] = ARM_PS_REGNUM;
12505 thumb2_insn_r->reg_rec_count = 1;
12506 }
12507 else
12508 {
12509 arm_record_unsupported_insn(thumb2_insn_r);
12510 return -1;
12511 }
12512 }
12513 else if (4 == (op1 & 0x5) || 5 == (op1 & 0x5))
12514 {
12515 /* BLX. */
12516 record_buf[0] = ARM_PS_REGNUM;
12517 record_buf[1] = ARM_LR_REGNUM;
12518 thumb2_insn_r->reg_rec_count = 2;
12519 }
12520
12521 REG_ALLOC (thumb2_insn_r->arm_regs, thumb2_insn_r->reg_rec_count,
12522 record_buf);
12523 return ARM_RECORD_SUCCESS;
12524 }
12525
/* Handler for thumb2 store single data item instructions.  Computes the
   target address from the base (and optional register or immediate
   offset), then records the overwritten bytes plus the base register.  */

static int
thumb2_record_str_single_data (insn_decode_record *thumb2_insn_r)
{
  struct regcache *reg_cache = thumb2_insn_r->regcache;

  uint32_t reg_rn, reg_rm, offset_imm, shift_imm;
  uint32_t address, offset_addr;
  uint32_t record_buf[8], record_buf_mem[8];
  uint32_t op1, op2;

  ULONGEST u_regval[2];

  /* op1 (bits 21-23) encodes the access size, op2 (bits 6-11)
     distinguishes register-offset from immediate forms.  */
  op1 = bits (thumb2_insn_r->arm_insn, 21, 23);
  op2 = bits (thumb2_insn_r->arm_insn, 6, 11);
  reg_rn = bits (thumb2_insn_r->arm_insn, 16, 19);
  regcache_raw_read_unsigned (reg_cache, reg_rn, &u_regval[0]);

  if (bit (thumb2_insn_r->arm_insn, 23))
    {
      /* T2 encoding: positive 12-bit immediate offset.  */
      offset_imm = bits (thumb2_insn_r->arm_insn, 0, 11);
      offset_addr = u_regval[0] + offset_imm;
      address = offset_addr;
    }
  else
    {
      /* T3 encoding.  */
      if ((0 == op1 || 1 == op1 || 2 == op1) && !(op2 & 0x20))
	{
	  /* Handle STRB (register): address = Rn + (Rm << imm2).  */
	  reg_rm = bits (thumb2_insn_r->arm_insn, 0, 3);
	  regcache_raw_read_unsigned (reg_cache, reg_rm, &u_regval[1]);
	  shift_imm = bits (thumb2_insn_r->arm_insn, 4, 5);
	  offset_addr = u_regval[1] << shift_imm;
	  address = u_regval[0] + offset_addr;
	}
      else
	{
	  /* 8-bit immediate form; bit 10 (P) says whether the offset
	     applies, bit 9 (U) selects add/subtract.  */
	  offset_imm = bits (thumb2_insn_r->arm_insn, 0, 7);
	  if (bit (thumb2_insn_r->arm_insn, 10))
	    {
	      if (bit (thumb2_insn_r->arm_insn, 9))
		offset_addr = u_regval[0] + offset_imm;
	      else
		offset_addr = u_regval[0] - offset_imm;

	      address = offset_addr;
	    }
	  else
	    address = u_regval[0];
	}
    }

  /* op1 selects the access width (1, 2 or 4 bytes).  */
  switch (op1)
    {
      /* Store byte instructions.  */
      case 4:
      case 0:
	record_buf_mem[0] = 1;
	break;
      /* Store half word instructions.  */
      case 1:
      case 5:
	record_buf_mem[0] = 2;
	break;
      /* Store word instructions.  */
      case 2:
      case 6:
	record_buf_mem[0] = 4;
	break;

      default:
	gdb_assert_not_reached ("no decoding pattern found");
	break;
    }

  /* One (length, address) memory record; the base register may be
     written back, so record it too.  */
  record_buf_mem[1] = address;
  thumb2_insn_r->mem_rec_count = 1;
  record_buf[0] = reg_rn;
  thumb2_insn_r->reg_rec_count = 1;

  REG_ALLOC (thumb2_insn_r->arm_regs, thumb2_insn_r->reg_rec_count,
	     record_buf);
  MEM_ALLOC (thumb2_insn_r->arm_mems, thumb2_insn_r->mem_rec_count,
	     record_buf_mem);
  return ARM_RECORD_SUCCESS;
}
12615
12616 /* Handler for thumb2 load memory hints instructions. */
12617
12618 static int
12619 thumb2_record_ld_mem_hints (insn_decode_record *thumb2_insn_r)
12620 {
12621 uint32_t record_buf[8];
12622 uint32_t reg_rt, reg_rn;
12623
12624 reg_rt = bits (thumb2_insn_r->arm_insn, 12, 15);
12625 reg_rn = bits (thumb2_insn_r->arm_insn, 16, 19);
12626
12627 if (ARM_PC_REGNUM != reg_rt)
12628 {
12629 record_buf[0] = reg_rt;
12630 record_buf[1] = reg_rn;
12631 record_buf[2] = ARM_PS_REGNUM;
12632 thumb2_insn_r->reg_rec_count = 3;
12633
12634 REG_ALLOC (thumb2_insn_r->arm_regs, thumb2_insn_r->reg_rec_count,
12635 record_buf);
12636 return ARM_RECORD_SUCCESS;
12637 }
12638
12639 return ARM_RECORD_FAILURE;
12640 }
12641
12642 /* Handler for thumb2 load word instructions. */
12643
12644 static int
12645 thumb2_record_ld_word (insn_decode_record *thumb2_insn_r)
12646 {
12647 uint32_t record_buf[8];
12648
12649 record_buf[0] = bits (thumb2_insn_r->arm_insn, 12, 15);
12650 record_buf[1] = ARM_PS_REGNUM;
12651 thumb2_insn_r->reg_rec_count = 2;
12652
12653 REG_ALLOC (thumb2_insn_r->arm_regs, thumb2_insn_r->reg_rec_count,
12654 record_buf);
12655 return ARM_RECORD_SUCCESS;
12656 }
12657
12658 /* Handler for thumb2 long multiply, long multiply accumulate, and
12659 divide instructions. */
12660
12661 static int
12662 thumb2_record_lmul_lmla_div (insn_decode_record *thumb2_insn_r)
12663 {
12664 uint32_t opcode1 = 0, opcode2 = 0;
12665 uint32_t record_buf[8];
12666
12667 opcode1 = bits (thumb2_insn_r->arm_insn, 20, 22);
12668 opcode2 = bits (thumb2_insn_r->arm_insn, 4, 7);
12669
12670 if (0 == opcode1 || 2 == opcode1 || (opcode1 >= 4 && opcode1 <= 6))
12671 {
12672 /* Handle SMULL, UMULL, SMULAL. */
12673 /* Handle SMLAL(S), SMULL(S), UMLAL(S), UMULL(S). */
12674 record_buf[0] = bits (thumb2_insn_r->arm_insn, 16, 19);
12675 record_buf[1] = bits (thumb2_insn_r->arm_insn, 12, 15);
12676 record_buf[2] = ARM_PS_REGNUM;
12677 thumb2_insn_r->reg_rec_count = 3;
12678 }
12679 else if (1 == opcode1 || 3 == opcode2)
12680 {
12681 /* Handle SDIV and UDIV. */
12682 record_buf[0] = bits (thumb2_insn_r->arm_insn, 16, 19);
12683 record_buf[1] = bits (thumb2_insn_r->arm_insn, 12, 15);
12684 record_buf[2] = ARM_PS_REGNUM;
12685 thumb2_insn_r->reg_rec_count = 3;
12686 }
12687 else
12688 return ARM_RECORD_FAILURE;
12689
12690 REG_ALLOC (thumb2_insn_r->arm_regs, thumb2_insn_r->reg_rec_count,
12691 record_buf);
12692 return ARM_RECORD_SUCCESS;
12693 }
12694
12695 /* Record handler for thumb32 coprocessor instructions. */
12696
12697 static int
12698 thumb2_record_coproc_insn (insn_decode_record *thumb2_insn_r)
12699 {
12700 if (bit (thumb2_insn_r->arm_insn, 25))
12701 return arm_record_coproc_data_proc (thumb2_insn_r);
12702 else
12703 return arm_record_asimd_vfp_coproc (thumb2_insn_r);
12704 }
12705
/* Record handler for advance SIMD structure load/store instructions.
   For stores (L bit clear) the overwritten memory is recorded; for
   loads (L bit set) the written D registers are recorded.  */

static int
thumb2_record_asimd_struct_ld_st (insn_decode_record *thumb2_insn_r)
{
  struct regcache *reg_cache = thumb2_insn_r->regcache;
  uint32_t l_bit, a_bit, b_bits;
  uint32_t record_buf[128], record_buf_mem[128];
  uint32_t reg_rn, reg_vd, address, f_elem;
  uint32_t index_r = 0, index_e = 0, bf_regs = 0, index_m = 0, loop_t = 0;
  uint8_t f_ebytes;

  /* l_bit: load (1) vs store (0); a_bit: single-element (1) vs
     multiple-structure (0); b_bits select the VST/VLD variant.  */
  l_bit = bit (thumb2_insn_r->arm_insn, 21);
  a_bit = bit (thumb2_insn_r->arm_insn, 23);
  b_bits = bits (thumb2_insn_r->arm_insn, 8, 11);
  reg_rn = bits (thumb2_insn_r->arm_insn, 16, 19);
  reg_vd = bits (thumb2_insn_r->arm_insn, 12, 15);
  reg_vd = (bit (thumb2_insn_r->arm_insn, 22) << 4) | reg_vd;
  /* f_ebytes: element size in bytes; f_elem: elements per D register.  */
  f_ebytes = (1 << bits (thumb2_insn_r->arm_insn, 6, 7));
  f_elem = 8 / f_ebytes;

  if (!l_bit)
    {
      ULONGEST u_regval = 0;
      regcache_raw_read_unsigned (reg_cache, reg_rn, &u_regval);
      address = u_regval;

      if (!a_bit)
	{
	  /* Handle VST1.  */
	  if (b_bits == 0x02 || b_bits == 0x0a || (b_bits & 0x0e) == 0x06)
	    {
	      if (b_bits == 0x07)
		bf_regs = 1;
	      else if (b_bits == 0x0a)
		bf_regs = 2;
	      else if (b_bits == 0x06)
		bf_regs = 3;
	      else if (b_bits == 0x02)
		bf_regs = 4;
	      else
		bf_regs = 0;

	      /* Record each element of each transferred register as a
		 (length, address) pair.  */
	      for (index_r = 0; index_r < bf_regs; index_r++)
		{
		  for (index_e = 0; index_e < f_elem; index_e++)
		    {
		      record_buf_mem[index_m++] = f_ebytes;
		      record_buf_mem[index_m++] = address;
		      address = address + f_ebytes;
		      thumb2_insn_r->mem_rec_count += 1;
		    }
		}
	    }
	  /* Handle VST2.  */
	  else if (b_bits == 0x03 || (b_bits & 0x0e) == 0x08)
	    {
	      if (b_bits == 0x09 || b_bits == 0x08)
		bf_regs = 1;
	      else if (b_bits == 0x03)
		bf_regs = 2;
	      else
		bf_regs = 0;

	      /* Elements are interleaved in pairs.  */
	      for (index_r = 0; index_r < bf_regs; index_r++)
		for (index_e = 0; index_e < f_elem; index_e++)
		  {
		    for (loop_t = 0; loop_t < 2; loop_t++)
		      {
			record_buf_mem[index_m++] = f_ebytes;
			record_buf_mem[index_m++] = address + (loop_t * f_ebytes);
			thumb2_insn_r->mem_rec_count += 1;
		      }
		    address = address + (2 * f_ebytes);
		  }
	    }
	  /* Handle VST3.  */
	  else if ((b_bits & 0x0e) == 0x04)
	    {
	      for (index_e = 0; index_e < f_elem; index_e++)
		{
		  for (loop_t = 0; loop_t < 3; loop_t++)
		    {
		      record_buf_mem[index_m++] = f_ebytes;
		      record_buf_mem[index_m++] = address + (loop_t * f_ebytes);
		      thumb2_insn_r->mem_rec_count += 1;
		    }
		  address = address + (3 * f_ebytes);
		}
	    }
	  /* Handle VST4.  */
	  else if (!(b_bits & 0x0e))
	    {
	      for (index_e = 0; index_e < f_elem; index_e++)
		{
		  for (loop_t = 0; loop_t < 4; loop_t++)
		    {
		      record_buf_mem[index_m++] = f_ebytes;
		      record_buf_mem[index_m++] = address + (loop_t * f_ebytes);
		      thumb2_insn_r->mem_rec_count += 1;
		    }
		  address = address + (4 * f_ebytes);
		}
	    }
	}
      else
	{
	  /* Single-element stores: element size comes from bits 10-11
	     here, not bits 6-7.  */
	  uint8_t bft_size = bits (thumb2_insn_r->arm_insn, 10, 11);

	  if (bft_size == 0x00)
	    f_ebytes = 1;
	  else if (bft_size == 0x01)
	    f_ebytes = 2;
	  else if (bft_size == 0x02)
	    f_ebytes = 4;
	  else
	    f_ebytes = 0;

	  /* Handle VST1.  */
	  if (!(b_bits & 0x0b) || b_bits == 0x08)
	    thumb2_insn_r->mem_rec_count = 1;
	  /* Handle VST2.  */
	  else if ((b_bits & 0x0b) == 0x01 || b_bits == 0x09)
	    thumb2_insn_r->mem_rec_count = 2;
	  /* Handle VST3.  */
	  else if ((b_bits & 0x0b) == 0x02 || b_bits == 0x0a)
	    thumb2_insn_r->mem_rec_count = 3;
	  /* Handle VST4.  */
	  else if ((b_bits & 0x0b) == 0x03 || b_bits == 0x0b)
	    thumb2_insn_r->mem_rec_count = 4;

	  /* NOTE(review): every other MEM record in this file fills
	     record_buf_mem as (length, address) pairs at indices
	     2*i and 2*i+1, but this loop writes both values into the
	     SAME slot, so the length is immediately overwritten by the
	     address.  Suspected bug — confirm against MEM_ALLOC before
	     changing.  */
	  for (index_m = 0; index_m < thumb2_insn_r->mem_rec_count; index_m++)
	    {
	      record_buf_mem[index_m] = f_ebytes;
	      record_buf_mem[index_m] = address + (index_m * f_ebytes);
	    }
	}
    }
  else
    {
      if (!a_bit)
	{
	  /* NOTE(review): this branch sets reg_rec_count but never
	     fills record_buf[0..count-1] with the destination D
	     registers (compare the a_bit branch below) — verify.  */
	  /* Handle VLD1.  */
	  if (b_bits == 0x02 || b_bits == 0x0a || (b_bits & 0x0e) == 0x06)
	    thumb2_insn_r->reg_rec_count = 1;
	  /* Handle VLD2.  */
	  else if (b_bits == 0x03 || (b_bits & 0x0e) == 0x08)
	    thumb2_insn_r->reg_rec_count = 2;
	  /* Handle VLD3.  */
	  else if ((b_bits & 0x0e) == 0x04)
	    thumb2_insn_r->reg_rec_count = 3;
	  /* Handle VLD4.  */
	  else if (!(b_bits & 0x0e))
	    thumb2_insn_r->reg_rec_count = 4;
	}
      else
	{
	  /* Handle VLD1.  */
	  if (!(b_bits & 0x0b) || b_bits == 0x08 || b_bits == 0x0c)
	    thumb2_insn_r->reg_rec_count = 1;
	  /* Handle VLD2.  */
	  else if ((b_bits & 0x0b) == 0x01 || b_bits == 0x09 || b_bits == 0x0d)
	    thumb2_insn_r->reg_rec_count = 2;
	  /* Handle VLD3.  */
	  else if ((b_bits & 0x0b) == 0x02 || b_bits == 0x0a || b_bits == 0x0e)
	    thumb2_insn_r->reg_rec_count = 3;
	  /* Handle VLD4.  */
	  else if ((b_bits & 0x0b) == 0x03 || b_bits == 0x0b || b_bits == 0x0f)
	    thumb2_insn_r->reg_rec_count = 4;

	  /* Record the consecutive destination D registers.  */
	  for (index_r = 0; index_r < thumb2_insn_r->reg_rec_count; index_r++)
	    record_buf[index_r] = reg_vd + ARM_D0_REGNUM + index_r;
	}
    }

  /* Rm != 15 means the base register is written back.  */
  if (bits (thumb2_insn_r->arm_insn, 0, 3) != 15)
    {
      record_buf[index_r] = reg_rn;
      thumb2_insn_r->reg_rec_count += 1;
    }

  REG_ALLOC (thumb2_insn_r->arm_regs, thumb2_insn_r->reg_rec_count,
	     record_buf);
  MEM_ALLOC (thumb2_insn_r->arm_mems, thumb2_insn_r->mem_rec_count,
	     record_buf_mem);
  return 0;
}
12893
/* Decodes thumb2 instruction type and invokes its record handler.
   Dispatches on op1 (bits 27-28), op2 (bits 20-26) and op (bit 15) of
   the 32-bit instruction.  NOTE(review): the return type is unsigned
   int but failure is signalled as -1; callers compare against
   ARM_RECORD_SUCCESS, so this works, but the types are inconsistent.  */

static unsigned int
thumb2_record_decode_insn_handler (insn_decode_record *thumb2_insn_r)
{
  uint32_t op, op1, op2;

  op = bit (thumb2_insn_r->arm_insn, 15);
  op1 = bits (thumb2_insn_r->arm_insn, 27, 28);
  op2 = bits (thumb2_insn_r->arm_insn, 20, 26);

  if (op1 == 0x01)
    {
      if (!(op2 & 0x64 ))
	{
	  /* Load/store multiple instruction.  */
	  return thumb2_record_ld_st_multiple (thumb2_insn_r);
	}
      else if ((op2 & 0x64) == 0x4)
	{
	  /* Load/store (dual/exclusive) and table branch instruction.  */
	  return thumb2_record_ld_st_dual_ex_tbb (thumb2_insn_r);
	}
      else if ((op2 & 0x60) == 0x20)
	{
	  /* Data-processing (shifted register).  */
	  return thumb2_record_data_proc_sreg_mimm (thumb2_insn_r);
	}
      else if (op2 & 0x40)
	{
	  /* Co-processor instructions.  */
	  return thumb2_record_coproc_insn (thumb2_insn_r);
	}
    }
  else if (op1 == 0x02)
    {
      if (op)
	{
	  /* Branches and miscellaneous control instructions.  */
	  return thumb2_record_branch_misc_cntrl (thumb2_insn_r);
	}
      else if (op2 & 0x20)
	{
	  /* Data-processing (plain binary immediate) instruction.  */
	  return thumb2_record_ps_dest_generic (thumb2_insn_r);
	}
      else
	{
	  /* Data-processing (modified immediate).  */
	  return thumb2_record_data_proc_sreg_mimm (thumb2_insn_r);
	}
    }
  else if (op1 == 0x03)
    {
      if (!(op2 & 0x71 ))
	{
	  /* Store single data item.  */
	  return thumb2_record_str_single_data (thumb2_insn_r);
	}
      else if (!((op2 & 0x71) ^ 0x10))
	{
	  /* Advanced SIMD or structure load/store instructions.  */
	  return thumb2_record_asimd_struct_ld_st (thumb2_insn_r);
	}
      else if (!((op2 & 0x67) ^ 0x01))
	{
	  /* Load byte, memory hints instruction.  */
	  return thumb2_record_ld_mem_hints (thumb2_insn_r);
	}
      else if (!((op2 & 0x67) ^ 0x03))
	{
	  /* Load halfword, memory hints instruction.  */
	  return thumb2_record_ld_mem_hints (thumb2_insn_r);
	}
      else if (!((op2 & 0x67) ^ 0x05))
	{
	  /* Load word instruction.  */
	  return thumb2_record_ld_word (thumb2_insn_r);
	}
      else if (!((op2 & 0x70) ^ 0x20))
	{
	  /* Data-processing (register) instruction.  */
	  return thumb2_record_ps_dest_generic (thumb2_insn_r);
	}
      else if (!((op2 & 0x78) ^ 0x30))
	{
	  /* Multiply, multiply accumulate, abs diff instruction.  */
	  return thumb2_record_ps_dest_generic (thumb2_insn_r);
	}
      else if (!((op2 & 0x78) ^ 0x38))
	{
	  /* Long multiply, long multiply accumulate, and divide.  */
	  return thumb2_record_lmul_lmla_div (thumb2_insn_r);
	}
      else if (op2 & 0x40)
	{
	  /* Co-processor instructions.  */
	  return thumb2_record_coproc_insn (thumb2_insn_r);
	}
    }

  /* Nothing matched: report failure.  */
  return -1;
}
12997
12998 namespace {
12999 /* Abstract memory reader. */
13000
13001 class abstract_memory_reader
13002 {
13003 public:
13004 /* Read LEN bytes of target memory at address MEMADDR, placing the
13005 results in GDB's memory at BUF. Return true on success. */
13006
13007 virtual bool read (CORE_ADDR memaddr, gdb_byte *buf, const size_t len) = 0;
13008 };
13009
13010 /* Instruction reader from real target. */
13011
13012 class instruction_reader : public abstract_memory_reader
13013 {
13014 public:
13015 bool read (CORE_ADDR memaddr, gdb_byte *buf, const size_t len) override
13016 {
13017 if (target_read_memory (memaddr, buf, len))
13018 return false;
13019 else
13020 return true;
13021 }
13022 };
13023
13024 } // namespace
13025
13026 /* Extracts arm/thumb/thumb2 insn depending on the size, and returns 0 on success
13027 and positive val on failure. */
13028
13029 static int
13030 extract_arm_insn (abstract_memory_reader& reader,
13031 insn_decode_record *insn_record, uint32_t insn_size)
13032 {
13033 gdb_byte buf[insn_size];
13034
13035 memset (&buf[0], 0, insn_size);
13036
13037 if (!reader.read (insn_record->this_addr, buf, insn_size))
13038 return 1;
13039 insn_record->arm_insn = (uint32_t) extract_unsigned_integer (&buf[0],
13040 insn_size,
13041 gdbarch_byte_order_for_code (insn_record->gdbarch));
13042 return 0;
13043 }
13044
13045 typedef int (*sti_arm_hdl_fp_t) (insn_decode_record*);
13046
13047 /* Decode arm/thumb insn depending on condition cods and opcodes; and
13048 dispatch it. */
13049
13050 static int
13051 decode_insn (abstract_memory_reader &reader, insn_decode_record *arm_record,
13052 record_type_t record_type, uint32_t insn_size)
13053 {
13054
13055 /* (Starting from numerical 0); bits 25, 26, 27 decodes type of arm
13056 instruction. */
13057 static const sti_arm_hdl_fp_t arm_handle_insn[8] =
13058 {
13059 arm_record_data_proc_misc_ld_str, /* 000. */
13060 arm_record_data_proc_imm, /* 001. */
13061 arm_record_ld_st_imm_offset, /* 010. */
13062 arm_record_ld_st_reg_offset, /* 011. */
13063 arm_record_ld_st_multiple, /* 100. */
13064 arm_record_b_bl, /* 101. */
13065 arm_record_asimd_vfp_coproc, /* 110. */
13066 arm_record_coproc_data_proc /* 111. */
13067 };
13068
13069 /* (Starting from numerical 0); bits 13,14,15 decodes type of thumb
13070 instruction. */
13071 static const sti_arm_hdl_fp_t thumb_handle_insn[8] =
13072 { \
13073 thumb_record_shift_add_sub, /* 000. */
13074 thumb_record_add_sub_cmp_mov, /* 001. */
13075 thumb_record_ld_st_reg_offset, /* 010. */
13076 thumb_record_ld_st_imm_offset, /* 011. */
13077 thumb_record_ld_st_stack, /* 100. */
13078 thumb_record_misc, /* 101. */
13079 thumb_record_ldm_stm_swi, /* 110. */
13080 thumb_record_branch /* 111. */
13081 };
13082
13083 uint32_t ret = 0; /* return value: negative:failure 0:success. */
13084 uint32_t insn_id = 0;
13085
13086 if (extract_arm_insn (reader, arm_record, insn_size))
13087 {
13088 if (record_debug)
13089 {
13090 printf_unfiltered (_("Process record: error reading memory at "
13091 "addr %s len = %d.\n"),
13092 paddress (arm_record->gdbarch,
13093 arm_record->this_addr), insn_size);
13094 }
13095 return -1;
13096 }
13097 else if (ARM_RECORD == record_type)
13098 {
13099 arm_record->cond = bits (arm_record->arm_insn, 28, 31);
13100 insn_id = bits (arm_record->arm_insn, 25, 27);
13101
13102 if (arm_record->cond == 0xf)
13103 ret = arm_record_extension_space (arm_record);
13104 else
13105 {
13106 /* If this insn has fallen into extension space
13107 then we need not decode it anymore. */
13108 ret = arm_handle_insn[insn_id] (arm_record);
13109 }
13110 if (ret != ARM_RECORD_SUCCESS)
13111 {
13112 arm_record_unsupported_insn (arm_record);
13113 ret = -1;
13114 }
13115 }
13116 else if (THUMB_RECORD == record_type)
13117 {
13118 /* As thumb does not have condition codes, we set negative. */
13119 arm_record->cond = -1;
13120 insn_id = bits (arm_record->arm_insn, 13, 15);
13121 ret = thumb_handle_insn[insn_id] (arm_record);
13122 if (ret != ARM_RECORD_SUCCESS)
13123 {
13124 arm_record_unsupported_insn (arm_record);
13125 ret = -1;
13126 }
13127 }
13128 else if (THUMB2_RECORD == record_type)
13129 {
13130 /* As thumb does not have condition codes, we set negative. */
13131 arm_record->cond = -1;
13132
13133 /* Swap first half of 32bit thumb instruction with second half. */
13134 arm_record->arm_insn
13135 = (arm_record->arm_insn >> 16) | (arm_record->arm_insn << 16);
13136
13137 ret = thumb2_record_decode_insn_handler (arm_record);
13138
13139 if (ret != ARM_RECORD_SUCCESS)
13140 {
13141 arm_record_unsupported_insn (arm_record);
13142 ret = -1;
13143 }
13144 }
13145 else
13146 {
13147 /* Throw assertion. */
13148 gdb_assert_not_reached ("not a valid instruction, could not decode");
13149 }
13150
13151 return ret;
13152 }
13153
#if GDB_SELF_TEST
namespace selftests {

/* Provide both 16-bit and 32-bit thumb instructions.  */

class instruction_reader_thumb : public abstract_memory_reader
{
public:
  /* INSNS is an array of 16-bit halfwords; its lifetime must outlive
     this reader, which only keeps a pointer.  */
  template<size_t SIZE>
  instruction_reader_thumb (enum bfd_endian endian,
			    const uint16_t (&insns)[SIZE])
    : m_endian (endian), m_insns (insns), m_insns_size (SIZE)
  {}

  /* Serve reads from the halfword array; MEMADDR is interpreted as a
     byte address, so index with MEMADDR / 2.  */
  bool read (CORE_ADDR memaddr, gdb_byte *buf, const size_t len) override
  {
    SELF_CHECK (len == 4 || len == 2);
    SELF_CHECK (memaddr % 2 == 0);
    SELF_CHECK ((memaddr / 2) < m_insns_size);

    store_unsigned_integer (buf, 2, m_endian, m_insns[memaddr / 2]);
    if (len == 4)
      {
	store_unsigned_integer (&buf[2], 2, m_endian,
				m_insns[memaddr / 2 + 1]);
      }
    return true;
  }

private:
  enum bfd_endian m_endian;    /* Byte order used for encoding.  */
  const uint16_t *m_insns;     /* Borrowed pointer to the halfwords.  */
  size_t m_insns_size;         /* Number of halfwords.  */
};

/* Exercise decode_insn on known Thumb and Thumb-2 encodings and check
   the recorded register sets.  */

static void
arm_record_test (void)
{
  struct gdbarch_info info;
  gdbarch_info_init (&info);
  info.bfd_arch_info = bfd_scan_arch ("arm");

  struct gdbarch *gdbarch = gdbarch_find_by_info (info);

  SELF_CHECK (gdbarch != NULL);

  /* 16-bit Thumb instructions.  */
  {
    insn_decode_record arm_record;

    memset (&arm_record, 0, sizeof (insn_decode_record));
    arm_record.gdbarch = gdbarch;

    static const uint16_t insns[] = {
      /* db b2	uxtb	r3, r3 */
      0xb2db,
      /* cd 58	ldr	r5, [r1, r3] */
      0x58cd,
    };

    enum bfd_endian endian = gdbarch_byte_order_for_code (arm_record.gdbarch);
    instruction_reader_thumb reader (endian, insns);
    int ret = decode_insn (reader, &arm_record, THUMB_RECORD,
			   THUMB_INSN_SIZE_BYTES);

    /* uxtb r3, r3 writes only r3.  */
    SELF_CHECK (ret == 0);
    SELF_CHECK (arm_record.mem_rec_count == 0);
    SELF_CHECK (arm_record.reg_rec_count == 1);
    SELF_CHECK (arm_record.arm_regs[0] == 3);

    /* ldr r5, [r1, r3] writes only r5.  */
    arm_record.this_addr += 2;
    ret = decode_insn (reader, &arm_record, THUMB_RECORD,
		       THUMB_INSN_SIZE_BYTES);

    SELF_CHECK (ret == 0);
    SELF_CHECK (arm_record.mem_rec_count == 0);
    SELF_CHECK (arm_record.reg_rec_count == 1);
    SELF_CHECK (arm_record.arm_regs[0] == 5);
  }

  /* 32-bit Thumb-2 instructions.  */
  {
    insn_decode_record arm_record;

    memset (&arm_record, 0, sizeof (insn_decode_record));
    arm_record.gdbarch = gdbarch;

    static const uint16_t insns[] = {
      /* 1d ee 70 7f	 mrc	15, 0, r7, cr13, cr0, {3} */
      0xee1d, 0x7f70,
    };

    enum bfd_endian endian = gdbarch_byte_order_for_code (arm_record.gdbarch);
    instruction_reader_thumb reader (endian, insns);
    int ret = decode_insn (reader, &arm_record, THUMB2_RECORD,
			   THUMB2_INSN_SIZE_BYTES);

    /* mrc writes only its destination register r7.  */
    SELF_CHECK (ret == 0);
    SELF_CHECK (arm_record.mem_rec_count == 0);
    SELF_CHECK (arm_record.reg_rec_count == 1);
    SELF_CHECK (arm_record.arm_regs[0] == 7);
  }
}
} // namespace selftests
#endif /* GDB_SELF_TEST */
13259
13260 /* Cleans up local record registers and memory allocations. */
13261
13262 static void
13263 deallocate_reg_mem (insn_decode_record *record)
13264 {
13265 xfree (record->arm_regs);
13266 xfree (record->arm_mems);
13267 }
13268
13269
13270 /* Parse the current instruction and record the values of the registers and
13271 memory that will be changed in current instruction to record_arch_list".
13272 Return -1 if something is wrong. */
13273
13274 int
13275 arm_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
13276 CORE_ADDR insn_addr)
13277 {
13278
13279 uint32_t no_of_rec = 0;
13280 uint32_t ret = 0; /* return value: -1:record failure ; 0:success */
13281 ULONGEST t_bit = 0, insn_id = 0;
13282
13283 ULONGEST u_regval = 0;
13284
13285 insn_decode_record arm_record;
13286
13287 memset (&arm_record, 0, sizeof (insn_decode_record));
13288 arm_record.regcache = regcache;
13289 arm_record.this_addr = insn_addr;
13290 arm_record.gdbarch = gdbarch;
13291
13292
13293 if (record_debug > 1)
13294 {
13295 fprintf_unfiltered (gdb_stdlog, "Process record: arm_process_record "
13296 "addr = %s\n",
13297 paddress (gdbarch, arm_record.this_addr));
13298 }
13299
13300 instruction_reader reader;
13301 if (extract_arm_insn (reader, &arm_record, 2))
13302 {
13303 if (record_debug)
13304 {
13305 printf_unfiltered (_("Process record: error reading memory at "
13306 "addr %s len = %d.\n"),
13307 paddress (arm_record.gdbarch,
13308 arm_record.this_addr), 2);
13309 }
13310 return -1;
13311 }
13312
13313 /* Check the insn, whether it is thumb or arm one. */
13314
13315 t_bit = arm_psr_thumb_bit (arm_record.gdbarch);
13316 regcache_raw_read_unsigned (arm_record.regcache, ARM_PS_REGNUM, &u_regval);
13317
13318
13319 if (!(u_regval & t_bit))
13320 {
13321 /* We are decoding arm insn. */
13322 ret = decode_insn (reader, &arm_record, ARM_RECORD, ARM_INSN_SIZE_BYTES);
13323 }
13324 else
13325 {
13326 insn_id = bits (arm_record.arm_insn, 11, 15);
13327 /* is it thumb2 insn? */
13328 if ((0x1D == insn_id) || (0x1E == insn_id) || (0x1F == insn_id))
13329 {
13330 ret = decode_insn (reader, &arm_record, THUMB2_RECORD,
13331 THUMB2_INSN_SIZE_BYTES);
13332 }
13333 else
13334 {
13335 /* We are decoding thumb insn. */
13336 ret = decode_insn (reader, &arm_record, THUMB_RECORD,
13337 THUMB_INSN_SIZE_BYTES);
13338 }
13339 }
13340
13341 if (0 == ret)
13342 {
13343 /* Record registers. */
13344 record_full_arch_list_add_reg (arm_record.regcache, ARM_PC_REGNUM);
13345 if (arm_record.arm_regs)
13346 {
13347 for (no_of_rec = 0; no_of_rec < arm_record.reg_rec_count; no_of_rec++)
13348 {
13349 if (record_full_arch_list_add_reg
13350 (arm_record.regcache , arm_record.arm_regs[no_of_rec]))
13351 ret = -1;
13352 }
13353 }
13354 /* Record memories. */
13355 if (arm_record.arm_mems)
13356 {
13357 for (no_of_rec = 0; no_of_rec < arm_record.mem_rec_count; no_of_rec++)
13358 {
13359 if (record_full_arch_list_add_mem
13360 ((CORE_ADDR)arm_record.arm_mems[no_of_rec].addr,
13361 arm_record.arm_mems[no_of_rec].len))
13362 ret = -1;
13363 }
13364 }
13365
13366 if (record_full_arch_list_add_end ())
13367 ret = -1;
13368 }
13369
13370
13371 deallocate_reg_mem (&arm_record);
13372
13373 return ret;
13374 }
13375
13376 /* See arm-tdep.h. */
13377
13378 const target_desc *
13379 arm_read_description (arm_fp_type fp_type)
13380 {
13381 struct target_desc *tdesc = tdesc_arm_list[fp_type];
13382
13383 if (tdesc == nullptr)
13384 {
13385 tdesc = arm_create_target_description (fp_type);
13386 tdesc_arm_list[fp_type] = tdesc;
13387 }
13388
13389 return tdesc;
13390 }
13391
13392 /* See arm-tdep.h. */
13393
13394 const target_desc *
13395 arm_read_mprofile_description (arm_m_profile_type m_type)
13396 {
13397 struct target_desc *tdesc = tdesc_arm_mprofile_list[m_type];
13398
13399 if (tdesc == nullptr)
13400 {
13401 tdesc = arm_create_mprofile_target_description (m_type);
13402 tdesc_arm_mprofile_list[m_type] = tdesc;
13403 }
13404
13405 return tdesc;
13406 }
This page took 0.336045 seconds and 4 git commands to generate.