gdb/testsuite: restore configure script
[deliverable/binutils-gdb.git] / gdb / arm-tdep.c
1 /* Common target dependent code for GDB on ARM systems.
2
3 Copyright (C) 1988-2021 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21
22 #include <ctype.h> /* XXX for isupper (). */
23
24 #include "frame.h"
25 #include "inferior.h"
26 #include "infrun.h"
27 #include "gdbcmd.h"
28 #include "gdbcore.h"
29 #include "dis-asm.h" /* For register styles. */
30 #include "disasm.h"
31 #include "regcache.h"
32 #include "reggroups.h"
33 #include "target-float.h"
34 #include "value.h"
35 #include "arch-utils.h"
36 #include "osabi.h"
37 #include "frame-unwind.h"
38 #include "frame-base.h"
39 #include "trad-frame.h"
40 #include "objfiles.h"
41 #include "dwarf2/frame.h"
42 #include "gdbtypes.h"
43 #include "prologue-value.h"
44 #include "remote.h"
45 #include "target-descriptions.h"
46 #include "user-regs.h"
47 #include "observable.h"
48 #include "count-one-bits.h"
49
50 #include "arch/arm.h"
51 #include "arch/arm-get-next-pcs.h"
52 #include "arm-tdep.h"
53 #include "gdb/sim-arm.h"
54
55 #include "elf-bfd.h"
56 #include "coff/internal.h"
57 #include "elf/arm.h"
58
59 #include "record.h"
60 #include "record-full.h"
61 #include <algorithm>
62
63 #include "producer.h"
64
65 #if GDB_SELF_TEST
66 #include "gdbsupport/selftest.h"
67 #endif
68
69 static bool arm_debug;
70
71 /* Print an "arm" debug statement. */
72
73 #define arm_debug_printf(fmt, ...) \
74 debug_prefixed_printf_cond (arm_debug, "arm", fmt, ##__VA_ARGS__)
75
76 /* Macros for setting and testing a bit in a minimal symbol that marks
77 it as Thumb function. The MSB of the minimal symbol's "info" field
78 is used for this purpose.
79
80 MSYMBOL_SET_SPECIAL Actually sets the "special" bit.
81 MSYMBOL_IS_SPECIAL Tests the "special" bit in a minimal symbol. */
82
83 #define MSYMBOL_SET_SPECIAL(msym) \
84 MSYMBOL_TARGET_FLAG_1 (msym) = 1
85
86 #define MSYMBOL_IS_SPECIAL(msym) \
87 MSYMBOL_TARGET_FLAG_1 (msym)
88
/* One ELF mapping symbol ($a, $d or $t), recording the state of the
   section contents starting at VALUE.  */

struct arm_mapping_symbol
{
  /* Section-relative address at which this mapping symbol takes
     effect.  */
  CORE_ADDR value;

  /* Mapping symbol type character; arm_pc_is_thumb treats 't' as
     Thumb code.  */
  char type;

  /* Order by address, so a vector of these can be sorted and
     binary-searched (see arm_find_mapping_symbol).  */
  bool operator< (const arm_mapping_symbol &other) const
  { return this->value < other.value; }
};
97
98 typedef std::vector<arm_mapping_symbol> arm_mapping_symbol_vec;
99
/* Per-BFD data holding the mapping symbols of an objfile, used to
   tell ARM code, Thumb code and data apart.  */

struct arm_per_bfd
{
  /* NUM_SECTIONS is the number of BFD sections in the objfile; one
     (initially unsorted) mapping-symbol vector is allocated per
     section.  */
  explicit arm_per_bfd (size_t num_sections)
  : section_maps (new arm_mapping_symbol_vec[num_sections]),
    section_maps_sorted (new bool[num_sections] ())
  {}

  DISABLE_COPY_AND_ASSIGN (arm_per_bfd);

  /* Information about mapping symbols ($a, $d, $t) in the objfile.

     The format is an array of vectors of arm_mapping_symbols, there is one
     vector for each section of the objfile (the array is indexed by BFD
     section index).

     For each section, the vector of arm_mapping_symbol is sorted by
     symbol value (address).  */
  std::unique_ptr<arm_mapping_symbol_vec[]> section_maps;

  /* For each corresponding element of section_maps above, is this vector
     sorted.  Sorting is deferred until a section's map is first
     searched (see arm_find_mapping_symbol).  */
  std::unique_ptr<bool[]> section_maps_sorted;
};
123
124 /* Per-bfd data used for mapping symbols. */
125 static bfd_key<arm_per_bfd> arm_bfd_data_key;
126
127 /* The list of available "set arm ..." and "show arm ..." commands. */
128 static struct cmd_list_element *setarmcmdlist = NULL;
129 static struct cmd_list_element *showarmcmdlist = NULL;
130
131 /* The type of floating-point to use. Keep this in sync with enum
132 arm_float_model, and the help string in _initialize_arm_tdep. */
133 static const char *const fp_model_strings[] =
134 {
135 "auto",
136 "softfpa",
137 "fpa",
138 "softvfp",
139 "vfp",
140 NULL
141 };
142
143 /* A variable that can be configured by the user. */
144 static enum arm_float_model arm_fp_model = ARM_FLOAT_AUTO;
145 static const char *current_fp_model = "auto";
146
147 /* The ABI to use. Keep this in sync with arm_abi_kind. */
148 static const char *const arm_abi_strings[] =
149 {
150 "auto",
151 "APCS",
152 "AAPCS",
153 NULL
154 };
155
156 /* A variable that can be configured by the user. */
157 static enum arm_abi_kind arm_abi_global = ARM_ABI_AUTO;
158 static const char *arm_abi_string = "auto";
159
160 /* The execution mode to assume. */
161 static const char *const arm_mode_strings[] =
162 {
163 "auto",
164 "arm",
165 "thumb",
166 NULL
167 };
168
169 static const char *arm_fallback_mode_string = "auto";
170 static const char *arm_force_mode_string = "auto";
171
172 /* The standard register names, and all the valid aliases for them. Note
173 that `fp', `sp' and `pc' are not added in this alias list, because they
174 have been added as builtin user registers in
175 std-regs.c:_initialize_frame_reg. */
176 static const struct
177 {
178 const char *name;
179 int regnum;
180 } arm_register_aliases[] = {
181 /* Basic register numbers. */
182 { "r0", 0 },
183 { "r1", 1 },
184 { "r2", 2 },
185 { "r3", 3 },
186 { "r4", 4 },
187 { "r5", 5 },
188 { "r6", 6 },
189 { "r7", 7 },
190 { "r8", 8 },
191 { "r9", 9 },
192 { "r10", 10 },
193 { "r11", 11 },
194 { "r12", 12 },
195 { "r13", 13 },
196 { "r14", 14 },
197 { "r15", 15 },
198 /* Synonyms (argument and variable registers). */
199 { "a1", 0 },
200 { "a2", 1 },
201 { "a3", 2 },
202 { "a4", 3 },
203 { "v1", 4 },
204 { "v2", 5 },
205 { "v3", 6 },
206 { "v4", 7 },
207 { "v5", 8 },
208 { "v6", 9 },
209 { "v7", 10 },
210 { "v8", 11 },
211 /* Other platform-specific names for r9. */
212 { "sb", 9 },
213 { "tr", 9 },
214 /* Special names. */
215 { "ip", 12 },
216 { "lr", 14 },
217 /* Names used by GCC (not listed in the ARM EABI). */
218 { "sl", 10 },
219 /* A special name from the older ATPCS. */
220 { "wr", 7 },
221 };
222
223 static const char *const arm_register_names[] =
224 {"r0", "r1", "r2", "r3", /* 0 1 2 3 */
225 "r4", "r5", "r6", "r7", /* 4 5 6 7 */
226 "r8", "r9", "r10", "r11", /* 8 9 10 11 */
227 "r12", "sp", "lr", "pc", /* 12 13 14 15 */
228 "f0", "f1", "f2", "f3", /* 16 17 18 19 */
229 "f4", "f5", "f6", "f7", /* 20 21 22 23 */
230 "fps", "cpsr" }; /* 24 25 */
231
232 /* Holds the current set of options to be passed to the disassembler. */
233 static char *arm_disassembler_options;
234
235 /* Valid register name styles. */
236 static const char **valid_disassembly_styles;
237
238 /* Disassembly style to use. Default to "std" register names. */
239 static const char *disassembly_style;
240
241 /* All possible arm target descriptors. */
242 static struct target_desc *tdesc_arm_list[ARM_FP_TYPE_INVALID];
243 static struct target_desc *tdesc_arm_mprofile_list[ARM_M_TYPE_INVALID];
244
245 /* This is used to keep the bfd arch_info in sync with the disassembly
246 style. */
247 static void set_disassembly_style_sfunc (const char *, int,
248 struct cmd_list_element *);
249 static void show_disassembly_style_sfunc (struct ui_file *, int,
250 struct cmd_list_element *,
251 const char *);
252
253 static enum register_status arm_neon_quad_read (struct gdbarch *gdbarch,
254 readable_regcache *regcache,
255 int regnum, gdb_byte *buf);
256 static void arm_neon_quad_write (struct gdbarch *gdbarch,
257 struct regcache *regcache,
258 int regnum, const gdb_byte *buf);
259
260 static CORE_ADDR
261 arm_get_next_pcs_syscall_next_pc (struct arm_get_next_pcs *self);
262
263
264 /* get_next_pcs operations. */
265 static struct arm_get_next_pcs_ops arm_get_next_pcs_ops = {
266 arm_get_next_pcs_read_memory_unsigned_integer,
267 arm_get_next_pcs_syscall_next_pc,
268 arm_get_next_pcs_addr_bits_remove,
269 arm_get_next_pcs_is_thumb,
270 NULL,
271 };
272
/* Cached information about a frame, built by the prologue analyzers
   and consumed by the unwinders.  */

struct arm_prologue_cache
{
  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  */
  CORE_ADDR prev_sp;

  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */

  int framesize;

  /* The register number used to hold the frame pointer for this
     frame.  */
  int framereg;

  /* Saved register offsets.  */
  trad_frame_saved_reg *saved_regs;
};
292
293 namespace {
294
295 /* Abstract class to read ARM instructions from memory. */
296
297 class arm_instruction_reader
298 {
299 public:
300 /* Read a 4 bytes instruction from memory using the BYTE_ORDER endianness. */
301 virtual uint32_t read (CORE_ADDR memaddr, bfd_endian byte_order) const = 0;
302 };
303
/* Read instructions from target memory.  This is the reader used for
   normal debugging; it fetches code bytes through GDB's code cache.  */

class target_arm_instruction_reader : public arm_instruction_reader
{
public:
  /* See arm_instruction_reader::read.  */
  uint32_t read (CORE_ADDR memaddr, bfd_endian byte_order) const override
  {
    return read_code_unsigned_integer (memaddr, 4, byte_order);
  }
};
314
315 } /* namespace */
316
317 static CORE_ADDR arm_analyze_prologue
318 (struct gdbarch *gdbarch, CORE_ADDR prologue_start, CORE_ADDR prologue_end,
319 struct arm_prologue_cache *cache, const arm_instruction_reader &insn_reader);
320
321 /* Architecture version for displaced stepping. This effects the behaviour of
322 certain instructions, and really should not be hard-wired. */
323
324 #define DISPLACED_STEPPING_ARCH_VERSION 5
325
326 /* See arm-tdep.h. */
327
328 bool arm_apcs_32 = true;
329
330 /* Return the bit mask in ARM_PS_REGNUM that indicates Thumb mode. */
331
332 int
333 arm_psr_thumb_bit (struct gdbarch *gdbarch)
334 {
335 if (gdbarch_tdep (gdbarch)->is_m)
336 return XPSR_T;
337 else
338 return CPSR_T;
339 }
340
341 /* Determine if the processor is currently executing in Thumb mode. */
342
343 int
344 arm_is_thumb (struct regcache *regcache)
345 {
346 ULONGEST cpsr;
347 ULONGEST t_bit = arm_psr_thumb_bit (regcache->arch ());
348
349 cpsr = regcache_raw_get_unsigned (regcache, ARM_PS_REGNUM);
350
351 return (cpsr & t_bit) != 0;
352 }
353
354 /* Determine if FRAME is executing in Thumb mode. */
355
356 int
357 arm_frame_is_thumb (struct frame_info *frame)
358 {
359 CORE_ADDR cpsr;
360 ULONGEST t_bit = arm_psr_thumb_bit (get_frame_arch (frame));
361
362 /* Every ARM frame unwinder can unwind the T bit of the CPSR, either
363 directly (from a signal frame or dummy frame) or by interpreting
364 the saved LR (from a prologue or DWARF frame). So consult it and
365 trust the unwinders. */
366 cpsr = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
367
368 return (cpsr & t_bit) != 0;
369 }
370
/* Search for the mapping symbol covering MEMADDR.  If one is found,
   return its type ('a', 'd' or 't').  Otherwise, return 0.  If START
   is non-NULL, set *START to the (absolute) location of the mapping
   symbol.  */

static char
arm_find_mapping_symbol (CORE_ADDR memaddr, CORE_ADDR *start)
{
  struct obj_section *sec;

  /* If there are mapping symbols, consult them.  */
  sec = find_pc_section (memaddr);
  if (sec != NULL)
    {
      /* No per-BFD data means no mapping symbols were recorded for
	 this objfile; fall through and return 0.  */
      arm_per_bfd *data = arm_bfd_data_key.get (sec->objfile->obfd);
      if (data != NULL)
	{
	  unsigned int section_idx = sec->the_bfd_section->index;
	  arm_mapping_symbol_vec &map
	    = data->section_maps[section_idx];

	  /* Sort the vector on first use.  */
	  if (!data->section_maps_sorted[section_idx])
	    {
	      std::sort (map.begin (), map.end ());
	      data->section_maps_sorted[section_idx] = true;
	    }

	  /* Mapping symbol values are section-relative, so convert
	     MEMADDR before searching.  */
	  arm_mapping_symbol map_key = { memaddr - sec->addr (), 0 };
	  arm_mapping_symbol_vec::const_iterator it
	    = std::lower_bound (map.begin (), map.end (), map_key);

	  /* std::lower_bound finds the earliest ordered insertion
	     point.  If the symbol at this position starts at this exact
	     address, we use that; otherwise, the preceding
	     mapping symbol covers this address.  */
	  if (it < map.end ())
	    {
	      if (it->value == map_key.value)
		{
		  if (start)
		    *start = it->value + sec->addr ();
		  return it->type;
		}
	    }

	  if (it > map.begin ())
	    {
	      arm_mapping_symbol_vec::const_iterator prev_it
		= it - 1;

	      if (start)
		*start = prev_it->value + sec->addr ();
	      return prev_it->type;
	    }
	}
    }

  return 0;
}
430
431 /* Determine if the program counter specified in MEMADDR is in a Thumb
432 function. This function should be called for addresses unrelated to
433 any executing frame; otherwise, prefer arm_frame_is_thumb. */
434
435 int
436 arm_pc_is_thumb (struct gdbarch *gdbarch, CORE_ADDR memaddr)
437 {
438 struct bound_minimal_symbol sym;
439 char type;
440 arm_displaced_step_copy_insn_closure *dsc = nullptr;
441
442 if (gdbarch_displaced_step_copy_insn_closure_by_addr_p (gdbarch))
443 dsc = ((arm_displaced_step_copy_insn_closure * )
444 gdbarch_displaced_step_copy_insn_closure_by_addr
445 (gdbarch, current_inferior (), memaddr));
446
447 /* If checking the mode of displaced instruction in copy area, the mode
448 should be determined by instruction on the original address. */
449 if (dsc)
450 {
451 displaced_debug_printf ("check mode of %.8lx instead of %.8lx",
452 (unsigned long) dsc->insn_addr,
453 (unsigned long) memaddr);
454 memaddr = dsc->insn_addr;
455 }
456
457 /* If bit 0 of the address is set, assume this is a Thumb address. */
458 if (IS_THUMB_ADDR (memaddr))
459 return 1;
460
461 /* If the user wants to override the symbol table, let him. */
462 if (strcmp (arm_force_mode_string, "arm") == 0)
463 return 0;
464 if (strcmp (arm_force_mode_string, "thumb") == 0)
465 return 1;
466
467 /* ARM v6-M and v7-M are always in Thumb mode. */
468 if (gdbarch_tdep (gdbarch)->is_m)
469 return 1;
470
471 /* If there are mapping symbols, consult them. */
472 type = arm_find_mapping_symbol (memaddr, NULL);
473 if (type)
474 return type == 't';
475
476 /* Thumb functions have a "special" bit set in minimal symbols. */
477 sym = lookup_minimal_symbol_by_pc (memaddr);
478 if (sym.minsym)
479 return (MSYMBOL_IS_SPECIAL (sym.minsym));
480
481 /* If the user wants to override the fallback mode, let them. */
482 if (strcmp (arm_fallback_mode_string, "arm") == 0)
483 return 0;
484 if (strcmp (arm_fallback_mode_string, "thumb") == 0)
485 return 1;
486
487 /* If we couldn't find any symbol, but we're talking to a running
488 target, then trust the current value of $cpsr. This lets
489 "display/i $pc" always show the correct mode (though if there is
490 a symbol table we will not reach here, so it still may not be
491 displayed in the mode it will be executed). */
492 if (target_has_registers ())
493 return arm_frame_is_thumb (get_current_frame ());
494
495 /* Otherwise we're out of luck; we assume ARM. */
496 return 0;
497 }
498
499 /* Determine if the address specified equals any of these magic return
500 values, called EXC_RETURN, defined by the ARM v6-M, v7-M and v8-M
501 architectures.
502
503 From ARMv6-M Reference Manual B1.5.8
504 Table B1-5 Exception return behavior
505
506 EXC_RETURN Return To Return Stack
507 0xFFFFFFF1 Handler mode Main
508 0xFFFFFFF9 Thread mode Main
509 0xFFFFFFFD Thread mode Process
510
511 From ARMv7-M Reference Manual B1.5.8
512 Table B1-8 EXC_RETURN definition of exception return behavior, no FP
513
514 EXC_RETURN Return To Return Stack
515 0xFFFFFFF1 Handler mode Main
516 0xFFFFFFF9 Thread mode Main
517 0xFFFFFFFD Thread mode Process
518
519 Table B1-9 EXC_RETURN definition of exception return behavior, with
520 FP
521
522 EXC_RETURN Return To Return Stack Frame Type
523 0xFFFFFFE1 Handler mode Main Extended
524 0xFFFFFFE9 Thread mode Main Extended
525 0xFFFFFFED Thread mode Process Extended
526 0xFFFFFFF1 Handler mode Main Basic
527 0xFFFFFFF9 Thread mode Main Basic
528 0xFFFFFFFD Thread mode Process Basic
529
530 For more details see "B1.5.8 Exception return behavior"
531 in both ARMv6-M and ARMv7-M Architecture Reference Manuals.
532
533 In the ARMv8-M Architecture Technical Reference also adds
534 for implementations without the Security Extension:
535
536 EXC_RETURN Condition
537 0xFFFFFFB0 Return to Handler mode.
538 0xFFFFFFB8 Return to Thread mode using the main stack.
539 0xFFFFFFBC Return to Thread mode using the process stack. */
540
541 static int
542 arm_m_addr_is_magic (CORE_ADDR addr)
543 {
544 switch (addr)
545 {
546 /* Values from ARMv8-M Architecture Technical Reference. */
547 case 0xffffffb0:
548 case 0xffffffb8:
549 case 0xffffffbc:
550 /* Values from Tables in B1.5.8 the EXC_RETURN definitions of
551 the exception return behavior. */
552 case 0xffffffe1:
553 case 0xffffffe9:
554 case 0xffffffed:
555 case 0xfffffff1:
556 case 0xfffffff9:
557 case 0xfffffffd:
558 /* Address is magic. */
559 return 1;
560
561 default:
562 /* Address is not magic. */
563 return 0;
564 }
565 }
566
567 /* Remove useless bits from addresses in a running program. */
568 static CORE_ADDR
569 arm_addr_bits_remove (struct gdbarch *gdbarch, CORE_ADDR val)
570 {
571 /* On M-profile devices, do not strip the low bit from EXC_RETURN
572 (the magic exception return address). */
573 if (gdbarch_tdep (gdbarch)->is_m
574 && arm_m_addr_is_magic (val))
575 return val;
576
577 if (arm_apcs_32)
578 return UNMAKE_THUMB_ADDR (val);
579 else
580 return (val & 0x03fffffc);
581 }
582
/* Return 1 if PC is the start of a compiler helper function which
   can be safely ignored during prologue skipping.  IS_THUMB is true
   if the function is known to be a Thumb function due to the way it
   is being called.  */
static int
skip_prologue_function (struct gdbarch *gdbarch, CORE_ADDR pc, int is_thumb)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  struct bound_minimal_symbol msym;

  /* Only a symbol that starts exactly at PC identifies the function;
     a symbol merely covering PC would name the caller's function.  */
  msym = lookup_minimal_symbol_by_pc (pc);
  if (msym.minsym != NULL
      && BMSYMBOL_VALUE_ADDRESS (msym) == pc
      && msym.minsym->linkage_name () != NULL)
    {
      const char *name = msym.minsym->linkage_name ();

      /* The GNU linker's Thumb call stub to foo is named
	 __foo_from_thumb.  Skip the stub's leading "__" so the
	 startswith checks below see the target function's name.  */
      if (strstr (name, "_from_thumb") != NULL)
	name += 2;

      /* On soft-float targets, __truncdfsf2 is called to convert promoted
	 arguments to their argument types in non-prototyped
	 functions.  */
      if (startswith (name, "__truncdfsf2"))
	return 1;
      if (startswith (name, "__aeabi_d2f"))
	return 1;

      /* Internal functions related to thread-local storage.  */
      if (startswith (name, "__tls_get_addr"))
	return 1;
      if (startswith (name, "__aeabi_read_tp"))
	return 1;
    }
  else
    {
      /* If we run against a stripped glibc, we may be unable to identify
	 special functions by name.  Check for one important case,
	 __aeabi_read_tp, by comparing the *code* against the default
	 implementation (this is hand-written ARM assembler in glibc).  */

      if (!is_thumb
	  && read_code_unsigned_integer (pc, 4, byte_order_for_code)
	     == 0xe3e00a0f /* mov r0, #0xffff0fff */
	  && read_code_unsigned_integer (pc + 4, 4, byte_order_for_code)
	     == 0xe240f01f) /* sub pc, r0, #31 */
	return 1;
    }

  return 0;
}
636
637 /* Extract the immediate from instruction movw/movt of encoding T. INSN1 is
638 the first 16-bit of instruction, and INSN2 is the second 16-bit of
639 instruction. */
640 #define EXTRACT_MOVW_MOVT_IMM_T(insn1, insn2) \
641 ((bits ((insn1), 0, 3) << 12) \
642 | (bits ((insn1), 10, 10) << 11) \
643 | (bits ((insn2), 12, 14) << 8) \
644 | bits ((insn2), 0, 7))
645
646 /* Extract the immediate from instruction movw/movt of encoding A. INSN is
647 the 32-bit instruction. */
648 #define EXTRACT_MOVW_MOVT_IMM_A(insn) \
649 ((bits ((insn), 16, 19) << 12) \
650 | bits ((insn), 0, 11))
651
/* Decode a Thumb-2 modified immediate; implements the
   ThumbExpandImmediate pseudo-op.  IMM is the 12-bit encoded value;
   the result is the expanded 32-bit constant.  */

static unsigned int
thumb_expand_immediate (unsigned int imm)
{
  unsigned int rot = imm >> 7;
  unsigned int byte = imm & 0xff;

  /* Rotation counts of 8 and up rotate an 8-bit value (with its top
     bit forced on) into place.  */
  if (rot >= 8)
    return (0x80 | (imm & 0x7f)) << (32 - rot);

  /* Otherwise the low byte is replicated into a fixed pattern chosen
     by bits <11:9> of the encoding.  */
  switch (rot >> 1)
    {
    case 0:			/* 00000000 00000000 00000000 abcdefgh */
      return byte;
    case 1:			/* 00000000 abcdefgh 00000000 abcdefgh */
      return byte | (byte << 16);
    case 2:			/* abcdefgh 00000000 abcdefgh 00000000 */
      return (byte << 8) | (byte << 24);
    default:			/* abcdefgh abcdefgh abcdefgh abcdefgh */
      return byte | (byte << 8) | (byte << 16) | (byte << 24);
    }
}
675
/* Return 1 if the 16-bit Thumb instruction INSN restores SP in
   epilogue, 0 otherwise.  */

static int
thumb_instruction_restores_sp (unsigned short insn)
{
  if (insn == 0x46bd)		/* mov sp, r7 */
    return 1;
  if ((insn & 0xff80) == 0xb000)	/* add sp, imm */
    return 1;
  return (insn & 0xfe00) == 0xbc00;	/* pop <registers> */
}
686
687 /* Analyze a Thumb prologue, looking for a recognizable stack frame
688 and frame pointer. Scan until we encounter a store that could
689 clobber the stack frame unexpectedly, or an unknown instruction.
690 Return the last address which is definitely safe to skip for an
691 initial breakpoint. */
692
693 static CORE_ADDR
694 thumb_analyze_prologue (struct gdbarch *gdbarch,
695 CORE_ADDR start, CORE_ADDR limit,
696 struct arm_prologue_cache *cache)
697 {
698 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
699 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
700 int i;
701 pv_t regs[16];
702 CORE_ADDR offset;
703 CORE_ADDR unrecognized_pc = 0;
704
705 for (i = 0; i < 16; i++)
706 regs[i] = pv_register (i, 0);
707 pv_area stack (ARM_SP_REGNUM, gdbarch_addr_bit (gdbarch));
708
709 while (start < limit)
710 {
711 unsigned short insn;
712
713 insn = read_code_unsigned_integer (start, 2, byte_order_for_code);
714
715 if ((insn & 0xfe00) == 0xb400) /* push { rlist } */
716 {
717 int regno;
718 int mask;
719
720 if (stack.store_would_trash (regs[ARM_SP_REGNUM]))
721 break;
722
723 /* Bits 0-7 contain a mask for registers R0-R7. Bit 8 says
724 whether to save LR (R14). */
725 mask = (insn & 0xff) | ((insn & 0x100) << 6);
726
727 /* Calculate offsets of saved R0-R7 and LR. */
728 for (regno = ARM_LR_REGNUM; regno >= 0; regno--)
729 if (mask & (1 << regno))
730 {
731 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
732 -4);
733 stack.store (regs[ARM_SP_REGNUM], 4, regs[regno]);
734 }
735 }
736 else if ((insn & 0xff80) == 0xb080) /* sub sp, #imm */
737 {
738 offset = (insn & 0x7f) << 2; /* get scaled offset */
739 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
740 -offset);
741 }
742 else if (thumb_instruction_restores_sp (insn))
743 {
744 /* Don't scan past the epilogue. */
745 break;
746 }
747 else if ((insn & 0xf800) == 0xa800) /* add Rd, sp, #imm */
748 regs[bits (insn, 8, 10)] = pv_add_constant (regs[ARM_SP_REGNUM],
749 (insn & 0xff) << 2);
750 else if ((insn & 0xfe00) == 0x1c00 /* add Rd, Rn, #imm */
751 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM))
752 regs[bits (insn, 0, 2)] = pv_add_constant (regs[bits (insn, 3, 5)],
753 bits (insn, 6, 8));
754 else if ((insn & 0xf800) == 0x3000 /* add Rd, #imm */
755 && pv_is_register (regs[bits (insn, 8, 10)], ARM_SP_REGNUM))
756 regs[bits (insn, 8, 10)] = pv_add_constant (regs[bits (insn, 8, 10)],
757 bits (insn, 0, 7));
758 else if ((insn & 0xfe00) == 0x1800 /* add Rd, Rn, Rm */
759 && pv_is_register (regs[bits (insn, 6, 8)], ARM_SP_REGNUM)
760 && pv_is_constant (regs[bits (insn, 3, 5)]))
761 regs[bits (insn, 0, 2)] = pv_add (regs[bits (insn, 3, 5)],
762 regs[bits (insn, 6, 8)]);
763 else if ((insn & 0xff00) == 0x4400 /* add Rd, Rm */
764 && pv_is_constant (regs[bits (insn, 3, 6)]))
765 {
766 int rd = (bit (insn, 7) << 3) + bits (insn, 0, 2);
767 int rm = bits (insn, 3, 6);
768 regs[rd] = pv_add (regs[rd], regs[rm]);
769 }
770 else if ((insn & 0xff00) == 0x4600) /* mov hi, lo or mov lo, hi */
771 {
772 int dst_reg = (insn & 0x7) + ((insn & 0x80) >> 4);
773 int src_reg = (insn & 0x78) >> 3;
774 regs[dst_reg] = regs[src_reg];
775 }
776 else if ((insn & 0xf800) == 0x9000) /* str rd, [sp, #off] */
777 {
778 /* Handle stores to the stack. Normally pushes are used,
779 but with GCC -mtpcs-frame, there may be other stores
780 in the prologue to create the frame. */
781 int regno = (insn >> 8) & 0x7;
782 pv_t addr;
783
784 offset = (insn & 0xff) << 2;
785 addr = pv_add_constant (regs[ARM_SP_REGNUM], offset);
786
787 if (stack.store_would_trash (addr))
788 break;
789
790 stack.store (addr, 4, regs[regno]);
791 }
792 else if ((insn & 0xf800) == 0x6000) /* str rd, [rn, #off] */
793 {
794 int rd = bits (insn, 0, 2);
795 int rn = bits (insn, 3, 5);
796 pv_t addr;
797
798 offset = bits (insn, 6, 10) << 2;
799 addr = pv_add_constant (regs[rn], offset);
800
801 if (stack.store_would_trash (addr))
802 break;
803
804 stack.store (addr, 4, regs[rd]);
805 }
806 else if (((insn & 0xf800) == 0x7000 /* strb Rd, [Rn, #off] */
807 || (insn & 0xf800) == 0x8000) /* strh Rd, [Rn, #off] */
808 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM))
809 /* Ignore stores of argument registers to the stack. */
810 ;
811 else if ((insn & 0xf800) == 0xc800 /* ldmia Rn!, { registers } */
812 && pv_is_register (regs[bits (insn, 8, 10)], ARM_SP_REGNUM))
813 /* Ignore block loads from the stack, potentially copying
814 parameters from memory. */
815 ;
816 else if ((insn & 0xf800) == 0x9800 /* ldr Rd, [Rn, #immed] */
817 || ((insn & 0xf800) == 0x6800 /* ldr Rd, [sp, #immed] */
818 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM)))
819 /* Similarly ignore single loads from the stack. */
820 ;
821 else if ((insn & 0xffc0) == 0x0000 /* lsls Rd, Rm, #0 */
822 || (insn & 0xffc0) == 0x1c00) /* add Rd, Rn, #0 */
823 /* Skip register copies, i.e. saves to another register
824 instead of the stack. */
825 ;
826 else if ((insn & 0xf800) == 0x2000) /* movs Rd, #imm */
827 /* Recognize constant loads; even with small stacks these are necessary
828 on Thumb. */
829 regs[bits (insn, 8, 10)] = pv_constant (bits (insn, 0, 7));
830 else if ((insn & 0xf800) == 0x4800) /* ldr Rd, [pc, #imm] */
831 {
832 /* Constant pool loads, for the same reason. */
833 unsigned int constant;
834 CORE_ADDR loc;
835
836 loc = start + 4 + bits (insn, 0, 7) * 4;
837 constant = read_memory_unsigned_integer (loc, 4, byte_order);
838 regs[bits (insn, 8, 10)] = pv_constant (constant);
839 }
840 else if (thumb_insn_size (insn) == 4) /* 32-bit Thumb-2 instructions. */
841 {
842 unsigned short inst2;
843
844 inst2 = read_code_unsigned_integer (start + 2, 2,
845 byte_order_for_code);
846
847 if ((insn & 0xf800) == 0xf000 && (inst2 & 0xe800) == 0xe800)
848 {
849 /* BL, BLX. Allow some special function calls when
850 skipping the prologue; GCC generates these before
851 storing arguments to the stack. */
852 CORE_ADDR nextpc;
853 int j1, j2, imm1, imm2;
854
855 imm1 = sbits (insn, 0, 10);
856 imm2 = bits (inst2, 0, 10);
857 j1 = bit (inst2, 13);
858 j2 = bit (inst2, 11);
859
860 offset = ((imm1 << 12) + (imm2 << 1));
861 offset ^= ((!j2) << 22) | ((!j1) << 23);
862
863 nextpc = start + 4 + offset;
864 /* For BLX make sure to clear the low bits. */
865 if (bit (inst2, 12) == 0)
866 nextpc = nextpc & 0xfffffffc;
867
868 if (!skip_prologue_function (gdbarch, nextpc,
869 bit (inst2, 12) != 0))
870 break;
871 }
872
873 else if ((insn & 0xffd0) == 0xe900 /* stmdb Rn{!},
874 { registers } */
875 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
876 {
877 pv_t addr = regs[bits (insn, 0, 3)];
878 int regno;
879
880 if (stack.store_would_trash (addr))
881 break;
882
883 /* Calculate offsets of saved registers. */
884 for (regno = ARM_LR_REGNUM; regno >= 0; regno--)
885 if (inst2 & (1 << regno))
886 {
887 addr = pv_add_constant (addr, -4);
888 stack.store (addr, 4, regs[regno]);
889 }
890
891 if (insn & 0x0020)
892 regs[bits (insn, 0, 3)] = addr;
893 }
894
895 else if ((insn & 0xff50) == 0xe940 /* strd Rt, Rt2,
896 [Rn, #+/-imm]{!} */
897 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
898 {
899 int regno1 = bits (inst2, 12, 15);
900 int regno2 = bits (inst2, 8, 11);
901 pv_t addr = regs[bits (insn, 0, 3)];
902
903 offset = inst2 & 0xff;
904 if (insn & 0x0080)
905 addr = pv_add_constant (addr, offset);
906 else
907 addr = pv_add_constant (addr, -offset);
908
909 if (stack.store_would_trash (addr))
910 break;
911
912 stack.store (addr, 4, regs[regno1]);
913 stack.store (pv_add_constant (addr, 4),
914 4, regs[regno2]);
915
916 if (insn & 0x0020)
917 regs[bits (insn, 0, 3)] = addr;
918 }
919
920 else if ((insn & 0xfff0) == 0xf8c0 /* str Rt,[Rn,+/-#imm]{!} */
921 && (inst2 & 0x0c00) == 0x0c00
922 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
923 {
924 int regno = bits (inst2, 12, 15);
925 pv_t addr = regs[bits (insn, 0, 3)];
926
927 offset = inst2 & 0xff;
928 if (inst2 & 0x0200)
929 addr = pv_add_constant (addr, offset);
930 else
931 addr = pv_add_constant (addr, -offset);
932
933 if (stack.store_would_trash (addr))
934 break;
935
936 stack.store (addr, 4, regs[regno]);
937
938 if (inst2 & 0x0100)
939 regs[bits (insn, 0, 3)] = addr;
940 }
941
942 else if ((insn & 0xfff0) == 0xf8c0 /* str.w Rt,[Rn,#imm] */
943 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
944 {
945 int regno = bits (inst2, 12, 15);
946 pv_t addr;
947
948 offset = inst2 & 0xfff;
949 addr = pv_add_constant (regs[bits (insn, 0, 3)], offset);
950
951 if (stack.store_would_trash (addr))
952 break;
953
954 stack.store (addr, 4, regs[regno]);
955 }
956
957 else if ((insn & 0xffd0) == 0xf880 /* str{bh}.w Rt,[Rn,#imm] */
958 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
959 /* Ignore stores of argument registers to the stack. */
960 ;
961
962 else if ((insn & 0xffd0) == 0xf800 /* str{bh} Rt,[Rn,#+/-imm] */
963 && (inst2 & 0x0d00) == 0x0c00
964 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
965 /* Ignore stores of argument registers to the stack. */
966 ;
967
968 else if ((insn & 0xffd0) == 0xe890 /* ldmia Rn[!],
969 { registers } */
970 && (inst2 & 0x8000) == 0x0000
971 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
972 /* Ignore block loads from the stack, potentially copying
973 parameters from memory. */
974 ;
975
976 else if ((insn & 0xff70) == 0xe950 /* ldrd Rt, Rt2,
977 [Rn, #+/-imm] */
978 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
979 /* Similarly ignore dual loads from the stack. */
980 ;
981
982 else if ((insn & 0xfff0) == 0xf850 /* ldr Rt,[Rn,#+/-imm] */
983 && (inst2 & 0x0d00) == 0x0c00
984 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
985 /* Similarly ignore single loads from the stack. */
986 ;
987
988 else if ((insn & 0xfff0) == 0xf8d0 /* ldr.w Rt,[Rn,#imm] */
989 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
990 /* Similarly ignore single loads from the stack. */
991 ;
992
993 else if ((insn & 0xfbf0) == 0xf100 /* add.w Rd, Rn, #imm */
994 && (inst2 & 0x8000) == 0x0000)
995 {
996 unsigned int imm = ((bits (insn, 10, 10) << 11)
997 | (bits (inst2, 12, 14) << 8)
998 | bits (inst2, 0, 7));
999
1000 regs[bits (inst2, 8, 11)]
1001 = pv_add_constant (regs[bits (insn, 0, 3)],
1002 thumb_expand_immediate (imm));
1003 }
1004
1005 else if ((insn & 0xfbf0) == 0xf200 /* addw Rd, Rn, #imm */
1006 && (inst2 & 0x8000) == 0x0000)
1007 {
1008 unsigned int imm = ((bits (insn, 10, 10) << 11)
1009 | (bits (inst2, 12, 14) << 8)
1010 | bits (inst2, 0, 7));
1011
1012 regs[bits (inst2, 8, 11)]
1013 = pv_add_constant (regs[bits (insn, 0, 3)], imm);
1014 }
1015
1016 else if ((insn & 0xfbf0) == 0xf1a0 /* sub.w Rd, Rn, #imm */
1017 && (inst2 & 0x8000) == 0x0000)
1018 {
1019 unsigned int imm = ((bits (insn, 10, 10) << 11)
1020 | (bits (inst2, 12, 14) << 8)
1021 | bits (inst2, 0, 7));
1022
1023 regs[bits (inst2, 8, 11)]
1024 = pv_add_constant (regs[bits (insn, 0, 3)],
1025 - (CORE_ADDR) thumb_expand_immediate (imm));
1026 }
1027
1028 else if ((insn & 0xfbf0) == 0xf2a0 /* subw Rd, Rn, #imm */
1029 && (inst2 & 0x8000) == 0x0000)
1030 {
1031 unsigned int imm = ((bits (insn, 10, 10) << 11)
1032 | (bits (inst2, 12, 14) << 8)
1033 | bits (inst2, 0, 7));
1034
1035 regs[bits (inst2, 8, 11)]
1036 = pv_add_constant (regs[bits (insn, 0, 3)], - (CORE_ADDR) imm);
1037 }
1038
1039 else if ((insn & 0xfbff) == 0xf04f) /* mov.w Rd, #const */
1040 {
1041 unsigned int imm = ((bits (insn, 10, 10) << 11)
1042 | (bits (inst2, 12, 14) << 8)
1043 | bits (inst2, 0, 7));
1044
1045 regs[bits (inst2, 8, 11)]
1046 = pv_constant (thumb_expand_immediate (imm));
1047 }
1048
1049 else if ((insn & 0xfbf0) == 0xf240) /* movw Rd, #const */
1050 {
1051 unsigned int imm
1052 = EXTRACT_MOVW_MOVT_IMM_T (insn, inst2);
1053
1054 regs[bits (inst2, 8, 11)] = pv_constant (imm);
1055 }
1056
1057 else if (insn == 0xea5f /* mov.w Rd,Rm */
1058 && (inst2 & 0xf0f0) == 0)
1059 {
1060 int dst_reg = (inst2 & 0x0f00) >> 8;
1061 int src_reg = inst2 & 0xf;
1062 regs[dst_reg] = regs[src_reg];
1063 }
1064
1065 else if ((insn & 0xff7f) == 0xf85f) /* ldr.w Rt,<label> */
1066 {
1067 /* Constant pool loads. */
1068 unsigned int constant;
1069 CORE_ADDR loc;
1070
1071 offset = bits (inst2, 0, 11);
1072 if (insn & 0x0080)
1073 loc = start + 4 + offset;
1074 else
1075 loc = start + 4 - offset;
1076
1077 constant = read_memory_unsigned_integer (loc, 4, byte_order);
1078 regs[bits (inst2, 12, 15)] = pv_constant (constant);
1079 }
1080
1081 else if ((insn & 0xff7f) == 0xe95f) /* ldrd Rt,Rt2,<label> */
1082 {
1083 /* Constant pool loads. */
1084 unsigned int constant;
1085 CORE_ADDR loc;
1086
1087 offset = bits (inst2, 0, 7) << 2;
1088 if (insn & 0x0080)
1089 loc = start + 4 + offset;
1090 else
1091 loc = start + 4 - offset;
1092
1093 constant = read_memory_unsigned_integer (loc, 4, byte_order);
1094 regs[bits (inst2, 12, 15)] = pv_constant (constant);
1095
1096 constant = read_memory_unsigned_integer (loc + 4, 4, byte_order);
1097 regs[bits (inst2, 8, 11)] = pv_constant (constant);
1098 }
1099
1100 else if (thumb2_instruction_changes_pc (insn, inst2))
1101 {
1102 /* Don't scan past anything that might change control flow. */
1103 break;
1104 }
1105 else
1106 {
1107 /* The optimizer might shove anything into the prologue,
1108 so we just skip what we don't recognize. */
1109 unrecognized_pc = start;
1110 }
1111
1112 start += 2;
1113 }
1114 else if (thumb_instruction_changes_pc (insn))
1115 {
1116 /* Don't scan past anything that might change control flow. */
1117 break;
1118 }
1119 else
1120 {
1121 /* The optimizer might shove anything into the prologue,
1122 so we just skip what we don't recognize. */
1123 unrecognized_pc = start;
1124 }
1125
1126 start += 2;
1127 }
1128
1129 arm_debug_printf ("Prologue scan stopped at %s",
1130 paddress (gdbarch, start));
1131
1132 if (unrecognized_pc == 0)
1133 unrecognized_pc = start;
1134
1135 if (cache == NULL)
1136 return unrecognized_pc;
1137
1138 if (pv_is_register (regs[ARM_FP_REGNUM], ARM_SP_REGNUM))
1139 {
1140 /* Frame pointer is fp. Frame size is constant. */
1141 cache->framereg = ARM_FP_REGNUM;
1142 cache->framesize = -regs[ARM_FP_REGNUM].k;
1143 }
1144 else if (pv_is_register (regs[THUMB_FP_REGNUM], ARM_SP_REGNUM))
1145 {
1146 /* Frame pointer is r7. Frame size is constant. */
1147 cache->framereg = THUMB_FP_REGNUM;
1148 cache->framesize = -regs[THUMB_FP_REGNUM].k;
1149 }
1150 else
1151 {
1152 /* Try the stack pointer... this is a bit desperate. */
1153 cache->framereg = ARM_SP_REGNUM;
1154 cache->framesize = -regs[ARM_SP_REGNUM].k;
1155 }
1156
1157 for (i = 0; i < 16; i++)
1158 if (stack.find_reg (gdbarch, i, &offset))
1159 cache->saved_regs[i].set_addr (offset);
1160
1161 return unrecognized_pc;
1162 }
1163
1164
/* Try to analyze the instructions starting from PC, which load symbol
   __stack_chk_guard.  Return the address of the instruction after loading
   this symbol, set the destination register number to *DESTREG, and set
   the size in bytes of the recognized load sequence in *OFFSET.  Return 0
   if the instructions are not recognized.  */

static CORE_ADDR
arm_analyze_load_stack_chk_guard(CORE_ADDR pc, struct gdbarch *gdbarch,
				 unsigned int *destreg, int *offset)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  int is_thumb = arm_pc_is_thumb (gdbarch, pc);
  unsigned int low, high, address;

  /* ADDRESS stays 0 (the "not recognized" result) unless one of the
     patterns below matches.  */
  address = 0;
  if (is_thumb)
    {
      unsigned short insn1
	= read_code_unsigned_integer (pc, 2, byte_order_for_code);

      if ((insn1 & 0xf800) == 0x4800)	/* ldr Rd, #immed */
	{
	  /* PC-relative literal load: the literal pool entry holds the
	     address of __stack_chk_guard.  The PC base is the insn
	     address rounded down to a word boundary, plus 4.  */
	  *destreg = bits (insn1, 8, 10);
	  *offset = 2;
	  address = (pc & 0xfffffffc) + 4 + (bits (insn1, 0, 7) << 2);
	  address = read_memory_unsigned_integer (address, 4,
						  byte_order_for_code);
	}
      else if ((insn1 & 0xfbf0) == 0xf240)	/* movw Rd, #const */
	{
	  unsigned short insn2
	    = read_code_unsigned_integer (pc + 2, 2, byte_order_for_code);

	  low = EXTRACT_MOVW_MOVT_IMM_T (insn1, insn2);

	  insn1
	    = read_code_unsigned_integer (pc + 4, 2, byte_order_for_code);
	  insn2
	    = read_code_unsigned_integer (pc + 6, 2, byte_order_for_code);

	  /* movt Rd, #const */
	  if ((insn1 & 0xfbc0) == 0xf2c0)
	    {
	      /* movw/movt pair: combine the two 16-bit immediates into
		 the full 32-bit symbol address.  */
	      high = EXTRACT_MOVW_MOVT_IMM_T (insn1, insn2);
	      *destreg = bits (insn2, 8, 11);
	      *offset = 8;
	      address = (high << 16 | low);
	    }
	}
    }
  else
    {
      unsigned int insn
	= read_code_unsigned_integer (pc, 4, byte_order_for_code);

      if ((insn & 0x0e5f0000) == 0x041f0000)	/* ldr Rd, [PC, #immed] */
	{
	  /* ARM-mode PC-relative load; the PC reads as the instruction
	     address plus 8.  */
	  address = bits (insn, 0, 11) + pc + 8;
	  address = read_memory_unsigned_integer (address, 4,
						  byte_order_for_code);

	  *destreg = bits (insn, 12, 15);
	  *offset = 4;
	}
      else if ((insn & 0x0ff00000) == 0x03000000)	/* movw Rd, #const */
	{
	  low = EXTRACT_MOVW_MOVT_IMM_A (insn);

	  insn
	    = read_code_unsigned_integer (pc + 4, 4, byte_order_for_code);

	  if ((insn & 0x0ff00000) == 0x03400000)	/* movt Rd, #const */
	    {
	      /* movw/movt pair, as in the Thumb case above.  */
	      high = EXTRACT_MOVW_MOVT_IMM_A (insn);
	      *destreg = bits (insn, 12, 15);
	      *offset = 8;
	      address = (high << 16 | low);
	    }
	}
    }

  return address;
}
1248
/* Try to skip a sequence of instructions used for stack protector.  If PC
   points to the first instruction of this sequence, return the address of
   the first instruction after this sequence, otherwise, return original PC.

   On arm, this sequence of instructions is composed of mainly three steps,
     Step 1: load symbol __stack_chk_guard,
     Step 2: load from address of __stack_chk_guard,
     Step 3: store it to somewhere else.

   Usually, instructions on step 2 and step 3 are the same on various ARM
   architectures.  On step 2, it is one instruction 'ldr Rx, [Rn, #0]', and
   on step 3, it is also one instruction 'str Rx, [r7, #immd]'.  However,
   instructions in step 1 vary from different ARM architectures.  On ARMv7,
   they are,

	movw	Rn, #:lower16:__stack_chk_guard
	movt	Rn, #:upper16:__stack_chk_guard

   On ARMv5t, it is,

	ldr	Rn, .Label
	....
	.Label:
	.word	__stack_chk_guard

   Since ldr/str is a very popular instruction, we can't use them as
   'fingerprint' or 'signature' of stack protector sequence.  Here we choose
   sequence {movw/movt, ldr}/ldr/str plus symbol __stack_chk_guard, if not
   stripped, as the 'fingerprint' of a stack protector code sequence.  */

static CORE_ADDR
arm_skip_stack_protector(CORE_ADDR pc, struct gdbarch *gdbarch)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  unsigned int basereg;
  struct bound_minimal_symbol stack_chk_guard;
  int offset;
  int is_thumb = arm_pc_is_thumb (gdbarch, pc);
  CORE_ADDR addr;

  /* Try to parse the instructions in Step 1.  */
  addr = arm_analyze_load_stack_chk_guard (pc, gdbarch,
					   &basereg, &offset);
  if (!addr)
    return pc;

  stack_chk_guard = lookup_minimal_symbol_by_pc (addr);
  /* ADDR must correspond to a symbol whose name is __stack_chk_guard.
     Otherwise, this sequence cannot be for stack protector.  */
  if (stack_chk_guard.minsym == NULL
      || !startswith (stack_chk_guard.minsym->linkage_name (), "__stack_chk_guard"))
    return pc;

  if (is_thumb)
    {
      unsigned int destreg;
      unsigned short insn
	= read_code_unsigned_integer (pc + offset, 2, byte_order_for_code);

      /* Step 2: ldr Rd, [Rn, #immed], encoding T1.  */
      if ((insn & 0xf800) != 0x6800)
	return pc;
      /* The load must read through the register Step 1 set up.  */
      if (bits (insn, 3, 5) != basereg)
	return pc;
      destreg = bits (insn, 0, 2);

      insn = read_code_unsigned_integer (pc + offset + 2, 2,
					 byte_order_for_code);
      /* Step 3: str Rd, [Rn, #immed], encoding T1.  */
      if ((insn & 0xf800) != 0x6000)
	return pc;
      /* The store must write the value Step 2 loaded.  */
      if (destreg != bits (insn, 0, 2))
	return pc;
    }
  else
    {
      unsigned int destreg;
      unsigned int insn
	= read_code_unsigned_integer (pc + offset, 4, byte_order_for_code);

      /* Step 2: ldr Rd, [Rn, #immed], encoding A1.  */
      if ((insn & 0x0e500000) != 0x04100000)
	return pc;
      if (bits (insn, 16, 19) != basereg)
	return pc;
      destreg = bits (insn, 12, 15);
      /* Step 3: str Rd, [Rn, #immed], encoding A1.  */
      insn = read_code_unsigned_integer (pc + offset + 4,
					 4, byte_order_for_code);
      if ((insn & 0x0e500000) != 0x04000000)
	return pc;
      if (bits (insn, 12, 15) != destreg)
	return pc;
    }
  /* The size of total two instructions ldr/str is 4 on Thumb-2, while 8
     on arm.  */
  if (is_thumb)
    return pc + offset + 4;
  else
    return pc + offset + 8;
}
1350
/* Advance the PC across any function entry prologue instructions to
   reach some "real" code.

   The APCS (ARM Procedure Call Standard) defines the following
   prologue:

   mov ip, sp
   [stmfd sp!, {a1,a2,a3,a4}]
   stmfd sp!, {...,fp,ip,lr,pc}
   [stfe f7, [sp, #-12]!]
   [stfe f6, [sp, #-12]!]
   [stfe f5, [sp, #-12]!]
   [stfe f4, [sp, #-12]!]
   sub fp, ip, #nn @@ nn == 20 or 4 depending on second insn.  */

static CORE_ADDR
arm_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  CORE_ADDR func_addr, limit_pc;

  /* See if we can determine the end of the prologue via the symbol table.
     If so, then return either PC, or the PC after the prologue, whichever
     is greater.  */
  if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
    {
      CORE_ADDR post_prologue_pc
	= skip_prologue_using_sal (gdbarch, func_addr);
      struct compunit_symtab *cust = find_pc_compunit_symtab (func_addr);

      /* Also skip over a stack-protector setup sequence that may follow
	 the line-table prologue boundary.  */
      if (post_prologue_pc)
	post_prologue_pc
	  = arm_skip_stack_protector (post_prologue_pc, gdbarch);

      /* GCC always emits a line note before the prologue and another
	 one after, even if the two are at the same address or on the
	 same line.  Take advantage of this so that we do not need to
	 know every instruction that might appear in the prologue.  We
	 will have producer information for most binaries; if it is
	 missing (e.g. for -gstabs), assume the GNU tools.  */
      if (post_prologue_pc
	  && (cust == NULL
	      || COMPUNIT_PRODUCER (cust) == NULL
	      || startswith (COMPUNIT_PRODUCER (cust), "GNU ")
	      || producer_is_llvm (COMPUNIT_PRODUCER (cust))))
	return post_prologue_pc;

      if (post_prologue_pc != 0)
	{
	  CORE_ADDR analyzed_limit;

	  /* For non-GCC compilers, make sure the entire line is an
	     acceptable prologue; GDB will round this function's
	     return value up to the end of the following line so we
	     can not skip just part of a line (and we do not want to).

	     RealView does not treat the prologue specially, but does
	     associate prologue code with the opening brace; so this
	     lets us skip the first line if we think it is the opening
	     brace.  */
	  if (arm_pc_is_thumb (gdbarch, func_addr))
	    analyzed_limit = thumb_analyze_prologue (gdbarch, func_addr,
						     post_prologue_pc, NULL);
	  else
	    analyzed_limit
	      = arm_analyze_prologue (gdbarch, func_addr, post_prologue_pc,
				      NULL, target_arm_instruction_reader ());

	  /* If the analyzer stopped before the line-table boundary, the
	     boundary is untrustworthy; don't skip anything.  */
	  if (analyzed_limit != post_prologue_pc)
	    return func_addr;

	  return post_prologue_pc;
	}
    }

  /* Can't determine prologue from the symbol table, need to examine
     instructions.  */

  /* Find an upper limit on the function prologue using the debug
     information.  If the debug information could not be used to provide
     that bound, then use an arbitrary large number as the upper bound.  */
  /* Like arm_scan_prologue, stop no later than pc + 64.  */
  limit_pc = skip_prologue_using_sal (gdbarch, pc);
  if (limit_pc == 0)
    limit_pc = pc + 64;		/* Magic.  */

  /* Check if this is Thumb code.  */
  if (arm_pc_is_thumb (gdbarch, pc))
    return thumb_analyze_prologue (gdbarch, pc, limit_pc, NULL);
  else
    return arm_analyze_prologue (gdbarch, pc, limit_pc, NULL,
				 target_arm_instruction_reader ());
}
1445
1446 /* *INDENT-OFF* */
1447 /* Function: thumb_scan_prologue (helper function for arm_scan_prologue)
1448 This function decodes a Thumb function prologue to determine:
1449 1) the size of the stack frame
1450 2) which registers are saved on it
1451 3) the offsets of saved regs
1452 4) the offset from the stack pointer to the frame pointer
1453
1454 A typical Thumb function prologue would create this stack frame
1455 (offsets relative to FP)
1456 old SP -> 24 stack parameters
1457 20 LR
1458 16 R7
1459 R7 -> 0 local variables (16 bytes)
1460 SP -> -12 additional stack space (12 bytes)
1461 The frame size would thus be 36 bytes, and the frame offset would be
1462 12 bytes. The frame register is R7.
1463
1464 The comments for thumb_skip_prolog() describe the algorithm we use
1465 to detect the end of the prolog. */
1466 /* *INDENT-ON* */
1467
1468 static void
1469 thumb_scan_prologue (struct gdbarch *gdbarch, CORE_ADDR prev_pc,
1470 CORE_ADDR block_addr, struct arm_prologue_cache *cache)
1471 {
1472 CORE_ADDR prologue_start;
1473 CORE_ADDR prologue_end;
1474
1475 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
1476 &prologue_end))
1477 {
1478 /* See comment in arm_scan_prologue for an explanation of
1479 this heuristics. */
1480 if (prologue_end > prologue_start + 64)
1481 {
1482 prologue_end = prologue_start + 64;
1483 }
1484 }
1485 else
1486 /* We're in the boondocks: we have no idea where the start of the
1487 function is. */
1488 return;
1489
1490 prologue_end = std::min (prologue_end, prev_pc);
1491
1492 thumb_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
1493 }
1494
/* Return 1 if the ARM instruction INSN restores SP in epilogue, 0
   otherwise.  */

static int
arm_instruction_restores_sp (unsigned int insn)
{
  /* Instructions in the unconditional (NV, cond == 0xf) encoding space
     never restore SP.  */
  if ((insn & 0xf0000000) == 0xf0000000)
    return 0;

  return ((insn & 0x0df0f000) == 0x0080d000	/* ADD SP (reg or imm).  */
	  || (insn & 0x0df0f000) == 0x0040d000	/* SUB SP (reg or imm).  */
	  || (insn & 0x0ffffff0) == 0x01a0d000	/* MOV SP.  */
	  || (insn & 0x0fff0000) == 0x08bd0000	/* POP (LDMIA).  */
	  || (insn & 0x0fff0000) == 0x049d0000);	/* POP of one reg.  */
}
1518
1519 /* Implement immediate value decoding, as described in section A5.2.4
1520 (Modified immediate constants in ARM instructions) of the ARM Architecture
1521 Reference Manual (ARMv7-A and ARMv7-R edition). */
1522
1523 static uint32_t
1524 arm_expand_immediate (uint32_t imm)
1525 {
1526 /* Immediate values are 12 bits long. */
1527 gdb_assert ((imm & 0xfffff000) == 0);
1528
1529 uint32_t unrotated_value = imm & 0xff;
1530 uint32_t rotate_amount = (imm & 0xf00) >> 7;
1531
1532 if (rotate_amount == 0)
1533 return unrotated_value;
1534
1535 return ((unrotated_value >> rotate_amount)
1536 | (unrotated_value << (32 - rotate_amount)));
1537 }
1538
1539 /* Analyze an ARM mode prologue starting at PROLOGUE_START and
1540 continuing no further than PROLOGUE_END. If CACHE is non-NULL,
1541 fill it in. Return the first address not recognized as a prologue
1542 instruction.
1543
1544 We recognize all the instructions typically found in ARM prologues,
1545 plus harmless instructions which can be skipped (either for analysis
1546 purposes, or a more restrictive set that can be skipped when finding
1547 the end of the prologue). */
1548
1549 static CORE_ADDR
1550 arm_analyze_prologue (struct gdbarch *gdbarch,
1551 CORE_ADDR prologue_start, CORE_ADDR prologue_end,
1552 struct arm_prologue_cache *cache,
1553 const arm_instruction_reader &insn_reader)
1554 {
1555 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1556 int regno;
1557 CORE_ADDR offset, current_pc;
1558 pv_t regs[ARM_FPS_REGNUM];
1559 CORE_ADDR unrecognized_pc = 0;
1560
1561 /* Search the prologue looking for instructions that set up the
1562 frame pointer, adjust the stack pointer, and save registers.
1563
1564 Be careful, however, and if it doesn't look like a prologue,
1565 don't try to scan it. If, for instance, a frameless function
1566 begins with stmfd sp!, then we will tell ourselves there is
1567 a frame, which will confuse stack traceback, as well as "finish"
1568 and other operations that rely on a knowledge of the stack
1569 traceback. */
1570
1571 for (regno = 0; regno < ARM_FPS_REGNUM; regno++)
1572 regs[regno] = pv_register (regno, 0);
1573 pv_area stack (ARM_SP_REGNUM, gdbarch_addr_bit (gdbarch));
1574
1575 for (current_pc = prologue_start;
1576 current_pc < prologue_end;
1577 current_pc += 4)
1578 {
1579 uint32_t insn = insn_reader.read (current_pc, byte_order_for_code);
1580
1581 if (insn == 0xe1a0c00d) /* mov ip, sp */
1582 {
1583 regs[ARM_IP_REGNUM] = regs[ARM_SP_REGNUM];
1584 continue;
1585 }
1586 else if ((insn & 0xfff00000) == 0xe2800000 /* add Rd, Rn, #n */
1587 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1588 {
1589 uint32_t imm = arm_expand_immediate (insn & 0xfff);
1590 int rd = bits (insn, 12, 15);
1591 regs[rd] = pv_add_constant (regs[bits (insn, 16, 19)], imm);
1592 continue;
1593 }
1594 else if ((insn & 0xfff00000) == 0xe2400000 /* sub Rd, Rn, #n */
1595 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1596 {
1597 uint32_t imm = arm_expand_immediate (insn & 0xfff);
1598 int rd = bits (insn, 12, 15);
1599 regs[rd] = pv_add_constant (regs[bits (insn, 16, 19)], -imm);
1600 continue;
1601 }
1602 else if ((insn & 0xffff0fff) == 0xe52d0004) /* str Rd,
1603 [sp, #-4]! */
1604 {
1605 if (stack.store_would_trash (regs[ARM_SP_REGNUM]))
1606 break;
1607 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -4);
1608 stack.store (regs[ARM_SP_REGNUM], 4,
1609 regs[bits (insn, 12, 15)]);
1610 continue;
1611 }
1612 else if ((insn & 0xffff0000) == 0xe92d0000)
1613 /* stmfd sp!, {..., fp, ip, lr, pc}
1614 or
1615 stmfd sp!, {a1, a2, a3, a4} */
1616 {
1617 int mask = insn & 0xffff;
1618
1619 if (stack.store_would_trash (regs[ARM_SP_REGNUM]))
1620 break;
1621
1622 /* Calculate offsets of saved registers. */
1623 for (regno = ARM_PC_REGNUM; regno >= 0; regno--)
1624 if (mask & (1 << regno))
1625 {
1626 regs[ARM_SP_REGNUM]
1627 = pv_add_constant (regs[ARM_SP_REGNUM], -4);
1628 stack.store (regs[ARM_SP_REGNUM], 4, regs[regno]);
1629 }
1630 }
1631 else if ((insn & 0xffff0000) == 0xe54b0000 /* strb rx,[r11,#-n] */
1632 || (insn & 0xffff00f0) == 0xe14b00b0 /* strh rx,[r11,#-n] */
1633 || (insn & 0xffffc000) == 0xe50b0000) /* str rx,[r11,#-n] */
1634 {
1635 /* No need to add this to saved_regs -- it's just an arg reg. */
1636 continue;
1637 }
1638 else if ((insn & 0xffff0000) == 0xe5cd0000 /* strb rx,[sp,#n] */
1639 || (insn & 0xffff00f0) == 0xe1cd00b0 /* strh rx,[sp,#n] */
1640 || (insn & 0xffffc000) == 0xe58d0000) /* str rx,[sp,#n] */
1641 {
1642 /* No need to add this to saved_regs -- it's just an arg reg. */
1643 continue;
1644 }
1645 else if ((insn & 0xfff00000) == 0xe8800000 /* stm Rn,
1646 { registers } */
1647 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1648 {
1649 /* No need to add this to saved_regs -- it's just arg regs. */
1650 continue;
1651 }
1652 else if ((insn & 0xfffff000) == 0xe24cb000) /* sub fp, ip #n */
1653 {
1654 uint32_t imm = arm_expand_immediate (insn & 0xfff);
1655 regs[ARM_FP_REGNUM] = pv_add_constant (regs[ARM_IP_REGNUM], -imm);
1656 }
1657 else if ((insn & 0xfffff000) == 0xe24dd000) /* sub sp, sp #n */
1658 {
1659 uint32_t imm = arm_expand_immediate(insn & 0xfff);
1660 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -imm);
1661 }
1662 else if ((insn & 0xffff7fff) == 0xed6d0103 /* stfe f?,
1663 [sp, -#c]! */
1664 && gdbarch_tdep (gdbarch)->have_fpa_registers)
1665 {
1666 if (stack.store_would_trash (regs[ARM_SP_REGNUM]))
1667 break;
1668
1669 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -12);
1670 regno = ARM_F0_REGNUM + ((insn >> 12) & 0x07);
1671 stack.store (regs[ARM_SP_REGNUM], 12, regs[regno]);
1672 }
1673 else if ((insn & 0xffbf0fff) == 0xec2d0200 /* sfmfd f0, 4,
1674 [sp!] */
1675 && gdbarch_tdep (gdbarch)->have_fpa_registers)
1676 {
1677 int n_saved_fp_regs;
1678 unsigned int fp_start_reg, fp_bound_reg;
1679
1680 if (stack.store_would_trash (regs[ARM_SP_REGNUM]))
1681 break;
1682
1683 if ((insn & 0x800) == 0x800) /* N0 is set */
1684 {
1685 if ((insn & 0x40000) == 0x40000) /* N1 is set */
1686 n_saved_fp_regs = 3;
1687 else
1688 n_saved_fp_regs = 1;
1689 }
1690 else
1691 {
1692 if ((insn & 0x40000) == 0x40000) /* N1 is set */
1693 n_saved_fp_regs = 2;
1694 else
1695 n_saved_fp_regs = 4;
1696 }
1697
1698 fp_start_reg = ARM_F0_REGNUM + ((insn >> 12) & 0x7);
1699 fp_bound_reg = fp_start_reg + n_saved_fp_regs;
1700 for (; fp_start_reg < fp_bound_reg; fp_start_reg++)
1701 {
1702 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -12);
1703 stack.store (regs[ARM_SP_REGNUM], 12,
1704 regs[fp_start_reg++]);
1705 }
1706 }
1707 else if ((insn & 0xff000000) == 0xeb000000 && cache == NULL) /* bl */
1708 {
1709 /* Allow some special function calls when skipping the
1710 prologue; GCC generates these before storing arguments to
1711 the stack. */
1712 CORE_ADDR dest = BranchDest (current_pc, insn);
1713
1714 if (skip_prologue_function (gdbarch, dest, 0))
1715 continue;
1716 else
1717 break;
1718 }
1719 else if ((insn & 0xf0000000) != 0xe0000000)
1720 break; /* Condition not true, exit early. */
1721 else if (arm_instruction_changes_pc (insn))
1722 /* Don't scan past anything that might change control flow. */
1723 break;
1724 else if (arm_instruction_restores_sp (insn))
1725 {
1726 /* Don't scan past the epilogue. */
1727 break;
1728 }
1729 else if ((insn & 0xfe500000) == 0xe8100000 /* ldm */
1730 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1731 /* Ignore block loads from the stack, potentially copying
1732 parameters from memory. */
1733 continue;
1734 else if ((insn & 0xfc500000) == 0xe4100000
1735 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1736 /* Similarly ignore single loads from the stack. */
1737 continue;
1738 else if ((insn & 0xffff0ff0) == 0xe1a00000)
1739 /* MOV Rd, Rm. Skip register copies, i.e. saves to another
1740 register instead of the stack. */
1741 continue;
1742 else
1743 {
1744 /* The optimizer might shove anything into the prologue, if
1745 we build up cache (cache != NULL) from scanning prologue,
1746 we just skip what we don't recognize and scan further to
1747 make cache as complete as possible. However, if we skip
1748 prologue, we'll stop immediately on unrecognized
1749 instruction. */
1750 unrecognized_pc = current_pc;
1751 if (cache != NULL)
1752 continue;
1753 else
1754 break;
1755 }
1756 }
1757
1758 if (unrecognized_pc == 0)
1759 unrecognized_pc = current_pc;
1760
1761 if (cache)
1762 {
1763 int framereg, framesize;
1764
1765 /* The frame size is just the distance from the frame register
1766 to the original stack pointer. */
1767 if (pv_is_register (regs[ARM_FP_REGNUM], ARM_SP_REGNUM))
1768 {
1769 /* Frame pointer is fp. */
1770 framereg = ARM_FP_REGNUM;
1771 framesize = -regs[ARM_FP_REGNUM].k;
1772 }
1773 else
1774 {
1775 /* Try the stack pointer... this is a bit desperate. */
1776 framereg = ARM_SP_REGNUM;
1777 framesize = -regs[ARM_SP_REGNUM].k;
1778 }
1779
1780 cache->framereg = framereg;
1781 cache->framesize = framesize;
1782
1783 for (regno = 0; regno < ARM_FPS_REGNUM; regno++)
1784 if (stack.find_reg (gdbarch, regno, &offset))
1785 cache->saved_regs[regno].set_addr (offset);
1786 }
1787
1788 arm_debug_printf ("Prologue scan stopped at %s",
1789 paddress (gdbarch, unrecognized_pc));
1790
1791 return unrecognized_pc;
1792 }
1793
/* Scan the prologue of the function containing THIS_FRAME's PC and
   fill in CACHE with the frame register, frame size and saved-register
   offsets.  Dispatches to the Thumb scanner for Thumb frames, otherwise
   bounds the scan region and delegates to arm_analyze_prologue.  */

static void
arm_scan_prologue (struct frame_info *this_frame,
		   struct arm_prologue_cache *cache)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  CORE_ADDR prologue_start, prologue_end;
  CORE_ADDR prev_pc = get_frame_pc (this_frame);
  CORE_ADDR block_addr = get_frame_address_in_block (this_frame);

  /* Assume there is no frame until proven otherwise.  */
  cache->framereg = ARM_SP_REGNUM;
  cache->framesize = 0;

  /* Check for Thumb prologue.  */
  if (arm_frame_is_thumb (this_frame))
    {
      thumb_scan_prologue (gdbarch, prev_pc, block_addr, cache);
      return;
    }

  /* Find the function prologue.  If we can't find the function in
     the symbol table, peek in the stack frame to find the PC.  */
  if (find_pc_partial_function (block_addr, NULL, &prologue_start,
				&prologue_end))
    {
      /* One way to find the end of the prologue (which works well
	 for unoptimized code) is to do the following:

	    struct symtab_and_line sal = find_pc_line (prologue_start, 0);

	    if (sal.line == 0)
	      prologue_end = prev_pc;
	    else if (sal.end < prologue_end)
	      prologue_end = sal.end;

	 This mechanism is very accurate so long as the optimizer
	 doesn't move any instructions from the function body into the
	 prologue.  If this happens, sal.end will be the last
	 instruction in the first hunk of prologue code just before
	 the first instruction that the scheduler has moved from
	 the body to the prologue.

	 In order to make sure that we scan all of the prologue
	 instructions, we use a slightly less accurate mechanism which
	 may scan more than necessary.  To help compensate for this
	 lack of accuracy, the prologue scanning loop below contains
	 several clauses which'll cause the loop to terminate early if
	 an implausible prologue instruction is encountered.

	 The expression

	      prologue_start + 64

	 is a suitable endpoint since it accounts for the largest
	 possible prologue plus up to five instructions inserted by
	 the scheduler.  */

      if (prologue_end > prologue_start + 64)
	{
	  prologue_end = prologue_start + 64;	/* See above.  */
	}
    }
  else
    {
      /* We have no symbol information.  Our only option is to assume this
	 function has a standard stack frame and the normal frame register.
	 Then, we can find the value of our frame pointer on entrance to
	 the callee (or at the present moment if this is the innermost frame).
	 The value stored there should be the address of the stmfd + 8.  */
      CORE_ADDR frame_loc;
      ULONGEST return_value;

      /* AAPCS does not use a frame register, so we can abort here.  */
      if (gdbarch_tdep (gdbarch)->arm_abi == ARM_ABI_AAPCS)
	return;

      frame_loc = get_frame_register_unsigned (this_frame, ARM_FP_REGNUM);
      if (!safe_read_memory_unsigned_integer (frame_loc, 4, byte_order,
					      &return_value))
	return;
      else
	{
	  /* Back up from the saved value to the stmfd that stored it,
	     then scan forward from there.  */
	  prologue_start = gdbarch_addr_bits_remove
			     (gdbarch, return_value) - 8;
	  prologue_end = prologue_start + 64;	/* See above.  */
	}
    }

  /* Never scan past the point the frame has actually reached.  */
  if (prev_pc < prologue_end)
    prologue_end = prev_pc;

  arm_analyze_prologue (gdbarch, prologue_start, prologue_end, cache,
			target_arm_instruction_reader ());
}
1889
1890 static struct arm_prologue_cache *
1891 arm_make_prologue_cache (struct frame_info *this_frame)
1892 {
1893 int reg;
1894 struct arm_prologue_cache *cache;
1895 CORE_ADDR unwound_fp;
1896
1897 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
1898 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
1899
1900 arm_scan_prologue (this_frame, cache);
1901
1902 unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
1903 if (unwound_fp == 0)
1904 return cache;
1905
1906 cache->prev_sp = unwound_fp + cache->framesize;
1907
1908 /* Calculate actual addresses of saved registers using offsets
1909 determined by arm_scan_prologue. */
1910 for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
1911 if (cache->saved_regs[reg].is_addr ())
1912 cache->saved_regs[reg].set_addr (cache->saved_regs[reg].addr ()
1913 + cache->prev_sp);
1914
1915 return cache;
1916 }
1917
1918 /* Implementation of the stop_reason hook for arm_prologue frames. */
1919
1920 static enum unwind_stop_reason
1921 arm_prologue_unwind_stop_reason (struct frame_info *this_frame,
1922 void **this_cache)
1923 {
1924 struct arm_prologue_cache *cache;
1925 CORE_ADDR pc;
1926
1927 if (*this_cache == NULL)
1928 *this_cache = arm_make_prologue_cache (this_frame);
1929 cache = (struct arm_prologue_cache *) *this_cache;
1930
1931 /* This is meant to halt the backtrace at "_start". */
1932 pc = get_frame_pc (this_frame);
1933 if (pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
1934 return UNWIND_OUTERMOST;
1935
1936 /* If we've hit a wall, stop. */
1937 if (cache->prev_sp == 0)
1938 return UNWIND_OUTERMOST;
1939
1940 return UNWIND_NO_REASON;
1941 }
1942
1943 /* Our frame ID for a normal frame is the current function's starting PC
1944 and the caller's SP when we were called. */
1945
1946 static void
1947 arm_prologue_this_id (struct frame_info *this_frame,
1948 void **this_cache,
1949 struct frame_id *this_id)
1950 {
1951 struct arm_prologue_cache *cache;
1952 struct frame_id id;
1953 CORE_ADDR pc, func;
1954
1955 if (*this_cache == NULL)
1956 *this_cache = arm_make_prologue_cache (this_frame);
1957 cache = (struct arm_prologue_cache *) *this_cache;
1958
1959 /* Use function start address as part of the frame ID. If we cannot
1960 identify the start address (due to missing symbol information),
1961 fall back to just using the current PC. */
1962 pc = get_frame_pc (this_frame);
1963 func = get_frame_func (this_frame);
1964 if (!func)
1965 func = pc;
1966
1967 id = frame_id_build (cache->prev_sp, func);
1968 *this_id = id;
1969 }
1970
1971 static struct value *
1972 arm_prologue_prev_register (struct frame_info *this_frame,
1973 void **this_cache,
1974 int prev_regnum)
1975 {
1976 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1977 struct arm_prologue_cache *cache;
1978
1979 if (*this_cache == NULL)
1980 *this_cache = arm_make_prologue_cache (this_frame);
1981 cache = (struct arm_prologue_cache *) *this_cache;
1982
1983 /* If we are asked to unwind the PC, then we need to return the LR
1984 instead. The prologue may save PC, but it will point into this
1985 frame's prologue, not the next frame's resume location. Also
1986 strip the saved T bit. A valid LR may have the low bit set, but
1987 a valid PC never does. */
1988 if (prev_regnum == ARM_PC_REGNUM)
1989 {
1990 CORE_ADDR lr;
1991
1992 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
1993 return frame_unwind_got_constant (this_frame, prev_regnum,
1994 arm_addr_bits_remove (gdbarch, lr));
1995 }
1996
1997 /* SP is generally not saved to the stack, but this frame is
1998 identified by the next frame's stack pointer at the time of the call.
1999 The value was already reconstructed into PREV_SP. */
2000 if (prev_regnum == ARM_SP_REGNUM)
2001 return frame_unwind_got_constant (this_frame, prev_regnum, cache->prev_sp);
2002
2003 /* The CPSR may have been changed by the call instruction and by the
2004 called function. The only bit we can reconstruct is the T bit,
2005 by checking the low bit of LR as of the call. This is a reliable
2006 indicator of Thumb-ness except for some ARM v4T pre-interworking
2007 Thumb code, which could get away with a clear low bit as long as
2008 the called function did not use bx. Guess that all other
2009 bits are unchanged; the condition flags are presumably lost,
2010 but the processor status is likely valid. */
2011 if (prev_regnum == ARM_PS_REGNUM)
2012 {
2013 CORE_ADDR lr, cpsr;
2014 ULONGEST t_bit = arm_psr_thumb_bit (gdbarch);
2015
2016 cpsr = get_frame_register_unsigned (this_frame, prev_regnum);
2017 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
2018 if (IS_THUMB_ADDR (lr))
2019 cpsr |= t_bit;
2020 else
2021 cpsr &= ~t_bit;
2022 return frame_unwind_got_constant (this_frame, prev_regnum, cpsr);
2023 }
2024
2025 return trad_frame_get_prev_register (this_frame, cache->saved_regs,
2026 prev_regnum);
2027 }
2028
/* Unwinder based on prologue analysis.  Uses arm_prologue_this_id and
   arm_prologue_prev_register above, with the generic default sniffer.  */
static frame_unwind arm_prologue_unwind = {
  "arm prologue",
  NORMAL_FRAME,
  arm_prologue_unwind_stop_reason,
  arm_prologue_this_id,
  arm_prologue_prev_register,
  NULL,
  default_frame_sniffer
};
2038
2039 /* Maintain a list of ARM exception table entries per objfile, similar to the
2040 list of mapping symbols. We only cache entries for standard ARM-defined
2041 personality routines; the cache will contain only the frame unwinding
2042 instructions associated with the entry (not the descriptors). */
2043
struct arm_exidx_entry
{
  /* Section-relative start address of the region covered by this
     entry (the containing section's VMA has been subtracted).  */
  CORE_ADDR addr;

  /* Normalized unwind instructions, obstack-allocated and terminated
     by the implicit 0xb0 "Finish" opcode; NULL when there are no
     unwind instructions (e.g. EXIDX_CANTUNWIND).  */
  gdb_byte *entry;

  /* Order entries by start address, so lookups can use
     std::lower_bound.  */
  bool operator< (const arm_exidx_entry &other) const
  {
    return addr < other.addr;
  }
};
2054
/* Per-objfile exception table cache: one vector of entries per BFD
   section, indexed by the section's index.  Each vector is kept in
   order of increasing address.  */
struct arm_exidx_data
{
  std::vector<std::vector<arm_exidx_entry>> section_maps;
};

/* Per-BFD key to store exception handling information.  */
static const struct bfd_key<arm_exidx_data> arm_exidx_data_key;
2062
2063 static struct obj_section *
2064 arm_obj_section_from_vma (struct objfile *objfile, bfd_vma vma)
2065 {
2066 struct obj_section *osect;
2067
2068 ALL_OBJFILE_OSECTIONS (objfile, osect)
2069 if (bfd_section_flags (osect->the_bfd_section) & SEC_ALLOC)
2070 {
2071 bfd_vma start, size;
2072 start = bfd_section_vma (osect->the_bfd_section);
2073 size = bfd_section_size (osect->the_bfd_section);
2074
2075 if (start <= vma && vma < start + size)
2076 return osect;
2077 }
2078
2079 return NULL;
2080 }
2081
2082 /* Parse contents of exception table and exception index sections
2083 of OBJFILE, and fill in the exception table entry cache.
2084
2085 For each entry that refers to a standard ARM-defined personality
2086 routine, extract the frame unwinding instructions (from either
2087 the index or the table section). The unwinding instructions
2088 are normalized by:
2089 - extracting them from the rest of the table data
2090 - converting to host endianness
2091 - appending the implicit 0xb0 ("Finish") code
2092
2093 The extracted and normalized instructions are stored for later
2094 retrieval by the arm_find_exidx_entry routine. */
2095
static void
arm_exidx_new_objfile (struct objfile *objfile)
{
  struct arm_exidx_data *data;
  asection *exidx, *extab;
  bfd_vma exidx_vma = 0, extab_vma = 0;
  LONGEST i;

  /* If we've already touched this file, do nothing.  */
  if (!objfile || arm_exidx_data_key.get (objfile->obfd) != NULL)
    return;

  /* Read contents of exception table and index.  */
  exidx = bfd_get_section_by_name (objfile->obfd, ELF_STRING_ARM_unwind);
  gdb::byte_vector exidx_data;
  if (exidx)
    {
      exidx_vma = bfd_section_vma (exidx);
      exidx_data.resize (bfd_section_size (exidx));

      if (!bfd_get_section_contents (objfile->obfd, exidx,
				     exidx_data.data (), 0,
				     exidx_data.size ()))
	return;
    }

  extab = bfd_get_section_by_name (objfile->obfd, ".ARM.extab");
  gdb::byte_vector extab_data;
  if (extab)
    {
      extab_vma = bfd_section_vma (extab);
      extab_data.resize (bfd_section_size (extab));

      if (!bfd_get_section_contents (objfile->obfd, extab,
				     extab_data.data (), 0,
				     extab_data.size ()))
	return;
    }

  /* Allocate exception table data structure.  */
  data = arm_exidx_data_key.emplace (objfile->obfd);
  data->section_maps.resize (objfile->obfd->section_count);

  /* Fill in exception table.  Each index entry is a pair of 32-bit
     words: a self-relative offset to the function start, and either
     an inline unwind description or an offset into .ARM.extab.  */
  for (i = 0; i < exidx_data.size () / 8; i++)
    {
      struct arm_exidx_entry new_exidx_entry;
      bfd_vma idx = bfd_h_get_32 (objfile->obfd, exidx_data.data () + i * 8);
      bfd_vma val = bfd_h_get_32 (objfile->obfd,
				  exidx_data.data () + i * 8 + 4);
      bfd_vma addr = 0, word = 0;
      int n_bytes = 0, n_words = 0;
      struct obj_section *sec;
      gdb_byte *entry = NULL;

      /* Extract address of start of function.  The XOR/subtract pair
	 sign-extends the 31-bit self-relative offset.  */
      idx = ((idx & 0x7fffffff) ^ 0x40000000) - 0x40000000;
      idx += exidx_vma + i * 8;

      /* Find section containing function and compute section offset.  */
      sec = arm_obj_section_from_vma (objfile, idx);
      if (sec == NULL)
	continue;
      idx -= bfd_section_vma (sec->the_bfd_section);

      /* Determine address of exception table entry.  */
      if (val == 1)
	{
	  /* EXIDX_CANTUNWIND -- no exception table entry present.
	     ENTRY stays NULL in this case.  */
	}
      else if ((val & 0xff000000) == 0x80000000)
	{
	  /* Exception table entry embedded in .ARM.exidx
	     -- must be short form.  */
	  word = val;
	  n_bytes = 3;
	}
      else if (!(val & 0x80000000))
	{
	  /* Exception table entry in .ARM.extab.  Again a
	     sign-extended 31-bit self-relative offset.  */
	  addr = ((val & 0x7fffffff) ^ 0x40000000) - 0x40000000;
	  addr += exidx_vma + i * 8 + 4;

	  if (addr >= extab_vma && addr + 4 <= extab_vma + extab_data.size ())
	    {
	      word = bfd_h_get_32 (objfile->obfd,
				   extab_data.data () + addr - extab_vma);
	      addr += 4;

	      if ((word & 0xff000000) == 0x80000000)
		{
		  /* Short form.  */
		  n_bytes = 3;
		}
	      else if ((word & 0xff000000) == 0x81000000
		       || (word & 0xff000000) == 0x82000000)
		{
		  /* Long form.  Word count is in bits 16..23.  */
		  n_bytes = 2;
		  n_words = ((word >> 16) & 0xff);
		}
	      else if (!(word & 0x80000000))
		{
		  bfd_vma pers;
		  struct obj_section *pers_sec;
		  int gnu_personality = 0;

		  /* Custom personality routine.  */
		  pers = ((word & 0x7fffffff) ^ 0x40000000) - 0x40000000;
		  pers = UNMAKE_THUMB_ADDR (pers + addr - 4);

		  /* Check whether we've got one of the variants of the
		     GNU personality routines.  */
		  pers_sec = arm_obj_section_from_vma (objfile, pers);
		  if (pers_sec)
		    {
		      static const char *personality[] =
			{
			  "__gcc_personality_v0",
			  "__gxx_personality_v0",
			  "__gcj_personality_v0",
			  "__gnu_objc_personality_v0",
			  NULL
			};

		      CORE_ADDR pc = pers + pers_sec->offset ();
		      int k;

		      for (k = 0; personality[k]; k++)
			if (lookup_minimal_symbol_by_pc_name
			      (pc, personality[k], objfile))
			  {
			    gnu_personality = 1;
			    break;
			  }
		    }

		  /* If so, the next word contains a word count in the high
		     byte, followed by the same unwind instructions as the
		     pre-defined forms.  */
		  if (gnu_personality
		      && addr + 4 <= extab_vma + extab_data.size ())
		    {
		      word = bfd_h_get_32 (objfile->obfd,
					   (extab_data.data ()
					    + addr - extab_vma));
		      addr += 4;
		      n_bytes = 3;
		      n_words = ((word >> 24) & 0xff);
		    }
		}
	    }
	}

      /* Sanity check address: drop the entry if the trailing words
	 would run outside .ARM.extab.  */
      if (n_words)
	if (addr < extab_vma
	    || addr + 4 * n_words > extab_vma + extab_data.size ())
	  n_words = n_bytes = 0;

      /* The unwind instructions reside in WORD (only the N_BYTES least
	 significant bytes are valid), followed by N_WORDS words in the
	 extab section starting at ADDR.  */
      if (n_bytes || n_words)
	{
	  gdb_byte *p = entry
	    = (gdb_byte *) obstack_alloc (&objfile->objfile_obstack,
					  n_bytes + n_words * 4 + 1);

	  /* Copy the in-word bytes, most significant first.  */
	  while (n_bytes--)
	    *p++ = (gdb_byte) ((word >> (8 * n_bytes)) & 0xff);

	  /* Copy the trailing words, converting each to a big-endian
	     byte sequence (host-order normalization).  */
	  while (n_words--)
	    {
	      word = bfd_h_get_32 (objfile->obfd,
				   extab_data.data () + addr - extab_vma);
	      addr += 4;

	      *p++ = (gdb_byte) ((word >> 24) & 0xff);
	      *p++ = (gdb_byte) ((word >> 16) & 0xff);
	      *p++ = (gdb_byte) ((word >> 8) & 0xff);
	      *p++ = (gdb_byte) (word & 0xff);
	    }

	  /* Implied "Finish" to terminate the list.  */
	  *p++ = 0xb0;
	}

      /* Push entry onto vector.  They are guaranteed to always
	 appear in order of increasing addresses.  */
      new_exidx_entry.addr = idx;
      new_exidx_entry.entry = entry;
      data->section_maps[sec->the_bfd_section->index].push_back
	(new_exidx_entry);
    }
}
2292
2293 /* Search for the exception table entry covering MEMADDR. If one is found,
2294 return a pointer to its data. Otherwise, return 0. If START is non-NULL,
2295 set *START to the start of the region covered by this entry. */
2296
static gdb_byte *
arm_find_exidx_entry (CORE_ADDR memaddr, CORE_ADDR *start)
{
  struct obj_section *sec;

  sec = find_pc_section (memaddr);
  if (sec != NULL)
    {
      struct arm_exidx_data *data;
      /* Entries are keyed by section-relative address.  */
      struct arm_exidx_entry map_key = { memaddr - sec->addr (), 0 };

      data = arm_exidx_data_key.get (sec->objfile->obfd);
      if (data != NULL)
	{
	  std::vector<arm_exidx_entry> &map
	    = data->section_maps[sec->the_bfd_section->index];
	  if (!map.empty ())
	    {
	      auto idx = std::lower_bound (map.begin (), map.end (), map_key);

	      /* std::lower_bound finds the earliest ordered insertion
		 point.  If the following symbol starts at this exact
		 address, we use that; otherwise, the preceding
		 exception table entry covers this address.  */
	      if (idx < map.end ())
		{
		  if (idx->addr == map_key.addr)
		    {
		      if (start)
			*start = idx->addr + sec->addr ();
		      return idx->entry;
		    }
		}

	      /* Fall back to the entry immediately preceding the
		 insertion point, if any.  */
	      if (idx > map.begin ())
		{
		  idx = idx - 1;
		  if (start)
		    *start = idx->addr + sec->addr ();
		  return idx->entry;
		}
	    }
	}
    }

  /* No section, no cached data, or MEMADDR precedes every entry.  */
  return NULL;
}
2344
2345 /* Given the current frame THIS_FRAME, and its associated frame unwinding
2346 instruction list from the ARM exception table entry ENTRY, allocate and
2347 return a prologue cache structure describing how to unwind this frame.
2348
2349 Return NULL if the unwinding instruction list contains a "spare",
2350 "reserved" or "refuse to unwind" instruction as defined in section
2351 "9.3 Frame unwinding instructions" of the "Exception Handling ABI
2352 for the ARM Architecture" document. */
2353
static struct arm_prologue_cache *
arm_exidx_fill_cache (struct frame_info *this_frame, gdb_byte *entry)
{
  /* VSP is the "virtual stack pointer" the unwind instructions
     operate on; VSP_VALID tracks whether it has been (re-)loaded
     from the current frame.  */
  CORE_ADDR vsp = 0;
  int vsp_valid = 0;

  struct arm_prologue_cache *cache;
  cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);

  for (;;)
    {
      gdb_byte insn;

      /* Whenever we reload SP, we actually have to retrieve its
	 actual value in the current frame.  */
      if (!vsp_valid)
	{
	  if (cache->saved_regs[ARM_SP_REGNUM].is_realreg ())
	    {
	      int reg = cache->saved_regs[ARM_SP_REGNUM].realreg ();
	      vsp = get_frame_register_unsigned (this_frame, reg);
	    }
	  else
	    {
	      CORE_ADDR addr = cache->saved_regs[ARM_SP_REGNUM].addr ();
	      vsp = get_frame_memory_unsigned (this_frame, addr, 4);
	    }

	  vsp_valid = 1;
	}

      /* Decode next unwind instruction.  */
      insn = *entry++;

      if ((insn & 0xc0) == 0)
	{
	  /* 00xxxxxx: vsp += (xxxxxx << 2) + 4.  */
	  int offset = insn & 0x3f;
	  vsp += (offset << 2) + 4;
	}
      else if ((insn & 0xc0) == 0x40)
	{
	  /* 01xxxxxx: vsp -= (xxxxxx << 2) + 4.  */
	  int offset = insn & 0x3f;
	  vsp -= (offset << 2) + 4;
	}
      else if ((insn & 0xf0) == 0x80)
	{
	  /* 1000iiii iiiiiiii: pop r4..r15 under a 12-bit mask.  */
	  int mask = ((insn & 0xf) << 8) | *entry++;
	  int i;

	  /* The special case of an all-zero mask identifies
	     "Refuse to unwind".  We return NULL to fall back
	     to the prologue analyzer.  */
	  if (mask == 0)
	    return NULL;

	  /* Pop registers r4..r15 under mask.  */
	  for (i = 0; i < 12; i++)
	    if (mask & (1 << i))
	      {
		cache->saved_regs[4 + i].set_addr (vsp);
		vsp += 4;
	      }

	  /* Special-case popping SP -- we need to reload vsp.  */
	  if (mask & (1 << (ARM_SP_REGNUM - 4)))
	    vsp_valid = 0;
	}
      else if ((insn & 0xf0) == 0x90)
	{
	  /* 1001nnnn: set vsp = r[nnnn].  */
	  int reg = insn & 0xf;

	  /* Reserved cases.  */
	  if (reg == ARM_SP_REGNUM || reg == ARM_PC_REGNUM)
	    return NULL;

	  /* Set SP from another register and mark VSP for reload.  */
	  cache->saved_regs[ARM_SP_REGNUM] = cache->saved_regs[reg];
	  vsp_valid = 0;
	}
      else if ((insn & 0xf0) == 0xa0)
	{
	  /* 1010xnnn: pop r4..r[4+nnn], plus LR if x is set.  */
	  int count = insn & 0x7;
	  int pop_lr = (insn & 0x8) != 0;
	  int i;

	  /* Pop r4..r[4+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[4 + i].set_addr (vsp);
	      vsp += 4;
	    }

	  /* If indicated by flag, pop LR as well.  */
	  if (pop_lr)
	    {
	      cache->saved_regs[ARM_LR_REGNUM].set_addr (vsp);
	      vsp += 4;
	    }
	}
      else if (insn == 0xb0)
	{
	  /* 10110000: Finish.  */
	  /* We could only have updated PC by popping into it; if so, it
	     will show up as address.  Otherwise, copy LR into PC.  */
	  if (!cache->saved_regs[ARM_PC_REGNUM].is_addr ())
	    cache->saved_regs[ARM_PC_REGNUM]
	      = cache->saved_regs[ARM_LR_REGNUM];

	  /* We're done.  */
	  break;
	}
      else if (insn == 0xb1)
	{
	  /* 10110001 0000iiii: pop r0..r3 under mask.  */
	  int mask = *entry++;
	  int i;

	  /* All-zero mask and mask >= 16 is "spare".  */
	  if (mask == 0 || mask >= 16)
	    return NULL;

	  /* Pop r0..r3 under mask.  */
	  for (i = 0; i < 4; i++)
	    if (mask & (1 << i))
	      {
		cache->saved_regs[i].set_addr (vsp);
		vsp += 4;
	      }
	}
      else if (insn == 0xb2)
	{
	  /* 10110010 uleb128: vsp += 0x204 + (uleb128 << 2).  */
	  ULONGEST offset = 0;
	  unsigned shift = 0;

	  /* Decode the ULEB128-encoded offset (7 bits per byte,
	     high bit set on all but the last byte).  */
	  do
	    {
	      offset |= (*entry & 0x7f) << shift;
	      shift += 7;
	    }
	  while (*entry++ & 0x80);

	  vsp += 0x204 + (offset << 2);
	}
      else if (insn == 0xb3)
	{
	  /* 10110011 sssscccc: pop VFP registers saved by FSTMFDX.  */
	  int start = *entry >> 4;
	  int count = (*entry++) & 0xf;
	  int i;

	  /* Only registers D0..D15 are valid here.  */
	  if (start + count >= 16)
	    return NULL;

	  /* Pop VFP double-precision registers D[start]..D[start+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_D0_REGNUM + start + i].set_addr (vsp);
	      vsp += 8;
	    }

	  /* Add an extra 4 bytes for FSTMFDX-style stack.  */
	  vsp += 4;
	}
      else if ((insn & 0xf8) == 0xb8)
	{
	  /* 10111nnn: pop VFP registers D8..D[8+nnn] (FSTMFDX form).  */
	  int count = insn & 0x7;
	  int i;

	  /* Pop VFP double-precision registers D[8]..D[8+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_D0_REGNUM + 8 + i].set_addr (vsp);
	      vsp += 8;
	    }

	  /* Add an extra 4 bytes for FSTMFDX-style stack.  */
	  vsp += 4;
	}
      else if (insn == 0xc6)
	{
	  /* 11000110 sssscccc: pop iWMMXt registers.  */
	  int start = *entry >> 4;
	  int count = (*entry++) & 0xf;
	  int i;

	  /* Only registers WR0..WR15 are valid.  */
	  if (start + count >= 16)
	    return NULL;

	  /* Pop iwmmx registers WR[start]..WR[start+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_WR0_REGNUM + start + i].set_addr (vsp);
	      vsp += 8;
	    }
	}
      else if (insn == 0xc7)
	{
	  /* 11000111 0000iiii: pop iWMMXt control registers under mask.  */
	  int mask = *entry++;
	  int i;

	  /* All-zero mask and mask >= 16 is "spare".  */
	  if (mask == 0 || mask >= 16)
	    return NULL;

	  /* Pop iwmmx general-purpose registers WCGR0..WCGR3 under mask.  */
	  for (i = 0; i < 4; i++)
	    if (mask & (1 << i))
	      {
		cache->saved_regs[ARM_WCGR0_REGNUM + i].set_addr (vsp);
		vsp += 4;
	      }
	}
      else if ((insn & 0xf8) == 0xc0)
	{
	  /* 11000nnn: pop iWMMXt registers WR10..WR[10+nnn].  */
	  int count = insn & 0x7;
	  int i;

	  /* Pop iwmmx registers WR[10]..WR[10+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_WR0_REGNUM + 10 + i].set_addr (vsp);
	      vsp += 8;
	    }
	}
      else if (insn == 0xc8)
	{
	  /* 11001000 sssscccc: pop VFP registers D16..D31 range.  */
	  int start = *entry >> 4;
	  int count = (*entry++) & 0xf;
	  int i;

	  /* Only registers D0..D31 are valid.  */
	  if (start + count >= 16)
	    return NULL;

	  /* Pop VFP double-precision registers
	     D[16+start]..D[16+start+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_D0_REGNUM + 16 + start + i].set_addr (vsp);
	      vsp += 8;
	    }
	}
      else if (insn == 0xc9)
	{
	  /* 11001001 sssscccc: pop VFP registers (FSTMFDD form).  */
	  int start = *entry >> 4;
	  int count = (*entry++) & 0xf;
	  int i;

	  /* Pop VFP double-precision registers D[start]..D[start+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_D0_REGNUM + start + i].set_addr (vsp);
	      vsp += 8;
	    }
	}
      else if ((insn & 0xf8) == 0xd0)
	{
	  /* 11010nnn: pop VFP registers D8..D[8+nnn] (FSTMFDD form).  */
	  int count = insn & 0x7;
	  int i;

	  /* Pop VFP double-precision registers D[8]..D[8+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_D0_REGNUM + 8 + i].set_addr (vsp);
	      vsp += 8;
	    }
	}
      else
	{
	  /* Everything else is "spare".  */
	  return NULL;
	}
    }

  /* If we restore SP from a register, assume this was the frame register.
     Otherwise just fall back to SP as frame register.  */
  if (cache->saved_regs[ARM_SP_REGNUM].is_realreg ())
    cache->framereg = cache->saved_regs[ARM_SP_REGNUM].realreg ();
  else
    cache->framereg = ARM_SP_REGNUM;

  /* Determine offset to previous frame.  */
  cache->framesize
    = vsp - get_frame_register_unsigned (this_frame, cache->framereg);

  /* We already got the previous SP.  */
  cache->prev_sp = vsp;

  return cache;
}
2643
2644 /* Unwinding via ARM exception table entries. Note that the sniffer
2645 already computes a filled-in prologue cache, which is then used
2646 with the same arm_prologue_this_id and arm_prologue_prev_register
2647 routines also used for prologue-parsing based unwinding. */
2648
static int
arm_exidx_unwind_sniffer (const struct frame_unwind *self,
			  struct frame_info *this_frame,
			  void **this_prologue_cache)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  CORE_ADDR addr_in_block, exidx_region, func_start;
  struct arm_prologue_cache *cache;
  gdb_byte *entry;

  /* See if we have an ARM exception table entry covering this address.  */
  addr_in_block = get_frame_address_in_block (this_frame);
  entry = arm_find_exidx_entry (addr_in_block, &exidx_region);
  if (!entry)
    return 0;

  /* The ARM exception table does not describe unwind information
     for arbitrary PC values, but is guaranteed to be correct only
     at call sites.  We have to decide here whether we want to use
     ARM exception table information for this frame, or fall back
     to using prologue parsing.  (Note that if we have DWARF CFI,
     this sniffer isn't even called -- CFI is always preferred.)

     Before we make this decision, however, we check whether we
     actually have *symbol* information for the current frame.
     If not, prologue parsing would not work anyway, so we might
     as well use the exception table and hope for the best.  */
  if (find_pc_partial_function (addr_in_block, NULL, &func_start, NULL))
    {
      int exc_valid = 0;

      /* If the next frame is "normal", we are at a call site in this
	 frame, so exception information is guaranteed to be valid.  */
      if (get_next_frame (this_frame)
	  && get_frame_type (get_next_frame (this_frame)) == NORMAL_FRAME)
	exc_valid = 1;

      /* We also assume exception information is valid if we're currently
	 blocked in a system call.  The system library is supposed to
	 ensure this, so that e.g. pthread cancellation works.  */
      if (arm_frame_is_thumb (this_frame))
	{
	  ULONGEST insn;

	  /* Is the 16-bit instruction just before PC an "svc"?  */
	  if (safe_read_memory_unsigned_integer (get_frame_pc (this_frame) - 2,
						 2, byte_order_for_code, &insn)
	      && (insn & 0xff00) == 0xdf00 /* svc */)
	    exc_valid = 1;
	}
      else
	{
	  ULONGEST insn;

	  /* Is the 32-bit ARM instruction just before PC an "svc"?  */
	  if (safe_read_memory_unsigned_integer (get_frame_pc (this_frame) - 4,
						 4, byte_order_for_code, &insn)
	      && (insn & 0x0f000000) == 0x0f000000 /* svc */)
	    exc_valid = 1;
	}

      /* Bail out if we don't know that exception information is valid.  */
      if (!exc_valid)
	return 0;

      /* The ARM exception index does not mark the *end* of the region
	 covered by the entry, and some functions will not have any entry.
	 To correctly recognize the end of the covered region, the linker
	 should have inserted dummy records with a CANTUNWIND marker.

	 Unfortunately, current versions of GNU ld do not reliably do
	 this, and thus we may have found an incorrect entry above.
	 As a (temporary) sanity check, we only use the entry if it
	 lies *within* the bounds of the function.  Note that this check
	 might reject perfectly valid entries that just happen to cover
	 multiple functions; therefore this check ought to be removed
	 once the linker is fixed.  */
      if (func_start > exidx_region)
	return 0;
    }

  /* Decode the list of unwinding instructions into a prologue cache.
     Note that this may fail due to e.g. a "refuse to unwind" code.  */
  cache = arm_exidx_fill_cache (this_frame, entry);
  if (!cache)
    return 0;

  *this_prologue_cache = cache;
  return 1;
}
2738
/* The exception-table based unwinder; shares this_id/prev_register
   with the prologue unwinder (see the comment above).  */
struct frame_unwind arm_exidx_unwind = {
  "arm exidx",
  NORMAL_FRAME,
  default_frame_unwind_stop_reason,
  arm_prologue_this_id,
  arm_prologue_prev_register,
  NULL,
  arm_exidx_unwind_sniffer
};
2748
2749 static struct arm_prologue_cache *
2750 arm_make_epilogue_frame_cache (struct frame_info *this_frame)
2751 {
2752 struct arm_prologue_cache *cache;
2753 int reg;
2754
2755 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
2756 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
2757
2758 /* Still rely on the offset calculated from prologue. */
2759 arm_scan_prologue (this_frame, cache);
2760
2761 /* Since we are in epilogue, the SP has been restored. */
2762 cache->prev_sp = get_frame_register_unsigned (this_frame, ARM_SP_REGNUM);
2763
2764 /* Calculate actual addresses of saved registers using offsets
2765 determined by arm_scan_prologue. */
2766 for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
2767 if (cache->saved_regs[reg].is_addr ())
2768 cache->saved_regs[reg].set_addr (cache->saved_regs[reg].addr ()
2769 + cache->prev_sp);
2770
2771 return cache;
2772 }
2773
2774 /* Implementation of function hook 'this_id' in
   'struct frame_unwind' for epilogue unwinder.  */
2776
static void
arm_epilogue_frame_this_id (struct frame_info *this_frame,
			    void **this_cache,
			    struct frame_id *this_id)
{
  struct arm_prologue_cache *cache;
  CORE_ADDR pc, func;

  /* Build (and memoize) the epilogue frame cache on first use.  */
  if (*this_cache == NULL)
    *this_cache = arm_make_epilogue_frame_cache (this_frame);
  cache = (struct arm_prologue_cache *) *this_cache;

  /* Use function start address as part of the frame ID.  If we cannot
     identify the start address (due to missing symbol information),
     fall back to just using the current PC.  */
  pc = get_frame_pc (this_frame);
  func = get_frame_func (this_frame);
  if (func == 0)
    func = pc;

  /* NOTE(review): unlike arm_prologue_this_id, the code address used
     here is PC, not FUNC, so FUNC is effectively unused -- confirm
     whether that is intentional before changing it.  */
  (*this_id) = frame_id_build (cache->prev_sp, pc);
}
2799
2800 /* Implementation of function hook 'prev_register' in
   'struct frame_unwind' for epilogue unwinder.  */
2802
2803 static struct value *
2804 arm_epilogue_frame_prev_register (struct frame_info *this_frame,
2805 void **this_cache, int regnum)
2806 {
2807 if (*this_cache == NULL)
2808 *this_cache = arm_make_epilogue_frame_cache (this_frame);
2809
2810 return arm_prologue_prev_register (this_frame, this_cache, regnum);
2811 }
2812
2813 static int arm_stack_frame_destroyed_p_1 (struct gdbarch *gdbarch,
2814 CORE_ADDR pc);
2815 static int thumb_stack_frame_destroyed_p (struct gdbarch *gdbarch,
2816 CORE_ADDR pc);
2817
2818 /* Implementation of function hook 'sniffer' in
   'struct frame_unwind' for epilogue unwinder.  */
2820
2821 static int
2822 arm_epilogue_frame_sniffer (const struct frame_unwind *self,
2823 struct frame_info *this_frame,
2824 void **this_prologue_cache)
2825 {
2826 if (frame_relative_level (this_frame) == 0)
2827 {
2828 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2829 CORE_ADDR pc = get_frame_pc (this_frame);
2830
2831 if (arm_frame_is_thumb (this_frame))
2832 return thumb_stack_frame_destroyed_p (gdbarch, pc);
2833 else
2834 return arm_stack_frame_destroyed_p_1 (gdbarch, pc);
2835 }
2836 else
2837 return 0;
2838 }
2839
2840 /* Frame unwinder from epilogue. */
2841
static const struct frame_unwind arm_epilogue_frame_unwind =
{
  "arm epilogue",
  NORMAL_FRAME,
  default_frame_unwind_stop_reason,
  arm_epilogue_frame_this_id,
  arm_epilogue_frame_prev_register,
  NULL,
  /* Only claims the innermost frame, when stopped in an epilogue.  */
  arm_epilogue_frame_sniffer,
};
2852
2853 /* Recognize GCC's trampoline for thumb call-indirect. If we are in a
2854 trampoline, return the target PC. Otherwise return 0.
2855
2856 void call0a (char c, short s, int i, long l) {}
2857
2858 int main (void)
2859 {
2860 (*pointer_to_call0a) (c, s, i, l);
2861 }
2862
2863 Instead of calling a stub library function _call_via_xx (xx is
2864 the register name), GCC may inline the trampoline in the object
2865 file as below (register r2 has the address of call0a).
2866
2867 .global main
2868 .type main, %function
2869 ...
2870 bl .L1
2871 ...
2872 .size main, .-main
2873
2874 .L1:
2875 bx r2
2876
2877 The trampoline 'bx r2' doesn't belong to main. */
2878
2879 static CORE_ADDR
2880 arm_skip_bx_reg (struct frame_info *frame, CORE_ADDR pc)
2881 {
2882 /* The heuristics of recognizing such trampoline is that FRAME is
2883 executing in Thumb mode and the instruction on PC is 'bx Rm'. */
2884 if (arm_frame_is_thumb (frame))
2885 {
2886 gdb_byte buf[2];
2887
2888 if (target_read_memory (pc, buf, 2) == 0)
2889 {
2890 struct gdbarch *gdbarch = get_frame_arch (frame);
2891 enum bfd_endian byte_order_for_code
2892 = gdbarch_byte_order_for_code (gdbarch);
2893 uint16_t insn
2894 = extract_unsigned_integer (buf, 2, byte_order_for_code);
2895
2896 if ((insn & 0xff80) == 0x4700) /* bx <Rm> */
2897 {
2898 CORE_ADDR dest
2899 = get_frame_register_unsigned (frame, bits (insn, 3, 6));
2900
2901 /* Clear the LSB so that gdb core sets step-resume
2902 breakpoint at the right address. */
2903 return UNMAKE_THUMB_ADDR (dest);
2904 }
2905 }
2906 }
2907
2908 return 0;
2909 }
2910
2911 static struct arm_prologue_cache *
2912 arm_make_stub_cache (struct frame_info *this_frame)
2913 {
2914 struct arm_prologue_cache *cache;
2915
2916 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
2917 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
2918
2919 cache->prev_sp = get_frame_register_unsigned (this_frame, ARM_SP_REGNUM);
2920
2921 return cache;
2922 }
2923
/* Our frame ID for a stub frame is the caller's SP and the current PC.  */
2925
2926 static void
2927 arm_stub_this_id (struct frame_info *this_frame,
2928 void **this_cache,
2929 struct frame_id *this_id)
2930 {
2931 struct arm_prologue_cache *cache;
2932
2933 if (*this_cache == NULL)
2934 *this_cache = arm_make_stub_cache (this_frame);
2935 cache = (struct arm_prologue_cache *) *this_cache;
2936
2937 *this_id = frame_id_build (cache->prev_sp, get_frame_pc (this_frame));
2938 }
2939
2940 static int
2941 arm_stub_unwind_sniffer (const struct frame_unwind *self,
2942 struct frame_info *this_frame,
2943 void **this_prologue_cache)
2944 {
2945 CORE_ADDR addr_in_block;
2946 gdb_byte dummy[4];
2947 CORE_ADDR pc, start_addr;
2948 const char *name;
2949
2950 addr_in_block = get_frame_address_in_block (this_frame);
2951 pc = get_frame_pc (this_frame);
2952 if (in_plt_section (addr_in_block)
2953 /* We also use the stub winder if the target memory is unreadable
2954 to avoid having the prologue unwinder trying to read it. */
2955 || target_read_memory (pc, dummy, 4) != 0)
2956 return 1;
2957
2958 if (find_pc_partial_function (pc, &name, &start_addr, NULL) == 0
2959 && arm_skip_bx_reg (this_frame, pc) != 0)
2960 return 1;
2961
2962 return 0;
2963 }
2964
/* Unwinder for PLT entries, unreadable code, and Thumb 'bx Rm'
   trampolines without symbols (see arm_stub_unwind_sniffer).  */
struct frame_unwind arm_stub_unwind = {
  "arm stub",
  NORMAL_FRAME,
  default_frame_unwind_stop_reason,
  arm_stub_this_id,
  arm_prologue_prev_register,
  NULL,
  arm_stub_unwind_sniffer
};
2974
2975 /* Put here the code to store, into CACHE->saved_regs, the addresses
2976 of the saved registers of frame described by THIS_FRAME. CACHE is
2977 returned. */
2978
2979 static struct arm_prologue_cache *
2980 arm_m_exception_cache (struct frame_info *this_frame)
2981 {
2982 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2983 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2984 struct arm_prologue_cache *cache;
2985 CORE_ADDR lr;
2986 CORE_ADDR sp;
2987 CORE_ADDR unwound_sp;
2988 LONGEST xpsr;
2989 uint32_t exc_return;
2990 uint32_t process_stack_used;
2991 uint32_t extended_frame_used;
2992 uint32_t secure_stack_used;
2993
2994 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
2995 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
2996
2997 /* ARMv7-M Architecture Reference "B1.5.6 Exception entry behavior"
2998 describes which bits in LR that define which stack was used prior
2999 to the exception and if FPU is used (causing extended stack frame). */
3000
3001 lr = get_frame_register_unsigned (this_frame, ARM_LR_REGNUM);
3002 sp = get_frame_register_unsigned (this_frame, ARM_SP_REGNUM);
3003
3004 /* Check EXC_RETURN indicator bits. */
3005 exc_return = (((lr >> 28) & 0xf) == 0xf);
3006
3007 /* Check EXC_RETURN bit SPSEL if Main or Thread (process) stack used. */
3008 process_stack_used = ((lr & (1 << 2)) != 0);
3009 if (exc_return && process_stack_used)
3010 {
3011 /* Thread (process) stack used.
3012 Potentially this could be other register defined by target, but PSP
3013 can be considered a standard name for the "Process Stack Pointer".
3014 To be fully aware of system registers like MSP and PSP, these could
3015 be added to a separate XML arm-m-system-profile that is valid for
3016 ARMv6-M and ARMv7-M architectures. Also to be able to debug eg a
3017 corefile off-line, then these registers must be defined by GDB,
3018 and also be included in the corefile regsets. */
3019
3020 int psp_regnum = user_reg_map_name_to_regnum (gdbarch, "psp", -1);
3021 if (psp_regnum == -1)
3022 {
3023 /* Thread (process) stack could not be fetched,
3024 give warning and exit. */
3025
3026 warning (_("no PSP thread stack unwinding supported."));
3027
3028 /* Terminate any further stack unwinding by refer to self. */
3029 cache->prev_sp = sp;
3030 return cache;
3031 }
3032 else
3033 {
3034 /* Thread (process) stack used, use PSP as SP. */
3035 unwound_sp = get_frame_register_unsigned (this_frame, psp_regnum);
3036 }
3037 }
3038 else
3039 {
3040 /* Main stack used, use MSP as SP. */
3041 unwound_sp = sp;
3042 }
3043
3044 /* The hardware saves eight 32-bit words, comprising xPSR,
3045 ReturnAddress, LR (R14), R12, R3, R2, R1, R0. See details in
3046 "B1.5.6 Exception entry behavior" in
3047 "ARMv7-M Architecture Reference Manual". */
3048 cache->saved_regs[0].set_addr (unwound_sp);
3049 cache->saved_regs[1].set_addr (unwound_sp + 4);
3050 cache->saved_regs[2].set_addr (unwound_sp + 8);
3051 cache->saved_regs[3].set_addr (unwound_sp + 12);
3052 cache->saved_regs[ARM_IP_REGNUM].set_addr (unwound_sp + 16);
3053 cache->saved_regs[ARM_LR_REGNUM].set_addr (unwound_sp + 20);
3054 cache->saved_regs[ARM_PC_REGNUM].set_addr (unwound_sp + 24);
3055 cache->saved_regs[ARM_PS_REGNUM].set_addr (unwound_sp + 28);
3056
3057 /* Check EXC_RETURN bit FTYPE if extended stack frame (FPU regs stored)
3058 type used. */
3059 extended_frame_used = ((lr & (1 << 4)) == 0);
3060 if (exc_return && extended_frame_used)
3061 {
3062 int i;
3063 int fpu_regs_stack_offset;
3064
3065 /* This code does not take into account the lazy stacking, see "Lazy
3066 context save of FP state", in B1.5.7, also ARM AN298, supported
3067 by Cortex-M4F architecture.
3068 To fully handle this the FPCCR register (Floating-point Context
3069 Control Register) needs to be read out and the bits ASPEN and LSPEN
3070 could be checked to setup correct lazy stacked FP registers.
3071 This register is located at address 0xE000EF34. */
3072
3073 /* Extended stack frame type used. */
3074 fpu_regs_stack_offset = unwound_sp + 0x20;
3075 for (i = 0; i < 16; i++)
3076 {
3077 cache->saved_regs[ARM_D0_REGNUM + i].set_addr (fpu_regs_stack_offset);
3078 fpu_regs_stack_offset += 4;
3079 }
3080 cache->saved_regs[ARM_FPSCR_REGNUM].set_addr (unwound_sp + 0x60);
3081
3082 /* Offset 0x64 is reserved. */
3083 cache->prev_sp = unwound_sp + 0x68;
3084 }
3085 else
3086 {
3087 /* Standard stack frame type used. */
3088 cache->prev_sp = unwound_sp + 0x20;
3089 }
3090
3091 /* Check EXC_RETURN bit S if Secure or Non-secure stack used. */
3092 secure_stack_used = ((lr & (1 << 6)) != 0);
3093 if (exc_return && secure_stack_used)
3094 {
3095 /* ARMv8-M Exception and interrupt handling is not considered here.
3096 In the ARMv8-M architecture also EXC_RETURN bit S is controlling if
3097 the Secure or Non-secure stack was used. To separate Secure and
3098 Non-secure stacks, processors that are based on the ARMv8-M
3099 architecture support 4 stack pointers: MSP_S, PSP_S, MSP_NS, PSP_NS.
3100 In addition, a stack limit feature is provided using stack limit
3101 registers (accessible using MSR and MRS instructions) in Privileged
3102 level. */
3103 }
3104
3105 /* If bit 9 of the saved xPSR is set, then there is a four-byte
3106 aligner between the top of the 32-byte stack frame and the
3107 previous context's stack pointer. */
3108 if (safe_read_memory_integer (unwound_sp + 28, 4, byte_order, &xpsr)
3109 && (xpsr & (1 << 9)) != 0)
3110 cache->prev_sp += 4;
3111
3112 return cache;
3113 }
3114
/* Implementation of function hook 'this_id' in
   'struct frame_unwind'.  */
3117
3118 static void
3119 arm_m_exception_this_id (struct frame_info *this_frame,
3120 void **this_cache,
3121 struct frame_id *this_id)
3122 {
3123 struct arm_prologue_cache *cache;
3124
3125 if (*this_cache == NULL)
3126 *this_cache = arm_m_exception_cache (this_frame);
3127 cache = (struct arm_prologue_cache *) *this_cache;
3128
3129 /* Our frame ID for a stub frame is the current SP and LR. */
3130 *this_id = frame_id_build (cache->prev_sp,
3131 get_frame_pc (this_frame));
3132 }
3133
/* Implementation of function hook 'prev_register' in
   'struct frame_unwind'.  */
3136
3137 static struct value *
3138 arm_m_exception_prev_register (struct frame_info *this_frame,
3139 void **this_cache,
3140 int prev_regnum)
3141 {
3142 struct arm_prologue_cache *cache;
3143
3144 if (*this_cache == NULL)
3145 *this_cache = arm_m_exception_cache (this_frame);
3146 cache = (struct arm_prologue_cache *) *this_cache;
3147
3148 /* The value was already reconstructed into PREV_SP. */
3149 if (prev_regnum == ARM_SP_REGNUM)
3150 return frame_unwind_got_constant (this_frame, prev_regnum,
3151 cache->prev_sp);
3152
3153 return trad_frame_get_prev_register (this_frame, cache->saved_regs,
3154 prev_regnum);
3155 }
3156
/* Implementation of function hook 'sniffer' in
   'struct frame_unwind'.  */
3159
3160 static int
3161 arm_m_exception_unwind_sniffer (const struct frame_unwind *self,
3162 struct frame_info *this_frame,
3163 void **this_prologue_cache)
3164 {
3165 CORE_ADDR this_pc = get_frame_pc (this_frame);
3166
3167 /* No need to check is_m; this sniffer is only registered for
3168 M-profile architectures. */
3169
3170 /* Check if exception frame returns to a magic PC value. */
3171 return arm_m_addr_is_magic (this_pc);
3172 }
3173
3174 /* Frame unwinder for M-profile exceptions. */
3175
struct frame_unwind arm_m_exception_unwind =
{
  "arm m exception",			/* name */
  SIGTRAMP_FRAME,			/* frame type */
  default_frame_unwind_stop_reason,	/* stop_reason */
  arm_m_exception_this_id,		/* this_id */
  arm_m_exception_prev_register,	/* prev_register */
  NULL,					/* unwind_data (unused) */
  arm_m_exception_unwind_sniffer	/* sniffer */
};
3186
3187 static CORE_ADDR
3188 arm_normal_frame_base (struct frame_info *this_frame, void **this_cache)
3189 {
3190 struct arm_prologue_cache *cache;
3191
3192 if (*this_cache == NULL)
3193 *this_cache = arm_make_prologue_cache (this_frame);
3194 cache = (struct arm_prologue_cache *) *this_cache;
3195
3196 return cache->prev_sp - cache->framesize;
3197 }
3198
/* Frame base handler for prologue-analyzed ARM frames; the same base
   address serves for the frame itself, locals and arguments.  */
struct frame_base arm_normal_base = {
  &arm_prologue_unwind,
  arm_normal_frame_base,	/* this_base */
  arm_normal_frame_base,	/* this_locals */
  arm_normal_frame_base		/* this_args */
};
3205
3206 static struct value *
3207 arm_dwarf2_prev_register (struct frame_info *this_frame, void **this_cache,
3208 int regnum)
3209 {
3210 struct gdbarch * gdbarch = get_frame_arch (this_frame);
3211 CORE_ADDR lr, cpsr;
3212 ULONGEST t_bit = arm_psr_thumb_bit (gdbarch);
3213
3214 switch (regnum)
3215 {
3216 case ARM_PC_REGNUM:
3217 /* The PC is normally copied from the return column, which
3218 describes saves of LR. However, that version may have an
3219 extra bit set to indicate Thumb state. The bit is not
3220 part of the PC. */
3221 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
3222 return frame_unwind_got_constant (this_frame, regnum,
3223 arm_addr_bits_remove (gdbarch, lr));
3224
3225 case ARM_PS_REGNUM:
3226 /* Reconstruct the T bit; see arm_prologue_prev_register for details. */
3227 cpsr = get_frame_register_unsigned (this_frame, regnum);
3228 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
3229 if (IS_THUMB_ADDR (lr))
3230 cpsr |= t_bit;
3231 else
3232 cpsr &= ~t_bit;
3233 return frame_unwind_got_constant (this_frame, regnum, cpsr);
3234
3235 default:
3236 internal_error (__FILE__, __LINE__,
3237 _("Unexpected register %d"), regnum);
3238 }
3239 }
3240
3241 static void
3242 arm_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
3243 struct dwarf2_frame_state_reg *reg,
3244 struct frame_info *this_frame)
3245 {
3246 switch (regnum)
3247 {
3248 case ARM_PC_REGNUM:
3249 case ARM_PS_REGNUM:
3250 reg->how = DWARF2_FRAME_REG_FN;
3251 reg->loc.fn = arm_dwarf2_prev_register;
3252 break;
3253 case ARM_SP_REGNUM:
3254 reg->how = DWARF2_FRAME_REG_CFA;
3255 break;
3256 }
3257 }
3258
3259 /* Implement the stack_frame_destroyed_p gdbarch method. */
3260
static int
thumb_stack_frame_destroyed_p (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  unsigned int insn, insn2;
  int found_return = 0, found_stack_adjust = 0;
  CORE_ADDR func_start, func_end;
  CORE_ADDR scan_pc;
  gdb_byte buf[4];

  /* Without symbol information we cannot bound the scan; assume not
     in an epilogue.  */
  if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
    return 0;

  /* The epilogue is a sequence of instructions along the following lines:

    - add stack frame size to SP or FP
    - [if frame pointer used] restore SP from FP
    - restore registers from SP [may include PC]
    - a return-type instruction [if PC wasn't already restored]

    In a first pass, we scan forward from the current PC and verify the
    instructions we find as compatible with this sequence, ending in a
    return instruction.

    However, this is not sufficient to distinguish indirect function calls
    within a function from indirect tail calls in the epilogue in some cases.
    Therefore, if we didn't already find any SP-changing instruction during
    forward scan, we add a backward scanning heuristic to ensure we actually
    are in the epilogue.  */

  /* Forward pass: every instruction from PC to the return must be a
     plausible epilogue instruction, else bail out of the loop.  */
  scan_pc = pc;
  while (scan_pc < func_end && !found_return)
    {
      if (target_read_memory (scan_pc, buf, 2))
	break;

      scan_pc += 2;
      insn = extract_unsigned_integer (buf, 2, byte_order_for_code);

      if ((insn & 0xff80) == 0x4700)  /* bx <Rm> */
	found_return = 1;
      else if (insn == 0x46f7)  /* mov pc, lr */
	found_return = 1;
      else if (thumb_instruction_restores_sp (insn))
	{
	  if ((insn & 0xff00) == 0xbd00)  /* pop <registers, PC> */
	    found_return = 1;
	}
      else if (thumb_insn_size (insn) == 4)  /* 32-bit Thumb-2 instruction */
	{
	  /* Fetch the second halfword of the 32-bit encoding.  */
	  if (target_read_memory (scan_pc, buf, 2))
	    break;

	  scan_pc += 2;
	  insn2 = extract_unsigned_integer (buf, 2, byte_order_for_code);

	  if (insn == 0xe8bd)  /* ldm.w sp!, <registers> */
	    {
	      if (insn2 & 0x8000)  /* <registers> include PC.  */
		found_return = 1;
	    }
	  else if (insn == 0xf85d  /* ldr.w <Rt>, [sp], #4 */
		   && (insn2 & 0x0fff) == 0x0b04)
	    {
	      if ((insn2 & 0xf000) == 0xf000) /* <Rt> is PC.  */
		found_return = 1;
	    }
	  else if ((insn & 0xffbf) == 0xecbd  /* vldm sp!, <list> */
		   && (insn2 & 0x0e00) == 0x0a00)
	    ;
	  else
	    break;
	}
      else
	break;
    }

  if (!found_return)
    return 0;

  /* Since any instruction in the epilogue sequence, with the possible
     exception of return itself, updates the stack pointer, we need to
     scan backwards for at most one instruction.  Try either a 16-bit or
     a 32-bit instruction.  This is just a heuristic, so we do not worry
     too much about false positives.  */

  if (pc - 4 < func_start)
    return 0;
  if (target_read_memory (pc - 4, buf, 4))
    return 0;

  /* Decode the four bytes before PC both as a trailing 16-bit
     instruction (INSN2) and as one 32-bit instruction (INSN:INSN2).  */
  insn = extract_unsigned_integer (buf, 2, byte_order_for_code);
  insn2 = extract_unsigned_integer (buf + 2, 2, byte_order_for_code);

  if (thumb_instruction_restores_sp (insn2))
    found_stack_adjust = 1;
  else if (insn == 0xe8bd)  /* ldm.w sp!, <registers> */
    found_stack_adjust = 1;
  else if (insn == 0xf85d  /* ldr.w <Rt>, [sp], #4 */
	   && (insn2 & 0x0fff) == 0x0b04)
    found_stack_adjust = 1;
  else if ((insn & 0xffbf) == 0xecbd  /* vldm sp!, <list> */
	   && (insn2 & 0x0e00) == 0x0a00)
    found_stack_adjust = 1;

  return found_stack_adjust;
}
3368
/* ARM (non-Thumb) mode helper for arm_stack_frame_destroyed_p: return
   non-zero if PC appears to be within a function epilogue.  */

static int
arm_stack_frame_destroyed_p_1 (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  unsigned int insn;
  int found_return;
  CORE_ADDR func_start, func_end;

  /* Without symbol information we cannot bound the scan; assume not
     in an epilogue.  */
  if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
    return 0;

  /* We are in the epilogue if the previous instruction was a stack
     adjustment and the next instruction is a possible return (bx, mov
     pc, or pop).  We could have to scan backwards to find the stack
     adjustment, or forwards to find the return, but this is a decent
     approximation.  First scan forwards.  */

  found_return = 0;
  insn = read_memory_unsigned_integer (pc, 4, byte_order_for_code);
  /* Skip unconditional (NV-coded) instructions.  */
  if (bits (insn, 28, 31) != INST_NV)
    {
      if ((insn & 0x0ffffff0) == 0x012fff10)
	/* BX.  */
	found_return = 1;
      else if ((insn & 0x0ffffff0) == 0x01a0f000)
	/* MOV PC.  */
	found_return = 1;
      else if ((insn & 0x0fff0000) == 0x08bd0000
	       && (insn & 0x0000c000) != 0)
	/* POP (LDMIA), including PC or LR.  */
	found_return = 1;
    }

  if (!found_return)
    return 0;

  /* Scan backwards.  This is just a heuristic, so do not worry about
     false positives from mode changes.  */

  if (pc < func_start + 4)
    return 0;

  insn = read_memory_unsigned_integer (pc - 4, 4, byte_order_for_code);
  if (arm_instruction_restores_sp (insn))
    return 1;

  return 0;
}
3417
3418 /* Implement the stack_frame_destroyed_p gdbarch method. */
3419
3420 static int
3421 arm_stack_frame_destroyed_p (struct gdbarch *gdbarch, CORE_ADDR pc)
3422 {
3423 if (arm_pc_is_thumb (gdbarch, pc))
3424 return thumb_stack_frame_destroyed_p (gdbarch, pc);
3425 else
3426 return arm_stack_frame_destroyed_p_1 (gdbarch, pc);
3427 }
3428
3429 /* When arguments must be pushed onto the stack, they go on in reverse
3430 order. The code below implements a FILO (stack) to do this. */
3431
struct stack_item
{
  /* Number of bytes in DATA.  */
  int len;
  /* The item pushed before this one (i.e. the rest of the stack), or
     NULL for the bottom of the stack.  */
  struct stack_item *prev;
  /* Heap-allocated copy of the item's contents; owned by this node.  */
  gdb_byte *data;
};
3438
3439 static struct stack_item *
3440 push_stack_item (struct stack_item *prev, const gdb_byte *contents, int len)
3441 {
3442 struct stack_item *si;
3443 si = XNEW (struct stack_item);
3444 si->data = (gdb_byte *) xmalloc (len);
3445 si->len = len;
3446 si->prev = prev;
3447 memcpy (si->data, contents, len);
3448 return si;
3449 }
3450
3451 static struct stack_item *
3452 pop_stack_item (struct stack_item *si)
3453 {
3454 struct stack_item *dead = si;
3455 si = si->prev;
3456 xfree (dead->data);
3457 xfree (dead);
3458 return si;
3459 }
3460
3461 /* Implement the gdbarch type alignment method, overrides the generic
3462 alignment algorithm for anything that is arm specific. */
3463
3464 static ULONGEST
3465 arm_type_align (gdbarch *gdbarch, struct type *t)
3466 {
3467 t = check_typedef (t);
3468 if (t->code () == TYPE_CODE_ARRAY && t->is_vector ())
3469 {
3470 /* Use the natural alignment for vector types (the same for
3471 scalar type), but the maximum alignment is 64-bit. */
3472 if (TYPE_LENGTH (t) > 8)
3473 return 8;
3474 else
3475 return TYPE_LENGTH (t);
3476 }
3477
3478 /* Allow the common code to calculate the alignment. */
3479 return 0;
3480 }
3481
3482 /* Possible base types for a candidate for passing and returning in
3483 VFP registers. */
3484
enum arm_vfp_cprc_base_type
{
  VFP_CPRC_UNKNOWN,	/* Not yet classified.  */
  VFP_CPRC_SINGLE,	/* 32-bit single-precision float.  */
  VFP_CPRC_DOUBLE,	/* 64-bit double-precision float.  */
  VFP_CPRC_VEC64,	/* 64-bit containerized vector.  */
  VFP_CPRC_VEC128	/* 128-bit containerized vector.  */
};
3493
3494 /* The length of one element of base type B. */
3495
3496 static unsigned
3497 arm_vfp_cprc_unit_length (enum arm_vfp_cprc_base_type b)
3498 {
3499 switch (b)
3500 {
3501 case VFP_CPRC_SINGLE:
3502 return 4;
3503 case VFP_CPRC_DOUBLE:
3504 return 8;
3505 case VFP_CPRC_VEC64:
3506 return 8;
3507 case VFP_CPRC_VEC128:
3508 return 16;
3509 default:
3510 internal_error (__FILE__, __LINE__, _("Invalid VFP CPRC type: %d."),
3511 (int) b);
3512 }
3513 }
3514
3515 /* The character ('s', 'd' or 'q') for the type of VFP register used
3516 for passing base type B. */
3517
3518 static int
3519 arm_vfp_cprc_reg_char (enum arm_vfp_cprc_base_type b)
3520 {
3521 switch (b)
3522 {
3523 case VFP_CPRC_SINGLE:
3524 return 's';
3525 case VFP_CPRC_DOUBLE:
3526 return 'd';
3527 case VFP_CPRC_VEC64:
3528 return 'd';
3529 case VFP_CPRC_VEC128:
3530 return 'q';
3531 default:
3532 internal_error (__FILE__, __LINE__, _("Invalid VFP CPRC type: %d."),
3533 (int) b);
3534 }
3535 }
3536
3537 /* Determine whether T may be part of a candidate for passing and
3538 returning in VFP registers, ignoring the limit on the total number
3539 of components. If *BASE_TYPE is VFP_CPRC_UNKNOWN, set it to the
3540 classification of the first valid component found; if it is not
3541 VFP_CPRC_UNKNOWN, all components must have the same classification
3542 as *BASE_TYPE. If it is found that T contains a type not permitted
3543 for passing and returning in VFP registers, a type differently
3544 classified from *BASE_TYPE, or two types differently classified
3545 from each other, return -1, otherwise return the total number of
3546 base-type elements found (possibly 0 in an empty structure or
3547 array). Vector types are not currently supported, matching the
3548 generic AAPCS support. */
3549
static int
arm_vfp_cprc_sub_candidate (struct type *t,
			    enum arm_vfp_cprc_base_type *base_type)
{
  t = check_typedef (t);
  switch (t->code ())
    {
    case TYPE_CODE_FLT:
      /* A lone float counts as one element of its size class.  */
      switch (TYPE_LENGTH (t))
	{
	case 4:
	  if (*base_type == VFP_CPRC_UNKNOWN)
	    *base_type = VFP_CPRC_SINGLE;
	  else if (*base_type != VFP_CPRC_SINGLE)
	    return -1;
	  return 1;

	case 8:
	  if (*base_type == VFP_CPRC_UNKNOWN)
	    *base_type = VFP_CPRC_DOUBLE;
	  else if (*base_type != VFP_CPRC_DOUBLE)
	    return -1;
	  return 1;

	default:
	  return -1;
	}
      break;

    case TYPE_CODE_COMPLEX:
      /* Arguments of complex T where T is one of the types float or
	 double get treated as if they are implemented as:

	 struct complexT
	 {
	   T real;
	   T imag;
	 };

      */
      switch (TYPE_LENGTH (t))
	{
	case 8:
	  if (*base_type == VFP_CPRC_UNKNOWN)
	    *base_type = VFP_CPRC_SINGLE;
	  else if (*base_type != VFP_CPRC_SINGLE)
	    return -1;
	  return 2;

	case 16:
	  if (*base_type == VFP_CPRC_UNKNOWN)
	    *base_type = VFP_CPRC_DOUBLE;
	  else if (*base_type != VFP_CPRC_DOUBLE)
	    return -1;
	  return 2;

	default:
	  return -1;
	}
      break;

    case TYPE_CODE_ARRAY:
      {
	if (t->is_vector ())
	  {
	    /* A 64-bit or 128-bit containerized vector type are VFP
	       CPRCs.  */
	    switch (TYPE_LENGTH (t))
	      {
	      case 8:
		if (*base_type == VFP_CPRC_UNKNOWN)
		  *base_type = VFP_CPRC_VEC64;
		return 1;
	      case 16:
		if (*base_type == VFP_CPRC_UNKNOWN)
		  *base_type = VFP_CPRC_VEC128;
		return 1;
	      default:
		return -1;
	      }
	  }
	else
	  {
	    int count;
	    unsigned unitlen;

	    /* An ordinary array contributes its element count times
	       the element's own contribution.  */
	    count = arm_vfp_cprc_sub_candidate (TYPE_TARGET_TYPE (t),
						base_type);
	    if (count == -1)
	      return -1;
	    if (TYPE_LENGTH (t) == 0)
	      {
		gdb_assert (count == 0);
		return 0;
	      }
	    else if (count == 0)
	      return -1;
	    unitlen = arm_vfp_cprc_unit_length (*base_type);
	    gdb_assert ((TYPE_LENGTH (t) % unitlen) == 0);
	    return TYPE_LENGTH (t) / unitlen;
	  }
      }
      break;

    case TYPE_CODE_STRUCT:
      {
	int count = 0;
	unsigned unitlen;
	int i;

	/* Sum the contributions of all non-static members; any
	   padding (length mismatch) disqualifies the struct.  */
	for (i = 0; i < t->num_fields (); i++)
	  {
	    int sub_count = 0;

	    if (!field_is_static (&t->field (i)))
	      sub_count = arm_vfp_cprc_sub_candidate (t->field (i).type (),
						      base_type);
	    if (sub_count == -1)
	      return -1;
	    count += sub_count;
	  }
	if (TYPE_LENGTH (t) == 0)
	  {
	    gdb_assert (count == 0);
	    return 0;
	  }
	else if (count == 0)
	  return -1;
	unitlen = arm_vfp_cprc_unit_length (*base_type);
	if (TYPE_LENGTH (t) != unitlen * count)
	  return -1;
	return count;
      }

    case TYPE_CODE_UNION:
      {
	int count = 0;
	unsigned unitlen;
	int i;

	/* A union contributes the maximum of its members'
	   contributions, since the members overlap.  */
	for (i = 0; i < t->num_fields (); i++)
	  {
	    int sub_count = arm_vfp_cprc_sub_candidate (t->field (i).type (),
							base_type);
	    if (sub_count == -1)
	      return -1;
	    count = (count > sub_count ? count : sub_count);
	  }
	if (TYPE_LENGTH (t) == 0)
	  {
	    gdb_assert (count == 0);
	    return 0;
	  }
	else if (count == 0)
	  return -1;
	unitlen = arm_vfp_cprc_unit_length (*base_type);
	if (TYPE_LENGTH (t) != unitlen * count)
	  return -1;
	return count;
      }

    default:
      break;
    }

  /* Any other type code is never a CPRC component.  */
  return -1;
}
3715
3716 /* Determine whether T is a VFP co-processor register candidate (CPRC)
3717 if passed to or returned from a non-variadic function with the VFP
3718 ABI in effect. Return 1 if it is, 0 otherwise. If it is, set
3719 *BASE_TYPE to the base type for T and *COUNT to the number of
3720 elements of that base type before returning. */
3721
3722 static int
3723 arm_vfp_call_candidate (struct type *t, enum arm_vfp_cprc_base_type *base_type,
3724 int *count)
3725 {
3726 enum arm_vfp_cprc_base_type b = VFP_CPRC_UNKNOWN;
3727 int c = arm_vfp_cprc_sub_candidate (t, &b);
3728 if (c <= 0 || c > 4)
3729 return 0;
3730 *base_type = b;
3731 *count = c;
3732 return 1;
3733 }
3734
3735 /* Return 1 if the VFP ABI should be used for passing arguments to and
3736 returning values from a function of type FUNC_TYPE, 0
3737 otherwise. */
3738
3739 static int
3740 arm_vfp_abi_for_function (struct gdbarch *gdbarch, struct type *func_type)
3741 {
3742 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3743 /* Variadic functions always use the base ABI. Assume that functions
3744 without debug info are not variadic. */
3745 if (func_type && check_typedef (func_type)->has_varargs ())
3746 return 0;
3747 /* The VFP ABI is only supported as a variant of AAPCS. */
3748 if (tdep->arm_abi != ARM_ABI_AAPCS)
3749 return 0;
3750 return gdbarch_tdep (gdbarch)->fp_model == ARM_FLOAT_VFP;
3751 }
3752
3753 /* We currently only support passing parameters in integer registers, which
3754 conforms with GCC's default model, and VFP argument passing following
3755 the VFP variant of AAPCS. Several other variants exist and
3756 we should probably support some of them based on the selected ABI. */
3757
3758 static CORE_ADDR
3759 arm_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
3760 struct regcache *regcache, CORE_ADDR bp_addr, int nargs,
3761 struct value **args, CORE_ADDR sp,
3762 function_call_return_method return_method,
3763 CORE_ADDR struct_addr)
3764 {
3765 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3766 int argnum;
3767 int argreg;
3768 int nstack;
3769 struct stack_item *si = NULL;
3770 int use_vfp_abi;
3771 struct type *ftype;
3772 unsigned vfp_regs_free = (1 << 16) - 1;
3773
3774 /* Determine the type of this function and whether the VFP ABI
3775 applies. */
3776 ftype = check_typedef (value_type (function));
3777 if (ftype->code () == TYPE_CODE_PTR)
3778 ftype = check_typedef (TYPE_TARGET_TYPE (ftype));
3779 use_vfp_abi = arm_vfp_abi_for_function (gdbarch, ftype);
3780
3781 /* Set the return address. For the ARM, the return breakpoint is
3782 always at BP_ADDR. */
3783 if (arm_pc_is_thumb (gdbarch, bp_addr))
3784 bp_addr |= 1;
3785 regcache_cooked_write_unsigned (regcache, ARM_LR_REGNUM, bp_addr);
3786
3787 /* Walk through the list of args and determine how large a temporary
3788 stack is required. Need to take care here as structs may be
3789 passed on the stack, and we have to push them. */
3790 nstack = 0;
3791
3792 argreg = ARM_A1_REGNUM;
3793 nstack = 0;
3794
3795 /* The struct_return pointer occupies the first parameter
3796 passing register. */
3797 if (return_method == return_method_struct)
3798 {
3799 arm_debug_printf ("struct return in %s = %s",
3800 gdbarch_register_name (gdbarch, argreg),
3801 paddress (gdbarch, struct_addr));
3802
3803 regcache_cooked_write_unsigned (regcache, argreg, struct_addr);
3804 argreg++;
3805 }
3806
3807 for (argnum = 0; argnum < nargs; argnum++)
3808 {
3809 int len;
3810 struct type *arg_type;
3811 struct type *target_type;
3812 enum type_code typecode;
3813 const bfd_byte *val;
3814 int align;
3815 enum arm_vfp_cprc_base_type vfp_base_type;
3816 int vfp_base_count;
3817 int may_use_core_reg = 1;
3818
3819 arg_type = check_typedef (value_type (args[argnum]));
3820 len = TYPE_LENGTH (arg_type);
3821 target_type = TYPE_TARGET_TYPE (arg_type);
3822 typecode = arg_type->code ();
3823 val = value_contents (args[argnum]);
3824
3825 align = type_align (arg_type);
3826 /* Round alignment up to a whole number of words. */
3827 align = (align + ARM_INT_REGISTER_SIZE - 1)
3828 & ~(ARM_INT_REGISTER_SIZE - 1);
3829 /* Different ABIs have different maximum alignments. */
3830 if (gdbarch_tdep (gdbarch)->arm_abi == ARM_ABI_APCS)
3831 {
3832 /* The APCS ABI only requires word alignment. */
3833 align = ARM_INT_REGISTER_SIZE;
3834 }
3835 else
3836 {
3837 /* The AAPCS requires at most doubleword alignment. */
3838 if (align > ARM_INT_REGISTER_SIZE * 2)
3839 align = ARM_INT_REGISTER_SIZE * 2;
3840 }
3841
3842 if (use_vfp_abi
3843 && arm_vfp_call_candidate (arg_type, &vfp_base_type,
3844 &vfp_base_count))
3845 {
3846 int regno;
3847 int unit_length;
3848 int shift;
3849 unsigned mask;
3850
3851 /* Because this is a CPRC it cannot go in a core register or
3852 cause a core register to be skipped for alignment.
3853 Either it goes in VFP registers and the rest of this loop
3854 iteration is skipped for this argument, or it goes on the
3855 stack (and the stack alignment code is correct for this
3856 case). */
3857 may_use_core_reg = 0;
3858
3859 unit_length = arm_vfp_cprc_unit_length (vfp_base_type);
3860 shift = unit_length / 4;
3861 mask = (1 << (shift * vfp_base_count)) - 1;
3862 for (regno = 0; regno < 16; regno += shift)
3863 if (((vfp_regs_free >> regno) & mask) == mask)
3864 break;
3865
3866 if (regno < 16)
3867 {
3868 int reg_char;
3869 int reg_scaled;
3870 int i;
3871
3872 vfp_regs_free &= ~(mask << regno);
3873 reg_scaled = regno / shift;
3874 reg_char = arm_vfp_cprc_reg_char (vfp_base_type);
3875 for (i = 0; i < vfp_base_count; i++)
3876 {
3877 char name_buf[4];
3878 int regnum;
3879 if (reg_char == 'q')
3880 arm_neon_quad_write (gdbarch, regcache, reg_scaled + i,
3881 val + i * unit_length);
3882 else
3883 {
3884 xsnprintf (name_buf, sizeof (name_buf), "%c%d",
3885 reg_char, reg_scaled + i);
3886 regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
3887 strlen (name_buf));
3888 regcache->cooked_write (regnum, val + i * unit_length);
3889 }
3890 }
3891 continue;
3892 }
3893 else
3894 {
3895 /* This CPRC could not go in VFP registers, so all VFP
3896 registers are now marked as used. */
3897 vfp_regs_free = 0;
3898 }
3899 }
3900
3901 /* Push stack padding for doubleword alignment. */
3902 if (nstack & (align - 1))
3903 {
3904 si = push_stack_item (si, val, ARM_INT_REGISTER_SIZE);
3905 nstack += ARM_INT_REGISTER_SIZE;
3906 }
3907
3908 /* Doubleword aligned quantities must go in even register pairs. */
3909 if (may_use_core_reg
3910 && argreg <= ARM_LAST_ARG_REGNUM
3911 && align > ARM_INT_REGISTER_SIZE
3912 && argreg & 1)
3913 argreg++;
3914
3915 /* If the argument is a pointer to a function, and it is a
3916 Thumb function, create a LOCAL copy of the value and set
3917 the THUMB bit in it. */
3918 if (TYPE_CODE_PTR == typecode
3919 && target_type != NULL
3920 && TYPE_CODE_FUNC == check_typedef (target_type)->code ())
3921 {
3922 CORE_ADDR regval = extract_unsigned_integer (val, len, byte_order);
3923 if (arm_pc_is_thumb (gdbarch, regval))
3924 {
3925 bfd_byte *copy = (bfd_byte *) alloca (len);
3926 store_unsigned_integer (copy, len, byte_order,
3927 MAKE_THUMB_ADDR (regval));
3928 val = copy;
3929 }
3930 }
3931
3932 /* Copy the argument to general registers or the stack in
3933 register-sized pieces. Large arguments are split between
3934 registers and stack. */
3935 while (len > 0)
3936 {
3937 int partial_len = len < ARM_INT_REGISTER_SIZE
3938 ? len : ARM_INT_REGISTER_SIZE;
3939 CORE_ADDR regval
3940 = extract_unsigned_integer (val, partial_len, byte_order);
3941
3942 if (may_use_core_reg && argreg <= ARM_LAST_ARG_REGNUM)
3943 {
3944 /* The argument is being passed in a general purpose
3945 register. */
3946 if (byte_order == BFD_ENDIAN_BIG)
3947 regval <<= (ARM_INT_REGISTER_SIZE - partial_len) * 8;
3948
3949 arm_debug_printf ("arg %d in %s = 0x%s", argnum,
3950 gdbarch_register_name (gdbarch, argreg),
3951 phex (regval, ARM_INT_REGISTER_SIZE));
3952
3953 regcache_cooked_write_unsigned (regcache, argreg, regval);
3954 argreg++;
3955 }
3956 else
3957 {
3958 gdb_byte buf[ARM_INT_REGISTER_SIZE];
3959
3960 memset (buf, 0, sizeof (buf));
3961 store_unsigned_integer (buf, partial_len, byte_order, regval);
3962
3963 /* Push the arguments onto the stack. */
3964 arm_debug_printf ("arg %d @ sp + %d", argnum, nstack);
3965 si = push_stack_item (si, buf, ARM_INT_REGISTER_SIZE);
3966 nstack += ARM_INT_REGISTER_SIZE;
3967 }
3968
3969 len -= partial_len;
3970 val += partial_len;
3971 }
3972 }
3973 /* If we have an odd number of words to push, then decrement the stack
3974 by one word now, so first stack argument will be dword aligned. */
3975 if (nstack & 4)
3976 sp -= 4;
3977
3978 while (si)
3979 {
3980 sp -= si->len;
3981 write_memory (sp, si->data, si->len);
3982 si = pop_stack_item (si);
3983 }
3984
3985 /* Finally, update teh SP register. */
3986 regcache_cooked_write_unsigned (regcache, ARM_SP_REGNUM, sp);
3987
3988 return sp;
3989 }
3990
3991
3992 /* Always align the frame to an 8-byte boundary. This is required on
3993 some platforms and harmless on the rest. */
3994
3995 static CORE_ADDR
3996 arm_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
3997 {
3998 /* Align the stack to eight bytes. */
3999 return sp & ~ (CORE_ADDR) 7;
4000 }
4001
4002 static void
4003 print_fpu_flags (struct ui_file *file, int flags)
4004 {
4005 if (flags & (1 << 0))
4006 fputs_filtered ("IVO ", file);
4007 if (flags & (1 << 1))
4008 fputs_filtered ("DVZ ", file);
4009 if (flags & (1 << 2))
4010 fputs_filtered ("OFL ", file);
4011 if (flags & (1 << 3))
4012 fputs_filtered ("UFL ", file);
4013 if (flags & (1 << 4))
4014 fputs_filtered ("INX ", file);
4015 fputc_filtered ('\n', file);
4016 }
4017
4018 /* Print interesting information about the floating point processor
4019 (if present) or emulator. */
4020 static void
4021 arm_print_float_info (struct gdbarch *gdbarch, struct ui_file *file,
4022 struct frame_info *frame, const char *args)
4023 {
4024 unsigned long status = get_frame_register_unsigned (frame, ARM_FPS_REGNUM);
4025 int type;
4026
4027 type = (status >> 24) & 127;
4028 if (status & (1 << 31))
4029 fprintf_filtered (file, _("Hardware FPU type %d\n"), type);
4030 else
4031 fprintf_filtered (file, _("Software FPU type %d\n"), type);
4032 /* i18n: [floating point unit] mask */
4033 fputs_filtered (_("mask: "), file);
4034 print_fpu_flags (file, status >> 16);
4035 /* i18n: [floating point unit] flags */
4036 fputs_filtered (_("flags: "), file);
4037 print_fpu_flags (file, status);
4038 }
4039
4040 /* Construct the ARM extended floating point type. */
4041 static struct type *
4042 arm_ext_type (struct gdbarch *gdbarch)
4043 {
4044 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
4045
4046 if (!tdep->arm_ext_type)
4047 tdep->arm_ext_type
4048 = arch_float_type (gdbarch, -1, "builtin_type_arm_ext",
4049 floatformats_arm_ext);
4050
4051 return tdep->arm_ext_type;
4052 }
4053
4054 static struct type *
4055 arm_neon_double_type (struct gdbarch *gdbarch)
4056 {
4057 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
4058
4059 if (tdep->neon_double_type == NULL)
4060 {
4061 struct type *t, *elem;
4062
4063 t = arch_composite_type (gdbarch, "__gdb_builtin_type_neon_d",
4064 TYPE_CODE_UNION);
4065 elem = builtin_type (gdbarch)->builtin_uint8;
4066 append_composite_type_field (t, "u8", init_vector_type (elem, 8));
4067 elem = builtin_type (gdbarch)->builtin_uint16;
4068 append_composite_type_field (t, "u16", init_vector_type (elem, 4));
4069 elem = builtin_type (gdbarch)->builtin_uint32;
4070 append_composite_type_field (t, "u32", init_vector_type (elem, 2));
4071 elem = builtin_type (gdbarch)->builtin_uint64;
4072 append_composite_type_field (t, "u64", elem);
4073 elem = builtin_type (gdbarch)->builtin_float;
4074 append_composite_type_field (t, "f32", init_vector_type (elem, 2));
4075 elem = builtin_type (gdbarch)->builtin_double;
4076 append_composite_type_field (t, "f64", elem);
4077
4078 t->set_is_vector (true);
4079 t->set_name ("neon_d");
4080 tdep->neon_double_type = t;
4081 }
4082
4083 return tdep->neon_double_type;
4084 }
4085
4086 /* FIXME: The vector types are not correctly ordered on big-endian
4087 targets. Just as s0 is the low bits of d0, d0[0] is also the low
4088 bits of d0 - regardless of what unit size is being held in d0. So
4089 the offset of the first uint8 in d0 is 7, but the offset of the
4090 first float is 4. This code works as-is for little-endian
4091 targets. */
4092
4093 static struct type *
4094 arm_neon_quad_type (struct gdbarch *gdbarch)
4095 {
4096 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
4097
4098 if (tdep->neon_quad_type == NULL)
4099 {
4100 struct type *t, *elem;
4101
4102 t = arch_composite_type (gdbarch, "__gdb_builtin_type_neon_q",
4103 TYPE_CODE_UNION);
4104 elem = builtin_type (gdbarch)->builtin_uint8;
4105 append_composite_type_field (t, "u8", init_vector_type (elem, 16));
4106 elem = builtin_type (gdbarch)->builtin_uint16;
4107 append_composite_type_field (t, "u16", init_vector_type (elem, 8));
4108 elem = builtin_type (gdbarch)->builtin_uint32;
4109 append_composite_type_field (t, "u32", init_vector_type (elem, 4));
4110 elem = builtin_type (gdbarch)->builtin_uint64;
4111 append_composite_type_field (t, "u64", init_vector_type (elem, 2));
4112 elem = builtin_type (gdbarch)->builtin_float;
4113 append_composite_type_field (t, "f32", init_vector_type (elem, 4));
4114 elem = builtin_type (gdbarch)->builtin_double;
4115 append_composite_type_field (t, "f64", init_vector_type (elem, 2));
4116
4117 t->set_is_vector (true);
4118 t->set_name ("neon_q");
4119 tdep->neon_quad_type = t;
4120 }
4121
4122 return tdep->neon_quad_type;
4123 }
4124
4125 /* Return the GDB type object for the "standard" data type of data in
4126 register N. */
4127
4128 static struct type *
4129 arm_register_type (struct gdbarch *gdbarch, int regnum)
4130 {
4131 int num_regs = gdbarch_num_regs (gdbarch);
4132
4133 if (gdbarch_tdep (gdbarch)->have_vfp_pseudos
4134 && regnum >= num_regs && regnum < num_regs + 32)
4135 return builtin_type (gdbarch)->builtin_float;
4136
4137 if (gdbarch_tdep (gdbarch)->have_neon_pseudos
4138 && regnum >= num_regs + 32 && regnum < num_regs + 32 + 16)
4139 return arm_neon_quad_type (gdbarch);
4140
4141 /* If the target description has register information, we are only
4142 in this function so that we can override the types of
4143 double-precision registers for NEON. */
4144 if (tdesc_has_registers (gdbarch_target_desc (gdbarch)))
4145 {
4146 struct type *t = tdesc_register_type (gdbarch, regnum);
4147
4148 if (regnum >= ARM_D0_REGNUM && regnum < ARM_D0_REGNUM + 32
4149 && t->code () == TYPE_CODE_FLT
4150 && gdbarch_tdep (gdbarch)->have_neon)
4151 return arm_neon_double_type (gdbarch);
4152 else
4153 return t;
4154 }
4155
4156 if (regnum >= ARM_F0_REGNUM && regnum < ARM_F0_REGNUM + NUM_FREGS)
4157 {
4158 if (!gdbarch_tdep (gdbarch)->have_fpa_registers)
4159 return builtin_type (gdbarch)->builtin_void;
4160
4161 return arm_ext_type (gdbarch);
4162 }
4163 else if (regnum == ARM_SP_REGNUM)
4164 return builtin_type (gdbarch)->builtin_data_ptr;
4165 else if (regnum == ARM_PC_REGNUM)
4166 return builtin_type (gdbarch)->builtin_func_ptr;
4167 else if (regnum >= ARRAY_SIZE (arm_register_names))
4168 /* These registers are only supported on targets which supply
4169 an XML description. */
4170 return builtin_type (gdbarch)->builtin_int0;
4171 else
4172 return builtin_type (gdbarch)->builtin_uint32;
4173 }
4174
4175 /* Map a DWARF register REGNUM onto the appropriate GDB register
4176 number. */
4177
4178 static int
4179 arm_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
4180 {
4181 /* Core integer regs. */
4182 if (reg >= 0 && reg <= 15)
4183 return reg;
4184
4185 /* Legacy FPA encoding. These were once used in a way which
4186 overlapped with VFP register numbering, so their use is
4187 discouraged, but GDB doesn't support the ARM toolchain
4188 which used them for VFP. */
4189 if (reg >= 16 && reg <= 23)
4190 return ARM_F0_REGNUM + reg - 16;
4191
4192 /* New assignments for the FPA registers. */
4193 if (reg >= 96 && reg <= 103)
4194 return ARM_F0_REGNUM + reg - 96;
4195
4196 /* WMMX register assignments. */
4197 if (reg >= 104 && reg <= 111)
4198 return ARM_WCGR0_REGNUM + reg - 104;
4199
4200 if (reg >= 112 && reg <= 127)
4201 return ARM_WR0_REGNUM + reg - 112;
4202
4203 if (reg >= 192 && reg <= 199)
4204 return ARM_WC0_REGNUM + reg - 192;
4205
4206 /* VFP v2 registers. A double precision value is actually
4207 in d1 rather than s2, but the ABI only defines numbering
4208 for the single precision registers. This will "just work"
4209 in GDB for little endian targets (we'll read eight bytes,
4210 starting in s0 and then progressing to s1), but will be
4211 reversed on big endian targets with VFP. This won't
4212 be a problem for the new Neon quad registers; you're supposed
4213 to use DW_OP_piece for those. */
4214 if (reg >= 64 && reg <= 95)
4215 {
4216 char name_buf[4];
4217
4218 xsnprintf (name_buf, sizeof (name_buf), "s%d", reg - 64);
4219 return user_reg_map_name_to_regnum (gdbarch, name_buf,
4220 strlen (name_buf));
4221 }
4222
4223 /* VFP v3 / Neon registers. This range is also used for VFP v2
4224 registers, except that it now describes d0 instead of s0. */
4225 if (reg >= 256 && reg <= 287)
4226 {
4227 char name_buf[4];
4228
4229 xsnprintf (name_buf, sizeof (name_buf), "d%d", reg - 256);
4230 return user_reg_map_name_to_regnum (gdbarch, name_buf,
4231 strlen (name_buf));
4232 }
4233
4234 return -1;
4235 }
4236
4237 /* Map GDB internal REGNUM onto the Arm simulator register numbers. */
4238 static int
4239 arm_register_sim_regno (struct gdbarch *gdbarch, int regnum)
4240 {
4241 int reg = regnum;
4242 gdb_assert (reg >= 0 && reg < gdbarch_num_regs (gdbarch));
4243
4244 if (regnum >= ARM_WR0_REGNUM && regnum <= ARM_WR15_REGNUM)
4245 return regnum - ARM_WR0_REGNUM + SIM_ARM_IWMMXT_COP0R0_REGNUM;
4246
4247 if (regnum >= ARM_WC0_REGNUM && regnum <= ARM_WC7_REGNUM)
4248 return regnum - ARM_WC0_REGNUM + SIM_ARM_IWMMXT_COP1R0_REGNUM;
4249
4250 if (regnum >= ARM_WCGR0_REGNUM && regnum <= ARM_WCGR7_REGNUM)
4251 return regnum - ARM_WCGR0_REGNUM + SIM_ARM_IWMMXT_COP1R8_REGNUM;
4252
4253 if (reg < NUM_GREGS)
4254 return SIM_ARM_R0_REGNUM + reg;
4255 reg -= NUM_GREGS;
4256
4257 if (reg < NUM_FREGS)
4258 return SIM_ARM_FP0_REGNUM + reg;
4259 reg -= NUM_FREGS;
4260
4261 if (reg < NUM_SREGS)
4262 return SIM_ARM_FPS_REGNUM + reg;
4263 reg -= NUM_SREGS;
4264
4265 internal_error (__FILE__, __LINE__, _("Bad REGNUM %d"), regnum);
4266 }
4267
4268 /* Given BUF, which is OLD_LEN bytes ending at ENDADDR, expand
4269 the buffer to be NEW_LEN bytes ending at ENDADDR. Return
4270 NULL if an error occurs. BUF is freed. */
4271
4272 static gdb_byte *
4273 extend_buffer_earlier (gdb_byte *buf, CORE_ADDR endaddr,
4274 int old_len, int new_len)
4275 {
4276 gdb_byte *new_buf;
4277 int bytes_to_read = new_len - old_len;
4278
4279 new_buf = (gdb_byte *) xmalloc (new_len);
4280 memcpy (new_buf + bytes_to_read, buf, old_len);
4281 xfree (buf);
4282 if (target_read_code (endaddr - new_len, new_buf, bytes_to_read) != 0)
4283 {
4284 xfree (new_buf);
4285 return NULL;
4286 }
4287 return new_buf;
4288 }
4289
4290 /* An IT block is at most the 2-byte IT instruction followed by
4291 four 4-byte instructions. The furthest back we must search to
4292 find an IT block that affects the current instruction is thus
4293 2 + 3 * 4 == 14 bytes. */
4294 #define MAX_IT_BLOCK_PREFIX 14
4295
4296 /* Use a quick scan if there are more than this many bytes of
4297 code. */
4298 #define IT_SCAN_THRESHOLD 32
4299
4300 /* Adjust a breakpoint's address to move breakpoints out of IT blocks.
4301 A breakpoint in an IT block may not be hit, depending on the
4302 condition flags. */
4303 static CORE_ADDR
4304 arm_adjust_breakpoint_address (struct gdbarch *gdbarch, CORE_ADDR bpaddr)
4305 {
4306 gdb_byte *buf;
4307 char map_type;
4308 CORE_ADDR boundary, func_start;
4309 int buf_len;
4310 enum bfd_endian order = gdbarch_byte_order_for_code (gdbarch);
4311 int i, any, last_it, last_it_count;
4312
4313 /* If we are using BKPT breakpoints, none of this is necessary. */
4314 if (gdbarch_tdep (gdbarch)->thumb2_breakpoint == NULL)
4315 return bpaddr;
4316
4317 /* ARM mode does not have this problem. */
4318 if (!arm_pc_is_thumb (gdbarch, bpaddr))
4319 return bpaddr;
4320
4321 /* We are setting a breakpoint in Thumb code that could potentially
4322 contain an IT block. The first step is to find how much Thumb
4323 code there is; we do not need to read outside of known Thumb
4324 sequences. */
4325 map_type = arm_find_mapping_symbol (bpaddr, &boundary);
4326 if (map_type == 0)
4327 /* Thumb-2 code must have mapping symbols to have a chance. */
4328 return bpaddr;
4329
4330 bpaddr = gdbarch_addr_bits_remove (gdbarch, bpaddr);
4331
4332 if (find_pc_partial_function (bpaddr, NULL, &func_start, NULL)
4333 && func_start > boundary)
4334 boundary = func_start;
4335
4336 /* Search for a candidate IT instruction. We have to do some fancy
4337 footwork to distinguish a real IT instruction from the second
4338 half of a 32-bit instruction, but there is no need for that if
4339 there's no candidate. */
4340 buf_len = std::min (bpaddr - boundary, (CORE_ADDR) MAX_IT_BLOCK_PREFIX);
4341 if (buf_len == 0)
4342 /* No room for an IT instruction. */
4343 return bpaddr;
4344
4345 buf = (gdb_byte *) xmalloc (buf_len);
4346 if (target_read_code (bpaddr - buf_len, buf, buf_len) != 0)
4347 return bpaddr;
4348 any = 0;
4349 for (i = 0; i < buf_len; i += 2)
4350 {
4351 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
4352 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
4353 {
4354 any = 1;
4355 break;
4356 }
4357 }
4358
4359 if (any == 0)
4360 {
4361 xfree (buf);
4362 return bpaddr;
4363 }
4364
4365 /* OK, the code bytes before this instruction contain at least one
4366 halfword which resembles an IT instruction. We know that it's
4367 Thumb code, but there are still two possibilities. Either the
4368 halfword really is an IT instruction, or it is the second half of
4369 a 32-bit Thumb instruction. The only way we can tell is to
4370 scan forwards from a known instruction boundary. */
4371 if (bpaddr - boundary > IT_SCAN_THRESHOLD)
4372 {
4373 int definite;
4374
4375 /* There's a lot of code before this instruction. Start with an
4376 optimistic search; it's easy to recognize halfwords that can
4377 not be the start of a 32-bit instruction, and use that to
4378 lock on to the instruction boundaries. */
4379 buf = extend_buffer_earlier (buf, bpaddr, buf_len, IT_SCAN_THRESHOLD);
4380 if (buf == NULL)
4381 return bpaddr;
4382 buf_len = IT_SCAN_THRESHOLD;
4383
4384 definite = 0;
4385 for (i = 0; i < buf_len - sizeof (buf) && ! definite; i += 2)
4386 {
4387 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
4388 if (thumb_insn_size (inst1) == 2)
4389 {
4390 definite = 1;
4391 break;
4392 }
4393 }
4394
4395 /* At this point, if DEFINITE, BUF[I] is the first place we
4396 are sure that we know the instruction boundaries, and it is far
4397 enough from BPADDR that we could not miss an IT instruction
4398 affecting BPADDR. If ! DEFINITE, give up - start from a
4399 known boundary. */
4400 if (! definite)
4401 {
4402 buf = extend_buffer_earlier (buf, bpaddr, buf_len,
4403 bpaddr - boundary);
4404 if (buf == NULL)
4405 return bpaddr;
4406 buf_len = bpaddr - boundary;
4407 i = 0;
4408 }
4409 }
4410 else
4411 {
4412 buf = extend_buffer_earlier (buf, bpaddr, buf_len, bpaddr - boundary);
4413 if (buf == NULL)
4414 return bpaddr;
4415 buf_len = bpaddr - boundary;
4416 i = 0;
4417 }
4418
4419 /* Scan forwards. Find the last IT instruction before BPADDR. */
4420 last_it = -1;
4421 last_it_count = 0;
4422 while (i < buf_len)
4423 {
4424 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
4425 last_it_count--;
4426 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
4427 {
4428 last_it = i;
4429 if (inst1 & 0x0001)
4430 last_it_count = 4;
4431 else if (inst1 & 0x0002)
4432 last_it_count = 3;
4433 else if (inst1 & 0x0004)
4434 last_it_count = 2;
4435 else
4436 last_it_count = 1;
4437 }
4438 i += thumb_insn_size (inst1);
4439 }
4440
4441 xfree (buf);
4442
4443 if (last_it == -1)
4444 /* There wasn't really an IT instruction after all. */
4445 return bpaddr;
4446
4447 if (last_it_count < 1)
4448 /* It was too far away. */
4449 return bpaddr;
4450
4451 /* This really is a trouble spot. Move the breakpoint to the IT
4452 instruction. */
4453 return bpaddr - buf_len + last_it;
4454 }
4455
4456 /* ARM displaced stepping support.
4457
4458 Generally ARM displaced stepping works as follows:
4459
4460 1. When an instruction is to be single-stepped, it is first decoded by
4461 arm_process_displaced_insn. Depending on the type of instruction, it is
4462 then copied to a scratch location, possibly in a modified form. The
4463 copy_* set of functions performs such modification, as necessary. A
4464 breakpoint is placed after the modified instruction in the scratch space
4465 to return control to GDB. Note in particular that instructions which
4466 modify the PC will no longer do so after modification.
4467
4468 2. The instruction is single-stepped, by setting the PC to the scratch
4469 location address, and resuming. Control returns to GDB when the
4470 breakpoint is hit.
4471
4472 3. A cleanup function (cleanup_*) is called corresponding to the copy_*
4473 function used for the current instruction. This function's job is to
4474 put the CPU/memory state back to what it would have been if the
4475 instruction had been executed unmodified in its original location. */
4476
4477 /* NOP instruction (mov r0, r0). */
4478 #define ARM_NOP 0xe1a00000
4479 #define THUMB_NOP 0x4600
4480
4481 /* Helper for register reads for displaced stepping. In particular, this
4482 returns the PC as it would be seen by the instruction at its original
4483 location. */
4484
4485 ULONGEST
4486 displaced_read_reg (regcache *regs, arm_displaced_step_copy_insn_closure *dsc,
4487 int regno)
4488 {
4489 ULONGEST ret;
4490 CORE_ADDR from = dsc->insn_addr;
4491
4492 if (regno == ARM_PC_REGNUM)
4493 {
4494 /* Compute pipeline offset:
4495 - When executing an ARM instruction, PC reads as the address of the
4496 current instruction plus 8.
4497 - When executing a Thumb instruction, PC reads as the address of the
4498 current instruction plus 4. */
4499
4500 if (!dsc->is_thumb)
4501 from += 8;
4502 else
4503 from += 4;
4504
4505 displaced_debug_printf ("read pc value %.8lx",
4506 (unsigned long) from);
4507 return (ULONGEST) from;
4508 }
4509 else
4510 {
4511 regcache_cooked_read_unsigned (regs, regno, &ret);
4512
4513 displaced_debug_printf ("read r%d value %.8lx",
4514 regno, (unsigned long) ret);
4515
4516 return ret;
4517 }
4518 }
4519
4520 static int
4521 displaced_in_arm_mode (struct regcache *regs)
4522 {
4523 ULONGEST ps;
4524 ULONGEST t_bit = arm_psr_thumb_bit (regs->arch ());
4525
4526 regcache_cooked_read_unsigned (regs, ARM_PS_REGNUM, &ps);
4527
4528 return (ps & t_bit) == 0;
4529 }
4530
4531 /* Write to the PC as from a branch instruction. */
4532
4533 static void
4534 branch_write_pc (regcache *regs, arm_displaced_step_copy_insn_closure *dsc,
4535 ULONGEST val)
4536 {
4537 if (!dsc->is_thumb)
4538 /* Note: If bits 0/1 are set, this branch would be unpredictable for
4539 architecture versions < 6. */
4540 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM,
4541 val & ~(ULONGEST) 0x3);
4542 else
4543 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM,
4544 val & ~(ULONGEST) 0x1);
4545 }
4546
4547 /* Write to the PC as from a branch-exchange instruction. */
4548
4549 static void
4550 bx_write_pc (struct regcache *regs, ULONGEST val)
4551 {
4552 ULONGEST ps;
4553 ULONGEST t_bit = arm_psr_thumb_bit (regs->arch ());
4554
4555 regcache_cooked_read_unsigned (regs, ARM_PS_REGNUM, &ps);
4556
4557 if ((val & 1) == 1)
4558 {
4559 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps | t_bit);
4560 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & 0xfffffffe);
4561 }
4562 else if ((val & 2) == 0)
4563 {
4564 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps & ~t_bit);
4565 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val);
4566 }
4567 else
4568 {
4569 /* Unpredictable behaviour. Try to do something sensible (switch to ARM
4570 mode, align dest to 4 bytes). */
4571 warning (_("Single-stepping BX to non-word-aligned ARM instruction."));
4572 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps & ~t_bit);
4573 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & 0xfffffffc);
4574 }
4575 }
4576
4577 /* Write to the PC as if from a load instruction. */
4578
4579 static void
4580 load_write_pc (regcache *regs, arm_displaced_step_copy_insn_closure *dsc,
4581 ULONGEST val)
4582 {
4583 if (DISPLACED_STEPPING_ARCH_VERSION >= 5)
4584 bx_write_pc (regs, val);
4585 else
4586 branch_write_pc (regs, dsc, val);
4587 }
4588
4589 /* Write to the PC as if from an ALU instruction. */
4590
4591 static void
4592 alu_write_pc (regcache *regs, arm_displaced_step_copy_insn_closure *dsc,
4593 ULONGEST val)
4594 {
4595 if (DISPLACED_STEPPING_ARCH_VERSION >= 7 && !dsc->is_thumb)
4596 bx_write_pc (regs, val);
4597 else
4598 branch_write_pc (regs, dsc, val);
4599 }
4600
4601 /* Helper for writing to registers for displaced stepping. Writing to the PC
4602 has a varying effects depending on the instruction which does the write:
4603 this is controlled by the WRITE_PC argument. */
4604
4605 void
4606 displaced_write_reg (regcache *regs, arm_displaced_step_copy_insn_closure *dsc,
4607 int regno, ULONGEST val, enum pc_write_style write_pc)
4608 {
4609 if (regno == ARM_PC_REGNUM)
4610 {
4611 displaced_debug_printf ("writing pc %.8lx", (unsigned long) val);
4612
4613 switch (write_pc)
4614 {
4615 case BRANCH_WRITE_PC:
4616 branch_write_pc (regs, dsc, val);
4617 break;
4618
4619 case BX_WRITE_PC:
4620 bx_write_pc (regs, val);
4621 break;
4622
4623 case LOAD_WRITE_PC:
4624 load_write_pc (regs, dsc, val);
4625 break;
4626
4627 case ALU_WRITE_PC:
4628 alu_write_pc (regs, dsc, val);
4629 break;
4630
4631 case CANNOT_WRITE_PC:
4632 warning (_("Instruction wrote to PC in an unexpected way when "
4633 "single-stepping"));
4634 break;
4635
4636 default:
4637 internal_error (__FILE__, __LINE__,
4638 _("Invalid argument to displaced_write_reg"));
4639 }
4640
4641 dsc->wrote_to_pc = 1;
4642 }
4643 else
4644 {
4645 displaced_debug_printf ("writing r%d value %.8lx",
4646 regno, (unsigned long) val);
4647 regcache_cooked_write_unsigned (regs, regno, val);
4648 }
4649 }
4650
/* Concisely determine whether an instruction INSN references the PC.
   Register fields of interest in INSN should have the corresponding
   fields of BITMASK set to 0b1111.  Return 1 if any such field in
   INSN holds 0b1111 (r15, the PC), else 0.  */

static int
insn_references_pc (uint32_t insn, uint32_t bitmask)
{
  uint32_t probe = 1;

  /* Walk the register fields marked in BITMASK from least to most
     significant; each field is the four consecutive bits starting at
     the lowest remaining set bit of the mask.  */
  while (bitmask != 0)
    {
      while (probe != 0 && (bitmask & probe) == 0)
	probe <<= 1;

      if (probe == 0)
	break;

      uint32_t field_mask = probe * 0xf;

      if ((insn & field_mask) == field_mask)
	return 1;

      bitmask &= ~field_mask;
    }

  return 0;
}
4682
4683 /* The simplest copy function. Many instructions have the same effect no
4684 matter what address they are executed at: in those cases, use this. */
4685
4686 static int
4687 arm_copy_unmodified (struct gdbarch *gdbarch, uint32_t insn, const char *iname,
4688 arm_displaced_step_copy_insn_closure *dsc)
4689 {
4690 displaced_debug_printf ("copying insn %.8lx, opcode/class '%s' unmodified",
4691 (unsigned long) insn, iname);
4692
4693 dsc->modinsn[0] = insn;
4694
4695 return 0;
4696 }
4697
4698 static int
4699 thumb_copy_unmodified_32bit (struct gdbarch *gdbarch, uint16_t insn1,
4700 uint16_t insn2, const char *iname,
4701 arm_displaced_step_copy_insn_closure *dsc)
4702 {
4703 displaced_debug_printf ("copying insn %.4x %.4x, opcode/class '%s' "
4704 "unmodified", insn1, insn2, iname);
4705
4706 dsc->modinsn[0] = insn1;
4707 dsc->modinsn[1] = insn2;
4708 dsc->numinsns = 2;
4709
4710 return 0;
4711 }
4712
4713 /* Copy 16-bit Thumb(Thumb and 16-bit Thumb-2) instruction without any
4714 modification. */
4715 static int
4716 thumb_copy_unmodified_16bit (struct gdbarch *gdbarch, uint16_t insn,
4717 const char *iname,
4718 arm_displaced_step_copy_insn_closure *dsc)
4719 {
4720 displaced_debug_printf ("copying insn %.4x, opcode/class '%s' unmodified",
4721 insn, iname);
4722
4723 dsc->modinsn[0] = insn;
4724
4725 return 0;
4726 }
4727
4728 /* Preload instructions with immediate offset. */
4729
4730 static void
4731 cleanup_preload (struct gdbarch *gdbarch, regcache *regs,
4732 arm_displaced_step_copy_insn_closure *dsc)
4733 {
4734 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
4735 if (!dsc->u.preload.immed)
4736 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
4737 }
4738
4739 static void
4740 install_preload (struct gdbarch *gdbarch, struct regcache *regs,
4741 arm_displaced_step_copy_insn_closure *dsc, unsigned int rn)
4742 {
4743 ULONGEST rn_val;
4744 /* Preload instructions:
4745
4746 {pli/pld} [rn, #+/-imm]
4747 ->
4748 {pli/pld} [r0, #+/-imm]. */
4749
4750 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
4751 rn_val = displaced_read_reg (regs, dsc, rn);
4752 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
4753 dsc->u.preload.immed = 1;
4754
4755 dsc->cleanup = &cleanup_preload;
4756 }
4757
4758 static int
4759 arm_copy_preload (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
4760 arm_displaced_step_copy_insn_closure *dsc)
4761 {
4762 unsigned int rn = bits (insn, 16, 19);
4763
4764 if (!insn_references_pc (insn, 0x000f0000ul))
4765 return arm_copy_unmodified (gdbarch, insn, "preload", dsc);
4766
4767 displaced_debug_printf ("copying preload insn %.8lx", (unsigned long) insn);
4768
4769 dsc->modinsn[0] = insn & 0xfff0ffff;
4770
4771 install_preload (gdbarch, regs, dsc, rn);
4772
4773 return 0;
4774 }
4775
4776 static int
4777 thumb2_copy_preload (struct gdbarch *gdbarch, uint16_t insn1, uint16_t insn2,
4778 regcache *regs, arm_displaced_step_copy_insn_closure *dsc)
4779 {
4780 unsigned int rn = bits (insn1, 0, 3);
4781 unsigned int u_bit = bit (insn1, 7);
4782 int imm12 = bits (insn2, 0, 11);
4783 ULONGEST pc_val;
4784
4785 if (rn != ARM_PC_REGNUM)
4786 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "preload", dsc);
4787
4788 /* PC is only allowed to use in PLI (immediate,literal) Encoding T3, and
4789 PLD (literal) Encoding T1. */
4790 displaced_debug_printf ("copying pld/pli pc (0x%x) %c imm12 %.4x",
4791 (unsigned int) dsc->insn_addr, u_bit ? '+' : '-',
4792 imm12);
4793
4794 if (!u_bit)
4795 imm12 = -1 * imm12;
4796
4797 /* Rewrite instruction {pli/pld} PC imm12 into:
4798 Prepare: tmp[0] <- r0, tmp[1] <- r1, r0 <- pc, r1 <- imm12
4799
4800 {pli/pld} [r0, r1]
4801
4802 Cleanup: r0 <- tmp[0], r1 <- tmp[1]. */
4803
4804 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
4805 dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
4806
4807 pc_val = displaced_read_reg (regs, dsc, ARM_PC_REGNUM);
4808
4809 displaced_write_reg (regs, dsc, 0, pc_val, CANNOT_WRITE_PC);
4810 displaced_write_reg (regs, dsc, 1, imm12, CANNOT_WRITE_PC);
4811 dsc->u.preload.immed = 0;
4812
4813 /* {pli/pld} [r0, r1] */
4814 dsc->modinsn[0] = insn1 & 0xfff0;
4815 dsc->modinsn[1] = 0xf001;
4816 dsc->numinsns = 2;
4817
4818 dsc->cleanup = &cleanup_preload;
4819 return 0;
4820 }
4821
4822 /* Preload instructions with register offset. */
4823
4824 static void
4825 install_preload_reg(struct gdbarch *gdbarch, struct regcache *regs,
4826 arm_displaced_step_copy_insn_closure *dsc, unsigned int rn,
4827 unsigned int rm)
4828 {
4829 ULONGEST rn_val, rm_val;
4830
4831 /* Preload register-offset instructions:
4832
4833 {pli/pld} [rn, rm {, shift}]
4834 ->
4835 {pli/pld} [r0, r1 {, shift}]. */
4836
4837 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
4838 dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
4839 rn_val = displaced_read_reg (regs, dsc, rn);
4840 rm_val = displaced_read_reg (regs, dsc, rm);
4841 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
4842 displaced_write_reg (regs, dsc, 1, rm_val, CANNOT_WRITE_PC);
4843 dsc->u.preload.immed = 0;
4844
4845 dsc->cleanup = &cleanup_preload;
4846 }
4847
4848 static int
4849 arm_copy_preload_reg (struct gdbarch *gdbarch, uint32_t insn,
4850 struct regcache *regs,
4851 arm_displaced_step_copy_insn_closure *dsc)
4852 {
4853 unsigned int rn = bits (insn, 16, 19);
4854 unsigned int rm = bits (insn, 0, 3);
4855
4856
4857 if (!insn_references_pc (insn, 0x000f000ful))
4858 return arm_copy_unmodified (gdbarch, insn, "preload reg", dsc);
4859
4860 displaced_debug_printf ("copying preload insn %.8lx",
4861 (unsigned long) insn);
4862
4863 dsc->modinsn[0] = (insn & 0xfff0fff0) | 0x1;
4864
4865 install_preload_reg (gdbarch, regs, dsc, rn, rm);
4866 return 0;
4867 }
4868
4869 /* Copy/cleanup coprocessor load and store instructions. */
4870
4871 static void
4872 cleanup_copro_load_store (struct gdbarch *gdbarch,
4873 struct regcache *regs,
4874 arm_displaced_step_copy_insn_closure *dsc)
4875 {
4876 ULONGEST rn_val = displaced_read_reg (regs, dsc, 0);
4877
4878 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
4879
4880 if (dsc->u.ldst.writeback)
4881 displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, LOAD_WRITE_PC);
4882 }
4883
4884 static void
4885 install_copro_load_store (struct gdbarch *gdbarch, struct regcache *regs,
4886 arm_displaced_step_copy_insn_closure *dsc,
4887 int writeback, unsigned int rn)
4888 {
4889 ULONGEST rn_val;
4890
4891 /* Coprocessor load/store instructions:
4892
4893 {stc/stc2} [<Rn>, #+/-imm] (and other immediate addressing modes)
4894 ->
4895 {stc/stc2} [r0, #+/-imm].
4896
4897 ldc/ldc2 are handled identically. */
4898
4899 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
4900 rn_val = displaced_read_reg (regs, dsc, rn);
4901 /* PC should be 4-byte aligned. */
4902 rn_val = rn_val & 0xfffffffc;
4903 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
4904
4905 dsc->u.ldst.writeback = writeback;
4906 dsc->u.ldst.rn = rn;
4907
4908 dsc->cleanup = &cleanup_copro_load_store;
4909 }
4910
4911 static int
4912 arm_copy_copro_load_store (struct gdbarch *gdbarch, uint32_t insn,
4913 struct regcache *regs,
4914 arm_displaced_step_copy_insn_closure *dsc)
4915 {
4916 unsigned int rn = bits (insn, 16, 19);
4917
4918 if (!insn_references_pc (insn, 0x000f0000ul))
4919 return arm_copy_unmodified (gdbarch, insn, "copro load/store", dsc);
4920
4921 displaced_debug_printf ("copying coprocessor load/store insn %.8lx",
4922 (unsigned long) insn);
4923
4924 dsc->modinsn[0] = insn & 0xfff0ffff;
4925
4926 install_copro_load_store (gdbarch, regs, dsc, bit (insn, 25), rn);
4927
4928 return 0;
4929 }
4930
4931 static int
4932 thumb2_copy_copro_load_store (struct gdbarch *gdbarch, uint16_t insn1,
4933 uint16_t insn2, struct regcache *regs,
4934 arm_displaced_step_copy_insn_closure *dsc)
4935 {
4936 unsigned int rn = bits (insn1, 0, 3);
4937
4938 if (rn != ARM_PC_REGNUM)
4939 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
4940 "copro load/store", dsc);
4941
4942 displaced_debug_printf ("copying coprocessor load/store insn %.4x%.4x",
4943 insn1, insn2);
4944
4945 dsc->modinsn[0] = insn1 & 0xfff0;
4946 dsc->modinsn[1] = insn2;
4947 dsc->numinsns = 2;
4948
4949 /* This function is called for copying instruction LDC/LDC2/VLDR, which
4950 doesn't support writeback, so pass 0. */
4951 install_copro_load_store (gdbarch, regs, dsc, 0, rn);
4952
4953 return 0;
4954 }
4955
4956 /* Clean up branch instructions (actually perform the branch, by setting
4957 PC). */
4958
4959 static void
4960 cleanup_branch (struct gdbarch *gdbarch, struct regcache *regs,
4961 arm_displaced_step_copy_insn_closure *dsc)
4962 {
4963 uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
4964 int branch_taken = condition_true (dsc->u.branch.cond, status);
4965 enum pc_write_style write_pc = dsc->u.branch.exchange
4966 ? BX_WRITE_PC : BRANCH_WRITE_PC;
4967
4968 if (!branch_taken)
4969 return;
4970
4971 if (dsc->u.branch.link)
4972 {
4973 /* The value of LR should be the next insn of current one. In order
4974 not to confuse logic handling later insn `bx lr', if current insn mode
4975 is Thumb, the bit 0 of LR value should be set to 1. */
4976 ULONGEST next_insn_addr = dsc->insn_addr + dsc->insn_size;
4977
4978 if (dsc->is_thumb)
4979 next_insn_addr |= 0x1;
4980
4981 displaced_write_reg (regs, dsc, ARM_LR_REGNUM, next_insn_addr,
4982 CANNOT_WRITE_PC);
4983 }
4984
4985 displaced_write_reg (regs, dsc, ARM_PC_REGNUM, dsc->u.branch.dest, write_pc);
4986 }
4987
4988 /* Copy B/BL/BLX instructions with immediate destinations. */
4989
4990 static void
4991 install_b_bl_blx (struct gdbarch *gdbarch, struct regcache *regs,
4992 arm_displaced_step_copy_insn_closure *dsc,
4993 unsigned int cond, int exchange, int link, long offset)
4994 {
4995 /* Implement "BL<cond> <label>" as:
4996
4997 Preparation: cond <- instruction condition
4998 Insn: mov r0, r0 (nop)
4999 Cleanup: if (condition true) { r14 <- pc; pc <- label }.
5000
5001 B<cond> similar, but don't set r14 in cleanup. */
5002
5003 dsc->u.branch.cond = cond;
5004 dsc->u.branch.link = link;
5005 dsc->u.branch.exchange = exchange;
5006
5007 dsc->u.branch.dest = dsc->insn_addr;
5008 if (link && exchange)
5009 /* For BLX, offset is computed from the Align (PC, 4). */
5010 dsc->u.branch.dest = dsc->u.branch.dest & 0xfffffffc;
5011
5012 if (dsc->is_thumb)
5013 dsc->u.branch.dest += 4 + offset;
5014 else
5015 dsc->u.branch.dest += 8 + offset;
5016
5017 dsc->cleanup = &cleanup_branch;
5018 }
5019 static int
5020 arm_copy_b_bl_blx (struct gdbarch *gdbarch, uint32_t insn,
5021 regcache *regs, arm_displaced_step_copy_insn_closure *dsc)
5022 {
5023 unsigned int cond = bits (insn, 28, 31);
5024 int exchange = (cond == 0xf);
5025 int link = exchange || bit (insn, 24);
5026 long offset;
5027
5028 displaced_debug_printf ("copying %s immediate insn %.8lx",
5029 (exchange) ? "blx" : (link) ? "bl" : "b",
5030 (unsigned long) insn);
5031 if (exchange)
5032 /* For BLX, set bit 0 of the destination. The cleanup_branch function will
5033 then arrange the switch into Thumb mode. */
5034 offset = (bits (insn, 0, 23) << 2) | (bit (insn, 24) << 1) | 1;
5035 else
5036 offset = bits (insn, 0, 23) << 2;
5037
5038 if (bit (offset, 25))
5039 offset = offset | ~0x3ffffff;
5040
5041 dsc->modinsn[0] = ARM_NOP;
5042
5043 install_b_bl_blx (gdbarch, regs, dsc, cond, exchange, link, offset);
5044 return 0;
5045 }
5046
/* Copy 32-bit Thumb branch instructions (B, BL, BLX) with immediate
   destinations.  The branch offset is reconstructed from the split
   S/J1/J2/imm fields of the Thumb-2 encodings.  */
static int
thumb2_copy_b_bl_blx (struct gdbarch *gdbarch, uint16_t insn1,
		      uint16_t insn2, struct regcache *regs,
		      arm_displaced_step_copy_insn_closure *dsc)
{
  /* Bit 14 of the second halfword distinguishes BL/BLX from B; for
     BL/BLX, bit 12 clear means BLX (interworking).  */
  int link = bit (insn2, 14);
  int exchange = link && !bit (insn2, 12);
  int cond = INST_AL;
  long offset = 0;
  int j1 = bit (insn2, 13);
  int j2 = bit (insn2, 11);
  /* S is the sign bit, sign-extended: 0 or -1.  */
  int s = sbits (insn1, 10, 10);
  /* I1/I2 are derived from J1/J2 XORed with S (bit 10 of INSN1).  */
  int i1 = !(j1 ^ bit (insn1, 10));
  int i2 = !(j2 ^ bit (insn1, 10));

  /* NOTE(review): `s << 24' and `s << 20' below left-shift a possibly
     negative value, which ISO C leaves undefined; this relies on the
     compiler producing the obvious two's-complement result -- confirm
     before touching.  */
  if (!link && !exchange) /* B */
    {
      offset = (bits (insn2, 0, 10) << 1);
      if (bit (insn2, 12)) /* Encoding T4 */
	{
	  offset |= (bits (insn1, 0, 9) << 12)
	    | (i2 << 22)
	    | (i1 << 23)
	    | (s << 24);
	  cond = INST_AL;
	}
      else /* Encoding T3 */
	{
	  /* Conditional branch: shorter immediate, condition in
	     bits 6-9 of the first halfword.  */
	  offset |= (bits (insn1, 0, 5) << 12)
	    | (j1 << 18)
	    | (j2 << 19)
	    | (s << 20);
	  cond = bits (insn1, 6, 9);
	}
    }
  else
    {
      /* BL / BLX: always unconditional.  BLX drops the low bit and
	 scales by 4; BL scales by 2.  */
      offset = (bits (insn1, 0, 9) << 12);
      offset |= ((i2 << 22) | (i1 << 23) | (s << 24));
      offset |= exchange ?
	(bits (insn2, 1, 10) << 2) : (bits (insn2, 0, 10) << 1);
    }

  displaced_debug_printf ("copying %s insn %.4x %.4x with offset %.8lx",
			  link ? (exchange) ? "blx" : "bl" : "b",
			  insn1, insn2, offset);

  /* Execute a NOP in the scratch area; cleanup_branch performs the
     actual branch (and link) afterwards.  */
  dsc->modinsn[0] = THUMB_NOP;

  install_b_bl_blx (gdbarch, regs, dsc, cond, exchange, link, offset);
  return 0;
}
5099
/* Copy B Thumb instructions.  Handles the 16-bit conditional branch
   (Encoding T1, prefix 0xd) and unconditional branch (Encoding T2,
   prefix 0xe); the copied instruction is replaced by a NOP and the
   branch itself is performed by cleanup_branch.  */
static int
thumb_copy_b (struct gdbarch *gdbarch, uint16_t insn,
	      arm_displaced_step_copy_insn_closure *dsc)
{
  unsigned int cond = 0;
  int offset = 0;
  unsigned short bit_12_15 = bits (insn, 12, 15);
  CORE_ADDR from = dsc->insn_addr;

  /* NOTE(review): if neither encoding below matches, COND/OFFSET stay
     zero and we fall through to install a branch -- presumably callers
     only dispatch T1/T2 encodings here; confirm at the call sites.  */
  if (bit_12_15 == 0xd)
    {
      /* Encoding T1: imm8 is halfword-scaled; shifting the whole insn
	 left by one lets sbits extract imm8:'0' sign-extended.  */
      /* offset = SignExtend (imm8:0, 32) */
      offset = sbits ((insn << 1), 0, 8);
      cond = bits (insn, 8, 11);
    }
  else if (bit_12_15 == 0xe) /* Encoding T2 */
    {
      /* Unconditional: imm11, likewise halfword-scaled.  */
      offset = sbits ((insn << 1), 0, 11);
      cond = INST_AL;
    }

  displaced_debug_printf ("copying b immediate insn %.4x with offset %d",
			  insn, offset);

  dsc->u.branch.cond = cond;
  dsc->u.branch.link = 0;
  dsc->u.branch.exchange = 0;
  /* Destination is relative to the pipelined PC (insn address + 4 in
     Thumb state).  */
  dsc->u.branch.dest = from + 4 + offset;

  dsc->modinsn[0] = THUMB_NOP;

  dsc->cleanup = &cleanup_branch;

  return 0;
}
5136
5137 /* Copy BX/BLX with register-specified destinations. */
5138
5139 static void
5140 install_bx_blx_reg (struct gdbarch *gdbarch, struct regcache *regs,
5141 arm_displaced_step_copy_insn_closure *dsc, int link,
5142 unsigned int cond, unsigned int rm)
5143 {
5144 /* Implement {BX,BLX}<cond> <reg>" as:
5145
5146 Preparation: cond <- instruction condition
5147 Insn: mov r0, r0 (nop)
5148 Cleanup: if (condition true) { r14 <- pc; pc <- dest; }.
5149
5150 Don't set r14 in cleanup for BX. */
5151
5152 dsc->u.branch.dest = displaced_read_reg (regs, dsc, rm);
5153
5154 dsc->u.branch.cond = cond;
5155 dsc->u.branch.link = link;
5156
5157 dsc->u.branch.exchange = 1;
5158
5159 dsc->cleanup = &cleanup_branch;
5160 }
5161
5162 static int
5163 arm_copy_bx_blx_reg (struct gdbarch *gdbarch, uint32_t insn,
5164 regcache *regs, arm_displaced_step_copy_insn_closure *dsc)
5165 {
5166 unsigned int cond = bits (insn, 28, 31);
5167 /* BX: x12xxx1x
5168 BLX: x12xxx3x. */
5169 int link = bit (insn, 5);
5170 unsigned int rm = bits (insn, 0, 3);
5171
5172 displaced_debug_printf ("copying insn %.8lx", (unsigned long) insn);
5173
5174 dsc->modinsn[0] = ARM_NOP;
5175
5176 install_bx_blx_reg (gdbarch, regs, dsc, link, cond, rm);
5177 return 0;
5178 }
5179
5180 static int
5181 thumb_copy_bx_blx_reg (struct gdbarch *gdbarch, uint16_t insn,
5182 struct regcache *regs,
5183 arm_displaced_step_copy_insn_closure *dsc)
5184 {
5185 int link = bit (insn, 7);
5186 unsigned int rm = bits (insn, 3, 6);
5187
5188 displaced_debug_printf ("copying insn %.4x", (unsigned short) insn);
5189
5190 dsc->modinsn[0] = THUMB_NOP;
5191
5192 install_bx_blx_reg (gdbarch, regs, dsc, link, INST_AL, rm);
5193
5194 return 0;
5195 }
5196
5197
5198 /* Copy/cleanup arithmetic/logic instruction with immediate RHS. */
5199
5200 static void
5201 cleanup_alu_imm (struct gdbarch *gdbarch,
5202 regcache *regs, arm_displaced_step_copy_insn_closure *dsc)
5203 {
5204 ULONGEST rd_val = displaced_read_reg (regs, dsc, 0);
5205 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
5206 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
5207 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
5208 }
5209
/* Copy ARM ALU instructions with an immediate RHS whose Rd or Rn is the
   PC, remapping them onto scratch registers r0/r1.  */
static int
arm_copy_alu_imm (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
		  arm_displaced_step_copy_insn_closure *dsc)
{
  unsigned int rn = bits (insn, 16, 19);
  unsigned int rd = bits (insn, 12, 15);
  unsigned int op = bits (insn, 21, 24);
  int is_mov = (op == 0xd);
  ULONGEST rd_val, rn_val;

  /* Mask 0x000ff000 covers the Rn and Rd fields; if neither names the
     PC the instruction can run unmodified in the scratch area.  */
  if (!insn_references_pc (insn, 0x000ff000ul))
    return arm_copy_unmodified (gdbarch, insn, "ALU immediate", dsc);

  displaced_debug_printf ("copying immediate %s insn %.8lx",
			  is_mov ? "move" : "ALU",
			  (unsigned long) insn);

  /* Instruction is of form:

     <op><cond> rd, [rn,] #imm

     Rewrite as:

     Preparation: tmp1, tmp2 <- r0, r1;
		  r0, r1 <- rd, rn
     Insn: <op><cond> r0, r1, #imm
     Cleanup: rd <- r0; r0 <- tmp1; r1 <- tmp2
  */

  dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
  dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
  rn_val = displaced_read_reg (regs, dsc, rn);
  rd_val = displaced_read_reg (regs, dsc, rd);
  displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
  dsc->rd = rd;

  if (is_mov)
    dsc->modinsn[0] = insn & 0xfff00fff;	/* MOV has no Rn field.  */
  else
    dsc->modinsn[0] = (insn & 0xfff00fff) | 0x10000;	/* Rn -> r1.  */

  dsc->cleanup = &cleanup_alu_imm;

  return 0;
}
5256
/* Copy 32-bit Thumb MOV (immediate) instructions whose Rd (or the Rm of
   the register form) is the PC, remapping onto r0/r1.  */
static int
thumb2_copy_alu_imm (struct gdbarch *gdbarch, uint16_t insn1,
		     uint16_t insn2, struct regcache *regs,
		     arm_displaced_step_copy_insn_closure *dsc)
{
  unsigned int op = bits (insn1, 5, 8);
  unsigned int rn, rm, rd;
  ULONGEST rd_val, rn_val;

  rn = bits (insn1, 0, 3); /* Rn */
  rm = bits (insn2, 0, 3); /* Rm */
  rd = bits (insn2, 8, 11); /* Rd */

  /* This routine is only called for instruction MOV.  */
  gdb_assert (op == 0x2 && rn == 0xf);

  if (rm != ARM_PC_REGNUM && rd != ARM_PC_REGNUM)
    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "ALU imm", dsc);

  displaced_debug_printf ("copying reg %s insn %.4x%.4x", "ALU", insn1, insn2);

  /* Instruction is of form:

     <op><cond> rd, [rn,] #imm

     Rewrite as:

     Preparation: tmp1, tmp2 <- r0, r1;
		  r0, r1 <- rd, rn
     Insn: <op><cond> r0, r1, #imm
     Cleanup: rd <- r0; r0 <- tmp1; r1 <- tmp2
  */

  dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
  dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
  rn_val = displaced_read_reg (regs, dsc, rn);
  rd_val = displaced_read_reg (regs, dsc, rd);
  displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
  dsc->rd = rd;

  /* First halfword is unchanged; in the second, clear Rd (bits 8-11) so
     it becomes r0 and set the low register field to r1.  */
  dsc->modinsn[0] = insn1;
  dsc->modinsn[1] = ((insn2 & 0xf0f0) | 0x1);
  dsc->numinsns = 2;

  dsc->cleanup = &cleanup_alu_imm;

  return 0;
}
5306
5307 /* Copy/cleanup arithmetic/logic insns with register RHS. */
5308
5309 static void
5310 cleanup_alu_reg (struct gdbarch *gdbarch,
5311 regcache *regs, arm_displaced_step_copy_insn_closure *dsc)
5312 {
5313 ULONGEST rd_val;
5314 int i;
5315
5316 rd_val = displaced_read_reg (regs, dsc, 0);
5317
5318 for (i = 0; i < 3; i++)
5319 displaced_write_reg (regs, dsc, i, dsc->tmp[i], CANNOT_WRITE_PC);
5320
5321 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
5322 }
5323
/* Install the preparation and cleanup for an ALU instruction with
   register operands, remapping Rd/Rn/Rm onto scratch registers r0-r2.
   The caller has already placed the rewritten instruction in modinsn.  */
static void
install_alu_reg (struct gdbarch *gdbarch, struct regcache *regs,
		 arm_displaced_step_copy_insn_closure *dsc,
		 unsigned int rd, unsigned int rn, unsigned int rm)
{
  ULONGEST rd_val, rn_val, rm_val;

  /* Instruction is of form:

     <op><cond> rd, [rn,] rm [, <shift>]

     Rewrite as:

     Preparation: tmp1, tmp2, tmp3 <- r0, r1, r2;
		  r0, r1, r2 <- rd, rn, rm
     Insn: <op><cond> r0, [r1,] r2 [, <shift>]
     Cleanup: rd <- r0; r0, r1, r2 <- tmp1, tmp2, tmp3
  */

  /* Save the scratch registers before overwriting them.  */
  dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
  dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
  dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
  rd_val = displaced_read_reg (regs, dsc, rd);
  rn_val = displaced_read_reg (regs, dsc, rn);
  rm_val = displaced_read_reg (regs, dsc, rm);
  displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 2, rm_val, CANNOT_WRITE_PC);
  dsc->rd = rd;

  dsc->cleanup = &cleanup_alu_reg;
}
5356
5357 static int
5358 arm_copy_alu_reg (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
5359 arm_displaced_step_copy_insn_closure *dsc)
5360 {
5361 unsigned int op = bits (insn, 21, 24);
5362 int is_mov = (op == 0xd);
5363
5364 if (!insn_references_pc (insn, 0x000ff00ful))
5365 return arm_copy_unmodified (gdbarch, insn, "ALU reg", dsc);
5366
5367 displaced_debug_printf ("copying reg %s insn %.8lx",
5368 is_mov ? "move" : "ALU", (unsigned long) insn);
5369
5370 if (is_mov)
5371 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x2;
5372 else
5373 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x10002;
5374
5375 install_alu_reg (gdbarch, regs, dsc, bits (insn, 12, 15), bits (insn, 16, 19),
5376 bits (insn, 0, 3));
5377 return 0;
5378 }
5379
5380 static int
5381 thumb_copy_alu_reg (struct gdbarch *gdbarch, uint16_t insn,
5382 struct regcache *regs,
5383 arm_displaced_step_copy_insn_closure *dsc)
5384 {
5385 unsigned rm, rd;
5386
5387 rm = bits (insn, 3, 6);
5388 rd = (bit (insn, 7) << 3) | bits (insn, 0, 2);
5389
5390 if (rd != ARM_PC_REGNUM && rm != ARM_PC_REGNUM)
5391 return thumb_copy_unmodified_16bit (gdbarch, insn, "ALU reg", dsc);
5392
5393 displaced_debug_printf ("copying ALU reg insn %.4x", (unsigned short) insn);
5394
5395 dsc->modinsn[0] = ((insn & 0xff00) | 0x10);
5396
5397 install_alu_reg (gdbarch, regs, dsc, rd, rd, rm);
5398
5399 return 0;
5400 }
5401
5402 /* Cleanup/copy arithmetic/logic insns with shifted register RHS. */
5403
5404 static void
5405 cleanup_alu_shifted_reg (struct gdbarch *gdbarch,
5406 struct regcache *regs,
5407 arm_displaced_step_copy_insn_closure *dsc)
5408 {
5409 ULONGEST rd_val = displaced_read_reg (regs, dsc, 0);
5410 int i;
5411
5412 for (i = 0; i < 4; i++)
5413 displaced_write_reg (regs, dsc, i, dsc->tmp[i], CANNOT_WRITE_PC);
5414
5415 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
5416 }
5417
/* Install the preparation and cleanup for an ALU instruction with a
   shifted-register RHS, remapping Rd/Rn/Rm/Rs onto r0-r3.  The caller
   has already rewritten the instruction in modinsn.  */
static void
install_alu_shifted_reg (struct gdbarch *gdbarch, struct regcache *regs,
			 arm_displaced_step_copy_insn_closure *dsc,
			 unsigned int rd, unsigned int rn, unsigned int rm,
			 unsigned rs)
{
  int i;
  ULONGEST rd_val, rn_val, rm_val, rs_val;

  /* Instruction is of form:

     <op><cond> rd, [rn,] rm, <shift> rs

     Rewrite as:

     Preparation: tmp1, tmp2, tmp3, tmp4 <- r0, r1, r2, r3
		  r0, r1, r2, r3 <- rd, rn, rm, rs
     Insn: <op><cond> r0, r1, r2, <shift> r3
     Cleanup: tmp5 <- r0
	      r0, r1, r2, r3 <- tmp1, tmp2, tmp3, tmp4
	      rd <- tmp5
  */

  /* Save all four scratch registers before overwriting them.  */
  for (i = 0; i < 4; i++)
    dsc->tmp[i] = displaced_read_reg (regs, dsc, i);

  rd_val = displaced_read_reg (regs, dsc, rd);
  rn_val = displaced_read_reg (regs, dsc, rn);
  rm_val = displaced_read_reg (regs, dsc, rm);
  rs_val = displaced_read_reg (regs, dsc, rs);
  displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 2, rm_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 3, rs_val, CANNOT_WRITE_PC);
  dsc->rd = rd;
  dsc->cleanup = &cleanup_alu_shifted_reg;
}
5455
5456 static int
5457 arm_copy_alu_shifted_reg (struct gdbarch *gdbarch, uint32_t insn,
5458 struct regcache *regs,
5459 arm_displaced_step_copy_insn_closure *dsc)
5460 {
5461 unsigned int op = bits (insn, 21, 24);
5462 int is_mov = (op == 0xd);
5463 unsigned int rd, rn, rm, rs;
5464
5465 if (!insn_references_pc (insn, 0x000fff0ful))
5466 return arm_copy_unmodified (gdbarch, insn, "ALU shifted reg", dsc);
5467
5468 displaced_debug_printf ("copying shifted reg %s insn %.8lx",
5469 is_mov ? "move" : "ALU",
5470 (unsigned long) insn);
5471
5472 rn = bits (insn, 16, 19);
5473 rm = bits (insn, 0, 3);
5474 rs = bits (insn, 8, 11);
5475 rd = bits (insn, 12, 15);
5476
5477 if (is_mov)
5478 dsc->modinsn[0] = (insn & 0xfff000f0) | 0x302;
5479 else
5480 dsc->modinsn[0] = (insn & 0xfff000f0) | 0x10302;
5481
5482 install_alu_shifted_reg (gdbarch, regs, dsc, rd, rn, rm, rs);
5483
5484 return 0;
5485 }
5486
5487 /* Clean up load instructions. */
5488
static void
cleanup_load (struct gdbarch *gdbarch, struct regcache *regs,
	      arm_displaced_step_copy_insn_closure *dsc)
{
  ULONGEST rt_val, rt_val2 = 0, rn_val;

  /* The rewritten insn loaded into r0 (and r1 for doubleword transfers),
     with the base register remapped to r2.  */
  rt_val = displaced_read_reg (regs, dsc, 0);
  if (dsc->u.ldst.xfersize == 8)
    rt_val2 = displaced_read_reg (regs, dsc, 1);
  rn_val = displaced_read_reg (regs, dsc, 2);

  /* Restore the scratch registers clobbered during setup (r3 was used
     only for the register-offset addressing forms).  */
  displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
  if (dsc->u.ldst.xfersize > 4)
    displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 2, dsc->tmp[2], CANNOT_WRITE_PC);
  if (!dsc->u.ldst.immed)
    displaced_write_reg (regs, dsc, 3, dsc->tmp[3], CANNOT_WRITE_PC);

  /* Handle register writeback.  */
  if (dsc->u.ldst.writeback)
    displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, CANNOT_WRITE_PC);
  /* Put result in right place.  */
  displaced_write_reg (regs, dsc, dsc->rd, rt_val, LOAD_WRITE_PC);
  if (dsc->u.ldst.xfersize == 8)
    displaced_write_reg (regs, dsc, dsc->rd + 1, rt_val2, LOAD_WRITE_PC);
}
5515
5516 /* Clean up store instructions. */
5517
static void
cleanup_store (struct gdbarch *gdbarch, struct regcache *regs,
	       arm_displaced_step_copy_insn_closure *dsc)
{
  /* r2 held the (possibly written-back) base address during the copied
     store.  */
  ULONGEST rn_val = displaced_read_reg (regs, dsc, 2);

  /* Restore every scratch register the setup code clobbered; r4 is only
     used by the PC-store sequence (see arm_copy_ldr_str_ldrb_strb).  */
  displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
  if (dsc->u.ldst.xfersize > 4)
    displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 2, dsc->tmp[2], CANNOT_WRITE_PC);
  if (!dsc->u.ldst.immed)
    displaced_write_reg (regs, dsc, 3, dsc->tmp[3], CANNOT_WRITE_PC);
  if (!dsc->u.ldst.restore_r4)
    displaced_write_reg (regs, dsc, 4, dsc->tmp[4], CANNOT_WRITE_PC);

  /* Writeback.  */
  if (dsc->u.ldst.writeback)
    displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, CANNOT_WRITE_PC);
}
5537
5538 /* Copy "extra" load/store instructions. These are halfword/doubleword
5539 transfers, which have a different encoding to byte/word transfers. */
5540
static int
arm_copy_extra_ld_st (struct gdbarch *gdbarch, uint32_t insn, int unprivileged,
		      regcache *regs, arm_displaced_step_copy_insn_closure *dsc)
{
  unsigned int op1 = bits (insn, 20, 24);
  unsigned int op2 = bits (insn, 5, 6);
  unsigned int rt = bits (insn, 12, 15);
  unsigned int rn = bits (insn, 16, 19);
  unsigned int rm = bits (insn, 0, 3);
  /* Per-opcode tables: whether the form is a load, and how many bytes it
     transfers (8 denotes the doubleword LDRD/STRD forms).  */
  char load[12] = {0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1};
  char bytesize[12] = {2, 2, 2, 2, 8, 1, 8, 1, 8, 2, 8, 2};
  int immed = (op1 & 0x4) != 0;
  int opcode;
  ULONGEST rt_val, rt_val2 = 0, rn_val, rm_val = 0;

  if (!insn_references_pc (insn, 0x000ff00ful))
    return arm_copy_unmodified (gdbarch, insn, "extra load/store", dsc);

  displaced_debug_printf ("copying %sextra load/store insn %.8lx",
			  unprivileged ? "unprivileged " : "",
			  (unsigned long) insn);

  /* Fold op2, the load bit and the immediate bit into one table index;
     indices below 4 correspond to encodings this routine never sees.  */
  opcode = ((op2 << 2) | (op1 & 0x1) | ((op1 & 0x4) >> 1)) - 4;

  if (opcode < 0)
    internal_error (__FILE__, __LINE__,
		    _("copy_extra_ld_st: instruction decode error"));

  /* Save the scratch registers (r3 only for register-offset forms).  */
  dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
  dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
  dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
  if (!immed)
    dsc->tmp[3] = displaced_read_reg (regs, dsc, 3);

  rt_val = displaced_read_reg (regs, dsc, rt);
  if (bytesize[opcode] == 8)
    rt_val2 = displaced_read_reg (regs, dsc, rt + 1);
  rn_val = displaced_read_reg (regs, dsc, rn);
  if (!immed)
    rm_val = displaced_read_reg (regs, dsc, rm);

  /* Stage the operands in r0/r1 (data), r2 (base) and r3 (offset).  */
  displaced_write_reg (regs, dsc, 0, rt_val, CANNOT_WRITE_PC);
  if (bytesize[opcode] == 8)
    displaced_write_reg (regs, dsc, 1, rt_val2, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 2, rn_val, CANNOT_WRITE_PC);
  if (!immed)
    displaced_write_reg (regs, dsc, 3, rm_val, CANNOT_WRITE_PC);

  dsc->rd = rt;
  dsc->u.ldst.xfersize = bytesize[opcode];
  dsc->u.ldst.rn = rn;
  dsc->u.ldst.immed = immed;
  /* Post-indexed (P clear) or pre-indexed with W set both write back.  */
  dsc->u.ldst.writeback = bit (insn, 24) == 0 || bit (insn, 21) != 0;
  dsc->u.ldst.restore_r4 = 0;

  if (immed)
    /* {ldr,str}<width><cond> rt, [rt2,] [rn, #imm]
	->
       {ldr,str}<width><cond> r0, [r1,] [r2, #imm].  */
    dsc->modinsn[0] = (insn & 0xfff00fff) | 0x20000;
  else
    /* {ldr,str}<width><cond> rt, [rt2,] [rn, +/-rm]
	->
       {ldr,str}<width><cond> r0, [r1,] [r2, +/-r3].  */
    dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x20003;

  dsc->cleanup = load[opcode] ? &cleanup_load : &cleanup_store;

  return 0;
}
5611
5612 /* Copy byte/half word/word loads and stores. */
5613
/* Install the preparation and cleanup for a byte/halfword/word load or
   store, remapping Rt/Rn/Rm onto r0/r2/r3.  The caller supplies the
   rewritten instruction(s) in modinsn.  */
static void
install_load_store (struct gdbarch *gdbarch, struct regcache *regs,
		    arm_displaced_step_copy_insn_closure *dsc, int load,
		    int immed, int writeback, int size, int usermode,
		    int rt, int rm, int rn)
{
  ULONGEST rt_val, rn_val, rm_val = 0;

  /* Save scratch registers: r3 only for register-offset forms, r4 only
     for stores (it may be needed by the PC-store sequence below).  */
  dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
  dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
  if (!immed)
    dsc->tmp[3] = displaced_read_reg (regs, dsc, 3);
  if (!load)
    dsc->tmp[4] = displaced_read_reg (regs, dsc, 4);

  rt_val = displaced_read_reg (regs, dsc, rt);
  rn_val = displaced_read_reg (regs, dsc, rn);
  if (!immed)
    rm_val = displaced_read_reg (regs, dsc, rm);

  /* Stage the operands: data in r0, base in r2, offset in r3.  */
  displaced_write_reg (regs, dsc, 0, rt_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 2, rn_val, CANNOT_WRITE_PC);
  if (!immed)
    displaced_write_reg (regs, dsc, 3, rm_val, CANNOT_WRITE_PC);
  dsc->rd = rt;
  dsc->u.ldst.xfersize = size;
  dsc->u.ldst.rn = rn;
  dsc->u.ldst.immed = immed;
  dsc->u.ldst.writeback = writeback;

  /* To write PC we can do:

     Before this sequence of instructions:
     r0 is the PC value got from displaced_read_reg, so r0 = from + 8;
     r2 is the Rn value got from displaced_read_reg.

     Insn1: push {pc}		Write address of STR instruction + offset on stack
     Insn2: pop  {r4}		Read it back from stack, r4 = addr(Insn1) + offset
     Insn3: sub r4, r4, pc	r4 = addr(Insn1) + offset - pc
				= addr(Insn1) + offset - addr(Insn3) - 8
				= offset - 16
     Insn4: add r4, r4, #8	r4 = offset - 8
     Insn5: add r0, r0, r4	r0 = from + 8 + offset - 8
				= from + offset
     Insn6: str r0, [r2, #imm] (or str r0, [r2, r3])

     Otherwise we don't know what value to write for PC, since the offset is
     architecture-dependent (sometimes PC+8, sometimes PC+12).  More details
     of this can be found in Section "Saving from r15" in
     http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dui0204g/Cihbjifh.html */

  dsc->cleanup = load ? &cleanup_load : &cleanup_store;
}
5667
5668
/* Copy 32-bit Thumb LDR (literal) instructions, which load relative to
   the PC, by materialising Align(PC,4) and the immediate in r2/r3 and
   executing a register-offset load instead.  */
static int
thumb2_copy_load_literal (struct gdbarch *gdbarch, uint16_t insn1,
			  uint16_t insn2, struct regcache *regs,
			  arm_displaced_step_copy_insn_closure *dsc, int size)
{
  unsigned int u_bit = bit (insn1, 7);
  unsigned int rt = bits (insn2, 12, 15);
  int imm12 = bits (insn2, 0, 11);
  ULONGEST pc_val;

  displaced_debug_printf ("copying ldr pc (0x%x) R%d %c imm12 %.4x",
			  (unsigned int) dsc->insn_addr, rt, u_bit ? '+' : '-',
			  imm12);

  /* The U bit selects whether the offset is added or subtracted.  */
  if (!u_bit)
    imm12 = -1 * imm12;

  /* Rewrite instruction LDR Rt imm12 into:

     Prepare: tmp[0] <- r0, tmp[1] <- r2, tmp[2] <- r3, r2 <- pc, r3 <- imm12

     LDR R0, R2, R3,

     Cleanup: rt <- r0, r0 <- tmp[0], r2 <- tmp[1], r3 <- tmp[2].  */


  dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
  dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
  dsc->tmp[3] = displaced_read_reg (regs, dsc, 3);

  pc_val = displaced_read_reg (regs, dsc, ARM_PC_REGNUM);

  /* Literal loads use Align(PC, 4) as the base address.  */
  pc_val = pc_val & 0xfffffffc;

  displaced_write_reg (regs, dsc, 2, pc_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 3, imm12, CANNOT_WRITE_PC);

  dsc->rd = rt;

  dsc->u.ldst.xfersize = size;
  dsc->u.ldst.immed = 0;
  dsc->u.ldst.writeback = 0;
  dsc->u.ldst.restore_r4 = 0;

  /* LDR R0, R2, R3 */
  dsc->modinsn[0] = 0xf852;
  dsc->modinsn[1] = 0x3;
  dsc->numinsns = 2;

  dsc->cleanup = &cleanup_load;

  return 0;
}
5722
/* Copy 32-bit Thumb load (immediate or register offset) instructions
   whose Rt or Rn is the PC, remapping Rt->r0, Rn->r2 and Rm->r3.  */
static int
thumb2_copy_load_reg_imm (struct gdbarch *gdbarch, uint16_t insn1,
			  uint16_t insn2, struct regcache *regs,
			  arm_displaced_step_copy_insn_closure *dsc,
			  int writeback, int immed)
{
  unsigned int rt = bits (insn2, 12, 15);
  unsigned int rn = bits (insn1, 0, 3);
  unsigned int rm = bits (insn2, 0, 3);  /* Only valid if !immed.  */
  /* In LDR (register), there is also a register Rm, which is not allowed to
     be PC, so we don't have to check it.  */

  if (rt != ARM_PC_REGNUM && rn != ARM_PC_REGNUM)
    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "load",
					dsc);

  displaced_debug_printf ("copying ldr r%d [r%d] insn %.4x%.4x",
			  rt, rn, insn1, insn2);

  install_load_store (gdbarch, regs, dsc, 1, immed, writeback, 4,
		      0, rt, rm, rn);

  dsc->u.ldst.restore_r4 = 0;

  if (immed)
    /* ldr[b]<cond> rt, [rn, #imm], etc.
       ->
       ldr[b]<cond> r0, [r2, #imm].  */
    {
      dsc->modinsn[0] = (insn1 & 0xfff0) | 0x2;
      dsc->modinsn[1] = insn2 & 0x0fff;
    }
  else
    /* ldr[b]<cond> rt, [rn, rm], etc.
       ->
       ldr[b]<cond> r0, [r2, r3].  */
    {
      dsc->modinsn[0] = (insn1 & 0xfff0) | 0x2;
      dsc->modinsn[1] = (insn2 & 0x0ff0) | 0x3;
    }

  dsc->numinsns = 2;

  return 0;
}
5768
5769
/* Copy ARM byte/word loads and stores that reference the PC.  Loads and
   ordinary stores are remapped onto r0/r2/r3; a store *of* the PC needs
   the multi-instruction sequence described in install_load_store.  */
static int
arm_copy_ldr_str_ldrb_strb (struct gdbarch *gdbarch, uint32_t insn,
			    struct regcache *regs,
			    arm_displaced_step_copy_insn_closure *dsc,
			    int load, int size, int usermode)
{
  int immed = !bit (insn, 25);
  /* Post-indexed (P clear) or pre-indexed with W set both write back.  */
  int writeback = (bit (insn, 24) == 0 || bit (insn, 21) != 0);
  unsigned int rt = bits (insn, 12, 15);
  unsigned int rn = bits (insn, 16, 19);
  unsigned int rm = bits (insn, 0, 3);  /* Only valid if !immed.  */

  if (!insn_references_pc (insn, 0x000ff00ful))
    return arm_copy_unmodified (gdbarch, insn, "load/store", dsc);

  displaced_debug_printf ("copying %s%s r%d [r%d] insn %.8lx",
			  load ? (size == 1 ? "ldrb" : "ldr")
			       : (size == 1 ? "strb" : "str"),
			  usermode ? "t" : "",
			  rt, rn,
			  (unsigned long) insn);

  install_load_store (gdbarch, regs, dsc, load, immed, writeback, size,
		      usermode, rt, rm, rn);

  if (load || rt != ARM_PC_REGNUM)
    {
      dsc->u.ldst.restore_r4 = 0;

      if (immed)
	/* {ldr,str}[b]<cond> rt, [rn, #imm], etc.
	   ->
	   {ldr,str}[b]<cond> r0, [r2, #imm].  */
	dsc->modinsn[0] = (insn & 0xfff00fff) | 0x20000;
      else
	/* {ldr,str}[b]<cond> rt, [rn, rm], etc.
	   ->
	   {ldr,str}[b]<cond> r0, [r2, r3].  */
	dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x20003;
    }
  else
    {
      /* We need to use r4 as scratch.  Make sure it's restored afterwards.  */
      dsc->u.ldst.restore_r4 = 1;
      /* This sequence computes the original PC value into r0 before the
	 store; see the commentary in install_load_store.  */
      dsc->modinsn[0] = 0xe92d8000;  /* push {pc} */
      dsc->modinsn[1] = 0xe8bd0010;  /* pop  {r4} */
      dsc->modinsn[2] = 0xe044400f;  /* sub r4, r4, pc.  */
      dsc->modinsn[3] = 0xe2844008;  /* add r4, r4, #8.  */
      dsc->modinsn[4] = 0xe0800004;  /* add r0, r0, r4.  */

      /* As above.  */
      if (immed)
	dsc->modinsn[5] = (insn & 0xfff00fff) | 0x20000;
      else
	dsc->modinsn[5] = (insn & 0xfff00ff0) | 0x20003;

      dsc->numinsns = 6;
    }

  dsc->cleanup = load ? &cleanup_load : &cleanup_store;

  return 0;
}
5833
5834 /* Cleanup LDM instructions with fully-populated register list. This is an
5835 unfortunate corner case: it's impossible to implement correctly by modifying
5836 the instruction. The issue is as follows: we have an instruction,
5837
5838 ldm rN, {r0-r15}
5839
5840 which we must rewrite to avoid loading PC. A possible solution would be to
5841 do the load in two halves, something like (with suitable cleanup
5842 afterwards):
5843
5844 mov r8, rN
5845 ldm[id][ab] r8!, {r0-r7}
5846 str r7, <temp>
5847 ldm[id][ab] r8, {r7-r14}
5848 <bkpt>
5849
5850 but at present there's no suitable place for <temp>, since the scratch space
5851 is overwritten before the cleanup routine is called. For now, we simply
5852 emulate the instruction. */
5853
static void
cleanup_block_load_all (struct gdbarch *gdbarch, struct regcache *regs,
			arm_displaced_step_copy_insn_closure *dsc)
{
  int inc = dsc->u.block.increment;
  int bump_before = dsc->u.block.before ? (inc ? 4 : -4) : 0;
  int bump_after = dsc->u.block.before ? 0 : (inc ? 4 : -4);
  uint32_t regmask = dsc->u.block.regmask;
  /* Walk the register list in the direction the hardware would.  */
  int regno = inc ? 0 : 15;
  CORE_ADDR xfer_addr = dsc->u.block.xfer_addr;
  int exception_return = dsc->u.block.load && dsc->u.block.user
			 && (regmask & 0x8000) != 0;
  uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
  int do_transfer = condition_true (dsc->u.block.cond, status);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  /* A failed condition means the LDM has no effect at all.  */
  if (!do_transfer)
    return;

  /* If the instruction is ldm rN, {...pc}^, I don't think there's anything
     sensible we can do here.  Complain loudly.  */
  if (exception_return)
    error (_("Cannot single-step exception return"));

  /* We don't handle any stores here for now.  */
  gdb_assert (dsc->u.block.load != 0);

  displaced_debug_printf ("emulating block transfer: %s %s %s",
			  dsc->u.block.load ? "ldm" : "stm",
			  dsc->u.block.increment ? "inc" : "dec",
			  dsc->u.block.before ? "before" : "after");

  /* Emulate the load one register at a time, clearing each register's
     bit from the mask once it has been transferred.  */
  while (regmask)
    {
      uint32_t memword;

      if (inc)
	while (regno <= ARM_PC_REGNUM && (regmask & (1 << regno)) == 0)
	  regno++;
      else
	while (regno >= 0 && (regmask & (1 << regno)) == 0)
	  regno--;

      xfer_addr += bump_before;

      memword = read_memory_unsigned_integer (xfer_addr, 4, byte_order);
      displaced_write_reg (regs, dsc, regno, memword, LOAD_WRITE_PC);

      xfer_addr += bump_after;

      regmask &= ~(1 << regno);
    }

  /* Emulate base-register writeback if the instruction requested it.  */
  if (dsc->u.block.writeback)
    displaced_write_reg (regs, dsc, dsc->u.block.rn, xfer_addr,
			 CANNOT_WRITE_PC);
}
5911
5912 /* Clean up an STM which included the PC in the register list. */
5913
static void
cleanup_block_store_pc (struct gdbarch *gdbarch, struct regcache *regs,
			arm_displaced_step_copy_insn_closure *dsc)
{
  uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
  int store_executed = condition_true (dsc->u.block.cond, status);
  CORE_ADDR pc_stored_at, transferred_regs
    = count_one_bits (dsc->u.block.regmask);
  CORE_ADDR stm_insn_addr;
  uint32_t pc_val;
  long offset;
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  /* If condition code fails, there's nothing else to do.  */
  if (!store_executed)
    return;

  /* Locate the memory word where the STM wrote the PC: it is the last
     (highest-addressed) slot, since PC is the highest-numbered register
     in the list.  */
  if (dsc->u.block.increment)
    {
      pc_stored_at = dsc->u.block.xfer_addr + 4 * transferred_regs;

      if (dsc->u.block.before)
	 pc_stored_at += 4;
    }
  else
    {
      pc_stored_at = dsc->u.block.xfer_addr;

      if (dsc->u.block.before)
	 pc_stored_at -= 4;
    }

  /* The copied STM ran at the scratch area, so the difference between
     the stored value and the scratch address reveals this architecture's
     store-PC offset (PC+8 or PC+12).  */
  pc_val = read_memory_unsigned_integer (pc_stored_at, 4, byte_order);
  stm_insn_addr = dsc->scratch_base;
  offset = pc_val - stm_insn_addr;

  displaced_debug_printf ("detected PC offset %.8lx for STM instruction",
			  offset);

  /* Rewrite the stored PC to the proper value for the non-displaced original
     instruction.  */
  write_memory_unsigned_integer (pc_stored_at, 4, byte_order,
				 dsc->insn_addr + offset);
}
5958
5959 /* Clean up an LDM which includes the PC in the register list. We clumped all
5960 the registers in the transferred list into a contiguous range r0...rX (to
5961 avoid loading PC directly and losing control of the debugged program), so we
5962 must undo that here. */
5963
static void
cleanup_block_load_pc (struct gdbarch *gdbarch,
		       struct regcache *regs,
		       arm_displaced_step_copy_insn_closure *dsc)
{
  uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
  int load_executed = condition_true (dsc->u.block.cond, status);
  unsigned int mask = dsc->u.block.regmask, write_reg = ARM_PC_REGNUM;
  unsigned int regs_loaded = count_one_bits (mask);
  unsigned int num_to_shuffle = regs_loaded, clobbered;

  /* The method employed here will fail if the register list is fully populated
     (we need to avoid loading PC directly).  */
  gdb_assert (num_to_shuffle < 16);

  if (!load_executed)
    return;

  /* CLOBBERED tracks which of the scratch registers r0...r(N-1) still
     hold a loaded value that has not yet been moved or restored.  */
  clobbered = (1 << num_to_shuffle) - 1;

  /* Walk the original register list from the highest register down,
     moving each loaded value from its contiguous scratch slot to its
     real destination (top-down order avoids overwriting a scratch slot
     before its value has been moved out).  */
  while (num_to_shuffle > 0)
    {
      if ((mask & (1 << write_reg)) != 0)
	{
	  unsigned int read_reg = num_to_shuffle - 1;

	  if (read_reg != write_reg)
	    {
	      ULONGEST rval = displaced_read_reg (regs, dsc, read_reg);
	      displaced_write_reg (regs, dsc, write_reg, rval, LOAD_WRITE_PC);
	      displaced_debug_printf ("LDM: move loaded register r%d to r%d",
				      read_reg, write_reg);
	    }
	  else
	    displaced_debug_printf ("LDM: register r%d already in the right "
				    "place", write_reg);

	  clobbered &= ~(1 << write_reg);

	  num_to_shuffle--;
	}

      write_reg--;
    }

  /* Restore any registers we scribbled over.  */
  for (write_reg = 0; clobbered != 0; write_reg++)
    {
      if ((clobbered & (1 << write_reg)) != 0)
	{
	  displaced_write_reg (regs, dsc, write_reg, dsc->tmp[write_reg],
			       CANNOT_WRITE_PC);
	  displaced_debug_printf ("LDM: restored clobbered register r%d",
				  write_reg);
	  clobbered &= ~(1 << write_reg);
	}
    }

  /* Perform register writeback manually.  */
  if (dsc->u.block.writeback)
    {
      ULONGEST new_rn_val = dsc->u.block.xfer_addr;

      if (dsc->u.block.increment)
	new_rn_val += regs_loaded * 4;
      else
	new_rn_val -= regs_loaded * 4;

      displaced_write_reg (regs, dsc, dsc->u.block.rn, new_rn_val,
			   CANNOT_WRITE_PC);
    }
}
6036
6037 /* Handle ldm/stm, apart from some tricky cases which are unlikely to occur
6038 in user-level code (in particular exception return, ldm rn, {...pc}^). */
6039
static int
arm_copy_block_xfer (struct gdbarch *gdbarch, uint32_t insn,
		     struct regcache *regs,
		     arm_displaced_step_copy_insn_closure *dsc)
{
  int load = bit (insn, 20);
  int user = bit (insn, 22);
  int increment = bit (insn, 23);
  int before = bit (insn, 24);
  int writeback = bit (insn, 21);
  int rn = bits (insn, 16, 19);

  /* Block transfers which don't mention PC can be run directly
     out-of-line.  */
  if (rn != ARM_PC_REGNUM && (insn & 0x8000) == 0)
    return arm_copy_unmodified (gdbarch, insn, "ldm/stm", dsc);

  if (rn == ARM_PC_REGNUM)
    {
      warning (_("displaced: Unpredictable LDM or STM with "
		 "base register r15"));
      return arm_copy_unmodified (gdbarch, insn, "unpredictable ldm/stm", dsc);
    }

  displaced_debug_printf ("copying block transfer insn %.8lx",
			  (unsigned long) insn);

  /* Record everything the cleanup routines need to know about the
     original instruction.  */
  dsc->u.block.xfer_addr = displaced_read_reg (regs, dsc, rn);
  dsc->u.block.rn = rn;

  dsc->u.block.load = load;
  dsc->u.block.user = user;
  dsc->u.block.increment = increment;
  dsc->u.block.before = before;
  dsc->u.block.writeback = writeback;
  dsc->u.block.cond = bits (insn, 28, 31);

  dsc->u.block.regmask = insn & 0xffff;

  if (load)
    {
      if ((insn & 0xffff) == 0xffff)
	{
	  /* LDM with a fully-populated register list.  This case is
	     particularly tricky.  Implement for now by fully emulating the
	     instruction (which might not behave perfectly in all cases, but
	     these instructions should be rare enough for that not to matter
	     too much).  */
	  dsc->modinsn[0] = ARM_NOP;

	  dsc->cleanup = &cleanup_block_load_all;
	}
      else
	{
	  /* LDM of a list of registers which includes PC.  Implement by
	     rewriting the list of registers to be transferred into a
	     contiguous chunk r0...rX before doing the transfer, then shuffling
	     registers into the correct places in the cleanup routine.  */
	  unsigned int regmask = insn & 0xffff;
	  unsigned int num_in_list = count_one_bits (regmask), new_regmask;
	  unsigned int i;

	  /* Save the scratch registers the rewritten list will clobber.  */
	  for (i = 0; i < num_in_list; i++)
	    dsc->tmp[i] = displaced_read_reg (regs, dsc, i);

	  /* Writeback makes things complicated.  We need to avoid clobbering
	     the base register with one of the registers in our modified
	     register list, but just using a different register can't work in
	     all cases, e.g.:

	     ldm r14!, {r0-r13,pc}

	     which would need to be rewritten as:

	     ldm rN!, {r0-r14}

	     but that can't work, because there's no free register for N.

	     Solve this by turning off the writeback bit, and emulating
	     writeback manually in the cleanup routine.  */

	  if (writeback)
	    insn &= ~(1 << 21);

	  new_regmask = (1 << num_in_list) - 1;

	  displaced_debug_printf ("LDM r%d%s, {..., pc}: original reg list "
				  "%.4x, modified list %.4x",
				  rn, writeback ? "!" : "",
				  (int) insn & 0xffff, new_regmask);

	  dsc->modinsn[0] = (insn & ~0xffff) | (new_regmask & 0xffff);

	  dsc->cleanup = &cleanup_block_load_pc;
	}
    }
  else
    {
      /* STM of a list of registers which includes PC.  Run the instruction
	 as-is, but out of line: this will store the wrong value for the PC,
	 so we must manually fix up the memory in the cleanup routine.
	 Doing things this way has the advantage that we can auto-detect
	 the offset of the PC write (which is architecture-dependent) in
	 the cleanup routine.  */
      dsc->modinsn[0] = insn;

      dsc->cleanup = &cleanup_block_store_pc;
    }

  return 0;
}
6151
static int
thumb2_copy_block_xfer (struct gdbarch *gdbarch, uint16_t insn1, uint16_t insn2,
			struct regcache *regs,
			arm_displaced_step_copy_insn_closure *dsc)
{
  /* Thumb-2 counterpart of arm_copy_block_xfer: prepare displaced stepping
     of a 32-bit LDM/STM whose register list is in INSN2.  */
  int rn = bits (insn1, 0, 3);
  int load = bit (insn1, 4);
  int writeback = bit (insn1, 5);

  /* Block transfers which don't mention PC can be run directly
     out-of-line.  (Bit 15 of INSN2 is the PC.)  */
  if (rn != ARM_PC_REGNUM && (insn2 & 0x8000) == 0)
    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "ldm/stm", dsc);

  if (rn == ARM_PC_REGNUM)
    {
      warning (_("displaced: Unpredictable LDM or STM with "
		 "base register r15"));
      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					  "unpredictable ldm/stm", dsc);
    }

  displaced_debug_printf ("copying block transfer insn %.4x%.4x",
			  insn1, insn2);

  /* Clear bit 13, since it should be always zero.  */
  dsc->u.block.regmask = (insn2 & 0xdfff);
  dsc->u.block.rn = rn;

  dsc->u.block.load = load;
  dsc->u.block.user = 0;
  dsc->u.block.increment = bit (insn1, 7);
  dsc->u.block.before = bit (insn1, 8);
  dsc->u.block.writeback = writeback;
  dsc->u.block.cond = INST_AL;
  dsc->u.block.xfer_addr = displaced_read_reg (regs, dsc, rn);

  if (load)
    {
      if (dsc->u.block.regmask == 0xffff)
	{
	  /* This branch is impossible to happen: bit 13 was cleared when
	     REGMASK was assigned above, so it can never equal 0xffff.  */
	  gdb_assert (0);
	}
      else
	{
	  /* LDM of a list including PC: rewrite the list into a contiguous
	     run r0...r(N-1) and let cleanup_block_load_pc shuffle the values
	     into place (same strategy as arm_copy_block_xfer).  */
	  unsigned int regmask = dsc->u.block.regmask;
	  unsigned int num_in_list = count_one_bits (regmask), new_regmask;
	  unsigned int i;

	  /* Save r0..r(N-1), which the rewritten list will clobber.  */
	  for (i = 0; i < num_in_list; i++)
	    dsc->tmp[i] = displaced_read_reg (regs, dsc, i);

	  /* Suppress writeback; it is emulated in the cleanup routine.  */
	  if (writeback)
	    insn1 &= ~(1 << 5);

	  new_regmask = (1 << num_in_list) - 1;

	  displaced_debug_printf ("LDM r%d%s, {..., pc}: original reg list "
				  "%.4x, modified list %.4x",
				  rn, writeback ? "!" : "",
				  (int) dsc->u.block.regmask, new_regmask);

	  dsc->modinsn[0] = insn1;
	  dsc->modinsn[1] = (new_regmask & 0xffff);
	  dsc->numinsns = 2;

	  dsc->cleanup = &cleanup_block_load_pc;
	}
    }
  else
    {
      /* STM including PC: run as-is out of line; cleanup_block_store_pc
	 fixes up the stored PC value in memory afterwards.  */
      dsc->modinsn[0] = insn1;
      dsc->modinsn[1] = insn2;
      dsc->numinsns = 2;
      dsc->cleanup = &cleanup_block_store_pc;
    }
  return 0;
}
6231
6232 /* Wrapper over read_memory_unsigned_integer for use in arm_get_next_pcs.
6233 This is used to avoid a dependency on BFD's bfd_endian enum. */
6234
6235 ULONGEST
6236 arm_get_next_pcs_read_memory_unsigned_integer (CORE_ADDR memaddr, int len,
6237 int byte_order)
6238 {
6239 return read_memory_unsigned_integer (memaddr, len,
6240 (enum bfd_endian) byte_order);
6241 }
6242
6243 /* Wrapper over gdbarch_addr_bits_remove for use in arm_get_next_pcs. */
6244
6245 CORE_ADDR
6246 arm_get_next_pcs_addr_bits_remove (struct arm_get_next_pcs *self,
6247 CORE_ADDR val)
6248 {
6249 return gdbarch_addr_bits_remove (self->regcache->arch (), val);
6250 }
6251
6252 /* Wrapper over syscall_next_pc for use in get_next_pcs. */
6253
6254 static CORE_ADDR
6255 arm_get_next_pcs_syscall_next_pc (struct arm_get_next_pcs *self)
6256 {
6257 return 0;
6258 }
6259
6260 /* Wrapper over arm_is_thumb for use in arm_get_next_pcs. */
6261
6262 int
6263 arm_get_next_pcs_is_thumb (struct arm_get_next_pcs *self)
6264 {
6265 return arm_is_thumb (self->regcache);
6266 }
6267
6268 /* single_step() is called just before we want to resume the inferior,
6269 if we want to single-step it but there is no hardware or kernel
6270 single-step support. We find the target of the coming instructions
6271 and breakpoint them. */
6272
6273 std::vector<CORE_ADDR>
6274 arm_software_single_step (struct regcache *regcache)
6275 {
6276 struct gdbarch *gdbarch = regcache->arch ();
6277 struct arm_get_next_pcs next_pcs_ctx;
6278
6279 arm_get_next_pcs_ctor (&next_pcs_ctx,
6280 &arm_get_next_pcs_ops,
6281 gdbarch_byte_order (gdbarch),
6282 gdbarch_byte_order_for_code (gdbarch),
6283 0,
6284 regcache);
6285
6286 std::vector<CORE_ADDR> next_pcs = arm_get_next_pcs (&next_pcs_ctx);
6287
6288 for (CORE_ADDR &pc_ref : next_pcs)
6289 pc_ref = gdbarch_addr_bits_remove (gdbarch, pc_ref);
6290
6291 return next_pcs;
6292 }
6293
6294 /* Cleanup/copy SVC (SWI) instructions. These two functions are overridden
6295 for Linux, where some SVC instructions must be treated specially. */
6296
6297 static void
6298 cleanup_svc (struct gdbarch *gdbarch, struct regcache *regs,
6299 arm_displaced_step_copy_insn_closure *dsc)
6300 {
6301 CORE_ADDR resume_addr = dsc->insn_addr + dsc->insn_size;
6302
6303 displaced_debug_printf ("cleanup for svc, resume at %.8lx",
6304 (unsigned long) resume_addr);
6305
6306 displaced_write_reg (regs, dsc, ARM_PC_REGNUM, resume_addr, BRANCH_WRITE_PC);
6307 }
6308
6309
6310 /* Common copy routine for svc instruction. */
6311
6312 static int
6313 install_svc (struct gdbarch *gdbarch, struct regcache *regs,
6314 arm_displaced_step_copy_insn_closure *dsc)
6315 {
6316 /* Preparation: none.
6317 Insn: unmodified svc.
6318 Cleanup: pc <- insn_addr + insn_size. */
6319
6320 /* Pretend we wrote to the PC, so cleanup doesn't set PC to the next
6321 instruction. */
6322 dsc->wrote_to_pc = 1;
6323
6324 /* Allow OS-specific code to override SVC handling. */
6325 if (dsc->u.svc.copy_svc_os)
6326 return dsc->u.svc.copy_svc_os (gdbarch, regs, dsc);
6327 else
6328 {
6329 dsc->cleanup = &cleanup_svc;
6330 return 0;
6331 }
6332 }
6333
6334 static int
6335 arm_copy_svc (struct gdbarch *gdbarch, uint32_t insn,
6336 regcache *regs, arm_displaced_step_copy_insn_closure *dsc)
6337 {
6338
6339 displaced_debug_printf ("copying svc insn %.8lx",
6340 (unsigned long) insn);
6341
6342 dsc->modinsn[0] = insn;
6343
6344 return install_svc (gdbarch, regs, dsc);
6345 }
6346
6347 static int
6348 thumb_copy_svc (struct gdbarch *gdbarch, uint16_t insn,
6349 regcache *regs, arm_displaced_step_copy_insn_closure *dsc)
6350 {
6351
6352 displaced_debug_printf ("copying svc insn %.4x", insn);
6353
6354 dsc->modinsn[0] = insn;
6355
6356 return install_svc (gdbarch, regs, dsc);
6357 }
6358
6359 /* Copy undefined instructions. */
6360
6361 static int
6362 arm_copy_undef (struct gdbarch *gdbarch, uint32_t insn,
6363 arm_displaced_step_copy_insn_closure *dsc)
6364 {
6365 displaced_debug_printf ("copying undefined insn %.8lx",
6366 (unsigned long) insn);
6367
6368 dsc->modinsn[0] = insn;
6369
6370 return 0;
6371 }
6372
6373 static int
6374 thumb_32bit_copy_undef (struct gdbarch *gdbarch, uint16_t insn1, uint16_t insn2,
6375 arm_displaced_step_copy_insn_closure *dsc)
6376 {
6377
6378 displaced_debug_printf ("copying undefined insn %.4x %.4x",
6379 (unsigned short) insn1, (unsigned short) insn2);
6380
6381 dsc->modinsn[0] = insn1;
6382 dsc->modinsn[1] = insn2;
6383 dsc->numinsns = 2;
6384
6385 return 0;
6386 }
6387
6388 /* Copy unpredictable instructions. */
6389
6390 static int
6391 arm_copy_unpred (struct gdbarch *gdbarch, uint32_t insn,
6392 arm_displaced_step_copy_insn_closure *dsc)
6393 {
6394 displaced_debug_printf ("copying unpredictable insn %.8lx",
6395 (unsigned long) insn);
6396
6397 dsc->modinsn[0] = insn;
6398
6399 return 0;
6400 }
6401
6402 /* The decode_* functions are instruction decoding helpers. They mostly follow
6403 the presentation in the ARM ARM. */
6404
static int
arm_decode_misc_memhint_neon (struct gdbarch *gdbarch, uint32_t insn,
			      struct regcache *regs,
			      arm_displaced_step_copy_insn_closure *dsc)
{
  /* Decode the miscellaneous / memory-hint / Advanced SIMD portion of the
     unconditional instruction space, dispatching on OP1 (bits 20-26) and
     OP2 (bits 4-7) following the ARM ARM tables.  */
  unsigned int op1 = bits (insn, 20, 26), op2 = bits (insn, 4, 7);
  unsigned int rn = bits (insn, 16, 19);

  if (op1 == 0x10 && (op2 & 0x2) == 0x0 && (rn & 0x1) == 0x0)
    return arm_copy_unmodified (gdbarch, insn, "cps", dsc);
  else if (op1 == 0x10 && op2 == 0x0 && (rn & 0x1) == 0x1)
    return arm_copy_unmodified (gdbarch, insn, "setend", dsc);
  else if ((op1 & 0x60) == 0x20)
    return arm_copy_unmodified (gdbarch, insn, "neon dataproc", dsc);
  else if ((op1 & 0x71) == 0x40)
    return arm_copy_unmodified (gdbarch, insn, "neon elt/struct load/store",
				dsc);
  else if ((op1 & 0x77) == 0x41)
    return arm_copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
  else if ((op1 & 0x77) == 0x45)
    return arm_copy_preload (gdbarch, insn, regs, dsc);  /* pli.  */
  else if ((op1 & 0x77) == 0x51)
    {
      if (rn != 0xf)
	return arm_copy_preload (gdbarch, insn, regs, dsc);  /* pld/pldw.  */
      else
	return arm_copy_unpred (gdbarch, insn, dsc);
    }
  else if ((op1 & 0x77) == 0x55)
    return arm_copy_preload (gdbarch, insn, regs, dsc);  /* pld/pldw.  */
  else if (op1 == 0x57)
    /* Memory barriers and CLREX; none of these reference PC, so they can
       run unmodified.  */
    switch (op2)
      {
      case 0x1: return arm_copy_unmodified (gdbarch, insn, "clrex", dsc);
      case 0x4: return arm_copy_unmodified (gdbarch, insn, "dsb", dsc);
      case 0x5: return arm_copy_unmodified (gdbarch, insn, "dmb", dsc);
      case 0x6: return arm_copy_unmodified (gdbarch, insn, "isb", dsc);
      default: return arm_copy_unpred (gdbarch, insn, dsc);
      }
  else if ((op1 & 0x63) == 0x43)
    return arm_copy_unpred (gdbarch, insn, dsc);
  else if ((op2 & 0x1) == 0x0)
    /* Register-offset hint forms; bit 7 of OP1 is don't-care here.  */
    switch (op1 & ~0x80)
      {
      case 0x61:
	return arm_copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
      case 0x65:
	return arm_copy_preload_reg (gdbarch, insn, regs, dsc);  /* pli reg.  */
      case 0x71: case 0x75:
	/* pld/pldw reg.  */
	return arm_copy_preload_reg (gdbarch, insn, regs, dsc);
      case 0x63: case 0x67: case 0x73: case 0x77:
	return arm_copy_unpred (gdbarch, insn, dsc);
      default:
	return arm_copy_undef (gdbarch, insn, dsc);
      }
  else
    return arm_copy_undef (gdbarch, insn, dsc);  /* Probably unreachable.  */
}
6464
static int
arm_decode_unconditional (struct gdbarch *gdbarch, uint32_t insn,
			  struct regcache *regs,
			  arm_displaced_step_copy_insn_closure *dsc)
{
  /* Decode the ARM unconditional (cond == 0b1111) instruction space.  */
  if (bit (insn, 27) == 0)
    return arm_decode_misc_memhint_neon (gdbarch, insn, regs, dsc);
  /* Switch on bits: 0bxxxxx321xxx0xxxxxxxxxxxxxxxxxxxx.  */
  else switch (((insn & 0x7000000) >> 23) | ((insn & 0x100000) >> 20))
    {
    case 0x0: case 0x2:
      return arm_copy_unmodified (gdbarch, insn, "srs", dsc);

    case 0x1: case 0x3:
      return arm_copy_unmodified (gdbarch, insn, "rfe", dsc);

    case 0x4: case 0x5: case 0x6: case 0x7:
      return arm_copy_b_bl_blx (gdbarch, insn, regs, dsc);

    case 0x8:
      switch ((insn & 0xe00000) >> 21)
	{
	case 0x1: case 0x3: case 0x4: case 0x5: case 0x6: case 0x7:
	  /* stc/stc2.  */
	  return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);

	case 0x2:
	  return arm_copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);

	default:
	  return arm_copy_undef (gdbarch, insn, dsc);
	}

    case 0x9:
      {
	/* Whether the base register field is PC (0xf).  */
	int rn_f = (bits (insn, 16, 19) == 0xf);
	switch ((insn & 0xe00000) >> 21)
	  {
	  case 0x1: case 0x3:
	    /* ldc/ldc2 imm (undefined for rn == pc).  */
	    return rn_f ? arm_copy_undef (gdbarch, insn, dsc)
			: arm_copy_copro_load_store (gdbarch, insn, regs, dsc);

	  case 0x2:
	    return arm_copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);

	  case 0x4: case 0x5: case 0x6: case 0x7:
	    /* ldc/ldc2 lit (undefined for rn != pc).  */
	    return rn_f ? arm_copy_copro_load_store (gdbarch, insn, regs, dsc)
			: arm_copy_undef (gdbarch, insn, dsc);

	  default:
	    return arm_copy_undef (gdbarch, insn, dsc);
	  }
      }

    case 0xa:
      return arm_copy_unmodified (gdbarch, insn, "stc/stc2", dsc);

    case 0xb:
      if (bits (insn, 16, 19) == 0xf)
	/* ldc/ldc2 lit.  */
	return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
      else
	return arm_copy_undef (gdbarch, insn, dsc);

    case 0xc:
      if (bit (insn, 4))
	return arm_copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
      else
	return arm_copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);

    case 0xd:
      if (bit (insn, 4))
	return arm_copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
      else
	return arm_copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);

    default:
      return arm_copy_undef (gdbarch, insn, dsc);
    }
}
6547
6548 /* Decode miscellaneous instructions in dp/misc encoding space. */
6549
static int
arm_decode_miscellaneous (struct gdbarch *gdbarch, uint32_t insn,
			  struct regcache *regs,
			  arm_displaced_step_copy_insn_closure *dsc)
{
  /* OP2 (bits 4-6) selects the instruction class; OP (bits 21-22)
     distinguishes variants within it, per the ARM ARM dp/misc tables.  */
  unsigned int op2 = bits (insn, 4, 6);
  unsigned int op = bits (insn, 21, 22);

  switch (op2)
    {
    case 0x0:
      return arm_copy_unmodified (gdbarch, insn, "mrs/msr", dsc);

    case 0x1:
      if (op == 0x1)  /* bx.  */
	return arm_copy_bx_blx_reg (gdbarch, insn, regs, dsc);
      else if (op == 0x3)
	return arm_copy_unmodified (gdbarch, insn, "clz", dsc);
      else
	return arm_copy_undef (gdbarch, insn, dsc);

    case 0x2:
      if (op == 0x1)
	/* Not really supported.  */
	return arm_copy_unmodified (gdbarch, insn, "bxj", dsc);
      else
	return arm_copy_undef (gdbarch, insn, dsc);

    case 0x3:
      if (op == 0x1)
	return arm_copy_bx_blx_reg (gdbarch, insn,
				    regs, dsc);  /* blx register.  */
      else
	return arm_copy_undef (gdbarch, insn, dsc);

    case 0x5:
      return arm_copy_unmodified (gdbarch, insn, "saturating add/sub", dsc);

    case 0x7:
      if (op == 0x1)
	return arm_copy_unmodified (gdbarch, insn, "bkpt", dsc);
      else if (op == 0x3)
	/* Not really supported.  */
	return arm_copy_unmodified (gdbarch, insn, "smc", dsc);
      /* Fall through.  */

    default:
      return arm_copy_undef (gdbarch, insn, dsc);
    }
}
6600
static int
arm_decode_dp_misc (struct gdbarch *gdbarch, uint32_t insn,
		    struct regcache *regs,
		    arm_displaced_step_copy_insn_closure *dsc)
{
  /* Decode the data-processing / miscellaneous instruction space.
     Bit 25 distinguishes immediate forms from register forms.  */
  if (bit (insn, 25))
    switch (bits (insn, 20, 24))
      {
      case 0x10:
	return arm_copy_unmodified (gdbarch, insn, "movw", dsc);

      case 0x14:
	return arm_copy_unmodified (gdbarch, insn, "movt", dsc);

      case 0x12: case 0x16:
	return arm_copy_unmodified (gdbarch, insn, "msr imm", dsc);

      default:
	return arm_copy_alu_imm (gdbarch, insn, regs, dsc);
      }
  else
    {
      uint32_t op1 = bits (insn, 20, 24), op2 = bits (insn, 4, 7);

      if ((op1 & 0x19) != 0x10 && (op2 & 0x1) == 0x0)
	return arm_copy_alu_reg (gdbarch, insn, regs, dsc);
      else if ((op1 & 0x19) != 0x10 && (op2 & 0x9) == 0x1)
	return arm_copy_alu_shifted_reg (gdbarch, insn, regs, dsc);
      else if ((op1 & 0x19) == 0x10 && (op2 & 0x8) == 0x0)
	return arm_decode_miscellaneous (gdbarch, insn, regs, dsc);
      else if ((op1 & 0x19) == 0x10 && (op2 & 0x9) == 0x8)
	return arm_copy_unmodified (gdbarch, insn, "halfword mul/mla", dsc);
      else if ((op1 & 0x10) == 0x00 && op2 == 0x9)
	return arm_copy_unmodified (gdbarch, insn, "mul/mla", dsc);
      else if ((op1 & 0x10) == 0x10 && op2 == 0x9)
	return arm_copy_unmodified (gdbarch, insn, "synch", dsc);
      else if (op2 == 0xb || (op2 & 0xd) == 0xd)
	/* 2nd arg means "unprivileged".  */
	return arm_copy_extra_ld_st (gdbarch, insn, (op1 & 0x12) == 0x02, regs,
				     dsc);
    }

  /* Should be unreachable.  */
  return 1;
}
6646
static int
arm_decode_ld_st_word_ubyte (struct gdbarch *gdbarch, uint32_t insn,
			     struct regcache *regs,
			     arm_displaced_step_copy_insn_closure *dsc)
{
  /* Decode word and unsigned-byte load/store forms (ldr/str/ldrb/strb and
     their 't' variants).  A = immediate vs register offset (bit 25);
     B (bit 4) rules out the media space when A is set.

     NOTE(review): the three trailing arguments of
     arm_copy_ldr_str_ldrb_strb appear to select load-vs-store, the
     transfer size in bytes (4 or 1), and the user-mode ('t') variant —
     confirm against that function's definition.  */
  int a = bit (insn, 25), b = bit (insn, 4);
  uint32_t op1 = bits (insn, 20, 24);

  if ((!a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02)
      || (a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 4, 0);
  else if ((!a && (op1 & 0x17) == 0x02)
	   || (a && (op1 & 0x17) == 0x02 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 4, 1);
  else if ((!a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03)
	   || (a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 4, 0);
  else if ((!a && (op1 & 0x17) == 0x03)
	   || (a && (op1 & 0x17) == 0x03 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 4, 1);
  else if ((!a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06)
	   || (a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 0);
  else if ((!a && (op1 & 0x17) == 0x06)
	   || (a && (op1 & 0x17) == 0x06 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 1);
  else if ((!a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07)
	   || (a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 0);
  else if ((!a && (op1 & 0x17) == 0x07)
	   || (a && (op1 & 0x17) == 0x07 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 1);

  /* Should be unreachable.  */
  return 1;
}
6683
static int
arm_decode_media (struct gdbarch *gdbarch, uint32_t insn,
		  arm_displaced_step_copy_insn_closure *dsc)
{
  /* Decode the media instruction space (parallel add/sub, pack/saturate,
     bitfield operations).  None of these transfer control, so they are
     all copied unmodified or flagged undefined.  */
  switch (bits (insn, 20, 24))
    {
    case 0x00: case 0x01: case 0x02: case 0x03:
      return arm_copy_unmodified (gdbarch, insn, "parallel add/sub signed", dsc);

    case 0x04: case 0x05: case 0x06: case 0x07:
      return arm_copy_unmodified (gdbarch, insn, "parallel add/sub unsigned", dsc);

    case 0x08: case 0x09: case 0x0a: case 0x0b:
    case 0x0c: case 0x0d: case 0x0e: case 0x0f:
      return arm_copy_unmodified (gdbarch, insn,
				  "decode/pack/unpack/saturate/reverse", dsc);

    case 0x18:
      if (bits (insn, 5, 7) == 0)  /* op2.  */
	{
	  if (bits (insn, 12, 15) == 0xf)
	    return arm_copy_unmodified (gdbarch, insn, "usad8", dsc);
	  else
	    return arm_copy_unmodified (gdbarch, insn, "usada8", dsc);
	}
      else
	return arm_copy_undef (gdbarch, insn, dsc);

    case 0x1a: case 0x1b:
      if (bits (insn, 5, 6) == 0x2)  /* op2[1:0].  */
	return arm_copy_unmodified (gdbarch, insn, "sbfx", dsc);
      else
	return arm_copy_undef (gdbarch, insn, dsc);

    case 0x1c: case 0x1d:
      if (bits (insn, 5, 6) == 0x0)  /* op2[1:0].  */
	{
	  /* Rn == 0xf distinguishes bfc from bfi.  */
	  if (bits (insn, 0, 3) == 0xf)
	    return arm_copy_unmodified (gdbarch, insn, "bfc", dsc);
	  else
	    return arm_copy_unmodified (gdbarch, insn, "bfi", dsc);
	}
      else
	return arm_copy_undef (gdbarch, insn, dsc);

    case 0x1e: case 0x1f:
      if (bits (insn, 5, 6) == 0x2)  /* op2[1:0].  */
	return arm_copy_unmodified (gdbarch, insn, "ubfx", dsc);
      else
	return arm_copy_undef (gdbarch, insn, dsc);
    }

  /* Should be unreachable.  */
  return 1;
}
6739
6740 static int
6741 arm_decode_b_bl_ldmstm (struct gdbarch *gdbarch, uint32_t insn,
6742 struct regcache *regs,
6743 arm_displaced_step_copy_insn_closure *dsc)
6744 {
6745 if (bit (insn, 25))
6746 return arm_copy_b_bl_blx (gdbarch, insn, regs, dsc);
6747 else
6748 return arm_copy_block_xfer (gdbarch, insn, regs, dsc);
6749 }
6750
static int
arm_decode_ext_reg_ld_st (struct gdbarch *gdbarch, uint32_t insn,
			  struct regcache *regs,
			  arm_displaced_step_copy_insn_closure *dsc)
{
  /* Decode VFP/Neon extension-register load/store forms by the opcode in
     bits 20-24.  Only vstr/vldr need special treatment (they may be
     PC-relative); the rest run unmodified.  */
  unsigned int opcode = bits (insn, 20, 24);

  switch (opcode)
    {
    case 0x04: case 0x05:  /* VFP/Neon mrrc/mcrr.  */
      return arm_copy_unmodified (gdbarch, insn, "vfp/neon mrrc/mcrr", dsc);

    case 0x08: case 0x0a: case 0x0c: case 0x0e:
    case 0x12: case 0x16:
      return arm_copy_unmodified (gdbarch, insn, "vfp/neon vstm/vpush", dsc);

    case 0x09: case 0x0b: case 0x0d: case 0x0f:
    case 0x13: case 0x17:
      return arm_copy_unmodified (gdbarch, insn, "vfp/neon vldm/vpop", dsc);

    case 0x10: case 0x14: case 0x18: case 0x1c:  /* vstr.  */
    case 0x11: case 0x15: case 0x19: case 0x1d:  /* vldr.  */
      /* Note: no writeback for these instructions.  Bit 25 will always be
	 zero though (via caller), so the following works OK.  */
      return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
    }

  /* Should be unreachable.  */
  return 1;
}
6781
6782 /* Decode shifted register instructions. */
6783
6784 static int
6785 thumb2_decode_dp_shift_reg (struct gdbarch *gdbarch, uint16_t insn1,
6786 uint16_t insn2, struct regcache *regs,
6787 arm_displaced_step_copy_insn_closure *dsc)
6788 {
6789 /* PC is only allowed to be used in instruction MOV. */
6790
6791 unsigned int op = bits (insn1, 5, 8);
6792 unsigned int rn = bits (insn1, 0, 3);
6793
6794 if (op == 0x2 && rn == 0xf) /* MOV */
6795 return thumb2_copy_alu_imm (gdbarch, insn1, insn2, regs, dsc);
6796 else
6797 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
6798 "dp (shift reg)", dsc);
6799 }
6800
6801
6802 /* Decode extension register load/store. Exactly the same as
6803 arm_decode_ext_reg_ld_st. */
6804
static int
thumb2_decode_ext_reg_ld_st (struct gdbarch *gdbarch, uint16_t insn1,
			     uint16_t insn2, struct regcache *regs,
			     arm_displaced_step_copy_insn_closure *dsc)
{
  /* Thumb-2 counterpart of arm_decode_ext_reg_ld_st: only vldr needs
     special treatment (it may be PC-relative); everything else runs
     unmodified.  Opcode is bits 4-8 of the first halfword.  */
  unsigned int opcode = bits (insn1, 4, 8);

  switch (opcode)
    {
    case 0x04: case 0x05:
      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					  "vfp/neon vmov", dsc);

    case 0x08: case 0x0c:  /* 01x00 */
    case 0x0a: case 0x0e:  /* 01x10 */
    case 0x12: case 0x16:  /* 10x10 */
      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					  "vfp/neon vstm/vpush", dsc);

    case 0x09: case 0x0d:  /* 01x01 */
    case 0x0b: case 0x0f:  /* 01x11 */
    case 0x13: case 0x17:  /* 10x11 */
      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					  "vfp/neon vldm/vpop", dsc);

    case 0x10: case 0x14: case 0x18: case 0x1c:  /* vstr.  */
      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					  "vstr", dsc);
    case 0x11: case 0x15: case 0x19: case 0x1d:  /* vldr.  */
      return thumb2_copy_copro_load_store (gdbarch, insn1, insn2, regs, dsc);
    }

  /* Should be unreachable.  */
  return 1;
}
6840
static int
arm_decode_svc_copro (struct gdbarch *gdbarch, uint32_t insn,
		      regcache *regs, arm_displaced_step_copy_insn_closure *dsc)
{
  /* Decode the supervisor-call / coprocessor instruction space.  COPROC
     values 0b101x denote the VFP/Neon register file; OP1 (bits 20-25) and
     OP (bit 4) select the instruction per the ARM ARM tables.  */
  unsigned int op1 = bits (insn, 20, 25);
  int op = bit (insn, 4);
  unsigned int coproc = bits (insn, 8, 11);

  if ((op1 & 0x20) == 0x00 && (op1 & 0x3a) != 0x00 && (coproc & 0xe) == 0xa)
    return arm_decode_ext_reg_ld_st (gdbarch, insn, regs, dsc);
  else if ((op1 & 0x21) == 0x00 && (op1 & 0x3a) != 0x00
	   && (coproc & 0xe) != 0xa)
    /* stc/stc2.  */
    return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
  else if ((op1 & 0x21) == 0x01 && (op1 & 0x3a) != 0x00
	   && (coproc & 0xe) != 0xa)
    /* ldc/ldc2 imm/lit.  */
    return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
  else if ((op1 & 0x3e) == 0x00)
    return arm_copy_undef (gdbarch, insn, dsc);
  else if ((op1 & 0x3e) == 0x04 && (coproc & 0xe) == 0xa)
    return arm_copy_unmodified (gdbarch, insn, "neon 64bit xfer", dsc);
  else if (op1 == 0x04 && (coproc & 0xe) != 0xa)
    return arm_copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);
  else if (op1 == 0x05 && (coproc & 0xe) != 0xa)
    return arm_copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);
  else if ((op1 & 0x30) == 0x20 && !op)
    {
      if ((coproc & 0xe) == 0xa)
	return arm_copy_unmodified (gdbarch, insn, "vfp dataproc", dsc);
      else
	return arm_copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
    }
  else if ((op1 & 0x30) == 0x20 && op)
    return arm_copy_unmodified (gdbarch, insn, "neon 8/16/32 bit xfer", dsc);
  else if ((op1 & 0x31) == 0x20 && op && (coproc & 0xe) != 0xa)
    return arm_copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
  else if ((op1 & 0x31) == 0x21 && op && (coproc & 0xe) != 0xa)
    return arm_copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
  else if ((op1 & 0x30) == 0x30)
    return arm_copy_svc (gdbarch, insn, regs, dsc);
  else
    return arm_copy_undef (gdbarch, insn, dsc);  /* Possibly unreachable.  */
}
6885
static int
thumb2_decode_svc_copro (struct gdbarch *gdbarch, uint16_t insn1,
			 uint16_t insn2, struct regcache *regs,
			 arm_displaced_step_copy_insn_closure *dsc)
{
  /* Decode the Thumb-2 coprocessor / SIMD instruction space.  The only
     case needing special treatment is the extension-register load/store
     path; everything else can run unmodified.  */
  unsigned int coproc = bits (insn2, 8, 11);
  unsigned int bit_5_8 = bits (insn1, 5, 8);
  unsigned int bit_9 = bit (insn1, 9);
  unsigned int bit_4 = bit (insn1, 4);

  if (bit_9 == 0)
    {
      if (bit_5_8 == 2)
	return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					    "neon 64bit xfer/mrrc/mrrc2/mcrr/mcrr2",
					    dsc);
      else if (bit_5_8 == 0)  /* UNDEFINED.  */
	return thumb_32bit_copy_undef (gdbarch, insn1, insn2, dsc);
      else
	{
	  /* coproc is 101x.  SIMD/VFP, ext registers load/store.  */
	  if ((coproc & 0xe) == 0xa)
	    return thumb2_decode_ext_reg_ld_st (gdbarch, insn1, insn2, regs,
						dsc);
	  else  /* coproc is not 101x.  */
	    {
	      if (bit_4 == 0)  /* STC/STC2.  */
		return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						    "stc/stc2", dsc);
	      else  /* LDC/LDC2 {literal, immediate}.  */
		return thumb2_copy_copro_load_store (gdbarch, insn1, insn2,
						     regs, dsc);
	    }
	}
    }
  else
    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "coproc", dsc);

  return 0;
}
6926
6927 static void
6928 install_pc_relative (struct gdbarch *gdbarch, struct regcache *regs,
6929 arm_displaced_step_copy_insn_closure *dsc, int rd)
6930 {
6931 /* ADR Rd, #imm
6932
6933 Rewrite as:
6934
6935 Preparation: Rd <- PC
6936 Insn: ADD Rd, #imm
6937 Cleanup: Null.
6938 */
6939
6940 /* Rd <- PC */
6941 int val = displaced_read_reg (regs, dsc, ARM_PC_REGNUM);
6942 displaced_write_reg (regs, dsc, rd, val, CANNOT_WRITE_PC);
6943 }
6944
6945 static int
6946 thumb_copy_pc_relative_16bit (struct gdbarch *gdbarch, struct regcache *regs,
6947 arm_displaced_step_copy_insn_closure *dsc,
6948 int rd, unsigned int imm)
6949 {
6950
6951 /* Encoding T2: ADDS Rd, #imm */
6952 dsc->modinsn[0] = (0x3000 | (rd << 8) | imm);
6953
6954 install_pc_relative (gdbarch, regs, dsc, rd);
6955
6956 return 0;
6957 }
6958
6959 static int
6960 thumb_decode_pc_relative_16bit (struct gdbarch *gdbarch, uint16_t insn,
6961 struct regcache *regs,
6962 arm_displaced_step_copy_insn_closure *dsc)
6963 {
6964 unsigned int rd = bits (insn, 8, 10);
6965 unsigned int imm8 = bits (insn, 0, 7);
6966
6967 displaced_debug_printf ("copying thumb adr r%d, #%d insn %.4x",
6968 rd, imm8, insn);
6969
6970 return thumb_copy_pc_relative_16bit (gdbarch, regs, dsc, rd, imm8);
6971 }
6972
static int
thumb_copy_pc_relative_32bit (struct gdbarch *gdbarch, uint16_t insn1,
			      uint16_t insn2, struct regcache *regs,
			      arm_displaced_step_copy_insn_closure *dsc)
{
  /* Copy a 32-bit Thumb ADR by rewriting it as an ADD or SUB of the
     immediate onto Rd, after install_pc_relative has seeded Rd with the
     original PC.  */
  unsigned int rd = bits (insn2, 8, 11);
  /* Since immediate has the same encoding in ADR ADD and SUB, so we simply
     extract raw immediate encoding rather than computing immediate.  When
     generating ADD or SUB instruction, we can simply perform OR operation to
     set immediate into ADD.  */
  unsigned int imm_3_8 = insn2 & 0x70ff;
  unsigned int imm_i = insn1 & 0x0400;  /* Clear all bits except bit 10.  */

  displaced_debug_printf ("copying thumb adr r%d, #%d:%d insn %.4x%.4x",
			  rd, imm_i, imm_3_8, insn1, insn2);

  if (bit (insn1, 7))  /* Bit 7 set: the subtracting form of ADR.  */
    {
      /* Emit SUB Rd, Rd, #imm (32-bit immediate encoding).  */
      dsc->modinsn[0] = (0xf1a0 | rd | imm_i);
      dsc->modinsn[1] = ((rd << 8) | imm_3_8);
    }
  else  /* Bit 7 clear: the adding form of ADR.  */
    {
      /* Emit ADD Rd, Rd, #imm (32-bit immediate encoding).  */
      dsc->modinsn[0] = (0xf100 | rd | imm_i);
      dsc->modinsn[1] = ((rd << 8) | imm_3_8);
    }
  dsc->numinsns = 2;

  install_pc_relative (gdbarch, regs, dsc, rd);

  return 0;
}
7007
static int
thumb_copy_16bit_ldr_literal (struct gdbarch *gdbarch, uint16_t insn1,
			      struct regcache *regs,
			      arm_displaced_step_copy_insn_closure *dsc)
{
  unsigned int rt = bits (insn1, 8, 10);
  unsigned int pc;
  int imm8 = (bits (insn1, 0, 7) << 2);

  /* LDR Rd, #imm8

     Rewrite as:

     Preparation: tmp0 <- R0, tmp2 <- R2, tmp3 <- R3, R2 <- PC, R3 <- #imm8;

     Insn: LDR R0, [R2, R3];
     Cleanup: R2 <- tmp2, R3 <- tmp3, Rd <- R0, R0 <- tmp0 */

  displaced_debug_printf ("copying thumb ldr r%d [pc #%d]", rt, imm8);

  /* Save the registers the rewritten sequence clobbers.  */
  dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
  dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
  dsc->tmp[3] = displaced_read_reg (regs, dsc, 3);
  pc = displaced_read_reg (regs, dsc, ARM_PC_REGNUM);
  /* The assembler calculates the required value of the offset from the
     Align(PC,4) value of this instruction to the label.  */
  pc = pc & 0xfffffffc;

  displaced_write_reg (regs, dsc, 2, pc, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 3, imm8, CANNOT_WRITE_PC);

  /* Record load parameters for cleanup_load, which moves R0 into Rd and
     restores the scratch registers.  */
  dsc->rd = rt;
  dsc->u.ldst.xfersize = 4;
  dsc->u.ldst.rn = 0;
  dsc->u.ldst.immed = 0;
  dsc->u.ldst.writeback = 0;
  dsc->u.ldst.restore_r4 = 0;

  dsc->modinsn[0] = 0x58d0;  /* ldr r0, [r2, r3] */

  dsc->cleanup = &cleanup_load;

  return 0;
}
7052
7053 /* Copy Thumb cbnz/cbz instruction. */
7054
static int
thumb_copy_cbnz_cbz (struct gdbarch *gdbarch, uint16_t insn1,
		     struct regcache *regs,
		     arm_displaced_step_copy_insn_closure *dsc)
{
  /* Copy a Thumb CBNZ/CBZ by evaluating the condition now (the tested
     register value is known), replacing the instruction with a NOP, and
     letting cleanup_branch set the PC.  */
  int non_zero = bit (insn1, 11);
  /* imm5 is encoded as i:imm5:'0' in bits 9 and 3-7.  */
  unsigned int imm5 = (bit (insn1, 9) << 6) | (bits (insn1, 3, 7) << 1);
  CORE_ADDR from = dsc->insn_addr;
  int rn = bits (insn1, 0, 2);
  int rn_val = displaced_read_reg (regs, dsc, rn);

  dsc->u.branch.cond = (rn_val && non_zero) || (!rn_val && !non_zero);
  /* CBNZ and CBZ do not affect the condition flags.  If condition is true,
     set it INST_AL, so cleanup_branch will know branch is taken, otherwise,
     condition is false, let it be, cleanup_branch will do nothing.  */
  if (dsc->u.branch.cond)
    {
      dsc->u.branch.cond = INST_AL;
      dsc->u.branch.dest = from + 4 + imm5;
    }
  else
    dsc->u.branch.dest = from + 2;

  dsc->u.branch.link = 0;
  dsc->u.branch.exchange = 0;

  displaced_debug_printf ("copying %s [r%d = 0x%x] insn %.4x to %.8lx",
			  non_zero ? "cbnz" : "cbz",
			  rn, rn_val, insn1, dsc->u.branch.dest);

  dsc->modinsn[0] = THUMB_NOP;

  dsc->cleanup = &cleanup_branch;
  return 0;
}
7090
/* Copy Table Branch Byte/Halfword (TBB/TBH) for displaced stepping.
   The jump-table entry is read directly here to compute the branch
   destination; a NOP is stepped in the scratch space and
   cleanup_branch applies the branch afterwards.  Returns 0 on
   success.  */
static int
thumb2_copy_table_branch (struct gdbarch *gdbarch, uint16_t insn1,
			  uint16_t insn2, struct regcache *regs,
			  arm_displaced_step_copy_insn_closure *dsc)
{
  ULONGEST rn_val, rm_val;
  int is_tbh = bit (insn2, 4);	/* 1 = TBH (halfword table), 0 = TBB.  */
  CORE_ADDR halfwords = 0;
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  rn_val = displaced_read_reg (regs, dsc, bits (insn1, 0, 3));
  rm_val = displaced_read_reg (regs, dsc, bits (insn2, 0, 3));

  /* Fetch the table entry: a forward branch distance in halfwords.
     NOTE(review): the target_read_memory return value is not checked;
     a failed read would leave BUF uninitialized -- confirm whether an
     error path is wanted here.  */
  if (is_tbh)
    {
      gdb_byte buf[2];

      target_read_memory (rn_val + 2 * rm_val, buf, 2);
      halfwords = extract_unsigned_integer (buf, 2, byte_order);
    }
  else
    {
      gdb_byte buf[1];

      target_read_memory (rn_val + rm_val, buf, 1);
      halfwords = extract_unsigned_integer (buf, 1, byte_order);
    }

  displaced_debug_printf ("%s base 0x%x offset 0x%x offset 0x%x",
			  is_tbh ? "tbh" : "tbb",
			  (unsigned int) rn_val, (unsigned int) rm_val,
			  (unsigned int) halfwords);

  /* Unconditional branch to PC (insn address + 4) plus twice the
     halfword count.  */
  dsc->u.branch.cond = INST_AL;
  dsc->u.branch.link = 0;
  dsc->u.branch.exchange = 0;
  dsc->u.branch.dest = dsc->insn_addr + 4 + 2 * halfwords;

  dsc->cleanup = &cleanup_branch;

  return 0;
}
7134
7135 static void
7136 cleanup_pop_pc_16bit_all (struct gdbarch *gdbarch, struct regcache *regs,
7137 arm_displaced_step_copy_insn_closure *dsc)
7138 {
7139 /* PC <- r7 */
7140 int val = displaced_read_reg (regs, dsc, 7);
7141 displaced_write_reg (regs, dsc, ARM_PC_REGNUM, val, BX_WRITE_PC);
7142
7143 /* r7 <- r8 */
7144 val = displaced_read_reg (regs, dsc, 8);
7145 displaced_write_reg (regs, dsc, 7, val, CANNOT_WRITE_PC);
7146
7147 /* r8 <- tmp[0] */
7148 displaced_write_reg (regs, dsc, 8, dsc->tmp[0], CANNOT_WRITE_PC);
7149
7150 }
7151
/* Copy a 16-bit Thumb "POP {..., PC}" for displaced stepping.  POP
   with PC in the register list cannot execute out of line directly, so
   the instruction is rewritten to pop into low registers only; a
   cleanup routine then moves the values to their real destinations.
   Returns 0 on success.  */

static int
thumb_copy_pop_pc_16bit (struct gdbarch *gdbarch, uint16_t insn1,
			 struct regcache *regs,
			 arm_displaced_step_copy_insn_closure *dsc)
{
  dsc->u.block.regmask = insn1 & 0x00ff;

  /* Rewrite instruction: POP {rX, rY, ...,rZ, PC}
     to :

     (1) register list is full, that is, r0-r7 are used.
	 Prepare: tmp[0] <- r8

	 POP {r0, r1, ...., r6, r7}; remove PC from reglist
	 MOV r8, r7; Move value of r7 to r8;
	 POP {r7}; Store PC value into r7.

	 Cleanup: PC <- r7, r7 <- r8, r8 <- tmp[0]

     (2) register list is not full, supposing there are N registers in
	 register list (except PC, 0 <= N <= 7).
	 Prepare: for each i, 0 - N, tmp[i] <- ri.

	 POP {r0, r1, ...., rN};

	 Cleanup: Set registers in original reglist from r0 - rN.  Restore
	 r0 - rN from tmp[] properly.
  */
  displaced_debug_printf ("copying thumb pop {%.8x, pc} insn %.4x",
			  dsc->u.block.regmask, insn1);

  if (dsc->u.block.regmask == 0xff)
    {
      dsc->tmp[0] = displaced_read_reg (regs, dsc, 8);

      dsc->modinsn[0] = (insn1 & 0xfeff);	/* POP {r0,r1,...,r6, r7} */
      dsc->modinsn[1] = 0x46b8;			/* MOV r8, r7 */
      dsc->modinsn[2] = 0xbc80;			/* POP {r7} */

      dsc->numinsns = 3;
      dsc->cleanup = &cleanup_pop_pc_16bit_all;
    }
  else
    {
      unsigned int num_in_list = count_one_bits (dsc->u.block.regmask);
      unsigned int i;
      unsigned int new_regmask;

      /* The rewritten POP loads r0..rN where N == num_in_list: one
	 extra register to receive the PC value.  Save all of them.  */
      for (i = 0; i < num_in_list + 1; i++)
	dsc->tmp[i] = displaced_read_reg (regs, dsc, i);

      new_regmask = (1 << (num_in_list + 1)) - 1;

      displaced_debug_printf ("POP {..., pc}: original reg list %.4x, "
			      "modified list %.4x",
			      (int) dsc->u.block.regmask, new_regmask);

      /* Record PC in the original register list so the cleanup routine
	 knows where the popped values must really go.  */
      dsc->u.block.regmask |= 0x8000;
      dsc->u.block.writeback = 0;
      dsc->u.block.cond = INST_AL;

      dsc->modinsn[0] = (insn1 & ~0x1ff) | (new_regmask & 0xff);

      dsc->cleanup = &cleanup_block_load_pc;
    }

  return 0;
}
7220
/* Decode the 16-bit Thumb instruction INSN1 and prepare DSC for
   displaced stepping it.  Instructions that are not PC-sensitive are
   copied unmodified; PC-relative and control-flow instructions are
   dispatched to dedicated copy routines.  Calls internal_error if the
   instruction cannot be decoded.  */

static void
thumb_process_displaced_16bit_insn (struct gdbarch *gdbarch, uint16_t insn1,
				    struct regcache *regs,
				    arm_displaced_step_copy_insn_closure *dsc)
{
  unsigned short op_bit_12_15 = bits (insn1, 12, 15);
  unsigned short op_bit_10_11 = bits (insn1, 10, 11);
  int err = 0;

  /* 16-bit thumb instructions.  */
  switch (op_bit_12_15)
    {
      /* Shift (imme), add, subtract, move and compare.  */
    case 0: case 1: case 2: case 3:
      err = thumb_copy_unmodified_16bit (gdbarch, insn1,
					 "shift/add/sub/mov/cmp",
					 dsc);
      break;
    case 4:
      switch (op_bit_10_11)
	{
	case 0: /* Data-processing */
	  err = thumb_copy_unmodified_16bit (gdbarch, insn1,
					     "data-processing",
					     dsc);
	  break;
	case 1: /* Special data instructions and branch and exchange.  */
	  {
	    unsigned short op = bits (insn1, 7, 9);
	    if (op == 6 || op == 7) /* BX or BLX */
	      err = thumb_copy_bx_blx_reg (gdbarch, insn1, regs, dsc);
	    else if (bits (insn1, 6, 7) != 0) /* ADD/MOV/CMP high registers.  */
	      err = thumb_copy_alu_reg (gdbarch, insn1, regs, dsc);
	    else
	      err = thumb_copy_unmodified_16bit (gdbarch, insn1, "special data",
						 dsc);
	  }
	  break;
	default: /* LDR (literal) */
	  err = thumb_copy_16bit_ldr_literal (gdbarch, insn1, regs, dsc);
	}
      break;
    case 5: case 6: case 7: case 8: case 9: /* Load/Store single data item */
      err = thumb_copy_unmodified_16bit (gdbarch, insn1, "ldr/str", dsc);
      break;
    case 10:
      if (op_bit_10_11 < 2) /* Generate PC-relative address */
	err = thumb_decode_pc_relative_16bit (gdbarch, insn1, regs, dsc);
      else /* Generate SP-relative address */
	err = thumb_copy_unmodified_16bit (gdbarch, insn1, "sp-relative", dsc);
      break;
    case 11: /* Misc 16-bit instructions */
      {
	switch (bits (insn1, 8, 11))
	  {
	  case 1: case 3: case 9: case 11: /* CBNZ, CBZ */
	    err = thumb_copy_cbnz_cbz (gdbarch, insn1, regs, dsc);
	    break;
	  case 12: case 13: /* POP */
	    if (bit (insn1, 8)) /* PC is in register list.  */
	      err = thumb_copy_pop_pc_16bit (gdbarch, insn1, regs, dsc);
	    else
	      err = thumb_copy_unmodified_16bit (gdbarch, insn1, "pop", dsc);
	    break;
	  case 15: /* If-Then, and hints */
	    if (bits (insn1, 0, 3))
	      /* If-Then makes up to four following instructions conditional.
		 IT instruction itself is not conditional, so handle it as a
		 common unmodified instruction.  */
	      err = thumb_copy_unmodified_16bit (gdbarch, insn1, "If-Then",
						 dsc);
	    else
	      err = thumb_copy_unmodified_16bit (gdbarch, insn1, "hints", dsc);
	    break;
	  default:
	    err = thumb_copy_unmodified_16bit (gdbarch, insn1, "misc", dsc);
	  }
      }
      break;
    case 12:
      if (op_bit_10_11 < 2) /* Store multiple registers */
	err = thumb_copy_unmodified_16bit (gdbarch, insn1, "stm", dsc);
      else /* Load multiple registers */
	err = thumb_copy_unmodified_16bit (gdbarch, insn1, "ldm", dsc);
      break;
    case 13: /* Conditional branch and supervisor call */
      if (bits (insn1, 9, 11) != 7) /* conditional branch */
	err = thumb_copy_b (gdbarch, insn1, dsc);
      else
	err = thumb_copy_svc (gdbarch, insn1, regs, dsc);
      break;
    case 14: /* Unconditional branch */
      err = thumb_copy_b (gdbarch, insn1, dsc);
      break;
    default:
      err = 1;
    }

  if (err)
    internal_error (__FILE__, __LINE__,
		    _("thumb_process_displaced_16bit_insn: Instruction decode error"));
}
7323
/* Decode the Thumb-2 "load byte/halfword/word and memory hints" group
   (INSN1:INSN2) and dispatch to the appropriate displaced-stepping
   copy routine.  Only literal (PC-relative) loads and preloads need
   rewriting; everything else is copied unmodified.  Returns the copy
   routine's status.  */

static int
decode_thumb_32bit_ld_mem_hints (struct gdbarch *gdbarch,
				 uint16_t insn1, uint16_t insn2,
				 struct regcache *regs,
				 arm_displaced_step_copy_insn_closure *dsc)
{
  int rt = bits (insn2, 12, 15);
  int rn = bits (insn1, 0, 3);
  int op1 = bits (insn1, 7, 8);

  switch (bits (insn1, 5, 6))
    {
    case 0: /* Load byte and memory hints */
      if (rt == 0xf) /* PLD/PLI */
	{
	  if (rn == 0xf)
	    /* PLD literal or Encoding T3 of PLI(immediate, literal).  */
	    return thumb2_copy_preload (gdbarch, insn1, insn2, regs, dsc);
	  else
	    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						"pli/pld", dsc);
	}
      else
	{
	  if (rn == 0xf) /* LDRB/LDRSB (literal) */
	    return thumb2_copy_load_literal (gdbarch, insn1, insn2, regs, dsc,
					     1);
	  else
	    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						"ldrb{reg, immediate}/ldrbt",
						dsc);
	}

      break;
    case 1: /* Load halfword and memory hints.  */
      if (rt == 0xf) /* PLD{W} and Unalloc memory hint.  */
	return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					    "pld/unalloc memhint", dsc);
      else
	{
	  if (rn == 0xf)
	    return thumb2_copy_load_literal (gdbarch, insn1, insn2, regs, dsc,
					     2);
	  else
	    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						"ldrh/ldrht", dsc);
	}
      break;
    case 2: /* Load word */
      {
	int insn2_bit_8_11 = bits (insn2, 8, 11);

	if (rn == 0xf)
	  return thumb2_copy_load_literal (gdbarch, insn1, insn2, regs, dsc, 4);
	else if (op1 == 0x1) /* Encoding T3 */
	  return thumb2_copy_load_reg_imm (gdbarch, insn1, insn2, regs, dsc,
					   0, 1);
	else /* op1 == 0x0 */
	  {
	    if (insn2_bit_8_11 == 0xc || (insn2_bit_8_11 & 0x9) == 0x9)
	      /* LDR (immediate) */
	      return thumb2_copy_load_reg_imm (gdbarch, insn1, insn2, regs,
					       dsc, bit (insn2, 8), 1);
	    else if (insn2_bit_8_11 == 0xe) /* LDRT */
	      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						  "ldrt", dsc);
	    else
	      /* LDR (register) */
	      return thumb2_copy_load_reg_imm (gdbarch, insn1, insn2, regs,
					       dsc, 0, 0);
	  }
	break;
      }
    default:
      return thumb_32bit_copy_undef (gdbarch, insn1, insn2, dsc);
      break;
    }
  return 0;
}
7403
/* Decode the 32-bit Thumb-2 instruction INSN1:INSN2 and prepare DSC
   for displaced stepping it.  PC-sensitive instructions (branches,
   table branches, PC-relative data processing and loads) go to
   dedicated copy routines; everything else is copied unmodified.
   Calls internal_error if the instruction cannot be decoded.  */

static void
thumb_process_displaced_32bit_insn (struct gdbarch *gdbarch, uint16_t insn1,
				    uint16_t insn2, struct regcache *regs,
				    arm_displaced_step_copy_insn_closure *dsc)
{
  int err = 0;
  unsigned short op = bit (insn2, 15);
  unsigned int op1 = bits (insn1, 11, 12);

  switch (op1)
    {
    case 1:
      {
	switch (bits (insn1, 9, 10))
	  {
	  case 0:
	    if (bit (insn1, 6))
	      {
		/* Load/store {dual, exclusive}, table branch.  */
		if (bits (insn1, 7, 8) == 1 && bits (insn1, 4, 5) == 1
		    && bits (insn2, 5, 7) == 0)
		  err = thumb2_copy_table_branch (gdbarch, insn1, insn2, regs,
						  dsc);
		else
		  /* PC is not allowed to use in load/store {dual, exclusive}
		     instructions.  */
		  err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						     "load/store dual/ex", dsc);
	      }
	    else /* load/store multiple */
	      {
		switch (bits (insn1, 7, 8))
		  {
		  case 0: case 3: /* SRS, RFE */
		    err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						       "srs/rfe", dsc);
		    break;
		  case 1: case 2: /* LDM/STM/PUSH/POP */
		    err = thumb2_copy_block_xfer (gdbarch, insn1, insn2, regs, dsc);
		    break;
		  }
	      }
	    break;

	  case 1:
	    /* Data-processing (shift register).  */
	    err = thumb2_decode_dp_shift_reg (gdbarch, insn1, insn2, regs,
					      dsc);
	    break;
	  default: /* Coprocessor instructions.  */
	    err = thumb2_decode_svc_copro (gdbarch, insn1, insn2, regs, dsc);
	    break;
	  }
	break;
      }
    case 2: /* op1 = 2 */
      if (op) /* Branch and misc control.  */
	{
	  if (bit (insn2, 14)  /* BLX/BL */
	      || bit (insn2, 12) /* Unconditional branch */
	      || (bits (insn1, 7, 9) != 0x7)) /* Conditional branch */
	    err = thumb2_copy_b_bl_blx (gdbarch, insn1, insn2, regs, dsc);
	  else
	    err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					       "misc ctrl", dsc);
	}
      else
	{
	  if (bit (insn1, 9)) /* Data processing (plain binary imm).  */
	    {
	      int dp_op = bits (insn1, 4, 8);
	      int rn = bits (insn1, 0, 3);
	      /* Add/sub wide immediate with Rn == PC (ADR-like forms)
		 needs a PC-relative rewrite.  */
	      if ((dp_op == 0 || dp_op == 0xa) && rn == 0xf)
		err = thumb_copy_pc_relative_32bit (gdbarch, insn1, insn2,
						    regs, dsc);
	      else
		err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						   "dp/pb", dsc);
	    }
	  else /* Data processing (modified immediate) */
	    err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					       "dp/mi", dsc);
	}
      break;
    case 3: /* op1 = 3 */
      switch (bits (insn1, 9, 10))
	{
	case 0:
	  if (bit (insn1, 4))
	    err = decode_thumb_32bit_ld_mem_hints (gdbarch, insn1, insn2,
						   regs, dsc);
	  else /* NEON Load/Store and Store single data item */
	    err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					       "neon elt/struct load/store",
					       dsc);
	  break;
	case 1: /* op1 = 3, bits (9, 10) == 1 */
	  switch (bits (insn1, 7, 8))
	    {
	    case 0: case 1: /* Data processing (register) */
	      err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						 "dp(reg)", dsc);
	      break;
	    case 2: /* Multiply and absolute difference */
	      err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						 "mul/mua/diff", dsc);
	      break;
	    case 3: /* Long multiply and divide */
	      err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						 "lmul/lmua", dsc);
	      break;
	    }
	  break;
	default: /* Coprocessor instructions */
	  err = thumb2_decode_svc_copro (gdbarch, insn1, insn2, regs, dsc);
	  break;
	}
      break;
    default:
      err = 1;
    }

  if (err)
    internal_error (__FILE__, __LINE__,
		    _("thumb_process_displaced_32bit_insn: Instruction decode error"));

}
7531
7532 static void
7533 thumb_process_displaced_insn (struct gdbarch *gdbarch, CORE_ADDR from,
7534 struct regcache *regs,
7535 arm_displaced_step_copy_insn_closure *dsc)
7536 {
7537 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
7538 uint16_t insn1
7539 = read_memory_unsigned_integer (from, 2, byte_order_for_code);
7540
7541 displaced_debug_printf ("process thumb insn %.4x at %.8lx",
7542 insn1, (unsigned long) from);
7543
7544 dsc->is_thumb = 1;
7545 dsc->insn_size = thumb_insn_size (insn1);
7546 if (thumb_insn_size (insn1) == 4)
7547 {
7548 uint16_t insn2
7549 = read_memory_unsigned_integer (from + 2, 2, byte_order_for_code);
7550 thumb_process_displaced_32bit_insn (gdbarch, insn1, insn2, regs, dsc);
7551 }
7552 else
7553 thumb_process_displaced_16bit_insn (gdbarch, insn1, regs, dsc);
7554 }
7555
/* Prepare DSC for displaced stepping the instruction at FROM, with
   scratch space at TO.  Thumb-mode work is delegated to
   thumb_process_displaced_insn; ARM instructions are decoded here by
   their major opcode fields.  Calls internal_error on a decode
   error.  */

void
arm_process_displaced_insn (struct gdbarch *gdbarch, CORE_ADDR from,
			    CORE_ADDR to, struct regcache *regs,
			    arm_displaced_step_copy_insn_closure *dsc)
{
  int err = 0;
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  uint32_t insn;

  /* Most displaced instructions use a 1-instruction scratch space, so set this
     here and override below if/when necessary.  */
  dsc->numinsns = 1;
  dsc->insn_addr = from;
  dsc->scratch_base = to;
  dsc->cleanup = NULL;
  dsc->wrote_to_pc = 0;

  if (!displaced_in_arm_mode (regs))
    return thumb_process_displaced_insn (gdbarch, from, regs, dsc);

  dsc->is_thumb = 0;
  dsc->insn_size = 4;
  insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
  displaced_debug_printf ("stepping insn %.8lx at %.8lx",
			  (unsigned long) insn, (unsigned long) from);

  if ((insn & 0xf0000000) == 0xf0000000)
    err = arm_decode_unconditional (gdbarch, insn, regs, dsc);
  /* Dispatch key combines insn bit 4 with bits 27:25.  */
  else switch (((insn & 0x10) >> 4) | ((insn & 0xe000000) >> 24))
    {
    case 0x0: case 0x1: case 0x2: case 0x3:
      err = arm_decode_dp_misc (gdbarch, insn, regs, dsc);
      break;

    case 0x4: case 0x5: case 0x6:
      err = arm_decode_ld_st_word_ubyte (gdbarch, insn, regs, dsc);
      break;

    case 0x7:
      err = arm_decode_media (gdbarch, insn, dsc);
      break;

    case 0x8: case 0x9: case 0xa: case 0xb:
      err = arm_decode_b_bl_ldmstm (gdbarch, insn, regs, dsc);
      break;

    case 0xc: case 0xd: case 0xe: case 0xf:
      err = arm_decode_svc_copro (gdbarch, insn, regs, dsc);
      break;
    }

  if (err)
    internal_error (__FILE__, __LINE__,
		    _("arm_process_displaced_insn: Instruction decode error"));
}
7611
/* Actually set up the scratch space for a displaced instruction.
   Writes the (possibly rewritten) instruction(s) from DSC->modinsn to
   the scratch area at TO, then appends the architecture breakpoint
   instruction so GDB regains control after the single step.  */

void
arm_displaced_init_closure (struct gdbarch *gdbarch, CORE_ADDR from,
			    CORE_ADDR to,
			    arm_displaced_step_copy_insn_closure *dsc)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  unsigned int i, len, offset;
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  /* Instruction unit size in bytes: Thumb halfwords vs ARM words.  */
  int size = dsc->is_thumb? 2 : 4;
  const gdb_byte *bkp_insn;

  offset = 0;
  /* Poke modified instruction(s).  */
  for (i = 0; i < dsc->numinsns; i++)
    {
      if (size == 4)
	displaced_debug_printf ("writing insn %.8lx at %.8lx",
				dsc->modinsn[i], (unsigned long) to + offset);
      else if (size == 2)
	displaced_debug_printf ("writing insn %.4x at %.8lx",
				(unsigned short) dsc->modinsn[i],
				(unsigned long) to + offset);

      write_memory_unsigned_integer (to + offset, size,
				     byte_order_for_code,
				     dsc->modinsn[i]);
      offset += size;
    }

  /* Choose the correct breakpoint instruction.  */
  if (dsc->is_thumb)
    {
      bkp_insn = tdep->thumb_breakpoint;
      len = tdep->thumb_breakpoint_size;
    }
  else
    {
      bkp_insn = tdep->arm_breakpoint;
      len = tdep->arm_breakpoint_size;
    }

  /* Put breakpoint afterwards.  */
  write_memory (to + offset, bkp_insn, len);

  displaced_debug_printf ("copy %s->%s", paddress (gdbarch, from),
			  paddress (gdbarch, to));
}
7661
7662 /* Entry point for cleaning things up after a displaced instruction has been
7663 single-stepped. */
7664
7665 void
7666 arm_displaced_step_fixup (struct gdbarch *gdbarch,
7667 struct displaced_step_copy_insn_closure *dsc_,
7668 CORE_ADDR from, CORE_ADDR to,
7669 struct regcache *regs)
7670 {
7671 arm_displaced_step_copy_insn_closure *dsc
7672 = (arm_displaced_step_copy_insn_closure *) dsc_;
7673
7674 if (dsc->cleanup)
7675 dsc->cleanup (gdbarch, regs, dsc);
7676
7677 if (!dsc->wrote_to_pc)
7678 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM,
7679 dsc->insn_addr + dsc->insn_size);
7680
7681 }
7682
7683 #include "bfd-in2.h"
7684 #include "libcoff.h"
7685
/* Disassemble one instruction at MEMADDR.  Wrapper around
   default_print_insn that arranges for the opcodes disassembler to
   decode Thumb instructions when MEMADDR is a Thumb address.  */

static int
gdb_print_insn_arm (bfd_vma memaddr, disassemble_info *info)
{
  gdb_disassembler *di
    = static_cast<gdb_disassembler *>(info->application_data);
  struct gdbarch *gdbarch = di->arch ();

  if (arm_pc_is_thumb (gdbarch, memaddr))
    {
      /* Fake COFF symbol table that flips the disassembler into Thumb
	 mode; built lazily once and reused across calls.  */
      static asymbol *asym;
      static combined_entry_type ce;
      static struct coff_symbol_struct csym;
      static struct bfd fake_bfd;
      static bfd_target fake_target;

      if (csym.native == NULL)
	{
	  /* Create a fake symbol vector containing a Thumb symbol.
	     This is solely so that the code in print_insn_little_arm()
	     and print_insn_big_arm() in opcodes/arm-dis.c will detect
	     the presence of a Thumb symbol and switch to decoding
	     Thumb instructions.  */

	  fake_target.flavour = bfd_target_coff_flavour;
	  fake_bfd.xvec = &fake_target;
	  ce.u.syment.n_sclass = C_THUMBEXTFUNC;
	  csym.native = &ce;
	  csym.symbol.the_bfd = &fake_bfd;
	  csym.symbol.name = "fake";
	  asym = (asymbol *) & csym;
	}

      memaddr = UNMAKE_THUMB_ADDR (memaddr);
      info->symbols = &asym;
    }
  else
    info->symbols = NULL;

  /* GDB is able to get bfd_mach from the exe_bfd, info->mach is
     accurate, so mark USER_SPECIFIED_MACHINE_TYPE bit.  Otherwise,
     opcodes/arm-dis.c:print_insn reset info->mach, and it will trigger
     the assert on the mismatch of info->mach and
     bfd_get_mach (current_program_space->exec_bfd ()) in
     default_print_insn.  */
  if (current_program_space->exec_bfd () != NULL)
    info->flags |= USER_SPECIFIED_MACHINE_TYPE;

  return default_print_insn (memaddr, info);
}
7735
7736 /* The following define instruction sequences that will cause ARM
7737 cpu's to take an undefined instruction trap. These are used to
7738 signal a breakpoint to GDB.
7739
7740 The newer ARMv4T cpu's are capable of operating in ARM or Thumb
7741 modes. A different instruction is required for each mode. The ARM
7742 cpu's can also be big or little endian. Thus four different
7743 instructions are needed to support all cases.
7744
7745 Note: ARMv4 defines several new instructions that will take the
7746 undefined instruction trap. ARM7TDMI is nominally ARMv4T, but does
7747 not in fact add the new instructions. The new undefined
7748 instructions in ARMv4 are all instructions that had no defined
7749 behaviour in earlier chips. There is no guarantee that they will
7750 raise an exception, but may be treated as NOP's. In practice, it
   may only be safe to rely on instructions matching:
7752
7753 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
7754 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
7755 C C C C 0 1 1 x x x x x x x x x x x x x x x x x x x x 1 x x x x
7756
   Even this may only be true if the condition predicate is true.  The
7758 following use a condition predicate of ALWAYS so it is always TRUE.
7759
7760 There are other ways of forcing a breakpoint. GNU/Linux, RISC iX,
7761 and NetBSD all use a software interrupt rather than an undefined
   instruction to force a trap.  This can be handled by the
7763 abi-specific code during establishment of the gdbarch vector. */
7764
7765 #define ARM_LE_BREAKPOINT {0xFE,0xDE,0xFF,0xE7}
7766 #define ARM_BE_BREAKPOINT {0xE7,0xFF,0xDE,0xFE}
7767 #define THUMB_LE_BREAKPOINT {0xbe,0xbe}
7768 #define THUMB_BE_BREAKPOINT {0xbe,0xbe}
7769
7770 static const gdb_byte arm_default_arm_le_breakpoint[] = ARM_LE_BREAKPOINT;
7771 static const gdb_byte arm_default_arm_be_breakpoint[] = ARM_BE_BREAKPOINT;
7772 static const gdb_byte arm_default_thumb_le_breakpoint[] = THUMB_LE_BREAKPOINT;
7773 static const gdb_byte arm_default_thumb_be_breakpoint[] = THUMB_BE_BREAKPOINT;
7774
/* Implement the breakpoint_kind_from_pc gdbarch method.  Returns
   ARM_BP_KIND_ARM for ARM code; for Thumb code, distinguishes 16-bit
   (ARM_BP_KIND_THUMB) from 32-bit (ARM_BP_KIND_THUMB2) instructions so
   a breakpoint of the right size is inserted.  Strips the Thumb bit
   from *PCPTR as a side effect.  */

static int
arm_breakpoint_kind_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);

  if (arm_pc_is_thumb (gdbarch, *pcptr))
    {
      *pcptr = UNMAKE_THUMB_ADDR (*pcptr);

      /* If we have a separate 32-bit breakpoint instruction for Thumb-2,
	 check whether we are replacing a 32-bit instruction.  */
      if (tdep->thumb2_breakpoint != NULL)
	{
	  gdb_byte buf[2];

	  /* An unreadable PC falls through to the 16-bit kind.  */
	  if (target_read_memory (*pcptr, buf, 2) == 0)
	    {
	      unsigned short inst1;

	      inst1 = extract_unsigned_integer (buf, 2, byte_order_for_code);
	      if (thumb_insn_size (inst1) == 4)
		return ARM_BP_KIND_THUMB2;
	    }
	}

      return ARM_BP_KIND_THUMB;
    }
  else
    return ARM_BP_KIND_ARM;

}
7809
7810 /* Implement the sw_breakpoint_from_kind gdbarch method. */
7811
7812 static const gdb_byte *
7813 arm_sw_breakpoint_from_kind (struct gdbarch *gdbarch, int kind, int *size)
7814 {
7815 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
7816
7817 switch (kind)
7818 {
7819 case ARM_BP_KIND_ARM:
7820 *size = tdep->arm_breakpoint_size;
7821 return tdep->arm_breakpoint;
7822 case ARM_BP_KIND_THUMB:
7823 *size = tdep->thumb_breakpoint_size;
7824 return tdep->thumb_breakpoint;
7825 case ARM_BP_KIND_THUMB2:
7826 *size = tdep->thumb2_breakpoint_size;
7827 return tdep->thumb2_breakpoint;
7828 default:
7829 gdb_assert_not_reached ("unexpected arm breakpoint kind");
7830 }
7831 }
7832
/* Implement the breakpoint_kind_from_current_state gdbarch method.
   The mode at *PCPTR may differ from the current mode when the current
   instruction switches between ARM and Thumb (e.g. BX).  Compute the
   set of possible next PCs; if one of them matches *PCPTR, derive the
   breakpoint kind from that address's Thumb bit instead of from
   *PCPTR alone.  */

static int
arm_breakpoint_kind_from_current_state (struct gdbarch *gdbarch,
					struct regcache *regcache,
					CORE_ADDR *pcptr)
{
  gdb_byte buf[4];

  /* Check the memory pointed by PC is readable.  */
  if (target_read_memory (regcache_read_pc (regcache), buf, 4) == 0)
    {
      struct arm_get_next_pcs next_pcs_ctx;

      arm_get_next_pcs_ctor (&next_pcs_ctx,
			     &arm_get_next_pcs_ops,
			     gdbarch_byte_order (gdbarch),
			     gdbarch_byte_order_for_code (gdbarch),
			     0,
			     regcache);

      std::vector<CORE_ADDR> next_pcs = arm_get_next_pcs (&next_pcs_ctx);

      /* If MEMADDR is the next instruction of current pc, do the
	 software single step computation, and get the thumb mode by
	 the destination address.  */
      for (CORE_ADDR pc : next_pcs)
	{
	  if (UNMAKE_THUMB_ADDR (pc) == *pcptr)
	    {
	      if (IS_THUMB_ADDR (pc))
		{
		  *pcptr = MAKE_THUMB_ADDR (*pcptr);
		  return arm_breakpoint_kind_from_pc (gdbarch, pcptr);
		}
	      else
		return ARM_BP_KIND_ARM;
	    }
	}
    }

  /* Fall back to deciding purely from the address itself.  */
  return arm_breakpoint_kind_from_pc (gdbarch, pcptr);
}
7876
/* Extract from an array REGBUF containing the (raw) register state a
   function return value of type TYPE, and copy that, in virtual
   format, into VALBUF.  Floats are handled per the configured floating
   point model; integers and pointers come from r0 (and r1 for 8-byte
   values); aggregates are copied word by word from r0 upward.  */

static void
arm_extract_return_value (struct type *type, struct regcache *regs,
			  gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = regs->arch ();
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  if (TYPE_CODE_FLT == type->code ())
    {
      switch (gdbarch_tdep (gdbarch)->fp_model)
	{
	case ARM_FLOAT_FPA:
	  {
	    /* The value is in register F0 in internal format.  We need to
	       extract the raw value and then convert it to the desired
	       internal type.  */
	    bfd_byte tmpbuf[ARM_FP_REGISTER_SIZE];

	    regs->cooked_read (ARM_F0_REGNUM, tmpbuf);
	    target_float_convert (tmpbuf, arm_ext_type (gdbarch),
				  valbuf, type);
	  }
	  break;

	case ARM_FLOAT_SOFT_FPA:
	case ARM_FLOAT_SOFT_VFP:
	  /* ARM_FLOAT_VFP can arise if this is a variadic function so
	     not using the VFP ABI code.  */
	case ARM_FLOAT_VFP:
	  regs->cooked_read (ARM_A1_REGNUM, valbuf);
	  if (TYPE_LENGTH (type) > 4)
	    regs->cooked_read (ARM_A1_REGNUM + 1,
			       valbuf + ARM_INT_REGISTER_SIZE);
	  break;

	default:
	  internal_error (__FILE__, __LINE__,
			  _("arm_extract_return_value: "
			    "Floating point model not supported"));
	  break;
	}
    }
  else if (type->code () == TYPE_CODE_INT
	   || type->code () == TYPE_CODE_CHAR
	   || type->code () == TYPE_CODE_BOOL
	   || type->code () == TYPE_CODE_PTR
	   || TYPE_IS_REFERENCE (type)
	   || type->code () == TYPE_CODE_ENUM)
    {
      /* If the type is a plain integer, then the access is
	 straight-forward.  Otherwise we have to play around a bit
	 more.  */
      int len = TYPE_LENGTH (type);
      int regno = ARM_A1_REGNUM;
      ULONGEST tmp;

      while (len > 0)
	{
	  /* By using store_unsigned_integer we avoid having to do
	     anything special for small big-endian values.  */
	  regcache_cooked_read_unsigned (regs, regno++, &tmp);
	  store_unsigned_integer (valbuf,
				  (len > ARM_INT_REGISTER_SIZE
				   ? ARM_INT_REGISTER_SIZE : len),
				  byte_order, tmp);
	  len -= ARM_INT_REGISTER_SIZE;
	  valbuf += ARM_INT_REGISTER_SIZE;
	}
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
	 been stored to word-aligned memory and then loaded into
	 registers with 32-bit load instruction(s).  */
      int len = TYPE_LENGTH (type);
      int regno = ARM_A1_REGNUM;
      bfd_byte tmpbuf[ARM_INT_REGISTER_SIZE];

      while (len > 0)
	{
	  regs->cooked_read (regno++, tmpbuf);
	  memcpy (valbuf, tmpbuf,
		  len > ARM_INT_REGISTER_SIZE ? ARM_INT_REGISTER_SIZE : len);
	  len -= ARM_INT_REGISTER_SIZE;
	  valbuf += ARM_INT_REGISTER_SIZE;
	}
    }
}
7969
7970
/* Will a function return an aggregate type in memory or in a
   register?  Return 0 if an aggregate type can be returned in a
   register, 1 if it must be returned in memory.  The answer depends on
   the ABI: AAPCS uses a simple size test, while the old APCS also
   requires the aggregate to be "integer like".  */

static int
arm_return_in_memory (struct gdbarch *gdbarch, struct type *type)
{
  enum type_code code;

  type = check_typedef (type);

  /* Simple, non-aggregate types (ie not including vectors and
     complex) are always returned in a register (or registers).  */
  code = type->code ();
  if (TYPE_CODE_STRUCT != code && TYPE_CODE_UNION != code
      && TYPE_CODE_ARRAY != code && TYPE_CODE_COMPLEX != code)
    return 0;

  if (TYPE_CODE_ARRAY == code && type->is_vector ())
    {
      /* Vector values should be returned using ARM registers if they
	 are not over 16 bytes.  */
      return (TYPE_LENGTH (type) > 16);
    }

  if (gdbarch_tdep (gdbarch)->arm_abi != ARM_ABI_APCS)
    {
      /* The AAPCS says all aggregates not larger than a word are returned
	 in a register.  */
      if (TYPE_LENGTH (type) <= ARM_INT_REGISTER_SIZE)
	return 0;

      return 1;
    }
  else
    {
      int nRc;

      /* All aggregate types that won't fit in a register must be returned
	 in memory.  */
      if (TYPE_LENGTH (type) > ARM_INT_REGISTER_SIZE)
	return 1;

      /* In the ARM ABI, "integer" like aggregate types are returned in
	 registers.  For an aggregate type to be integer like, its size
	 must be less than or equal to ARM_INT_REGISTER_SIZE and the
	 offset of each addressable subfield must be zero.  Note that bit
	 fields are not addressable, and all addressable subfields of
	 unions always start at offset zero.

	 This function is based on the behaviour of GCC 2.95.1.
	 See: gcc/arm.c: arm_return_in_memory() for details.

	 Note: All versions of GCC before GCC 2.95.2 do not set up the
	 parameters correctly for a function returning the following
	 structure: struct { float f;}; This should be returned in memory,
	 not a register.  Richard Earnshaw sent me a patch, but I do not
	 know of any way to detect if a function like the above has been
	 compiled with the correct calling convention.  */

      /* Assume all other aggregate types can be returned in a register.
	 Run a check for structures, unions and arrays.  */
      nRc = 0;

      if ((TYPE_CODE_STRUCT == code) || (TYPE_CODE_UNION == code))
	{
	  int i;
	  /* Need to check if this struct/union is "integer" like.  For
	     this to be true, its size must be less than or equal to
	     ARM_INT_REGISTER_SIZE and the offset of each addressable
	     subfield must be zero.  Note that bit fields are not
	     addressable, and unions always start at offset zero.  If any
	     of the subfields is a floating point type, the struct/union
	     cannot be an integer type.  */

	  /* For each field in the object, check:
	     1) Is it FP? --> yes, nRc = 1;
	     2) Is it addressable (bitpos != 0) and
	     not packed (bitsize == 0)?
	     --> yes, nRc = 1
	  */

	  for (i = 0; i < type->num_fields (); i++)
	    {
	      enum type_code field_type_code;

	      field_type_code
		= check_typedef (type->field (i).type ())->code ();

	      /* Is it a floating point type field?  */
	      if (field_type_code == TYPE_CODE_FLT)
		{
		  nRc = 1;
		  break;
		}

	      /* If bitpos != 0, then we have to care about it.  */
	      if (TYPE_FIELD_BITPOS (type, i) != 0)
		{
		  /* Bitfields are not addressable.  If the field bitsize is
		     zero, then the field is not packed.  Hence it cannot be
		     a bitfield or any other packed type.  */
		  if (TYPE_FIELD_BITSIZE (type, i) == 0)
		    {
		      nRc = 1;
		      break;
		    }
		}
	    }
	}

      return nRc;
    }
}
8085
8086 /* Write into appropriate registers a function return value of type
8087 TYPE, given in virtual format. */
8088
8089 static void
8090 arm_store_return_value (struct type *type, struct regcache *regs,
8091 const gdb_byte *valbuf)
8092 {
8093 struct gdbarch *gdbarch = regs->arch ();
8094 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
8095
8096 if (type->code () == TYPE_CODE_FLT)
8097 {
8098 gdb_byte buf[ARM_FP_REGISTER_SIZE];
8099
8100 switch (gdbarch_tdep (gdbarch)->fp_model)
8101 {
8102 case ARM_FLOAT_FPA:
8103
8104 target_float_convert (valbuf, type, buf, arm_ext_type (gdbarch));
8105 regs->cooked_write (ARM_F0_REGNUM, buf);
8106 break;
8107
8108 case ARM_FLOAT_SOFT_FPA:
8109 case ARM_FLOAT_SOFT_VFP:
8110 /* ARM_FLOAT_VFP can arise if this is a variadic function so
8111 not using the VFP ABI code. */
8112 case ARM_FLOAT_VFP:
8113 regs->cooked_write (ARM_A1_REGNUM, valbuf);
8114 if (TYPE_LENGTH (type) > 4)
8115 regs->cooked_write (ARM_A1_REGNUM + 1,
8116 valbuf + ARM_INT_REGISTER_SIZE);
8117 break;
8118
8119 default:
8120 internal_error (__FILE__, __LINE__,
8121 _("arm_store_return_value: Floating "
8122 "point model not supported"));
8123 break;
8124 }
8125 }
8126 else if (type->code () == TYPE_CODE_INT
8127 || type->code () == TYPE_CODE_CHAR
8128 || type->code () == TYPE_CODE_BOOL
8129 || type->code () == TYPE_CODE_PTR
8130 || TYPE_IS_REFERENCE (type)
8131 || type->code () == TYPE_CODE_ENUM)
8132 {
8133 if (TYPE_LENGTH (type) <= 4)
8134 {
8135 /* Values of one word or less are zero/sign-extended and
8136 returned in r0. */
8137 bfd_byte tmpbuf[ARM_INT_REGISTER_SIZE];
8138 LONGEST val = unpack_long (type, valbuf);
8139
8140 store_signed_integer (tmpbuf, ARM_INT_REGISTER_SIZE, byte_order, val);
8141 regs->cooked_write (ARM_A1_REGNUM, tmpbuf);
8142 }
8143 else
8144 {
8145 /* Integral values greater than one word are stored in consecutive
8146 registers starting with r0. This will always be a multiple of
8147 the regiser size. */
8148 int len = TYPE_LENGTH (type);
8149 int regno = ARM_A1_REGNUM;
8150
8151 while (len > 0)
8152 {
8153 regs->cooked_write (regno++, valbuf);
8154 len -= ARM_INT_REGISTER_SIZE;
8155 valbuf += ARM_INT_REGISTER_SIZE;
8156 }
8157 }
8158 }
8159 else
8160 {
8161 /* For a structure or union the behaviour is as if the value had
8162 been stored to word-aligned memory and then loaded into
8163 registers with 32-bit load instruction(s). */
8164 int len = TYPE_LENGTH (type);
8165 int regno = ARM_A1_REGNUM;
8166 bfd_byte tmpbuf[ARM_INT_REGISTER_SIZE];
8167
8168 while (len > 0)
8169 {
8170 memcpy (tmpbuf, valbuf,
8171 len > ARM_INT_REGISTER_SIZE ? ARM_INT_REGISTER_SIZE : len);
8172 regs->cooked_write (regno++, tmpbuf);
8173 len -= ARM_INT_REGISTER_SIZE;
8174 valbuf += ARM_INT_REGISTER_SIZE;
8175 }
8176 }
8177 }
8178
8179
/* Handle function return values.  */

static enum return_value_convention
arm_return_value (struct gdbarch *gdbarch, struct value *function,
		  struct type *valtype, struct regcache *regcache,
		  gdb_byte *readbuf, const gdb_byte *writebuf)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  struct type *func_type = function ? value_type (function) : NULL;
  enum arm_vfp_cprc_base_type vfp_base_type;
  int vfp_base_count;

  /* When the function follows the VFP variant of the AAPCS and the
     value is a VFP "co-processor register candidate", it is returned
     in VFP_BASE_COUNT registers of the kind named by VFP_BASE_TYPE
     (s/d/q registers).  */
  if (arm_vfp_abi_for_function (gdbarch, func_type)
      && arm_vfp_call_candidate (valtype, &vfp_base_type, &vfp_base_count))
    {
      int reg_char = arm_vfp_cprc_reg_char (vfp_base_type);
      int unit_length = arm_vfp_cprc_unit_length (vfp_base_type);
      int i;
      for (i = 0; i < vfp_base_count; i++)
	{
	  if (reg_char == 'q')
	    {
	      /* Quad registers are accessed via their two halves;
		 use the dedicated helpers.  */
	      if (writebuf)
		arm_neon_quad_write (gdbarch, regcache, i,
				     writebuf + i * unit_length);

	      if (readbuf)
		arm_neon_quad_read (gdbarch, regcache, i,
				    readbuf + i * unit_length);
	    }
	  else
	    {
	      /* Single/double registers can be addressed by name
		 (e.g. "s0", "d1").  */
	      char name_buf[4];
	      int regnum;

	      xsnprintf (name_buf, sizeof (name_buf), "%c%d", reg_char, i);
	      regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
						    strlen (name_buf));
	      if (writebuf)
		regcache->cooked_write (regnum, writebuf + i * unit_length);
	      if (readbuf)
		regcache->cooked_read (regnum, readbuf + i * unit_length);
	    }
	}
      return RETURN_VALUE_REGISTER_CONVENTION;
    }

  /* Aggregates may be returned in memory, depending on the ABI in use
     and the aggregate's layout (see arm_return_in_memory).  */
  if (valtype->code () == TYPE_CODE_STRUCT
      || valtype->code () == TYPE_CODE_UNION
      || valtype->code () == TYPE_CODE_ARRAY)
    {
      if (tdep->struct_return == pcc_struct_return
	  || arm_return_in_memory (gdbarch, valtype))
	return RETURN_VALUE_STRUCT_CONVENTION;
    }
  else if (valtype->code () == TYPE_CODE_COMPLEX)
    {
      if (arm_return_in_memory (gdbarch, valtype))
	return RETURN_VALUE_STRUCT_CONVENTION;
    }

  /* Everything else is passed through the core registers.  */
  if (writebuf)
    arm_store_return_value (valtype, regcache, writebuf);

  if (readbuf)
    arm_extract_return_value (valtype, regcache, readbuf);

  return RETURN_VALUE_REGISTER_CONVENTION;
}
8249
8250
8251 static int
8252 arm_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
8253 {
8254 struct gdbarch *gdbarch = get_frame_arch (frame);
8255 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
8256 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
8257 CORE_ADDR jb_addr;
8258 gdb_byte buf[ARM_INT_REGISTER_SIZE];
8259
8260 jb_addr = get_frame_register_unsigned (frame, ARM_A1_REGNUM);
8261
8262 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
8263 ARM_INT_REGISTER_SIZE))
8264 return 0;
8265
8266 *pc = extract_unsigned_integer (buf, ARM_INT_REGISTER_SIZE, byte_order);
8267 return 1;
8268 }
8269 /* A call to cmse secure entry function "foo" at "a" is modified by
8270 GNU ld as "b".
8271 a) bl xxxx <foo>
8272
8273 <foo>
8274 xxxx:
8275
8276 b) bl yyyy <__acle_se_foo>
8277
8278 section .gnu.sgstubs:
8279 <foo>
8280 yyyy: sg // secure gateway
8281 b.w xxxx <__acle_se_foo> // original_branch_dest
8282
8283 <__acle_se_foo>
8284 xxxx:
8285
8286    When control is at "b", the pc contains "yyyy" (sg address), which is a
8287 trampoline and does not exist in source code. This function returns the
8288 target pc "xxxx". For more details please refer to section 5.4
8289 (Entry functions) and section 3.4.4 (C level development flow of secure code)
8290 of "armv8-m-security-extensions-requirements-on-development-tools-engineering-specification"
8291 document on www.developer.arm.com. */
8292
8293 static CORE_ADDR
8294 arm_skip_cmse_entry (CORE_ADDR pc, const char *name, struct objfile *objfile)
8295 {
8296 int target_len = strlen (name) + strlen ("__acle_se_") + 1;
8297 char *target_name = (char *) alloca (target_len);
8298 xsnprintf (target_name, target_len, "%s%s", "__acle_se_", name);
8299
8300 struct bound_minimal_symbol minsym
8301 = lookup_minimal_symbol (target_name, NULL, objfile);
8302
8303 if (minsym.minsym != nullptr)
8304 return BMSYMBOL_VALUE_ADDRESS (minsym);
8305
8306 return 0;
8307 }
8308
8309 /* Return true when SEC points to ".gnu.sgstubs" section. */
8310
8311 static bool
8312 arm_is_sgstubs_section (struct obj_section *sec)
8313 {
8314 return (sec != nullptr
8315 && sec->the_bfd_section != nullptr
8316 && sec->the_bfd_section->name != nullptr
8317 && streq (sec->the_bfd_section->name, ".gnu.sgstubs"));
8318 }
8319
8320 /* Recognize GCC and GNU ld's trampolines. If we are in a trampoline,
8321 return the target PC. Otherwise return 0. */
8322
CORE_ADDR
arm_skip_stub (struct frame_info *frame, CORE_ADDR pc)
{
  const char *name;
  int namelen;
  CORE_ADDR start_addr;

  /* Find the starting address and name of the function containing the PC.  */
  if (find_pc_partial_function (pc, &name, &start_addr, NULL) == 0)
    {
      /* Trampoline 'bx reg' doesn't belong to any functions.  Do the
	 check here.  */
      start_addr = arm_skip_bx_reg (frame, pc);
      if (start_addr != 0)
	return start_addr;

      return 0;
    }

  /* If PC is in a Thumb call or return stub, return the address of the
     target PC, which is in a register.  The thunk functions are called
     _call_via_xx, where x is the register name.  The possible names
     are r0-r9, sl, fp, ip, sp, and lr.  ARM RealView has similar
     functions, named __ARM_call_via_r[0-7].  */
  if (startswith (name, "_call_via_")
      || startswith (name, "__ARM_call_via_"))
    {
      /* Use the name suffix to determine which register contains the
	 target PC.  */
      static const char *table[15] =
      {"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
       "r8", "r9", "sl", "fp", "ip", "sp", "lr"
      };
      int regno;
      /* Every table entry is exactly two characters, so the suffix
	 begins two characters before the terminating NUL.  */
      int offset = strlen (name) - 2;

      for (regno = 0; regno <= 14; regno++)
	if (strcmp (&name[offset], table[regno]) == 0)
	  return get_frame_register_unsigned (frame, regno);
    }

  /* GNU ld generates __foo_from_arm or __foo_from_thumb for
     non-interworking calls to foo.  We could decode the stubs
     to find the target but it's easier to use the symbol table.  */
  namelen = strlen (name);
  if (name[0] == '_' && name[1] == '_'
      && ((namelen > 2 + strlen ("_from_thumb")
	   && startswith (name + namelen - strlen ("_from_thumb"), "_from_thumb"))
	  || (namelen > 2 + strlen ("_from_arm")
	      && startswith (name + namelen - strlen ("_from_arm"), "_from_arm"))))
    {
      char *target_name;
      int target_len = namelen - 2;
      struct bound_minimal_symbol minsym;
      struct objfile *objfile;
      struct obj_section *sec;

      /* Strip the leading "__" and the "_from_thumb"/"_from_arm"
	 suffix to recover the target symbol name.  The final 'b'
	 distinguishes "_from_thumb" from "_from_arm".  */
      if (name[namelen - 1] == 'b')
	target_len -= strlen ("_from_thumb");
      else
	target_len -= strlen ("_from_arm");

      target_name = (char *) alloca (target_len + 1);
      memcpy (target_name, name + 2, target_len);
      target_name[target_len] = '\0';

      /* Restrict the lookup to the objfile containing PC, when known,
	 so we resolve the stub's own target.  */
      sec = find_pc_section (pc);
      objfile = (sec == NULL) ? NULL : sec->objfile;
      minsym = lookup_minimal_symbol (target_name, NULL, objfile);
      if (minsym.minsym != NULL)
	return BMSYMBOL_VALUE_ADDRESS (minsym);
      else
	return 0;
    }

  struct obj_section *section = find_pc_section (pc);

  /* Check whether SECTION points to the ".gnu.sgstubs" section.  */
  if (arm_is_sgstubs_section (section))
    return arm_skip_cmse_entry (pc, name, section->objfile);

  return 0; /* not a stub */
}
8406
/* Rebuild the current architecture after a "set arm ..." command has
   changed one of the global preferences, so the new setting takes
   effect immediately.  */

static void
arm_update_current_architecture (void)
{
  /* If the current architecture is not ARM, we have nothing to do.  */
  if (gdbarch_bfd_arch_info (target_gdbarch ())->arch != bfd_arch_arm)
    return;

  /* Update the architecture.  */
  gdbarch_info info;
  if (!gdbarch_update_p (info))
    internal_error (__FILE__, __LINE__, _("could not update architecture"));
}
8419
8420 static void
8421 set_fp_model_sfunc (const char *args, int from_tty,
8422 struct cmd_list_element *c)
8423 {
8424 int fp_model;
8425
8426 for (fp_model = ARM_FLOAT_AUTO; fp_model != ARM_FLOAT_LAST; fp_model++)
8427 if (strcmp (current_fp_model, fp_model_strings[fp_model]) == 0)
8428 {
8429 arm_fp_model = (enum arm_float_model) fp_model;
8430 break;
8431 }
8432
8433 if (fp_model == ARM_FLOAT_LAST)
8434 internal_error (__FILE__, __LINE__, _("Invalid fp model accepted: %s."),
8435 current_fp_model);
8436
8437 arm_update_current_architecture ();
8438 }
8439
/* Implement "show arm fpu".  When the setting is "auto" and the
   current architecture is ARM, also report the model actually in
   effect for it.  */

static void
show_fp_model (struct ui_file *file, int from_tty,
	       struct cmd_list_element *c, const char *value)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch ());

  if (arm_fp_model == ARM_FLOAT_AUTO
      && gdbarch_bfd_arch_info (target_gdbarch ())->arch == bfd_arch_arm)
    fprintf_filtered (file, _("\
The current ARM floating point model is \"auto\" (currently \"%s\").\n"),
		      fp_model_strings[tdep->fp_model]);
  else
    fprintf_filtered (file, _("\
The current ARM floating point model is \"%s\".\n"),
		      fp_model_strings[arm_fp_model]);
}
8456
8457 static void
8458 arm_set_abi (const char *args, int from_tty,
8459 struct cmd_list_element *c)
8460 {
8461 int arm_abi;
8462
8463 for (arm_abi = ARM_ABI_AUTO; arm_abi != ARM_ABI_LAST; arm_abi++)
8464 if (strcmp (arm_abi_string, arm_abi_strings[arm_abi]) == 0)
8465 {
8466 arm_abi_global = (enum arm_abi_kind) arm_abi;
8467 break;
8468 }
8469
8470 if (arm_abi == ARM_ABI_LAST)
8471 internal_error (__FILE__, __LINE__, _("Invalid ABI accepted: %s."),
8472 arm_abi_string);
8473
8474 arm_update_current_architecture ();
8475 }
8476
/* Implement "show arm abi".  When the setting is "auto" and the
   current architecture is ARM, also report the ABI actually in effect
   for it.  */

static void
arm_show_abi (struct ui_file *file, int from_tty,
	      struct cmd_list_element *c, const char *value)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch ());

  if (arm_abi_global == ARM_ABI_AUTO
      && gdbarch_bfd_arch_info (target_gdbarch ())->arch == bfd_arch_arm)
    fprintf_filtered (file, _("\
The current ARM ABI is \"auto\" (currently \"%s\").\n"),
		      arm_abi_strings[tdep->arm_abi]);
  else
    fprintf_filtered (file, _("The current ARM ABI is \"%s\".\n"),
		      arm_abi_string);
}
8492
/* Implement "show arm fallback-mode".  */

static void
arm_show_fallback_mode (struct ui_file *file, int from_tty,
			struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
		    _("The current execution mode assumed "
		      "(when symbols are unavailable) is \"%s\".\n"),
		    arm_fallback_mode_string);
}
8502
/* Implement "show arm force-mode".  */

static void
arm_show_force_mode (struct ui_file *file, int from_tty,
		     struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
		    _("The current execution mode assumed "
		      "(even when symbols are available) is \"%s\".\n"),
		    arm_force_mode_string);
}
8512
8513 /* If the user changes the register disassembly style used for info
8514 register and other commands, we have to also switch the style used
8515 in opcodes for disassembly output. This function is run in the "set
8516 arm disassembly" command, and does that. */
8517
8518 static void
8519 set_disassembly_style_sfunc (const char *args, int from_tty,
8520 struct cmd_list_element *c)
8521 {
8522 /* Convert the short style name into the long style name (eg, reg-names-*)
8523 before calling the generic set_disassembler_options() function. */
8524 std::string long_name = std::string ("reg-names-") + disassembly_style;
8525 set_disassembler_options (&long_name[0]);
8526 }
8527
8528 static void
8529 show_disassembly_style_sfunc (struct ui_file *file, int from_tty,
8530 struct cmd_list_element *c, const char *value)
8531 {
8532 struct gdbarch *gdbarch = get_current_arch ();
8533 char *options = get_disassembler_options (gdbarch);
8534 const char *style = "";
8535 int len = 0;
8536 const char *opt;
8537
8538 FOR_EACH_DISASSEMBLER_OPTION (opt, options)
8539 if (startswith (opt, "reg-names-"))
8540 {
8541 style = &opt[strlen ("reg-names-")];
8542 len = strcspn (style, ",");
8543 }
8544
8545 fprintf_unfiltered (file, "The disassembly style is \"%.*s\".\n", len, style);
8546 }
8547 \f
8548 /* Return the ARM register name corresponding to register I. */
8549 static const char *
8550 arm_register_name (struct gdbarch *gdbarch, int i)
8551 {
8552 const int num_regs = gdbarch_num_regs (gdbarch);
8553
8554 if (gdbarch_tdep (gdbarch)->have_vfp_pseudos
8555 && i >= num_regs && i < num_regs + 32)
8556 {
8557 static const char *const vfp_pseudo_names[] = {
8558 "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
8559 "s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15",
8560 "s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
8561 "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",
8562 };
8563
8564 return vfp_pseudo_names[i - num_regs];
8565 }
8566
8567 if (gdbarch_tdep (gdbarch)->have_neon_pseudos
8568 && i >= num_regs + 32 && i < num_regs + 32 + 16)
8569 {
8570 static const char *const neon_pseudo_names[] = {
8571 "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7",
8572 "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15",
8573 };
8574
8575 return neon_pseudo_names[i - num_regs - 32];
8576 }
8577
8578 if (i >= ARRAY_SIZE (arm_register_names))
8579 /* These registers are only supported on targets which supply
8580 an XML description. */
8581 return "";
8582
8583 return arm_register_names[i];
8584 }
8585
8586 /* Test whether the coff symbol specific value corresponds to a Thumb
8587 function. */
8588
8589 static int
8590 coff_sym_is_thumb (int val)
8591 {
8592 return (val == C_THUMBEXT
8593 || val == C_THUMBSTAT
8594 || val == C_THUMBEXTFUNC
8595 || val == C_THUMBSTATFUNC
8596 || val == C_THUMBLABEL);
8597 }
8598
8599 /* arm_coff_make_msymbol_special()
8600 arm_elf_make_msymbol_special()
8601
8602 These functions test whether the COFF or ELF symbol corresponds to
8603 an address in thumb code, and set a "special" bit in a minimal
8604 symbol to indicate that it does. */
8605
static void
arm_elf_make_msymbol_special(asymbol *sym, struct minimal_symbol *msym)
{
  /* Mark MSYM as "special" (Thumb) when the ELF symbol's
     target-internal branch-type annotation says it is Thumb code.  */
  elf_symbol_type *elfsym = (elf_symbol_type *) sym;

  if (ARM_GET_SYM_BRANCH_TYPE (elfsym->internal_elf_sym.st_target_internal)
      == ST_BRANCH_TO_THUMB)
    MSYMBOL_SET_SPECIAL (msym);
}
8615
static void
arm_coff_make_msymbol_special(int val, struct minimal_symbol *msym)
{
  /* Mark MSYM as "special" (Thumb) when the COFF storage class VAL is
     one of the Thumb classes.  */
  if (coff_sym_is_thumb (val))
    MSYMBOL_SET_SPECIAL (msym);
}
8622
8623 static void
8624 arm_record_special_symbol (struct gdbarch *gdbarch, struct objfile *objfile,
8625 asymbol *sym)
8626 {
8627 const char *name = bfd_asymbol_name (sym);
8628 struct arm_per_bfd *data;
8629 struct arm_mapping_symbol new_map_sym;
8630
8631 gdb_assert (name[0] == '$');
8632 if (name[1] != 'a' && name[1] != 't' && name[1] != 'd')
8633 return;
8634
8635 data = arm_bfd_data_key.get (objfile->obfd);
8636 if (data == NULL)
8637 data = arm_bfd_data_key.emplace (objfile->obfd,
8638 objfile->obfd->section_count);
8639 arm_mapping_symbol_vec &map
8640 = data->section_maps[bfd_asymbol_section (sym)->index];
8641
8642 new_map_sym.value = sym->value;
8643 new_map_sym.type = name[1];
8644
8645 /* Insert at the end, the vector will be sorted on first use. */
8646 map.push_back (new_map_sym);
8647 }
8648
8649 static void
8650 arm_write_pc (struct regcache *regcache, CORE_ADDR pc)
8651 {
8652 struct gdbarch *gdbarch = regcache->arch ();
8653 regcache_cooked_write_unsigned (regcache, ARM_PC_REGNUM, pc);
8654
8655 /* If necessary, set the T bit. */
8656 if (arm_apcs_32)
8657 {
8658 ULONGEST val, t_bit;
8659 regcache_cooked_read_unsigned (regcache, ARM_PS_REGNUM, &val);
8660 t_bit = arm_psr_thumb_bit (gdbarch);
8661 if (arm_pc_is_thumb (gdbarch, pc))
8662 regcache_cooked_write_unsigned (regcache, ARM_PS_REGNUM,
8663 val | t_bit);
8664 else
8665 regcache_cooked_write_unsigned (regcache, ARM_PS_REGNUM,
8666 val & ~t_bit);
8667 }
8668 }
8669
8670 /* Read the contents of a NEON quad register, by reading from two
8671 double registers. This is used to implement the quad pseudo
8672 registers, and for argument passing in case the quad registers are
8673 missing; vectors are passed in quad registers when using the VFP
8674 ABI, even if a NEON unit is not present. REGNUM is the index of
8675 the quad register, in [0, 15]. */
8676
8677 static enum register_status
8678 arm_neon_quad_read (struct gdbarch *gdbarch, readable_regcache *regcache,
8679 int regnum, gdb_byte *buf)
8680 {
8681 char name_buf[4];
8682 gdb_byte reg_buf[8];
8683 int offset, double_regnum;
8684 enum register_status status;
8685
8686 xsnprintf (name_buf, sizeof (name_buf), "d%d", regnum << 1);
8687 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
8688 strlen (name_buf));
8689
8690 /* d0 is always the least significant half of q0. */
8691 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
8692 offset = 8;
8693 else
8694 offset = 0;
8695
8696 status = regcache->raw_read (double_regnum, reg_buf);
8697 if (status != REG_VALID)
8698 return status;
8699 memcpy (buf + offset, reg_buf, 8);
8700
8701 offset = 8 - offset;
8702 status = regcache->raw_read (double_regnum + 1, reg_buf);
8703 if (status != REG_VALID)
8704 return status;
8705 memcpy (buf + offset, reg_buf, 8);
8706
8707 return REG_VALID;
8708 }
8709
8710 static enum register_status
8711 arm_pseudo_read (struct gdbarch *gdbarch, readable_regcache *regcache,
8712 int regnum, gdb_byte *buf)
8713 {
8714 const int num_regs = gdbarch_num_regs (gdbarch);
8715 char name_buf[4];
8716 gdb_byte reg_buf[8];
8717 int offset, double_regnum;
8718
8719 gdb_assert (regnum >= num_regs);
8720 regnum -= num_regs;
8721
8722 if (gdbarch_tdep (gdbarch)->have_neon_pseudos && regnum >= 32 && regnum < 48)
8723 /* Quad-precision register. */
8724 return arm_neon_quad_read (gdbarch, regcache, regnum - 32, buf);
8725 else
8726 {
8727 enum register_status status;
8728
8729 /* Single-precision register. */
8730 gdb_assert (regnum < 32);
8731
8732 /* s0 is always the least significant half of d0. */
8733 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
8734 offset = (regnum & 1) ? 0 : 4;
8735 else
8736 offset = (regnum & 1) ? 4 : 0;
8737
8738 xsnprintf (name_buf, sizeof (name_buf), "d%d", regnum >> 1);
8739 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
8740 strlen (name_buf));
8741
8742 status = regcache->raw_read (double_regnum, reg_buf);
8743 if (status == REG_VALID)
8744 memcpy (buf, reg_buf + offset, 4);
8745 return status;
8746 }
8747 }
8748
8749 /* Store the contents of BUF to a NEON quad register, by writing to
8750 two double registers. This is used to implement the quad pseudo
8751 registers, and for argument passing in case the quad registers are
8752 missing; vectors are passed in quad registers when using the VFP
8753 ABI, even if a NEON unit is not present. REGNUM is the index
8754 of the quad register, in [0, 15]. */
8755
8756 static void
8757 arm_neon_quad_write (struct gdbarch *gdbarch, struct regcache *regcache,
8758 int regnum, const gdb_byte *buf)
8759 {
8760 char name_buf[4];
8761 int offset, double_regnum;
8762
8763 xsnprintf (name_buf, sizeof (name_buf), "d%d", regnum << 1);
8764 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
8765 strlen (name_buf));
8766
8767 /* d0 is always the least significant half of q0. */
8768 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
8769 offset = 8;
8770 else
8771 offset = 0;
8772
8773 regcache->raw_write (double_regnum, buf + offset);
8774 offset = 8 - offset;
8775 regcache->raw_write (double_regnum + 1, buf + offset);
8776 }
8777
8778 static void
8779 arm_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
8780 int regnum, const gdb_byte *buf)
8781 {
8782 const int num_regs = gdbarch_num_regs (gdbarch);
8783 char name_buf[4];
8784 gdb_byte reg_buf[8];
8785 int offset, double_regnum;
8786
8787 gdb_assert (regnum >= num_regs);
8788 regnum -= num_regs;
8789
8790 if (gdbarch_tdep (gdbarch)->have_neon_pseudos && regnum >= 32 && regnum < 48)
8791 /* Quad-precision register. */
8792 arm_neon_quad_write (gdbarch, regcache, regnum - 32, buf);
8793 else
8794 {
8795 /* Single-precision register. */
8796 gdb_assert (regnum < 32);
8797
8798 /* s0 is always the least significant half of d0. */
8799 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
8800 offset = (regnum & 1) ? 0 : 4;
8801 else
8802 offset = (regnum & 1) ? 4 : 0;
8803
8804 xsnprintf (name_buf, sizeof (name_buf), "d%d", regnum >> 1);
8805 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
8806 strlen (name_buf));
8807
8808 regcache->raw_read (double_regnum, reg_buf);
8809 memcpy (reg_buf + offset, buf, 4);
8810 regcache->raw_write (double_regnum, reg_buf);
8811 }
8812 }
8813
static struct value *
value_of_arm_user_reg (struct frame_info *frame, const void *baton)
{
  /* User-register "value" callback: BATON points at the register
     number associated with the alias (presumably supplied at
     registration time via user_reg_add — confirm at the caller).  */
  const int *reg_p = (const int *) baton;
  return value_of_register (*reg_p, frame);
}
8820 \f
8821 static enum gdb_osabi
8822 arm_elf_osabi_sniffer (bfd *abfd)
8823 {
8824 unsigned int elfosabi;
8825 enum gdb_osabi osabi = GDB_OSABI_UNKNOWN;
8826
8827 elfosabi = elf_elfheader (abfd)->e_ident[EI_OSABI];
8828
8829 if (elfosabi == ELFOSABI_ARM)
8830 /* GNU tools use this value. Check note sections in this case,
8831 as well. */
8832 {
8833 for (asection *sect : gdb_bfd_sections (abfd))
8834 generic_elf_osabi_sniff_abi_tag_sections (abfd, sect, &osabi);
8835 }
8836
8837 /* Anything else will be handled by the generic ELF sniffer. */
8838 return osabi;
8839 }
8840
8841 static int
8842 arm_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
8843 struct reggroup *group)
8844 {
8845 /* FPS register's type is INT, but belongs to float_reggroup. Beside
8846 this, FPS register belongs to save_regroup, restore_reggroup, and
8847 all_reggroup, of course. */
8848 if (regnum == ARM_FPS_REGNUM)
8849 return (group == float_reggroup
8850 || group == save_reggroup
8851 || group == restore_reggroup
8852 || group == all_reggroup);
8853 else
8854 return default_register_reggroup_p (gdbarch, regnum, group);
8855 }
8856
8857 /* For backward-compatibility we allow two 'g' packet lengths with
8858 the remote protocol depending on whether FPA registers are
8859 supplied. M-profile targets do not have FPA registers, but some
8860 stubs already exist in the wild which use a 'g' packet which
8861 supplies them albeit with dummy values. The packet format which
8862 includes FPA registers should be considered deprecated for
8863 M-profile targets. */
8864
8865 static void
8866 arm_register_g_packet_guesses (struct gdbarch *gdbarch)
8867 {
8868 if (gdbarch_tdep (gdbarch)->is_m)
8869 {
8870 const target_desc *tdesc;
8871
8872 /* If we know from the executable this is an M-profile target,
8873 cater for remote targets whose register set layout is the
8874 same as the FPA layout. */
8875 tdesc = arm_read_mprofile_description (ARM_M_TYPE_WITH_FPA);
8876 register_remote_g_packet_guess (gdbarch,
8877 ARM_CORE_REGS_SIZE + ARM_FP_REGS_SIZE,
8878 tdesc);
8879
8880 /* The regular M-profile layout. */
8881 tdesc = arm_read_mprofile_description (ARM_M_TYPE_M_PROFILE);
8882 register_remote_g_packet_guess (gdbarch, ARM_CORE_REGS_SIZE,
8883 tdesc);
8884
8885 /* M-profile plus M4F VFP. */
8886 tdesc = arm_read_mprofile_description (ARM_M_TYPE_VFP_D16);
8887 register_remote_g_packet_guess (gdbarch,
8888 ARM_CORE_REGS_SIZE + ARM_VFP2_REGS_SIZE,
8889 tdesc);
8890 }
8891
8892 /* Otherwise we don't have a useful guess. */
8893 }
8894
8895 /* Implement the code_of_frame_writable gdbarch method. */
8896
8897 static int
8898 arm_code_of_frame_writable (struct gdbarch *gdbarch, struct frame_info *frame)
8899 {
8900 if (gdbarch_tdep (gdbarch)->is_m
8901 && get_frame_type (frame) == SIGTRAMP_FRAME)
8902 {
8903 /* M-profile exception frames return to some magic PCs, where
8904 isn't writable at all. */
8905 return 0;
8906 }
8907 else
8908 return 1;
8909 }
8910
8911 /* Implement gdbarch_gnu_triplet_regexp. If the arch name is arm then allow it
8912 to be postfixed by a version (eg armv7hl). */
8913
8914 static const char *
8915 arm_gnu_triplet_regexp (struct gdbarch *gdbarch)
8916 {
8917 if (strcmp (gdbarch_bfd_arch_info (gdbarch)->arch_name, "arm") == 0)
8918 return "arm(v[^- ]*)?";
8919 return gdbarch_bfd_arch_info (gdbarch)->arch_name;
8920 }
8921
8922 /* Initialize the current architecture based on INFO. If possible,
8923 re-use an architecture from ARCHES, which is a list of
8924 architectures already created during this debugging session.
8925
8926 Called e.g. at program startup, when reading a core file, and when
8927 reading a binary file. */
8928
8929 static struct gdbarch *
8930 arm_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
8931 {
8932 struct gdbarch_tdep *tdep;
8933 struct gdbarch *gdbarch;
8934 struct gdbarch_list *best_arch;
8935 enum arm_abi_kind arm_abi = arm_abi_global;
8936 enum arm_float_model fp_model = arm_fp_model;
8937 tdesc_arch_data_up tdesc_data;
8938 int i;
8939 bool is_m = false;
8940 int vfp_register_count = 0;
8941 bool have_vfp_pseudos = false, have_neon_pseudos = false;
8942 bool have_wmmx_registers = false;
8943 bool have_neon = false;
8944 bool have_fpa_registers = true;
8945 const struct target_desc *tdesc = info.target_desc;
8946
8947 /* If we have an object to base this architecture on, try to determine
8948 its ABI. */
8949
8950 if (arm_abi == ARM_ABI_AUTO && info.abfd != NULL)
8951 {
8952 int ei_osabi, e_flags;
8953
8954 switch (bfd_get_flavour (info.abfd))
8955 {
8956 case bfd_target_coff_flavour:
8957 /* Assume it's an old APCS-style ABI. */
8958 /* XXX WinCE? */
8959 arm_abi = ARM_ABI_APCS;
8960 break;
8961
8962 case bfd_target_elf_flavour:
8963 ei_osabi = elf_elfheader (info.abfd)->e_ident[EI_OSABI];
8964 e_flags = elf_elfheader (info.abfd)->e_flags;
8965
8966 if (ei_osabi == ELFOSABI_ARM)
8967 {
8968 /* GNU tools used to use this value, but do not for EABI
8969 objects. There's nowhere to tag an EABI version
8970 anyway, so assume APCS. */
8971 arm_abi = ARM_ABI_APCS;
8972 }
8973 else if (ei_osabi == ELFOSABI_NONE || ei_osabi == ELFOSABI_GNU)
8974 {
8975 int eabi_ver = EF_ARM_EABI_VERSION (e_flags);
8976
8977 switch (eabi_ver)
8978 {
8979 case EF_ARM_EABI_UNKNOWN:
8980 /* Assume GNU tools. */
8981 arm_abi = ARM_ABI_APCS;
8982 break;
8983
8984 case EF_ARM_EABI_VER4:
8985 case EF_ARM_EABI_VER5:
8986 arm_abi = ARM_ABI_AAPCS;
8987 /* EABI binaries default to VFP float ordering.
8988 They may also contain build attributes that can
8989 be used to identify if the VFP argument-passing
8990 ABI is in use. */
8991 if (fp_model == ARM_FLOAT_AUTO)
8992 {
8993 #ifdef HAVE_ELF
8994 switch (bfd_elf_get_obj_attr_int (info.abfd,
8995 OBJ_ATTR_PROC,
8996 Tag_ABI_VFP_args))
8997 {
8998 case AEABI_VFP_args_base:
8999 /* "The user intended FP parameter/result
9000 passing to conform to AAPCS, base
9001 variant". */
9002 fp_model = ARM_FLOAT_SOFT_VFP;
9003 break;
9004 case AEABI_VFP_args_vfp:
9005 /* "The user intended FP parameter/result
9006 passing to conform to AAPCS, VFP
9007 variant". */
9008 fp_model = ARM_FLOAT_VFP;
9009 break;
9010 case AEABI_VFP_args_toolchain:
9011 /* "The user intended FP parameter/result
9012 passing to conform to tool chain-specific
9013 conventions" - we don't know any such
9014 conventions, so leave it as "auto". */
9015 break;
9016 case AEABI_VFP_args_compatible:
9017 /* "Code is compatible with both the base
9018 and VFP variants; the user did not permit
9019 non-variadic functions to pass FP
9020 parameters/results" - leave it as
9021 "auto". */
9022 break;
9023 default:
9024 /* Attribute value not mentioned in the
9025 November 2012 ABI, so leave it as
9026 "auto". */
9027 break;
9028 }
9029 #else
9030 fp_model = ARM_FLOAT_SOFT_VFP;
9031 #endif
9032 }
9033 break;
9034
9035 default:
9036 /* Leave it as "auto". */
9037 warning (_("unknown ARM EABI version 0x%x"), eabi_ver);
9038 break;
9039 }
9040
9041 #ifdef HAVE_ELF
9042 /* Detect M-profile programs. This only works if the
9043 executable file includes build attributes; GCC does
9044 copy them to the executable, but e.g. RealView does
9045 not. */
9046 int attr_arch
9047 = bfd_elf_get_obj_attr_int (info.abfd, OBJ_ATTR_PROC,
9048 Tag_CPU_arch);
9049 int attr_profile
9050 = bfd_elf_get_obj_attr_int (info.abfd, OBJ_ATTR_PROC,
9051 Tag_CPU_arch_profile);
9052
9053 /* GCC specifies the profile for v6-M; RealView only
9054 specifies the profile for architectures starting with
9055 V7 (as opposed to architectures with a tag
9056 numerically greater than TAG_CPU_ARCH_V7). */
9057 if (!tdesc_has_registers (tdesc)
9058 && (attr_arch == TAG_CPU_ARCH_V6_M
9059 || attr_arch == TAG_CPU_ARCH_V6S_M
9060 || attr_profile == 'M'))
9061 is_m = true;
9062 #endif
9063 }
9064
9065 if (fp_model == ARM_FLOAT_AUTO)
9066 {
9067 switch (e_flags & (EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT))
9068 {
9069 case 0:
9070 /* Leave it as "auto". Strictly speaking this case
9071 means FPA, but almost nobody uses that now, and
9072 many toolchains fail to set the appropriate bits
9073 for the floating-point model they use. */
9074 break;
9075 case EF_ARM_SOFT_FLOAT:
9076 fp_model = ARM_FLOAT_SOFT_FPA;
9077 break;
9078 case EF_ARM_VFP_FLOAT:
9079 fp_model = ARM_FLOAT_VFP;
9080 break;
9081 case EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT:
9082 fp_model = ARM_FLOAT_SOFT_VFP;
9083 break;
9084 }
9085 }
9086
9087 if (e_flags & EF_ARM_BE8)
9088 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
9089
9090 break;
9091
9092 default:
9093 /* Leave it as "auto". */
9094 break;
9095 }
9096 }
9097
9098 /* Check any target description for validity. */
9099 if (tdesc_has_registers (tdesc))
9100 {
9101 /* For most registers we require GDB's default names; but also allow
9102 the numeric names for sp / lr / pc, as a convenience. */
9103 static const char *const arm_sp_names[] = { "r13", "sp", NULL };
9104 static const char *const arm_lr_names[] = { "r14", "lr", NULL };
9105 static const char *const arm_pc_names[] = { "r15", "pc", NULL };
9106
9107 const struct tdesc_feature *feature;
9108 int valid_p;
9109
9110 feature = tdesc_find_feature (tdesc,
9111 "org.gnu.gdb.arm.core");
9112 if (feature == NULL)
9113 {
9114 feature = tdesc_find_feature (tdesc,
9115 "org.gnu.gdb.arm.m-profile");
9116 if (feature == NULL)
9117 return NULL;
9118 else
9119 is_m = true;
9120 }
9121
9122 tdesc_data = tdesc_data_alloc ();
9123
9124 valid_p = 1;
9125 for (i = 0; i < ARM_SP_REGNUM; i++)
9126 valid_p &= tdesc_numbered_register (feature, tdesc_data.get (), i,
9127 arm_register_names[i]);
9128 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data.get (),
9129 ARM_SP_REGNUM,
9130 arm_sp_names);
9131 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data.get (),
9132 ARM_LR_REGNUM,
9133 arm_lr_names);
9134 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data.get (),
9135 ARM_PC_REGNUM,
9136 arm_pc_names);
9137 if (is_m)
9138 valid_p &= tdesc_numbered_register (feature, tdesc_data.get (),
9139 ARM_PS_REGNUM, "xpsr");
9140 else
9141 valid_p &= tdesc_numbered_register (feature, tdesc_data.get (),
9142 ARM_PS_REGNUM, "cpsr");
9143
9144 if (!valid_p)
9145 return NULL;
9146
9147 feature = tdesc_find_feature (tdesc,
9148 "org.gnu.gdb.arm.fpa");
9149 if (feature != NULL)
9150 {
9151 valid_p = 1;
9152 for (i = ARM_F0_REGNUM; i <= ARM_FPS_REGNUM; i++)
9153 valid_p &= tdesc_numbered_register (feature, tdesc_data.get (), i,
9154 arm_register_names[i]);
9155 if (!valid_p)
9156 return NULL;
9157 }
9158 else
9159 have_fpa_registers = false;
9160
9161 feature = tdesc_find_feature (tdesc,
9162 "org.gnu.gdb.xscale.iwmmxt");
9163 if (feature != NULL)
9164 {
9165 static const char *const iwmmxt_names[] = {
9166 "wR0", "wR1", "wR2", "wR3", "wR4", "wR5", "wR6", "wR7",
9167 "wR8", "wR9", "wR10", "wR11", "wR12", "wR13", "wR14", "wR15",
9168 "wCID", "wCon", "wCSSF", "wCASF", "", "", "", "",
9169 "wCGR0", "wCGR1", "wCGR2", "wCGR3", "", "", "", "",
9170 };
9171
9172 valid_p = 1;
9173 for (i = ARM_WR0_REGNUM; i <= ARM_WR15_REGNUM; i++)
9174 valid_p
9175 &= tdesc_numbered_register (feature, tdesc_data.get (), i,
9176 iwmmxt_names[i - ARM_WR0_REGNUM]);
9177
9178 /* Check for the control registers, but do not fail if they
9179 are missing. */
9180 for (i = ARM_WC0_REGNUM; i <= ARM_WCASF_REGNUM; i++)
9181 tdesc_numbered_register (feature, tdesc_data.get (), i,
9182 iwmmxt_names[i - ARM_WR0_REGNUM]);
9183
9184 for (i = ARM_WCGR0_REGNUM; i <= ARM_WCGR3_REGNUM; i++)
9185 valid_p
9186 &= tdesc_numbered_register (feature, tdesc_data.get (), i,
9187 iwmmxt_names[i - ARM_WR0_REGNUM]);
9188
9189 if (!valid_p)
9190 return NULL;
9191
9192 have_wmmx_registers = true;
9193 }
9194
9195 /* If we have a VFP unit, check whether the single precision registers
9196 are present. If not, then we will synthesize them as pseudo
9197 registers. */
9198 feature = tdesc_find_feature (tdesc,
9199 "org.gnu.gdb.arm.vfp");
9200 if (feature != NULL)
9201 {
9202 static const char *const vfp_double_names[] = {
9203 "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
9204 "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
9205 "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
9206 "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",
9207 };
9208
9209 /* Require the double precision registers. There must be either
9210 16 or 32. */
9211 valid_p = 1;
9212 for (i = 0; i < 32; i++)
9213 {
9214 valid_p &= tdesc_numbered_register (feature, tdesc_data.get (),
9215 ARM_D0_REGNUM + i,
9216 vfp_double_names[i]);
9217 if (!valid_p)
9218 break;
9219 }
9220 if (!valid_p && i == 16)
9221 valid_p = 1;
9222
9223 /* Also require FPSCR. */
9224 valid_p &= tdesc_numbered_register (feature, tdesc_data.get (),
9225 ARM_FPSCR_REGNUM, "fpscr");
9226 if (!valid_p)
9227 return NULL;
9228
9229 if (tdesc_unnumbered_register (feature, "s0") == 0)
9230 have_vfp_pseudos = true;
9231
9232 vfp_register_count = i;
9233
9234 /* If we have VFP, also check for NEON. The architecture allows
9235 NEON without VFP (integer vector operations only), but GDB
9236 does not support that. */
9237 feature = tdesc_find_feature (tdesc,
9238 "org.gnu.gdb.arm.neon");
9239 if (feature != NULL)
9240 {
9241 /* NEON requires 32 double-precision registers. */
9242 if (i != 32)
9243 return NULL;
9244
9245 /* If there are quad registers defined by the stub, use
9246 their type; otherwise (normally) provide them with
9247 the default type. */
9248 if (tdesc_unnumbered_register (feature, "q0") == 0)
9249 have_neon_pseudos = true;
9250
9251 have_neon = true;
9252 }
9253 }
9254 }
9255
9256 /* If there is already a candidate, use it. */
9257 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
9258 best_arch != NULL;
9259 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
9260 {
9261 if (arm_abi != ARM_ABI_AUTO
9262 && arm_abi != gdbarch_tdep (best_arch->gdbarch)->arm_abi)
9263 continue;
9264
9265 if (fp_model != ARM_FLOAT_AUTO
9266 && fp_model != gdbarch_tdep (best_arch->gdbarch)->fp_model)
9267 continue;
9268
9269 /* There are various other properties in tdep that we do not
9270 need to check here: those derived from a target description,
9271 since gdbarches with a different target description are
9272 automatically disqualified. */
9273
9274 /* Do check is_m, though, since it might come from the binary. */
9275 if (is_m != gdbarch_tdep (best_arch->gdbarch)->is_m)
9276 continue;
9277
9278 /* Found a match. */
9279 break;
9280 }
9281
9282 if (best_arch != NULL)
9283 return best_arch->gdbarch;
9284
9285 tdep = XCNEW (struct gdbarch_tdep);
9286 gdbarch = gdbarch_alloc (&info, tdep);
9287
9288 /* Record additional information about the architecture we are defining.
9289 These are gdbarch discriminators, like the OSABI. */
9290 tdep->arm_abi = arm_abi;
9291 tdep->fp_model = fp_model;
9292 tdep->is_m = is_m;
9293 tdep->have_fpa_registers = have_fpa_registers;
9294 tdep->have_wmmx_registers = have_wmmx_registers;
9295 gdb_assert (vfp_register_count == 0
9296 || vfp_register_count == 16
9297 || vfp_register_count == 32);
9298 tdep->vfp_register_count = vfp_register_count;
9299 tdep->have_vfp_pseudos = have_vfp_pseudos;
9300 tdep->have_neon_pseudos = have_neon_pseudos;
9301 tdep->have_neon = have_neon;
9302
9303 arm_register_g_packet_guesses (gdbarch);
9304
9305 /* Breakpoints. */
9306 switch (info.byte_order_for_code)
9307 {
9308 case BFD_ENDIAN_BIG:
9309 tdep->arm_breakpoint = arm_default_arm_be_breakpoint;
9310 tdep->arm_breakpoint_size = sizeof (arm_default_arm_be_breakpoint);
9311 tdep->thumb_breakpoint = arm_default_thumb_be_breakpoint;
9312 tdep->thumb_breakpoint_size = sizeof (arm_default_thumb_be_breakpoint);
9313
9314 break;
9315
9316 case BFD_ENDIAN_LITTLE:
9317 tdep->arm_breakpoint = arm_default_arm_le_breakpoint;
9318 tdep->arm_breakpoint_size = sizeof (arm_default_arm_le_breakpoint);
9319 tdep->thumb_breakpoint = arm_default_thumb_le_breakpoint;
9320 tdep->thumb_breakpoint_size = sizeof (arm_default_thumb_le_breakpoint);
9321
9322 break;
9323
9324 default:
9325 internal_error (__FILE__, __LINE__,
9326 _("arm_gdbarch_init: bad byte order for float format"));
9327 }
9328
9329 /* On ARM targets char defaults to unsigned. */
9330 set_gdbarch_char_signed (gdbarch, 0);
9331
9332 /* wchar_t is unsigned under the AAPCS. */
9333 if (tdep->arm_abi == ARM_ABI_AAPCS)
9334 set_gdbarch_wchar_signed (gdbarch, 0);
9335 else
9336 set_gdbarch_wchar_signed (gdbarch, 1);
9337
9338 /* Compute type alignment. */
9339 set_gdbarch_type_align (gdbarch, arm_type_align);
9340
9341 /* Note: for displaced stepping, this includes the breakpoint, and one word
9342 of additional scratch space. This setting isn't used for anything beside
9343 displaced stepping at present. */
9344 set_gdbarch_max_insn_length (gdbarch, 4 * ARM_DISPLACED_MODIFIED_INSNS);
9345
9346 /* This should be low enough for everything. */
9347 tdep->lowest_pc = 0x20;
9348 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
9349
9350 /* The default, for both APCS and AAPCS, is to return small
9351 structures in registers. */
9352 tdep->struct_return = reg_struct_return;
9353
9354 set_gdbarch_push_dummy_call (gdbarch, arm_push_dummy_call);
9355 set_gdbarch_frame_align (gdbarch, arm_frame_align);
9356
9357 if (is_m)
9358 set_gdbarch_code_of_frame_writable (gdbarch, arm_code_of_frame_writable);
9359
9360 set_gdbarch_write_pc (gdbarch, arm_write_pc);
9361
9362 frame_base_set_default (gdbarch, &arm_normal_base);
9363
9364 /* Address manipulation. */
9365 set_gdbarch_addr_bits_remove (gdbarch, arm_addr_bits_remove);
9366
9367 /* Advance PC across function entry code. */
9368 set_gdbarch_skip_prologue (gdbarch, arm_skip_prologue);
9369
9370 /* Detect whether PC is at a point where the stack has been destroyed. */
9371 set_gdbarch_stack_frame_destroyed_p (gdbarch, arm_stack_frame_destroyed_p);
9372
9373 /* Skip trampolines. */
9374 set_gdbarch_skip_trampoline_code (gdbarch, arm_skip_stub);
9375
9376 /* The stack grows downward. */
9377 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
9378
9379 /* Breakpoint manipulation. */
9380 set_gdbarch_breakpoint_kind_from_pc (gdbarch, arm_breakpoint_kind_from_pc);
9381 set_gdbarch_sw_breakpoint_from_kind (gdbarch, arm_sw_breakpoint_from_kind);
9382 set_gdbarch_breakpoint_kind_from_current_state (gdbarch,
9383 arm_breakpoint_kind_from_current_state);
9384
9385 /* Information about registers, etc. */
9386 set_gdbarch_sp_regnum (gdbarch, ARM_SP_REGNUM);
9387 set_gdbarch_pc_regnum (gdbarch, ARM_PC_REGNUM);
9388 set_gdbarch_num_regs (gdbarch, ARM_NUM_REGS);
9389 set_gdbarch_register_type (gdbarch, arm_register_type);
9390 set_gdbarch_register_reggroup_p (gdbarch, arm_register_reggroup_p);
9391
9392 /* This "info float" is FPA-specific. Use the generic version if we
9393 do not have FPA. */
9394 if (gdbarch_tdep (gdbarch)->have_fpa_registers)
9395 set_gdbarch_print_float_info (gdbarch, arm_print_float_info);
9396
9397 /* Internal <-> external register number maps. */
9398 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, arm_dwarf_reg_to_regnum);
9399 set_gdbarch_register_sim_regno (gdbarch, arm_register_sim_regno);
9400
9401 set_gdbarch_register_name (gdbarch, arm_register_name);
9402
9403 /* Returning results. */
9404 set_gdbarch_return_value (gdbarch, arm_return_value);
9405
9406 /* Disassembly. */
9407 set_gdbarch_print_insn (gdbarch, gdb_print_insn_arm);
9408
9409 /* Minsymbol frobbing. */
9410 set_gdbarch_elf_make_msymbol_special (gdbarch, arm_elf_make_msymbol_special);
9411 set_gdbarch_coff_make_msymbol_special (gdbarch,
9412 arm_coff_make_msymbol_special);
9413 set_gdbarch_record_special_symbol (gdbarch, arm_record_special_symbol);
9414
9415 /* Thumb-2 IT block support. */
9416 set_gdbarch_adjust_breakpoint_address (gdbarch,
9417 arm_adjust_breakpoint_address);
9418
9419 /* Virtual tables. */
9420 set_gdbarch_vbit_in_delta (gdbarch, 1);
9421
9422 /* Hook in the ABI-specific overrides, if they have been registered. */
9423 gdbarch_init_osabi (info, gdbarch);
9424
9425 dwarf2_frame_set_init_reg (gdbarch, arm_dwarf2_frame_init_reg);
9426
9427 /* Add some default predicates. */
9428 if (is_m)
9429 frame_unwind_append_unwinder (gdbarch, &arm_m_exception_unwind);
9430 frame_unwind_append_unwinder (gdbarch, &arm_stub_unwind);
9431 dwarf2_append_unwinders (gdbarch);
9432 frame_unwind_append_unwinder (gdbarch, &arm_exidx_unwind);
9433 frame_unwind_append_unwinder (gdbarch, &arm_epilogue_frame_unwind);
9434 frame_unwind_append_unwinder (gdbarch, &arm_prologue_unwind);
9435
9436 /* Now we have tuned the configuration, set a few final things,
9437 based on what the OS ABI has told us. */
9438
9439 /* If the ABI is not otherwise marked, assume the old GNU APCS. EABI
9440 binaries are always marked. */
9441 if (tdep->arm_abi == ARM_ABI_AUTO)
9442 tdep->arm_abi = ARM_ABI_APCS;
9443
9444 /* Watchpoints are not steppable. */
9445 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
9446
9447 /* We used to default to FPA for generic ARM, but almost nobody
9448 uses that now, and we now provide a way for the user to force
9449 the model. So default to the most useful variant. */
9450 if (tdep->fp_model == ARM_FLOAT_AUTO)
9451 tdep->fp_model = ARM_FLOAT_SOFT_FPA;
9452
9453 if (tdep->jb_pc >= 0)
9454 set_gdbarch_get_longjmp_target (gdbarch, arm_get_longjmp_target);
9455
9456 /* Floating point sizes and format. */
9457 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
9458 if (tdep->fp_model == ARM_FLOAT_SOFT_FPA || tdep->fp_model == ARM_FLOAT_FPA)
9459 {
9460 set_gdbarch_double_format
9461 (gdbarch, floatformats_ieee_double_littlebyte_bigword);
9462 set_gdbarch_long_double_format
9463 (gdbarch, floatformats_ieee_double_littlebyte_bigword);
9464 }
9465 else
9466 {
9467 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
9468 set_gdbarch_long_double_format (gdbarch, floatformats_ieee_double);
9469 }
9470
9471 if (have_vfp_pseudos)
9472 {
9473 /* NOTE: These are the only pseudo registers used by
9474 the ARM target at the moment. If more are added, a
9475 little more care in numbering will be needed. */
9476
9477 int num_pseudos = 32;
9478 if (have_neon_pseudos)
9479 num_pseudos += 16;
9480 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudos);
9481 set_gdbarch_pseudo_register_read (gdbarch, arm_pseudo_read);
9482 set_gdbarch_pseudo_register_write (gdbarch, arm_pseudo_write);
9483 }
9484
9485 if (tdesc_data != nullptr)
9486 {
9487 set_tdesc_pseudo_register_name (gdbarch, arm_register_name);
9488
9489 tdesc_use_registers (gdbarch, tdesc, std::move (tdesc_data));
9490
9491 /* Override tdesc_register_type to adjust the types of VFP
9492 registers for NEON. */
9493 set_gdbarch_register_type (gdbarch, arm_register_type);
9494 }
9495
9496 /* Add standard register aliases. We add aliases even for those
9497 names which are used by the current architecture - it's simpler,
9498 and does no harm, since nothing ever lists user registers. */
9499 for (i = 0; i < ARRAY_SIZE (arm_register_aliases); i++)
9500 user_reg_add (gdbarch, arm_register_aliases[i].name,
9501 value_of_arm_user_reg, &arm_register_aliases[i].regnum);
9502
9503 set_gdbarch_disassembler_options (gdbarch, &arm_disassembler_options);
9504 set_gdbarch_valid_disassembler_options (gdbarch, disassembler_options_arm ());
9505
9506 set_gdbarch_gnu_triplet_regexp (gdbarch, arm_gnu_triplet_regexp);
9507
9508 return gdbarch;
9509 }
9510
9511 static void
9512 arm_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
9513 {
9514 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
9515
9516 if (tdep == NULL)
9517 return;
9518
9519 fprintf_unfiltered (file, _("arm_dump_tdep: fp_model = %i\n"),
9520 (int) tdep->fp_model);
9521 fprintf_unfiltered (file, _("arm_dump_tdep: have_fpa_registers = %i\n"),
9522 (int) tdep->have_fpa_registers);
9523 fprintf_unfiltered (file, _("arm_dump_tdep: have_wmmx_registers = %i\n"),
9524 (int) tdep->have_wmmx_registers);
9525 fprintf_unfiltered (file, _("arm_dump_tdep: vfp_register_count = %i\n"),
9526 (int) tdep->vfp_register_count);
9527 fprintf_unfiltered (file, _("arm_dump_tdep: have_vfp_pseudos = %i\n"),
9528 (int) tdep->have_vfp_pseudos);
9529 fprintf_unfiltered (file, _("arm_dump_tdep: have_neon_pseudos = %i\n"),
9530 (int) tdep->have_neon_pseudos);
9531 fprintf_unfiltered (file, _("arm_dump_tdep: have_neon = %i\n"),
9532 (int) tdep->have_neon);
9533 fprintf_unfiltered (file, _("arm_dump_tdep: Lowest pc = 0x%lx\n"),
9534 (unsigned long) tdep->lowest_pc);
9535 }
9536
#if GDB_SELF_TEST
namespace selftests
{
/* Forward declarations for the unit tests registered with
   selftests::register_test in _initialize_arm_tdep below.  */
static void arm_record_test (void);
static void arm_analyze_prologue_test ();
}
#endif
9544
9545 void _initialize_arm_tdep ();
9546 void
9547 _initialize_arm_tdep ()
9548 {
9549 long length;
9550 int i, j;
9551 char regdesc[1024], *rdptr = regdesc;
9552 size_t rest = sizeof (regdesc);
9553
9554 gdbarch_register (bfd_arch_arm, arm_gdbarch_init, arm_dump_tdep);
9555
9556 /* Add ourselves to objfile event chain. */
9557 gdb::observers::new_objfile.attach (arm_exidx_new_objfile, "arm-tdep");
9558
9559 /* Register an ELF OS ABI sniffer for ARM binaries. */
9560 gdbarch_register_osabi_sniffer (bfd_arch_arm,
9561 bfd_target_elf_flavour,
9562 arm_elf_osabi_sniffer);
9563
9564 /* Add root prefix command for all "set arm"/"show arm" commands. */
9565 add_basic_prefix_cmd ("arm", no_class,
9566 _("Various ARM-specific commands."),
9567 &setarmcmdlist, 0, &setlist);
9568
9569 add_show_prefix_cmd ("arm", no_class,
9570 _("Various ARM-specific commands."),
9571 &showarmcmdlist, 0, &showlist);
9572
9573
9574 arm_disassembler_options = xstrdup ("reg-names-std");
9575 const disasm_options_t *disasm_options
9576 = &disassembler_options_arm ()->options;
9577 int num_disassembly_styles = 0;
9578 for (i = 0; disasm_options->name[i] != NULL; i++)
9579 if (startswith (disasm_options->name[i], "reg-names-"))
9580 num_disassembly_styles++;
9581
9582 /* Initialize the array that will be passed to add_setshow_enum_cmd(). */
9583 valid_disassembly_styles = XNEWVEC (const char *,
9584 num_disassembly_styles + 1);
9585 for (i = j = 0; disasm_options->name[i] != NULL; i++)
9586 if (startswith (disasm_options->name[i], "reg-names-"))
9587 {
9588 size_t offset = strlen ("reg-names-");
9589 const char *style = disasm_options->name[i];
9590 valid_disassembly_styles[j++] = &style[offset];
9591 length = snprintf (rdptr, rest, "%s - %s\n", &style[offset],
9592 disasm_options->description[i]);
9593 rdptr += length;
9594 rest -= length;
9595 }
9596 /* Mark the end of valid options. */
9597 valid_disassembly_styles[num_disassembly_styles] = NULL;
9598
9599 /* Create the help text. */
9600 std::string helptext = string_printf ("%s%s%s",
9601 _("The valid values are:\n"),
9602 regdesc,
9603 _("The default is \"std\"."));
9604
9605 add_setshow_enum_cmd("disassembler", no_class,
9606 valid_disassembly_styles, &disassembly_style,
9607 _("Set the disassembly style."),
9608 _("Show the disassembly style."),
9609 helptext.c_str (),
9610 set_disassembly_style_sfunc,
9611 show_disassembly_style_sfunc,
9612 &setarmcmdlist, &showarmcmdlist);
9613
9614 add_setshow_boolean_cmd ("apcs32", no_class, &arm_apcs_32,
9615 _("Set usage of ARM 32-bit mode."),
9616 _("Show usage of ARM 32-bit mode."),
9617 _("When off, a 26-bit PC will be used."),
9618 NULL,
9619 NULL, /* FIXME: i18n: Usage of ARM 32-bit
9620 mode is %s. */
9621 &setarmcmdlist, &showarmcmdlist);
9622
9623 /* Add a command to allow the user to force the FPU model. */
9624 add_setshow_enum_cmd ("fpu", no_class, fp_model_strings, &current_fp_model,
9625 _("Set the floating point type."),
9626 _("Show the floating point type."),
9627 _("auto - Determine the FP typefrom the OS-ABI.\n\
9628 softfpa - Software FP, mixed-endian doubles on little-endian ARMs.\n\
9629 fpa - FPA co-processor (GCC compiled).\n\
9630 softvfp - Software FP with pure-endian doubles.\n\
9631 vfp - VFP co-processor."),
9632 set_fp_model_sfunc, show_fp_model,
9633 &setarmcmdlist, &showarmcmdlist);
9634
9635 /* Add a command to allow the user to force the ABI. */
9636 add_setshow_enum_cmd ("abi", class_support, arm_abi_strings, &arm_abi_string,
9637 _("Set the ABI."),
9638 _("Show the ABI."),
9639 NULL, arm_set_abi, arm_show_abi,
9640 &setarmcmdlist, &showarmcmdlist);
9641
9642 /* Add two commands to allow the user to force the assumed
9643 execution mode. */
9644 add_setshow_enum_cmd ("fallback-mode", class_support,
9645 arm_mode_strings, &arm_fallback_mode_string,
9646 _("Set the mode assumed when symbols are unavailable."),
9647 _("Show the mode assumed when symbols are unavailable."),
9648 NULL, NULL, arm_show_fallback_mode,
9649 &setarmcmdlist, &showarmcmdlist);
9650 add_setshow_enum_cmd ("force-mode", class_support,
9651 arm_mode_strings, &arm_force_mode_string,
9652 _("Set the mode assumed even when symbols are available."),
9653 _("Show the mode assumed even when symbols are available."),
9654 NULL, NULL, arm_show_force_mode,
9655 &setarmcmdlist, &showarmcmdlist);
9656
9657 /* Debugging flag. */
9658 add_setshow_boolean_cmd ("arm", class_maintenance, &arm_debug,
9659 _("Set ARM debugging."),
9660 _("Show ARM debugging."),
9661 _("When on, arm-specific debugging is enabled."),
9662 NULL,
9663 NULL, /* FIXME: i18n: "ARM debugging is %s. */
9664 &setdebuglist, &showdebuglist);
9665
9666 #if GDB_SELF_TEST
9667 selftests::register_test ("arm-record", selftests::arm_record_test);
9668 selftests::register_test ("arm_analyze_prologue", selftests::arm_analyze_prologue_test);
9669 #endif
9670
9671 }
9672
/* ARM-reversible process record data structures.  */

/* Instruction sizes in bytes for the three encodings the recorder
   handles.  */
#define ARM_INSN_SIZE_BYTES 4
#define THUMB_INSN_SIZE_BYTES 2
#define THUMB2_INSN_SIZE_BYTES 4


/* Position of the bit within a 32-bit ARM instruction
   that defines whether the instruction is a load or store.  */
#define INSN_S_L_BIT_NUM 20

/* Copy LENGTH register numbers out of RECORD_BUF into a freshly
   XNEWVEC'd uint32_t array assigned to REGS.  No allocation happens
   when LENGTH is zero.  Multi-statement macro, hence do/while (0).  */
#define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
	do  \
	  { \
	    unsigned int reg_len = LENGTH; \
	    if (reg_len) \
	      { \
		REGS = XNEWVEC (uint32_t, reg_len); \
		memcpy(&REGS[0], &RECORD_BUF[0], sizeof(uint32_t)*LENGTH); \
	      } \
	  } \
	while (0)

/* Copy LENGTH arm_mem_r entries out of RECORD_BUF into a freshly
   XNEWVEC'd array assigned to MEMS.  RECORD_BUF is laid out as
   interleaved length/address uint32_t pairs, matching struct
   arm_mem_r.  No allocation happens when LENGTH is zero.  */
#define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
	do  \
	  { \
	    unsigned int mem_len = LENGTH; \
	    if (mem_len) \
	      { \
		MEMS =  XNEWVEC (struct arm_mem_r, mem_len);  \
		memcpy(&MEMS->len, &RECORD_BUF[0], \
		       sizeof(struct arm_mem_r) * LENGTH); \
	      } \
	  } \
	while (0)

/* Checks whether insn is already recorded or yet to be decoded. (boolean expression).  */
#define INSN_RECORDED(ARM_RECORD) \
	(0 != (ARM_RECORD)->reg_rec_count || 0 != (ARM_RECORD)->mem_rec_count)
9712
/* ARM memory record structure.  Describes one memory block an
   instruction is about to modify: its length in bytes and its start
   address (filled in by the arm_record_* decoders below).  */
struct arm_mem_r
{
  uint32_t len;    /* Record length.  */
  uint32_t addr;   /* Memory address.  */
};
9719
/* ARM instruction record contains opcode of current insn
   and execution state (before entry to decode_insn()),
   contains list of to-be-modified registers and
   memory blocks (on return from decode_insn()).  */

typedef struct insn_decode_record_t
{
  struct gdbarch *gdbarch;
  struct regcache *regcache;
  CORE_ADDR this_addr;          /* Address of the insn being decoded.  */
  uint32_t arm_insn;            /* Insn bits; 32 bits wide, so it also
				   accommodates Thumb/Thumb-2 encodings.  */
  uint32_t cond;                /* Condition code.  */
  uint32_t opcode;              /* Insn opcode.  */
  uint32_t decode;              /* Insn decode bits.  */
  uint32_t mem_rec_count;       /* No of mem records.  */
  uint32_t reg_rec_count;       /* No of reg records.  */
  uint32_t *arm_regs;           /* Registers to be saved for this record.  */
  struct arm_mem_r *arm_mems;   /* Memory to be saved for this record.  */
} insn_decode_record;
9739
9740
9741 /* Checks ARM SBZ and SBO mandatory fields. */
9742
9743 static int
9744 sbo_sbz (uint32_t insn, uint32_t bit_num, uint32_t len, uint32_t sbo)
9745 {
9746 uint32_t ones = bits (insn, bit_num - 1, (bit_num -1) + (len - 1));
9747
9748 if (!len)
9749 return 1;
9750
9751 if (!sbo)
9752 ones = ~ones;
9753
9754 while (ones)
9755 {
9756 if (!(ones & sbo))
9757 {
9758 return 0;
9759 }
9760 ones = ones >> 1;
9761 }
9762 return 1;
9763 }
9764
/* Outcome codes returned by the ARM process-record routines.  */
enum arm_record_result
{
  ARM_RECORD_SUCCESS = 0,
  ARM_RECORD_FAILURE = 1
};
9770
/* Which misc-store flavour arm_record_strx is decoding: STRH records
   one 2-byte block, STRD records two 4-byte blocks.  */
typedef enum
{
  ARM_RECORD_STRH=1,
  ARM_RECORD_STRD
} arm_record_strx_t;
9776
/* Instruction-set flavour of the insn being recorded.  */
typedef enum
{
  ARM_RECORD=1,
  THUMB_RECORD,
  THUMB2_RECORD
} record_type_t;
9783
9784
9785 static int
9786 arm_record_strx (insn_decode_record *arm_insn_r, uint32_t *record_buf,
9787 uint32_t *record_buf_mem, arm_record_strx_t str_type)
9788 {
9789
9790 struct regcache *reg_cache = arm_insn_r->regcache;
9791 ULONGEST u_regval[2]= {0};
9792
9793 uint32_t reg_src1 = 0, reg_src2 = 0;
9794 uint32_t immed_high = 0, immed_low = 0,offset_8 = 0, tgt_mem_addr = 0;
9795
9796 arm_insn_r->opcode = bits (arm_insn_r->arm_insn, 21, 24);
9797 arm_insn_r->decode = bits (arm_insn_r->arm_insn, 4, 7);
9798
9799 if (14 == arm_insn_r->opcode || 10 == arm_insn_r->opcode)
9800 {
9801 /* 1) Handle misc store, immediate offset. */
9802 immed_low = bits (arm_insn_r->arm_insn, 0, 3);
9803 immed_high = bits (arm_insn_r->arm_insn, 8, 11);
9804 reg_src1 = bits (arm_insn_r->arm_insn, 16, 19);
9805 regcache_raw_read_unsigned (reg_cache, reg_src1,
9806 &u_regval[0]);
9807 if (ARM_PC_REGNUM == reg_src1)
9808 {
9809 /* If R15 was used as Rn, hence current PC+8. */
9810 u_regval[0] = u_regval[0] + 8;
9811 }
9812 offset_8 = (immed_high << 4) | immed_low;
9813 /* Calculate target store address. */
9814 if (14 == arm_insn_r->opcode)
9815 {
9816 tgt_mem_addr = u_regval[0] + offset_8;
9817 }
9818 else
9819 {
9820 tgt_mem_addr = u_regval[0] - offset_8;
9821 }
9822 if (ARM_RECORD_STRH == str_type)
9823 {
9824 record_buf_mem[0] = 2;
9825 record_buf_mem[1] = tgt_mem_addr;
9826 arm_insn_r->mem_rec_count = 1;
9827 }
9828 else if (ARM_RECORD_STRD == str_type)
9829 {
9830 record_buf_mem[0] = 4;
9831 record_buf_mem[1] = tgt_mem_addr;
9832 record_buf_mem[2] = 4;
9833 record_buf_mem[3] = tgt_mem_addr + 4;
9834 arm_insn_r->mem_rec_count = 2;
9835 }
9836 }
9837 else if (12 == arm_insn_r->opcode || 8 == arm_insn_r->opcode)
9838 {
9839 /* 2) Store, register offset. */
9840 /* Get Rm. */
9841 reg_src1 = bits (arm_insn_r->arm_insn, 0, 3);
9842 /* Get Rn. */
9843 reg_src2 = bits (arm_insn_r->arm_insn, 16, 19);
9844 regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval[0]);
9845 regcache_raw_read_unsigned (reg_cache, reg_src2, &u_regval[1]);
9846 if (15 == reg_src2)
9847 {
9848 /* If R15 was used as Rn, hence current PC+8. */
9849 u_regval[0] = u_regval[0] + 8;
9850 }
9851 /* Calculate target store address, Rn +/- Rm, register offset. */
9852 if (12 == arm_insn_r->opcode)
9853 {
9854 tgt_mem_addr = u_regval[0] + u_regval[1];
9855 }
9856 else
9857 {
9858 tgt_mem_addr = u_regval[1] - u_regval[0];
9859 }
9860 if (ARM_RECORD_STRH == str_type)
9861 {
9862 record_buf_mem[0] = 2;
9863 record_buf_mem[1] = tgt_mem_addr;
9864 arm_insn_r->mem_rec_count = 1;
9865 }
9866 else if (ARM_RECORD_STRD == str_type)
9867 {
9868 record_buf_mem[0] = 4;
9869 record_buf_mem[1] = tgt_mem_addr;
9870 record_buf_mem[2] = 4;
9871 record_buf_mem[3] = tgt_mem_addr + 4;
9872 arm_insn_r->mem_rec_count = 2;
9873 }
9874 }
9875 else if (11 == arm_insn_r->opcode || 15 == arm_insn_r->opcode
9876 || 2 == arm_insn_r->opcode || 6 == arm_insn_r->opcode)
9877 {
9878 /* 3) Store, immediate pre-indexed. */
9879 /* 5) Store, immediate post-indexed. */
9880 immed_low = bits (arm_insn_r->arm_insn, 0, 3);
9881 immed_high = bits (arm_insn_r->arm_insn, 8, 11);
9882 offset_8 = (immed_high << 4) | immed_low;
9883 reg_src1 = bits (arm_insn_r->arm_insn, 16, 19);
9884 regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval[0]);
9885 /* Calculate target store address, Rn +/- Rm, register offset. */
9886 if (15 == arm_insn_r->opcode || 6 == arm_insn_r->opcode)
9887 {
9888 tgt_mem_addr = u_regval[0] + offset_8;
9889 }
9890 else
9891 {
9892 tgt_mem_addr = u_regval[0] - offset_8;
9893 }
9894 if (ARM_RECORD_STRH == str_type)
9895 {
9896 record_buf_mem[0] = 2;
9897 record_buf_mem[1] = tgt_mem_addr;
9898 arm_insn_r->mem_rec_count = 1;
9899 }
9900 else if (ARM_RECORD_STRD == str_type)
9901 {
9902 record_buf_mem[0] = 4;
9903 record_buf_mem[1] = tgt_mem_addr;
9904 record_buf_mem[2] = 4;
9905 record_buf_mem[3] = tgt_mem_addr + 4;
9906 arm_insn_r->mem_rec_count = 2;
9907 }
9908 /* Record Rn also as it changes. */
9909 *(record_buf) = bits (arm_insn_r->arm_insn, 16, 19);
9910 arm_insn_r->reg_rec_count = 1;
9911 }
9912 else if (9 == arm_insn_r->opcode || 13 == arm_insn_r->opcode
9913 || 0 == arm_insn_r->opcode || 4 == arm_insn_r->opcode)
9914 {
9915 /* 4) Store, register pre-indexed. */
9916 /* 6) Store, register post -indexed. */
9917 reg_src1 = bits (arm_insn_r->arm_insn, 0, 3);
9918 reg_src2 = bits (arm_insn_r->arm_insn, 16, 19);
9919 regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval[0]);
9920 regcache_raw_read_unsigned (reg_cache, reg_src2, &u_regval[1]);
9921 /* Calculate target store address, Rn +/- Rm, register offset. */
9922 if (13 == arm_insn_r->opcode || 4 == arm_insn_r->opcode)
9923 {
9924 tgt_mem_addr = u_regval[0] + u_regval[1];
9925 }
9926 else
9927 {
9928 tgt_mem_addr = u_regval[1] - u_regval[0];
9929 }
9930 if (ARM_RECORD_STRH == str_type)
9931 {
9932 record_buf_mem[0] = 2;
9933 record_buf_mem[1] = tgt_mem_addr;
9934 arm_insn_r->mem_rec_count = 1;
9935 }
9936 else if (ARM_RECORD_STRD == str_type)
9937 {
9938 record_buf_mem[0] = 4;
9939 record_buf_mem[1] = tgt_mem_addr;
9940 record_buf_mem[2] = 4;
9941 record_buf_mem[3] = tgt_mem_addr + 4;
9942 arm_insn_r->mem_rec_count = 2;
9943 }
9944 /* Record Rn also as it changes. */
9945 *(record_buf) = bits (arm_insn_r->arm_insn, 16, 19);
9946 arm_insn_r->reg_rec_count = 1;
9947 }
9948 return 0;
9949 }
9950
9951 /* Handling ARM extension space insns. */
9952
/* Record handler for instructions in the ARM "extension space":
   unconditional instructions plus the miscellaneous, arithmetic and
   load/store extension encodings that do not fit the regular opcode
   tables.  Fills ARM_INSN_R with the registers and memory locations
   the instruction will modify, so process record can save their
   previous values.  Returns 0 on success, -1 when the instruction
   cannot be recorded (e.g. SPSR handling is not implemented).  */

static int
arm_record_extension_space (insn_decode_record *arm_insn_r)
{
  int ret = 0;  /* Return value: -1:record failure ;  0:success  */
  uint32_t opcode1 = 0, opcode2 = 0, insn_op1 = 0;
  uint32_t record_buf[8], record_buf_mem[8];
  uint32_t reg_src1 = 0;
  struct regcache *reg_cache = arm_insn_r->regcache;
  ULONGEST u_regval = 0;

  /* The caller only dispatches here when nothing has been recorded yet.  */
  gdb_assert (!INSN_RECORDED(arm_insn_r));

  /* Handle unconditional insn extension space.  */
  opcode1 = bits (arm_insn_r->arm_insn, 20, 27);
  opcode2 = bits (arm_insn_r->arm_insn, 4, 7);
  if (arm_insn_r->cond)
    {
      /* PLD has no effect on architectural state, it just affects
	 the caches, so nothing needs recording for it.  */
      if (5 == ((opcode1 & 0xE0) >> 5))
	{
	  /* BLX(1): writes the return address to LR and may toggle
	     the T bit, so CPSR and LR must be saved.  */
	  record_buf[0] = ARM_PS_REGNUM;
	  record_buf[1] = ARM_LR_REGNUM;
	  arm_insn_r->reg_rec_count = 2;
	}
      /* STC2, LDC2, MCR2, MRC2, CDP2: <TBD>, co-processor insn.  */
    }


  opcode1 = bits (arm_insn_r->arm_insn, 25, 27);
  if (3 == opcode1 && bit (arm_insn_r->arm_insn, 4))
    {
      ret = -1;
      /* Undefined instruction on ARM V5; need to handle if later
	 versions define it.  */
    }

  opcode1 = bits (arm_insn_r->arm_insn, 24, 27);
  opcode2 = bits (arm_insn_r->arm_insn, 4, 7);
  insn_op1 = bits (arm_insn_r->arm_insn, 20, 23);

  /* Handle arithmetic insn extension space.  */
  if (!opcode1 && 9 == opcode2 && 1 != arm_insn_r->cond
      && !INSN_RECORDED(arm_insn_r))
    {
      /* Handle MLA(S) and MUL(S).  */
      if (in_inclusive_range (insn_op1, 0U, 3U))
	{
	  /* Destination register (bits 12-15) and the flags change.  */
	  record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
	  record_buf[1] = ARM_PS_REGNUM;
	  arm_insn_r->reg_rec_count = 2;
	}
      else if (in_inclusive_range (insn_op1, 4U, 15U))
	{
	  /* Handle SMLAL(S), SMULL(S), UMLAL(S), UMULL(S): both halves
	     of the 64-bit result (RdHi:RdLo) and the flags change.  */
	  record_buf[0] = bits (arm_insn_r->arm_insn, 16, 19);
	  record_buf[1] = bits (arm_insn_r->arm_insn, 12, 15);
	  record_buf[2] = ARM_PS_REGNUM;
	  arm_insn_r->reg_rec_count = 3;
	}
    }

  opcode1 = bits (arm_insn_r->arm_insn, 26, 27);
  opcode2 = bits (arm_insn_r->arm_insn, 23, 24);
  insn_op1 = bits (arm_insn_r->arm_insn, 21, 22);

  /* Handle control insn extension space.  */

  if (!opcode1 && 2 == opcode2 && !bit (arm_insn_r->arm_insn, 20)
      && 1 != arm_insn_r->cond && !INSN_RECORDED(arm_insn_r))
    {
      if (!bit (arm_insn_r->arm_insn,25))
	{
	  if (!bits (arm_insn_r->arm_insn, 4, 7))
	    {
	      if ((0 == insn_op1) || (2 == insn_op1))
		{
		  /* MRS: writes the destination register only.  */
		  record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
		  arm_insn_r->reg_rec_count = 1;
		}
	      else if (1 == insn_op1)
		{
		  /* MSR: CPSR is going to be changed.  */
		  record_buf[0] = ARM_PS_REGNUM;
		  arm_insn_r->reg_rec_count = 1;
		}
	      else if (3 == insn_op1)
		{
		  /* SPSR is going to be changed.  */
		  /* We need to get SPSR value, which is yet to be done.  */
		  return -1;
		}
	    }
	  else if (1 == bits (arm_insn_r->arm_insn, 4, 7))
	    {
	      if (1 == insn_op1)
		{
		  /* BX: may toggle the T bit, so save CPSR.  */
		  record_buf[0] = ARM_PS_REGNUM;
		  arm_insn_r->reg_rec_count = 1;
		}
	      else if (3 == insn_op1)
		{
		  /* CLZ: writes the destination register only.  */
		  record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
		  arm_insn_r->reg_rec_count = 1;
		}
	    }
	  else if (3 == bits (arm_insn_r->arm_insn, 4, 7))
	    {
	      /* BLX: clobbers CPSR (T bit) and LR (return address).  */
	      record_buf[0] = ARM_PS_REGNUM;
	      record_buf[1] = ARM_LR_REGNUM;
	      arm_insn_r->reg_rec_count = 2;
	    }
	  else if (5 == bits (arm_insn_r->arm_insn, 4, 7))
	    {
	      /* QADD, QSUB, QDADD, QDSUB: write Rd and may set the
		 saturation (Q) flag in CPSR.  */
	      record_buf[0] = ARM_PS_REGNUM;
	      record_buf[1] = bits (arm_insn_r->arm_insn, 12, 15);
	      arm_insn_r->reg_rec_count = 2;
	    }
	  else if (7 == bits (arm_insn_r->arm_insn, 4, 7))
	    {
	      /* BKPT.  */
	      record_buf[0] = ARM_PS_REGNUM;
	      record_buf[1] = ARM_LR_REGNUM;
	      arm_insn_r->reg_rec_count = 2;

	      /* Save SPSR also;how?  Not supported yet, so fail.  */
	      return -1;
	    }
	  else if(8 == bits (arm_insn_r->arm_insn, 4, 7)
		  || 10 == bits (arm_insn_r->arm_insn, 4, 7)
		  || 12 == bits (arm_insn_r->arm_insn, 4, 7)
		  || 14 == bits (arm_insn_r->arm_insn, 4, 7)
		 )
	    {
	      if (0 == insn_op1 || 1 == insn_op1)
		{
		  /* SMLA<x><y>, SMLAW<y>, SMULW<y>.  */
		  /* We dont do optimization for SMULW<y> where we
		     need only Rd.  */
		  record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
		  record_buf[1] = ARM_PS_REGNUM;
		  arm_insn_r->reg_rec_count = 2;
		}
	      else if (2 == insn_op1)
		{
		  /* SMLAL<x><y>: 64-bit accumulate, writes RdLo:RdHi.  */
		  record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
		  record_buf[1] = bits (arm_insn_r->arm_insn, 16, 19);
		  arm_insn_r->reg_rec_count = 2;
		}
	      else if (3 == insn_op1)
		{
		  /* SMUL<x><y>: writes Rd only, flags unaffected.  */
		  record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
		  arm_insn_r->reg_rec_count = 1;
		}
	    }
	}
      else
	{
	  /* MSR : immediate form.  */
	  if (1 == insn_op1)
	    {
	      /* CPSR is going to be changed.  */
	      record_buf[0] = ARM_PS_REGNUM;
	      arm_insn_r->reg_rec_count = 1;
	    }
	  else if (3 == insn_op1)
	    {
	      /* SPSR is going to be changed.  */
	      /* we need to get SPSR value, which is yet to be done  */
	      return -1;
	    }
	}
    }

  opcode1 = bits (arm_insn_r->arm_insn, 25, 27);
  opcode2 = bits (arm_insn_r->arm_insn, 20, 24);
  insn_op1 = bits (arm_insn_r->arm_insn, 5, 6);

  /* Handle load/store insn extension space.  */

  if (!opcode1 && bit (arm_insn_r->arm_insn, 7)
      && bit (arm_insn_r->arm_insn, 4) && 1 != arm_insn_r->cond
      && !INSN_RECORDED(arm_insn_r))
    {
      /* SWP/SWPB.  */
      if (0 == insn_op1)
	{
	  /* These insn, changes register and memory as well.  */
	  /* SWP or SWPB insn.  */
	  /* Get memory address given by Rn.  */
	  reg_src1 = bits (arm_insn_r->arm_insn, 16, 19);
	  regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval);
	  /* SWP insn ?, swaps word.  */
	  if (8 == arm_insn_r->opcode)
	    {
	      record_buf_mem[0] = 4;
	    }
	  else
	    {
	      /* SWPB insn, swaps only byte.  */
	      record_buf_mem[0] = 1;
	    }
	  record_buf_mem[1] = u_regval;
	  arm_insn_r->mem_rec_count = 1;
	  record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
	  arm_insn_r->reg_rec_count = 1;
	}
      else if (1 == insn_op1 && !bit (arm_insn_r->arm_insn, 20))
	{
	  /* STRH: common store-halfword recording helper.  */
	  arm_record_strx(arm_insn_r, &record_buf[0], &record_buf_mem[0],
			  ARM_RECORD_STRH);
	}
      else if (2 == insn_op1 && !bit (arm_insn_r->arm_insn, 20))
	{
	  /* LDRD: loads an even/odd register pair Rt, Rt+1.  */
	  record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
	  record_buf[1] = record_buf[0] + 1;
	  arm_insn_r->reg_rec_count = 2;
	}
      else if (3 == insn_op1 && !bit (arm_insn_r->arm_insn, 20))
	{
	  /* STRD: common store-doubleword recording helper.  */
	  arm_record_strx(arm_insn_r, &record_buf[0], &record_buf_mem[0],
			  ARM_RECORD_STRD);
	}
      else if (bit (arm_insn_r->arm_insn, 20) && insn_op1 <= 3)
	{
	  /* LDRH, LDRSB, LDRSH: write the destination register only.  */
	  record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
	  arm_insn_r->reg_rec_count = 1;
	}

    }

  opcode1 = bits (arm_insn_r->arm_insn, 23, 27);
  if (24 == opcode1 && bit (arm_insn_r->arm_insn, 21)
      && !INSN_RECORDED(arm_insn_r))
    {
      ret = -1;
      /* Handle coprocessor insn extension space.  */
    }

  /* To be done for ARMv5 and later; as of now we return -1.  */
  if (-1 == ret)
    return ret;

  REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf);
  MEM_ALLOC (arm_insn_r->arm_mems, arm_insn_r->mem_rec_count, record_buf_mem);

  return ret;
}
10213
10214 /* Handling opcode 000 insns. */
10215
/* Record handler for ARM instructions with opcode field 000:
   data-processing (register and register-shifted register),
   miscellaneous instructions, multiplies, synchronization primitives
   and the "extra" load/stores (halfword, signed byte, doubleword).
   Fills ARM_INSN_R with the registers and memory locations the
   instruction modifies.  Returns 0 on success, -1 on failure.  */

static int
arm_record_data_proc_misc_ld_str (insn_decode_record *arm_insn_r)
{
  struct regcache *reg_cache = arm_insn_r->regcache;
  uint32_t record_buf[8], record_buf_mem[8];
  ULONGEST u_regval[2] = {0};

  uint32_t reg_src1 = 0;
  uint32_t opcode1 = 0;

  arm_insn_r->opcode = bits (arm_insn_r->arm_insn, 21, 24);
  arm_insn_r->decode = bits (arm_insn_r->arm_insn, 4, 7);
  opcode1 = bits (arm_insn_r->arm_insn, 20, 24);

  /* (opcode1 & 0x19) == 0x10 selects the "miscellaneous" encodings
     (compare-class opcodes with S clear); everything else here is a
     normal data-processing instruction.  */
  if (!((opcode1 & 0x19) == 0x10))
    {
      /* Data-processing (register) and Data-processing (register-shifted
	 register */
      /* Out of 11 shifter operands mode, all the insn modifies destination
	 register, which is specified by 13-16 decode.  */
      record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
      record_buf[1] = ARM_PS_REGNUM;
      arm_insn_r->reg_rec_count = 2;
    }
  else if ((arm_insn_r->decode < 8) && ((opcode1 & 0x19) == 0x10))
    {
      /* Miscellaneous instructions */

      if (3 == arm_insn_r->decode && 0x12 == opcode1
	  && sbo_sbz (arm_insn_r->arm_insn, 9, 12, 1))
	{
	  /* Handle BLX, branch and link/exchange.  */
	  if (9 == arm_insn_r->opcode)
	    {
	      /* Branch is chosen by setting T bit of CPSR, bitp[0] of Rm,
		 and R14 stores the return address.  */
	      record_buf[0] = ARM_PS_REGNUM;
	      record_buf[1] = ARM_LR_REGNUM;
	      arm_insn_r->reg_rec_count = 2;
	    }
	}
      else if (7 == arm_insn_r->decode && 0x12 == opcode1)
	{
	  /* Handle enhanced software breakpoint insn, BKPT.  */
	  /* CPSR is changed to be executed in ARM state,  disabling normal
	     interrupts, entering abort mode.  */
	  /* According to high vector configuration PC is set.  */
	  /* user hit breakpoint and type reverse, in
	     that case, we need to go back with previous CPSR and
	     Program Counter.  */
	  record_buf[0] = ARM_PS_REGNUM;
	  record_buf[1] = ARM_LR_REGNUM;
	  arm_insn_r->reg_rec_count = 2;

	  /* Save SPSR also; how?  Not supported yet, so fail.  */
	  return -1;
	}
      else if (1 == arm_insn_r->decode && 0x12 == opcode1
	       && sbo_sbz (arm_insn_r->arm_insn, 9, 12, 1))
	{
	  /* Handle BX, branch and link/exchange.  */
	  /* Branch is chosen by setting T bit of CPSR, bitp[0] of Rm.  */
	  record_buf[0] = ARM_PS_REGNUM;
	  arm_insn_r->reg_rec_count = 1;
	}
      else if (1 == arm_insn_r->decode && 0x16 == opcode1
	       && sbo_sbz (arm_insn_r->arm_insn, 9, 4, 1)
	       && sbo_sbz (arm_insn_r->arm_insn, 17, 4, 1))
	{
	  /* Count leading zeros: CLZ.  Writes Rd only.  */
	  record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
	  arm_insn_r->reg_rec_count = 1;
	}
      else if (!bit (arm_insn_r->arm_insn, INSN_S_L_BIT_NUM)
	       && (8 == arm_insn_r->opcode || 10 == arm_insn_r->opcode)
	       && sbo_sbz (arm_insn_r->arm_insn, 17, 4, 1)
	       && sbo_sbz (arm_insn_r->arm_insn, 1, 12, 0))
	{
	  /* Handle MRS insn.  Writes Rd only.  */
	  record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
	  arm_insn_r->reg_rec_count = 1;
	}
    }
  else if (9 == arm_insn_r->decode && opcode1 < 0x10)
    {
      /* Multiply and multiply-accumulate */

      /* Handle multiply instructions.  */
      /* MLA, MUL, SMLAL, SMULL, UMLAL, UMULL.  */
      if (0 == arm_insn_r->opcode || 1 == arm_insn_r->opcode)
	{
	  /* Handle MLA and MUL: write Rd (bits 16-19) and flags.  */
	  record_buf[0] = bits (arm_insn_r->arm_insn, 16, 19);
	  record_buf[1] = ARM_PS_REGNUM;
	  arm_insn_r->reg_rec_count = 2;
	}
      else if (4 <= arm_insn_r->opcode && 7 >= arm_insn_r->opcode)
	{
	  /* Handle SMLAL, SMULL, UMLAL, UMULL: write RdHi:RdLo and
	     flags.  */
	  record_buf[0] = bits (arm_insn_r->arm_insn, 16, 19);
	  record_buf[1] = bits (arm_insn_r->arm_insn, 12, 15);
	  record_buf[2] = ARM_PS_REGNUM;
	  arm_insn_r->reg_rec_count = 3;
	}
    }
  else if (9 == arm_insn_r->decode && opcode1 > 0x10)
    {
      /* Synchronization primitives */

      /* Handling SWP, SWPB.  */
      /* These insn, changes register and memory as well.  */
      /* SWP or SWPB insn.  */

      reg_src1 = bits (arm_insn_r->arm_insn, 16, 19);
      regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval[0]);
      /* SWP insn ?, swaps word.  */
      if (8 == arm_insn_r->opcode)
	{
	  record_buf_mem[0] = 4;
	}
      else
	{
	  /* SWPB insn, swaps only byte.  */
	  record_buf_mem[0] = 1;
	}
      record_buf_mem[1] = u_regval[0];
      arm_insn_r->mem_rec_count = 1;
      record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
      arm_insn_r->reg_rec_count = 1;
    }
  else if (11 == arm_insn_r->decode || 13 == arm_insn_r->decode
	   || 15 == arm_insn_r->decode)
    {
      if ((opcode1 & 0x12) == 2)
	{
	  /* Extra load/store (unprivileged) -- not supported yet.  */
	  return -1;
	}
      else
	{
	  /* Extra load/store */
	  switch (bits (arm_insn_r->arm_insn, 5, 6))
	    {
	    case 1:
	      if ((opcode1 & 0x05) == 0x0 || (opcode1 & 0x05) == 0x4)
		{
		  /* STRH (register), STRH (immediate) */
		  arm_record_strx (arm_insn_r, &record_buf[0],
				   &record_buf_mem[0], ARM_RECORD_STRH);
		}
	      else if ((opcode1 & 0x05) == 0x1)
		{
		  /* LDRH (register) */
		  record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
		  arm_insn_r->reg_rec_count = 1;

		  if (bit (arm_insn_r->arm_insn, 21))
		    {
		      /* Write back to Rn.  */
		      record_buf[arm_insn_r->reg_rec_count++]
			= bits (arm_insn_r->arm_insn, 16, 19);
		    }
		}
	      else if ((opcode1 & 0x05) == 0x5)
		{
		  /* LDRH (immediate), LDRH (literal) */
		  int rn = bits (arm_insn_r->arm_insn, 16, 19);

		  record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
		  arm_insn_r->reg_rec_count = 1;

		  /* Rn == 15 is the literal form, which has no
		     writeback.  */
		  if (rn != 15)
		    {
		      /*LDRH (immediate) */
		      if (bit (arm_insn_r->arm_insn, 21))
			{
			  /* Write back to Rn.  */
			  record_buf[arm_insn_r->reg_rec_count++] = rn;
			}
		    }
		}
	      else
		return -1;
	      break;
	    case 2:
	      if ((opcode1 & 0x05) == 0x0)
		{
		  /* LDRD (register): loads the pair Rt, Rt+1.  */
		  record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
		  record_buf[1] = record_buf[0] + 1;
		  arm_insn_r->reg_rec_count = 2;

		  if (bit (arm_insn_r->arm_insn, 21))
		    {
		      /* Write back to Rn.  */
		      record_buf[arm_insn_r->reg_rec_count++]
			= bits (arm_insn_r->arm_insn, 16, 19);
		    }
		}
	      else if ((opcode1 & 0x05) == 0x1)
		{
		  /* LDRSB (register) */
		  record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
		  arm_insn_r->reg_rec_count = 1;

		  if (bit (arm_insn_r->arm_insn, 21))
		    {
		      /* Write back to Rn.  */
		      record_buf[arm_insn_r->reg_rec_count++]
			= bits (arm_insn_r->arm_insn, 16, 19);
		    }
		}
	      else if ((opcode1 & 0x05) == 0x4 || (opcode1 & 0x05) == 0x5)
		{
		  /* LDRD (immediate), LDRD (literal), LDRSB (immediate),
		     LDRSB (literal) */
		  int rn = bits (arm_insn_r->arm_insn, 16, 19);

		  record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
		  arm_insn_r->reg_rec_count = 1;

		  /* Rn == 15 is the literal form, which has no
		     writeback.  */
		  if (rn != 15)
		    {
		      /*LDRD (immediate), LDRSB (immediate) */
		      if (bit (arm_insn_r->arm_insn, 21))
			{
			  /* Write back to Rn.  */
			  record_buf[arm_insn_r->reg_rec_count++] = rn;
			}
		    }
		}
	      else
		return -1;
	      break;
	    case 3:
	      if ((opcode1 & 0x05) == 0x0)
		{
		  /* STRD (register) */
		  arm_record_strx (arm_insn_r, &record_buf[0],
				   &record_buf_mem[0], ARM_RECORD_STRD);
		}
	      else if ((opcode1 & 0x05) == 0x1)
		{
		  /* LDRSH (register) */
		  record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
		  arm_insn_r->reg_rec_count = 1;

		  if (bit (arm_insn_r->arm_insn, 21))
		    {
		      /* Write back to Rn.  */
		      record_buf[arm_insn_r->reg_rec_count++]
			= bits (arm_insn_r->arm_insn, 16, 19);
		    }
		}
	      else if ((opcode1 & 0x05) == 0x4)
		{
		  /* STRD (immediate) */
		  arm_record_strx (arm_insn_r, &record_buf[0],
				   &record_buf_mem[0], ARM_RECORD_STRD);
		}
	      else if ((opcode1 & 0x05) == 0x5)
		{
		  /* LDRSH (immediate), LDRSH (literal) */
		  record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
		  arm_insn_r->reg_rec_count = 1;

		  if (bit (arm_insn_r->arm_insn, 21))
		    {
		      /* Write back to Rn.  */
		      record_buf[arm_insn_r->reg_rec_count++]
			= bits (arm_insn_r->arm_insn, 16, 19);
		    }
		}
	      else
		return -1;
	      break;
	    default:
	      return -1;
	    }
	}
    }
  else
    {
      return -1;
    }

  REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf);
  MEM_ALLOC (arm_insn_r->arm_mems, arm_insn_r->mem_rec_count, record_buf_mem);
  return 0;
}
10506
10507 /* Handling opcode 001 insns. */
10508
10509 static int
10510 arm_record_data_proc_imm (insn_decode_record *arm_insn_r)
10511 {
10512 uint32_t record_buf[8], record_buf_mem[8];
10513
10514 arm_insn_r->opcode = bits (arm_insn_r->arm_insn, 21, 24);
10515 arm_insn_r->decode = bits (arm_insn_r->arm_insn, 4, 7);
10516
10517 if ((9 == arm_insn_r->opcode || 11 == arm_insn_r->opcode)
10518 && 2 == bits (arm_insn_r->arm_insn, 20, 21)
10519 && sbo_sbz (arm_insn_r->arm_insn, 13, 4, 1)
10520 )
10521 {
10522 /* Handle MSR insn. */
10523 if (9 == arm_insn_r->opcode)
10524 {
10525 /* CSPR is going to be changed. */
10526 record_buf[0] = ARM_PS_REGNUM;
10527 arm_insn_r->reg_rec_count = 1;
10528 }
10529 else
10530 {
10531 /* SPSR is going to be changed. */
10532 }
10533 }
10534 else if (arm_insn_r->opcode <= 15)
10535 {
10536 /* Normal data processing insns. */
10537 /* Out of 11 shifter operands mode, all the insn modifies destination
10538 register, which is specified by 13-16 decode. */
10539 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
10540 record_buf[1] = ARM_PS_REGNUM;
10541 arm_insn_r->reg_rec_count = 2;
10542 }
10543 else
10544 {
10545 return -1;
10546 }
10547
10548 REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf);
10549 MEM_ALLOC (arm_insn_r->arm_mems, arm_insn_r->mem_rec_count, record_buf_mem);
10550 return 0;
10551 }
10552
10553 static int
10554 arm_record_media (insn_decode_record *arm_insn_r)
10555 {
10556 uint32_t record_buf[8];
10557
10558 switch (bits (arm_insn_r->arm_insn, 22, 24))
10559 {
10560 case 0:
10561 /* Parallel addition and subtraction, signed */
10562 case 1:
10563 /* Parallel addition and subtraction, unsigned */
10564 case 2:
10565 case 3:
10566 /* Packing, unpacking, saturation and reversal */
10567 {
10568 int rd = bits (arm_insn_r->arm_insn, 12, 15);
10569
10570 record_buf[arm_insn_r->reg_rec_count++] = rd;
10571 }
10572 break;
10573
10574 case 4:
10575 case 5:
10576 /* Signed multiplies */
10577 {
10578 int rd = bits (arm_insn_r->arm_insn, 16, 19);
10579 unsigned int op1 = bits (arm_insn_r->arm_insn, 20, 22);
10580
10581 record_buf[arm_insn_r->reg_rec_count++] = rd;
10582 if (op1 == 0x0)
10583 record_buf[arm_insn_r->reg_rec_count++] = ARM_PS_REGNUM;
10584 else if (op1 == 0x4)
10585 record_buf[arm_insn_r->reg_rec_count++]
10586 = bits (arm_insn_r->arm_insn, 12, 15);
10587 }
10588 break;
10589
10590 case 6:
10591 {
10592 if (bit (arm_insn_r->arm_insn, 21)
10593 && bits (arm_insn_r->arm_insn, 5, 6) == 0x2)
10594 {
10595 /* SBFX */
10596 record_buf[arm_insn_r->reg_rec_count++]
10597 = bits (arm_insn_r->arm_insn, 12, 15);
10598 }
10599 else if (bits (arm_insn_r->arm_insn, 20, 21) == 0x0
10600 && bits (arm_insn_r->arm_insn, 5, 7) == 0x0)
10601 {
10602 /* USAD8 and USADA8 */
10603 record_buf[arm_insn_r->reg_rec_count++]
10604 = bits (arm_insn_r->arm_insn, 16, 19);
10605 }
10606 }
10607 break;
10608
10609 case 7:
10610 {
10611 if (bits (arm_insn_r->arm_insn, 20, 21) == 0x3
10612 && bits (arm_insn_r->arm_insn, 5, 7) == 0x7)
10613 {
10614 /* Permanently UNDEFINED */
10615 return -1;
10616 }
10617 else
10618 {
10619 /* BFC, BFI and UBFX */
10620 record_buf[arm_insn_r->reg_rec_count++]
10621 = bits (arm_insn_r->arm_insn, 12, 15);
10622 }
10623 }
10624 break;
10625
10626 default:
10627 return -1;
10628 }
10629
10630 REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf);
10631
10632 return 0;
10633 }
10634
10635 /* Handle ARM mode instructions with opcode 010. */
10636
10637 static int
10638 arm_record_ld_st_imm_offset (insn_decode_record *arm_insn_r)
10639 {
10640 struct regcache *reg_cache = arm_insn_r->regcache;
10641
10642 uint32_t reg_base , reg_dest;
10643 uint32_t offset_12, tgt_mem_addr;
10644 uint32_t record_buf[8], record_buf_mem[8];
10645 unsigned char wback;
10646 ULONGEST u_regval;
10647
10648 /* Calculate wback. */
10649 wback = (bit (arm_insn_r->arm_insn, 24) == 0)
10650 || (bit (arm_insn_r->arm_insn, 21) == 1);
10651
10652 arm_insn_r->reg_rec_count = 0;
10653 reg_base = bits (arm_insn_r->arm_insn, 16, 19);
10654
10655 if (bit (arm_insn_r->arm_insn, INSN_S_L_BIT_NUM))
10656 {
10657 /* LDR (immediate), LDR (literal), LDRB (immediate), LDRB (literal), LDRBT
10658 and LDRT. */
10659
10660 reg_dest = bits (arm_insn_r->arm_insn, 12, 15);
10661 record_buf[arm_insn_r->reg_rec_count++] = reg_dest;
10662
10663 /* The LDR instruction is capable of doing branching. If MOV LR, PC
10664 preceeds a LDR instruction having R15 as reg_base, it
10665 emulates a branch and link instruction, and hence we need to save
10666 CPSR and PC as well. */
10667 if (ARM_PC_REGNUM == reg_dest)
10668 record_buf[arm_insn_r->reg_rec_count++] = ARM_PS_REGNUM;
10669
10670 /* If wback is true, also save the base register, which is going to be
10671 written to. */
10672 if (wback)
10673 record_buf[arm_insn_r->reg_rec_count++] = reg_base;
10674 }
10675 else
10676 {
10677 /* STR (immediate), STRB (immediate), STRBT and STRT. */
10678
10679 offset_12 = bits (arm_insn_r->arm_insn, 0, 11);
10680 regcache_raw_read_unsigned (reg_cache, reg_base, &u_regval);
10681
10682 /* Handle bit U. */
10683 if (bit (arm_insn_r->arm_insn, 23))
10684 {
10685 /* U == 1: Add the offset. */
10686 tgt_mem_addr = (uint32_t) u_regval + offset_12;
10687 }
10688 else
10689 {
10690 /* U == 0: subtract the offset. */
10691 tgt_mem_addr = (uint32_t) u_regval - offset_12;
10692 }
10693
10694 /* Bit 22 tells us whether the store instruction writes 1 byte or 4
10695 bytes. */
10696 if (bit (arm_insn_r->arm_insn, 22))
10697 {
10698 /* STRB and STRBT: 1 byte. */
10699 record_buf_mem[0] = 1;
10700 }
10701 else
10702 {
10703 /* STR and STRT: 4 bytes. */
10704 record_buf_mem[0] = 4;
10705 }
10706
10707 /* Handle bit P. */
10708 if (bit (arm_insn_r->arm_insn, 24))
10709 record_buf_mem[1] = tgt_mem_addr;
10710 else
10711 record_buf_mem[1] = (uint32_t) u_regval;
10712
10713 arm_insn_r->mem_rec_count = 1;
10714
10715 /* If wback is true, also save the base register, which is going to be
10716 written to. */
10717 if (wback)
10718 record_buf[arm_insn_r->reg_rec_count++] = reg_base;
10719 }
10720
10721 REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf);
10722 MEM_ALLOC (arm_insn_r->arm_mems, arm_insn_r->mem_rec_count, record_buf_mem);
10723 return 0;
10724 }
10725
10726 /* Handling opcode 011 insns. */
10727
10728 static int
10729 arm_record_ld_st_reg_offset (insn_decode_record *arm_insn_r)
10730 {
10731 struct regcache *reg_cache = arm_insn_r->regcache;
10732
10733 uint32_t shift_imm = 0;
10734 uint32_t reg_src1 = 0, reg_src2 = 0, reg_dest = 0;
10735 uint32_t offset_12 = 0, tgt_mem_addr = 0;
10736 uint32_t record_buf[8], record_buf_mem[8];
10737
10738 LONGEST s_word;
10739 ULONGEST u_regval[2];
10740
10741 if (bit (arm_insn_r->arm_insn, 4))
10742 return arm_record_media (arm_insn_r);
10743
10744 arm_insn_r->opcode = bits (arm_insn_r->arm_insn, 21, 24);
10745 arm_insn_r->decode = bits (arm_insn_r->arm_insn, 4, 7);
10746
10747 /* Handle enhanced store insns and LDRD DSP insn,
10748 order begins according to addressing modes for store insns
10749 STRH insn. */
10750
10751 /* LDR or STR? */
10752 if (bit (arm_insn_r->arm_insn, INSN_S_L_BIT_NUM))
10753 {
10754 reg_dest = bits (arm_insn_r->arm_insn, 12, 15);
10755 /* LDR insn has a capability to do branching, if
10756 MOV LR, PC is preceded by LDR insn having Rn as R15
10757 in that case, it emulates branch and link insn, and hence we
10758 need to save CSPR and PC as well. */
10759 if (15 != reg_dest)
10760 {
10761 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
10762 arm_insn_r->reg_rec_count = 1;
10763 }
10764 else
10765 {
10766 record_buf[0] = reg_dest;
10767 record_buf[1] = ARM_PS_REGNUM;
10768 arm_insn_r->reg_rec_count = 2;
10769 }
10770 }
10771 else
10772 {
10773 if (! bits (arm_insn_r->arm_insn, 4, 11))
10774 {
10775 /* Store insn, register offset and register pre-indexed,
10776 register post-indexed. */
10777 /* Get Rm. */
10778 reg_src1 = bits (arm_insn_r->arm_insn, 0, 3);
10779 /* Get Rn. */
10780 reg_src2 = bits (arm_insn_r->arm_insn, 16, 19);
10781 regcache_raw_read_unsigned (reg_cache, reg_src1
10782 , &u_regval[0]);
10783 regcache_raw_read_unsigned (reg_cache, reg_src2
10784 , &u_regval[1]);
10785 if (15 == reg_src2)
10786 {
10787 /* If R15 was used as Rn, hence current PC+8. */
10788 /* Pre-indexed mode doesnt reach here ; illegal insn. */
10789 u_regval[0] = u_regval[0] + 8;
10790 }
10791 /* Calculate target store address, Rn +/- Rm, register offset. */
10792 /* U == 1. */
10793 if (bit (arm_insn_r->arm_insn, 23))
10794 {
10795 tgt_mem_addr = u_regval[0] + u_regval[1];
10796 }
10797 else
10798 {
10799 tgt_mem_addr = u_regval[1] - u_regval[0];
10800 }
10801
10802 switch (arm_insn_r->opcode)
10803 {
10804 /* STR. */
10805 case 8:
10806 case 12:
10807 /* STR. */
10808 case 9:
10809 case 13:
10810 /* STRT. */
10811 case 1:
10812 case 5:
10813 /* STR. */
10814 case 0:
10815 case 4:
10816 record_buf_mem[0] = 4;
10817 break;
10818
10819 /* STRB. */
10820 case 10:
10821 case 14:
10822 /* STRB. */
10823 case 11:
10824 case 15:
10825 /* STRBT. */
10826 case 3:
10827 case 7:
10828 /* STRB. */
10829 case 2:
10830 case 6:
10831 record_buf_mem[0] = 1;
10832 break;
10833
10834 default:
10835 gdb_assert_not_reached ("no decoding pattern found");
10836 break;
10837 }
10838 record_buf_mem[1] = tgt_mem_addr;
10839 arm_insn_r->mem_rec_count = 1;
10840
10841 if (9 == arm_insn_r->opcode || 11 == arm_insn_r->opcode
10842 || 13 == arm_insn_r->opcode || 15 == arm_insn_r->opcode
10843 || 0 == arm_insn_r->opcode || 2 == arm_insn_r->opcode
10844 || 4 == arm_insn_r->opcode || 6 == arm_insn_r->opcode
10845 || 1 == arm_insn_r->opcode || 3 == arm_insn_r->opcode
10846 || 5 == arm_insn_r->opcode || 7 == arm_insn_r->opcode
10847 )
10848 {
10849 /* Rn is going to be changed in pre-indexed mode and
10850 post-indexed mode as well. */
10851 record_buf[0] = reg_src2;
10852 arm_insn_r->reg_rec_count = 1;
10853 }
10854 }
10855 else
10856 {
10857 /* Store insn, scaled register offset; scaled pre-indexed. */
10858 offset_12 = bits (arm_insn_r->arm_insn, 5, 6);
10859 /* Get Rm. */
10860 reg_src1 = bits (arm_insn_r->arm_insn, 0, 3);
10861 /* Get Rn. */
10862 reg_src2 = bits (arm_insn_r->arm_insn, 16, 19);
10863 /* Get shift_imm. */
10864 shift_imm = bits (arm_insn_r->arm_insn, 7, 11);
10865 regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval[0]);
10866 regcache_raw_read_signed (reg_cache, reg_src1, &s_word);
10867 regcache_raw_read_unsigned (reg_cache, reg_src2, &u_regval[1]);
10868 /* Offset_12 used as shift. */
10869 switch (offset_12)
10870 {
10871 case 0:
10872 /* Offset_12 used as index. */
10873 offset_12 = u_regval[0] << shift_imm;
10874 break;
10875
10876 case 1:
10877 offset_12 = (!shift_imm)?0:u_regval[0] >> shift_imm;
10878 break;
10879
10880 case 2:
10881 if (!shift_imm)
10882 {
10883 if (bit (u_regval[0], 31))
10884 {
10885 offset_12 = 0xFFFFFFFF;
10886 }
10887 else
10888 {
10889 offset_12 = 0;
10890 }
10891 }
10892 else
10893 {
10894 /* This is arithmetic shift. */
10895 offset_12 = s_word >> shift_imm;
10896 }
10897 break;
10898
10899 case 3:
10900 if (!shift_imm)
10901 {
10902 regcache_raw_read_unsigned (reg_cache, ARM_PS_REGNUM,
10903 &u_regval[1]);
10904 /* Get C flag value and shift it by 31. */
10905 offset_12 = (((bit (u_regval[1], 29)) << 31) \
10906 | (u_regval[0]) >> 1);
10907 }
10908 else
10909 {
10910 offset_12 = (u_regval[0] >> shift_imm) \
10911 | (u_regval[0] <<
10912 (sizeof(uint32_t) - shift_imm));
10913 }
10914 break;
10915
10916 default:
10917 gdb_assert_not_reached ("no decoding pattern found");
10918 break;
10919 }
10920
10921 regcache_raw_read_unsigned (reg_cache, reg_src2, &u_regval[1]);
10922 /* bit U set. */
10923 if (bit (arm_insn_r->arm_insn, 23))
10924 {
10925 tgt_mem_addr = u_regval[1] + offset_12;
10926 }
10927 else
10928 {
10929 tgt_mem_addr = u_regval[1] - offset_12;
10930 }
10931
10932 switch (arm_insn_r->opcode)
10933 {
10934 /* STR. */
10935 case 8:
10936 case 12:
10937 /* STR. */
10938 case 9:
10939 case 13:
10940 /* STRT. */
10941 case 1:
10942 case 5:
10943 /* STR. */
10944 case 0:
10945 case 4:
10946 record_buf_mem[0] = 4;
10947 break;
10948
10949 /* STRB. */
10950 case 10:
10951 case 14:
10952 /* STRB. */
10953 case 11:
10954 case 15:
10955 /* STRBT. */
10956 case 3:
10957 case 7:
10958 /* STRB. */
10959 case 2:
10960 case 6:
10961 record_buf_mem[0] = 1;
10962 break;
10963
10964 default:
10965 gdb_assert_not_reached ("no decoding pattern found");
10966 break;
10967 }
10968 record_buf_mem[1] = tgt_mem_addr;
10969 arm_insn_r->mem_rec_count = 1;
10970
10971 if (9 == arm_insn_r->opcode || 11 == arm_insn_r->opcode
10972 || 13 == arm_insn_r->opcode || 15 == arm_insn_r->opcode
10973 || 0 == arm_insn_r->opcode || 2 == arm_insn_r->opcode
10974 || 4 == arm_insn_r->opcode || 6 == arm_insn_r->opcode
10975 || 1 == arm_insn_r->opcode || 3 == arm_insn_r->opcode
10976 || 5 == arm_insn_r->opcode || 7 == arm_insn_r->opcode
10977 )
10978 {
10979 /* Rn is going to be changed in register scaled pre-indexed
10980 mode,and scaled post indexed mode. */
10981 record_buf[0] = reg_src2;
10982 arm_insn_r->reg_rec_count = 1;
10983 }
10984 }
10985 }
10986
10987 REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf);
10988 MEM_ALLOC (arm_insn_r->arm_mems, arm_insn_r->mem_rec_count, record_buf_mem);
10989 return 0;
10990 }
10991
10992 /* Handle ARM mode instructions with opcode 100. */
10993
10994 static int
10995 arm_record_ld_st_multiple (insn_decode_record *arm_insn_r)
10996 {
10997 struct regcache *reg_cache = arm_insn_r->regcache;
10998 uint32_t register_count = 0, register_bits;
10999 uint32_t reg_base, addr_mode;
11000 uint32_t record_buf[24], record_buf_mem[48];
11001 uint32_t wback;
11002 ULONGEST u_regval;
11003
11004 /* Fetch the list of registers. */
11005 register_bits = bits (arm_insn_r->arm_insn, 0, 15);
11006 arm_insn_r->reg_rec_count = 0;
11007
11008 /* Fetch the base register that contains the address we are loading data
11009 to. */
11010 reg_base = bits (arm_insn_r->arm_insn, 16, 19);
11011
11012 /* Calculate wback. */
11013 wback = (bit (arm_insn_r->arm_insn, 21) == 1);
11014
11015 if (bit (arm_insn_r->arm_insn, INSN_S_L_BIT_NUM))
11016 {
11017 /* LDM/LDMIA/LDMFD, LDMDA/LDMFA, LDMDB and LDMIB. */
11018
11019 /* Find out which registers are going to be loaded from memory. */
11020 while (register_bits)
11021 {
11022 if (register_bits & 0x00000001)
11023 record_buf[arm_insn_r->reg_rec_count++] = register_count;
11024 register_bits = register_bits >> 1;
11025 register_count++;
11026 }
11027
11028
11029 /* If wback is true, also save the base register, which is going to be
11030 written to. */
11031 if (wback)
11032 record_buf[arm_insn_r->reg_rec_count++] = reg_base;
11033
11034 /* Save the CPSR register. */
11035 record_buf[arm_insn_r->reg_rec_count++] = ARM_PS_REGNUM;
11036 }
11037 else
11038 {
11039 /* STM (STMIA, STMEA), STMDA (STMED), STMDB (STMFD) and STMIB (STMFA). */
11040
11041 addr_mode = bits (arm_insn_r->arm_insn, 23, 24);
11042
11043 regcache_raw_read_unsigned (reg_cache, reg_base, &u_regval);
11044
11045 /* Find out how many registers are going to be stored to memory. */
11046 while (register_bits)
11047 {
11048 if (register_bits & 0x00000001)
11049 register_count++;
11050 register_bits = register_bits >> 1;
11051 }
11052
11053 switch (addr_mode)
11054 {
11055 /* STMDA (STMED): Decrement after. */
11056 case 0:
11057 record_buf_mem[1] = (uint32_t) u_regval
11058 - register_count * ARM_INT_REGISTER_SIZE + 4;
11059 break;
11060 /* STM (STMIA, STMEA): Increment after. */
11061 case 1:
11062 record_buf_mem[1] = (uint32_t) u_regval;
11063 break;
11064 /* STMDB (STMFD): Decrement before. */
11065 case 2:
11066 record_buf_mem[1] = (uint32_t) u_regval
11067 - register_count * ARM_INT_REGISTER_SIZE;
11068 break;
11069 /* STMIB (STMFA): Increment before. */
11070 case 3:
11071 record_buf_mem[1] = (uint32_t) u_regval + ARM_INT_REGISTER_SIZE;
11072 break;
11073 default:
11074 gdb_assert_not_reached ("no decoding pattern found");
11075 break;
11076 }
11077
11078 record_buf_mem[0] = register_count * ARM_INT_REGISTER_SIZE;
11079 arm_insn_r->mem_rec_count = 1;
11080
11081 /* If wback is true, also save the base register, which is going to be
11082 written to. */
11083 if (wback)
11084 record_buf[arm_insn_r->reg_rec_count++] = reg_base;
11085 }
11086
11087 REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf);
11088 MEM_ALLOC (arm_insn_r->arm_mems, arm_insn_r->mem_rec_count, record_buf_mem);
11089 return 0;
11090 }
11091
11092 /* Handling opcode 101 insns. */
11093
11094 static int
11095 arm_record_b_bl (insn_decode_record *arm_insn_r)
11096 {
11097 uint32_t record_buf[8];
11098
11099 /* Handle B, BL, BLX(1) insns. */
11100 /* B simply branches so we do nothing here. */
11101 /* Note: BLX(1) doesnt fall here but instead it falls into
11102 extension space. */
11103 if (bit (arm_insn_r->arm_insn, 24))
11104 {
11105 record_buf[0] = ARM_LR_REGNUM;
11106 arm_insn_r->reg_rec_count = 1;
11107 }
11108
11109 REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf);
11110
11111 return 0;
11112 }
11113
11114 static int
11115 arm_record_unsupported_insn (insn_decode_record *arm_insn_r)
11116 {
11117 printf_unfiltered (_("Process record does not support instruction "
11118 "0x%0x at address %s.\n"),arm_insn_r->arm_insn,
11119 paddress (arm_insn_r->gdbarch, arm_insn_r->this_addr));
11120
11121 return -1;
11122 }
11123
11124 /* Record handler for vector data transfer instructions. */
11125
11126 static int
11127 arm_record_vdata_transfer_insn (insn_decode_record *arm_insn_r)
11128 {
11129 uint32_t bits_a, bit_c, bit_l, reg_t, reg_v;
11130 uint32_t record_buf[4];
11131
11132 reg_t = bits (arm_insn_r->arm_insn, 12, 15);
11133 reg_v = bits (arm_insn_r->arm_insn, 21, 23);
11134 bits_a = bits (arm_insn_r->arm_insn, 21, 23);
11135 bit_l = bit (arm_insn_r->arm_insn, 20);
11136 bit_c = bit (arm_insn_r->arm_insn, 8);
11137
11138 /* Handle VMOV instruction. */
11139 if (bit_l && bit_c)
11140 {
11141 record_buf[0] = reg_t;
11142 arm_insn_r->reg_rec_count = 1;
11143 }
11144 else if (bit_l && !bit_c)
11145 {
11146 /* Handle VMOV instruction. */
11147 if (bits_a == 0x00)
11148 {
11149 record_buf[0] = reg_t;
11150 arm_insn_r->reg_rec_count = 1;
11151 }
11152 /* Handle VMRS instruction. */
11153 else if (bits_a == 0x07)
11154 {
11155 if (reg_t == 15)
11156 reg_t = ARM_PS_REGNUM;
11157
11158 record_buf[0] = reg_t;
11159 arm_insn_r->reg_rec_count = 1;
11160 }
11161 }
11162 else if (!bit_l && !bit_c)
11163 {
11164 /* Handle VMOV instruction. */
11165 if (bits_a == 0x00)
11166 {
11167 record_buf[0] = ARM_D0_REGNUM + reg_v;
11168
11169 arm_insn_r->reg_rec_count = 1;
11170 }
11171 /* Handle VMSR instruction. */
11172 else if (bits_a == 0x07)
11173 {
11174 record_buf[0] = ARM_FPSCR_REGNUM;
11175 arm_insn_r->reg_rec_count = 1;
11176 }
11177 }
11178 else if (!bit_l && bit_c)
11179 {
11180 /* Handle VMOV instruction. */
11181 if (!(bits_a & 0x04))
11182 {
11183 record_buf[0] = (reg_v | (bit (arm_insn_r->arm_insn, 7) << 4))
11184 + ARM_D0_REGNUM;
11185 arm_insn_r->reg_rec_count = 1;
11186 }
11187 /* Handle VDUP instruction. */
11188 else
11189 {
11190 if (bit (arm_insn_r->arm_insn, 21))
11191 {
11192 reg_v = reg_v | (bit (arm_insn_r->arm_insn, 7) << 4);
11193 record_buf[0] = reg_v + ARM_D0_REGNUM;
11194 record_buf[1] = reg_v + ARM_D0_REGNUM + 1;
11195 arm_insn_r->reg_rec_count = 2;
11196 }
11197 else
11198 {
11199 reg_v = reg_v | (bit (arm_insn_r->arm_insn, 7) << 4);
11200 record_buf[0] = reg_v + ARM_D0_REGNUM;
11201 arm_insn_r->reg_rec_count = 1;
11202 }
11203 }
11204 }
11205
11206 REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf);
11207 return 0;
11208 }
11209
/* Record handler for extension register load/store instructions.
   Depending on the encoding this records either the destination
   registers (loads, VMOV to registers) or the memory words about to be
   overwritten (stores), plus the base register when writeback is set.
   Memory records are (length, address) pairs; always returns 0.  */

static int
arm_record_exreg_ld_st_insn (insn_decode_record *arm_insn_r)
{
  uint32_t opcode, single_reg;
  uint8_t op_vldm_vstm;
  uint32_t record_buf[8], record_buf_mem[128];
  ULONGEST u_regval = 0;

  struct regcache *reg_cache = arm_insn_r->regcache;

  opcode = bits (arm_insn_r->arm_insn, 20, 24);
  /* Bit 8 distinguishes coprocessor 10 (single precision) from 11
     (double precision).  */
  single_reg = !bit (arm_insn_r->arm_insn, 8);
  /* Mask out the W and D bits to group VLDM/VSTM variants.  */
  op_vldm_vstm = opcode & 0x1b;

  /* Handle VMOV instructions.  */
  if ((opcode & 0x1e) == 0x04)
    {
      if (bit (arm_insn_r->arm_insn, 20)) /* to_arm_registers bit 20?  */
	{
	  /* Transfer to two ARM core registers: both are clobbered.  */
	  record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
	  record_buf[1] = bits (arm_insn_r->arm_insn, 16, 19);
	  arm_insn_r->reg_rec_count = 2;
	}
      else
	{
	  uint8_t reg_m = bits (arm_insn_r->arm_insn, 0, 3);
	  uint8_t bit_m = bit (arm_insn_r->arm_insn, 5);

	  if (single_reg)
	    {
	      /* The first S register number m is REG_M:M (M is bit 5),
		 the corresponding D register number is REG_M:M / 2, which
		 is REG_M.  */
	      record_buf[arm_insn_r->reg_rec_count++] = ARM_D0_REGNUM + reg_m;
	      /* The second S register number is REG_M:M + 1, the
		 corresponding D register number is (REG_M:M + 1) / 2.
		 IOW, if bit M is 1, the first and second S registers
		 are mapped to different D registers, otherwise, they are
		 in the same D register.  */
	      if (bit_m)
		{
		  record_buf[arm_insn_r->reg_rec_count++]
		    = ARM_D0_REGNUM + reg_m + 1;
		}
	    }
	  else
	    {
	      /* Double precision: D register number is M:REG_M.  */
	      record_buf[0] = ((bit_m << 4) + reg_m + ARM_D0_REGNUM);
	      arm_insn_r->reg_rec_count = 1;
	    }
	}
    }
  /* Handle VSTM and VPUSH instructions.  */
  else if (op_vldm_vstm == 0x08 || op_vldm_vstm == 0x0a
	   || op_vldm_vstm == 0x12)
    {
      uint32_t start_address, reg_rn, imm_off32, imm_off8, memory_count;
      uint32_t memory_index = 0;

      reg_rn = bits (arm_insn_r->arm_insn, 16, 19);
      regcache_raw_read_unsigned (reg_cache, reg_rn, &u_regval);
      imm_off8 = bits (arm_insn_r->arm_insn, 0, 7);
      imm_off32 = imm_off8 << 2;
      /* NOTE(review): imm8 counts 32-bit words, yet for the
	 double-precision path below each loop iteration records 8
	 bytes, so up to twice the transferred range may be saved —
	 harmless for replay but confirm against the ARM ARM.  */
      memory_count = imm_off8;

      /* Increment-after stores upward from Rn; decrement-before
	 starts imm_off32 bytes below it.  */
      if (bit (arm_insn_r->arm_insn, 23))
	start_address = u_regval;
      else
	start_address = u_regval - imm_off32;

      /* Writeback (bit 21): the base register is modified too.  */
      if (bit (arm_insn_r->arm_insn, 21))
	{
	  record_buf[0] = reg_rn;
	  arm_insn_r->reg_rec_count = 1;
	}

      /* Record the memory range as (length, address) pairs.  */
      while (memory_count > 0)
	{
	  if (single_reg)
	    {
	      record_buf_mem[memory_index] = 4;
	      record_buf_mem[memory_index + 1] = start_address;
	      start_address = start_address + 4;
	      memory_index = memory_index + 2;
	    }
	  else
	    {
	      record_buf_mem[memory_index] = 4;
	      record_buf_mem[memory_index + 1] = start_address;
	      record_buf_mem[memory_index + 2] = 4;
	      record_buf_mem[memory_index + 3] = start_address + 4;
	      start_address = start_address + 8;
	      memory_index = memory_index + 4;
	    }
	  memory_count--;
	}
      arm_insn_r->mem_rec_count = (memory_index >> 1);
    }
  /* Handle VLDM instructions.  */
  else if (op_vldm_vstm == 0x09 || op_vldm_vstm == 0x0b
	   || op_vldm_vstm == 0x13)
    {
      uint32_t reg_count, reg_vd;
      uint32_t reg_index = 0;
      uint32_t bit_d = bit (arm_insn_r->arm_insn, 22);

      reg_vd = bits (arm_insn_r->arm_insn, 12, 15);
      reg_count = bits (arm_insn_r->arm_insn, 0, 7);

      /* REG_VD is the first D register number.  If the instruction
	 loads memory to S registers (SINGLE_REG is TRUE), the register
	 number is (REG_VD << 1 | bit D), so the corresponding D
	 register number is (REG_VD << 1 | bit D) / 2 = REG_VD.  */
      if (!single_reg)
	reg_vd = reg_vd | (bit_d << 4);

      if (bit (arm_insn_r->arm_insn, 21) /* write back */)
	record_buf[reg_index++] = bits (arm_insn_r->arm_insn, 16, 19);

      /* If the instruction loads memory to D register, REG_COUNT should
	 be divided by 2, according to the ARM Architecture Reference
	 Manual.  If the instruction loads memory to S register, divide by
	 2 as well because two S registers are mapped to D register.  */
      reg_count  = reg_count / 2;
      if (single_reg && bit_d)
	{
	  /* Increase the register count if S register list starts from
	     an odd number (bit d is one).  */
	  reg_count++;
	}

      /* Record the destination D registers, highest first.  */
      while (reg_count > 0)
	{
	  record_buf[reg_index++] = ARM_D0_REGNUM + reg_vd + reg_count - 1;
	  reg_count--;
	}
      arm_insn_r->reg_rec_count = reg_index;
    }
  /* VSTR Vector store register.  */
  else if ((opcode & 0x13) == 0x10)
    {
      uint32_t start_address, reg_rn, imm_off32, imm_off8;
      uint32_t memory_index = 0;

      reg_rn = bits (arm_insn_r->arm_insn, 16, 19);
      regcache_raw_read_unsigned (reg_cache, reg_rn, &u_regval);
      imm_off8 = bits (arm_insn_r->arm_insn, 0, 7);
      imm_off32 = imm_off8 << 2;

      /* Bit 23 is the U (add/subtract offset) bit.  */
      if (bit (arm_insn_r->arm_insn, 23))
	start_address = u_regval + imm_off32;
      else
	start_address = u_regval - imm_off32;

      /* One word for an S register, two words for a D register.  */
      if (single_reg)
	{
	  record_buf_mem[memory_index] = 4;
	  record_buf_mem[memory_index + 1] = start_address;
	  arm_insn_r->mem_rec_count = 1;
	}
      else
	{
	  record_buf_mem[memory_index] = 4;
	  record_buf_mem[memory_index + 1] = start_address;
	  record_buf_mem[memory_index + 2] = 4;
	  record_buf_mem[memory_index + 3] = start_address + 4;
	  arm_insn_r->mem_rec_count = 2;
	}
    }
  /* VLDR Vector load register.  */
  else if ((opcode & 0x13) == 0x11)
    {
      uint32_t reg_vd = bits (arm_insn_r->arm_insn, 12, 15);

      if (!single_reg)
	{
	  /* Bit 22 (D) is the high bit of the D register number.  */
	  reg_vd = reg_vd | (bit (arm_insn_r->arm_insn, 22) << 4);
	  record_buf[0] = ARM_D0_REGNUM + reg_vd;
	}
      else
	{
	  reg_vd = (reg_vd << 1) | bit (arm_insn_r->arm_insn, 22);
	  /* Record register D rather than pseudo register S.  */
	  record_buf[0] = ARM_D0_REGNUM + reg_vd / 2;
	}
      arm_insn_r->reg_rec_count = 1;
    }

  REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf);
  MEM_ALLOC (arm_insn_r->arm_mems, arm_insn_r->mem_rec_count, record_buf_mem);
  return 0;
}
11404
/* Record handler for arm/thumb mode VFP data processing instructions.
   The decode only has to classify which destination form the
   instruction writes, captured in INSN_T0..INSN_T3 below; the switch
   at the end turns that classification into register records:
     INSN_T0 - a D register pair (Advanced SIMD form),
     INSN_T1 - one D register, with D bit as the high register bit,
     INSN_T2 - one S register, recorded as its containing D register,
     INSN_T3 - FPSCR only (compares).
   Returns 0 on success.  */

static int
arm_record_vfp_data_proc_insn (insn_decode_record *arm_insn_r)
{
  uint32_t opc1, opc2, opc3, dp_op_sz, bit_d, reg_vd;
  uint32_t record_buf[4];
  enum insn_types {INSN_T0, INSN_T1, INSN_T2, INSN_T3, INSN_INV};
  enum insn_types curr_insn_type = INSN_INV;

  reg_vd = bits (arm_insn_r->arm_insn, 12, 15);
  opc1 = bits (arm_insn_r->arm_insn, 20, 23);
  opc2 = bits (arm_insn_r->arm_insn, 16, 19);
  opc3 = bits (arm_insn_r->arm_insn, 6, 7);
  /* dp_op_sz set means a double-precision destination.  */
  dp_op_sz = bit (arm_insn_r->arm_insn, 8);
  bit_d = bit (arm_insn_r->arm_insn, 22);
  /* Mask off the "D" bit.  */
  opc1 = opc1 & ~0x04;

  /* Handle VMLA, VMLS.  */
  if (opc1 == 0x00)
    {
      if (bit (arm_insn_r->arm_insn, 10))
	{
	  if (bit (arm_insn_r->arm_insn, 6))
	    curr_insn_type = INSN_T0;
	  else
	    curr_insn_type = INSN_T1;
	}
      else
	{
	  if (dp_op_sz)
	    curr_insn_type = INSN_T1;
	  else
	    curr_insn_type = INSN_T2;
	}
    }
  /* Handle VNMLA, VNMLS, VNMUL.  */
  else if (opc1 == 0x01)
    {
      if (dp_op_sz)
	curr_insn_type = INSN_T1;
      else
	curr_insn_type = INSN_T2;
    }
  /* Handle VMUL.  */
  else if (opc1 == 0x02 && !(opc3 & 0x01))
    {
      if (bit (arm_insn_r->arm_insn, 10))
	{
	  if (bit (arm_insn_r->arm_insn, 6))
	    curr_insn_type = INSN_T0;
	  else
	    curr_insn_type = INSN_T1;
	}
      else
	{
	  if (dp_op_sz)
	    curr_insn_type = INSN_T1;
	  else
	    curr_insn_type = INSN_T2;
	}
    }
  /* Handle VADD, VSUB.  */
  else if (opc1 == 0x03)
    {
      if (!bit (arm_insn_r->arm_insn, 9))
	{
	  if (bit (arm_insn_r->arm_insn, 6))
	    curr_insn_type = INSN_T0;
	  else
	    curr_insn_type = INSN_T1;
	}
      else
	{
	  if (dp_op_sz)
	    curr_insn_type = INSN_T1;
	  else
	    curr_insn_type = INSN_T2;
	}
    }
  /* Handle VDIV.  */
  else if (opc1 == 0x08)
    {
      if (dp_op_sz)
	curr_insn_type = INSN_T1;
      else
	curr_insn_type = INSN_T2;
    }
  /* Handle all other vfp data processing instructions.  */
  else if (opc1 == 0x0b)
    {
      /* Handle VMOV.  */
      if (!(opc3 & 0x01) || (opc2 == 0x00 && opc3 == 0x01))
	{
	  if (bit (arm_insn_r->arm_insn, 4))
	    {
	      if (bit (arm_insn_r->arm_insn, 6))
		curr_insn_type = INSN_T0;
	      else
		curr_insn_type = INSN_T1;
	    }
	  else
	    {
	      if (dp_op_sz)
		curr_insn_type = INSN_T1;
	      else
		curr_insn_type = INSN_T2;
	    }
	}
      /* Handle VNEG and VABS.  */
      else if ((opc2 == 0x01 && opc3 == 0x01)
	       || (opc2 == 0x00 && opc3 == 0x03))
	{
	  if (!bit (arm_insn_r->arm_insn, 11))
	    {
	      if (bit (arm_insn_r->arm_insn, 6))
		curr_insn_type = INSN_T0;
	      else
		curr_insn_type = INSN_T1;
	    }
	  else
	    {
	      if (dp_op_sz)
		curr_insn_type = INSN_T1;
	      else
		curr_insn_type = INSN_T2;
	    }
	}
      /* Handle VSQRT.  */
      else if (opc2 == 0x01 && opc3 == 0x03)
	{
	  if (dp_op_sz)
	    curr_insn_type = INSN_T1;
	  else
	    curr_insn_type = INSN_T2;
	}
      /* Handle VCVT.  */
      else if (opc2 == 0x07 && opc3 == 0x03)
	{
	  /* Double <-> single conversion: the destination has the
	     opposite precision of the size bit, hence the inverted
	     test compared with the branches above.  */
	  if (!dp_op_sz)
	    curr_insn_type = INSN_T1;
	  else
	    curr_insn_type = INSN_T2;
	}
      else if (opc3 & 0x01)
	{
	  /* Handle VCVT.  */
	  if ((opc2 == 0x08) || (opc2 & 0x0e) == 0x0c)
	    {
	      if (!bit (arm_insn_r->arm_insn, 18))
		curr_insn_type = INSN_T2;
	      else
		{
		  if (dp_op_sz)
		    curr_insn_type = INSN_T1;
		  else
		    curr_insn_type = INSN_T2;
		}
	    }
	  /* Handle VCVT.  */
	  else if ((opc2 & 0x0e) == 0x0a || (opc2 & 0x0e) == 0x0e)
	    {
	      if (dp_op_sz)
		curr_insn_type = INSN_T1;
	      else
		curr_insn_type = INSN_T2;
	    }
	  /* Handle VCVTB, VCVTT.  */
	  else if ((opc2 & 0x0e) == 0x02)
	    curr_insn_type = INSN_T2;
	  /* Handle VCMP, VCMPE.  */
	  else if ((opc2 & 0x0e) == 0x04)
	    curr_insn_type = INSN_T3;
	}
    }

  /* NOTE(review): if the decode above falls through with INSN_INV the
     assertion below fires; callers are expected to dispatch only VFP
     data-processing encodings here — confirm against the caller.  */
  switch (curr_insn_type)
    {
    case INSN_T0:
      reg_vd = reg_vd | (bit_d << 4);
      record_buf[0] = reg_vd + ARM_D0_REGNUM;
      record_buf[1] = reg_vd + ARM_D0_REGNUM + 1;
      arm_insn_r->reg_rec_count = 2;
      break;

    case INSN_T1:
      reg_vd = reg_vd | (bit_d << 4);
      record_buf[0] = reg_vd + ARM_D0_REGNUM;
      arm_insn_r->reg_rec_count = 1;
      break;

    case INSN_T2:
      /* S register: record the D register that contains it.  */
      reg_vd = (reg_vd << 1) | bit_d;
      record_buf[0] = reg_vd + ARM_D0_REGNUM;
      arm_insn_r->reg_rec_count = 1;
      break;

    case INSN_T3:
      record_buf[0] = ARM_FPSCR_REGNUM;
      arm_insn_r->reg_rec_count = 1;
      break;

    default:
      gdb_assert_not_reached ("no decoding pattern found");
      break;
    }

  REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf);
  return 0;
}
11616
11617 /* Handling opcode 110 insns. */
11618
11619 static int
11620 arm_record_asimd_vfp_coproc (insn_decode_record *arm_insn_r)
11621 {
11622 uint32_t op1, op1_ebit, coproc;
11623
11624 coproc = bits (arm_insn_r->arm_insn, 8, 11);
11625 op1 = bits (arm_insn_r->arm_insn, 20, 25);
11626 op1_ebit = bit (arm_insn_r->arm_insn, 20);
11627
11628 if ((coproc & 0x0e) == 0x0a)
11629 {
11630 /* Handle extension register ld/st instructions. */
11631 if (!(op1 & 0x20))
11632 return arm_record_exreg_ld_st_insn (arm_insn_r);
11633
11634 /* 64-bit transfers between arm core and extension registers. */
11635 if ((op1 & 0x3e) == 0x04)
11636 return arm_record_exreg_ld_st_insn (arm_insn_r);
11637 }
11638 else
11639 {
11640 /* Handle coprocessor ld/st instructions. */
11641 if (!(op1 & 0x3a))
11642 {
11643 /* Store. */
11644 if (!op1_ebit)
11645 return arm_record_unsupported_insn (arm_insn_r);
11646 else
11647 /* Load. */
11648 return arm_record_unsupported_insn (arm_insn_r);
11649 }
11650
11651 /* Move to coprocessor from two arm core registers. */
11652 if (op1 == 0x4)
11653 return arm_record_unsupported_insn (arm_insn_r);
11654
11655 /* Move to two arm core registers from coprocessor. */
11656 if (op1 == 0x5)
11657 {
11658 uint32_t reg_t[2];
11659
11660 reg_t[0] = bits (arm_insn_r->arm_insn, 12, 15);
11661 reg_t[1] = bits (arm_insn_r->arm_insn, 16, 19);
11662 arm_insn_r->reg_rec_count = 2;
11663
11664 REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, reg_t);
11665 return 0;
11666 }
11667 }
11668 return arm_record_unsupported_insn (arm_insn_r);
11669 }
11670
/* Handling opcode 111 insns.  Covers SWI/SVC system calls (delegated
   to the OS-specific syscall recorder when available), coprocessor
   register transfers (MRC/MCR, VFP data processing and data
   transfers), and the remaining coprocessor forms, which are
   unsupported.  Returns 0 on success, -1 on failure.  */

static int
arm_record_coproc_data_proc (insn_decode_record *arm_insn_r)
{
  uint32_t op, op1_ebit, coproc, bits_24_25;
  struct gdbarch_tdep *tdep = gdbarch_tdep (arm_insn_r->gdbarch);
  struct regcache *reg_cache = arm_insn_r->regcache;

  arm_insn_r->opcode = bits (arm_insn_r->arm_insn, 24, 27);
  coproc = bits (arm_insn_r->arm_insn, 8, 11);
  op1_ebit = bit (arm_insn_r->arm_insn, 20);
  op = bit (arm_insn_r->arm_insn, 4);
  bits_24_25 = bits (arm_insn_r->arm_insn, 24, 25);

  /* Handle arm SWI/SVC system call instructions.  */
  if (bits_24_25 == 0x3)
    {
      if (tdep->arm_syscall_record != NULL)
	{
	  ULONGEST svc_operand, svc_number;

	  svc_operand = (0x00ffffff & arm_insn_r->arm_insn);

	  /* A non-zero SVC immediate is the OABI convention (number
	     encoded in the instruction, biased by 0x900000); a zero
	     immediate is EABI, where the number is in r7.  */
	  if (svc_operand)  /* OABI.  */
	    svc_number = svc_operand - 0x900000;
	  else /* EABI.  */
	    regcache_raw_read_unsigned (reg_cache, 7, &svc_number);

	  return tdep->arm_syscall_record (reg_cache, svc_number);
	}
      else
	{
	  printf_unfiltered (_("no syscall record support\n"));
	  return -1;
	}
    }
  else if (bits_24_25 == 0x02)
    {
      if (op)
	{
	  if ((coproc & 0x0e) == 0x0a)
	    {
	      /* 8, 16, and 32-bit transfer */
	      return arm_record_vdata_transfer_insn (arm_insn_r);
	    }
	  else
	    {
	      if (op1_ebit)
		{
		  /* MRC, MRC2 */
		  uint32_t record_buf[1];

		  record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
		  /* Rt == 15 means the flags are transferred to the
		     CPSR rather than to r15.  */
		  if (record_buf[0] == 15)
		    record_buf[0] = ARM_PS_REGNUM;

		  arm_insn_r->reg_rec_count = 1;
		  REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count,
			     record_buf);
		  return 0;
		}
	      else
		{
		  /* MCR, MCR2 */
		  return -1;
		}
	    }
	}
      else
	{
	  if ((coproc & 0x0e) == 0x0a)
	    {
	      /* VFP data-processing instructions.  */
	      return arm_record_vfp_data_proc_insn (arm_insn_r);
	    }
	  else
	    {
	      /* CDP, CDP2 */
	      return -1;
	    }
	}
    }
  else
    {
      unsigned int op1 = bits (arm_insn_r->arm_insn, 20, 25);

      /* NOTE(review): the `op1 == 5' half of the next `else if'
	 condition is dead — op1 == 5 is consumed by the branch just
	 below, and when coproc is VFP it falls through to the final
	 `return -1'.  All paths return -1 anyway, so behavior is
	 unaffected, but the structure looks unintentional.  */
      if (op1 == 5)
	{
	  if ((coproc & 0x0e) != 0x0a)
	    {
	      /* MRRC, MRRC2 */
	      return -1;
	    }
	}
      else if (op1 == 4 || op1 == 5)
	{
	  if ((coproc & 0x0e) == 0x0a)
	    {
	      /* 64-bit transfers between ARM core and extension */
	      return -1;
	    }
	  else if (op1 == 4)
	    {
	      /* MCRR, MCRR2 */
	      return -1;
	    }
	}
      else if (op1 == 0 || op1 == 1)
	{
	  /* UNDEFINED */
	  return -1;
	}
      else
	{
	  if ((coproc & 0x0e) == 0x0a)
	    {
	      /* Extension register load/store */
	    }
	  else
	    {
	      /* STC, STC2, LDC, LDC2 */
	    }
	  return -1;
	}
    }

  return -1;
}
11800
11801 /* Handling opcode 000 insns. */
11802
11803 static int
11804 thumb_record_shift_add_sub (insn_decode_record *thumb_insn_r)
11805 {
11806 uint32_t record_buf[8];
11807 uint32_t reg_src1 = 0;
11808
11809 reg_src1 = bits (thumb_insn_r->arm_insn, 0, 2);
11810
11811 record_buf[0] = ARM_PS_REGNUM;
11812 record_buf[1] = reg_src1;
11813 thumb_insn_r->reg_rec_count = 2;
11814
11815 REG_ALLOC (thumb_insn_r->arm_regs, thumb_insn_r->reg_rec_count, record_buf);
11816
11817 return 0;
11818 }
11819
11820
11821 /* Handling opcode 001 insns. */
11822
11823 static int
11824 thumb_record_add_sub_cmp_mov (insn_decode_record *thumb_insn_r)
11825 {
11826 uint32_t record_buf[8];
11827 uint32_t reg_src1 = 0;
11828
11829 reg_src1 = bits (thumb_insn_r->arm_insn, 8, 10);
11830
11831 record_buf[0] = ARM_PS_REGNUM;
11832 record_buf[1] = reg_src1;
11833 thumb_insn_r->reg_rec_count = 2;
11834
11835 REG_ALLOC (thumb_insn_r->arm_regs, thumb_insn_r->reg_rec_count, record_buf);
11836
11837 return 0;
11838 }
11839
11840 /* Handling opcode 010 insns. */
11841
11842 static int
11843 thumb_record_ld_st_reg_offset (insn_decode_record *thumb_insn_r)
11844 {
11845 struct regcache *reg_cache = thumb_insn_r->regcache;
11846 uint32_t record_buf[8], record_buf_mem[8];
11847
11848 uint32_t reg_src1 = 0, reg_src2 = 0;
11849 uint32_t opcode1 = 0, opcode2 = 0, opcode3 = 0;
11850
11851 ULONGEST u_regval[2] = {0};
11852
11853 opcode1 = bits (thumb_insn_r->arm_insn, 10, 12);
11854
11855 if (bit (thumb_insn_r->arm_insn, 12))
11856 {
11857 /* Handle load/store register offset. */
11858 uint32_t opB = bits (thumb_insn_r->arm_insn, 9, 11);
11859
11860 if (in_inclusive_range (opB, 4U, 7U))
11861 {
11862 /* LDR(2), LDRB(2) , LDRH(2), LDRSB, LDRSH. */
11863 reg_src1 = bits (thumb_insn_r->arm_insn,0, 2);
11864 record_buf[0] = reg_src1;
11865 thumb_insn_r->reg_rec_count = 1;
11866 }
11867 else if (in_inclusive_range (opB, 0U, 2U))
11868 {
11869 /* STR(2), STRB(2), STRH(2) . */
11870 reg_src1 = bits (thumb_insn_r->arm_insn, 3, 5);
11871 reg_src2 = bits (thumb_insn_r->arm_insn, 6, 8);
11872 regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval[0]);
11873 regcache_raw_read_unsigned (reg_cache, reg_src2, &u_regval[1]);
11874 if (0 == opB)
11875 record_buf_mem[0] = 4; /* STR (2). */
11876 else if (2 == opB)
11877 record_buf_mem[0] = 1; /* STRB (2). */
11878 else if (1 == opB)
11879 record_buf_mem[0] = 2; /* STRH (2). */
11880 record_buf_mem[1] = u_regval[0] + u_regval[1];
11881 thumb_insn_r->mem_rec_count = 1;
11882 }
11883 }
11884 else if (bit (thumb_insn_r->arm_insn, 11))
11885 {
11886 /* Handle load from literal pool. */
11887 /* LDR(3). */
11888 reg_src1 = bits (thumb_insn_r->arm_insn, 8, 10);
11889 record_buf[0] = reg_src1;
11890 thumb_insn_r->reg_rec_count = 1;
11891 }
11892 else if (opcode1)
11893 {
11894 /* Special data instructions and branch and exchange */
11895 opcode2 = bits (thumb_insn_r->arm_insn, 8, 9);
11896 opcode3 = bits (thumb_insn_r->arm_insn, 0, 2);
11897 if ((3 == opcode2) && (!opcode3))
11898 {
11899 /* Branch with exchange. */
11900 record_buf[0] = ARM_PS_REGNUM;
11901 thumb_insn_r->reg_rec_count = 1;
11902 }
11903 else
11904 {
11905 /* Format 8; special data processing insns. */
11906 record_buf[0] = ARM_PS_REGNUM;
11907 record_buf[1] = (bit (thumb_insn_r->arm_insn, 7) << 3
11908 | bits (thumb_insn_r->arm_insn, 0, 2));
11909 thumb_insn_r->reg_rec_count = 2;
11910 }
11911 }
11912 else
11913 {
11914 /* Format 5; data processing insns. */
11915 reg_src1 = bits (thumb_insn_r->arm_insn, 0, 2);
11916 if (bit (thumb_insn_r->arm_insn, 7))
11917 {
11918 reg_src1 = reg_src1 + 8;
11919 }
11920 record_buf[0] = ARM_PS_REGNUM;
11921 record_buf[1] = reg_src1;
11922 thumb_insn_r->reg_rec_count = 2;
11923 }
11924
11925 REG_ALLOC (thumb_insn_r->arm_regs, thumb_insn_r->reg_rec_count, record_buf);
11926 MEM_ALLOC (thumb_insn_r->arm_mems, thumb_insn_r->mem_rec_count,
11927 record_buf_mem);
11928
11929 return 0;
11930 }
11931
/* Handling opcode 011 insns.  */
11933
11934 static int
11935 thumb_record_ld_st_imm_offset (insn_decode_record *thumb_insn_r)
11936 {
11937 struct regcache *reg_cache = thumb_insn_r->regcache;
11938 uint32_t record_buf[8], record_buf_mem[8];
11939
11940 uint32_t reg_src1 = 0;
11941 uint32_t opcode = 0, immed_5 = 0;
11942
11943 ULONGEST u_regval = 0;
11944
11945 opcode = bits (thumb_insn_r->arm_insn, 11, 12);
11946
11947 if (opcode)
11948 {
11949 /* LDR(1). */
11950 reg_src1 = bits (thumb_insn_r->arm_insn, 0, 2);
11951 record_buf[0] = reg_src1;
11952 thumb_insn_r->reg_rec_count = 1;
11953 }
11954 else
11955 {
11956 /* STR(1). */
11957 reg_src1 = bits (thumb_insn_r->arm_insn, 3, 5);
11958 immed_5 = bits (thumb_insn_r->arm_insn, 6, 10);
11959 regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval);
11960 record_buf_mem[0] = 4;
11961 record_buf_mem[1] = u_regval + (immed_5 * 4);
11962 thumb_insn_r->mem_rec_count = 1;
11963 }
11964
11965 REG_ALLOC (thumb_insn_r->arm_regs, thumb_insn_r->reg_rec_count, record_buf);
11966 MEM_ALLOC (thumb_insn_r->arm_mems, thumb_insn_r->mem_rec_count,
11967 record_buf_mem);
11968
11969 return 0;
11970 }
11971
11972 /* Handling opcode 100 insns. */
11973
11974 static int
11975 thumb_record_ld_st_stack (insn_decode_record *thumb_insn_r)
11976 {
11977 struct regcache *reg_cache = thumb_insn_r->regcache;
11978 uint32_t record_buf[8], record_buf_mem[8];
11979
11980 uint32_t reg_src1 = 0;
11981 uint32_t opcode = 0, immed_8 = 0, immed_5 = 0;
11982
11983 ULONGEST u_regval = 0;
11984
11985 opcode = bits (thumb_insn_r->arm_insn, 11, 12);
11986
11987 if (3 == opcode)
11988 {
11989 /* LDR(4). */
11990 reg_src1 = bits (thumb_insn_r->arm_insn, 8, 10);
11991 record_buf[0] = reg_src1;
11992 thumb_insn_r->reg_rec_count = 1;
11993 }
11994 else if (1 == opcode)
11995 {
11996 /* LDRH(1). */
11997 reg_src1 = bits (thumb_insn_r->arm_insn, 0, 2);
11998 record_buf[0] = reg_src1;
11999 thumb_insn_r->reg_rec_count = 1;
12000 }
12001 else if (2 == opcode)
12002 {
12003 /* STR(3). */
12004 immed_8 = bits (thumb_insn_r->arm_insn, 0, 7);
12005 regcache_raw_read_unsigned (reg_cache, ARM_SP_REGNUM, &u_regval);
12006 record_buf_mem[0] = 4;
12007 record_buf_mem[1] = u_regval + (immed_8 * 4);
12008 thumb_insn_r->mem_rec_count = 1;
12009 }
12010 else if (0 == opcode)
12011 {
12012 /* STRH(1). */
12013 immed_5 = bits (thumb_insn_r->arm_insn, 6, 10);
12014 reg_src1 = bits (thumb_insn_r->arm_insn, 3, 5);
12015 regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval);
12016 record_buf_mem[0] = 2;
12017 record_buf_mem[1] = u_regval + (immed_5 * 2);
12018 thumb_insn_r->mem_rec_count = 1;
12019 }
12020
12021 REG_ALLOC (thumb_insn_r->arm_regs, thumb_insn_r->reg_rec_count, record_buf);
12022 MEM_ALLOC (thumb_insn_r->arm_mems, thumb_insn_r->mem_rec_count,
12023 record_buf_mem);
12024
12025 return 0;
12026 }
12027
/* Handling opcode 101 insns: ADR, ADD (SP plus immediate) and the
   miscellaneous 16-bit Thumb instructions (adjust SP, extend, PUSH,
   POP, reverse, CPS, BKPT, IT and hints).  Records the registers and
   memory about to be changed; returns 0 on success, -1 for BKPT and
   unhandled encodings.  */

static int
thumb_record_misc (insn_decode_record *thumb_insn_r)
{
  struct regcache *reg_cache = thumb_insn_r->regcache;

  uint32_t opcode = 0;
  uint32_t register_bits = 0, register_count = 0;
  uint32_t index = 0, start_address = 0;
  uint32_t record_buf[24], record_buf_mem[48];
  uint32_t reg_src1;

  ULONGEST u_regval = 0;

  opcode = bits (thumb_insn_r->arm_insn, 11, 12);

  if (opcode == 0 || opcode == 1)
    {
      /* ADR and ADD (SP plus immediate) */

      reg_src1 = bits (thumb_insn_r->arm_insn, 8, 10);
      record_buf[0] = reg_src1;
      thumb_insn_r->reg_rec_count = 1;
    }
  else
    {
      /* Miscellaneous 16-bit instructions */
      uint32_t opcode2 = bits (thumb_insn_r->arm_insn, 8, 11);

      switch (opcode2)
	{
	case 6:
	  /* SETEND and CPS */
	  break;
	case 0:
	  /* ADD/SUB (SP plus immediate): only SP is modified.
	     NOTE(review): reg_src1 is assigned here but unused —
	     presumably a leftover; confirm before cleaning up.  */
	  reg_src1 = bits (thumb_insn_r->arm_insn, 8, 10);
	  record_buf[0] = ARM_SP_REGNUM;
	  thumb_insn_r->reg_rec_count = 1;
	  break;
	case 1: /* fall through  */
	case 3: /* fall through  */
	case 9: /* fall through  */
	case 11:
	  /* CBNZ, CBZ */
	  break;
	case 2:
	  /* SXTH, SXTB, UXTH, UXTB */
	  record_buf[0] = bits (thumb_insn_r->arm_insn, 0, 2);
	  thumb_insn_r->reg_rec_count = 1;
	  break;
	case 4: /* fall through  */
	case 5:
	  /* PUSH.  */
	  register_bits = bits (thumb_insn_r->arm_insn, 0, 7);
	  regcache_raw_read_unsigned (reg_cache, ARM_SP_REGNUM, &u_regval);
	  /* Count the registers in the list.  */
	  while (register_bits)
	    {
	      if (register_bits & 0x00000001)
		register_count++;
	      register_bits = register_bits >> 1;
	    }
	  /* Bit 8 set pushes LR as well, hence the extra word.  */
	  start_address = u_regval -  \
	    (4 * (bit (thumb_insn_r->arm_insn, 8) + register_count));
	  thumb_insn_r->mem_rec_count = register_count;
	  /* Record the stack words about to be overwritten as
	     (length, address) pairs, filled from the end.  */
	  while (register_count)
	    {
	      record_buf_mem[(register_count * 2) - 1] = start_address;
	      record_buf_mem[(register_count * 2) - 2] = 4;
	      start_address = start_address + 4;
	      register_count--;
	    }
	  record_buf[0] = ARM_SP_REGNUM;
	  thumb_insn_r->reg_rec_count = 1;
	  break;
	case 10:
	  /* REV, REV16, REVSH */
	  record_buf[0] = bits (thumb_insn_r->arm_insn, 0, 2);
	  thumb_insn_r->reg_rec_count = 1;
	  break;
	case 12: /* fall through  */
	case 13:
	  /* POP.  */
	  register_bits = bits (thumb_insn_r->arm_insn, 0, 7);
	  /* Every register in the list is overwritten by the load.  */
	  while (register_bits)
	    {
	      if (register_bits & 0x00000001)
		record_buf[index++] = register_count;
	      register_bits = register_bits >> 1;
	      register_count++;
	    }
	  record_buf[index++] = ARM_PS_REGNUM;
	  record_buf[index++] = ARM_SP_REGNUM;
	  thumb_insn_r->reg_rec_count = index;
	  break;
	case 0xe:
	  /* BKPT insn.  */
	  /* Handle enhanced software breakpoint insn, BKPT.  */
	  /* CPSR is changed to be executed in ARM state,  disabling normal
	     interrupts, entering abort mode.  */
	  /* According to high vector configuration PC is set.  */
	  /* User hits breakpoint and type reverse, in that case, we need to go back with
	  previous CPSR and Program Counter.  */
	  record_buf[0] = ARM_PS_REGNUM;
	  record_buf[1] = ARM_LR_REGNUM;
	  thumb_insn_r->reg_rec_count = 2;
	  /* We need to save SPSR value, which is not yet done.  */
	  printf_unfiltered (_("Process record does not support instruction "
			       "0x%0x at address %s.\n"),
			     thumb_insn_r->arm_insn,
			     paddress (thumb_insn_r->gdbarch,
				       thumb_insn_r->this_addr));
	  return -1;

	case 0xf:
	  /* If-Then, and hints */
	  break;
	default:
	  return -1;
	};
    }

  REG_ALLOC (thumb_insn_r->arm_regs, thumb_insn_r->reg_rec_count, record_buf);
  MEM_ALLOC (thumb_insn_r->arm_mems, thumb_insn_r->mem_rec_count,
	     record_buf_mem);

  return 0;
}
12157
/* Handling opcode 110 insns: LDMIA, STMIA, the conditional branch
   space (handled by the caller, which saves the PC), and the Thumb
   SWI/SVC system call, delegated to the OS-specific recorder when
   one is installed.  Returns 0 on success, -1 on failure.  */

static int
thumb_record_ldm_stm_swi (insn_decode_record *thumb_insn_r)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (thumb_insn_r->gdbarch);
  struct regcache *reg_cache = thumb_insn_r->regcache;

  uint32_t ret = 0; /* function return value: -1:record failure ;  0:success  */
  uint32_t reg_src1 = 0;
  uint32_t opcode1 = 0, opcode2 = 0, register_bits = 0, register_count = 0;
  uint32_t index = 0, start_address = 0;
  uint32_t record_buf[24], record_buf_mem[48];

  ULONGEST u_regval = 0;

  opcode1 = bits (thumb_insn_r->arm_insn, 8, 12);
  opcode2 = bits (thumb_insn_r->arm_insn, 11, 12);

  if (1 == opcode2)
    {

      /* LDMIA.  */
      register_bits = bits (thumb_insn_r->arm_insn, 0, 7);
      /* Get Rn.  */
      reg_src1 = bits (thumb_insn_r->arm_insn, 8, 10);
      /* Each register in the list is overwritten; the base Rn is
	 recorded too since LDMIA writes it back.  */
      while (register_bits)
	{
	  if (register_bits & 0x00000001)
	    record_buf[index++] = register_count;
	  register_bits = register_bits >> 1;
	  register_count++;
	}
      record_buf[index++] = reg_src1;
      thumb_insn_r->reg_rec_count = index;
    }
  else if (0 == opcode2)
    {
      /* It handles both STMIA.  */
      register_bits = bits (thumb_insn_r->arm_insn, 0, 7);
      /* Get Rn.  */
      reg_src1 = bits (thumb_insn_r->arm_insn, 8, 10);
      regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval);
      /* Count the registers being stored.  */
      while (register_bits)
	{
	  if (register_bits & 0x00000001)
	    register_count++;
	  register_bits = register_bits >> 1;
	}
      start_address = u_regval;
      thumb_insn_r->mem_rec_count = register_count;
      /* Record the memory words about to be overwritten as
	 (length, address) pairs, filled from the end.  */
      while (register_count)
	{
	  record_buf_mem[(register_count * 2) - 1] = start_address;
	  record_buf_mem[(register_count * 2) - 2] = 4;
	  start_address = start_address + 4;
	  register_count--;
	}
    }
  else if (0x1F == opcode1)
    {
      /* Handle arm syscall insn.  */
      if (tdep->arm_syscall_record != NULL)
	{
	  /* EABI convention: the syscall number is in r7.  */
	  regcache_raw_read_unsigned (reg_cache, 7, &u_regval);
	  ret = tdep->arm_syscall_record (reg_cache, u_regval);
	}
      else
	{
	  printf_unfiltered (_("no syscall record support\n"));
	  return -1;
	}
    }

  /* B (1), conditional branch is automatically taken care in process_record,
    as PC is saved there.  */

  REG_ALLOC (thumb_insn_r->arm_regs, thumb_insn_r->reg_rec_count, record_buf);
  MEM_ALLOC (thumb_insn_r->arm_mems, thumb_insn_r->mem_rec_count,
	     record_buf_mem);

  return ret;
}
12241
12242 /* Handling opcode 111 insns. */
12243
12244 static int
12245 thumb_record_branch (insn_decode_record *thumb_insn_r)
12246 {
12247 uint32_t record_buf[8];
12248 uint32_t bits_h = 0;
12249
12250 bits_h = bits (thumb_insn_r->arm_insn, 11, 12);
12251
12252 if (2 == bits_h || 3 == bits_h)
12253 {
12254 /* BL */
12255 record_buf[0] = ARM_LR_REGNUM;
12256 thumb_insn_r->reg_rec_count = 1;
12257 }
12258 else if (1 == bits_h)
12259 {
12260 /* BLX(1). */
12261 record_buf[0] = ARM_PS_REGNUM;
12262 record_buf[1] = ARM_LR_REGNUM;
12263 thumb_insn_r->reg_rec_count = 2;
12264 }
12265
12266 /* B(2) is automatically taken care in process_record, as PC is
12267 saved there. */
12268
12269 REG_ALLOC (thumb_insn_r->arm_regs, thumb_insn_r->reg_rec_count, record_buf);
12270
12271 return 0;
12272 }
12273
12274 /* Handler for thumb2 load/store multiple instructions. */
12275
static int
thumb2_record_ld_st_multiple (insn_decode_record *thumb2_insn_r)
{
  struct regcache *reg_cache = thumb2_insn_r->regcache;

  uint32_t reg_rn, op;
  uint32_t register_bits = 0, register_count = 0;
  uint32_t index = 0, start_address = 0;
  /* Register numbers to record, and (length, address) pairs of memory
     locations to record.  */
  uint32_t record_buf[24], record_buf_mem[48];

  ULONGEST u_regval = 0;

  /* Rn is the base register; OP (insn bits 23-24) selects the
     addressing variant.  */
  reg_rn = bits (thumb2_insn_r->arm_insn, 16, 19);
  op = bits (thumb2_insn_r->arm_insn, 23, 24);

  if (0 == op || 3 == op)
    {
      if (bit (thumb2_insn_r->arm_insn, INSN_S_L_BIT_NUM))
	{
	  /* Handle RFE instruction.  */
	  record_buf[0] = ARM_PS_REGNUM;
	  thumb2_insn_r->reg_rec_count = 1;
	}
      else
	{
	  /* Handle SRS instruction after reading banked SP.  */
	  return arm_record_unsupported_insn (thumb2_insn_r);
	}
    }
  else if (1 == op || 2 == op)
    {
      if (bit (thumb2_insn_r->arm_insn, INSN_S_L_BIT_NUM))
	{
	  /* Handle LDM/LDMIA/LDMFD and LDMDB/LDMEA instructions.  */
	  register_bits = bits (thumb2_insn_r->arm_insn, 0, 15);
	  /* Record every register named in the register list.  */
	  while (register_bits)
	    {
	      if (register_bits & 0x00000001)
		record_buf[index++] = register_count;

	      register_count++;
	      register_bits = register_bits >> 1;
	    }
	  /* The base register (write-back) and flags may change too.  */
	  record_buf[index++] = reg_rn;
	  record_buf[index++] = ARM_PS_REGNUM;
	  thumb2_insn_r->reg_rec_count = index;
	}
      else
	{
	  /* Handle STM/STMIA/STMEA and STMDB/STMFD.  */
	  register_bits = bits (thumb2_insn_r->arm_insn, 0, 15);
	  regcache_raw_read_unsigned (reg_cache, reg_rn, &u_regval);
	  /* Count the registers named in the register list.  */
	  while (register_bits)
	    {
	      if (register_bits & 0x00000001)
		register_count++;

	      register_bits = register_bits >> 1;
	    }

	  if (1 == op)
	    {
	      /* Start address for STM/STMIA/STMEA: the block is stored
		 upwards starting at Rn.  */
	      start_address = u_regval;
	    }
	  else if (2 == op)
	    {
	      /* Start address for STMDB/STMFD: the block lies below Rn.  */
	      start_address = u_regval - register_count * 4;
	    }

	  /* Record one 4-byte (length, address) pair per stored
	     register, filled from the highest slot downwards.  */
	  thumb2_insn_r->mem_rec_count = register_count;
	  while (register_count)
	    {
	      record_buf_mem[register_count * 2 - 1] = start_address;
	      record_buf_mem[register_count * 2 - 2] = 4;
	      start_address = start_address + 4;
	      register_count--;
	    }
	  /* Base register write-back and flags.  */
	  record_buf[0] = reg_rn;
	  record_buf[1] = ARM_PS_REGNUM;
	  thumb2_insn_r->reg_rec_count = 2;
	}
    }

  MEM_ALLOC (thumb2_insn_r->arm_mems, thumb2_insn_r->mem_rec_count,
	     record_buf_mem);
  REG_ALLOC (thumb2_insn_r->arm_regs, thumb2_insn_r->reg_rec_count,
	     record_buf);
  return ARM_RECORD_SUCCESS;
}
12367
12368 /* Handler for thumb2 load/store (dual/exclusive) and table branch
12369 instructions. */
12370
static int
thumb2_record_ld_st_dual_ex_tbb (insn_decode_record *thumb2_insn_r)
{
  struct regcache *reg_cache = thumb2_insn_r->regcache;

  uint32_t reg_rd, reg_rn, offset_imm;
  uint32_t reg_dest1, reg_dest2;
  uint32_t address, offset_addr;
  /* Register numbers to record, and (length, address) pairs of memory
     locations to record.  */
  uint32_t record_buf[8], record_buf_mem[8];
  uint32_t op1, op2, op3;

  ULONGEST u_regval[2];

  op1 = bits (thumb2_insn_r->arm_insn, 23, 24);
  op2 = bits (thumb2_insn_r->arm_insn, 20, 21);
  op3 = bits (thumb2_insn_r->arm_insn, 4, 7);

  if (bit (thumb2_insn_r->arm_insn, INSN_S_L_BIT_NUM))
    {
      /* Load forms: record the destination register(s) and flags.  */
      if(!(1 == op1 && 1 == op2 && (0 == op3 || 1 == op3)))
	{
	  reg_dest1 = bits (thumb2_insn_r->arm_insn, 12, 15);
	  record_buf[0] = reg_dest1;
	  record_buf[1] = ARM_PS_REGNUM;
	  thumb2_insn_r->reg_rec_count = 2;
	}

      /* Dual-destination loads additionally write the register in
	 bits 8-11.  */
      if (3 == op2 || (op1 & 2) || (1 == op1 && 1 == op2 && 7 == op3))
	{
	  reg_dest2 = bits (thumb2_insn_r->arm_insn, 8, 11);
	  record_buf[2] = reg_dest2;
	  thumb2_insn_r->reg_rec_count = 3;
	}
    }
  else
    {
      /* Store forms: the base register Rn gives the target address.  */
      reg_rn = bits (thumb2_insn_r->arm_insn, 16, 19);
      regcache_raw_read_unsigned (reg_cache, reg_rn, &u_regval[0]);

      if (0 == op1 && 0 == op2)
	{
	  /* Handle STREX.  */
	  offset_imm = bits (thumb2_insn_r->arm_insn, 0, 7);
	  address = u_regval[0] + (offset_imm * 4);
	  record_buf_mem[0] = 4;
	  record_buf_mem[1] = address;
	  thumb2_insn_r->mem_rec_count = 1;
	  /* Rd (bits 0-3) receives the exclusive-store status.  */
	  reg_rd = bits (thumb2_insn_r->arm_insn, 0, 3);
	  record_buf[0] = reg_rd;
	  thumb2_insn_r->reg_rec_count = 1;
	}
      else if (1 == op1 && 0 == op2)
	{
	  reg_rd = bits (thumb2_insn_r->arm_insn, 0, 3);
	  record_buf[0] = reg_rd;
	  thumb2_insn_r->reg_rec_count = 1;
	  address = u_regval[0];
	  record_buf_mem[1] = address;

	  if (4 == op3)
	    {
	      /* Handle STREXB.  */
	      record_buf_mem[0] = 1;
	      thumb2_insn_r->mem_rec_count = 1;
	    }
	  else if (5 == op3)
	    {
	      /* Handle STREXH.  */
	      record_buf_mem[0] = 2 ;
	      thumb2_insn_r->mem_rec_count = 1;
	    }
	  else if (7 == op3)
	    {
	      /* Handle STREXD: two consecutive 4-byte words.  */
	      address = u_regval[0];
	      record_buf_mem[0] = 4;
	      record_buf_mem[2] = 4;
	      record_buf_mem[3] = address + 4;
	      thumb2_insn_r->mem_rec_count = 2;
	    }
	}
      else
	{
	  /* Remaining dual-store encodings (presumably STRD — verify
	     against the ARM ARM); bit 24 selects indexed addressing,
	     bit 23 add vs. subtract of the scaled 8-bit offset.  */
	  offset_imm = bits (thumb2_insn_r->arm_insn, 0, 7);

	  if (bit (thumb2_insn_r->arm_insn, 24))
	    {
	      if (bit (thumb2_insn_r->arm_insn, 23))
		offset_addr = u_regval[0] + (offset_imm * 4);
	      else
		offset_addr = u_regval[0] - (offset_imm * 4);

	      address = offset_addr;
	    }
	  else
	    address = u_regval[0];

	  /* Two consecutive words are written.  */
	  record_buf_mem[0] = 4;
	  record_buf_mem[1] = address;
	  record_buf_mem[2] = 4;
	  record_buf_mem[3] = address + 4;
	  thumb2_insn_r->mem_rec_count = 2;
	  /* Base register may be written back.  */
	  record_buf[0] = reg_rn;
	  thumb2_insn_r->reg_rec_count = 1;
	}
    }

  REG_ALLOC (thumb2_insn_r->arm_regs, thumb2_insn_r->reg_rec_count,
	     record_buf);
  MEM_ALLOC (thumb2_insn_r->arm_mems, thumb2_insn_r->mem_rec_count,
	     record_buf_mem);
  return ARM_RECORD_SUCCESS;
}
12484
12485 /* Handler for thumb2 data processing (shift register and modified immediate)
12486 instructions. */
12487
12488 static int
12489 thumb2_record_data_proc_sreg_mimm (insn_decode_record *thumb2_insn_r)
12490 {
12491 uint32_t reg_rd, op;
12492 uint32_t record_buf[8];
12493
12494 op = bits (thumb2_insn_r->arm_insn, 21, 24);
12495 reg_rd = bits (thumb2_insn_r->arm_insn, 8, 11);
12496
12497 if ((0 == op || 4 == op || 8 == op || 13 == op) && 15 == reg_rd)
12498 {
12499 record_buf[0] = ARM_PS_REGNUM;
12500 thumb2_insn_r->reg_rec_count = 1;
12501 }
12502 else
12503 {
12504 record_buf[0] = reg_rd;
12505 record_buf[1] = ARM_PS_REGNUM;
12506 thumb2_insn_r->reg_rec_count = 2;
12507 }
12508
12509 REG_ALLOC (thumb2_insn_r->arm_regs, thumb2_insn_r->reg_rec_count,
12510 record_buf);
12511 return ARM_RECORD_SUCCESS;
12512 }
12513
12514 /* Generic handler for thumb2 instructions which effect destination and PS
12515 registers. */
12516
12517 static int
12518 thumb2_record_ps_dest_generic (insn_decode_record *thumb2_insn_r)
12519 {
12520 uint32_t reg_rd;
12521 uint32_t record_buf[8];
12522
12523 reg_rd = bits (thumb2_insn_r->arm_insn, 8, 11);
12524
12525 record_buf[0] = reg_rd;
12526 record_buf[1] = ARM_PS_REGNUM;
12527 thumb2_insn_r->reg_rec_count = 2;
12528
12529 REG_ALLOC (thumb2_insn_r->arm_regs, thumb2_insn_r->reg_rec_count,
12530 record_buf);
12531 return ARM_RECORD_SUCCESS;
12532 }
12533
12534 /* Handler for thumb2 branch and miscellaneous control instructions. */
12535
12536 static int
12537 thumb2_record_branch_misc_cntrl (insn_decode_record *thumb2_insn_r)
12538 {
12539 uint32_t op, op1, op2;
12540 uint32_t record_buf[8];
12541
12542 op = bits (thumb2_insn_r->arm_insn, 20, 26);
12543 op1 = bits (thumb2_insn_r->arm_insn, 12, 14);
12544 op2 = bits (thumb2_insn_r->arm_insn, 8, 11);
12545
12546 /* Handle MSR insn. */
12547 if (!(op1 & 0x2) && 0x38 == op)
12548 {
12549 if (!(op2 & 0x3))
12550 {
12551 /* CPSR is going to be changed. */
12552 record_buf[0] = ARM_PS_REGNUM;
12553 thumb2_insn_r->reg_rec_count = 1;
12554 }
12555 else
12556 {
12557 arm_record_unsupported_insn(thumb2_insn_r);
12558 return -1;
12559 }
12560 }
12561 else if (4 == (op1 & 0x5) || 5 == (op1 & 0x5))
12562 {
12563 /* BLX. */
12564 record_buf[0] = ARM_PS_REGNUM;
12565 record_buf[1] = ARM_LR_REGNUM;
12566 thumb2_insn_r->reg_rec_count = 2;
12567 }
12568
12569 REG_ALLOC (thumb2_insn_r->arm_regs, thumb2_insn_r->reg_rec_count,
12570 record_buf);
12571 return ARM_RECORD_SUCCESS;
12572 }
12573
12574 /* Handler for thumb2 store single data item instructions. */
12575
static int
thumb2_record_str_single_data (insn_decode_record *thumb2_insn_r)
{
  struct regcache *reg_cache = thumb2_insn_r->regcache;

  uint32_t reg_rn, reg_rm, offset_imm, shift_imm;
  uint32_t address, offset_addr;
  /* Register numbers to record, and (length, address) pairs of memory
     locations to record.  */
  uint32_t record_buf[8], record_buf_mem[8];
  uint32_t op1, op2;

  ULONGEST u_regval[2];

  /* OP1 (bits 21-23) encodes the access size; Rn is the base.  */
  op1 = bits (thumb2_insn_r->arm_insn, 21, 23);
  op2 = bits (thumb2_insn_r->arm_insn, 6, 11);
  reg_rn = bits (thumb2_insn_r->arm_insn, 16, 19);
  regcache_raw_read_unsigned (reg_cache, reg_rn, &u_regval[0]);

  if (bit (thumb2_insn_r->arm_insn, 23))
    {
      /* T2 encoding: 12-bit positive immediate offset.  */
      offset_imm = bits (thumb2_insn_r->arm_insn, 0, 11);
      offset_addr = u_regval[0] + offset_imm;
      address = offset_addr;
    }
  else
    {
      /* T3 encoding.  */
      if ((0 == op1 || 1 == op1 || 2 == op1) && !(op2 & 0x20))
	{
	  /* Handle STRB (register): address = Rn + (Rm << shift).  */
	  reg_rm = bits (thumb2_insn_r->arm_insn, 0, 3);
	  regcache_raw_read_unsigned (reg_cache, reg_rm, &u_regval[1]);
	  shift_imm = bits (thumb2_insn_r->arm_insn, 4, 5);
	  offset_addr = u_regval[1] << shift_imm;
	  address = u_regval[0] + offset_addr;
	}
      else
	{
	  /* Immediate form: bit 10 selects indexed addressing, bit 9
	     add vs. subtract of the 8-bit offset.  */
	  offset_imm = bits (thumb2_insn_r->arm_insn, 0, 7);
	  if (bit (thumb2_insn_r->arm_insn, 10))
	    {
	      if (bit (thumb2_insn_r->arm_insn, 9))
		offset_addr = u_regval[0] + offset_imm;
	      else
		offset_addr = u_regval[0] - offset_imm;

	      address = offset_addr;
	    }
	  else
	    address = u_regval[0];
	}
    }

  /* Record the number of bytes the store will overwrite.  */
  switch (op1)
    {
      /* Store byte instructions.  */
      case 4:
      case 0:
	record_buf_mem[0] = 1;
	break;
      /* Store half word instructions.  */
      case 1:
      case 5:
	record_buf_mem[0] = 2;
	break;
      /* Store word instructions.  */
      case 2:
      case 6:
	record_buf_mem[0] = 4;
	break;

      default:
	gdb_assert_not_reached ("no decoding pattern found");
	break;
    }

  record_buf_mem[1] = address;
  thumb2_insn_r->mem_rec_count = 1;
  /* The base register may be written back by the indexed forms.  */
  record_buf[0] = reg_rn;
  thumb2_insn_r->reg_rec_count = 1;

  REG_ALLOC (thumb2_insn_r->arm_regs, thumb2_insn_r->reg_rec_count,
	     record_buf);
  MEM_ALLOC (thumb2_insn_r->arm_mems, thumb2_insn_r->mem_rec_count,
	     record_buf_mem);
  return ARM_RECORD_SUCCESS;
}
12663
12664 /* Handler for thumb2 load memory hints instructions. */
12665
12666 static int
12667 thumb2_record_ld_mem_hints (insn_decode_record *thumb2_insn_r)
12668 {
12669 uint32_t record_buf[8];
12670 uint32_t reg_rt, reg_rn;
12671
12672 reg_rt = bits (thumb2_insn_r->arm_insn, 12, 15);
12673 reg_rn = bits (thumb2_insn_r->arm_insn, 16, 19);
12674
12675 if (ARM_PC_REGNUM != reg_rt)
12676 {
12677 record_buf[0] = reg_rt;
12678 record_buf[1] = reg_rn;
12679 record_buf[2] = ARM_PS_REGNUM;
12680 thumb2_insn_r->reg_rec_count = 3;
12681
12682 REG_ALLOC (thumb2_insn_r->arm_regs, thumb2_insn_r->reg_rec_count,
12683 record_buf);
12684 return ARM_RECORD_SUCCESS;
12685 }
12686
12687 return ARM_RECORD_FAILURE;
12688 }
12689
12690 /* Handler for thumb2 load word instructions. */
12691
12692 static int
12693 thumb2_record_ld_word (insn_decode_record *thumb2_insn_r)
12694 {
12695 uint32_t record_buf[8];
12696
12697 record_buf[0] = bits (thumb2_insn_r->arm_insn, 12, 15);
12698 record_buf[1] = ARM_PS_REGNUM;
12699 thumb2_insn_r->reg_rec_count = 2;
12700
12701 REG_ALLOC (thumb2_insn_r->arm_regs, thumb2_insn_r->reg_rec_count,
12702 record_buf);
12703 return ARM_RECORD_SUCCESS;
12704 }
12705
12706 /* Handler for thumb2 long multiply, long multiply accumulate, and
12707 divide instructions. */
12708
12709 static int
12710 thumb2_record_lmul_lmla_div (insn_decode_record *thumb2_insn_r)
12711 {
12712 uint32_t opcode1 = 0, opcode2 = 0;
12713 uint32_t record_buf[8];
12714
12715 opcode1 = bits (thumb2_insn_r->arm_insn, 20, 22);
12716 opcode2 = bits (thumb2_insn_r->arm_insn, 4, 7);
12717
12718 if (0 == opcode1 || 2 == opcode1 || (opcode1 >= 4 && opcode1 <= 6))
12719 {
12720 /* Handle SMULL, UMULL, SMULAL. */
12721 /* Handle SMLAL(S), SMULL(S), UMLAL(S), UMULL(S). */
12722 record_buf[0] = bits (thumb2_insn_r->arm_insn, 16, 19);
12723 record_buf[1] = bits (thumb2_insn_r->arm_insn, 12, 15);
12724 record_buf[2] = ARM_PS_REGNUM;
12725 thumb2_insn_r->reg_rec_count = 3;
12726 }
12727 else if (1 == opcode1 || 3 == opcode2)
12728 {
12729 /* Handle SDIV and UDIV. */
12730 record_buf[0] = bits (thumb2_insn_r->arm_insn, 16, 19);
12731 record_buf[1] = bits (thumb2_insn_r->arm_insn, 12, 15);
12732 record_buf[2] = ARM_PS_REGNUM;
12733 thumb2_insn_r->reg_rec_count = 3;
12734 }
12735 else
12736 return ARM_RECORD_FAILURE;
12737
12738 REG_ALLOC (thumb2_insn_r->arm_regs, thumb2_insn_r->reg_rec_count,
12739 record_buf);
12740 return ARM_RECORD_SUCCESS;
12741 }
12742
12743 /* Record handler for thumb32 coprocessor instructions. */
12744
12745 static int
12746 thumb2_record_coproc_insn (insn_decode_record *thumb2_insn_r)
12747 {
12748 if (bit (thumb2_insn_r->arm_insn, 25))
12749 return arm_record_coproc_data_proc (thumb2_insn_r);
12750 else
12751 return arm_record_asimd_vfp_coproc (thumb2_insn_r);
12752 }
12753
12754 /* Record handler for advance SIMD structure load/store instructions. */
12755
12756 static int
12757 thumb2_record_asimd_struct_ld_st (insn_decode_record *thumb2_insn_r)
12758 {
12759 struct regcache *reg_cache = thumb2_insn_r->regcache;
12760 uint32_t l_bit, a_bit, b_bits;
12761 uint32_t record_buf[128], record_buf_mem[128];
12762 uint32_t reg_rn, reg_vd, address, f_elem;
12763 uint32_t index_r = 0, index_e = 0, bf_regs = 0, index_m = 0, loop_t = 0;
12764 uint8_t f_ebytes;
12765
12766 l_bit = bit (thumb2_insn_r->arm_insn, 21);
12767 a_bit = bit (thumb2_insn_r->arm_insn, 23);
12768 b_bits = bits (thumb2_insn_r->arm_insn, 8, 11);
12769 reg_rn = bits (thumb2_insn_r->arm_insn, 16, 19);
12770 reg_vd = bits (thumb2_insn_r->arm_insn, 12, 15);
12771 reg_vd = (bit (thumb2_insn_r->arm_insn, 22) << 4) | reg_vd;
12772 f_ebytes = (1 << bits (thumb2_insn_r->arm_insn, 6, 7));
12773 f_elem = 8 / f_ebytes;
12774
12775 if (!l_bit)
12776 {
12777 ULONGEST u_regval = 0;
12778 regcache_raw_read_unsigned (reg_cache, reg_rn, &u_regval);
12779 address = u_regval;
12780
12781 if (!a_bit)
12782 {
12783 /* Handle VST1. */
12784 if (b_bits == 0x02 || b_bits == 0x0a || (b_bits & 0x0e) == 0x06)
12785 {
12786 if (b_bits == 0x07)
12787 bf_regs = 1;
12788 else if (b_bits == 0x0a)
12789 bf_regs = 2;
12790 else if (b_bits == 0x06)
12791 bf_regs = 3;
12792 else if (b_bits == 0x02)
12793 bf_regs = 4;
12794 else
12795 bf_regs = 0;
12796
12797 for (index_r = 0; index_r < bf_regs; index_r++)
12798 {
12799 for (index_e = 0; index_e < f_elem; index_e++)
12800 {
12801 record_buf_mem[index_m++] = f_ebytes;
12802 record_buf_mem[index_m++] = address;
12803 address = address + f_ebytes;
12804 thumb2_insn_r->mem_rec_count += 1;
12805 }
12806 }
12807 }
12808 /* Handle VST2. */
12809 else if (b_bits == 0x03 || (b_bits & 0x0e) == 0x08)
12810 {
12811 if (b_bits == 0x09 || b_bits == 0x08)
12812 bf_regs = 1;
12813 else if (b_bits == 0x03)
12814 bf_regs = 2;
12815 else
12816 bf_regs = 0;
12817
12818 for (index_r = 0; index_r < bf_regs; index_r++)
12819 for (index_e = 0; index_e < f_elem; index_e++)
12820 {
12821 for (loop_t = 0; loop_t < 2; loop_t++)
12822 {
12823 record_buf_mem[index_m++] = f_ebytes;
12824 record_buf_mem[index_m++] = address + (loop_t * f_ebytes);
12825 thumb2_insn_r->mem_rec_count += 1;
12826 }
12827 address = address + (2 * f_ebytes);
12828 }
12829 }
12830 /* Handle VST3. */
12831 else if ((b_bits & 0x0e) == 0x04)
12832 {
12833 for (index_e = 0; index_e < f_elem; index_e++)
12834 {
12835 for (loop_t = 0; loop_t < 3; loop_t++)
12836 {
12837 record_buf_mem[index_m++] = f_ebytes;
12838 record_buf_mem[index_m++] = address + (loop_t * f_ebytes);
12839 thumb2_insn_r->mem_rec_count += 1;
12840 }
12841 address = address + (3 * f_ebytes);
12842 }
12843 }
12844 /* Handle VST4. */
12845 else if (!(b_bits & 0x0e))
12846 {
12847 for (index_e = 0; index_e < f_elem; index_e++)
12848 {
12849 for (loop_t = 0; loop_t < 4; loop_t++)
12850 {
12851 record_buf_mem[index_m++] = f_ebytes;
12852 record_buf_mem[index_m++] = address + (loop_t * f_ebytes);
12853 thumb2_insn_r->mem_rec_count += 1;
12854 }
12855 address = address + (4 * f_ebytes);
12856 }
12857 }
12858 }
12859 else
12860 {
12861 uint8_t bft_size = bits (thumb2_insn_r->arm_insn, 10, 11);
12862
12863 if (bft_size == 0x00)
12864 f_ebytes = 1;
12865 else if (bft_size == 0x01)
12866 f_ebytes = 2;
12867 else if (bft_size == 0x02)
12868 f_ebytes = 4;
12869 else
12870 f_ebytes = 0;
12871
12872 /* Handle VST1. */
12873 if (!(b_bits & 0x0b) || b_bits == 0x08)
12874 thumb2_insn_r->mem_rec_count = 1;
12875 /* Handle VST2. */
12876 else if ((b_bits & 0x0b) == 0x01 || b_bits == 0x09)
12877 thumb2_insn_r->mem_rec_count = 2;
12878 /* Handle VST3. */
12879 else if ((b_bits & 0x0b) == 0x02 || b_bits == 0x0a)
12880 thumb2_insn_r->mem_rec_count = 3;
12881 /* Handle VST4. */
12882 else if ((b_bits & 0x0b) == 0x03 || b_bits == 0x0b)
12883 thumb2_insn_r->mem_rec_count = 4;
12884
12885 for (index_m = 0; index_m < thumb2_insn_r->mem_rec_count; index_m++)
12886 {
12887 record_buf_mem[index_m] = f_ebytes;
12888 record_buf_mem[index_m] = address + (index_m * f_ebytes);
12889 }
12890 }
12891 }
12892 else
12893 {
12894 if (!a_bit)
12895 {
12896 /* Handle VLD1. */
12897 if (b_bits == 0x02 || b_bits == 0x0a || (b_bits & 0x0e) == 0x06)
12898 thumb2_insn_r->reg_rec_count = 1;
12899 /* Handle VLD2. */
12900 else if (b_bits == 0x03 || (b_bits & 0x0e) == 0x08)
12901 thumb2_insn_r->reg_rec_count = 2;
12902 /* Handle VLD3. */
12903 else if ((b_bits & 0x0e) == 0x04)
12904 thumb2_insn_r->reg_rec_count = 3;
12905 /* Handle VLD4. */
12906 else if (!(b_bits & 0x0e))
12907 thumb2_insn_r->reg_rec_count = 4;
12908 }
12909 else
12910 {
12911 /* Handle VLD1. */
12912 if (!(b_bits & 0x0b) || b_bits == 0x08 || b_bits == 0x0c)
12913 thumb2_insn_r->reg_rec_count = 1;
12914 /* Handle VLD2. */
12915 else if ((b_bits & 0x0b) == 0x01 || b_bits == 0x09 || b_bits == 0x0d)
12916 thumb2_insn_r->reg_rec_count = 2;
12917 /* Handle VLD3. */
12918 else if ((b_bits & 0x0b) == 0x02 || b_bits == 0x0a || b_bits == 0x0e)
12919 thumb2_insn_r->reg_rec_count = 3;
12920 /* Handle VLD4. */
12921 else if ((b_bits & 0x0b) == 0x03 || b_bits == 0x0b || b_bits == 0x0f)
12922 thumb2_insn_r->reg_rec_count = 4;
12923
12924 for (index_r = 0; index_r < thumb2_insn_r->reg_rec_count; index_r++)
12925 record_buf[index_r] = reg_vd + ARM_D0_REGNUM + index_r;
12926 }
12927 }
12928
12929 if (bits (thumb2_insn_r->arm_insn, 0, 3) != 15)
12930 {
12931 record_buf[index_r] = reg_rn;
12932 thumb2_insn_r->reg_rec_count += 1;
12933 }
12934
12935 REG_ALLOC (thumb2_insn_r->arm_regs, thumb2_insn_r->reg_rec_count,
12936 record_buf);
12937 MEM_ALLOC (thumb2_insn_r->arm_mems, thumb2_insn_r->mem_rec_count,
12938 record_buf_mem);
12939 return 0;
12940 }
12941
12942 /* Decodes thumb2 instruction type and invokes its record handler. */
12943
static unsigned int
thumb2_record_decode_insn_handler (insn_decode_record *thumb2_insn_r)
{
  uint32_t op, op1, op2;

  /* OP (bit 15), OP1 (bits 27-28) and OP2 (bits 20-26) together select
     the Thumb-2 instruction class, following the 32-bit Thumb encoding
     table.  Each branch dispatches to the matching record handler.  */
  op = bit (thumb2_insn_r->arm_insn, 15);
  op1 = bits (thumb2_insn_r->arm_insn, 27, 28);
  op2 = bits (thumb2_insn_r->arm_insn, 20, 26);

  if (op1 == 0x01)
    {
      if (!(op2 & 0x64 ))
	{
	  /* Load/store multiple instruction.  */
	  return thumb2_record_ld_st_multiple (thumb2_insn_r);
	}
      else if ((op2 & 0x64) == 0x4)
	{
	  /* Load/store (dual/exclusive) and table branch instruction.  */
	  return thumb2_record_ld_st_dual_ex_tbb (thumb2_insn_r);
	}
      else if ((op2 & 0x60) == 0x20)
	{
	  /* Data-processing (shifted register).  */
	  return thumb2_record_data_proc_sreg_mimm (thumb2_insn_r);
	}
      else if (op2 & 0x40)
	{
	  /* Co-processor instructions.  */
	  return thumb2_record_coproc_insn (thumb2_insn_r);
	}
    }
  else if (op1 == 0x02)
    {
      if (op)
	{
	  /* Branches and miscellaneous control instructions.  */
	  return thumb2_record_branch_misc_cntrl (thumb2_insn_r);
	}
      else if (op2 & 0x20)
	{
	  /* Data-processing (plain binary immediate) instruction.  */
	  return thumb2_record_ps_dest_generic (thumb2_insn_r);
	}
      else
	{
	  /* Data-processing (modified immediate).  */
	  return thumb2_record_data_proc_sreg_mimm (thumb2_insn_r);
	}
    }
  else if (op1 == 0x03)
    {
      if (!(op2 & 0x71 ))
	{
	  /* Store single data item.  */
	  return thumb2_record_str_single_data (thumb2_insn_r);
	}
      else if (!((op2 & 0x71) ^ 0x10))
	{
	  /* Advanced SIMD or structure load/store instructions.  */
	  return thumb2_record_asimd_struct_ld_st (thumb2_insn_r);
	}
      else if (!((op2 & 0x67) ^ 0x01))
	{
	  /* Load byte, memory hints instruction.  */
	  return thumb2_record_ld_mem_hints (thumb2_insn_r);
	}
      else if (!((op2 & 0x67) ^ 0x03))
	{
	  /* Load halfword, memory hints instruction.  */
	  return thumb2_record_ld_mem_hints (thumb2_insn_r);
	}
      else if (!((op2 & 0x67) ^ 0x05))
	{
	  /* Load word instruction.  */
	  return thumb2_record_ld_word (thumb2_insn_r);
	}
      else if (!((op2 & 0x70) ^ 0x20))
	{
	  /* Data-processing (register) instruction.  */
	  return thumb2_record_ps_dest_generic (thumb2_insn_r);
	}
      else if (!((op2 & 0x78) ^ 0x30))
	{
	  /* Multiply, multiply accumulate, abs diff instruction.  */
	  return thumb2_record_ps_dest_generic (thumb2_insn_r);
	}
      else if (!((op2 & 0x78) ^ 0x38))
	{
	  /* Long multiply, long multiply accumulate, and divide.  */
	  return thumb2_record_lmul_lmla_div (thumb2_insn_r);
	}
      else if (op2 & 0x40)
	{
	  /* Co-processor instructions.  */
	  return thumb2_record_coproc_insn (thumb2_insn_r);
	}
    }

  /* NOTE(review): -1 is returned through an unsigned return type;
     callers only compare the result against ARM_RECORD_SUCCESS, so
     the wrap-around is harmless but worth confirming.  */
  return -1;
}
13045
13046 namespace {
13047 /* Abstract memory reader. */
13048
class abstract_memory_reader
{
public:
  /* Read LEN bytes of target memory at address MEMADDR, placing the
     results in GDB's memory at BUF.  Return true on success.  */

  /* NOTE(review): no virtual destructor is declared; instances are
     used by reference in this file and never deleted through a base
     pointer — confirm before extending usage.  */
  virtual bool read (CORE_ADDR memaddr, gdb_byte *buf, const size_t len) = 0;
};
13057
13058 /* Instruction reader from real target. */
13059
13060 class instruction_reader : public abstract_memory_reader
13061 {
13062 public:
13063 bool read (CORE_ADDR memaddr, gdb_byte *buf, const size_t len) override
13064 {
13065 if (target_read_memory (memaddr, buf, len))
13066 return false;
13067 else
13068 return true;
13069 }
13070 };
13071
13072 } // namespace
13073
13074 /* Extracts arm/thumb/thumb2 insn depending on the size, and returns 0 on success
13075 and positive val on failure. */
13076
13077 static int
13078 extract_arm_insn (abstract_memory_reader& reader,
13079 insn_decode_record *insn_record, uint32_t insn_size)
13080 {
13081 gdb_byte buf[insn_size];
13082
13083 memset (&buf[0], 0, insn_size);
13084
13085 if (!reader.read (insn_record->this_addr, buf, insn_size))
13086 return 1;
13087 insn_record->arm_insn = (uint32_t) extract_unsigned_integer (&buf[0],
13088 insn_size,
13089 gdbarch_byte_order_for_code (insn_record->gdbarch));
13090 return 0;
13091 }
13092
13093 typedef int (*sti_arm_hdl_fp_t) (insn_decode_record*);
13094
13095 /* Decode arm/thumb insn depending on condition cods and opcodes; and
13096 dispatch it. */
13097
13098 static int
13099 decode_insn (abstract_memory_reader &reader, insn_decode_record *arm_record,
13100 record_type_t record_type, uint32_t insn_size)
13101 {
13102
13103 /* (Starting from numerical 0); bits 25, 26, 27 decodes type of arm
13104 instruction. */
13105 static const sti_arm_hdl_fp_t arm_handle_insn[8] =
13106 {
13107 arm_record_data_proc_misc_ld_str, /* 000. */
13108 arm_record_data_proc_imm, /* 001. */
13109 arm_record_ld_st_imm_offset, /* 010. */
13110 arm_record_ld_st_reg_offset, /* 011. */
13111 arm_record_ld_st_multiple, /* 100. */
13112 arm_record_b_bl, /* 101. */
13113 arm_record_asimd_vfp_coproc, /* 110. */
13114 arm_record_coproc_data_proc /* 111. */
13115 };
13116
13117 /* (Starting from numerical 0); bits 13,14,15 decodes type of thumb
13118 instruction. */
13119 static const sti_arm_hdl_fp_t thumb_handle_insn[8] =
13120 { \
13121 thumb_record_shift_add_sub, /* 000. */
13122 thumb_record_add_sub_cmp_mov, /* 001. */
13123 thumb_record_ld_st_reg_offset, /* 010. */
13124 thumb_record_ld_st_imm_offset, /* 011. */
13125 thumb_record_ld_st_stack, /* 100. */
13126 thumb_record_misc, /* 101. */
13127 thumb_record_ldm_stm_swi, /* 110. */
13128 thumb_record_branch /* 111. */
13129 };
13130
13131 uint32_t ret = 0; /* return value: negative:failure 0:success. */
13132 uint32_t insn_id = 0;
13133
13134 if (extract_arm_insn (reader, arm_record, insn_size))
13135 {
13136 if (record_debug)
13137 {
13138 printf_unfiltered (_("Process record: error reading memory at "
13139 "addr %s len = %d.\n"),
13140 paddress (arm_record->gdbarch,
13141 arm_record->this_addr), insn_size);
13142 }
13143 return -1;
13144 }
13145 else if (ARM_RECORD == record_type)
13146 {
13147 arm_record->cond = bits (arm_record->arm_insn, 28, 31);
13148 insn_id = bits (arm_record->arm_insn, 25, 27);
13149
13150 if (arm_record->cond == 0xf)
13151 ret = arm_record_extension_space (arm_record);
13152 else
13153 {
13154 /* If this insn has fallen into extension space
13155 then we need not decode it anymore. */
13156 ret = arm_handle_insn[insn_id] (arm_record);
13157 }
13158 if (ret != ARM_RECORD_SUCCESS)
13159 {
13160 arm_record_unsupported_insn (arm_record);
13161 ret = -1;
13162 }
13163 }
13164 else if (THUMB_RECORD == record_type)
13165 {
13166 /* As thumb does not have condition codes, we set negative. */
13167 arm_record->cond = -1;
13168 insn_id = bits (arm_record->arm_insn, 13, 15);
13169 ret = thumb_handle_insn[insn_id] (arm_record);
13170 if (ret != ARM_RECORD_SUCCESS)
13171 {
13172 arm_record_unsupported_insn (arm_record);
13173 ret = -1;
13174 }
13175 }
13176 else if (THUMB2_RECORD == record_type)
13177 {
13178 /* As thumb does not have condition codes, we set negative. */
13179 arm_record->cond = -1;
13180
13181 /* Swap first half of 32bit thumb instruction with second half. */
13182 arm_record->arm_insn
13183 = (arm_record->arm_insn >> 16) | (arm_record->arm_insn << 16);
13184
13185 ret = thumb2_record_decode_insn_handler (arm_record);
13186
13187 if (ret != ARM_RECORD_SUCCESS)
13188 {
13189 arm_record_unsupported_insn (arm_record);
13190 ret = -1;
13191 }
13192 }
13193 else
13194 {
13195 /* Throw assertion. */
13196 gdb_assert_not_reached ("not a valid instruction, could not decode");
13197 }
13198
13199 return ret;
13200 }
13201
13202 #if GDB_SELF_TEST
13203 namespace selftests {
13204
13205 /* Provide both 16-bit and 32-bit thumb instructions. */
13206
class instruction_reader_thumb : public abstract_memory_reader
{
public:
  /* ENDIAN is the byte order used to encode instructions; INSNS is a
     static array of 16-bit halfwords, kept by reference (it must
     outlive this reader).  */
  template<size_t SIZE>
  instruction_reader_thumb (enum bfd_endian endian,
			    const uint16_t (&insns)[SIZE])
    : m_endian (endian), m_insns (insns), m_insns_size (SIZE)
  {}

  /* Serve reads from the canned halfword array.  LEN is 2 for a
     16-bit Thumb instruction and 4 for a 32-bit Thumb-2 one.  */
  bool read (CORE_ADDR memaddr, gdb_byte *buf, const size_t len) override
  {
    SELF_CHECK (len == 4 || len == 2);
    SELF_CHECK (memaddr % 2 == 0);
    SELF_CHECK ((memaddr / 2) < m_insns_size);

    store_unsigned_integer (buf, 2, m_endian, m_insns[memaddr / 2]);
    if (len == 4)
      {
	store_unsigned_integer (&buf[2], 2, m_endian,
				m_insns[memaddr / 2 + 1]);
      }
    return true;
  }

private:
  enum bfd_endian m_endian;	/* Byte order used to serialize halfwords.  */
  const uint16_t *m_insns;	/* Borrowed instruction array.  */
  size_t m_insns_size;		/* Number of halfwords in m_insns.  */
};
13236
static void
arm_record_test (void)
{
  struct gdbarch_info info;
  info.bfd_arch_info = bfd_scan_arch ("arm");

  struct gdbarch *gdbarch = gdbarch_find_by_info (info);

  SELF_CHECK (gdbarch != NULL);

  /* 16-bit Thumb instructions.  */
  {
    insn_decode_record arm_record;

    memset (&arm_record, 0, sizeof (insn_decode_record));
    arm_record.gdbarch = gdbarch;

    static const uint16_t insns[] = {
      /* db b2	uxtb	r3, r3 */
      0xb2db,
      /* cd 58	ldr	r5, [r1, r3] */
      0x58cd,
    };

    enum bfd_endian endian = gdbarch_byte_order_for_code (arm_record.gdbarch);
    instruction_reader_thumb reader (endian, insns);
    int ret = decode_insn (reader, &arm_record, THUMB_RECORD,
			   THUMB_INSN_SIZE_BYTES);

    /* uxtb writes exactly one register: r3.  */
    SELF_CHECK (ret == 0);
    SELF_CHECK (arm_record.mem_rec_count == 0);
    SELF_CHECK (arm_record.reg_rec_count == 1);
    SELF_CHECK (arm_record.arm_regs[0] == 3);

    /* Advance to the second (ldr) instruction and decode it too.  */
    arm_record.this_addr += 2;
    ret = decode_insn (reader, &arm_record, THUMB_RECORD,
		       THUMB_INSN_SIZE_BYTES);

    /* ldr writes exactly one register: r5.  */
    SELF_CHECK (ret == 0);
    SELF_CHECK (arm_record.mem_rec_count == 0);
    SELF_CHECK (arm_record.reg_rec_count == 1);
    SELF_CHECK (arm_record.arm_regs[0] == 5);
  }

  /* 32-bit Thumb-2 instructions.  */
  {
    insn_decode_record arm_record;

    memset (&arm_record, 0, sizeof (insn_decode_record));
    arm_record.gdbarch = gdbarch;

    static const uint16_t insns[] = {
      /* 1d ee 70 7f	 mrc	15, 0, r7, cr13, cr0, {3} */
      0xee1d, 0x7f70,
    };

    enum bfd_endian endian = gdbarch_byte_order_for_code (arm_record.gdbarch);
    instruction_reader_thumb reader (endian, insns);
    int ret = decode_insn (reader, &arm_record, THUMB2_RECORD,
			   THUMB2_INSN_SIZE_BYTES);

    /* mrc writes exactly one register: r7.  */
    SELF_CHECK (ret == 0);
    SELF_CHECK (arm_record.mem_rec_count == 0);
    SELF_CHECK (arm_record.reg_rec_count == 1);
    SELF_CHECK (arm_record.arm_regs[0] == 7);
  }
}
13304
13305 /* Instruction reader from manually cooked instruction sequences. */
13306
class test_arm_instruction_reader : public arm_instruction_reader
{
public:
  /* INSNS is a canned sequence of 32-bit ARM instruction words; the
     view is borrowed, so the backing array must outlive the reader.  */
  explicit test_arm_instruction_reader (gdb::array_view<const uint32_t> insns)
    : m_insns (insns)
  {}

  /* Return the instruction word at MEMADDR.  BYTE_ORDER is unused:
     the canned words are already host values.  */
  uint32_t read (CORE_ADDR memaddr, enum bfd_endian byte_order) const override
  {
    SELF_CHECK (memaddr % 4 == 0);
    SELF_CHECK (memaddr / 4 < m_insns.size ());

    return m_insns[memaddr / 4];
  }

private:
  const gdb::array_view<const uint32_t> m_insns;
};
13325
13326 static void
13327 arm_analyze_prologue_test ()
13328 {
13329 for (bfd_endian endianness : {BFD_ENDIAN_LITTLE, BFD_ENDIAN_BIG})
13330 {
13331 struct gdbarch_info info;
13332 info.byte_order = endianness;
13333 info.byte_order_for_code = endianness;
13334 info.bfd_arch_info = bfd_scan_arch ("arm");
13335
13336 struct gdbarch *gdbarch = gdbarch_find_by_info (info);
13337
13338 SELF_CHECK (gdbarch != NULL);
13339
13340 /* The "sub" instruction contains an immediate value rotate count of 0,
13341 which resulted in a 32-bit shift of a 32-bit value, caught by
13342 UBSan. */
13343 const uint32_t insns[] = {
13344 0xe92d4ff0, /* push {r4, r5, r6, r7, r8, r9, sl, fp, lr} */
13345 0xe1a05000, /* mov r5, r0 */
13346 0xe5903020, /* ldr r3, [r0, #32] */
13347 0xe24dd044, /* sub sp, sp, #68 ; 0x44 */
13348 };
13349
13350 test_arm_instruction_reader mem_reader (insns);
13351 arm_prologue_cache cache;
13352 cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);
13353
13354 arm_analyze_prologue (gdbarch, 0, sizeof (insns) - 1, &cache, mem_reader);
13355 }
13356 }
13357
13358 } // namespace selftests
13359 #endif /* GDB_SELF_TEST */
13360
13361 /* Cleans up local record registers and memory allocations. */
13362
13363 static void
13364 deallocate_reg_mem (insn_decode_record *record)
13365 {
13366 xfree (record->arm_regs);
13367 xfree (record->arm_mems);
13368 }
13369
13370
13371 /* Parse the current instruction and record the values of the registers and
13372 memory that will be changed in current instruction to record_arch_list".
13373 Return -1 if something is wrong. */
13374
13375 int
13376 arm_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
13377 CORE_ADDR insn_addr)
13378 {
13379
13380 uint32_t no_of_rec = 0;
13381 uint32_t ret = 0; /* return value: -1:record failure ; 0:success */
13382 ULONGEST t_bit = 0, insn_id = 0;
13383
13384 ULONGEST u_regval = 0;
13385
13386 insn_decode_record arm_record;
13387
13388 memset (&arm_record, 0, sizeof (insn_decode_record));
13389 arm_record.regcache = regcache;
13390 arm_record.this_addr = insn_addr;
13391 arm_record.gdbarch = gdbarch;
13392
13393
13394 if (record_debug > 1)
13395 {
13396 fprintf_unfiltered (gdb_stdlog, "Process record: arm_process_record "
13397 "addr = %s\n",
13398 paddress (gdbarch, arm_record.this_addr));
13399 }
13400
13401 instruction_reader reader;
13402 if (extract_arm_insn (reader, &arm_record, 2))
13403 {
13404 if (record_debug)
13405 {
13406 printf_unfiltered (_("Process record: error reading memory at "
13407 "addr %s len = %d.\n"),
13408 paddress (arm_record.gdbarch,
13409 arm_record.this_addr), 2);
13410 }
13411 return -1;
13412 }
13413
13414 /* Check the insn, whether it is thumb or arm one. */
13415
13416 t_bit = arm_psr_thumb_bit (arm_record.gdbarch);
13417 regcache_raw_read_unsigned (arm_record.regcache, ARM_PS_REGNUM, &u_regval);
13418
13419
13420 if (!(u_regval & t_bit))
13421 {
13422 /* We are decoding arm insn. */
13423 ret = decode_insn (reader, &arm_record, ARM_RECORD, ARM_INSN_SIZE_BYTES);
13424 }
13425 else
13426 {
13427 insn_id = bits (arm_record.arm_insn, 11, 15);
13428 /* is it thumb2 insn? */
13429 if ((0x1D == insn_id) || (0x1E == insn_id) || (0x1F == insn_id))
13430 {
13431 ret = decode_insn (reader, &arm_record, THUMB2_RECORD,
13432 THUMB2_INSN_SIZE_BYTES);
13433 }
13434 else
13435 {
13436 /* We are decoding thumb insn. */
13437 ret = decode_insn (reader, &arm_record, THUMB_RECORD,
13438 THUMB_INSN_SIZE_BYTES);
13439 }
13440 }
13441
13442 if (0 == ret)
13443 {
13444 /* Record registers. */
13445 record_full_arch_list_add_reg (arm_record.regcache, ARM_PC_REGNUM);
13446 if (arm_record.arm_regs)
13447 {
13448 for (no_of_rec = 0; no_of_rec < arm_record.reg_rec_count; no_of_rec++)
13449 {
13450 if (record_full_arch_list_add_reg
13451 (arm_record.regcache , arm_record.arm_regs[no_of_rec]))
13452 ret = -1;
13453 }
13454 }
13455 /* Record memories. */
13456 if (arm_record.arm_mems)
13457 {
13458 for (no_of_rec = 0; no_of_rec < arm_record.mem_rec_count; no_of_rec++)
13459 {
13460 if (record_full_arch_list_add_mem
13461 ((CORE_ADDR)arm_record.arm_mems[no_of_rec].addr,
13462 arm_record.arm_mems[no_of_rec].len))
13463 ret = -1;
13464 }
13465 }
13466
13467 if (record_full_arch_list_add_end ())
13468 ret = -1;
13469 }
13470
13471
13472 deallocate_reg_mem (&arm_record);
13473
13474 return ret;
13475 }
13476
13477 /* See arm-tdep.h. */
13478
13479 const target_desc *
13480 arm_read_description (arm_fp_type fp_type)
13481 {
13482 struct target_desc *tdesc = tdesc_arm_list[fp_type];
13483
13484 if (tdesc == nullptr)
13485 {
13486 tdesc = arm_create_target_description (fp_type);
13487 tdesc_arm_list[fp_type] = tdesc;
13488 }
13489
13490 return tdesc;
13491 }
13492
13493 /* See arm-tdep.h. */
13494
13495 const target_desc *
13496 arm_read_mprofile_description (arm_m_profile_type m_type)
13497 {
13498 struct target_desc *tdesc = tdesc_arm_mprofile_list[m_type];
13499
13500 if (tdesc == nullptr)
13501 {
13502 tdesc = arm_create_mprofile_target_description (m_type);
13503 tdesc_arm_mprofile_list[m_type] = tdesc;
13504 }
13505
13506 return tdesc;
13507 }