arm-tdep: replace arm_mapping_symbol VEC with std::vector
[deliverable/binutils-gdb.git] / gdb / arm-tdep.c
1 /* Common target dependent code for GDB on ARM systems.
2
3 Copyright (C) 1988-2019 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21
22 #include <ctype.h> /* XXX for isupper (). */
23
24 #include "frame.h"
25 #include "inferior.h"
26 #include "infrun.h"
27 #include "gdbcmd.h"
28 #include "gdbcore.h"
29 #include "dis-asm.h" /* For register styles. */
30 #include "disasm.h"
31 #include "regcache.h"
32 #include "reggroups.h"
33 #include "target-float.h"
34 #include "value.h"
35 #include "arch-utils.h"
36 #include "osabi.h"
37 #include "frame-unwind.h"
38 #include "frame-base.h"
39 #include "trad-frame.h"
40 #include "objfiles.h"
41 #include "dwarf2-frame.h"
42 #include "gdbtypes.h"
43 #include "prologue-value.h"
44 #include "remote.h"
45 #include "target-descriptions.h"
46 #include "user-regs.h"
47 #include "observable.h"
48
49 #include "arch/arm.h"
50 #include "arch/arm-get-next-pcs.h"
51 #include "arm-tdep.h"
52 #include "gdb/sim-arm.h"
53
54 #include "elf-bfd.h"
55 #include "coff/internal.h"
56 #include "elf/arm.h"
57
58 #include "common/vec.h"
59
60 #include "record.h"
61 #include "record-full.h"
62 #include <algorithm>
63
64 #include "features/arm/arm-with-m.c"
65 #include "features/arm/arm-with-m-fpa-layout.c"
66 #include "features/arm/arm-with-m-vfp-d16.c"
67 #include "features/arm/arm-with-iwmmxt.c"
68 #include "features/arm/arm-with-vfpv2.c"
69 #include "features/arm/arm-with-vfpv3.c"
70 #include "features/arm/arm-with-neon.c"
71
72 #if GDB_SELF_TEST
73 #include "common/selftest.h"
74 #endif
75
/* Non-zero enables ARM-specific debug output (see e.g. the prologue
   scanner's "Prologue scan stopped at ..." message).  */
static int arm_debug;

/* Macros for setting and testing a bit in a minimal symbol that marks
   it as Thumb function.  The MSB of the minimal symbol's "info" field
   is used for this purpose.

   MSYMBOL_SET_SPECIAL	Actually sets the "special" bit.
   MSYMBOL_IS_SPECIAL	Tests the "special" bit in a minimal symbol.  */

#define MSYMBOL_SET_SPECIAL(msym)				\
	MSYMBOL_TARGET_FLAG_1 (msym) = 1

#define MSYMBOL_IS_SPECIAL(msym) \
	MSYMBOL_TARGET_FLAG_1 (msym)
90
/* Per-objfile data used for mapping symbols.  */
static const struct objfile_data *arm_objfile_data_key;

/* One ELF mapping symbol ($a, $d, $t), stored as a section-relative
   address plus the type character.  */

struct arm_mapping_symbol
{
  /* Section-relative address of the mapping symbol (see
     arm_find_mapping_symbol, which builds its search key by
     subtracting the section base from an absolute address).  */
  bfd_vma value;

  /* The character following the '$' in the symbol name;
     arm_pc_is_thumb treats 't' as marking Thumb code.  */
  char type;

  /* Order mapping symbols by address, so the per-section vectors can
     be searched with std::lower_bound (see arm_find_mapping_symbol).  */
  bool operator< (const arm_mapping_symbol &other) const
  { return this->value < other.value; }
};

typedef std::vector<arm_mapping_symbol> arm_mapping_symbol_vec;
104
struct arm_per_objfile
{
  /* Allocate one (initially empty) mapping-symbol vector per BFD
     section of the objfile.  */
  explicit arm_per_objfile (size_t num_sections)
  : section_maps (new arm_mapping_symbol_vec[num_sections])
  {}

  DISABLE_COPY_AND_ASSIGN (arm_per_objfile);

  /* Information about mapping symbols ($a, $d, $t) in the objfile.

     The format is an array of vectors of arm_mapping_symbols, there is one
     vector for each section of the objfile (the array is indexed by BFD
     section index).

     For each section, the vector of arm_mapping_symbol is sorted by
     symbol value (address).  */
  std::unique_ptr<arm_mapping_symbol_vec[]> section_maps;
};
123
/* The list of available "set arm ..." and "show arm ..." commands.  */
static struct cmd_list_element *setarmcmdlist = NULL;
static struct cmd_list_element *showarmcmdlist = NULL;

/* The type of floating-point to use.  Keep this in sync with enum
   arm_float_model, and the help string in _initialize_arm_tdep.  */
static const char *const fp_model_strings[] =
{
  "auto",
  "softfpa",
  "fpa",
  "softvfp",
  "vfp",
  NULL
};

/* A variable that can be configured by the user.  */
static enum arm_float_model arm_fp_model = ARM_FLOAT_AUTO;
static const char *current_fp_model = "auto";

/* The ABI to use.  Keep this in sync with arm_abi_kind.  */
static const char *const arm_abi_strings[] =
{
  "auto",
  "APCS",
  "AAPCS",
  NULL
};

/* A variable that can be configured by the user.  */
static enum arm_abi_kind arm_abi_global = ARM_ABI_AUTO;
static const char *arm_abi_string = "auto";

/* The execution mode to assume.  */
static const char *const arm_mode_strings[] =
  {
    "auto",
    "arm",
    "thumb",
    NULL
  };

/* Mode assumed when no symbol information decides ARM vs Thumb;
   consulted late in arm_pc_is_thumb.  */
static const char *arm_fallback_mode_string = "auto";
/* Mode that overrides symbol information entirely; consulted early in
   arm_pc_is_thumb.  */
static const char *arm_force_mode_string = "auto";
168
/* The standard register names, and all the valid aliases for them.  Note
   that `fp', `sp' and `pc' are not added in this alias list, because they
   have been added as builtin user registers in
   std-regs.c:_initialize_frame_reg.  */
static const struct
{
  /* Alias accepted in user expressions.  */
  const char *name;
  /* Register number the alias resolves to.  */
  int regnum;
} arm_register_aliases[] = {
  /* Basic register numbers.  */
  { "r0", 0 },
  { "r1", 1 },
  { "r2", 2 },
  { "r3", 3 },
  { "r4", 4 },
  { "r5", 5 },
  { "r6", 6 },
  { "r7", 7 },
  { "r8", 8 },
  { "r9", 9 },
  { "r10", 10 },
  { "r11", 11 },
  { "r12", 12 },
  { "r13", 13 },
  { "r14", 14 },
  { "r15", 15 },
  /* Synonyms (argument and variable registers).  */
  { "a1", 0 },
  { "a2", 1 },
  { "a3", 2 },
  { "a4", 3 },
  { "v1", 4 },
  { "v2", 5 },
  { "v3", 6 },
  { "v4", 7 },
  { "v5", 8 },
  { "v6", 9 },
  { "v7", 10 },
  { "v8", 11 },
  /* Other platform-specific names for r9.  */
  { "sb", 9 },
  { "tr", 9 },
  /* Special names.  */
  { "ip", 12 },
  { "lr", 14 },
  /* Names used by GCC (not listed in the ARM EABI).  */
  { "sl", 10 },
  /* A special name from the older ATPCS.  */
  { "wr", 7 },
};
219
/* The register names, indexed by register number; the trailing
   comments show the indices.  */
static const char *const arm_register_names[] =
{"r0",  "r1",  "r2",  "r3",	/*  0  1  2  3 */
 "r4",  "r5",  "r6",  "r7",	/*  4  5  6  7 */
 "r8",  "r9",  "r10", "r11",	/*  8  9 10 11 */
 "r12", "sp",  "lr",  "pc",	/* 12 13 14 15 */
 "f0",  "f1",  "f2",  "f3",	/* 16 17 18 19 */
 "f4",  "f5",  "f6",  "f7",	/* 20 21 22 23 */
 "fps", "cpsr" };		/* 24 25       */

/* Holds the current set of options to be passed to the disassembler.  */
static char *arm_disassembler_options;

/* Valid register name styles.  */
static const char **valid_disassembly_styles;

/* Disassembly style to use.  Default to "std" register names.  */
static const char *disassembly_style;

/* This is used to keep the bfd arch_info in sync with the disassembly
   style.  */
static void set_disassembly_style_sfunc (const char *, int,
					 struct cmd_list_element *);
static void show_disassembly_style_sfunc (struct ui_file *, int,
					  struct cmd_list_element *,
					  const char *);

/* Forward declarations for functions defined later in this file.  */
static enum register_status arm_neon_quad_read (struct gdbarch *gdbarch,
						readable_regcache *regcache,
						int regnum, gdb_byte *buf);
static void arm_neon_quad_write (struct gdbarch *gdbarch,
				 struct regcache *regcache,
				 int regnum, const gdb_byte *buf);

static CORE_ADDR
  arm_get_next_pcs_syscall_next_pc (struct arm_get_next_pcs *self);


/* get_next_pcs operations, used by the arm_get_next_pcs machinery (see
   arch/arm-get-next-pcs.h) to compute the possible next PC values.  */
static struct arm_get_next_pcs_ops arm_get_next_pcs_ops = {
  arm_get_next_pcs_read_memory_unsigned_integer,
  arm_get_next_pcs_syscall_next_pc,
  arm_get_next_pcs_addr_bits_remove,
  arm_get_next_pcs_is_thumb,
  NULL,
};
265
/* Cache of what the prologue analyzers learned about one frame.  */
struct arm_prologue_cache
{
  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  */
  CORE_ADDR prev_sp;

  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */

  int framesize;

  /* The register used to hold the frame pointer for this frame.  */
  int framereg;

  /* Saved register offsets.  */
  struct trad_frame_saved_reg *saved_regs;
};

static CORE_ADDR arm_analyze_prologue (struct gdbarch *gdbarch,
				       CORE_ADDR prologue_start,
				       CORE_ADDR prologue_end,
				       struct arm_prologue_cache *cache);

/* Architecture version for displaced stepping.  This affects the behaviour
   of certain instructions, and really should not be hard-wired.  */

#define DISPLACED_STEPPING_ARCH_VERSION		5

/* Set to true if the 32-bit mode is in use.  */

int arm_apcs_32 = 1;
299
300 /* Return the bit mask in ARM_PS_REGNUM that indicates Thumb mode. */
301
302 int
303 arm_psr_thumb_bit (struct gdbarch *gdbarch)
304 {
305 if (gdbarch_tdep (gdbarch)->is_m)
306 return XPSR_T;
307 else
308 return CPSR_T;
309 }
310
311 /* Determine if the processor is currently executing in Thumb mode. */
312
313 int
314 arm_is_thumb (struct regcache *regcache)
315 {
316 ULONGEST cpsr;
317 ULONGEST t_bit = arm_psr_thumb_bit (regcache->arch ());
318
319 cpsr = regcache_raw_get_unsigned (regcache, ARM_PS_REGNUM);
320
321 return (cpsr & t_bit) != 0;
322 }
323
324 /* Determine if FRAME is executing in Thumb mode. */
325
326 int
327 arm_frame_is_thumb (struct frame_info *frame)
328 {
329 CORE_ADDR cpsr;
330 ULONGEST t_bit = arm_psr_thumb_bit (get_frame_arch (frame));
331
332 /* Every ARM frame unwinder can unwind the T bit of the CPSR, either
333 directly (from a signal frame or dummy frame) or by interpreting
334 the saved LR (from a prologue or DWARF frame). So consult it and
335 trust the unwinders. */
336 cpsr = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
337
338 return (cpsr & t_bit) != 0;
339 }
340
341 /* Search for the mapping symbol covering MEMADDR. If one is found,
342 return its type. Otherwise, return 0. If START is non-NULL,
343 set *START to the location of the mapping symbol. */
344
static char
arm_find_mapping_symbol (CORE_ADDR memaddr, CORE_ADDR *start)
{
  struct obj_section *sec;

  /* If there are mapping symbols, consult them.  */
  sec = find_pc_section (memaddr);
  if (sec != NULL)
    {
      arm_per_objfile *data
	= (struct arm_per_objfile *) objfile_data (sec->objfile,
						   arm_objfile_data_key);
      if (data != NULL)
	{
	  /* Mapping symbol values are section-relative, so convert
	     MEMADDR into a section offset to build the search key.  */
	  struct arm_mapping_symbol map_key
	    = { memaddr - obj_section_addr (sec), 0 };
	  const arm_mapping_symbol_vec &map
	    = data->section_maps[sec->the_bfd_section->index];
	  arm_mapping_symbol_vec::const_iterator it
	    = std::lower_bound (map.begin (), map.end (), map_key);

	  /* std::lower_bound finds the earliest ordered insertion
	     point.  If the symbol at this position starts at this exact
	     address, we use that; otherwise, the preceding
	     mapping symbol covers this address.  */
	  if (it < map.end ())
	    {
	      if (it->value == map_key.value)
		{
		  if (start)
		    *start = it->value + obj_section_addr (sec);
		  return it->type;
		}
	    }

	  /* No exact match; the symbol just before the insertion point
	     (if any) is the one whose range contains MEMADDR.  */
	  if (it > map.begin ())
	    {
	      arm_mapping_symbol_vec::const_iterator prev_it
		= it - 1;

	      if (start)
		*start = prev_it->value + obj_section_addr (sec);
	      return prev_it->type;
	    }
	}
    }

  /* No section, no per-objfile data, or an empty/too-late map: no
     mapping symbol covers MEMADDR.  */
  return 0;
}
394
395 /* Determine if the program counter specified in MEMADDR is in a Thumb
396 function. This function should be called for addresses unrelated to
397 any executing frame; otherwise, prefer arm_frame_is_thumb. */
398
399 int
400 arm_pc_is_thumb (struct gdbarch *gdbarch, CORE_ADDR memaddr)
401 {
402 struct bound_minimal_symbol sym;
403 char type;
404 arm_displaced_step_closure *dsc
405 = ((arm_displaced_step_closure * )
406 get_displaced_step_closure_by_addr (memaddr));
407
408 /* If checking the mode of displaced instruction in copy area, the mode
409 should be determined by instruction on the original address. */
410 if (dsc)
411 {
412 if (debug_displaced)
413 fprintf_unfiltered (gdb_stdlog,
414 "displaced: check mode of %.8lx instead of %.8lx\n",
415 (unsigned long) dsc->insn_addr,
416 (unsigned long) memaddr);
417 memaddr = dsc->insn_addr;
418 }
419
420 /* If bit 0 of the address is set, assume this is a Thumb address. */
421 if (IS_THUMB_ADDR (memaddr))
422 return 1;
423
424 /* If the user wants to override the symbol table, let him. */
425 if (strcmp (arm_force_mode_string, "arm") == 0)
426 return 0;
427 if (strcmp (arm_force_mode_string, "thumb") == 0)
428 return 1;
429
430 /* ARM v6-M and v7-M are always in Thumb mode. */
431 if (gdbarch_tdep (gdbarch)->is_m)
432 return 1;
433
434 /* If there are mapping symbols, consult them. */
435 type = arm_find_mapping_symbol (memaddr, NULL);
436 if (type)
437 return type == 't';
438
439 /* Thumb functions have a "special" bit set in minimal symbols. */
440 sym = lookup_minimal_symbol_by_pc (memaddr);
441 if (sym.minsym)
442 return (MSYMBOL_IS_SPECIAL (sym.minsym));
443
444 /* If the user wants to override the fallback mode, let them. */
445 if (strcmp (arm_fallback_mode_string, "arm") == 0)
446 return 0;
447 if (strcmp (arm_fallback_mode_string, "thumb") == 0)
448 return 1;
449
450 /* If we couldn't find any symbol, but we're talking to a running
451 target, then trust the current value of $cpsr. This lets
452 "display/i $pc" always show the correct mode (though if there is
453 a symbol table we will not reach here, so it still may not be
454 displayed in the mode it will be executed). */
455 if (target_has_registers)
456 return arm_frame_is_thumb (get_current_frame ());
457
458 /* Otherwise we're out of luck; we assume ARM. */
459 return 0;
460 }
461
462 /* Determine if the address specified equals any of these magic return
463 values, called EXC_RETURN, defined by the ARM v6-M and v7-M
464 architectures.
465
466 From ARMv6-M Reference Manual B1.5.8
467 Table B1-5 Exception return behavior
468
469 EXC_RETURN Return To Return Stack
470 0xFFFFFFF1 Handler mode Main
471 0xFFFFFFF9 Thread mode Main
472 0xFFFFFFFD Thread mode Process
473
474 From ARMv7-M Reference Manual B1.5.8
475 Table B1-8 EXC_RETURN definition of exception return behavior, no FP
476
477 EXC_RETURN Return To Return Stack
478 0xFFFFFFF1 Handler mode Main
479 0xFFFFFFF9 Thread mode Main
480 0xFFFFFFFD Thread mode Process
481
482 Table B1-9 EXC_RETURN definition of exception return behavior, with
483 FP
484
485 EXC_RETURN Return To Return Stack Frame Type
486 0xFFFFFFE1 Handler mode Main Extended
487 0xFFFFFFE9 Thread mode Main Extended
488 0xFFFFFFED Thread mode Process Extended
489 0xFFFFFFF1 Handler mode Main Basic
490 0xFFFFFFF9 Thread mode Main Basic
491 0xFFFFFFFD Thread mode Process Basic
492
493 For more details see "B1.5.8 Exception return behavior"
494 in both ARMv6-M and ARMv7-M Architecture Reference Manuals. */
495
496 static int
497 arm_m_addr_is_magic (CORE_ADDR addr)
498 {
499 switch (addr)
500 {
501 /* Values from Tables in B1.5.8 the EXC_RETURN definitions of
502 the exception return behavior. */
503 case 0xffffffe1:
504 case 0xffffffe9:
505 case 0xffffffed:
506 case 0xfffffff1:
507 case 0xfffffff9:
508 case 0xfffffffd:
509 /* Address is magic. */
510 return 1;
511
512 default:
513 /* Address is not magic. */
514 return 0;
515 }
516 }
517
518 /* Remove useless bits from addresses in a running program. */
519 static CORE_ADDR
520 arm_addr_bits_remove (struct gdbarch *gdbarch, CORE_ADDR val)
521 {
522 /* On M-profile devices, do not strip the low bit from EXC_RETURN
523 (the magic exception return address). */
524 if (gdbarch_tdep (gdbarch)->is_m
525 && arm_m_addr_is_magic (val))
526 return val;
527
528 if (arm_apcs_32)
529 return UNMAKE_THUMB_ADDR (val);
530 else
531 return (val & 0x03fffffc);
532 }
533
534 /* Return 1 if PC is the start of a compiler helper function which
535 can be safely ignored during prologue skipping. IS_THUMB is true
536 if the function is known to be a Thumb function due to the way it
537 is being called. */
538 static int
539 skip_prologue_function (struct gdbarch *gdbarch, CORE_ADDR pc, int is_thumb)
540 {
541 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
542 struct bound_minimal_symbol msym;
543
544 msym = lookup_minimal_symbol_by_pc (pc);
545 if (msym.minsym != NULL
546 && BMSYMBOL_VALUE_ADDRESS (msym) == pc
547 && MSYMBOL_LINKAGE_NAME (msym.minsym) != NULL)
548 {
549 const char *name = MSYMBOL_LINKAGE_NAME (msym.minsym);
550
551 /* The GNU linker's Thumb call stub to foo is named
552 __foo_from_thumb. */
553 if (strstr (name, "_from_thumb") != NULL)
554 name += 2;
555
556 /* On soft-float targets, __truncdfsf2 is called to convert promoted
557 arguments to their argument types in non-prototyped
558 functions. */
559 if (startswith (name, "__truncdfsf2"))
560 return 1;
561 if (startswith (name, "__aeabi_d2f"))
562 return 1;
563
564 /* Internal functions related to thread-local storage. */
565 if (startswith (name, "__tls_get_addr"))
566 return 1;
567 if (startswith (name, "__aeabi_read_tp"))
568 return 1;
569 }
570 else
571 {
572 /* If we run against a stripped glibc, we may be unable to identify
573 special functions by name. Check for one important case,
574 __aeabi_read_tp, by comparing the *code* against the default
575 implementation (this is hand-written ARM assembler in glibc). */
576
577 if (!is_thumb
578 && read_code_unsigned_integer (pc, 4, byte_order_for_code)
579 == 0xe3e00a0f /* mov r0, #0xffff0fff */
580 && read_code_unsigned_integer (pc + 4, 4, byte_order_for_code)
581 == 0xe240f01f) /* sub pc, r0, #31 */
582 return 1;
583 }
584
585 return 0;
586 }
587
/* Extract the immediate from instruction movw/movt of encoding T.  INSN1 is
   the first 16 bits of the instruction, and INSN2 is the second 16 bits of
   the instruction.  The 16-bit immediate is scattered across both
   halfwords (imm4:i:imm3:imm8); reassemble it.  */
#define EXTRACT_MOVW_MOVT_IMM_T(insn1, insn2) \
  ((bits ((insn1), 0, 3) << 12)               \
   | (bits ((insn1), 10, 10) << 11)           \
   | (bits ((insn2), 12, 14) << 8)            \
   | bits ((insn2), 0, 7))

/* Extract the immediate from instruction movw/movt of encoding A.  INSN is
   the 32-bit instruction.  The immediate is split as imm4:imm12.  */
#define EXTRACT_MOVW_MOVT_IMM_A(insn) \
  ((bits ((insn), 16, 19) << 12) \
   | bits ((insn), 0, 11))
602
/* Decode immediate value; implements ThumbExpandImmediate pseudo-op.  */

static unsigned int
thumb_expand_immediate (unsigned int imm)
{
  unsigned int count = imm >> 7;
  unsigned int byte = imm & 0xff;

  /* A rotation count of 8 or more encodes an 8-bit value with an
     implicit leading one, rotated into place.  */
  if (count >= 8)
    return (0x80 | (imm & 0x7f)) << (32 - count);

  /* Otherwise the low byte is replicated in one of four patterns.  */
  switch (count / 2)
    {
    case 0:
      return byte;
    case 1:
      return byte | (byte << 16);
    case 2:
      return (byte << 8) | (byte << 24);
    default:
      return byte | (byte << 8) | (byte << 16) | (byte << 24);
    }
}
626
/* Return 1 if the 16-bit Thumb instruction INSN restores SP in
   epilogue, 0 otherwise.  */

static int
thumb_instruction_restores_sp (unsigned short insn)
{
  if (insn == 0x46bd)			/* mov sp, r7 */
    return 1;
  if ((insn & 0xff80) == 0xb000)	/* add sp, imm */
    return 1;
  if ((insn & 0xfe00) == 0xbc00)	/* pop <registers> */
    return 1;
  return 0;
}
637
638 /* Analyze a Thumb prologue, looking for a recognizable stack frame
639 and frame pointer. Scan until we encounter a store that could
640 clobber the stack frame unexpectedly, or an unknown instruction.
641 Return the last address which is definitely safe to skip for an
642 initial breakpoint. */
643
644 static CORE_ADDR
645 thumb_analyze_prologue (struct gdbarch *gdbarch,
646 CORE_ADDR start, CORE_ADDR limit,
647 struct arm_prologue_cache *cache)
648 {
649 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
650 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
651 int i;
652 pv_t regs[16];
653 CORE_ADDR offset;
654 CORE_ADDR unrecognized_pc = 0;
655
656 for (i = 0; i < 16; i++)
657 regs[i] = pv_register (i, 0);
658 pv_area stack (ARM_SP_REGNUM, gdbarch_addr_bit (gdbarch));
659
660 while (start < limit)
661 {
662 unsigned short insn;
663
664 insn = read_code_unsigned_integer (start, 2, byte_order_for_code);
665
666 if ((insn & 0xfe00) == 0xb400) /* push { rlist } */
667 {
668 int regno;
669 int mask;
670
671 if (stack.store_would_trash (regs[ARM_SP_REGNUM]))
672 break;
673
674 /* Bits 0-7 contain a mask for registers R0-R7. Bit 8 says
675 whether to save LR (R14). */
676 mask = (insn & 0xff) | ((insn & 0x100) << 6);
677
678 /* Calculate offsets of saved R0-R7 and LR. */
679 for (regno = ARM_LR_REGNUM; regno >= 0; regno--)
680 if (mask & (1 << regno))
681 {
682 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
683 -4);
684 stack.store (regs[ARM_SP_REGNUM], 4, regs[regno]);
685 }
686 }
687 else if ((insn & 0xff80) == 0xb080) /* sub sp, #imm */
688 {
689 offset = (insn & 0x7f) << 2; /* get scaled offset */
690 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
691 -offset);
692 }
693 else if (thumb_instruction_restores_sp (insn))
694 {
695 /* Don't scan past the epilogue. */
696 break;
697 }
698 else if ((insn & 0xf800) == 0xa800) /* add Rd, sp, #imm */
699 regs[bits (insn, 8, 10)] = pv_add_constant (regs[ARM_SP_REGNUM],
700 (insn & 0xff) << 2);
701 else if ((insn & 0xfe00) == 0x1c00 /* add Rd, Rn, #imm */
702 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM))
703 regs[bits (insn, 0, 2)] = pv_add_constant (regs[bits (insn, 3, 5)],
704 bits (insn, 6, 8));
705 else if ((insn & 0xf800) == 0x3000 /* add Rd, #imm */
706 && pv_is_register (regs[bits (insn, 8, 10)], ARM_SP_REGNUM))
707 regs[bits (insn, 8, 10)] = pv_add_constant (regs[bits (insn, 8, 10)],
708 bits (insn, 0, 7));
709 else if ((insn & 0xfe00) == 0x1800 /* add Rd, Rn, Rm */
710 && pv_is_register (regs[bits (insn, 6, 8)], ARM_SP_REGNUM)
711 && pv_is_constant (regs[bits (insn, 3, 5)]))
712 regs[bits (insn, 0, 2)] = pv_add (regs[bits (insn, 3, 5)],
713 regs[bits (insn, 6, 8)]);
714 else if ((insn & 0xff00) == 0x4400 /* add Rd, Rm */
715 && pv_is_constant (regs[bits (insn, 3, 6)]))
716 {
717 int rd = (bit (insn, 7) << 3) + bits (insn, 0, 2);
718 int rm = bits (insn, 3, 6);
719 regs[rd] = pv_add (regs[rd], regs[rm]);
720 }
721 else if ((insn & 0xff00) == 0x4600) /* mov hi, lo or mov lo, hi */
722 {
723 int dst_reg = (insn & 0x7) + ((insn & 0x80) >> 4);
724 int src_reg = (insn & 0x78) >> 3;
725 regs[dst_reg] = regs[src_reg];
726 }
727 else if ((insn & 0xf800) == 0x9000) /* str rd, [sp, #off] */
728 {
729 /* Handle stores to the stack. Normally pushes are used,
730 but with GCC -mtpcs-frame, there may be other stores
731 in the prologue to create the frame. */
732 int regno = (insn >> 8) & 0x7;
733 pv_t addr;
734
735 offset = (insn & 0xff) << 2;
736 addr = pv_add_constant (regs[ARM_SP_REGNUM], offset);
737
738 if (stack.store_would_trash (addr))
739 break;
740
741 stack.store (addr, 4, regs[regno]);
742 }
743 else if ((insn & 0xf800) == 0x6000) /* str rd, [rn, #off] */
744 {
745 int rd = bits (insn, 0, 2);
746 int rn = bits (insn, 3, 5);
747 pv_t addr;
748
749 offset = bits (insn, 6, 10) << 2;
750 addr = pv_add_constant (regs[rn], offset);
751
752 if (stack.store_would_trash (addr))
753 break;
754
755 stack.store (addr, 4, regs[rd]);
756 }
757 else if (((insn & 0xf800) == 0x7000 /* strb Rd, [Rn, #off] */
758 || (insn & 0xf800) == 0x8000) /* strh Rd, [Rn, #off] */
759 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM))
760 /* Ignore stores of argument registers to the stack. */
761 ;
762 else if ((insn & 0xf800) == 0xc800 /* ldmia Rn!, { registers } */
763 && pv_is_register (regs[bits (insn, 8, 10)], ARM_SP_REGNUM))
764 /* Ignore block loads from the stack, potentially copying
765 parameters from memory. */
766 ;
767 else if ((insn & 0xf800) == 0x9800 /* ldr Rd, [Rn, #immed] */
768 || ((insn & 0xf800) == 0x6800 /* ldr Rd, [sp, #immed] */
769 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM)))
770 /* Similarly ignore single loads from the stack. */
771 ;
772 else if ((insn & 0xffc0) == 0x0000 /* lsls Rd, Rm, #0 */
773 || (insn & 0xffc0) == 0x1c00) /* add Rd, Rn, #0 */
774 /* Skip register copies, i.e. saves to another register
775 instead of the stack. */
776 ;
777 else if ((insn & 0xf800) == 0x2000) /* movs Rd, #imm */
778 /* Recognize constant loads; even with small stacks these are necessary
779 on Thumb. */
780 regs[bits (insn, 8, 10)] = pv_constant (bits (insn, 0, 7));
781 else if ((insn & 0xf800) == 0x4800) /* ldr Rd, [pc, #imm] */
782 {
783 /* Constant pool loads, for the same reason. */
784 unsigned int constant;
785 CORE_ADDR loc;
786
787 loc = start + 4 + bits (insn, 0, 7) * 4;
788 constant = read_memory_unsigned_integer (loc, 4, byte_order);
789 regs[bits (insn, 8, 10)] = pv_constant (constant);
790 }
791 else if (thumb_insn_size (insn) == 4) /* 32-bit Thumb-2 instructions. */
792 {
793 unsigned short inst2;
794
795 inst2 = read_code_unsigned_integer (start + 2, 2,
796 byte_order_for_code);
797
798 if ((insn & 0xf800) == 0xf000 && (inst2 & 0xe800) == 0xe800)
799 {
800 /* BL, BLX. Allow some special function calls when
801 skipping the prologue; GCC generates these before
802 storing arguments to the stack. */
803 CORE_ADDR nextpc;
804 int j1, j2, imm1, imm2;
805
806 imm1 = sbits (insn, 0, 10);
807 imm2 = bits (inst2, 0, 10);
808 j1 = bit (inst2, 13);
809 j2 = bit (inst2, 11);
810
811 offset = ((imm1 << 12) + (imm2 << 1));
812 offset ^= ((!j2) << 22) | ((!j1) << 23);
813
814 nextpc = start + 4 + offset;
815 /* For BLX make sure to clear the low bits. */
816 if (bit (inst2, 12) == 0)
817 nextpc = nextpc & 0xfffffffc;
818
819 if (!skip_prologue_function (gdbarch, nextpc,
820 bit (inst2, 12) != 0))
821 break;
822 }
823
824 else if ((insn & 0xffd0) == 0xe900 /* stmdb Rn{!},
825 { registers } */
826 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
827 {
828 pv_t addr = regs[bits (insn, 0, 3)];
829 int regno;
830
831 if (stack.store_would_trash (addr))
832 break;
833
834 /* Calculate offsets of saved registers. */
835 for (regno = ARM_LR_REGNUM; regno >= 0; regno--)
836 if (inst2 & (1 << regno))
837 {
838 addr = pv_add_constant (addr, -4);
839 stack.store (addr, 4, regs[regno]);
840 }
841
842 if (insn & 0x0020)
843 regs[bits (insn, 0, 3)] = addr;
844 }
845
846 else if ((insn & 0xff50) == 0xe940 /* strd Rt, Rt2,
847 [Rn, #+/-imm]{!} */
848 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
849 {
850 int regno1 = bits (inst2, 12, 15);
851 int regno2 = bits (inst2, 8, 11);
852 pv_t addr = regs[bits (insn, 0, 3)];
853
854 offset = inst2 & 0xff;
855 if (insn & 0x0080)
856 addr = pv_add_constant (addr, offset);
857 else
858 addr = pv_add_constant (addr, -offset);
859
860 if (stack.store_would_trash (addr))
861 break;
862
863 stack.store (addr, 4, regs[regno1]);
864 stack.store (pv_add_constant (addr, 4),
865 4, regs[regno2]);
866
867 if (insn & 0x0020)
868 regs[bits (insn, 0, 3)] = addr;
869 }
870
871 else if ((insn & 0xfff0) == 0xf8c0 /* str Rt,[Rn,+/-#imm]{!} */
872 && (inst2 & 0x0c00) == 0x0c00
873 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
874 {
875 int regno = bits (inst2, 12, 15);
876 pv_t addr = regs[bits (insn, 0, 3)];
877
878 offset = inst2 & 0xff;
879 if (inst2 & 0x0200)
880 addr = pv_add_constant (addr, offset);
881 else
882 addr = pv_add_constant (addr, -offset);
883
884 if (stack.store_would_trash (addr))
885 break;
886
887 stack.store (addr, 4, regs[regno]);
888
889 if (inst2 & 0x0100)
890 regs[bits (insn, 0, 3)] = addr;
891 }
892
893 else if ((insn & 0xfff0) == 0xf8c0 /* str.w Rt,[Rn,#imm] */
894 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
895 {
896 int regno = bits (inst2, 12, 15);
897 pv_t addr;
898
899 offset = inst2 & 0xfff;
900 addr = pv_add_constant (regs[bits (insn, 0, 3)], offset);
901
902 if (stack.store_would_trash (addr))
903 break;
904
905 stack.store (addr, 4, regs[regno]);
906 }
907
908 else if ((insn & 0xffd0) == 0xf880 /* str{bh}.w Rt,[Rn,#imm] */
909 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
910 /* Ignore stores of argument registers to the stack. */
911 ;
912
913 else if ((insn & 0xffd0) == 0xf800 /* str{bh} Rt,[Rn,#+/-imm] */
914 && (inst2 & 0x0d00) == 0x0c00
915 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
916 /* Ignore stores of argument registers to the stack. */
917 ;
918
919 else if ((insn & 0xffd0) == 0xe890 /* ldmia Rn[!],
920 { registers } */
921 && (inst2 & 0x8000) == 0x0000
922 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
923 /* Ignore block loads from the stack, potentially copying
924 parameters from memory. */
925 ;
926
927 else if ((insn & 0xffb0) == 0xe950 /* ldrd Rt, Rt2,
928 [Rn, #+/-imm] */
929 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
930 /* Similarly ignore dual loads from the stack. */
931 ;
932
933 else if ((insn & 0xfff0) == 0xf850 /* ldr Rt,[Rn,#+/-imm] */
934 && (inst2 & 0x0d00) == 0x0c00
935 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
936 /* Similarly ignore single loads from the stack. */
937 ;
938
939 else if ((insn & 0xfff0) == 0xf8d0 /* ldr.w Rt,[Rn,#imm] */
940 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
941 /* Similarly ignore single loads from the stack. */
942 ;
943
944 else if ((insn & 0xfbf0) == 0xf100 /* add.w Rd, Rn, #imm */
945 && (inst2 & 0x8000) == 0x0000)
946 {
947 unsigned int imm = ((bits (insn, 10, 10) << 11)
948 | (bits (inst2, 12, 14) << 8)
949 | bits (inst2, 0, 7));
950
951 regs[bits (inst2, 8, 11)]
952 = pv_add_constant (regs[bits (insn, 0, 3)],
953 thumb_expand_immediate (imm));
954 }
955
956 else if ((insn & 0xfbf0) == 0xf200 /* addw Rd, Rn, #imm */
957 && (inst2 & 0x8000) == 0x0000)
958 {
959 unsigned int imm = ((bits (insn, 10, 10) << 11)
960 | (bits (inst2, 12, 14) << 8)
961 | bits (inst2, 0, 7));
962
963 regs[bits (inst2, 8, 11)]
964 = pv_add_constant (regs[bits (insn, 0, 3)], imm);
965 }
966
967 else if ((insn & 0xfbf0) == 0xf1a0 /* sub.w Rd, Rn, #imm */
968 && (inst2 & 0x8000) == 0x0000)
969 {
970 unsigned int imm = ((bits (insn, 10, 10) << 11)
971 | (bits (inst2, 12, 14) << 8)
972 | bits (inst2, 0, 7));
973
974 regs[bits (inst2, 8, 11)]
975 = pv_add_constant (regs[bits (insn, 0, 3)],
976 - (CORE_ADDR) thumb_expand_immediate (imm));
977 }
978
979 else if ((insn & 0xfbf0) == 0xf2a0 /* subw Rd, Rn, #imm */
980 && (inst2 & 0x8000) == 0x0000)
981 {
982 unsigned int imm = ((bits (insn, 10, 10) << 11)
983 | (bits (inst2, 12, 14) << 8)
984 | bits (inst2, 0, 7));
985
986 regs[bits (inst2, 8, 11)]
987 = pv_add_constant (regs[bits (insn, 0, 3)], - (CORE_ADDR) imm);
988 }
989
990 else if ((insn & 0xfbff) == 0xf04f) /* mov.w Rd, #const */
991 {
992 unsigned int imm = ((bits (insn, 10, 10) << 11)
993 | (bits (inst2, 12, 14) << 8)
994 | bits (inst2, 0, 7));
995
996 regs[bits (inst2, 8, 11)]
997 = pv_constant (thumb_expand_immediate (imm));
998 }
999
1000 else if ((insn & 0xfbf0) == 0xf240) /* movw Rd, #const */
1001 {
1002 unsigned int imm
1003 = EXTRACT_MOVW_MOVT_IMM_T (insn, inst2);
1004
1005 regs[bits (inst2, 8, 11)] = pv_constant (imm);
1006 }
1007
1008 else if (insn == 0xea5f /* mov.w Rd,Rm */
1009 && (inst2 & 0xf0f0) == 0)
1010 {
1011 int dst_reg = (inst2 & 0x0f00) >> 8;
1012 int src_reg = inst2 & 0xf;
1013 regs[dst_reg] = regs[src_reg];
1014 }
1015
1016 else if ((insn & 0xff7f) == 0xf85f) /* ldr.w Rt,<label> */
1017 {
1018 /* Constant pool loads. */
1019 unsigned int constant;
1020 CORE_ADDR loc;
1021
1022 offset = bits (inst2, 0, 11);
1023 if (insn & 0x0080)
1024 loc = start + 4 + offset;
1025 else
1026 loc = start + 4 - offset;
1027
1028 constant = read_memory_unsigned_integer (loc, 4, byte_order);
1029 regs[bits (inst2, 12, 15)] = pv_constant (constant);
1030 }
1031
1032 else if ((insn & 0xff7f) == 0xe95f) /* ldrd Rt,Rt2,<label> */
1033 {
1034 /* Constant pool loads. */
1035 unsigned int constant;
1036 CORE_ADDR loc;
1037
1038 offset = bits (inst2, 0, 7) << 2;
1039 if (insn & 0x0080)
1040 loc = start + 4 + offset;
1041 else
1042 loc = start + 4 - offset;
1043
1044 constant = read_memory_unsigned_integer (loc, 4, byte_order);
1045 regs[bits (inst2, 12, 15)] = pv_constant (constant);
1046
1047 constant = read_memory_unsigned_integer (loc + 4, 4, byte_order);
1048 regs[bits (inst2, 8, 11)] = pv_constant (constant);
1049 }
1050
1051 else if (thumb2_instruction_changes_pc (insn, inst2))
1052 {
1053 /* Don't scan past anything that might change control flow. */
1054 break;
1055 }
1056 else
1057 {
1058 /* The optimizer might shove anything into the prologue,
1059 so we just skip what we don't recognize. */
1060 unrecognized_pc = start;
1061 }
1062
1063 start += 2;
1064 }
1065 else if (thumb_instruction_changes_pc (insn))
1066 {
1067 /* Don't scan past anything that might change control flow. */
1068 break;
1069 }
1070 else
1071 {
1072 /* The optimizer might shove anything into the prologue,
1073 so we just skip what we don't recognize. */
1074 unrecognized_pc = start;
1075 }
1076
1077 start += 2;
1078 }
1079
1080 if (arm_debug)
1081 fprintf_unfiltered (gdb_stdlog, "Prologue scan stopped at %s\n",
1082 paddress (gdbarch, start));
1083
1084 if (unrecognized_pc == 0)
1085 unrecognized_pc = start;
1086
1087 if (cache == NULL)
1088 return unrecognized_pc;
1089
1090 if (pv_is_register (regs[ARM_FP_REGNUM], ARM_SP_REGNUM))
1091 {
1092 /* Frame pointer is fp. Frame size is constant. */
1093 cache->framereg = ARM_FP_REGNUM;
1094 cache->framesize = -regs[ARM_FP_REGNUM].k;
1095 }
1096 else if (pv_is_register (regs[THUMB_FP_REGNUM], ARM_SP_REGNUM))
1097 {
1098 /* Frame pointer is r7. Frame size is constant. */
1099 cache->framereg = THUMB_FP_REGNUM;
1100 cache->framesize = -regs[THUMB_FP_REGNUM].k;
1101 }
1102 else
1103 {
1104 /* Try the stack pointer... this is a bit desperate. */
1105 cache->framereg = ARM_SP_REGNUM;
1106 cache->framesize = -regs[ARM_SP_REGNUM].k;
1107 }
1108
1109 for (i = 0; i < 16; i++)
1110 if (stack.find_reg (gdbarch, i, &offset))
1111 cache->saved_regs[i].addr = offset;
1112
1113 return unrecognized_pc;
1114 }
1115
1116
1117 /* Try to analyze the instructions starting from PC, which load symbol
1118 __stack_chk_guard. Return the address of instruction after loading this
1119 symbol, set the dest register number to *BASEREG, and set the size of
1120 instructions for loading symbol in OFFSET. Return 0 if instructions are
1121 not recognized. */
1122
static CORE_ADDR
arm_analyze_load_stack_chk_guard(CORE_ADDR pc, struct gdbarch *gdbarch,
				 unsigned int *destreg, int *offset)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  int is_thumb = arm_pc_is_thumb (gdbarch, pc);
  unsigned int low, high, address;

  /* ADDRESS stays 0 unless one of the recognized load sequences below
     matches; the caller treats 0 as "not recognized".  */
  address = 0;
  if (is_thumb)
    {
      unsigned short insn1
	= read_code_unsigned_integer (pc, 2, byte_order_for_code);

      if ((insn1 & 0xf800) == 0x4800)	/* ldr Rd, #immed */
	{
	  *destreg = bits (insn1, 8, 10);
	  *offset = 2;
	  /* Literal-pool load: Align(PC, 4) + 4 + imm8 * 4 is the pool
	     slot holding the symbol's address.  */
	  address = (pc & 0xfffffffc) + 4 + (bits (insn1, 0, 7) << 2);
	  address = read_memory_unsigned_integer (address, 4,
						  byte_order_for_code);
	}
      else if ((insn1 & 0xfbf0) == 0xf240) /* movw Rd, #const */
	{
	  unsigned short insn2
	    = read_code_unsigned_integer (pc + 2, 2, byte_order_for_code);

	  low = EXTRACT_MOVW_MOVT_IMM_T (insn1, insn2);

	  /* The movw only gives the low half; a following movt must
	     supply the high half for the sequence to be recognized.  */
	  insn1
	    = read_code_unsigned_integer (pc + 4, 2, byte_order_for_code);
	  insn2
	    = read_code_unsigned_integer (pc + 6, 2, byte_order_for_code);

	  /* movt Rd, #const */
	  if ((insn1 & 0xfbc0) == 0xf2c0)
	    {
	      high = EXTRACT_MOVW_MOVT_IMM_T (insn1, insn2);
	      *destreg = bits (insn2, 8, 11);
	      *offset = 8;
	      address = (high << 16 | low);
	    }
	}
    }
  else
    {
      unsigned int insn
	= read_code_unsigned_integer (pc, 4, byte_order_for_code);

      if ((insn & 0x0e5f0000) == 0x041f0000) /* ldr Rd, [PC, #immed] */
	{
	  /* NOTE(review): the U (add/subtract) bit is not part of the
	     mask and the offset is always added, so a negative literal
	     offset would be mishandled; presumably compilers emit only
	     the positive form here -- confirm.  */
	  address = bits (insn, 0, 11) + pc + 8;
	  address = read_memory_unsigned_integer (address, 4,
						  byte_order_for_code);

	  *destreg = bits (insn, 12, 15);
	  *offset = 4;
	}
      else if ((insn & 0x0ff00000) == 0x03000000) /* movw Rd, #const */
	{
	  low = EXTRACT_MOVW_MOVT_IMM_A (insn);

	  /* As in the Thumb case, require a matching movt to build the
	     full 32-bit address.  */
	  insn
	    = read_code_unsigned_integer (pc + 4, 4, byte_order_for_code);

	  if ((insn & 0x0ff00000) == 0x03400000) /* movt Rd, #const */
	    {
	      high = EXTRACT_MOVW_MOVT_IMM_A (insn);
	      *destreg = bits (insn, 12, 15);
	      *offset = 8;
	      address = (high << 16 | low);
	    }
	}
    }

  return address;
}
1200
1201 /* Try to skip a sequence of instructions used for stack protector. If PC
1202 points to the first instruction of this sequence, return the address of
1203 first instruction after this sequence, otherwise, return original PC.
1204
1205 On arm, this sequence of instructions is composed of mainly three steps,
1206 Step 1: load symbol __stack_chk_guard,
1207 Step 2: load from address of __stack_chk_guard,
1208 Step 3: store it to somewhere else.
1209
1210 Usually, instructions on step 2 and step 3 are the same on various ARM
1211 architectures. On step 2, it is one instruction 'ldr Rx, [Rn, #0]', and
1212 on step 3, it is also one instruction 'str Rx, [r7, #immd]'. However,
1213 instructions in step 1 vary from different ARM architectures. On ARMv7,
1214 they are,
1215
1216 movw Rn, #:lower16:__stack_chk_guard
1217 movt Rn, #:upper16:__stack_chk_guard
1218
1219 On ARMv5t, it is,
1220
1221 ldr Rn, .Label
1222 ....
   .Label:
1224 .word __stack_chk_guard
1225
1226 Since ldr/str is a very popular instruction, we can't use them as
1227 'fingerprint' or 'signature' of stack protector sequence. Here we choose
1228 sequence {movw/movt, ldr}/ldr/str plus symbol __stack_chk_guard, if not
   stripped, as the 'fingerprint' of a stack protector code sequence.  */
1230
1231 static CORE_ADDR
1232 arm_skip_stack_protector(CORE_ADDR pc, struct gdbarch *gdbarch)
1233 {
1234 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1235 unsigned int basereg;
1236 struct bound_minimal_symbol stack_chk_guard;
1237 int offset;
1238 int is_thumb = arm_pc_is_thumb (gdbarch, pc);
1239 CORE_ADDR addr;
1240
1241 /* Try to parse the instructions in Step 1. */
1242 addr = arm_analyze_load_stack_chk_guard (pc, gdbarch,
1243 &basereg, &offset);
1244 if (!addr)
1245 return pc;
1246
1247 stack_chk_guard = lookup_minimal_symbol_by_pc (addr);
1248 /* ADDR must correspond to a symbol whose name is __stack_chk_guard.
1249 Otherwise, this sequence cannot be for stack protector. */
1250 if (stack_chk_guard.minsym == NULL
1251 || !startswith (MSYMBOL_LINKAGE_NAME (stack_chk_guard.minsym), "__stack_chk_guard"))
1252 return pc;
1253
1254 if (is_thumb)
1255 {
1256 unsigned int destreg;
1257 unsigned short insn
1258 = read_code_unsigned_integer (pc + offset, 2, byte_order_for_code);
1259
1260 /* Step 2: ldr Rd, [Rn, #immed], encoding T1. */
1261 if ((insn & 0xf800) != 0x6800)
1262 return pc;
1263 if (bits (insn, 3, 5) != basereg)
1264 return pc;
1265 destreg = bits (insn, 0, 2);
1266
1267 insn = read_code_unsigned_integer (pc + offset + 2, 2,
1268 byte_order_for_code);
1269 /* Step 3: str Rd, [Rn, #immed], encoding T1. */
1270 if ((insn & 0xf800) != 0x6000)
1271 return pc;
1272 if (destreg != bits (insn, 0, 2))
1273 return pc;
1274 }
1275 else
1276 {
1277 unsigned int destreg;
1278 unsigned int insn
1279 = read_code_unsigned_integer (pc + offset, 4, byte_order_for_code);
1280
1281 /* Step 2: ldr Rd, [Rn, #immed], encoding A1. */
1282 if ((insn & 0x0e500000) != 0x04100000)
1283 return pc;
1284 if (bits (insn, 16, 19) != basereg)
1285 return pc;
1286 destreg = bits (insn, 12, 15);
1287 /* Step 3: str Rd, [Rn, #immed], encoding A1. */
1288 insn = read_code_unsigned_integer (pc + offset + 4,
1289 4, byte_order_for_code);
1290 if ((insn & 0x0e500000) != 0x04000000)
1291 return pc;
1292 if (bits (insn, 12, 15) != destreg)
1293 return pc;
1294 }
1295 /* The size of total two instructions ldr/str is 4 on Thumb-2, while 8
1296 on arm. */
1297 if (is_thumb)
1298 return pc + offset + 4;
1299 else
1300 return pc + offset + 8;
1301 }
1302
1303 /* Advance the PC across any function entry prologue instructions to
1304 reach some "real" code.
1305
1306 The APCS (ARM Procedure Call Standard) defines the following
1307 prologue:
1308
1309 mov ip, sp
1310 [stmfd sp!, {a1,a2,a3,a4}]
1311 stmfd sp!, {...,fp,ip,lr,pc}
1312 [stfe f7, [sp, #-12]!]
1313 [stfe f6, [sp, #-12]!]
1314 [stfe f5, [sp, #-12]!]
1315 [stfe f4, [sp, #-12]!]
1316 sub fp, ip, #nn @@ nn == 20 or 4 depending on second insn. */
1317
1318 static CORE_ADDR
1319 arm_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
1320 {
1321 CORE_ADDR func_addr, limit_pc;
1322
1323 /* See if we can determine the end of the prologue via the symbol table.
1324 If so, then return either PC, or the PC after the prologue, whichever
1325 is greater. */
1326 if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
1327 {
1328 CORE_ADDR post_prologue_pc
1329 = skip_prologue_using_sal (gdbarch, func_addr);
1330 struct compunit_symtab *cust = find_pc_compunit_symtab (func_addr);
1331
1332 if (post_prologue_pc)
1333 post_prologue_pc
1334 = arm_skip_stack_protector (post_prologue_pc, gdbarch);
1335
1336
1337 /* GCC always emits a line note before the prologue and another
1338 one after, even if the two are at the same address or on the
1339 same line. Take advantage of this so that we do not need to
1340 know every instruction that might appear in the prologue. We
1341 will have producer information for most binaries; if it is
1342 missing (e.g. for -gstabs), assuming the GNU tools. */
1343 if (post_prologue_pc
1344 && (cust == NULL
1345 || COMPUNIT_PRODUCER (cust) == NULL
1346 || startswith (COMPUNIT_PRODUCER (cust), "GNU ")
1347 || startswith (COMPUNIT_PRODUCER (cust), "clang ")))
1348 return post_prologue_pc;
1349
1350 if (post_prologue_pc != 0)
1351 {
1352 CORE_ADDR analyzed_limit;
1353
1354 /* For non-GCC compilers, make sure the entire line is an
1355 acceptable prologue; GDB will round this function's
1356 return value up to the end of the following line so we
1357 can not skip just part of a line (and we do not want to).
1358
1359 RealView does not treat the prologue specially, but does
1360 associate prologue code with the opening brace; so this
1361 lets us skip the first line if we think it is the opening
1362 brace. */
1363 if (arm_pc_is_thumb (gdbarch, func_addr))
1364 analyzed_limit = thumb_analyze_prologue (gdbarch, func_addr,
1365 post_prologue_pc, NULL);
1366 else
1367 analyzed_limit = arm_analyze_prologue (gdbarch, func_addr,
1368 post_prologue_pc, NULL);
1369
1370 if (analyzed_limit != post_prologue_pc)
1371 return func_addr;
1372
1373 return post_prologue_pc;
1374 }
1375 }
1376
1377 /* Can't determine prologue from the symbol table, need to examine
1378 instructions. */
1379
1380 /* Find an upper limit on the function prologue using the debug
1381 information. If the debug information could not be used to provide
1382 that bound, then use an arbitrary large number as the upper bound. */
1383 /* Like arm_scan_prologue, stop no later than pc + 64. */
1384 limit_pc = skip_prologue_using_sal (gdbarch, pc);
1385 if (limit_pc == 0)
1386 limit_pc = pc + 64; /* Magic. */
1387
1388
1389 /* Check if this is Thumb code. */
1390 if (arm_pc_is_thumb (gdbarch, pc))
1391 return thumb_analyze_prologue (gdbarch, pc, limit_pc, NULL);
1392 else
1393 return arm_analyze_prologue (gdbarch, pc, limit_pc, NULL);
1394 }
1395
1396 /* *INDENT-OFF* */
1397 /* Function: thumb_scan_prologue (helper function for arm_scan_prologue)
1398 This function decodes a Thumb function prologue to determine:
1399 1) the size of the stack frame
1400 2) which registers are saved on it
1401 3) the offsets of saved regs
1402 4) the offset from the stack pointer to the frame pointer
1403
1404 A typical Thumb function prologue would create this stack frame
1405 (offsets relative to FP)
1406 old SP -> 24 stack parameters
1407 20 LR
1408 16 R7
1409 R7 -> 0 local variables (16 bytes)
1410 SP -> -12 additional stack space (12 bytes)
1411 The frame size would thus be 36 bytes, and the frame offset would be
1412 12 bytes. The frame register is R7.
1413
1414 The comments for thumb_skip_prolog() describe the algorithm we use
1415 to detect the end of the prolog. */
1416 /* *INDENT-ON* */
1417
1418 static void
1419 thumb_scan_prologue (struct gdbarch *gdbarch, CORE_ADDR prev_pc,
1420 CORE_ADDR block_addr, struct arm_prologue_cache *cache)
1421 {
1422 CORE_ADDR prologue_start;
1423 CORE_ADDR prologue_end;
1424
1425 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
1426 &prologue_end))
1427 {
1428 /* See comment in arm_scan_prologue for an explanation of
1429 this heuristics. */
1430 if (prologue_end > prologue_start + 64)
1431 {
1432 prologue_end = prologue_start + 64;
1433 }
1434 }
1435 else
1436 /* We're in the boondocks: we have no idea where the start of the
1437 function is. */
1438 return;
1439
1440 prologue_end = std::min (prologue_end, prev_pc);
1441
1442 thumb_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
1443 }
1444
1445 /* Return 1 if the ARM instruction INSN restores SP in epilogue, 0
1446 otherwise. */
1447
1448 static int
1449 arm_instruction_restores_sp (unsigned int insn)
1450 {
1451 if (bits (insn, 28, 31) != INST_NV)
1452 {
1453 if ((insn & 0x0df0f000) == 0x0080d000
1454 /* ADD SP (register or immediate). */
1455 || (insn & 0x0df0f000) == 0x0040d000
1456 /* SUB SP (register or immediate). */
1457 || (insn & 0x0ffffff0) == 0x01a0d000
1458 /* MOV SP. */
1459 || (insn & 0x0fff0000) == 0x08bd0000
1460 /* POP (LDMIA). */
1461 || (insn & 0x0fff0000) == 0x049d0000)
1462 /* POP of a single register. */
1463 return 1;
1464 }
1465
1466 return 0;
1467 }
1468
1469 /* Analyze an ARM mode prologue starting at PROLOGUE_START and
1470 continuing no further than PROLOGUE_END. If CACHE is non-NULL,
1471 fill it in. Return the first address not recognized as a prologue
1472 instruction.
1473
1474 We recognize all the instructions typically found in ARM prologues,
1475 plus harmless instructions which can be skipped (either for analysis
1476 purposes, or a more restrictive set that can be skipped when finding
1477 the end of the prologue). */
1478
1479 static CORE_ADDR
1480 arm_analyze_prologue (struct gdbarch *gdbarch,
1481 CORE_ADDR prologue_start, CORE_ADDR prologue_end,
1482 struct arm_prologue_cache *cache)
1483 {
1484 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1485 int regno;
1486 CORE_ADDR offset, current_pc;
1487 pv_t regs[ARM_FPS_REGNUM];
1488 CORE_ADDR unrecognized_pc = 0;
1489
1490 /* Search the prologue looking for instructions that set up the
1491 frame pointer, adjust the stack pointer, and save registers.
1492
1493 Be careful, however, and if it doesn't look like a prologue,
1494 don't try to scan it. If, for instance, a frameless function
1495 begins with stmfd sp!, then we will tell ourselves there is
1496 a frame, which will confuse stack traceback, as well as "finish"
1497 and other operations that rely on a knowledge of the stack
1498 traceback. */
1499
1500 for (regno = 0; regno < ARM_FPS_REGNUM; regno++)
1501 regs[regno] = pv_register (regno, 0);
1502 pv_area stack (ARM_SP_REGNUM, gdbarch_addr_bit (gdbarch));
1503
1504 for (current_pc = prologue_start;
1505 current_pc < prologue_end;
1506 current_pc += 4)
1507 {
1508 unsigned int insn
1509 = read_code_unsigned_integer (current_pc, 4, byte_order_for_code);
1510
1511 if (insn == 0xe1a0c00d) /* mov ip, sp */
1512 {
1513 regs[ARM_IP_REGNUM] = regs[ARM_SP_REGNUM];
1514 continue;
1515 }
1516 else if ((insn & 0xfff00000) == 0xe2800000 /* add Rd, Rn, #n */
1517 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1518 {
1519 unsigned imm = insn & 0xff; /* immediate value */
1520 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1521 int rd = bits (insn, 12, 15);
1522 imm = (imm >> rot) | (imm << (32 - rot));
1523 regs[rd] = pv_add_constant (regs[bits (insn, 16, 19)], imm);
1524 continue;
1525 }
1526 else if ((insn & 0xfff00000) == 0xe2400000 /* sub Rd, Rn, #n */
1527 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1528 {
1529 unsigned imm = insn & 0xff; /* immediate value */
1530 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1531 int rd = bits (insn, 12, 15);
1532 imm = (imm >> rot) | (imm << (32 - rot));
1533 regs[rd] = pv_add_constant (regs[bits (insn, 16, 19)], -imm);
1534 continue;
1535 }
1536 else if ((insn & 0xffff0fff) == 0xe52d0004) /* str Rd,
1537 [sp, #-4]! */
1538 {
1539 if (stack.store_would_trash (regs[ARM_SP_REGNUM]))
1540 break;
1541 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -4);
1542 stack.store (regs[ARM_SP_REGNUM], 4,
1543 regs[bits (insn, 12, 15)]);
1544 continue;
1545 }
1546 else if ((insn & 0xffff0000) == 0xe92d0000)
1547 /* stmfd sp!, {..., fp, ip, lr, pc}
1548 or
1549 stmfd sp!, {a1, a2, a3, a4} */
1550 {
1551 int mask = insn & 0xffff;
1552
1553 if (stack.store_would_trash (regs[ARM_SP_REGNUM]))
1554 break;
1555
1556 /* Calculate offsets of saved registers. */
1557 for (regno = ARM_PC_REGNUM; regno >= 0; regno--)
1558 if (mask & (1 << regno))
1559 {
1560 regs[ARM_SP_REGNUM]
1561 = pv_add_constant (regs[ARM_SP_REGNUM], -4);
1562 stack.store (regs[ARM_SP_REGNUM], 4, regs[regno]);
1563 }
1564 }
1565 else if ((insn & 0xffff0000) == 0xe54b0000 /* strb rx,[r11,#-n] */
1566 || (insn & 0xffff00f0) == 0xe14b00b0 /* strh rx,[r11,#-n] */
1567 || (insn & 0xffffc000) == 0xe50b0000) /* str rx,[r11,#-n] */
1568 {
1569 /* No need to add this to saved_regs -- it's just an arg reg. */
1570 continue;
1571 }
1572 else if ((insn & 0xffff0000) == 0xe5cd0000 /* strb rx,[sp,#n] */
1573 || (insn & 0xffff00f0) == 0xe1cd00b0 /* strh rx,[sp,#n] */
1574 || (insn & 0xffffc000) == 0xe58d0000) /* str rx,[sp,#n] */
1575 {
1576 /* No need to add this to saved_regs -- it's just an arg reg. */
1577 continue;
1578 }
1579 else if ((insn & 0xfff00000) == 0xe8800000 /* stm Rn,
1580 { registers } */
1581 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1582 {
1583 /* No need to add this to saved_regs -- it's just arg regs. */
1584 continue;
1585 }
1586 else if ((insn & 0xfffff000) == 0xe24cb000) /* sub fp, ip #n */
1587 {
1588 unsigned imm = insn & 0xff; /* immediate value */
1589 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1590 imm = (imm >> rot) | (imm << (32 - rot));
1591 regs[ARM_FP_REGNUM] = pv_add_constant (regs[ARM_IP_REGNUM], -imm);
1592 }
1593 else if ((insn & 0xfffff000) == 0xe24dd000) /* sub sp, sp #n */
1594 {
1595 unsigned imm = insn & 0xff; /* immediate value */
1596 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1597 imm = (imm >> rot) | (imm << (32 - rot));
1598 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -imm);
1599 }
1600 else if ((insn & 0xffff7fff) == 0xed6d0103 /* stfe f?,
1601 [sp, -#c]! */
1602 && gdbarch_tdep (gdbarch)->have_fpa_registers)
1603 {
1604 if (stack.store_would_trash (regs[ARM_SP_REGNUM]))
1605 break;
1606
1607 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -12);
1608 regno = ARM_F0_REGNUM + ((insn >> 12) & 0x07);
1609 stack.store (regs[ARM_SP_REGNUM], 12, regs[regno]);
1610 }
1611 else if ((insn & 0xffbf0fff) == 0xec2d0200 /* sfmfd f0, 4,
1612 [sp!] */
1613 && gdbarch_tdep (gdbarch)->have_fpa_registers)
1614 {
1615 int n_saved_fp_regs;
1616 unsigned int fp_start_reg, fp_bound_reg;
1617
1618 if (stack.store_would_trash (regs[ARM_SP_REGNUM]))
1619 break;
1620
1621 if ((insn & 0x800) == 0x800) /* N0 is set */
1622 {
1623 if ((insn & 0x40000) == 0x40000) /* N1 is set */
1624 n_saved_fp_regs = 3;
1625 else
1626 n_saved_fp_regs = 1;
1627 }
1628 else
1629 {
1630 if ((insn & 0x40000) == 0x40000) /* N1 is set */
1631 n_saved_fp_regs = 2;
1632 else
1633 n_saved_fp_regs = 4;
1634 }
1635
1636 fp_start_reg = ARM_F0_REGNUM + ((insn >> 12) & 0x7);
1637 fp_bound_reg = fp_start_reg + n_saved_fp_regs;
1638 for (; fp_start_reg < fp_bound_reg; fp_start_reg++)
1639 {
1640 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -12);
1641 stack.store (regs[ARM_SP_REGNUM], 12,
1642 regs[fp_start_reg++]);
1643 }
1644 }
1645 else if ((insn & 0xff000000) == 0xeb000000 && cache == NULL) /* bl */
1646 {
1647 /* Allow some special function calls when skipping the
1648 prologue; GCC generates these before storing arguments to
1649 the stack. */
1650 CORE_ADDR dest = BranchDest (current_pc, insn);
1651
1652 if (skip_prologue_function (gdbarch, dest, 0))
1653 continue;
1654 else
1655 break;
1656 }
1657 else if ((insn & 0xf0000000) != 0xe0000000)
1658 break; /* Condition not true, exit early. */
1659 else if (arm_instruction_changes_pc (insn))
1660 /* Don't scan past anything that might change control flow. */
1661 break;
1662 else if (arm_instruction_restores_sp (insn))
1663 {
1664 /* Don't scan past the epilogue. */
1665 break;
1666 }
1667 else if ((insn & 0xfe500000) == 0xe8100000 /* ldm */
1668 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1669 /* Ignore block loads from the stack, potentially copying
1670 parameters from memory. */
1671 continue;
1672 else if ((insn & 0xfc500000) == 0xe4100000
1673 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1674 /* Similarly ignore single loads from the stack. */
1675 continue;
1676 else if ((insn & 0xffff0ff0) == 0xe1a00000)
1677 /* MOV Rd, Rm. Skip register copies, i.e. saves to another
1678 register instead of the stack. */
1679 continue;
1680 else
1681 {
1682 /* The optimizer might shove anything into the prologue, if
1683 we build up cache (cache != NULL) from scanning prologue,
1684 we just skip what we don't recognize and scan further to
1685 make cache as complete as possible. However, if we skip
1686 prologue, we'll stop immediately on unrecognized
1687 instruction. */
1688 unrecognized_pc = current_pc;
1689 if (cache != NULL)
1690 continue;
1691 else
1692 break;
1693 }
1694 }
1695
1696 if (unrecognized_pc == 0)
1697 unrecognized_pc = current_pc;
1698
1699 if (cache)
1700 {
1701 int framereg, framesize;
1702
1703 /* The frame size is just the distance from the frame register
1704 to the original stack pointer. */
1705 if (pv_is_register (regs[ARM_FP_REGNUM], ARM_SP_REGNUM))
1706 {
1707 /* Frame pointer is fp. */
1708 framereg = ARM_FP_REGNUM;
1709 framesize = -regs[ARM_FP_REGNUM].k;
1710 }
1711 else
1712 {
1713 /* Try the stack pointer... this is a bit desperate. */
1714 framereg = ARM_SP_REGNUM;
1715 framesize = -regs[ARM_SP_REGNUM].k;
1716 }
1717
1718 cache->framereg = framereg;
1719 cache->framesize = framesize;
1720
1721 for (regno = 0; regno < ARM_FPS_REGNUM; regno++)
1722 if (stack.find_reg (gdbarch, regno, &offset))
1723 cache->saved_regs[regno].addr = offset;
1724 }
1725
1726 if (arm_debug)
1727 fprintf_unfiltered (gdb_stdlog, "Prologue scan stopped at %s\n",
1728 paddress (gdbarch, unrecognized_pc));
1729
1730 return unrecognized_pc;
1731 }
1732
1733 static void
1734 arm_scan_prologue (struct frame_info *this_frame,
1735 struct arm_prologue_cache *cache)
1736 {
1737 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1738 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1739 CORE_ADDR prologue_start, prologue_end;
1740 CORE_ADDR prev_pc = get_frame_pc (this_frame);
1741 CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
1742
1743 /* Assume there is no frame until proven otherwise. */
1744 cache->framereg = ARM_SP_REGNUM;
1745 cache->framesize = 0;
1746
1747 /* Check for Thumb prologue. */
1748 if (arm_frame_is_thumb (this_frame))
1749 {
1750 thumb_scan_prologue (gdbarch, prev_pc, block_addr, cache);
1751 return;
1752 }
1753
1754 /* Find the function prologue. If we can't find the function in
1755 the symbol table, peek in the stack frame to find the PC. */
1756 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
1757 &prologue_end))
1758 {
1759 /* One way to find the end of the prologue (which works well
1760 for unoptimized code) is to do the following:
1761
1762 struct symtab_and_line sal = find_pc_line (prologue_start, 0);
1763
1764 if (sal.line == 0)
1765 prologue_end = prev_pc;
1766 else if (sal.end < prologue_end)
1767 prologue_end = sal.end;
1768
1769 This mechanism is very accurate so long as the optimizer
1770 doesn't move any instructions from the function body into the
1771 prologue. If this happens, sal.end will be the last
1772 instruction in the first hunk of prologue code just before
1773 the first instruction that the scheduler has moved from
1774 the body to the prologue.
1775
1776 In order to make sure that we scan all of the prologue
1777 instructions, we use a slightly less accurate mechanism which
1778 may scan more than necessary. To help compensate for this
1779 lack of accuracy, the prologue scanning loop below contains
1780 several clauses which'll cause the loop to terminate early if
1781 an implausible prologue instruction is encountered.
1782
1783 The expression
1784
1785 prologue_start + 64
1786
1787 is a suitable endpoint since it accounts for the largest
1788 possible prologue plus up to five instructions inserted by
1789 the scheduler. */
1790
1791 if (prologue_end > prologue_start + 64)
1792 {
1793 prologue_end = prologue_start + 64; /* See above. */
1794 }
1795 }
1796 else
1797 {
1798 /* We have no symbol information. Our only option is to assume this
1799 function has a standard stack frame and the normal frame register.
1800 Then, we can find the value of our frame pointer on entrance to
1801 the callee (or at the present moment if this is the innermost frame).
1802 The value stored there should be the address of the stmfd + 8. */
1803 CORE_ADDR frame_loc;
1804 ULONGEST return_value;
1805
1806 /* AAPCS does not use a frame register, so we can abort here. */
1807 if (gdbarch_tdep (gdbarch)->arm_abi == ARM_ABI_AAPCS)
1808 return;
1809
1810 frame_loc = get_frame_register_unsigned (this_frame, ARM_FP_REGNUM);
1811 if (!safe_read_memory_unsigned_integer (frame_loc, 4, byte_order,
1812 &return_value))
1813 return;
1814 else
1815 {
1816 prologue_start = gdbarch_addr_bits_remove
1817 (gdbarch, return_value) - 8;
1818 prologue_end = prologue_start + 64; /* See above. */
1819 }
1820 }
1821
1822 if (prev_pc < prologue_end)
1823 prologue_end = prev_pc;
1824
1825 arm_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
1826 }
1827
1828 static struct arm_prologue_cache *
1829 arm_make_prologue_cache (struct frame_info *this_frame)
1830 {
1831 int reg;
1832 struct arm_prologue_cache *cache;
1833 CORE_ADDR unwound_fp;
1834
1835 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
1836 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
1837
1838 arm_scan_prologue (this_frame, cache);
1839
1840 unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
1841 if (unwound_fp == 0)
1842 return cache;
1843
1844 cache->prev_sp = unwound_fp + cache->framesize;
1845
1846 /* Calculate actual addresses of saved registers using offsets
1847 determined by arm_scan_prologue. */
1848 for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
1849 if (trad_frame_addr_p (cache->saved_regs, reg))
1850 cache->saved_regs[reg].addr += cache->prev_sp;
1851
1852 return cache;
1853 }
1854
1855 /* Implementation of the stop_reason hook for arm_prologue frames. */
1856
1857 static enum unwind_stop_reason
1858 arm_prologue_unwind_stop_reason (struct frame_info *this_frame,
1859 void **this_cache)
1860 {
1861 struct arm_prologue_cache *cache;
1862 CORE_ADDR pc;
1863
1864 if (*this_cache == NULL)
1865 *this_cache = arm_make_prologue_cache (this_frame);
1866 cache = (struct arm_prologue_cache *) *this_cache;
1867
1868 /* This is meant to halt the backtrace at "_start". */
1869 pc = get_frame_pc (this_frame);
1870 if (pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
1871 return UNWIND_OUTERMOST;
1872
1873 /* If we've hit a wall, stop. */
1874 if (cache->prev_sp == 0)
1875 return UNWIND_OUTERMOST;
1876
1877 return UNWIND_NO_REASON;
1878 }
1879
1880 /* Our frame ID for a normal frame is the current function's starting PC
1881 and the caller's SP when we were called. */
1882
1883 static void
1884 arm_prologue_this_id (struct frame_info *this_frame,
1885 void **this_cache,
1886 struct frame_id *this_id)
1887 {
1888 struct arm_prologue_cache *cache;
1889 struct frame_id id;
1890 CORE_ADDR pc, func;
1891
1892 if (*this_cache == NULL)
1893 *this_cache = arm_make_prologue_cache (this_frame);
1894 cache = (struct arm_prologue_cache *) *this_cache;
1895
1896 /* Use function start address as part of the frame ID. If we cannot
1897 identify the start address (due to missing symbol information),
1898 fall back to just using the current PC. */
1899 pc = get_frame_pc (this_frame);
1900 func = get_frame_func (this_frame);
1901 if (!func)
1902 func = pc;
1903
1904 id = frame_id_build (cache->prev_sp, func);
1905 *this_id = id;
1906 }
1907
1908 static struct value *
1909 arm_prologue_prev_register (struct frame_info *this_frame,
1910 void **this_cache,
1911 int prev_regnum)
1912 {
1913 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1914 struct arm_prologue_cache *cache;
1915
1916 if (*this_cache == NULL)
1917 *this_cache = arm_make_prologue_cache (this_frame);
1918 cache = (struct arm_prologue_cache *) *this_cache;
1919
1920 /* If we are asked to unwind the PC, then we need to return the LR
1921 instead. The prologue may save PC, but it will point into this
1922 frame's prologue, not the next frame's resume location. Also
1923 strip the saved T bit. A valid LR may have the low bit set, but
1924 a valid PC never does. */
1925 if (prev_regnum == ARM_PC_REGNUM)
1926 {
1927 CORE_ADDR lr;
1928
1929 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
1930 return frame_unwind_got_constant (this_frame, prev_regnum,
1931 arm_addr_bits_remove (gdbarch, lr));
1932 }
1933
1934 /* SP is generally not saved to the stack, but this frame is
1935 identified by the next frame's stack pointer at the time of the call.
1936 The value was already reconstructed into PREV_SP. */
1937 if (prev_regnum == ARM_SP_REGNUM)
1938 return frame_unwind_got_constant (this_frame, prev_regnum, cache->prev_sp);
1939
1940 /* The CPSR may have been changed by the call instruction and by the
1941 called function. The only bit we can reconstruct is the T bit,
1942 by checking the low bit of LR as of the call. This is a reliable
1943 indicator of Thumb-ness except for some ARM v4T pre-interworking
1944 Thumb code, which could get away with a clear low bit as long as
1945 the called function did not use bx. Guess that all other
1946 bits are unchanged; the condition flags are presumably lost,
1947 but the processor status is likely valid. */
1948 if (prev_regnum == ARM_PS_REGNUM)
1949 {
1950 CORE_ADDR lr, cpsr;
1951 ULONGEST t_bit = arm_psr_thumb_bit (gdbarch);
1952
1953 cpsr = get_frame_register_unsigned (this_frame, prev_regnum);
1954 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
1955 if (IS_THUMB_ADDR (lr))
1956 cpsr |= t_bit;
1957 else
1958 cpsr &= ~t_bit;
1959 return frame_unwind_got_constant (this_frame, prev_regnum, cpsr);
1960 }
1961
1962 return trad_frame_get_prev_register (this_frame, cache->saved_regs,
1963 prev_regnum);
1964 }
1965
/* Unwinder for normal ARM frames, driven by the prologue analysis
   above.  */

struct frame_unwind arm_prologue_unwind = {
  NORMAL_FRAME,
  arm_prologue_unwind_stop_reason,
  arm_prologue_this_id,
  arm_prologue_prev_register,
  NULL,
  default_frame_sniffer
};
1974
1975 /* Maintain a list of ARM exception table entries per objfile, similar to the
1976 list of mapping symbols. We only cache entries for standard ARM-defined
1977 personality routines; the cache will contain only the frame unwinding
1978 instructions associated with the entry (not the descriptors). */
1979
/* Per-objfile key under which the cached exception-table data
   (struct arm_exidx_data) is registered.  */
static const struct objfile_data *arm_exidx_data_key;

/* One cached exception-index entry.  */
struct arm_exidx_entry
{
  /* Address the entry applies to.  */
  bfd_vma addr;
  /* Normalized unwinding instructions (see arm_exidx_new_objfile).  */
  gdb_byte *entry;
};
typedef struct arm_exidx_entry arm_exidx_entry_s;
DEF_VEC_O(arm_exidx_entry_s);

/* Cached exception-table data for one objfile: one vector of entries
   per BFD section, indexed by section index (see arm_exidx_data_free,
   which iterates over obfd->section_count).  */
struct arm_exidx_data
{
  VEC(arm_exidx_entry_s) **section_maps;
};
1994
1995 static void
1996 arm_exidx_data_free (struct objfile *objfile, void *arg)
1997 {
1998 struct arm_exidx_data *data = (struct arm_exidx_data *) arg;
1999 unsigned int i;
2000
2001 for (i = 0; i < objfile->obfd->section_count; i++)
2002 VEC_free (arm_exidx_entry_s, data->section_maps[i]);
2003 }
2004
2005 static inline int
2006 arm_compare_exidx_entries (const struct arm_exidx_entry *lhs,
2007 const struct arm_exidx_entry *rhs)
2008 {
2009 return lhs->addr < rhs->addr;
2010 }
2011
2012 static struct obj_section *
2013 arm_obj_section_from_vma (struct objfile *objfile, bfd_vma vma)
2014 {
2015 struct obj_section *osect;
2016
2017 ALL_OBJFILE_OSECTIONS (objfile, osect)
2018 if (bfd_get_section_flags (objfile->obfd,
2019 osect->the_bfd_section) & SEC_ALLOC)
2020 {
2021 bfd_vma start, size;
2022 start = bfd_get_section_vma (objfile->obfd, osect->the_bfd_section);
2023 size = bfd_get_section_size (osect->the_bfd_section);
2024
2025 if (start <= vma && vma < start + size)
2026 return osect;
2027 }
2028
2029 return NULL;
2030 }
2031
2032 /* Parse contents of exception table and exception index sections
2033 of OBJFILE, and fill in the exception table entry cache.
2034
2035 For each entry that refers to a standard ARM-defined personality
2036 routine, extract the frame unwinding instructions (from either
2037 the index or the table section). The unwinding instructions
2038 are normalized by:
2039 - extracting them from the rest of the table data
2040 - converting to host endianness
2041 - appending the implicit 0xb0 ("Finish") code
2042
2043 The extracted and normalized instructions are stored for later
2044 retrieval by the arm_find_exidx_entry routine. */
2045
static void
arm_exidx_new_objfile (struct objfile *objfile)
{
  struct arm_exidx_data *data;
  asection *exidx, *extab;
  bfd_vma exidx_vma = 0, extab_vma = 0;
  LONGEST i;

  /* If we've already touched this file, do nothing.  */
  if (!objfile || objfile_data (objfile, arm_exidx_data_key) != NULL)
    return;

  /* Read contents of exception table and index.  */
  exidx = bfd_get_section_by_name (objfile->obfd, ELF_STRING_ARM_unwind);
  gdb::byte_vector exidx_data;
  if (exidx)
    {
      exidx_vma = bfd_section_vma (objfile->obfd, exidx);
      exidx_data.resize (bfd_get_section_size (exidx));

      if (!bfd_get_section_contents (objfile->obfd, exidx,
				     exidx_data.data (), 0,
				     exidx_data.size ()))
	return;
    }

  extab = bfd_get_section_by_name (objfile->obfd, ".ARM.extab");
  gdb::byte_vector extab_data;
  if (extab)
    {
      extab_vma = bfd_section_vma (objfile->obfd, extab);
      extab_data.resize (bfd_get_section_size (extab));

      if (!bfd_get_section_contents (objfile->obfd, extab,
				     extab_data.data (), 0,
				     extab_data.size ()))
	return;
    }

  /* Allocate exception table data structure.  Everything is placed on
     the objfile obstack so it is released with the objfile.  */
  data = OBSTACK_ZALLOC (&objfile->objfile_obstack, struct arm_exidx_data);
  set_objfile_data (objfile, arm_exidx_data_key, data);
  data->section_maps = OBSTACK_CALLOC (&objfile->objfile_obstack,
				       objfile->obfd->section_count,
				       VEC(arm_exidx_entry_s) *);

  /* Fill in exception table.  Each index entry is a pair of 32-bit
     words: an offset to the function start, and either an inline table
     entry, an offset into .ARM.extab, or the EXIDX_CANTUNWIND
     marker.  */
  for (i = 0; i < exidx_data.size () / 8; i++)
    {
      struct arm_exidx_entry new_exidx_entry;
      bfd_vma idx = bfd_h_get_32 (objfile->obfd, exidx_data.data () + i * 8);
      bfd_vma val = bfd_h_get_32 (objfile->obfd,
				  exidx_data.data () + i * 8 + 4);
      bfd_vma addr = 0, word = 0;
      int n_bytes = 0, n_words = 0;
      struct obj_section *sec;
      gdb_byte *entry = NULL;

      /* Extract address of start of function.  The first word is a
	 31-bit self-relative offset; sign-extend it from 31 bits.  */
      idx = ((idx & 0x7fffffff) ^ 0x40000000) - 0x40000000;
      idx += exidx_vma + i * 8;

      /* Find section containing function and compute section offset.  */
      sec = arm_obj_section_from_vma (objfile, idx);
      if (sec == NULL)
	continue;
      idx -= bfd_get_section_vma (objfile->obfd, sec->the_bfd_section);

      /* Determine address of exception table entry.  */
      if (val == 1)
	{
	  /* EXIDX_CANTUNWIND -- no exception table entry present.  */
	}
      else if ((val & 0xff000000) == 0x80000000)
	{
	  /* Exception table entry embedded in .ARM.exidx
	     -- must be short form.  */
	  word = val;
	  n_bytes = 3;
	}
      else if (!(val & 0x80000000))
	{
	  /* Exception table entry in .ARM.extab.  The second word is
	     again a sign-extended 31-bit self-relative offset.  */
	  addr = ((val & 0x7fffffff) ^ 0x40000000) - 0x40000000;
	  addr += exidx_vma + i * 8 + 4;

	  if (addr >= extab_vma && addr + 4 <= extab_vma + extab_data.size ())
	    {
	      word = bfd_h_get_32 (objfile->obfd,
				   extab_data.data () + addr - extab_vma);
	      addr += 4;

	      if ((word & 0xff000000) == 0x80000000)
		{
		  /* Short form.  */
		  n_bytes = 3;
		}
	      else if ((word & 0xff000000) == 0x81000000
		       || (word & 0xff000000) == 0x82000000)
		{
		  /* Long form.  */
		  n_bytes = 2;
		  n_words = ((word >> 16) & 0xff);
		}
	      else if (!(word & 0x80000000))
		{
		  bfd_vma pers;
		  struct obj_section *pers_sec;
		  int gnu_personality = 0;

		  /* Custom personality routine.  */
		  pers = ((word & 0x7fffffff) ^ 0x40000000) - 0x40000000;
		  pers = UNMAKE_THUMB_ADDR (pers + addr - 4);

		  /* Check whether we've got one of the variants of the
		     GNU personality routines.  */
		  pers_sec = arm_obj_section_from_vma (objfile, pers);
		  if (pers_sec)
		    {
		      static const char *personality[] =
			{
			  "__gcc_personality_v0",
			  "__gxx_personality_v0",
			  "__gcj_personality_v0",
			  "__gnu_objc_personality_v0",
			  NULL
			};

		      CORE_ADDR pc = pers + obj_section_offset (pers_sec);
		      int k;

		      for (k = 0; personality[k]; k++)
			if (lookup_minimal_symbol_by_pc_name
			      (pc, personality[k], objfile))
			  {
			    gnu_personality = 1;
			    break;
			  }
		    }

		  /* If so, the next word contains a word count in the high
		     byte, followed by the same unwind instructions as the
		     pre-defined forms.  */
		  if (gnu_personality
		      && addr + 4 <= extab_vma + extab_data.size ())
		    {
		      word = bfd_h_get_32 (objfile->obfd,
					   (extab_data.data ()
					    + addr - extab_vma));
		      addr += 4;
		      n_bytes = 3;
		      n_words = ((word >> 24) & 0xff);
		    }
		}
	    }
	}

      /* Sanity check address.  */
      if (n_words)
	if (addr < extab_vma
	    || addr + 4 * n_words > extab_vma + extab_data.size ())
	  n_words = n_bytes = 0;

      /* The unwind instructions reside in WORD (only the N_BYTES least
	 significant bytes are valid), followed by N_WORDS words in the
	 extab section starting at ADDR.  */
      if (n_bytes || n_words)
	{
	  gdb_byte *p = entry
	    = (gdb_byte *) obstack_alloc (&objfile->objfile_obstack,
					  n_bytes + n_words * 4 + 1);

	  /* Copy the valid bytes of WORD, most significant first.  */
	  while (n_bytes--)
	    *p++ = (gdb_byte) ((word >> (8 * n_bytes)) & 0xff);

	  /* Copy the table words, converting each to big-endian byte
	     order (unwind instructions are read byte by byte).  */
	  while (n_words--)
	    {
	      word = bfd_h_get_32 (objfile->obfd,
				   extab_data.data () + addr - extab_vma);
	      addr += 4;

	      *p++ = (gdb_byte) ((word >> 24) & 0xff);
	      *p++ = (gdb_byte) ((word >> 16) & 0xff);
	      *p++ = (gdb_byte) ((word >> 8) & 0xff);
	      *p++ = (gdb_byte) (word & 0xff);
	    }

	  /* Implied "Finish" to terminate the list.  */
	  *p++ = 0xb0;
	}

      /* Push entry onto vector.  They are guaranteed to always
	 appear in order of increasing addresses.  */
      new_exidx_entry.addr = idx;
      new_exidx_entry.entry = entry;
      VEC_safe_push (arm_exidx_entry_s,
		     data->section_maps[sec->the_bfd_section->index],
		     &new_exidx_entry);
    }
}
2246
2247 /* Search for the exception table entry covering MEMADDR. If one is found,
2248 return a pointer to its data. Otherwise, return 0. If START is non-NULL,
2249 set *START to the start of the region covered by this entry. */
2250
static gdb_byte *
arm_find_exidx_entry (CORE_ADDR memaddr, CORE_ADDR *start)
{
  struct obj_section *sec;

  sec = find_pc_section (memaddr);
  if (sec != NULL)
    {
      struct arm_exidx_data *data;
      VEC(arm_exidx_entry_s) *map;
      /* Entry addresses are stored section-relative, so convert
	 MEMADDR to a section offset for the search key.  */
      struct arm_exidx_entry map_key = { memaddr - obj_section_addr (sec), 0 };
      unsigned int idx;

      data = ((struct arm_exidx_data *)
	      objfile_data (sec->objfile, arm_exidx_data_key));
      if (data != NULL)
	{
	  map = data->section_maps[sec->the_bfd_section->index];
	  if (!VEC_empty (arm_exidx_entry_s, map))
	    {
	      struct arm_exidx_entry *map_sym;

	      idx = VEC_lower_bound (arm_exidx_entry_s, map, &map_key,
				     arm_compare_exidx_entries);

	      /* VEC_lower_bound finds the earliest ordered insertion
		 point.  If the following symbol starts at this exact
		 address, we use that; otherwise, the preceding
		 exception table entry covers this address.  */
	      if (idx < VEC_length (arm_exidx_entry_s, map))
		{
		  map_sym = VEC_index (arm_exidx_entry_s, map, idx);
		  if (map_sym->addr == map_key.addr)
		    {
		      if (start)
			*start = map_sym->addr + obj_section_addr (sec);
		      /* Note: ENTRY is NULL for EXIDX_CANTUNWIND
			 entries, in which case we return NULL too.  */
		      return map_sym->entry;
		    }
		}

	      if (idx > 0)
		{
		  map_sym = VEC_index (arm_exidx_entry_s, map, idx - 1);
		  if (start)
		    *start = map_sym->addr + obj_section_addr (sec);
		  return map_sym->entry;
		}
	    }
	}
    }

  return NULL;
}
2304
2305 /* Given the current frame THIS_FRAME, and its associated frame unwinding
2306 instruction list from the ARM exception table entry ENTRY, allocate and
2307 return a prologue cache structure describing how to unwind this frame.
2308
2309 Return NULL if the unwinding instruction list contains a "spare",
2310 "reserved" or "refuse to unwind" instruction as defined in section
2311 "9.3 Frame unwinding instructions" of the "Exception Handling ABI
2312 for the ARM Architecture" document. */
2313
static struct arm_prologue_cache *
arm_exidx_fill_cache (struct frame_info *this_frame, gdb_byte *entry)
{
  CORE_ADDR vsp = 0;		/* Virtual stack pointer being unwound.  */
  int vsp_valid = 0;		/* Zero => VSP must be (re-)fetched.  */

  struct arm_prologue_cache *cache;
  cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);

  for (;;)
    {
      gdb_byte insn;

      /* Whenever we reload SP, we actually have to retrieve its
	 actual value in the current frame.  */
      if (!vsp_valid)
	{
	  if (trad_frame_realreg_p (cache->saved_regs, ARM_SP_REGNUM))
	    {
	      int reg = cache->saved_regs[ARM_SP_REGNUM].realreg;
	      vsp = get_frame_register_unsigned (this_frame, reg);
	    }
	  else
	    {
	      CORE_ADDR addr = cache->saved_regs[ARM_SP_REGNUM].addr;
	      vsp = get_frame_memory_unsigned (this_frame, addr, 4);
	    }

	  vsp_valid = 1;
	}

      /* Decode next unwind instruction.  */
      insn = *entry++;

      if ((insn & 0xc0) == 0)
	{
	  /* 00xxxxxx: vsp = vsp + (xxxxxx << 2) + 4.  */
	  int offset = insn & 0x3f;
	  vsp += (offset << 2) + 4;
	}
      else if ((insn & 0xc0) == 0x40)
	{
	  /* 01xxxxxx: vsp = vsp - (xxxxxx << 2) - 4.  */
	  int offset = insn & 0x3f;
	  vsp -= (offset << 2) + 4;
	}
      else if ((insn & 0xf0) == 0x80)
	{
	  int mask = ((insn & 0xf) << 8) | *entry++;
	  int i;

	  /* The special case of an all-zero mask identifies
	     "Refuse to unwind".  We return NULL to fall back
	     to the prologue analyzer.  */
	  if (mask == 0)
	    return NULL;

	  /* Pop registers r4..r15 under mask.  */
	  for (i = 0; i < 12; i++)
	    if (mask & (1 << i))
	      {
		cache->saved_regs[4 + i].addr = vsp;
		vsp += 4;
	      }

	  /* Special-case popping SP -- we need to reload vsp.  */
	  if (mask & (1 << (ARM_SP_REGNUM - 4)))
	    vsp_valid = 0;
	}
      else if ((insn & 0xf0) == 0x90)
	{
	  int reg = insn & 0xf;

	  /* Reserved cases.  */
	  if (reg == ARM_SP_REGNUM || reg == ARM_PC_REGNUM)
	    return NULL;

	  /* Set SP from another register and mark VSP for reload.  */
	  cache->saved_regs[ARM_SP_REGNUM] = cache->saved_regs[reg];
	  vsp_valid = 0;
	}
      else if ((insn & 0xf0) == 0xa0)
	{
	  int count = insn & 0x7;
	  int pop_lr = (insn & 0x8) != 0;
	  int i;

	  /* Pop r4..r[4+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[4 + i].addr = vsp;
	      vsp += 4;
	    }

	  /* If indicated by flag, pop LR as well.  */
	  if (pop_lr)
	    {
	      cache->saved_regs[ARM_LR_REGNUM].addr = vsp;
	      vsp += 4;
	    }
	}
      else if (insn == 0xb0)
	{
	  /* We could only have updated PC by popping into it; if so, it
	     will show up as address.  Otherwise, copy LR into PC.  */
	  if (!trad_frame_addr_p (cache->saved_regs, ARM_PC_REGNUM))
	    cache->saved_regs[ARM_PC_REGNUM]
	      = cache->saved_regs[ARM_LR_REGNUM];

	  /* We're done.  */
	  break;
	}
      else if (insn == 0xb1)
	{
	  int mask = *entry++;
	  int i;

	  /* All-zero mask and mask >= 16 is "spare".  */
	  if (mask == 0 || mask >= 16)
	    return NULL;

	  /* Pop r0..r3 under mask.  */
	  for (i = 0; i < 4; i++)
	    if (mask & (1 << i))
	      {
		cache->saved_regs[i].addr = vsp;
		vsp += 4;
	      }
	}
      else if (insn == 0xb2)
	{
	  /* vsp = vsp + 0x204 + (uleb128 << 2), with the offset encoded
	     as a ULEB128 (7 bits per byte, low bits first, high bit of
	     each byte set on all but the last).  */
	  ULONGEST offset = 0;
	  unsigned shift = 0;

	  do
	    {
	      offset |= (*entry & 0x7f) << shift;
	      shift += 7;
	    }
	  while (*entry++ & 0x80);

	  vsp += 0x204 + (offset << 2);
	}
      else if (insn == 0xb3)
	{
	  int start = *entry >> 4;
	  int count = (*entry++) & 0xf;
	  int i;

	  /* Only registers D0..D15 are valid here.  */
	  if (start + count >= 16)
	    return NULL;

	  /* Pop VFP double-precision registers D[start]..D[start+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_D0_REGNUM + start + i].addr = vsp;
	      vsp += 8;
	    }

	  /* Add an extra 4 bytes for FSTMFDX-style stack.  */
	  vsp += 4;
	}
      else if ((insn & 0xf8) == 0xb8)
	{
	  int count = insn & 0x7;
	  int i;

	  /* Pop VFP double-precision registers D[8]..D[8+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_D0_REGNUM + 8 + i].addr = vsp;
	      vsp += 8;
	    }

	  /* Add an extra 4 bytes for FSTMFDX-style stack.  */
	  vsp += 4;
	}
      else if (insn == 0xc6)
	{
	  int start = *entry >> 4;
	  int count = (*entry++) & 0xf;
	  int i;

	  /* Only registers WR0..WR15 are valid.  */
	  if (start + count >= 16)
	    return NULL;

	  /* Pop iwmmx registers WR[start]..WR[start+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_WR0_REGNUM + start + i].addr = vsp;
	      vsp += 8;
	    }
	}
      else if (insn == 0xc7)
	{
	  int mask = *entry++;
	  int i;

	  /* All-zero mask and mask >= 16 is "spare".  */
	  if (mask == 0 || mask >= 16)
	    return NULL;

	  /* Pop iwmmx general-purpose registers WCGR0..WCGR3 under mask.  */
	  for (i = 0; i < 4; i++)
	    if (mask & (1 << i))
	      {
		cache->saved_regs[ARM_WCGR0_REGNUM + i].addr = vsp;
		vsp += 4;
	      }
	}
      else if ((insn & 0xf8) == 0xc0)
	{
	  int count = insn & 0x7;
	  int i;

	  /* Pop iwmmx registers WR[10]..WR[10+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_WR0_REGNUM + 10 + i].addr = vsp;
	      vsp += 8;
	    }
	}
      else if (insn == 0xc8)
	{
	  int start = *entry >> 4;
	  int count = (*entry++) & 0xf;
	  int i;

	  /* Only registers D0..D31 are valid.  */
	  if (start + count >= 16)
	    return NULL;

	  /* Pop VFP double-precision registers
	     D[16+start]..D[16+start+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_D0_REGNUM + 16 + start + i].addr = vsp;
	      vsp += 8;
	    }
	}
      else if (insn == 0xc9)
	{
	  int start = *entry >> 4;
	  int count = (*entry++) & 0xf;
	  int i;

	  /* Pop VFP double-precision registers D[start]..D[start+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_D0_REGNUM + start + i].addr = vsp;
	      vsp += 8;
	    }
	}
      else if ((insn & 0xf8) == 0xd0)
	{
	  int count = insn & 0x7;
	  int i;

	  /* Pop VFP double-precision registers D[8]..D[8+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_D0_REGNUM + 8 + i].addr = vsp;
	      vsp += 8;
	    }
	}
      else
	{
	  /* Everything else is "spare".  */
	  return NULL;
	}
    }

  /* If we restore SP from a register, assume this was the frame register.
     Otherwise just fall back to SP as frame register.  */
  if (trad_frame_realreg_p (cache->saved_regs, ARM_SP_REGNUM))
    cache->framereg = cache->saved_regs[ARM_SP_REGNUM].realreg;
  else
    cache->framereg = ARM_SP_REGNUM;

  /* Determine offset to previous frame.  */
  cache->framesize
    = vsp - get_frame_register_unsigned (this_frame, cache->framereg);

  /* We already got the previous SP.  */
  cache->prev_sp = vsp;

  return cache;
}
2603
2604 /* Unwinding via ARM exception table entries. Note that the sniffer
2605 already computes a filled-in prologue cache, which is then used
2606 with the same arm_prologue_this_id and arm_prologue_prev_register
2607 routines also used for prologue-parsing based unwinding. */
2608
static int
arm_exidx_unwind_sniffer (const struct frame_unwind *self,
			  struct frame_info *this_frame,
			  void **this_prologue_cache)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  CORE_ADDR addr_in_block, exidx_region, func_start;
  struct arm_prologue_cache *cache;
  gdb_byte *entry;

  /* See if we have an ARM exception table entry covering this address.  */
  addr_in_block = get_frame_address_in_block (this_frame);
  entry = arm_find_exidx_entry (addr_in_block, &exidx_region);
  if (!entry)
    return 0;

  /* The ARM exception table does not describe unwind information
     for arbitrary PC values, but is guaranteed to be correct only
     at call sites.  We have to decide here whether we want to use
     ARM exception table information for this frame, or fall back
     to using prologue parsing.  (Note that if we have DWARF CFI,
     this sniffer isn't even called -- CFI is always preferred.)

     Before we make this decision, however, we check whether we
     actually have *symbol* information for the current frame.
     If not, prologue parsing would not work anyway, so we might
     as well use the exception table and hope for the best.  */
  if (find_pc_partial_function (addr_in_block, NULL, &func_start, NULL))
    {
      int exc_valid = 0;

      /* If the next frame is "normal", we are at a call site in this
	 frame, so exception information is guaranteed to be valid.  */
      if (get_next_frame (this_frame)
	  && get_frame_type (get_next_frame (this_frame)) == NORMAL_FRAME)
	exc_valid = 1;

      /* We also assume exception information is valid if we're currently
	 blocked in a system call.  The system library is supposed to
	 ensure this, so that e.g. pthread cancellation works.  */
      if (arm_frame_is_thumb (this_frame))
	{
	  ULONGEST insn;

	  /* Look for a 16-bit Thumb 'svc' immediately before PC.  */
	  if (safe_read_memory_unsigned_integer (get_frame_pc (this_frame) - 2,
						 2, byte_order_for_code, &insn)
	      && (insn & 0xff00) == 0xdf00 /* svc */)
	    exc_valid = 1;
	}
      else
	{
	  ULONGEST insn;

	  /* Look for a 32-bit ARM 'svc' immediately before PC.  */
	  if (safe_read_memory_unsigned_integer (get_frame_pc (this_frame) - 4,
						 4, byte_order_for_code, &insn)
	      && (insn & 0x0f000000) == 0x0f000000 /* svc */)
	    exc_valid = 1;
	}

      /* Bail out if we don't know that exception information is valid.  */
      if (!exc_valid)
	return 0;

      /* The ARM exception index does not mark the *end* of the region
	 covered by the entry, and some functions will not have any entry.
	 To correctly recognize the end of the covered region, the linker
	 should have inserted dummy records with a CANTUNWIND marker.

	 Unfortunately, current versions of GNU ld do not reliably do
	 this, and thus we may have found an incorrect entry above.
	 As a (temporary) sanity check, we only use the entry if it
	 lies *within* the bounds of the function.  Note that this check
	 might reject perfectly valid entries that just happen to cover
	 multiple functions; therefore this check ought to be removed
	 once the linker is fixed.  */
      if (func_start > exidx_region)
	return 0;
    }

  /* Decode the list of unwinding instructions into a prologue cache.
     Note that this may fail due to e.g. a "refuse to unwind" code.  */
  cache = arm_exidx_fill_cache (this_frame, entry);
  if (!cache)
    return 0;

  *this_prologue_cache = cache;
  return 1;
}
2698
/* Unwinder based on ARM exception table entries.  The sniffer builds
   the complete prologue cache, so the this_id and prev_register hooks
   are shared with the prologue unwinder.  */

struct frame_unwind arm_exidx_unwind = {
  NORMAL_FRAME,
  default_frame_unwind_stop_reason,
  arm_prologue_this_id,
  arm_prologue_prev_register,
  NULL,
  arm_exidx_unwind_sniffer
};
2707
2708 static struct arm_prologue_cache *
2709 arm_make_epilogue_frame_cache (struct frame_info *this_frame)
2710 {
2711 struct arm_prologue_cache *cache;
2712 int reg;
2713
2714 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
2715 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
2716
2717 /* Still rely on the offset calculated from prologue. */
2718 arm_scan_prologue (this_frame, cache);
2719
2720 /* Since we are in epilogue, the SP has been restored. */
2721 cache->prev_sp = get_frame_register_unsigned (this_frame, ARM_SP_REGNUM);
2722
2723 /* Calculate actual addresses of saved registers using offsets
2724 determined by arm_scan_prologue. */
2725 for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
2726 if (trad_frame_addr_p (cache->saved_regs, reg))
2727 cache->saved_regs[reg].addr += cache->prev_sp;
2728
2729 return cache;
2730 }
2731
2732 /* Implementation of function hook 'this_id' in
2733 'struct frame_uwnind' for epilogue unwinder. */
2734
2735 static void
2736 arm_epilogue_frame_this_id (struct frame_info *this_frame,
2737 void **this_cache,
2738 struct frame_id *this_id)
2739 {
2740 struct arm_prologue_cache *cache;
2741 CORE_ADDR pc, func;
2742
2743 if (*this_cache == NULL)
2744 *this_cache = arm_make_epilogue_frame_cache (this_frame);
2745 cache = (struct arm_prologue_cache *) *this_cache;
2746
2747 /* Use function start address as part of the frame ID. If we cannot
2748 identify the start address (due to missing symbol information),
2749 fall back to just using the current PC. */
2750 pc = get_frame_pc (this_frame);
2751 func = get_frame_func (this_frame);
2752 if (func == 0)
2753 func = pc;
2754
2755 (*this_id) = frame_id_build (cache->prev_sp, pc);
2756 }
2757
2758 /* Implementation of function hook 'prev_register' in
2759 'struct frame_uwnind' for epilogue unwinder. */
2760
2761 static struct value *
2762 arm_epilogue_frame_prev_register (struct frame_info *this_frame,
2763 void **this_cache, int regnum)
2764 {
2765 if (*this_cache == NULL)
2766 *this_cache = arm_make_epilogue_frame_cache (this_frame);
2767
2768 return arm_prologue_prev_register (this_frame, this_cache, regnum);
2769 }
2770
2771 static int arm_stack_frame_destroyed_p_1 (struct gdbarch *gdbarch,
2772 CORE_ADDR pc);
2773 static int thumb_stack_frame_destroyed_p (struct gdbarch *gdbarch,
2774 CORE_ADDR pc);
2775
2776 /* Implementation of function hook 'sniffer' in
2777 'struct frame_uwnind' for epilogue unwinder. */
2778
2779 static int
2780 arm_epilogue_frame_sniffer (const struct frame_unwind *self,
2781 struct frame_info *this_frame,
2782 void **this_prologue_cache)
2783 {
2784 if (frame_relative_level (this_frame) == 0)
2785 {
2786 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2787 CORE_ADDR pc = get_frame_pc (this_frame);
2788
2789 if (arm_frame_is_thumb (this_frame))
2790 return thumb_stack_frame_destroyed_p (gdbarch, pc);
2791 else
2792 return arm_stack_frame_destroyed_p_1 (gdbarch, pc);
2793 }
2794 else
2795 return 0;
2796 }
2797
2798 /* Frame unwinder from epilogue. */
2799
/* Unwinder used while the innermost frame is executing an epilogue;
   see arm_epilogue_frame_sniffer.  */

static const struct frame_unwind arm_epilogue_frame_unwind =
{
  NORMAL_FRAME,
  default_frame_unwind_stop_reason,
  arm_epilogue_frame_this_id,
  arm_epilogue_frame_prev_register,
  NULL,
  arm_epilogue_frame_sniffer,
};
2809
2810 /* Recognize GCC's trampoline for thumb call-indirect. If we are in a
2811 trampoline, return the target PC. Otherwise return 0.
2812
2813 void call0a (char c, short s, int i, long l) {}
2814
2815 int main (void)
2816 {
2817 (*pointer_to_call0a) (c, s, i, l);
2818 }
2819
2820 Instead of calling a stub library function _call_via_xx (xx is
2821 the register name), GCC may inline the trampoline in the object
2822 file as below (register r2 has the address of call0a).
2823
2824 .global main
2825 .type main, %function
2826 ...
2827 bl .L1
2828 ...
2829 .size main, .-main
2830
2831 .L1:
2832 bx r2
2833
2834 The trampoline 'bx r2' doesn't belong to main. */
2835
2836 static CORE_ADDR
2837 arm_skip_bx_reg (struct frame_info *frame, CORE_ADDR pc)
2838 {
2839 /* The heuristics of recognizing such trampoline is that FRAME is
2840 executing in Thumb mode and the instruction on PC is 'bx Rm'. */
2841 if (arm_frame_is_thumb (frame))
2842 {
2843 gdb_byte buf[2];
2844
2845 if (target_read_memory (pc, buf, 2) == 0)
2846 {
2847 struct gdbarch *gdbarch = get_frame_arch (frame);
2848 enum bfd_endian byte_order_for_code
2849 = gdbarch_byte_order_for_code (gdbarch);
2850 uint16_t insn
2851 = extract_unsigned_integer (buf, 2, byte_order_for_code);
2852
2853 if ((insn & 0xff80) == 0x4700) /* bx <Rm> */
2854 {
2855 CORE_ADDR dest
2856 = get_frame_register_unsigned (frame, bits (insn, 3, 6));
2857
2858 /* Clear the LSB so that gdb core sets step-resume
2859 breakpoint at the right address. */
2860 return UNMAKE_THUMB_ADDR (dest);
2861 }
2862 }
2863 }
2864
2865 return 0;
2866 }
2867
2868 static struct arm_prologue_cache *
2869 arm_make_stub_cache (struct frame_info *this_frame)
2870 {
2871 struct arm_prologue_cache *cache;
2872
2873 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
2874 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
2875
2876 cache->prev_sp = get_frame_register_unsigned (this_frame, ARM_SP_REGNUM);
2877
2878 return cache;
2879 }
2880
2881 /* Our frame ID for a stub frame is the current SP and LR. */
2882
2883 static void
2884 arm_stub_this_id (struct frame_info *this_frame,
2885 void **this_cache,
2886 struct frame_id *this_id)
2887 {
2888 struct arm_prologue_cache *cache;
2889
2890 if (*this_cache == NULL)
2891 *this_cache = arm_make_stub_cache (this_frame);
2892 cache = (struct arm_prologue_cache *) *this_cache;
2893
2894 *this_id = frame_id_build (cache->prev_sp, get_frame_pc (this_frame));
2895 }
2896
2897 static int
2898 arm_stub_unwind_sniffer (const struct frame_unwind *self,
2899 struct frame_info *this_frame,
2900 void **this_prologue_cache)
2901 {
2902 CORE_ADDR addr_in_block;
2903 gdb_byte dummy[4];
2904 CORE_ADDR pc, start_addr;
2905 const char *name;
2906
2907 addr_in_block = get_frame_address_in_block (this_frame);
2908 pc = get_frame_pc (this_frame);
2909 if (in_plt_section (addr_in_block)
2910 /* We also use the stub winder if the target memory is unreadable
2911 to avoid having the prologue unwinder trying to read it. */
2912 || target_read_memory (pc, dummy, 4) != 0)
2913 return 1;
2914
2915 if (find_pc_partial_function (pc, &name, &start_addr, NULL) == 0
2916 && arm_skip_bx_reg (this_frame, pc) != 0)
2917 return 1;
2918
2919 return 0;
2920 }
2921
/* Unwinder for PLT stubs, unreadable code, and anonymous trampolines;
   see arm_stub_unwind_sniffer.  */

struct frame_unwind arm_stub_unwind = {
  NORMAL_FRAME,
  default_frame_unwind_stop_reason,
  arm_stub_this_id,
  arm_prologue_prev_register,
  NULL,
  arm_stub_unwind_sniffer
};
2930
2931 /* Put here the code to store, into CACHE->saved_regs, the addresses
2932 of the saved registers of frame described by THIS_FRAME. CACHE is
2933 returned. */
2934
2935 static struct arm_prologue_cache *
2936 arm_m_exception_cache (struct frame_info *this_frame)
2937 {
2938 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2939 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2940 struct arm_prologue_cache *cache;
2941 CORE_ADDR unwound_sp;
2942 LONGEST xpsr;
2943
2944 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
2945 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
2946
2947 unwound_sp = get_frame_register_unsigned (this_frame,
2948 ARM_SP_REGNUM);
2949
2950 /* The hardware saves eight 32-bit words, comprising xPSR,
2951 ReturnAddress, LR (R14), R12, R3, R2, R1, R0. See details in
2952 "B1.5.6 Exception entry behavior" in
2953 "ARMv7-M Architecture Reference Manual". */
2954 cache->saved_regs[0].addr = unwound_sp;
2955 cache->saved_regs[1].addr = unwound_sp + 4;
2956 cache->saved_regs[2].addr = unwound_sp + 8;
2957 cache->saved_regs[3].addr = unwound_sp + 12;
2958 cache->saved_regs[12].addr = unwound_sp + 16;
2959 cache->saved_regs[14].addr = unwound_sp + 20;
2960 cache->saved_regs[15].addr = unwound_sp + 24;
2961 cache->saved_regs[ARM_PS_REGNUM].addr = unwound_sp + 28;
2962
2963 /* If bit 9 of the saved xPSR is set, then there is a four-byte
2964 aligner between the top of the 32-byte stack frame and the
2965 previous context's stack pointer. */
2966 cache->prev_sp = unwound_sp + 32;
2967 if (safe_read_memory_integer (unwound_sp + 28, 4, byte_order, &xpsr)
2968 && (xpsr & (1 << 9)) != 0)
2969 cache->prev_sp += 4;
2970
2971 return cache;
2972 }
2973
2974 /* Implementation of function hook 'this_id' in
2975 'struct frame_uwnind'. */
2976
2977 static void
2978 arm_m_exception_this_id (struct frame_info *this_frame,
2979 void **this_cache,
2980 struct frame_id *this_id)
2981 {
2982 struct arm_prologue_cache *cache;
2983
2984 if (*this_cache == NULL)
2985 *this_cache = arm_m_exception_cache (this_frame);
2986 cache = (struct arm_prologue_cache *) *this_cache;
2987
2988 /* Our frame ID for a stub frame is the current SP and LR. */
2989 *this_id = frame_id_build (cache->prev_sp,
2990 get_frame_pc (this_frame));
2991 }
2992
2993 /* Implementation of function hook 'prev_register' in
2994 'struct frame_uwnind'. */
2995
2996 static struct value *
2997 arm_m_exception_prev_register (struct frame_info *this_frame,
2998 void **this_cache,
2999 int prev_regnum)
3000 {
3001 struct arm_prologue_cache *cache;
3002
3003 if (*this_cache == NULL)
3004 *this_cache = arm_m_exception_cache (this_frame);
3005 cache = (struct arm_prologue_cache *) *this_cache;
3006
3007 /* The value was already reconstructed into PREV_SP. */
3008 if (prev_regnum == ARM_SP_REGNUM)
3009 return frame_unwind_got_constant (this_frame, prev_regnum,
3010 cache->prev_sp);
3011
3012 return trad_frame_get_prev_register (this_frame, cache->saved_regs,
3013 prev_regnum);
3014 }
3015
3016 /* Implementation of function hook 'sniffer' in
3017 'struct frame_uwnind'. */
3018
3019 static int
3020 arm_m_exception_unwind_sniffer (const struct frame_unwind *self,
3021 struct frame_info *this_frame,
3022 void **this_prologue_cache)
3023 {
3024 CORE_ADDR this_pc = get_frame_pc (this_frame);
3025
3026 /* No need to check is_m; this sniffer is only registered for
3027 M-profile architectures. */
3028
3029 /* Check if exception frame returns to a magic PC value. */
3030 return arm_m_addr_is_magic (this_pc);
3031 }
3032
3033 /* Frame unwinder for M-profile exceptions. */
3034
struct frame_unwind arm_m_exception_unwind =
{
  /* Exception frames are detected via a magic PC value; see
     arm_m_exception_unwind_sniffer.  */
  SIGTRAMP_FRAME,
  default_frame_unwind_stop_reason,
  arm_m_exception_this_id,
  arm_m_exception_prev_register,
  NULL,
  arm_m_exception_unwind_sniffer
};
3044
3045 static CORE_ADDR
3046 arm_normal_frame_base (struct frame_info *this_frame, void **this_cache)
3047 {
3048 struct arm_prologue_cache *cache;
3049
3050 if (*this_cache == NULL)
3051 *this_cache = arm_make_prologue_cache (this_frame);
3052 cache = (struct arm_prologue_cache *) *this_cache;
3053
3054 return cache->prev_sp - cache->framesize;
3055 }
3056
/* Frame base for frames handled by the prologue analyzer: the same
   address is used for the frame base, locals and arguments.  */
struct frame_base arm_normal_base = {
  &arm_prologue_unwind,
  arm_normal_frame_base,	/* this_base */
  arm_normal_frame_base,	/* this_locals_address */
  arm_normal_frame_base		/* this_args_address */
};
3063
3064 static struct value *
3065 arm_dwarf2_prev_register (struct frame_info *this_frame, void **this_cache,
3066 int regnum)
3067 {
3068 struct gdbarch * gdbarch = get_frame_arch (this_frame);
3069 CORE_ADDR lr, cpsr;
3070 ULONGEST t_bit = arm_psr_thumb_bit (gdbarch);
3071
3072 switch (regnum)
3073 {
3074 case ARM_PC_REGNUM:
3075 /* The PC is normally copied from the return column, which
3076 describes saves of LR. However, that version may have an
3077 extra bit set to indicate Thumb state. The bit is not
3078 part of the PC. */
3079 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
3080 return frame_unwind_got_constant (this_frame, regnum,
3081 arm_addr_bits_remove (gdbarch, lr));
3082
3083 case ARM_PS_REGNUM:
3084 /* Reconstruct the T bit; see arm_prologue_prev_register for details. */
3085 cpsr = get_frame_register_unsigned (this_frame, regnum);
3086 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
3087 if (IS_THUMB_ADDR (lr))
3088 cpsr |= t_bit;
3089 else
3090 cpsr &= ~t_bit;
3091 return frame_unwind_got_constant (this_frame, regnum, cpsr);
3092
3093 default:
3094 internal_error (__FILE__, __LINE__,
3095 _("Unexpected register %d"), regnum);
3096 }
3097 }
3098
3099 static void
3100 arm_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
3101 struct dwarf2_frame_state_reg *reg,
3102 struct frame_info *this_frame)
3103 {
3104 switch (regnum)
3105 {
3106 case ARM_PC_REGNUM:
3107 case ARM_PS_REGNUM:
3108 reg->how = DWARF2_FRAME_REG_FN;
3109 reg->loc.fn = arm_dwarf2_prev_register;
3110 break;
3111 case ARM_SP_REGNUM:
3112 reg->how = DWARF2_FRAME_REG_CFA;
3113 break;
3114 }
3115 }
3116
/* Implement the stack_frame_destroyed_p gdbarch method for Thumb mode:
   return non-zero if PC appears to point into the epilogue of its
   function.  */

static int
thumb_stack_frame_destroyed_p (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  unsigned int insn, insn2;
  int found_return = 0, found_stack_adjust = 0;
  CORE_ADDR func_start, func_end;
  CORE_ADDR scan_pc;
  gdb_byte buf[4];

  /* Without function bounds we cannot bound the epilogue scan.  */
  if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
    return 0;

  /* The epilogue is a sequence of instructions along the following lines:

    - add stack frame size to SP or FP
    - [if frame pointer used] restore SP from FP
    - restore registers from SP [may include PC]
    - a return-type instruction [if PC wasn't already restored]

    In a first pass, we scan forward from the current PC and verify the
    instructions we find as compatible with this sequence, ending in a
    return instruction.

    However, this is not sufficient to distinguish indirect function calls
    within a function from indirect tail calls in the epilogue in some cases.
    Therefore, if we didn't already find any SP-changing instruction during
    forward scan, we add a backward scanning heuristic to ensure we actually
    are in the epilogue.  */

  /* Forward scan: every instruction from PC to the return must be a
     plausible epilogue instruction.  */
  scan_pc = pc;
  while (scan_pc < func_end && !found_return)
    {
      if (target_read_memory (scan_pc, buf, 2))
	break;

      scan_pc += 2;
      insn = extract_unsigned_integer (buf, 2, byte_order_for_code);

      if ((insn & 0xff80) == 0x4700)  /* bx <Rm> */
	found_return = 1;
      else if (insn == 0x46f7)  /* mov pc, lr */
	found_return = 1;
      else if (thumb_instruction_restores_sp (insn))
	{
	  if ((insn & 0xff00) == 0xbd00)  /* pop <registers, PC> */
	    found_return = 1;
	}
      else if (thumb_insn_size (insn) == 4)  /* 32-bit Thumb-2 instruction */
	{
	  /* Fetch the second halfword of the 32-bit encoding.  */
	  if (target_read_memory (scan_pc, buf, 2))
	    break;

	  scan_pc += 2;
	  insn2 = extract_unsigned_integer (buf, 2, byte_order_for_code);

	  if (insn == 0xe8bd)  /* ldm.w sp!, <registers> */
	    {
	      if (insn2 & 0x8000)  /* <registers> include PC.  */
		found_return = 1;
	    }
	  else if (insn == 0xf85d  /* ldr.w <Rt>, [sp], #4 */
		   && (insn2 & 0x0fff) == 0x0b04)
	    {
	      if ((insn2 & 0xf000) == 0xf000) /* <Rt> is PC.  */
		found_return = 1;
	    }
	  else if ((insn & 0xffbf) == 0xecbd  /* vldm sp!, <list> */
		   && (insn2 & 0x0e00) == 0x0a00)
	    ;
	  else
	    break;
	}
      else
	break;
    }

  if (!found_return)
    return 0;

  /* Since any instruction in the epilogue sequence, with the possible
     exception of return itself, updates the stack pointer, we need to
     scan backwards for at most one instruction.  Try either a 16-bit or
     a 32-bit instruction.  This is just a heuristic, so we do not worry
     too much about false positives.  */

  if (pc - 4 < func_start)
    return 0;
  if (target_read_memory (pc - 4, buf, 4))
    return 0;

  /* INSN/INSN2 are the two halfwords preceding PC; a 16-bit
     SP-restoring instruction would be in INSN2, a 32-bit one spans
     both.  */
  insn = extract_unsigned_integer (buf, 2, byte_order_for_code);
  insn2 = extract_unsigned_integer (buf + 2, 2, byte_order_for_code);

  if (thumb_instruction_restores_sp (insn2))
    found_stack_adjust = 1;
  else if (insn == 0xe8bd)  /* ldm.w sp!, <registers> */
    found_stack_adjust = 1;
  else if (insn == 0xf85d  /* ldr.w <Rt>, [sp], #4 */
	   && (insn2 & 0x0fff) == 0x0b04)
    found_stack_adjust = 1;
  else if ((insn & 0xffbf) == 0xecbd  /* vldm sp!, <list> */
	   && (insn2 & 0x0e00) == 0x0a00)
    found_stack_adjust = 1;

  return found_stack_adjust;
}
3226
/* ARM (32-bit instruction) mode helper for arm_stack_frame_destroyed_p:
   return non-zero if PC appears to be in a function epilogue.  */

static int
arm_stack_frame_destroyed_p_1 (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  unsigned int insn;
  int found_return;
  CORE_ADDR func_start, func_end;

  if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
    return 0;

  /* We are in the epilogue if the previous instruction was a stack
     adjustment and the next instruction is a possible return (bx, mov
     pc, or pop).  We could have to scan backwards to find the stack
     adjustment, or forwards to find the return, but this is a decent
     approximation.  First scan forwards.  */

  found_return = 0;
  insn = read_memory_unsigned_integer (pc, 4, byte_order_for_code);
  /* Skip unconditional (NV-space) encodings; they are not returns.  */
  if (bits (insn, 28, 31) != INST_NV)
    {
      if ((insn & 0x0ffffff0) == 0x012fff10)
	/* BX.  */
	found_return = 1;
      else if ((insn & 0x0ffffff0) == 0x01a0f000)
	/* MOV PC.  */
	found_return = 1;
      else if ((insn & 0x0fff0000) == 0x08bd0000
	  && (insn & 0x0000c000) != 0)
	/* POP (LDMIA), including PC or LR.  */
	found_return = 1;
    }

  if (!found_return)
    return 0;

  /* Scan backwards.  This is just a heuristic, so do not worry about
     false positives from mode changes.  */

  if (pc < func_start + 4)
    return 0;

  insn = read_memory_unsigned_integer (pc - 4, 4, byte_order_for_code);
  if (arm_instruction_restores_sp (insn))
    return 1;

  return 0;
}
3275
3276 /* Implement the stack_frame_destroyed_p gdbarch method. */
3277
3278 static int
3279 arm_stack_frame_destroyed_p (struct gdbarch *gdbarch, CORE_ADDR pc)
3280 {
3281 if (arm_pc_is_thumb (gdbarch, pc))
3282 return thumb_stack_frame_destroyed_p (gdbarch, pc);
3283 else
3284 return arm_stack_frame_destroyed_p_1 (gdbarch, pc);
3285 }
3286
/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

struct stack_item
{
  int len;			/* Size of DATA in bytes.  */
  struct stack_item *prev;	/* Item below this one on the stack.  */
  gdb_byte *data;		/* Heap-allocated copy of the contents.  */
};
3296
3297 static struct stack_item *
3298 push_stack_item (struct stack_item *prev, const gdb_byte *contents, int len)
3299 {
3300 struct stack_item *si;
3301 si = XNEW (struct stack_item);
3302 si->data = (gdb_byte *) xmalloc (len);
3303 si->len = len;
3304 si->prev = prev;
3305 memcpy (si->data, contents, len);
3306 return si;
3307 }
3308
3309 static struct stack_item *
3310 pop_stack_item (struct stack_item *si)
3311 {
3312 struct stack_item *dead = si;
3313 si = si->prev;
3314 xfree (dead->data);
3315 xfree (dead);
3316 return si;
3317 }
3318
3319 /* Implement the gdbarch type alignment method, overrides the generic
3320 alignment algorithm for anything that is arm specific. */
3321
3322 static ULONGEST
3323 arm_type_align (gdbarch *gdbarch, struct type *t)
3324 {
3325 t = check_typedef (t);
3326 if (TYPE_CODE (t) == TYPE_CODE_ARRAY && TYPE_VECTOR (t))
3327 {
3328 /* Use the natural alignment for vector types (the same for
3329 scalar type), but the maximum alignment is 64-bit. */
3330 if (TYPE_LENGTH (t) > 8)
3331 return 8;
3332 else
3333 return TYPE_LENGTH (t);
3334 }
3335
3336 /* Allow the common code to calculate the alignment. */
3337 return 0;
3338 }
3339
/* Possible base types for a candidate for passing and returning in
   VFP registers.  */

enum arm_vfp_cprc_base_type
{
  VFP_CPRC_UNKNOWN,	/* Not yet classified.  */
  VFP_CPRC_SINGLE,	/* 32-bit single-precision float.  */
  VFP_CPRC_DOUBLE,	/* 64-bit double-precision float.  */
  VFP_CPRC_VEC64,	/* 64-bit containerized vector.  */
  VFP_CPRC_VEC128	/* 128-bit containerized vector.  */
};
3351
3352 /* The length of one element of base type B. */
3353
3354 static unsigned
3355 arm_vfp_cprc_unit_length (enum arm_vfp_cprc_base_type b)
3356 {
3357 switch (b)
3358 {
3359 case VFP_CPRC_SINGLE:
3360 return 4;
3361 case VFP_CPRC_DOUBLE:
3362 return 8;
3363 case VFP_CPRC_VEC64:
3364 return 8;
3365 case VFP_CPRC_VEC128:
3366 return 16;
3367 default:
3368 internal_error (__FILE__, __LINE__, _("Invalid VFP CPRC type: %d."),
3369 (int) b);
3370 }
3371 }
3372
3373 /* The character ('s', 'd' or 'q') for the type of VFP register used
3374 for passing base type B. */
3375
3376 static int
3377 arm_vfp_cprc_reg_char (enum arm_vfp_cprc_base_type b)
3378 {
3379 switch (b)
3380 {
3381 case VFP_CPRC_SINGLE:
3382 return 's';
3383 case VFP_CPRC_DOUBLE:
3384 return 'd';
3385 case VFP_CPRC_VEC64:
3386 return 'd';
3387 case VFP_CPRC_VEC128:
3388 return 'q';
3389 default:
3390 internal_error (__FILE__, __LINE__, _("Invalid VFP CPRC type: %d."),
3391 (int) b);
3392 }
3393 }
3394
3395 /* Determine whether T may be part of a candidate for passing and
3396 returning in VFP registers, ignoring the limit on the total number
3397 of components. If *BASE_TYPE is VFP_CPRC_UNKNOWN, set it to the
3398 classification of the first valid component found; if it is not
3399 VFP_CPRC_UNKNOWN, all components must have the same classification
3400 as *BASE_TYPE. If it is found that T contains a type not permitted
3401 for passing and returning in VFP registers, a type differently
3402 classified from *BASE_TYPE, or two types differently classified
3403 from each other, return -1, otherwise return the total number of
3404 base-type elements found (possibly 0 in an empty structure or
3405 array). Vector types are not currently supported, matching the
3406 generic AAPCS support. */
3407
static int
arm_vfp_cprc_sub_candidate (struct type *t,
			    enum arm_vfp_cprc_base_type *base_type)
{
  t = check_typedef (t);
  switch (TYPE_CODE (t))
    {
    case TYPE_CODE_FLT:
      /* A scalar float: classify by size (4 = single, 8 = double) and
	 check consistency with any earlier classification.  */
      switch (TYPE_LENGTH (t))
	{
	case 4:
	  if (*base_type == VFP_CPRC_UNKNOWN)
	    *base_type = VFP_CPRC_SINGLE;
	  else if (*base_type != VFP_CPRC_SINGLE)
	    return -1;
	  return 1;

	case 8:
	  if (*base_type == VFP_CPRC_UNKNOWN)
	    *base_type = VFP_CPRC_DOUBLE;
	  else if (*base_type != VFP_CPRC_DOUBLE)
	    return -1;
	  return 1;

	default:
	  return -1;
	}
      break;

    case TYPE_CODE_COMPLEX:
      /* Arguments of complex T where T is one of the types float or
	 double get treated as if they are implemented as:

	 struct complexT
	 {
	   T real;
	   T imag;
	 };

	 i.e. two elements of the component type.  */
      switch (TYPE_LENGTH (t))
	{
	case 8:
	  if (*base_type == VFP_CPRC_UNKNOWN)
	    *base_type = VFP_CPRC_SINGLE;
	  else if (*base_type != VFP_CPRC_SINGLE)
	    return -1;
	  return 2;

	case 16:
	  if (*base_type == VFP_CPRC_UNKNOWN)
	    *base_type = VFP_CPRC_DOUBLE;
	  else if (*base_type != VFP_CPRC_DOUBLE)
	    return -1;
	  return 2;

	default:
	  return -1;
	}
      break;

    case TYPE_CODE_ARRAY:
      {
	if (TYPE_VECTOR (t))
	  {
	    /* A 64-bit or 128-bit containerized vector type are VFP
	       CPRCs.  */
	    switch (TYPE_LENGTH (t))
	      {
	      case 8:
		if (*base_type == VFP_CPRC_UNKNOWN)
		  *base_type = VFP_CPRC_VEC64;
		return 1;
	      case 16:
		if (*base_type == VFP_CPRC_UNKNOWN)
		  *base_type = VFP_CPRC_VEC128;
		return 1;
	      default:
		return -1;
	      }
	  }
	else
	  {
	    /* An ordinary array: the element type must itself be a
	       valid sub-candidate; the total element count follows
	       from the array length divided by the unit length.  */
	    int count;
	    unsigned unitlen;

	    count = arm_vfp_cprc_sub_candidate (TYPE_TARGET_TYPE (t),
						base_type);
	    if (count == -1)
	      return -1;
	    if (TYPE_LENGTH (t) == 0)
	      {
		gdb_assert (count == 0);
		return 0;
	      }
	    else if (count == 0)
	      return -1;
	    unitlen = arm_vfp_cprc_unit_length (*base_type);
	    gdb_assert ((TYPE_LENGTH (t) % unitlen) == 0);
	    return TYPE_LENGTH (t) / unitlen;
	  }
      }
      break;

    case TYPE_CODE_STRUCT:
      {
	/* All non-static fields must be sub-candidates of the same
	   base type; their element counts add up.  */
	int count = 0;
	unsigned unitlen;
	int i;
	for (i = 0; i < TYPE_NFIELDS (t); i++)
	  {
	    int sub_count = 0;

	    if (!field_is_static (&TYPE_FIELD (t, i)))
	      sub_count = arm_vfp_cprc_sub_candidate (TYPE_FIELD_TYPE (t, i),
						      base_type);
	    if (sub_count == -1)
	      return -1;
	    count += sub_count;
	  }
	if (TYPE_LENGTH (t) == 0)
	  {
	    gdb_assert (count == 0);
	    return 0;
	  }
	else if (count == 0)
	  return -1;
	unitlen = arm_vfp_cprc_unit_length (*base_type);
	if (TYPE_LENGTH (t) != unitlen * count)
	  /* Any padding disqualifies the aggregate.  */
	  return -1;
	return count;
      }

    case TYPE_CODE_UNION:
      {
	/* For a union, the candidate's element count is the maximum
	   over the members, not the sum.  */
	int count = 0;
	unsigned unitlen;
	int i;
	for (i = 0; i < TYPE_NFIELDS (t); i++)
	  {
	    int sub_count = arm_vfp_cprc_sub_candidate (TYPE_FIELD_TYPE (t, i),
							base_type);
	    if (sub_count == -1)
	      return -1;
	    count = (count > sub_count ? count : sub_count);
	  }
	if (TYPE_LENGTH (t) == 0)
	  {
	    gdb_assert (count == 0);
	    return 0;
	  }
	else if (count == 0)
	  return -1;
	unitlen = arm_vfp_cprc_unit_length (*base_type);
	if (TYPE_LENGTH (t) != unitlen * count)
	  return -1;
	return count;
      }

    default:
      break;
    }

  return -1;
}
3573
3574 /* Determine whether T is a VFP co-processor register candidate (CPRC)
3575 if passed to or returned from a non-variadic function with the VFP
3576 ABI in effect. Return 1 if it is, 0 otherwise. If it is, set
3577 *BASE_TYPE to the base type for T and *COUNT to the number of
3578 elements of that base type before returning. */
3579
3580 static int
3581 arm_vfp_call_candidate (struct type *t, enum arm_vfp_cprc_base_type *base_type,
3582 int *count)
3583 {
3584 enum arm_vfp_cprc_base_type b = VFP_CPRC_UNKNOWN;
3585 int c = arm_vfp_cprc_sub_candidate (t, &b);
3586 if (c <= 0 || c > 4)
3587 return 0;
3588 *base_type = b;
3589 *count = c;
3590 return 1;
3591 }
3592
3593 /* Return 1 if the VFP ABI should be used for passing arguments to and
3594 returning values from a function of type FUNC_TYPE, 0
3595 otherwise. */
3596
3597 static int
3598 arm_vfp_abi_for_function (struct gdbarch *gdbarch, struct type *func_type)
3599 {
3600 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3601 /* Variadic functions always use the base ABI. Assume that functions
3602 without debug info are not variadic. */
3603 if (func_type && TYPE_VARARGS (check_typedef (func_type)))
3604 return 0;
3605 /* The VFP ABI is only supported as a variant of AAPCS. */
3606 if (tdep->arm_abi != ARM_ABI_AAPCS)
3607 return 0;
3608 return gdbarch_tdep (gdbarch)->fp_model == ARM_FLOAT_VFP;
3609 }
3610
3611 /* We currently only support passing parameters in integer registers, which
3612 conforms with GCC's default model, and VFP argument passing following
3613 the VFP variant of AAPCS. Several other variants exist and
3614 we should probably support some of them based on the selected ABI. */
3615
3616 static CORE_ADDR
3617 arm_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
3618 struct regcache *regcache, CORE_ADDR bp_addr, int nargs,
3619 struct value **args, CORE_ADDR sp,
3620 function_call_return_method return_method,
3621 CORE_ADDR struct_addr)
3622 {
3623 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3624 int argnum;
3625 int argreg;
3626 int nstack;
3627 struct stack_item *si = NULL;
3628 int use_vfp_abi;
3629 struct type *ftype;
3630 unsigned vfp_regs_free = (1 << 16) - 1;
3631
3632 /* Determine the type of this function and whether the VFP ABI
3633 applies. */
3634 ftype = check_typedef (value_type (function));
3635 if (TYPE_CODE (ftype) == TYPE_CODE_PTR)
3636 ftype = check_typedef (TYPE_TARGET_TYPE (ftype));
3637 use_vfp_abi = arm_vfp_abi_for_function (gdbarch, ftype);
3638
3639 /* Set the return address. For the ARM, the return breakpoint is
3640 always at BP_ADDR. */
3641 if (arm_pc_is_thumb (gdbarch, bp_addr))
3642 bp_addr |= 1;
3643 regcache_cooked_write_unsigned (regcache, ARM_LR_REGNUM, bp_addr);
3644
3645 /* Walk through the list of args and determine how large a temporary
3646 stack is required. Need to take care here as structs may be
3647 passed on the stack, and we have to push them. */
3648 nstack = 0;
3649
3650 argreg = ARM_A1_REGNUM;
3651 nstack = 0;
3652
3653 /* The struct_return pointer occupies the first parameter
3654 passing register. */
3655 if (return_method == return_method_struct)
3656 {
3657 if (arm_debug)
3658 fprintf_unfiltered (gdb_stdlog, "struct return in %s = %s\n",
3659 gdbarch_register_name (gdbarch, argreg),
3660 paddress (gdbarch, struct_addr));
3661 regcache_cooked_write_unsigned (regcache, argreg, struct_addr);
3662 argreg++;
3663 }
3664
3665 for (argnum = 0; argnum < nargs; argnum++)
3666 {
3667 int len;
3668 struct type *arg_type;
3669 struct type *target_type;
3670 enum type_code typecode;
3671 const bfd_byte *val;
3672 int align;
3673 enum arm_vfp_cprc_base_type vfp_base_type;
3674 int vfp_base_count;
3675 int may_use_core_reg = 1;
3676
3677 arg_type = check_typedef (value_type (args[argnum]));
3678 len = TYPE_LENGTH (arg_type);
3679 target_type = TYPE_TARGET_TYPE (arg_type);
3680 typecode = TYPE_CODE (arg_type);
3681 val = value_contents (args[argnum]);
3682
3683 align = type_align (arg_type);
3684 /* Round alignment up to a whole number of words. */
3685 align = (align + INT_REGISTER_SIZE - 1) & ~(INT_REGISTER_SIZE - 1);
3686 /* Different ABIs have different maximum alignments. */
3687 if (gdbarch_tdep (gdbarch)->arm_abi == ARM_ABI_APCS)
3688 {
3689 /* The APCS ABI only requires word alignment. */
3690 align = INT_REGISTER_SIZE;
3691 }
3692 else
3693 {
3694 /* The AAPCS requires at most doubleword alignment. */
3695 if (align > INT_REGISTER_SIZE * 2)
3696 align = INT_REGISTER_SIZE * 2;
3697 }
3698
3699 if (use_vfp_abi
3700 && arm_vfp_call_candidate (arg_type, &vfp_base_type,
3701 &vfp_base_count))
3702 {
3703 int regno;
3704 int unit_length;
3705 int shift;
3706 unsigned mask;
3707
3708 /* Because this is a CPRC it cannot go in a core register or
3709 cause a core register to be skipped for alignment.
3710 Either it goes in VFP registers and the rest of this loop
3711 iteration is skipped for this argument, or it goes on the
3712 stack (and the stack alignment code is correct for this
3713 case). */
3714 may_use_core_reg = 0;
3715
3716 unit_length = arm_vfp_cprc_unit_length (vfp_base_type);
3717 shift = unit_length / 4;
3718 mask = (1 << (shift * vfp_base_count)) - 1;
3719 for (regno = 0; regno < 16; regno += shift)
3720 if (((vfp_regs_free >> regno) & mask) == mask)
3721 break;
3722
3723 if (regno < 16)
3724 {
3725 int reg_char;
3726 int reg_scaled;
3727 int i;
3728
3729 vfp_regs_free &= ~(mask << regno);
3730 reg_scaled = regno / shift;
3731 reg_char = arm_vfp_cprc_reg_char (vfp_base_type);
3732 for (i = 0; i < vfp_base_count; i++)
3733 {
3734 char name_buf[4];
3735 int regnum;
3736 if (reg_char == 'q')
3737 arm_neon_quad_write (gdbarch, regcache, reg_scaled + i,
3738 val + i * unit_length);
3739 else
3740 {
3741 xsnprintf (name_buf, sizeof (name_buf), "%c%d",
3742 reg_char, reg_scaled + i);
3743 regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
3744 strlen (name_buf));
3745 regcache->cooked_write (regnum, val + i * unit_length);
3746 }
3747 }
3748 continue;
3749 }
3750 else
3751 {
3752 /* This CPRC could not go in VFP registers, so all VFP
3753 registers are now marked as used. */
3754 vfp_regs_free = 0;
3755 }
3756 }
3757
3758 /* Push stack padding for dowubleword alignment. */
3759 if (nstack & (align - 1))
3760 {
3761 si = push_stack_item (si, val, INT_REGISTER_SIZE);
3762 nstack += INT_REGISTER_SIZE;
3763 }
3764
3765 /* Doubleword aligned quantities must go in even register pairs. */
3766 if (may_use_core_reg
3767 && argreg <= ARM_LAST_ARG_REGNUM
3768 && align > INT_REGISTER_SIZE
3769 && argreg & 1)
3770 argreg++;
3771
3772 /* If the argument is a pointer to a function, and it is a
3773 Thumb function, create a LOCAL copy of the value and set
3774 the THUMB bit in it. */
3775 if (TYPE_CODE_PTR == typecode
3776 && target_type != NULL
3777 && TYPE_CODE_FUNC == TYPE_CODE (check_typedef (target_type)))
3778 {
3779 CORE_ADDR regval = extract_unsigned_integer (val, len, byte_order);
3780 if (arm_pc_is_thumb (gdbarch, regval))
3781 {
3782 bfd_byte *copy = (bfd_byte *) alloca (len);
3783 store_unsigned_integer (copy, len, byte_order,
3784 MAKE_THUMB_ADDR (regval));
3785 val = copy;
3786 }
3787 }
3788
3789 /* Copy the argument to general registers or the stack in
3790 register-sized pieces. Large arguments are split between
3791 registers and stack. */
3792 while (len > 0)
3793 {
3794 int partial_len = len < INT_REGISTER_SIZE ? len : INT_REGISTER_SIZE;
3795 CORE_ADDR regval
3796 = extract_unsigned_integer (val, partial_len, byte_order);
3797
3798 if (may_use_core_reg && argreg <= ARM_LAST_ARG_REGNUM)
3799 {
3800 /* The argument is being passed in a general purpose
3801 register. */
3802 if (byte_order == BFD_ENDIAN_BIG)
3803 regval <<= (INT_REGISTER_SIZE - partial_len) * 8;
3804 if (arm_debug)
3805 fprintf_unfiltered (gdb_stdlog, "arg %d in %s = 0x%s\n",
3806 argnum,
3807 gdbarch_register_name
3808 (gdbarch, argreg),
3809 phex (regval, INT_REGISTER_SIZE));
3810 regcache_cooked_write_unsigned (regcache, argreg, regval);
3811 argreg++;
3812 }
3813 else
3814 {
3815 gdb_byte buf[INT_REGISTER_SIZE];
3816
3817 memset (buf, 0, sizeof (buf));
3818 store_unsigned_integer (buf, partial_len, byte_order, regval);
3819
3820 /* Push the arguments onto the stack. */
3821 if (arm_debug)
3822 fprintf_unfiltered (gdb_stdlog, "arg %d @ sp + %d\n",
3823 argnum, nstack);
3824 si = push_stack_item (si, buf, INT_REGISTER_SIZE);
3825 nstack += INT_REGISTER_SIZE;
3826 }
3827
3828 len -= partial_len;
3829 val += partial_len;
3830 }
3831 }
3832 /* If we have an odd number of words to push, then decrement the stack
3833 by one word now, so first stack argument will be dword aligned. */
3834 if (nstack & 4)
3835 sp -= 4;
3836
3837 while (si)
3838 {
3839 sp -= si->len;
3840 write_memory (sp, si->data, si->len);
3841 si = pop_stack_item (si);
3842 }
3843
3844 /* Finally, update teh SP register. */
3845 regcache_cooked_write_unsigned (regcache, ARM_SP_REGNUM, sp);
3846
3847 return sp;
3848 }
3849
3850
3851 /* Always align the frame to an 8-byte boundary. This is required on
3852 some platforms and harmless on the rest. */
3853
3854 static CORE_ADDR
3855 arm_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
3856 {
3857 /* Align the stack to eight bytes. */
3858 return sp & ~ (CORE_ADDR) 7;
3859 }
3860
/* Print the names of the FPA exception-flag bits set in the low five
   bits of FLAGS, followed by a newline.  */

static void
print_fpu_flags (struct ui_file *file, int flags)
{
  static const char *const flag_names[]
    = { "IVO ", "DVZ ", "OFL ", "UFL ", "INX " };
  int i;

  for (i = 0; i < 5; i++)
    if (flags & (1 << i))
      fputs_filtered (flag_names[i], file);
  fputc_filtered ('\n', file);
}
3876
3877 /* Print interesting information about the floating point processor
3878 (if present) or emulator. */
3879 static void
3880 arm_print_float_info (struct gdbarch *gdbarch, struct ui_file *file,
3881 struct frame_info *frame, const char *args)
3882 {
3883 unsigned long status = get_frame_register_unsigned (frame, ARM_FPS_REGNUM);
3884 int type;
3885
3886 type = (status >> 24) & 127;
3887 if (status & (1 << 31))
3888 fprintf_filtered (file, _("Hardware FPU type %d\n"), type);
3889 else
3890 fprintf_filtered (file, _("Software FPU type %d\n"), type);
3891 /* i18n: [floating point unit] mask */
3892 fputs_filtered (_("mask: "), file);
3893 print_fpu_flags (file, status >> 16);
3894 /* i18n: [floating point unit] flags */
3895 fputs_filtered (_("flags: "), file);
3896 print_fpu_flags (file, status);
3897 }
3898
3899 /* Construct the ARM extended floating point type. */
3900 static struct type *
3901 arm_ext_type (struct gdbarch *gdbarch)
3902 {
3903 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3904
3905 if (!tdep->arm_ext_type)
3906 tdep->arm_ext_type
3907 = arch_float_type (gdbarch, -1, "builtin_type_arm_ext",
3908 floatformats_arm_ext);
3909
3910 return tdep->arm_ext_type;
3911 }
3912
3913 static struct type *
3914 arm_neon_double_type (struct gdbarch *gdbarch)
3915 {
3916 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3917
3918 if (tdep->neon_double_type == NULL)
3919 {
3920 struct type *t, *elem;
3921
3922 t = arch_composite_type (gdbarch, "__gdb_builtin_type_neon_d",
3923 TYPE_CODE_UNION);
3924 elem = builtin_type (gdbarch)->builtin_uint8;
3925 append_composite_type_field (t, "u8", init_vector_type (elem, 8));
3926 elem = builtin_type (gdbarch)->builtin_uint16;
3927 append_composite_type_field (t, "u16", init_vector_type (elem, 4));
3928 elem = builtin_type (gdbarch)->builtin_uint32;
3929 append_composite_type_field (t, "u32", init_vector_type (elem, 2));
3930 elem = builtin_type (gdbarch)->builtin_uint64;
3931 append_composite_type_field (t, "u64", elem);
3932 elem = builtin_type (gdbarch)->builtin_float;
3933 append_composite_type_field (t, "f32", init_vector_type (elem, 2));
3934 elem = builtin_type (gdbarch)->builtin_double;
3935 append_composite_type_field (t, "f64", elem);
3936
3937 TYPE_VECTOR (t) = 1;
3938 TYPE_NAME (t) = "neon_d";
3939 tdep->neon_double_type = t;
3940 }
3941
3942 return tdep->neon_double_type;
3943 }
3944
3945 /* FIXME: The vector types are not correctly ordered on big-endian
3946 targets. Just as s0 is the low bits of d0, d0[0] is also the low
3947 bits of d0 - regardless of what unit size is being held in d0. So
3948 the offset of the first uint8 in d0 is 7, but the offset of the
3949 first float is 4. This code works as-is for little-endian
3950 targets. */
3951
3952 static struct type *
3953 arm_neon_quad_type (struct gdbarch *gdbarch)
3954 {
3955 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3956
3957 if (tdep->neon_quad_type == NULL)
3958 {
3959 struct type *t, *elem;
3960
3961 t = arch_composite_type (gdbarch, "__gdb_builtin_type_neon_q",
3962 TYPE_CODE_UNION);
3963 elem = builtin_type (gdbarch)->builtin_uint8;
3964 append_composite_type_field (t, "u8", init_vector_type (elem, 16));
3965 elem = builtin_type (gdbarch)->builtin_uint16;
3966 append_composite_type_field (t, "u16", init_vector_type (elem, 8));
3967 elem = builtin_type (gdbarch)->builtin_uint32;
3968 append_composite_type_field (t, "u32", init_vector_type (elem, 4));
3969 elem = builtin_type (gdbarch)->builtin_uint64;
3970 append_composite_type_field (t, "u64", init_vector_type (elem, 2));
3971 elem = builtin_type (gdbarch)->builtin_float;
3972 append_composite_type_field (t, "f32", init_vector_type (elem, 4));
3973 elem = builtin_type (gdbarch)->builtin_double;
3974 append_composite_type_field (t, "f64", init_vector_type (elem, 2));
3975
3976 TYPE_VECTOR (t) = 1;
3977 TYPE_NAME (t) = "neon_q";
3978 tdep->neon_quad_type = t;
3979 }
3980
3981 return tdep->neon_quad_type;
3982 }
3983
/* Return the GDB type object for the "standard" data type of data in
   register N.  */

static struct type *
arm_register_type (struct gdbarch *gdbarch, int regnum)
{
  int num_regs = gdbarch_num_regs (gdbarch);

  /* The 32 single-precision VFP pseudo registers follow the raw
     registers.  */
  if (gdbarch_tdep (gdbarch)->have_vfp_pseudos
      && regnum >= num_regs && regnum < num_regs + 32)
    return builtin_type (gdbarch)->builtin_float;

  /* The 16 quad NEON pseudo registers follow the VFP pseudos.  */
  if (gdbarch_tdep (gdbarch)->have_neon_pseudos
      && regnum >= num_regs + 32 && regnum < num_regs + 32 + 16)
    return arm_neon_quad_type (gdbarch);

  /* If the target description has register information, we are only
     in this function so that we can override the types of
     double-precision registers for NEON.  */
  if (tdesc_has_registers (gdbarch_target_desc (gdbarch)))
    {
      struct type *t = tdesc_register_type (gdbarch, regnum);

      if (regnum >= ARM_D0_REGNUM && regnum < ARM_D0_REGNUM + 32
	  && TYPE_CODE (t) == TYPE_CODE_FLT
	  && gdbarch_tdep (gdbarch)->have_neon)
	return arm_neon_double_type (gdbarch);
      else
	return t;
    }

  if (regnum >= ARM_F0_REGNUM && regnum < ARM_F0_REGNUM + NUM_FREGS)
    {
      /* Present FPA registers as void when the target has none.  */
      if (!gdbarch_tdep (gdbarch)->have_fpa_registers)
	return builtin_type (gdbarch)->builtin_void;

      return arm_ext_type (gdbarch);
    }
  else if (regnum == ARM_SP_REGNUM)
    return builtin_type (gdbarch)->builtin_data_ptr;
  else if (regnum == ARM_PC_REGNUM)
    return builtin_type (gdbarch)->builtin_func_ptr;
  else if (regnum >= ARRAY_SIZE (arm_register_names))
    /* These registers are only supported on targets which supply
       an XML description.  */
    return builtin_type (gdbarch)->builtin_int0;
  else
    return builtin_type (gdbarch)->builtin_uint32;
}
4033
/* Map a DWARF register REGNUM onto the appropriate GDB register
   number.  Return -1 if there is no GDB equivalent.  */

static int
arm_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
{
  /* Core integer regs.  */
  if (reg >= 0 && reg <= 15)
    return reg;

  /* Legacy FPA encoding.  These were once used in a way which
     overlapped with VFP register numbering, so their use is
     discouraged, but GDB doesn't support the ARM toolchain
     which used them for VFP.  */
  if (reg >= 16 && reg <= 23)
    return ARM_F0_REGNUM + reg - 16;

  /* New assignments for the FPA registers.  */
  if (reg >= 96 && reg <= 103)
    return ARM_F0_REGNUM + reg - 96;

  /* WMMX register assignments.  */
  if (reg >= 104 && reg <= 111)
    return ARM_WCGR0_REGNUM + reg - 104;

  if (reg >= 112 && reg <= 127)
    return ARM_WR0_REGNUM + reg - 112;

  if (reg >= 192 && reg <= 199)
    return ARM_WC0_REGNUM + reg - 192;

  /* VFP v2 registers.  A double precision value is actually
     in d1 rather than s2, but the ABI only defines numbering
     for the single precision registers.  This will "just work"
     in GDB for little endian targets (we'll read eight bytes,
     starting in s0 and then progressing to s1), but will be
     reversed on big endian targets with VFP.  This won't
     be a problem for the new Neon quad registers; you're supposed
     to use DW_OP_piece for those.  */
  if (reg >= 64 && reg <= 95)
    {
      char name_buf[4];

      /* Resolve "sN" by name; the S registers are user-reg pseudos.  */
      xsnprintf (name_buf, sizeof (name_buf), "s%d", reg - 64);
      return user_reg_map_name_to_regnum (gdbarch, name_buf,
					  strlen (name_buf));
    }

  /* VFP v3 / Neon registers.  This range is also used for VFP v2
     registers, except that it now describes d0 instead of s0.  */
  if (reg >= 256 && reg <= 287)
    {
      char name_buf[4];

      /* Resolve "dN" by name, as above.  */
      xsnprintf (name_buf, sizeof (name_buf), "d%d", reg - 256);
      return user_reg_map_name_to_regnum (gdbarch, name_buf,
					  strlen (name_buf));
    }

  return -1;
}
4095
4096 /* Map GDB internal REGNUM onto the Arm simulator register numbers. */
4097 static int
4098 arm_register_sim_regno (struct gdbarch *gdbarch, int regnum)
4099 {
4100 int reg = regnum;
4101 gdb_assert (reg >= 0 && reg < gdbarch_num_regs (gdbarch));
4102
4103 if (regnum >= ARM_WR0_REGNUM && regnum <= ARM_WR15_REGNUM)
4104 return regnum - ARM_WR0_REGNUM + SIM_ARM_IWMMXT_COP0R0_REGNUM;
4105
4106 if (regnum >= ARM_WC0_REGNUM && regnum <= ARM_WC7_REGNUM)
4107 return regnum - ARM_WC0_REGNUM + SIM_ARM_IWMMXT_COP1R0_REGNUM;
4108
4109 if (regnum >= ARM_WCGR0_REGNUM && regnum <= ARM_WCGR7_REGNUM)
4110 return regnum - ARM_WCGR0_REGNUM + SIM_ARM_IWMMXT_COP1R8_REGNUM;
4111
4112 if (reg < NUM_GREGS)
4113 return SIM_ARM_R0_REGNUM + reg;
4114 reg -= NUM_GREGS;
4115
4116 if (reg < NUM_FREGS)
4117 return SIM_ARM_FP0_REGNUM + reg;
4118 reg -= NUM_FREGS;
4119
4120 if (reg < NUM_SREGS)
4121 return SIM_ARM_FPS_REGNUM + reg;
4122 reg -= NUM_SREGS;
4123
4124 internal_error (__FILE__, __LINE__, _("Bad REGNUM %d"), regnum);
4125 }
4126
4127 /* Given BUF, which is OLD_LEN bytes ending at ENDADDR, expand
4128 the buffer to be NEW_LEN bytes ending at ENDADDR. Return
4129 NULL if an error occurs. BUF is freed. */
4130
4131 static gdb_byte *
4132 extend_buffer_earlier (gdb_byte *buf, CORE_ADDR endaddr,
4133 int old_len, int new_len)
4134 {
4135 gdb_byte *new_buf;
4136 int bytes_to_read = new_len - old_len;
4137
4138 new_buf = (gdb_byte *) xmalloc (new_len);
4139 memcpy (new_buf + bytes_to_read, buf, old_len);
4140 xfree (buf);
4141 if (target_read_code (endaddr - new_len, new_buf, bytes_to_read) != 0)
4142 {
4143 xfree (new_buf);
4144 return NULL;
4145 }
4146 return new_buf;
4147 }
4148
4149 /* An IT block is at most the 2-byte IT instruction followed by
4150 four 4-byte instructions. The furthest back we must search to
4151 find an IT block that affects the current instruction is thus
4152 2 + 3 * 4 == 14 bytes. */
4153 #define MAX_IT_BLOCK_PREFIX 14
4154
4155 /* Use a quick scan if there are more than this many bytes of
4156 code. */
4157 #define IT_SCAN_THRESHOLD 32
4158
4159 /* Adjust a breakpoint's address to move breakpoints out of IT blocks.
4160 A breakpoint in an IT block may not be hit, depending on the
4161 condition flags. */
4162 static CORE_ADDR
4163 arm_adjust_breakpoint_address (struct gdbarch *gdbarch, CORE_ADDR bpaddr)
4164 {
4165 gdb_byte *buf;
4166 char map_type;
4167 CORE_ADDR boundary, func_start;
4168 int buf_len;
4169 enum bfd_endian order = gdbarch_byte_order_for_code (gdbarch);
4170 int i, any, last_it, last_it_count;
4171
4172 /* If we are using BKPT breakpoints, none of this is necessary. */
4173 if (gdbarch_tdep (gdbarch)->thumb2_breakpoint == NULL)
4174 return bpaddr;
4175
4176 /* ARM mode does not have this problem. */
4177 if (!arm_pc_is_thumb (gdbarch, bpaddr))
4178 return bpaddr;
4179
4180 /* We are setting a breakpoint in Thumb code that could potentially
4181 contain an IT block. The first step is to find how much Thumb
4182 code there is; we do not need to read outside of known Thumb
4183 sequences. */
4184 map_type = arm_find_mapping_symbol (bpaddr, &boundary);
4185 if (map_type == 0)
4186 /* Thumb-2 code must have mapping symbols to have a chance. */
4187 return bpaddr;
4188
4189 bpaddr = gdbarch_addr_bits_remove (gdbarch, bpaddr);
4190
4191 if (find_pc_partial_function (bpaddr, NULL, &func_start, NULL)
4192 && func_start > boundary)
4193 boundary = func_start;
4194
4195 /* Search for a candidate IT instruction. We have to do some fancy
4196 footwork to distinguish a real IT instruction from the second
4197 half of a 32-bit instruction, but there is no need for that if
4198 there's no candidate. */
4199 buf_len = std::min (bpaddr - boundary, (CORE_ADDR) MAX_IT_BLOCK_PREFIX);
4200 if (buf_len == 0)
4201 /* No room for an IT instruction. */
4202 return bpaddr;
4203
4204 buf = (gdb_byte *) xmalloc (buf_len);
4205 if (target_read_code (bpaddr - buf_len, buf, buf_len) != 0)
4206 return bpaddr;
4207 any = 0;
4208 for (i = 0; i < buf_len; i += 2)
4209 {
4210 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
4211 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
4212 {
4213 any = 1;
4214 break;
4215 }
4216 }
4217
4218 if (any == 0)
4219 {
4220 xfree (buf);
4221 return bpaddr;
4222 }
4223
4224 /* OK, the code bytes before this instruction contain at least one
4225 halfword which resembles an IT instruction. We know that it's
4226 Thumb code, but there are still two possibilities. Either the
4227 halfword really is an IT instruction, or it is the second half of
4228 a 32-bit Thumb instruction. The only way we can tell is to
4229 scan forwards from a known instruction boundary. */
4230 if (bpaddr - boundary > IT_SCAN_THRESHOLD)
4231 {
4232 int definite;
4233
4234 /* There's a lot of code before this instruction. Start with an
4235 optimistic search; it's easy to recognize halfwords that can
4236 not be the start of a 32-bit instruction, and use that to
4237 lock on to the instruction boundaries. */
4238 buf = extend_buffer_earlier (buf, bpaddr, buf_len, IT_SCAN_THRESHOLD);
4239 if (buf == NULL)
4240 return bpaddr;
4241 buf_len = IT_SCAN_THRESHOLD;
4242
4243 definite = 0;
4244 for (i = 0; i < buf_len - sizeof (buf) && ! definite; i += 2)
4245 {
4246 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
4247 if (thumb_insn_size (inst1) == 2)
4248 {
4249 definite = 1;
4250 break;
4251 }
4252 }
4253
4254 /* At this point, if DEFINITE, BUF[I] is the first place we
4255 are sure that we know the instruction boundaries, and it is far
4256 enough from BPADDR that we could not miss an IT instruction
4257 affecting BPADDR. If ! DEFINITE, give up - start from a
4258 known boundary. */
4259 if (! definite)
4260 {
4261 buf = extend_buffer_earlier (buf, bpaddr, buf_len,
4262 bpaddr - boundary);
4263 if (buf == NULL)
4264 return bpaddr;
4265 buf_len = bpaddr - boundary;
4266 i = 0;
4267 }
4268 }
4269 else
4270 {
4271 buf = extend_buffer_earlier (buf, bpaddr, buf_len, bpaddr - boundary);
4272 if (buf == NULL)
4273 return bpaddr;
4274 buf_len = bpaddr - boundary;
4275 i = 0;
4276 }
4277
4278 /* Scan forwards. Find the last IT instruction before BPADDR. */
4279 last_it = -1;
4280 last_it_count = 0;
4281 while (i < buf_len)
4282 {
4283 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
4284 last_it_count--;
4285 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
4286 {
4287 last_it = i;
4288 if (inst1 & 0x0001)
4289 last_it_count = 4;
4290 else if (inst1 & 0x0002)
4291 last_it_count = 3;
4292 else if (inst1 & 0x0004)
4293 last_it_count = 2;
4294 else
4295 last_it_count = 1;
4296 }
4297 i += thumb_insn_size (inst1);
4298 }
4299
4300 xfree (buf);
4301
4302 if (last_it == -1)
4303 /* There wasn't really an IT instruction after all. */
4304 return bpaddr;
4305
4306 if (last_it_count < 1)
4307 /* It was too far away. */
4308 return bpaddr;
4309
4310 /* This really is a trouble spot. Move the breakpoint to the IT
4311 instruction. */
4312 return bpaddr - buf_len + last_it;
4313 }
4314
4315 /* ARM displaced stepping support.
4316
4317 Generally ARM displaced stepping works as follows:
4318
4319 1. When an instruction is to be single-stepped, it is first decoded by
4320 arm_process_displaced_insn. Depending on the type of instruction, it is
4321 then copied to a scratch location, possibly in a modified form. The
4322 copy_* set of functions performs such modification, as necessary. A
4323 breakpoint is placed after the modified instruction in the scratch space
4324 to return control to GDB. Note in particular that instructions which
4325 modify the PC will no longer do so after modification.
4326
4327 2. The instruction is single-stepped, by setting the PC to the scratch
4328 location address, and resuming. Control returns to GDB when the
4329 breakpoint is hit.
4330
4331 3. A cleanup function (cleanup_*) is called corresponding to the copy_*
4332 function used for the current instruction. This function's job is to
4333 put the CPU/memory state back to what it would have been if the
4334 instruction had been executed unmodified in its original location. */
4335
4336 /* NOP instruction (mov r0, r0). */
4337 #define ARM_NOP 0xe1a00000
4338 #define THUMB_NOP 0x4600
4339
4340 /* Helper for register reads for displaced stepping. In particular, this
4341 returns the PC as it would be seen by the instruction at its original
4342 location. */
4343
4344 ULONGEST
4345 displaced_read_reg (struct regcache *regs, arm_displaced_step_closure *dsc,
4346 int regno)
4347 {
4348 ULONGEST ret;
4349 CORE_ADDR from = dsc->insn_addr;
4350
4351 if (regno == ARM_PC_REGNUM)
4352 {
4353 /* Compute pipeline offset:
4354 - When executing an ARM instruction, PC reads as the address of the
4355 current instruction plus 8.
4356 - When executing a Thumb instruction, PC reads as the address of the
4357 current instruction plus 4. */
4358
4359 if (!dsc->is_thumb)
4360 from += 8;
4361 else
4362 from += 4;
4363
4364 if (debug_displaced)
4365 fprintf_unfiltered (gdb_stdlog, "displaced: read pc value %.8lx\n",
4366 (unsigned long) from);
4367 return (ULONGEST) from;
4368 }
4369 else
4370 {
4371 regcache_cooked_read_unsigned (regs, regno, &ret);
4372 if (debug_displaced)
4373 fprintf_unfiltered (gdb_stdlog, "displaced: read r%d value %.8lx\n",
4374 regno, (unsigned long) ret);
4375 return ret;
4376 }
4377 }
4378
4379 static int
4380 displaced_in_arm_mode (struct regcache *regs)
4381 {
4382 ULONGEST ps;
4383 ULONGEST t_bit = arm_psr_thumb_bit (regs->arch ());
4384
4385 regcache_cooked_read_unsigned (regs, ARM_PS_REGNUM, &ps);
4386
4387 return (ps & t_bit) == 0;
4388 }
4389
4390 /* Write to the PC as from a branch instruction. */
4391
4392 static void
4393 branch_write_pc (struct regcache *regs, arm_displaced_step_closure *dsc,
4394 ULONGEST val)
4395 {
4396 if (!dsc->is_thumb)
4397 /* Note: If bits 0/1 are set, this branch would be unpredictable for
4398 architecture versions < 6. */
4399 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM,
4400 val & ~(ULONGEST) 0x3);
4401 else
4402 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM,
4403 val & ~(ULONGEST) 0x1);
4404 }
4405
4406 /* Write to the PC as from a branch-exchange instruction. */
4407
4408 static void
4409 bx_write_pc (struct regcache *regs, ULONGEST val)
4410 {
4411 ULONGEST ps;
4412 ULONGEST t_bit = arm_psr_thumb_bit (regs->arch ());
4413
4414 regcache_cooked_read_unsigned (regs, ARM_PS_REGNUM, &ps);
4415
4416 if ((val & 1) == 1)
4417 {
4418 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps | t_bit);
4419 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & 0xfffffffe);
4420 }
4421 else if ((val & 2) == 0)
4422 {
4423 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps & ~t_bit);
4424 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val);
4425 }
4426 else
4427 {
4428 /* Unpredictable behaviour. Try to do something sensible (switch to ARM
4429 mode, align dest to 4 bytes). */
4430 warning (_("Single-stepping BX to non-word-aligned ARM instruction."));
4431 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps & ~t_bit);
4432 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & 0xfffffffc);
4433 }
4434 }
4435
4436 /* Write to the PC as if from a load instruction. */
4437
4438 static void
4439 load_write_pc (struct regcache *regs, arm_displaced_step_closure *dsc,
4440 ULONGEST val)
4441 {
4442 if (DISPLACED_STEPPING_ARCH_VERSION >= 5)
4443 bx_write_pc (regs, val);
4444 else
4445 branch_write_pc (regs, dsc, val);
4446 }
4447
4448 /* Write to the PC as if from an ALU instruction. */
4449
4450 static void
4451 alu_write_pc (struct regcache *regs, arm_displaced_step_closure *dsc,
4452 ULONGEST val)
4453 {
4454 if (DISPLACED_STEPPING_ARCH_VERSION >= 7 && !dsc->is_thumb)
4455 bx_write_pc (regs, val);
4456 else
4457 branch_write_pc (regs, dsc, val);
4458 }
4459
4460 /* Helper for writing to registers for displaced stepping. Writing to the PC
4461 has a varying effects depending on the instruction which does the write:
4462 this is controlled by the WRITE_PC argument. */
4463
4464 void
4465 displaced_write_reg (struct regcache *regs, arm_displaced_step_closure *dsc,
4466 int regno, ULONGEST val, enum pc_write_style write_pc)
4467 {
4468 if (regno == ARM_PC_REGNUM)
4469 {
4470 if (debug_displaced)
4471 fprintf_unfiltered (gdb_stdlog, "displaced: writing pc %.8lx\n",
4472 (unsigned long) val);
4473 switch (write_pc)
4474 {
4475 case BRANCH_WRITE_PC:
4476 branch_write_pc (regs, dsc, val);
4477 break;
4478
4479 case BX_WRITE_PC:
4480 bx_write_pc (regs, val);
4481 break;
4482
4483 case LOAD_WRITE_PC:
4484 load_write_pc (regs, dsc, val);
4485 break;
4486
4487 case ALU_WRITE_PC:
4488 alu_write_pc (regs, dsc, val);
4489 break;
4490
4491 case CANNOT_WRITE_PC:
4492 warning (_("Instruction wrote to PC in an unexpected way when "
4493 "single-stepping"));
4494 break;
4495
4496 default:
4497 internal_error (__FILE__, __LINE__,
4498 _("Invalid argument to displaced_write_reg"));
4499 }
4500
4501 dsc->wrote_to_pc = 1;
4502 }
4503 else
4504 {
4505 if (debug_displaced)
4506 fprintf_unfiltered (gdb_stdlog, "displaced: writing r%d value %.8lx\n",
4507 regno, (unsigned long) val);
4508 regcache_cooked_write_unsigned (regs, regno, val);
4509 }
4510 }
4511
/* This function is used to concisely determine if an instruction INSN
   references PC.  Register fields of interest in INSN should have the
   corresponding fields of BITMASK set to 0b1111.  The function
   returns return 1 if any of these fields in INSN reference the PC
   (also 0b1111, r15), else it returns 0.  */

static int
insn_references_pc (uint32_t insn, uint32_t bitmask)
{
  uint32_t lowbit = 1;

  /* Walk BITMASK field by field; each field of interest is four
     consecutive set bits starting at LOWBIT.  */
  while (bitmask != 0)
    {
      /* Advance LOWBIT to the least significant remaining set bit.  */
      while (lowbit != 0 && (bitmask & lowbit) == 0)
	lowbit <<= 1;

      if (lowbit == 0)
	break;

      uint32_t field = lowbit * 0xf;

      /* All four bits set in INSN means this field names r15 (PC).  */
      if ((insn & field) == field)
	return 1;

      bitmask &= ~field;
    }

  return 0;
}
4543
4544 /* The simplest copy function. Many instructions have the same effect no
4545 matter what address they are executed at: in those cases, use this. */
4546
4547 static int
4548 arm_copy_unmodified (struct gdbarch *gdbarch, uint32_t insn,
4549 const char *iname, arm_displaced_step_closure *dsc)
4550 {
4551 if (debug_displaced)
4552 fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.8lx, "
4553 "opcode/class '%s' unmodified\n", (unsigned long) insn,
4554 iname);
4555
4556 dsc->modinsn[0] = insn;
4557
4558 return 0;
4559 }
4560
4561 static int
4562 thumb_copy_unmodified_32bit (struct gdbarch *gdbarch, uint16_t insn1,
4563 uint16_t insn2, const char *iname,
4564 arm_displaced_step_closure *dsc)
4565 {
4566 if (debug_displaced)
4567 fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.4x %.4x, "
4568 "opcode/class '%s' unmodified\n", insn1, insn2,
4569 iname);
4570
4571 dsc->modinsn[0] = insn1;
4572 dsc->modinsn[1] = insn2;
4573 dsc->numinsns = 2;
4574
4575 return 0;
4576 }
4577
4578 /* Copy 16-bit Thumb(Thumb and 16-bit Thumb-2) instruction without any
4579 modification. */
4580 static int
4581 thumb_copy_unmodified_16bit (struct gdbarch *gdbarch, uint16_t insn,
4582 const char *iname,
4583 arm_displaced_step_closure *dsc)
4584 {
4585 if (debug_displaced)
4586 fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.4x, "
4587 "opcode/class '%s' unmodified\n", insn,
4588 iname);
4589
4590 dsc->modinsn[0] = insn;
4591
4592 return 0;
4593 }
4594
4595 /* Preload instructions with immediate offset. */
4596
4597 static void
4598 cleanup_preload (struct gdbarch *gdbarch,
4599 struct regcache *regs, arm_displaced_step_closure *dsc)
4600 {
4601 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
4602 if (!dsc->u.preload.immed)
4603 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
4604 }
4605
4606 static void
4607 install_preload (struct gdbarch *gdbarch, struct regcache *regs,
4608 arm_displaced_step_closure *dsc, unsigned int rn)
4609 {
4610 ULONGEST rn_val;
4611 /* Preload instructions:
4612
4613 {pli/pld} [rn, #+/-imm]
4614 ->
4615 {pli/pld} [r0, #+/-imm]. */
4616
4617 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
4618 rn_val = displaced_read_reg (regs, dsc, rn);
4619 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
4620 dsc->u.preload.immed = 1;
4621
4622 dsc->cleanup = &cleanup_preload;
4623 }
4624
4625 static int
4626 arm_copy_preload (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
4627 arm_displaced_step_closure *dsc)
4628 {
4629 unsigned int rn = bits (insn, 16, 19);
4630
4631 if (!insn_references_pc (insn, 0x000f0000ul))
4632 return arm_copy_unmodified (gdbarch, insn, "preload", dsc);
4633
4634 if (debug_displaced)
4635 fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.8lx\n",
4636 (unsigned long) insn);
4637
4638 dsc->modinsn[0] = insn & 0xfff0ffff;
4639
4640 install_preload (gdbarch, regs, dsc, rn);
4641
4642 return 0;
4643 }
4644
4645 static int
4646 thumb2_copy_preload (struct gdbarch *gdbarch, uint16_t insn1, uint16_t insn2,
4647 struct regcache *regs, arm_displaced_step_closure *dsc)
4648 {
4649 unsigned int rn = bits (insn1, 0, 3);
4650 unsigned int u_bit = bit (insn1, 7);
4651 int imm12 = bits (insn2, 0, 11);
4652 ULONGEST pc_val;
4653
4654 if (rn != ARM_PC_REGNUM)
4655 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "preload", dsc);
4656
4657 /* PC is only allowed to use in PLI (immediate,literal) Encoding T3, and
4658 PLD (literal) Encoding T1. */
4659 if (debug_displaced)
4660 fprintf_unfiltered (gdb_stdlog,
4661 "displaced: copying pld/pli pc (0x%x) %c imm12 %.4x\n",
4662 (unsigned int) dsc->insn_addr, u_bit ? '+' : '-',
4663 imm12);
4664
4665 if (!u_bit)
4666 imm12 = -1 * imm12;
4667
4668 /* Rewrite instruction {pli/pld} PC imm12 into:
4669 Prepare: tmp[0] <- r0, tmp[1] <- r1, r0 <- pc, r1 <- imm12
4670
4671 {pli/pld} [r0, r1]
4672
4673 Cleanup: r0 <- tmp[0], r1 <- tmp[1]. */
4674
4675 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
4676 dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
4677
4678 pc_val = displaced_read_reg (regs, dsc, ARM_PC_REGNUM);
4679
4680 displaced_write_reg (regs, dsc, 0, pc_val, CANNOT_WRITE_PC);
4681 displaced_write_reg (regs, dsc, 1, imm12, CANNOT_WRITE_PC);
4682 dsc->u.preload.immed = 0;
4683
4684 /* {pli/pld} [r0, r1] */
4685 dsc->modinsn[0] = insn1 & 0xfff0;
4686 dsc->modinsn[1] = 0xf001;
4687 dsc->numinsns = 2;
4688
4689 dsc->cleanup = &cleanup_preload;
4690 return 0;
4691 }
4692
4693 /* Preload instructions with register offset. */
4694
4695 static void
4696 install_preload_reg(struct gdbarch *gdbarch, struct regcache *regs,
4697 arm_displaced_step_closure *dsc, unsigned int rn,
4698 unsigned int rm)
4699 {
4700 ULONGEST rn_val, rm_val;
4701
4702 /* Preload register-offset instructions:
4703
4704 {pli/pld} [rn, rm {, shift}]
4705 ->
4706 {pli/pld} [r0, r1 {, shift}]. */
4707
4708 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
4709 dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
4710 rn_val = displaced_read_reg (regs, dsc, rn);
4711 rm_val = displaced_read_reg (regs, dsc, rm);
4712 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
4713 displaced_write_reg (regs, dsc, 1, rm_val, CANNOT_WRITE_PC);
4714 dsc->u.preload.immed = 0;
4715
4716 dsc->cleanup = &cleanup_preload;
4717 }
4718
4719 static int
4720 arm_copy_preload_reg (struct gdbarch *gdbarch, uint32_t insn,
4721 struct regcache *regs,
4722 arm_displaced_step_closure *dsc)
4723 {
4724 unsigned int rn = bits (insn, 16, 19);
4725 unsigned int rm = bits (insn, 0, 3);
4726
4727
4728 if (!insn_references_pc (insn, 0x000f000ful))
4729 return arm_copy_unmodified (gdbarch, insn, "preload reg", dsc);
4730
4731 if (debug_displaced)
4732 fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.8lx\n",
4733 (unsigned long) insn);
4734
4735 dsc->modinsn[0] = (insn & 0xfff0fff0) | 0x1;
4736
4737 install_preload_reg (gdbarch, regs, dsc, rn, rm);
4738 return 0;
4739 }
4740
4741 /* Copy/cleanup coprocessor load and store instructions. */
4742
4743 static void
4744 cleanup_copro_load_store (struct gdbarch *gdbarch,
4745 struct regcache *regs,
4746 arm_displaced_step_closure *dsc)
4747 {
4748 ULONGEST rn_val = displaced_read_reg (regs, dsc, 0);
4749
4750 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
4751
4752 if (dsc->u.ldst.writeback)
4753 displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, LOAD_WRITE_PC);
4754 }
4755
4756 static void
4757 install_copro_load_store (struct gdbarch *gdbarch, struct regcache *regs,
4758 arm_displaced_step_closure *dsc,
4759 int writeback, unsigned int rn)
4760 {
4761 ULONGEST rn_val;
4762
4763 /* Coprocessor load/store instructions:
4764
4765 {stc/stc2} [<Rn>, #+/-imm] (and other immediate addressing modes)
4766 ->
4767 {stc/stc2} [r0, #+/-imm].
4768
4769 ldc/ldc2 are handled identically. */
4770
4771 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
4772 rn_val = displaced_read_reg (regs, dsc, rn);
4773 /* PC should be 4-byte aligned. */
4774 rn_val = rn_val & 0xfffffffc;
4775 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
4776
4777 dsc->u.ldst.writeback = writeback;
4778 dsc->u.ldst.rn = rn;
4779
4780 dsc->cleanup = &cleanup_copro_load_store;
4781 }
4782
4783 static int
4784 arm_copy_copro_load_store (struct gdbarch *gdbarch, uint32_t insn,
4785 struct regcache *regs,
4786 arm_displaced_step_closure *dsc)
4787 {
4788 unsigned int rn = bits (insn, 16, 19);
4789
4790 if (!insn_references_pc (insn, 0x000f0000ul))
4791 return arm_copy_unmodified (gdbarch, insn, "copro load/store", dsc);
4792
4793 if (debug_displaced)
4794 fprintf_unfiltered (gdb_stdlog, "displaced: copying coprocessor "
4795 "load/store insn %.8lx\n", (unsigned long) insn);
4796
4797 dsc->modinsn[0] = insn & 0xfff0ffff;
4798
4799 install_copro_load_store (gdbarch, regs, dsc, bit (insn, 25), rn);
4800
4801 return 0;
4802 }
4803
4804 static int
4805 thumb2_copy_copro_load_store (struct gdbarch *gdbarch, uint16_t insn1,
4806 uint16_t insn2, struct regcache *regs,
4807 arm_displaced_step_closure *dsc)
4808 {
4809 unsigned int rn = bits (insn1, 0, 3);
4810
4811 if (rn != ARM_PC_REGNUM)
4812 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
4813 "copro load/store", dsc);
4814
4815 if (debug_displaced)
4816 fprintf_unfiltered (gdb_stdlog, "displaced: copying coprocessor "
4817 "load/store insn %.4x%.4x\n", insn1, insn2);
4818
4819 dsc->modinsn[0] = insn1 & 0xfff0;
4820 dsc->modinsn[1] = insn2;
4821 dsc->numinsns = 2;
4822
4823 /* This function is called for copying instruction LDC/LDC2/VLDR, which
4824 doesn't support writeback, so pass 0. */
4825 install_copro_load_store (gdbarch, regs, dsc, 0, rn);
4826
4827 return 0;
4828 }
4829
4830 /* Clean up branch instructions (actually perform the branch, by setting
4831 PC). */
4832
4833 static void
4834 cleanup_branch (struct gdbarch *gdbarch, struct regcache *regs,
4835 arm_displaced_step_closure *dsc)
4836 {
4837 uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
4838 int branch_taken = condition_true (dsc->u.branch.cond, status);
4839 enum pc_write_style write_pc = dsc->u.branch.exchange
4840 ? BX_WRITE_PC : BRANCH_WRITE_PC;
4841
4842 if (!branch_taken)
4843 return;
4844
4845 if (dsc->u.branch.link)
4846 {
4847 /* The value of LR should be the next insn of current one. In order
4848 not to confuse logic hanlding later insn `bx lr', if current insn mode
4849 is Thumb, the bit 0 of LR value should be set to 1. */
4850 ULONGEST next_insn_addr = dsc->insn_addr + dsc->insn_size;
4851
4852 if (dsc->is_thumb)
4853 next_insn_addr |= 0x1;
4854
4855 displaced_write_reg (regs, dsc, ARM_LR_REGNUM, next_insn_addr,
4856 CANNOT_WRITE_PC);
4857 }
4858
4859 displaced_write_reg (regs, dsc, ARM_PC_REGNUM, dsc->u.branch.dest, write_pc);
4860 }
4861
4862 /* Copy B/BL/BLX instructions with immediate destinations. */
4863
4864 static void
4865 install_b_bl_blx (struct gdbarch *gdbarch, struct regcache *regs,
4866 arm_displaced_step_closure *dsc,
4867 unsigned int cond, int exchange, int link, long offset)
4868 {
4869 /* Implement "BL<cond> <label>" as:
4870
4871 Preparation: cond <- instruction condition
4872 Insn: mov r0, r0 (nop)
4873 Cleanup: if (condition true) { r14 <- pc; pc <- label }.
4874
4875 B<cond> similar, but don't set r14 in cleanup. */
4876
4877 dsc->u.branch.cond = cond;
4878 dsc->u.branch.link = link;
4879 dsc->u.branch.exchange = exchange;
4880
4881 dsc->u.branch.dest = dsc->insn_addr;
4882 if (link && exchange)
4883 /* For BLX, offset is computed from the Align (PC, 4). */
4884 dsc->u.branch.dest = dsc->u.branch.dest & 0xfffffffc;
4885
4886 if (dsc->is_thumb)
4887 dsc->u.branch.dest += 4 + offset;
4888 else
4889 dsc->u.branch.dest += 8 + offset;
4890
4891 dsc->cleanup = &cleanup_branch;
4892 }
4893 static int
4894 arm_copy_b_bl_blx (struct gdbarch *gdbarch, uint32_t insn,
4895 struct regcache *regs, arm_displaced_step_closure *dsc)
4896 {
4897 unsigned int cond = bits (insn, 28, 31);
4898 int exchange = (cond == 0xf);
4899 int link = exchange || bit (insn, 24);
4900 long offset;
4901
4902 if (debug_displaced)
4903 fprintf_unfiltered (gdb_stdlog, "displaced: copying %s immediate insn "
4904 "%.8lx\n", (exchange) ? "blx" : (link) ? "bl" : "b",
4905 (unsigned long) insn);
4906 if (exchange)
4907 /* For BLX, set bit 0 of the destination. The cleanup_branch function will
4908 then arrange the switch into Thumb mode. */
4909 offset = (bits (insn, 0, 23) << 2) | (bit (insn, 24) << 1) | 1;
4910 else
4911 offset = bits (insn, 0, 23) << 2;
4912
4913 if (bit (offset, 25))
4914 offset = offset | ~0x3ffffff;
4915
4916 dsc->modinsn[0] = ARM_NOP;
4917
4918 install_b_bl_blx (gdbarch, regs, dsc, cond, exchange, link, offset);
4919 return 0;
4920 }
4921
4922 static int
4923 thumb2_copy_b_bl_blx (struct gdbarch *gdbarch, uint16_t insn1,
4924 uint16_t insn2, struct regcache *regs,
4925 arm_displaced_step_closure *dsc)
4926 {
4927 int link = bit (insn2, 14);
4928 int exchange = link && !bit (insn2, 12);
4929 int cond = INST_AL;
4930 long offset = 0;
4931 int j1 = bit (insn2, 13);
4932 int j2 = bit (insn2, 11);
4933 int s = sbits (insn1, 10, 10);
4934 int i1 = !(j1 ^ bit (insn1, 10));
4935 int i2 = !(j2 ^ bit (insn1, 10));
4936
4937 if (!link && !exchange) /* B */
4938 {
4939 offset = (bits (insn2, 0, 10) << 1);
4940 if (bit (insn2, 12)) /* Encoding T4 */
4941 {
4942 offset |= (bits (insn1, 0, 9) << 12)
4943 | (i2 << 22)
4944 | (i1 << 23)
4945 | (s << 24);
4946 cond = INST_AL;
4947 }
4948 else /* Encoding T3 */
4949 {
4950 offset |= (bits (insn1, 0, 5) << 12)
4951 | (j1 << 18)
4952 | (j2 << 19)
4953 | (s << 20);
4954 cond = bits (insn1, 6, 9);
4955 }
4956 }
4957 else
4958 {
4959 offset = (bits (insn1, 0, 9) << 12);
4960 offset |= ((i2 << 22) | (i1 << 23) | (s << 24));
4961 offset |= exchange ?
4962 (bits (insn2, 1, 10) << 2) : (bits (insn2, 0, 10) << 1);
4963 }
4964
4965 if (debug_displaced)
4966 fprintf_unfiltered (gdb_stdlog, "displaced: copying %s insn "
4967 "%.4x %.4x with offset %.8lx\n",
4968 link ? (exchange) ? "blx" : "bl" : "b",
4969 insn1, insn2, offset);
4970
4971 dsc->modinsn[0] = THUMB_NOP;
4972
4973 install_b_bl_blx (gdbarch, regs, dsc, cond, exchange, link, offset);
4974 return 0;
4975 }
4976
4977 /* Copy B Thumb instructions. */
4978 static int
4979 thumb_copy_b (struct gdbarch *gdbarch, uint16_t insn,
4980 arm_displaced_step_closure *dsc)
4981 {
4982 unsigned int cond = 0;
4983 int offset = 0;
4984 unsigned short bit_12_15 = bits (insn, 12, 15);
4985 CORE_ADDR from = dsc->insn_addr;
4986
4987 if (bit_12_15 == 0xd)
4988 {
4989 /* offset = SignExtend (imm8:0, 32) */
4990 offset = sbits ((insn << 1), 0, 8);
4991 cond = bits (insn, 8, 11);
4992 }
4993 else if (bit_12_15 == 0xe) /* Encoding T2 */
4994 {
4995 offset = sbits ((insn << 1), 0, 11);
4996 cond = INST_AL;
4997 }
4998
4999 if (debug_displaced)
5000 fprintf_unfiltered (gdb_stdlog,
5001 "displaced: copying b immediate insn %.4x "
5002 "with offset %d\n", insn, offset);
5003
5004 dsc->u.branch.cond = cond;
5005 dsc->u.branch.link = 0;
5006 dsc->u.branch.exchange = 0;
5007 dsc->u.branch.dest = from + 4 + offset;
5008
5009 dsc->modinsn[0] = THUMB_NOP;
5010
5011 dsc->cleanup = &cleanup_branch;
5012
5013 return 0;
5014 }
5015
5016 /* Copy BX/BLX with register-specified destinations. */
5017
5018 static void
5019 install_bx_blx_reg (struct gdbarch *gdbarch, struct regcache *regs,
5020 arm_displaced_step_closure *dsc, int link,
5021 unsigned int cond, unsigned int rm)
5022 {
5023 /* Implement {BX,BLX}<cond> <reg>" as:
5024
5025 Preparation: cond <- instruction condition
5026 Insn: mov r0, r0 (nop)
5027 Cleanup: if (condition true) { r14 <- pc; pc <- dest; }.
5028
5029 Don't set r14 in cleanup for BX. */
5030
5031 dsc->u.branch.dest = displaced_read_reg (regs, dsc, rm);
5032
5033 dsc->u.branch.cond = cond;
5034 dsc->u.branch.link = link;
5035
5036 dsc->u.branch.exchange = 1;
5037
5038 dsc->cleanup = &cleanup_branch;
5039 }
5040
5041 static int
5042 arm_copy_bx_blx_reg (struct gdbarch *gdbarch, uint32_t insn,
5043 struct regcache *regs, arm_displaced_step_closure *dsc)
5044 {
5045 unsigned int cond = bits (insn, 28, 31);
5046 /* BX: x12xxx1x
5047 BLX: x12xxx3x. */
5048 int link = bit (insn, 5);
5049 unsigned int rm = bits (insn, 0, 3);
5050
5051 if (debug_displaced)
5052 fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.8lx",
5053 (unsigned long) insn);
5054
5055 dsc->modinsn[0] = ARM_NOP;
5056
5057 install_bx_blx_reg (gdbarch, regs, dsc, link, cond, rm);
5058 return 0;
5059 }
5060
5061 static int
5062 thumb_copy_bx_blx_reg (struct gdbarch *gdbarch, uint16_t insn,
5063 struct regcache *regs,
5064 arm_displaced_step_closure *dsc)
5065 {
5066 int link = bit (insn, 7);
5067 unsigned int rm = bits (insn, 3, 6);
5068
5069 if (debug_displaced)
5070 fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.4x",
5071 (unsigned short) insn);
5072
5073 dsc->modinsn[0] = THUMB_NOP;
5074
5075 install_bx_blx_reg (gdbarch, regs, dsc, link, INST_AL, rm);
5076
5077 return 0;
5078 }
5079
5080
5081 /* Copy/cleanup arithmetic/logic instruction with immediate RHS. */
5082
5083 static void
5084 cleanup_alu_imm (struct gdbarch *gdbarch,
5085 struct regcache *regs, arm_displaced_step_closure *dsc)
5086 {
5087 ULONGEST rd_val = displaced_read_reg (regs, dsc, 0);
5088 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
5089 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
5090 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
5091 }
5092
/* Prepare to displaced-step an ARM data-processing instruction with an
   immediate operand, e.g. "add<cond> rd, rn, #imm".  Rewriting is only
   needed when the Rd or Rn field names the PC; otherwise the
   instruction is copied unmodified.  Returns 0 on success.  */

static int
arm_copy_alu_imm (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
		  arm_displaced_step_closure *dsc)
{
  unsigned int rn = bits (insn, 16, 19);
  unsigned int rd = bits (insn, 12, 15);
  unsigned int op = bits (insn, 21, 24);
  int is_mov = (op == 0xd);
  ULONGEST rd_val, rn_val;

  if (!insn_references_pc (insn, 0x000ff000ul))
    return arm_copy_unmodified (gdbarch, insn, "ALU immediate", dsc);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copying immediate %s insn "
			"%.8lx\n", is_mov ? "move" : "ALU",
			(unsigned long) insn);

  /* Instruction is of form:

     <op><cond> rd, [rn,] #imm

     Rewrite as:

     Preparation: tmp1, tmp2 <- r0, r1;
		  r0, r1 <- rd, rn
     Insn: <op><cond> r0, r1, #imm
     Cleanup: rd <- r0; r0 <- tmp1; r1 <- tmp2
  */

  /* Save the scratch registers and preload them with the original
     operand values, so that a PC operand reads the correct value.  */
  dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
  dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
  rn_val = displaced_read_reg (regs, dsc, rn);
  rd_val = displaced_read_reg (regs, dsc, rd);
  displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
  dsc->rd = rd;

  /* Clear the Rd/Rn fields, retargeting the insn at r0 (and at r1 for
     Rn when the opcode is not MOV, which has no Rn operand).  */
  if (is_mov)
    dsc->modinsn[0] = insn & 0xfff00fff;
  else
    dsc->modinsn[0] = (insn & 0xfff00fff) | 0x10000;

  dsc->cleanup = &cleanup_alu_imm;

  return 0;
}
5140
/* Prepare to displaced-step a Thumb-2 32-bit MOV-immediate instruction
   whose Rm or Rd field names the PC; other cases are copied
   unmodified.  Uses the same scratch-register scheme (and cleanup
   routine) as the ARM variant above.  */

static int
thumb2_copy_alu_imm (struct gdbarch *gdbarch, uint16_t insn1,
		     uint16_t insn2, struct regcache *regs,
		     arm_displaced_step_closure *dsc)
{
  unsigned int op = bits (insn1, 5, 8);
  unsigned int rn, rm, rd;
  ULONGEST rd_val, rn_val;

  rn = bits (insn1, 0, 3); /* Rn */
  rm = bits (insn2, 0, 3); /* Rm */
  rd = bits (insn2, 8, 11); /* Rd */

  /* This routine is only called for instruction MOV.  */
  gdb_assert (op == 0x2 && rn == 0xf);

  if (rm != ARM_PC_REGNUM && rd != ARM_PC_REGNUM)
    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "ALU imm", dsc);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copying reg %s insn %.4x%.4x\n",
			"ALU", insn1, insn2);

  /* Instruction is of form:

     <op><cond> rd, [rn,] #imm

     Rewrite as:

     Preparation: tmp1, tmp2 <- r0, r1;
		  r0, r1 <- rd, rn
     Insn: <op><cond> r0, r1, #imm
     Cleanup: rd <- r0; r0 <- tmp1; r1 <- tmp2
  */

  /* Save scratch registers and preload them with the operand values so
     that a PC operand reads the correct value.  */
  dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
  dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
  rn_val = displaced_read_reg (regs, dsc, rn);
  rd_val = displaced_read_reg (regs, dsc, rd);
  displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
  dsc->rd = rd;

  /* Clear the Rd and Rm fields of the second halfword and retarget the
     instruction at the scratch registers.  */
  dsc->modinsn[0] = insn1;
  dsc->modinsn[1] = ((insn2 & 0xf0f0) | 0x1);
  dsc->numinsns = 2;

  dsc->cleanup = &cleanup_alu_imm;

  return 0;
}
5192
5193 /* Copy/cleanup arithmetic/logic insns with register RHS. */
5194
/* Clean up an out-of-line ALU-register instruction: fetch the result
   left in r0, restore the three saved scratch registers r0-r2, then
   write the result to the original destination register (which may be
   the PC).  */

static void
cleanup_alu_reg (struct gdbarch *gdbarch,
		 struct regcache *regs, arm_displaced_step_closure *dsc)
{
  ULONGEST rd_val;
  int i;

  /* Read the result before r0 is restored below.  */
  rd_val = displaced_read_reg (regs, dsc, 0);

  for (i = 0; i < 3; i++)
    displaced_write_reg (regs, dsc, i, dsc->tmp[i], CANNOT_WRITE_PC);

  displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
}
5209
/* Common setup for displaced-stepping an ALU instruction with register
   operands: save r0-r2, load them with the original Rd/Rn/Rm values
   (so that a PC operand reads the correct value), and register the
   cleanup routine.  The caller has already rewritten the instruction
   to use r0-r2.  */

static void
install_alu_reg (struct gdbarch *gdbarch, struct regcache *regs,
		 arm_displaced_step_closure *dsc,
		 unsigned int rd, unsigned int rn, unsigned int rm)
{
  ULONGEST rd_val, rn_val, rm_val;

  /* Instruction is of form:

     <op><cond> rd, [rn,] rm [, <shift>]

     Rewrite as:

     Preparation: tmp1, tmp2, tmp3 <- r0, r1, r2;
		  r0, r1, r2 <- rd, rn, rm
     Insn: <op><cond> r0, [r1,] r2 [, <shift>]
     Cleanup: rd <- r0; r0, r1, r2 <- tmp1, tmp2, tmp3
  */

  dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
  dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
  dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
  rd_val = displaced_read_reg (regs, dsc, rd);
  rn_val = displaced_read_reg (regs, dsc, rn);
  rm_val = displaced_read_reg (regs, dsc, rm);
  displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 2, rm_val, CANNOT_WRITE_PC);
  dsc->rd = rd;

  dsc->cleanup = &cleanup_alu_reg;
}
5242
/* Prepare to displaced-step an ARM data-processing instruction with
   register operands, e.g. "add<cond> rd, rn, rm".  Copied unmodified
   unless one of the register fields names the PC.  */

static int
arm_copy_alu_reg (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
		  arm_displaced_step_closure *dsc)
{
  unsigned int op = bits (insn, 21, 24);
  int is_mov = (op == 0xd);

  if (!insn_references_pc (insn, 0x000ff00ful))
    return arm_copy_unmodified (gdbarch, insn, "ALU reg", dsc);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copying reg %s insn %.8lx\n",
			is_mov ? "move" : "ALU", (unsigned long) insn);

  /* Clear the Rd/Rn/Rm fields and retarget the insn at r0/r2 (plus r1
     for Rn when the opcode is not MOV, which has no Rn operand).  */
  if (is_mov)
    dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x2;
  else
    dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x10002;

  install_alu_reg (gdbarch, regs, dsc, bits (insn, 12, 15), bits (insn, 16, 19),
		   bits (insn, 0, 3));
  return 0;
}
5266
/* Prepare to displaced-step a 16-bit Thumb ALU instruction on high
   registers (Rd also acts as the first source operand, hence the
   rd, rd, rm arguments to install_alu_reg).  Copied unmodified unless
   Rd or Rm is the PC.  */

static int
thumb_copy_alu_reg (struct gdbarch *gdbarch, uint16_t insn,
		    struct regcache *regs,
		    arm_displaced_step_closure *dsc)
{
  unsigned rm, rd;

  rm = bits (insn, 3, 6);
  /* Rd is split: bit 7 of the encoding carries its high bit.  */
  rd = (bit (insn, 7) << 3) | bits (insn, 0, 2);

  if (rd != ARM_PC_REGNUM && rm != ARM_PC_REGNUM)
    return thumb_copy_unmodified_16bit (gdbarch, insn, "ALU reg", dsc);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copying ALU reg insn %.4x\n",
			(unsigned short) insn);

  /* Clear the register fields, retargeting the insn at r0 (Rd) and
     r2 (Rm).  */
  dsc->modinsn[0] = ((insn & 0xff00) | 0x10);

  install_alu_reg (gdbarch, regs, dsc, rd, rd, rm);

  return 0;
}
5290
5291 /* Cleanup/copy arithmetic/logic insns with shifted register RHS. */
5292
5293 static void
5294 cleanup_alu_shifted_reg (struct gdbarch *gdbarch,
5295 struct regcache *regs,
5296 arm_displaced_step_closure *dsc)
5297 {
5298 ULONGEST rd_val = displaced_read_reg (regs, dsc, 0);
5299 int i;
5300
5301 for (i = 0; i < 4; i++)
5302 displaced_write_reg (regs, dsc, i, dsc->tmp[i], CANNOT_WRITE_PC);
5303
5304 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
5305 }
5306
/* Common setup for an ALU instruction whose second operand is a
   register shifted by a register: save r0-r3, load them with the
   original Rd/Rn/Rm/Rs values, and register the cleanup routine.  The
   caller has already retargeted the instruction at r0-r3.  */

static void
install_alu_shifted_reg (struct gdbarch *gdbarch, struct regcache *regs,
			 arm_displaced_step_closure *dsc,
			 unsigned int rd, unsigned int rn, unsigned int rm,
			 unsigned rs)
{
  int i;
  ULONGEST rd_val, rn_val, rm_val, rs_val;

  /* Instruction is of form:

     <op><cond> rd, [rn,] rm, <shift> rs

     Rewrite as:

     Preparation: tmp1, tmp2, tmp3, tmp4 <- r0, r1, r2, r3
		  r0, r1, r2, r3 <- rd, rn, rm, rs
     Insn: <op><cond> r0, r1, r2, <shift> r3
     Cleanup: tmp5 <- r0
	      r0, r1, r2, r3 <- tmp1, tmp2, tmp3, tmp4
	      rd <- tmp5
  */

  for (i = 0; i < 4; i++)
    dsc->tmp[i] = displaced_read_reg (regs, dsc, i);

  rd_val = displaced_read_reg (regs, dsc, rd);
  rn_val = displaced_read_reg (regs, dsc, rn);
  rm_val = displaced_read_reg (regs, dsc, rm);
  rs_val = displaced_read_reg (regs, dsc, rs);
  displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 2, rm_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 3, rs_val, CANNOT_WRITE_PC);
  dsc->rd = rd;
  dsc->cleanup = &cleanup_alu_shifted_reg;
}
5344
/* Prepare to displaced-step an ARM data-processing instruction whose
   second operand is a register shifted by a register, e.g.
   "add<cond> rd, rn, rm, lsl rs".  Copied unmodified unless one of
   the register fields names the PC.  */

static int
arm_copy_alu_shifted_reg (struct gdbarch *gdbarch, uint32_t insn,
			  struct regcache *regs,
			  arm_displaced_step_closure *dsc)
{
  unsigned int op = bits (insn, 21, 24);
  int is_mov = (op == 0xd);
  unsigned int rd, rn, rm, rs;

  if (!insn_references_pc (insn, 0x000fff0ful))
    return arm_copy_unmodified (gdbarch, insn, "ALU shifted reg", dsc);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copying shifted reg %s insn "
			"%.8lx\n", is_mov ? "move" : "ALU",
			(unsigned long) insn);

  rn = bits (insn, 16, 19);
  rm = bits (insn, 0, 3);
  rs = bits (insn, 8, 11);
  rd = bits (insn, 12, 15);

  /* Clear the register fields, retargeting the insn at r0 (Rd),
     r2 (Rm) and r3 (Rs) — plus r1 (Rn) when the opcode is not MOV,
     which has no Rn operand.  */
  if (is_mov)
    dsc->modinsn[0] = (insn & 0xfff000f0) | 0x302;
  else
    dsc->modinsn[0] = (insn & 0xfff000f0) | 0x10302;

  install_alu_shifted_reg (gdbarch, regs, dsc, rd, rn, rm, rs);

  return 0;
}
5376
5377 /* Clean up load instructions. */
5378
/* Clean up an out-of-line load: pick up the loaded value(s) from
   r0 (and r1 for a doubleword transfer), restore the scratch
   registers, apply base-register writeback if the original insn had
   it, then move the loaded value(s) into the original destination
   register(s), possibly writing the PC.  */

static void
cleanup_load (struct gdbarch *gdbarch, struct regcache *regs,
	      arm_displaced_step_closure *dsc)
{
  ULONGEST rt_val, rt_val2 = 0, rn_val;

  rt_val = displaced_read_reg (regs, dsc, 0);
  if (dsc->u.ldst.xfersize == 8)
    rt_val2 = displaced_read_reg (regs, dsc, 1);
  /* r2 held the base register during the out-of-line execution.  */
  rn_val = displaced_read_reg (regs, dsc, 2);

  displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
  if (dsc->u.ldst.xfersize > 4)
    displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 2, dsc->tmp[2], CANNOT_WRITE_PC);
  if (!dsc->u.ldst.immed)
    displaced_write_reg (regs, dsc, 3, dsc->tmp[3], CANNOT_WRITE_PC);

  /* Handle register writeback.  */
  if (dsc->u.ldst.writeback)
    displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, CANNOT_WRITE_PC);
  /* Put result in right place.  */
  displaced_write_reg (regs, dsc, dsc->rd, rt_val, LOAD_WRITE_PC);
  if (dsc->u.ldst.xfersize == 8)
    displaced_write_reg (regs, dsc, dsc->rd + 1, rt_val2, LOAD_WRITE_PC);
}
5405
5406 /* Clean up store instructions. */
5407
/* Clean up an out-of-line store: restore the scratch registers that
   were loaded with the operand values, then apply base-register
   writeback if the original instruction had it.  */

static void
cleanup_store (struct gdbarch *gdbarch, struct regcache *regs,
	       arm_displaced_step_closure *dsc)
{
  /* r2 held the base register during the out-of-line execution.  */
  ULONGEST rn_val = displaced_read_reg (regs, dsc, 2);

  displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
  if (dsc->u.ldst.xfersize > 4)
    displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 2, dsc->tmp[2], CANNOT_WRITE_PC);
  if (!dsc->u.ldst.immed)
    displaced_write_reg (regs, dsc, 3, dsc->tmp[3], CANNOT_WRITE_PC);
  /* NOTE(review): r4 is written back only when restore_r4 is *clear*,
     which reads inverted relative to the flag's name — confirm the
     intended semantics against install_load_store (which saves tmp[4]
     for every store) and arm_copy_ldr_str_ldrb_strb (which sets
     restore_r4 = 1 when r4 is used as scratch for a PC store).  */
  if (!dsc->u.ldst.restore_r4)
    displaced_write_reg (regs, dsc, 4, dsc->tmp[4], CANNOT_WRITE_PC);

  /* Writeback.  */
  if (dsc->u.ldst.writeback)
    displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, CANNOT_WRITE_PC);
}
5427
5428 /* Copy "extra" load/store instructions. These are halfword/doubleword
5429 transfers, which have a different encoding to byte/word transfers. */
5430
/* Prepare to displaced-step an "extra" load/store (halfword, signed
   byte, or doubleword transfer).  The opcode is decoded into a
   load/store flag and transfer size via the LOAD/BYTESIZE tables
   below; the original register values are stashed in the scratch
   slots and the instruction is retargeted at r0-r3.  Copied
   unmodified when no register field names the PC.  */

static int
arm_copy_extra_ld_st (struct gdbarch *gdbarch, uint32_t insn, int unprivileged,
		      struct regcache *regs, arm_displaced_step_closure *dsc)
{
  unsigned int op1 = bits (insn, 20, 24);
  unsigned int op2 = bits (insn, 5, 6);
  unsigned int rt = bits (insn, 12, 15);
  unsigned int rn = bits (insn, 16, 19);
  unsigned int rm = bits (insn, 0, 3);
  /* Per-opcode decode tables: whether the transfer is a load, and its
     size in bytes (8 means a two-register doubleword transfer).  */
  char load[12]     = {0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1};
  char bytesize[12] = {2, 2, 2, 2, 8, 1, 8, 1, 8, 2, 8, 2};
  int immed = (op1 & 0x4) != 0;
  int opcode;
  ULONGEST rt_val, rt_val2 = 0, rn_val, rm_val = 0;

  if (!insn_references_pc (insn, 0x000ff00ful))
    return arm_copy_unmodified (gdbarch, insn, "extra load/store", dsc);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copying %sextra load/store "
			"insn %.8lx\n", unprivileged ? "unprivileged " : "",
			(unsigned long) insn);

  opcode = ((op2 << 2) | (op1 & 0x1) | ((op1 & 0x4) >> 1)) - 4;

  if (opcode < 0)
    internal_error (__FILE__, __LINE__,
		    _("copy_extra_ld_st: instruction decode error"));

  /* Save the scratch registers and preload them with the operand
     values so that a PC operand reads the correct value.  */
  dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
  dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
  dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
  if (!immed)
    dsc->tmp[3] = displaced_read_reg (regs, dsc, 3);

  rt_val = displaced_read_reg (regs, dsc, rt);
  if (bytesize[opcode] == 8)
    rt_val2 = displaced_read_reg (regs, dsc, rt + 1);
  rn_val = displaced_read_reg (regs, dsc, rn);
  if (!immed)
    rm_val = displaced_read_reg (regs, dsc, rm);

  displaced_write_reg (regs, dsc, 0, rt_val, CANNOT_WRITE_PC);
  if (bytesize[opcode] == 8)
    displaced_write_reg (regs, dsc, 1, rt_val2, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 2, rn_val, CANNOT_WRITE_PC);
  if (!immed)
    displaced_write_reg (regs, dsc, 3, rm_val, CANNOT_WRITE_PC);

  /* Record the transfer parameters for the cleanup routine.  */
  dsc->rd = rt;
  dsc->u.ldst.xfersize = bytesize[opcode];
  dsc->u.ldst.rn = rn;
  dsc->u.ldst.immed = immed;
  dsc->u.ldst.writeback = bit (insn, 24) == 0 || bit (insn, 21) != 0;
  dsc->u.ldst.restore_r4 = 0;

  if (immed)
    /* {ldr,str}<width><cond> rt, [rt2,] [rn, #imm]
       ->
       {ldr,str}<width><cond> r0, [r1,] [r2, #imm].  */
    dsc->modinsn[0] = (insn & 0xfff00fff) | 0x20000;
  else
    /* {ldr,str}<width><cond> rt, [rt2,] [rn, +/-rm]
       ->
       {ldr,str}<width><cond> r0, [r1,] [r2, +/-r3].  */
    dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x20003;

  dsc->cleanup = load[opcode] ? &cleanup_load : &cleanup_store;

  return 0;
}
5502
5503 /* Copy byte/half word/word loads and stores. */
5504
/* Common setup for displaced-stepping a byte/halfword/word load or
   store: save the scratch registers, load r0/r2 (and r3 for register
   offsets) with the original Rt/Rn/Rm values, and record the transfer
   parameters for the cleanup routine.  The USERMODE argument is not
   used by this function.  */

static void
install_load_store (struct gdbarch *gdbarch, struct regcache *regs,
		    arm_displaced_step_closure *dsc, int load,
		    int immed, int writeback, int size, int usermode,
		    int rt, int rm, int rn)
{
  ULONGEST rt_val, rn_val, rm_val = 0;

  dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
  dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
  if (!immed)
    dsc->tmp[3] = displaced_read_reg (regs, dsc, 3);
  if (!load)
    /* Stores may additionally use r4 as scratch (see below).  */
    dsc->tmp[4] = displaced_read_reg (regs, dsc, 4);

  rt_val = displaced_read_reg (regs, dsc, rt);
  rn_val = displaced_read_reg (regs, dsc, rn);
  if (!immed)
    rm_val = displaced_read_reg (regs, dsc, rm);

  displaced_write_reg (regs, dsc, 0, rt_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 2, rn_val, CANNOT_WRITE_PC);
  if (!immed)
    displaced_write_reg (regs, dsc, 3, rm_val, CANNOT_WRITE_PC);
  dsc->rd = rt;
  dsc->u.ldst.xfersize = size;
  dsc->u.ldst.rn = rn;
  dsc->u.ldst.immed = immed;
  dsc->u.ldst.writeback = writeback;

  /* To write PC we can do:

     Before this sequence of instructions:
     r0 is the PC value got from displaced_read_reg, so r0 = from + 8;
     r2 is the Rn value got from dispalced_read_reg.

     Insn1: push {pc} Write address of STR instruction + offset on stack
     Insn2: pop  {r4} Read it back from stack, r4 = addr(Insn1) + offset
     Insn3: sub r4, r4, pc   r4 = addr(Insn1) + offset - pc
				= addr(Insn1) + offset - addr(Insn3) - 8
				= offset - 16
     Insn4: add r4, r4, #8   r4 = offset - 8
     Insn5: add r0, r0, r4   r0 = from + 8 + offset - 8
				= from + offset
     Insn6: str r0, [r2, #imm] (or str r0, [r2, r3])

     Otherwise we don't know what value to write for PC, since the offset is
     architecture-dependent (sometimes PC+8, sometimes PC+12).  More details
     of this can be found in Section "Saving from r15" in
     http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dui0204g/Cihbjifh.html */

  dsc->cleanup = load ? &cleanup_load : &cleanup_store;
}
5558
5559
/* Prepare to displaced-step a Thumb-2 PC-relative load (LDR literal)
   of SIZE bytes.  The PC-relative form is rewritten into a
   register-offset load through r2 (word-aligned PC base) and r3
   (signed immediate offset).  */

static int
thumb2_copy_load_literal (struct gdbarch *gdbarch, uint16_t insn1,
			  uint16_t insn2, struct regcache *regs,
			  arm_displaced_step_closure *dsc, int size)
{
  unsigned int u_bit = bit (insn1, 7);
  unsigned int rt = bits (insn2, 12, 15);
  int imm12 = bits (insn2, 0, 11);
  ULONGEST pc_val;

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog,
			"displaced: copying ldr pc (0x%x) R%d %c imm12 %.4x\n",
			(unsigned int) dsc->insn_addr, rt, u_bit ? '+' : '-',
			imm12);

  /* The U bit selects whether the offset is added or subtracted.  */
  if (!u_bit)
    imm12 = -1 * imm12;

  /* Rewrite instruction LDR Rt imm12 into:

     Prepare: tmp[0] <- r0, tmp[1] <- r2, tmp[2] <- r3, r2 <- pc, r3 <- imm12

     LDR R0, R2, R3,

     Cleanup: rt <- r0, r0 <- tmp[0], r2 <- tmp[1], r3 <- tmp[2].  */


  dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
  dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
  dsc->tmp[3] = displaced_read_reg (regs, dsc, 3);

  pc_val = displaced_read_reg (regs, dsc, ARM_PC_REGNUM);

  /* The literal base is the PC aligned down to a word boundary.  */
  pc_val = pc_val & 0xfffffffc;

  displaced_write_reg (regs, dsc, 2, pc_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 3, imm12, CANNOT_WRITE_PC);

  dsc->rd = rt;

  dsc->u.ldst.xfersize = size;
  dsc->u.ldst.immed = 0;
  dsc->u.ldst.writeback = 0;
  dsc->u.ldst.restore_r4 = 0;

  /* LDR R0, R2, R3 */
  dsc->modinsn[0] = 0xf852;
  dsc->modinsn[1] = 0x3;
  dsc->numinsns = 2;

  dsc->cleanup = &cleanup_load;

  return 0;
}
5615
/* Prepare to displaced-step a Thumb-2 32-bit load with register or
   immediate offset (selected by IMMED).  Copied unmodified unless Rt
   or Rn names the PC.  */

static int
thumb2_copy_load_reg_imm (struct gdbarch *gdbarch, uint16_t insn1,
			  uint16_t insn2, struct regcache *regs,
			  arm_displaced_step_closure *dsc,
			  int writeback, int immed)
{
  unsigned int rt = bits (insn2, 12, 15);
  unsigned int rn = bits (insn1, 0, 3);
  unsigned int rm = bits (insn2, 0, 3);  /* Only valid if !immed.  */
  /* In LDR (register), there is also a register Rm, which is not allowed to
     be PC, so we don't have to check it.  */

  if (rt != ARM_PC_REGNUM && rn != ARM_PC_REGNUM)
    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "load",
					dsc);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog,
			"displaced: copying ldr r%d [r%d] insn %.4x%.4x\n",
			rt, rn, insn1, insn2);

  install_load_store (gdbarch, regs, dsc, 1, immed, writeback, 4,
		      0, rt, rm, rn);

  dsc->u.ldst.restore_r4 = 0;

  if (immed)
    /* ldr[b]<cond> rt, [rn, #imm], etc.
       ->
       ldr[b]<cond> r0, [r2, #imm].  */
    {
      dsc->modinsn[0] = (insn1 & 0xfff0) | 0x2;
      dsc->modinsn[1] = insn2 & 0x0fff;
    }
  else
    /* ldr[b]<cond> rt, [rn, rm], etc.
       ->
       ldr[b]<cond> r0, [r2, r3].  */
    {
      dsc->modinsn[0] = (insn1 & 0xfff0) | 0x2;
      dsc->modinsn[1] = (insn2 & 0x0ff0) | 0x3;
    }

  dsc->numinsns = 2;

  return 0;
}
5663
5664
/* Prepare to displaced-step an ARM byte/word load or store.  Loads
   and non-PC stores are simply retargeted at r0/r2/r3; a store of the
   PC requires a six-instruction sequence (see install_load_store's
   comment) that uses r4 as extra scratch to synthesize the value the
   original instruction would have stored.  Copied unmodified when no
   register field names the PC.  */

static int
arm_copy_ldr_str_ldrb_strb (struct gdbarch *gdbarch, uint32_t insn,
			    struct regcache *regs,
			    arm_displaced_step_closure *dsc,
			    int load, int size, int usermode)
{
  int immed = !bit (insn, 25);
  int writeback = (bit (insn, 24) == 0 || bit (insn, 21) != 0);
  unsigned int rt = bits (insn, 12, 15);
  unsigned int rn = bits (insn, 16, 19);
  unsigned int rm = bits (insn, 0, 3);  /* Only valid if !immed.  */

  if (!insn_references_pc (insn, 0x000ff00ful))
    return arm_copy_unmodified (gdbarch, insn, "load/store", dsc);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog,
			"displaced: copying %s%s r%d [r%d] insn %.8lx\n",
			load ? (size == 1 ? "ldrb" : "ldr")
			     : (size == 1 ? "strb" : "str"), usermode ? "t" : "",
			rt, rn,
			(unsigned long) insn);

  install_load_store (gdbarch, regs, dsc, load, immed, writeback, size,
		      usermode, rt, rm, rn);

  if (load || rt != ARM_PC_REGNUM)
    {
      dsc->u.ldst.restore_r4 = 0;

      if (immed)
	/* {ldr,str}[b]<cond> rt, [rn, #imm], etc.
	   ->
	   {ldr,str}[b]<cond> r0, [r2, #imm].  */
	dsc->modinsn[0] = (insn & 0xfff00fff) | 0x20000;
      else
	/* {ldr,str}[b]<cond> rt, [rn, rm], etc.
	   ->
	   {ldr,str}[b]<cond> r0, [r2, r3].  */
	dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x20003;
    }
  else
    {
      /* We need to use r4 as scratch.  Make sure it's restored afterwards.  */
      dsc->u.ldst.restore_r4 = 1;
      dsc->modinsn[0] = 0xe92d8000;  /* push {pc} */
      dsc->modinsn[1] = 0xe8bd0010;  /* pop  {r4} */
      dsc->modinsn[2] = 0xe044400f;  /* sub r4, r4, pc.  */
      dsc->modinsn[3] = 0xe2844008;  /* add r4, r4, #8.  */
      dsc->modinsn[4] = 0xe0800004;  /* add r0, r0, r4.  */

      /* As above.  */
      if (immed)
	dsc->modinsn[5] = (insn & 0xfff00fff) | 0x20000;
      else
	dsc->modinsn[5] = (insn & 0xfff00ff0) | 0x20003;

      dsc->numinsns = 6;
    }

  dsc->cleanup = load ? &cleanup_load : &cleanup_store;

  return 0;
}
5729
5730 /* Cleanup LDM instructions with fully-populated register list. This is an
5731 unfortunate corner case: it's impossible to implement correctly by modifying
5732 the instruction. The issue is as follows: we have an instruction,
5733
5734 ldm rN, {r0-r15}
5735
5736 which we must rewrite to avoid loading PC. A possible solution would be to
5737 do the load in two halves, something like (with suitable cleanup
5738 afterwards):
5739
5740 mov r8, rN
5741 ldm[id][ab] r8!, {r0-r7}
5742 str r7, <temp>
5743 ldm[id][ab] r8, {r7-r14}
5744 <bkpt>
5745
5746 but at present there's no suitable place for <temp>, since the scratch space
5747 is overwritten before the cleanup routine is called. For now, we simply
5748 emulate the instruction. */
5749
/* Emulate an LDM with a fully-populated register list (see the comment
   above): read each word from memory and write it into the
   corresponding register, honouring the condition code, transfer
   direction, before/after addressing and writeback of the original
   instruction.  */

static void
cleanup_block_load_all (struct gdbarch *gdbarch, struct regcache *regs,
			arm_displaced_step_closure *dsc)
{
  int inc = dsc->u.block.increment;
  /* Address adjustments applied around each word, matching the
     increment/decrement and before/after flavour of the insn.  */
  int bump_before = dsc->u.block.before ? (inc ? 4 : -4) : 0;
  int bump_after = dsc->u.block.before ? 0 : (inc ? 4 : -4);
  uint32_t regmask = dsc->u.block.regmask;
  int regno = inc ? 0 : 15;
  CORE_ADDR xfer_addr = dsc->u.block.xfer_addr;
  int exception_return = dsc->u.block.load && dsc->u.block.user
			 && (regmask & 0x8000) != 0;
  uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
  int do_transfer = condition_true (dsc->u.block.cond, status);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  if (!do_transfer)
    return;

  /* If the instruction is ldm rN, {...pc}^, I don't think there's anything
     sensible we can do here.  Complain loudly.  */
  if (exception_return)
    error (_("Cannot single-step exception return"));

  /* We don't handle any stores here for now.  */
  gdb_assert (dsc->u.block.load != 0);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: emulating block transfer: "
			"%s %s %s\n", dsc->u.block.load ? "ldm" : "stm",
			dsc->u.block.increment ? "inc" : "dec",
			dsc->u.block.before ? "before" : "after");

  while (regmask)
    {
      uint32_t memword;

      /* Advance to the next register remaining in the transfer list,
	 scanning upwards or downwards depending on direction.  */
      if (inc)
	while (regno <= ARM_PC_REGNUM && (regmask & (1 << regno)) == 0)
	  regno++;
      else
	while (regno >= 0 && (regmask & (1 << regno)) == 0)
	  regno--;

      xfer_addr += bump_before;

      memword = read_memory_unsigned_integer (xfer_addr, 4, byte_order);
      displaced_write_reg (regs, dsc, regno, memword, LOAD_WRITE_PC);

      xfer_addr += bump_after;

      regmask &= ~(1 << regno);
    }

  if (dsc->u.block.writeback)
    displaced_write_reg (regs, dsc, dsc->u.block.rn, xfer_addr,
			 CANNOT_WRITE_PC);
}
5808
5809 /* Clean up an STM which included the PC in the register list. */
5810
5811 static void
5812 cleanup_block_store_pc (struct gdbarch *gdbarch, struct regcache *regs,
5813 arm_displaced_step_closure *dsc)
5814 {
5815 uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
5816 int store_executed = condition_true (dsc->u.block.cond, status);
5817 CORE_ADDR pc_stored_at, transferred_regs = bitcount (dsc->u.block.regmask);
5818 CORE_ADDR stm_insn_addr;
5819 uint32_t pc_val;
5820 long offset;
5821 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
5822
5823 /* If condition code fails, there's nothing else to do. */
5824 if (!store_executed)
5825 return;
5826
5827 if (dsc->u.block.increment)
5828 {
5829 pc_stored_at = dsc->u.block.xfer_addr + 4 * transferred_regs;
5830
5831 if (dsc->u.block.before)
5832 pc_stored_at += 4;
5833 }
5834 else
5835 {
5836 pc_stored_at = dsc->u.block.xfer_addr;
5837
5838 if (dsc->u.block.before)
5839 pc_stored_at -= 4;
5840 }
5841
5842 pc_val = read_memory_unsigned_integer (pc_stored_at, 4, byte_order);
5843 stm_insn_addr = dsc->scratch_base;
5844 offset = pc_val - stm_insn_addr;
5845
5846 if (debug_displaced)
5847 fprintf_unfiltered (gdb_stdlog, "displaced: detected PC offset %.8lx for "
5848 "STM instruction\n", offset);
5849
5850 /* Rewrite the stored PC to the proper value for the non-displaced original
5851 instruction. */
5852 write_memory_unsigned_integer (pc_stored_at, 4, byte_order,
5853 dsc->insn_addr + offset);
5854 }
5855
5856 /* Clean up an LDM which includes the PC in the register list. We clumped all
5857 the registers in the transferred list into a contiguous range r0...rX (to
5858 avoid loading PC directly and losing control of the debugged program), so we
5859 must undo that here. */
5860
/* See the comment above: the LDM was rewritten to load into the
   contiguous range r0...rX, so shuffle each loaded value into its
   intended register (working downwards from the PC), restore any
   scratch registers we clobbered along the way, and finally emulate
   base-register writeback manually (it was disabled in the copied
   instruction).  */

static void
cleanup_block_load_pc (struct gdbarch *gdbarch,
		       struct regcache *regs,
		       arm_displaced_step_closure *dsc)
{
  uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
  int load_executed = condition_true (dsc->u.block.cond, status);
  unsigned int mask = dsc->u.block.regmask, write_reg = ARM_PC_REGNUM;
  unsigned int regs_loaded = bitcount (mask);
  unsigned int num_to_shuffle = regs_loaded, clobbered;

  /* The method employed here will fail if the register list is fully populated
     (we need to avoid loading PC directly).  */
  gdb_assert (num_to_shuffle < 16);

  if (!load_executed)
    return;

  /* Bitmask of the low registers r0...rX that the modified insn
     actually loaded into (and hence clobbered).  */
  clobbered = (1 << num_to_shuffle) - 1;

  while (num_to_shuffle > 0)
    {
      if ((mask & (1 << write_reg)) != 0)
	{
	  /* The highest remaining destination register was loaded into
	     the highest remaining low register.  */
	  unsigned int read_reg = num_to_shuffle - 1;

	  if (read_reg != write_reg)
	    {
	      ULONGEST rval = displaced_read_reg (regs, dsc, read_reg);
	      displaced_write_reg (regs, dsc, write_reg, rval, LOAD_WRITE_PC);
	      if (debug_displaced)
		fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: move "
				    "loaded register r%d to r%d\n"), read_reg,
				    write_reg);
	    }
	  else if (debug_displaced)
	    fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: register "
				"r%d already in the right place\n"),
				write_reg);

	  clobbered &= ~(1 << write_reg);

	  num_to_shuffle--;
	}

      write_reg--;
    }

  /* Restore any registers we scribbled over.  */
  for (write_reg = 0; clobbered != 0; write_reg++)
    {
      if ((clobbered & (1 << write_reg)) != 0)
	{
	  displaced_write_reg (regs, dsc, write_reg, dsc->tmp[write_reg],
			       CANNOT_WRITE_PC);
	  if (debug_displaced)
	    fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: restored "
				"clobbered register r%d\n"), write_reg);
	  clobbered &= ~(1 << write_reg);
	}
    }

  /* Perform register writeback manually.  */
  if (dsc->u.block.writeback)
    {
      ULONGEST new_rn_val = dsc->u.block.xfer_addr;

      if (dsc->u.block.increment)
	new_rn_val += regs_loaded * 4;
      else
	new_rn_val -= regs_loaded * 4;

      displaced_write_reg (regs, dsc, dsc->u.block.rn, new_rn_val,
			   CANNOT_WRITE_PC);
    }
}
5937
5938 /* Handle ldm/stm, apart from some tricky cases which are unlikely to occur
5939 in user-level code (in particular exception return, ldm rn, {...pc}^). */
5940
/* Prepare to displaced-step an ARM LDM/STM.  Transfers that don't
   involve the PC run unmodified out of line.  An LDM of a full
   register list is emulated entirely in the cleanup routine; an LDM
   whose list includes the PC is rewritten to load a contiguous
   r0...rX list (shuffled back afterwards); an STM including the PC
   runs as-is, with the stored PC value patched afterwards.  */

static int
arm_copy_block_xfer (struct gdbarch *gdbarch, uint32_t insn,
		     struct regcache *regs,
		     arm_displaced_step_closure *dsc)
{
  int load = bit (insn, 20);
  int user = bit (insn, 22);
  int increment = bit (insn, 23);
  int before = bit (insn, 24);
  int writeback = bit (insn, 21);
  int rn = bits (insn, 16, 19);

  /* Block transfers which don't mention PC can be run directly
     out-of-line.  */
  if (rn != ARM_PC_REGNUM && (insn & 0x8000) == 0)
    return arm_copy_unmodified (gdbarch, insn, "ldm/stm", dsc);

  if (rn == ARM_PC_REGNUM)
    {
      warning (_("displaced: Unpredictable LDM or STM with "
		 "base register r15"));
      return arm_copy_unmodified (gdbarch, insn, "unpredictable ldm/stm", dsc);
    }

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copying block transfer insn "
			"%.8lx\n", (unsigned long) insn);

  /* Record the transfer parameters for the cleanup routine.  */
  dsc->u.block.xfer_addr = displaced_read_reg (regs, dsc, rn);
  dsc->u.block.rn = rn;

  dsc->u.block.load = load;
  dsc->u.block.user = user;
  dsc->u.block.increment = increment;
  dsc->u.block.before = before;
  dsc->u.block.writeback = writeback;
  dsc->u.block.cond = bits (insn, 28, 31);

  dsc->u.block.regmask = insn & 0xffff;

  if (load)
    {
      if ((insn & 0xffff) == 0xffff)
	{
	  /* LDM with a fully-populated register list.  This case is
	     particularly tricky.  Implement for now by fully emulating the
	     instruction (which might not behave perfectly in all cases, but
	     these instructions should be rare enough for that not to matter
	     too much).  */
	  dsc->modinsn[0] = ARM_NOP;

	  dsc->cleanup = &cleanup_block_load_all;
	}
      else
	{
	  /* LDM of a list of registers which includes PC.  Implement by
	     rewriting the list of registers to be transferred into a
	     contiguous chunk r0...rX before doing the transfer, then shuffling
	     registers into the correct places in the cleanup routine.  */
	  unsigned int regmask = insn & 0xffff;
	  unsigned int num_in_list = bitcount (regmask), new_regmask;
	  unsigned int i;

	  /* Save the low registers the modified list will clobber.  */
	  for (i = 0; i < num_in_list; i++)
	    dsc->tmp[i] = displaced_read_reg (regs, dsc, i);

	  /* Writeback makes things complicated.  We need to avoid clobbering
	     the base register with one of the registers in our modified
	     register list, but just using a different register can't work in
	     all cases, e.g.:

	       ldm r14!, {r0-r13,pc}

	     which would need to be rewritten as:

	       ldm rN!, {r0-r14}

	     but that can't work, because there's no free register for N.

	     Solve this by turning off the writeback bit, and emulating
	     writeback manually in the cleanup routine.  */

	  if (writeback)
	    insn &= ~(1 << 21);

	  new_regmask = (1 << num_in_list) - 1;

	  if (debug_displaced)
	    fprintf_unfiltered (gdb_stdlog, _("displaced: LDM r%d%s, "
				"{..., pc}: original reg list %.4x, modified "
				"list %.4x\n"), rn, writeback ? "!" : "",
				(int) insn & 0xffff, new_regmask);

	  dsc->modinsn[0] = (insn & ~0xffff) | (new_regmask & 0xffff);

	  dsc->cleanup = &cleanup_block_load_pc;
	}
    }
  else
    {
      /* STM of a list of registers which includes PC.  Run the instruction
	 as-is, but out of line: this will store the wrong value for the PC,
	 so we must manually fix up the memory in the cleanup routine.
	 Doing things this way has the advantage that we can auto-detect
	 the offset of the PC write (which is architecture-dependent) in
	 the cleanup routine.  */
      dsc->modinsn[0] = insn;

      dsc->cleanup = &cleanup_block_store_pc;
    }

  return 0;
}
6054
/* Prepare to displaced-step a Thumb-2 32-bit LDM/STM, following the
   same strategy as arm_copy_block_xfer above.  A fully-populated LDM
   list cannot occur in Thumb-2 (bit 13 of the register list is always
   zero), hence the assertion in that branch.  */

static int
thumb2_copy_block_xfer (struct gdbarch *gdbarch, uint16_t insn1, uint16_t insn2,
			struct regcache *regs,
			arm_displaced_step_closure *dsc)
{
  int rn = bits (insn1, 0, 3);
  int load = bit (insn1, 4);
  int writeback = bit (insn1, 5);

  /* Block transfers which don't mention PC can be run directly
     out-of-line.  */
  if (rn != ARM_PC_REGNUM && (insn2 & 0x8000) == 0)
    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "ldm/stm", dsc);

  if (rn == ARM_PC_REGNUM)
    {
      warning (_("displaced: Unpredictable LDM or STM with "
		 "base register r15"));
      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					  "unpredictable ldm/stm", dsc);
    }

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copying block transfer insn "
			"%.4x%.4x\n", insn1, insn2);

  /* Clear bit 13, since it should be always zero.  */
  dsc->u.block.regmask = (insn2 & 0xdfff);
  dsc->u.block.rn = rn;

  /* Record the transfer parameters for the cleanup routine.  Thumb-2
     LDM/STM is unconditional (outside an IT block), hence INST_AL.  */
  dsc->u.block.load = load;
  dsc->u.block.user = 0;
  dsc->u.block.increment = bit (insn1, 7);
  dsc->u.block.before = bit (insn1, 8);
  dsc->u.block.writeback = writeback;
  dsc->u.block.cond = INST_AL;
  dsc->u.block.xfer_addr = displaced_read_reg (regs, dsc, rn);

  if (load)
    {
      if (dsc->u.block.regmask == 0xffff)
	{
	  /* This branch is impossible to happen.  */
	  gdb_assert (0);
	}
      else
	{
	  unsigned int regmask = dsc->u.block.regmask;
	  unsigned int num_in_list = bitcount (regmask), new_regmask;
	  unsigned int i;

	  /* Save the low registers the modified list will clobber.  */
	  for (i = 0; i < num_in_list; i++)
	    dsc->tmp[i] = displaced_read_reg (regs, dsc, i);

	  /* Disable writeback; it is emulated in the cleanup routine
	     (see arm_copy_block_xfer for the rationale).  */
	  if (writeback)
	    insn1 &= ~(1 << 5);

	  new_regmask = (1 << num_in_list) - 1;

	  if (debug_displaced)
	    fprintf_unfiltered (gdb_stdlog, _("displaced: LDM r%d%s, "
				"{..., pc}: original reg list %.4x, modified "
				"list %.4x\n"), rn, writeback ? "!" : "",
				(int) dsc->u.block.regmask, new_regmask);

	  dsc->modinsn[0] = insn1;
	  dsc->modinsn[1] = (new_regmask & 0xffff);
	  dsc->numinsns = 2;

	  dsc->cleanup = &cleanup_block_load_pc;
	}
    }
  else
    {
      dsc->modinsn[0] = insn1;
      dsc->modinsn[1] = insn2;
      dsc->numinsns = 2;
      dsc->cleanup = &cleanup_block_store_pc;
    }
  return 0;
}
6136
6137 /* Wrapper over read_memory_unsigned_integer for use in arm_get_next_pcs.
6138 This is used to avoid a dependency on BFD's bfd_endian enum. */
6139
6140 ULONGEST
6141 arm_get_next_pcs_read_memory_unsigned_integer (CORE_ADDR memaddr, int len,
6142 int byte_order)
6143 {
6144 return read_memory_unsigned_integer (memaddr, len,
6145 (enum bfd_endian) byte_order);
6146 }
6147
6148 /* Wrapper over gdbarch_addr_bits_remove for use in arm_get_next_pcs. */
6149
6150 CORE_ADDR
6151 arm_get_next_pcs_addr_bits_remove (struct arm_get_next_pcs *self,
6152 CORE_ADDR val)
6153 {
6154 return gdbarch_addr_bits_remove (self->regcache->arch (), val);
6155 }
6156
/* Wrapper over syscall_next_pc for use in get_next_pcs.  This generic
   version simply returns 0; SELF is unused here.  */

static CORE_ADDR
arm_get_next_pcs_syscall_next_pc (struct arm_get_next_pcs *self)
{
  return 0;
}
6164
6165 /* Wrapper over arm_is_thumb for use in arm_get_next_pcs. */
6166
6167 int
6168 arm_get_next_pcs_is_thumb (struct arm_get_next_pcs *self)
6169 {
6170 return arm_is_thumb (self->regcache);
6171 }
6172
6173 /* single_step() is called just before we want to resume the inferior,
6174 if we want to single-step it but there is no hardware or kernel
6175 single-step support. We find the target of the coming instructions
6176 and breakpoint them. */
6177
6178 std::vector<CORE_ADDR>
6179 arm_software_single_step (struct regcache *regcache)
6180 {
6181 struct gdbarch *gdbarch = regcache->arch ();
6182 struct arm_get_next_pcs next_pcs_ctx;
6183
6184 arm_get_next_pcs_ctor (&next_pcs_ctx,
6185 &arm_get_next_pcs_ops,
6186 gdbarch_byte_order (gdbarch),
6187 gdbarch_byte_order_for_code (gdbarch),
6188 0,
6189 regcache);
6190
6191 std::vector<CORE_ADDR> next_pcs = arm_get_next_pcs (&next_pcs_ctx);
6192
6193 for (CORE_ADDR &pc_ref : next_pcs)
6194 pc_ref = gdbarch_addr_bits_remove (gdbarch, pc_ref);
6195
6196 return next_pcs;
6197 }
6198
/* Cleanup/copy SVC (SWI) instructions.  These two functions are overridden
   for Linux, where some SVC instructions must be treated specially.  */

static void
cleanup_svc (struct gdbarch *gdbarch, struct regcache *regs,
	     arm_displaced_step_closure *dsc)
{
  /* Resume execution at the instruction following the original SVC.  */
  CORE_ADDR resume_addr = dsc->insn_addr + dsc->insn_size;

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: cleanup for svc, resume at "
			"%.8lx\n", (unsigned long) resume_addr);

  displaced_write_reg (regs, dsc, ARM_PC_REGNUM, resume_addr, BRANCH_WRITE_PC);
}
6214
6215
/* Common copy routine for svc instruction.  Used by both the ARM and
   Thumb copy routines below.  Returns 0 on success (or the result of
   the OS-specific handler, if one is installed).  */

static int
install_svc (struct gdbarch *gdbarch, struct regcache *regs,
	     arm_displaced_step_closure *dsc)
{
  /* Preparation: none.
     Insn: unmodified svc.
     Cleanup: pc <- insn_addr + insn_size.  */

  /* Pretend we wrote to the PC, so cleanup doesn't set PC to the next
     instruction.  */
  dsc->wrote_to_pc = 1;

  /* Allow OS-specific code to override SVC handling.  */
  if (dsc->u.svc.copy_svc_os)
    return dsc->u.svc.copy_svc_os (gdbarch, regs, dsc);
  else
    {
      dsc->cleanup = &cleanup_svc;
      return 0;
    }
}
6239
6240 static int
6241 arm_copy_svc (struct gdbarch *gdbarch, uint32_t insn,
6242 struct regcache *regs, arm_displaced_step_closure *dsc)
6243 {
6244
6245 if (debug_displaced)
6246 fprintf_unfiltered (gdb_stdlog, "displaced: copying svc insn %.8lx\n",
6247 (unsigned long) insn);
6248
6249 dsc->modinsn[0] = insn;
6250
6251 return install_svc (gdbarch, regs, dsc);
6252 }
6253
6254 static int
6255 thumb_copy_svc (struct gdbarch *gdbarch, uint16_t insn,
6256 struct regcache *regs, arm_displaced_step_closure *dsc)
6257 {
6258
6259 if (debug_displaced)
6260 fprintf_unfiltered (gdb_stdlog, "displaced: copying svc insn %.4x\n",
6261 insn);
6262
6263 dsc->modinsn[0] = insn;
6264
6265 return install_svc (gdbarch, regs, dsc);
6266 }
6267
6268 /* Copy undefined instructions. */
6269
6270 static int
6271 arm_copy_undef (struct gdbarch *gdbarch, uint32_t insn,
6272 arm_displaced_step_closure *dsc)
6273 {
6274 if (debug_displaced)
6275 fprintf_unfiltered (gdb_stdlog,
6276 "displaced: copying undefined insn %.8lx\n",
6277 (unsigned long) insn);
6278
6279 dsc->modinsn[0] = insn;
6280
6281 return 0;
6282 }
6283
6284 static int
6285 thumb_32bit_copy_undef (struct gdbarch *gdbarch, uint16_t insn1, uint16_t insn2,
6286 arm_displaced_step_closure *dsc)
6287 {
6288
6289 if (debug_displaced)
6290 fprintf_unfiltered (gdb_stdlog, "displaced: copying undefined insn "
6291 "%.4x %.4x\n", (unsigned short) insn1,
6292 (unsigned short) insn2);
6293
6294 dsc->modinsn[0] = insn1;
6295 dsc->modinsn[1] = insn2;
6296 dsc->numinsns = 2;
6297
6298 return 0;
6299 }
6300
6301 /* Copy unpredictable instructions. */
6302
6303 static int
6304 arm_copy_unpred (struct gdbarch *gdbarch, uint32_t insn,
6305 arm_displaced_step_closure *dsc)
6306 {
6307 if (debug_displaced)
6308 fprintf_unfiltered (gdb_stdlog, "displaced: copying unpredictable insn "
6309 "%.8lx\n", (unsigned long) insn);
6310
6311 dsc->modinsn[0] = insn;
6312
6313 return 0;
6314 }
6315
6316 /* The decode_* functions are instruction decoding helpers. They mostly follow
6317 the presentation in the ARM ARM. */
6318
/* Decode instructions from the memory hint, advanced SIMD and
   miscellaneous space (unconditional instructions with bit 27 clear).
   OP1 is insn[26:20] and OP2 is insn[7:4], matching the field names in
   the ARM ARM decode tables.  Returns the result of the selected copy
   routine.  */

static int
arm_decode_misc_memhint_neon (struct gdbarch *gdbarch, uint32_t insn,
			      struct regcache *regs,
			      arm_displaced_step_closure *dsc)
{
  unsigned int op1 = bits (insn, 20, 26), op2 = bits (insn, 4, 7);
  unsigned int rn = bits (insn, 16, 19);

  if (op1 == 0x10 && (op2 & 0x2) == 0x0 && (rn & 0x1) == 0x0)
    return arm_copy_unmodified (gdbarch, insn, "cps", dsc);
  else if (op1 == 0x10 && op2 == 0x0 && (rn & 0x1) == 0x1)
    return arm_copy_unmodified (gdbarch, insn, "setend", dsc);
  else if ((op1 & 0x60) == 0x20)
    return arm_copy_unmodified (gdbarch, insn, "neon dataproc", dsc);
  else if ((op1 & 0x71) == 0x40)
    return arm_copy_unmodified (gdbarch, insn, "neon elt/struct load/store",
				dsc);
  else if ((op1 & 0x77) == 0x41)
    return arm_copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
  else if ((op1 & 0x77) == 0x45)
    return arm_copy_preload (gdbarch, insn, regs, dsc);  /* pli.  */
  else if ((op1 & 0x77) == 0x51)
    {
      if (rn != 0xf)
	return arm_copy_preload (gdbarch, insn, regs, dsc);  /* pld/pldw.  */
      else
	return arm_copy_unpred (gdbarch, insn, dsc);
    }
  else if ((op1 & 0x77) == 0x55)
    return arm_copy_preload (gdbarch, insn, regs, dsc);  /* pld/pldw.  */
  else if (op1 == 0x57)
    switch (op2)
      {
      /* Barrier and clrex instructions run unmodified.  */
      case 0x1: return arm_copy_unmodified (gdbarch, insn, "clrex", dsc);
      case 0x4: return arm_copy_unmodified (gdbarch, insn, "dsb", dsc);
      case 0x5: return arm_copy_unmodified (gdbarch, insn, "dmb", dsc);
      case 0x6: return arm_copy_unmodified (gdbarch, insn, "isb", dsc);
      default: return arm_copy_unpred (gdbarch, insn, dsc);
      }
  else if ((op1 & 0x63) == 0x43)
    return arm_copy_unpred (gdbarch, insn, dsc);
  else if ((op2 & 0x1) == 0x0)
    switch (op1 & ~0x80)
      {
      case 0x61:
	return arm_copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
      case 0x65:
	return arm_copy_preload_reg (gdbarch, insn, regs, dsc);  /* pli reg.  */
      case 0x71: case 0x75:
	/* pld/pldw reg.  */
	return arm_copy_preload_reg (gdbarch, insn, regs, dsc);
      case 0x63: case 0x67: case 0x73: case 0x77:
	return arm_copy_unpred (gdbarch, insn, dsc);
      default:
	return arm_copy_undef (gdbarch, insn, dsc);
      }
  else
    return arm_copy_undef (gdbarch, insn, dsc);  /* Probably unreachable.  */
}
6378
/* Decode an unconditional ARM instruction (condition field 0b1111).
   Instructions with bit 27 clear belong to the miscellaneous/memory
   hint/Neon space; the remainder are dispatched on bits 26:24 together
   with bit 20.  Returns the result of the selected copy routine.  */

static int
arm_decode_unconditional (struct gdbarch *gdbarch, uint32_t insn,
			  struct regcache *regs,
			  arm_displaced_step_closure *dsc)
{
  if (bit (insn, 27) == 0)
    return arm_decode_misc_memhint_neon (gdbarch, insn, regs, dsc);
  /* Switch on bits: 0bxxxxx321xxx0xxxxxxxxxxxxxxxxxxxx.  */
  else switch (((insn & 0x7000000) >> 23) | ((insn & 0x100000) >> 20))
    {
    case 0x0: case 0x2:
      return arm_copy_unmodified (gdbarch, insn, "srs", dsc);

    case 0x1: case 0x3:
      return arm_copy_unmodified (gdbarch, insn, "rfe", dsc);

    case 0x4: case 0x5: case 0x6: case 0x7:
      return arm_copy_b_bl_blx (gdbarch, insn, regs, dsc);

    case 0x8:
      switch ((insn & 0xe00000) >> 21)
	{
	case 0x1: case 0x3: case 0x4: case 0x5: case 0x6: case 0x7:
	  /* stc/stc2.  */
	  return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);

	case 0x2:
	  return arm_copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);

	default:
	  return arm_copy_undef (gdbarch, insn, dsc);
	}

    case 0x9:
      {
	/* rn_f: whether the base register field names the PC.  */
	int rn_f = (bits (insn, 16, 19) == 0xf);
	switch ((insn & 0xe00000) >> 21)
	  {
	  case 0x1: case 0x3:
	    /* ldc/ldc2 imm (undefined for rn == pc).  */
	    return rn_f ? arm_copy_undef (gdbarch, insn, dsc)
			: arm_copy_copro_load_store (gdbarch, insn, regs, dsc);

	  case 0x2:
	    return arm_copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);

	  case 0x4: case 0x5: case 0x6: case 0x7:
	    /* ldc/ldc2 lit (undefined for rn != pc).  */
	    return rn_f ? arm_copy_copro_load_store (gdbarch, insn, regs, dsc)
			: arm_copy_undef (gdbarch, insn, dsc);

	  default:
	    return arm_copy_undef (gdbarch, insn, dsc);
	  }
      }

    case 0xa:
      return arm_copy_unmodified (gdbarch, insn, "stc/stc2", dsc);

    case 0xb:
      if (bits (insn, 16, 19) == 0xf)
	/* ldc/ldc2 lit.  */
	return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
      else
	return arm_copy_undef (gdbarch, insn, dsc);

    case 0xc:
      if (bit (insn, 4))
	return arm_copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
      else
	return arm_copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);

    case 0xd:
      if (bit (insn, 4))
	return arm_copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
      else
	return arm_copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);

    default:
      return arm_copy_undef (gdbarch, insn, dsc);
    }
}
6461
/* Decode miscellaneous instructions in dp/misc encoding space.  OP2 is
   insn[6:4] and OP is insn[22:21], matching the ARM ARM decode table.
   Returns the result of the selected copy routine.  */

static int
arm_decode_miscellaneous (struct gdbarch *gdbarch, uint32_t insn,
			  struct regcache *regs,
			  arm_displaced_step_closure *dsc)
{
  unsigned int op2 = bits (insn, 4, 6);
  unsigned int op = bits (insn, 21, 22);

  switch (op2)
    {
    case 0x0:
      return arm_copy_unmodified (gdbarch, insn, "mrs/msr", dsc);

    case 0x1:
      if (op == 0x1)  /* bx.  */
	return arm_copy_bx_blx_reg (gdbarch, insn, regs, dsc);
      else if (op == 0x3)
	return arm_copy_unmodified (gdbarch, insn, "clz", dsc);
      else
	return arm_copy_undef (gdbarch, insn, dsc);

    case 0x2:
      if (op == 0x1)
	/* Not really supported.  */
	return arm_copy_unmodified (gdbarch, insn, "bxj", dsc);
      else
	return arm_copy_undef (gdbarch, insn, dsc);

    case 0x3:
      if (op == 0x1)
	return arm_copy_bx_blx_reg (gdbarch, insn,
				    regs, dsc);  /* blx register.  */
      else
	return arm_copy_undef (gdbarch, insn, dsc);

    case 0x5:
      return arm_copy_unmodified (gdbarch, insn, "saturating add/sub", dsc);

    case 0x7:
      if (op == 0x1)
	return arm_copy_unmodified (gdbarch, insn, "bkpt", dsc);
      else if (op == 0x3)
	/* Not really supported.  */
	return arm_copy_unmodified (gdbarch, insn, "smc", dsc);
      /* Fall through.  */

    default:
      return arm_copy_undef (gdbarch, insn, dsc);
    }
}
6514
/* Decode the data-processing and miscellaneous instruction space.
   Bit 25 distinguishes the immediate forms from the register forms.
   Returns the result of the selected copy routine, or 1 if the
   instruction could not be decoded (should be unreachable).  */

static int
arm_decode_dp_misc (struct gdbarch *gdbarch, uint32_t insn,
		    struct regcache *regs,
		    arm_displaced_step_closure *dsc)
{
  if (bit (insn, 25))
    switch (bits (insn, 20, 24))
      {
      case 0x10:
	return arm_copy_unmodified (gdbarch, insn, "movw", dsc);

      case 0x14:
	return arm_copy_unmodified (gdbarch, insn, "movt", dsc);

      case 0x12: case 0x16:
	return arm_copy_unmodified (gdbarch, insn, "msr imm", dsc);

      default:
	return arm_copy_alu_imm (gdbarch, insn, regs, dsc);
      }
  else
    {
      uint32_t op1 = bits (insn, 20, 24), op2 = bits (insn, 4, 7);

      if ((op1 & 0x19) != 0x10 && (op2 & 0x1) == 0x0)
	return arm_copy_alu_reg (gdbarch, insn, regs, dsc);
      else if ((op1 & 0x19) != 0x10 && (op2 & 0x9) == 0x1)
	return arm_copy_alu_shifted_reg (gdbarch, insn, regs, dsc);
      else if ((op1 & 0x19) == 0x10 && (op2 & 0x8) == 0x0)
	return arm_decode_miscellaneous (gdbarch, insn, regs, dsc);
      else if ((op1 & 0x19) == 0x10 && (op2 & 0x9) == 0x8)
	return arm_copy_unmodified (gdbarch, insn, "halfword mul/mla", dsc);
      else if ((op1 & 0x10) == 0x00 && op2 == 0x9)
	return arm_copy_unmodified (gdbarch, insn, "mul/mla", dsc);
      else if ((op1 & 0x10) == 0x10 && op2 == 0x9)
	return arm_copy_unmodified (gdbarch, insn, "synch", dsc);
      else if (op2 == 0xb || (op2 & 0xd) == 0xd)
	/* 2nd arg means "unprivileged".  */
	return arm_copy_extra_ld_st (gdbarch, insn, (op1 & 0x12) == 0x02, regs,
				     dsc);
    }

  /* Should be unreachable.  */
  return 1;
}
6560
/* Decode load/store word and unsigned byte instructions.  A is bit 25
   (immediate vs. register offset) and B is bit 4.  The three trailing
   arguments passed to arm_copy_ldr_str_ldrb_strb select load (1) vs.
   store (0), the transfer size in bytes (4 or 1), and -- apparently --
   the unprivileged (...T) variant; confirm against that function's
   definition.  Returns the copy routine's result, or 1 if undecodable
   (should be unreachable).  */

static int
arm_decode_ld_st_word_ubyte (struct gdbarch *gdbarch, uint32_t insn,
			     struct regcache *regs,
			     arm_displaced_step_closure *dsc)
{
  int a = bit (insn, 25), b = bit (insn, 4);
  uint32_t op1 = bits (insn, 20, 24);

  if ((!a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02)
      || (a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 4, 0);
  else if ((!a && (op1 & 0x17) == 0x02)
	   || (a && (op1 & 0x17) == 0x02 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 4, 1);
  else if ((!a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03)
	   || (a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 4, 0);
  else if ((!a && (op1 & 0x17) == 0x03)
	   || (a && (op1 & 0x17) == 0x03 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 4, 1);
  else if ((!a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06)
	   || (a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 0);
  else if ((!a && (op1 & 0x17) == 0x06)
	   || (a && (op1 & 0x17) == 0x06 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 1);
  else if ((!a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07)
	   || (a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 0);
  else if ((!a && (op1 & 0x17) == 0x07)
	   || (a && (op1 & 0x17) == 0x07 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 1);

  /* Should be unreachable.  */
  return 1;
}
6597
/* Decode media instructions (parallel add/sub, pack/unpack, saturate,
   bit-field and usad8 operations), dispatching on insn[24:20].  Returns
   the result of the selected copy routine, or 1 if undecodable (should
   be unreachable).  */

static int
arm_decode_media (struct gdbarch *gdbarch, uint32_t insn,
		  arm_displaced_step_closure *dsc)
{
  switch (bits (insn, 20, 24))
    {
    case 0x00: case 0x01: case 0x02: case 0x03:
      return arm_copy_unmodified (gdbarch, insn, "parallel add/sub signed", dsc);

    case 0x04: case 0x05: case 0x06: case 0x07:
      return arm_copy_unmodified (gdbarch, insn, "parallel add/sub unsigned", dsc);

    case 0x08: case 0x09: case 0x0a: case 0x0b:
    case 0x0c: case 0x0d: case 0x0e: case 0x0f:
      return arm_copy_unmodified (gdbarch, insn,
				  "decode/pack/unpack/saturate/reverse", dsc);

    case 0x18:
      if (bits (insn, 5, 7) == 0)  /* op2.  */
	{
	  /* Rd field of 0xf distinguishes usad8 from usada8.  */
	  if (bits (insn, 12, 15) == 0xf)
	    return arm_copy_unmodified (gdbarch, insn, "usad8", dsc);
	  else
	    return arm_copy_unmodified (gdbarch, insn, "usada8", dsc);
	}
      else
	return arm_copy_undef (gdbarch, insn, dsc);

    case 0x1a: case 0x1b:
      if (bits (insn, 5, 6) == 0x2)  /* op2[1:0].  */
	return arm_copy_unmodified (gdbarch, insn, "sbfx", dsc);
      else
	return arm_copy_undef (gdbarch, insn, dsc);

    case 0x1c: case 0x1d:
      if (bits (insn, 5, 6) == 0x0)  /* op2[1:0].  */
	{
	  /* Rn field of 0xf distinguishes bfc from bfi.  */
	  if (bits (insn, 0, 3) == 0xf)
	    return arm_copy_unmodified (gdbarch, insn, "bfc", dsc);
	  else
	    return arm_copy_unmodified (gdbarch, insn, "bfi", dsc);
	}
      else
	return arm_copy_undef (gdbarch, insn, dsc);

    case 0x1e: case 0x1f:
      if (bits (insn, 5, 6) == 0x2)  /* op2[1:0].  */
	return arm_copy_unmodified (gdbarch, insn, "ubfx", dsc);
      else
	return arm_copy_undef (gdbarch, insn, dsc);
    }

  /* Should be unreachable.  */
  return 1;
}
6653
6654 static int
6655 arm_decode_b_bl_ldmstm (struct gdbarch *gdbarch, uint32_t insn,
6656 struct regcache *regs,
6657 arm_displaced_step_closure *dsc)
6658 {
6659 if (bit (insn, 25))
6660 return arm_copy_b_bl_blx (gdbarch, insn, regs, dsc);
6661 else
6662 return arm_copy_block_xfer (gdbarch, insn, regs, dsc);
6663 }
6664
/* Decode VFP/Neon extension register load/store instructions,
   dispatching on insn[24:20].  Returns the result of the selected copy
   routine, or 1 if undecodable (should be unreachable).  */

static int
arm_decode_ext_reg_ld_st (struct gdbarch *gdbarch, uint32_t insn,
			  struct regcache *regs,
			  arm_displaced_step_closure *dsc)
{
  unsigned int opcode = bits (insn, 20, 24);

  switch (opcode)
    {
    case 0x04: case 0x05:  /* VFP/Neon mrrc/mcrr.  */
      return arm_copy_unmodified (gdbarch, insn, "vfp/neon mrrc/mcrr", dsc);

    case 0x08: case 0x0a: case 0x0c: case 0x0e:
    case 0x12: case 0x16:
      return arm_copy_unmodified (gdbarch, insn, "vfp/neon vstm/vpush", dsc);

    case 0x09: case 0x0b: case 0x0d: case 0x0f:
    case 0x13: case 0x17:
      return arm_copy_unmodified (gdbarch, insn, "vfp/neon vldm/vpop", dsc);

    case 0x10: case 0x14: case 0x18: case 0x1c:  /* vstr.  */
    case 0x11: case 0x15: case 0x19: case 0x1d:  /* vldr.  */
      /* Note: no writeback for these instructions.  Bit 25 will always be
	 zero though (via caller), so the following works OK.  */
      return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
    }

  /* Should be unreachable.  */
  return 1;
}
6695
6696 /* Decode shifted register instructions. */
6697
6698 static int
6699 thumb2_decode_dp_shift_reg (struct gdbarch *gdbarch, uint16_t insn1,
6700 uint16_t insn2, struct regcache *regs,
6701 arm_displaced_step_closure *dsc)
6702 {
6703 /* PC is only allowed to be used in instruction MOV. */
6704
6705 unsigned int op = bits (insn1, 5, 8);
6706 unsigned int rn = bits (insn1, 0, 3);
6707
6708 if (op == 0x2 && rn == 0xf) /* MOV */
6709 return thumb2_copy_alu_imm (gdbarch, insn1, insn2, regs, dsc);
6710 else
6711 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
6712 "dp (shift reg)", dsc);
6713 }
6714
6715
/* Decode extension register load/store.  Exactly the same as
   arm_decode_ext_reg_ld_st, but for the 32-bit Thumb encoding: the
   opcode field is insn1[8:4].  Returns the result of the selected copy
   routine, or 1 if undecodable (should be unreachable).  */

static int
thumb2_decode_ext_reg_ld_st (struct gdbarch *gdbarch, uint16_t insn1,
			     uint16_t insn2, struct regcache *regs,
			     arm_displaced_step_closure *dsc)
{
  unsigned int opcode = bits (insn1, 4, 8);

  switch (opcode)
    {
    case 0x04: case 0x05:
      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					  "vfp/neon vmov", dsc);

    case 0x08: case 0x0c:  /* 01x00 */
    case 0x0a: case 0x0e:  /* 01x10 */
    case 0x12: case 0x16:  /* 10x10 */
      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					  "vfp/neon vstm/vpush", dsc);

    case 0x09: case 0x0d:  /* 01x01 */
    case 0x0b: case 0x0f:  /* 01x11 */
    case 0x13: case 0x17:  /* 10x11 */
      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					  "vfp/neon vldm/vpop", dsc);

    case 0x10: case 0x14: case 0x18: case 0x1c:  /* vstr.  */
      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					  "vstr", dsc);
    case 0x11: case 0x15: case 0x19: case 0x1d:  /* vldr.  */
      return thumb2_copy_copro_load_store (gdbarch, insn1, insn2, regs, dsc);
    }

  /* Should be unreachable.  */
  return 1;
}
6754
/* Decode the supervisor call and coprocessor instruction space.  OP1 is
   insn[25:20], OP is bit 4 and COPROC is insn[11:8]; coprocessor
   numbers 101x select the VFP/Neon register banks, as the branch
   comments below indicate.  Returns the result of the selected copy
   routine.  */

static int
arm_decode_svc_copro (struct gdbarch *gdbarch, uint32_t insn,
		      struct regcache *regs, arm_displaced_step_closure *dsc)
{
  unsigned int op1 = bits (insn, 20, 25);
  int op = bit (insn, 4);
  unsigned int coproc = bits (insn, 8, 11);

  if ((op1 & 0x20) == 0x00 && (op1 & 0x3a) != 0x00 && (coproc & 0xe) == 0xa)
    return arm_decode_ext_reg_ld_st (gdbarch, insn, regs, dsc);
  else if ((op1 & 0x21) == 0x00 && (op1 & 0x3a) != 0x00
	   && (coproc & 0xe) != 0xa)
    /* stc/stc2.  */
    return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
  else if ((op1 & 0x21) == 0x01 && (op1 & 0x3a) != 0x00
	   && (coproc & 0xe) != 0xa)
    /* ldc/ldc2 imm/lit.  */
    return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
  else if ((op1 & 0x3e) == 0x00)
    return arm_copy_undef (gdbarch, insn, dsc);
  else if ((op1 & 0x3e) == 0x04 && (coproc & 0xe) == 0xa)
    return arm_copy_unmodified (gdbarch, insn, "neon 64bit xfer", dsc);
  else if (op1 == 0x04 && (coproc & 0xe) != 0xa)
    return arm_copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);
  else if (op1 == 0x05 && (coproc & 0xe) != 0xa)
    return arm_copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);
  else if ((op1 & 0x30) == 0x20 && !op)
    {
      if ((coproc & 0xe) == 0xa)
	return arm_copy_unmodified (gdbarch, insn, "vfp dataproc", dsc);
      else
	return arm_copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
    }
  else if ((op1 & 0x30) == 0x20 && op)
    return arm_copy_unmodified (gdbarch, insn, "neon 8/16/32 bit xfer", dsc);
  else if ((op1 & 0x31) == 0x20 && op && (coproc & 0xe) != 0xa)
    return arm_copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
  else if ((op1 & 0x31) == 0x21 && op && (coproc & 0xe) != 0xa)
    return arm_copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
  else if ((op1 & 0x30) == 0x30)
    return arm_copy_svc (gdbarch, insn, regs, dsc);
  else
    return arm_copy_undef (gdbarch, insn, dsc);  /* Possibly unreachable.  */
}
6799
/* Decode the 32-bit Thumb coprocessor/SIMD instruction space,
   dispatching on insn1 bits 9, 8:5 and 4 together with the coprocessor
   number in insn2[11:8].  Returns 0 or the result of the selected copy
   routine.  */

static int
thumb2_decode_svc_copro (struct gdbarch *gdbarch, uint16_t insn1,
			 uint16_t insn2, struct regcache *regs,
			 arm_displaced_step_closure *dsc)
{
  unsigned int coproc = bits (insn2, 8, 11);
  unsigned int bit_5_8 = bits (insn1, 5, 8);
  unsigned int bit_9 = bit (insn1, 9);
  unsigned int bit_4 = bit (insn1, 4);

  if (bit_9 == 0)
    {
      if (bit_5_8 == 2)
	return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					    "neon 64bit xfer/mrrc/mrrc2/mcrr/mcrr2",
					    dsc);
      else if (bit_5_8 == 0)  /* UNDEFINED.  */
	return thumb_32bit_copy_undef (gdbarch, insn1, insn2, dsc);
      else
	{
	  /* coproc is 101x: SIMD/VFP, ext registers load/store.  */
	  if ((coproc & 0xe) == 0xa)
	    return thumb2_decode_ext_reg_ld_st (gdbarch, insn1, insn2, regs,
						dsc);
	  else  /* coproc is not 101x.  */
	    {
	      if (bit_4 == 0)  /* STC/STC2.  */
		return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						    "stc/stc2", dsc);
	      else  /* LDC/LDC2 {literal, immediate}.  */
		return thumb2_copy_copro_load_store (gdbarch, insn1, insn2,
						     regs, dsc);
	    }
	}
    }
  else
    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "coproc", dsc);

  /* Not reached: all paths above return.  */
  return 0;
}
6840
6841 static void
6842 install_pc_relative (struct gdbarch *gdbarch, struct regcache *regs,
6843 arm_displaced_step_closure *dsc, int rd)
6844 {
6845 /* ADR Rd, #imm
6846
6847 Rewrite as:
6848
6849 Preparation: Rd <- PC
6850 Insn: ADD Rd, #imm
6851 Cleanup: Null.
6852 */
6853
6854 /* Rd <- PC */
6855 int val = displaced_read_reg (regs, dsc, ARM_PC_REGNUM);
6856 displaced_write_reg (regs, dsc, rd, val, CANNOT_WRITE_PC);
6857 }
6858
6859 static int
6860 thumb_copy_pc_relative_16bit (struct gdbarch *gdbarch, struct regcache *regs,
6861 arm_displaced_step_closure *dsc,
6862 int rd, unsigned int imm)
6863 {
6864
6865 /* Encoding T2: ADDS Rd, #imm */
6866 dsc->modinsn[0] = (0x3000 | (rd << 8) | imm);
6867
6868 install_pc_relative (gdbarch, regs, dsc, rd);
6869
6870 return 0;
6871 }
6872
6873 static int
6874 thumb_decode_pc_relative_16bit (struct gdbarch *gdbarch, uint16_t insn,
6875 struct regcache *regs,
6876 arm_displaced_step_closure *dsc)
6877 {
6878 unsigned int rd = bits (insn, 8, 10);
6879 unsigned int imm8 = bits (insn, 0, 7);
6880
6881 if (debug_displaced)
6882 fprintf_unfiltered (gdb_stdlog,
6883 "displaced: copying thumb adr r%d, #%d insn %.4x\n",
6884 rd, imm8, insn);
6885
6886 return thumb_copy_pc_relative_16bit (gdbarch, regs, dsc, rd, imm8);
6887 }
6888
/* Copy a 32-bit Thumb ADR instruction.  The ADR is rewritten as an
   ADD or SUB immediate of the form "Rd, Rd, #imm", with Rd pre-loaded
   with the PC value by install_pc_relative.  */

static int
thumb_copy_pc_relative_32bit (struct gdbarch *gdbarch, uint16_t insn1,
			      uint16_t insn2, struct regcache *regs,
			      arm_displaced_step_closure *dsc)
{
  unsigned int rd = bits (insn2, 8, 11);
  /* Since immediate has the same encoding in ADR ADD and SUB, so we simply
     extract raw immediate encoding rather than computing immediate.  When
     generating ADD or SUB instruction, we can simply perform OR operation to
     set immediate into ADD.  */
  unsigned int imm_3_8 = insn2 & 0x70ff;  /* imm3 and imm8 fields.  */
  unsigned int imm_i = insn1 & 0x0400;  /* Clear all bits except bit 10 (i).  */

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog,
			"displaced: copying thumb adr r%d, #%d:%d insn %.4x%.4x\n",
			rd, imm_i, imm_3_8, insn1, insn2);

  if (bit (insn1, 7))  /* ADR encoding T2 (subtract form).  */
    {
      /* Emit SUB (immediate) encoding T3: SUB Rd, Rd, #imm.  */
      dsc->modinsn[0] = (0xf1a0 | rd | imm_i);
      dsc->modinsn[1] = ((rd << 8) | imm_3_8);
    }
  else  /* ADR encoding T3 (add form).  */
    {
      /* Emit ADD (immediate) encoding T3: ADD Rd, Rd, #imm.  */
      dsc->modinsn[0] = (0xf100 | rd | imm_i);
      dsc->modinsn[1] = ((rd << 8) | imm_3_8);
    }
  dsc->numinsns = 2;

  install_pc_relative (gdbarch, regs, dsc, rd);

  return 0;
}
6925
/* Copy a 16-bit Thumb PC-relative load (LDR literal) for displaced
   stepping.  The PC-relative addressing is replaced by a register-offset
   load through scratch registers r2/r3.  */

static int
thumb_copy_16bit_ldr_literal (struct gdbarch *gdbarch, uint16_t insn1,
			      struct regcache *regs,
			      arm_displaced_step_closure *dsc)
{
  unsigned int rt = bits (insn1, 8, 10);
  unsigned int pc;
  int imm8 = (bits (insn1, 0, 7) << 2);  /* Word-aligned byte offset.  */

  /* LDR Rd, #imm8

     Rewrite as:

     Preparation: tmp0 <- R0, tmp2 <- R2, tmp3 <- R3, R2 <- PC, R3 <- #imm8;

     Insn: LDR R0, [R2, R3];
     Cleanup: R2 <- tmp2, R3 <- tmp3, Rd <- R0, R0 <- tmp0 */

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog,
			"displaced: copying thumb ldr r%d [pc #%d]\n"
			, rt, imm8);

  /* Save the registers that the out-of-line sequence clobbers.  */
  dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
  dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
  dsc->tmp[3] = displaced_read_reg (regs, dsc, 3);
  pc = displaced_read_reg (regs, dsc, ARM_PC_REGNUM);
  /* The assembler calculates the required value of the offset from the
     Align(PC,4) value of this instruction to the label.  */
  pc = pc & 0xfffffffc;

  displaced_write_reg (regs, dsc, 2, pc, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 3, imm8, CANNOT_WRITE_PC);

  dsc->rd = rt;
  dsc->u.ldst.xfersize = 4;
  dsc->u.ldst.rn = 0;
  dsc->u.ldst.immed = 0;
  dsc->u.ldst.writeback = 0;
  dsc->u.ldst.restore_r4 = 0;

  dsc->modinsn[0] = 0x58d0;  /* ldr r0, [r2, r3] */

  dsc->cleanup = &cleanup_load;

  return 0;
}
6973
/* Copy Thumb cbnz/cbz instruction.  The branch condition is evaluated
   here (CBZ/CBNZ test a register, not the flags); the copied
   instruction is a NOP and cleanup_branch performs the branch if
   taken.  */

static int
thumb_copy_cbnz_cbz (struct gdbarch *gdbarch, uint16_t insn1,
		     struct regcache *regs,
		     arm_displaced_step_closure *dsc)
{
  int non_zero = bit (insn1, 11);  /* 1 for CBNZ, 0 for CBZ.  */
  unsigned int imm5 = (bit (insn1, 9) << 6) | (bits (insn1, 3, 7) << 1);
  CORE_ADDR from = dsc->insn_addr;
  int rn = bits (insn1, 0, 2);
  int rn_val = displaced_read_reg (regs, dsc, rn);

  dsc->u.branch.cond = (rn_val && non_zero) || (!rn_val && !non_zero);
  /* CBNZ and CBZ do not affect the condition flags.  If condition is true,
     set it INST_AL, so cleanup_branch will know branch is taken, otherwise,
     condition is false, let it be, cleanup_branch will do nothing.  */
  if (dsc->u.branch.cond)
    {
      dsc->u.branch.cond = INST_AL;
      dsc->u.branch.dest = from + 4 + imm5;
    }
  else
    dsc->u.branch.dest = from + 2;  /* Fall through to the next insn.  */

  dsc->u.branch.link = 0;
  dsc->u.branch.exchange = 0;

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copying %s [r%d = 0x%x]"
			" insn %.4x to %.8lx\n", non_zero ? "cbnz" : "cbz",
			rn, rn_val, insn1, dsc->u.branch.dest);

  dsc->modinsn[0] = THUMB_NOP;

  dsc->cleanup = &cleanup_branch;
  return 0;
}
7012
/* Copy Table Branch Byte/Halfword (TBB/TBH).  The table lookup is
   performed here by reading target memory at Rn + Rm (TBB) or
   Rn + 2*Rm (TBH); cleanup_branch then performs the resulting
   branch.  */
static int
thumb2_copy_table_branch (struct gdbarch *gdbarch, uint16_t insn1,
			  uint16_t insn2, struct regcache *regs,
			  arm_displaced_step_closure *dsc)
{
  ULONGEST rn_val, rm_val;
  int is_tbh = bit (insn2, 4);  /* 1 for TBH, 0 for TBB.  */
  CORE_ADDR halfwords = 0;
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  rn_val = displaced_read_reg (regs, dsc, bits (insn1, 0, 3));
  rm_val = displaced_read_reg (regs, dsc, bits (insn2, 0, 3));

  /* NOTE(review): the target_read_memory return values below are not
     checked; on a read failure HALFWORDS keeps its prior value.  */
  if (is_tbh)
    {
      gdb_byte buf[2];

      target_read_memory (rn_val + 2 * rm_val, buf, 2);
      halfwords = extract_unsigned_integer (buf, 2, byte_order);
    }
  else
    {
      gdb_byte buf[1];

      target_read_memory (rn_val + rm_val, buf, 1);
      halfwords = extract_unsigned_integer (buf, 1, byte_order);
    }

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: %s base 0x%x offset 0x%x"
			" offset 0x%x\n", is_tbh ? "tbh" : "tbb",
			(unsigned int) rn_val, (unsigned int) rm_val,
			(unsigned int) halfwords);

  /* The table entry holds a halfword count relative to the instruction
     following the TBB/TBH.  */
  dsc->u.branch.cond = INST_AL;
  dsc->u.branch.link = 0;
  dsc->u.branch.exchange = 0;
  dsc->u.branch.dest = dsc->insn_addr + 4 + 2 * halfwords;

  dsc->cleanup = &cleanup_branch;

  return 0;
}
7057
7058 static void
7059 cleanup_pop_pc_16bit_all (struct gdbarch *gdbarch, struct regcache *regs,
7060 arm_displaced_step_closure *dsc)
7061 {
7062 /* PC <- r7 */
7063 int val = displaced_read_reg (regs, dsc, 7);
7064 displaced_write_reg (regs, dsc, ARM_PC_REGNUM, val, BX_WRITE_PC);
7065
7066 /* r7 <- r8 */
7067 val = displaced_read_reg (regs, dsc, 8);
7068 displaced_write_reg (regs, dsc, 7, val, CANNOT_WRITE_PC);
7069
7070 /* r8 <- tmp[0] */
7071 displaced_write_reg (regs, dsc, 8, dsc->tmp[0], CANNOT_WRITE_PC);
7072
7073 }
7074
/* Copy a 16-bit Thumb POP instruction whose register list includes PC.
   The instruction is rewritten so the popped values land in the low
   registers, and a cleanup routine moves them into place.  */

static int
thumb_copy_pop_pc_16bit (struct gdbarch *gdbarch, uint16_t insn1,
			 struct regcache *regs,
			 arm_displaced_step_closure *dsc)
{
  dsc->u.block.regmask = insn1 & 0x00ff;

  /* Rewrite instruction: POP {rX, rY, ...,rZ, PC}
     to :

     (1) register list is full, that is, r0-r7 are used.
     Prepare: tmp[0] <- r8

     POP {r0, r1, ...., r6, r7}; remove PC from reglist
     MOV r8, r7; Move value of r7 to r8;
     POP {r7}; Store PC value into r7.

     Cleanup: PC <- r7, r7 <- r8, r8 <-tmp[0]

     (2) register list is not full, supposing there are N registers in
     register list (except PC, 0 <= N <= 7).
     Prepare: for each i, 0 - N, tmp[i] <- ri.

     POP {r0, r1, ...., rN};

     Cleanup: Set registers in original reglist from r0 - rN.  Restore r0 - rN
     from tmp[] properly.
  */
  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog,
			"displaced: copying thumb pop {%.8x, pc} insn %.4x\n",
			dsc->u.block.regmask, insn1);

  if (dsc->u.block.regmask == 0xff)
    {
      /* Case (1) above: full register list.  */
      dsc->tmp[0] = displaced_read_reg (regs, dsc, 8);

      dsc->modinsn[0] = (insn1 & 0xfeff);  /* POP {r0,r1,...,r6, r7} */
      dsc->modinsn[1] = 0x46b8;  /* MOV r8, r7 */
      dsc->modinsn[2] = 0xbc80;  /* POP {r7} */

      dsc->numinsns = 3;
      dsc->cleanup = &cleanup_pop_pc_16bit_all;
    }
  else
    {
      /* Case (2) above: partial register list.  Pop into r0..rN and let
	 cleanup_block_load_pc distribute the values.  */
      unsigned int num_in_list = bitcount (dsc->u.block.regmask);
      unsigned int i;
      unsigned int new_regmask;

      /* Save the low registers (plus one extra for the PC slot) that the
	 modified POP will clobber.  */
      for (i = 0; i < num_in_list + 1; i++)
	dsc->tmp[i] = displaced_read_reg (regs, dsc, i);

      new_regmask = (1 << (num_in_list + 1)) - 1;

      if (debug_displaced)
	fprintf_unfiltered (gdb_stdlog, _("displaced: POP "
			    "{..., pc}: original reg list %.4x,"
			    " modified list %.4x\n"),
			    (int) dsc->u.block.regmask, new_regmask);

      /* Record PC in the original register list for the cleanup.  */
      dsc->u.block.regmask |= 0x8000;
      dsc->u.block.writeback = 0;
      dsc->u.block.cond = INST_AL;

      dsc->modinsn[0] = (insn1 & ~0x1ff) | (new_regmask & 0xff);

      dsc->cleanup = &cleanup_block_load_pc;
    }

  return 0;
}
7147
/* Decode 16-bit Thumb instruction INSN1 and dispatch to the matching
   displaced-stepping copy routine, filling in DSC.  Dispatch is on the
   major opcode in bits 12-15, refined by bits 10-11 where needed.  An
   undecodable instruction raises an internal error.  */

static void
thumb_process_displaced_16bit_insn (struct gdbarch *gdbarch, uint16_t insn1,
				    struct regcache *regs,
				    arm_displaced_step_closure *dsc)
{
  unsigned short op_bit_12_15 = bits (insn1, 12, 15);
  unsigned short op_bit_10_11 = bits (insn1, 10, 11);
  int err = 0;

  /* 16-bit thumb instructions.  */
  switch (op_bit_12_15)
    {
      /* Shift (imme), add, subtract, move and compare.  */
    case 0: case 1: case 2: case 3:
      err = thumb_copy_unmodified_16bit (gdbarch, insn1,
					 "shift/add/sub/mov/cmp",
					 dsc);
      break;
    case 4:
      switch (op_bit_10_11)
	{
	case 0: /* Data-processing */
	  err = thumb_copy_unmodified_16bit (gdbarch, insn1,
					     "data-processing",
					     dsc);
	  break;
	case 1: /* Special data instructions and branch and exchange.  */
	  {
	    unsigned short op = bits (insn1, 7, 9);
	    if (op == 6 || op == 7) /* BX or BLX */
	      err = thumb_copy_bx_blx_reg (gdbarch, insn1, regs, dsc);
	    else if (bits (insn1, 6, 7) != 0) /* ADD/MOV/CMP high registers.  */
	      err = thumb_copy_alu_reg (gdbarch, insn1, regs, dsc);
	    else
	      err = thumb_copy_unmodified_16bit (gdbarch, insn1, "special data",
						 dsc);
	  }
	  break;
	default: /* LDR (literal) */
	  err = thumb_copy_16bit_ldr_literal (gdbarch, insn1, regs, dsc);
	}
      break;
    case 5: case 6: case 7: case 8: case 9: /* Load/Store single data item */
      err = thumb_copy_unmodified_16bit (gdbarch, insn1, "ldr/str", dsc);
      break;
    case 10:
      if (op_bit_10_11 < 2) /* Generate PC-relative address */
	err = thumb_decode_pc_relative_16bit (gdbarch, insn1, regs, dsc);
      else /* Generate SP-relative address */
	err = thumb_copy_unmodified_16bit (gdbarch, insn1, "sp-relative", dsc);
      break;
    case 11: /* Misc 16-bit instructions */
      {
	switch (bits (insn1, 8, 11))
	  {
	  case 1: case 3: case 9: case 11: /* CBNZ, CBZ */
	    err = thumb_copy_cbnz_cbz (gdbarch, insn1, regs, dsc);
	    break;
	  case 12: case 13: /* POP */
	    if (bit (insn1, 8)) /* PC is in register list.  */
	      err = thumb_copy_pop_pc_16bit (gdbarch, insn1, regs, dsc);
	    else
	      err = thumb_copy_unmodified_16bit (gdbarch, insn1, "pop", dsc);
	    break;
	  case 15: /* If-Then, and hints */
	    if (bits (insn1, 0, 3))
	      /* If-Then makes up to four following instructions conditional.
		 IT instruction itself is not conditional, so handle it as a
		 common unmodified instruction.  */
	      err = thumb_copy_unmodified_16bit (gdbarch, insn1, "If-Then",
						 dsc);
	    else
	      err = thumb_copy_unmodified_16bit (gdbarch, insn1, "hints", dsc);
	    break;
	  default:
	    err = thumb_copy_unmodified_16bit (gdbarch, insn1, "misc", dsc);
	  }
      }
      break;
    case 12:
      if (op_bit_10_11 < 2) /* Store multiple registers */
	err = thumb_copy_unmodified_16bit (gdbarch, insn1, "stm", dsc);
      else /* Load multiple registers */
	err = thumb_copy_unmodified_16bit (gdbarch, insn1, "ldm", dsc);
      break;
    case 13: /* Conditional branch and supervisor call */
      if (bits (insn1, 9, 11) != 7) /* conditional branch */
	err = thumb_copy_b (gdbarch, insn1, dsc);
      else
	err = thumb_copy_svc (gdbarch, insn1, regs, dsc);
      break;
    case 14: /* Unconditional branch */
      err = thumb_copy_b (gdbarch, insn1, dsc);
      break;
    default:
      err = 1;
    }

  if (err)
    internal_error (__FILE__, __LINE__,
		    _("thumb_process_displaced_16bit_insn: Instruction decode error"));
}
7250
/* Decode a 32-bit Thumb load / memory-hint instruction (halfwords
   INSN1:INSN2) and dispatch to the matching displaced-stepping copy
   routine, dispatching on bits 5-6 of the first halfword.  Returns the
   result of the chosen copy routine (nonzero on failure).  */

static int
decode_thumb_32bit_ld_mem_hints (struct gdbarch *gdbarch,
				 uint16_t insn1, uint16_t insn2,
				 struct regcache *regs,
				 arm_displaced_step_closure *dsc)
{
  int rt = bits (insn2, 12, 15);
  int rn = bits (insn1, 0, 3);
  int op1 = bits (insn1, 7, 8);

  switch (bits (insn1, 5, 6))
    {
    case 0: /* Load byte and memory hints */
      if (rt == 0xf) /* PLD/PLI */
	{
	  if (rn == 0xf)
	    /* PLD literal or Encoding T3 of PLI(immediate, literal).  */
	    return thumb2_copy_preload (gdbarch, insn1, insn2, regs, dsc);
	  else
	    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						"pli/pld", dsc);
	}
      else
	{
	  if (rn == 0xf) /* LDRB/LDRSB (literal) */
	    return thumb2_copy_load_literal (gdbarch, insn1, insn2, regs, dsc,
					     1);
	  else
	    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						"ldrb{reg, immediate}/ldrbt",
						dsc);
	}

      break;
    case 1: /* Load halfword and memory hints.  */
      if (rt == 0xf) /* PLD{W} and Unalloc memory hint.  */
	return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					    "pld/unalloc memhint", dsc);
      else
	{
	  if (rn == 0xf)
	    return thumb2_copy_load_literal (gdbarch, insn1, insn2, regs, dsc,
					     2);
	  else
	    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						"ldrh/ldrht", dsc);
	}
      break;
    case 2: /* Load word */
      {
	int insn2_bit_8_11 = bits (insn2, 8, 11);

	if (rn == 0xf)
	  return thumb2_copy_load_literal (gdbarch, insn1, insn2, regs, dsc, 4);
	else if (op1 == 0x1) /* Encoding T3 */
	  return thumb2_copy_load_reg_imm (gdbarch, insn1, insn2, regs, dsc,
					   0, 1);
	else /* op1 == 0x0 */
	  {
	    if (insn2_bit_8_11 == 0xc || (insn2_bit_8_11 & 0x9) == 0x9)
	      /* LDR (immediate) */
	      return thumb2_copy_load_reg_imm (gdbarch, insn1, insn2, regs,
					       dsc, bit (insn2, 8), 1);
	    else if (insn2_bit_8_11 == 0xe) /* LDRT */
	      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						  "ldrt", dsc);
	    else
	      /* LDR (register) */
	      return thumb2_copy_load_reg_imm (gdbarch, insn1, insn2, regs,
					       dsc, 0, 0);
	  }
	break;
      }
    default:
      return thumb_32bit_copy_undef (gdbarch, insn1, insn2, dsc);
      break;
    }
  return 0;
}
7330
/* Decode 32-bit Thumb instruction INSN1:INSN2 and dispatch to the
   matching displaced-stepping copy routine, filling in DSC.  Dispatch
   is on op1 (bits 11-12 of the first halfword), refined by further
   opcode fields.  An undecodable instruction raises an internal
   error.  */

static void
thumb_process_displaced_32bit_insn (struct gdbarch *gdbarch, uint16_t insn1,
				    uint16_t insn2, struct regcache *regs,
				    arm_displaced_step_closure *dsc)
{
  int err = 0;
  unsigned short op = bit (insn2, 15);
  unsigned int op1 = bits (insn1, 11, 12);

  switch (op1)
    {
    case 1:
      {
	switch (bits (insn1, 9, 10))
	  {
	  case 0:
	    if (bit (insn1, 6))
	      {
		/* Load/store {dual, exclusive}, table branch.  */
		if (bits (insn1, 7, 8) == 1 && bits (insn1, 4, 5) == 1
		    && bits (insn2, 5, 7) == 0)
		  err = thumb2_copy_table_branch (gdbarch, insn1, insn2, regs,
						  dsc);
		else
		  /* PC is not allowed to be used in load/store {dual,
		     exclusive} instructions.  */
		  err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						     "load/store dual/ex", dsc);
	      }
	    else /* load/store multiple */
	      {
		switch (bits (insn1, 7, 8))
		  {
		  case 0: case 3: /* SRS, RFE */
		    err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						       "srs/rfe", dsc);
		    break;
		  case 1: case 2: /* LDM/STM/PUSH/POP */
		    err = thumb2_copy_block_xfer (gdbarch, insn1, insn2, regs, dsc);
		    break;
		  }
	      }
	    break;

	  case 1:
	    /* Data-processing (shift register).  */
	    err = thumb2_decode_dp_shift_reg (gdbarch, insn1, insn2, regs,
					      dsc);
	    break;
	  default: /* Coprocessor instructions.  */
	    err = thumb2_decode_svc_copro (gdbarch, insn1, insn2, regs, dsc);
	    break;
	  }
	break;
      }
    case 2: /* op1 = 2 */
      if (op) /* Branch and misc control.  */
	{
	  if (bit (insn2, 14)  /* BLX/BL */
	      || bit (insn2, 12) /* Unconditional branch */
	      || (bits (insn1, 7, 9) != 0x7)) /* Conditional branch */
	    err = thumb2_copy_b_bl_blx (gdbarch, insn1, insn2, regs, dsc);
	  else
	    err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					       "misc ctrl", dsc);
	}
      else
	{
	  if (bit (insn1, 9)) /* Data processing (plain binary imm).  */
	    {
	      int dp_op = bits (insn1, 4, 8);
	      int rn = bits (insn1, 0, 3);
	      if ((dp_op == 0 || dp_op == 0xa) && rn == 0xf)
		err = thumb_copy_pc_relative_32bit (gdbarch, insn1, insn2,
						    regs, dsc);
	      else
		err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						   "dp/pb", dsc);
	    }
	  else /* Data processing (modified immediate) */
	    err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					       "dp/mi", dsc);
	}
      break;
    case 3: /* op1 = 3 */
      switch (bits (insn1, 9, 10))
	{
	case 0:
	  if (bit (insn1, 4))
	    err = decode_thumb_32bit_ld_mem_hints (gdbarch, insn1, insn2,
						   regs, dsc);
	  else /* NEON Load/Store and Store single data item */
	    err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					       "neon elt/struct load/store",
					       dsc);
	  break;
	case 1: /* op1 = 3, bits (9, 10) == 1 */
	  switch (bits (insn1, 7, 8))
	    {
	    case 0: case 1: /* Data processing (register) */
	      err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						 "dp(reg)", dsc);
	      break;
	    case 2: /* Multiply and absolute difference */
	      err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						 "mul/mua/diff", dsc);
	      break;
	    case 3: /* Long multiply and divide */
	      err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						 "lmul/lmua", dsc);
	      break;
	    }
	  break;
	default: /* Coprocessor instructions */
	  err = thumb2_decode_svc_copro (gdbarch, insn1, insn2, regs, dsc);
	  break;
	}
      break;
    default:
      err = 1;
    }

  if (err)
    internal_error (__FILE__, __LINE__,
		    _("thumb_process_displaced_32bit_insn: Instruction decode error"));

}
7458
7459 static void
7460 thumb_process_displaced_insn (struct gdbarch *gdbarch, CORE_ADDR from,
7461 struct regcache *regs,
7462 arm_displaced_step_closure *dsc)
7463 {
7464 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
7465 uint16_t insn1
7466 = read_memory_unsigned_integer (from, 2, byte_order_for_code);
7467
7468 if (debug_displaced)
7469 fprintf_unfiltered (gdb_stdlog, "displaced: process thumb insn %.4x "
7470 "at %.8lx\n", insn1, (unsigned long) from);
7471
7472 dsc->is_thumb = 1;
7473 dsc->insn_size = thumb_insn_size (insn1);
7474 if (thumb_insn_size (insn1) == 4)
7475 {
7476 uint16_t insn2
7477 = read_memory_unsigned_integer (from + 2, 2, byte_order_for_code);
7478 thumb_process_displaced_32bit_insn (gdbarch, insn1, insn2, regs, dsc);
7479 }
7480 else
7481 thumb_process_displaced_16bit_insn (gdbarch, insn1, regs, dsc);
7482 }
7483
/* Prepare DSC so the instruction at FROM can be single-stepped out of
   line in the scratch space at TO.  Dispatches to the Thumb decoder
   when the inferior is not in ARM mode; otherwise reads the 4-byte ARM
   instruction and decodes it by its major opcode fields.  An
   undecodable instruction raises an internal error.  */

void
arm_process_displaced_insn (struct gdbarch *gdbarch, CORE_ADDR from,
			    CORE_ADDR to, struct regcache *regs,
			    arm_displaced_step_closure *dsc)
{
  int err = 0;
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  uint32_t insn;

  /* Most displaced instructions use a 1-instruction scratch space, so set this
     here and override below if/when necessary.  */
  dsc->numinsns = 1;
  dsc->insn_addr = from;
  dsc->scratch_base = to;
  dsc->cleanup = NULL;
  dsc->wrote_to_pc = 0;

  if (!displaced_in_arm_mode (regs))
    return thumb_process_displaced_insn (gdbarch, from, regs, dsc);

  dsc->is_thumb = 0;
  dsc->insn_size = 4;
  insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: stepping insn %.8lx "
			"at %.8lx\n", (unsigned long) insn,
			(unsigned long) from);

  if ((insn & 0xf0000000) == 0xf0000000)
    err = arm_decode_unconditional (gdbarch, insn, regs, dsc);
  /* Dispatch key: instruction bits 25-27 land in bits 1-3 of the key,
     instruction bit 4 in bit 0.  */
  else switch (((insn & 0x10) >> 4) | ((insn & 0xe000000) >> 24))
    {
    case 0x0: case 0x1: case 0x2: case 0x3:
      err = arm_decode_dp_misc (gdbarch, insn, regs, dsc);
      break;

    case 0x4: case 0x5: case 0x6:
      err = arm_decode_ld_st_word_ubyte (gdbarch, insn, regs, dsc);
      break;

    case 0x7:
      err = arm_decode_media (gdbarch, insn, dsc);
      break;

    case 0x8: case 0x9: case 0xa: case 0xb:
      err = arm_decode_b_bl_ldmstm (gdbarch, insn, regs, dsc);
      break;

    case 0xc: case 0xd: case 0xe: case 0xf:
      err = arm_decode_svc_copro (gdbarch, insn, regs, dsc);
      break;
    }

  if (err)
    internal_error (__FILE__, __LINE__,
		    _("arm_process_displaced_insn: Instruction decode error"));
}
7541
/* Actually set up the scratch space for a displaced instruction.  Poke
   the (possibly modified) instruction(s) prepared in DSC into the
   scratch area at TO, then append the architecture-appropriate
   breakpoint instruction so the inferior traps back to GDB after the
   single step.  */

void
arm_displaced_init_closure (struct gdbarch *gdbarch, CORE_ADDR from,
			    CORE_ADDR to, arm_displaced_step_closure *dsc)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  unsigned int i, len, offset;
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  /* Scratch instructions are written as 2-byte units for Thumb,
     4-byte units for ARM.  */
  int size = dsc->is_thumb? 2 : 4;
  const gdb_byte *bkp_insn;

  offset = 0;
  /* Poke modified instruction(s).  */
  for (i = 0; i < dsc->numinsns; i++)
    {
      if (debug_displaced)
	{
	  fprintf_unfiltered (gdb_stdlog, "displaced: writing insn ");
	  if (size == 4)
	    fprintf_unfiltered (gdb_stdlog, "%.8lx",
				dsc->modinsn[i]);
	  else if (size == 2)
	    fprintf_unfiltered (gdb_stdlog, "%.4x",
				(unsigned short)dsc->modinsn[i]);

	  fprintf_unfiltered (gdb_stdlog, " at %.8lx\n",
			      (unsigned long) to + offset);

	}
      write_memory_unsigned_integer (to + offset, size,
				     byte_order_for_code,
				     dsc->modinsn[i]);
      offset += size;
    }

  /* Choose the correct breakpoint instruction.  */
  if (dsc->is_thumb)
    {
      bkp_insn = tdep->thumb_breakpoint;
      len = tdep->thumb_breakpoint_size;
    }
  else
    {
      bkp_insn = tdep->arm_breakpoint;
      len = tdep->arm_breakpoint_size;
    }

  /* Put breakpoint afterwards.  */
  write_memory (to + offset, bkp_insn, len);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copy %s->%s: ",
			paddress (gdbarch, from), paddress (gdbarch, to));
}
7597
7598 /* Entry point for cleaning things up after a displaced instruction has been
7599 single-stepped. */
7600
7601 void
7602 arm_displaced_step_fixup (struct gdbarch *gdbarch,
7603 struct displaced_step_closure *dsc_,
7604 CORE_ADDR from, CORE_ADDR to,
7605 struct regcache *regs)
7606 {
7607 arm_displaced_step_closure *dsc = (arm_displaced_step_closure *) dsc_;
7608
7609 if (dsc->cleanup)
7610 dsc->cleanup (gdbarch, regs, dsc);
7611
7612 if (!dsc->wrote_to_pc)
7613 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM,
7614 dsc->insn_addr + dsc->insn_size);
7615
7616 }
7617
7618 #include "bfd-in2.h"
7619 #include "libcoff.h"
7620
/* Disassembler callback: print the instruction at MEMADDR.  When
   MEMADDR is a Thumb address, hand the opcodes disassembler a fake
   Thumb COFF symbol so it switches to decoding Thumb instructions.  */

static int
gdb_print_insn_arm (bfd_vma memaddr, disassemble_info *info)
{
  gdb_disassembler *di
    = static_cast<gdb_disassembler *>(info->application_data);
  struct gdbarch *gdbarch = di->arch ();

  if (arm_pc_is_thumb (gdbarch, memaddr))
    {
      /* Static so the fake symbol is built only once and then reused
	 across calls (guarded by the csym.native == NULL check).  */
      static asymbol *asym;
      static combined_entry_type ce;
      static struct coff_symbol_struct csym;
      static struct bfd fake_bfd;
      static bfd_target fake_target;

      if (csym.native == NULL)
	{
	  /* Create a fake symbol vector containing a Thumb symbol.
	     This is solely so that the code in print_insn_little_arm()
	     and print_insn_big_arm() in opcodes/arm-dis.c will detect
	     the presence of a Thumb symbol and switch to decoding
	     Thumb instructions.  */

	  fake_target.flavour = bfd_target_coff_flavour;
	  fake_bfd.xvec = &fake_target;
	  ce.u.syment.n_sclass = C_THUMBEXTFUNC;
	  csym.native = &ce;
	  csym.symbol.the_bfd = &fake_bfd;
	  csym.symbol.name = "fake";
	  asym = (asymbol *) & csym;
	}

      memaddr = UNMAKE_THUMB_ADDR (memaddr);
      info->symbols = &asym;
    }
  else
    info->symbols = NULL;

  /* GDB is able to get bfd_mach from the exe_bfd, info->mach is
     accurate, so mark USER_SPECIFIED_MACHINE_TYPE bit.  Otherwise,
     opcodes/arm-dis.c:print_insn reset info->mach, and it will trigger
     the assert on the mismatch of info->mach and bfd_get_mach (exec_bfd)
     in default_print_insn.  */
  if (exec_bfd != NULL)
    info->flags |= USER_SPECIFIED_MACHINE_TYPE;

  return default_print_insn (memaddr, info);
}
7669
7670 /* The following define instruction sequences that will cause ARM
7671 cpu's to take an undefined instruction trap. These are used to
7672 signal a breakpoint to GDB.
7673
7674 The newer ARMv4T cpu's are capable of operating in ARM or Thumb
7675 modes. A different instruction is required for each mode. The ARM
7676 cpu's can also be big or little endian. Thus four different
7677 instructions are needed to support all cases.
7678
7679 Note: ARMv4 defines several new instructions that will take the
7680 undefined instruction trap. ARM7TDMI is nominally ARMv4T, but does
7681 not in fact add the new instructions. The new undefined
7682 instructions in ARMv4 are all instructions that had no defined
7683 behaviour in earlier chips. There is no guarantee that they will
   raise an exception, but may be treated as NOP's.  In practice, it
   may only be safe to rely on instructions matching:
7686
7687 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
7688 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
7689 C C C C 0 1 1 x x x x x x x x x x x x x x x x x x x x 1 x x x x
7690
   Even this may only be true if the condition predicate is true.  The
7692 following use a condition predicate of ALWAYS so it is always TRUE.
7693
7694 There are other ways of forcing a breakpoint. GNU/Linux, RISC iX,
7695 and NetBSD all use a software interrupt rather than an undefined
   instruction to force a trap.  This can be handled by the
7697 abi-specific code during establishment of the gdbarch vector. */
7698
7699 #define ARM_LE_BREAKPOINT {0xFE,0xDE,0xFF,0xE7}
7700 #define ARM_BE_BREAKPOINT {0xE7,0xFF,0xDE,0xFE}
7701 #define THUMB_LE_BREAKPOINT {0xbe,0xbe}
7702 #define THUMB_BE_BREAKPOINT {0xbe,0xbe}
7703
7704 static const gdb_byte arm_default_arm_le_breakpoint[] = ARM_LE_BREAKPOINT;
7705 static const gdb_byte arm_default_arm_be_breakpoint[] = ARM_BE_BREAKPOINT;
7706 static const gdb_byte arm_default_thumb_le_breakpoint[] = THUMB_LE_BREAKPOINT;
7707 static const gdb_byte arm_default_thumb_be_breakpoint[] = THUMB_BE_BREAKPOINT;
7708
7709 /* Implement the breakpoint_kind_from_pc gdbarch method. */
7710
7711 static int
7712 arm_breakpoint_kind_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr)
7713 {
7714 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
7715 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
7716
7717 if (arm_pc_is_thumb (gdbarch, *pcptr))
7718 {
7719 *pcptr = UNMAKE_THUMB_ADDR (*pcptr);
7720
7721 /* If we have a separate 32-bit breakpoint instruction for Thumb-2,
7722 check whether we are replacing a 32-bit instruction. */
7723 if (tdep->thumb2_breakpoint != NULL)
7724 {
7725 gdb_byte buf[2];
7726
7727 if (target_read_memory (*pcptr, buf, 2) == 0)
7728 {
7729 unsigned short inst1;
7730
7731 inst1 = extract_unsigned_integer (buf, 2, byte_order_for_code);
7732 if (thumb_insn_size (inst1) == 4)
7733 return ARM_BP_KIND_THUMB2;
7734 }
7735 }
7736
7737 return ARM_BP_KIND_THUMB;
7738 }
7739 else
7740 return ARM_BP_KIND_ARM;
7741
7742 }
7743
7744 /* Implement the sw_breakpoint_from_kind gdbarch method. */
7745
7746 static const gdb_byte *
7747 arm_sw_breakpoint_from_kind (struct gdbarch *gdbarch, int kind, int *size)
7748 {
7749 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
7750
7751 switch (kind)
7752 {
7753 case ARM_BP_KIND_ARM:
7754 *size = tdep->arm_breakpoint_size;
7755 return tdep->arm_breakpoint;
7756 case ARM_BP_KIND_THUMB:
7757 *size = tdep->thumb_breakpoint_size;
7758 return tdep->thumb_breakpoint;
7759 case ARM_BP_KIND_THUMB2:
7760 *size = tdep->thumb2_breakpoint_size;
7761 return tdep->thumb2_breakpoint;
7762 default:
7763 gdb_assert_not_reached ("unexpected arm breakpoint kind");
7764 }
7765 }
7766
7767 /* Implement the breakpoint_kind_from_current_state gdbarch method. */
7768
static int
arm_breakpoint_kind_from_current_state (struct gdbarch *gdbarch,
					struct regcache *regcache,
					CORE_ADDR *pcptr)
{
  gdb_byte buf[4];

  /* Check the memory pointed by PC is readable.  */
  if (target_read_memory (regcache_read_pc (regcache), buf, 4) == 0)
    {
      struct arm_get_next_pcs next_pcs_ctx;

      arm_get_next_pcs_ctor (&next_pcs_ctx,
			     &arm_get_next_pcs_ops,
			     gdbarch_byte_order (gdbarch),
			     gdbarch_byte_order_for_code (gdbarch),
			     0,
			     regcache);

      std::vector<CORE_ADDR> next_pcs = arm_get_next_pcs (&next_pcs_ctx);

      /* If MEMADDR is the next instruction of current pc, do the
	 software single step computation, and get the thumb mode by
	 the destination address.  */
      for (CORE_ADDR pc : next_pcs)
	{
	  if (UNMAKE_THUMB_ADDR (pc) == *pcptr)
	    {
	      if (IS_THUMB_ADDR (pc))
		{
		  *pcptr = MAKE_THUMB_ADDR (*pcptr);
		  return arm_breakpoint_kind_from_pc (gdbarch, pcptr);
		}
	      else
		return ARM_BP_KIND_ARM;
	    }
	}
    }

  /* Otherwise fall back to classifying by *PCPTR alone.  */
  return arm_breakpoint_kind_from_pc (gdbarch, pcptr);
}
7810
7811 /* Extract from an array REGBUF containing the (raw) register state a
7812 function return value of type TYPE, and copy that, in virtual
7813 format, into VALBUF. */
7814
static void
arm_extract_return_value (struct type *type, struct regcache *regs,
			  gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = regs->arch ();
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  if (TYPE_CODE_FLT == TYPE_CODE (type))
    {
      switch (gdbarch_tdep (gdbarch)->fp_model)
	{
	case ARM_FLOAT_FPA:
	  {
	    /* The value is in register F0 in internal format.  We need to
	       extract the raw value and then convert it to the desired
	       internal type.  */
	    bfd_byte tmpbuf[FP_REGISTER_SIZE];

	    regs->cooked_read (ARM_F0_REGNUM, tmpbuf);
	    target_float_convert (tmpbuf, arm_ext_type (gdbarch),
				  valbuf, type);
	  }
	  break;

	case ARM_FLOAT_SOFT_FPA:
	case ARM_FLOAT_SOFT_VFP:
	  /* ARM_FLOAT_VFP can arise if this is a variadic function so
	     not using the VFP ABI code.  */
	case ARM_FLOAT_VFP:
	  /* Soft-float values live in the core registers starting at r0;
	     an 8-byte value spills into r1.  */
	  regs->cooked_read (ARM_A1_REGNUM, valbuf);
	  if (TYPE_LENGTH (type) > 4)
	    regs->cooked_read (ARM_A1_REGNUM + 1, valbuf + INT_REGISTER_SIZE);
	  break;

	default:
	  internal_error (__FILE__, __LINE__,
			  _("arm_extract_return_value: "
			    "Floating point model not supported"));
	  break;
	}
    }
  else if (TYPE_CODE (type) == TYPE_CODE_INT
	   || TYPE_CODE (type) == TYPE_CODE_CHAR
	   || TYPE_CODE (type) == TYPE_CODE_BOOL
	   || TYPE_CODE (type) == TYPE_CODE_PTR
	   || TYPE_IS_REFERENCE (type)
	   || TYPE_CODE (type) == TYPE_CODE_ENUM)
    {
      /* If the type is a plain integer, then the access is
	 straight-forward.  Otherwise we have to play around a bit
	 more.  */
      int len = TYPE_LENGTH (type);
      int regno = ARM_A1_REGNUM;
      ULONGEST tmp;

      while (len > 0)
	{
	  /* By using store_unsigned_integer we avoid having to do
	     anything special for small big-endian values.  */
	  regcache_cooked_read_unsigned (regs, regno++, &tmp);
	  store_unsigned_integer (valbuf,
				  (len > INT_REGISTER_SIZE
				   ? INT_REGISTER_SIZE : len),
				  byte_order, tmp);
	  len -= INT_REGISTER_SIZE;
	  valbuf += INT_REGISTER_SIZE;
	}
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
	 been stored to word-aligned memory and then loaded into
	 registers with 32-bit load instruction(s).  */
      int len = TYPE_LENGTH (type);
      int regno = ARM_A1_REGNUM;
      bfd_byte tmpbuf[INT_REGISTER_SIZE];

      while (len > 0)
	{
	  regs->cooked_read (regno++, tmpbuf);
	  /* The last register may hold fewer than INT_REGISTER_SIZE
	     meaningful bytes.  */
	  memcpy (valbuf, tmpbuf,
		  len > INT_REGISTER_SIZE ? INT_REGISTER_SIZE : len);
	  len -= INT_REGISTER_SIZE;
	  valbuf += INT_REGISTER_SIZE;
	}
    }
}
7902
7903
7904 /* Will a function return an aggregate type in memory or in a
7905 register? Return 0 if an aggregate type can be returned in a
7906 register, 1 if it must be returned in memory. */
7907
7908 static int
7909 arm_return_in_memory (struct gdbarch *gdbarch, struct type *type)
7910 {
7911 enum type_code code;
7912
7913 type = check_typedef (type);
7914
7915 /* Simple, non-aggregate types (ie not including vectors and
7916 complex) are always returned in a register (or registers). */
7917 code = TYPE_CODE (type);
7918 if (TYPE_CODE_STRUCT != code && TYPE_CODE_UNION != code
7919 && TYPE_CODE_ARRAY != code && TYPE_CODE_COMPLEX != code)
7920 return 0;
7921
7922 if (TYPE_CODE_ARRAY == code && TYPE_VECTOR (type))
7923 {
7924 /* Vector values should be returned using ARM registers if they
7925 are not over 16 bytes. */
7926 return (TYPE_LENGTH (type) > 16);
7927 }
7928
7929 if (gdbarch_tdep (gdbarch)->arm_abi != ARM_ABI_APCS)
7930 {
7931 /* The AAPCS says all aggregates not larger than a word are returned
7932 in a register. */
7933 if (TYPE_LENGTH (type) <= INT_REGISTER_SIZE)
7934 return 0;
7935
7936 return 1;
7937 }
7938 else
7939 {
7940 int nRc;
7941
7942 /* All aggregate types that won't fit in a register must be returned
7943 in memory. */
7944 if (TYPE_LENGTH (type) > INT_REGISTER_SIZE)
7945 return 1;
7946
7947 /* In the ARM ABI, "integer" like aggregate types are returned in
7948 registers. For an aggregate type to be integer like, its size
7949 must be less than or equal to INT_REGISTER_SIZE and the
7950 offset of each addressable subfield must be zero. Note that bit
7951 fields are not addressable, and all addressable subfields of
7952 unions always start at offset zero.
7953
7954 This function is based on the behaviour of GCC 2.95.1.
7955 See: gcc/arm.c: arm_return_in_memory() for details.
7956
7957 Note: All versions of GCC before GCC 2.95.2 do not set up the
7958 parameters correctly for a function returning the following
7959 structure: struct { float f;}; This should be returned in memory,
7960 not a register. Richard Earnshaw sent me a patch, but I do not
7961 know of any way to detect if a function like the above has been
7962 compiled with the correct calling convention. */
7963
7964 /* Assume all other aggregate types can be returned in a register.
7965 Run a check for structures, unions and arrays. */
7966 nRc = 0;
7967
7968 if ((TYPE_CODE_STRUCT == code) || (TYPE_CODE_UNION == code))
7969 {
7970 int i;
7971 /* Need to check if this struct/union is "integer" like. For
7972 this to be true, its size must be less than or equal to
7973 INT_REGISTER_SIZE and the offset of each addressable
7974 subfield must be zero. Note that bit fields are not
7975 addressable, and unions always start at offset zero. If any
7976 of the subfields is a floating point type, the struct/union
7977 cannot be an integer type. */
7978
7979 /* For each field in the object, check:
7980 1) Is it FP? --> yes, nRc = 1;
7981 2) Is it addressable (bitpos != 0) and
7982 not packed (bitsize == 0)?
7983 --> yes, nRc = 1
7984 */
7985
7986 for (i = 0; i < TYPE_NFIELDS (type); i++)
7987 {
7988 enum type_code field_type_code;
7989
7990 field_type_code
7991 = TYPE_CODE (check_typedef (TYPE_FIELD_TYPE (type,
7992 i)));
7993
7994 /* Is it a floating point type field? */
7995 if (field_type_code == TYPE_CODE_FLT)
7996 {
7997 nRc = 1;
7998 break;
7999 }
8000
8001 /* If bitpos != 0, then we have to care about it. */
8002 if (TYPE_FIELD_BITPOS (type, i) != 0)
8003 {
8004 /* Bitfields are not addressable. If the field bitsize is
8005 zero, then the field is not packed. Hence it cannot be
8006 a bitfield or any other packed type. */
8007 if (TYPE_FIELD_BITSIZE (type, i) == 0)
8008 {
8009 nRc = 1;
8010 break;
8011 }
8012 }
8013 }
8014 }
8015
8016 return nRc;
8017 }
8018 }
8019
8020 /* Write into appropriate registers a function return value of type
8021 TYPE, given in virtual format. */
8022
static void
arm_store_return_value (struct type *type, struct regcache *regs,
			const gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = regs->arch ();
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  if (TYPE_CODE (type) == TYPE_CODE_FLT)
    {
      gdb_byte buf[FP_REGISTER_SIZE];

      switch (gdbarch_tdep (gdbarch)->fp_model)
	{
	case ARM_FLOAT_FPA:
	  /* Convert to FPA internal format and place in F0.  */
	  target_float_convert (valbuf, type, buf, arm_ext_type (gdbarch));
	  regs->cooked_write (ARM_F0_REGNUM, buf);
	  break;

	case ARM_FLOAT_SOFT_FPA:
	case ARM_FLOAT_SOFT_VFP:
	  /* ARM_FLOAT_VFP can arise if this is a variadic function so
	     not using the VFP ABI code.  */
	case ARM_FLOAT_VFP:
	  /* Soft-float values live in the core registers starting at r0;
	     an 8-byte value spills into r1.  */
	  regs->cooked_write (ARM_A1_REGNUM, valbuf);
	  if (TYPE_LENGTH (type) > 4)
	    regs->cooked_write (ARM_A1_REGNUM + 1, valbuf + INT_REGISTER_SIZE);
	  break;

	default:
	  internal_error (__FILE__, __LINE__,
			  _("arm_store_return_value: Floating "
			    "point model not supported"));
	  break;
	}
    }
  else if (TYPE_CODE (type) == TYPE_CODE_INT
	   || TYPE_CODE (type) == TYPE_CODE_CHAR
	   || TYPE_CODE (type) == TYPE_CODE_BOOL
	   || TYPE_CODE (type) == TYPE_CODE_PTR
	   || TYPE_IS_REFERENCE (type)
	   || TYPE_CODE (type) == TYPE_CODE_ENUM)
    {
      if (TYPE_LENGTH (type) <= 4)
	{
	  /* Values of one word or less are zero/sign-extended and
	     returned in r0.  */
	  bfd_byte tmpbuf[INT_REGISTER_SIZE];
	  LONGEST val = unpack_long (type, valbuf);

	  store_signed_integer (tmpbuf, INT_REGISTER_SIZE, byte_order, val);
	  regs->cooked_write (ARM_A1_REGNUM, tmpbuf);
	}
      else
	{
	  /* Integral values greater than one word are stored in consecutive
	     registers starting with r0.  This will always be a multiple of
	     the register size.  */
	  int len = TYPE_LENGTH (type);
	  int regno = ARM_A1_REGNUM;

	  while (len > 0)
	    {
	      regs->cooked_write (regno++, valbuf);
	      len -= INT_REGISTER_SIZE;
	      valbuf += INT_REGISTER_SIZE;
	    }
	}
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
	 been stored to word-aligned memory and then loaded into
	 registers with 32-bit load instruction(s).  */
      int len = TYPE_LENGTH (type);
      int regno = ARM_A1_REGNUM;
      bfd_byte tmpbuf[INT_REGISTER_SIZE];

      while (len > 0)
	{
	  /* The final chunk may be shorter than a full register.  */
	  memcpy (tmpbuf, valbuf,
		  len > INT_REGISTER_SIZE ? INT_REGISTER_SIZE : len);
	  regs->cooked_write (regno++, tmpbuf);
	  len -= INT_REGISTER_SIZE;
	  valbuf += INT_REGISTER_SIZE;
	}
    }
}
8111
8112
/* Implement the "return_value" gdbarch method.  Decide how a value of
   type VALTYPE is returned from a function: in VFP registers (VFP
   variant of the AAPCS), in memory (struct convention), or in core
   registers.  If READBUF or WRITEBUF is non-NULL, also fetch or store
   the value through REGCACHE.  */

static enum return_value_convention
arm_return_value (struct gdbarch *gdbarch, struct value *function,
		  struct type *valtype, struct regcache *regcache,
		  gdb_byte *readbuf, const gdb_byte *writebuf)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  struct type *func_type = function ? value_type (function) : NULL;
  enum arm_vfp_cprc_base_type vfp_base_type;
  int vfp_base_count;

  /* VFP co-processor register candidates are returned in consecutive
     VFP registers when the function uses the VFP calling variant.  */
  if (arm_vfp_abi_for_function (gdbarch, func_type)
      && arm_vfp_call_candidate (valtype, &vfp_base_type, &vfp_base_count))
    {
      int reg_char = arm_vfp_cprc_reg_char (vfp_base_type);
      int unit_length = arm_vfp_cprc_unit_length (vfp_base_type);
      int i;
      for (i = 0; i < vfp_base_count; i++)
	{
	  if (reg_char == 'q')
	    {
	      /* Quad registers are accessed via their two double
		 halves; use the NEON helpers.  */
	      if (writebuf)
		arm_neon_quad_write (gdbarch, regcache, i,
				     writebuf + i * unit_length);

	      if (readbuf)
		arm_neon_quad_read (gdbarch, regcache, i,
				    readbuf + i * unit_length);
	    }
	  else
	    {
	      char name_buf[4];
	      int regnum;

	      /* Map e.g. "s0" or "d0" to its register number.  */
	      xsnprintf (name_buf, sizeof (name_buf), "%c%d", reg_char, i);
	      regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
						    strlen (name_buf));
	      if (writebuf)
		regcache->cooked_write (regnum, writebuf + i * unit_length);
	      if (readbuf)
		regcache->cooked_read (regnum, readbuf + i * unit_length);
	    }
	}
      return RETURN_VALUE_REGISTER_CONVENTION;
    }

  /* Aggregates and complex values may be returned in memory, depending
     on the selected struct-return convention and the type's layout.  */
  if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
      || TYPE_CODE (valtype) == TYPE_CODE_UNION
      || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
    {
      if (tdep->struct_return == pcc_struct_return
	  || arm_return_in_memory (gdbarch, valtype))
	return RETURN_VALUE_STRUCT_CONVENTION;
    }
  else if (TYPE_CODE (valtype) == TYPE_CODE_COMPLEX)
    {
      if (arm_return_in_memory (gdbarch, valtype))
	return RETURN_VALUE_STRUCT_CONVENTION;
    }

  /* Everything else goes through the core registers.  */
  if (writebuf)
    arm_store_return_value (valtype, regcache, writebuf);

  if (readbuf)
    arm_extract_return_value (valtype, regcache, readbuf);

  return RETURN_VALUE_REGISTER_CONVENTION;
}
8182
8183
8184 static int
8185 arm_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
8186 {
8187 struct gdbarch *gdbarch = get_frame_arch (frame);
8188 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
8189 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
8190 CORE_ADDR jb_addr;
8191 gdb_byte buf[INT_REGISTER_SIZE];
8192
8193 jb_addr = get_frame_register_unsigned (frame, ARM_A1_REGNUM);
8194
8195 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
8196 INT_REGISTER_SIZE))
8197 return 0;
8198
8199 *pc = extract_unsigned_integer (buf, INT_REGISTER_SIZE, byte_order);
8200 return 1;
8201 }
8202
/* Recognize GCC and GNU ld's trampolines.  If we are in a trampoline,
   return the target PC.  Otherwise return 0.  */

CORE_ADDR
arm_skip_stub (struct frame_info *frame, CORE_ADDR pc)
{
  const char *name;
  int namelen;
  CORE_ADDR start_addr;

  /* Find the starting address and name of the function containing the PC.  */
  if (find_pc_partial_function (pc, &name, &start_addr, NULL) == 0)
    {
      /* Trampoline 'bx reg' doesn't belong to any functions.  Do the
	 check here.  */
      start_addr = arm_skip_bx_reg (frame, pc);
      if (start_addr != 0)
	return start_addr;

      return 0;
    }

  /* If PC is in a Thumb call or return stub, return the address of the
     target PC, which is in a register.  The thunk functions are called
     _call_via_xx, where x is the register name.  The possible names
     are r0-r9, sl, fp, ip, sp, and lr.  ARM RealView has similar
     functions, named __ARM_call_via_r[0-7].  */
  if (startswith (name, "_call_via_")
      || startswith (name, "__ARM_call_via_"))
    {
      /* Use the name suffix to determine which register contains the
	 target PC.  */
      static const char *table[15] =
      {"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
       "r8", "r9", "sl", "fp", "ip", "sp", "lr"
      };
      int regno;
      /* Every name in TABLE is exactly two characters, so the register
	 name is always the last two characters of the stub name.  */
      int offset = strlen (name) - 2;

      for (regno = 0; regno <= 14; regno++)
	if (strcmp (&name[offset], table[regno]) == 0)
	  return get_frame_register_unsigned (frame, regno);
    }

  /* GNU ld generates __foo_from_arm or __foo_from_thumb for
     non-interworking calls to foo.  We could decode the stubs
     to find the target but it's easier to use the symbol table.  */
  namelen = strlen (name);
  if (name[0] == '_' && name[1] == '_'
      && ((namelen > 2 + strlen ("_from_thumb")
	   && startswith (name + namelen - strlen ("_from_thumb"), "_from_thumb"))
	  || (namelen > 2 + strlen ("_from_arm")
	      && startswith (name + namelen - strlen ("_from_arm"), "_from_arm"))))
    {
      char *target_name;
      int target_len = namelen - 2;
      struct bound_minimal_symbol minsym;
      struct objfile *objfile;
      struct obj_section *sec;

      /* Strip the leading "__" and the "_from_..." suffix to recover
	 the name of the real call target.  */
      if (name[namelen - 1] == 'b')
	target_len -= strlen ("_from_thumb");
      else
	target_len -= strlen ("_from_arm");

      target_name = (char *) alloca (target_len + 1);
      memcpy (target_name, name + 2, target_len);
      target_name[target_len] = '\0';

      /* Prefer a symbol from the same objfile as the stub, if any.  */
      sec = find_pc_section (pc);
      objfile = (sec == NULL) ? NULL : sec->objfile;
      minsym = lookup_minimal_symbol (target_name, NULL, objfile);
      if (minsym.minsym != NULL)
	return BMSYMBOL_VALUE_ADDRESS (minsym);
      else
	return 0;
    }

  return 0;			/* not a stub */
}
8283
8284 static void
8285 set_arm_command (const char *args, int from_tty)
8286 {
8287 printf_unfiltered (_("\
8288 \"set arm\" must be followed by an apporpriate subcommand.\n"));
8289 help_list (setarmcmdlist, "set arm ", all_commands, gdb_stdout);
8290 }
8291
/* Implement the "show arm" prefix command: display the values of all
   "show arm ..." subcommands.  */

static void
show_arm_command (const char *args, int from_tty)
{
  cmd_show_list (showarmcmdlist, from_tty, "");
}
8297
/* Re-select the current architecture after a "set arm ..." command
   changed one of the global ARM settings.  Passing a fresh
   gdbarch_info to gdbarch_update_p forces the init routine to re-read
   the globals.  */

static void
arm_update_current_architecture (void)
{
  struct gdbarch_info info;

  /* If the current architecture is not ARM, we have nothing to do.  */
  if (gdbarch_bfd_arch_info (target_gdbarch ())->arch != bfd_arch_arm)
    return;

  /* Update the architecture.  */
  gdbarch_info_init (&info);

  if (!gdbarch_update_p (info))
    internal_error (__FILE__, __LINE__, _("could not update architecture"));
}
8313
8314 static void
8315 set_fp_model_sfunc (const char *args, int from_tty,
8316 struct cmd_list_element *c)
8317 {
8318 int fp_model;
8319
8320 for (fp_model = ARM_FLOAT_AUTO; fp_model != ARM_FLOAT_LAST; fp_model++)
8321 if (strcmp (current_fp_model, fp_model_strings[fp_model]) == 0)
8322 {
8323 arm_fp_model = (enum arm_float_model) fp_model;
8324 break;
8325 }
8326
8327 if (fp_model == ARM_FLOAT_LAST)
8328 internal_error (__FILE__, __LINE__, _("Invalid fp model accepted: %s."),
8329 current_fp_model);
8330
8331 arm_update_current_architecture ();
8332 }
8333
/* Implement "show arm fp-model".  When the setting is "auto" and the
   current architecture is ARM, also report the model actually in
   effect for the current target.  */

static void
show_fp_model (struct ui_file *file, int from_tty,
	       struct cmd_list_element *c, const char *value)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch ());

  if (arm_fp_model == ARM_FLOAT_AUTO
      && gdbarch_bfd_arch_info (target_gdbarch ())->arch == bfd_arch_arm)
    fprintf_filtered (file, _("\
The current ARM floating point model is \"auto\" (currently \"%s\").\n"),
		      fp_model_strings[tdep->fp_model]);
  else
    fprintf_filtered (file, _("\
The current ARM floating point model is \"%s\".\n"),
		      fp_model_strings[arm_fp_model]);
}
8350
8351 static void
8352 arm_set_abi (const char *args, int from_tty,
8353 struct cmd_list_element *c)
8354 {
8355 int arm_abi;
8356
8357 for (arm_abi = ARM_ABI_AUTO; arm_abi != ARM_ABI_LAST; arm_abi++)
8358 if (strcmp (arm_abi_string, arm_abi_strings[arm_abi]) == 0)
8359 {
8360 arm_abi_global = (enum arm_abi_kind) arm_abi;
8361 break;
8362 }
8363
8364 if (arm_abi == ARM_ABI_LAST)
8365 internal_error (__FILE__, __LINE__, _("Invalid ABI accepted: %s."),
8366 arm_abi_string);
8367
8368 arm_update_current_architecture ();
8369 }
8370
/* Implement "show arm abi".  When the setting is "auto" and the
   current architecture is ARM, also report the ABI actually selected
   for the current target.  */

static void
arm_show_abi (struct ui_file *file, int from_tty,
	      struct cmd_list_element *c, const char *value)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch ());

  if (arm_abi_global == ARM_ABI_AUTO
      && gdbarch_bfd_arch_info (target_gdbarch ())->arch == bfd_arch_arm)
    fprintf_filtered (file, _("\
The current ARM ABI is \"auto\" (currently \"%s\").\n"),
		      arm_abi_strings[tdep->arm_abi]);
  else
    fprintf_filtered (file, _("The current ARM ABI is \"%s\".\n"),
		      arm_abi_string);
}
8386
/* Implement "show arm fallback-mode": report the instruction-set mode
   assumed when no symbol information is available.  */

static void
arm_show_fallback_mode (struct ui_file *file, int from_tty,
			struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
		    _("The current execution mode assumed "
		      "(when symbols are unavailable) is \"%s\".\n"),
		    arm_fallback_mode_string);
}
8396
/* Implement "show arm force-mode": report the instruction-set mode
   forced regardless of symbol information.  */

static void
arm_show_force_mode (struct ui_file *file, int from_tty,
		     struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
		    _("The current execution mode assumed "
		      "(even when symbols are available) is \"%s\".\n"),
		    arm_force_mode_string);
}
8406
/* If the user changes the register disassembly style used for info
   register and other commands, we have to also switch the style used
   in opcodes for disassembly output.  This function is run in the "set
   arm disassembly" command, and does that.  */

static void
set_disassembly_style_sfunc (const char *args, int from_tty,
			     struct cmd_list_element *c)
{
  /* Convert the short style name into the long style name (eg, reg-names-*)
     before calling the generic set_disassembler_options() function.  */
  std::string long_name = std::string ("reg-names-") + disassembly_style;
  /* &long_name[0] yields the mutable char * the callee requires.  */
  set_disassembler_options (&long_name[0]);
}
8421
/* Implement "show arm disassembler": extract the current "reg-names-*"
   option from the disassembler options and print its short style
   name.  */

static void
show_disassembly_style_sfunc (struct ui_file *file, int from_tty,
			      struct cmd_list_element *c, const char *value)
{
  struct gdbarch *gdbarch = get_current_arch ();
  char *options = get_disassembler_options (gdbarch);
  const char *style = "";
  int len = 0;
  const char *opt;

  /* Scan all options; the last "reg-names-" entry wins, matching the
     behaviour of the opcodes disassembler itself.  */
  FOR_EACH_DISASSEMBLER_OPTION (opt, options)
    if (CONST_STRNEQ (opt, "reg-names-"))
      {
	style = &opt[strlen ("reg-names-")];
	/* The style name runs up to the next comma, if any.  */
	len = strcspn (style, ",");
      }

  fprintf_unfiltered (file, "The disassembly style is \"%.*s\".\n", len, style);
}
8441 \f
/* Return the ARM register name corresponding to register I.  Pseudo
   registers follow the raw registers: first the 32 single-precision
   VFP views (s0-s31), then the 16 NEON quad views (q0-q15).  */
static const char *
arm_register_name (struct gdbarch *gdbarch, int i)
{
  const int num_regs = gdbarch_num_regs (gdbarch);

  if (gdbarch_tdep (gdbarch)->have_vfp_pseudos
      && i >= num_regs && i < num_regs + 32)
    {
      static const char *const vfp_pseudo_names[] = {
	"s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
	"s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15",
	"s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
	"s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",
      };

      return vfp_pseudo_names[i - num_regs];
    }

  if (gdbarch_tdep (gdbarch)->have_neon_pseudos
      && i >= num_regs + 32 && i < num_regs + 32 + 16)
    {
      static const char *const neon_pseudo_names[] = {
	"q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7",
	"q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15",
      };

      return neon_pseudo_names[i - num_regs - 32];
    }

  if (i >= ARRAY_SIZE (arm_register_names))
    /* These registers are only supported on targets which supply
       an XML description.  */
    return "";

  return arm_register_names[i];
}
8479
8480 /* Test whether the coff symbol specific value corresponds to a Thumb
8481 function. */
8482
8483 static int
8484 coff_sym_is_thumb (int val)
8485 {
8486 return (val == C_THUMBEXT
8487 || val == C_THUMBSTAT
8488 || val == C_THUMBEXTFUNC
8489 || val == C_THUMBSTATFUNC
8490 || val == C_THUMBLABEL);
8491 }
8492
/* arm_coff_make_msymbol_special()
   arm_elf_make_msymbol_special()

   These functions test whether the COFF or ELF symbol corresponds to
   an address in thumb code, and set a "special" bit in a minimal
   symbol to indicate that it does.  */

static void
arm_elf_make_msymbol_special(asymbol *sym, struct minimal_symbol *msym)
{
  elf_symbol_type *elfsym = (elf_symbol_type *) sym;

  /* The linker records Thumb entry points in the st_target_internal
     branch-type field of the ELF symbol.  */
  if (ARM_GET_SYM_BRANCH_TYPE (elfsym->internal_elf_sym.st_target_internal)
      == ST_BRANCH_TO_THUMB)
    MSYMBOL_SET_SPECIAL (msym);
}
8509
/* COFF counterpart of arm_elf_make_msymbol_special: mark MSYM as Thumb
   when its storage class VAL is one of the Thumb classes.  */

static void
arm_coff_make_msymbol_special(int val, struct minimal_symbol *msym)
{
  if (coff_sym_is_thumb (val))
    MSYMBOL_SET_SPECIAL (msym);
}
8516
/* Destructor registered with the objfile-data mechanism; frees the
   per-objfile ARM data (mapping-symbol vectors) when OBJFILE goes
   away.  */

static void
arm_objfile_data_free (struct objfile *objfile, void *arg)
{
  struct arm_per_objfile *data = (struct arm_per_objfile *) arg;

  delete data;
}
8524
/* Implement the "record_special_symbol" gdbarch method.  Record ARM
   ELF mapping symbols ($a = ARM code, $t = Thumb code, $d = data) in
   per-objfile, per-section sorted vectors, so the instruction-set
   state of an address can be looked up later.  */

static void
arm_record_special_symbol (struct gdbarch *gdbarch, struct objfile *objfile,
			   asymbol *sym)
{
  const char *name = bfd_asymbol_name (sym);
  struct arm_per_objfile *data;
  struct arm_mapping_symbol new_map_sym;

  gdb_assert (name[0] == '$');
  if (name[1] != 'a' && name[1] != 't' && name[1] != 'd')
    return;

  /* Lazily create the per-objfile data the first time a mapping
     symbol is seen for this objfile.  */
  data = (struct arm_per_objfile *) objfile_data (objfile,
						  arm_objfile_data_key);
  if (data == NULL)
    {
      data = new arm_per_objfile (objfile->obfd->section_count);
      set_objfile_data (objfile, arm_objfile_data_key, data);
    }
  arm_mapping_symbol_vec &map
    = data->section_maps[bfd_get_section (sym)->index];

  new_map_sym.value = sym->value;
  new_map_sym.type = name[1];

  /* Assume that most mapping symbols appear in order of increasing
     value.  If they were randomly distributed, it would be faster to
     always push here and then sort at first use.  */
  arm_mapping_symbol_vec::iterator it
    = std::lower_bound (map.begin (), map.end (), new_map_sym);
  map.insert (it, new_map_sym);
}
8557
8558 static void
8559 arm_write_pc (struct regcache *regcache, CORE_ADDR pc)
8560 {
8561 struct gdbarch *gdbarch = regcache->arch ();
8562 regcache_cooked_write_unsigned (regcache, ARM_PC_REGNUM, pc);
8563
8564 /* If necessary, set the T bit. */
8565 if (arm_apcs_32)
8566 {
8567 ULONGEST val, t_bit;
8568 regcache_cooked_read_unsigned (regcache, ARM_PS_REGNUM, &val);
8569 t_bit = arm_psr_thumb_bit (gdbarch);
8570 if (arm_pc_is_thumb (gdbarch, pc))
8571 regcache_cooked_write_unsigned (regcache, ARM_PS_REGNUM,
8572 val | t_bit);
8573 else
8574 regcache_cooked_write_unsigned (regcache, ARM_PS_REGNUM,
8575 val & ~t_bit);
8576 }
8577 }
8578
8579 /* Read the contents of a NEON quad register, by reading from two
8580 double registers. This is used to implement the quad pseudo
8581 registers, and for argument passing in case the quad registers are
8582 missing; vectors are passed in quad registers when using the VFP
8583 ABI, even if a NEON unit is not present. REGNUM is the index of
8584 the quad register, in [0, 15]. */
8585
8586 static enum register_status
8587 arm_neon_quad_read (struct gdbarch *gdbarch, readable_regcache *regcache,
8588 int regnum, gdb_byte *buf)
8589 {
8590 char name_buf[4];
8591 gdb_byte reg_buf[8];
8592 int offset, double_regnum;
8593 enum register_status status;
8594
8595 xsnprintf (name_buf, sizeof (name_buf), "d%d", regnum << 1);
8596 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
8597 strlen (name_buf));
8598
8599 /* d0 is always the least significant half of q0. */
8600 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
8601 offset = 8;
8602 else
8603 offset = 0;
8604
8605 status = regcache->raw_read (double_regnum, reg_buf);
8606 if (status != REG_VALID)
8607 return status;
8608 memcpy (buf + offset, reg_buf, 8);
8609
8610 offset = 8 - offset;
8611 status = regcache->raw_read (double_regnum + 1, reg_buf);
8612 if (status != REG_VALID)
8613 return status;
8614 memcpy (buf + offset, reg_buf, 8);
8615
8616 return REG_VALID;
8617 }
8618
/* Implement the "pseudo_register_read" gdbarch method.  After
   rebasing REGNUM past the raw registers, indices [0, 32) are the
   single-precision views s0-s31 (each half of a double register) and
   [32, 48) are the NEON quad views q0-q15.  */

static enum register_status
arm_pseudo_read (struct gdbarch *gdbarch, readable_regcache *regcache,
		 int regnum, gdb_byte *buf)
{
  const int num_regs = gdbarch_num_regs (gdbarch);
  char name_buf[4];
  gdb_byte reg_buf[8];
  int offset, double_regnum;

  gdb_assert (regnum >= num_regs);
  regnum -= num_regs;

  if (gdbarch_tdep (gdbarch)->have_neon_pseudos && regnum >= 32 && regnum < 48)
    /* Quad-precision register.  */
    return arm_neon_quad_read (gdbarch, regcache, regnum - 32, buf);
  else
    {
      enum register_status status;

      /* Single-precision register.  */
      gdb_assert (regnum < 32);

      /* s0 is always the least significant half of d0.  */
      if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
	offset = (regnum & 1) ? 0 : 4;
      else
	offset = (regnum & 1) ? 4 : 0;

      /* sN lives in double register d(N/2).  */
      xsnprintf (name_buf, sizeof (name_buf), "d%d", regnum >> 1);
      double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
						   strlen (name_buf));

      status = regcache->raw_read (double_regnum, reg_buf);
      if (status == REG_VALID)
	memcpy (buf, reg_buf + offset, 4);
      return status;
    }
}
8657
8658 /* Store the contents of BUF to a NEON quad register, by writing to
8659 two double registers. This is used to implement the quad pseudo
8660 registers, and for argument passing in case the quad registers are
8661 missing; vectors are passed in quad registers when using the VFP
8662 ABI, even if a NEON unit is not present. REGNUM is the index
8663 of the quad register, in [0, 15]. */
8664
8665 static void
8666 arm_neon_quad_write (struct gdbarch *gdbarch, struct regcache *regcache,
8667 int regnum, const gdb_byte *buf)
8668 {
8669 char name_buf[4];
8670 int offset, double_regnum;
8671
8672 xsnprintf (name_buf, sizeof (name_buf), "d%d", regnum << 1);
8673 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
8674 strlen (name_buf));
8675
8676 /* d0 is always the least significant half of q0. */
8677 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
8678 offset = 8;
8679 else
8680 offset = 0;
8681
8682 regcache->raw_write (double_regnum, buf + offset);
8683 offset = 8 - offset;
8684 regcache->raw_write (double_regnum + 1, buf + offset);
8685 }
8686
/* Implement the "pseudo_register_write" gdbarch method.  After
   rebasing REGNUM past the raw registers, indices [0, 32) are the
   single-precision views s0-s31 and [32, 48) are the NEON quad views
   q0-q15; see arm_pseudo_read.  */

static void
arm_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
		  int regnum, const gdb_byte *buf)
{
  const int num_regs = gdbarch_num_regs (gdbarch);
  char name_buf[4];
  gdb_byte reg_buf[8];
  int offset, double_regnum;

  gdb_assert (regnum >= num_regs);
  regnum -= num_regs;

  if (gdbarch_tdep (gdbarch)->have_neon_pseudos && regnum >= 32 && regnum < 48)
    /* Quad-precision register.  */
    arm_neon_quad_write (gdbarch, regcache, regnum - 32, buf);
  else
    {
      /* Single-precision register.  */
      gdb_assert (regnum < 32);

      /* s0 is always the least significant half of d0.  */
      if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
	offset = (regnum & 1) ? 0 : 4;
      else
	offset = (regnum & 1) ? 4 : 0;

      /* sN lives in double register d(N/2).  */
      xsnprintf (name_buf, sizeof (name_buf), "d%d", regnum >> 1);
      double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
						   strlen (name_buf));

      /* Read-modify-write: only 4 bytes of the double register
	 change.  */
      regcache->raw_read (double_regnum, reg_buf);
      memcpy (reg_buf + offset, buf, 4);
      regcache->raw_write (double_regnum, reg_buf);
    }
}
8722
8723 static struct value *
8724 value_of_arm_user_reg (struct frame_info *frame, const void *baton)
8725 {
8726 const int *reg_p = (const int *) baton;
8727 return value_of_register (*reg_p, frame);
8728 }
8729 \f
/* OS ABI sniffer for ARM ELF files.  Only the old GNU-tools
   ELFOSABI_ARM value is handled here (by scanning note sections);
   anything else falls through to the generic ELF sniffer.  */

static enum gdb_osabi
arm_elf_osabi_sniffer (bfd *abfd)
{
  unsigned int elfosabi;
  enum gdb_osabi osabi = GDB_OSABI_UNKNOWN;

  elfosabi = elf_elfheader (abfd)->e_ident[EI_OSABI];

  if (elfosabi == ELFOSABI_ARM)
    /* GNU tools use this value.  Check note sections in this case,
       as well.  */
    bfd_map_over_sections (abfd,
			   generic_elf_osabi_sniff_abi_tag_sections,
			   &osabi);

  /* Anything else will be handled by the generic ELF sniffer.  */
  return osabi;
}
8748
8749 static int
8750 arm_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
8751 struct reggroup *group)
8752 {
8753 /* FPS register's type is INT, but belongs to float_reggroup. Beside
8754 this, FPS register belongs to save_regroup, restore_reggroup, and
8755 all_reggroup, of course. */
8756 if (regnum == ARM_FPS_REGNUM)
8757 return (group == float_reggroup
8758 || group == save_reggroup
8759 || group == restore_reggroup
8760 || group == all_reggroup);
8761 else
8762 return default_register_reggroup_p (gdbarch, regnum, group);
8763 }
8764
8765 \f
/* For backward-compatibility we allow two 'g' packet lengths with
   the remote protocol depending on whether FPA registers are
   supplied.  M-profile targets do not have FPA registers, but some
   stubs already exist in the wild which use a 'g' packet which
   supplies them albeit with dummy values.  The packet format which
   includes FPA registers should be considered deprecated for
   M-profile targets.  */

static void
arm_register_g_packet_guesses (struct gdbarch *gdbarch)
{
  if (gdbarch_tdep (gdbarch)->is_m)
    {
      /* If we know from the executable this is an M-profile target,
	 cater for remote targets whose register set layout is the
	 same as the FPA layout.  */
      register_remote_g_packet_guess (gdbarch,
				      /* r0-r12,sp,lr,pc; f0-f7; fps,xpsr */
				      (16 * INT_REGISTER_SIZE)
				      + (8 * FP_REGISTER_SIZE)
				      + (2 * INT_REGISTER_SIZE),
				      tdesc_arm_with_m_fpa_layout);

      /* The regular M-profile layout.  */
      register_remote_g_packet_guess (gdbarch,
				      /* r0-r12,sp,lr,pc; xpsr */
				      (16 * INT_REGISTER_SIZE)
				      + INT_REGISTER_SIZE,
				      tdesc_arm_with_m);

      /* M-profile plus M4F VFP.  */
      register_remote_g_packet_guess (gdbarch,
				      /* r0-r12,sp,lr,pc; d0-d15; fpscr,xpsr */
				      (16 * INT_REGISTER_SIZE)
				      + (16 * VFP_REGISTER_SIZE)
				      + (2 * INT_REGISTER_SIZE),
				      tdesc_arm_with_m_vfp_d16);
    }

  /* Otherwise we don't have a useful guess.  */
}
8807
8808 /* Implement the code_of_frame_writable gdbarch method. */
8809
8810 static int
8811 arm_code_of_frame_writable (struct gdbarch *gdbarch, struct frame_info *frame)
8812 {
8813 if (gdbarch_tdep (gdbarch)->is_m
8814 && get_frame_type (frame) == SIGTRAMP_FRAME)
8815 {
8816 /* M-profile exception frames return to some magic PCs, where
8817 isn't writable at all. */
8818 return 0;
8819 }
8820 else
8821 return 1;
8822 }
8823
8824 \f
8825 /* Initialize the current architecture based on INFO. If possible,
8826 re-use an architecture from ARCHES, which is a list of
8827 architectures already created during this debugging session.
8828
8829 Called e.g. at program startup, when reading a core file, and when
8830 reading a binary file. */
8831
8832 static struct gdbarch *
8833 arm_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
8834 {
8835 struct gdbarch_tdep *tdep;
8836 struct gdbarch *gdbarch;
8837 struct gdbarch_list *best_arch;
8838 enum arm_abi_kind arm_abi = arm_abi_global;
8839 enum arm_float_model fp_model = arm_fp_model;
8840 struct tdesc_arch_data *tdesc_data = NULL;
8841 int i, is_m = 0;
8842 int vfp_register_count = 0, have_vfp_pseudos = 0, have_neon_pseudos = 0;
8843 int have_wmmx_registers = 0;
8844 int have_neon = 0;
8845 int have_fpa_registers = 1;
8846 const struct target_desc *tdesc = info.target_desc;
8847
8848 /* If we have an object to base this architecture on, try to determine
8849 its ABI. */
8850
8851 if (arm_abi == ARM_ABI_AUTO && info.abfd != NULL)
8852 {
8853 int ei_osabi, e_flags;
8854
8855 switch (bfd_get_flavour (info.abfd))
8856 {
8857 case bfd_target_coff_flavour:
8858 /* Assume it's an old APCS-style ABI. */
8859 /* XXX WinCE? */
8860 arm_abi = ARM_ABI_APCS;
8861 break;
8862
8863 case bfd_target_elf_flavour:
8864 ei_osabi = elf_elfheader (info.abfd)->e_ident[EI_OSABI];
8865 e_flags = elf_elfheader (info.abfd)->e_flags;
8866
8867 if (ei_osabi == ELFOSABI_ARM)
8868 {
8869 /* GNU tools used to use this value, but do not for EABI
8870 objects. There's nowhere to tag an EABI version
8871 anyway, so assume APCS. */
8872 arm_abi = ARM_ABI_APCS;
8873 }
8874 else if (ei_osabi == ELFOSABI_NONE || ei_osabi == ELFOSABI_GNU)
8875 {
8876 int eabi_ver = EF_ARM_EABI_VERSION (e_flags);
8877
8878 switch (eabi_ver)
8879 {
8880 case EF_ARM_EABI_UNKNOWN:
8881 /* Assume GNU tools. */
8882 arm_abi = ARM_ABI_APCS;
8883 break;
8884
8885 case EF_ARM_EABI_VER4:
8886 case EF_ARM_EABI_VER5:
8887 arm_abi = ARM_ABI_AAPCS;
8888 /* EABI binaries default to VFP float ordering.
8889 They may also contain build attributes that can
8890 be used to identify if the VFP argument-passing
8891 ABI is in use. */
8892 if (fp_model == ARM_FLOAT_AUTO)
8893 {
8894 #ifdef HAVE_ELF
8895 switch (bfd_elf_get_obj_attr_int (info.abfd,
8896 OBJ_ATTR_PROC,
8897 Tag_ABI_VFP_args))
8898 {
8899 case AEABI_VFP_args_base:
8900 /* "The user intended FP parameter/result
8901 passing to conform to AAPCS, base
8902 variant". */
8903 fp_model = ARM_FLOAT_SOFT_VFP;
8904 break;
8905 case AEABI_VFP_args_vfp:
8906 /* "The user intended FP parameter/result
8907 passing to conform to AAPCS, VFP
8908 variant". */
8909 fp_model = ARM_FLOAT_VFP;
8910 break;
8911 case AEABI_VFP_args_toolchain:
8912 /* "The user intended FP parameter/result
8913 passing to conform to tool chain-specific
8914 conventions" - we don't know any such
8915 conventions, so leave it as "auto". */
8916 break;
8917 case AEABI_VFP_args_compatible:
8918 /* "Code is compatible with both the base
8919 and VFP variants; the user did not permit
8920 non-variadic functions to pass FP
8921 parameters/results" - leave it as
8922 "auto". */
8923 break;
8924 default:
8925 /* Attribute value not mentioned in the
8926 November 2012 ABI, so leave it as
8927 "auto". */
8928 break;
8929 }
8930 #else
8931 fp_model = ARM_FLOAT_SOFT_VFP;
8932 #endif
8933 }
8934 break;
8935
8936 default:
8937 /* Leave it as "auto". */
8938 warning (_("unknown ARM EABI version 0x%x"), eabi_ver);
8939 break;
8940 }
8941
8942 #ifdef HAVE_ELF
8943 /* Detect M-profile programs. This only works if the
8944 executable file includes build attributes; GCC does
8945 copy them to the executable, but e.g. RealView does
8946 not. */
8947 int attr_arch
8948 = bfd_elf_get_obj_attr_int (info.abfd, OBJ_ATTR_PROC,
8949 Tag_CPU_arch);
8950 int attr_profile
8951 = bfd_elf_get_obj_attr_int (info.abfd, OBJ_ATTR_PROC,
8952 Tag_CPU_arch_profile);
8953
8954 /* GCC specifies the profile for v6-M; RealView only
8955 specifies the profile for architectures starting with
8956 V7 (as opposed to architectures with a tag
8957 numerically greater than TAG_CPU_ARCH_V7). */
8958 if (!tdesc_has_registers (tdesc)
8959 && (attr_arch == TAG_CPU_ARCH_V6_M
8960 || attr_arch == TAG_CPU_ARCH_V6S_M
8961 || attr_profile == 'M'))
8962 is_m = 1;
8963 #endif
8964 }
8965
8966 if (fp_model == ARM_FLOAT_AUTO)
8967 {
8968 switch (e_flags & (EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT))
8969 {
8970 case 0:
8971 /* Leave it as "auto". Strictly speaking this case
8972 means FPA, but almost nobody uses that now, and
8973 many toolchains fail to set the appropriate bits
8974 for the floating-point model they use. */
8975 break;
8976 case EF_ARM_SOFT_FLOAT:
8977 fp_model = ARM_FLOAT_SOFT_FPA;
8978 break;
8979 case EF_ARM_VFP_FLOAT:
8980 fp_model = ARM_FLOAT_VFP;
8981 break;
8982 case EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT:
8983 fp_model = ARM_FLOAT_SOFT_VFP;
8984 break;
8985 }
8986 }
8987
8988 if (e_flags & EF_ARM_BE8)
8989 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
8990
8991 break;
8992
8993 default:
8994 /* Leave it as "auto". */
8995 break;
8996 }
8997 }
8998
8999 /* Check any target description for validity. */
9000 if (tdesc_has_registers (tdesc))
9001 {
9002 /* For most registers we require GDB's default names; but also allow
9003 the numeric names for sp / lr / pc, as a convenience. */
9004 static const char *const arm_sp_names[] = { "r13", "sp", NULL };
9005 static const char *const arm_lr_names[] = { "r14", "lr", NULL };
9006 static const char *const arm_pc_names[] = { "r15", "pc", NULL };
9007
9008 const struct tdesc_feature *feature;
9009 int valid_p;
9010
9011 feature = tdesc_find_feature (tdesc,
9012 "org.gnu.gdb.arm.core");
9013 if (feature == NULL)
9014 {
9015 feature = tdesc_find_feature (tdesc,
9016 "org.gnu.gdb.arm.m-profile");
9017 if (feature == NULL)
9018 return NULL;
9019 else
9020 is_m = 1;
9021 }
9022
9023 tdesc_data = tdesc_data_alloc ();
9024
9025 valid_p = 1;
9026 for (i = 0; i < ARM_SP_REGNUM; i++)
9027 valid_p &= tdesc_numbered_register (feature, tdesc_data, i,
9028 arm_register_names[i]);
9029 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
9030 ARM_SP_REGNUM,
9031 arm_sp_names);
9032 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
9033 ARM_LR_REGNUM,
9034 arm_lr_names);
9035 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
9036 ARM_PC_REGNUM,
9037 arm_pc_names);
9038 if (is_m)
9039 valid_p &= tdesc_numbered_register (feature, tdesc_data,
9040 ARM_PS_REGNUM, "xpsr");
9041 else
9042 valid_p &= tdesc_numbered_register (feature, tdesc_data,
9043 ARM_PS_REGNUM, "cpsr");
9044
9045 if (!valid_p)
9046 {
9047 tdesc_data_cleanup (tdesc_data);
9048 return NULL;
9049 }
9050
9051 feature = tdesc_find_feature (tdesc,
9052 "org.gnu.gdb.arm.fpa");
9053 if (feature != NULL)
9054 {
9055 valid_p = 1;
9056 for (i = ARM_F0_REGNUM; i <= ARM_FPS_REGNUM; i++)
9057 valid_p &= tdesc_numbered_register (feature, tdesc_data, i,
9058 arm_register_names[i]);
9059 if (!valid_p)
9060 {
9061 tdesc_data_cleanup (tdesc_data);
9062 return NULL;
9063 }
9064 }
9065 else
9066 have_fpa_registers = 0;
9067
9068 feature = tdesc_find_feature (tdesc,
9069 "org.gnu.gdb.xscale.iwmmxt");
9070 if (feature != NULL)
9071 {
9072 static const char *const iwmmxt_names[] = {
9073 "wR0", "wR1", "wR2", "wR3", "wR4", "wR5", "wR6", "wR7",
9074 "wR8", "wR9", "wR10", "wR11", "wR12", "wR13", "wR14", "wR15",
9075 "wCID", "wCon", "wCSSF", "wCASF", "", "", "", "",
9076 "wCGR0", "wCGR1", "wCGR2", "wCGR3", "", "", "", "",
9077 };
9078
9079 valid_p = 1;
9080 for (i = ARM_WR0_REGNUM; i <= ARM_WR15_REGNUM; i++)
9081 valid_p
9082 &= tdesc_numbered_register (feature, tdesc_data, i,
9083 iwmmxt_names[i - ARM_WR0_REGNUM]);
9084
9085 /* Check for the control registers, but do not fail if they
9086 are missing. */
9087 for (i = ARM_WC0_REGNUM; i <= ARM_WCASF_REGNUM; i++)
9088 tdesc_numbered_register (feature, tdesc_data, i,
9089 iwmmxt_names[i - ARM_WR0_REGNUM]);
9090
9091 for (i = ARM_WCGR0_REGNUM; i <= ARM_WCGR3_REGNUM; i++)
9092 valid_p
9093 &= tdesc_numbered_register (feature, tdesc_data, i,
9094 iwmmxt_names[i - ARM_WR0_REGNUM]);
9095
9096 if (!valid_p)
9097 {
9098 tdesc_data_cleanup (tdesc_data);
9099 return NULL;
9100 }
9101
9102 have_wmmx_registers = 1;
9103 }
9104
9105 /* If we have a VFP unit, check whether the single precision registers
9106 are present. If not, then we will synthesize them as pseudo
9107 registers. */
9108 feature = tdesc_find_feature (tdesc,
9109 "org.gnu.gdb.arm.vfp");
9110 if (feature != NULL)
9111 {
9112 static const char *const vfp_double_names[] = {
9113 "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
9114 "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
9115 "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
9116 "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",
9117 };
9118
9119 /* Require the double precision registers. There must be either
9120 16 or 32. */
9121 valid_p = 1;
9122 for (i = 0; i < 32; i++)
9123 {
9124 valid_p &= tdesc_numbered_register (feature, tdesc_data,
9125 ARM_D0_REGNUM + i,
9126 vfp_double_names[i]);
9127 if (!valid_p)
9128 break;
9129 }
9130 if (!valid_p && i == 16)
9131 valid_p = 1;
9132
9133 /* Also require FPSCR. */
9134 valid_p &= tdesc_numbered_register (feature, tdesc_data,
9135 ARM_FPSCR_REGNUM, "fpscr");
9136 if (!valid_p)
9137 {
9138 tdesc_data_cleanup (tdesc_data);
9139 return NULL;
9140 }
9141
9142 if (tdesc_unnumbered_register (feature, "s0") == 0)
9143 have_vfp_pseudos = 1;
9144
9145 vfp_register_count = i;
9146
9147 /* If we have VFP, also check for NEON. The architecture allows
9148 NEON without VFP (integer vector operations only), but GDB
9149 does not support that. */
9150 feature = tdesc_find_feature (tdesc,
9151 "org.gnu.gdb.arm.neon");
9152 if (feature != NULL)
9153 {
9154 /* NEON requires 32 double-precision registers. */
9155 if (i != 32)
9156 {
9157 tdesc_data_cleanup (tdesc_data);
9158 return NULL;
9159 }
9160
9161 /* If there are quad registers defined by the stub, use
9162 their type; otherwise (normally) provide them with
9163 the default type. */
9164 if (tdesc_unnumbered_register (feature, "q0") == 0)
9165 have_neon_pseudos = 1;
9166
9167 have_neon = 1;
9168 }
9169 }
9170 }
9171
9172 /* If there is already a candidate, use it. */
9173 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
9174 best_arch != NULL;
9175 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
9176 {
9177 if (arm_abi != ARM_ABI_AUTO
9178 && arm_abi != gdbarch_tdep (best_arch->gdbarch)->arm_abi)
9179 continue;
9180
9181 if (fp_model != ARM_FLOAT_AUTO
9182 && fp_model != gdbarch_tdep (best_arch->gdbarch)->fp_model)
9183 continue;
9184
9185 /* There are various other properties in tdep that we do not
9186 need to check here: those derived from a target description,
9187 since gdbarches with a different target description are
9188 automatically disqualified. */
9189
9190 /* Do check is_m, though, since it might come from the binary. */
9191 if (is_m != gdbarch_tdep (best_arch->gdbarch)->is_m)
9192 continue;
9193
9194 /* Found a match. */
9195 break;
9196 }
9197
9198 if (best_arch != NULL)
9199 {
9200 if (tdesc_data != NULL)
9201 tdesc_data_cleanup (tdesc_data);
9202 return best_arch->gdbarch;
9203 }
9204
9205 tdep = XCNEW (struct gdbarch_tdep);
9206 gdbarch = gdbarch_alloc (&info, tdep);
9207
9208 /* Record additional information about the architecture we are defining.
9209 These are gdbarch discriminators, like the OSABI. */
9210 tdep->arm_abi = arm_abi;
9211 tdep->fp_model = fp_model;
9212 tdep->is_m = is_m;
9213 tdep->have_fpa_registers = have_fpa_registers;
9214 tdep->have_wmmx_registers = have_wmmx_registers;
9215 gdb_assert (vfp_register_count == 0
9216 || vfp_register_count == 16
9217 || vfp_register_count == 32);
9218 tdep->vfp_register_count = vfp_register_count;
9219 tdep->have_vfp_pseudos = have_vfp_pseudos;
9220 tdep->have_neon_pseudos = have_neon_pseudos;
9221 tdep->have_neon = have_neon;
9222
9223 arm_register_g_packet_guesses (gdbarch);
9224
9225 /* Breakpoints. */
9226 switch (info.byte_order_for_code)
9227 {
9228 case BFD_ENDIAN_BIG:
9229 tdep->arm_breakpoint = arm_default_arm_be_breakpoint;
9230 tdep->arm_breakpoint_size = sizeof (arm_default_arm_be_breakpoint);
9231 tdep->thumb_breakpoint = arm_default_thumb_be_breakpoint;
9232 tdep->thumb_breakpoint_size = sizeof (arm_default_thumb_be_breakpoint);
9233
9234 break;
9235
9236 case BFD_ENDIAN_LITTLE:
9237 tdep->arm_breakpoint = arm_default_arm_le_breakpoint;
9238 tdep->arm_breakpoint_size = sizeof (arm_default_arm_le_breakpoint);
9239 tdep->thumb_breakpoint = arm_default_thumb_le_breakpoint;
9240 tdep->thumb_breakpoint_size = sizeof (arm_default_thumb_le_breakpoint);
9241
9242 break;
9243
9244 default:
9245 internal_error (__FILE__, __LINE__,
9246 _("arm_gdbarch_init: bad byte order for float format"));
9247 }
9248
9249 /* On ARM targets char defaults to unsigned. */
9250 set_gdbarch_char_signed (gdbarch, 0);
9251
9252 /* wchar_t is unsigned under the AAPCS. */
9253 if (tdep->arm_abi == ARM_ABI_AAPCS)
9254 set_gdbarch_wchar_signed (gdbarch, 0);
9255 else
9256 set_gdbarch_wchar_signed (gdbarch, 1);
9257
9258 /* Compute type alignment. */
9259 set_gdbarch_type_align (gdbarch, arm_type_align);
9260
9261 /* Note: for displaced stepping, this includes the breakpoint, and one word
9262 of additional scratch space. This setting isn't used for anything beside
9263 displaced stepping at present. */
9264 set_gdbarch_max_insn_length (gdbarch, 4 * DISPLACED_MODIFIED_INSNS);
9265
9266 /* This should be low enough for everything. */
9267 tdep->lowest_pc = 0x20;
9268 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
9269
9270 /* The default, for both APCS and AAPCS, is to return small
9271 structures in registers. */
9272 tdep->struct_return = reg_struct_return;
9273
9274 set_gdbarch_push_dummy_call (gdbarch, arm_push_dummy_call);
9275 set_gdbarch_frame_align (gdbarch, arm_frame_align);
9276
9277 if (is_m)
9278 set_gdbarch_code_of_frame_writable (gdbarch, arm_code_of_frame_writable);
9279
9280 set_gdbarch_write_pc (gdbarch, arm_write_pc);
9281
9282 frame_base_set_default (gdbarch, &arm_normal_base);
9283
9284 /* Address manipulation. */
9285 set_gdbarch_addr_bits_remove (gdbarch, arm_addr_bits_remove);
9286
9287 /* Advance PC across function entry code. */
9288 set_gdbarch_skip_prologue (gdbarch, arm_skip_prologue);
9289
9290 /* Detect whether PC is at a point where the stack has been destroyed. */
9291 set_gdbarch_stack_frame_destroyed_p (gdbarch, arm_stack_frame_destroyed_p);
9292
9293 /* Skip trampolines. */
9294 set_gdbarch_skip_trampoline_code (gdbarch, arm_skip_stub);
9295
9296 /* The stack grows downward. */
9297 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
9298
9299 /* Breakpoint manipulation. */
9300 set_gdbarch_breakpoint_kind_from_pc (gdbarch, arm_breakpoint_kind_from_pc);
9301 set_gdbarch_sw_breakpoint_from_kind (gdbarch, arm_sw_breakpoint_from_kind);
9302 set_gdbarch_breakpoint_kind_from_current_state (gdbarch,
9303 arm_breakpoint_kind_from_current_state);
9304
9305 /* Information about registers, etc. */
9306 set_gdbarch_sp_regnum (gdbarch, ARM_SP_REGNUM);
9307 set_gdbarch_pc_regnum (gdbarch, ARM_PC_REGNUM);
9308 set_gdbarch_num_regs (gdbarch, ARM_NUM_REGS);
9309 set_gdbarch_register_type (gdbarch, arm_register_type);
9310 set_gdbarch_register_reggroup_p (gdbarch, arm_register_reggroup_p);
9311
9312 /* This "info float" is FPA-specific. Use the generic version if we
9313 do not have FPA. */
9314 if (gdbarch_tdep (gdbarch)->have_fpa_registers)
9315 set_gdbarch_print_float_info (gdbarch, arm_print_float_info);
9316
9317 /* Internal <-> external register number maps. */
9318 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, arm_dwarf_reg_to_regnum);
9319 set_gdbarch_register_sim_regno (gdbarch, arm_register_sim_regno);
9320
9321 set_gdbarch_register_name (gdbarch, arm_register_name);
9322
9323 /* Returning results. */
9324 set_gdbarch_return_value (gdbarch, arm_return_value);
9325
9326 /* Disassembly. */
9327 set_gdbarch_print_insn (gdbarch, gdb_print_insn_arm);
9328
9329 /* Minsymbol frobbing. */
9330 set_gdbarch_elf_make_msymbol_special (gdbarch, arm_elf_make_msymbol_special);
9331 set_gdbarch_coff_make_msymbol_special (gdbarch,
9332 arm_coff_make_msymbol_special);
9333 set_gdbarch_record_special_symbol (gdbarch, arm_record_special_symbol);
9334
9335 /* Thumb-2 IT block support. */
9336 set_gdbarch_adjust_breakpoint_address (gdbarch,
9337 arm_adjust_breakpoint_address);
9338
9339 /* Virtual tables. */
9340 set_gdbarch_vbit_in_delta (gdbarch, 1);
9341
9342 /* Hook in the ABI-specific overrides, if they have been registered. */
9343 gdbarch_init_osabi (info, gdbarch);
9344
9345 dwarf2_frame_set_init_reg (gdbarch, arm_dwarf2_frame_init_reg);
9346
9347 /* Add some default predicates. */
9348 if (is_m)
9349 frame_unwind_append_unwinder (gdbarch, &arm_m_exception_unwind);
9350 frame_unwind_append_unwinder (gdbarch, &arm_stub_unwind);
9351 dwarf2_append_unwinders (gdbarch);
9352 frame_unwind_append_unwinder (gdbarch, &arm_exidx_unwind);
9353 frame_unwind_append_unwinder (gdbarch, &arm_epilogue_frame_unwind);
9354 frame_unwind_append_unwinder (gdbarch, &arm_prologue_unwind);
9355
9356 /* Now we have tuned the configuration, set a few final things,
9357 based on what the OS ABI has told us. */
9358
9359 /* If the ABI is not otherwise marked, assume the old GNU APCS. EABI
9360 binaries are always marked. */
9361 if (tdep->arm_abi == ARM_ABI_AUTO)
9362 tdep->arm_abi = ARM_ABI_APCS;
9363
9364 /* Watchpoints are not steppable. */
9365 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
9366
9367 /* We used to default to FPA for generic ARM, but almost nobody
9368 uses that now, and we now provide a way for the user to force
9369 the model. So default to the most useful variant. */
9370 if (tdep->fp_model == ARM_FLOAT_AUTO)
9371 tdep->fp_model = ARM_FLOAT_SOFT_FPA;
9372
9373 if (tdep->jb_pc >= 0)
9374 set_gdbarch_get_longjmp_target (gdbarch, arm_get_longjmp_target);
9375
9376 /* Floating point sizes and format. */
9377 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
9378 if (tdep->fp_model == ARM_FLOAT_SOFT_FPA || tdep->fp_model == ARM_FLOAT_FPA)
9379 {
9380 set_gdbarch_double_format
9381 (gdbarch, floatformats_ieee_double_littlebyte_bigword);
9382 set_gdbarch_long_double_format
9383 (gdbarch, floatformats_ieee_double_littlebyte_bigword);
9384 }
9385 else
9386 {
9387 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
9388 set_gdbarch_long_double_format (gdbarch, floatformats_ieee_double);
9389 }
9390
9391 if (have_vfp_pseudos)
9392 {
9393 /* NOTE: These are the only pseudo registers used by
9394 the ARM target at the moment. If more are added, a
9395 little more care in numbering will be needed. */
9396
9397 int num_pseudos = 32;
9398 if (have_neon_pseudos)
9399 num_pseudos += 16;
9400 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudos);
9401 set_gdbarch_pseudo_register_read (gdbarch, arm_pseudo_read);
9402 set_gdbarch_pseudo_register_write (gdbarch, arm_pseudo_write);
9403 }
9404
9405 if (tdesc_data)
9406 {
9407 set_tdesc_pseudo_register_name (gdbarch, arm_register_name);
9408
9409 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
9410
9411 /* Override tdesc_register_type to adjust the types of VFP
9412 registers for NEON. */
9413 set_gdbarch_register_type (gdbarch, arm_register_type);
9414 }
9415
9416 /* Add standard register aliases. We add aliases even for those
9417 nanes which are used by the current architecture - it's simpler,
9418 and does no harm, since nothing ever lists user registers. */
9419 for (i = 0; i < ARRAY_SIZE (arm_register_aliases); i++)
9420 user_reg_add (gdbarch, arm_register_aliases[i].name,
9421 value_of_arm_user_reg, &arm_register_aliases[i].regnum);
9422
9423 set_gdbarch_disassembler_options (gdbarch, &arm_disassembler_options);
9424 set_gdbarch_valid_disassembler_options (gdbarch, disassembler_options_arm ());
9425
9426 return gdbarch;
9427 }
9428
9429 static void
9430 arm_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
9431 {
9432 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
9433
9434 if (tdep == NULL)
9435 return;
9436
9437 fprintf_unfiltered (file, _("arm_dump_tdep: Lowest pc = 0x%lx"),
9438 (unsigned long) tdep->lowest_pc);
9439 }
9440
#if GDB_SELF_TEST
namespace selftests
{
  /* Forward declaration; the test itself is defined later in this file
     and registered in _initialize_arm_tdep.  */
  static void arm_record_test (void);
}
#endif
9447
9448 void
9449 _initialize_arm_tdep (void)
9450 {
9451 long length;
9452 int i, j;
9453 char regdesc[1024], *rdptr = regdesc;
9454 size_t rest = sizeof (regdesc);
9455
9456 gdbarch_register (bfd_arch_arm, arm_gdbarch_init, arm_dump_tdep);
9457
9458 arm_objfile_data_key
9459 = register_objfile_data_with_cleanup (NULL, arm_objfile_data_free);
9460
9461 /* Add ourselves to objfile event chain. */
9462 gdb::observers::new_objfile.attach (arm_exidx_new_objfile);
9463 arm_exidx_data_key
9464 = register_objfile_data_with_cleanup (NULL, arm_exidx_data_free);
9465
9466 /* Register an ELF OS ABI sniffer for ARM binaries. */
9467 gdbarch_register_osabi_sniffer (bfd_arch_arm,
9468 bfd_target_elf_flavour,
9469 arm_elf_osabi_sniffer);
9470
9471 /* Initialize the standard target descriptions. */
9472 initialize_tdesc_arm_with_m ();
9473 initialize_tdesc_arm_with_m_fpa_layout ();
9474 initialize_tdesc_arm_with_m_vfp_d16 ();
9475 initialize_tdesc_arm_with_iwmmxt ();
9476 initialize_tdesc_arm_with_vfpv2 ();
9477 initialize_tdesc_arm_with_vfpv3 ();
9478 initialize_tdesc_arm_with_neon ();
9479
9480 /* Add root prefix command for all "set arm"/"show arm" commands. */
9481 add_prefix_cmd ("arm", no_class, set_arm_command,
9482 _("Various ARM-specific commands."),
9483 &setarmcmdlist, "set arm ", 0, &setlist);
9484
9485 add_prefix_cmd ("arm", no_class, show_arm_command,
9486 _("Various ARM-specific commands."),
9487 &showarmcmdlist, "show arm ", 0, &showlist);
9488
9489
9490 arm_disassembler_options = xstrdup ("reg-names-std");
9491 const disasm_options_t *disasm_options
9492 = &disassembler_options_arm ()->options;
9493 int num_disassembly_styles = 0;
9494 for (i = 0; disasm_options->name[i] != NULL; i++)
9495 if (CONST_STRNEQ (disasm_options->name[i], "reg-names-"))
9496 num_disassembly_styles++;
9497
9498 /* Initialize the array that will be passed to add_setshow_enum_cmd(). */
9499 valid_disassembly_styles = XNEWVEC (const char *,
9500 num_disassembly_styles + 1);
9501 for (i = j = 0; disasm_options->name[i] != NULL; i++)
9502 if (CONST_STRNEQ (disasm_options->name[i], "reg-names-"))
9503 {
9504 size_t offset = strlen ("reg-names-");
9505 const char *style = disasm_options->name[i];
9506 valid_disassembly_styles[j++] = &style[offset];
9507 length = snprintf (rdptr, rest, "%s - %s\n", &style[offset],
9508 disasm_options->description[i]);
9509 rdptr += length;
9510 rest -= length;
9511 }
9512 /* Mark the end of valid options. */
9513 valid_disassembly_styles[num_disassembly_styles] = NULL;
9514
9515 /* Create the help text. */
9516 std::string helptext = string_printf ("%s%s%s",
9517 _("The valid values are:\n"),
9518 regdesc,
9519 _("The default is \"std\"."));
9520
9521 add_setshow_enum_cmd("disassembler", no_class,
9522 valid_disassembly_styles, &disassembly_style,
9523 _("Set the disassembly style."),
9524 _("Show the disassembly style."),
9525 helptext.c_str (),
9526 set_disassembly_style_sfunc,
9527 show_disassembly_style_sfunc,
9528 &setarmcmdlist, &showarmcmdlist);
9529
9530 add_setshow_boolean_cmd ("apcs32", no_class, &arm_apcs_32,
9531 _("Set usage of ARM 32-bit mode."),
9532 _("Show usage of ARM 32-bit mode."),
9533 _("When off, a 26-bit PC will be used."),
9534 NULL,
9535 NULL, /* FIXME: i18n: Usage of ARM 32-bit
9536 mode is %s. */
9537 &setarmcmdlist, &showarmcmdlist);
9538
9539 /* Add a command to allow the user to force the FPU model. */
9540 add_setshow_enum_cmd ("fpu", no_class, fp_model_strings, &current_fp_model,
9541 _("Set the floating point type."),
9542 _("Show the floating point type."),
9543 _("auto - Determine the FP typefrom the OS-ABI.\n\
9544 softfpa - Software FP, mixed-endian doubles on little-endian ARMs.\n\
9545 fpa - FPA co-processor (GCC compiled).\n\
9546 softvfp - Software FP with pure-endian doubles.\n\
9547 vfp - VFP co-processor."),
9548 set_fp_model_sfunc, show_fp_model,
9549 &setarmcmdlist, &showarmcmdlist);
9550
9551 /* Add a command to allow the user to force the ABI. */
9552 add_setshow_enum_cmd ("abi", class_support, arm_abi_strings, &arm_abi_string,
9553 _("Set the ABI."),
9554 _("Show the ABI."),
9555 NULL, arm_set_abi, arm_show_abi,
9556 &setarmcmdlist, &showarmcmdlist);
9557
9558 /* Add two commands to allow the user to force the assumed
9559 execution mode. */
9560 add_setshow_enum_cmd ("fallback-mode", class_support,
9561 arm_mode_strings, &arm_fallback_mode_string,
9562 _("Set the mode assumed when symbols are unavailable."),
9563 _("Show the mode assumed when symbols are unavailable."),
9564 NULL, NULL, arm_show_fallback_mode,
9565 &setarmcmdlist, &showarmcmdlist);
9566 add_setshow_enum_cmd ("force-mode", class_support,
9567 arm_mode_strings, &arm_force_mode_string,
9568 _("Set the mode assumed even when symbols are available."),
9569 _("Show the mode assumed even when symbols are available."),
9570 NULL, NULL, arm_show_force_mode,
9571 &setarmcmdlist, &showarmcmdlist);
9572
9573 /* Debugging flag. */
9574 add_setshow_boolean_cmd ("arm", class_maintenance, &arm_debug,
9575 _("Set ARM debugging."),
9576 _("Show ARM debugging."),
9577 _("When on, arm-specific debugging is enabled."),
9578 NULL,
9579 NULL, /* FIXME: i18n: "ARM debugging is %s. */
9580 &setdebuglist, &showdebuglist);
9581
9582 #if GDB_SELF_TEST
9583 selftests::register_test ("arm-record", selftests::arm_record_test);
9584 #endif
9585
9586 }
9587
/* ARM-reversible process record data structures.  */

/* Instruction sizes in bytes for the three encodings the recorder
   handles.  */
#define ARM_INSN_SIZE_BYTES 4
#define THUMB_INSN_SIZE_BYTES 2
#define THUMB2_INSN_SIZE_BYTES 4


/* Position of the bit within a 32-bit ARM instruction
   that defines whether the instruction is a load or store.  */
#define INSN_S_L_BIT_NUM 20

/* Copy LENGTH register numbers from RECORD_BUF into a freshly
   XNEWVEC-allocated array assigned to REGS.  No-op when LENGTH is 0.
   The caller takes ownership of the allocation.  */
#define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
        do  \
          { \
            unsigned int reg_len = LENGTH; \
            if (reg_len) \
              { \
                REGS = XNEWVEC (uint32_t, reg_len); \
                memcpy(&REGS[0], &RECORD_BUF[0], sizeof(uint32_t)*LENGTH); \
              } \
          } \
        while (0)

/* Copy LENGTH arm_mem_r entries out of the raw uint32_t RECORD_BUF into
   a freshly XNEWVEC-allocated array assigned to MEMS.  No-op when
   LENGTH is 0.  NOTE(review): the destination is written via
   &MEMS->len, which equals MEMS only because `len' is the first member
   of struct arm_mem_r; the copy also assumes arm_mem_r is exactly two
   packed uint32_t fields matching the (len, addr) pair layout of
   RECORD_BUF -- confirm before changing either type.  */
#define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
        do  \
          { \
            unsigned int mem_len = LENGTH; \
            if (mem_len) \
              { \
                MEMS = XNEWVEC (struct arm_mem_r, mem_len); \
                memcpy(&MEMS->len, &RECORD_BUF[0], \
                       sizeof(struct arm_mem_r) * LENGTH); \
              } \
          } \
        while (0)

/* Checks whether insn is already recorded or yet to be decoded
   (boolean expression): true once any register or memory record has
   been stored in ARM_RECORD.  */
#define INSN_RECORDED(ARM_RECORD) \
        (0 != (ARM_RECORD)->reg_rec_count || 0 != (ARM_RECORD)->mem_rec_count)
9627
/* ARM memory record structure: one contiguous memory region whose
   pre-execution contents must be saved so the store can be undone
   during reverse execution.  */
struct arm_mem_r
{
  uint32_t len;    /* Record length (bytes).  */
  uint32_t addr;   /* Memory address.  */
};
9634
/* ARM instruction record contains opcode of current insn
   and execution state (before entry to decode_insn()),
   contains list of to-be-modified registers and
   memory blocks (on return from decode_insn()).  */

typedef struct insn_decode_record_t
{
  struct gdbarch *gdbarch;
  struct regcache *regcache;
  CORE_ADDR this_addr;          /* Address of the insn being decoded.  */
  uint32_t arm_insn;            /* Raw instruction bits; should accommodate
				   thumb (16- or 32-bit encodings).  */
  uint32_t cond;                /* Condition code.  */
  uint32_t opcode;              /* Insn opcode.  */
  uint32_t decode;              /* Insn decode bits.  */
  uint32_t mem_rec_count;       /* No of mem records.  */
  uint32_t reg_rec_count;       /* No of reg records.  */
  uint32_t *arm_regs;           /* Registers to be saved for this record;
				   allocated via REG_ALLOC.  */
  struct arm_mem_r *arm_mems;   /* Memory to be saved for this record;
				   allocated via MEM_ALLOC.  */
} insn_decode_record;
9654
9655
9656 /* Checks ARM SBZ and SBO mandatory fields. */
9657
9658 static int
9659 sbo_sbz (uint32_t insn, uint32_t bit_num, uint32_t len, uint32_t sbo)
9660 {
9661 uint32_t ones = bits (insn, bit_num - 1, (bit_num -1) + (len - 1));
9662
9663 if (!len)
9664 return 1;
9665
9666 if (!sbo)
9667 ones = ~ones;
9668
9669 while (ones)
9670 {
9671 if (!(ones & sbo))
9672 {
9673 return 0;
9674 }
9675 ones = ones >> 1;
9676 }
9677 return 1;
9678 }
9679
/* Overall result codes for the ARM record pass.  */
enum arm_record_result
{
  ARM_RECORD_SUCCESS = 0,
  ARM_RECORD_FAILURE = 1
};

/* Which misc store instruction arm_record_strx is recording.  */
typedef enum
{
  ARM_RECORD_STRH=1,
  ARM_RECORD_STRD
} arm_record_strx_t;

/* Instruction-set encoding of the insn being recorded.  */
typedef enum
{
  ARM_RECORD=1,
  THUMB_RECORD,
  THUMB2_RECORD
} record_type_t;
9698
9699
9700 static int
9701 arm_record_strx (insn_decode_record *arm_insn_r, uint32_t *record_buf,
9702 uint32_t *record_buf_mem, arm_record_strx_t str_type)
9703 {
9704
9705 struct regcache *reg_cache = arm_insn_r->regcache;
9706 ULONGEST u_regval[2]= {0};
9707
9708 uint32_t reg_src1 = 0, reg_src2 = 0;
9709 uint32_t immed_high = 0, immed_low = 0,offset_8 = 0, tgt_mem_addr = 0;
9710
9711 arm_insn_r->opcode = bits (arm_insn_r->arm_insn, 21, 24);
9712 arm_insn_r->decode = bits (arm_insn_r->arm_insn, 4, 7);
9713
9714 if (14 == arm_insn_r->opcode || 10 == arm_insn_r->opcode)
9715 {
9716 /* 1) Handle misc store, immediate offset. */
9717 immed_low = bits (arm_insn_r->arm_insn, 0, 3);
9718 immed_high = bits (arm_insn_r->arm_insn, 8, 11);
9719 reg_src1 = bits (arm_insn_r->arm_insn, 16, 19);
9720 regcache_raw_read_unsigned (reg_cache, reg_src1,
9721 &u_regval[0]);
9722 if (ARM_PC_REGNUM == reg_src1)
9723 {
9724 /* If R15 was used as Rn, hence current PC+8. */
9725 u_regval[0] = u_regval[0] + 8;
9726 }
9727 offset_8 = (immed_high << 4) | immed_low;
9728 /* Calculate target store address. */
9729 if (14 == arm_insn_r->opcode)
9730 {
9731 tgt_mem_addr = u_regval[0] + offset_8;
9732 }
9733 else
9734 {
9735 tgt_mem_addr = u_regval[0] - offset_8;
9736 }
9737 if (ARM_RECORD_STRH == str_type)
9738 {
9739 record_buf_mem[0] = 2;
9740 record_buf_mem[1] = tgt_mem_addr;
9741 arm_insn_r->mem_rec_count = 1;
9742 }
9743 else if (ARM_RECORD_STRD == str_type)
9744 {
9745 record_buf_mem[0] = 4;
9746 record_buf_mem[1] = tgt_mem_addr;
9747 record_buf_mem[2] = 4;
9748 record_buf_mem[3] = tgt_mem_addr + 4;
9749 arm_insn_r->mem_rec_count = 2;
9750 }
9751 }
9752 else if (12 == arm_insn_r->opcode || 8 == arm_insn_r->opcode)
9753 {
9754 /* 2) Store, register offset. */
9755 /* Get Rm. */
9756 reg_src1 = bits (arm_insn_r->arm_insn, 0, 3);
9757 /* Get Rn. */
9758 reg_src2 = bits (arm_insn_r->arm_insn, 16, 19);
9759 regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval[0]);
9760 regcache_raw_read_unsigned (reg_cache, reg_src2, &u_regval[1]);
9761 if (15 == reg_src2)
9762 {
9763 /* If R15 was used as Rn, hence current PC+8. */
9764 u_regval[0] = u_regval[0] + 8;
9765 }
9766 /* Calculate target store address, Rn +/- Rm, register offset. */
9767 if (12 == arm_insn_r->opcode)
9768 {
9769 tgt_mem_addr = u_regval[0] + u_regval[1];
9770 }
9771 else
9772 {
9773 tgt_mem_addr = u_regval[1] - u_regval[0];
9774 }
9775 if (ARM_RECORD_STRH == str_type)
9776 {
9777 record_buf_mem[0] = 2;
9778 record_buf_mem[1] = tgt_mem_addr;
9779 arm_insn_r->mem_rec_count = 1;
9780 }
9781 else if (ARM_RECORD_STRD == str_type)
9782 {
9783 record_buf_mem[0] = 4;
9784 record_buf_mem[1] = tgt_mem_addr;
9785 record_buf_mem[2] = 4;
9786 record_buf_mem[3] = tgt_mem_addr + 4;
9787 arm_insn_r->mem_rec_count = 2;
9788 }
9789 }
9790 else if (11 == arm_insn_r->opcode || 15 == arm_insn_r->opcode
9791 || 2 == arm_insn_r->opcode || 6 == arm_insn_r->opcode)
9792 {
9793 /* 3) Store, immediate pre-indexed. */
9794 /* 5) Store, immediate post-indexed. */
9795 immed_low = bits (arm_insn_r->arm_insn, 0, 3);
9796 immed_high = bits (arm_insn_r->arm_insn, 8, 11);
9797 offset_8 = (immed_high << 4) | immed_low;
9798 reg_src1 = bits (arm_insn_r->arm_insn, 16, 19);
9799 regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval[0]);
9800 /* Calculate target store address, Rn +/- Rm, register offset. */
9801 if (15 == arm_insn_r->opcode || 6 == arm_insn_r->opcode)
9802 {
9803 tgt_mem_addr = u_regval[0] + offset_8;
9804 }
9805 else
9806 {
9807 tgt_mem_addr = u_regval[0] - offset_8;
9808 }
9809 if (ARM_RECORD_STRH == str_type)
9810 {
9811 record_buf_mem[0] = 2;
9812 record_buf_mem[1] = tgt_mem_addr;
9813 arm_insn_r->mem_rec_count = 1;
9814 }
9815 else if (ARM_RECORD_STRD == str_type)
9816 {
9817 record_buf_mem[0] = 4;
9818 record_buf_mem[1] = tgt_mem_addr;
9819 record_buf_mem[2] = 4;
9820 record_buf_mem[3] = tgt_mem_addr + 4;
9821 arm_insn_r->mem_rec_count = 2;
9822 }
9823 /* Record Rn also as it changes. */
9824 *(record_buf) = bits (arm_insn_r->arm_insn, 16, 19);
9825 arm_insn_r->reg_rec_count = 1;
9826 }
9827 else if (9 == arm_insn_r->opcode || 13 == arm_insn_r->opcode
9828 || 0 == arm_insn_r->opcode || 4 == arm_insn_r->opcode)
9829 {
9830 /* 4) Store, register pre-indexed. */
9831 /* 6) Store, register post -indexed. */
9832 reg_src1 = bits (arm_insn_r->arm_insn, 0, 3);
9833 reg_src2 = bits (arm_insn_r->arm_insn, 16, 19);
9834 regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval[0]);
9835 regcache_raw_read_unsigned (reg_cache, reg_src2, &u_regval[1]);
9836 /* Calculate target store address, Rn +/- Rm, register offset. */
9837 if (13 == arm_insn_r->opcode || 4 == arm_insn_r->opcode)
9838 {
9839 tgt_mem_addr = u_regval[0] + u_regval[1];
9840 }
9841 else
9842 {
9843 tgt_mem_addr = u_regval[1] - u_regval[0];
9844 }
9845 if (ARM_RECORD_STRH == str_type)
9846 {
9847 record_buf_mem[0] = 2;
9848 record_buf_mem[1] = tgt_mem_addr;
9849 arm_insn_r->mem_rec_count = 1;
9850 }
9851 else if (ARM_RECORD_STRD == str_type)
9852 {
9853 record_buf_mem[0] = 4;
9854 record_buf_mem[1] = tgt_mem_addr;
9855 record_buf_mem[2] = 4;
9856 record_buf_mem[3] = tgt_mem_addr + 4;
9857 arm_insn_r->mem_rec_count = 2;
9858 }
9859 /* Record Rn also as it changes. */
9860 *(record_buf) = bits (arm_insn_r->arm_insn, 16, 19);
9861 arm_insn_r->reg_rec_count = 1;
9862 }
9863 return 0;
9864 }
9865
/* Handling ARM extension space insns.  Record the registers and memory
   touched by instructions in the various ARM "extension spaces"
   (unconditional, arithmetic, control, load/store, coprocessor).
   Returns 0 on success and -1 when the insn cannot be recorded
   (e.g. SPSR access, coprocessor insns).  On success the collected
   records are transferred into ARM_INSN_R via REG_ALLOC/MEM_ALLOC.  */

static int
arm_record_extension_space (insn_decode_record *arm_insn_r)
{
  int ret = 0;  /* Return value: -1:record failure ;  0:success  */
  uint32_t opcode1 = 0, opcode2 = 0, insn_op1 = 0;
  uint32_t record_buf[8], record_buf_mem[8];
  uint32_t reg_src1 = 0;
  struct regcache *reg_cache = arm_insn_r->regcache;
  ULONGEST u_regval = 0;

  gdb_assert (!INSN_RECORDED(arm_insn_r));
  /* Handle unconditional insn extension space.  */

  opcode1 = bits (arm_insn_r->arm_insn, 20, 27);
  opcode2 = bits (arm_insn_r->arm_insn, 4, 7);
  if (arm_insn_r->cond)
    {
      /* PLD has no effect on architectural state, it just affects
	 the caches.  */
      if (5 == ((opcode1 & 0xE0) >> 5))
	{
	  /* BLX(1) */
	  record_buf[0] = ARM_PS_REGNUM;
	  record_buf[1] = ARM_LR_REGNUM;
	  arm_insn_r->reg_rec_count = 2;
	}
      /* STC2, LDC2, MCR2, MRC2, CDP2: <TBD>, co-processor insn.  */
    }


  opcode1 = bits (arm_insn_r->arm_insn, 25, 27);
  if (3 == opcode1 && bit (arm_insn_r->arm_insn, 4))
    {
      ret = -1;
      /* Undefined instruction on ARM V5; need to handle if later
	 versions define it.  */
    }

  opcode1 = bits (arm_insn_r->arm_insn, 24, 27);
  opcode2 = bits (arm_insn_r->arm_insn, 4, 7);
  insn_op1 = bits (arm_insn_r->arm_insn, 20, 23);

  /* Handle arithmetic insn extension space.  */
  if (!opcode1 && 9 == opcode2 && 1 != arm_insn_r->cond
      && !INSN_RECORDED(arm_insn_r))
    {
      /* Handle MLA(S) and MUL(S).  */
      if (in_inclusive_range (insn_op1, 0U, 3U))
	{
	  record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
	  record_buf[1] = ARM_PS_REGNUM;
	  arm_insn_r->reg_rec_count = 2;
	}
      else if (in_inclusive_range (insn_op1, 4U, 15U))
	{
	  /* Handle SMLAL(S), SMULL(S), UMLAL(S), UMULL(S).  */
	  record_buf[0] = bits (arm_insn_r->arm_insn, 16, 19);
	  record_buf[1] = bits (arm_insn_r->arm_insn, 12, 15);
	  record_buf[2] = ARM_PS_REGNUM;
	  arm_insn_r->reg_rec_count = 3;
	}
    }

  opcode1 = bits (arm_insn_r->arm_insn, 26, 27);
  opcode2 = bits (arm_insn_r->arm_insn, 23, 24);
  insn_op1 = bits (arm_insn_r->arm_insn, 21, 22);

  /* Handle control insn extension space.  */

  if (!opcode1 && 2 == opcode2 && !bit (arm_insn_r->arm_insn, 20)
      && 1 != arm_insn_r->cond && !INSN_RECORDED(arm_insn_r))
    {
      if (!bit (arm_insn_r->arm_insn,25))
	{
	  if (!bits (arm_insn_r->arm_insn, 4, 7))
	    {
	      if ((0 == insn_op1) || (2 == insn_op1))
		{
		  /* MRS.  */
		  record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
		  arm_insn_r->reg_rec_count = 1;
		}
	      else if (1 == insn_op1)
		{
		  /* CSPR is going to be changed.  */
		  record_buf[0] = ARM_PS_REGNUM;
		  arm_insn_r->reg_rec_count = 1;
		}
	      else if (3 == insn_op1)
		{
		  /* SPSR is going to be changed.  */
		  /* We need to get SPSR value, which is yet to be done.  */
		  return -1;
		}
	    }
	  else if (1 == bits (arm_insn_r->arm_insn, 4, 7))
	    {
	      if (1 == insn_op1)
		{
		  /* BX.  */
		  record_buf[0] = ARM_PS_REGNUM;
		  arm_insn_r->reg_rec_count = 1;
		}
	      else if (3 == insn_op1)
		{
		  /* CLZ.  */
		  record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
		  arm_insn_r->reg_rec_count = 1;
		}
	    }
	  else if (3 == bits (arm_insn_r->arm_insn, 4, 7))
	    {
	      /* BLX.  */
	      record_buf[0] = ARM_PS_REGNUM;
	      record_buf[1] = ARM_LR_REGNUM;
	      arm_insn_r->reg_rec_count = 2;
	    }
	  else if (5 == bits (arm_insn_r->arm_insn, 4, 7))
	    {
	      /* QADD, QSUB, QDADD, QDSUB */
	      record_buf[0] = ARM_PS_REGNUM;
	      record_buf[1] = bits (arm_insn_r->arm_insn, 12, 15);
	      arm_insn_r->reg_rec_count = 2;
	    }
	  else if (7 == bits (arm_insn_r->arm_insn, 4, 7))
	    {
	      /* BKPT.  */
	      record_buf[0] = ARM_PS_REGNUM;
	      record_buf[1] = ARM_LR_REGNUM;
	      arm_insn_r->reg_rec_count = 2;

	      /* Save SPSR also; how?  */
	      return -1;
	    }
	  else if(8 == bits (arm_insn_r->arm_insn, 4, 7)
		  || 10 == bits (arm_insn_r->arm_insn, 4, 7)
		  || 12 == bits (arm_insn_r->arm_insn, 4, 7)
		  || 14 == bits (arm_insn_r->arm_insn, 4, 7)
		 )
	    {
	      if (0 == insn_op1 || 1 == insn_op1)
		{
		  /* SMLA<x><y>, SMLAW<y>, SMULW<y>.  */
		  /* We dont do optimization for SMULW<y> where we
		     need only Rd.  */
		  record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
		  record_buf[1] = ARM_PS_REGNUM;
		  arm_insn_r->reg_rec_count = 2;
		}
	      else if (2 == insn_op1)
		{
		  /* SMLAL<x><y>.  */
		  record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
		  record_buf[1] = bits (arm_insn_r->arm_insn, 16, 19);
		  arm_insn_r->reg_rec_count = 2;
		}
	      else if (3 == insn_op1)
		{
		  /* SMUL<x><y>.  */
		  record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
		  arm_insn_r->reg_rec_count = 1;
		}
	    }
	}
      else
	{
	  /* MSR : immediate form.  */
	  if (1 == insn_op1)
	    {
	      /* CSPR is going to be changed.  */
	      record_buf[0] = ARM_PS_REGNUM;
	      arm_insn_r->reg_rec_count = 1;
	    }
	  else if (3 == insn_op1)
	    {
	      /* SPSR is going to be changed.  */
	      /* we need to get SPSR value, which is yet to be done  */
	      return -1;
	    }
	}
    }

  opcode1 = bits (arm_insn_r->arm_insn, 25, 27);
  opcode2 = bits (arm_insn_r->arm_insn, 20, 24);
  insn_op1 = bits (arm_insn_r->arm_insn, 5, 6);

  /* Handle load/store insn extension space.  */

  if (!opcode1 && bit (arm_insn_r->arm_insn, 7)
      && bit (arm_insn_r->arm_insn, 4) && 1 != arm_insn_r->cond
      && !INSN_RECORDED(arm_insn_r))
    {
      /* SWP/SWPB.  */
      if (0 == insn_op1)
	{
	  /* These insn, changes register and memory as well.  */
	  /* SWP or SWPB insn.  */
	  /* Get memory address given by Rn.  */
	  reg_src1 = bits (arm_insn_r->arm_insn, 16, 19);
	  regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval);
	  /* SWP insn ?, swaps word.  */
	  /* NOTE(review): arm_insn_r->opcode is not assigned anywhere
	     in this function, so this test reads whatever a previous
	     decode stage left there -- confirm it is bits 21-24 of the
	     current insn as the SWP/SWPB distinction requires.  */
	  if (8 == arm_insn_r->opcode)
	    {
	      record_buf_mem[0] = 4;
	    }
	  else
	    {
	      /* SWPB insn, swaps only byte.  */
	      record_buf_mem[0] = 1;
	    }
	  record_buf_mem[1] = u_regval;
	  arm_insn_r->mem_rec_count = 1;
	  record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
	  arm_insn_r->reg_rec_count = 1;
	}
      else if (1 == insn_op1 && !bit (arm_insn_r->arm_insn, 20))
	{
	  /* STRH.  */
	  arm_record_strx(arm_insn_r, &record_buf[0], &record_buf_mem[0],
			  ARM_RECORD_STRH);
	}
      else if (2 == insn_op1 && !bit (arm_insn_r->arm_insn, 20))
	{
	  /* LDRD.  */
	  record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
	  record_buf[1] = record_buf[0] + 1;
	  arm_insn_r->reg_rec_count = 2;
	}
      else if (3 == insn_op1 && !bit (arm_insn_r->arm_insn, 20))
	{
	  /* STRD.  */
	  arm_record_strx(arm_insn_r, &record_buf[0], &record_buf_mem[0],
			  ARM_RECORD_STRD);
	}
      else if (bit (arm_insn_r->arm_insn, 20) && insn_op1 <= 3)
	{
	  /* LDRH, LDRSB, LDRSH.  */
	  record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
	  arm_insn_r->reg_rec_count = 1;
	}

    }

  opcode1 = bits (arm_insn_r->arm_insn, 23, 27);
  if (24 == opcode1 && bit (arm_insn_r->arm_insn, 21)
      && !INSN_RECORDED(arm_insn_r))
    {
      ret = -1;
      /* Handle coprocessor insn extension space.  */
    }

  /* To be done for ARMv5 and later; as of now we return -1.  */
  if (-1 == ret)
    return ret;

  REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf);
  MEM_ALLOC (arm_insn_r->arm_mems, arm_insn_r->mem_rec_count, record_buf_mem);

  return ret;
}
10128
/* Handling opcode 000 insns: the ARM "data-processing and miscellaneous"
   space.  This covers data-processing (register and register-shifted
   register), the miscellaneous instructions (BLX, BKPT, BX, CLZ, MRS),
   multiplies, synchronization primitives (SWP/SWPB) and the extra
   load/store encodings.  Collects the registers and memory locations the
   insn may change and hands them to REG_ALLOC/MEM_ALLOC; returns 0 on
   success, -1 when the insn cannot be recorded.  */

static int
arm_record_data_proc_misc_ld_str (insn_decode_record *arm_insn_r)
{
  struct regcache *reg_cache = arm_insn_r->regcache;
  uint32_t record_buf[8], record_buf_mem[8];
  ULONGEST u_regval[2] = {0};

  uint32_t reg_src1 = 0;
  uint32_t opcode1 = 0;

  /* Primary decode fields: opcode is insn bits 24-21, the secondary
     decode field is bits 7-4, opcode1 is bits 24-20.  */
  arm_insn_r->opcode = bits (arm_insn_r->arm_insn, 21, 24);
  arm_insn_r->decode = bits (arm_insn_r->arm_insn, 4, 7);
  opcode1 = bits (arm_insn_r->arm_insn, 20, 24);

  /* (opcode1 & 0x19) == 0x10 selects the op == 10xx0 region, i.e. the
     miscellaneous / halfword-multiply space; everything else here is
     plain data-processing.  */
  if (!((opcode1 & 0x19) == 0x10))
    {
      /* Data-processing (register) and Data-processing (register-shifted
	 register */
      /* Out of 11 shifter operands mode, all the insn modifies destination
	 register, which is specified by 13-16 decode.  The flags (CPSR)
	 may also change, so record both.  */
      record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
      record_buf[1] = ARM_PS_REGNUM;
      arm_insn_r->reg_rec_count = 2;
    }
  else if ((arm_insn_r->decode < 8) && ((opcode1 & 0x19) == 0x10))
    {
      /* Miscellaneous instructions */

      if (3 == arm_insn_r->decode && 0x12 == opcode1
	  && sbo_sbz (arm_insn_r->arm_insn, 9, 12, 1))
	{
	  /* Handle BLX, branch and link/exchange.  */
	  if (9 == arm_insn_r->opcode)
	    {
	      /* Branch is chosen by setting T bit of CPSR, bitp[0] of Rm,
		 and R14 stores the return address.  */
	      record_buf[0] = ARM_PS_REGNUM;
	      record_buf[1] = ARM_LR_REGNUM;
	      arm_insn_r->reg_rec_count = 2;
	    }
	}
      else if (7 == arm_insn_r->decode && 0x12 == opcode1)
	{
	  /* Handle enhanced software breakpoint insn, BKPT.  */
	  /* CPSR is changed to be executed in ARM state, disabling normal
	     interrupts, entering abort mode.  */
	  /* According to high vector configuration PC is set.  */
	  /* User hit breakpoint and type reverse, in
	     that case, we need to go back with previous CPSR and
	     Program Counter.  */
	  record_buf[0] = ARM_PS_REGNUM;
	  record_buf[1] = ARM_LR_REGNUM;
	  arm_insn_r->reg_rec_count = 2;

	  /* Save SPSR also; how?  Not supported yet, so fail.  */
	  return -1;
	}
      else if (1 == arm_insn_r->decode && 0x12 == opcode1
	       && sbo_sbz (arm_insn_r->arm_insn, 9, 12, 1))
	{
	  /* Handle BX, branch and link/exchange.  */
	  /* Branch is chosen by setting T bit of CPSR, bitp[0] of Rm.  */
	  record_buf[0] = ARM_PS_REGNUM;
	  arm_insn_r->reg_rec_count = 1;
	}
      else if (1 == arm_insn_r->decode && 0x16 == opcode1
	       && sbo_sbz (arm_insn_r->arm_insn, 9, 4, 1)
	       && sbo_sbz (arm_insn_r->arm_insn, 17, 4, 1))
	{
	  /* Count leading zeros: CLZ.  Only Rd (bits 15-12) changes.  */
	  record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
	  arm_insn_r->reg_rec_count = 1;
	}
      else if (!bit (arm_insn_r->arm_insn, INSN_S_L_BIT_NUM)
	       && (8 == arm_insn_r->opcode || 10 == arm_insn_r->opcode)
	       && sbo_sbz (arm_insn_r->arm_insn, 17, 4, 1)
	       && sbo_sbz (arm_insn_r->arm_insn, 1, 12, 0))
	{
	  /* Handle MRS insn: copies CPSR/SPSR into Rd (bits 15-12).  */
	  record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
	  arm_insn_r->reg_rec_count = 1;
	}
    }
  else if (9 == arm_insn_r->decode && opcode1 < 0x10)
    {
      /* Multiply and multiply-accumulate */

      /* Handle multiply instructions.  */
      /* MLA, MUL, SMLAL, SMULL, UMLAL, UMULL.  */
      if (0 == arm_insn_r->opcode || 1 == arm_insn_r->opcode)
	{
	  /* Handle MLA and MUL: Rd is in bits 19-16 for multiplies; the
	     S bit may update the flags.  */
	  record_buf[0] = bits (arm_insn_r->arm_insn, 16, 19);
	  record_buf[1] = ARM_PS_REGNUM;
	  arm_insn_r->reg_rec_count = 2;
	}
      else if (4 <= arm_insn_r->opcode && 7 >= arm_insn_r->opcode)
	{
	  /* Handle SMLAL, SMULL, UMLAL, UMULL: RdHi (bits 19-16) and
	     RdLo (bits 15-12) both change, plus the flags.  */
	  record_buf[0] = bits (arm_insn_r->arm_insn, 16, 19);
	  record_buf[1] = bits (arm_insn_r->arm_insn, 12, 15);
	  record_buf[2] = ARM_PS_REGNUM;
	  arm_insn_r->reg_rec_count = 3;
	}
    }
  else if (9 == arm_insn_r->decode && opcode1 > 0x10)
    {
      /* Synchronization primitives */

      /* Handling SWP, SWPB.  */
      /* These insn, changes register and memory as well.  */
      /* SWP or SWPB insn.  */

      /* Get the memory address given by Rn (bits 19-16).  */
      reg_src1 = bits (arm_insn_r->arm_insn, 16, 19);
      regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval[0]);
      /* SWP insn ?, swaps word.  */
      if (8 == arm_insn_r->opcode)
	{
	  record_buf_mem[0] = 4;
	}
      else
	{
	  /* SWPB insn, swaps only byte.  */
	  record_buf_mem[0] = 1;
	}
      record_buf_mem[1] = u_regval[0];
      arm_insn_r->mem_rec_count = 1;
      /* Rt (bits 15-12) receives the old memory contents.  */
      record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
      arm_insn_r->reg_rec_count = 1;
    }
  else if (11 == arm_insn_r->decode || 13 == arm_insn_r->decode
	   || 15 == arm_insn_r->decode)
    {
      if ((opcode1 & 0x12) == 2)
	{
	  /* Extra load/store (unprivileged) */
	  return -1;
	}
      else
	{
	  /* Extra load/store */
	  /* op2 (insn bits 6-5): 1 = halfword, 2 = load dual / load
	     signed byte, 3 = store dual / load signed halfword.  */
	  switch (bits (arm_insn_r->arm_insn, 5, 6))
	    {
	    case 1:
	      if ((opcode1 & 0x05) == 0x0 || (opcode1 & 0x05) == 0x4)
		{
		  /* STRH (register), STRH (immediate) */
		  arm_record_strx (arm_insn_r, &record_buf[0],
				   &record_buf_mem[0], ARM_RECORD_STRH);
		}
	      else if ((opcode1 & 0x05) == 0x1)
		{
		  /* LDRH (register) */
		  record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
		  arm_insn_r->reg_rec_count = 1;

		  if (bit (arm_insn_r->arm_insn, 21))
		    {
		      /* Write back to Rn.  */
		      record_buf[arm_insn_r->reg_rec_count++]
			= bits (arm_insn_r->arm_insn, 16, 19);
		    }
		}
	      else if ((opcode1 & 0x05) == 0x5)
		{
		  /* LDRH (immediate), LDRH (literal) */
		  int rn = bits (arm_insn_r->arm_insn, 16, 19);

		  record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
		  arm_insn_r->reg_rec_count = 1;

		  if (rn != 15)
		    {
		      /* LDRH (immediate); the literal form (Rn == PC)
			 never writes back.  */
		      if (bit (arm_insn_r->arm_insn, 21))
			{
			  /* Write back to Rn.  */
			  record_buf[arm_insn_r->reg_rec_count++] = rn;
			}
		    }
		}
	      else
		return -1;
	      break;
	    case 2:
	      if ((opcode1 & 0x05) == 0x0)
		{
		  /* LDRD (register): loads the even/odd register pair
		     Rt, Rt+1.  */
		  record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
		  record_buf[1] = record_buf[0] + 1;
		  arm_insn_r->reg_rec_count = 2;

		  if (bit (arm_insn_r->arm_insn, 21))
		    {
		      /* Write back to Rn.  */
		      record_buf[arm_insn_r->reg_rec_count++]
			= bits (arm_insn_r->arm_insn, 16, 19);
		    }
		}
	      else if ((opcode1 & 0x05) == 0x1)
		{
		  /* LDRSB (register) */
		  record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
		  arm_insn_r->reg_rec_count = 1;

		  if (bit (arm_insn_r->arm_insn, 21))
		    {
		      /* Write back to Rn.  */
		      record_buf[arm_insn_r->reg_rec_count++]
			= bits (arm_insn_r->arm_insn, 16, 19);
		    }
		}
	      else if ((opcode1 & 0x05) == 0x4 || (opcode1 & 0x05) == 0x5)
		{
		  /* LDRD (immediate), LDRD (literal), LDRSB (immediate),
		     LDRSB (literal) */
		  int rn = bits (arm_insn_r->arm_insn, 16, 19);

		  record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
		  arm_insn_r->reg_rec_count = 1;

		  if (rn != 15)
		    {
		      /* LDRD (immediate), LDRSB (immediate); the literal
			 forms (Rn == PC) never write back.  */
		      if (bit (arm_insn_r->arm_insn, 21))
			{
			  /* Write back to Rn.  */
			  record_buf[arm_insn_r->reg_rec_count++] = rn;
			}
		    }
		}
	      else
		return -1;
	      break;
	    case 3:
	      if ((opcode1 & 0x05) == 0x0)
		{
		  /* STRD (register) */
		  arm_record_strx (arm_insn_r, &record_buf[0],
				   &record_buf_mem[0], ARM_RECORD_STRD);
		}
	      else if ((opcode1 & 0x05) == 0x1)
		{
		  /* LDRSH (register) */
		  record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
		  arm_insn_r->reg_rec_count = 1;

		  if (bit (arm_insn_r->arm_insn, 21))
		    {
		      /* Write back to Rn.  */
		      record_buf[arm_insn_r->reg_rec_count++]
			= bits (arm_insn_r->arm_insn, 16, 19);
		    }
		}
	      else if ((opcode1 & 0x05) == 0x4)
		{
		  /* STRD (immediate) */
		  arm_record_strx (arm_insn_r, &record_buf[0],
				   &record_buf_mem[0], ARM_RECORD_STRD);
		}
	      else if ((opcode1 & 0x05) == 0x5)
		{
		  /* LDRSH (immediate), LDRSH (literal) */
		  record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
		  arm_insn_r->reg_rec_count = 1;

		  if (bit (arm_insn_r->arm_insn, 21))
		    {
		      /* Write back to Rn.  */
		      record_buf[arm_insn_r->reg_rec_count++]
			= bits (arm_insn_r->arm_insn, 16, 19);
		    }
		}
	      else
		return -1;
	      break;
	    default:
	      return -1;
	    }
	}
    }
  else
    {
      return -1;
    }

  REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf);
  MEM_ALLOC (arm_insn_r->arm_mems, arm_insn_r->mem_rec_count, record_buf_mem);
  return 0;
}
10421
10422 /* Handling opcode 001 insns. */
10423
10424 static int
10425 arm_record_data_proc_imm (insn_decode_record *arm_insn_r)
10426 {
10427 uint32_t record_buf[8], record_buf_mem[8];
10428
10429 arm_insn_r->opcode = bits (arm_insn_r->arm_insn, 21, 24);
10430 arm_insn_r->decode = bits (arm_insn_r->arm_insn, 4, 7);
10431
10432 if ((9 == arm_insn_r->opcode || 11 == arm_insn_r->opcode)
10433 && 2 == bits (arm_insn_r->arm_insn, 20, 21)
10434 && sbo_sbz (arm_insn_r->arm_insn, 13, 4, 1)
10435 )
10436 {
10437 /* Handle MSR insn. */
10438 if (9 == arm_insn_r->opcode)
10439 {
10440 /* CSPR is going to be changed. */
10441 record_buf[0] = ARM_PS_REGNUM;
10442 arm_insn_r->reg_rec_count = 1;
10443 }
10444 else
10445 {
10446 /* SPSR is going to be changed. */
10447 }
10448 }
10449 else if (arm_insn_r->opcode <= 15)
10450 {
10451 /* Normal data processing insns. */
10452 /* Out of 11 shifter operands mode, all the insn modifies destination
10453 register, which is specified by 13-16 decode. */
10454 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
10455 record_buf[1] = ARM_PS_REGNUM;
10456 arm_insn_r->reg_rec_count = 2;
10457 }
10458 else
10459 {
10460 return -1;
10461 }
10462
10463 REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf);
10464 MEM_ALLOC (arm_insn_r->arm_mems, arm_insn_r->mem_rec_count, record_buf_mem);
10465 return 0;
10466 }
10467
10468 static int
10469 arm_record_media (insn_decode_record *arm_insn_r)
10470 {
10471 uint32_t record_buf[8];
10472
10473 switch (bits (arm_insn_r->arm_insn, 22, 24))
10474 {
10475 case 0:
10476 /* Parallel addition and subtraction, signed */
10477 case 1:
10478 /* Parallel addition and subtraction, unsigned */
10479 case 2:
10480 case 3:
10481 /* Packing, unpacking, saturation and reversal */
10482 {
10483 int rd = bits (arm_insn_r->arm_insn, 12, 15);
10484
10485 record_buf[arm_insn_r->reg_rec_count++] = rd;
10486 }
10487 break;
10488
10489 case 4:
10490 case 5:
10491 /* Signed multiplies */
10492 {
10493 int rd = bits (arm_insn_r->arm_insn, 16, 19);
10494 unsigned int op1 = bits (arm_insn_r->arm_insn, 20, 22);
10495
10496 record_buf[arm_insn_r->reg_rec_count++] = rd;
10497 if (op1 == 0x0)
10498 record_buf[arm_insn_r->reg_rec_count++] = ARM_PS_REGNUM;
10499 else if (op1 == 0x4)
10500 record_buf[arm_insn_r->reg_rec_count++]
10501 = bits (arm_insn_r->arm_insn, 12, 15);
10502 }
10503 break;
10504
10505 case 6:
10506 {
10507 if (bit (arm_insn_r->arm_insn, 21)
10508 && bits (arm_insn_r->arm_insn, 5, 6) == 0x2)
10509 {
10510 /* SBFX */
10511 record_buf[arm_insn_r->reg_rec_count++]
10512 = bits (arm_insn_r->arm_insn, 12, 15);
10513 }
10514 else if (bits (arm_insn_r->arm_insn, 20, 21) == 0x0
10515 && bits (arm_insn_r->arm_insn, 5, 7) == 0x0)
10516 {
10517 /* USAD8 and USADA8 */
10518 record_buf[arm_insn_r->reg_rec_count++]
10519 = bits (arm_insn_r->arm_insn, 16, 19);
10520 }
10521 }
10522 break;
10523
10524 case 7:
10525 {
10526 if (bits (arm_insn_r->arm_insn, 20, 21) == 0x3
10527 && bits (arm_insn_r->arm_insn, 5, 7) == 0x7)
10528 {
10529 /* Permanently UNDEFINED */
10530 return -1;
10531 }
10532 else
10533 {
10534 /* BFC, BFI and UBFX */
10535 record_buf[arm_insn_r->reg_rec_count++]
10536 = bits (arm_insn_r->arm_insn, 12, 15);
10537 }
10538 }
10539 break;
10540
10541 default:
10542 return -1;
10543 }
10544
10545 REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf);
10546
10547 return 0;
10548 }
10549
10550 /* Handle ARM mode instructions with opcode 010. */
10551
10552 static int
10553 arm_record_ld_st_imm_offset (insn_decode_record *arm_insn_r)
10554 {
10555 struct regcache *reg_cache = arm_insn_r->regcache;
10556
10557 uint32_t reg_base , reg_dest;
10558 uint32_t offset_12, tgt_mem_addr;
10559 uint32_t record_buf[8], record_buf_mem[8];
10560 unsigned char wback;
10561 ULONGEST u_regval;
10562
10563 /* Calculate wback. */
10564 wback = (bit (arm_insn_r->arm_insn, 24) == 0)
10565 || (bit (arm_insn_r->arm_insn, 21) == 1);
10566
10567 arm_insn_r->reg_rec_count = 0;
10568 reg_base = bits (arm_insn_r->arm_insn, 16, 19);
10569
10570 if (bit (arm_insn_r->arm_insn, INSN_S_L_BIT_NUM))
10571 {
10572 /* LDR (immediate), LDR (literal), LDRB (immediate), LDRB (literal), LDRBT
10573 and LDRT. */
10574
10575 reg_dest = bits (arm_insn_r->arm_insn, 12, 15);
10576 record_buf[arm_insn_r->reg_rec_count++] = reg_dest;
10577
10578 /* The LDR instruction is capable of doing branching. If MOV LR, PC
10579 preceeds a LDR instruction having R15 as reg_base, it
10580 emulates a branch and link instruction, and hence we need to save
10581 CPSR and PC as well. */
10582 if (ARM_PC_REGNUM == reg_dest)
10583 record_buf[arm_insn_r->reg_rec_count++] = ARM_PS_REGNUM;
10584
10585 /* If wback is true, also save the base register, which is going to be
10586 written to. */
10587 if (wback)
10588 record_buf[arm_insn_r->reg_rec_count++] = reg_base;
10589 }
10590 else
10591 {
10592 /* STR (immediate), STRB (immediate), STRBT and STRT. */
10593
10594 offset_12 = bits (arm_insn_r->arm_insn, 0, 11);
10595 regcache_raw_read_unsigned (reg_cache, reg_base, &u_regval);
10596
10597 /* Handle bit U. */
10598 if (bit (arm_insn_r->arm_insn, 23))
10599 {
10600 /* U == 1: Add the offset. */
10601 tgt_mem_addr = (uint32_t) u_regval + offset_12;
10602 }
10603 else
10604 {
10605 /* U == 0: subtract the offset. */
10606 tgt_mem_addr = (uint32_t) u_regval - offset_12;
10607 }
10608
10609 /* Bit 22 tells us whether the store instruction writes 1 byte or 4
10610 bytes. */
10611 if (bit (arm_insn_r->arm_insn, 22))
10612 {
10613 /* STRB and STRBT: 1 byte. */
10614 record_buf_mem[0] = 1;
10615 }
10616 else
10617 {
10618 /* STR and STRT: 4 bytes. */
10619 record_buf_mem[0] = 4;
10620 }
10621
10622 /* Handle bit P. */
10623 if (bit (arm_insn_r->arm_insn, 24))
10624 record_buf_mem[1] = tgt_mem_addr;
10625 else
10626 record_buf_mem[1] = (uint32_t) u_regval;
10627
10628 arm_insn_r->mem_rec_count = 1;
10629
10630 /* If wback is true, also save the base register, which is going to be
10631 written to. */
10632 if (wback)
10633 record_buf[arm_insn_r->reg_rec_count++] = reg_base;
10634 }
10635
10636 REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf);
10637 MEM_ALLOC (arm_insn_r->arm_mems, arm_insn_r->mem_rec_count, record_buf_mem);
10638 return 0;
10639 }
10640
10641 /* Handling opcode 011 insns. */
10642
10643 static int
10644 arm_record_ld_st_reg_offset (insn_decode_record *arm_insn_r)
10645 {
10646 struct regcache *reg_cache = arm_insn_r->regcache;
10647
10648 uint32_t shift_imm = 0;
10649 uint32_t reg_src1 = 0, reg_src2 = 0, reg_dest = 0;
10650 uint32_t offset_12 = 0, tgt_mem_addr = 0;
10651 uint32_t record_buf[8], record_buf_mem[8];
10652
10653 LONGEST s_word;
10654 ULONGEST u_regval[2];
10655
10656 if (bit (arm_insn_r->arm_insn, 4))
10657 return arm_record_media (arm_insn_r);
10658
10659 arm_insn_r->opcode = bits (arm_insn_r->arm_insn, 21, 24);
10660 arm_insn_r->decode = bits (arm_insn_r->arm_insn, 4, 7);
10661
10662 /* Handle enhanced store insns and LDRD DSP insn,
10663 order begins according to addressing modes for store insns
10664 STRH insn. */
10665
10666 /* LDR or STR? */
10667 if (bit (arm_insn_r->arm_insn, INSN_S_L_BIT_NUM))
10668 {
10669 reg_dest = bits (arm_insn_r->arm_insn, 12, 15);
10670 /* LDR insn has a capability to do branching, if
10671 MOV LR, PC is precedded by LDR insn having Rn as R15
10672 in that case, it emulates branch and link insn, and hence we
10673 need to save CSPR and PC as well. */
10674 if (15 != reg_dest)
10675 {
10676 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
10677 arm_insn_r->reg_rec_count = 1;
10678 }
10679 else
10680 {
10681 record_buf[0] = reg_dest;
10682 record_buf[1] = ARM_PS_REGNUM;
10683 arm_insn_r->reg_rec_count = 2;
10684 }
10685 }
10686 else
10687 {
10688 if (! bits (arm_insn_r->arm_insn, 4, 11))
10689 {
10690 /* Store insn, register offset and register pre-indexed,
10691 register post-indexed. */
10692 /* Get Rm. */
10693 reg_src1 = bits (arm_insn_r->arm_insn, 0, 3);
10694 /* Get Rn. */
10695 reg_src2 = bits (arm_insn_r->arm_insn, 16, 19);
10696 regcache_raw_read_unsigned (reg_cache, reg_src1
10697 , &u_regval[0]);
10698 regcache_raw_read_unsigned (reg_cache, reg_src2
10699 , &u_regval[1]);
10700 if (15 == reg_src2)
10701 {
10702 /* If R15 was used as Rn, hence current PC+8. */
10703 /* Pre-indexed mode doesnt reach here ; illegal insn. */
10704 u_regval[0] = u_regval[0] + 8;
10705 }
10706 /* Calculate target store address, Rn +/- Rm, register offset. */
10707 /* U == 1. */
10708 if (bit (arm_insn_r->arm_insn, 23))
10709 {
10710 tgt_mem_addr = u_regval[0] + u_regval[1];
10711 }
10712 else
10713 {
10714 tgt_mem_addr = u_regval[1] - u_regval[0];
10715 }
10716
10717 switch (arm_insn_r->opcode)
10718 {
10719 /* STR. */
10720 case 8:
10721 case 12:
10722 /* STR. */
10723 case 9:
10724 case 13:
10725 /* STRT. */
10726 case 1:
10727 case 5:
10728 /* STR. */
10729 case 0:
10730 case 4:
10731 record_buf_mem[0] = 4;
10732 break;
10733
10734 /* STRB. */
10735 case 10:
10736 case 14:
10737 /* STRB. */
10738 case 11:
10739 case 15:
10740 /* STRBT. */
10741 case 3:
10742 case 7:
10743 /* STRB. */
10744 case 2:
10745 case 6:
10746 record_buf_mem[0] = 1;
10747 break;
10748
10749 default:
10750 gdb_assert_not_reached ("no decoding pattern found");
10751 break;
10752 }
10753 record_buf_mem[1] = tgt_mem_addr;
10754 arm_insn_r->mem_rec_count = 1;
10755
10756 if (9 == arm_insn_r->opcode || 11 == arm_insn_r->opcode
10757 || 13 == arm_insn_r->opcode || 15 == arm_insn_r->opcode
10758 || 0 == arm_insn_r->opcode || 2 == arm_insn_r->opcode
10759 || 4 == arm_insn_r->opcode || 6 == arm_insn_r->opcode
10760 || 1 == arm_insn_r->opcode || 3 == arm_insn_r->opcode
10761 || 5 == arm_insn_r->opcode || 7 == arm_insn_r->opcode
10762 )
10763 {
10764 /* Rn is going to be changed in pre-indexed mode and
10765 post-indexed mode as well. */
10766 record_buf[0] = reg_src2;
10767 arm_insn_r->reg_rec_count = 1;
10768 }
10769 }
10770 else
10771 {
10772 /* Store insn, scaled register offset; scaled pre-indexed. */
10773 offset_12 = bits (arm_insn_r->arm_insn, 5, 6);
10774 /* Get Rm. */
10775 reg_src1 = bits (arm_insn_r->arm_insn, 0, 3);
10776 /* Get Rn. */
10777 reg_src2 = bits (arm_insn_r->arm_insn, 16, 19);
10778 /* Get shift_imm. */
10779 shift_imm = bits (arm_insn_r->arm_insn, 7, 11);
10780 regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval[0]);
10781 regcache_raw_read_signed (reg_cache, reg_src1, &s_word);
10782 regcache_raw_read_unsigned (reg_cache, reg_src2, &u_regval[1]);
10783 /* Offset_12 used as shift. */
10784 switch (offset_12)
10785 {
10786 case 0:
10787 /* Offset_12 used as index. */
10788 offset_12 = u_regval[0] << shift_imm;
10789 break;
10790
10791 case 1:
10792 offset_12 = (!shift_imm)?0:u_regval[0] >> shift_imm;
10793 break;
10794
10795 case 2:
10796 if (!shift_imm)
10797 {
10798 if (bit (u_regval[0], 31))
10799 {
10800 offset_12 = 0xFFFFFFFF;
10801 }
10802 else
10803 {
10804 offset_12 = 0;
10805 }
10806 }
10807 else
10808 {
10809 /* This is arithmetic shift. */
10810 offset_12 = s_word >> shift_imm;
10811 }
10812 break;
10813
10814 case 3:
10815 if (!shift_imm)
10816 {
10817 regcache_raw_read_unsigned (reg_cache, ARM_PS_REGNUM,
10818 &u_regval[1]);
10819 /* Get C flag value and shift it by 31. */
10820 offset_12 = (((bit (u_regval[1], 29)) << 31) \
10821 | (u_regval[0]) >> 1);
10822 }
10823 else
10824 {
10825 offset_12 = (u_regval[0] >> shift_imm) \
10826 | (u_regval[0] <<
10827 (sizeof(uint32_t) - shift_imm));
10828 }
10829 break;
10830
10831 default:
10832 gdb_assert_not_reached ("no decoding pattern found");
10833 break;
10834 }
10835
10836 regcache_raw_read_unsigned (reg_cache, reg_src2, &u_regval[1]);
10837 /* bit U set. */
10838 if (bit (arm_insn_r->arm_insn, 23))
10839 {
10840 tgt_mem_addr = u_regval[1] + offset_12;
10841 }
10842 else
10843 {
10844 tgt_mem_addr = u_regval[1] - offset_12;
10845 }
10846
10847 switch (arm_insn_r->opcode)
10848 {
10849 /* STR. */
10850 case 8:
10851 case 12:
10852 /* STR. */
10853 case 9:
10854 case 13:
10855 /* STRT. */
10856 case 1:
10857 case 5:
10858 /* STR. */
10859 case 0:
10860 case 4:
10861 record_buf_mem[0] = 4;
10862 break;
10863
10864 /* STRB. */
10865 case 10:
10866 case 14:
10867 /* STRB. */
10868 case 11:
10869 case 15:
10870 /* STRBT. */
10871 case 3:
10872 case 7:
10873 /* STRB. */
10874 case 2:
10875 case 6:
10876 record_buf_mem[0] = 1;
10877 break;
10878
10879 default:
10880 gdb_assert_not_reached ("no decoding pattern found");
10881 break;
10882 }
10883 record_buf_mem[1] = tgt_mem_addr;
10884 arm_insn_r->mem_rec_count = 1;
10885
10886 if (9 == arm_insn_r->opcode || 11 == arm_insn_r->opcode
10887 || 13 == arm_insn_r->opcode || 15 == arm_insn_r->opcode
10888 || 0 == arm_insn_r->opcode || 2 == arm_insn_r->opcode
10889 || 4 == arm_insn_r->opcode || 6 == arm_insn_r->opcode
10890 || 1 == arm_insn_r->opcode || 3 == arm_insn_r->opcode
10891 || 5 == arm_insn_r->opcode || 7 == arm_insn_r->opcode
10892 )
10893 {
10894 /* Rn is going to be changed in register scaled pre-indexed
10895 mode,and scaled post indexed mode. */
10896 record_buf[0] = reg_src2;
10897 arm_insn_r->reg_rec_count = 1;
10898 }
10899 }
10900 }
10901
10902 REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf);
10903 MEM_ALLOC (arm_insn_r->arm_mems, arm_insn_r->mem_rec_count, record_buf_mem);
10904 return 0;
10905 }
10906
10907 /* Handle ARM mode instructions with opcode 100. */
10908
10909 static int
10910 arm_record_ld_st_multiple (insn_decode_record *arm_insn_r)
10911 {
10912 struct regcache *reg_cache = arm_insn_r->regcache;
10913 uint32_t register_count = 0, register_bits;
10914 uint32_t reg_base, addr_mode;
10915 uint32_t record_buf[24], record_buf_mem[48];
10916 uint32_t wback;
10917 ULONGEST u_regval;
10918
10919 /* Fetch the list of registers. */
10920 register_bits = bits (arm_insn_r->arm_insn, 0, 15);
10921 arm_insn_r->reg_rec_count = 0;
10922
10923 /* Fetch the base register that contains the address we are loading data
10924 to. */
10925 reg_base = bits (arm_insn_r->arm_insn, 16, 19);
10926
10927 /* Calculate wback. */
10928 wback = (bit (arm_insn_r->arm_insn, 21) == 1);
10929
10930 if (bit (arm_insn_r->arm_insn, INSN_S_L_BIT_NUM))
10931 {
10932 /* LDM/LDMIA/LDMFD, LDMDA/LDMFA, LDMDB and LDMIB. */
10933
10934 /* Find out which registers are going to be loaded from memory. */
10935 while (register_bits)
10936 {
10937 if (register_bits & 0x00000001)
10938 record_buf[arm_insn_r->reg_rec_count++] = register_count;
10939 register_bits = register_bits >> 1;
10940 register_count++;
10941 }
10942
10943
10944 /* If wback is true, also save the base register, which is going to be
10945 written to. */
10946 if (wback)
10947 record_buf[arm_insn_r->reg_rec_count++] = reg_base;
10948
10949 /* Save the CPSR register. */
10950 record_buf[arm_insn_r->reg_rec_count++] = ARM_PS_REGNUM;
10951 }
10952 else
10953 {
10954 /* STM (STMIA, STMEA), STMDA (STMED), STMDB (STMFD) and STMIB (STMFA). */
10955
10956 addr_mode = bits (arm_insn_r->arm_insn, 23, 24);
10957
10958 regcache_raw_read_unsigned (reg_cache, reg_base, &u_regval);
10959
10960 /* Find out how many registers are going to be stored to memory. */
10961 while (register_bits)
10962 {
10963 if (register_bits & 0x00000001)
10964 register_count++;
10965 register_bits = register_bits >> 1;
10966 }
10967
10968 switch (addr_mode)
10969 {
10970 /* STMDA (STMED): Decrement after. */
10971 case 0:
10972 record_buf_mem[1] = (uint32_t) u_regval
10973 - register_count * INT_REGISTER_SIZE + 4;
10974 break;
10975 /* STM (STMIA, STMEA): Increment after. */
10976 case 1:
10977 record_buf_mem[1] = (uint32_t) u_regval;
10978 break;
10979 /* STMDB (STMFD): Decrement before. */
10980 case 2:
10981 record_buf_mem[1] = (uint32_t) u_regval
10982 - register_count * INT_REGISTER_SIZE;
10983 break;
10984 /* STMIB (STMFA): Increment before. */
10985 case 3:
10986 record_buf_mem[1] = (uint32_t) u_regval + INT_REGISTER_SIZE;
10987 break;
10988 default:
10989 gdb_assert_not_reached ("no decoding pattern found");
10990 break;
10991 }
10992
10993 record_buf_mem[0] = register_count * INT_REGISTER_SIZE;
10994 arm_insn_r->mem_rec_count = 1;
10995
10996 /* If wback is true, also save the base register, which is going to be
10997 written to. */
10998 if (wback)
10999 record_buf[arm_insn_r->reg_rec_count++] = reg_base;
11000 }
11001
11002 REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf);
11003 MEM_ALLOC (arm_insn_r->arm_mems, arm_insn_r->mem_rec_count, record_buf_mem);
11004 return 0;
11005 }
11006
11007 /* Handling opcode 101 insns. */
11008
11009 static int
11010 arm_record_b_bl (insn_decode_record *arm_insn_r)
11011 {
11012 uint32_t record_buf[8];
11013
11014 /* Handle B, BL, BLX(1) insns. */
11015 /* B simply branches so we do nothing here. */
11016 /* Note: BLX(1) doesnt fall here but instead it falls into
11017 extension space. */
11018 if (bit (arm_insn_r->arm_insn, 24))
11019 {
11020 record_buf[0] = ARM_LR_REGNUM;
11021 arm_insn_r->reg_rec_count = 1;
11022 }
11023
11024 REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf);
11025
11026 return 0;
11027 }
11028
11029 static int
11030 arm_record_unsupported_insn (insn_decode_record *arm_insn_r)
11031 {
11032 printf_unfiltered (_("Process record does not support instruction "
11033 "0x%0x at address %s.\n"),arm_insn_r->arm_insn,
11034 paddress (arm_insn_r->gdbarch, arm_insn_r->this_addr));
11035
11036 return -1;
11037 }
11038
11039 /* Record handler for vector data transfer instructions. */
11040
11041 static int
11042 arm_record_vdata_transfer_insn (insn_decode_record *arm_insn_r)
11043 {
11044 uint32_t bits_a, bit_c, bit_l, reg_t, reg_v;
11045 uint32_t record_buf[4];
11046
11047 reg_t = bits (arm_insn_r->arm_insn, 12, 15);
11048 reg_v = bits (arm_insn_r->arm_insn, 21, 23);
11049 bits_a = bits (arm_insn_r->arm_insn, 21, 23);
11050 bit_l = bit (arm_insn_r->arm_insn, 20);
11051 bit_c = bit (arm_insn_r->arm_insn, 8);
11052
11053 /* Handle VMOV instruction. */
11054 if (bit_l && bit_c)
11055 {
11056 record_buf[0] = reg_t;
11057 arm_insn_r->reg_rec_count = 1;
11058 }
11059 else if (bit_l && !bit_c)
11060 {
11061 /* Handle VMOV instruction. */
11062 if (bits_a == 0x00)
11063 {
11064 record_buf[0] = reg_t;
11065 arm_insn_r->reg_rec_count = 1;
11066 }
11067 /* Handle VMRS instruction. */
11068 else if (bits_a == 0x07)
11069 {
11070 if (reg_t == 15)
11071 reg_t = ARM_PS_REGNUM;
11072
11073 record_buf[0] = reg_t;
11074 arm_insn_r->reg_rec_count = 1;
11075 }
11076 }
11077 else if (!bit_l && !bit_c)
11078 {
11079 /* Handle VMOV instruction. */
11080 if (bits_a == 0x00)
11081 {
11082 record_buf[0] = ARM_D0_REGNUM + reg_v;
11083
11084 arm_insn_r->reg_rec_count = 1;
11085 }
11086 /* Handle VMSR instruction. */
11087 else if (bits_a == 0x07)
11088 {
11089 record_buf[0] = ARM_FPSCR_REGNUM;
11090 arm_insn_r->reg_rec_count = 1;
11091 }
11092 }
11093 else if (!bit_l && bit_c)
11094 {
11095 /* Handle VMOV instruction. */
11096 if (!(bits_a & 0x04))
11097 {
11098 record_buf[0] = (reg_v | (bit (arm_insn_r->arm_insn, 7) << 4))
11099 + ARM_D0_REGNUM;
11100 arm_insn_r->reg_rec_count = 1;
11101 }
11102 /* Handle VDUP instruction. */
11103 else
11104 {
11105 if (bit (arm_insn_r->arm_insn, 21))
11106 {
11107 reg_v = reg_v | (bit (arm_insn_r->arm_insn, 7) << 4);
11108 record_buf[0] = reg_v + ARM_D0_REGNUM;
11109 record_buf[1] = reg_v + ARM_D0_REGNUM + 1;
11110 arm_insn_r->reg_rec_count = 2;
11111 }
11112 else
11113 {
11114 reg_v = reg_v | (bit (arm_insn_r->arm_insn, 7) << 4);
11115 record_buf[0] = reg_v + ARM_D0_REGNUM;
11116 arm_insn_r->reg_rec_count = 1;
11117 }
11118 }
11119 }
11120
11121 REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf);
11122 return 0;
11123 }
11124
11125 /* Record handler for extension register load/store instructions. */
11126
11127 static int
arm_record_exreg_ld_st_insn (insn_decode_record *arm_insn_r)
{
  uint32_t opcode, single_reg;
  uint8_t op_vldm_vstm;
  uint32_t record_buf[8], record_buf_mem[128];
  ULONGEST u_regval = 0;

  struct regcache *reg_cache = arm_insn_r->regcache;

  opcode = bits (arm_insn_r->arm_insn, 20, 24);
  /* Bit 8 clear selects single-precision (S register) transfers, set
     selects double-precision (D register) transfers.  */
  single_reg = !bit (arm_insn_r->arm_insn, 8);
  op_vldm_vstm = opcode & 0x1b;

  /* Handle VMOV instructions.  */
  if ((opcode & 0x1e) == 0x04)
    {
      if (bit (arm_insn_r->arm_insn, 20)) /* to_arm_registers bit 20? */
        {
          /* Transfer towards the ARM core: Rt (bits 12-15) and Rt2
             (bits 16-19) are overwritten.  */
          record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
          record_buf[1] = bits (arm_insn_r->arm_insn, 16, 19);
          arm_insn_r->reg_rec_count = 2;
        }
      else
        {
          uint8_t reg_m = bits (arm_insn_r->arm_insn, 0, 3);
          uint8_t bit_m = bit (arm_insn_r->arm_insn, 5);

          if (single_reg)
            {
              /* The first S register number m is REG_M:M (M is bit 5),
                 the corresponding D register number is REG_M:M / 2, which
                 is REG_M.  */
              record_buf[arm_insn_r->reg_rec_count++] = ARM_D0_REGNUM + reg_m;
              /* The second S register number is REG_M:M + 1, the
                 corresponding D register number is (REG_M:M + 1) / 2.
                 IOW, if bit M is 1, the first and second S registers
                 are mapped to different D registers, otherwise, they are
                 in the same D register.  */
              if (bit_m)
                {
                  record_buf[arm_insn_r->reg_rec_count++]
                    = ARM_D0_REGNUM + reg_m + 1;
                }
            }
          else
            {
              /* Double-precision destination: D register number is
                 M:REG_M.  */
              record_buf[0] = ((bit_m << 4) + reg_m + ARM_D0_REGNUM);
              arm_insn_r->reg_rec_count = 1;
            }
        }
    }
  /* Handle VSTM and VPUSH instructions.  */
  else if (op_vldm_vstm == 0x08 || op_vldm_vstm == 0x0a
           || op_vldm_vstm == 0x12)
    {
      uint32_t start_address, reg_rn, imm_off32, imm_off8, memory_count;
      uint32_t memory_index = 0;

      reg_rn = bits (arm_insn_r->arm_insn, 16, 19);
      regcache_raw_read_unsigned (reg_cache, reg_rn, &u_regval);
      /* imm8 is the number of 32-bit words transferred.  */
      imm_off8 = bits (arm_insn_r->arm_insn, 0, 7);
      imm_off32 = imm_off8 << 2;
      memory_count = imm_off8;

      /* Bit 23 (U) selects increment (base) vs decrement (base - imm)
         addressing.  */
      if (bit (arm_insn_r->arm_insn, 23))
        start_address = u_regval;
      else
        start_address = u_regval - imm_off32;

      if (bit (arm_insn_r->arm_insn, 21))
        {
          /* Write-back (W bit): the base register Rn changes too.  */
          record_buf[0] = reg_rn;
          arm_insn_r->reg_rec_count = 1;
        }

      /* NOTE(review): MEMORY_COUNT counts 32-bit words, yet the
         double-register branch below records 8 bytes per iteration —
         that looks like twice the transferred range; also an (invalid)
         imm8 up to 255 would index past record_buf_mem[128].  Confirm
         against the VSTM encoding constraints in the ARM ARM.  */
      while (memory_count > 0)
        {
          if (single_reg)
            {
              record_buf_mem[memory_index] = 4;
              record_buf_mem[memory_index + 1] = start_address;
              start_address = start_address + 4;
              memory_index = memory_index + 2;
            }
          else
            {
              record_buf_mem[memory_index] = 4;
              record_buf_mem[memory_index + 1] = start_address;
              record_buf_mem[memory_index + 2] = 4;
              record_buf_mem[memory_index + 3] = start_address + 4;
              start_address = start_address + 8;
              memory_index = memory_index + 4;
            }
          memory_count--;
        }
      /* record_buf_mem holds (length, address) pairs, hence / 2.  */
      arm_insn_r->mem_rec_count = (memory_index >> 1);
    }
  /* Handle VLDM instructions.  */
  else if (op_vldm_vstm == 0x09 || op_vldm_vstm == 0x0b
           || op_vldm_vstm == 0x13)
    {
      uint32_t reg_count, reg_vd;
      uint32_t reg_index = 0;
      uint32_t bit_d = bit (arm_insn_r->arm_insn, 22);

      reg_vd = bits (arm_insn_r->arm_insn, 12, 15);
      reg_count = bits (arm_insn_r->arm_insn, 0, 7);

      /* REG_VD is the first D register number.  If the instruction
         loads memory to S registers (SINGLE_REG is TRUE), the register
         number is (REG_VD << 1 | bit D), so the corresponding D
         register number is (REG_VD << 1 | bit D) / 2 = REG_VD.  */
      if (!single_reg)
        reg_vd = reg_vd | (bit_d << 4);

      if (bit (arm_insn_r->arm_insn, 21) /* write back */)
        record_buf[reg_index++] = bits (arm_insn_r->arm_insn, 16, 19);

      /* If the instruction loads memory to D register, REG_COUNT should
         be divided by 2, according to the ARM Architecture Reference
         Manual.  If the instruction loads memory to S register, divide by
         2 as well because two S registers are mapped to D register.  */
      reg_count = reg_count / 2;
      if (single_reg && bit_d)
        {
          /* Increase the register count if S register list starts from
             an odd number (bit d is one).  */
          reg_count++;
        }

      /* Record the destination D registers, highest first.  */
      while (reg_count > 0)
        {
          record_buf[reg_index++] = ARM_D0_REGNUM + reg_vd + reg_count - 1;
          reg_count--;
        }
      arm_insn_r->reg_rec_count = reg_index;
    }
  /* VSTR Vector store register.  */
  else if ((opcode & 0x13) == 0x10)
    {
      uint32_t start_address, reg_rn, imm_off32, imm_off8;
      uint32_t memory_index = 0;

      reg_rn = bits (arm_insn_r->arm_insn, 16, 19);
      regcache_raw_read_unsigned (reg_cache, reg_rn, &u_regval);
      imm_off8 = bits (arm_insn_r->arm_insn, 0, 7);
      imm_off32 = imm_off8 << 2;

      /* Bit 23 is the U (add/subtract offset) bit.  */
      if (bit (arm_insn_r->arm_insn, 23))
        start_address = u_regval + imm_off32;
      else
        start_address = u_regval - imm_off32;

      if (single_reg)
        {
          /* One 4-byte store.  */
          record_buf_mem[memory_index] = 4;
          record_buf_mem[memory_index + 1] = start_address;
          arm_insn_r->mem_rec_count = 1;
        }
      else
        {
          /* A D register store: two consecutive 4-byte words.  */
          record_buf_mem[memory_index] = 4;
          record_buf_mem[memory_index + 1] = start_address;
          record_buf_mem[memory_index + 2] = 4;
          record_buf_mem[memory_index + 3] = start_address + 4;
          arm_insn_r->mem_rec_count = 2;
        }
    }
  /* VLDR Vector load register.  */
  else if ((opcode & 0x13) == 0x11)
    {
      uint32_t reg_vd = bits (arm_insn_r->arm_insn, 12, 15);

      if (!single_reg)
        {
          /* D register number is D:Vd.  */
          reg_vd = reg_vd | (bit (arm_insn_r->arm_insn, 22) << 4);
          record_buf[0] = ARM_D0_REGNUM + reg_vd;
        }
      else
        {
          /* S register number is Vd:D.  */
          reg_vd = (reg_vd << 1) | bit (arm_insn_r->arm_insn, 22);
          /* Record register D rather than pseudo register S.  */
          record_buf[0] = ARM_D0_REGNUM + reg_vd / 2;
        }
      arm_insn_r->reg_rec_count = 1;
    }

  REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf);
  MEM_ALLOC (arm_insn_r->arm_mems, arm_insn_r->mem_rec_count, record_buf_mem);
  return 0;
}
11319
11320 /* Record handler for arm/thumb mode VFP data processing instructions. */
11321
static int
arm_record_vfp_data_proc_insn (insn_decode_record *arm_insn_r)
{
  uint32_t opc1, opc2, opc3, dp_op_sz, bit_d, reg_vd;
  uint32_t record_buf[4];
  /* Destination classification (see the switch at the bottom):
     INSN_T0: two consecutive D registers are written.
     INSN_T1: one D register (number D:Vd) is written.
     INSN_T2: one S-numbered destination (Vd:D) is written.
     INSN_T3: only FPSCR is written (compare insns).
     INSN_INV: no pattern matched; decoding failure.  */
  enum insn_types {INSN_T0, INSN_T1, INSN_T2, INSN_T3, INSN_INV};
  enum insn_types curr_insn_type = INSN_INV;

  reg_vd = bits (arm_insn_r->arm_insn, 12, 15);
  opc1 = bits (arm_insn_r->arm_insn, 20, 23);
  opc2 = bits (arm_insn_r->arm_insn, 16, 19);
  opc3 = bits (arm_insn_r->arm_insn, 6, 7);
  /* dp_op_sz (bit 8) set means the double-precision form.  */
  dp_op_sz = bit (arm_insn_r->arm_insn, 8);
  bit_d = bit (arm_insn_r->arm_insn, 22);
  /* Mask off the "D" bit.  */
  opc1 = opc1 & ~0x04;

  /* Handle VMLA, VMLS.  */
  if (opc1 == 0x00)
    {
      if (bit (arm_insn_r->arm_insn, 10))
        {
          if (bit (arm_insn_r->arm_insn, 6))
            curr_insn_type = INSN_T0;
          else
            curr_insn_type = INSN_T1;
        }
      else
        {
          if (dp_op_sz)
            curr_insn_type = INSN_T1;
          else
            curr_insn_type = INSN_T2;
        }
    }
  /* Handle VNMLA, VNMLS, VNMUL.  */
  else if (opc1 == 0x01)
    {
      if (dp_op_sz)
        curr_insn_type = INSN_T1;
      else
        curr_insn_type = INSN_T2;
    }
  /* Handle VMUL.  */
  else if (opc1 == 0x02 && !(opc3 & 0x01))
    {
      if (bit (arm_insn_r->arm_insn, 10))
        {
          if (bit (arm_insn_r->arm_insn, 6))
            curr_insn_type = INSN_T0;
          else
            curr_insn_type = INSN_T1;
        }
      else
        {
          if (dp_op_sz)
            curr_insn_type = INSN_T1;
          else
            curr_insn_type = INSN_T2;
        }
    }
  /* Handle VADD, VSUB.  */
  else if (opc1 == 0x03)
    {
      if (!bit (arm_insn_r->arm_insn, 9))
        {
          if (bit (arm_insn_r->arm_insn, 6))
            curr_insn_type = INSN_T0;
          else
            curr_insn_type = INSN_T1;
        }
      else
        {
          if (dp_op_sz)
            curr_insn_type = INSN_T1;
          else
            curr_insn_type = INSN_T2;
        }
    }
  /* Handle VDIV.  */
  else if (opc1 == 0x08)
    {
      if (dp_op_sz)
        curr_insn_type = INSN_T1;
      else
        curr_insn_type = INSN_T2;
    }
  /* Handle all other vfp data processing instructions.  */
  else if (opc1 == 0x0b)
    {
      /* Handle VMOV.  */
      if (!(opc3 & 0x01) || (opc2 == 0x00 && opc3 == 0x01))
        {
          if (bit (arm_insn_r->arm_insn, 4))
            {
              if (bit (arm_insn_r->arm_insn, 6))
                curr_insn_type = INSN_T0;
              else
                curr_insn_type = INSN_T1;
            }
          else
            {
              if (dp_op_sz)
                curr_insn_type = INSN_T1;
              else
                curr_insn_type = INSN_T2;
            }
        }
      /* Handle VNEG and VABS.  */
      else if ((opc2 == 0x01 && opc3 == 0x01)
               || (opc2 == 0x00 && opc3 == 0x03))
        {
          if (!bit (arm_insn_r->arm_insn, 11))
            {
              if (bit (arm_insn_r->arm_insn, 6))
                curr_insn_type = INSN_T0;
              else
                curr_insn_type = INSN_T1;
            }
          else
            {
              if (dp_op_sz)
                curr_insn_type = INSN_T1;
              else
                curr_insn_type = INSN_T2;
            }
        }
      /* Handle VSQRT.  */
      else if (opc2 == 0x01 && opc3 == 0x03)
        {
          if (dp_op_sz)
            curr_insn_type = INSN_T1;
          else
            curr_insn_type = INSN_T2;
        }
      /* Handle VCVT.  */
      else if (opc2 == 0x07 && opc3 == 0x03)
        {
          /* Note the dp_op_sz test is inverted relative to the other
             cases — presumably because this is the double<->single
             conversion, whose destination has the opposite precision of
             the source; confirm against the ARM ARM VCVT encoding.  */
          if (!dp_op_sz)
            curr_insn_type = INSN_T1;
          else
            curr_insn_type = INSN_T2;
        }
      else if (opc3 & 0x01)
        {
          /* Handle VCVT.  */
          if ((opc2 == 0x08) || (opc2 & 0x0e) == 0x0c)
            {
              if (!bit (arm_insn_r->arm_insn, 18))
                curr_insn_type = INSN_T2;
              else
                {
                  if (dp_op_sz)
                    curr_insn_type = INSN_T1;
                  else
                    curr_insn_type = INSN_T2;
                }
            }
          /* Handle VCVT.  */
          else if ((opc2 & 0x0e) == 0x0a || (opc2 & 0x0e) == 0x0e)
            {
              if (dp_op_sz)
                curr_insn_type = INSN_T1;
              else
                curr_insn_type = INSN_T2;
            }
          /* Handle VCVTB, VCVTT.  */
          else if ((opc2 & 0x0e) == 0x02)
            curr_insn_type = INSN_T2;
          /* Handle VCMP, VCMPE.  */
          else if ((opc2 & 0x0e) == 0x04)
            curr_insn_type = INSN_T3;
        }
    }

  switch (curr_insn_type)
    {
    case INSN_T0:
      reg_vd = reg_vd | (bit_d << 4);
      record_buf[0] = reg_vd + ARM_D0_REGNUM;
      record_buf[1] = reg_vd + ARM_D0_REGNUM + 1;
      arm_insn_r->reg_rec_count = 2;
      break;

    case INSN_T1:
      reg_vd = reg_vd | (bit_d << 4);
      record_buf[0] = reg_vd + ARM_D0_REGNUM;
      arm_insn_r->reg_rec_count = 1;
      break;

    case INSN_T2:
      /* NOTE(review): this records ARM_D0_REGNUM plus the full S
         register number (Vd:D), whereas e.g. arm_record_exreg_ld_st_insn
         records ARM_D0_REGNUM + S / 2 for single-precision — verify
         which mapping the register numbering expects.  */
      reg_vd = (reg_vd << 1) | bit_d;
      record_buf[0] = reg_vd + ARM_D0_REGNUM;
      arm_insn_r->reg_rec_count = 1;
      break;

    case INSN_T3:
      record_buf[0] = ARM_FPSCR_REGNUM;
      arm_insn_r->reg_rec_count = 1;
      break;

    default:
      gdb_assert_not_reached ("no decoding pattern found");
      break;
    }

  REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf);
  return 0;
}
11531
11532 /* Handling opcode 110 insns. */
11533
11534 static int
11535 arm_record_asimd_vfp_coproc (insn_decode_record *arm_insn_r)
11536 {
11537 uint32_t op1, op1_ebit, coproc;
11538
11539 coproc = bits (arm_insn_r->arm_insn, 8, 11);
11540 op1 = bits (arm_insn_r->arm_insn, 20, 25);
11541 op1_ebit = bit (arm_insn_r->arm_insn, 20);
11542
11543 if ((coproc & 0x0e) == 0x0a)
11544 {
11545 /* Handle extension register ld/st instructions. */
11546 if (!(op1 & 0x20))
11547 return arm_record_exreg_ld_st_insn (arm_insn_r);
11548
11549 /* 64-bit transfers between arm core and extension registers. */
11550 if ((op1 & 0x3e) == 0x04)
11551 return arm_record_exreg_ld_st_insn (arm_insn_r);
11552 }
11553 else
11554 {
11555 /* Handle coprocessor ld/st instructions. */
11556 if (!(op1 & 0x3a))
11557 {
11558 /* Store. */
11559 if (!op1_ebit)
11560 return arm_record_unsupported_insn (arm_insn_r);
11561 else
11562 /* Load. */
11563 return arm_record_unsupported_insn (arm_insn_r);
11564 }
11565
11566 /* Move to coprocessor from two arm core registers. */
11567 if (op1 == 0x4)
11568 return arm_record_unsupported_insn (arm_insn_r);
11569
11570 /* Move to two arm core registers from coprocessor. */
11571 if (op1 == 0x5)
11572 {
11573 uint32_t reg_t[2];
11574
11575 reg_t[0] = bits (arm_insn_r->arm_insn, 12, 15);
11576 reg_t[1] = bits (arm_insn_r->arm_insn, 16, 19);
11577 arm_insn_r->reg_rec_count = 2;
11578
11579 REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, reg_t);
11580 return 0;
11581 }
11582 }
11583 return arm_record_unsupported_insn (arm_insn_r);
11584 }
11585
11586 /* Handling opcode 111 insns. */
11587
11588 static int
11589 arm_record_coproc_data_proc (insn_decode_record *arm_insn_r)
11590 {
11591 uint32_t op, op1_ebit, coproc, bits_24_25;
11592 struct gdbarch_tdep *tdep = gdbarch_tdep (arm_insn_r->gdbarch);
11593 struct regcache *reg_cache = arm_insn_r->regcache;
11594
11595 arm_insn_r->opcode = bits (arm_insn_r->arm_insn, 24, 27);
11596 coproc = bits (arm_insn_r->arm_insn, 8, 11);
11597 op1_ebit = bit (arm_insn_r->arm_insn, 20);
11598 op = bit (arm_insn_r->arm_insn, 4);
11599 bits_24_25 = bits (arm_insn_r->arm_insn, 24, 25);
11600
11601 /* Handle arm SWI/SVC system call instructions. */
11602 if (bits_24_25 == 0x3)
11603 {
11604 if (tdep->arm_syscall_record != NULL)
11605 {
11606 ULONGEST svc_operand, svc_number;
11607
11608 svc_operand = (0x00ffffff & arm_insn_r->arm_insn);
11609
11610 if (svc_operand) /* OABI. */
11611 svc_number = svc_operand - 0x900000;
11612 else /* EABI. */
11613 regcache_raw_read_unsigned (reg_cache, 7, &svc_number);
11614
11615 return tdep->arm_syscall_record (reg_cache, svc_number);
11616 }
11617 else
11618 {
11619 printf_unfiltered (_("no syscall record support\n"));
11620 return -1;
11621 }
11622 }
11623 else if (bits_24_25 == 0x02)
11624 {
11625 if (op)
11626 {
11627 if ((coproc & 0x0e) == 0x0a)
11628 {
11629 /* 8, 16, and 32-bit transfer */
11630 return arm_record_vdata_transfer_insn (arm_insn_r);
11631 }
11632 else
11633 {
11634 if (op1_ebit)
11635 {
11636 /* MRC, MRC2 */
11637 uint32_t record_buf[1];
11638
11639 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
11640 if (record_buf[0] == 15)
11641 record_buf[0] = ARM_PS_REGNUM;
11642
11643 arm_insn_r->reg_rec_count = 1;
11644 REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count,
11645 record_buf);
11646 return 0;
11647 }
11648 else
11649 {
11650 /* MCR, MCR2 */
11651 return -1;
11652 }
11653 }
11654 }
11655 else
11656 {
11657 if ((coproc & 0x0e) == 0x0a)
11658 {
11659 /* VFP data-processing instructions. */
11660 return arm_record_vfp_data_proc_insn (arm_insn_r);
11661 }
11662 else
11663 {
11664 /* CDP, CDP2 */
11665 return -1;
11666 }
11667 }
11668 }
11669 else
11670 {
11671 unsigned int op1 = bits (arm_insn_r->arm_insn, 20, 25);
11672
11673 if (op1 == 5)
11674 {
11675 if ((coproc & 0x0e) != 0x0a)
11676 {
11677 /* MRRC, MRRC2 */
11678 return -1;
11679 }
11680 }
11681 else if (op1 == 4 || op1 == 5)
11682 {
11683 if ((coproc & 0x0e) == 0x0a)
11684 {
11685 /* 64-bit transfers between ARM core and extension */
11686 return -1;
11687 }
11688 else if (op1 == 4)
11689 {
11690 /* MCRR, MCRR2 */
11691 return -1;
11692 }
11693 }
11694 else if (op1 == 0 || op1 == 1)
11695 {
11696 /* UNDEFINED */
11697 return -1;
11698 }
11699 else
11700 {
11701 if ((coproc & 0x0e) == 0x0a)
11702 {
11703 /* Extension register load/store */
11704 }
11705 else
11706 {
11707 /* STC, STC2, LDC, LDC2 */
11708 }
11709 return -1;
11710 }
11711 }
11712
11713 return -1;
11714 }
11715
11716 /* Handling opcode 000 insns. */
11717
11718 static int
11719 thumb_record_shift_add_sub (insn_decode_record *thumb_insn_r)
11720 {
11721 uint32_t record_buf[8];
11722 uint32_t reg_src1 = 0;
11723
11724 reg_src1 = bits (thumb_insn_r->arm_insn, 0, 2);
11725
11726 record_buf[0] = ARM_PS_REGNUM;
11727 record_buf[1] = reg_src1;
11728 thumb_insn_r->reg_rec_count = 2;
11729
11730 REG_ALLOC (thumb_insn_r->arm_regs, thumb_insn_r->reg_rec_count, record_buf);
11731
11732 return 0;
11733 }
11734
11735
11736 /* Handling opcode 001 insns. */
11737
11738 static int
11739 thumb_record_add_sub_cmp_mov (insn_decode_record *thumb_insn_r)
11740 {
11741 uint32_t record_buf[8];
11742 uint32_t reg_src1 = 0;
11743
11744 reg_src1 = bits (thumb_insn_r->arm_insn, 8, 10);
11745
11746 record_buf[0] = ARM_PS_REGNUM;
11747 record_buf[1] = reg_src1;
11748 thumb_insn_r->reg_rec_count = 2;
11749
11750 REG_ALLOC (thumb_insn_r->arm_regs, thumb_insn_r->reg_rec_count, record_buf);
11751
11752 return 0;
11753 }
11754
11755 /* Handling opcode 010 insns. */
11756
11757 static int
11758 thumb_record_ld_st_reg_offset (insn_decode_record *thumb_insn_r)
11759 {
11760 struct regcache *reg_cache = thumb_insn_r->regcache;
11761 uint32_t record_buf[8], record_buf_mem[8];
11762
11763 uint32_t reg_src1 = 0, reg_src2 = 0;
11764 uint32_t opcode1 = 0, opcode2 = 0, opcode3 = 0;
11765
11766 ULONGEST u_regval[2] = {0};
11767
11768 opcode1 = bits (thumb_insn_r->arm_insn, 10, 12);
11769
11770 if (bit (thumb_insn_r->arm_insn, 12))
11771 {
11772 /* Handle load/store register offset. */
11773 uint32_t opB = bits (thumb_insn_r->arm_insn, 9, 11);
11774
11775 if (in_inclusive_range (opB, 4U, 7U))
11776 {
11777 /* LDR(2), LDRB(2) , LDRH(2), LDRSB, LDRSH. */
11778 reg_src1 = bits (thumb_insn_r->arm_insn,0, 2);
11779 record_buf[0] = reg_src1;
11780 thumb_insn_r->reg_rec_count = 1;
11781 }
11782 else if (in_inclusive_range (opB, 0U, 2U))
11783 {
11784 /* STR(2), STRB(2), STRH(2) . */
11785 reg_src1 = bits (thumb_insn_r->arm_insn, 3, 5);
11786 reg_src2 = bits (thumb_insn_r->arm_insn, 6, 8);
11787 regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval[0]);
11788 regcache_raw_read_unsigned (reg_cache, reg_src2, &u_regval[1]);
11789 if (0 == opB)
11790 record_buf_mem[0] = 4; /* STR (2). */
11791 else if (2 == opB)
11792 record_buf_mem[0] = 1; /* STRB (2). */
11793 else if (1 == opB)
11794 record_buf_mem[0] = 2; /* STRH (2). */
11795 record_buf_mem[1] = u_regval[0] + u_regval[1];
11796 thumb_insn_r->mem_rec_count = 1;
11797 }
11798 }
11799 else if (bit (thumb_insn_r->arm_insn, 11))
11800 {
11801 /* Handle load from literal pool. */
11802 /* LDR(3). */
11803 reg_src1 = bits (thumb_insn_r->arm_insn, 8, 10);
11804 record_buf[0] = reg_src1;
11805 thumb_insn_r->reg_rec_count = 1;
11806 }
11807 else if (opcode1)
11808 {
11809 /* Special data instructions and branch and exchange */
11810 opcode2 = bits (thumb_insn_r->arm_insn, 8, 9);
11811 opcode3 = bits (thumb_insn_r->arm_insn, 0, 2);
11812 if ((3 == opcode2) && (!opcode3))
11813 {
11814 /* Branch with exchange. */
11815 record_buf[0] = ARM_PS_REGNUM;
11816 thumb_insn_r->reg_rec_count = 1;
11817 }
11818 else
11819 {
11820 /* Format 8; special data processing insns. */
11821 record_buf[0] = ARM_PS_REGNUM;
11822 record_buf[1] = (bit (thumb_insn_r->arm_insn, 7) << 3
11823 | bits (thumb_insn_r->arm_insn, 0, 2));
11824 thumb_insn_r->reg_rec_count = 2;
11825 }
11826 }
11827 else
11828 {
11829 /* Format 5; data processing insns. */
11830 reg_src1 = bits (thumb_insn_r->arm_insn, 0, 2);
11831 if (bit (thumb_insn_r->arm_insn, 7))
11832 {
11833 reg_src1 = reg_src1 + 8;
11834 }
11835 record_buf[0] = ARM_PS_REGNUM;
11836 record_buf[1] = reg_src1;
11837 thumb_insn_r->reg_rec_count = 2;
11838 }
11839
11840 REG_ALLOC (thumb_insn_r->arm_regs, thumb_insn_r->reg_rec_count, record_buf);
11841 MEM_ALLOC (thumb_insn_r->arm_mems, thumb_insn_r->mem_rec_count,
11842 record_buf_mem);
11843
11844 return 0;
11845 }
11846
/* Handling opcode 011 insns.  */
11848
11849 static int
11850 thumb_record_ld_st_imm_offset (insn_decode_record *thumb_insn_r)
11851 {
11852 struct regcache *reg_cache = thumb_insn_r->regcache;
11853 uint32_t record_buf[8], record_buf_mem[8];
11854
11855 uint32_t reg_src1 = 0;
11856 uint32_t opcode = 0, immed_5 = 0;
11857
11858 ULONGEST u_regval = 0;
11859
11860 opcode = bits (thumb_insn_r->arm_insn, 11, 12);
11861
11862 if (opcode)
11863 {
11864 /* LDR(1). */
11865 reg_src1 = bits (thumb_insn_r->arm_insn, 0, 2);
11866 record_buf[0] = reg_src1;
11867 thumb_insn_r->reg_rec_count = 1;
11868 }
11869 else
11870 {
11871 /* STR(1). */
11872 reg_src1 = bits (thumb_insn_r->arm_insn, 3, 5);
11873 immed_5 = bits (thumb_insn_r->arm_insn, 6, 10);
11874 regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval);
11875 record_buf_mem[0] = 4;
11876 record_buf_mem[1] = u_regval + (immed_5 * 4);
11877 thumb_insn_r->mem_rec_count = 1;
11878 }
11879
11880 REG_ALLOC (thumb_insn_r->arm_regs, thumb_insn_r->reg_rec_count, record_buf);
11881 MEM_ALLOC (thumb_insn_r->arm_mems, thumb_insn_r->mem_rec_count,
11882 record_buf_mem);
11883
11884 return 0;
11885 }
11886
11887 /* Handling opcode 100 insns. */
11888
11889 static int
11890 thumb_record_ld_st_stack (insn_decode_record *thumb_insn_r)
11891 {
11892 struct regcache *reg_cache = thumb_insn_r->regcache;
11893 uint32_t record_buf[8], record_buf_mem[8];
11894
11895 uint32_t reg_src1 = 0;
11896 uint32_t opcode = 0, immed_8 = 0, immed_5 = 0;
11897
11898 ULONGEST u_regval = 0;
11899
11900 opcode = bits (thumb_insn_r->arm_insn, 11, 12);
11901
11902 if (3 == opcode)
11903 {
11904 /* LDR(4). */
11905 reg_src1 = bits (thumb_insn_r->arm_insn, 8, 10);
11906 record_buf[0] = reg_src1;
11907 thumb_insn_r->reg_rec_count = 1;
11908 }
11909 else if (1 == opcode)
11910 {
11911 /* LDRH(1). */
11912 reg_src1 = bits (thumb_insn_r->arm_insn, 0, 2);
11913 record_buf[0] = reg_src1;
11914 thumb_insn_r->reg_rec_count = 1;
11915 }
11916 else if (2 == opcode)
11917 {
11918 /* STR(3). */
11919 immed_8 = bits (thumb_insn_r->arm_insn, 0, 7);
11920 regcache_raw_read_unsigned (reg_cache, ARM_SP_REGNUM, &u_regval);
11921 record_buf_mem[0] = 4;
11922 record_buf_mem[1] = u_regval + (immed_8 * 4);
11923 thumb_insn_r->mem_rec_count = 1;
11924 }
11925 else if (0 == opcode)
11926 {
11927 /* STRH(1). */
11928 immed_5 = bits (thumb_insn_r->arm_insn, 6, 10);
11929 reg_src1 = bits (thumb_insn_r->arm_insn, 3, 5);
11930 regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval);
11931 record_buf_mem[0] = 2;
11932 record_buf_mem[1] = u_regval + (immed_5 * 2);
11933 thumb_insn_r->mem_rec_count = 1;
11934 }
11935
11936 REG_ALLOC (thumb_insn_r->arm_regs, thumb_insn_r->reg_rec_count, record_buf);
11937 MEM_ALLOC (thumb_insn_r->arm_mems, thumb_insn_r->mem_rec_count,
11938 record_buf_mem);
11939
11940 return 0;
11941 }
11942
11943 /* Handling opcode 101 insns. */
11944
11945 static int
11946 thumb_record_misc (insn_decode_record *thumb_insn_r)
11947 {
11948 struct regcache *reg_cache = thumb_insn_r->regcache;
11949
11950 uint32_t opcode = 0;
11951 uint32_t register_bits = 0, register_count = 0;
11952 uint32_t index = 0, start_address = 0;
11953 uint32_t record_buf[24], record_buf_mem[48];
11954 uint32_t reg_src1;
11955
11956 ULONGEST u_regval = 0;
11957
11958 opcode = bits (thumb_insn_r->arm_insn, 11, 12);
11959
11960 if (opcode == 0 || opcode == 1)
11961 {
11962 /* ADR and ADD (SP plus immediate) */
11963
11964 reg_src1 = bits (thumb_insn_r->arm_insn, 8, 10);
11965 record_buf[0] = reg_src1;
11966 thumb_insn_r->reg_rec_count = 1;
11967 }
11968 else
11969 {
11970 /* Miscellaneous 16-bit instructions */
11971 uint32_t opcode2 = bits (thumb_insn_r->arm_insn, 8, 11);
11972
11973 switch (opcode2)
11974 {
11975 case 6:
11976 /* SETEND and CPS */
11977 break;
11978 case 0:
11979 /* ADD/SUB (SP plus immediate) */
11980 reg_src1 = bits (thumb_insn_r->arm_insn, 8, 10);
11981 record_buf[0] = ARM_SP_REGNUM;
11982 thumb_insn_r->reg_rec_count = 1;
11983 break;
11984 case 1: /* fall through */
11985 case 3: /* fall through */
11986 case 9: /* fall through */
11987 case 11:
11988 /* CBNZ, CBZ */
11989 break;
11990 case 2:
11991 /* SXTH, SXTB, UXTH, UXTB */
11992 record_buf[0] = bits (thumb_insn_r->arm_insn, 0, 2);
11993 thumb_insn_r->reg_rec_count = 1;
11994 break;
11995 case 4: /* fall through */
11996 case 5:
11997 /* PUSH. */
11998 register_bits = bits (thumb_insn_r->arm_insn, 0, 7);
11999 regcache_raw_read_unsigned (reg_cache, ARM_SP_REGNUM, &u_regval);
12000 while (register_bits)
12001 {
12002 if (register_bits & 0x00000001)
12003 register_count++;
12004 register_bits = register_bits >> 1;
12005 }
12006 start_address = u_regval - \
12007 (4 * (bit (thumb_insn_r->arm_insn, 8) + register_count));
12008 thumb_insn_r->mem_rec_count = register_count;
12009 while (register_count)
12010 {
12011 record_buf_mem[(register_count * 2) - 1] = start_address;
12012 record_buf_mem[(register_count * 2) - 2] = 4;
12013 start_address = start_address + 4;
12014 register_count--;
12015 }
12016 record_buf[0] = ARM_SP_REGNUM;
12017 thumb_insn_r->reg_rec_count = 1;
12018 break;
12019 case 10:
12020 /* REV, REV16, REVSH */
12021 record_buf[0] = bits (thumb_insn_r->arm_insn, 0, 2);
12022 thumb_insn_r->reg_rec_count = 1;
12023 break;
12024 case 12: /* fall through */
12025 case 13:
12026 /* POP. */
12027 register_bits = bits (thumb_insn_r->arm_insn, 0, 7);
12028 while (register_bits)
12029 {
12030 if (register_bits & 0x00000001)
12031 record_buf[index++] = register_count;
12032 register_bits = register_bits >> 1;
12033 register_count++;
12034 }
12035 record_buf[index++] = ARM_PS_REGNUM;
12036 record_buf[index++] = ARM_SP_REGNUM;
12037 thumb_insn_r->reg_rec_count = index;
12038 break;
12039 case 0xe:
12040 /* BKPT insn. */
12041 /* Handle enhanced software breakpoint insn, BKPT. */
12042 /* CPSR is changed to be executed in ARM state, disabling normal
12043 interrupts, entering abort mode. */
12044 /* According to high vector configuration PC is set. */
12045 /* User hits breakpoint and type reverse, in that case, we need to go back with
12046 previous CPSR and Program Counter. */
12047 record_buf[0] = ARM_PS_REGNUM;
12048 record_buf[1] = ARM_LR_REGNUM;
12049 thumb_insn_r->reg_rec_count = 2;
12050 /* We need to save SPSR value, which is not yet done. */
12051 printf_unfiltered (_("Process record does not support instruction "
12052 "0x%0x at address %s.\n"),
12053 thumb_insn_r->arm_insn,
12054 paddress (thumb_insn_r->gdbarch,
12055 thumb_insn_r->this_addr));
12056 return -1;
12057
12058 case 0xf:
12059 /* If-Then, and hints */
12060 break;
12061 default:
12062 return -1;
12063 };
12064 }
12065
12066 REG_ALLOC (thumb_insn_r->arm_regs, thumb_insn_r->reg_rec_count, record_buf);
12067 MEM_ALLOC (thumb_insn_r->arm_mems, thumb_insn_r->mem_rec_count,
12068 record_buf_mem);
12069
12070 return 0;
12071 }
12072
12073 /* Handling opcode 110 insns. */
12074
12075 static int
12076 thumb_record_ldm_stm_swi (insn_decode_record *thumb_insn_r)
12077 {
12078 struct gdbarch_tdep *tdep = gdbarch_tdep (thumb_insn_r->gdbarch);
12079 struct regcache *reg_cache = thumb_insn_r->regcache;
12080
12081 uint32_t ret = 0; /* function return value: -1:record failure ; 0:success */
12082 uint32_t reg_src1 = 0;
12083 uint32_t opcode1 = 0, opcode2 = 0, register_bits = 0, register_count = 0;
12084 uint32_t index = 0, start_address = 0;
12085 uint32_t record_buf[24], record_buf_mem[48];
12086
12087 ULONGEST u_regval = 0;
12088
12089 opcode1 = bits (thumb_insn_r->arm_insn, 8, 12);
12090 opcode2 = bits (thumb_insn_r->arm_insn, 11, 12);
12091
12092 if (1 == opcode2)
12093 {
12094
12095 /* LDMIA. */
12096 register_bits = bits (thumb_insn_r->arm_insn, 0, 7);
12097 /* Get Rn. */
12098 reg_src1 = bits (thumb_insn_r->arm_insn, 8, 10);
12099 while (register_bits)
12100 {
12101 if (register_bits & 0x00000001)
12102 record_buf[index++] = register_count;
12103 register_bits = register_bits >> 1;
12104 register_count++;
12105 }
12106 record_buf[index++] = reg_src1;
12107 thumb_insn_r->reg_rec_count = index;
12108 }
12109 else if (0 == opcode2)
12110 {
12111 /* It handles both STMIA. */
12112 register_bits = bits (thumb_insn_r->arm_insn, 0, 7);
12113 /* Get Rn. */
12114 reg_src1 = bits (thumb_insn_r->arm_insn, 8, 10);
12115 regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval);
12116 while (register_bits)
12117 {
12118 if (register_bits & 0x00000001)
12119 register_count++;
12120 register_bits = register_bits >> 1;
12121 }
12122 start_address = u_regval;
12123 thumb_insn_r->mem_rec_count = register_count;
12124 while (register_count)
12125 {
12126 record_buf_mem[(register_count * 2) - 1] = start_address;
12127 record_buf_mem[(register_count * 2) - 2] = 4;
12128 start_address = start_address + 4;
12129 register_count--;
12130 }
12131 }
12132 else if (0x1F == opcode1)
12133 {
12134 /* Handle arm syscall insn. */
12135 if (tdep->arm_syscall_record != NULL)
12136 {
12137 regcache_raw_read_unsigned (reg_cache, 7, &u_regval);
12138 ret = tdep->arm_syscall_record (reg_cache, u_regval);
12139 }
12140 else
12141 {
12142 printf_unfiltered (_("no syscall record support\n"));
12143 return -1;
12144 }
12145 }
12146
12147 /* B (1), conditional branch is automatically taken care in process_record,
12148 as PC is saved there. */
12149
12150 REG_ALLOC (thumb_insn_r->arm_regs, thumb_insn_r->reg_rec_count, record_buf);
12151 MEM_ALLOC (thumb_insn_r->arm_mems, thumb_insn_r->mem_rec_count,
12152 record_buf_mem);
12153
12154 return ret;
12155 }
12156
12157 /* Handling opcode 111 insns. */
12158
12159 static int
12160 thumb_record_branch (insn_decode_record *thumb_insn_r)
12161 {
12162 uint32_t record_buf[8];
12163 uint32_t bits_h = 0;
12164
12165 bits_h = bits (thumb_insn_r->arm_insn, 11, 12);
12166
12167 if (2 == bits_h || 3 == bits_h)
12168 {
12169 /* BL */
12170 record_buf[0] = ARM_LR_REGNUM;
12171 thumb_insn_r->reg_rec_count = 1;
12172 }
12173 else if (1 == bits_h)
12174 {
12175 /* BLX(1). */
12176 record_buf[0] = ARM_PS_REGNUM;
12177 record_buf[1] = ARM_LR_REGNUM;
12178 thumb_insn_r->reg_rec_count = 2;
12179 }
12180
12181 /* B(2) is automatically taken care in process_record, as PC is
12182 saved there. */
12183
12184 REG_ALLOC (thumb_insn_r->arm_regs, thumb_insn_r->reg_rec_count, record_buf);
12185
12186 return 0;
12187 }
12188
12189 /* Handler for thumb2 load/store multiple instructions. */
12190
static int
thumb2_record_ld_st_multiple (insn_decode_record *thumb2_insn_r)
{
  struct regcache *reg_cache = thumb2_insn_r->regcache;

  uint32_t reg_rn, op;
  uint32_t register_bits = 0, register_count = 0;
  uint32_t index = 0, start_address = 0;
  uint32_t record_buf[24], record_buf_mem[48];

  ULONGEST u_regval = 0;

  reg_rn = bits (thumb2_insn_r->arm_insn, 16, 19);
  op = bits (thumb2_insn_r->arm_insn, 23, 24);

  if (0 == op || 3 == op)
    {
      /* The L bit distinguishes loads (set) from stores.  */
      if (bit (thumb2_insn_r->arm_insn, INSN_S_L_BIT_NUM))
        {
          /* Handle RFE instruction.  */
          record_buf[0] = ARM_PS_REGNUM;
          thumb2_insn_r->reg_rec_count = 1;
        }
      else
        {
          /* Handle SRS instruction after reading banked SP.  */
          return arm_record_unsupported_insn (thumb2_insn_r);
        }
    }
  else if (1 == op || 2 == op)
    {
      if (bit (thumb2_insn_r->arm_insn, INSN_S_L_BIT_NUM))
        {
          /* Handle LDM/LDMIA/LDMFD and LDMDB/LDMEA instructions:
             every listed register, the base Rn, and CPSR change.  */
          register_bits = bits (thumb2_insn_r->arm_insn, 0, 15);
          while (register_bits)
            {
              if (register_bits & 0x00000001)
                record_buf[index++] = register_count;

              register_count++;
              register_bits = register_bits >> 1;
            }
          record_buf[index++] = reg_rn;
          record_buf[index++] = ARM_PS_REGNUM;
          thumb2_insn_r->reg_rec_count = index;
        }
      else
        {
          /* Handle STM/STMIA/STMEA and STMDB/STMFD.  */
          register_bits = bits (thumb2_insn_r->arm_insn, 0, 15);
          regcache_raw_read_unsigned (reg_cache, reg_rn, &u_regval);
          /* Count the listed registers to size the memory record.  */
          while (register_bits)
            {
              if (register_bits & 0x00000001)
                register_count++;

              register_bits = register_bits >> 1;
            }

          if (1 == op)
            {
              /* Start address for STM/STMIA/STMEA (increment after):
                 stores begin at the address in Rn.  */
              start_address = u_regval;
            }
          else if (2 == op)
            {
              /* Start address for STMDB/STMFD (decrement before):
                 stores begin one word per register below Rn.  */
              start_address = u_regval - register_count * 4;
            }

          thumb2_insn_r->mem_rec_count = register_count;
          while (register_count)
            {
              /* record_buf_mem holds (length, address) pairs.  */
              record_buf_mem[register_count * 2 - 1] = start_address;
              record_buf_mem[register_count * 2 - 2] = 4;
              start_address = start_address + 4;
              register_count--;
            }
          record_buf[0] = reg_rn;
          record_buf[1] = ARM_PS_REGNUM;
          thumb2_insn_r->reg_rec_count = 2;
        }
    }

  MEM_ALLOC (thumb2_insn_r->arm_mems, thumb2_insn_r->mem_rec_count,
             record_buf_mem);
  REG_ALLOC (thumb2_insn_r->arm_regs, thumb2_insn_r->reg_rec_count,
             record_buf);
  return ARM_RECORD_SUCCESS;
}
12282
/* Handler for thumb2 load/store (dual/exclusive) and table branch
   instructions.

   Records the registers and memory ranges the instruction will modify
   so process record can undo it.  Memory records are collected in
   RECORD_BUF_MEM as (length, address) pairs (consumed pairwise by
   MEM_ALLOC); register numbers are collected in RECORD_BUF.  */

static int
thumb2_record_ld_st_dual_ex_tbb (insn_decode_record *thumb2_insn_r)
{
  struct regcache *reg_cache = thumb2_insn_r->regcache;

  uint32_t reg_rd, reg_rn, offset_imm;
  uint32_t reg_dest1, reg_dest2;
  uint32_t address, offset_addr;
  uint32_t record_buf[8], record_buf_mem[8];
  uint32_t op1, op2, op3;

  ULONGEST u_regval[2];

  /* Sub-opcode fields selecting the exact load/store-dual/exclusive or
     table-branch form.  */
  op1 = bits (thumb2_insn_r->arm_insn, 23, 24);
  op2 = bits (thumb2_insn_r->arm_insn, 20, 21);
  op3 = bits (thumb2_insn_r->arm_insn, 4, 7);

  if (bit (thumb2_insn_r->arm_insn, INSN_S_L_BIT_NUM))
    {
      /* Load forms: only registers (and flags) change.  Skip recording
	 Rt for op1 == 1, op2 == 1, op3 == 0/1 — presumably the table
	 branch (TBB/TBH) encodings, which write no GPR.  */
      if(!(1 == op1 && 1 == op2 && (0 == op3 || 1 == op3)))
	{
	  reg_dest1 = bits (thumb2_insn_r->arm_insn, 12, 15);
	  record_buf[0] = reg_dest1;
	  record_buf[1] = ARM_PS_REGNUM;
	  thumb2_insn_r->reg_rec_count = 2;
	}

      /* Dual/exclusive-doubleword forms also write a second destination
	 register in bits 8-11.  */
      if (3 == op2 || (op1 & 2) || (1 == op1 && 1 == op2 && 7 == op3))
	{
	  reg_dest2 = bits (thumb2_insn_r->arm_insn, 8, 11);
	  record_buf[2] = reg_dest2;
	  thumb2_insn_r->reg_rec_count = 3;
	}
    }
  else
    {
      /* Store forms: memory is modified; record the target ranges.
	 The base register value is needed to compute addresses.  */
      reg_rn = bits (thumb2_insn_r->arm_insn, 16, 19);
      regcache_raw_read_unsigned (reg_cache, reg_rn, &u_regval[0]);

      if (0 == op1 && 0 == op2)
	{
	  /* Handle STREX: one word at Rn + imm8*4, plus the status
	     result register Rd.  */
	  offset_imm = bits (thumb2_insn_r->arm_insn, 0, 7);
	  address = u_regval[0] + (offset_imm * 4);
	  record_buf_mem[0] = 4;
	  record_buf_mem[1] = address;
	  thumb2_insn_r->mem_rec_count = 1;
	  reg_rd = bits (thumb2_insn_r->arm_insn, 0, 3);
	  record_buf[0] = reg_rd;
	  thumb2_insn_r->reg_rec_count = 1;
	}
      else if (1 == op1 && 0 == op2)
	{
	  /* Exclusive store variants selected by op3; all write the
	     status register Rd and memory at the base address.  */
	  reg_rd = bits (thumb2_insn_r->arm_insn, 0, 3);
	  record_buf[0] = reg_rd;
	  thumb2_insn_r->reg_rec_count = 1;
	  address = u_regval[0];
	  record_buf_mem[1] = address;

	  if (4 == op3)
	    {
	      /* Handle STREXB: one byte.  */
	      record_buf_mem[0] = 1;
	      thumb2_insn_r->mem_rec_count = 1;
	    }
	  else if (5 == op3)
	    {
	      /* Handle STREXH: one halfword.  */
	      record_buf_mem[0] = 2;
	      thumb2_insn_r->mem_rec_count = 1;
	    }
	  else if (7 == op3)
	    {
	      /* Handle STREXD: two consecutive words.  */
	      address = u_regval[0];
	      record_buf_mem[0] = 4;
	      record_buf_mem[2] = 4;
	      record_buf_mem[3] = address + 4;
	      thumb2_insn_r->mem_rec_count = 2;
	    }
	}
      else
	{
	  /* STRD (immediate): two consecutive words, with pre/post
	     index and add/subtract selected by bits 24 and 23.  */
	  offset_imm = bits (thumb2_insn_r->arm_insn, 0, 7);

	  if (bit (thumb2_insn_r->arm_insn, 24))
	    {
	      if (bit (thumb2_insn_r->arm_insn, 23))
		offset_addr = u_regval[0] + (offset_imm * 4);
	      else
		offset_addr = u_regval[0] - (offset_imm * 4);

	      address = offset_addr;
	    }
	  else
	    address = u_regval[0];

	  record_buf_mem[0] = 4;
	  record_buf_mem[1] = address;
	  record_buf_mem[2] = 4;
	  record_buf_mem[3] = address + 4;
	  thumb2_insn_r->mem_rec_count = 2;
	  /* The base register may be written back.  */
	  record_buf[0] = reg_rn;
	  thumb2_insn_r->reg_rec_count = 1;
	}
    }

  REG_ALLOC (thumb2_insn_r->arm_regs, thumb2_insn_r->reg_rec_count,
	     record_buf);
  MEM_ALLOC (thumb2_insn_r->arm_mems, thumb2_insn_r->mem_rec_count,
	     record_buf_mem);
  return ARM_RECORD_SUCCESS;
}
12399
12400 /* Handler for thumb2 data processing (shift register and modified immediate)
12401 instructions. */
12402
12403 static int
12404 thumb2_record_data_proc_sreg_mimm (insn_decode_record *thumb2_insn_r)
12405 {
12406 uint32_t reg_rd, op;
12407 uint32_t record_buf[8];
12408
12409 op = bits (thumb2_insn_r->arm_insn, 21, 24);
12410 reg_rd = bits (thumb2_insn_r->arm_insn, 8, 11);
12411
12412 if ((0 == op || 4 == op || 8 == op || 13 == op) && 15 == reg_rd)
12413 {
12414 record_buf[0] = ARM_PS_REGNUM;
12415 thumb2_insn_r->reg_rec_count = 1;
12416 }
12417 else
12418 {
12419 record_buf[0] = reg_rd;
12420 record_buf[1] = ARM_PS_REGNUM;
12421 thumb2_insn_r->reg_rec_count = 2;
12422 }
12423
12424 REG_ALLOC (thumb2_insn_r->arm_regs, thumb2_insn_r->reg_rec_count,
12425 record_buf);
12426 return ARM_RECORD_SUCCESS;
12427 }
12428
12429 /* Generic handler for thumb2 instructions which effect destination and PS
12430 registers. */
12431
12432 static int
12433 thumb2_record_ps_dest_generic (insn_decode_record *thumb2_insn_r)
12434 {
12435 uint32_t reg_rd;
12436 uint32_t record_buf[8];
12437
12438 reg_rd = bits (thumb2_insn_r->arm_insn, 8, 11);
12439
12440 record_buf[0] = reg_rd;
12441 record_buf[1] = ARM_PS_REGNUM;
12442 thumb2_insn_r->reg_rec_count = 2;
12443
12444 REG_ALLOC (thumb2_insn_r->arm_regs, thumb2_insn_r->reg_rec_count,
12445 record_buf);
12446 return ARM_RECORD_SUCCESS;
12447 }
12448
12449 /* Handler for thumb2 branch and miscellaneous control instructions. */
12450
12451 static int
12452 thumb2_record_branch_misc_cntrl (insn_decode_record *thumb2_insn_r)
12453 {
12454 uint32_t op, op1, op2;
12455 uint32_t record_buf[8];
12456
12457 op = bits (thumb2_insn_r->arm_insn, 20, 26);
12458 op1 = bits (thumb2_insn_r->arm_insn, 12, 14);
12459 op2 = bits (thumb2_insn_r->arm_insn, 8, 11);
12460
12461 /* Handle MSR insn. */
12462 if (!(op1 & 0x2) && 0x38 == op)
12463 {
12464 if (!(op2 & 0x3))
12465 {
12466 /* CPSR is going to be changed. */
12467 record_buf[0] = ARM_PS_REGNUM;
12468 thumb2_insn_r->reg_rec_count = 1;
12469 }
12470 else
12471 {
12472 arm_record_unsupported_insn(thumb2_insn_r);
12473 return -1;
12474 }
12475 }
12476 else if (4 == (op1 & 0x5) || 5 == (op1 & 0x5))
12477 {
12478 /* BLX. */
12479 record_buf[0] = ARM_PS_REGNUM;
12480 record_buf[1] = ARM_LR_REGNUM;
12481 thumb2_insn_r->reg_rec_count = 2;
12482 }
12483
12484 REG_ALLOC (thumb2_insn_r->arm_regs, thumb2_insn_r->reg_rec_count,
12485 record_buf);
12486 return ARM_RECORD_SUCCESS;
12487 }
12488
/* Handler for thumb2 store single data item instructions.

   Computes the effective address of the store, records the memory
   range that will be overwritten (as a (length, address) pair in
   RECORD_BUF_MEM) and records the base register, which may be written
   back.  */

static int
thumb2_record_str_single_data (insn_decode_record *thumb2_insn_r)
{
  struct regcache *reg_cache = thumb2_insn_r->regcache;

  uint32_t reg_rn, reg_rm, offset_imm, shift_imm;
  uint32_t address, offset_addr;
  uint32_t record_buf[8], record_buf_mem[8];
  uint32_t op1, op2;

  ULONGEST u_regval[2];

  /* op1 (bits 21-23) selects the access size and addressing form;
     op2 (bits 6-11) refines the register-offset encodings.  */
  op1 = bits (thumb2_insn_r->arm_insn, 21, 23);
  op2 = bits (thumb2_insn_r->arm_insn, 6, 11);
  reg_rn = bits (thumb2_insn_r->arm_insn, 16, 19);
  regcache_raw_read_unsigned (reg_cache, reg_rn, &u_regval[0]);

  if (bit (thumb2_insn_r->arm_insn, 23))
    {
      /* T2 encoding: 12-bit unsigned immediate offset.
	 NOTE(review): the T2/T3 labels here mirror the original code;
	 confirm against the ARM ARM encoding tables.  */
      offset_imm = bits (thumb2_insn_r->arm_insn, 0, 11);
      offset_addr = u_regval[0] + offset_imm;
      address = offset_addr;
    }
  else
    {
      /* T3 encoding.  */
      if ((0 == op1 || 1 == op1 || 2 == op1) && !(op2 & 0x20))
	{
	  /* Handle STRB (register): address = Rn + (Rm << shift).  */
	  reg_rm = bits (thumb2_insn_r->arm_insn, 0, 3);
	  regcache_raw_read_unsigned (reg_cache, reg_rm, &u_regval[1]);
	  shift_imm = bits (thumb2_insn_r->arm_insn, 4, 5);
	  offset_addr = u_regval[1] << shift_imm;
	  address = u_regval[0] + offset_addr;
	}
      else
	{
	  /* 8-bit immediate forms: bit 10 selects pre-indexing,
	     bit 9 selects add vs subtract.  */
	  offset_imm = bits (thumb2_insn_r->arm_insn, 0, 7);
	  if (bit (thumb2_insn_r->arm_insn, 10))
	    {
	      if (bit (thumb2_insn_r->arm_insn, 9))
		offset_addr = u_regval[0] + offset_imm;
	      else
		offset_addr = u_regval[0] - offset_imm;

	      address = offset_addr;
	    }
	  else
	    address = u_regval[0];
	}
    }

  /* Record length depends on the access size encoded in op1.  */
  switch (op1)
    {
      /* Store byte instructions.  */
      case 4:
      case 0:
	record_buf_mem[0] = 1;
	break;
      /* Store half word instructions.  */
      case 1:
      case 5:
	record_buf_mem[0] = 2;
	break;
      /* Store word instructions.  */
      case 2:
      case 6:
	record_buf_mem[0] = 4;
	break;

      default:
	gdb_assert_not_reached ("no decoding pattern found");
	break;
    }

  record_buf_mem[1] = address;
  thumb2_insn_r->mem_rec_count = 1;
  /* The base register may be written back by indexed forms.  */
  record_buf[0] = reg_rn;
  thumb2_insn_r->reg_rec_count = 1;

  REG_ALLOC (thumb2_insn_r->arm_regs, thumb2_insn_r->reg_rec_count,
	     record_buf);
  MEM_ALLOC (thumb2_insn_r->arm_mems, thumb2_insn_r->mem_rec_count,
	     record_buf_mem);
  return ARM_RECORD_SUCCESS;
}
12578
12579 /* Handler for thumb2 load memory hints instructions. */
12580
12581 static int
12582 thumb2_record_ld_mem_hints (insn_decode_record *thumb2_insn_r)
12583 {
12584 uint32_t record_buf[8];
12585 uint32_t reg_rt, reg_rn;
12586
12587 reg_rt = bits (thumb2_insn_r->arm_insn, 12, 15);
12588 reg_rn = bits (thumb2_insn_r->arm_insn, 16, 19);
12589
12590 if (ARM_PC_REGNUM != reg_rt)
12591 {
12592 record_buf[0] = reg_rt;
12593 record_buf[1] = reg_rn;
12594 record_buf[2] = ARM_PS_REGNUM;
12595 thumb2_insn_r->reg_rec_count = 3;
12596
12597 REG_ALLOC (thumb2_insn_r->arm_regs, thumb2_insn_r->reg_rec_count,
12598 record_buf);
12599 return ARM_RECORD_SUCCESS;
12600 }
12601
12602 return ARM_RECORD_FAILURE;
12603 }
12604
12605 /* Handler for thumb2 load word instructions. */
12606
12607 static int
12608 thumb2_record_ld_word (insn_decode_record *thumb2_insn_r)
12609 {
12610 uint32_t record_buf[8];
12611
12612 record_buf[0] = bits (thumb2_insn_r->arm_insn, 12, 15);
12613 record_buf[1] = ARM_PS_REGNUM;
12614 thumb2_insn_r->reg_rec_count = 2;
12615
12616 REG_ALLOC (thumb2_insn_r->arm_regs, thumb2_insn_r->reg_rec_count,
12617 record_buf);
12618 return ARM_RECORD_SUCCESS;
12619 }
12620
12621 /* Handler for thumb2 long multiply, long multiply accumulate, and
12622 divide instructions. */
12623
12624 static int
12625 thumb2_record_lmul_lmla_div (insn_decode_record *thumb2_insn_r)
12626 {
12627 uint32_t opcode1 = 0, opcode2 = 0;
12628 uint32_t record_buf[8];
12629
12630 opcode1 = bits (thumb2_insn_r->arm_insn, 20, 22);
12631 opcode2 = bits (thumb2_insn_r->arm_insn, 4, 7);
12632
12633 if (0 == opcode1 || 2 == opcode1 || (opcode1 >= 4 && opcode1 <= 6))
12634 {
12635 /* Handle SMULL, UMULL, SMULAL. */
12636 /* Handle SMLAL(S), SMULL(S), UMLAL(S), UMULL(S). */
12637 record_buf[0] = bits (thumb2_insn_r->arm_insn, 16, 19);
12638 record_buf[1] = bits (thumb2_insn_r->arm_insn, 12, 15);
12639 record_buf[2] = ARM_PS_REGNUM;
12640 thumb2_insn_r->reg_rec_count = 3;
12641 }
12642 else if (1 == opcode1 || 3 == opcode2)
12643 {
12644 /* Handle SDIV and UDIV. */
12645 record_buf[0] = bits (thumb2_insn_r->arm_insn, 16, 19);
12646 record_buf[1] = bits (thumb2_insn_r->arm_insn, 12, 15);
12647 record_buf[2] = ARM_PS_REGNUM;
12648 thumb2_insn_r->reg_rec_count = 3;
12649 }
12650 else
12651 return ARM_RECORD_FAILURE;
12652
12653 REG_ALLOC (thumb2_insn_r->arm_regs, thumb2_insn_r->reg_rec_count,
12654 record_buf);
12655 return ARM_RECORD_SUCCESS;
12656 }
12657
12658 /* Record handler for thumb32 coprocessor instructions. */
12659
12660 static int
12661 thumb2_record_coproc_insn (insn_decode_record *thumb2_insn_r)
12662 {
12663 if (bit (thumb2_insn_r->arm_insn, 25))
12664 return arm_record_coproc_data_proc (thumb2_insn_r);
12665 else
12666 return arm_record_asimd_vfp_coproc (thumb2_insn_r);
12667 }
12668
12669 /* Record handler for advance SIMD structure load/store instructions. */
12670
12671 static int
12672 thumb2_record_asimd_struct_ld_st (insn_decode_record *thumb2_insn_r)
12673 {
12674 struct regcache *reg_cache = thumb2_insn_r->regcache;
12675 uint32_t l_bit, a_bit, b_bits;
12676 uint32_t record_buf[128], record_buf_mem[128];
12677 uint32_t reg_rn, reg_vd, address, f_elem;
12678 uint32_t index_r = 0, index_e = 0, bf_regs = 0, index_m = 0, loop_t = 0;
12679 uint8_t f_ebytes;
12680
12681 l_bit = bit (thumb2_insn_r->arm_insn, 21);
12682 a_bit = bit (thumb2_insn_r->arm_insn, 23);
12683 b_bits = bits (thumb2_insn_r->arm_insn, 8, 11);
12684 reg_rn = bits (thumb2_insn_r->arm_insn, 16, 19);
12685 reg_vd = bits (thumb2_insn_r->arm_insn, 12, 15);
12686 reg_vd = (bit (thumb2_insn_r->arm_insn, 22) << 4) | reg_vd;
12687 f_ebytes = (1 << bits (thumb2_insn_r->arm_insn, 6, 7));
12688 f_elem = 8 / f_ebytes;
12689
12690 if (!l_bit)
12691 {
12692 ULONGEST u_regval = 0;
12693 regcache_raw_read_unsigned (reg_cache, reg_rn, &u_regval);
12694 address = u_regval;
12695
12696 if (!a_bit)
12697 {
12698 /* Handle VST1. */
12699 if (b_bits == 0x02 || b_bits == 0x0a || (b_bits & 0x0e) == 0x06)
12700 {
12701 if (b_bits == 0x07)
12702 bf_regs = 1;
12703 else if (b_bits == 0x0a)
12704 bf_regs = 2;
12705 else if (b_bits == 0x06)
12706 bf_regs = 3;
12707 else if (b_bits == 0x02)
12708 bf_regs = 4;
12709 else
12710 bf_regs = 0;
12711
12712 for (index_r = 0; index_r < bf_regs; index_r++)
12713 {
12714 for (index_e = 0; index_e < f_elem; index_e++)
12715 {
12716 record_buf_mem[index_m++] = f_ebytes;
12717 record_buf_mem[index_m++] = address;
12718 address = address + f_ebytes;
12719 thumb2_insn_r->mem_rec_count += 1;
12720 }
12721 }
12722 }
12723 /* Handle VST2. */
12724 else if (b_bits == 0x03 || (b_bits & 0x0e) == 0x08)
12725 {
12726 if (b_bits == 0x09 || b_bits == 0x08)
12727 bf_regs = 1;
12728 else if (b_bits == 0x03)
12729 bf_regs = 2;
12730 else
12731 bf_regs = 0;
12732
12733 for (index_r = 0; index_r < bf_regs; index_r++)
12734 for (index_e = 0; index_e < f_elem; index_e++)
12735 {
12736 for (loop_t = 0; loop_t < 2; loop_t++)
12737 {
12738 record_buf_mem[index_m++] = f_ebytes;
12739 record_buf_mem[index_m++] = address + (loop_t * f_ebytes);
12740 thumb2_insn_r->mem_rec_count += 1;
12741 }
12742 address = address + (2 * f_ebytes);
12743 }
12744 }
12745 /* Handle VST3. */
12746 else if ((b_bits & 0x0e) == 0x04)
12747 {
12748 for (index_e = 0; index_e < f_elem; index_e++)
12749 {
12750 for (loop_t = 0; loop_t < 3; loop_t++)
12751 {
12752 record_buf_mem[index_m++] = f_ebytes;
12753 record_buf_mem[index_m++] = address + (loop_t * f_ebytes);
12754 thumb2_insn_r->mem_rec_count += 1;
12755 }
12756 address = address + (3 * f_ebytes);
12757 }
12758 }
12759 /* Handle VST4. */
12760 else if (!(b_bits & 0x0e))
12761 {
12762 for (index_e = 0; index_e < f_elem; index_e++)
12763 {
12764 for (loop_t = 0; loop_t < 4; loop_t++)
12765 {
12766 record_buf_mem[index_m++] = f_ebytes;
12767 record_buf_mem[index_m++] = address + (loop_t * f_ebytes);
12768 thumb2_insn_r->mem_rec_count += 1;
12769 }
12770 address = address + (4 * f_ebytes);
12771 }
12772 }
12773 }
12774 else
12775 {
12776 uint8_t bft_size = bits (thumb2_insn_r->arm_insn, 10, 11);
12777
12778 if (bft_size == 0x00)
12779 f_ebytes = 1;
12780 else if (bft_size == 0x01)
12781 f_ebytes = 2;
12782 else if (bft_size == 0x02)
12783 f_ebytes = 4;
12784 else
12785 f_ebytes = 0;
12786
12787 /* Handle VST1. */
12788 if (!(b_bits & 0x0b) || b_bits == 0x08)
12789 thumb2_insn_r->mem_rec_count = 1;
12790 /* Handle VST2. */
12791 else if ((b_bits & 0x0b) == 0x01 || b_bits == 0x09)
12792 thumb2_insn_r->mem_rec_count = 2;
12793 /* Handle VST3. */
12794 else if ((b_bits & 0x0b) == 0x02 || b_bits == 0x0a)
12795 thumb2_insn_r->mem_rec_count = 3;
12796 /* Handle VST4. */
12797 else if ((b_bits & 0x0b) == 0x03 || b_bits == 0x0b)
12798 thumb2_insn_r->mem_rec_count = 4;
12799
12800 for (index_m = 0; index_m < thumb2_insn_r->mem_rec_count; index_m++)
12801 {
12802 record_buf_mem[index_m] = f_ebytes;
12803 record_buf_mem[index_m] = address + (index_m * f_ebytes);
12804 }
12805 }
12806 }
12807 else
12808 {
12809 if (!a_bit)
12810 {
12811 /* Handle VLD1. */
12812 if (b_bits == 0x02 || b_bits == 0x0a || (b_bits & 0x0e) == 0x06)
12813 thumb2_insn_r->reg_rec_count = 1;
12814 /* Handle VLD2. */
12815 else if (b_bits == 0x03 || (b_bits & 0x0e) == 0x08)
12816 thumb2_insn_r->reg_rec_count = 2;
12817 /* Handle VLD3. */
12818 else if ((b_bits & 0x0e) == 0x04)
12819 thumb2_insn_r->reg_rec_count = 3;
12820 /* Handle VLD4. */
12821 else if (!(b_bits & 0x0e))
12822 thumb2_insn_r->reg_rec_count = 4;
12823 }
12824 else
12825 {
12826 /* Handle VLD1. */
12827 if (!(b_bits & 0x0b) || b_bits == 0x08 || b_bits == 0x0c)
12828 thumb2_insn_r->reg_rec_count = 1;
12829 /* Handle VLD2. */
12830 else if ((b_bits & 0x0b) == 0x01 || b_bits == 0x09 || b_bits == 0x0d)
12831 thumb2_insn_r->reg_rec_count = 2;
12832 /* Handle VLD3. */
12833 else if ((b_bits & 0x0b) == 0x02 || b_bits == 0x0a || b_bits == 0x0e)
12834 thumb2_insn_r->reg_rec_count = 3;
12835 /* Handle VLD4. */
12836 else if ((b_bits & 0x0b) == 0x03 || b_bits == 0x0b || b_bits == 0x0f)
12837 thumb2_insn_r->reg_rec_count = 4;
12838
12839 for (index_r = 0; index_r < thumb2_insn_r->reg_rec_count; index_r++)
12840 record_buf[index_r] = reg_vd + ARM_D0_REGNUM + index_r;
12841 }
12842 }
12843
12844 if (bits (thumb2_insn_r->arm_insn, 0, 3) != 15)
12845 {
12846 record_buf[index_r] = reg_rn;
12847 thumb2_insn_r->reg_rec_count += 1;
12848 }
12849
12850 REG_ALLOC (thumb2_insn_r->arm_regs, thumb2_insn_r->reg_rec_count,
12851 record_buf);
12852 MEM_ALLOC (thumb2_insn_r->arm_mems, thumb2_insn_r->mem_rec_count,
12853 record_buf_mem);
12854 return 0;
12855 }
12856
/* Decodes thumb2 instruction type and invokes its record handler.

   Returns the handler's result (ARM_RECORD_SUCCESS/FAILURE), or -1 if
   the instruction matches no known group.  */

static unsigned int
thumb2_record_decode_insn_handler (insn_decode_record *thumb2_insn_r)
{
  uint32_t op, op1, op2;

  /* Top-level 32-bit Thumb opcode fields: op1 (bits 27-28) selects the
     major group, op2 (bits 20-26) the subgroup, and op (bit 15)
     separates branches from data-processing when op1 == 0x02.  */
  op = bit (thumb2_insn_r->arm_insn, 15);
  op1 = bits (thumb2_insn_r->arm_insn, 27, 28);
  op2 = bits (thumb2_insn_r->arm_insn, 20, 26);

  if (op1 == 0x01)
    {
      if (!(op2 & 0x64 ))
	{
	  /* Load/store multiple instruction.  */
	  return thumb2_record_ld_st_multiple (thumb2_insn_r);
	}
      else if ((op2 & 0x64) == 0x4)
	{
	  /* Load/store (dual/exclusive) and table branch instruction.  */
	  return thumb2_record_ld_st_dual_ex_tbb (thumb2_insn_r);
	}
      else if ((op2 & 0x60) == 0x20)
	{
	  /* Data-processing (shifted register).  */
	  return thumb2_record_data_proc_sreg_mimm (thumb2_insn_r);
	}
      else if (op2 & 0x40)
	{
	  /* Co-processor instructions.  */
	  return thumb2_record_coproc_insn (thumb2_insn_r);
	}
    }
  else if (op1 == 0x02)
    {
      if (op)
	{
	  /* Branches and miscellaneous control instructions.  */
	  return thumb2_record_branch_misc_cntrl (thumb2_insn_r);
	}
      else if (op2 & 0x20)
	{
	  /* Data-processing (plain binary immediate) instruction.  */
	  return thumb2_record_ps_dest_generic (thumb2_insn_r);
	}
      else
	{
	  /* Data-processing (modified immediate).  */
	  return thumb2_record_data_proc_sreg_mimm (thumb2_insn_r);
	}
    }
  else if (op1 == 0x03)
    {
      if (!(op2 & 0x71 ))
	{
	  /* Store single data item.  */
	  return thumb2_record_str_single_data (thumb2_insn_r);
	}
      else if (!((op2 & 0x71) ^ 0x10))
	{
	  /* Advanced SIMD or structure load/store instructions.  */
	  return thumb2_record_asimd_struct_ld_st (thumb2_insn_r);
	}
      else if (!((op2 & 0x67) ^ 0x01))
	{
	  /* Load byte, memory hints instruction.  */
	  return thumb2_record_ld_mem_hints (thumb2_insn_r);
	}
      else if (!((op2 & 0x67) ^ 0x03))
	{
	  /* Load halfword, memory hints instruction.  */
	  return thumb2_record_ld_mem_hints (thumb2_insn_r);
	}
      else if (!((op2 & 0x67) ^ 0x05))
	{
	  /* Load word instruction.  */
	  return thumb2_record_ld_word (thumb2_insn_r);
	}
      else if (!((op2 & 0x70) ^ 0x20))
	{
	  /* Data-processing (register) instruction.  */
	  return thumb2_record_ps_dest_generic (thumb2_insn_r);
	}
      else if (!((op2 & 0x78) ^ 0x30))
	{
	  /* Multiply, multiply accumulate, abs diff instruction.  */
	  return thumb2_record_ps_dest_generic (thumb2_insn_r);
	}
      else if (!((op2 & 0x78) ^ 0x38))
	{
	  /* Long multiply, long multiply accumulate, and divide.  */
	  return thumb2_record_lmul_lmla_div (thumb2_insn_r);
	}
      else if (op2 & 0x40)
	{
	  /* Co-processor instructions.  */
	  return thumb2_record_coproc_insn (thumb2_insn_r);
	}
    }

  /* NOTE(review): the return type is unsigned, yet -1 is used as the
     failure value; callers only compare against ARM_RECORD_SUCCESS, so
     the wrap-around is currently benign — confirm before changing.  */
  return -1;
}
12960
12961 namespace {
12962 /* Abstract memory reader. */
12963
12964 class abstract_memory_reader
12965 {
12966 public:
12967 /* Read LEN bytes of target memory at address MEMADDR, placing the
12968 results in GDB's memory at BUF. Return true on success. */
12969
12970 virtual bool read (CORE_ADDR memaddr, gdb_byte *buf, const size_t len) = 0;
12971 };
12972
12973 /* Instruction reader from real target. */
12974
12975 class instruction_reader : public abstract_memory_reader
12976 {
12977 public:
12978 bool read (CORE_ADDR memaddr, gdb_byte *buf, const size_t len) override
12979 {
12980 if (target_read_memory (memaddr, buf, len))
12981 return false;
12982 else
12983 return true;
12984 }
12985 };
12986
12987 } // namespace
12988
/* Extracts arm/thumb/thumb2 insn depending on the size, and returns 0 on
   success and a positive value on failure.

   READER supplies the bytes (real target memory, or a fake reader in
   selftests); the decoded instruction is stored in
   INSN_RECORD->arm_insn.  INSN_SIZE is in bytes — 2 for Thumb, 4 for
   ARM/Thumb-2 (see the callers in decode_insn / arm_process_record).  */

static int
extract_arm_insn (abstract_memory_reader& reader,
		  insn_decode_record *insn_record, uint32_t insn_size)
{
  gdb_byte buf[insn_size];

  memset (&buf[0], 0, insn_size);

  if (!reader.read (insn_record->this_addr, buf, insn_size))
    return 1;
  /* Decode using the code byte order, which may differ from the data
     byte order on some configurations.  */
  insn_record->arm_insn = (uint32_t) extract_unsigned_integer (&buf[0],
			   insn_size,
			   gdbarch_byte_order_for_code (insn_record->gdbarch));
  return 0;
}
13007
/* Record-handler function type: takes the decode record, returns an
   ARM_RECORD_* status.  */
typedef int (*sti_arm_hdl_fp_t) (insn_decode_record*);

/* Decode arm/thumb insn depending on condition codes and opcodes; and
   dispatch it to the matching record handler.

   RECORD_TYPE selects the instruction set (ARM_RECORD, THUMB_RECORD or
   THUMB2_RECORD); INSN_SIZE is the instruction length in bytes.
   Returns 0 on success, -1 on read error or unsupported insn.  */

static int
decode_insn (abstract_memory_reader &reader, insn_decode_record *arm_record,
	     record_type_t record_type, uint32_t insn_size)
{

  /* (Starting from numerical 0); bits 25, 26, 27 decodes type of arm
     instruction.  */
  static const sti_arm_hdl_fp_t arm_handle_insn[8] =
  {
    arm_record_data_proc_misc_ld_str,	/* 000.  */
    arm_record_data_proc_imm,		/* 001.  */
    arm_record_ld_st_imm_offset,	/* 010.  */
    arm_record_ld_st_reg_offset,	/* 011.  */
    arm_record_ld_st_multiple,		/* 100.  */
    arm_record_b_bl,			/* 101.  */
    arm_record_asimd_vfp_coproc,	/* 110.  */
    arm_record_coproc_data_proc		/* 111.  */
  };

  /* (Starting from numerical 0); bits 13,14,15 decodes type of thumb
     instruction.  */
  static const sti_arm_hdl_fp_t thumb_handle_insn[8] =
  { \
    thumb_record_shift_add_sub,	       /* 000.  */
    thumb_record_add_sub_cmp_mov,      /* 001.  */
    thumb_record_ld_st_reg_offset,     /* 010.  */
    thumb_record_ld_st_imm_offset,     /* 011.  */
    thumb_record_ld_st_stack,	       /* 100.  */
    thumb_record_misc,		       /* 101.  */
    thumb_record_ldm_stm_swi,	       /* 110.  */
    thumb_record_branch		       /* 111.  */
  };

  uint32_t ret = 0;    /* return value: negative:failure   0:success.  */
  uint32_t insn_id = 0;

  if (extract_arm_insn (reader, arm_record, insn_size))
    {
      if (record_debug)
	{
	  printf_unfiltered (_("Process record: error reading memory at "
			       "addr %s len = %d.\n"),
			     paddress (arm_record->gdbarch,
				       arm_record->this_addr), insn_size);
	}
      return -1;
    }
  else if (ARM_RECORD == record_type)
    {
      /* ARM: condition code in bits 28-31; 0xf selects the
	 unconditional/extension space.  */
      arm_record->cond = bits (arm_record->arm_insn, 28, 31);
      insn_id = bits (arm_record->arm_insn, 25, 27);

      if (arm_record->cond == 0xf)
	ret = arm_record_extension_space (arm_record);
      else
	{
	  /* If this insn has fallen into extension space
	     then we need not decode it anymore.  */
	  ret = arm_handle_insn[insn_id] (arm_record);
	}
      if (ret != ARM_RECORD_SUCCESS)
	{
	  arm_record_unsupported_insn (arm_record);
	  ret = -1;
	}
    }
  else if (THUMB_RECORD == record_type)
    {
      /* As thumb does not have condition codes, we set negative.  */
      arm_record->cond = -1;
      insn_id = bits (arm_record->arm_insn, 13, 15);
      ret = thumb_handle_insn[insn_id] (arm_record);
      if (ret != ARM_RECORD_SUCCESS)
	{
	  arm_record_unsupported_insn (arm_record);
	  ret = -1;
	}
    }
  else if (THUMB2_RECORD == record_type)
    {
      /* As thumb does not have condition codes, we set negative.  */
      arm_record->cond = -1;

      /* Swap first half of 32bit thumb instruction with second half.  */
      arm_record->arm_insn
	= (arm_record->arm_insn >> 16) | (arm_record->arm_insn << 16);

      ret = thumb2_record_decode_insn_handler (arm_record);

      if (ret != ARM_RECORD_SUCCESS)
	{
	  arm_record_unsupported_insn (arm_record);
	  ret = -1;
	}
    }
  else
    {
      /* Throw assertion.  */
      gdb_assert_not_reached ("not a valid instruction, could not decode");
    }

  return ret;
}
13116
#if GDB_SELF_TEST
namespace selftests {

/* Provide both 16-bit and 32-bit thumb instructions.  */

class instruction_reader_thumb : public abstract_memory_reader
{
public:
  /* ENDIAN is the code byte order; INSNS is an array of SIZE 16-bit
     halfwords (a 32-bit Thumb-2 insn occupies two consecutive
     entries).  The array must outlive this reader — only a pointer is
     kept.  */
  template<size_t SIZE>
  instruction_reader_thumb (enum bfd_endian endian,
			    const uint16_t (&insns)[SIZE])
    : m_endian (endian), m_insns (insns), m_insns_size (SIZE)
  {}

  /* Serve reads from the canned array instead of target memory;
     MEMADDR is a byte offset from the start of the array.  */
  bool read (CORE_ADDR memaddr, gdb_byte *buf, const size_t len) override
  {
    SELF_CHECK (len == 4 || len == 2);
    SELF_CHECK (memaddr % 2 == 0);
    SELF_CHECK ((memaddr / 2) < m_insns_size);

    store_unsigned_integer (buf, 2, m_endian, m_insns[memaddr / 2]);
    if (len == 4)
      {
	store_unsigned_integer (&buf[2], 2, m_endian,
				m_insns[memaddr / 2 + 1]);
      }
    return true;
  }

private:
  enum bfd_endian m_endian;	/* Byte order used to serialize insns.  */
  const uint16_t *m_insns;	/* Borrowed pointer to the insn array.  */
  size_t m_insns_size;		/* Number of halfwords in m_insns.  */
};

/* Exercise decode_insn on sample 16-bit Thumb and 32-bit Thumb-2
   instructions and check the recorded register sets.  */

static void
arm_record_test (void)
{
  struct gdbarch_info info;
  gdbarch_info_init (&info);
  info.bfd_arch_info = bfd_scan_arch ("arm");

  struct gdbarch *gdbarch = gdbarch_find_by_info (info);

  SELF_CHECK (gdbarch != NULL);

  /* 16-bit Thumb instructions.  */
  {
    insn_decode_record arm_record;

    memset (&arm_record, 0, sizeof (insn_decode_record));
    arm_record.gdbarch = gdbarch;

    static const uint16_t insns[] = {
      /* db b2	uxtb	r3, r3 */
      0xb2db,
      /* cd 58	ldr	r5, [r1, r3] */
      0x58cd,
    };

    enum bfd_endian endian = gdbarch_byte_order_for_code (arm_record.gdbarch);
    instruction_reader_thumb reader (endian, insns);
    int ret = decode_insn (reader, &arm_record, THUMB_RECORD,
			   THUMB_INSN_SIZE_BYTES);

    /* UXTB writes only r3.  */
    SELF_CHECK (ret == 0);
    SELF_CHECK (arm_record.mem_rec_count == 0);
    SELF_CHECK (arm_record.reg_rec_count == 1);
    SELF_CHECK (arm_record.arm_regs[0] == 3);

    arm_record.this_addr += 2;
    ret = decode_insn (reader, &arm_record, THUMB_RECORD,
		       THUMB_INSN_SIZE_BYTES);

    /* LDR writes only r5.  */
    SELF_CHECK (ret == 0);
    SELF_CHECK (arm_record.mem_rec_count == 0);
    SELF_CHECK (arm_record.reg_rec_count == 1);
    SELF_CHECK (arm_record.arm_regs[0] == 5);
  }

  /* 32-bit Thumb-2 instructions.  */
  {
    insn_decode_record arm_record;

    memset (&arm_record, 0, sizeof (insn_decode_record));
    arm_record.gdbarch = gdbarch;

    static const uint16_t insns[] = {
      /* 1d ee 70 7f	 mrc	15, 0, r7, cr13, cr0, {3} */
      0xee1d, 0x7f70,
    };

    enum bfd_endian endian = gdbarch_byte_order_for_code (arm_record.gdbarch);
    instruction_reader_thumb reader (endian, insns);
    int ret = decode_insn (reader, &arm_record, THUMB2_RECORD,
			   THUMB2_INSN_SIZE_BYTES);

    /* MRC writes only r7.  */
    SELF_CHECK (ret == 0);
    SELF_CHECK (arm_record.mem_rec_count == 0);
    SELF_CHECK (arm_record.reg_rec_count == 1);
    SELF_CHECK (arm_record.arm_regs[0] == 7);
  }
}
} // namespace selftests
#endif /* GDB_SELF_TEST */
13222
13223 /* Cleans up local record registers and memory allocations. */
13224
13225 static void
13226 deallocate_reg_mem (insn_decode_record *record)
13227 {
13228 xfree (record->arm_regs);
13229 xfree (record->arm_mems);
13230 }
13231
13232
/* Parse the current instruction and record the values of the registers and
   memory that will be changed in current instruction to "record_arch_list".
   Return -1 if something is wrong.

   This is the gdbarch process_record entry point for ARM: it reads the
   instruction at INSN_ADDR, decides (via the PSR T bit and opcode
   bits) whether it is an ARM, Thumb or Thumb-2 insn, dispatches to the
   matching decoder and finally pushes the recorded registers and
   memory ranges onto the full-record architecture list.  */

int
arm_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
		    CORE_ADDR insn_addr)
{

  uint32_t no_of_rec = 0;
  uint32_t ret = 0;    /* return value: -1:record failure ;  0:success  */
  ULONGEST t_bit = 0, insn_id = 0;

  ULONGEST u_regval = 0;

  insn_decode_record arm_record;

  /* Zero-init: decoders rely on reg_rec_count/mem_rec_count and the
     arm_regs/arm_mems pointers starting out as 0/NULL.  */
  memset (&arm_record, 0, sizeof (insn_decode_record));
  arm_record.regcache = regcache;
  arm_record.this_addr = insn_addr;
  arm_record.gdbarch = gdbarch;


  if (record_debug > 1)
    {
      fprintf_unfiltered (gdb_stdlog, "Process record: arm_process_record "
			  "addr = %s\n",
      paddress (gdbarch, arm_record.this_addr));
    }

  /* Read just 2 bytes first — enough to classify the instruction;
     decode_insn re-reads with the full size afterwards.  */
  instruction_reader reader;
  if (extract_arm_insn (reader, &arm_record, 2))
    {
      if (record_debug)
	{
	  printf_unfiltered (_("Process record: error reading memory at "
			       "addr %s len = %d.\n"),
			     paddress (arm_record.gdbarch,
				       arm_record.this_addr), 2);
	}
      return -1;
    }

  /* Check the insn, whether it is thumb or arm one.  */

  t_bit = arm_psr_thumb_bit (arm_record.gdbarch);
  regcache_raw_read_unsigned (arm_record.regcache, ARM_PS_REGNUM, &u_regval);


  if (!(u_regval & t_bit))
    {
      /* We are decoding arm insn.  */
      ret = decode_insn (reader, &arm_record, ARM_RECORD, ARM_INSN_SIZE_BYTES);
    }
  else
    {
      insn_id = bits (arm_record.arm_insn, 11, 15);
      /* is it thumb2 insn?  The 0x1D/0x1E/0x1F top-five-bit patterns
	 mark the first halfword of a 32-bit Thumb-2 instruction.  */
      if ((0x1D == insn_id) || (0x1E == insn_id) || (0x1F == insn_id))
	{
	  ret = decode_insn (reader, &arm_record, THUMB2_RECORD,
			     THUMB2_INSN_SIZE_BYTES);
	}
      else
	{
	  /* We are decoding thumb insn.  */
	  ret = decode_insn (reader, &arm_record, THUMB_RECORD,
			     THUMB_INSN_SIZE_BYTES);
	}
    }

  if (0 == ret)
    {
      /* Record registers.  The PC is always recorded since every insn
	 advances it.  */
      record_full_arch_list_add_reg (arm_record.regcache, ARM_PC_REGNUM);
      if (arm_record.arm_regs)
	{
	  for (no_of_rec = 0; no_of_rec < arm_record.reg_rec_count; no_of_rec++)
	    {
	      if (record_full_arch_list_add_reg
		  (arm_record.regcache , arm_record.arm_regs[no_of_rec]))
		ret = -1;
	    }
	}
      /* Record memories.  */
      if (arm_record.arm_mems)
	{
	  for (no_of_rec = 0; no_of_rec < arm_record.mem_rec_count; no_of_rec++)
	    {
	      if (record_full_arch_list_add_mem
		  ((CORE_ADDR)arm_record.arm_mems[no_of_rec].addr,
		   arm_record.arm_mems[no_of_rec].len))
		ret = -1;
	    }
	}

      if (record_full_arch_list_add_end ())
	ret = -1;
    }


  /* Free the per-insn register/memory scratch arrays in all cases.  */
  deallocate_reg_mem (&arm_record);

  return ret;
}
This page took 0.31498 seconds and 5 git commands to generate.